author     Eliot Horowitz <eliot@10gen.com>    2011-12-24 15:33:26 -0500
committer  Eliot Horowitz <eliot@10gen.com>    2011-12-24 15:33:45 -0500
commit     ae1ecd9c786911f9f1f0242f0f7d702b3e5dfeba (patch)
tree       92f8e1649e6f080b251ff5f1763679a72eb59b34 /src
parent     dfa4cd7e2cf109b072440155fabc08a93c8045a0 (diff)
download   mongo-ae1ecd9c786911f9f1f0242f0f7d702b3e5dfeba.tar.gz
bulk move of code to src/ SERVER-4551
Diffstat (limited to 'src')
-rw-r--r--src/mongo/bson/README7
-rw-r--r--src/mongo/bson/bson-inl.h1007
-rw-r--r--src/mongo/bson/bson.h110
-rw-r--r--src/mongo/bson/bson_db.h88
-rw-r--r--src/mongo/bson/bsondemo/bsondemo.cpp113
-rw-r--r--src/mongo/bson/bsondemo/bsondemo.vcproj243
-rw-r--r--src/mongo/bson/bsondemo/bsondemo.vcxproj193
-rw-r--r--src/mongo/bson/bsondemo/bsondemo.vcxproj.filters52
-rw-r--r--src/mongo/bson/bsonelement.h583
-rw-r--r--src/mongo/bson/bsonmisc.h211
-rw-r--r--src/mongo/bson/bsonobj.h497
-rw-r--r--src/mongo/bson/bsonobjbuilder.h842
-rw-r--r--src/mongo/bson/bsonobjiterator.h161
-rw-r--r--src/mongo/bson/bsontypes.h107
-rw-r--r--src/mongo/bson/inline_decls.h68
-rw-r--r--src/mongo/bson/oid.cpp173
-rw-r--r--src/mongo/bson/oid.h138
-rw-r--r--src/mongo/bson/ordering.h73
-rw-r--r--src/mongo/bson/stringdata.h71
-rw-r--r--src/mongo/bson/util/atomic_int.h106
-rw-r--r--src/mongo/bson/util/builder.h322
-rw-r--r--src/mongo/bson/util/misc.h121
-rw-r--r--src/mongo/client/clientOnly.cpp92
-rw-r--r--src/mongo/client/connpool.cpp426
-rw-r--r--src/mongo/client/connpool.h291
-rw-r--r--src/mongo/client/constants.h26
-rw-r--r--src/mongo/client/dbclient.cpp1087
-rw-r--r--src/mongo/client/dbclient.h1049
-rw-r--r--src/mongo/client/dbclient_rs.cpp993
-rw-r--r--src/mongo/client/dbclient_rs.h355
-rw-r--r--src/mongo/client/dbclientcursor.cpp324
-rw-r--r--src/mongo/client/dbclientcursor.h243
-rw-r--r--src/mongo/client/dbclientmockcursor.h40
-rw-r--r--src/mongo/client/distlock.cpp958
-rw-r--r--src/mongo/client/distlock.h244
-rw-r--r--src/mongo/client/distlock_test.cpp446
-rw-r--r--src/mongo/client/examples/authTest.cpp54
-rw-r--r--src/mongo/client/examples/clientTest.cpp279
-rw-r--r--src/mongo/client/examples/first.cpp86
-rw-r--r--src/mongo/client/examples/httpClientTest.cpp58
-rw-r--r--src/mongo/client/examples/insert_demo.cpp47
-rw-r--r--src/mongo/client/examples/mongoperf.cpp269
-rwxr-xr-xsrc/mongo/client/examples/mongoperf.vcxproj113
-rwxr-xr-xsrc/mongo/client/examples/mongoperf.vcxproj.filters73
-rw-r--r--src/mongo/client/examples/rs.cpp118
-rw-r--r--src/mongo/client/examples/second.cpp56
-rwxr-xr-xsrc/mongo/client/examples/simple_client_demo.vcxproj107
-rwxr-xr-xsrc/mongo/client/examples/simple_client_demo.vcxproj.filters17
-rw-r--r--src/mongo/client/examples/tail.cpp46
-rw-r--r--src/mongo/client/examples/tutorial.cpp71
-rw-r--r--src/mongo/client/examples/whereExample.cpp69
-rw-r--r--src/mongo/client/gridfs.cpp245
-rw-r--r--src/mongo/client/gridfs.h205
-rw-r--r--src/mongo/client/model.cpp138
-rw-r--r--src/mongo/client/model.h62
-rw-r--r--src/mongo/client/mongo_client_lib.cpp82
-rw-r--r--src/mongo/client/parallel.cpp1515
-rw-r--r--src/mongo/client/parallel.h444
-rw-r--r--src/mongo/client/redef_macros.h61
-rw-r--r--src/mongo/client/simple_client_demo.cpp54
-rw-r--r--src/mongo/client/syncclusterconnection.cpp410
-rw-r--r--src/mongo/client/syncclusterconnection.h147
-rw-r--r--src/mongo/client/undef_macros.h61
-rw-r--r--src/mongo/db/background.h56
-rw-r--r--src/mongo/db/btree.cpp1980
-rw-r--r--src/mongo/db/btree.h1174
-rw-r--r--src/mongo/db/btreebuilder.cpp184
-rw-r--r--src/mongo/db/btreebuilder.h53
-rw-r--r--src/mongo/db/btreecursor.cpp457
-rw-r--r--src/mongo/db/cap.cpp457
-rw-r--r--src/mongo/db/client.cpp697
-rw-r--r--src/mongo/db/client.h286
-rw-r--r--src/mongo/db/client_common.h47
-rw-r--r--src/mongo/db/clientcursor.cpp747
-rw-r--r--src/mongo/db/clientcursor.h430
-rw-r--r--src/mongo/db/cloner.cpp763
-rw-r--r--src/mongo/db/cloner.h39
-rw-r--r--src/mongo/db/cmdline.cpp519
-rw-r--r--src/mongo/db/cmdline.h203
-rw-r--r--src/mongo/db/collection.h15
-rwxr-xr-xsrc/mongo/db/commands.cpp209
-rw-r--r--src/mongo/db/commands.h164
-rwxr-xr-xsrc/mongo/db/commands/aggregate.js184
-rw-r--r--src/mongo/db/commands/cloud.cpp90
-rw-r--r--src/mongo/db/commands/distinct.cpp157
-rwxr-xr-xsrc/mongo/db/commands/document_source_cursor.cpp100
-rw-r--r--src/mongo/db/commands/find_and_modify.cpp153
-rw-r--r--src/mongo/db/commands/group.cpp224
-rw-r--r--src/mongo/db/commands/isself.cpp246
-rw-r--r--src/mongo/db/commands/mr.cpp1317
-rw-r--r--src/mongo/db/commands/mr.h319
-rwxr-xr-xsrc/mongo/db/commands/pipeline.cpp405
-rwxr-xr-xsrc/mongo/db/commands/pipeline.h183
-rwxr-xr-xsrc/mongo/db/commands/pipeline_command.cpp187
-rw-r--r--src/mongo/db/common.cpp73
-rw-r--r--src/mongo/db/compact.cpp376
-rw-r--r--src/mongo/db/compact.h50
-rw-r--r--src/mongo/db/concurrency.h21
-rw-r--r--src/mongo/db/curop-inl.h1
-rw-r--r--src/mongo/db/curop.cpp173
-rw-r--r--src/mongo/db/curop.h313
-rw-r--r--src/mongo/db/cursor.cpp166
-rw-r--r--src/mongo/db/cursor.h246
-rwxr-xr-xsrc/mongo/db/d_concurrency.cpp231
-rw-r--r--src/mongo/db/d_concurrency.h67
-rw-r--r--src/mongo/db/d_globals.cpp20
-rw-r--r--src/mongo/db/d_globals.h27
-rw-r--r--src/mongo/db/database.cpp423
-rw-r--r--src/mongo/db/database.h145
-rw-r--r--src/mongo/db/databaseholder.h126
-rw-r--r--src/mongo/db/db.cpp1309
-rw-r--r--src/mongo/db/db.h120
-rwxr-xr-xsrc/mongo/db/db.rc12
-rwxr-xr-xsrc/mongo/db/db.vcxproj934
-rwxr-xr-xsrc/mongo/db/db.vcxproj.filters432
-rwxr-xr-xsrc/mongo/db/db_10.sln168
-rw-r--r--src/mongo/db/dbcommands.cpp1955
-rw-r--r--src/mongo/db/dbcommands_admin.cpp550
-rw-r--r--src/mongo/db/dbcommands_generic.cpp432
-rw-r--r--src/mongo/db/dbeval.cpp136
-rw-r--r--src/mongo/db/dbhelpers.cpp353
-rw-r--r--src/mongo/db/dbhelpers.h159
-rw-r--r--src/mongo/db/dbmessage.cpp108
-rw-r--r--src/mongo/db/dbmessage.h282
-rw-r--r--src/mongo/db/dbwebserver.cpp539
-rw-r--r--src/mongo/db/dbwebserver.h85
-rw-r--r--src/mongo/db/diskloc.h160
-rw-r--r--src/mongo/db/driverHelpers.cpp62
-rw-r--r--src/mongo/db/dur.cpp840
-rw-r--r--src/mongo/db/dur.h209
-rw-r--r--src/mongo/db/dur_commitjob.cpp240
-rw-r--r--src/mongo/db/dur_commitjob.h220
-rw-r--r--src/mongo/db/dur_journal.cpp748
-rw-r--r--src/mongo/db/dur_journal.h68
-rw-r--r--src/mongo/db/dur_journalformat.h174
-rw-r--r--src/mongo/db/dur_journalimpl.h103
-rw-r--r--src/mongo/db/dur_preplogbuffer.cpp177
-rw-r--r--src/mongo/db/dur_recover.cpp544
-rw-r--r--src/mongo/db/dur_recover.h50
-rw-r--r--src/mongo/db/dur_stats.h49
-rw-r--r--src/mongo/db/dur_writetodatafiles.cpp94
-rw-r--r--src/mongo/db/durop.cpp161
-rw-r--r--src/mongo/db/durop.h109
-rw-r--r--src/mongo/db/extsort.cpp245
-rw-r--r--src/mongo/db/extsort.h150
-rw-r--r--src/mongo/db/filever.h30
-rw-r--r--src/mongo/db/flushtest.cpp150
-rw-r--r--src/mongo/db/geo/2d.cpp3289
-rw-r--r--src/mongo/db/geo/core.h550
-rw-r--r--src/mongo/db/geo/haystack.cpp318
-rw-r--r--src/mongo/db/globals.h54
-rw-r--r--src/mongo/db/helpers/dblogger.h31
-rw-r--r--src/mongo/db/index.cpp446
-rw-r--r--src/mongo/db/index.h237
-rw-r--r--src/mongo/db/indexkey.cpp462
-rw-r--r--src/mongo/db/indexkey.h198
-rw-r--r--src/mongo/db/instance.cpp1148
-rw-r--r--src/mongo/db/instance.h174
-rw-r--r--src/mongo/db/introspect.cpp88
-rw-r--r--src/mongo/db/introspect.h34
-rw-r--r--src/mongo/db/javatest.cpp24
-rw-r--r--src/mongo/db/jsobj.cpp1268
-rw-r--r--src/mongo/db/jsobj.h47
-rw-r--r--src/mongo/db/jsobjmanipulator.h94
-rw-r--r--src/mongo/db/json.cpp651
-rw-r--r--src/mongo/db/json.h41
-rw-r--r--src/mongo/db/key.cpp678
-rw-r--r--src/mongo/db/key.h115
-rw-r--r--src/mongo/db/lasterror.cpp142
-rw-r--r--src/mongo/db/lasterror.h146
-rwxr-xr-xsrc/mongo/db/matcher.cpp1128
-rw-r--r--src/mongo/db/matcher.h276
-rw-r--r--src/mongo/db/matcher_covered.cpp101
-rw-r--r--src/mongo/db/minilex.h164
-rw-r--r--src/mongo/db/module.cpp68
-rw-r--r--src/mongo/db/module.h70
-rw-r--r--src/mongo/db/modules/mms.cpp170
-rwxr-xr-xsrc/mongo/db/mongo.icobin0 -> 51262 bytes
-rw-r--r--src/mongo/db/mongommf.cpp339
-rw-r--r--src/mongo/db/mongommf.h145
-rw-r--r--src/mongo/db/mongomutex.h388
-rw-r--r--src/mongo/db/namespace-inl.h132
-rw-r--r--src/mongo/db/namespace.cpp800
-rw-r--r--src/mongo/db/namespace.h629
-rw-r--r--src/mongo/db/namespacestring.h147
-rw-r--r--src/mongo/db/nonce.cpp95
-rw-r--r--src/mongo/db/nonce.h36
-rw-r--r--src/mongo/db/oplog.cpp872
-rw-r--r--src/mongo/db/oplog.h149
-rw-r--r--src/mongo/db/oplogreader.h121
-rw-r--r--src/mongo/db/ops/count.cpp103
-rw-r--r--src/mongo/db/ops/count.h30
-rw-r--r--src/mongo/db/ops/delete.cpp158
-rw-r--r--src/mongo/db/ops/delete.h33
-rw-r--r--src/mongo/db/ops/query.cpp870
-rw-r--r--src/mongo/db/ops/query.h248
-rw-r--r--src/mongo/db/ops/update.cpp1308
-rw-r--r--src/mongo/db/ops/update.h700
-rw-r--r--src/mongo/db/pagefault.cpp55
-rw-r--r--src/mongo/db/pagefault.h46
-rw-r--r--src/mongo/db/pcre.txt15
-rw-r--r--src/mongo/db/pdfile.cpp2425
-rw-r--r--src/mongo/db/pdfile.h546
-rwxr-xr-xsrc/mongo/db/pipeline/accumulator.cpp92
-rwxr-xr-xsrc/mongo/db/pipeline/accumulator.h259
-rwxr-xr-xsrc/mongo/db/pipeline/accumulator_add_to_set.cpp79
-rwxr-xr-xsrc/mongo/db/pipeline/accumulator_avg.cpp123
-rwxr-xr-xsrc/mongo/db/pipeline/accumulator_first.cpp49
-rwxr-xr-xsrc/mongo/db/pipeline/accumulator_last.cpp48
-rwxr-xr-xsrc/mongo/db/pipeline/accumulator_min_max.cpp67
-rwxr-xr-xsrc/mongo/db/pipeline/accumulator_push.cpp73
-rwxr-xr-xsrc/mongo/db/pipeline/accumulator_single_value.cpp32
-rwxr-xr-xsrc/mongo/db/pipeline/accumulator_sum.cpp74
-rwxr-xr-xsrc/mongo/db/pipeline/builder.cpp117
-rwxr-xr-xsrc/mongo/db/pipeline/builder.h95
-rwxr-xr-xsrc/mongo/db/pipeline/doc_mem_monitor.cpp68
-rwxr-xr-xsrc/mongo/db/pipeline/doc_mem_monitor.h94
-rwxr-xr-xsrc/mongo/db/pipeline/document.cpp219
-rwxr-xr-xsrc/mongo/db/pipeline/document.h246
-rwxr-xr-xsrc/mongo/db/pipeline/document_source.cpp52
-rwxr-xr-xsrc/mongo/db/pipeline/document_source.h985
-rwxr-xr-xsrc/mongo/db/pipeline/document_source_bson_array.cpp83
-rwxr-xr-xsrc/mongo/db/pipeline/document_source_command_futures.cpp132
-rwxr-xr-xsrc/mongo/db/pipeline/document_source_filter.cpp98
-rwxr-xr-xsrc/mongo/db/pipeline/document_source_filter_base.cpp85
-rwxr-xr-xsrc/mongo/db/pipeline/document_source_group.cpp391
-rw-r--r--src/mongo/db/pipeline/document_source_limit.cpp83
-rwxr-xr-xsrc/mongo/db/pipeline/document_source_match.cpp80
-rwxr-xr-xsrc/mongo/db/pipeline/document_source_out.cpp56
-rwxr-xr-xsrc/mongo/db/pipeline/document_source_project.cpp201
-rw-r--r--src/mongo/db/pipeline/document_source_skip.cpp99
-rwxr-xr-xsrc/mongo/db/pipeline/document_source_sort.cpp216
-rwxr-xr-xsrc/mongo/db/pipeline/document_source_unwind.cpp234
-rwxr-xr-xsrc/mongo/db/pipeline/expression.cpp2815
-rwxr-xr-xsrc/mongo/db/pipeline/expression.h1223
-rwxr-xr-xsrc/mongo/db/pipeline/expression_context.cpp35
-rwxr-xr-xsrc/mongo/db/pipeline/expression_context.h67
-rwxr-xr-xsrc/mongo/db/pipeline/field_path.cpp87
-rwxr-xr-xsrc/mongo/db/pipeline/field_path.h82
-rwxr-xr-xsrc/mongo/db/pipeline/value.cpp1034
-rwxr-xr-xsrc/mongo/db/pipeline/value.h468
-rw-r--r--src/mongo/db/projection.cpp301
-rw-r--r--src/mongo/db/projection.h129
-rw-r--r--src/mongo/db/queryoptimizer.cpp1337
-rw-r--r--src/mongo/db/queryoptimizer.h599
-rw-r--r--src/mongo/db/queryoptimizercursor.cpp530
-rw-r--r--src/mongo/db/queryoptimizercursor.h150
-rw-r--r--src/mongo/db/querypattern.cpp99
-rw-r--r--src/mongo/db/querypattern.h78
-rw-r--r--src/mongo/db/queryutil-inl.h153
-rw-r--r--src/mongo/db/queryutil.cpp1551
-rw-r--r--src/mongo/db/queryutil.h443
-rw-r--r--src/mongo/db/record.cpp267
-rw-r--r--src/mongo/db/repl.cpp1516
-rw-r--r--src/mongo/db/repl.h199
-rw-r--r--src/mongo/db/repl/connections.h128
-rw-r--r--src/mongo/db/repl/consensus.cpp449
-rw-r--r--src/mongo/db/repl/health.cpp449
-rw-r--r--src/mongo/db/repl/health.h50
-rw-r--r--src/mongo/db/repl/heartbeat.cpp382
-rw-r--r--src/mongo/db/repl/manager.cpp274
-rw-r--r--src/mongo/db/repl/multicmd.h75
-rw-r--r--src/mongo/db/repl/replset_commands.cpp404
-rw-r--r--src/mongo/db/repl/rs.cpp778
-rw-r--r--src/mongo/db/repl/rs.h667
-rw-r--r--src/mongo/db/repl/rs_config.cpp662
-rw-r--r--src/mongo/db/repl/rs_config.h251
-rw-r--r--src/mongo/db/repl/rs_exception.h17
-rw-r--r--src/mongo/db/repl/rs_initialsync.cpp271
-rw-r--r--src/mongo/db/repl/rs_initiate.cpp269
-rw-r--r--src/mongo/db/repl/rs_member.h131
-rw-r--r--src/mongo/db/repl/rs_optime.h58
-rw-r--r--src/mongo/db/repl/rs_rollback.cpp667
-rw-r--r--src/mongo/db/repl/rs_sync.cpp701
-rw-r--r--src/mongo/db/repl/test.html11
-rw-r--r--src/mongo/db/repl/testing.js42
-rw-r--r--src/mongo/db/repl_block.cpp256
-rw-r--r--src/mongo/db/repl_block.h39
-rw-r--r--src/mongo/db/replutil.h102
-rw-r--r--src/mongo/db/resource.h16
-rw-r--r--src/mongo/db/restapi.cpp294
-rw-r--r--src/mongo/db/restapi.h34
-rw-r--r--src/mongo/db/scanandorder.cpp105
-rw-r--r--src/mongo/db/scanandorder.h111
-rw-r--r--src/mongo/db/security.cpp106
-rwxr-xr-xsrc/mongo/db/security.h113
-rw-r--r--src/mongo/db/security_commands.cpp150
-rw-r--r--src/mongo/db/security_common.cpp148
-rw-r--r--src/mongo/db/security_common.h85
-rw-r--r--src/mongo/db/stats/counters.cpp207
-rw-r--r--src/mongo/db/stats/counters.h159
-rw-r--r--src/mongo/db/stats/fine_clock.h67
-rw-r--r--src/mongo/db/stats/service_stats.cpp68
-rw-r--r--src/mongo/db/stats/service_stats.h66
-rw-r--r--src/mongo/db/stats/snapshots.cpp227
-rw-r--r--src/mongo/db/stats/snapshots.h114
-rw-r--r--src/mongo/db/stats/top.cpp183
-rw-r--r--src/mongo/db/stats/top.h247
-rw-r--r--src/mongo/db/taskqueue.h106
-rw-r--r--src/mongo/db/tests.cpp68
-rw-r--r--src/mongo/dbtests/background_job_test.cpp109
-rw-r--r--src/mongo/dbtests/balancer_policy_tests.cpp203
-rw-r--r--src/mongo/dbtests/basictests.cpp695
-rw-r--r--src/mongo/dbtests/btreetests.cpp59
-rw-r--r--src/mongo/dbtests/btreetests.inl1713
-rw-r--r--src/mongo/dbtests/clienttests.cpp197
-rw-r--r--src/mongo/dbtests/commandtests.cpp98
-rw-r--r--src/mongo/dbtests/counttests.cpp142
-rw-r--r--src/mongo/dbtests/cursortests.cpp305
-rw-r--r--src/mongo/dbtests/d_chunk_manager_tests.cpp467
-rw-r--r--src/mongo/dbtests/dbtests.cpp29
-rw-r--r--src/mongo/dbtests/dbtests.h25
-rw-r--r--src/mongo/dbtests/directclienttests.cpp103
-rw-r--r--src/mongo/dbtests/framework.cpp446
-rw-r--r--src/mongo/dbtests/framework.h199
-rw-r--r--src/mongo/dbtests/histogram_test.cpp94
-rw-r--r--src/mongo/dbtests/jsobjtests.cpp2208
-rw-r--r--src/mongo/dbtests/jsontests.cpp1185
-rw-r--r--src/mongo/dbtests/jstests.cpp1052
-rw-r--r--src/mongo/dbtests/macrotests.cpp47
-rw-r--r--src/mongo/dbtests/matchertests.cpp163
-rw-r--r--src/mongo/dbtests/mmaptests.cpp219
-rw-r--r--src/mongo/dbtests/namespacetests.cpp1244
-rw-r--r--src/mongo/dbtests/pdfiletests.cpp407
-rw-r--r--src/mongo/dbtests/perf/btreeperf.cpp442
-rw-r--r--src/mongo/dbtests/perf/perftest.cpp761
-rw-r--r--src/mongo/dbtests/perftests.cpp1029
-rw-r--r--src/mongo/dbtests/queryoptimizercursortests.cpp2521
-rw-r--r--src/mongo/dbtests/queryoptimizertests.cpp1063
-rw-r--r--src/mongo/dbtests/querytests.cpp1408
-rw-r--r--src/mongo/dbtests/queryutiltests.cpp989
-rw-r--r--src/mongo/dbtests/replsettests.cpp227
-rw-r--r--src/mongo/dbtests/repltests.cpp1228
-rw-r--r--src/mongo/dbtests/sharding.cpp56
-rw-r--r--src/mongo/dbtests/socktests.cpp48
-rw-r--r--src/mongo/dbtests/spin_lock_test.cpp114
-rwxr-xr-xsrc/mongo/dbtests/test.sln26
-rw-r--r--src/mongo/dbtests/test.vcxproj776
-rwxr-xr-xsrc/mongo/dbtests/test.vcxproj.filters939
-rw-r--r--src/mongo/dbtests/threadedtests.cpp649
-rw-r--r--src/mongo/dbtests/updatetests.cpp877
-rw-r--r--src/mongo/pch.cpp41
-rw-r--r--src/mongo/pch.h184
-rw-r--r--src/mongo/s/balance.cpp348
-rw-r--r--src/mongo/s/balance.h105
-rw-r--r--src/mongo/s/balancer_policy.cpp192
-rw-r--r--src/mongo/s/balancer_policy.h98
-rw-r--r--src/mongo/s/chunk.cpp1104
-rw-r--r--src/mongo/s/chunk.h420
-rw-r--r--src/mongo/s/client.cpp326
-rw-r--r--src/mongo/s/client.h128
-rw-r--r--src/mongo/s/commands_admin.cpp1239
-rw-r--r--src/mongo/s/commands_public.cpp1565
-rw-r--r--src/mongo/s/config.cpp879
-rw-r--r--src/mongo/s/config.h268
-rw-r--r--src/mongo/s/config_migrate.cpp196
-rw-r--r--src/mongo/s/cursors.cpp316
-rw-r--r--src/mongo/s/cursors.h106
-rw-r--r--src/mongo/s/d_chunk_manager.cpp339
-rw-r--r--src/mongo/s/d_chunk_manager.h167
-rw-r--r--src/mongo/s/d_logic.cpp121
-rw-r--r--src/mongo/s/d_logic.h246
-rw-r--r--src/mongo/s/d_migrate.cpp1728
-rw-r--r--src/mongo/s/d_split.cpp830
-rw-r--r--src/mongo/s/d_state.cpp753
-rw-r--r--src/mongo/s/d_writeback.cpp179
-rw-r--r--src/mongo/s/d_writeback.h106
-rw-r--r--src/mongo/s/dbgrid.vcxproj691
-rwxr-xr-xsrc/mongo/s/dbgrid.vcxproj.filters614
-rw-r--r--src/mongo/s/default_version.cpp52
-rw-r--r--src/mongo/s/grid.cpp531
-rw-r--r--src/mongo/s/grid.h135
-rw-r--r--src/mongo/s/mr_shard.cpp316
-rw-r--r--src/mongo/s/mr_shard.h235
-rw-r--r--src/mongo/s/request.cpp164
-rw-r--r--src/mongo/s/request.h114
-rw-r--r--src/mongo/s/s_only.cpp111
-rw-r--r--src/mongo/s/security.cpp101
-rw-r--r--src/mongo/s/server.cpp429
-rw-r--r--src/mongo/s/server.h29
-rw-r--r--src/mongo/s/shard.cpp410
-rw-r--r--src/mongo/s/shard.h308
-rw-r--r--src/mongo/s/shard_version.cpp269
-rw-r--r--src/mongo/s/shard_version.h32
-rw-r--r--src/mongo/s/shardconnection.cpp248
-rw-r--r--src/mongo/s/shardkey.cpp273
-rw-r--r--src/mongo/s/shardkey.h124
-rw-r--r--src/mongo/s/stats.cpp28
-rw-r--r--src/mongo/s/stats.h30
-rw-r--r--src/mongo/s/strategy.cpp111
-rw-r--r--src/mongo/s/strategy.h59
-rw-r--r--src/mongo/s/strategy_shard.cpp414
-rw-r--r--src/mongo/s/strategy_single.cpp272
-rw-r--r--src/mongo/s/util.h183
-rw-r--r--src/mongo/s/writeback_listener.cpp285
-rw-r--r--src/mongo/s/writeback_listener.h89
-rw-r--r--src/mongo/scripting/bench.cpp785
-rw-r--r--src/mongo/scripting/engine.cpp519
-rw-r--r--src/mongo/scripting/engine.h235
-rw-r--r--src/mongo/scripting/engine_java.cpp764
-rw-r--r--src/mongo/scripting/engine_java.h223
-rw-r--r--src/mongo/scripting/engine_none.cpp24
-rw-r--r--src/mongo/scripting/engine_spidermonkey.cpp1766
-rw-r--r--src/mongo/scripting/engine_spidermonkey.h105
-rw-r--r--src/mongo/scripting/engine_v8.cpp1634
-rw-r--r--src/mongo/scripting/engine_v8.h254
-rw-r--r--src/mongo/scripting/sm_db.cpp1284
-rw-r--r--src/mongo/scripting/utils.cpp77
-rw-r--r--src/mongo/scripting/v8_db.cpp1128
-rw-r--r--src/mongo/scripting/v8_db.h94
-rw-r--r--src/mongo/scripting/v8_utils.cpp295
-rw-r--r--src/mongo/scripting/v8_utils.h43
-rw-r--r--src/mongo/scripting/v8_wrapper.cpp99
-rw-r--r--src/mongo/scripting/v8_wrapper.h34
-rw-r--r--src/mongo/server.h46
-rw-r--r--src/mongo/shell/collection.js893
-rw-r--r--src/mongo/shell/db.js881
-rw-r--r--src/mongo/shell/dbshell.cpp962
-rw-r--r--src/mongo/shell/mongo.js102
-rw-r--r--src/mongo/shell/mr.js95
-rw-r--r--src/mongo/shell/msvc/createCPPfromJavaScriptFiles.js105
-rwxr-xr-xsrc/mongo/shell/msvc/mongo.icobin0 -> 1078 bytes
-rw-r--r--src/mongo/shell/msvc/mongo.sln20
-rw-r--r--src/mongo/shell/msvc/mongo.vcxproj272
-rw-r--r--src/mongo/shell/msvc/mongo.vcxproj.filters285
-rw-r--r--src/mongo/shell/query.js344
-rwxr-xr-xsrc/mongo/shell/servers.js2618
-rw-r--r--src/mongo/shell/shell_utils.cpp985
-rw-r--r--src/mongo/shell/utils.h48
-rw-r--r--src/mongo/shell/utils.js1896
-rw-r--r--src/mongo/shell/utils_sh.js164
-rw-r--r--src/mongo/targetver.h20
-rw-r--r--src/mongo/tools/bridge.cpp166
-rw-r--r--src/mongo/tools/bsondump.cpp140
-rw-r--r--src/mongo/tools/dump.cpp527
-rw-r--r--src/mongo/tools/export.cpp248
-rw-r--r--src/mongo/tools/files.cpp164
-rw-r--r--src/mongo/tools/import.cpp463
-rw-r--r--src/mongo/tools/oplog.cpp108
-rw-r--r--src/mongo/tools/restore.cpp583
-rw-r--r--src/mongo/tools/sniffer.cpp566
-rw-r--r--src/mongo/tools/stat.cpp544
-rw-r--r--src/mongo/tools/stat_util.cpp269
-rw-r--r--src/mongo/tools/stat_util.h78
-rw-r--r--src/mongo/tools/tool.cpp526
-rw-r--r--src/mongo/tools/tool.h160
-rw-r--r--src/mongo/tools/top.cpp200
-rw-r--r--src/mongo/util/admin_access.h52
-rw-r--r--src/mongo/util/alignedbuilder.cpp141
-rw-r--r--src/mongo/util/alignedbuilder.h125
-rw-r--r--src/mongo/util/allocator.h39
-rw-r--r--src/mongo/util/array.h127
-rw-r--r--src/mongo/util/assert_util.cpp213
-rw-r--r--src/mongo/util/assert_util.h275
-rw-r--r--src/mongo/util/background.cpp190
-rw-r--r--src/mongo/util/background.h155
-rw-r--r--src/mongo/util/base64.cpp109
-rw-r--r--src/mongo/util/base64.h68
-rw-r--r--src/mongo/util/bson_util.h42
-rw-r--r--src/mongo/util/bufreader.h100
-rw-r--r--src/mongo/util/checksum.h37
-rw-r--r--src/mongo/util/compress.cpp31
-rw-r--r--src/mongo/util/compress.h21
-rw-r--r--src/mongo/util/concurrency/README39
-rw-r--r--src/mongo/util/concurrency/list.h99
-rw-r--r--src/mongo/util/concurrency/msg.h61
-rw-r--r--src/mongo/util/concurrency/mutex.h228
-rw-r--r--src/mongo/util/concurrency/mutexdebugger.h117
-rw-r--r--src/mongo/util/concurrency/mvar.h118
-rw-r--r--src/mongo/util/concurrency/race.h77
-rw-r--r--src/mongo/util/concurrency/rwlock.h271
-rw-r--r--src/mongo/util/concurrency/rwlockimpl.h170
-rw-r--r--src/mongo/util/concurrency/shared_mutex_win.hpp594
-rw-r--r--src/mongo/util/concurrency/spin_lock.cpp107
-rw-r--r--src/mongo/util/concurrency/spin_lock.h77
-rw-r--r--src/mongo/util/concurrency/synchronization.cpp81
-rw-r--r--src/mongo/util/concurrency/synchronization.h86
-rw-r--r--src/mongo/util/concurrency/task.cpp181
-rw-r--r--src/mongo/util/concurrency/task.h72
-rw-r--r--src/mongo/util/concurrency/thread_pool.cpp141
-rw-r--r--src/mongo/util/concurrency/thread_pool.h82
-rw-r--r--src/mongo/util/concurrency/threadlocal.h126
-rw-r--r--src/mongo/util/concurrency/value.h139
-rw-r--r--src/mongo/util/concurrency/vars.cpp56
-rw-r--r--src/mongo/util/debug_util.cpp60
-rw-r--r--src/mongo/util/debug_util.h106
-rw-r--r--src/mongo/util/embedded_builder.h92
-rw-r--r--src/mongo/util/file.h230
-rw-r--r--src/mongo/util/file_allocator.cpp329
-rw-r--r--src/mongo/util/file_allocator.h91
-rw-r--r--src/mongo/util/goodies.h475
-rw-r--r--src/mongo/util/hashtab.h179
-rw-r--r--src/mongo/util/heapcheck.h33
-rw-r--r--src/mongo/util/hex.h67
-rw-r--r--src/mongo/util/histogram.cpp131
-rw-r--r--src/mongo/util/histogram.h128
-rwxr-xr-xsrc/mongo/util/intrusive_counter.cpp30
-rwxr-xr-xsrc/mongo/util/intrusive_counter.h79
-rw-r--r--src/mongo/util/log.cpp197
-rw-r--r--src/mongo/util/log.h581
-rw-r--r--src/mongo/util/logfile.cpp253
-rw-r--r--src/mongo/util/logfile.h58
-rw-r--r--src/mongo/util/lruishmap.h78
-rw-r--r--src/mongo/util/md5.c381
-rw-r--r--src/mongo/util/md5.h91
-rw-r--r--src/mongo/util/md5.hpp58
-rw-r--r--src/mongo/util/md5main.cpp142
-rwxr-xr-xsrc/mongo/util/mmap.cpp211
-rw-r--r--src/mongo/util/mmap.h305
-rw-r--r--src/mongo/util/mmap_mm.cpp52
-rw-r--r--src/mongo/util/mmap_posix.cpp214
-rw-r--r--src/mongo/util/mmap_win.cpp202
-rwxr-xr-xsrc/mongo/util/mongoutils/README15
-rw-r--r--src/mongo/util/mongoutils/checksum.h32
-rw-r--r--src/mongo/util/mongoutils/hash.h41
-rw-r--r--src/mongo/util/mongoutils/html.h158
-rwxr-xr-xsrc/mongo/util/mongoutils/mongoutils.vcxproj75
-rwxr-xr-xsrc/mongo/util/mongoutils/mongoutils.vcxproj.filters10
-rw-r--r--src/mongo/util/mongoutils/str.h216
-rw-r--r--src/mongo/util/mongoutils/test.cpp45
-rw-r--r--src/mongo/util/moveablebuffer.h51
-rw-r--r--src/mongo/util/net/hostandport.h239
-rw-r--r--src/mongo/util/net/httpclient.cpp177
-rw-r--r--src/mongo/util/net/httpclient.h78
-rw-r--r--src/mongo/util/net/listen.cpp394
-rw-r--r--src/mongo/util/net/listen.h190
-rw-r--r--src/mongo/util/net/message.cpp64
-rw-r--r--src/mongo/util/net/message.h312
-rw-r--r--src/mongo/util/net/message_port.cpp303
-rw-r--r--src/mongo/util/net/message_port.h108
-rw-r--r--src/mongo/util/net/message_server.h66
-rw-r--r--src/mongo/util/net/message_server_asio.cpp261
-rw-r--r--src/mongo/util/net/message_server_port.cpp204
-rw-r--r--src/mongo/util/net/miniwebserver.cpp212
-rw-r--r--src/mongo/util/net/miniwebserver.h60
-rw-r--r--src/mongo/util/net/sock.cpp763
-rw-r--r--src/mongo/util/net/sock.h261
-rw-r--r--src/mongo/util/ntservice.cpp408
-rw-r--r--src/mongo/util/ntservice.h49
-rw-r--r--src/mongo/util/optime.h170
-rw-r--r--src/mongo/util/password.cpp91
-rw-r--r--src/mongo/util/password.h61
-rw-r--r--src/mongo/util/paths.h124
-rw-r--r--src/mongo/util/processinfo.cpp48
-rw-r--r--src/mongo/util/processinfo.h67
-rw-r--r--src/mongo/util/processinfo_darwin.cpp116
-rw-r--r--src/mongo/util/processinfo_linux2.cpp244
-rw-r--r--src/mongo/util/processinfo_none.cpp55
-rw-r--r--src/mongo/util/processinfo_win32.cpp102
-rw-r--r--src/mongo/util/queue.h106
-rw-r--r--src/mongo/util/ramlog.cpp190
-rw-r--r--src/mongo/util/ramlog.h65
-rw-r--r--src/mongo/util/scopeguard.h427
-rw-r--r--src/mongo/util/signal_handlers.cpp122
-rw-r--r--src/mongo/util/signal_handlers.h34
-rwxr-xr-xsrc/mongo/util/string_writer.h28
-rw-r--r--src/mongo/util/stringutils.cpp44
-rw-r--r--src/mongo/util/stringutils.h139
-rwxr-xr-xsrc/mongo/util/systeminfo.h41
-rwxr-xr-xsrc/mongo/util/systeminfo_linux2.cpp47
-rwxr-xr-xsrc/mongo/util/systeminfo_none.cpp26
-rwxr-xr-xsrc/mongo/util/systeminfo_win32.cpp48
-rw-r--r--src/mongo/util/text.cpp115
-rw-r--r--src/mongo/util/text.h148
-rw-r--r--src/mongo/util/time_support.h255
-rw-r--r--src/mongo/util/timer.h115
-rw-r--r--src/mongo/util/unittest.h62
-rw-r--r--src/mongo/util/util.cpp220
-rw-r--r--src/mongo/util/version.cpp288
-rw-r--r--src/mongo/util/version.h27
-rw-r--r--src/mongo/util/winutil.h44
-rw-r--r--src/third_party/README6
-rw-r--r--src/third_party/js-1.7/Makefile.in388
-rw-r--r--src/third_party/js-1.7/Makefile.ref375
-rw-r--r--src/third_party/js-1.7/README.html826
-rw-r--r--src/third_party/js-1.7/SpiderMonkey.rsp12
-rw-r--r--src/third_party/js-1.7/Y.js19
-rw-r--r--src/third_party/js-1.7/config.mk186
-rw-r--r--src/third_party/js-1.7/config/AIX4.1.mk65
-rw-r--r--src/third_party/js-1.7/config/AIX4.2.mk64
-rw-r--r--src/third_party/js-1.7/config/AIX4.3.mk65
-rw-r--r--src/third_party/js-1.7/config/CVS/Entries36
-rw-r--r--src/third_party/js-1.7/config/CVS/Repository1
-rw-r--r--src/third_party/js-1.7/config/CVS/Root1
-rw-r--r--src/third_party/js-1.7/config/CVS/Tag1
-rw-r--r--src/third_party/js-1.7/config/Darwin.mk83
-rwxr-xr-xsrc/third_party/js-1.7/config/Darwin1.3.mk81
-rwxr-xr-xsrc/third_party/js-1.7/config/Darwin1.4.mk41
-rwxr-xr-xsrc/third_party/js-1.7/config/Darwin5.2.mk81
-rw-r--r--src/third_party/js-1.7/config/Darwin5.3.mk81
-rw-r--r--src/third_party/js-1.7/config/HP-UXB.10.10.mk77
-rw-r--r--src/third_party/js-1.7/config/HP-UXB.10.20.mk77
-rw-r--r--src/third_party/js-1.7/config/HP-UXB.11.00.mk80
-rw-r--r--src/third_party/js-1.7/config/IRIX.mk87
-rw-r--r--src/third_party/js-1.7/config/IRIX5.3.mk44
-rw-r--r--src/third_party/js-1.7/config/IRIX6.1.mk44
-rw-r--r--src/third_party/js-1.7/config/IRIX6.2.mk44
-rw-r--r--src/third_party/js-1.7/config/IRIX6.3.mk44
-rw-r--r--src/third_party/js-1.7/config/IRIX6.5.mk44
-rw-r--r--src/third_party/js-1.7/config/Linux_All.mk103
-rwxr-xr-xsrc/third_party/js-1.7/config/Mac_OS10.0.mk82
-rw-r--r--src/third_party/js-1.7/config/OSF1V4.0.mk72
-rw-r--r--src/third_party/js-1.7/config/OSF1V5.0.mk69
-rw-r--r--src/third_party/js-1.7/config/SunOS4.1.4.mk101
-rw-r--r--src/third_party/js-1.7/config/SunOS5.3.mk91
-rw-r--r--src/third_party/js-1.7/config/SunOS5.4.mk92
-rw-r--r--src/third_party/js-1.7/config/SunOS5.5.1.mk44
-rw-r--r--src/third_party/js-1.7/config/SunOS5.5.mk87
-rw-r--r--src/third_party/js-1.7/config/SunOS5.6.mk89
-rw-r--r--src/third_party/js-1.7/config/SunOS5.7.mk44
-rw-r--r--src/third_party/js-1.7/config/SunOS5.8.mk44
-rw-r--r--src/third_party/js-1.7/config/SunOS5.9.mk44
-rw-r--r--src/third_party/js-1.7/config/WINNT4.0.mk117
-rw-r--r--src/third_party/js-1.7/config/WINNT5.0.mk117
-rw-r--r--src/third_party/js-1.7/config/WINNT5.1.mk117
-rw-r--r--src/third_party/js-1.7/config/WINNT5.2.mk117
-rw-r--r--src/third_party/js-1.7/config/dgux.mk64
-rw-r--r--src/third_party/js-1.7/fdlibm/.cvsignore7
-rw-r--r--src/third_party/js-1.7/fdlibm/CVS/Entries87
-rw-r--r--src/third_party/js-1.7/fdlibm/CVS/Repository1
-rw-r--r--src/third_party/js-1.7/fdlibm/CVS/Root1
-rw-r--r--src/third_party/js-1.7/fdlibm/CVS/Tag1
-rw-r--r--src/third_party/js-1.7/fdlibm/Makefile.in127
-rw-r--r--src/third_party/js-1.7/fdlibm/Makefile.ref192
-rw-r--r--src/third_party/js-1.7/fdlibm/e_acos.c147
-rw-r--r--src/third_party/js-1.7/fdlibm/e_acosh.c105
-rw-r--r--src/third_party/js-1.7/fdlibm/e_asin.c156
-rw-r--r--src/third_party/js-1.7/fdlibm/e_atan2.c165
-rw-r--r--src/third_party/js-1.7/fdlibm/e_atanh.c110
-rw-r--r--src/third_party/js-1.7/fdlibm/e_cosh.c133
-rw-r--r--src/third_party/js-1.7/fdlibm/e_exp.c202
-rw-r--r--src/third_party/js-1.7/fdlibm/e_fmod.c184
-rw-r--r--src/third_party/js-1.7/fdlibm/e_gamma.c71
-rw-r--r--src/third_party/js-1.7/fdlibm/e_gamma_r.c70
-rw-r--r--src/third_party/js-1.7/fdlibm/e_hypot.c173
-rw-r--r--src/third_party/js-1.7/fdlibm/e_j0.c524
-rw-r--r--src/third_party/js-1.7/fdlibm/e_j1.c523
-rw-r--r--src/third_party/js-1.7/fdlibm/e_jn.c315
-rw-r--r--src/third_party/js-1.7/fdlibm/e_lgamma.c71
-rw-r--r--src/third_party/js-1.7/fdlibm/e_lgamma_r.c347
-rw-r--r--src/third_party/js-1.7/fdlibm/e_log.c184
-rw-r--r--src/third_party/js-1.7/fdlibm/e_log10.c134
-rw-r--r--src/third_party/js-1.7/fdlibm/e_pow.c386
-rw-r--r--src/third_party/js-1.7/fdlibm/e_rem_pio2.c222
-rw-r--r--src/third_party/js-1.7/fdlibm/e_remainder.c120
-rw-r--r--src/third_party/js-1.7/fdlibm/e_scalb.c89
-rw-r--r--src/third_party/js-1.7/fdlibm/e_sinh.c122
-rw-r--r--src/third_party/js-1.7/fdlibm/e_sqrt.c497
-rw-r--r--src/third_party/js-1.7/fdlibm/fdlibm.h273
-rw-r--r--src/third_party/js-1.7/fdlibm/fdlibm.mak1453
-rw-r--r--src/third_party/js-1.7/fdlibm/fdlibm.mdpbin0 -> 42143 bytes
-rw-r--r--src/third_party/js-1.7/fdlibm/k_cos.c135
-rw-r--r--src/third_party/js-1.7/fdlibm/k_rem_pio2.c354
-rw-r--r--src/third_party/js-1.7/fdlibm/k_sin.c114
-rw-r--r--src/third_party/js-1.7/fdlibm/k_standard.c785
-rw-r--r--src/third_party/js-1.7/fdlibm/k_tan.c170
-rw-r--r--src/third_party/js-1.7/fdlibm/s_asinh.c101
-rw-r--r--src/third_party/js-1.7/fdlibm/s_atan.c175
-rw-r--r--src/third_party/js-1.7/fdlibm/s_cbrt.c133
-rw-r--r--src/third_party/js-1.7/fdlibm/s_ceil.c120
-rw-r--r--src/third_party/js-1.7/fdlibm/s_copysign.c72
-rw-r--r--src/third_party/js-1.7/fdlibm/s_cos.c118
-rw-r--r--src/third_party/js-1.7/fdlibm/s_erf.c356
-rw-r--r--src/third_party/js-1.7/fdlibm/s_expm1.c267
-rw-r--r--src/third_party/js-1.7/fdlibm/s_fabs.c70
-rw-r--r--src/third_party/js-1.7/fdlibm/s_finite.c71
-rw-r--r--src/third_party/js-1.7/fdlibm/s_floor.c121
-rw-r--r--src/third_party/js-1.7/fdlibm/s_frexp.c99
-rw-r--r--src/third_party/js-1.7/fdlibm/s_ilogb.c85
-rw-r--r--src/third_party/js-1.7/fdlibm/s_isnan.c74
-rw-r--r--src/third_party/js-1.7/fdlibm/s_ldexp.c66
-rw-r--r--src/third_party/js-1.7/fdlibm/s_lib_version.c73
-rw-r--r--src/third_party/js-1.7/fdlibm/s_log1p.c211
-rw-r--r--src/third_party/js-1.7/fdlibm/s_logb.c79
-rw-r--r--src/third_party/js-1.7/fdlibm/s_matherr.c64
-rw-r--r--src/third_party/js-1.7/fdlibm/s_modf.c132
-rw-r--r--src/third_party/js-1.7/fdlibm/s_nextafter.c124
-rw-r--r--src/third_party/js-1.7/fdlibm/s_rint.c131
-rw-r--r--src/third_party/js-1.7/fdlibm/s_scalbn.c107
-rw-r--r--src/third_party/js-1.7/fdlibm/s_signgam.c40
-rw-r--r--src/third_party/js-1.7/fdlibm/s_significand.c68
-rw-r--r--src/third_party/js-1.7/fdlibm/s_sin.c118
-rw-r--r--src/third_party/js-1.7/fdlibm/s_tan.c112
-rw-r--r--src/third_party/js-1.7/fdlibm/s_tanh.c122
-rw-r--r--src/third_party/js-1.7/fdlibm/w_acos.c78
-rw-r--r--src/third_party/js-1.7/fdlibm/w_acosh.c78
-rw-r--r--src/third_party/js-1.7/fdlibm/w_asin.c80
-rw-r--r--src/third_party/js-1.7/fdlibm/w_atan2.c79
-rw-r--r--src/third_party/js-1.7/fdlibm/w_atanh.c81
-rw-r--r--src/third_party/js-1.7/fdlibm/w_cosh.c77
-rw-r--r--src/third_party/js-1.7/fdlibm/w_exp.c88
-rw-r--r--src/third_party/js-1.7/fdlibm/w_fmod.c78
-rw-r--r--src/third_party/js-1.7/fdlibm/w_gamma.c85
-rw-r--r--src/third_party/js-1.7/fdlibm/w_gamma_r.c81
-rw-r--r--src/third_party/js-1.7/fdlibm/w_hypot.c78
-rw-r--r--src/third_party/js-1.7/fdlibm/w_j0.c105
-rw-r--r--src/third_party/js-1.7/fdlibm/w_j1.c106
-rw-r--r--src/third_party/js-1.7/fdlibm/w_jn.c128
-rw-r--r--src/third_party/js-1.7/fdlibm/w_lgamma.c85
-rw-r--r--src/third_party/js-1.7/fdlibm/w_lgamma_r.c81
-rw-r--r--src/third_party/js-1.7/fdlibm/w_log.c78
-rw-r--r--src/third_party/js-1.7/fdlibm/w_log10.c81
-rw-r--r--src/third_party/js-1.7/fdlibm/w_pow.c99
-rw-r--r--src/third_party/js-1.7/fdlibm/w_remainder.c77
-rw-r--r--src/third_party/js-1.7/fdlibm/w_scalb.c95
-rw-r--r--src/third_party/js-1.7/fdlibm/w_sinh.c77
-rw-r--r--src/third_party/js-1.7/fdlibm/w_sqrt.c77
-rw-r--r--src/third_party/js-1.7/js.c3181
-rw-r--r--src/third_party/js-1.7/js.mak4344
-rw-r--r--src/third_party/js-1.7/js.mdpbin0 -> 17922 bytes
-rw-r--r--src/third_party/js-1.7/js.msg301
-rw-r--r--src/third_party/js-1.7/js.pkg2
-rw-r--r--src/third_party/js-1.7/js3240.rc79
-rw-r--r--src/third_party/js-1.7/jsOS240.def654
-rw-r--r--src/third_party/js-1.7/jsapi.c5011
-rw-r--r--src/third_party/js-1.7/jsapi.h2220
-rw-r--r--src/third_party/js-1.7/jsarena.c502
-rw-r--r--src/third_party/js-1.7/jsarena.h303
-rw-r--r--src/third_party/js-1.7/jsarray.c1864
-rw-r--r--src/third_party/js-1.7/jsarray.h95
-rw-r--r--src/third_party/js-1.7/jsatom.c999
-rw-r--r--src/third_party/js-1.7/jsatom.h456
-rw-r--r--src/third_party/js-1.7/jsbit.h195
-rw-r--r--src/third_party/js-1.7/jsbool.c227
-rw-r--r--src/third_party/js-1.7/jsbool.h76
-rw-r--r--src/third_party/js-1.7/jsclist.h139
-rw-r--r--src/third_party/js-1.7/jscntxt.c1229
-rw-r--r--src/third_party/js-1.7/jscntxt.h1013
-rw-r--r--src/third_party/js-1.7/jscompat.h57
-rw-r--r--src/third_party/js-1.7/jsconfig.h208
-rw-r--r--src/third_party/js-1.7/jsconfig.mk181
-rw-r--r--src/third_party/js-1.7/jscpucfg.c380
-rw-r--r--src/third_party/js-1.7/jscpucfg.h212
-rw-r--r--src/third_party/js-1.7/jsdate.c2371
-rw-r--r--src/third_party/js-1.7/jsdate.h120
-rw-r--r--src/third_party/js-1.7/jsdbgapi.c1439
-rw-r--r--src/third_party/js-1.7/jsdbgapi.h406
-rw-r--r--src/third_party/js-1.7/jsdhash.c826
-rw-r--r--src/third_party/js-1.7/jsdhash.h581
-rw-r--r--src/third_party/js-1.7/jsdtoa.c3132
-rw-r--r--src/third_party/js-1.7/jsdtoa.h130
-rw-r--r--src/third_party/js-1.7/jsemit.c6845
-rw-r--r--src/third_party/js-1.7/jsemit.h743
-rw-r--r--src/third_party/js-1.7/jsexn.c1348
-rw-r--r--src/third_party/js-1.7/jsexn.h96
-rw-r--r--src/third_party/js-1.7/jsfile.c2735
-rw-r--r--src/third_party/js-1.7/jsfile.h56
-rw-r--r--src/third_party/js-1.7/jsfile.msg90
-rw-r--r--src/third_party/js-1.7/jsfun.c2330
-rw-r--r--src/third_party/js-1.7/jsfun.h170
-rw-r--r--src/third_party/js-1.7/jsgc.c3201
-rw-r--r--src/third_party/js-1.7/jsgc.h368
-rw-r--r--src/third_party/js-1.7/jshash.c483
-rw-r--r--src/third_party/js-1.7/jshash.h151
-rw-r--r--src/third_party/js-1.7/jsify.pl485
-rw-r--r--src/third_party/js-1.7/jsinterp.c6216
-rw-r--r--src/third_party/js-1.7/jsinterp.h361
-rw-r--r--src/third_party/js-1.7/jsiter.c1080
-rw-r--r--src/third_party/js-1.7/jsiter.h114
-rw-r--r--src/third_party/js-1.7/jskeyword.tbl124
-rw-r--r--src/third_party/js-1.7/jskwgen.c460
-rw-r--r--src/third_party/js-1.7/jslibmath.h266
-rw-r--r--src/third_party/js-1.7/jslock.c1303
-rw-r--r--src/third_party/js-1.7/jslock.h266
-rw-r--r--src/third_party/js-1.7/jslocko.asm60
-rw-r--r--src/third_party/js-1.7/jslog2.c94
-rw-r--r--src/third_party/js-1.7/jslong.c281
-rw-r--r--src/third_party/js-1.7/jslong.h437
-rw-r--r--src/third_party/js-1.7/jsmath.c514
-rw-r--r--src/third_party/js-1.7/jsmath.h57
-rw-r--r--src/third_party/js-1.7/jsnum.c1147
-rw-r--r--src/third_party/js-1.7/jsnum.h268
-rw-r--r--src/third_party/js-1.7/jsobj.c5035
-rw-r--r--src/third_party/js-1.7/jsobj.h596
-rw-r--r--src/third_party/js-1.7/jsopcode.c4794
-rw-r--r--src/third_party/js-1.7/jsopcode.h318
-rw-r--r--src/third_party/js-1.7/jsopcode.tbl478
-rw-r--r--src/third_party/js-1.7/jsosdep.h115
-rw-r--r--src/third_party/js-1.7/jsotypes.h202
-rw-r--r--src/third_party/js-1.7/jsparse.c6547
-rw-r--r--src/third_party/js-1.7/jsparse.h438
-rw-r--r--src/third_party/js-1.7/jsprf.c1264
-rw-r--r--src/third_party/js-1.7/jsprf.h150
-rw-r--r--src/third_party/js-1.7/jsproto.tbl116
-rw-r--r--src/third_party/js-1.7/jsprvtd.h202
-rw-r--r--src/third_party/js-1.7/jspubtd.h667
-rw-r--r--src/third_party/js-1.7/jsregexp.c4206
-rw-r--r--src/third_party/js-1.7/jsregexp.h183
-rw-r--r--src/third_party/js-1.7/jsscan.c2101
-rw-r--r--src/third_party/js-1.7/jsscan.h389
-rw-r--r--src/third_party/js-1.7/jsscope.c1776
-rw-r--r--src/third_party/js-1.7/jsscope.h407
-rw-r--r--src/third_party/js-1.7/jsscript.c1717
-rw-r--r--src/third_party/js-1.7/jsscript.h225
-rw-r--r--src/third_party/js-1.7/jsshell.msg50
-rw-r--r--src/third_party/js-1.7/jsstddef.h83
-rw-r--r--src/third_party/js-1.7/jsstr.c4818
-rw-r--r--src/third_party/js-1.7/jsstr.h500
-rw-r--r--src/third_party/js-1.7/jstypes.h464
-rw-r--r--src/third_party/js-1.7/jsutil.c198
-rw-r--r--src/third_party/js-1.7/jsutil.h106
-rw-r--r--src/third_party/js-1.7/jsxdrapi.c835
-rw-r--r--src/third_party/js-1.7/jsxdrapi.h223
-rw-r--r--src/third_party/js-1.7/jsxml.c8357
-rw-r--r--src/third_party/js-1.7/jsxml.h332
-rw-r--r--src/third_party/js-1.7/lock_SunOS.s114
-rw-r--r--src/third_party/js-1.7/perfect.js39
-rw-r--r--src/third_party/js-1.7/plify_jsdhash.sed33
-rw-r--r--src/third_party/js-1.7/prmjtime.c439
-rw-r--r--src/third_party/js-1.7/prmjtime.h95
-rw-r--r--src/third_party/js-1.7/resource.h15
-rw-r--r--src/third_party/js-1.7/rules.mk193
-rw-r--r--src/third_party/js-1.7/win32.order391
-rw-r--r--src/third_party/linenoise/Makefile7
-rw-r--r--src/third_party/linenoise/README.markdown47
-rw-r--r--src/third_party/linenoise/example.c27
-rw-r--r--src/third_party/linenoise/history.txt3
-rw-r--r--src/third_party/linenoise/linenoise.cpp2077
-rw-r--r--src/third_party/linenoise/linenoise.h56
-rw-r--r--src/third_party/linenoise/linenoise_win32.cpp442
-rw-r--r--src/third_party/pcre-7.4/config-cmake.h.in31
-rw-r--r--src/third_party/pcre-7.4/config.h239
-rw-r--r--src/third_party/pcre-7.4/config.h.generic278
-rw-r--r--src/third_party/pcre-7.4/config.h.in219
-rw-r--r--src/third_party/pcre-7.4/dftables.c199
-rw-r--r--src/third_party/pcre-7.4/pcre.h304
-rw-r--r--src/third_party/pcre-7.4/pcre.h.generic303
-rw-r--r--src/third_party/pcre-7.4/pcre.h.in303
-rw-r--r--src/third_party/pcre-7.4/pcre_chartables.c198
-rw-r--r--src/third_party/pcre-7.4/pcre_chartables.c.dist198
-rw-r--r--src/third_party/pcre-7.4/pcre_compile.c6145
-rw-r--r--src/third_party/pcre-7.4/pcre_config.c128
-rw-r--r--src/third_party/pcre-7.4/pcre_dfa_exec.c2896
-rw-r--r--src/third_party/pcre-7.4/pcre_exec.c4938
-rw-r--r--src/third_party/pcre-7.4/pcre_fullinfo.c165
-rw-r--r--src/third_party/pcre-7.4/pcre_get.c465
-rw-r--r--src/third_party/pcre-7.4/pcre_globals.c63
-rw-r--r--src/third_party/pcre-7.4/pcre_info.c93
-rw-r--r--src/third_party/pcre-7.4/pcre_internal.h1117
-rw-r--r--src/third_party/pcre-7.4/pcre_maketables.c143
-rw-r--r--src/third_party/pcre-7.4/pcre_newline.c164
-rw-r--r--src/third_party/pcre-7.4/pcre_ord2utf8.c85
-rw-r--r--src/third_party/pcre-7.4/pcre_refcount.c82
-rw-r--r--src/third_party/pcre-7.4/pcre_scanner.cc199
-rw-r--r--src/third_party/pcre-7.4/pcre_scanner.h172
-rw-r--r--src/third_party/pcre-7.4/pcre_scanner_unittest.cc158
-rw-r--r--src/third_party/pcre-7.4/pcre_stringpiece.cc43
-rw-r--r--src/third_party/pcre-7.4/pcre_stringpiece.h177
-rw-r--r--src/third_party/pcre-7.4/pcre_stringpiece.h.in177
-rw-r--r--src/third_party/pcre-7.4/pcre_stringpiece_unittest.cc151
-rw-r--r--src/third_party/pcre-7.4/pcre_study.c579
-rw-r--r--src/third_party/pcre-7.4/pcre_tables.c318
-rw-r--r--src/third_party/pcre-7.4/pcre_try_flipped.c137
-rw-r--r--src/third_party/pcre-7.4/pcre_ucp_searchfuncs.c179
-rw-r--r--src/third_party/pcre-7.4/pcre_valid_utf8.c162
-rw-r--r--src/third_party/pcre-7.4/pcre_version.c90
-rw-r--r--src/third_party/pcre-7.4/pcre_xclass.c148
-rw-r--r--src/third_party/pcre-7.4/pcrecpp.cc857
-rw-r--r--src/third_party/pcre-7.4/pcrecpp.h700
-rw-r--r--src/third_party/pcre-7.4/pcrecpp_internal.h68
-rw-r--r--src/third_party/pcre-7.4/pcrecpp_unittest.cc1240
-rw-r--r--src/third_party/pcre-7.4/pcrecpparg.h173
-rw-r--r--src/third_party/pcre-7.4/pcrecpparg.h.in173
-rw-r--r--src/third_party/pcre-7.4/pcredemo.c325
-rw-r--r--src/third_party/pcre-7.4/pcregrep.c2106
-rw-r--r--src/third_party/pcre-7.4/pcreposix.c338
-rw-r--r--src/third_party/pcre-7.4/pcreposix.h142
-rw-r--r--src/third_party/pcre-7.4/pcretest.c2396
-rw-r--r--src/third_party/pcre-7.4/ucp.h133
-rw-r--r--src/third_party/pcre-7.4/ucpinternal.h92
-rw-r--r--src/third_party/pcre-7.4/ucptable.h3068
-rw-r--r--src/third_party/pcre.py42
-rw-r--r--src/third_party/sm.py114
-rw-r--r--src/third_party/snappy.py14
-rwxr-xr-xsrc/third_party/snappy/COPYING28
-rwxr-xr-xsrc/third_party/snappy/README135
-rwxr-xr-xsrc/third_party/snappy/config.h124
-rwxr-xr-xsrc/third_party/snappy/snappy-internal.h150
-rwxr-xr-xsrc/third_party/snappy/snappy-sinksource.cc72
-rwxr-xr-xsrc/third_party/snappy/snappy-sinksource.h136
-rwxr-xr-xsrc/third_party/snappy/snappy-stubs-internal.cc42
-rwxr-xr-xsrc/third_party/snappy/snappy-stubs-internal.h478
-rwxr-xr-xsrc/third_party/snappy/snappy-stubs-public.h85
-rwxr-xr-xsrc/third_party/snappy/snappy.cc1026
-rwxr-xr-xsrc/third_party/snappy/snappy.h155
885 files changed, 349802 insertions, 0 deletions
diff --git a/src/mongo/bson/README b/src/mongo/bson/README
new file mode 100644
index 00000000000..01ed654bcd2
--- /dev/null
+++ b/src/mongo/bson/README
@@ -0,0 +1,7 @@
+"BSON" stands for "binary JSON" - a binary storage format that is JSON inspired
+(and adds a couple extra types such as Date).
+
+This is the C++ implementation. Implementations which translate BSON<->JSON
+are available for most languages at bsonspec.org.
+
+
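(Editorial aside, not part of this commit: a minimal sketch of how the C++ BSON library being moved into src/mongo/bson/ is typically used. It builds a document with BSONObjBuilder and reads fields back from the resulting BSONObj, assuming the standalone bson.h umbrella header included in this move and the API declared in the headers listed above: append(), obj(), toString(), getIntField(), getStringField().)

    // Illustrative only; assumes the standalone src/mongo/bson/bson.h header.
    #include <iostream>
    #include "mongo/bson/bson.h"

    int main() {
        mongo::BSONObjBuilder b;
        b.append("name", "mongo");            // UTF-8 string field
        b.append("n", 1);                     // 32-bit integer field
        mongo::BSONObj obj = b.obj();         // finalize into an immutable BSONObj

        std::cout << obj.toString() << std::endl;            // { name: "mongo", n: 1 }
        std::cout << obj.getIntField("n") << std::endl;      // 1
        std::cout << obj.getStringField("name") << std::endl;
        return 0;
    }
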
diff --git a/src/mongo/bson/bson-inl.h b/src/mongo/bson/bson-inl.h
new file mode 100644
index 00000000000..9e8b3654802
--- /dev/null
+++ b/src/mongo/bson/bson-inl.h
@@ -0,0 +1,1007 @@
+/** @file bsoninlines.h
+ a goal here is that the most common bson methods can be used inline-only, a la boost.
+ thus some things are inline that wouldn't necessarily be otherwise.
+*/
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <map>
+#include <limits>
+
+#if defined(_WIN32)
+#undef max
+#undef min
+#endif
+
+namespace mongo {
+
+ inline bool isNaN(double d) {
+ return d != d;
+ }
+
+ inline bool isInf(double d, int* sign = 0) {
+ volatile double tmp = d;
+
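+        // How this check works (descriptive comment added editorially):
+        //   finite d:  tmp - d == 0.0, so the test below is false
+        //   +/-inf:    tmp == d, but inf - inf is NaN, so (tmp - d) != 0.0
+        //   NaN:       tmp == d is already false (NaN compares unequal)
+        // The volatile copy keeps the compiler from folding the expression away.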
+ if ((tmp == d) && ((tmp - d) != 0.0)) {
+ if ( sign ) {
+ *sign = (d < 0.0 ? -1 : 1);
+ }
+ return true;
+ }
+
+ if ( sign ) {
+ *sign = 0;
+ }
+
+ return false;
+ }
+
+ /* must be same type when called, unless both sides are #s
+ this large function is in header to facilitate inline-only use of bson
+ */
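+    /* Editorial example of the precondition above: woCompare() (further down)
+       only calls this when the canonical types match or both sides are numbers,
+       so NumberInt 3 vs NumberDouble 3.0 reaches the shared double path below
+       and compares equal, while String vs NumberInt is resolved by canonical
+       type order before this function is ever reached. */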
+ inline int compareElementValues(const BSONElement& l, const BSONElement& r) {
+ int f;
+
+ switch ( l.type() ) {
+ case EOO:
+ case Undefined: // EOO and Undefined are same canonicalType
+ case jstNULL:
+ case MaxKey:
+ case MinKey:
+ f = l.canonicalType() - r.canonicalType();
+ if ( f<0 ) return -1;
+ return f==0 ? 0 : 1;
+ case Bool:
+ return *l.value() - *r.value();
+ case Timestamp:
+ // unsigned compare for timestamps - note they are not really dates but (ordinal + time_t)
+ if ( l.date() < r.date() )
+ return -1;
+ return l.date() == r.date() ? 0 : 1;
+ case Date:
+ {
+ long long a = (long long) l.Date().millis;
+ long long b = (long long) r.Date().millis;
+ if( a < b )
+ return -1;
+ return a == b ? 0 : 1;
+ }
+ case NumberLong:
+ if( r.type() == NumberLong ) {
+ long long L = l._numberLong();
+ long long R = r._numberLong();
+ if( L < R ) return -1;
+ if( L == R ) return 0;
+ return 1;
+ }
+ goto dodouble;
+ case NumberInt:
+ if( r.type() == NumberInt ) {
+ int L = l._numberInt();
+ int R = r._numberInt();
+ if( L < R ) return -1;
+ return L == R ? 0 : 1;
+ }
+ // else fall through
+ case NumberDouble:
+dodouble:
+ {
+ double left = l.number();
+ double right = r.number();
+ if( left < right )
+ return -1;
+ if( left == right )
+ return 0;
+ if( isNaN(left) )
+ return isNaN(right) ? 0 : -1;
+ return 1;
+ }
+ case jstOID:
+ return memcmp(l.value(), r.value(), 12);
+ case Code:
+ case Symbol:
+ case String:
+ /* todo: a utf sort order version one day... */
+ {
+ // we use memcmp as we allow zeros in UTF8 strings
+ int lsz = l.valuestrsize();
+ int rsz = r.valuestrsize();
+ int common = min(lsz, rsz);
+ int res = memcmp(l.valuestr(), r.valuestr(), common);
+ if( res )
+ return res;
+ // longer string is the greater one
+ return lsz-rsz;
+ }
+ case Object:
+ case Array:
+ return l.embeddedObject().woCompare( r.embeddedObject() );
+ case DBRef: {
+ int lsz = l.valuesize();
+ int rsz = r.valuesize();
+ if ( lsz - rsz != 0 ) return lsz - rsz;
+ return memcmp(l.value(), r.value(), lsz);
+ }
+ case BinData: {
+ int lsz = l.objsize(); // our bin data size in bytes, not including the subtype byte
+ int rsz = r.objsize();
+ if ( lsz - rsz != 0 ) return lsz - rsz;
+ return memcmp(l.value()+4, r.value()+4, lsz+1 /*+1 for subtype byte*/);
+ }
+ case RegEx: {
+ int c = strcmp(l.regex(), r.regex());
+ if ( c )
+ return c;
+ return strcmp(l.regexFlags(), r.regexFlags());
+ }
+ case CodeWScope : {
+ f = l.canonicalType() - r.canonicalType();
+ if ( f )
+ return f;
+ f = strcmp( l.codeWScopeCode() , r.codeWScopeCode() );
+ if ( f )
+ return f;
+ f = strcmp( l.codeWScopeScopeData() , r.codeWScopeScopeData() );
+ if ( f )
+ return f;
+ return 0;
+ }
+ default:
+ assert( false);
+ }
+ return -1;
+ }
+
+ /* wo = "well ordered"
+ note: (mongodb related) : this can only change in behavior when index version # changes
+ */
+ inline int BSONElement::woCompare( const BSONElement &e,
+ bool considerFieldName ) const {
+ int lt = (int) canonicalType();
+ int rt = (int) e.canonicalType();
+ int x = lt - rt;
+ if( x != 0 && (!isNumber() || !e.isNumber()) )
+ return x;
+ if ( considerFieldName ) {
+ x = strcmp(fieldName(), e.fieldName());
+ if ( x != 0 )
+ return x;
+ }
+ x = compareElementValues(*this, e);
+ return x;
+ }
+
+ inline BSONObjIterator BSONObj::begin() const {
+ return BSONObjIterator(*this);
+ }
+
+ inline BSONObj BSONElement::embeddedObjectUserCheck() const {
+ if ( MONGO_likely(isABSONObj()) )
+ return BSONObj(value());
+ stringstream ss;
+ ss << "invalid parameter: expected an object (" << fieldName() << ")";
+ uasserted( 10065 , ss.str() );
+ return BSONObj(); // never reachable
+ }
+
+ inline BSONObj BSONElement::embeddedObject() const {
+ assert( isABSONObj() );
+ return BSONObj(value());
+ }
+
+ inline BSONObj BSONElement::codeWScopeObject() const {
+ assert( type() == CodeWScope );
+ int strSizeWNull = *(int *)( value() + 4 );
+ return BSONObj( value() + 4 + 4 + strSizeWNull );
+ }
+
+ // deep (full) equality
+ inline bool BSONObj::equal(const BSONObj &rhs) const {
+ BSONObjIterator i(*this);
+ BSONObjIterator j(rhs);
+ BSONElement l,r;
+ do {
+ // so far, equal...
+ l = i.next();
+ r = j.next();
+ if ( l.eoo() )
+ return r.eoo();
+ } while( l == r );
+ return false;
+ }
+
+ inline NOINLINE_DECL void BSONObj::_assertInvalid() const {
+ StringBuilder ss;
+ int os = objsize();
+ ss << "Invalid BSONObj size: " << os << " (0x" << toHex( &os, 4 ) << ')';
+ try {
+ BSONElement e = firstElement();
+ ss << " first element: " << e.toString();
+ }
+ catch ( ... ) { }
+ massert( 10334 , ss.str() , 0 );
+ }
+
+ /* the idea with NOINLINE_DECL here is to keep this from inlining in the
+ getOwned() method. the presumption being that is better.
+ */
+ inline NOINLINE_DECL BSONObj BSONObj::copy() const {
+ Holder *h = (Holder*) malloc(objsize() + sizeof(unsigned));
+ h->zero();
+ memcpy(h->data, objdata(), objsize());
+ return BSONObj(h);
+ }
+
+ inline BSONObj BSONObj::getOwned() const {
+ if ( isOwned() )
+ return *this;
+ return copy();
+ }
+
+ // wrap this element up as a singleton object.
+ inline BSONObj BSONElement::wrap() const {
+ BSONObjBuilder b(size()+6);
+ b.append(*this);
+ return b.obj();
+ }
+
+ inline BSONObj BSONElement::wrap( const char * newName ) const {
+ BSONObjBuilder b(size()+6+(int)strlen(newName));
+ b.appendAs(*this,newName);
+ return b.obj();
+ }
+
+ inline void BSONObj::getFields(unsigned n, const char **fieldNames, BSONElement *fields) const {
+ BSONObjIterator i(*this);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ const char *p = e.fieldName();
+ for( unsigned i = 0; i < n; i++ ) {
+ if( strcmp(p, fieldNames[i]) == 0 ) {
+ fields[i] = e;
+ break;
+ }
+ }
+ }
+ }
+
+ inline BSONElement BSONObj::getField(const StringData& name) const {
+ BSONObjIterator i(*this);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( strcmp(e.fieldName(), name.data()) == 0 )
+ return e;
+ }
+ return BSONElement();
+ }
+
+ inline int BSONObj::getIntField(const char *name) const {
+ BSONElement e = getField(name);
+ return e.isNumber() ? (int) e.number() : std::numeric_limits< int >::min();
+ }
+
+ inline bool BSONObj::getBoolField(const char *name) const {
+ BSONElement e = getField(name);
+ return e.type() == Bool ? e.boolean() : false;
+ }
+
+ inline const char * BSONObj::getStringField(const char *name) const {
+ BSONElement e = getField(name);
+ return e.type() == String ? e.valuestr() : "";
+ }
+
+ /* add all the fields from the object specified to this object */
+ inline BSONObjBuilder& BSONObjBuilder::appendElements(BSONObj x) {
+ BSONObjIterator it(x);
+ while ( it.moreWithEOO() ) {
+ BSONElement e = it.next();
+ if ( e.eoo() ) break;
+ append(e);
+ }
+ return *this;
+ }
+
+ /* add all the fields from the object specified to this object if they don't exist */
+ inline BSONObjBuilder& BSONObjBuilder::appendElementsUnique(BSONObj x) {
+ set<string> have;
+ {
+ BSONObjIterator i = iterator();
+ while ( i.more() )
+ have.insert( i.next().fieldName() );
+ }
+
+ BSONObjIterator it(x);
+ while ( it.more() ) {
+ BSONElement e = it.next();
+ if ( have.count( e.fieldName() ) )
+ continue;
+ append(e);
+ }
+ return *this;
+ }
+
+
+ inline bool BSONObj::isValid() const {
+ int x = objsize();
+ return x > 0 && x <= BSONObjMaxInternalSize;
+ }
+
+ inline bool BSONObj::getObjectID(BSONElement& e) const {
+ BSONElement f = getField("_id");
+ if( !f.eoo() ) {
+ e = f;
+ return true;
+ }
+ return false;
+ }
+
+ inline BSONObjBuilderValueStream::BSONObjBuilderValueStream( BSONObjBuilder * builder ) {
+ _fieldName = 0;
+ _builder = builder;
+ }
+
+ template<class T>
+ inline BSONObjBuilder& BSONObjBuilderValueStream::operator<<( T value ) {
+ _builder->append(_fieldName, value);
+ _fieldName = 0;
+ return *_builder;
+ }
+
+ inline BSONObjBuilder& BSONObjBuilderValueStream::operator<<( const BSONElement& e ) {
+ _builder->appendAs( e , _fieldName );
+ _fieldName = 0;
+ return *_builder;
+ }
+
+ inline Labeler BSONObjBuilderValueStream::operator<<( const Labeler::Label &l ) {
+ return Labeler( l, this );
+ }
+
+ inline void BSONObjBuilderValueStream::endField( const char *nextFieldName ) {
+ if ( _fieldName && haveSubobj() ) {
+ _builder->append( _fieldName, subobj()->done() );
+ }
+ _subobj.reset();
+ _fieldName = nextFieldName;
+ }
+
+ inline BSONObjBuilder *BSONObjBuilderValueStream::subobj() {
+ if ( !haveSubobj() )
+ _subobj.reset( new BSONObjBuilder() );
+ return _subobj.get();
+ }
+
+ template<class T> inline
+ BSONObjBuilder& Labeler::operator<<( T value ) {
+ s_->subobj()->append( l_.l_, value );
+ return *s_->_builder;
+ }
+
+ inline
+ BSONObjBuilder& Labeler::operator<<( const BSONElement& e ) {
+ s_->subobj()->appendAs( e, l_.l_ );
+ return *s_->_builder;
+ }
+
+ // {a: {b:1}} -> {a.b:1}
+ void nested2dotted(BSONObjBuilder& b, const BSONObj& obj, const string& base="");
+ inline BSONObj nested2dotted(const BSONObj& obj) {
+ BSONObjBuilder b;
+ nested2dotted(b, obj);
+ return b.obj();
+ }
+
+ // {a.b:1} -> {a: {b:1}}
+ void dotted2nested(BSONObjBuilder& b, const BSONObj& obj);
+ inline BSONObj dotted2nested(const BSONObj& obj) {
+ BSONObjBuilder b;
+ dotted2nested(b, obj);
+ return b.obj();
+ }
+
+ inline BSONObjIterator BSONObjBuilder::iterator() const {
+ const char * s = _b.buf() + _offset;
+ const char * e = _b.buf() + _b.len();
+ return BSONObjIterator( s , e );
+ }
+
+ inline bool BSONObjBuilder::hasField( const StringData& name ) const {
+ BSONObjIterator i = iterator();
+ while ( i.more() )
+ if ( strcmp( name.data() , i.next().fieldName() ) == 0 )
+ return true;
+ return false;
+ }
+
+ /* WARNING: nested/dotted conversions are not 100% reversible
+ * nested2dotted(dotted2nested({a.b: {c:1}})) -> {a.b.c: 1}
+ * also, dotted2nested ignores order
+ */
+
+ typedef map<string, BSONElement> BSONMap;
+ inline BSONMap bson2map(const BSONObj& obj) {
+ BSONMap m;
+ BSONObjIterator it(obj);
+ while (it.more()) {
+ BSONElement e = it.next();
+ m[e.fieldName()] = e;
+ }
+ return m;
+ }
+
+ struct BSONElementFieldNameCmp {
+ bool operator()( const BSONElement &l, const BSONElement &r ) const {
+ return strcmp( l.fieldName() , r.fieldName() ) <= 0;
+ }
+ };
+
+ typedef set<BSONElement, BSONElementFieldNameCmp> BSONSortedElements;
+ inline BSONSortedElements bson2set( const BSONObj& obj ) {
+ BSONSortedElements s;
+ BSONObjIterator it(obj);
+ while ( it.more() )
+ s.insert( it.next() );
+ return s;
+ }
+
+ inline string BSONObj::toString( bool isArray, bool full ) const {
+ if ( isEmpty() ) return "{}";
+ StringBuilder s;
+ toString(s, isArray, full);
+ return s.str();
+ }
+ inline void BSONObj::toString(StringBuilder& s, bool isArray, bool full ) const {
+ if ( isEmpty() ) {
+ s << "{}";
+ return;
+ }
+
+ s << ( isArray ? "[ " : "{ " );
+ BSONObjIterator i(*this);
+ bool first = true;
+ while ( 1 ) {
+ massert( 10327 , "Object does not end with EOO", i.moreWithEOO() );
+ BSONElement e = i.next( true );
+ massert( 10328 , "Invalid element size", e.size() > 0 );
+ massert( 10329 , "Element too large", e.size() < ( 1 << 30 ) );
+ int offset = (int) (e.rawdata() - this->objdata());
+ massert( 10330 , "Element extends past end of object",
+ e.size() + offset <= this->objsize() );
+ e.validate();
+ bool end = ( e.size() + offset == this->objsize() );
+ if ( e.eoo() ) {
+ massert( 10331 , "EOO Before end of object", end );
+ break;
+ }
+ if ( first )
+ first = false;
+ else
+ s << ", ";
+ e.toString(s, !isArray, full );
+ }
+ s << ( isArray ? " ]" : " }" );
+ }
+
+ inline void BSONElement::validate() const {
+ const BSONType t = type();
+
+ switch( t ) {
+ case DBRef:
+ case Code:
+ case Symbol:
+ case mongo::String: {
+ unsigned x = (unsigned) valuestrsize();
+ bool lenOk = x > 0 && x < (unsigned) BSONObjMaxInternalSize;
+ if( lenOk && valuestr()[x-1] == 0 )
+ return;
+ StringBuilder buf;
+ buf << "Invalid dbref/code/string/symbol size: " << x;
+ if( lenOk )
+ buf << " strnlen:" << mongo::strnlen( valuestr() , x );
+ msgasserted( 10321 , buf.str() );
+ break;
+ }
+ case CodeWScope: {
+ int totalSize = *( int * )( value() );
+ massert( 10322 , "Invalid CodeWScope size", totalSize >= 8 );
+ int strSizeWNull = *( int * )( value() + 4 );
+ massert( 10323 , "Invalid CodeWScope string size", totalSize >= strSizeWNull + 4 + 4 );
+ massert( 10324 , "Invalid CodeWScope string size",
+ strSizeWNull > 0 &&
+ (strSizeWNull - 1) == mongo::strnlen( codeWScopeCode(), strSizeWNull ) );
+ massert( 10325 , "Invalid CodeWScope size", totalSize >= strSizeWNull + 4 + 4 + 4 );
+ int objSize = *( int * )( value() + 4 + 4 + strSizeWNull );
+ massert( 10326 , "Invalid CodeWScope object size", totalSize == 4 + 4 + strSizeWNull + objSize );
+ // Subobject validation handled elsewhere.
+ }
+ case Object:
+ // We expect Object size validation to be handled elsewhere.
+ default:
+ break;
+ }
+ }
+
+ inline int BSONElement::size( int maxLen ) const {
+ if ( totalSize >= 0 )
+ return totalSize;
+
+ int remain = maxLen - fieldNameSize() - 1;
+
+ int x = 0;
+ switch ( type() ) {
+ case EOO:
+ case Undefined:
+ case jstNULL:
+ case MaxKey:
+ case MinKey:
+ break;
+ case mongo::Bool:
+ x = 1;
+ break;
+ case NumberInt:
+ x = 4;
+ break;
+ case Timestamp:
+ case mongo::Date:
+ case NumberDouble:
+ case NumberLong:
+ x = 8;
+ break;
+ case jstOID:
+ x = 12;
+ break;
+ case Symbol:
+ case Code:
+ case mongo::String:
+ massert( 10313 , "Insufficient bytes to calculate element size", maxLen == -1 || remain > 3 );
+ x = valuestrsize() + 4;
+ break;
+ case CodeWScope:
+ massert( 10314 , "Insufficient bytes to calculate element size", maxLen == -1 || remain > 3 );
+ x = objsize();
+ break;
+
+ case DBRef:
+ massert( 10315 , "Insufficient bytes to calculate element size", maxLen == -1 || remain > 3 );
+ x = valuestrsize() + 4 + 12;
+ break;
+ case Object:
+ case mongo::Array:
+ massert( 10316 , "Insufficient bytes to calculate element size", maxLen == -1 || remain > 3 );
+ x = objsize();
+ break;
+ case BinData:
+ massert( 10317 , "Insufficient bytes to calculate element size", maxLen == -1 || remain > 3 );
+ x = valuestrsize() + 4 + 1/*subtype*/;
+ break;
+ case RegEx: {
+ const char *p = value();
+ size_t len1 = ( maxLen == -1 ) ? strlen( p ) : (size_t)mongo::strnlen( p, remain );
+ //massert( 10318 , "Invalid regex string", len1 != -1 ); // ERH - 4/28/10 - don't think this does anything
+ p = p + len1 + 1;
+ size_t len2;
+ if( maxLen == -1 )
+ len2 = strlen( p );
+ else {
+ size_t x = remain - len1 - 1;
+ assert( x <= 0x7fffffff );
+ len2 = mongo::strnlen( p, (int) x );
+ }
+ //massert( 10319 , "Invalid regex options string", len2 != -1 ); // ERH - 4/28/10 - don't think this does anything
+ x = (int) (len1 + 1 + len2 + 1);
+ }
+ break;
+ default: {
+ StringBuilder ss;
+ ss << "BSONElement: bad type " << (int) type();
+ string msg = ss.str();
+ massert( 13655 , msg.c_str(),false);
+ }
+ }
+ totalSize = x + fieldNameSize() + 1; // BSONType
+
+ return totalSize;
+ }
+
+ inline int BSONElement::size() const {
+ if ( totalSize >= 0 )
+ return totalSize;
+
+ int x = 0;
+ switch ( type() ) {
+ case EOO:
+ case Undefined:
+ case jstNULL:
+ case MaxKey:
+ case MinKey:
+ break;
+ case mongo::Bool:
+ x = 1;
+ break;
+ case NumberInt:
+ x = 4;
+ break;
+ case Timestamp:
+ case mongo::Date:
+ case NumberDouble:
+ case NumberLong:
+ x = 8;
+ break;
+ case jstOID:
+ x = 12;
+ break;
+ case Symbol:
+ case Code:
+ case mongo::String:
+ x = valuestrsize() + 4;
+ break;
+ case DBRef:
+ x = valuestrsize() + 4 + 12;
+ break;
+ case CodeWScope:
+ case Object:
+ case mongo::Array:
+ x = objsize();
+ break;
+ case BinData:
+ x = valuestrsize() + 4 + 1/*subtype*/;
+ break;
+ case RegEx:
+ {
+ const char *p = value();
+ size_t len1 = strlen(p);
+ p = p + len1 + 1;
+ size_t len2;
+ len2 = strlen( p );
+ x = (int) (len1 + 1 + len2 + 1);
+ }
+ break;
+ default:
+ {
+ StringBuilder ss;
+ ss << "BSONElement: bad type " << (int) type();
+ string msg = ss.str();
+ massert(10320 , msg.c_str(),false);
+ }
+ }
+ totalSize = x + fieldNameSize() + 1; // BSONType
+
+ return totalSize;
+ }
+
+ inline string BSONElement::toString( bool includeFieldName, bool full ) const {
+ StringBuilder s;
+ toString(s, includeFieldName, full);
+ return s.str();
+ }
+ inline void BSONElement::toString(StringBuilder& s, bool includeFieldName, bool full ) const {
+ if ( includeFieldName && type() != EOO )
+ s << fieldName() << ": ";
+ switch ( type() ) {
+ case EOO:
+ s << "EOO";
+ break;
+ case mongo::Date:
+ s << "new Date(" << (long long) date() << ')';
+ break;
+ case RegEx: {
+ s << "/" << regex() << '/';
+ const char *p = regexFlags();
+ if ( p ) s << p;
+ }
+ break;
+ case NumberDouble:
+ s.appendDoubleNice( number() );
+ break;
+ case NumberLong:
+ s << _numberLong();
+ break;
+ case NumberInt:
+ s << _numberInt();
+ break;
+ case mongo::Bool:
+ s << ( boolean() ? "true" : "false" );
+ break;
+ case Object:
+ embeddedObject().toString(s, false, full);
+ break;
+ case mongo::Array:
+ embeddedObject().toString(s, true, full);
+ break;
+ case Undefined:
+ s << "undefined";
+ break;
+ case jstNULL:
+ s << "null";
+ break;
+ case MaxKey:
+ s << "MaxKey";
+ break;
+ case MinKey:
+ s << "MinKey";
+ break;
+ case CodeWScope:
+ s << "CodeWScope( "
+ << codeWScopeCode() << ", " << codeWScopeObject().toString(false, full) << ")";
+ break;
+ case Code:
+ if ( !full && valuestrsize() > 80 ) {
+ s.write(valuestr(), 70);
+ s << "...";
+ }
+ else {
+ s.write(valuestr(), valuestrsize()-1);
+ }
+ break;
+ case Symbol:
+ case mongo::String:
+ s << '"';
+ if ( !full && valuestrsize() > 160 ) {
+ s.write(valuestr(), 150);
+ s << "...\"";
+ }
+ else {
+ s.write(valuestr(), valuestrsize()-1);
+ s << '"';
+ }
+ break;
+ case DBRef:
+ s << "DBRef('" << valuestr() << "',";
+ {
+ mongo::OID *x = (mongo::OID *) (valuestr() + valuestrsize());
+ s << *x << ')';
+ }
+ break;
+ case jstOID:
+ s << "ObjectId('";
+ s << __oid() << "')";
+ break;
+ case BinData:
+ s << "BinData";
+ if (full) {
+ int len;
+ const char* data = binDataClean(len);
+ s << '(' << binDataType() << ", " << toHex(data, len) << ')';
+ }
+ break;
+ case Timestamp:
+ s << "Timestamp " << timestampTime() << "|" << timestampInc();
+ break;
+ default:
+ s << "?type=" << type();
+ break;
+ }
+ }
+
+ /* return has eoo() true if no match
+ supports "." notation to reach into embedded objects
+ */
+ inline BSONElement BSONObj::getFieldDotted(const char *name) const {
+ BSONElement e = getField( name );
+ if ( e.eoo() ) {
+ const char *p = strchr(name, '.');
+ if ( p ) {
+ string left(name, p-name);
+ BSONObj sub = getObjectField(left.c_str());
+ return sub.isEmpty() ? BSONElement() : sub.getFieldDotted(p+1);
+ }
+ }
+
+ return e;
+ }
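+
+ /* Illustrative use of the dotted form:
+      BSONObj o = BSON( "a" << BSON( "b" << 1 ) );
+      o.getFieldDotted( "a.b" ).numberInt();   // 1
+      o.getFieldDotted( "a.x" ).eoo();         // true -- no match
+ */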
+
+ inline BSONObj BSONObj::getObjectField(const char *name) const {
+ BSONElement e = getField(name);
+ BSONType t = e.type();
+ return t == Object || t == Array ? e.embeddedObject() : BSONObj();
+ }
+
+ inline int BSONObj::nFields() const {
+ int n = 0;
+ BSONObjIterator i(*this);
+ while ( i.moreWithEOO() ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ n++;
+ }
+ return n;
+ }
+
+ inline BSONObj::BSONObj() {
+ /* little endian ordering here, but that is likely ok regardless, as BSON is spec'd
+ to be little endian external to the system. (i.e. the rest of the bson implementation,
+ not just this part, does not support big endian)
+ */
+ static char p[] = { /*size*/5, 0, 0, 0, /*eoo*/0 };
+ _objdata = p;
+ }
+
+ inline BSONObj BSONElement::Obj() const { return embeddedObjectUserCheck(); }
+
+ inline BSONElement BSONElement::operator[] (const string& field) const {
+ BSONObj o = Obj();
+ return o[field];
+ }
+
+ inline void BSONObj::elems(vector<BSONElement> &v) const {
+ BSONObjIterator i(*this);
+ while( i.more() )
+ v.push_back(i.next());
+ }
+
+ inline void BSONObj::elems(list<BSONElement> &v) const {
+ BSONObjIterator i(*this);
+ while( i.more() )
+ v.push_back(i.next());
+ }
+
+ template <class T>
+ void BSONObj::Vals(vector<T>& v) const {
+ BSONObjIterator i(*this);
+ while( i.more() ) {
+ T t;
+ i.next().Val(t);
+ v.push_back(t);
+ }
+ }
+ template <class T>
+ void BSONObj::Vals(list<T>& v) const {
+ BSONObjIterator i(*this);
+ while( i.more() ) {
+ T t;
+ i.next().Val(t);
+ v.push_back(t);
+ }
+ }
+
+ template <class T>
+ void BSONObj::vals(vector<T>& v) const {
+ BSONObjIterator i(*this);
+ while( i.more() ) {
+ try {
+ T t;
+ i.next().Val(t);
+ v.push_back(t);
+ }
+ catch(...) { }
+ }
+ }
+ template <class T>
+ void BSONObj::vals(list<T>& v) const {
+ BSONObjIterator i(*this);
+ while( i.more() ) {
+ try {
+ T t;
+ i.next().Val(t);
+ v.push_back(t);
+ }
+ catch(...) { }
+ }
+ }
+
+ inline ostream& operator<<( ostream &s, const BSONObj &o ) {
+ return s << o.toString();
+ }
+
+ inline ostream& operator<<( ostream &s, const BSONElement &e ) {
+ return s << e.toString();
+ }
+
+ inline StringBuilder& operator<<( StringBuilder &s, const BSONObj &o ) {
+ o.toString( s );
+ return s;
+ }
+ inline StringBuilder& operator<<( StringBuilder &s, const BSONElement &e ) {
+ e.toString( s );
+ return s;
+ }
+
+
+ inline void BSONElement::Val(BSONObj& v) const { v = Obj(); }
+
+ template<typename T>
+ inline BSONFieldValue<BSONObj> BSONField<T>::query( const char * q , const T& t ) const {
+ BSONObjBuilder b;
+ b.append( q , t );
+ return BSONFieldValue<BSONObj>( _name , b.obj() );
+ }
+
+ // used by jsonString()
+ inline string escape( string s , bool escape_slash=false) {
+ StringBuilder ret;
+ for ( string::iterator i = s.begin(); i != s.end(); ++i ) {
+ switch ( *i ) {
+ case '"':
+ ret << "\\\"";
+ break;
+ case '\\':
+ ret << "\\\\";
+ break;
+ case '/':
+ ret << (escape_slash ? "\\/" : "/");
+ break;
+ case '\b':
+ ret << "\\b";
+ break;
+ case '\f':
+ ret << "\\f";
+ break;
+ case '\n':
+ ret << "\\n";
+ break;
+ case '\r':
+ ret << "\\r";
+ break;
+ case '\t':
+ ret << "\\t";
+ break;
+ default:
+ if ( *i >= 0 && *i <= 0x1f ) {
+ //TODO: these should be utf16 code-units not bytes
+ char c = *i;
+ ret << "\\u00" << toHexLower(&c, 1);
+ }
+ else {
+ ret << *i;
+ }
+ }
+ }
+ return ret.str();
+ }
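+
+ /* Illustrative: escape( "a \"b\"\nc" ) returns the characters  a \"b\"\nc  -- quotes,
+    backslashes and control characters are rewritten so the result can be embedded in a
+    JSON string literal; control bytes below 0x20 without a shorthand become \u00XX. */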
+
+ inline string BSONObj::hexDump() const {
+ stringstream ss;
+ const char *d = objdata();
+ int size = objsize();
+ for( int i = 0; i < size; ++i ) {
+ ss.width( 2 );
+ ss.fill( '0' );
+ ss << hex << (unsigned)(unsigned char)( d[ i ] ) << dec;
+ if ( ( d[ i ] >= '0' && d[ i ] <= '9' ) || ( d[ i ] >= 'A' && d[ i ] <= 'z' ) )
+ ss << '\'' << d[ i ] << '\'';
+ if ( i != size - 1 )
+ ss << ' ';
+ }
+ return ss.str();
+ }
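+
+ /* Illustrative: BSONObj().hexDump() yields "05 00 00 00 00" -- the five bytes of the
+    empty object (size 5, then EOO); bytes in the printable ranges checked above are
+    additionally echoed in quotes. */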
+
+ inline void BSONObjBuilder::appendKeys( const BSONObj& keyPattern , const BSONObj& values ) {
+ BSONObjIterator i(keyPattern);
+ BSONObjIterator j(values);
+
+ while ( i.more() && j.more() ) {
+ appendAs( j.next() , i.next().fieldName() );
+ }
+
+ assert( ! i.more() );
+ assert( ! j.more() );
+ }
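+
+ /* Illustrative: with keyPattern { a : 1, b : 1 } and values { x : 4, y : 6 },
+    appendKeys appends  a : 4, b : 6  -- values are matched to the key pattern's field
+    names by position, and both objects must have the same number of fields. */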
+
+ inline BSONObj BSONObj::removeField(const StringData& name) const {
+ BSONObjBuilder b;
+ BSONObjIterator i(*this);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ const char *fname = e.fieldName();
+ if( strcmp(name.data(), fname) )
+ b.append(e);
+ }
+ return b.obj();
+ }
+}
diff --git a/src/mongo/bson/bson.h b/src/mongo/bson/bson.h
new file mode 100644
index 00000000000..9515adfd829
--- /dev/null
+++ b/src/mongo/bson/bson.h
@@ -0,0 +1,110 @@
+/** @file bson.h
+
+ Main bson include file for mongodb c++ clients. MongoDB includes ../db/jsobj.h instead.
+ This file, however, pulls in much less code / dependencies.
+
+ @see bsondemo
+*/
+
+/*
+ * Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ Main include file for C++ BSON module when using standalone (sans MongoDB client).
+
+ "BSON" stands for "binary JSON" -- ie a binary way to represent objects that would be
+ represented in JSON (plus a few extensions useful for databases & other languages).
+
+ http://www.bsonspec.org/
+*/
+
+#pragma once
+
+#if defined(MONGO_EXPOSE_MACROS)
+#error this header is for client programs, not the mongo database itself. include jsobj.h instead.
+/* because we define simplistic assert helpers here that don't pull in a bunch of util -- so that
+ BSON can be used header only.
+ */
+#endif
+
+#include <cstdlib>
+#include <memory>
+#include <iostream>
+#include <sstream>
+#include <boost/utility.hpp>
+
+namespace bson {
+
+ using std::string;
+ using std::stringstream;
+
+ class assertion : public std::exception {
+ public:
+ assertion( unsigned u , const string& s )
+ : id( u ) , msg( s ) {
+ stringstream ss;
+ ss << "BsonAssertion id: " << u << " " << s;
+ full = ss.str();
+ }
+
+ virtual ~assertion() throw() {}
+
+ virtual const char* what() const throw() { return full.c_str(); }
+
+ unsigned id;
+ string msg;
+ string full;
+ };
+}
+
+namespace mongo {
+#if !defined(assert)
+ inline void assert(bool expr) {
+ if(!expr) {
+ throw bson::assertion( 0 , "assertion failure in bson library" );
+ }
+ }
+#endif
+#if !defined(uassert)
+ inline void uasserted(unsigned msgid, std::string s) {
+ throw bson::assertion( msgid , s );
+ }
+
+ inline void uassert(unsigned msgid, std::string msg, bool expr) {
+ if( !expr )
+ uasserted( msgid , msg );
+ }
+ inline void msgasserted(int msgid, const char *msg) {
+ throw bson::assertion( msgid , msg );
+ }
+ inline void msgasserted(int msgid, const std::string &msg) { msgasserted(msgid, msg.c_str()); }
+ inline void massert(unsigned msgid, std::string msg, bool expr) {
+ if(!expr) {
+ std::cout << "assertion failure in bson library: " << msgid << ' ' << msg << std::endl;
+ throw bson::assertion( msgid , msg );
+ }
+ }
+#endif
+}
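+
+/* Illustrative: in this standalone (header-only) mode, failed BSON assertions surface as
+   bson::assertion, so client code can, for example, do:
+
+       try {
+           mongo::BSONObj o;
+           o["missing"].String();   // wrong type / field does not exist
+       }
+       catch( const bson::assertion& e ) {
+           std::cout << e.what() << std::endl;
+       }
+*/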
+
+#include "util/builder.h"
+#include "bsontypes.h"
+#include "oid.h"
+#include "bsonelement.h"
+#include "bsonobj.h"
+#include "bsonobjbuilder.h"
+#include "bsonobjiterator.h"
+#include "bson-inl.h"
diff --git a/src/mongo/bson/bson_db.h b/src/mongo/bson/bson_db.h
new file mode 100644
index 00000000000..3f597bde3e1
--- /dev/null
+++ b/src/mongo/bson/bson_db.h
@@ -0,0 +1,88 @@
+/** @file bson_db.h
+
+ This file contains the implementation of BSON-related methods that are required
+ by the MongoDB database server.
+
+ Normally, for standalone BSON usage, you do not want this file - it will tend to
+ pull in some other files from the MongoDB project. Thus, bson.h (the main file
+ one would use) does not include this file.
+*/
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "../util/optime.h"
+#include "../util/time_support.h"
+
+namespace mongo {
+
+ /**
+ Timestamps are a special BSON datatype that is used internally for replication.
+ Append a timestamp element to the object being built.
+ @param time - in millis (but stored in seconds)
+ */
+ inline BSONObjBuilder& BSONObjBuilder::appendTimestamp( const StringData& fieldName , unsigned long long time , unsigned int inc ) {
+ OpTime t( (unsigned) (time / 1000) , inc );
+ appendTimestamp( fieldName , t.asDate() );
+ return *this;
+ }
+
+ inline OpTime BSONElement::_opTime() const {
+ if( type() == mongo::Date || type() == Timestamp )
+ return OpTime( *reinterpret_cast< const unsigned long long* >( value() ) );
+ return OpTime();
+ }
+
+ inline string BSONElement::_asCode() const {
+ switch( type() ) {
+ case mongo::String:
+ case Code:
+ return string(valuestr(), valuestrsize()-1);
+ case CodeWScope:
+ return string(codeWScopeCode(), *(int*)(valuestr())-1);
+ default:
+ log() << "can't convert type: " << (int)(type()) << " to code" << endl;
+ }
+ uassert( 10062 , "not code" , 0 );
+ return "";
+ }
+
+ inline BSONObjBuilder& BSONObjBuilderValueStream::operator<<(DateNowLabeler& id) {
+ _builder->appendDate(_fieldName, jsTime());
+ _fieldName = 0;
+ return *_builder;
+ }
+
+ inline BSONObjBuilder& BSONObjBuilderValueStream::operator<<(NullLabeler& id) {
+ _builder->appendNull(_fieldName);
+ _fieldName = 0;
+ return *_builder;
+ }
+
+ inline BSONObjBuilder& BSONObjBuilderValueStream::operator<<(MinKeyLabeler& id) {
+ _builder->appendMinKey(_fieldName);
+ _fieldName = 0;
+ return *_builder;
+ }
+
+ inline BSONObjBuilder& BSONObjBuilderValueStream::operator<<(MaxKeyLabeler& id) {
+ _builder->appendMaxKey(_fieldName);
+ _fieldName = 0;
+ return *_builder;
+ }
+
+}
diff --git a/src/mongo/bson/bsondemo/bsondemo.cpp b/src/mongo/bson/bsondemo/bsondemo.cpp
new file mode 100644
index 00000000000..b53a7b39baa
--- /dev/null
+++ b/src/mongo/bson/bsondemo/bsondemo.cpp
@@ -0,0 +1,113 @@
+/** @file bsondemo.cpp
+
+ Example of use of BSON from C++.
+
+ Requires boost (headers only).
+ Works header-only (at least the parts actually exercised herein; some functions require .cpp files).
+
+ To build and run:
+ g++ -o bsondemo bsondemo.cpp
+ ./bsondemo
+
+ Windows: project files are available in this directory for bsondemo.cpp for use with Visual Studio.
+*/
+
+/*
+ * Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "../bson.h"
+#include <iostream>
+#include <vector>
+
+using namespace std;
+using namespace bson;
+
+void iter(bo o) {
+ /* iterator example */
+ cout << "\niter()\n";
+ for( bo::iterator i(o); i.more(); ) {
+ cout << ' ' << i.next().toString() << '\n';
+ }
+}
+
+int main() {
+ cout << "build bits: " << 8 * sizeof(char *) << '\n' << endl;
+
+ /* a bson object defaults on construction to { } */
+ bo empty;
+ cout << "empty: " << empty << endl;
+
+ /* make a simple { name : 'joe', age : 33.7 } object */
+ {
+ bob b;
+ b.append("name", "joe");
+ b.append("age", 33.7);
+ b.obj();
+ }
+
+ /* make { name : 'joe', age : 33.7 } with a more compact notation. */
+ bo x = bob().append("name", "joe").append("age", 33.7).obj();
+
+ /* convert from bson to json */
+ string json = x.toString();
+ cout << "json for x:" << json << endl;
+
+ /* access some fields of bson object x */
+ cout << "Some x things: " << x["name"] << ' ' << x["age"].Number() << ' ' << x.isEmpty() << endl;
+
+ /* make a bit more complex object with some nesting
+ { x : 'asdf', y : true, subobj : { z : 3, q : 4 } }
+ */
+ bo y = BSON( "x" << "asdf" << "y" << true << "subobj" << BSON( "z" << 3 << "q" << 4 ) );
+
+ /* print it */
+ cout << "y: " << y << endl;
+
+ /* reach in and get subobj.z */
+ cout << "subobj.z: " << y.getFieldDotted("subobj.z").Number() << endl;
+
+ /* alternate syntax: */
+ cout << "subobj.z: " << y["subobj"]["z"].Number() << endl;
+
+ /* fetch all *top level* elements from object y into a vector */
+ vector<be> v;
+ y.elems(v);
+ cout << v[0] << endl;
+
+ /* into an array */
+ list<be> L;
+ y.elems(L);
+
+ bo sub = y["subobj"].Obj();
+
+ /* grab all the ints that were in subobj. if it had elements that were not ints, we throw an exception
+ (the capital V on Vals() means an exception is thrown if a wrong type is found).
+ */
+ vector<int> myints;
+ sub.Vals(myints);
+ cout << "my ints: " << myints[0] << ' ' << myints[1] << endl;
+
+ /* grab all the string values from x. if a field isn't of string type, just skip it --
+ the lowercase v on vals() means skip, don't throw.
+ */
+ vector<string> strs;
+ x.vals(strs);
+ cout << strs.size() << " strings, first one: " << strs[0] << endl;
+
+ iter(y);
+ return 0;
+}
+
diff --git a/src/mongo/bson/bsondemo/bsondemo.vcproj b/src/mongo/bson/bsondemo/bsondemo.vcproj
new file mode 100644
index 00000000000..8432cebfd87
--- /dev/null
+++ b/src/mongo/bson/bsondemo/bsondemo.vcproj
@@ -0,0 +1,243 @@
+<?xml version="1.0" encoding="Windows-1252"?>
+<VisualStudioProject
+ ProjectType="Visual C++"
+ Version="9.00"
+ Name="bsondemo"
+ ProjectGUID="{C9DB5EB7-81AA-4185-BAA1-DA035654402F}"
+ RootNamespace="bsondemo"
+ Keyword="Win32Proj"
+ TargetFrameworkVersion="196613"
+ >
+ <Platforms>
+ <Platform
+ Name="Win32"
+ />
+ </Platforms>
+ <ToolFiles>
+ </ToolFiles>
+ <Configurations>
+ <Configuration
+ Name="Debug|Win32"
+ OutputDirectory="$(SolutionDir)$(ConfigurationName)"
+ IntermediateDirectory="$(ConfigurationName)"
+ ConfigurationType="1"
+ CharacterSet="1"
+ >
+ <Tool
+ Name="VCPreBuildEventTool"
+ />
+ <Tool
+ Name="VCCustomBuildTool"
+ />
+ <Tool
+ Name="VCXMLDataGeneratorTool"
+ />
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"
+ />
+ <Tool
+ Name="VCMIDLTool"
+ />
+ <Tool
+ Name="VCCLCompilerTool"
+ Optimization="0"
+ AdditionalIncludeDirectories="c:\program files\boost\latest;c:\boost;\boost"
+ PreprocessorDefinitions="WIN32;_DEBUG;_CONSOLE"
+ MinimalRebuild="true"
+ BasicRuntimeChecks="3"
+ RuntimeLibrary="3"
+ UsePrecompiledHeader="0"
+ WarningLevel="3"
+ DebugInformationFormat="4"
+ />
+ <Tool
+ Name="VCManagedResourceCompilerTool"
+ />
+ <Tool
+ Name="VCResourceCompilerTool"
+ />
+ <Tool
+ Name="VCPreLinkEventTool"
+ />
+ <Tool
+ Name="VCLinkerTool"
+ LinkIncremental="2"
+ GenerateDebugInformation="true"
+ SubSystem="1"
+ TargetMachine="1"
+ />
+ <Tool
+ Name="VCALinkTool"
+ />
+ <Tool
+ Name="VCManifestTool"
+ />
+ <Tool
+ Name="VCXDCMakeTool"
+ />
+ <Tool
+ Name="VCBscMakeTool"
+ />
+ <Tool
+ Name="VCFxCopTool"
+ />
+ <Tool
+ Name="VCAppVerifierTool"
+ />
+ <Tool
+ Name="VCPostBuildEventTool"
+ />
+ </Configuration>
+ <Configuration
+ Name="Release|Win32"
+ OutputDirectory="$(SolutionDir)$(ConfigurationName)"
+ IntermediateDirectory="$(ConfigurationName)"
+ ConfigurationType="1"
+ CharacterSet="1"
+ WholeProgramOptimization="1"
+ >
+ <Tool
+ Name="VCPreBuildEventTool"
+ />
+ <Tool
+ Name="VCCustomBuildTool"
+ />
+ <Tool
+ Name="VCXMLDataGeneratorTool"
+ />
+ <Tool
+ Name="VCWebServiceProxyGeneratorTool"
+ />
+ <Tool
+ Name="VCMIDLTool"
+ />
+ <Tool
+ Name="VCCLCompilerTool"
+ Optimization="2"
+ EnableIntrinsicFunctions="true"
+ AdditionalIncludeDirectories="c:\program files\boost\latest;c:\boost;\boost"
+ PreprocessorDefinitions="WIN32;NDEBUG;_CONSOLE"
+ RuntimeLibrary="2"
+ EnableFunctionLevelLinking="true"
+ UsePrecompiledHeader="0"
+ WarningLevel="3"
+ DebugInformationFormat="3"
+ />
+ <Tool
+ Name="VCManagedResourceCompilerTool"
+ />
+ <Tool
+ Name="VCResourceCompilerTool"
+ />
+ <Tool
+ Name="VCPreLinkEventTool"
+ />
+ <Tool
+ Name="VCLinkerTool"
+ LinkIncremental="1"
+ GenerateDebugInformation="true"
+ SubSystem="1"
+ OptimizeReferences="2"
+ EnableCOMDATFolding="2"
+ TargetMachine="1"
+ />
+ <Tool
+ Name="VCALinkTool"
+ />
+ <Tool
+ Name="VCManifestTool"
+ />
+ <Tool
+ Name="VCXDCMakeTool"
+ />
+ <Tool
+ Name="VCBscMakeTool"
+ />
+ <Tool
+ Name="VCFxCopTool"
+ />
+ <Tool
+ Name="VCAppVerifierTool"
+ />
+ <Tool
+ Name="VCPostBuildEventTool"
+ />
+ </Configuration>
+ </Configurations>
+ <References>
+ </References>
+ <Files>
+ <Filter
+ Name="Source Files"
+ Filter="cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx"
+ UniqueIdentifier="{4FC737F1-C7A5-4376-A066-2A32D752A2FF}"
+ >
+ <File
+ RelativePath=".\bsondemo.cpp"
+ >
+ </File>
+ </Filter>
+ <Filter
+ Name="bson"
+ >
+ <File
+ RelativePath="..\bson.h"
+ >
+ </File>
+ <File
+ RelativePath="..\bson_db.h"
+ >
+ </File>
+ <File
+ RelativePath="..\bsonelement.h"
+ >
+ </File>
+ <File
+ RelativePath="..\bsoninlines.h"
+ >
+ </File>
+ <File
+ RelativePath="..\bsonmisc.h"
+ >
+ </File>
+ <File
+ RelativePath="..\bsonobj.h"
+ >
+ </File>
+ <File
+ RelativePath="..\bsonobjbuilder.h"
+ >
+ </File>
+ <File
+ RelativePath="..\bsonobjiterator.h"
+ >
+ </File>
+ <File
+ RelativePath="..\bsontypes.h"
+ >
+ </File>
+ <File
+ RelativePath="..\oid.h"
+ >
+ </File>
+ <File
+ RelativePath="..\ordering.h"
+ >
+ </File>
+ <Filter
+ Name="util"
+ >
+ <File
+ RelativePath="..\util\builder.h"
+ >
+ </File>
+ <File
+ RelativePath="..\util\misc.h"
+ >
+ </File>
+ </Filter>
+ </Filter>
+ </Files>
+ <Globals>
+ </Globals>
+</VisualStudioProject>
diff --git a/src/mongo/bson/bsondemo/bsondemo.vcxproj b/src/mongo/bson/bsondemo/bsondemo.vcxproj
new file mode 100644
index 00000000000..2ad53894d7d
--- /dev/null
+++ b/src/mongo/bson/bsondemo/bsondemo.vcxproj
@@ -0,0 +1,193 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup Label="ProjectConfigurations">
+ <ProjectConfiguration Include="Debug|Win32">
+ <Configuration>Debug</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Debug|x64">
+ <Configuration>Debug</Configuration>
+ <Platform>x64</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|Win32">
+ <Configuration>Release</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|x64">
+ <Configuration>Release</Configuration>
+ <Platform>x64</Platform>
+ </ProjectConfiguration>
+ </ItemGroup>
+ <PropertyGroup Label="Globals">
+ <ProjectGuid>{C9DB5EB7-81AA-4185-BAA1-DA035654402F}</ProjectGuid>
+ <RootNamespace>bsondemo</RootNamespace>
+ <Keyword>Win32Proj</Keyword>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <CharacterSet>Unicode</CharacterSet>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <CharacterSet>Unicode</CharacterSet>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+ <ImportGroup Label="ExtensionSettings">
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup>
+ <_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(SolutionDir)$(Configuration)\</OutDir>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(SolutionDir)$(Configuration)\</OutDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(Configuration)\</IntDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(Configuration)\</IntDir>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</LinkIncremental>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</LinkIncremental>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(SolutionDir)$(Configuration)\</OutDir>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(SolutionDir)$(Configuration)\</OutDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(Configuration)\</IntDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(Configuration)\</IntDir>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">false</LinkIncremental>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|x64'">false</LinkIncremental>
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
+ </PropertyGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <ClCompile>
+ <Optimization>Disabled</Optimization>
+ <AdditionalIncludeDirectories>c:\boost;\boost</AdditionalIncludeDirectories>
+ <PreprocessorDefinitions>_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <MinimalRebuild>No</MinimalRebuild>
+ <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
+ <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>EditAndContinue</DebugInformationFormat>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ </ClCompile>
+ <Link>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ <TargetMachine>MachineX86</TargetMachine>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ <ClCompile>
+ <Optimization>Disabled</Optimization>
+ <AdditionalIncludeDirectories>c:\boost;\boost</AdditionalIncludeDirectories>
+ <PreprocessorDefinitions>_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
+ <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+ <MinimalRebuild>No</MinimalRebuild>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ </ClCompile>
+ <Link>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <ClCompile>
+ <Optimization>MaxSpeed</Optimization>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions>NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+ <AdditionalIncludeDirectories>c:\boost;\boost</AdditionalIncludeDirectories>
+ <MinimalRebuild>No</MinimalRebuild>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ </ClCompile>
+ <Link>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ <OptimizeReferences>true</OptimizeReferences>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <TargetMachine>MachineX86</TargetMachine>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ <ClCompile>
+ <Optimization>MaxSpeed</Optimization>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions>NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+ <AdditionalIncludeDirectories>c:\boost;\boost</AdditionalIncludeDirectories>
+ <MinimalRebuild>No</MinimalRebuild>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ </ClCompile>
+ <Link>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ <OptimizeReferences>true</OptimizeReferences>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemGroup>
+ <ClCompile Include="bsondemo.cpp" />
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="..\bson.h" />
+ <ClInclude Include="..\bson_db.h" />
+ <ClInclude Include="..\bsonelement.h" />
+ <ClInclude Include="..\bsoninlines.h" />
+ <ClInclude Include="..\bsonmisc.h" />
+ <ClInclude Include="..\bsonobj.h" />
+ <ClInclude Include="..\bsonobjbuilder.h" />
+ <ClInclude Include="..\bsonobjiterator.h" />
+ <ClInclude Include="..\bsontypes.h" />
+ <ClInclude Include="..\oid.h" />
+ <ClInclude Include="..\ordering.h" />
+ <ClInclude Include="..\util\builder.h" />
+ <ClInclude Include="..\util\misc.h" />
+ </ItemGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+ <ImportGroup Label="ExtensionTargets">
+ </ImportGroup>
+</Project>
diff --git a/src/mongo/bson/bsondemo/bsondemo.vcxproj.filters b/src/mongo/bson/bsondemo/bsondemo.vcxproj.filters
new file mode 100644
index 00000000000..35f14d5193b
--- /dev/null
+++ b/src/mongo/bson/bsondemo/bsondemo.vcxproj.filters
@@ -0,0 +1,52 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup>
+ <ClCompile Include="bsondemo.cpp" />
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="..\ordering.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bsonelement.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bsoninlines.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bsonmisc.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bsonobj.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bsonobjbuilder.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bsonobjiterator.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bsontypes.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\builder.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\misc.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\oid.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson_db.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ </ItemGroup>
+ <ItemGroup>
+ <Filter Include="bson">
+ <UniqueIdentifier>{ea599740-3c6f-40dd-a121-e825d82ae4aa}</UniqueIdentifier>
+ </Filter>
+ </ItemGroup>
+</Project>
diff --git a/src/mongo/bson/bsonelement.h b/src/mongo/bson/bsonelement.h
new file mode 100644
index 00000000000..57cc2ae5775
--- /dev/null
+++ b/src/mongo/bson/bsonelement.h
@@ -0,0 +1,583 @@
+// BSONElement
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <vector>
+#include <string.h>
+#include "util/builder.h"
+#include "bsontypes.h"
+
+namespace mongo {
+ class OpTime;
+ class BSONObj;
+ class BSONElement;
+ class BSONObjBuilder;
+}
+
+namespace bson {
+ typedef mongo::BSONElement be;
+ typedef mongo::BSONObj bo;
+ typedef mongo::BSONObjBuilder bob;
+}
+
+namespace mongo {
+
+ /* l and r MUST have same type when called: check that first. */
+ int compareElementValues(const BSONElement& l, const BSONElement& r);
+
+
+ /** BSONElement represents an "element" in a BSONObj. So for the object { a : 3, b : "abc" },
+ 'a : 3' is the first element (key+value).
+
+ The BSONElement object points into the BSONObj's data. Thus the BSONObj must stay in scope
+ for the life of the BSONElement.
+
+ internals:
+ <type><fieldName ><value>
+ -------- size() ------------
+ -fieldNameSize-
+ value()
+ type()
+ */
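+
+ /* Illustrative layout: for the element  x : 1  (NumberInt) the bytes are
+        10            type (NumberInt)
+        78 00         field name "x" as a cstring
+        01 00 00 00   int32 value, little endian
+    so fieldNameSize() == 2 and size() == 7.
+ */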
+ class BSONElement {
+ public:
+ /** These functions, which start with a capital letter, throw a UserException if the
+ element is not of the required type. Example:
+
+ string foo = obj["foo"].String(); // exception if not a string type or DNE
+ */
+ string String() const { return chk(mongo::String).valuestr(); }
+ Date_t Date() const { return chk(mongo::Date).date(); }
+ double Number() const { return chk(isNumber()).number(); }
+ double Double() const { return chk(NumberDouble)._numberDouble(); }
+ long long Long() const { return chk(NumberLong)._numberLong(); }
+ int Int() const { return chk(NumberInt)._numberInt(); }
+ bool Bool() const { return chk(mongo::Bool).boolean(); }
+ vector<BSONElement> Array() const; // see implementation for detailed comments
+ mongo::OID OID() const { return chk(jstOID).__oid(); }
+ void Null() const { chk(isNull()); } // throw UserException if not null
+ void OK() const { chk(ok()); } // throw UserException if element DNE
+
+ /** @return the embedded object associated with this field.
+ Note the returned object is a reference to within the parent bson object. If that
+ object is out of scope, this pointer will no longer be valid. Call getOwned() on the
+ returned BSONObj if you need your own copy.
+ throws UserException if the element is not of type object.
+ */
+ BSONObj Obj() const;
+
+ /** populate v with the value of the element. If type does not match, throw exception.
+ useful in templates -- see also BSONObj::Vals().
+ */
+ void Val(Date_t& v) const { v = Date(); }
+ void Val(long long& v) const { v = Long(); }
+ void Val(bool& v) const { v = Bool(); }
+ void Val(BSONObj& v) const;
+ void Val(mongo::OID& v) const { v = OID(); }
+ void Val(int& v) const { v = Int(); }
+ void Val(double& v) const { v = Double(); }
+ void Val(string& v) const { v = String(); }
+
+ /** Use ok() to check if a value is assigned:
+ if( myObj["foo"].ok() ) ...
+ */
+ bool ok() const { return !eoo(); }
+
+ string toString( bool includeFieldName = true, bool full=false) const;
+ void toString(StringBuilder& s, bool includeFieldName = true, bool full=false) const;
+ string jsonString( JsonStringFormat format, bool includeFieldNames = true, int pretty = 0 ) const;
+ operator string() const { return toString(); }
+
+ /** Returns the type of the element */
+ BSONType type() const { return (BSONType) *data; }
+
+ /** retrieve a field within this element
+ throws exception if *this is not an embedded object
+ */
+ BSONElement operator[] (const string& field) const;
+
+ /** returns the type of the element, canonicalized for the main type.
+ the main purpose is numbers: any numeric type will return NumberDouble.
+ Note: if the ordering changes, indexes have to be rebuilt or there can be corruption.
+ */
+ int canonicalType() const;
+
+ /** Indicates if it is the end-of-object element, which is present at the end of
+ every BSON object.
+ */
+ bool eoo() const { return type() == EOO; }
+
+ /** Size of the element.
+ @param maxLen If maxLen is specified, don't scan more than maxLen bytes to calculate size.
+ */
+ int size( int maxLen ) const;
+ int size() const;
+
+ /** Wrap this element up as a singleton object. */
+ BSONObj wrap() const;
+
+ /** Wrap this element up as a singleton object with a new name. */
+ BSONObj wrap( const char* newName) const;
+
+ /** field name of the element. e.g., for
+ name : "Joe"
+ "name" is the fieldname
+ */
+ const char * fieldName() const {
+ if ( eoo() ) return ""; // no fieldname for it.
+ return data + 1;
+ }
+
+ /** raw data of the element's value (so be careful). */
+ const char * value() const {
+ return (data + fieldNameSize() + 1);
+ }
+ /** size in bytes of the element's value (when applicable). */
+ int valuesize() const {
+ return size() - fieldNameSize() - 1;
+ }
+
+ bool isBoolean() const { return type() == mongo::Bool; }
+
+ /** @return value of a boolean element.
+ You must ensure the element is a boolean before
+ calling. */
+ bool boolean() const {
+ return *value() ? true : false;
+ }
+
+ bool booleanSafe() const { return isBoolean() && boolean(); }
+
+ /** Retrieve a java style date value from the element.
+ Ensure element is of type Date before calling.
+ @see Bool(), trueValue()
+ */
+ Date_t date() const {
+ return *reinterpret_cast< const Date_t* >( value() );
+ }
+
+ /** Convert the value to boolean, regardless of its type, in a javascript-like fashion
+ (i.e., treats zero and null and eoo as false).
+ */
+ bool trueValue() const;
+
+ /** True if number, string, bool, date, OID */
+ bool isSimpleType() const;
+
+ /** True if element is of a numeric type. */
+ bool isNumber() const;
+
+ /** Return double value for this field. MUST be NumberDouble type. */
+ double _numberDouble() const {return *reinterpret_cast< const double* >( value() ); }
+ /** Return int value for this field. MUST be NumberInt type. */
+ int _numberInt() const {return *reinterpret_cast< const int* >( value() ); }
+ /** Return long long value for this field. MUST be NumberLong type. */
+ long long _numberLong() const {return *reinterpret_cast< const long long* >( value() ); }
+
+ /** Retrieve int value for the element safely. Zero returned if not a number. */
+ int numberInt() const;
+ /** Retrieve long value for the element safely. Zero returned if not a number. */
+ long long numberLong() const;
+ /** Retrieve the numeric value of the element. If not of a numeric type, returns 0.
+ Note: casts to double, data loss may occur with large (>52 bit) NumberLong values.
+ */
+ double numberDouble() const;
+ /** Retrieve the numeric value of the element. If not of a numeric type, returns 0.
+ Note: casts to double, data loss may occur with large (>52 bit) NumberLong values.
+ */
+ double number() const { return numberDouble(); }
+
+ /** Retrieve the object ID stored in the object.
+ You must ensure the element is of type jstOID first. */
+ const mongo::OID &__oid() const { return *reinterpret_cast< const mongo::OID* >( value() ); }
+
+ /** True if element is null. */
+ bool isNull() const {
+ return type() == jstNULL;
+ }
+
+ /** Size (length) of a string element.
+ You must ensure the element is of type String first.
+ @return string size including terminating null
+ */
+ int valuestrsize() const {
+ return *reinterpret_cast< const int* >( value() );
+ }
+
+ // for objects the size *includes* the size of the size field
+ int objsize() const {
+ return *reinterpret_cast< const int* >( value() );
+ }
+
+ /** Get a string's value. Also gives you start of the real data for an embedded object.
+ You must ensure the data is of an appropriate type first -- see also valuestrsafe().
+ */
+ const char * valuestr() const {
+ return value() + 4;
+ }
+
+ /** Get the string value of the element. If not a string returns "". */
+ const char *valuestrsafe() const {
+ return type() == mongo::String ? valuestr() : "";
+ }
+ /** Get the string value of the element. If not a string returns "". */
+ string str() const {
+ return type() == mongo::String ? string(valuestr(), valuestrsize()-1) : string();
+ }
+
+ /** Get javascript code of a CodeWScope data element. */
+ const char * codeWScopeCode() const {
+ return value() + 8;
+ }
+ /** Get the scope SavedContext of a CodeWScope data element. */
+ const char * codeWScopeScopeData() const {
+ // TODO fix
+ return codeWScopeCode() + strlen( codeWScopeCode() ) + 1;
+ }
+
+ /** Get the embedded object this element holds. */
+ BSONObj embeddedObject() const;
+
+ /* uasserts if not an object */
+ BSONObj embeddedObjectUserCheck() const;
+
+ BSONObj codeWScopeObject() const;
+
+ /** Get raw binary data. Element must be of type BinData. Doesn't handle type 2 specially */
+ const char *binData(int& len) const {
+ // BinData: <int len> <byte subtype> <byte[len] data>
+ assert( type() == BinData );
+ len = valuestrsize();
+ return value() + 5;
+ }
+ /** Get binary data. Element must be of type BinData. Handles type 2 */
+ const char *binDataClean(int& len) const {
+ // BinData: <int len> <byte subtype> <byte[len] data>
+ if (binDataType() != ByteArrayDeprecated) {
+ return binData(len);
+ }
+ else {
+ // Skip extra size
+ len = valuestrsize() - 4;
+ return value() + 5 + 4;
+ }
+ }
+
+ BinDataType binDataType() const {
+ // BinData: <int len> <byte subtype> <byte[len] data>
+ assert( type() == BinData );
+ unsigned char c = (value() + 4)[0];
+ return (BinDataType)c;
+ }
+
+ /** Retrieve the regex string for a Regex element */
+ const char *regex() const {
+ assert(type() == RegEx);
+ return value();
+ }
+
+ /** Retrieve the regex flags (options) for a Regex element */
+ const char *regexFlags() const {
+ const char *p = regex();
+ return p + strlen(p) + 1;
+ }
+
+ /** like operator== but doesn't check the fieldname,
+ just the value.
+ */
+ bool valuesEqual(const BSONElement& r) const {
+ return woCompare( r , false ) == 0;
+ }
+
+ /** Returns true if elements are equal. */
+ bool operator==(const BSONElement& r) const {
+ return woCompare( r , true ) == 0;
+ }
+ /** Returns true if elements are unequal. */
+ bool operator!=(const BSONElement& r) const { return !operator==(r); }
+
+ /** Well ordered comparison.
+ @return <0: l<r. 0:l==r. >0:l>r
+ order by type, field name, and field value.
+ If considerFieldName is true, pay attention to the field name.
+ */
+ int woCompare( const BSONElement &e, bool considerFieldName = true ) const;
+
+ const char * rawdata() const { return data; }
+
+ /** 0 == Equality, just not defined yet */
+ int getGtLtOp( int def = 0 ) const;
+
+ /** Constructs an empty element */
+ BSONElement();
+
+ /** Check that data is internally consistent. */
+ void validate() const;
+
+ /** True if this element may contain subobjects. */
+ bool mayEncapsulate() const {
+ switch ( type() ) {
+ case Object:
+ case mongo::Array:
+ case CodeWScope:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ /** True if this element can be a BSONObj */
+ bool isABSONObj() const {
+ switch( type() ) {
+ case Object:
+ case mongo::Array:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ Date_t timestampTime() const {
+ unsigned long long t = ((unsigned int*)(value() + 4 ))[0];
+ return t * 1000;
+ }
+ unsigned int timestampInc() const {
+ return ((unsigned int*)(value() ))[0];
+ }
+
+ const char * dbrefNS() const {
+ uassert( 10063 , "not a dbref" , type() == DBRef );
+ return value() + 4;
+ }
+
+ const mongo::OID& dbrefOID() const {
+ uassert( 10064 , "not a dbref" , type() == DBRef );
+ const char * start = value();
+ start += 4 + *reinterpret_cast< const int* >( start );
+ return *reinterpret_cast< const mongo::OID* >( start );
+ }
+
+ /** this does not use fieldName in the comparison, just the value */
+ bool operator<( const BSONElement& other ) const {
+ int x = (int)canonicalType() - (int)other.canonicalType();
+ if ( x < 0 ) return true;
+ else if ( x > 0 ) return false;
+ return compareElementValues(*this,other) < 0;
+ }
+
+ // @param maxLen don't scan more than maxLen bytes
+ explicit BSONElement(const char *d, int maxLen) : data(d) {
+ if ( eoo() ) {
+ totalSize = 1;
+ fieldNameSize_ = 0;
+ }
+ else {
+ totalSize = -1;
+ fieldNameSize_ = -1;
+ if ( maxLen != -1 ) {
+ int size = (int) strnlen( fieldName(), maxLen - 1 );
+ uassert( 10333 , "Invalid field name", size != -1 );
+ fieldNameSize_ = size + 1;
+ }
+ }
+ }
+
+ explicit BSONElement(const char *d) : data(d) {
+ fieldNameSize_ = -1;
+ totalSize = -1;
+ if ( eoo() ) {
+ fieldNameSize_ = 0;
+ totalSize = 1;
+ }
+ }
+
+ string _asCode() const;
+ OpTime _opTime() const;
+
+ private:
+ const char *data;
+ mutable int fieldNameSize_; // cached value
+ int fieldNameSize() const {
+ if ( fieldNameSize_ == -1 )
+ fieldNameSize_ = (int)strlen( fieldName() ) + 1;
+ return fieldNameSize_;
+ }
+ mutable int totalSize; /* caches the computed size */
+
+ friend class BSONObjIterator;
+ friend class BSONObj;
+ const BSONElement& chk(int t) const {
+ if ( t != type() ) {
+ StringBuilder ss;
+ if( eoo() )
+ ss << "field not found, expected type " << t;
+ else
+ ss << "wrong type for field (" << fieldName() << ") " << type() << " != " << t;
+ uasserted(13111, ss.str() );
+ }
+ return *this;
+ }
+ const BSONElement& chk(bool expr) const {
+ uassert(13118, "unexpected or missing type value in BSON object", expr);
+ return *this;
+ }
+ };
+
+
+ inline int BSONElement::canonicalType() const {
+ BSONType t = type();
+ switch ( t ) {
+ case MinKey:
+ case MaxKey:
+ return t;
+ case EOO:
+ case Undefined:
+ return 0;
+ case jstNULL:
+ return 5;
+ case NumberDouble:
+ case NumberInt:
+ case NumberLong:
+ return 10;
+ case mongo::String:
+ case Symbol:
+ return 15;
+ case Object:
+ return 20;
+ case mongo::Array:
+ return 25;
+ case BinData:
+ return 30;
+ case jstOID:
+ return 35;
+ case mongo::Bool:
+ return 40;
+ case mongo::Date:
+ case Timestamp:
+ return 45;
+ case RegEx:
+ return 50;
+ case DBRef:
+ return 55;
+ case Code:
+ return 60;
+ case CodeWScope:
+ return 65;
+ default:
+ assert(0);
+ return -1;
+ }
+ }
+
+ inline bool BSONElement::trueValue() const {
+ switch( type() ) {
+ case NumberLong:
+ return *reinterpret_cast< const long long* >( value() ) != 0;
+ case NumberDouble:
+ return *reinterpret_cast< const double* >( value() ) != 0;
+ case NumberInt:
+ return *reinterpret_cast< const int* >( value() ) != 0;
+ case mongo::Bool:
+ return boolean();
+ case EOO:
+ case jstNULL:
+ case Undefined:
+ return false;
+
+ default:
+ ;
+ }
+ return true;
+ }
+
+ /** @return true if element is of a numeric type. */
+ inline bool BSONElement::isNumber() const {
+ switch( type() ) {
+ case NumberLong:
+ case NumberDouble:
+ case NumberInt:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ inline bool BSONElement::isSimpleType() const {
+ switch( type() ) {
+ case NumberLong:
+ case NumberDouble:
+ case NumberInt:
+ case mongo::String:
+ case mongo::Bool:
+ case mongo::Date:
+ case jstOID:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ inline double BSONElement::numberDouble() const {
+ switch( type() ) {
+ case NumberDouble:
+ return _numberDouble();
+ case NumberInt:
+ return *reinterpret_cast< const int* >( value() );
+ case NumberLong:
+ return (double) *reinterpret_cast< const long long* >( value() );
+ default:
+ return 0;
+ }
+ }
+
+ /** Retrieve int value for the element safely. Zero returned if not a number. Converted to int if another numeric type. */
+ inline int BSONElement::numberInt() const {
+ switch( type() ) {
+ case NumberDouble:
+ return (int) _numberDouble();
+ case NumberInt:
+ return _numberInt();
+ case NumberLong:
+ return (int) _numberLong();
+ default:
+ return 0;
+ }
+ }
+
+ /** Retrieve long value for the element safely. Zero returned if not a number. */
+ inline long long BSONElement::numberLong() const {
+ switch( type() ) {
+ case NumberDouble:
+ return (long long) _numberDouble();
+ case NumberInt:
+ return _numberInt();
+ case NumberLong:
+ return _numberLong();
+ default:
+ return 0;
+ }
+ }
+
+ inline BSONElement::BSONElement() {
+ static char z = 0;
+ data = &z;
+ fieldNameSize_ = 0;
+ totalSize = 1;
+ }
+
+}
diff --git a/src/mongo/bson/bsonmisc.h b/src/mongo/bson/bsonmisc.h
new file mode 100644
index 00000000000..8a379396d17
--- /dev/null
+++ b/src/mongo/bson/bsonmisc.h
@@ -0,0 +1,211 @@
+// @file bsonmisc.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+namespace mongo {
+
+ int getGtLtOp(const BSONElement& e);
+
+ struct BSONElementCmpWithoutField {
+ bool operator()( const BSONElement &l, const BSONElement &r ) const {
+ return l.woCompare( r, false ) < 0;
+ }
+ };
+
+ class BSONObjCmp {
+ public:
+ BSONObjCmp( const BSONObj &order = BSONObj() ) : _order( order ) {}
+ bool operator()( const BSONObj &l, const BSONObj &r ) const {
+ return l.woCompare( r, _order ) < 0;
+ }
+ BSONObj order() const { return _order; }
+ private:
+ BSONObj _order;
+ };
+
+ typedef set<BSONObj,BSONObjCmp> BSONObjSet;
+
+ enum FieldCompareResult {
+ LEFT_SUBFIELD = -2,
+ LEFT_BEFORE = -1,
+ SAME = 0,
+ RIGHT_BEFORE = 1 ,
+ RIGHT_SUBFIELD = 2
+ };
+
+ FieldCompareResult compareDottedFieldNames( const string& l , const string& r );
+
+ /** Use BSON macro to build a BSONObj from a stream
+
+ e.g.,
+ BSON( "name" << "joe" << "age" << 33 )
+
+ with auto-generated object id:
+ BSON( GENOID << "name" << "joe" << "age" << 33 )
+
+ The labels GT, GTE, LT, LTE, NE can be helpful for stream-oriented construction
+ of a BSONObj, particularly when assembling a Query. For example,
+ BSON( "a" << GT << 23.4 << NE << 30 << "b" << 2 ) produces the object
+ { a: { \$gt: 23.4, \$ne: 30 }, b: 2 }.
+ */
+#define BSON(x) (( mongo::BSONObjBuilder(64) << x ).obj())
+
+ /** Use BSON_ARRAY macro like BSON macro, but without keys
+
+ BSONArray arr = BSON_ARRAY( "hello" << 1 << BSON( "foo" << BSON_ARRAY( "bar" << "baz" << "qux" ) ) );
+
+ */
+#define BSON_ARRAY(x) (( mongo::BSONArrayBuilder() << x ).arr())
+
+ /* Utility class to auto assign object IDs.
+ Example:
+ cout << BSON( GENOID << "z" << 3 ); // { _id : ..., z : 3 }
+ */
+ extern struct GENOIDLabeler { } GENOID;
+
+ /* Utility class to add a Date element with the current time
+ Example:
+ cout << BSON( "created" << DATENOW ); // { created : "2009-10-09 11:41:42" }
+ */
+ extern struct DateNowLabeler { } DATENOW;
+
+ /* Utility class to assign a NULL value to a given attribute
+ Example:
+ cout << BSON( "a" << BSONNULL ); // { a : null }
+ */
+ extern struct NullLabeler { } BSONNULL;
+
+ /* Utility class to add the minKey (minus infinity) to a given attribute
+ Example:
+ cout << BSON( "a" << MINKEY ); // { "a" : { "$minKey" : 1 } }
+ */
+ extern struct MinKeyLabeler { } MINKEY;
+ extern struct MaxKeyLabeler { } MAXKEY;
+
+ // Utility class to implement GT, GTE, etc as described above.
+ class Labeler {
+ public:
+ struct Label {
+ Label( const char *l ) : l_( l ) {}
+ const char *l_;
+ };
+ Labeler( const Label &l, BSONObjBuilderValueStream *s ) : l_( l ), s_( s ) {}
+ template<class T>
+ BSONObjBuilder& operator<<( T value );
+
+ /* the value of the element e is appended i.e. for
+ "age" << GT << someElement
+ one gets
+ { age : { $gt : someElement's value } }
+ */
+ BSONObjBuilder& operator<<( const BSONElement& e );
+ private:
+ const Label &l_;
+ BSONObjBuilderValueStream *s_;
+ };
+
+ extern Labeler::Label GT;
+ extern Labeler::Label GTE;
+ extern Labeler::Label LT;
+ extern Labeler::Label LTE;
+ extern Labeler::Label NE;
+ extern Labeler::Label SIZE;
+
+
+ // $or helper: OR(BSON("x" << GT << 7), BSON("y" << LT << 6));
+ // becomes : {$or: [{x: {$gt: 7}}, {y: {$lt: 6}}]}
+ inline BSONObj OR(const BSONObj& a, const BSONObj& b);
+ inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c);
+ inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c, const BSONObj& d);
+ inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c, const BSONObj& d, const BSONObj& e);
+ inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c, const BSONObj& d, const BSONObj& e, const BSONObj& f);
+ // definitions in bsonobjbuilder.h b/c of incomplete types
+
+ // Utility class to implement BSON( key << val ) as described above.
+ class BSONObjBuilderValueStream : public boost::noncopyable {
+ public:
+ friend class Labeler;
+ BSONObjBuilderValueStream( BSONObjBuilder * builder );
+
+ BSONObjBuilder& operator<<( const BSONElement& e );
+
+ template<class T>
+ BSONObjBuilder& operator<<( T value );
+
+ BSONObjBuilder& operator<<(DateNowLabeler& id);
+
+ BSONObjBuilder& operator<<(NullLabeler& id);
+
+ BSONObjBuilder& operator<<(MinKeyLabeler& id);
+ BSONObjBuilder& operator<<(MaxKeyLabeler& id);
+
+ Labeler operator<<( const Labeler::Label &l );
+
+ void endField( const char *nextFieldName = 0 );
+ bool subobjStarted() const { return _fieldName != 0; }
+
+ private:
+ const char * _fieldName;
+ BSONObjBuilder * _builder;
+
+ bool haveSubobj() const { return _subobj.get() != 0; }
+ BSONObjBuilder *subobj();
+ auto_ptr< BSONObjBuilder > _subobj;
+ };
+
+ /**
+ used in conjunction with BSONObjBuilder to track observed object sizes, so a sensible initial buffer size can be chosen and excessive memory usage avoided
+ */
+ class BSONSizeTracker {
+ public:
+ BSONSizeTracker() {
+ _pos = 0;
+ for ( int i=0; i<SIZE; i++ )
+ _sizes[i] = 512; // this is the default, so just be consistent
+ }
+
+ ~BSONSizeTracker() {
+ }
+
+ void got( int size ) {
+ _sizes[_pos++] = size;
+ if ( _pos >= SIZE )
+ _pos = 0;
+ }
+
+ /**
+ * currently returns the largest size seen
+ */
+ int getSize() const {
+ int x = 16; // sane min
+ for ( int i=0; i<SIZE; i++ ) {
+ if ( _sizes[i] > x )
+ x = _sizes[i];
+ }
+ return x;
+ }
+
+ private:
+ enum { SIZE = 10 };
+ int _pos;
+ int _sizes[SIZE];
+ };
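+
+ // A rough usage sketch (illustrative only; 'n' is not part of this header): reuse one
+ // tracker so successive builders start with a buffer close to the sizes previously seen.
+ //     BSONSizeTracker tracker;
+ //     for ( int i = 0; i < n; i++ ) {
+ //         BSONObjBuilder b( tracker );   // initial buffer sized from tracker.getSize()
+ //         b.append( "i" , i );
+ //         BSONObj o = b.obj();           // the final size is reported back to the tracker
+ //     }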
+
+ // considers order
+ bool fieldsMatch(const BSONObj& lhs, const BSONObj& rhs);
+}
diff --git a/src/mongo/bson/bsonobj.h b/src/mongo/bson/bsonobj.h
new file mode 100644
index 00000000000..e8ce462403b
--- /dev/null
+++ b/src/mongo/bson/bsonobj.h
@@ -0,0 +1,497 @@
+// @file bsonobj.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <boost/intrusive_ptr.hpp>
+#include <set>
+#include <list>
+#include <vector>
+#include "util/atomic_int.h"
+#include "util/builder.h"
+#include "stringdata.h"
+
+namespace mongo {
+
+ typedef set< BSONElement, BSONElementCmpWithoutField > BSONElementSet;
+ typedef multiset< BSONElement, BSONElementCmpWithoutField > BSONElementMSet;
+
+ /**
+ C++ representation of a "BSON" object -- that is, an extended JSON-style
+ object in a binary representation.
+
+ See bsonspec.org.
+
+ Note that BSONObj's have a smart pointer capability built in -- so you can
+ pass them around by value. The reference counts used to implement this
+ do not use locking, so copying and destroying BSONObj's are not thread-safe
+ operations.
+
+ BSON object format:
+
+ \code
+ <unsigned totalSize> {<byte BSONType><cstring FieldName><Data>}* EOO
+
+ totalSize includes itself.
+
+ Data:
+ Bool: <byte>
+ EOO: nothing follows
+ Undefined: nothing follows
+ OID: an OID object
+ NumberDouble: <double>
+ NumberInt: <int32>
+ String: <unsigned32 strsizewithnull><cstring>
+ Date: <8bytes>
+ Regex: <cstring regex><cstring options>
+ Object: a nested object, leading with its entire size, which terminates with EOO.
+ Array: same as object
+ DBRef: <strlen> <cstring ns> <oid>
+ DBRef: a database reference: basically a collection name plus an Object ID
+ BinData: <int len> <byte subtype> <byte[len] data>
+ Code: a function (not a closure): same format as String.
+ Symbol: a language symbol (say a python symbol). same format as String.
+ Code With Scope: <total size><String><Object>
+ \endcode
+ */
+ class BSONObj {
+ public:
+
+ /** Construct a BSONObj from data in the proper format.
+ * Use this constructor when something else owns msgdata's buffer
+ */
+ explicit BSONObj(const char *msgdata) {
+ init(msgdata);
+ }
+
+ /** Construct a BSONObj from data in the proper format.
+ * Use this constructor when you want BSONObj to free(holder) when it is no longer needed
+ * BSONObj::Holder has an extra 4 bytes for a ref-count before the start of the object
+ */
+ class Holder;
+ explicit BSONObj(Holder* holder) {
+ init(holder);
+ }
+
+ explicit BSONObj(const Record *r);
+
+ /** Construct an empty BSONObj -- that is, {}. */
+ BSONObj();
+
+ ~BSONObj() {
+ _objdata = 0; // defensive
+ }
+
+ /**
+ A BSONObj can use a buffer it "owns" or one it does not.
+
+ OWNED CASE
+ If the BSONObj owns the buffer, the buffer can be shared among several BSONObj's (by assignment).
+ In this case the buffer is basically implemented as a shared_ptr.
+ Since BSONObj's are typically immutable, this works well.
+
+ UNOWNED CASE
+ A BSONObj can also point to BSON data in some other data structure it does not "own" or free later.
+ For example, in a memory mapped file. In this case, it is important the original data stays in
+ scope for as long as the BSONObj is in use. If you think the original data may go out of scope,
+ call BSONObj::getOwned() to promote your BSONObj to having its own copy.
+
+ On a BSONObj assignment, if the source is unowned, both the source and dest will have unowned
+ pointers to the original buffer after the assignment.
+
+ If you are not sure about ownership but need the buffer to last as long as the BSONObj, call
+ getOwned(). getOwned() is a no-op if the buffer is already owned. If not already owned, a malloc
+ and memcpy will result.
+
+ Most ways to create BSONObj's create 'owned' variants. Unowned versions can be created with:
+ (1) specifying true for the ifree parameter in the constructor
+ (2) calling BSONObjBuilder::done(). Use BSONObjBuilder::obj() to get an owned copy
+ (3) retrieving a subobject retrieves an unowned pointer into the parent BSON object
+
+ @return true if this is in owned mode
+ */
+ bool isOwned() const { return _holder.get() != 0; }
+
+ /** assure the data buffer is under the control of this BSONObj and not a remote buffer
+ @see isOwned()
+ */
+ BSONObj getOwned() const;
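+
+ // A minimal sketch (illustrative; 'view' is not part of this header): if 'view' points
+ // into a buffer that may go away (e.g. a memory mapped file), keep a self-contained copy:
+ //     BSONObj owned = view.getOwned();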
+
+ /** @return a new full (and owned) copy of the object. */
+ BSONObj copy() const;
+
+ /** Readable representation of a BSON object in an extended JSON-style notation.
+ This is an abbreviated representation which might be used for logging.
+ */
+ string toString( bool isArray = false, bool full=false ) const;
+ void toString(StringBuilder& s, bool isArray = false, bool full=false ) const;
+
+ /** Properly formatted JSON string.
+ @param pretty if true we try to add some lf's and indentation
+ */
+ string jsonString( JsonStringFormat format = Strict, int pretty = 0 ) const;
+
+ /** note: addFields always adds _id even if not specified */
+ int addFields(BSONObj& from, set<string>& fields); /* returns n added */
+
+ /** remove specified field and return a new object with the remaining fields.
+ slowish as builds a full new object
+ */
+ BSONObj removeField(const StringData& name) const;
+
+ /** returns # of top level fields in the object
+ note: iterates to count the fields
+ */
+ int nFields() const;
+
+ /** adds the field names to the fields set. does NOT clear it (appends). */
+ int getFieldNames(set<string>& fields) const;
+
+ /** @return the specified element. element.eoo() will be true if not found.
+ @param name field to find. supports dot (".") notation to reach into embedded objects.
+ for example "x.y" means "in the nested object in field x, retrieve field y"
+ */
+ BSONElement getFieldDotted(const char *name) const;
+ /** @return the specified element. element.eoo() will be true if not found.
+ @param name field to find. supports dot (".") notation to reach into embedded objects.
+ for example "x.y" means "in the nested object in field x, retrieve field y"
+ */
+ BSONElement getFieldDotted(const string& name) const {
+ return getFieldDotted( name.c_str() );
+ }
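+
+ // Example (a sketch; 'obj' is illustrative): for { x : { y : 3 } },
+ //     obj.getFieldDotted( "x.y" ).numberInt();   // 3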
+
+ /** Like getFieldDotted(), but expands arrays and returns all matching objects.
+ * Turning off expandLastArray allows you to retrieve nested array objects instead of
+ * their contents.
+ */
+ void getFieldsDotted(const StringData& name, BSONElementSet &ret, bool expandLastArray = true ) const;
+ void getFieldsDotted(const StringData& name, BSONElementMSet &ret, bool expandLastArray = true ) const;
+
+ /** Like getFieldDotted(), but returns first array encountered while traversing the
+ dotted fields of name. The name variable is updated to represent field
+ names with respect to the returned element. */
+ BSONElement getFieldDottedOrArray(const char *&name) const;
+
+ /** Get the field of the specified name. eoo() is true on the returned
+ element if not found.
+ */
+ BSONElement getField(const StringData& name) const;
+
+ /** Get several fields at once. This is faster than separate getField() calls as the size of
+ elements iterated can then be calculated only once each.
+ @param n number of fieldNames, and number of elements in the fields array
+ @param fields if a field is found its element is stored in its corresponding position in this array.
+ if not found the array element is unchanged.
+ */
+ void getFields(unsigned n, const char **fieldNames, BSONElement *fields) const;
+
+ /** Get the field of the specified name. eoo() is true on the returned
+ element if not found.
+ */
+ BSONElement operator[] (const char *field) const {
+ return getField(field);
+ }
+
+ BSONElement operator[] (const string& field) const {
+ return getField(field);
+ }
+
+ BSONElement operator[] (int field) const {
+ StringBuilder ss;
+ ss << field;
+ string s = ss.str();
+ return getField(s.c_str());
+ }
+
+ /** @return true if field exists */
+ bool hasField( const char * name ) const { return !getField(name).eoo(); }
+ /** @return true if field exists */
+ bool hasElement(const char *name) const { return hasField(name); }
+
+ /** @return "" if DNE or wrong type */
+ const char * getStringField(const char *name) const;
+
+ /** @return subobject of the given name */
+ BSONObj getObjectField(const char *name) const;
+
+ /** @return INT_MIN if not present - does some type conversions */
+ int getIntField(const char *name) const;
+
+ /** @return false if not present
+ @see BSONElement::trueValue()
+ */
+ bool getBoolField(const char *name) const;
+
+ /**
+ sets element field names to empty string
+ If a field in pattern is missing, it is omitted from the returned
+ object.
+ */
+ BSONObj extractFieldsUnDotted(BSONObj pattern) const;
+
+ /** extract items from object which match a pattern object.
+ e.g., if pattern is { x : 1, y : 1 }, builds an object with
+ x and y elements of this object, if they are present.
+ returns elements with original field names
+ */
+ BSONObj extractFields(const BSONObj &pattern , bool fillWithNull=false) const;
+
+ BSONObj filterFieldsUndotted(const BSONObj &filter, bool inFilter) const;
+
+ BSONElement getFieldUsingIndexNames(const char *fieldName, const BSONObj &indexKey) const;
+
+ /** arrays are bson objects with numeric and increasing field names
+ @return true if field names are numeric and increasing
+ */
+ bool couldBeArray() const;
+
+ /** @return the raw data of the object */
+ const char *objdata() const {
+ return _objdata;
+ }
+ /** @return total size of the BSON object in bytes */
+ int objsize() const { return *(reinterpret_cast<const int*>(objdata())); }
+
+ /** performs a cursory check on the object's size only. */
+ bool isValid() const;
+
+ /** @return true if this is a valid user document for storage
+ criteria: isValid() and no "." or "$" in field names
+ */
+ bool okForStorage() const;
+
+ /** @return true if object is empty -- i.e., {} */
+ bool isEmpty() const { return objsize() <= 5; }
+
+ void dump() const;
+
+ /** Alternative output format */
+ string hexDump() const;
+
+ /**wo='well ordered'. fields must be in same order in each object.
+ Ordering is with respect to the signs of the elements
+ and allows ascending / descending key mixing.
+ @return <0 if l<r. 0 if l==r. >0 if l>r
+ */
+ int woCompare(const BSONObj& r, const Ordering &o,
+ bool considerFieldName=true) const;
+
+ /**wo='well ordered'. fields must be in same order in each object.
+ Ordering is with respect to the signs of the elements
+ and allows ascending / descending key mixing.
+ @return <0 if l<r. 0 if l==r. >0 if l>r
+ */
+ int woCompare(const BSONObj& r, const BSONObj &ordering = BSONObj(),
+ bool considerFieldName=true) const;
+
+ bool operator<( const BSONObj& other ) const { return woCompare( other ) < 0; }
+ bool operator<=( const BSONObj& other ) const { return woCompare( other ) <= 0; }
+ bool operator>( const BSONObj& other ) const { return woCompare( other ) > 0; }
+ bool operator>=( const BSONObj& other ) const { return woCompare( other ) >= 0; }
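+
+ // A rough sketch of ordering-aware comparison (values illustrative): with a descending
+ // sort key the result is reversed, e.g.
+ //     BSON( "a" << 1 ).woCompare( BSON( "a" << 2 ), BSON( "a" << -1 ) );   // > 0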
+
+ /**
+ * @param useDotted whether to treat sort key fields as possibly dotted and expand into them
+ */
+ int woSortOrder( const BSONObj& r , const BSONObj& sortKey , bool useDotted=false ) const;
+
+ bool equal(const BSONObj& r) const;
+
+ /** This is "shallow" (byte-for-byte) equality -- ints and doubles with the same value won't match. For a
+ deep equality test use woCompare (which is slower).
+ */
+ bool binaryEqual(const BSONObj& r) const {
+ int os = objsize();
+ if ( os == r.objsize() ) {
+ return (os == 0 || memcmp(objdata(),r.objdata(),os)==0);
+ }
+ return false;
+ }
+
+ /** @return first field of the object */
+ BSONElement firstElement() const { return BSONElement(objdata() + 4); }
+
+ /** faster than firstElement().fieldName() - for the first element we can easily find the fieldname without
+ computing the element size.
+ */
+ const char * firstElementFieldName() const {
+ const char *p = objdata() + 4;
+ return *p == EOO ? "" : p+1;
+ }
+
+ BSONType firstElementType() const {
+ const char *p = objdata() + 4;
+ return (BSONType) *p;
+ }
+
+ /** Get the _id field from the object. For good performance drivers should
+ assure that _id is the first element of the object; however, correct operation
+ is assured regardless.
+ @return true if found
+ */
+ bool getObjectID(BSONElement& e) const;
+
+ /** @return A hash code for the object */
+ int hash() const {
+ unsigned x = 0;
+ const char *p = objdata();
+ for ( int i = 0; i < objsize(); i++ )
+ x = x * 131 + p[i];
+ return (x & 0x7fffffff) | 0x8000000; // must be > 0
+ }
+
+ // Return a version of this object where top level elements of types
+ // that are not part of the bson wire protocol are replaced with
+ // string identifier equivalents.
+ // TODO Support conversion of element types other than min and max.
+ BSONObj clientReadable() const;
+
+ /** Return new object with the field names replaced by those in the
+ passed object. */
+ BSONObj replaceFieldNames( const BSONObj &obj ) const;
+
+ /** true unless corrupt */
+ bool valid() const;
+
+ /** @return an md5 value for this object. */
+ string md5() const;
+
+ bool operator==( const BSONObj& other ) const { return equal( other ); }
+ bool operator!=(const BSONObj& other) const { return !operator==( other); }
+
+ enum MatchType {
+ Equality = 0,
+ LT = 0x1,
+ LTE = 0x3,
+ GTE = 0x6,
+ GT = 0x4,
+ opIN = 0x8, // { x : { $in : [1,2,3] } }
+ NE = 0x9,
+ opSIZE = 0x0A,
+ opALL = 0x0B,
+ NIN = 0x0C,
+ opEXISTS = 0x0D,
+ opMOD = 0x0E,
+ opTYPE = 0x0F,
+ opREGEX = 0x10,
+ opOPTIONS = 0x11,
+ opELEM_MATCH = 0x12,
+ opNEAR = 0x13,
+ opWITHIN = 0x14,
+ opMAX_DISTANCE=0x15
+ };
+
+ /** add all elements of the object to the specified vector */
+ void elems(vector<BSONElement> &) const;
+ /** add all elements of the object to the specified list */
+ void elems(list<BSONElement> &) const;
+
+ /** add all values of the object to the specified vector. If type mismatches, exception.
+ this is most useful when the BSONObj is an array, but can be used with non-arrays too in theory.
+
+ example:
+ bo sub = y["subobj"].Obj();
+ vector<int> myints;
+ sub.Vals(myints);
+ */
+ template <class T>
+ void Vals(vector<T> &) const;
+ /** add all values of the object to the specified list. If type mismatches, exception. */
+ template <class T>
+ void Vals(list<T> &) const;
+
+ /** add all values of the object to the specified vector. If type mismatches, skip. */
+ template <class T>
+ void vals(vector<T> &) const;
+ /** add all values of the object to the specified list. If type mismatches, skip. */
+ template <class T>
+ void vals(list<T> &) const;
+
+ friend class BSONObjIterator;
+ typedef BSONObjIterator iterator;
+
+ /** use something like this:
+ for( BSONObj::iterator i = myObj.begin(); i.more(); ) {
+ BSONElement e = i.next();
+ ...
+ }
+ */
+ BSONObjIterator begin() const;
+
+ void appendSelfToBufBuilder(BufBuilder& b) const {
+ assert( objsize() );
+ b.appendBuf(reinterpret_cast<const void *>( objdata() ), objsize());
+ }
+
+#pragma pack(1)
+ class Holder : boost::noncopyable {
+ private:
+ Holder(); // this class should never be explicitly created
+ AtomicUInt refCount;
+ public:
+ char data[4]; // start of object
+
+ void zero() { refCount.zero(); }
+
+ // these are called automatically by boost::intrusive_ptr
+ friend void intrusive_ptr_add_ref(Holder* h) { h->refCount++; }
+ friend void intrusive_ptr_release(Holder* h) {
+#if defined(_DEBUG) // cant use dassert or DEV here
+ assert((int)h->refCount > 0); // make sure we haven't already freed the buffer
+#endif
+ if(--(h->refCount) == 0){
+#if defined(_DEBUG)
+ unsigned sz = (unsigned&) *h->data;
+ assert(sz < BSONObjMaxInternalSize * 3);
+ memset(h->data, 0xdd, sz);
+#endif
+ free(h);
+ }
+ }
+ };
+#pragma pack()
+
+ private:
+ const char *_objdata;
+ boost::intrusive_ptr< Holder > _holder;
+
+ void _assertInvalid() const;
+
+ void init(Holder *holder) {
+ _holder = holder; // holder is now managed by intrusive_ptr
+ init(holder->data);
+ }
+ void init(const char *data) {
+ _objdata = data;
+ if ( !isValid() )
+ _assertInvalid();
+ }
+ };
+
+ ostream& operator<<( ostream &s, const BSONObj &o );
+ ostream& operator<<( ostream &s, const BSONElement &e );
+
+ StringBuilder& operator<<( StringBuilder &s, const BSONObj &o );
+ StringBuilder& operator<<( StringBuilder &s, const BSONElement &e );
+
+
+ struct BSONArray : BSONObj {
+ // Don't add anything other than forwarding constructors!!!
+ BSONArray(): BSONObj() {}
+ explicit BSONArray(const BSONObj& obj): BSONObj(obj) {}
+ };
+
+}
diff --git a/src/mongo/bson/bsonobjbuilder.h b/src/mongo/bson/bsonobjbuilder.h
new file mode 100644
index 00000000000..1fdbcba18a6
--- /dev/null
+++ b/src/mongo/bson/bsonobjbuilder.h
@@ -0,0 +1,842 @@
+/* bsonobjbuilder.h
+
+ Classes in this file:
+ BSONObjBuilder
+ BSONArrayBuilder
+*/
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <limits>
+#include <cmath>
+#include <boost/static_assert.hpp>
+#include "bsonelement.h"
+#include "bsonobj.h"
+#include "bsonmisc.h"
+
+namespace mongo {
+
+ using namespace std;
+
+#if defined(_WIN32)
+// warning: 'this' : used in base member initializer list
+#pragma warning( disable : 4355 )
+#endif
+
+ template<typename T>
+ class BSONFieldValue {
+ public:
+ BSONFieldValue( const string& name , const T& t ) {
+ _name = name;
+ _t = t;
+ }
+
+ const T& value() const { return _t; }
+ const string& name() const { return _name; }
+
+ private:
+ string _name;
+ T _t;
+ };
+
+ template<typename T>
+ class BSONField {
+ public:
+ BSONField( const string& name , const string& longName="" )
+ : _name(name), _longName(longName) {}
+ const string& name() const { return _name; }
+ operator string() const { return _name; }
+
+ BSONFieldValue<T> make( const T& t ) const {
+ return BSONFieldValue<T>( _name , t );
+ }
+
+ BSONFieldValue<BSONObj> gt( const T& t ) const { return query( "$gt" , t ); }
+ BSONFieldValue<BSONObj> lt( const T& t ) const { return query( "$lt" , t ); }
+
+ BSONFieldValue<BSONObj> query( const char * q , const T& t ) const;
+
+ BSONFieldValue<T> operator()( const T& t ) const {
+ return BSONFieldValue<T>( _name , t );
+ }
+
+ private:
+ string _name;
+ string _longName;
+ };
+
+ /** Utility for creating a BSONObj.
+ See also the BSON() and BSON_ARRAY() macros.
+ */
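+ // A short usage sketch (field names illustrative):
+ //     BSONObjBuilder b;
+ //     b.append( "name" , "joe" );
+ //     b.append( "age" , 33 );
+ //     BSONObj o = b.obj();   // destructive: 'o' owns the buffer, don't reuse 'b'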
+ class BSONObjBuilder : boost::noncopyable {
+ public:
+ /** @param initsize this is just a hint as to the final size of the object */
+ BSONObjBuilder(int initsize=512) : _b(_buf), _buf(initsize + sizeof(unsigned)), _offset( sizeof(unsigned) ), _s( this ) , _tracker(0) , _doneCalled(false) {
+ _b.appendNum((unsigned)0); // ref-count
+ _b.skip(4); /*leave room for size field and ref-count*/
+ }
+
+ /** @param baseBuilder construct a BSONObjBuilder using an existing BufBuilder
+ * This is for more efficient adding of subobjects/arrays. See docs for subobjStart for example.
+ */
+ BSONObjBuilder( BufBuilder &baseBuilder ) : _b( baseBuilder ), _buf( 0 ), _offset( baseBuilder.len() ), _s( this ) , _tracker(0) , _doneCalled(false) {
+ _b.skip( 4 );
+ }
+
+ BSONObjBuilder( const BSONSizeTracker & tracker ) : _b(_buf) , _buf(tracker.getSize() + sizeof(unsigned) ), _offset( sizeof(unsigned) ), _s( this ) , _tracker( (BSONSizeTracker*)(&tracker) ) , _doneCalled(false) {
+ _b.appendNum((unsigned)0); // ref-count
+ _b.skip(4);
+ }
+
+ ~BSONObjBuilder() {
+ if ( !_doneCalled && _b.buf() && _buf.getSize() == 0 ) {
+ _done();
+ }
+ }
+
+ /** add all the fields from the object specified to this object */
+ BSONObjBuilder& appendElements(BSONObj x);
+
+ /** add all the fields from the object specified to this object if they don't exist already */
+ BSONObjBuilder& appendElementsUnique( BSONObj x );
+
+ /** append element to the object we are building */
+ BSONObjBuilder& append( const BSONElement& e) {
+ assert( !e.eoo() ); // do not append eoo, that would corrupt us. the builder auto appends when done() is called.
+ _b.appendBuf((void*) e.rawdata(), e.size());
+ return *this;
+ }
+
+ /** append an element but with a new name */
+ BSONObjBuilder& appendAs(const BSONElement& e, const StringData& fieldName) {
+ assert( !e.eoo() ); // do not append eoo, that would corrupt us. the builder auto appends when done() is called.
+ _b.appendNum((char) e.type());
+ _b.appendStr(fieldName);
+ _b.appendBuf((void *) e.value(), e.valuesize());
+ return *this;
+ }
+
+ /** add a subobject as a member */
+ BSONObjBuilder& append(const StringData& fieldName, BSONObj subObj) {
+ _b.appendNum((char) Object);
+ _b.appendStr(fieldName);
+ _b.appendBuf((void *) subObj.objdata(), subObj.objsize());
+ return *this;
+ }
+
+ /** add a subobject as a member */
+ BSONObjBuilder& appendObject(const StringData& fieldName, const char * objdata , int size = 0 ) {
+ assert( objdata );
+ if ( size == 0 ) {
+ size = *((int*)objdata);
+ }
+
+ assert( size > 4 && size < 100000000 );
+
+ _b.appendNum((char) Object);
+ _b.appendStr(fieldName);
+ _b.appendBuf((void*)objdata, size );
+ return *this;
+ }
+
+ /** add header for a new subobject and return bufbuilder for writing to
+ * the subobject's body
+ *
+ * example:
+ *
+ * BSONObjBuilder b;
+ * BSONObjBuilder sub (b.subobjStart("fieldName"));
+ * // use sub
+ * sub.done()
+ * // use b and convert to object
+ */
+ BufBuilder &subobjStart(const StringData& fieldName) {
+ _b.appendNum((char) Object);
+ _b.appendStr(fieldName);
+ return _b;
+ }
+
+ /** add a subobject as a member with type Array. Thus arr object should have "0", "1", ...
+ style fields in it.
+ */
+ BSONObjBuilder& appendArray(const StringData& fieldName, const BSONObj &subObj) {
+ _b.appendNum((char) Array);
+ _b.appendStr(fieldName);
+ _b.appendBuf((void *) subObj.objdata(), subObj.objsize());
+ return *this;
+ }
+ BSONObjBuilder& append(const StringData& fieldName, BSONArray arr) {
+ return appendArray(fieldName, arr);
+ }
+
+ /** add header for a new subarray and return bufbuilder for writing to
+ the subarray's body */
+ BufBuilder &subarrayStart(const StringData& fieldName) {
+ _b.appendNum((char) Array);
+ _b.appendStr(fieldName);
+ return _b;
+ }
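+
+ // A rough sketch of building a subarray in place (field names illustrative):
+ //     BSONObjBuilder b;
+ //     {
+ //         BSONArrayBuilder tags( b.subarrayStart( "tags" ) );
+ //         tags.append( "news" );
+ //         tags.doneFast();
+ //     }
+ //     BSONObj o = b.obj();   // { tags: [ "news" ] }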
+
+ /** Append a boolean element */
+ BSONObjBuilder& appendBool(const StringData& fieldName, int val) {
+ _b.appendNum((char) Bool);
+ _b.appendStr(fieldName);
+ _b.appendNum((char) (val?1:0));
+ return *this;
+ }
+
+ /** Append a boolean element */
+ BSONObjBuilder& append(const StringData& fieldName, bool val) {
+ _b.appendNum((char) Bool);
+ _b.appendStr(fieldName);
+ _b.appendNum((char) (val?1:0));
+ return *this;
+ }
+
+ /** Append a 32 bit integer element */
+ BSONObjBuilder& append(const StringData& fieldName, int n) {
+ _b.appendNum((char) NumberInt);
+ _b.appendStr(fieldName);
+ _b.appendNum(n);
+ return *this;
+ }
+
+ /** Append a 32 bit unsigned element - cast to a signed int. */
+ BSONObjBuilder& append(const StringData& fieldName, unsigned n) {
+ return append(fieldName, (int) n);
+ }
+
+ /** Append a NumberLong */
+ BSONObjBuilder& append(const StringData& fieldName, long long n) {
+ _b.appendNum((char) NumberLong);
+ _b.appendStr(fieldName);
+ _b.appendNum(n);
+ return *this;
+ }
+
+ /** appends a number. if |n| < max(int)/2 it is stored as an int, otherwise as a long long */
+ BSONObjBuilder& appendIntOrLL( const StringData& fieldName , long long n ) {
+ long long x = n;
+ if ( x < 0 )
+ x = x * -1;
+ if ( x < ( (numeric_limits<int>::max)() / 2 ) ) // extra () to avoid max macro on windows
+ append( fieldName , (int)n );
+ else
+ append( fieldName , n );
+ return *this;
+ }
+
+ /**
+ * appendNumber is a family of methods for appending the smallest sensible numeric type,
+ * mostly for JS
+ */
+ BSONObjBuilder& appendNumber( const StringData& fieldName , int n ) {
+ return append( fieldName , n );
+ }
+
+ BSONObjBuilder& appendNumber( const StringData& fieldName , double d ) {
+ return append( fieldName , d );
+ }
+
+ BSONObjBuilder& appendNumber( const StringData& fieldName , size_t n ) {
+ static size_t maxInt = (size_t)pow( 2.0 , 30.0 );
+
+ if ( n < maxInt )
+ append( fieldName , (int)n );
+ else
+ append( fieldName , (long long)n );
+ return *this;
+ }
+
+ BSONObjBuilder& appendNumber( const StringData& fieldName , long long l ) {
+ static long long maxInt = (int)pow( 2.0 , 30.0 );
+ static long long maxDouble = (long long)pow( 2.0 , 40.0 );
+ long long x = l >= 0 ? l : -l;
+ if ( x < maxInt )
+ append( fieldName , (int)l );
+ else if ( x < maxDouble )
+ append( fieldName , (double)l );
+ else
+ append( fieldName , l );
+ return *this;
+ }
+
+ /** Append a double element */
+ BSONObjBuilder& append(const StringData& fieldName, double n) {
+ _b.appendNum((char) NumberDouble);
+ _b.appendStr(fieldName);
+ _b.appendNum(n);
+ return *this;
+ }
+
+ /** tries to append the data as a number
+ * @return true if the data was able to be converted to a number
+ */
+ bool appendAsNumber( const StringData& fieldName , const string& data );
+
+ /** Append a BSON Object ID (OID type).
+ @deprecated Generally, it is preferred to use the append(name, oid)
+ method for this.
+ */
+ BSONObjBuilder& appendOID(const StringData& fieldName, OID *oid = 0 , bool generateIfBlank = false ) {
+ _b.appendNum((char) jstOID);
+ _b.appendStr(fieldName);
+ if ( oid )
+ _b.appendBuf( (void *) oid, 12 );
+ else {
+ OID tmp;
+ if ( generateIfBlank )
+ tmp.init();
+ else
+ tmp.clear();
+ _b.appendBuf( (void *) &tmp, 12 );
+ }
+ return *this;
+ }
+
+ /**
+ Append a BSON Object ID.
+ @param fieldName Field name, e.g., "_id".
+ @returns the builder object
+ */
+ BSONObjBuilder& append( const StringData& fieldName, OID oid ) {
+ _b.appendNum((char) jstOID);
+ _b.appendStr(fieldName);
+ _b.appendBuf( (void *) &oid, 12 );
+ return *this;
+ }
+
+ /**
+ Generate and assign an object id for the _id field.
+ _id should be the first element in the object for good performance.
+ */
+ BSONObjBuilder& genOID() {
+ return append("_id", OID::gen());
+ }
+
+ /** Append a time_t date.
+ @param dt a C-style 32 bit date value, that is
+ the number of seconds since January 1, 1970, 00:00:00 GMT
+ */
+ BSONObjBuilder& appendTimeT(const StringData& fieldName, time_t dt) {
+ _b.appendNum((char) Date);
+ _b.appendStr(fieldName);
+ _b.appendNum(static_cast<unsigned long long>(dt) * 1000);
+ return *this;
+ }
+ /** Append a date.
+ @param dt a Java-style 64 bit date value, that is
+ the number of milliseconds since January 1, 1970, 00:00:00 GMT
+ */
+ BSONObjBuilder& appendDate(const StringData& fieldName, Date_t dt) {
+ /* easy to pass a time_t to this and get a bad result. thus this warning. */
+#if defined(_DEBUG) && defined(MONGO_EXPOSE_MACROS)
+ if( dt > 0 && dt <= 0xffffffff ) {
+ static int n;
+ if( n++ == 0 )
+ log() << "DEV WARNING appendDate() called with a tiny (but nonzero) date" << endl;
+ }
+#endif
+ _b.appendNum((char) Date);
+ _b.appendStr(fieldName);
+ _b.appendNum(dt);
+ return *this;
+ }
+ BSONObjBuilder& append(const StringData& fieldName, Date_t dt) {
+ return appendDate(fieldName, dt);
+ }
+
+ /** Append a regular expression value
+ @param regex the regular expression pattern
+ @param options regex options such as "i" or "g"
+ */
+ BSONObjBuilder& appendRegex(const StringData& fieldName, const StringData& regex, const StringData& options = "") {
+ _b.appendNum((char) RegEx);
+ _b.appendStr(fieldName);
+ _b.appendStr(regex);
+ _b.appendStr(options);
+ return *this;
+ }
+
+ BSONObjBuilder& appendCode(const StringData& fieldName, const StringData& code) {
+ _b.appendNum((char) Code);
+ _b.appendStr(fieldName);
+ _b.appendNum((int) code.size()+1);
+ _b.appendStr(code);
+ return *this;
+ }
+
+ /** Append a string element.
+ @param sz size includes terminating null character */
+ BSONObjBuilder& append(const StringData& fieldName, const char *str, int sz) {
+ _b.appendNum((char) String);
+ _b.appendStr(fieldName);
+ _b.appendNum((int)sz);
+ _b.appendBuf(str, sz);
+ return *this;
+ }
+ /** Append a string element */
+ BSONObjBuilder& append(const StringData& fieldName, const char *str) {
+ return append(fieldName, str, (int) strlen(str)+1);
+ }
+ /** Append a string element */
+ BSONObjBuilder& append(const StringData& fieldName, const string& str) {
+ return append(fieldName, str.c_str(), (int) str.size()+1);
+ }
+
+ BSONObjBuilder& appendSymbol(const StringData& fieldName, const StringData& symbol) {
+ _b.appendNum((char) Symbol);
+ _b.appendStr(fieldName);
+ _b.appendNum((int) symbol.size()+1);
+ _b.appendStr(symbol);
+ return *this;
+ }
+
+ /** Append a Null element to the object */
+ BSONObjBuilder& appendNull( const StringData& fieldName ) {
+ _b.appendNum( (char) jstNULL );
+ _b.appendStr( fieldName );
+ return *this;
+ }
+
+ // Append an element that is less than all other keys.
+ BSONObjBuilder& appendMinKey( const StringData& fieldName ) {
+ _b.appendNum( (char) MinKey );
+ _b.appendStr( fieldName );
+ return *this;
+ }
+ // Append an element that is greater than all other keys.
+ BSONObjBuilder& appendMaxKey( const StringData& fieldName ) {
+ _b.appendNum( (char) MaxKey );
+ _b.appendStr( fieldName );
+ return *this;
+ }
+
+ // Append a Timestamp field -- will be updated to next OpTime on db insert.
+ BSONObjBuilder& appendTimestamp( const StringData& fieldName ) {
+ _b.appendNum( (char) Timestamp );
+ _b.appendStr( fieldName );
+ _b.appendNum( (unsigned long long) 0 );
+ return *this;
+ }
+
+ BSONObjBuilder& appendTimestamp( const StringData& fieldName , unsigned long long val ) {
+ _b.appendNum( (char) Timestamp );
+ _b.appendStr( fieldName );
+ _b.appendNum( val );
+ return *this;
+ }
+
+ /**
+ Timestamps are a special BSON datatype that is used internally for replication.
+ Append a timestamp element to the object being built.
+ @param time - in millis (but stored in seconds)
+ */
+ BSONObjBuilder& appendTimestamp( const StringData& fieldName , unsigned long long time , unsigned int inc );
+
+ /*
+ Append an element of the deprecated DBRef type.
+ @deprecated
+ */
+ BSONObjBuilder& appendDBRef( const StringData& fieldName, const StringData& ns, const OID &oid ) {
+ _b.appendNum( (char) DBRef );
+ _b.appendStr( fieldName );
+ _b.appendNum( (int) ns.size() + 1 );
+ _b.appendStr( ns );
+ _b.appendBuf( (void *) &oid, 12 );
+ return *this;
+ }
+
+ /** Append a binary data element
+ @param fieldName name of the field
+ @param len length of the binary data in bytes
+ @param subtype subtype information for the data. @see enum BinDataType in bsontypes.h.
+ Use BinDataGeneral if you don't care about the type.
+ @param data the byte array
+ */
+ BSONObjBuilder& appendBinData( const StringData& fieldName, int len, BinDataType type, const void *data ) {
+ _b.appendNum( (char) BinData );
+ _b.appendStr( fieldName );
+ _b.appendNum( len );
+ _b.appendNum( (char) type );
+ _b.appendBuf( data, len );
+ return *this;
+ }
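+
+ // Example (a sketch; 'b' and 'bytes' are illustrative):
+ //     unsigned char bytes[16] = { 0 };
+ //     b.appendBinData( "payload" , sizeof(bytes) , BinDataGeneral , bytes );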
+
+ /**
+ Subtype 2 is deprecated.
+ Append a BSON bindata bytearray element.
+ @param data a byte array
+ @param len the length of data
+ */
+ BSONObjBuilder& appendBinDataArrayDeprecated( const char * fieldName , const void * data , int len ) {
+ _b.appendNum( (char) BinData );
+ _b.appendStr( fieldName );
+ _b.appendNum( len + 4 );
+ _b.appendNum( (char)0x2 );
+ _b.appendNum( len );
+ _b.appendBuf( data, len );
+ return *this;
+ }
+
+ /** Append to the BSON object a field of type CodeWScope. This is a javascript code
+ fragment accompanied by some scope that goes with it.
+ */
+ BSONObjBuilder& appendCodeWScope( const StringData& fieldName, const StringData& code, const BSONObj &scope ) {
+ _b.appendNum( (char) CodeWScope );
+ _b.appendStr( fieldName );
+ _b.appendNum( ( int )( 4 + 4 + code.size() + 1 + scope.objsize() ) );
+ _b.appendNum( ( int ) code.size() + 1 );
+ _b.appendStr( code );
+ _b.appendBuf( ( void * )scope.objdata(), scope.objsize() );
+ return *this;
+ }
+
+ void appendUndefined( const StringData& fieldName ) {
+ _b.appendNum( (char) Undefined );
+ _b.appendStr( fieldName );
+ }
+
+ /* helper function -- see Query::where() for primary way to do this. */
+ void appendWhere( const StringData& code, const BSONObj &scope ) {
+ appendCodeWScope( "$where" , code , scope );
+ }
+
+ /**
+ these are the min/max when comparing, not strict min/max elements for a given type
+ */
+ void appendMinForType( const StringData& fieldName , int type );
+ void appendMaxForType( const StringData& fieldName , int type );
+
+ /** Append an array of values. */
+ template < class T >
+ BSONObjBuilder& append( const StringData& fieldName, const vector< T >& vals );
+
+ template < class T >
+ BSONObjBuilder& append( const StringData& fieldName, const list< T >& vals );
+
+ /** Append a set of values. */
+ template < class T >
+ BSONObjBuilder& append( const StringData& fieldName, const set< T >& vals );
+
+ /**
+ * destructive
+ * The returned BSONObj will free the buffer when it is finished.
+ * @return owned BSONObj
+ */
+ BSONObj obj() {
+ bool own = owned();
+ massert( 10335 , "builder does not own memory", own );
+ doneFast();
+ BSONObj::Holder* h = (BSONObj::Holder*)_b.buf();
+ decouple(); // sets _b.buf() to NULL
+ return BSONObj(h);
+ }
+
+ /** Fetch the object we have built.
+ BSONObjBuilder still frees the object when the builder goes out of
+ scope -- very important to keep in mind. Use obj() if you
+ would like the BSONObj to last longer than the builder.
+ */
+ BSONObj done() {
+ return BSONObj(_done());
+ }
+
+ // Like 'done' above, but does not construct a BSONObj to return to the caller.
+ void doneFast() {
+ (void)_done();
+ }
+
+ /** Peek at what is in the builder, but leave the builder ready for more appends.
+ The returned object is only valid until the next modification or destruction of the builder.
+ Intended use case: append a field if not already there.
+ */
+ BSONObj asTempObj() {
+ BSONObj temp(_done());
+ _b.setlen(_b.len()-1); //next append should overwrite the EOO
+ _doneCalled = false;
+ return temp;
+ }
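+
+ // Example (a sketch): append a default only if the field is not already there
+ //     if ( !b.asTempObj().hasField( "n" ) )
+ //         b.append( "n" , 1 );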
+
+ /* assume ownership of the buffer - you must then free it (with free()) */
+ char* decouple(int& l) {
+ char *x = _done();
+ assert( x );
+ l = _b.len();
+ _b.decouple();
+ return x;
+ }
+ void decouple() {
+ _b.decouple(); // post done() call version. be sure jsobj frees...
+ }
+
+ void appendKeys( const BSONObj& keyPattern , const BSONObj& values );
+
+ static string numStr( int i ) {
+ if (i>=0 && i<100 && numStrsReady)
+ return numStrs[i];
+ StringBuilder o;
+ o << i;
+ return o.str();
+ }
+
+ /** Stream oriented way to add field names and values. */
+ BSONObjBuilderValueStream &operator<<(const char * name ) {
+ _s.endField( name );
+ return _s;
+ }
+
+ /** Stream oriented way to add field names and values. */
+ BSONObjBuilder& operator<<( GENOIDLabeler ) { return genOID(); }
+
+ // prevent implicit string conversions which would allow bad things like BSON( BSON( "foo" << 1 ) << 2 )
+ struct ForceExplicitString {
+ ForceExplicitString( const string &str ) : str_( str ) {}
+ string str_;
+ };
+
+ /** Stream oriented way to add field names and values. */
+ BSONObjBuilderValueStream &operator<<( const ForceExplicitString& name ) {
+ return operator<<( name.str_.c_str() );
+ }
+
+ Labeler operator<<( const Labeler::Label &l ) {
+ massert( 10336 , "No subobject started", _s.subobjStarted() );
+ return _s << l;
+ }
+
+ template<typename T>
+ BSONObjBuilderValueStream& operator<<( const BSONField<T>& f ) {
+ _s.endField( f.name().c_str() );
+ return _s;
+ }
+
+ template<typename T>
+ BSONObjBuilder& operator<<( const BSONFieldValue<T>& v ) {
+ append( v.name().c_str() , v.value() );
+ return *this;
+ }
+
+ BSONObjBuilder& operator<<( const BSONElement& e ){
+ append( e );
+ return *this;
+ }
+
+ /** @return true if we are using our own bufbuilder, and not an alternate that was given to us in our constructor */
+ bool owned() const { return &_b == &_buf; }
+
+ BSONObjIterator iterator() const ;
+
+ bool hasField( const StringData& name ) const ;
+
+ int len() const { return _b.len(); }
+
+ BufBuilder& bb() { return _b; }
+
+ private:
+ char* _done() {
+ if ( _doneCalled )
+ return _b.buf() + _offset;
+
+ _doneCalled = true;
+ _s.endField();
+ _b.appendNum((char) EOO);
+ char *data = _b.buf() + _offset;
+ int size = _b.len() - _offset;
+ *((int*)data) = size;
+ if ( _tracker )
+ _tracker->got( size );
+ return data;
+ }
+
+ BufBuilder &_b;
+ BufBuilder _buf;
+ int _offset;
+ BSONObjBuilderValueStream _s;
+ BSONSizeTracker * _tracker;
+ bool _doneCalled;
+
+ static const string numStrs[100]; // cache of 0 to 99 inclusive
+ static bool numStrsReady; // for static init safety. see comments in db/jsobj.cpp
+ };
+
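+ /** Utility for building a BSONArray; elements are appended under sequential numeric
+ field names ("0", "1", ...). A rough usage sketch:
+
+ BSONArrayBuilder b;
+ b.append( 1 ).append( 2 ).append( 3 );
+ BSONArray a = b.arr();   // [ 1, 2, 3 ]
+ */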
+ class BSONArrayBuilder : boost::noncopyable {
+ public:
+ BSONArrayBuilder() : _i(0), _b() {}
+ BSONArrayBuilder( BufBuilder &_b ) : _i(0), _b(_b) {}
+ BSONArrayBuilder( int initialSize ) : _i(0), _b(initialSize) {}
+
+ template <typename T>
+ BSONArrayBuilder& append(const T& x) {
+ _b.append(num(), x);
+ return *this;
+ }
+
+ BSONArrayBuilder& append(const BSONElement& e) {
+ _b.appendAs(e, num());
+ return *this;
+ }
+
+ template <typename T>
+ BSONArrayBuilder& operator<<(const T& x) {
+ return append(x);
+ }
+
+ void appendNull() {
+ _b.appendNull(num());
+ }
+
+ /**
+ * destructive - ownership moves to returned BSONArray
+ * @return owned BSONArray
+ */
+ BSONArray arr() { return BSONArray(_b.obj()); }
+
+ BSONObj done() { return _b.done(); }
+
+ void doneFast() { _b.doneFast(); }
+
+ template <typename T>
+ BSONArrayBuilder& append(const StringData& name, const T& x) {
+ fill( name );
+ append( x );
+ return *this;
+ }
+
+ // These two just use next position
+ BufBuilder &subobjStart() { return _b.subobjStart( num() ); }
+ BufBuilder &subarrayStart() { return _b.subarrayStart( num() ); }
+
+ // These fill missing entries up to pos. if pos is less than the next index, pos is ignored
+ BufBuilder &subobjStart(int pos) {
+ fill(pos);
+ return _b.subobjStart( num() );
+ }
+ BufBuilder &subarrayStart(int pos) {
+ fill(pos);
+ return _b.subarrayStart( num() );
+ }
+
+ // These should only be used where you really need interface compatibility with BSONObjBuilder
+ // Currently they are only used by update.cpp and it should probably stay that way
+ BufBuilder &subobjStart( const StringData& name ) {
+ fill( name );
+ return _b.subobjStart( num() );
+ }
+
+ BufBuilder &subarrayStart( const char *name ) {
+ fill( name );
+ return _b.subarrayStart( num() );
+ }
+
+ void appendArray( const StringData& name, BSONObj subObj ) {
+ fill( name );
+ _b.appendArray( num(), subObj );
+ }
+
+ void appendAs( const BSONElement &e, const char *name) {
+ fill( name );
+ append( e );
+ }
+
+ int len() const { return _b.len(); }
+ int arrSize() const { return _i; }
+
+ private:
+ // These two are undefined privates to prevent their accidental
+ // use as we don't support unsigned ints in BSON
+ BSONObjBuilder& append(const StringData& fieldName, unsigned int val);
+ BSONObjBuilder& append(const StringData& fieldName, unsigned long long val);
+
+ void fill( const StringData& name ) {
+ char *r;
+ long int n = strtol( name.data(), &r, 10 );
+ if ( *r )
+ uasserted( 13048, (string)"can't append to array using string field name [" + name.data() + "]" );
+ fill(n);
+ }
+
+ void fill (int upTo){
+ // if this is changed make sure to update error message and jstests/set7.js
+ const int maxElems = 1500000;
+ BOOST_STATIC_ASSERT(maxElems < (BSONObjMaxUserSize/10));
+ uassert(15891, "can't backfill array to larger than 1,500,000 elements", upTo <= maxElems);
+
+ while( _i < upTo )
+ append( nullElt() );
+ }
+
+ static BSONElement nullElt() {
+ static BSONObj n = nullObj();
+ return n.firstElement();
+ }
+
+ static BSONObj nullObj() {
+ BSONObjBuilder _b;
+ _b.appendNull( "" );
+ return _b.obj();
+ }
+
+ string num() { return _b.numStr(_i++); }
+ int _i;
+ BSONObjBuilder _b;
+ };
+
+ template < class T >
+ inline BSONObjBuilder& BSONObjBuilder::append( const StringData& fieldName, const vector< T >& vals ) {
+ BSONObjBuilder arrBuilder;
+ for ( unsigned int i = 0; i < vals.size(); ++i )
+ arrBuilder.append( numStr( i ), vals[ i ] );
+ appendArray( fieldName, arrBuilder.done() );
+ return *this;
+ }
+
+ template < class L >
+ inline BSONObjBuilder& _appendIt( BSONObjBuilder& _this, const StringData& fieldName, const L& vals ) {
+ BSONObjBuilder arrBuilder;
+ int n = 0;
+ for( typename L::const_iterator i = vals.begin(); i != vals.end(); i++ )
+ arrBuilder.append( BSONObjBuilder::numStr(n++), *i );
+ _this.appendArray( fieldName, arrBuilder.done() );
+ return _this;
+ }
+
+ template < class T >
+ inline BSONObjBuilder& BSONObjBuilder::append( const StringData& fieldName, const list< T >& vals ) {
+ return _appendIt< list< T > >( *this, fieldName, vals );
+ }
+
+ template < class T >
+ inline BSONObjBuilder& BSONObjBuilder::append( const StringData& fieldName, const set< T >& vals ) {
+ return _appendIt< set< T > >( *this, fieldName, vals );
+ }
+
+
+ // $or helper: OR(BSON("x" << GT << 7), BSON("y" << LT << 6));
+ inline BSONObj OR(const BSONObj& a, const BSONObj& b)
+ { return BSON( "$or" << BSON_ARRAY(a << b) ); }
+ inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c)
+ { return BSON( "$or" << BSON_ARRAY(a << b << c) ); }
+ inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c, const BSONObj& d)
+ { return BSON( "$or" << BSON_ARRAY(a << b << c << d) ); }
+ inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c, const BSONObj& d, const BSONObj& e)
+ { return BSON( "$or" << BSON_ARRAY(a << b << c << d << e) ); }
+ inline BSONObj OR(const BSONObj& a, const BSONObj& b, const BSONObj& c, const BSONObj& d, const BSONObj& e, const BSONObj& f)
+ { return BSON( "$or" << BSON_ARRAY(a << b << c << d << e << f) ); }
+
+}
diff --git a/src/mongo/bson/bsonobjiterator.h b/src/mongo/bson/bsonobjiterator.h
new file mode 100644
index 00000000000..39ae24d9b86
--- /dev/null
+++ b/src/mongo/bson/bsonobjiterator.h
@@ -0,0 +1,161 @@
+// bsonobjiterator.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <boost/preprocessor/cat.hpp> // like the ## operator but works with __LINE__
+
+namespace mongo {
+
+ /** iterator for a BSONObj
+
+ Note each BSONObj ends with an EOO element: so you will get more() on an empty
+ object, although next().eoo() will be true.
+
+ The BSONObj must stay in scope for the duration of the iterator's execution.
+
+ todo: we may want to make a more stl-like iterator interface for this
+ with things like begin() and end()
+ */
+ class BSONObjIterator {
+ public:
+ /** Create an iterator for a BSON object.
+ */
+ BSONObjIterator(const BSONObj& jso) {
+ int sz = jso.objsize();
+ if ( MONGO_unlikely(sz == 0) ) {
+ _pos = _theend = 0;
+ return;
+ }
+ _pos = jso.objdata() + 4;
+ _theend = jso.objdata() + sz - 1;
+ }
+
+ BSONObjIterator( const char * start , const char * end ) {
+ _pos = start + 4;
+ _theend = end - 1;
+ }
+
+ /** @return true if more elements exist to be enumerated. */
+ bool more() { return _pos < _theend; }
+
+ /** @return true if more elements exist to be enumerated INCLUDING the EOO element which is always at the end. */
+ bool moreWithEOO() { return _pos <= _theend; }
+
+ /** @return the next element in the object. For the final element, element.eoo() will be true. */
+ BSONElement next( bool checkEnd ) {
+ assert( _pos <= _theend );
+ BSONElement e( _pos, checkEnd ? (int)(_theend + 1 - _pos) : -1 );
+ _pos += e.size( checkEnd ? (int)(_theend + 1 - _pos) : -1 );
+ return e;
+ }
+ BSONElement next() {
+ assert( _pos <= _theend );
+ BSONElement e(_pos);
+ _pos += e.size();
+ return e;
+ }
+ void operator++() { next(); }
+ void operator++(int) { next(); }
+
+ BSONElement operator*() {
+ assert( _pos <= _theend );
+ return BSONElement(_pos);
+ }
+
+ private:
+ const char* _pos;
+ const char* _theend;
+ };
+
+ class BSONObjIteratorSorted {
+ public:
+ BSONObjIteratorSorted( const BSONObj& o );
+
+ ~BSONObjIteratorSorted() {
+ assert( _fields );
+ delete[] _fields;
+ _fields = 0;
+ }
+
+ bool more() {
+ return _cur < _nfields;
+ }
+
+ BSONElement next() {
+ assert( _fields );
+ if ( _cur < _nfields )
+ return BSONElement( _fields[_cur++] );
+ return BSONElement();
+ }
+
+ private:
+ const char ** _fields;
+ int _nfields;
+ int _cur;
+ };
+
+ /** transform a BSON array into a vector of BSONElements.
+ we match array # positions with their vector position, and ignore
+ any fields with non-numeric field names.
+ */
+ inline vector<BSONElement> BSONElement::Array() const {
+ chk(mongo::Array);
+ vector<BSONElement> v;
+ BSONObjIterator i(Obj());
+ while( i.more() ) {
+ BSONElement e = i.next();
+ const char *f = e.fieldName();
+ try {
+ unsigned u = stringToNum(f);
+ assert( u < 1000000 );
+ if( u >= v.size() )
+ v.resize(u+1);
+ v[u] = e;
+ }
+ catch(unsigned) { }
+ }
+ return v;
+ }
+
+ /** Similar to BOOST_FOREACH
+ *
+ * because the iterator is defined outside of the for, you must use {} around
+ * the surrounding scope. Don't do this:
+ *
+ * if (foo)
+ * BSONForEach(e, obj)
+ * doSomething(e);
+ *
+ * but this is OK:
+ *
+ * if (foo) {
+ * BSONForEach(e, obj)
+ * doSomething(e);
+ * }
+ *
+ */
+
+#define BSONForEach(e, obj) \
+ BSONObjIterator BOOST_PP_CAT(it_,__LINE__)(obj); \
+ for ( BSONElement e; \
+ (BOOST_PP_CAT(it_,__LINE__).more() ? \
+ (e = BOOST_PP_CAT(it_,__LINE__).next(), true) : \
+ false) ; \
+ /*nothing*/ )
+
+}
diff --git a/src/mongo/bson/bsontypes.h b/src/mongo/bson/bsontypes.h
new file mode 100644
index 00000000000..9d95e8e9ad4
--- /dev/null
+++ b/src/mongo/bson/bsontypes.h
@@ -0,0 +1,107 @@
+// bsontypes.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "util/misc.h"
+
+namespace bson { }
+
+namespace mongo {
+
+ using namespace std;
+
+ class BSONArrayBuilder;
+ class BSONElement;
+ class BSONObj;
+ class BSONObjBuilder;
+ class BSONObjBuilderValueStream;
+ class BSONObjIterator;
+ class Ordering;
+ class Record;
+ struct BSONArray; // empty subclass of BSONObj useful for overloading
+ struct BSONElementCmpWithoutField;
+
+ extern BSONObj maxKey;
+ extern BSONObj minKey;
+
+ /**
+ the complete list of valid BSON types
+ see also bsonspec.org
+ */
+ enum BSONType {
+ /** smaller than all other types */
+ MinKey=-1,
+ /** end of object */
+ EOO=0,
+ /** double precision floating point value */
+ NumberDouble=1,
+ /** character string, stored in utf8 */
+ String=2,
+ /** an embedded object */
+ Object=3,
+ /** an embedded array */
+ Array=4,
+ /** binary data */
+ BinData=5,
+ /** Undefined type */
+ Undefined=6,
+ /** ObjectId */
+ jstOID=7,
+ /** boolean type */
+ Bool=8,
+ /** date type */
+ Date=9,
+ /** null type */
+ jstNULL=10,
+ /** regular expression, a pattern with options */
+ RegEx=11,
+ /** deprecated / will be redesigned */
+ DBRef=12,
+ /** deprecated / use CodeWScope */
+ Code=13,
+ /** a programming language (e.g., Python) symbol */
+ Symbol=14,
+ /** javascript code that can execute on the database server, with SavedContext */
+ CodeWScope=15,
+ /** 32 bit signed integer */
+ NumberInt = 16,
+ /** Updated to a Date with value next OpTime on insert */
+ Timestamp = 17,
+ /** 64 bit integer */
+ NumberLong = 18,
+ /** max type that is not MaxKey */
+ JSTypeMax=18,
+ /** larger than all other types */
+ MaxKey=127
+ };
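+
+ // Example (a sketch; 'e' is an illustrative BSONElement): dispatching on an element's type
+ //     switch ( e.type() ) {
+ //     case NumberInt:
+ //     case NumberLong:
+ //     case NumberDouble: /* numeric */ break;
+ //     case String:       /* utf8 string */ break;
+ //     default: break;
+ //     }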
+
+ /* subtypes of BinData.
+ bdtCustom and above are ones that the JS compiler understands, but are
+ opaque to the database.
+ */
+ enum BinDataType {
+ BinDataGeneral=0,
+ Function=1,
+ ByteArrayDeprecated=2, /* use BinDataGeneral instead */
+ bdtUUID = 3,
+ MD5Type=5,
+ bdtCustom=128
+ };
+
+}
+
diff --git a/src/mongo/bson/inline_decls.h b/src/mongo/bson/inline_decls.h
new file mode 100644
index 00000000000..30da9b4560d
--- /dev/null
+++ b/src/mongo/bson/inline_decls.h
@@ -0,0 +1,68 @@
+// inline_decls.h
+
+/* Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#if defined(__GNUC__)
+
+#define NOINLINE_DECL __attribute__((noinline))
+
+#elif defined(_MSC_VER)
+
+#define NOINLINE_DECL __declspec(noinline)
+
+#else
+
+#define NOINLINE_DECL
+
+#endif
+
+namespace mongo {
+
+/* Note: do not clutter code with these -- ONLY use in hot spots / significant loops. */
+
+#if !defined(__GNUC__)
+
+// branch prediction. indicate we expect to be true
+# define MONGO_likely(x) ((bool)(x))
+
+// branch prediction. indicate we expect to be false
+# define MONGO_unlikely(x) ((bool)(x))
+
+# if defined(_WIN32)
+ // prefetch data from memory
+ inline void prefetch(const void *p) {
+#if defined(_MM_HINT_T0)
+ _mm_prefetch((char *) p, _MM_HINT_T0);
+#endif
+ }
+#else
+ inline void prefetch(void *p) { }
+#endif
+
+#else
+
+# define MONGO_likely(x) ( __builtin_expect((bool)(x), 1) )
+# define MONGO_unlikely(x) ( __builtin_expect((bool)(x), 0) )
+
+ inline void prefetch(void *p) {
+ __builtin_prefetch(p);
+ }
+
+#endif
+
+}
diff --git a/src/mongo/bson/oid.cpp b/src/mongo/bson/oid.cpp
new file mode 100644
index 00000000000..3aee14a3585
--- /dev/null
+++ b/src/mongo/bson/oid.cpp
@@ -0,0 +1,173 @@
+// @file oid.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "oid.h"
+#include "util/atomic_int.h"
+#include "../db/nonce.h"
+#include "bsonobjbuilder.h"
+
+BOOST_STATIC_ASSERT( sizeof(mongo::OID) == 12 );
+
+namespace mongo {
+
+ // machine # before folding in the process id
+ OID::MachineAndPid OID::ourMachine;
+
+ unsigned OID::ourPid() {
+ unsigned pid;
+#if defined(_WIN32)
+ pid = (unsigned short) GetCurrentProcessId();
+#elif defined(__linux__) || defined(__APPLE__) || defined(__sunos__)
+ pid = (unsigned short) getpid();
+#else
+ pid = (unsigned short) Security::getNonce();
+#endif
+ return pid;
+ }
+
+ void OID::foldInPid(OID::MachineAndPid& x) {
+ unsigned p = ourPid();
+ x._pid ^= (unsigned short) p;
+ // when the pid does not fit in 16 bits, let the high bits modulate the machine id field.
+ unsigned short& rest = (unsigned short &) x._machineNumber[1];
+ rest ^= p >> 16;
+ }
+
+ OID::MachineAndPid OID::genMachineAndPid() {
+ BOOST_STATIC_ASSERT( sizeof(mongo::OID::MachineAndPid) == 5 );
+
+ // this is not called often, so the following is not expensive, and gives us some
+ // testing that nonce generation is working right and that our OIDs are (perhaps) ok.
+ {
+ nonce64 a = Security::getNonceDuringInit();
+ nonce64 b = Security::getNonceDuringInit();
+ nonce64 c = Security::getNonceDuringInit();
+ assert( !(a==b && b==c) );
+ }
+
+ unsigned long long n = Security::getNonceDuringInit();
+ OID::MachineAndPid x = ourMachine = (OID::MachineAndPid&) n;
+ foldInPid(x);
+ return x;
+ }
+
+ // after folding in the process id
+ OID::MachineAndPid OID::ourMachineAndPid = OID::genMachineAndPid();
+
+ void OID::regenMachineId() {
+ ourMachineAndPid = genMachineAndPid();
+ }
+
+ inline bool OID::MachineAndPid::operator!=(const OID::MachineAndPid& rhs) const {
+ return _pid != rhs._pid || _machineNumber != rhs._machineNumber;
+ }
+
+ unsigned OID::getMachineId() {
+ unsigned char x[4];
+ x[0] = ourMachineAndPid._machineNumber[0];
+ x[1] = ourMachineAndPid._machineNumber[1];
+ x[2] = ourMachineAndPid._machineNumber[2];
+ x[3] = 0;
+ return (unsigned&) x[0];
+ }
+
+ void OID::justForked() {
+ MachineAndPid x = ourMachine;
+ // we let the random # for machine go into all 5 bytes of MachineAndPid, and then
+ // xor in the pid into _pid. this reduces the probability of collisions.
+ foldInPid(x);
+ ourMachineAndPid = genMachineAndPid();
+ assert( x != ourMachineAndPid );
+ ourMachineAndPid = x;
+ }
+
+ void OID::init() {
+ static AtomicUInt inc = (unsigned) Security::getNonce();
+
+ {
+ unsigned t = (unsigned) time(0);
+ unsigned char *T = (unsigned char *) &t;
+ _time[0] = T[3]; // big endian order because we use memcmp() to compare OID's
+ _time[1] = T[2];
+ _time[2] = T[1];
+ _time[3] = T[0];
+ }
+
+ _machineAndPid = ourMachineAndPid;
+
+ {
+ int new_inc = inc++;
+ unsigned char *T = (unsigned char *) &new_inc;
+ _inc[0] = T[2];
+ _inc[1] = T[1];
+ _inc[2] = T[0];
+ }
+ }
+
+ void OID::init( string s ) {
+ assert( s.size() == 24 );
+ const char *p = s.c_str();
+ for( int i = 0; i < 12; i++ ) {
+ data[i] = fromHex(p);
+ p += 2;
+ }
+ }
+
+ void OID::init(Date_t date, bool max) {
+ int time = (int) (date / 1000);
+ char* T = (char *) &time;
+ data[0] = T[3];
+ data[1] = T[2];
+ data[2] = T[1];
+ data[3] = T[0];
+
+ if (max)
+ *(long long*)(data + 4) = 0xFFFFFFFFFFFFFFFFll;
+ else
+ *(long long*)(data + 4) = 0x0000000000000000ll;
+ }
+
+ time_t OID::asTimeT() {
+ int time;
+ char* T = (char *) &time;
+ T[0] = data[3];
+ T[1] = data[2];
+ T[2] = data[1];
+ T[3] = data[0];
+ return time;
+ }
+
+ const string BSONObjBuilder::numStrs[] = {
+ "0", "1", "2", "3", "4", "5", "6", "7", "8", "9",
+ "10", "11", "12", "13", "14", "15", "16", "17", "18", "19",
+ "20", "21", "22", "23", "24", "25", "26", "27", "28", "29",
+ "30", "31", "32", "33", "34", "35", "36", "37", "38", "39",
+ "40", "41", "42", "43", "44", "45", "46", "47", "48", "49",
+ "50", "51", "52", "53", "54", "55", "56", "57", "58", "59",
+ "60", "61", "62", "63", "64", "65", "66", "67", "68", "69",
+ "70", "71", "72", "73", "74", "75", "76", "77", "78", "79",
+ "80", "81", "82", "83", "84", "85", "86", "87", "88", "89",
+ "90", "91", "92", "93", "94", "95", "96", "97", "98", "99",
+ };
+
+ // This guard ensures that BSONObjBuilder doesn't try to use numStrs before the strings have been constructed.
+ // Making numStrs a plain char[][] was tried, but the overhead of constructing a string from it on each use was too high.
+ // numStrsReady will be 0 until numStrs above is initialized, because statics in a translation unit are initialized in definition order.
+ bool BSONObjBuilder::numStrsReady = (numStrs[0].size() > 0);
+
+}
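
For illustration, a minimal sketch of the ordering property produced by OID::init() above: because the time and counter bytes are written big endian, memcmp()-based comparison keeps a mostly increasing generation order. The include path follows the new src/ layout and is an assumption; this is not part of the committed file.

    // Illustrative sketch only; assumes the bson headers build standalone.
    #include "mongo/bson/oid.h"
    #include <cassert>
    #include <iostream>

    int main() {
        mongo::OID first  = mongo::OID::gen();
        mongo::OID second = mongo::OID::gen();
        // _time and _inc are big endian, so memcmp()-based compare() preserves
        // generation order in the common case.
        assert( first.compare( second ) < 0 );
        // the leading 4 bytes decode back to the generation time in seconds
        std::cout << first.str() << " generated at " << first.asTimeT() << std::endl;
        return 0;
    }
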
diff --git a/src/mongo/bson/oid.h b/src/mongo/bson/oid.h
new file mode 100644
index 00000000000..e5963a0e81d
--- /dev/null
+++ b/src/mongo/bson/oid.h
@@ -0,0 +1,138 @@
+// oid.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <boost/functional/hash.hpp>
+#include "../util/hex.h"
+
+namespace mongo {
+
+#pragma pack(1)
+ /** Object ID type.
+ BSON objects typically have an _id field for the object id. This field should be the first
+ member of the object when present. class OID is a special type that is a 12 byte id which
+ is likely to be unique to the system. You may also use other types for _id's.
+ When _id field is missing from a BSON object, on an insert the database may insert one
+ automatically in certain circumstances.
+
+ Warning: You must call OID::justForked() after a fork().
+
+ The typical content of a BSON ObjectID is a 12-byte value consisting of a 4-byte timestamp (seconds since epoch),
+ a 3-byte machine id, a 2-byte process id, and a 3-byte counter. Note that the timestamp and counter fields must
+ be stored big endian, unlike the rest of BSON. This is because they are compared byte-by-byte and we want to ensure
+ a mostly increasing order.
+ */
+ class OID {
+ public:
+ OID() : a(0), b(0) { }
+
+ /** init from a 24 char hex string */
+ explicit OID(const string &s) { init(s); }
+
+ /** initialize to 'null' */
+ void clear() { a = 0; b = 0; }
+
+ const unsigned char *getData() const { return data; }
+
+ bool operator==(const OID& r) const { return a==r.a && b==r.b; }
+ bool operator!=(const OID& r) const { return a!=r.a || b!=r.b; }
+ int compare( const OID& other ) const { return memcmp( data , other.data , 12 ); }
+ bool operator<( const OID& other ) const { return compare( other ) < 0; }
+ bool operator<=( const OID& other ) const { return compare( other ) <= 0; }
+
+ /** @return the object ID output as 24 hex digits */
+ string str() const { return toHexLower(data, 12); }
+ string toString() const { return str(); }
+
+ static OID gen() { OID o; o.init(); return o; }
+
+ /** sets the contents to a new oid / randomized value */
+ void init();
+
+ /** init from a 24 char hex string */
+ void init( string s );
+
+ /** Set to the min/max OID that could be generated at given timestamp. */
+ void init( Date_t date, bool max=false );
+
+ time_t asTimeT();
+ Date_t asDateT() { return asTimeT() * (long long)1000; }
+
+ bool isSet() const { return a || b; }
+
+ void hash_combine(size_t &seed) const {
+ boost::hash_combine(seed, a);
+ boost::hash_combine(seed, b);
+ }
+
+ /** call this after a fork to update the process id */
+ static void justForked();
+
+ static unsigned getMachineId(); // features command uses
+ static void regenMachineId(); // used by unit tests
+
+ private:
+ struct MachineAndPid {
+ unsigned char _machineNumber[3];
+ unsigned short _pid;
+ bool operator!=(const OID::MachineAndPid& rhs) const;
+ };
+ static MachineAndPid ourMachine, ourMachineAndPid;
+ union {
+ struct {
+ // 12 bytes total
+ unsigned char _time[4];
+ MachineAndPid _machineAndPid;
+ unsigned char _inc[3];
+ };
+ struct {
+ long long a;
+ unsigned b;
+ };
+ unsigned char data[12];
+ };
+
+ static unsigned ourPid();
+ static void foldInPid(MachineAndPid& x);
+ static MachineAndPid genMachineAndPid();
+ };
+#pragma pack()
+
+ ostream& operator<<( ostream &s, const OID &o );
+ inline StringBuilder& operator<< (StringBuilder& s, const OID& o) { return (s << o.str()); }
+
+ /** Formatting mode for generating JSON from BSON.
+ See <http://mongodb.onconfluence.com/display/DOCS/Mongo+Extended+JSON>
+ for details.
+ */
+ enum JsonStringFormat {
+ /** strict RFC format */
+ Strict,
+ /** 10gen format, which is close to JS format. This form is understandable by
+ javascript running inside the Mongo server via eval() */
+ TenGen,
+ /** Javascript JSON compatible */
+ JS
+ };
+
+ inline ostream& operator<<( ostream &s, const OID &o ) {
+ s << o.str();
+ return s;
+ }
+
+}
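
A minimal usage sketch of the OID API declared above; the include paths, and the assumption that these headers can be pulled in directly, are illustrative only.

    #include "mongo/bson/util/misc.h"   // Date_t
    #include "mongo/bson/oid.h"
    #include <iostream>

    int main() {
        // round-trip through the 24-character hex form
        mongo::OID generated = mongo::OID::gen();
        mongo::OID parsed( generated.str() );
        std::cout << ( generated == parsed ) << std::endl;        // 1

        // min/max OIDs for a timestamp, handy as _id range bounds
        mongo::Date_t when( 1324000000000ULL );                   // example milliseconds value
        mongo::OID low, high;
        low.init( when );          // bytes 4..11 zeroed
        high.init( when, true );   // bytes 4..11 set to 0xFF
        std::cout << ( low <= high ) << std::endl;                // 1
        return 0;
    }
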
diff --git a/src/mongo/bson/ordering.h b/src/mongo/bson/ordering.h
new file mode 100644
index 00000000000..bca3296f340
--- /dev/null
+++ b/src/mongo/bson/ordering.h
@@ -0,0 +1,73 @@
+// ordering.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+namespace mongo {
+
+ // todo: ideally move to db/ instead of bson/, but elim any dependencies first
+
+ /** A precomputation of a BSON index or sort key pattern. That is something like:
+ { a : 1, b : -1 }
+ The constructor is private to make conversion more explicit so we notice where we call make().
+ Over time we should push this up higher and higher.
+ */
+ class Ordering {
+ unsigned bits;
+ Ordering(unsigned b) : bits(b) { }
+ public:
+ Ordering(const Ordering& r) : bits(r.bits) { }
+ void operator=(const Ordering& r) {
+ bits = r.bits;
+ }
+
+ /** so, for key pattern { a : 1, b : -1 }
+ get(0) == 1
+ get(1) == -1
+ */
+ int get(int i) const {
+ return ((1 << i) & bits) ? -1 : 1;
+ }
+
+ // for woCompare...
+ unsigned descending(unsigned mask) const { return bits & mask; }
+
+ /*operator string() const {
+ StringBuilder buf(32);
+ for ( unsigned i=0; i<nkeys; i++)
+ buf.append( get(i) > 0 ? "+" : "-" );
+ return buf.str();
+ }*/
+
+ static Ordering make(const BSONObj& obj) {
+ unsigned b = 0;
+ BSONObjIterator k(obj);
+ unsigned n = 0;
+ while( 1 ) {
+ BSONElement e = k.next();
+ if( e.eoo() )
+ break;
+ uassert( 13103, "too many compound keys", n <= 31 );
+ if( e.number() < 0 )
+ b |= (1 << n);
+ n++;
+ }
+ return Ordering(b);
+ }
+ };
+
+}
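
To make the bit encoding concrete, a small sketch of Ordering::make() and get(); it assumes the BSON() builder macro and BSONObj are available via the library's usual headers, and the umbrella include path shown is an assumption.

    #include "mongo/bson/bson.h"     // assumed umbrella header for BSONObj / BSON()
    #include <iostream>

    int main() {
        mongo::BSONObj keyPattern = BSON( "a" << 1 << "b" << -1 );
        mongo::Ordering o = mongo::Ordering::make( keyPattern );
        std::cout << o.get( 0 ) << std::endl;               // 1  (field "a" ascending)
        std::cout << o.get( 1 ) << std::endl;               // -1 (field "b" descending)
        std::cout << o.descending( 1 << 1 ) << std::endl;   // nonzero: bit 1 is set
        return 0;
    }
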
diff --git a/src/mongo/bson/stringdata.h b/src/mongo/bson/stringdata.h
new file mode 100644
index 00000000000..1fb4e7d25d3
--- /dev/null
+++ b/src/mongo/bson/stringdata.h
@@ -0,0 +1,71 @@
+// stringdata.h
+
+/* Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <string>
+#include <cstring>
+
+namespace mongo {
+
+ using std::string;
+
+ /** A StringData object wraps a 'const string&' or a 'const char*' without
+ * copying its contents. The most common usage is as a function argument that
+ * accepts either of the two string forms above. Fundamentally, this class tries
+ * to work around the fact that string literals in C++ are char[N]'s.
+ *
+ * Note that the object StringData wraps around must be alive while the StringData
+ * is.
+ */
+ class StringData {
+ public:
+ /** Construct a StringData, for the case where the length of
+ * string is not known. 'c' must be a pointer to a null-terminated string.
+ */
+ StringData( const char* c )
+ : _data(c), _size((unsigned) strlen(c)) {}
+
+ /** Construct a StringData explicitly, for the case where the length of the string
+ * is already known. 'c' must be a pointer to a null-terminated string, and 'len'
+ * must be the length that std::strlen(c) would return, i.e. the index of the
+ * terminator in c.
+ */
+ StringData( const char* c, unsigned len )
+ : _data(c), _size(len) {}
+
+ /** Construct a StringData, for the case of a std::string. */
+ StringData( const string& s )
+ : _data(s.c_str()), _size((unsigned) s.size()) {}
+
+ // Construct a StringData explicitly, for the case of a literal whose size is
+ // known at compile time.
+ struct LiteralTag {};
+ template<size_t N>
+ StringData( const char (&val)[N], LiteralTag )
+ : _data(&val[0]), _size(N-1) {}
+
+ // accessors
+ const char* data() const { return _data; }
+ unsigned size() const { return _size; }
+
+ private:
+ const char* const _data; // is always null terminated
+ const unsigned _size; // 'size' does not include the null terminator
+ };
+
+} // namespace mongo
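
The sketch below shows the three construction paths side by side; note the caller owns the wrapped storage, which must outlive the StringData. The include path is an assumption.

    #include "mongo/bson/stringdata.h"
    #include <iostream>
    #include <string>

    static void show( const mongo::StringData& sd ) {
        std::cout << sd.size() << " bytes: " << sd.data() << std::endl;
    }

    int main() {
        const char* c = "plain C string";          // length found via strlen()
        std::string s( "std::string source" );     // wraps s.c_str() without copying
        show( c );
        show( s );
        // literal form: size computed at compile time from the array length
        show( mongo::StringData( "literal", mongo::StringData::LiteralTag() ) );
        return 0;
    }
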
diff --git a/src/mongo/bson/util/atomic_int.h b/src/mongo/bson/util/atomic_int.h
new file mode 100644
index 00000000000..e85a023c3bc
--- /dev/null
+++ b/src/mongo/bson/util/atomic_int.h
@@ -0,0 +1,106 @@
+// atomic_int.h
+// atomic wrapper for unsigned
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#if defined(_WIN32)
+# include <windows.h>
+#endif
+
+namespace mongo {
+
+ struct AtomicUInt {
+ AtomicUInt() : x(0) {}
+ AtomicUInt(unsigned z) : x(z) { }
+
+ operator unsigned() const { return x; }
+ unsigned get() const { return x; }
+
+ inline AtomicUInt operator++(); // ++prefix
+ inline AtomicUInt operator++(int);// postfix++
+ inline AtomicUInt operator--(); // --prefix
+ inline AtomicUInt operator--(int); // postfix--
+
+ inline void zero();
+
+ volatile unsigned x;
+ };
+
+#if defined(_WIN32)
+ void AtomicUInt::zero() {
+ InterlockedExchange((volatile long*)&x, 0);
+ }
+ AtomicUInt AtomicUInt::operator++() {
+ return InterlockedIncrement((volatile long*)&x);
+ }
+ AtomicUInt AtomicUInt::operator++(int) {
+ return InterlockedIncrement((volatile long*)&x)-1;
+ }
+ AtomicUInt AtomicUInt::operator--() {
+ return InterlockedDecrement((volatile long*)&x);
+ }
+ AtomicUInt AtomicUInt::operator--(int) {
+ return InterlockedDecrement((volatile long*)&x)+1;
+ }
+#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
+ // this is in GCC >= 4.1
+ inline void AtomicUInt::zero() { x = 0; } // TODO: this isn't thread safe - maybe
+ AtomicUInt AtomicUInt::operator++() {
+ return __sync_add_and_fetch(&x, 1);
+ }
+ AtomicUInt AtomicUInt::operator++(int) {
+ return __sync_fetch_and_add(&x, 1);
+ }
+ AtomicUInt AtomicUInt::operator--() {
+ return __sync_add_and_fetch(&x, -1);
+ }
+ AtomicUInt AtomicUInt::operator--(int) {
+ return __sync_fetch_and_add(&x, -1);
+ }
+#elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
+ inline void AtomicUInt::zero() { x = 0; } // TODO: this isn't thread safe
+ // from boost 1.39 interprocess/detail/atomic.hpp
+ inline unsigned atomic_int_helper(volatile unsigned *x, int val) {
+ int r;
+ asm volatile
+ (
+ "lock\n\t"
+ "xadd %1, %0":
+ "+m"( *x ), "=r"( r ): // outputs (%0, %1)
+ "1"( val ): // inputs (%2 == %1)
+ "memory", "cc" // clobbers
+ );
+ return r;
+ }
+ AtomicUInt AtomicUInt::operator++() {
+ return atomic_int_helper(&x, 1)+1;
+ }
+ AtomicUInt AtomicUInt::operator++(int) {
+ return atomic_int_helper(&x, 1);
+ }
+ AtomicUInt AtomicUInt::operator--() {
+ return atomic_int_helper(&x, -1)-1;
+ }
+ AtomicUInt AtomicUInt::operator--(int) {
+ return atomic_int_helper(&x, -1);
+ }
+#else
+# error "unsupported compiler or platform"
+#endif
+
+} // namespace mongo
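
A small sketch of the increment semantics above: postfix ++ behaves like fetch-and-add (returns the old value), prefix ++ like add-and-fetch. The include path is an assumption.

    #include "mongo/bson/util/atomic_int.h"
    #include <iostream>

    int main() {
        mongo::AtomicUInt counter( 5 );
        unsigned old = counter++;    // returns the pre-increment value: 5
        unsigned now = ++counter;    // returns the post-increment value: 7
        std::cout << old << " " << now << " " << counter.get() << std::endl;   // 5 7 7
        counter.zero();
        std::cout << counter.get() << std::endl;                               // 0
        return 0;
    }
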
diff --git a/src/mongo/bson/util/builder.h b/src/mongo/bson/util/builder.h
new file mode 100644
index 00000000000..f189f58b27e
--- /dev/null
+++ b/src/mongo/bson/util/builder.h
@@ -0,0 +1,322 @@
+/* builder.h */
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <string>
+#include <string.h>
+#include <stdio.h>
+#include "../inline_decls.h"
+#include "../stringdata.h"
+
+namespace mongo {
+
+ /* Note the limit here is rather arbitrary and is simply a standard. Generally the code works
+ with any object that fits in RAM.
+
+ Also note that the server has some basic checks to enforce this limit, but those checks are not exhaustive;
+ for example, size still needs to be checked after:
+ update $push (append) operations
+ various db.eval() type operations
+ */
+ const int BSONObjMaxUserSize = 16 * 1024 * 1024;
+
+ /*
+ Sometimes we need objects slightly larger - an object in the replication local.oplog,
+ for example, is slightly larger than a user object.
+ */
+ const int BSONObjMaxInternalSize = BSONObjMaxUserSize + ( 16 * 1024 );
+
+ const int BufferMaxSize = 64 * 1024 * 1024;
+
+ class StringBuilder;
+
+ void msgasserted(int msgid, const char *msg);
+
+ class TrivialAllocator {
+ public:
+ void* Malloc(size_t sz) { return malloc(sz); }
+ void* Realloc(void *p, size_t sz) { return realloc(p, sz); }
+ void Free(void *p) { free(p); }
+ };
+
+ class StackAllocator {
+ public:
+ enum { SZ = 512 };
+ void* Malloc(size_t sz) {
+ if( sz <= SZ ) return buf;
+ return malloc(sz);
+ }
+ void* Realloc(void *p, size_t sz) {
+ if( p == buf ) {
+ if( sz <= SZ ) return buf;
+ void *d = malloc(sz);
+ if ( d == 0 )
+ msgasserted( 15912 , "out of memory StackAllocator::Realloc" );
+ memcpy(d, p, SZ);
+ return d;
+ }
+ return realloc(p, sz);
+ }
+ void Free(void *p) {
+ if( p != buf )
+ free(p);
+ }
+ private:
+ char buf[SZ];
+ };
+
+ template< class Allocator >
+ class _BufBuilder {
+ // non-copyable, non-assignable
+ _BufBuilder( const _BufBuilder& );
+ _BufBuilder& operator=( const _BufBuilder& );
+ Allocator al;
+ public:
+ _BufBuilder(int initsize = 512) : size(initsize) {
+ if ( size > 0 ) {
+ data = (char *) al.Malloc(size);
+ if( data == 0 )
+ msgasserted(10000, "out of memory BufBuilder");
+ }
+ else {
+ data = 0;
+ }
+ l = 0;
+ }
+ ~_BufBuilder() { kill(); }
+
+ void kill() {
+ if ( data ) {
+ al.Free(data);
+ data = 0;
+ }
+ }
+
+ void reset() {
+ l = 0;
+ }
+ void reset( int maxSize ) {
+ l = 0;
+ if ( maxSize && size > maxSize ) {
+ al.Free(data);
+ data = (char*)al.Malloc(maxSize);
+ if ( data == 0 )
+ msgasserted( 15913 , "out of memory BufBuilder::reset" );
+ size = maxSize;
+ }
+ }
+
+ /** leave room for some stuff later
+ @return point to region that was skipped. pointer may change later (on realloc), so for immediate use only
+ */
+ char* skip(int n) { return grow(n); }
+
+ /* note this may be deallocated (realloced) if you keep writing. */
+ char* buf() { return data; }
+ const char* buf() const { return data; }
+
+ /* assume ownership of the buffer - you must then free() it */
+ void decouple() { data = 0; }
+
+ void appendUChar(unsigned char j) {
+ *((unsigned char*)grow(sizeof(unsigned char))) = j;
+ }
+ void appendChar(char j) {
+ *((char*)grow(sizeof(char))) = j;
+ }
+ void appendNum(char j) {
+ *((char*)grow(sizeof(char))) = j;
+ }
+ void appendNum(short j) {
+ *((short*)grow(sizeof(short))) = j;
+ }
+ void appendNum(int j) {
+ *((int*)grow(sizeof(int))) = j;
+ }
+ void appendNum(unsigned j) {
+ *((unsigned*)grow(sizeof(unsigned))) = j;
+ }
+ void appendNum(bool j) {
+ *((bool*)grow(sizeof(bool))) = j;
+ }
+ void appendNum(double j) {
+ *((double*)grow(sizeof(double))) = j;
+ }
+ void appendNum(long long j) {
+ *((long long*)grow(sizeof(long long))) = j;
+ }
+ void appendNum(unsigned long long j) {
+ *((unsigned long long*)grow(sizeof(unsigned long long))) = j;
+ }
+
+ void appendBuf(const void *src, size_t len) {
+ memcpy(grow((int) len), src, len);
+ }
+
+ template<class T>
+ void appendStruct(const T& s) {
+ appendBuf(&s, sizeof(T));
+ }
+
+ void appendStr(const StringData &str , bool includeEndingNull = true ) {
+ const int len = str.size() + ( includeEndingNull ? 1 : 0 );
+ memcpy(grow(len), str.data(), len);
+ }
+
+ /** @return length of current string */
+ int len() const { return l; }
+ void setlen( int newLen ) { l = newLen; }
+ /** @return size of the buffer */
+ int getSize() const { return size; }
+
+ /* returns the pre-grow write position */
+ inline char* grow(int by) {
+ int oldlen = l;
+ l += by;
+ if ( l > size ) {
+ grow_reallocate();
+ }
+ return data + oldlen;
+ }
+
+ private:
+ /* "slow" portion of 'grow()' */
+ void NOINLINE_DECL grow_reallocate() {
+ int a = size * 2;
+ if ( a == 0 )
+ a = 512;
+ if ( l > a )
+ a = l + 16 * 1024;
+ if ( a > BufferMaxSize )
+ msgasserted(13548, "BufBuilder grow() > 64MB");
+ data = (char *) al.Realloc(data, a);
+ size= a;
+ }
+
+ char *data;
+ int l;
+ int size;
+
+ friend class StringBuilder;
+ };
+
+ typedef _BufBuilder<TrivialAllocator> BufBuilder;
+
+ /** The StackBufBuilder builds smaller datasets on the stack instead of using malloc.
+ This can be significantly faster for small bufs. However, you cannot decouple() the
+ buffer with StackBufBuilder.
+ While designed to be a variable on the stack, if you were to dynamically allocate one,
+ nothing bad would happen. In fact in some circumstances this might make sense, say,
+ embedded in some other object.
+ */
+ class StackBufBuilder : public _BufBuilder<StackAllocator> {
+ public:
+ StackBufBuilder() : _BufBuilder<StackAllocator>(StackAllocator::SZ) { }
+ void decouple(); // not allowed. not implemented.
+ };
+
+#if defined(_WIN32)
+#pragma warning( push )
+// warning C4996: 'sprintf': This function or variable may be unsafe. Consider using sprintf_s instead. To disable deprecation, use _CRT_SECURE_NO_WARNINGS.
+#pragma warning( disable : 4996 )
+#endif
+
+ /** stringstream deals with locale so this is a lot faster than std::stringstream for UTF8 */
+ class StringBuilder {
+ public:
+ StringBuilder( int initsize=256 )
+ : _buf( initsize ) {
+ }
+
+ StringBuilder& operator<<( double x ) {
+ return SBNUM( x , 25 , "%g" );
+ }
+ StringBuilder& operator<<( int x ) {
+ return SBNUM( x , 11 , "%d" );
+ }
+ StringBuilder& operator<<( unsigned x ) {
+ return SBNUM( x , 11 , "%u" );
+ }
+ StringBuilder& operator<<( long x ) {
+ return SBNUM( x , 22 , "%ld" );
+ }
+ StringBuilder& operator<<( unsigned long x ) {
+ return SBNUM( x , 22 , "%lu" );
+ }
+ StringBuilder& operator<<( long long x ) {
+ return SBNUM( x , 22 , "%lld" );
+ }
+ StringBuilder& operator<<( unsigned long long x ) {
+ return SBNUM( x , 22 , "%llu" );
+ }
+ StringBuilder& operator<<( short x ) {
+ return SBNUM( x , 8 , "%hd" );
+ }
+ StringBuilder& operator<<( char c ) {
+ _buf.grow( 1 )[0] = c;
+ return *this;
+ }
+
+ void appendDoubleNice( double x ) {
+ int prev = _buf.l;
+ char * start = _buf.grow( 32 );
+ int z = sprintf( start , "%.16g" , x );
+ assert( z >= 0 );
+ _buf.l = prev + z;
+ if( strchr(start, '.') == 0 && strchr(start, 'E') == 0 && strchr(start, 'N') == 0 ) {
+ write( ".0" , 2 );
+ }
+ }
+
+ void write( const char* buf, int len) { memcpy( _buf.grow( len ) , buf , len ); }
+
+ void append( const StringData& str ) { memcpy( _buf.grow( str.size() ) , str.data() , str.size() ); }
+
+ StringBuilder& operator<<( const StringData& str ) {
+ append( str );
+ return *this;
+ }
+
+ void reset( int maxSize = 0 ) { _buf.reset( maxSize ); }
+
+ std::string str() const { return std::string(_buf.data, _buf.l); }
+
+ int len() const { return _buf.l; }
+
+ private:
+ BufBuilder _buf;
+
+ // non-copyable, non-assignable
+ StringBuilder( const StringBuilder& );
+ StringBuilder& operator=( const StringBuilder& );
+
+ template <typename T>
+ StringBuilder& SBNUM(T val,int maxSize,const char *macro) {
+ int prev = _buf.l;
+ int z = sprintf( _buf.grow(maxSize) , macro , (val) );
+ assert( z >= 0 );
+ _buf.l = prev + z;
+ return *this;
+ }
+ };
+
+#if defined(_WIN32)
+#pragma warning( pop )
+#endif
+
+} // namespace mongo
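
For orientation, a sketch of the two builders above: BufBuilder for raw byte layout, StringBuilder for fast string assembly. The include path is an assumption.

    #include "mongo/bson/util/builder.h"
    #include <iostream>

    int main() {
        mongo::BufBuilder b( 64 );
        b.appendNum( (int) 42 );      // 4 bytes, host byte order
        b.appendStr( "hi" );          // 3 bytes, including the trailing NUL
        std::cout << b.len() << std::endl;        // 7

        mongo::StringBuilder s;
        s << "n=" << 42 << ", x=";
        s.appendDoubleNice( 3.0 );    // renders "3.0" so the value still reads back as a double
        std::cout << s.str() << std::endl;        // n=42, x=3.0
        return 0;
    }
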
diff --git a/src/mongo/bson/util/misc.h b/src/mongo/bson/util/misc.h
new file mode 100644
index 00000000000..b547c981bdf
--- /dev/null
+++ b/src/mongo/bson/util/misc.h
@@ -0,0 +1,121 @@
+/* @file misc.h
+*/
+
+/*
+ * Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <ctime>
+
+namespace mongo {
+
+ using namespace std;
+
+ inline void time_t_to_String(time_t t, char *buf) {
+#if defined(_WIN32)
+ ctime_s(buf, 32, &t);
+#else
+ ctime_r(&t, buf);
+#endif
+ buf[24] = 0; // don't want the \n
+ }
+
+ inline string time_t_to_String(time_t t = time(0) ) {
+ char buf[64];
+#if defined(_WIN32)
+ ctime_s(buf, sizeof(buf), &t);
+#else
+ ctime_r(&t, buf);
+#endif
+ buf[24] = 0; // don't want the \n
+ return buf;
+ }
+
+ inline string time_t_to_String_no_year(time_t t) {
+ char buf[64];
+#if defined(_WIN32)
+ ctime_s(buf, sizeof(buf), &t);
+#else
+ ctime_r(&t, buf);
+#endif
+ buf[19] = 0;
+ return buf;
+ }
+
+ inline string time_t_to_String_short(time_t t) {
+ char buf[64];
+#if defined(_WIN32)
+ ctime_s(buf, sizeof(buf), &t);
+#else
+ ctime_r(&t, buf);
+#endif
+ buf[19] = 0;
+ if( buf[0] && buf[1] && buf[2] && buf[3] )
+ return buf + 4; // skip day of week
+ return buf;
+ }
+
+ struct Date_t {
+ // TODO: make signed (and look for related TODO's)
+ unsigned long long millis;
+ Date_t(): millis(0) {}
+ Date_t(unsigned long long m): millis(m) {}
+ operator unsigned long long&() { return millis; }
+ operator const unsigned long long&() const { return millis; }
+ void toTm (tm *buf) {
+ time_t dtime = (time_t) millis/1000;
+#if defined(_WIN32)
+ gmtime_s(buf, &dtime);
+#else
+ gmtime_r(&dtime, buf);
+#endif
+ }
+ string toString() const {
+ char buf[64];
+ time_t_to_String(millis/1000, buf);
+ return buf;
+ }
+ };
+
+ // Like strlen, but only scans up to n bytes.
+ // Returns -1 if no '\0' terminator is found.
+ inline int strnlen( const char *s, int n ) {
+ for( int i = 0; i < n; ++i )
+ if ( !s[ i ] )
+ return i;
+ return -1;
+ }
+
+ inline bool isNumber( char c ) {
+ return c >= '0' && c <= '9';
+ }
+
+ inline unsigned stringToNum(const char *str) {
+ unsigned x = 0;
+ const char *p = str;
+ while( 1 ) {
+ if( !isNumber(*p) ) {
+ if( *p == 0 && p != str )
+ break;
+ throw 0;
+ }
+ x = x * 10 + *p++ - '0';
+ }
+ return x;
+ }
+
+}
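
A sketch of Date_t and the helpers above; the millisecond value is arbitrary and the include path is an assumption.

    #include "mongo/bson/util/misc.h"
    #include <iostream>

    int main() {
        mongo::Date_t d( 1324000000000ULL );        // milliseconds since the epoch
        std::cout << d.toString() << std::endl;     // ctime-style, trailing newline stripped

        tm parts;
        d.toTm( &parts );                           // broken-down UTC time
        std::cout << ( parts.tm_year + 1900 ) << std::endl;

        std::cout << mongo::stringToNum( "421" ) << std::endl;   // 421
        return 0;
    }
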
diff --git a/src/mongo/client/clientOnly.cpp b/src/mongo/client/clientOnly.cpp
new file mode 100644
index 00000000000..161f0a82a81
--- /dev/null
+++ b/src/mongo/client/clientOnly.cpp
@@ -0,0 +1,92 @@
+// clientOnly.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "../client/dbclient.h"
+#include "../db/cmdline.h"
+#include "../db/client_common.h"
+#include "../s/shard.h"
+#include "../util/timer.h"
+
+namespace mongo {
+
+ CmdLine cmdLine;
+
+ const char * curNs = "in client mode";
+
+ bool dbexitCalled = false;
+
+ string dynHostMyName() { return ""; }
+
+ void dynHostResolve(string& name, int& port) {
+ assert(false);
+ }
+
+ void exitCleanly( ExitCode code ) {
+ dbexit( code );
+ }
+
+ void dbexit( ExitCode returnCode, const char *whyMsg , bool tryToGetLock ) {
+ dbexitCalled = true;
+ out() << "dbexit called" << endl;
+ if ( whyMsg )
+ out() << " b/c " << whyMsg << endl;
+ out() << "exiting" << endl;
+ ::exit( returnCode );
+ }
+
+ bool inShutdown() {
+ return dbexitCalled;
+ }
+
+ void setupSignals() {
+ // maybe should do SIGPIPE here, not sure
+ }
+
+ string getDbContext() {
+ return "in client only mode";
+ }
+
+ bool haveLocalShardingInfo( const string& ns ) {
+ return false;
+ }
+
+ DBClientBase * createDirectClient() {
+ uassert( 10256 , "no createDirectClient in clientOnly" , 0 );
+ return 0;
+ }
+
+ void Shard::getAllShards( vector<Shard>& all ) {
+ assert(0);
+ }
+
+ bool Shard::isAShardNode( const string& ident ) {
+ assert(0);
+ return false;
+ }
+
+ string prettyHostName() {
+ assert(0);
+ return "";
+ }
+
+ ClientBasic* ClientBasic::getCurrent() {
+ return 0;
+ }
+
+
+}
diff --git a/src/mongo/client/connpool.cpp b/src/mongo/client/connpool.cpp
new file mode 100644
index 00000000000..5089471f521
--- /dev/null
+++ b/src/mongo/client/connpool.cpp
@@ -0,0 +1,426 @@
+/* connpool.cpp
+*/
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// TODO: reconnect?
+
+#include "pch.h"
+#include "connpool.h"
+//#include "../db/commands.h"
+#include "syncclusterconnection.h"
+#include "../s/shard.h"
+
+namespace mongo {
+
+ // ------ PoolForHost ------
+
+ PoolForHost::~PoolForHost() {
+ while ( ! _pool.empty() ) {
+ StoredConnection sc = _pool.top();
+ delete sc.conn;
+ _pool.pop();
+ }
+ }
+
+ void PoolForHost::done( DBConnectionPool * pool, DBClientBase * c ) {
+ if ( _pool.size() >= _maxPerHost ) {
+ pool->onDestroy( c );
+ delete c;
+ }
+ else {
+ _pool.push(c);
+ }
+ }
+
+ DBClientBase * PoolForHost::get( DBConnectionPool * pool , double socketTimeout ) {
+
+ time_t now = time(0);
+
+ while ( ! _pool.empty() ) {
+ StoredConnection sc = _pool.top();
+ _pool.pop();
+
+ if ( ! sc.ok( now ) ) {
+ pool->onDestroy( sc.conn );
+ delete sc.conn;
+ continue;
+ }
+
+ assert( sc.conn->getSoTimeout() == socketTimeout );
+
+ return sc.conn;
+
+ }
+
+ return NULL;
+ }
+
+ void PoolForHost::flush() {
+ vector<StoredConnection> all;
+ while ( ! _pool.empty() ) {
+ StoredConnection c = _pool.top();
+ _pool.pop();
+ all.push_back( c );
+ bool res;
+ c.conn->isMaster( res );
+ }
+
+ for ( vector<StoredConnection>::iterator i=all.begin(); i != all.end(); ++i ) {
+ _pool.push( *i );
+ }
+ }
+
+ void PoolForHost::getStaleConnections( vector<DBClientBase*>& stale ) {
+ time_t now = time(0);
+
+ vector<StoredConnection> all;
+ while ( ! _pool.empty() ) {
+ StoredConnection c = _pool.top();
+ _pool.pop();
+
+ if ( c.ok( now ) )
+ all.push_back( c );
+ else
+ stale.push_back( c.conn );
+ }
+
+ for ( size_t i=0; i<all.size(); i++ ) {
+ _pool.push( all[i] );
+ }
+ }
+
+
+ PoolForHost::StoredConnection::StoredConnection( DBClientBase * c ) {
+ conn = c;
+ when = time(0);
+ }
+
+ bool PoolForHost::StoredConnection::ok( time_t now ) {
+ // if connection has been idle for 30 minutes, kill it
+ return ( now - when ) < 1800;
+ }
+
+ void PoolForHost::createdOne( DBClientBase * base) {
+ if ( _created == 0 )
+ _type = base->type();
+ _created++;
+ }
+
+ unsigned PoolForHost::_maxPerHost = 50;
+
+ // ------ DBConnectionPool ------
+
+ DBConnectionPool pool;
+
+ DBConnectionPool::DBConnectionPool()
+ : _mutex("DBConnectionPool") ,
+ _name( "dbconnectionpool" ) ,
+ _hooks( new list<DBConnectionHook*>() ) {
+ }
+
+ DBClientBase* DBConnectionPool::_get(const string& ident , double socketTimeout ) {
+ assert( ! inShutdown() );
+ scoped_lock L(_mutex);
+ PoolForHost& p = _pools[PoolKey(ident,socketTimeout)];
+ return p.get( this , socketTimeout );
+ }
+
+ DBClientBase* DBConnectionPool::_finishCreate( const string& host , double socketTimeout , DBClientBase* conn ) {
+ {
+ scoped_lock L(_mutex);
+ PoolForHost& p = _pools[PoolKey(host,socketTimeout)];
+ p.createdOne( conn );
+ }
+
+ try {
+ onCreate( conn );
+ onHandedOut( conn );
+ }
+ catch ( std::exception & ) {
+ delete conn;
+ throw;
+ }
+
+ return conn;
+ }
+
+ DBClientBase* DBConnectionPool::get(const ConnectionString& url, double socketTimeout) {
+ DBClientBase * c = _get( url.toString() , socketTimeout );
+ if ( c ) {
+ try {
+ onHandedOut( c );
+ }
+ catch ( std::exception& ) {
+ delete c;
+ throw;
+ }
+ return c;
+ }
+
+ string errmsg;
+ c = url.connect( errmsg, socketTimeout );
+ uassert( 13328 , _name + ": connect failed " + url.toString() + " : " + errmsg , c );
+
+ return _finishCreate( url.toString() , socketTimeout , c );
+ }
+
+ DBClientBase* DBConnectionPool::get(const string& host, double socketTimeout) {
+ DBClientBase * c = _get( host , socketTimeout );
+ if ( c ) {
+ try {
+ onHandedOut( c );
+ }
+ catch ( std::exception& ) {
+ delete c;
+ throw;
+ }
+ return c;
+ }
+
+ string errmsg;
+ ConnectionString cs = ConnectionString::parse( host , errmsg );
+ uassert( 13071 , (string)"invalid hostname [" + host + "]" + errmsg , cs.isValid() );
+
+ c = cs.connect( errmsg, socketTimeout );
+ if ( ! c )
+ throw SocketException( SocketException::CONNECT_ERROR , host , 11002 , str::stream() << _name << " error: " << errmsg );
+ return _finishCreate( host , socketTimeout , c );
+ }
+
+ void DBConnectionPool::release(const string& host, DBClientBase *c) {
+ if ( c->isFailed() ) {
+ onDestroy( c );
+ delete c;
+ return;
+ }
+ scoped_lock L(_mutex);
+ _pools[PoolKey(host,c->getSoTimeout())].done(this,c);
+ }
+
+
+ DBConnectionPool::~DBConnectionPool() {
+ // connection closing is handled by ~PoolForHost
+ }
+
+ void DBConnectionPool::flush() {
+ scoped_lock L(_mutex);
+ for ( PoolMap::iterator i = _pools.begin(); i != _pools.end(); i++ ) {
+ PoolForHost& p = i->second;
+ p.flush();
+ }
+ }
+
+ void DBConnectionPool::addHook( DBConnectionHook * hook ) {
+ _hooks->push_back( hook );
+ }
+
+ void DBConnectionPool::onCreate( DBClientBase * conn ) {
+ if ( _hooks->size() == 0 )
+ return;
+
+ for ( list<DBConnectionHook*>::iterator i = _hooks->begin(); i != _hooks->end(); i++ ) {
+ (*i)->onCreate( conn );
+ }
+ }
+
+ void DBConnectionPool::onHandedOut( DBClientBase * conn ) {
+ if ( _hooks->size() == 0 )
+ return;
+
+ for ( list<DBConnectionHook*>::iterator i = _hooks->begin(); i != _hooks->end(); i++ ) {
+ (*i)->onHandedOut( conn );
+ }
+ }
+
+ void DBConnectionPool::onDestroy( DBClientBase * conn ) {
+ if ( _hooks->size() == 0 )
+ return;
+
+ for ( list<DBConnectionHook*>::iterator i = _hooks->begin(); i != _hooks->end(); i++ ) {
+ (*i)->onDestroy( conn );
+ }
+ }
+
+ void DBConnectionPool::appendInfo( BSONObjBuilder& b ) {
+
+ int avail = 0;
+ long long created = 0;
+
+
+ map<ConnectionString::ConnectionType,long long> createdByType;
+
+ set<string> replicaSets;
+
+ BSONObjBuilder bb( b.subobjStart( "hosts" ) );
+ {
+ scoped_lock lk( _mutex );
+ for ( PoolMap::iterator i=_pools.begin(); i!=_pools.end(); ++i ) {
+ if ( i->second.numCreated() == 0 )
+ continue;
+
+ string s = str::stream() << i->first.ident << "::" << i->first.timeout;
+
+ BSONObjBuilder temp( bb.subobjStart( s ) );
+ temp.append( "available" , i->second.numAvailable() );
+ temp.appendNumber( "created" , i->second.numCreated() );
+ temp.done();
+
+ avail += i->second.numAvailable();
+ created += i->second.numCreated();
+
+ long long& x = createdByType[i->second.type()];
+ x += i->second.numCreated();
+
+ {
+ string setName = i->first.ident;
+ if ( setName.find( "/" ) != string::npos ) {
+ setName = setName.substr( 0 , setName.find( "/" ) );
+ replicaSets.insert( setName );
+ }
+ }
+ }
+ }
+ bb.done();
+
+
+ BSONObjBuilder setBuilder( b.subobjStart( "replicaSets" ) );
+ for ( set<string>::iterator i=replicaSets.begin(); i!=replicaSets.end(); ++i ) {
+ string rs = *i;
+ ReplicaSetMonitorPtr m = ReplicaSetMonitor::get( rs );
+ if ( ! m ) {
+ warning() << "no monitor for set: " << rs << endl;
+ continue;
+ }
+
+ BSONObjBuilder temp( setBuilder.subobjStart( rs ) );
+ m->appendInfo( temp );
+ temp.done();
+ }
+ setBuilder.done();
+
+ {
+ BSONObjBuilder temp( bb.subobjStart( "createdByType" ) );
+ for ( map<ConnectionString::ConnectionType,long long>::iterator i=createdByType.begin(); i!=createdByType.end(); ++i ) {
+ temp.appendNumber( ConnectionString::typeToString( i->first ) , i->second );
+ }
+ temp.done();
+ }
+
+ b.append( "totalAvailable" , avail );
+ b.appendNumber( "totalCreated" , created );
+ }
+
+ bool DBConnectionPool::serverNameCompare::operator()( const string& a , const string& b ) const{
+ const char* ap = a.c_str();
+ const char* bp = b.c_str();
+
+ while (true){
+ if (*ap == '\0' || *ap == '/'){
+ if (*bp == '\0' || *bp == '/')
+ return false; // equal strings
+ else
+ return true; // a is shorter
+ }
+
+ if (*bp == '\0' || *bp == '/')
+ return false; // b is shorter
+
+ if ( *ap < *bp)
+ return true;
+ else if (*ap > *bp)
+ return false;
+
+ ++ap;
+ ++bp;
+ }
+ assert(false);
+ }
+
+ bool DBConnectionPool::poolKeyCompare::operator()( const PoolKey& a , const PoolKey& b ) const {
+ if (DBConnectionPool::serverNameCompare()( a.ident , b.ident ))
+ return true;
+
+ if (DBConnectionPool::serverNameCompare()( b.ident , a.ident ))
+ return false;
+
+ return a.timeout < b.timeout;
+ }
+
+
+ void DBConnectionPool::taskDoWork() {
+ vector<DBClientBase*> toDelete;
+
+ {
+ // we need to get the connections inside the lock
+ // but we can actually delete them outside
+ scoped_lock lk( _mutex );
+ for ( PoolMap::iterator i=_pools.begin(); i!=_pools.end(); ++i ) {
+ i->second.getStaleConnections( toDelete );
+ }
+ }
+
+ for ( size_t i=0; i<toDelete.size(); i++ ) {
+ try {
+ onDestroy( toDelete[i] );
+ delete toDelete[i];
+ }
+ catch ( ... ) {
+ // we don't care if there was a socket error
+ }
+ }
+ }
+
+ // ------ ScopedDbConnection ------
+
+ ScopedDbConnection * ScopedDbConnection::steal() {
+ assert( _conn );
+ ScopedDbConnection * n = new ScopedDbConnection( _host , _conn, _socketTimeout );
+ _conn = 0;
+ return n;
+ }
+
+ void ScopedDbConnection::_setSocketTimeout(){
+ if( ! _conn ) return;
+ if( _conn->type() == ConnectionString::MASTER )
+ (( DBClientConnection* ) _conn)->setSoTimeout( _socketTimeout );
+ else if( _conn->type() == ConnectionString::SYNC )
+ (( SyncClusterConnection* ) _conn)->setAllSoTimeouts( _socketTimeout );
+ }
+
+ ScopedDbConnection::~ScopedDbConnection() {
+ if ( _conn ) {
+ if ( ! _conn->isFailed() ) {
+ /* see done() comments above for why we log this line */
+ log() << "~ScopedDbConnection: _conn != null" << endl;
+ }
+ kill();
+ }
+ }
+
+ ScopedDbConnection::ScopedDbConnection(const Shard& shard, double socketTimeout )
+ : _host( shard.getConnString() ) , _conn( pool.get(_host, socketTimeout) ), _socketTimeout( socketTimeout ) {
+ _setSocketTimeout();
+ }
+
+ ScopedDbConnection::ScopedDbConnection(const Shard* shard, double socketTimeout )
+ : _host( shard->getConnString() ) , _conn( pool.get(_host, socketTimeout) ), _socketTimeout( socketTimeout ) {
+ _setSocketTimeout();
+ }
+
+ AtomicUInt AScopedConnection::_numConnections;
+
+} // namespace mongo
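
As a small illustration of the hook callbacks invoked above (onCreate / onHandedOut / onDestroy), this sketch registers a logging hook on the global pool. getServerAddress() is assumed to be available on DBClientBase via dbclient.h, and the pool takes ownership of the hook (which currently leaks at shutdown, per the header).

    #include "mongo/client/connpool.h"
    #include <iostream>

    class LoggingHook : public mongo::DBConnectionHook {
    public:
        virtual void onCreate( mongo::DBClientBase* conn ) {
            std::cout << "created connection to " << conn->getServerAddress() << std::endl;
        }
        virtual void onHandedOut( mongo::DBClientBase* conn ) {
            std::cout << "handed out " << conn->getServerAddress() << std::endl;
        }
        virtual void onDestroy( mongo::DBClientBase* conn ) {
            std::cout << "destroying " << conn->getServerAddress() << std::endl;
        }
    };

    int main() {
        mongo::pool.addHook( new LoggingHook() );   // ownership passes to the pool
        // subsequent pool.get() / ScopedDbConnection uses now trigger the callbacks
        return 0;
    }
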
diff --git a/src/mongo/client/connpool.h b/src/mongo/client/connpool.h
new file mode 100644
index 00000000000..8733abb1f90
--- /dev/null
+++ b/src/mongo/client/connpool.h
@@ -0,0 +1,291 @@
+/** @file connpool.h */
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stack>
+#include "dbclient.h"
+#include "redef_macros.h"
+
+#include "../util/background.h"
+
+namespace mongo {
+
+ class Shard;
+ class DBConnectionPool;
+
+ /**
+ * not thread safe
+ * thread safety is handled by DBConnectionPool
+ */
+ class PoolForHost {
+ public:
+ PoolForHost()
+ : _created(0) {}
+
+ PoolForHost( const PoolForHost& other ) {
+ assert(other._pool.size() == 0);
+ _created = other._created;
+ assert( _created == 0 );
+ }
+
+ ~PoolForHost();
+
+ int numAvailable() const { return (int)_pool.size(); }
+
+ void createdOne( DBClientBase * base );
+ long long numCreated() const { return _created; }
+
+ ConnectionString::ConnectionType type() const { assert(_created); return _type; }
+
+ /**
+ * gets a connection or return NULL
+ */
+ DBClientBase * get( DBConnectionPool * pool , double socketTimeout );
+
+ void done( DBConnectionPool * pool , DBClientBase * c );
+
+ void flush();
+
+ void getStaleConnections( vector<DBClientBase*>& stale );
+
+ static void setMaxPerHost( unsigned max ) { _maxPerHost = max; }
+ static unsigned getMaxPerHost() { return _maxPerHost; }
+ private:
+
+ struct StoredConnection {
+ StoredConnection( DBClientBase * c );
+
+ bool ok( time_t now );
+
+ DBClientBase* conn;
+ time_t when;
+ };
+
+ std::stack<StoredConnection> _pool;
+
+ long long _created;
+ ConnectionString::ConnectionType _type;
+
+ static unsigned _maxPerHost;
+ };
+
+ class DBConnectionHook {
+ public:
+ virtual ~DBConnectionHook() {}
+ virtual void onCreate( DBClientBase * conn ) {}
+ virtual void onHandedOut( DBClientBase * conn ) {}
+ virtual void onDestroy( DBClientBase * conn ) {}
+ };
+
+ /** Database connection pool.
+
+ Generally, use ScopedDbConnection and do not call these directly.
+
+ This class, so far, is suitable for use with unauthenticated connections.
+ Support for authenticated connections requires some adjustments: please
+ request...
+
+ Usage:
+
+ {
+ ScopedDbConnection c("myserver");
+ c.conn()...
+ }
+ */
+ class DBConnectionPool : public PeriodicTask {
+
+ public:
+
+ DBConnectionPool();
+ ~DBConnectionPool();
+
+ /** right now just controls some asserts. defaults to "dbconnectionpool" */
+ void setName( const string& name ) { _name = name; }
+
+ void onCreate( DBClientBase * conn );
+ void onHandedOut( DBClientBase * conn );
+ void onDestroy( DBClientBase * conn );
+
+ void flush();
+
+ DBClientBase *get(const string& host, double socketTimeout = 0);
+ DBClientBase *get(const ConnectionString& host, double socketTimeout = 0);
+
+ void release(const string& host, DBClientBase *c);
+
+ void addHook( DBConnectionHook * hook ); // we take ownership
+ void appendInfo( BSONObjBuilder& b );
+
+ /** compares server names, but is smart about replica set names */
+ struct serverNameCompare {
+ bool operator()( const string& a , const string& b ) const;
+ };
+
+ virtual string taskName() const { return "DBConnectionPool-cleaner"; }
+ virtual void taskDoWork();
+
+ private:
+ DBConnectionPool( DBConnectionPool& p );
+
+ DBClientBase* _get( const string& ident , double socketTimeout );
+
+ DBClientBase* _finishCreate( const string& ident , double socketTimeout, DBClientBase* conn );
+
+ struct PoolKey {
+ PoolKey( string i , double t ) : ident( i ) , timeout( t ) {}
+ string ident;
+ double timeout;
+ };
+
+ struct poolKeyCompare {
+ bool operator()( const PoolKey& a , const PoolKey& b ) const;
+ };
+
+ typedef map<PoolKey,PoolForHost,poolKeyCompare> PoolMap; // servername -> pool
+
+ mongo::mutex _mutex;
+ string _name;
+
+ PoolMap _pools;
+
+ // pointers owned by me, right now they leak on shutdown
+ // _hooks itself also leaks because it creates a shutdown race condition
+ list<DBConnectionHook*> * _hooks;
+
+ };
+
+ extern DBConnectionPool pool;
+
+ class AScopedConnection : boost::noncopyable {
+ public:
+ AScopedConnection() { _numConnections++; }
+ virtual ~AScopedConnection() { _numConnections--; }
+
+ virtual DBClientBase* get() = 0;
+ virtual void done() = 0;
+ virtual string getHost() const = 0;
+
+ /**
+ * @return true iff this has a connection to the db
+ */
+ virtual bool ok() const = 0;
+
+ /**
+ * @return total number of current instances of AScopedConnection
+ */
+ static int getNumConnections() { return _numConnections; }
+
+ private:
+ static AtomicUInt _numConnections;
+ };
+
+ /** Use to get a connection from the pool. On exceptions things
+ clean up nicely (i.e. the socket gets closed automatically when the
+ scopeddbconnection goes out of scope).
+ */
+ class ScopedDbConnection : public AScopedConnection {
+ public:
+ /** the main constructor you want to use
+ throws UserException if can't connect
+ */
+ explicit ScopedDbConnection(const string& host, double socketTimeout = 0) : _host(host), _conn( pool.get(host, socketTimeout) ), _socketTimeout( socketTimeout ) {
+ _setSocketTimeout();
+ }
+
+ ScopedDbConnection() : _host( "" ) , _conn(0), _socketTimeout( 0 ) {}
+
+ /* @param conn - bind to an existing connection */
+ ScopedDbConnection(const string& host, DBClientBase* conn, double socketTimeout = 0 ) : _host( host ) , _conn( conn ), _socketTimeout( socketTimeout ) {
+ _setSocketTimeout();
+ }
+
+ /** throws UserException if can't connect */
+ explicit ScopedDbConnection(const ConnectionString& url, double socketTimeout = 0 ) : _host(url.toString()), _conn( pool.get(url, socketTimeout) ), _socketTimeout( socketTimeout ) {
+ _setSocketTimeout();
+ }
+
+ /** throws UserException if can't connect */
+ explicit ScopedDbConnection(const Shard& shard, double socketTimeout = 0 );
+ explicit ScopedDbConnection(const Shard* shard, double socketTimeout = 0 );
+
+ ~ScopedDbConnection();
+
+ /** get the associated connection object */
+ DBClientBase* operator->() {
+ uassert( 11004 , "connection was returned to the pool already" , _conn );
+ return _conn;
+ }
+
+ /** get the associated connection object */
+ DBClientBase& conn() {
+ uassert( 11005 , "connection was returned to the pool already" , _conn );
+ return *_conn;
+ }
+
+ /** get the associated connection object */
+ DBClientBase* get() {
+ uassert( 13102 , "connection was returned to the pool already" , _conn );
+ return _conn;
+ }
+
+ bool ok() const { return _conn > 0; }
+
+ string getHost() const { return _host; }
+
+ /** Force closure of the connection. You should call this if you leave it in
+ a bad state. Destructor will do this too, but it is verbose.
+ */
+ void kill() {
+ delete _conn;
+ _conn = 0;
+ }
+
+ /** Call this when you are done with the connection.
+
+ If you do not call done() before this object goes out of scope,
+ we can't be sure we fully read all expected data of a reply on the socket, so
+ we don't try to reuse the connection in that situation.
+ */
+ void done() {
+ if ( ! _conn )
+ return;
+
+ /* we could do this, but instead we assume one is using autoreconnect mode on the connection
+ if ( _conn->isFailed() )
+ kill();
+ else
+ */
+ pool.release(_host, _conn);
+ _conn = 0;
+ }
+
+ ScopedDbConnection * steal();
+
+ private:
+
+ void _setSocketTimeout();
+
+ const string _host;
+ DBClientBase *_conn;
+ const double _socketTimeout;
+
+ };
+
+} // namespace mongo
+
+#include "undef_macros.h"
diff --git a/src/mongo/client/constants.h b/src/mongo/client/constants.h
new file mode 100644
index 00000000000..54f3fd216f2
--- /dev/null
+++ b/src/mongo/client/constants.h
@@ -0,0 +1,26 @@
+// constants.h
+
+#pragma once
+
+namespace mongo {
+
+ /* query results include a 32-bit result flag word consisting of these bits */
+ enum ResultFlagType {
+ /* returned, with zero results, when getMore is called but the cursor id
+ is not valid at the server. */
+ ResultFlag_CursorNotFound = 1,
+
+ /* { $err : ... } is being returned */
+ ResultFlag_ErrSet = 2,
+
+ /* Have to update config from the server, usually $err is also set */
+ ResultFlag_ShardConfigStale = 4,
+
+ /* for backward compatibility: this lets us know the server supports
+ the QueryOption_AwaitData option. If it doesn't, a repl slave client should sleep
+ a little between getMore's.
+ */
+ ResultFlag_AwaitCapable = 8
+ };
+
+}
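
A tiny sketch of checking these bits against a reply's result flag word; the flag value here is fabricated for illustration.

    #include "mongo/client/constants.h"
    #include <iostream>

    int main() {
        int resultFlags = mongo::ResultFlag_ErrSet | mongo::ResultFlag_AwaitCapable;
        if ( resultFlags & mongo::ResultFlag_CursorNotFound )
            std::cout << "cursor id no longer valid on the server" << std::endl;
        if ( resultFlags & mongo::ResultFlag_ErrSet )
            std::cout << "reply carries { $err : ... }" << std::endl;
        if ( resultFlags & mongo::ResultFlag_AwaitCapable )
            std::cout << "server supports QueryOption_AwaitData" << std::endl;
        return 0;
    }
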
diff --git a/src/mongo/client/dbclient.cpp b/src/mongo/client/dbclient.cpp
new file mode 100644
index 00000000000..b38a85d4253
--- /dev/null
+++ b/src/mongo/client/dbclient.cpp
@@ -0,0 +1,1087 @@
+// dbclient.cpp - connect to a Mongo database as a database, from C++
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "dbclient.h"
+#include "../bson/util/builder.h"
+#include "../db/jsobj.h"
+#include "../db/json.h"
+#include "../db/instance.h"
+#include "../util/md5.hpp"
+#include "../db/dbmessage.h"
+#include "../db/cmdline.h"
+#include "connpool.h"
+#include "../s/util.h"
+#include "syncclusterconnection.h"
+
+namespace mongo {
+
+ void ConnectionString::_fillServers( string s ) {
+
+ {
+ string::size_type idx = s.find( '/' );
+ if ( idx != string::npos ) {
+ _setName = s.substr( 0 , idx );
+ s = s.substr( idx + 1 );
+ _type = SET;
+ }
+ }
+
+ string::size_type idx;
+ while ( ( idx = s.find( ',' ) ) != string::npos ) {
+ _servers.push_back( s.substr( 0 , idx ) );
+ s = s.substr( idx + 1 );
+ }
+ _servers.push_back( s );
+
+ }
+
+ void ConnectionString::_finishInit() {
+ stringstream ss;
+ if ( _type == SET )
+ ss << _setName << "/";
+ for ( unsigned i=0; i<_servers.size(); i++ ) {
+ if ( i > 0 )
+ ss << ",";
+ ss << _servers[i].toString();
+ }
+ _string = ss.str();
+ }
+
+
+ DBClientBase* ConnectionString::connect( string& errmsg, double socketTimeout ) const {
+ switch ( _type ) {
+ case MASTER: {
+ DBClientConnection * c = new DBClientConnection(true);
+ c->setSoTimeout( socketTimeout );
+ log(1) << "creating new connection to:" << _servers[0] << endl;
+ if ( ! c->connect( _servers[0] , errmsg ) ) {
+ delete c;
+ return 0;
+ }
+ log(1) << "connected connection!" << endl;
+ return c;
+ }
+
+ case PAIR:
+ case SET: {
+ DBClientReplicaSet * set = new DBClientReplicaSet( _setName , _servers , socketTimeout );
+ if( ! set->connect() ) {
+ delete set;
+ errmsg = "connect failed to set ";
+ errmsg += toString();
+ return 0;
+ }
+ return set;
+ }
+
+ case SYNC: {
+ // TODO , don't copy
+ list<HostAndPort> l;
+ for ( unsigned i=0; i<_servers.size(); i++ )
+ l.push_back( _servers[i] );
+ SyncClusterConnection* c = new SyncClusterConnection( l, socketTimeout );
+ return c;
+ }
+
+ case INVALID:
+ throw UserException( 13421 , "trying to connect to invalid ConnectionString" );
+ break;
+ }
+
+ assert( 0 );
+ return 0;
+ }
+
+ ConnectionString ConnectionString::parse( const string& host , string& errmsg ) {
+
+ string::size_type i = host.find( '/' );
+ if ( i != string::npos && i != 0) {
+ // replica set
+ return ConnectionString( SET , host.substr( i + 1 ) , host.substr( 0 , i ) );
+ }
+
+ int numCommas = str::count( host , ',' );
+
+ if( numCommas == 0 )
+ return ConnectionString( HostAndPort( host ) );
+
+ if ( numCommas == 1 )
+ return ConnectionString( PAIR , host );
+
+ if ( numCommas == 2 )
+ return ConnectionString( SYNC , host );
+
+ errmsg = (string)"invalid hostname [" + host + "]";
+ return ConnectionString(); // INVALID
+ }
+
+ string ConnectionString::typeToString( ConnectionType type ) {
+ switch ( type ) {
+ case INVALID:
+ return "invalid";
+ case MASTER:
+ return "master";
+ case PAIR:
+ return "pair";
+ case SET:
+ return "set";
+ case SYNC:
+ return "sync";
+ }
+ assert(0);
+ return "";
+ }
+
+
+ Query& Query::where(const string &jscode, BSONObj scope) {
+ /* use where() before sort() and hint() and explain(), else this will assert. */
+ assert( ! isComplex() );
+ BSONObjBuilder b;
+ b.appendElements(obj);
+ b.appendWhere(jscode, scope);
+ obj = b.obj();
+ return *this;
+ }
+
+ void Query::makeComplex() {
+ if ( isComplex() )
+ return;
+ BSONObjBuilder b;
+ b.append( "query", obj );
+ obj = b.obj();
+ }
+
+ Query& Query::sort(const BSONObj& s) {
+ appendComplex( "orderby", s );
+ return *this;
+ }
+
+ Query& Query::hint(BSONObj keyPattern) {
+ appendComplex( "$hint", keyPattern );
+ return *this;
+ }
+
+ Query& Query::explain() {
+ appendComplex( "$explain", true );
+ return *this;
+ }
+
+ Query& Query::snapshot() {
+ appendComplex( "$snapshot", true );
+ return *this;
+ }
+
+ Query& Query::minKey( const BSONObj &val ) {
+ appendComplex( "$min", val );
+ return *this;
+ }
+
+ Query& Query::maxKey( const BSONObj &val ) {
+ appendComplex( "$max", val );
+ return *this;
+ }
+
+ bool Query::isComplex( bool * hasDollar ) const {
+ if ( obj.hasElement( "query" ) ) {
+ if ( hasDollar )
+ hasDollar[0] = false;
+ return true;
+ }
+
+ if ( obj.hasElement( "$query" ) ) {
+ if ( hasDollar )
+ hasDollar[0] = true;
+ return true;
+ }
+
+ return false;
+ }
+
+ BSONObj Query::getFilter() const {
+ bool hasDollar;
+ if ( ! isComplex( &hasDollar ) )
+ return obj;
+
+ return obj.getObjectField( hasDollar ? "$query" : "query" );
+ }
+ BSONObj Query::getSort() const {
+ if ( ! isComplex() )
+ return BSONObj();
+ BSONObj ret = obj.getObjectField( "orderby" );
+ if (ret.isEmpty())
+ ret = obj.getObjectField( "$orderby" );
+ return ret;
+ }
+ BSONObj Query::getHint() const {
+ if ( ! isComplex() )
+ return BSONObj();
+ return obj.getObjectField( "$hint" );
+ }
+ bool Query::isExplain() const {
+ return isComplex() && obj.getBoolField( "$explain" );
+ }
+
+ string Query::toString() const {
+ return obj.toString();
+ }
+
+ /* --- dbclientcommands --- */
+
+ bool DBClientWithCommands::isOk(const BSONObj& o) {
+ return o["ok"].trueValue();
+ }
+
+ bool DBClientWithCommands::isNotMasterErrorString( const BSONElement& e ) {
+ return e.type() == String && str::contains( e.valuestr() , "not master" );
+ }
+
+
+ enum QueryOptions DBClientWithCommands::availableOptions() {
+ if ( !_haveCachedAvailableOptions ) {
+ BSONObj ret;
+ if ( runCommand( "admin", BSON( "availablequeryoptions" << 1 ), ret ) ) {
+ _cachedAvailableOptions = ( enum QueryOptions )( ret.getIntField( "options" ) );
+ }
+ _haveCachedAvailableOptions = true;
+ }
+ return _cachedAvailableOptions;
+ }
+
+ inline bool DBClientWithCommands::runCommand(const string &dbname, const BSONObj& cmd, BSONObj &info, int options) {
+ string ns = dbname + ".$cmd";
+ info = findOne(ns, cmd, 0 , options);
+ return isOk(info);
+ }
+
+ /* note - we build a bson obj here -- for something that is super common like getlasterror you
+ should have that object prebuilt as that would be faster.
+ */
+ bool DBClientWithCommands::simpleCommand(const string &dbname, BSONObj *info, const string &command) {
+ BSONObj o;
+ if ( info == 0 )
+ info = &o;
+ BSONObjBuilder b;
+ b.append(command, 1);
+ return runCommand(dbname, b.done(), *info);
+ }
+
+ unsigned long long DBClientWithCommands::count(const string &myns, const BSONObj& query, int options, int limit, int skip ) {
+ NamespaceString ns(myns);
+ BSONObj cmd = _countCmd( myns , query , options , limit , skip );
+ BSONObj res;
+ if( !runCommand(ns.db.c_str(), cmd, res, options) )
+ uasserted(11010,string("count fails:") + res.toString());
+ return res["n"].numberLong();
+ }
+
+ BSONObj DBClientWithCommands::_countCmd(const string &myns, const BSONObj& query, int options, int limit, int skip ) {
+ NamespaceString ns(myns);
+ BSONObjBuilder b;
+ b.append( "count" , ns.coll );
+ b.append( "query" , query );
+ if ( limit )
+ b.append( "limit" , limit );
+ if ( skip )
+ b.append( "skip" , skip );
+ return b.obj();
+ }
+
+ BSONObj DBClientWithCommands::getLastErrorDetailed(bool fsync, bool j, int w, int wtimeout) {
+ BSONObj info;
+ BSONObjBuilder b;
+ b.append( "getlasterror", 1 );
+
+ if ( fsync )
+ b.append( "fsync", 1 );
+ if ( j )
+ b.append( "j", 1 );
+
+ // only affects the request when more than one node is involved
+ if ( w >= 1 )
+ b.append( "w", w );
+ else if ( w == -1 )
+ b.append( "w", "majority" );
+
+ if ( wtimeout > 0 )
+ b.append( "wtimeout", wtimeout );
+
+ runCommand("admin", b.obj(), info);
+
+ return info;
+ }
+
+ string DBClientWithCommands::getLastError(bool fsync, bool j, int w, int wtimeout) {
+ BSONObj info = getLastErrorDetailed(fsync, j, w, wtimeout);
+ return getLastErrorString( info );
+ }
+
+ string DBClientWithCommands::getLastErrorString( const BSONObj& info ) {
+ BSONElement e = info["err"];
+ if( e.eoo() ) return "";
+ if( e.type() == Object ) return e.toString();
+ return e.str();
+ }
+
+ const BSONObj getpreverrorcmdobj = fromjson("{getpreverror:1}");
+
+ BSONObj DBClientWithCommands::getPrevError() {
+ BSONObj info;
+ runCommand("admin", getpreverrorcmdobj, info);
+ return info;
+ }
+
+ BSONObj getnoncecmdobj = fromjson("{getnonce:1}");
+
+ string DBClientWithCommands::createPasswordDigest( const string & username , const string & clearTextPassword ) {
+ md5digest d;
+ {
+ md5_state_t st;
+ md5_init(&st);
+ md5_append(&st, (const md5_byte_t *) username.data(), username.length());
+ md5_append(&st, (const md5_byte_t *) ":mongo:", 7 );
+ md5_append(&st, (const md5_byte_t *) clearTextPassword.data(), clearTextPassword.length());
+ md5_finish(&st, d);
+ }
+ return digestToString( d );
+ }
+
+ bool DBClientWithCommands::auth(const string &dbname, const string &username, const string &password_text, string& errmsg, bool digestPassword) {
+ string password = password_text;
+ if( digestPassword )
+ password = createPasswordDigest( username , password_text );
+
+ BSONObj info;
+ string nonce;
+ if( !runCommand(dbname, getnoncecmdobj, info) ) {
+ errmsg = "getnonce fails - connection problem?";
+ return false;
+ }
+ {
+ BSONElement e = info.getField("nonce");
+ assert( e.type() == String );
+ nonce = e.valuestr();
+ }
+
+ BSONObj authCmd;
+ BSONObjBuilder b;
+ {
+
+ b << "authenticate" << 1 << "nonce" << nonce << "user" << username;
+ md5digest d;
+ {
+ md5_state_t st;
+ md5_init(&st);
+ md5_append(&st, (const md5_byte_t *) nonce.c_str(), nonce.size() );
+ md5_append(&st, (const md5_byte_t *) username.data(), username.length());
+ md5_append(&st, (const md5_byte_t *) password.c_str(), password.size() );
+ md5_finish(&st, d);
+ }
+ b << "key" << digestToString( d );
+ authCmd = b.done();
+ }
+
+ if( runCommand(dbname, authCmd, info) )
+ return true;
+
+ errmsg = info.toString();
+ return false;
+ }
+
+ BSONObj ismastercmdobj = fromjson("{\"ismaster\":1}");
+
+ bool DBClientWithCommands::isMaster(bool& isMaster, BSONObj *info) {
+ BSONObj o;
+ if ( info == 0 )
+ info = &o;
+ bool ok = runCommand("admin", ismastercmdobj, *info);
+ isMaster = info->getField("ismaster").trueValue();
+ return ok;
+ }
+
+ bool DBClientWithCommands::createCollection(const string &ns, long long size, bool capped, int max, BSONObj *info) {
+ assert(!capped||size);
+ BSONObj o;
+ if ( info == 0 ) info = &o;
+ BSONObjBuilder b;
+ string db = nsToDatabase(ns.c_str());
+ b.append("create", ns.c_str() + db.length() + 1);
+ if ( size ) b.append("size", size);
+ if ( capped ) b.append("capped", true);
+ if ( max ) b.append("max", max);
+ return runCommand(db.c_str(), b.done(), *info);
+ }
+
+ bool DBClientWithCommands::copyDatabase(const string &fromdb, const string &todb, const string &fromhost, BSONObj *info) {
+ BSONObj o;
+ if ( info == 0 ) info = &o;
+ BSONObjBuilder b;
+ b.append("copydb", 1);
+ b.append("fromhost", fromhost);
+ b.append("fromdb", fromdb);
+ b.append("todb", todb);
+ return runCommand("admin", b.done(), *info);
+ }
+
+ bool DBClientWithCommands::setDbProfilingLevel(const string &dbname, ProfilingLevel level, BSONObj *info ) {
+ BSONObj o;
+ if ( info == 0 ) info = &o;
+
+ if ( level ) {
+ // Create system.profile collection. If it already exists this does nothing.
+ // TODO: move this into the db instead of here so that all
+ // drivers don't have to do this.
+ string ns = dbname + ".system.profile";
+ createCollection(ns.c_str(), 1024 * 1024, true, 0, info);
+ }
+
+ BSONObjBuilder b;
+ b.append("profile", (int) level);
+ return runCommand(dbname, b.done(), *info);
+ }
+
+ BSONObj getprofilingcmdobj = fromjson("{\"profile\":-1}");
+
+ bool DBClientWithCommands::getDbProfilingLevel(const string &dbname, ProfilingLevel& level, BSONObj *info) {
+ BSONObj o;
+ if ( info == 0 ) info = &o;
+ if ( runCommand(dbname, getprofilingcmdobj, *info) ) {
+ level = (ProfilingLevel) info->getIntField("was");
+ return true;
+ }
+ return false;
+ }
+
+ DBClientWithCommands::MROutput DBClientWithCommands::MRInline (BSON("inline" << 1));
+
+ BSONObj DBClientWithCommands::mapreduce(const string &ns, const string &jsmapf, const string &jsreducef, BSONObj query, MROutput output) {
+ BSONObjBuilder b;
+ b.append("mapreduce", nsGetCollection(ns));
+ b.appendCode("map", jsmapf);
+ b.appendCode("reduce", jsreducef);
+ if( !query.isEmpty() )
+ b.append("query", query);
+ b.append("out", output.out);
+ BSONObj info;
+ runCommand(nsGetDB(ns), b.done(), info);
+ return info;
+ }
+
+ bool DBClientWithCommands::eval(const string &dbname, const string &jscode, BSONObj& info, BSONElement& retValue, BSONObj *args) {
+ BSONObjBuilder b;
+ b.appendCode("$eval", jscode);
+ if ( args )
+ b.appendArray("args", *args);
+ bool ok = runCommand(dbname, b.done(), info);
+ if ( ok )
+ retValue = info.getField("retval");
+ return ok;
+ }
+
+ bool DBClientWithCommands::eval(const string &dbname, const string &jscode) {
+ BSONObj info;
+ BSONElement retValue;
+ return eval(dbname, jscode, info, retValue);
+ }
+
+ list<string> DBClientWithCommands::getDatabaseNames() {
+ BSONObj info;
+ uassert( 10005 , "listdatabases failed" , runCommand( "admin" , BSON( "listDatabases" << 1 ) , info ) );
+ uassert( 10006 , "listDatabases.databases not array" , info["databases"].type() == Array );
+
+ list<string> names;
+
+ BSONObjIterator i( info["databases"].embeddedObjectUserCheck() );
+ while ( i.more() ) {
+ names.push_back( i.next().embeddedObjectUserCheck()["name"].valuestr() );
+ }
+
+ return names;
+ }
+
+ list<string> DBClientWithCommands::getCollectionNames( const string& db ) {
+ list<string> names;
+
+ string ns = db + ".system.namespaces";
+ auto_ptr<DBClientCursor> c = query( ns.c_str() , BSONObj() );
+ while ( c->more() ) {
+ string name = c->next()["name"].valuestr();
+ if ( name.find( "$" ) != string::npos )
+ continue;
+ names.push_back( name );
+ }
+ return names;
+ }
+
+ bool DBClientWithCommands::exists( const string& ns ) {
+
+ string db = nsGetDB( ns ) + ".system.namespaces";
+ BSONObj q = BSON( "name" << ns );
+ return count( db.c_str() , q, QueryOption_SlaveOk ) != 0;
+ }
+
+ /* --- dbclientconnection --- */
+
+ bool DBClientConnection::auth(const string &dbname, const string &username, const string &password_text, string& errmsg, bool digestPassword) {
+ string password = password_text;
+ if( digestPassword )
+ password = createPasswordDigest( username , password_text );
+
+ if( autoReconnect ) {
+ /* note we remember the auth info before we attempt to auth -- if the connection is broken, we will
+ then have it for the next autoreconnect attempt.
+ */
+ pair<string,string> p = pair<string,string>(username, password);
+ authCache[dbname] = p;
+ }
+
+ return DBClientBase::auth(dbname, username, password.c_str(), errmsg, false);
+ }
+
+ /** query N objects from the database into an array. makes sense mostly when you want a small number of results. if a huge number, use
+ query() and iterate the cursor.
+ */
+ void DBClientInterface::findN(vector<BSONObj>& out, const string& ns, Query query, int nToReturn, int nToSkip, const BSONObj *fieldsToReturn, int queryOptions) {
+ out.reserve(nToReturn);
+
+ auto_ptr<DBClientCursor> c =
+ this->query(ns, query, nToReturn, nToSkip, fieldsToReturn, queryOptions);
+
+ uassert( 10276 , str::stream() << "DBClientBase::findN: transport error: " << getServerAddress() << " ns: " << ns << " query: " << query.toString(), c.get() );
+
+ if ( c->hasResultFlag( ResultFlag_ShardConfigStale ) )
+ throw RecvStaleConfigException( ns , "findN stale config" );
+
+ for( int i = 0; i < nToReturn; i++ ) {
+ if ( !c->more() )
+ break;
+ out.push_back( c->nextSafe().copy() );
+ }
+ }
+
+ BSONObj DBClientInterface::findOne(const string &ns, const Query& query, const BSONObj *fieldsToReturn, int queryOptions) {
+ vector<BSONObj> v;
+ findN(v, ns, query, 1, 0, fieldsToReturn, queryOptions);
+ return v.empty() ? BSONObj() : v[0];
+ }
+
+ bool DBClientConnection::connect(const HostAndPort& server, string& errmsg) {
+ _server = server;
+ _serverString = _server.toString();
+ return _connect( errmsg );
+ }
+
+ bool DBClientConnection::_connect( string& errmsg ) {
+ _serverString = _server.toString();
+
+ // we keep around SockAddr for connection life -- maybe MessagingPort
+ // requires that?
+ server.reset(new SockAddr(_server.host().c_str(), _server.port()));
+ p.reset(new MessagingPort( _so_timeout, _logLevel ));
+
+ if (_server.host().empty() || server->getAddr() == "0.0.0.0") {
+ errmsg = str::stream() << "couldn't connect to server " << _server.toString();
+ return false;
+ }
+
+ // if( _so_timeout == 0 ){
+ // printStackTrace();
+ // log() << "Connecting to server " << _serverString << " timeout " << _so_timeout << endl;
+ // }
+ if ( !p->connect(*server) ) {
+ errmsg = str::stream() << "couldn't connect to server " << _server.toString();
+ _failed = true;
+ return false;
+ }
+
+#ifdef MONGO_SSL
+ if ( cmdLine.sslOnNormalPorts ) {
+ p->secure( sslManager() );
+ }
+#endif
+
+ return true;
+ }
+
+
+ inline bool DBClientConnection::runCommand(const string &dbname, const BSONObj& cmd, BSONObj &info, int options) {
+ if ( DBClientWithCommands::runCommand( dbname , cmd , info , options ) )
+ return true;
+
+ if ( clientSet && isNotMasterErrorString( info["errmsg"] ) ) {
+ clientSet->isntMaster();
+ }
+
+ return false;
+ }
+
+
+ void DBClientConnection::_checkConnection() {
+ if ( !_failed )
+ return;
+ if ( lastReconnectTry && time(0)-lastReconnectTry < 2 ) {
+ // we wait a little before a reconnect attempt to avoid constant hammering.
+ // but we still throw -- we don't want to try to use a connection in a bad state
+ throw SocketException( SocketException::FAILED_STATE , toString() );
+ }
+ if ( !autoReconnect )
+ throw SocketException( SocketException::FAILED_STATE , toString() );
+
+ lastReconnectTry = time(0);
+ log(_logLevel) << "trying reconnect to " << _serverString << endl;
+ string errmsg;
+ _failed = false;
+ if ( ! _connect(errmsg) ) {
+ _failed = true;
+ log(_logLevel) << "reconnect " << _serverString << " failed " << errmsg << endl;
+ throw SocketException( SocketException::CONNECT_ERROR , toString() );
+ }
+
+ log(_logLevel) << "reconnect " << _serverString << " ok" << endl;
+ for( map< string, pair<string,string> >::iterator i = authCache.begin(); i != authCache.end(); i++ ) {
+ const char *dbname = i->first.c_str();
+ const char *username = i->second.first.c_str();
+ const char *password = i->second.second.c_str();
+ if( !DBClientBase::auth(dbname, username, password, errmsg, false) )
+ log(_logLevel) << "reconnect: auth failed db:" << dbname << " user:" << username << ' ' << errmsg << '\n';
+ }
+ }
+
+ auto_ptr<DBClientCursor> DBClientBase::query(const string &ns, Query query, int nToReturn,
+ int nToSkip, const BSONObj *fieldsToReturn, int queryOptions , int batchSize ) {
+ auto_ptr<DBClientCursor> c( new DBClientCursor( this,
+ ns, query.obj, nToReturn, nToSkip,
+ fieldsToReturn, queryOptions , batchSize ) );
+ if ( c->init() )
+ return c;
+ return auto_ptr< DBClientCursor >( 0 );
+ }
+
+ auto_ptr<DBClientCursor> DBClientBase::getMore( const string &ns, long long cursorId, int nToReturn, int options ) {
+ auto_ptr<DBClientCursor> c( new DBClientCursor( this, ns, cursorId, nToReturn, options ) );
+ if ( c->init() )
+ return c;
+ return auto_ptr< DBClientCursor >( 0 );
+ }
+
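+ // small adapter: wraps a per-document callback ( void(const BSONObj&) ) so it can be
+ // driven by the DBClientCursorBatchIterator flavour of query() below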
+ struct DBClientFunConvertor {
+ void operator()( DBClientCursorBatchIterator &i ) {
+ while( i.moreInCurrentBatch() ) {
+ _f( i.nextSafe() );
+ }
+ }
+ boost::function<void(const BSONObj &)> _f;
+ };
+
+ unsigned long long DBClientConnection::query( boost::function<void(const BSONObj&)> f, const string& ns, Query query, const BSONObj *fieldsToReturn, int queryOptions ) {
+ DBClientFunConvertor fun;
+ fun._f = f;
+ boost::function<void(DBClientCursorBatchIterator &)> ptr( fun );
+ return DBClientConnection::query( ptr, ns, query, fieldsToReturn, queryOptions );
+ }
+
+ unsigned long long DBClientConnection::query( boost::function<void(DBClientCursorBatchIterator &)> f, const string& ns, Query query, const BSONObj *fieldsToReturn, int queryOptions ) {
+ // mask options
+ queryOptions &= (int)( QueryOption_NoCursorTimeout | QueryOption_SlaveOk );
+ unsigned long long n = 0;
+
+ bool doExhaust = ( availableOptions() & QueryOption_Exhaust );
+ if ( doExhaust ) {
+ queryOptions |= (int)QueryOption_Exhaust;
+ }
+ auto_ptr<DBClientCursor> c( this->query(ns, query, 0, 0, fieldsToReturn, queryOptions) );
+ uassert( 13386, "socket error for mapping query", c.get() );
+
+ if ( !doExhaust ) {
+ while( c->more() ) {
+ DBClientCursorBatchIterator i( *c );
+ f( i );
+ n += i.n();
+ }
+ return n;
+ }
+
+ try {
+ while( 1 ) {
+ while( c->moreInCurrentBatch() ) {
+ DBClientCursorBatchIterator i( *c );
+ f( i );
+ n += i.n();
+ }
+
+ if( c->getCursorId() == 0 )
+ break;
+
+ c->exhaustReceiveMore();
+ }
+ }
+ catch(std::exception&) {
+ /* connection CANNOT be used anymore as more data may be on the way from the server.
+ we have to reconnect.
+ */
+ _failed = true;
+ p->shutdown();
+ throw;
+ }
+
+ return n;
+ }
+
+ void DBClientBase::insert( const string & ns , BSONObj obj , int flags) {
+ Message toSend;
+
+ BufBuilder b;
+ b.appendNum( flags );
+ b.appendStr( ns );
+ obj.appendSelfToBufBuilder( b );
+
+ toSend.setData( dbInsert , b.buf() , b.len() );
+
+ say( toSend );
+ }
+
+ void DBClientBase::insert( const string & ns , const vector< BSONObj > &v , int flags) {
+ Message toSend;
+
+ BufBuilder b;
+ b.appendNum( flags );
+ b.appendStr( ns );
+ for( vector< BSONObj >::const_iterator i = v.begin(); i != v.end(); ++i )
+ i->appendSelfToBufBuilder( b );
+
+ toSend.setData( dbInsert, b.buf(), b.len() );
+
+ say( toSend );
+ }
+
+ void DBClientBase::remove( const string & ns , Query obj , bool justOne ) {
+ Message toSend;
+
+ BufBuilder b;
+ int opts = 0;
+ b.appendNum( opts );
+ b.appendStr( ns );
+
+ int flags = 0;
+ if ( justOne )
+ flags |= RemoveOption_JustOne;
+ b.appendNum( flags );
+
+ obj.obj.appendSelfToBufBuilder( b );
+
+ toSend.setData( dbDelete , b.buf() , b.len() );
+
+ say( toSend );
+ }
+
+ void DBClientBase::update( const string & ns , Query query , BSONObj obj , bool upsert , bool multi ) {
+
+ BufBuilder b;
+ b.appendNum( (int)0 ); // reserved
+ b.appendStr( ns );
+
+ int flags = 0;
+ if ( upsert ) flags |= UpdateOption_Upsert;
+ if ( multi ) flags |= UpdateOption_Multi;
+ b.appendNum( flags );
+
+ query.obj.appendSelfToBufBuilder( b );
+ obj.appendSelfToBufBuilder( b );
+
+ Message toSend;
+ toSend.setData( dbUpdate , b.buf() , b.len() );
+
+ say( toSend );
+
+
+ }
+
+
+
+ auto_ptr<DBClientCursor> DBClientWithCommands::getIndexes( const string &ns ) {
+ return query( Namespace( ns.c_str() ).getSisterNS( "system.indexes" ).c_str() , BSON( "ns" << ns ) );
+ }
+
+ void DBClientWithCommands::dropIndex( const string& ns , BSONObj keys ) {
+ dropIndex( ns , genIndexName( keys ) );
+ }
+
+
+ void DBClientWithCommands::dropIndex( const string& ns , const string& indexName ) {
+ BSONObj info;
+ if ( ! runCommand( nsToDatabase( ns.c_str() ) ,
+ BSON( "deleteIndexes" << NamespaceString( ns ).coll << "index" << indexName ) ,
+ info ) ) {
+ log(_logLevel) << "dropIndex failed: " << info << endl;
+ uassert( 10007 , "dropIndex failed" , 0 );
+ }
+ resetIndexCache();
+ }
+
+ void DBClientWithCommands::dropIndexes( const string& ns ) {
+ BSONObj info;
+ uassert( 10008 , "dropIndexes failed" , runCommand( nsToDatabase( ns.c_str() ) ,
+ BSON( "deleteIndexes" << NamespaceString( ns ).coll << "index" << "*") ,
+ info ) );
+ resetIndexCache();
+ }
+
+ void DBClientWithCommands::reIndex( const string& ns ) {
+ list<BSONObj> all;
+ auto_ptr<DBClientCursor> i = getIndexes( ns );
+ while ( i->more() ) {
+ all.push_back( i->next().getOwned() );
+ }
+
+ dropIndexes( ns );
+
+ for ( list<BSONObj>::iterator i=all.begin(); i!=all.end(); i++ ) {
+ BSONObj o = *i;
+ insert( Namespace( ns.c_str() ).getSisterNS( "system.indexes" ).c_str() , o );
+ }
+
+ }
+
+
+ string DBClientWithCommands::genIndexName( const BSONObj& keys ) {
+ stringstream ss;
+
+ bool first = 1;
+ for ( BSONObjIterator i(keys); i.more(); ) {
+ BSONElement f = i.next();
+
+ if ( first )
+ first = 0;
+ else
+ ss << "_";
+
+ ss << f.fieldName() << "_";
+ if( f.isNumber() )
+ ss << f.numberInt();
+ }
+ return ss.str();
+ }
+
+ bool DBClientWithCommands::ensureIndex( const string &ns , BSONObj keys , bool unique, const string & name , bool cache, bool background, int version ) {
+ BSONObjBuilder toSave;
+ toSave.append( "ns" , ns );
+ toSave.append( "key" , keys );
+
+ string cacheKey(ns);
+ cacheKey += "--";
+
+ if ( name != "" ) {
+ toSave.append( "name" , name );
+ cacheKey += name;
+ }
+ else {
+ string nn = genIndexName( keys );
+ toSave.append( "name" , nn );
+ cacheKey += nn;
+ }
+
+ if( version >= 0 )
+ toSave.append("v", version);
+
+ if ( unique )
+ toSave.appendBool( "unique", unique );
+
+ if( background )
+ toSave.appendBool( "background", true );
+
+ if ( _seenIndexes.count( cacheKey ) )
+ return 0;
+
+ if ( cache )
+ _seenIndexes.insert( cacheKey );
+
+ insert( Namespace( ns.c_str() ).getSisterNS( "system.indexes" ).c_str() , toSave.obj() );
+ return 1;
+ }
+
+ void DBClientWithCommands::resetIndexCache() {
+ _seenIndexes.clear();
+ }
+
+ /* -- DBClientCursor ---------------------------------------------- */
+
+#ifdef _DEBUG
+#define CHECK_OBJECT( o , msg ) massert( 10337 , (string)"object not valid" + (msg) , (o).isValid() )
+#else
+#define CHECK_OBJECT( o , msg )
+#endif
+
+ void assembleRequest( const string &ns, BSONObj query, int nToReturn, int nToSkip, const BSONObj *fieldsToReturn, int queryOptions, Message &toSend ) {
+ CHECK_OBJECT( query , "assembleRequest query" );
+ // see query.h for the protocol we are using here.
+ BufBuilder b;
+ int opts = queryOptions;
+ b.appendNum(opts);
+ b.appendStr(ns);
+ b.appendNum(nToSkip);
+ b.appendNum(nToReturn);
+ query.appendSelfToBufBuilder(b);
+ if ( fieldsToReturn )
+ fieldsToReturn->appendSelfToBufBuilder(b);
+ toSend.setData(dbQuery, b.buf(), b.len());
+ }
+
+ void DBClientConnection::say( Message &toSend, bool isRetry ) {
+ checkConnection();
+ try {
+ port().say( toSend );
+ }
+ catch( SocketException & ) {
+ _failed = true;
+ throw;
+ }
+ }
+
+ void DBClientConnection::sayPiggyBack( Message &toSend ) {
+ port().piggyBack( toSend );
+ }
+
+ bool DBClientConnection::recv( Message &m ) {
+ return port().recv(m);
+ }
+
+ bool DBClientConnection::call( Message &toSend, Message &response, bool assertOk , string * actualServer ) {
+ /* todo: this is very ugly. messagingport::call returns an error code AND can throw
+ an exception; we should make it return void and just throw an exception anytime
+ it fails
+ */
+ try {
+ if ( !port().call(toSend, response) ) {
+ _failed = true;
+ if ( assertOk )
+ uasserted( 10278 , str::stream() << "dbclient error communicating with server: " << getServerAddress() );
+
+ return false;
+ }
+ }
+ catch( SocketException & ) {
+ _failed = true;
+ throw;
+ }
+ return true;
+ }
+
+ BSONElement getErrField(const BSONObj& o) {
+ BSONElement first = o.firstElement();
+ if( strcmp(first.fieldName(), "$err") == 0 )
+ return first;
+
+ // temp - will be DEV only later
+ /*DEV*/
+ if( 1 ) {
+ BSONElement e = o["$err"];
+ if( !e.eoo() ) {
+ wassert(false);
+ }
+ return e;
+ }
+
+ return BSONElement();
+ }
+
+ bool hasErrField( const BSONObj& o ){
+ return ! getErrField( o ).eoo();
+ }
+
+ void DBClientConnection::checkResponse( const char *data, int nReturned, bool* retry, string* host ) {
+ /* check for errors. the only one we really care about at
+ * this stage is "not master"
+ */
+
+ *retry = false;
+ *host = _serverString;
+
+ if ( clientSet && nReturned ) {
+ assert(data);
+ BSONObj o(data);
+ if ( isNotMasterErrorString( getErrField(o) ) ) {
+ clientSet->isntMaster();
+ }
+ }
+ }
+
+ void DBClientConnection::killCursor( long long cursorId ) {
+ StackBufBuilder b;
+ b.appendNum( (int)0 ); // reserved
+ b.appendNum( (int)1 ); // number
+ b.appendNum( cursorId );
+
+ Message m;
+ m.setData( dbKillCursors , b.buf() , b.len() );
+
+ if ( _lazyKillCursor )
+ sayPiggyBack( m );
+ else
+ say(m);
+ }
+
+#ifdef MONGO_SSL
+ SSLManager* DBClientConnection::sslManager() {
+ if ( _sslManager )
+ return _sslManager;
+
+ SSLManager* s = new SSLManager(true);
+ _sslManager = s;
+ return s;
+ }
+
+ SSLManager* DBClientConnection::_sslManager = 0;
+#endif
+
+ AtomicUInt DBClientConnection::_numConnections;
+ bool DBClientConnection::_lazyKillCursor = true;
+
+
+ bool serverAlive( const string &uri ) {
+ DBClientConnection c( false, 0, 20 ); // potentially the connection to server could fail while we're checking if it's alive - so use timeouts
+ string err;
+ if ( !c.connect( uri, err ) )
+ return false;
+ if ( !c.simpleCommand( "admin", 0, "ping" ) )
+ return false;
+ return true;
+ }
+
+
+ /** @return the database name portion of an ns string */
+ string nsGetDB( const string &ns ) {
+ string::size_type pos = ns.find( "." );
+ if ( pos == string::npos )
+ return ns;
+
+ return ns.substr( 0 , pos );
+ }
+
+ /** @return the collection name portion of an ns string */
+ string nsGetCollection( const string &ns ) {
+ string::size_type pos = ns.find( "." );
+ if ( pos == string::npos )
+ return "";
+
+ return ns.substr( pos + 1 );
+ }
+
+
+} // namespace mongo
diff --git a/src/mongo/client/dbclient.h b/src/mongo/client/dbclient.h
new file mode 100644
index 00000000000..76c1358f752
--- /dev/null
+++ b/src/mongo/client/dbclient.h
@@ -0,0 +1,1049 @@
+/** @file dbclient.h
+
+ Core MongoDB C++ driver interfaces are defined here.
+*/
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "../pch.h"
+#include "../util/net/message.h"
+#include "../util/net/message_port.h"
+#include "../db/jsobj.h"
+#include "../db/json.h"
+#include <stack>
+
+namespace mongo {
+
+ /** the query field 'options' can have these bits set: */
+ enum QueryOptions {
+ /** Tailable means the cursor is not closed when the last data is retrieved. Rather, the cursor marks
+ the final object's position. You can resume using the cursor later, from where it was located,
+ if more data were received. Set on dbQuery and dbGetMore.
+
+ like any "latent cursor", the cursor may become invalid at some point -- for example if that
+ final object it references were deleted. Thus, you should be prepared to requery if you get back
+ ResultFlag_CursorNotFound.
+ */
+ QueryOption_CursorTailable = 1 << 1,
+
+ /** allow query of replica slave. normally these return an error except for namespace "local".
+ */
+ QueryOption_SlaveOk = 1 << 2,
+
+ // findingStart mode is used to find the first operation of interest when
+ // we are scanning through a repl log. For efficiency in the common case,
+ // where the first operation of interest is closer to the tail than the head,
+ // we start from the tail of the log and work backwards until we find the
+ // first operation of interest. Then we scan forward from that first operation,
+ // actually returning results to the client. During the findingStart phase,
+ // we release the db mutex occasionally to avoid blocking the db process for
+ // an extended period of time.
+ QueryOption_OplogReplay = 1 << 3,
+
+ /** The server normally times out idle cursors after an inactivity period to prevent excess memory use.
+ Set this option to prevent that.
+ */
+ QueryOption_NoCursorTimeout = 1 << 4,
+
+ /** Use with QueryOption_CursorTailable. If we are at the end of the data, block for a while rather
+ than returning no data. After a timeout period, we do return as normal.
+ */
+ QueryOption_AwaitData = 1 << 5,
+
+ /** Stream the data down full blast in multiple "more" packages, on the assumption that the client
+ will fully read all data queried. Faster when you are pulling a lot of data and know you want to
+ pull it all down. Note: the client must read all the returned data unless it closes the connection.
+
+ Use the query( boost::function<void(const BSONObj&)> f, ... ) version of the connection's query()
+ method, and it will take care of all the details for you.
+ */
+ QueryOption_Exhaust = 1 << 6,
+
+ /** When sharded, this means it is ok to return partial results.
+ Normally a query fails if not all required shards are up;
+ if this is set, a partial result set is returned instead.
+ */
+ QueryOption_PartialResults = 1 << 7 ,
+
+ QueryOption_AllSupported = QueryOption_CursorTailable | QueryOption_SlaveOk | QueryOption_OplogReplay | QueryOption_NoCursorTimeout | QueryOption_AwaitData | QueryOption_Exhaust | QueryOption_PartialResults
+
+ };
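+
+ /* The options above are passed as a bitmask in the queryOptions parameter of query().
+    A minimal sketch, assuming conn is a connected DBClientConnection and "test.coll" is an
+    illustrative namespace:
+
+        conn.query( "test.coll", Query(), 0, 0, 0,
+                    QueryOption_SlaveOk | QueryOption_NoCursorTimeout );
+ */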
+
+ enum UpdateOptions {
+ /** Upsert - that is, insert the item if no matching item is found. */
+ UpdateOption_Upsert = 1 << 0,
+
+ /** Update multiple documents (if multiple documents match query expression).
+ (Default is update a single document and stop.) */
+ UpdateOption_Multi = 1 << 1,
+
+ /** flag from mongo saying this update went everywhere */
+ UpdateOption_Broadcast = 1 << 2
+ };
+
+ enum RemoveOptions {
+ /** remove only the first matching document */
+ RemoveOption_JustOne = 1 << 0,
+
+ /** flag from mongo saying this remove went everywhere */
+ RemoveOption_Broadcast = 1 << 1
+ };
+
+
+ /**
+ * need to put in DbMessage::ReservedOptions as well
+ */
+ enum InsertOptions {
+ /** With a multi-insert, keep processing the remaining inserts if one fails */
+ InsertOption_ContinueOnError = 1 << 0
+ };
+
+ class DBClientBase;
+
+ /**
+ * ConnectionString handles parsing the different ways to connect to mongo and determining the connection method
+ * samples:
+ * server
+ * server:port
+ * foo/server:port,server:port SET
+ * server,server,server SYNC
+ *
+ * typical use:
+ * string errmsg;
+ * ConnectionString cs = ConnectionString::parse( url , errmsg );
+ * if ( ! cs.isValid() ) throw "bad: " + errmsg;
+ * DBClientBase * conn = cs.connect( errmsg );
+ */
+ class ConnectionString {
+ public:
+ enum ConnectionType { INVALID , MASTER , PAIR , SET , SYNC };
+
+ ConnectionString() {
+ _type = INVALID;
+ }
+
+ ConnectionString( const HostAndPort& server ) {
+ _type = MASTER;
+ _servers.push_back( server );
+ _finishInit();
+ }
+
+ ConnectionString( ConnectionType type , const string& s , const string& setName = "" ) {
+ _type = type;
+ _setName = setName;
+ _fillServers( s );
+
+ switch ( _type ) {
+ case MASTER:
+ assert( _servers.size() == 1 );
+ break;
+ case SET:
+ assert( _setName.size() );
+ assert( _servers.size() >= 1 ); // 1 is ok since we can derive
+ break;
+ case PAIR:
+ assert( _servers.size() == 2 );
+ break;
+ default:
+ assert( _servers.size() > 0 );
+ }
+
+ _finishInit();
+ }
+
+ ConnectionString( const string& s , ConnectionType favoredMultipleType ) {
+ _type = INVALID;
+
+ _fillServers( s );
+ if ( _type != INVALID ) {
+ // set already
+ }
+ else if ( _servers.size() == 1 ) {
+ _type = MASTER;
+ }
+ else {
+ _type = favoredMultipleType;
+ assert( _type == SET || _type == SYNC );
+ }
+ _finishInit();
+ }
+
+ bool isValid() const { return _type != INVALID; }
+
+ string toString() const { return _string; }
+
+ DBClientBase* connect( string& errmsg, double socketTimeout = 0 ) const;
+
+ string getSetName() const { return _setName; }
+
+ vector<HostAndPort> getServers() const { return _servers; }
+
+ ConnectionType type() const { return _type; }
+
+ static ConnectionString parse( const string& url , string& errmsg );
+
+ static string typeToString( ConnectionType type );
+
+ private:
+
+ void _fillServers( string s );
+ void _finishInit();
+
+ ConnectionType _type;
+ vector<HostAndPort> _servers;
+ string _string;
+ string _setName;
+ };
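+
+ /* A sketch of parsing a replica set connection string (the host names and set name are
+    illustrative only):
+
+        string errmsg;
+        ConnectionString cs = ConnectionString::parse( "rs0/db1.example.net:27017,db2.example.net:27017", errmsg );
+        if ( cs.isValid() && cs.type() == ConnectionString::SET ) {
+            DBClientBase* conn = cs.connect( errmsg );
+        }
+ */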
+
+ /**
+ * controls how much a client cares about writes
+ * default is NORMAL
+ */
+ enum WriteConcern {
+ W_NONE = 0 , // TODO: not every connection type fully supports this
+ W_NORMAL = 1
+ // TODO SAFE = 2
+ };
+
+ class BSONObj;
+ class ScopedDbConnection;
+ class DBClientCursor;
+ class DBClientCursorBatchIterator;
+
+ /** Represents a Mongo query expression. Typically one uses the QUERY(...) macro to construct a Query object.
+ Examples:
+ QUERY( "age" << 33 << "school" << "UCLA" ).sort("name")
+ QUERY( "age" << GT << 30 << LT << 50 )
+ */
+ class Query {
+ public:
+ BSONObj obj;
+ Query() : obj(BSONObj()) { }
+ Query(const BSONObj& b) : obj(b) { }
+ Query(const string &json) :
+ obj(fromjson(json)) { }
+ Query(const char * json) :
+ obj(fromjson(json)) { }
+
+ /** Add a sort (ORDER BY) criteria to the query expression.
+ @param sortPattern the sort order template. For example to order by name ascending, time descending:
+ { name : 1, ts : -1 }
+ i.e.
+ BSON( "name" << 1 << "ts" << -1 )
+ or
+ fromjson(" name : 1, ts : -1 ")
+ */
+ Query& sort(const BSONObj& sortPattern);
+
+ /** Add a sort (ORDER BY) criteria to the query expression.
+ This version of sort() assumes you want to sort on a single field.
+ @param asc = 1 for ascending order
+ asc = -1 for descending order
+ */
+ Query& sort(const string &field, int asc = 1) { sort( BSON( field << asc ) ); return *this; }
+
+ /** Provide a hint to the query.
+ @param keyPattern Key pattern for the index to use.
+ Example:
+ hint("{ts:1}")
+ */
+ Query& hint(BSONObj keyPattern);
+ Query& hint(const string &jsonKeyPatt) { return hint(fromjson(jsonKeyPatt)); }
+
+ /** Provide min and/or max index limits for the query.
+ min <= x < max
+ */
+ Query& minKey(const BSONObj &val);
+ /**
+ max is exclusive
+ */
+ Query& maxKey(const BSONObj &val);
+
+ /** Return explain information about execution of this query instead of the actual query results.
+ Normally it is easier to use the mongo shell to run db.find(...).explain().
+ */
+ Query& explain();
+
+ /** Use snapshot mode for the query. Snapshot mode ensures that an object present at both the start and the end
+ of the query's execution is returned exactly once -- neither duplicated nor missed (an object created or deleted
+ during the query may or may not be returned, even with snapshot mode).
+
+ Note that short query responses (less than 1MB) are always effectively snapshotted.
+
+ Currently, snapshot mode may not be used with sorting or explicit hints.
+ */
+ Query& snapshot();
+
+ /** Queries to the Mongo database support a $where parameter option which contains
+ a javascript function that is evaluated to see whether objects being queried match
+ its criteria. Use this helper to append such a function to a query object.
+ Your query may also contain other traditional Mongo query terms.
+
+ @param jscode The javascript function to evaluate against each potential object
+ match. The function must return true for matched objects. Use the this
+ variable to inspect the current object.
+ @param scope scope for the javascript function. List in a BSON object any
+ variables you would like defined when the jscode executes. One can think
+ of these as "bind variables".
+
+ Examples:
+ conn.findOne("test.coll", Query("{a:3}").where("this.b == 2 || this.c == 3"));
+ Query badBalance = Query().where("this.debits - this.credits < 0");
+ */
+ Query& where(const string &jscode, BSONObj scope);
+ Query& where(const string &jscode) { return where(jscode, BSONObj()); }
+
+ /**
+ * @return true if this query has an orderby, hint, or some other field
+ */
+ bool isComplex( bool * hasDollar = 0 ) const;
+
+ BSONObj getFilter() const;
+ BSONObj getSort() const;
+ BSONObj getHint() const;
+ bool isExplain() const;
+
+ string toString() const;
+ operator string() const { return toString(); }
+ private:
+ void makeComplex();
+ template< class T >
+ void appendComplex( const char *fieldName, const T& val ) {
+ makeComplex();
+ BSONObjBuilder b;
+ b.appendElements(obj);
+ b.append(fieldName, val);
+ obj = b.obj();
+ }
+ };
+
+ /**
+ * Represents a full query description, including all options required for the query to be passed on
+ * to other hosts
+ */
+ class QuerySpec {
+ public:
+
+ string _ns;
+ int _ntoskip;
+ int _ntoreturn;
+ int _options;
+ BSONObj _query;
+ BSONObj _fields;
+ Query _queryObj;
+
+ QuerySpec( const string& ns,
+ const BSONObj& query, const BSONObj& fields,
+ int ntoskip, int ntoreturn, int options )
+ : _ns( ns ), _ntoskip( ntoskip ), _ntoreturn( ntoreturn ), _options( options ),
+ _query( query ), _fields( fields )
+ {
+ _query = _query.getOwned();
+ _fields = _fields.getOwned();
+ _queryObj = Query( _query );
+ }
+
+ QuerySpec() {}
+
+ bool isEmpty() const {
+ return _ns.size() == 0;
+ }
+
+ bool isExplain() const {
+ return _queryObj.isExplain();
+ }
+
+ BSONObj filter() const {
+ return _queryObj.getFilter();
+ }
+
+ BSONObj hint() const {
+ return _queryObj.getHint();
+ }
+
+ BSONObj sort() const {
+ return _queryObj.getSort();
+ }
+
+ BSONObj query(){
+ return _query;
+ }
+
+ BSONObj fields() const { return _fields; }
+
+ string ns() const { return _ns; }
+
+ int ntoskip() const { return _ntoskip; }
+
+ int ntoreturn() const { return _ntoreturn; }
+
+ int options() const { return _options; }
+
+ string toString() const {
+ return str::stream() << "QSpec " << BSON( "ns" << _ns << "n2skip" << _ntoskip << "n2return" << _ntoreturn << "options" << _options
+ << "query" << _query << "fields" << _fields );
+ }
+
+ };
+
+
+ /** Typically one uses the QUERY(...) macro to construct a Query object.
+ Example: QUERY( "age" << 33 << "school" << "UCLA" )
+ */
+#define QUERY(x) mongo::Query( BSON(x) )
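+
+ /* A sketch combining the QUERY macro with the Query helpers above, assuming conn is a
+    connected client and "test.people" plus the field names are illustrative:
+
+        Query q = QUERY( "age" << GT << 30 ).sort( "name" ).where( "this.debits < this.credits" );
+        BSONObj doc = conn.findOne( "test.people", q );
+ */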
+
+ // Useful utilities for namespaces
+ /** @return the database name portion of an ns string */
+ string nsGetDB( const string &ns );
+
+ /** @return the collection name portion of an ns string */
+ string nsGetCollection( const string &ns );
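+ // for illustration: nsGetDB( "test.foo" ) returns "test" and nsGetCollection( "test.foo" ) returns "foo"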
+
+ /**
+ interface that handles communication with the db
+ */
+ class DBConnector {
+ public:
+ virtual ~DBConnector() {}
+ /** actualServer is set to the actual server where the call went if there was a choice (SlaveOk) */
+ virtual bool call( Message &toSend, Message &response, bool assertOk=true , string * actualServer = 0 ) = 0;
+ virtual void say( Message &toSend, bool isRetry = false ) = 0;
+ virtual void sayPiggyBack( Message &toSend ) = 0;
+ /* used by QueryOption_Exhaust. To use that your subclass must implement this. */
+ virtual bool recv( Message& m ) { assert(false); return false; }
+ // In general, for lazy queries, we'll need to say, recv, then checkResponse
+ virtual void checkResponse( const char* data, int nReturned, bool* retry = NULL, string* targetHost = NULL ) {
+ if( retry ) *retry = false;
+ if( targetHost ) *targetHost = "";
+ }
+ virtual bool lazySupported() const = 0;
+ };
+
+ /**
+ The interface that any db connection should implement
+ */
+ class DBClientInterface : boost::noncopyable {
+ public:
+ virtual auto_ptr<DBClientCursor> query(const string &ns, Query query, int nToReturn = 0, int nToSkip = 0,
+ const BSONObj *fieldsToReturn = 0, int queryOptions = 0 , int batchSize = 0 ) = 0;
+
+ virtual void insert( const string &ns, BSONObj obj , int flags=0) = 0;
+
+ virtual void insert( const string &ns, const vector< BSONObj >& v , int flags=0) = 0;
+
+ virtual void remove( const string &ns , Query query, bool justOne = 0 ) = 0;
+
+ virtual void update( const string &ns , Query query , BSONObj obj , bool upsert = 0 , bool multi = 0 ) = 0;
+
+ virtual ~DBClientInterface() { }
+
+ /**
+ @return a single object that matches the query. if none do, then the object is empty
+ @throws AssertionException
+ */
+ virtual BSONObj findOne(const string &ns, const Query& query, const BSONObj *fieldsToReturn = 0, int queryOptions = 0);
+
+ /** query N objects from the database into an array. makes sense mostly when you want a small number of results. if a huge number, use
+ query() and iterate the cursor.
+ */
+ void findN(vector<BSONObj>& out, const string&ns, Query query, int nToReturn, int nToSkip = 0, const BSONObj *fieldsToReturn = 0, int queryOptions = 0);
+
+ virtual string getServerAddress() const = 0;
+
+ /** don't use this - called automatically by DBClientCursor for you */
+ virtual auto_ptr<DBClientCursor> getMore( const string &ns, long long cursorId, int nToReturn = 0, int options = 0 ) = 0;
+ };
+
+ /**
+ DB "commands"
+ Basically just invocations of connection.$cmd.findOne({...});
+ */
+ class DBClientWithCommands : public DBClientInterface {
+ set<string> _seenIndexes;
+ public:
+ /** controls how chatty the client is about network errors & such. See log.h */
+ int _logLevel;
+
+ DBClientWithCommands() : _logLevel(0), _cachedAvailableOptions( (enum QueryOptions)0 ), _haveCachedAvailableOptions(false) { }
+
+ /** helper function. run a simple command where the command expression is simply
+ { command : 1 }
+ @param info -- where to put result object. may be null if caller doesn't need that info
+ @param command -- command name
+ @return true if the command returned "ok".
+ */
+ bool simpleCommand(const string &dbname, BSONObj *info, const string &command);
+
+ /** Run a database command. Database commands are represented as BSON objects. Common database
+ commands have prebuilt helper functions -- see below. If a helper is not available you can
+ directly call runCommand.
+
+ @param dbname database name. Use "admin" for global administrative commands.
+ @param cmd the command object to execute. For example, { ismaster : 1 }
+ @param info the result object the database returns. Typically has { ok : ..., errmsg : ... } fields
+ set.
+ @param options see enum QueryOptions - normally not needed to run a command
+ @return true if the command returned "ok".
+ */
+ virtual bool runCommand(const string &dbname, const BSONObj& cmd, BSONObj &info, int options=0);
+
+ /** Authorize access to a particular database.
+ Authentication is separate for each database on the server -- you may authenticate for any
+ number of databases on a single connection.
+ The "admin" database is special and once authenticated provides access to all databases on the
+ server.
+ @param digestPassword if password is plain text, set this to true. otherwise assumed to be pre-digested
+ @return true if successful
+ */
+ virtual bool auth(const string &dbname, const string &username, const string &pwd, string& errmsg, bool digestPassword = true);
+
+ /** count number of objects in collection ns that match the query criteria specified
+ throws UserAssertion if database returns an error
+ */
+ virtual unsigned long long count(const string &ns, const BSONObj& query = BSONObj(), int options=0, int limit=0, int skip=0 );
+
+ string createPasswordDigest( const string &username , const string &clearTextPassword );
+
+ /** returns true in isMaster parm if this db is the current master
+ of a replica pair.
+
+ pass in info for more details e.g.:
+ { "ismaster" : 1.0 , "msg" : "not paired" , "ok" : 1.0 }
+
+ returns true if command invoked successfully.
+ */
+ virtual bool isMaster(bool& isMaster, BSONObj *info=0);
+
+ /**
+ Create a new collection in the database. Normally, collection creation is automatic. You would
+ use this function if you wish to specify special options on creation.
+
+ If the collection already exists, no action occurs.
+
+ @param ns fully qualified collection name
+ @param size desired initial extent size for the collection.
+ Must be <= 1000000000 for normal collections.
+ For fixed size (capped) collections, this size is the total/max size of the
+ collection.
+ @param capped if true, this is a fixed size collection (where old data rolls out).
+ @param max maximum number of objects if capped (optional).
+
+ returns true if successful.
+ */
+ bool createCollection(const string &ns, long long size = 0, bool capped = false, int max = 0, BSONObj *info = 0);
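+
+ /* e.g., a 10MB capped collection (a sketch; the namespace is illustrative):
+
+        conn.createCollection( "test.log", 10 * 1024 * 1024, true /*capped*/ );
+ */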
+
+ /** Get error result from the last write operation (insert/update/delete) on this connection.
+ @return error message text, or empty string if no error.
+ */
+ string getLastError(bool fsync = false, bool j = false, int w = 0, int wtimeout = 0);
+
+ /** Get error result from the last write operation (insert/update/delete) on this connection.
+ @return full error object.
+
+ If "w" is -1, wait for propagation to majority of nodes.
+ If "wtimeout" is 0, the operation will block indefinitely if needed.
+ */
+ virtual BSONObj getLastErrorDetailed(bool fsync = false, bool j = false, int w = 0, int wtimeout = 0);
+
+ /** Can be called with the returned value from getLastErrorDetailed to extract an error string.
+ If all you need is the string, just call getLastError() instead.
+ */
+ static string getLastErrorString( const BSONObj& res );
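+
+ /* A write followed by an error check might look like this (a sketch; the namespace and
+    write-concern values are illustrative):
+
+        conn.insert( "test.people", BSON( "name" << "alice" ) );
+        string err = conn.getLastError();                                    // "" if the insert succeeded
+        BSONObj gle = conn.getLastErrorDetailed( false, false, 2, 1000 );    // fsync:false, j:false, w:2, wtimeout:1000ms
+ */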
+
+ /** Return the last error which has occurred, even if not the very last operation.
+
+ @return { err : <error message>, nPrev : <how_many_ops_back_occurred>, ok : 1 }
+
+ result.err will be null if no error has occurred.
+ */
+ BSONObj getPrevError();
+
+ /** Reset the previous error state for this connection (accessed via getLastError and
+ getPrevError). Useful when performing several operations at once and then checking
+ for an error after attempting all operations.
+ */
+ bool resetError() { return simpleCommand("admin", 0, "reseterror"); }
+
+ /** Delete the specified collection. */
+ virtual bool dropCollection( const string &ns ) {
+ string db = nsGetDB( ns );
+ string coll = nsGetCollection( ns );
+ uassert( 10011 , "no collection name", coll.size() );
+
+ BSONObj info;
+
+ bool res = runCommand( db.c_str() , BSON( "drop" << coll ) , info );
+ resetIndexCache();
+ return res;
+ }
+
+ /** Perform a repair and compaction of the specified database. May take a long time to run. Disk space
+ must be available equal to the size of the database while repairing.
+ */
+ bool repairDatabase(const string &dbname, BSONObj *info = 0) {
+ return simpleCommand(dbname, info, "repairDatabase");
+ }
+
+ /** Copy database from one server or name to another server or name.
+
+ Generally, you should dropDatabase() first as otherwise the copied information will MERGE
+ into whatever data is already present in this database.
+
+ For security reasons this function only works when you are authorized to access the "admin" db. However,
+ if you have access to said db, you can copy any database from one place to another.
+ TODO: this needs enhancement to be more flexible in terms of security.
+
+ This method provides a way to "rename" a database by copying it to a new db name and
+ location. The copy is "repaired" and compacted.
+
+ fromdb database name from which to copy.
+ todb database name to copy to.
+ fromhost hostname of the database (and optionally, ":port") from which to
+ copy the data. copies from self if "".
+
+ returns true if successful
+ */
+ bool copyDatabase(const string &fromdb, const string &todb, const string &fromhost = "", BSONObj *info = 0);
+
+ /** The Mongo database provides built-in performance profiling capabilities. Use setDbProfilingLevel()
+ to enable. Profiling information is then written to the system.profile collection, which one can
+ then query.
+ */
+ enum ProfilingLevel {
+ ProfileOff = 0,
+ ProfileSlow = 1, // log very slow (>100ms) operations
+ ProfileAll = 2
+
+ };
+ bool setDbProfilingLevel(const string &dbname, ProfilingLevel level, BSONObj *info = 0);
+ bool getDbProfilingLevel(const string &dbname, ProfilingLevel& level, BSONObj *info = 0);
+
+
+ /** This implicitly converts from char*, string, and BSONObj to be an argument to mapreduce.
+ You shouldn't need to construct this explicitly.
+ */
+ struct MROutput {
+ MROutput(const char* collection) : out(BSON("replace" << collection)) {}
+ MROutput(const string& collection) : out(BSON("replace" << collection)) {}
+ MROutput(const BSONObj& obj) : out(obj) {}
+
+ BSONObj out;
+ };
+ static MROutput MRInline;
+
+ /** Run a map/reduce job on the server.
+
+ See http://www.mongodb.org/display/DOCS/MapReduce
+
+ ns namespace (db+collection name) of input data
+ jsmapf javascript map function code
+ jsreducef javascript reduce function code.
+ query optional query filter for the input
+ output either a string collection name or an object representing output type
+ if not specified uses inline output type
+
+ returns a result object which contains:
+ { result : <collection_name>,
+ numObjects : <number_of_objects_scanned>,
+ timeMillis : <job_time>,
+ ok : <1_if_ok>,
+ [, err : <errmsg_if_error>]
+ }
+
+ For example one might call:
+ result.getField("ok").trueValue()
+ on the result to check if ok.
+ */
+ BSONObj mapreduce(const string &ns, const string &jsmapf, const string &jsreducef, BSONObj query = BSONObj(), MROutput output = MRInline);
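+
+ /* A minimal sketch ("test.events", the javascript strings and the output collection name are
+    illustrative only):
+
+        BSONObj res = conn.mapreduce( "test.events",
+                                      "function(){ emit( this.user, 1 ); }",
+                                      "function( k, vals ){ var n = 0; vals.forEach( function(v){ n += v; } ); return n; }",
+                                      BSONObj(),                   // no query filter
+                                      "test.events_per_user" );    // MROutput built from a collection name
+        bool ok = res["ok"].trueValue();
+ */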
+
+ /** Run javascript code on the database server.
+ dbname database context in which the code runs. The javascript variable 'db' will be assigned
+ to this database when the function is invoked.
+ jscode source code for a javascript function.
+ info the command object which contains any information on the invocation result including
+ the return value and other information. If an error occurs running the jscode, error
+ information will be in info. (try "out() << info.toString()")
+ retValue return value from the jscode function.
+ args args to pass to the jscode function. when invoked, the 'args' variable will be defined
+ for use by the jscode.
+
+ returns true if runs ok.
+
+ See testDbEval() in dbclient.cpp for an example of usage.
+ */
+ bool eval(const string &dbname, const string &jscode, BSONObj& info, BSONElement& retValue, BSONObj *args = 0);
+
+ /** validate a collection, checking for errors and reporting back statistics.
+ this operation is slow and blocking.
+ */
+ bool validate( const string &ns , bool scandata=true ) {
+ BSONObj cmd = BSON( "validate" << nsGetCollection( ns ) << "scandata" << scandata );
+ BSONObj info;
+ return runCommand( nsGetDB( ns ).c_str() , cmd , info );
+ }
+
+ /* The following helpers are simply more convenient forms of eval() for certain common cases */
+
+ /* invocation with no return value of interest -- with or without one simple parameter */
+ bool eval(const string &dbname, const string &jscode);
+ template< class T >
+ bool eval(const string &dbname, const string &jscode, T parm1) {
+ BSONObj info;
+ BSONElement retValue;
+ BSONObjBuilder b;
+ b.append("0", parm1);
+ BSONObj args = b.done();
+ return eval(dbname, jscode, info, retValue, &args);
+ }
+
+ /** eval invocation with one parm to server and one numeric field (either int or double) returned */
+ template< class T, class NumType >
+ bool eval(const string &dbname, const string &jscode, T parm1, NumType& ret) {
+ BSONObj info;
+ BSONElement retValue;
+ BSONObjBuilder b;
+ b.append("0", parm1);
+ BSONObj args = b.done();
+ if ( !eval(dbname, jscode, info, retValue, &args) )
+ return false;
+ ret = (NumType) retValue.number();
+ return true;
+ }
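+
+ /* e.g. (a sketch; the database name and the javascript function are illustrative):
+
+        double sum = 0;
+        conn.eval( "test", "function( x ){ return x + 40; }", 2, sum );   // sum becomes 42 on success
+ */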
+
+ /**
+ get a list of all the current databases
+ uses the { listDatabases : 1 } command.
+ throws on error
+ */
+ list<string> getDatabaseNames();
+
+ /**
+ get a list of all the current collections in db
+ */
+ list<string> getCollectionNames( const string& db );
+
+ bool exists( const string& ns );
+
+ /** Create an index if it does not already exist.
+ ensureIndex calls are remembered so it is safe/fast to call this function many
+ times in your code.
+ @param ns collection to be indexed
+ @param keys the "key pattern" for the index. e.g., { name : 1 }
+ @param unique if true, indicates that key uniqueness should be enforced for this index
+ @param name if not specified, it will be created from the keys automatically (which is recommended)
+ @param cache if set to false, the index cache for the connection won't remember this call
+ @param background build index in the background (see mongodb docs/wiki for details)
+ @param v index version. leave at default value. (unit tests set this parameter.)
+ @return whether or not sent message to db.
+ should be true on first call, false on subsequent unless resetIndexCache was called
+ */
+ virtual bool ensureIndex( const string &ns , BSONObj keys , bool unique = false, const string &name = "",
+ bool cache = true, bool background = false, int v = -1 );
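+
+ /* e.g. (a sketch; the namespace and key are illustrative):
+
+        conn.ensureIndex( "test.people", BSON( "name" << 1 ), true /*unique*/ );   // sends the index spec
+        conn.ensureIndex( "test.people", BSON( "name" << 1 ), true );              // cached -- returns false, nothing sent
+ */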
+
+ /**
+ clears the index cache, so the subsequent call to ensureIndex for any index will go to the server
+ */
+ virtual void resetIndexCache();
+
+ virtual auto_ptr<DBClientCursor> getIndexes( const string &ns );
+
+ virtual void dropIndex( const string& ns , BSONObj keys );
+ virtual void dropIndex( const string& ns , const string& indexName );
+
+ /**
+ drops all indexes for the collection
+ */
+ virtual void dropIndexes( const string& ns );
+
+ virtual void reIndex( const string& ns );
+
+ string genIndexName( const BSONObj& keys );
+
+ /** Erase / drop an entire database */
+ virtual bool dropDatabase(const string &dbname, BSONObj *info = 0) {
+ bool ret = simpleCommand(dbname, info, "dropDatabase");
+ resetIndexCache();
+ return ret;
+ }
+
+ virtual string toString() = 0;
+
+ protected:
+ /** if the result of a command is ok*/
+ bool isOk(const BSONObj&);
+
+ /** if the element contains a not master error */
+ bool isNotMasterErrorString( const BSONElement& e );
+
+ BSONObj _countCmd(const string &ns, const BSONObj& query, int options, int limit, int skip );
+
+ enum QueryOptions availableOptions();
+
+ private:
+ enum QueryOptions _cachedAvailableOptions;
+ bool _haveCachedAvailableOptions;
+ };
+
+ /**
+ abstract class that implements the core db operations
+ */
+ class DBClientBase : public DBClientWithCommands, public DBConnector {
+ protected:
+ WriteConcern _writeConcern;
+
+ public:
+ DBClientBase() {
+ _writeConcern = W_NORMAL;
+ }
+
+ WriteConcern getWriteConcern() const { return _writeConcern; }
+ void setWriteConcern( WriteConcern w ) { _writeConcern = w; }
+
+ /** send a query to the database.
+ @param ns namespace to query, format is <dbname>.<collectname>[.<collectname>]*
+ @param query query to perform on the collection. this is a BSONObj (binary JSON)
+ You may format as
+ { query: { ... }, orderby: { ... } }
+ to specify a sort order.
+ @param nToReturn n to return (i.e., limit). 0 = unlimited
+ @param nToSkip start with the nth item
+ @param fieldsToReturn optional template of which fields to select. if unspecified, returns all fields
+ @param queryOptions see options enum at top of this file
+
+ @return cursor. 0 if error (connection failure)
+ @throws AssertionException
+ */
+ virtual auto_ptr<DBClientCursor> query(const string &ns, Query query, int nToReturn = 0, int nToSkip = 0,
+ const BSONObj *fieldsToReturn = 0, int queryOptions = 0 , int batchSize = 0 );
+
+ /** don't use this - called automatically by DBClientCursor for you
+ @param cursorId id of cursor to retrieve
+ @return a handle to a previously allocated cursor
+ @throws AssertionException
+ */
+ virtual auto_ptr<DBClientCursor> getMore( const string &ns, long long cursorId, int nToReturn = 0, int options = 0 );
+
+ /**
+ insert an object into the database
+ */
+ virtual void insert( const string &ns , BSONObj obj , int flags=0);
+
+ /**
+ insert a vector of objects into the database
+ */
+ virtual void insert( const string &ns, const vector< BSONObj >& v , int flags=0);
+
+ /**
+ remove matching objects from the database
+ @param justOne if this is true, the remove stops after deleting a single matching document
+ */
+ virtual void remove( const string &ns , Query q , bool justOne = 0 );
+
+ /**
+ updates objects matching query
+ */
+ virtual void update( const string &ns , Query query , BSONObj obj , bool upsert = false , bool multi = false );
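+
+ /* A sketch of the basic write helpers above (the namespace and fields are illustrative):
+
+        conn.insert( "test.counters", BSON( "_id" << "page" << "n" << 1 ) );
+        conn.update( "test.counters", QUERY( "_id" << "page" ), BSON( "$inc" << BSON( "n" << 1 ) ), true /*upsert*/ );
+        conn.remove( "test.counters", QUERY( "_id" << "page" ), true /*justOne*/ );
+ */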
+
+ virtual bool isFailed() const = 0;
+
+ virtual void killCursor( long long cursorID ) = 0;
+
+ virtual bool callRead( Message& toSend , Message& response ) = 0;
+ // virtual bool callWrite( Message& toSend , Message& response ) = 0; // TODO: add this if needed
+
+ virtual ConnectionString::ConnectionType type() const = 0;
+
+ virtual double getSoTimeout() const = 0;
+
+ }; // DBClientBase
+
+ class DBClientReplicaSet;
+
+ class ConnectException : public UserException {
+ public:
+ ConnectException(string msg) : UserException(9000,msg) { }
+ };
+
+ /**
+ A basic connection to the database.
+ This is the main entry point for talking to a simple Mongo setup
+ */
+ class DBClientConnection : public DBClientBase {
+ public:
+ /**
+ @param _autoReconnect if true, automatically reconnect on a connection failure
+ @param cp used by DBClientReplicaSet. You do not need to specify this parameter
+ @param timeout tcp timeout in seconds - this is for read/write, not connect.
+ Connect timeout is fixed, but short, at 5 seconds.
+ */
+ DBClientConnection(bool _autoReconnect=false, DBClientReplicaSet* cp=0, double so_timeout=0) :
+ clientSet(cp), _failed(false), autoReconnect(_autoReconnect), lastReconnectTry(0), _so_timeout(so_timeout) {
+ _numConnections++;
+ }
+
+ virtual ~DBClientConnection() {
+ _numConnections--;
+ }
+
+ /** Connect to a Mongo database server.
+
+ If autoReconnect is true, you can try to use the DBClientConnection even when
+ false was returned -- it will try to connect again.
+
+ @param serverHostname host to connect to. can include port number ( 127.0.0.1 , 127.0.0.1:5555 )
+ If you use IPv6 you must add a port number ( ::1:27017 )
+ @param errmsg any relevant error message will be appended to the string
+ @deprecated please use HostAndPort
+ @return false if fails to connect.
+ */
+ virtual bool connect(const char * hostname, string& errmsg) {
+ // TODO: remove this method
+ HostAndPort t( hostname );
+ return connect( t , errmsg );
+ }
+
+ /** Connect to a Mongo database server.
+
+ If autoReconnect is true, you can try to use the DBClientConnection even when
+ false was returned -- it will try to connect again.
+
+ @param server server to connect to.
+ @param errmsg any relevant error message will be appended to the string
+ @return false if fails to connect.
+ */
+ virtual bool connect(const HostAndPort& server, string& errmsg);
+
+ /** Connect to a Mongo database server. Exception throwing version.
+ Throws a UserException if cannot connect.
+
+ If autoReconnect is true, you can try to use the DBClientConnection even when
+ false was returned -- it will try to connect again.
+
+ @param serverHostname host to connect to. can include port number ( 127.0.0.1 , 127.0.0.1:5555 )
+ */
+ void connect(const string& serverHostname) {
+ string errmsg;
+ if( !connect(HostAndPort(serverHostname), errmsg) )
+ throw ConnectException(string("can't connect ") + errmsg);
+ }
+
+ virtual bool auth(const string &dbname, const string &username, const string &pwd, string& errmsg, bool digestPassword = true);
+
+ virtual auto_ptr<DBClientCursor> query(const string &ns, Query query=Query(), int nToReturn = 0, int nToSkip = 0,
+ const BSONObj *fieldsToReturn = 0, int queryOptions = 0 , int batchSize = 0 ) {
+ checkConnection();
+ return DBClientBase::query( ns, query, nToReturn, nToSkip, fieldsToReturn, queryOptions , batchSize );
+ }
+
+ /** Uses QueryOption_Exhaust
+ Exhaust mode streams all of the query's data back as fast as possible, with no back-and-forth for OP_GETMORE. If you are certain
+ you will exhaust the query, it can be useful.
+
+ Use DBClientCursorBatchIterator version if you want to do items in large blocks, perhaps to avoid granular locking and such.
+ */
+ unsigned long long query( boost::function<void(const BSONObj&)> f, const string& ns, Query query, const BSONObj *fieldsToReturn = 0, int queryOptions = 0);
+ unsigned long long query( boost::function<void(DBClientCursorBatchIterator&)> f, const string& ns, Query query, const BSONObj *fieldsToReturn = 0, int queryOptions = 0);
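+
+ /* A sketch of streaming a whole collection through a callback (printDoc is a hypothetical
+    free function taking a const BSONObj&; exhaust mode is used automatically when the
+    connection supports it):
+
+        unsigned long long n = conn.query( printDoc, "test.big", Query() );
+ */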
+
+ virtual bool runCommand(const string &dbname, const BSONObj& cmd, BSONObj &info, int options=0);
+
+ /**
+ @return true if this connection is currently in a failed state. When autoreconnect is on,
+ a connection will transition back to an ok state after reconnecting.
+ */
+ bool isFailed() const { return _failed; }
+
+ MessagingPort& port() { assert(p); return *p; }
+
+ string toStringLong() const {
+ stringstream ss;
+ ss << _serverString;
+ if ( _failed ) ss << " failed";
+ return ss.str();
+ }
+
+ /** Returns the address of the server */
+ string toString() { return _serverString; }
+
+ string getServerAddress() const { return _serverString; }
+
+ virtual void killCursor( long long cursorID );
+ virtual bool callRead( Message& toSend , Message& response ) { return call( toSend , response ); }
+ virtual void say( Message &toSend, bool isRetry = false );
+ virtual bool recv( Message& m );
+ virtual void checkResponse( const char *data, int nReturned, bool* retry = NULL, string* host = NULL );
+ virtual bool call( Message &toSend, Message &response, bool assertOk = true , string * actualServer = 0 );
+ virtual ConnectionString::ConnectionType type() const { return ConnectionString::MASTER; }
+ void setSoTimeout(double to) { _so_timeout = to; }
+ double getSoTimeout() const { return _so_timeout; }
+
+ virtual bool lazySupported() const { return true; }
+
+ static int getNumConnections() {
+ return _numConnections;
+ }
+
+ static void setLazyKillCursor( bool lazy ) { _lazyKillCursor = lazy; }
+ static bool getLazyKillCursor() { return _lazyKillCursor; }
+
+ protected:
+ friend class SyncClusterConnection;
+ virtual void sayPiggyBack( Message &toSend );
+
+ DBClientReplicaSet *clientSet;
+ boost::scoped_ptr<MessagingPort> p;
+ boost::scoped_ptr<SockAddr> server;
+ bool _failed;
+ const bool autoReconnect;
+ time_t lastReconnectTry;
+ HostAndPort _server; // remember for reconnects
+ string _serverString;
+ void _checkConnection();
+
+ // throws SocketException if in failed state and not reconnecting or if waiting to reconnect
+ void checkConnection() { if( _failed ) _checkConnection(); }
+
+ map< string, pair<string,string> > authCache;
+ double _so_timeout;
+ bool _connect( string& errmsg );
+
+ static AtomicUInt _numConnections;
+ static bool _lazyKillCursor; // lazy means we piggy back kill cursors on next op
+
+#ifdef MONGO_SSL
+ static SSLManager* sslManager();
+ static SSLManager* _sslManager;
+#endif
+ };
+
+ /** pings server to check if it's up
+ */
+ bool serverAlive( const string &uri );
+
+ DBClientBase * createDirectClient();
+
+ BSONElement getErrField( const BSONObj& result );
+ bool hasErrField( const BSONObj& result );
+
+ inline std::ostream& operator<<( std::ostream &s, const Query &q ) {
+ return s << q.toString();
+ }
+
+} // namespace mongo
+
+#include "dbclientcursor.h"
+#include "dbclient_rs.h"
+#include "undef_macros.h"
diff --git a/src/mongo/client/dbclient_rs.cpp b/src/mongo/client/dbclient_rs.cpp
new file mode 100644
index 00000000000..2d9e0fbabba
--- /dev/null
+++ b/src/mongo/client/dbclient_rs.cpp
@@ -0,0 +1,993 @@
+// dbclient.cpp - connect to a Mongo database as a database, from C++
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "dbclient.h"
+#include "../bson/util/builder.h"
+#include "../db/jsobj.h"
+#include "../db/json.h"
+#include "../db/dbmessage.h"
+#include "connpool.h"
+#include "dbclient_rs.h"
+#include "../util/background.h"
+#include "../util/timer.h"
+
+namespace mongo {
+
+ // --------------------------------
+ // ----- ReplicaSetMonitor ---------
+ // --------------------------------
+
+ // global background job that periodically (every 10 seconds) checks every registered replica set monitor
+ class ReplicaSetMonitorWatcher : public BackgroundJob {
+ public:
+ ReplicaSetMonitorWatcher() : _safego("ReplicaSetMonitorWatcher::_safego") , _started(false) {}
+
+ virtual string name() const { return "ReplicaSetMonitorWatcher"; }
+
+ void safeGo() {
+ // check outside of lock for speed
+ if ( _started )
+ return;
+
+ scoped_lock lk( _safego );
+ if ( _started )
+ return;
+ _started = true;
+
+ go();
+ }
+ protected:
+ void run() {
+ log() << "starting" << endl;
+ while ( ! inShutdown() ) {
+ sleepsecs( 10 );
+ try {
+ ReplicaSetMonitor::checkAll( true );
+ }
+ catch ( std::exception& e ) {
+ error() << "check failed: " << e.what() << endl;
+ }
+ catch ( ... ) {
+ error() << "unkown error" << endl;
+ }
+ }
+ }
+
+ mongo::mutex _safego;
+ bool _started;
+
+ } replicaSetMonitorWatcher;
+
+ string seedString( const vector<HostAndPort>& servers ){
+ string seedStr;
+ for ( unsigned i = 0; i < servers.size(); i++ ){
+ seedStr += servers[i].toString();
+ if( i < servers.size() - 1 ) seedStr += ",";
+ }
+
+ return seedStr;
+ }
+
+ ReplicaSetMonitor::ReplicaSetMonitor( const string& name , const vector<HostAndPort>& servers )
+ : _lock( "ReplicaSetMonitor instance" ) , _checkConnectionLock( "ReplicaSetMonitor check connection lock" ), _name( name ) , _master(-1), _nextSlave(0) {
+
+ uassert( 13642 , "need at least 1 node for a replica set" , servers.size() > 0 );
+
+ if ( _name.size() == 0 ) {
+ warning() << "replica set name empty, first node: " << servers[0] << endl;
+ }
+
+ log() << "starting new replica set monitor for replica set " << _name << " with seed of " << seedString( servers ) << endl;
+
+ string errmsg;
+ for ( unsigned i = 0; i < servers.size(); i++ ) {
+
+ // Don't check servers we already have
+ if( _find_inlock( servers[i] ) >= 0 ) continue;
+
+ auto_ptr<DBClientConnection> conn( new DBClientConnection( true , 0, 5.0 ) );
+ try{
+ if( ! conn->connect( servers[i] , errmsg ) ){
+ throw DBException( errmsg, 15928 );
+ }
+ log() << "successfully connected to seed " << servers[i] << " for replica set " << this->_name << endl;
+ }
+ catch( DBException& e ){
+ log() << "error connecting to seed " << servers[i] << causedBy( e ) << endl;
+ // skip seeds that don't work
+ continue;
+ }
+
+ string maybePrimary;
+ _checkConnection( conn.get(), maybePrimary, false, -1 );
+ }
+
+ // Check everything to get the first data
+ _check( true );
+
+ log() << "replica set monitor for replica set " << _name << " started, address is " << getServerAddress() << endl;
+
+ }
+
+ ReplicaSetMonitor::~ReplicaSetMonitor() {
+ _nodes.clear();
+ _master = -1;
+ }
+
+ ReplicaSetMonitorPtr ReplicaSetMonitor::get( const string& name , const vector<HostAndPort>& servers ) {
+ scoped_lock lk( _setsLock );
+ ReplicaSetMonitorPtr& m = _sets[name];
+ if ( ! m )
+ m.reset( new ReplicaSetMonitor( name , servers ) );
+
+ replicaSetMonitorWatcher.safeGo();
+
+ return m;
+ }
+
+ ReplicaSetMonitorPtr ReplicaSetMonitor::get( const string& name ) {
+ scoped_lock lk( _setsLock );
+ map<string,ReplicaSetMonitorPtr>::const_iterator i = _sets.find( name );
+ if ( i == _sets.end() )
+ return ReplicaSetMonitorPtr();
+ return i->second;
+ }
+
+
+ void ReplicaSetMonitor::checkAll( bool checkAllSecondaries ) {
+ set<string> seen;
+
+ while ( true ) {
+ ReplicaSetMonitorPtr m;
+ {
+ scoped_lock lk( _setsLock );
+ for ( map<string,ReplicaSetMonitorPtr>::iterator i=_sets.begin(); i!=_sets.end(); ++i ) {
+ string name = i->first;
+ if ( seen.count( name ) )
+ continue;
+ LOG(1) << "checking replica set: " << name << endl;
+ seen.insert( name );
+ m = i->second;
+ break;
+ }
+ }
+
+ if ( ! m )
+ break;
+
+ m->check( checkAllSecondaries );
+ }
+
+
+ }
+
+ void ReplicaSetMonitor::setConfigChangeHook( ConfigChangeHook hook ) {
+ massert( 13610 , "ConfigChangeHook already specified" , _hook == 0 );
+ _hook = hook;
+ }
+
+ string ReplicaSetMonitor::getServerAddress() const {
+ scoped_lock lk( _lock );
+ return _getServerAddress_inlock();
+ }
+
+ string ReplicaSetMonitor::_getServerAddress_inlock() const {
+ StringBuilder ss;
+ if ( _name.size() )
+ ss << _name << "/";
+
+ for ( unsigned i=0; i<_nodes.size(); i++ ) {
+ if ( i > 0 )
+ ss << ",";
+ ss << _nodes[i].addr.toString();
+ }
+
+ return ss.str();
+ }
+
+ bool ReplicaSetMonitor::contains( const string& server ) const {
+ scoped_lock lk( _lock );
+ for ( unsigned i=0; i<_nodes.size(); i++ ) {
+ if ( _nodes[i].addr == server )
+ return true;
+ }
+ return false;
+ }
+
+
+ void ReplicaSetMonitor::notifyFailure( const HostAndPort& server ) {
+ scoped_lock lk( _lock );
+ if ( _master >= 0 && _master < (int)_nodes.size() ) {
+ if ( server == _nodes[_master].addr ) {
+ _nodes[_master].ok = false;
+ _master = -1;
+ }
+ }
+ }
+
+
+
+ HostAndPort ReplicaSetMonitor::getMaster() {
+ {
+ scoped_lock lk( _lock );
+ if ( _master >= 0 && _nodes[_master].ok )
+ return _nodes[_master].addr;
+ }
+
+ _check( false );
+
+ scoped_lock lk( _lock );
+ uassert( 10009 , str::stream() << "ReplicaSetMonitor no master found for set: " << _name , _master >= 0 );
+ return _nodes[_master].addr;
+ }
+
+ HostAndPort ReplicaSetMonitor::getSlave( const HostAndPort& prev ) {
+ // make sure it's valid
+
+ bool wasFound = false;
+ bool wasMaster = false;
+
+ // This is always true, since checked in port()
+ assert( prev.port() >= 0 );
+ if( prev.host().size() ){
+ scoped_lock lk( _lock );
+ for ( unsigned i=0; i<_nodes.size(); i++ ) {
+ if ( prev != _nodes[i].addr )
+ continue;
+
+ wasFound = true;
+
+ if ( _nodes[i].okForSecondaryQueries() )
+ return prev;
+
+ wasMaster = _nodes[i].ok && ! _nodes[i].secondary;
+
+ break;
+ }
+ }
+
+ if( prev.host().size() ){
+ if( wasFound ){ LOG(1) << "slave '" << prev << ( wasMaster ? "' is master node, trying to find another node" :
+ "' is no longer ok to use" ) << endl; }
+ else{ LOG(1) << "slave '" << prev << "' was not found in the replica set" << endl; }
+ }
+ else LOG(1) << "slave '" << prev << "' is not initialized or invalid" << endl;
+
+ return getSlave();
+ }
+
+ HostAndPort ReplicaSetMonitor::getSlave() {
+ LOG(2) << "dbclient_rs getSlave " << getServerAddress() << endl;
+
+ scoped_lock lk( _lock );
+
+ for ( unsigned ii = 0; ii < _nodes.size(); ii++ ) {
+ _nextSlave = ( _nextSlave + 1 ) % _nodes.size();
+ if ( _nextSlave != _master ) {
+ if ( _nodes[ _nextSlave ].okForSecondaryQueries() )
+ return _nodes[ _nextSlave ].addr;
+ LOG(2) << "dbclient_rs getSlave not selecting " << _nodes[_nextSlave] << ", not currently okForSecondaryQueries" << endl;
+ }
+ }
+ uassert(15899, str::stream() << "No suitable member found for slaveOk query in replica set: " << _name, _master >= 0 && _nodes[_master].ok);
+
+ // Fall back to primary
+ assert( static_cast<unsigned>(_master) < _nodes.size() );
+ LOG(2) << "dbclient_rs getSlave no member in secondary state found, returning primary " << _nodes[ _master ] << endl;
+ return _nodes[_master].addr;
+ }
+
+ /**
+ * notify the monitor that the server has failed
+ */
+ void ReplicaSetMonitor::notifySlaveFailure( const HostAndPort& server ) {
+ int x = _find( server );
+ if ( x >= 0 ) {
+ scoped_lock lk( _lock );
+ _nodes[x].ok = false;
+ }
+ }
+
+ void ReplicaSetMonitor::_checkStatus(DBClientConnection *conn) {
+ BSONObj status;
+
+ if (!conn->runCommand("admin", BSON("replSetGetStatus" << 1), status) ) {
+ LOG(1) << "dbclient_rs replSetGetStatus failed" << endl;
+ return;
+ }
+ if( !status.hasField("members") ) {
+ log() << "dbclient_rs error expected members field in replSetGetStatus result" << endl;
+ return;
+ }
+ if( status["members"].type() != Array) {
+ log() << "dbclient_rs error expected members field in replSetGetStatus result to be an array" << endl;
+ return;
+ }
+
+ BSONObjIterator hi(status["members"].Obj());
+ while (hi.more()) {
+ BSONObj member = hi.next().Obj();
+ string host = member["name"].String();
+
+ int m = -1;
+ if ((m = _find(host)) < 0) {
+ LOG(1) << "dbclient_rs _checkStatus couldn't _find(" << host << ')' << endl;
+ continue;
+ }
+
+ double state = member["state"].Number();
+ if (member["health"].Number() == 1 && (state == 1 || state == 2)) {
+ LOG(1) << "dbclient_rs nodes["<<m<<"].ok = true " << host << endl;
+ scoped_lock lk( _lock );
+ _nodes[m].ok = true;
+ }
+ else {
+ LOG(1) << "dbclient_rs nodes["<<m<<"].ok = false " << host << endl;
+ scoped_lock lk( _lock );
+ _nodes[m].ok = false;
+ }
+ }
+ }
+
+ NodeDiff ReplicaSetMonitor::_getHostDiff_inlock( const BSONObj& hostList ){
+
+ NodeDiff diff;
+ set<int> nodesFound;
+
+ int index = 0;
+ BSONObjIterator hi( hostList );
+ while( hi.more() ){
+
+ string toCheck = hi.next().String();
+ int nodeIndex = _find_inlock( toCheck );
+
+ // Node-to-add
+ if( nodeIndex < 0 ) diff.first.insert( toCheck );
+ else nodesFound.insert( nodeIndex );
+
+ index++;
+ }
+
+ for( size_t i = 0; i < _nodes.size(); i++ ){
+ if( nodesFound.find( static_cast<int>(i) ) == nodesFound.end() ) diff.second.insert( static_cast<int>(i) );
+ }
+
+ return diff;
+ }
+
+ bool ReplicaSetMonitor::_shouldChangeHosts( const BSONObj& hostList, bool inlock ){
+
+ int origHosts = 0;
+ if( ! inlock ){
+ scoped_lock lk( _lock );
+ origHosts = _nodes.size();
+ }
+ else origHosts = _nodes.size();
+ int numHosts = 0;
+ bool changed = false;
+
+ BSONObjIterator hi(hostList);
+ while ( hi.more() ) {
+ string toCheck = hi.next().String();
+
+ numHosts++;
+ int index = 0;
+ if( ! inlock ) index = _find( toCheck );
+ else index = _find_inlock( toCheck );
+
+ if ( index >= 0 ) continue;
+
+ changed = true;
+ break;
+ }
+
+ return changed || origHosts != numHosts;
+
+ }
+
+ void ReplicaSetMonitor::_checkHosts( const BSONObj& hostList, bool& changed ) {
+
+ // Fast path, still requires intermittent locking
+ if( ! _shouldChangeHosts( hostList, false ) ){
+ changed = false;
+ return;
+ }
+
+ // Slow path, double-checked though
+ scoped_lock lk( _lock );
+
+ // Our host list may have changed while we were waiting for another thread,
+ // so double-check here.
+ // TODO: Do we really need this much protection? This should be pretty rare and not triggered
+ // from lots of threads; duplicating old behavior for safety.
+ if( ! _shouldChangeHosts( hostList, true ) ){
+ changed = false;
+ return;
+ }
+
+ // LogLevel can be pretty low, since replica set reconfiguration should be pretty rare and we
+ // want to record our changes
+ log() << "changing hosts to " << hostList << " from " << _getServerAddress_inlock() << endl;
+
+ NodeDiff diff = _getHostDiff_inlock( hostList );
+ set<string> added = diff.first;
+ set<int> removed = diff.second;
+
+ assert( added.size() > 0 || removed.size() > 0 );
+ changed = true;
+
+ // Delete from the end so we don't invalidate indices as we delete (the removal indices are ascending)
+ for( set<int>::reverse_iterator i = removed.rbegin(), end = removed.rend(); i != end; ++i ){
+
+ log() << "erasing host " << _nodes[ *i ] << " from replica set " << this->_name << endl;
+
+ _nodes.erase( _nodes.begin() + *i );
+ }
+
+ // Add new nodes
+ for( set<string>::iterator i = added.begin(), end = added.end(); i != end; ++i ){
+
+ log() << "trying to add new host " << *i << " to replica set " << this->_name << endl;
+
+ // Connect to new node
+ HostAndPort h( *i );
+ DBClientConnection * newConn = new DBClientConnection( true, 0, 5.0 );
+
+ string errmsg;
+ try{
+ if( ! newConn->connect( h , errmsg ) ){
+ throw DBException( errmsg, 15927 );
+ }
+ log() << "successfully connected to new host " << *i << " in replica set " << this->_name << endl;
+ }
+ catch( DBException& e ){
+ warning() << "cannot connect to new host " << *i << " to replica set " << this->_name << causedBy( e ) << endl;
+ delete newConn;
+ newConn = NULL;
+ }
+
+ _nodes.push_back( Node( h , newConn ) );
+ }
+
+ }
+
+
+
+ bool ReplicaSetMonitor::_checkConnection( DBClientConnection * c , string& maybePrimary , bool verbose , int nodesOffset ) {
+ scoped_lock lk( _checkConnectionLock );
+ bool isMaster = false;
+ bool changed = false;
+ try {
+ Timer t;
+ BSONObj o;
+ c->isMaster(isMaster, &o);
+ if ( o["setName"].type() != String || o["setName"].String() != _name ) {
+ warning() << "node: " << c->getServerAddress() << " isn't a part of set: " << _name
+ << " ismaster: " << o << endl;
+ if ( nodesOffset >= 0 )
+ _nodes[nodesOffset].ok = false;
+ return false;
+ }
+
+ if ( nodesOffset >= 0 ) {
+ _nodes[nodesOffset].pingTimeMillis = t.millis();
+ _nodes[nodesOffset].hidden = o["hidden"].trueValue();
+ _nodes[nodesOffset].secondary = o["secondary"].trueValue();
+ _nodes[nodesOffset].ismaster = o["ismaster"].trueValue();
+
+ _nodes[nodesOffset].lastIsMaster = o.copy();
+ }
+
+ log( ! verbose ) << "ReplicaSetMonitor::_checkConnection: " << c->toString() << ' ' << o << endl;
+
+ // add other nodes
+ BSONArrayBuilder b;
+ if ( o["hosts"].type() == Array ) {
+ if ( o["primary"].type() == String )
+ maybePrimary = o["primary"].String();
+
+ BSONObjIterator it( o["hosts"].Obj() );
+ while( it.more() ) b.append( it.next() );
+ }
+ if (o.hasField("passives") && o["passives"].type() == Array) {
+ BSONObjIterator it( o["passives"].Obj() );
+ while( it.more() ) b.append( it.next() );
+ }
+
+ _checkHosts( b.arr(), changed);
+ _checkStatus(c);
+
+
+ }
+ catch ( std::exception& e ) {
+ log( ! verbose ) << "ReplicaSetMonitor::_checkConnection: caught exception " << c->toString() << ' ' << e.what() << endl;
+ if ( nodesOffset >= 0 ) // nodesOffset may be -1 (e.g. seed check from the constructor)
+ _nodes[nodesOffset].ok = false;
+ }
+
+ if ( changed && _hook )
+ _hook( this );
+
+ return isMaster;
+ }
+
+ void ReplicaSetMonitor::_check( bool checkAllSecondaries ) {
+
+ bool triedQuickCheck = false;
+
+ LOG(1) << "_check : " << getServerAddress() << endl;
+
+ int newMaster = -1;
+
+ for ( int retry = 0; retry < 2; retry++ ) {
+ for ( unsigned i=0; i<_nodes.size(); i++ ) {
+ shared_ptr<DBClientConnection> c;
+ {
+ scoped_lock lk( _lock );
+ c = _nodes[i].conn;
+ }
+
+ string maybePrimary;
+ if ( _checkConnection( c.get() , maybePrimary , retry , i ) ) {
+ _master = i;
+ newMaster = i;
+ if ( ! checkAllSecondaries )
+ return;
+ }
+
+ if ( ! triedQuickCheck && maybePrimary.size() ) {
+ int x = _find( maybePrimary );
+ if ( x >= 0 ) {
+ triedQuickCheck = true;
+ string dummy;
+ shared_ptr<DBClientConnection> testConn;
+ {
+ scoped_lock lk( _lock );
+ testConn = _nodes[x].conn;
+ }
+ if ( _checkConnection( testConn.get() , dummy , false , x ) ) {
+ _master = x;
+ newMaster = x;
+ if ( ! checkAllSecondaries )
+ return;
+ }
+ }
+ }
+
+ }
+
+ if ( newMaster >= 0 )
+ return;
+
+ sleepsecs(1);
+ }
+
+ }
+
+ void ReplicaSetMonitor::check( bool checkAllSecondaries ) {
+ // first see if the current master is fine
+ if ( _master >= 0 ) {
+ string temp;
+ if ( _checkConnection( _nodes[_master].conn.get() , temp , false , _master ) ) {
+ if ( ! checkAllSecondaries ) {
+ // current master is fine, so we're done
+ return;
+ }
+ }
+ }
+
+ // we either have no master, or the current is dead
+ _check( checkAllSecondaries );
+ }
+
+ int ReplicaSetMonitor::_find( const string& server ) const {
+ scoped_lock lk( _lock );
+ return _find_inlock( server );
+ }
+
+ int ReplicaSetMonitor::_find_inlock( const string& server ) const {
+ for ( unsigned i=0; i<_nodes.size(); i++ )
+ if ( _nodes[i].addr == server )
+ return i;
+ return -1;
+ }
+
+
+ int ReplicaSetMonitor::_find( const HostAndPort& server ) const {
+ scoped_lock lk( _lock );
+ for ( unsigned i=0; i<_nodes.size(); i++ )
+ if ( _nodes[i].addr == server )
+ return i;
+ return -1;
+ }
+
+ void ReplicaSetMonitor::appendInfo( BSONObjBuilder& b ) const {
+ scoped_lock lk( _lock );
+ BSONArrayBuilder hosts( b.subarrayStart( "hosts" ) );
+ for ( unsigned i=0; i<_nodes.size(); i++ ) {
+ hosts.append( BSON( "addr" << _nodes[i].addr <<
+ // "lastIsMaster" << _nodes[i].lastIsMaster << // this is a potential race, so only used when debugging
+ "ok" << _nodes[i].ok <<
+ "ismaster" << _nodes[i].ismaster <<
+ "hidden" << _nodes[i].hidden <<
+ "secondary" << _nodes[i].secondary <<
+ "pingTimeMillis" << _nodes[i].pingTimeMillis ) );
+
+ }
+ hosts.done();
+
+ b.append( "master" , _master );
+ b.append( "nextSlave" , _nextSlave );
+ }
+
+
+ mongo::mutex ReplicaSetMonitor::_setsLock( "ReplicaSetMonitor" );
+ map<string,ReplicaSetMonitorPtr> ReplicaSetMonitor::_sets;
+ ReplicaSetMonitor::ConfigChangeHook ReplicaSetMonitor::_hook;
+ // --------------------------------
+ // ----- DBClientReplicaSet ---------
+ // --------------------------------
+
+ DBClientReplicaSet::DBClientReplicaSet( const string& name , const vector<HostAndPort>& servers, double so_timeout )
+ : _monitor( ReplicaSetMonitor::get( name , servers ) ),
+ _so_timeout( so_timeout ) {
+ }
+
+ DBClientReplicaSet::~DBClientReplicaSet() {
+ }
+
+ DBClientConnection * DBClientReplicaSet::checkMaster() {
+ HostAndPort h = _monitor->getMaster();
+
+ if ( h == _masterHost && _master ) {
+ // a master is selected. let's just make sure connection didn't die
+ if ( ! _master->isFailed() )
+ return _master.get();
+ _monitor->notifyFailure( _masterHost );
+ }
+
+ _masterHost = _monitor->getMaster();
+ _master.reset( new DBClientConnection( true , this , _so_timeout ) );
+ string errmsg;
+ if ( ! _master->connect( _masterHost , errmsg ) ) {
+ _monitor->notifyFailure( _masterHost );
+ uasserted( 13639 , str::stream() << "can't connect to new replica set master [" << _masterHost.toString() << "] err: " << errmsg );
+ }
+ _auth( _master.get() );
+ return _master.get();
+ }
+
+ DBClientConnection * DBClientReplicaSet::checkSlave() {
+ HostAndPort h = _monitor->getSlave( _slaveHost );
+
+ if ( h == _slaveHost && _slave ) {
+ if ( ! _slave->isFailed() )
+ return _slave.get();
+ _monitor->notifySlaveFailure( _slaveHost );
+ _slaveHost = _monitor->getSlave();
+ }
+ else {
+ _slaveHost = h;
+ }
+
+ _slave.reset( new DBClientConnection( true , this , _so_timeout ) );
+ _slave->connect( _slaveHost );
+ _auth( _slave.get() );
+ return _slave.get();
+ }
+
+
+ void DBClientReplicaSet::_auth( DBClientConnection * conn ) {
+ for ( list<AuthInfo>::iterator i=_auths.begin(); i!=_auths.end(); ++i ) {
+ const AuthInfo& a = *i;
+ string errmsg;
+ if ( ! conn->auth( a.dbname , a.username , a.pwd , errmsg, a.digestPassword ) )
+ warning() << "cached auth failed for set: " << _monitor->getName() << " db: " << a.dbname << " user: " << a.username << endl;
+
+ }
+
+ }
+
+ DBClientConnection& DBClientReplicaSet::masterConn() {
+ return *checkMaster();
+ }
+
+ DBClientConnection& DBClientReplicaSet::slaveConn() {
+ return *checkSlave();
+ }
+
+ bool DBClientReplicaSet::connect() {
+ try {
+ checkMaster();
+ }
+ catch (AssertionException&) {
+ if (_master && _monitor) {
+ _monitor->notifyFailure(_masterHost);
+ }
+ return false;
+ }
+ return true;
+ }
+
+ bool DBClientReplicaSet::auth(const string &dbname, const string &username, const string &pwd, string& errmsg, bool digestPassword ) {
+ DBClientConnection * m = checkMaster();
+
+ // first make sure it actually works
+ if( ! m->auth(dbname, username, pwd, errmsg, digestPassword ) )
+ return false;
+
+ // now that it works, save the credentials so we can re-auth against a new node later
+ _auths.push_back( AuthInfo( dbname , username , pwd , digestPassword ) );
+ return true;
+ }
+
+ // ------------- simple functions -----------------
+
+ void DBClientReplicaSet::insert( const string &ns , BSONObj obj , int flags) {
+ checkMaster()->insert(ns, obj, flags);
+ }
+
+ void DBClientReplicaSet::insert( const string &ns, const vector< BSONObj >& v , int flags) {
+ checkMaster()->insert(ns, v, flags);
+ }
+
+ void DBClientReplicaSet::remove( const string &ns , Query obj , bool justOne ) {
+ checkMaster()->remove(ns, obj, justOne);
+ }
+
+ void DBClientReplicaSet::update( const string &ns , Query query , BSONObj obj , bool upsert , bool multi ) {
+ return checkMaster()->update(ns, query, obj, upsert,multi);
+ }
+
+ auto_ptr<DBClientCursor> DBClientReplicaSet::query(const string &ns, Query query, int nToReturn, int nToSkip,
+ const BSONObj *fieldsToReturn, int queryOptions, int batchSize) {
+
+ if ( queryOptions & QueryOption_SlaveOk ) {
+ // we're ok sending to a slave
+ // we'll try up to 3 slave attempts before just using master
+ // checkSlave will try a different slave automatically after a failure
+ for ( int i=0; i<3; i++ ) {
+ try {
+ return checkSlaveQueryResult( checkSlave()->query(ns,query,nToReturn,nToSkip,fieldsToReturn,queryOptions,batchSize) );
+ }
+ catch ( DBException &e ) {
+ LOG(1) << "can't query replica set slave " << i << " : " << _slaveHost << causedBy( e ) << endl;
+ }
+ }
+ }
+
+ return checkMaster()->query(ns,query,nToReturn,nToSkip,fieldsToReturn,queryOptions,batchSize);
+ }
+
+ BSONObj DBClientReplicaSet::findOne(const string &ns, const Query& query, const BSONObj *fieldsToReturn, int queryOptions) {
+ if ( queryOptions & QueryOption_SlaveOk ) {
+ // we're ok sending to a slave
+ // we'll try up to 3 slave attempts before just using master
+ // checkSlave will try a different slave automatically after a failure
+ for ( int i=0; i<3; i++ ) {
+ try {
+ return checkSlave()->findOne(ns,query,fieldsToReturn,queryOptions);
+ }
+ catch ( DBException &e ) {
+ LOG(1) << "can't findone replica set slave " << i << " : " << _slaveHost << causedBy( e ) << endl;
+ }
+ }
+ }
+
+ return checkMaster()->findOne(ns,query,fieldsToReturn,queryOptions);
+ }
+
+ void DBClientReplicaSet::killCursor( long long cursorID ) {
+ // we should never call killCursor on a replica set connection
+ // since we don't know which server it belongs to
+ // can't assume master because of slave ok
+ // and can have a cursor survive a master change
+ assert(0);
+ }
+
+ void DBClientReplicaSet::isntMaster() {
+ log() << "got not master for: " << _masterHost << endl;
+ _monitor->notifyFailure( _masterHost );
+ _master.reset();
+ }
+
+ auto_ptr<DBClientCursor> DBClientReplicaSet::checkSlaveQueryResult( auto_ptr<DBClientCursor> result ){
+ BSONObj error;
+ bool isError = result->peekError( &error );
+ if( ! isError ) return result;
+
+ // We only check for "not master or secondary" errors here
+
+ // If the error code here ever changes, we need to change this code also
+ BSONElement code = error["code"];
+ if( code.isNumber() && code.Int() == 13436 /* not master or secondary */ ){
+ isntSecondary();
+ throw DBException( str::stream() << "slave " << _slaveHost.toString() << " is no longer secondary", 14812 );
+ }
+
+ return result;
+ }
+
+ void DBClientReplicaSet::isntSecondary() {
+ log() << "slave no longer has secondary status: " << _slaveHost << endl;
+ // Failover to next slave
+ _monitor->notifySlaveFailure( _slaveHost );
+ _slave.reset();
+ }
+
+ void DBClientReplicaSet::say( Message& toSend, bool isRetry ) {
+
+ if( ! isRetry )
+ _lazyState = LazyState();
+
+ int lastOp = -1;
+ bool slaveOk = false;
+
+ if ( ( lastOp = toSend.operation() ) == dbQuery ) {
+ // TODO: might be possible to do this faster by changing api
+ DbMessage dm( toSend );
+ QueryMessage qm( dm );
+ if ( ( slaveOk = ( qm.queryOptions & QueryOption_SlaveOk ) ) ) {
+
+ for ( int i = _lazyState._retries; i < 3; i++ ) {
+ try {
+ DBClientConnection* slave = checkSlave();
+ slave->say( toSend );
+
+ _lazyState._lastOp = lastOp;
+ _lazyState._slaveOk = slaveOk;
+ _lazyState._retries = i;
+ _lazyState._lastClient = slave;
+ return;
+ }
+ catch ( DBException &e ) {
+ LOG(1) << "can't callLazy replica set slave " << i << " : " << _slaveHost << causedBy( e ) << endl;
+ }
+ }
+ }
+ }
+
+ DBClientConnection* master = checkMaster();
+ master->say( toSend );
+
+ _lazyState._lastOp = lastOp;
+ _lazyState._slaveOk = slaveOk;
+ _lazyState._retries = 3;
+ _lazyState._lastClient = master;
+ return;
+ }
+
+ bool DBClientReplicaSet::recv( Message& m ) {
+
+ assert( _lazyState._lastClient );
+
+ // TODO: It would be nice if we could easily wrap a conn error as a result error
+ try {
+ return _lazyState._lastClient->recv( m );
+ }
+ catch( DBException& e ){
+ log() << "could not receive data from " << _lazyState._lastClient << causedBy( e ) << endl;
+ return false;
+ }
+ }
+
+ void DBClientReplicaSet::checkResponse( const char* data, int nReturned, bool* retry, string* targetHost ){
+
+ // For now, do exactly as we did before, so as not to break things. In general though, we
+ // should fix this so checkResponse has a more consistent contract.
+ if( ! retry ){
+ if( _lazyState._lastClient )
+ return _lazyState._lastClient->checkResponse( data, nReturned );
+ else
+ return checkMaster()->checkResponse( data, nReturned );
+ }
+
+ *retry = false;
+ if( targetHost && _lazyState._lastClient ) *targetHost = _lazyState._lastClient->getServerAddress();
+ else if (targetHost) *targetHost = "";
+
+ if( ! _lazyState._lastClient ) return;
+ if( nReturned != 1 && nReturned != -1 ) return;
+
+ BSONObj dataObj;
+ if( nReturned == 1 ) dataObj = BSONObj( data );
+
+ // Check if we should retry here
+ if( _lazyState._lastOp == dbQuery && _lazyState._slaveOk ){
+
+ // Check the error code for a slave not secondary error
+ if( nReturned == -1 ||
+ ( hasErrField( dataObj ) && ! dataObj["code"].eoo() && dataObj["code"].Int() == 13436 ) ){
+
+ bool wasMaster = false;
+ if( _lazyState._lastClient == _slave.get() ){
+ isntSecondary();
+ }
+ else if( _lazyState._lastClient == _master.get() ){
+ wasMaster = true;
+ isntMaster();
+ }
+ else
+ warning() << "passed " << dataObj << " but last rs client " << _lazyState._lastClient->toString() << " is not master or secondary" << endl;
+
+ if( _lazyState._retries < 3 ){
+ _lazyState._retries++;
+ *retry = true;
+ }
+ else{
+ (void)wasMaster; // silence set-but-not-used warning
+ // assert( wasMaster );
+ // printStackTrace();
+ log() << "too many retries (" << _lazyState._retries << "), could not get data from replica set" << endl;
+ }
+ }
+ }
+ }
+
+
+ bool DBClientReplicaSet::call( Message &toSend, Message &response, bool assertOk , string * actualServer ) {
+ const char * ns = 0;
+
+ if ( toSend.operation() == dbQuery ) {
+ // TODO: might be possible to do this faster by changing api
+ DbMessage dm( toSend );
+ QueryMessage qm( dm );
+ ns = qm.ns;
+
+ if ( qm.queryOptions & QueryOption_SlaveOk ) {
+ for ( int i=0; i<3; i++ ) {
+ try {
+ DBClientConnection* s = checkSlave();
+ if ( actualServer )
+ *actualServer = s->getServerAddress();
+ return s->call( toSend , response , assertOk );
+ }
+ catch ( DBException &e ) {
+ LOG(1) << "can't call replica set slave " << i << " : " << _slaveHost << causedBy( e ) << endl;
+ if ( actualServer )
+ *actualServer = "";
+ }
+ }
+ }
+ }
+
+ DBClientConnection* m = checkMaster();
+ if ( actualServer )
+ *actualServer = m->getServerAddress();
+
+ if ( ! m->call( toSend , response , assertOk ) )
+ return false;
+
+ if ( ns ) {
+ QueryResult * res = (QueryResult*)response.singleData();
+ if ( res->nReturned == 1 ) {
+ BSONObj x(res->data() );
+ if ( str::contains( ns , "$cmd" ) ) {
+ if ( isNotMasterErrorString( x["errmsg"] ) )
+ isntMaster();
+ }
+ else {
+ if ( isNotMasterErrorString( getErrField( x ) ) )
+ isntMaster();
+ }
+ }
+ }
+
+ return true;
+ }
+
+}
diff --git a/src/mongo/client/dbclient_rs.h b/src/mongo/client/dbclient_rs.h
new file mode 100644
index 00000000000..0edcea42716
--- /dev/null
+++ b/src/mongo/client/dbclient_rs.h
@@ -0,0 +1,355 @@
+/** @file dbclient_rs.h Connect to a Replica Set, from C++ */
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "../pch.h"
+#include "dbclient.h"
+
+namespace mongo {
+
+ class ReplicaSetMonitor;
+ typedef shared_ptr<ReplicaSetMonitor> ReplicaSetMonitorPtr;
+ typedef pair<set<string>,set<int> > NodeDiff;
+
+ /**
+ * manages state about a replica set for the client
+ * keeps tabs on which node is master and which slaves are up
+ * can hand a slave to someone for SLAVE_OK queries
+ * one instance per process per replica set
+ * TODO: we might be able to use a regular Node * to avoid _lock
+ */
+ class ReplicaSetMonitor {
+ public:
+
+ typedef boost::function1<void,const ReplicaSetMonitor*> ConfigChangeHook;
+
+ /**
+ * gets a cached Monitor per name or will create if doesn't exist
+ */
+ static ReplicaSetMonitorPtr get( const string& name , const vector<HostAndPort>& servers );
+
+ /**
+ * gets a cached Monitor per name or will return none if it doesn't exist
+ */
+ static ReplicaSetMonitorPtr get( const string& name );
+
+
+ /**
+ * checks all sets for current master and new secondaries
+ * usually only called from a BackgroundJob
+ */
+ static void checkAll( bool checkAllSecondaries );
+
+ /**
+ * this is called whenever the config of any replica set changes
+ * currently only 1 globally
+ * asserts if one already exists
+ * ownership passes to ReplicaSetMonitor and the hook will actually never be deleted
+ */
+ static void setConfigChangeHook( ConfigChangeHook hook );
+
+ ~ReplicaSetMonitor();
+
+ /** @return HostAndPort or throws an exception */
+ HostAndPort getMaster();
+
+ /**
+ * notify the monitor that the server has failed
+ */
+ void notifyFailure( const HostAndPort& server );
+
+ /** @return prev if it's still ok, and if not returns a random slave that is ok for reads */
+ HostAndPort getSlave( const HostAndPort& prev );
+
+ /** @return a random slave that is ok for reads */
+ HostAndPort getSlave();
+
+
+ /**
+ * notify the monitor that the server has failed
+ */
+ void notifySlaveFailure( const HostAndPort& server );
+
+ /**
+ * checks for current master and new secondaries
+ */
+ void check( bool checkAllSecondaries );
+
+ string getName() const { return _name; }
+
+ string getServerAddress() const;
+
+ bool contains( const string& server ) const;
+
+ void appendInfo( BSONObjBuilder& b ) const;
+
+ private:
+ /**
+ * This populates a list of hosts from the list of seeds (discarding the
+ * seed list).
+ * @param name set name
+ * @param servers seeds
+ */
+ ReplicaSetMonitor( const string& name , const vector<HostAndPort>& servers );
+
+ void _check( bool checkAllSecondaries );
+
+ /**
+ * Use replSetGetStatus command to make sure hosts in host list are up
+ * and readable. Sets Node::ok appropriately.
+ */
+ void _checkStatus(DBClientConnection *conn);
+
+ /**
+ * Add array of hosts to host list. Doesn't do anything if hosts are
+ * already in host list.
+ * @param hostList the list of hosts to add
+ * @param changed set to true if the host list was modified (hosts added or removed)
+ */
+ void _checkHosts(const BSONObj& hostList, bool& changed);
+
+ /**
+ * Updates host list.
+ * @param c the connection to check
+ * @param maybePrimary OUT
+ * @param verbose
+ * @param nodesOffset - offset into _nodes array, -1 for not in it
+ * @return if the connection is good
+ */
+ bool _checkConnection( DBClientConnection * c , string& maybePrimary , bool verbose , int nodesOffset );
+
+ string _getServerAddress_inlock() const;
+
+ NodeDiff _getHostDiff_inlock( const BSONObj& hostList );
+ bool _shouldChangeHosts( const BSONObj& hostList, bool inlock );
+
+
+ int _find( const string& server ) const ;
+ int _find_inlock( const string& server ) const ;
+ int _find( const HostAndPort& server ) const ;
+
+ mutable mongo::mutex _lock; // protects _nodes
+ mutable mongo::mutex _checkConnectionLock;
+
+ string _name;
+
+ // note these get copied around in the nodes vector so be sure to maintain copyable semantics here
+ struct Node {
+ Node( const HostAndPort& a , DBClientConnection* c )
+ : addr( a ) , conn(c) , ok(true) ,
+ ismaster(false), secondary( false ) , hidden( false ) , pingTimeMillis(0) {
+ ok = conn.get() == NULL;
+ }
+
+ bool okForSecondaryQueries() const {
+ return ok && secondary && ! hidden;
+ }
+
+ BSONObj toBSON() const {
+ return BSON( "addr" << addr.toString() <<
+ "isMaster" << ismaster <<
+ "secondary" << secondary <<
+ "hidden" << hidden <<
+ "ok" << ok );
+ }
+
+ string toString() const {
+ return toBSON().toString();
+ }
+
+ HostAndPort addr;
+ shared_ptr<DBClientConnection> conn;
+
+ // if this node is in a failure state
+ // used for slave routing
+ // this is too simple, should make it better
+ bool ok;
+
+ // as reported by ismaster
+ BSONObj lastIsMaster;
+
+ bool ismaster;
+ bool secondary;
+ bool hidden;
+
+ int pingTimeMillis;
+
+ };
+
+ /**
+ * Host list.
+ */
+ vector<Node> _nodes;
+
+ int _master; // which node is the current master. -1 means no master is known
+ int _nextSlave; // which node is the current slave
+
+ static mongo::mutex _setsLock; // protects _sets
+ static map<string,ReplicaSetMonitorPtr> _sets; // set name to Monitor
+
+ static ConfigChangeHook _hook;
+ };
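+
+    /* Illustrative sketch only (not part of this header): monitors are obtained through the
+       static factory rather than constructed directly. The set name and seed hosts below are
+       placeholders:
+
+           vector<HostAndPort> seeds;
+           seeds.push_back( HostAndPort( "h1.example.net:27017" ) );
+           seeds.push_back( HostAndPort( "h2.example.net:27017" ) );
+           ReplicaSetMonitorPtr rsm = ReplicaSetMonitor::get( "rs0", seeds );
+           HostAndPort primary = rsm->getMaster();    // throws if no master can be found
+ */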
+
+ /** Use this class to connect to a replica set of servers. The class will manage
+ checking for which server in a replica set is master, and do failover automatically.
+
+ This can also be used to connect to replica pairs since pairs are a subset of sets
+
+ In a failover situation, expect at least one operation to return an error (throw
+ an exception) before the failover is complete. Operations are not retried.
+ */
+ class DBClientReplicaSet : public DBClientBase {
+
+ public:
+ /** Call connect() after constructing. autoReconnect is always on for DBClientReplicaSet connections. */
+ DBClientReplicaSet( const string& name , const vector<HostAndPort>& servers, double so_timeout=0 );
+ virtual ~DBClientReplicaSet();
+
+ /** Returns false if no member of the set was reachable, or none is
+ * master. Even when false is returned, you can still try to use this
+ * connection object; it will attempt to reconnect.
+ */
+ bool connect();
+
+ /** Authorize. Authorizes all nodes as needed
+ */
+ virtual bool auth(const string &dbname, const string &username, const string &pwd, string& errmsg, bool digestPassword = true );
+
+ // ----------- simple functions --------------
+
+ /** throws userassertion "no master found" */
+ virtual auto_ptr<DBClientCursor> query(const string &ns, Query query, int nToReturn = 0, int nToSkip = 0,
+ const BSONObj *fieldsToReturn = 0, int queryOptions = 0 , int batchSize = 0 );
+
+ /** throws userassertion "no master found" */
+ virtual BSONObj findOne(const string &ns, const Query& query, const BSONObj *fieldsToReturn = 0, int queryOptions = 0);
+
+ virtual void insert( const string &ns , BSONObj obj , int flags=0);
+
+ /** insert multiple objects. Note that single object insert is asynchronous, so this version
+ is only nominally faster and not worth a special effort to try to use. */
+ virtual void insert( const string &ns, const vector< BSONObj >& v , int flags=0);
+
+ virtual void remove( const string &ns , Query obj , bool justOne = 0 );
+
+ virtual void update( const string &ns , Query query , BSONObj obj , bool upsert = 0 , bool multi = 0 );
+
+ virtual void killCursor( long long cursorID );
+
+ // ---- access raw connections ----
+
+ DBClientConnection& masterConn();
+ DBClientConnection& slaveConn();
+
+ // ---- callback pieces -------
+
+ virtual void say( Message &toSend, bool isRetry = false );
+ virtual bool recv( Message &toRecv );
+ virtual void checkResponse( const char* data, int nReturned, bool* retry = NULL, string* targetHost = NULL );
+
+ /* this is the callback from our underlying connections to notify us that we got a "not master" error.
+ */
+ void isntMaster();
+
+ /* this is used to indicate we got a "not master or secondary" error from a secondary.
+ */
+ void isntSecondary();
+
+ // ----- status ------
+
+ virtual bool isFailed() const { return ! _master || _master->isFailed(); }
+
+ // ----- informational ----
+
+ double getSoTimeout() const { return _so_timeout; }
+
+ string toString() { return getServerAddress(); }
+
+ string getServerAddress() const { return _monitor->getServerAddress(); }
+
+ virtual ConnectionString::ConnectionType type() const { return ConnectionString::SET; }
+ virtual bool lazySupported() const { return true; }
+
+ // ---- low level ------
+
+ virtual bool call( Message &toSend, Message &response, bool assertOk=true , string * actualServer = 0 );
+ virtual bool callRead( Message& toSend , Message& response ) { return checkMaster()->callRead( toSend , response ); }
+
+
+ protected:
+ virtual void sayPiggyBack( Message &toSend ) { checkMaster()->say( toSend ); }
+
+ private:
+
+ // Used to simplify slave-handling logic on errors
+ auto_ptr<DBClientCursor> checkSlaveQueryResult( auto_ptr<DBClientCursor> result );
+
+ DBClientConnection * checkMaster();
+ DBClientConnection * checkSlave();
+
+ void _auth( DBClientConnection * conn );
+
+ ReplicaSetMonitorPtr _monitor;
+
+ HostAndPort _masterHost;
+ scoped_ptr<DBClientConnection> _master;
+
+ HostAndPort _slaveHost;
+ scoped_ptr<DBClientConnection> _slave;
+
+ double _so_timeout;
+
+ /**
+ * for storing authentication info
+ * fields are exactly for DBClientConnection::auth
+ */
+ struct AuthInfo {
+ AuthInfo( string d , string u , string p , bool di )
+ : dbname( d ) , username( u ) , pwd( p ) , digestPassword( di ) {}
+ string dbname;
+ string username;
+ string pwd;
+ bool digestPassword;
+ };
+
+ // we need to store these so that when we connect to a new node on failure
+ // we can re-auth
+ // this could be a security issue, as the password is stored in memory
+ // not sure if/how we should handle this
+ list<AuthInfo> _auths;
+
+ protected:
+
+ /**
+ * for storing (non-threadsafe) information between lazy calls
+ */
+ class LazyState {
+ public:
+ LazyState() : _lastClient( NULL ), _lastOp( -1 ), _slaveOk( false ), _retries( 0 ) {}
+ DBClientConnection* _lastClient;
+ int _lastOp;
+ bool _slaveOk;
+ int _retries;
+
+ } _lazyState;
+
+ };
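+
+    /* Illustrative sketch only (not part of this header): typical use of DBClientReplicaSet.
+       The set name, host names and namespace are placeholders:
+
+           vector<HostAndPort> servers;
+           servers.push_back( HostAndPort( "h1.example.net:27017" ) );
+           servers.push_back( HostAndPort( "h2.example.net:27017" ) );
+           DBClientReplicaSet conn( "rs0", servers );
+           if ( ! conn.connect() ) {
+               cout << "no member of the set is reachable" << endl;
+           }
+           else {
+               conn.insert( "test.people", BSON( "name" << "sara" ) );
+               BSONObj doc = conn.findOne( "test.people", QUERY( "name" << "sara" ),
+                                           0, QueryOption_SlaveOk );   // may be served by a secondary
+           }
+ */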
+
+
+}
diff --git a/src/mongo/client/dbclientcursor.cpp b/src/mongo/client/dbclientcursor.cpp
new file mode 100644
index 00000000000..79510b766d8
--- /dev/null
+++ b/src/mongo/client/dbclientcursor.cpp
@@ -0,0 +1,324 @@
+// dbclient.cpp - connect to a Mongo database as a database, from C++
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "dbclient.h"
+#include "../db/dbmessage.h"
+#include "../db/cmdline.h"
+#include "connpool.h"
+#include "../s/shard.h"
+#include "../s/util.h"
+
+namespace mongo {
+
+ void assembleRequest( const string &ns, BSONObj query, int nToReturn, int nToSkip, const BSONObj *fieldsToReturn, int queryOptions, Message &toSend );
+
+ int DBClientCursor::nextBatchSize() {
+
+ if ( nToReturn == 0 )
+ return batchSize;
+
+ if ( batchSize == 0 )
+ return nToReturn;
+
+ return batchSize < nToReturn ? batchSize : nToReturn;
+ }
+
+ void DBClientCursor::_assembleInit( Message& toSend ) {
+ if ( !cursorId ) {
+ assembleRequest( ns, query, nextBatchSize() , nToSkip, fieldsToReturn, opts, toSend );
+ }
+ else {
+ BufBuilder b;
+ b.appendNum( opts );
+ b.appendStr( ns );
+ b.appendNum( nToReturn );
+ b.appendNum( cursorId );
+ toSend.setData( dbGetMore, b.buf(), b.len() );
+ }
+ }
+
+ bool DBClientCursor::init() {
+ Message toSend;
+ _assembleInit( toSend );
+
+ if ( !_client->call( toSend, *batch.m, false ) ) {
+ // log msg temp?
+ log() << "DBClientCursor::init call() failed" << endl;
+ return false;
+ }
+ if ( batch.m->empty() ) {
+ // log msg temp?
+ log() << "DBClientCursor::init message from call() was empty" << endl;
+ return false;
+ }
+ dataReceived();
+ return true;
+ }
+
+ void DBClientCursor::initLazy( bool isRetry ) {
+ verify( 15875 , _client->lazySupported() );
+ Message toSend;
+ _assembleInit( toSend );
+ _client->say( toSend, isRetry );
+ }
+
+ bool DBClientCursor::initLazyFinish( bool& retry ) {
+
+ bool recvd = _client->recv( *batch.m );
+
+ // If we get a bad response, return false
+ if ( ! recvd || batch.m->empty() ) {
+
+ if( !recvd )
+ log() << "DBClientCursor::init lazy say() failed" << endl;
+ if( batch.m->empty() )
+ log() << "DBClientCursor::init message from say() was empty" << endl;
+
+ _client->checkResponse( NULL, -1, &retry, &_lazyHost );
+
+ return false;
+
+ }
+
+ dataReceived( retry, _lazyHost );
+ return ! retry;
+ }
+
+ void DBClientCursor::requestMore() {
+ assert( cursorId && batch.pos == batch.nReturned );
+
+ if (haveLimit) {
+ nToReturn -= batch.nReturned;
+ assert(nToReturn > 0);
+ }
+ BufBuilder b;
+ b.appendNum(opts);
+ b.appendStr(ns);
+ b.appendNum(nextBatchSize());
+ b.appendNum(cursorId);
+
+ Message toSend;
+ toSend.setData(dbGetMore, b.buf(), b.len());
+ auto_ptr<Message> response(new Message());
+
+ if ( _client ) {
+ _client->call( toSend, *response );
+ this->batch.m = response;
+ dataReceived();
+ }
+ else {
+ assert( _scopedHost.size() );
+ ScopedDbConnection conn( _scopedHost );
+ conn->call( toSend , *response );
+ _client = conn.get();
+ this->batch.m = response;
+ dataReceived();
+ _client = 0;
+ conn.done();
+ }
+ }
+
+ /** with QueryOption_Exhaust, the server just blasts data at us (marked at end with cursorid==0). */
+ void DBClientCursor::exhaustReceiveMore() {
+ assert( cursorId && batch.pos == batch.nReturned );
+ assert( !haveLimit );
+ auto_ptr<Message> response(new Message());
+ assert( _client );
+ if ( _client->recv(*response) ) {
+ batch.m = response;
+ dataReceived();
+ }
+ }
+
+ void DBClientCursor::dataReceived( bool& retry, string& host ) {
+
+ QueryResult *qr = (QueryResult *) batch.m->singleData();
+ resultFlags = qr->resultFlags();
+
+ if ( qr->resultFlags() & ResultFlag_ErrSet ) {
+ wasError = true;
+ }
+
+ if ( qr->resultFlags() & ResultFlag_CursorNotFound ) {
+ // cursor id no longer valid at the server.
+ assert( qr->cursorId == 0 );
+ cursorId = 0; // 0 indicates no longer valid (dead)
+ if ( ! ( opts & QueryOption_CursorTailable ) )
+ throw UserException( 13127 , "getMore: cursor didn't exist on server, possible restart or timeout?" );
+ }
+
+ if ( cursorId == 0 || ! ( opts & QueryOption_CursorTailable ) ) {
+ // only set initially: we don't want to kill it on end of data
+ // if it's a tailable cursor
+ cursorId = qr->cursorId;
+ }
+
+ batch.nReturned = qr->nReturned;
+ batch.pos = 0;
+ batch.data = qr->data();
+
+ _client->checkResponse( batch.data, batch.nReturned, &retry, &host ); // watches for "not master"
+
+ if( qr->resultFlags() & ResultFlag_ShardConfigStale ) {
+ BSONObj error;
+ assert( peekError( &error ) );
+ throw RecvStaleConfigException( error["ns"].String(),
+ (string)"stale config on lazy receive" + causedBy( getErrField( error ) ) );
+ }
+
+ /* this assert would fire the way we currently work:
+ assert( nReturned || cursorId == 0 );
+ */
+ }
+
+ /** If true, safe to call next(). Requests more from server if necessary. */
+ bool DBClientCursor::more() {
+ _assertIfNull();
+
+ if ( !_putBack.empty() )
+ return true;
+
+ if (haveLimit && batch.pos >= nToReturn)
+ return false;
+
+ if ( batch.pos < batch.nReturned )
+ return true;
+
+ if ( cursorId == 0 )
+ return false;
+
+ requestMore();
+ return batch.pos < batch.nReturned;
+ }
+
+ BSONObj DBClientCursor::next() {
+ DEV _assertIfNull();
+ if ( !_putBack.empty() ) {
+ BSONObj ret = _putBack.top();
+ _putBack.pop();
+ return ret;
+ }
+
+ uassert(13422, "DBClientCursor next() called but more() is false", batch.pos < batch.nReturned);
+
+ batch.pos++;
+ BSONObj o(batch.data);
+ batch.data += o.objsize();
+ /* todo would be good to make data null at end of batch for safety */
+ return o;
+ }
+
+ void DBClientCursor::peek(vector<BSONObj>& v, int atMost) {
+ int m = atMost;
+
+ /*
+ for( stack<BSONObj>::iterator i = _putBack.begin(); i != _putBack.end(); i++ ) {
+ if( m == 0 )
+ return;
+ v.push_back(*i);
+ m--;
+ n++;
+ }
+ */
+
+ int p = batch.pos;
+ const char *d = batch.data;
+ while( m && p < batch.nReturned ) {
+ BSONObj o(d);
+ d += o.objsize();
+ p++;
+ m--;
+ v.push_back(o);
+ }
+ }
+
+ BSONObj DBClientCursor::peekFirst(){
+ vector<BSONObj> v;
+ peek( v, 1 );
+
+ if( v.size() > 0 ) return v[0];
+ else return BSONObj();
+ }
+
+ bool DBClientCursor::peekError(BSONObj* error){
+ if( ! wasError ) return false;
+
+ vector<BSONObj> v;
+ peek(v, 1);
+
+ assert( v.size() == 1 );
+ assert( hasErrField( v[0] ) );
+
+ if( error ) *error = v[0].getOwned();
+ return true;
+ }
+
+ void DBClientCursor::attach( AScopedConnection * conn ) {
+ assert( _scopedHost.size() == 0 );
+ assert( conn );
+ assert( conn->get() );
+
+ if ( conn->get()->type() == ConnectionString::SET ||
+ conn->get()->type() == ConnectionString::SYNC ) {
+ if( _lazyHost.size() > 0 )
+ _scopedHost = _lazyHost;
+ else if( _client )
+ _scopedHost = _client->getServerAddress();
+ else
+ massert(14821, "No client or lazy client specified, cannot store multi-host connection.", false);
+ }
+ else {
+ _scopedHost = conn->getHost();
+ }
+
+ conn->done();
+ _client = 0;
+ _lazyHost = "";
+ }
+
+ DBClientCursor::~DBClientCursor() {
+ if (!this)
+ return;
+
+ DESTRUCTOR_GUARD (
+
+ if ( cursorId && _ownCursor && ! inShutdown() ) {
+ BufBuilder b;
+ b.appendNum( (int)0 ); // reserved
+ b.appendNum( (int)1 ); // number
+ b.appendNum( cursorId );
+
+ Message m;
+ m.setData( dbKillCursors , b.buf() , b.len() );
+
+ if ( _client ) {
+ _client->sayPiggyBack( m );
+ }
+ else {
+ assert( _scopedHost.size() );
+ ScopedDbConnection conn( _scopedHost );
+ conn->sayPiggyBack( m );
+ conn.done();
+ }
+ }
+
+ );
+ }
+
+
+} // namespace mongo
diff --git a/src/mongo/client/dbclientcursor.h b/src/mongo/client/dbclientcursor.h
new file mode 100644
index 00000000000..31bf1bb1d5e
--- /dev/null
+++ b/src/mongo/client/dbclientcursor.h
@@ -0,0 +1,243 @@
+// file dbclientcursor.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "../pch.h"
+#include "../util/net/message.h"
+#include "../db/jsobj.h"
+#include "../db/json.h"
+#include <stack>
+
+namespace mongo {
+
+ class AScopedConnection;
+
+ /** for mock purposes only -- do not create variants of DBClientCursor, nor hang code here
+ @see DBClientMockCursor
+ */
+ class DBClientCursorInterface : boost::noncopyable {
+ public:
+ virtual ~DBClientCursorInterface() {}
+ virtual bool more() = 0;
+ virtual BSONObj next() = 0;
+ // TODO bring more of the DBClientCursor interface to here
+ protected:
+ DBClientCursorInterface() {}
+ };
+
+ /** Queries return a cursor object */
+ class DBClientCursor : public DBClientCursorInterface {
+ public:
+ /** If true, safe to call next(). Requests more from server if necessary. */
+ bool more();
+
+ /** If true, there is more in our local buffers to be fetched via next(). Returns
+ false when a getMore request back to server would be required. You can use this
+ if you want to exhaust whatever data has been fetched to the client already but
+ then perhaps stop.
+ */
+ int objsLeftInBatch() const { _assertIfNull(); return _putBack.size() + batch.nReturned - batch.pos; }
+ bool moreInCurrentBatch() { return objsLeftInBatch() > 0; }
+
+ /** next
+ @return next object in the result cursor.
+ on an error at the remote server, you will get back:
+ { $err: <string> }
+ if you do not want to handle that yourself, call nextSafe().
+ */
+ BSONObj next();
+
+ /**
+ restore an object previously returned by next() to the cursor
+ */
+ void putBack( const BSONObj &o ) { _putBack.push( o.getOwned() ); }
+
+ /** throws AssertionException if get back { $err : ... } */
+ BSONObj nextSafe() {
+ BSONObj o = next();
+ if( strcmp(o.firstElementFieldName(), "$err") == 0 ) {
+ string s = "nextSafe(): " + o.toString();
+ if( logLevel >= 5 )
+ log() << s << endl;
+ uasserted(13106, s);
+ }
+ return o;
+ }
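+
+        /* Illustrative sketch only: the usual iteration pattern over a query result, assuming a
+           connected DBClientConnection `c` and a placeholder namespace:
+
+               auto_ptr<DBClientCursor> cur = c.query( "test.people", Query().sort( "name" ) );
+               while ( cur->more() ) {
+                   BSONObj obj = cur->nextSafe();   // throws on a { $err : ... } response
+                   // ... use obj ...
+               }
+ */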
+
+ /** peek ahead at items buffered for future next() calls.
+ never requests new data from the server, so peek is only effective
+ with what is already buffered.
+ WARNING: no support for _putBack yet!
+ */
+ void peek(vector<BSONObj>&, int atMost);
+
+ // Peeks at first element, if exists
+ BSONObj peekFirst();
+
+ /**
+ * peek ahead and see if an error occurred, and get the error if so.
+ */
+ bool peekError(BSONObj* error = NULL);
+
+ /**
+ iterate the rest of the cursor and return the number of items
+ */
+ int itcount() {
+ int c = 0;
+ while ( more() ) {
+ next();
+ c++;
+ }
+ return c;
+ }
+
+ /** cursor no longer valid -- use with tailable cursors.
+ note you should only rely on this once more() returns false;
+ 'dead' may already be set while some data is still queued and locally
+ available from the dbclientcursor.
+ */
+ bool isDead() const { return !this || cursorId == 0; }
+
+ bool tailable() const { return (opts & QueryOption_CursorTailable) != 0; }
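+
+        /* Illustrative sketch only: a typical tailable-cursor loop over a capped collection,
+           assuming a connected DBClientConnection `c` and a placeholder namespace:
+
+               auto_ptr<DBClientCursor> cur = c.query( "test.capped", Query(), 0, 0, 0,
+                                                       QueryOption_CursorTailable );
+               while ( ! cur->isDead() ) {
+                   if ( ! cur->more() ) { sleepsecs( 1 ); continue; }   // wait for new data
+                   BSONObj obj = cur->nextSafe();
+                   // ... process obj ...
+               }
+ */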
+
+ /** see ResultFlagType (constants.h) for flag values
+ mostly these flags are for internal purposes -
+ ResultFlag_ErrSet is the possible exception to that
+ */
+ bool hasResultFlag( int flag ) {
+ _assertIfNull();
+ return (resultFlags & flag) != 0;
+ }
+
+ DBClientCursor( DBClientBase* client, const string &_ns, BSONObj _query, int _nToReturn,
+ int _nToSkip, const BSONObj *_fieldsToReturn, int queryOptions , int bs ) :
+ _client(client),
+ ns(_ns),
+ query(_query),
+ nToReturn(_nToReturn),
+ haveLimit( _nToReturn > 0 && !(queryOptions & QueryOption_CursorTailable)),
+ nToSkip(_nToSkip),
+ fieldsToReturn(_fieldsToReturn),
+ opts(queryOptions),
+ batchSize(bs==1?2:bs),
+ cursorId(),
+ _ownCursor( true ),
+ wasError( false ) {
+ }
+
+ DBClientCursor( DBClientBase* client, const string &_ns, long long _cursorId, int _nToReturn, int options ) :
+ _client(client),
+ ns(_ns),
+ nToReturn( _nToReturn ),
+ haveLimit( _nToReturn > 0 && !(options & QueryOption_CursorTailable)),
+ opts( options ),
+ cursorId(_cursorId),
+ _ownCursor( true ) {
+ }
+
+ virtual ~DBClientCursor();
+
+ long long getCursorId() const { return cursorId; }
+
+ /** by default we "own" the cursor and will send the server a KillCursor
+ message when ~DBClientCursor() is called. This function overrides that.
+ */
+ void decouple() { _ownCursor = false; }
+
+ void attach( AScopedConnection * conn );
+
+ Message* getMessage(){ return batch.m.get(); }
+
+ /**
+ * actually does the query
+ */
+ bool init();
+
+ void initLazy( bool isRetry = false );
+ bool initLazyFinish( bool& retry );
+
+ class Batch : boost::noncopyable {
+ friend class DBClientCursor;
+ auto_ptr<Message> m;
+ int nReturned;
+ int pos;
+ const char *data;
+ public:
+ Batch() : m( new Message() ), nReturned(), pos(), data() { }
+ };
+
+ private:
+ friend class DBClientBase;
+ friend class DBClientConnection;
+
+ int nextBatchSize();
+
+ Batch batch;
+ DBClientBase* _client;
+ string ns;
+ BSONObj query;
+ int nToReturn;
+ bool haveLimit;
+ int nToSkip;
+ const BSONObj *fieldsToReturn;
+ int opts;
+ int batchSize;
+ stack< BSONObj > _putBack;
+ int resultFlags;
+ long long cursorId;
+ bool _ownCursor; // see decouple()
+ string _scopedHost;
+ string _lazyHost;
+ bool wasError;
+
+ void dataReceived() { bool retry; string lazyHost; dataReceived( retry, lazyHost ); }
+ void dataReceived( bool& retry, string& lazyHost );
+ void requestMore();
+ void exhaustReceiveMore(); // for exhaust
+
+ // Don't call from a virtual function
+ void _assertIfNull() const { uassert(13348, "connection died", this); }
+
+ // non-copyable , non-assignable
+ DBClientCursor( const DBClientCursor& );
+ DBClientCursor& operator=( const DBClientCursor& );
+
+ // init pieces
+ void _assembleInit( Message& toSend );
+ };
+
+ /** iterate over objects in current batch only - will not cause a network call
+ */
+ class DBClientCursorBatchIterator {
+ public:
+ DBClientCursorBatchIterator( DBClientCursor &c ) : _c( c ), _n() {}
+ bool moreInCurrentBatch() { return _c.moreInCurrentBatch(); }
+ BSONObj nextSafe() {
+ massert( 13383, "BatchIterator empty", moreInCurrentBatch() );
+ ++_n;
+ return _c.nextSafe();
+ }
+ int n() const { return _n; }
+ private:
+ DBClientCursor &_c;
+ int _n;
+ };
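+
+    /* Illustrative sketch only: the batch iterator drains whatever is already buffered locally
+       without triggering another getMore, e.g. as the handler passed to the block form of
+       DBClientConnection::query() (the handler name is hypothetical):
+
+           void handleBatch( DBClientCursorBatchIterator& it ) {
+               while ( it.moreInCurrentBatch() ) {
+                   BSONObj obj = it.nextSafe();
+                   // ... process obj ...
+               }
+           }
+ */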
+
+} // namespace mongo
+
+#include "undef_macros.h"
diff --git a/src/mongo/client/dbclientmockcursor.h b/src/mongo/client/dbclientmockcursor.h
new file mode 100644
index 00000000000..8d85ff5ad2e
--- /dev/null
+++ b/src/mongo/client/dbclientmockcursor.h
@@ -0,0 +1,40 @@
+//@file dbclientmockcursor.h
+
+/* Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "dbclientcursor.h"
+
+namespace mongo {
+
+ class DBClientMockCursor : public DBClientCursorInterface {
+ public:
+ DBClientMockCursor( const BSONArray& mockCollection ) : _iter( mockCollection ) {}
+ virtual ~DBClientMockCursor() {}
+
+ bool more() { return _iter.more(); }
+ BSONObj next() { return _iter.next().Obj(); }
+
+ private:
+ BSONObjIterator _iter;
+
+ // non-copyable , non-assignable
+ DBClientMockCursor( const DBClientMockCursor& );
+ DBClientMockCursor& operator=( const DBClientMockCursor& );
+ };
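+
+    /* Illustrative sketch only: a DBClientMockCursor wraps an in-memory BSONArray so code
+       written against DBClientCursorInterface can be exercised without a server:
+
+           BSONArray data = BSON_ARRAY( BSON( "a" << 1 ) << BSON( "a" << 2 ) );
+           DBClientMockCursor mock( data );
+           while ( mock.more() )
+               cout << mock.next().toString() << endl;
+ */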
+
+} // namespace mongo
diff --git a/src/mongo/client/distlock.cpp b/src/mongo/client/distlock.cpp
new file mode 100644
index 00000000000..595fc38197c
--- /dev/null
+++ b/src/mongo/client/distlock.cpp
@@ -0,0 +1,958 @@
+// @file distlock.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "dbclient.h"
+#include "distlock.h"
+
+namespace mongo {
+
+ LabeledLevel DistributedLock::logLvl( 1 );
+ DistributedLock::LastPings DistributedLock::lastPings;
+
+ ThreadLocalValue<string> distLockIds("");
+
+ /* ==================
+ * Module initialization
+ */
+
+ boost::once_flag _init = BOOST_ONCE_INIT;
+ static string* _cachedProcessString = NULL;
+
+ static void initModule() {
+ // cache process string
+ stringstream ss;
+ ss << getHostName() << ":" << cmdLine.port << ":" << time(0) << ":" << rand();
+ _cachedProcessString = new string( ss.str() );
+ }
+
+ /* =================== */
+
+ string getDistLockProcess() {
+ boost::call_once( initModule, _init );
+ assert( _cachedProcessString );
+ return *_cachedProcessString;
+ }
+
+ string getDistLockId() {
+ string s = distLockIds.get();
+ if ( s.empty() ) {
+ stringstream ss;
+ ss << getDistLockProcess() << ":" << getThreadName() << ":" << rand();
+ s = ss.str();
+ distLockIds.set( s );
+ }
+ return s;
+ }
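+
+    // The id built above has the form "<host>:<port>:<time>:<rand>:<thread name>:<rand>",
+    // e.g. "cfg1:27019:1325000000:41:LockPinger:2877" (illustrative values only).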
+
+
+ class DistributedLockPinger {
+ public:
+
+ DistributedLockPinger()
+ : _mutex( "DistributedLockPinger" ) {
+ }
+
+ void _distLockPingThread( ConnectionString addr, string process, unsigned long long sleepTime ) {
+
+ setThreadName( "LockPinger" );
+
+ string pingId = pingThreadId( addr, process );
+
+ log( DistributedLock::logLvl - 1 ) << "creating distributed lock ping thread for " << addr
+ << " and process " << process
+ << " (sleeping for " << sleepTime << "ms)" << endl;
+
+ static int loops = 0;
+ while( ! inShutdown() && ! shouldKill( addr, process ) ) {
+
+ log( DistributedLock::logLvl + 2 ) << "distributed lock pinger '" << pingId << "' about to ping." << endl;
+
+ Date_t pingTime;
+
+ try {
+ ScopedDbConnection conn( addr, 30.0 );
+
+ pingTime = jsTime();
+
+ // refresh the entry corresponding to this process in the lockpings collection
+ conn->update( DistributedLock::lockPingNS ,
+ BSON( "_id" << process ) ,
+ BSON( "$set" << BSON( "ping" << pingTime ) ) ,
+ true );
+
+ string err = conn->getLastError();
+ if ( ! err.empty() ) {
+ warning() << "pinging failed for distributed lock pinger '" << pingId << "'."
+ << causedBy( err ) << endl;
+ conn.done();
+
+ // Sleep for normal ping time
+ sleepmillis(sleepTime);
+ continue;
+ }
+
+ // remove really old entries from the lockpings collection if they're not holding a lock
+ // (this may happen if an instance of a process was taken down and no new instance came up to
+ // replace it for a quite a while)
+ // if the lock is taken, the take-over mechanism should handle the situation
+ auto_ptr<DBClientCursor> c = conn->query( DistributedLock::locksNS , BSONObj() );
+ set<string> pids;
+ while ( c->more() ) {
+ BSONObj lock = c->next();
+ if ( ! lock["process"].eoo() ) {
+ pids.insert( lock["process"].valuestrsafe() );
+ }
+ }
+
+ Date_t fourDays = pingTime - ( 4 * 86400 * 1000 ); // 4 days
+ conn->remove( DistributedLock::lockPingNS , BSON( "_id" << BSON( "$nin" << pids ) << "ping" << LT << fourDays ) );
+ err = conn->getLastError();
+ if ( ! err.empty() ) {
+                        warning() << "ping cleanup for distributed lock pinger '" << pingId << "' failed."
+ << causedBy( err ) << endl;
+ conn.done();
+
+ // Sleep for normal ping time
+ sleepmillis(sleepTime);
+ continue;
+ }
+
+ // create index so remove is fast even with a lot of servers
+ if ( loops++ == 0 ) {
+ conn->ensureIndex( DistributedLock::lockPingNS , BSON( "ping" << 1 ) );
+ }
+
+ log( DistributedLock::logLvl - ( loops % 10 == 0 ? 1 : 0 ) ) << "cluster " << addr << " pinged successfully at " << pingTime
+ << " by distributed lock pinger '" << pingId
+ << "', sleeping for " << sleepTime << "ms" << endl;
+
+ // Remove old locks, if possible
+ // Make sure no one else is adding to this list at the same time
+ scoped_lock lk( _mutex );
+
+ int numOldLocks = _oldLockOIDs.size();
+ if( numOldLocks > 0 )
+ log( DistributedLock::logLvl - 1 ) << "trying to delete " << _oldLockOIDs.size() << " old lock entries for process " << process << endl;
+
+ bool removed = false;
+ for( list<OID>::iterator i = _oldLockOIDs.begin(); i != _oldLockOIDs.end();
+ i = ( removed ? _oldLockOIDs.erase( i ) : ++i ) ) {
+ removed = false;
+ try {
+ // Got OID from lock with id, so we don't need to specify id again
+ conn->update( DistributedLock::locksNS ,
+ BSON( "ts" << *i ),
+ BSON( "$set" << BSON( "state" << 0 ) ) );
+
+ // Either the update went through or it didn't, either way we're done trying to
+ // unlock
+ log( DistributedLock::logLvl - 1 ) << "handled late remove of old distributed lock with ts " << *i << endl;
+ removed = true;
+ }
+ catch( UpdateNotTheSame& ) {
+ log( DistributedLock::logLvl - 1 ) << "partially removed old distributed lock with ts " << *i << endl;
+ removed = true;
+ }
+ catch ( std::exception& e) {
+ warning() << "could not remove old distributed lock with ts " << *i
+ << causedBy( e ) << endl;
+ }
+
+ }
+
+ if( numOldLocks > 0 && _oldLockOIDs.size() > 0 ){
+ log( DistributedLock::logLvl - 1 ) << "not all old lock entries could be removed for process " << process << endl;
+ }
+
+ conn.done();
+
+ }
+ catch ( std::exception& e ) {
+ warning() << "distributed lock pinger '" << pingId << "' detected an exception while pinging."
+ << causedBy( e ) << endl;
+ }
+
+ sleepmillis(sleepTime);
+ }
+
+ warning() << "removing distributed lock ping thread '" << pingId << "'" << endl;
+
+
+ if( shouldKill( addr, process ) )
+ finishKill( addr, process );
+
+ }
+
+ void distLockPingThread( ConnectionString addr, long long clockSkew, string processId, unsigned long long sleepTime ) {
+ try {
+ jsTimeVirtualThreadSkew( clockSkew );
+ _distLockPingThread( addr, processId, sleepTime );
+ }
+ catch ( std::exception& e ) {
+ error() << "unexpected error while running distributed lock pinger for " << addr << ", process " << processId << causedBy( e ) << endl;
+ }
+ catch ( ... ) {
+ error() << "unknown error while running distributed lock pinger for " << addr << ", process " << processId << endl;
+ }
+ }
+
+ string pingThreadId( const ConnectionString& conn, const string& processId ) {
+ return conn.toString() + "/" + processId;
+ }
+
+ string got( DistributedLock& lock, unsigned long long sleepTime ) {
+
+ // Make sure we don't start multiple threads for a process id
+ scoped_lock lk( _mutex );
+
+ const ConnectionString& conn = lock.getRemoteConnection();
+ const string& processId = lock.getProcessId();
+ string s = pingThreadId( conn, processId );
+
+ // Ignore if we already have a pinging thread for this process.
+ if ( _seen.count( s ) > 0 ) return s;
+
+ // Check our clock skew
+ try {
+ if( lock.isRemoteTimeSkewed() ) {
+ throw LockException( str::stream() << "clock skew of the cluster " << conn.toString() << " is too far out of bounds to allow distributed locking." , 13650 );
+ }
+ }
+ catch( LockException& e) {
+ throw LockException( str::stream() << "error checking clock skew of cluster " << conn.toString() << causedBy( e ) , 13651);
+ }
+
+ boost::thread t( boost::bind( &DistributedLockPinger::distLockPingThread, this, conn, getJSTimeVirtualThreadSkew(), processId, sleepTime) );
+
+ _seen.insert( s );
+
+ return s;
+ }
+
+ void addUnlockOID( const OID& oid ) {
+ // Modifying the lock from some other thread
+ scoped_lock lk( _mutex );
+ _oldLockOIDs.push_back( oid );
+ }
+
+ bool willUnlockOID( const OID& oid ) {
+ scoped_lock lk( _mutex );
+ return find( _oldLockOIDs.begin(), _oldLockOIDs.end(), oid ) != _oldLockOIDs.end();
+ }
+
+ void kill( const ConnectionString& conn, const string& processId ) {
+ // Make sure we're in a consistent state before other threads can see us
+ scoped_lock lk( _mutex );
+
+ string pingId = pingThreadId( conn, processId );
+
+ assert( _seen.count( pingId ) > 0 );
+ _kill.insert( pingId );
+
+ }
+
+ bool shouldKill( const ConnectionString& conn, const string& processId ) {
+ return _kill.count( pingThreadId( conn, processId ) ) > 0;
+ }
+
+ void finishKill( const ConnectionString& conn, const string& processId ) {
+ // Make sure we're in a consistent state before other threads can see us
+ scoped_lock lk( _mutex );
+
+ string pingId = pingThreadId( conn, processId );
+
+ _kill.erase( pingId );
+ _seen.erase( pingId );
+
+ }
+
+ set<string> _kill;
+ set<string> _seen;
+ mongo::mutex _mutex;
+ list<OID> _oldLockOIDs;
+
+ } distLockPinger;
+
+
+ const string DistributedLock::lockPingNS = "config.lockpings";
+ const string DistributedLock::locksNS = "config.locks";
+
+ /**
+     * Create a new distributed lock, potentially with a custom lock timeout (the time an unpinged lock
+     * may be held before another attempt can take it over). The ping interval is derived from this timeout.
+ */
+ DistributedLock::DistributedLock( const ConnectionString& conn , const string& name , unsigned long long lockTimeout, bool asProcess )
+ : _conn(conn) , _name(name) , _id( BSON( "_id" << name ) ), _processId( asProcess ? getDistLockId() : getDistLockProcess() ),
+ _lockTimeout( lockTimeout == 0 ? LOCK_TIMEOUT : lockTimeout ), _maxClockSkew( _lockTimeout / LOCK_SKEW_FACTOR ), _maxNetSkew( _maxClockSkew ), _lockPing( _maxClockSkew ),
+ _mutex( "DistributedLock" )
+ {
+ log( logLvl - 1 ) << "created new distributed lock for " << name << " on " << conn
+ << " ( lock timeout : " << _lockTimeout
+ << ", ping interval : " << _lockPing << ", process : " << asProcess << " )" << endl;
+
+
+ }
+
+ DistributedLock::PingData DistributedLock::LastPings::getLastPing( const ConnectionString& conn, const string& lockName ){
+ scoped_lock lock( _mutex );
+ return _lastPings[ std::pair< string, string >( conn.toString(), lockName ) ];
+ }
+
+ void DistributedLock::LastPings::setLastPing( const ConnectionString& conn, const string& lockName, const PingData& pd ){
+ scoped_lock lock( _mutex );
+ _lastPings[ std::pair< string, string >( conn.toString(), lockName ) ] = pd;
+ }
+
+ Date_t DistributedLock::getRemoteTime() {
+ return DistributedLock::remoteTime( _conn, _maxNetSkew );
+ }
+
+ bool DistributedLock::isRemoteTimeSkewed() {
+ return !DistributedLock::checkSkew( _conn, NUM_LOCK_SKEW_CHECKS, _maxClockSkew, _maxNetSkew );
+ }
+
+ const ConnectionString& DistributedLock::getRemoteConnection() {
+ return _conn;
+ }
+
+ const string& DistributedLock::getProcessId() {
+ return _processId;
+ }
+
+ /**
+ * Returns the remote time as reported by the cluster or server. The maximum difference between the reported time
+ * and the actual time on the remote server (at the completion of the function) is the maxNetSkew
+ */
+ Date_t DistributedLock::remoteTime( const ConnectionString& cluster, unsigned long long maxNetSkew ) {
+
+ ConnectionString server( *cluster.getServers().begin() );
+ ScopedDbConnection conn( server );
+
+ BSONObj result;
+ long long delay;
+
+ try {
+ Date_t then = jsTime();
+ bool success = conn->runCommand( string("admin"), BSON( "serverStatus" << 1 ), result );
+ delay = jsTime() - then;
+
+ if( !success )
+ throw TimeNotFoundException( str::stream() << "could not get status from server "
+ << server.toString() << " in cluster " << cluster.toString()
+ << " to check time", 13647 );
+
+ // Make sure that our delay is not more than 2x our maximum network skew, since this is the max our remote
+ // time value can be off by if we assume a response in the middle of the delay.
+ if( delay > (long long) (maxNetSkew * 2) )
+ throw TimeNotFoundException( str::stream() << "server " << server.toString()
+ << " in cluster " << cluster.toString()
+ << " did not respond within max network delay of "
+ << maxNetSkew << "ms", 13648 );
+ }
+ catch(...) {
+ conn.done();
+ throw;
+ }
+
+ conn.done();
+
+ return result["localTime"].Date() - (delay / 2);
+
+ }
+
+ bool DistributedLock::checkSkew( const ConnectionString& cluster, unsigned skewChecks, unsigned long long maxClockSkew, unsigned long long maxNetSkew ) {
+
+ vector<HostAndPort> servers = cluster.getServers();
+
+ if(servers.size() < 1) return true;
+
+ vector<long long> avgSkews;
+
+ for(unsigned i = 0; i < skewChecks; i++) {
+
+ // Find the average skew for each server
+ unsigned s = 0;
+ for(vector<HostAndPort>::iterator si = servers.begin(); si != servers.end(); ++si,s++) {
+
+ if(i == 0) avgSkews.push_back(0);
+
+ // Could check if this is self, but shouldn't matter since local network connection should be fast.
+ ConnectionString server( *si );
+
+ vector<long long> skew;
+
+ BSONObj result;
+
+ Date_t remote = remoteTime( server, maxNetSkew );
+ Date_t local = jsTime();
+
+ // Remote time can be delayed by at most MAX_NET_SKEW
+
+ // Skew is how much time we'd have to add to local to get to remote
+ avgSkews[s] += (long long) (remote - local);
+
+ log( logLvl + 1 ) << "skew from remote server " << server << " found: " << (long long) (remote - local) << endl;
+
+ }
+ }
+
+ // Analyze skews
+
+ long long serverMaxSkew = 0;
+ long long serverMinSkew = 0;
+
+ for(unsigned s = 0; s < avgSkews.size(); s++) {
+
+ long long avgSkew = (avgSkews[s] /= skewChecks);
+
+ // Keep track of max and min skews
+ if(s == 0) {
+ serverMaxSkew = avgSkew;
+ serverMinSkew = avgSkew;
+ }
+ else {
+ if(avgSkew > serverMaxSkew)
+ serverMaxSkew = avgSkew;
+ if(avgSkew < serverMinSkew)
+ serverMinSkew = avgSkew;
+ }
+
+ }
+
+ long long totalSkew = serverMaxSkew - serverMinSkew;
+
+ // Make sure our max skew is not more than our pre-set limit
+ if(totalSkew > (long long) maxClockSkew) {
+ log( logLvl + 1 ) << "total clock skew of " << totalSkew << "ms for servers " << cluster << " is out of " << maxClockSkew << "ms bounds." << endl;
+ return false;
+ }
+
+ log( logLvl + 1 ) << "total clock skew of " << totalSkew << "ms for servers " << cluster << " is in " << maxClockSkew << "ms bounds." << endl;
+ return true;
+ }
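+
+    // Worked example (illustrative numbers): with three config servers whose average skews come
+    // out to -200ms, +50ms and +300ms, totalSkew = 300 - (-200) = 500ms, which is well within
+    // the default 30000ms bound, so checkSkew() returns true.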
+
+    // For use in testing only; in practice the ping thread should run indefinitely.
+ bool DistributedLock::killPinger( DistributedLock& lock ) {
+ if( lock._threadId == "") return false;
+
+ distLockPinger.kill( lock._conn, lock._processId );
+ return true;
+ }
+
+    // Semantics: if the lock cannot be acquired this method returns false, and the attempt can be retried.
+    // If the lock should not be tried again (some unexpected error occurred), a LockException is thrown.
+    // If we are only trying to re-enter a currently held lock, reenter should be true.
+    // Note: reenter doesn't actually make this lock re-entrant in the normal sense, since it can still only
+    // be unlocked once; instead it is used to verify that the lock is already held.
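+    // Lock document "state" values used below: 0 = unlocked, 1 = taken but not yet finalized,
+    // 2 = taken and finalized.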
+ bool DistributedLock::lock_try( const string& why , bool reenter, BSONObj * other ) {
+
+ // TODO: Start pinging only when we actually get the lock?
+ // If we don't have a thread pinger, make sure we shouldn't have one
+ if( _threadId == "" ){
+ scoped_lock lk( _mutex );
+ _threadId = distLockPinger.got( *this, _lockPing );
+ }
+
+ // This should always be true, if not, we are using the lock incorrectly.
+ assert( _name != "" );
+
+ // write to dummy if 'other' is null
+ BSONObj dummyOther;
+ if ( other == NULL )
+ other = &dummyOther;
+
+ ScopedDbConnection conn( _conn );
+
+ BSONObjBuilder queryBuilder;
+ queryBuilder.appendElements( _id );
+ queryBuilder.append( "state" , 0 );
+
+ {
+ // make sure its there so we can use simple update logic below
+ BSONObj o = conn->findOne( locksNS , _id ).getOwned();
+
+ // Case 1: No locks
+ if ( o.isEmpty() ) {
+ try {
+ log( logLvl ) << "inserting initial doc in " << locksNS << " for lock " << _name << endl;
+ conn->insert( locksNS , BSON( "_id" << _name << "state" << 0 << "who" << "" ) );
+ }
+ catch ( UserException& e ) {
+ warning() << "could not insert initial doc for distributed lock " << _name << causedBy( e ) << endl;
+ }
+ }
+
+ // Case 2: A set lock that we might be able to force
+ else if ( o["state"].numberInt() > 0 ) {
+
+ string lockName = o["_id"].String() + string("/") + o["process"].String();
+
+ bool canReenter = reenter && o["process"].String() == _processId && ! distLockPinger.willUnlockOID( o["ts"].OID() ) && o["state"].numberInt() == 2;
+ if( reenter && ! canReenter ) {
+ log( logLvl - 1 ) << "not re-entering distributed lock " << lockName;
+ if( o["process"].String() != _processId ) log( logLvl - 1 ) << ", different process " << _processId << endl;
+                    else if( o["state"].numberInt() != 2 ) log( logLvl - 1 ) << ", state not finalized" << endl;
+ else log( logLvl - 1 ) << ", ts " << o["ts"].OID() << " scheduled for late unlock" << endl;
+
+ // reset since we've been bounced by a previous lock not being where we thought it was,
+ // and should go through full forcing process if required.
+ // (in theory we should never see a ping here if used correctly)
+ *other = o; other->getOwned(); conn.done(); resetLastPing();
+ return false;
+ }
+
+ BSONObj lastPing = conn->findOne( lockPingNS , o["process"].wrap( "_id" ) );
+ if ( lastPing.isEmpty() ) {
+ log( logLvl ) << "empty ping found for process in lock '" << lockName << "'" << endl;
+                    // TODO: Using 0 as a "no time found" value will fail if dates roll over, but then, so will a lot of other things.
+ lastPing = BSON( "_id" << o["process"].String() << "ping" << (Date_t) 0 );
+ }
+
+ unsigned long long elapsed = 0;
+ unsigned long long takeover = _lockTimeout;
+ PingData _lastPingCheck = getLastPing();
+
+ log( logLvl ) << "checking last ping for lock '" << lockName << "'" << " against process " << _lastPingCheck.get<0>() << " and ping " << _lastPingCheck.get<1>() << endl;
+
+ try {
+
+ Date_t remote = remoteTime( _conn );
+
+ // Timeout the elapsed time using comparisons of remote clock
+ // For non-finalized locks, timeout 15 minutes since last seen (ts)
+ // For finalized locks, timeout 15 minutes since last ping
+ bool recPingChange = o["state"].numberInt() == 2 && ( _lastPingCheck.get<0>() != lastPing["_id"].String() || _lastPingCheck.get<1>() != lastPing["ping"].Date() );
+ bool recTSChange = _lastPingCheck.get<3>() != o["ts"].OID();
+
+ if( recPingChange || recTSChange ) {
+ // If the ping has changed since we last checked, mark the current date and time
+ setLastPing( PingData( lastPing["_id"].String().c_str(), lastPing["ping"].Date(), remote, o["ts"].OID() ) );
+ }
+ else {
+
+ // GOTCHA! Due to network issues, it is possible that the current time
+ // is less than the remote time. We *have* to check this here, otherwise
+ // we overflow and our lock breaks.
+ if(_lastPingCheck.get<2>() >= remote)
+ elapsed = 0;
+ else
+ elapsed = remote - _lastPingCheck.get<2>();
+ }
+ }
+ catch( LockException& e ) {
+
+ // Remote server cannot be found / is not responsive
+ warning() << "Could not get remote time from " << _conn << causedBy( e );
+ // If our config server is having issues, forget all the pings until we can see it again
+ resetLastPing();
+
+ }
+
+ if ( elapsed <= takeover && ! canReenter ) {
+ log( logLvl ) << "could not force lock '" << lockName << "' because elapsed time " << elapsed << " <= takeover time " << takeover << endl;
+ *other = o; other->getOwned(); conn.done();
+ return false;
+ }
+ else if( elapsed > takeover && canReenter ) {
+                log( logLvl - 1 ) << "not re-entering distributed lock '" << lockName << "' because elapsed time " << elapsed << " > takeover time " << takeover << endl;
+ *other = o; other->getOwned(); conn.done();
+ return false;
+ }
+
+ log( logLvl - 1 ) << ( canReenter ? "re-entering" : "forcing" ) << " lock '" << lockName << "' because "
+ << ( canReenter ? "re-entering is allowed, " : "" )
+ << "elapsed time " << elapsed << " > takeover time " << takeover << endl;
+
+ if( elapsed > takeover ) {
+
+                // Lock may be forced; reset our timer whether forcing succeeds or fails.
+ // Ensures that another timeout must happen if something borks up here, and resets our pristine
+ // ping state if acquired.
+ resetLastPing();
+
+ try {
+
+ // Check the clock skew again. If we check this before we get a lock
+ // and after the lock times out, we can be pretty sure the time is
+ // increasing at the same rate on all servers and therefore our
+ // timeout is accurate
+ uassert( 14023, str::stream() << "remote time in cluster " << _conn.toString() << " is now skewed, cannot force lock.", !isRemoteTimeSkewed() );
+
+ // Make sure we break the lock with the correct "ts" (OID) value, otherwise
+ // we can overwrite a new lock inserted in the meantime.
+ conn->update( locksNS , BSON( "_id" << _id["_id"].String() << "state" << o["state"].numberInt() << "ts" << o["ts"] ),
+ BSON( "$set" << BSON( "state" << 0 ) ) );
+
+ BSONObj err = conn->getLastErrorDetailed();
+ string errMsg = DBClientWithCommands::getLastErrorString(err);
+
+ // TODO: Clean up all the extra code to exit this method, probably with a refactor
+ if ( !errMsg.empty() || !err["n"].type() || err["n"].numberInt() < 1 ) {
+ ( errMsg.empty() ? log( logLvl - 1 ) : warning() ) << "Could not force lock '" << lockName << "' "
+ << ( !errMsg.empty() ? causedBy(errMsg) : string("(another force won)") ) << endl;
+ *other = o; other->getOwned(); conn.done();
+ return false;
+ }
+
+ }
+ catch( UpdateNotTheSame& ) {
+ // Ok to continue since we know we forced at least one lock document, and all lock docs
+ // are required for a lock to be held.
+ warning() << "lock forcing " << lockName << " inconsistent" << endl;
+ }
+ catch( std::exception& e ) {
+ conn.done();
+ throw LockException( str::stream() << "exception forcing distributed lock "
+ << lockName << causedBy( e ), 13660);
+ }
+
+ }
+ else {
+
+ assert( canReenter );
+
+ // Lock may be re-entered, reset our timer if succeeds or fails
+ // Not strictly necessary, but helpful for small timeouts where thread scheduling is significant.
+ // This ensures that two attempts are still required for a force if not acquired, and resets our
+ // state if we are acquired.
+ resetLastPing();
+
+ // Test that the lock is held by trying to update the finalized state of the lock to the same state
+ // if it does not update or does not update on all servers, we can't re-enter.
+ try {
+
+ // Test the lock with the correct "ts" (OID) value
+ conn->update( locksNS , BSON( "_id" << _id["_id"].String() << "state" << 2 << "ts" << o["ts"] ),
+ BSON( "$set" << BSON( "state" << 2 ) ) );
+
+ BSONObj err = conn->getLastErrorDetailed();
+ string errMsg = DBClientWithCommands::getLastErrorString(err);
+
+ // TODO: Clean up all the extra code to exit this method, probably with a refactor
+ if ( ! errMsg.empty() || ! err["n"].type() || err["n"].numberInt() < 1 ) {
+ ( errMsg.empty() ? log( logLvl - 1 ) : warning() ) << "Could not re-enter lock '" << lockName << "' "
+ << ( !errMsg.empty() ? causedBy(errMsg) : string("(not sure lock is held)") )
+ << " gle: " << err
+ << endl;
+ *other = o; other->getOwned(); conn.done();
+ return false;
+ }
+
+ }
+ catch( UpdateNotTheSame& ) {
+ // NOT ok to continue since our lock isn't held by all servers, so isn't valid.
+ warning() << "inconsistent state re-entering lock, lock " << lockName << " not held" << endl;
+ *other = o; other->getOwned(); conn.done();
+ return false;
+ }
+ catch( std::exception& e ) {
+ conn.done();
+ throw LockException( str::stream() << "exception re-entering distributed lock "
+ << lockName << causedBy( e ), 13660);
+ }
+
+ log( logLvl - 1 ) << "re-entered distributed lock '" << lockName << "'" << endl;
+ *other = o; other->getOwned(); conn.done();
+ return true;
+
+ }
+
+ log( logLvl - 1 ) << "lock '" << lockName << "' successfully forced" << endl;
+
+ // We don't need the ts value in the query, since we will only ever replace locks with state=0.
+ }
+ // Case 3: We have an expired lock
+ else if ( o["ts"].type() ) {
+ queryBuilder.append( o["ts"] );
+ }
+ }
+
+ // Always reset our ping if we're trying to get a lock, since getting a lock implies the lock state is open
+ // and no locks need to be forced. If anything goes wrong, we don't want to remember an old lock.
+ resetLastPing();
+
+ bool gotLock = false;
+ BSONObj currLock;
+
+ BSONObj lockDetails = BSON( "state" << 1 << "who" << getDistLockId() << "process" << _processId <<
+ "when" << jsTime() << "why" << why << "ts" << OID::gen() );
+ BSONObj whatIWant = BSON( "$set" << lockDetails );
+
+ BSONObj query = queryBuilder.obj();
+
+ string lockName = _name + string("/") + _processId;
+
+ try {
+
+ // Main codepath to acquire lock
+
+            log( logLvl ) << "about to acquire distributed lock '" << lockName << "':\n"
+ << lockDetails.jsonString(Strict, true) << "\n"
+ << query.jsonString(Strict, true) << endl;
+
+ conn->update( locksNS , query , whatIWant );
+
+ BSONObj err = conn->getLastErrorDetailed();
+ string errMsg = DBClientWithCommands::getLastErrorString(err);
+
+ currLock = conn->findOne( locksNS , _id );
+
+ if ( !errMsg.empty() || !err["n"].type() || err["n"].numberInt() < 1 ) {
+ ( errMsg.empty() ? log( logLvl - 1 ) : warning() ) << "could not acquire lock '" << lockName << "' "
+ << ( !errMsg.empty() ? causedBy( errMsg ) : string("(another update won)") ) << endl;
+ *other = currLock;
+ other->getOwned();
+ gotLock = false;
+ }
+ else {
+ gotLock = true;
+ }
+
+ }
+ catch ( UpdateNotTheSame& up ) {
+
+ // this means our update got through on some, but not others
+            warning() << "distributed lock '" << lockName << "' did not propagate properly." << causedBy( up ) << endl;
+
+ // Overall protection derives from:
+ // All unlocking updates use the ts value when setting state to 0
+ // This ensures that during locking, we can override all smaller ts locks with
+ // our own safe ts value and not be unlocked afterward.
+ for ( unsigned i = 0; i < up.size(); i++ ) {
+
+ ScopedDbConnection indDB( up[i].first );
+ BSONObj indUpdate;
+
+ try {
+
+ indUpdate = indDB->findOne( locksNS , _id );
+
+ // If we override this lock in any way, grab and protect it.
+ // We assume/ensure that if a process does not have all lock documents, it is no longer
+ // holding the lock.
+ // Note - finalized locks may compete too, but we know they've won already if competing
+ // in this round. Cleanup of crashes during finalizing may take a few tries.
+ if( indUpdate["ts"] < lockDetails["ts"] || indUpdate["state"].numberInt() == 0 ) {
+
+ BSONObj grabQuery = BSON( "_id" << _id["_id"].String() << "ts" << indUpdate["ts"].OID() );
+
+ // Change ts so we won't be forced, state so we won't be relocked
+ BSONObj grabChanges = BSON( "ts" << lockDetails["ts"].OID() << "state" << 1 );
+
+ // Either our update will succeed, and we'll grab the lock, or it will fail b/c some other
+ // process grabbed the lock (which will change the ts), but the lock will be set until forcing
+ indDB->update( locksNS, grabQuery, BSON( "$set" << grabChanges ) );
+
+ indUpdate = indDB->findOne( locksNS, _id );
+
+ // Our lock should now be set until forcing.
+ assert( indUpdate["state"].numberInt() == 1 );
+
+ }
+ // else our lock is the same, in which case we're safe, or it's a bigger lock,
+ // in which case we won't need to protect anything since we won't have the lock.
+
+ }
+ catch( std::exception& e ) {
+ conn.done();
+ throw LockException( str::stream() << "distributed lock " << lockName
+ << " had errors communicating with individual server "
+                                     << up[i].first << causedBy( e ), 13661 );
+ }
+
+ assert( !indUpdate.isEmpty() );
+
+ // Find max TS value
+ if ( currLock.isEmpty() || currLock["ts"] < indUpdate["ts"] ) {
+ currLock = indUpdate.getOwned();
+ }
+
+ indDB.done();
+
+ }
+
+ // Locks on all servers are now set and safe until forcing
+
+ if ( currLock["ts"] == lockDetails["ts"] ) {
+ log( logLvl - 1 ) << "lock update won, completing lock propagation for '" << lockName << "'" << endl;
+ gotLock = true;
+ }
+ else {
+ log( logLvl - 1 ) << "lock update lost, lock '" << lockName << "' not propagated." << endl;
+
+ // Register the lock for deletion, to speed up failover
+ // Not strictly necessary, but helpful
+ distLockPinger.addUnlockOID( lockDetails["ts"].OID() );
+
+ gotLock = false;
+ }
+ }
+ catch( std::exception& e ) {
+ conn.done();
+ throw LockException( str::stream() << "exception creating distributed lock "
+ << lockName << causedBy( e ), 13663 );
+ }
+
+ // Complete lock propagation
+ if( gotLock ) {
+
+ // This is now safe, since we know that no new locks will be placed on top of the ones we've checked for at
+ // least 15 minutes. Sets the state = 2, so that future clients can determine that the lock is truly set.
+ // The invariant for rollbacks is that we will never force locks with state = 2 and active pings, since that
+ // indicates the lock is active, but this means the process creating/destroying them must explicitly poll
+ // when something goes wrong.
+ try {
+
+ BSONObjBuilder finalLockDetails;
+ BSONObjIterator bi( lockDetails );
+ while( bi.more() ) {
+ BSONElement el = bi.next();
+ if( (string) ( el.fieldName() ) == "state" )
+ finalLockDetails.append( "state", 2 );
+ else finalLockDetails.append( el );
+ }
+
+ conn->update( locksNS , _id , BSON( "$set" << finalLockDetails.obj() ) );
+
+ BSONObj err = conn->getLastErrorDetailed();
+ string errMsg = DBClientWithCommands::getLastErrorString(err);
+
+ currLock = conn->findOne( locksNS , _id );
+
+ if ( !errMsg.empty() || !err["n"].type() || err["n"].numberInt() < 1 ) {
+ warning() << "could not finalize winning lock " << lockName
+ << ( !errMsg.empty() ? causedBy( errMsg ) : " (did not update lock) " ) << endl;
+ gotLock = false;
+ }
+ else {
+ // SUCCESS!
+ gotLock = true;
+ }
+
+ }
+ catch( std::exception& e ) {
+ conn.done();
+
+ // Register the bad final lock for deletion, in case it exists
+ distLockPinger.addUnlockOID( lockDetails["ts"].OID() );
+
+ throw LockException( str::stream() << "exception finalizing winning lock"
+ << causedBy( e ), 13662 );
+ }
+
+ }
+
+ *other = currLock;
+ other->getOwned();
+
+ // Log our lock results
+ if(gotLock)
+ log( logLvl - 1 ) << "distributed lock '" << lockName << "' acquired, ts : " << currLock["ts"].OID() << endl;
+ else
+ log( logLvl - 1 ) << "distributed lock '" << lockName << "' was not acquired." << endl;
+
+ conn.done();
+
+ return gotLock;
+ }
+
+    // Unlock takes an optional pointer to the old lock document, so you can be specific about which
+    // particular lock you want to unlock. This is needed when the config server is down and therefore
+    // cannot tell you which lock ts to try later.
+ void DistributedLock::unlock( BSONObj* oldLockPtr ) {
+
+ assert( _name != "" );
+
+ string lockName = _name + string("/") + _processId;
+
+ const int maxAttempts = 3;
+ int attempted = 0;
+
+ BSONObj oldLock;
+ if( oldLockPtr ) oldLock = *oldLockPtr;
+
+ while ( ++attempted <= maxAttempts ) {
+
+ ScopedDbConnection conn( _conn );
+
+ try {
+
+ if( oldLock.isEmpty() )
+ oldLock = conn->findOne( locksNS, _id );
+
+ if( oldLock["state"].eoo() || oldLock["state"].numberInt() != 2 || oldLock["ts"].eoo() ) {
+ warning() << "cannot unlock invalid distributed lock " << oldLock << endl;
+ conn.done();
+ break;
+ }
+
+ // Use ts when updating lock, so that new locks can be sure they won't get trampled.
+ conn->update( locksNS ,
+ BSON( "_id" << _id["_id"].String() << "ts" << oldLock["ts"].OID() ),
+ BSON( "$set" << BSON( "state" << 0 ) ) );
+
+ // Check that the lock was actually unlocked... if not, try again
+ BSONObj err = conn->getLastErrorDetailed();
+ string errMsg = DBClientWithCommands::getLastErrorString(err);
+
+ if ( !errMsg.empty() || !err["n"].type() || err["n"].numberInt() < 1 ){
+ warning() << "distributed lock unlock update failed, retrying "
+ << ( errMsg.empty() ? causedBy( "( update not registered )" ) : causedBy( errMsg ) ) << endl;
+ conn.done();
+ continue;
+ }
+
+ log( logLvl - 1 ) << "distributed lock '" << lockName << "' unlocked. " << endl;
+ conn.done();
+ return;
+ }
+ catch( UpdateNotTheSame& ) {
+ log( logLvl - 1 ) << "distributed lock '" << lockName << "' unlocked (messily). " << endl;
+ conn.done();
+ break;
+ }
+ catch ( std::exception& e) {
+ warning() << "distributed lock '" << lockName << "' failed unlock attempt."
+ << causedBy( e ) << endl;
+
+ conn.done();
+ // TODO: If our lock timeout is small, sleeping this long may be unsafe.
+ if( attempted != maxAttempts) sleepsecs(1 << attempted);
+ }
+ }
+
+ if( attempted > maxAttempts && ! oldLock.isEmpty() && ! oldLock["ts"].eoo() ) {
+
+ log( logLvl - 1 ) << "could not unlock distributed lock with ts " << oldLock["ts"].OID()
+ << ", will attempt again later" << endl;
+
+ // We couldn't unlock the lock at all, so try again later in the pinging thread...
+ distLockPinger.addUnlockOID( oldLock["ts"].OID() );
+ }
+ else if( attempted > maxAttempts ) {
+ warning() << "could not unlock untracked distributed lock, a manual force may be required" << endl;
+ }
+
+ warning() << "distributed lock '" << lockName << "' couldn't consummate unlock request. "
+ << "lock may be taken over after " << ( _lockTimeout / (60 * 1000) )
+ << " minutes timeout." << endl;
+ }
+
+
+
+}
diff --git a/src/mongo/client/distlock.h b/src/mongo/client/distlock.h
new file mode 100644
index 00000000000..106a5d00001
--- /dev/null
+++ b/src/mongo/client/distlock.h
@@ -0,0 +1,244 @@
+// distlock.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "../pch.h"
+#include "dbclient.h"
+#include "connpool.h"
+#include "redef_macros.h"
+#include "syncclusterconnection.h"
+
+#define LOCK_TIMEOUT (15 * 60 * 1000)
+#define LOCK_SKEW_FACTOR (30)
+#define LOCK_PING (LOCK_TIMEOUT / LOCK_SKEW_FACTOR)
+#define MAX_LOCK_NET_SKEW (LOCK_TIMEOUT / LOCK_SKEW_FACTOR)
+#define MAX_LOCK_CLOCK_SKEW (LOCK_TIMEOUT / LOCK_SKEW_FACTOR)
+#define NUM_LOCK_SKEW_CHECKS (3)
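+
+// With the defaults above, LOCK_TIMEOUT is 900000ms (15 minutes), and LOCK_PING,
+// MAX_LOCK_NET_SKEW and MAX_LOCK_CLOCK_SKEW all work out to 30000ms (30 seconds).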
+
+// The maximum clock skew we need to handle between config servers is
+// 2 * MAX_LOCK_NET_SKEW + MAX_LOCK_CLOCK_SKEW.
+
+// Net effect of *this* clock being slow is effectively a multiplier on the max net skew
+// and a linear increase or decrease of the max clock skew.
+
+namespace mongo {
+
+ /**
+ * Exception class to encapsulate exceptions while managing distributed locks
+ */
+ class LockException : public DBException {
+ public:
+ LockException( const char * msg , int code ) : DBException( msg, code ) {}
+ LockException( const string& msg, int code ) : DBException( msg, code ) {}
+ virtual ~LockException() throw() { }
+ };
+
+ /**
+ * Indicates an error in retrieving time values from remote servers.
+ */
+ class TimeNotFoundException : public LockException {
+ public:
+ TimeNotFoundException( const char * msg , int code ) : LockException( msg, code ) {}
+ TimeNotFoundException( const string& msg, int code ) : LockException( msg, code ) {}
+ virtual ~TimeNotFoundException() throw() { }
+ };
+
+ /**
+ * The distributed lock is a configdb backed way of synchronizing system-wide tasks. A task must be identified by a
+ * unique name across the system (e.g., "balancer"). A lock is taken by writing a document in the configdb's locks
+ * collection with that name.
+ *
+     * To be maintained, each taken lock needs to be revalidated ("pinged") within a pre-established amount of time. This
+     * class does this maintenance automatically once a DistributedLock object has been constructed.
+ */
+ class DistributedLock {
+ public:
+
+ static LabeledLevel logLvl;
+
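+        // PingData fields: ( process id of the last ping, time of that ping,
+        //                    remote time when the ping was recorded, ts of the lock document )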
+ typedef boost::tuple<string, Date_t, Date_t, OID> PingData;
+
+ class LastPings {
+ public:
+ LastPings() : _mutex( "DistributedLock::LastPings" ) {}
+ ~LastPings(){}
+
+ PingData getLastPing( const ConnectionString& conn, const string& lockName );
+ void setLastPing( const ConnectionString& conn, const string& lockName, const PingData& pd );
+
+ mongo::mutex _mutex;
+ map< std::pair<string, string>, PingData > _lastPings;
+ };
+
+ static LastPings lastPings;
+
+ /**
+ * The constructor does not connect to the configdb yet and constructing does not mean the lock was acquired.
+ * Construction does trigger a lock "pinging" mechanism, though.
+ *
+ * @param conn address of config(s) server(s)
+ * @param name identifier for the lock
+         * @param lockTimeout how long the lock can go "unpinged" before a new attempt to lock steals it (in ms)
+         * @param asProcess if true, identify the lock holder by the caller-specific id (see getDistLockId())
+         *        rather than the shared process id
+ *
+ */
+ DistributedLock( const ConnectionString& conn , const string& name , unsigned long long lockTimeout = 0, bool asProcess = false );
+ ~DistributedLock(){};
+
+ /**
+ * Attempts to acquire 'this' lock, checking if it could or should be stolen from the previous holder. Please
+ * consider using the dist_lock_try construct to acquire this lock in an exception safe way.
+ *
+ * @param why human readable description of why the lock is being taken (used to log)
+         * @param reenter whether this is a lock re-entry or a new lock
+ * @param other configdb's lock document that is currently holding the lock, if lock is taken, or our own lock
+ * details if not
+ * @return true if it managed to grab the lock
+ */
+ bool lock_try( const string& why , bool reenter = false, BSONObj * other = 0 );
+
+ /**
+ * Releases a previously taken lock.
+ */
+ void unlock( BSONObj* oldLockPtr = NULL );
+
+ Date_t getRemoteTime();
+
+ bool isRemoteTimeSkewed();
+
+ const string& getProcessId();
+
+ const ConnectionString& getRemoteConnection();
+
+ /**
+ * Check the skew between a cluster of servers
+ */
+ static bool checkSkew( const ConnectionString& cluster, unsigned skewChecks = NUM_LOCK_SKEW_CHECKS, unsigned long long maxClockSkew = MAX_LOCK_CLOCK_SKEW, unsigned long long maxNetSkew = MAX_LOCK_NET_SKEW );
+
+ /**
+ * Get the remote time from a server or cluster
+ */
+ static Date_t remoteTime( const ConnectionString& cluster, unsigned long long maxNetSkew = MAX_LOCK_NET_SKEW );
+
+ static bool killPinger( DistributedLock& lock );
+
+ /**
+ * Namespace for lock pings
+ */
+ static const string lockPingNS;
+
+ /**
+ * Namespace for locks
+ */
+ static const string locksNS;
+
+ const ConnectionString _conn;
+ const string _name;
+ const BSONObj _id;
+ const string _processId;
+
+ // Timeout for lock, usually LOCK_TIMEOUT
+ const unsigned long long _lockTimeout;
+ const unsigned long long _maxClockSkew;
+ const unsigned long long _maxNetSkew;
+ const unsigned long long _lockPing;
+
+ private:
+
+ void resetLastPing(){ lastPings.setLastPing( _conn, _name, PingData() ); }
+ void setLastPing( const PingData& pd ){ lastPings.setLastPing( _conn, _name, pd ); }
+ PingData getLastPing(){ return lastPings.getLastPing( _conn, _name ); }
+
+ // May or may not exist, depending on startup
+ mongo::mutex _mutex;
+ string _threadId;
+
+ };
+
+ class dist_lock_try {
+ public:
+
+ dist_lock_try() : _lock(NULL), _got(false) {}
+
+ dist_lock_try( const dist_lock_try& that ) : _lock(that._lock), _got(that._got), _other(that._other) {
+ _other.getOwned();
+
+ // Make sure the lock ownership passes to this object,
+ // so we only unlock once.
+ ((dist_lock_try&) that)._got = false;
+ ((dist_lock_try&) that)._lock = NULL;
+ ((dist_lock_try&) that)._other = BSONObj();
+ }
+
+ // Needed so we can handle lock exceptions in context of lock try.
+ dist_lock_try& operator=( const dist_lock_try& that ){
+
+ if( this == &that ) return *this;
+
+ _lock = that._lock;
+ _got = that._got;
+ _other = that._other;
+ _other.getOwned();
+ _why = that._why;
+
+ // Make sure the lock ownership passes to this object,
+ // so we only unlock once.
+ ((dist_lock_try&) that)._got = false;
+ ((dist_lock_try&) that)._lock = NULL;
+ ((dist_lock_try&) that)._other = BSONObj();
+
+ return *this;
+ }
+
+ dist_lock_try( DistributedLock * lock , string why )
+ : _lock(lock), _why(why) {
+ _got = _lock->lock_try( why , false , &_other );
+ }
+
+ ~dist_lock_try() {
+ if ( _got ) {
+ assert( ! _other.isEmpty() );
+ _lock->unlock( &_other );
+ }
+ }
+
+ bool reestablish(){
+ return retry();
+ }
+
+ bool retry() {
+ assert( _lock );
+ assert( _got );
+ assert( ! _other.isEmpty() );
+
+ return _got = _lock->lock_try( _why , true, &_other );
+ }
+
+ bool got() const { return _got; }
+ BSONObj other() const { return _other; }
+
+ private:
+ DistributedLock * _lock;
+ bool _got;
+ BSONObj _other;
+ string _why;
+ };
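+
+    /* Usage sketch (illustrative only; 'configConnString' is assumed to be a ConnectionString
+     * pointing at the config server(s)):
+     *
+     *     DistributedLock myLock( configConnString , "balancer" );
+     *     dist_lock_try attempt( &myLock , "rebalancing chunks" );
+     *     if ( attempt.got() ) {
+     *         // ... do work; the lock is released when 'attempt' goes out of scope
+     *     }
+     */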
+
+}
+
diff --git a/src/mongo/client/distlock_test.cpp b/src/mongo/client/distlock_test.cpp
new file mode 100644
index 00000000000..a46caa44c11
--- /dev/null
+++ b/src/mongo/client/distlock_test.cpp
@@ -0,0 +1,446 @@
+// distlock_test.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+#include "../pch.h"
+#include "dbclient.h"
+#include "distlock.h"
+#include "../db/commands.h"
+#include "../util/bson_util.h"
+#include "../util/timer.h"
+
+// Tweak some Boost config options for the RNG headers, since they otherwise cause MSVC builds to fail
+#include <boost/config.hpp>
+
+#if defined(BOOST_MSVC) && defined(BOOST_NO_MEMBER_TEMPLATE_FRIENDS)
+#undef BOOST_NO_MEMBER_TEMPLATE_FRIENDS
+#define BOOST_RNG_HACK
+#endif
+
+// Well, sort-of cross-platform RNG
+#include <boost/random/mersenne_twister.hpp>
+
+#ifdef BOOST_RNG_HACK
+#define BOOST_NO_MEMBER_TEMPLATE_FRIENDS
+#undef BOOST_RNG_HACK
+#endif
+
+
+#include <boost/random/uniform_int.hpp>
+#include <boost/random/variate_generator.hpp>
+
+
+// TODO: Make a method in BSONObj if useful, don't modify for now
+#define string_field(obj, name, def) ( obj.hasField(name) ? obj[name].String() : def )
+#define number_field(obj, name, def) ( obj.hasField(name) ? obj[name].Number() : def )
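+// e.g. number_field(cmdObj, "numThreads", 4) yields cmdObj["numThreads"].Number() if the field
+// is present, and 4 otherwise.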
+
+namespace mongo {
+
+ class TestDistLockWithSync: public Command {
+ public:
+ TestDistLockWithSync() :
+ Command("_testDistLockWithSyncCluster") {
+ }
+ virtual void help(stringstream& help) const {
+ help << "should not be calling this directly" << endl;
+ }
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual LockType locktype() const {
+ return NONE;
+ }
+
+ static void runThread() {
+ while (keepGoing) {
+ if (current->lock_try( "test" )) {
+ count++;
+ int before = count;
+ sleepmillis(3);
+ int after = count;
+
+ if (after != before) {
+ error() << " before: " << before << " after: " << after
+ << endl;
+ }
+
+ current->unlock();
+ }
+ }
+ }
+
+ bool run(const string&, BSONObj& cmdObj, int, string& errmsg,
+ BSONObjBuilder& result, bool) {
+ Timer t;
+ DistributedLock lk(ConnectionString(cmdObj["host"].String(),
+ ConnectionString::SYNC), "testdistlockwithsync", 0, 0);
+ current = &lk;
+ count = 0;
+ gotit = 0;
+ errors = 0;
+ keepGoing = true;
+
+ vector<shared_ptr<boost::thread> > l;
+ for (int i = 0; i < 4; i++) {
+ l.push_back(
+ shared_ptr<boost::thread> (new boost::thread(runThread)));
+ }
+
+ int secs = 10;
+ if (cmdObj["secs"].isNumber())
+ secs = cmdObj["secs"].numberInt();
+ sleepsecs(secs);
+ keepGoing = false;
+
+ for (unsigned i = 0; i < l.size(); i++)
+ l[i]->join();
+
+ current = 0;
+
+ result.append("count", count);
+ result.append("gotit", gotit);
+ result.append("errors", errors);
+ result.append("timeMS", t.millis());
+
+ return errors == 0;
+ }
+
+ // variables for test
+ static DistributedLock * current;
+ static int gotit;
+ static int errors;
+ static AtomicUInt count;
+
+ static bool keepGoing;
+
+ } testDistLockWithSyncCmd;
+
+ DistributedLock * TestDistLockWithSync::current;
+ AtomicUInt TestDistLockWithSync::count;
+ int TestDistLockWithSync::gotit;
+ int TestDistLockWithSync::errors;
+ bool TestDistLockWithSync::keepGoing;
+
+
+
+ class TestDistLockWithSkew: public Command {
+ public:
+
+ static const int logLvl = 1;
+
+ TestDistLockWithSkew() :
+ Command("_testDistLockWithSkew") {
+ }
+ virtual void help(stringstream& help) const {
+ help << "should not be calling this directly" << endl;
+ }
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual LockType locktype() const {
+ return NONE;
+ }
+
+ void runThread(ConnectionString& hostConn, unsigned threadId, unsigned seed,
+ BSONObj& cmdObj, BSONObjBuilder& result) {
+
+ stringstream ss;
+ ss << "thread-" << threadId;
+ setThreadName(ss.str().c_str());
+
+ // Lock name
+ string lockName = string_field(cmdObj, "lockName", this->name + "_lock");
+
+ // Range of clock skew in diff threads
+ int skewRange = (int) number_field(cmdObj, "skewRange", 1);
+
+ // How long to wait with the lock
+ int threadWait = (int) number_field(cmdObj, "threadWait", 30);
+ if(threadWait <= 0) threadWait = 1;
+
+ // Max amount of time (ms) a thread waits before checking the lock again
+ int threadSleep = (int) number_field(cmdObj, "threadSleep", 30);
+ if(threadSleep <= 0) threadSleep = 1;
+
+ // How long until the lock is forced in ms, only compared locally
+ unsigned long long takeoverMS = (unsigned long long) number_field(cmdObj, "takeoverMS", 0);
+
+ // Whether or not we should hang some threads
+ int hangThreads = (int) number_field(cmdObj, "hangThreads", 0);
+
+
+ boost::mt19937 gen((boost::mt19937::result_type) seed);
+
+ boost::variate_generator<boost::mt19937&, boost::uniform_int<> > randomSkew(gen, boost::uniform_int<>(0, skewRange));
+ boost::variate_generator<boost::mt19937&, boost::uniform_int<> > randomWait(gen, boost::uniform_int<>(1, threadWait));
+ boost::variate_generator<boost::mt19937&, boost::uniform_int<> > randomSleep(gen, boost::uniform_int<>(1, threadSleep));
+ boost::variate_generator<boost::mt19937&, boost::uniform_int<> > randomNewLock(gen, boost::uniform_int<>(0, 3));
+
+
+ int skew = 0;
+ if (!lock.get()) {
+
+ // Pick a skew, but the first two threads skew the whole range
+ if(threadId == 0)
+ skew = -skewRange / 2;
+ else if(threadId == 1)
+ skew = skewRange / 2;
+ else skew = randomSkew() - (skewRange / 2);
+
+ // Skew this thread
+ jsTimeVirtualThreadSkew( skew );
+
+ log() << "Initializing lock with skew of " << skew << " for thread " << threadId << endl;
+
+ lock.reset(new DistributedLock(hostConn, lockName, takeoverMS, true ));
+
+ log() << "Skewed time " << jsTime() << " for thread " << threadId << endl
+ << " max wait (with lock: " << threadWait << ", after lock: " << threadSleep << ")" << endl
+ << " takeover in " << takeoverMS << "(ms remote)" << endl;
+
+ }
+
+ DistributedLock* myLock = lock.get();
+
+ bool errors = false;
+ BSONObj lockObj;
+ while (keepGoing) {
+ try {
+
+ if (myLock->lock_try("Testing distributed lock with skew.", false, &lockObj )) {
+
+ log() << "**** Locked for thread " << threadId << " with ts " << lockObj["ts"] << endl;
+
+ if( count % 2 == 1 && ! myLock->lock_try( "Testing lock re-entry.", true ) ) {
+ errors = true;
+ log() << "**** !Could not re-enter lock already held" << endl;
+ break;
+ }
+
+ if( count % 3 == 1 && myLock->lock_try( "Testing lock non-re-entry.", false ) ) {
+ errors = true;
+ log() << "**** !Invalid lock re-entry" << endl;
+ break;
+ }
+
+ count++;
+ int before = count;
+ int sleep = randomWait();
+ sleepmillis(sleep);
+ int after = count;
+
+ if(after != before) {
+ errors = true;
+ log() << "**** !Bad increment while sleeping with lock for: " << sleep << "ms" << endl;
+ break;
+ }
+
+ // Unlock only half the time...
+ if(hangThreads == 0 || threadId % hangThreads != 0) {
+ log() << "**** Unlocking for thread " << threadId << " with ts " << lockObj["ts"] << endl;
+ myLock->unlock( &lockObj );
+ }
+ else {
+ log() << "**** Not unlocking for thread " << threadId << endl;
+ assert( DistributedLock::killPinger( *myLock ) );
+ // We're simulating a crashed process...
+ break;
+ }
+ }
+
+ }
+ catch( LockException& e ) {
+ log() << "*** !Could not try distributed lock." << causedBy( e ) << endl;
+ break;
+ }
+
+ // Create a new lock 1/3 of the time
+ if( randomNewLock() > 1 ){
+ lock.reset(new DistributedLock( hostConn, lockName, takeoverMS, true ));
+ myLock = lock.get();
+ }
+
+ sleepmillis(randomSleep());
+ }
+
+ result << "errors" << errors
+ << "skew" << skew
+ << "takeover" << (long long) takeoverMS
+ << "localTimeout" << (takeoverMS > 0);
+
+ }
+
+ void test(ConnectionString& hostConn, string& lockName, unsigned seed) {
+ return;
+ }
+
+ bool run(const string&, BSONObj& cmdObj, int, string& errmsg,
+ BSONObjBuilder& result, bool) {
+
+ Timer t;
+
+ ConnectionString hostConn(cmdObj["host"].String(),
+ ConnectionString::SYNC);
+
+ unsigned seed = (unsigned) number_field(cmdObj, "seed", 0);
+ int numThreads = (int) number_field(cmdObj, "numThreads", 4);
+ int wait = (int) number_field(cmdObj, "wait", 10000);
+
+ log() << "Starting " << this->name << " with -" << endl
+ << " seed: " << seed << endl
+ << " numThreads: " << numThreads << endl
+ << " total wait: " << wait << endl << endl;
+
+ // Skew host clocks if needed
+ try {
+ skewClocks( hostConn, cmdObj );
+ }
+ catch( DBException e ) {
+ errmsg = str::stream() << "Clocks could not be skewed." << causedBy( e );
+ return false;
+ }
+
+ count = 0;
+ keepGoing = true;
+
+ vector<shared_ptr<boost::thread> > threads;
+ vector<shared_ptr<BSONObjBuilder> > results;
+ for (int i = 0; i < numThreads; i++) {
+ results.push_back(shared_ptr<BSONObjBuilder> (new BSONObjBuilder()));
+ threads.push_back(shared_ptr<boost::thread> (new boost::thread(
+ boost::bind(&TestDistLockWithSkew::runThread, this,
+ hostConn, (unsigned) i, seed + i, boost::ref(cmdObj),
+ boost::ref(*(results[i].get()))))));
+ }
+
+ sleepsecs(wait / 1000);
+ keepGoing = false;
+
+ bool errors = false;
+ for (unsigned i = 0; i < threads.size(); i++) {
+ threads[i]->join();
+ errors = errors || results[i].get()->obj()["errors"].Bool();
+ }
+
+ result.append("count", count);
+ result.append("errors", errors);
+ result.append("timeMS", t.millis());
+
+ return !errors;
+
+ }
+
+ /**
+ * Skews the clocks of a remote cluster by a particular amount, specified by
+ * the "skewHosts" element in a BSONObj.
+ */
+ static void skewClocks( ConnectionString& cluster, BSONObj& cmdObj ) {
+
+ vector<long long> skew;
+ if(cmdObj.hasField("skewHosts")) {
+ bsonArrToNumVector<long long>(cmdObj["skewHosts"], skew);
+ }
+ else {
+ log( logLvl ) << "No host clocks to skew." << endl;
+ return;
+ }
+
+ log( logLvl ) << "Skewing clocks of hosts " << cluster << endl;
+
+ unsigned s = 0;
+ for(vector<long long>::iterator i = skew.begin(); i != skew.end(); ++i,s++) {
+
+ ConnectionString server( cluster.getServers()[s] );
+ ScopedDbConnection conn( server );
+
+ BSONObj result;
+ try {
+ bool success = conn->runCommand( string("admin"), BSON( "_skewClockCommand" << 1 << "skew" << *i ), result );
+
+ uassert(13678, str::stream() << "Could not communicate with server " << server.toString() << " in cluster " << cluster.toString() << " to change skew by " << *i, success );
+
+ log( logLvl + 1 ) << " Skewed host " << server << " clock by " << *i << endl;
+ }
+ catch(...) {
+ conn.done();
+ throw;
+ }
+
+ conn.done();
+
+ }
+
+ }
+
+ // variables for test
+ thread_specific_ptr<DistributedLock> lock;
+ AtomicUInt count;
+ bool keepGoing;
+
+ } testDistLockWithSkewCmd;
+
+
+ /**
+     * Utility command to virtually skew the clock of a mongo server by a particular amount.
+     * This skews the clock globally; per-thread skew is also possible.
+ */
+ class SkewClockCommand: public Command {
+ public:
+ SkewClockCommand() :
+ Command("_skewClockCommand") {
+ }
+ virtual void help(stringstream& help) const {
+ help << "should not be calling this directly" << endl;
+ }
+
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual LockType locktype() const {
+ return NONE;
+ }
+
+ bool run(const string&, BSONObj& cmdObj, int, string& errmsg,
+ BSONObjBuilder& result, bool) {
+
+ long long skew = (long long) number_field(cmdObj, "skew", 0);
+
+ log() << "Adjusting jsTime() clock skew to " << skew << endl;
+
+ jsTimeVirtualSkew( skew );
+
+ log() << "JSTime adjusted, now is " << jsTime() << endl;
+
+ return true;
+
+ }
+
+ } testSkewClockCommand;
+
+}
+
diff --git a/src/mongo/client/examples/authTest.cpp b/src/mongo/client/examples/authTest.cpp
new file mode 100644
index 00000000000..71cdd390cff
--- /dev/null
+++ b/src/mongo/client/examples/authTest.cpp
@@ -0,0 +1,54 @@
+// authTest.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+
+#include "client/dbclient.h"
+
+using namespace mongo;
+
+int main( int argc, const char **argv ) {
+
+ const char *port = "27017";
+ if ( argc != 1 ) {
+ if ( argc != 3 )
+ throw -12;
+ port = argv[ 2 ];
+ }
+
+ DBClientConnection conn;
+ string errmsg;
+ if ( ! conn.connect( string( "127.0.0.1:" ) + port , errmsg ) ) {
+ cout << "couldn't connect : " << errmsg << endl;
+ throw -11;
+ }
+
+ {
+ // clean up old data from any previous tests
+ conn.remove( "test.system.users" , BSONObj() );
+ }
+
+ conn.insert( "test.system.users" , BSON( "user" << "eliot" << "pwd" << conn.createPasswordDigest( "eliot" , "bar" ) ) );
+
+ errmsg.clear();
+ bool ok = conn.auth( "test" , "eliot" , "bar" , errmsg );
+ if ( ! ok )
+ cout << errmsg << endl;
+ MONGO_assert( ok );
+
+ MONGO_assert( ! conn.auth( "test" , "eliot" , "bars" , errmsg ) );
+}
diff --git a/src/mongo/client/examples/clientTest.cpp b/src/mongo/client/examples/clientTest.cpp
new file mode 100644
index 00000000000..aaea6bd1bdf
--- /dev/null
+++ b/src/mongo/client/examples/clientTest.cpp
@@ -0,0 +1,279 @@
+// clientTest.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * A simple test for the C++ driver.
+ */
+
+// this header should be first to ensure that it includes cleanly in any context
+#include "client/dbclient.h"
+
+#include <iostream>
+
+#ifndef assert
+# define assert(x) MONGO_assert(x)
+#endif
+
+using namespace std;
+using namespace mongo;
+
+int main( int argc, const char **argv ) {
+
+ const char *port = "27017";
+ if ( argc != 1 ) {
+ if ( argc != 3 )
+ throw -12;
+ port = argv[ 2 ];
+ }
+
+ DBClientConnection conn;
+ string errmsg;
+ if ( ! conn.connect( string( "127.0.0.1:" ) + port , errmsg ) ) {
+ cout << "couldn't connect : " << errmsg << endl;
+ throw -11;
+ }
+
+ const char * ns = "test.test1";
+
+ conn.dropCollection(ns);
+
+ // clean up old data from any previous tests
+ conn.remove( ns, BSONObj() );
+ assert( conn.findOne( ns , BSONObj() ).isEmpty() );
+
+ // test insert
+ conn.insert( ns ,BSON( "name" << "eliot" << "num" << 1 ) );
+ assert( ! conn.findOne( ns , BSONObj() ).isEmpty() );
+
+ // test remove
+ conn.remove( ns, BSONObj() );
+ assert( conn.findOne( ns , BSONObj() ).isEmpty() );
+
+
+ // insert, findOne testing
+ conn.insert( ns , BSON( "name" << "eliot" << "num" << 1 ) );
+ {
+ BSONObj res = conn.findOne( ns , BSONObj() );
+ assert( strstr( res.getStringField( "name" ) , "eliot" ) );
+ assert( ! strstr( res.getStringField( "name2" ) , "eliot" ) );
+ assert( 1 == res.getIntField( "num" ) );
+ }
+
+
+ // cursor
+ conn.insert( ns ,BSON( "name" << "sara" << "num" << 2 ) );
+ {
+ auto_ptr<DBClientCursor> cursor = conn.query( ns , BSONObj() );
+ int count = 0;
+ while ( cursor->more() ) {
+ count++;
+ BSONObj obj = cursor->next();
+ }
+ assert( count == 2 );
+ }
+
+ {
+ auto_ptr<DBClientCursor> cursor = conn.query( ns , BSON( "num" << 1 ) );
+ int count = 0;
+ while ( cursor->more() ) {
+ count++;
+ BSONObj obj = cursor->next();
+ }
+ assert( count == 1 );
+ }
+
+ {
+ auto_ptr<DBClientCursor> cursor = conn.query( ns , BSON( "num" << 3 ) );
+ int count = 0;
+ while ( cursor->more() ) {
+ count++;
+ BSONObj obj = cursor->next();
+ }
+ assert( count == 0 );
+ }
+
+ // update
+ {
+ BSONObj res = conn.findOne( ns , BSONObjBuilder().append( "name" , "eliot" ).obj() );
+ assert( ! strstr( res.getStringField( "name2" ) , "eliot" ) );
+
+ BSONObj after = BSONObjBuilder().appendElements( res ).append( "name2" , "h" ).obj();
+
+ conn.update( ns , BSONObjBuilder().append( "name" , "eliot2" ).obj() , after );
+ res = conn.findOne( ns , BSONObjBuilder().append( "name" , "eliot" ).obj() );
+ assert( ! strstr( res.getStringField( "name2" ) , "eliot" ) );
+ assert( conn.findOne( ns , BSONObjBuilder().append( "name" , "eliot2" ).obj() ).isEmpty() );
+
+ conn.update( ns , BSONObjBuilder().append( "name" , "eliot" ).obj() , after );
+ res = conn.findOne( ns , BSONObjBuilder().append( "name" , "eliot" ).obj() );
+ assert( strstr( res.getStringField( "name" ) , "eliot" ) );
+ assert( strstr( res.getStringField( "name2" ) , "h" ) );
+ assert( conn.findOne( ns , BSONObjBuilder().append( "name" , "eliot2" ).obj() ).isEmpty() );
+
+ // upsert
+ conn.update( ns , BSONObjBuilder().append( "name" , "eliot2" ).obj() , after , 1 );
+ assert( ! conn.findOne( ns , BSONObjBuilder().append( "name" , "eliot" ).obj() ).isEmpty() );
+
+ }
+
+ {
+ // ensure index
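+        // ensureIndex returns true when it actually creates the index and false when an
+        // identical index has already been ensured on this connection, hence the two asserts.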
+ assert( conn.ensureIndex( ns , BSON( "name" << 1 ) ) );
+ assert( ! conn.ensureIndex( ns , BSON( "name" << 1 ) ) );
+ }
+
+ {
+        // query, hint, and validate related tests
+ assert( conn.findOne(ns, "{}")["name"].str() == "sara" );
+
+ assert( conn.findOne(ns, "{ name : 'eliot' }")["name"].str() == "eliot" );
+ assert( conn.getLastError() == "" );
+
+ // nonexistent index test
+ bool asserted = false;
+ try {
+ conn.findOne(ns, Query("{name:\"eliot\"}").hint("{foo:1}"));
+ }
+ catch ( ... ) {
+ asserted = true;
+ }
+ assert( asserted );
+
+ //existing index
+ assert( conn.findOne(ns, Query("{name:'eliot'}").hint("{name:1}")).hasElement("name") );
+
+ // run validate
+ assert( conn.validate( ns ) );
+ }
+
+ {
+ // timestamp test
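+        // appendTimestamp( "ts" ) with no value stores an empty timestamp that the server
+        // fills in, so after the update below the stored value should compare greater.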
+
+ const char * tsns = "test.tstest1";
+ conn.dropCollection( tsns );
+
+ {
+ mongo::BSONObjBuilder b;
+ b.appendTimestamp( "ts" );
+ conn.insert( tsns , b.obj() );
+ }
+
+ mongo::BSONObj out = conn.findOne( tsns , mongo::BSONObj() );
+ Date_t oldTime = out["ts"].timestampTime();
+ unsigned int oldInc = out["ts"].timestampInc();
+
+ {
+ mongo::BSONObjBuilder b1;
+ b1.append( out["_id"] );
+
+ mongo::BSONObjBuilder b2;
+ b2.append( out["_id"] );
+ b2.appendTimestamp( "ts" );
+
+ conn.update( tsns , b1.obj() , b2.obj() );
+ }
+
+ BSONObj found = conn.findOne( tsns , mongo::BSONObj() );
+ cout << "old: " << out << "\nnew: " << found << endl;
+ assert( ( oldTime < found["ts"].timestampTime() ) ||
+ ( oldTime == found["ts"].timestampTime() && oldInc < found["ts"].timestampInc() ) );
+
+ }
+
+ {
+ // check that killcursors doesn't affect last error
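+        // say() sends the OP_KILL_CURSORS message one-way (no reply is read); even though the
+        // bogus cursor count upsets the server, killcursors should leave last error untouched.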
+ assert( conn.getLastError().empty() );
+
+ BufBuilder b;
+ b.appendNum( (int)0 ); // reserved
+ b.appendNum( (int)-1 ); // invalid # of cursors triggers exception
+ b.appendNum( (int)-1 ); // bogus cursor id
+
+ Message m;
+ m.setData( dbKillCursors, b.buf(), b.len() );
+
+ // say() is protected in DBClientConnection, so get superclass
+ static_cast< DBConnector* >( &conn )->say( m );
+
+ assert( conn.getLastError().empty() );
+ }
+
+ {
+ list<string> l = conn.getDatabaseNames();
+ for ( list<string>::iterator i = l.begin(); i != l.end(); i++ ) {
+ cout << "db name : " << *i << endl;
+ }
+
+ l = conn.getCollectionNames( "test" );
+ for ( list<string>::iterator i = l.begin(); i != l.end(); i++ ) {
+ cout << "coll name : " << *i << endl;
+ }
+ }
+
+ {
+ //Map Reduce (this mostly just tests that it compiles with all output types)
+ const string ns = "test.mr";
+ conn.insert(ns, BSON("a" << 1));
+ conn.insert(ns, BSON("a" << 1));
+
+ const char* map = "function() { emit(this.a, 1); }";
+ const char* reduce = "function(key, values) { return Array.sum(values); }";
+
+ const string outcoll = ns + ".out";
+
+ BSONObj out;
+ out = conn.mapreduce(ns, map, reduce, BSONObj()); // default to inline
+ //MONGO_PRINT(out);
+ out = conn.mapreduce(ns, map, reduce, BSONObj(), outcoll);
+ //MONGO_PRINT(out);
+ out = conn.mapreduce(ns, map, reduce, BSONObj(), outcoll.c_str());
+ //MONGO_PRINT(out);
+ out = conn.mapreduce(ns, map, reduce, BSONObj(), BSON("reduce" << outcoll));
+ //MONGO_PRINT(out);
+ }
+
+ {
+ // test timeouts
+
+ DBClientConnection conn( true , 0 , 2 );
+ if ( ! conn.connect( string( "127.0.0.1:" ) + port , errmsg ) ) {
+ cout << "couldn't connect : " << errmsg << endl;
+ throw -11;
+ }
+ conn.insert( "test.totest" , BSON( "x" << 1 ) );
+ BSONObj res;
+
+ bool gotError = false;
+ assert( conn.eval( "test" , "return db.totest.findOne().x" , res ) );
+ try {
+ conn.eval( "test" , "sleep(5000); return db.totest.findOne().x" , res );
+ }
+ catch ( std::exception& e ) {
+ gotError = true;
+ log() << e.what() << endl;
+ }
+ assert( gotError );
+ // sleep so the server isn't locked anymore
+ sleepsecs( 4 );
+
+ assert( conn.eval( "test" , "return db.totest.findOne().x" , res ) );
+
+
+ }
+
+ cout << "client test finished!" << endl;
+}
diff --git a/src/mongo/client/examples/first.cpp b/src/mongo/client/examples/first.cpp
new file mode 100644
index 00000000000..ab5efb325f5
--- /dev/null
+++ b/src/mongo/client/examples/first.cpp
@@ -0,0 +1,86 @@
+// first.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * this is a good first example of how to use mongo from c++
+ */
+
+#include <iostream>
+
+#include "client/dbclient.h"
+
+using namespace std;
+
+void insert( mongo::DBClientConnection & conn , const char * name , int num ) {
+ mongo::BSONObjBuilder obj;
+ obj.append( "name" , name );
+ obj.append( "num" , num );
+ conn.insert( "test.people" , obj.obj() );
+}
+
+int main( int argc, const char **argv ) {
+
+ const char *port = "27017";
+ if ( argc != 1 ) {
+ if ( argc != 3 )
+ throw -12;
+ port = argv[ 2 ];
+ }
+
+ mongo::DBClientConnection conn;
+ string errmsg;
+ if ( ! conn.connect( string( "127.0.0.1:" ) + port , errmsg ) ) {
+ cout << "couldn't connect : " << errmsg << endl;
+ throw -11;
+ }
+
+ {
+ // clean up old data from any previous tests
+ mongo::BSONObjBuilder query;
+ conn.remove( "test.people" , query.obj() );
+ }
+
+ insert( conn , "eliot" , 15 );
+ insert( conn , "sara" , 23 );
+
+ {
+ mongo::BSONObjBuilder query;
+ auto_ptr<mongo::DBClientCursor> cursor = conn.query( "test.people" , query.obj() );
+ cout << "using cursor" << endl;
+ while ( cursor->more() ) {
+ mongo::BSONObj obj = cursor->next();
+ cout << "\t" << obj.jsonString() << endl;
+ }
+
+ }
+
+ {
+ mongo::BSONObjBuilder query;
+ query.append( "name" , "eliot" );
+ mongo::BSONObj res = conn.findOne( "test.people" , query.obj() );
+ cout << res.isEmpty() << "\t" << res.jsonString() << endl;
+ }
+
+ {
+ mongo::BSONObjBuilder query;
+ query.append( "name" , "asd" );
+ mongo::BSONObj res = conn.findOne( "test.people" , query.obj() );
+ cout << res.isEmpty() << "\t" << res.jsonString() << endl;
+ }
+
+
+}
diff --git a/src/mongo/client/examples/httpClientTest.cpp b/src/mongo/client/examples/httpClientTest.cpp
new file mode 100644
index 00000000000..4055d4492d5
--- /dev/null
+++ b/src/mongo/client/examples/httpClientTest.cpp
@@ -0,0 +1,58 @@
+// httpClientTest.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+
+#include "client/dbclient.h"
+#include "util/net/httpclient.h"
+
+using namespace mongo;
+
+void play( string url ) {
+ cout << "[" << url << "]" << endl;
+
+ HttpClient c;
+ HttpClient::Result r;
+ MONGO_assert( c.get( url , &r ) == 200 );
+
+ HttpClient::Headers h = r.getHeaders();
+ MONGO_assert( h["Content-Type"].find( "text/html" ) == 0 );
+
+ cout << "\tHeaders" << endl;
+ for ( HttpClient::Headers::iterator i = h.begin() ; i != h.end(); ++i ) {
+ cout << "\t\t" << i->first << "\t" << i->second << endl;
+ }
+
+}
+
+int main( int argc, const char **argv ) {
+
+ int port = 27017;
+ if ( argc != 1 ) {
+ if ( argc != 3 )
+ throw -12;
+ port = atoi( argv[ 2 ] );
+ }
+ port += 1000;
+
+ play( str::stream() << "http://localhost:" << port << "/" );
+
+#ifdef MONGO_SSL
+ play( "https://www.10gen.com/" );
+#endif
+
+}
diff --git a/src/mongo/client/examples/insert_demo.cpp b/src/mongo/client/examples/insert_demo.cpp
new file mode 100644
index 00000000000..14ac79ee1a0
--- /dev/null
+++ b/src/mongo/client/examples/insert_demo.cpp
@@ -0,0 +1,47 @@
+/*
+ C++ client program which inserts documents in a MongoDB database.
+
+ How to build and run:
+
+ Using mongo_client_lib.cpp:
+ g++ -I .. -I ../.. insert_demo.cpp ../mongo_client_lib.cpp -lboost_thread-mt -lboost_filesystem
+ ./a.out
+*/
+
+#include <iostream>
+#include "dbclient.h" // the mongo c++ driver
+
+using namespace std;
+using namespace mongo;
+using namespace bson;
+
+int main() {
+ try {
+ cout << "connecting to localhost..." << endl;
+ DBClientConnection c;
+ c.connect("localhost");
+ cout << "connected ok" << endl;
+
+ bo o = BSON( "hello" << "world" );
+
+ cout << "inserting..." << endl;
+
+ time_t start = time(0);
+ for( unsigned i = 0; i < 1000000; i++ ) {
+ c.insert("test.foo", o);
+ }
+
+ // wait until all operations applied
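+        // (getLastError forces a round trip, so once it returns every earlier fire-and-forget
+        //  insert has been processed by the server)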
+ cout << "getlasterror returns: \"" << c.getLastError() << '"' << endl;
+
+ time_t done = time(0);
+ time_t dt = done-start;
+ cout << dt << " seconds " << 1000000/dt << " per second" << endl;
+ }
+ catch(DBException& e) {
+ cout << "caught DBException " << e.toString() << endl;
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/src/mongo/client/examples/mongoperf.cpp b/src/mongo/client/examples/mongoperf.cpp
new file mode 100644
index 00000000000..68ebd6b10f2
--- /dev/null
+++ b/src/mongo/client/examples/mongoperf.cpp
@@ -0,0 +1,269 @@
+/*
+ How to build and run:
+
+ scons mongoperf
+ ./mongoperf -h
+*/
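+
+/*
+  Example invocation (illustrative only; the fields are the ones documented in the usage
+  text printed by ./mongoperf -h):
+
+    echo "{ nThreads: 4, fileSizeMB: 1000, r: true, w: true }" | ./mongoperf
+*/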
+
+#define MONGO_EXPOSE_MACROS 1
+
+#include <iostream>
+#include "../dbclient.h" // the mongo c++ driver
+#include "../../util/mmap.h"
+#include <assert.h>
+#include "../../util/logfile.h"
+#include "../../util/timer.h"
+#include "../../util/time_support.h"
+#include "../../bson/util/atomic_int.h"
+
+using namespace std;
+using namespace mongo;
+using namespace bson;
+
+int dummy;
+LogFile *lf = 0;
+MemoryMappedFile *mmfFile;
+char *mmf = 0;
+bo options;
+unsigned long long len; // file len
+const unsigned PG = 4096;
+unsigned nThreadsRunning = 0;
+
+// this counter is incremented very frequently, so at very high (in-cache) ops/second rates it can itself become a bottleneck.
+AtomicUInt iops;
+
+SimpleMutex m("mperf");
+
+int syncDelaySecs = 0;
+
+void syncThread() {
+ while( 1 ) {
+ mongo::Timer t;
+ mmfFile->flush(true);
+ cout << " mmf sync took " << t.millis() << "ms" << endl;
+ sleepsecs(syncDelaySecs);
+ }
+}
+
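+// round a pointer up to the next PG (4KB) boundary; Aligned below uses this to hand out
+// page-aligned buffers for the direct (non-mmf) i/o path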
+char* round(char* x) {
+ size_t f = (size_t) x;
+ char *p = (char *) ((f+PG-1)/PG*PG);
+ return p;
+}
+
+struct Aligned {
+ char x[8192];
+ char* addr() { return round(x); }
+};
+
+unsigned long long rrand() {
+ // RAND_MAX is very small on windows
+ return (static_cast<unsigned long long>(rand()) << 15) ^ rand();
+}
+
+void workerThread() {
+ bool r = options["r"].trueValue();
+ bool w = options["w"].trueValue();
+ //cout << "read:" << r << " write:" << w << endl;
+ long long su = options["sleepMicros"].numberLong();
+ Aligned a;
+ while( 1 ) {
+ unsigned long long rofs = (rrand() * PG) % len;
+ unsigned long long wofs = (rrand() * PG) % len;
+ if( mmf ) {
+ if( r ) {
+ dummy += mmf[rofs];
+ iops++;
+ }
+ if( w ) {
+ mmf[wofs] = 3;
+ iops++;
+ }
+ }
+ else {
+ if( r ) {
+ lf->readAt(rofs, a.addr(), PG);
+ iops++;
+ }
+ if( w ) {
+ lf->writeAt(wofs, a.addr(), PG);
+ iops++;
+ }
+ }
+ long long micros = su / nThreadsRunning;
+ if( micros ) {
+ sleepmicros(micros);
+ }
+ }
+}
+
+void go() {
+ assert( options["r"].trueValue() || options["w"].trueValue() );
+ MemoryMappedFile f;
+ cout << "creating test file size:";
+ len = options["fileSizeMB"].numberLong();
+ if( len == 0 ) len = 1;
+ cout << len << "MB ..." << endl;
+
+ if( 0 && len > 2000 && !options["mmf"].trueValue() ) {
+ // todo make tests use 64 bit offsets in their i/o -- i.e. adjust LogFile::writeAt and such
+ cout << "\nsizes > 2GB not yet supported with mmf:false" << endl;
+ return;
+ }
+ len *= 1024 * 1024;
+ const char *fname = "./mongoperf__testfile__tmp";
+ try {
+ boost::filesystem::remove(fname);
+ }
+ catch(...) {
+ cout << "error deleting file " << fname << endl;
+ return;
+ }
+ lf = new LogFile(fname,true);
+ const unsigned sz = 1024 * 1024 * 32; // needs to be big as we are using synchronousAppend. if we used a regular MongoFile it wouldn't have to be
+ char *buf = (char*) malloc(sz+4096);
+ const char *p = round(buf);
+ for( unsigned long long i = 0; i < len; i += sz ) {
+ lf->synchronousAppend(p, sz);
+ if( i % (1024ULL*1024*1024) == 0 && i ) {
+ cout << i / (1024ULL*1024*1024) << "GB..." << endl;
+ }
+ }
+ BSONObj& o = options;
+
+ if( o["mmf"].trueValue() ) {
+ delete lf;
+ lf = 0;
+ mmfFile = new MemoryMappedFile();
+ mmf = (char *) mmfFile->map(fname);
+ assert( mmf );
+
+ syncDelaySecs = options["syncDelay"].numberInt();
+ if( syncDelaySecs ) {
+ boost::thread t(syncThread);
+ }
+ }
+
+ cout << "testing..."<< endl;
+
+ unsigned wthr = (unsigned) o["nThreads"].Int();
+ if( wthr < 1 ) {
+ cout << "bad threads field value" << endl;
+ return;
+ }
+ unsigned i = 0;
+ unsigned d = 1;
+ unsigned &nthr = nThreadsRunning;
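+    // ramp up gradually: every 8 seconds the target d doubles and more worker threads are
+    // started until nThreads is reached, while ops/sec is reported once per second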
+ while( 1 ) {
+ if( i++ % 8 == 0 ) {
+ if( nthr < wthr ) {
+ while( nthr < wthr && nthr < d ) {
+ nthr++;
+ boost::thread w(workerThread);
+ }
+ cout << "new thread, total running : " << nthr << endl;
+ d *= 2;
+ }
+ }
+ sleepsecs(1);
+ unsigned long long w = iops.get();
+ iops.zero();
+ w /= 1; // 1 secs
+ cout << w << " ops/sec ";
+ if( mmf == 0 )
+            // with mmf each op touches only a byte or so, so an MB/sec figure would be misleading
+ cout << (w * PG / 1024 / 1024) << " MB/sec";
+ cout << endl;
+ }
+}
+
+int main(int argc, char *argv[]) {
+
+ try {
+ cout << "mongoperf" << endl;
+
+ if( argc > 1 ) {
+cout <<
+
+"\n"
+"usage:\n"
+"\n"
+" mongoperf < myjsonconfigfile\n"
+"\n"
+" {\n"
+" nThreads:<n>, // number of threads (default 1)\n"
+" fileSizeMB:<n>, // test file size (default 1MB)\n"
+" sleepMicros:<n>, // pause for sleepMicros/nThreads between each operation (default 0)\n"
+" mmf:<bool>, // if true do i/o's via memory mapped files (default false)\n"
+" r:<bool>, // do reads (default false)\n"
+" w:<bool>, // do writes (default false)\n"
+" syncDelay:<n> // secs between fsyncs, like --syncdelay in mongod. (default 0/never)\n"
+" }\n"
+"\n"
+"mongoperf is a performance testing tool. the initial tests are of disk subsystem performance; \n"
+" tests of mongos and mongod will be added later.\n"
+"most fields are optional.\n"
+"non-mmf io is direct io (no caching). use a large file size to test making the heads\n"
+" move significantly and to avoid i/o coalescing\n"
+"mmf io uses caching (the file system cache).\n"
+"\n"
+
+<< endl;
+ return 0;
+ }
+
+ cout << "use -h for help" << endl;
+
+ char input[1024];
+ memset(input, 0, sizeof(input));
+ cin.read(input, 1000);
+ if( *input == 0 ) {
+ cout << "error no options found on stdin for mongoperf" << endl;
+ return 2;
+ }
+
+ string s = input;
+    str::stripTrailing(s, "\n\r\x1a");
+ try {
+ options = fromjson(s);
+ }
+ catch(...) {
+ cout << s << endl;
+ cout << "couldn't parse json options" << endl;
+ return -1;
+ }
+ cout << "options:\n" << options.toString() << endl;
+
+ go();
+#if 0
+ cout << "connecting to localhost..." << endl;
+ DBClientConnection c;
+ c.connect("localhost");
+ cout << "connected ok" << endl;
+ unsigned long long count = c.count("test.foo");
+ cout << "count of exiting documents in collection test.foo : " << count << endl;
+
+ bo o = BSON( "hello" << "world" );
+ c.insert("test.foo", o);
+
+ string e = c.getLastError();
+ if( !e.empty() ) {
+ cout << "insert #1 failed: " << e << endl;
+ }
+
+ // make an index with a unique key constraint
+ c.ensureIndex("test.foo", BSON("hello"<<1), /*unique*/true);
+
+ c.insert("test.foo", o); // will cause a dup key error on "hello" field
+ cout << "we expect a dup key error here:" << endl;
+ cout << " " << c.getLastErrorDetailed().toString() << endl;
+#endif
+ }
+ catch(DBException& e) {
+ cout << "caught DBException " << e.toString() << endl;
+ return 1;
+ }
+
+ return 0;
+}
+
diff --git a/src/mongo/client/examples/mongoperf.vcxproj b/src/mongo/client/examples/mongoperf.vcxproj
new file mode 100755
index 00000000000..89168370733
--- /dev/null
+++ b/src/mongo/client/examples/mongoperf.vcxproj
@@ -0,0 +1,113 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup Label="ProjectConfigurations">
+ <ProjectConfiguration Include="Debug|Win32">
+ <Configuration>Debug</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|Win32">
+ <Configuration>Release</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ </ItemGroup>
+ <PropertyGroup Label="Globals">
+ <ProjectGuid>{79D4E297-BFB7-4FF2-9B13-08A146582E46}</ProjectGuid>
+ <Keyword>Win32Proj</Keyword>
+ <RootNamespace>mongoperf</RootNamespace>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+ <PropertyGroup>
+ <ConfigurationType>Application</ConfigurationType>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+ <UseDebugLibraries>true</UseDebugLibraries>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+ <UseDebugLibraries>false</UseDebugLibraries>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+ <ImportGroup Label="ExtensionSettings">
+ </ImportGroup>
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup>
+ <IncludePath>..\..;..\..\third_party\pcre-7.4;$(IncludePath)</IncludePath>
+ <LibraryPath>\boost\lib\vs2010_32;$(LibraryPath)</LibraryPath>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <LinkIncremental>true</LinkIncremental>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <LinkIncremental>false</LinkIncremental>
+ </PropertyGroup>
+ <ItemDefinitionGroup>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <AdditionalIncludeDirectories>c:\boost;\boost</AdditionalIncludeDirectories>
+ </ClCompile>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <ClCompile>
+ <Optimization>Disabled</Optimization>
+ <PreprocessorDefinitions> _CRT_SECURE_NO_WARNINGS;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalIncludeDirectories>c:\boost;\boost</AdditionalIncludeDirectories>
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <AdditionalDependencies>ws2_32.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <ClCompile>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions> _CRT_SECURE_NO_WARNINGS;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <OptimizeReferences>true</OptimizeReferences>
+ <AdditionalDependencies>ws2_32.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemGroup>
+ <ClCompile Include="..\..\util\logfile.cpp" />
+ <ClCompile Include="..\..\util\mmap.cpp" />
+ <ClCompile Include="..\..\util\mmap_win.cpp" />
+ <ClCompile Include="..\mongo_client_lib.cpp" />
+ <ClCompile Include="mongoperf.cpp" />
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="..\..\bson\bson-inl.h" />
+ <ClInclude Include="..\..\bson\bson.h" />
+ <ClInclude Include="..\..\bson\bsonelement.h" />
+ <ClInclude Include="..\..\bson\bsonmisc.h" />
+ <ClInclude Include="..\..\bson\bsonobj.h" />
+ <ClInclude Include="..\..\bson\bsonobjbuilder.h" />
+ <ClInclude Include="..\..\bson\bsonobjiterator.h" />
+ <ClInclude Include="..\..\bson\bsontypes.h" />
+ <ClInclude Include="..\..\bson\bson_db.h" />
+ <ClInclude Include="..\..\bson\inline_decls.h" />
+ <ClInclude Include="..\..\bson\oid.h" />
+ <ClInclude Include="..\..\bson\ordering.h" />
+ <ClInclude Include="..\..\bson\stringdata.h" />
+ <ClInclude Include="..\..\util\logfile.h" />
+ <ClInclude Include="..\..\util\mmap.h" />
+ </ItemGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+ <ImportGroup Label="ExtensionTargets">
+ </ImportGroup>
+</Project> \ No newline at end of file
diff --git a/src/mongo/client/examples/mongoperf.vcxproj.filters b/src/mongo/client/examples/mongoperf.vcxproj.filters
new file mode 100755
index 00000000000..ab12575af08
--- /dev/null
+++ b/src/mongo/client/examples/mongoperf.vcxproj.filters
@@ -0,0 +1,73 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup>
+ <ClCompile Include="mongoperf.cpp" />
+ <ClCompile Include="..\mongo_client_lib.cpp">
+ <Filter>shared files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\util\mmap.cpp">
+ <Filter>shared files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\util\mmap_win.cpp">
+ <Filter>shared files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\util\logfile.cpp">
+ <Filter>shared files</Filter>
+ </ClCompile>
+ </ItemGroup>
+ <ItemGroup>
+ <Filter Include="shared files">
+ <UniqueIdentifier>{847e788b-8e8c-48de-829f-6876c9008440}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="includes">
+ <UniqueIdentifier>{d855a95e-71ad-4f54-ae1b-94e7aa894394}</UniqueIdentifier>
+ </Filter>
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="..\..\bson\bson-inl.h">
+ <Filter>includes</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\bson\inline_decls.h">
+ <Filter>includes</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\bson\bson.h">
+ <Filter>includes</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\bson\bson_db.h">
+ <Filter>includes</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\bson\bsonelement.h">
+ <Filter>includes</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\bson\bsonmisc.h">
+ <Filter>includes</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\bson\bsonobj.h">
+ <Filter>includes</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\bson\bsonobjbuilder.h">
+ <Filter>includes</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\bson\bsonobjiterator.h">
+ <Filter>includes</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\bson\bsontypes.h">
+ <Filter>includes</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\util\logfile.h">
+ <Filter>includes</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\bson\stringdata.h">
+ <Filter>includes</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\bson\oid.h">
+ <Filter>includes</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\bson\ordering.h">
+ <Filter>includes</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\util\mmap.h">
+ <Filter>includes</Filter>
+ </ClInclude>
+ </ItemGroup>
+</Project> \ No newline at end of file
diff --git a/src/mongo/client/examples/rs.cpp b/src/mongo/client/examples/rs.cpp
new file mode 100644
index 00000000000..3307d87b56b
--- /dev/null
+++ b/src/mongo/client/examples/rs.cpp
@@ -0,0 +1,118 @@
+// rs.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * example of using replica sets from c++
+ */
+
+#include "client/dbclient.h"
+#include <iostream>
+#include <vector>
+
+using namespace mongo;
+using namespace std;
+
+void workerThread( string collName , bool print , DBClientReplicaSet * conn ) {
+
+ while ( true ) {
+ try {
+ conn->update( collName , BSONObj() , BSON( "$inc" << BSON( "x" << 1 ) ) , true );
+
+ BSONObj x = conn->findOne( collName , BSONObj() );
+
+ if ( print ) {
+ cout << x << endl;
+ }
+
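+            // slaveConn() goes straight to a secondary, so QueryOption_SlaveOk is required for
+            // that read; passing the same flag on the replica-set connection below lets the
+            // driver route that read to a secondary as well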
+ BSONObj a = conn->slaveConn().findOne( collName , BSONObj() , 0 , QueryOption_SlaveOk );
+ BSONObj b = conn->findOne( collName , BSONObj() , 0 , QueryOption_SlaveOk );
+
+ if ( print ) {
+ cout << "\t A " << a << endl;
+ cout << "\t B " << b << endl;
+ }
+ }
+ catch ( std::exception& e ) {
+ cout << "ERROR: " << e.what() << endl;
+ }
+ sleepmillis( 10 );
+ }
+}
+
+int main( int argc , const char ** argv ) {
+
+ unsigned nThreads = 1;
+ bool print = false;
+ bool testTimeout = false;
+
+ for ( int i=1; i<argc; i++ ) {
+ if ( mongoutils::str::equals( "--threads" , argv[i] ) ) {
+ nThreads = atoi( argv[++i] );
+ }
+ else if ( mongoutils::str::equals( "--print" , argv[i] ) ) {
+ print = true;
+ }
+ // Run a special mode to demonstrate the DBClientReplicaSet so_timeout option.
+ else if ( mongoutils::str::equals( "--testTimeout" , argv[i] ) ) {
+ testTimeout = true;
+ }
+ else {
+ cerr << "unknown option: " << argv[i] << endl;
+ return 1;
+ }
+
+ }
+
+ string errmsg;
+ ConnectionString cs = ConnectionString::parse( "foo/127.0.0.1" , errmsg );
+ if ( ! cs.isValid() ) {
+ cout << "error parsing url: " << errmsg << endl;
+ return 1;
+ }
+
+ DBClientReplicaSet * conn = dynamic_cast<DBClientReplicaSet*>(cs.connect( errmsg, testTimeout ? 10 : 0 ));
+ if ( ! conn ) {
+ cout << "error connecting: " << errmsg << endl;
+ return 2;
+ }
+
+ string collName = "test.rs1";
+
+ conn->dropCollection( collName );
+
+ if ( testTimeout ) {
+ conn->insert( collName, BSONObj() );
+ try {
+ conn->count( collName, BSON( "$where" << "sleep(40000)" ) );
+ } catch( DBException& ) {
+ return 0;
+ }
+ cout << "expected socket exception" << endl;
+ return 1;
+ }
+
+ vector<boost::shared_ptr<boost::thread> > threads;
+ for ( unsigned i=0; i<nThreads; i++ ) {
+ string errmsg;
+ threads.push_back( boost::shared_ptr<boost::thread>( new boost::thread( boost::bind( workerThread , collName , print , (DBClientReplicaSet*)cs.connect(errmsg) ) ) ) );
+ }
+
+ for ( unsigned i=0; i<threads.size(); i++ ) {
+ threads[i]->join();
+ }
+
+}
diff --git a/src/mongo/client/examples/second.cpp b/src/mongo/client/examples/second.cpp
new file mode 100644
index 00000000000..6cc2111580f
--- /dev/null
+++ b/src/mongo/client/examples/second.cpp
@@ -0,0 +1,56 @@
+// second.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+
+#include "client/dbclient.h"
+
+using namespace std;
+using namespace mongo;
+
+int main( int argc, const char **argv ) {
+
+ const char *port = "27017";
+ if ( argc != 1 ) {
+ if ( argc != 3 )
+ throw -12;
+ port = argv[ 2 ];
+ }
+
+ DBClientConnection conn;
+ string errmsg;
+ if ( ! conn.connect( string( "127.0.0.1:" ) + port , errmsg ) ) {
+ cout << "couldn't connect : " << errmsg << endl;
+ throw -11;
+ }
+
+ const char * ns = "test.second";
+
+ conn.remove( ns , BSONObj() );
+
+ conn.insert( ns , BSON( "name" << "eliot" << "num" << 17 ) );
+ conn.insert( ns , BSON( "name" << "sara" << "num" << 24 ) );
+
+ auto_ptr<DBClientCursor> cursor = conn.query( ns , BSONObj() );
+ cout << "using cursor" << endl;
+ while ( cursor->more() ) {
+ BSONObj obj = cursor->next();
+ cout << "\t" << obj.jsonString() << endl;
+ }
+
+ conn.ensureIndex( ns , BSON( "name" << 1 << "num" << -1 ) );
+}
diff --git a/src/mongo/client/examples/simple_client_demo.vcxproj b/src/mongo/client/examples/simple_client_demo.vcxproj
new file mode 100755
index 00000000000..358513f307a
--- /dev/null
+++ b/src/mongo/client/examples/simple_client_demo.vcxproj
@@ -0,0 +1,107 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup Label="ProjectConfigurations">
+ <ProjectConfiguration Include="Debug|Win32">
+ <Configuration>Debug</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|Win32">
+ <Configuration>Release</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ </ItemGroup>
+ <PropertyGroup Label="Globals">
+ <ProjectGuid>{89C30BC3-2874-4F2C-B4DA-EB04E9782236}</ProjectGuid>
+ <Keyword>Win32Proj</Keyword>
+ <RootNamespace>simple_client_demo</RootNamespace>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+
+ <PropertyGroup>
+ <ConfigurationType>Application</ConfigurationType>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+ <UseDebugLibraries>true</UseDebugLibraries>
+ </PropertyGroup>
+
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+ <UseDebugLibraries>false</UseDebugLibraries>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ </PropertyGroup>
+
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+ <ImportGroup Label="ExtensionSettings">
+ </ImportGroup>
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup>
+ <IncludePath>..\..;..\..\third_party\pcre-7.4;$(IncludePath)</IncludePath>
+ <LibraryPath>\boost\lib\vs2010_32;$(LibraryPath)</LibraryPath>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <LinkIncremental>true</LinkIncremental>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <LinkIncremental>false</LinkIncremental>
+ </PropertyGroup>
+
+ <ItemDefinitionGroup>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <AdditionalIncludeDirectories>c:\boost;\boost</AdditionalIncludeDirectories>
+ </ClCompile>
+ </ItemDefinitionGroup>
+
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <ClCompile>
+ <Optimization>Disabled</Optimization>
+ <PreprocessorDefinitions> _CRT_SECURE_NO_WARNINGS;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalIncludeDirectories>c:\boost;\boost</AdditionalIncludeDirectories>
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <AdditionalDependencies>ws2_32.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ </Link>
+ </ItemDefinitionGroup>
+
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <ClCompile>
+
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions> _CRT_SECURE_NO_WARNINGS;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <OptimizeReferences>true</OptimizeReferences>
+ <AdditionalDependencies>ws2_32.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ </Link>
+ </ItemDefinitionGroup>
+
+ <ItemGroup>
+ <ClCompile Include="..\mongo_client_lib.cpp" />
+ <ClCompile Include="..\simple_client_demo.cpp" />
+ </ItemGroup>
+
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+
+ <ImportGroup Label="ExtensionTargets">
+ </ImportGroup>
+
+</Project> \ No newline at end of file
diff --git a/src/mongo/client/examples/simple_client_demo.vcxproj.filters b/src/mongo/client/examples/simple_client_demo.vcxproj.filters
new file mode 100755
index 00000000000..8aa5a1a96c5
--- /dev/null
+++ b/src/mongo/client/examples/simple_client_demo.vcxproj.filters
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup>
+ <Filter Include="Source Files">
+ <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
+ <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
+ </Filter>
+ </ItemGroup>
+ <ItemGroup>
+ <ClCompile Include="..\simple_client_demo.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\mongo_client_lib.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ </ItemGroup>
+</Project> \ No newline at end of file
diff --git a/src/mongo/client/examples/tail.cpp b/src/mongo/client/examples/tail.cpp
new file mode 100644
index 00000000000..90e62d279c1
--- /dev/null
+++ b/src/mongo/client/examples/tail.cpp
@@ -0,0 +1,46 @@
+// tail.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* example of using a tailable cursor */
+
+#include "../../client/dbclient.h"
+#include "../../util/goodies.h"
+
+using namespace mongo;
+
+void tail(DBClientBase& conn, const char *ns) {
+ BSONElement lastId = minKey.firstElement();
+ Query query = Query();
+
+ auto_ptr<DBClientCursor> c =
+ conn.query(ns, query, 0, 0, 0, QueryOption_CursorTailable);
+
+ while( 1 ) {
+ if( !c->more() ) {
+ if( c->isDead() ) {
+ break; // we need to requery
+ }
+
+ // all data (so far) exhausted, wait for more
+ sleepsecs(1);
+ continue;
+ }
+ BSONObj o = c->next();
+ lastId = o["_id"];
+ cout << o.toString() << endl;
+ }
+}
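+
+/* A caller would normally wrap tail() in a retry loop and use the last _id seen to resume
+   (an illustrative sketch only, not part of the original example -- it assumes tail() is
+   changed to take and return that _id):
+
+     BSONElement last = minKey.firstElement();
+     while( 1 ) {
+         last = tail( conn , ns , last );                 // returns when the cursor dies
+         // inside tail(), the query would then be built as QUERY( "_id" << GT << last )
+         // so documents already seen are skipped on the requery
+     }
+*/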
diff --git a/src/mongo/client/examples/tutorial.cpp b/src/mongo/client/examples/tutorial.cpp
new file mode 100644
index 00000000000..aa5ad02b55d
--- /dev/null
+++ b/src/mongo/client/examples/tutorial.cpp
@@ -0,0 +1,71 @@
+//tutorial.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+#include "../../client/dbclient.h"
+
+// g++ tutorial.cpp -lmongoclient -lboost_thread -lboost_filesystem -o tutorial
+// Might need a variant of the above compile line. This worked for me:
+//g++ tutorial.cpp -L[mongo directory] -L/opt/local/lib -lmongoclient -lboost_thread-mt -lboost_filesystem -lboost_system -I/opt/local/include -o tutorial
+
+using namespace mongo;
+
+void printIfAge(DBClientConnection& c, int age) {
+ auto_ptr<DBClientCursor> cursor = c.query("tutorial.persons", QUERY( "age" << age ).sort("name") );
+ while( cursor->more() ) {
+ BSONObj p = cursor->next();
+ cout << p.getStringField("name") << endl;
+ }
+}
+
+void run() {
+ DBClientConnection c;
+ c.connect("localhost"); //"192.168.58.1");
+ cout << "connected ok" << endl;
+ BSONObj p = BSON( "name" << "Joe" << "age" << 33 );
+ c.insert("tutorial.persons", p);
+ p = BSON( "name" << "Jane" << "age" << 40 );
+ c.insert("tutorial.persons", p);
+ p = BSON( "name" << "Abe" << "age" << 33 );
+ c.insert("tutorial.persons", p);
+ p = BSON( "name" << "Methuselah" << "age" << BSONNULL);
+ c.insert("tutorial.persons", p);
+ p = BSON( "name" << "Samantha" << "age" << 21 << "city" << "Los Angeles" << "state" << "CA" );
+ c.insert("tutorial.persons", p);
+
+ c.ensureIndex("tutorial.persons", fromjson("{age:1}"));
+
+ cout << "count:" << c.count("tutorial.persons") << endl;
+
+ auto_ptr<DBClientCursor> cursor = c.query("tutorial.persons", BSONObj());
+ while( cursor->more() ) {
+ cout << cursor->next().toString() << endl;
+ }
+
+ cout << "\nprintifage:\n";
+ printIfAge(c, 33);
+}
+
+int main() {
+ try {
+ run();
+ }
+ catch( DBException &e ) {
+ cout << "caught " << e.what() << endl;
+ }
+ return 0;
+}
diff --git a/src/mongo/client/examples/whereExample.cpp b/src/mongo/client/examples/whereExample.cpp
new file mode 100644
index 00000000000..12b68d7add3
--- /dev/null
+++ b/src/mongo/client/examples/whereExample.cpp
@@ -0,0 +1,69 @@
+// @file whereExample.cpp
+// @see http://www.mongodb.org/display/DOCS/Server-side+Code+Execution
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+
+#include "client/dbclient.h"
+
+using namespace std;
+using namespace mongo;
+
+int main( int argc, const char **argv ) {
+
+ const char *port = "27017";
+ if ( argc != 1 ) {
+ if ( argc != 3 )
+ throw -12;
+ port = argv[ 2 ];
+ }
+
+ DBClientConnection conn;
+ string errmsg;
+ if ( ! conn.connect( string( "127.0.0.1:" ) + port , errmsg ) ) {
+ cout << "couldn't connect : " << errmsg << endl;
+ throw -11;
+ }
+
+ const char * ns = "test.where";
+
+ conn.remove( ns , BSONObj() );
+
+ conn.insert( ns , BSON( "name" << "eliot" << "num" << 17 ) );
+ conn.insert( ns , BSON( "name" << "sara" << "num" << 24 ) );
+
+ auto_ptr<DBClientCursor> cursor = conn.query( ns , BSONObj() );
+
+ while ( cursor->more() ) {
+ BSONObj obj = cursor->next();
+ cout << "\t" << obj.jsonString() << endl;
+ }
+
+ cout << "now using $where" << endl;
+
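+    // where() attaches a server-side JavaScript predicate to the query; the BSON object passed
+    // as the second argument becomes the JS scope, so "name" inside the expression is "sara"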
+ Query q = Query("{}").where("this.name == name" , BSON( "name" << "sara" ));
+
+ cursor = conn.query( ns , q );
+
+ int num = 0;
+ while ( cursor->more() ) {
+ BSONObj obj = cursor->next();
+ cout << "\t" << obj.jsonString() << endl;
+ num++;
+ }
+ MONGO_assert( num == 1 );
+}
diff --git a/src/mongo/client/gridfs.cpp b/src/mongo/client/gridfs.cpp
new file mode 100644
index 00000000000..449cb4067d2
--- /dev/null
+++ b/src/mongo/client/gridfs.cpp
@@ -0,0 +1,245 @@
+// gridfs.cpp
+
+/* Copyright 2009 10gen
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include <fcntl.h>
+#include <utility>
+
+#include "gridfs.h"
+#include <boost/smart_ptr.hpp>
+
+#if defined(_WIN32)
+#include <io.h>
+#endif
+
+#ifndef MIN
+#define MIN(a,b) ( (a) < (b) ? (a) : (b) )
+#endif
+
+namespace mongo {
+
+ const unsigned DEFAULT_CHUNK_SIZE = 256 * 1024;
+
+ GridFSChunk::GridFSChunk( BSONObj o ) {
+ _data = o;
+ }
+
+ GridFSChunk::GridFSChunk( BSONObj fileObject , int chunkNumber , const char * data , int len ) {
+ BSONObjBuilder b;
+ b.appendAs( fileObject["_id"] , "files_id" );
+ b.append( "n" , chunkNumber );
+ b.appendBinData( "data" , len, BinDataGeneral, data );
+ _data = b.obj();
+ }
+
+
+ GridFS::GridFS( DBClientBase& client , const string& dbName , const string& prefix ) : _client( client ) , _dbName( dbName ) , _prefix( prefix ) {
+ _filesNS = dbName + "." + prefix + ".files";
+ _chunksNS = dbName + "." + prefix + ".chunks";
+ _chunkSize = DEFAULT_CHUNK_SIZE;
+
+ client.ensureIndex( _filesNS , BSON( "filename" << 1 ) );
+ client.ensureIndex( _chunksNS , BSON( "files_id" << 1 << "n" << 1 ) );
+ }
+
+ GridFS::~GridFS() {
+
+ }
+
+ void GridFS::setChunkSize(unsigned int size) {
+        massert( 13296 , "invalid chunk size specified", (size != 0));
+ _chunkSize = size;
+ }
+
+ BSONObj GridFS::storeFile( const char* data , size_t length , const string& remoteName , const string& contentType) {
+ char const * const end = data + length;
+
+ OID id;
+ id.init();
+ BSONObj idObj = BSON("_id" << id);
+
+ int chunkNumber = 0;
+ while (data < end) {
+ int chunkLen = MIN(_chunkSize, (unsigned)(end-data));
+ GridFSChunk c(idObj, chunkNumber, data, chunkLen);
+ _client.insert( _chunksNS.c_str() , c._data );
+
+ chunkNumber++;
+ data += chunkLen;
+ }
+
+ return insertFile(remoteName, id, length, contentType);
+ }
+
+
+ BSONObj GridFS::storeFile( const string& fileName , const string& remoteName , const string& contentType) {
+ uassert( 10012 , "file doesn't exist" , fileName == "-" || boost::filesystem::exists( fileName ) );
+
+ FILE* fd;
+ if (fileName == "-")
+ fd = stdin;
+ else
+ fd = fopen( fileName.c_str() , "rb" );
+ uassert( 10013 , "error opening file", fd);
+
+ OID id;
+ id.init();
+ BSONObj idObj = BSON("_id" << id);
+
+ int chunkNumber = 0;
+ gridfs_offset length = 0;
+ while (!feof(fd)) {
+            char * buf = new char[_chunkSize+1];
+            char * bufPos = buf;
+ unsigned int chunkLen = 0; // how much in the chunk now
+ while(chunkLen != _chunkSize && !feof(fd)) {
+ int readLen = fread(bufPos, 1, _chunkSize - chunkLen, fd);
+ chunkLen += readLen;
+ bufPos += readLen;
+
+ assert(chunkLen <= _chunkSize);
+ }
+
+ GridFSChunk c(idObj, chunkNumber, buf, chunkLen);
+ _client.insert( _chunksNS.c_str() , c._data );
+
+ length += chunkLen;
+ chunkNumber++;
+ delete[] buf;
+ }
+
+ if (fd != stdin)
+ fclose( fd );
+
+ return insertFile((remoteName.empty() ? fileName : remoteName), id, length, contentType);
+ }
+
+ BSONObj GridFS::insertFile(const string& name, const OID& id, gridfs_offset length, const string& contentType) {
+
+ BSONObj res;
+ if ( ! _client.runCommand( _dbName.c_str() , BSON( "filemd5" << id << "root" << _prefix ) , res ) )
+ throw UserException( 9008 , "filemd5 failed" );
+
+ BSONObjBuilder file;
+ file << "_id" << id
+ << "filename" << name
+ << "chunkSize" << _chunkSize
+ << "uploadDate" << DATENOW
+ << "md5" << res["md5"]
+ ;
+
+ if (length < 1024*1024*1024) { // 2^30
+ file << "length" << (int) length;
+ }
+ else {
+ file << "length" << (long long) length;
+ }
+
+ if (!contentType.empty())
+ file << "contentType" << contentType;
+
+ BSONObj ret = file.obj();
+ _client.insert(_filesNS.c_str(), ret);
+
+ return ret;
+ }
+
+ void GridFS::removeFile( const string& fileName ) {
+ auto_ptr<DBClientCursor> files = _client.query( _filesNS , BSON( "filename" << fileName ) );
+ while (files->more()) {
+ BSONObj file = files->next();
+ BSONElement id = file["_id"];
+ _client.remove( _filesNS.c_str() , BSON( "_id" << id ) );
+ _client.remove( _chunksNS.c_str() , BSON( "files_id" << id ) );
+ }
+ }
+
+ GridFile::GridFile(const GridFS * grid , BSONObj obj ) {
+ _grid = grid;
+ _obj = obj;
+ }
+
+ GridFile GridFS::findFile( const string& fileName ) const {
+ return findFile( BSON( "filename" << fileName ) );
+    }
+
+ GridFile GridFS::findFile( BSONObj query ) const {
+ query = BSON("query" << query << "orderby" << BSON("uploadDate" << -1));
+ return GridFile( this , _client.findOne( _filesNS.c_str() , query ) );
+ }
+
+ auto_ptr<DBClientCursor> GridFS::list() const {
+ return _client.query( _filesNS.c_str() , BSONObj() );
+ }
+
+ auto_ptr<DBClientCursor> GridFS::list( BSONObj o ) const {
+ return _client.query( _filesNS.c_str() , o );
+ }
+
+ BSONObj GridFile::getMetadata() const {
+ BSONElement meta_element = _obj["metadata"];
+ if( meta_element.eoo() ) {
+ return BSONObj();
+ }
+
+ return meta_element.embeddedObject();
+ }
+
+ GridFSChunk GridFile::getChunk( int n ) const {
+ _exists();
+ BSONObjBuilder b;
+ b.appendAs( _obj["_id"] , "files_id" );
+ b.append( "n" , n );
+
+ BSONObj o = _grid->_client.findOne( _grid->_chunksNS.c_str() , b.obj() );
+ uassert( 10014 , "chunk is empty!" , ! o.isEmpty() );
+ return GridFSChunk(o);
+ }
+
+ gridfs_offset GridFile::write( ostream & out ) const {
+ _exists();
+
+ const int num = getNumChunks();
+
+ for ( int i=0; i<num; i++ ) {
+ GridFSChunk c = getChunk( i );
+
+ int len;
+ const char * data = c.data( len );
+ out.write( data , len );
+ }
+
+ return getContentLength();
+ }
+
+ gridfs_offset GridFile::write( const string& where ) const {
+ if (where == "-") {
+ return write( cout );
+ }
+ else {
+ ofstream out(where.c_str() , ios::out | ios::binary );
+ uassert(13325, "couldn't open file: " + where, out.is_open() );
+ return write( out );
+ }
+ }
+
+ void GridFile::_exists() const {
+        uassert( 10015 , "doesn't exist" , exists() );
+ }
+
+}
diff --git a/src/mongo/client/gridfs.h b/src/mongo/client/gridfs.h
new file mode 100644
index 00000000000..5a19aa142ca
--- /dev/null
+++ b/src/mongo/client/gridfs.h
@@ -0,0 +1,205 @@
+/** @file gridfs.h */
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "dbclient.h"
+#include "redef_macros.h"
+
+namespace mongo {
+
+ typedef unsigned long long gridfs_offset;
+
+ class GridFS;
+ class GridFile;
+
+ class GridFSChunk {
+ public:
+ GridFSChunk( BSONObj data );
+ GridFSChunk( BSONObj fileId , int chunkNumber , const char * data , int len );
+
+ int len() const {
+ int len;
+ _data["data"].binDataClean( len );
+ return len;
+ }
+
+ const char * data( int & len ) const {
+ return _data["data"].binDataClean( len );
+ }
+
+ private:
+ BSONObj _data;
+ friend class GridFS;
+ };
+
+
+ /**
+ GridFS is for storing large file-style objects in MongoDB.
+ @see http://www.mongodb.org/display/DOCS/GridFS+Specification
+ */
+ class GridFS {
+ public:
+ /**
+ * @param client - db connection
+ * @param dbName - root database name
+ * @param prefix - if you want your data somewhere besides <dbname>.fs
+ */
+ GridFS( DBClientBase& client , const string& dbName , const string& prefix="fs" );
+ ~GridFS();
+
+ /**
+         * @param size new chunk size in bytes, used for files stored after this call (must be non-zero)
+ */
+ void setChunkSize(unsigned int size);
+
+ /**
+         * puts the file referenced by fileName into the db
+ * @param fileName local filename relative to process
+ * @param remoteName optional filename to use for file stored in GridFS
+ * (default is to use fileName parameter)
+ * @param contentType optional MIME type for this object.
+ * (default is to omit)
+ * @return the file object
+ */
+ BSONObj storeFile( const string& fileName , const string& remoteName="" , const string& contentType="");
+
+ /**
+ * puts the file represented by data into the db
+ * @param data pointer to buffer to store in GridFS
+ * @param length length of buffer
+ * @param remoteName optional filename to use for file stored in GridFS
+ * (default is to use fileName parameter)
+ * @param contentType optional MIME type for this object.
+ * (default is to omit)
+ * @return the file object
+ */
+ BSONObj storeFile( const char* data , size_t length , const string& remoteName , const string& contentType="");
+
+ /**
+ * removes file referenced by fileName from the db
+ * @param fileName filename (in GridFS) of the file to remove
+ */
+ void removeFile( const string& fileName );
+
+ /**
+ * returns a file object matching the query
+ */
+ GridFile findFile( BSONObj query ) const;
+
+ /**
+ * equiv to findFile( { filename : filename } )
+ */
+ GridFile findFile( const string& fileName ) const;
+
+ /**
+ * convenience method to get all the files
+ */
+ auto_ptr<DBClientCursor> list() const;
+
+ /**
+ * convenience method to get all the files with a filter
+ */
+ auto_ptr<DBClientCursor> list( BSONObj query ) const;
+
+ private:
+ DBClientBase& _client;
+ string _dbName;
+ string _prefix;
+ string _filesNS;
+ string _chunksNS;
+ unsigned int _chunkSize;
+
+ // insert fileobject. All chunks must be in DB.
+ BSONObj insertFile(const string& name, const OID& id, gridfs_offset length, const string& contentType);
+
+ friend class GridFile;
+ };
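+
+    /*
+       Typical usage (an illustrative sketch only):
+
+         DBClientConnection conn;
+         conn.connect( "localhost" );
+         GridFS gfs( conn , "mydb" );                    // files land in mydb.fs.files / mydb.fs.chunks
+         BSONObj f = gfs.storeFile( "/tmp/data.bin" );   // returns the files-collection document
+         GridFile g = gfs.findFile( "/tmp/data.bin" );
+         if ( g.exists() )
+             g.write( cout );                            // stream the contents back out
+    */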
+
+ /**
+ wrapper for a file stored in the Mongo database
+ */
+ class GridFile {
+ public:
+ /**
+ * @return whether or not this file exists
+         * findFile always returns a GridFile, so you need to check exists() on the result
+ */
+ bool exists() const {
+ return ! _obj.isEmpty();
+ }
+
+ string getFilename() const {
+ return _obj["filename"].str();
+ }
+
+ int getChunkSize() const {
+ return (int)(_obj["chunkSize"].number());
+ }
+
+ gridfs_offset getContentLength() const {
+ return (gridfs_offset)(_obj["length"].number());
+ }
+
+ string getContentType() const {
+ return _obj["contentType"].valuestr();
+ }
+
+ Date_t getUploadDate() const {
+ return _obj["uploadDate"].date();
+ }
+
+ string getMD5() const {
+ return _obj["md5"].str();
+ }
+
+ BSONElement getFileField( const string& name ) const {
+ return _obj[name];
+ }
+
+ BSONObj getMetadata() const;
+
+ int getNumChunks() const {
+ return (int) ceil( (double)getContentLength() / (double)getChunkSize() );
+ }
+
+ GridFSChunk getChunk( int n ) const;
+
+ /**
+ write the file to the output stream
+ */
+ gridfs_offset write( ostream & out ) const;
+
+ /**
+ write the file to this filename
+ */
+ gridfs_offset write( const string& where ) const;
+
+ private:
+ GridFile(const GridFS * grid , BSONObj obj );
+
+ void _exists() const;
+
+ const GridFS * _grid;
+ BSONObj _obj;
+
+ friend class GridFS;
+ };
+}
+
+#include "undef_macros.h"
diff --git a/src/mongo/client/model.cpp b/src/mongo/client/model.cpp
new file mode 100644
index 00000000000..bd10a3c5528
--- /dev/null
+++ b/src/mongo/client/model.cpp
@@ -0,0 +1,138 @@
+// model.cpp
+
+/* Copyright 2009 10gen
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "model.h"
+#include "connpool.h"
+
+namespace mongo {
+
+ bool Model::load(BSONObj& query) {
+ ScopedDbConnection conn( modelServer() );
+
+ BSONObj b = conn->findOne(getNS(), query);
+ conn.done();
+
+ if ( b.isEmpty() )
+ return false;
+
+ unserialize(b);
+ _id = b["_id"].wrap().getOwned();
+ return true;
+ }
+
+ void Model::remove( bool safe ) {
+ uassert( 10016 , "_id isn't set - needed for remove()" , _id["_id"].type() );
+
+ ScopedDbConnection conn( modelServer() );
+ conn->remove( getNS() , _id );
+
+ string errmsg = "";
+ if ( safe )
+ errmsg = conn->getLastError();
+
+ conn.done();
+
+ if ( safe && errmsg.size() )
+ throw UserException( 9002 , (string)"error on Model::remove: " + errmsg );
+ }
+
+ void Model::save( bool safe ) {
+ ScopedDbConnection conn( modelServer() );
+
+ BSONObjBuilder b;
+ serialize( b );
+
+ BSONElement myId;
+ {
+ BSONObjIterator i = b.iterator();
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( strcmp( e.fieldName() , "_id" ) == 0 ) {
+ myId = e;
+ break;
+ }
+ }
+ }
+
+ if ( myId.type() ) {
+ if ( _id.isEmpty() ) {
+ _id = myId.wrap();
+ }
+ else if ( myId.woCompare( _id.firstElement() ) ) {
+ stringstream ss;
+ ss << "_id from serialize and stored differ: ";
+ ss << '[' << myId << "] != ";
+ ss << '[' << _id.firstElement() << ']';
+ throw UserException( 13121 , ss.str() );
+ }
+ }
+
+ if ( _id.isEmpty() ) {
+ OID oid;
+ oid.init();
+ b.appendOID( "_id" , &oid );
+
+ BSONObj o = b.obj();
+ conn->insert( getNS() , o );
+ _id = o["_id"].wrap().getOwned();
+
+ log(4) << "inserted new model " << getNS() << " " << o << endl;
+ }
+ else {
+ if ( myId.eoo() ) {
+ myId = _id["_id"];
+ b.append( myId );
+ }
+
+ assert( ! myId.eoo() );
+
+ BSONObjBuilder qb;
+ qb.append( myId );
+
+ BSONObj q = qb.obj();
+ BSONObj o = b.obj();
+
+ log(4) << "updated model" << getNS() << " " << q << " " << o << endl;
+
+ conn->update( getNS() , q , o , true );
+
+ }
+
+ string errmsg = "";
+ if ( safe )
+ errmsg = conn->getLastError();
+
+ conn.done();
+
+ if ( safe && errmsg.size() )
+ throw UserException( 9003 , (string)"error on Model::save: " + errmsg );
+ }
+
+ BSONObj Model::toObject() {
+ BSONObjBuilder b;
+ serialize( b );
+ return b.obj();
+ }
+
+ void Model::append( const char * name , BSONObjBuilder& b ) {
+ BSONObjBuilder bb( b.subobjStart( name ) );
+ serialize( bb );
+ bb.done();
+ }
+
+} // namespace mongo
diff --git a/src/mongo/client/model.h b/src/mongo/client/model.h
new file mode 100644
index 00000000000..7dd31434f49
--- /dev/null
+++ b/src/mongo/client/model.h
@@ -0,0 +1,62 @@
+/** @file model.h */
+
+/* Copyright 2009 10gen
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "dbclient.h"
+#include "redef_macros.h"
+
+namespace mongo {
+
+ /** Model is a base class for defining objects which are serializable to the Mongo
+ database via the database driver.
+
+ Definition
+ Your serializable class should inherit from Model and implement the abstract methods
+ below.
+
+ Loading
+ To load, first construct an (empty) object. Then call load(). Do not load an object
+ more than once.
+ */
+ class Model {
+ public:
+ Model() { }
+ virtual ~Model() { }
+
+ virtual const char * getNS() = 0;
+ virtual void serialize(BSONObjBuilder& to) = 0;
+ virtual void unserialize(const BSONObj& from) = 0;
+ virtual BSONObj toObject();
+ virtual void append( const char * name , BSONObjBuilder& b );
+
+ virtual string modelServer() = 0;
+
+ /** Load a single object.
+ @return true if successful.
+ */
+ virtual bool load(BSONObj& query);
+ virtual void save( bool safe=false );
+ virtual void remove( bool safe=false );
+
+ protected:
+ BSONObj _id;
+ };
+
+} // namespace mongo
+
+#include "undef_macros.h"
diff --git a/src/mongo/client/mongo_client_lib.cpp b/src/mongo/client/mongo_client_lib.cpp
new file mode 100644
index 00000000000..58e3f6c6c35
--- /dev/null
+++ b/src/mongo/client/mongo_client_lib.cpp
@@ -0,0 +1,82 @@
+/* @file mongo_client_lib.cpp
+
+ MongoDB C++ Driver
+
+ Normally one includes dbclient.h, and links against libmongoclient.a, when connecting to MongoDB
+ from C++. However, if you have a situation where the pre-built library does not work, you can use
+ this file instead to build all the necessary symbols. To do so, include mongo_client_lib.cpp in your
+ project.
+
+ GCC
+ ---
+   For example, to build simple_client_demo.cpp with GCC and run it:
+
+ g++ -I .. simple_client_demo.cpp mongo_client_lib.cpp -lboost_thread-mt -lboost_filesystem
+ ./a.out
+
+ Visual Studio (2010 tested)
+ ---------------------------
+ First, see client/examples/simple_client_demo.vcxproj.
+ - Be sure to include your boost include directory in your project as an Additional Include Directory.
+ - Define _CRT_SECURE_NO_WARNINGS to avoid warnings on use of strncpy and such by the MongoDB client code.
+ - Include the boost libraries directory.
+ - Linker.Input.Additional Dependencies - add ws2_32.lib for the Winsock library.
+*/
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined(_WIN32)
+// C4800 forcing value to bool 'true' or 'false' (performance warning)
+#pragma warning( disable : 4800 )
+#endif
+
+#include "../util/md5main.cpp"
+
+#define MONGO_EXPOSE_MACROS
+#include "../pch.h"
+
+#include "../util/assert_util.cpp"
+#include "../util/net/message.cpp"
+#include "../util/util.cpp"
+#include "../util/background.cpp"
+#include "../util/base64.cpp"
+#include "../util/net/sock.cpp"
+#include "../util/log.cpp"
+#include "../util/password.cpp"
+#include "../util/net/message_port.cpp"
+#include "../util/concurrency/thread_pool.cpp"
+#include "../util/concurrency/vars.cpp"
+#include "../util/concurrency/task.cpp"
+#include "../util/concurrency/spin_lock.cpp"
+#include "connpool.cpp"
+#include "syncclusterconnection.cpp"
+#include "dbclient.cpp"
+#include "clientOnly.cpp"
+#include "gridfs.cpp"
+#include "dbclientcursor.cpp"
+#include "../util/text.cpp"
+#include "dbclient_rs.cpp"
+#include "../bson/oid.cpp"
+#include "../db/lasterror.cpp"
+#include "../db/json.cpp"
+#include "../db/jsobj.cpp"
+#include "../db/nonce.cpp"
+#include "../pch.cpp"
+
+extern "C" {
+#include "../util/md5.c"
+}
+
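As a sketch of the kind of program the header comment above describes (the file name and host are examples, not part of this commit), something like my_demo.cpp could be compiled together with mongo_client_lib.cpp:

    // my_demo.cpp -- hypothetical example; build e.g. with
    //   g++ -I .. my_demo.cpp mongo_client_lib.cpp -lboost_thread-mt -lboost_filesystem
    #include <iostream>
    #include <memory>
    #include "dbclient.h"

    int main() {
        try {
            mongo::DBClientConnection c;
            c.connect( "localhost" );                                  // throws on failure
            c.insert( "test.demo" , BSON( "hello" << "world" ) );
            std::auto_ptr<mongo::DBClientCursor> cur = c.query( "test.demo" , mongo::Query() );
            while ( cur->more() )
                std::cout << cur->next().toString() << std::endl;
        }
        catch ( const mongo::DBException& e ) {
            std::cout << "caught " << e.what() << std::endl;
            return 1;
        }
        return 0;
    }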
diff --git a/src/mongo/client/parallel.cpp b/src/mongo/client/parallel.cpp
new file mode 100644
index 00000000000..5324de52c84
--- /dev/null
+++ b/src/mongo/client/parallel.cpp
@@ -0,0 +1,1515 @@
+// parallel.cpp
+/*
+ * Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "pch.h"
+#include "parallel.h"
+#include "connpool.h"
+#include "../db/queryutil.h"
+#include "../db/dbmessage.h"
+#include "../s/util.h"
+#include "../s/shard.h"
+#include "../s/chunk.h"
+#include "../s/config.h"
+#include "../s/grid.h"
+
+namespace mongo {
+
+ LabeledLevel pc( "pcursor", 2 );
+
+ // -------- ClusteredCursor -----------
+
+ ClusteredCursor::ClusteredCursor( const QuerySpec& q ) {
+ _ns = q.ns();
+ _query = q.filter().copy();
+ _options = q.options();
+ _fields = q.fields().copy();
+ _batchSize = q.ntoreturn();
+ if ( _batchSize == 1 )
+ _batchSize = 2;
+
+ _done = false;
+ _didInit = false;
+ }
+
+ ClusteredCursor::ClusteredCursor( QueryMessage& q ) {
+ _ns = q.ns;
+ _query = q.query.copy();
+ _options = q.queryOptions;
+ _fields = q.fields.copy();
+ _batchSize = q.ntoreturn;
+ if ( _batchSize == 1 )
+ _batchSize = 2;
+
+ _done = false;
+ _didInit = false;
+ }
+
+ ClusteredCursor::ClusteredCursor( const string& ns , const BSONObj& q , int options , const BSONObj& fields ) {
+ _ns = ns;
+ _query = q.getOwned();
+ _options = options;
+ _fields = fields.getOwned();
+ _batchSize = 0;
+
+ _done = false;
+ _didInit = false;
+ }
+
+ ClusteredCursor::~ClusteredCursor() {
+ _done = true; // just in case
+ }
+
+ void ClusteredCursor::init() {
+ if ( _didInit )
+ return;
+ _didInit = true;
+ _init();
+ }
+
+ void ClusteredCursor::_checkCursor( DBClientCursor * cursor ) {
+ assert( cursor );
+
+ if ( cursor->hasResultFlag( ResultFlag_ShardConfigStale ) ) {
+ throw RecvStaleConfigException( _ns , "ClusteredCursor::_checkCursor" );
+ }
+
+ if ( cursor->hasResultFlag( ResultFlag_ErrSet ) ) {
+ BSONObj o = cursor->next();
+ throw UserException( o["code"].numberInt() , o["$err"].String() );
+ }
+ }
+
+ auto_ptr<DBClientCursor> ClusteredCursor::query( const string& server , int num , BSONObj extra , int skipLeft , bool lazy ) {
+ uassert( 10017 , "cursor already done" , ! _done );
+ assert( _didInit );
+
+ BSONObj q = _query;
+ if ( ! extra.isEmpty() ) {
+ q = concatQuery( q , extra );
+ }
+
+ try {
+ ShardConnection conn( server , _ns );
+
+ if ( conn.setVersion() ) {
+ conn.done();
+ throw RecvStaleConfigException( _ns , "ClusteredCursor::query" , true );
+ }
+
+ LOG(5) << "ClusteredCursor::query (" << type() << ") server:" << server
+ << " ns:" << _ns << " query:" << q << " num:" << num
+ << " _fields:" << _fields << " options: " << _options << endl;
+
+ auto_ptr<DBClientCursor> cursor =
+ conn->query( _ns , q , num , 0 , ( _fields.isEmpty() ? 0 : &_fields ) , _options , _batchSize == 0 ? 0 : _batchSize + skipLeft );
+
+ if ( ! cursor.get() && _options & QueryOption_PartialResults ) {
+ _done = true;
+ conn.done();
+ return cursor;
+ }
+
+ massert( 13633 , str::stream() << "error querying server: " << server , cursor.get() );
+
+ cursor->attach( &conn ); // this calls done on conn
+ assert( ! conn.ok() );
+ _checkCursor( cursor.get() );
+ return cursor;
+ }
+ catch ( SocketException& e ) {
+ if ( ! ( _options & QueryOption_PartialResults ) )
+ throw e;
+ _done = true;
+ return auto_ptr<DBClientCursor>();
+ }
+ }
+
+ BSONObj ClusteredCursor::explain( const string& server , BSONObj extra ) {
+ BSONObj q = _query;
+ if ( ! extra.isEmpty() ) {
+ q = concatQuery( q , extra );
+ }
+
+ BSONObj o;
+
+ ShardConnection conn( server , _ns );
+ auto_ptr<DBClientCursor> cursor = conn->query( _ns , Query( q ).explain() , abs( _batchSize ) * -1 , 0 , _fields.isEmpty() ? 0 : &_fields );
+ if ( cursor.get() && cursor->more() )
+ o = cursor->next().getOwned();
+ conn.done();
+ return o;
+ }
+
+ BSONObj ClusteredCursor::concatQuery( const BSONObj& query , const BSONObj& extraFilter ) {
+ if ( ! query.hasField( "query" ) )
+ return _concatFilter( query , extraFilter );
+
+ BSONObjBuilder b;
+ BSONObjIterator i( query );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+
+ if ( strcmp( e.fieldName() , "query" ) ) {
+ b.append( e );
+ continue;
+ }
+
+ b.append( "query" , _concatFilter( e.embeddedObjectUserCheck() , extraFilter ) );
+ }
+ return b.obj();
+ }
+
+ BSONObj ClusteredCursor::_concatFilter( const BSONObj& filter , const BSONObj& extra ) {
+ BSONObjBuilder b;
+ b.appendElements( filter );
+ b.appendElements( extra );
+ return b.obj();
+        // TODO: ideally we should do some simplification here if possible
+ }
+
+ void ClusteredCursor::explain(BSONObjBuilder& b) {
+ // Note: by default we filter out allPlans and oldPlan in the shell's
+ // explain() function. If you add any recursive structures, make sure to
+ // edit the JS to make sure everything gets filtered.
+
+ b.append( "clusteredType" , type() );
+
+ long long millis = 0;
+ double numExplains = 0;
+
+ map<string,long long> counters;
+
+ map<string,list<BSONObj> > out;
+ {
+ _explain( out );
+
+ BSONObjBuilder x( b.subobjStart( "shards" ) );
+ for ( map<string,list<BSONObj> >::iterator i=out.begin(); i!=out.end(); ++i ) {
+ string shard = i->first;
+ list<BSONObj> l = i->second;
+ BSONArrayBuilder y( x.subarrayStart( shard ) );
+ for ( list<BSONObj>::iterator j=l.begin(); j!=l.end(); ++j ) {
+ BSONObj temp = *j;
+ y.append( temp );
+
+ BSONObjIterator k( temp );
+ while ( k.more() ) {
+ BSONElement z = k.next();
+ if ( z.fieldName()[0] != 'n' )
+ continue;
+ long long& c = counters[z.fieldName()];
+ c += z.numberLong();
+ }
+
+ millis += temp["millis"].numberLong();
+ numExplains++;
+ }
+ y.done();
+ }
+ x.done();
+ }
+
+ for ( map<string,long long>::iterator i=counters.begin(); i!=counters.end(); ++i )
+ b.appendNumber( i->first , i->second );
+
+ b.appendNumber( "millisShardTotal" , millis );
+ b.append( "millisShardAvg" , (int)((double)millis / numExplains ) );
+ b.append( "numQueries" , (int)numExplains );
+ b.append( "numShards" , (int)out.size() );
+ }
+
+ // -------- FilteringClientCursor -----------
+ FilteringClientCursor::FilteringClientCursor( const BSONObj filter )
+ : _matcher( filter ) , _done( true ) {
+ }
+
+ FilteringClientCursor::FilteringClientCursor( auto_ptr<DBClientCursor> cursor , const BSONObj filter )
+        : _matcher( filter ) , _cursor( cursor ) , _done( _cursor.get() == 0 ) {
+ }
+
+ FilteringClientCursor::FilteringClientCursor( DBClientCursor* cursor , const BSONObj filter )
+ : _matcher( filter ) , _cursor( cursor ) , _done( cursor == 0 ) {
+ }
+
+
+ FilteringClientCursor::~FilteringClientCursor() {
+ }
+
+ void FilteringClientCursor::reset( auto_ptr<DBClientCursor> cursor ) {
+ _cursor = cursor;
+ _next = BSONObj();
+ _done = _cursor.get() == 0;
+ }
+
+ void FilteringClientCursor::reset( DBClientCursor* cursor ) {
+ _cursor.reset( cursor );
+ _next = BSONObj();
+ _done = cursor == 0;
+ }
+
+
+ bool FilteringClientCursor::more() {
+ if ( ! _next.isEmpty() )
+ return true;
+
+ if ( _done )
+ return false;
+
+ _advance();
+ return ! _next.isEmpty();
+ }
+
+ BSONObj FilteringClientCursor::next() {
+ assert( ! _next.isEmpty() );
+ assert( ! _done );
+
+ BSONObj ret = _next;
+ _next = BSONObj();
+ _advance();
+ return ret;
+ }
+
+ BSONObj FilteringClientCursor::peek() {
+ if ( _next.isEmpty() )
+ _advance();
+ return _next;
+ }
+
+ void FilteringClientCursor::_advance() {
+ assert( _next.isEmpty() );
+ if ( ! _cursor.get() || _done )
+ return;
+
+ while ( _cursor->more() ) {
+ _next = _cursor->next();
+ if ( _matcher.matches( _next ) ) {
+ if ( ! _cursor->moreInCurrentBatch() )
+ _next = _next.getOwned();
+ return;
+ }
+ _next = BSONObj();
+ }
+ _done = true;
+ }
+
+ // -------- SerialServerClusteredCursor -----------
+
+ SerialServerClusteredCursor::SerialServerClusteredCursor( const set<ServerAndQuery>& servers , QueryMessage& q , int sortOrder) : ClusteredCursor( q ) {
+ for ( set<ServerAndQuery>::const_iterator i = servers.begin(); i!=servers.end(); i++ )
+ _servers.push_back( *i );
+
+ if ( sortOrder > 0 )
+ sort( _servers.begin() , _servers.end() );
+ else if ( sortOrder < 0 )
+ sort( _servers.rbegin() , _servers.rend() );
+
+ _serverIndex = 0;
+
+ _needToSkip = q.ntoskip;
+ }
+
+ bool SerialServerClusteredCursor::more() {
+
+ // TODO: optimize this by sending on first query and then back counting
+ // tricky in case where 1st server doesn't have any after
+ // need it to send n skipped
+ while ( _needToSkip > 0 && _current.more() ) {
+ _current.next();
+ _needToSkip--;
+ }
+
+ if ( _current.more() )
+ return true;
+
+ if ( _serverIndex >= _servers.size() ) {
+ return false;
+ }
+
+ ServerAndQuery& sq = _servers[_serverIndex++];
+
+ _current.reset( query( sq._server , 0 , sq._extra ) );
+ return more();
+ }
+
+ BSONObj SerialServerClusteredCursor::next() {
+ uassert( 10018 , "no more items" , more() );
+ return _current.next();
+ }
+
+ void SerialServerClusteredCursor::_explain( map< string,list<BSONObj> >& out ) {
+ for ( unsigned i=0; i<_servers.size(); i++ ) {
+ ServerAndQuery& sq = _servers[i];
+ list<BSONObj> & l = out[sq._server];
+ l.push_back( explain( sq._server , sq._extra ) );
+ }
+ }
+
+ // -------- ParallelSortClusteredCursor -----------
+
+ ParallelSortClusteredCursor::ParallelSortClusteredCursor( const set<ServerAndQuery>& servers , QueryMessage& q ,
+ const BSONObj& sortKey )
+ : ClusteredCursor( q ) , _servers( servers ) {
+ _sortKey = sortKey.getOwned();
+ _needToSkip = q.ntoskip;
+ _finishCons();
+ }
+
+ ParallelSortClusteredCursor::ParallelSortClusteredCursor( const set<ServerAndQuery>& servers , const string& ns ,
+ const Query& q ,
+ int options , const BSONObj& fields )
+ : ClusteredCursor( ns , q.obj , options , fields ) , _servers( servers ) {
+ _sortKey = q.getSort().copy();
+ _needToSkip = 0;
+ _finishCons();
+ }
+
+ ParallelSortClusteredCursor::ParallelSortClusteredCursor( const QuerySpec& qSpec, const CommandInfo& cInfo )
+ : ClusteredCursor( qSpec ),
+ _qSpec( qSpec ), _cInfo( cInfo ), _totalTries( 0 )
+ {
+ _finishCons();
+ }
+
+ ParallelSortClusteredCursor::ParallelSortClusteredCursor( const set<Shard>& qShards, const QuerySpec& qSpec )
+ : ClusteredCursor( qSpec ),
+ _qSpec( qSpec ), _totalTries( 0 )
+ {
+ for( set<Shard>::const_iterator i = qShards.begin(), end = qShards.end(); i != end; ++i )
+ _qShards.insert( *i );
+
+ _finishCons();
+ }
+
+ void ParallelSortClusteredCursor::_finishCons() {
+ _numServers = _servers.size();
+ _cursors = 0;
+
+ if( ! _qSpec.isEmpty() ){
+
+ _needToSkip = _qSpec.ntoskip();
+ _cursors = 0;
+ _sortKey = _qSpec.sort();
+ _fields = _qSpec.fields();
+
+ if( ! isVersioned() ) assert( _cInfo.isEmpty() );
+ }
+
+ if ( ! _sortKey.isEmpty() && ! _fields.isEmpty() ) {
+ // we need to make sure the sort key is in the projection
+
+ set<string> sortKeyFields;
+ _sortKey.getFieldNames(sortKeyFields);
+
+ BSONObjBuilder b;
+ bool isNegative = false;
+ {
+ BSONObjIterator i( _fields );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ b.append( e );
+
+ string fieldName = e.fieldName();
+
+ // exact field
+ bool found = sortKeyFields.erase(fieldName);
+
+ // subfields
+ set<string>::const_iterator begin = sortKeyFields.lower_bound(fieldName + ".\x00");
+ set<string>::const_iterator end = sortKeyFields.lower_bound(fieldName + ".\xFF");
+ sortKeyFields.erase(begin, end);
+
+ if ( ! e.trueValue() ) {
+ uassert( 13431 , "have to have sort key in projection and removing it" , !found && begin == end );
+ }
+ else if (!e.isABSONObj()) {
+ isNegative = true;
+ }
+ }
+ }
+
+ if (isNegative) {
+ for (set<string>::const_iterator it(sortKeyFields.begin()), end(sortKeyFields.end()); it != end; ++it) {
+ b.append(*it, 1);
+ }
+ }
+
+ _fields = b.obj();
+ }
+
+ if( ! _qSpec.isEmpty() ){
+ _qSpec._fields = _fields;
+ }
+ }
+
+ void ParallelConnectionMetadata::cleanup( bool full ){
+
+ if( full || errored ) retryNext = false;
+
+ if( ! retryNext && pcState ){
+
+ if( errored && pcState->conn ){
+ // Don't return this conn to the pool if it's bad
+ pcState->conn->kill();
+ pcState->conn.reset();
+ }
+ else if( initialized ){
+
+ assert( pcState->cursor );
+ assert( pcState->conn );
+
+ if( ! finished && pcState->conn->ok() ){
+ try{
+ // Complete the call if only halfway done
+ bool retry = false;
+ pcState->cursor->initLazyFinish( retry );
+ }
+ catch( std::exception& ){
+ warning() << "exception closing cursor" << endl;
+ }
+ catch( ... ){
+ warning() << "unknown exception closing cursor" << endl;
+ }
+ }
+ }
+
+ // Double-check conn is closed
+ if( pcState->conn ){
+ pcState->conn->done();
+ }
+
+ pcState.reset();
+ }
+ else assert( finished || ! initialized );
+
+ initialized = false;
+ finished = false;
+ completed = false;
+ errored = false;
+ }
+
+
+
+ BSONObj ParallelConnectionState::toBSON() const {
+
+ BSONObj cursorPeek = BSON( "no cursor" << "" );
+ if( cursor ){
+ vector<BSONObj> v;
+ cursor->peek( v, 1 );
+ if( v.size() == 0 ) cursorPeek = BSON( "no data" << "" );
+ else cursorPeek = BSON( "" << v[0] );
+ }
+
+ BSONObj stateObj =
+ BSON( "conn" << ( conn ? ( conn->ok() ? conn->conn().toString() : "(done)" ) : "" ) <<
+ "vinfo" << ( manager ? ( str::stream() << manager->getns() << " @ " << manager->getVersion().toString() ) :
+ primary->toString() ) );
+
+ // Append cursor data if exists
+ BSONObjBuilder stateB;
+ stateB.appendElements( stateObj );
+ if( ! cursor ) stateB.append( "cursor", "(none)" );
+ else {
+ vector<BSONObj> v;
+ cursor->peek( v, 1 );
+ if( v.size() == 0 ) stateB.append( "cursor", "(empty)" );
+ else stateB.append( "cursor", v[0] );
+ }
+ return stateB.obj().getOwned();
+ }
+
+ BSONObj ParallelConnectionMetadata::toBSON() const {
+ return BSON( "state" << ( pcState ? pcState->toBSON() : BSONObj() ) <<
+ "retryNext" << retryNext <<
+ "init" << initialized <<
+ "finish" << finished <<
+ "errored" << errored );
+ }
+
+ BSONObj ParallelSortClusteredCursor::toBSON() const {
+
+ BSONObjBuilder b;
+
+ b.append( "tries", _totalTries );
+
+ {
+ BSONObjBuilder bb;
+ for( map< Shard, PCMData >::const_iterator i = _cursorMap.begin(), end = _cursorMap.end(); i != end; ++i ){
+ bb.append( i->first.toString(), i->second.toBSON() );
+ }
+ b.append( "cursors", bb.obj().getOwned() );
+ }
+
+ {
+ BSONObjBuilder bb;
+ for( map< string, int >::const_iterator i = _staleNSMap.begin(), end = _staleNSMap.end(); i != end; ++i ){
+ bb.append( i->first, i->second );
+ }
+ b.append( "staleTries", bb.obj().getOwned() );
+ }
+
+ return b.obj().getOwned();
+ }
+
+ string ParallelSortClusteredCursor::toString() const {
+ return str::stream() << "PCursor : " << toBSON();
+ }
+
+ void ParallelSortClusteredCursor::fullInit(){
+ startInit();
+ finishInit();
+ }
+
+ void ParallelSortClusteredCursor::_markStaleNS( const NamespaceString& staleNS, bool& forceReload, bool& fullReload ){
+ if( _staleNSMap.find( staleNS ) == _staleNSMap.end() ){
+ forceReload = false;
+ fullReload = false;
+ _staleNSMap[ staleNS ] = 1;
+ }
+ else{
+ int tries = ++_staleNSMap[ staleNS ];
+
+ if( tries >= 5 ) throw SendStaleConfigException( staleNS, str::stream() << "too many retries of stale version info" );
+
+ forceReload = tries > 1;
+ fullReload = tries > 2;
+ }
+ }
+
+ void ParallelSortClusteredCursor::_handleStaleNS( const NamespaceString& staleNS, bool forceReload, bool fullReload ){
+
+ DBConfigPtr config = grid.getDBConfig( staleNS.db );
+
+ // Reload db if needed, make sure it works
+ if( config && fullReload && ! config->reload() ){
+ // We didn't find the db after the reload, the db may have been dropped,
+ // reset this ptr
+ config.reset();
+ }
+
+ if( ! config ){
+ warning() << "cannot reload database info for stale namespace " << staleNS << endl;
+ }
+ else {
+ // Reload chunk manager, potentially forcing the namespace
+ config->getChunkManagerIfExists( staleNS, true, forceReload );
+ }
+
+ }
+
+ void ParallelSortClusteredCursor::startInit() {
+
+ bool returnPartial = ( _qSpec.options() & QueryOption_PartialResults );
+ bool specialVersion = _cInfo.versionedNS.size() > 0;
+ bool specialFilter = ! _cInfo.cmdFilter.isEmpty();
+ NamespaceString ns = specialVersion ? _cInfo.versionedNS : _qSpec.ns();
+
+ ChunkManagerPtr manager;
+ ShardPtr primary;
+
+ log( pc ) << "creating pcursor over " << _qSpec << " and " << _cInfo << endl;
+
+ set<Shard> todoStorage;
+ set<Shard>& todo = todoStorage;
+ string vinfo;
+
+ if( isVersioned() ){
+
+ DBConfigPtr config = grid.getDBConfig( ns.db ); // Gets or loads the config
+ uassert( 15989, "database not found for parallel cursor request", config );
+
+ // Try to get either the chunk manager or the primary shard
+ int cmRetries = 0;
+ // We need to test config->isSharded() to avoid throwing a stupid exception in most cases
+ // b/c that's how getChunkManager works
+            // This loop retries getting either the chunk manager or the primary; one or the other *should*
+            // exist eventually. TODO: Verify whether the loop is needed, i.e. whether we are protected by const fields or mutexes
+ while( ! ( config->isSharded( ns ) && ( manager = config->getChunkManagerIfExists( ns ) ).get() ) &&
+ ! ( primary = config->getShardIfExists( ns ) ) &&
+ cmRetries++ < 5 ) sleepmillis( 100 ); // TODO: Do we need to loop here?
+
+ uassert( 15919, "too many retries for chunk manager or primary", cmRetries < 5 );
+ assert( manager || primary );
+ assert( ! manager || ! primary );
+
+ if( manager ) vinfo = ( str::stream() << "[" << manager->getns() << " @ " << manager->getVersion().toString() << "]" );
+ else vinfo = (str::stream() << "[unsharded @ " << primary->toString() << "]" );
+
+ if( manager ) manager->getShardsForQuery( todo, specialFilter ? _cInfo.cmdFilter : _qSpec.filter() );
+ else if( primary ) todo.insert( *primary );
+
+ // Close all cursors on extra shards first, as these will be invalid
+ for( map< Shard, PCMData >::iterator i = _cursorMap.begin(), end = _cursorMap.end(); i != end; ++i ){
+
+ log( pc ) << "closing cursor on shard " << i->first << " as the connection is no longer required by " << vinfo << endl;
+
+ // Force total cleanup of these connections
+ if( todo.find( i->first ) == todo.end() ) i->second.cleanup();
+ }
+ }
+ else{
+
+ // Don't use version to get shards here
+ todo = _qShards;
+ vinfo = str::stream() << "[" << _qShards.size() << " shards specified]";
+
+ }
+
+ assert( todo.size() );
+
+ log( pc ) << "initializing over " << todo.size() << " shards required by " << vinfo << endl;
+
+ // Don't retry indefinitely for whatever reason
+ _totalTries++;
+ uassert( 15986, "too many retries in total", _totalTries < 10 );
+
+ for( set<Shard>::iterator i = todo.begin(), end = todo.end(); i != end; ++i ){
+
+ const Shard& shard = *i;
+ PCMData& mdata = _cursorMap[ shard ];
+
+ log( pc ) << "initializing on shard " << shard << ", current connection state is " << mdata.toBSON() << endl;
+
+ // This may be the first time connecting to this shard, if so we can get an error here
+ try {
+
+ if( mdata.initialized ){
+
+ assert( mdata.pcState );
+
+ PCStatePtr state = mdata.pcState;
+
+ bool compatiblePrimary = true;
+ bool compatibleManager = true;
+
+ // Only check for compatibility if we aren't forcing the shard choices
+ if( isVersioned() ){
+
+ if( primary && ! state->primary )
+ warning() << "Collection becoming unsharded detected" << endl;
+ if( manager && ! state->manager )
+ warning() << "Collection becoming sharded detected" << endl;
+ if( primary && state->primary && primary != state->primary )
+ warning() << "Weird shift of primary detected" << endl;
+
+ compatiblePrimary = primary && state->primary && primary == state->primary;
+ compatibleManager = manager && state->manager && manager->compatibleWith( state->manager, shard );
+
+ }
+
+ if( compatiblePrimary || compatibleManager ){
+ // If we're compatible, don't need to retry unless forced
+ if( ! mdata.retryNext ) continue;
+ // Do partial cleanup
+ mdata.cleanup( false );
+ }
+ else {
+ // Force total cleanup of connection if no longer compatible
+ mdata.cleanup();
+ }
+ }
+ else {
+ // Cleanup connection if we're not yet initialized
+ mdata.cleanup( false );
+ }
+
+ mdata.pcState.reset( new PCState() );
+ PCStatePtr state = mdata.pcState;
+
+ // Setup manager / primary
+ if( manager ) state->manager = manager;
+ else if( primary ) state->primary = primary;
+
+ assert( ! primary || shard == *primary || ! isVersioned() );
+
+ // Setup conn
+ if( ! state->conn ) state->conn.reset( new ShardConnection( shard, ns, manager ) );
+
+ if( state->conn->setVersion() ){
+ // It's actually okay if we set the version here, since either the manager will be verified as
+ // compatible, or if the manager doesn't exist, we don't care about version consistency
+ log( pc ) << "needed to set remote version on connection to value compatible with " << vinfo << endl;
+ }
+
+ // Setup cursor
+ if( ! state->cursor ){
+ state->cursor.reset( new DBClientCursor( state->conn->get(), _qSpec.ns(), _qSpec.query(),
+ isCommand() ? 1 : 0, // nToReturn (0 if query indicates multi)
+ 0, // nToSkip
+ // Does this need to be a ptr?
+ _qSpec.fields().isEmpty() ? 0 : &_qSpec._fields, // fieldsToReturn
+ _qSpec.options(), // options
+ _qSpec.ntoreturn() == 0 ? 0 : _qSpec.ntoreturn() + _qSpec.ntoskip() ) ); // batchSize
+ }
+
+ bool lazyInit = state->conn->get()->lazySupported();
+ if( lazyInit ){
+
+ // Need to keep track if this is a second or third try for replica sets
+ state->cursor->initLazy( mdata.retryNext );
+ mdata.retryNext = false;
+ mdata.initialized = true;
+ }
+ else{
+
+ // Without full initialization, throw an exception
+ uassert( 15987, str::stream() << "could not fully initialize cursor on shard " << shard.toString() << ", current connection state is " << mdata.toBSON().toString(), state->cursor->init() );
+ mdata.retryNext = false;
+ mdata.initialized = true;
+ mdata.finished = true;
+ }
+
+
+ log( pc ) << "initialized " << ( isCommand() ? "command " : "query " ) << ( lazyInit ? "(lazily) " : "(full) " ) << "on shard " << shard << ", current connection state is " << mdata.toBSON() << endl;
+
+ }
+ catch( SendStaleConfigException& e ){
+
+ // Our version isn't compatible with the current version anymore on at least one shard, need to retry immediately
+ NamespaceString staleNS = e.getns();
+
+ // Probably need to retry fully
+ bool forceReload, fullReload;
+ _markStaleNS( staleNS, forceReload, fullReload );
+
+ int logLevel = fullReload ? 0 : 1;
+ log( pc + logLevel ) << "stale config of ns " << staleNS << " during initialization, will retry with forced : " << forceReload << ", full : " << fullReload << endl;
+
+ // This is somewhat strange
+ if( staleNS != ns )
+ warning() << "versioned ns " << ns << " doesn't match stale config namespace " << staleNS << endl;
+
+ _handleStaleNS( staleNS, forceReload, fullReload );
+
+ // Restart with new chunk manager
+ startInit();
+ return;
+ }
+ catch( SocketException& e ){
+ warning() << "socket exception when initializing on " << shard << ", current connection state is " << mdata.toBSON() << causedBy( e ) << endl;
+ mdata.errored = true;
+ if( returnPartial ){
+ mdata.cleanup();
+ continue;
+ }
+ throw;
+ }
+ catch( DBException& e ){
+ warning() << "db exception when initializing on " << shard << ", current connection state is " << mdata.toBSON() << causedBy( e ) << endl;
+ mdata.errored = true;
+ if( returnPartial && e.getCode() == 15925 /* From above! */ ){
+ mdata.cleanup();
+ continue;
+ }
+ throw;
+ }
+ catch( std::exception& e){
+ warning() << "exception when initializing on " << shard << ", current connection state is " << mdata.toBSON() << causedBy( e ) << endl;
+ mdata.errored = true;
+ throw;
+ }
+ catch( ... ){
+ warning() << "unknown exception when initializing on " << shard << ", current connection state is " << mdata.toBSON() << endl;
+ mdata.errored = true;
+ throw;
+ }
+ }
+
+ // Sanity check final init'ed connections
+ for( map< Shard, PCMData >::iterator i = _cursorMap.begin(), end = _cursorMap.end(); i != end; ++i ){
+
+ const Shard& shard = i->first;
+ PCMData& mdata = i->second;
+
+ if( ! mdata.pcState ) continue;
+
+ // Make sure all state is in shards
+ assert( todo.find( shard ) != todo.end() );
+            assert( mdata.initialized == true );
+ if( ! mdata.completed ) assert( mdata.pcState->conn->ok() );
+ assert( mdata.pcState->cursor );
+ if( isVersioned() ) assert( mdata.pcState->primary || mdata.pcState->manager );
+ else assert( ! mdata.pcState->primary || ! mdata.pcState->manager );
+ assert( ! mdata.retryNext );
+
+ if( mdata.completed ) assert( mdata.finished );
+ if( mdata.finished ) assert( mdata.initialized );
+ if( ! returnPartial ) assert( mdata.initialized );
+ }
+
+ }
+
+
+ void ParallelSortClusteredCursor::finishInit(){
+
+ bool returnPartial = ( _qSpec.options() & QueryOption_PartialResults );
+ bool specialVersion = _cInfo.versionedNS.size() > 0;
+ string ns = specialVersion ? _cInfo.versionedNS : _qSpec.ns();
+
+ bool retry = false;
+ set< string > staleNSes;
+
+ log( pc ) << "finishing over " << _cursorMap.size() << " shards" << endl;
+
+ for( map< Shard, PCMData >::iterator i = _cursorMap.begin(), end = _cursorMap.end(); i != end; ++i ){
+
+ const Shard& shard = i->first;
+ PCMData& mdata = i->second;
+
+ log( pc ) << "finishing on shard " << shard << ", current connection state is " << mdata.toBSON() << endl;
+
+ // Ignore empty conns for now
+ if( ! mdata.pcState ) continue;
+
+ PCStatePtr state = mdata.pcState;
+
+ try {
+
+ // Sanity checks
+ if( ! mdata.completed ) assert( state->conn && state->conn->ok() );
+ assert( state->cursor );
+ if( isVersioned() ){
+ assert( state->manager || state->primary );
+ assert( ! state->manager || ! state->primary );
+ }
+ else assert( ! state->manager && ! state->primary );
+
+
+ // If we weren't init'ing lazily, ignore this
+ if( ! mdata.finished ){
+
+ mdata.finished = true;
+
+ // Mark the cursor as non-retry by default
+ mdata.retryNext = false;
+
+ if( ! state->cursor->initLazyFinish( mdata.retryNext ) ){
+ if( ! mdata.retryNext ){
+ uassert( 15988, "error querying server", false );
+ }
+ else{
+ retry = true;
+ continue;
+ }
+ }
+
+ mdata.completed = false;
+ }
+
+ if( ! mdata.completed ){
+
+ mdata.completed = true;
+
+ // Make sure we didn't get an error we should rethrow
+ // TODO : Rename/refactor this to something better
+ _checkCursor( state->cursor.get() );
+
+ // Finalize state
+ state->cursor->attach( state->conn.get() ); // Closes connection for us
+
+ log( pc ) << "finished on shard " << shard << ", current connection state is " << mdata.toBSON() << endl;
+ }
+ }
+ catch( RecvStaleConfigException& e ){
+ retry = true;
+
+ // Will retry all at once
+ staleNSes.insert( e.getns() );
+
+ // Fully clear this cursor, as it needs to be re-established
+ mdata.cleanup();
+ continue;
+ }
+ catch ( MsgAssertionException& e ){
+ warning() << "socket (msg) exception when finishing on " << shard << ", current connection state is " << mdata.toBSON() << causedBy( e ) << endl;
+ mdata.errored = true;
+ if( returnPartial ){
+ mdata.cleanup();
+ continue;
+ }
+ throw;
+ }
+ catch( SocketException& e ){
+ warning() << "socket exception when finishing on " << shard << ", current connection state is " << mdata.toBSON() << causedBy( e ) << endl;
+ mdata.errored = true;
+ if( returnPartial ){
+ mdata.cleanup();
+ continue;
+ }
+ throw;
+ }
+ catch( DBException& e ){
+ warning() << "db exception when finishing on " << shard << ", current connection state is " << mdata.toBSON() << causedBy( e ) << endl;
+ mdata.errored = true;
+ throw;
+ }
+ catch( std::exception& e){
+ warning() << "exception when finishing on " << shard << ", current connection state is " << mdata.toBSON() << causedBy( e ) << endl;
+ mdata.errored = true;
+ throw;
+ }
+ catch( ... ){
+ warning() << "unknown exception when finishing on " << shard << ", current connection state is " << mdata.toBSON() << endl;
+ mdata.errored = true;
+ throw;
+ }
+
+ }
+
+ // Retry logic for single refresh of namespaces / retry init'ing connections
+ if( retry ){
+
+ // Refresh stale namespaces
+ if( staleNSes.size() ){
+ for( set<string>::iterator i = staleNSes.begin(), end = staleNSes.end(); i != end; ++i ){
+
+ const string& staleNS = *i;
+
+ bool forceReload, fullReload;
+ _markStaleNS( staleNS, forceReload, fullReload );
+
+ int logLevel = fullReload ? 0 : 1;
+ log( pc + logLevel ) << "stale config of ns " << staleNS << " on finishing query, will retry with forced : " << forceReload << ", full : " << fullReload << endl;
+
+ // This is somewhat strange
+ if( staleNS != ns )
+ warning() << "versioned ns " << ns << " doesn't match stale config namespace " << staleNS << endl;
+
+ _handleStaleNS( staleNS, forceReload, fullReload );
+ }
+ }
+
+ // Re-establish connections we need to
+ startInit();
+ finishInit();
+ return;
+ }
+
+ // Sanity check and clean final connections
+ map< Shard, PCMData >::iterator i = _cursorMap.begin();
+ while( i != _cursorMap.end() ){
+
+ // const Shard& shard = i->first;
+ PCMData& mdata = i->second;
+
+ // Erase empty stuff
+ if( ! mdata.pcState ){
+ log() << "PCursor erasing empty state " << mdata.toBSON() << endl;
+ _cursorMap.erase( i++ );
+ continue;
+ }
+ else ++i;
+
+ // Make sure all state is in shards
+            assert( mdata.initialized == true );
+            assert( mdata.finished == true );
+            assert( mdata.completed == true );
+ assert( ! mdata.pcState->conn->ok() );
+ assert( mdata.pcState->cursor );
+ if( isVersioned() ) assert( mdata.pcState->primary || mdata.pcState->manager );
+ else assert( ! mdata.pcState->primary && ! mdata.pcState->manager );
+ }
+
+ // TODO : More cleanup of metadata?
+
+ // LEGACY STUFF NOW
+
+ _cursors = new FilteringClientCursor[ _cursorMap.size() ];
+
+ // Put the cursors in the legacy format
+ int index = 0;
+ for( map< Shard, PCMData >::iterator i = _cursorMap.begin(), end = _cursorMap.end(); i != end; ++i ){
+
+ PCMData& mdata = i->second;
+
+ _cursors[ index ].reset( mdata.pcState->cursor.get() );
+ _servers.insert( ServerAndQuery( i->first.getConnString(), BSONObj() ) );
+
+ index++;
+ }
+
+ _numServers = _cursorMap.size();
+
+ }
+
+ bool ParallelSortClusteredCursor::isSharded() {
+ // LEGACY is always unsharded
+ if( _qSpec.isEmpty() ) return false;
+
+ if( ! isVersioned() ) return false;
+
+ if( _cursorMap.size() > 1 ) return true;
+ if( _cursorMap.begin()->second.pcState->manager ) return true;
+ return false;
+ }
+
+ ShardPtr ParallelSortClusteredCursor::getPrimary() {
+ if( isSharded() || ! isVersioned() ) return ShardPtr();
+ return _cursorMap.begin()->second.pcState->primary;
+ }
+
+ void ParallelSortClusteredCursor::getQueryShards( set<Shard>& shards ) {
+ for( map< Shard, PCMData >::iterator i = _cursorMap.begin(), end = _cursorMap.end(); i != end; ++i ){
+ shards.insert( i->first );
+ }
+ }
+
+ ChunkManagerPtr ParallelSortClusteredCursor::getChunkManager( const Shard& shard ) {
+ if( ! isSharded() ) return ChunkManagerPtr();
+
+ map<Shard,PCMData>::iterator i = _cursorMap.find( shard );
+
+ if( i == _cursorMap.end() ) return ChunkManagerPtr();
+ else return i->second.pcState->manager;
+ }
+
+ DBClientCursorPtr ParallelSortClusteredCursor::getShardCursor( const Shard& shard ) {
+ map<Shard,PCMData>::iterator i = _cursorMap.find( shard );
+
+ if( i == _cursorMap.end() ) return DBClientCursorPtr();
+ else return i->second.pcState->cursor;
+ }
+
+ void ParallelSortClusteredCursor::_init() {
+ if( ! _qSpec.isEmpty() ) fullInit();
+ else _oldInit();
+ }
+
+
+ // DEPRECATED
+
+
+ // TODO: Merge with futures API? We do a lot of error checking here that would be useful elsewhere.
+ void ParallelSortClusteredCursor::_oldInit() {
+
+ // log() << "Starting parallel search..." << endl;
+
+ // make sure we're not already initialized
+ assert( ! _cursors );
+ _cursors = new FilteringClientCursor[_numServers];
+
+ bool returnPartial = ( _options & QueryOption_PartialResults );
+
+ vector<ServerAndQuery> queries( _servers.begin(), _servers.end() );
+ set<int> retryQueries;
+ int finishedQueries = 0;
+
+ vector< shared_ptr<ShardConnection> > conns;
+ vector<string> servers;
+
+ // Since we may get all sorts of errors, record them all as they come and throw them later if necessary
+ vector<string> staleConfigExs;
+ vector<string> socketExs;
+ vector<string> otherExs;
+ bool allConfigStale = false;
+
+ int retries = -1;
+
+ // Loop through all the queries until we've finished or gotten a socket exception on all of them
+ // We break early for non-socket exceptions, and socket exceptions if we aren't returning partial results
+ do {
+ retries++;
+
+ bool firstPass = retryQueries.size() == 0;
+
+ if( ! firstPass ){
+ log() << "retrying " << ( returnPartial ? "(partial) " : "" ) << "parallel connection to ";
+ for( set<int>::iterator it = retryQueries.begin(); it != retryQueries.end(); ++it ){
+ log() << queries[*it]._server << ", ";
+ }
+ log() << finishedQueries << " finished queries." << endl;
+ }
+
+ size_t num = 0;
+ for ( vector<ServerAndQuery>::iterator it = queries.begin(); it != queries.end(); ++it ) {
+ size_t i = num++;
+
+ const ServerAndQuery& sq = *it;
+
+ // If we're not retrying this cursor on later passes, continue
+ if( ! firstPass && retryQueries.find( i ) == retryQueries.end() ) continue;
+
+ // log() << "Querying " << _query << " from " << _ns << " for " << sq._server << endl;
+
+ BSONObj q = _query;
+ if ( ! sq._extra.isEmpty() ) {
+ q = concatQuery( q , sq._extra );
+ }
+
+ string errLoc = " @ " + sq._server;
+
+ if( firstPass ){
+
+ // This may be the first time connecting to this shard, if so we can get an error here
+ try {
+ conns.push_back( shared_ptr<ShardConnection>( new ShardConnection( sq._server , _ns ) ) );
+ }
+ catch( std::exception& e ){
+ socketExs.push_back( e.what() + errLoc );
+ if( ! returnPartial ){
+ num--;
+ break;
+ }
+ conns.push_back( shared_ptr<ShardConnection>() );
+ continue;
+ }
+
+ servers.push_back( sq._server );
+ }
+
+ if ( conns[i]->setVersion() ) {
+ conns[i]->done();
+ staleConfigExs.push_back( (string)"stale config detected for " + RecvStaleConfigException( _ns , "ParallelCursor::_init" , true ).what() + errLoc );
+ break;
+ }
+
+ LOG(5) << "ParallelSortClusteredCursor::init server:" << sq._server << " ns:" << _ns
+ << " query:" << q << " _fields:" << _fields << " options: " << _options << endl;
+
+ if( ! _cursors[i].raw() )
+ _cursors[i].reset( new DBClientCursor( conns[i]->get() , _ns , q ,
+ 0 , // nToReturn
+ 0 , // nToSkip
+ _fields.isEmpty() ? 0 : &_fields , // fieldsToReturn
+ _options ,
+ _batchSize == 0 ? 0 : _batchSize + _needToSkip // batchSize
+ ) );
+
+ try{
+ _cursors[i].raw()->initLazy( ! firstPass );
+ }
+ catch( SocketException& e ){
+ socketExs.push_back( e.what() + errLoc );
+ _cursors[i].reset( NULL );
+ conns[i]->done();
+ if( ! returnPartial ) break;
+ }
+ catch( std::exception& e){
+ otherExs.push_back( e.what() + errLoc );
+ _cursors[i].reset( NULL );
+ conns[i]->done();
+ break;
+ }
+
+ }
+
+ // Go through all the potentially started cursors and finish initializing them or log any errors and
+ // potentially retry
+ // TODO: Better error classification would make this easier, errors are indicated in all sorts of ways
+ // here that we need to trap.
+ for ( size_t i = 0; i < num; i++ ) {
+
+ // log() << "Finishing query for " << cons[i].get()->getHost() << endl;
+ string errLoc = " @ " + queries[i]._server;
+
+ if( ! _cursors[i].raw() || ( ! firstPass && retryQueries.find( i ) == retryQueries.end() ) ){
+ if( conns[i] ) conns[i].get()->done();
+ continue;
+ }
+
+ assert( conns[i] );
+ retryQueries.erase( i );
+
+ bool retry = false;
+
+ try {
+
+ if( ! _cursors[i].raw()->initLazyFinish( retry ) ) {
+
+ warning() << "invalid result from " << conns[i]->getHost() << ( retry ? ", retrying" : "" ) << endl;
+ _cursors[i].reset( NULL );
+
+ if( ! retry ){
+ socketExs.push_back( str::stream() << "error querying server: " << servers[i] );
+ conns[i]->done();
+ }
+ else {
+ retryQueries.insert( i );
+ }
+
+ continue;
+ }
+ }
+ catch ( StaleConfigException& e ){
+ // Our stored configuration data is actually stale, we need to reload it
+ // when we throw our exception
+ allConfigStale = true;
+
+ staleConfigExs.push_back( (string)"stale config detected when receiving response for " + e.what() + errLoc );
+ _cursors[i].reset( NULL );
+ conns[i]->done();
+ continue;
+ }
+ catch ( MsgAssertionException& e ){
+ socketExs.push_back( e.what() + errLoc );
+ _cursors[i].reset( NULL );
+ conns[i]->done();
+ continue;
+ }
+ catch ( SocketException& e ) {
+ socketExs.push_back( e.what() + errLoc );
+ _cursors[i].reset( NULL );
+ conns[i]->done();
+ continue;
+ }
+ catch( std::exception& e ){
+ otherExs.push_back( e.what() + errLoc );
+ _cursors[i].reset( NULL );
+ conns[i]->done();
+ continue;
+ }
+
+ try {
+ _cursors[i].raw()->attach( conns[i].get() ); // this calls done on conn
+ _checkCursor( _cursors[i].raw() );
+
+ finishedQueries++;
+ }
+ catch ( StaleConfigException& e ){
+
+ // Our stored configuration data is actually stale, we need to reload it
+ // when we throw our exception
+ allConfigStale = true;
+
+ staleConfigExs.push_back( (string)"stale config detected for " + e.what() + errLoc );
+ _cursors[i].reset( NULL );
+ conns[i]->done();
+ continue;
+ }
+ catch( std::exception& e ){
+ otherExs.push_back( e.what() + errLoc );
+ _cursors[i].reset( NULL );
+ conns[i]->done();
+ continue;
+ }
+ }
+
+ // Don't exceed our max retries, should not happen
+ assert( retries < 5 );
+ }
+ while( retryQueries.size() > 0 /* something to retry */ &&
+ ( socketExs.size() == 0 || returnPartial ) /* no conn issues */ &&
+ staleConfigExs.size() == 0 /* no config issues */ &&
+ otherExs.size() == 0 /* no other issues */);
+
+ // Assert that our conns are all closed!
+ for( vector< shared_ptr<ShardConnection> >::iterator i = conns.begin(); i < conns.end(); ++i ){
+ assert( ! (*i) || ! (*i)->ok() );
+ }
+
+ // Handle errors we got during initialization.
+ // If we're returning partial results, we can ignore socketExs, but nothing else
+ // Log a warning in any case, so we don't lose these messages
+ bool throwException = ( socketExs.size() > 0 && ! returnPartial ) || staleConfigExs.size() > 0 || otherExs.size() > 0;
+
+ if( socketExs.size() > 0 || staleConfigExs.size() > 0 || otherExs.size() > 0 ) {
+
+ vector<string> errMsgs;
+
+ errMsgs.insert( errMsgs.end(), staleConfigExs.begin(), staleConfigExs.end() );
+ errMsgs.insert( errMsgs.end(), otherExs.begin(), otherExs.end() );
+ errMsgs.insert( errMsgs.end(), socketExs.begin(), socketExs.end() );
+
+ stringstream errMsg;
+ errMsg << "could not initialize cursor across all shards because : ";
+ for( vector<string>::iterator i = errMsgs.begin(); i != errMsgs.end(); i++ ){
+ if( i != errMsgs.begin() ) errMsg << " :: and :: ";
+ errMsg << *i;
+ }
+
+ if( throwException && staleConfigExs.size() > 0 )
+ throw RecvStaleConfigException( _ns , errMsg.str() , ! allConfigStale );
+ else if( throwException )
+ throw DBException( errMsg.str(), 14827 );
+ else
+ warning() << errMsg.str() << endl;
+ }
+
+ if( retries > 0 )
+ log() << "successfully finished parallel query after " << retries << " retries" << endl;
+
+ }
+
+ ParallelSortClusteredCursor::~ParallelSortClusteredCursor() {
+ // Clear out our metadata before removing legacy cursor data
+ _cursorMap.clear();
+ for( int i = 0; i < _numServers; i++ ) _cursors[i].release();
+
+ delete [] _cursors;
+ _cursors = 0;
+ }
+
+ bool ParallelSortClusteredCursor::more() {
+
+ if ( _needToSkip > 0 ) {
+ int n = _needToSkip;
+ _needToSkip = 0;
+
+ while ( n > 0 && more() ) {
+ BSONObj x = next();
+ n--;
+ }
+
+ _needToSkip = n;
+ }
+
+ for ( int i=0; i<_numServers; i++ ) {
+ if ( _cursors[i].more() )
+ return true;
+ }
+ return false;
+ }
+
+ BSONObj ParallelSortClusteredCursor::next() {
+ BSONObj best = BSONObj();
+ int bestFrom = -1;
+
+ for ( int i=0; i<_numServers; i++) {
+ if ( ! _cursors[i].more() )
+ continue;
+
+ BSONObj me = _cursors[i].peek();
+
+ if ( best.isEmpty() ) {
+ best = me;
+ bestFrom = i;
+ if( _sortKey.isEmpty() ) break;
+ continue;
+ }
+
+ int comp = best.woSortOrder( me , _sortKey , true );
+ if ( comp < 0 )
+ continue;
+
+ best = me;
+ bestFrom = i;
+ }
+
+ uassert( 10019 , "no more elements" , ! best.isEmpty() );
+ _cursors[bestFrom].next();
+
+ return best;
+ }
+
+ void ParallelSortClusteredCursor::_explain( map< string,list<BSONObj> >& out ) {
+ for ( set<ServerAndQuery>::iterator i=_servers.begin(); i!=_servers.end(); ++i ) {
+ const ServerAndQuery& sq = *i;
+ list<BSONObj> & l = out[sq._server];
+ l.push_back( explain( sq._server , sq._extra ) );
+ }
+
+ }
+
+ // -----------------
+ // ---- Future -----
+ // -----------------
+
+ Future::CommandResult::CommandResult( const string& server , const string& db , const BSONObj& cmd , int options , DBClientBase * conn )
+ :_server(server) ,_db(db) , _options(options), _cmd(cmd) ,_conn(conn) ,_done(false)
+ {
+ init();
+ }
+
+ void Future::CommandResult::init(){
+ try {
+ if ( ! _conn ){
+ _connHolder.reset( new ScopedDbConnection( _server ) );
+ _conn = _connHolder->get();
+ }
+
+ if ( _conn->lazySupported() ) {
+ _cursor.reset( new DBClientCursor(_conn, _db + ".$cmd", _cmd, -1/*limit*/, 0, NULL, _options, 0));
+ _cursor->initLazy();
+ }
+ else {
+ _done = true; // we set _done first because even if there is an error we're done
+ _ok = _conn->runCommand( _db , _cmd , _res , _options );
+ }
+ }
+ catch ( std::exception& e ) {
+            error() << "Future::spawnCommand (part 1) exception: " << e.what() << endl;
+ _ok = false;
+ _done = true;
+ }
+ }
+
+ bool Future::CommandResult::join( int maxRetries ) {
+ if (_done)
+ return _ok;
+
+
+ _ok = false;
+ for( int i = 1; i <= maxRetries; i++ ){
+
+ try {
+ bool retry = false;
+ bool finished = _cursor->initLazyFinish( retry );
+
+ // Shouldn't need to communicate with server any more
+ if ( _connHolder )
+ _connHolder->done();
+
+ uassert(14812, str::stream() << "Error running command on server: " << _server, finished);
+ massert(14813, "Command returned nothing", _cursor->more());
+
+ _res = _cursor->nextSafe();
+ _ok = _res["ok"].trueValue();
+
+ break;
+ }
+ catch ( RecvStaleConfigException& e ){
+
+ assert( versionManager.isVersionableCB( _conn ) );
+
+ if( i >= maxRetries ){
+                    error() << "Future::spawnCommand (part 2) stale config exception" << causedBy( e ) << endl;
+ throw e;
+ }
+
+ if( i >= maxRetries / 2 ){
+ if( ! versionManager.forceRemoteCheckShardVersionCB( e.getns() ) ){
+                        error() << "Future::spawnCommand (part 2) no config detected" << causedBy( e ) << endl;
+ throw e;
+ }
+ }
+
+ versionManager.checkShardVersionCB( _conn, e.getns(), false, 1 );
+
+ LOG( i > 1 ? 0 : 1 ) << "retrying lazy command" << causedBy( e ) << endl;
+
+ assert( _conn->lazySupported() );
+ _done = false;
+ init();
+ continue;
+ }
+ catch ( std::exception& e ) {
+                error() << "Future::spawnCommand (part 2) exception: " << causedBy( e ) << endl;
+ break;
+ }
+
+ }
+
+ _done = true;
+ return _ok;
+ }
+
+ shared_ptr<Future::CommandResult> Future::spawnCommand( const string& server , const string& db , const BSONObj& cmd , int options , DBClientBase * conn ) {
+ shared_ptr<Future::CommandResult> res (new Future::CommandResult( server , db , cmd , options , conn ));
+ return res;
+ }
+
+}
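To illustrate ClusteredCursor::concatQuery() defined above, a small sketch (values are examples only; the expected results shown in comments follow directly from the implementation):

    #include "parallel.h"
    using namespace mongo;

    void concatQueryDemo() {
        // plain filter: the extra filter is simply appended
        BSONObj a = ClusteredCursor::concatQuery( BSON( "a" << 1 ) , BSON( "b" << 2 ) );
        // a == { a: 1, b: 2 }

        // wrapped query: only the embedded "query" element is merged, the other elements are preserved
        BSONObj b = ClusteredCursor::concatQuery(
            BSON( "query" << BSON( "a" << 1 ) << "orderby" << BSON( "x" << 1 ) ) ,
            BSON( "b" << 2 ) );
        // b == { query: { a: 1, b: 2 }, orderby: { x: 1 } }
    }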
diff --git a/src/mongo/client/parallel.h b/src/mongo/client/parallel.h
new file mode 100644
index 00000000000..1cbbd9a1cb5
--- /dev/null
+++ b/src/mongo/client/parallel.h
@@ -0,0 +1,444 @@
+// parallel.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ tools for working in parallel/sharded/clustered environment
+ */
+
+#pragma once
+
+#include "../pch.h"
+#include "dbclient.h"
+#include "redef_macros.h"
+#include "../db/dbmessage.h"
+#include "../db/matcher.h"
+#include "../util/concurrency/mvar.h"
+
+namespace mongo {
+
+ /**
+ * holder for a server address and a query to run
+ */
+ class ServerAndQuery {
+ public:
+ ServerAndQuery( const string& server , BSONObj extra = BSONObj() , BSONObj orderObject = BSONObj() ) :
+ _server( server ) , _extra( extra.getOwned() ) , _orderObject( orderObject.getOwned() ) {
+ }
+
+ bool operator<( const ServerAndQuery& other ) const {
+ if ( ! _orderObject.isEmpty() )
+ return _orderObject.woCompare( other._orderObject ) < 0;
+
+ if ( _server < other._server )
+ return true;
+            if ( _server > other._server )
+ return false;
+ return _extra.woCompare( other._extra ) < 0;
+ }
+
+ string toString() const {
+ StringBuilder ss;
+ ss << "server:" << _server << " _extra:" << _extra.toString() << " _orderObject:" << _orderObject.toString();
+ return ss.str();
+ }
+
+ operator string() const {
+ return toString();
+ }
+
+ string _server;
+ BSONObj _extra;
+ BSONObj _orderObject;
+ };
+
+ /**
+ * this is a cursor that works over a set of servers
+     * can be used in serial/parallel as controlled by subclasses
+ */
+ class ClusteredCursor {
+ public:
+ ClusteredCursor( const QuerySpec& q );
+ ClusteredCursor( QueryMessage& q );
+ ClusteredCursor( const string& ns , const BSONObj& q , int options=0 , const BSONObj& fields=BSONObj() );
+ virtual ~ClusteredCursor();
+
+ /** call before using */
+ void init();
+
+ virtual bool more() = 0;
+ virtual BSONObj next() = 0;
+
+ static BSONObj concatQuery( const BSONObj& query , const BSONObj& extraFilter );
+
+ virtual string type() const = 0;
+
+ virtual void explain(BSONObjBuilder& b);
+
+ protected:
+
+ virtual void _init() = 0;
+
+ auto_ptr<DBClientCursor> query( const string& server , int num = 0 , BSONObj extraFilter = BSONObj() , int skipLeft = 0 , bool lazy=false );
+ BSONObj explain( const string& server , BSONObj extraFilter = BSONObj() );
+
+ /**
+ * checks the cursor for any errors
+         * will throw an exception if an error is encountered
+ */
+ void _checkCursor( DBClientCursor * cursor );
+
+ static BSONObj _concatFilter( const BSONObj& filter , const BSONObj& extraFilter );
+
+ virtual void _explain( map< string,list<BSONObj> >& out ) = 0;
+
+ string _ns;
+ BSONObj _query;
+ int _options;
+ BSONObj _fields;
+ int _batchSize;
+
+ bool _didInit;
+
+ bool _done;
+ };
+
+
+ class FilteringClientCursor {
+ public:
+ FilteringClientCursor( const BSONObj filter = BSONObj() );
+ FilteringClientCursor( DBClientCursor* cursor , const BSONObj filter = BSONObj() );
+ FilteringClientCursor( auto_ptr<DBClientCursor> cursor , const BSONObj filter = BSONObj() );
+ ~FilteringClientCursor();
+
+ void reset( auto_ptr<DBClientCursor> cursor );
+ void reset( DBClientCursor* cursor );
+
+ bool more();
+ BSONObj next();
+
+ BSONObj peek();
+
+ DBClientCursor* raw() { return _cursor.get(); }
+
+ // Required for new PCursor
+ void release(){ _cursor.release(); }
+
+ private:
+ void _advance();
+
+ Matcher _matcher;
+ auto_ptr<DBClientCursor> _cursor;
+
+ BSONObj _next;
+ bool _done;
+ };
+
+
+ class Servers {
+ public:
+ Servers() {
+ }
+
+ void add( const ServerAndQuery& s ) {
+ add( s._server , s._extra );
+ }
+
+ void add( const string& server , const BSONObj& filter ) {
+ vector<BSONObj>& mine = _filters[server];
+ mine.push_back( filter.getOwned() );
+ }
+
+        // TODO: pick a less horrible name
+ class View {
+ View( const Servers* s ) {
+ for ( map<string, vector<BSONObj> >::const_iterator i=s->_filters.begin(); i!=s->_filters.end(); ++i ) {
+ _servers.push_back( i->first );
+ _filters.push_back( i->second );
+ }
+ }
+ public:
+ int size() const {
+ return _servers.size();
+ }
+
+ string getServer( int n ) const {
+ return _servers[n];
+ }
+
+ vector<BSONObj> getFilter( int n ) const {
+ return _filters[ n ];
+ }
+
+ private:
+ vector<string> _servers;
+ vector< vector<BSONObj> > _filters;
+
+ friend class Servers;
+ };
+
+ View view() const {
+ return View( this );
+ }
+
+
+ private:
+ map<string, vector<BSONObj> > _filters;
+
+ friend class View;
+ };
+
+
+ /**
+ * runs a query in serial across any number of servers
+ * returns all results from 1 server, then the next, etc...
+ */
+ class SerialServerClusteredCursor : public ClusteredCursor {
+ public:
+ SerialServerClusteredCursor( const set<ServerAndQuery>& servers , QueryMessage& q , int sortOrder=0);
+ virtual bool more();
+ virtual BSONObj next();
+ virtual string type() const { return "SerialServer"; }
+
+ protected:
+ virtual void _explain( map< string,list<BSONObj> >& out );
+
+ void _init() {}
+
+ vector<ServerAndQuery> _servers;
+ unsigned _serverIndex;
+
+ FilteringClientCursor _current;
+
+ int _needToSkip;
+ };
+
+
+
+ class CommandInfo {
+ public:
+ string versionedNS;
+ BSONObj cmdFilter;
+
+ CommandInfo() {}
+ CommandInfo( const string& vns, const BSONObj& filter ) : versionedNS( vns ), cmdFilter( filter ) {}
+
+ bool isEmpty(){
+ return versionedNS.size() == 0;
+ }
+
+ string toString() const {
+ return str::stream() << "CInfo " << BSON( "v_ns" << versionedNS << "filter" << cmdFilter );
+ }
+ };
+
+ class ShardConnection;
+ typedef shared_ptr<ShardConnection> ShardConnectionPtr;
+
+ class DBClientCursor;
+ typedef shared_ptr<DBClientCursor> DBClientCursorPtr;
+
+ class Shard;
+ typedef shared_ptr<Shard> ShardPtr;
+
+ class ChunkManager;
+ typedef shared_ptr<const ChunkManager> ChunkManagerPtr;
+
+ class ParallelConnectionState {
+ public:
+
+ ShardConnectionPtr conn;
+ DBClientCursorPtr cursor;
+
+ // Version information
+ ChunkManagerPtr manager;
+ ShardPtr primary;
+
+ BSONObj toBSON() const;
+
+ string toString() const {
+ return str::stream() << "PCState : " << toBSON();
+ }
+ };
+
+ typedef ParallelConnectionState PCState;
+ typedef shared_ptr<PCState> PCStatePtr;
+
+ class ParallelConnectionMetadata {
+ public:
+
+ ParallelConnectionMetadata() :
+ retryNext( false ), initialized( false ), finished( false ), completed( false ), errored( false ) { }
+
+ ~ParallelConnectionMetadata(){
+ cleanup( true );
+ }
+
+ void cleanup( bool full = true );
+
+ PCStatePtr pcState;
+
+ bool retryNext;
+
+ bool initialized;
+ bool finished;
+ bool completed;
+
+ bool errored;
+
+ BSONObj toBSON() const;
+
+ string toString() const {
+ return str::stream() << "PCMData : " << toBSON();
+ }
+ };
+
+ typedef ParallelConnectionMetadata PCMData;
+ typedef shared_ptr<PCMData> PCMDataPtr;
+
+ /**
+ * Runs a query in parallel across N servers. New logic has several modes -
+ * 1) Standard query, enforces compatible chunk versions for queries across all results
+ * 2) Standard query, sent to particular servers with no compatible chunk version enforced, but handling
+ * stale configuration exceptions
+ * 3) Command query, either enforcing compatible chunk versions or sent to particular shards.
+ */
+ class ParallelSortClusteredCursor : public ClusteredCursor {
+ public:
+
+ ParallelSortClusteredCursor( const QuerySpec& qSpec, const CommandInfo& cInfo = CommandInfo() );
+ ParallelSortClusteredCursor( const set<Shard>& servers, const QuerySpec& qSpec );
+
+ // LEGACY Constructors
+ ParallelSortClusteredCursor( const set<ServerAndQuery>& servers , QueryMessage& q , const BSONObj& sortKey );
+ ParallelSortClusteredCursor( const set<ServerAndQuery>& servers , const string& ns ,
+ const Query& q , int options=0, const BSONObj& fields=BSONObj() );
+
+ virtual ~ParallelSortClusteredCursor();
+ virtual bool more();
+ virtual BSONObj next();
+ virtual string type() const { return "ParallelSort"; }
+
+ void fullInit();
+ void startInit();
+ void finishInit();
+
+ bool isCommand(){ return NamespaceString( _qSpec.ns() ).isCommand(); }
+ bool isVersioned(){ return _qShards.size() == 0; }
+
+ bool isSharded();
+ ShardPtr getPrimary();
+ void getQueryShards( set<Shard>& shards );
+ ChunkManagerPtr getChunkManager( const Shard& shard );
+ DBClientCursorPtr getShardCursor( const Shard& shard );
+
+ BSONObj toBSON() const;
+ string toString() const;
+
+ protected:
+ void _finishCons();
+ void _init();
+ void _oldInit();
+
+ virtual void _explain( map< string,list<BSONObj> >& out );
+
+ void _markStaleNS( const NamespaceString& staleNS, bool& forceReload, bool& fullReload );
+ void _handleStaleNS( const NamespaceString& staleNS, bool forceReload, bool fullReload );
+
+ set<Shard> _qShards;
+ QuerySpec _qSpec;
+ CommandInfo _cInfo;
+
+ // Count round-trips req'd for namespaces and total
+ map<string,int> _staleNSMap;
+ int _totalTries;
+
+ map<Shard,PCMData> _cursorMap;
+
+ // LEGACY BELOW
+ int _numServers;
+ set<ServerAndQuery> _servers;
+ BSONObj _sortKey;
+
+ FilteringClientCursor * _cursors;
+ int _needToSkip;
+ };
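A minimal usage sketch of the cursor above (not part of this patch), using the legacy constructor declared earlier in the class. The namespace, sort key, and include path are illustrative, and it assumes the ClusteredCursor base class exposes an init() that must be called before iteration:

    #include <iostream>
    #include <set>
    #include "parallel.h"   // include path depends on the build setup

    void dumpSortedByX( const std::set<mongo::ServerAndQuery>& servers ) {
        using namespace mongo;
        // same query sent to every server, results merge-sorted on { x : 1 }
        ParallelSortClusteredCursor cursor( servers, "test.foo",
                                            Query().sort( BSON( "x" << 1 ) ) );
        cursor.init();   // assumption: ClusteredCursor::init() prepares the per-server cursors
        while ( cursor.more() )
            std::cout << cursor.next().jsonString() << std::endl;
    }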
+
+ /**
+     * Tools for performing asynchronous operations.
+     * Currently implemented with synchronous network ops run on another thread;
+     * should be changed to use non-blocking I/O.
+ */
+ class Future {
+ public:
+ class CommandResult {
+ public:
+
+ string getServer() const { return _server; }
+
+ bool isDone() const { return _done; }
+
+ bool ok() const {
+ assert( _done );
+ return _ok;
+ }
+
+ BSONObj result() const {
+ assert( _done );
+ return _res;
+ }
+
+ /**
+ blocks until command is done
+ returns ok()
+ */
+ bool join( int maxRetries = 1 );
+
+ private:
+
+ CommandResult( const string& server , const string& db , const BSONObj& cmd , int options , DBClientBase * conn );
+ void init();
+
+ string _server;
+ string _db;
+ int _options;
+ BSONObj _cmd;
+ DBClientBase * _conn;
+ scoped_ptr<ScopedDbConnection> _connHolder; // used if not provided a connection
+
+ scoped_ptr<DBClientCursor> _cursor;
+
+ BSONObj _res;
+ bool _ok;
+ bool _done;
+
+ friend class Future;
+ };
+
+
+ /**
+ * @param server server name
+ * @param db db name
+ * @param cmd cmd to exec
+         * @param conn optional connection to use. will use a standard pooled connection if not specified
+ */
+ static shared_ptr<CommandResult> spawnCommand( const string& server , const string& db , const BSONObj& cmd , int options , DBClientBase * conn = 0 );
+ };
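Because spawnCommand() runs the command on a background thread and returns immediately, a caller can fan a command out to several servers and join the results afterwards. A hedged sketch (not part of this patch); the server list and the "ping" command are illustrative:

    #include <iostream>
    #include <string>
    #include <vector>
    #include "parallel.h"   // include path depends on the build setup

    void pingAll( const std::vector<std::string>& servers ) {
        using namespace mongo;
        std::vector< shared_ptr<Future::CommandResult> > results;
        for ( size_t i = 0; i < servers.size(); i++ )
            results.push_back( Future::spawnCommand( servers[i], "admin", BSON( "ping" << 1 ), 0 ) );
        for ( size_t i = 0; i < results.size(); i++ ) {
            results[i]->join();   // blocks until the command has run on its background thread
            std::cout << results[i]->getServer() << " ok=" << results[i]->ok() << std::endl;
        }
    }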
+
+
+}
+
+#include "undef_macros.h"
diff --git a/src/mongo/client/redef_macros.h b/src/mongo/client/redef_macros.h
new file mode 100644
index 00000000000..897912dba2b
--- /dev/null
+++ b/src/mongo/client/redef_macros.h
@@ -0,0 +1,61 @@
+/** @file redef_macros.h macros the implementation uses.
+
+ @see undef_macros.h undefines these after use to minimize name pollution.
+*/
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// If you define a new global un-prefixed macro, please add it here and in undef_macros
+
+// #pragma once // this file is intended to be processed multiple times
+
+#if defined(MONGO_MACROS_CLEANED)
+
+// util/allocator.h
+#define malloc MONGO_malloc
+#define realloc MONGO_realloc
+
+// util/assert_util.h
+#define assert MONGO_assert
+#define dassert MONGO_dassert
+#define wassert MONGO_wassert
+#define massert MONGO_massert
+#define uassert MONGO_uassert
+#define BOOST_CHECK_EXCEPTION MONGO_BOOST_CHECK_EXCEPTION
+#define DESTRUCTOR_GUARD MONGO_DESTRUCTOR_GUARD
+
+// util/goodies.h
+#define PRINT MONGO_PRINT
+#define PRINTFL MONGO_PRINTFL
+#define asctime MONGO_asctime
+#define gmtime MONGO_gmtime
+#define localtime MONGO_localtime
+#define ctime MONGO_ctime
+
+// util/debug_util.h
+#define DEV MONGO_DEV
+#define DEBUGGING MONGO_DEBUGGING
+#define SOMETIMES MONGO_SOMETIMES
+#define OCCASIONALLY MONGO_OCCASIONALLY
+#define RARELY MONGO_RARELY
+#define ONCE MONGO_ONCE
+
+// util/log.h
+#define LOG MONGO_LOG
+
+#undef MONGO_MACROS_CLEANED
+#endif
+
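This header and undef_macros.h are meant to be included as a pair around code that relies on the un-prefixed macro names, exactly as syncclusterconnection.h does later in this patch. A hypothetical header following the same sandwich pattern (illustrative only; the name and the error code are placeholders, and the driver headers such as dbclient.h are assumed to have been included first so the MONGO_* forms exist):

    // my_helper.h -- hypothetical client header using the redef/undef sandwich
    #include "redef_macros.h"     // re-expose assert/uassert/massert etc. for this header

    namespace mongo {
        inline void requirePositive( int n ) {
            uassert( 0 /* placeholder error code */, "n must be positive", n > 0 );
        }
    }

    #include "undef_macros.h"     // scrub the names again so user code keeps its own assert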
diff --git a/src/mongo/client/simple_client_demo.cpp b/src/mongo/client/simple_client_demo.cpp
new file mode 100644
index 00000000000..f4278dd4e54
--- /dev/null
+++ b/src/mongo/client/simple_client_demo.cpp
@@ -0,0 +1,54 @@
+/* simple_client_demo.cpp
+
+ See also : http://www.mongodb.org/pages/viewpage.action?pageId=133415
+
+ How to build and run:
+
+ (1) Using the mongoclient:
+ g++ simple_client_demo.cpp -lmongoclient -lboost_thread-mt -lboost_filesystem -lboost_program_options
+ ./a.out
+
+   (2) Using mongo_client_lib.cpp:
+ g++ -I .. simple_client_demo.cpp mongo_client_lib.cpp -lboost_thread-mt -lboost_filesystem
+ ./a.out
+*/
+
+#include <iostream>
+#include "dbclient.h" // the mongo c++ driver
+
+using namespace std;
+using namespace mongo;
+using namespace bson;
+
+int main() {
+ try {
+ cout << "connecting to localhost..." << endl;
+ DBClientConnection c;
+ c.connect("localhost");
+ cout << "connected ok" << endl;
+ unsigned long long count = c.count("test.foo");
+        cout << "count of existing documents in collection test.foo : " << count << endl;
+
+ bo o = BSON( "hello" << "world" );
+ c.insert("test.foo", o);
+
+ string e = c.getLastError();
+ if( !e.empty() ) {
+ cout << "insert #1 failed: " << e << endl;
+ }
+
+ // make an index with a unique key constraint
+ c.ensureIndex("test.foo", BSON("hello"<<1), /*unique*/true);
+
+ c.insert("test.foo", o); // will cause a dup key error on "hello" field
+ cout << "we expect a dup key error here:" << endl;
+ cout << " " << c.getLastErrorDetailed().toString() << endl;
+ }
+ catch(DBException& e) {
+ cout << "caught DBException " << e.toString() << endl;
+ return 1;
+ }
+
+ return 0;
+}
+
diff --git a/src/mongo/client/syncclusterconnection.cpp b/src/mongo/client/syncclusterconnection.cpp
new file mode 100644
index 00000000000..601cdcbd758
--- /dev/null
+++ b/src/mongo/client/syncclusterconnection.cpp
@@ -0,0 +1,410 @@
+// syncclusterconnection.cpp
+/*
+ * Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "pch.h"
+#include "syncclusterconnection.h"
+#include "../db/dbmessage.h"
+
+// error codes 8000-8009
+
+namespace mongo {
+
+ SyncClusterConnection::SyncClusterConnection( const list<HostAndPort> & L, double socketTimeout) : _mutex("SyncClusterConnection"), _socketTimeout( socketTimeout ) {
+ {
+ stringstream s;
+ int n=0;
+ for( list<HostAndPort>::const_iterator i = L.begin(); i != L.end(); i++ ) {
+ if( ++n > 1 ) s << ',';
+ s << i->toString();
+ }
+ _address = s.str();
+ }
+ for( list<HostAndPort>::const_iterator i = L.begin(); i != L.end(); i++ )
+ _connect( i->toString() );
+ }
+
+    SyncClusterConnection::SyncClusterConnection( string commaSeparated, double socketTimeout) : _mutex("SyncClusterConnection"), _socketTimeout( socketTimeout ) {
+        _address = commaSeparated;
+        string::size_type idx;
+        while ( ( idx = commaSeparated.find( ',' ) ) != string::npos ) {
+            string h = commaSeparated.substr( 0 , idx );
+            commaSeparated = commaSeparated.substr( idx + 1 );
+            _connect( h );
+        }
+        _connect( commaSeparated );
+ uassert( 8004 , "SyncClusterConnection needs 3 servers" , _conns.size() == 3 );
+ }
+
+ SyncClusterConnection::SyncClusterConnection( string a , string b , string c, double socketTimeout) : _mutex("SyncClusterConnection"), _socketTimeout( socketTimeout ) {
+ _address = a + "," + b + "," + c;
+ // connect to all even if not working
+ _connect( a );
+ _connect( b );
+ _connect( c );
+ }
+
+ SyncClusterConnection::SyncClusterConnection( SyncClusterConnection& prev, double socketTimeout) : _mutex("SyncClusterConnection"), _socketTimeout( socketTimeout ) {
+ assert(0);
+ }
+
+ SyncClusterConnection::~SyncClusterConnection() {
+ for ( size_t i=0; i<_conns.size(); i++ )
+ delete _conns[i];
+ _conns.clear();
+ }
+
+ bool SyncClusterConnection::prepare( string& errmsg ) {
+ _lastErrors.clear();
+ return fsync( errmsg );
+ }
+
+ bool SyncClusterConnection::fsync( string& errmsg ) {
+ bool ok = true;
+ errmsg = "";
+ for ( size_t i=0; i<_conns.size(); i++ ) {
+ BSONObj res;
+ try {
+ if ( _conns[i]->simpleCommand( "admin" , &res , "fsync" ) )
+ continue;
+ }
+ catch ( DBException& e ) {
+ errmsg += e.toString();
+ }
+ catch ( std::exception& e ) {
+ errmsg += e.what();
+ }
+ catch ( ... ) {
+ }
+ ok = false;
+ errmsg += " " + _conns[i]->toString() + ":" + res.toString();
+ }
+ return ok;
+ }
+
+ void SyncClusterConnection::_checkLast() {
+ _lastErrors.clear();
+ vector<string> errors;
+
+ for ( size_t i=0; i<_conns.size(); i++ ) {
+ BSONObj res;
+ string err;
+ try {
+ if ( ! _conns[i]->runCommand( "admin" , BSON( "getlasterror" << 1 << "fsync" << 1 ) , res ) )
+ err = "cmd failed: ";
+ }
+ catch ( std::exception& e ) {
+ err += e.what();
+ }
+ catch ( ... ) {
+ err += "unknown failure";
+ }
+ _lastErrors.push_back( res.getOwned() );
+ errors.push_back( err );
+ }
+
+ assert( _lastErrors.size() == errors.size() && _lastErrors.size() == _conns.size() );
+
+ stringstream err;
+ bool ok = true;
+
+ for ( size_t i = 0; i<_conns.size(); i++ ) {
+ BSONObj res = _lastErrors[i];
+ if ( res["ok"].trueValue() && (res["fsyncFiles"].numberInt() > 0 || res.hasElement("waited")))
+ continue;
+ ok = false;
+ err << _conns[i]->toString() << ": " << res << " " << errors[i];
+ }
+
+ if ( ok )
+ return;
+ throw UserException( 8001 , (string)"SyncClusterConnection write op failed: " + err.str() );
+ }
+
+ BSONObj SyncClusterConnection::getLastErrorDetailed(bool fsync, bool j, int w, int wtimeout) {
+ if ( _lastErrors.size() )
+ return _lastErrors[0];
+ return DBClientBase::getLastErrorDetailed(fsync,j,w,wtimeout);
+ }
+
+ void SyncClusterConnection::_connect( string host ) {
+ log() << "SyncClusterConnection connecting to [" << host << "]" << endl;
+ DBClientConnection * c = new DBClientConnection( true );
+ c->setSoTimeout( _socketTimeout );
+ string errmsg;
+ if ( ! c->connect( host , errmsg ) )
+ log() << "SyncClusterConnection connect fail to: " << host << " errmsg: " << errmsg << endl;
+ _connAddresses.push_back( host );
+ _conns.push_back( c );
+ }
+
+ bool SyncClusterConnection::callRead( Message& toSend , Message& response ) {
+ // TODO: need to save state of which one to go back to somehow...
+ return _conns[0]->callRead( toSend , response );
+ }
+
+ BSONObj SyncClusterConnection::findOne(const string &ns, const Query& query, const BSONObj *fieldsToReturn, int queryOptions) {
+
+ if ( ns.find( ".$cmd" ) != string::npos ) {
+ string cmdName = query.obj.firstElementFieldName();
+
+ int lockType = _lockType( cmdName );
+
+ if ( lockType > 0 ) { // write $cmd
+ string errmsg;
+ if ( ! prepare( errmsg ) )
+ throw UserException( 13104 , (string)"SyncClusterConnection::findOne prepare failed: " + errmsg );
+
+ vector<BSONObj> all;
+ for ( size_t i=0; i<_conns.size(); i++ ) {
+ all.push_back( _conns[i]->findOne( ns , query , 0 , queryOptions ).getOwned() );
+ }
+
+ _checkLast();
+
+ for ( size_t i=0; i<all.size(); i++ ) {
+ BSONObj temp = all[i];
+ if ( isOk( temp ) )
+ continue;
+ stringstream ss;
+ ss << "write $cmd failed on a node: " << temp.jsonString();
+ ss << " " << _conns[i]->toString();
+ ss << " ns: " << ns;
+ ss << " cmd: " << query.toString();
+ throw UserException( 13105 , ss.str() );
+ }
+
+ return all[0];
+ }
+ }
+
+ return DBClientBase::findOne( ns , query , fieldsToReturn , queryOptions );
+ }
+
+ bool SyncClusterConnection::auth(const string &dbname, const string &username, const string &password_text, string& errmsg, bool digestPassword) {
+ for (vector<DBClientConnection*>::iterator it = _conns.begin(); it < _conns.end(); it++) {
+ massert( 15848, "sync cluster of sync clusters?", (*it)->type() != ConnectionString::SYNC);
+
+ if (!(*it)->auth(dbname, username, password_text, errmsg, digestPassword)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ auto_ptr<DBClientCursor> SyncClusterConnection::query(const string &ns, Query query, int nToReturn, int nToSkip,
+ const BSONObj *fieldsToReturn, int queryOptions, int batchSize ) {
+ _lastErrors.clear();
+ if ( ns.find( ".$cmd" ) != string::npos ) {
+ string cmdName = query.obj.firstElementFieldName();
+ int lockType = _lockType( cmdName );
+ uassert( 13054 , (string)"write $cmd not supported in SyncClusterConnection::query for:" + cmdName , lockType <= 0 );
+ }
+
+ return _queryOnActive( ns , query , nToReturn , nToSkip , fieldsToReturn , queryOptions , batchSize );
+ }
+
+ bool SyncClusterConnection::_commandOnActive(const string &dbname, const BSONObj& cmd, BSONObj &info, int options ) {
+ auto_ptr<DBClientCursor> cursor = _queryOnActive( dbname + ".$cmd" , cmd , 1 , 0 , 0 , options , 0 );
+ if ( cursor->more() )
+ info = cursor->next().copy();
+ else
+ info = BSONObj();
+ return isOk( info );
+ }
+
+ auto_ptr<DBClientCursor> SyncClusterConnection::_queryOnActive(const string &ns, Query query, int nToReturn, int nToSkip,
+ const BSONObj *fieldsToReturn, int queryOptions, int batchSize ) {
+
+ for ( size_t i=0; i<_conns.size(); i++ ) {
+ try {
+ auto_ptr<DBClientCursor> cursor =
+ _conns[i]->query( ns , query , nToReturn , nToSkip , fieldsToReturn , queryOptions , batchSize );
+ if ( cursor.get() )
+ return cursor;
+ log() << "query failed to: " << _conns[i]->toString() << " no data" << endl;
+ }
+ catch ( std::exception& e ) {
+ log() << "query failed to: " << _conns[i]->toString() << " exception: " << e.what() << endl;
+ }
+ catch ( ... ) {
+ log() << "query failed to: " << _conns[i]->toString() << " exception" << endl;
+ }
+ }
+ throw UserException( 8002 , "all servers down!" );
+ }
+
+ auto_ptr<DBClientCursor> SyncClusterConnection::getMore( const string &ns, long long cursorId, int nToReturn, int options ) {
+ uassert( 10022 , "SyncClusterConnection::getMore not supported yet" , 0);
+ auto_ptr<DBClientCursor> c;
+ return c;
+ }
+
+ void SyncClusterConnection::insert( const string &ns, BSONObj obj , int flags) {
+
+ uassert( 13119 , (string)"SyncClusterConnection::insert obj has to have an _id: " + obj.jsonString() ,
+ ns.find( ".system.indexes" ) != string::npos || obj["_id"].type() );
+
+ string errmsg;
+ if ( ! prepare( errmsg ) )
+ throw UserException( 8003 , (string)"SyncClusterConnection::insert prepare failed: " + errmsg );
+
+ for ( size_t i=0; i<_conns.size(); i++ ) {
+ _conns[i]->insert( ns , obj , flags);
+ }
+
+ _checkLast();
+ }
+
+ void SyncClusterConnection::insert( const string &ns, const vector< BSONObj >& v , int flags) {
+ uassert( 10023 , "SyncClusterConnection bulk insert not implemented" , 0);
+ }
+
+ void SyncClusterConnection::remove( const string &ns , Query query, bool justOne ) {
+ string errmsg;
+ if ( ! prepare( errmsg ) )
+ throw UserException( 8020 , (string)"SyncClusterConnection::remove prepare failed: " + errmsg );
+
+ for ( size_t i=0; i<_conns.size(); i++ ) {
+ _conns[i]->remove( ns , query , justOne );
+ }
+
+ _checkLast();
+ }
+
+ void SyncClusterConnection::update( const string &ns , Query query , BSONObj obj , bool upsert , bool multi ) {
+
+ if ( upsert ) {
+ uassert( 13120 , "SyncClusterConnection::update upsert query needs _id" , query.obj["_id"].type() );
+ }
+
+ if ( _writeConcern ) {
+ string errmsg;
+ if ( ! prepare( errmsg ) )
+                throw UserException( 8005 , (string)"SyncClusterConnection::update prepare failed: " + errmsg );
+ }
+
+ for ( size_t i = 0; i < _conns.size(); i++ ) {
+ try {
+ _conns[i]->update( ns , query , obj , upsert , multi );
+ }
+            catch ( std::exception& ) {
+                if ( _writeConcern )
+                    throw; // rethrow as-is so the original exception type is preserved
+            }
+ }
+
+ if ( _writeConcern ) {
+ _checkLast();
+ assert( _lastErrors.size() > 1 );
+
+ int a = _lastErrors[0]["n"].numberInt();
+ for ( unsigned i=1; i<_lastErrors.size(); i++ ) {
+ int b = _lastErrors[i]["n"].numberInt();
+ if ( a == b )
+ continue;
+
+ throw UpdateNotTheSame( 8017 ,
+ str::stream()
+ << "update not consistent "
+ << " ns: " << ns
+ << " query: " << query.toString()
+ << " update: " << obj
+ << " gle1: " << _lastErrors[0]
+ << " gle2: " << _lastErrors[i] ,
+ _connAddresses , _lastErrors );
+ }
+ }
+ }
+
+ string SyncClusterConnection::_toString() const {
+ stringstream ss;
+ ss << "SyncClusterConnection [" << _address << "]";
+ return ss.str();
+ }
+
+ bool SyncClusterConnection::call( Message &toSend, Message &response, bool assertOk , string * actualServer ) {
+ uassert( 8006 , "SyncClusterConnection::call can only be used directly for dbQuery" ,
+ toSend.operation() == dbQuery );
+
+ DbMessage d( toSend );
+ uassert( 8007 , "SyncClusterConnection::call can't handle $cmd" , strstr( d.getns(), "$cmd" ) == 0 );
+
+ for ( size_t i=0; i<_conns.size(); i++ ) {
+ try {
+ bool ok = _conns[i]->call( toSend , response , assertOk );
+ if ( ok ) {
+ if ( actualServer )
+ *actualServer = _connAddresses[i];
+ return ok;
+ }
+ log() << "call failed to: " << _conns[i]->toString() << " no data" << endl;
+ }
+ catch ( ... ) {
+ log() << "call failed to: " << _conns[i]->toString() << " exception" << endl;
+ }
+ }
+ throw UserException( 8008 , "all servers down!" );
+ }
+
+ void SyncClusterConnection::say( Message &toSend, bool isRetry ) {
+ string errmsg;
+ if ( ! prepare( errmsg ) )
+ throw UserException( 13397 , (string)"SyncClusterConnection::say prepare failed: " + errmsg );
+
+ for ( size_t i=0; i<_conns.size(); i++ ) {
+ _conns[i]->say( toSend );
+ }
+
+ _checkLast();
+ }
+
+ void SyncClusterConnection::sayPiggyBack( Message &toSend ) {
+ assert(0);
+ }
+
+ int SyncClusterConnection::_lockType( const string& name ) {
+ {
+ scoped_lock lk(_mutex);
+ map<string,int>::iterator i = _lockTypes.find( name );
+ if ( i != _lockTypes.end() )
+ return i->second;
+ }
+
+ BSONObj info;
+ uassert( 13053 , str::stream() << "help failed: " << info , _commandOnActive( "admin" , BSON( name << "1" << "help" << 1 ) , info ) );
+
+ int lockType = info["lockType"].numberInt();
+
+ scoped_lock lk(_mutex);
+ _lockTypes[name] = lockType;
+ return lockType;
+ }
+
+ void SyncClusterConnection::killCursor( long long cursorID ) {
+ // should never need to do this
+ assert(0);
+ }
+
+    void SyncClusterConnection::setAllSoTimeouts( double socketTimeout ) {
+        _socketTimeout = socketTimeout;
+        for ( size_t i=0; i<_conns.size(); i++ )
+            if( _conns[i] ) _conns[i]->setSoTimeout( socketTimeout );
+    }
+
+}
diff --git a/src/mongo/client/syncclusterconnection.h b/src/mongo/client/syncclusterconnection.h
new file mode 100644
index 00000000000..d2374ddaa45
--- /dev/null
+++ b/src/mongo/client/syncclusterconnection.h
@@ -0,0 +1,147 @@
+// @file syncclusterconnection.h
+
+/*
+ * Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "../pch.h"
+#include "dbclient.h"
+#include "redef_macros.h"
+
+namespace mongo {
+
+ /**
+ * This is a connection to a cluster of servers that operate as one
+ * for super high durability.
+ *
+ * Write operations are two-phase. First, all nodes are asked to fsync. If successful
+ * everywhere, the write is sent everywhere and then followed by an fsync. There is no
+ * rollback if a problem occurs during the second phase. Naturally, with all these fsyncs,
+ * these operations will be quite slow -- use sparingly.
+ *
+ * Read operations are sent to a single random node.
+ *
+     * The class checks whether a command is read- or write-style: read-style commands are
+     * sent to a single node, while write-style commands go to all nodes in two phases.
+ */
+ class SyncClusterConnection : public DBClientBase {
+ public:
+ /**
+ * @param commaSeparated should be 3 hosts comma separated
+ */
+ SyncClusterConnection( const list<HostAndPort> &, double socketTimeout = 0);
+ SyncClusterConnection( string commaSeparated, double socketTimeout = 0);
+ SyncClusterConnection( string a , string b , string c, double socketTimeout = 0 );
+ ~SyncClusterConnection();
+
+ /**
+ * @return true if all servers are up and ready for writes
+ */
+ bool prepare( string& errmsg );
+
+ /**
+ * runs fsync on all servers
+ */
+ bool fsync( string& errmsg );
+
+ // --- from DBClientInterface
+
+ virtual BSONObj findOne(const string &ns, const Query& query, const BSONObj *fieldsToReturn, int queryOptions);
+
+ virtual auto_ptr<DBClientCursor> query(const string &ns, Query query, int nToReturn, int nToSkip,
+ const BSONObj *fieldsToReturn, int queryOptions, int batchSize );
+
+ virtual auto_ptr<DBClientCursor> getMore( const string &ns, long long cursorId, int nToReturn, int options );
+
+ virtual void insert( const string &ns, BSONObj obj, int flags=0);
+
+ virtual void insert( const string &ns, const vector< BSONObj >& v, int flags=0);
+
+ virtual void remove( const string &ns , Query query, bool justOne );
+
+ virtual void update( const string &ns , Query query , BSONObj obj , bool upsert , bool multi );
+
+ virtual bool call( Message &toSend, Message &response, bool assertOk , string * actualServer );
+ virtual void say( Message &toSend, bool isRetry = false );
+ virtual void sayPiggyBack( Message &toSend );
+
+ virtual void killCursor( long long cursorID );
+
+ virtual string getServerAddress() const { return _address; }
+ virtual bool isFailed() const { return false; }
+ virtual string toString() { return _toString(); }
+
+ virtual BSONObj getLastErrorDetailed(bool fsync=false, bool j=false, int w=0, int wtimeout=0);
+
+ virtual bool callRead( Message& toSend , Message& response );
+
+ virtual ConnectionString::ConnectionType type() const { return ConnectionString::SYNC; }
+
+ void setAllSoTimeouts( double socketTimeout );
+ double getSoTimeout() const { return _socketTimeout; }
+
+ virtual bool auth(const string &dbname, const string &username, const string &password_text, string& errmsg, bool digestPassword);
+
+ virtual bool lazySupported() const { return false; }
+ private:
+ SyncClusterConnection( SyncClusterConnection& prev, double socketTimeout = 0 );
+ string _toString() const;
+ bool _commandOnActive(const string &dbname, const BSONObj& cmd, BSONObj &info, int options=0);
+ auto_ptr<DBClientCursor> _queryOnActive(const string &ns, Query query, int nToReturn, int nToSkip,
+ const BSONObj *fieldsToReturn, int queryOptions, int batchSize );
+ int _lockType( const string& name );
+ void _checkLast();
+ void _connect( string host );
+
+ string _address;
+ vector<string> _connAddresses;
+ vector<DBClientConnection*> _conns;
+ map<string,int> _lockTypes;
+ mongo::mutex _mutex;
+
+ vector<BSONObj> _lastErrors;
+
+ double _socketTimeout;
+ };
+
+ class UpdateNotTheSame : public UserException {
+ public:
+ UpdateNotTheSame( int code , const string& msg , const vector<string>& addrs , const vector<BSONObj>& lastErrors )
+ : UserException( code , msg ) , _addrs( addrs ) , _lastErrors( lastErrors ) {
+ assert( _addrs.size() == _lastErrors.size() );
+ }
+
+ virtual ~UpdateNotTheSame() throw() {
+ }
+
+ unsigned size() const {
+ return _addrs.size();
+ }
+
+ pair<string,BSONObj> operator[](unsigned i) const {
+ return make_pair( _addrs[i] , _lastErrors[i] );
+ }
+
+ private:
+
+ vector<string> _addrs;
+ vector<BSONObj> _lastErrors;
+ };
+
+} // namespace mongo
+
+#include "undef_macros.h"
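A usage sketch for the class above (not part of this patch). The host names, namespace, and documents are illustrative; note that update() can throw UpdateNotTheSame when the per-node getlasterror results disagree:

    #include <iostream>
    #include "syncclusterconnection.h"   // include path depends on the build setup

    void bumpChunkSize() {
        using namespace mongo;
        // the comma-separated form requires exactly three hosts (see uassert 8004 above)
        SyncClusterConnection conn( "cfg1:27019,cfg2:27019,cfg3:27019" );
        try {
            conn.insert( "config.settings", BSON( "_id" << "chunksize" << "value" << 64 ) );
            conn.update( "config.settings",
                         QUERY( "_id" << "chunksize" ),
                         BSON( "$set" << BSON( "value" << 128 ) ),
                         /*upsert*/ false, /*multi*/ false );
        }
        catch ( UpdateNotTheSame& e ) {
            // the nodes returned different getlasterror results; show each one
            for ( unsigned i = 0; i < e.size(); i++ )
                std::cout << e[i].first << " -> " << e[i].second.jsonString() << std::endl;
        }
        catch ( DBException& e ) {
            std::cout << "clustered write failed: " << e.toString() << std::endl;
        }
    }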
diff --git a/src/mongo/client/undef_macros.h b/src/mongo/client/undef_macros.h
new file mode 100644
index 00000000000..30ece615747
--- /dev/null
+++ b/src/mongo/client/undef_macros.h
@@ -0,0 +1,61 @@
+/** @file undef_macros.h remove mongo implementation macros after using */
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// If you define a new global un-prefixed macro, please add it here and in redef_macros
+
+// #pragma once // this file is intended to be processed multiple times
+
+
+/** MONGO_EXPOSE_MACROS - when defined, indicates that you are compiling a mongo program rather
+ than just using the C++ driver.
+*/
+#if !defined(MONGO_EXPOSE_MACROS) && !defined(MONGO_MACROS_CLEANED)
+
+// util/allocator.h
+#undef malloc
+#undef realloc
+
+// util/assert_util.h
+#undef assert
+#undef dassert
+#undef wassert
+#undef massert
+#undef uassert
+#undef BOOST_CHECK_EXCEPTION
+#undef DESTRUCTOR_GUARD
+
+// util/goodies.h
+#undef PRINT
+#undef PRINTFL
+#undef asctime
+#undef gmtime
+#undef localtime
+#undef ctime
+
+// util/debug_util.h
+#undef DEV
+#undef DEBUGGING
+#undef SOMETIMES
+#undef OCCASIONALLY
+#undef RARELY
+#undef ONCE
+
+// util/log.h
+#undef LOG
+
+#define MONGO_MACROS_CLEANED
+#endif
diff --git a/src/mongo/db/background.h b/src/mongo/db/background.h
new file mode 100644
index 00000000000..ea424c97107
--- /dev/null
+++ b/src/mongo/db/background.h
@@ -0,0 +1,56 @@
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+/* background.h
+
+ Concurrency coordination for administrative operations.
+*/
+
+#pragma once
+
+namespace mongo {
+
+    /* These are administrative operations / jobs that run in the background
+       for a namespace. Only one is permitted per namespace at a time; while
+       one is in progress, other major NamespaceDetails manipulations (such as
+       dropping the ns or db) are not allowed even in the foreground and must
+       instead uassert.
+
+ It's assumed this is not for super-high RPS things, so we don't do
+ anything special in the implementation here to be fast.
+ */
+ class BackgroundOperation : public boost::noncopyable {
+ public:
+ static bool inProgForDb(const char *db);
+ static bool inProgForNs(const char *ns);
+ static void assertNoBgOpInProgForDb(const char *db);
+ static void assertNoBgOpInProgForNs(const char *ns);
+ static void dump(stringstream&);
+
+ /* check for in progress before instantiating */
+ BackgroundOperation(const char *ns);
+
+ virtual ~BackgroundOperation();
+
+ private:
+ NamespaceString _ns;
+ static map<string, unsigned> dbsInProg;
+ static set<string> nsInProg;
+ };
+
+} // namespace mongo
+
diff --git a/src/mongo/db/btree.cpp b/src/mongo/db/btree.cpp
new file mode 100644
index 00000000000..5c55fad33c3
--- /dev/null
+++ b/src/mongo/db/btree.cpp
@@ -0,0 +1,1980 @@
+// btree.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "db.h"
+#include "btree.h"
+#include "pdfile.h"
+#include "json.h"
+#include "clientcursor.h"
+#include "client.h"
+#include "dbhelpers.h"
+#include "curop-inl.h"
+#include "stats/counters.h"
+#include "dur_commitjob.h"
+#include "btreebuilder.h"
+#include "../util/unittest.h"
+#include "../server.h"
+
+namespace mongo {
+
+ BOOST_STATIC_ASSERT( Record::HeaderSize == 16 );
+ BOOST_STATIC_ASSERT( Record::HeaderSize + BtreeData_V1::BucketSize == 8192 );
+
+ NOINLINE_DECL void checkFailed(unsigned line) {
+ static time_t last;
+ if( time(0) - last >= 10 ) {
+ msgasserted(15898, str::stream() << "error in index possibly corruption consider repairing " << line);
+ }
+ }
+
+ /** data check. like assert, but gives a reasonable error message to the user. */
+#define check(expr) if(!(expr) ) { checkFailed(__LINE__); }
+
+#define VERIFYTHISLOC dassert( thisLoc.btree<V>() == this );
+
+ template< class Loc >
+ __KeyNode<Loc> & __KeyNode<Loc>::writing() const {
+ return *getDur().writing( const_cast< __KeyNode<Loc> * >( this ) );
+ }
+
+ // BucketBasics::lowWaterMark()
+ //
+ // We define this value as the maximum number of bytes such that, if we have
+ // fewer than this many bytes, we must be able to either merge with or receive
+ // keys from any neighboring node. If our utilization goes below this value we
+ // know we can bring up the utilization with a simple operation. Ignoring the
+ // 90/10 split policy which is sometimes employed and our 'unused' nodes, this
+ // is a lower bound on bucket utilization for non root buckets.
+ //
+ // Note that the exact value here depends on the implementation of
+ // rebalancedSeparatorPos(). The conditions for lowWaterMark - 1 are as
+ // follows: We know we cannot merge with the neighbor, so the total data size
+ // for us, the neighbor, and the separator must be at least
+ // BtreeBucket<V>::bodySize() + 1. We must be able to accept one key of any
+ // allowed size, so our size plus storage for that additional key must be
+ // <= BtreeBucket<V>::bodySize() / 2. This way, with the extra key we'll have a
+ // new bucket data size < half the total data size and by the implementation
+ // of rebalancedSeparatorPos() the key must be added.
+
+ static const int split_debug = 0;
+ static const int insert_debug = 0;
+
+ /**
+     * this error is ok/benign during a background index build -- the logic in pdfile checks explicitly
+ * for the 10287 error code.
+ */
+ static void alreadyInIndex() {
+ // we don't use massert() here as that does logging and this is 'benign' - see catches in _indexRecord()
+ throw MsgAssertionException(10287, "btree: key+recloc already in index");
+ }
+
+ /* BucketBasics --------------------------------------------------- */
+
+ template< class V >
+ void BucketBasics<V>::assertWritable() {
+ if( cmdLine.dur )
+ dur::assertAlreadyDeclared(this, V::BucketSize);
+ }
+
+ template< class V >
+ string BtreeBucket<V>::bucketSummary() const {
+ stringstream ss;
+ ss << " Bucket info:" << endl;
+ ss << " n: " << this->n << endl;
+ ss << " parent: " << this->parent.toString() << endl;
+ ss << " nextChild: " << this->nextChild.toString() << endl;
+ ss << " flags:" << this->flags << endl;
+ ss << " emptySize: " << this->emptySize << " topSize: " << this->topSize << endl;
+ return ss.str();
+ }
+
+ template< class V >
+ int BucketBasics<V>::Size() const {
+ return V::BucketSize;
+ }
+
+ template< class V >
+ void BucketBasics<V>::_shape(int level, stringstream& ss) const {
+ for ( int i = 0; i < level; i++ ) ss << ' ';
+ ss << "*[" << this->n << "]\n";
+ for ( int i = 0; i < this->n; i++ ) {
+ if ( !k(i).prevChildBucket.isNull() ) {
+ DiskLoc ll = k(i).prevChildBucket;
+ ll.btree<V>()->_shape(level+1,ss);
+ }
+ }
+ if ( !this->nextChild.isNull() ) {
+ DiskLoc ll = this->nextChild;
+ ll.btree<V>()->_shape(level+1,ss);
+ }
+ }
+
+ int bt_fv=0;
+ int bt_dmp=0;
+
+ template< class V >
+ void BtreeBucket<V>::dumpTree(const DiskLoc &thisLoc, const BSONObj &order) const {
+ bt_dmp=1;
+ fullValidate(thisLoc, order);
+ bt_dmp=0;
+ }
+
+ template< class V >
+ long long BtreeBucket<V>::fullValidate(const DiskLoc& thisLoc, const BSONObj &order, long long *unusedCount, bool strict, unsigned depth) const {
+ {
+ bool f = false;
+ assert( f = true );
+ massert( 10281 , "assert is misdefined", f);
+ }
+
+ killCurrentOp.checkForInterrupt();
+ this->assertValid(order, true);
+
+ if ( bt_dmp ) {
+ _log() << thisLoc.toString() << ' ';
+ ((BtreeBucket *) this)->dump(depth);
+ }
+
+ // keycount
+ long long kc = 0;
+
+ for ( int i = 0; i < this->n; i++ ) {
+ const _KeyNode& kn = this->k(i);
+
+ if ( kn.isUsed() ) {
+ kc++;
+ }
+ else {
+ if ( unusedCount ) {
+ ++( *unusedCount );
+ }
+ }
+ if ( !kn.prevChildBucket.isNull() ) {
+ DiskLoc left = kn.prevChildBucket;
+ const BtreeBucket *b = left.btree<V>();
+ if ( strict ) {
+ assert( b->parent == thisLoc );
+ }
+ else {
+ wassert( b->parent == thisLoc );
+ }
+ kc += b->fullValidate(kn.prevChildBucket, order, unusedCount, strict, depth+1);
+ }
+ }
+ if ( !this->nextChild.isNull() ) {
+ DiskLoc ll = this->nextChild;
+ const BtreeBucket *b = ll.btree<V>();
+ if ( strict ) {
+ assert( b->parent == thisLoc );
+ }
+ else {
+ wassert( b->parent == thisLoc );
+ }
+ kc += b->fullValidate(this->nextChild, order, unusedCount, strict, depth+1);
+ }
+
+ return kc;
+ }
+
+ int nDumped = 0;
+
+ template< class V >
+ void BucketBasics<V>::assertValid(const Ordering &order, bool force) const {
+ if ( !debug && !force )
+ return;
+ {
+ int foo = this->n;
+ wassert( foo >= 0 && this->n < Size() );
+ foo = this->emptySize;
+ wassert( foo >= 0 && this->emptySize < V::BucketSize );
+ wassert( this->topSize >= this->n && this->topSize <= V::BucketSize );
+ }
+
+ // this is very slow so don't do often
+ {
+ static int _k;
+ if( ++_k % 128 )
+ return;
+ }
+
+ DEV {
+ // slow:
+ for ( int i = 0; i < this->n-1; i++ ) {
+ Key k1 = keyNode(i).key;
+ Key k2 = keyNode(i+1).key;
+ int z = k1.woCompare(k2, order); //OK
+ if ( z > 0 ) {
+ out() << "ERROR: btree key order corrupt. Keys:" << endl;
+ if ( ++nDumped < 5 ) {
+ for ( int j = 0; j < this->n; j++ ) {
+ out() << " " << keyNode(j).key.toString() << endl;
+ }
+ ((BtreeBucket<V> *) this)->dump();
+ }
+ wassert(false);
+ break;
+ }
+ else if ( z == 0 ) {
+ if ( !(k(i).recordLoc < k(i+1).recordLoc) ) {
+ out() << "ERROR: btree key order corrupt (recordloc's wrong):" << endl;
+ out() << " k(" << i << ")" << keyNode(i).key.toString() << " RL:" << k(i).recordLoc.toString() << endl;
+ out() << " k(" << i+1 << ")" << keyNode(i+1).key.toString() << " RL:" << k(i+1).recordLoc.toString() << endl;
+ wassert( k(i).recordLoc < k(i+1).recordLoc );
+ }
+ }
+ }
+ }
+ else {
+ //faster:
+ if ( this->n > 1 ) {
+ Key k1 = keyNode(0).key;
+ Key k2 = keyNode(this->n-1).key;
+ int z = k1.woCompare(k2, order);
+ //wassert( z <= 0 );
+ if ( z > 0 ) {
+ problem() << "btree keys out of order" << '\n';
+ ONCE {
+ ((BtreeBucket<V> *) this)->dump();
+ }
+ assert(false);
+ }
+ }
+ }
+ }
+
+ template< class V >
+ inline void BucketBasics<V>::markUnused(int keypos) {
+ assert( keypos >= 0 && keypos < this->n );
+ k(keypos).setUnused();
+ }
+
+ template< class V >
+ inline int BucketBasics<V>::totalDataSize() const {
+ return (int) (Size() - (this->data-(char*)this));
+ }
+
+ template< class V >
+ void BucketBasics<V>::init() {
+ this->_init();
+ this->parent.Null();
+ this->nextChild.Null();
+ this->flags = Packed;
+ this->n = 0;
+ this->emptySize = totalDataSize();
+ this->topSize = 0;
+ }
+
+ /** see _alloc */
+ template< class V >
+ inline void BucketBasics<V>::_unalloc(int bytes) {
+ this->topSize -= bytes;
+ this->emptySize += bytes;
+ }
+
+ /**
+ * we allocate space from the end of the buffer for data.
+ * the keynodes grow from the front.
+ */
+ template< class V >
+ inline int BucketBasics<V>::_alloc(int bytes) {
+ assert( this->emptySize >= bytes );
+ this->topSize += bytes;
+ this->emptySize -= bytes;
+ int ofs = totalDataSize() - this->topSize;
+ assert( ofs > 0 );
+ return ofs;
+ }
+
+ template< class V >
+ void BucketBasics<V>::_delKeyAtPos(int keypos, bool mayEmpty) {
+ // TODO This should be keypos < n
+ assert( keypos >= 0 && keypos <= this->n );
+ assert( childForPos(keypos).isNull() );
+ // TODO audit cases where nextChild is null
+ assert( ( mayEmpty && this->n > 0 ) || this->n > 1 || this->nextChild.isNull() );
+ this->emptySize += sizeof(_KeyNode);
+ this->n--;
+ for ( int j = keypos; j < this->n; j++ )
+ k(j) = k(j+1);
+ setNotPacked();
+ }
+
+ /**
+ * pull rightmost key from the bucket. this version requires its right child to be null so it
+ * does not bother returning that value.
+ */
+ template< class V >
+ void BucketBasics<V>::popBack(DiskLoc& recLoc, Key &key) {
+ massert( 10282 , "n==0 in btree popBack()", this->n > 0 );
+ assert( k(this->n-1).isUsed() ); // no unused skipping in this function at this point - btreebuilder doesn't require that
+ KeyNode kn = keyNode(this->n-1);
+ recLoc = kn.recordLoc;
+ key.assign(kn.key);
+ int keysize = kn.key.dataSize();
+
+ massert( 10283 , "rchild not null in btree popBack()", this->nextChild.isNull());
+
+ // weirdly, we also put the rightmost down pointer in nextchild, even when bucket isn't full.
+ this->nextChild = kn.prevChildBucket;
+
+ this->n--;
+ // This is risky because the key we are returning points to this unalloc'ed memory,
+ // and we are assuming that the last key points to the last allocated
+ // bson region.
+ this->emptySize += sizeof(_KeyNode);
+ _unalloc(keysize);
+ }
+
+ /** add a key. must be > all existing. be careful to set next ptr right. */
+ template< class V >
+ bool BucketBasics<V>::_pushBack(const DiskLoc recordLoc, const Key& key, const Ordering &order, const DiskLoc prevChild) {
+ int bytesNeeded = key.dataSize() + sizeof(_KeyNode);
+ if ( bytesNeeded > this->emptySize )
+ return false;
+ assert( bytesNeeded <= this->emptySize );
+ if( this->n ) {
+ const KeyNode klast = keyNode(this->n-1);
+ if( klast.key.woCompare(key, order) > 0 ) {
+ log() << "btree bucket corrupt? consider reindexing or running validate command" << endl;
+ log() << " klast: " << keyNode(this->n-1).key.toString() << endl;
+ log() << " key: " << key.toString() << endl;
+ DEV klast.key.woCompare(key, order);
+ assert(false);
+ }
+ }
+ this->emptySize -= sizeof(_KeyNode);
+ _KeyNode& kn = k(this->n++);
+ kn.prevChildBucket = prevChild;
+ kn.recordLoc = recordLoc;
+ kn.setKeyDataOfs( (short) _alloc(key.dataSize()) );
+ short ofs = kn.keyDataOfs();
+ char *p = dataAt(ofs);
+ memcpy(p, key.data(), key.dataSize());
+
+ return true;
+ }
+
+ /* durability note
+ we do separate intent declarations herein. arguably one could just declare
+ the whole bucket given we do group commits. this is something we could investigate
+ later as to what is faster under what situations.
+ */
+ /** insert a key in a bucket with no complexity -- no splits required
+ @return false if a split is required.
+ */
+ template< class V >
+ bool BucketBasics<V>::basicInsert(const DiskLoc thisLoc, int &keypos, const DiskLoc recordLoc, const Key& key, const Ordering &order) const {
+ check( this->n < 1024 );
+ check( keypos >= 0 && keypos <= this->n );
+ int bytesNeeded = key.dataSize() + sizeof(_KeyNode);
+ if ( bytesNeeded > this->emptySize ) {
+ _pack(thisLoc, order, keypos);
+ if ( bytesNeeded > this->emptySize )
+ return false;
+ }
+
+ BucketBasics *b;
+ {
+ const char *p = (const char *) &k(keypos);
+ const char *q = (const char *) &k(this->n+1);
+ // declare that we will write to [k(keypos),k(n)]
+ // todo: this writes a medium amount to the journal. we may want to add a verb "shift" to the redo log so
+ // we can log a very small amount.
+ b = (BucketBasics*) getDur().writingAtOffset((void *) this, p-(char*)this, q-p);
+
+ // e.g. n==3, keypos==2
+ // 1 4 9
+ // ->
+ // 1 4 _ 9
+ for ( int j = this->n; j > keypos; j-- ) // make room
+ b->k(j) = b->k(j-1);
+ }
+
+ getDur().declareWriteIntent(&b->emptySize, sizeof(this->emptySize)+sizeof(this->topSize)+sizeof(this->n));
+ b->emptySize -= sizeof(_KeyNode);
+ b->n++;
+
+ // This _KeyNode was marked for writing above.
+ _KeyNode& kn = b->k(keypos);
+ kn.prevChildBucket.Null();
+ kn.recordLoc = recordLoc;
+ kn.setKeyDataOfs((short) b->_alloc(key.dataSize()) );
+ char *p = b->dataAt(kn.keyDataOfs());
+ getDur().declareWriteIntent(p, key.dataSize());
+ memcpy(p, key.data(), key.dataSize());
+ return true;
+ }
+
+ /**
+ * With this implementation, refPos == 0 disregards effect of refPos.
+ * index > 0 prevents creation of an empty bucket.
+ */
+ template< class V >
+ bool BucketBasics<V>::mayDropKey( int index, int refPos ) const {
+ return index > 0 && ( index != refPos ) && k( index ).isUnused() && k( index ).prevChildBucket.isNull();
+ }
+
+ template< class V >
+ int BucketBasics<V>::packedDataSize( int refPos ) const {
+ if ( this->flags & Packed ) {
+ return V::BucketSize - this->emptySize - headerSize();
+ }
+ int size = 0;
+ for( int j = 0; j < this->n; ++j ) {
+ if ( mayDropKey( j, refPos ) ) {
+ continue;
+ }
+ size += keyNode( j ).key.dataSize() + sizeof( _KeyNode );
+ }
+ return size;
+ }
+
+ /**
+ * when we delete things we just leave empty space until the node is
+ * full and then we repack it.
+ */
+ template< class V >
+ void BucketBasics<V>::_pack(const DiskLoc thisLoc, const Ordering &order, int &refPos) const {
+ if ( this->flags & Packed )
+ return;
+
+ VERIFYTHISLOC
+
+ /** TODO perhaps this can be optimized. for example if packing does no write, we can skip intent decl.
+            an empirical approach is probably better than just adding new code : perhaps the bucket would need
+ declaration anyway within the group commit interval, in which case we would just be adding
+ code and complexity without benefit.
+ */
+ thisLoc.btreemod<V>()->_packReadyForMod(order, refPos);
+ }
+
+ /** version when write intent already declared */
+ template< class V >
+ void BucketBasics<V>::_packReadyForMod( const Ordering &order, int &refPos ) {
+ assertWritable();
+
+ if ( this->flags & Packed )
+ return;
+
+ int tdz = totalDataSize();
+ char temp[V::BucketSize];
+ int ofs = tdz;
+ this->topSize = 0;
+ int i = 0;
+ for ( int j = 0; j < this->n; j++ ) {
+ if( mayDropKey( j, refPos ) ) {
+ continue; // key is unused and has no children - drop it
+ }
+ if( i != j ) {
+ if ( refPos == j ) {
+ refPos = i; // i < j so j will never be refPos again
+ }
+ k( i ) = k( j );
+ }
+ short ofsold = k(i).keyDataOfs();
+ int sz = keyNode(i).key.dataSize();
+ ofs -= sz;
+ this->topSize += sz;
+ memcpy(temp+ofs, dataAt(ofsold), sz);
+ k(i).setKeyDataOfsSavingUse( ofs );
+ ++i;
+ }
+ if ( refPos == this->n ) {
+ refPos = i;
+ }
+ this->n = i;
+ int dataUsed = tdz - ofs;
+ memcpy(this->data + ofs, temp + ofs, dataUsed);
+
+ // assertWritable();
+ // TEMP TEST getDur().declareWriteIntent(this, sizeof(*this));
+
+ this->emptySize = tdz - dataUsed - this->n * sizeof(_KeyNode);
+ {
+ int foo = this->emptySize;
+ assert( foo >= 0 );
+ }
+
+ setPacked();
+
+ assertValid( order );
+ }
+
+ template< class V >
+ inline void BucketBasics<V>::truncateTo(int N, const Ordering &order, int &refPos) {
+ d.dbMutex.assertWriteLocked();
+ assertWritable();
+
+ this->n = N;
+ setNotPacked();
+ _packReadyForMod( order, refPos );
+ }
+
+ /**
+ * In the standard btree algorithm, we would split based on the
+ * existing keys _and_ the new key. But that's more work to
+ * implement, so we split the existing keys and then add the new key.
+ *
+ * There are several published heuristic algorithms for doing splits,
+ * but basically what you want are (1) even balancing between the two
+ * sides and (2) a small split key so the parent can have a larger
+ * branching factor.
+ *
+ * We just have a simple algorithm right now: if a key includes the
+ * halfway point (or 10% way point) in terms of bytes, split on that key;
+ * otherwise split on the key immediately to the left of the halfway
+ * point (or 10% point).
+ *
+ * This function is expected to be called on a packed bucket.
+ */
+ template< class V >
+ int BucketBasics<V>::splitPos( int keypos ) const {
+ assert( this->n > 2 );
+ int split = 0;
+ int rightSize = 0;
+ // when splitting a btree node, if the new key is greater than all the other keys, we should not do an even split, but a 90/10 split.
+ // see SERVER-983
+ // TODO I think we only want to do the 90% split on the rhs node of the tree.
+ int rightSizeLimit = ( this->topSize + sizeof( _KeyNode ) * this->n ) / ( keypos == this->n ? 10 : 2 );
+ for( int i = this->n - 1; i > -1; --i ) {
+ rightSize += keyNode( i ).key.dataSize() + sizeof( _KeyNode );
+ if ( rightSize > rightSizeLimit ) {
+ split = i;
+ break;
+ }
+ }
+ // safeguards - we must not create an empty bucket
+ if ( split < 1 ) {
+ split = 1;
+ }
+ else if ( split > this->n - 2 ) {
+ split = this->n - 2;
+ }
+
+ return split;
+ }
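To see the 90/10 heuristic in isolation: with equally sized keys and a new key that sorts after everything already in the bucket (keypos == n), rightSizeLimit is one tenth of the packed size, so nearly all existing keys stay on the left and the new right-hand bucket starts almost empty, which suits monotonically increasing inserts. A standalone illustration of the same loop (not MongoDB code; sizes are made up):

    #include <iostream>

    int splitPosDemo( int n, int keyBytes, int keyNodeBytes, bool insertingAtEnd ) {
        int total = n * ( keyBytes + keyNodeBytes );
        int rightSizeLimit = total / ( insertingAtEnd ? 10 : 2 );
        int rightSize = 0, split = 0;
        for ( int i = n - 1; i > -1; --i ) {
            rightSize += keyBytes + keyNodeBytes;
            if ( rightSize > rightSizeLimit ) { split = i; break; }
        }
        // same safeguards as above: never create an empty bucket
        if ( split < 1 ) split = 1;
        if ( split > n - 2 ) split = n - 2;
        return split;
    }

    int main() {
        // 100 equal-sized keys: ~90/10 split when appending, ~50/50 otherwise
        std::cout << splitPosDemo( 100, 32, 16, true )  << "\n";   // prints 89
        std::cout << splitPosDemo( 100, 32, 16, false ) << "\n";   // prints 49
    }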
+
+ template< class V >
+ void BucketBasics<V>::reserveKeysFront( int nAdd ) {
+ assert( this->emptySize >= int( sizeof( _KeyNode ) * nAdd ) );
+ this->emptySize -= sizeof( _KeyNode ) * nAdd;
+ for( int i = this->n - 1; i > -1; --i ) {
+ k( i + nAdd ) = k( i );
+ }
+ this->n += nAdd;
+ }
+
+ template< class V >
+ void BucketBasics<V>::setKey( int i, const DiskLoc recordLoc, const Key &key, const DiskLoc prevChildBucket ) {
+ _KeyNode &kn = k( i );
+ kn.recordLoc = recordLoc;
+ kn.prevChildBucket = prevChildBucket;
+ short ofs = (short) _alloc( key.dataSize() );
+ kn.setKeyDataOfs( ofs );
+ char *p = dataAt( ofs );
+ memcpy( p, key.data(), key.dataSize() );
+ }
+
+ template< class V >
+ void BucketBasics<V>::dropFront( int nDrop, const Ordering &order, int &refpos ) {
+ for( int i = nDrop; i < this->n; ++i ) {
+ k( i - nDrop ) = k( i );
+ }
+ this->n -= nDrop;
+ setNotPacked();
+ _packReadyForMod( order, refpos );
+ }
+
+ /* - BtreeBucket --------------------------------------------------- */
+
+    /** Find the largest key in the subtree; the bucket and key index are returned via largestLoc and largestKey. */
+ template< class V >
+ void BtreeBucket<V>::findLargestKey(const DiskLoc& thisLoc, DiskLoc& largestLoc, int& largestKey) {
+ DiskLoc loc = thisLoc;
+ while ( 1 ) {
+ const BtreeBucket *b = loc.btree<V>();
+ if ( !b->nextChild.isNull() ) {
+ loc = b->nextChild;
+ continue;
+ }
+
+ assert(b->n>0);
+ largestLoc = loc;
+ largestKey = b->n-1;
+
+ break;
+ }
+ }
+
+ /**
+ * NOTE Currently the Ordering implementation assumes a compound index will
+ * not have more keys than an unsigned variable has bits. The same
+ * assumption is used in the implementation below with respect to the 'mask'
+ * variable.
+ *
+ * @param l a regular bsonobj
+ * @param rBegin composed partly of an existing bsonobj, and the remaining keys are taken from a vector of elements that frequently changes
+ *
+ * see
+ * jstests/index_check6.js
+ * https://jira.mongodb.org/browse/SERVER-371
+ */
+ /* static */
+ template< class V >
+ int BtreeBucket<V>::customBSONCmp( const BSONObj &l, const BSONObj &rBegin, int rBeginLen, bool rSup, const vector< const BSONElement * > &rEnd, const vector< bool > &rEndInclusive, const Ordering &o, int direction ) {
+ BSONObjIterator ll( l );
+ BSONObjIterator rr( rBegin );
+ vector< const BSONElement * >::const_iterator rr2 = rEnd.begin();
+ vector< bool >::const_iterator inc = rEndInclusive.begin();
+ unsigned mask = 1;
+ for( int i = 0; i < rBeginLen; ++i, mask <<= 1 ) {
+ BSONElement lll = ll.next();
+ BSONElement rrr = rr.next();
+ ++rr2;
+ ++inc;
+
+ int x = lll.woCompare( rrr, false );
+ if ( o.descending( mask ) )
+ x = -x;
+ if ( x != 0 )
+ return x;
+ }
+ if ( rSup ) {
+ return -direction;
+ }
+ for( ; ll.more(); mask <<= 1 ) {
+ BSONElement lll = ll.next();
+ BSONElement rrr = **rr2;
+ ++rr2;
+ int x = lll.woCompare( rrr, false );
+ if ( o.descending( mask ) )
+ x = -x;
+ if ( x != 0 )
+ return x;
+ if ( !*inc ) {
+ return -direction;
+ }
+ ++inc;
+ }
+ return 0;
+ }
+
+ template< class V >
+ bool BtreeBucket<V>::exists(const IndexDetails& idx, const DiskLoc &thisLoc, const Key& key, const Ordering& order) const {
+ int pos;
+ bool found;
+ DiskLoc b = locate(idx, thisLoc, key, order, pos, found, minDiskLoc);
+
+ // skip unused keys
+ while ( 1 ) {
+ if( b.isNull() )
+ break;
+ const BtreeBucket *bucket = b.btree<V>();
+ const _KeyNode& kn = bucket->k(pos);
+ if ( kn.isUsed() )
+ return bucket->keyAt(pos).woEqual(key);
+ b = bucket->advance(b, pos, 1, "BtreeBucket<V>::exists");
+ }
+ return false;
+ }
+
+ template< class V >
+ bool BtreeBucket<V>::wouldCreateDup(
+ const IndexDetails& idx, const DiskLoc &thisLoc,
+ const Key& key, const Ordering& order,
+ const DiskLoc &self) const {
+ int pos;
+ bool found;
+ DiskLoc b = locate(idx, thisLoc, key, order, pos, found, minDiskLoc);
+
+ while ( !b.isNull() ) {
+ // we skip unused keys
+ const BtreeBucket *bucket = b.btree<V>();
+ const _KeyNode& kn = bucket->k(pos);
+ if ( kn.isUsed() ) {
+ if( bucket->keyAt(pos).woEqual(key) )
+ return kn.recordLoc != self;
+ break;
+ }
+ b = bucket->advance(b, pos, 1, "BtreeBucket<V>::dupCheck");
+ }
+
+ return false;
+ }
+
+ template< class V >
+ string BtreeBucket<V>::dupKeyError( const IndexDetails& idx , const Key& key ) {
+ stringstream ss;
+ ss << "E11000 duplicate key error ";
+ ss << "index: " << idx.indexNamespace() << " ";
+ ss << "dup key: " << key.toString();
+ return ss.str();
+ }
+
+ /**
+     * Find a key within this btree bucket.
+ *
+ * When duplicate keys are allowed, we use the DiskLoc of the record as if it were part of the
+ * key. That assures that even when there are many duplicates (e.g., 1 million) for a key,
+ * our performance is still good.
+ *
+ * assertIfDup: if the key exists (ignoring the recordLoc), uassert
+ *
+ * pos: for existing keys k0...kn-1.
+ * returns # it goes BEFORE. so key[pos-1] < key < key[pos]
+ * returns n if it goes after the last existing key.
+ * note result might be an Unused location!
+ */
+
+ bool guessIncreasing = false;
+ template< class V >
+ bool BtreeBucket<V>::find(const IndexDetails& idx, const Key& key, const DiskLoc &rl,
+ const Ordering &order, int& pos, bool assertIfDup) const {
+ Loc recordLoc;
+ recordLoc = rl;
+ globalIndexCounters.btree( (char*)this );
+
+ // binary search for this key
+ bool dupsChecked = false;
+ int l=0;
+ int h=this->n-1;
+ int m = (l+h)/2;
+ if( guessIncreasing ) {
+ m = h;
+ }
+ while ( l <= h ) {
+ KeyNode M = this->keyNode(m);
+ int x = key.woCompare(M.key, order);
+ if ( x == 0 ) {
+ if( assertIfDup ) {
+ if( k(m).isUnused() ) {
+ // ok that key is there if unused. but we need to check that there aren't other
+ // entries for the key then. as it is very rare that we get here, we don't put any
+ // coding effort in here to make this particularly fast
+ if( !dupsChecked ) {
+ dupsChecked = true;
+ if( idx.head.btree<V>()->exists(idx, idx.head, key, order) ) {
+ if( idx.head.btree<V>()->wouldCreateDup(idx, idx.head, key, order, recordLoc) )
+ uasserted( ASSERT_ID_DUPKEY , dupKeyError( idx , key ) );
+ else
+ alreadyInIndex();
+ }
+ }
+ }
+ else {
+ if( M.recordLoc == recordLoc )
+ alreadyInIndex();
+ uasserted( ASSERT_ID_DUPKEY , dupKeyError( idx , key ) );
+ }
+ }
+
+ // dup keys allowed. use recordLoc as if it is part of the key
+ Loc unusedRL = M.recordLoc;
+ unusedRL.GETOFS() &= ~1; // so we can test equality without the used bit messing us up
+ x = recordLoc.compare(unusedRL);
+ }
+ if ( x < 0 ) // key < M.key
+ h = m-1;
+ else if ( x > 0 )
+ l = m+1;
+ else {
+ // found it.
+ pos = m;
+ return true;
+ }
+ m = (l+h)/2;
+ }
+ // not found
+ pos = l;
+ if ( pos != this->n ) {
+ Key keyatpos = keyNode(pos).key;
+ wassert( key.woCompare(keyatpos, order) <= 0 );
+ if ( pos > 0 ) {
+ if( !( keyNode(pos-1).key.woCompare(key, order) <= 0 ) ) {
+ DEV {
+ log() << key.toString() << endl;
+ log() << keyNode(pos-1).key.toString() << endl;
+ }
+ wassert(false);
+ }
+ }
+ }
+
+ return false;
+ }
+
+ template< class V >
+ void BtreeBucket<V>::delBucket(const DiskLoc thisLoc, const IndexDetails& id) {
+ ClientCursor::informAboutToDeleteBucket(thisLoc); // slow...
+ assert( !isHead() );
+
+ DiskLoc ll = this->parent;
+ const BtreeBucket *p = ll.btree<V>();
+ int parentIdx = indexInParent( thisLoc );
+ p->childForPos( parentIdx ).writing().Null();
+ deallocBucket( thisLoc, id );
+ }
+
+ template< class V >
+ void BtreeBucket<V>::deallocBucket(const DiskLoc thisLoc, const IndexDetails &id) {
+#if 0
+ // as a temporary defensive measure, we zap the whole bucket, AND don't truly delete
+ // it (meaning it is ineligible for reuse).
+ memset(this, 0, Size());
+#else
+ // defensive:
+ this->n = -1;
+ this->parent.Null();
+ string ns = id.indexNamespace();
+ theDataFileMgr._deleteRecord(nsdetails(ns.c_str()), ns.c_str(), thisLoc.rec(), thisLoc);
+#endif
+ }
+
+    /** note: may delete the entire bucket!  'this' may be invalid upon return in that case. */
+ template< class V >
+ void BtreeBucket<V>::delKeyAtPos( const DiskLoc thisLoc, IndexDetails& id, int p, const Ordering &order) {
+ assert(this->n>0);
+ DiskLoc left = this->childForPos(p);
+
+ if ( this->n == 1 ) {
+ if ( left.isNull() && this->nextChild.isNull() ) {
+ this->_delKeyAtPos(p);
+ if ( isHead() ) {
+ // we don't delete the top bucket ever
+ }
+ else {
+ if ( !mayBalanceWithNeighbors( thisLoc, id, order ) ) {
+ // An empty bucket is only allowed as a transient state. If
+ // there are no neighbors to balance with, we delete ourself.
+ // This condition is only expected in legacy btrees.
+ delBucket(thisLoc, id);
+ }
+ }
+ return;
+ }
+ deleteInternalKey( thisLoc, p, id, order );
+ return;
+ }
+
+ if ( left.isNull() ) {
+ this->_delKeyAtPos(p);
+ mayBalanceWithNeighbors( thisLoc, id, order );
+ }
+ else {
+ deleteInternalKey( thisLoc, p, id, order );
+ }
+ }
+
+ /**
+ * This function replaces the specified key (k) by either the prev or next
+ * key in the btree (k'). We require that k have either a left or right
+ * child. If k has a left child, we set k' to the prev key of k, which must
+ * be a leaf present in the left child. If k does not have a left child, we
+ * set k' to the next key of k, which must be a leaf present in the right
+ * child. When we replace k with k', we copy k' over k (which may cause a
+ * split) and then remove k' from its original location. Because k' is
+     * stored in a descendant of k, replacing k by k' will not modify the
+ * storage location of the original k', and we can easily remove k' from
+ * its original location.
+ *
+ * This function is only needed in cases where k has a left or right child;
+ * in other cases a simpler key removal implementation is possible.
+ *
+ * NOTE on legacy btree structures:
+ * In legacy btrees, k' can be a nonleaf. In such a case we 'delete' k by
+ * marking it as an unused node rather than replacing it with k'. Also, k'
+ * may be a leaf but marked as an unused node. In such a case we replace
+ * k by k', preserving the key's unused marking. This function is only
+ * expected to mark a key as unused when handling a legacy btree.
+ */
+ template< class V >
+ void BtreeBucket<V>::deleteInternalKey( const DiskLoc thisLoc, int keypos, IndexDetails &id, const Ordering &order ) {
+ DiskLoc lchild = this->childForPos( keypos );
+ DiskLoc rchild = this->childForPos( keypos + 1 );
+ assert( !lchild.isNull() || !rchild.isNull() );
+ int advanceDirection = lchild.isNull() ? 1 : -1;
+ int advanceKeyOfs = keypos;
+ DiskLoc advanceLoc = advance( thisLoc, advanceKeyOfs, advanceDirection, __FUNCTION__ );
+        // advanceLoc must be a descendant of thisLoc, because thisLoc has a
+ // child in the proper direction and all descendants of thisLoc must be
+ // nonempty because they are not the root.
+
+ if ( !advanceLoc.btree<V>()->childForPos( advanceKeyOfs ).isNull() ||
+ !advanceLoc.btree<V>()->childForPos( advanceKeyOfs + 1 ).isNull() ) {
+ // only expected with legacy btrees, see note above
+ this->markUnused( keypos );
+ return;
+ }
+
+ KeyNode kn = advanceLoc.btree<V>()->keyNode( advanceKeyOfs );
+ // Because advanceLoc is a descendant of thisLoc, updating thisLoc will
+ // not affect packing or keys of advanceLoc and kn will be stable
+ // during the following setInternalKey()
+ setInternalKey( thisLoc, keypos, kn.recordLoc, kn.key, order, this->childForPos( keypos ), this->childForPos( keypos + 1 ), id );
+ advanceLoc.btreemod<V>()->delKeyAtPos( advanceLoc, id, advanceKeyOfs, order );
+ }
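+
+ /*
+ * A hypothetical worked example (the keys and positions below are made up, not
+ * taken from the code): suppose an internal bucket holds the keys [10 | 20 | 30]
+ * and delKeyAtPos() is called for 20 at keypos 1. Since 20 has a left child,
+ * advance() walks with direction -1 to the rightmost used key of that left
+ * subtree, say 17 (the k' of the comment above). setInternalKey() then copies
+ * 17 over 20 in this bucket, and delKeyAtPos() is called recursively to remove
+ * 17 from the leaf it came from, possibly triggering a balance or merge there.
+ */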
+
+//#define BTREE(loc) (static_cast<DiskLoc>(loc).btree<V>())
+#define BTREE(loc) (loc.template btree<V>())
+//#define BTREEMOD(loc) (static_cast<DiskLoc>(loc).btreemod<V>())
+#define BTREEMOD(loc) (loc.template btreemod<V>())
+
+ template< class V >
+ void BtreeBucket<V>::replaceWithNextChild( const DiskLoc thisLoc, IndexDetails &id ) {
+ assert( this->n == 0 && !this->nextChild.isNull() );
+ if ( this->parent.isNull() ) {
+ assert( id.head == thisLoc );
+ id.head.writing() = this->nextChild;
+ }
+ else {
+ DiskLoc ll = this->parent;
+ ll.btree<V>()->childForPos( indexInParent( thisLoc ) ).writing() = this->nextChild;
+ }
+ BTREE(this->nextChild)->parent.writing() = this->parent;
+ ClientCursor::informAboutToDeleteBucket( thisLoc );
+ deallocBucket( thisLoc, id );
+ }
+
+ template< class V >
+ bool BtreeBucket<V>::canMergeChildren( const DiskLoc &thisLoc, int leftIndex ) const {
+ assert( leftIndex >= 0 && leftIndex < this->n );
+ DiskLoc leftNodeLoc = this->childForPos( leftIndex );
+ DiskLoc rightNodeLoc = this->childForPos( leftIndex + 1 );
+ if ( leftNodeLoc.isNull() || rightNodeLoc.isNull() ) {
+ // TODO if this situation is possible in long term implementation, maybe we should compact somehow anyway
+ return false;
+ }
+ int pos = 0;
+ {
+ const BtreeBucket *l = leftNodeLoc.btree<V>();
+ const BtreeBucket *r = rightNodeLoc.btree<V>();
+ if ( ( this->headerSize() + l->packedDataSize( pos ) + r->packedDataSize( pos ) + keyNode( leftIndex ).key.dataSize() + sizeof(_KeyNode) > unsigned( V::BucketSize ) ) ) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ /**
+ * This implementation must respect the meaning and value of lowWaterMark.
+ * Also see comments in splitPos().
+ */
+ template< class V >
+ int BtreeBucket<V>::rebalancedSeparatorPos( const DiskLoc &thisLoc, int leftIndex ) const {
+ int split = -1;
+ int rightSize = 0;
+ const BtreeBucket *l = BTREE(this->childForPos( leftIndex ));
+ const BtreeBucket *r = BTREE(this->childForPos( leftIndex + 1 ));
+
+ int KNS = sizeof( _KeyNode );
+ int rightSizeLimit = ( l->topSize + l->n * KNS + keyNode( leftIndex ).key.dataSize() + KNS + r->topSize + r->n * KNS ) / 2;
+ // This constraint should be ensured by only calling this function
+ // if we go below the low water mark.
+ assert( rightSizeLimit < BtreeBucket<V>::bodySize() );
+ for( int i = r->n - 1; i > -1; --i ) {
+ rightSize += r->keyNode( i ).key.dataSize() + KNS;
+ if ( rightSize > rightSizeLimit ) {
+ split = l->n + 1 + i;
+ break;
+ }
+ }
+ if ( split == -1 ) {
+ rightSize += keyNode( leftIndex ).key.dataSize() + KNS;
+ if ( rightSize > rightSizeLimit ) {
+ split = l->n;
+ }
+ }
+ if ( split == -1 ) {
+ for( int i = l->n - 1; i > -1; --i ) {
+ rightSize += l->keyNode( i ).key.dataSize() + KNS;
+ if ( rightSize > rightSizeLimit ) {
+ split = i;
+ break;
+ }
+ }
+ }
+ // safeguards - we must not create an empty bucket
+ if ( split < 1 ) {
+ split = 1;
+ }
+ else if ( split > l->n + 1 + r->n - 2 ) {
+ split = l->n + 1 + r->n - 2;
+ }
+
+ return split;
+ }
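+
+ /*
+ * A rough worked example with made up sizes: suppose the left child holds 3 keys
+ * totalling 300 bytes of key data, the right child holds 1 key of 60 bytes, and
+ * the separator key in this bucket is 40 bytes. With KNS = sizeof(_KeyNode),
+ * rightSizeLimit is (300 + 3*KNS + 40 + KNS + 60 + 1*KNS) / 2, i.e. half of the
+ * combined packed size. Keys are accumulated from the right end of the combined
+ * sequence (right child keys, then the separator, then left child keys) until
+ * that limit is exceeded, and the index where it is exceeded becomes the new
+ * separator position, clamped so that neither resulting child ends up empty.
+ */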
+
+ template< class V >
+ void BtreeBucket<V>::doMergeChildren( const DiskLoc thisLoc, int leftIndex, IndexDetails &id, const Ordering &order ) {
+ DiskLoc leftNodeLoc = this->childForPos( leftIndex );
+ DiskLoc rightNodeLoc = this->childForPos( leftIndex + 1 );
+ BtreeBucket *l = leftNodeLoc.btreemod<V>();
+ BtreeBucket *r = rightNodeLoc.btreemod<V>();
+ int pos = 0;
+ l->_packReadyForMod( order, pos );
+ r->_packReadyForMod( order, pos ); // pack r in case there are droppable keys
+
+ // We know the additional keys below will fit in l because canMergeChildren()
+ // must be true.
+ int oldLNum = l->n;
+ {
+ KeyNode kn = keyNode( leftIndex );
+ l->pushBack( kn.recordLoc, kn.key, order, l->nextChild ); // left child's right child becomes old parent key's left child
+ }
+ for( int i = 0; i < r->n; ++i ) {
+ KeyNode kn = r->keyNode( i );
+ l->pushBack( kn.recordLoc, kn.key, order, kn.prevChildBucket );
+ }
+ l->nextChild = r->nextChild;
+ l->fixParentPtrs( leftNodeLoc, oldLNum );
+ r->delBucket( rightNodeLoc, id );
+ this->childForPos( leftIndex + 1 ) = leftNodeLoc;
+ this->childForPos( leftIndex ) = DiskLoc();
+ this->_delKeyAtPos( leftIndex, true );
+ if ( this->n == 0 ) {
+ // will trash this and thisLoc
+ // TODO To ensure all leaves are of equal height, we should ensure
+ // this is only called on the root.
+ replaceWithNextChild( thisLoc, id );
+ }
+ else {
+ // balance recursively - maybe we should do this even when n == 0?
+ mayBalanceWithNeighbors( thisLoc, id, order );
+ }
+ }
+
+ template< class V >
+ int BtreeBucket<V>::indexInParent( const DiskLoc &thisLoc ) const {
+ assert( !this->parent.isNull() );
+ const BtreeBucket *p = BTREE(this->parent);
+ if ( p->nextChild == thisLoc ) {
+ return p->n;
+ }
+ else {
+ for( int i = 0; i < p->n; ++i ) {
+ if ( p->k( i ).prevChildBucket == thisLoc ) {
+ return i;
+ }
+ }
+ }
+ out() << "ERROR: can't find ref to child bucket.\n";
+ out() << "child: " << thisLoc << "\n";
+ dump();
+ out() << "Parent: " << this->parent << "\n";
+ p->dump();
+ assert(false);
+ return -1; // just to compile
+ }
+
+ template< class V >
+ bool BtreeBucket<V>::tryBalanceChildren( const DiskLoc thisLoc, int leftIndex, IndexDetails &id, const Ordering &order ) const {
+ // If we can merge, then we must merge rather than balance to preserve
+ // bucket utilization constraints.
+ if ( canMergeChildren( thisLoc, leftIndex ) ) {
+ return false;
+ }
+ thisLoc.btreemod<V>()->doBalanceChildren( thisLoc, leftIndex, id, order );
+ return true;
+ }
+
+ template< class V >
+ void BtreeBucket<V>::doBalanceLeftToRight( const DiskLoc thisLoc, int leftIndex, int split,
+ BtreeBucket *l, const DiskLoc lchild,
+ BtreeBucket *r, const DiskLoc rchild,
+ IndexDetails &id, const Ordering &order ) {
+ // TODO maybe do some audits the same way pushBack() does?
+ // As a precondition, rchild + the old separator are <= half a body size,
+ // and lchild is at most completely full. Based on the value of split,
+ // rchild will get <= half of the total bytes which is at most 75%
+ // of a full body. So rchild will have room for the following keys:
+ int rAdd = l->n - split;
+ r->reserveKeysFront( rAdd );
+ for( int i = split + 1, j = 0; i < l->n; ++i, ++j ) {
+ KeyNode kn = l->keyNode( i );
+ r->setKey( j, kn.recordLoc, kn.key, kn.prevChildBucket );
+ }
+ {
+ KeyNode kn = keyNode( leftIndex );
+ r->setKey( rAdd - 1, kn.recordLoc, kn.key, l->nextChild ); // left child's right child becomes old parent key's left child
+ }
+ r->fixParentPtrs( rchild, 0, rAdd - 1 );
+ {
+ KeyNode kn = l->keyNode( split );
+ l->nextChild = kn.prevChildBucket;
+ // Because lchild is a descendant of thisLoc, updating thisLoc will
+ // not affect packing or keys of lchild and kn will be stable
+ // during the following setInternalKey()
+ setInternalKey( thisLoc, leftIndex, kn.recordLoc, kn.key, order, lchild, rchild, id );
+ }
+ int zeropos = 0;
+ // lchild and rchild cannot be merged, so there must be >0 (actually more)
+ // keys to the left of split.
+ l->truncateTo( split, order, zeropos );
+ }
+
+ template< class V >
+ void BtreeBucket<V>::doBalanceRightToLeft( const DiskLoc thisLoc, int leftIndex, int split,
+ BtreeBucket *l, const DiskLoc lchild,
+ BtreeBucket *r, const DiskLoc rchild,
+ IndexDetails &id, const Ordering &order ) {
+ // As a precondition, lchild + the old separator are <= half a body size,
+ // and rchild is at most completely full. Based on the value of split,
+ // lchild will get less than half of the total bytes which is at most 75%
+ // of a full body. So lchild will have room for the following keys:
+ int lN = l->n;
+ {
+ KeyNode kn = keyNode( leftIndex );
+ l->pushBack( kn.recordLoc, kn.key, order, l->nextChild ); // left child's right child becomes old parent key's left child
+ }
+ for( int i = 0; i < split - lN - 1; ++i ) {
+ KeyNode kn = r->keyNode( i );
+ l->pushBack( kn.recordLoc, kn.key, order, kn.prevChildBucket );
+ }
+ {
+ KeyNode kn = r->keyNode( split - lN - 1 );
+ l->nextChild = kn.prevChildBucket;
+ // Child lN was lchild's old nextChild, so we don't need to fix that one.
+ l->fixParentPtrs( lchild, lN + 1, l->n );
+ // Because rchild is a descendant of thisLoc, updating thisLoc will
+ // not affect packing or keys of rchild and kn will be stable
+ // during the following setInternalKey()
+ setInternalKey( thisLoc, leftIndex, kn.recordLoc, kn.key, order, lchild, rchild, id );
+ }
+ int zeropos = 0;
+ // lchild and rchild cannot be merged, so there must be >0 (actually more)
+ // keys to the right of split.
+ r->dropFront( split - lN, order, zeropos );
+ }
+
+ template< class V >
+ void BtreeBucket<V>::doBalanceChildren( const DiskLoc thisLoc, int leftIndex, IndexDetails &id, const Ordering &order ) {
+ DiskLoc lchild = this->childForPos( leftIndex );
+ DiskLoc rchild = this->childForPos( leftIndex + 1 );
+ int zeropos = 0;
+ BtreeBucket *l = lchild.btreemod<V>();
+ l->_packReadyForMod( order, zeropos );
+ BtreeBucket *r = rchild.btreemod<V>();
+ r->_packReadyForMod( order, zeropos );
+ int split = rebalancedSeparatorPos( thisLoc, leftIndex );
+
+ // By definition, if we are below the low water mark and cannot merge
+ // then we must actively balance.
+ assert( split != l->n );
+ if ( split < l->n ) {
+ doBalanceLeftToRight( thisLoc, leftIndex, split, l, lchild, r, rchild, id, order );
+ }
+ else {
+ doBalanceRightToLeft( thisLoc, leftIndex, split, l, lchild, r, rchild, id, order );
+ }
+ }
+
+ template< class V >
+ bool BtreeBucket<V>::mayBalanceWithNeighbors( const DiskLoc thisLoc, IndexDetails &id, const Ordering &order ) const {
+ if ( this->parent.isNull() ) { // we are root, there are no neighbors
+ return false;
+ }
+
+ if ( this->packedDataSize( 0 ) >= this->lowWaterMark() ) {
+ return false;
+ }
+
+ const BtreeBucket *p = BTREE(this->parent);
+ int parentIdx = indexInParent( thisLoc );
+
+ // TODO will missing neighbor case be possible long term? Should we try to merge/balance somehow in that case if so?
+ bool mayBalanceRight = ( ( parentIdx < p->n ) && !p->childForPos( parentIdx + 1 ).isNull() );
+ bool mayBalanceLeft = ( ( parentIdx > 0 ) && !p->childForPos( parentIdx - 1 ).isNull() );
+
+ // Balance if possible on one side - we merge only if absolutely necessary
+ // to preserve btree bucket utilization constraints since that's a more
+ // heavy duty operation (especially if we must re-split later).
+ if ( mayBalanceRight &&
+ p->tryBalanceChildren( this->parent, parentIdx, id, order ) ) {
+ return true;
+ }
+ if ( mayBalanceLeft &&
+ p->tryBalanceChildren( this->parent, parentIdx - 1, id, order ) ) {
+ return true;
+ }
+
+ BtreeBucket *pm = BTREEMOD(this->parent);
+ if ( mayBalanceRight ) {
+ pm->doMergeChildren( this->parent, parentIdx, id, order );
+ return true;
+ }
+ else if ( mayBalanceLeft ) {
+ pm->doMergeChildren( this->parent, parentIdx - 1, id, order );
+ return true;
+ }
+
+ return false;
+ }
+
+ /** remove a key from the index */
+ template< class V >
+ bool BtreeBucket<V>::unindex(const DiskLoc thisLoc, IndexDetails& id, const BSONObj& key, const DiskLoc recordLoc ) const {
+ int pos;
+ bool found;
+ const Ordering ord = Ordering::make(id.keyPattern());
+ DiskLoc loc = locate(id, thisLoc, key, ord, pos, found, recordLoc, 1);
+ if ( found ) {
+ if ( key.objsize() > this->KeyMax ) {
+ OCCASIONALLY problem() << "unindex: key too large to index but was found for " << id.indexNamespace() << " reIndex suggested" << endl;
+ }
+ loc.btreemod<V>()->delKeyAtPos(loc, id, pos, ord);
+ return true;
+ }
+ return false;
+ }
+
+ template< class V >
+ inline void BtreeBucket<V>::fix(const DiskLoc thisLoc, const DiskLoc child) {
+ if ( !child.isNull() ) {
+ if ( insert_debug )
+ out() << " fix " << child.toString() << ".parent=" << thisLoc.toString() << endl;
+ child.btree<V>()->parent.writing() = thisLoc;
+ }
+ }
+
+ /**
+ * This can cause a lot of additional page writes when we assign buckets to
+ * different parents. Maybe get rid of parent ptrs?
+ */
+ template< class V >
+ void BtreeBucket<V>::fixParentPtrs(const DiskLoc thisLoc, int firstIndex, int lastIndex) const {
+ VERIFYTHISLOC
+ if ( lastIndex == -1 ) {
+ lastIndex = this->n;
+ }
+ for ( int i = firstIndex; i <= lastIndex; i++ ) {
+ fix(thisLoc, this->childForPos(i));
+ }
+ }
+
+ template< class V >
+ void BtreeBucket<V>::setInternalKey( const DiskLoc thisLoc, int keypos,
+ const DiskLoc recordLoc, const Key &key, const Ordering &order,
+ const DiskLoc lchild, const DiskLoc rchild, IndexDetails &idx ) {
+ this->childForPos( keypos ).Null();
+
+ // This may leave the bucket empty (n == 0) which is ok only as a
+ // transient state. In the instant case, the implementation of
+ // insertHere behaves correctly when n == 0 and as a side effect
+ // increments n.
+ this->_delKeyAtPos( keypos, true );
+
+ // Ensure we do not orphan neighbor's old child.
+ assert( this->childForPos( keypos ) == rchild );
+
+ // Just set temporarily - required to pass validation in insertHere()
+ this->childForPos( keypos ) = lchild;
+
+ insertHere( thisLoc, keypos, recordLoc, key, order, lchild, rchild, idx );
+ }
+
+ /**
+ * insert a key in this bucket, splitting if necessary.
+ * @keypos - where to insert the key in range 0..n. 0=make leftmost, n=make rightmost.
+ * NOTE this function may free some data, and as a result the value passed for keypos may
+ * be invalid after calling insertHere()
+ *
+ * Some of the write intent signaling below relies on the implementation of
+ * the optimized write intent code in basicInsert().
+ */
+ template< class V >
+ void BtreeBucket<V>::insertHere( const DiskLoc thisLoc, int keypos,
+ const DiskLoc recordLoc, const Key& key, const Ordering& order,
+ const DiskLoc lchild, const DiskLoc rchild, IndexDetails& idx) const {
+ if ( insert_debug )
+ out() << " " << thisLoc.toString() << ".insertHere " << key.toString() << '/' << recordLoc.toString() << ' '
+ << lchild.toString() << ' ' << rchild.toString() << " keypos:" << keypos << endl;
+
+ if ( !this->basicInsert(thisLoc, keypos, recordLoc, key, order) ) {
+ // If basicInsert() fails, the bucket will be packed as required by split().
+ thisLoc.btreemod<V>()->split(thisLoc, keypos, recordLoc, key, order, lchild, rchild, idx);
+ return;
+ }
+
+ {
+ const _KeyNode *_kn = &k(keypos);
+ _KeyNode *kn = (_KeyNode *) getDur().alreadyDeclared((_KeyNode*) _kn); // already declared intent in basicInsert()
+ if ( keypos+1 == this->n ) { // last key
+ if ( this->nextChild != lchild ) {
+ out() << "ERROR nextChild != lchild" << endl;
+ out() << " thisLoc: " << thisLoc.toString() << ' ' << idx.indexNamespace() << endl;
+ out() << " keyPos: " << keypos << " n:" << this->n << endl;
+ out() << " nextChild: " << this->nextChild.toString() << " lchild: " << lchild.toString() << endl;
+ out() << " recordLoc: " << recordLoc.toString() << " rchild: " << rchild.toString() << endl;
+ out() << " key: " << key.toString() << endl;
+ dump();
+ assert(false);
+ }
+ kn->prevChildBucket = this->nextChild;
+ assert( kn->prevChildBucket == lchild );
+ this->nextChild.writing() = rchild;
+ if ( !rchild.isNull() )
+ BTREE(rchild)->parent.writing() = thisLoc;
+ }
+ else {
+ kn->prevChildBucket = lchild;
+ if ( k(keypos+1).prevChildBucket != lchild ) {
+ out() << "ERROR k(keypos+1).prevChildBucket != lchild" << endl;
+ out() << " thisLoc: " << thisLoc.toString() << ' ' << idx.indexNamespace() << endl;
+ out() << " keyPos: " << keypos << " n:" << this->n << endl;
+ out() << " k(keypos+1).pcb: " << k(keypos+1).prevChildBucket.toString() << " lchild: " << lchild.toString() << endl;
+ out() << " recordLoc: " << recordLoc.toString() << " rchild: " << rchild.toString() << endl;
+ out() << " key: " << key.toString() << endl;
+ dump();
+ assert(false);
+ }
+ const Loc *pc = &k(keypos+1).prevChildBucket;
+ *getDur().alreadyDeclared( const_cast<Loc*>(pc) ) = rchild; // declared in basicInsert()
+ if ( !rchild.isNull() )
+ rchild.btree<V>()->parent.writing() = thisLoc;
+ }
+ return;
+ }
+ }
+
+ template< class V >
+ void BtreeBucket<V>::split(const DiskLoc thisLoc, int keypos, const DiskLoc recordLoc, const Key& key, const Ordering& order, const DiskLoc lchild, const DiskLoc rchild, IndexDetails& idx) {
+ this->assertWritable();
+
+ if ( split_debug )
+ out() << " " << thisLoc.toString() << ".split" << endl;
+
+ int split = this->splitPos( keypos );
+ DiskLoc rLoc = addBucket(idx);
+ BtreeBucket *r = rLoc.btreemod<V>();
+ if ( split_debug )
+ out() << " split:" << split << ' ' << keyNode(split).key.toString() << " n:" << this->n << endl;
+ for ( int i = split+1; i < this->n; i++ ) {
+ KeyNode kn = keyNode(i);
+ r->pushBack(kn.recordLoc, kn.key, order, kn.prevChildBucket);
+ }
+ r->nextChild = this->nextChild;
+ r->assertValid( order );
+
+ if ( split_debug )
+ out() << " new rLoc:" << rLoc.toString() << endl;
+ r = 0;
+ rLoc.btree<V>()->fixParentPtrs(rLoc);
+
+ {
+ KeyNode splitkey = keyNode(split);
+ this->nextChild = splitkey.prevChildBucket; // splitkey key gets promoted, its children will be thisLoc (l) and rLoc (r)
+ if ( split_debug ) {
+ out() << " splitkey key:" << splitkey.key.toString() << endl;
+ }
+
+ // Because thisLoc is a descendant of parent, updating parent will
+ // not affect packing or keys of thisLoc and splitkey will be stable
+ // during the following:
+
+ // promote splitkey to a parent node
+ if ( this->parent.isNull() ) {
+ // make a new parent if we were the root
+ DiskLoc L = addBucket(idx);
+ BtreeBucket *p = L.btreemod<V>();
+ p->pushBack(splitkey.recordLoc, splitkey.key, order, thisLoc);
+ p->nextChild = rLoc;
+ p->assertValid( order );
+ this->parent = idx.head.writing() = L;
+ if ( split_debug )
+ out() << " we were root, making new root:" << hex << this->parent.getOfs() << dec << endl;
+ rLoc.btree<V>()->parent.writing() = this->parent;
+ }
+ else {
+ // set this before calling _insert - if it splits it will do fixParent() logic and change the value.
+ rLoc.btree<V>()->parent.writing() = this->parent;
+ if ( split_debug )
+ out() << " promoting splitkey key " << splitkey.key.toString() << endl;
+ BTREE(this->parent)->_insert(this->parent, splitkey.recordLoc, splitkey.key, order, /*dupsallowed*/true, thisLoc, rLoc, idx);
+ }
+ }
+
+ int newpos = keypos;
+ // note this may trash splitkey.key. thus we had to promote it before finishing up here.
+ this->truncateTo(split, order, newpos);
+
+ // add our new key, there is room now
+ {
+ if ( keypos <= split ) {
+ if ( split_debug )
+ out() << " keypos<split, insertHere() the new key" << endl;
+ insertHere(thisLoc, newpos, recordLoc, key, order, lchild, rchild, idx);
+ }
+ else {
+ int kp = keypos-split-1;
+ assert(kp>=0);
+ BTREE(rLoc)->insertHere(rLoc, kp, recordLoc, key, order, lchild, rchild, idx);
+ }
+ }
+
+ if ( split_debug )
+ out() << " split end " << hex << thisLoc.getOfs() << dec << endl;
+ }
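+
+ /*
+ * A rough sketch of a split with made up keys: a full leaf [1 3 5 7 9] whose
+ * splitPos() comes back as 2 keeps [1 3] in thisLoc, moves [7 9] into the newly
+ * allocated rLoc, and promotes 5 into the parent with thisLoc as its left child
+ * and rLoc as its right child. If thisLoc was the root, a new root bucket is
+ * created holding only the promoted key. Finally the pending key is inserted
+ * into whichever of the two halves its (adjusted) keypos falls in.
+ */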
+
+ /** start a new index off, empty */
+ template< class V >
+ DiskLoc BtreeBucket<V>::addBucket(const IndexDetails& id) {
+ string ns = id.indexNamespace();
+ DiskLoc loc = theDataFileMgr.insert(ns.c_str(), 0, V::BucketSize, true);
+ BtreeBucket *b = BTREEMOD(loc);
+ b->init();
+ return loc;
+ }
+
+ void renameIndexNamespace(const char *oldNs, const char *newNs) {
+ renameNamespace( oldNs, newNs );
+ }
+
+ template< class V >
+ const DiskLoc BtreeBucket<V>::getHead(const DiskLoc& thisLoc) const {
+ DiskLoc p = thisLoc;
+ while ( !BTREE(p)->isHead() )
+ p = BTREE(p)->parent;
+ return p;
+ }
+
+ template< class V >
+ DiskLoc BtreeBucket<V>::advance(const DiskLoc& thisLoc, int& keyOfs, int direction, const char *caller) const {
+ if ( keyOfs < 0 || keyOfs >= this->n ) {
+ out() << "ASSERT failure BtreeBucket<V>::advance, caller: " << caller << endl;
+ out() << " thisLoc: " << thisLoc.toString() << endl;
+ out() << " keyOfs: " << keyOfs << " n:" << this->n << " direction: " << direction << endl;
+ out() << bucketSummary() << endl;
+ assert(false);
+ }
+ int adj = direction < 0 ? 1 : 0;
+ int ko = keyOfs + direction;
+ DiskLoc nextDown = this->childForPos(ko+adj);
+ if ( !nextDown.isNull() ) {
+ while ( 1 ) {
+ keyOfs = direction>0 ? 0 : BTREE(nextDown)->n - 1;
+ DiskLoc loc = BTREE(nextDown)->childForPos(keyOfs + adj);
+ if ( loc.isNull() )
+ break;
+ nextDown = loc;
+ }
+ return nextDown;
+ }
+
+ if ( ko < this->n && ko >= 0 ) {
+ keyOfs = ko;
+ return thisLoc;
+ }
+
+ // end of bucket. traverse back up.
+ DiskLoc childLoc = thisLoc;
+ DiskLoc ancestor = this->parent;
+ while ( 1 ) {
+ if ( ancestor.isNull() )
+ break;
+ const BtreeBucket *an = BTREE(ancestor);
+ for ( int i = 0; i < an->n; i++ ) {
+ if ( an->childForPos(i+adj) == childLoc ) {
+ keyOfs = i;
+ return ancestor;
+ }
+ }
+ assert( direction<0 || an->nextChild == childLoc );
+ // parent exhausted also, keep going up
+ childLoc = ancestor;
+ ancestor = an->parent;
+ }
+
+ return DiskLoc();
+ }
+
+ template< class V >
+ DiskLoc BtreeBucket<V>::locate(const IndexDetails& idx, const DiskLoc& thisLoc, const BSONObj& key, const Ordering &order, int& pos, bool& found, const DiskLoc &recordLoc, int direction) const {
+ KeyOwned k(key);
+ return locate(idx, thisLoc, k, order, pos, found, recordLoc, direction);
+ }
+
+ template< class V >
+ DiskLoc BtreeBucket<V>::locate(const IndexDetails& idx, const DiskLoc& thisLoc, const Key& key, const Ordering &order, int& pos, bool& found, const DiskLoc &recordLoc, int direction) const {
+ int p;
+ found = find(idx, key, recordLoc, order, p, /*assertIfDup*/ false);
+ if ( found ) {
+ pos = p;
+ return thisLoc;
+ }
+
+ DiskLoc child = this->childForPos(p);
+
+ if ( !child.isNull() ) {
+ DiskLoc l = BTREE(child)->locate(idx, child, key, order, pos, found, recordLoc, direction);
+ if ( !l.isNull() )
+ return l;
+ }
+
+ pos = p;
+ if ( direction < 0 )
+ return --pos == -1 ? DiskLoc() /*theend*/ : thisLoc;
+ else
+ return pos == this->n ? DiskLoc() /*theend*/ : thisLoc;
+ }
+
+ template< class V >
+ bool BtreeBucket<V>::customFind( int l, int h, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int direction, DiskLoc &thisLoc, int &keyOfs, pair< DiskLoc, int > &bestParent ) {
+ const BtreeBucket<V> * bucket = BTREE(thisLoc);
+ while( 1 ) {
+ if ( l + 1 == h ) {
+ keyOfs = ( direction > 0 ) ? h : l;
+ DiskLoc next = bucket->k( h ).prevChildBucket;
+ if ( !next.isNull() ) {
+ bestParent = make_pair( thisLoc, keyOfs );
+ thisLoc = next;
+ return true;
+ }
+ else {
+ return false;
+ }
+ }
+ int m = l + ( h - l ) / 2;
+ int cmp = customBSONCmp( bucket->keyNode( m ).key.toBson(), keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction );
+ if ( cmp < 0 ) {
+ l = m;
+ }
+ else if ( cmp > 0 ) {
+ h = m;
+ }
+ else {
+ if ( direction < 0 ) {
+ l = m;
+ }
+ else {
+ h = m;
+ }
+ }
+ }
+ }
+
+ /**
+ * find the smallest/biggest value greater-equal/less-equal than the one specified
+ * the starting thisLoc + keyOfs will be strictly less than/strictly greater than the keyBegin/keyBeginLen/keyEnd target
+ * All the direction checks below allow forward and reverse traversal to share one implementation, but separate forward and reverse implementations might be more efficient
+ */
+ template< class V >
+ void BtreeBucket<V>::advanceTo(DiskLoc &thisLoc, int &keyOfs, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int direction ) const {
+ int l,h;
+ bool dontGoUp;
+ if ( direction > 0 ) {
+ l = keyOfs;
+ h = this->n - 1;
+ dontGoUp = ( customBSONCmp( keyNode( h ).key.toBson(), keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) >= 0 );
+ }
+ else {
+ l = 0;
+ h = keyOfs;
+ dontGoUp = ( customBSONCmp( keyNode( l ).key.toBson(), keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) <= 0 );
+ }
+ pair< DiskLoc, int > bestParent;
+ if ( dontGoUp ) {
+ // this comparison result assures h > l
+ if ( !customFind( l, h, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction, thisLoc, keyOfs, bestParent ) ) {
+ return;
+ }
+ }
+ else {
+ // go up parents until rightmost/leftmost node is >=/<= target or at top
+ while( !BTREE(thisLoc)->parent.isNull() ) {
+ thisLoc = BTREE(thisLoc)->parent;
+ if ( direction > 0 ) {
+ if ( customBSONCmp( BTREE(thisLoc)->keyNode( BTREE(thisLoc)->n - 1 ).key.toBson(), keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) >= 0 ) {
+ break;
+ }
+ }
+ else {
+ if ( customBSONCmp( BTREE(thisLoc)->keyNode( 0 ).key.toBson(), keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction ) <= 0 ) {
+ break;
+ }
+ }
+ }
+ }
+ customLocate( thisLoc, keyOfs, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction, bestParent );
+ }
+
+ /** @param thisLoc in/out param. perhaps thisLoc isn't the best name given that.
+ It is used by advanceTo(), which skips
+ from one key to another key without necessarily checking all the keys
+ between them in the btree (it can skip to different btree buckets).
+ The advanceTo() function can get called a lot, and for the different
+ targets we want to advance to, we don't want to create a bson obj in a
+ new buffer each time we call that function. The
+ customLocate() function is needed by advanceTo(); it does the same thing
+ as the normal locate() function but takes basically the same arguments
+ as advanceTo().
+ */
+ template< class V >
+ void BtreeBucket<V>::customLocate(DiskLoc &locInOut, int &keyOfs, const BSONObj &keyBegin, int keyBeginLen, bool afterKey,
+ const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive,
+ const Ordering &order, int direction, pair< DiskLoc, int > &bestParent ) {
+ dassert( direction == 1 || direction == -1 );
+ const BtreeBucket<V> *bucket = BTREE(locInOut);
+ if ( bucket->n == 0 ) {
+ locInOut = DiskLoc();
+ return;
+ }
+ // go down until find smallest/biggest >=/<= target
+ while( 1 ) {
+ int l = 0;
+ int h = bucket->n - 1;
+
+ // +direction: 0, -direction: h
+ int z = (1-direction)/2*h;
+
+ // leftmost/rightmost key may possibly be >=/<= search key
+ int res = customBSONCmp( bucket->keyNode( z ).key.toBson(), keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction );
+ bool firstCheck = direction*res >= 0;
+
+ if ( firstCheck ) {
+ DiskLoc next;
+ keyOfs = z;
+ if ( direction > 0 ) {
+ dassert( z == 0 );
+ next = bucket->k( 0 ).prevChildBucket;
+ }
+ else {
+ next = bucket->nextChild;
+ }
+ if ( !next.isNull() ) {
+ bestParent = pair< DiskLoc, int >( locInOut, keyOfs );
+ locInOut = next;
+ bucket = BTREE(locInOut);
+ continue;
+ }
+ else {
+ return;
+ }
+ }
+
+ res = customBSONCmp( bucket->keyNode( h-z ).key.toBson(), keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction );
+ bool secondCheck = direction*res < 0;
+
+ if ( secondCheck ) {
+ DiskLoc next;
+ if ( direction > 0 ) {
+ next = bucket->nextChild;
+ }
+ else {
+ next = bucket->k( 0 ).prevChildBucket;
+ }
+ if ( next.isNull() ) {
+ // if bestParent is null, we've hit the end and locInOut gets set to DiskLoc()
+ locInOut = bestParent.first;
+ keyOfs = bestParent.second;
+ return;
+ }
+ else {
+ locInOut = next;
+ bucket = BTREE(locInOut);
+ continue;
+ }
+ }
+
+ if ( !customFind( l, h, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction, locInOut, keyOfs, bestParent ) ) {
+ return;
+ }
+ bucket = BTREE(locInOut);
+ }
+ }
+
+ /** @thisLoc disk location of *this */
+ template< class V >
+ void BtreeBucket<V>::insertStepOne(DiskLoc thisLoc,
+ Continuation<V>& c,
+ bool dupsAllowed) const {
+ dassert( c.key.dataSize() <= this->KeyMax );
+ assert( c.key.dataSize() > 0 );
+
+ int pos;
+ bool found = find(c.idx, c.key, c.recordLoc, c.order, pos, !dupsAllowed);
+
+ if ( found ) {
+ const _KeyNode& kn = k(pos);
+ if ( kn.isUnused() ) {
+ log(4) << "btree _insert: reusing unused key" << endl;
+ c.b = this;
+ c.pos = pos;
+ c.op = Continuation<V>::SetUsed;
+ return;
+ }
+
+ DEV {
+ log() << "_insert(): key already exists in index (ok for background:true)\n";
+ log() << " " << c.idx.indexNamespace() << " thisLoc:" << thisLoc.toString() << '\n';
+ log() << " " << c.key.toString() << '\n';
+ log() << " " << "recordLoc:" << c.recordLoc.toString() << " pos:" << pos << endl;
+ log() << " old l r: " << this->childForPos(pos).toString() << ' ' << this->childForPos(pos+1).toString() << endl;
+ }
+ alreadyInIndex();
+ }
+
+ Loc ch = this->childForPos(pos);
+ DiskLoc child = ch;
+
+ if ( child.isNull() ) {
+ // A new key will be inserted at the same tree height as an adjacent existing key.
+ c.bLoc = thisLoc;
+ c.b = this;
+ c.pos = pos;
+ c.op = Continuation<V>::InsertHere;
+ return;
+ }
+
+ child.btree<V>()->insertStepOne(child, c, dupsAllowed);
+ }
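+
+ /*
+ * Note on the two step insert path (a summary of the code above, not additional
+ * behavior): insertStepOne() only descends the tree and records its decision in
+ * the Continuation -- either SetUsed, when an unused key at the found position
+ * can simply be reactivated, or InsertHere, when the key belongs in this bucket
+ * at position pos. The actual modification is left to the continuation's second
+ * step, so the descent itself performs no writes.
+ */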
+
+ /** @thisLoc disk location of *this */
+ template< class V >
+ int BtreeBucket<V>::_insert(const DiskLoc thisLoc, const DiskLoc recordLoc,
+ const Key& key, const Ordering &order, bool dupsAllowed,
+ const DiskLoc lChild, const DiskLoc rChild, IndexDetails& idx) const {
+ if ( key.dataSize() > this->KeyMax ) {
+ problem() << "ERROR: key too large len:" << key.dataSize() << " max:" << this->KeyMax << ' ' << key.dataSize() << ' ' << idx.indexNamespace() << endl;
+ return 2;
+ }
+ assert( key.dataSize() > 0 );
+
+ int pos;
+ bool found = find(idx, key, recordLoc, order, pos, !dupsAllowed);
+ if ( insert_debug ) {
+ out() << " " << thisLoc.toString() << '.' << "_insert " <<
+ key.toString() << '/' << recordLoc.toString() <<
+ " l:" << lChild.toString() << " r:" << rChild.toString() << endl;
+ out() << " found:" << found << " pos:" << pos << " n:" << this->n << endl;
+ }
+
+ if ( found ) {
+ const _KeyNode& kn = k(pos);
+ if ( kn.isUnused() ) {
+ log(4) << "btree _insert: reusing unused key" << endl;
+ massert( 10285 , "_insert: reuse key but lchild is not null", lChild.isNull());
+ massert( 10286 , "_insert: reuse key but rchild is not null", rChild.isNull());
+ kn.writing().setUsed();
+ return 0;
+ }
+
+ DEV {
+ log() << "_insert(): key already exists in index (ok for background:true)\n";
+ log() << " " << idx.indexNamespace() << " thisLoc:" << thisLoc.toString() << '\n';
+ log() << " " << key.toString() << '\n';
+ log() << " " << "recordLoc:" << recordLoc.toString() << " pos:" << pos << endl;
+ log() << " old l r: " << this->childForPos(pos).toString() << ' ' << this->childForPos(pos+1).toString() << endl;
+ log() << " new l r: " << lChild.toString() << ' ' << rChild.toString() << endl;
+ }
+ alreadyInIndex();
+ }
+
+ DEBUGGING out() << "TEMP: key: " << key.toString() << endl;
+ Loc ch = this->childForPos(pos);
+ DiskLoc child = ch;
+ if ( insert_debug )
+ out() << " getChild(" << pos << "): " << child.toString() << endl;
+ // In current usage, rChild is null for a new key and non null when we are
+ // promoting a split key. These are the only two cases where _insert()
+ // is called currently.
+ if ( child.isNull() || !rChild.isNull() ) {
+ // A new key will be inserted at the same tree height as an adjacent existing key.
+ insertHere(thisLoc, pos, recordLoc, key, order, lChild, rChild, idx);
+ return 0;
+ }
+
+ return child.btree<V>()->_insert(child, recordLoc, key, order, dupsAllowed, /*lchild*/DiskLoc(), /*rchild*/DiskLoc(), idx);
+ }
+
+ template< class V >
+ void BtreeBucket<V>::dump(unsigned depth) const {
+ string indent = string(depth, ' ');
+ _log() << "BUCKET n:" << this->n;
+ _log() << " parent:" << hex << this->parent.getOfs() << dec;
+ for ( int i = 0; i < this->n; i++ ) {
+ _log() << '\n' << indent;
+ KeyNode k = keyNode(i);
+ string ks = k.key.toString();
+ _log() << " " << hex << k.prevChildBucket.getOfs() << '\n';
+ _log() << indent << " " << i << ' ' << ks.substr(0, 30) << " Loc:" << k.recordLoc.toString() << dec;
+ if ( this->k(i).isUnused() )
+ _log() << " UNUSED";
+ }
+ _log() << "\n" << indent << " " << hex << this->nextChild.getOfs() << dec << endl;
+ }
+
+ template< class V >
+ void BtreeBucket<V>::twoStepInsert(DiskLoc thisLoc, Continuation<V> &c, bool dupsAllowed) const
+ {
+
+ if ( c.key.dataSize() > this->KeyMax ) {
+ problem() << "ERROR: key too large len:" << c.key.dataSize() << " max:" << this->KeyMax << ' ' << c.key.dataSize() << ' ' << c.idx.indexNamespace() << endl;
+ return; // op=Nothing
+ }
+ insertStepOne(thisLoc, c, dupsAllowed);
+ }
+
+ /** TODO: the meaning of the return code is unclear; clean this up */
+ template< class V >
+ int BtreeBucket<V>::bt_insert(const DiskLoc thisLoc, const DiskLoc recordLoc,
+ const BSONObj& _key, const Ordering &order, bool dupsAllowed,
+ IndexDetails& idx, bool toplevel) const
+ {
+ guessIncreasing = _key.firstElementType() == jstOID && idx.isIdIndex();
+ KeyOwned key(_key);
+
+ dassert(toplevel);
+ if ( toplevel ) {
+ if ( key.dataSize() > this->KeyMax ) {
+ problem() << "Btree::insert: key too large to index, skipping " << idx.indexNamespace() << ' ' << key.dataSize() << ' ' << key.toString() << endl;
+ return 3;
+ }
+ }
+
+ int x;
+ try {
+ x = _insert(thisLoc, recordLoc, key, order, dupsAllowed, DiskLoc(), DiskLoc(), idx);
+ this->assertValid( order );
+ }
+ catch( ... ) {
+ guessIncreasing = false;
+ throw;
+ }
+ guessIncreasing = false;
+ return x;
+ }
+
+ template< class V >
+ void BtreeBucket<V>::shape(stringstream& ss) const {
+ this->_shape(0, ss);
+ }
+
+ template< class V >
+ int BtreeBucket<V>::getKeyMax() {
+ return V::KeyMax;
+ }
+
+ template< class V >
+ DiskLoc BtreeBucket<V>::findSingle( const IndexDetails& indexdetails , const DiskLoc& thisLoc, const BSONObj& key ) const {
+ int pos;
+ bool found;
+ // TODO: is it really ok here that the order is a default?
+ // for findById() use, yes. for checkNoIndexConflicts, no?
+ Ordering o = Ordering::make(BSONObj());
+ DiskLoc bucket = locate( indexdetails , indexdetails.head , key , o , pos , found , minDiskLoc );
+ if ( bucket.isNull() )
+ return bucket;
+
+ const BtreeBucket<V> *b = bucket.btree<V>();
+ while ( 1 ) {
+ const _KeyNode& knraw = b->k(pos);
+ if ( knraw.isUsed() )
+ break;
+ bucket = b->advance( bucket , pos , 1 , "findSingle" );
+ if ( bucket.isNull() )
+ return bucket;
+ b = bucket.btree<V>();
+ }
+ KeyNode kn = b->keyNode( pos );
+ if ( KeyOwned(key).woCompare( kn.key, o ) != 0 )
+ return DiskLoc();
+ return kn.recordLoc;
+ }
+
+} // namespace mongo
+
+#include "db.h"
+#include "dbhelpers.h"
+
+namespace mongo {
+
+ template< class V >
+ void BtreeBucket<V>::a_test(IndexDetails& id) {
+ BtreeBucket *b = id.head.btreemod<V>();
+
+ // record locs for testing
+ DiskLoc A(1, 20);
+ DiskLoc B(1, 30);
+ DiskLoc C(1, 40);
+
+ DiskLoc rl;
+ BSONObj key = fromjson("{x:9}");
+ BSONObj orderObj = fromjson("{}");
+ Ordering order = Ordering::make(orderObj);
+
+ b->bt_insert(id.head, A, key, order, true, id);
+ A.GETOFS() += 2;
+ b->bt_insert(id.head, A, key, order, true, id);
+ A.GETOFS() += 2;
+ b->bt_insert(id.head, A, key, order, true, id);
+ A.GETOFS() += 2;
+ b->bt_insert(id.head, A, key, order, true, id);
+ A.GETOFS() += 2;
+ assert( b->k(0).isUsed() );
+// b->k(0).setUnused();
+ b->k(1).setUnused();
+ b->k(2).setUnused();
+ b->k(3).setUnused();
+
+ b->dumpTree(id.head, orderObj);
+
+ /* b->bt_insert(id.head, B, key, order, false, id);
+ b->k(1).setUnused();
+ b->dumpTree(id.head, order);
+ b->bt_insert(id.head, A, key, order, false, id);
+ b->dumpTree(id.head, order);
+ */
+
+ // this should assert. does it? (it might assert only "accidentally" though; not asserting proves a problem, while asserting proves nothing)
+ b->bt_insert(id.head, C, key, order, false, id);
+
+ // b->dumpTree(id.head, order);
+ }
+
+ template class BucketBasics<V0>;
+ template class BucketBasics<V1>;
+ template class BtreeBucket<V0>;
+ template class BtreeBucket<V1>;
+ template struct __KeyNode<DiskLoc>;
+ template struct __KeyNode<DiskLoc56Bit>;
+
+ struct BTUnitTest : public UnitTest {
+ void run() {
+ DiskLoc big(0xf12312, 0x70001234);
+ DiskLoc56Bit bigl;
+ {
+ bigl = big;
+ assert( big == bigl );
+ DiskLoc e = bigl;
+ assert( big == e );
+ }
+ {
+ DiskLoc d;
+ assert( d.isNull() );
+ DiskLoc56Bit l;
+ l = d;
+ assert( l.isNull() );
+ d = l;
+ assert( d.isNull() );
+ assert( l < bigl );
+ }
+ }
+ } btunittest;
+
+}
diff --git a/src/mongo/db/btree.h b/src/mongo/db/btree.h
new file mode 100644
index 00000000000..85e5172d163
--- /dev/null
+++ b/src/mongo/db/btree.h
@@ -0,0 +1,1174 @@
+// btree.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../pch.h"
+#include "jsobj.h"
+#include "diskloc.h"
+#include "pdfile.h"
+#include "key.h"
+
+namespace mongo {
+
+ /**
+ * Our btree implementation generally follows the standard btree algorithm,
+ * which is described in many places. The nodes of our btree are referred to
+ * as buckets below. These buckets are of size BucketSize and their body is
+ * an ordered array of <bson key, disk loc> pairs, where disk loc is the disk
+ * location of a document and bson key is a projection of this document into
+ * the schema of the index for this btree. Ordering is determined on the
+ * basis of bson key first and then disk loc in case of a tie. All bson keys
+ * for a btree have identical schemas with empty string field names and may
+ * not have an objsize() exceeding KeyMax. The btree's buckets are
+ * themselves organized into an ordered tree. Although there are exceptions,
+ * generally buckets with n keys have n+1 children and the body of a bucket is
+ * at least lowWaterMark bytes. A more strictly enforced requirement is that
+ * a non root bucket must have at least one key except in certain transient
+ * states.
+ *
+ * Our btrees support the following primary read operations: finding a
+ * specified key; iterating from a starting key to the next or previous
+ * ordered key; and skipping from a starting key to another specified key
+ * without checking every intermediate key. The primary write operations
+ * are insertion and deletion of keys. Insertion may trigger a bucket split
+ * if necessary to avoid bucket overflow. In such a case, subsequent splits
+ * will occur recursively as necessary. Deletion may trigger a bucket
+ * rebalance, in which a size deficient bucket is filled with keys from an
+ * adjacent bucket. In this case, splitting may potentially occur in the
+ * parent. Deletion may alternatively trigger a merge, in which the keys
+ * from two buckets and a key from their shared parent are combined into the
+ * same bucket. In such a case, rebalancing or merging may proceed
+ * recursively from the parent.
+ *
+ * While the btree data format has been relatively constant over time, btrees
+ * initially created by versions of mongo earlier than the current version
+ * may embody different properties than freshly created btrees (while
+ * following the same data format). These older btrees are referred to
+ * below as legacy btrees.
+ */
+
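+ /*
+ * A small made up example to illustrate the above: an index on {x:1} might store
+ * the pairs ({"":5}, recA), ({"":9}, recB), ({"":9}, recC) in one bucket; the two
+ * x:9 entries are ordered by their disk locs since the bson keys compare equal.
+ * A lookup for {"":9}/recC starts at the head bucket and descends into the child
+ * whose key range brackets the search key until the pair is found or a null child
+ * is reached.
+ */
+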
+ const int OldBucketSize = 8192;
+
+#pragma pack(1)
+ template< class Version > class BucketBasics;
+
+ /**
+ * This is the fixed width data component for storage of a key within a
+ * bucket. It contains an offset pointer to the variable width bson
+ * data component. A _KeyNode may be 'unused', please see below.
+ */
+ template< class Loc >
+ struct __KeyNode {
+ /** Signals that we are writing this _KeyNode and casts away const */
+ __KeyNode<Loc> & writing() const;
+ /**
+ * The 'left' child bucket of this key. If this is the i-th key, it
+ * points to the i index child bucket.
+ */
+ Loc prevChildBucket;
+ /** The location of the record associated with this key. */
+ Loc recordLoc;
+ short keyDataOfs() const { return (short) _kdo; }
+
+ /** Offset within current bucket of the variable width bson key for this _KeyNode. */
+ unsigned short _kdo;
+ void setKeyDataOfs(short s) {
+ _kdo = s;
+ assert(s>=0);
+ }
+ /** Seems to be redundant. */
+ void setKeyDataOfsSavingUse(short s) {
+ _kdo = s;
+ assert(s>=0);
+ }
+ /**
+ * Unused keys are not returned by read operations. Keys may be marked
+ * as unused in cases where it is difficult to delete them while
+ * maintaining the constraints required of a btree.
+ *
+ * Setting ofs to odd is the sentinel for unused, as real recordLoc's
+ * are always even numbers. Note we need to keep its value basically
+ * the same as we use the recordLoc as part of the key in the index
+ * (to handle duplicate keys efficiently).
+ *
+ * Flagging keys as unused is a feature that is being phased out in favor
+ * of deleting the keys outright. The current btree implementation is
+ * not expected to mark a key as unused in a non legacy btree.
+ */
+ void setUnused() {
+ recordLoc.GETOFS() |= 1;
+ }
+ void setUsed() { recordLoc.GETOFS() &= ~1; }
+ int isUnused() const {
+ return recordLoc.getOfs() & 1;
+ }
+ int isUsed() const {
+ return !isUnused();
+ }
+ };
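+
+ /*
+ * A made up example of the unused flag: a used key whose recordLoc offset is
+ * 0x2000 becomes 0x2001 after setUnused() sets the low bit; isUnused() then
+ * reports it via getOfs() & 1, and setUsed() clears the bit, restoring 0x2000.
+ * Real record offsets are always even, so no information is lost.
+ */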
+
+ /**
+ * This structure represents header data for a btree bucket. An object of
+ * this type is typically allocated inside of a buffer of size BucketSize,
+ * resulting in a full bucket with an appropriate header.
+ *
+ * The body of a btree bucket contains an array of _KeyNode objects starting
+ * from its lowest indexed bytes and growing to higher indexed bytes. The
+ * body also contains variable width bson keys, which are allocated from the
+ * highest indexed bytes toward lower indexed bytes.
+ *
+ * |hhhh|kkkkkkk--------bbbbbbbbbbbuuubbbuubbb|
+ * h = header data
+ * k = KeyNode data
+ * - = empty space
+ * b = bson key data
+ * u = unused (old) bson key data, that may be garbage collected
+ */
+ class BtreeData_V0 {
+ protected:
+ /** Parent bucket of this bucket, which isNull() for the root bucket. */
+ DiskLoc parent;
+ /** Given that there are n keys, this is the n index child. */
+ DiskLoc nextChild;
+ /** can be reused, value is 8192 in current pdfile version Apr2010 */
+ unsigned short _wasSize;
+ /** zero */
+ unsigned short _reserved1;
+ int flags;
+
+ void _init() {
+ _reserved1 = 0;
+ _wasSize = BucketSize;
+ reserved = 0;
+ }
+
+ /** basicInsert() assumes the next three members are consecutive and in this order: */
+
+ /** Size of the empty region. */
+ int emptySize;
+ /** Size used for bson storage, including storage of old keys. */
+ int topSize;
+ /* Number of keys in the bucket. */
+ int n;
+
+ int reserved;
+ /* Beginning of the bucket's body */
+ char data[4];
+
+ public:
+ typedef __KeyNode<DiskLoc> _KeyNode;
+ typedef DiskLoc Loc;
+ typedef KeyBson Key;
+ typedef KeyBson KeyOwned;
+ enum { BucketSize = 8192 };
+
+ // largest key size we allow. note we very much need to support bigger keys (somehow) in the future.
+ static const int KeyMax = OldBucketSize / 10;
+ };
+
+ // layout: a a a ofs ofs ofs ofs -- a 3 byte file number ('a') and a 4 byte offset ('ofs')
+ class DiskLoc56Bit {
+ int ofs;
+ unsigned char _a[3];
+ unsigned long long Z() const {
+ // endian
+ return *((unsigned long long*)this) & 0x00ffffffffffffffULL;
+ }
+ enum {
+ // the low bit of offsets is used by _KeyNode as the 'unused' flag, so we don't use -1 here.
+ OurNullOfs = -2
+ };
+ public:
+ template< class V >
+ const BtreeBucket<V> * btree() const {
+ return DiskLoc(*this).btree<V>();
+ }
+ template< class V >
+ BtreeBucket<V> * btreemod() const {
+ return DiskLoc(*this).btreemod<V>();
+ }
+ operator const DiskLoc() const {
+ // endian
+ if( isNull() ) return DiskLoc();
+ unsigned a = *((unsigned *) (_a-1));
+ return DiskLoc(a >> 8, ofs);
+ }
+ int& GETOFS() { return ofs; }
+ int getOfs() const { return ofs; }
+ bool operator<(const DiskLoc56Bit& rhs) const {
+ // the ordering of dup keys in btrees isn't too critical, but we'd like to put items that are
+ // close together on disk close together in the tree, so we do want the file # to be the most significant
+ // bytes
+ return Z() < rhs.Z();
+ }
+ int compare(const DiskLoc56Bit& rhs) const {
+ unsigned long long a = Z();
+ unsigned long long b = rhs.Z();
+ if( a < b ) return -1;
+ return a == b ? 0 : 1;
+ }
+ bool operator==(const DiskLoc56Bit& rhs) const { return Z() == rhs.Z(); }
+ bool operator!=(const DiskLoc56Bit& rhs) const { return Z() != rhs.Z(); }
+ bool operator==(const DiskLoc& rhs) const {
+ return DiskLoc(*this) == rhs;
+ }
+ bool operator!=(const DiskLoc& rhs) const { return !(*this==rhs); }
+ bool isNull() const { return ofs < 0; }
+ void Null() {
+ ofs = OurNullOfs;
+ _a[0] = _a[1] = _a[2] = 0;
+ }
+ string toString() const { return DiskLoc(*this).toString(); }
+ void operator=(const DiskLoc& loc) {
+ ofs = loc.getOfs();
+ int la = loc.a();
+ assert( la <= 0xffffff ); // must fit in 3 bytes
+ if( la < 0 ) {
+ assert( la == -1 );
+ la = 0;
+ ofs = OurNullOfs;
+ }
+ memcpy(_a, &la, 3); // endian
+ dassert( ofs != 0 );
+ }
+ DiskLoc56Bit& writing() const {
+ return *((DiskLoc56Bit*) getDur().writingPtr((void*)this, 7));
+ }
+ };
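+
+ /*
+ * A rough sketch of the 7 byte packing with made up values: assigning
+ * DiskLoc(5, 0x1000) stores ofs = 0x1000 in the 4 byte field and the file
+ * number 5 in the 3 byte _a array. Z() masks the struct down to its low 56
+ * bits, which (on the little endian layouts this code assumes) places the file
+ * number in the most significant bytes, so comparisons group keys from the same
+ * file together. Converting back through operator const DiskLoc() recovers
+ * DiskLoc(5, 0x1000).
+ */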
+
+ class BtreeData_V1 {
+ public:
+ typedef DiskLoc56Bit Loc;
+ //typedef DiskLoc Loc;
+ typedef __KeyNode<Loc> _KeyNode;
+ typedef KeyV1 Key;
+ typedef KeyV1Owned KeyOwned;
+ enum { BucketSize = 8192-16 }; // leave room for Record header
+ // largest key size we allow. note we very much need to support bigger keys (somehow) in the future.
+ static const int KeyMax = 1024;
+ protected:
+ /** Parent bucket of this bucket, which isNull() for the root bucket. */
+ Loc parent;
+ /** Given that there are n keys, this is the n index child. */
+ Loc nextChild;
+
+ unsigned short flags;
+
+ /** basicInsert() assumes the next three members are consecutive and in this order: */
+
+ /** Size of the empty region. */
+ unsigned short emptySize;
+ /** Size used for bson storage, including storage of old keys. */
+ unsigned short topSize;
+ /* Number of keys in the bucket. */
+ unsigned short n;
+
+ /* Beginning of the bucket's body */
+ char data[4];
+
+ void _init() { }
+ };
+
+ typedef BtreeData_V0 V0;
+ typedef BtreeData_V1 V1;
+
+ /**
+ * This class adds functionality to BtreeData for managing a single bucket.
+ * The following policies are used in an attempt to encourage simplicity:
+ *
+ * Const member functions of this class are those which may be called on
+ * an object for which writing has not been signaled. Non const member
+ * functions may only be called on objects for which writing has been
+ * signaled. Note that currently some const functions write to the
+ * underlying memory representation of this bucket using optimized methods
+ * to signal write operations.
+ *
+ * DiskLoc parameters that may shadow references within the btree should
+ * be passed by value rather than by reference to non const member
+ * functions or to const member functions which may perform writes. This way
+ * a callee need not worry that write operations will change or invalidate
+ * its arguments.
+ *
+ * The current policy for dealing with bson arguments is the opposite of
+ * what is described above for DiskLoc arguments. We do not want to copy
+ * bson into memory as an intermediate step for btree changes, and if bson
+ * is to be moved it must be copied to the new location before the old
+ * location is invalidated. Care should be taken in cases where that invalid
+ * memory may be implicitly referenced by function arguments.
+ *
+ * A number of functions below require a thisLoc argument, which must be the
+ * disk location of the bucket mapped to 'this'.
+ */
+ template< class Version >
+ class BucketBasics : public Version {
+ public:
+ template <class U> friend class BtreeBuilder;
+ typedef typename Version::Key Key;
+ typedef typename Version::_KeyNode _KeyNode;
+ typedef typename Version::Loc Loc;
+
+ int getN() const { return this->n; }
+
+ /**
+ * This is an in memory wrapper for a _KeyNode, and not itself part of btree
+ * storage. This object and its BSONObj 'key' will become invalid if the
+ * _KeyNode data that generated it is moved within the btree. In general,
+ * a KeyNode should not be expected to be valid after a write.
+ */
+ class KeyNode {
+ public:
+ KeyNode(const BucketBasics<Version>& bb, const _KeyNode &k);
+ const Loc& prevChildBucket;
+ const Loc& recordLoc;
+ /* Points to the bson key storage for a _KeyNode */
+ Key key;
+ };
+ friend class KeyNode;
+
+ /** Assert write intent declared for this bucket already. */
+ void assertWritable();
+
+ void assertValid(const Ordering &order, bool force = false) const;
+ void assertValid(const BSONObj &orderObj, bool force = false) const { return assertValid(Ordering::make(orderObj),force); }
+
+ /**
+ * @return KeyNode for key at index i. The KeyNode will become invalid
+ * if the key is moved or reassigned, or if the node is packed. In general
+ * a KeyNode should not be expected to be valid after a write.
+ */
+ const KeyNode keyNode(int i) const {
+ if ( i >= this->n ) {
+ massert( 13000 , (string)"invalid keyNode: " + BSON( "i" << i << "n" << this->n ).jsonString() , i < this->n );
+ }
+ return KeyNode(*this, k(i));
+ }
+
+ static int headerSize() {
+ const BucketBasics *d = 0;
+ return (char*)&(d->data) - (char*)&(d->parent);
+ }
+ static int bodySize() { return Version::BucketSize - headerSize(); }
+ static int lowWaterMark() { return bodySize() / 2 - Version::KeyMax - sizeof( _KeyNode ) + 1; } // see comment in btree.cpp
+
+ // for testing
+ int nKeys() const { return this->n; }
+ const DiskLoc getNextChild() const { return this->nextChild; }
+
+ protected:
+ char * dataAt(short ofs) { return this->data + ofs; }
+
+ /** Initialize the header for a new node. */
+ void init();
+
+ /**
+ * Preconditions:
+ * - 0 <= keypos <= n
+ * - If key is inserted at position keypos, the bucket's keys will still be
+ * in order.
+ * Postconditions:
+ * - If key can fit in the bucket, the bucket may be packed and keypos
+ * may be decreased to reflect deletion of earlier indexed keys during
+ * packing, the key will be inserted at the updated keypos index with
+ * a null prevChildBucket, the subsequent keys shifted to the right,
+ * and the function will return true.
+ * - If key cannot fit in the bucket, the bucket will be packed and
+ * the function will return false.
+ * Although this function is marked const, it modifies the underlying
+ * btree representation through an optimized write intent mechanism.
+ */
+ bool basicInsert(const DiskLoc thisLoc, int &keypos, const DiskLoc recordLoc, const Key& key, const Ordering &order) const;
+
+ /**
+ * Preconditions:
+ * - key / recordLoc are > all existing keys
+ * - The keys in prevChild and their descendents are between all existing
+ * keys and 'key'.
+ * Postconditions:
+ * - If there is space for key without packing, it is inserted as the
+ * last key with specified prevChild and true is returned.
+ * Importantly, nextChild is not updated!
+ * - Otherwise false is returned and there is no change.
+ */
+ bool _pushBack(const DiskLoc recordLoc, const Key& key, const Ordering &order, const DiskLoc prevChild);
+ void pushBack(const DiskLoc recordLoc, const Key& key, const Ordering &order, const DiskLoc prevChild) {
+ bool ok = _pushBack( recordLoc , key , order , prevChild );
+ assert(ok);
+ }
+
+ /**
+ * This is a special purpose function used by BtreeBuilder. The
+ * interface is quite dangerous if you're not careful. The bson key
+ * returned here points to bucket memory that has been invalidated but
+ * not yet reclaimed.
+ *
+ * TODO Maybe this could be replaced with two functions, one which
+ * returns the last key without deleting it and another which simply
+ * deletes the last key. Then the caller would have enough control to
+ * ensure proper memory integrity.
+ *
+ * Preconditions:
+ * - bucket is not empty
+ * - last key of bucket is used (not unused)
+ * - nextChild isNull()
+ * - _unalloc will work correctly as used - see code
+ * Postconditions:
+ * - The last key of the bucket is removed, and its key and recLoc are
+ * returned. As mentioned above, the key points to unallocated memory.
+ */
+ void popBack(DiskLoc& recLoc, Key &key);
+
+ /**
+ * Preconditions:
+ * - 0 <= keypos < n
+ * - there is no child bucket at keypos
+ * - n > 1
+ * - if mayEmpty == false or nextChild.isNull(), n > 0
+ * Postconditions:
+ * - The key at keypos is removed, and remaining keys are shifted over.
+ * - The bucket becomes unpacked.
+ * - if mayEmpty is true and nextChild.isNull(), the bucket may have no keys.
+ */
+ void _delKeyAtPos(int keypos, bool mayEmpty = false);
+
+ /* !Packed means there is deleted fragment space within the bucket.
+ We "repack" when we run out of space before considering the node
+ to be full.
+ */
+ enum Flags { Packed=1 };
+
+ /** n == 0 is ok */
+ const Loc& childForPos(int p) const { return p == this->n ? this->nextChild : k(p).prevChildBucket; }
+ Loc& childForPos(int p) { return p == this->n ? this->nextChild : k(p).prevChildBucket; }
+
+ /** Same as bodySize(). */
+ int totalDataSize() const;
+ /**
+ * @return true when a key may be dropped by pack()
+ * @param index index of the key that may be dropped
+ * @param refPos index of a particular key of interest, which must not
+ * be dropped; = 0 to safely ignore
+ */
+ bool mayDropKey( int index, int refPos ) const;
+
+ /**
+ * Pack the bucket to reclaim space from invalidated memory.
+ * @refPos is an index in the bucket which may be updated if we
+ * delete keys from the bucket
+ * This function may cast away const and perform a write.
+ * Preconditions: none
+ * Postconditions:
+ * - Bucket will be packed
+ * - Some unused nodes may be dropped, but not ones at index 0 or refPos
+ * - Some used nodes may be moved
+ * - If refPos is the index of an existing key, it will be updated to that
+ * key's new index if the key is moved.
+ */
+ void _pack(const DiskLoc thisLoc, const Ordering &order, int &refPos) const;
+ /** Pack when already writable */
+ void _packReadyForMod(const Ordering &order, int &refPos);
+
+ /** @return the size the bucket's body would have if we were to call pack() */
+ int packedDataSize( int refPos ) const;
+ void setNotPacked() { this->flags &= ~Packed; }
+ void setPacked() { this->flags |= Packed; }
+ /**
+ * Preconditions: 'bytes' is <= emptySize
+ * Postconditions: A buffer of size 'bytes' is allocated on the top side,
+ * and its offset is returned.
+ */
+ int _alloc(int bytes);
+ /**
+ * This function can be used to deallocate the lowest byte index bson
+ * buffer in the top region, which in some but not all cases is for the
+ * n - 1 index key. This function only works correctly in certain
+ * special cases, please be careful.
+ * Preconditions: 'bytes' <= topSize
+ * Postconditions: The top region is decreased
+ */
+ void _unalloc(int bytes);
+ /**
+ * Preconditions: 'N' <= n
+ * Postconditions:
+ * - All keys after the N index key are dropped.
+ * - The bucket is then packed, without dropping the refPos key if refPos < N.
+ */
+ void truncateTo(int N, const Ordering &order, int &refPos);
+ /**
+ * Preconditions:
+ * - 'nDrop' < n
+ * - for now, refPos should be zero.
+ * Postconditions:
+ * - All keys before the nDrop index key are dropped.
+ * - The bucket is packed.
+ */
+ void dropFront(int nDrop, const Ordering &order, int &refPos);
+ /**
+ * Preconditions: 0 <= keypos < n
+ * Postconditions: keypos indexed key is marked unused.
+ */
+ void markUnused(int keypos);
+
+ /**
+ * BtreeBuilder uses the parent var as a temp place to maintain a linked list chain.
+ * we use tempNext() when we do that to be less confusing. (one might have written a union in C)
+ */
+ DiskLoc tempNext() const { return this->parent; }
+ void setTempNext(DiskLoc l) { this->parent = l; }
+
+ void _shape(int level, stringstream&) const;
+ int Size() const;
+
+ /** @return i-indexed _KeyNode, without bounds checking */
+ public:
+ const _KeyNode& k(int i) const { return ((const _KeyNode*)this->data)[i]; }
+ _KeyNode& _k(int i) { return ((_KeyNode*)this->data)[i]; }
+ protected:
+ _KeyNode& k(int i) { return ((_KeyNode*)this->data)[i]; }
+
+ /**
+ * Preconditions: 'this' is packed
+ * @return the key index to be promoted on split
+ * @param keypos The requested index of a key to insert, which may affect
+ * the choice of split position.
+ */
+ int splitPos( int keypos ) const;
+
+ /**
+ * Preconditions: nAdd * sizeof( _KeyNode ) <= emptySize
+ * Postconditions:
+ * - Increases indexes of existing _KeyNode objects by nAdd, reserving
+ * space for additional _KeyNode objects at front.
+ * - Does not initialize ofs values for the bson data of these
+ * _KeyNode objects.
+ */
+ void reserveKeysFront( int nAdd );
+
+ /**
+ * Preconditions:
+ * - 0 <= i < n
+ * - The bson 'key' must fit in the bucket without packing.
+ * - If 'key' and 'prevChildBucket' are set at index i, the btree
+ * ordering properties will be maintained.
+ * Postconditions:
+ * - The specified key is set at index i, replacing the existing
+ * _KeyNode data and without shifting any other _KeyNode objects.
+ */
+ void setKey( int i, const DiskLoc recordLoc, const Key& key, const DiskLoc prevChildBucket );
+ };
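+
+    /*
+     * Illustrative note, not part of the original source: a rough picture of the
+     * bucket data area, inferred from the accessors and comments above. Fixed-size
+     * _KeyNode entries are indexed from the low end of 'data' (see k(i)), while the
+     * variable-length bson key bytes live in the "top" region allocated by _alloc(),
+     * with each _KeyNode recording the offset of its key bytes via keyDataOfs().
+     * pack() reclaims the holes left behind by dropped or relocated keys.
+     *
+     *   data: [ _KeyNode 0 | _KeyNode 1 | ... |   free space   | ... key bytes ... ]
+     *           low end (headers)                                 "top" region
+     */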
+
+ template< class V>
+ struct Continuation;
+
+ /**
+ * This class adds functionality for manipulating buckets that are assembled
+ * in a tree. The requirements for const and non const functions and
+ * arguments are generally the same as in BtreeBucket. Because this class
+ * deals with tree structure, some functions that are marked const may
+ * trigger modification of another node in the btree or potentially of the
+ * current node. In such cases, the function's implementation explicitly
+ * casts away const when indicating an intent to write to the durability
+ * layer. The DiskLocs provided to such functions should be passed by
+ * value if they shadow pointers within the btree.
+ *
+ * To clarify enforcement of referential integrity in this implementation,
+ * we use the following pattern when deleting data we have a persistent
+ * pointer to. The pointer is cleared or removed explicitly, then the data
+ * it pointed to is cleaned up with a helper function.
+ *
+ * TODO It might make sense to put some of these functions in a class
+ * representing a full btree instead of a single btree bucket. That would
+ * allow us to use the const qualifier in a manner more consistent with
+ * standard usage. Right now the interface is for both a node and a tree,
+ * so assignment of const is sometimes nonideal.
+ *
+ * TODO There are several cases in which the 'this' pointer is invalidated
+     * as a result of deallocation. A separate class representing a btree would
+ * alleviate some fragile cases where the implementation must currently
+ * behave correctly if the 'this' pointer is suddenly invalidated by a
+ * callee.
+ */
+ template< class V >
+ class BtreeBucket : public BucketBasics<V> {
+ friend class BtreeCursor;
+ friend struct Continuation<V>;
+ public:
+ // make compiler happy:
+ typedef typename V::Key Key;
+ typedef typename V::KeyOwned KeyOwned;
+ typedef typename BucketBasics<V>::KeyNode KeyNode;
+ typedef typename BucketBasics<V>::_KeyNode _KeyNode;
+ typedef typename BucketBasics<V>::Loc Loc;
+ const _KeyNode& k(int i) const { return static_cast< const BucketBasics<V> * >(this)->k(i); }
+ protected:
+ _KeyNode& k(int i) { return static_cast< BucketBasics<V> * >(this)->_k(i); }
+ public:
+ const KeyNode keyNode(int i) const { return static_cast< const BucketBasics<V> * >(this)->keyNode(i); }
+
+ bool isHead() const { return this->parent.isNull(); }
+ void dumpTree(const DiskLoc &thisLoc, const BSONObj &order) const;
+ long long fullValidate(const DiskLoc& thisLoc, const BSONObj &order, long long *unusedCount = 0, bool strict = false, unsigned depth=0) const; /* traverses everything */
+
+ bool isUsed( int i ) const { return this->k(i).isUsed(); }
+ string bucketSummary() const;
+ void dump(unsigned depth=0) const;
+
+ /**
+ * @return true if key exists in index
+ *
+ * @order - indicates order of keys in the index. this is basically the index's key pattern, e.g.:
+ * BSONObj order = ((IndexDetails&)idx).keyPattern();
+ * likewise below in bt_insert() etc.
+ */
+ private:
+ bool exists(const IndexDetails& idx, const DiskLoc &thisLoc, const Key& key, const Ordering& order) const;
+ public:
+
+ /**
+         * @param self - Don't report a duplicate for this record's own entry in the index.
+ * @return true = There is a duplicate used key.
+ */
+ bool wouldCreateDup(
+ const IndexDetails& idx, const DiskLoc &thisLoc,
+ const Key& key, const Ordering& order,
+ const DiskLoc &self) const;
+
+ /**
+ * Preconditions: none
+ * Postconditions: @return a new bucket allocated from pdfile storage
+         * and init()-ed. This bucket is suitable for use as a new root
+ * or any other new node in the tree.
+ */
+ static DiskLoc addBucket(const IndexDetails&);
+
+ /**
+ * Preconditions: none
+ * Postconditions:
+ * - Some header values in this bucket are cleared, and the bucket is
+ * deallocated from pdfile storage.
+ * - The memory at thisLoc is invalidated, and 'this' is invalidated.
+ */
+ void deallocBucket(const DiskLoc thisLoc, const IndexDetails &id);
+
+ /**
+ * Preconditions:
+ * - 'key' has a valid schema for this index.
+         *  - All other parameters are valid and consistent with this index if applicable.
+ * Postconditions:
+ * - If key is bigger than KeyMax, @return 2 or 3 and no change.
+ * - If key / recordLoc exist in the btree as an unused key, set them
+ * as used and @return 0
+ * - If key / recordLoc exist in the btree as a used key, @throw
+ * exception 10287 and no change.
+ * - If key / recordLoc do not exist in the btree, they are inserted
+ * and @return 0. The root of the btree may be changed, so
+ * 'this'/thisLoc may no longer be the root upon return.
+ */
+ int bt_insert(const DiskLoc thisLoc, const DiskLoc recordLoc,
+ const BSONObj& key, const Ordering &order, bool dupsAllowed,
+ IndexDetails& idx, bool toplevel = true) const;
+
+        /** Does the insert in two steps so an upgradable lock can be used for step 1, which
+            is the part that may page fault and also does most of the computational work.
+ */
+ void twoStepInsert(DiskLoc thisLoc, Continuation<V> &c, bool dupsAllowed) const;
+
+ /**
+ * Preconditions:
+ * - 'key' has a valid schema for this index, and may have objsize() > KeyMax.
+ * Postconditions:
+ * - If key / recordLoc are in the btree, they are removed (possibly
+ * by being marked as an unused key), @return true, and potentially
+ * invalidate 'this' / thisLoc and change the head.
+ * - If key / recordLoc are not in the btree, @return false and do nothing.
+ */
+ bool unindex(const DiskLoc thisLoc, IndexDetails& id, const BSONObj& key, const DiskLoc recordLoc) const;
+
+ /**
+ * locate may return an "unused" key that is just a marker. so be careful.
+ * looks for a key:recordloc pair.
+ *
+ * @found - returns true if exact match found. note you can get back a position
+ * result even if found is false.
+ */
+ DiskLoc locate(const IndexDetails &idx , const DiskLoc& thisLoc, const BSONObj& key, const Ordering &order,
+ int& pos, bool& found, const DiskLoc &recordLoc, int direction=1) const;
+ DiskLoc locate(const IndexDetails &idx , const DiskLoc& thisLoc, const Key& key, const Ordering &order,
+ int& pos, bool& found, const DiskLoc &recordLoc, int direction=1) const;
+
+ /**
+ * find the first instance of the key
+ * does not handle dups
+ * WARNING: findSingle may not be compound index safe. this may need to change. see notes in
+ * findSingle code.
+ * @return the record location of the first match
+ */
+ DiskLoc findSingle( const IndexDetails &indexdetails , const DiskLoc& thisLoc, const BSONObj& key ) const;
+
+ /**
+ * Advance to next or previous key in the index.
+ * @param direction to advance.
+ */
+ DiskLoc advance(const DiskLoc& thisLoc, int& keyOfs, int direction, const char *caller) const;
+
+ /** Advance in specified direction to the specified key */
+ void advanceTo(DiskLoc &thisLoc, int &keyOfs, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int direction ) const;
+
+ /** Locate a key with fields comprised of a combination of keyBegin fields and keyEnd fields. */
+ static void customLocate(DiskLoc &locInOut, int &keyOfs, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int direction, pair< DiskLoc, int > &bestParent ) ;
+
+ /** @return head of the btree by traversing from current bucket. */
+ const DiskLoc getHead(const DiskLoc& thisLoc) const;
+
+ /** get tree shape */
+ void shape(stringstream&) const;
+
+ static void a_test(IndexDetails&);
+
+ static int getKeyMax();
+
+ protected:
+ /**
+ * Preconditions:
+ * - 0 <= firstIndex <= n
+ * - -1 <= lastIndex <= n ( -1 is equivalent to n )
+ * Postconditions:
+ * - Any children at indexes firstIndex through lastIndex (inclusive)
+ * will have their parent pointers set to thisLoc.
+ */
+ void fixParentPtrs(const DiskLoc thisLoc, int firstIndex = 0, int lastIndex = -1) const;
+
+ /**
+ * Preconditions:
+ * - thisLoc is not the btree head.
+ * - n == 0 is ok
+ * Postconditions:
+ * - All cursors pointing to this bucket will be updated.
+ * - This bucket's parent's child pointer is set to null.
+ * - This bucket is deallocated from pdfile storage.
+ * - 'this' and thisLoc are invalidated.
+ */
+ void delBucket(const DiskLoc thisLoc, const IndexDetails&);
+
+ /**
+ * Preconditions: 0 <= p < n
+ * Postconditions:
+ * - The key at index p is removed from the btree.
+ * - 'this' and thisLoc may be invalidated.
+ * - The tree head may change.
+ */
+ void delKeyAtPos(const DiskLoc thisLoc, IndexDetails& id, int p, const Ordering &order);
+
+ /**
+ * Preconditions:
+ * - n == 0 is ok
+ * Postconditions:
+ * - If thisLoc is head, or if its body has at least lowWaterMark bytes,
+ * return false and do nothing.
+ * - Otherwise, if thisLoc has left or right neighbors, either balance
+ * or merge with them and return true. Also, 'this' and thisLoc may
+ * be invalidated and the tree head may change.
+ */
+ bool mayBalanceWithNeighbors(const DiskLoc thisLoc, IndexDetails &id, const Ordering &order) const;
+
+ /**
+ * Preconditions:
+ * - 0 <= leftIndex < n
+ * - The child at leftIndex or the child at leftIndex + 1 contains
+ * fewer than lowWaterMark bytes.
+ * Postconditions:
+         *  - If the child bucket at leftIndex can merge with the child bucket
+ * at leftIndex + 1, do nothing and return false.
+ * - Otherwise, balance keys between the leftIndex child and the
+ * leftIndex + 1 child, return true, and possibly change the tree head.
+ */
+ bool tryBalanceChildren( const DiskLoc thisLoc, int leftIndex, IndexDetails &id, const Ordering &order ) const;
+
+ /**
+ * Preconditions:
+ * - All preconditions of tryBalanceChildren.
+ * - The leftIndex child and leftIndex + 1 child cannot be merged.
+ * Postconditions:
+ * - Keys are moved between the leftIndex child and the leftIndex + 1
+ * child such that neither child has fewer than lowWaterMark bytes.
+ * The tree head may change.
+ */
+ void doBalanceChildren( const DiskLoc thisLoc, int leftIndex, IndexDetails &id, const Ordering &order );
+
+ /**
+ * Preconditions:
+ * - All preconditions of doBalanceChildren
+ * - The leftIndex and leftIndex + 1 children are packed.
+ * - The leftIndex + 1 child has fewer than lowWaterMark bytes.
+ * - split returned by rebalancedSeparatorPos()
+ * Postconditions:
+ * - The key in lchild at index split is set as thisLoc's key at index
+ * leftIndex, which may trigger a split and change the tree head.
+ * The previous key in thisLoc at index leftIndex and all keys with
+ * indexes greater than split in lchild are moved to rchild.
+ */
+ void doBalanceLeftToRight( const DiskLoc thisLoc, int leftIndex, int split,
+ BtreeBucket<V> *l, const DiskLoc lchild,
+ BtreeBucket<V> *r, const DiskLoc rchild,
+ IndexDetails &id, const Ordering &order );
+ /**
+ * Preconditions:
+ * - All preconditions of doBalanceChildren
+ * - The leftIndex and leftIndex + 1 children are packed.
+ * - The leftIndex child has fewer than lowWaterMark bytes.
+ * - split returned by rebalancedSeparatorPos()
+ * Postconditions:
+ * - The key in rchild at index split - l->n - 1 is set as thisLoc's key
+ * at index leftIndex, which may trigger a split and change the tree
+ * head. The previous key in thisLoc at index leftIndex and all keys
+ * with indexes less than split - l->n - 1 in rchild are moved to
+ * lchild.
+ */
+ void doBalanceRightToLeft( const DiskLoc thisLoc, int leftIndex, int split,
+ BtreeBucket<V> *l, const DiskLoc lchild,
+ BtreeBucket<V> *r, const DiskLoc rchild,
+ IndexDetails &id, const Ordering &order );
+
+ /**
+ * Preconditions:
+ * - 0 <= leftIndex < n
+ * - this->canMergeChildren( thisLoc, leftIndex ) == true
+ * Postconditions:
+         *  - The keys of the leftIndex child and the leftIndex + 1 child, along
+         *    with the thisLoc key at leftIndex, will all be placed in the left child.
+ * - The tree may be updated recursively, resulting in 'this' and
+ * thisLoc being invalidated and the tree head being changed.
+ */
+ void doMergeChildren( const DiskLoc thisLoc, int leftIndex, IndexDetails &id, const Ordering &order);
+
+ /**
+ * Preconditions:
+ * - n == 0
+ * - !nextChild.isNull()
+ * Postconditions:
+ * - 'this' and thisLoc are deallocated (and invalidated), any cursors
+ * to them are updated, and the tree head may change.
+ * - nextChild replaces thisLoc in the btree structure.
+ */
+ void replaceWithNextChild( const DiskLoc thisLoc, IndexDetails &id );
+
+ /**
+ * @return true iff the leftIndex and leftIndex + 1 children both exist,
+ * and if their body sizes when packed and the thisLoc key at leftIndex
+ * would fit in a single bucket body.
+ */
+ bool canMergeChildren( const DiskLoc &thisLoc, int leftIndex ) const;
+
+ /**
+ * Preconditions:
+ * - leftIndex and leftIndex + 1 children are packed
+ * - leftIndex or leftIndex + 1 child is below lowWaterMark
+ * @return index of the rebalanced separator; the index value is
+ * determined as if we had a bucket with body
+ * <left bucket keys array>.push( <old separator> ).concat( <right bucket keys array> )
+ * and called splitPos( 0 ) on it.
+ */
+ int rebalancedSeparatorPos( const DiskLoc &thisLoc, int leftIndex ) const;
+
+ /**
+ * Preconditions: thisLoc has a parent
+ * @return parent's index of thisLoc.
+ */
+ int indexInParent( const DiskLoc &thisLoc ) const;
+
+ public:
+ Key keyAt(int i) const {
+ if( i >= this->n )
+ return Key();
+ return Key(this->data + k(i).keyDataOfs());
+ }
+ protected:
+
+ /**
+ * Preconditions:
+ * - This bucket is packed.
+ * - Cannot add a key of size KeyMax to this bucket.
+ * - 0 <= keypos <= n is the position of a new key that will be inserted
+ * - lchild is equal to the existing child at index keypos.
+ * Postconditions:
+ * - The thisLoc bucket is split into two packed buckets, possibly
+ * invalidating the initial position of keypos, with a split key
+ * promoted to the parent. The new key key/recordLoc will be inserted
+ * into one of the split buckets, and lchild/rchild set appropriately.
+ * Splitting may occur recursively, possibly changing the tree head.
+ */
+ void split(const DiskLoc thisLoc, int keypos,
+ const DiskLoc recordLoc, const Key& key,
+ const Ordering& order, const DiskLoc lchild, const DiskLoc rchild, IndexDetails& idx);
+
+ /**
+ * Preconditions:
+ * - 0 <= keypos <= n
+ * - If key / recordLoc are inserted at position keypos, with provided
+ * lchild and rchild, the btree ordering requirements will be
+ * maintained.
+ * - lchild is equal to the existing child at index keypos.
+ * - n == 0 is ok.
+ * Postconditions:
+ * - The key / recordLoc are inserted at position keypos, and the
+ * bucket is split if necessary, which may change the tree head.
+ * - The bucket may be packed or split, invalidating the specified value
+ * of keypos.
+ * This function will always modify thisLoc, but it's marked const because
+         * it commonly relies on the specialized write intent mechanism of basicInsert().
+ */
+ void insertHere(const DiskLoc thisLoc, int keypos,
+ const DiskLoc recordLoc, const Key& key, const Ordering &order,
+ const DiskLoc lchild, const DiskLoc rchild, IndexDetails &idx) const;
+
+ /** bt_insert() is basically just a wrapper around this. */
+ int _insert(const DiskLoc thisLoc, const DiskLoc recordLoc,
+ const Key& key, const Ordering &order, bool dupsAllowed,
+ const DiskLoc lChild, const DiskLoc rChild, IndexDetails &idx) const;
+
+ void insertStepOne(DiskLoc thisLoc, Continuation<V>& c, bool dupsAllowed) const;
+
+ bool find(const IndexDetails& idx, const Key& key, const DiskLoc &recordLoc, const Ordering &order, int& pos, bool assertIfDup) const;
+ static bool customFind( int l, int h, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int direction, DiskLoc &thisLoc, int &keyOfs, pair< DiskLoc, int > &bestParent ) ;
+ static void findLargestKey(const DiskLoc& thisLoc, DiskLoc& largestLoc, int& largestKey);
+ static int customBSONCmp( const BSONObj &l, const BSONObj &rBegin, int rBeginLen, bool rSup, const vector< const BSONElement * > &rEnd, const vector< bool > &rEndInclusive, const Ordering &o, int direction );
+
+ /** If child is non null, set its parent to thisLoc */
+ static void fix(const DiskLoc thisLoc, const DiskLoc child);
+
+ /**
+ * Preconditions:
+ * - 0 <= keypos < n
+ * - If the specified key and recordLoc are placed in keypos of thisLoc,
+ * and lchild and rchild are set, the btree ordering properties will
+ * be maintained.
+ * - rchild == childForPos( keypos + 1 )
+ * - childForPos( keypos ) is referenced elsewhere if nonnull.
+ * Postconditions:
+ * - The key at keypos will be replaced with the specified key and
+ * lchild, potentially splitting this bucket and changing the tree
+ * head.
+ * - childForPos( keypos ) will be orphaned.
+ */
+ void setInternalKey( const DiskLoc thisLoc, int keypos,
+ const DiskLoc recordLoc, const Key &key, const Ordering &order,
+ const DiskLoc lchild, const DiskLoc rchild, IndexDetails &idx);
+
+ /**
+ * Preconditions:
+ * - 0 <= keypos < n
+ * - The keypos or keypos+1 indexed child is non null.
+ * Postconditions:
+ * - The specified key is deleted by replacing it with another key if
+ * possible. This replacement may cause a split and change the tree
+ * head. The replacement key will be deleted from its original
+ * location, potentially causing merges and splits that may invalidate
+ * 'this' and thisLoc and change the tree head.
+ * - If the key cannot be replaced, it will be marked as unused. This
+ * is only expected in legacy btrees.
+ */
+ void deleteInternalKey( const DiskLoc thisLoc, int keypos, IndexDetails &id, const Ordering &order );
+ public:
+ /** simply builds and returns a dup key error message string */
+ static string dupKeyError( const IndexDetails& idx , const Key& key );
+ };
+#pragma pack()
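+
+    /*
+     * Illustrative sketch, not part of the original source; 'idx', 'recordLoc', 'key',
+     * and 'dupsAllowed' are assumed to be in scope. A single key would typically be
+     * inserted through the head bucket, with the ordering derived from the index key
+     * pattern, roughly as:
+     *
+     *   Ordering ordering = Ordering::make( idx.keyPattern() );
+     *   idx.head.btree<V1>()->bt_insert( idx.head, recordLoc, key, ordering,
+     *                                    dupsAllowed, idx );
+     */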
+
+ class FieldRangeVector;
+ class FieldRangeVectorIterator;
+
+ class BtreeCursor : public Cursor {
+ protected:
+ BtreeCursor( NamespaceDetails *_d, int _idxNo, const IndexDetails&, const BSONObj &startKey, const BSONObj &endKey, bool endKeyInclusive, int direction );
+ BtreeCursor( NamespaceDetails *_d, int _idxNo, const IndexDetails& _id, const shared_ptr< FieldRangeVector > &_bounds, int _direction );
+ public:
+ virtual ~BtreeCursor();
+ /** makes an appropriate subclass depending on the index version */
+ static BtreeCursor* make( NamespaceDetails *_d, const IndexDetails&, const BSONObj &startKey, const BSONObj &endKey, bool endKeyInclusive, int direction );
+ static BtreeCursor* make( NamespaceDetails *_d, const IndexDetails& _id, const shared_ptr< FieldRangeVector > &_bounds, int _direction );
+ static BtreeCursor* make( NamespaceDetails *_d, int _idxNo, const IndexDetails&, const BSONObj &startKey, const BSONObj &endKey, bool endKeyInclusive, int direction );
+ static BtreeCursor* make( NamespaceDetails *_d, int _idxNo, const IndexDetails& _id, const shared_ptr< FieldRangeVector > &_bounds, int _direction );
+
+ virtual bool ok() { return !bucket.isNull(); }
+ virtual bool advance();
+ virtual void noteLocation(); // updates keyAtKeyOfs...
+ virtual void checkLocation() = 0;
+ virtual bool supportGetMore() { return true; }
+ virtual bool supportYields() { return true; }
+
+ /**
+ * used for multikey index traversal to avoid sending back dups. see Matcher::matches().
+ * if a multikey index traversal:
+ * if loc has already been sent, returns true.
+ * otherwise, marks loc as sent.
+ * @return false if the loc has not been seen
+ */
+ virtual bool getsetdup(DiskLoc loc) {
+ if( _multikey ) {
+ pair<set<DiskLoc>::iterator, bool> p = _dups.insert(loc);
+ return !p.second;
+ }
+ return false;
+ }
+
+ virtual bool modifiedKeys() const { return _multikey; }
+ virtual bool isMultiKey() const { return _multikey; }
+
+ /*const _KeyNode& _currKeyNode() const {
+ assert( !bucket.isNull() );
+ const _KeyNode& kn = keyNode(keyOfs);
+ assert( kn.isUsed() );
+ return kn;
+ }*/
+
+ /** returns BSONObj() if ofs is out of range */
+ virtual BSONObj keyAt(int ofs) const = 0;
+
+ virtual BSONObj currKey() const = 0;
+ virtual BSONObj indexKeyPattern() { return indexDetails.keyPattern(); }
+
+ virtual void aboutToDeleteBucket(const DiskLoc& b) {
+ if ( bucket == b )
+ keyOfs = -1;
+ }
+
+ virtual DiskLoc currLoc() = 0; // { return !bucket.isNull() ? _currKeyNode().recordLoc : DiskLoc(); }
+ virtual DiskLoc refLoc() { return currLoc(); }
+ virtual Record* _current() { return currLoc().rec(); }
+ virtual BSONObj current() { return BSONObj(_current()); }
+ virtual string toString();
+
+ BSONObj prettyKey( const BSONObj &key ) const {
+ return key.replaceFieldNames( indexDetails.keyPattern() ).clientReadable();
+ }
+
+ virtual BSONObj prettyIndexBounds() const;
+
+ virtual CoveredIndexMatcher *matcher() const { return _matcher.get(); }
+ virtual shared_ptr< CoveredIndexMatcher > matcherPtr() const { return _matcher; }
+
+ virtual void setMatcher( shared_ptr< CoveredIndexMatcher > matcher ) { _matcher = matcher; }
+
+ virtual long long nscanned() { return _nscanned; }
+
+ /** for debugging only */
+ const DiskLoc getBucket() const { return bucket; }
+ int getKeyOfs() const { return keyOfs; }
+
+ // just for unit tests
+ virtual bool curKeyHasChild() = 0;
+
+ protected:
+ /**
+ * Our btrees may (rarely) have "unused" keys when items are deleted.
+ * Skip past them.
+ */
+ virtual bool skipUnusedKeys() = 0;
+
+ bool skipOutOfRangeKeysAndCheckEnd();
+ void skipAndCheck();
+ void checkEnd();
+
+ /** selective audits on construction */
+ void audit();
+
+ virtual void _audit() = 0;
+ virtual DiskLoc _locate(const BSONObj& key, const DiskLoc& loc) = 0;
+ virtual DiskLoc _advance(const DiskLoc& thisLoc, int& keyOfs, int direction, const char *caller) = 0;
+ virtual void _advanceTo(DiskLoc &thisLoc, int &keyOfs, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int direction ) = 0;
+
+ /** set initial bucket */
+ void initWithoutIndependentFieldRanges();
+
+ /** if afterKey is true, we want the first key with values of the keyBegin fields greater than keyBegin */
+ void advanceTo( const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive );
+
+ set<DiskLoc> _dups;
+ NamespaceDetails * const d;
+ const int idxNo;
+ BSONObj startKey;
+ BSONObj endKey;
+ bool _endKeyInclusive;
+ bool _multikey; // this must be updated every getmore batch in case someone added a multikey
+ const IndexDetails& indexDetails;
+ const BSONObj _order;
+ const Ordering _ordering;
+ DiskLoc bucket;
+ int keyOfs;
+ const int _direction; // 1=fwd,-1=reverse
+ BSONObj keyAtKeyOfs; // so we can tell if things moved around on us between the query and the getMore call
+ DiskLoc locAtKeyOfs;
+ const shared_ptr< FieldRangeVector > _bounds;
+ auto_ptr< FieldRangeVectorIterator > _boundsIterator;
+ shared_ptr< CoveredIndexMatcher > _matcher;
+ bool _independentFieldRanges;
+ long long _nscanned;
+ };
+
+ template< class V >
+ struct Continuation {
+ //Continuation(const typename V::Key & k);
+ Continuation(DiskLoc thisLoc, DiskLoc _recordLoc, const BSONObj &_key,
+ Ordering _order, IndexDetails& _idx) :
+ bLoc(thisLoc), recordLoc(_recordLoc), key(_key), order(_order), idx(_idx) {
+ op = Nothing;
+ }
+
+ DiskLoc bLoc;
+ DiskLoc recordLoc;
+ typename V::KeyOwned key;
+ const Ordering order;
+ IndexDetails& idx;
+ enum Op { Nothing, SetUsed, InsertHere } op;
+
+ int pos;
+ const BtreeBucket<V> *b;
+
+ void stepTwo() {
+ if( op == Nothing )
+ return;
+ else if( op == SetUsed ) {
+ const typename V::_KeyNode& kn = b->k(pos);
+ kn.writing().setUsed();
+ }
+ else {
+ b->insertHere(bLoc, pos, recordLoc, key, order, DiskLoc(), DiskLoc(), idx);
+ }
+ }
+ };
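+
+    /*
+     * Illustrative sketch, not part of the original source: how the two-step insert
+     * might be driven by a caller, per the twoStepInsert() comment above. Step one
+     * (locating the insert position, which may page fault) can run under a lesser
+     * lock; the modification itself happens later in stepTwo(). 'idx', 'recordLoc',
+     * 'key', and 'dupsAllowed' are assumed to be in scope, and the exact call site
+     * lies outside this diff.
+     *
+     *   Continuation<V1> c( idx.head, recordLoc, key,
+     *                       Ordering::make( idx.keyPattern() ), idx );
+     *   idx.head.btree<V1>()->twoStepInsert( idx.head, c, dupsAllowed ); // step one
+     *   // ... later, once writes are permitted ...
+     *   c.stepTwo();                                                     // step two
+     */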
+
+ /** Renames the index namespace for this btree's index. */
+ void renameIndexNamespace(const char *oldNs, const char *newNs);
+
+ /**
+ * give us a writable version of the btree bucket (declares write intent).
+ * note it is likely more efficient to declare write intent on something smaller when you can.
+ */
+ template< class V >
+ BtreeBucket<V> * DiskLoc::btreemod() const {
+ assert( _a != -1 );
+ BtreeBucket<V> *b = const_cast< BtreeBucket<V> * >( btree<V>() );
+ return static_cast< BtreeBucket<V>* >( getDur().writingPtr( b, V::BucketSize ) );
+ }
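+
+    /*
+     * Illustrative note, not part of the original source: when only a small piece of
+     * a bucket changes, declaring write intent on just that piece is cheaper than
+     * btreemod(), which declares intent on the whole bucket. For example,
+     * Continuation::stepTwo() above marks a single key used via
+     *
+     *   b->k(pos).writing().setUsed();
+     *
+     * rather than writing the entire bucket.
+     */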
+
+ template< class V >
+ BucketBasics<V>::KeyNode::KeyNode(const BucketBasics<V>& bb, const _KeyNode &k) :
+ prevChildBucket(k.prevChildBucket),
+ recordLoc(k.recordLoc), key(bb.data+k.keyDataOfs())
+ { }
+
+} // namespace mongo;
diff --git a/src/mongo/db/btreebuilder.cpp b/src/mongo/db/btreebuilder.cpp
new file mode 100644
index 00000000000..0ec587a1958
--- /dev/null
+++ b/src/mongo/db/btreebuilder.cpp
@@ -0,0 +1,184 @@
+// btreebuilder.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "db.h"
+#include "btree.h"
+#include "pdfile.h"
+#include "json.h"
+#include "clientcursor.h"
+#include "client.h"
+#include "dbhelpers.h"
+#include "curop-inl.h"
+#include "stats/counters.h"
+#include "dur_commitjob.h"
+#include "btreebuilder.h"
+
+namespace mongo {
+
+ /* --- BtreeBuilder --- */
+
+ template<class V>
+ BtreeBuilder<V>::BtreeBuilder(bool _dupsAllowed, IndexDetails& _idx) :
+ dupsAllowed(_dupsAllowed),
+ idx(_idx),
+ n(0),
+ order( idx.keyPattern() ),
+ ordering( Ordering::make(idx.keyPattern()) ) {
+ first = cur = BtreeBucket<V>::addBucket(idx);
+ b = cur.btreemod<V>();
+ committed = false;
+ }
+
+ template<class V>
+ void BtreeBuilder<V>::newBucket() {
+ DiskLoc L = BtreeBucket<V>::addBucket(idx);
+ b->setTempNext(L);
+ cur = L;
+ b = cur.btreemod<V>();
+ }
+
+ template<class V>
+ void BtreeBuilder<V>::mayCommitProgressDurably() {
+ if ( getDur().commitIfNeeded() ) {
+ b = cur.btreemod<V>();
+ }
+ }
+
+ template<class V>
+ void BtreeBuilder<V>::addKey(BSONObj& _key, DiskLoc loc) {
+
+ auto_ptr< KeyOwned > key( new KeyOwned(_key) );
+ if ( key->dataSize() > BtreeBucket<V>::KeyMax ) {
+ problem() << "Btree::insert: key too large to index, skipping " << idx.indexNamespace()
+ << ' ' << key->dataSize() << ' ' << key->toString() << endl;
+ return;
+ }
+
+ if( !dupsAllowed ) {
+ if( n > 0 ) {
+ int cmp = keyLast->woCompare(*key, ordering);
+ massert( 10288 , "bad key order in BtreeBuilder - server internal error", cmp <= 0 );
+ if( cmp == 0 ) {
+ //if( !dupsAllowed )
+ uasserted( ASSERT_ID_DUPKEY , BtreeBucket<V>::dupKeyError( idx , *keyLast ) );
+ }
+ }
+ }
+
+ if ( ! b->_pushBack(loc, *key, ordering, DiskLoc()) ) {
+ // bucket was full
+ newBucket();
+ b->pushBack(loc, *key, ordering, DiskLoc());
+ }
+ keyLast = key;
+ n++;
+ mayCommitProgressDurably();
+ }
+
+ template<class V>
+ void BtreeBuilder<V>::buildNextLevel(DiskLoc loc) {
+ int levels = 1;
+ while( 1 ) {
+ if( loc.btree<V>()->tempNext().isNull() ) {
+ // only 1 bucket at this level. we are done.
+ getDur().writingDiskLoc(idx.head) = loc;
+ break;
+ }
+ levels++;
+
+ DiskLoc upLoc = BtreeBucket<V>::addBucket(idx);
+ DiskLoc upStart = upLoc;
+ BtreeBucket<V> *up = upLoc.btreemod<V>();
+
+ DiskLoc xloc = loc;
+ while( !xloc.isNull() ) {
+ if ( getDur().commitIfNeeded() ) {
+ b = cur.btreemod<V>();
+ up = upLoc.btreemod<V>();
+ }
+
+ BtreeBucket<V> *x = xloc.btreemod<V>();
+ Key k;
+ DiskLoc r;
+ x->popBack(r,k);
+ bool keepX = ( x->n != 0 );
+ DiskLoc keepLoc = keepX ? xloc : x->nextChild;
+
+ if ( ! up->_pushBack(r, k, ordering, keepLoc) ) {
+ // current bucket full
+ DiskLoc n = BtreeBucket<V>::addBucket(idx);
+ up->setTempNext(n);
+ upLoc = n;
+ up = upLoc.btreemod<V>();
+ up->pushBack(r, k, ordering, keepLoc);
+ }
+
+ DiskLoc nextLoc = x->tempNext(); // get next in chain at current level
+ if ( keepX ) {
+ x->parent = upLoc;
+ }
+ else {
+ if ( !x->nextChild.isNull() ) {
+ DiskLoc ll = x->nextChild;
+ ll.btreemod<V>()->parent = upLoc;
+ //(x->nextChild.btreemod<V>())->parent = upLoc;
+ }
+ x->deallocBucket( xloc, idx );
+ }
+ xloc = nextLoc;
+ }
+
+ loc = upStart;
+ mayCommitProgressDurably();
+ }
+
+ if( levels > 1 )
+ log(2) << "btree levels: " << levels << endl;
+ }
+
+ /** when all addKeys are done, we then build the higher levels of the tree */
+ template<class V>
+ void BtreeBuilder<V>::commit() {
+ buildNextLevel(first);
+ committed = true;
+ }
+
+ template<class V>
+ BtreeBuilder<V>::~BtreeBuilder() {
+ DESTRUCTOR_GUARD(
+ if( !committed ) {
+ log(2) << "Rolling back partially built index space" << endl;
+ DiskLoc x = first;
+ while( !x.isNull() ) {
+ DiskLoc next = x.btree<V>()->tempNext();
+ string ns = idx.indexNamespace();
+ theDataFileMgr._deleteRecord(nsdetails(ns.c_str()), ns.c_str(), x.rec(), x);
+ x = next;
+ getDur().commitIfNeeded();
+ }
+ assert( idx.head.isNull() );
+ log(2) << "done rollback" << endl;
+ }
+ )
+ }
+
+ template class BtreeBuilder<V0>;
+ template class BtreeBuilder<V1>;
+
+}
diff --git a/src/mongo/db/btreebuilder.h b/src/mongo/db/btreebuilder.h
new file mode 100644
index 00000000000..6de55d89299
--- /dev/null
+++ b/src/mongo/db/btreebuilder.h
@@ -0,0 +1,53 @@
+#pragma once
+
+#include "btree.h"
+
+namespace mongo {
+
+ /**
+ * build btree from the bottom up
+ */
+ template< class V >
+ class BtreeBuilder {
+ typedef typename V::KeyOwned KeyOwned;
+ typedef typename V::Key Key;
+
+ bool dupsAllowed;
+ IndexDetails& idx;
+ /** Number of keys added to btree. */
+ unsigned long long n;
+ /** Last key passed to addKey(). */
+ auto_ptr< typename V::KeyOwned > keyLast;
+ BSONObj order;
+ Ordering ordering;
+ /** true iff commit() completed successfully. */
+ bool committed;
+
+ DiskLoc cur, first;
+ BtreeBucket<V> *b;
+
+ void newBucket();
+ void buildNextLevel(DiskLoc);
+ void mayCommitProgressDurably();
+
+ public:
+ ~BtreeBuilder();
+
+ BtreeBuilder(bool _dupsAllowed, IndexDetails& _idx);
+
+ /**
+ * Preconditions: 'key' is > or >= last key passed to this function (depends on _dupsAllowed)
+ * Postconditions: 'key' is added to intermediate storage.
+ */
+ void addKey(BSONObj& key, DiskLoc loc);
+
+ /**
+         * commit work. if not called, the destructor will clean up partially completed work
+         * (in case an exception has occurred).
+ */
+ void commit();
+
+ unsigned long long getn() { return n; }
+ };
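+
+    /*
+     * Illustrative usage sketch, not part of the original source; it assumes the
+     * (key, loc) pairs are supplied already sorted according to the index ordering,
+     * per addKey()'s precondition, and that 'idx' and 'dupsAllowed' are in scope.
+     *
+     *   BtreeBuilder<V1> builder( dupsAllowed, idx );
+     *   for( ... each (key, loc) in sorted order ... )
+     *       builder.addKey( key, loc );
+     *   builder.commit();  // builds the upper tree levels; if commit() is not
+     *                      // reached, the destructor rolls back the partial build
+     */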
+
+}
diff --git a/src/mongo/db/btreecursor.cpp b/src/mongo/db/btreecursor.cpp
new file mode 100644
index 00000000000..7ddd4874ef6
--- /dev/null
+++ b/src/mongo/db/btreecursor.cpp
@@ -0,0 +1,457 @@
+// btreecursor.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "btree.h"
+#include "pdfile.h"
+#include "jsobj.h"
+#include "curop-inl.h"
+#include "queryutil.h"
+
+namespace mongo {
+
+ template< class V >
+ class BtreeCursorImpl : public BtreeCursor {
+ public:
+ typedef typename BucketBasics<V>::KeyNode KeyNode;
+ typedef typename V::Key Key;
+ typedef typename V::_KeyNode _KeyNode;
+
+ BtreeCursorImpl(NamespaceDetails *a, int b, const IndexDetails& c, const BSONObj &d, const BSONObj &e, bool f, int g) :
+ BtreeCursor(a,b,c,d,e,f,g) { }
+ BtreeCursorImpl(NamespaceDetails *_d, int _idxNo, const IndexDetails& _id, const shared_ptr< FieldRangeVector > &_bounds, int _direction ) :
+ BtreeCursor(_d,_idxNo,_id,_bounds,_direction )
+ {
+ pair< DiskLoc, int > noBestParent;
+ indexDetails.head.btree<V>()->customLocate( bucket, keyOfs, startKey, 0, false, _boundsIterator->cmp(), _boundsIterator->inc(), _ordering, _direction, noBestParent );
+ skipAndCheck();
+ dassert( _dups.size() == 0 );
+ }
+
+ virtual DiskLoc currLoc() {
+ if( bucket.isNull() ) return DiskLoc();
+ return currKeyNode().recordLoc;
+ }
+
+ virtual BSONObj keyAt(int ofs) const {
+ assert( !bucket.isNull() );
+ const BtreeBucket<V> *b = bucket.btree<V>();
+ int n = b->getN();
+ if( n == 0xffff ) {
+ throw UserException(15850, "keyAt bucket deleted");
+ }
+ dassert( n >= 0 && n < 10000 );
+ return ofs >= n ? BSONObj() : b->keyNode(ofs).key.toBson();
+ }
+
+ virtual BSONObj currKey() const {
+ assert( !bucket.isNull() );
+ return bucket.btree<V>()->keyNode(keyOfs).key.toBson();
+ }
+
+ virtual bool curKeyHasChild() {
+ return !currKeyNode().prevChildBucket.isNull();
+ }
+
+ bool skipUnusedKeys() {
+ int u = 0;
+ while ( 1 ) {
+ if ( !ok() )
+ break;
+ const _KeyNode& kn = keyNode(keyOfs);
+ if ( kn.isUsed() )
+ break;
+ bucket = _advance(bucket, keyOfs, _direction, "skipUnusedKeys");
+ u++;
+ //don't include unused keys in nscanned
+ //++_nscanned;
+ }
+ if ( u > 10 )
+ OCCASIONALLY log() << "btree unused skipped:" << u << '\n';
+ return u;
+ }
+
+ /* Since the last noteLocation(), our key may have moved around, and that old cached
+ information may thus be stale and wrong (although often it is right). We check
+           that here; if we have moved, we have to search back for our previous position.
+
+ i.e., after operations on the index, the BtreeCursor's cached location info may
+ be invalid. This function ensures validity, so you should call it before using
+ the cursor if other writers have used the database since the last noteLocation
+ call.
+ */
+ void checkLocation() {
+ if ( eof() )
+ return;
+
+ _multikey = d->isMultikey(idxNo);
+
+ if ( keyOfs >= 0 ) {
+ assert( !keyAtKeyOfs.isEmpty() );
+
+ try {
+ // Note keyAt() returns an empty BSONObj if keyOfs is now out of range,
+ // which is possible as keys may have been deleted.
+ int x = 0;
+ while( 1 ) {
+ // if ( b->keyAt(keyOfs).woEqual(keyAtKeyOfs) &&
+ // b->k(keyOfs).recordLoc == locAtKeyOfs ) {
+ if ( keyAt(keyOfs).binaryEqual(keyAtKeyOfs) ) {
+ const _KeyNode& kn = keyNode(keyOfs);
+ if( kn.recordLoc == locAtKeyOfs ) {
+ if ( !kn.isUsed() ) {
+ // we were deleted but still exist as an unused
+ // marker key. advance.
+ skipUnusedKeys();
+ }
+ return;
+ }
+ }
+
+ // we check one key earlier too, in case a key was just deleted. this is
+ // important so that multi updates are reasonably fast.
+ if( keyOfs == 0 || x++ )
+ break;
+ keyOfs--;
+ }
+ }
+ catch(UserException& e) {
+ if( e.getCode() != 15850 )
+ throw;
+ // hack: fall through if bucket was just deleted. should only happen under deleteObjects()
+ DEV log() << "debug info: bucket was deleted" << endl;
+ }
+ }
+
+ /* normally we don't get to here. when we do, old position is no longer
+ valid and we must refind where we left off (which is expensive)
+ */
+
+ /* TODO: Switch to keep indexdetails and do idx.head! */
+ bucket = _locate(keyAtKeyOfs, locAtKeyOfs);
+ RARELY log() << "key seems to have moved in the index, refinding. " << bucket.toString() << endl;
+ if ( ! bucket.isNull() )
+ skipUnusedKeys();
+
+ }
+
+ protected:
+ virtual void _advanceTo(DiskLoc &thisLoc, int &keyOfs, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int direction ) {
+ thisLoc.btree<V>()->advanceTo(thisLoc, keyOfs, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction);
+ }
+ virtual DiskLoc _advance(const DiskLoc& thisLoc, int& keyOfs, int direction, const char *caller) {
+ return thisLoc.btree<V>()->advance(thisLoc, keyOfs, direction, caller);
+ }
+ virtual void _audit() {
+ out() << "BtreeCursor(). dumping head bucket" << endl;
+ indexDetails.head.btree<V>()->dump();
+ }
+ virtual DiskLoc _locate(const BSONObj& key, const DiskLoc& loc) {
+ bool found;
+ return indexDetails.head.btree<V>()->
+ locate(indexDetails, indexDetails.head, key, _ordering, keyOfs, found, loc, _direction);
+ }
+
+ const _KeyNode& keyNode(int keyOfs) const {
+ return bucket.btree<V>()->k(keyOfs);
+ }
+
+ private:
+ const KeyNode currKeyNode() const {
+ assert( !bucket.isNull() );
+ const BtreeBucket<V> *b = bucket.btree<V>();
+ return b->keyNode(keyOfs);
+ }
+ };
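+
+    /*
+     * Illustrative yield pattern, not part of the original source, based on the
+     * checkLocation() comment above: a caller that releases the lock between reads
+     * would save and restore the cursor position roughly as
+     *
+     *   cursor->noteLocation();   // remember the current key / recordLoc
+     *   // ... yield: other writers may modify the index here ...
+     *   cursor->checkLocation();  // relocate if our key moved or was deleted
+     */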
+
+ template class BtreeCursorImpl<V0>;
+ template class BtreeCursorImpl<V1>;
+
+ /*
+ class BtreeCursorV1 : public BtreeCursor {
+ public:
+ typedef BucketBasics<V1>::KeyNode KeyNode;
+ typedef V1::Key Key;
+
+ BtreeCursorV1(NamespaceDetails *a, int b, const IndexDetails& c, const BSONObj &d, const BSONObj &e, bool f, int g) :
+ BtreeCursor(a,b,c,d,e,f,g) { }
+ BtreeCursorV1(NamespaceDetails *_d, int _idxNo, const IndexDetails& _id, const shared_ptr< FieldRangeVector > &_bounds, int _direction) :
+ BtreeCursor(_d,_idxNo,_id,_bounds,_direction)
+ {
+ pair< DiskLoc, int > noBestParent;
+ indexDetails.head.btree<V1>()->customLocate( bucket, keyOfs, startKey, 0, false, _boundsIterator->cmp(), _boundsIterator->inc(), _ordering, _direction, noBestParent );
+ skipAndCheck();
+ dassert( _dups.size() == 0 );
+ }
+
+ virtual DiskLoc currLoc() {
+ if( bucket.isNull() ) return DiskLoc();
+ return currKeyNode().recordLoc;
+ }
+
+ virtual BSONObj currKey() const {
+ assert( !bucket.isNull() );
+ return bucket.btree<V1>()->keyNode(keyOfs).key.toBson();
+ }
+
+ protected:
+ virtual void _advanceTo(DiskLoc &thisLoc, int &keyOfs, const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive, const Ordering &order, int direction ) {
+ thisLoc.btree<V1>()->advanceTo(thisLoc, keyOfs, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, order, direction);
+ }
+ virtual DiskLoc _advance(const DiskLoc& thisLoc, int& keyOfs, int direction, const char *caller) {
+ return thisLoc.btree<V1>()->advance(thisLoc, keyOfs, direction, caller);
+ }
+ virtual void _audit() {
+ out() << "BtreeCursor(). dumping head bucket" << endl;
+ indexDetails.head.btree<V1>()->dump();
+ }
+ virtual DiskLoc _locate(const BSONObj& key, const DiskLoc& loc);
+ virtual const _KeyNode& keyNode(int keyOfs) {
+ return bucket.btree<V1>()->k(keyOfs);
+ }
+
+ private:
+ const KeyNode currKeyNode() const {
+ assert( !bucket.isNull() );
+ const BtreeBucket<V1> *b = bucket.btree<V1>();
+ return b->keyNode(keyOfs);
+ }
+ };*/
+
+ BtreeCursor* BtreeCursor::make(
+ NamespaceDetails *_d, const IndexDetails& _id,
+ const shared_ptr< FieldRangeVector > &_bounds, int _direction )
+ {
+ return make( _d, _d->idxNo( (IndexDetails&) _id), _id, _bounds, _direction );
+ }
+
+ BtreeCursor* BtreeCursor::make(
+ NamespaceDetails *_d, const IndexDetails& _id,
+ const BSONObj &startKey, const BSONObj &endKey, bool endKeyInclusive, int direction)
+ {
+ return make( _d, _d->idxNo( (IndexDetails&) _id), _id, startKey, endKey, endKeyInclusive, direction );
+ }
+
+
+ BtreeCursor* BtreeCursor::make(
+ NamespaceDetails *_d, int _idxNo, const IndexDetails& _id,
+ const BSONObj &startKey, const BSONObj &endKey, bool endKeyInclusive, int direction)
+ {
+ int v = _id.version();
+ BtreeCursor *c = 0;
+ if( v == 1 ) {
+ c = new BtreeCursorImpl<V1>(_d,_idxNo,_id,startKey,endKey,endKeyInclusive,direction);
+ }
+ else if( v == 0 ) {
+ c = new BtreeCursorImpl<V0>(_d,_idxNo,_id,startKey,endKey,endKeyInclusive,direction);
+ }
+ else {
+ uasserted(14800, str::stream() << "unsupported index version " << v);
+ }
+ c->initWithoutIndependentFieldRanges();
+ dassert( c->_dups.size() == 0 );
+ return c;
+ }
+
+ BtreeCursor* BtreeCursor::make(
+ NamespaceDetails *_d, int _idxNo, const IndexDetails& _id,
+ const shared_ptr< FieldRangeVector > &_bounds, int _direction )
+ {
+ int v = _id.version();
+ if( v == 1 )
+ return new BtreeCursorImpl<V1>(_d,_idxNo,_id,_bounds,_direction);
+ if( v == 0 )
+ return new BtreeCursorImpl<V0>(_d,_idxNo,_id,_bounds,_direction);
+ uasserted(14801, str::stream() << "unsupported index version " << v);
+
+ // just check we are in sync with this method
+ dassert( IndexDetails::isASupportedIndexVersionNumber(v) );
+
+ return 0;
+ }
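+
+    /*
+     * Illustrative iteration sketch, not part of the original source; 'd', 'idx',
+     * 'startKey', and 'endKey' are assumed to be in scope. The factory selects the
+     * implementation for the index version, and the caller walks the range with
+     * ok()/advance().
+     *
+     *   shared_ptr<BtreeCursor> c( BtreeCursor::make( d, idx, startKey, endKey,
+     *                                                 true, 1 ) );
+     *   while( c->ok() ) {
+     *       BSONObj k = c->currKey();
+     *       DiskLoc loc = c->currLoc();
+     *       // ... use k / loc ...
+     *       c->advance();
+     *   }
+     */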
+
+ BtreeCursor::BtreeCursor( NamespaceDetails *_d, int _idxNo, const IndexDetails &_id,
+ const BSONObj &_startKey, const BSONObj &_endKey, bool endKeyInclusive, int _direction ) :
+ d(_d), idxNo(_idxNo),
+ startKey( _startKey ),
+ endKey( _endKey ),
+ _endKeyInclusive( endKeyInclusive ),
+ _multikey( d->isMultikey( idxNo ) ),
+ indexDetails( _id ),
+ _order( _id.keyPattern() ),
+ _ordering( Ordering::make( _order ) ),
+ _direction( _direction ),
+ _independentFieldRanges( false ),
+ _nscanned( 0 ) {
+ audit();
+ }
+
+ BtreeCursor::BtreeCursor( NamespaceDetails *_d, int _idxNo, const IndexDetails& _id, const shared_ptr< FieldRangeVector > &_bounds, int _direction )
+ :
+ d(_d), idxNo(_idxNo),
+ _endKeyInclusive( true ),
+ _multikey( d->isMultikey( idxNo ) ),
+ indexDetails( _id ),
+ _order( _id.keyPattern() ),
+ _ordering( Ordering::make( _order ) ),
+ _direction( _direction ),
+ _bounds( ( assert( _bounds.get() ), _bounds ) ),
+ _boundsIterator( new FieldRangeVectorIterator( *_bounds ) ),
+ _independentFieldRanges( true ),
+ _nscanned( 0 ) {
+ audit();
+ startKey = _bounds->startKey();
+ _boundsIterator->advance( startKey ); // handles initialization
+ _boundsIterator->prepDive();
+ bucket = indexDetails.head;
+ keyOfs = 0;
+ }
+
+ /** Properly destroy forward declared class members. */
+ BtreeCursor::~BtreeCursor() {}
+
+ void BtreeCursor::audit() {
+ dassert( d->idxNo((IndexDetails&) indexDetails) == idxNo );
+ }
+
+ void BtreeCursor::initWithoutIndependentFieldRanges() {
+ if ( indexDetails.getSpec().getType() ) {
+ startKey = indexDetails.getSpec().getType()->fixKey( startKey );
+ endKey = indexDetails.getSpec().getType()->fixKey( endKey );
+ }
+ bucket = _locate(startKey, _direction > 0 ? minDiskLoc : maxDiskLoc);
+ if ( ok() ) {
+ _nscanned = 1;
+ }
+ skipUnusedKeys();
+ checkEnd();
+ }
+
+ void BtreeCursor::skipAndCheck() {
+ long long startNscanned = _nscanned;
+ skipUnusedKeys();
+ while( 1 ) {
+ if ( !skipOutOfRangeKeysAndCheckEnd() ) {
+ break;
+ }
+ do {
+ if ( _nscanned > startNscanned + 20 ) {
+ skipUnusedKeys();
+ return;
+ }
+ } while( skipOutOfRangeKeysAndCheckEnd() );
+ if ( !skipUnusedKeys() ) {
+ break;
+ }
+ }
+ }
+
+ bool BtreeCursor::skipOutOfRangeKeysAndCheckEnd() {
+ if ( !ok() ) {
+ return false;
+ }
+ int ret = _boundsIterator->advance( currKey() );
+ if ( ret == -2 ) {
+ bucket = DiskLoc();
+ return false;
+ }
+ else if ( ret == -1 ) {
+ ++_nscanned;
+ return false;
+ }
+ ++_nscanned;
+ advanceTo( currKey(), ret, _boundsIterator->after(), _boundsIterator->cmp(), _boundsIterator->inc() );
+ return true;
+ }
+
+ // Return a value in the set {-1, 0, 1} to represent the sign of parameter i.
+ int sgn( int i ) {
+ if ( i == 0 )
+ return 0;
+ return i > 0 ? 1 : -1;
+ }
+
+ // Check if the current key is beyond endKey.
+ void BtreeCursor::checkEnd() {
+ if ( bucket.isNull() )
+ return;
+ if ( !endKey.isEmpty() ) {
+ int cmp = sgn( endKey.woCompare( currKey(), _order ) );
+ if ( ( cmp != 0 && cmp != _direction ) ||
+ ( cmp == 0 && !_endKeyInclusive ) )
+ bucket = DiskLoc();
+ }
+ }
+
+ void BtreeCursor::advanceTo( const BSONObj &keyBegin, int keyBeginLen, bool afterKey, const vector< const BSONElement * > &keyEnd, const vector< bool > &keyEndInclusive) {
+ _advanceTo( bucket, keyOfs, keyBegin, keyBeginLen, afterKey, keyEnd, keyEndInclusive, _ordering, _direction );
+ }
+
+ bool BtreeCursor::advance() {
+ killCurrentOp.checkForInterrupt();
+ if ( bucket.isNull() )
+ return false;
+
+ bucket = _advance(bucket, keyOfs, _direction, "BtreeCursor::advance");
+
+ if ( !_independentFieldRanges ) {
+ skipUnusedKeys();
+ checkEnd();
+ if ( ok() ) {
+ ++_nscanned;
+ }
+ }
+ else {
+ skipAndCheck();
+ }
+ return ok();
+ }
+
+ void BtreeCursor::noteLocation() {
+ if ( !eof() ) {
+ BSONObj o = currKey().getOwned();
+ keyAtKeyOfs = o;
+ locAtKeyOfs = currLoc();
+ }
+ }
+
+ string BtreeCursor::toString() {
+ string s = string("BtreeCursor ") + indexDetails.indexName();
+ if ( _direction < 0 ) s += " reverse";
+ if ( _bounds.get() && _bounds->size() > 1 ) s += " multi";
+ return s;
+ }
+
+ BSONObj BtreeCursor::prettyIndexBounds() const {
+ if ( !_independentFieldRanges ) {
+ return BSON( "start" << prettyKey( startKey ) << "end" << prettyKey( endKey ) );
+ }
+ else {
+ return _bounds->obj();
+ }
+ }
+
+ /* ----------------------------------------------------------------------------- */
+
+ struct BtreeCursorUnitTest {
+ BtreeCursorUnitTest() {
+ assert( minDiskLoc.compare(maxDiskLoc) < 0 );
+ }
+ } btut;
+
+} // namespace mongo
diff --git a/src/mongo/db/cap.cpp b/src/mongo/db/cap.cpp
new file mode 100644
index 00000000000..a8be2383115
--- /dev/null
+++ b/src/mongo/db/cap.cpp
@@ -0,0 +1,457 @@
+// @file cap.cpp capped collection related
+// the "old" version (<= v1.6)
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "pdfile.h"
+#include "db.h"
+#include "../util/mmap.h"
+#include "../util/hashtab.h"
+#include "../scripting/engine.h"
+#include "btree.h"
+#include <algorithm>
+#include <list>
+#include "json.h"
+#include "clientcursor.h"
+
+/*
+ capped collection layout
+
+ d's below won't exist if things align perfectly:
+
+ extent1 -> extent2 -> extent3
+ ------------------- ----------------------- ---------------------
+ d r r r r r r r r d d r r r r d r r r r r d d r r r r r r r r r d
+ ^ ^
+ oldest newest
+
+ ^cappedFirstDeletedInCurExtent()
+ ^cappedLastDelRecLastExtent()
+ ^cappedListOfAllDeletedRecords()
+*/
+
+
+namespace mongo {
+
+ /* combine adjacent deleted records *for the current extent* of the capped collection
+
+ this is O(n^2) but we call it for capped tables where typically n==1 or 2!
+ (or 3...there will be a little unused sliver at the end of the extent.)
+ */
+ void NamespaceDetails::compact() {
+ assert(capped);
+
+ list<DiskLoc> drecs;
+
+ // Pull out capExtent's DRs from deletedList
+ DiskLoc i = cappedFirstDeletedInCurExtent();
+ for (; !i.isNull() && inCapExtent( i ); i = i.drec()->nextDeleted )
+ drecs.push_back( i );
+
+ getDur().writingDiskLoc( cappedFirstDeletedInCurExtent() ) = i;
+
+ // This is the O(n^2) part.
+ drecs.sort();
+
+ list<DiskLoc>::iterator j = drecs.begin();
+ assert( j != drecs.end() );
+ DiskLoc a = *j;
+ while ( 1 ) {
+ j++;
+ if ( j == drecs.end() ) {
+ DEBUGGING out() << "TEMP: compact adddelrec\n";
+ addDeletedRec(a.drec(), a);
+ break;
+ }
+ DiskLoc b = *j;
+ while ( a.a() == b.a() && a.getOfs() + a.drec()->lengthWithHeaders == b.getOfs() ) {
+ // a & b are adjacent. merge.
+ getDur().writingInt( a.drec()->lengthWithHeaders ) += b.drec()->lengthWithHeaders;
+ j++;
+ if ( j == drecs.end() ) {
+ DEBUGGING out() << "temp: compact adddelrec2\n";
+ addDeletedRec(a.drec(), a);
+ return;
+ }
+ b = *j;
+ }
+ DEBUGGING out() << "temp: compact adddelrec3\n";
+ addDeletedRec(a.drec(), a);
+ a = b;
+ }
+ }
+
+ DiskLoc &NamespaceDetails::cappedFirstDeletedInCurExtent() {
+ if ( cappedLastDelRecLastExtent().isNull() )
+ return cappedListOfAllDeletedRecords();
+ else
+ return cappedLastDelRecLastExtent().drec()->nextDeleted;
+ }
+
+ void NamespaceDetails::cappedCheckMigrate() {
+ // migrate old NamespaceDetails format
+ assert( capped );
+ if ( capExtent.a() == 0 && capExtent.getOfs() == 0 ) {
+ //capFirstNewRecord = DiskLoc();
+ capFirstNewRecord.writing().setInvalid();
+ // put all the DeletedRecords in cappedListOfAllDeletedRecords()
+ for ( int i = 1; i < Buckets; ++i ) {
+ DiskLoc first = deletedList[ i ];
+ if ( first.isNull() )
+ continue;
+ DiskLoc last = first;
+ for (; !last.drec()->nextDeleted.isNull(); last = last.drec()->nextDeleted );
+ last.drec()->nextDeleted.writing() = cappedListOfAllDeletedRecords();
+ cappedListOfAllDeletedRecords().writing() = first;
+ deletedList[i].writing() = DiskLoc();
+ }
+ // NOTE cappedLastDelRecLastExtent() set to DiskLoc() in above
+
+ // Last, in case we're killed before getting here
+ capExtent.writing() = firstExtent;
+ }
+ }
+
+ bool NamespaceDetails::inCapExtent( const DiskLoc &dl ) const {
+ assert( !dl.isNull() );
+ // We could have a rec or drec, doesn't matter.
+ bool res = dl.drec()->myExtentLoc(dl) == capExtent;
+ DEV {
+            // old implementation. this check is temporary, to verify the new implementation behaves the same. new impl should be a little faster.
+ assert( res == (dl.drec()->myExtent( dl ) == capExtent.ext()) );
+ }
+ return res;
+ }
+
+ bool NamespaceDetails::nextIsInCapExtent( const DiskLoc &dl ) const {
+ assert( !dl.isNull() );
+ DiskLoc next = dl.drec()->nextDeleted;
+ if ( next.isNull() )
+ return false;
+ return inCapExtent( next );
+ }
+
+ void NamespaceDetails::advanceCapExtent( const char *ns ) {
+ // We want cappedLastDelRecLastExtent() to be the last DeletedRecord of the prev cap extent
+ // (or DiskLoc() if new capExtent == firstExtent)
+ if ( capExtent == lastExtent )
+ getDur().writingDiskLoc( cappedLastDelRecLastExtent() ) = DiskLoc();
+ else {
+ DiskLoc i = cappedFirstDeletedInCurExtent();
+ for (; !i.isNull() && nextIsInCapExtent( i ); i = i.drec()->nextDeleted );
+ getDur().writingDiskLoc( cappedLastDelRecLastExtent() ) = i;
+ }
+
+ getDur().writingDiskLoc( capExtent ) = theCapExtent()->xnext.isNull() ? firstExtent : theCapExtent()->xnext;
+
+ /* this isn't true if a collection has been renamed...that is ok just used for diagnostics */
+ //dassert( theCapExtent()->ns == ns );
+
+ theCapExtent()->assertOk();
+ getDur().writingDiskLoc( capFirstNewRecord ) = DiskLoc();
+ }
+
+ DiskLoc NamespaceDetails::__capAlloc( int len ) {
+ DiskLoc prev = cappedLastDelRecLastExtent();
+ DiskLoc i = cappedFirstDeletedInCurExtent();
+ DiskLoc ret;
+ for (; !i.isNull() && inCapExtent( i ); prev = i, i = i.drec()->nextDeleted ) {
+ // We need to keep at least one DR per extent in cappedListOfAllDeletedRecords(),
+ // so make sure there's space to create a DR at the end.
+ if ( i.drec()->lengthWithHeaders >= len + 24 ) {
+ ret = i;
+ break;
+ }
+ }
+
+ /* unlink ourself from the deleted list */
+ if ( !ret.isNull() ) {
+ if ( prev.isNull() )
+ cappedListOfAllDeletedRecords().writing() = ret.drec()->nextDeleted;
+ else
+ prev.drec()->nextDeleted.writing() = ret.drec()->nextDeleted;
+ ret.drec()->nextDeleted.writing().setInvalid(); // defensive.
+ assert( ret.drec()->extentOfs < ret.getOfs() );
+ }
+
+ return ret;
+ }
+
+ DiskLoc NamespaceDetails::cappedAlloc(const char *ns, int len) {
+ // signal done allocating new extents.
+ if ( !cappedLastDelRecLastExtent().isValid() )
+ getDur().writingDiskLoc( cappedLastDelRecLastExtent() ) = DiskLoc();
+
+ assert( len < 400000000 );
+ int passes = 0;
+ int maxPasses = ( len / 30 ) + 2; // 30 is about the smallest entry that could go in the oplog
+ if ( maxPasses < 5000 ) {
+            // this is for backwards safety since 5000 was the old value
+ maxPasses = 5000;
+ }
+ DiskLoc loc;
+
+        // delete records until we have room and the max # objects limit is achieved.
+
+ /* this fails on a rename -- that is ok but must keep commented out */
+ //assert( theCapExtent()->ns == ns );
+
+ theCapExtent()->assertOk();
+ DiskLoc firstEmptyExtent;
+ while ( 1 ) {
+ if ( stats.nrecords < max ) {
+ loc = __capAlloc( len );
+ if ( !loc.isNull() )
+ break;
+ }
+
+ // If on first iteration through extents, don't delete anything.
+ if ( !capFirstNewRecord.isValid() ) {
+ advanceCapExtent( ns );
+
+ if ( capExtent != firstExtent )
+ capFirstNewRecord.writing().setInvalid();
+ // else signal done with first iteration through extents.
+ continue;
+ }
+
+ if ( !capFirstNewRecord.isNull() &&
+ theCapExtent()->firstRecord == capFirstNewRecord ) {
+ // We've deleted all records that were allocated on the previous
+ // iteration through this extent.
+ advanceCapExtent( ns );
+ continue;
+ }
+
+ if ( theCapExtent()->firstRecord.isNull() ) {
+ if ( firstEmptyExtent.isNull() )
+ firstEmptyExtent = capExtent;
+ advanceCapExtent( ns );
+ if ( firstEmptyExtent == capExtent ) {
+ maybeComplain( ns, len );
+ return DiskLoc();
+ }
+ continue;
+ }
+
+ DiskLoc fr = theCapExtent()->firstRecord;
+ theDataFileMgr.deleteRecord(ns, fr.rec(), fr, true); // ZZZZZZZZZZZZ
+ compact();
+ if( ++passes > maxPasses ) {
+ log() << "passes ns:" << ns << " len:" << len << " maxPasses: " << maxPasses << '\n';
+ log() << "passes max:" << max << " nrecords:" << stats.nrecords << " datasize: " << stats.datasize << endl;
+ massert( 10345 , "passes >= maxPasses in capped collection alloc", false );
+ }
+ }
+
+ // Remember first record allocated on this iteration through capExtent.
+ if ( capFirstNewRecord.isValid() && capFirstNewRecord.isNull() )
+ getDur().writingDiskLoc(capFirstNewRecord) = loc;
+
+ return loc;
+ }
+
+ void NamespaceDetails::dumpExtents() {
+ cout << "dumpExtents:" << endl;
+ for ( DiskLoc i = firstExtent; !i.isNull(); i = i.ext()->xnext ) {
+ Extent *e = i.ext();
+ stringstream ss;
+ e->dump(ss);
+ cout << ss.str() << endl;
+ }
+ }
+
+ void NamespaceDetails::cappedDumpDelInfo() {
+ cout << "dl[0]: " << deletedList[0].toString() << endl;
+ for( DiskLoc z = deletedList[0]; !z.isNull(); z = z.drec()->nextDeleted ) {
+ cout << " drec:" << z.toString() << " dreclen:" << hex << z.drec()->lengthWithHeaders <<
+ " ext:" << z.drec()->myExtent(z)->myLoc.toString() << endl;
+ }
+ cout << "dl[1]: " << deletedList[1].toString() << endl;
+ }
+
+ void NamespaceDetails::cappedTruncateLastDelUpdate() {
+ if ( capExtent == firstExtent ) {
+ // Only one extent of the collection is in use, so there
+ // is no deleted record in a previous extent, so nullify
+ // cappedLastDelRecLastExtent().
+ cappedLastDelRecLastExtent().writing() = DiskLoc();
+ }
+ else {
+ // Scan through all deleted records in the collection
+ // until the last deleted record for the extent prior
+ // to the new capExtent is found. Then set
+ // cappedLastDelRecLastExtent() to that deleted record.
+ DiskLoc i = cappedListOfAllDeletedRecords();
+ for( ;
+ !i.drec()->nextDeleted.isNull() &&
+ !inCapExtent( i.drec()->nextDeleted );
+ i = i.drec()->nextDeleted );
+ // In our capped storage model, every extent must have at least one
+ // deleted record. Here we check that 'i' is not the last deleted
+ // record. (We expect that there will be deleted records in the new
+ // capExtent as well.)
+ assert( !i.drec()->nextDeleted.isNull() );
+ cappedLastDelRecLastExtent().writing() = i;
+ }
+ }
+
+ void NamespaceDetails::cappedTruncateAfter(const char *ns, DiskLoc end, bool inclusive) {
+ DEV assert( this == nsdetails(ns) );
+ assert( cappedLastDelRecLastExtent().isValid() );
+
+ // We iteratively remove the newest document until the newest document
+ // is 'end', then we remove 'end' if requested.
+ bool foundLast = false;
+ while( 1 ) {
+ if ( foundLast ) {
+ // 'end' has been found and removed, so break.
+ break;
+ }
+ getDur().commitIfNeeded();
+ // 'curr' will point to the newest document in the collection.
+ DiskLoc curr = theCapExtent()->lastRecord;
+ assert( !curr.isNull() );
+ if ( curr == end ) {
+ if ( inclusive ) {
+ // 'end' has been found, so break next iteration.
+ foundLast = true;
+ }
+ else {
+ // 'end' has been found, so break.
+ break;
+ }
+ }
+
+ // TODO The algorithm used in this function cannot generate an
+ // empty collection, but we could call emptyCappedCollection() in
+ // this case instead of asserting.
+ uassert( 13415, "emptying the collection is not allowed", stats.nrecords > 1 );
+
+ // Delete the newest record, and coalesce the new deleted
+ // record with existing deleted records.
+ theDataFileMgr.deleteRecord(ns, curr.rec(), curr, true);
+ compact();
+
+ // This is the case where we have not yet had to remove any
+ // documents to make room for other documents, and we are allocating
+ // documents from free space in fresh extents instead of reusing
+ // space from familiar extents.
+ if ( !capLooped() ) {
+
+ // We just removed the last record from the 'capExtent', and
+ // the 'capExtent' can't be empty, so we set 'capExtent' to
+ // capExtent's prev extent.
+ if ( theCapExtent()->lastRecord.isNull() ) {
+ assert( !theCapExtent()->xprev.isNull() );
+ // NOTE Because we didn't delete the last document, and
+ // capLooped() is false, capExtent is not the first extent
+ // so xprev will be nonnull.
+ capExtent.writing() = theCapExtent()->xprev;
+ theCapExtent()->assertOk();
+
+ // update cappedLastDelRecLastExtent()
+ cappedTruncateLastDelUpdate();
+ }
+ continue;
+ }
+
+ // This is the case where capLooped() is true, and we just deleted
+ // from capExtent, and we just deleted capFirstNewRecord, which was
+ // the last record on the fresh side of capExtent.
+ // NOTE In this comparison, curr and potentially capFirstNewRecord
+ // may point to invalid data, but we can still compare the
+ // references themselves.
+ if ( curr == capFirstNewRecord ) {
+
+ // Set 'capExtent' to the first nonempty extent prior to the
+ // initial capExtent. There must be such an extent because we
+ // have not deleted the last document in the collection. It is
+ // possible that all extents other than the capExtent are empty.
+ // In this case we will keep the initial capExtent and specify
+ // that all records contained within are on the fresh rather than
+ // stale side of the extent.
+ DiskLoc newCapExtent = capExtent;
+ do {
+ // Find the previous extent, looping if necessary.
+ newCapExtent = ( newCapExtent == firstExtent ) ? lastExtent : newCapExtent.ext()->xprev;
+ newCapExtent.ext()->assertOk();
+ }
+ while ( newCapExtent.ext()->firstRecord.isNull() );
+ capExtent.writing() = newCapExtent;
+
+ // Place all documents in the new capExtent on the fresh side
+ // of the capExtent by setting capFirstNewRecord to the first
+ // document in the new capExtent.
+ capFirstNewRecord.writing() = theCapExtent()->firstRecord;
+
+ // update cappedLastDelRecLastExtent()
+ cappedTruncateLastDelUpdate();
+ }
+ }
+ }
+
+ void NamespaceDetails::emptyCappedCollection( const char *ns ) {
+ DEV assert( this == nsdetails(ns) );
+ massert( 13424, "collection must be capped", capped );
+ massert( 13425, "background index build in progress", !indexBuildInProgress );
+ massert( 13426, "indexes present", nIndexes == 0 );
+
+ // Clear all references to this namespace.
+ ClientCursor::invalidate( ns );
+ NamespaceDetailsTransient::clearForPrefix( ns );
+
+ // Get a writeable reference to 'this' and reset all pertinent
+ // attributes.
+ NamespaceDetails *t = writingWithoutExtra();
+
+ t->cappedLastDelRecLastExtent() = DiskLoc();
+ t->cappedListOfAllDeletedRecords() = DiskLoc();
+
+ // preserve firstExtent/lastExtent
+ t->capExtent = firstExtent;
+ t->stats.datasize = stats.nrecords = 0;
+ // lastExtentSize preserve
+ // nIndexes preserve 0
+ // capped preserve true
+ // max preserve
+ t->paddingFactor = 1.0;
+ t->flags = 0;
+ t->capFirstNewRecord = DiskLoc();
+ t->capFirstNewRecord.setInvalid();
+ t->cappedLastDelRecLastExtent().setInvalid();
+ // dataFileVersion preserve
+ // indexFileVersion preserve
+ t->multiKeyIndexBits = 0;
+ t->reservedA = 0;
+ t->extraOffset = 0;
+ // indexBuildInProgress preserve 0
+ memset(t->reserved, 0, sizeof(t->reserved));
+
+ // Reset all existing extents and recreate the deleted list.
+ for( DiskLoc ext = firstExtent; !ext.isNull(); ext = ext.ext()->xnext ) {
+ DiskLoc prev = ext.ext()->xprev;
+ DiskLoc next = ext.ext()->xnext;
+ DiskLoc empty = ext.ext()->reuse( ns, true );
+ ext.ext()->xprev.writing() = prev;
+ ext.ext()->xnext.writing() = next;
+ addDeletedRec( empty.drec(), empty );
+ }
+ }
+
+}
diff --git a/src/mongo/db/client.cpp b/src/mongo/db/client.cpp
new file mode 100644
index 00000000000..92b78d87ee5
--- /dev/null
+++ b/src/mongo/db/client.cpp
@@ -0,0 +1,697 @@
+// client.cpp
+
+/**
+* Copyright (C) 2009 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+/* Client represents a connection to the database (the server-side) and corresponds
+ to an open socket (or logical connection if pooling on sockets) from a client.
+*/
+
+#include "pch.h"
+#include "db.h"
+#include "client.h"
+#include "curop-inl.h"
+#include "json.h"
+#include "security.h"
+#include "commands.h"
+#include "instance.h"
+#include "../s/d_logic.h"
+#include "dbwebserver.h"
+#include "../util/mongoutils/html.h"
+#include "../util/mongoutils/checksum.h"
+#include "../util/file_allocator.h"
+#include "repl/rs.h"
+#include "../scripting/engine.h"
+
+namespace mongo {
+
+ Client* Client::syncThread;
+ mongo::mutex Client::clientsMutex("clientsMutex");
+ set<Client*> Client::clients; // always be in clientsMutex when manipulating this
+
+ TSP_DEFINE(Client, currentClient)
+
+#if defined(_DEBUG)
+ struct StackChecker;
+ ThreadLocalValue<StackChecker *> checker;
+
+ struct StackChecker {
+ enum { SZ = 256 * 1024 };
+ char buf[SZ];
+ StackChecker() {
+ checker.set(this);
+ }
+ void init() {
+ memset(buf, 42, sizeof(buf));
+ }
+ static void check(const char *tname) {
+ static int max;
+ StackChecker *sc = checker.get();
+ const char *p = sc->buf;
+ int i = 0;
+ for( ; i < SZ; i++ ) {
+ if( p[i] != 42 )
+ break;
+ }
+ int z = SZ-i;
+ if( z > max ) {
+ max = z;
+ log() << "thread " << tname << " stack usage was " << z << " bytes" << endl;
+ }
+ wassert( i > 16000 );
+ }
+ };
+#endif
+
+ /* each thread which does db operations has a Client object in TLS.
+ call this when your thread starts.
+ */
+#if defined _DEBUG
+ static unsigned long long nThreads = 0;
+ void assertStartingUp() {
+ assert( nThreads <= 1 );
+ }
+#else
+ void assertStartingUp() { }
+#endif
+
+ Client& Client::initThread(const char *desc, AbstractMessagingPort *mp) {
+#if defined(_DEBUG)
+ {
+ nThreads++; // never decremented. this is for casi class asserts
+ if( sizeof(void*) == 8 ) {
+ StackChecker sc;
+ sc.init();
+ }
+ }
+#endif
+ assert( currentClient.get() == 0 );
+ Client *c = new Client(desc, mp);
+ currentClient.reset(c);
+ mongo::lastError.initThread();
+ return *c;
+ }
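+    // Illustrative sketch only (thread name hypothetical): a background thread is
+    // expected to set up and tear down its Client roughly like this --
+    //
+    //     void myThreadRun() {
+    //         Client::initThread("mythread");   // installs a Client in TLS for this thread
+    //         ...                               // do db operations via cc()
+    //         cc().shutdown();                  // must be called before the thread exits
+    //     }
+    //
+    // ClientCursorMonitor::run() in clientcursor.cpp follows this same pattern.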
+
+ Client::Client(const char *desc, AbstractMessagingPort *p) :
+ _context(0),
+ _shutdown(false),
+ _desc(desc),
+ _god(0),
+ _lastOp(0),
+ _mp(p),
+ _sometimes(0)
+ {
+ _hasWrittenThisPass = false;
+ _pageFaultRetryableSection = 0;
+ _connectionId = setThreadName(desc);
+ _curOp = new CurOp( this );
+#ifndef _WIN32
+ stringstream temp;
+ temp << hex << showbase << pthread_self();
+ _threadId = temp.str();
+#endif
+ scoped_lock bl(clientsMutex);
+ clients.insert(this);
+ }
+
+ Client::~Client() {
+ _god = 0;
+
+ if ( _context )
+ error() << "Client::~Client _context should be null but is not; client:" << _desc << endl;
+
+ if ( ! _shutdown ) {
+ error() << "Client::shutdown not called: " << _desc << endl;
+ }
+
+ if ( ! inShutdown() ) {
+ // we can't clean up safely once we're in shutdown
+ scoped_lock bl(clientsMutex);
+ if ( ! _shutdown )
+ clients.erase(this);
+ delete _curOp;
+ }
+ }
+
+ bool Client::shutdown() {
+#if defined(_DEBUG)
+ {
+ if( sizeof(void*) == 8 ) {
+ StackChecker::check( desc() );
+ }
+ }
+#endif
+ _shutdown = true;
+ if ( inShutdown() )
+ return false;
+ {
+ scoped_lock bl(clientsMutex);
+ clients.erase(this);
+ if ( isSyncThread() ) {
+ syncThread = 0;
+ }
+ }
+
+ return false;
+ }
+
+ BSONObj CachedBSONObj::_tooBig = fromjson("{\"$msg\":\"query not recording (too large)\"}");
+ Client::Context::Context( string ns , Database * db, bool doauth ) :
+ _client( currentClient.get() ),
+ _oldContext( _client->_context ),
+ _path( mongo::dbpath ), // is this right? could be a different db? may need a dassert for this
+ _justCreated(false),
+ _ns( ns ),
+ _db(db)
+ {
+ assert( db == 0 || db->isOk() );
+ _client->_context = this;
+ checkNsAccess( doauth );
+ _client->checkLocks();
+ }
+
+ Client::Context::Context(const string& ns, string path , bool doauth ) :
+ _client( currentClient.get() ),
+ _oldContext( _client->_context ),
+ _path( path ),
+ _justCreated(false), // set for real in finishInit
+ _ns( ns ),
+ _db(0)
+ {
+ _finishInit( doauth );
+ _client->checkLocks();
+ }
+
+ /** "read lock, and set my context, all in one operation"
+ * This handles (if not recursively locked) opening an unopened database.
+ */
+ Client::ReadContext::ReadContext(const string& ns, string path, bool doauth ) {
+ {
+ lk.reset( new _LockCollectionForReading(ns) );
+ Database *db = dbHolder().get(ns, path);
+ if( db ) {
+ c.reset( new Context(path, ns, db, doauth) );
+ return;
+ }
+ }
+
+ // we usually don't get here, so doesn't matter how fast this part is
+ {
+ int x = d.dbMutex.getState();
+ if( x > 0 ) {
+ // write locked already
+ DEV RARELY log() << "write locked on ReadContext construction " << ns << endl;
+ c.reset( new Context(ns, path, doauth) );
+ }
+ else if( x == -1 ) {
+ lk.reset(0);
+ {
+ writelock w;
+ Context c(ns, path, doauth);
+ }
+ // db could be closed at this interim point -- that is ok, we will throw, and don't mind throwing.
+ lk.reset( new _LockCollectionForReading(ns) );
+ c.reset( new Context(ns, path, doauth) );
+ }
+ else {
+ assert( x < -1 );
+ uasserted(15928, str::stream() << "can't open a database from a nested read lock " << ns);
+ }
+ }
+
+ // todo: are receipts of thousands of queries for a nonexisting database a potential
+ // cause of bad performance due to the write lock acquisition above? let's fix that.
+ // it would be easy to first check that there is at least a .ns file, or something similar.
+ }
+
+ void Client::Context::checkNotStale() const {
+ switch ( _client->_curOp->getOp() ) {
+            case dbGetMore: // getMore's are special and should be handled elsewhere
+ case dbUpdate: // update & delete check shard version in instance.cpp, so don't check here as well
+ case dbDelete:
+ break;
+ default: {
+ string errmsg;
+ if ( ! shardVersionOk( _ns , errmsg ) ) {
+ ostringstream os;
+ os << "[" << _ns << "] shard version not ok in Client::Context: " << errmsg;
+ throw SendStaleConfigException( _ns, os.str() );
+ }
+ }
+ }
+ }
+
+ // invoked from ReadContext
+ Client::Context::Context(const string& path, const string& ns, Database *db , bool doauth) :
+ _client( currentClient.get() ),
+ _oldContext( _client->_context ),
+ _path( path ),
+ _justCreated(false),
+ _ns( ns ),
+ _db(db)
+ {
+ assert(_db);
+ checkNotStale();
+ _client->_context = this;
+ _client->_curOp->enter( this );
+ checkNsAccess( doauth, d.dbMutex.getState() );
+ _client->checkLocks();
+ }
+
+ void Client::Context::_finishInit( bool doauth ) {
+ int lockState = d.dbMutex.getState();
+ assert( lockState );
+ if ( lockState > 0 && FileAllocator::get()->hasFailed() ) {
+ uassert(14031, "Can't take a write lock while out of disk space", false);
+ }
+
+ _db = dbHolderUnchecked().getOrCreate( _ns , _path , _justCreated );
+ assert(_db);
+ checkNotStale();
+ _client->_context = this;
+ _client->_curOp->enter( this );
+ checkNsAccess( doauth, lockState );
+ }
+
+ void Client::Context::_auth( int lockState ) {
+ if ( _client->_ai.isAuthorizedForLock( _db->name , lockState ) )
+ return;
+
+ // before we assert, do a little cleanup
+ _client->_context = _oldContext; // note: _oldContext may be null
+
+ stringstream ss;
+ ss << "unauthorized db:" << _db->name << " lock type:" << lockState << " client:" << _client->clientAddress();
+ uasserted( 10057 , ss.str() );
+ }
+
+ Client::Context::~Context() {
+ DEV assert( _client == currentClient.get() );
+ _client->_curOp->leave( this );
+ _client->_context = _oldContext; // note: _oldContext may be null
+ }
+
+ bool Client::Context::inDB( const string& db , const string& path ) const {
+ if ( _path != path )
+ return false;
+
+ if ( db == _ns )
+ return true;
+
+ string::size_type idx = _ns.find( db );
+ if ( idx != 0 )
+ return false;
+
+ return _ns[db.size()] == '.';
+ }
+
+ void Client::Context::checkNsAccess( bool doauth, int lockState ) {
+ if ( 0 ) { // SERVER-4276
+ uassert( 15929, "client access to index backing namespace prohibited", NamespaceString::normal( _ns.c_str() ) );
+ }
+ if ( doauth ) {
+ _auth( lockState );
+ }
+ }
+
+ void Client::appendLastOp( BSONObjBuilder& b ) const {
+ // _lastOp is never set if replication is off
+ if( theReplSet || ! _lastOp.isNull() ) {
+ b.appendTimestamp( "lastOp" , _lastOp.asDate() );
+ }
+ }
+
+ string Client::clientAddress(bool includePort) const {
+ if( _curOp )
+ return _curOp->getRemoteString(includePort);
+ return "";
+ }
+
+ string Client::toString() const {
+ stringstream ss;
+ if ( _curOp )
+ ss << _curOp->infoNoauth().jsonString();
+ return ss.str();
+ }
+
+ string sayClientState() {
+ Client* c = currentClient.get();
+ if ( !c )
+ return "no client";
+ return c->toString();
+ }
+
+ Client* curopWaitingForLock( int type ) {
+ Client * c = currentClient.get();
+ assert( c );
+ CurOp * co = c->curop();
+ if ( co ) {
+ co->waitingForLock( type );
+ }
+ return c;
+ }
+ void curopGotLock(Client *c) {
+ assert(c);
+ CurOp * co = c->curop();
+ if ( co )
+ co->gotLock();
+ }
+
+ void KillCurrentOp::interruptJs( AtomicUInt *op ) {
+ if ( !globalScriptEngine )
+ return;
+ if ( !op ) {
+ globalScriptEngine->interruptAll();
+ }
+ else {
+ globalScriptEngine->interrupt( *op );
+ }
+ }
+
+ void KillCurrentOp::killAll() {
+ _globalKill = true;
+ interruptJs( 0 );
+ }
+
+ void KillCurrentOp::kill(AtomicUInt i) {
+ bool found = false;
+ {
+ scoped_lock l( Client::clientsMutex );
+ for( set< Client* >::const_iterator j = Client::clients.begin(); !found && j != Client::clients.end(); ++j ) {
+ for( CurOp *k = ( *j )->curop(); !found && k; k = k->parent() ) {
+ if ( k->opNum() == i ) {
+ k->kill();
+ for( CurOp *l = ( *j )->curop(); l != k; l = l->parent() ) {
+ l->kill();
+ }
+ found = true;
+ }
+ }
+ }
+ }
+ if ( found ) {
+ interruptJs( &i );
+ }
+ }
+
+ void Client::gotHandshake( const BSONObj& o ) {
+ BSONObjIterator i(o);
+
+ {
+ BSONElement id = i.next();
+ assert( id.type() );
+ _remoteId = id.wrap( "_id" );
+ }
+
+ BSONObjBuilder b;
+ while ( i.more() )
+ b.append( i.next() );
+
+ b.appendElementsUnique( _handshake );
+
+ _handshake = b.obj();
+
+ if (theReplSet && o.hasField("member")) {
+ theReplSet->ghost->associateSlave(_remoteId, o["member"].Int());
+ }
+ }
+
+ ClientBasic* ClientBasic::getCurrent() {
+ return currentClient.get();
+ }
+
+ class HandshakeCmd : public Command {
+ public:
+ void help(stringstream& h) const { h << "internal"; }
+ HandshakeCmd() : Command( "handshake" ) {}
+ virtual LockType locktype() const { return NONE; }
+ virtual bool slaveOk() const { return true; }
+ virtual bool adminOnly() const { return false; }
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ Client& c = cc();
+ c.gotHandshake( cmdObj );
+ return 1;
+ }
+
+ } handshakeCmd;
+
+ class ClientListPlugin : public WebStatusPlugin {
+ public:
+ ClientListPlugin() : WebStatusPlugin( "clients" , 20 ) {}
+ virtual void init() {}
+
+ virtual void run( stringstream& ss ) {
+ using namespace mongoutils::html;
+
+ ss << "\n<table border=1 cellpadding=2 cellspacing=0>";
+ ss << "<tr align='left'>"
+ << th( a("", "Connections to the database, both internal and external.", "Client") )
+ << th( a("http://www.mongodb.org/display/DOCS/Viewing+and+Terminating+Current+Operation", "", "OpId") )
+ << "<th>Active</th>"
+ << "<th>LockType</th>"
+ << "<th>Waiting</th>"
+ << "<th>SecsRunning</th>"
+ << "<th>Op</th>"
+ << th( a("http://www.mongodb.org/display/DOCS/Developer+FAQ#DeveloperFAQ-What%27sa%22namespace%22%3F", "", "Namespace") )
+ << "<th>Query</th>"
+ << "<th>client</th>"
+ << "<th>msg</th>"
+ << "<th>progress</th>"
+
+ << "</tr>\n";
+ {
+ scoped_lock bl(Client::clientsMutex);
+ for( set<Client*>::iterator i = Client::clients.begin(); i != Client::clients.end(); i++ ) {
+ Client *c = *i;
+ CurOp& co = *(c->curop());
+ ss << "<tr><td>" << c->desc() << "</td>";
+
+ tablecell( ss , co.opNum() );
+ tablecell( ss , co.active() );
+ {
+ int lt = co.getLockType();
+ if( lt == -1 ) tablecell(ss, "R");
+ else if( lt == 1 ) tablecell(ss, "W");
+ else
+ tablecell( ss , lt);
+ }
+ tablecell( ss , co.isWaitingForLock() );
+ if ( co.active() )
+ tablecell( ss , co.elapsedSeconds() );
+ else
+ tablecell( ss , "" );
+ tablecell( ss , co.getOp() );
+ tablecell( ss , co.getNS() );
+ if ( co.haveQuery() ) {
+ tablecell( ss , co.query() );
+ }
+ else
+ tablecell( ss , "" );
+ tablecell( ss , co.getRemoteString() );
+
+ tablecell( ss , co.getMessage() );
+ tablecell( ss , co.getProgressMeter().toString() );
+
+
+ ss << "</tr>\n";
+ }
+ }
+ ss << "</table>\n";
+
+ }
+
+ } clientListPlugin;
+
+ int Client::recommendedYieldMicros( int * writers , int * readers ) {
+ int num = 0;
+ int w = 0;
+ int r = 0;
+ {
+ scoped_lock bl(clientsMutex);
+ for ( set<Client*>::iterator i=clients.begin(); i!=clients.end(); ++i ) {
+ Client* c = *i;
+ if ( c->curop()->isWaitingForLock() ) {
+ num++;
+ if ( c->curop()->getLockType() > 0 )
+ w++;
+ else
+ r++;
+ }
+ }
+ }
+
+ if ( writers )
+ *writers = w;
+ if ( readers )
+ *readers = r;
+
+ int time = r * 100;
+ time += w * 500;
+
+ time = min( time , 1000000 );
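+        // e.g. 3 clients waiting on the read lock and 1 waiting on the write lock
+        // gives 3*100 + 1*500 = 800 micros of suggested yield time.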
+
+ // if there has been a kill request for this op - we should yield to allow the op to stop
+        // checkForInterruptNoAssert() returns an empty string when we are not interrupted
+ if ( *killCurrentOp.checkForInterruptNoAssert() ) {
+ return 100;
+ }
+
+ return time;
+ }
+
+ int Client::getActiveClientCount( int& writers, int& readers ) {
+ writers = 0;
+ readers = 0;
+
+ scoped_lock bl(clientsMutex);
+ for ( set<Client*>::iterator i=clients.begin(); i!=clients.end(); ++i ) {
+ Client* c = *i;
+ if ( ! c->curop()->active() )
+ continue;
+
+ int l = c->curop()->getLockType();
+ if ( l > 0 )
+ writers++;
+ else if ( l < 0 )
+ readers++;
+
+ }
+
+ return writers + readers;
+ }
+
+ void OpDebug::reset() {
+ extra.reset();
+
+ op = 0;
+ iscommand = false;
+ ns = "";
+ query = BSONObj();
+ updateobj = BSONObj();
+
+ cursorid = -1;
+ ntoreturn = -1;
+ ntoskip = -1;
+ exhaust = false;
+
+ nscanned = -1;
+ idhack = false;
+ scanAndOrder = false;
+ moved = false;
+ fastmod = false;
+ fastmodinsert = false;
+ upsert = false;
+ keyUpdates = 0; // unsigned, so -1 not possible
+
+ exceptionInfo.reset();
+
+ executionTime = 0;
+ nreturned = -1;
+ responseLength = -1;
+ }
+
+
+#define OPDEBUG_TOSTRING_HELP(x) if( x >= 0 ) s << " " #x ":" << (x)
+#define OPDEBUG_TOSTRING_HELP_BOOL(x) if( x ) s << " " #x ":" << (x)
+ string OpDebug::toString() const {
+ StringBuilder s( ns.size() + 64 );
+ if ( iscommand )
+ s << "command ";
+ else
+ s << opToString( op ) << ' ';
+ s << ns.toString();
+
+ if ( ! query.isEmpty() ) {
+ if ( iscommand )
+ s << " command: ";
+ else
+ s << " query: ";
+ s << query.toString();
+ }
+
+ if ( ! updateobj.isEmpty() ) {
+ s << " update: ";
+ updateobj.toString( s );
+ }
+
+ OPDEBUG_TOSTRING_HELP( cursorid );
+ OPDEBUG_TOSTRING_HELP( ntoreturn );
+ OPDEBUG_TOSTRING_HELP( ntoskip );
+ OPDEBUG_TOSTRING_HELP_BOOL( exhaust );
+
+ OPDEBUG_TOSTRING_HELP( nscanned );
+ OPDEBUG_TOSTRING_HELP_BOOL( idhack );
+ OPDEBUG_TOSTRING_HELP_BOOL( scanAndOrder );
+ OPDEBUG_TOSTRING_HELP_BOOL( moved );
+ OPDEBUG_TOSTRING_HELP_BOOL( fastmod );
+ OPDEBUG_TOSTRING_HELP_BOOL( fastmodinsert );
+ OPDEBUG_TOSTRING_HELP_BOOL( upsert );
+ OPDEBUG_TOSTRING_HELP( keyUpdates );
+
+ if ( extra.len() )
+ s << " " << extra.str();
+
+ if ( ! exceptionInfo.empty() ) {
+ s << " exception: " << exceptionInfo.msg;
+ if ( exceptionInfo.code )
+ s << " code:" << exceptionInfo.code;
+ }
+
+ OPDEBUG_TOSTRING_HELP( nreturned );
+ if ( responseLength )
+ s << " reslen:" << responseLength;
+ s << " " << executionTime << "ms";
+
+ return s.str();
+ }
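+    // Illustrative only -- a typical line produced by toString() for a slow query
+    // might look roughly like:
+    //     query test.foo query: { x: 1.0 } nscanned:1000 nreturned:1 reslen:56 103ms
+    // (numeric fields left at their default -1 are omitted by the macros above).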
+
+#define OPDEBUG_APPEND_NUMBER(x) if( x != -1 ) b.append( #x , (x) )
+#define OPDEBUG_APPEND_BOOL(x) if( x ) b.appendBool( #x , (x) )
+ void OpDebug::append( const CurOp& curop, BSONObjBuilder& b ) const {
+ b.append( "op" , iscommand ? "command" : opToString( op ) );
+ b.append( "ns" , ns.toString() );
+ if ( ! query.isEmpty() )
+ b.append( iscommand ? "command" : "query" , query );
+ else if ( ! iscommand && curop.haveQuery() )
+ curop.appendQuery( b , "query" );
+
+ if ( ! updateobj.isEmpty() )
+ b.append( "updateobj" , updateobj );
+
+ OPDEBUG_APPEND_NUMBER( cursorid );
+ OPDEBUG_APPEND_NUMBER( ntoreturn );
+ OPDEBUG_APPEND_NUMBER( ntoskip );
+ OPDEBUG_APPEND_BOOL( exhaust );
+
+ OPDEBUG_APPEND_NUMBER( nscanned );
+ OPDEBUG_APPEND_BOOL( idhack );
+ OPDEBUG_APPEND_BOOL( scanAndOrder );
+ OPDEBUG_APPEND_BOOL( moved );
+ OPDEBUG_APPEND_BOOL( fastmod );
+ OPDEBUG_APPEND_BOOL( fastmodinsert );
+ OPDEBUG_APPEND_BOOL( upsert );
+ OPDEBUG_APPEND_NUMBER( keyUpdates );
+
+ if ( ! exceptionInfo.empty() )
+ exceptionInfo.append( b , "exception" , "exceptionCode" );
+
+ OPDEBUG_APPEND_NUMBER( nreturned );
+ OPDEBUG_APPEND_NUMBER( responseLength );
+ b.append( "millis" , executionTime );
+
+ }
+
+}
diff --git a/src/mongo/db/client.h b/src/mongo/db/client.h
new file mode 100644
index 00000000000..6aa8bc00f02
--- /dev/null
+++ b/src/mongo/db/client.h
@@ -0,0 +1,286 @@
+/* @file db/client.h
+
+ "Client" represents a connection to the database (the server-side) and corresponds
+ to an open socket (or logical connection if pooling on sockets) from a client.
+
+ todo: switch to asio...this will fit nicely with that.
+*/
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../pch.h"
+#include "security.h"
+#include "namespace-inl.h"
+#include "lasterror.h"
+#include "stats/top.h"
+#include "../db/client_common.h"
+#include "../util/concurrency/threadlocal.h"
+#include "../util/net/message_port.h"
+#include "../util/concurrency/rwlock.h"
+#include "d_concurrency.h"
+
+namespace mongo {
+
+ extern class ReplSet *theReplSet;
+ class AuthenticationInfo;
+ class Database;
+ class CurOp;
+ class Command;
+ class Client;
+ class AbstractMessagingPort;
+ class LockCollectionForReading;
+ class PageFaultRetryableSection;
+
+#if defined(CLC)
+ typedef LockCollectionForReading _LockCollectionForReading;
+#else
+ typedef readlock _LockCollectionForReading;
+#endif
+
+ TSP_DECLARE(Client, currentClient)
+
+ typedef long long ConnectionId;
+
+ /** the database's concept of an outside "client" */
+ class Client : public ClientBasic {
+ static Client *syncThread;
+ public:
+ // always be in clientsMutex when manipulating this. killop stuff uses these.
+ static set<Client*> clients;
+ static mongo::mutex clientsMutex;
+ static int getActiveClientCount( int& writers , int& readers );
+ class Context;
+ ~Client();
+ static int recommendedYieldMicros( int * writers = 0 , int * readers = 0 );
+
+ /** each thread which does db operations has a Client object in TLS.
+ * call this when your thread starts.
+ */
+ static Client& initThread(const char *desc, AbstractMessagingPort *mp = 0);
+
+ static void initThreadIfNotAlready(const char *desc) {
+ if( currentClient.get() )
+ return;
+ initThread(desc);
+ }
+
+ /** this has to be called as the client goes away, but before thread termination
+ * @return true if anything was done
+ */
+ bool shutdown();
+
+ /** set so isSyncThread() works */
+ void iAmSyncThread() {
+ wassert( syncThread == 0 );
+ syncThread = this;
+ }
+ /** @return true if this client is the replication secondary pull thread. not used much, is used in create index sync code. */
+ bool isSyncThread() const { return this == syncThread; }
+
+ string clientAddress(bool includePort=false) const;
+ const AuthenticationInfo * getAuthenticationInfo() const { return &_ai; }
+ AuthenticationInfo * getAuthenticationInfo() { return &_ai; }
+ bool isAdmin() { return _ai.isAuthorized( "admin" ); }
+ CurOp* curop() const { return _curOp; }
+ Context* getContext() const { return _context; }
+ Database* database() const { return _context ? _context->db() : 0; }
+ const char *ns() const { return _context->ns(); }
+ const char *desc() const { return _desc; }
+ void setLastOp( OpTime op ) { _lastOp = op; }
+ OpTime getLastOp() const { return _lastOp; }
+
+ /** caution -- use Context class instead */
+ void setContext(Context *c) { _context = c; }
+
+ /* report what the last operation was. used by getlasterror */
+ void appendLastOp( BSONObjBuilder& b ) const;
+
+ bool isGod() const { return _god; } /* this is for map/reduce writes */
+ string toString() const;
+ void gotHandshake( const BSONObj& o );
+ bool hasRemote() const { return _mp; }
+ HostAndPort getRemote() const { assert( _mp ); return _mp->remote(); }
+ BSONObj getRemoteID() const { return _remoteId; }
+ BSONObj getHandshake() const { return _handshake; }
+ AbstractMessagingPort * port() const { return _mp; }
+ ConnectionId getConnectionId() const { return _connectionId; }
+ private:
+ Client(const char *desc, AbstractMessagingPort *p = 0);
+ friend class CurOp;
+ ConnectionId _connectionId; // > 0 for things "conn", 0 otherwise
+        string _threadId; // "" on non-supporting systems
+ CurOp * _curOp;
+ Context * _context;
+ bool _shutdown; // to track if Client::shutdown() gets called
+ const char * const _desc;
+ bool _god;
+ AuthenticationInfo _ai;
+ OpTime _lastOp;
+ BSONObj _handshake;
+ BSONObj _remoteId;
+ AbstractMessagingPort * const _mp;
+ unsigned _sometimes;
+ public:
+ bool _hasWrittenThisPass;
+ PageFaultRetryableSection *_pageFaultRetryableSection;
+
+        /** the concept here is the same as MONGO_SOMETIMES. however that
+            macro uses a static that is shared by all threads, and each
+            increment may evict that cache line from the other CPUs' caches,
+            so the idea is that this per-client counter is better.
+        */
+ bool sometimes(unsigned howOften) { return ++_sometimes % howOften == 0; }
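+        // Illustrative use (call site hypothetical): throttle a noisy per-connection
+        // log line --
+        //     if( cc().sometimes(128) ) log() << "..." << endl;
+        // Unlike MONGO_SOMETIMES, the counter lives in this Client, so no line is
+        // shared between threads.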
+
+ /* set _god=true temporarily, safely */
+ class GodScope {
+ bool _prev;
+ public:
+ GodScope();
+ ~GodScope();
+ };
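+        // Illustrative use (sketch): take god mode for a limited scope, e.g. for
+        // internal writes that must bypass the usual checks --
+        //     {
+        //         Client::GodScope gs;   // cc()._god = true for the life of gs
+        //         ...                    // privileged work
+        //     }                          // previous _god value restored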
+
+ //static void assureDatabaseIsOpen(const string& ns, string path=dbpath);
+
+ /** "read lock, and set my context, all in one operation"
+ * This handles (if not recursively locked) opening an unopened database.
+ */
+ class ReadContext : boost::noncopyable {
+ public:
+ ReadContext(const string& ns, string path=dbpath, bool doauth=true );
+ Context& ctx() { return *c.get(); }
+ private:
+ scoped_ptr<_LockCollectionForReading> lk;
+ scoped_ptr<Context> c;
+ };
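+        // Illustrative use (sketch, namespace name hypothetical):
+        //     Client::ReadContext ctx("test.foo");   // read lock + Context in one shot
+        //     Database *db = ctx.ctx().db();
+        // The constructor (client.cpp) also handles opening the database under a
+        // write lock if it is not open yet.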
+
+    /* Sets the database we want to use, then restores the previous one when we finish (go out of scope).
+       Note this is also helpful if an exception happens, as the state is fixed up.
+ */
+ class Context : boost::noncopyable {
+ public:
+ /** this is probably what you want */
+ Context(const string& ns, string path=dbpath, bool doauth=true );
+
+ /** note: this does not call finishInit -- i.e., does not call
+ shardVersionOk() for example.
+ see also: reset().
+ */
+ Context( string ns , Database * db, bool doauth=true );
+
+ // used by ReadContext
+ Context(const string& path, const string& ns, Database *db, bool doauth);
+
+ ~Context();
+ Client* getClient() const { return _client; }
+ Database* db() const { return _db; }
+ const char * ns() const { return _ns.c_str(); }
+ bool equals( const string& ns , const string& path=dbpath ) const { return _ns == ns && _path == path; }
+
+ /** @return if the db was created by this Context */
+ bool justCreated() const { return _justCreated; }
+
+ /** @return true iff the current Context is using db/path */
+ bool inDB( const string& db , const string& path=dbpath ) const;
+
+ void _clear() { // this is sort of an "early destruct" indication, _ns can never be uncleared
+ const_cast<string&>(_ns).empty();
+ _db = 0;
+ }
+
+ /** call before unlocking, so clear any non-thread safe state
+ * _db gets restored on the relock
+ */
+ void unlocked() { _db = 0; }
+
+ /** call after going back into the lock, will re-establish non-thread safe stuff */
+ void relocked() { _finishInit(); }
+
+ private:
+ friend class CurOp;
+ void _finishInit( bool doauth=true);
+ void _auth( int lockState );
+ void checkNotStale() const;
+ void checkNsAccess( bool doauth, int lockState = d.dbMutex.getState() );
+ Client * const _client;
+ Context * const _oldContext;
+ const string _path;
+ bool _justCreated;
+ const string _ns;
+ Database * _db;
+ }; // class Client::Context
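+        // Illustrative use (sketch, namespace name hypothetical): inside a write lock,
+        // bind the current Client to a namespace for the duration of an operation --
+        //     {
+        //         writelock lk;
+        //         Client::Context ctx("test.foo");   // sets cc()'s context, checks auth/shard version
+        //         ...                                // ctx.db() is the Database in use
+        //     }                                      // previous context restored by ~Context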
+
+ struct LockStatus {
+ LockStatus();
+ string whichCollection;
+ unsigned excluder, global, collection;
+ string toString() const;
+ } lockStatus;
+
+#if defined(CLC)
+ void checkLocks() const;
+#else
+ void checkLocks() const { }
+#endif
+
+ }; // class Client
+
+ /** get the Client object for this thread. */
+ inline Client& cc() {
+ Client * c = currentClient.get();
+ assert( c );
+ return *c;
+ }
+
+ inline Client::GodScope::GodScope() {
+ _prev = cc()._god;
+ cc()._god = true;
+ }
+ inline Client::GodScope::~GodScope() { cc()._god = _prev; }
+
+ /* this unreadlocks and then writelocks; i.e. it does NOT upgrade inside the
+       lock (and is thus wrong to use if you need that, which is usually the case).
+ that said we use it today for a specific case where the usage is correct.
+ */
+#if 0
+ inline void mongolock::releaseAndWriteLock() {
+ if( !_writelock ) {
+
+#if BOOST_VERSION >= 103500
+ int s = d.dbMutex.getState();
+ if( s != -1 ) {
+ log() << "error: releaseAndWriteLock() s == " << s << endl;
+ msgasserted( 12600, "releaseAndWriteLock: unlock_shared failed, probably recursive" );
+ }
+#endif
+
+ _writelock = true;
+ d.dbMutex.unlock_shared();
+ d.dbMutex.lock();
+
+ // todo: unlocked() method says to call it before unlocking, not after. so fix this here,
+ // or fix the doc there.
+ if ( cc().getContext() )
+ cc().getContext()->unlocked();
+ }
+ }
+#endif
+
+ inline bool haveClient() { return currentClient.get() > 0; }
+
+};
diff --git a/src/mongo/db/client_common.h b/src/mongo/db/client_common.h
new file mode 100644
index 00000000000..eb70105ef99
--- /dev/null
+++ b/src/mongo/db/client_common.h
@@ -0,0 +1,47 @@
+// client_common.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+//#include "../pch.h"
+//#include "security.h"
+#include "../util/net/hostandport.h"
+
+namespace mongo {
+
+ class AuthenticationInfo;
+
+ /**
+ * this is the base class for Client and ClientInfo
+ * Client is for mongod
+     * ClientInfo is for mongos
+ * They should converge slowly
+ * The idea is this has the basic api so that not all code has to be duplicated
+ */
+ class ClientBasic : boost::noncopyable {
+ public:
+ virtual ~ClientBasic(){}
+ virtual const AuthenticationInfo * getAuthenticationInfo() const = 0;
+ virtual AuthenticationInfo * getAuthenticationInfo() = 0;
+
+ virtual bool hasRemote() const = 0;
+ virtual HostAndPort getRemote() const = 0;
+
+ static ClientBasic* getCurrent();
+ };
+}
diff --git a/src/mongo/db/clientcursor.cpp b/src/mongo/db/clientcursor.cpp
new file mode 100644
index 00000000000..dc04ec38f63
--- /dev/null
+++ b/src/mongo/db/clientcursor.cpp
@@ -0,0 +1,747 @@
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+/* clientcursor.cpp
+
+ ClientCursor is a wrapper that represents a cursorid from our database
+ application's perspective.
+
+ Cursor -- and its derived classes -- are our internal cursors.
+*/
+
+#include "pch.h"
+#include "clientcursor.h"
+#include "introspect.h"
+#include <time.h>
+#include "db.h"
+#include "commands.h"
+#include "repl_block.h"
+#include "../util/processinfo.h"
+#include "../util/timer.h"
+#include "../server.h"
+
+namespace mongo {
+
+ CCById ClientCursor::clientCursorsById;
+ boost::recursive_mutex& ClientCursor::ccmutex( *(new boost::recursive_mutex()) );
+ long long ClientCursor::numberTimedOut = 0;
+
+ void aboutToDeleteForSharding( const Database* db , const DiskLoc& dl ); // from s/d_logic.h
+
+ /*static*/ void ClientCursor::assertNoCursors() {
+ recursive_scoped_lock lock(ccmutex);
+ if( clientCursorsById.size() ) {
+ log() << "ERROR clientcursors exist but should not at this point" << endl;
+ ClientCursor *cc = clientCursorsById.begin()->second;
+ log() << "first one: " << cc->_cursorid << ' ' << cc->_ns << endl;
+ clientCursorsById.clear();
+ assert(false);
+ }
+ }
+
+
+ void ClientCursor::setLastLoc_inlock(DiskLoc L) {
+ assert( _pos != -2 ); // defensive - see ~ClientCursor
+
+ if ( L == _lastLoc )
+ return;
+
+ CCByLoc& bl = byLoc();
+
+ if ( !_lastLoc.isNull() ) {
+ bl.erase( ByLocKey( _lastLoc, _cursorid ) );
+ }
+
+ if ( !L.isNull() )
+ bl[ByLocKey(L,_cursorid)] = this;
+ _lastLoc = L;
+ }
+
+ /* ------------------------------------------- */
+
+ /* must call this when a btree node is updated */
+ //void removedKey(const DiskLoc& btreeLoc, int keyPos) {
+ //}
+
+ // ns is either a full namespace or "dbname." when invalidating for a whole db
+ void ClientCursor::invalidate(const char *ns) {
+ d.dbMutex.assertWriteLocked();
+ int len = strlen(ns);
+ const char* dot = strchr(ns, '.');
+ assert( len > 0 && dot);
+
+ bool isDB = (dot == &ns[len-1]); // first (and only) dot is the last char
+
+ {
+ //cout << "\nTEMP invalidate " << ns << endl;
+ recursive_scoped_lock lock(ccmutex);
+
+ Database *db = cc().database();
+ assert(db);
+ assert( str::startsWith(ns, db->name) );
+
+ for( CCById::iterator i = clientCursorsById.begin(); i != clientCursorsById.end(); /*++i*/ ) {
+ ClientCursor *cc = i->second;
+
+ ++i; // we may be removing this node
+
+ if( cc->_db != db )
+ continue;
+
+ if (isDB) {
+ // already checked that db matched above
+ dassert( str::startsWith(cc->_ns.c_str(), ns) );
+ delete cc; //removes self from ccByID
+ }
+ else {
+ if ( str::equals(cc->_ns.c_str(), ns) )
+ delete cc; //removes self from ccByID
+ }
+ }
+
+ /*
+ note : we can't iterate byloc because clientcursors may exist with a loc of null in which case
+ they are not in the map. perhaps they should not exist though in the future? something to
+ change???
+
+ CCByLoc& bl = db->ccByLoc;
+ for ( CCByLoc::iterator i = bl.begin(); i != bl.end(); ++i ) {
+ ClientCursor *cc = i->second;
+ if ( strncmp(ns, cc->ns.c_str(), len) == 0 ) {
+ assert( cc->_db == db );
+ toDelete.push_back(i->second);
+ }
+ }*/
+
+ /*cout << "TEMP after invalidate " << endl;
+ for( auto i = clientCursorsById.begin(); i != clientCursorsById.end(); ++i ) {
+ cout << " " << i->second->ns << endl;
+ }
+ cout << "TEMP after invalidate done" << endl;*/
+ }
+ }
+
+    /* note called outside of locks (other than ccmutex) so care must be exercised.
+       a cursor times out after ten minutes (600,000 ms) of idle time, unless it is pinned. */
+ bool ClientCursor::shouldTimeout( unsigned millis ) {
+ _idleAgeMillis += millis;
+ return _idleAgeMillis > 600000 && _pinValue == 0;
+ }
+
+ /* called every 4 seconds. millis is amount of idle time passed since the last call -- could be zero */
+ void ClientCursor::idleTimeReport(unsigned millis) {
+ bool foundSomeToTimeout = false;
+
+ // two passes so that we don't need to readlock unless we really do some timeouts
+ // we assume here that incrementing _idleAgeMillis outside readlock is ok.
+ {
+ recursive_scoped_lock lock(ccmutex);
+ {
+ unsigned sz = clientCursorsById.size();
+ static time_t last;
+ if( sz >= 100000 ) {
+ if( time(0) - last > 300 ) {
+ last = time(0);
+ log() << "warning number of open cursors is very large: " << sz << endl;
+ }
+ }
+ }
+ for ( CCById::iterator i = clientCursorsById.begin(); i != clientCursorsById.end(); ) {
+ CCById::iterator j = i;
+ i++;
+ if( j->second->shouldTimeout( millis ) ) {
+ foundSomeToTimeout = true;
+ break;
+ }
+ }
+ }
+
+ if( foundSomeToTimeout ) {
+ // todo: ideally all readlocks automatically note what we are locking for so this
+ // can be reported in currentop command. e.g. something like:
+ // readlock lk("", "timeout cursors");
+ readlock lk("");
+ recursive_scoped_lock lock(ccmutex);
+ for ( CCById::iterator i = clientCursorsById.begin(); i != clientCursorsById.end(); ) {
+ CCById::iterator j = i;
+ i++;
+ if( j->second->shouldTimeout(0) ) {
+ numberTimedOut++;
+ LOG(1) << "killing old cursor " << j->second->_cursorid << ' ' << j->second->_ns
+ << " idle:" << j->second->idleTime() << "ms\n";
+ delete j->second;
+ }
+ }
+ }
+ }
+
+ /* must call when a btree bucket going away.
+ note this is potentially slow
+ */
+ void ClientCursor::informAboutToDeleteBucket(const DiskLoc& b) {
+ recursive_scoped_lock lock(ccmutex);
+ Database *db = cc().database();
+ CCByLoc& bl = db->ccByLoc;
+ RARELY if ( bl.size() > 70 ) {
+ log() << "perf warning: byLoc.size=" << bl.size() << " in aboutToDeleteBucket\n";
+ }
+ if( bl.size() == 0 ) {
+ DEV tlog() << "debug warning: no cursors found in informAboutToDeleteBucket()" << endl;
+ }
+ for ( CCByLoc::iterator i = bl.begin(); i != bl.end(); i++ )
+ i->second->_c->aboutToDeleteBucket(b);
+ }
+ void aboutToDeleteBucket(const DiskLoc& b) {
+ ClientCursor::informAboutToDeleteBucket(b);
+ }
+
+ /* must call this on a delete so we clean up the cursors. */
+ void ClientCursor::aboutToDelete(const DiskLoc& dl) {
+ recursive_scoped_lock lock(ccmutex);
+
+ Database *db = cc().database();
+ assert(db);
+
+ aboutToDeleteForSharding( db , dl );
+
+ CCByLoc& bl = db->ccByLoc;
+ CCByLoc::iterator j = bl.lower_bound(ByLocKey::min(dl));
+ CCByLoc::iterator stop = bl.upper_bound(ByLocKey::max(dl));
+ if ( j == stop )
+ return;
+
+ vector<ClientCursor*> toAdvance;
+
+ while ( 1 ) {
+ toAdvance.push_back(j->second);
+ DEV assert( j->first.loc == dl );
+ ++j;
+ if ( j == stop )
+ break;
+ }
+
+ if( toAdvance.size() >= 3000 ) {
+ log() << "perf warning MPW101: " << toAdvance.size() << " cursors for one diskloc "
+ << dl.toString()
+ << ' ' << toAdvance[1000]->_ns
+ << ' ' << toAdvance[2000]->_ns
+ << ' ' << toAdvance[1000]->_pinValue
+ << ' ' << toAdvance[2000]->_pinValue
+ << ' ' << toAdvance[1000]->_pos
+ << ' ' << toAdvance[2000]->_pos
+ << ' ' << toAdvance[1000]->_idleAgeMillis
+ << ' ' << toAdvance[2000]->_idleAgeMillis
+ << ' ' << toAdvance[1000]->_doingDeletes
+ << ' ' << toAdvance[2000]->_doingDeletes
+ << endl;
+ //wassert( toAdvance.size() < 5000 );
+ }
+
+ for ( vector<ClientCursor*>::iterator i = toAdvance.begin(); i != toAdvance.end(); ++i ) {
+ ClientCursor* cc = *i;
+ wassert(cc->_db == db);
+
+ if ( cc->_doingDeletes ) continue;
+
+ Cursor *c = cc->_c.get();
+ if ( c->capped() ) {
+ /* note we cannot advance here. if this condition occurs, writes to the oplog
+                   have "caught" the reader. skipping ahead, the reader would miss potentially
+ important data.
+ */
+ delete cc;
+ continue;
+ }
+
+ c->checkLocation();
+ DiskLoc tmp1 = c->refLoc();
+ if ( tmp1 != dl ) {
+ // This might indicate a failure to call ClientCursor::updateLocation() but it can
+ // also happen during correct operation, see SERVER-2009.
+ problem() << "warning: cursor loc " << tmp1 << " does not match byLoc position " << dl << " !" << endl;
+ }
+ else {
+ c->advance();
+ }
+ while (!c->eof() && c->refLoc() == dl) {
+ /* We don't delete at EOF because we want to return "no more results" rather than "no such cursor".
+ * The loop is to handle MultiKey indexes where the deleted record is pointed to by multiple adjacent keys.
+ * In that case we need to advance until we get to the next distinct record or EOF.
+ * SERVER-4154
+ */
+ c->advance();
+ }
+ cc->updateLocation();
+ }
+ }
+ void aboutToDelete(const DiskLoc& dl) { ClientCursor::aboutToDelete(dl); }
+
+ ClientCursor::ClientCursor(int queryOptions, const shared_ptr<Cursor>& c, const string& ns, BSONObj query ) :
+ _ns(ns), _db( cc().database() ),
+ _c(c), _pos(0),
+ _query(query), _queryOptions(queryOptions),
+ _idleAgeMillis(0), _pinValue(0),
+ _doingDeletes(false), _yieldSometimesTracker(128,10) {
+
+ d.dbMutex.assertAtLeastReadLocked();
+
+ assert( _db );
+ assert( str::startsWith(_ns, _db->name) );
+ if( queryOptions & QueryOption_NoCursorTimeout )
+ noTimeout();
+ recursive_scoped_lock lock(ccmutex);
+ _cursorid = allocCursorId_inlock();
+ clientCursorsById.insert( make_pair(_cursorid, this) );
+
+ if ( ! _c->modifiedKeys() ) {
+ // store index information so we can decide if we can
+ // get something out of the index key rather than full object
+
+ int x = 0;
+ BSONObjIterator i( _c->indexKeyPattern() );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( e.isNumber() ) {
+ // only want basic index fields, not "2d" etc
+ _indexedFields[e.fieldName()] = x;
+ }
+ x++;
+ }
+ }
+
+ }
+
+
+ ClientCursor::~ClientCursor() {
+ if( _pos == -2 ) {
+ // defensive: destructor called twice
+ wassert(false);
+ return;
+ }
+
+ {
+ recursive_scoped_lock lock(ccmutex);
+ setLastLoc_inlock( DiskLoc() ); // removes us from bylocation multimap
+ clientCursorsById.erase(_cursorid);
+
+ // defensive:
+ (CursorId&)_cursorid = -1;
+ _pos = -2;
+ }
+ }
+
+ bool ClientCursor::getFieldsDotted( const string& name, BSONElementSet &ret, BSONObj& holder ) {
+
+ map<string,int>::const_iterator i = _indexedFields.find( name );
+ if ( i == _indexedFields.end() ) {
+ current().getFieldsDotted( name , ret );
+ return false;
+ }
+
+ int x = i->second;
+
+ holder = currKey();
+ BSONObjIterator it( holder );
+ while ( x && it.more() ) {
+ it.next();
+ x--;
+ }
+ assert( x == 0 );
+ ret.insert( it.next() );
+ return true;
+ }
+
+ BSONElement ClientCursor::getFieldDotted( const string& name , BSONObj& holder , bool * fromKey ) {
+
+ map<string,int>::const_iterator i = _indexedFields.find( name );
+ if ( i == _indexedFields.end() ) {
+ if ( fromKey )
+ *fromKey = false;
+ holder = current();
+ return holder.getFieldDotted( name );
+ }
+
+ int x = i->second;
+
+ holder = currKey();
+ BSONObjIterator it( holder );
+ while ( x && it.more() ) {
+ it.next();
+ x--;
+ }
+ assert( x == 0 );
+
+ if ( fromKey )
+ *fromKey = true;
+ return it.next();
+ }
+
+ BSONObj ClientCursor::extractFields(const BSONObj &pattern , bool fillWithNull ) {
+ BSONObjBuilder b( pattern.objsize() * 2 );
+
+ BSONObj holder;
+
+ BSONObjIterator i( pattern );
+ while ( i.more() ) {
+ BSONElement key = i.next();
+ BSONElement value = getFieldDotted( key.fieldName() , holder );
+
+ if ( value.type() ) {
+ b.appendAs( value , key.fieldName() );
+ continue;
+ }
+
+ if ( fillWithNull )
+ b.appendNull( key.fieldName() );
+
+ }
+
+ return b.obj();
+ }
+
+
+ /* call when cursor's location changes so that we can update the
+ cursorsbylocation map. if you are locked and internally iterating, only
+ need to call when you are ready to "unlock".
+ */
+ void ClientCursor::updateLocation() {
+ assert( _cursorid );
+ _idleAgeMillis = 0;
+ DiskLoc cl = _c->refLoc();
+ if ( lastLoc() == cl ) {
+ //log() << "info: lastloc==curloc " << ns << '\n';
+ }
+ else {
+ recursive_scoped_lock lock(ccmutex);
+ setLastLoc_inlock(cl);
+ }
+ // may be necessary for MultiCursor even when cl hasn't changed
+ _c->noteLocation();
+ }
+
+ int ClientCursor::suggestYieldMicros() {
+ int writers = 0;
+ int readers = 0;
+
+ int micros = Client::recommendedYieldMicros( &writers , &readers );
+
+ if ( micros > 0 && writers == 0 && d.dbMutex.getState() <= 0 ) {
+ // we have a read lock, and only reads are coming on, so why bother unlocking
+ return 0;
+ }
+
+ wassert( micros < 10000000 );
+ dassert( micros < 1000001 );
+ return micros;
+ }
+
+ Record* ClientCursor::_recordForYield( ClientCursor::RecordNeeds need ) {
+ if ( need == DontNeed ) {
+ return 0;
+ }
+ else if ( need == MaybeCovered ) {
+ // TODO
+ return 0;
+ }
+ else if ( need == WillNeed ) {
+ // no-op
+ }
+ else {
+ warning() << "don't understand RecordNeeds: " << (int)need << endl;
+ return 0;
+ }
+
+ DiskLoc l = currLoc();
+ if ( l.isNull() )
+ return 0;
+
+ Record * rec = l.rec();
+ if ( rec->likelyInPhysicalMemory() )
+ return 0;
+
+ return rec;
+ }
+
+ bool ClientCursor::yieldSometimes( RecordNeeds need, bool *yielded ) {
+ if ( yielded ) {
+ *yielded = false;
+ }
+ if ( ! _yieldSometimesTracker.intervalHasElapsed() ) {
+ Record* rec = _recordForYield( need );
+ if ( rec ) {
+ // yield for page fault
+ if ( yielded ) {
+ *yielded = true;
+ }
+ return yield( suggestYieldMicros() , rec );
+ }
+ return true;
+ }
+
+ int micros = suggestYieldMicros();
+ if ( micros > 0 ) {
+ if ( yielded ) {
+ *yielded = true;
+ }
+ return yield( micros , _recordForYield( need ) );
+ }
+ return true;
+ }
+
+ void ClientCursor::staticYield( int micros , const StringData& ns , Record * rec ) {
+ killCurrentOp.checkForInterrupt( false );
+ {
+ auto_ptr<LockMongoFilesShared> lk;
+ if ( rec ) {
+                // need to lock this, else rec->touch() won't be safe (the file could disappear)
+ lk.reset( new LockMongoFilesShared() );
+ }
+
+ dbtempreleasecond unlock;
+ if ( unlock.unlocked() ) {
+ if ( micros == -1 )
+ micros = Client::recommendedYieldMicros();
+ if ( micros > 0 )
+ sleepmicros( micros );
+ }
+ else {
+ CurOp * c = cc().curop();
+ while ( c->parent() )
+ c = c->parent();
+ LOGSOME << "warning ClientCursor::yield can't unlock b/c of recursive lock"
+ << " ns: " << ns
+ << " top: " << c->info()
+ << endl;
+ }
+
+ if ( rec )
+ rec->touch();
+
+ lk.reset(0); // need to release this before dbtempreleasecond
+ }
+ }
+
+ bool ClientCursor::prepareToYield( YieldData &data ) {
+ if ( ! _c->supportYields() )
+ return false;
+ if ( ! _c->prepareToYield() ) {
+ return false;
+ }
+ // need to store in case 'this' gets deleted
+ data._id = _cursorid;
+
+ data._doingDeletes = _doingDeletes;
+ _doingDeletes = false;
+
+ updateLocation();
+
+ {
+ /* a quick test that our temprelease is safe.
+ todo: make a YieldingCursor class
+ and then make the following code part of a unit test.
+ */
+ const int test = 0;
+ static bool inEmpty = false;
+ if( test && !inEmpty ) {
+ inEmpty = true;
+ log() << "TEST: manipulate collection during cc:yield" << endl;
+ if( test == 1 )
+ Helpers::emptyCollection(_ns.c_str());
+ else if( test == 2 ) {
+ BSONObjBuilder b; string m;
+ dropCollection(_ns.c_str(), m, b);
+ }
+ else {
+ dropDatabase(_ns.c_str());
+ }
+ }
+ }
+ return true;
+ }
+
+ bool ClientCursor::recoverFromYield( const YieldData &data ) {
+ ClientCursor *cc = ClientCursor::find( data._id , false );
+ if ( cc == 0 ) {
+ // id was deleted
+ return false;
+ }
+
+ cc->_doingDeletes = data._doingDeletes;
+ cc->_c->recoverFromYield();
+ return true;
+ }
+
+ /** @return true if cursor is still ok */
+ bool ClientCursor::yield( int micros , Record * recordToLoad ) {
+
+        if ( ! _c->supportYields() ) // some cursors (geo@oct2011) don't support yielding
+ return true;
+
+ YieldData data;
+ prepareToYield( data );
+ staticYield( micros , _ns , recordToLoad );
+ return ClientCursor::recoverFromYield( data );
+ }
+
+ long long ctmLast = 0; // so we don't have to do find() which is a little slow very often.
+ long long ClientCursor::allocCursorId_inlock() {
+ long long ctm = curTimeMillis64();
+ dassert( ctm );
+ long long x;
+ while ( 1 ) {
+ x = (((long long)rand()) << 32);
+ x = x ^ ctm;
+ if ( ctm != ctmLast || ClientCursor::find_inlock(x, false) == 0 )
+ break;
+ }
+ ctmLast = ctm;
+ return x;
+ }
+
+ void ClientCursor::storeOpForSlave( DiskLoc last ) {
+ if ( ! ( _queryOptions & QueryOption_OplogReplay ))
+ return;
+
+ if ( last.isNull() )
+ return;
+
+ BSONElement e = last.obj()["ts"];
+ if ( e.type() == Date || e.type() == Timestamp )
+ _slaveReadTill = e._opTime();
+ }
+
+ void ClientCursor::updateSlaveLocation( CurOp& curop ) {
+ if ( _slaveReadTill.isNull() )
+ return;
+ mongo::updateSlaveLocation( curop , _ns.c_str() , _slaveReadTill );
+ }
+
+
+ void ClientCursor::appendStats( BSONObjBuilder& result ) {
+ recursive_scoped_lock lock(ccmutex);
+ result.appendNumber("totalOpen", clientCursorsById.size() );
+ result.appendNumber("clientCursors_size", (int) numCursors());
+ result.appendNumber("timedOut" , numberTimedOut);
+ unsigned pinned = 0;
+ unsigned notimeout = 0;
+ for ( CCById::iterator i = clientCursorsById.begin(); i != clientCursorsById.end(); i++ ) {
+ unsigned p = i->second->_pinValue;
+ if( p >= 100 )
+ pinned++;
+ else if( p > 0 )
+ notimeout++;
+ }
+ if( pinned )
+ result.append("pinned", pinned);
+ if( notimeout )
+ result.append("totalNoTimeout", notimeout);
+ }
+
+ // QUESTION: Restrict to the namespace from which this command was issued?
+ // Alternatively, make this command admin-only?
+ class CmdCursorInfo : public Command {
+ public:
+ CmdCursorInfo() : Command( "cursorInfo", true ) {}
+ virtual bool slaveOk() const { return true; }
+ virtual void help( stringstream& help ) const {
+ help << " example: { cursorInfo : 1 }";
+ }
+ virtual LockType locktype() const { return NONE; }
+ bool run(const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ ClientCursor::appendStats( result );
+ return true;
+ }
+ } cmdCursorInfo;
+
+ struct Mem {
+ Mem() { res = virt = mapped = 0; }
+ int res;
+ int virt;
+ int mapped;
+ bool grew(const Mem& r) {
+ return (r.res && (((double)res)/r.res)>1.1 ) ||
+ (r.virt && (((double)virt)/r.virt)>1.1 ) ||
+ (r.mapped && (((double)mapped)/r.mapped)>1.1 );
+ }
+ };
+
+ /** called once a minute from killcursors thread */
+ void sayMemoryStatus() {
+ static time_t last;
+ static Mem mlast;
+ try {
+ ProcessInfo p;
+ if ( !cmdLine.quiet && p.supported() ) {
+ Mem m;
+ m.res = p.getResidentSize();
+ m.virt = p.getVirtualMemorySize();
+ m.mapped = (int) (MemoryMappedFile::totalMappedLength() / ( 1024 * 1024 ));
+ if( time(0)-last >= 300 || m.grew(mlast) ) {
+ log() << "mem (MB) res:" << m.res << " virt:" << m.virt << " mapped:" << m.mapped << endl;
+ if( m.virt - (cmdLine.dur?2:1)*m.mapped > 5000 ) {
+ ONCE log() << "warning virtual/mapped memory differential is large. journaling:" << cmdLine.dur << endl;
+ }
+ last = time(0);
+ mlast = m;
+ }
+ }
+ }
+ catch(...) {
+ log() << "ProcessInfo exception" << endl;
+ }
+ }
+
+ /** thread for timing out old cursors */
+ void ClientCursorMonitor::run() {
+ Client::initThread("clientcursormon");
+ Client& client = cc();
+ Timer t;
+ const int Secs = 4;
+ unsigned n = 0;
+ while ( ! inShutdown() ) {
+ ClientCursor::idleTimeReport( t.millisReset() );
+ sleepsecs(Secs);
+ if( ++n % (60/4) == 0 /*once a minute*/ ) {
+ sayMemoryStatus();
+ }
+ }
+ client.shutdown();
+ }
+
+ void ClientCursor::find( const string& ns , set<CursorId>& all ) {
+ recursive_scoped_lock lock(ccmutex);
+
+ for ( CCById::iterator i=clientCursorsById.begin(); i!=clientCursorsById.end(); ++i ) {
+ if ( i->second->_ns == ns )
+ all.insert( i->first );
+ }
+ }
+
+ int ClientCursor::erase(int n, long long *ids) {
+ int found = 0;
+ for ( int i = 0; i < n; i++ ) {
+ if ( erase(ids[i]) )
+ found++;
+
+ if ( inShutdown() )
+ break;
+ }
+ return found;
+
+ }
+
+ ClientCursorMonitor clientCursorMonitor;
+
+} // namespace mongo
diff --git a/src/mongo/db/clientcursor.h b/src/mongo/db/clientcursor.h
new file mode 100644
index 00000000000..e570820f62c
--- /dev/null
+++ b/src/mongo/db/clientcursor.h
@@ -0,0 +1,430 @@
+/* clientcursor.h */
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+/* Cursor -- and its derived classes -- are our internal cursors.
+
+ ClientCursor is a wrapper that represents a cursorid from our database
+ application's perspective.
+*/
+
+#pragma once
+
+#include "../pch.h"
+#include "cursor.h"
+#include "jsobj.h"
+#include "../util/net/message.h"
+#include "../util/net/listen.h"
+#include "../util/background.h"
+#include "diskloc.h"
+#include "dbhelpers.h"
+#include "matcher.h"
+#include "../client/dbclient.h"
+#include "projection.h"
+#include "s/d_chunk_manager.h"
+
+namespace mongo {
+
+ typedef long long CursorId; /* passed to the client so it can send back on getMore */
+ class Cursor; /* internal server cursor base class */
+ class ClientCursor;
+ class ParsedQuery;
+
+ struct ByLocKey {
+
+ ByLocKey( const DiskLoc & l , const CursorId& i ) : loc(l), id(i) {}
+
+ static ByLocKey min( const DiskLoc& l ) { return ByLocKey( l , numeric_limits<long long>::min() ); }
+ static ByLocKey max( const DiskLoc& l ) { return ByLocKey( l , numeric_limits<long long>::max() ); }
+
+ bool operator<( const ByLocKey &other ) const {
+ int x = loc.compare( other.loc );
+ if ( x )
+ return x < 0;
+ return id < other.id;
+ }
+
+ DiskLoc loc;
+ CursorId id;
+
+ };
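+    // ByLocKey::min()/max() bracket all cursors positioned at a given DiskLoc, so a
+    // CCByLoc map can be range-scanned for one location. Sketch (see aboutToDelete()
+    // in clientcursor.cpp for the real use):
+    //     CCByLoc::iterator j    = bl.lower_bound( ByLocKey::min(dl) );
+    //     CCByLoc::iterator stop = bl.upper_bound( ByLocKey::max(dl) );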
+
+ /* todo: make this map be per connection. this will prevent cursor hijacking security attacks perhaps.
+ * ERH: 9/2010 this may not work since some drivers send getMore over a different connection
+ */
+ typedef map<CursorId, ClientCursor*> CCById;
+ typedef map<ByLocKey, ClientCursor*> CCByLoc;
+
+ extern BSONObj id_obj;
+
+ class ClientCursor {
+ friend class CmdCursorInfo;
+ public:
+ static void assertNoCursors();
+
+        /* use this to assure the background task doesn't time out a cursor while it is in use.
+ if you are using noTimeout() already, there is no risk anyway.
+ Further, this mechanism guards against two getMore requests on the same cursor executing
+ at the same time - which might be bad. That should never happen, but if a client driver
+ had a bug, it could (or perhaps some sort of attack situation).
+ */
+ class Pointer : boost::noncopyable {
+ ClientCursor *_c;
+ public:
+ ClientCursor * c() { return _c; }
+ void release() {
+ if( _c ) {
+ assert( _c->_pinValue >= 100 );
+ _c->_pinValue -= 100;
+ _c = 0;
+ }
+ }
+ /**
+ * call this if during a yield, the cursor got deleted
+             * if so, we don't want to use the pointer address
+ */
+ void deleted() {
+ _c = 0;
+ }
+ ~Pointer() { release(); }
+ Pointer(long long cursorid) {
+ recursive_scoped_lock lock(ccmutex);
+ _c = ClientCursor::find_inlock(cursorid, true);
+ if( _c ) {
+ if( _c->_pinValue >= 100 ) {
+ _c = 0;
+ uasserted(12051, "clientcursor already in use? driver problem?");
+ }
+ _c->_pinValue += 100;
+ }
+ }
+ };
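+        // Illustrative use (sketch): pin a cursor by id while servicing a getMore so the
+        // timeout thread cannot delete it out from under us --
+        //     ClientCursor::Pointer p(cursorid);   // adds 100 to _pinValue, uasserts if already pinned
+        //     ClientCursor *cc = p.c();            // 0 if the cursor no longer exists
+        //     ...                                  // ~Pointer releases the pin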
+
+ // This object assures safe and reliable cleanup of the ClientCursor.
+ // The implementation assumes that there will be no duplicate ids among cursors
+ // (which is assured if cursors must last longer than 1 second).
+ class CleanupPointer : boost::noncopyable {
+ public:
+ CleanupPointer() : _c( 0 ), _id( -1 ) {}
+ void reset( ClientCursor *c = 0 ) {
+ if ( c == _c )
+ return;
+ if ( _c ) {
+ // be careful in case cursor was deleted by someone else
+ ClientCursor::erase( _id );
+ }
+ if ( c ) {
+ _c = c;
+ _id = c->_cursorid;
+ }
+ else {
+ _c = 0;
+ _id = -1;
+ }
+ }
+ ~CleanupPointer() {
+ DESTRUCTOR_GUARD ( reset(); );
+ }
+ operator bool() { return _c; }
+ ClientCursor * operator-> () { return _c; }
+ private:
+ ClientCursor *_c;
+ CursorId _id;
+ };
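+
+        // Illustrative sketch of CleanupPointer (names hypothetical). Note the cursor is erased by
+        // id rather than deleted through the raw pointer, so this stays safe even if something else
+        // already deleted the cursor:
+        //
+        //     ClientCursor::CleanupPointer ccPointer;
+        //     ccPointer.reset( new ClientCursor( queryOptions, cursor, ns ) );
+        //     /* ... use ccPointer-> ... */
+        //     // leaving scope calls reset(), which does ClientCursor::erase( id )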
+
+ ClientCursor(int queryOptions, const shared_ptr<Cursor>& c, const string& ns, BSONObj query = BSONObj() );
+
+ ~ClientCursor();
+
+ // *************** basic accessors *******************
+
+ CursorId cursorid() const { return _cursorid; }
+ string ns() const { return _ns; }
+ Database * db() const { return _db; }
+ const BSONObj& query() const { return _query; }
+ int queryOptions() const { return _queryOptions; }
+
+ DiskLoc lastLoc() const { return _lastLoc; }
+
+        /* Get rid of cursors for the namespace 'ns'. When dropping a db, ns is "dbname." (and matches everything in that db).
+ Used by drop, dropIndexes, dropDatabase.
+ */
+ static void invalidate(const char *ns);
+
+ /**
+ * @param microsToSleep -1 : ask client
+ * >=0 : sleep for that amount
+ * @param recordToLoad after yielding lock, load this record with only mmutex
+ * do a dbtemprelease
+ * note: caller should check matcher.docMatcher().atomic() first and not yield if atomic -
+         *       we don't do that here, as this->matcher (above) is only initialized for true queries/getmores
+         *       (i.e. not set for remote/update).
+ * @return if the cursor is still valid.
+ * if false is returned, then this ClientCursor should be considered deleted -
+ * in fact, the whole database could be gone.
+ */
+ bool yield( int microsToSleep = -1 , Record * recordToLoad = 0 );
+
+ enum RecordNeeds {
+ DontNeed = -1 , MaybeCovered = 0 , WillNeed = 100
+ };
+
+ /**
+         * @param need whether or not the next record has to be read from disk for sure;
+         *             if so, this will yield if the next record isn't in memory
+         * @param yielded set to true if a yield occurred (and potentially even if a yield did not occur)
+ * @return same as yield()
+ */
+ bool yieldSometimes( RecordNeeds need, bool *yielded = 0 );
+
+ static int suggestYieldMicros();
+ static void staticYield( int micros , const StringData& ns , Record * rec );
+
+ struct YieldData { CursorId _id; bool _doingDeletes; };
+ bool prepareToYield( YieldData &data );
+ static bool recoverFromYield( const YieldData &data );
+
+ struct YieldLock : boost::noncopyable {
+ explicit YieldLock( ptr<ClientCursor> cc )
+ : _canYield(cc->_c->supportYields()) {
+ if ( _canYield ) {
+ cc->prepareToYield( _data );
+ _unlock.reset(new dbtempreleasecond());
+ }
+ }
+ ~YieldLock() {
+ if ( _unlock ) {
+ log( LL_WARNING ) << "ClientCursor::YieldLock not closed properly" << endl;
+ relock();
+ }
+ }
+ bool stillOk() {
+ if ( ! _canYield )
+ return true;
+ relock();
+ return ClientCursor::recoverFromYield( _data );
+ }
+ void relock() {
+ _unlock.reset();
+ }
+ private:
+ const bool _canYield;
+ YieldData _data;
+ scoped_ptr<dbtempreleasecond> _unlock;
+ };
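+
+        // Illustrative sketch of the usual yielding pattern when iterating under a ClientCursor
+        // (hypothetical caller code):
+        //
+        //     while ( cc->ok() ) {
+        //         if ( !cc->yieldSometimes( ClientCursor::WillNeed ) )
+        //             break;                 // cursor (or even the whole db) may be gone - stop
+        //         if ( !cc->ok() )
+        //             break;                 // the yield may have ended the cursor
+        //         /* ... use cc->current() ... */
+        //         cc->advance();
+        //     }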
+
+ // --- some pass through helpers for Cursor ---
+
+ Cursor* c() const { return _c.get(); }
+ int pos() const { return _pos; }
+
+ void incPos( int n ) { _pos += n; } // TODO: this is bad
+ void setPos( int n ) { _pos = n; } // TODO : this is bad too
+
+ BSONObj indexKeyPattern() { return _c->indexKeyPattern(); }
+ bool modifiedKeys() const { return _c->modifiedKeys(); }
+ bool isMultiKey() const { return _c->isMultiKey(); }
+
+ bool ok() { return _c->ok(); }
+ bool advance() { return _c->advance(); }
+ BSONObj current() { return _c->current(); }
+ DiskLoc currLoc() { return _c->currLoc(); }
+ BSONObj currKey() const { return _c->currKey(); }
+
+ /**
+ * same as BSONObj::getFieldsDotted
+ * if it can be retrieved from key, it is
+ * @param holder keeps the currKey in scope by keeping a reference to it here. generally you'll want
+ * holder and ret to destruct about the same time.
+ * @return if this was retrieved from key
+ */
+ bool getFieldsDotted( const string& name, BSONElementSet &ret, BSONObj& holder );
+
+ /**
+ * same as BSONObj::getFieldDotted
+ * if it can be retrieved from key, it is
+ * @return if this was retrieved from key
+ */
+ BSONElement getFieldDotted( const string& name , BSONObj& holder , bool * fromKey = 0 ) ;
+
+ /** extract items from object which match a pattern object.
+ * e.g., if pattern is { x : 1, y : 1 }, builds an object with
+ * x and y elements of this object, if they are present.
+ * returns elements with original field names
+ * NOTE: copied from BSONObj::extractFields
+ */
+ BSONObj extractFields(const BSONObj &pattern , bool fillWithNull = false) ;
+
+ bool currentIsDup() { return _c->getsetdup( _c->currLoc() ); }
+
+ bool currentMatches() {
+ if ( ! _c->matcher() )
+ return true;
+ return _c->matcher()->matchesCurrent( _c.get() );
+ }
+
+ void setChunkManager( ShardChunkManagerPtr manager ){ _chunkManager = manager; }
+ ShardChunkManagerPtr getChunkManager(){ return _chunkManager; }
+
+ private:
+ void setLastLoc_inlock(DiskLoc);
+
+ static ClientCursor* find_inlock(CursorId id, bool warn = true) {
+ CCById::iterator it = clientCursorsById.find(id);
+ if ( it == clientCursorsById.end() ) {
+ if ( warn )
+ OCCASIONALLY out() << "ClientCursor::find(): cursor not found in map " << id << " (ok after a drop)\n";
+ return 0;
+ }
+ return it->second;
+ }
+ public:
+ static ClientCursor* find(CursorId id, bool warn = true) {
+ recursive_scoped_lock lock(ccmutex);
+ ClientCursor *c = find_inlock(id, warn);
+ // if this asserts, your code was not thread safe - you either need to set no timeout
+ // for the cursor or keep a ClientCursor::Pointer in scope for it.
+ massert( 12521, "internal error: use of an unlocked ClientCursor", c == 0 || c->_pinValue );
+ return c;
+ }
+
+ static bool erase(CursorId id) {
+ recursive_scoped_lock lock(ccmutex);
+ ClientCursor *cc = find_inlock(id);
+ if ( cc ) {
+ assert( cc->_pinValue < 100 ); // you can't still have an active ClientCursor::Pointer
+ delete cc;
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * @return number of cursors found
+ */
+ static int erase( int n , long long * ids );
+
+ /* call when cursor's location changes so that we can update the
+ cursorsbylocation map. if you are locked and internally iterating, only
+ need to call when you are ready to "unlock".
+ */
+ void updateLocation();
+
+ void mayUpgradeStorage() {
+ /* if ( !ids_.get() )
+ return;
+ stringstream ss;
+ ss << ns << "." << cursorid;
+ ids_->mayUpgradeStorage( ss.str() );*/
+ }
+
+ /**
+         * @param millis amount of idle time passed since the last call
+ */
+ bool shouldTimeout( unsigned millis );
+
+ void storeOpForSlave( DiskLoc last );
+ void updateSlaveLocation( CurOp& curop );
+
+ unsigned idleTime() const { return _idleAgeMillis; }
+
+ void setDoingDeletes( bool doingDeletes ) {_doingDeletes = doingDeletes; }
+
+ void slaveReadTill( const OpTime& t ) { _slaveReadTill = t; }
+
+ public: // static methods
+
+ static void idleTimeReport(unsigned millis);
+
+ static void appendStats( BSONObjBuilder& result );
+ static unsigned numCursors() { return clientCursorsById.size(); }
+ static void informAboutToDeleteBucket(const DiskLoc& b);
+ static void aboutToDelete(const DiskLoc& dl);
+ static void find( const string& ns , set<CursorId>& all );
+
+
+ private: // methods
+
+        // cursors normally time out after an inactivity period to prevent excess memory use
+ // setting this prevents timeout of the cursor in question.
+ void noTimeout() { _pinValue++; }
+
+ CCByLoc& byLoc() { return _db->ccByLoc; }
+
+ Record* _recordForYield( RecordNeeds need );
+
+ private:
+
+ CursorId _cursorid;
+
+ const string _ns;
+ Database * _db;
+
+ const shared_ptr<Cursor> _c;
+ map<string,int> _indexedFields; // map from indexed field to offset in key object
+ int _pos; // # objects into the cursor so far
+
+ const BSONObj _query; // used for logging diags only; optional in constructor
+ int _queryOptions; // see enum QueryOptions dbclient.h
+
+ OpTime _slaveReadTill;
+
+ DiskLoc _lastLoc; // use getter and setter not this (important)
+ unsigned _idleAgeMillis; // how long has the cursor been around, relative to server idle time
+
+ /* 0 = normal
+ 1 = no timeout allowed
+ 100 = in use (pinned) -- see Pointer class
+ */
+ unsigned _pinValue;
+
+        bool _doingDeletes; // when true we are doing the deletes, and aboutToDelete shouldn't manipulate us
+ ElapsedTracker _yieldSometimesTracker;
+
+ ShardChunkManagerPtr _chunkManager;
+
+ public:
+ shared_ptr<ParsedQuery> pq;
+ shared_ptr<Projection> fields; // which fields query wants returned
+ Message originalMessage; // this is effectively an auto ptr for data the matcher points to
+
+
+
+ private: // static members
+
+ static CCById clientCursorsById;
+ static long long numberTimedOut;
+ static boost::recursive_mutex& ccmutex; // must use this for all statics above!
+ static CursorId allocCursorId_inlock();
+
+ };
+
+ class ClientCursorMonitor : public BackgroundJob {
+ public:
+ string name() const { return "ClientCursorMonitor"; }
+ void run();
+ };
+
+} // namespace mongo
+
+// ClientCursor should only be used with auto_ptr because it needs to be
+// release()ed after a yield if stillOk() returns false; scoped_ptr and shared_ptr
+// do not support releasing. The empty specializations below prevent them from being used accidentally.
+namespace boost{
+ template<> class scoped_ptr<mongo::ClientCursor> {};
+ template<> class shared_ptr<mongo::ClientCursor> {};
+}
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
new file mode 100644
index 00000000000..e35ae95052d
--- /dev/null
+++ b/src/mongo/db/cloner.cpp
@@ -0,0 +1,763 @@
+// cloner.cpp - copy a database (export/import basically)
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "cloner.h"
+#include "pdfile.h"
+#include "../client/dbclient.h"
+#include "../bson/util/builder.h"
+#include "jsobj.h"
+#include "ops/query.h"
+#include "commands.h"
+#include "db.h"
+#include "instance.h"
+#include "repl.h"
+
+namespace mongo {
+
+ BSONElement getErrField(const BSONObj& o);
+
+ void ensureHaveIdIndex(const char *ns);
+
+ bool replAuthenticate(DBClientBase *);
+
+ /** Selectively release the mutex based on a parameter. */
+ class dbtempreleaseif {
+ public:
+ dbtempreleaseif( bool release ) : _impl( release ? new dbtemprelease() : 0 ) {}
+ private:
+ shared_ptr< dbtemprelease > _impl;
+ };
+
+ void mayInterrupt( bool mayBeInterrupted ) {
+ if ( mayBeInterrupted ) {
+ killCurrentOp.checkForInterrupt( false );
+ }
+ }
+
+ class Cloner: boost::noncopyable {
+ auto_ptr< DBClientWithCommands > conn;
+ void copy(const char *from_ns, const char *to_ns, bool isindex, bool logForRepl,
+ bool masterSameProcess, bool slaveOk, bool mayYield, bool mayBeInterrupted, Query q = Query());
+ struct Fun;
+ public:
+ Cloner() { }
+
+ /* slaveOk - if true it is ok if the source of the data is !ismaster.
+ useReplAuth - use the credentials we normally use as a replication slave for the cloning
+ snapshot - use $snapshot mode for copying collections. note this should not be used when it isn't required, as it will be slower.
+ for example repairDatabase need not use it.
+ */
+ void setConnection( DBClientWithCommands *c ) { conn.reset( c ); }
+
+ /** copy the entire database */
+ bool go(const char *masterHost, string& errmsg, const string& fromdb, bool logForRepl, bool slaveOk, bool useReplAuth, bool snapshot, bool mayYield, bool mayBeInterrupted, int *errCode = 0);
+
+ bool copyCollection( const string& ns , const BSONObj& query , string& errmsg , bool mayYield, bool mayBeInterrupted, bool copyIndexes = true, bool logForRepl = true );
+ };
+
+ /* for index info object:
+ { "name" : "name_1" , "ns" : "foo.index3" , "key" : { "name" : 1.0 } }
+ we need to fix up the value in the "ns" parameter so that the name prefix is correct on a
+ copy to a new name.
+ */
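+    /* e.g. (illustrative db names): when cloning into a database named "bar", an index spec read
+       from the source as
+           { "name" : "name_1" , "ns" : "foo.index3" , "key" : { "name" : 1.0 } }
+       comes out of fixindex() as
+           { "name" : "name_1" , "ns" : "bar.index3" , "key" : { "name" : 1.0 } }
+       (any "v" field is dropped, as noted below, so v:0 indexes get rebuilt as v:1).
+    */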
+ BSONObj fixindex(BSONObj o) {
+ BSONObjBuilder b;
+ BSONObjIterator i(o);
+ while ( i.moreWithEOO() ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+
+ // for now, skip the "v" field so that v:0 indexes will be upgraded to v:1
+ if ( string("v") == e.fieldName() ) {
+ continue;
+ }
+
+ if ( string("ns") == e.fieldName() ) {
+ uassert( 10024 , "bad ns field for index during dbcopy", e.type() == String);
+ const char *p = strchr(e.valuestr(), '.');
+ uassert( 10025 , "bad ns field for index during dbcopy [2]", p);
+ string newname = cc().database()->name + p;
+ b.append("ns", newname);
+ }
+ else
+ b.append(e);
+ }
+ BSONObj res= b.obj();
+
+ /* if( mod ) {
+ out() << "before: " << o.toString() << endl;
+ o.dump();
+ out() << "after: " << res.toString() << endl;
+ res.dump();
+ }*/
+
+ return res;
+ }
+
+ struct Cloner::Fun {
+ Fun() : lastLog(0) { }
+ time_t lastLog;
+ void operator()( DBClientCursorBatchIterator &i ) {
+ mongolock l( true );
+ if ( context ) {
+ context->relocked();
+ }
+
+ while( i.moreInCurrentBatch() ) {
+ if ( n % 128 == 127 /*yield some*/ ) {
+ time_t now = time(0);
+ if( now - lastLog >= 60 ) {
+ // report progress
+ if( lastLog )
+ log() << "clone " << to_collection << ' ' << n << endl;
+ lastLog = now;
+ }
+ mayInterrupt( _mayBeInterrupted );
+ dbtempreleaseif t( _mayYield );
+ }
+
+ BSONObj tmp = i.nextSafe();
+
+ /* assure object is valid. note this will slow us down a little. */
+ if ( !tmp.valid() ) {
+ stringstream ss;
+ ss << "Cloner: skipping corrupt object from " << from_collection;
+ BSONElement e = tmp.firstElement();
+ try {
+ e.validate();
+ ss << " firstElement: " << e;
+ }
+ catch( ... ) {
+ ss << " firstElement corrupt";
+ }
+ out() << ss.str() << endl;
+ continue;
+ }
+
+ ++n;
+
+ BSONObj js = tmp;
+ if ( isindex ) {
+ assert( strstr(from_collection, "system.indexes") );
+ js = fixindex(tmp);
+ storedForLater->push_back( js.getOwned() );
+ continue;
+ }
+
+ try {
+ theDataFileMgr.insertWithObjMod(to_collection, js);
+ if ( logForRepl )
+ logOp("i", to_collection, js);
+
+ getDur().commitIfNeeded();
+ }
+ catch( UserException& e ) {
+ log() << "warning: exception cloning object in " << from_collection << ' ' << e.what() << " obj:" << js.toString() << '\n';
+ }
+
+ RARELY if ( time( 0 ) - saveLast > 60 ) {
+ log() << n << " objects cloned so far from collection " << from_collection << endl;
+ saveLast = time( 0 );
+ }
+ }
+ }
+ int n;
+ bool isindex;
+ const char *from_collection;
+ const char *to_collection;
+ time_t saveLast;
+ list<BSONObj> *storedForLater;
+ bool logForRepl;
+ Client::Context *context;
+ bool _mayYield;
+ bool _mayBeInterrupted;
+ };
+
+ /* copy the specified collection
+       isindex - if true, this is the system.indexes collection, in which case we do some transformations when copying.
+ */
+ void Cloner::copy(const char *from_collection, const char *to_collection, bool isindex, bool logForRepl, bool masterSameProcess, bool slaveOk, bool mayYield, bool mayBeInterrupted, Query query) {
+ list<BSONObj> storedForLater;
+
+ Fun f;
+ f.n = 0;
+ f.isindex = isindex;
+ f.from_collection = from_collection;
+ f.to_collection = to_collection;
+ f.saveLast = time( 0 );
+ f.storedForLater = &storedForLater;
+ f.logForRepl = logForRepl;
+ f._mayYield = mayYield;
+ f._mayBeInterrupted = mayBeInterrupted;
+
+ int options = QueryOption_NoCursorTimeout | ( slaveOk ? QueryOption_SlaveOk : 0 );
+ {
+ f.context = cc().getContext();
+ mayInterrupt( mayBeInterrupted );
+ dbtempreleaseif r( mayYield );
+ DBClientConnection *remote = dynamic_cast< DBClientConnection* >( conn.get() );
+ if ( remote ) {
+ remote->query( boost::function<void(DBClientCursorBatchIterator &)>( f ), from_collection, query, 0, options );
+ }
+ else {
+ // there is no exhaust mode for direct client, so we have this hack
+ auto_ptr<DBClientCursor> c = conn->query( from_collection, query, 0, 0, 0, options );
+ assert( c.get() );
+ while( c->more() ) {
+ DBClientCursorBatchIterator i( *c );
+ f( i );
+ }
+ }
+ }
+
+ if ( storedForLater.size() ) {
+ for ( list<BSONObj>::iterator i = storedForLater.begin(); i!=storedForLater.end(); i++ ) {
+ BSONObj js = *i;
+ try {
+ theDataFileMgr.insertWithObjMod(to_collection, js);
+ if ( logForRepl )
+ logOp("i", to_collection, js);
+
+ getDur().commitIfNeeded();
+ }
+ catch( UserException& e ) {
+ log() << "warning: exception cloning object in " << from_collection << ' ' << e.what() << " obj:" << js.toString() << '\n';
+ }
+ }
+ }
+ }
+
+ bool copyCollectionFromRemote(const string& host, const string& ns, string& errmsg) {
+ Cloner c;
+
+ DBClientConnection *conn = new DBClientConnection();
+ // cloner owns conn in auto_ptr
+ c.setConnection(conn);
+ uassert(15908, errmsg, conn->connect(host, errmsg) && replAuthenticate(conn));
+
+ return c.copyCollection(ns, BSONObj(), errmsg, true, false, /*copyIndexes*/ true, false);
+ }
+
+ bool Cloner::copyCollection( const string& ns, const BSONObj& query, string& errmsg,
+ bool mayYield, bool mayBeInterrupted, bool copyIndexes, bool logForRepl ) {
+
+ writelock lk(ns); // TODO: make this lower down
+ Client::Context ctx(ns);
+
+ {
+ // config
+ string temp = ctx.db()->name + ".system.namespaces";
+ BSONObj config = conn->findOne( temp , BSON( "name" << ns ) );
+ if ( config["options"].isABSONObj() )
+ if ( ! userCreateNS( ns.c_str() , config["options"].Obj() , errmsg, logForRepl , 0 ) )
+ return false;
+ }
+
+ {
+ // main data
+ copy( ns.c_str() , ns.c_str() , /*isindex*/false , logForRepl , false , true , mayYield, mayBeInterrupted, Query(query).snapshot() );
+ }
+
+ /* TODO : copyIndexes bool does not seem to be implemented! */
+ if( !copyIndexes ) {
+ log() << "ERROR copy collection copyIndexes not implemented? " << ns << endl;
+ }
+
+ {
+ // indexes
+ string temp = ctx.db()->name + ".system.indexes";
+ copy( temp.c_str() , temp.c_str() , /*isindex*/true , logForRepl , false , true , mayYield, mayBeInterrupted, BSON( "ns" << ns ) );
+ }
+ getDur().commitIfNeeded();
+ return true;
+ }
+
+ extern bool inDBRepair;
+ void ensureIdIndexForNewNs(const char *ns);
+
+ bool Cloner::go(const char *masterHost, string& errmsg, const string& fromdb, bool logForRepl, bool slaveOk, bool useReplAuth, bool snapshot, bool mayYield, bool mayBeInterrupted, int *errCode) {
+ if ( errCode ) {
+ *errCode = 0;
+ }
+ massert( 10289 , "useReplAuth is not written to replication log", !useReplAuth || !logForRepl );
+
+ string todb = cc().database()->name;
+ stringstream a,b;
+ a << "localhost:" << cmdLine.port;
+ b << "127.0.0.1:" << cmdLine.port;
+ bool masterSameProcess = ( a.str() == masterHost || b.str() == masterHost );
+ if ( masterSameProcess ) {
+ if ( fromdb == todb && cc().database()->path == dbpath ) {
+ // guard against an "infinite" loop
+ /* if you are replicating, the local.sources config may be wrong if you get this */
+ errmsg = "can't clone from self (localhost).";
+ return false;
+ }
+ }
+ /* todo: we can put these releases inside dbclient or a dbclient specialization.
+ or just wait until we get rid of global lock anyway.
+ */
+ string ns = fromdb + ".system.namespaces";
+ list<BSONObj> toClone;
+ {
+ mayInterrupt( mayBeInterrupted );
+ dbtempreleaseif r( mayYield );
+
+ // just using exhaust for collection copying right now
+ auto_ptr<DBClientCursor> c;
+ {
+ if ( conn.get() ) {
+ // nothing to do
+ }
+ else if ( !masterSameProcess ) {
+ ConnectionString cs = ConnectionString::parse( masterHost, errmsg );
+ auto_ptr<DBClientBase> con( cs.connect( errmsg ));
+ if ( !con.get() )
+ return false;
+ if( !replAuthenticate(con.get()) )
+ return false;
+
+ conn = con;
+ }
+ else {
+ conn.reset( new DBDirectClient() );
+ }
+                // todo: if snapshot (bool param to this func) is true, do we need to snapshot this query?
+                // it would only be relevant with thousands of collections -- and even then it is hard
+                // to exceed a single cursor batch.
+                // for repl it is probably ok as we apply the oplog section after the clone (i.e. repl
+                // does not use snapshot=true).
+ c = conn->query( ns.c_str(), BSONObj(), 0, 0, 0, slaveOk ? QueryOption_SlaveOk : 0 );
+ }
+
+ if ( c.get() == 0 ) {
+ errmsg = "query failed " + ns;
+ return false;
+ }
+
+ if ( c->more() ) {
+ BSONObj first = c->next();
+ if( !getErrField(first).eoo() ) {
+ if ( errCode ) {
+ *errCode = first.getIntField("code");
+ }
+ errmsg = "query failed " + ns;
+ return false;
+ }
+ c->putBack( first );
+ }
+
+ while ( c->more() ) {
+ BSONObj collection = c->next();
+
+ log(2) << "\t cloner got " << collection << endl;
+
+ BSONElement e = collection.getField("name");
+ if ( e.eoo() ) {
+ string s = "bad system.namespaces object " + collection.toString();
+ massert( 10290 , s.c_str(), false);
+ }
+ assert( !e.eoo() );
+ assert( e.type() == String );
+ const char *from_name = e.valuestr();
+
+ if( strstr(from_name, ".system.") ) {
+                /* system.users and system.js are cloned -- but nothing else from system.
+                 * system.indexes is handled specially at the end */
+ if( legalClientSystemNS( from_name , true ) == 0 ) {
+ log(2) << "\t\t not cloning because system collection" << endl;
+ continue;
+ }
+ }
+ if( ! NamespaceString::normal( from_name ) ) {
+ log(2) << "\t\t not cloning because has $ " << endl;
+ continue;
+ }
+ toClone.push_back( collection.getOwned() );
+ }
+ }
+
+ for ( list<BSONObj>::iterator i=toClone.begin(); i != toClone.end(); i++ ) {
+ {
+ mayInterrupt( mayBeInterrupted );
+ dbtempreleaseif r( mayYield );
+ }
+ BSONObj collection = *i;
+ log(2) << " really will clone: " << collection << endl;
+ const char * from_name = collection["name"].valuestr();
+ BSONObj options = collection.getObjectField("options");
+
+ /* change name "<fromdb>.collection" -> <todb>.collection */
+ const char *p = strchr(from_name, '.');
+ assert(p);
+ string to_name = todb + p;
+
+ bool wantIdIndex = false;
+ {
+ string err;
+ const char *toname = to_name.c_str();
+ /* we defer building id index for performance - building it in batch is much faster */
+ userCreateNS(toname, options, err, logForRepl, &wantIdIndex);
+ }
+ log(1) << "\t\t cloning " << from_name << " -> " << to_name << endl;
+ Query q;
+ if( snapshot )
+ q.snapshot();
+ copy(from_name, to_name.c_str(), false, logForRepl, masterSameProcess, slaveOk, mayYield, mayBeInterrupted, q);
+
+ if( wantIdIndex ) {
+ /* we need dropDups to be true as we didn't do a true snapshot and this is before applying oplog operations
+ that occur during the initial sync. inDBRepair makes dropDups be true.
+ */
+ bool old = inDBRepair;
+ try {
+ inDBRepair = true;
+ ensureIdIndexForNewNs(to_name.c_str());
+ inDBRepair = old;
+ }
+ catch(...) {
+ inDBRepair = old;
+ throw;
+ }
+ }
+ }
+
+ // now build the indexes
+
+ string system_indexes_from = fromdb + ".system.indexes";
+ string system_indexes_to = todb + ".system.indexes";
+ /* [dm]: is the ID index sometimes not called "_id_"? There is other code in the system that looks for a "_id" prefix
+ rather than this exact value. we should standardize. OR, remove names - which is in the bugdb. Anyway, this
+ is dubious here at the moment.
+ */
+ // won't need a snapshot of the query of system.indexes as there can never be very many.
+ copy(system_indexes_from.c_str(), system_indexes_to.c_str(), true, logForRepl, masterSameProcess, slaveOk, mayYield, mayBeInterrupted, BSON( "name" << NE << "_id_" ) );
+
+ return true;
+ }
+
+ bool cloneFrom(const char *masterHost, string& errmsg, const string& fromdb, bool logForReplication,
+ bool slaveOk, bool useReplAuth, bool snapshot, bool mayYield, bool mayBeInterrupted, int *errCode) {
+ Cloner c;
+ return c.go(masterHost, errmsg, fromdb, logForReplication, slaveOk, useReplAuth, snapshot, mayYield, mayBeInterrupted, errCode);
+ }
+
+ /* Usage:
+ mydb.$cmd.findOne( { clone: "fromhost" } );
+ */
+ class CmdClone : public Command {
+ public:
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual LockType locktype() const { return WRITE; }
+ virtual void help( stringstream &help ) const {
+ help << "clone this database from an instance of the db on another host\n";
+ help << "{ clone : \"host13\" }";
+ }
+ CmdClone() : Command("clone") { }
+ virtual bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ string from = cmdObj.getStringField("clone");
+ if ( from.empty() )
+ return false;
+ /* replication note: we must logOp() not the command, but the cloned data -- if the slave
+ were to clone it would get a different point-in-time and not match.
+ */
+ return cloneFrom(from.c_str(), errmsg, dbname,
+ /*logForReplication=*/!fromRepl, /*slaveOk*/false, /*usereplauth*/false, /*snapshot*/true, /*mayYield*/true, /*mayBeInterrupted*/false);
+ }
+ } cmdclone;
+
+ class CmdCloneCollection : public Command {
+ public:
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual LockType locktype() const { return NONE; }
+ CmdCloneCollection() : Command("cloneCollection") { }
+ virtual void help( stringstream &help ) const {
+ help << "{ cloneCollection: <namespace>, from: <host> [,query: <query_filter>] [,copyIndexes:<bool>] }"
+ "\nCopies a collection from one server to another. Do not use on a single server as the destination "
+ "is placed at the same db.collection (namespace) as the source.\n"
+ "Warning: the local copy of 'ns' is emptied before the copying begins. Any existing data will be lost there."
+ ;
+ }
+ virtual bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ string fromhost = cmdObj.getStringField("from");
+ if ( fromhost.empty() ) {
+ errmsg = "missing 'from' parameter";
+ return false;
+ }
+ {
+ HostAndPort h(fromhost);
+ if( h.isSelf() ) {
+ errmsg = "can't cloneCollection from self";
+ return false;
+ }
+ }
+ string collection = cmdObj.getStringField("cloneCollection");
+ if ( collection.empty() ) {
+ errmsg = "bad 'cloneCollection' value";
+ return false;
+ }
+ BSONObj query = cmdObj.getObjectField("query");
+ if ( query.isEmpty() )
+ query = BSONObj();
+
+ BSONElement copyIndexesSpec = cmdObj.getField("copyindexes");
+ bool copyIndexes = copyIndexesSpec.isBoolean() ? copyIndexesSpec.boolean() : true;
+
+ log() << "cloneCollection. db:" << dbname << " collection:" << collection << " from: " << fromhost
+ << " query: " << query << " " << ( copyIndexes ? "" : ", not copying indexes" ) << endl;
+
+ Cloner c;
+ auto_ptr<DBClientConnection> myconn;
+ myconn.reset( new DBClientConnection() );
+ if ( ! myconn->connect( fromhost , errmsg ) )
+ return false;
+
+ c.setConnection( myconn.release() );
+
+ return c.copyCollection( collection , query, errmsg , true, false, copyIndexes );
+ }
+ } cmdclonecollection;
+
+
+ thread_specific_ptr< DBClientConnection > authConn_;
+ /* Usage:
+ admindb.$cmd.findOne( { copydbgetnonce: 1, fromhost: <hostname> } );
+ */
+ class CmdCopyDbGetNonce : public Command {
+ public:
+ CmdCopyDbGetNonce() : Command("copydbgetnonce") { }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual LockType locktype() const { return WRITE; }
+ virtual void help( stringstream &help ) const {
+ help << "get a nonce for subsequent copy db request from secure server\n";
+ help << "usage: {copydbgetnonce: 1, fromhost: <hostname>}";
+ }
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ string fromhost = cmdObj.getStringField("fromhost");
+ if ( fromhost.empty() ) {
+ /* copy from self */
+ stringstream ss;
+ ss << "localhost:" << cmdLine.port;
+ fromhost = ss.str();
+ }
+ authConn_.reset( new DBClientConnection() );
+ BSONObj ret;
+ {
+ dbtemprelease t;
+ if ( !authConn_->connect( fromhost, errmsg ) )
+ return false;
+ if( !authConn_->runCommand( "admin", BSON( "getnonce" << 1 ), ret ) ) {
+ errmsg = "couldn't get nonce " + ret.toString();
+ return false;
+ }
+ }
+ result.appendElements( ret );
+ return true;
+ }
+ } cmdcopydbgetnonce;
+
+ /* Usage:
+ admindb.$cmd.findOne( { copydb: 1, fromhost: <hostname>, fromdb: <db>, todb: <db>[, username: <username>, nonce: <nonce>, key: <key>] } );
+ */
+ class CmdCopyDb : public Command {
+ public:
+ CmdCopyDb() : Command("copydb") { }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual LockType locktype() const { return WRITE; }
+ virtual void help( stringstream &help ) const {
+ help << "copy a database from another host to this host\n";
+ help << "usage: {copydb: 1, fromhost: <hostname>, fromdb: <db>, todb: <db>[, slaveOk: <bool>, username: <username>, nonce: <nonce>, key: <key>]}";
+ }
+ virtual bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ bool slaveOk = cmdObj["slaveOk"].trueValue();
+ string fromhost = cmdObj.getStringField("fromhost");
+ if ( fromhost.empty() ) {
+ /* copy from self */
+ stringstream ss;
+ ss << "localhost:" << cmdLine.port;
+ fromhost = ss.str();
+ }
+ string fromdb = cmdObj.getStringField("fromdb");
+ string todb = cmdObj.getStringField("todb");
+ if ( fromhost.empty() || todb.empty() || fromdb.empty() ) {
+ errmsg = "parms missing - {copydb: 1, fromhost: <hostname>, fromdb: <db>, todb: <db>}";
+ return false;
+ }
+ Cloner c;
+ string username = cmdObj.getStringField( "username" );
+ string nonce = cmdObj.getStringField( "nonce" );
+ string key = cmdObj.getStringField( "key" );
+ if ( !username.empty() && !nonce.empty() && !key.empty() ) {
+ uassert( 13008, "must call copydbgetnonce first", authConn_.get() );
+ BSONObj ret;
+ {
+ dbtemprelease t;
+ if ( !authConn_->runCommand( fromdb, BSON( "authenticate" << 1 << "user" << username << "nonce" << nonce << "key" << key ), ret ) ) {
+ errmsg = "unable to login " + ret.toString();
+ return false;
+ }
+ }
+ c.setConnection( authConn_.release() );
+ }
+ Client::Context ctx(todb);
+ bool res = c.go(fromhost.c_str(), errmsg, fromdb, /*logForReplication=*/!fromRepl, slaveOk, /*replauth*/false, /*snapshot*/true, /*mayYield*/true, /*mayBeInterrupted*/ false);
+ return res;
+ }
+ } cmdcopydb;
+
+ class CmdRenameCollection : public Command {
+ public:
+ CmdRenameCollection() : Command( "renameCollection" ) {}
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual bool requiresAuth() { return false; } // do our own auth
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual LockType locktype() const { return WRITE; }
+ virtual bool logTheOp() {
+ return true; // can't log steps when doing fast rename within a db, so always log the op rather than individual steps comprising it.
+ }
+ virtual void help( stringstream &help ) const {
+ help << " example: { renameCollection: foo.a, to: bar.b }";
+ }
+ virtual bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ string source = cmdObj.getStringField( name.c_str() );
+ string target = cmdObj.getStringField( "to" );
+ uassert(15967,"invalid collection name: " + target, NamespaceString::validCollectionName(target.c_str()));
+ if ( source.empty() || target.empty() ) {
+ errmsg = "invalid command syntax";
+ return false;
+ }
+
+ bool capped = false;
+ long long size = 0;
+ {
+ Client::Context ctx( source ); // auths against source
+ NamespaceDetails *nsd = nsdetails( source.c_str() );
+ uassert( 10026 , "source namespace does not exist", nsd );
+ capped = nsd->capped;
+ if ( capped )
+ for( DiskLoc i = nsd->firstExtent; !i.isNull(); i = i.ext()->xnext )
+ size += i.ext()->length;
+ }
+
+ Client::Context ctx( target ); //auths against target
+
+ if ( nsdetails( target.c_str() ) ) {
+ uassert( 10027 , "target namespace exists", cmdObj["dropTarget"].trueValue() );
+ BSONObjBuilder bb( result.subobjStart( "dropTarget" ) );
+ dropCollection( target , errmsg , bb );
+ bb.done();
+ if ( errmsg.size() > 0 )
+ return false;
+ }
+
+ {
+ char from[256];
+ nsToDatabase( source.c_str(), from );
+ char to[256];
+ nsToDatabase( target.c_str(), to );
+ if ( strcmp( from, to ) == 0 ) {
+ renameNamespace( source.c_str(), target.c_str() );
+ // make sure we drop counters etc
+ Top::global.collectionDropped( source );
+ return true;
+ }
+ }
+
+ BSONObjBuilder spec;
+ if ( capped ) {
+ spec.appendBool( "capped", true );
+ spec.append( "size", double( size ) );
+ }
+ if ( !userCreateNS( target.c_str(), spec.done(), errmsg, false ) )
+ return false;
+
+ auto_ptr< DBClientCursor > c;
+ DBDirectClient bridge;
+
+ {
+ c = bridge.query( source, BSONObj() );
+ }
+ while( 1 ) {
+ {
+ if ( !c->more() )
+ break;
+ }
+ BSONObj o = c->next();
+ theDataFileMgr.insertWithObjMod( target.c_str(), o );
+ }
+
+ char cl[256];
+ nsToDatabase( source.c_str(), cl );
+ string sourceIndexes = string( cl ) + ".system.indexes";
+ nsToDatabase( target.c_str(), cl );
+ string targetIndexes = string( cl ) + ".system.indexes";
+ {
+ c = bridge.query( sourceIndexes, QUERY( "ns" << source ) );
+ }
+ while( 1 ) {
+ {
+ if ( !c->more() )
+ break;
+ }
+ BSONObj o = c->next();
+ BSONObjBuilder b;
+ BSONObjIterator i( o );
+ while( i.moreWithEOO() ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ if ( strcmp( e.fieldName(), "ns" ) == 0 ) {
+ b.append( "ns", target );
+ }
+ else {
+ b.append( e );
+ }
+ }
+ BSONObj n = b.done();
+ theDataFileMgr.insertWithObjMod( targetIndexes.c_str(), n );
+ }
+
+ {
+ Client::Context ctx( source );
+ dropCollection( source, errmsg, result );
+ }
+ return true;
+ }
+ } cmdrenamecollection;
+
+} // namespace mongo
diff --git a/src/mongo/db/cloner.h b/src/mongo/db/cloner.h
new file mode 100644
index 00000000000..130fea0fac1
--- /dev/null
+++ b/src/mongo/db/cloner.h
@@ -0,0 +1,39 @@
+// cloner.h - copy a database (export/import basically)
+
+/**
+ * Copyright (C) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "jsobj.h"
+
+namespace mongo {
+
+ /**
+ * @param slaveOk - if true it is ok if the source of the data is !ismaster.
+ * @param useReplAuth - use the credentials we normally use as a replication slave for the cloning
+ * @param snapshot - use $snapshot mode for copying collections. note this should not be used when it isn't required, as it will be slower.
+ * for example repairDatabase need not use it.
+ * @param errCode - If provided, this will be set on error to the server's error code. Currently
+ * this will only be set if there is an error in the initial system.namespaces query.
+ */
+ bool cloneFrom(const char *masterHost, string& errmsg, const string& fromdb, bool logForReplication,
+ bool slaveOk, bool useReplAuth, bool snapshot, bool mayYield,
+ bool mayBeInterrupted, int *errCode = 0);
+
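+    /* Illustrative call (argument values hypothetical):
+           string errmsg;
+           bool ok = cloneFrom( "otherhost:27017", errmsg, "test",
+                                true,   // logForReplication
+                                false,  // slaveOk
+                                false,  // useReplAuth
+                                true,   // snapshot
+                                true,   // mayYield
+                                false   // mayBeInterrupted
+                              );
+    */
+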
+ bool copyCollectionFromRemote(const string& host, const string& ns, string& errmsg);
+
+} // namespace mongo
diff --git a/src/mongo/db/cmdline.cpp b/src/mongo/db/cmdline.cpp
new file mode 100644
index 00000000000..a9b0d7097ca
--- /dev/null
+++ b/src/mongo/db/cmdline.cpp
@@ -0,0 +1,519 @@
+// cmdline.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "cmdline.h"
+#include "commands.h"
+#include "../util/password.h"
+#include "../util/processinfo.h"
+#include "../util/net/listen.h"
+#include "security_common.h"
+#ifdef _WIN32
+#include <direct.h>
+#else
+#include <sys/types.h>
+#include <sys/wait.h>
+#endif
+#include "globals.h"
+
+#define MAX_LINE_LENGTH 256
+
+namespace po = boost::program_options;
+namespace fs = boost::filesystem;
+
+namespace mongo {
+
+ void setupSignals( bool inFork );
+ string getHostNameCached();
+ static BSONArray argvArray;
+ static BSONObj parsedOpts;
+
+ void CmdLine::addGlobalOptions( boost::program_options::options_description& general ,
+ boost::program_options::options_description& hidden ) {
+ /* support for -vv -vvvv etc. */
+ for (string s = "vv"; s.length() <= 12; s.append("v")) {
+ hidden.add_options()(s.c_str(), "verbose");
+ }
+
+ general.add_options()
+ ("help,h", "show this usage information")
+ ("version", "show version information")
+ ("config,f", po::value<string>(), "configuration file specifying additional options")
+ ("verbose,v", "be more verbose (include multiple times for more verbosity e.g. -vvvvv)")
+ ("quiet", "quieter output")
+ ("port", po::value<int>(&cmdLine.port), "specify port number")
+ ("bind_ip", po::value<string>(&cmdLine.bind_ip), "comma separated list of ip addresses to listen on - all local ips by default")
+ ("maxConns",po::value<int>(), "max number of simultaneous connections")
+ ("objcheck", "inspect client data for validity on receipt")
+ ("logpath", po::value<string>() , "log file to send write to instead of stdout - has to be a file, not directory" )
+ ("logappend" , "append to logpath instead of over-writing" )
+ ("pidfilepath", po::value<string>(), "full path to pidfile (if not set, no pidfile is created)")
+ ("keyFile", po::value<string>(), "private key for cluster authentication (only for replica sets)")
+#ifndef _WIN32
+ ("nounixsocket", "disable listening on unix sockets")
+ ("unixSocketPrefix", po::value<string>(), "alternative directory for UNIX domain sockets (defaults to /tmp)")
+ ("fork" , "fork server process" )
+ ("syslog" , "log to system's syslog facility instead of file or stdout" )
+#endif
+ ;
+
+ hidden.add_options()
+ ("cloud", po::value<string>(), "custom dynamic host naming")
+#ifdef MONGO_SSL
+ ("sslOnNormalPorts" , "use ssl on configured ports" )
+ ("sslPEMKeyFile" , po::value<string>(&cmdLine.sslPEMKeyFile), "PEM file for ssl" )
+ ("sslPEMKeyPassword" , new PasswordValue(&cmdLine.sslPEMKeyPassword) , "PEM file password" )
+#endif
+ ;
+
+ }
+
+
+#if defined(_WIN32)
+ void CmdLine::addWindowsOptions( boost::program_options::options_description& windows ,
+ boost::program_options::options_description& hidden ) {
+ windows.add_options()
+ ("install", "install mongodb service")
+ ("remove", "remove mongodb service")
+ ("reinstall", "reinstall mongodb service (equivilant of mongod --remove followed by mongod --install)")
+ ("serviceName", po::value<string>(), "windows service name")
+ ("serviceDisplayName", po::value<string>(), "windows service display name")
+ ("serviceDescription", po::value<string>(), "windows service description")
+ ("serviceUser", po::value<string>(), "user name service executes as")
+ ("servicePassword", po::value<string>(), "password used to authenticate serviceUser")
+ ;
+ hidden.add_options()("service", "start mongodb service");
+ }
+#endif
+
+ void CmdLine::parseConfigFile( istream &f, stringstream &ss ) {
+ string s;
+ char line[MAX_LINE_LENGTH];
+
+ while ( f ) {
+ f.getline(line, MAX_LINE_LENGTH);
+ s = line;
+            s.erase( std::remove(s.begin(), s.end(), ' '), s.end() );   // strip spaces (erase-remove idiom)
+            s.erase( std::remove(s.begin(), s.end(), '\t'), s.end() );  // strip tabs
+ boost::to_upper(s);
+
+ if ( s.find( "FASTSYNC" ) != string::npos )
+ cout << "warning \"fastsync\" should not be put in your configuration file" << endl;
+
+ if ( s.c_str()[0] == '#' ) {
+ // skipping commented line
+ } else if ( s.find( "=FALSE" ) == string::npos ) {
+ ss << line << endl;
+ } else {
+ cout << "warning: remove or comment out this line by starting it with \'#\', skipping now : " << line << endl;
+ }
+ }
+ return;
+ }
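+
+    /* Illustrative config file input for parseConfigFile (contents hypothetical):
+
+           # comment lines are skipped
+           port = 27018
+           fork = true
+           rest = false        <-- warned about and skipped, since it normalizes to "...=FALSE"
+
+       everything else is passed through to boost::program_options unchanged.
+    */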
+
+#ifndef _WIN32
+ // support for exit value propagation with fork
+ void launchSignal( int sig ) {
+ if ( sig == SIGUSR2 ) {
+ pid_t cur = getpid();
+
+ if ( cur == cmdLine.parentProc || cur == cmdLine.leaderProc ) {
+ // signal indicates successful start allowing us to exit
+ _exit(0);
+ }
+ }
+ }
+
+ void setupLaunchSignals() {
+ assert( signal(SIGUSR2 , launchSignal ) != SIG_ERR );
+ }
+
+
+ void CmdLine::launchOk() {
+ if ( cmdLine.doFork ) {
+ // killing leader will propagate to parent
+ assert( kill( cmdLine.leaderProc, SIGUSR2 ) == 0 );
+ }
+ }
+#endif
+
+ bool CmdLine::store( int argc , char ** argv ,
+ boost::program_options::options_description& visible,
+ boost::program_options::options_description& hidden,
+ boost::program_options::positional_options_description& positional,
+ boost::program_options::variables_map &params ) {
+
+
+ {
+ // setup binary name
+ cmdLine.binaryName = argv[0];
+ size_t i = cmdLine.binaryName.rfind( '/' );
+ if ( i != string::npos )
+ cmdLine.binaryName = cmdLine.binaryName.substr( i + 1 );
+
+ // setup cwd
+ char buffer[1024];
+#ifdef _WIN32
+ assert( _getcwd( buffer , 1000 ) );
+#else
+ assert( getcwd( buffer , 1000 ) );
+#endif
+ cmdLine.cwd = buffer;
+ }
+
+
+ /* don't allow guessing - creates ambiguities when some options are
+ * prefixes of others. allow long disguises and don't allow guessing
+ * to get away with our vvvvvvv trick. */
+ int style = (((po::command_line_style::unix_style ^
+ po::command_line_style::allow_guessing) |
+ po::command_line_style::allow_long_disguise) ^
+ po::command_line_style::allow_sticky);
+
+
+ try {
+
+ po::options_description all;
+ all.add( visible );
+ all.add( hidden );
+
+ po::store( po::command_line_parser(argc, argv)
+ .options( all )
+ .positional( positional )
+ .style( style )
+ .run(),
+ params );
+
+ if ( params.count("config") ) {
+ ifstream f( params["config"].as<string>().c_str() );
+ if ( ! f.is_open() ) {
+ cout << "ERROR: could not read from config file" << endl << endl;
+ cout << visible << endl;
+ return false;
+ }
+
+ stringstream ss;
+ CmdLine::parseConfigFile( f, ss );
+ po::store( po::parse_config_file( ss , all ) , params );
+ f.close();
+ }
+
+ po::notify(params);
+ }
+ catch (po::error &e) {
+ cout << "error command line: " << e.what() << endl;
+ cout << "use --help for help" << endl;
+ //cout << visible << endl;
+ return false;
+ }
+
+ if (params.count("verbose")) {
+ logLevel = 1;
+ }
+
+ for (string s = "vv"; s.length() <= 12; s.append("v")) {
+ if (params.count(s)) {
+ logLevel = s.length();
+ }
+ }
+
+ if (params.count("quiet")) {
+ cmdLine.quiet = true;
+ }
+
+ if ( params.count( "maxConns" ) ) {
+ int newSize = params["maxConns"].as<int>();
+ if ( newSize < 5 ) {
+ out() << "maxConns has to be at least 5" << endl;
+ dbexit( EXIT_BADOPTIONS );
+ }
+ else if ( newSize >= 10000000 ) {
+ out() << "maxConns can't be greater than 10000000" << endl;
+ dbexit( EXIT_BADOPTIONS );
+ }
+ connTicketHolder.resize( newSize );
+ }
+
+ if (params.count("objcheck")) {
+ cmdLine.objcheck = true;
+ }
+
+ string logpath;
+
+#ifndef _WIN32
+ if (params.count("unixSocketPrefix")) {
+ cmdLine.socket = params["unixSocketPrefix"].as<string>();
+ if (!fs::is_directory(cmdLine.socket)) {
+ cout << cmdLine.socket << " must be a directory" << endl;
+ ::exit(-1);
+ }
+ }
+
+ if (params.count("nounixsocket")) {
+ cmdLine.noUnixSocket = true;
+ }
+
+ if (params.count("fork")) {
+ cmdLine.doFork = true;
+ if ( ! params.count( "logpath" ) && ! params.count( "syslog" ) ) {
+ cout << "--fork has to be used with --logpath or --syslog" << endl;
+ ::exit(-1);
+ }
+
+ if ( params.count( "logpath" ) ) {
+ // test logpath
+ logpath = params["logpath"].as<string>();
+ assert( logpath.size() );
+ if ( logpath[0] != '/' ) {
+ logpath = cmdLine.cwd + "/" + logpath;
+ }
+ FILE * test = fopen( logpath.c_str() , "a" );
+ if ( ! test ) {
+ cout << "can't open [" << logpath << "] for log file: " << errnoWithDescription() << endl;
+ ::exit(-1);
+ }
+ fclose( test );
+ }
+
+ cout.flush();
+ cerr.flush();
+
+ cmdLine.parentProc = getpid();
+
+ // facilitate clean exit when child starts successfully
+ setupLaunchSignals();
+
+ pid_t c = fork();
+ if ( c ) {
+ int pstat;
+ waitpid(c, &pstat, 0);
+
+ if ( WIFEXITED(pstat) ) {
+ if ( ! WEXITSTATUS(pstat) ) {
+ cout << "child process started successfully, parent exiting" << endl;
+ }
+
+ _exit( WEXITSTATUS(pstat) );
+ }
+
+ _exit(50);
+ }
+
+ if ( chdir("/") < 0 ) {
+ cout << "Cant chdir() while forking server process: " << strerror(errno) << endl;
+ ::exit(-1);
+ }
+ setsid();
+
+ cmdLine.leaderProc = getpid();
+
+ pid_t c2 = fork();
+ if ( c2 ) {
+ int pstat;
+ cout << "forked process: " << c2 << endl;
+ waitpid(c2, &pstat, 0);
+
+ if ( WIFEXITED(pstat) ) {
+ _exit( WEXITSTATUS(pstat) );
+ }
+
+ _exit(51);
+ }
+
+ // stdout handled in initLogging
+ //fclose(stdout);
+ //freopen("/dev/null", "w", stdout);
+
+ fclose(stderr);
+ fclose(stdin);
+
+ FILE* f = freopen("/dev/null", "w", stderr);
+ if ( f == NULL ) {
+ cout << "Cant reassign stderr while forking server process: " << strerror(errno) << endl;
+ ::exit(-1);
+ }
+
+ f = freopen("/dev/null", "r", stdin);
+ if ( f == NULL ) {
+ cout << "Cant reassign stdin while forking server process: " << strerror(errno) << endl;
+ ::exit(-1);
+ }
+
+ setupCoreSignals();
+ setupSignals( true );
+ }
+
+ if (params.count("syslog")) {
+ StringBuilder sb(128);
+ sb << cmdLine.binaryName << "." << cmdLine.port;
+ Logstream::useSyslog( sb.str().c_str() );
+ }
+#endif
+ if (params.count("logpath")) {
+ if ( params.count("syslog") ) {
+ cout << "Cant use both a logpath and syslog " << endl;
+ ::exit(-1);
+ }
+
+ if ( logpath.size() == 0 )
+ logpath = params["logpath"].as<string>();
+ uassert( 10033 , "logpath has to be non-zero" , logpath.size() );
+ initLogging( logpath , params.count( "logappend" ) );
+ }
+
+ if ( params.count("pidfilepath")) {
+ writePidFile( params["pidfilepath"].as<string>() );
+ }
+
+ if (params.count("keyFile")) {
+ const string f = params["keyFile"].as<string>();
+
+ if (!setUpSecurityKey(f)) {
+                // error message printed in setUpSecurityKey
+ dbexit(EXIT_BADOPTIONS);
+ }
+
+ cmdLine.keyFile = true;
+ noauth = false;
+ }
+ else {
+ cmdLine.keyFile = false;
+ }
+
+#ifdef MONGO_SSL
+ if (params.count("sslOnNormalPorts") ) {
+ cmdLine.sslOnNormalPorts = true;
+
+ if ( cmdLine.sslPEMKeyPassword.size() == 0 ) {
+ log() << "need sslPEMKeyPassword" << endl;
+ dbexit(EXIT_BADOPTIONS);
+ }
+
+ if ( cmdLine.sslPEMKeyFile.size() == 0 ) {
+ log() << "need sslPEMKeyFile" << endl;
+ dbexit(EXIT_BADOPTIONS);
+ }
+
+ cmdLine.sslServerManager = new SSLManager( false );
+ cmdLine.sslServerManager->setupPEM( cmdLine.sslPEMKeyFile , cmdLine.sslPEMKeyPassword );
+ }
+
+        else if ( cmdLine.sslPEMKeyFile.size() || cmdLine.sslPEMKeyPassword.size() ) {
+ log() << "need to enable sslOnNormalPorts" << endl;
+ dbexit(EXIT_BADOPTIONS);
+ }
+#endif
+
+ {
+ BSONObjBuilder b;
+ for (po::variables_map::const_iterator it(params.begin()), end(params.end()); it != end; it++){
+ if (!it->second.defaulted()){
+ const string& key = it->first;
+ const po::variable_value& value = it->second;
+ const type_info& type = value.value().type();
+
+ if (type == typeid(string)){
+ if (value.as<string>().empty())
+ b.appendBool(key, true); // boost po uses empty string for flags like --quiet
+ else
+ b.append(key, value.as<string>());
+ }
+ else if (type == typeid(int))
+ b.append(key, value.as<int>());
+ else if (type == typeid(double))
+ b.append(key, value.as<double>());
+ else if (type == typeid(bool))
+ b.appendBool(key, value.as<bool>());
+ else if (type == typeid(long))
+ b.appendNumber(key, (long long)value.as<long>());
+ else if (type == typeid(unsigned))
+ b.appendNumber(key, (long long)value.as<unsigned>());
+ else if (type == typeid(unsigned long long))
+ b.appendNumber(key, (long long)value.as<unsigned long long>());
+ else if (type == typeid(vector<string>))
+ b.append(key, value.as<vector<string> >());
+ else
+ b.append(key, "UNKNOWN TYPE: " + demangleName(type));
+ }
+ }
+ parsedOpts = b.obj();
+ }
+
+ {
+ BSONArrayBuilder b;
+ for (int i=0; i < argc; i++)
+ b << argv[i];
+ argvArray = b.arr();
+ }
+
+ return true;
+ }
+
+ void printCommandLineOpts() {
+ log() << "options: " << parsedOpts << endl;
+ }
+
+ void ignoreSignal( int sig ) {}
+
+ void setupCoreSignals() {
+#if !defined(_WIN32)
+ assert( signal(SIGUSR1 , rotateLogs ) != SIG_ERR );
+ assert( signal(SIGHUP , ignoreSignal ) != SIG_ERR );
+#endif
+ }
+
+ class CmdGetCmdLineOpts : Command {
+ public:
+ CmdGetCmdLineOpts(): Command("getCmdLineOpts") {}
+ void help(stringstream& h) const { h << "get argv"; }
+ virtual LockType locktype() const { return NONE; }
+ virtual bool adminOnly() const { return true; }
+ virtual bool slaveOk() const { return true; }
+
+ virtual bool run(const string&, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ result.append("argv", argvArray);
+ result.append("parsed", parsedOpts);
+ return true;
+ }
+
+ } cmdGetCmdLineOpts;
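+
+    /* Illustrative result of { getCmdLineOpts : 1 } against the admin db, given a hypothetical
+       invocation "mongod --port 27018 --quiet":
+           { "argv" : [ "mongod", "--port", "27018", "--quiet" ],
+             "parsed" : { "port" : 27018, "quiet" : true } }
+    */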
+
+ string prettyHostName() {
+ StringBuilder s(128);
+ s << getHostNameCached();
+ if( cmdLine.port != CmdLine::DefaultDBPort )
+ s << ':' << mongo::cmdLine.port;
+ return s.str();
+ }
+
+ casi< map<string,ParameterValidator*> * > pv_all (NULL);
+
+ ParameterValidator::ParameterValidator( const string& name ) : _name( name ) {
+ if ( ! pv_all)
+ pv_all.ref() = new map<string,ParameterValidator*>();
+ (*pv_all.ref())[_name] = this;
+ }
+
+ ParameterValidator * ParameterValidator::get( const string& name ) {
+ map<string,ParameterValidator*>::const_iterator i = pv_all.get()->find( name );
+ if ( i == pv_all.get()->end() )
+ return NULL;
+ return i->second;
+ }
+
+}
diff --git a/src/mongo/db/cmdline.h b/src/mongo/db/cmdline.h
new file mode 100644
index 00000000000..5fe6ceb1005
--- /dev/null
+++ b/src/mongo/db/cmdline.h
@@ -0,0 +1,203 @@
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../pch.h"
+#include "jsobj.h"
+
+namespace mongo {
+
+#ifdef MONGO_SSL
+ class SSLManager;
+#endif
+
+ /* command line options
+ */
+ /* concurrency: OK/READ */
+ struct CmdLine {
+
+ CmdLine();
+
+ string binaryName; // mongod or mongos
+        string cwd; // cwd when the process started
+
+ // this is suboptimal as someone could rename a binary. todo...
+ bool isMongos() const { return binaryName == "mongos"; }
+
+ int port; // --port
+ enum {
+ DefaultDBPort = 27017,
+ ConfigServerPort = 27019,
+ ShardServerPort = 27018
+ };
+ bool isDefaultPort() const { return port == DefaultDBPort; }
+
+ string bind_ip; // --bind_ip
+ bool rest; // --rest
+ bool jsonp; // --jsonp
+
+ string _replSet; // --replSet[/<seedlist>]
+ string ourSetName() const {
+ string setname;
+ size_t sl = _replSet.find('/');
+ if( sl == string::npos )
+ return _replSet;
+ return _replSet.substr(0, sl);
+ }
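+        // Illustrative example (hostnames hypothetical):
+        //     cmdLine._replSet = "rs0/alpha:27017,beta:27017";
+        //     cmdLine.ourSetName();   // -> "rs0"   (with --replSet "rs0" it returns "rs0" as-is)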
+ bool usingReplSets() const { return !_replSet.empty(); }
+
+ // for master/slave replication
+ string source; // --source
+ string only; // --only
+
+ bool quiet; // --quiet
+ bool noTableScan; // --notablescan no table scans allowed
+ bool prealloc; // --noprealloc no preallocation of data files
+ bool preallocj; // --nopreallocj no preallocation of journal files
+ bool smallfiles; // --smallfiles allocate smaller data files
+
+ bool configsvr; // --configsvr
+
+ bool quota; // --quota
+ int quotaFiles; // --quotaFiles
+ bool cpu; // --cpu show cpu time periodically
+
+ bool dur; // --dur durability (now --journal)
+ unsigned journalCommitInterval; // group/batch commit interval ms
+
+ /** --durOptions 7 dump journal and terminate without doing anything further
+ --durOptions 4 recover and terminate without listening
+ */
+ enum { // bits to be ORed
+ DurDumpJournal = 1, // dump diagnostics on the journal during recovery
+ DurScanOnly = 2, // don't do any real work, just scan and dump if dump specified
+ DurRecoverOnly = 4, // terminate after recovery step
+ DurParanoid = 8, // paranoid mode enables extra checks
+ DurAlwaysCommit = 16, // do a group commit every time the writelock is released
+ DurAlwaysRemap = 32, // remap the private view after every group commit (may lag to the next write lock acquisition, but will do all files then)
+ DurNoCheckSpace = 64 // don't check that there is enough room for journal files before startup (for diskfull tests)
+ };
+ int durOptions; // --durOptions <n> for debugging
+
+ bool objcheck; // --objcheck
+
+ long long oplogSize; // --oplogSize
+ int defaultProfile; // --profile
+        int slowMS; // --slowms; operations taking longer than this many ms are considered "slow"
+
+ int pretouch; // --pretouch for replication application (experimental)
+ bool moveParanoia; // for move chunk paranoia
+ double syncdelay; // seconds between fsyncs
+
+ bool noUnixSocket; // --nounixsocket
+ bool doFork; // --fork
+ string socket; // UNIX domain socket directory
+
+ bool keyFile;
+
+#ifndef _WIN32
+ pid_t parentProc; // --fork pid of initial process
+ pid_t leaderProc; // --fork pid of leader process
+#endif
+
+#ifdef MONGO_SSL
+ bool sslOnNormalPorts; // --sslOnNormalPorts
+ string sslPEMKeyFile; // --sslPEMKeyFile
+ string sslPEMKeyPassword; // --sslPEMKeyPassword
+
+ SSLManager* sslServerManager; // currently leaks on close
+#endif
+
+ static void launchOk();
+
+ static void addGlobalOptions( boost::program_options::options_description& general ,
+ boost::program_options::options_description& hidden );
+
+ static void addWindowsOptions( boost::program_options::options_description& windows ,
+ boost::program_options::options_description& hidden );
+
+
+ static void parseConfigFile( istream &f, stringstream &ss);
+ /**
+ * @return true if should run program, false if should exit
+ */
+ static bool store( int argc , char ** argv ,
+ boost::program_options::options_description& visible,
+ boost::program_options::options_description& hidden,
+ boost::program_options::positional_options_description& positional,
+ boost::program_options::variables_map &output );
+
+ time_t started;
+ };
+
+ // todo move to cmdline.cpp?
+ inline CmdLine::CmdLine() :
+ port(DefaultDBPort), rest(false), jsonp(false), quiet(false), noTableScan(false), prealloc(true), preallocj(true), smallfiles(sizeof(int*) == 4),
+ configsvr(false),
+ quota(false), quotaFiles(8), cpu(false), durOptions(0), objcheck(false), oplogSize(0), defaultProfile(0), slowMS(100), pretouch(0), moveParanoia( true ),
+ syncdelay(60), noUnixSocket(false), doFork(0), socket("/tmp")
+ {
+ started = time(0);
+
+ journalCommitInterval = 0; // 0 means use default
+ dur = false;
+#if defined(_DURABLEDEFAULTON)
+ dur = true;
+#endif
+ if( sizeof(void*) == 8 )
+ dur = true;
+#if defined(_DURABLEDEFAULTOFF)
+ dur = false;
+#endif
+
+#ifdef MONGO_SSL
+ sslOnNormalPorts = false;
+ sslServerManager = 0;
+#endif
+ }
+
+ extern CmdLine cmdLine;
+
+ void setupLaunchSignals();
+ void setupCoreSignals();
+
+ string prettyHostName();
+
+ void printCommandLineOpts();
+
+ /**
+     * used for the setParameter command
+     * so you can write validation code that lives with the code using it
+     * rather than having it all in one place in the command implementation
+     * also lets you have mongos- or mongod-specific code
+     * without pulling in all sorts of dependencies
+ */
+ class ParameterValidator {
+ public:
+ ParameterValidator( const string& name );
+ virtual ~ParameterValidator() {}
+
+ virtual bool isValid( BSONElement e , string& errmsg ) const = 0;
+
+ static ParameterValidator * get( const string& name );
+
+ private:
+ const string _name;
+ };
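+
+    /* Illustrative sketch of a validator (the parameter name and bounds are hypothetical):
+
+           class FooThresholdValidator : public ParameterValidator {
+           public:
+               FooThresholdValidator() : ParameterValidator( "fooThreshold" ) {}
+               virtual bool isValid( BSONElement e , string& errmsg ) const {
+                   if ( e.isNumber() && e.number() >= 0 )
+                       return true;
+                   errmsg = "fooThreshold must be a non-negative number";
+                   return false;
+               }
+           } fooThresholdValidator;
+
+       setParameter can then look the instance up via ParameterValidator::get( "fooThreshold" ).
+    */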
+
+}
+
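For orientation, a rough sketch of how a ParameterValidator might be used (hypothetical parameter and class names; the base constructor is assumed to register the instance under its name so that ParameterValidator::get() can find it):

    namespace mongo {
        // hypothetical validator: keep a numeric "logLevel" set via setParameter within [0, 5]
        class LogLevelValidator : public ParameterValidator {
        public:
            LogLevelValidator() : ParameterValidator( "logLevel" ) {}
            virtual bool isValid( BSONElement e , string& errmsg ) const {
                if ( !e.isNumber() || e.numberInt() < 0 || e.numberInt() > 5 ) {
                    errmsg = "logLevel has to be a number between 0 and 5";
                    return false;
                }
                return true;
            }
        } logLevelValidatorInstance; // static singleton, constructed at startup
    }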
diff --git a/src/mongo/db/collection.h b/src/mongo/db/collection.h
new file mode 100644
index 00000000000..998b2f0beac
--- /dev/null
+++ b/src/mongo/db/collection.h
@@ -0,0 +1,15 @@
+// @file collection.h
+
+#pragma once
+
+#include "namespace.h"
+
+namespace mongo {
+
+ class Collection {
+ public:
+ NamespaceDetails * const d;
+ NamespaceDetailsTransient * const nsd;
+ };
+
+}
diff --git a/src/mongo/db/commands.cpp b/src/mongo/db/commands.cpp
new file mode 100755
index 00000000000..cbe9ffc6861
--- /dev/null
+++ b/src/mongo/db/commands.cpp
@@ -0,0 +1,209 @@
+/* commands.cpp
+ db "commands" (sent via db.$cmd.findOne(...))
+ */
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "jsobj.h"
+#include "commands.h"
+#include "client.h"
+#include "replutil.h"
+
+namespace mongo {
+
+ map<string,Command*> * Command::_commandsByBestName;
+ map<string,Command*> * Command::_webCommands;
+ map<string,Command*> * Command::_commands;
+
+ string Command::parseNsFullyQualified(const string& dbname, const BSONObj& cmdObj) const {
+ string s = cmdObj.firstElement().valuestr();
+ NamespaceString nss(s);
+ // these are for security, do not remove:
+ verify(15966, dbname == nss.db || dbname == "admin" );
+ verify(15962, !nss.db.empty() );
+ return s;
+ }
+
+ /*virtual*/ string Command::parseNs(const string& dbname, const BSONObj& cmdObj) const {
+ string coll = cmdObj.firstElement().valuestr();
+#if defined(CLC)
+ DEV if( mongoutils::str::startsWith(coll, dbname+'.') ) {
+ log() << "DEBUG parseNs Command's collection name looks like it includes the db name\n"
+ << dbname << '\n'
+ << coll << '\n'
+ << cmdObj.toString() << endl;
+ dassert(false);
+ }
+#endif
+ return dbname + '.' + coll;
+ }
+
+ void Command::htmlHelp(stringstream& ss) const {
+ string helpStr;
+ {
+ stringstream h;
+ help(h);
+ helpStr = h.str();
+ }
+ ss << "\n<tr><td>";
+ bool web = _webCommands->count(name) != 0;
+ if( web ) ss << "<a href=\"/" << name << "?text=1\">";
+ ss << name;
+ if( web ) ss << "</a>";
+ ss << "</td>\n";
+ ss << "<td>";
+ int l = locktype();
+ //if( l == NONE ) ss << "N ";
+ if( l == READ ) ss << "R ";
+ else if( l == WRITE ) ss << "W ";
+ if( slaveOk() )
+ ss << "S ";
+ if( adminOnly() )
+ ss << "A";
+ ss << "</td>";
+ ss << "<td>";
+ if( helpStr != "no help defined" ) {
+ const char *p = helpStr.c_str();
+ while( *p ) {
+ if( *p == '<' ) {
+ ss << "&lt;";
+ p++; continue;
+ }
+ else if( *p == '{' )
+ ss << "<code>";
+ else if( *p == '}' ) {
+ ss << "}</code>";
+ p++;
+ continue;
+ }
+ if( strncmp(p, "http:", 5) == 0 ) {
+ ss << "<a href=\"";
+ const char *q = p;
+ while( *q && *q != ' ' && *q != '\n' )
+ ss << *q++;
+ ss << "\">";
+ q = p;
+ if( startsWith(q, "http://www.mongodb.org/display/") )
+ q += 31;
+ while( *q && *q != ' ' && *q != '\n' ) {
+ ss << (*q == '+' ? ' ' : *q);
+ q++;
+ if( *q == '#' )
+ while( *q && *q != ' ' && *q != '\n' ) q++;
+ }
+ ss << "</a>";
+ p = q;
+ continue;
+ }
+ if( *p == '\n' ) ss << "<br>";
+ else ss << *p;
+ p++;
+ }
+ }
+ ss << "</td>";
+ ss << "</tr>\n";
+ }
+
+ Command::Command(const char *_name, bool web, const char *oldName) : name(_name) {
+ // register ourself.
+ if ( _commands == 0 )
+ _commands = new map<string,Command*>;
+ if( _commandsByBestName == 0 )
+ _commandsByBestName = new map<string,Command*>;
+ Command*& c = (*_commands)[name];
+ if ( c )
+ log() << "warning: 2 commands with name: " << _name << endl;
+ c = this;
+ (*_commandsByBestName)[name] = this;
+
+ if( web ) {
+ if( _webCommands == 0 )
+ _webCommands = new map<string,Command*>;
+ (*_webCommands)[name] = this;
+ }
+
+ if( oldName )
+ (*_commands)[oldName] = this;
+ }
+
+ void Command::help( stringstream& help ) const {
+ help << "no help defined";
+ }
+
+ Command* Command::findCommand( const string& name ) {
+ map<string,Command*>::iterator i = _commands->find( name );
+ if ( i == _commands->end() )
+ return 0;
+ return i->second;
+ }
+
+
+ Command::LockType Command::locktype( const string& name ) {
+ Command * c = findCommand( name );
+ if ( ! c )
+ return WRITE;
+ return c->locktype();
+ }
+
+ void Command::logIfSlow( const Timer& timer, const string& msg ) {
+ int ms = timer.millis();
+ if ( ms > cmdLine.slowMS ) {
+ out() << msg << " took " << ms << " ms." << endl;
+ }
+ }
+
+}
+
+#include "../client/connpool.h"
+
+namespace mongo {
+
+ extern DBConnectionPool pool;
+
+ class PoolFlushCmd : public Command {
+ public:
+ PoolFlushCmd() : Command( "connPoolSync" , false , "connpoolsync" ) {}
+ virtual void help( stringstream &help ) const { help<<"internal"; }
+ virtual LockType locktype() const { return NONE; }
+ virtual bool run(const string&, mongo::BSONObj&, int, std::string&, mongo::BSONObjBuilder& result, bool) {
+ pool.flush();
+ return true;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+
+ } poolFlushCmd;
+
+ class PoolStats : public Command {
+ public:
+ PoolStats() : Command( "connPoolStats" ) {}
+ virtual void help( stringstream &help ) const { help<<"stats about connection pool"; }
+ virtual LockType locktype() const { return NONE; }
+ virtual bool run(const string&, mongo::BSONObj&, int, std::string&, mongo::BSONObjBuilder& result, bool) {
+ pool.appendInfo( result );
+ result.append( "numDBClientConnection" , DBClientConnection::getNumConnections() );
+ result.append( "numAScopedConnection" , AScopedConnection::getNumConnections() );
+ return true;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+
+ } poolStatsCmd;
+
+} // namespace mongo
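For reference, a short sketch (assuming conn is a connected DBClientConnection from src/mongo/client/dbclient.h) of invoking the stats command registered above:

    BSONObj res;
    conn.runCommand( "admin" , BSON( "connPoolStats" << 1 ) , res );
    // res carries pool.appendInfo() output plus the
    // "numDBClientConnection" and "numAScopedConnection" counters appended above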
diff --git a/src/mongo/db/commands.h b/src/mongo/db/commands.h
new file mode 100644
index 00000000000..85cdd38d7a4
--- /dev/null
+++ b/src/mongo/db/commands.h
@@ -0,0 +1,164 @@
+// commands.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "jsobj.h"
+#include "../util/mongoutils/str.h"
+
+namespace mongo {
+
+ class BSONObj;
+ class BSONObjBuilder;
+ class Client;
+ class Timer;
+
+ /** mongodb "commands" (sent via db.$cmd.findOne(...))
+ subclass to make a command. define a singleton object for it.
+ */
+ class Command {
+ protected:
+ string parseNsFullyQualified(const string& dbname, const BSONObj& cmdObj) const;
+ public:
+ // only makes sense for commands where the first parameter is the collection.
+ virtual string parseNs(const string& dbname, const BSONObj& cmdObj) const;
+
+ enum LockType { READ = -1 , NONE = 0 , WRITE = 1 };
+
+ const string name;
+
+ /* run the given command
+ implement this...
+
+ fromRepl - command is being invoked as part of replication syncing. In this situation you
+ normally do not want to log the command to the local oplog.
+
+ return value is true if succeeded. if false, set errmsg text.
+ */
+ virtual bool run(const string& db, BSONObj& cmdObj, int options, string& errmsg, BSONObjBuilder& result, bool fromRepl = false ) = 0;
+
+ /*
+ note: logTheOp() MUST be false if READ
+ if NONE, can't use Client::Context setup
+ use with caution
+ */
+ virtual LockType locktype() const = 0;
+
+ /* Return true if only the admin ns has privileges to run this command. */
+ virtual bool adminOnly() const {
+ return false;
+ }
+
+ void htmlHelp(stringstream&) const;
+
+ /* Like adminOnly, but even stricter: we must either be authenticated for admin db,
+ or, if running without auth, on the local interface. Used for things which
+ are so major that remote invocation may not make sense (e.g., shutdownServer).
+
+ When localHostOnlyIfNoAuth() is true, adminOnly() must also be true.
+ */
+ virtual bool localHostOnlyIfNoAuth(const BSONObj& cmdObj) { return false; }
+
+ /* Return true if slaves are allowed to execute the command when it arrives
+ directly from a client (if fromRepl, it is always allowed).
+ */
+ virtual bool slaveOk() const = 0;
+
+ /* Return true if the client can force a command to be run on a slave by
+ turning on the 'slaveOk' option in the command query.
+ */
+ virtual bool slaveOverrideOk() {
+ return false;
+ }
+
+ /* Override and return true to log the operation (logOp()) to the replication log
+ (not done if fromRepl, of course).
+
+ Note that if run() returns false, we do NOT log.
+ */
+ virtual bool logTheOp() { return false; }
+
+ virtual void help( stringstream& help ) const;
+
+ /* Return true if authentication and security checks apply to this command. Some commands
+ (e.g., getnonce, authenticate) can be run by anyone, even unauthenticated clients.
+ */
+ virtual bool requiresAuth() { return true; }
+
+ /* Return true if a replica set secondary should go into "recovering"
+ (unreadable) state while running this command.
+ */
+ virtual bool maintenanceMode() const { return false; }
+
+ /* Return true if command should be permitted when a replica set secondary is in "recovering"
+ (unreadable) state.
+ */
+ virtual bool maintenanceOk() const { return true; /* assumed true prior to commit */ }
+
+ /** @param webUI expose the command in the web ui as localhost:28017/<name>
+ @param oldName an optional old, deprecated name for the command
+ */
+ Command(const char *_name, bool webUI = false, const char *oldName = 0);
+
+ virtual ~Command() {}
+
+ protected:
+ BSONObj getQuery( const BSONObj& cmdObj ) {
+ if ( cmdObj["query"].type() == Object )
+ return cmdObj["query"].embeddedObject();
+ if ( cmdObj["q"].type() == Object )
+ return cmdObj["q"].embeddedObject();
+ return BSONObj();
+ }
+
+ static void logIfSlow( const Timer& cmdTimer, const string& msg);
+
+ static map<string,Command*> * _commands;
+ static map<string,Command*> * _commandsByBestName;
+ static map<string,Command*> * _webCommands;
+
+ public:
+ static const map<string,Command*>* commandsByBestName() { return _commandsByBestName; }
+ static const map<string,Command*>* webCommands() { return _webCommands; }
+ /** @return if command was found and executed */
+ static bool runAgainstRegistered(const char *ns, BSONObj& jsobj, BSONObjBuilder& anObjBuilder, int queryOptions = 0);
+ static LockType locktype( const string& name );
+ static Command * findCommand( const string& name );
+ };
+
+ class CmdShutdown : public Command {
+ public:
+ virtual bool requiresAuth() { return true; }
+ virtual bool adminOnly() const { return true; }
+ virtual bool localHostOnlyIfNoAuth(const BSONObj& cmdObj) { return true; }
+ virtual bool logTheOp() {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual LockType locktype() const { return NONE; }
+ virtual void help( stringstream& help ) const;
+ CmdShutdown() : Command("shutdown") {}
+ bool run(const string& dbname, BSONObj& cmdObj, int options, string& errmsg, BSONObjBuilder& result, bool fromRepl);
+ private:
+ bool shutdownHelper();
+ };
+
+ bool _runCommands(const char *ns, BSONObj& jsobj, BufBuilder &b, BSONObjBuilder& anObjBuilder, bool fromRepl, int queryOptions);
+
+} // namespace mongo
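As a usage sketch of the pattern this header describes (a hypothetical command, not one defined in this change): subclass Command and define a file-scope singleton; the base constructor does the registration.

    namespace mongo {
        class CmdEcho : public Command {      // hypothetical example
        public:
            CmdEcho() : Command( "echo" ) {}  // registered under the name "echo"
            virtual bool slaveOk() const { return true; }
            virtual LockType locktype() const { return NONE; }
            virtual void help( stringstream& help ) const { help << "echo back the command object"; }
            virtual bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg,
                             BSONObjBuilder& result, bool fromRepl) {
                result.append( "echo" , cmdObj );
                return true;
            }
        } cmdEcho;                            // constructing the singleton registers the command
    }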
diff --git a/src/mongo/db/commands/aggregate.js b/src/mongo/db/commands/aggregate.js
new file mode 100755
index 00000000000..7741e3121ff
--- /dev/null
+++ b/src/mongo/db/commands/aggregate.js
@@ -0,0 +1,184 @@
+/* sample aggregate command queries */
+
+// make sure we're using the right db; this is the same as "use mydb;" in shell
+db = db.getSisterDB("mydb");
+
+// just passing through fields
+var p1 = db.runCommand(
+{ aggregate : "article", pipeline : [
+ { $project : {
+ tags : 1,
+ pageViews : 1
+ }}
+]});
+
+// unwinding an array
+var p2 = db.runCommand(
+{ aggregate : "article", pipeline : [
+ { $project : {
+ author : 1,
+ tag : { $unwind : "tags" },
+ pageViews : 1
+ }}
+]});
+
+// pulling values out of subdocuments
+var p3 = db.runCommand(
+{ aggregate : "article", pipeline : [
+ { $project : {
+ otherfoo : "other.foo",
+ otherbar : "other.bar"
+ }}
+]});
+
+// projection includes a computed value
+var p4 = db.runCommand(
+{ aggregate : "article", pipeline : [
+ { $project : {
+ author : 1,
+ daveWroteIt : { $eq:["$author", "dave"] }
+ }}
+]});
+
+// projection includes a virtual (fabricated) document
+var p5 = db.runCommand(
+{ aggregate : "article", pipeline : [
+ { $project : {
+ author : 1,
+ pageViews : 1,
+ tag : { $unwind : "tags" }
+ }},
+ { $project : {
+ author : 1,
+ subDocument : { foo : "pageViews", bar : "tag" }
+ }}
+]});
+
+// multi-step aggregate
+// nested expressions in computed fields
+var p6 = db.runCommand(
+{ aggregate : "article", pipeline : [
+ { $project : {
+ author : 1,
+ tag : { $unwind : "tags" },
+ pageViews : 1
+ }},
+ { $project : {
+ author : 1,
+ tag : 1,
+ pageViews : 1,
+ daveWroteIt : { $eq:["$author", "dave"] },
+ weLikeIt : { $or:[ { $eq:["$author", "dave"] },
+ { $eq:["$tag", "good"] } ] }
+ }}
+]});
+
+// slightly more complex computed expression; $ifnull
+var p7 = db.runCommand(
+{ aggregate : "article", pipeline : [
+ { $project : {
+ theSum : { $add:["$pageViews",
+ { $ifnull:["$other.foo",
+ "$other.bar"] } ] }
+ }}
+]});
+
+// dotted path inclusion; _id exclusion
+var p8 = db.runCommand(
+{ aggregate : "article", pipeline : [
+ { $project : {
+ _id : 0,
+ author : 1,
+ tag : { $unwind : "tags" },
+ "comments.author" : 1
+ }}
+]});
+
+
+// simple matching
+var m1 = db.runCommand(
+{ aggregate : "article", pipeline : [
+ { $match : { author : "dave" } }
+]});
+
+// combining matching with a projection
+var m2 = db.runCommand(
+{ aggregate : "article", pipeline : [
+ { $project : {
+ title : 1,
+ author : 1,
+ pageViews : 1,
+ tag : { $unwind : "tags" },
+ comments : 1
+ }},
+ { $match : { tag : "nasty" } }
+]});
+
+
+// group by tag
+var g1 = db.runCommand(
+{ aggregate : "article", pipeline : [
+ { $project : {
+ author : 1,
+ tag : { $unwind : "tags" },
+ pageViews : 1
+ }},
+ { $group : {
+ _id: { tag : 1 },
+ docsByTag : { $sum : 1 },
+ viewsByTag : { $sum : "$pageViews" }
+ }}
+]});
+
+// $max, and averaging in a final projection
+var g2 = db.runCommand(
+{ aggregate : "article", pipeline : [
+ { $project : {
+ author : 1,
+ tag : { $unwind : "tags" },
+ pageViews : 1
+ }},
+ { $group : {
+ _id: { tag : 1 },
+ docsByTag : { $sum : 1 },
+ viewsByTag : { $sum : "$pageViews" },
+ mostViewsByTag : { $max : "$pageViews" },
+ }},
+ { $project : {
+ _id: false,
+ tag : "_id.tag",
+ mostViewsByTag : 1,
+ docsByTag : 1,
+ viewsByTag : 1,
+ avgByTag : { $divide:["$viewsByTag", "$docsByTag"] }
+ }}
+]});
+
+// $push as an accumulator; can pivot data
+var g3 = db.runCommand(
+{ aggregate : "article", pipeline : [
+ { $project : {
+ author : 1,
+ tag : { $unwind : "tags" }
+ }},
+ { $group : {
+ _id : { tag : 1 },
+ authors : { $push : "$author" }
+ }}
+]});
+
+// using the $avg accumulator within the group
+var g4 = db.runCommand(
+{ aggregate : "article", pipeline : [
+ { $project : {
+ author : 1,
+ tag : { $unwind : "tags" },
+ pageViews : 1
+ }},
+ { $group : {
+ _id: { tag : 1 },
+ docsByTag : { $sum : 1 },
+ viewsByTag : { $sum : "$pageViews" },
+ avgByTag : { $avg : "$pageViews" },
+ }}
+]});
diff --git a/src/mongo/db/commands/cloud.cpp b/src/mongo/db/commands/cloud.cpp
new file mode 100644
index 00000000000..8f9d9d2e4b5
--- /dev/null
+++ b/src/mongo/db/commands/cloud.cpp
@@ -0,0 +1,90 @@
+#include "../commands.h"
+#include <map>
+#include "../../util/concurrency/value.h"
+#include "../../util/mongoutils/str.h"
+#include "../../util/net/hostandport.h"
+
+using namespace mongoutils;
+
+namespace mongo {
+
+ mapsf<string,string> dynHostNames;
+ extern DiagStr _hostNameCached;
+
+ string dynHostMyName() {
+ if( !str::startsWith(_hostNameCached, '#') )
+ return "";
+ return _hostNameCached;
+ }
+
+ void dynHostResolve(string& name, int& port) {
+ assert( !name.empty() );
+ assert( !str::contains(name, ':') );
+ assert( str::startsWith(name, '#') );
+ string s = dynHostNames.get(name);
+ if( s.empty() ) {
+ name.clear();
+ return;
+ }
+ assert( !str::startsWith(s, '#') );
+ HostAndPort hp(s);
+ if( hp.hasPort() ) {
+ port = hp.port();
+ log() << "info: dynhost in:" << name << " out:" << hp.toString() << endl;
+ }
+ name = hp.host();
+ }
+
+ /**
+ { cloud:1, nodes: {
+ name : <ip>, ...
+ },
+ me : <mylogicalname>
+ }
+ */
+ class CmdCloud : public Command {
+ public:
+ virtual LockType locktype() const { return NONE; }
+ virtual bool logTheOp() { return false; }
+ virtual bool adminOnly() const { return true; } // very important
+ virtual bool localHostOnlyIfNoAuth(const BSONObj&) { return true; }
+ virtual bool slaveOk() const { return true; }
+ virtual void help( stringstream& help ) const {
+ help << "internal\n";
+ help << "{cloud:1,nodes:...,me:<my_logical_name>}";
+ }
+ CmdCloud() : Command("cloud") {}
+ bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ assert(!fromRepl);
+ BSONObj nodes = cmdObj["nodes"].Obj();
+ map<string,string> ipmap;
+ for( BSONObj::iterator i(nodes); i.more(); ) {
+ BSONElement e = i.next();
+ assert( *e.fieldName() == '#' );
+ ipmap[e.fieldName()] = e.String();
+ }
+
+ string me = cmdObj["me"].String();
+ assert( !me.empty() && me[0] == '#' );
+
+ log(/*1*/) << "CmdCloud" << endl;
+
+ if( me != _hostNameCached.get() ) {
+ log() << "CmdCloud new 'me' value:" << me << endl;
+ _hostNameCached = me;
+ }
+
+ dynHostNames.swap(ipmap);
+ return true;
+ }
+ } cmdCloud;
+
+ BSONObj fromjson(const string &str);
+
+ void cloudCmdLineParamIs(string cmd) {
+ string errmsg;
+ BSONObjBuilder res;
+ BSONObj o = fromjson(cmd);
+ cmdCloud.run("", o, 0, errmsg, res, false);
+ }
+}
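A sketch of the command object this expects (hypothetical node names and addresses): every logical name must start with '#', each maps to an ip or ip:port string, and 'me' is the logical name of the current process.

    BSONObj cloudCmd = BSON( "cloud" << 1
                          << "nodes" << BSON( "#node0" << "10.4.0.1:27017"   // hypothetical addresses
                                           << "#node1" << "10.4.0.2" )
                          << "me" << "#node0" );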
diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp
new file mode 100644
index 00000000000..1926e6abddb
--- /dev/null
+++ b/src/mongo/db/commands/distinct.cpp
@@ -0,0 +1,157 @@
+// distinct.cpp
+
+/**
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+//#include "pch.h"
+#include "../commands.h"
+#include "../instance.h"
+#include "../queryoptimizer.h"
+#include "../clientcursor.h"
+#include "../../util/timer.h"
+
+namespace mongo {
+
+ class DistinctCommand : public Command {
+ public:
+ DistinctCommand() : Command("distinct") {}
+ virtual bool slaveOk() const { return true; }
+ virtual LockType locktype() const { return READ; }
+ virtual void help( stringstream &help ) const {
+ help << "{ distinct : 'collection name' , key : 'a.b' , query : {} }";
+ }
+
+ bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ Timer t;
+ string ns = dbname + '.' + cmdObj.firstElement().valuestr();
+
+ string key = cmdObj["key"].valuestrsafe();
+ BSONObj keyPattern = BSON( key << 1 );
+
+ BSONObj query = getQuery( cmdObj );
+
+ int bufSize = BSONObjMaxUserSize - 4096;
+ BufBuilder bb( bufSize );
+ char * start = bb.buf();
+
+ BSONArrayBuilder arr( bb );
+ BSONElementSet values;
+
+ long long nscanned = 0; // locations looked at
+ long long nscannedObjects = 0; // full objects looked at
+ long long n = 0; // matches
+ MatchDetails md;
+
+ NamespaceDetails * d = nsdetails( ns.c_str() );
+
+ if ( ! d ) {
+ result.appendArray( "values" , BSONObj() );
+ result.append( "stats" , BSON( "n" << 0 << "nscanned" << 0 << "nscannedObjects" << 0 ) );
+ return true;
+ }
+
+ shared_ptr<Cursor> cursor;
+ if ( ! query.isEmpty() ) {
+ cursor = NamespaceDetailsTransient::getCursor(ns.c_str() , query , BSONObj() );
+ }
+ else {
+
+ // query is empty, so let's see if we can find an index
+ // with the key so we don't have to hit the raw data
+ NamespaceDetails::IndexIterator ii = d->ii();
+ while ( ii.more() ) {
+ IndexDetails& idx = ii.next();
+
+ if ( d->isMultikey( ii.pos() - 1 ) )
+ continue;
+
+ if ( idx.inKeyPattern( key ) ) {
+ cursor = bestGuessCursor( ns.c_str() , BSONObj() , idx.keyPattern() );
+ if( cursor.get() ) break;
+ }
+
+ }
+
+ if ( ! cursor.get() )
+ cursor = NamespaceDetailsTransient::getCursor(ns.c_str() , query , BSONObj() );
+
+ }
+
+
+ assert( cursor );
+ string cursorName = cursor->toString();
+
+ auto_ptr<ClientCursor> cc (new ClientCursor(QueryOption_NoCursorTimeout, cursor, ns));
+
+ while ( cursor->ok() ) {
+ nscanned++;
+ bool loadedObject = false;
+
+ if ( cursor->currentMatches( &md ) && !cursor->getsetdup( cursor->currLoc() ) ) {
+ n++;
+
+ BSONObj holder;
+ BSONElementSet temp;
+ loadedObject = ! cc->getFieldsDotted( key , temp, holder );
+
+ for ( BSONElementSet::iterator i=temp.begin(); i!=temp.end(); ++i ) {
+ BSONElement e = *i;
+ if ( values.count( e ) )
+ continue;
+
+ int now = bb.len();
+
+ uassert(10044, "distinct too big, 16mb cap", ( now + e.size() + 1024 ) < bufSize );
+
+ arr.append( e );
+ BSONElement x( start + now );
+
+ values.insert( x );
+ }
+ }
+
+ if ( loadedObject || md._loadedObject )
+ nscannedObjects++;
+
+ cursor->advance();
+
+ if (!cc->yieldSometimes( ClientCursor::MaybeCovered )) {
+ cc.release();
+ break;
+ }
+
+ RARELY killCurrentOp.checkForInterrupt();
+ }
+
+ assert( start == bb.buf() );
+
+ result.appendArray( "values" , arr.done() );
+
+ {
+ BSONObjBuilder b;
+ b.appendNumber( "n" , n );
+ b.appendNumber( "nscanned" , nscanned );
+ b.appendNumber( "nscannedObjects" , nscannedObjects );
+ b.appendNumber( "timems" , t.millis() );
+ b.append( "cursor" , cursorName );
+ result.append( "stats" , b.obj() );
+ }
+
+ return true;
+ }
+
+ } distinctCmd;
+
+}
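To make the wire format concrete, a small sketch (hypothetical database, collection, and field names) of issuing this command through the C++ client, matching the help string above:

    // assuming conn is a connected DBClientConnection (see src/mongo/client/dbclient.h)
    BSONObj res;
    conn.runCommand( "mydb" ,
                     BSON( "distinct" << "article"       // collection name is the first element
                        << "key" << "tags"
                        << "query" << BSONObj() ) ,
                     res );
    // res["values"] holds the distinct values, res["stats"] the counters built above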
diff --git a/src/mongo/db/commands/document_source_cursor.cpp b/src/mongo/db/commands/document_source_cursor.cpp
new file mode 100755
index 00000000000..49bb9f19d9e
--- /dev/null
+++ b/src/mongo/db/commands/document_source_cursor.cpp
@@ -0,0 +1,100 @@
+/**
+ * Copyright 2011 (c) 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+
+#include "db/pipeline/document_source.h"
+
+#include "db/cursor.h"
+#include "db/pipeline/document.h"
+
+namespace mongo {
+
+ DocumentSourceCursor::~DocumentSourceCursor() {
+ }
+
+ bool DocumentSourceCursor::eof() {
+ /* if we haven't gotten the first one yet, do so now */
+ if (!pCurrent.get())
+ findNext();
+
+ return (pCurrent.get() == NULL);
+ }
+
+ bool DocumentSourceCursor::advance() {
+ /* if we haven't gotten the first one yet, do so now */
+ if (!pCurrent.get())
+ findNext();
+
+ findNext();
+ return (pCurrent.get() != NULL);
+ }
+
+ intrusive_ptr<Document> DocumentSourceCursor::getCurrent() {
+ /* if we haven't gotten the first one yet, do so now */
+ if (!pCurrent.get())
+ findNext();
+
+ return pCurrent;
+ }
+
+ void DocumentSourceCursor::findNext() {
+ /* standard cursor usage pattern */
+ while(pCursor->ok()) {
+ CoveredIndexMatcher *pCIM; // save intermediate result
+ if ((!(pCIM = pCursor->matcher()) ||
+ pCIM->matchesCurrent(pCursor.get())) &&
+ !pCursor->getsetdup(pCursor->currLoc())) {
+
+ /* grab the matching document */
+ BSONObj documentObj(pCursor->current());
+ pCurrent = Document::createFromBsonObj(&documentObj);
+ pCursor->advance();
+ return;
+ }
+
+ pCursor->advance();
+ }
+
+ /* if we got here, there aren't any more documents */
+ pCurrent.reset();
+ }
+
+ void DocumentSourceCursor::setSource(
+ const intrusive_ptr<DocumentSource> &pSource) {
+ /* this doesn't take a source */
+ assert(false);
+ }
+
+ void DocumentSourceCursor::sourceToBson(BSONObjBuilder *pBuilder) const {
+ /* this has no analog in the BSON world */
+ assert(false);
+ }
+
+ DocumentSourceCursor::DocumentSourceCursor(
+ const shared_ptr<Cursor> &pTheCursor):
+ pCursor(pTheCursor),
+ pCurrent() {
+ }
+
+ intrusive_ptr<DocumentSourceCursor> DocumentSourceCursor::create(
+ const shared_ptr<Cursor> &pCursor) {
+ assert(pCursor.get());
+ intrusive_ptr<DocumentSourceCursor> pSource(
+ new DocumentSourceCursor(pCursor));
+ return pSource;
+ }
+}
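For orientation, a minimal sketch (using only the eof()/advance()/getCurrent() interface implemented above) of how a downstream consumer drains this source:

    // pSource created via DocumentSourceCursor::create( cursor )
    for ( ; !pSource->eof(); pSource->advance() ) {
        intrusive_ptr<Document> pDocument( pSource->getCurrent() );
        // ... hand pDocument to the next pipeline stage ...
    }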
diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp
new file mode 100644
index 00000000000..0cf766fcf87
--- /dev/null
+++ b/src/mongo/db/commands/find_and_modify.cpp
@@ -0,0 +1,153 @@
+// find_and_modify.cpp
+
+/**
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "../commands.h"
+#include "../instance.h"
+#include "../clientcursor.h"
+
+namespace mongo {
+
+ /* Find and Modify an object returning either the old (default) or new value */
+ class CmdFindAndModify : public Command {
+ public:
+ virtual void help( stringstream &help ) const {
+ help <<
+ "{ findAndModify: \"collection\", query: {processed:false}, update: {$set: {processed:true}}, new: true}\n"
+ "{ findAndModify: \"collection\", query: {processed:false}, remove: true, sort: {priority:-1}}\n"
+ "Either update or remove is required, all other fields have default values.\n"
+ "Output is in the \"value\" field\n";
+ }
+
+ CmdFindAndModify() : Command("findAndModify", false, "findandmodify") { }
+ virtual bool logTheOp() { return false; } // the modifications will be logged directly
+ virtual bool slaveOk() const { return false; }
+ virtual LockType locktype() const { return WRITE; }
+ virtual bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ static DBDirectClient db;
+
+ string ns = dbname + '.' + cmdObj.firstElement().valuestr();
+
+ BSONObj origQuery = cmdObj.getObjectField("query"); // defaults to {}
+ Query q (origQuery);
+ BSONElement sort = cmdObj["sort"];
+ if (!sort.eoo())
+ q.sort(sort.embeddedObjectUserCheck());
+
+ bool upsert = cmdObj["upsert"].trueValue();
+
+ BSONObj fieldsHolder (cmdObj.getObjectField("fields"));
+ const BSONObj* fields = (fieldsHolder.isEmpty() ? NULL : &fieldsHolder);
+
+ Projection projection;
+ if (fields) {
+ projection.init(fieldsHolder);
+ if (!projection.includeID())
+ fields = NULL; // do projection in post-processing
+ }
+
+ BSONObj out = db.findOne(ns, q, fields);
+ if (out.isEmpty()) {
+ if (!upsert) {
+ result.appendNull("value");
+ return true;
+ }
+
+ BSONElement update = cmdObj["update"];
+ uassert(13329, "upsert mode requires update field", !update.eoo());
+ uassert(13330, "upsert mode requires query field", !origQuery.isEmpty());
+ db.update(ns, origQuery, update.embeddedObjectUserCheck(), true);
+
+ BSONObj gle = db.getLastErrorDetailed();
+ result.append("lastErrorObject", gle);
+ if (gle["err"].type() == String) {
+ errmsg = gle["err"].String();
+ return false;
+ }
+
+ if (cmdObj["new"].trueValue()) {
+ BSONElement _id = gle["upserted"];
+ if (_id.eoo())
+ _id = origQuery["_id"];
+
+ out = db.findOne(ns, QUERY("_id" << _id), fields);
+ }
+
+ }
+ else {
+
+ if (cmdObj["remove"].trueValue()) {
+ uassert(12515, "can't remove and update", cmdObj["update"].eoo());
+ db.remove(ns, QUERY("_id" << out["_id"]), 1);
+
+ BSONObj gle = db.getLastErrorDetailed();
+ result.append("lastErrorObject", gle);
+ if (gle["err"].type() == String) {
+ errmsg = gle["err"].String();
+ return false;
+ }
+
+ }
+ else { // update
+
+ BSONElement queryId = origQuery["_id"];
+ if (queryId.eoo() || getGtLtOp(queryId) != BSONObj::Equality) {
+ // need to include original query for $ positional operator
+
+ BSONObjBuilder b;
+ b.append(out["_id"]);
+ BSONObjIterator it(origQuery);
+ while (it.more()) {
+ BSONElement e = it.next();
+ if (strcmp(e.fieldName(), "_id"))
+ b.append(e);
+ }
+ q = Query(b.obj());
+ }
+
+ if (q.isComplex()) // update doesn't work with complex queries
+ q = Query(q.getFilter().getOwned());
+
+ BSONElement update = cmdObj["update"];
+ uassert(12516, "must specify remove or update", !update.eoo());
+ db.update(ns, q, update.embeddedObjectUserCheck());
+
+ BSONObj gle = db.getLastErrorDetailed();
+ result.append("lastErrorObject", gle);
+ if (gle["err"].type() == String) {
+ errmsg = gle["err"].String();
+ return false;
+ }
+
+ if (cmdObj["new"].trueValue())
+ out = db.findOne(ns, QUERY("_id" << out["_id"]), fields);
+ }
+ }
+
+ if (!fieldsHolder.isEmpty() && !fields){
+ // we need to run projection but haven't yet
+ out = projection.transform(out);
+ }
+
+ result.append("value", out);
+
+ return true;
+ }
+ } cmdFindAndModify;
+
+
+}
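A usage sketch through the C++ client (hypothetical collection and field names), mirroring the first form in the help text:

    // assuming conn is a connected DBClientConnection
    BSONObj res;
    conn.runCommand( "mydb" ,
                     BSON( "findAndModify" << "queue"
                        << "query" << BSON( "processed" << false )
                        << "update" << BSON( "$set" << BSON( "processed" << true ) )
                        << "new" << true ) ,
                     res );
    // res["value"] is the modified document (because of new:true), or null if nothing matched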
diff --git a/src/mongo/db/commands/group.cpp b/src/mongo/db/commands/group.cpp
new file mode 100644
index 00000000000..69fee587a47
--- /dev/null
+++ b/src/mongo/db/commands/group.cpp
@@ -0,0 +1,224 @@
+// group.cpp
+
+/**
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "../commands.h"
+#include "../instance.h"
+#include "../queryoptimizer.h"
+#include "../../scripting/engine.h"
+#include "../clientcursor.h"
+
+namespace mongo {
+
+ class GroupCommand : public Command {
+ public:
+ GroupCommand() : Command("group") {}
+ virtual LockType locktype() const { return READ; }
+ virtual bool slaveOk() const { return false; }
+ virtual bool slaveOverrideOk() { return true; }
+ virtual void help( stringstream &help ) const {
+ help << "http://www.mongodb.org/display/DOCS/Aggregation";
+ }
+
+ BSONObj getKey( const BSONObj& obj , const BSONObj& keyPattern , ScriptingFunction func , double avgSize , Scope * s ) {
+ if ( func ) {
+ BSONObjBuilder b( obj.objsize() + 32 );
+ b.append( "0" , obj );
+ const BSONObj& key = b.obj();
+ int res = s->invoke( func , &key, 0 );
+ uassert( 10041 , (string)"invoke failed in $keyf: " + s->getError() , res == 0 );
+ int type = s->type("return");
+ uassert( 10042 , "return of $key has to be an object" , type == Object );
+ return s->getObject( "return" );
+ }
+ return obj.extractFields( keyPattern , true ).getOwned();
+ }
+
+ bool group( string realdbname , const string& ns , const BSONObj& query ,
+ BSONObj keyPattern , string keyFunctionCode , string reduceCode , const char * reduceScope ,
+ BSONObj initial , string finalize ,
+ string& errmsg , BSONObjBuilder& result ) {
+
+
+ auto_ptr<Scope> s = globalScriptEngine->getPooledScope( realdbname );
+ s->localConnect( realdbname.c_str() );
+
+ if ( reduceScope )
+ s->init( reduceScope );
+
+ s->setObject( "$initial" , initial , true );
+
+ s->exec( "$reduce = " + reduceCode , "reduce setup" , false , true , true , 100 );
+ s->exec( "$arr = [];" , "reduce setup 2" , false , true , true , 100 );
+ ScriptingFunction f = s->createFunction(
+ "function(){ "
+ " if ( $arr[n] == null ){ "
+ " next = {}; "
+ " Object.extend( next , $key ); "
+ " Object.extend( next , $initial , true ); "
+ " $arr[n] = next; "
+ " next = null; "
+ " } "
+ " $reduce( obj , $arr[n] ); "
+ "}" );
+
+ ScriptingFunction keyFunction = 0;
+ if ( keyFunctionCode.size() ) {
+ keyFunction = s->createFunction( keyFunctionCode.c_str() );
+ }
+
+
+ double keysize = keyPattern.objsize() * 3;
+ double keynum = 1;
+
+ map<BSONObj,int,BSONObjCmp> map;
+ list<BSONObj> blah;
+
+ shared_ptr<Cursor> cursor = NamespaceDetailsTransient::getCursor(ns.c_str() , query);
+ ClientCursor::CleanupPointer ccPointer;
+ ccPointer.reset( new ClientCursor( QueryOption_NoCursorTimeout, cursor, ns ) );
+
+ while ( cursor->ok() ) {
+
+ if ( !ccPointer->yieldSometimes( ClientCursor::MaybeCovered ) ||
+ !cursor->ok() ) {
+ break;
+ }
+
+ if ( !cursor->currentMatches() || cursor->getsetdup( cursor->currLoc() ) ) {
+ cursor->advance();
+ continue;
+ }
+
+ if ( !ccPointer->yieldSometimes( ClientCursor::WillNeed ) ||
+ !cursor->ok() ) {
+ break;
+ }
+
+ BSONObj obj = cursor->current();
+ cursor->advance();
+
+ BSONObj key = getKey( obj , keyPattern , keyFunction , keysize / keynum , s.get() );
+ keysize += key.objsize();
+ keynum++;
+
+ int& n = map[key];
+ if ( n == 0 ) {
+ n = map.size();
+ s->setObject( "$key" , key , true );
+
+ uassert( 10043 , "group() can't handle more than 20000 unique keys" , n <= 20000 );
+ }
+
+ s->setObject( "obj" , obj , true );
+ s->setNumber( "n" , n - 1 );
+ if ( s->invoke( f , 0, 0 , 0 , true ) ) {
+ throw UserException( 9010 , (string)"reduce invoke failed: " + s->getError() );
+ }
+ }
+ ccPointer.reset();
+
+ if (!finalize.empty()) {
+ s->exec( "$finalize = " + finalize , "finalize define" , false , true , true , 100 );
+ ScriptingFunction g = s->createFunction(
+ "function(){ "
+ " for(var i=0; i < $arr.length; i++){ "
+ " var ret = $finalize($arr[i]); "
+ " if (ret !== undefined) "
+ " $arr[i] = ret; "
+ " } "
+ "}" );
+ s->invoke( g , 0, 0 , 0 , true );
+ }
+
+ result.appendArray( "retval" , s->getObject( "$arr" ) );
+ result.append( "count" , keynum - 1 );
+ result.append( "keys" , (int)(map.size()) );
+ s->exec( "$arr = [];" , "reduce setup 2" , false , true , true , 100 );
+ s->gc();
+
+ return true;
+ }
+
+ bool run(const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+
+ if ( !globalScriptEngine ) {
+ errmsg = "server-side JavaScript execution is disabled";
+ return false;
+ }
+
+ /* db.$cmd.findOne( { group : <p> } ) */
+ const BSONObj& p = jsobj.firstElement().embeddedObjectUserCheck();
+
+ BSONObj q;
+ if ( p["cond"].type() == Object )
+ q = p["cond"].embeddedObject();
+ else if ( p["condition"].type() == Object )
+ q = p["condition"].embeddedObject();
+ else
+ q = getQuery( p );
+
+ if ( p["ns"].type() != String ) {
+ errmsg = "ns has to be set";
+ return false;
+ }
+
+ string ns = dbname + "." + p["ns"].String();
+
+ BSONObj key;
+ string keyf;
+ if ( p["key"].type() == Object ) {
+ key = p["key"].embeddedObjectUserCheck();
+ if ( ! p["$keyf"].eoo() ) {
+ errmsg = "can't have key and $keyf";
+ return false;
+ }
+ }
+ else if ( p["$keyf"].type() ) {
+ keyf = p["$keyf"]._asCode();
+ }
+ else {
+ // no key specified, will use entire object as key
+ }
+
+ BSONElement reduce = p["$reduce"];
+ if ( reduce.eoo() ) {
+ errmsg = "$reduce has to be set";
+ return false;
+ }
+
+ BSONElement initial = p["initial"];
+ if ( initial.type() != Object ) {
+ errmsg = "initial has to be an object";
+ return false;
+ }
+
+
+ string finalize;
+ if (p["finalize"].type())
+ finalize = p["finalize"]._asCode();
+
+ return group( dbname , ns , q ,
+ key , keyf , reduce._asCode() , reduce.type() != CodeWScope ? 0 : reduce.codeWScopeScopeData() ,
+ initial.embeddedObject() , finalize ,
+ errmsg , result );
+ }
+
+ } cmdGroup;
+
+
+} // namespace mongo
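A sketch of the command document the run() method above parses (hypothetical collection name and JavaScript code), again via the C++ client:

    // assuming conn is a connected DBClientConnection
    BSONObj res;
    conn.runCommand( "mydb" ,
                     BSON( "group" << BSON( "ns" << "article"
                                         << "key" << BSON( "author" << 1 )
                                         << "cond" << BSONObj()
                                         << "$reduce" << "function(obj, prev){ prev.count++; }"
                                         << "initial" << BSON( "count" << 0 ) ) ) ,
                     res );
    // res["retval"] holds the grouped rows, res["count"] and res["keys"] the totals computed above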
diff --git a/src/mongo/db/commands/isself.cpp b/src/mongo/db/commands/isself.cpp
new file mode 100644
index 00000000000..ebf6d5bceec
--- /dev/null
+++ b/src/mongo/db/commands/isself.cpp
@@ -0,0 +1,246 @@
+// isself.cpp
+
+#include "pch.h"
+#include "../../util/net/listen.h"
+#include "../commands.h"
+#include "../../client/dbclient.h"
+#include "../security.h"
+
+#include <boost/algorithm/string.hpp>
+
+#ifndef _WIN32
+# ifndef __sunos__
+# include <ifaddrs.h>
+# endif
+# include <sys/resource.h>
+# include <sys/stat.h>
+
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <arpa/inet.h>
+#include <errno.h>
+#include <netdb.h>
+#ifdef __openbsd__
+# include <sys/uio.h>
+#endif
+
+#endif
+
+
+namespace mongo {
+
+#if !defined(_WIN32) && !defined(__sunos__)
+
+ vector<string> getMyAddrs() {
+ vector<string> out;
+ ifaddrs * addrs;
+
+ if ( ! cmdLine.bind_ip.empty() ) {
+ boost::split( out, cmdLine.bind_ip, boost::is_any_of( ", " ) );
+ return out;
+ }
+
+ int status = getifaddrs(&addrs);
+ massert(13469, "getifaddrs failure: " + errnoWithDescription(errno), status == 0);
+
+ // based on example code from linux getifaddrs manpage
+ for (ifaddrs * addr = addrs; addr != NULL; addr = addr->ifa_next) {
+ if ( addr->ifa_addr == NULL ) continue;
+ int family = addr->ifa_addr->sa_family;
+ char host[NI_MAXHOST];
+
+ if (family == AF_INET || family == AF_INET6) {
+ status = getnameinfo(addr->ifa_addr,
+ (family == AF_INET ? sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6)),
+ host, NI_MAXHOST, NULL, 0, NI_NUMERICHOST);
+ if ( status != 0 ) {
+ freeifaddrs( addrs );
+ addrs = NULL;
+ msgasserted( 13470, string("getnameinfo() failed: ") + gai_strerror(status) );
+ }
+
+ out.push_back(host);
+ }
+
+ }
+
+ freeifaddrs( addrs );
+ addrs = NULL;
+
+ if (logLevel >= 1) {
+ log(1) << "getMyAddrs():";
+ for (vector<string>::const_iterator it=out.begin(), end=out.end(); it!=end; ++it) {
+ log(1) << " [" << *it << ']';
+ }
+ log(1) << endl;
+ }
+
+ return out;
+ }
+
+ vector<string> getAllIPs(StringData iporhost) {
+ addrinfo* addrs = NULL;
+ addrinfo hints;
+ memset(&hints, 0, sizeof(addrinfo));
+ hints.ai_socktype = SOCK_STREAM;
+ hints.ai_family = (IPv6Enabled() ? AF_UNSPEC : AF_INET);
+
+ static string portNum = BSONObjBuilder::numStr(cmdLine.port);
+
+ vector<string> out;
+
+ int ret = getaddrinfo(iporhost.data(), portNum.c_str(), &hints, &addrs);
+ if ( ret ) {
+ warning() << "getaddrinfo(\"" << iporhost.data() << "\") failed: " << gai_strerror(ret) << endl;
+ return out;
+ }
+
+ for (addrinfo* addr = addrs; addr != NULL; addr = addr->ai_next) {
+ int family = addr->ai_family;
+ char host[NI_MAXHOST];
+
+ if (family == AF_INET || family == AF_INET6) {
+ int status = getnameinfo(addr->ai_addr, addr->ai_addrlen, host, NI_MAXHOST, NULL, 0, NI_NUMERICHOST);
+
+ massert(13472, string("getnameinfo() failed: ") + gai_strerror(status), status == 0);
+
+ out.push_back(host);
+ }
+
+ }
+
+ freeaddrinfo(addrs);
+
+ if (logLevel >= 1) {
+ log(1) << "getallIPs(\"" << iporhost << "\"):";
+ for (vector<string>::const_iterator it=out.begin(), end=out.end(); it!=end; ++it) {
+ log(1) << " [" << *it << ']';
+ }
+ log(1) << endl;
+ }
+
+ return out;
+ }
+#endif
+
+
+ class IsSelfCommand : public Command {
+ public:
+ IsSelfCommand() : Command("_isSelf") , _cacheLock( "IsSelfCommand::_cacheLock" ) {}
+ virtual bool slaveOk() const { return true; }
+ virtual LockType locktype() const { return NONE; }
+ virtual void help( stringstream &help ) const {
+ help << "{ _isSelf : 1 } INTERNAL ONLY";
+ }
+
+ bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ init();
+ result.append( "id" , _id );
+ return true;
+ }
+
+ void init() {
+ scoped_lock lk( _cacheLock );
+ if ( ! _id.isSet() )
+ _id.init();
+ }
+
+ OID _id;
+
+ mongo::mutex _cacheLock;
+ map<string,bool> _cache;
+ } isSelfCommand;
+
+ bool HostAndPort::isSelf() const {
+
+ if( dyn() ) {
+ LOG(2) << "isSelf " << _dynName << ' ' << dynHostMyName() << endl;
+ return dynHostMyName() == _dynName;
+ }
+
+ int _p = port();
+ int p = _p == -1 ? CmdLine::DefaultDBPort : _p;
+
+ if( p != cmdLine.port ) {
+ // shortcut - ports have to match at the very least
+ return false;
+ }
+
+ string host = str::stream() << this->host() << ":" << p;
+
+ {
+ // check cache for this host
+ // debatably something _could_ change, but I'm not sure right now (erh 10/14/2010)
+ scoped_lock lk( isSelfCommand._cacheLock );
+ map<string,bool>::const_iterator i = isSelfCommand._cache.find( host );
+ if ( i != isSelfCommand._cache.end() )
+ return i->second;
+ }
+
+#if !defined(_WIN32) && !defined(__sunos__)
+ // on linux and os x we can do a quick check for an ip match
+
+ const vector<string> myaddrs = getMyAddrs();
+ const vector<string> addrs = getAllIPs(_host);
+
+ for (vector<string>::const_iterator i=myaddrs.begin(), iend=myaddrs.end(); i!=iend; ++i) {
+ for (vector<string>::const_iterator j=addrs.begin(), jend=addrs.end(); j!=jend; ++j) {
+ string a = *i;
+ string b = *j;
+
+ if ( a == b ||
+ ( str::startsWith( a , "127." ) && str::startsWith( b , "127." ) ) // 127. is all loopback
+ ) {
+
+ // add to cache
+ scoped_lock lk( isSelfCommand._cacheLock );
+ isSelfCommand._cache[host] = true;
+ return true;
+ }
+ }
+ }
+
+#endif
+
+ if ( ! Listener::getTimeTracker() ) {
+ // this ensures we are actually running a server
+ // this may return true later, so may want to retry
+ return false;
+ }
+
+ try {
+ isSelfCommand.init();
+ DBClientConnection conn;
+ string errmsg;
+ if ( ! conn.connect( host , errmsg ) ) {
+ // should this go in the cache?
+ return false;
+ }
+
+ if (!noauth && cmdLine.keyFile &&
+ !conn.auth("local", internalSecurity.user, internalSecurity.pwd, errmsg, false)) {
+ return false;
+ }
+
+ BSONObj out;
+ bool ok = conn.simpleCommand( "admin" , &out , "_isSelf" );
+ bool me = ok && out["id"].type() == jstOID && isSelfCommand._id == out["id"].OID();
+
+ // add to cache
+ scoped_lock lk( isSelfCommand._cacheLock );
+ isSelfCommand._cache[host] = me;
+
+ return me;
+ }
+ catch ( std::exception& e ) {
+ warning() << "couldn't check isSelf (" << host << ") " << e.what() << endl;
+ }
+
+ return false;
+ }
+
+}
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
new file mode 100644
index 00000000000..add76c39c47
--- /dev/null
+++ b/src/mongo/db/commands/mr.cpp
@@ -0,0 +1,1317 @@
+// mr.cpp
+
+/**
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../db.h"
+#include "../instance.h"
+#include "../commands.h"
+#include "../../scripting/engine.h"
+#include "../../client/dbclient.h"
+#include "../../client/connpool.h"
+#include "../../client/parallel.h"
+#include "../queryoptimizer.h"
+#include "../matcher.h"
+#include "../clientcursor.h"
+#include "../replutil.h"
+#include "../../s/d_chunk_manager.h"
+#include "../../s/d_logic.h"
+#include "../../s/grid.h"
+
+#include "mr.h"
+
+namespace mongo {
+
+ namespace mr {
+
+ AtomicUInt Config::JOB_NUMBER;
+
+ JSFunction::JSFunction( string type , const BSONElement& e ) {
+ _type = type;
+ _code = e._asCode();
+
+ if ( e.type() == CodeWScope )
+ _wantedScope = e.codeWScopeObject();
+ }
+
+ void JSFunction::init( State * state ) {
+ _scope = state->scope();
+ assert( _scope );
+ _scope->init( &_wantedScope );
+
+ _func = _scope->createFunction( _code.c_str() );
+ uassert( 13598 , str::stream() << "couldn't compile code for: " << _type , _func );
+
+ // install in JS scope so that it can be called in JS mode
+ _scope->setFunction(_type.c_str(), _code.c_str());
+ }
+
+ void JSMapper::init( State * state ) {
+ _func.init( state );
+ _params = state->config().mapParams;
+ }
+
+ /**
+ * Applies the map function to an object, which should internally call emit()
+ */
+ void JSMapper::map( const BSONObj& o ) {
+ Scope * s = _func.scope();
+ assert( s );
+ if ( s->invoke( _func.func() , &_params, &o , 0 , true, false, true ) )
+ throw UserException( 9014, str::stream() << "map invoke failed: " + s->getError() );
+ }
+
+ /**
+ * Applies the finalize function to a tuple obj (key, val)
+ * Returns tuple obj {_id: key, value: newval}
+ */
+ BSONObj JSFinalizer::finalize( const BSONObj& o ) {
+ Scope * s = _func.scope();
+
+ Scope::NoDBAccess no = s->disableDBAccess( "can't access db inside finalize" );
+ s->invokeSafe( _func.func() , &o, 0 );
+
+ // don't want to use o.objsize() to size b
+ // since there are many cases where the point of finalize
+ // is converting many fields to 1
+ BSONObjBuilder b;
+ b.append( o.firstElement() );
+ s->append( b , "value" , "return" );
+ return b.obj();
+ }
+
+ void JSReducer::init( State * state ) {
+ _func.init( state );
+ }
+
+ /**
+ * Reduces a list of tuple objects (key, value) to a single tuple {"0": key, "1": value}
+ */
+ BSONObj JSReducer::reduce( const BSONList& tuples ) {
+ if (tuples.size() <= 1)
+ return tuples[0];
+ BSONObj key;
+ int endSizeEstimate = 16;
+ _reduce( tuples , key , endSizeEstimate );
+
+ BSONObjBuilder b(endSizeEstimate);
+ b.appendAs( key.firstElement() , "0" );
+ _func.scope()->append( b , "1" , "return" );
+ return b.obj();
+ }
+
+ /**
+ * Reduces a list of tuple objects (key, value) to a single tuple {_id: key, value: val}
+ * Also applies a finalizer method if present.
+ */
+ BSONObj JSReducer::finalReduce( const BSONList& tuples , Finalizer * finalizer ) {
+
+ BSONObj res;
+ BSONObj key;
+
+ if (tuples.size() == 1) {
+ // 1 obj, just use it
+ key = tuples[0];
+ BSONObjBuilder b(key.objsize());
+ BSONObjIterator it(key);
+ b.appendAs( it.next() , "_id" );
+ b.appendAs( it.next() , "value" );
+ res = b.obj();
+ }
+ else {
+ // need to reduce
+ int endSizeEstimate = 16;
+ _reduce( tuples , key , endSizeEstimate );
+ BSONObjBuilder b(endSizeEstimate);
+ b.appendAs( key.firstElement() , "_id" );
+ _func.scope()->append( b , "value" , "return" );
+ res = b.obj();
+ }
+
+ if ( finalizer ) {
+ res = finalizer->finalize( res );
+ }
+
+ return res;
+ }
+
+ /**
+ * actually applies a reduce, to a list of tuples (key, value).
+ * After the call, tuples will hold a single tuple {"0": key, "1": value}
+ */
+ void JSReducer::_reduce( const BSONList& tuples , BSONObj& key , int& endSizeEstimate ) {
+ uassert( 10074 , "need values" , tuples.size() );
+
+ int sizeEstimate = ( tuples.size() * tuples.begin()->getField( "value" ).size() ) + 128;
+
+ // need to build the reduce args: ( key, [values] )
+ BSONObjBuilder reduceArgs( sizeEstimate );
+ boost::scoped_ptr<BSONArrayBuilder> valueBuilder;
+ int sizeSoFar = 0;
+ unsigned n = 0;
+ for ( ; n<tuples.size(); n++ ) {
+ BSONObjIterator j(tuples[n]);
+ BSONElement keyE = j.next();
+ if ( n == 0 ) {
+ reduceArgs.append( keyE );
+ key = keyE.wrap();
+ sizeSoFar = 5 + keyE.size();
+ valueBuilder.reset(new BSONArrayBuilder( reduceArgs.subarrayStart( "tuples" ) ));
+ }
+
+ BSONElement ee = j.next();
+
+ uassert( 13070 , "value too large to reduce" , ee.size() < ( BSONObjMaxUserSize / 2 ) );
+
+ if ( sizeSoFar + ee.size() > BSONObjMaxUserSize ) {
+ assert( n > 1 ); // if not, inf. loop
+ break;
+ }
+
+ valueBuilder->append( ee );
+ sizeSoFar += ee.size();
+ }
+ assert(valueBuilder);
+ valueBuilder->done();
+ BSONObj args = reduceArgs.obj();
+
+ Scope * s = _func.scope();
+
+ s->invokeSafe( _func.func() , &args, 0, 0, false, true, true );
+ ++numReduces;
+
+ if ( s->type( "return" ) == Array ) {
+ uasserted( 10075 , "reduce -> multiple not supported yet");
+ return;
+ }
+
+ endSizeEstimate = key.objsize() + ( args.objsize() / tuples.size() );
+
+ if ( n == tuples.size() )
+ return;
+
+ // the input list was too large; add the rest of the elements to new tuples and reduce again
+ // note: would be better to use a loop instead of recursion to avoid stack overflow
+ BSONList x;
+ for ( ; n < tuples.size(); n++ ) {
+ x.push_back( tuples[n] );
+ }
+ BSONObjBuilder temp( endSizeEstimate );
+ temp.append( key.firstElement() );
+ s->append( temp , "1" , "return" );
+ x.push_back( temp.obj() );
+ _reduce( x , key , endSizeEstimate );
+ }
+
+ Config::Config( const string& _dbname , const BSONObj& cmdObj ) {
+
+ dbname = _dbname;
+ ns = dbname + "." + cmdObj.firstElement().valuestr();
+
+ verbose = cmdObj["verbose"].trueValue();
+ jsMode = cmdObj["jsMode"].trueValue();
+ splitInfo = 0;
+ if (cmdObj.hasField("splitInfo"))
+ splitInfo = cmdObj["splitInfo"].Int();
+
+ jsMaxKeys = 500000;
+ reduceTriggerRatio = 10.0;
+ maxInMemSize = 500 * 1024;
+
+ uassert( 13602 , "outType is no longer a valid option" , cmdObj["outType"].eoo() );
+
+ if ( cmdObj["out"].type() == String ) {
+ finalShort = cmdObj["out"].String();
+ outType = REPLACE;
+ }
+ else if ( cmdObj["out"].type() == Object ) {
+ BSONObj o = cmdObj["out"].embeddedObject();
+
+ BSONElement e = o.firstElement();
+ string t = e.fieldName();
+
+ if ( t == "normal" || t == "replace" ) {
+ outType = REPLACE;
+ finalShort = e.String();
+ }
+ else if ( t == "merge" ) {
+ outType = MERGE;
+ finalShort = e.String();
+ }
+ else if ( t == "reduce" ) {
+ outType = REDUCE;
+ finalShort = e.String();
+ }
+ else if ( t == "inline" ) {
+ outType = INMEMORY;
+ }
+ else {
+ uasserted( 13522 , str::stream() << "unknown out specifier [" << t << "]" );
+ }
+
+ if (o.hasElement("db")) {
+ outDB = o["db"].String();
+ }
+
+ if (o.hasElement("nonAtomic")) {
+ outNonAtomic = o["nonAtomic"].Bool();
+ if (outNonAtomic)
+ uassert( 15895 , "nonAtomic option cannot be used with this output type", (outType == REDUCE || outType == MERGE) );
+ }
+ }
+ else {
+ uasserted( 13606 , "'out' has to be a string or an object" );
+ }
+
+ if ( outType != INMEMORY ) { // setup names
+ tempLong = str::stream() << (outDB.empty() ? dbname : outDB) << ".tmp.mr." << cmdObj.firstElement().String() << "_" << JOB_NUMBER++;
+
+ incLong = tempLong + "_inc";
+
+ finalLong = str::stream() << (outDB.empty() ? dbname : outDB) << "." << finalShort;
+ }
+
+ {
+ // scope and code
+
+ if ( cmdObj["scope"].type() == Object )
+ scopeSetup = cmdObj["scope"].embeddedObjectUserCheck();
+
+ mapper.reset( new JSMapper( cmdObj["map"] ) );
+ reducer.reset( new JSReducer( cmdObj["reduce"] ) );
+ if ( cmdObj["finalize"].type() && cmdObj["finalize"].trueValue() )
+ finalizer.reset( new JSFinalizer( cmdObj["finalize"] ) );
+
+ if ( cmdObj["mapparams"].type() == Array ) {
+ mapParams = cmdObj["mapparams"].embeddedObjectUserCheck();
+ }
+
+ }
+
+ {
+ // query options
+ BSONElement q = cmdObj["query"];
+ if ( q.type() == Object )
+ filter = q.embeddedObjectUserCheck();
+ else
+ uassert( 13608 , "query has to be blank or an Object" , ! q.trueValue() );
+
+
+ BSONElement s = cmdObj["sort"];
+ if ( s.type() == Object )
+ sort = s.embeddedObjectUserCheck();
+ else
+ uassert( 13609 , "sort has to be blank or an Object" , ! s.trueValue() );
+
+ if ( cmdObj["limit"].isNumber() )
+ limit = cmdObj["limit"].numberLong();
+ else
+ limit = 0;
+ }
+ }
+
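Putting the options recognized by this constructor together, a hedged example of a mapReduce command document (hypothetical collection and JavaScript functions) that it would parse:

    // assuming conn is a connected DBClientConnection
    BSONObj res;
    conn.runCommand( "mydb" ,
                     BSON( "mapreduce" << "events"
                        << "map" << "function(){ emit(this.user, 1); }"
                        << "reduce" << "function(k, vals){ var t = 0; for (var i = 0; i < vals.length; i++) t += vals[i]; return t; }"
                        << "query" << BSON( "type" << "click" )
                        << "out" << BSON( "merge" << "user_clicks" ) ) ,   // or a plain string, or { inline: 1 }
                     res );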
+ /**
+ * Create temporary collection, set up indexes
+ */
+ void State::prepTempCollection() {
+ if ( ! _onDisk )
+ return;
+
+ if (_config.incLong != _config.tempLong) {
+ // create the inc collection and make sure we have index on "0" key
+ _db.dropCollection( _config.incLong );
+ {
+ writelock l( _config.incLong );
+ Client::Context ctx( _config.incLong );
+ string err;
+ if ( ! userCreateNS( _config.incLong.c_str() , BSON( "autoIndexId" << 0 ) , err , false ) ) {
+ uasserted( 13631 , str::stream() << "userCreateNS failed for mr incLong ns: " << _config.incLong << " err: " << err );
+ }
+ }
+
+ BSONObj sortKey = BSON( "0" << 1 );
+ _db.ensureIndex( _config.incLong , sortKey );
+ }
+
+ // create temp collection
+ _db.dropCollection( _config.tempLong );
+ {
+ writelock lock( _config.tempLong.c_str() );
+ Client::Context ctx( _config.tempLong.c_str() );
+ string errmsg;
+ if ( ! userCreateNS( _config.tempLong.c_str() , BSONObj() , errmsg , true ) ) {
+ uasserted( 13630 , str::stream() << "userCreateNS failed for mr tempLong ns: " << _config.tempLong << " err: " << errmsg );
+ }
+ }
+
+ {
+ // copy indexes
+ auto_ptr<DBClientCursor> idx = _db.getIndexes( _config.finalLong );
+ while ( idx->more() ) {
+ BSONObj i = idx->next();
+
+ BSONObjBuilder b( i.objsize() + 16 );
+ b.append( "ns" , _config.tempLong );
+ BSONObjIterator j( i );
+ while ( j.more() ) {
+ BSONElement e = j.next();
+ if ( str::equals( e.fieldName() , "_id" ) ||
+ str::equals( e.fieldName() , "ns" ) )
+ continue;
+
+ b.append( e );
+ }
+
+ BSONObj indexToInsert = b.obj();
+ insert( Namespace( _config.tempLong.c_str() ).getSisterNS( "system.indexes" ).c_str() , indexToInsert );
+ }
+
+ }
+
+ }
+
+ /**
+ * For inline mode, appends results to output object.
+ * Makes sure (key, value) tuple is formatted as {_id: key, value: val}
+ */
+ void State::appendResults( BSONObjBuilder& final ) {
+ if ( _onDisk ) {
+ if (!_config.outDB.empty()) {
+ BSONObjBuilder loc;
+ if ( !_config.outDB.empty())
+ loc.append( "db" , _config.outDB );
+ if ( !_config.finalShort.empty() )
+ loc.append( "collection" , _config.finalShort );
+ final.append("result", loc.obj());
+ }
+ else {
+ if ( !_config.finalShort.empty() )
+ final.append( "result" , _config.finalShort );
+ }
+
+ if ( _config.splitInfo > 0 ) {
+ // add split points, used for shard
+ BSONObj res;
+ BSONObj idKey = BSON( "_id" << 1 );
+ if ( ! _db.runCommand( "admin" , BSON( "splitVector" << _config.finalLong << "keyPattern" << idKey << "maxChunkSizeBytes" << _config.splitInfo ) , res ) ) {
+ uasserted( 15921 , str::stream() << "splitVector failed: " << res );
+ }
+ if ( res.hasField( "splitKeys" ) )
+ final.append( res.getField( "splitKeys" ) );
+ }
+ return;
+ }
+
+ if (_jsMode) {
+ ScriptingFunction getResult = _scope->createFunction("var map = _mrMap; var result = []; for (key in map) { result.push({_id: key, value: map[key]}) } return result;");
+ _scope->invoke(getResult, 0, 0, 0, false);
+ BSONObj obj = _scope->getObject("return");
+ final.append("results", BSONArray(obj));
+ return;
+ }
+
+ uassert( 13604 , "too much data for in memory map/reduce" , _size < BSONObjMaxUserSize );
+
+ BSONArrayBuilder b( (int)(_size * 1.2) ); // _size is data size, doesn't count overhead and keys
+
+ for ( InMemory::iterator i=_temp->begin(); i!=_temp->end(); ++i ) {
+ BSONObj key = i->first;
+ BSONList& all = i->second;
+
+ assert( all.size() == 1 );
+
+ BSONObjIterator vi( all[0] );
+ vi.next();
+
+ BSONObjBuilder temp( b.subobjStart() );
+ temp.appendAs( key.firstElement() , "_id" );
+ temp.appendAs( vi.next() , "value" );
+ temp.done();
+ }
+
+ BSONArray res = b.arr();
+ final.append( "results" , res );
+ }
+
+ /**
+ * Does post processing on output collection.
+ * This may involve replacing, merging or reducing.
+ */
+ long long State::postProcessCollection(CurOp* op, ProgressMeterHolder& pm) {
+ if ( _onDisk == false || _config.outType == Config::INMEMORY )
+ return _temp->size();
+
+ if (_config.outNonAtomic)
+ return postProcessCollectionNonAtomic(op, pm);
+ writelock lock;
+ return postProcessCollectionNonAtomic(op, pm);
+ }
+
+ long long State::postProcessCollectionNonAtomic(CurOp* op, ProgressMeterHolder& pm) {
+
+ if ( _config.finalLong == _config.tempLong )
+ return _db.count( _config.finalLong );
+
+ if ( _config.outType == Config::REPLACE || _db.count( _config.finalLong ) == 0 ) {
+ writelock lock;
+ // replace: just rename from temp to final collection name, dropping previous collection
+ _db.dropCollection( _config.finalLong );
+ BSONObj info;
+ if ( ! _db.runCommand( "admin" , BSON( "renameCollection" << _config.tempLong << "to" << _config.finalLong ) , info ) ) {
+ uasserted( 10076 , str::stream() << "rename failed: " << info );
+ }
+
+ _db.dropCollection( _config.tempLong );
+ }
+ else if ( _config.outType == Config::MERGE ) {
+ // merge: upsert new docs into old collection
+ op->setMessage( "m/r: merge post processing" , _db.count( _config.tempLong, BSONObj() ) );
+ auto_ptr<DBClientCursor> cursor = _db.query( _config.tempLong , BSONObj() );
+ while ( cursor->more() ) {
+ writelock lock;
+ BSONObj o = cursor->next();
+ Helpers::upsert( _config.finalLong , o );
+ getDur().commitIfNeeded();
+ pm.hit();
+ }
+ _db.dropCollection( _config.tempLong );
+ pm.finished();
+ }
+ else if ( _config.outType == Config::REDUCE ) {
+ // reduce: apply reduce op on new result and existing one
+ BSONList values;
+
+ op->setMessage( "m/r: reduce post processing" , _db.count( _config.tempLong, BSONObj() ) );
+ auto_ptr<DBClientCursor> cursor = _db.query( _config.tempLong , BSONObj() );
+ while ( cursor->more() ) {
+ writelock lock;
+ BSONObj temp = cursor->next();
+ BSONObj old;
+
+ bool found;
+ {
+ Client::Context tx( _config.finalLong );
+ found = Helpers::findOne( _config.finalLong.c_str() , temp["_id"].wrap() , old , true );
+ }
+
+ if ( found ) {
+ // need to reduce
+ values.clear();
+ values.push_back( temp );
+ values.push_back( old );
+ Helpers::upsert( _config.finalLong , _config.reducer->finalReduce( values , _config.finalizer.get() ) );
+ }
+ else {
+ Helpers::upsert( _config.finalLong , temp );
+ }
+ getDur().commitIfNeeded();
+ pm.hit();
+ }
+ _db.dropCollection( _config.tempLong );
+ pm.finished();
+ }
+
+ return _db.count( _config.finalLong );
+ }
+
+ /**
+ * Insert doc in collection
+ */
+ void State::insert( const string& ns , const BSONObj& o ) {
+ assert( _onDisk );
+
+ writelock l( ns );
+ Client::Context ctx( ns );
+
+ theDataFileMgr.insertAndLog( ns.c_str() , o , false );
+ }
+
+ /**
+ * Insert doc into the inc collection, taking proper lock
+ */
+ void State::insertToInc( BSONObj& o ) {
+ writelock l(_config.incLong);
+ Client::Context ctx(_config.incLong);
+ _insertToInc(o);
+ }
+
+ /**
+ * Insert doc into the inc collection
+ */
+ void State::_insertToInc( BSONObj& o ) {
+ assert( _onDisk );
+ theDataFileMgr.insertWithObjMod( _config.incLong.c_str() , o , true );
+ getDur().commitIfNeeded();
+ }
+
+ State::State( const Config& c ) : _config( c ), _size(0), _dupCount(0), _numEmits(0) {
+ _temp.reset( new InMemory() );
+ _onDisk = _config.outType != Config::INMEMORY;
+ }
+
+ bool State::sourceExists() {
+ return _db.exists( _config.ns );
+ }
+
+ long long State::incomingDocuments() {
+ return _db.count( _config.ns , _config.filter , QueryOption_SlaveOk , (unsigned) _config.limit );
+ }
+
+ State::~State() {
+ if ( _onDisk ) {
+ try {
+ _db.dropCollection( _config.tempLong );
+ _db.dropCollection( _config.incLong );
+ }
+ catch ( std::exception& e ) {
+ error() << "couldn't cleanup after map reduce: " << e.what() << endl;
+ }
+ }
+
+ if (_scope) {
+ // cleanup js objects
+ ScriptingFunction cleanup = _scope->createFunction("delete _emitCt; delete _keyCt; delete _mrMap;");
+ _scope->invoke(cleanup, 0, 0, 0, true);
+ }
+ }
+
+ /**
+ * Initialize the mapreduce operation, creating the inc collection
+ */
+ void State::init() {
+ // setup js
+ _scope.reset(globalScriptEngine->getPooledScope( _config.dbname ).release() );
+ _scope->localConnect( _config.dbname.c_str() );
+
+ if ( ! _config.scopeSetup.isEmpty() )
+ _scope->init( &_config.scopeSetup );
+
+ _config.mapper->init( this );
+ _config.reducer->init( this );
+ if ( _config.finalizer )
+ _config.finalizer->init( this );
+ _scope->setBoolean("_doFinal", _config.finalizer);
+
+ // by default start in JS mode, will be faster for small jobs
+ _jsMode = _config.jsMode;
+// _jsMode = true;
+ switchMode(_jsMode);
+
+ // global JS map/reduce hashmap
+ // we use a standard JS object which means keys are only simple types
+            // we could also use a real hashmap from a library, but we would still need to add object comparison methods
+// _scope->setObject("_mrMap", BSONObj(), false);
+ ScriptingFunction init = _scope->createFunction("_emitCt = 0; _keyCt = 0; _dupCt = 0; _redCt = 0; if (typeof(_mrMap) === 'undefined') { _mrMap = {}; }");
+ _scope->invoke(init, 0, 0, 0, true);
+
+ // js function to run reduce on all keys
+// redfunc = _scope->createFunction("for (var key in hashmap) { print('Key is ' + key); list = hashmap[key]; ret = reduce(key, list); print('Value is ' + ret); };");
+ _reduceAll = _scope->createFunction("var map = _mrMap; var list, ret; for (var key in map) { list = map[key]; if (list.length != 1) { ret = _reduce(key, list); map[key] = [ret]; ++_redCt; } } _dupCt = 0;");
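+            // _reduceAll above, formatted for readability:
+            //   var map = _mrMap;
+            //   var list, ret;
+            //   for (var key in map) {
+            //       list = map[key];
+            //       if (list.length != 1) {
+            //           ret = _reduce(key, list);
+            //           map[key] = [ret];
+            //           ++_redCt;
+            //       }
+            //   }
+            //   _dupCt = 0;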
+ _reduceAndEmit = _scope->createFunction("var map = _mrMap; var list, ret; for (var key in map) { list = map[key]; if (list.length == 1) { ret = list[0]; } else { ret = _reduce(key, list); ++_redCt; } emit(key, ret); }; delete _mrMap;");
+ _reduceAndFinalize = _scope->createFunction("var map = _mrMap; var list, ret; for (var key in map) { list = map[key]; if (list.length == 1) { if (!_doFinal) {continue;} ret = list[0]; } else { ret = _reduce(key, list); ++_redCt; }; if (_doFinal){ ret = _finalize(key, ret); } map[key] = ret; }");
+ _reduceAndFinalizeAndInsert = _scope->createFunction("var map = _mrMap; var list, ret; for (var key in map) { list = map[key]; if (list.length == 1) { ret = list[0]; } else { ret = _reduce(key, list); ++_redCt; }; if (_doFinal){ ret = _finalize(key, ret); } _nativeToTemp({_id: key, value: ret}); }");
+
+ }
+
+ void State::switchMode(bool jsMode) {
+ _jsMode = jsMode;
+ if (jsMode) {
+ // emit function that stays in JS
+ _scope->setFunction("emit", "function(key, value) { if (typeof(key) === 'object') { _bailFromJS(key, value); return; }; ++_emitCt; var map = _mrMap; var list = map[key]; if (!list) { ++_keyCt; list = []; map[key] = list; } else { ++_dupCt; } list.push(value); }");
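+                // the JS emit above, formatted for readability:
+                //   function(key, value) {
+                //       if (typeof(key) === 'object') { _bailFromJS(key, value); return; }
+                //       ++_emitCt;
+                //       var map = _mrMap;
+                //       var list = map[key];
+                //       if (!list) { ++_keyCt; list = []; map[key] = list; }
+                //       else { ++_dupCt; }
+                //       list.push(value);
+                //   }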
+ _scope->injectNative("_bailFromJS", _bailFromJS, this);
+ }
+ else {
+ // emit now populates C++ map
+ _scope->injectNative( "emit" , fast_emit, this );
+ }
+ }
+
+ void State::bailFromJS() {
+ log(1) << "M/R: Switching from JS mode to mixed mode" << endl;
+
+ // reduce and reemit into c++
+ switchMode(false);
+ _scope->invoke(_reduceAndEmit, 0, 0, 0, true);
+ // need to get the real number emitted so far
+ _numEmits = _scope->getNumberInt("_emitCt");
+ _config.reducer->numReduces = _scope->getNumberInt("_redCt");
+ }
+
+ /**
+ * Applies last reduce and finalize on a list of tuples (key, val)
+ * Inserts single result {_id: key, value: val} into temp collection
+ */
+ void State::finalReduce( BSONList& values ) {
+ if ( !_onDisk || values.size() == 0 )
+ return;
+
+ BSONObj res = _config.reducer->finalReduce( values , _config.finalizer.get() );
+ insert( _config.tempLong , res );
+ }
+
+ BSONObj _nativeToTemp( const BSONObj& args, void* data ) {
+ State* state = (State*) data;
+ BSONObjIterator it(args);
+ state->insert(state->_config.tempLong, it.next().Obj());
+ return BSONObj();
+ }
+
+// BSONObj _nativeToInc( const BSONObj& args, void* data ) {
+// State* state = (State*) data;
+// BSONObjIterator it(args);
+// const BSONObj& obj = it.next().Obj();
+// state->_insertToInc(const_cast<BSONObj&>(obj));
+// return BSONObj();
+// }
+
+ /**
+ * Applies last reduce and finalize.
+ * After calling this method, the temp collection will be completed.
+ * If inline, the results will be in the in memory map
+ */
+ void State::finalReduce( CurOp * op , ProgressMeterHolder& pm ) {
+
+ if (_jsMode) {
+ // apply the reduce within JS
+ if (_onDisk) {
+ _scope->injectNative("_nativeToTemp", _nativeToTemp, this);
+ _scope->invoke(_reduceAndFinalizeAndInsert, 0, 0, 0, true);
+ return;
+ }
+ else {
+ _scope->invoke(_reduceAndFinalize, 0, 0, 0, true);
+ return;
+ }
+ }
+
+ if ( ! _onDisk ) {
+ // all data has already been reduced, just finalize
+ if ( _config.finalizer ) {
+ long size = 0;
+ for ( InMemory::iterator i=_temp->begin(); i!=_temp->end(); ++i ) {
+ BSONObj key = i->first;
+ BSONList& all = i->second;
+
+ assert( all.size() == 1 );
+
+ BSONObj res = _config.finalizer->finalize( all[0] );
+
+ all.clear();
+ all.push_back( res );
+ size += res.objsize();
+ }
+ _size = size;
+ }
+ return;
+ }
+
+ // use index on "0" to pull sorted data
+ assert( _temp->size() == 0 );
+ BSONObj sortKey = BSON( "0" << 1 );
+ {
+ bool foundIndex = false;
+
+ auto_ptr<DBClientCursor> idx = _db.getIndexes( _config.incLong );
+ while ( idx.get() && idx->more() ) {
+ BSONObj x = idx->next();
+ if ( sortKey.woCompare( x["key"].embeddedObject() ) == 0 ) {
+ foundIndex = true;
+ break;
+ }
+ }
+
+ assert( foundIndex );
+ }
+
+ readlock rl( _config.incLong.c_str() );
+ Client::Context ctx( _config.incLong );
+
+ BSONObj prev;
+ BSONList all;
+
+ assert( pm == op->setMessage( "m/r: (3/3) final reduce to collection" , _db.count( _config.incLong, BSONObj(), QueryOption_SlaveOk ) ) );
+
+ shared_ptr<Cursor> temp = bestGuessCursor( _config.incLong.c_str() , BSONObj() , sortKey );
+ auto_ptr<ClientCursor> cursor( new ClientCursor( QueryOption_NoCursorTimeout , temp , _config.incLong.c_str() ) );
+
+ // iterate over all sorted objects
+ while ( cursor->ok() ) {
+ BSONObj o = cursor->current().getOwned();
+ cursor->advance();
+
+ pm.hit();
+
+ if ( o.woSortOrder( prev , sortKey ) == 0 ) {
+ // object is same as previous, add to array
+ all.push_back( o );
+ if ( pm->hits() % 1000 == 0 ) {
+ if ( ! cursor->yield() ) {
+ cursor.release();
+ break;
+ }
+ killCurrentOp.checkForInterrupt();
+ }
+ continue;
+ }
+
+ ClientCursor::YieldLock yield (cursor.get());
+
+ try {
+                    // reduce and finalize the accumulated array
+ finalReduce( all );
+ }
+ catch (...) {
+ yield.relock();
+ cursor.release();
+ throw;
+ }
+
+ all.clear();
+ prev = o;
+ all.push_back( o );
+
+ if ( ! yield.stillOk() ) {
+ cursor.release();
+ break;
+ }
+
+ killCurrentOp.checkForInterrupt();
+ }
+
+ // we need to release here since we temp release below
+ cursor.release();
+
+ {
+ dbtempreleasecond tl;
+ if ( ! tl.unlocked() )
+ log( LL_WARNING ) << "map/reduce can't temp release" << endl;
+ // reduce and finalize last array
+ finalReduce( all );
+ }
+
+ pm.finished();
+ }
+
+ /**
+ * Attempts to reduce objects in the memory map.
+ * A new memory map will be created to hold the results.
+ * If applicable, objects with unique key may be dumped to inc collection.
+ * Input and output objects are both {"0": key, "1": val}
+ */
+ void State::reduceInMemory() {
+
+ if (_jsMode) {
+ // in js mode the reduce is applied when writing to collection
+ return;
+ }
+
+ auto_ptr<InMemory> n( new InMemory() ); // for new data
+ long nSize = 0;
+ _dupCount = 0;
+
+ for ( InMemory::iterator i=_temp->begin(); i!=_temp->end(); ++i ) {
+ BSONObj key = i->first;
+ BSONList& all = i->second;
+
+ if ( all.size() == 1 ) {
+ // only 1 value for this key
+ if ( _onDisk ) {
+ // this key has low cardinality, so just write to collection
+ writelock l(_config.incLong);
+ Client::Context ctx(_config.incLong.c_str());
+ _insertToInc( *(all.begin()) );
+ }
+ else {
+ // add to new map
+ _add( n.get() , all[0] , nSize );
+ }
+ }
+ else if ( all.size() > 1 ) {
+ // several values, reduce and add to map
+ BSONObj res = _config.reducer->reduce( all );
+ _add( n.get() , res , nSize );
+ }
+ }
+
+ // swap maps
+ _temp.reset( n.release() );
+ _size = nSize;
+ }
+
+ /**
+ * Dumps the entire in memory map to the inc collection.
+ */
+ void State::dumpToInc() {
+ if ( ! _onDisk )
+ return;
+
+ writelock l(_config.incLong);
+ Client::Context ctx(_config.incLong);
+
+ for ( InMemory::iterator i=_temp->begin(); i!=_temp->end(); i++ ) {
+ BSONList& all = i->second;
+ if ( all.size() < 1 )
+ continue;
+
+ for ( BSONList::iterator j=all.begin(); j!=all.end(); j++ )
+ _insertToInc( *j );
+ }
+ _temp->clear();
+ _size = 0;
+
+ }
+
+ /**
+ * Adds object to in memory map
+ */
+ void State::emit( const BSONObj& a ) {
+ _numEmits++;
+ _add( _temp.get() , a , _size );
+ }
+
+ void State::_add( InMemory* im, const BSONObj& a , long& size ) {
+ BSONList& all = (*im)[a];
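+            // the map is keyed on the whole emitted tuple, but TupleKeyCmp (mr.h) compares only
+            // the first element, so every value emitted for a given key lands in the same list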
+ all.push_back( a );
+ size += a.objsize() + 16;
+ if (all.size() > 1)
+ ++_dupCount;
+ }
+
+ /**
+         * this method checks the size of the in-memory map and potentially flushes it to disk
+ */
+ void State::checkSize() {
+ if (_jsMode) {
+ // try to reduce if it is beneficial
+ int dupCt = _scope->getNumberInt("_dupCt");
+ int keyCt = _scope->getNumberInt("_keyCt");
+
+ if (keyCt > _config.jsMaxKeys) {
+ // too many keys for JS, switch to mixed
+ _bailFromJS(BSONObj(), this);
+ // then fall through to check map size
+ }
+ else if (dupCt > (keyCt * _config.reduceTriggerRatio)) {
+ // reduce now to lower mem usage
+ Timer t;
+ _scope->invoke(_reduceAll, 0, 0, 0, true);
+ log(1) << " MR - did reduceAll: keys=" << keyCt << " dups=" << dupCt << " newKeys=" << _scope->getNumberInt("_keyCt") << " time=" << t.millis() << "ms" << endl;
+ return;
+ }
+ }
+
+ if (_jsMode)
+ return;
+
+ if (_size > _config.maxInMemSize || _dupCount > (_temp->size() * _config.reduceTriggerRatio)) {
+ // attempt to reduce in memory map, if memory is too high or we have many duplicates
+ long oldSize = _size;
+ Timer t;
+ reduceInMemory();
+ log(1) << " MR - did reduceInMemory: size=" << oldSize << " dups=" << _dupCount << " newSize=" << _size << " time=" << t.millis() << "ms" << endl;
+
+ // if size is still high, or values are not reducing well, dump
+ if ( _onDisk && (_size > _config.maxInMemSize || _size > oldSize / 2) ) {
+ dumpToInc();
+ log(1) << " MR - dumping to db" << endl;
+ }
+ }
+ }
+
+ /**
+         * native emit handler called from the JS map function when not in full JS mode
+ */
+ BSONObj fast_emit( const BSONObj& args, void* data ) {
+ uassert( 10077 , "fast_emit takes 2 args" , args.nFields() == 2 );
+ uassert( 13069 , "an emit can't be more than half max bson size" , args.objsize() < ( BSONObjMaxUserSize / 2 ) );
+
+ State* state = (State*) data;
+ if ( args.firstElement().type() == Undefined ) {
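+                // a JS 'undefined' key is rebuilt as a null key so the tuple can be
+                // stored and compared as BSON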
+ BSONObjBuilder b( args.objsize() );
+ b.appendNull( "" );
+ BSONObjIterator i( args );
+ i.next();
+ b.append( i.next() );
+ state->emit( b.obj() );
+ }
+ else {
+ state->emit( args );
+ }
+ return BSONObj();
+ }
+
+ /**
+         * function called when we realize we can't keep using JS mode for this m/r; the offending key, if any, is re-emitted through fast_emit
+ */
+ BSONObj _bailFromJS( const BSONObj& args, void* data ) {
+ State* state = (State*) data;
+ state->bailFromJS();
+
+ // emit this particular key if there is one
+ if (!args.isEmpty()) {
+ fast_emit(args, data);
+ }
+ return BSONObj();
+ }
+
+ /**
+ * This class represents a map/reduce command executed on a single server
+ */
+ class MapReduceCommand : public Command {
+ public:
+ MapReduceCommand() : Command("mapReduce", false, "mapreduce") {}
+
+ /* why !replset ?
+ bad things happen with --slave (i think because of this)
+ */
+ virtual bool slaveOk() const { return !replSet; }
+
+ virtual bool slaveOverrideOk() { return true; }
+
+ virtual void help( stringstream &help ) const {
+ help << "Run a map/reduce operation on the server.\n";
+ help << "Note this is used for aggregation, not querying, in MongoDB.\n";
+ help << "http://www.mongodb.org/display/DOCS/MapReduce";
+ }
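+
+            // Illustrative invocation (collection and field names are examples only):
+            //   db.runCommand({ mapreduce: "events",
+            //                   map: function() { emit(this.user, 1); },
+            //                   reduce: function(k, vals) { return Array.sum(vals); },
+            //                   out: { reduce: "user_counts" } })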
+
+ virtual LockType locktype() const { return NONE; }
+
+ bool run(const string& dbname , BSONObj& cmd, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ Timer t;
+ Client& client = cc();
+ CurOp * op = client.curop();
+
+ Config config( dbname , cmd );
+
+ log(1) << "mr ns: " << config.ns << endl;
+
+ bool shouldHaveData = false;
+
+ long long num = 0;
+ long long inReduce = 0;
+
+ BSONObjBuilder countsBuilder;
+ BSONObjBuilder timingBuilder;
+ State state( config );
+ if ( ! state.sourceExists() ) {
+ errmsg = "ns doesn't exist";
+ return false;
+ }
+
+ if (replSet && state.isOnDisk()) {
+                // this means that it will be doing a write operation; make sure we are on a master
+ // ideally this check should be in slaveOk(), but at that point config is not known
+ if (!isMaster(dbname.c_str())) {
+ errmsg = "not master";
+ return false;
+ }
+ }
+
+ if (state.isOnDisk() && !client.getAuthenticationInfo()->isAuthorized(dbname)) {
+ errmsg = "read-only user cannot output mapReduce to collection, use inline instead";
+ return false;
+ }
+
+ try {
+ state.init();
+ state.prepTempCollection();
+ ProgressMeterHolder pm( op->setMessage( "m/r: (1/3) emit phase" , state.incomingDocuments() ) );
+
+                wassert( config.limit < 0x4000000 ); // see cast to 32 bit unsigned in incomingDocuments()
+ long long mapTime = 0;
+ {
+ readlock lock( config.ns );
+ Client::Context ctx( config.ns );
+
+ ShardChunkManagerPtr chunkManager;
+ if ( shardingState.needShardChunkManager( config.ns ) ) {
+ chunkManager = shardingState.getShardChunkManager( config.ns );
+ }
+
+ // obtain cursor on data to apply mr to, sorted
+ shared_ptr<Cursor> temp = NamespaceDetailsTransient::getCursor( config.ns.c_str(), config.filter, config.sort );
+ uassert( 15876, str::stream() << "could not create cursor over " << config.ns << " for query : " << config.filter << " sort : " << config.sort, temp.get() );
+ auto_ptr<ClientCursor> cursor( new ClientCursor( QueryOption_NoCursorTimeout , temp , config.ns.c_str() ) );
+ uassert( 15877, str::stream() << "could not create client cursor over " << config.ns << " for query : " << config.filter << " sort : " << config.sort, cursor.get() );
+
+ Timer mt;
+ // go through each doc
+ while ( cursor->ok() ) {
+ if ( ! cursor->currentMatches() ) {
+ cursor->advance();
+ continue;
+ }
+
+                        // make sure we don't process duplicates in case data gets moved around during map
+ // TODO This won't actually help when data gets moved, it's to handle multikeys.
+ if ( cursor->currentIsDup() ) {
+ cursor->advance();
+ continue;
+ }
+
+ BSONObj o = cursor->current();
+ cursor->advance();
+
+ // check to see if this is a new object we don't own yet
+ // because of a chunk migration
+ if ( chunkManager && ! chunkManager->belongsToMe( o ) )
+ continue;
+
+ // do map
+ if ( config.verbose ) mt.reset();
+ config.mapper->map( o );
+ if ( config.verbose ) mapTime += mt.micros();
+
+ num++;
+ if ( num % 1000 == 0 ) {
+ // try to yield lock regularly
+ ClientCursor::YieldLock yield (cursor.get());
+ Timer t;
+ // check if map needs to be dumped to disk
+ state.checkSize();
+ inReduce += t.micros();
+
+ if ( ! yield.stillOk() ) {
+ cursor.release();
+ break;
+ }
+
+ killCurrentOp.checkForInterrupt();
+ }
+ pm.hit();
+
+ if ( config.limit && num >= config.limit )
+ break;
+ }
+ }
+ pm.finished();
+
+ killCurrentOp.checkForInterrupt();
+ // update counters
+ countsBuilder.appendNumber( "input" , num );
+ countsBuilder.appendNumber( "emit" , state.numEmits() );
+ if ( state.numEmits() )
+ shouldHaveData = true;
+
+ timingBuilder.append( "mapTime" , mapTime / 1000 );
+ timingBuilder.append( "emitLoop" , t.millis() );
+
+ op->setMessage( "m/r: (2/3) final reduce in memory" );
+ Timer t;
+ // do reduce in memory
+ // this will be the last reduce needed for inline mode
+ state.reduceInMemory();
+ // if not inline: dump the in memory map to inc collection, all data is on disk
+ state.dumpToInc();
+ // final reduce
+ state.finalReduce( op , pm );
+ inReduce += t.micros();
+ countsBuilder.appendNumber( "reduce" , state.numReduces() );
+ timingBuilder.append( "reduceTime" , inReduce / 1000 );
+ timingBuilder.append( "mode" , state.jsMode() ? "js" : "mixed" );
+
+ long long finalCount = state.postProcessCollection(op, pm);
+ state.appendResults( result );
+
+ timingBuilder.append( "total" , t.millis() );
+ result.append( "timeMillis" , t.millis() );
+ countsBuilder.appendNumber( "output" , finalCount );
+ if ( config.verbose ) result.append( "timing" , timingBuilder.obj() );
+ result.append( "counts" , countsBuilder.obj() );
+
+ if ( finalCount == 0 && shouldHaveData ) {
+ result.append( "cmd" , cmd );
+ errmsg = "there were emits but no data!";
+ return false;
+ }
+
+ }
+ catch( SendStaleConfigException& e ){
+ log() << "mr detected stale config, should retry" << causedBy(e) << endl;
+ throw e;
+ }
+ // TODO: The error handling code for queries is v. fragile,
+ // *requires* rethrow AssertionExceptions - should probably fix.
+ catch ( AssertionException& e ){
+ log() << "mr failed, removing collection" << causedBy(e) << endl;
+ throw e;
+ }
+ catch ( std::exception& e ){
+ log() << "mr failed, removing collection" << causedBy(e) << endl;
+ throw e;
+ }
+ catch ( ... ) {
+ log() << "mr failed for unknown reason, removing collection" << endl;
+ throw;
+ }
+
+ return true;
+ }
+
+ } mapReduceCommand;
+
+ /**
+ * This class represents a map/reduce command executed on the output server of a sharded env
+ */
+ class MapReduceFinishCommand : public Command {
+ public:
+ MapReduceFinishCommand() : Command( "mapreduce.shardedfinish" ) {}
+ virtual bool slaveOk() const { return !replSet; }
+ virtual bool slaveOverrideOk() { return true; }
+
+ virtual LockType locktype() const { return NONE; }
+ bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ ShardedConnectionInfo::addHook();
+ // legacy name
+ string shardedOutputCollection = cmdObj["shardedOutputCollection"].valuestrsafe();
+ string inputNS = cmdObj["inputNS"].valuestrsafe();
+ if (inputNS.empty())
+ inputNS = dbname + "." + shardedOutputCollection;
+
+ Client& client = cc();
+ CurOp * op = client.curop();
+
+ Config config( dbname , cmdObj.firstElement().embeddedObjectUserCheck() );
+ State state(config);
+ state.init();
+
+ // no need for incremental collection because records are already sorted
+ config.incLong = config.tempLong;
+
+ BSONObj shardCounts = cmdObj["shardCounts"].embeddedObjectUserCheck();
+ BSONObj counts = cmdObj["counts"].embeddedObjectUserCheck();
+
+ ProgressMeterHolder pm( op->setMessage( "m/r: merge sort and reduce" ) );
+ set<ServerAndQuery> servers;
+ vector< auto_ptr<DBClientCursor> > shardCursors;
+
+ {
+ // parse per shard results
+ BSONObjIterator i( shardCounts );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ string shard = e.fieldName();
+// BSONObj res = e.embeddedObjectUserCheck();
+ servers.insert( shard );
+ }
+ }
+
+ state.prepTempCollection();
+
+ BSONList values;
+ if (!config.outDB.empty()) {
+ BSONObjBuilder loc;
+ if ( !config.outDB.empty())
+ loc.append( "db" , config.outDB );
+ if ( !config.finalShort.empty() )
+ loc.append( "collection" , config.finalShort );
+ result.append("result", loc.obj());
+ }
+ else {
+ if ( !config.finalShort.empty() )
+ result.append( "result" , config.finalShort );
+ }
+
+ // fetch result from other shards 1 chunk at a time
+ // it would be better to do just one big $or query, but then the sorting would not be efficient
+ string shardName = shardingState.getShardName();
+ DBConfigPtr confOut = grid.getDBConfig( dbname , false );
+ vector<ChunkPtr> chunks;
+ if ( confOut->isSharded(config.finalLong) ) {
+ ChunkManagerPtr cm = confOut->getChunkManager( config.finalLong );
+ const ChunkMap& chunkMap = cm->getChunkMap();
+ for ( ChunkMap::const_iterator it = chunkMap.begin(); it != chunkMap.end(); ++it ) {
+ ChunkPtr chunk = it->second;
+ if (chunk->getShard().getName() == shardName) chunks.push_back(chunk);
+ }
+ }
+
+ long long inputCount = 0;
+ unsigned int index = 0;
+ BSONObj query;
+ BSONArrayBuilder chunkSizes;
+ while (true) {
+ ChunkPtr chunk;
+ if (chunks.size() > 0) {
+ chunk = chunks[index];
+ BSONObjBuilder b;
+ b.appendAs(chunk->getMin().firstElement(), "$gte");
+ b.appendAs(chunk->getMax().firstElement(), "$lt");
+ query = BSON("_id" << b.obj());
+// chunkSizes.append(min);
+ }
+
+ // reduce from each shard for a chunk
+ BSONObj sortKey = BSON( "_id" << 1 );
+ ParallelSortClusteredCursor cursor( servers , inputNS , Query( query ).sort( sortKey ) );
+ cursor.init();
+ int chunkSize = 0;
+
+ while ( cursor.more() || !values.empty() ) {
+ BSONObj t;
+ if (cursor.more()) {
+ t = cursor.next().getOwned();
+ ++inputCount;
+
+ if ( values.size() == 0 ) {
+ values.push_back( t );
+ continue;
+ }
+
+ if ( t.woSortOrder( *(values.begin()) , sortKey ) == 0 ) {
+ values.push_back( t );
+ continue;
+ }
+ }
+
+ BSONObj res = config.reducer->finalReduce( values , config.finalizer.get());
+ chunkSize += res.objsize();
+ if (state.isOnDisk())
+ state.insertToInc(res);
+ else
+ state.emit(res);
+ values.clear();
+ if (!t.isEmpty())
+ values.push_back( t );
+ }
+
+ if (chunk) {
+ chunkSizes.append(chunk->getMin());
+ chunkSizes.append(chunkSize);
+ }
+ if (++index >= chunks.size())
+ break;
+ }
+
+ result.append( "chunkSizes" , chunkSizes.arr() );
+
+ long long outputCount = state.postProcessCollection(op, pm);
+ state.appendResults( result );
+
+ BSONObjBuilder countsB(32);
+ countsB.append("input", inputCount);
+ countsB.append("reduce", state.numReduces());
+ countsB.append("output", outputCount);
+ result.append( "counts" , countsB.obj() );
+
+ return 1;
+ }
+ } mapReduceFinishCommand;
+
+ }
+
+}
+
diff --git a/src/mongo/db/commands/mr.h b/src/mongo/db/commands/mr.h
new file mode 100644
index 00000000000..592769d82da
--- /dev/null
+++ b/src/mongo/db/commands/mr.h
@@ -0,0 +1,319 @@
+// mr.h
+
+/**
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "pch.h"
+
+namespace mongo {
+
+ namespace mr {
+
+ typedef vector<BSONObj> BSONList;
+
+ class State;
+
+ // ------------ function interfaces -----------
+
+ class Mapper : boost::noncopyable {
+ public:
+ virtual ~Mapper() {}
+ virtual void init( State * state ) = 0;
+
+ virtual void map( const BSONObj& o ) = 0;
+ };
+
+ class Finalizer : boost::noncopyable {
+ public:
+ virtual ~Finalizer() {}
+ virtual void init( State * state ) = 0;
+
+ /**
+ * this takes a tuple and returns a tuple
+ */
+ virtual BSONObj finalize( const BSONObj& tuple ) = 0;
+ };
+
+ class Reducer : boost::noncopyable {
+ public:
+ Reducer() : numReduces(0) {}
+ virtual ~Reducer() {}
+ virtual void init( State * state ) = 0;
+
+ virtual BSONObj reduce( const BSONList& tuples ) = 0;
+            /** this means it's a final reduce, even if there is no finalizer */
+ virtual BSONObj finalReduce( const BSONList& tuples , Finalizer * finalizer ) = 0;
+
+ long long numReduces;
+ };
+
+ // ------------ js function implementations -----------
+
+ /**
+         * holder for a Scope and a ScriptingFunction
+         * the Scope is obtained lazily, on first use, visitor-style
+ */
+ class JSFunction : boost::noncopyable {
+ public:
+ /**
+ * @param type (map|reduce|finalize)
+ */
+ JSFunction( string type , const BSONElement& e );
+ virtual ~JSFunction() {}
+
+ virtual void init( State * state );
+
+ Scope * scope() const { return _scope; }
+ ScriptingFunction func() const { return _func; }
+
+ private:
+ string _type;
+ string _code; // actual javascript code
+ BSONObj _wantedScope; // this is for CodeWScope
+
+ Scope * _scope; // this is not owned by us, and might be shared
+ ScriptingFunction _func;
+ };
+
+ class JSMapper : public Mapper {
+ public:
+ JSMapper( const BSONElement & code ) : _func( "_map" , code ) {}
+ virtual void map( const BSONObj& o );
+ virtual void init( State * state );
+
+ private:
+ JSFunction _func;
+ BSONObj _params;
+ };
+
+ class JSReducer : public Reducer {
+ public:
+ JSReducer( const BSONElement& code ) : _func( "_reduce" , code ) {}
+ virtual void init( State * state );
+
+ virtual BSONObj reduce( const BSONList& tuples );
+ virtual BSONObj finalReduce( const BSONList& tuples , Finalizer * finalizer );
+
+ private:
+
+ /**
+ * result in "return"
+ * @param key OUT
+ * @param endSizeEstimate OUT
+ */
+ void _reduce( const BSONList& values , BSONObj& key , int& endSizeEstimate );
+
+ JSFunction _func;
+ };
+
+ class JSFinalizer : public Finalizer {
+ public:
+ JSFinalizer( const BSONElement& code ) : _func( "_finalize" , code ) {}
+ virtual BSONObj finalize( const BSONObj& o );
+ virtual void init( State * state ) { _func.init( state ); }
+ private:
+ JSFunction _func;
+
+ };
+
+ // -----------------
+
+
+ class TupleKeyCmp {
+ public:
+ TupleKeyCmp() {}
+ bool operator()( const BSONObj &l, const BSONObj &r ) const {
+ return l.firstElement().woCompare( r.firstElement() ) < 0;
+ }
+ };
+
+ typedef map< BSONObj,BSONList,TupleKeyCmp > InMemory; // from key to list of tuples
+
+ /**
+ * holds map/reduce config information
+ */
+ class Config {
+ public:
+ Config( const string& _dbname , const BSONObj& cmdObj );
+
+ string dbname;
+ string ns;
+
+ // options
+ bool verbose;
+ bool jsMode;
+ int splitInfo;
+
+ // query options
+
+ BSONObj filter;
+ BSONObj sort;
+ long long limit;
+
+ // functions
+
+ scoped_ptr<Mapper> mapper;
+ scoped_ptr<Reducer> reducer;
+ scoped_ptr<Finalizer> finalizer;
+
+ BSONObj mapParams;
+ BSONObj scopeSetup;
+
+ // output tables
+ string incLong;
+ string tempLong;
+
+ string finalShort;
+ string finalLong;
+
+ string outDB;
+
+ // max number of keys allowed in JS map before switching mode
+ long jsMaxKeys;
+ // ratio of duplicates vs unique keys before reduce is triggered in js mode
+ float reduceTriggerRatio;
+ // maximum size of map before it gets dumped to disk
+ long maxInMemSize;
+
+ enum { REPLACE , // atomically replace the collection
+ MERGE , // merge keys, override dups
+ REDUCE , // merge keys, reduce dups
+ INMEMORY // only store in memory, limited in size
+ } outType;
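+            // typical "out" specs and the outType they select (illustrative):
+            //   out: "coll" or { replace: "coll" }  -> REPLACE
+            //   out: { merge: "coll" }              -> MERGE
+            //   out: { reduce: "coll" }             -> REDUCE
+            //   out: { inline: 1 }                  -> INMEMORY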
+
+ // if true, no lock during output operation
+ bool outNonAtomic;
+
+ static AtomicUInt JOB_NUMBER;
+        }; // end Config
+
+ /**
+ * stores information about intermediate map reduce state
+ * controls flow of data from map->reduce->finalize->output
+ */
+ class State {
+ public:
+ State( const Config& c );
+ ~State();
+
+ void init();
+
+ // ---- prep -----
+ bool sourceExists();
+
+ long long incomingDocuments();
+
+ // ---- map stage ----
+
+ /**
+             * stages the emitted object in the in-memory map
+ */
+ void emit( const BSONObj& a );
+
+ /**
+             * if the map is big, run a reduce
+             * if it's still big, dump to the inc collection
+ */
+ void checkSize();
+
+ /**
+ * run reduce on _temp
+ */
+ void reduceInMemory();
+
+ /**
+             * transfers the in-memory map to the inc collection
+ */
+ void dumpToInc();
+ void insertToInc( BSONObj& o );
+ void _insertToInc( BSONObj& o );
+
+ // ------ reduce stage -----------
+
+ void prepTempCollection();
+
+ void finalReduce( BSONList& values );
+
+ void finalReduce( CurOp * op , ProgressMeterHolder& pm );
+
+ // ------- cleanup/data positioning ----------
+
+ /**
+               @return number of objects in the output collection
+ */
+ long long postProcessCollection( CurOp* op , ProgressMeterHolder& pm );
+ long long postProcessCollectionNonAtomic( CurOp* op , ProgressMeterHolder& pm );
+
+ /**
+             * if INMEMORY, appends the results themselves;
+ * may also append stats or anything else it likes
+ */
+ void appendResults( BSONObjBuilder& b );
+
+ // -------- util ------------
+
+ /**
+ * inserts with correct replication semantics
+ */
+ void insert( const string& ns , const BSONObj& o );
+
+ // ------ simple accessors -----
+
+            /** State maintains ownership, do not use past State lifetime */
+ Scope* scope() { return _scope.get(); }
+
+ const Config& config() { return _config; }
+
+ const bool isOnDisk() { return _onDisk; }
+
+ long long numEmits() const { if (_jsMode) return _scope->getNumberLongLong("_emitCt"); return _numEmits; }
+ long long numReduces() const { if (_jsMode) return _scope->getNumberLongLong("_redCt"); return _config.reducer->numReduces; }
+
+ bool jsMode() {return _jsMode;}
+ void switchMode(bool jsMode);
+ void bailFromJS();
+
+ const Config& _config;
+ DBDirectClient _db;
+
+ protected:
+
+ void _add( InMemory* im , const BSONObj& a , long& size );
+
+ scoped_ptr<Scope> _scope;
+ bool _onDisk; // if the end result of this map reduce is disk or not
+
+ scoped_ptr<InMemory> _temp;
+ long _size; // bytes in _temp
+ long _dupCount; // number of duplicate key entries
+
+ long long _numEmits;
+
+ bool _jsMode;
+ ScriptingFunction _reduceAll;
+ ScriptingFunction _reduceAndEmit;
+ ScriptingFunction _reduceAndFinalize;
+ ScriptingFunction _reduceAndFinalizeAndInsert;
+ };
+
+ BSONObj fast_emit( const BSONObj& args, void* data );
+ BSONObj _bailFromJS( const BSONObj& args, void* data );
+
+ } // end mr namespace
+}
+
+
diff --git a/src/mongo/db/commands/pipeline.cpp b/src/mongo/db/commands/pipeline.cpp
new file mode 100755
index 00000000000..4ad5e342aed
--- /dev/null
+++ b/src/mongo/db/commands/pipeline.cpp
@@ -0,0 +1,405 @@
+/**
+ * Copyright (c) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "db/commands/pipeline.h"
+
+#include "db/cursor.h"
+#include "db/pipeline/accumulator.h"
+#include "db/pipeline/document.h"
+#include "db/pipeline/document_source.h"
+#include "db/pipeline/expression.h"
+#include "db/pipeline/expression_context.h"
+#include "db/pdfile.h"
+#include "util/mongoutils/str.h"
+
+namespace mongo {
+
+ const char Pipeline::commandName[] = "aggregate";
+ const char Pipeline::pipelineName[] = "pipeline";
+ const char Pipeline::fromRouterName[] = "fromRouter";
+ const char Pipeline::splitMongodPipelineName[] = "splitMongodPipeline";
+
+ Pipeline::~Pipeline() {
+ }
+
+ Pipeline::Pipeline(const intrusive_ptr<ExpressionContext> &pTheCtx):
+ collectionName(),
+ sourceVector(),
+ splitMongodPipeline(DEBUG_BUILD == 1), /* test: always split for DEV */
+ pCtx(pTheCtx) {
+ }
+
+
+
+ /* this structure is used to make a lookup table of operators */
+ struct StageDesc {
+ const char *pName;
+ intrusive_ptr<DocumentSource> (*pFactory)(
+ BSONElement *, const intrusive_ptr<ExpressionContext> &);
+ };
+
+ /* this table must be in alphabetical order by name for bsearch() */
+ static const StageDesc stageDesc[] = {
+#ifdef NEVER /* disabled for now in favor of $match */
+ {DocumentSourceFilter::filterName,
+ DocumentSourceFilter::createFromBson},
+#endif
+ {DocumentSourceGroup::groupName,
+ DocumentSourceGroup::createFromBson},
+ {DocumentSourceLimit::limitName,
+ DocumentSourceLimit::createFromBson},
+ {DocumentSourceMatch::matchName,
+ DocumentSourceMatch::createFromBson},
+#ifdef LATER /* https://jira.mongodb.org/browse/SERVER-3253 */
+ {DocumentSourceOut::outName,
+ DocumentSourceOut::createFromBson},
+#endif
+ {DocumentSourceProject::projectName,
+ DocumentSourceProject::createFromBson},
+ {DocumentSourceSkip::skipName,
+ DocumentSourceSkip::createFromBson},
+ {DocumentSourceSort::sortName,
+ DocumentSourceSort::createFromBson},
+ {DocumentSourceUnwind::unwindName,
+ DocumentSourceUnwind::createFromBson},
+ };
+ static const size_t nStageDesc = sizeof(stageDesc) / sizeof(StageDesc);
+
+ static int stageDescCmp(const void *pL, const void *pR) {
+ return strcmp(((const StageDesc *)pL)->pName,
+ ((const StageDesc *)pR)->pName);
+ }
+
+ boost::shared_ptr<Pipeline> Pipeline::parseCommand(
+ string &errmsg, BSONObj &cmdObj,
+ const intrusive_ptr<ExpressionContext> &pCtx) {
+ boost::shared_ptr<Pipeline> pPipeline(new Pipeline(pCtx));
+ vector<BSONElement> pipeline;
+
+ /* gather the specification for the aggregation */
+ for(BSONObj::iterator cmdIterator = cmdObj.begin();
+ cmdIterator.more(); ) {
+ BSONElement cmdElement(cmdIterator.next());
+ const char *pFieldName = cmdElement.fieldName();
+
+ /* look for the aggregation command */
+ if (!strcmp(pFieldName, commandName)) {
+ pPipeline->collectionName = cmdElement.String();
+ continue;
+ }
+
+ /* check for the collection name */
+ if (!strcmp(pFieldName, pipelineName)) {
+ pipeline = cmdElement.Array();
+ continue;
+ }
+
+ /* if the request came from the router, we're in a shard */
+ if (!strcmp(pFieldName, fromRouterName)) {
+ pCtx->setInShard(cmdElement.Bool());
+ continue;
+ }
+
+ /* check for debug options */
+ if (!strcmp(pFieldName, splitMongodPipelineName)) {
+ pPipeline->splitMongodPipeline = true;
+ continue;
+ }
+
+ /* we didn't recognize a field in the command */
+ ostringstream sb;
+ sb <<
+ "Pipeline::parseCommand(): unrecognized field \"" <<
+                cmdElement.fieldName() << "\"";
+ errmsg = sb.str();
+ return boost::shared_ptr<Pipeline>();
+ }
+
+ /*
+ If we get here, we've harvested the fields we expect for a pipeline.
+
+ Set up the specified document source pipeline.
+ */
+ SourceVector *pSourceVector = &pPipeline->sourceVector; // shorthand
+
+ /* iterate over the steps in the pipeline */
+ const size_t nSteps = pipeline.size();
+ for(size_t iStep = 0; iStep < nSteps; ++iStep) {
+ /* pull out the pipeline element as an object */
+ BSONElement pipeElement(pipeline[iStep]);
+ uassert(15942, str::stream() << "pipeline element " <<
+ iStep << " is not an object",
+ pipeElement.type() == Object);
+ BSONObj bsonObj(pipeElement.Obj());
+
+ intrusive_ptr<DocumentSource> pSource;
+
+ /* use the object to add a DocumentSource to the processing chain */
+ BSONObjIterator bsonIterator(bsonObj);
+ while(bsonIterator.more()) {
+ BSONElement bsonElement(bsonIterator.next());
+ const char *pFieldName = bsonElement.fieldName();
+
+ /* select the appropriate operation and instantiate */
+ StageDesc key;
+ key.pName = pFieldName;
+ const StageDesc *pDesc = (const StageDesc *)
+ bsearch(&key, stageDesc, nStageDesc, sizeof(StageDesc),
+ stageDescCmp);
+ if (pDesc)
+ pSource = (*pDesc->pFactory)(&bsonElement, pCtx);
+ else {
+ ostringstream sb;
+ sb <<
+                        "Pipeline::parseCommand(): unrecognized pipeline op \"" <<
+                        pFieldName << "\"";
+ errmsg = sb.str();
+ return shared_ptr<Pipeline>();
+ }
+ }
+
+ pSourceVector->push_back(pSource);
+ }
+
+ /* if there aren't any pipeline stages, there's nothing more to do */
+ if (!pSourceVector->size())
+ return pPipeline;
+
+ /*
+ Move filters up where possible.
+
+          CW TODO -- move filter past projections where possible, noting
+ corresponding field renaming.
+ */
+
+ /*
+ Wherever there is a match immediately following a sort, swap them.
+ This means we sort fewer items. Neither changes the documents in
+ the stream, so this transformation shouldn't affect the result.
+
+ We do this first, because then when we coalesce operators below,
+ any adjacent matches will be combined.
+ */
+ for(size_t srcn = pSourceVector->size(), srci = 1;
+ srci < srcn; ++srci) {
+ intrusive_ptr<DocumentSource> &pSource = pSourceVector->at(srci);
+ if (dynamic_cast<DocumentSourceMatch *>(pSource.get())) {
+ intrusive_ptr<DocumentSource> &pPrevious =
+ pSourceVector->at(srci - 1);
+ if (dynamic_cast<DocumentSourceSort *>(pPrevious.get())) {
+ /* swap this item with the previous */
+ intrusive_ptr<DocumentSource> pTemp(pPrevious);
+ pPrevious = pSource;
+ pSource = pTemp;
+ }
+ }
+ }
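+
+        /*
+          e.g. (illustrative) the pair
+            [ { $sort: { x: 1 } }, { $match: { x: { $gt: 5 } } } ]
+          becomes
+            [ { $match: { x: { $gt: 5 } } }, { $sort: { x: 1 } } ]
+        */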
+
+ /*
+ Coalesce adjacent filters where possible. Two adjacent filters
+ are equivalent to one filter whose predicate is the conjunction of
+ the two original filters' predicates. For now, capture this by
+          giving any DocumentSource the option to absorb its successor; this
+ will also allow adjacent projections to coalesce when possible.
+
+ Run through the DocumentSources, and give each one the opportunity
+ to coalesce with its successor. If successful, remove the
+ successor.
+
+ Move all document sources to a temporary list.
+ */
+ SourceVector tempVector(*pSourceVector);
+ pSourceVector->clear();
+
+ /* move the first one to the final list */
+ pSourceVector->push_back(tempVector[0]);
+
+ /* run through the sources, coalescing them or keeping them */
+ for(size_t tempn = tempVector.size(), tempi = 1;
+ tempi < tempn; ++tempi) {
+ /*
+ If we can't coalesce the source with the last, then move it
+ to the final list, and make it the new last. (If we succeeded,
+ then we're still on the same last, and there's no need to move
+ or do anything with the source -- the destruction of tempVector
+ will take care of the rest.)
+ */
+ intrusive_ptr<DocumentSource> &pLastSource = pSourceVector->back();
+ intrusive_ptr<DocumentSource> &pTemp = tempVector.at(tempi);
+ if (!pLastSource->coalesce(pTemp))
+ pSourceVector->push_back(pTemp);
+ }
+
+ /* optimize the elements in the pipeline */
+ for(SourceVector::iterator iter(pSourceVector->begin()),
+ listEnd(pSourceVector->end()); iter != listEnd; ++iter)
+ (*iter)->optimize();
+
+ return pPipeline;
+ }
+
+ shared_ptr<Pipeline> Pipeline::splitForSharded() {
+        /* create and initialize the shard spec we'll return */
+ shared_ptr<Pipeline> pShardPipeline(new Pipeline(pCtx));
+ pShardPipeline->collectionName = collectionName;
+
+ /* put the source list aside */
+ SourceVector tempVector(sourceVector);
+ sourceVector.clear();
+
+ /*
+ Run through the pipeline, looking for points to split it into
+ shard pipelines, and the rest.
+ */
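+        /*
+          e.g. (illustrative) a pipeline [ $match, $group, $sort ] ends up with
+          [ $match, $group ] in the returned shard pipeline, while this pipeline
+          becomes [ <group merger>, $sort ] to combine the shard results in mongos.
+        */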
+ while(!tempVector.empty()) {
+ intrusive_ptr<DocumentSource> &pSource = tempVector.front();
+
+#ifdef MONGODB_SERVER3832 /* see https://jira.mongodb.org/browse/SERVER-3832 */
+ DocumentSourceSort *pSort =
+ dynamic_cast<DocumentSourceSort *>(pSource.get());
+ if (pSort) {
+ /*
+ There's no point in sorting until the result is combined.
+ Therefore, sorts should be done in mongos, and not in
+ the shard at all. Add all the remaining operators to
+ the mongos list and quit.
+
+ TODO: unless the sort key is the shard key.
+ TODO: we could also do a merge sort in mongos in the
+ future, and split here.
+ */
+ for(size_t tempn = tempVector.size(), tempi = 0;
+ tempi < tempn; ++tempi)
+ sourceVector.push_back(tempVector[tempi]);
+ break;
+ }
+#endif
+
+ /* hang on to this in advance, in case it is a group */
+ DocumentSourceGroup *pGroup =
+ dynamic_cast<DocumentSourceGroup *>(pSource.get());
+
+ /* move the source from the tempVector to the shard sourceVector */
+ pShardPipeline->sourceVector.push_back(pSource);
+ tempVector.erase(tempVector.begin());
+
+ /*
+ If we found a group, that's a split point.
+ */
+ if (pGroup) {
+ /* start this pipeline with the group merger */
+ sourceVector.push_back(pGroup->createMerger());
+
+ /* and then add everything that remains and quit */
+ for(size_t tempn = tempVector.size(), tempi = 0;
+ tempi < tempn; ++tempi)
+ sourceVector.push_back(tempVector[tempi]);
+ break;
+ }
+ }
+
+ return pShardPipeline;
+ }
+
+ void Pipeline::getCursorMods(BSONObjBuilder *pQueryBuilder,
+ BSONObjBuilder *pSortBuilder) {
+ /* look for an initial $match */
+ if (!sourceVector.size())
+ return;
+ const intrusive_ptr<DocumentSource> &pMC = sourceVector.front();
+ const DocumentSourceMatch *pMatch =
+ dynamic_cast<DocumentSourceMatch *>(pMC.get());
+
+ if (pMatch) {
+ /* build the query */
+ pMatch->toMatcherBson(pQueryBuilder);
+
+ /* remove the match from the pipeline */
+ sourceVector.erase(sourceVector.begin());
+ }
+
+ /* look for an initial $sort */
+ if (!sourceVector.size())
+ return;
+#ifdef MONGODB_SERVER3832 /* see https://jira.mongodb.org/browse/SERVER-3832 */
+ const intrusive_ptr<DocumentSource> &pSC = sourceVector.front();
+ const DocumentSourceSort *pSort =
+ dynamic_cast<DocumentSourceSort *>(pSC.get());
+
+ if (pSort) {
+ /* build the sort key */
+ pSort->sortKeyToBson(pSortBuilder, false);
+
+ /* remove the sort from the pipeline */
+ sourceVector.erase(sourceVector.begin());
+ }
+#endif
+ }
+
+ void Pipeline::toBson(BSONObjBuilder *pBuilder) const {
+ /* create an array out of the pipeline operations */
+ BSONArrayBuilder arrayBuilder;
+ for(SourceVector::const_iterator iter(sourceVector.begin()),
+ listEnd(sourceVector.end()); iter != listEnd; ++iter) {
+ intrusive_ptr<DocumentSource> pSource(*iter);
+ pSource->addToBsonArray(&arrayBuilder);
+ }
+
+ /* add the top-level items to the command */
+ pBuilder->append(commandName, getCollectionName());
+ pBuilder->append(pipelineName, arrayBuilder.arr());
+
+ bool btemp;
+ if ((btemp = getSplitMongodPipeline())) {
+ pBuilder->append(splitMongodPipelineName, btemp);
+ }
+ if ((btemp = pCtx->getInRouter())) {
+ pBuilder->append(fromRouterName, btemp);
+ }
+ }
+
+ bool Pipeline::run(BSONObjBuilder &result, string &errmsg,
+ intrusive_ptr<DocumentSource> pSource) {
+ /* chain together the sources we found */
+ for(SourceVector::iterator iter(sourceVector.begin()),
+ listEnd(sourceVector.end()); iter != listEnd; ++iter) {
+ intrusive_ptr<DocumentSource> pTemp(*iter);
+ pTemp->setSource(pSource);
+ pSource = pTemp;
+ }
+ /* pSource is left pointing at the last source in the chain */
+
+ /*
+ Iterate through the resulting documents, and add them to the result.
+ */
+ BSONArrayBuilder resultArray; // where we'll stash the results
+ for(bool hasDocument = !pSource->eof(); hasDocument;
+ hasDocument = pSource->advance()) {
+ boost::intrusive_ptr<Document> pDocument(pSource->getCurrent());
+
+ /* add the document to the result set */
+ BSONObjBuilder documentBuilder;
+ pDocument->toBson(&documentBuilder);
+ resultArray.append(documentBuilder.done());
+ }
+
+ result.appendArray("result", resultArray.arr());
+
+ return true;
+ }
+
+} // namespace mongo
diff --git a/src/mongo/db/commands/pipeline.h b/src/mongo/db/commands/pipeline.h
new file mode 100755
index 00000000000..ef9cc6afe51
--- /dev/null
+++ b/src/mongo/db/commands/pipeline.h
@@ -0,0 +1,183 @@
+/**
+ * Copyright 2011 (c) 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "pch.h"
+
+#include "db/jsobj.h"
+#include "util/timer.h"
+#include "db/commands.h"
+
+namespace mongo {
+ class BSONObj;
+ class BSONObjBuilder;
+ class DocumentSource;
+ class DocumentSourceProject;
+ class Expression;
+ class ExpressionContext;
+ class ExpressionNary;
+ struct OpDesc; // local private struct
+
+    /** a parsed aggregation pipeline: the ordered list of document sources
+        built from an "aggregate" command, with helpers to split it for
+        sharded execution and to run it.
+        */
+ class Pipeline :
+ boost::noncopyable {
+ public:
+ virtual ~Pipeline();
+
+ /*
+ Create a pipeline from the command.
+
+ @param errmsg where to write errors, if there are any
+ @param cmdObj the command object sent from the client
+          @returns the pipeline, if created, otherwise an empty pointer
+ */
+ static boost::shared_ptr<Pipeline> parseCommand(
+ string &errmsg, BSONObj &cmdObj,
+ const intrusive_ptr<ExpressionContext> &pCtx);
+
+ /*
+ Get the collection name from the command.
+
+ @returns the collection name
+ */
+ string getCollectionName() const;
+
+ /*
+ Split the current Pipeline into a Pipeline for each shard, and
+ a Pipeline that combines the results within mongos.
+
+ This permanently alters this pipeline for the merging operation.
+
+ @returns the Spec for the pipeline command that should be sent
+ to the shards
+ */
+ boost::shared_ptr<Pipeline> splitForSharded();
+
+ /*
+ Get Cursor creation modifiers.
+
+ If we have a $match or a $sort at the beginning of the pipeline,
+ these can be extracted and used to modify the cursor we'll use for
+ the initial collection scan.
+
+ If there is a Matcher query at the beginning of the pipeline,
+ get it, by adding its terms to the object under construction. If
+ not, this adds nothing to the object under construction.
+
+ If there is a sort at the beginning of the pipeline, get it, by
+ adding its terms to the object under construction. If not, this adds
+ nothing.
+
+ Optimization steps in parseCommand make sure that for any pairs
+ of adjacent matches and sorts, the match comes first. This ensures
+ that we sort a minimum of items, and doesn't change the result.
+ When getCursorMods() examines the pipeline, it looks for an initial
+ $match. If present, that is put into pQueryBuilder. If there is
+ a query, then the next stage is checked for a $sort, which will go
+ into pSortBuilder. If there is no initial $match, then a check is
+ made for an initial $sort, which will then still be put into
+ pSortBuilder.
+
+ As a side-effect, retrieving the Cursor modifications removes them
+ from the pipeline.
+
+ @param pQueryBuilder an initialized object builder
+ @param pSortBuilder an initialized object builder
+ */
+ void getCursorMods(BSONObjBuilder *pQueryBuilder,
+ BSONObjBuilder *pSortBuilder);
+
+ /*
+ Write the Pipeline as a BSONObj command. This should be the
+ inverse of parseCommand().
+
+ This is only intended to be used by the shard command obtained
+ from splitForSharded(). Some pipeline operations in the merge
+ process do not have equivalent command forms, and using this on
+ the mongos Pipeline will cause assertions.
+
+          @param pBuilder the builder to write the command to
+ */
+ void toBson(BSONObjBuilder *pBuilder) const;
+
+ /*
+ Run the Pipeline on the given source.
+
+ @param result builder to write the result to
+ @param errmsg place to put error messages, if any
+ @param pSource the document source to use at the head of the chain
+ @returns true on success, false if an error occurs
+ */
+ bool run(BSONObjBuilder &result, string &errmsg,
+ intrusive_ptr<DocumentSource> pSource);
+
+ /*
+ Debugging: should the processing pipeline be split within
+ mongod, simulating the real mongos/mongod split? This is determined
+ by setting the splitMongodPipeline field in an "aggregate"
+ command.
+
+ The split itself is handled by the caller, which is currently
+ pipeline_command.cpp.
+
+ @returns true if the pipeline is to be split
+ */
+ bool getSplitMongodPipeline() const;
+
+ /*
+ The aggregation command name.
+ */
+ static const char commandName[];
+
+ private:
+ static const char pipelineName[];
+ static const char fromRouterName[];
+ static const char splitMongodPipelineName[];
+
+ Pipeline(const intrusive_ptr<ExpressionContext> &pCtx);
+
+ string collectionName;
+ typedef vector<intrusive_ptr<DocumentSource> > SourceVector;
+ SourceVector sourceVector;
+
+ bool splitMongodPipeline;
+ intrusive_ptr<ExpressionContext> pCtx;
+ };
+
+} // namespace mongo
+
+
+/* ======================= INLINED IMPLEMENTATIONS ========================== */
+
+namespace mongo {
+
+ inline string Pipeline::getCollectionName() const {
+ return collectionName;
+ }
+
+ inline bool Pipeline::getSplitMongodPipeline() const {
+ if (!DEBUG_BUILD)
+ return false;
+
+ return splitMongodPipeline;
+ }
+
+} // namespace mongo
+
+
diff --git a/src/mongo/db/commands/pipeline_command.cpp b/src/mongo/db/commands/pipeline_command.cpp
new file mode 100755
index 00000000000..9863e14556c
--- /dev/null
+++ b/src/mongo/db/commands/pipeline_command.cpp
@@ -0,0 +1,187 @@
+/**
+ * Copyright (c) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+
+#include "db/commands/pipeline.h"
+#include "db/cursor.h"
+#include "db/pdfile.h"
+#include "db/pipeline/accumulator.h"
+#include "db/pipeline/document.h"
+#include "db/pipeline/document_source.h"
+#include "db/pipeline/expression.h"
+#include "db/pipeline/expression_context.h"
+#include "db/queryoptimizer.h"
+
+namespace mongo {
+
+ /** mongodb "commands" (sent via db.$cmd.findOne(...))
+ subclass to make a command. define a singleton object for it.
+ */
+ class PipelineCommand :
+ public Command {
+ public:
+ // virtuals from Command
+ virtual ~PipelineCommand();
+ virtual bool run(const string &db, BSONObj &cmdObj, int options,
+ string &errmsg, BSONObjBuilder &result, bool fromRepl);
+ virtual LockType locktype() const;
+ virtual bool slaveOk() const;
+ virtual void help(stringstream &help) const;
+
+ PipelineCommand();
+ };
+
+ // self-registering singleton static instance
+ static PipelineCommand pipelineCommand;
+
+ PipelineCommand::PipelineCommand():
+ Command(Pipeline::commandName) {
+ }
+
+ Command::LockType PipelineCommand::locktype() const {
+ return READ;
+ }
+
+ bool PipelineCommand::slaveOk() const {
+ return true;
+ }
+
+ void PipelineCommand::help(stringstream &help) const {
+ help << "{ pipeline : [ { <data-pipe-op>: {...}}, ... ] }";
+ }
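+
+    // Illustrative invocation (collection, fields, and stage contents are examples only):
+    //   db.runCommand({ aggregate: "articles",
+    //                   pipeline: [ { $match: { author: "dave" } },
+    //                               { $group: { _id: "$category", n: { $sum: 1 } } } ] })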
+
+ PipelineCommand::~PipelineCommand() {
+ }
+
+ bool PipelineCommand::run(const string &db, BSONObj &cmdObj,
+ int options, string &errmsg,
+ BSONObjBuilder &result, bool fromRepl) {
+
+ intrusive_ptr<ExpressionContext> pCtx(ExpressionContext::create());
+
+ /* try to parse the command; if this fails, then we didn't run */
+ boost::shared_ptr<Pipeline> pPipeline(
+ Pipeline::parseCommand(errmsg, cmdObj, pCtx));
+ if (!pPipeline.get())
+ return false;
+
+ /* get a query to use, if any */
+ BSONObjBuilder queryBuilder;
+ BSONObjBuilder sortBuilder;
+ pPipeline->getCursorMods(&queryBuilder, &sortBuilder);
+ BSONObj query(queryBuilder.done());
+ BSONObj sort(sortBuilder.done());
+
+ /* for debugging purposes, show what the query and sort are */
+ DEV {
+ (log() << "\n---- query BSON\n" <<
+ query.jsonString(Strict, 1) << "\n----\n").flush();
+ (log() << "\n---- sort BSON\n" <<
+ sort.jsonString(Strict, 1) << "\n----\n").flush();
+ }
+
+ /* create a cursor for that query */
+ string fullName(db + "." + pPipeline->getCollectionName());
+ shared_ptr<Cursor> pCursor(
+ NamespaceDetailsTransient::getCursor(
+ fullName.c_str(), query
+#ifdef MONGODB_SERVER3832 /* see https://jira.mongodb.org/browse/SERVER-3832 */
+ , sort
+#endif
+ ));
+
+ /* wrap the cursor with a DocumentSource */
+ intrusive_ptr<DocumentSource> pSource(
+ DocumentSourceCursor::create(pCursor));
+
+ /* this is the normal non-debug path */
+ if (!pPipeline->getSplitMongodPipeline())
+ return pPipeline->run(result, errmsg, pSource);
+
+ /* setup as if we're in the router */
+ pCtx->setInRouter(true);
+
+ /*
+ Here, we'll split the pipeline in the same way we would for sharding,
+ for testing purposes.
+
+ Run the shard pipeline first, then feed the results into the remains
+ of the existing pipeline.
+
+ Start by splitting the pipeline.
+ */
+ shared_ptr<Pipeline> pShardSplit(
+ pPipeline->splitForSharded());
+
+ /*
+ Write the split pipeline as we would in order to transmit it to
+ the shard servers.
+ */
+ BSONObjBuilder shardBuilder;
+ pShardSplit->toBson(&shardBuilder);
+ BSONObj shardBson(shardBuilder.done());
+
+ DEV (log() << "\n---- shardBson\n" <<
+ shardBson.jsonString(Strict, 1) << "\n----\n").flush();
+
+ /* for debugging purposes, show what the pipeline now looks like */
+ DEV {
+ BSONObjBuilder pipelineBuilder;
+ pPipeline->toBson(&pipelineBuilder);
+ BSONObj pipelineBson(pipelineBuilder.done());
+ (log() << "\n---- pipelineBson\n" <<
+ pipelineBson.jsonString(Strict, 1) << "\n----\n").flush();
+ }
+
+ /* on the shard servers, create the local pipeline */
+ intrusive_ptr<ExpressionContext> pShardCtx(ExpressionContext::create());
+ shared_ptr<Pipeline> pShardPipeline(
+ Pipeline::parseCommand(errmsg, shardBson, pShardCtx));
+ if (!pShardPipeline.get()) {
+ return false;
+ }
+
+ /* run the shard pipeline */
+ BSONObjBuilder shardResultBuilder;
+ string shardErrmsg;
+ pShardPipeline->run(shardResultBuilder, shardErrmsg, pSource);
+ BSONObj shardResult(shardResultBuilder.done());
+
+ /* pick out the shard result, and prepare to read it */
+ intrusive_ptr<DocumentSourceBsonArray> pShardSource;
+ BSONObjIterator shardIter(shardResult);
+ while(shardIter.more()) {
+ BSONElement shardElement(shardIter.next());
+ const char *pFieldName = shardElement.fieldName();
+
+ if (strcmp(pFieldName, "result") == 0) {
+ pShardSource = DocumentSourceBsonArray::create(&shardElement);
+
+ /*
+ Connect the output of the shard pipeline with the mongos
+ pipeline that will merge the results.
+ */
+ return pPipeline->run(result, errmsg, pShardSource);
+ }
+ }
+
+ /* NOTREACHED */
+ assert(false);
+ return false;
+ }
+
+} // namespace mongo
diff --git a/src/mongo/db/common.cpp b/src/mongo/db/common.cpp
new file mode 100644
index 00000000000..cd073f8b059
--- /dev/null
+++ b/src/mongo/db/common.cpp
@@ -0,0 +1,73 @@
+/** @file common.cpp
+ Common code for server binaries (mongos, mongod, test).
+ Nothing used by driver should be here.
+ */
+
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+//#include "pch.h"
+//#include "concurrency.h"
+#include "jsobjmanipulator.h"
+
+/**
+ * this just has globals
+ */
+namespace mongo {
+
+ /** called by mongos, mongod, test. do not call from clients and such.
+ invoked before just about everything except global variable construction.
+ */
+ void doPreServerStartupInits() {
+#if defined(RLIMIT_NPROC) && defined(RLIMIT_NOFILE)
+ // Check that the open-file rlimit is > 1000, and that the process rlimit is > half the file limit
+ const unsigned int minNumFiles = 1000;
+ const double filesToProcsRatio = 2.0;
+ struct rlimit rlnproc;
+ struct rlimit rlnofile;
+
+ if(!getrlimit(RLIMIT_NPROC,&rlnproc) && !getrlimit(RLIMIT_NOFILE,&rlnofile)){
+ if(rlnofile.rlim_cur < minNumFiles){
+ log() << "Warning: soft rlimits too low. Number of files is " << rlnofile.rlim_cur << ", should be at least " << minNumFiles << endl;
+ }
+ if(rlnproc.rlim_cur < rlnofile.rlim_cur/filesToProcsRatio){
+ log() << "Warning: soft rlimits too low. " << rlnproc.rlim_cur << " processes, " << rlnofile.rlim_cur << " files. Number of processes should be at least "<< 1/filesToProcsRatio << " times number of files." << endl;
+ }
+ }
+ else{
+ log() << "Warning: getrlimit failed" << endl;
+ }
+#endif
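+ // note (illustrative, not part of this change): on most Linux systems the soft limits
+ // can be raised in the shell that starts the server, e.g. `ulimit -n 64000` for open
+ // files and `ulimit -u 32000` for processes; the exact values here are examples only.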
+ }
+
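+ /* descriptive note (an assumption based on the code below): skewed() is called when the
+ wall clock appears to have moved backwards; it keeps OpTimes monotonically increasing by
+ bumping the increment field, and shuts the server down if that counter would overflow
+ into the high bit. */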
+ NOINLINE_DECL OpTime OpTime::skewed() {
+ bool toLog = false;
+ ONCE toLog = true;
+ RARELY toLog = true;
+ last.i++;
+ if ( last.i & 0x80000000 )
+ toLog = true;
+ if ( toLog ) {
+ log() << "clock skew detected prev: " << last.secs << " now: " << (unsigned) time(0) << endl;
+ }
+ if ( last.i & 0x80000000 ) {
+ log() << "error large clock skew detected, shutting down" << endl;
+ throw ClockSkewException();
+ }
+ return last;
+ }
+
+}
diff --git a/src/mongo/db/compact.cpp b/src/mongo/db/compact.cpp
new file mode 100644
index 00000000000..32931b6c5fd
--- /dev/null
+++ b/src/mongo/db/compact.cpp
@@ -0,0 +1,376 @@
+/** @file compact.cpp
+ compaction of deleted space in pdfiles (datafiles)
+*/
+
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "pdfile.h"
+#include "concurrency.h"
+#include "commands.h"
+#include "curop-inl.h"
+#include "background.h"
+#include "extsort.h"
+#include "compact.h"
+#include "../util/concurrency/task.h"
+#include "../util/timer.h"
+
+namespace mongo {
+
+ char faux;
+
+ void addRecordToRecListInExtent(Record *r, DiskLoc loc);
+ DiskLoc allocateSpaceForANewRecord(const char *ns, NamespaceDetails *d, int lenWHdr, bool god);
+ void freeExtents(DiskLoc firstExt, DiskLoc lastExt);
+
+ /* this should be done in alloc record, not here, but doing it here for now.
+ really dumb; it's a start.
+ */
+ unsigned quantizeMask(unsigned x) {
+ if( x > 4096 * 20 )
+ return ~4095;
+ if( x >= 512 )
+ return ~63;
+ return ~0;
+ }
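+ // worked example (illustrative): lenWPadding & quantizeMask(lenWPadding) rounds the
+ // length down -- 300 stays 300 (mask ~0), 1000 becomes 960 (a multiple of 64), and
+ // 100000 becomes 98304 (a multiple of 4096).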
+
+ /** @return number of skipped (invalid) documents */
+ unsigned compactExtent(const char *ns, NamespaceDetails *d, const DiskLoc ext, int n,
+ const scoped_array<IndexSpec> &indexSpecs,
+ scoped_array<SortPhaseOne>& phase1, int nidx, bool validate,
+ double pf, int pb)
+ {
+ log() << "compact extent #" << n << endl;
+ unsigned oldObjSize = 0; // we'll report what the old padding was
+ unsigned oldObjSizeWithPadding = 0;
+
+ Extent *e = ext.ext();
+ e->assertOk();
+ assert( e->validates() );
+ unsigned skipped = 0;
+
+ {
+ // the next/prev pointers within the extent might not be in order so we first page the whole thing in
+ // sequentially
+ log() << "compact paging in len=" << e->length/1000000.0 << "MB" << endl;
+ Timer t;
+ MAdvise adv(e, e->length, MAdvise::Sequential);
+ const char *p = (const char *) e;
+ for( int i = 0; i < e->length; i += 4096 ) {
+ faux += p[i];
+ }
+ int ms = t.millis();
+ if( ms > 1000 )
+ log() << "compact end paging in " << ms << "ms " << e->length/1000000.0/ms << "MB/sec" << endl;
+ }
+
+ {
+ log() << "compact copying records" << endl;
+ unsigned totalSize = 0;
+ int nrecs = 0;
+ DiskLoc L = e->firstRecord;
+ if( !L.isNull() ) {
+ while( 1 ) {
+ Record *recOld = L.rec();
+ L = recOld->nextInExtent(L);
+ nrecs++;
+ BSONObj objOld(recOld);
+
+ if( !validate || objOld.valid() ) {
+ unsigned sz = objOld.objsize();
+
+ oldObjSize += sz;
+ oldObjSizeWithPadding += recOld->netLength();
+
+ unsigned lenWHdr = sz + Record::HeaderSize;
+ unsigned lenWPadding = lenWHdr;
+ {
+ lenWPadding = static_cast<unsigned>(pf*lenWPadding);
+ lenWPadding += pb;
+ lenWPadding = lenWPadding & quantizeMask(lenWPadding);
+ if( lenWPadding < lenWHdr || lenWPadding > BSONObjMaxUserSize / 2 ) {
+ lenWPadding = lenWHdr;
+ }
+ }
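+ // e.g. (illustrative numbers): with pf=1.5, pb=0 and lenWHdr=1000, lenWPadding is
+ // 1500 and then quantized down to 1472; out-of-range results fall back to lenWHdr above.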
+ totalSize += lenWPadding;
+ DiskLoc loc = allocateSpaceForANewRecord(ns, d, lenWPadding, false);
+ uassert(14024, "compact error out of space during compaction", !loc.isNull());
+ Record *recNew = loc.rec();
+ recNew = (Record *) getDur().writingPtr(recNew, lenWHdr);
+ addRecordToRecListInExtent(recNew, loc);
+ memcpy(recNew->data, objOld.objdata(), sz);
+
+ {
+ // extract keys for all indexes we will be rebuilding
+ for( int x = 0; x < nidx; x++ ) {
+ phase1[x].addKeys(indexSpecs[x], objOld, loc);
+ }
+ }
+ }
+ else {
+ if( ++skipped <= 10 )
+ log() << "compact skipping invalid object" << endl;
+ }
+
+ if( L.isNull() ) {
+ // we just did the very last record from the old extent. it's still pointed to
+ // by the old extent ext, but that will be fixed below after this loop
+ break;
+ }
+
+ // remove the old records (orphan them) periodically so our commit block doesn't get too large
+ bool stopping = false;
+ RARELY stopping = *killCurrentOp.checkForInterruptNoAssert() != 0;
+ if( stopping || getDur().aCommitIsNeeded() ) {
+ e->firstRecord.writing() = L;
+ Record *r = L.rec();
+ getDur().writingInt(r->prevOfs) = DiskLoc::NullOfs;
+ getDur().commitIfNeeded();
+ killCurrentOp.checkForInterrupt(false);
+ }
+ }
+ } // if !L.isNull()
+
+ assert( d->firstExtent == ext );
+ assert( d->lastExtent != ext );
+ DiskLoc newFirst = e->xnext;
+ d->firstExtent.writing() = newFirst;
+ newFirst.ext()->xprev.writing().Null();
+ getDur().writing(e)->markEmpty();
+ freeExtents(ext,ext);
+ getDur().commitIfNeeded();
+
+ {
+ double op = 1.0;
+ if( oldObjSize )
+ op = static_cast<double>(oldObjSizeWithPadding)/oldObjSize;
+ log() << "compact " << nrecs << " documents " << totalSize/1000000.0 << "MB"
+ << " oldPadding: " << op << ' ' << static_cast<unsigned>(op*100.0)/100
+ << endl;
+ }
+ }
+
+ return skipped;
+ }
+
+ extern SortPhaseOne *precalced;
+
+ bool _compact(const char *ns, NamespaceDetails *d, string& errmsg, bool validate, BSONObjBuilder& result, double pf, int pb) {
+ //int les = d->lastExtentSize;
+
+ // this is a big job, so might as well make things tidy before we start just to be nice.
+ getDur().commitNow();
+
+ list<DiskLoc> extents;
+ for( DiskLoc L = d->firstExtent; !L.isNull(); L = L.ext()->xnext )
+ extents.push_back(L);
+ log() << "compact " << extents.size() << " extents" << endl;
+
+ ProgressMeterHolder pm( cc().curop()->setMessage( "compact extent" , extents.size() ) );
+
+ // same data, but might perform a little differently after compact?
+ NamespaceDetailsTransient::get(ns).clearQueryCache();
+
+ int nidx = d->nIndexes;
+ scoped_array<IndexSpec> indexSpecs( new IndexSpec[nidx] );
+ scoped_array<SortPhaseOne> phase1( new SortPhaseOne[nidx] );
+ {
+ NamespaceDetails::IndexIterator ii = d->ii();
+ int x = 0;
+ while( ii.more() ) {
+ BSONObjBuilder b;
+ IndexDetails& idx = ii.next();
+ BSONObj::iterator i(idx.info.obj());
+ while( i.more() ) {
+ BSONElement e = i.next();
+ if( !str::equals(e.fieldName(), "v") && !str::equals(e.fieldName(), "background") ) {
+ b.append(e);
+ }
+ }
+ BSONObj o = b.obj().getOwned();
+ phase1[x].sorter.reset( new BSONObjExternalSorter( idx.idxInterface(), o.getObjectField("key") ) );
+ phase1[x].sorter->hintNumObjects( d->stats.nrecords );
+ indexSpecs[x++].reset(o);
+ }
+ }
+
+ log() << "compact orphan deleted lists" << endl;
+ for( int i = 0; i < Buckets; i++ ) {
+ d->deletedList[i].writing().Null();
+ }
+
+
+
+ // Start over from scratch with our extent sizing and growth
+ d->lastExtentSize=0;
+
+ // before dropping indexes, at least make sure we can allocate one extent!
+ uassert(14025, "compact error no space available to allocate", !allocateSpaceForANewRecord(ns, d, Record::HeaderSize+1, false).isNull());
+
+ // note that the drop indexes call also invalidates all clientcursors for the namespace, which is important and wanted here
+ log() << "compact dropping indexes" << endl;
+ BSONObjBuilder b;
+ if( !dropIndexes(d, ns, "*", errmsg, b, true) ) {
+ errmsg = "compact drop indexes failed";
+ log() << errmsg << endl;
+ return false;
+ }
+
+ getDur().commitNow();
+
+ long long skipped = 0;
+ int n = 0;
+ for( list<DiskLoc>::iterator i = extents.begin(); i != extents.end(); i++ ) {
+ skipped += compactExtent(ns, d, *i, n++, indexSpecs, phase1, nidx, validate, pf, pb);
+ pm.hit();
+ }
+
+ if( skipped ) {
+ result.append("invalidObjects", skipped);
+ }
+
+ assert( d->firstExtent.ext()->xprev.isNull() );
+
+ // indexes will do their own progress meter?
+ pm.finished();
+
+ // build indexes
+ NamespaceString s(ns);
+ string si = s.db + ".system.indexes";
+ for( int i = 0; i < nidx; i++ ) {
+ killCurrentOp.checkForInterrupt(false);
+ BSONObj info = indexSpecs[i].info;
+ log() << "compact create index " << info["key"].Obj().toString() << endl;
+ try {
+ precalced = &phase1[i];
+ theDataFileMgr.insert(si.c_str(), info.objdata(), info.objsize());
+ }
+ catch(...) {
+ precalced = 0;
+ throw;
+ }
+ precalced = 0;
+ }
+
+ return true;
+ }
+
+ bool compact(const string& ns, string &errmsg, bool validate, BSONObjBuilder& result, double pf, int pb) {
+ massert( 14028, "bad ns", NamespaceString::normal(ns.c_str()) );
+ massert( 14027, "can't compact a system namespace", !str::contains(ns, ".system.") ); // items in system.indexes cannot be moved; there are pointers to those disklocs in NamespaceDetails
+
+ bool ok;
+ {
+ writelock lk;
+ BackgroundOperation::assertNoBgOpInProgForNs(ns.c_str());
+ Client::Context ctx(ns);
+ NamespaceDetails *d = nsdetails(ns.c_str());
+ massert( 13660, str::stream() << "namespace " << ns << " does not exist", d );
+ massert( 13661, "cannot compact capped collection", !d->capped );
+ log() << "compact " << ns << " begin" << endl;
+ if( pf != 0 || pb != 0 ) {
+ log() << "paddingFactor:" << pf << " paddingBytes:" << pb << endl;
+ }
+ try {
+ ok = _compact(ns.c_str(), d, errmsg, validate, result, pf, pb);
+ }
+ catch(...) {
+ log() << "compact " << ns << " end (with error)" << endl;
+ throw;
+ }
+ log() << "compact " << ns << " end" << endl;
+ }
+ return ok;
+ }
+
+ bool isCurrentlyAReplSetPrimary();
+
+ class CompactCmd : public Command {
+ public:
+ virtual LockType locktype() const { return NONE; }
+ virtual bool adminOnly() const { return false; }
+ virtual bool slaveOk() const { return true; }
+ virtual bool maintenanceMode() const { return true; }
+ virtual bool logTheOp() { return false; }
+ virtual void help( stringstream& help ) const {
+ help << "compact collection\n"
+ "warning: this operation blocks the server and is slow. you can cancel with cancelOp()\n"
+ "{ compact : <collection_name>, [force:true], [validate:true] }\n"
+ " force - allows to run on a replica set primary\n"
+ " validate - check records are noncorrupt before adding to newly compacting extents. slower but safer (default is true in this version)\n";
+ }
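+ // illustrative invocation from the mongo shell (collection name "foo" is an example):
+ //   db.runCommand({ compact: "foo", paddingFactor: 1.1, validate: true })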
+ virtual bool requiresAuth() { return true; }
+ CompactCmd() : Command("compact") { }
+
+ virtual bool run(const string& db, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ string coll = cmdObj.firstElement().valuestr();
+ if( coll.empty() || db.empty() ) {
+ errmsg = "no collection name specified";
+ return false;
+ }
+
+ if( isCurrentlyAReplSetPrimary() && !cmdObj["force"].trueValue() ) {
+ errmsg = "will not run compact on an active replica set primary as this is a slow blocking operation. use force:true to force";
+ return false;
+ }
+
+ string ns = db + '.' + coll;
+ if ( ! NamespaceString::normal(ns.c_str()) ) {
+ errmsg = "bad namespace name";
+ return false;
+ }
+
+ // parameter validation to avoid triggering assertions in compact()
+ if ( str::contains(ns, ".system.") ) {
+ errmsg = "can't compact a system namespace";
+ return false;
+ }
+
+ {
+ writelock lk;
+ Client::Context ctx(ns);
+ NamespaceDetails *d = nsdetails(ns.c_str());
+ if( ! d ) {
+ errmsg = "namespace does not exist";
+ return false;
+ }
+
+ if ( d->capped ) {
+ errmsg = "cannot compact a capped collection";
+ return false;
+ }
+ }
+
+ double pf = 1.0;
+ int pb = 0;
+ if( cmdObj.hasElement("paddingFactor") ) {
+ pf = cmdObj["paddingFactor"].Number();
+ assert( pf >= 1.0 && pf <= 4.0 );
+ }
+ if( cmdObj.hasElement("paddingBytes") ) {
+ pb = (int) cmdObj["paddingBytes"].Number();
+ assert( pb >= 0 && pb <= 1024 * 1024 );
+ }
+
+ bool validate = !cmdObj.hasElement("validate") || cmdObj["validate"].trueValue(); // default is true at the moment
+ bool ok = compact(ns, errmsg, validate, result, pf, pb);
+ return ok;
+ }
+ };
+ static CompactCmd compactCmd;
+
+}
diff --git a/src/mongo/db/compact.h b/src/mongo/db/compact.h
new file mode 100644
index 00000000000..7bf49c8e1b8
--- /dev/null
+++ b/src/mongo/db/compact.h
@@ -0,0 +1,50 @@
+// compact.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+namespace mongo {
+
+ /** for bottom up fastbuildindex (where we presort keys) */
+ struct SortPhaseOne {
+ SortPhaseOne() {
+ n = 0;
+ nkeys = 0;
+ multi = false;
+ }
+ shared_ptr<BSONObjExternalSorter> sorter;
+ unsigned long long n; // # of records
+ unsigned long long nkeys;
+ bool multi; // multikey index
+
+ void addKeys(const IndexSpec& spec, const BSONObj& o, DiskLoc loc) {
+ BSONObjSet keys;
+ spec.getKeys(o, keys);
+ int k = 0;
+ for ( BSONObjSet::iterator i=keys.begin(); i != keys.end(); i++ ) {
+ if( ++k == 2 ) {
+ multi = true;
+ }
+ sorter->add(*i, loc);
+ nkeys++;
+ }
+ n++;
+ }
+ };
+
+}
diff --git a/src/mongo/db/concurrency.h b/src/mongo/db/concurrency.h
new file mode 100644
index 00000000000..33bc0caac77
--- /dev/null
+++ b/src/mongo/db/concurrency.h
@@ -0,0 +1,21 @@
+// @file concurrency.h
+
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "mongomutex.h"
diff --git a/src/mongo/db/curop-inl.h b/src/mongo/db/curop-inl.h
new file mode 100644
index 00000000000..7dd678b185d
--- /dev/null
+++ b/src/mongo/db/curop-inl.h
@@ -0,0 +1 @@
+#include "curop.h"
diff --git a/src/mongo/db/curop.cpp b/src/mongo/db/curop.cpp
new file mode 100644
index 00000000000..3cc452b46cc
--- /dev/null
+++ b/src/mongo/db/curop.cpp
@@ -0,0 +1,173 @@
+/**
+* Copyright (C) 2009 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "curop.h"
+#include "database.h"
+
+namespace mongo {
+
+ // todo : move more here
+
+ CurOp::CurOp( Client * client , CurOp * wrapped ) :
+ _client(client),
+ _wrapped(wrapped)
+ {
+ if ( _wrapped )
+ _client->_curOp = this;
+ _start = _checkpoint = 0;
+ _active = false;
+ _reset();
+ _op = 0;
+ // These addresses should never be written to again. The zeroes are
+ // placed here as a precaution because currentOp may be accessed
+ // without the db mutex.
+ memset(_ns, 0, sizeof(_ns));
+ }
+
+ void CurOp::_reset() {
+ _command = false;
+ _lockType = 0;
+ _dbprofile = 0;
+ _end = 0;
+ _waitingForLock = false;
+ _message = "";
+ _progressMeter.finished();
+ _killed = false;
+ _numYields = 0;
+ }
+
+ void CurOp::reset() {
+ _reset();
+ _start = _checkpoint = 0;
+ _opNum = _nextOpNum++;
+ _ns[0] = 0;
+ _debug.reset();
+ _query.reset();
+ _active = true; // this should be last for ui clarity
+ }
+
+ void CurOp::reset( const HostAndPort& remote, int op ) {
+ reset();
+ if( _remote != remote ) {
+ // todo : _remote is not thread safe yet is used as such!
+ _remote = remote;
+ }
+ _op = op;
+ }
+
+ ProgressMeter& CurOp::setMessage( const char * msg , unsigned long long progressMeterTotal , int secondsBetween ) {
+ if ( progressMeterTotal ) {
+ if ( _progressMeter.isActive() ) {
+ cout << "about to assert, old _message: " << _message << " new message:" << msg << endl;
+ assert( ! _progressMeter.isActive() );
+ }
+ _progressMeter.reset( progressMeterTotal , secondsBetween );
+ }
+ else {
+ _progressMeter.finished();
+ }
+ _message = msg;
+ return _progressMeter;
+ }
+
+
+ BSONObj CurOp::info() {
+ if( ! cc().getAuthenticationInfo()->isAuthorized("admin") ) {
+ BSONObjBuilder b;
+ b.append("err", "unauthorized");
+ return b.obj();
+ }
+ return infoNoauth();
+ }
+
+ CurOp::~CurOp() {
+ if ( _wrapped ) {
+ scoped_lock bl(Client::clientsMutex);
+ _client->_curOp = _wrapped;
+ }
+ _client = 0;
+ }
+
+ void CurOp::enter( Client::Context * context ) {
+ ensureStarted();
+ setNS( context->ns() );
+ _dbprofile = context->_db ? context->_db->profile : 0;
+ }
+
+ void CurOp::leave( Client::Context * context ) {
+ unsigned long long now = curTimeMicros64();
+ Top::global.record( _ns , _op , _lockType , now - _checkpoint , _command );
+ _checkpoint = now;
+ }
+
+ BSONObj CurOp::infoNoauth() {
+ BSONObjBuilder b;
+ b.append("opid", _opNum);
+ bool a = _active && _start;
+ b.append("active", a);
+ if ( _lockType )
+ b.append("lockType" , _lockType > 0 ? "write" : "read" );
+ b.append("waitingForLock" , _waitingForLock );
+
+ if( a ) {
+ b.append("secs_running", elapsedSeconds() );
+ }
+
+ b.append( "op" , opToString( _op ) );
+
+ b.append("ns", _ns);
+
+ _query.append( b , "query" );
+
+ if( !_remote.empty() ) {
+ b.append("client", _remote.toString());
+ }
+
+ if ( _client ) {
+ b.append( "desc" , _client->desc() );
+ if ( _client->_threadId.size() )
+ b.append( "threadId" , _client->_threadId );
+ if ( _client->_connectionId )
+ b.appendNumber( "connectionId" , _client->_connectionId );
+ }
+
+ if ( ! _message.empty() ) {
+ if ( _progressMeter.isActive() ) {
+ StringBuilder buf(128);
+ buf << _message.toString() << " " << _progressMeter.toString();
+ b.append( "msg" , buf.str() );
+ BSONObjBuilder sub( b.subobjStart( "progress" ) );
+ sub.appendNumber( "done" , (long long)_progressMeter.done() );
+ sub.appendNumber( "total" , (long long)_progressMeter.total() );
+ sub.done();
+ }
+ else {
+ b.append( "msg" , _message.toString() );
+ }
+ }
+
+ if( killed() )
+ b.append("killed", true);
+
+ b.append( "numYields" , _numYields );
+
+ return b.obj();
+ }
+
+ AtomicUInt CurOp::_nextOpNum;
+
+}
diff --git a/src/mongo/db/curop.h b/src/mongo/db/curop.h
new file mode 100644
index 00000000000..192404d8796
--- /dev/null
+++ b/src/mongo/db/curop.h
@@ -0,0 +1,313 @@
+// @file curop.h
+
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#pragma once
+
+#include "namespace-inl.h"
+#include "client.h"
+#include "../bson/util/atomic_int.h"
+#include "../util/concurrency/spin_lock.h"
+#include "../util/time_support.h"
+#include "../util/net/hostandport.h"
+
+namespace mongo {
+
+ class CurOp;
+
+ /* lifespan is different from CurOp's because of recursion with DBDirectClient */
+ class OpDebug {
+ public:
+ OpDebug() : ns(""){ reset(); }
+
+ void reset();
+
+ string toString() const;
+ void append( const CurOp& curop, BSONObjBuilder& b ) const;
+
+ // -------------------
+
+ StringBuilder extra; // weird things we need to fix later
+
+ // basic options
+ int op;
+ bool iscommand;
+ Namespace ns;
+ BSONObj query;
+ BSONObj updateobj;
+
+ // detailed options
+ long long cursorid;
+ int ntoreturn;
+ int ntoskip;
+ bool exhaust;
+
+ // debugging/profile info
+ int nscanned;
+ bool idhack; // indicates short circuited code path on an update to make the update faster
+ bool scanAndOrder; // scanandorder query plan aspect was used
+ bool moved; // update resulted in a move (moves are expensive)
+ bool fastmod;
+ bool fastmodinsert; // upsert of an $operation. builds a default object
+ bool upsert; // true if the update actually did an insert
+ int keyUpdates;
+
+ // error handling
+ ExceptionInfo exceptionInfo;
+
+ // response info
+ int executionTime;
+ int nreturned;
+ int responseLength;
+ };
+
+ /**
+ * stores a copy of a bson obj in a fixed-size buffer
+ * if it's too big for the buffer, says "too big"
+ * useful for keeping a copy around indefinitely without wasting a lot of space or doing malloc
+ */
+ class CachedBSONObj {
+ public:
+ enum { TOO_BIG_SENTINEL = 1 } ;
+ static BSONObj _tooBig; // { $msg : "query not recording (too large)" }
+
+ CachedBSONObj() {
+ _size = (int*)_buf;
+ reset();
+ }
+
+ void reset( int sz = 0 ) {
+ _lock.lock();
+ _reset( sz );
+ _lock.unlock();
+ }
+
+ void set( const BSONObj& o ) {
+ scoped_spinlock lk(_lock);
+ int sz = o.objsize();
+ if ( sz > (int) sizeof(_buf) ) {
+ _reset(TOO_BIG_SENTINEL);
+ }
+ else {
+ memcpy(_buf, o.objdata(), sz );
+ }
+ }
+
+ int size() const { return *_size; }
+ bool have() const { return size() > 0; }
+
+ BSONObj get() const {
+ scoped_spinlock lk(_lock);
+ return _get();
+ }
+
+ void append( BSONObjBuilder& b , const StringData& name ) const {
+ scoped_spinlock lk(_lock);
+ BSONObj temp = _get();
+ b.append( name , temp );
+ }
+
+ private:
+ /** you have to be locked when you call this */
+ BSONObj _get() const {
+ int sz = size();
+ if ( sz == 0 )
+ return BSONObj();
+ if ( sz == TOO_BIG_SENTINEL )
+ return _tooBig;
+ return BSONObj( _buf ).copy();
+ }
+
+ /** you have to be locked when you call this */
+ void _reset( int sz ) { _size[0] = sz; }
+
+ mutable SpinLock _lock;
+ int * _size;
+ char _buf[512];
+ };
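+ /* illustrative usage (a sketch, not part of this change):
+ CachedBSONObj q;
+ q.set( BSON( "a" << 1 ) );                   // stored only if it fits in the 512 byte buffer
+ if( q.have() ) { BSONObj copy = q.get(); }   // owned copy, or the "too big" placeholder object
+ */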
+
+ /* Current operation (for the current Client).
+ an embedded member of Client class, and typically used from within the mutex there.
+ */
+ class CurOp : boost::noncopyable {
+ public:
+ CurOp( Client * client , CurOp * wrapped = 0 );
+ ~CurOp();
+
+ bool haveQuery() const { return _query.have(); }
+ BSONObj query() { return _query.get(); }
+ void appendQuery( BSONObjBuilder& b , const StringData& name ) const { _query.append( b , name ); }
+
+ void ensureStarted() {
+ if ( _start == 0 )
+ _start = _checkpoint = curTimeMicros64();
+ }
+ bool isStarted() const { return _start > 0; }
+ void enter( Client::Context * context );
+ void leave( Client::Context * context );
+ void reset();
+ void reset( const HostAndPort& remote, int op );
+ void markCommand() { _command = true; }
+
+ void waitingForLock( int type ) {
+ _waitingForLock = true;
+ if ( type > 0 )
+ _lockType = 1;
+ else
+ _lockType = -1;
+ }
+ void gotLock() { _waitingForLock = false; }
+ OpDebug& debug() { return _debug; }
+ int profileLevel() const { return _dbprofile; }
+ const char * getNS() const { return _ns; }
+
+ bool shouldDBProfile( int ms ) const {
+ if ( _dbprofile <= 0 )
+ return false;
+
+ return _dbprofile >= 2 || ms >= cmdLine.slowMS;
+ }
+
+ AtomicUInt opNum() const { return _opNum; }
+
+ /** if this op is running */
+ bool active() const { return _active; }
+
+ int getLockType() const { return _lockType; }
+ bool isWaitingForLock() const { return _waitingForLock; }
+ int getOp() const { return _op; }
+ unsigned long long startTime() { // micros
+ ensureStarted();
+ return _start;
+ }
+ void done() {
+ _active = false;
+ _end = curTimeMicros64();
+ }
+ unsigned long long totalTimeMicros() {
+ massert( 12601 , "CurOp not marked done yet" , ! _active );
+ return _end - startTime();
+ }
+ int totalTimeMillis() { return (int) (totalTimeMicros() / 1000); }
+ int elapsedMillis() {
+ unsigned long long total = curTimeMicros64() - startTime();
+ return (int) (total / 1000);
+ }
+ int elapsedSeconds() { return elapsedMillis() / 1000; }
+ void setQuery(const BSONObj& query) { _query.set( query ); }
+ Client * getClient() const { return _client; }
+ BSONObj info();
+ BSONObj infoNoauth();
+ string getRemoteString( bool includePort = true ) { return _remote.toString(includePort); }
+ ProgressMeter& setMessage( const char * msg , unsigned long long progressMeterTotal = 0 , int secondsBetween = 3 );
+ string getMessage() const { return _message.toString(); }
+ ProgressMeter& getProgressMeter() { return _progressMeter; }
+ CurOp *parent() const { return _wrapped; }
+ void kill() { _killed = true; }
+ bool killed() const { return _killed; }
+ void yielded() { _numYields++; }
+ void setNS(const char *ns) {
+ strncpy(_ns, ns, Namespace::MaxNsLen);
+ _ns[Namespace::MaxNsLen] = 0;
+ }
+
+ private:
+ friend class Client;
+ void _reset();
+
+ static AtomicUInt _nextOpNum;
+ Client * _client;
+ CurOp * _wrapped;
+ unsigned long long _start;
+ unsigned long long _checkpoint;
+ unsigned long long _end;
+ bool _active;
+ int _op;
+ bool _command;
+ int _lockType; // see concurrency.h for values
+ bool _waitingForLock;
+ int _dbprofile; // 0=off, 1=slow, 2=all
+ AtomicUInt _opNum; // todo: simple being "unsigned" may make more sense here
+ char _ns[Namespace::MaxNsLen+2];
+ HostAndPort _remote; // CAREFUL here with thread safety
+ CachedBSONObj _query; // CachedBSONObj is thread safe
+ OpDebug _debug;
+ ThreadSafeString _message;
+ ProgressMeter _progressMeter;
+ volatile bool _killed;
+ int _numYields;
+ };
+
+ /* _globalKill: set when we are shutting down;
+ otherwise the kill attribute is set on the specified CurOp.
+ this class does not handle races between interruptJs and the checkForInterrupt functions - those must be
+ handled by the client of this class
+ */
+ extern class KillCurrentOp {
+ public:
+ void killAll();
+ void kill(AtomicUInt i);
+
+ /** @return true if global interrupt and should terminate the operation */
+ bool globalInterruptCheck() const { return _globalKill; }
+
+ void checkForInterrupt( bool heedMutex = true ) {
+ Client& c = cc();
+ if ( heedMutex && d.dbMutex.isWriteLocked() )
+ return;
+ if( _globalKill )
+ uasserted(11600,"interrupted at shutdown");
+ if( c.curop()->killed() )
+ uasserted(11601,"interrupted");
+ if( c.sometimes(1024) ) {
+ AbstractMessagingPort *p = cc().port();
+ if( p )
+ p->assertStillConnected();
+ }
+ }
+
+ /** @return "" if not interrupted. otherwise, you should stop. */
+ const char *checkForInterruptNoAssert( /*bool heedMutex = true*/ ) {
+ Client& c = cc();
+ // always called with false so commented out:
+ /*if ( heedMutex && d.dbMutex.isWriteLocked() )
+ return "";*/
+ if( _globalKill )
+ return "interrupted at shutdown";
+ if( c.curop()->killed() )
+ return "interrupted";
+ if( c.sometimes(1024) ) {
+ try {
+ AbstractMessagingPort *p = cc().port();
+ if( p )
+ p->assertStillConnected();
+ }
+ catch(...) {
+ log() << "no longer connected to client";
+ return "no longer connected to client";
+ }
+ }
+ return "";
+ }
+
+ private:
+ void interruptJs( AtomicUInt *op );
+ volatile bool _globalKill;
+ } killCurrentOp;
+
+}
diff --git a/src/mongo/db/cursor.cpp b/src/mongo/db/cursor.cpp
new file mode 100644
index 00000000000..ac7afc1532b
--- /dev/null
+++ b/src/mongo/db/cursor.cpp
@@ -0,0 +1,166 @@
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "pdfile.h"
+#include "curop-inl.h"
+
+namespace mongo {
+
+ bool BasicCursor::advance() {
+ killCurrentOp.checkForInterrupt();
+ if ( eof() ) {
+ if ( tailable_ && !last.isNull() ) {
+ curr = s->next( last );
+ }
+ else {
+ return false;
+ }
+ }
+ else {
+ last = curr;
+ curr = s->next( curr );
+ }
+ incNscanned();
+ return ok();
+ }
+
+ /* these will be used outside of mutexes - really functors - thus the const */
+ class Forward : public AdvanceStrategy {
+ virtual DiskLoc next( const DiskLoc &prev ) const {
+ return prev.rec()->getNext( prev );
+ }
+ } _forward;
+
+ class Reverse : public AdvanceStrategy {
+ virtual DiskLoc next( const DiskLoc &prev ) const {
+ return prev.rec()->getPrev( prev );
+ }
+ } _reverse;
+
+ const AdvanceStrategy *forward() {
+ return &_forward;
+ }
+ const AdvanceStrategy *reverse() {
+ return &_reverse;
+ }
+
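+ /* nextLoop/prevLoop wrap around the record chain for capped collections: when the
+ natural next/prev location is null, traversal continues from the namespace's
+ first/last record, giving circular iteration within the capped extents. */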
+ DiskLoc nextLoop( NamespaceDetails *nsd, const DiskLoc &prev ) {
+ assert( nsd->capLooped() );
+ DiskLoc next = forward()->next( prev );
+ if ( !next.isNull() )
+ return next;
+ return nsd->firstRecord();
+ }
+
+ DiskLoc prevLoop( NamespaceDetails *nsd, const DiskLoc &curr ) {
+ assert( nsd->capLooped() );
+ DiskLoc prev = reverse()->next( curr );
+ if ( !prev.isNull() )
+ return prev;
+ return nsd->lastRecord();
+ }
+
+ ForwardCappedCursor::ForwardCappedCursor( NamespaceDetails *_nsd, const DiskLoc &startLoc ) :
+ nsd( _nsd ) {
+ if ( !nsd )
+ return;
+ DiskLoc start = startLoc;
+ if ( start.isNull() ) {
+ if ( !nsd->capLooped() )
+ start = nsd->firstRecord();
+ else {
+ start = nsd->capExtent.ext()->firstRecord;
+ if ( !start.isNull() && start == nsd->capFirstNewRecord ) {
+ start = nsd->capExtent.ext()->lastRecord;
+ start = nextLoop( nsd, start );
+ }
+ }
+ }
+ curr = start;
+ s = this;
+ incNscanned();
+ }
+
+ DiskLoc ForwardCappedCursor::next( const DiskLoc &prev ) const {
+ assert( nsd );
+ if ( !nsd->capLooped() )
+ return forward()->next( prev );
+
+ DiskLoc i = prev;
+ // Last record
+ if ( i == nsd->capExtent.ext()->lastRecord )
+ return DiskLoc();
+ i = nextLoop( nsd, i );
+ // If we become capFirstNewRecord from same extent, advance to next extent.
+ if ( i == nsd->capFirstNewRecord &&
+ i != nsd->capExtent.ext()->firstRecord )
+ i = nextLoop( nsd, nsd->capExtent.ext()->lastRecord );
+ // If we have just gotten to beginning of capExtent, skip to capFirstNewRecord
+ if ( i == nsd->capExtent.ext()->firstRecord )
+ i = nsd->capFirstNewRecord;
+ return i;
+ }
+
+ ReverseCappedCursor::ReverseCappedCursor( NamespaceDetails *_nsd, const DiskLoc &startLoc ) :
+ nsd( _nsd ) {
+ if ( !nsd )
+ return;
+ DiskLoc start = startLoc;
+ if ( start.isNull() ) {
+ if ( !nsd->capLooped() ) {
+ start = nsd->lastRecord();
+ }
+ else {
+ start = nsd->capExtent.ext()->lastRecord;
+ }
+ }
+ curr = start;
+ s = this;
+ incNscanned();
+ }
+
+ DiskLoc ReverseCappedCursor::next( const DiskLoc &prev ) const {
+ assert( nsd );
+ if ( !nsd->capLooped() )
+ return reverse()->next( prev );
+
+ DiskLoc i = prev;
+ // Last record
+ if ( nsd->capFirstNewRecord == nsd->capExtent.ext()->firstRecord ) {
+ if ( i == nextLoop( nsd, nsd->capExtent.ext()->lastRecord ) ) {
+ return DiskLoc();
+ }
+ }
+ else {
+ if ( i == nsd->capExtent.ext()->firstRecord ) {
+ return DiskLoc();
+ }
+ }
+ // If we are capFirstNewRecord, advance to prev extent, otherwise just get prev.
+ if ( i == nsd->capFirstNewRecord )
+ i = prevLoop( nsd, nsd->capExtent.ext()->firstRecord );
+ else
+ i = prevLoop( nsd, i );
+ // If we just became last in cap extent, advance past capFirstNewRecord
+ // (We know capExtent.ext()->firstRecord != capFirstNewRecord, since would
+ // have returned DiskLoc() earlier otherwise.)
+ if ( i == nsd->capExtent.ext()->lastRecord )
+ i = reverse()->next( nsd->capFirstNewRecord );
+
+ return i;
+ }
+} // namespace mongo
diff --git a/src/mongo/db/cursor.h b/src/mongo/db/cursor.h
new file mode 100644
index 00000000000..8e9e922733d
--- /dev/null
+++ b/src/mongo/db/cursor.h
@@ -0,0 +1,246 @@
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../pch.h"
+
+#include "jsobj.h"
+#include "diskloc.h"
+#include "matcher.h"
+
+namespace mongo {
+
+ class NamespaceDetails;
+ class Record;
+ class CoveredIndexMatcher;
+
+ /* Query cursors, base class. This is for our internal cursors. "ClientCursor" is a separate
+ concept and is for the user's cursor.
+
+ WARNING concurrency: the vfunctions below are called back from within a
+ ClientCursor::ccmutex. Don't cause a deadlock, you've been warned.
+ */
+ class Cursor : boost::noncopyable {
+ public:
+ virtual ~Cursor() {}
+ virtual bool ok() = 0;
+ bool eof() { return !ok(); }
+ virtual Record* _current() = 0;
+ virtual BSONObj current() = 0;
+ virtual DiskLoc currLoc() = 0;
+ virtual bool advance() = 0; /*true=ok*/
+ virtual BSONObj currKey() const { return BSONObj(); }
+
+ // DiskLoc the cursor requires for continued operation. Before this
+ // DiskLoc is deleted, the cursor must be incremented or destroyed.
+ virtual DiskLoc refLoc() = 0;
+
+ /* Implement these if you want the cursor to be "tailable" */
+
+ /* Request that the cursor starts tailing after advancing past last record. */
+ /* The implementation may or may not honor this request. */
+ virtual void setTailable() {}
+ /* indicates if tailing is enabled. */
+ virtual bool tailable() {
+ return false;
+ }
+
+ virtual void aboutToDeleteBucket(const DiskLoc& b) { }
+
+ /* optional to implement. if implemented, means 'this' is a prototype */
+ virtual Cursor* clone() {
+ return 0;
+ }
+
+ virtual BSONObj indexKeyPattern() {
+ return BSONObj();
+ }
+
+ virtual bool supportGetMore() = 0;
+
+ /* called after every query block is iterated -- i.e. between getMore() blocks
+ so you can note where we are, if necessary.
+ */
+ virtual void noteLocation() { }
+
+ /* called before query getmore block is iterated */
+ virtual void checkLocation() { }
+
+ /**
+ * Called before a document pointed at by an earlier iterate of this cursor is to be
+ * modified. It is ok if the current iterate also points to the document to be modified.
+ */
+ virtual void prepareToTouchEarlierIterate() { noteLocation(); }
+
+ /** Recover from a previous call to prepareToTouchEarlierIterate(). */
+ virtual void recoverFromTouchingEarlierIterate() { checkLocation(); }
+
+ virtual bool supportYields() = 0;
+
+ /** Called before a ClientCursor yield. */
+ virtual bool prepareToYield() { noteLocation(); return supportYields(); }
+
+ /** Called after a ClientCursor yield. Recovers from a previous call to prepareToYield(). */
+ virtual void recoverFromYield() { checkLocation(); }
+
+ virtual string toString() { return "abstract?"; }
+
+ /* used for multikey index traversal to avoid sending back dups. see Matcher::matches().
+ if a multikey index traversal:
+ if loc has already been sent, returns true.
+ otherwise, marks loc as sent.
+ */
+ virtual bool getsetdup(DiskLoc loc) = 0;
+
+ virtual bool isMultiKey() const = 0;
+
+ virtual bool autoDedup() const { return true; }
+
+ /**
+ * return true if the keys in the index have been modified from the main doc
+ * if you have { a : 1 , b : [ 1 , 2 ] }
+ * an index on { a : 1 } would not be modified
+ * an index on { b : 1 } would be, since the individual values of the array are put
+ * in the index, not the array itself
+ */
+ virtual bool modifiedKeys() const = 0;
+
+ virtual BSONObj prettyIndexBounds() const { return BSONArray(); }
+
+ virtual bool capped() const { return false; }
+
+ virtual long long nscanned() = 0;
+
+ // The implementation may return different matchers depending on the
+ // position of the cursor. If matcher() is nonzero at the start,
+ // matcher() should be checked each time advance() is called.
+ // Implementations which generate their own matcher should return this
+ // to avoid a matcher being set manually.
+ // Note that the return values differ subtly here
+
+ // Used when we want fast matcher lookup
+ virtual CoveredIndexMatcher *matcher() const { return 0; }
+ // Used when we need to share this matcher with someone else
+ virtual shared_ptr< CoveredIndexMatcher > matcherPtr() const { return shared_ptr< CoveredIndexMatcher >(); }
+
+ virtual bool currentMatches( MatchDetails *details = 0 ) {
+ return !matcher() || matcher()->matchesCurrent( this, details );
+ }
+
+ // A convenience function for setting the value of matcher() manually
+ // so it may accessed later. Implementations which must generate
+ // their own matcher() should assert here.
+ virtual void setMatcher( shared_ptr< CoveredIndexMatcher > matcher ) {
+ massert( 13285, "manual matcher config not allowed", false );
+ }
+
+ virtual void explainDetails( BSONObjBuilder& b ) { return; }
+ };
+
+ // strategy object implementing direction of traversal.
+ class AdvanceStrategy {
+ public:
+ virtual ~AdvanceStrategy() { }
+ virtual DiskLoc next( const DiskLoc &prev ) const = 0;
+ };
+
+ const AdvanceStrategy *forward();
+ const AdvanceStrategy *reverse();
+
+ /* table-scan style cursor */
+ class BasicCursor : public Cursor {
+ public:
+ BasicCursor(DiskLoc dl, const AdvanceStrategy *_s = forward()) : curr(dl), s( _s ), _nscanned() {
+ incNscanned();
+ init();
+ }
+ BasicCursor(const AdvanceStrategy *_s = forward()) : s( _s ), _nscanned() {
+ init();
+ }
+ bool ok() { return !curr.isNull(); }
+ Record* _current() {
+ assert( ok() );
+ return curr.rec();
+ }
+ BSONObj current() {
+ Record *r = _current();
+ BSONObj j(r);
+ return j;
+ }
+ virtual DiskLoc currLoc() { return curr; }
+ virtual DiskLoc refLoc() { return curr.isNull() ? last : curr; }
+ bool advance();
+ virtual string toString() { return "BasicCursor"; }
+ virtual void setTailable() {
+ if ( !curr.isNull() || !last.isNull() )
+ tailable_ = true;
+ }
+ virtual bool tailable() { return tailable_; }
+ virtual bool getsetdup(DiskLoc loc) { return false; }
+ virtual bool isMultiKey() const { return false; }
+ virtual bool modifiedKeys() const { return false; }
+ virtual bool supportGetMore() { return true; }
+ virtual bool supportYields() { return true; }
+ virtual CoveredIndexMatcher *matcher() const { return _matcher.get(); }
+ virtual shared_ptr< CoveredIndexMatcher > matcherPtr() const { return _matcher; }
+ virtual void setMatcher( shared_ptr< CoveredIndexMatcher > matcher ) { _matcher = matcher; }
+ virtual long long nscanned() { return _nscanned; }
+
+ protected:
+ DiskLoc curr, last;
+ const AdvanceStrategy *s;
+ void incNscanned() { if ( !curr.isNull() ) { ++_nscanned; } }
+ private:
+ bool tailable_;
+ shared_ptr< CoveredIndexMatcher > _matcher;
+ long long _nscanned;
+ void init() { tailable_ = false; }
+ };
+
+ /* used for order { $natural: -1 } */
+ class ReverseCursor : public BasicCursor {
+ public:
+ ReverseCursor(DiskLoc dl) : BasicCursor( dl, reverse() ) { }
+ ReverseCursor() : BasicCursor( reverse() ) { }
+ virtual string toString() { return "ReverseCursor"; }
+ };
+
+ class ForwardCappedCursor : public BasicCursor, public AdvanceStrategy {
+ public:
+ ForwardCappedCursor( NamespaceDetails *nsd = 0, const DiskLoc &startLoc = DiskLoc() );
+ virtual string toString() {
+ return "ForwardCappedCursor";
+ }
+ virtual DiskLoc next( const DiskLoc &prev ) const;
+ virtual bool capped() const { return true; }
+ private:
+ NamespaceDetails *nsd;
+ };
+
+ class ReverseCappedCursor : public BasicCursor, public AdvanceStrategy {
+ public:
+ ReverseCappedCursor( NamespaceDetails *nsd = 0, const DiskLoc &startLoc = DiskLoc() );
+ virtual string toString() {
+ return "ReverseCappedCursor";
+ }
+ virtual DiskLoc next( const DiskLoc &prev ) const;
+ virtual bool capped() const { return true; }
+ private:
+ NamespaceDetails *nsd;
+ };
+
+} // namespace mongo
diff --git a/src/mongo/db/d_concurrency.cpp b/src/mongo/db/d_concurrency.cpp
new file mode 100755
index 00000000000..e3ad974cbfc
--- /dev/null
+++ b/src/mongo/db/d_concurrency.cpp
@@ -0,0 +1,231 @@
+// @file d_concurrency.cpp
+
+#include "pch.h"
+#include "d_concurrency.h"
+#include "../util/concurrency/threadlocal.h"
+#include "../util/concurrency/rwlock.h"
+#include "../util/concurrency/value.h"
+#include "../util/assert_util.h"
+#include "client.h"
+#include "namespacestring.h"
+#include "d_globals.h"
+
+// oplog locking
+// no top level read locks
+// system.profile writing
+// oplog now
+// yielding
+// commitIfNeeded
+
+namespace mongo {
+
+ using namespace clcimpl;
+
+ Client::LockStatus::LockStatus() {
+ excluder=global=collection=0;
+ }
+
+ namespace clcimpl {
+ Shared::Shared(unsigned& _state, RWLock& lock) : state(_state) {
+ rw = 0;
+ if( state ) {
+ // already locked
+ dassert( (state & (AcquireShared|AcquireExclusive)) == 0 );
+ return;
+ }
+ rw = &lock;
+ state = AcquireShared;
+ rw->lock_shared();
+ state = LockedShared;
+ }
+ Shared::~Shared() {
+ if( rw ) {
+ state = Unlocked;
+ rw->unlock_shared();
+ }
+ }
+ Exclusive::Exclusive(unsigned& _state, RWLock& lock) : state(_state) {
+ rw = 0;
+ if( state ) {
+ // already locked
+ dassert( (state & (AcquireShared|AcquireExclusive)) == 0 );
+ assert( state == LockedExclusive ); // can't be in shared state
+ return;
+ }
+ rw = &lock;
+ state = AcquireExclusive;
+ rw->lock();
+ state = LockedExclusive;
+ }
+ Exclusive::~Exclusive() {
+ if( rw ) {
+ state = Unlocked;
+ rw->unlock();
+ }
+ }
+ } // clcimpl namespace
+
+ // this tie-in temporary until MongoMutex is folded in more directly.
+ // called when the lock has been achieved
+ void MongoMutex::lockedExclusively() {
+ Client& c = cc();
+ curopGotLock(&c); // hopefully lockStatus replaces one day
+ c.lockStatus.global = clcimpl::LockedExclusive;
+ _minfo.entered(); // hopefully eliminate one day
+ }
+
+ void MongoMutex::unlockingExclusively() {
+ Client& c = cc();
+ _minfo.leaving();
+ c.lockStatus.global = Unlocked;
+ }
+
+ MongoMutex::MongoMutex(const char *name) : _m(name) {
+ static int n = 0;
+ assert( ++n == 1 ); // below releasingWriteLock we assume MongoMutex is a singleton, and uses dbMutex ref above
+ _remapPrivateViewRequested = false;
+ }
+
+ bool subcollectionOf(const string& parent, const char *child) {
+ if( parent == child )
+ return true;
+ if( !str::startsWith(child, parent) )
+ return false;
+ const char *p = child + parent.size();
+ uassert(15963, str::stream() << "bad collection name: " << child, !str::endsWith(p, '.'));
+ return *p == '.' && p[1] == '$';
+ }
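+ // examples (illustrative): subcollectionOf("test.foo", "test.foo") and
+ // subcollectionOf("test.foo", "test.foo.$a_1") are true, while
+ // subcollectionOf("test.foo", "test.foobar") is false.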
+
+ // (maybe tbd) ...
+ // for simplicity, for now we use the global write lock when writing to system.* collections;
+ // this has some speed advantages, as we don't need to take an extra latch just for that,
+ // and it avoids cases that would otherwise need careful handling, such as NamespaceDetails
+ // methods reaching into system.indexes implicitly
+ // exception : system.profile
+ static bool lkspecial(const string& ns) {
+ NamespaceString s(ns);
+ return s.isSystem() && s.coll != "system.profile";
+ }
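+ // e.g. (illustrative): lkspecial("test.system.indexes") is true, while
+ // lkspecial("test.system.profile") and lkspecial("test.foo") are false.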
+
+ /** Notes on d.writeExcluder
+ we want to be able to block any attempted write while allowing reads; additionally,
+ we force non-greedy acquisition so that reads can continue --
+ that is, we disallow greediness of write lock acquisitions. This lock exists for that purpose.
+ the #1 need is groupCommitWithLimitedLocks(), but it is useful elsewhere, such as for lock and fsync.
+ */
+
+ ExcludeAllWrites::ExcludeAllWrites() :
+ lk(cc().lockStatus.excluder, d.writeExcluder),
+ gslk()
+ {
+ LOG(3) << "ExcludeAllWrites" << endl;
+ wassert( !d.dbMutex.isWriteLocked() );
+ };
+ ExcludeAllWrites::~ExcludeAllWrites() {
+ }
+
+ // CLC turns on the "collection level concurrency" code
+ // (which is under development and not finished)
+#if defined(CLC)
+ // called after a context is set. check that the correct collection is locked
+ void Client::checkLocks() const {
+ DEV {
+ if( !d.dbMutex.isWriteLocked() ) {
+ const char *n = ns();
+ if( lockStatus.whichCollection.empty() ) {
+ log() << "DEBUG checkLocks error expected to already be locked: " << n << endl;
+ dassert(false);
+ }
+ dassert( subcollectionOf(lockStatus.whichCollection, n) || lkspecial(n) );
+ }
+ }
+ }
+#endif
+
+ // we don't keep these locks in the namespacedetailstransient and Database
+ // objects -- that makes things safer as we need not prove to ourselves that they
+ // are always in scope when we need them.
+ // todo: we don't clean these locks up yet.
+ // todo: avoiding the mutex here might be nice.
+ class LockObjectForEachCollection {
+ //mapsf<string,RWLock*> dblocks;
+ mapsf<string,RWLock*> nslocks;
+ public:
+ /*RWLock& fordb(string db) {
+ mapsf<string,RWLock*>::ref r(dblocks);
+ RWLock*& rw = r[db];
+ if( rw == 0 )
+ rw = new RWLock(0);
+ return *rw;
+ }*/
+ RWLock& forns(string ns) {
+ mapsf<string,RWLock*>::ref r(nslocks);
+#if defined(CLC)
+ massert(15964, str::stream() << "bad collection name to lock: " << ns, str::contains(ns, '.'));
+#endif
+ RWLock*& rw = r[ns];
+ if( rw == 0 ) {
+ rw = new RWLock(0);
+ }
+ return *rw;
+ }
+ } theLocks;
+
+#if defined(CLC)
+ LockCollectionForWriting::Locks::Locks(string ns) :
+ excluder(d.writeExcluder),
+ gslk(),
+ clk(theLocks.forns(ns),true)
+ { }
+ LockCollectionForWriting::~LockCollectionForWriting() {
+ if( locks.get() ) {
+ Client::LockStatus& s = cc().lockStatus;
+ s.whichCollection.clear();
+ }
+ }
+ LockCollectionForWriting::LockCollectionForWriting(string coll)
+ {
+ Client::LockStatus& s = cc().lockStatus;
+ LockBits b(s.state);
+ if( !s.whichCollection.empty() ) {
+ if( !subcollectionOf(s.whichCollection, coll.c_str()) ) {
+ massert(15937, str::stream() << "can't nest lock of " << coll << " beneath " << s.whichCollection, false);
+ }
+ if( b.get(LockBits::Collection) != LockBits::Exclusive ) {
+ massert(15938, str::stream() << "want collection write lock but it is already read locked " << s.state, false);
+ }
+ return;
+ }
+ verify(15965, !lkspecial(coll)); // you must global write lock for writes to special's
+ s.whichCollection = coll;
+ b.set(LockBits::Collection, LockBits::NotLocked, LockBits::Exclusive);
+ locks.reset( new Locks(coll) );
+ }
+#endif
+
+ LockCollectionForReading::LockCollectionForReading(string ns) :
+ gslk(),
+ clk( cc().lockStatus.collection, theLocks.forns(ns) )
+ {
+ Client::LockStatus& s = cc().lockStatus;
+ if( s.whichCollection.empty() ) {
+ s.whichCollection = ns;
+ }
+ else {
+ if( !subcollectionOf(s.whichCollection, ns.c_str()) ) {
+ if( lkspecial(ns) )
+ return;
+ massert(15939,
+ str::stream() << "can't nest lock of " << ns << " beneath " << s.whichCollection,
+ false);
+ }
+ }
+ }
+ LockCollectionForReading::~LockCollectionForReading() {
+ if( !clk.recursed() ) {
+ Client::LockStatus& s = cc().lockStatus;
+ s.whichCollection.clear();
+ }
+ }
+
+}
diff --git a/src/mongo/db/d_concurrency.h b/src/mongo/db/d_concurrency.h
new file mode 100644
index 00000000000..ba2f64f5126
--- /dev/null
+++ b/src/mongo/db/d_concurrency.h
@@ -0,0 +1,67 @@
+// @file d_concurrency.h
+
+#pragma once
+
+#include "../util/concurrency/rwlock.h"
+#include "db/mongomutex.h"
+
+namespace mongo {
+
+ namespace clcimpl {
+ enum LockStates { Unlocked, AcquireShared=1, LockedShared=2, AcquireExclusive=4, LockedExclusive=8 };
+ class Shared : boost::noncopyable {
+ unsigned& state;
+ RWLock *rw;
+ public:
+ Shared(unsigned& state, RWLock& lock);
+ ~Shared();
+ bool recursed() const { return rw == 0; }
+ };
+ class Exclusive : boost::noncopyable {
+ unsigned& state;
+ RWLock *rw;
+ public:
+ Exclusive(unsigned& state, RWLock& lock);
+ ~Exclusive();
+ };
+ }
+
+ typedef readlock GlobalSharedLock;
+
+ class ExcludeAllWrites : boost::noncopyable {
+ clcimpl::Exclusive lk;
+ GlobalSharedLock gslk;
+ public:
+ ExcludeAllWrites();
+ ~ExcludeAllWrites();
+ };
+
+ class todoGlobalWriteLock : boost::noncopyable {
+ public:
+ };
+
+ class LockCollectionForReading : boost::noncopyable {
+ GlobalSharedLock gslk;
+ clcimpl::Shared clk;
+ public:
+ LockCollectionForReading(string coll);
+ ~LockCollectionForReading();
+ };
+
+#if defined(CLC)
+ class LockCollectionForWriting : boost::noncopyable {
+ struct Locks {
+ Locks(string ns);
+ SimpleRWLock::Shared excluder;
+ GlobalSharedLock gslk;
+ rwlock clk;
+ };
+ scoped_ptr<Locks> locks;
+ public:
+ LockCollectionForWriting(string db);
+ ~LockCollectionForWriting();
+ };
+#else
+#endif
+
+}
diff --git a/src/mongo/db/d_globals.cpp b/src/mongo/db/d_globals.cpp
new file mode 100644
index 00000000000..7e0fd9e8cb0
--- /dev/null
+++ b/src/mongo/db/d_globals.cpp
@@ -0,0 +1,20 @@
+// @file d_globals.cpp
+
+#include "pch.h"
+#include "d_globals.h"
+#include "../util/concurrency/rwlock.h"
+#include "clientcursor.h"
+#include "mongomutex.h"
+
+namespace mongo {
+
+ DGlobals::DGlobals() :
+ writeExcluder( *(new RWLock("writeexcluder")) ),
+ dbMutex( *(new MongoMutex("dbMutex")) ),
+ clientCursorMonitor( *(new ClientCursorMonitor()) )
+ {
+ }
+
+ DGlobals d;
+
+}
diff --git a/src/mongo/db/d_globals.h b/src/mongo/db/d_globals.h
new file mode 100644
index 00000000000..7c95d463cc3
--- /dev/null
+++ b/src/mongo/db/d_globals.h
@@ -0,0 +1,27 @@
+// @file d_globals.h
+//
+// these are global variables used in mongod ("d"). also used in the test binary, as that is effectively a variation on mongod code.
+// that is, these are not in mongos.
+//
+
+#pragma once
+
+namespace mongo {
+
+ class RWLock;
+ class MongoMutex;
+ class ClientCursorMonitor;
+
+ struct DGlobals : boost::noncopyable {
+ DGlobals();
+
+ // these are intentionally never deleted:
+ RWLock& writeExcluder;
+ MongoMutex &dbMutex;
+ ClientCursorMonitor& clientCursorMonitor;
+
+ };
+
+ extern DGlobals d;
+
+};
diff --git a/src/mongo/db/database.cpp b/src/mongo/db/database.cpp
new file mode 100644
index 00000000000..2d55fd35626
--- /dev/null
+++ b/src/mongo/db/database.cpp
@@ -0,0 +1,423 @@
+// database.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "pdfile.h"
+#include "database.h"
+#include "instance.h"
+#include "clientcursor.h"
+#include "databaseholder.h"
+
+namespace mongo {
+
+ bool Database::_openAllFiles = true;
+
+ void assertDbAtLeastReadLocked(const Database *) {
+ // temp impl
+ d.dbMutex.assertAtLeastReadLocked();
+ }
+
+ void assertDbWriteLocked(const Database *) {
+ // temp impl
+ d.dbMutex.assertWriteLocked();
+ }
+
+ Database::~Database() {
+ d.dbMutex.assertWriteLocked();
+ magic = 0;
+ size_t n = _files.size();
+ for ( size_t i = 0; i < n; i++ )
+ delete _files[i];
+ if( ccByLoc.size() ) {
+ log() << "\n\n\nWARNING: ccByLoc not empty on database close! " << ccByLoc.size() << ' ' << name << endl;
+ }
+ }
+
+ Database::Database(const char *nm, bool& newDb, const string& _path )
+ : name(nm), path(_path), namespaceIndex( path, name ),
+ profileName(name + ".system.profile")
+ {
+ try {
+ {
+ // check db name is valid
+ size_t L = strlen(nm);
+ uassert( 10028 , "db name is empty", L > 0 );
+ uassert( 10032 , "db name too long", L < 64 );
+ uassert( 10029 , "bad db name [1]", *nm != '.' );
+ uassert( 10030 , "bad db name [2]", nm[L-1] != '.' );
+ uassert( 10031 , "bad char(s) in db name", strchr(nm, ' ') == 0 );
+ }
+ newDb = namespaceIndex.exists();
+ profile = cmdLine.defaultProfile;
+ checkDuplicateUncasedNames(true);
+ // If already exists, open. Otherwise behave as if empty until
+ // there's a write, then open.
+ if ( ! newDb || cmdLine.defaultProfile ) {
+ namespaceIndex.init();
+ if( _openAllFiles )
+ openAllFiles();
+ }
+ magic = 781231;
+ } catch(std::exception& e) {
+ log() << "warning database " << path << ' ' << nm << " could not be opened" << endl;
+ log() << e.what() << endl;
+ // since destructor won't be called:
+ for ( size_t i = 0; i < _files.size(); i++ )
+ delete _files[i];
+ throw;
+ }
+ }
+
+ void Database::checkDuplicateUncasedNames(bool inholderlock) const {
+ string duplicate = duplicateUncasedName(inholderlock, name, path );
+ if ( !duplicate.empty() ) {
+ stringstream ss;
+ ss << "db already exists with different case other: [" << duplicate << "] me [" << name << "]";
+ uasserted( DatabaseDifferCaseCode , ss.str() );
+ }
+ }
+
+ /*static*/
+ string Database::duplicateUncasedName( bool inholderlock, const string &name, const string &path, set< string > *duplicates ) {
+ d.dbMutex.assertAtLeastReadLocked();
+
+ if ( duplicates ) {
+ duplicates->clear();
+ }
+
+ vector<string> others;
+ getDatabaseNames( others , path );
+
+ set<string> allShortNames;
+ dbHolder().getAllShortNames( inholderlock, allShortNames );
+
+ others.insert( others.end(), allShortNames.begin(), allShortNames.end() );
+
+ for ( unsigned i=0; i<others.size(); i++ ) {
+
+ if ( strcasecmp( others[i].c_str() , name.c_str() ) )
+ continue;
+
+ if ( strcmp( others[i].c_str() , name.c_str() ) == 0 )
+ continue;
+
+ if ( duplicates ) {
+ duplicates->insert( others[i] );
+ } else {
+ return others[i];
+ }
+ }
+ if ( duplicates ) {
+ return duplicates->empty() ? "" : *duplicates->begin();
+ }
+ return "";
+ }
+
+ boost::filesystem::path Database::fileName( int n ) const {
+ stringstream ss;
+ ss << name << '.' << n;
+ boost::filesystem::path fullName;
+ fullName = boost::filesystem::path(path);
+ if ( directoryperdb )
+ fullName /= name;
+ fullName /= ss.str();
+ return fullName;
+ }
+
+ bool Database::openExistingFile( int n ) {
+ assert(this);
+ d.dbMutex.assertWriteLocked();
+ {
+ // must not yet be visible to others as we aren't in the db's write lock and
+ // we will write to _files vector - thus this assert.
+ bool loaded = dbHolder().__isLoaded(name, path);
+ assert( !loaded );
+ }
+ // additionally must be in the dbholder mutex (no assert for that yet)
+
+ // todo: why here? that could be bad as we may be read locked only here
+ namespaceIndex.init();
+
+ if ( n < 0 || n >= DiskLoc::MaxFiles ) {
+ massert( 15924 , str::stream() << "getFile(): bad file number value " << n << " (corrupt db?): run repair", false);
+ }
+
+ {
+ if( n < (int) _files.size() && _files[n] ) {
+ dlog(2) << "openExistingFile " << n << " is already open" << endl;
+ return true;
+ }
+ }
+
+ {
+ boost::filesystem::path fullName = fileName( n );
+ string fullNameString = fullName.string();
+ MongoDataFile *df = new MongoDataFile(n);
+ try {
+ if( !df->openExisting( fullNameString.c_str() ) ) {
+ delete df;
+ return false;
+ }
+ }
+ catch ( AssertionException& ) {
+ delete df;
+ throw;
+ }
+ while ( n >= (int) _files.size() ) {
+ _files.push_back(0);
+ }
+ _files[n] = df;
+ }
+
+ return true;
+ }
+
+    // todo : we stop once a datafile does not exist.
+    //        if one datafile were missing we should keep going for
+    //        repair purposes, yet we do not.
+ void Database::openAllFiles() {
+ //log() << "TEMP openallfiles " << path << ' ' << name << endl;
+ assert(this);
+ int n = 0;
+ while( openExistingFile(n) ) {
+ n++;
+ }
+
+ /*
+ int n = 0;
+ while( exists(n) ) {
+ getFile(n);
+ n++;
+ }
+ // If last file is empty, consider it preallocated and make sure it's not mapped
+ // until a write is requested
+ if ( n > 1 && getFile( n - 1 )->getHeader()->isEmpty() ) {
+ delete _files[ n - 1 ];
+ _files.pop_back();
+ }*/
+ }
+
+ // todo: this is called a lot. streamline the common case
+ MongoDataFile* Database::getFile( int n, int sizeNeeded , bool preallocateOnly) {
+ assert(this);
+ DEV assertDbAtLeastReadLocked(this);
+
+ namespaceIndex.init();
+ if ( n < 0 || n >= DiskLoc::MaxFiles ) {
+ out() << "getFile(): n=" << n << endl;
+ massert( 10295 , "getFile(): bad file number value (corrupt db?): run repair", false);
+ }
+ DEV {
+ if ( n > 100 ) {
+ out() << "getFile(): n=" << n << endl;
+ }
+ }
+ MongoDataFile* p = 0;
+ if ( !preallocateOnly ) {
+ while ( n >= (int) _files.size() ) {
+ DEV if( !d.dbMutex.isWriteLocked() ) {
+ log() << "error: getFile() called in a read lock, yet file to return is not yet open" << endl;
+ log() << " getFile(" << n << ") _files.size:" <<_files.size() << ' ' << fileName(n).string() << endl;
+ log() << " context ns: " << cc().ns() << " openallfiles:" << _openAllFiles << endl;
+ }
+ assertDbWriteLocked(this);
+ _files.push_back(0);
+ }
+ p = _files[n];
+ }
+ if ( p == 0 ) {
+ assertDbWriteLocked(this);
+ boost::filesystem::path fullName = fileName( n );
+ string fullNameString = fullName.string();
+ p = new MongoDataFile(n);
+ int minSize = 0;
+ if ( n != 0 && _files[ n - 1 ] )
+ minSize = _files[ n - 1 ]->getHeader()->fileLength;
+ if ( sizeNeeded + DataFileHeader::HeaderSize > minSize )
+ minSize = sizeNeeded + DataFileHeader::HeaderSize;
+ try {
+ p->open( fullNameString.c_str(), minSize, preallocateOnly );
+ }
+ catch ( AssertionException& ) {
+ delete p;
+ throw;
+ }
+ if ( preallocateOnly )
+ delete p;
+ else
+ _files[n] = p;
+ }
+ return preallocateOnly ? 0 : p;
+ }
+
+ MongoDataFile* Database::addAFile( int sizeNeeded, bool preallocateNextFile ) {
+ assertDbWriteLocked(this);
+ int n = (int) _files.size();
+ MongoDataFile *ret = getFile( n, sizeNeeded );
+ if ( preallocateNextFile )
+ preallocateAFile();
+ return ret;
+ }
+
+ bool fileIndexExceedsQuota( const char *ns, int fileIndex, bool enforceQuota ) {
+ return
+ cmdLine.quota &&
+ enforceQuota &&
+ fileIndex >= cmdLine.quotaFiles &&
+ // we don't enforce the quota on "special" namespaces as that could lead to problems -- e.g.
+ // rejecting an index insert after inserting the main record.
+ !NamespaceString::special( ns ) &&
+ NamespaceString( ns ).db != "local";
+ }
+
+ MongoDataFile* Database::suitableFile( const char *ns, int sizeNeeded, bool preallocate, bool enforceQuota ) {
+
+ // check existing files
+ for ( int i=numFiles()-1; i>=0; i-- ) {
+ MongoDataFile* f = getFile( i );
+ if ( f->getHeader()->unusedLength >= sizeNeeded ) {
+ if ( fileIndexExceedsQuota( ns, i-1, enforceQuota ) ) // NOTE i-1 is the value used historically for this check.
+ ;
+ else
+ return f;
+ }
+ }
+
+ if ( fileIndexExceedsQuota( ns, numFiles(), enforceQuota ) )
+ uasserted(12501, "quota exceeded");
+
+ // allocate files until we either get one big enough or hit maxSize
+ for ( int i = 0; i < 8; i++ ) {
+ MongoDataFile* f = addAFile( sizeNeeded, preallocate );
+
+ if ( f->getHeader()->unusedLength >= sizeNeeded )
+ return f;
+
+ if ( f->getHeader()->fileLength >= MongoDataFile::maxSize() ) // this is as big as they get so might as well stop
+ return f;
+ }
+
+ uasserted(14810, "couldn't allocate space (suitableFile)"); // callers don't check for null return code
+ return 0;
+ }
+
+ MongoDataFile* Database::newestFile() {
+ int n = numFiles();
+ if ( n == 0 )
+ return 0;
+ return getFile(n-1);
+ }
+
+
+ Extent* Database::allocExtent( const char *ns, int size, bool capped, bool enforceQuota ) {
+ // todo: when profiling, these may be worth logging into profile collection
+ bool fromFreeList = true;
+ Extent *e = DataFileMgr::allocFromFreeList( ns, size, capped );
+ if( e == 0 ) {
+ fromFreeList = false;
+ e = suitableFile( ns, size, !capped, enforceQuota )->createExtent( ns, size, capped );
+ }
+ LOG(1) << "allocExtent " << ns << " size " << size << ' ' << fromFreeList << endl;
+ return e;
+ }
+
+
+ bool Database::setProfilingLevel( int newLevel , string& errmsg ) {
+ if ( profile == newLevel )
+ return true;
+
+ if ( newLevel < 0 || newLevel > 2 ) {
+ errmsg = "profiling level has to be >=0 and <= 2";
+ return false;
+ }
+
+ if ( newLevel == 0 ) {
+ profile = 0;
+ return true;
+ }
+
+ assert( cc().database() == this );
+
+ if ( ! namespaceIndex.details( profileName.c_str() ) ) {
+ log() << "creating profile collection: " << profileName << endl;
+ BSONObjBuilder spec;
+ spec.appendBool( "capped", true );
+ spec.append( "size", 1024*1024 );
+                if ( ! userCreateNS( profileName.c_str(), spec.done(), errmsg , false /* we don't replicate profile messages */ ) ) {
+ return false;
+ }
+ }
+ profile = newLevel;
+ return true;
+ }
+
+ bool Database::exists(int n) const {
+ return boost::filesystem::exists( fileName( n ) );
+ }
+
+ int Database::numFiles() const {
+ DEV assertDbAtLeastReadLocked(this);
+ return (int) _files.size();
+ }
+
+ void Database::flushFiles( bool sync ) {
+ assertDbAtLeastReadLocked(this);
+ for( vector<MongoDataFile*>::iterator i = _files.begin(); i != _files.end(); i++ ) {
+ MongoDataFile *f = *i;
+ f->flush(sync);
+ }
+ }
+
+ long long Database::fileSize() const {
+ long long size=0;
+ for (int n=0; exists(n); n++)
+ size += boost::filesystem::file_size( fileName(n) );
+ return size;
+ }
+
+ Database* DatabaseHolder::getOrCreate( const string& ns , const string& path , bool& justCreated ) {
+ d.dbMutex.assertAtLeastReadLocked();
+
+ DBs& m = _paths[path];
+
+ string dbname = _todb( ns );
+
+ {
+ DBs::iterator i = m.find(dbname);
+ if( i != m.end() ) {
+ justCreated = false;
+ return i->second;
+ }
+ }
+
+        // todo: protect against getting sprayed with requests for different db names that DNE -
+        //       that would make the DBs map very large.  not clear how to handle that though;
+        //       perhaps just log it, which is what we do here with the "> 40" :
+ bool cant = !d.dbMutex.isWriteLocked();
+ if( logLevel >= 1 || m.size() > 40 || cant || DEBUG_BUILD ) {
+ log() << "opening db: " << (path==dbpath?"":path) << ' ' << dbname << endl;
+ }
+ massert(15927, "can't open database in a read lock. if db was just closed, consider retrying the query. might otherwise indicate an internal error", !cant);
+
+ Database *db = new Database( dbname.c_str() , justCreated , path );
+ m[dbname] = db;
+ _size++;
+ return db;
+ }
+
+} // namespace mongo
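A minimal sketch of how the allocation path defined above chains together (illustrative only, not part of this commit; assumes a write-locked caller and an already open Database*):

    // Illustrative sketch: allocExtent() tries DataFileMgr::allocFromFreeList() first,
    // then suitableFile() scans existing files newest-to-oldest for enough unusedLength
    // (respecting fileIndexExceedsQuota()), and finally addAFile() grows the database,
    // at most 8 attempts, until a file is big enough or MongoDataFile::maxSize() is hit.
    Extent* allocateSpaceSketch( Database* db, const char* ns, int sizeNeeded ) {
        return db->allocExtent( ns, sizeNeeded, /*capped=*/false, /*enforceQuota=*/true );
    }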
diff --git a/src/mongo/db/database.h b/src/mongo/db/database.h
new file mode 100644
index 00000000000..a7867e20e8c
--- /dev/null
+++ b/src/mongo/db/database.h
@@ -0,0 +1,145 @@
+// database.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "cmdline.h"
+#include "namespace.h"
+
+namespace mongo {
+
+ class Extent;
+ class MongoDataFile;
+ class ClientCursor;
+ struct ByLocKey;
+ typedef map<ByLocKey, ClientCursor*> CCByLoc;
+
+ /**
+     * Database represents a single database.
+     * Each database has its own set of files -- dbname.ns, dbname.0, dbname.1, ...
+ * NOT memory mapped
+ */
+ class Database {
+ public:
+ static bool _openAllFiles;
+
+ // you probably need to be in dbHolderMutex when constructing this
+ Database(const char *nm, /*out*/ bool& newDb, const string& _path = dbpath);
+ private:
+ ~Database(); // closes files and other cleanup see below.
+ public:
+ /* you must use this to close - there is essential code in this method that is not in the ~Database destructor.
+ thus the destructor is private. this could be cleaned up one day...
+ */
+ static void closeDatabase( const char *db, const string& path );
+
+ void openAllFiles();
+
+ /**
+ * tries to make sure that this hasn't been deleted
+ */
+ bool isOk() const { return magic == 781231; }
+
+ bool isEmpty() { return ! namespaceIndex.allocated(); }
+
+ /**
+ * total file size of Database in bytes
+ */
+ long long fileSize() const;
+
+ int numFiles() const;
+
+ /**
+         * returns the full filesystem path for file number n
+ */
+ boost::filesystem::path fileName( int n ) const;
+
+ private:
+ bool exists(int n) const;
+ bool openExistingFile( int n );
+
+ public:
+ /**
+ * return file n. if it doesn't exist, create it
+ */
+ MongoDataFile* getFile( int n, int sizeNeeded = 0, bool preallocateOnly = false );
+
+ MongoDataFile* addAFile( int sizeNeeded, bool preallocateNextFile );
+
+ /**
+ * makes sure we have an extra file at the end that is empty
+ * safe to call this multiple times - the implementation will only preallocate one file
+ */
+ void preallocateAFile() { getFile( numFiles() , 0, true ); }
+
+ MongoDataFile* suitableFile( const char *ns, int sizeNeeded, bool preallocate, bool enforceQuota );
+
+ Extent* allocExtent( const char *ns, int size, bool capped, bool enforceQuota );
+
+ MongoDataFile* newestFile();
+
+ /**
+ * @return true if success. false if bad level or error creating profile ns
+ */
+ bool setProfilingLevel( int newLevel , string& errmsg );
+
+ void flushFiles( bool sync );
+
+ /**
+ * @return true if ns is part of the database
+ * ns=foo.bar, db=foo returns true
+ */
+ bool ownsNS( const string& ns ) const {
+ if ( ! startsWith( ns , name ) )
+ return false;
+ return ns[name.size()] == '.';
+ }
+ private:
+ /**
+ * @throws DatabaseDifferCaseCode if the name is a duplicate based on
+ * case insensitive matching.
+ */
+ void checkDuplicateUncasedNames(bool inholderlockalready) const;
+ public:
+ /**
+ * @return name of an existing database with same text name but different
+ * casing, if one exists. Otherwise the empty string is returned. If
+ * 'duplicates' is specified, it is filled with all duplicate names.
+ */
+ static string duplicateUncasedName( bool inholderlockalready, const string &name, const string &path, set< string > *duplicates = 0 );
+
+ const string name; // "alleyinsider"
+ const string path;
+
+ private:
+
+        // must be in the dbLock when touching this (and write locked when writing to it, of course)
+ // however during Database object construction we aren't, which is ok as it isn't yet visible
+ // to others and we are in the dbholder lock then.
+ vector<MongoDataFile*> _files;
+
+ public: // this should be private later
+
+ NamespaceIndex namespaceIndex;
+ int profile; // 0=off.
+ const string profileName; // "alleyinsider.system.profile"
+ CCByLoc ccByLoc;
+ int magic; // used for making sure the object is still loaded in memory
+ };
+
+} // namespace mongo
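As a quick orientation, a hedged usage sketch of the accessors declared above (illustrative only, not part of this commit; db is assumed to come from the DatabaseHolder while at least read locked):

    // Illustrative sketch of the public Database surface.
    void inspectDatabaseSketch( Database* db ) {
        if ( !db->isOk() )                                         // magic-number check
            return;
        bool mine    = db->ownsNS( db->name + ".system.profile" ); // "<name>.*" belongs to this db
        long long sz = db->fileSize();                             // sum of <name>.0, <name>.1, ... on disk
        int files    = db->numFiles();                             // DEV-asserts at least a read lock
        (void)mine; (void)sz; (void)files;
    }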
diff --git a/src/mongo/db/databaseholder.h b/src/mongo/db/databaseholder.h
new file mode 100644
index 00000000000..7c878c4ed63
--- /dev/null
+++ b/src/mongo/db/databaseholder.h
@@ -0,0 +1,126 @@
+// @file databaseholder.h
+
+#pragma once
+
+namespace mongo {
+
+ /**
+ * path + dbname -> Database
+ */
+ class DatabaseHolder {
+ typedef map<string,Database*> DBs;
+ typedef map<string,DBs> Paths;
+ public:
+ DatabaseHolder() : _size(0) { }
+
+ bool __isLoaded( const string& ns , const string& path ) const {
+ Paths::const_iterator x = _paths.find( path );
+ if ( x == _paths.end() )
+ return false;
+ const DBs& m = x->second;
+
+ string db = _todb( ns );
+
+ DBs::const_iterator it = m.find(db);
+ return it != m.end();
+ }
+ // must be write locked as otherwise isLoaded could go false->true on you
+ // in the background and you might not expect that.
+ bool _isLoaded( const string& ns , const string& path ) const {
+ d.dbMutex.assertWriteLocked();
+ return __isLoaded(ns,path);
+ }
+
+ Database * get( const string& ns , const string& path ) const {
+ d.dbMutex.assertAtLeastReadLocked();
+ Paths::const_iterator x = _paths.find( path );
+ if ( x == _paths.end() )
+ return 0;
+ const DBs& m = x->second;
+ string db = _todb( ns );
+ DBs::const_iterator it = m.find(db);
+ if ( it != m.end() )
+ return it->second;
+ return 0;
+ }
+
+ void _put( const string& ns , const string& path , Database * db ) {
+ d.dbMutex.assertAtLeastReadLocked();
+ DBs& m = _paths[path];
+ Database*& d = m[_todb(ns)];
+ if( d ) {
+ dlog(2) << "info dbholder put db was already set " << ns << endl;
+ }
+ else {
+ _size++;
+ }
+ d = db;
+ }
+
+ Database* getOrCreate( const string& ns , const string& path , bool& justCreated );
+
+ void erase( const string& ns , const string& path ) {
+            d.dbMutex.assertWriteLocked(); // write lock req'd as a Database obj can be in use; dbHolderMutex is mainly just to control the holder itself
+ DBs& m = _paths[path];
+ _size -= (int)m.erase( _todb( ns ) );
+ }
+
+ /** @param force - force close even if something underway - use at shutdown */
+ bool closeAll( const string& path , BSONObjBuilder& result, bool force );
+
+        // "info" because this is informational only; it could change on you if you are not write locked
+ int sizeInfo() const { return _size; }
+
+ void forEach(boost::function<void(Database *)> f) const {
+ d.dbMutex.assertWriteLocked();
+ for ( Paths::const_iterator i=_paths.begin(); i!=_paths.end(); i++ ) {
+ DBs m = i->second;
+ for( DBs::const_iterator j=m.begin(); j!=m.end(); j++ ) {
+ f(j->second);
+ }
+ }
+ }
+
+ /**
+ * gets all unique db names, ignoring paths
+ */
+ void getAllShortNames( bool locked, set<string>& all ) const {
+ d.dbMutex.assertAtLeastReadLocked();
+ for ( Paths::const_iterator i=_paths.begin(); i!=_paths.end(); i++ ) {
+ DBs m = i->second;
+ for( DBs::const_iterator j=m.begin(); j!=m.end(); j++ ) {
+ all.insert( j->first );
+ }
+ }
+ }
+
+ private:
+ static string _todb( const string& ns ) {
+ string d = __todb( ns );
+ uassert( 13280 , (string)"invalid db name: " + ns , NamespaceString::validDBName( d ) );
+ return d;
+ }
+ static string __todb( const string& ns ) {
+ size_t i = ns.find( '.' );
+ if ( i == string::npos ) {
+ uassert( 13074 , "db name can't be empty" , ns.size() );
+ return ns;
+ }
+ uassert( 13075 , "db name can't be empty" , i > 0 );
+ return ns.substr( 0 , i );
+ }
+ Paths _paths;
+ int _size;
+ };
+
+ DatabaseHolder& dbHolderUnchecked();
+ inline const DatabaseHolder& dbHolder() {
+ dassert( d.dbMutex.atLeastReadLocked() );
+ return dbHolderUnchecked();
+ }
+ inline DatabaseHolder& dbHolderW() {
+ dassert( d.dbMutex.isWriteLocked() );
+ return dbHolderUnchecked();
+ }
+
+}
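A small sketch of the lock discipline and namespace-to-db mapping encoded above (illustrative only, not part of this commit): _todb() strips everything after the first '.', so any namespace in the same db resolves to one holder entry.

    // Illustrative sketch; requires at least a read lock, per the dassert in dbHolder().
    Database* lookupSketch( const string& path ) {
        const DatabaseHolder& holder = dbHolder();
        Database* a = holder.get( "foo.bar", path );            // db short name "foo"
        Database* b = holder.get( "foo.system.indexes", path ); // same short name "foo"
        assert( a == b );
        return a;
    }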
diff --git a/src/mongo/db/db.cpp b/src/mongo/db/db.cpp
new file mode 100644
index 00000000000..af03b447976
--- /dev/null
+++ b/src/mongo/db/db.cpp
@@ -0,0 +1,1309 @@
+// @file db.cpp : Defines main() for the mongod program.
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "db.h"
+#include "introspect.h"
+#include "repl.h"
+#include "../util/unittest.h"
+#include "../util/file_allocator.h"
+#include "../util/background.h"
+#include "../util/text.h"
+#include "dbmessage.h"
+#include "instance.h"
+#include "clientcursor.h"
+#include "pdfile.h"
+#include "stats/counters.h"
+#include "repl/rs.h"
+#include "../scripting/engine.h"
+#include "module.h"
+#include "cmdline.h"
+#include "stats/snapshots.h"
+#include "../util/concurrency/task.h"
+#include "../util/version.h"
+#include "../util/ramlog.h"
+#include "../util/net/message_server.h"
+#include "client.h"
+#include "restapi.h"
+#include "dbwebserver.h"
+#include "dur.h"
+#include "concurrency.h"
+#include "../s/d_writeback.h"
+#include "d_globals.h"
+
+#if defined(_WIN32)
+# include "../util/ntservice.h"
+#else
+# include <sys/file.h>
+#endif
+
+namespace mongo {
+
+ namespace dur {
+ extern unsigned long long DataLimitPerJournalFile;
+ }
+
+ /* only off if --nocursors which is for debugging. */
+ extern bool useCursors;
+
+ /* only off if --nohints */
+ extern bool useHints;
+
+ extern int diagLogging;
+ extern unsigned lenForNewNsFiles;
+ extern int lockFile;
+ extern bool checkNsFilesOnLoad;
+ extern string repairpath;
+
+ void setupSignals( bool inFork );
+ void startReplication();
+ void exitCleanly( ExitCode code );
+
+ CmdLine cmdLine;
+ static bool scriptingEnabled = true;
+ bool noHttpInterface = false;
+ bool shouldRepairDatabases = 0;
+ static bool forceRepair = 0;
+ Timer startupSrandTimer;
+
+ const char *ourgetns() {
+ Client *c = currentClient.get();
+ if ( ! c )
+ return "";
+ Client::Context* cc = c->getContext();
+ return cc ? cc->ns() : "";
+ }
+
+ struct MyStartupTests {
+ MyStartupTests() {
+ assert( sizeof(OID) == 12 );
+ }
+ } mystartupdbcpp;
+
+ QueryResult* emptyMoreResult(long long);
+
+
+    /* todo: make this a real test.  the stuff in dbtests/ seems to use dbdirectclient throughout, which exhaust doesn't support yet. */
+// QueryOption_Exhaust
+#define TESTEXHAUST 0
+#if( TESTEXHAUST )
+ void testExhaust() {
+ sleepsecs(1);
+ unsigned n = 0;
+ auto f = [&n](const BSONObj& o) {
+ assert( o.valid() );
+ //cout << o << endl;
+ n++;
+ bool testClosingSocketOnError = false;
+ if( testClosingSocketOnError )
+ assert(false);
+ };
+ DBClientConnection db(false);
+ db.connect("localhost");
+ const char *ns = "local.foo";
+ if( db.count(ns) < 10000 )
+ for( int i = 0; i < 20000; i++ )
+ db.insert(ns, BSON("aaa" << 3 << "b" << "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"));
+
+ try {
+ db.query(f, ns, Query() );
+ }
+ catch(...) {
+ cout << "hmmm" << endl;
+ }
+
+ try {
+ db.query(f, ns, Query() );
+ }
+ catch(...) {
+ cout << "caught" << endl;
+ }
+
+ cout << n << endl;
+ };
+#endif
+
+ void sysRuntimeInfo() {
+ out() << "sysinfo:" << endl;
+#if defined(_SC_PAGE_SIZE)
+ out() << " page size: " << (int) sysconf(_SC_PAGE_SIZE) << endl;
+#endif
+#if defined(_SC_PHYS_PAGES)
+ out() << " _SC_PHYS_PAGES: " << sysconf(_SC_PHYS_PAGES) << endl;
+#endif
+#if defined(_SC_AVPHYS_PAGES)
+ out() << " _SC_AVPHYS_PAGES: " << sysconf(_SC_AVPHYS_PAGES) << endl;
+#endif
+ }
+
+ /* if server is really busy, wait a bit */
+ void beNice() {
+ sleepmicros( Client::recommendedYieldMicros() );
+ }
+
+ class MyMessageHandler : public MessageHandler {
+ public:
+ virtual void connected( AbstractMessagingPort* p ) {
+ Client& c = Client::initThread("conn", p);
+ c.getAuthenticationInfo()->isLocalHost = p->remote().isLocalHost();
+ }
+
+ virtual void process( Message& m , AbstractMessagingPort* port , LastError * le) {
+ while ( true ) {
+ if ( inShutdown() ) {
+ log() << "got request after shutdown()" << endl;
+ break;
+ }
+
+ lastError.startRequest( m , le );
+
+ DbResponse dbresponse;
+ assembleResponse( m, dbresponse, port->remote() );
+
+ if ( dbresponse.response ) {
+ port->reply(m, *dbresponse.response, dbresponse.responseTo);
+ if( dbresponse.exhaust ) {
+ MsgData *header = dbresponse.response->header();
+ QueryResult *qr = (QueryResult *) header;
+ long long cursorid = qr->cursorId;
+ if( cursorid ) {
+ assert( dbresponse.exhaust && *dbresponse.exhaust != 0 );
+                        string ns = dbresponse.exhaust; // before reset() frees it...
+ m.reset();
+ BufBuilder b(512);
+ b.appendNum((int) 0 /*size set later in appendData()*/);
+ b.appendNum(header->id);
+ b.appendNum(header->responseTo);
+ b.appendNum((int) dbGetMore);
+ b.appendNum((int) 0);
+ b.appendStr(ns);
+ b.appendNum((int) 0); // ntoreturn
+ b.appendNum(cursorid);
+ m.appendData(b.buf(), b.len());
+ b.decouple();
+ DEV log() << "exhaust=true sending more" << endl;
+ beNice();
+ continue; // this goes back to top loop
+ }
+ }
+ }
+ break;
+ }
+ }
+
+ virtual void disconnected( AbstractMessagingPort* p ) {
+ Client * c = currentClient.get();
+ if( c ) c->shutdown();
+ globalScriptEngine->threadDone();
+ }
+
+ };
+
+ void listen(int port) {
+ //testTheDb();
+ MessageServer::Options options;
+ options.port = port;
+ options.ipList = cmdLine.bind_ip;
+
+ MessageServer * server = createServer( options , new MyMessageHandler() );
+ server->setAsTimeTracker();
+
+ startReplication();
+ if ( !noHttpInterface )
+ boost::thread web( boost::bind(&webServerThread, new RestAdminAccess() /* takes ownership */));
+
+#if(TESTEXHAUST)
+ boost::thread thr(testExhaust);
+#endif
+ server->run();
+ }
+
+
+ bool doDBUpgrade( const string& dbName , string errmsg , DataFileHeader * h ) {
+ static DBDirectClient db;
+
+ if ( h->version == 4 && h->versionMinor == 4 ) {
+ assert( PDFILE_VERSION == 4 );
+ assert( PDFILE_VERSION_MINOR == 5 );
+
+ list<string> colls = db.getCollectionNames( dbName );
+ for ( list<string>::iterator i=colls.begin(); i!=colls.end(); i++) {
+ string c = *i;
+ log() << "\t upgrading collection:" << c << endl;
+ BSONObj out;
+ bool ok = db.runCommand( dbName , BSON( "reIndex" << c.substr( dbName.size() + 1 ) ) , out );
+ if ( ! ok ) {
+ errmsg = "reindex failed";
+ log() << "\t\t reindex failed: " << out << endl;
+ return false;
+ }
+ }
+
+ h->versionMinor = 5;
+ return true;
+ }
+
+ // do this in the general case
+ return repairDatabase( dbName.c_str(), errmsg );
+ }
+
+ // ran at startup.
+ static void repairDatabasesAndCheckVersion() {
+ // LastError * le = lastError.get( true );
+ Client::GodScope gs;
+ log(1) << "enter repairDatabases (to check pdfile version #)" << endl;
+
+ //assert(checkNsFilesOnLoad);
+ checkNsFilesOnLoad = false; // we are mainly just checking the header - don't scan the whole .ns file for every db here.
+
+ dblock lk;
+ vector< string > dbNames;
+ getDatabaseNames( dbNames );
+ for ( vector< string >::iterator i = dbNames.begin(); i != dbNames.end(); ++i ) {
+ string dbName = *i;
+ log(1) << "\t" << dbName << endl;
+ Client::Context ctx( dbName );
+ MongoDataFile *p = cc().database()->getFile( 0 );
+ DataFileHeader *h = p->getHeader();
+ if ( !h->isCurrentVersion() || forceRepair ) {
+
+ if( h->version <= 0 ) {
+ uasserted(14026,
+ str::stream() << "db " << dbName << " appears corrupt pdfile version: " << h->version
+ << " info: " << h->versionMinor << ' ' << h->fileLength);
+ }
+
+ log() << "****" << endl;
+ log() << "****" << endl;
+ log() << "need to upgrade database " << dbName << " with pdfile version " << h->version << "." << h->versionMinor << ", "
+ << "new version: " << PDFILE_VERSION << "." << PDFILE_VERSION_MINOR << endl;
+ if ( shouldRepairDatabases ) {
+ // QUESTION: Repair even if file format is higher version than code?
+ log() << "\t starting upgrade" << endl;
+ string errmsg;
+ assert( doDBUpgrade( dbName , errmsg , h ) );
+ }
+ else {
+ log() << "\t Not upgrading, exiting" << endl;
+ log() << "\t run --upgrade to upgrade dbs, then start again" << endl;
+ log() << "****" << endl;
+ dbexit( EXIT_NEED_UPGRADE );
+ shouldRepairDatabases = 1;
+ return;
+ }
+ }
+ else {
+ Database::closeDatabase( dbName.c_str(), dbpath );
+ }
+ }
+
+ log(1) << "done repairDatabases" << endl;
+
+ if ( shouldRepairDatabases ) {
+ log() << "finished checking dbs" << endl;
+ cc().shutdown();
+ dbexit( EXIT_CLEAN );
+ }
+
+ checkNsFilesOnLoad = true;
+ }
+
+ void clearTmpFiles() {
+ boost::filesystem::path path( dbpath );
+ for ( boost::filesystem::directory_iterator i( path );
+ i != boost::filesystem::directory_iterator(); ++i ) {
+ string fileName = boost::filesystem::path(*i).leaf();
+ if ( boost::filesystem::is_directory( *i ) &&
+ fileName.length() && fileName[ 0 ] == '$' )
+ boost::filesystem::remove_all( *i );
+ }
+ }
+
+ void checkIfReplMissingFromCommandLine() {
+ if( !cmdLine.usingReplSets() ) {
+ Client::GodScope gs;
+ DBDirectClient c;
+ unsigned long long x =
+ c.count("local.system.replset");
+ if( x ) {
+ log() << endl;
+ log() << "** warning: mongod started without --replSet yet " << x << " documents are present in local.system.replset" << endl;
+ log() << "** restart with --replSet unless you are doing maintenance and no other clients are connected" << endl;
+ log() << endl;
+ }
+ }
+ }
+
+ void clearTmpCollections() {
+ writelock lk; // _openAllFiles is false at this point, so this is helpful for the query below to work as you can't open files when readlocked
+ Client::GodScope gs;
+ vector< string > toDelete;
+ DBDirectClient cli;
+ auto_ptr< DBClientCursor > c = cli.query( "local.system.namespaces", Query( fromjson( "{name:/^local.temp./}" ) ) );
+ while( c->more() ) {
+ BSONObj o = c->next();
+ toDelete.push_back( o.getStringField( "name" ) );
+ }
+ for( vector< string >::iterator i = toDelete.begin(); i != toDelete.end(); ++i ) {
+ log() << "Dropping old temporary collection: " << *i << endl;
+ cli.dropCollection( *i );
+ }
+ }
+
+ /**
+ * does background async flushes of mmapped files
+ */
+ class DataFileSync : public BackgroundJob {
+ public:
+ string name() const { return "DataFileSync"; }
+ void run() {
+ if( cmdLine.syncdelay == 0 )
+ log() << "warning: --syncdelay 0 is not recommended and can have strange performance" << endl;
+ else if( cmdLine.syncdelay == 1 )
+ log() << "--syncdelay 1" << endl;
+ else if( cmdLine.syncdelay != 60 )
+ log(1) << "--syncdelay " << cmdLine.syncdelay << endl;
+ int time_flushing = 0;
+ while ( ! inShutdown() ) {
+ _diaglog.flush();
+ if ( cmdLine.syncdelay == 0 ) {
+ // in case at some point we add an option to change at runtime
+ sleepsecs(5);
+ continue;
+ }
+
+ sleepmillis( (long long) std::max(0.0, (cmdLine.syncdelay * 1000) - time_flushing) );
+
+ if ( inShutdown() ) {
+ // occasional issue trying to flush during shutdown when sleep interrupted
+ break;
+ }
+
+ Date_t start = jsTime();
+ int numFiles = MemoryMappedFile::flushAll( true );
+ time_flushing = (int) (jsTime() - start);
+
+ globalFlushCounters.flushed(time_flushing);
+
+ if( logLevel >= 1 || time_flushing >= 10000 ) {
+                    log() << "flushing mmaps took " << time_flushing << "ms for " << numFiles << " files" << endl;
+ }
+ }
+ }
+
+ } dataFileSync;
+
+ const char * jsInterruptCallback() {
+ // should be safe to interrupt in js code, even if we have a write lock
+ return killCurrentOp.checkForInterruptNoAssert();
+ }
+
+ unsigned jsGetInterruptSpecCallback() {
+ return cc().curop()->opNum();
+ }
+
+ void _initAndListen(int listenPort ) {
+
+ Client::initThread("initandlisten");
+
+ Database::_openAllFiles = false;
+
+ Logstream::get().addGlobalTee( new RamLog("global") );
+
+ bool is32bit = sizeof(int*) == 4;
+
+ {
+#if !defined(_WIN32)
+ pid_t pid = getpid();
+#else
+ DWORD pid=GetCurrentProcessId();
+#endif
+ Nullstream& l = log();
+ l << "MongoDB starting : pid=" << pid << " port=" << cmdLine.port << " dbpath=" << dbpath;
+ if( replSettings.master ) l << " master=" << replSettings.master;
+ if( replSettings.slave ) l << " slave=" << (int) replSettings.slave;
+ l << ( is32bit ? " 32" : " 64" ) << "-bit host=" << getHostNameCached() << endl;
+ }
+ DEV log() << "_DEBUG build (which is slower)" << endl;
+ show_warnings();
+ log() << mongodVersion() << endl;
+ printGitVersion();
+ printSysInfo();
+ printCommandLineOpts();
+
+ {
+ stringstream ss;
+ ss << endl;
+ ss << "*********************************************************************" << endl;
+ ss << " ERROR: dbpath (" << dbpath << ") does not exist." << endl;
+ ss << " Create this directory or give existing directory in --dbpath." << endl;
+ ss << " See http://www.mongodb.org/display/DOCS/Starting+and+Stopping+Mongo" << endl;
+ ss << "*********************************************************************" << endl;
+ uassert( 10296 , ss.str().c_str(), boost::filesystem::exists( dbpath ) );
+ }
+ {
+ stringstream ss;
+ ss << "repairpath (" << repairpath << ") does not exist";
+ uassert( 12590 , ss.str().c_str(), boost::filesystem::exists( repairpath ) );
+ }
+
+ acquirePathLock(forceRepair);
+ remove_all( dbpath + "/_tmp/" );
+
+ FileAllocator::get()->start();
+
+ MONGO_BOOST_CHECK_EXCEPTION_WITH_MSG( clearTmpFiles(), "clear tmp files" );
+
+ dur::startup();
+
+ if( cmdLine.durOptions & CmdLine::DurRecoverOnly )
+ return;
+
+ // comes after getDur().startup() because this reads from the database
+ clearTmpCollections();
+
+ checkIfReplMissingFromCommandLine();
+
+ Module::initAll();
+
+ if ( scriptingEnabled ) {
+ ScriptEngine::setup();
+ globalScriptEngine->setCheckInterruptCallback( jsInterruptCallback );
+ globalScriptEngine->setGetInterruptSpecCallback( jsGetInterruptSpecCallback );
+ }
+
+ repairDatabasesAndCheckVersion();
+
+ /* we didn't want to pre-open all files for the repair check above. for regular
+ operation we do for read/write lock concurrency reasons.
+ */
+ Database::_openAllFiles = true;
+
+ if ( shouldRepairDatabases )
+ return;
+
+ /* this is for security on certain platforms (nonce generation) */
+ srand((unsigned) (curTimeMicros() ^ startupSrandTimer.micros()));
+
+ snapshotThread.go();
+ d.clientCursorMonitor.go();
+ PeriodicTask::theRunner->go();
+
+#ifndef _WIN32
+ CmdLine::launchOk();
+#endif
+ listen(listenPort);
+
+ // listen() will return when exit code closes its socket.
+ exitCleanly(EXIT_NET_ERROR);
+ }
+
+ void testPretouch();
+
+ void initAndListen(int listenPort) {
+ try {
+ _initAndListen(listenPort);
+ }
+ catch ( DBException &e ) {
+ log() << "exception in initAndListen: " << e.toString() << ", terminating" << endl;
+ dbexit( EXIT_UNCAUGHT );
+ }
+ catch ( std::exception &e ) {
+ log() << "exception in initAndListen std::exception: " << e.what() << ", terminating" << endl;
+ dbexit( EXIT_UNCAUGHT );
+ }
+ catch ( int& n ) {
+ log() << "exception in initAndListen int: " << n << ", terminating" << endl;
+ dbexit( EXIT_UNCAUGHT );
+ }
+ catch(...) {
+ log() << "exception in initAndListen, terminating" << endl;
+ dbexit( EXIT_UNCAUGHT );
+ }
+ }
+
+#if defined(_WIN32)
+ bool initService() {
+ ServiceController::reportStatus( SERVICE_RUNNING );
+ initAndListen( cmdLine.port );
+ return true;
+ }
+#endif
+
+} // namespace mongo
+
+using namespace mongo;
+
+#include <boost/program_options.hpp>
+#undef assert
+#define assert MONGO_assert
+
+namespace po = boost::program_options;
+
+void show_help_text(po::options_description options) {
+ show_warnings();
+ cout << options << endl;
+};
+
+/* Return error string or "" if no errors. */
+string arg_error_check(int argc, char* argv[]) {
+ return "";
+}
+
+int main(int argc, char* argv[]) {
+ static StaticObserver staticObserver;
+ doPreServerStartupInits();
+ getcurns = ourgetns;
+
+ po::options_description general_options("General options");
+#if defined(_WIN32)
+ po::options_description windows_scm_options("Windows Service Control Manager options");
+#endif
+ po::options_description replication_options("Replication options");
+ po::options_description ms_options("Master/slave options");
+ po::options_description rs_options("Replica set options");
+ po::options_description sharding_options("Sharding options");
+ po::options_description visible_options("Allowed options");
+ po::options_description hidden_options("Hidden options");
+
+ po::positional_options_description positional_options;
+
+ CmdLine::addGlobalOptions( general_options , hidden_options );
+
+ general_options.add_options()
+ ("auth", "run with security")
+ ("cpu", "periodically show cpu and iowait utilization")
+ ("dbpath", po::value<string>() , "directory for datafiles")
+ ("diaglog", po::value<int>(), "0=off 1=W 2=R 3=both 7=W+some reads")
+ ("directoryperdb", "each database will be stored in a separate directory")
+ ("journal", "enable journaling")
+ ("journalOptions", po::value<int>(), "journal diagnostic options")
+ ("journalCommitInterval", po::value<unsigned>(), "how often to group/batch commit (ms)")
+ ("ipv6", "enable IPv6 support (disabled by default)")
+ ("jsonp","allow JSONP access via http (has security implications)")
+ ("noauth", "run without security")
+ ("nohttpinterface", "disable http interface")
+ ("nojournal", "disable journaling (journaling is on by default for 64 bit)")
+ ("noprealloc", "disable data file preallocation - will often hurt performance")
+ ("noscripting", "disable scripting engine")
+ ("notablescan", "do not allow table scans")
+ ("nssize", po::value<int>()->default_value(16), ".ns file size (in MB) for new databases")
+    ("profile",po::value<int>(), "0=off, 1=slow, 2=all")
+ ("quota", "limits each database to a certain number of files (8 default)")
+    ("quotaFiles", po::value<int>(), "number of files allowed per db, requires --quota")
+ ("rest","turn on simple rest api")
+ ("repair", "run repair on all dbs")
+ ("repairpath", po::value<string>() , "root directory for repair files - defaults to dbpath" )
+ ("slowms",po::value<int>(&cmdLine.slowMS)->default_value(100), "value of slow for profile and console log" )
+ ("smallfiles", "use a smaller default file size")
+#if defined(__linux__)
+ ("shutdown", "kill a running server (for init scripts)")
+#endif
+ ("syncdelay",po::value<double>(&cmdLine.syncdelay)->default_value(60), "seconds between disk syncs (0=never, but not recommended)")
+ ("sysinfo", "print some diagnostic system information")
+ ("upgrade", "upgrade db if needed")
+ ;
+
+#if defined(_WIN32)
+ CmdLine::addWindowsOptions( windows_scm_options, hidden_options );
+#endif
+
+ replication_options.add_options()
+ ("oplogSize", po::value<int>(), "size limit (in MB) for op log")
+ ;
+
+ ms_options.add_options()
+ ("master", "master mode")
+ ("slave", "slave mode")
+ ("source", po::value<string>(), "when slave: specify master as <server:port>")
+ ("only", po::value<string>(), "when slave: specify a single database to replicate")
+ ("slavedelay", po::value<int>(), "specify delay (in seconds) to be used when applying master ops to slave")
+ ("autoresync", "automatically resync if slave data is stale")
+ ;
+
+ rs_options.add_options()
+ ("replSet", po::value<string>(), "arg is <setname>[/<optionalseedhostlist>]")
+ ;
+
+ sharding_options.add_options()
+ ("configsvr", "declare this is a config db of a cluster; default port 27019; default dir /data/configdb")
+ ("shardsvr", "declare this is a shard db of a cluster; default port 27018")
+    ("noMoveParanoia" , "turn off paranoid saving of data for moveChunk. this is on by default for now, but the default will change" )
+ ;
+
+ hidden_options.add_options()
+ ("fastsync", "indicate that this instance is starting from a dbpath snapshot of the repl peer")
+    ("pretouch", po::value<int>(), "n pretouch threads for applying replicated operations") // experimental
+ ("command", po::value< vector<string> >(), "command")
+ ("cacheSize", po::value<long>(), "cache size (in MB) for rec store")
+ ("nodur", "disable journaling")
+ // things we don't want people to use
+ ("nocursors", "diagnostic/debugging option that turns off cursors DO NOT USE IN PRODUCTION")
+ ("nohints", "ignore query hints")
+ ("nopreallocj", "don't preallocate journal files")
+ ("dur", "enable journaling") // old name for --journal
+ ("durOptions", po::value<int>(), "durability diagnostic options") // deprecated name
+ // deprecated pairing command line options
+ ("pairwith", "DEPRECATED")
+ ("arbiter", "DEPRECATED")
+ ("opIdMem", "DEPRECATED")
+ ;
+
+
+ positional_options.add("command", 3);
+ visible_options.add(general_options);
+#if defined(_WIN32)
+ visible_options.add(windows_scm_options);
+#endif
+ visible_options.add(replication_options);
+ visible_options.add(ms_options);
+ visible_options.add(rs_options);
+ visible_options.add(sharding_options);
+ Module::addOptions( visible_options );
+
+ setupCoreSignals();
+ setupSignals( false );
+
+ dbExecCommand = argv[0];
+
+ srand(curTimeMicros());
+#if( BOOST_VERSION >= 104500 )
+ boost::filesystem::path::default_name_check( boost::filesystem2::no_check );
+#else
+ boost::filesystem::path::default_name_check( boost::filesystem::no_check );
+#endif
+
+ {
+ unsigned x = 0x12345678;
+ unsigned char& b = (unsigned char&) x;
+ if ( b != 0x78 ) {
+ out() << "big endian cpus not yet supported" << endl;
+ return 33;
+ }
+ }
+
+ if( argc == 1 )
+ cout << dbExecCommand << " --help for help and startup options" << endl;
+
+ {
+ po::variables_map params;
+
+ string error_message = arg_error_check(argc, argv);
+ if (error_message != "") {
+ cout << error_message << endl << endl;
+ show_help_text(visible_options);
+ return 0;
+ }
+
+ if ( ! CmdLine::store( argc , argv , visible_options , hidden_options , positional_options , params ) )
+ return 0;
+
+ if (params.count("help")) {
+ show_help_text(visible_options);
+ return 0;
+ }
+ if (params.count("version")) {
+ cout << mongodVersion() << endl;
+ printGitVersion();
+ return 0;
+ }
+ if ( params.count( "dbpath" ) ) {
+ dbpath = params["dbpath"].as<string>();
+ if ( params.count( "fork" ) && dbpath[0] != '/' ) {
+ // we need to change dbpath if we fork since we change
+ // cwd to "/"
+ // fork only exists on *nix
+ // so '/' is safe
+ dbpath = cmdLine.cwd + "/" + dbpath;
+ }
+ }
+ else {
+ dbpath = "/data/db/";
+ }
+#ifdef _WIN32
+ if (dbpath.size() > 1 && dbpath[dbpath.size()-1] == '/') {
+ // size() check is for the unlikely possibility of --dbpath "/"
+ dbpath = dbpath.erase(dbpath.size()-1);
+ }
+#endif
+
+ if ( params.count("directoryperdb")) {
+ directoryperdb = true;
+ }
+ if (params.count("cpu")) {
+ cmdLine.cpu = true;
+ }
+ if (params.count("noauth")) {
+ noauth = true;
+ }
+ if (params.count("auth")) {
+ noauth = false;
+ }
+ if (params.count("quota")) {
+ cmdLine.quota = true;
+ }
+ if (params.count("quotaFiles")) {
+ cmdLine.quota = true;
+ cmdLine.quotaFiles = params["quotaFiles"].as<int>() - 1;
+ }
+ bool journalExplicit = false;
+ if( params.count("nodur") || params.count( "nojournal" ) ) {
+ journalExplicit = true;
+ cmdLine.dur = false;
+ }
+ if( params.count("dur") || params.count( "journal" ) ) {
+ if (journalExplicit) {
+ log() << "Can't specify both --journal and --nojournal options." << endl;
+ return EXIT_BADOPTIONS;
+ }
+ journalExplicit = true;
+ cmdLine.dur = true;
+ }
+ if (params.count("durOptions")) {
+ cmdLine.durOptions = params["durOptions"].as<int>();
+ }
+ if( params.count("journalCommitInterval") ) {
+ // don't check if dur is false here as many will just use the default, and will default to off on win32.
+ // ie no point making life a little more complex by giving an error on a dev environment.
+ cmdLine.journalCommitInterval = params["journalCommitInterval"].as<unsigned>();
+ if( cmdLine.journalCommitInterval <= 1 || cmdLine.journalCommitInterval > 300 ) {
+                out() << "--journalCommitInterval out of allowed range (2-300ms)" << endl;
+ dbexit( EXIT_BADOPTIONS );
+ }
+ }
+ if (params.count("journalOptions")) {
+ cmdLine.durOptions = params["journalOptions"].as<int>();
+ }
+ if (params.count("repairpath")) {
+ repairpath = params["repairpath"].as<string>();
+ if (!repairpath.size()) {
+ out() << "repairpath is empty" << endl;
+ dbexit( EXIT_BADOPTIONS );
+ }
+ }
+ if (params.count("nocursors")) {
+ useCursors = false;
+ }
+ if (params.count("nohints")) {
+ useHints = false;
+ }
+ if (params.count("nopreallocj")) {
+ cmdLine.preallocj = false;
+ }
+ if (params.count("nohttpinterface")) {
+ noHttpInterface = true;
+ }
+ if (params.count("rest")) {
+ cmdLine.rest = true;
+ }
+ if (params.count("jsonp")) {
+ cmdLine.jsonp = true;
+ }
+ if (params.count("noscripting")) {
+ scriptingEnabled = false;
+ }
+ if (params.count("noprealloc")) {
+ cmdLine.prealloc = false;
+ cout << "note: noprealloc may hurt performance in many applications" << endl;
+ }
+ if (params.count("smallfiles")) {
+ cmdLine.smallfiles = true;
+ assert( dur::DataLimitPerJournalFile >= 128 * 1024 * 1024 );
+ dur::DataLimitPerJournalFile = 128 * 1024 * 1024;
+ }
+ if (params.count("diaglog")) {
+ int x = params["diaglog"].as<int>();
+ if ( x < 0 || x > 7 ) {
+ out() << "can't interpret --diaglog setting" << endl;
+ dbexit( EXIT_BADOPTIONS );
+ }
+ _diaglog.setLevel(x);
+ }
+ if (params.count("sysinfo")) {
+ sysRuntimeInfo();
+ return 0;
+ }
+ if (params.count("repair")) {
+ Record::MemoryTrackingEnabled = false;
+ shouldRepairDatabases = 1;
+ forceRepair = 1;
+ }
+ if (params.count("upgrade")) {
+ Record::MemoryTrackingEnabled = false;
+ shouldRepairDatabases = 1;
+ }
+ if (params.count("notablescan")) {
+ cmdLine.noTableScan = true;
+ }
+ if (params.count("master")) {
+ replSettings.master = true;
+ }
+ if (params.count("slave")) {
+ replSettings.slave = SimpleSlave;
+ }
+ if (params.count("slavedelay")) {
+ replSettings.slavedelay = params["slavedelay"].as<int>();
+ }
+ if (params.count("fastsync")) {
+ replSettings.fastsync = true;
+ }
+ if (params.count("autoresync")) {
+ replSettings.autoresync = true;
+ if( params.count("replSet") ) {
+ out() << "--autoresync is not used with --replSet" << endl;
+ out() << "see http://www.mongodb.org/display/DOCS/Resyncing+a+Very+Stale+Replica+Set+Member" << endl;
+ dbexit( EXIT_BADOPTIONS );
+ }
+ }
+ if (params.count("source")) {
+ /* specifies what the source in local.sources should be */
+ cmdLine.source = params["source"].as<string>().c_str();
+ }
+ if( params.count("pretouch") ) {
+ cmdLine.pretouch = params["pretouch"].as<int>();
+ }
+ if (params.count("replSet")) {
+ if (params.count("slavedelay")) {
+ out() << "--slavedelay cannot be used with --replSet" << endl;
+ dbexit( EXIT_BADOPTIONS );
+ }
+ else if (params.count("only")) {
+ out() << "--only cannot be used with --replSet" << endl;
+ dbexit( EXIT_BADOPTIONS );
+ }
+ /* seed list of hosts for the repl set */
+ cmdLine._replSet = params["replSet"].as<string>().c_str();
+ }
+ if (params.count("only")) {
+ cmdLine.only = params["only"].as<string>().c_str();
+ }
+ if( params.count("nssize") ) {
+ int x = params["nssize"].as<int>();
+ if (x <= 0 || x > (0x7fffffff/1024/1024)) {
+ out() << "bad --nssize arg" << endl;
+ dbexit( EXIT_BADOPTIONS );
+ }
+ lenForNewNsFiles = x * 1024 * 1024;
+ assert(lenForNewNsFiles > 0);
+ }
+ if (params.count("oplogSize")) {
+ long long x = params["oplogSize"].as<int>();
+ if (x <= 0) {
+ out() << "bad --oplogSize arg" << endl;
+ dbexit( EXIT_BADOPTIONS );
+ }
+ // note a small size such as x==1 is ok for an arbiter.
+ if( x > 1000 && sizeof(void*) == 4 ) {
+ out() << "--oplogSize of " << x << "MB is too big for 32 bit version. Use 64 bit build instead." << endl;
+ dbexit( EXIT_BADOPTIONS );
+ }
+ cmdLine.oplogSize = x * 1024 * 1024;
+ assert(cmdLine.oplogSize > 0);
+ }
+ if (params.count("cacheSize")) {
+ long x = params["cacheSize"].as<long>();
+ if (x <= 0) {
+ out() << "bad --cacheSize arg" << endl;
+ dbexit( EXIT_BADOPTIONS );
+ }
+ log() << "--cacheSize option not currently supported" << endl;
+ }
+ if (params.count("port") == 0 ) {
+ if( params.count("configsvr") ) {
+ cmdLine.port = CmdLine::ConfigServerPort;
+ }
+ if( params.count("shardsvr") ) {
+ if( params.count("configsvr") ) {
+ log() << "can't do --shardsvr and --configsvr at the same time" << endl;
+ dbexit( EXIT_BADOPTIONS );
+ }
+ cmdLine.port = CmdLine::ShardServerPort;
+ }
+ }
+ else {
+ if ( cmdLine.port <= 0 || cmdLine.port > 65535 ) {
+ out() << "bad --port number" << endl;
+ dbexit( EXIT_BADOPTIONS );
+ }
+ }
+ if ( params.count("configsvr" ) ) {
+ cmdLine.configsvr = true;
+ if (cmdLine.usingReplSets() || replSettings.master || replSettings.slave) {
+ log() << "replication should not be enabled on a config server" << endl;
+ ::exit(-1);
+ }
+ if ( params.count( "nodur" ) == 0 && params.count( "nojournal" ) == 0 )
+ cmdLine.dur = true;
+ if ( params.count( "dbpath" ) == 0 )
+ dbpath = "/data/configdb";
+ }
+ if ( params.count( "profile" ) ) {
+ cmdLine.defaultProfile = params["profile"].as<int>();
+ }
+ if (params.count("ipv6")) {
+ enableIPv6();
+ }
+ if (params.count("noMoveParanoia")) {
+ cmdLine.moveParanoia = false;
+ }
+ if (params.count("pairwith") || params.count("arbiter") || params.count("opIdMem")) {
+ out() << "****" << endl;
+ out() << "Replica Pairs have been deprecated. Invalid options: --pairwith, --arbiter, and/or --opIdMem" << endl;
+ out() << "<http://www.mongodb.org/display/DOCS/Replica+Pairs>" << endl;
+ out() << "****" << endl;
+ dbexit( EXIT_BADOPTIONS );
+ }
+
+ // needs to be after things like --configsvr parsing, thus here.
+ if( repairpath.empty() )
+ repairpath = dbpath;
+
+ Module::configAll( params );
+ dataFileSync.go();
+
+ if (params.count("command")) {
+ vector<string> command = params["command"].as< vector<string> >();
+
+ if (command[0].compare("run") == 0) {
+ if (command.size() > 1) {
+ cout << "Too many parameters to 'run' command" << endl;
+ cout << visible_options << endl;
+ return 0;
+ }
+
+ initAndListen(cmdLine.port);
+ return 0;
+ }
+
+ if (command[0].compare("dbpath") == 0) {
+ cout << dbpath << endl;
+ return 0;
+ }
+
+ cout << "Invalid command: " << command[0] << endl;
+ cout << visible_options << endl;
+ return 0;
+ }
+
+ if( cmdLine.pretouch )
+ log() << "--pretouch " << cmdLine.pretouch << endl;
+
+#ifdef __linux__
+ if (params.count("shutdown")){
+ bool failed = false;
+
+ string name = ( boost::filesystem::path( dbpath ) / "mongod.lock" ).native_file_string();
+ if ( !boost::filesystem::exists( name ) || boost::filesystem::file_size( name ) == 0 )
+ failed = true;
+
+ pid_t pid;
+ string procPath;
+ if (!failed){
+ try {
+ ifstream f (name.c_str());
+ f >> pid;
+ procPath = (str::stream() << "/proc/" << pid);
+ if (!boost::filesystem::exists(procPath))
+ failed = true;
+
+ string exePath = procPath + "/exe";
+ if (boost::filesystem::exists(exePath)){
+ char buf[256];
+                    int ret = readlink(exePath.c_str(), buf, sizeof(buf)-1);
+                    if (ret == -1) {
+                        int e = errno;
+                        cerr << "Error resolving " << exePath << ": " << errnoWithDescription(e) << endl;
+                        failed = true;
+                    }
+                    else {
+                        buf[ret] = '\0'; // readlink doesn't null-terminate the buffer
+                        if (!endsWith(buf, "mongod")){
+                            cerr << "Process " << pid << " is running " << buf << " not mongod" << endl;
+                            ::exit(-1);
+                        }
+                    }
+ }
+ }
+ catch (const std::exception& e){
+ cerr << "Error reading pid from lock file [" << name << "]: " << e.what() << endl;
+ failed = true;
+ }
+ }
+
+ if (failed) {
+ cerr << "There doesn't seem to be a server running with dbpath: " << dbpath << endl;
+ ::exit(-1);
+ }
+
+ cout << "killing process with pid: " << pid << endl;
+ int ret = kill(pid, SIGTERM);
+ if (ret) {
+ int e = errno;
+ cerr << "failed to kill process: " << errnoWithDescription(e) << endl;
+ ::exit(-1);
+ }
+
+ while (boost::filesystem::exists(procPath)) {
+ sleepsecs(1);
+ }
+
+ ::exit(0);
+ }
+#endif
+
+#if defined(_WIN32)
+ if (serviceParamsCheck( params, dbpath, argc, argv )) {
+ return 0;
+ }
+#endif
+
+
+ if (sizeof(void*) == 4 && !journalExplicit){
+ // trying to make this stand out more like startup warnings
+ log() << endl;
+ warning() << "32-bit servers don't have journaling enabled by default. Please use --journal if you want durability." << endl;
+ log() << endl;
+ }
+
+ }
+
+ UnitTest::runTests();
+ initAndListen(cmdLine.port);
+ dbexit(EXIT_CLEAN);
+ return 0;
+}
+
+namespace mongo {
+
+ string getDbContext();
+
+#undef out
+
+
+#if !defined(_WIN32)
+
+} // namespace mongo
+
+#include <signal.h>
+#include <string.h>
+
+namespace mongo {
+
+ void pipeSigHandler( int signal ) {
+#ifdef psignal
+ psignal( signal, "Signal Received : ");
+#else
+ cout << "got pipe signal:" << signal << endl;
+#endif
+ }
+
+ void abruptQuit(int x) {
+ ostringstream ossSig;
+ ossSig << "Got signal: " << x << " (" << strsignal( x ) << ")." << endl;
+ rawOut( ossSig.str() );
+
+ /*
+ ostringstream ossOp;
+ ossOp << "Last op: " << currentOp.infoNoauth() << endl;
+ rawOut( ossOp.str() );
+ */
+
+ ostringstream oss;
+ oss << "Backtrace:" << endl;
+ printStackTrace( oss );
+ rawOut( oss.str() );
+
+ // Don't go through normal shutdown procedure. It may make things worse.
+ ::exit(EXIT_ABRUPT);
+
+ }
+
+ void abruptQuitWithAddrSignal( int signal, siginfo_t *siginfo, void * ) {
+ ostringstream oss;
+ oss << "Invalid";
+ if ( signal == SIGSEGV || signal == SIGBUS ) {
+ oss << " access";
+ } else {
+ oss << " operation";
+ }
+ oss << " at address: " << siginfo->si_addr << endl;
+ rawOut( oss.str() );
+ abruptQuit( signal );
+ }
+
+ sigset_t asyncSignals;
+ // The above signals will be processed by this thread only, in order to
+ // ensure the db and log mutexes aren't held.
+ void interruptThread() {
+ int x;
+ sigwait( &asyncSignals, &x );
+ log() << "got kill or ctrl c or hup signal " << x << " (" << strsignal( x ) << "), will terminate after current cmd ends" << endl;
+ Client::initThread( "interruptThread" );
+ exitCleanly( EXIT_KILL );
+ }
+
+ // this will be called in certain c++ error cases, for example if there are two active
+ // exceptions
+ void myterminate() {
+ rawOut( "terminate() called, printing stack:" );
+ printStackTrace();
+ ::abort();
+ }
+
+ // this gets called when new fails to allocate memory
+ void my_new_handler() {
+ rawOut( "out of memory, printing stack and exiting:" );
+ printStackTrace();
+ ::exit(EXIT_ABRUPT);
+ }
+
+ void setupSignals_ignoreHelper( int signal ) {}
+
+ void setupSignals( bool inFork ) {
+ struct sigaction addrSignals;
+ memset( &addrSignals, 0, sizeof( struct sigaction ) );
+ addrSignals.sa_sigaction = abruptQuitWithAddrSignal;
+ sigemptyset( &addrSignals.sa_mask );
+ addrSignals.sa_flags = SA_SIGINFO;
+
+ assert( sigaction(SIGSEGV, &addrSignals, 0) == 0 );
+ assert( sigaction(SIGBUS, &addrSignals, 0) == 0 );
+ assert( sigaction(SIGILL, &addrSignals, 0) == 0 );
+ assert( sigaction(SIGFPE, &addrSignals, 0) == 0 );
+
+ assert( signal(SIGABRT, abruptQuit) != SIG_ERR );
+ assert( signal(SIGQUIT, abruptQuit) != SIG_ERR );
+ assert( signal(SIGPIPE, pipeSigHandler) != SIG_ERR );
+
+ setupSIGTRAPforGDB();
+
+ sigemptyset( &asyncSignals );
+
+ if ( inFork )
+ assert( signal( SIGHUP , setupSignals_ignoreHelper ) != SIG_ERR );
+ else
+ sigaddset( &asyncSignals, SIGHUP );
+
+ sigaddset( &asyncSignals, SIGINT );
+ sigaddset( &asyncSignals, SIGTERM );
+ assert( pthread_sigmask( SIG_SETMASK, &asyncSignals, 0 ) == 0 );
+ boost::thread it( interruptThread );
+
+ set_terminate( myterminate );
+ set_new_handler( my_new_handler );
+ }
+
+#else
+ void consoleTerminate( const char* controlCodeName ) {
+ Client::initThread( "consoleTerminate" );
+ log() << "got " << controlCodeName << ", will terminate after current cmd ends" << endl;
+ exitCleanly( EXIT_KILL );
+ }
+
+ BOOL CtrlHandler( DWORD fdwCtrlType ) {
+
+ switch( fdwCtrlType ) {
+
+ case CTRL_C_EVENT:
+ rawOut( "Ctrl-C signal" );
+ consoleTerminate( "CTRL_C_EVENT" );
+ return TRUE ;
+
+ case CTRL_CLOSE_EVENT:
+ rawOut( "CTRL_CLOSE_EVENT signal" );
+ consoleTerminate( "CTRL_CLOSE_EVENT" );
+ return TRUE ;
+
+ case CTRL_BREAK_EVENT:
+ rawOut( "CTRL_BREAK_EVENT signal" );
+ consoleTerminate( "CTRL_BREAK_EVENT" );
+ return TRUE;
+
+ case CTRL_LOGOFF_EVENT:
+ rawOut( "CTRL_LOGOFF_EVENT signal" );
+ consoleTerminate( "CTRL_LOGOFF_EVENT" );
+ return TRUE;
+
+ case CTRL_SHUTDOWN_EVENT:
+ rawOut( "CTRL_SHUTDOWN_EVENT signal" );
+ consoleTerminate( "CTRL_SHUTDOWN_EVENT" );
+ return TRUE;
+
+ default:
+ return FALSE;
+ }
+ }
+
+ LPTOP_LEVEL_EXCEPTION_FILTER filtLast = 0;
+ ::HANDLE standardOut = GetStdHandle(STD_OUTPUT_HANDLE);
+ LONG WINAPI exceptionFilter(struct _EXCEPTION_POINTERS *ExceptionInfo) {
+ {
+            // given the severity of the event we write to the console in addition to the --logFile
+            // (rawOut writes to the logfile, if a special one was specified)
+            DWORD written;
+            WriteFile(standardOut, "unhandled windows exception\n", 28, &written, 0);
+ FlushFileBuffers(standardOut);
+ }
+
+ DWORD ec = ExceptionInfo->ExceptionRecord->ExceptionCode;
+ if( ec == EXCEPTION_ACCESS_VIOLATION ) {
+ rawOut("access violation");
+ }
+ else {
+ rawOut("unhandled windows exception");
+ char buf[64];
+ strcpy(buf, "ec=0x");
+ _ui64toa(ec, buf+5, 16);
+ rawOut(buf);
+ }
+ if( filtLast )
+ return filtLast(ExceptionInfo);
+ return EXCEPTION_EXECUTE_HANDLER;
+ }
+
+ // called by mongoAbort()
+ extern void (*reportEventToSystem)(const char *msg);
+ void reportEventToSystemImpl(const char *msg) {
+ static ::HANDLE hEventLog = RegisterEventSource( NULL, TEXT("mongod") );
+ if( hEventLog ) {
+ std::wstring s = toNativeString(msg);
+ LPCTSTR txt = s.c_str();
+ BOOL ok = ReportEvent(
+ hEventLog, EVENTLOG_ERROR_TYPE,
+ 0, 0, NULL,
+ 1,
+ 0,
+ &txt,
+ 0);
+ wassert(ok);
+ }
+ }
+
+ void myPurecallHandler() {
+ printStackTrace();
+ mongoAbort("pure virtual");
+ }
+
+ void setupSignals( bool inFork ) {
+ reportEventToSystem = reportEventToSystemImpl;
+ filtLast = SetUnhandledExceptionFilter(exceptionFilter);
+ massert(10297 , "Couldn't register Windows Ctrl-C handler", SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE));
+ _set_purecall_handler( myPurecallHandler );
+ }
+
+#endif
+
+} // namespace mongo
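For reference, the synthetic getMore built by the exhaust path in MyMessageHandler::process() has the wire layout below (a sketch restating the appendNum/appendStr sequence above; not part of this commit):

    // Illustrative restatement of the exhaust getMore construction.
    void buildExhaustGetMoreSketch( Message& m, MsgData* header, const string& ns, long long cursorid ) {
        BufBuilder b(512);
        b.appendNum( (int) 0 );           // message length, patched later by appendData()
        b.appendNum( header->id );        // reuse ids from the previous reply header
        b.appendNum( header->responseTo );
        b.appendNum( (int) dbGetMore );   // opcode
        b.appendNum( (int) 0 );           // reserved
        b.appendStr( ns );                // full collection namespace
        b.appendNum( (int) 0 );           // ntoreturn (0, as in the handler above)
        b.appendNum( cursorid );          // cursor to keep draining
        m.appendData( b.buf(), b.len() );
        b.decouple();                     // the Message now owns the buffer
    }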
diff --git a/src/mongo/db/db.h b/src/mongo/db/db.h
new file mode 100644
index 00000000000..6a31a06f77c
--- /dev/null
+++ b/src/mongo/db/db.h
@@ -0,0 +1,120 @@
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../pch.h"
+#include "../util/net/message.h"
+#include "concurrency.h"
+#include "pdfile.h"
+#include "curop.h"
+#include "client.h"
+#include "databaseholder.h"
+
+namespace mongo {
+
+ struct dbtemprelease {
+ Client::Context * _context;
+ int _locktype;
+
+ dbtemprelease() {
+ const Client& c = cc();
+ _context = c.getContext();
+ _locktype = d.dbMutex.getState();
+ assert( _locktype );
+
+ if ( _locktype > 0 ) {
+ massert( 10298 , "can't temprelease nested write lock", _locktype == 1);
+ if ( _context ) _context->unlocked();
+ d.dbMutex.unlock();
+ }
+ else {
+ massert( 10299 , "can't temprelease nested read lock", _locktype == -1);
+ if ( _context ) _context->unlocked();
+ d.dbMutex.unlock_shared();
+ }
+
+ verify( 14814 , c.curop() );
+ c.curop()->yielded();
+
+ }
+ ~dbtemprelease() {
+ if ( _locktype > 0 )
+ d.dbMutex.lock();
+ else
+ d.dbMutex.lock_shared();
+
+ if ( _context ) _context->relocked();
+ }
+ };
+
+    /** must be write locked
+        no assert (and no release) if the write lock is nested
+        a lot like dbtempreleasecond, but does no malloc, so should be a tiny bit faster
+    */
+ struct dbtempreleasewritelock {
+ Client::Context * _context;
+ int _locktype;
+ dbtempreleasewritelock() {
+ const Client& c = cc();
+ _context = c.getContext();
+ _locktype = d.dbMutex.getState();
+ assert( _locktype >= 1 );
+ if( _locktype > 1 )
+ return; // nested
+ if ( _context )
+ _context->unlocked();
+ d.dbMutex.unlock();
+ verify( 14845 , c.curop() );
+ c.curop()->yielded();
+ }
+ ~dbtempreleasewritelock() {
+ if ( _locktype == 1 )
+ d.dbMutex.lock();
+ if ( _context )
+ _context->relocked();
+ }
+ };
+
+ /**
+ only does a temp release if we're not nested and have a lock
+ */
+ struct dbtempreleasecond {
+ dbtemprelease * real;
+ int locktype;
+
+ dbtempreleasecond() {
+ real = 0;
+ locktype = d.dbMutex.getState();
+ if ( locktype == 1 || locktype == -1 )
+ real = new dbtemprelease();
+ }
+
+ ~dbtempreleasecond() {
+ if ( real ) {
+ delete real;
+ real = 0;
+ }
+ }
+
+ bool unlocked() {
+ return real != 0;
+ }
+ };
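+
+    /* Usage sketch (illustrative only -- 'slowPart' and the surrounding operation are
+       hypothetical, not part of this header): code that already holds the database lock
+       can yield it around a blocking call with one of the RAII helpers above:
+
+           {
+               dbtempreleasecond yield;   // releases the lock only if held and not nested
+               slowPart();                // blocking work runs without the lock
+           }                              // destructor re-acquires the lock on scope exit
+    */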
+
+} // namespace mongo
+
+#include "concurrency.h"
diff --git a/src/mongo/db/db.rc b/src/mongo/db/db.rc
new file mode 100755
index 00000000000..b589458cf73
--- /dev/null
+++ b/src/mongo/db/db.rc
@@ -0,0 +1,12 @@
+// Microsoft Visual C++ generated resource script.
+//
+#include "resource.h"
+
+/////////////////////////////////////////////////////////////////////////////
+//
+// Icon
+//
+// Icon with lowest ID value placed first to ensure application icon
+// remains consistent on all systems.
+IDI_ICON2 ICON "mongo.ico"
+///////////////////////////////////////////////////////////////////////////// \ No newline at end of file
diff --git a/src/mongo/db/db.vcxproj b/src/mongo/db/db.vcxproj
new file mode 100755
index 00000000000..8963f0af580
--- /dev/null
+++ b/src/mongo/db/db.vcxproj
@@ -0,0 +1,934 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup Label="ProjectConfigurations">
+ <ProjectConfiguration Include="Debug|Win32">
+ <Configuration>Debug</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Debug|x64">
+ <Configuration>Debug</Configuration>
+ <Platform>x64</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|Win32">
+ <Configuration>Release</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|x64">
+ <Configuration>Release</Configuration>
+ <Platform>x64</Platform>
+ </ProjectConfiguration>
+ </ItemGroup>
+ <PropertyGroup Label="Globals">
+ <ProjectName>mongod</ProjectName>
+ <ProjectGuid>{215B2D68-0A70-4D10-8E75-B31010C62A91}</ProjectGuid>
+ <RootNamespace>db</RootNamespace>
+ <Keyword>Win32Proj</Keyword>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <CharacterSet>Unicode</CharacterSet>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <CharacterSet>Unicode</CharacterSet>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseOfMfc>false</UseOfMfc>
+ <UseOfAtl>false</UseOfAtl>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseOfMfc>false</UseOfMfc>
+ <UseOfAtl>false</UseOfAtl>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+ <ImportGroup Label="ExtensionSettings">
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup>
+ <_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(SolutionDir)$(Configuration)\</OutDir>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(SolutionDir)$(Configuration)\</OutDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(Configuration)\</IntDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(Configuration)\</IntDir>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</LinkIncremental>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</LinkIncremental>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(SolutionDir)$(Configuration)\</OutDir>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(SolutionDir)$(Configuration)\</OutDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(Configuration)\</IntDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(Configuration)\</IntDir>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">false</LinkIncremental>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|x64'">false</LinkIncremental>
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
+ <IncludePath Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">.;..;$(IncludePath)</IncludePath>
+ <IncludePath Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">..;$(IncludePath)</IncludePath>
+ <IncludePath Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">..;$(IncludePath)</IncludePath>
+ <IncludePath Condition="'$(Configuration)|$(Platform)'=='Release|x64'">..;$(IncludePath)</IncludePath>
+ </PropertyGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <ClCompile>
+ <Optimization>Disabled</Optimization>
+ <AdditionalIncludeDirectories>..\..\js\src;..\third_party\pcre-7.4;c:\boost;\boost</AdditionalIncludeDirectories>
+ <PreprocessorDefinitions>MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <MinimalRebuild>No</MinimalRebuild>
+ <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
+ <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
+ <PrecompiledHeader>Use</PrecompiledHeader>
+ <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>EditAndContinue</DebugInformationFormat>
+ <DisableSpecificWarnings>4355;4800;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ </ClCompile>
+ <Link>
+ <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>c:\boost\lib\vs2010_32;\boost\lib\vs2010_32;\boost\lib</AdditionalLibraryDirectories>
+ <IgnoreAllDefaultLibraries>false</IgnoreAllDefaultLibraries>
+ <IgnoreSpecificDefaultLibraries>%(IgnoreSpecificDefaultLibraries)</IgnoreSpecificDefaultLibraries>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ <TargetMachine>MachineX86</TargetMachine>
+ </Link>
+ <PreBuildEvent>
+ <Command>cscript //Nologo ..\shell\msvc\createCPPfromJavaScriptFiles.js "$(ProjectDir).."</Command>
+ <Message>Create mongo.cpp and mongo-server.cpp from JavaScript source files</Message>
+ </PreBuildEvent>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ <ClCompile>
+ <Optimization>Disabled</Optimization>
+ <AdditionalIncludeDirectories>..\..\js\src;..\third_party\pcre-7.4;c:\boost;\boost</AdditionalIncludeDirectories>
+      <PreprocessorDefinitions>MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
+ <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
+ <PrecompiledHeader>Use</PrecompiledHeader>
+ <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+ <DisableSpecificWarnings>4355;4800;4267;4244;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ <MinimalRebuild>No</MinimalRebuild>
+ </ClCompile>
+ <Link>
+ <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>c:\boost\lib\vs2010_64;\boost\lib\vs2010_64;\boost\lib</AdditionalLibraryDirectories>
+ <IgnoreAllDefaultLibraries>false</IgnoreAllDefaultLibraries>
+ <IgnoreSpecificDefaultLibraries>%(IgnoreSpecificDefaultLibraries)</IgnoreSpecificDefaultLibraries>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ </Link>
+ <PreBuildEvent>
+ <Command>cscript //Nologo ..\shell\msvc\createCPPfromJavaScriptFiles.js "$(ProjectDir).."</Command>
+ <Message>Create mongo.cpp and mongo-server.cpp from JavaScript source files</Message>
+ </PreBuildEvent>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <ClCompile>
+ <Optimization>MaxSpeed</Optimization>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <AdditionalIncludeDirectories>..\..\js\src;..\third_party\pcre-7.4;c:\boost;\boost</AdditionalIncludeDirectories>
+      <PreprocessorDefinitions>_UNICODE;UNICODE;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <PrecompiledHeader>Use</PrecompiledHeader>
+ <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+ <DisableSpecificWarnings>4355;4800;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ <MinimalRebuild>No</MinimalRebuild>
+ </ClCompile>
+ <Link>
+ <AdditionalDependencies>ws2_32.lib;psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>c:\boost\lib\vs2010_32;\boost\lib\vs2010_32;\boost\lib</AdditionalLibraryDirectories>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ <OptimizeReferences>true</OptimizeReferences>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <TargetMachine>MachineX86</TargetMachine>
+ <IgnoreAllDefaultLibraries>false</IgnoreAllDefaultLibraries>
+ </Link>
+ <PreBuildEvent>
+ <Command>cscript //Nologo ..\shell\msvc\createCPPfromJavaScriptFiles.js "$(ProjectDir).."</Command>
+ <Message>Create mongo.cpp and mongo-server.cpp from JavaScript source files</Message>
+ </PreBuildEvent>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ <ClCompile>
+ <Optimization>MaxSpeed</Optimization>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <AdditionalIncludeDirectories>..\..\js\src;..\third_party\pcre-7.4;c:\boost;\boost</AdditionalIncludeDirectories>
+      <PreprocessorDefinitions>MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <PrecompiledHeader>Use</PrecompiledHeader>
+ <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+ <DisableSpecificWarnings>4355;4800;4267;4244;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ <MinimalRebuild>No</MinimalRebuild>
+ </ClCompile>
+ <Link>
+ <AdditionalDependencies>ws2_32.lib;psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>c:\boost\lib\vs2010_64;\boost\lib\vs2010_64;\boost\lib</AdditionalLibraryDirectories>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ <OptimizeReferences>true</OptimizeReferences>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ </Link>
+ <PreBuildEvent>
+ <Command>cscript //Nologo ..\shell\msvc\createCPPfromJavaScriptFiles.js "$(ProjectDir).."</Command>
+ <Message>Create mongo.cpp and mongo-server.cpp from JavaScript source files</Message>
+ </PreBuildEvent>
+ </ItemDefinitionGroup>
+ <ItemGroup>
+ <ClCompile Include="..\bson\oid.cpp" />
+ <ClCompile Include="..\client\dbclientcursor.cpp" />
+ <ClCompile Include="..\client\dbclient_rs.cpp" />
+ <ClCompile Include="..\client\distlock.cpp" />
+ <ClCompile Include="..\client\model.cpp" />
+ <ClCompile Include="..\s\default_version.cpp" />
+ <ClCompile Include="..\third_party\pcre-7.4\pcrecpp.cc">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_chartables.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_compile.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_config.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_dfa_exec.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_exec.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_fullinfo.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_get.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_globals.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_info.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_maketables.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_newline.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_ord2utf8.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_refcount.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_scanner.cc">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_stringpiece.cc">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_study.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_tables.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_try_flipped.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_ucp_searchfuncs.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_valid_utf8.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_version.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_xclass.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcreposix.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\scripting\bench.cpp" />
+ <ClCompile Include="..\shell\mongo.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\s\chunk.cpp" />
+ <ClCompile Include="..\s\config.cpp" />
+ <ClCompile Include="..\s\d_chunk_manager.cpp" />
+ <ClCompile Include="..\s\d_migrate.cpp" />
+ <ClCompile Include="..\s\d_split.cpp" />
+ <ClCompile Include="..\s\d_state.cpp" />
+ <ClCompile Include="..\s\d_writeback.cpp" />
+ <ClCompile Include="..\s\grid.cpp" />
+ <ClCompile Include="..\s\shard.cpp" />
+ <ClCompile Include="..\s\shardconnection.cpp" />
+ <ClCompile Include="..\s\shardkey.cpp" />
+ <ClCompile Include="..\third_party\snappy\snappy-sinksource.cc">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\snappy\snappy.cc">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\util\alignedbuilder.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\util\compress.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\util\concurrency\spin_lock.cpp" />
+ <ClCompile Include="..\util\concurrency\synchronization.cpp" />
+ <ClCompile Include="..\util\concurrency\task.cpp" />
+ <ClCompile Include="..\util\concurrency\thread_pool.cpp" />
+ <ClCompile Include="..\util\concurrency\vars.cpp" />
+ <ClCompile Include="..\util\file_allocator.cpp" />
+ <ClCompile Include="..\util\intrusive_counter.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\util\log.cpp" />
+ <ClCompile Include="..\util\logfile.cpp" />
+ <ClCompile Include="..\util\net\listen.cpp" />
+ <ClCompile Include="..\util\net\miniwebserver.cpp" />
+ <ClCompile Include="..\util\processinfo.cpp" />
+ <ClCompile Include="..\util\ramlog.cpp" />
+ <ClCompile Include="..\util\stringutils.cpp" />
+ <ClCompile Include="..\util\systeminfo_win32.cpp" />
+ <ClCompile Include="..\util\text.cpp" />
+ <ClCompile Include="..\util\version.cpp" />
+ <ClCompile Include="btreebuilder.cpp" />
+ <ClCompile Include="cap.cpp" />
+ <ClCompile Include="commands\cloud.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="commands\distinct.cpp">
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="commands\document_source_cursor.cpp" />
+ <ClCompile Include="commands\find_and_modify.cpp" />
+ <ClCompile Include="commands\group.cpp" />
+ <ClCompile Include="commands\isself.cpp" />
+ <ClCompile Include="commands\mr.cpp" />
+ <ClCompile Include="commands\pipeline_command.cpp" />
+ <ClCompile Include="commands\pipeline.cpp" />
+ <ClCompile Include="compact.cpp" />
+ <ClCompile Include="curop.cpp" />
+ <ClCompile Include="dbcommands_generic.cpp" />
+ <ClCompile Include="dbmessage.cpp" />
+ <ClCompile Include="dur.cpp" />
+ <ClCompile Include="durop.cpp" />
+ <ClCompile Include="dur_commitjob.cpp" />
+ <ClCompile Include="dur_journal.cpp" />
+ <ClCompile Include="dur_preplogbuffer.cpp" />
+ <ClCompile Include="dur_recover.cpp" />
+ <ClCompile Include="dur_writetodatafiles.cpp" />
+ <ClCompile Include="d_concurrency.cpp" />
+ <ClCompile Include="d_globals.cpp" />
+ <ClCompile Include="geo\2d.cpp" />
+ <ClCompile Include="geo\haystack.cpp" />
+ <ClCompile Include="key.cpp" />
+ <ClCompile Include="mongommf.cpp" />
+ <ClCompile Include="oplog.cpp" />
+ <ClCompile Include="ops\count.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="ops\delete.cpp" />
+ <ClCompile Include="ops\query.cpp" />
+ <ClCompile Include="ops\update.cpp" />
+ <ClCompile Include="pagefault.cpp" />
+ <ClCompile Include="pipeline\accumulator.cpp" />
+ <ClCompile Include="pipeline\accumulator_add_to_set.cpp" />
+ <ClCompile Include="pipeline\accumulator_avg.cpp" />
+ <ClCompile Include="pipeline\accumulator_first.cpp" />
+ <ClCompile Include="pipeline\accumulator_last.cpp" />
+ <ClCompile Include="pipeline\accumulator_min_max.cpp" />
+ <ClCompile Include="pipeline\accumulator_push.cpp" />
+ <ClCompile Include="pipeline\accumulator_single_value.cpp" />
+ <ClCompile Include="pipeline\accumulator_sum.cpp" />
+ <ClCompile Include="pipeline\builder.cpp" />
+ <ClCompile Include="pipeline\document.cpp" />
+ <ClCompile Include="pipeline\document_source.cpp" />
+ <ClCompile Include="pipeline\document_source_bson_array.cpp" />
+ <ClCompile Include="pipeline\document_source_command_futures.cpp" />
+ <ClCompile Include="pipeline\document_source_filter.cpp" />
+ <ClCompile Include="pipeline\document_source_filter_base.cpp" />
+ <ClCompile Include="pipeline\document_source_group.cpp" />
+ <ClCompile Include="pipeline\document_source_limit.cpp" />
+ <ClCompile Include="pipeline\document_source_match.cpp" />
+ <ClCompile Include="pipeline\document_source_out.cpp" />
+ <ClCompile Include="pipeline\document_source_project.cpp" />
+ <ClCompile Include="pipeline\document_source_skip.cpp" />
+ <ClCompile Include="pipeline\document_source_sort.cpp" />
+ <ClCompile Include="pipeline\document_source_unwind.cpp" />
+ <ClCompile Include="pipeline\doc_mem_monitor.cpp" />
+ <ClCompile Include="pipeline\expression.cpp" />
+ <ClCompile Include="pipeline\expression_context.cpp" />
+ <ClCompile Include="pipeline\field_path.cpp" />
+ <ClCompile Include="pipeline\value.cpp" />
+ <ClCompile Include="projection.cpp" />
+ <ClCompile Include="queryoptimizercursor.cpp" />
+ <ClCompile Include="querypattern.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="record.cpp" />
+ <ClCompile Include="repl.cpp" />
+ <ClCompile Include="repl\consensus.cpp" />
+ <ClCompile Include="repl\heartbeat.cpp" />
+ <ClCompile Include="repl\manager.cpp" />
+ <ClCompile Include="repl\rs_initialsync.cpp" />
+ <ClCompile Include="repl\rs_initiate.cpp" />
+ <ClCompile Include="repl\rs_rollback.cpp" />
+ <ClCompile Include="repl\rs_sync.cpp" />
+ <ClCompile Include="repl_block.cpp" />
+ <ClCompile Include="restapi.cpp" />
+ <ClCompile Include="..\client\connpool.cpp" />
+ <ClCompile Include="..\client\dbclient.cpp" />
+ <ClCompile Include="..\client\syncclusterconnection.cpp" />
+ <ClCompile Include="..\pch.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Create</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">Create</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">Create</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">Create</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="client.cpp" />
+ <ClCompile Include="clientcursor.cpp" />
+ <ClCompile Include="cloner.cpp" />
+ <ClCompile Include="commands.cpp" />
+ <ClCompile Include="common.cpp">
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="cursor.cpp" />
+ <ClCompile Include="database.cpp" />
+ <ClCompile Include="db.cpp" />
+ <ClCompile Include="dbcommands.cpp" />
+ <ClCompile Include="dbcommands_admin.cpp" />
+ <ClCompile Include="dbeval.cpp" />
+ <ClCompile Include="dbhelpers.cpp" />
+ <ClCompile Include="dbwebserver.cpp" />
+ <ClCompile Include="extsort.cpp" />
+ <ClCompile Include="index.cpp" />
+ <ClCompile Include="indexkey.cpp" />
+ <ClCompile Include="instance.cpp" />
+ <ClCompile Include="introspect.cpp" />
+ <ClCompile Include="jsobj.cpp" />
+ <ClCompile Include="json.cpp" />
+ <ClCompile Include="lasterror.cpp" />
+ <ClCompile Include="matcher.cpp" />
+ <ClCompile Include="matcher_covered.cpp" />
+ <ClCompile Include="..\util\mmap_win.cpp" />
+ <ClCompile Include="modules\mms.cpp" />
+ <ClCompile Include="module.cpp" />
+ <ClCompile Include="namespace.cpp" />
+ <ClCompile Include="nonce.cpp" />
+ <ClCompile Include="..\client\parallel.cpp" />
+ <ClCompile Include="pdfile.cpp" />
+ <ClCompile Include="queryoptimizer.cpp" />
+ <ClCompile Include="scanandorder.cpp" />
+ <ClCompile Include="security.cpp" />
+ <ClCompile Include="security_commands.cpp" />
+ <ClCompile Include="security_common.cpp" />
+ <ClCompile Include="tests.cpp" />
+ <ClCompile Include="cmdline.cpp" />
+ <ClCompile Include="queryutil.cpp" />
+ <ClCompile Include="..\util\assert_util.cpp" />
+ <ClCompile Include="..\util\background.cpp" />
+ <ClCompile Include="..\util\base64.cpp" />
+ <ClCompile Include="..\util\mmap.cpp" />
+ <ClCompile Include="..\util\ntservice.cpp" />
+ <ClCompile Include="..\util\processinfo_win32.cpp" />
+ <ClCompile Include="..\util\util.cpp" />
+ <ClCompile Include="..\util\net\httpclient.cpp" />
+ <ClCompile Include="..\util\md5.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeaderFile>
+ <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeaderFile>
+ </ClCompile>
+ <ClCompile Include="..\util\md5main.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Use</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">Use</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\util\net\message.cpp" />
+ <ClCompile Include="..\util\net\message_port.cpp" />
+ <ClCompile Include="..\util\net\message_server_port.cpp" />
+ <ClCompile Include="..\util\net\sock.cpp" />
+ <ClCompile Include="..\s\d_logic.cpp" />
+ <ClCompile Include="..\scripting\engine.cpp" />
+ <ClCompile Include="..\scripting\engine_spidermonkey.cpp" />
+ <ClCompile Include="..\scripting\utils.cpp" />
+ <ClCompile Include="stats\counters.cpp" />
+ <ClCompile Include="stats\snapshots.cpp" />
+ <ClCompile Include="stats\top.cpp" />
+ <ClCompile Include="btree.cpp" />
+ <ClCompile Include="btreecursor.cpp" />
+ <ClCompile Include="repl\health.cpp" />
+ <ClCompile Include="repl\rs.cpp" />
+ <ClCompile Include="repl\replset_commands.cpp" />
+ <ClCompile Include="repl\rs_config.cpp" />
+ </ItemGroup>
+ <ItemGroup>
+ <None Include="..\jstests\dur\basic1.sh" />
+ <None Include="..\jstests\dur\dur1.js" />
+ <None Include="..\jstests\replsets\replset1.js" />
+ <None Include="..\jstests\replsets\replset2.js" />
+ <None Include="..\jstests\replsets\replset3.js" />
+ <None Include="..\jstests\replsets\replset4.js" />
+ <None Include="..\jstests\replsets\replset5.js" />
+ <None Include="..\jstests\replsets\replsetadd.js" />
+ <None Include="..\jstests\replsets\replsetarb1.js" />
+ <None Include="..\jstests\replsets\replsetarb2.js" />
+ <None Include="..\jstests\replsets\replsetprio1.js" />
+ <None Include="..\jstests\replsets\replsetrestart1.js" />
+ <None Include="..\jstests\replsets\replsetrestart2.js" />
+ <None Include="..\jstests\replsets\replset_remove_node.js" />
+ <None Include="..\jstests\replsets\rollback.js" />
+ <None Include="..\jstests\replsets\rollback2.js" />
+ <None Include="..\jstests\replsets\sync1.js" />
+ <None Include="..\jstests\replsets\twosets.js" />
+ <None Include="..\SConstruct" />
+ <None Include="..\util\mongoutils\README" />
+ <None Include="mongo.ico" />
+ <None Include="repl\notes.txt" />
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="..\bson\bson-inl.h" />
+ <ClInclude Include="..\bson\bson.h" />
+ <ClInclude Include="..\bson\bson_db.h" />
+ <ClInclude Include="..\bson\inline_decls.h" />
+ <ClInclude Include="..\bson\stringdata.h" />
+ <ClInclude Include="..\bson\util\atomic_int.h" />
+ <ClInclude Include="..\bson\util\builder.h" />
+ <ClInclude Include="..\bson\util\misc.h" />
+ <ClInclude Include="..\client\dbclientcursor.h" />
+ <ClInclude Include="..\client\distlock.h" />
+ <ClInclude Include="..\client\gridfs.h" />
+ <ClInclude Include="..\client\parallel.h" />
+ <ClInclude Include="..\s\d_logic.h" />
+ <ClInclude Include="..\targetver.h" />
+ <ClInclude Include="..\third_party\pcre-7.4\config.h" />
+ <ClInclude Include="..\third_party\pcre-7.4\pcre.h" />
+ <ClInclude Include="..\third_party\snappy\config.h" />
+ <ClInclude Include="..\third_party\snappy\snappy.h" />
+ <ClInclude Include="..\util\alignedbuilder.h" />
+ <ClInclude Include="..\util\concurrency\race.h" />
+ <ClInclude Include="..\util\concurrency\rwlock.h" />
+ <ClInclude Include="..\util\concurrency\msg.h" />
+ <ClInclude Include="..\util\concurrency\mutex.h" />
+ <ClInclude Include="..\util\concurrency\mvar.h" />
+ <ClInclude Include="..\util\concurrency\task.h" />
+ <ClInclude Include="..\util\concurrency\thread_pool.h" />
+ <ClInclude Include="..\util\intrusive_counter.h" />
+ <ClInclude Include="..\util\logfile.h" />
+ <ClInclude Include="..\util\mongoutils\checksum.h" />
+ <ClInclude Include="..\util\mongoutils\html.h" />
+ <ClInclude Include="..\util\mongoutils\str.h" />
+ <ClInclude Include="..\util\net\hostandport.h" />
+ <ClInclude Include="..\util\net\listen.h" />
+ <ClInclude Include="..\util\net\message_port.h" />
+ <ClInclude Include="..\util\net\miniwebserver.h" />
+ <ClInclude Include="..\util\paths.h" />
+ <ClInclude Include="..\util\ramlog.h" />
+ <ClInclude Include="..\util\systeminfo.h" />
+ <ClInclude Include="..\util\text.h" />
+ <ClInclude Include="..\util\time_support.h" />
+ <ClInclude Include="databaseholder.h" />
+ <ClInclude Include="durop.h" />
+ <ClInclude Include="dur_commitjob.h" />
+ <ClInclude Include="dur_journal.h" />
+ <ClInclude Include="dur_journalformat.h" />
+ <ClInclude Include="dur_journalimpl.h" />
+ <ClInclude Include="dur_stats.h" />
+ <ClInclude Include="d_globals.h" />
+ <ClInclude Include="geo\core.h" />
+ <ClInclude Include="globals.h" />
+ <ClInclude Include="helpers\dblogger.h" />
+ <ClInclude Include="instance.h" />
+ <ClInclude Include="mongommf.h" />
+ <ClInclude Include="mongomutex.h" />
+ <ClInclude Include="namespace-inl.h" />
+ <ClInclude Include="namespacestring.h" />
+ <ClInclude Include="oplogreader.h" />
+ <ClInclude Include="ops\count.h" />
+ <ClInclude Include="ops\delete.h" />
+ <ClInclude Include="ops\update.h" />
+ <ClInclude Include="pagefault.h" />
+ <ClInclude Include="pipeline\accumulator.h" />
+ <ClInclude Include="pipeline\builder.h" />
+ <ClInclude Include="pipeline\document.h" />
+ <ClInclude Include="pipeline\document_source.h" />
+ <ClInclude Include="pipeline\doc_mem_monitor.h" />
+ <ClInclude Include="pipeline\expression.h" />
+ <ClInclude Include="pipeline\expression_context.h" />
+ <ClInclude Include="pipeline\field_path.h" />
+ <ClInclude Include="pipeline\value.h" />
+ <ClInclude Include="projection.h" />
+ <ClInclude Include="queryutil.h" />
+ <ClInclude Include="repl.h" />
+ <ClInclude Include="replpair.h" />
+ <ClInclude Include="repl\connections.h" />
+ <ClInclude Include="repl\multicmd.h" />
+ <ClInclude Include="repl\rsmember.h" />
+ <ClInclude Include="repl\rs_optime.h" />
+ <ClInclude Include="stats\counters.h" />
+ <ClInclude Include="stats\snapshots.h" />
+ <ClInclude Include="stats\top.h" />
+ <ClInclude Include="..\client\connpool.h" />
+ <ClInclude Include="..\client\dbclient.h" />
+ <ClInclude Include="..\client\model.h" />
+ <ClInclude Include="..\client\redef_macros.h" />
+ <ClInclude Include="..\client\syncclusterconnection.h" />
+ <ClInclude Include="..\client\undef_macros.h" />
+ <ClInclude Include="background.h" />
+ <ClInclude Include="client.h" />
+ <ClInclude Include="clientcursor.h" />
+ <ClInclude Include="cmdline.h" />
+ <ClInclude Include="commands.h" />
+ <ClInclude Include="concurrency.h" />
+ <ClInclude Include="curop.h" />
+ <ClInclude Include="cursor.h" />
+ <ClInclude Include="database.h" />
+ <ClInclude Include="db.h" />
+ <ClInclude Include="dbhelpers.h" />
+ <ClInclude Include="dbinfo.h" />
+ <ClInclude Include="dbmessage.h" />
+ <ClInclude Include="diskloc.h" />
+ <ClInclude Include="index.h" />
+ <ClInclude Include="indexkey.h" />
+ <ClInclude Include="introspect.h" />
+ <ClInclude Include="json.h" />
+ <ClInclude Include="matcher.h" />
+ <ClInclude Include="namespace.h" />
+ <ClInclude Include="..\pch.h" />
+ <ClInclude Include="pdfile.h" />
+ <ClInclude Include="..\grid\protocol.h" />
+ <ClInclude Include="query.h" />
+ <ClInclude Include="queryoptimizer.h" />
+ <ClInclude Include="resource.h" />
+ <ClInclude Include="scanandorder.h" />
+ <ClInclude Include="security.h" />
+ <ClInclude Include="..\util\allocator.h" />
+ <ClInclude Include="..\util\array.h" />
+ <ClInclude Include="..\util\assert_util.h" />
+ <ClInclude Include="..\util\background.h" />
+ <ClInclude Include="..\util\base64.h" />
+ <ClInclude Include="..\util\builder.h" />
+ <ClInclude Include="..\util\debug_util.h" />
+ <ClInclude Include="..\util\embedded_builder.h" />
+ <ClInclude Include="..\util\file.h" />
+ <ClInclude Include="..\util\file_allocator.h" />
+ <ClInclude Include="..\util\goodies.h" />
+ <ClInclude Include="..\util\hashtab.h" />
+ <ClInclude Include="..\util\hex.h" />
+ <ClInclude Include="lasterror.h" />
+ <ClInclude Include="..\util\log.h" />
+ <ClInclude Include="..\util\lruishmap.h" />
+ <ClInclude Include="..\util\mmap.h" />
+ <ClInclude Include="..\util\ntservice.h" />
+ <ClInclude Include="..\util\optime.h" />
+ <ClInclude Include="..\util\processinfo.h" />
+ <ClInclude Include="..\util\queue.h" />
+ <ClInclude Include="..\util\ramstore.h" />
+ <ClInclude Include="..\util\unittest.h" />
+ <ClInclude Include="..\util\concurrency\list.h" />
+ <ClInclude Include="..\util\concurrency\value.h" />
+ <ClInclude Include="..\util\web\html.h" />
+ <ClInclude Include="..\util\net\httpclient.h" />
+ <ClInclude Include="..\util\md5.h" />
+ <ClInclude Include="..\util\md5.hpp" />
+ <ClInclude Include="..\util\net\message.h" />
+ <ClInclude Include="..\util\net\message_server.h" />
+ <ClInclude Include="..\util\net\sock.h" />
+ <ClInclude Include="..\scripting\engine.h" />
+ <ClInclude Include="..\scripting\engine_spidermonkey.h" />
+ <ClInclude Include="..\scripting\engine_v8.h" />
+ <ClInclude Include="..\scripting\v8_db.h" />
+ <ClInclude Include="..\scripting\v8_utils.h" />
+ <ClInclude Include="..\scripting\v8_wrapper.h" />
+ <ClInclude Include="btree.h" />
+ <ClInclude Include="repl\health.h" />
+ <ClInclude Include="repl\rs.h" />
+ <ClInclude Include="repl\rs_config.h" />
+ <ClInclude Include="..\bson\bsonelement.h" />
+ <ClInclude Include="..\bson\bsoninlines.h" />
+ <ClInclude Include="..\bson\bsonmisc.h" />
+ <ClInclude Include="..\bson\bsonobj.h" />
+ <ClInclude Include="..\bson\bsonobjbuilder.h" />
+ <ClInclude Include="..\bson\bsonobjiterator.h" />
+ <ClInclude Include="..\bson\bsontypes.h" />
+ <ClInclude Include="jsobj.h" />
+ <ClInclude Include="..\bson\oid.h" />
+ <ClInclude Include="..\bson\ordering.h" />
+ </ItemGroup>
+ <ItemGroup>
+ <Library Include="..\..\js\js32d.lib">
+ <FileType>Document</FileType>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
+ </Library>
+ <Library Include="..\..\js\js32r.lib">
+ <FileType>Document</FileType>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
+ </Library>
+ <Library Include="..\..\js\js64d.lib">
+ <FileType>Document</FileType>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
+ </Library>
+ <Library Include="..\..\js\js64r.lib">
+ <FileType>Document</FileType>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
+ </Library>
+ </ItemGroup>
+ <ItemGroup>
+ <ResourceCompile Include="db.rc" />
+ </ItemGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+ <ImportGroup Label="ExtensionTargets">
+ </ImportGroup>
+</Project> \ No newline at end of file
diff --git a/src/mongo/db/db.vcxproj.filters b/src/mongo/db/db.vcxproj.filters
new file mode 100755
index 00000000000..a39df0dc796
--- /dev/null
+++ b/src/mongo/db/db.vcxproj.filters
@@ -0,0 +1,432 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup>
+ <ClCompile Include="..\bson\oid.cpp" />
+ <ClCompile Include="..\client\dbclientcursor.cpp" />
+ <ClCompile Include="..\client\dbclient_rs.cpp" />
+ <ClCompile Include="..\client\distlock.cpp" />
+ <ClCompile Include="..\client\model.cpp" />
+ <ClCompile Include="..\scripting\bench.cpp" />
+ <ClCompile Include="..\shell\mongo.cpp" />
+ <ClCompile Include="..\s\chunk.cpp" />
+ <ClCompile Include="..\s\config.cpp" />
+ <ClCompile Include="..\s\d_chunk_manager.cpp" />
+ <ClCompile Include="..\s\d_migrate.cpp" />
+ <ClCompile Include="..\s\d_split.cpp" />
+ <ClCompile Include="..\s\d_state.cpp" />
+ <ClCompile Include="..\s\d_writeback.cpp" />
+ <ClCompile Include="..\s\grid.cpp" />
+ <ClCompile Include="..\s\shard.cpp" />
+ <ClCompile Include="..\s\shardconnection.cpp" />
+ <ClCompile Include="..\s\shardkey.cpp" />
+ <ClCompile Include="..\util\alignedbuilder.cpp" />
+ <ClCompile Include="..\util\concurrency\spin_lock.cpp" />
+ <ClCompile Include="..\util\concurrency\synchronization.cpp" />
+ <ClCompile Include="..\util\concurrency\task.cpp" />
+ <ClCompile Include="..\util\concurrency\thread_pool.cpp" />
+ <ClCompile Include="..\util\concurrency\vars.cpp" />
+ <ClCompile Include="..\util\log.cpp" />
+ <ClCompile Include="..\util\logfile.cpp" />
+ <ClCompile Include="..\util\processinfo.cpp" />
+ <ClCompile Include="..\util\stringutils.cpp" />
+ <ClCompile Include="..\util\text.cpp" />
+ <ClCompile Include="..\util\version.cpp" />
+ <ClCompile Include="cap.cpp" />
+ <ClCompile Include="commands\distinct.cpp" />
+ <ClCompile Include="commands\group.cpp" />
+ <ClCompile Include="commands\isself.cpp" />
+ <ClCompile Include="commands\mr.cpp" />
+ <ClCompile Include="compact.cpp" />
+ <ClCompile Include="dbcommands_generic.cpp" />
+ <ClCompile Include="dur.cpp" />
+ <ClCompile Include="durop.cpp" />
+ <ClCompile Include="dur_commitjob.cpp" />
+ <ClCompile Include="dur_journal.cpp" />
+ <ClCompile Include="dur_preplogbuffer.cpp" />
+ <ClCompile Include="dur_recover.cpp" />
+ <ClCompile Include="dur_writetodatafiles.cpp" />
+ <ClCompile Include="geo\2d.cpp" />
+ <ClCompile Include="geo\haystack.cpp" />
+ <ClCompile Include="mongommf.cpp" />
+ <ClCompile Include="oplog.cpp" />
+ <ClCompile Include="projection.cpp" />
+ <ClCompile Include="repl.cpp" />
+ <ClCompile Include="repl\consensus.cpp" />
+ <ClCompile Include="repl\heartbeat.cpp" />
+ <ClCompile Include="repl\manager.cpp" />
+ <ClCompile Include="repl\rs_initialsync.cpp" />
+ <ClCompile Include="repl\rs_initiate.cpp" />
+ <ClCompile Include="repl\rs_rollback.cpp" />
+ <ClCompile Include="repl\rs_sync.cpp" />
+ <ClCompile Include="repl_block.cpp" />
+ <ClCompile Include="restapi.cpp" />
+ <ClCompile Include="..\client\connpool.cpp" />
+ <ClCompile Include="..\client\dbclient.cpp" />
+ <ClCompile Include="..\client\syncclusterconnection.cpp" />
+ <ClCompile Include="..\pch.cpp" />
+ <ClCompile Include="client.cpp" />
+ <ClCompile Include="clientcursor.cpp" />
+ <ClCompile Include="cloner.cpp" />
+ <ClCompile Include="commands.cpp" />
+ <ClCompile Include="common.cpp" />
+ <ClCompile Include="cursor.cpp" />
+ <ClCompile Include="database.cpp" />
+ <ClCompile Include="db.cpp" />
+ <ClCompile Include="dbcommands.cpp" />
+ <ClCompile Include="dbcommands_admin.cpp" />
+ <ClCompile Include="dbeval.cpp" />
+ <ClCompile Include="dbhelpers.cpp" />
+ <ClCompile Include="dbwebserver.cpp" />
+ <ClCompile Include="extsort.cpp" />
+ <ClCompile Include="index.cpp" />
+ <ClCompile Include="indexkey.cpp" />
+ <ClCompile Include="instance.cpp" />
+ <ClCompile Include="introspect.cpp" />
+ <ClCompile Include="jsobj.cpp" />
+ <ClCompile Include="json.cpp" />
+ <ClCompile Include="lasterror.cpp" />
+ <ClCompile Include="matcher.cpp" />
+ <ClCompile Include="matcher_covered.cpp" />
+ <ClCompile Include="..\util\mmap_win.cpp" />
+ <ClCompile Include="modules\mms.cpp" />
+ <ClCompile Include="module.cpp" />
+ <ClCompile Include="namespace.cpp" />
+ <ClCompile Include="nonce.cpp" />
+ <ClCompile Include="..\client\parallel.cpp" />
+ <ClCompile Include="pdfile.cpp" />
+ <ClCompile Include="queryoptimizer.cpp" />
+ <ClCompile Include="security.cpp" />
+ <ClCompile Include="security_commands.cpp" />
+ <ClCompile Include="tests.cpp" />
+ <ClCompile Include="cmdline.cpp" />
+ <ClCompile Include="queryutil.cpp" />
+ <ClCompile Include="..\util\assert_util.cpp" />
+ <ClCompile Include="..\util\background.cpp" />
+ <ClCompile Include="..\util\base64.cpp" />
+ <ClCompile Include="..\util\mmap.cpp" />
+ <ClCompile Include="..\util\ntservice.cpp" />
+ <ClCompile Include="..\util\processinfo_win32.cpp" />
+ <ClCompile Include="..\util\util.cpp" />
+ <ClCompile Include="..\util\md5.c" />
+ <ClCompile Include="..\util\md5main.cpp" />
+ <ClCompile Include="..\s\d_logic.cpp" />
+ <ClCompile Include="..\scripting\engine.cpp" />
+ <ClCompile Include="..\scripting\engine_spidermonkey.cpp" />
+ <ClCompile Include="..\scripting\utils.cpp" />
+ <ClCompile Include="stats\counters.cpp" />
+ <ClCompile Include="stats\snapshots.cpp" />
+ <ClCompile Include="stats\top.cpp" />
+ <ClCompile Include="btree.cpp" />
+ <ClCompile Include="btreecursor.cpp" />
+ <ClCompile Include="repl\health.cpp" />
+ <ClCompile Include="repl\rs.cpp" />
+ <ClCompile Include="repl\replset_commands.cpp" />
+ <ClCompile Include="repl\rs_config.cpp" />
+ <ClCompile Include="..\util\file_allocator.cpp" />
+ <ClCompile Include="querypattern.cpp" />
+ <ClCompile Include="..\util\ramlog.cpp" />
+ <ClCompile Include="key.cpp" />
+ <ClCompile Include="btreebuilder.cpp" />
+ <ClCompile Include="queryoptimizercursor.cpp" />
+ <ClCompile Include="record.cpp" />
+ <ClCompile Include="ops\delete.cpp" />
+ <ClCompile Include="ops\update.cpp" />
+ <ClCompile Include="security_common.cpp" />
+ <ClCompile Include="ops\query.cpp" />
+ <ClCompile Include="..\util\net\httpclient.cpp" />
+ <ClCompile Include="..\util\net\message.cpp" />
+ <ClCompile Include="..\util\net\message_server_port.cpp" />
+ <ClCompile Include="..\util\net\sock.cpp" />
+ <ClCompile Include="..\util\net\miniwebserver.cpp" />
+ <ClCompile Include="..\util\net\listen.cpp" />
+ <ClCompile Include="..\util\net\message_port.cpp" />
+ <ClCompile Include="dbmessage.cpp" />
+ <ClCompile Include="commands\find_and_modify.cpp" />
+ <ClCompile Include="..\util\compress.cpp">
+ <Filter>snappy</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\snappy\snappy-sinksource.cc">
+ <Filter>snappy</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\snappy\snappy.cc">
+ <Filter>snappy</Filter>
+ </ClCompile>
+ <ClCompile Include="scanandorder.cpp" />
+ <ClCompile Include="..\third_party\pcre-7.4\pcrecpp.cc" />
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_chartables.c" />
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_compile.c" />
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_config.c" />
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_dfa_exec.c" />
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_exec.c" />
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_fullinfo.c" />
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_get.c" />
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_globals.c" />
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_info.c" />
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_maketables.c" />
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_newline.c" />
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_ord2utf8.c" />
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_refcount.c" />
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_scanner.cc" />
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_stringpiece.cc" />
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_study.c" />
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_tables.c" />
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_try_flipped.c" />
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_ucp_searchfuncs.c" />
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_valid_utf8.c" />
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_version.c" />
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_xclass.c" />
+ <ClCompile Include="..\third_party\pcre-7.4\pcreposix.c" />
+ <ClCompile Include="commands\cloud.cpp" />
+ <ClCompile Include="commands\pipeline_command.cpp" />
+ <ClCompile Include="commands\pipeline.cpp" />
+ <ClCompile Include="pipeline\accumulator.cpp" />
+ <ClCompile Include="pipeline\accumulator_add_to_set.cpp" />
+ <ClCompile Include="pipeline\accumulator_avg.cpp" />
+ <ClCompile Include="pipeline\accumulator_first.cpp" />
+ <ClCompile Include="pipeline\accumulator_last.cpp" />
+ <ClCompile Include="pipeline\accumulator_min_max.cpp" />
+ <ClCompile Include="pipeline\accumulator_push.cpp" />
+ <ClCompile Include="pipeline\accumulator_single_value.cpp" />
+ <ClCompile Include="pipeline\accumulator_sum.cpp" />
+ <ClCompile Include="pipeline\builder.cpp" />
+ <ClCompile Include="pipeline\doc_mem_monitor.cpp" />
+ <ClCompile Include="pipeline\document.cpp" />
+ <ClCompile Include="pipeline\document_source.cpp" />
+ <ClCompile Include="pipeline\document_source_bson_array.cpp" />
+ <ClCompile Include="pipeline\document_source_command_futures.cpp" />
+ <ClCompile Include="pipeline\document_source_filter.cpp" />
+ <ClCompile Include="pipeline\document_source_filter_base.cpp" />
+ <ClCompile Include="pipeline\document_source_group.cpp" />
+ <ClCompile Include="pipeline\document_source_limit.cpp" />
+ <ClCompile Include="pipeline\document_source_match.cpp" />
+ <ClCompile Include="pipeline\document_source_out.cpp" />
+ <ClCompile Include="pipeline\document_source_project.cpp" />
+ <ClCompile Include="pipeline\document_source_skip.cpp" />
+ <ClCompile Include="pipeline\document_source_sort.cpp" />
+ <ClCompile Include="pipeline\document_source_unwind.cpp" />
+ <ClCompile Include="pipeline\expression.cpp" />
+ <ClCompile Include="pipeline\expression_context.cpp" />
+ <ClCompile Include="pipeline\field_path.cpp" />
+ <ClCompile Include="pipeline\value.cpp" />
+ <ClCompile Include="..\util\intrusive_counter.cpp" />
+ <ClCompile Include="..\util\systeminfo_win32.cpp" />
+ <ClCompile Include="commands\document_source_cursor.cpp" />
+ <ClCompile Include="d_concurrency.cpp" />
+ <ClCompile Include="..\s\default_version.cpp" />
+ <ClCompile Include="ops\count.cpp" />
+ <ClCompile Include="pagefault.cpp" />
+ <ClCompile Include="d_globals.cpp" />
+ <ClCompile Include="curop.cpp" />
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="..\client\dbclientcursor.h" />
+ <ClInclude Include="..\client\distlock.h" />
+ <ClInclude Include="..\client\gridfs.h" />
+ <ClInclude Include="..\client\parallel.h" />
+ <ClInclude Include="..\s\d_logic.h" />
+ <ClInclude Include="..\targetver.h" />
+ <ClInclude Include="..\util\concurrency\rwlock.h" />
+ <ClInclude Include="..\util\concurrency\msg.h" />
+ <ClInclude Include="..\util\concurrency\mutex.h" />
+ <ClInclude Include="..\util\concurrency\mvar.h" />
+ <ClInclude Include="..\util\concurrency\task.h" />
+ <ClInclude Include="..\util\concurrency\thread_pool.h" />
+ <ClInclude Include="..\util\logfile.h" />
+ <ClInclude Include="..\util\mongoutils\checksum.h" />
+ <ClInclude Include="..\util\mongoutils\html.h" />
+ <ClInclude Include="..\util\mongoutils\str.h" />
+ <ClInclude Include="..\util\paths.h" />
+ <ClInclude Include="..\util\ramlog.h" />
+ <ClInclude Include="..\util\text.h" />
+ <ClInclude Include="..\util\time_support.h" />
+ <ClInclude Include="durop.h" />
+ <ClInclude Include="dur_commitjob.h" />
+ <ClInclude Include="dur_journal.h" />
+ <ClInclude Include="dur_journalformat.h" />
+ <ClInclude Include="dur_stats.h" />
+ <ClInclude Include="geo\core.h" />
+ <ClInclude Include="helpers\dblogger.h" />
+ <ClInclude Include="instance.h" />
+ <ClInclude Include="mongommf.h" />
+ <ClInclude Include="mongomutex.h" />
+ <ClInclude Include="namespace-inl.h" />
+ <ClInclude Include="oplogreader.h" />
+ <ClInclude Include="projection.h" />
+ <ClInclude Include="repl.h" />
+ <ClInclude Include="replpair.h" />
+ <ClInclude Include="repl\connections.h" />
+ <ClInclude Include="repl\multicmd.h" />
+ <ClInclude Include="repl\rsmember.h" />
+ <ClInclude Include="repl\rs_optime.h" />
+ <ClInclude Include="stats\counters.h" />
+ <ClInclude Include="stats\snapshots.h" />
+ <ClInclude Include="stats\top.h" />
+ <ClInclude Include="..\client\connpool.h" />
+ <ClInclude Include="..\client\dbclient.h" />
+ <ClInclude Include="..\client\model.h" />
+ <ClInclude Include="..\client\redef_macros.h" />
+ <ClInclude Include="..\client\syncclusterconnection.h" />
+ <ClInclude Include="..\client\undef_macros.h" />
+ <ClInclude Include="background.h" />
+ <ClInclude Include="client.h" />
+ <ClInclude Include="clientcursor.h" />
+ <ClInclude Include="cmdline.h" />
+ <ClInclude Include="commands.h" />
+ <ClInclude Include="concurrency.h" />
+ <ClInclude Include="curop.h" />
+ <ClInclude Include="cursor.h" />
+ <ClInclude Include="database.h" />
+ <ClInclude Include="db.h" />
+ <ClInclude Include="dbhelpers.h" />
+ <ClInclude Include="dbinfo.h" />
+ <ClInclude Include="dbmessage.h" />
+ <ClInclude Include="diskloc.h" />
+ <ClInclude Include="index.h" />
+ <ClInclude Include="indexkey.h" />
+ <ClInclude Include="introspect.h" />
+ <ClInclude Include="json.h" />
+ <ClInclude Include="matcher.h" />
+ <ClInclude Include="namespace.h" />
+ <ClInclude Include="..\pch.h" />
+ <ClInclude Include="pdfile.h" />
+ <ClInclude Include="..\grid\protocol.h" />
+ <ClInclude Include="query.h" />
+ <ClInclude Include="queryoptimizer.h" />
+ <ClInclude Include="resource.h" />
+ <ClInclude Include="scanandorder.h" />
+ <ClInclude Include="security.h" />
+ <ClInclude Include="..\util\allocator.h" />
+ <ClInclude Include="..\util\array.h" />
+ <ClInclude Include="..\util\assert_util.h" />
+ <ClInclude Include="..\util\background.h" />
+ <ClInclude Include="..\util\base64.h" />
+ <ClInclude Include="..\util\builder.h" />
+ <ClInclude Include="..\util\debug_util.h" />
+ <ClInclude Include="..\util\embedded_builder.h" />
+ <ClInclude Include="..\util\file.h" />
+ <ClInclude Include="..\util\file_allocator.h" />
+ <ClInclude Include="..\util\goodies.h" />
+ <ClInclude Include="..\util\hashtab.h" />
+ <ClInclude Include="..\util\hex.h" />
+ <ClInclude Include="lasterror.h" />
+ <ClInclude Include="..\util\log.h" />
+ <ClInclude Include="..\util\lruishmap.h" />
+ <ClInclude Include="..\util\mmap.h" />
+ <ClInclude Include="..\util\ntservice.h" />
+ <ClInclude Include="..\util\optime.h" />
+ <ClInclude Include="..\util\processinfo.h" />
+ <ClInclude Include="..\util\queue.h" />
+ <ClInclude Include="..\util\ramstore.h" />
+ <ClInclude Include="..\util\unittest.h" />
+ <ClInclude Include="..\util\concurrency\list.h" />
+ <ClInclude Include="..\util\concurrency\value.h" />
+ <ClInclude Include="..\util\web\html.h" />
+ <ClInclude Include="..\util\md5.h" />
+ <ClInclude Include="..\util\md5.hpp" />
+ <ClInclude Include="..\scripting\engine.h" />
+ <ClInclude Include="..\scripting\engine_spidermonkey.h" />
+ <ClInclude Include="..\scripting\engine_v8.h" />
+ <ClInclude Include="..\scripting\v8_db.h" />
+ <ClInclude Include="..\scripting\v8_utils.h" />
+ <ClInclude Include="..\scripting\v8_wrapper.h" />
+ <ClInclude Include="btree.h" />
+ <ClInclude Include="repl\health.h" />
+ <ClInclude Include="repl\rs.h" />
+ <ClInclude Include="repl\rs_config.h" />
+ <ClInclude Include="..\bson\bsonelement.h" />
+ <ClInclude Include="..\bson\bsoninlines.h" />
+ <ClInclude Include="..\bson\bsonmisc.h" />
+ <ClInclude Include="..\bson\bsonobj.h" />
+ <ClInclude Include="..\bson\bsonobjbuilder.h" />
+ <ClInclude Include="..\bson\bsonobjiterator.h" />
+ <ClInclude Include="..\bson\bsontypes.h" />
+ <ClInclude Include="jsobj.h" />
+ <ClInclude Include="..\bson\oid.h" />
+ <ClInclude Include="..\bson\ordering.h" />
+ <ClInclude Include="dur_journalimpl.h" />
+ <ClInclude Include="..\util\concurrency\race.h" />
+ <ClInclude Include="..\util\alignedbuilder.h" />
+ <ClInclude Include="queryutil.h" />
+ <ClInclude Include="..\bson\bson.h" />
+ <ClInclude Include="..\bson\bson_db.h" />
+ <ClInclude Include="..\bson\bson-inl.h" />
+ <ClInclude Include="..\bson\inline_decls.h" />
+ <ClInclude Include="..\bson\stringdata.h" />
+ <ClInclude Include="..\bson\util\atomic_int.h" />
+ <ClInclude Include="..\bson\util\builder.h" />
+ <ClInclude Include="..\bson\util\misc.h" />
+ <ClInclude Include="ops\delete.h" />
+ <ClInclude Include="ops\update.h" />
+ <ClInclude Include="..\util\net\httpclient.h" />
+ <ClInclude Include="..\util\net\message.h" />
+ <ClInclude Include="..\util\net\message_server.h" />
+ <ClInclude Include="..\util\net\sock.h" />
+ <ClInclude Include="..\third_party\snappy\config.h">
+ <Filter>snappy</Filter>
+ </ClInclude>
+ <ClInclude Include="..\third_party\snappy\snappy.h">
+ <Filter>snappy</Filter>
+ </ClInclude>
+ <ClInclude Include="..\third_party\pcre-7.4\config.h" />
+ <ClInclude Include="..\third_party\pcre-7.4\pcre.h" />
+ <ClInclude Include="globals.h" />
+ <ClInclude Include="..\util\net\hostandport.h" />
+ <ClInclude Include="..\util\net\listen.h" />
+ <ClInclude Include="..\util\net\message_port.h" />
+ <ClInclude Include="..\util\net\miniwebserver.h" />
+ <ClInclude Include="databaseholder.h" />
+ <ClInclude Include="pipeline\accumulator.h" />
+ <ClInclude Include="pipeline\builder.h" />
+ <ClInclude Include="pipeline\doc_mem_monitor.h" />
+ <ClInclude Include="pipeline\document.h" />
+ <ClInclude Include="pipeline\document_source.h" />
+ <ClInclude Include="pipeline\expression.h" />
+ <ClInclude Include="pipeline\expression_context.h" />
+ <ClInclude Include="pipeline\field_path.h" />
+ <ClInclude Include="pipeline\value.h" />
+ <ClInclude Include="..\util\intrusive_counter.h" />
+ <ClInclude Include="..\util\systeminfo.h" />
+ <ClInclude Include="namespacestring.h" />
+ <ClInclude Include="ops\count.h" />
+ <ClInclude Include="pagefault.h" />
+ <ClInclude Include="d_globals.h" />
+ </ItemGroup>
+ <ItemGroup>
+ <ResourceCompile Include="db.rc" />
+ </ItemGroup>
+ <ItemGroup>
+ <None Include="..\jstests\dur\basic1.sh" />
+ <None Include="..\jstests\dur\dur1.js" />
+ <None Include="..\jstests\replsets\replset1.js" />
+ <None Include="..\jstests\replsets\replset2.js" />
+ <None Include="..\jstests\replsets\replset3.js" />
+ <None Include="..\jstests\replsets\replset4.js" />
+ <None Include="..\jstests\replsets\replset5.js" />
+ <None Include="..\jstests\replsets\replsetadd.js" />
+ <None Include="..\jstests\replsets\replsetarb1.js" />
+ <None Include="..\jstests\replsets\replsetarb2.js" />
+ <None Include="..\jstests\replsets\replsetprio1.js" />
+ <None Include="..\jstests\replsets\replsetrestart1.js" />
+ <None Include="..\jstests\replsets\replsetrestart2.js" />
+ <None Include="..\jstests\replsets\replset_remove_node.js" />
+ <None Include="..\jstests\replsets\rollback.js" />
+ <None Include="..\jstests\replsets\rollback2.js" />
+ <None Include="..\jstests\replsets\sync1.js" />
+ <None Include="..\jstests\replsets\twosets.js" />
+ <None Include="..\SConstruct" />
+ <None Include="..\util\mongoutils\README" />
+ <None Include="mongo.ico" />
+ <None Include="repl\notes.txt" />
+ </ItemGroup>
+ <ItemGroup>
+ <Library Include="..\..\js\js32d.lib" />
+ <Library Include="..\..\js\js32r.lib" />
+ <Library Include="..\..\js\js64d.lib" />
+ <Library Include="..\..\js\js64r.lib" />
+ </ItemGroup>
+ <ItemGroup>
+ <Filter Include="snappy">
+ <UniqueIdentifier>{bb99c086-7926-4f50-838d-f5f0c18397c0}</UniqueIdentifier>
+ </Filter>
+ </ItemGroup>
+</Project> \ No newline at end of file
diff --git a/src/mongo/db/db_10.sln b/src/mongo/db/db_10.sln
new file mode 100755
index 00000000000..c1d83f3901a
--- /dev/null
+++ b/src/mongo/db/db_10.sln
@@ -0,0 +1,168 @@
+
+Microsoft Visual Studio Solution File, Format Version 11.00
+# Visual Studio 2010
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "examples", "examples", "{4082881B-EB00-486F-906C-843B8EC06E18}"
+ ProjectSection(SolutionItems) = preProject
+ driverHelpers.cpp = driverHelpers.cpp
+ EndProjectSection
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "tools", "tools", "{2B262D59-9DC7-4BF1-A431-1BD4966899A5}"
+ ProjectSection(SolutionItems) = preProject
+ ..\shell\msvc\createCPPfromJavaScriptFiles.js = ..\shell\msvc\createCPPfromJavaScriptFiles.js
+ EndProjectSection
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "unix files", "unix files", "{2F760952-C71B-4865-998F-AABAE96D1373}"
+ ProjectSection(SolutionItems) = preProject
+ ..\util\processinfo_darwin.cpp = ..\util\processinfo_darwin.cpp
+ ..\util\processinfo_linux2.cpp = ..\util\processinfo_linux2.cpp
+ ..\util\processinfo_none.cpp = ..\util\processinfo_none.cpp
+ EndProjectSection
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "other", "other", "{12B11474-2D74-48C3-BB3D-F03249BEA88F}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mongod", "db.vcxproj", "{215B2D68-0A70-4D10-8E75-B31010C62A91}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mongos", "..\s\dbgrid.vcxproj", "{E03717ED-69B4-4D21-BC55-DF6690B585C6}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test", "..\dbtests\test.vcxproj", "{215B2D68-0A70-4D10-8E75-B33010C62A91}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bsondemo", "..\bson\bsondemo\bsondemo.vcxproj", "{C9DB5EB7-81AA-4185-BAA1-DA035654402F}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mongoutils test program", "..\util\mongoutils\mongoutils.vcxproj", "{7B84584E-92BC-4DB9-971B-A1A8F93E5053}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "simple_client_demo", "..\client\examples\simple_client_demo.vcxproj", "{89C30BC3-2874-4F2C-B4DA-EB04E9782236}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mongo", "..\shell\msvc\mongo.vcxproj", "{FE959BD8-8EE2-4555-AE59-9FA14FFD410E}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mongoperf", "..\client\examples\mongoperf.vcxproj", "{79D4E297-BFB7-4FF2-9B13-08A146582E46}"
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|Any CPU = Debug|Any CPU
+ Debug|Mixed Platforms = Debug|Mixed Platforms
+ Debug|Win32 = Debug|Win32
+ Debug|x64 = Debug|x64
+ Release|Any CPU = Release|Any CPU
+ Release|Mixed Platforms = Release|Mixed Platforms
+ Release|Win32 = Release|Win32
+ Release|x64 = Release|x64
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {215B2D68-0A70-4D10-8E75-B31010C62A91}.Debug|Any CPU.ActiveCfg = Debug|x64
+ {215B2D68-0A70-4D10-8E75-B31010C62A91}.Debug|Mixed Platforms.ActiveCfg = Debug|x64
+ {215B2D68-0A70-4D10-8E75-B31010C62A91}.Debug|Mixed Platforms.Build.0 = Debug|x64
+ {215B2D68-0A70-4D10-8E75-B31010C62A91}.Debug|Win32.ActiveCfg = Debug|Win32
+ {215B2D68-0A70-4D10-8E75-B31010C62A91}.Debug|Win32.Build.0 = Debug|Win32
+ {215B2D68-0A70-4D10-8E75-B31010C62A91}.Debug|x64.ActiveCfg = Debug|x64
+ {215B2D68-0A70-4D10-8E75-B31010C62A91}.Debug|x64.Build.0 = Debug|x64
+ {215B2D68-0A70-4D10-8E75-B31010C62A91}.Release|Any CPU.ActiveCfg = Release|x64
+ {215B2D68-0A70-4D10-8E75-B31010C62A91}.Release|Mixed Platforms.ActiveCfg = Release|x64
+ {215B2D68-0A70-4D10-8E75-B31010C62A91}.Release|Mixed Platforms.Build.0 = Release|x64
+ {215B2D68-0A70-4D10-8E75-B31010C62A91}.Release|Win32.ActiveCfg = Release|Win32
+ {215B2D68-0A70-4D10-8E75-B31010C62A91}.Release|Win32.Build.0 = Release|Win32
+ {215B2D68-0A70-4D10-8E75-B31010C62A91}.Release|x64.ActiveCfg = Release|x64
+ {215B2D68-0A70-4D10-8E75-B31010C62A91}.Release|x64.Build.0 = Release|x64
+ {E03717ED-69B4-4D21-BC55-DF6690B585C6}.Debug|Any CPU.ActiveCfg = Debug|x64
+ {E03717ED-69B4-4D21-BC55-DF6690B585C6}.Debug|Mixed Platforms.ActiveCfg = Debug|x64
+ {E03717ED-69B4-4D21-BC55-DF6690B585C6}.Debug|Mixed Platforms.Build.0 = Debug|x64
+ {E03717ED-69B4-4D21-BC55-DF6690B585C6}.Debug|Win32.ActiveCfg = Debug|Win32
+ {E03717ED-69B4-4D21-BC55-DF6690B585C6}.Debug|Win32.Build.0 = Debug|Win32
+ {E03717ED-69B4-4D21-BC55-DF6690B585C6}.Debug|x64.ActiveCfg = Debug|x64
+ {E03717ED-69B4-4D21-BC55-DF6690B585C6}.Debug|x64.Build.0 = Debug|x64
+ {E03717ED-69B4-4D21-BC55-DF6690B585C6}.Release|Any CPU.ActiveCfg = Release|x64
+ {E03717ED-69B4-4D21-BC55-DF6690B585C6}.Release|Mixed Platforms.ActiveCfg = Release|x64
+ {E03717ED-69B4-4D21-BC55-DF6690B585C6}.Release|Mixed Platforms.Build.0 = Release|x64
+ {E03717ED-69B4-4D21-BC55-DF6690B585C6}.Release|Win32.ActiveCfg = Release|Win32
+ {E03717ED-69B4-4D21-BC55-DF6690B585C6}.Release|Win32.Build.0 = Release|Win32
+ {E03717ED-69B4-4D21-BC55-DF6690B585C6}.Release|x64.ActiveCfg = Release|x64
+ {E03717ED-69B4-4D21-BC55-DF6690B585C6}.Release|x64.Build.0 = Release|x64
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Debug|Any CPU.ActiveCfg = Debug|x64
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Debug|Mixed Platforms.ActiveCfg = Debug|x64
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Debug|Mixed Platforms.Build.0 = Debug|x64
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Debug|Win32.ActiveCfg = Debug|Win32
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Debug|Win32.Build.0 = Debug|Win32
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Debug|x64.ActiveCfg = Debug|x64
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Debug|x64.Build.0 = Debug|x64
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Release|Any CPU.ActiveCfg = Release|x64
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Release|Mixed Platforms.ActiveCfg = Release|x64
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Release|Mixed Platforms.Build.0 = Release|x64
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Release|Win32.ActiveCfg = Release|Win32
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Release|Win32.Build.0 = Release|Win32
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Release|x64.ActiveCfg = Release|x64
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Release|x64.Build.0 = Release|x64
+ {C9DB5EB7-81AA-4185-BAA1-DA035654402F}.Debug|Any CPU.ActiveCfg = Debug|x64
+ {C9DB5EB7-81AA-4185-BAA1-DA035654402F}.Debug|Mixed Platforms.ActiveCfg = Debug|x64
+ {C9DB5EB7-81AA-4185-BAA1-DA035654402F}.Debug|Mixed Platforms.Build.0 = Debug|x64
+ {C9DB5EB7-81AA-4185-BAA1-DA035654402F}.Debug|Win32.ActiveCfg = Debug|Win32
+ {C9DB5EB7-81AA-4185-BAA1-DA035654402F}.Debug|Win32.Build.0 = Debug|Win32
+ {C9DB5EB7-81AA-4185-BAA1-DA035654402F}.Debug|x64.ActiveCfg = Debug|x64
+ {C9DB5EB7-81AA-4185-BAA1-DA035654402F}.Debug|x64.Build.0 = Debug|x64
+ {C9DB5EB7-81AA-4185-BAA1-DA035654402F}.Release|Any CPU.ActiveCfg = Release|x64
+ {C9DB5EB7-81AA-4185-BAA1-DA035654402F}.Release|Mixed Platforms.ActiveCfg = Release|x64
+ {C9DB5EB7-81AA-4185-BAA1-DA035654402F}.Release|Mixed Platforms.Build.0 = Release|x64
+ {C9DB5EB7-81AA-4185-BAA1-DA035654402F}.Release|Win32.ActiveCfg = Release|Win32
+ {C9DB5EB7-81AA-4185-BAA1-DA035654402F}.Release|Win32.Build.0 = Release|Win32
+ {C9DB5EB7-81AA-4185-BAA1-DA035654402F}.Release|x64.ActiveCfg = Release|x64
+ {C9DB5EB7-81AA-4185-BAA1-DA035654402F}.Release|x64.Build.0 = Release|x64
+ {7B84584E-92BC-4DB9-971B-A1A8F93E5053}.Debug|Any CPU.ActiveCfg = Debug|Win32
+ {7B84584E-92BC-4DB9-971B-A1A8F93E5053}.Debug|Mixed Platforms.ActiveCfg = Debug|Win32
+ {7B84584E-92BC-4DB9-971B-A1A8F93E5053}.Debug|Mixed Platforms.Build.0 = Debug|Win32
+ {7B84584E-92BC-4DB9-971B-A1A8F93E5053}.Debug|Win32.ActiveCfg = Debug|Win32
+ {7B84584E-92BC-4DB9-971B-A1A8F93E5053}.Debug|Win32.Build.0 = Debug|Win32
+ {7B84584E-92BC-4DB9-971B-A1A8F93E5053}.Debug|x64.ActiveCfg = Debug|Win32
+ {7B84584E-92BC-4DB9-971B-A1A8F93E5053}.Release|Any CPU.ActiveCfg = Release|Win32
+ {7B84584E-92BC-4DB9-971B-A1A8F93E5053}.Release|Mixed Platforms.ActiveCfg = Release|Win32
+ {7B84584E-92BC-4DB9-971B-A1A8F93E5053}.Release|Mixed Platforms.Build.0 = Release|Win32
+ {7B84584E-92BC-4DB9-971B-A1A8F93E5053}.Release|Win32.ActiveCfg = Release|Win32
+ {7B84584E-92BC-4DB9-971B-A1A8F93E5053}.Release|Win32.Build.0 = Release|Win32
+ {7B84584E-92BC-4DB9-971B-A1A8F93E5053}.Release|x64.ActiveCfg = Release|Win32
+ {89C30BC3-2874-4F2C-B4DA-EB04E9782236}.Debug|Any CPU.ActiveCfg = Debug|Win32
+ {89C30BC3-2874-4F2C-B4DA-EB04E9782236}.Debug|Mixed Platforms.ActiveCfg = Debug|Win32
+ {89C30BC3-2874-4F2C-B4DA-EB04E9782236}.Debug|Mixed Platforms.Build.0 = Debug|Win32
+ {89C30BC3-2874-4F2C-B4DA-EB04E9782236}.Debug|Win32.ActiveCfg = Debug|Win32
+ {89C30BC3-2874-4F2C-B4DA-EB04E9782236}.Debug|Win32.Build.0 = Debug|Win32
+ {89C30BC3-2874-4F2C-B4DA-EB04E9782236}.Debug|x64.ActiveCfg = Debug|Win32
+ {89C30BC3-2874-4F2C-B4DA-EB04E9782236}.Release|Any CPU.ActiveCfg = Release|Win32
+ {89C30BC3-2874-4F2C-B4DA-EB04E9782236}.Release|Mixed Platforms.ActiveCfg = Release|Win32
+ {89C30BC3-2874-4F2C-B4DA-EB04E9782236}.Release|Mixed Platforms.Build.0 = Release|Win32
+ {89C30BC3-2874-4F2C-B4DA-EB04E9782236}.Release|Win32.ActiveCfg = Release|Win32
+ {89C30BC3-2874-4F2C-B4DA-EB04E9782236}.Release|Win32.Build.0 = Release|Win32
+ {89C30BC3-2874-4F2C-B4DA-EB04E9782236}.Release|x64.ActiveCfg = Release|Win32
+ {FE959BD8-8EE2-4555-AE59-9FA14FFD410E}.Debug|Any CPU.ActiveCfg = Debug|Win32
+ {FE959BD8-8EE2-4555-AE59-9FA14FFD410E}.Debug|Mixed Platforms.ActiveCfg = Debug|Win32
+ {FE959BD8-8EE2-4555-AE59-9FA14FFD410E}.Debug|Mixed Platforms.Build.0 = Debug|Win32
+ {FE959BD8-8EE2-4555-AE59-9FA14FFD410E}.Debug|Win32.ActiveCfg = Debug|Win32
+ {FE959BD8-8EE2-4555-AE59-9FA14FFD410E}.Debug|Win32.Build.0 = Debug|Win32
+ {FE959BD8-8EE2-4555-AE59-9FA14FFD410E}.Debug|x64.ActiveCfg = Debug|Win32
+ {FE959BD8-8EE2-4555-AE59-9FA14FFD410E}.Release|Any CPU.ActiveCfg = Release|Win32
+ {FE959BD8-8EE2-4555-AE59-9FA14FFD410E}.Release|Mixed Platforms.ActiveCfg = Release|Win32
+ {FE959BD8-8EE2-4555-AE59-9FA14FFD410E}.Release|Mixed Platforms.Build.0 = Release|Win32
+ {FE959BD8-8EE2-4555-AE59-9FA14FFD410E}.Release|Win32.ActiveCfg = Release|Win32
+ {FE959BD8-8EE2-4555-AE59-9FA14FFD410E}.Release|Win32.Build.0 = Release|Win32
+ {FE959BD8-8EE2-4555-AE59-9FA14FFD410E}.Release|x64.ActiveCfg = Release|Win32
+ {79D4E297-BFB7-4FF2-9B13-08A146582E46}.Debug|Any CPU.ActiveCfg = Debug|Win32
+ {79D4E297-BFB7-4FF2-9B13-08A146582E46}.Debug|Mixed Platforms.ActiveCfg = Debug|Win32
+ {79D4E297-BFB7-4FF2-9B13-08A146582E46}.Debug|Mixed Platforms.Build.0 = Debug|Win32
+ {79D4E297-BFB7-4FF2-9B13-08A146582E46}.Debug|Win32.ActiveCfg = Debug|Win32
+ {79D4E297-BFB7-4FF2-9B13-08A146582E46}.Debug|Win32.Build.0 = Debug|Win32
+ {79D4E297-BFB7-4FF2-9B13-08A146582E46}.Debug|x64.ActiveCfg = Debug|Win32
+ {79D4E297-BFB7-4FF2-9B13-08A146582E46}.Release|Any CPU.ActiveCfg = Release|Win32
+ {79D4E297-BFB7-4FF2-9B13-08A146582E46}.Release|Mixed Platforms.ActiveCfg = Release|Win32
+ {79D4E297-BFB7-4FF2-9B13-08A146582E46}.Release|Mixed Platforms.Build.0 = Release|Win32
+ {79D4E297-BFB7-4FF2-9B13-08A146582E46}.Release|Win32.ActiveCfg = Release|Win32
+ {79D4E297-BFB7-4FF2-9B13-08A146582E46}.Release|Win32.Build.0 = Release|Win32
+ {79D4E297-BFB7-4FF2-9B13-08A146582E46}.Release|x64.ActiveCfg = Release|Win32
+ EndGlobalSection
+ GlobalSection(SolutionProperties) = preSolution
+ HideSolutionNode = FALSE
+ EndGlobalSection
+ GlobalSection(NestedProjects) = preSolution
+ {2B262D59-9DC7-4BF1-A431-1BD4966899A5} = {12B11474-2D74-48C3-BB3D-F03249BEA88F}
+ {2F760952-C71B-4865-998F-AABAE96D1373} = {12B11474-2D74-48C3-BB3D-F03249BEA88F}
+ {4082881B-EB00-486F-906C-843B8EC06E18} = {12B11474-2D74-48C3-BB3D-F03249BEA88F}
+ {C9DB5EB7-81AA-4185-BAA1-DA035654402F} = {12B11474-2D74-48C3-BB3D-F03249BEA88F}
+ {7B84584E-92BC-4DB9-971B-A1A8F93E5053} = {12B11474-2D74-48C3-BB3D-F03249BEA88F}
+ {89C30BC3-2874-4F2C-B4DA-EB04E9782236} = {12B11474-2D74-48C3-BB3D-F03249BEA88F}
+ {79D4E297-BFB7-4FF2-9B13-08A146582E46} = {12B11474-2D74-48C3-BB3D-F03249BEA88F}
+ EndGlobalSection
+EndGlobal
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
new file mode 100644
index 00000000000..570c897fae4
--- /dev/null
+++ b/src/mongo/db/dbcommands.cpp
@@ -0,0 +1,1955 @@
+// dbcommands.cpp
+
+/**
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+/* SHARDING:
+ I believe this file is for mongod only.
+   See s/commands_public.cpp for mongos.
+*/
+
+#include "pch.h"
+#include "ops/count.h"
+#include "ops/query.h"
+#include "pdfile.h"
+#include "jsobj.h"
+#include "../bson/util/builder.h"
+#include <time.h>
+#include "introspect.h"
+#include "btree.h"
+#include "../util/lruishmap.h"
+#include "../util/md5.hpp"
+#include "../util/processinfo.h"
+#include "../util/ramlog.h"
+#include "json.h"
+#include "repl.h"
+#include "repl_block.h"
+#include "replutil.h"
+#include "commands.h"
+#include "db.h"
+#include "instance.h"
+#include "lasterror.h"
+#include "security.h"
+#include "queryoptimizer.h"
+#include "../scripting/engine.h"
+#include "stats/counters.h"
+#include "background.h"
+#include "../util/version.h"
+#include "../s/d_writeback.h"
+#include "dur_stats.h"
+
+namespace mongo {
+
+ namespace dur {
+ void setAgeOutJournalFiles(bool rotate);
+ }
+ /** @return true if fields found */
+ bool setParmsMongodSpecific(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ BSONElement e = cmdObj["ageOutJournalFiles"];
+ if( !e.eoo() ) {
+ bool r = e.trueValue();
+ log() << "ageOutJournalFiles " << r << endl;
+ dur::setAgeOutJournalFiles(r);
+ return true;
+ }
+ return false;
+ }
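+    // illustrative usage, assuming this is reached via the setParameter command (an assumption,
+    // not shown in this file): db.adminCommand( { setParameter : 1 , ageOutJournalFiles : false } )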
+
+ /* reset any errors so that getlasterror comes back clean.
+
+ useful before performing a long series of operations where we want to
+ see if any of the operations triggered an error, but don't want to check
+       after each op as that would require a client/server round trip.
+ */
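+    // illustrative shell usage (hypothetical): db.runCommand( { resetError : 1 } )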
+ class CmdResetError : public Command {
+ public:
+ virtual LockType locktype() const { return NONE; }
+ virtual bool logTheOp() {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual void help( stringstream& help ) const {
+ help << "reset error state (used with getpreverror)";
+ }
+ CmdResetError() : Command("resetError", false, "reseterror") {}
+ bool run(const string& db, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ LastError *le = lastError.get();
+ assert( le );
+ le->reset();
+ return true;
+ }
+ } cmdResetError;
+
+ /* set by replica sets if specified in the configuration.
+       a pointer is used to avoid any possible locking issues with lockless reading (see below: locktype() is NONE
+       and we would like to keep it that way).
+ (for now, it simply orphans any old copy as config changes should be extremely rare).
+ note: once non-null, never goes to null again.
+ */
+ BSONObj *getLastErrorDefault = 0;
+
+ class CmdGetLastError : public Command {
+ public:
+ CmdGetLastError() : Command("getLastError", false, "getlasterror") { }
+ virtual LockType locktype() const { return NONE; }
+ virtual bool logTheOp() { return false; }
+ virtual bool slaveOk() const { return true; }
+ virtual void help( stringstream& help ) const {
+ help << "return error status of the last operation on this connection\n"
+ << "options:\n"
+ << " { fsync:true } - fsync before returning, or wait for journal commit if running with --journal\n"
+ << " { j:true } - wait for journal commit if running with --journal\n"
+ << " { w:n } - await replication to n servers (including self) before returning\n"
+ << " { wtimeout:m} - timeout for w in m milliseconds";
+ }
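+        // illustrative shell usage (hypothetical values):
+        //   db.runCommand( { getLastError : 1 , fsync : true } )
+        //   db.runCommand( { getLastError : 1 , w : 2 , wtimeout : 5000 } )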
+ bool run(const string& dbname, BSONObj& _cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ LastError *le = lastError.disableForCommand();
+
+ bool err = false;
+
+ if ( le->nPrev != 1 )
+ err = LastError::noError.appendSelf( result , false );
+ else
+ err = le->appendSelf( result , false );
+
+ Client& c = cc();
+ c.appendLastOp( result );
+
+ result.appendNumber( "connectionId" , c.getConnectionId() ); // for sharding; also useful in general for debugging
+
+ BSONObj cmdObj = _cmdObj;
+ {
+ BSONObj::iterator i(_cmdObj);
+ i.next();
+ if( !i.more() ) {
+ /* empty, use default */
+ BSONObj *def = getLastErrorDefault;
+ if( def )
+ cmdObj = *def;
+ }
+ }
+
+ if ( cmdObj["j"].trueValue() ) {
+ if( !getDur().awaitCommit() ) {
+ // --journal is off
+ result.append("jnote", "journaling not enabled on this server");
+ }
+ if( cmdObj["fsync"].trueValue() ) {
+ errmsg = "fsync and j options are not used together";
+ return false;
+ }
+ }
+ else if ( cmdObj["fsync"].trueValue() ) {
+ Timer t;
+ if( !getDur().awaitCommit() ) {
+ // if get here, not running with --journal
+ log() << "fsync from getlasterror" << endl;
+ result.append( "fsyncFiles" , MemoryMappedFile::flushAll( true ) );
+ }
+ else {
+                // this is perhaps temporary: how long we waited for the group commit to occur.
+ result.append( "waited", t.millis() );
+ }
+ }
+
+ if ( err ) {
+ // doesn't make sense to wait for replication
+ // if there was an error
+ return true;
+ }
+
+ BSONElement e = cmdObj["w"];
+ if ( e.ok() ) {
+ int timeout = cmdObj["wtimeout"].numberInt();
+ Timer t;
+
+ long long passes = 0;
+ char buf[32];
+ while ( 1 ) {
+ OpTime op(c.getLastOp());
+
+ if ( op.isNull() ) {
+ if ( anyReplEnabled() ) {
+ result.append( "wnote" , "no write has been done on this connection" );
+ }
+ else if ( e.isNumber() && e.numberInt() <= 1 ) {
+ // don't do anything
+ // w=1 and no repl, so this is fine
+ }
+ else {
+ // w=2 and no repl
+ result.append( "wnote" , "no replication has been enabled, so w=2+ won't work" );
+ result.append( "err", "norepl" );
+ return true;
+ }
+ break;
+ }
+
+ // check this first for w=0 or w=1
+ if ( opReplicatedEnough( op, e ) ) {
+ break;
+ }
+
+ // if replication isn't enabled (e.g., config servers)
+ if ( ! anyReplEnabled() ) {
+ result.append( "err", "norepl" );
+ return true;
+ }
+
+
+ if ( timeout > 0 && t.millis() >= timeout ) {
+ result.append( "wtimeout" , true );
+ errmsg = "timed out waiting for slaves";
+ result.append( "waited" , t.millis() );
+ result.append( "err" , "timeout" );
+ return true;
+ }
+
+ assert( sprintf( buf , "w block pass: %lld" , ++passes ) < 30 );
+ c.curop()->setMessage( buf );
+ sleepmillis(1);
+ killCurrentOp.checkForInterrupt();
+ }
+ result.appendNumber( "wtime" , t.millis() );
+ }
+
+ result.appendNull( "err" );
+ return true;
+ }
+ } cmdGetLastError;
+
+ class CmdGetPrevError : public Command {
+ public:
+ virtual LockType locktype() const { return NONE; }
+ virtual bool logTheOp() {
+ return false;
+ }
+ virtual void help( stringstream& help ) const {
+ help << "check for errors since last reseterror commandcal";
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ CmdGetPrevError() : Command("getPrevError", false, "getpreverror") {}
+ bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ LastError *le = lastError.disableForCommand();
+ le->appendSelf( result );
+ if ( le->valid )
+ result.append( "nPrev", le->nPrev );
+ else
+ result.append( "nPrev", -1 );
+ return true;
+ }
+ } cmdGetPrevError;
+
+ CmdShutdown cmdShutdown;
+
+ void CmdShutdown::help( stringstream& help ) const {
+ help << "shutdown the database. must be ran against admin db and "
+ << "either (1) ran from localhost or (2) authenticated. If "
+ << "this is a primary in a replica set and there is no member "
+ << "within 10 seconds of its optime, it will not shutdown "
+ << "without force : true. You can also specify timeoutSecs : "
+ << "N to wait N seconds for other members to catch up.";
+ }
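+    // illustrative usage (hypothetical values): db.adminCommand( { shutdown : 1 , force : true , timeoutSecs : 30 } )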
+
+ bool CmdShutdown::run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ bool force = cmdObj.hasField("force") && cmdObj["force"].trueValue();
+
+ if (!force && theReplSet && theReplSet->isPrimary()) {
+ long long timeout, now, start;
+ timeout = now = start = curTimeMicros64()/1000000;
+ if (cmdObj.hasField("timeoutSecs")) {
+ timeout += cmdObj["timeoutSecs"].numberLong();
+ }
+
+ OpTime lastOp = theReplSet->lastOpTimeWritten;
+ OpTime closest = theReplSet->lastOtherOpTime();
+ long long int diff = lastOp.getSecs() - closest.getSecs();
+ while (now <= timeout && (diff < 0 || diff > 10)) {
+ sleepsecs(1);
+ now++;
+
+ lastOp = theReplSet->lastOpTimeWritten;
+ closest = theReplSet->lastOtherOpTime();
+ diff = lastOp.getSecs() - closest.getSecs();
+ }
+
+ if (diff < 0 || diff > 10) {
+ errmsg = "no secondaries within 10 seconds of my optime";
+ result.append("closest", closest.getSecs());
+ result.append("difference", diff);
+ return false;
+ }
+
+ // step down
+ theReplSet->stepDown(120);
+
+ log() << "waiting for secondaries to catch up" << endl;
+
+ lastOp = theReplSet->lastOpTimeWritten;
+ while (lastOp != closest && now - start < 60) {
+ closest = theReplSet->lastOtherOpTime();
+
+ now++;
+ sleepsecs(1);
+ }
+
+ // regardless of whether they caught up, we'll shut down
+ }
+
+ return shutdownHelper();
+ }
+
+ class CmdDropDatabase : public Command {
+ public:
+ virtual bool logTheOp() {
+ return true;
+ }
+ virtual void help( stringstream& help ) const {
+ help << "drop (delete) this database";
+ }
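+        // illustrative usage (hypothetical): run against the database to drop: db.runCommand( { dropDatabase : 1 } )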
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual LockType locktype() const { return WRITE; }
+ CmdDropDatabase() : Command("dropDatabase") {}
+ bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ BSONElement e = cmdObj.firstElement();
+ log() << "dropDatabase " << dbname << endl;
+ int p = (int) e.number();
+ if ( p != 1 )
+ return false;
+ dropDatabase(dbname);
+ result.append( "dropped" , dbname );
+ return true;
+ }
+ } cmdDropDatabase;
+
+ class CmdRepairDatabase : public Command {
+ public:
+ virtual bool logTheOp() {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool maintenanceMode() const { return true; }
+ virtual void help( stringstream& help ) const {
+ help << "repair database. also compacts. note: slow.";
+ }
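+        // illustrative usage (hypothetical): db.runCommand( { repairDatabase : 1 , preserveClonedFilesOnFailure : true } )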
+ virtual LockType locktype() const { return WRITE; }
+ CmdRepairDatabase() : Command("repairDatabase") {}
+ bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ BSONElement e = cmdObj.firstElement();
+ log() << "repairDatabase " << dbname << endl;
+ int p = (int) e.number();
+ if ( p != 1 ) {
+ errmsg = "bad option";
+ return false;
+ }
+ e = cmdObj.getField( "preserveClonedFilesOnFailure" );
+ bool preserveClonedFilesOnFailure = e.isBoolean() && e.boolean();
+ e = cmdObj.getField( "backupOriginalFiles" );
+ bool backupOriginalFiles = e.isBoolean() && e.boolean();
+ return repairDatabase( dbname, errmsg, preserveClonedFilesOnFailure, backupOriginalFiles );
+ }
+ } cmdRepairDatabase;
+
+ /* set db profiling level
+ todo: how do we handle profiling information put in the db with replication?
+ sensibly or not?
+ */
+ class CmdProfile : public Command {
+ public:
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual void help( stringstream& help ) const {
+ help << "enable or disable performance profiling\n";
+ help << "{ profile : <n> }\n";
+ help << "0=off 1=log slow ops 2=log all\n";
+ help << "-1 to get current values\n";
+ help << "http://www.mongodb.org/display/DOCS/Database+Profiler";
+ }
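+        // illustrative usage (hypothetical values): db.runCommand( { profile : 1 , slowms : 200 } )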
+ virtual LockType locktype() const { return WRITE; }
+ CmdProfile() : Command("profile") {}
+ bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ BSONElement e = cmdObj.firstElement();
+ result.append("was", cc().database()->profile);
+ result.append("slowms", cmdLine.slowMS );
+
+ int p = (int) e.number();
+ bool ok = false;
+
+ if ( p == -1 )
+ ok = true;
+ else if ( p >= 0 && p <= 2 ) {
+ ok = cc().database()->setProfilingLevel( p , errmsg );
+ }
+
+ BSONElement slow = cmdObj["slowms"];
+ if ( slow.isNumber() )
+ cmdLine.slowMS = slow.numberInt();
+
+ return ok;
+ }
+ } cmdProfile;
+
+ class CmdServerStatus : public Command {
+ public:
+ virtual bool slaveOk() const {
+ return true;
+ }
+ CmdServerStatus() : Command("serverStatus", true) {}
+
+ virtual LockType locktype() const { return NONE; }
+
+ virtual void help( stringstream& help ) const {
+ help << "returns lots of administrative server statistics";
+ }
+
+ bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ long long start = Listener::getElapsedTimeMillis();
+ BSONObjBuilder timeBuilder(128);
+
+
+ bool authed = cc().getAuthenticationInfo()->isAuthorizedReads("admin");
+
+ result.append( "host" , prettyHostName() );
+ result.append("version", versionString);
+ result.append("process","mongod");
+ result.append("uptime",(double) (time(0)-cmdLine.started));
+ result.append("uptimeEstimate",(double) (start/1000));
+ result.appendDate( "localTime" , jsTime() );
+
+ {
+ BSONObjBuilder t;
+
+ unsigned long long last, start, timeLocked;
+ d.dbMutex.info().getTimingInfo(start, timeLocked);
+ last = curTimeMicros64();
+ double tt = (double) last-start;
+ double tl = (double) timeLocked;
+ t.append("totalTime", tt);
+ t.append("lockTime", tl);
+ t.append("ratio", (tt ? tl/tt : 0));
+
+ {
+ BSONObjBuilder ttt( t.subobjStart( "currentQueue" ) );
+ int w=0, r=0;
+ Client::recommendedYieldMicros( &w , &r );
+ ttt.append( "total" , w + r );
+ ttt.append( "readers" , r );
+ ttt.append( "writers" , w );
+ ttt.done();
+ }
+
+ {
+ BSONObjBuilder ttt( t.subobjStart( "activeClients" ) );
+ int w=0, r=0;
+ Client::getActiveClientCount( w , r );
+ ttt.append( "total" , w + r );
+ ttt.append( "readers" , r );
+ ttt.append( "writers" , w );
+ ttt.done();
+ }
+
+
+
+ result.append( "globalLock" , t.obj() );
+ }
+ timeBuilder.appendNumber( "after basic" , Listener::getElapsedTimeMillis() - start );
+
+ {
+
+ BSONObjBuilder t( result.subobjStart( "mem" ) );
+
+ t.append("bits", ( sizeof(int*) == 4 ? 32 : 64 ) );
+
+ ProcessInfo p;
+ int v = 0;
+ if ( p.supported() ) {
+ t.appendNumber( "resident" , p.getResidentSize() );
+ v = p.getVirtualMemorySize();
+ t.appendNumber( "virtual" , v );
+ t.appendBool( "supported" , true );
+ }
+ else {
+ result.append( "note" , "not all mem info support on this platform" );
+ t.appendBool( "supported" , false );
+ }
+
+ timeBuilder.appendNumber( "middle of mem" , Listener::getElapsedTimeMillis() - start );
+
+ int m = (int) (MemoryMappedFile::totalMappedLength() / ( 1024 * 1024 ));
+ t.appendNumber( "mapped" , m );
+
+ if ( cmdLine.dur ) {
+ m *= 2;
+ t.appendNumber( "mappedWithJournal" , m );
+ }
+
+ int overhead = v - m - connTicketHolder.used();
+
+ if( overhead > 4000 ) {
+ t.append("note", "virtual minus mapped is large. could indicate a memory leak");
+ log() << "warning: virtual size (" << v << "MB) - mapped size (" << m << "MB) is large (" << overhead << "MB). could indicate a memory leak" << endl;
+ }
+
+ t.done();
+
+ }
+ timeBuilder.appendNumber( "after mem" , Listener::getElapsedTimeMillis() - start );
+
+ {
+ BSONObjBuilder bb( result.subobjStart( "connections" ) );
+ bb.append( "current" , connTicketHolder.used() );
+ bb.append( "available" , connTicketHolder.available() );
+ bb.done();
+ }
+ timeBuilder.appendNumber( "after connections" , Listener::getElapsedTimeMillis() - start );
+
+ {
+ BSONObjBuilder bb( result.subobjStart( "extra_info" ) );
+ bb.append("note", "fields vary by platform");
+ ProcessInfo p;
+ p.getExtraInfo(bb);
+ bb.done();
+ timeBuilder.appendNumber( "after extra info" , Listener::getElapsedTimeMillis() - start );
+
+ }
+
+ {
+ BSONObjBuilder bb( result.subobjStart( "indexCounters" ) );
+ globalIndexCounters.append( bb );
+ bb.done();
+ }
+
+ {
+ BSONObjBuilder bb( result.subobjStart( "backgroundFlushing" ) );
+ globalFlushCounters.append( bb );
+ bb.done();
+ }
+
+ {
+ BSONObjBuilder bb( result.subobjStart( "cursors" ) );
+ ClientCursor::appendStats( bb );
+ bb.done();
+ }
+
+ {
+ BSONObjBuilder bb( result.subobjStart( "network" ) );
+ networkCounter.append( bb );
+ bb.done();
+ }
+
+
+ timeBuilder.appendNumber( "after counters" , Listener::getElapsedTimeMillis() - start );
+
+ if ( anyReplEnabled() ) {
+ BSONObjBuilder bb( result.subobjStart( "repl" ) );
+ appendReplicationInfo( bb , authed , cmdObj["repl"].numberInt() );
+ bb.done();
+
+ if ( ! _isMaster() ) {
+ result.append( "opcountersRepl" , replOpCounters.getObj() );
+ }
+
+ }
+
+ timeBuilder.appendNumber( "after repl" , Listener::getElapsedTimeMillis() - start );
+
+ result.append( "opcounters" , globalOpCounters.getObj() );
+
+ {
+ BSONObjBuilder asserts( result.subobjStart( "asserts" ) );
+ asserts.append( "regular" , assertionCount.regular );
+ asserts.append( "warning" , assertionCount.warning );
+ asserts.append( "msg" , assertionCount.msg );
+ asserts.append( "user" , assertionCount.user );
+ asserts.append( "rollovers" , assertionCount.rollovers );
+ asserts.done();
+ }
+
+ timeBuilder.appendNumber( "after asserts" , Listener::getElapsedTimeMillis() - start );
+
+ result.append( "writeBacksQueued" , ! writeBackManager.queuesEmpty() );
+
+ if( cmdLine.dur ) {
+ result.append("dur", dur::stats.asObj());
+ }
+
+ timeBuilder.appendNumber( "after dur" , Listener::getElapsedTimeMillis() - start );
+
+ {
+ RamLog* rl = RamLog::get( "warnings" );
+ verify(15880, rl);
+
+ if (rl->lastWrite() >= time(0)-(10*60)){ // only show warnings from last 10 minutes
+ vector<const char*> lines;
+ rl->get( lines );
+
+ BSONArrayBuilder arr( result.subarrayStart( "warnings" ) );
+ for ( unsigned i=std::max(0,(int)lines.size()-10); i<lines.size(); i++ )
+ arr.append( lines[i] );
+ arr.done();
+ }
+ }
+
+ if ( ! authed )
+ result.append( "note" , "run against admin for more info" );
+
+ timeBuilder.appendNumber( "at end" , Listener::getElapsedTimeMillis() - start );
+ if ( Listener::getElapsedTimeMillis() - start > 1000 ) {
+ BSONObj t = timeBuilder.obj();
+ log() << "serverStatus was very slow: " << t << endl;
+ result.append( "timing" , t );
+ }
+
+ return true;
+ }
+ } cmdServerStatus;
+
+ class CmdGetOpTime : public Command {
+ public:
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual void help( stringstream& help ) const { help << "internal"; }
+ virtual LockType locktype() const { return NONE; }
+ CmdGetOpTime() : Command("getoptime") { }
+ bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ writelock l( "" );
+ result.appendDate("optime", OpTime::now().asDate());
+ return true;
+ }
+ } cmdgetoptime;
+
+ /*
+ class Cmd : public Command {
+ public:
+ Cmd() : Command("") { }
+ bool adminOnly() const { return true; }
+ bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result) {
+ return true;
+ }
+ } cmd;
+ */
+
+ class CmdDiagLogging : public Command {
+ public:
+ virtual bool slaveOk() const {
+ return true;
+ }
+ CmdDiagLogging() : Command("diagLogging") { }
+ bool adminOnly() const {
+ return true;
+ }
+ void help(stringstream& h) const { h << "http://www.mongodb.org/display/DOCS/Monitoring+and+Diagnostics#MonitoringandDiagnostics-DatabaseRecord%2FReplay"; }
+ virtual LockType locktype() const { return WRITE; }
+ bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ int was = _diaglog.setLevel( cmdObj.firstElement().numberInt() );
+ _diaglog.flush();
+ if ( !cmdLine.quiet )
+ tlog() << "CMD: diagLogging set to " << _diaglog.getLevel() << " from: " << was << endl;
+ result.append( "was" , was );
+ return true;
+ }
+ } cmddiaglogging;
+
+    /* remove a bit from a bit array - actually removes its slot (shifting higher bits down), not just clearing it.
+       note: this function does not work with x == 63 -- that is ok for now,
+       but keep in mind that if the maximum number of indexes were ever extended to
+       exactly 64 it would be a problem
+    */
+ unsigned long long removeBit(unsigned long long b, int x) {
+ unsigned long long tmp = b;
+ return
+ (tmp & ((((unsigned long long) 1) << x)-1)) |
+ ((tmp >> (x+1)) << x);
+ }
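+    // worked example (illustrative): removeBit(0b10110, 2) == 0b01010 --
+    // the low bits "10" are kept and the bits above slot 2 shift down by one.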
+
+ struct DBCommandsUnitTest {
+ DBCommandsUnitTest() {
+ assert( removeBit(1, 0) == 0 );
+ assert( removeBit(2, 0) == 1 );
+ assert( removeBit(2, 1) == 0 );
+ assert( removeBit(255, 1) == 127 );
+ assert( removeBit(21, 2) == 9 );
+ assert( removeBit(0x4000000000000001ULL, 62) == 1 );
+ }
+ } dbc_unittest;
+
+ void assureSysIndexesEmptied(const char *ns, IndexDetails *exceptForIdIndex);
+ int removeFromSysIndexes(const char *ns, const char *idxName);
+
+ bool dropIndexes( NamespaceDetails *d, const char *ns, const char *name, string &errmsg, BSONObjBuilder &anObjBuilder, bool mayDeleteIdIndex ) {
+
+ BackgroundOperation::assertNoBgOpInProgForNs(ns);
+
+ d = d->writingWithExtra();
+ d->aboutToDeleteAnIndex();
+
+ /* there may be pointers pointing at keys in the btree(s). kill them. */
+ ClientCursor::invalidate(ns);
+
+ // delete a specific index or all?
+ if ( *name == '*' && name[1] == 0 ) {
+ log(4) << " d->nIndexes was " << d->nIndexes << '\n';
+ anObjBuilder.append("nIndexesWas", (double)d->nIndexes);
+ IndexDetails *idIndex = 0;
+ if( d->nIndexes ) {
+ for ( int i = 0; i < d->nIndexes; i++ ) {
+ if ( !mayDeleteIdIndex && d->idx(i).isIdIndex() ) {
+ idIndex = &d->idx(i);
+ }
+ else {
+ d->idx(i).kill_idx();
+ }
+ }
+ d->nIndexes = 0;
+ }
+ if ( idIndex ) {
+ d->addIndex(ns) = *idIndex;
+ wassert( d->nIndexes == 1 );
+ }
+ /* assuming here that id index is not multikey: */
+ d->multiKeyIndexBits = 0;
+ assureSysIndexesEmptied(ns, idIndex);
+ anObjBuilder.append("msg", mayDeleteIdIndex ?
+ "indexes dropped for collection" :
+ "non-_id indexes dropped for collection");
+ }
+ else {
+ // delete just one index
+ int x = d->findIndexByName(name);
+ if ( x >= 0 ) {
+ log(4) << " d->nIndexes was " << d->nIndexes << endl;
+ anObjBuilder.append("nIndexesWas", (double)d->nIndexes);
+
+ /* note it is important we remove the IndexDetails with this
+ call, otherwise, on recreate, the old one would be reused, and its
+ IndexDetails::info ptr would be bad info.
+ */
+ IndexDetails *id = &d->idx(x);
+ if ( !mayDeleteIdIndex && id->isIdIndex() ) {
+ errmsg = "may not delete _id index";
+ return false;
+ }
+ id->kill_idx();
+ d->multiKeyIndexBits = removeBit(d->multiKeyIndexBits, x);
+ d->nIndexes--;
+ for ( int i = x; i < d->nIndexes; i++ )
+ d->idx(i) = d->idx(i+1);
+ }
+ else {
+                int n = removeFromSysIndexes(ns, name); // just in case there is an orphaned listing there - i.e. it should have been repaired but wasn't
+ if( n ) {
+ log() << "info: removeFromSysIndexes cleaned up " << n << " entries" << endl;
+ }
+ log() << "dropIndexes: " << name << " not found" << endl;
+ errmsg = "index not found";
+ return false;
+ }
+ }
+ return true;
+ }
+
+ /* drop collection */
+ class CmdDrop : public Command {
+ public:
+ CmdDrop() : Command("drop") { }
+ virtual bool logTheOp() {
+ return true;
+ }
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool adminOnly() const {
+ return false;
+ }
+ virtual void help( stringstream& help ) const { help << "drop a collection\n{drop : <collectionName>}"; }
+ virtual LockType locktype() const { return WRITE; }
+ virtual bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ string nsToDrop = dbname + '.' + cmdObj.firstElement().valuestr();
+ NamespaceDetails *d = nsdetails(nsToDrop.c_str());
+ if ( !cmdLine.quiet )
+ tlog() << "CMD: drop " << nsToDrop << endl;
+ if ( d == 0 ) {
+ errmsg = "ns not found";
+ return false;
+ }
+ uassert( 10039 , "can't drop collection with reserved $ character in name", strchr(nsToDrop.c_str(), '$') == 0 );
+ dropCollection( nsToDrop, errmsg, result );
+ return true;
+ }
+ } cmdDrop;
+
+ /* select count(*) */
+ class CmdCount : public Command {
+ public:
+ virtual LockType locktype() const { return READ; }
+ CmdCount() : Command("count") { }
+ virtual bool logTheOp() { return false; }
+ virtual bool slaveOk() const {
+ // ok on --slave setups
+ return replSettings.slave == SimpleSlave;
+ }
+ virtual bool slaveOverrideOk() { return true; }
+ virtual bool maintenanceOk() const { return false; }
+ virtual bool adminOnly() const { return false; }
+ virtual void help( stringstream& help ) const { help << "count objects in collection"; }
+ virtual bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ string ns = parseNs(dbname, cmdObj);
+ string err;
+ long long n = runCount(ns.c_str(), cmdObj, err);
+ long long nn = n;
+ bool ok = true;
+ if ( n == -1 ) {
+ nn = 0;
+ result.appendBool( "missing" , true );
+ }
+ else if ( n < 0 ) {
+ nn = 0;
+ ok = false;
+ if ( !err.empty() )
+ errmsg = err;
+ }
+ result.append("n", (double) nn);
+ return ok;
+ }
+ } cmdCount;
+
+ /* create collection */
+ class CmdCreate : public Command {
+ public:
+ CmdCreate() : Command("create") { }
+ virtual bool logTheOp() {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool adminOnly() const {
+ return false;
+ }
+ virtual LockType locktype() const { return WRITE; }
+ virtual void help( stringstream& help ) const {
+ help << "create a collection explicitly\n"
+ "{ create: <ns>[, capped: <bool>, size: <collSizeInBytes>, max: <nDocs>] }";
+ }
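+        // illustrative usage (hypothetical values): db.runCommand( { create : "log" , capped : true , size : 1048576 } )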
+ virtual bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ uassert(15888, "must pass name of collection to create", cmdObj.firstElement().valuestrsafe()[0] != '\0');
+ string ns = dbname + '.' + cmdObj.firstElement().valuestr();
+ string err;
+ uassert(14832, "specify size:<n> when capped is true", !cmdObj["capped"].trueValue() || cmdObj["size"].isNumber() || cmdObj.hasField("$nExtents"));
+ bool ok = userCreateNS(ns.c_str(), cmdObj, err, ! fromRepl );
+ if ( !ok && !err.empty() )
+ errmsg = err;
+ return ok;
+ }
+ } cmdCreate;
+
+ /* "dropIndexes" is now the preferred form - "deleteIndexes" deprecated */
+ class CmdDropIndexes : public Command {
+ public:
+ virtual bool logTheOp() {
+ return true;
+ }
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual LockType locktype() const { return WRITE; }
+ virtual void help( stringstream& help ) const {
+ help << "drop indexes for a collection";
+ }
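+        // illustrative usage (hypothetical): db.runCommand( { dropIndexes : "coll" , index : "*" } )
+        //   or by key pattern:              db.runCommand( { dropIndexes : "coll" , index : { a : 1 } } )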
+ CmdDropIndexes() : Command("dropIndexes", false, "deleteIndexes") { }
+ bool run(const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& anObjBuilder, bool /*fromRepl*/) {
+ BSONElement e = jsobj.firstElement();
+ string toDeleteNs = dbname + '.' + e.valuestr();
+ NamespaceDetails *d = nsdetails(toDeleteNs.c_str());
+ if ( !cmdLine.quiet )
+ tlog() << "CMD: dropIndexes " << toDeleteNs << endl;
+ if ( d ) {
+ BSONElement f = jsobj.getField("index");
+ if ( f.type() == String ) {
+ return dropIndexes( d, toDeleteNs.c_str(), f.valuestr(), errmsg, anObjBuilder, false );
+ }
+ else if ( f.type() == Object ) {
+ int idxId = d->findIndexByKeyPattern( f.embeddedObject() );
+ if ( idxId < 0 ) {
+ errmsg = "can't find index with key:";
+ errmsg += f.embeddedObject().toString();
+ return false;
+ }
+ else {
+ IndexDetails& ii = d->idx( idxId );
+ string iName = ii.indexName();
+ return dropIndexes( d, toDeleteNs.c_str(), iName.c_str() , errmsg, anObjBuilder, false );
+ }
+ }
+ else {
+ errmsg = "invalid index name spec";
+ return false;
+ }
+ }
+ else {
+ errmsg = "ns not found";
+ return false;
+ }
+ }
+ } cmdDropIndexes;
+
+ class CmdReIndex : public Command {
+ public:
+ virtual bool logTheOp() { return false; } // only reindexes on the one node
+ virtual bool slaveOk() const { return true; } // can reindex on a secondary
+ virtual LockType locktype() const { return WRITE; }
+ virtual void help( stringstream& help ) const {
+ help << "re-index a collection";
+ }
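+        // illustrative usage (hypothetical): db.runCommand( { reIndex : "coll" } )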
+ CmdReIndex() : Command("reIndex") { }
+ bool run(const string& dbname , BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
+ static DBDirectClient db;
+
+ BSONElement e = jsobj.firstElement();
+ string toDeleteNs = dbname + '.' + e.valuestr();
+ NamespaceDetails *d = nsdetails(toDeleteNs.c_str());
+ tlog() << "CMD: reIndex " << toDeleteNs << endl;
+ BackgroundOperation::assertNoBgOpInProgForNs(toDeleteNs.c_str());
+
+ if ( ! d ) {
+ errmsg = "ns not found";
+ return false;
+ }
+
+ list<BSONObj> all;
+ auto_ptr<DBClientCursor> i = db.query( dbname + ".system.indexes" , BSON( "ns" << toDeleteNs ) , 0 , 0 , 0 , QueryOption_SlaveOk );
+ BSONObjBuilder b;
+ while ( i->more() ) {
+ BSONObj o = i->next().removeField("v").getOwned();
+ b.append( BSONObjBuilder::numStr( all.size() ) , o );
+ all.push_back( o );
+ }
+
+
+ bool ok = dropIndexes( d, toDeleteNs.c_str(), "*" , errmsg, result, true );
+ if ( ! ok ) {
+ errmsg = "dropIndexes failed";
+ return false;
+ }
+
+ for ( list<BSONObj>::iterator i=all.begin(); i!=all.end(); i++ ) {
+ BSONObj o = *i;
+ log(1) << "reIndex ns: " << toDeleteNs << " index: " << o << endl;
+ theDataFileMgr.insertWithObjMod( Namespace( toDeleteNs.c_str() ).getSisterNS( "system.indexes" ).c_str() , o , true );
+ }
+
+ result.append( "nIndexes" , (int)all.size() );
+ result.appendArray( "indexes" , b.obj() );
+ return true;
+ }
+ } cmdReIndex;
+
+ class CmdListDatabases : public Command {
+ public:
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool slaveOverrideOk() {
+ return true;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual LockType locktype() const { return NONE; }
+ virtual void help( stringstream& help ) const { help << "list databases on this server"; }
+ CmdListDatabases() : Command("listDatabases" , true ) {}
+ bool run(const string& dbname , BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
+ vector< string > dbNames;
+ getDatabaseNames( dbNames );
+ vector< BSONObj > dbInfos;
+
+ set<string> seen;
+ boost::intmax_t totalSize = 0;
+ for ( vector< string >::iterator i = dbNames.begin(); i != dbNames.end(); ++i ) {
+ BSONObjBuilder b;
+ b.append( "name", *i );
+
+ boost::intmax_t size = dbSize( i->c_str() );
+ b.append( "sizeOnDisk", (double) size );
+ totalSize += size;
+
+ {
+ Client::ReadContext rc( *i + ".system.namespaces" );
+ b.appendBool( "empty", rc.ctx().db()->isEmpty() );
+ }
+
+ dbInfos.push_back( b.obj() );
+
+ seen.insert( i->c_str() );
+ }
+
+ // TODO: erh 1/1/2010 I think this is broken where path != dbpath ??
+ set<string> allShortNames;
+ {
+ readlock lk;
+ dbHolder().getAllShortNames( false, allShortNames );
+ }
+
+ for ( set<string>::iterator i = allShortNames.begin(); i != allShortNames.end(); i++ ) {
+ string name = *i;
+
+ if ( seen.count( name ) )
+ continue;
+
+ BSONObjBuilder b;
+ b.append( "name" , name );
+ b.append( "sizeOnDisk" , (double)1.0 );
+
+ {
+ readlock lk( name );
+ Client::Context ctx( name );
+ b.appendBool( "empty", ctx.db()->isEmpty() );
+ }
+
+ dbInfos.push_back( b.obj() );
+ }
+
+ result.append( "databases", dbInfos );
+ result.append( "totalSize", double( totalSize ) );
+ return true;
+ }
+ } cmdListDatabases;
+
+ /* note an access to a database right after this will open it back up - so this is mainly
+ for diagnostic purposes.
+ */
+ class CmdCloseAllDatabases : public Command {
+ public:
+ virtual void help( stringstream& help ) const { help << "Close all database files.\nA new request will cause an immediate reopening; thus, this is mostly for testing purposes."; }
+ virtual bool adminOnly() const { return true; }
+ virtual bool slaveOk() const { return false; }
+ virtual LockType locktype() const { return WRITE; }
+
+ CmdCloseAllDatabases() : Command( "closeAllDatabases" ) {}
+ bool run(const string& dbname , BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
+ bool ok;
+ try {
+ ok = dbHolderW().closeAll( dbpath , result, false );
+ }
+ catch(DBException&) {
+ throw;
+ }
+ catch(...) {
+ log() << "ERROR uncaught exception in command closeAllDatabases" << endl;
+ errmsg = "unexpected uncaught exception";
+ return false;
+ }
+ return ok;
+ }
+ } cmdCloseAllDatabases;
+
+ class CmdFileMD5 : public Command {
+ public:
+ CmdFileMD5() : Command( "filemd5" ) {}
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual void help( stringstream& help ) const {
+ help << " example: { filemd5 : ObjectId(aaaaaaa) , root : \"fs\" }";
+ }
+ virtual LockType locktype() const { return READ; }
+ bool run(const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ string ns = dbname;
+ ns += ".";
+ {
+ string root = jsobj.getStringField( "root" );
+ if ( root.size() == 0 )
+ root = "fs";
+ ns += root;
+ }
+ ns += ".chunks"; // make this an option in jsobj
+
+ md5digest d;
+ md5_state_t st;
+ md5_init(&st);
+
+ BSONObj query = BSON( "files_id" << jsobj["filemd5"] );
+ BSONObj sort = BSON( "files_id" << 1 << "n" << 1 );
+
+ shared_ptr<Cursor> cursor = bestGuessCursor(ns.c_str(), query, sort);
+ if ( ! cursor ) {
+ errmsg = "need an index on { files_id : 1 , n : 1 }";
+ return false;
+ }
+ auto_ptr<ClientCursor> cc (new ClientCursor(QueryOption_NoCursorTimeout, cursor, ns.c_str()));
+
+ int n = 0;
+ while ( cursor->ok() ) {
+ if ( ! cursor->matcher()->matchesCurrent( cursor.get() ) ) {
+ log() << "**** NOT MATCHING ****" << endl;
+ PRINT(cursor->current());
+ cursor->advance();
+ continue;
+ }
+
+ BSONObj obj = cursor->current();
+ cursor->advance();
+
+ BSONElement ne = obj["n"];
+ assert(ne.isNumber());
+ int myn = ne.numberInt();
+ if ( n != myn ) {
+ log() << "should have chunk: " << n << " have:" << myn << endl;
+ dumpChunks( ns , query , sort );
+ uassert( 10040 , "chunks out of order" , n == myn );
+ }
+
+ int len;
+ const char * data = obj["data"].binDataClean( len );
+
+ ClientCursor::YieldLock yield (cc.get());
+ try {
+ md5_append( &st , (const md5_byte_t*)(data) , len );
+ n++;
+ }
+ catch (...) {
+ if ( ! yield.stillOk() ) // relocks
+ cc.release();
+ throw;
+ }
+
+ if ( ! yield.stillOk() ) {
+ cc.release();
+ uasserted(13281, "File deleted during filemd5 command");
+ }
+ }
+
+ md5_finish(&st, d);
+
+ result.append( "numChunks" , n );
+ result.append( "md5" , digestToString( d ) );
+ return true;
+ }
+
+ void dumpChunks( const string& ns , const BSONObj& query , const BSONObj& sort ) {
+ DBDirectClient client;
+ Query q(query);
+ q.sort(sort);
+ auto_ptr<DBClientCursor> c = client.query(ns, q);
+ while(c->more())
+ PRINT(c->nextSafe());
+ }
+ } cmdFileMD5;
+
+ static IndexDetails *cmdIndexDetailsForRange( const char *ns, string &errmsg, BSONObj &min, BSONObj &max, BSONObj &keyPattern ) {
+ if ( ns[ 0 ] == '\0' || min.isEmpty() || max.isEmpty() ) {
+ errmsg = "invalid command syntax (note: min and max are required)";
+ return 0;
+ }
+ return indexDetailsForRange( ns, errmsg, min, max, keyPattern );
+ }
+
+ class CmdDatasize : public Command {
+ virtual string parseNs(const string& dbname, const BSONObj& cmdObj) const {
+ return parseNsFullyQualified(dbname, cmdObj);
+ }
+ public:
+ CmdDatasize() : Command( "dataSize", false, "datasize" ) {}
+ virtual bool slaveOk() const { return true; }
+ virtual LockType locktype() const { return READ; }
+ virtual void help( stringstream &help ) const {
+ help <<
+ "determine data size for a set of data in a certain range"
+ "\nexample: { dataSize:\"blog.posts\", keyPattern:{x:1}, min:{x:10}, max:{x:55} }"
+ "\nkeyPattern, min, and max parameters are optional."
+ "\nnote: This command may take a while to run";
+ }
+ bool run(const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ Timer timer;
+
+ string ns = jsobj.firstElement().String();
+ BSONObj min = jsobj.getObjectField( "min" );
+ BSONObj max = jsobj.getObjectField( "max" );
+ BSONObj keyPattern = jsobj.getObjectField( "keyPattern" );
+ bool estimate = jsobj["estimate"].trueValue();
+
+ Client::Context ctx( ns );
+ NamespaceDetails *d = nsdetails(ns.c_str());
+
+ if ( ! d || d->stats.nrecords == 0 ) {
+ result.appendNumber( "size" , 0 );
+ result.appendNumber( "numObjects" , 0 );
+ result.append( "millis" , timer.millis() );
+ return true;
+ }
+
+ result.appendBool( "estimate" , estimate );
+
+ shared_ptr<Cursor> c;
+ if ( min.isEmpty() && max.isEmpty() ) {
+ if ( estimate ) {
+ result.appendNumber( "size" , d->stats.datasize );
+ result.appendNumber( "numObjects" , d->stats.nrecords );
+ result.append( "millis" , timer.millis() );
+ return 1;
+ }
+ c = theDataFileMgr.findAll( ns.c_str() );
+ }
+ else if ( min.isEmpty() || max.isEmpty() ) {
+ errmsg = "only one of min or max specified";
+ return false;
+ }
+ else {
+ IndexDetails *idx = cmdIndexDetailsForRange( ns.c_str(), errmsg, min, max, keyPattern );
+ if ( idx == 0 )
+ return false;
+
+ c.reset( BtreeCursor::make( d, d->idxNo(*idx), *idx, min, max, false, 1 ) );
+ }
+
+ long long avgObjSize = d->stats.datasize / d->stats.nrecords;
+
+ long long maxSize = jsobj["maxSize"].numberLong();
+ long long maxObjects = jsobj["maxObjects"].numberLong();
+
+ long long size = 0;
+ long long numObjects = 0;
+ while( c->ok() ) {
+
+ if ( estimate )
+ size += avgObjSize;
+ else
+ size += c->currLoc().rec()->netLength();
+
+ numObjects++;
+
+ if ( ( maxSize && size > maxSize ) ||
+ ( maxObjects && numObjects > maxObjects ) ) {
+ result.appendBool( "maxReached" , true );
+ break;
+ }
+
+ c->advance();
+ }
+
+ ostringstream os;
+ os << "Finding size for ns: " << ns;
+ if ( ! min.isEmpty() ) {
+ os << " between " << min << " and " << max;
+ }
+ logIfSlow( timer , os.str() );
+
+ result.appendNumber( "size", size );
+ result.appendNumber( "numObjects" , numObjects );
+ result.append( "millis" , timer.millis() );
+ return true;
+ }
+ } cmdDatasize;
+
+ namespace {
+ long long getIndexSizeForCollection(string db, string ns, BSONObjBuilder* details=NULL, int scale = 1 ) {
+ d.dbMutex.assertAtLeastReadLocked();
+
+ NamespaceDetails * nsd = nsdetails( ns.c_str() );
+ if ( ! nsd )
+ return 0;
+
+ long long totalSize = 0;
+
+ NamespaceDetails::IndexIterator ii = nsd->ii();
+ while ( ii.more() ) {
+ IndexDetails& d = ii.next();
+ string collNS = d.indexNamespace();
+ NamespaceDetails * mine = nsdetails( collNS.c_str() );
+ if ( ! mine ) {
+ log() << "error: have index [" << collNS << "] but no NamespaceDetails" << endl;
+ continue;
+ }
+ totalSize += mine->stats.datasize;
+ if ( details )
+ details->appendNumber( d.indexName() , mine->stats.datasize / scale );
+ }
+ return totalSize;
+ }
+ }
+
+ class CollectionStats : public Command {
+ public:
+ CollectionStats() : Command( "collStats", false, "collstats" ) {}
+ virtual bool slaveOk() const { return true; }
+ virtual LockType locktype() const { return READ; }
+ virtual void help( stringstream &help ) const {
+ help << "{ collStats:\"blog.posts\" , scale : 1 } scale divides sizes e.g. for KB use 1024\n"
+ " avgObjSize - in bytes";
+ }
+ bool run(const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ string ns = dbname + "." + jsobj.firstElement().valuestr();
+ Client::Context cx( ns );
+
+ NamespaceDetails * nsd = nsdetails( ns.c_str() );
+ if ( ! nsd ) {
+ errmsg = "ns not found";
+ return false;
+ }
+
+ result.append( "ns" , ns.c_str() );
+
+ int scale = 1;
+ if ( jsobj["scale"].isNumber() ) {
+ scale = jsobj["scale"].numberInt();
+ if ( scale <= 0 ) {
+ errmsg = "scale has to be > 0";
+ return false;
+ }
+ }
+ else if ( jsobj["scale"].trueValue() ) {
+ errmsg = "scale has to be a number > 0";
+ return false;
+ }
+
+ bool verbose = jsobj["verbose"].trueValue();
+
+ long long size = nsd->stats.datasize / scale;
+ result.appendNumber( "count" , nsd->stats.nrecords );
+ result.appendNumber( "size" , size );
+ if( nsd->stats.nrecords )
+ result.append ( "avgObjSize" , double(size) / double(nsd->stats.nrecords) );
+
+ int numExtents;
+ BSONArrayBuilder extents;
+
+ result.appendNumber( "storageSize" , nsd->storageSize( &numExtents , verbose ? &extents : 0 ) / scale );
+ result.append( "numExtents" , numExtents );
+ result.append( "nindexes" , nsd->nIndexes );
+ result.append( "lastExtentSize" , nsd->lastExtentSize / scale );
+ result.append( "paddingFactor" , nsd->paddingFactor );
+ result.append( "flags" , nsd->flags );
+
+ BSONObjBuilder indexSizes;
+ result.appendNumber( "totalIndexSize" , getIndexSizeForCollection(dbname, ns, &indexSizes, scale) / scale );
+ result.append("indexSizes", indexSizes.obj());
+
+ if ( nsd->capped ) {
+ result.append( "capped" , nsd->capped );
+ result.append( "max" , nsd->max );
+ }
+
+ if ( verbose )
+ result.appendArray( "extents" , extents.arr() );
+
+ return true;
+ }
+ } cmdCollectionStats;
+
+ class DBStats : public Command {
+ public:
+ DBStats() : Command( "dbStats", false, "dbstats" ) {}
+ virtual bool slaveOk() const { return true; }
+ virtual LockType locktype() const { return READ; }
+ virtual void help( stringstream &help ) const {
+ help <<
+ "Get stats on a database. Not instantaneous. Slower for databases with large .ns files.\n" <<
+ "Example: { dbStats:1, scale:1 }";
+ }
+ bool run(const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ int scale = 1;
+ if ( jsobj["scale"].isNumber() ) {
+ scale = jsobj["scale"].numberInt();
+ if ( scale <= 0 ) {
+ errmsg = "scale has to be > 0";
+ return false;
+ }
+ }
+ else if ( jsobj["scale"].trueValue() ) {
+ errmsg = "scale has to be a number > 0";
+ return false;
+ }
+
+ list<string> collections;
+ Database* d = cc().database();
+ if ( d )
+ d->namespaceIndex.getNamespaces( collections );
+
+ long long ncollections = 0;
+ long long objects = 0;
+ long long size = 0;
+ long long storageSize = 0;
+ long long numExtents = 0;
+ long long indexes = 0;
+ long long indexSize = 0;
+
+ for (list<string>::const_iterator it = collections.begin(); it != collections.end(); ++it) {
+ const string ns = *it;
+
+ NamespaceDetails * nsd = nsdetails( ns.c_str() );
+ if ( ! nsd ) {
+ errmsg = "missing ns: ";
+ errmsg += ns;
+ return false;
+ }
+
+ ncollections += 1;
+ objects += nsd->stats.nrecords;
+ size += nsd->stats.datasize;
+
+ int temp;
+ storageSize += nsd->storageSize( &temp );
+ numExtents += temp;
+
+ indexes += nsd->nIndexes;
+ indexSize += getIndexSizeForCollection(dbname, ns);
+ }
+
+ result.append ( "db" , dbname );
+ result.appendNumber( "collections" , ncollections );
+ result.appendNumber( "objects" , objects );
+ result.append ( "avgObjSize" , objects == 0 ? 0 : double(size) / double(objects) );
+ result.appendNumber( "dataSize" , size / scale );
+ result.appendNumber( "storageSize" , storageSize / scale);
+ result.appendNumber( "numExtents" , numExtents );
+ result.appendNumber( "indexes" , indexes );
+ result.appendNumber( "indexSize" , indexSize / scale );
+ result.appendNumber( "fileSize" , d->fileSize() / scale );
+ if( d )
+ result.appendNumber( "nsSizeMB", (int) d->namespaceIndex.fileLength() / 1024 / 1024 );
+
+ return true;
+ }
+ } cmdDBStats;
+
+ /* convertToCapped seems to use this */
+ class CmdCloneCollectionAsCapped : public Command {
+ public:
+ CmdCloneCollectionAsCapped() : Command( "cloneCollectionAsCapped" ) {}
+ virtual bool slaveOk() const { return false; }
+ virtual LockType locktype() const { return WRITE; }
+ virtual void help( stringstream &help ) const {
+ help << "{ cloneCollectionAsCapped:<fromName>, toCollection:<toName>, size:<sizeInBytes> }";
+ }
+ bool run(const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ string from = jsobj.getStringField( "cloneCollectionAsCapped" );
+ string to = jsobj.getStringField( "toCollection" );
+ long long size = (long long)jsobj.getField( "size" ).number();
+
+ if ( from.empty() || to.empty() || size == 0 ) {
+ errmsg = "invalid command spec";
+ return false;
+ }
+
+ string fromNs = dbname + "." + from;
+ string toNs = dbname + "." + to;
+ NamespaceDetails *nsd = nsdetails( fromNs.c_str() );
+ massert( 10301 , "source collection " + fromNs + " does not exist", nsd );
+ long long excessSize = nsd->stats.datasize - size * 2; // datasize and extentSize can't be compared exactly, so add some padding to 'size'
+ DiskLoc extent = nsd->firstExtent;
+ for( ; excessSize > extent.ext()->length && extent != nsd->lastExtent; extent = extent.ext()->xnext ) {
+ excessSize -= extent.ext()->length;
+ log( 2 ) << "cloneCollectionAsCapped skipping extent of size " << extent.ext()->length << endl;
+ log( 6 ) << "excessSize: " << excessSize << endl;
+ }
+ DiskLoc startLoc = extent.ext()->firstRecord;
+
+ CursorId id;
+ {
+ shared_ptr<Cursor> c = theDataFileMgr.findAll( fromNs.c_str(), startLoc );
+ ClientCursor *cc = new ClientCursor(0, c, fromNs.c_str());
+ id = cc->cursorid();
+ }
+
+ DBDirectClient client;
+ Client::Context ctx( toNs );
+ BSONObjBuilder spec;
+ spec.appendBool( "capped", true );
+ spec.append( "size", double( size ) );
+ if ( !userCreateNS( toNs.c_str(), spec.done(), errmsg, true ) )
+ return false;
+
+ auto_ptr< DBClientCursor > c = client.getMore( fromNs, id );
+ while( c->more() ) {
+ BSONObj obj = c->next();
+ theDataFileMgr.insertAndLog( toNs.c_str(), obj, true );
+ getDur().commitIfNeeded();
+ }
+
+ return true;
+ }
+ } cmdCloneCollectionAsCapped;
+
+ /* jan2010:
+ Converts the given collection to a capped collection w/ the specified size.
+ This command is not highly used, and is not currently supported with sharded
+ environments.
+ */
+ class CmdConvertToCapped : public Command {
+ public:
+ CmdConvertToCapped() : Command( "convertToCapped" ) {}
+ virtual bool slaveOk() const { return false; }
+ virtual LockType locktype() const { return WRITE; }
+ virtual void help( stringstream &help ) const {
+ help << "{ convertToCapped:<fromCollectionName>, size:<sizeInBytes> }";
+ }
+ bool run(const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ BackgroundOperation::assertNoBgOpInProgForDb(dbname.c_str());
+
+ string from = jsobj.getStringField( "convertToCapped" );
+ long long size = (long long)jsobj.getField( "size" ).number();
+
+ if ( from.empty() || size == 0 ) {
+ errmsg = "invalid command spec";
+ return false;
+ }
+
+ string shortTmpName = str::stream() << ".tmp.convertToCapped." << from;
+ string longTmpName = str::stream() << dbname << "." << shortTmpName;
+
+ DBDirectClient client;
+ client.dropCollection( longTmpName );
+
+ BSONObj info;
+ if ( !client.runCommand( dbname ,
+ BSON( "cloneCollectionAsCapped" << from << "toCollection" << shortTmpName << "size" << double( size ) ),
+ info ) ) {
+ errmsg = "cloneCollectionAsCapped failed: " + info.toString();
+ return false;
+ }
+
+ if ( !client.dropCollection( dbname + "." + from ) ) {
+ errmsg = "failed to drop original collection";
+ return false;
+ }
+
+ if ( !client.runCommand( "admin",
+ BSON( "renameCollection" << longTmpName <<
+ "to" << ( dbname + "." + from ) ),
+ info ) ) {
+ errmsg = "renameCollection failed: " + info.toString();
+ return false;
+ }
+
+ return true;
+ }
+ } cmdConvertToCapped;
+
+ /* Returns client's uri */
+ class CmdWhatsMyUri : public Command {
+ public:
+ CmdWhatsMyUri() : Command("whatsmyuri") { }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual LockType locktype() const { return NONE; }
+ virtual void help( stringstream &help ) const {
+ help << "{whatsmyuri:1}";
+ }
+ virtual bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ BSONObj info = cc().curop()->infoNoauth();
+ result << "you" << info[ "client" ];
+ return true;
+ }
+ } cmdWhatsMyUri;
+
+ /* For testing only, not for general use */
+ class GodInsert : public Command {
+ public:
+ GodInsert() : Command( "godinsert" ) { }
+ virtual bool adminOnly() const { return false; }
+ virtual bool logTheOp() { return false; }
+ virtual bool slaveOk() const { return true; }
+ virtual LockType locktype() const { return NONE; }
+ virtual bool requiresAuth() { return true; }
+ virtual void help( stringstream &help ) const {
+ help << "internal. for testing only.";
+ }
+ virtual bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+
+ AuthenticationInfo *ai = cc().getAuthenticationInfo();
+ if ( ! ai->isLocalHost ) {
+ errmsg = "godinsert only works locally";
+ return false;
+ }
+
+ string coll = cmdObj[ "godinsert" ].valuestrsafe();
+ log() << "test only command godinsert invoked coll:" << coll << endl;
+ uassert( 13049, "godinsert must specify a collection", !coll.empty() );
+ string ns = dbname + "." + coll;
+ BSONObj obj = cmdObj[ "obj" ].embeddedObjectUserCheck();
+ {
+ dblock lk;
+ Client::Context ctx( ns );
+ theDataFileMgr.insertWithObjMod( ns.c_str(), obj, true );
+ }
+ return true;
+ }
+ } cmdGodInsert;
+
+ class DBHashCmd : public Command {
+ public:
+ DBHashCmd() : Command( "dbHash", false, "dbhash" ) {}
+ virtual bool slaveOk() const { return true; }
+ virtual LockType locktype() const { return READ; }
+ virtual bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ list<string> colls;
+ Database* db = cc().database();
+ if ( db )
+ db->namespaceIndex.getNamespaces( colls );
+ colls.sort();
+
+ result.appendNumber( "numCollections" , (long long)colls.size() );
+ result.append( "host" , prettyHostName() );
+
+ md5_state_t globalState;
+ md5_init(&globalState);
+
+ BSONObjBuilder bb( result.subobjStart( "collections" ) );
+ for ( list<string>::iterator i=colls.begin(); i != colls.end(); i++ ) {
+ string c = *i;
+ if ( c.find( ".system.profil" ) != string::npos )
+ continue;
+
+ shared_ptr<Cursor> cursor;
+
+ NamespaceDetails * nsd = nsdetails( c.c_str() );
+
+ // debug SERVER-761
+ NamespaceDetails::IndexIterator ii = nsd->ii();
+ while( ii.more() ) {
+ const IndexDetails &idx = ii.next();
+ if ( !idx.head.isValid() || !idx.info.isValid() ) {
+ log() << "invalid index for ns: " << c << " " << idx.head << " " << idx.info;
+ if ( idx.info.isValid() )
+ log() << " " << idx.info.obj();
+ log() << endl;
+ }
+ }
+
+ int idNum = nsd->findIdIndex();
+ if ( idNum >= 0 ) {
+ cursor.reset( BtreeCursor::make( nsd , idNum , nsd->idx( idNum ) , BSONObj() , BSONObj() , false , 1 ) );
+ }
+ else if ( c.find( ".system." ) != string::npos ) {
+ continue;
+ }
+ else if ( nsd->capped ) {
+ cursor = findTableScan( c.c_str() , BSONObj() );
+ }
+ else {
+ log() << "can't find _id index for: " << c << endl;
+ continue;
+ }
+
+ md5_state_t st;
+ md5_init(&st);
+
+ long long n = 0;
+ while ( cursor->ok() ) {
+ BSONObj c = cursor->current();
+ md5_append( &st , (const md5_byte_t*)c.objdata() , c.objsize() );
+ n++;
+ cursor->advance();
+ }
+ md5digest d;
+ md5_finish(&st, d);
+ string hash = digestToString( d );
+
+ bb.append( c.c_str() + ( dbname.size() + 1 ) , hash );
+
+ md5_append( &globalState , (const md5_byte_t*)hash.c_str() , hash.size() );
+ }
+ bb.done();
+
+ md5digest d;
+ md5_finish(&globalState, d);
+ string hash = digestToString( d );
+
+ result.append( "md5" , hash );
+
+ return 1;
+ }
+
+ } dbhashCmd;
+
+ /* for diagnostic / testing purposes. */
+ class CmdSleep : public Command {
+ public:
+ virtual LockType locktype() const { return NONE; }
+ virtual bool adminOnly() const { return true; }
+ virtual bool logTheOp() { return false; }
+ virtual bool slaveOk() const { return true; }
+ virtual void help( stringstream& help ) const {
+ help << "internal testing command. Makes db block (in a read lock) for 100 seconds\n";
+ help << "w:true write lock. secs:<seconds>";
+ }
+ CmdSleep() : Command("sleep") { }
+ bool run(const string& ns, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ log() << "test only command sleep invoked" << endl;
+ int secs = 100;
+ if ( cmdObj["secs"].isNumber() )
+ secs = cmdObj["secs"].numberInt();
+ if( cmdObj.getBoolField("w") ) {
+ writelock lk("");
+ sleepsecs(secs);
+ }
+ else {
+ readlock lk("");
+ sleepsecs(secs);
+ }
+ return true;
+ }
+ } cmdSleep;
+
+ // just for testing
+ class CapTrunc : public Command {
+ public:
+ CapTrunc() : Command( "captrunc" ) {}
+ virtual bool slaveOk() const { return false; }
+ virtual LockType locktype() const { return WRITE; }
+ virtual bool requiresAuth() { return true; }
+ virtual bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ string coll = cmdObj[ "captrunc" ].valuestrsafe();
+ uassert( 13416, "captrunc must specify a collection", !coll.empty() );
+ string ns = dbname + "." + coll;
+ int n = cmdObj.getIntField( "n" );
+
+ // inclusive range?
+ bool inc = cmdObj.getBoolField( "inc" );
+ NamespaceDetails *nsd = nsdetails( ns.c_str() );
+ ReverseCappedCursor c( nsd );
+ massert( 13417, "captrunc collection not found or empty", c.ok() );
+ for( int i = 0; i < n; ++i ) {
+ massert( 13418, "captrunc invalid n", c.advance() );
+ }
+ DiskLoc end = c.currLoc();
+ nsd->cappedTruncateAfter( ns.c_str(), end, inc );
+ return true;
+ }
+ } capTruncCmd;
+
+ // just for testing
+ class EmptyCapped : public Command {
+ public:
+ EmptyCapped() : Command( "emptycapped" ) {}
+ virtual bool slaveOk() const { return false; }
+ virtual LockType locktype() const { return WRITE; }
+ virtual bool requiresAuth() { return true; }
+ virtual bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ string coll = cmdObj[ "emptycapped" ].valuestrsafe();
+ uassert( 13428, "emptycapped must specify a collection", !coll.empty() );
+ string ns = dbname + "." + coll;
+ NamespaceDetails *nsd = nsdetails( ns.c_str() );
+ massert( 13429, "emptycapped no such collection", nsd );
+ nsd->emptyCappedCollection( ns.c_str() );
+ return true;
+ }
+ } emptyCappedCmd;
+
+ bool _execCommand(Command *c, const string& dbname, BSONObj& cmdObj, int queryOptions, BSONObjBuilder& result, bool fromRepl) {
+
+ try {
+ string errmsg;
+ if ( ! c->run(dbname, cmdObj, queryOptions, errmsg, result, fromRepl ) ) {
+ result.append( "errmsg" , errmsg );
+ return false;
+ }
+ }
+ catch ( SendStaleConfigException& e ){
+ log(1) << "command failed because of stale config, can retry" << causedBy( e ) << endl;
+ throw;
+ }
+ catch ( DBException& e ) {
+
+ // TODO: Rethrown errors have issues here, should divorce SendStaleConfigException from the DBException tree
+
+ stringstream ss;
+ ss << "exception: " << e.what();
+ result.append( "errmsg" , ss.str() );
+ result.append( "code" , e.getCode() );
+ return false;
+ }
+
+ return true;
+ }
+
+ /**
+ * this handles
+ - auth
+ - maintenance mode
+ - locking
+ - context
+ then calls run()
+ */
+ bool execCommand( Command * c ,
+ Client& client , int queryOptions ,
+ const char *cmdns, BSONObj& cmdObj ,
+ BSONObjBuilder& result,
+ bool fromRepl ) {
+
+ string dbname = nsToDatabase( cmdns );
+
+ AuthenticationInfo *ai = client.getAuthenticationInfo();
+
+ if( c->adminOnly() && c->localHostOnlyIfNoAuth( cmdObj ) && noauth && !ai->isLocalHost ) {
+ result.append( "errmsg" ,
+ "unauthorized: this command must run from localhost when running db without auth" );
+ log() << "command denied: " << cmdObj.toString() << endl;
+ return false;
+ }
+
+ if ( c->adminOnly() && ! fromRepl && dbname != "admin" ) {
+ result.append( "errmsg" , "access denied; use admin db" );
+ log() << "command denied: " << cmdObj.toString() << endl;
+ return false;
+ }
+
+ if ( cmdObj["help"].trueValue() ) {
+ client.curop()->ensureStarted();
+ stringstream ss;
+ ss << "help for: " << c->name << " ";
+ c->help( ss );
+ result.append( "help" , ss.str() );
+ result.append( "lockType" , c->locktype() );
+ return true;
+ }
+
+ bool canRunHere =
+ isMaster( dbname.c_str() ) ||
+ c->slaveOk() ||
+ ( c->slaveOverrideOk() && ( queryOptions & QueryOption_SlaveOk ) ) ||
+ fromRepl;
+
+ if ( ! canRunHere ) {
+ result.append( "errmsg" , "not master" );
+ result.append( "note" , "from execCommand" );
+ return false;
+ }
+
+ if ( ! c->maintenanceOk() && theReplSet && ! isMaster( dbname.c_str() ) && ! theReplSet->isSecondary() ) {
+ result.append( "errmsg" , "node is recovering" );
+ result.append( "note" , "from execCommand" );
+ return false;
+ }
+
+ if ( c->adminOnly() )
+ log( 2 ) << "command: " << cmdObj << endl;
+
+ if (c->maintenanceMode() && theReplSet && theReplSet->isSecondary()) {
+ theReplSet->setMaintenanceMode(true);
+ }
+
+ bool retval = false;
+ if ( c->locktype() == Command::NONE ) {
+ // we also trust that this won't crash
+ retval = true;
+
+ if ( c->requiresAuth() ) {
+                // test that the user has at least read permissions
+ if ( ! client.getAuthenticationInfo()->isAuthorizedReads( dbname ) ) {
+ result.append( "errmsg" , "need to login" );
+ retval = false;
+ }
+ }
+
+ if (retval) {
+ client.curop()->ensureStarted();
+ retval = _execCommand(c, dbname , cmdObj , queryOptions, result , fromRepl );
+ }
+ }
+ else if( c->locktype() != Command::WRITE ) {
+ // read lock
+ assert( ! c->logTheOp() );
+ string ns = c->parseNs(dbname, cmdObj);
+ Client::ReadContext ctx( ns , dbpath, c->requiresAuth() ); // read locks
+ client.curop()->ensureStarted();
+ retval = _execCommand(c, dbname , cmdObj , queryOptions, result , fromRepl );
+ }
+ else {
+ dassert( c->locktype() == Command::WRITE );
+ writelock lk;
+ client.curop()->ensureStarted();
+ Client::Context ctx( dbname , dbpath , c->requiresAuth() );
+ retval = _execCommand(c, dbname , cmdObj , queryOptions, result , fromRepl );
+ if ( retval && c->logTheOp() && ! fromRepl ) {
+ logOp("c", cmdns, cmdObj);
+ }
+ }
+
+ if (c->maintenanceMode() && theReplSet) {
+ theReplSet->setMaintenanceMode(false);
+ }
+
+ return retval;
+ }
+
+
+ /* TODO make these all command objects -- legacy stuff here
+
+ usage:
+ abc.$cmd.findOne( { ismaster:1 } );
+
+ returns true if ran a cmd
+ */
+ bool _runCommands(const char *ns, BSONObj& _cmdobj, BufBuilder &b, BSONObjBuilder& anObjBuilder, bool fromRepl, int queryOptions) {
+ string dbname = nsToDatabase( ns );
+
+ if( logLevel >= 1 )
+ log() << "run command " << ns << ' ' << _cmdobj << endl;
+
+ const char *p = strchr(ns, '.');
+ if ( !p ) return false;
+ if ( strcmp(p, ".$cmd") != 0 ) return false;
+
+ BSONObj jsobj;
+ {
+ BSONElement e = _cmdobj.firstElement();
+ if ( e.type() == Object && (e.fieldName()[0] == '$'
+ ? str::equals("query", e.fieldName()+1)
+ : str::equals("query", e.fieldName())))
+ {
+ jsobj = e.embeddedObject();
+ }
+ else {
+ jsobj = _cmdobj;
+ }
+ }
+
+ Client& client = cc();
+ bool ok = false;
+
+ BSONElement e = jsobj.firstElement();
+
+ Command * c = e.type() ? Command::findCommand( e.fieldName() ) : 0;
+
+ if ( c ) {
+ ok = execCommand( c , client , queryOptions , ns , jsobj , anObjBuilder , fromRepl );
+ }
+ else {
+ anObjBuilder.append("errmsg", str::stream() << "no such cmd: " << e.fieldName() );
+ anObjBuilder.append("bad cmd" , _cmdobj );
+ }
+
+ // switch to bool, but wait a bit longer before switching?
+ // anObjBuilder.append("ok", ok);
+ anObjBuilder.append("ok", ok?1.0:0.0);
+ BSONObj x = anObjBuilder.done();
+ b.appendBuf((void*) x.objdata(), x.objsize());
+
+ return true;
+ }
+
+} // namespace mongo
diff --git a/src/mongo/db/dbcommands_admin.cpp b/src/mongo/db/dbcommands_admin.cpp
new file mode 100644
index 00000000000..ffcc3f261fe
--- /dev/null
+++ b/src/mongo/db/dbcommands_admin.cpp
@@ -0,0 +1,550 @@
+// dbcommands_admin.cpp
+
+/**
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/**
+   this file has dbcommands that are for DBA-type administration,
+   mostly around dbs and collections,
+ NOT system stuff
+*/
+
+
+#include "pch.h"
+#include "jsobj.h"
+#include "pdfile.h"
+#include "namespace-inl.h"
+#include "commands.h"
+#include "cmdline.h"
+#include "btree.h"
+#include "curop-inl.h"
+#include "../util/background.h"
+#include "../util/logfile.h"
+#include "../util/alignedbuilder.h"
+#include "../util/paths.h"
+#include "../scripting/engine.h"
+#include "../util/timer.h"
+
+namespace mongo {
+
+ class CleanCmd : public Command {
+ public:
+ CleanCmd() : Command( "clean" ) {}
+
+ virtual bool slaveOk() const { return true; }
+ virtual LockType locktype() const { return WRITE; }
+
+ virtual void help(stringstream& h) const { h << "internal"; }
+
+ bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ string dropns = dbname + "." + cmdObj.firstElement().valuestrsafe();
+
+ if ( !cmdLine.quiet )
+ tlog() << "CMD: clean " << dropns << endl;
+
+ NamespaceDetails *d = nsdetails(dropns.c_str());
+
+ if ( ! d ) {
+ errmsg = "ns not found";
+ return 0;
+ }
+
+ for ( int i = 0; i < Buckets; i++ )
+ d->deletedList[i].Null();
+
+ result.append("ns", dropns.c_str());
+ return 1;
+ }
+
+ } cleanCmd;
+
+ namespace dur {
+ boost::filesystem::path getJournalDir();
+ }
+
+ class JournalLatencyTestCmd : public Command {
+ public:
+ JournalLatencyTestCmd() : Command( "journalLatencyTest" ) {}
+
+ virtual bool slaveOk() const { return true; }
+ virtual LockType locktype() const { return NONE; }
+ virtual bool adminOnly() const { return true; }
+ virtual void help(stringstream& h) const { h << "test how long to write and fsync to a test file in the journal/ directory"; }
+
+ bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ boost::filesystem::path p = dur::getJournalDir();
+ p /= "journalLatencyTest";
+
+ // remove file if already present
+ try {
+ remove(p);
+ }
+ catch(...) { }
+
+ BSONObjBuilder bb[2];
+ for( int pass = 0; pass < 2; pass++ ) {
+ LogFile f(p.string());
+ AlignedBuilder b(1024 * 1024);
+ {
+ Timer t;
+ for( int i = 0 ; i < 100; i++ ) {
+ f.synchronousAppend(b.buf(), 8192);
+ }
+ bb[pass].append("8KB", t.millis() / 100.0);
+ }
+ {
+ const int N = 50;
+ Timer t2;
+ long long x = 0;
+ for( int i = 0 ; i < N; i++ ) {
+ Timer t;
+ f.synchronousAppend(b.buf(), 8192);
+ x += t.micros();
+ sleepmillis(4);
+ }
+ long long y = t2.micros() - 4*N*1000;
+                    // not really trusting the timer granularity on all platforms, so use whichever of x and y is higher
+ bb[pass].append("8KBWithPauses", max(x,y) / (N*1000.0));
+ }
+ {
+ Timer t;
+ for( int i = 0 ; i < 20; i++ ) {
+ f.synchronousAppend(b.buf(), 1024 * 1024);
+ }
+ bb[pass].append("1MB", t.millis() / 20.0);
+ }
+ // second time around, we are prealloced.
+ }
+ result.append("timeMillis", bb[0].obj());
+ result.append("timeMillisWithPrealloc", bb[1].obj());
+
+ try {
+ remove(p);
+ }
+ catch(...) { }
+
+ try {
+ result.append("onSamePartition", onSamePartition(dur::getJournalDir().string(), dbpath));
+ }
+ catch(...) { }
+
+ return 1;
+ }
+ } journalLatencyTestCmd;
+
+ class ValidateCmd : public Command {
+ public:
+ ValidateCmd() : Command( "validate" ) {}
+
+ virtual bool slaveOk() const {
+ return true;
+ }
+
+ virtual void help(stringstream& h) const { h << "Validate contents of a namespace by scanning its data structures for correctness. Slow.\n"
+ "Add full:true option to do a more thorough check"; }
+
+ virtual LockType locktype() const { return READ; }
+        //{ validate: "collectionnamewithoutthedbpart" [, scandata: <bool>] [, full: <bool>] }
+
+ bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ string ns = dbname + "." + cmdObj.firstElement().valuestrsafe();
+ NamespaceDetails * d = nsdetails( ns.c_str() );
+ if ( !cmdLine.quiet )
+ tlog() << "CMD: validate " << ns << endl;
+
+ if ( ! d ) {
+ errmsg = "ns not found";
+ return 0;
+ }
+
+ result.append( "ns", ns );
+ validateNS( ns.c_str() , d, cmdObj, result);
+ return 1;
+ }
+
+ private:
+ void validateNS(const char *ns, NamespaceDetails *d, const BSONObj& cmdObj, BSONObjBuilder& result) {
+ const bool full = cmdObj["full"].trueValue();
+ const bool scanData = full || cmdObj["scandata"].trueValue();
+
+ bool valid = true;
+ BSONArrayBuilder errors; // explanation(s) for why valid = false
+ if ( d->capped ){
+ result.append("capped", d->capped);
+ result.append("max", d->max);
+ }
+
+ result.append("firstExtent", str::stream() << d->firstExtent.toString() << " ns:" << d->firstExtent.ext()->nsDiagnostic.toString());
+ result.append( "lastExtent", str::stream() << d->lastExtent.toString() << " ns:" << d->lastExtent.ext()->nsDiagnostic.toString());
+
+ BSONArrayBuilder extentData;
+
+ try {
+ d->firstExtent.ext()->assertOk();
+ d->lastExtent.ext()->assertOk();
+
+ DiskLoc el = d->firstExtent;
+ int ne = 0;
+ while( !el.isNull() ) {
+ Extent *e = el.ext();
+ e->assertOk();
+ el = e->xnext;
+ ne++;
+ if ( full )
+ extentData << e->dump();
+
+ killCurrentOp.checkForInterrupt();
+ }
+ result.append("extentCount", ne);
+ }
+ catch (...) {
+ valid=false;
+ errors << "extent asserted";
+ }
+
+ if ( full )
+ result.appendArray( "extents" , extentData.arr() );
+
+
+ result.appendNumber("datasize", d->stats.datasize);
+ result.appendNumber("nrecords", d->stats.nrecords);
+ result.appendNumber("lastExtentSize", d->lastExtentSize);
+ result.appendNumber("padding", d->paddingFactor);
+
+
+ try {
+
+ try {
+ result.append("firstExtentDetails", d->firstExtent.ext()->dump());
+
+ valid = valid && d->firstExtent.ext()->validates() &&
+ d->firstExtent.ext()->xprev.isNull();
+ }
+ catch (...) {
+ errors << "exception firstextent";
+ valid = false;
+ }
+
+ set<DiskLoc> recs;
+ if( scanData ) {
+ shared_ptr<Cursor> c = theDataFileMgr.findAll(ns);
+ int n = 0;
+ int nInvalid = 0;
+ long long len = 0;
+ long long nlen = 0;
+ int outOfOrder = 0;
+ DiskLoc cl_last;
+ while ( c->ok() ) {
+ n++;
+
+ DiskLoc cl = c->currLoc();
+ if ( n < 1000000 )
+ recs.insert(cl);
+ if ( d->capped ) {
+ if ( cl < cl_last )
+ outOfOrder++;
+ cl_last = cl;
+ }
+
+ Record *r = c->_current();
+ len += r->lengthWithHeaders;
+ nlen += r->netLength();
+
+ if (full){
+ BSONObj obj(r);
+ if (!obj.isValid() || !obj.valid()){ // both fast and deep checks
+ valid = false;
+ if (nInvalid == 0) // only log once;
+ errors << "invalid bson object detected (see logs for more info)";
+
+ nInvalid++;
+ if (strcmp("_id", obj.firstElementFieldName()) == 0){
+ try {
+ obj.firstElement().validate(); // throws on error
+ log() << "Invalid bson detected in " << ns << " with _id: " << obj.firstElement().toString(false) << endl;
+ }
+ catch(...){
+ log() << "Invalid bson detected in " << ns << " with corrupt _id" << endl;
+ }
+ }
+ else {
+ log() << "Invalid bson detected in " << ns << " and couldn't find _id" << endl;
+ }
+ }
+ }
+
+ c->advance();
+ }
+ if ( d->capped && !d->capLooped() ) {
+ result.append("cappedOutOfOrder", outOfOrder);
+ if ( outOfOrder > 1 ) {
+ valid = false;
+ errors << "too many out of order records";
+ }
+ }
+ result.append("objectsFound", n);
+
+ if (full) {
+ result.append("invalidObjects", nInvalid);
+ }
+
+ result.appendNumber("bytesWithHeaders", len);
+ result.appendNumber("bytesWithoutHeaders", nlen);
+ }
+
+ BSONArrayBuilder deletedListArray;
+ for ( int i = 0; i < Buckets; i++ ) {
+ deletedListArray << d->deletedList[i].isNull();
+ }
+
+ int ndel = 0;
+ long long delSize = 0;
+ int incorrect = 0;
+ for ( int i = 0; i < Buckets; i++ ) {
+ DiskLoc loc = d->deletedList[i];
+ try {
+ int k = 0;
+ while ( !loc.isNull() ) {
+ if ( recs.count(loc) )
+ incorrect++;
+ ndel++;
+
+ if ( loc.questionable() ) {
+ if( d->capped && !loc.isValid() && i == 1 ) {
+ /* the constructor for NamespaceDetails intentionally sets deletedList[1] to invalid
+ see comments in namespace.h
+ */
+ break;
+ }
+
+ if ( loc.a() <= 0 || strstr(ns, "hudsonSmall") == 0 ) {
+ string err (str::stream() << "bad deleted loc: " << loc.toString() << " bucket:" << i << " k:" << k);
+ errors << err;
+
+ valid = false;
+ break;
+ }
+ }
+
+ DeletedRecord *d = loc.drec();
+ delSize += d->lengthWithHeaders;
+ loc = d->nextDeleted;
+ k++;
+ killCurrentOp.checkForInterrupt();
+ }
+ }
+ catch (...) {
+ errors << ("exception in deleted chain for bucket " + BSONObjBuilder::numStr(i));
+ valid = false;
+ }
+ }
+ result.appendNumber("deletedCount", ndel);
+ result.appendNumber("deletedSize", delSize);
+
+ if ( incorrect ) {
+ errors << (BSONObjBuilder::numStr(incorrect) + " records from datafile are in deleted list");
+ valid = false;
+ }
+
+ int idxn = 0;
+ try {
+ result.append("nIndexes", d->nIndexes);
+ BSONObjBuilder indexes; // not using subObjStart to be exception safe
+ NamespaceDetails::IndexIterator i = d->ii();
+ while( i.more() ) {
+ IndexDetails& id = i.next();
+ long long keys = id.idxInterface().fullValidate(id.head, id.keyPattern());
+ indexes.appendNumber(id.indexNamespace(), keys);
+ }
+ result.append("keysPerIndex", indexes.done());
+ }
+ catch (...) {
+ errors << ("exception during index validate idxn " + BSONObjBuilder::numStr(idxn));
+ valid=false;
+ }
+
+ }
+ catch (AssertionException) {
+ errors << "exception during validate";
+ valid = false;
+ }
+
+ result.appendBool("valid", valid);
+ result.append("errors", errors.arr());
+
+ if ( !full ){
+ result.append("warning", "Some checks omitted for speed. use {full:true} option to do more thorough scan.");
+ }
+
+ if ( !valid ) {
+ result.append("advice", "ns corrupt, requires repair");
+ }
+
+ }
+ } validateCmd;
+
+ bool lockedForWriting = false; // read from db/instance.cpp
+ static bool unlockRequested = false;
+ static mongo::mutex fsyncLockMutex("fsyncLock");
+ static boost::condition fsyncLockCondition;
+ static OID fsyncLockID; // identifies the current lock job
+
+ /*
+ class UnlockCommand : public Command {
+ public:
+ UnlockCommand() : Command( "unlock" ) { }
+ virtual bool readOnly() { return true; }
+ virtual bool slaveOk() const { return true; }
+ virtual bool adminOnly() const { return true; }
+ virtual bool run(const char *ns, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ if( lockedForWriting ) {
+ log() << "command: unlock requested" << endl;
+ errmsg = "unlock requested";
+ unlockRequested = true;
+ }
+ else {
+ errmsg = "not locked, so cannot unlock";
+ return 0;
+ }
+ return 1;
+ }
+
+ } unlockCommand;
+ */
+ /* see unlockFsync() for unlocking:
+ db.$cmd.sys.unlock.findOne()
+ */
+ class FSyncCommand : public Command {
+ static const char* url() { return "http://www.mongodb.org/display/DOCS/fsync+Command"; }
+ class LockDBJob : public BackgroundJob {
+ protected:
+ virtual string name() const { return "lockdbjob"; }
+ void run() {
+ Client::initThread("fsyncjob");
+ Client& c = cc();
+ {
+ scoped_lock lk(fsyncLockMutex);
+ while (lockedForWriting){ // there is a small window for two LockDBJob's to be active. This prevents it.
+ fsyncLockCondition.wait(lk.boost());
+ }
+ lockedForWriting = true;
+ fsyncLockID.init();
+ }
+ readlock lk("");
+ MemoryMappedFile::flushAll(true);
+ log() << "db is now locked for snapshotting, no writes allowed. db.fsyncUnlock() to unlock" << endl;
+ log() << " For more info see " << FSyncCommand::url() << endl;
+ _ready = true;
+ {
+ scoped_lock lk(fsyncLockMutex);
+ while( !unlockRequested ) {
+ fsyncLockCondition.wait(lk.boost());
+ }
+ unlockRequested = false;
+ lockedForWriting = false;
+ fsyncLockCondition.notify_all();
+ }
+ c.shutdown();
+ }
+ public:
+ bool& _ready;
+ LockDBJob(bool& ready) : BackgroundJob( true /* delete self */ ), _ready(ready) {
+ _ready = false;
+ }
+ };
+ public:
+ FSyncCommand() : Command( "fsync" ) {}
+ virtual LockType locktype() const { return WRITE; }
+ virtual bool slaveOk() const { return true; }
+ virtual bool adminOnly() const { return true; }
+ /*virtual bool localHostOnlyIfNoAuth(const BSONObj& cmdObj) {
+ string x = cmdObj["exec"].valuestrsafe();
+ return !x.empty();
+ }*/
+ virtual void help(stringstream& h) const { h << url(); }
+ virtual bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ bool sync = !cmdObj["async"].trueValue(); // async means do an fsync, but return immediately
+ bool lock = cmdObj["lock"].trueValue();
+ log() << "CMD fsync: sync:" << sync << " lock:" << lock << endl;
+
+ if( lock ) {
+ // fsync and lock variation
+
+ uassert(12034, "fsync: can't lock while an unlock is pending", !unlockRequested);
+ uassert(12032, "fsync: sync option must be true when using lock", sync);
+ /* With releaseEarly(), we must be extremely careful we don't do anything
+ where we would have assumed we were locked. profiling is one of those things.
+ Perhaps at profile time we could check if we released early -- however,
+                   we need to be careful to keep that code very fast; it's a very common code path when profiling is on.
+ */
+ uassert(12033, "fsync: profiling must be off to enter locked mode", cc().database()->profile == 0);
+
+ // todo future: Perhaps we could do this in the background thread. As is now, writes may interleave between
+ // the releaseEarly below and the acquisition of the readlock in the background thread.
+ // However the real problem is that it seems complex to unlock here and then have a window for
+ // writes before the bg job -- can be done correctly but harder to reason about correctness.
+ // If this command ran within a read lock in the first place, would it work, and then that
+ // would be quite easy?
+ // Or, could we downgrade the write lock to a read lock, wait for ready, then release?
+ getDur().syncDataAndTruncateJournal();
+
+ bool ready = false;
+ LockDBJob *l = new LockDBJob(ready);
+
+ d.dbMutex.releaseEarly();
+
+ // There is a narrow window for another lock request to come in
+ // here before the LockDBJob grabs the readlock. LockDBJob will
+ // ensure that the requests are serialized and never running
+ // concurrently
+
+ l->go();
+ // don't return until background thread has acquired the read lock
+ while( !ready ) {
+ sleepmillis(10);
+ }
+ result.append("info", "now locked against writes, use db.fsyncUnlock() to unlock");
+ result.append("seeAlso", url());
+ }
+ else {
+ // the simple fsync command case
+
+ if (sync)
+ getDur().commitNow();
+ result.append( "numFiles" , MemoryMappedFile::flushAll( sync ) );
+ }
+ return 1;
+ }
+
+ } fsyncCmd;
+
+ // Note that this will only unlock the current lock. If another thread
+    // relocks before we return, we still consider the unlocking successful.
+    // This is important because if two scripts are trying to fsync-lock, each
+    // one must be assured that the database remains fully locked between the
+    // fsync return and the call to unlock.
+ void unlockFsyncAndWait(){
+ scoped_lock lk(fsyncLockMutex);
+ if (lockedForWriting) { // could have handled another unlock before we grabbed the lock
+ OID curOp = fsyncLockID;
+ unlockRequested = true;
+ fsyncLockCondition.notify_all();
+ while (lockedForWriting && fsyncLockID == curOp){
+ fsyncLockCondition.wait( lk.boost() );
+ }
+ }
+ }
+}
+
diff --git a/src/mongo/db/dbcommands_generic.cpp b/src/mongo/db/dbcommands_generic.cpp
new file mode 100644
index 00000000000..cfd833aa72d
--- /dev/null
+++ b/src/mongo/db/dbcommands_generic.cpp
@@ -0,0 +1,432 @@
+/** @file dbcommands_generic.cpp commands suited for any mongo server (both mongod, mongos) */
+
+/**
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "ops/query.h"
+#include "pdfile.h"
+#include "jsobj.h"
+#include "../bson/util/builder.h"
+#include <time.h>
+#include "introspect.h"
+#include "btree.h"
+#include "../util/lruishmap.h"
+#include "../util/md5.hpp"
+#include "../util/processinfo.h"
+#include "json.h"
+#include "repl.h"
+#include "repl_block.h"
+#include "replutil.h"
+#include "commands.h"
+#include "db.h"
+#include "instance.h"
+#include "lasterror.h"
+#include "security.h"
+#include "../scripting/engine.h"
+#include "stats/counters.h"
+#include "background.h"
+#include "../util/version.h"
+#include "../util/ramlog.h"
+#include "repl/multicmd.h"
+#include "server.h"
+
+namespace mongo {
+
+#if 0
+ namespace cloud {
+ SimpleMutex mtx("cloud");
+ Guarded< vector<string>, mtx > ips;
+ bool startedThread = false;
+
+ void thread() {
+ bson::bo cmd;
+ while( 1 ) {
+ list<Target> L;
+ {
+ SimpleMutex::scoped_lock lk(mtx);
+ if( ips.ref(lk).empty() )
+ continue;
+ for( unsigned i = 0; i < ips.ref(lk).size(); i++ ) {
+ L.push_back( Target(ips.ref(lk)[i]) );
+ }
+ }
+
+
+ /** repoll as machines might be down on the first lookup (only if not found previously) */
+ sleepsecs(6);
+ }
+ }
+ }
+
+ class CmdCloud : public Command {
+ public:
+ CmdCloud() : Command( "cloud" ) { }
+ virtual bool slaveOk() const { return true; }
+ virtual bool adminOnly() const { return true; }
+ virtual LockType locktype() const { return NONE; }
+ virtual void help( stringstream &help ) const {
+ help << "internal command facilitating running in certain cloud computing environments";
+ }
+ bool run(const string& dbname, BSONObj& obj, int options, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ if( !obj.hasElement("servers") ) {
+ vector<string> ips;
+ obj["servers"].Obj().Vals(ips);
+ {
+ SimpleMutex::scoped_lock lk(cloud::mtx);
+ cloud::ips.ref(lk).swap(ips);
+ if( !cloud::startedThread ) {
+ cloud::startedThread = true;
+ boost::thread thr(cloud::thread);
+ }
+ }
+ }
+ return true;
+ }
+ } cmdCloud;
+#endif
+
+ class CmdBuildInfo : public Command {
+ public:
+ CmdBuildInfo() : Command( "buildInfo", true, "buildinfo" ) {}
+ virtual bool slaveOk() const { return true; }
+ virtual bool adminOnly() const { return false; }
+ virtual bool requiresAuth() { return false; }
+ virtual LockType locktype() const { return NONE; }
+ virtual void help( stringstream &help ) const {
+ help << "get version #, etc.\n";
+ help << "{ buildinfo:1 }";
+ }
+ bool run(const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ result << "version" << versionString << "gitVersion" << gitVersion() << "sysInfo" << sysInfo();
+ result << "versionArray" << versionArray;
+ result << "bits" << ( sizeof( int* ) == 4 ? 32 : 64 );
+ result.appendBool( "debug" , debug );
+ result.appendNumber("maxBsonObjectSize", BSONObjMaxUserSize);
+ return true;
+ }
+ } cmdBuildInfo;
+
+ /** experimental. either remove or add support in repl sets also. in a repl set, getting this setting from the
+ repl set config could make sense.
+ */
+ unsigned replApplyBatchSize = 1;
+
+ class CmdGet : public Command {
+ public:
+ CmdGet() : Command( "getParameter" ) { }
+ virtual bool slaveOk() const { return true; }
+ virtual bool adminOnly() const { return true; }
+ virtual LockType locktype() const { return NONE; }
+ virtual void help( stringstream &help ) const {
+ help << "get administrative option(s)\nexample:\n";
+ help << "{ getParameter:1, notablescan:1 }\n";
+ help << "supported so far:\n";
+ help << " quiet\n";
+ help << " notablescan\n";
+ help << " logLevel\n";
+ help << " syncdelay\n";
+ help << "{ getParameter:'*' } to get everything\n";
+ }
+ bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ bool all = *cmdObj.firstElement().valuestrsafe() == '*';
+
+ int before = result.len();
+
+ if( all || cmdObj.hasElement("quiet") ) {
+ result.append("quiet", cmdLine.quiet );
+ }
+ if( all || cmdObj.hasElement("notablescan") ) {
+ result.append("notablescan", cmdLine.noTableScan);
+ }
+ if( all || cmdObj.hasElement("logLevel") ) {
+ result.append("logLevel", logLevel);
+ }
+ if( all || cmdObj.hasElement("syncdelay") ) {
+ result.append("syncdelay", cmdLine.syncdelay);
+ }
+ if( all || cmdObj.hasElement("replApplyBatchSize") ) {
+ result.append("replApplyBatchSize", replApplyBatchSize);
+ }
+
+ if ( before == result.len() ) {
+ errmsg = "no option found to get";
+ return false;
+ }
+ return true;
+ }
+ } cmdGet;
+
+ // tempish
+ bool setParmsMongodSpecific(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl );
+
+ class CmdSet : public Command {
+ public:
+ CmdSet() : Command( "setParameter" ) { }
+ virtual bool slaveOk() const { return true; }
+ virtual bool adminOnly() const { return true; }
+ virtual LockType locktype() const { return NONE; }
+ virtual void help( stringstream &help ) const {
+ help << "set administrative option(s)\n";
+ help << "{ setParameter:1, <param>:<value> }\n";
+ help << "supported so far:\n";
+ help << " journalCommitInterval\n";
+ help << " logLevel\n";
+ help << " notablescan\n";
+ help << " quiet\n";
+ help << " syncdelay\n";
+ }
+ bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ int s = 0;
+ bool found = setParmsMongodSpecific(dbname, cmdObj, errmsg, result, fromRepl);
+ if( cmdObj.hasElement("journalCommitInterval") ) {
+ if( !cmdLine.dur ) {
+ errmsg = "journaling is off";
+ return false;
+ }
+ int x = (int) cmdObj["journalCommitInterval"].Number();
+ assert( x > 1 && x < 500 );
+ cmdLine.journalCommitInterval = x;
+ log() << "setParameter journalCommitInterval=" << x << endl;
+ s++;
+ }
+ if( cmdObj.hasElement("notablescan") ) {
+ assert( !cmdLine.isMongos() );
+ if( s == 0 )
+ result.append("was", cmdLine.noTableScan);
+ cmdLine.noTableScan = cmdObj["notablescan"].Bool();
+ s++;
+ }
+ if( cmdObj.hasElement("quiet") ) {
+ if( s == 0 )
+ result.append("was", cmdLine.quiet );
+ cmdLine.quiet = cmdObj["quiet"].Bool();
+ s++;
+ }
+ if( cmdObj.hasElement("syncdelay") ) {
+ assert( !cmdLine.isMongos() );
+ if( s == 0 )
+ result.append("was", cmdLine.syncdelay );
+ cmdLine.syncdelay = cmdObj["syncdelay"].Number();
+ s++;
+ }
+ if( cmdObj.hasElement( "logLevel" ) ) {
+ if( s == 0 )
+ result.append("was", logLevel );
+ logLevel = cmdObj["logLevel"].numberInt();
+ s++;
+ }
+ if( cmdObj.hasElement( "replApplyBatchSize" ) ) {
+ if( s == 0 )
+ result.append("was", replApplyBatchSize );
+ BSONElement e = cmdObj["replApplyBatchSize"];
+ ParameterValidator * v = ParameterValidator::get( e.fieldName() );
+ assert( v );
+ if ( ! v->isValid( e , errmsg ) )
+ return false;
+ replApplyBatchSize = e.numberInt();
+ s++;
+ }
+ if( cmdObj.hasElement( "traceExceptions" ) ) {
+ if( s == 0 ) result.append( "was", DBException::traceExceptions );
+ DBException::traceExceptions = cmdObj["traceExceptions"].Bool();
+ s++;
+ }
+
+ if( s == 0 && !found ) {
+ errmsg = "no option found to set, use help:true to see options ";
+ return false;
+ }
+
+ return true;
+ }
+ } cmdSet;
+
+ class PingCommand : public Command {
+ public:
+ PingCommand() : Command( "ping" ) {}
+ virtual bool slaveOk() const { return true; }
+ virtual void help( stringstream &help ) const { help << "a way to check that the server is alive. responds immediately even if server is in a db lock."; }
+ virtual LockType locktype() const { return NONE; }
+ virtual bool requiresAuth() { return false; }
+ virtual bool run(const string& badns, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ // IMPORTANT: Don't put anything in here that might lock db - including authentication
+ return true;
+ }
+ } pingCmd;
+
+ class FeaturesCmd : public Command {
+ public:
+ FeaturesCmd() : Command( "features", true ) {}
+ void help(stringstream& h) const { h << "return build level feature settings"; }
+ virtual bool slaveOk() const { return true; }
+ virtual bool readOnly() { return true; }
+ virtual LockType locktype() const { return NONE; }
+ virtual bool run(const string& ns, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ if ( globalScriptEngine ) {
+ BSONObjBuilder bb( result.subobjStart( "js" ) );
+ result.append( "utf8" , globalScriptEngine->utf8Ok() );
+ bb.done();
+ }
+ if ( cmdObj["oidReset"].trueValue() ) {
+ result.append( "oidMachineOld" , OID::getMachineId() );
+ OID::regenMachineId();
+ }
+ result.append( "oidMachine" , OID::getMachineId() );
+ return true;
+ }
+
+ } featuresCmd;
+
+ class LogRotateCmd : public Command {
+ public:
+ LogRotateCmd() : Command( "logRotate" ) {}
+ virtual LockType locktype() const { return NONE; }
+ virtual bool slaveOk() const { return true; }
+ virtual bool adminOnly() const { return true; }
+ virtual bool run(const string& ns, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ rotateLogs();
+ return 1;
+ }
+
+ } logRotateCmd;
+
+ class ListCommandsCmd : public Command {
+ public:
+ virtual void help( stringstream &help ) const { help << "get a list of all db commands"; }
+ ListCommandsCmd() : Command( "listCommands", false ) {}
+ virtual LockType locktype() const { return NONE; }
+ virtual bool slaveOk() const { return true; }
+ virtual bool adminOnly() const { return false; }
+ virtual bool run(const string& ns, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ BSONObjBuilder b( result.subobjStart( "commands" ) );
+ for ( map<string,Command*>::iterator i=_commands->begin(); i!=_commands->end(); ++i ) {
+ Command * c = i->second;
+
+ // don't show oldnames
+ if (i->first != c->name)
+ continue;
+
+ BSONObjBuilder temp( b.subobjStart( c->name ) );
+
+ {
+ stringstream help;
+ c->help( help );
+ temp.append( "help" , help.str() );
+ }
+ temp.append( "lockType" , c->locktype() );
+ temp.append( "slaveOk" , c->slaveOk() );
+ temp.append( "adminOnly" , c->adminOnly() );
+ //optionally indicates that the command can be forced to run on a slave/secondary
+ if ( c->slaveOverrideOk() ) temp.append( "slaveOverrideOk" , c->slaveOverrideOk() );
+ temp.done();
+ }
+ b.done();
+
+ return 1;
+ }
+
+ } listCommandsCmd;
+
+ bool CmdShutdown::shutdownHelper() {
+ Client * c = currentClient.get();
+ if ( c ) {
+ c->shutdown();
+ }
+
+ log() << "terminating, shutdown command received" << endl;
+
+ dbexit( EXIT_CLEAN , "shutdown called" , true ); // this never returns
+ assert(0);
+ return true;
+ }
+
+ /* for testing purposes only */
+ class CmdForceError : public Command {
+ public:
+ virtual void help( stringstream& help ) const {
+ help << "for testing purposes only. forces a user assertion exception";
+ }
+ virtual bool logTheOp() {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual LockType locktype() const { return NONE; }
+ CmdForceError() : Command("forceerror") {}
+ bool run(const string& dbnamne, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ uassert( 10038 , "forced error", false);
+ return true;
+ }
+ } cmdForceError;
+
+ class AvailableQueryOptions : public Command {
+ public:
+ AvailableQueryOptions() : Command( "availableQueryOptions" , false , "availablequeryoptions" ) {}
+ virtual bool slaveOk() const { return true; }
+ virtual LockType locktype() const { return NONE; }
+ virtual bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ result << "options" << QueryOption_AllSupported;
+ return true;
+ }
+ } availableQueryOptionsCmd;
+
+ class GetLogCmd : public Command {
+ public:
+ GetLogCmd() : Command( "getLog" ){}
+
+ virtual bool slaveOk() const { return true; }
+ virtual LockType locktype() const { return NONE; }
+ virtual bool adminOnly() const { return true; }
+
+ virtual void help( stringstream& help ) const {
+ help << "{ getLog : '*' } OR { getLog : 'global' }";
+ }
+
+ virtual bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ string p = cmdObj.firstElement().String();
+ if ( p == "*" ) {
+ vector<string> names;
+ RamLog::getNames( names );
+
+ BSONArrayBuilder arr;
+ for ( unsigned i=0; i<names.size(); i++ ) {
+ arr.append( names[i] );
+ }
+
+ result.appendArray( "names" , arr.arr() );
+ }
+ else {
+ RamLog* rl = RamLog::get( p );
+ if ( ! rl ) {
+ errmsg = str::stream() << "no RamLog named: " << p;
+ return false;
+ }
+
+ vector<const char*> lines;
+ rl->get( lines );
+
+ BSONArrayBuilder arr( result.subarrayStart( "log" ) );
+ for ( unsigned i=0; i<lines.size(); i++ )
+ arr.append( lines[i] );
+ arr.done();
+ }
+ return true;
+ }
+
+ } getLogCmd;
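+    /* illustrative only (not part of the original source):
+         { getLog : "*" }       returns { names : [ <available RamLog names> ] }
+         { getLog : "global" }  returns { log : [ <recent log lines> ] }          */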
+
+}
diff --git a/src/mongo/db/dbeval.cpp b/src/mongo/db/dbeval.cpp
new file mode 100644
index 00000000000..9e77d8c8097
--- /dev/null
+++ b/src/mongo/db/dbeval.cpp
@@ -0,0 +1,136 @@
+/* dbeval.cpp
+ db "commands" (sent via db.$cmd.findOne(...))
+ */
+
+/**
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "ops/query.h"
+#include "pdfile.h"
+#include "jsobj.h"
+#include "../bson/util/builder.h"
+#include <time.h>
+#include "introspect.h"
+#include "btree.h"
+#include "../util/lruishmap.h"
+#include "json.h"
+#include "repl.h"
+#include "commands.h"
+#include "cmdline.h"
+
+#include "../scripting/engine.h"
+
+namespace mongo {
+
+ const int edebug=0;
+
+ bool dbEval(const string& dbName, BSONObj& cmd, BSONObjBuilder& result, string& errmsg) {
+ BSONElement e = cmd.firstElement();
+ uassert( 10046 , "eval needs Code" , e.type() == Code || e.type() == CodeWScope || e.type() == String );
+
+ const char *code = 0;
+ switch ( e.type() ) {
+ case String:
+ case Code:
+ code = e.valuestr();
+ break;
+ case CodeWScope:
+ code = e.codeWScopeCode();
+ break;
+ default:
+ assert(0);
+ }
+ assert( code );
+
+ if ( ! globalScriptEngine ) {
+ errmsg = "db side execution is disabled";
+ return false;
+ }
+
+ auto_ptr<Scope> s = globalScriptEngine->getPooledScope( dbName );
+ ScriptingFunction f = s->createFunction(code);
+ if ( f == 0 ) {
+ errmsg = (string)"compile failed: " + s->getError();
+ return false;
+ }
+
+ if ( e.type() == CodeWScope )
+ s->init( e.codeWScopeScopeData() );
+ s->localConnect( dbName.c_str() );
+
+ BSONObj args;
+ {
+ BSONElement argsElement = cmd.getField("args");
+ if ( argsElement.type() == Array ) {
+ args = argsElement.embeddedObject();
+ if ( edebug ) {
+ out() << "args:" << args.toString() << endl;
+ out() << "code:\n" << code << endl;
+ }
+ }
+ }
+
+ int res;
+ {
+ Timer t;
+ res = s->invoke(f, &args, 0, cmdLine.quota ? 10 * 60 * 1000 : 0 );
+ int m = t.millis();
+ if ( m > cmdLine.slowMS ) {
+ out() << "dbeval slow, time: " << dec << m << "ms " << dbName << endl;
+ if ( m >= 1000 ) log() << code << endl;
+ else OCCASIONALLY log() << code << endl;
+ }
+ }
+ if ( res ) {
+ result.append("errno", (double) res);
+ errmsg = "invoke failed: ";
+ errmsg += s->getError();
+ return false;
+ }
+
+ s->append( result , "retval" , "return" );
+
+ return true;
+ }
+
+ class CmdEval : public Command {
+ public:
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual void help( stringstream &help ) const {
+ help << "Evaluate javascript at the server.\n" "http://www.mongodb.org/display/DOCS/Server-side+Code+Execution";
+ }
+ virtual LockType locktype() const { return NONE; }
+ CmdEval() : Command("eval", false, "$eval") { }
+ bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+
+ AuthenticationInfo *ai = cc().getAuthenticationInfo();
+ uassert( 12598 , "$eval reads unauthorized", ai->isAuthorizedReads(dbname.c_str()) );
+
+ if ( cmdObj["nolock"].trueValue() ) {
+ return dbEval(dbname, cmdObj, result, errmsg);
+ }
+
+ // write security will be enforced in DBDirectClient
+ mongolock lk( ai->isAuthorized( dbname.c_str() ) );
+ Client::Context ctx( dbname );
+
+ return dbEval(dbname, cmdObj, result, errmsg);
+ }
+ } cmdeval;
+
+} // namespace mongo
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
new file mode 100644
index 00000000000..39540c9ce89
--- /dev/null
+++ b/src/mongo/db/dbhelpers.cpp
@@ -0,0 +1,353 @@
+// dbhelpers.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "db.h"
+#include "dbhelpers.h"
+#include "json.h"
+#include "queryoptimizer.h"
+#include "btree.h"
+#include "pdfile.h"
+#include "oplog.h"
+#include "ops/update.h"
+#include "ops/delete.h"
+
+namespace mongo {
+
+ void Helpers::ensureIndex(const char *ns, BSONObj keyPattern, bool unique, const char *name) {
+ NamespaceDetails *d = nsdetails(ns);
+ if( d == 0 )
+ return;
+
+ {
+ NamespaceDetails::IndexIterator i = d->ii();
+ while( i.more() ) {
+ if( i.next().keyPattern().woCompare(keyPattern) == 0 )
+ return;
+ }
+ }
+
+ if( d->nIndexes >= NamespaceDetails::NIndexesMax ) {
+ problem() << "Helper::ensureIndex fails, MaxIndexes exceeded " << ns << '\n';
+ return;
+ }
+
+ string system_indexes = cc().database()->name + ".system.indexes";
+
+ BSONObjBuilder b;
+ b.append("name", name);
+ b.append("ns", ns);
+ b.append("key", keyPattern);
+ b.appendBool("unique", unique);
+ BSONObj o = b.done();
+
+ theDataFileMgr.insert(system_indexes.c_str(), o.objdata(), o.objsize());
+ }
+
+ /* fetch a single object from collection ns that matches query
+ set your db SavedContext first
+ */
+ bool Helpers::findOne(const char *ns, const BSONObj &query, BSONObj& result, bool requireIndex) {
+ DiskLoc loc = findOne( ns, query, requireIndex );
+ if ( loc.isNull() )
+ return false;
+ result = loc.obj();
+ return true;
+ }
+
+ /* fetch a single object from collection ns that matches query
+ set your db SavedContext first
+ */
+ DiskLoc Helpers::findOne(const char *ns, const BSONObj &query, bool requireIndex) {
+ shared_ptr<Cursor> c = NamespaceDetailsTransient::getCursor( ns, query, BSONObj(), requireIndex );
+ while( c->ok() ) {
+ if ( c->currentMatches() && !c->getsetdup( c->currLoc() ) ) {
+ return c->currLoc();
+ }
+ c->advance();
+ }
+ return DiskLoc();
+ }
+
+ bool Helpers::findById(Client& c, const char *ns, BSONObj query, BSONObj& result ,
+ bool * nsFound , bool * indexFound ) {
+ d.dbMutex.assertAtLeastReadLocked();
+ Database *database = c.database();
+ assert( database );
+ NamespaceDetails *d = database->namespaceIndex.details(ns);
+ if ( ! d )
+ return false;
+ if ( nsFound )
+ *nsFound = 1;
+
+ int idxNo = d->findIdIndex();
+ if ( idxNo < 0 )
+ return false;
+ if ( indexFound )
+ *indexFound = 1;
+
+ IndexDetails& i = d->idx( idxNo );
+
+ BSONObj key = i.getKeyFromQuery( query );
+
+ DiskLoc loc = i.idxInterface().findSingle(i , i.head , key);
+ if ( loc.isNull() )
+ return false;
+ result = loc.obj();
+ return true;
+ }
+
+ DiskLoc Helpers::findById(NamespaceDetails *d, BSONObj idquery) {
+ assert(d);
+ int idxNo = d->findIdIndex();
+ uassert(13430, "no _id index", idxNo>=0);
+ IndexDetails& i = d->idx( idxNo );
+ BSONObj key = i.getKeyFromQuery( idquery );
+ return i.idxInterface().findSingle(i , i.head , key);
+ }
+
+ bool Helpers::isEmpty(const char *ns, bool doAuth) {
+ Client::Context context(ns, dbpath, doAuth);
+ shared_ptr<Cursor> c = DataFileMgr::findAll(ns);
+ return !c->ok();
+ }
+
+ /* Get the first object from a collection. Generally only useful if the collection
+ only ever has a single object -- which is a "singleton collection".
+
+ Returns: true if object exists.
+ */
+ bool Helpers::getSingleton(const char *ns, BSONObj& result) {
+ Client::Context context(ns);
+
+ shared_ptr<Cursor> c = DataFileMgr::findAll(ns);
+ if ( !c->ok() ) {
+ context.getClient()->curop()->done();
+ return false;
+ }
+
+ result = c->current();
+ context.getClient()->curop()->done();
+ return true;
+ }
+
+ bool Helpers::getLast(const char *ns, BSONObj& result) {
+ Client::Context ctx(ns);
+ shared_ptr<Cursor> c = findTableScan(ns, reverseNaturalObj);
+ if( !c->ok() )
+ return false;
+ result = c->current();
+ return true;
+ }
+
+ void Helpers::upsert( const string& ns , const BSONObj& o ) {
+ BSONElement e = o["_id"];
+ assert( e.type() );
+ BSONObj id = e.wrap();
+
+ OpDebug debug;
+ Client::Context context(ns);
+ updateObjects(ns.c_str(), o, /*pattern=*/id, /*upsert=*/true, /*multi=*/false , /*logtheop=*/true , debug );
+ }
+
+ void Helpers::putSingleton(const char *ns, BSONObj obj) {
+ OpDebug debug;
+ Client::Context context(ns);
+ updateObjects(ns, obj, /*pattern=*/BSONObj(), /*upsert=*/true, /*multi=*/false , /*logtheop=*/true , debug );
+ context.getClient()->curop()->done();
+ }
+
+ void Helpers::putSingletonGod(const char *ns, BSONObj obj, bool logTheOp) {
+ OpDebug debug;
+ Client::Context context(ns);
+ _updateObjects(/*god=*/true, ns, obj, /*pattern=*/BSONObj(), /*upsert=*/true, /*multi=*/false , logTheOp , debug );
+ context.getClient()->curop()->done();
+ }
+
+ BSONObj Helpers::toKeyFormat( const BSONObj& o , BSONObj& key ) {
+ BSONObjBuilder me;
+ BSONObjBuilder k;
+
+ BSONObjIterator i( o );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ k.append( e.fieldName() , 1 );
+ me.appendAs( e , "" );
+ }
+ key = k.obj();
+ return me.obj();
+ }
+
+ long long Helpers::removeRange( const string& ns , const BSONObj& min , const BSONObj& max , bool yield , bool maxInclusive , RemoveCallback * callback ) {
+ BSONObj keya , keyb;
+ BSONObj minClean = toKeyFormat( min , keya );
+ BSONObj maxClean = toKeyFormat( max , keyb );
+ assert( keya == keyb );
+
+ Client::Context ctx(ns);
+ NamespaceDetails* nsd = nsdetails( ns.c_str() );
+ if ( ! nsd )
+ return 0;
+
+ int ii = nsd->findIndexByKeyPattern( keya );
+ assert( ii >= 0 );
+
+ long long num = 0;
+
+ IndexDetails& i = nsd->idx( ii );
+
+ shared_ptr<Cursor> c( BtreeCursor::make( nsd , ii , i , minClean , maxClean , maxInclusive, 1 ) );
+ auto_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout , c , ns ) );
+ cc->setDoingDeletes( true );
+
+ while ( c->ok() ) {
+
+ if ( yield && ! cc->yieldSometimes( ClientCursor::WillNeed) ) {
+ // cursor got finished by someone else, so we're done
+ cc.release(); // if the collection/db is dropped, cc may be deleted
+ break;
+ }
+
+ if ( ! c->ok() )
+ break;
+
+ DiskLoc rloc = c->currLoc();
+
+ if ( callback )
+ callback->goingToDelete( c->current() );
+
+ c->advance();
+ c->noteLocation();
+
+ logOp( "d" , ns.c_str() , rloc.obj()["_id"].wrap() );
+ theDataFileMgr.deleteRecord(ns.c_str() , rloc.rec(), rloc);
+ num++;
+
+ c->checkLocation();
+
+ getDur().commitIfNeeded();
+
+
+ }
+
+ return num;
+ }
+
+ void Helpers::emptyCollection(const char *ns) {
+ Client::Context context(ns);
+ deleteObjects(ns, BSONObj(), false);
+ }
+
+ DbSet::~DbSet() {
+ if ( name_.empty() )
+ return;
+ try {
+ Client::Context c( name_.c_str() );
+ if ( nsdetails( name_.c_str() ) ) {
+ string errmsg;
+ BSONObjBuilder result;
+ dropCollection( name_, errmsg, result );
+ }
+ }
+ catch ( ... ) {
+ problem() << "exception cleaning up DbSet" << endl;
+ }
+ }
+
+ void DbSet::reset( const string &name, const BSONObj &key ) {
+ if ( !name.empty() )
+ name_ = name;
+ if ( !key.isEmpty() )
+ key_ = key.getOwned();
+ Client::Context c( name_.c_str() );
+ if ( nsdetails( name_.c_str() ) ) {
+ Helpers::emptyCollection( name_.c_str() );
+ }
+ else {
+ string err;
+ massert( 10303 , err, userCreateNS( name_.c_str(), fromjson( "{autoIndexId:false}" ), err, false ) );
+ }
+ Helpers::ensureIndex( name_.c_str(), key_, true, "setIdx" );
+ }
+
+ bool DbSet::get( const BSONObj &obj ) const {
+ Client::Context c( name_.c_str() );
+ BSONObj temp;
+ return Helpers::findOne( name_.c_str(), obj, temp, true );
+ }
+
+ void DbSet::set( const BSONObj &obj, bool val ) {
+ Client::Context c( name_.c_str() );
+ if ( val ) {
+ try {
+ BSONObj k = obj;
+ theDataFileMgr.insertWithObjMod( name_.c_str(), k, false );
+ }
+ catch ( DBException& ) {
+ // dup key - already in set
+ }
+ }
+ else {
+ deleteObjects( name_.c_str(), obj, true, false, false );
+ }
+ }
+
+ RemoveSaver::RemoveSaver( const string& a , const string& b , const string& why) : _out(0) {
+ static int NUM = 0;
+
+ _root = dbpath;
+ if ( a.size() )
+ _root /= a;
+ if ( b.size() )
+ _root /= b;
+ assert( a.size() || b.size() );
+
+ _file = _root;
+
+ stringstream ss;
+ ss << why << "." << terseCurrentTime(false) << "." << NUM++ << ".bson";
+ _file /= ss.str();
+
+ }
+
+ RemoveSaver::~RemoveSaver() {
+ if ( _out ) {
+ _out->close();
+ delete _out;
+ _out = 0;
+ }
+ }
+
+ void RemoveSaver::goingToDelete( const BSONObj& o ) {
+ if ( ! _out ) {
+ create_directories( _root );
+ _out = new ofstream();
+ _out->open( _file.string().c_str() , ios_base::out | ios_base::binary );
+ if ( ! _out->good() ) {
+ log( LL_WARNING ) << "couldn't create file: " << _file.string() << " for remove saving" << endl;
+ delete _out;
+ _out = 0;
+ return;
+ }
+
+ }
+ _out->write( o.objdata() , o.objsize() );
+ }
+
+
+} // namespace mongo
diff --git a/src/mongo/db/dbhelpers.h b/src/mongo/db/dbhelpers.h
new file mode 100644
index 00000000000..99d401fa1f8
--- /dev/null
+++ b/src/mongo/db/dbhelpers.h
@@ -0,0 +1,159 @@
+/* @file dbhelpers.h
+
+ db helpers are helper functions and classes that let us easily manipulate the local
+ database instance in-proc.
+*/
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../pch.h"
+#include "client.h"
+#include "db.h"
+
+namespace mongo {
+
+ const BSONObj reverseNaturalObj = BSON( "$natural" << -1 );
+
+ class Cursor;
+ class CoveredIndexMatcher;
+
+ /**
+ all helpers assume locking is handled above them
+ */
+ struct Helpers {
+
+ /* ensure the specified index exists.
+
+ @param keyPattern key pattern, e.g., { ts : 1 }
+ @param name index name, e.g., "name_1"
+
+ This method can be a little (not much) cpu-slow, so you may wish to use
+ OCCASIONALLY ensureIndex(...);
+
+ Note: use ensureHaveIdIndex() for the _id index: it is faster.
+ Note: does nothing if collection does not yet exist.
+ */
+ static void ensureIndex(const char *ns, BSONObj keyPattern, bool unique, const char *name);
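+        /* illustrative usage only (not part of the original source):
+             Helpers::ensureIndex( "test.foo", BSON( "ts" << 1 ), false, "ts_1" );
+           creates the { ts : 1 } index on test.foo unless it already exists
+           (and silently does nothing if test.foo does not exist yet). */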
+
+ /* fetch a single object from collection ns that matches query.
+ set your db SavedContext first.
+
+ @param query - the query to perform. note this is the low level portion of query so "orderby : ..."
+ won't work.
+
+ @param requireIndex if true, assert if no index for the query. a way to guard against
+ writing a slow query.
+
+ @return true if object found
+ */
+ static bool findOne(const char *ns, const BSONObj &query, BSONObj& result, bool requireIndex = false);
+ static DiskLoc findOne(const char *ns, const BSONObj &query, bool requireIndex);
+
+ /**
+ * @param nsFound if passed in, will be set to true if the ns was found
+ * @param indexFound if passed in, will be set to true if the ns and its _id index were found
+ * @return true if object found
+ */
+ static bool findById(Client&, const char *ns, BSONObj query, BSONObj& result ,
+ bool * nsFound = 0 , bool * indexFound = 0 );
+
+ /* uasserts if no _id index.
+ @return null loc if not found */
+ static DiskLoc findById(NamespaceDetails *d, BSONObj query);
+
+ /** Get/put the first (or last) object from a collection. Generally only useful if the collection
+ only ever has a single object -- which is a "singleton collection".
+
+ You do not need to set the database (Context) before calling.
+
+ @return true if object exists.
+ */
+ static bool getSingleton(const char *ns, BSONObj& result);
+ static void putSingleton(const char *ns, BSONObj obj);
+ static void putSingletonGod(const char *ns, BSONObj obj, bool logTheOp);
+ static bool getFirst(const char *ns, BSONObj& result) { return getSingleton(ns, result); }
+ static bool getLast(const char *ns, BSONObj& result); // get last object in the collection; e.g. {$natural : -1}
+
+ /**
+ * you have to lock
+ * you do not have to have Context set
+ * o has to have an _id field or will assert
+ */
+ static void upsert( const string& ns , const BSONObj& o );
+
+ /** You do not need to set the database before calling.
+ @return true if collection is empty.
+ */
+ static bool isEmpty(const char *ns, bool doAuth=true);
+
+ // TODO: this should be somewhere else probably
+ static BSONObj toKeyFormat( const BSONObj& o , BSONObj& key );
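+        /* illustrative only (not part of the original source): for o = { a : 123, b : 456 },
+           toKeyFormat( o, key ) returns { "" : 123, "" : 456 } and sets key to { a : 1, b : 1 }. */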
+
+ class RemoveCallback {
+ public:
+ virtual ~RemoveCallback() {}
+ virtual void goingToDelete( const BSONObj& o ) = 0;
+ };
+ /* removeRange: operation is oplog'd */
+ static long long removeRange( const string& ns , const BSONObj& min , const BSONObj& max , bool yield = false , bool maxInclusive = false , RemoveCallback * callback = 0 );
+
+ /* Remove all objects from a collection.
+ You do not need to set the database before calling.
+ */
+ static void emptyCollection(const char *ns);
+
+ };
+
+ class Database;
+
+ // manage a set using collection backed storage
+ class DbSet {
+ public:
+ DbSet( const string &name = "", const BSONObj &key = BSONObj() ) :
+ name_( name ),
+ key_( key.getOwned() ) {
+ }
+ ~DbSet();
+ void reset( const string &name = "", const BSONObj &key = BSONObj() );
+ bool get( const BSONObj &obj ) const;
+ void set( const BSONObj &obj, bool val );
+ private:
+ string name_;
+ BSONObj key_;
+ };
+
+
+ /**
+ * used for saving deleted bson objects to a flat file
+ */
+ class RemoveSaver : public Helpers::RemoveCallback , boost::noncopyable {
+ public:
+ RemoveSaver( const string& type , const string& ns , const string& why);
+ ~RemoveSaver();
+
+ void goingToDelete( const BSONObj& o );
+
+ private:
+ path _root;
+ path _file;
+ ofstream* _out;
+
+ };
+
+
+} // namespace mongo
diff --git a/src/mongo/db/dbmessage.cpp b/src/mongo/db/dbmessage.cpp
new file mode 100644
index 00000000000..c86b5a05240
--- /dev/null
+++ b/src/mongo/db/dbmessage.cpp
@@ -0,0 +1,108 @@
+// dbmessage.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "dbmessage.h"
+#include "../client/dbclient.h"
+
+namespace mongo {
+
+ string Message::toString() const {
+ stringstream ss;
+ ss << "op: " << opToString( operation() ) << " len: " << size();
+ if ( operation() >= 2000 && operation() < 2100 ) {
+ DbMessage d(*this);
+ ss << " ns: " << d.getns();
+ switch ( operation() ) {
+ case dbUpdate: {
+ int flags = d.pullInt();
+ BSONObj q = d.nextJsObj();
+ BSONObj o = d.nextJsObj();
+ ss << " flags: " << flags << " query: " << q << " update: " << o;
+ break;
+ }
+ case dbInsert:
+ ss << d.nextJsObj();
+ break;
+ case dbDelete: {
+ int flags = d.pullInt();
+ BSONObj q = d.nextJsObj();
+ ss << " flags: " << flags << " query: " << q;
+ break;
+ }
+ default:
+ ss << " CANNOT HANDLE YET";
+ }
+
+
+ }
+ return ss.str();
+ }
+
+
+ void replyToQuery(int queryResultFlags,
+ AbstractMessagingPort* p, Message& requestMsg,
+ void *data, int size,
+ int nReturned, int startingFrom,
+ long long cursorId
+ ) {
+ BufBuilder b(32768);
+ b.skip(sizeof(QueryResult));
+ b.appendBuf(data, size);
+ QueryResult *qr = (QueryResult *) b.buf();
+ qr->_resultFlags() = queryResultFlags;
+ qr->len = b.len();
+ qr->setOperation(opReply);
+ qr->cursorId = cursorId;
+ qr->startingFrom = startingFrom;
+ qr->nReturned = nReturned;
+ b.decouple();
+ Message resp(qr, true);
+ p->reply(requestMsg, resp, requestMsg.header()->id);
+ }
+
+ void replyToQuery(int queryResultFlags,
+ AbstractMessagingPort* p, Message& requestMsg,
+ BSONObj& responseObj) {
+ replyToQuery(queryResultFlags,
+ p, requestMsg,
+ (void *) responseObj.objdata(), responseObj.objsize(), 1);
+ }
+
+ void replyToQuery(int queryResultFlags, Message &m, DbResponse &dbresponse, BSONObj obj) {
+ BufBuilder b;
+ b.skip(sizeof(QueryResult));
+ b.appendBuf((void*) obj.objdata(), obj.objsize());
+ QueryResult* msgdata = (QueryResult *) b.buf();
+ b.decouple();
+ QueryResult *qr = msgdata;
+ qr->_resultFlags() = queryResultFlags;
+ qr->len = b.len();
+ qr->setOperation(opReply);
+ qr->cursorId = 0;
+ qr->startingFrom = 0;
+ qr->nReturned = 1;
+ Message *resp = new Message();
+ resp->setData(msgdata, true); // transport will free
+ dbresponse.response = resp;
+ dbresponse.responseTo = m.header()->id;
+ }
+
+
+
+}
diff --git a/src/mongo/db/dbmessage.h b/src/mongo/db/dbmessage.h
new file mode 100644
index 00000000000..a789bff849c
--- /dev/null
+++ b/src/mongo/db/dbmessage.h
@@ -0,0 +1,282 @@
+// dbmessage.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "diskloc.h"
+#include "jsobj.h"
+#include "namespace-inl.h"
+#include "../util/net/message.h"
+#include "../client/constants.h"
+#include "instance.h"
+
+namespace mongo {
+
+ /* db response format
+
+ Query or GetMore: // see struct QueryResult
+ int resultFlags;
+ int64 cursorID;
+ int startingFrom;
+ int nReturned;
+ list of marshalled JSObjects;
+ */
+
+/* db request message format
+
+ unsigned opid; // arbitrary; will be echoed back
+ byte operation;
+ int options;
+
+ then for:
+
+ dbInsert:
+ string collection;
+ a series of JSObjects
+ dbDelete:
+ string collection;
+ int flags=0; // 1=DeleteSingle
+ JSObject query;
+ dbUpdate:
+ string collection;
+ int flags; // 1=upsert
+ JSObject query;
+ JSObject objectToUpdate;
+ objectToUpdate may include { $inc: <field> } or { $set: ... }, see struct Mod.
+ dbQuery:
+ string collection;
+ int nToSkip;
+ int nToReturn; // how many you want back as the beginning of the cursor data (0=no limit)
+ // greater than zero is simply a hint on how many objects to send back per "cursor batch".
+ // a negative number indicates a hard limit.
+ JSObject query;
+ [JSObject fieldsToReturn]
+ dbGetMore:
+ string collection; // redundant, might use for security.
+ int nToReturn;
+ int64 cursorID;
+ dbKillCursors=2007:
+ int n;
+ int64 cursorIDs[n];
+
+ Note that on Update, there is only one object, which is different
+ from insert where you can pass a list of objects to insert in the db.
+ Note that the update field layout is very similar to that of Query.
+*/
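+/* illustrative only (not part of the original header): following the layout above, a dbQuery
+   for { name : "joe" } against collection "test.users" carries, after the common header
+   (opid / operation / options):
+       "test.users\0"      // string collection
+       int nToSkip   = 0
+       int nToReturn = 0   // 0 = no limit
+       { name : "joe" }    // JSObject query
+   optionally followed by a fieldsToReturn object.
+*/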
+
+
+#pragma pack(1)
+ struct QueryResult : public MsgData {
+ long long cursorId;
+ int startingFrom;
+ int nReturned;
+ const char *data() {
+ return (char *) (((int *)&nReturned)+1);
+ }
+ int resultFlags() {
+ return dataAsInt();
+ }
+ int& _resultFlags() {
+ return dataAsInt();
+ }
+ void setResultFlagsToOk() {
+ _resultFlags() = ResultFlag_AwaitCapable;
+ }
+ void initializeResultFlags() {
+ _resultFlags() = 0;
+ }
+ };
+
+#pragma pack()
+
+ /* For the database/server protocol, these objects and functions encapsulate
+ the various messages transmitted over the connection.
+
+ See http://www.mongodb.org/display/DOCS/Mongo+Wire+Protocol
+ */
+ class DbMessage {
+ public:
+ DbMessage(const Message& _m) : m(_m) , mark(0) {
+ // for received messages, Message has only one buffer
+ theEnd = _m.singleData()->_data + _m.header()->dataLen();
+ char *r = _m.singleData()->_data;
+ reserved = (int *) r;
+ data = r + 4;
+ nextjsobj = data;
+ }
+
+ /** the 32 bit field before the ns
+ * track all bit usage here as its cross op
+ * 0: InsertOption_ContinueOnError
+ * 1: fromWriteback
+ */
+ int& reservedField() { return *reserved; }
+
+ const char * getns() const {
+ return data;
+ }
+ void getns(Namespace& ns) const {
+ ns = data;
+ }
+
+ const char * afterNS() const {
+ return data + strlen( data ) + 1;
+ }
+
+ int getInt( int num ) const {
+ const int * foo = (const int*)afterNS();
+ return foo[num];
+ }
+
+ int getQueryNToReturn() const {
+ return getInt( 1 );
+ }
+
+ /**
+ * get an int64 at specified offsetBytes after ns
+ */
+ long long getInt64( int offsetBytes ) const {
+ const char * x = afterNS();
+ x += offsetBytes;
+ const long long * ll = (const long long*)x;
+ return ll[0];
+ }
+
+ void resetPull() { nextjsobj = data; }
+ int pullInt() const { return const_cast<DbMessage*>(this)->pullInt(); } // delegate to the non-const overload instead of recursing
+ int& pullInt() {
+ if ( nextjsobj == data )
+ nextjsobj += strlen(data) + 1; // skip namespace
+ int& i = *((int *)nextjsobj);
+ nextjsobj += 4;
+ return i;
+ }
+ long long pullInt64() const {
+ // delegate to the non-const overload instead of recursing
+ return const_cast<DbMessage*>(this)->pullInt64();
+ }
+ long long &pullInt64() {
+ if ( nextjsobj == data )
+ nextjsobj += strlen(data) + 1; // skip namespace
+ long long &i = *((long long *)nextjsobj);
+ nextjsobj += 8;
+ return i;
+ }
+
+ OID* getOID() const {
+ return (OID *) (data + strlen(data) + 1); // skip namespace
+ }
+
+ void getQueryStuff(const char *&query, int& ntoreturn) {
+ int *i = (int *) (data + strlen(data) + 1);
+ ntoreturn = *i;
+ i++;
+ query = (const char *) i;
+ }
+
+ /* for insert and update msgs */
+ bool moreJSObjs() const {
+ return nextjsobj != 0;
+ }
+ BSONObj nextJsObj() {
+ if ( nextjsobj == data ) {
+ nextjsobj += strlen(data) + 1; // skip namespace
+ massert( 13066 , "Message contains no documents", theEnd > nextjsobj );
+ }
+ massert( 10304 , "Client Error: Remaining data too small for BSON object", theEnd - nextjsobj > 3 );
+ BSONObj js(nextjsobj);
+ massert( 10305 , "Client Error: Invalid object size", js.objsize() > 3 );
+ massert( 10306 , "Client Error: Next object larger than space left in message",
+ js.objsize() < ( theEnd - data ) );
+ if ( cmdLine.objcheck && !js.valid() ) {
+ massert( 10307 , "Client Error: bad object in message", false);
+ }
+ nextjsobj += js.objsize();
+ if ( nextjsobj >= theEnd )
+ nextjsobj = 0;
+ return js;
+ }
+
+ const Message& msg() const { return m; }
+
+ void markSet() {
+ mark = nextjsobj;
+ }
+
+ void markReset() {
+ assert( mark );
+ nextjsobj = mark;
+ }
+
+ private:
+ const Message& m;
+ int* reserved;
+ const char *data;
+ const char *nextjsobj;
+ const char *theEnd;
+
+ const char * mark;
+
+ public:
+ enum ReservedOptions {
+ Reserved_InsertOption_ContinueOnError = 1 << 0 ,
+ Reserved_FromWriteback = 1 << 1
+ };
+ };
+
+
+ /* a request to run a query, received from the database */
+ class QueryMessage {
+ public:
+ const char *ns;
+ int ntoskip;
+ int ntoreturn;
+ int queryOptions;
+ BSONObj query;
+ BSONObj fields;
+
+ /* parses the message into the above fields */
+ QueryMessage(DbMessage& d) {
+ ns = d.getns();
+ ntoskip = d.pullInt();
+ ntoreturn = d.pullInt();
+ query = d.nextJsObj();
+ if ( d.moreJSObjs() ) {
+ fields = d.nextJsObj();
+ }
+ queryOptions = d.msg().header()->dataAsInt();
+ }
+ };
+
+ void replyToQuery(int queryResultFlags,
+ AbstractMessagingPort* p, Message& requestMsg,
+ void *data, int size,
+ int nReturned, int startingFrom = 0,
+ long long cursorId = 0
+ );
+
+
+ /* object reply helper. */
+ void replyToQuery(int queryResultFlags,
+ AbstractMessagingPort* p, Message& requestMsg,
+ BSONObj& responseObj);
+
+ /* helper to do a reply using a DbResponse object */
+ void replyToQuery(int queryResultFlags, Message &m, DbResponse &dbresponse, BSONObj obj);
+
+
+} // namespace mongo
diff --git a/src/mongo/db/dbwebserver.cpp b/src/mongo/db/dbwebserver.cpp
new file mode 100644
index 00000000000..eb19ba3be6c
--- /dev/null
+++ b/src/mongo/db/dbwebserver.cpp
@@ -0,0 +1,539 @@
+/* dbwebserver.cpp
+
+ This is the administrative web page displayed on port 28017.
+*/
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "../util/net/miniwebserver.h"
+#include "../util/mongoutils/html.h"
+#include "../util/md5.hpp"
+#include "db.h"
+#include "instance.h"
+#include "security.h"
+#include "stats/snapshots.h"
+#include "background.h"
+#include "commands.h"
+#include "../util/version.h"
+#include "../util/ramlog.h"
+#include "pcrecpp.h"
+#include "../util/admin_access.h"
+#include "dbwebserver.h"
+#include <boost/date_time/posix_time/posix_time.hpp>
+#undef assert
+#define assert MONGO_assert
+
+namespace mongo {
+
+ using namespace mongoutils::html;
+ using namespace bson;
+
+ time_t started = time(0);
+
+ struct Timing {
+ Timing() {
+ start = timeLocked = 0;
+ }
+ unsigned long long start, timeLocked;
+ };
+
+ bool execCommand( Command * c ,
+ Client& client , int queryOptions ,
+ const char *ns, BSONObj& cmdObj ,
+ BSONObjBuilder& result,
+ bool fromRepl );
+
+ class DbWebServer : public MiniWebServer {
+ public:
+ DbWebServer(const string& ip, int port, const AdminAccess* webUsers)
+ : MiniWebServer("admin web console", ip, port), _webUsers(webUsers) {
+ WebStatusPlugin::initAll();
+ }
+
+ private:
+ const AdminAccess* _webUsers; // not owned here
+
+ void doUnlockedStuff(stringstream& ss) {
+ /* this is in the header already ss << "port: " << port << '\n'; */
+ ss << "<pre>";
+ ss << mongodVersion() << '\n';
+ ss << "git hash: " << gitVersion() << '\n';
+ ss << "sys info: " << sysInfo() << '\n';
+ ss << "uptime: " << time(0)-started << " seconds\n";
+ ss << "</pre>";
+ }
+
+ bool allowed( const char * rq , vector<string>& headers, const SockAddr &from ) {
+ if ( from.isLocalHost() || !_webUsers->haveAdminUsers() ) {
+ cmdAuthenticate.authenticate( "admin", "RestUser", false );
+ return true;
+ }
+
+ string auth = getHeader( rq , "Authorization" );
+
+ if ( auth.size() > 0 && auth.find( "Digest " ) == 0 ) {
+ auth = auth.substr( 7 ) + ", ";
+
+ map<string,string> parms;
+ pcrecpp::StringPiece input( auth );
+
+ string name, val;
+ pcrecpp::RE re("(\\w+)=\"?(.*?)\"?, ");
+ while ( re.Consume( &input, &name, &val) ) {
+ parms[name] = val;
+ }
+
+ BSONObj user = _webUsers->getAdminUser( parms["username"] );
+ if ( ! user.isEmpty() ) {
+ string ha1 = user["pwd"].str();
+ string ha2 = md5simpledigest( (string)"GET" + ":" + parms["uri"] );
+
+ stringstream r;
+ r << ha1 << ':' << parms["nonce"];
+ if ( parms["nc"].size() && parms["cnonce"].size() && parms["qop"].size() ) {
+ r << ':';
+ r << parms["nc"];
+ r << ':';
+ r << parms["cnonce"];
+ r << ':';
+ r << parms["qop"];
+ }
+ r << ':';
+ r << ha2;
+ string r1 = md5simpledigest( r.str() );
+
+ if ( r1 == parms["response"] ) {
+ cmdAuthenticate.authenticate( "admin", user["user"].str(), user[ "readOnly" ].isBoolean() && user[ "readOnly" ].boolean() );
+ return true;
+ }
+ }
+ }
+
+ stringstream authHeader;
+ authHeader
+ << "WWW-Authenticate: "
+ << "Digest realm=\"mongo\", "
+ << "nonce=\"abc\", "
+ << "algorithm=MD5, qop=\"auth\" "
+ ;
+
+ headers.push_back( authHeader.str() );
+ return false;
+ }
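+        /* illustrative summary (not part of the original source): the digest check above computes
+           ha2 = md5( "GET:" + uri ), then compares the client's "response" against
+           md5( ha1 ":" nonce [ ":" nc ":" cnonce ":" qop ] ":" ha2 ), where ha1 is the stored
+           user["pwd"] hash from the admin user document. */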
+
+ virtual void doRequest(
+ const char *rq, // the full request
+ string url,
+ // set these and return them:
+ string& responseMsg,
+ int& responseCode,
+ vector<string>& headers, // if completely empty, content-type: text/html will be added
+ const SockAddr &from
+ ) {
+ if ( url.size() > 1 ) {
+
+ if ( ! allowed( rq , headers, from ) ) {
+ responseCode = 401;
+ headers.push_back( "Content-Type: text/plain;charset=utf-8" );
+ responseMsg = "not allowed\n";
+ return;
+ }
+
+ {
+ BSONObj params;
+ const size_t pos = url.find( "?" );
+ if ( pos != string::npos ) {
+ MiniWebServer::parseParams( params , url.substr( pos + 1 ) );
+ url = url.substr(0, pos);
+ }
+
+ DbWebHandler * handler = DbWebHandler::findHandler( url );
+ if ( handler ) {
+ if ( handler->requiresREST( url ) && ! cmdLine.rest ) {
+ _rejectREST( responseMsg , responseCode , headers );
+ }
+ else {
+ string callback = params.getStringField("jsonp");
+ uassert(13453, "server not started with --jsonp", callback.empty() || cmdLine.jsonp);
+
+ handler->handle( rq , url , params , responseMsg , responseCode , headers , from );
+
+ if (responseCode == 200 && !callback.empty()) {
+ responseMsg = callback + '(' + responseMsg + ')';
+ }
+ }
+ return;
+ }
+ }
+
+
+ if ( ! cmdLine.rest ) {
+ _rejectREST( responseMsg , responseCode , headers );
+ return;
+ }
+
+ responseCode = 404;
+ headers.push_back( "Content-Type: text/html;charset=utf-8" );
+ responseMsg = "<html><body>unknown url</body></html>\n";
+ return;
+ }
+
+ // generate home page
+
+ if ( ! allowed( rq , headers, from ) ) {
+ responseCode = 401;
+ headers.push_back( "Content-Type: text/plain;charset=utf-8" );
+ responseMsg = "not allowed\n";
+ return;
+ }
+
+ responseCode = 200;
+ stringstream ss;
+ string dbname;
+ {
+ stringstream z;
+ z << cmdLine.binaryName << ' ' << prettyHostName();
+ dbname = z.str();
+ }
+ ss << start(dbname) << h2(dbname);
+ ss << "<p><a href=\"/_commands\">List all commands</a> | \n";
+ ss << "<a href=\"/_replSet\">Replica set status</a></p>\n";
+
+ //ss << "<a href=\"/_status\">_status</a>";
+ {
+ const map<string, Command*> *m = Command::webCommands();
+ if( m ) {
+ ss <<
+ a("",
+ "These read-only context-less commands can be executed from the web interface. "
+ "Results are json format, unless ?text=1 is appended in which case the result is output as text "
+ "for easier human viewing",
+ "Commands")
+ << ": ";
+ for( map<string, Command*>::const_iterator i = m->begin(); i != m->end(); i++ ) {
+ stringstream h;
+ i->second->help(h);
+ string help = h.str();
+ ss << "<a href=\"/" << i->first << "?text=1\"";
+ if( help != "no help defined" )
+ ss << " title=\"" << help << '"';
+ ss << ">" << i->first << "</a> ";
+ }
+ ss << '\n';
+ }
+ }
+ ss << '\n';
+ /*
+ ss << "HTTP <a "
+ "title=\"click for documentation on this http interface\""
+ "href=\"http://www.mongodb.org/display/DOCS/Http+Interface\">admin port</a>:" << _port << "<p>\n";
+ */
+
+ doUnlockedStuff(ss);
+
+ WebStatusPlugin::runAll( ss );
+
+ ss << "</body></html>\n";
+ responseMsg = ss.str();
+ headers.push_back( "Content-Type: text/html;charset=utf-8" );
+ }
+
+ void _rejectREST( string& responseMsg , int& responseCode, vector<string>& headers ) {
+ responseCode = 403;
+ stringstream ss;
+ ss << "REST is not enabled. use --rest to turn on.\n";
+ ss << "check that port " << _port << " is secured for the network too.\n";
+ responseMsg = ss.str();
+ headers.push_back( "Content-Type: text/plain;charset=utf-8" );
+ }
+
+ };
+ // ---
+
+ bool prisort( const Prioritizable * a , const Prioritizable * b ) {
+ return a->priority() < b->priority();
+ }
+
+ // -- status framework ---
+ WebStatusPlugin::WebStatusPlugin( const string& sectionName , double priority , const string& subheader )
+ : Prioritizable(priority), _name( sectionName ) , _subHeading( subheader ) {
+ if ( ! _plugins )
+ _plugins = new vector<WebStatusPlugin*>();
+ _plugins->push_back( this );
+ }
+
+ void WebStatusPlugin::initAll() {
+ if ( ! _plugins )
+ return;
+
+ sort( _plugins->begin(), _plugins->end() , prisort );
+
+ for ( unsigned i=0; i<_plugins->size(); i++ )
+ (*_plugins)[i]->init();
+ }
+
+ void WebStatusPlugin::runAll( stringstream& ss ) {
+ if ( ! _plugins )
+ return;
+
+ for ( unsigned i=0; i<_plugins->size(); i++ ) {
+ WebStatusPlugin * p = (*_plugins)[i];
+ ss << "<hr>\n"
+ << "<b>" << p->_name << "</b>";
+
+ ss << " " << p->_subHeading;
+
+ ss << "<br>\n";
+
+ p->run(ss);
+ }
+
+ }
+
+ vector<WebStatusPlugin*> * WebStatusPlugin::_plugins = 0;
+
+ // -- basic status plugins --
+
+ class LogPlugin : public WebStatusPlugin {
+ public:
+ LogPlugin() : WebStatusPlugin( "Log" , 100 ), _log(0) {
+ }
+
+ virtual void init() {
+ _log = RamLog::get( "global" );
+ if ( ! _log ) {
+ _log = new RamLog("global");
+ Logstream::get().addGlobalTee( _log );
+ }
+ }
+
+ virtual void run( stringstream& ss ) {
+ _log->toHTML( ss );
+ }
+ RamLog * _log;
+ };
+
+ LogPlugin * logPlugin = new LogPlugin();
+
+ // -- handler framework ---
+
+ DbWebHandler::DbWebHandler( const string& name , double priority , bool requiresREST )
+ : Prioritizable(priority), _name(name) , _requiresREST(requiresREST) {
+
+ {
+ // setup strings
+ _defaultUrl = "/";
+ _defaultUrl += name;
+
+ stringstream ss;
+ ss << name << " priority: " << priority << " rest: " << requiresREST;
+ _toString = ss.str();
+ }
+
+ {
+ // add to handler list
+ if ( ! _handlers )
+ _handlers = new vector<DbWebHandler*>();
+ _handlers->push_back( this );
+ sort( _handlers->begin() , _handlers->end() , prisort );
+ }
+ }
+
+ DbWebHandler * DbWebHandler::findHandler( const string& url ) {
+ if ( ! _handlers )
+ return 0;
+
+ for ( unsigned i=0; i<_handlers->size(); i++ ) {
+ DbWebHandler * h = (*_handlers)[i];
+ if ( h->handles( url ) )
+ return h;
+ }
+
+ return 0;
+ }
+
+ vector<DbWebHandler*> * DbWebHandler::_handlers = 0;
+
+ // --- basic handlers ---
+
+ class FavIconHandler : public DbWebHandler {
+ public:
+ FavIconHandler() : DbWebHandler( "favicon.ico" , 0 , false ) {}
+
+ virtual void handle( const char *rq, string url, BSONObj params,
+ string& responseMsg, int& responseCode,
+ vector<string>& headers, const SockAddr &from ) {
+ responseCode = 404;
+ headers.push_back( "Content-Type: text/plain;charset=utf-8" );
+ responseMsg = "no favicon\n";
+ }
+
+ } faviconHandler;
+
+ class StatusHandler : public DbWebHandler {
+ public:
+ StatusHandler() : DbWebHandler( "_status" , 1 , false ) {}
+
+ virtual void handle( const char *rq, string url, BSONObj params,
+ string& responseMsg, int& responseCode,
+ vector<string>& headers, const SockAddr &from ) {
+ headers.push_back( "Content-Type: application/json;charset=utf-8" );
+ responseCode = 200;
+
+ static vector<string> commands;
+ if ( commands.size() == 0 ) {
+ commands.push_back( "serverStatus" );
+ commands.push_back( "buildinfo" );
+ }
+
+ BSONObjBuilder buf(1024);
+
+ for ( unsigned i=0; i<commands.size(); i++ ) {
+ string cmd = commands[i];
+
+ Command * c = Command::findCommand( cmd );
+ assert( c );
+ assert( c->locktype() == 0 );
+
+ BSONObj co;
+ {
+ BSONObjBuilder b;
+ b.append( cmd , 1 );
+
+ if ( cmd == "serverStatus" && params["repl"].type() ) {
+ b.append( "repl" , atoi( params["repl"].valuestr() ) );
+ }
+
+ co = b.obj();
+ }
+
+ string errmsg;
+
+ BSONObjBuilder sub;
+ if ( ! c->run( "admin.$cmd" , co , 0, errmsg , sub , false ) )
+ buf.append( cmd , errmsg );
+ else
+ buf.append( cmd , sub.obj() );
+ }
+
+ responseMsg = buf.obj().jsonString();
+
+ }
+
+ } statusHandler;
+
+ class CommandListHandler : public DbWebHandler {
+ public:
+ CommandListHandler() : DbWebHandler( "_commands" , 1 , true ) {}
+
+ virtual void handle( const char *rq, string url, BSONObj params,
+ string& responseMsg, int& responseCode,
+ vector<string>& headers, const SockAddr &from ) {
+ headers.push_back( "Content-Type: text/html;charset=utf-8" );
+ responseCode = 200;
+
+ stringstream ss;
+ ss << start("Commands List");
+ ss << p( a("/", "back", "Home") );
+ ss << p( "<b>MongoDB List of <a href=\"http://www.mongodb.org/display/DOCS/Commands\">Commands</a></b>\n" );
+ const map<string, Command*> *m = Command::commandsByBestName();
+ ss << "S:slave-ok R:read-lock W:write-lock A:admin-only<br>\n";
+ ss << table();
+ ss << "<tr><th>Command</th><th>Attributes</th><th>Help</th></tr>\n";
+ for( map<string, Command*>::const_iterator i = m->begin(); i != m->end(); i++ )
+ i->second->htmlHelp(ss);
+ ss << _table() << _end();
+
+ responseMsg = ss.str();
+ }
+ } commandListHandler;
+
+ class CommandsHandler : public DbWebHandler {
+ public:
+ CommandsHandler() : DbWebHandler( "DUMMY COMMANDS" , 2 , true ) {}
+
+ bool _cmd( const string& url , string& cmd , bool& text, bo params ) const {
+ cmd = str::after(url, '/');
+ text = params["text"].boolean();
+ return true;
+ }
+
+ Command * _cmd( const string& cmd ) const {
+ const map<string,Command*> *m = Command::webCommands();
+ if( ! m )
+ return 0;
+
+ map<string,Command*>::const_iterator i = m->find(cmd);
+ if ( i == m->end() )
+ return 0;
+
+ return i->second;
+ }
+
+ virtual bool handles( const string& url ) const {
+ string cmd;
+ bool text;
+ if ( ! _cmd( url , cmd , text, bo() ) )
+ return false;
+ return _cmd(cmd) != 0;
+ }
+
+ virtual void handle( const char *rq, string url, BSONObj params,
+ string& responseMsg, int& responseCode,
+ vector<string>& headers, const SockAddr &from ) {
+ string cmd;
+ bool text = false;
+ assert( _cmd( url , cmd , text, params ) );
+ Command * c = _cmd( cmd );
+ assert( c );
+
+ BSONObj cmdObj = BSON( cmd << 1 );
+ Client& client = cc();
+
+ BSONObjBuilder result;
+ execCommand(c, client, 0, "admin.", cmdObj , result, false);
+
+ responseCode = 200;
+
+ string j = result.done().jsonString(Strict, text );
+ responseMsg = j;
+
+ if( text ) {
+ headers.push_back( "Content-Type: text/plain;charset=utf-8" );
+ responseMsg += '\n';
+ }
+ else {
+ headers.push_back( "Content-Type: application/json;charset=utf-8" );
+ }
+
+ }
+
+ } commandsHandler;
+
+ // --- external ----
+
+ void webServerThread(const AdminAccess* adminAccess) {
+ boost::scoped_ptr<const AdminAccess> adminAccessPtr(adminAccess); // adminAccess is owned here
+ Client::initThread("websvr");
+ const int p = cmdLine.port + 1000;
+ DbWebServer mini(cmdLine.bind_ip, p, adminAccessPtr.get());
+ mini.initAndListen();
+ cc().shutdown();
+ }
+
+} // namespace mongo
diff --git a/src/mongo/db/dbwebserver.h b/src/mongo/db/dbwebserver.h
new file mode 100644
index 00000000000..bdbcba2c07d
--- /dev/null
+++ b/src/mongo/db/dbwebserver.h
@@ -0,0 +1,85 @@
+/** @file dbwebserver.h
+ */
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "../util/admin_access.h"
+
+namespace mongo {
+
+ class Prioritizable {
+ public:
+ Prioritizable( double p ) : _priority(p) {}
+ double priority() const { return _priority; }
+ private:
+ double _priority;
+ };
+
+ class DbWebHandler : public Prioritizable {
+ public:
+ DbWebHandler( const string& name , double priority , bool requiresREST );
+ virtual ~DbWebHandler() {}
+
+ virtual bool handles( const string& url ) const { return url == _defaultUrl; }
+
+ virtual bool requiresREST( const string& url ) const { return _requiresREST; }
+
+ virtual void handle( const char *rq, // the full request
+ string url,
+ BSONObj params,
+ // set these and return them:
+ string& responseMsg,
+ int& responseCode,
+ vector<string>& headers, // if completely empty, content-type: text/html will be added
+ const SockAddr &from
+ ) = 0;
+
+ string toString() const { return _toString; }
+ static DbWebHandler * findHandler( const string& url );
+
+ private:
+ string _name;
+ bool _requiresREST;
+
+ string _defaultUrl;
+ string _toString;
+
+ static vector<DbWebHandler*> * _handlers;
+ };
+
+ class WebStatusPlugin : public Prioritizable {
+ public:
+ WebStatusPlugin( const string& sectionName , double priority , const string& subheader = "" );
+ virtual ~WebStatusPlugin() {}
+
+ virtual void run( stringstream& ss ) = 0;
+ /** called when the web server starts up */
+ virtual void init() = 0;
+
+ static void initAll();
+ static void runAll( stringstream& ss );
+ private:
+ string _name;
+ string _subHeading;
+ static vector<WebStatusPlugin*> * _plugins;
+
+ };
+
+ void webServerThread( const AdminAccess* admins );
+ string prettyHostName();
+
+} // namespace mongo
diff --git a/src/mongo/db/diskloc.h b/src/mongo/db/diskloc.h
new file mode 100644
index 00000000000..5295df3e260
--- /dev/null
+++ b/src/mongo/db/diskloc.h
@@ -0,0 +1,160 @@
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+/* @file diskloc.h
+
+ Storage subsystem management.
+ Lays out our datafiles on disk, manages disk space.
+*/
+
+#pragma once
+
+#include "jsobj.h"
+
+namespace mongo {
+
+ class Record;
+ class DeletedRecord;
+ class Extent;
+ class MongoDataFile;
+ class DiskLoc;
+
+ template< class Version > class BtreeBucket;
+
+#pragma pack(1)
+ /** represents a disk location/offset on disk in a database. 64 bits.
+ it is assumed these will be passed around by value a lot so don't do anything to make them large
+ (such as adding a virtual function)
+ */
+ class DiskLoc {
+ int _a; // this will be volume, file #, etc. but is a logical value that could be anything depending on the storage engine
+ int ofs;
+
+ public:
+
+ enum SentinelValues {
+ /* note NullOfs is different. todo clean up. see refs to NullOfs in code - use is valid but outside DiskLoc context so confusing as-is. */
+ NullOfs = -1,
+ MaxFiles=16000 // thus a limit of about 32TB of data per db
+ };
+
+ DiskLoc(int a, int Ofs) : _a(a), ofs(Ofs) { }
+ DiskLoc() { Null(); }
+ DiskLoc(const DiskLoc& l) {
+ _a=l._a;
+ ofs=l.ofs;
+ }
+
+ bool questionable() const {
+ return ofs < -1 ||
+ _a < -1 ||
+ _a > 524288;
+ }
+
+ bool isNull() const { return _a == -1; }
+ void Null() {
+ _a = -1;
+ ofs = 0; /* note NullOfs is different. todo clean up. see refs to NullOfs in code - use is valid but outside DiskLoc context so confusing as-is. */
+ }
+ void assertOk() { assert(!isNull()); }
+ void setInvalid() {
+ _a = -2;
+ ofs = 0;
+ }
+ bool isValid() const { return _a != -2; }
+
+ string toString() const {
+ if ( isNull() )
+ return "null";
+ stringstream ss;
+ ss << hex << _a << ':' << ofs;
+ return ss.str();
+ }
+
+ BSONObj toBSONObj() const { return BSON( "file" << _a << "offset" << ofs ); }
+
+ int a() const { return _a; }
+
+ int& GETOFS() { return ofs; }
+ int getOfs() const { return ofs; }
+ void set(int a, int b) {
+ _a=a;
+ ofs=b;
+ }
+
+ void inc(int amt) {
+ assert( !isNull() );
+ ofs += amt;
+ }
+
+ bool sameFile(DiskLoc b) {
+ return _a == b._a;
+ }
+
+ bool operator==(const DiskLoc& b) const {
+ return _a == b._a && ofs == b.ofs;
+ }
+ bool operator!=(const DiskLoc& b) const {
+ return !(*this==b);
+ }
+ const DiskLoc& operator=(const DiskLoc& b) {
+ _a=b._a;
+ ofs = b.ofs;
+ //assert(ofs!=0);
+ return *this;
+ }
+ int compare(const DiskLoc& b) const {
+ int x = _a - b._a;
+ if ( x )
+ return x;
+ return ofs - b.ofs;
+ }
+ bool operator<(const DiskLoc& b) const {
+ return compare(b) < 0;
+ }
+
+ /**
+ * Marks this disk loc for writing
+ * @returns a non const reference to this disk loc
+ * This function explicitly signals we are writing and casts away const
+ */
+ DiskLoc& writing() const; // see dur.h
+
+ /* Get the "thing" associated with this disk location.
+ it is assumed the object is what you say it is -- you must assure that
+ (think of this as an unchecked type cast)
+ Note: set your Context first so that the database to which the diskloc applies is known.
+ */
+ BSONObj obj() const;
+ Record* rec() const;
+ DeletedRecord* drec() const;
+ Extent* ext() const;
+
+ template< class V >
+ const BtreeBucket<V> * btree() const;
+
+ // Explicitly signals we are writing and casts away const
+ template< class V >
+ BtreeBucket<V> * btreemod() const;
+
+ /*MongoDataFile& pdf() const;*/
+ };
+#pragma pack()
+
+ const DiskLoc minDiskLoc(0, 1);
+ const DiskLoc maxDiskLoc(0x7fffffff, 0x7fffffff);
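+    /* illustrative only (not part of the original header):
+         DiskLoc loc( 3, 0x2f00 );   // file #3, offset 0x2f00
+         loc.toString();             // "3:2f00"  (hex <a>:<ofs>)
+         DiskLoc().isNull();         // true -- default construction yields the null location
+    */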
+
+} // namespace mongo
diff --git a/src/mongo/db/driverHelpers.cpp b/src/mongo/db/driverHelpers.cpp
new file mode 100644
index 00000000000..12aa01886c4
--- /dev/null
+++ b/src/mongo/db/driverHelpers.cpp
@@ -0,0 +1,62 @@
+// driverHelpers.cpp
+
+/**
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/**
+ this file has dbcommands that are for drivers
+ mostly helpers
+*/
+
+
+#include "pch.h"
+#include "jsobj.h"
+#include "pdfile.h"
+#include "namespace-inl.h"
+#include "commands.h"
+#include "cmdline.h"
+#include "btree.h"
+#include "curop-inl.h"
+#include "../util/background.h"
+#include "../scripting/engine.h"
+
+namespace mongo {
+
+ class BasicDriverHelper : public Command {
+ public:
+ BasicDriverHelper( const char * name ) : Command( name ) {}
+
+ virtual LockType locktype() const { return NONE; }
+ virtual bool slaveOk() const { return true; }
+ virtual bool slaveOverrideOk() { return true; }
+ };
+
+ class ObjectIdTest : public BasicDriverHelper {
+ public:
+ ObjectIdTest() : BasicDriverHelper( "driverOIDTest" ) {}
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ if ( cmdObj.firstElement().type() != jstOID ) {
+ errmsg = "not oid";
+ return false;
+ }
+
+ const OID& oid = cmdObj.firstElement().__oid();
+ result.append( "oid" , oid );
+ result.append( "str" , oid.str() );
+
+ return true;
+ }
+ } driverObjectIdTest;
+}
diff --git a/src/mongo/db/dur.cpp b/src/mongo/db/dur.cpp
new file mode 100644
index 00000000000..822fa5232c0
--- /dev/null
+++ b/src/mongo/db/dur.cpp
@@ -0,0 +1,840 @@
+// @file dur.cpp durability in the storage engine (crash-safeness / journaling)
+
+/**
+* Copyright (C) 2009 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+/*
+ phases:
+
+ PREPLOGBUFFER
+ we will build an output buffer ourself and then use O_DIRECT
+ we could be in read lock for this
+ for very large objects write directly to redo log in situ?
+ WRITETOJOURNAL
+ we could be unlocked (the main db lock that is...) for this, with sufficient care, but there is some complexity
+ have to handle falling behind which would use too much ram (going back into a read lock would suffice to stop that).
+ for now (1.7.5/1.8.0) we are in read lock which is not ideal.
+ WRITETODATAFILES
+ apply the writes back to the non-private MMF after they are for certain in redo log
+ REMAPPRIVATEVIEW
+ we could in a write lock quickly flip readers back to the main view, then stay in read lock and do our real
+ remapping. with many files (e.g., 1000), remapping could be time consuming (several ms), so we don't want
+ to be too frequent.
+ there could be a slow down immediately after remapping as fresh copy-on-writes for commonly written pages will
+ be required. so doing these remaps fractionally is helpful.
+
+ mutexes:
+
+ READLOCK dbMutex
+ LOCK groupCommitMutex
+ PREPLOGBUFFER()
+ READLOCK mmmutex
+ commitJob.reset()
+ UNLOCK dbMutex // now other threads can write
+ WRITETOJOURNAL()
+ WRITETODATAFILES()
+ UNLOCK mmmutex
+ UNLOCK groupCommitMutex
+
+ on the next write lock acquisition for dbMutex: // see MongoMutex::_acquiredWriteLock()
+ REMAPPRIVATEVIEW()
+
+ @see https://docs.google.com/drawings/edit?id=1TklsmZzm7ohIZkwgeK6rMvsdaR13KjtJYMsfLr175Zc
+*/
+
+#include "pch.h"
+#include "cmdline.h"
+#include "client.h"
+#include "dur.h"
+#include "dur_journal.h"
+#include "dur_commitjob.h"
+#include "dur_recover.h"
+#include "dur_stats.h"
+#include "../util/concurrency/race.h"
+#include "../util/mongoutils/hash.h"
+#include "../util/mongoutils/str.h"
+#include "../util/timer.h"
+
+using namespace mongoutils;
+
+namespace mongo {
+
+ namespace dur {
+
+ void PREPLOGBUFFER(JSectHeader& outParm);
+ void WRITETOJOURNAL(JSectHeader h, AlignedBuilder& uncompressed);
+ void WRITETODATAFILES(const JSectHeader& h, AlignedBuilder& uncompressed);
+
+ /** declared later in this file
+ only used in this file -- use DurableInterface::commitNow() outside
+ */
+ static void groupCommit();
+
+ CommitJob& commitJob = *(new CommitJob()); // don't destroy
+
+ Stats stats;
+
+ void Stats::S::reset() {
+ memset(this, 0, sizeof(*this));
+ }
+
+ Stats::Stats() {
+ _a.reset();
+ _b.reset();
+ curr = &_a;
+ _intervalMicros = 3000000;
+ }
+
+ Stats::S * Stats::other() {
+ return curr == &_a ? &_b : &_a;
+ }
+ string _CSVHeader();
+
+ string Stats::S::_CSVHeader() {
+ return "cmts jrnMB\twrDFMB\tcIWLk\tearly\tprpLgB wrToJ\twrToDF\trmpPrVw";
+ }
+
+ string Stats::S::_asCSV() {
+ stringstream ss;
+ ss <<
+ setprecision(2) <<
+ _commits << '\t' << fixed <<
+ _journaledBytes / 1000000.0 << '\t' <<
+ _writeToDataFilesBytes / 1000000.0 << '\t' <<
+ _commitsInWriteLock << '\t' <<
+ _earlyCommits << '\t' <<
+ (unsigned) (_prepLogBufferMicros/1000) << '\t' <<
+ (unsigned) (_writeToJournalMicros/1000) << '\t' <<
+ (unsigned) (_writeToDataFilesMicros/1000) << '\t' <<
+ (unsigned) (_remapPrivateViewMicros/1000);
+ return ss.str();
+ }
+
+ //int getAgeOutJournalFiles();
+ BSONObj Stats::S::_asObj() {
+ BSONObjBuilder b;
+ b <<
+ "commits" << _commits <<
+ "journaledMB" << _journaledBytes / 1000000.0 <<
+ "writeToDataFilesMB" << _writeToDataFilesBytes / 1000000.0 <<
+ "compression" << _journaledBytes / (_uncompressedBytes+1.0) <<
+ "commitsInWriteLock" << _commitsInWriteLock <<
+ "earlyCommits" << _earlyCommits <<
+ "timeMs" <<
+ BSON( "dt" << _dtMillis <<
+ "prepLogBuffer" << (unsigned) (_prepLogBufferMicros/1000) <<
+ "writeToJournal" << (unsigned) (_writeToJournalMicros/1000) <<
+ "writeToDataFiles" << (unsigned) (_writeToDataFilesMicros/1000) <<
+ "remapPrivateView" << (unsigned) (_remapPrivateViewMicros/1000)
+ );
+ /*int r = getAgeOutJournalFiles();
+ if( r == -1 )
+ b << "ageOutJournalFiles" << "mutex timeout";
+ if( r == 0 )
+ b << "ageOutJournalFiles" << false;*/
+ if( cmdLine.journalCommitInterval != 0 )
+ b << "journalCommitIntervalMs" << cmdLine.journalCommitInterval;
+ return b.obj();
+ }
+
+ BSONObj Stats::asObj() {
+ return other()->_asObj();
+ }
+
+ void Stats::rotate() {
+ unsigned long long now = curTimeMicros64();
+ unsigned long long dt = now - _lastRotate;
+ if( dt >= _intervalMicros && _intervalMicros ) {
+ // rotate
+ curr->_dtMillis = (unsigned) (dt/1000);
+ _lastRotate = now;
+ curr = other();
+ curr->reset();
+ }
+ }
+
+ void NonDurableImpl::setNoJournal(void *dst, void *src, unsigned len) {
+ memcpy(dst, src, len);
+ }
+
+ void DurableImpl::setNoJournal(void *dst, void *src, unsigned len) {
+ // we are at least read locked, so we need not worry about REMAPPRIVATEVIEW herein.
+ DEV d.dbMutex.assertAtLeastReadLocked();
+
+ MemoryMappedFile::makeWritable(dst, len);
+
+ // we enter the RecoveryJob mutex here, so that if WRITETODATAFILES is happening we do not
+ // conflict with it
+ scoped_lock lk1( RecoveryJob::get()._mx );
+
+ // we stay in this mutex for everything to work with DurParanoid/validateSingleMapMatches
+ //
+ // either of these mutexes also makes setNoJournal threadsafe, which is good as we call it from a read
+ // (not a write) lock in class SlaveTracking
+ //
+ scoped_lock lk( privateViews._mutex() );
+
+ size_t ofs;
+ MongoMMF *f = privateViews.find_inlock(dst, ofs);
+ assert(f);
+ void *w = (((char *)f->view_write())+ofs);
+ // first write it to the writable (file) view
+ memcpy(w, src, len);
+ if( memcmp(w, dst, len) ) {
+ // if we get here, a copy-on-write had previously occurred. so write it to the private view too
+ // to keep them in sync. we do this as we do not want to cause a copy on write unnecessarily.
+ memcpy(dst, src, len);
+ }
+ }
+
+ /** base declare write intent function that all the helpers call. */
+ void DurableImpl::declareWriteIntent(void *p, unsigned len) {
+ commitJob.note(p, len);
+ }
+
+ static DurableImpl* durableImpl = new DurableImpl();
+ static NonDurableImpl* nonDurableImpl = new NonDurableImpl();
+ DurableInterface* DurableInterface::_impl = nonDurableImpl;
+
+ void DurableInterface::enableDurability() {
+ assert(_impl == nonDurableImpl);
+ _impl = durableImpl;
+ }
+
+ void DurableInterface::disableDurability() {
+ assert(_impl == durableImpl);
+ massert(13616, "can't disable durability with pending writes", !commitJob.hasWritten());
+ _impl = nonDurableImpl;
+ }
+
+ bool DurableImpl::commitNow() {
+ stats.curr->_earlyCommits++;
+ groupCommit();
+ return true;
+ }
+
+ bool DurableImpl::awaitCommit() {
+ commitJob._notify.awaitBeyondNow();
+ return true;
+ }
+
+ /** Declare that a file has been created
+ Normally writes are applied only after journaling, for safety. But here the file
+ is created first, and the journal will just replay the creation if the create didn't
+ happen because of crashing.
+ */
+ void DurableImpl::createdFile(string filename, unsigned long long len) {
+ shared_ptr<DurOp> op( new FileCreatedOp(filename, len) );
+ commitJob.noteOp(op);
+ }
+
+ void* DurableImpl::writingPtr(void *x, unsigned len) {
+ void *p = x;
+ declareWriteIntent(p, len);
+ return p;
+ }
+
+ /** declare intent to write
+ @param ofs offset within buf at which we will write
+ @param len the length at ofs we will write
+ @return new buffer pointer.
+ */
+ void* DurableImpl::writingAtOffset(void *buf, unsigned ofs, unsigned len) {
+ char *p = (char *) buf;
+ declareWriteIntent(p+ofs, len);
+ return p;
+ }
+
+ void* DurableImpl::writingRangesAtOffsets(void *buf, const vector< pair< long long, unsigned > > &ranges ) {
+ char *p = (char *) buf;
+ for( vector< pair< long long, unsigned > >::const_iterator i = ranges.begin();
+ i != ranges.end(); ++i ) {
+ declareWriteIntent( p + i->first, i->second );
+ }
+ return p;
+ }
+
+ bool DurableImpl::aCommitIsNeeded() const {
+ DEV commitJob._nSinceCommitIfNeededCall = 0;
+ return commitJob.bytes() > UncommittedBytesLimit;
+ }
+
+ bool DurableImpl::commitIfNeeded() {
+ if ( !d.dbMutex.isWriteLocked() )
+ return false;
+
+ DEV commitJob._nSinceCommitIfNeededCall = 0;
+ if (commitJob.bytes() > UncommittedBytesLimit) { // should this also fire if CmdLine::DurAlwaysCommit?
+ stats.curr->_earlyCommits++;
+ groupCommit();
+ return true;
+ }
+ return false;
+ }
+
+ /** Used in _DEBUG builds to check that we didn't overwrite the last intent
+ that was declared. called just before writelock release. we check a few
+ bytes after the declared region to see if they changed.
+
+ @see MongoMutex::_releasedWriteLock
+
+ SLOW
+ */
+#if 0
+ void DurableImpl::debugCheckLastDeclaredWrite() {
+ static int n;
+ ++n;
+
+ assert(debug && cmdLine.dur);
+ if (commitJob.writes().empty())
+ return;
+ const WriteIntent &i = commitJob.lastWrite();
+ size_t ofs;
+ MongoMMF *mmf = privateViews.find(i.start(), ofs);
+ if( mmf == 0 )
+ return;
+ size_t past = ofs + i.length();
+ if( mmf->length() < past + 8 )
+ return; // too close to end of view
+ char *priv = (char *) mmf->getView();
+ char *writ = (char *) mmf->view_write();
+ unsigned long long *a = (unsigned long long *) (priv+past);
+ unsigned long long *b = (unsigned long long *) (writ+past);
+ if( *a != *b ) {
+                for( set<WriteIntent>::iterator it(commitJob.writes().begin()), end(commitJob.writes().end()); it != end; ++it ) {
+ const WriteIntent& wi = *it;
+ char *r1 = (char*) wi.start();
+ char *r2 = (char*) wi.end();
+ if( r1 <= (((char*)a)+8) && r2 > (char*)a ) {
+ //log() << "it's ok " << wi.p << ' ' << wi.len << endl;
+ return;
+ }
+ }
+ log() << "journal data after write area " << i.start() << " does not agree" << endl;
+ log() << " was: " << ((void*)b) << " " << hexdump((char*)b, 8) << endl;
+ log() << " now: " << ((void*)a) << " " << hexdump((char*)a, 8) << endl;
+ log() << " n: " << n << endl;
+ log() << endl;
+ }
+ }
+#endif
+
+ // Functor to be called over all MongoFiles
+
+ class validateSingleMapMatches {
+ public:
+ validateSingleMapMatches(unsigned long long& bytes) :_bytes(bytes) {}
+ void operator () (MongoFile *mf) {
+ if( mf->isMongoMMF() ) {
+ MongoMMF *mmf = (MongoMMF*) mf;
+ const unsigned char *p = (const unsigned char *) mmf->getView();
+ const unsigned char *w = (const unsigned char *) mmf->view_write();
+
+ if (!p || !w) return; // File not fully opened yet
+
+ _bytes += mmf->length();
+
+ assert( mmf->length() == (unsigned) mmf->length() );
+ {
+ scoped_lock lk( privateViews._mutex() ); // see setNoJournal
+ if (memcmp(p, w, (unsigned) mmf->length()) == 0)
+ return; // next file
+ }
+
+ unsigned low = 0xffffffff;
+ unsigned high = 0;
+ log() << "DurParanoid mismatch in " << mmf->filename() << endl;
+ int logged = 0;
+ unsigned lastMismatch = 0xffffffff;
+ for( unsigned i = 0; i < mmf->length(); i++ ) {
+ if( p[i] != w[i] ) {
+ if( lastMismatch != 0xffffffff && lastMismatch+1 != i )
+ log() << endl; // separate blocks of mismatches
+ lastMismatch= i;
+ if( ++logged < 60 ) {
+ if( logged == 1 )
+ log() << "ofs % 628 = 0x" << hex << (i%628) << endl; // for .ns files to find offset in record
+ stringstream ss;
+ ss << "mismatch ofs:" << hex << i << "\tfilemap:" << setw(2) << (unsigned) w[i] << "\tprivmap:" << setw(2) << (unsigned) p[i];
+ if( p[i] > 32 && p[i] <= 126 )
+ ss << '\t' << p[i];
+ log() << ss.str() << endl;
+ }
+ if( logged == 60 )
+ log() << "..." << endl;
+ if( i < low ) low = i;
+ if( i > high ) high = i;
+ }
+ }
+ if( low != 0xffffffff ) {
+ std::stringstream ss;
+ ss << "journal error warning views mismatch " << mmf->filename() << ' ' << (hex) << low << ".." << high << " len:" << high-low+1;
+ log() << ss.str() << endl;
+ log() << "priv loc: " << (void*)(p+low) << ' ' << endl;
+ set<WriteIntent>& b = commitJob.writes();
+ (void)b; // mark as unused. Useful for inspection in debugger
+
+ // should we abort() here so this isn't unnoticed in some circumstances?
+ massert(13599, "Written data does not match in-memory view. Missing WriteIntent?", false);
+ }
+ }
+ }
+ private:
+ unsigned long long& _bytes;
+ };
+
+ /** (SLOW) diagnostic to check that the private view and the non-private view are in sync.
+ */
+ void debugValidateAllMapsMatch() {
+ if( ! (cmdLine.durOptions & CmdLine::DurParanoid) )
+ return;
+
+ unsigned long long bytes = 0;
+ Timer t;
+ MongoFile::forEach(validateSingleMapMatches(bytes));
+ OCCASIONALLY log() << "DurParanoid map check " << t.millis() << "ms for " << (bytes / (1024*1024)) << "MB" << endl;
+ }
+
+ extern size_t privateMapBytes;
+
+ static void _REMAPPRIVATEVIEW() {
+ // todo: Consider using ProcessInfo herein and watching for getResidentSize to drop. that could be a way
+ // to assure very good behavior here.
+
+ static unsigned startAt;
+ static unsigned long long lastRemap;
+
+ LOG(4) << "journal REMAPPRIVATEVIEW" << endl;
+
+ d.dbMutex.assertWriteLocked();
+ d.dbMutex._remapPrivateViewRequested = false;
+ assert( !commitJob.hasWritten() );
+
+ // we want to remap all private views about every 2 seconds. there could be ~1000 views so
+ // we do a little each pass; beyond the remap time, more significantly, there will be copy on write
+ // faults after remapping, so doing a little bit at a time will avoid big load spikes on
+ // remapping.
+ unsigned long long now = curTimeMicros64();
+ double fraction = (now-lastRemap)/2000000.0;
+ if( cmdLine.durOptions & CmdLine::DurAlwaysRemap )
+ fraction = 1;
+ lastRemap = now;
+
+ LockMongoFilesShared lk;
+ set<MongoFile*>& files = MongoFile::getAllFiles();
+ unsigned sz = files.size();
+ if( sz == 0 )
+ return;
+
+ {
+ // be careful not to use too much memory if the write rate is
+ // extremely high
+ double f = privateMapBytes / ((double)UncommittedBytesLimit);
+ if( f > fraction ) {
+ fraction = f;
+ }
+ privateMapBytes = 0;
+ }
+
+ unsigned ntodo = (unsigned) (sz * fraction);
+ if( ntodo < 1 ) ntodo = 1;
+ if( ntodo > sz ) ntodo = sz;
+
+ const set<MongoFile*>::iterator b = files.begin();
+ const set<MongoFile*>::iterator e = files.end();
+ set<MongoFile*>::iterator i = b;
+ // skip to our starting position
+ for( unsigned x = 0; x < startAt; x++ ) {
+ i++;
+ if( i == e ) i = b;
+ }
+ unsigned startedAt = startAt;
+ startAt = (startAt + ntodo) % sz; // mark where to start next time
+
+ Timer t;
+ for( unsigned x = 0; x < ntodo; x++ ) {
+ dassert( i != e );
+ if( (*i)->isMongoMMF() ) {
+ MongoMMF *mmf = (MongoMMF*) *i;
+ assert(mmf);
+ if( mmf->willNeedRemap() ) {
+ mmf->willNeedRemap() = false;
+ mmf->remapThePrivateView();
+ }
+ i++;
+ if( i == e ) i = b;
+ }
+ }
+ LOG(2) << "journal REMAPPRIVATEVIEW done startedAt: " << startedAt << " n:" << ntodo << ' ' << t.millis() << "ms" << endl;
+ }
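+
+        /* worked example (hypothetical numbers): with 1000 mapped files and ~100ms since the last remap,
+           fraction == 0.05, so ntodo == 50 -- about 5% of the views are remapped on this pass (assuming
+           the privateMapBytes adjustment doesn't push the fraction higher), and startAt advances so the
+           next pass continues where this one left off. */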
+
+ /** We need to remap the private views periodically. otherwise they would become very large.
+ Call within write lock. See top of file for more commentary.
+ */
+ void REMAPPRIVATEVIEW() {
+ Timer t;
+ _REMAPPRIVATEVIEW();
+ stats.curr->_remapPrivateViewMicros += t.micros();
+ }
+
+ // lock order: dbMutex first, then this
+ mutex groupCommitMutex("groupCommit");
+
+ bool _groupCommitWithLimitedLocks() {
+
+ int p = 0;
+ LOG(4) << "groupcommitll " << p++ << endl;
+
+ scoped_ptr<ExcludeAllWrites> lk1( new ExcludeAllWrites() );
+
+ LOG(4) << "groupcommitll " << p++ << endl;
+
+ scoped_lock lk2(groupCommitMutex);
+
+ LOG(4) << "groupcommitll " << p++ << endl;
+
+ commitJob.beginCommit();
+
+ if( !commitJob.hasWritten() ) {
+                // getlasterror request could have come after the data was already committed
+ commitJob.notifyCommitted();
+ return true;
+ }
+
+ LOG(4) << "groupcommitll " << p++ << endl;
+
+ JSectHeader h;
+ PREPLOGBUFFER(h); // need to be in readlock (writes excluded) for this
+
+ LOG(4) << "groupcommitll " << p++ << endl;
+
+ LockMongoFilesShared lk3;
+
+ LOG(4) << "groupcommitll " << p++ << endl;
+
+ unsigned abLen = commitJob._ab.len();
+ commitJob.reset(); // must be reset before allowing anyone to write
+ DEV assert( !commitJob.hasWritten() );
+
+ LOG(4) << "groupcommitll " << p++ << endl;
+
+ // release the readlock -- allowing others to now write while we are writing to the journal (etc.)
+ lk1.reset();
+
+ LOG(4) << "groupcommitll " << p++ << endl;
+
+ // ****** now other threads can do writes ******
+
+ WRITETOJOURNAL(h, commitJob._ab);
+ assert( abLen == commitJob._ab.len() ); // a check that no one touched the builder while we were doing work. if so, our locking is wrong.
+
+ LOG(4) << "groupcommitll " << p++ << endl;
+
+ // data is now in the journal, which is sufficient for acknowledging getLastError.
+ // (ok to crash after that)
+ commitJob.notifyCommitted();
+
+ LOG(4) << "groupcommitll " << p++ << " WRITETODATAFILES()" << endl;
+
+ WRITETODATAFILES(h, commitJob._ab);
+ assert( abLen == commitJob._ab.len() ); // check again wasn't modded
+ commitJob._ab.reset();
+
+ LOG(4) << "groupcommitll " << p++ << endl;
+
+ // can't : d.dbMutex._remapPrivateViewRequested = true;
+
+ return true;
+ }
+
+ /** @return true if committed; false if lock acquisition timed out (we only try for a read lock herein and only wait for a certain duration). */
+ bool groupCommitWithLimitedLocks() {
+ try {
+ return _groupCommitWithLimitedLocks();
+ }
+ catch(DBException& e ) {
+ log() << "dbexception in groupCommitLL causing immediate shutdown: " << e.toString() << endl;
+ mongoAbort("dur1");
+ }
+ catch(std::ios_base::failure& e) {
+ log() << "ios_base exception in groupCommitLL causing immediate shutdown: " << e.what() << endl;
+ mongoAbort("dur2");
+ }
+ catch(std::bad_alloc& e) {
+ log() << "bad_alloc exception in groupCommitLL causing immediate shutdown: " << e.what() << endl;
+ mongoAbort("dur3");
+ }
+ catch(std::exception& e) {
+ log() << "exception in dur::groupCommitLL causing immediate shutdown: " << e.what() << endl;
+ mongoAbort("dur4");
+ }
+ return false;
+ }
+
+ static void _groupCommit() {
+
+ LOG(4) << "_groupCommit " << endl;
+
+ // we need to be at least read locked on the dbMutex so that we know the write intent data
+ // structures are not changing while we work
+ d.dbMutex.assertAtLeastReadLocked();
+
+ commitJob.beginCommit();
+
+ if( !commitJob.hasWritten() ) {
+                // getlasterror request could have come after the data was already committed
+ commitJob.notifyCommitted();
+ return;
+ }
+
+ // we need to make sure two group commits aren't running at the same time
+ // (and we are only read locked in the dbMutex, so it could happen)
+ scoped_lock lk(groupCommitMutex);
+
+ JSectHeader h;
+ PREPLOGBUFFER(h);
+
+ // todo : write to the journal outside locks, as this write can be slow.
+ // however, be careful then about remapprivateview as that cannot be done
+ // if new writes are then pending in the private maps.
+ WRITETOJOURNAL(h, commitJob._ab);
+
+ // data is now in the journal, which is sufficient for acknowledging getLastError.
+ // (ok to crash after that)
+ commitJob.notifyCommitted();
+
+ WRITETODATAFILES(h, commitJob._ab);
+ debugValidateAllMapsMatch();
+
+ commitJob.reset();
+ commitJob._ab.reset();
+
+ // REMAPPRIVATEVIEW
+ //
+ // remapping private views must occur after WRITETODATAFILES otherwise
+ // we wouldn't see newly written data on reads.
+ //
+ DEV assert( !commitJob.hasWritten() );
+ if( !d.dbMutex.isWriteLocked() ) {
+                // this needs to be done in a write lock (as there is a short window during remapping when each view
+                // might not exist) thus we do it on the next acquisition of that instead of here (there is no
+                // rush if you aren't writing anyway -- but when it is done, it must happen before any uncommitted
+                // writes occur). If desired, perhaps this can be eliminated on posix as it may be that the remap
+                // is race-free there.
+ //
+ d.dbMutex._remapPrivateViewRequested = true;
+ }
+ else {
+ stats.curr->_commitsInWriteLock++;
+ // however, if we are already write locked, we must do it now -- up the call tree someone
+ // may do a write without a new lock acquisition. this can happen when MongoMMF::close() calls
+ // this method when a file (and its views) is about to go away.
+ //
+ REMAPPRIVATEVIEW();
+ }
+ }
+
+ /** locking: in read lock when called
+ or, for early commits (commitIfNeeded), in write lock
+ @see MongoMMF::close()
+ */
+ static void groupCommit() {
+ try {
+ _groupCommit();
+ }
+ catch(DBException& e ) {
+ log() << "dbexception in groupCommit causing immediate shutdown: " << e.toString() << endl;
+ mongoAbort("gc1");
+ }
+ catch(std::ios_base::failure& e) {
+ log() << "ios_base exception in groupCommit causing immediate shutdown: " << e.what() << endl;
+ mongoAbort("gc2");
+ }
+ catch(std::bad_alloc& e) {
+ log() << "bad_alloc exception in groupCommit causing immediate shutdown: " << e.what() << endl;
+ mongoAbort("gc3");
+ }
+ catch(std::exception& e) {
+ log() << "exception in dur::groupCommit causing immediate shutdown: " << e.what() << endl;
+ mongoAbort("gc4");
+ }
+ LOG(4) << "groupCommit end" << endl;
+ }
+
+ static void go() {
+ const int N = 10;
+ static int n;
+ if( privateMapBytes < UncommittedBytesLimit && ++n % N && (cmdLine.durOptions&CmdLine::DurAlwaysRemap)==0 ) {
+                // the limited locks version doesn't do any remapprivateview at all, so only try it if privateMapBytes
+                // is in an acceptable range. also, every Nth commit we do everything so we can do some remapping;
+                // remapping a lot all at once could cause jitter from a large burst of copy-on-writes.
+ if( groupCommitWithLimitedLocks() )
+ return;
+ }
+ else {
+ readlocktry lk("", 1000);
+ if( lk.got() ) {
+ groupCommit();
+ return;
+ }
+ }
+
+ // starvation on read locks could occur. so if read lock acquisition is slow, try to get a
+ // write lock instead. otherwise journaling could be delayed too long (too much data will
+        // not accumulate though, as commitIfNeeded logic will have executed in the meantime if there
+        // have been writes)
+ writelock lk;
+ groupCommit();
+ }
+
+ /** called when a MongoMMF is closing -- we need to go ahead and group commit in that case before its
+ views disappear
+ */
+ void closingFileNotification() {
+ if (!cmdLine.dur)
+ return;
+
+ if( d.dbMutex.atLeastReadLocked() ) {
+ groupCommit();
+ }
+ else {
+ assert( inShutdown() );
+ if( commitJob.hasWritten() ) {
+ log() << "journal warning files are closing outside locks with writes pending" << endl;
+ }
+ }
+ }
+
+ extern int groupCommitIntervalMs;
+ boost::filesystem::path getJournalDir();
+
+ void durThread() {
+ Client::initThread("journal");
+
+ bool samePartition = true;
+ try {
+ const string dbpathDir = boost::filesystem::path(dbpath).native_directory_string();
+ samePartition = onSamePartition(getJournalDir().string(), dbpathDir);
+ }
+ catch(...) {
+ }
+
+ while( !inShutdown() ) {
+ RACECHECK
+
+ unsigned ms = cmdLine.journalCommitInterval;
+ if( ms == 0 ) {
+ // use default
+ ms = samePartition ? 100 : 30;
+ }
+
+ unsigned oneThird = (ms / 3) + 1; // +1 so never zero
+
+ try {
+ stats.rotate();
+
+ // we do this in a couple blocks (the invoke()), which makes it a tiny bit faster (only a little) on throughput,
+ // but is likely also less spiky on our cpu usage, which is good.
+
+ // commit sooner if one or more getLastError j:true is pending
+ sleepmillis(oneThird);
+ for( unsigned i = 1; i <= 2; i++ ) {
+ if( commitJob._notify.nWaiting() )
+ break;
+ commitJob.wi()._deferred.invoke();
+ sleepmillis(oneThird);
+ }
+
+ go();
+ }
+ catch(std::exception& e) {
+ log() << "exception in durThread causing immediate shutdown: " << e.what() << endl;
+ mongoAbort("exception in durThread");
+ }
+ }
+ cc().shutdown();
+ }
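+
+        /* timing sketch (default, same-partition case): ms == 100, so oneThird == 34; each pass sleeps
+           ~34ms up to three times, breaking out early if a getLastError j:true waiter shows up, then
+           calls go() -- i.e. roughly one group commit every ~100ms. */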
+
+ void recover();
+
+ unsigned notesThisLock = 0;
+
+ void releasingWriteLock() {
+ DEV notesThisLock = 0;
+ // implicit commitIfNeeded check on each write unlock
+ DEV commitJob._nSinceCommitIfNeededCall = 0; // implicit commit if needed
+ if( commitJob.bytes() > UncommittedBytesLimit || cmdLine.durOptions & CmdLine::DurAlwaysCommit ) {
+ stats.curr->_earlyCommits++;
+ groupCommit();
+ }
+ }
+
+ void preallocateFiles();
+
+ /** at startup, recover, and then start the journal threads */
+ void startup() {
+ if( !cmdLine.dur )
+ return;
+
+#if defined(_DURABLEDEFAULTON)
+ DEV {
+ if( time(0) & 1 ) {
+ cmdLine.durOptions |= CmdLine::DurAlwaysCommit;
+ log() << "_DEBUG _DURABLEDEFAULTON : forcing DurAlwaysCommit mode for this run" << endl;
+ }
+ if( time(0) & 2 ) {
+ cmdLine.durOptions |= CmdLine::DurAlwaysRemap;
+ log() << "_DEBUG _DURABLEDEFAULTON : forcing DurAlwaysRemap mode for this run" << endl;
+ }
+ }
+#endif
+
+ DurableInterface::enableDurability();
+
+ journalMakeDir();
+ try {
+ recover();
+ }
+ catch(...) {
+ log() << "exception during recovery" << endl;
+ throw;
+ }
+
+ preallocateFiles();
+
+ boost::thread t(durThread);
+ }
+
+ void DurableImpl::syncDataAndTruncateJournal() {
+ d.dbMutex.assertWriteLocked();
+
+ // a commit from the commit thread won't begin while we are in the write lock,
+ // but it may already be in progress and the end of that work is done outside
+ // (dbMutex) locks. This line waits for that to complete if already underway.
+ {
+ scoped_lock lk(groupCommitMutex);
+ }
+
+ groupCommit();
+ MongoFile::flushAll(true);
+ journalCleanup();
+
+ assert(!haveJournalFiles()); // Double check post-conditions
+ }
+
+ } // namespace dur
+
+} // namespace mongo
diff --git a/src/mongo/db/dur.h b/src/mongo/db/dur.h
new file mode 100644
index 00000000000..f06ff500195
--- /dev/null
+++ b/src/mongo/db/dur.h
@@ -0,0 +1,209 @@
+// @file dur.h durability support
+
+#pragma once
+
+#include "diskloc.h"
+#include "mongommf.h"
+
+namespace mongo {
+
+ class NamespaceDetails;
+
+ void mongoAbort(const char *msg);
+ void abort(); // not defined -- use mongoAbort() instead
+
+ namespace dur {
+
+ // a smaller limit is likely better on 32 bit
+#if defined(__i386__) || defined(_M_IX86)
+ const unsigned UncommittedBytesLimit = 50 * 1024 * 1024;
+#else
+ const unsigned UncommittedBytesLimit = 100 * 1024 * 1024;
+#endif
+
+ /** Call during startup so durability module can initialize
+ Throws if fatal error
+ Does nothing if cmdLine.dur is false
+ */
+ void startup();
+
+ class DurableInterface : boost::noncopyable {
+ public:
+ virtual ~DurableInterface() { log() << "ERROR warning ~DurableInterface not intended to be called" << endl; }
+
+ /** Declare that a file has been created
+ Normally writes are applied only after journaling, for safety. But here the file
+ is created first, and the journal will just replay the creation if the create didn't
+ happen because of crashing.
+ */
+ virtual void createdFile(string filename, unsigned long long len) = 0;
+
+ /** Declarations of write intent.
+
+ Use these methods to declare "i'm about to write to x and it should be logged for redo."
+
+ Failure to call writing...() is checked in _DEBUG mode by using a read only mapped view
+ (i.e., you'll segfault if the code is covered in that situation). The _DEBUG check doesn't
+ verify that your length is correct though.
+ */
+
+ /** declare intent to write to x for up to len
+ @return pointer where to write. this is modified when testIntent is true.
+ */
+ virtual void* writingPtr(void *x, unsigned len) = 0;
+
+ /** declare write intent; should already be in the write view to work correctly when testIntent is true.
+ if you aren't, use writingPtr() instead.
+ */
+ virtual void declareWriteIntent(void *x, unsigned len) = 0;
+
+ /** declare intent to write
+ @param ofs offset within buf at which we will write
+ @param len the length at ofs we will write
+ @return new buffer pointer. this is modified when testIntent is true.
+ */
+ virtual void* writingAtOffset(void *buf, unsigned ofs, unsigned len) = 0;
+
+ /** declare intent to write
+ @param ranges vector of pairs representing ranges. Each pair
+ comprises an offset from buf where a range begins, then the
+ range length.
+ @return new buffer pointer. this is modified when testIntent is true.
+ */
+ virtual void* writingRangesAtOffsets(void *buf, const vector< pair< long long, unsigned > > &ranges ) = 0;
+
+ /** Wait for acknowledgement of the next group commit.
+ @return true if --dur is on. There will be delay.
+            @return true if --dur is on. There will be a delay.
+ */
+ virtual bool awaitCommit() = 0;
+
+ /** Commit immediately.
+
+ Generally, you do not want to do this often, as highly granular committing may affect
+ performance.
+
+ Does not return until the commit is complete.
+
+ You must be at least read locked when you call this. Ideally, you are not write locked
+ and then read operations can occur concurrently.
+
+ @return true if --dur is on.
+            @return false if --dur is off. (in which case there is no action)
+ */
+ virtual bool commitNow() = 0;
+
+        /** Commit if enough bytes have been modified. Current threshold is UncommittedBytesLimit
+            (50MB on 32 bit, 100MB otherwise).
+
+            The idea is that long running write operations that don't yield
+            (like creating an index or update with $atomic) can call this
+            whenever the db is in a sane state and it will prevent commits
+            from growing too large.
+            @return true if committed
+ */
+ virtual bool commitIfNeeded() = 0;
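+
+        /** illustrative usage sketch (hypothetical caller, assuming the write lock is held):
+
+                while( more work to do ) {
+                    ...declare intents via writing()/writingPtr() and modify the data...
+                    getDur().commitIfNeeded();   // group commits early once ~UncommittedBytesLimit is pending
+                }
+        */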
+
+ /** @return true if time to commit but does NOT do a commit */
+ virtual bool aCommitIsNeeded() const = 0;
+
+ /** Declare write intent for a DiskLoc. @see DiskLoc::writing() */
+ inline DiskLoc& writingDiskLoc(DiskLoc& d) { return *((DiskLoc*) writingPtr(&d, sizeof(d))); }
+
+ /** Declare write intent for an int */
+ inline int& writingInt(const int& d) { return *((int*) writingPtr((int*) &d, sizeof(d))); }
+
+ /** "assume i've already indicated write intent, let me write"
+ redeclaration is fine too, but this is faster.
+ */
+ template <typename T>
+ inline
+ T* alreadyDeclared(T *x) {
+#if defined(_TESTINTENT)
+ return (T*) MongoMMF::switchToPrivateView(x);
+#else
+ return x;
+#endif
+ }
+
+ /** declare intent to write to x for sizeof(*x) */
+ template <typename T>
+ inline
+ T* writing(T *x) {
+ return (T*) writingPtr(x, sizeof(T));
+ }
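+
+        /** illustrative usage sketch (hypothetical caller, assuming a write lock and that dest points
+            into a private, journaled view):
+
+                memcpy( getDur().writingPtr(dest, len), src, len );
+
+            the intent is noted now; the bytes reach the journal on the next group commit and are then
+            applied to the shared view by WRITETODATAFILES.
+        */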
+
+ /** write something that doesn't have to be journaled, as this write is "unimportant".
+ a good example is paddingFactor.
+ can be thought of as memcpy(dst,src,len)
+ the dur implementation acquires a mutex in this method, so do not assume it is faster
+ without measuring!
+ */
+ virtual void setNoJournal(void *dst, void *src, unsigned len) = 0;
+
+ /** Commits pending changes, flushes all changes to main data
+ files, then removes the journal.
+
+ This is useful as a "barrier" to ensure that writes before this
+ call will never go through recovery and be applied to files
+ that have had changes made after this call applied.
+ */
+ virtual void syncDataAndTruncateJournal() = 0;
+
+ static DurableInterface& getDur() { return *_impl; }
+
+ private:
+ /** Intentionally unimplemented method.
+ It's very easy to manipulate Record::data open ended. Thus a call to writing(Record*) is suspect.
+ This will override the templated version and yield an unresolved external.
+ */
+ Record* writing(Record* r);
+ /** Intentionally unimplemented method. BtreeBuckets are allocated in buffers larger than sizeof( BtreeBucket ). */
+// BtreeBucket* writing( BtreeBucket* );
+ /** Intentionally unimplemented method. NamespaceDetails may be based on references to 'Extra' objects. */
+ NamespaceDetails* writing( NamespaceDetails* );
+
+ static DurableInterface* _impl; // NonDurableImpl at startup()
+ static void enableDurability(); // makes _impl a DurableImpl
+ static void disableDurability(); // makes _impl a NonDurableImpl
+
+ // these need to be able to enable/disable Durability
+ friend void startup();
+ friend class TempDisableDurability;
+ }; // class DurableInterface
+
+ class NonDurableImpl : public DurableInterface {
+ void* writingPtr(void *x, unsigned len) { return x; }
+ void* writingAtOffset(void *buf, unsigned ofs, unsigned len) { return buf; }
+ void* writingRangesAtOffsets(void *buf, const vector< pair< long long, unsigned > > &ranges) { return buf; }
+ void declareWriteIntent(void *, unsigned) { }
+ void createdFile(string filename, unsigned long long len) { }
+ bool awaitCommit() { return false; }
+ bool commitNow() { return false; }
+ bool commitIfNeeded() { return false; }
+ bool aCommitIsNeeded() const { return false; }
+ void setNoJournal(void *dst, void *src, unsigned len);
+ void syncDataAndTruncateJournal() {}
+ };
+
+ class DurableImpl : public DurableInterface {
+ void* writingPtr(void *x, unsigned len);
+ void* writingAtOffset(void *buf, unsigned ofs, unsigned len);
+ void* writingRangesAtOffsets(void *buf, const vector< pair< long long, unsigned > > &ranges);
+ void declareWriteIntent(void *, unsigned);
+ void createdFile(string filename, unsigned long long len);
+ bool awaitCommit();
+ bool commitNow();
+ bool aCommitIsNeeded() const;
+ bool commitIfNeeded();
+ void setNoJournal(void *dst, void *src, unsigned len);
+ void syncDataAndTruncateJournal();
+ };
+
+ } // namespace dur
+
+ inline dur::DurableInterface& getDur() { return dur::DurableInterface::getDur(); }
+
+ /** declare that we are modifying a diskloc and this is a datafile write. */
+ inline DiskLoc& DiskLoc::writing() const { return getDur().writingDiskLoc(*const_cast< DiskLoc * >( this )); }
+
+}
diff --git a/src/mongo/db/dur_commitjob.cpp b/src/mongo/db/dur_commitjob.cpp
new file mode 100644
index 00000000000..5a9e9cb5679
--- /dev/null
+++ b/src/mongo/db/dur_commitjob.cpp
@@ -0,0 +1,240 @@
+/* @file dur_commitjob.cpp */
+
+/**
+* Copyright (C) 2009 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "dur_commitjob.h"
+#include "dur_stats.h"
+#include "taskqueue.h"
+#include "client.h"
+
+namespace mongo {
+
+ namespace dur {
+
+ BOOST_STATIC_ASSERT( UncommittedBytesLimit > BSONObjMaxInternalSize * 3 );
+ BOOST_STATIC_ASSERT( sizeof(void*)==4 || UncommittedBytesLimit > BSONObjMaxInternalSize * 6 );
+
+ void Writes::D::go(const Writes::D& d) {
+ commitJob.wi()._insertWriteIntent(d.p, d.len);
+ }
+
+ void WriteIntent::absorb(const WriteIntent& other) {
+ dassert(overlaps(other));
+
+ void* newStart = min(start(), other.start());
+ p = max(p, other.p);
+ len = (char*)p - (char*)newStart;
+
+ dassert(contains(other));
+ }
+
+ void Writes::clear() {
+ d.dbMutex.assertAtLeastReadLocked();
+
+ _alreadyNoted.clear();
+ _writes.clear();
+ _ops.clear();
+ _drained = false;
+#if defined(DEBUG_WRITE_INTENT)
+ cout << "_debug clear\n";
+ _debug.clear();
+#endif
+ }
+
+#if defined(DEBUG_WRITE_INTENT)
+ void assertAlreadyDeclared(void *p, int len) {
+ if( commitJob.wi()._debug[p] >= len )
+ return;
+ log() << "assertAlreadyDeclared fails " << (void*)p << " len:" << len << ' ' << commitJob.wi()._debug[p] << endl;
+ printStackTrace();
+ abort();
+ }
+#endif
+
+ void Writes::_insertWriteIntent(void* p, int len) {
+ WriteIntent wi(p, len);
+
+ if (_writes.empty()) {
+ _writes.insert(wi);
+ return;
+ }
+
+ typedef set<WriteIntent>::const_iterator iterator; // shorter
+
+ iterator closest = _writes.lower_bound(wi);
+ // closest.end() >= wi.end()
+
+ if ((closest != _writes.end() && closest->overlaps(wi)) || // high end
+ (closest != _writes.begin() && (--closest)->overlaps(wi))) { // low end
+ if (closest->contains(wi))
+ return; // nothing to do
+
+ // find overlapping range and merge into wi
+ iterator end(closest);
+ iterator begin(closest);
+ while ( end->overlaps(wi)) { wi.absorb(*end); ++end; if (end == _writes.end()) break; } // look forwards
+ while (begin->overlaps(wi)) { wi.absorb(*begin); if (begin == _writes.begin()) break; --begin; } // look backwards
+ if (!begin->overlaps(wi)) ++begin; // make inclusive
+
+ DEV { // ensure we're not deleting anything we shouldn't
+ for (iterator it(begin); it != end; ++it) {
+ assert(wi.contains(*it));
+ }
+ }
+
+ _writes.erase(begin, end);
+ _writes.insert(wi);
+
+ DEV { // ensure there are no overlaps
+ // this can be very slow - n^2 - so make it RARELY
+ RARELY {
+ for (iterator it(_writes.begin()), end(boost::prior(_writes.end())); it != end; ++it) {
+ assert(!it->overlaps(*boost::next(it)));
+ }
+ }
+ }
+ }
+ else { // no entries overlapping wi
+ _writes.insert(closest, wi);
+ }
+ }
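+
+        /* worked example (hypothetical addresses): with existing intents [0x1000,0x1080) and
+           [0x1070,0x10f0) in _writes, inserting [0x1040,0x10a0) overlaps both; the loops above
+           absorb the neighbors and the set ends up holding the single merged intent [0x1000,0x10f0). */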
+
+ /** note an operation other than a "basic write" */
+ void CommitJob::noteOp(shared_ptr<DurOp> p) {
+ d.dbMutex.assertWriteLocked();
+ dassert( cmdLine.dur );
+ cc()._hasWrittenThisPass = true;
+ if( !_hasWritten ) {
+ assert( !d.dbMutex._remapPrivateViewRequested );
+ _hasWritten = true;
+ }
+ _wi._ops.push_back(p);
+ }
+
+ size_t privateMapBytes = 0; // used by _REMAPPRIVATEVIEW to track how much / how fast to remap
+
+ void CommitJob::beginCommit() {
+ DEV d.dbMutex.assertAtLeastReadLocked();
+ _commitNumber = _notify.now();
+ stats.curr->_commits++;
+ }
+
+ void CommitJob::reset() {
+ _hasWritten = false;
+ _wi.clear();
+ privateMapBytes += _bytes;
+ _bytes = 0;
+ _nSinceCommitIfNeededCall = 0;
+ }
+
+ CommitJob::CommitJob() : _ab(4 * 1024 * 1024) , _hasWritten(false),
+ _bytes(0), _nSinceCommitIfNeededCall(0) {
+ _commitNumber = 0;
+ }
+
+ extern unsigned notesThisLock;
+
+ void CommitJob::note(void* p, int len) {
+ // from the point of view of the dur module, it would be fine (i think) to only
+            // be read locked here. but we must be at least read locked to avoid a race with
+            // remapprivateview
+ DEV notesThisLock++;
+ DEV d.dbMutex.assertWriteLocked();
+ dassert( cmdLine.dur );
+ cc()._hasWrittenThisPass = true;
+ if( !_wi._alreadyNoted.checkAndSet(p, len) ) {
+ MemoryMappedFile::makeWritable(p, len);
+
+ if( !_hasWritten ) {
+ // you can't be writing if one of these is pending, so this is a verification.
+ assert( !d.dbMutex._remapPrivateViewRequested ); // safe to assert here since it must be the first write in a write lock
+
+ // we don't bother doing a group commit when nothing is written, so we have a var to track that
+ _hasWritten = true;
+ }
+
+ /** tips for debugging:
+ if you have an incorrect diff between data files in different folders
+ (see jstests/dur/quick.js for example),
+ turn this on and see what is logged. if you have a copy of its output from before the
+                regression, a simple diff of these lines would likely tell you a lot.
+ */
+#if 0 && defined(_DEBUG)
+ {
+ static int n;
+ if( ++n < 10000 ) {
+ size_t ofs;
+ MongoMMF *mmf = privateViews._find(w.p, ofs);
+ if( mmf ) {
+ log() << "DEBUG note write intent " << w.p << ' ' << mmf->filename() << " ofs:" << hex << ofs << " len:" << w.len << endl;
+ }
+ else {
+ log() << "DEBUG note write intent " << w.p << ' ' << w.len << " NOT FOUND IN privateViews" << endl;
+ }
+ }
+ else if( n == 10000 ) {
+ log() << "DEBUG stopping write intent logging, too much to log" << endl;
+ }
+ }
+#endif
+
+ // remember intent. we will journal it in a bit
+ _wi.insertWriteIntent(p, len);
+ wassert( _wi._writes.size() < 2000000 );
+ //assert( _wi._writes.size() < 20000000 );
+
+ {
+ // a bit over conservative in counting pagebytes used
+ static size_t lastPos; // note this doesn't reset with each commit, but that is ok we aren't being that precise
+ size_t x = ((size_t) p) & ~0xfff; // round off to page address (4KB)
+ if( x != lastPos ) {
+ lastPos = x;
+ unsigned b = (len+4095) & ~0xfff;
+ _bytes += b;
+#if defined(_DEBUG)
+ _nSinceCommitIfNeededCall++;
+ if( _nSinceCommitIfNeededCall >= 80 ) {
+ if( _nSinceCommitIfNeededCall % 40 == 0 ) {
+ log() << "debug nsincecommitifneeded:" << _nSinceCommitIfNeededCall << " bytes:" << _bytes << endl;
+ if( _nSinceCommitIfNeededCall == 120 || _nSinceCommitIfNeededCall == 1200 ) {
+ log() << "_DEBUG printing stack given high nsinccommitifneeded number" << endl;
+ printStackTrace();
+ }
+ }
+ }
+#endif
+ if (_bytes > UncommittedBytesLimit * 3) {
+ static time_t lastComplain;
+ static unsigned nComplains;
+ // throttle logging
+ if( ++nComplains < 100 || time(0) - lastComplain >= 60 ) {
+ lastComplain = time(0);
+ warning() << "DR102 too much data written uncommitted " << _bytes/1000000.0 << "MB" << endl;
+ if( nComplains < 10 || nComplains % 10 == 0 ) {
+ // wassert makes getLastError show an error, so we just print stack trace
+ printStackTrace();
+ }
+ }
+ }
+ }
+ }
+ }
+ }
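+
+        /* worked example of the page accounting above (hypothetical values): note(p, 100) on a page
+           not seen on the previous call adds (100+4095) & ~0xfff == 4096 to _bytes; an immediately
+           following note on the same page is skipped (x == lastPos), so _bytes is a rough,
+           page-granular estimate rather than an exact byte count. */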
+
+ }
+}
diff --git a/src/mongo/db/dur_commitjob.h b/src/mongo/db/dur_commitjob.h
new file mode 100644
index 00000000000..bfc5e3c268f
--- /dev/null
+++ b/src/mongo/db/dur_commitjob.h
@@ -0,0 +1,220 @@
+/* @file dur_commitjob.h used by dur.cpp
+*/
+
+/**
+* Copyright (C) 2009 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../util/alignedbuilder.h"
+#include "../util/mongoutils/hash.h"
+#include "../util/concurrency/synchronization.h"
+#include "cmdline.h"
+#include "durop.h"
+#include "dur.h"
+#include "taskqueue.h"
+
+//#define DEBUG_WRITE_INTENT 1
+
+namespace mongo {
+ namespace dur {
+
+ /** declaration of an intent to write to a region of a memory mapped view
+ *
+ * We store the end rather than the start pointer to make operator< faster
+ * since that is heavily used in set lookup.
+ */
+ struct WriteIntent { /* copyable */
+ WriteIntent() : /*w_ptr(0), */ p(0) { }
+ WriteIntent(void *a, unsigned b) : /*w_ptr(0), */ p((char*)a+b), len(b) { }
+
+ void* start() const { return (char*)p - len; }
+ void* end() const { return p; }
+ unsigned length() const { return len; }
+
+ bool operator < (const WriteIntent& rhs) const { return end() < rhs.end(); }
+
+ // can they be merged?
+ bool overlaps(const WriteIntent& rhs) const {
+ return (start() <= rhs.end() && end() >= rhs.start());
+ }
+
+ // is merging necessary?
+ bool contains(const WriteIntent& rhs) const {
+ return (start() <= rhs.start() && end() >= rhs.end());
+ }
+
+ // merge into me
+ void absorb(const WriteIntent& other);
+
+ friend ostream& operator << (ostream& out, const WriteIntent& wi) {
+ return (out << "p: " << wi.p << " end: " << wi.end() << " len: " << wi.len);
+ }
+
+ //mutable void *w_ptr; // writable mapping of p.
+ // mutable because set::iterator is const but this isn't used in op<
+#if defined(_EXPERIMENTAL)
+ mutable unsigned ofsInJournalBuffer;
+#endif
+ private:
+ void *p; // intent to write up to p
+ unsigned len; // up to this len
+ };
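+
+        /* example (hypothetical values): WriteIntent(w, 16) with w == (void*)0x2000 stores p == 0x2010
+           and len == 16, so start() == 0x2000 and end() == 0x2010; the set in Writes orders intents by
+           these end addresses. */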
+
+ /** try to remember things we have already marked for journaling. false negatives are ok if infrequent -
+ we will just log them twice.
+ */
+ template<int Prime>
+ class Already : boost::noncopyable {
+ public:
+ Already() { clear(); }
+ void clear() { memset(this, 0, sizeof(*this)); }
+
+ /* see if we have Already recorded/indicated our write intent for this region of memory.
+ automatically upgrades the length if the length was shorter previously.
+ @return true if already indicated.
+ */
+ bool checkAndSet(void* p, int len) {
+ unsigned x = mongoutils::hashPointer(p);
+ pair<void*, int>& nd = nodes[x % N];
+ if( nd.first == p ) {
+ if( nd.second < len ) {
+ nd.second = len;
+ return false; // haven't indicated this len yet
+ }
+ return true; // already indicated
+ }
+ nd.first = p;
+ nd.second = len;
+ return false; // a new set
+ }
+
+ private:
+            enum { N = Prime }; // this should be small; the idea is that it fits in the cpu cache easily
+ pair<void*,int> nodes[N];
+ };
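+
+        /* usage sketch: Writes keeps an Already<127> (_alreadyNoted below); CommitJob::note() calls
+           checkAndSet(p, len) first and only records a WriteIntent when it returns false. a hash
+           collision just means an intent gets noted (and journaled) twice, which is harmless. */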
+
+ /** our record of pending/uncommitted write intents */
+ class Writes : boost::noncopyable {
+ struct D {
+ void *p;
+ unsigned len;
+ static void go(const D& d);
+ };
+ public:
+ TaskQueue<D> _deferred;
+ Already<127> _alreadyNoted;
+ set<WriteIntent> _writes;
+ vector< shared_ptr<DurOp> > _ops; // all the ops other than basic writes
+ bool _drained; // _deferred is drained? for asserting/testing
+
+ /** reset the Writes structure (empties all the above) */
+ void clear();
+
+ /** merges into set (ie non-deferred version) */
+ void _insertWriteIntent(void* p, int len);
+
+ void insertWriteIntent(void* p, int len) {
+#if defined(DEBUG_WRITE_INTENT)
+ if( _debug[p] < len )
+ _debug[p] = len;
+#endif
+ D d;
+ d.p = p;
+ d.len = len;
+ _deferred.defer(d);
+ }
+
+#ifdef _DEBUG
+ WriteIntent _last;
+#endif
+#if defined(DEBUG_WRITE_INTENT)
+ map<void*,int> _debug;
+#endif
+ };
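+
+        /* flow sketch: writers (CommitJob::note) call insertWriteIntent(), which only defers a D{p,len}
+           onto _deferred; durThread later drains the queue via commitJob.wi()._deferred.invoke(), which
+           calls D::go() and in turn _insertWriteIntent() to merge each intent into _writes. */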
+
+#if defined(DEBUG_WRITE_INTENT)
+ void assertAlreadyDeclared(void *, int len);
+#else
+ inline void assertAlreadyDeclared(void *, int len) { }
+#endif
+
+ /** A commit job object for a group commit. Currently there is one instance of this object.
+
+ concurrency: assumption is caller is appropriately locking.
+ for example note() invocations are from the write lock.
+ other uses are in a read lock from a single thread (durThread)
+ */
+ class CommitJob : boost::noncopyable {
+ public:
+ AlignedBuilder _ab; // for direct i/o writes to journal
+
+ CommitJob();
+
+ ~CommitJob(){ assert(!"shouldn't destroy CommitJob!"); }
+
+ /** record/note an intent to write */
+ void note(void* p, int len);
+
+ /** note an operation other than a "basic write" */
+ void noteOp(shared_ptr<DurOp> p);
+
+ set<WriteIntent>& writes() {
+ if( !_wi._drained ) {
+ // generally, you don't want to use the set until it is prepared (after deferred ops are applied)
+ // thus this assert here.
+ assert(false);
+ }
+ return _wi._writes;
+ }
+
+ vector< shared_ptr<DurOp> >& ops() { return _wi._ops; }
+
+            /** this method is safe to call outside of locks. when hasWritten() is false we don't do any group commit and avoid even
+ trying to acquire a lock, which might be helpful at times.
+ */
+ bool hasWritten() const { return _hasWritten; }
+
+ /** we use the commitjob object over and over, calling reset() rather than reconstructing */
+ void reset();
+
+ void beginCommit();
+
+ /** the commit code calls this when data reaches the journal (on disk) */
+ void notifyCommitted() { _notify.notifyAll(_commitNumber); }
+
+            /** we check how much has been written and if it is getting to be a lot, we commit sooner. */
+ size_t bytes() const { return _bytes; }
+
+#if defined(_DEBUG)
+ const WriteIntent& lastWrite() const { return _wi._last; }
+#endif
+
+ Writes& wi() { return _wi; }
+ private:
+ NotifyAll::When _commitNumber;
+ bool _hasWritten;
+ Writes _wi; // todo: fix name
+ size_t _bytes;
+ public:
+ NotifyAll _notify; // for getlasterror fsync:true acknowledgements
+ unsigned _nSinceCommitIfNeededCall;
+ };
+
+ extern CommitJob& commitJob;
+
+ }
+}
diff --git a/src/mongo/db/dur_journal.cpp b/src/mongo/db/dur_journal.cpp
new file mode 100644
index 00000000000..6a6609f55ee
--- /dev/null
+++ b/src/mongo/db/dur_journal.cpp
@@ -0,0 +1,748 @@
+// @file dur_journal.cpp writing to the writeahead logging journal
+
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "client.h"
+#include "namespace.h"
+#include "dur_journal.h"
+#include "dur_journalformat.h"
+#include "dur_stats.h"
+#include "../util/logfile.h"
+#include "../util/timer.h"
+#include "../util/alignedbuilder.h"
+#include "../util/net/listen.h" // getelapsedtimemillis
+#include <boost/static_assert.hpp>
+#include <boost/filesystem.hpp>
+#undef assert
+#define assert MONGO_assert
+#include "../util/mongoutils/str.h"
+#include "dur_journalimpl.h"
+#include "../util/file.h"
+#include "../util/checksum.h"
+#include "../util/concurrency/race.h"
+#include "../util/compress.h"
+#include "../server.h"
+
+using namespace mongoutils;
+
+namespace mongo {
+
+ class AlignedBuilder;
+
+ unsigned goodRandomNumberSlow();
+
+ namespace dur {
+ // Rotate after reaching this data size in a journal (j._<n>) file
+ // We use a smaller size for 32 bit as the journal is mmapped during recovery (only)
+ // Note if you take a set of datafiles, including journal files, from 32->64 or vice-versa, it must
+ // work. (and should as-is)
+ // --smallfiles makes the limit small.
+
+#if defined(_DEBUG)
+ unsigned long long DataLimitPerJournalFile = 128 * 1024 * 1024;
+#elif defined(__APPLE__)
+ // assuming a developer box if OS X
+ unsigned long long DataLimitPerJournalFile = 256 * 1024 * 1024;
+#else
+ unsigned long long DataLimitPerJournalFile = (sizeof(void*)==4) ? 256 * 1024 * 1024 : 1 * 1024 * 1024 * 1024;
+#endif
+
+ BOOST_STATIC_ASSERT( sizeof(Checksum) == 16 );
+ BOOST_STATIC_ASSERT( sizeof(JHeader) == 8192 );
+ BOOST_STATIC_ASSERT( sizeof(JSectHeader) == 20 );
+ BOOST_STATIC_ASSERT( sizeof(JSectFooter) == 32 );
+ BOOST_STATIC_ASSERT( sizeof(JEntry) == 12 );
+ BOOST_STATIC_ASSERT( sizeof(LSNFile) == 88 );
+
+ bool usingPreallocate = false;
+
+ void removeOldJournalFile(path p);
+
+ boost::filesystem::path getJournalDir() {
+ boost::filesystem::path p(dbpath);
+ p /= "journal";
+ return p;
+ }
+
+ path lsnPath() {
+ return getJournalDir()/"lsn";
+ }
+
+ /** this should be called when something really bad happens so that we can flag appropriately
+ */
+ void journalingFailure(const char *msg) {
+ /** todo:
+ (1) don't log too much
+ (2) make an indicator in the journal dir that something bad happened.
+ (2b) refuse to do a recovery startup if that is there without manual override.
+ */
+ log() << "journaling failure/error: " << msg << endl;
+ assert(false);
+ }
+
+ JSectFooter::JSectFooter() {
+ memset(this, 0, sizeof(*this));
+ sentinel = JEntry::OpCode_Footer;
+ }
+
+ JSectFooter::JSectFooter(const void* begin, int len) { // needs buffer to compute hash
+ sentinel = JEntry::OpCode_Footer;
+ reserved = 0;
+ magic[0] = magic[1] = magic[2] = magic[3] = '\n';
+
+ Checksum c;
+ c.gen(begin, (unsigned) len);
+ memcpy(hash, c.bytes, sizeof(hash));
+ }
+
+ bool JSectFooter::checkHash(const void* begin, int len) const {
+ if( !magicOk() ) {
+ log() << "journal footer not valid" << endl;
+ return false;
+ }
+ Checksum c;
+ c.gen(begin, len);
+ DEV log() << "checkHash len:" << len << " hash:" << toHex(hash, 16) << " current:" << toHex(c.bytes, 16) << endl;
+ if( memcmp(hash, c.bytes, sizeof(hash)) == 0 )
+ return true;
+ log() << "journal checkHash mismatch, got: " << toHex(c.bytes, 16) << " expected: " << toHex(hash,16) << endl;
+ return false;
+ }
+
+ JHeader::JHeader(string fname) {
+ magic[0] = 'j'; magic[1] = '\n';
+ _version = CurrentVersion;
+ memset(ts, 0, sizeof(ts));
+ time_t t = time(0);
+ strncpy(ts, time_t_to_String_short(t).c_str(), sizeof(ts)-1);
+ memset(dbpath, 0, sizeof(dbpath));
+ strncpy(dbpath, fname.c_str(), sizeof(dbpath)-1);
+ {
+ fileId = t&0xffffffff;
+ fileId |= ((unsigned long long)goodRandomNumberSlow()) << 32;
+ }
+ memset(reserved3, 0, sizeof(reserved3));
+ txt2[0] = txt2[1] = '\n';
+ n1 = n2 = n3 = n4 = '\n';
+ }
+
+ Journal j;
+
+ const unsigned long long LsnShutdownSentinel = ~((unsigned long long)0);
+
+ Journal::Journal() :
+ _curLogFileMutex("JournalLfMutex") {
+ _ageOut = true;
+ _written = 0;
+ _nextFileNumber = 0;
+ _curLogFile = 0;
+ _curFileId = 0;
+ _preFlushTime = 0;
+ _lastFlushTime = 0;
+ _writeToLSNNeeded = false;
+ }
+
+ path Journal::getFilePathFor(int filenumber) const {
+ boost::filesystem::path p(dir);
+ p /= string(str::stream() << "j._" << filenumber);
+ return p;
+ }
+
+ /** never throws
+ @return true if journal dir is not empty
+ */
+ bool haveJournalFiles() {
+ try {
+ for ( boost::filesystem::directory_iterator i( getJournalDir() );
+ i != boost::filesystem::directory_iterator();
+ ++i ) {
+ string fileName = boost::filesystem::path(*i).leaf();
+ if( str::startsWith(fileName, "j._") )
+ return true;
+ }
+ }
+ catch(...) { }
+ return false;
+ }
+
+ /** throws */
+ void removeJournalFiles() {
+ log() << "removeJournalFiles" << endl;
+ try {
+ for ( boost::filesystem::directory_iterator i( getJournalDir() );
+ i != boost::filesystem::directory_iterator();
+ ++i ) {
+ string fileName = boost::filesystem::path(*i).leaf();
+ if( str::startsWith(fileName, "j._") ) {
+ try {
+ removeOldJournalFile(*i);
+ }
+ catch(std::exception& e) {
+ log() << "couldn't remove " << fileName << ' ' << e.what() << endl;
+ throw;
+ }
+ }
+ }
+ try {
+ boost::filesystem::remove(lsnPath());
+ }
+ catch(...) {
+ log() << "couldn't remove " << lsnPath().string() << endl;
+ throw;
+ }
+ }
+ catch( std::exception& e ) {
+ log() << "error removing journal files " << e.what() << endl;
+ throw;
+ }
+ assert(!haveJournalFiles());
+
+ flushMyDirectory(getJournalDir() / "file"); // flushes parent of argument (in this case journal dir)
+
+ log(1) << "removeJournalFiles end" << endl;
+ }
+
+ /** at clean shutdown */
+ bool okToCleanUp = false; // successful recovery would set this to true
+ void Journal::cleanup(bool _log) {
+ if( !okToCleanUp )
+ return;
+
+ if( _log )
+ log() << "journalCleanup..." << endl;
+ try {
+ SimpleMutex::scoped_lock lk(_curLogFileMutex);
+ closeCurrentJournalFile();
+ removeJournalFiles();
+ }
+ catch(std::exception& e) {
+ log() << "error couldn't remove journal file during shutdown " << e.what() << endl;
+ throw;
+ }
+ }
+ void journalCleanup(bool log) { j.cleanup(log); }
+
+ bool _preallocateIsFaster() {
+ bool faster = false;
+ boost::filesystem::path p = getJournalDir() / "tempLatencyTest";
+ try { remove(p); } catch(...) { }
+ try {
+ AlignedBuilder b(8192);
+ int millis[2];
+ const int N = 50;
+ for( int pass = 0; pass < 2; pass++ ) {
+ LogFile f(p.string());
+ Timer t;
+ for( int i = 0 ; i < N; i++ ) {
+ f.synchronousAppend(b.buf(), 8192);
+ }
+ millis[pass] = t.millis();
+ // second time through, file exists and is prealloc case
+ }
+ int diff = millis[0] - millis[1];
+ if( diff > 2 * N ) {
+ // at least 2ms faster for prealloc case?
+ faster = true;
+ log() << "preallocateIsFaster=true " << diff / (1.0*N) << endl;
+ }
+ }
+ catch(...) {
+ log() << "info preallocateIsFaster couldn't run; returning false" << endl;
+ }
+ try { remove(p); } catch(...) { }
+ return faster;
+ }
+ bool preallocateIsFaster() {
+ Timer t;
+ bool res = false;
+ if( _preallocateIsFaster() && _preallocateIsFaster() ) {
+ // maybe system is just super busy at the moment? sleep a second to let it calm down.
+                // deciding to prealloc is a medium-big decision:
+ sleepsecs(1);
+ res = _preallocateIsFaster();
+ }
+ if( t.millis() > 3000 )
+ log() << "preallocateIsFaster check took " << t.millis()/1000.0 << " secs" << endl;
+ return res;
+ }
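+
+        /* worked example: _preallocateIsFaster() does N == 50 synchronous 8KB appends per pass; with
+           (hypothetical) millis[0] == 400 and millis[1] == 250 the difference (150) exceeds 2 * N (100),
+           i.e. the preallocated pass was at least ~2ms faster per append, so prealloc is judged faster. */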
+
+ // throws
+ void preallocateFile(boost::filesystem::path p, unsigned long long len) {
+ if( exists(p) )
+ return;
+
+ log() << "preallocating a journal file " << p.string() << endl;
+
+ const unsigned BLKSZ = 1024 * 1024;
+ assert( len % BLKSZ == 0 );
+
+ AlignedBuilder b(BLKSZ);
+ memset((void*)b.buf(), 0, BLKSZ);
+
+ ProgressMeter m(len, 3/*secs*/, 10/*hits between time check (once every 6.4MB)*/);
+
+ File f;
+ f.open( p.string().c_str() , /*read-only*/false , /*direct-io*/false );
+ assert( f.is_open() );
+ fileofs loc = 0;
+ while ( loc < len ) {
+ f.write( loc , b.buf() , BLKSZ );
+ loc += BLKSZ;
+ m.hit(BLKSZ);
+ }
+ assert( loc == len );
+ f.fsync();
+ }
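+
+        /* usage sketch: _preallocateFiles() below calls preallocateFile(preallocPath(i), limit) for each
+           of the NUM_PREALLOC_FILES slots, with limit normally DataLimitPerJournalFile; writing the zeros
+           in 1MB blocks bounds the AlignedBuilder size and lets the ProgressMeter report progress. */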
+
+ const int NUM_PREALLOC_FILES = 3;
+ inline boost::filesystem::path preallocPath(int n) {
+ assert(n >= 0);
+ assert(n < NUM_PREALLOC_FILES);
+ string fn = str::stream() << "prealloc." << n;
+ return getJournalDir() / fn;
+ }
+
+ // throws
+ void _preallocateFiles() {
+ for( int i = 0; i < NUM_PREALLOC_FILES; i++ ) {
+ boost::filesystem::path filepath = preallocPath(i);
+
+ unsigned long long limit = DataLimitPerJournalFile;
+ if( debug && i == 1 ) {
+                    // moving 32->64, the prealloc files would be short. that is "ok", but we want to exercise that
+                    // case, so when _DEBUG is set we force it here by arbitrarily stopping prealloc at a low
+                    // limit for one file. also we want to be able to change the constant in the future without a lot of
+                    // work anyway.
+ limit = 16 * 1024 * 1024;
+ }
+ preallocateFile(filepath, limit);
+ }
+ }
+
+ void checkFreeSpace() {
+ unsigned long long spaceNeeded = static_cast<unsigned long long>(3 * DataLimitPerJournalFile * 1.1); // add 10% for headroom
+ unsigned long long freeSpace = File::freeSpace(getJournalDir().string());
+ unsigned long long prealloced = 0;
+ for( int i = 0; i < NUM_PREALLOC_FILES; i++ ) {
+ boost::filesystem::path filepath = preallocPath(i);
+ if (exists(filepath))
+ prealloced += file_size(filepath);
+ }
+
+ if (freeSpace + prealloced < spaceNeeded) {
+ log() << endl;
+ error() << "Insufficient free space for journals." << endl;
+ log() << "Please make at least " << spaceNeeded/(1024*1024) << "MB available in " << getJournalDir().string() << endl;
+ log() << endl;
+ throw UserException(15926, "Insufficient free space for journals");
+ }
+ }
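+
+        /* worked example: on a typical 64 bit build DataLimitPerJournalFile is 1GB, so spaceNeeded is
+           3 * 1GB * 1.1, i.e. ~3.3GB; the sizes of any existing prealloc.<n> files are credited against
+           that since they will be reused rather than newly allocated. */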
+
+ void preallocateFiles() {
+ if (! (cmdLine.durOptions & CmdLine::DurNoCheckSpace))
+ checkFreeSpace();
+
+ if( exists(preallocPath(0)) || // if enabled previously, keep using
+ exists(preallocPath(1)) ||
+ ( cmdLine.preallocj && preallocateIsFaster() ) ) {
+ usingPreallocate = true;
+ try {
+ _preallocateFiles();
+ }
+ catch(...) {
+ log() << "warning caught exception in preallocateFiles, continuing" << endl;
+ }
+ }
+ j.open();
+ }
+
+ void removeOldJournalFile(path p) {
+ if( usingPreallocate ) {
+ try {
+ for( int i = 0; i < NUM_PREALLOC_FILES; i++ ) {
+ boost::filesystem::path filepath = preallocPath(i);
+ if( !boost::filesystem::exists(filepath) ) {
+ // we can recycle this file into this prealloc file location
+ boost::filesystem::path temppath = filepath.string() + ".temp";
+ boost::filesystem::rename(p, temppath);
+ {
+ // zero the header
+ File f;
+ f.open(temppath.string().c_str(), false, false);
+ char buf[8192];
+ memset(buf, 0, 8192);
+ f.write(0, buf, 8192);
+ f.truncate(DataLimitPerJournalFile);
+ f.fsync();
+ }
+ boost::filesystem::rename(temppath, filepath);
+ return;
+ }
+ }
+ } catch(...) {
+ log() << "warning exception in dur::removeOldJournalFile " << p.string() << endl;
+ // fall through and try to delete the file
+ }
+ }
+
+ // already have 3 prealloc files, so delete this file
+ try {
+ boost::filesystem::remove(p);
+ }
+ catch(...) {
+ log() << "warning exception removing " << p.string() << endl;
+ }
+ }
+
+ // find a prealloc.<n> file, presumably to take and use
+ path findPrealloced() {
+ try {
+ for( int i = 0; i < NUM_PREALLOC_FILES; i++ ) {
+ boost::filesystem::path filepath = preallocPath(i);
+ if( boost::filesystem::exists(filepath) )
+ return filepath;
+ }
+ } catch(...) {
+ log() << "warning exception in dur::findPrealloced()" << endl;
+ }
+ return path();
+ }
+
+ /** assure journal/ dir exists. throws. call during startup. */
+ void journalMakeDir() {
+ j.init();
+
+ boost::filesystem::path p = getJournalDir();
+ j.dir = p.string();
+ log() << "journal dir=" << j.dir << endl;
+ if( !exists(j.dir) ) {
+ try {
+ create_directory(j.dir);
+ }
+ catch(std::exception& e) {
+ log() << "error creating directory " << j.dir << ' ' << e.what() << endl;
+ throw;
+ }
+ }
+ }
+
+ void Journal::_open() {
+ _curFileId = 0;
+ assert( _curLogFile == 0 );
+ path fname = getFilePathFor(_nextFileNumber);
+
+ // if we have a prealloced file, use it
+ {
+ path p = findPrealloced();
+ if( !p.empty() ) {
+ try {
+ {
+ // JHeader::fileId must be updated before renaming to be race-safe
+ LogFile f(p.string());
+ JHeader h(p.string());
+ AlignedBuilder b(8192);
+ b.appendStruct(h);
+ f.synchronousAppend(b.buf(), b.len());
+ }
+ boost::filesystem::rename(p, fname);
+ }
+ catch(...) {
+ log() << "warning couldn't write to / rename file " << p.string() << endl;
+ }
+ }
+ }
+
+ _curLogFile = new LogFile(fname.string());
+ _nextFileNumber++;
+ {
+ JHeader h(fname.string());
+ _curFileId = h.fileId;
+ assert(_curFileId);
+ AlignedBuilder b(8192);
+ b.appendStruct(h);
+ _curLogFile->synchronousAppend(b.buf(), b.len());
+ }
+ }
+
+ void Journal::init() {
+ assert( _curLogFile == 0 );
+ MongoFile::notifyPreFlush = preFlush;
+ MongoFile::notifyPostFlush = postFlush;
+ }
+
+ void Journal::open() {
+ assert( MongoFile::notifyPreFlush == preFlush );
+ SimpleMutex::scoped_lock lk(_curLogFileMutex);
+ _open();
+ }
+
+ void LSNFile::set(unsigned long long x) {
+ memset(this, 0, sizeof(*this));
+ lsn = x;
+ checkbytes = ~x;
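+            // get() verifies that checkbytes is the complement of lsn; on mismatch the file is treated as
+            // invalid and recovery starts from the beginning of the journal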
+ }
+
+        /** logs details of the situation and returns 0 if anything surprising is found in the LSNFile.
+            if something highly surprising is found, throws to abort.
+        */
+ unsigned long long LSNFile::get() {
+ uassert(13614, str::stream() << "unexpected version number of lsn file in journal/ directory got: " << ver , ver == 0);
+ if( ~lsn != checkbytes ) {
+ log() << "lsnfile not valid. recovery will be from log start. lsn: " << hex << lsn << " checkbytes: " << hex << checkbytes << endl;
+ return 0;
+ }
+ return lsn;
+ }
+
+ /** called during recovery (the error message text below assumes that)
+ */
+ unsigned long long journalReadLSN() {
+ if( !MemoryMappedFile::exists(lsnPath()) ) {
+ log() << "info no lsn file in journal/ directory" << endl;
+ return 0;
+ }
+
+ try {
+ // os can flush as it likes. if it flushes slowly, we will just do extra work on recovery.
+ // however, given we actually close the file when writing, that seems unlikely.
+ LSNFile L;
+ File f;
+ f.open(lsnPath().string().c_str());
+ assert(f.is_open());
+ if( f.len() == 0 ) {
+ // this could be 'normal' if we crashed at the right moment
+ log() << "info lsn file is zero bytes long" << endl;
+ return 0;
+ }
+ f.read(0,(char*)&L, sizeof(L));
+ unsigned long long lsn = L.get();
+ return lsn;
+ }
+ catch(std::exception& e) {
+ uasserted(13611, str::stream() << "can't read lsn file in journal directory : " << e.what());
+ }
+ return 0;
+ }
+
+ unsigned long long getLastDataFileFlushTime() {
+ return j.lastFlushTime();
+ }
+
+ /** remember "last sequence number" to speed recoveries
+ concurrency: called by durThread only.
+ */
+ void Journal::updateLSNFile() {
+ RACECHECK
+ if( !_writeToLSNNeeded )
+ return;
+ _writeToLSNNeeded = false;
+ try {
+ // os can flush as it likes. if it flushes slowly, we will just do extra work on recovery.
+ // however, given we actually close the file, that seems unlikely.
+ File f;
+ f.open(lsnPath().string().c_str());
+ if( !f.is_open() ) {
+ // can get 0 if an i/o error
+ log() << "warning: open of lsn file failed" << endl;
+ return;
+ }
+ LOG(1) << "lsn set " << _lastFlushTime << endl;
+ LSNFile lsnf;
+ lsnf.set(_lastFlushTime);
+ f.write(0, (char*)&lsnf, sizeof(lsnf));
+ // do we want to fsync here? if we do it probably needs to be async so the durthread
+ // is not delayed.
+ }
+ catch(std::exception& e) {
+ log() << "warning: write to lsn file failed " << e.what() << endl;
+ // keep running (ignore the error). recovery will be slow.
+ }
+ }
+
+ void Journal::preFlush() {
+ j._preFlushTime = Listener::getElapsedTimeMillis();
+ }
+
+ void Journal::postFlush() {
+ j._lastFlushTime = j._preFlushTime;
+ j._writeToLSNNeeded = true;
+ }
+
+ // call from within _curLogFileMutex
+ void Journal::closeCurrentJournalFile() {
+ if (!_curLogFile)
+ return;
+
+ JFile jf;
+ jf.filename = _curLogFile->_name;
+ jf.lastEventTimeMs = Listener::getElapsedTimeMillis();
+ _oldJournalFiles.push_back(jf);
+
+ delete _curLogFile; // close
+ _curLogFile = 0;
+ _written = 0;
+ }
+
+ /** remove older journal files.
+ be in _curLogFileMutex but not dbMutex when calling
+ */
+ void Journal::removeUnneededJournalFiles() {
+ while( !_oldJournalFiles.empty() ) {
+ JFile f = _oldJournalFiles.front();
+
+ if( f.lastEventTimeMs < _lastFlushTime + ExtraKeepTimeMs ) {
+ // eligible for deletion
+ path p( f.filename );
+ log() << "old journal file will be removed: " << f.filename << endl;
+ removeOldJournalFile(p);
+ }
+ else {
+ break;
+ }
+
+ _oldJournalFiles.pop_front();
+ }
+ }
+
+ /*int getAgeOutJournalFiles() {
+ mutex::try_lock lk(j._curLogFileMutex, 4000);
+ if( !lk.ok )
+ return -1;
+ return j._ageOut ? 1 : 0;
+ }*/
+ void setAgeOutJournalFiles(bool a) {
+ SimpleMutex::scoped_lock lk(j._curLogFileMutex);
+ j._ageOut = a;
+ }
+
+ void Journal::_rotate() {
+ if( d.dbMutex.atLeastReadLocked() ) {
+                LOGSOME << "info journal _rotate called inside dbMutex - ok but should be somewhat rare" << endl;
+ }
+
+ RACECHECK;
+
+ _curLogFileMutex.dassertLocked();
+
+ if ( inShutdown() || !_curLogFile )
+ return;
+
+ j.updateLSNFile();
+
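+            // nothing to do unless the current journal file has reached its data limit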
+ if( _curLogFile && _written < DataLimitPerJournalFile )
+ return;
+
+ if( _curLogFile ) {
+ _curLogFile->truncate();
+ closeCurrentJournalFile();
+ removeUnneededJournalFiles();
+ }
+
+ try {
+ Timer t;
+ _open();
+ int ms = t.millis();
+ if( ms >= 200 ) {
+ log() << "DR101 latency warning on journal file open " << ms << "ms" << endl;
+ }
+ }
+ catch(std::exception& e) {
+ log() << "warning exception opening journal file " << e.what() << endl;
+ throw;
+ }
+ }
+
+ /** write (append) the buffer we have built to the journal and fsync it.
+ outside of dbMutex lock as this could be slow.
+ @param uncompressed - a buffer that will be written to the journal after compression
+ will not return until on disk
+ */
+ void WRITETOJOURNAL(JSectHeader h, AlignedBuilder& uncompressed) {
+ Timer t;
+ j.journal(h, uncompressed);
+ stats.curr->_writeToJournalMicros += t.micros();
+ }
+ void Journal::journal(const JSectHeader& h, const AlignedBuilder& uncompressed) {
+ RACECHECK
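+            // a static builder so its large buffer is reused across group commits instead of being reallocated each time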
+ static AlignedBuilder b(32*1024*1024);
+ /* buffer to journal will be
+ JSectHeader
+ compressed operations
+ JSectFooter
+ */
+ const unsigned headTailSize = sizeof(JSectHeader) + sizeof(JSectFooter);
+ const unsigned max = maxCompressedLength(uncompressed.len()) + headTailSize;
+ b.reset(max);
+
+ {
+ dassert( h.sectionLen() == (unsigned) 0xffffffff ); // we will backfill later
+ b.appendStruct(h);
+ }
+
+ size_t compressedLength = 0;
+ rawCompress(uncompressed.buf(), uncompressed.len(), b.cur(), &compressedLength);
+ assert( compressedLength < 0xffffffff );
+ assert( compressedLength < max );
+ b.skip(compressedLength);
+
+ // footer
+ unsigned L = 0xffffffff;
+ {
+ // pad to alignment, and set the total section length in the JSectHeader
+ assert( 0xffffe000 == (~(Alignment-1)) );
+ unsigned lenUnpadded = b.len() + sizeof(JSectFooter);
+ L = (lenUnpadded + Alignment-1) & (~(Alignment-1));
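+                // e.g. with Alignment == 8192, a lenUnpadded of 12000 rounds up to L == 16384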
+ dassert( L >= lenUnpadded );
+
+ ((JSectHeader*)b.atOfs(0))->setSectionLen(lenUnpadded);
+
+ JSectFooter f(b.buf(), b.len()); // computes checksum
+ b.appendStruct(f);
+ dassert( b.len() == lenUnpadded );
+
+ b.skip(L - lenUnpadded);
+ dassert( b.len() % Alignment == 0 );
+ }
+
+ try {
+ SimpleMutex::scoped_lock lk(_curLogFileMutex);
+
+ // must already be open -- so that _curFileId is correct for previous buffer building
+ assert( _curLogFile );
+
+ stats.curr->_uncompressedBytes += b.len();
+ unsigned w = b.len();
+ _written += w;
+ assert( w <= L );
+ stats.curr->_journaledBytes += L;
+ _curLogFile->synchronousAppend((const void *) b.buf(), L);
+ _rotate();
+ }
+ catch(std::exception& e) {
+ log() << "error exception in dur::journal " << e.what() << endl;
+ throw;
+ }
+ }
+
+ }
+}
+
+/* todo
+ test (and handle) disk full on journal append. best quick thing to do is to terminate.
+ if we roll back operations, there are nuances such as is ReplSetImpl::lastOpTimeWritten too new in ram then?
+*/
diff --git a/src/mongo/db/dur_journal.h b/src/mongo/db/dur_journal.h
new file mode 100644
index 00000000000..664f63942e0
--- /dev/null
+++ b/src/mongo/db/dur_journal.h
@@ -0,0 +1,68 @@
+// @file dur_journal.h
+
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+namespace mongo {
+ class AlignedBuilder;
+
+ namespace dur {
+
+        /** true if ok to clean up journal files at termination. otherwise, journal files will be retained.
+ */
+ extern bool okToCleanUp;
+
+ /** at termination after db files closed & fsynced
+ also after recovery
+ closes and removes journal files
+ @param log report in log that we are cleaning up if we actually do any work
+ */
+ void journalCleanup(bool log = false);
+
+ /** assure journal/ dir exists. throws */
+ void journalMakeDir();
+
+ /** check if time to rotate files; assure a file is open.
+ done separately from the journal() call as we can do this part
+ outside of lock.
+ only called by durThread.
+ */
+ void journalRotate();
+
+ /** flag that something has gone wrong during writing to the journal
+ (not for recovery mode)
+ */
+ void journalingFailure(const char *msg);
+
+ /** read lsn from disk from the last run before doing recovery */
+ unsigned long long journalReadLSN();
+
+ unsigned long long getLastDataFileFlushTime();
+
+ /** never throws.
+ @return true if there are any journal files in the journal dir.
+ */
+ bool haveJournalFiles();
+
+ // in case disk controller buffers writes
+ const long long ExtraKeepTimeMs = 10000;
+
+ const unsigned JournalCommitIntervalDefault = 100;
+
+ }
+}
diff --git a/src/mongo/db/dur_journalformat.h b/src/mongo/db/dur_journalformat.h
new file mode 100644
index 00000000000..10ed8487b71
--- /dev/null
+++ b/src/mongo/db/dur_journalformat.h
@@ -0,0 +1,174 @@
+// @file dur_journalformat.h The format of our journal files.
+
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+namespace mongo {
+
+ namespace dur {
+
+ const unsigned Alignment = 8192;
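+        // on-disk layout of a journal file: a JHeader (8KB), then a sequence of sections; each section is
+        // a JSectHeader, the compressed JEntry/DurOp data, and a JSectFooter, padded out to Alignment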
+
+#pragma pack(1)
+ /** beginning header for a journal/j._<n> file
+            there is nothing important in this header at this time, except perhaps the version #.
+ */
+ struct JHeader {
+ JHeader() { }
+ JHeader(string fname);
+
+ char magic[2]; // "j\n". j means journal, then a linefeed, fwiw if you were to run "less" on the file or something...
+
+            // 0x4142 is ascii-readable if you look at the file with head/less -- thus the starting values were near
+ // that. simply incrementing the version # is safe on a fwd basis.
+#if defined(_NOCOMPRESS)
+ enum { CurrentVersion = 0x4148 };
+#else
+ enum { CurrentVersion = 0x4149 };
+#endif
+ unsigned short _version;
+
+ // these are just for diagnostic ease (make header more useful as plain text)
+ char n1; // '\n'
+ char ts[20]; // ascii timestamp of file generation. for user reading, not used by code.
+ char n2; // '\n'
+ char dbpath[128]; // path/filename of this file for human reading and diagnostics. not used by code.
+ char n3, n4; // '\n', '\n'
+
+ unsigned long long fileId; // unique identifier that will be in each JSectHeader. important as we recycle prealloced files
+
+ char reserved3[8026]; // 8KB total for the file header
+ char txt2[2]; // "\n\n" at the end
+
+ bool versionOk() const { return _version == CurrentVersion; }
+ bool valid() const { return magic[0] == 'j' && txt2[1] == '\n' && fileId; }
+ };
+
+ /** "Section" header. A section corresponds to a group commit.
+ len is length of the entire section including header and footer.
+ header and footer are not compressed, just the stuff in between.
+ */
+ struct JSectHeader {
+ private:
+ unsigned _sectionLen; // unpadded length in bytes of the whole section
+ public:
+ unsigned long long seqNumber; // sequence number that can be used on recovery to not do too much work
+ unsigned long long fileId; // matches JHeader::fileId
+ unsigned sectionLen() const { return _sectionLen; }
+
+ // we store the unpadded length so we can use that when we uncompress. to
+ // get the true total size this must be rounded up to the Alignment.
+ void setSectionLen(unsigned lenUnpadded) { _sectionLen = lenUnpadded; }
+
+ unsigned sectionLenWithPadding() const {
+ unsigned x = (sectionLen() + (Alignment-1)) & (~(Alignment-1));
+ dassert( x % Alignment == 0 );
+ return x;
+ }
+ };
+
+ /** an individual write operation within a group commit section. Either the entire section should
+ be applied, or nothing. (We check the md5 for the whole section before doing anything on recovery.)
+ */
+ struct JEntry {
+ enum OpCodes {
+ OpCode_Footer = 0xffffffff,
+ OpCode_DbContext = 0xfffffffe,
+ OpCode_FileCreated = 0xfffffffd,
+ OpCode_DropDb = 0xfffffffc,
+ OpCode_Min = 0xfffff000
+ };
+ union {
+ unsigned len; // length in bytes of the data of the JEntry. does not include the JEntry header
+ OpCodes opcode;
+ };
+
+ unsigned ofs; // offset in file
+
+ // sentinel and masks for _fileNo
+ enum {
+ DotNsSuffix = 0x7fffffff, // ".ns" file
+ LocalDbBit = 0x80000000 // assuming "local" db instead of using the JDbContext
+ };
+ int _fileNo; // high bit is set to indicate it should be the <dbpath>/local database
+ // char data[len] follows
+
+ const char * srcData() const {
+ const int *i = &_fileNo;
+ return (const char *) (i+1);
+ }
+
+ int getFileNo() const { return _fileNo & (~LocalDbBit); }
+ void setFileNo(int f) { _fileNo = f; }
+ bool isNsSuffix() const { return getFileNo() == DotNsSuffix; }
+
+ void setLocalDbContextBit() { _fileNo |= LocalDbBit; }
+ bool isLocalDbContext() const { return _fileNo & LocalDbBit; }
+ void clearLocalDbContextBit() { _fileNo = getFileNo(); }
+
+ static string suffix(int fileno) {
+ if( fileno == DotNsSuffix ) return "ns";
+ stringstream ss;
+ ss << fileno;
+ return ss.str();
+ }
+ };
+
+ /** group commit section footer. md5 is a key field. */
+ struct JSectFooter {
+ JSectFooter();
+ JSectFooter(const void* begin, int len); // needs buffer to compute hash
+ unsigned sentinel;
+ unsigned char hash[16];
+ unsigned long long reserved;
+ char magic[4]; // "\n\n\n\n"
+
+ /** used by recovery to see if buffer is valid
+ @param begin the buffer
+ @param len buffer len
+ @return true if buffer looks valid
+ */
+ bool checkHash(const void* begin, int len) const;
+
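+            // 0x0a0a0a0a is four '\n' bytes, matching the magic[] field above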
+ bool magicOk() const { return *((unsigned*)magic) == 0x0a0a0a0a; }
+ };
+
+ /** declares "the next entry(s) are for this database / file path prefix" */
+ struct JDbContext {
+ JDbContext() : sentinel(JEntry::OpCode_DbContext) { }
+            const unsigned sentinel; // compare to JEntry::len -- OpCode_DbContext is our sentinel
+ //char dbname[];
+ };
+
+ /** "last sequence number" */
+ struct LSNFile {
+ unsigned ver;
+ unsigned reserved2;
+ unsigned long long lsn;
+ unsigned long long checkbytes;
+ unsigned long long reserved[8];
+
+ void set(unsigned long long lsn);
+ unsigned long long get();
+ };
+
+#pragma pack()
+
+ }
+
+}
diff --git a/src/mongo/db/dur_journalimpl.h b/src/mongo/db/dur_journalimpl.h
new file mode 100644
index 00000000000..8aad70b0e5c
--- /dev/null
+++ b/src/mongo/db/dur_journalimpl.h
@@ -0,0 +1,103 @@
+// @file dur_journalimpl.h
+
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "dur_journalformat.h"
+#include "../util/logfile.h"
+
+namespace mongo {
+ namespace dur {
+
+ /** the writeahead journal for durability */
+ class Journal {
+ public:
+ string dir; // set by journalMakeDir() during initialization
+
+ Journal();
+
+ /** call during startup by journalMakeDir() */
+ void init();
+
+ /** check if time to rotate files. assure a file is open.
+ done separately from the journal() call as we can do this part
+ outside of lock.
+ thread: durThread()
+ */
+ void rotate();
+
+ /** append to the journal file
+ */
+ void journal(const JSectHeader& h, const AlignedBuilder& b);
+
+ boost::filesystem::path getFilePathFor(int filenumber) const;
+
+ unsigned long long lastFlushTime() const { return _lastFlushTime; }
+ void cleanup(bool log); // closes and removes journal files
+
+ unsigned long long curFileId() const { return _curFileId; }
+
+ void assureLogFileOpen() {
+ SimpleMutex::scoped_lock lk(_curLogFileMutex);
+ if( _curLogFile == 0 )
+ _open();
+ }
+
+ /** open a journal file to journal operations to. */
+ void open();
+
+ private:
+ /** check if time to rotate files. assure a file is open.
+ * internally called with every commit
+ */
+ void _rotate();
+
+ void _open();
+ void closeCurrentJournalFile();
+ void removeUnneededJournalFiles();
+
+ unsigned long long _written; // bytes written so far to the current journal (log) file
+ unsigned _nextFileNumber;
+ public:
+ SimpleMutex _curLogFileMutex;
+ bool _ageOut;
+ private:
+
+ LogFile *_curLogFile; // use _curLogFileMutex
+ unsigned long long _curFileId; // current file id see JHeader::fileId
+
+ struct JFile {
+ string filename;
+ unsigned long long lastEventTimeMs;
+ };
+
+ // files which have been closed but not unlinked (rotated out) yet
+ // ordered oldest to newest
+ list<JFile> _oldJournalFiles; // use _curLogFileMutex
+
+ // lsn related
+ static void preFlush();
+ static void postFlush();
+ unsigned long long _preFlushTime;
+ unsigned long long _lastFlushTime; // data < this time is fsynced in the datafiles (unless hard drive controller is caching)
+ bool _writeToLSNNeeded;
+ void updateLSNFile();
+ };
+
+ }
+}
diff --git a/src/mongo/db/dur_preplogbuffer.cpp b/src/mongo/db/dur_preplogbuffer.cpp
new file mode 100644
index 00000000000..10b63c0e549
--- /dev/null
+++ b/src/mongo/db/dur_preplogbuffer.cpp
@@ -0,0 +1,177 @@
+// @file dur_preplogbuffer.cpp
+
+/**
+* Copyright (C) 2009 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+/*
+ PREPLOGBUFFER
+ we will build an output buffer ourself and then use O_DIRECT
+ we could be in read lock for this
+ for very large objects write directly to redo log in situ?
+ @see https://docs.google.com/drawings/edit?id=1TklsmZzm7ohIZkwgeK6rMvsdaR13KjtJYMsfLr175Zc
+*/
+
+#include "pch.h"
+#include "cmdline.h"
+#include "dur.h"
+#include "dur_journal.h"
+#include "dur_journalimpl.h"
+#include "dur_commitjob.h"
+#include "../util/mongoutils/hash.h"
+#include "../util/mongoutils/str.h"
+#include "../util/alignedbuilder.h"
+#include "../util/timer.h"
+#include "dur_stats.h"
+#include "../server.h"
+
+using namespace mongoutils;
+
+namespace mongo {
+ namespace dur {
+
+ extern Journal j;
+
+ RelativePath local = RelativePath::fromRelativePath("local");
+
+ static MongoMMF* findMMF_inlock(void *ptr, size_t &ofs) {
+ MongoMMF *f = privateViews.find_inlock(ptr, ofs);
+ if( f == 0 ) {
+ error() << "findMMF_inlock failed " << privateViews.numberOfViews_inlock() << endl;
+ printStackTrace(); // we want a stack trace and the assert below didn't print a trace once in the real world - not sure why
+ stringstream ss;
+ ss << "view pointer cannot be resolved " << hex << (size_t) ptr;
+ journalingFailure(ss.str().c_str()); // asserts, which then abends
+ }
+ return f;
+ }
+
+ /** put the basic write operation into the buffer (bb) to be journaled */
+ static void prepBasicWrite_inlock(AlignedBuilder&bb, const WriteIntent *i, RelativePath& lastDbPath) {
+ size_t ofs = 1;
+ MongoMMF *mmf = findMMF_inlock(i->start(), /*out*/ofs);
+
+ if( unlikely(!mmf->willNeedRemap()) ) {
+                // tag this mmf as needing a remap of its private view later.
+ // usually it will already be dirty/already set, so we do the if above first
+ // to avoid possibility of cpu cache line contention
+ mmf->willNeedRemap() = true;
+ }
+
+ // since we have already looked up the mmf, we go ahead and remember the write view location
+ // so we don't have to find the MongoMMF again later in WRITETODATAFILES()
+ //
+ // this was for WRITETODATAFILES_Impl2 so commented out now
+ //
+ /*
+ dassert( i->w_ptr == 0 );
+ i->w_ptr = ((char*)mmf->view_write()) + ofs;
+ */
+
+ JEntry e;
+            e.len = min(i->length(), (unsigned)(mmf->length() - ofs)); // don't write past the end of the file
+ assert( ofs <= 0x80000000 );
+ e.ofs = (unsigned) ofs;
+ e.setFileNo( mmf->fileSuffixNo() );
+ if( mmf->relativePath() == local ) {
+ e.setLocalDbContextBit();
+ }
+ else if( mmf->relativePath() != lastDbPath ) {
+ lastDbPath = mmf->relativePath();
+ JDbContext c;
+ bb.appendStruct(c);
+ bb.appendStr(lastDbPath.toString());
+ }
+ bb.appendStruct(e);
+#if defined(_EXPERIMENTAL)
+ i->ofsInJournalBuffer = bb.len();
+#endif
+ bb.appendBuf(i->start(), e.len);
+
+ if (unlikely(e.len != (unsigned)i->length())) {
+ log() << "journal info splitting prepBasicWrite at boundary" << endl;
+
+ // This only happens if we write to the last byte in a file and
+            // the first byte in another file that is mapped adjacently. I
+ // think most OSs leave at least a one page gap between
+ // mappings, but better to be safe.
+
+ WriteIntent next ((char*)i->start() + e.len, i->length() - e.len);
+ prepBasicWrite_inlock(bb, &next, lastDbPath);
+ }
+ }
+
+        /** basic write ops / write intents. note there is no particular order to these: if we have
+            two writes to the same location during the group commit interval, it is likely
+            (although not assured) that it is journaled here only once.
+        */
+ static void prepBasicWrites(AlignedBuilder& bb) {
+ scoped_lock lk(privateViews._mutex());
+
+ // each time events switch to a different database we journal a JDbContext
+ RelativePath lastDbPath;
+
+ for( set<WriteIntent>::iterator i = commitJob.writes().begin(); i != commitJob.writes().end(); i++ ) {
+ prepBasicWrite_inlock(bb, &(*i), lastDbPath);
+ }
+ }
+
+ static void resetLogBuffer(/*out*/JSectHeader& h, AlignedBuilder& bb) {
+ bb.reset();
+
+ h.setSectionLen(0xffffffff); // total length, will fill in later
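+            // seqNumber lets recovery skip sections whose writes were already flushed to the data files
+            // before the crash (see RecoveryJob::processSection)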
+ h.seqNumber = getLastDataFileFlushTime();
+ h.fileId = j.curFileId();
+ }
+
+ /** we will build an output buffer ourself and then use O_DIRECT
+ we could be in read lock for this
+ caller handles locking
+ @return partially populated sectheader and _ab set
+ */
+ static void _PREPLOGBUFFER(JSectHeader& h) {
+ assert( cmdLine.dur );
+
+ {
+ // now that we are locked, fully drain deferred notes of write intents
+ DEV d.dbMutex.assertAtLeastReadLocked();
+ Writes& writes = commitJob.wi();
+ writes._deferred.invoke();
+ writes._drained = true;
+ }
+
+ AlignedBuilder& bb = commitJob._ab;
+ resetLogBuffer(h, bb); // adds JSectHeader
+
+ // ops other than basic writes (DurOp's)
+ {
+ for( vector< shared_ptr<DurOp> >::iterator i = commitJob.ops().begin(); i != commitJob.ops().end(); ++i ) {
+ (*i)->serialize(bb);
+ }
+ }
+
+ prepBasicWrites(bb);
+
+ return;
+ }
+ void PREPLOGBUFFER(/*out*/ JSectHeader& h) {
+ Timer t;
+ j.assureLogFileOpen(); // so fileId is set
+ _PREPLOGBUFFER(h);
+ stats.curr->_prepLogBufferMicros += t.micros();
+ }
+
+ }
+}
diff --git a/src/mongo/db/dur_recover.cpp b/src/mongo/db/dur_recover.cpp
new file mode 100644
index 00000000000..a0a8843572c
--- /dev/null
+++ b/src/mongo/db/dur_recover.cpp
@@ -0,0 +1,544 @@
+// @file dur_recover.cpp crash recovery via the journal
+
+/**
+* Copyright (C) 2009 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+
+#include "dur.h"
+#include "dur_stats.h"
+#include "dur_recover.h"
+#include "dur_journal.h"
+#include "dur_journalformat.h"
+#include "durop.h"
+#include "namespace.h"
+#include "../util/mongoutils/str.h"
+#include "../util/bufreader.h"
+#include "../util/concurrency/race.h"
+#include "pdfile.h"
+#include "database.h"
+#include "db.h"
+#include "../util/unittest.h"
+#include "../util/checksum.h"
+#include "cmdline.h"
+#include "curop.h"
+#include "mongommf.h"
+#include "../util/compress.h"
+
+#include <sys/stat.h>
+#include <fcntl.h>
+
+using namespace mongoutils;
+
+namespace mongo {
+
+ namespace dur {
+
+ struct ParsedJournalEntry { /*copyable*/
+ ParsedJournalEntry() : e(0) { }
+
+ // relative path of database for the operation.
+ // might be a pointer into mmaped Journal file
+ const char *dbName;
+
+            // these are pointers into the memory mapped journal file
+ const JEntry *e; // local db sentinel is already parsed out here into dbName
+
+ // if not one of the two simple JEntry's above, this is the operation:
+ shared_ptr<DurOp> op;
+ };
+
+ void removeJournalFiles();
+ path getJournalDir();
+
+ /** get journal filenames, in order. throws if unexpected content found */
+ static void getFiles(path dir, vector<path>& files) {
+ map<unsigned,path> m;
+ for ( boost::filesystem::directory_iterator i( dir );
+ i != boost::filesystem::directory_iterator();
+ ++i ) {
+ boost::filesystem::path filepath = *i;
+ string fileName = boost::filesystem::path(*i).leaf();
+ if( str::startsWith(fileName, "j._") ) {
+ unsigned u = str::toUnsigned( str::after(fileName, '_') );
+ if( m.count(u) ) {
+ uasserted(13531, str::stream() << "unexpected files in journal directory " << dir.string() << " : " << fileName);
+ }
+ m.insert( pair<unsigned,path>(u,filepath) );
+ }
+ }
+ for( map<unsigned,path>::iterator i = m.begin(); i != m.end(); ++i ) {
+ if( i != m.begin() && m.count(i->first - 1) == 0 ) {
+ uasserted(13532,
+ str::stream() << "unexpected file in journal directory " << dir.string()
+                              << " : " << boost::filesystem::path(i->second).leaf() << " : can't find its preceding file");
+ }
+ files.push_back(i->second);
+ }
+ }
+
+ /** read through the memory mapped data of a journal file (journal/j._<n> file)
+ throws
+ */
+ class JournalSectionIterator : boost::noncopyable {
+ auto_ptr<BufReader> _entries;
+ const JSectHeader _h;
+ const char *_lastDbName; // pointer into mmaped journal file
+ const bool _doDurOps;
+ string _uncompressed;
+ public:
+ JournalSectionIterator(const JSectHeader& h, const void *compressed, unsigned compressedLen, bool doDurOpsRecovering) :
+ _h(h),
+ _lastDbName(0)
+ , _doDurOps(doDurOpsRecovering)
+ {
+ assert( doDurOpsRecovering );
+ bool ok = uncompress((const char *)compressed, compressedLen, &_uncompressed);
+ if( !ok ) {
+ // it should always be ok (i think?) as there is a previous check to see that the JSectFooter is ok
+ log() << "couldn't uncompress journal section" << endl;
+ msgasserted(15874, "couldn't uncompress journal section");
+ }
+ const char *p = _uncompressed.c_str();
+ assert( compressedLen == _h.sectionLen() - sizeof(JSectFooter) - sizeof(JSectHeader) );
+ _entries = auto_ptr<BufReader>( new BufReader(p, _uncompressed.size()) );
+ }
+
+ // we work with the uncompressed buffer when doing a WRITETODATAFILES (for speed)
+ JournalSectionIterator(const JSectHeader &h, const void *p, unsigned len) :
+ _entries( new BufReader((const char *) p, len) ),
+ _h(h),
+ _lastDbName(0)
+ , _doDurOps(false)
+
+ { }
+
+ bool atEof() const { return _entries->atEof(); }
+
+ unsigned long long seqNumber() const { return _h.seqNumber; }
+
+ /** get the next entry from the log. this function parses and combines JDbContext and JEntry's.
+ * throws on premature end of section.
+ */
+ void next(ParsedJournalEntry& e) {
+ unsigned lenOrOpCode;
+ _entries->read(lenOrOpCode);
+
+ if (lenOrOpCode > JEntry::OpCode_Min) {
+ switch( lenOrOpCode ) {
+
+ case JEntry::OpCode_Footer: {
+ assert( false );
+ }
+
+ case JEntry::OpCode_FileCreated:
+ case JEntry::OpCode_DropDb: {
+ e.dbName = 0;
+ boost::shared_ptr<DurOp> op = DurOp::read(lenOrOpCode, *_entries);
+ if (_doDurOps) {
+ e.op = op;
+ }
+ return;
+ }
+
+ case JEntry::OpCode_DbContext: {
+ _lastDbName = (const char*) _entries->pos();
+ const unsigned limit = std::min((unsigned)Namespace::MaxNsLen, _entries->remaining());
+ const unsigned len = strnlen(_lastDbName, limit);
+ massert(13533, "problem processing journal file during recovery", _lastDbName[len] == '\0');
+ _entries->skip(len+1); // skip '\0' too
+ _entries->read(lenOrOpCode); // read this for the fall through
+ }
+ // fall through as a basic operation always follows jdbcontext, and we don't have anything to return yet
+
+ default:
+ // fall through
+ ;
+ }
+ }
+
+ // JEntry - a basic write
+ assert( lenOrOpCode && lenOrOpCode < JEntry::OpCode_Min );
+ _entries->rewind(4);
+ e.e = (JEntry *) _entries->skip(sizeof(JEntry));
+ e.dbName = e.e->isLocalDbContext() ? "local" : _lastDbName;
+ assert( e.e->len == lenOrOpCode );
+ _entries->skip(e.e->len);
+ }
+
+ };
+
+ static string fileName(const char* dbName, int fileNo) {
+ stringstream ss;
+ ss << dbName << '.';
+ assert( fileNo >= 0 );
+ if( fileNo == JEntry::DotNsSuffix )
+ ss << "ns";
+ else
+ ss << fileNo;
+
+ // relative name -> full path name
+ path full(dbpath);
+ full /= ss.str();
+ return full.string();
+ }
+
+ RecoveryJob::~RecoveryJob() {
+ DESTRUCTOR_GUARD(
+ if( !_mmfs.empty() )
+ close();
+ )
+ }
+
+ void RecoveryJob::close() {
+ scoped_lock lk(_mx);
+ _close();
+ }
+
+ void RecoveryJob::_close() {
+ MongoFile::flushAll(true);
+ _mmfs.clear();
+ }
+
+ void RecoveryJob::write(const ParsedJournalEntry& entry) {
+ //TODO(mathias): look into making some of these dasserts
+ assert(entry.e);
+ assert(entry.dbName);
+ assert(strnlen(entry.dbName, MaxDatabaseNameLen) < MaxDatabaseNameLen);
+
+ const string fn = fileName(entry.dbName, entry.e->getFileNo());
+ MongoFile* file;
+ {
+ MongoFileFinder finder; // must release lock before creating new MongoMMF
+ file = finder.findByPath(fn);
+ }
+
+ MongoMMF* mmf;
+ if (file) {
+ assert(file->isMongoMMF());
+ mmf = (MongoMMF*)file;
+ }
+ else {
+ if( !_recovering ) {
+ log() << "journal error applying writes, file " << fn << " is not open" << endl;
+ assert(false);
+ }
+ boost::shared_ptr<MongoMMF> sp (new MongoMMF);
+ assert(sp->open(fn, false));
+ _mmfs.push_back(sp);
+ mmf = sp.get();
+ }
+
+ if ((entry.e->ofs + entry.e->len) <= mmf->length()) {
+ assert(mmf->view_write());
+ assert(entry.e->srcData());
+
+ void* dest = (char*)mmf->view_write() + entry.e->ofs;
+ memcpy(dest, entry.e->srcData(), entry.e->len);
+ stats.curr->_writeToDataFilesBytes += entry.e->len;
+ }
+ else {
+ massert(13622, "Trying to write past end of file in WRITETODATAFILES", _recovering);
+ }
+ }
+
+ void RecoveryJob::applyEntry(const ParsedJournalEntry& entry, bool apply, bool dump) {
+ if( entry.e ) {
+ if( dump ) {
+ stringstream ss;
+ ss << " BASICWRITE " << setw(20) << entry.dbName << '.';
+ if( entry.e->isNsSuffix() )
+ ss << "ns";
+ else
+ ss << setw(2) << entry.e->getFileNo();
+ ss << ' ' << setw(6) << entry.e->len << ' ' << /*hex << setw(8) << (size_t) fqe.srcData << dec <<*/
+ " " << hexdump(entry.e->srcData(), entry.e->len);
+ log() << ss.str() << endl;
+ }
+ if( apply ) {
+ write(entry);
+ }
+ }
+ else if(entry.op) {
+ // a DurOp subclass operation
+ if( dump ) {
+ log() << " OP " << entry.op->toString() << endl;
+ }
+ if( apply ) {
+ if( entry.op->needFilesClosed() ) {
+ _close(); // locked in processSection
+ }
+ entry.op->replay();
+ }
+ }
+ }
+
+ void RecoveryJob::applyEntries(const vector<ParsedJournalEntry> &entries) {
+ bool apply = (cmdLine.durOptions & CmdLine::DurScanOnly) == 0;
+ bool dump = cmdLine.durOptions & CmdLine::DurDumpJournal;
+ if( dump )
+ log() << "BEGIN section" << endl;
+
+ for( vector<ParsedJournalEntry>::const_iterator i = entries.begin(); i != entries.end(); ++i ) {
+ applyEntry(*i, apply, dump);
+ }
+
+ if( dump )
+ log() << "END section" << endl;
+ }
+
+ void RecoveryJob::processSection(const JSectHeader *h, const void *p, unsigned len, const JSectFooter *f) {
+ scoped_lock lk(_mx);
+ RACECHECK
+
+            /** todo: should we verify the checksum here to confirm that seqNumber is ok?
+                      that is expensive -- maybe there could be a separate checksum of just the header,
+                      stored within the header itself
+            */
+ if( _recovering && _lastDataSyncedFromLastRun > h->seqNumber + ExtraKeepTimeMs ) {
+ if( h->seqNumber != _lastSeqMentionedInConsoleLog ) {
+ static int n;
+ if( ++n < 10 ) {
+ log() << "recover skipping application of section seq:" << h->seqNumber << " < lsn:" << _lastDataSyncedFromLastRun << endl;
+ }
+ else if( n == 10 ) {
+ log() << "recover skipping application of section more..." << endl;
+ }
+ _lastSeqMentionedInConsoleLog = h->seqNumber;
+ }
+ return;
+ }
+
+ auto_ptr<JournalSectionIterator> i;
+ if( _recovering ) {
+ i = auto_ptr<JournalSectionIterator>(new JournalSectionIterator(*h, p, len, _recovering));
+ }
+ else {
+ i = auto_ptr<JournalSectionIterator>(new JournalSectionIterator(*h, /*after header*/p, /*w/out header*/len));
+ }
+
+ // we use a static so that we don't have to reallocate every time through. occasionally we
+ // go back to a small allocation so that if there were a spiky growth it won't stick forever.
+ static vector<ParsedJournalEntry> entries;
+ entries.clear();
+/** TEMP uncomment
+ RARELY OCCASIONALLY {
+ if( entries.capacity() > 2048 ) {
+ entries.shrink_to_fit();
+ entries.reserve(2048);
+ }
+ }
+*/
+
+ // first read all entries to make sure this section is valid
+ ParsedJournalEntry e;
+ while( !i->atEof() ) {
+ i->next(e);
+ entries.push_back(e);
+ }
+
+ // after the entries check the footer checksum
+ if( _recovering ) {
+ assert( ((const char *)h) + sizeof(JSectHeader) == p );
+ if( !f->checkHash(h, len + sizeof(JSectHeader)) ) {
+ msgasserted(13594, "journal checksum doesn't match");
+ }
+ }
+
+ // got all the entries for one group commit. apply them:
+ applyEntries(entries);
+ }
+
+ /** apply a specific journal file, that is already mmap'd
+ @param p start of the memory mapped file
+ @return true if this is detected to be the last file (ends abruptly)
+ */
+ bool RecoveryJob::processFileBuffer(const void *p, unsigned len) {
+ try {
+ unsigned long long fileId;
+ BufReader br(p,len);
+
+ {
+ // read file header
+ JHeader h;
+ br.read(h);
+
+ /* [dm] not automatically handled. we should eventually handle this automatically. i think:
+ (1) if this is the final journal file
+ (2) and the file size is just the file header in length (or less) -- this is a bit tricky to determine if prealloced
+ then can just assume recovery ended cleanly and not error out (still should log).
+ */
+ uassert(13537,
+ "journal file header invalid. This could indicate corruption in a journal file, or perhaps a crash where sectors in file header were in flight written out of order at time of crash (unlikely but possible).",
+ h.valid());
+
+ if( !h.versionOk() ) {
+ log() << "journal file version number mismatch got:" << hex << h._version
+ << " expected:" << hex << (unsigned) JHeader::CurrentVersion
+ << ". if you have just upgraded, recover with old version of mongod, terminate cleanly, then upgrade."
+ << endl;
+ uasserted(13536, str::stream() << "journal version number mismatch " << h._version);
+ }
+ fileId = h.fileId;
+ if(cmdLine.durOptions & CmdLine::DurDumpJournal) {
+ log() << "JHeader::fileId=" << fileId << endl;
+ }
+ }
+
+ // read sections
+ while ( !br.atEof() ) {
+ JSectHeader h;
+ br.peek(h);
+ if( h.fileId != fileId ) {
+ if( debug || (cmdLine.durOptions & CmdLine::DurDumpJournal) ) {
+ log() << "Ending processFileBuffer at differing fileId want:" << fileId << " got:" << h.fileId << endl;
+ log() << " sect len:" << h.sectionLen() << " seqnum:" << h.seqNumber << endl;
+ }
+ return true;
+ }
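+                    // section layout on disk: JSectHeader | compressed entries | JSectFooter, padded out to Alignment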
+ unsigned slen = h.sectionLen();
+ unsigned dataLen = slen - sizeof(JSectHeader) - sizeof(JSectFooter);
+ const char *hdr = (const char *) br.skip(h.sectionLenWithPadding());
+ const char *data = hdr + sizeof(JSectHeader);
+ const char *footer = data + dataLen;
+ processSection((const JSectHeader*) hdr, data, dataLen, (const JSectFooter*) footer);
+
+ // ctrl c check
+ killCurrentOp.checkForInterrupt(false);
+ }
+ }
+ catch( BufReader::eof& ) {
+ if( cmdLine.durOptions & CmdLine::DurDumpJournal )
+ log() << "ABRUPT END" << endl;
+ return true; // abrupt end
+ }
+
+ return false; // non-abrupt end
+ }
+
+ /** apply a specific journal file */
+ bool RecoveryJob::processFile(path journalfile) {
+ log() << "recover " << journalfile.string() << endl;
+
+ try {
+ if( boost::filesystem::file_size( journalfile.string() ) == 0 ) {
+ log() << "recover info " << journalfile.string() << " has zero length" << endl;
+ return true;
+ }
+ } catch(...) {
+            // if something weird like a permissions problem occurs, keep going so the massert down below can happen (presumably)
+ log() << "recover exception checking filesize" << endl;
+ }
+
+ MemoryMappedFile f;
+ void *p = f.mapWithOptions(journalfile.string().c_str(), MongoFile::READONLY | MongoFile::SEQUENTIAL);
+ massert(13544, str::stream() << "recover error couldn't open " << journalfile.string(), p);
+ return processFileBuffer(p, (unsigned) f.length());
+ }
+
+ /** @param files all the j._0 style files we need to apply for recovery */
+ void RecoveryJob::go(vector<path>& files) {
+ log() << "recover begin" << endl;
+ _recovering = true;
+
+ // load the last sequence number synced to the datafiles on disk before the last crash
+ _lastDataSyncedFromLastRun = journalReadLSN();
+ log() << "recover lsn: " << _lastDataSyncedFromLastRun << endl;
+
+ for( unsigned i = 0; i != files.size(); ++i ) {
+ bool abruptEnd = processFile(files[i]);
+ if( abruptEnd && i+1 < files.size() ) {
+ log() << "recover error: abrupt end to file " << files[i].string() << ", yet it isn't the last journal file" << endl;
+ close();
+ uasserted(13535, "recover abrupt journal file end");
+ }
+ }
+
+ close();
+
+ if( cmdLine.durOptions & CmdLine::DurScanOnly ) {
+ uasserted(13545, str::stream() << "--durOptions " << (int) CmdLine::DurScanOnly << " (scan only) specified");
+ }
+
+ log() << "recover cleaning up" << endl;
+ removeJournalFiles();
+ log() << "recover done" << endl;
+ okToCleanUp = true;
+ _recovering = false;
+ }
+
+ void _recover() {
+ assert( cmdLine.dur );
+
+ boost::filesystem::path p = getJournalDir();
+ if( !exists(p) ) {
+ log() << "directory " << p.string() << " does not exist, there will be no recovery startup step" << endl;
+ okToCleanUp = true;
+ return;
+ }
+
+ vector<path> journalFiles;
+ getFiles(p, journalFiles);
+
+ if( journalFiles.empty() ) {
+ log() << "recover : no journal files present, no recovery needed" << endl;
+ okToCleanUp = true;
+ return;
+ }
+
+ RecoveryJob::get().go(journalFiles);
+ }
+
+ extern mutex groupCommitMutex;
+
+ /** recover from a crash
+ called during startup
+ throws on error
+ */
+ void recover() {
+ // we use a lock so that exitCleanly will wait for us
+ // to finish (or at least to notice what is up and stop)
+ writelock lk;
+
+ // this is so the mutexdebugger doesn't get confused. we are actually single threaded
+ // at this point in the program so it wouldn't have been a true problem (I think)
+ scoped_lock lk2(groupCommitMutex);
+
+ _recover(); // throws on interruption
+ }
+
+ struct BufReaderY { int a,b; };
+ class BufReaderUnitTest : public UnitTest {
+ public:
+ void run() {
+ BufReader r((void*) "abcdabcdabcd", 12);
+ char x;
+ BufReaderY y;
+ r.read(x); //cout << x; // a
+ assert( x == 'a' );
+ r.read(y);
+ r.read(x);
+ assert( x == 'b' );
+ }
+ } brunittest;
+
+ // can't free at termination because order of destruction of global vars is arbitrary
+ RecoveryJob &RecoveryJob::_instance = *(new RecoveryJob());
+
+ } // namespace dur
+
+} // namespace mongo
+
diff --git a/src/mongo/db/dur_recover.h b/src/mongo/db/dur_recover.h
new file mode 100644
index 00000000000..955e730ea05
--- /dev/null
+++ b/src/mongo/db/dur_recover.h
@@ -0,0 +1,50 @@
+// @file dur_recover.h crash recovery via the journal
+
+#pragma once
+
+#include "dur_journalformat.h"
+#include "../util/concurrency/mutex.h"
+#include "../util/file.h"
+
+namespace mongo {
+ class MongoMMF;
+
+ namespace dur {
+ struct ParsedJournalEntry;
+
+ /** call go() to execute a recovery from existing journal files.
+ */
+ class RecoveryJob : boost::noncopyable {
+ public:
+ RecoveryJob() : _lastDataSyncedFromLastRun(0),
+ _mx("recovery"), _recovering(false) { _lastSeqMentionedInConsoleLog = 1; }
+ void go(vector<path>& files);
+ ~RecoveryJob();
+
+ /** @param data data between header and footer. compressed if recovering. */
+ void processSection(const JSectHeader *h, const void *data, unsigned len, const JSectFooter *f);
+
+ void close(); // locks and calls _close()
+
+ static RecoveryJob & get() { return _instance; }
+ private:
+ void write(const ParsedJournalEntry& entry); // actually writes to the file
+ void applyEntry(const ParsedJournalEntry& entry, bool apply, bool dump);
+ void applyEntries(const vector<ParsedJournalEntry> &entries);
+ bool processFileBuffer(const void *, unsigned len);
+ bool processFile(path journalfile);
+ void _close(); // doesn't lock
+
+ list<boost::shared_ptr<MongoMMF> > _mmfs;
+
+ unsigned long long _lastDataSyncedFromLastRun;
+ unsigned long long _lastSeqMentionedInConsoleLog;
+ public:
+ mongo::mutex _mx; // protects _mmfs; see setNoJournal() too
+ private:
+ bool _recovering; // are we in recovery or WRITETODATAFILES
+
+ static RecoveryJob &_instance;
+ };
+ }
+}
diff --git a/src/mongo/db/dur_stats.h b/src/mongo/db/dur_stats.h
new file mode 100644
index 00000000000..50a26d1f215
--- /dev/null
+++ b/src/mongo/db/dur_stats.h
@@ -0,0 +1,49 @@
+// @file dur_stats.h
+
+namespace mongo {
+ namespace dur {
+
+ /** journaling stats. the model here is that the commit thread is the only writer, and that reads are
+ uncommon (from a serverStatus command and such). Thus, there should not be multicore chatter overhead.
+ */
+ struct Stats {
+ Stats();
+ void rotate();
+ BSONObj asObj();
+ unsigned _intervalMicros;
+ struct S {
+ BSONObj _asObj();
+ string _asCSV();
+ string _CSVHeader();
+ void reset();
+
+ unsigned _commits;
+ unsigned _earlyCommits; // count of early commits from commitIfNeeded() or from getDur().commitNow()
+ unsigned long long _journaledBytes;
+ unsigned long long _uncompressedBytes;
+ unsigned long long _writeToDataFilesBytes;
+
+ unsigned long long _prepLogBufferMicros;
+ unsigned long long _writeToJournalMicros;
+ unsigned long long _writeToDataFilesMicros;
+ unsigned long long _remapPrivateViewMicros;
+
+ // undesirable to be in write lock for the group commit (it can be done in a read lock), so good if we
+ // have visibility when this happens. can happen for a couple reasons
+ // - read lock starvation
+ // - file being closed
+ // - data being written faster than the normal group commit interval
+ unsigned _commitsInWriteLock;
+
+ unsigned _dtMillis;
+ };
+ S *curr;
+ private:
+ S _a,_b;
+ unsigned long long _lastRotate;
+ S* other();
+ };
+ extern Stats stats;
+
+ }
+}
diff --git a/src/mongo/db/dur_writetodatafiles.cpp b/src/mongo/db/dur_writetodatafiles.cpp
new file mode 100644
index 00000000000..d77b0482c20
--- /dev/null
+++ b/src/mongo/db/dur_writetodatafiles.cpp
@@ -0,0 +1,94 @@
+// @file dur_writetodatafiles.cpp apply the writes back to the non-private MMF after they are for certain in redo log
+
+/**
+* Copyright (C) 2009 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "dur_commitjob.h"
+#include "dur_stats.h"
+#include "dur_recover.h"
+#include "../util/timer.h"
+
+namespace mongo {
+ namespace dur {
+
+ void debugValidateAllMapsMatch();
+
+ static void WRITETODATAFILES_Impl1(const JSectHeader& h, AlignedBuilder& uncompressed) {
+ LockMongoFilesShared lk;
+ LOG(3) << "journal WRITETODATAFILES 1" << endl;
+ RecoveryJob::get().processSection(&h, uncompressed.buf(), uncompressed.len(), 0);
+ LOG(3) << "journal WRITETODATAFILES 2" << endl;
+ }
+
+#if 0
+ // the old implementation. doesn't work with groupCommitWithLimitedLocks()
+ void WRITETODATAFILES_Impl2() {
+ /* we go backwards as what is at the end is most likely in the cpu cache. it won't be much, but we'll take it. */
+ for( set<WriteIntent>::const_iterator it(commitJob.writes().begin()), end(commitJob.writes().end()); it != end; ++it ) {
+ const WriteIntent& intent = *it;
+ stats.curr->_writeToDataFilesBytes += intent.length();
+ dassert(intent.w_ptr);
+ memcpy(intent.w_ptr, intent.start(), intent.length());
+ }
+ }
+#endif
+
+#if defined(_EXPERIMENTAL)
+ // doesn't work with groupCommitWithLimitedLocks()
+ void WRITETODATAFILES_Impl3() {
+ /* we go backwards as what is at the end is most likely in the cpu cache. it won't be much, but we'll take it. */
+ for( set<WriteIntent>::const_iterator it(commitJob.writes().begin()), end(commitJob.writes().end()); it != end; ++it ) {
+ const WriteIntent& intent = *it;
+ stats.curr->_writeToDataFilesBytes += intent.length();
+ dassert(intent.w_ptr);
+ memcpy(intent.w_ptr,
+ commitJob._ab.atOfs(intent.ofsInJournalBuffer),
+ intent.length());
+ }
+ }
+#endif
+
+ /** apply the writes back to the non-private MMF after they are for certain in redo log
+
+ (1) todo we don't need to write back everything every group commit. we MUST write back
+ that which is going to be a remapped on its private view - but that might not be all
+ views.
+
+ (2) todo should we do this using N threads? would be quite easy
+ see Hackenberg paper table 5 and 6. 2 threads might be a good balance.
+
+ (3) with enough work, we could do this outside the read lock. it's a bit tricky though.
+ - we couldn't do it from the private views then as they may be changing. would have to then
+ be from the journal alignedbuffer.
+ - we need to be careful the file isn't unmapped on us -- perhaps a mutex or something
+ with MongoMMF on closes or something to coordinate that.
+
+ concurrency: in mmmutex, not necessarily in dbMutex
+
+ @see https://docs.google.com/drawings/edit?id=1TklsmZzm7ohIZkwgeK6rMvsdaR13KjtJYMsfLr175Zc&hl=en
+ */
+
+ void WRITETODATAFILES(const JSectHeader& h, AlignedBuilder& uncompressed) {
+ Timer t;
+ WRITETODATAFILES_Impl1(h, uncompressed);
+ unsigned long long m = t.micros();
+ stats.curr->_writeToDataFilesMicros += m;
+ LOG(2) << "journal WRITETODATAFILES " << m / 1000.0 << "ms" << endl;
+ }
+
+ }
+}
diff --git a/src/mongo/db/durop.cpp b/src/mongo/db/durop.cpp
new file mode 100644
index 00000000000..80ee5043410
--- /dev/null
+++ b/src/mongo/db/durop.cpp
@@ -0,0 +1,161 @@
+// @file durop.cpp
+
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "concurrency.h"
+#include "../util/alignedbuilder.h"
+#include "../util/mongoutils/str.h"
+#include "../util/file.h"
+#include "mongommf.h"
+#include "durop.h"
+#include "../util/file_allocator.h"
+
+using namespace mongoutils;
+
+namespace mongo {
+
+ extern string dbpath; // --dbpath parm
+
+ void _deleteDataFiles(const char *);
+
+ namespace dur {
+
+ /** read a durop from journal file referenced by br.
+ @param opcode the opcode which has already been written from the bufreader
+ */
+ shared_ptr<DurOp> DurOp::read(unsigned opcode, BufReader& br) {
+ shared_ptr<DurOp> op;
+ switch( opcode ) {
+ case JEntry::OpCode_FileCreated:
+ op = shared_ptr<DurOp>( new FileCreatedOp(br) );
+ break;
+ case JEntry::OpCode_DropDb:
+ op = shared_ptr<DurOp>( new DropDbOp(br) );
+ break;
+ default:
+ massert(13546, (str::stream() << "journal recover: unrecognized opcode in journal " << opcode), false);
+ }
+ return op;
+ }
+
+ void DurOp::serialize(AlignedBuilder& ab) {
+ ab.appendNum(_opcode);
+ _serialize(ab);
+ }
+
+ DropDbOp::DropDbOp(BufReader& log) : DurOp(JEntry::OpCode_DropDb) {
+ unsigned long long reserved;
+ log.read(reserved);
+ log.read(reserved);
+ log.readStr(_db);
+ string reservedStr;
+ log.readStr(reservedStr);
+ }
+
+ void DropDbOp::_serialize(AlignedBuilder& ab) {
+ ab.appendNum((unsigned long long) 0); // reserved for future use
+ ab.appendNum((unsigned long long) 0); // reserved for future use
+ ab.appendStr(_db);
+ ab.appendStr(""); // reserved
+ }
+
+ /** throws */
+ void DropDbOp::replay() {
+ log() << "recover replay drop db " << _db << endl;
+ _deleteDataFiles(_db.c_str());
+ }
+
+ FileCreatedOp::FileCreatedOp(string f, unsigned long long l) :
+ DurOp(JEntry::OpCode_FileCreated) {
+ _p = RelativePath::fromFullPath(f);
+ _len = l;
+ }
+
+ FileCreatedOp::FileCreatedOp(BufReader& log) : DurOp(JEntry::OpCode_FileCreated) {
+ unsigned long long reserved;
+ log.read(reserved);
+ log.read(reserved);
+ log.read(_len); // size of file, not length of name
+ string s;
+ log.readStr(s);
+ _p._p = s;
+ }
+
+ void FileCreatedOp::_serialize(AlignedBuilder& ab) {
+ ab.appendNum((unsigned long long) 0); // reserved for future use
+ ab.appendNum((unsigned long long) 0); // reserved for future use
+ ab.appendNum(_len);
+ ab.appendStr(_p.toString());
+ }
+
+ string FileCreatedOp::toString() {
+ return str::stream() << "FileCreatedOp " << _p.toString() << ' ' << _len/1024.0/1024.0 << "MB";
+ }
+
+ // if an operation deletes or creates a file (or moves etc.), it may need files closed.
+ bool FileCreatedOp::needFilesClosed() {
+ return exists( _p.asFullPath() );
+ }
+
+ void FileCreatedOp::replay() {
+        // i believe the code assumes new files are filled with zeros. thus we have to recreate the file
+        // (or at least rewrite it), even if it was already the right length. perhaps one day we should
+        // change that, although it is easier to avoid defects if we can assume it is zeros.
+ string full = _p.asFullPath();
+ if( exists(full) ) {
+ try {
+ remove(full);
+ }
+ catch(std::exception& e) {
+ log(1) << "recover info FileCreateOp::replay unlink " << e.what() << endl;
+ }
+ }
+
+ log() << "recover create file " << full << ' ' << _len/1024.0/1024.0 << "MB" << endl;
+ if( MemoryMappedFile::exists(full) ) {
+ // first delete if exists.
+ try {
+ remove(full);
+ }
+ catch(...) {
+ log() << "warning could not delete file " << full << endl;
+ }
+ }
+ ensureParentDirCreated(full);
+ File f;
+ f.open(full.c_str());
+ massert(13547, str::stream() << "recover couldn't create file " << full, f.is_open());
+ unsigned long long left = _len;
+ const unsigned blksz = 64 * 1024;
+ scoped_array<char> v( new char[blksz] );
+ memset( v.get(), 0, blksz );
+ fileofs ofs = 0;
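+        // write zeros blksz bytes at a time until the whole _len bytes are on disk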
+ while( left ) {
+ unsigned long long w = left < blksz ? left : blksz;
+ f.write(ofs, v.get(), (unsigned) w);
+ left -= w;
+ ofs += w;
+ }
+ f.fsync();
+ flushMyDirectory(full);
+ massert(13628, str::stream() << "recover failure writing file " << full, !f.bad() );
+ }
+
+ }
+
+}
diff --git a/src/mongo/db/durop.h b/src/mongo/db/durop.h
new file mode 100644
index 00000000000..9ab1bfcbede
--- /dev/null
+++ b/src/mongo/db/durop.h
@@ -0,0 +1,109 @@
+// @file durop.h class DurOp and descendants
+
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "dur_journalformat.h"
+#include "../util/bufreader.h"
+#include "../util/paths.h"
+
+namespace mongo {
+
+ class AlignedBuilder;
+
+ namespace dur {
+
+ /** DurOp - Operations we journal that aren't just basic writes.
+ *
+ * Basic writes are logged as JEntry's, and indicated in ram temporarily as struct dur::WriteIntent.
+ * We don't make WriteIntent inherit from DurOp to keep it as lean as possible as there will be millions of
+ * them (we don't want a vtable for example there).
+ *
+ * For each op we want to journal, we define a subclass.
+ */
+ class DurOp { /* copyable */
+ public:
+ // @param opcode a sentinel value near max unsigned which uniquely identifies the operation.
+ // @see dur::JEntry
+ DurOp(unsigned opcode) : _opcode(opcode) { }
+
+ virtual ~DurOp() { }
+
+ /** serialize the op out to a builder which will then be written (presumably) to the journal */
+ void serialize(AlignedBuilder& ab);
+
+ /** read a durop from journal file referenced by br.
+ @param opcode the opcode which has already been written from the bufreader
+ */
+ static shared_ptr<DurOp> read(unsigned opcode, BufReader& br);
+
+ /** replay the operation (during recovery)
+ throws
+
+ For now, these are not replayed during the normal WRITETODATAFILES phase, since these
+ operations are handled in other parts of the code. At some point this may change.
+ */
+ virtual void replay() = 0;
+
+ virtual string toString() = 0;
+
+            /** if the op requires all files to be closed before doing its work, returns true. */
+ virtual bool needFilesClosed() { return false; }
+
+ protected:
+ /** DurOp will have already written the opcode for you */
+ virtual void _serialize(AlignedBuilder& ab) = 0;
+
+ private:
+ const unsigned _opcode;
+ };
+
+ /** indicates creation of a new file */
+ class FileCreatedOp : public DurOp {
+ public:
+ FileCreatedOp(BufReader& log);
+ /** param f filename to create with path */
+ FileCreatedOp(string f, unsigned long long l);
+ virtual void replay();
+ virtual string toString();
+ virtual bool needFilesClosed();
+ protected:
+ virtual void _serialize(AlignedBuilder& ab);
+ private:
+ RelativePath _p;
+ unsigned long long _len; // size of file, not length of name
+ };
+
+ /** record drop of a database */
+ class DropDbOp : public DurOp {
+ public:
+ DropDbOp(BufReader& log);
+ DropDbOp(string db) :
+ DurOp(JEntry::OpCode_DropDb), _db(db) { }
+ virtual void replay();
+ virtual string toString() { return string("DropDbOp ") + _db; }
+ virtual bool needFilesClosed() { return true; }
+ protected:
+ virtual void _serialize(AlignedBuilder& ab);
+ private:
+ string _db;
+ };
+
+ }
+
+}
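The contract above -- a fixed opcode passed to the DurOp constructor, _serialize() appending only the payload, and replay() reapplying the effect during recovery -- is easiest to see in a subclass. The sketch below is illustrative only: TruncateFileOp and JEntry::OpCode_TruncateFile are invented names, not part of this header, and the AlignedBuilder append calls are assumptions about its interface.

// Hypothetical sketch of a DurOp subclass; names and builder calls are assumed.
class TruncateFileOp : public DurOp {
public:
    TruncateFileOp(string f, unsigned long long newLen)
        : DurOp(JEntry::OpCode_TruncateFile /* assumed sentinel opcode */),
          _p(f), _len(newLen) { }
    virtual void replay() {
        // during recovery, reapply the truncation to the named data file
        // (FileCreatedOp::replay above shows the File/flushMyDirectory pattern)
    }
    virtual string toString() { return str::stream() << "TruncateFileOp " << _p; }
    virtual bool needFilesClosed() { return true; }
protected:
    virtual void _serialize(AlignedBuilder& ab) {
        // DurOp::serialize() has already written the opcode; append only the payload
        ab.appendNum(_len);        // assumed AlignedBuilder helpers
        ab.appendStr(_p);
    }
private:
    string _p;                     // relative path of the file
    unsigned long long _len;       // length to truncate to
};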
diff --git a/src/mongo/db/extsort.cpp b/src/mongo/db/extsort.cpp
new file mode 100644
index 00000000000..06a9756cc0a
--- /dev/null
+++ b/src/mongo/db/extsort.cpp
@@ -0,0 +1,245 @@
+// extsort.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+
+#include "extsort.h"
+#include "namespace-inl.h"
+#include "../util/file.h"
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+namespace mongo {
+
+ IndexInterface *BSONObjExternalSorter::extSortIdxInterface;
+ Ordering BSONObjExternalSorter::extSortOrder( Ordering::make(BSONObj()) );
+ unsigned long long BSONObjExternalSorter::_compares = 0;
+
+ BSONObjExternalSorter::BSONObjExternalSorter( IndexInterface &i, const BSONObj & order , long maxFileSize )
+ : _idxi(i), _order( order.getOwned() ) , _maxFilesize( maxFileSize ) ,
+ _arraySize(1000000), _cur(0), _curSizeSoFar(0), _sorted(0) {
+
+ stringstream rootpath;
+ rootpath << dbpath;
+ if ( dbpath[dbpath.size()-1] != '/' )
+ rootpath << "/";
+ rootpath << "_tmp/esort." << time(0) << "." << rand() << "/";
+ _root = rootpath.str();
+
+ log(1) << "external sort root: " << _root.string() << endl;
+
+ create_directories( _root );
+ _compares = 0;
+ }
+
+ BSONObjExternalSorter::~BSONObjExternalSorter() {
+ if ( _cur ) {
+ delete _cur;
+ _cur = 0;
+ }
+ unsigned long removed = remove_all( _root );
+ wassert( removed == 1 + _files.size() );
+ }
+
+ void BSONObjExternalSorter::_sortInMem() {
+        // extSortComp needs to use globals
+        // qsort_r only seems to be available on BSD, which is what I'd really want to use here
+ dblock l;
+ extSortIdxInterface = &_idxi;
+ extSortOrder = Ordering::make(_order);
+ _cur->sort( BSONObjExternalSorter::extSortComp );
+ }
+
+ void BSONObjExternalSorter::sort() {
+ uassert( 10048 , "already sorted" , ! _sorted );
+
+ _sorted = true;
+
+ if ( _cur && _files.size() == 0 ) {
+ _sortInMem();
+ log(1) << "\t\t not using file. size:" << _curSizeSoFar << " _compares:" << _compares << endl;
+ return;
+ }
+
+ if ( _cur ) {
+ finishMap();
+ }
+
+ if ( _cur ) {
+ delete _cur;
+ _cur = 0;
+ }
+
+ if ( _files.size() == 0 )
+ return;
+
+ }
+
+ void BSONObjExternalSorter::add( const BSONObj& o , const DiskLoc & loc ) {
+ uassert( 10049 , "sorted already" , ! _sorted );
+
+ if ( ! _cur ) {
+ _cur = new InMemory( _arraySize );
+ }
+
+ Data& d = _cur->getNext();
+ d.first = o.getOwned();
+ d.second = loc;
+
+ long size = o.objsize();
+ _curSizeSoFar += size + sizeof( DiskLoc ) + sizeof( BSONObj );
+
+ if ( _cur->hasSpace() == false || _curSizeSoFar > _maxFilesize ) {
+ finishMap();
+ log(1) << "finishing map" << endl;
+ }
+
+ }
+
+ void BSONObjExternalSorter::finishMap() {
+ uassert( 10050 , "bad" , _cur );
+
+ _curSizeSoFar = 0;
+ if ( _cur->size() == 0 )
+ return;
+
+ _sortInMem();
+
+ stringstream ss;
+ ss << _root.string() << "/file." << _files.size();
+ string file = ss.str();
+
+ // todo: it may make sense to fadvise that this not be cached so that building the index doesn't
+ // eject other things the db is using from the file system cache. while we will soon be reading
+ // this back, if it fit in ram, there wouldn't have been a need for an external sort in the first
+ // place.
+
+ ofstream out;
+ out.open( file.c_str() , ios_base::out | ios_base::binary );
+ assertStreamGood( 10051 , (string)"couldn't open file: " + file , out );
+
+ int num = 0;
+ for ( InMemory::iterator i=_cur->begin(); i != _cur->end(); ++i ) {
+ Data p = *i;
+ out.write( p.first.objdata() , p.first.objsize() );
+ out.write( (char*)(&p.second) , sizeof( DiskLoc ) );
+ num++;
+ }
+
+ _cur->clear();
+
+ _files.push_back( file );
+ out.close();
+
+ log(2) << "Added file: " << file << " with " << num << "objects for external sort" << endl;
+ }
+
+ // ---------------------------------
+
+ BSONObjExternalSorter::Iterator::Iterator( BSONObjExternalSorter * sorter ) :
+ _cmp( sorter->_idxi, sorter->_order ) , _in( 0 ) {
+
+ for ( list<string>::iterator i=sorter->_files.begin(); i!=sorter->_files.end(); i++ ) {
+ _files.push_back( new FileIterator( *i ) );
+ _stash.push_back( pair<Data,bool>( Data( BSONObj() , DiskLoc() ) , false ) );
+ }
+
+ if ( _files.size() == 0 && sorter->_cur ) {
+ _in = sorter->_cur;
+ _it = sorter->_cur->begin();
+ }
+ }
+
+ BSONObjExternalSorter::Iterator::~Iterator() {
+ for ( vector<FileIterator*>::iterator i=_files.begin(); i!=_files.end(); i++ )
+ delete *i;
+ _files.clear();
+ }
+
+ bool BSONObjExternalSorter::Iterator::more() {
+
+ if ( _in )
+ return _it != _in->end();
+
+ for ( vector<FileIterator*>::iterator i=_files.begin(); i!=_files.end(); i++ )
+ if ( (*i)->more() )
+ return true;
+ for ( vector< pair<Data,bool> >::iterator i=_stash.begin(); i!=_stash.end(); i++ )
+ if ( i->second )
+ return true;
+ return false;
+ }
+
+ BSONObjExternalSorter::Data BSONObjExternalSorter::Iterator::next() {
+
+ if ( _in ) {
+ Data& d = *_it;
+ ++_it;
+ return d;
+ }
+
+ Data best;
+ int slot = -1;
+
+ for ( unsigned i=0; i<_stash.size(); i++ ) {
+
+ if ( ! _stash[i].second ) {
+ if ( _files[i]->more() )
+ _stash[i] = pair<Data,bool>( _files[i]->next() , true );
+ else
+ continue;
+ }
+
+ if ( slot == -1 || _cmp( best , _stash[i].first ) == 0 ) {
+ best = _stash[i].first;
+ slot = i;
+ }
+
+ }
+
+ assert( slot >= 0 );
+ _stash[slot].second = false;
+
+ return best;
+ }
+
+ // -----------------------------------
+
+ BSONObjExternalSorter::FileIterator::FileIterator( string file ) {
+ unsigned long long length;
+ _buf = (char*)_file.map( file.c_str() , length , MemoryMappedFile::SEQUENTIAL );
+ massert( 10308 , "mmap failed" , _buf );
+ assert( length == (unsigned long long) file_size( file ) );
+ _end = _buf + length;
+ }
+ BSONObjExternalSorter::FileIterator::~FileIterator() {}
+
+ bool BSONObjExternalSorter::FileIterator::more() {
+ return _buf < _end;
+ }
+
+ BSONObjExternalSorter::Data BSONObjExternalSorter::FileIterator::next() {
+ BSONObj o( _buf );
+ _buf += o.objsize();
+ DiskLoc * l = (DiskLoc*)_buf;
+ _buf += 8;
+ return Data( o , *l );
+ }
+
+}
diff --git a/src/mongo/db/extsort.h b/src/mongo/db/extsort.h
new file mode 100644
index 00000000000..15a6d441849
--- /dev/null
+++ b/src/mongo/db/extsort.h
@@ -0,0 +1,150 @@
+// extsort.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../pch.h"
+#include "jsobj.h"
+#include "namespace-inl.h"
+#include "curop-inl.h"
+#include "../util/array.h"
+
+namespace mongo {
+
+ /**
+ for external (disk) sorting by BSONObj and attaching a value
+ */
+ class BSONObjExternalSorter : boost::noncopyable {
+ public:
+ BSONObjExternalSorter( IndexInterface &i, const BSONObj & order = BSONObj() , long maxFileSize = 1024 * 1024 * 100 );
+ ~BSONObjExternalSorter();
+ typedef pair<BSONObj,DiskLoc> Data;
+
+ private:
+ IndexInterface& _idxi;
+
+ static int _compare(IndexInterface& i, const Data& l, const Data& r, const Ordering& order) {
+ RARELY killCurrentOp.checkForInterrupt();
+ _compares++;
+ int x = i.keyCompare(l.first, r.first, order);
+ if ( x )
+ return x;
+ return l.second.compare( r.second );
+ }
+
+ class MyCmp {
+ public:
+ MyCmp( IndexInterface& i, BSONObj order = BSONObj() ) : _i(i), _order( Ordering::make(order) ) {}
+ bool operator()( const Data &l, const Data &r ) const {
+ return _compare(_i, l, r, _order) < 0;
+ };
+ private:
+ IndexInterface& _i;
+ const Ordering _order;
+ };
+
+ static IndexInterface *extSortIdxInterface;
+ static Ordering extSortOrder;
+ static int extSortComp( const void *lv, const void *rv ) {
+ DEV RARELY {
+ d.dbMutex.assertWriteLocked(); // must be as we use a global var
+ }
+ Data * l = (Data*)lv;
+ Data * r = (Data*)rv;
+ return _compare(*extSortIdxInterface, *l, *r, extSortOrder);
+ };
+
+ class FileIterator : boost::noncopyable {
+ public:
+ FileIterator( string file );
+ ~FileIterator();
+ bool more();
+ Data next();
+ private:
+ MemoryMappedFile _file;
+ char * _buf;
+ char * _end;
+ };
+
+ public:
+
+ typedef FastArray<Data> InMemory;
+
+ class Iterator : boost::noncopyable {
+ public:
+
+ Iterator( BSONObjExternalSorter * sorter );
+ ~Iterator();
+ bool more();
+ Data next();
+
+ private:
+ MyCmp _cmp;
+ vector<FileIterator*> _files;
+ vector< pair<Data,bool> > _stash;
+
+ InMemory * _in;
+ InMemory::iterator _it;
+
+ };
+
+ void add( const BSONObj& o , const DiskLoc & loc );
+ void add( const BSONObj& o , int a , int b ) {
+ add( o , DiskLoc( a , b ) );
+ }
+
+ /* call after adding values, and before fetching the iterator */
+ void sort();
+
+ auto_ptr<Iterator> iterator() {
+ uassert( 10052 , "not sorted" , _sorted );
+ return auto_ptr<Iterator>( new Iterator( this ) );
+ }
+
+ int numFiles() {
+ return _files.size();
+ }
+
+ long getCurSizeSoFar() { return _curSizeSoFar; }
+
+ void hintNumObjects( long long numObjects ) {
+ if ( numObjects < _arraySize )
+ _arraySize = (int)(numObjects + 100);
+ }
+
+ private:
+
+ void _sortInMem();
+
+ void sort( string file );
+ void finishMap();
+
+ BSONObj _order;
+ long _maxFilesize;
+ path _root;
+
+ int _arraySize;
+ InMemory * _cur;
+ long _curSizeSoFar;
+
+ list<string> _files;
+ bool _sorted;
+
+ static unsigned long long _compares;
+ };
+}
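The add()/sort()/iterator() lifecycle declared above is the whole public surface of the sorter. A minimal usage sketch follows; the IndexInterface reference, the key pattern, and the fabricated DiskLoc coordinates are placeholders, not code from this tree.

// Sketch only: 'idx' and the generated keys/DiskLocs are placeholders.
void externalSortExample( IndexInterface& idx ) {
    BSONObjExternalSorter sorter( idx, BSON( "a" << 1 ), 1024 * 1024 * 100 );
    sorter.hintNumObjects( 10000 );               // pre-size the in-memory array

    for ( int i = 0; i < 10000; i++ ) {
        BSONObj key = BSON( "" << i );            // the index key to sort by
        sorter.add( key, /*file#*/ 0, /*offset*/ i * 16 );
    }

    sorter.sort();                                // required before iterator()

    auto_ptr<BSONObjExternalSorter::Iterator> it = sorter.iterator();
    while ( it->more() ) {
        BSONObjExternalSorter::Data d = it->next();   // pair<BSONObj,DiskLoc>
        // d.first is the key, d.second is the DiskLoc it was added with
    }
}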
diff --git a/src/mongo/db/filever.h b/src/mongo/db/filever.h
new file mode 100644
index 00000000000..e89a8243dcf
--- /dev/null
+++ b/src/mongo/db/filever.h
@@ -0,0 +1,30 @@
+/* filever.h */
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+namespace mongo {
+
+ inline void checkDataFileVersion(NamespaceDetails& d) {
+ }
+
+ inline void checkIndexFileVersion(NamespaceDetails& d) {
+ }
+
+}
+
diff --git a/src/mongo/db/flushtest.cpp b/src/mongo/db/flushtest.cpp
new file mode 100644
index 00000000000..2009d922950
--- /dev/null
+++ b/src/mongo/db/flushtest.cpp
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include <stdio.h>
+#include "../util/goodies.h"
+#include <fcntl.h>
+
+namespace mongo {
+
+#if defined(F_FULLFSYNC)
+ void fullsync(int f) {
+ fcntl( f, F_FULLFSYNC );
+ }
+#else
+ void fullsync(int f) {
+ fdatasync(f);
+ }
+#endif
+
+ int main(int argc, char* argv[], char *envp[] ) {
+ cout << "hello" << endl;
+
+ FILE *f = fopen("/data/db/temptest", "a");
+
+ if ( f == 0 ) {
+ cout << "can't open file\n";
+ return 1;
+ }
+
+ {
+ Timer t;
+ for ( int i = 0; i < 50000; i++ )
+ fwrite("abc", 3, 1, f);
+ cout << "small writes: " << t.millis() << "ms" << endl;
+ }
+
+ {
+ Timer t;
+ for ( int i = 0; i < 10000; i++ ) {
+ fwrite("abc", 3, 1, f);
+ fflush(f);
+ fsync( fileno( f ) );
+ }
+ int ms = t.millis();
+ cout << "flush: " << ms << "ms, " << ms / 10000.0 << "ms/request" << endl;
+ }
+
+ {
+ Timer t;
+ for ( int i = 0; i < 500; i++ ) {
+ fwrite("abc", 3, 1, f);
+ fflush(f);
+ fsync( fileno( f ) );
+ sleepmillis(2);
+ }
+ int ms = t.millis() - 500 * 2;
+ cout << "flush with sleeps: " << ms << "ms, " << ms / 500.0 << "ms/request" << endl;
+ }
+
+ char buf[8192];
+ for ( int pass = 0; pass < 2; pass++ ) {
+ cout << "pass " << pass << endl;
+ {
+ Timer t;
+ int n = 500;
+ for ( int i = 0; i < n; i++ ) {
+ if ( pass == 0 )
+ fwrite("abc", 3, 1, f);
+ else
+ fwrite(buf, 8192, 1, f);
+ buf[0]++;
+ fflush(f);
+ fullsync(fileno(f));
+ }
+ int ms = t.millis();
+ cout << "fullsync: " << ms << "ms, " << ms / ((double) n) << "ms/request" << endl;
+ }
+
+ {
+ Timer t;
+ for ( int i = 0; i < 500; i++ ) {
+ if ( pass == 0 )
+ fwrite("abc", 3, 1, f);
+ else
+ fwrite(buf, 8192, 1, f);
+ buf[0]++;
+ fflush(f);
+ fullsync(fileno(f));
+ sleepmillis(2);
+ }
+ int ms = t.millis() - 2 * 500;
+ cout << "fullsync with sleeps: " << ms << "ms, " << ms / 500.0 << "ms/request" << endl;
+ }
+ }
+
+ // without growing
+ {
+ fclose(f);
+ /* try from beginning of the file, where we aren't appending and changing the file length,
+ to see if this is faster as the directory entry then doesn't have to be flushed (if noatime in effect).
+ */
+ f = fopen("/data/db/temptest", "r+");
+ Timer t;
+ int n = 500;
+ for ( int i = 0; i < n; i++ ) {
+ fwrite("xyz", 3, 1, f);
+ fflush(f);
+ fullsync(fileno(f));
+ }
+ int ms = t.millis();
+ cout << "fullsync without growing: " << ms << "ms, " << ms / ((double) n) << "ms/request" << endl;
+ }
+
+ // without growing, with delay
+ {
+ fclose(f);
+ /* try from beginning of the file, where we aren't appending and changing the file length,
+ to see if this is faster as the directory entry then doesn't have to be flushed (if noatime in effect).
+ */
+ f = fopen("/data/db/temptest", "r+");
+ Timer t;
+ int n = 500;
+ for ( int i = 0; i < n; i++ ) {
+ fwrite("xyz", 3, 1, f);
+ fflush(f);
+ fullsync(fileno(f));
+ sleepmillis(2);
+ }
+ int ms = t.millis() - 2 * 500;
+ cout << "fullsync without growing with sleeps: " << ms << "ms, " << ms / ((double) n) << "ms/request" << endl;
+ }
+
+ return 0;
+ }
+
+} // namespace mongo
diff --git a/src/mongo/db/geo/2d.cpp b/src/mongo/db/geo/2d.cpp
new file mode 100644
index 00000000000..f05ce4315b2
--- /dev/null
+++ b/src/mongo/db/geo/2d.cpp
@@ -0,0 +1,3289 @@
+// geo2d.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "../namespace-inl.h"
+#include "../jsobj.h"
+#include "../index.h"
+#include "../../util/unittest.h"
+#include "../commands.h"
+#include "../pdfile.h"
+#include "../btree.h"
+#include "../curop-inl.h"
+#include "../matcher.h"
+#include "../queryutil.h"
+#include "core.h"
+#include "../../util/timer.h"
+
+// Note: we use IndexInterface herein to talk to the btree code. In the future it would be nice to
+// be able to use the V1 key class (see key.h) instead of toBson(), which has some cost.
+// toBson() is new with V1, so this could be slower than it used to be; a quick profiling pass
+// might make sense.
+
+namespace mongo {
+
+ class GeoKeyNode {
+ GeoKeyNode();
+ public:
+ GeoKeyNode( DiskLoc bucket, int keyOfs, DiskLoc r, BSONObj k) : _bucket( bucket ), _keyOfs( keyOfs ), recordLoc(r), _key(k) { }
+ const DiskLoc _bucket;
+ const int _keyOfs;
+ const DiskLoc recordLoc;
+ const BSONObj _key;
+ };
+
+ // just use old indexes for geo for now. todo.
+// typedef BtreeBucket<V0> GeoBtreeBucket;
+// typedef GeoBtreeBucket::KeyNode GeoKeyNode;
+
+//#define BTREE btree<V0>
+
+#if 0
+# define CDEBUG -1
+#else
+# define CDEBUG 10
+#endif
+
+#if 0
+# define GEODEBUGGING
+# define GEODEBUG(x) cout << x << endl;
+# define GEODEBUGPRINT(x) PRINT(x)
+ inline void PREFIXDEBUG(GeoHash prefix, const GeoConvert* g) {
+ if (!prefix.constrains()) {
+ cout << "\t empty prefix" << endl;
+ return ;
+ }
+
+ Point ll (g, prefix); // lower left
+ prefix.move(1,1);
+ Point tr (g, prefix); // top right
+
+ Point center ( (ll._x+tr._x)/2, (ll._y+tr._y)/2 );
+ double radius = fabs(ll._x - tr._x) / 2;
+
+ cout << "\t ll: " << ll.toString() << " tr: " << tr.toString()
+ << " center: " << center.toString() << " radius: " << radius << endl;
+
+ }
+#else
+# define GEODEBUG(x)
+# define GEODEBUGPRINT(x)
+# define PREFIXDEBUG(x, y)
+#endif
+
+ const double EARTH_RADIUS_KM = 6371;
+ const double EARTH_RADIUS_MILES = EARTH_RADIUS_KM * 0.621371192;
+
+ enum GeoDistType {
+ GEO_PLAIN,
+ GEO_SPHERE
+ };
+
+ inline double computeXScanDistance(double y, double maxDistDegrees) {
+        // TODO: this overestimates for large maxDistDegrees far from the equator
+ return maxDistDegrees / min(cos(deg2rad(min(+89.0, y + maxDistDegrees))),
+ cos(deg2rad(max(-89.0, y - maxDistDegrees))));
+ }
+
+ GeoBitSets geoBitSets;
+
+ const string GEO2DNAME = "2d";
+
+ class Geo2dType : public IndexType , public GeoConvert {
+ public:
+ virtual ~Geo2dType() { }
+
+ Geo2dType( const IndexPlugin * plugin , const IndexSpec* spec )
+ : IndexType( plugin , spec ) {
+
+ BSONObjBuilder orderBuilder;
+
+ BSONObjIterator i( spec->keyPattern );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( e.type() == String && GEO2DNAME == e.valuestr() ) {
+ uassert( 13022 , "can't have 2 geo field" , _geo.size() == 0 );
+ uassert( 13023 , "2d has to be first in index" , _other.size() == 0 );
+ _geo = e.fieldName();
+ }
+ else {
+ _other.push_back( e.fieldName() );
+ }
+ orderBuilder.append( "" , 1 );
+ }
+
+ uassert( 13024 , "no geo field specified" , _geo.size() );
+
+ double bits = _configval( spec , "bits" , 26 ); // for lat/long, ~ 1ft
+
+ uassert( 13028 , "bits in geo index must be between 1 and 32" , bits > 0 && bits <= 32 );
+
+ _bits = (unsigned) bits;
+
+ _max = _configval( spec , "max" , 180.0 );
+ _min = _configval( spec , "min" , -180.0 );
+
+ double numBuckets = (1024 * 1024 * 1024 * 4.0);
+
+ _scaling = numBuckets / ( _max - _min );
+
+ _order = orderBuilder.obj();
+
+ GeoHash a(0, 0, _bits);
+ GeoHash b = a;
+ b.move(1, 1);
+
+ // Epsilon is 1/100th of a bucket size
+ // TODO: Can we actually find error bounds for the sqrt function?
+ double epsilon = 0.001 / _scaling;
+ _error = distance(a, b) + epsilon;
+
+ // Error in radians
+ _errorSphere = deg2rad( _error );
+
+ }
+
+ double _configval( const IndexSpec* spec , const string& name , double def ) {
+ BSONElement e = spec->info[name];
+ if ( e.isNumber() ) {
+ return e.numberDouble();
+ }
+ return def;
+ }
+
+ virtual BSONObj fixKey( const BSONObj& in ) {
+ if ( in.firstElement().type() == BinData )
+ return in;
+
+ BSONObjBuilder b(in.objsize()+16);
+
+ if ( in.firstElement().isABSONObj() )
+ _hash( in.firstElement().embeddedObject() ).append( b , "" );
+ else if ( in.firstElement().type() == String )
+ GeoHash( in.firstElement().valuestr() ).append( b , "" );
+ else if ( in.firstElement().type() == RegEx )
+ GeoHash( in.firstElement().regex() ).append( b , "" );
+ else
+ return in;
+
+ BSONObjIterator i(in);
+ i.next();
+ while ( i.more() )
+ b.append( i.next() );
+ return b.obj();
+ }
+
+ /** Finds the key objects to put in an index */
+ virtual void getKeys( const BSONObj& obj, BSONObjSet& keys ) const {
+ getKeys( obj, &keys, NULL );
+ }
+
+ /** Finds all locations in a geo-indexed object */
+ // TODO: Can we just return references to the locs, if they won't change?
+ void getKeys( const BSONObj& obj, vector< BSONObj >& locs ) const {
+ getKeys( obj, NULL, &locs );
+ }
+
+ /** Finds the key objects and/or locations for a geo-indexed object */
+ void getKeys( const BSONObj &obj, BSONObjSet* keys, vector< BSONObj >* locs ) const {
+
+ BSONElementMSet bSet;
+
+ // Get all the nested location fields, but don't return individual elements from
+ // the last array, if it exists.
+ obj.getFieldsDotted(_geo.c_str(), bSet, false);
+
+ if( bSet.empty() )
+ return;
+
+ for( BSONElementMSet::iterator setI = bSet.begin(); setI != bSet.end(); ++setI ) {
+
+ BSONElement geo = *setI;
+
+ GEODEBUG( "Element " << geo << " found for query " << _geo.c_str() );
+
+ if ( geo.eoo() || ! geo.isABSONObj() )
+ continue;
+
+ //
+ // Grammar for location lookup:
+ // locs ::= [loc,loc,...,loc]|{<k>:loc,<k>:loc,...,<k>:loc}|loc
+ // loc ::= { <k1> : #, <k2> : # }|[#, #]|{}
+ //
+ // Empty locations are ignored, preserving single-location semantics
+ //
+
+ BSONObj embed = geo.embeddedObject();
+ if ( embed.isEmpty() )
+ continue;
+
+ // Differentiate between location arrays and locations
+ // by seeing if the first element value is a number
+ bool singleElement = embed.firstElement().isNumber();
+
+ BSONObjIterator oi(embed);
+
+ while( oi.more() ) {
+
+ BSONObj locObj;
+
+ if( singleElement ) locObj = embed;
+ else {
+ BSONElement locElement = oi.next();
+
+ uassert( 13654, str::stream() << "location object expected, location array not in correct format",
+ locElement.isABSONObj() );
+
+ locObj = locElement.embeddedObject();
+
+ if( locObj.isEmpty() )
+ continue;
+ }
+
+ BSONObjBuilder b(64);
+
+ // Remember the actual location object if needed
+ if( locs )
+ locs->push_back( locObj );
+
+ // Stop if we don't need to get anything but location objects
+ if( ! keys ) {
+ if( singleElement ) break;
+ else continue;
+ }
+
+ _hash( locObj ).append( b , "" );
+
+ // Go through all the other index keys
+ for ( vector<string>::const_iterator i = _other.begin(); i != _other.end(); ++i ) {
+
+ // Get *all* fields for the index key
+ BSONElementSet eSet;
+ obj.getFieldsDotted( *i, eSet );
+
+
+ if ( eSet.size() == 0 )
+ b.appendAs( _spec->missingField(), "" );
+ else if ( eSet.size() == 1 )
+ b.appendAs( *(eSet.begin()), "" );
+ else {
+
+ // If we have more than one key, store as an array of the objects
+
+ BSONArrayBuilder aBuilder;
+
+ for( BSONElementSet::iterator ei = eSet.begin(); ei != eSet.end(); ++ei ) {
+ aBuilder.append( *ei );
+ }
+
+ BSONArray arr = aBuilder.arr();
+
+ b.append( "", arr );
+
+ }
+
+ }
+
+ keys->insert( b.obj() );
+
+ if( singleElement ) break;
+
+ }
+ }
+
+ }
+
+ BSONObj _fromBSONHash( const BSONElement& e ) const {
+ return _unhash( _tohash( e ) );
+ }
+
+ BSONObj _fromBSONHash( const BSONObj& o ) const {
+ return _unhash( _tohash( o.firstElement() ) );
+ }
+
+ GeoHash _tohash( const BSONElement& e ) const {
+ if ( e.isABSONObj() )
+ return _hash( e.embeddedObject() );
+
+ return GeoHash( e , _bits );
+ }
+
+ GeoHash _hash( const BSONObj& o ) const {
+ BSONObjIterator i(o);
+ uassert( 13067 , "geo field is empty" , i.more() );
+ BSONElement x = i.next();
+ uassert( 13068 , "geo field only has 1 element" , i.more() );
+ BSONElement y = i.next();
+
+ uassert( 13026 , "geo values have to be numbers: " + o.toString() , x.isNumber() && y.isNumber() );
+
+ return hash( x.number() , y.number() );
+ }
+
+ GeoHash hash( const Point& p ) const {
+ return hash( p._x, p._y );
+ }
+
+ GeoHash hash( double x , double y ) const {
+ return GeoHash( _convert(x), _convert(y) , _bits );
+ }
+
+ BSONObj _unhash( const GeoHash& h ) const {
+ unsigned x , y;
+ h.unhash( x , y );
+ BSONObjBuilder b;
+ b.append( "x" , _unconvert( x ) );
+ b.append( "y" , _unconvert( y ) );
+ return b.obj();
+ }
+
+ unsigned _convert( double in ) const {
+ uassert( 13027 , str::stream() << "point not in interval of [ " << _min << ", " << _max << " )", in < _max && in >= _min );
+ in -= _min;
+ assert( in >= 0 );
+ return (unsigned)(in * _scaling);
+ }
+
+ double _unconvert( unsigned in ) const {
+ double x = in;
+ x /= _scaling;
+ x += _min;
+ return x;
+ }
+
+ void unhash( const GeoHash& h , double& x , double& y ) const {
+ unsigned a,b;
+ h.unhash(a,b);
+ x = _unconvert( a );
+ y = _unconvert( b );
+ }
+
+ double distance( const GeoHash& a , const GeoHash& b ) const {
+ double ax,ay,bx,by;
+ unhash( a , ax , ay );
+ unhash( b , bx , by );
+
+ double dx = bx - ax;
+ double dy = by - ay;
+
+ return sqrt( ( dx * dx ) + ( dy * dy ) );
+ }
+
+ double sizeDiag( const GeoHash& a ) const {
+ GeoHash b = a;
+ b.move( 1 , 1 );
+ return distance( a , b );
+ }
+
+ double sizeEdge( const GeoHash& a ) const {
+
+ if( ! a.constrains() )
+ return _max - _min;
+
+ double ax,ay,bx,by;
+ GeoHash b = a;
+ b.move( 1 , 1 );
+ unhash( a, ax, ay );
+ unhash( b, bx, by );
+
+ // _min and _max are a singularity
+ if (bx == _min)
+ bx = _max;
+
+ return (fabs(ax-bx));
+ }
+
+ const IndexDetails* getDetails() const {
+ return _spec->getDetails();
+ }
+
+ virtual shared_ptr<Cursor> newCursor( const BSONObj& query , const BSONObj& order , int numWanted ) const;
+
+ virtual IndexSuitability suitability( const BSONObj& query , const BSONObj& order ) const {
+ BSONElement e = query.getFieldDotted(_geo.c_str());
+ switch ( e.type() ) {
+ case Object: {
+ BSONObj sub = e.embeddedObject();
+ switch ( sub.firstElement().getGtLtOp() ) {
+ case BSONObj::opNEAR:
+ case BSONObj::opWITHIN:
+ return OPTIMAL;
+ default:
+ // We can try to match if there's no other indexing defined,
+ // this is assumed a point
+ return HELPFUL;
+ }
+ }
+ case Array:
+ // We can try to match if there's no other indexing defined,
+ // this is assumed a point
+ return HELPFUL;
+ default:
+ return USELESS;
+ }
+ }
+
+ string _geo;
+ vector<string> _other;
+
+ unsigned _bits;
+ double _max;
+ double _min;
+ double _scaling;
+
+ BSONObj _order;
+ double _error;
+ double _errorSphere;
+ };
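The constructor above fixes the grid: with the default min/max/bits of -180, 180 and 26, the scaling factor is 2^32 / 360, about 11.9 million grid units per degree, and _convert/_unconvert simply shift and scale coordinates into that grid before GeoHash interleaves the bits. A small standalone sketch of that arithmetic, with approximate values in the comments:

// Mirrors Geo2dType::_convert / _unconvert with the default bounds; sketch only.
const double gMin = -180.0, gMax = 180.0;
const double scaling = ( 1024 * 1024 * 1024 * 4.0 ) / ( gMax - gMin );  // 2^32 / 360 ~= 1.19e7

unsigned convertCoord( double in ) {          // shift into [0, 360), then scale
    in -= gMin;
    return (unsigned)( in * scaling );
}

double unconvertCoord( unsigned in ) {        // inverse: unscale, then shift back
    return ( in / scaling ) + gMin;
}

// convertCoord( -73.97 ) is roughly 1.26e9; unconvertCoord of that recovers
// -73.97 to within 1/scaling (~8.4e-8 degrees), comfortably inside the cell
// size that 26 hash bits per coordinate give.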
+
+ class Box {
+ public:
+
+ Box( const Geo2dType * g , const GeoHash& hash )
+ : _min( g , hash ) ,
+ _max( _min._x + g->sizeEdge( hash ) , _min._y + g->sizeEdge( hash ) ) {
+ }
+
+ Box( double x , double y , double size )
+ : _min( x , y ) ,
+ _max( x + size , y + size ) {
+ }
+
+ Box( Point min , Point max )
+ : _min( min ) , _max( max ) {
+ }
+
+ Box() {}
+
+ BSONArray toBSON() const {
+ return BSON_ARRAY( BSON_ARRAY( _min._x << _min._y ) << BSON_ARRAY( _max._x << _max._y ) );
+ }
+
+ string toString() const {
+ StringBuilder buf(64);
+ buf << _min.toString() << " -->> " << _max.toString();
+ return buf.str();
+ }
+
+ bool between( double min , double max , double val , double fudge=0) const {
+ return val + fudge >= min && val <= max + fudge;
+ }
+
+ bool onBoundary( double bound, double val, double fudge = 0 ) const {
+ return ( val >= bound - fudge && val <= bound + fudge );
+ }
+
+ bool mid( double amin , double amax , double bmin , double bmax , bool min , double& res ) const {
+ assert( amin <= amax );
+ assert( bmin <= bmax );
+
+ if ( amin < bmin ) {
+ if ( amax < bmin )
+ return false;
+ res = min ? bmin : amax;
+ return true;
+ }
+ if ( amin > bmax )
+ return false;
+ res = min ? amin : bmax;
+ return true;
+ }
+
+ double intersects( const Box& other ) const {
+
+ Point boundMin(0,0);
+ Point boundMax(0,0);
+
+ if ( mid( _min._x , _max._x , other._min._x , other._max._x , true , boundMin._x ) == false ||
+ mid( _min._x , _max._x , other._min._x , other._max._x , false , boundMax._x ) == false ||
+ mid( _min._y , _max._y , other._min._y , other._max._y , true , boundMin._y ) == false ||
+ mid( _min._y , _max._y , other._min._y , other._max._y , false , boundMax._y ) == false )
+ return 0;
+
+ Box intersection( boundMin , boundMax );
+
+ return intersection.area() / area();
+ }
+
+ double area() const {
+ return ( _max._x - _min._x ) * ( _max._y - _min._y );
+ }
+
+ double maxDim() const {
+ return max( _max._x - _min._x, _max._y - _min._y );
+ }
+
+ Point center() const {
+ return Point( ( _min._x + _max._x ) / 2 ,
+ ( _min._y + _max._y ) / 2 );
+ }
+
+ void truncate( const Geo2dType* g ) {
+ if( _min._x < g->_min ) _min._x = g->_min;
+ if( _min._y < g->_min ) _min._y = g->_min;
+ if( _max._x > g->_max ) _max._x = g->_max;
+ if( _max._y > g->_max ) _max._y = g->_max;
+ }
+
+ void fudge( const Geo2dType* g ) {
+ _min._x -= g->_error;
+ _min._y -= g->_error;
+ _max._x += g->_error;
+ _max._y += g->_error;
+ }
+
+ bool onBoundary( Point p, double fudge = 0 ) {
+ return onBoundary( _min._x, p._x, fudge ) ||
+ onBoundary( _max._x, p._x, fudge ) ||
+ onBoundary( _min._y, p._y, fudge ) ||
+ onBoundary( _max._y, p._y, fudge );
+ }
+
+ bool inside( Point p , double fudge = 0 ) {
+ bool res = inside( p._x , p._y , fudge );
+ //cout << "is : " << p.toString() << " in " << toString() << " = " << res << endl;
+ return res;
+ }
+
+ bool inside( double x , double y , double fudge = 0 ) {
+ return
+ between( _min._x , _max._x , x , fudge ) &&
+ between( _min._y , _max._y , y , fudge );
+ }
+
+ bool contains(const Box& other, double fudge=0) {
+ return inside(other._min, fudge) && inside(other._max, fudge);
+ }
+
+ Point _min;
+ Point _max;
+ };
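intersects() above returns the fraction of this box's area covered by the overlap with the other box (0 when they are disjoint). A small worked case, with values chosen purely for illustration:

// Worked example for Box::intersects; values are illustrative only.
Box a( Point( 0, 0 ), Point( 4, 4 ) );   // area 16
Box b( Point( 2, 2 ), Point( 6, 6 ) );   // overlaps a in the square (2,2)-(4,4)

double frac = a.intersects( b );         // overlap area 4 / a.area() 16 == 0.25
bool contained = a.contains( Box( Point( 1, 1 ), Point( 3, 3 ) ) );  // true: both corners inside a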
+
+
+ class Polygon {
+ public:
+
+ Polygon( void ) : _centroidCalculated( false ) {}
+
+ Polygon( vector<Point> points ) : _centroidCalculated( false ),
+ _points( points ) { }
+
+ void add( Point p ) {
+ _centroidCalculated = false;
+ _points.push_back( p );
+ }
+
+ int size( void ) const {
+ return _points.size();
+ }
+
+ /**
+ * Determine if the point supplied is contained by the current polygon.
+ *
+ * The algorithm uses a ray casting method.
+ */
+ bool contains( const Point& p ) const {
+ return contains( p, 0 ) > 0;
+ }
+
+ int contains( const Point &p, double fudge ) const {
+
+ Box fudgeBox( Point( p._x - fudge, p._y - fudge ), Point( p._x + fudge, p._y + fudge ) );
+
+ int counter = 0;
+ Point p1 = _points[0];
+ for ( int i = 1; i <= size(); i++ ) {
+ Point p2 = _points[i % size()];
+
+ GEODEBUG( "Doing intersection check of " << fudgeBox.toString() << " with seg " << p1.toString() << " to " << p2.toString() );
+
+ // We need to check whether or not this segment intersects our error box
+ if( fudge > 0 &&
+ // Points not too far below box
+ fudgeBox._min._y <= std::max( p1._y, p2._y ) &&
+ // Points not too far above box
+ fudgeBox._max._y >= std::min( p1._y, p2._y ) &&
+ // Points not too far to left of box
+ fudgeBox._min._x <= std::max( p1._x, p2._x ) &&
+ // Points not too far to right of box
+ fudgeBox._max._x >= std::min( p1._x, p2._x ) ) {
+
+ GEODEBUG( "Doing detailed check" );
+
+ // If our box contains one or more of these points, we need to do an exact check.
+ if( fudgeBox.inside(p1) ) {
+ GEODEBUG( "Point 1 inside" );
+ return 0;
+ }
+ if( fudgeBox.inside(p2) ) {
+ GEODEBUG( "Point 2 inside" );
+ return 0;
+ }
+
+ // Do intersection check for vertical sides
+ if ( p1._y != p2._y ) {
+
+ double invSlope = ( p2._x - p1._x ) / ( p2._y - p1._y );
+
+ double xintersT = ( fudgeBox._max._y - p1._y ) * invSlope + p1._x;
+ if( fudgeBox._min._x <= xintersT && fudgeBox._max._x >= xintersT ) {
+ GEODEBUG( "Top intersection @ " << xintersT );
+ return 0;
+ }
+
+ double xintersB = ( fudgeBox._min._y - p1._y ) * invSlope + p1._x;
+ if( fudgeBox._min._x <= xintersB && fudgeBox._max._x >= xintersB ) {
+ GEODEBUG( "Bottom intersection @ " << xintersB );
+ return 0;
+ }
+
+ }
+
+ // Do intersection check for horizontal sides
+ if( p1._x != p2._x ) {
+
+ double slope = ( p2._y - p1._y ) / ( p2._x - p1._x );
+
+ double yintersR = ( p1._x - fudgeBox._max._x ) * slope + p1._y;
+ if( fudgeBox._min._y <= yintersR && fudgeBox._max._y >= yintersR ) {
+ GEODEBUG( "Right intersection @ " << yintersR );
+ return 0;
+ }
+
+ double yintersL = ( p1._x - fudgeBox._min._x ) * slope + p1._y;
+ if( fudgeBox._min._y <= yintersL && fudgeBox._max._y >= yintersL ) {
+ GEODEBUG( "Left intersection @ " << yintersL );
+ return 0;
+ }
+
+ }
+
+ }
+ else if( fudge == 0 ){
+
+ // If this is an exact vertex, we won't intersect, so check this
+ if( p._y == p1._y && p._x == p1._x ) return 1;
+ else if( p._y == p2._y && p._x == p2._x ) return 1;
+
+ // If this is a horizontal line we won't intersect, so check this
+ if( p1._y == p2._y && p._y == p1._y ){
+ // Check that the x-coord lies in the line
+ if( p._x >= std::min( p1._x, p2._x ) && p._x <= std::max( p1._x, p2._x ) ) return 1;
+ }
+
+ }
+
+ // Normal intersection test.
+ // TODO: Invert these for clearer logic?
+ if ( p._y > std::min( p1._y, p2._y ) ) {
+ if ( p._y <= std::max( p1._y, p2._y ) ) {
+ if ( p._x <= std::max( p1._x, p2._x ) ) {
+ if ( p1._y != p2._y ) {
+ double xinters = (p._y-p1._y)*(p2._x-p1._x)/(p2._y-p1._y)+p1._x;
+ // Special case of point on vertical line
+ if ( p1._x == p2._x && p._x == p1._x ){
+
+ // Need special case for the vertical edges, for example:
+ // 1) \e pe/----->
+ // vs.
+ // 2) \ep---e/----->
+ //
+ // if we count exact as intersection, then 1 is in but 2 is out
+ // if we count exact as no-int then 1 is out but 2 is in.
+
+ return 1;
+ }
+ else if( p1._x == p2._x || p._x <= xinters ) {
+ counter++;
+ }
+ }
+ }
+ }
+ }
+
+ p1 = p2;
+ }
+
+ if ( counter % 2 == 0 ) {
+ return -1;
+ }
+ else {
+ return 1;
+ }
+ }
+
+ /**
+ * Calculate the centroid, or center of mass of the polygon object.
+ */
+ Point centroid( void ) {
+
+            /* Centroid is cached, it won't change between points */
+ if ( _centroidCalculated ) {
+ return _centroid;
+ }
+
+ Point cent;
+ double signedArea = 0.0;
+ double area = 0.0; // Partial signed area
+
+ /// For all vertices except last
+ int i = 0;
+ for ( i = 0; i < size() - 1; ++i ) {
+ area = _points[i]._x * _points[i+1]._y - _points[i+1]._x * _points[i]._y ;
+ signedArea += area;
+ cent._x += ( _points[i]._x + _points[i+1]._x ) * area;
+ cent._y += ( _points[i]._y + _points[i+1]._y ) * area;
+ }
+
+ // Do last vertex
+ area = _points[i]._x * _points[0]._y - _points[0]._x * _points[i]._y;
+ cent._x += ( _points[i]._x + _points[0]._x ) * area;
+ cent._y += ( _points[i]._y + _points[0]._y ) * area;
+ signedArea += area;
+ signedArea *= 0.5;
+ cent._x /= ( 6 * signedArea );
+ cent._y /= ( 6 * signedArea );
+
+ _centroidCalculated = true;
+ _centroid = cent;
+
+ return cent;
+ }
+
+ Box bounds( void ) {
+
+ // TODO: Cache this
+
+ _bounds._max = _points[0];
+ _bounds._min = _points[0];
+
+ for ( int i = 1; i < size(); i++ ) {
+
+ _bounds._max._x = max( _bounds._max._x, _points[i]._x );
+ _bounds._max._y = max( _bounds._max._y, _points[i]._y );
+ _bounds._min._x = min( _bounds._min._x, _points[i]._x );
+ _bounds._min._y = min( _bounds._min._y, _points[i]._y );
+
+ }
+
+ return _bounds;
+
+ }
+
+ private:
+
+ bool _centroidCalculated;
+ Point _centroid;
+
+ Box _bounds;
+
+ vector<Point> _points;
+ };
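centroid() above is the standard signed-area (shoelace) centroid, which is where the per-edge cross term and the final 1/(6A) divisor come from. For vertices $(x_i, y_i)$ taken cyclically (indices mod $n$):

$A = \tfrac{1}{2}\sum_{i=0}^{n-1} (x_i y_{i+1} - x_{i+1} y_i)$

$C_x = \frac{1}{6A}\sum_{i=0}^{n-1} (x_i + x_{i+1})(x_i y_{i+1} - x_{i+1} y_i), \qquad C_y = \frac{1}{6A}\sum_{i=0}^{n-1} (y_i + y_{i+1})(x_i y_{i+1} - x_{i+1} y_i)$

In the loop, `area` holds one cross term per edge, `signedArea` accumulates the sum (halved after the loop to give A), and the final divisions apply the 1/(6A) factor; the sign of A cancels, so the vertex winding order does not matter.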
+
+ class Geo2dPlugin : public IndexPlugin {
+ public:
+ Geo2dPlugin() : IndexPlugin( GEO2DNAME ) {
+ }
+
+ virtual IndexType* generate( const IndexSpec* spec ) const {
+ return new Geo2dType( this , spec );
+ }
+ } geo2dplugin;
+
+ void __forceLinkGeoPlugin() {
+ geo2dplugin.getName();
+ }
+
+
+
+ class GeoHopper;
+
+ class GeoPoint {
+ public:
+
+ GeoPoint() : _distance( -1 ), _exact( false ), _dirty( false )
+ {}
+
+ //// Distance not used ////
+
+ GeoPoint( const GeoKeyNode& node )
+ : _key( node._key ) , _loc( node.recordLoc ) , _o( node.recordLoc.obj() ), _distance( -1 ) , _exact( false ), _dirty( false ), _bucket( node._bucket ), _pos( node._keyOfs ) {
+ }
+
+ //// Immediate initialization of distance ////
+
+ GeoPoint( const GeoKeyNode& node, double distance, bool exact )
+ : _key( node._key ) , _loc( node.recordLoc ) , _o( node.recordLoc.obj() ), _distance( distance ), _exact( exact ), _dirty( false ) {
+ }
+
+ GeoPoint( const GeoPoint& pt, double distance, bool exact )
+ : _key( pt.key() ) , _loc( pt.loc() ) , _o( pt.obj() ), _distance( distance ), _exact( exact ), _dirty( false ) {
+ }
+
+ bool operator<( const GeoPoint& other ) const {
+ if( _distance != other._distance ) return _distance < other._distance;
+ if( _exact != other._exact ) return _exact < other._exact;
+ return _loc < other._loc;
+ }
+
+ double distance() const {
+ return _distance;
+ }
+
+ bool isExact() const {
+ return _exact;
+ }
+
+ BSONObj key() const {
+ return _key;
+ }
+
+ bool hasLoc() const {
+ return _loc.isNull();
+ }
+
+ DiskLoc loc() const {
+ assert( ! _dirty );
+ return _loc;
+ }
+
+ BSONObj obj() const {
+ return _o;
+ }
+
+ BSONObj pt() const {
+ return _pt;
+ }
+
+ bool isEmpty() {
+ return _o.isEmpty();
+ }
+
+ bool isCleanAndEmpty() {
+ return isEmpty() && ! isDirty();
+ }
+
+ string toString() const {
+ return str::stream() << "Point from " << _key << " - " << _o << " dist : " << _distance << ( _exact ? " (ex)" : " (app)" );
+ }
+
+
+ // TODO: Recover from yield by finding all the changed disk locs here, modifying the _seenPts array.
+ // Not sure yet the correct thing to do about _seen.
+ // Definitely need to re-find our current max/min locations too
+ bool unDirty( const Geo2dType* g, DiskLoc& oldLoc ){
+
+ assert( _dirty );
+ assert( ! _id.isEmpty() );
+
+ oldLoc = _loc;
+ _loc = DiskLoc();
+
+ // Fast undirty
+ IndexInterface& ii = g->getDetails()->idxInterface();
+ // Check this position and the one immediately preceding
+ for( int i = 0; i < 2; i++ ){
+ if( _pos - i < 0 ) continue;
+
+ // log() << "bucket : " << _bucket << " pos " << _pos << endl;
+
+ BSONObj key;
+ DiskLoc loc;
+ ii.keyAt( _bucket, _pos - i, key, loc );
+
+ // log() << "Loc: " << loc << " Key : " << key << endl;
+
+ if( loc.isNull() ) continue;
+
+ if( key.binaryEqual( _key ) && loc.obj()["_id"].wrap( "" ).binaryEqual( _id ) ){
+ _pos = _pos - i;
+ _loc = loc;
+ _dirty = false;
+ _o = loc.obj();
+ return true;
+ }
+ }
+
+ // Slow undirty
+ scoped_ptr<BtreeCursor> cursor( BtreeCursor::make( nsdetails( g->getDetails()->parentNS().c_str() ),
+ *( g->getDetails() ), _key, _key, true, 1 ) );
+
+ int count = 0;
+ while( cursor->ok() ){
+ count++;
+ if( cursor->current()["_id"].wrap( "" ).binaryEqual( _id ) ){
+ _bucket = cursor->getBucket();
+ _pos = cursor->getKeyOfs();
+ _loc = cursor->currLoc();
+ _o = _loc.obj();
+ break;
+ }
+ else{
+ LOG( CDEBUG + 1 ) << "Key doesn't match : " << cursor->current()["_id"] << " saved : " << _id << endl;
+ }
+ cursor->advance();
+ }
+
+ if( ! count ) { LOG( CDEBUG ) << "No key found for " << _key << endl; }
+
+ _dirty = false;
+
+ return _loc == oldLoc;
+ }
+
+ bool isDirty(){
+ return _dirty;
+ }
+
+ bool makeDirty(){
+ if( ! _dirty ){
+ assert( ! obj()["_id"].eoo() );
+ assert( ! _bucket.isNull() );
+ assert( _pos >= 0 );
+
+ if( _id.isEmpty() ){
+ _id = obj()["_id"].wrap( "" ).getOwned();
+ }
+ _o = BSONObj();
+ _key = _key.getOwned();
+ _pt = _pt.getOwned();
+ _dirty = true;
+
+ return true;
+ }
+
+ return false;
+ }
+
+ BSONObj _key;
+ DiskLoc _loc;
+ BSONObj _o;
+ BSONObj _pt;
+
+ double _distance;
+ bool _exact;
+
+ BSONObj _id;
+ bool _dirty;
+ DiskLoc _bucket;
+ int _pos;
+ };
+
+ // GeoBrowse subclasses this
+ class GeoAccumulator {
+ public:
+ GeoAccumulator( const Geo2dType * g , const BSONObj& filter, bool uniqueDocs, bool needDistance )
+ : _g(g) ,
+ _lookedAt(0) ,
+ _matchesPerfd(0) ,
+ _objectsLoaded(0) ,
+ _pointsLoaded(0) ,
+ _found(0) ,
+ _uniqueDocs( uniqueDocs ) ,
+ _needDistance( needDistance )
+ {
+ if ( ! filter.isEmpty() ) {
+ _matcher.reset( new CoveredIndexMatcher( filter , g->keyPattern() ) );
+ GEODEBUG( "Matcher is now " << _matcher->docMatcher().toString() );
+ }
+ }
+
+ virtual ~GeoAccumulator() { }
+
+ enum KeyResult { BAD, BORDER, GOOD };
+
+ virtual void add( const GeoKeyNode& node ) {
+
+ GEODEBUG( "\t\t\t\t checking key " << node._key.toString() )
+
+ _lookedAt++;
+
+ ////
+ // Approximate distance check using key data
+ ////
+ double keyD = 0;
+ Point keyP( _g, GeoHash( node._key.firstElement(), _g->_bits ) );
+ KeyResult keyOk = approxKeyCheck( keyP, keyD );
+ if ( keyOk == BAD ) {
+ GEODEBUG( "\t\t\t\t bad distance : " << node.recordLoc.obj() << "\t" << keyD );
+ return;
+ }
+ GEODEBUG( "\t\t\t\t good distance : " << node.recordLoc.obj() << "\t" << keyD );
+
+ ////
+ // Check for match using other key (and potentially doc) criteria
+ ////
+ // Remember match results for each object
+ map<DiskLoc, bool>::iterator match = _matched.find( node.recordLoc );
+ bool newDoc = match == _matched.end();
+ if( newDoc ) {
+
+ GEODEBUG( "\t\t\t\t matching new doc with " << (_matcher ? _matcher->docMatcher().toString() : "(empty)" ) );
+
+ // matcher
+ MatchDetails details;
+ if ( _matcher.get() ) {
+ bool good = _matcher->matchesWithSingleKeyIndex( node._key , node.recordLoc , &details );
+
+ _matchesPerfd++;
+
+ if ( details._loadedObject )
+ _objectsLoaded++;
+
+ if ( ! good ) {
+ GEODEBUG( "\t\t\t\t didn't match : " << node.recordLoc.obj()["_id"] );
+ _matched[ node.recordLoc ] = false;
+ return;
+ }
+ }
+
+ _matched[ node.recordLoc ] = true;
+
+ if ( ! details._loadedObject ) // don't double count
+ _objectsLoaded++;
+
+ }
+ else if( !((*match).second) ) {
+ GEODEBUG( "\t\t\t\t previously didn't match : " << node.recordLoc.obj()["_id"] );
+ return;
+ }
+
+ ////
+ // Exact check with particular data fields
+ ////
+ // Can add multiple points
+ int diff = addSpecific( node , keyP, keyOk == BORDER, keyD, newDoc );
+ if( diff > 0 ) _found += diff;
+ else _found -= -diff;
+
+ }
+
+ virtual void getPointsFor( const BSONObj& key, const BSONObj& obj, vector< BSONObj >& locsForNode, bool allPoints = false ){
+
+ // Find all the location objects from the keys
+ vector< BSONObj > locs;
+ _g->getKeys( obj, allPoints ? locsForNode : locs );
+ _pointsLoaded++;
+
+ if( allPoints ) return;
+ if( locs.size() == 1 ){
+ locsForNode.push_back( locs[0] );
+ return;
+ }
+
+ // Find the particular location we want
+ GeoHash keyHash( key.firstElement(), _g->_bits );
+
+ // log() << "Hash: " << node.key << " and " << keyHash.getHash() << " unique " << _uniqueDocs << endl;
+ for( vector< BSONObj >::iterator i = locs.begin(); i != locs.end(); ++i ) {
+
+ // Ignore all locations not hashed to the key's hash, since we may see
+ // those later
+ if( _g->_hash( *i ) != keyHash ) continue;
+
+ locsForNode.push_back( *i );
+
+ }
+
+ }
+
+ virtual int addSpecific( const GeoKeyNode& node, const Point& p , bool inBounds, double d, bool newDoc ) = 0;
+ virtual KeyResult approxKeyCheck( const Point& p , double& keyD ) = 0;
+ virtual bool exactDocCheck( const Point& p , double& d ) = 0;
+ virtual bool expensiveExactCheck(){ return false; }
+
+
+ long long found() const {
+ return _found;
+ }
+
+ const Geo2dType * _g;
+ map<DiskLoc, bool> _matched;
+ shared_ptr<CoveredIndexMatcher> _matcher;
+
+ long long _lookedAt;
+ long long _matchesPerfd;
+ long long _objectsLoaded;
+ long long _pointsLoaded;
+ long long _found;
+
+ bool _uniqueDocs;
+ bool _needDistance;
+
+ };
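add() above drives three extension points: a cheap approxKeyCheck() against the point recovered from the hashed key (with the index error bound deciding BORDER cases), the cached matcher pass keyed by DiskLoc, and then addSpecific()/exactDocCheck() against the real document. A hypothetical subclass makes the division of labour concrete; CountWithinRadius, its members, and the plain Euclidean distance are illustrative only -- the real consumers are the GeoBrowse classes that follow.

// Hypothetical GeoAccumulator subclass; not part of this file, shown only to
// illustrate the approx-key / exact-doc split that add() relies on.
class CountWithinRadius : public GeoAccumulator {
public:
    CountWithinRadius( const Geo2dType* g, const Point& center, double radius )
        : GeoAccumulator( g, BSONObj(), /*uniqueDocs*/ true, /*needDistance*/ true ),
          _center( center ), _radius( radius ) { }

    // cheap check on the point recovered from the key, padded by the index error
    virtual KeyResult approxKeyCheck( const Point& p, double& keyD ) {
        keyD = _dist( p );
        if ( keyD <= _radius - _g->_error ) return GOOD;
        if ( keyD >= _radius + _g->_error ) return BAD;
        return BORDER;
    }

    // exact check against an unhashed location from the document itself
    virtual bool exactDocCheck( const Point& p, double& d ) {
        d = _dist( p );
        return d <= _radius;
    }

    // invoked once the key (and matcher) checks pass; return value adjusts _found
    virtual int addSpecific( const GeoKeyNode& node, const Point& p,
                             bool inBounds, double d, bool newDoc ) {
        return newDoc ? 1 : 0;    // count each matching document once
    }

private:
    double _dist( const Point& p ) const {
        double dx = p._x - _center._x, dy = p._y - _center._y;
        return sqrt( dx * dx + dy * dy );
    }

    Point _center;
    double _radius;
};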
+
+
+ struct BtreeLocation {
+ BtreeLocation() { }
+
+ scoped_ptr<BtreeCursor> _cursor;
+ scoped_ptr<FieldRangeSet> _frs;
+ scoped_ptr<IndexSpec> _spec;
+
+ BSONObj key() {
+ return _cursor->currKey();
+ }
+
+ bool hasPrefix( const GeoHash& hash ) {
+ BSONObj k = key();
+ BSONElement e = k.firstElement();
+ if ( e.eoo() )
+ return false;
+ return GeoHash( e ).hasPrefix( hash );
+ }
+
+ bool checkAndAdvance( const GeoHash& hash, int& totalFound, GeoAccumulator* all ){
+ if( ! _cursor->ok() || ! hasPrefix( hash ) ) return false;
+
+ if( all ){
+ totalFound++;
+ GeoKeyNode n( _cursor->getBucket(), _cursor->getKeyOfs(), _cursor->currLoc(), _cursor->currKey() );
+ all->add( n );
+ }
+ _cursor->advance();
+
+ return true;
+ }
+
+ void save(){
+ _cursor->noteLocation();
+ }
+
+ void restore(){
+ _cursor->checkLocation();
+ }
+
+ string toString() {
+ stringstream ss;
+ ss << "bucket: " << _cursor->getBucket().toString() << " pos: " << _cursor->getKeyOfs() <<
+ ( _cursor->ok() ? ( str::stream() << " k: " << _cursor->currKey() << " o : " << _cursor->current()["_id"] ) : (string)"[none]" ) << endl;
+ return ss.str();
+ }
+
+ // Returns the min and max keys which bound a particular location.
+ // The only time these may be equal is when we actually equal the location
+ // itself, otherwise our expanding algorithm will fail.
+ static bool initial( const IndexDetails& id , const Geo2dType * spec ,
+ BtreeLocation& min , BtreeLocation& max ,
+ GeoHash start ,
+ int & found , GeoAccumulator * hopper ) {
+
+ //Ordering ordering = Ordering::make(spec->_order);
+
+ // Would be nice to build this directly, but bug in max/min queries SERVER-3766 and lack of interface
+ // makes this easiest for now.
+ BSONObj minQuery = BSON( spec->_geo << BSON( "$gt" << MINKEY << start.wrap( "$lte" ).firstElement() ) );
+ BSONObj maxQuery = BSON( spec->_geo << BSON( "$lt" << MAXKEY << start.wrap( "$gt" ).firstElement() ) );
+
+ // log() << "MinQuery: " << minQuery << endl;
+ // log() << "MaxQuery: " << maxQuery << endl;
+
+ min._frs.reset( new FieldRangeSet( spec->getDetails()->parentNS().c_str(),
+ minQuery,
+ true,
+ false ) );
+
+ max._frs.reset( new FieldRangeSet( spec->getDetails()->parentNS().c_str(),
+ maxQuery,
+ true,
+ false ) );
+
+
+ BSONObjBuilder bob;
+ bob.append( spec->_geo, 1 );
+ for( vector<string>::const_iterator i = spec->_other.begin(); i != spec->_other.end(); i++ ){
+ bob.append( *i, 1 );
+ }
+ BSONObj iSpec = bob.obj();
+
+ min._spec.reset( new IndexSpec( iSpec ) );
+ max._spec.reset( new IndexSpec( iSpec ) );
+
+ shared_ptr<FieldRangeVector> frvMin( new FieldRangeVector( *(min._frs), *(min._spec), -1 ) );
+ shared_ptr<FieldRangeVector> frvMax( new FieldRangeVector( *(max._frs), *(max._spec), 1 ) );
+
+ min._cursor.reset(
+ BtreeCursor::make( nsdetails( spec->getDetails()->parentNS().c_str() ), *( spec->getDetails() ),
+ frvMin, -1 )
+ );
+
+ max._cursor.reset(
+ BtreeCursor::make( nsdetails( spec->getDetails()->parentNS().c_str() ), *( spec->getDetails() ),
+ frvMax, 1 )
+ );
+
+ // if( hopper ) min.checkCur( found, hopper );
+ // if( hopper ) max.checkCur( found, hopper );
+
+ return min._cursor->ok() || max._cursor->ok();
+ }
+ };
+
+
+ class GeoCursorBase : public Cursor {
+ public:
+
+ static const shared_ptr< CoveredIndexMatcher > emptyMatcher;
+
+ GeoCursorBase( const Geo2dType * spec )
+ : _spec( spec ), _id( _spec->getDetails() ) {
+
+ }
+
+ virtual DiskLoc refLoc() { return DiskLoc(); }
+
+ virtual BSONObj indexKeyPattern() {
+ return _spec->keyPattern();
+ }
+
+ virtual void noteLocation() {
+ // no-op since these are meant to be safe
+ }
+
+ /* called before query getmore block is iterated */
+ virtual void checkLocation() {
+ // no-op since these are meant to be safe
+ }
+
+ virtual bool supportGetMore() { return false; }
+ virtual bool supportYields() { return false; }
+
+ virtual bool getsetdup(DiskLoc loc) { return false; }
+ virtual bool modifiedKeys() const { return true; }
+ virtual bool isMultiKey() const { return false; }
+
+ virtual bool autoDedup() const { return false; }
+
+ const Geo2dType * _spec;
+ const IndexDetails * _id;
+ };
+
+ const shared_ptr< CoveredIndexMatcher > GeoCursorBase::emptyMatcher( new CoveredIndexMatcher( BSONObj(), BSONObj(), false ) );
+
+ // TODO: Pull out the cursor bit from the browse, have GeoBrowse as field of cursor to clean up
+ // this hierarchy a bit. Also probably useful to look at whether GeoAccumulator can be a member instead
+ // of a superclass.
+ class GeoBrowse : public GeoCursorBase , public GeoAccumulator {
+ public:
+
+ // The max points which should be added to an expanding box at one time
+ static const int maxPointsHeuristic = 50;
+
+ // Expand states
+ enum State {
+ START ,
+ DOING_EXPAND ,
+ DONE_NEIGHBOR ,
+ DONE
+ } _state;
+
+ GeoBrowse( const Geo2dType * g , string type , BSONObj filter = BSONObj(), bool uniqueDocs = true, bool needDistance = false )
+ : GeoCursorBase( g ), GeoAccumulator( g , filter, uniqueDocs, needDistance ) ,
+ _type( type ) , _filter( filter ) , _firstCall(true), _noted( false ), _nscanned(), _nDirtied(0), _nChangedOnYield(0), _nRemovedOnYield(0), _centerPrefix(0, 0, 0) {
+
+ // Set up the initial expand state
+ _state = START;
+ _neighbor = -1;
+ _foundInExp = 0;
+
+ }
+
+ virtual string toString() {
+ return (string)"GeoBrowse-" + _type;
+ }
+
+ virtual bool ok() {
+
+ bool filled = false;
+
+ LOG( CDEBUG ) << "Checking cursor, in state " << (int) _state << ", first call " << _firstCall <<
+ ", empty : " << _cur.isEmpty() << ", dirty : " << _cur.isDirty() << ", stack : " << _stack.size() << endl;
+
+ bool first = _firstCall;
+ if ( _firstCall ) {
+ fillStack( maxPointsHeuristic );
+ filled = true;
+ _firstCall = false;
+ }
+ if ( ! _cur.isCleanAndEmpty() || _stack.size() ) {
+ if ( first ) {
+ ++_nscanned;
+ }
+
+ if( _noted && filled ) noteLocation();
+ return true;
+ }
+
+ while ( moreToDo() ) {
+
+ LOG( CDEBUG ) << "Refilling stack..." << endl;
+
+ fillStack( maxPointsHeuristic );
+ filled = true;
+
+ if ( ! _cur.isCleanAndEmpty() ) {
+ if ( first ) {
+ ++_nscanned;
+ }
+
+ if( _noted && filled ) noteLocation();
+ return true;
+ }
+ }
+
+ if( _noted && filled ) noteLocation();
+ return false;
+ }
+
+ virtual bool advance() {
+ _cur._o = BSONObj();
+
+ if ( _stack.size() ) {
+ _cur = _stack.front();
+ _stack.pop_front();
+ ++_nscanned;
+ return true;
+ }
+
+ if ( ! moreToDo() )
+ return false;
+
+ bool filled = false;
+ while ( _cur.isCleanAndEmpty() && moreToDo() ){
+ fillStack( maxPointsHeuristic );
+ filled = true;
+ }
+
+ if( _noted && filled ) noteLocation();
+ return ! _cur.isCleanAndEmpty() && ++_nscanned;
+ }
+
+ virtual void noteLocation() {
+ _noted = true;
+
+ LOG( CDEBUG ) << "Noting location with " << _stack.size() << ( _cur.isEmpty() ? "" : " + 1 " ) << " points " << endl;
+
+ // Make sure we advance past the point we're at now,
+ // since the current location may move on an update/delete
+ // if( _state == DOING_EXPAND ){
+ // if( _min.hasPrefix( _prefix ) ){ _min.advance( -1, _foundInExp, this ); }
+ // if( _max.hasPrefix( _prefix ) ){ _max.advance( 1, _foundInExp, this ); }
+ // }
+
+ // Remember where our _max, _min are
+ _min.save();
+ _max.save();
+
+ LOG( CDEBUG ) << "Min " << _min.toString() << endl;
+ LOG( CDEBUG ) << "Max " << _max.toString() << endl;
+
+ // Dirty all our queued stuff
+ for( list<GeoPoint>::iterator i = _stack.begin(); i != _stack.end(); i++ ){
+
+ LOG( CDEBUG ) << "Undirtying stack point with id " << i->_id << endl;
+
+ if( i->makeDirty() ) _nDirtied++;
+ assert( i->isDirty() );
+ }
+
+ // Check current item
+ if( ! _cur.isEmpty() ){
+ if( _cur.makeDirty() ) _nDirtied++;
+ }
+
+ // Our cached matches become invalid now
+ _matched.clear();
+ }
+
+ void fixMatches( DiskLoc oldLoc, DiskLoc newLoc ){
+ map<DiskLoc, bool>::iterator match = _matched.find( oldLoc );
+ if( match != _matched.end() ){
+ bool val = match->second;
+ _matched.erase( oldLoc );
+ _matched[ newLoc ] = val;
+ }
+ }
+
+ /* called before query getmore block is iterated */
+ virtual void checkLocation() {
+
+ LOG( CDEBUG ) << "Restoring location with " << _stack.size() << ( ! _cur.isDirty() ? "" : " + 1 " ) << " points " << endl;
+
+ // We can assume an error was thrown earlier if this database somehow disappears
+
+ // Recall our _max, _min
+ _min.restore();
+ _max.restore();
+
+ LOG( CDEBUG ) << "Min " << _min.toString() << endl;
+ LOG( CDEBUG ) << "Max " << _max.toString() << endl;
+
+ // If the current key moved, we may have been advanced past the current point - need to check this
+ // if( _state == DOING_EXPAND ){
+ // if( _min.hasPrefix( _prefix ) ){ _min.advance( -1, _foundInExp, this ); }
+ // if( _max.hasPrefix( _prefix ) ){ _max.advance( 1, _foundInExp, this ); }
+ //}
+
+            // Undirty all the queued stuff
+ list<GeoPoint>::iterator i = _stack.begin();
+ while( i != _stack.end() ){
+
+ LOG( CDEBUG ) << "Undirtying stack point with id " << i->_id << endl;
+
+ DiskLoc oldLoc;
+ if( i->unDirty( _spec, oldLoc ) ){
+ // Document is in same location
+ LOG( CDEBUG ) << "Undirtied " << oldLoc << endl;
+
+ i++;
+ }
+ else if( ! i->loc().isNull() ){
+
+ // Re-found document somewhere else
+ LOG( CDEBUG ) << "Changed location of " << i->_id << " : " << i->loc() << " vs " << oldLoc << endl;
+
+ _nChangedOnYield++;
+ fixMatches( oldLoc, i->loc() );
+ i++;
+ }
+ else {
+
+ // Can't re-find document
+ LOG( CDEBUG ) << "Removing document " << i->_id << endl;
+
+ _nRemovedOnYield++;
+ _found--;
+ assert( _found >= 0 );
+
+ // Can't find our key again, remove
+ i = _stack.erase( i );
+ }
+ }
+
+ if( _cur.isDirty() ){
+ LOG( CDEBUG ) << "Undirtying cur point with id : " << _cur._id << endl;
+ }
+
+ // Check current item
+ DiskLoc oldLoc;
+ if( _cur.isDirty() && ! _cur.unDirty( _spec, oldLoc ) ){
+ if( _cur.loc().isNull() ){
+
+ // Document disappeared!
+ LOG( CDEBUG ) << "Removing cur point " << _cur._id << endl;
+
+ _nRemovedOnYield++;
+ advance();
+ }
+ else{
+
+ // Document moved
+ LOG( CDEBUG ) << "Changed location of cur point " << _cur._id << " : " << _cur.loc() << " vs " << oldLoc << endl;
+
+ _nChangedOnYield++;
+ fixMatches( oldLoc, _cur.loc() );
+ }
+ }
+
+ _noted = false;
+ }
+
+ virtual Record* _current() { assert(ok()); LOG( CDEBUG + 1 ) << "_current " << _cur._loc.obj()["_id"] << endl; return _cur._loc.rec(); }
+ virtual BSONObj current() { assert(ok()); LOG( CDEBUG + 1 ) << "current " << _cur._o << endl; return _cur._o; }
+ virtual DiskLoc currLoc() { assert(ok()); LOG( CDEBUG + 1 ) << "currLoc " << _cur._loc << endl; return _cur._loc; }
+ virtual BSONObj currKey() const { return _cur._key; }
+
+ virtual CoveredIndexMatcher* matcher() const {
+ if( _matcher.get() ) return _matcher.get();
+ else return GeoCursorBase::emptyMatcher.get();
+ }
+
+ virtual shared_ptr< CoveredIndexMatcher > matcherPtr() const {
+ if( _matcher.get() ) return _matcher;
+ else return GeoCursorBase::emptyMatcher;
+ }
+
+ // Are we finished getting points?
+ virtual bool moreToDo() {
+ return _state != DONE;
+ }
+
+ virtual bool supportGetMore() { return true; }
+
+ // Fills the stack, but only checks a maximum number of maxToCheck points at a time.
+ // Further calls to this function will continue the expand/check neighbors algorithm.
+ virtual void fillStack( int maxToCheck, int maxToAdd = -1, bool onlyExpand = false ) {
+
+#ifdef GEODEBUGGING
+ log() << "Filling stack with maximum of " << maxToCheck << ", state : " << (int) _state << endl;
+#endif
+
+ if( maxToAdd < 0 ) maxToAdd = maxToCheck;
+ int maxFound = _foundInExp + maxToCheck;
+ assert( maxToCheck > 0 );
+ assert( maxFound > 0 );
+ assert( _found <= 0x7fffffff ); // conversion to int
+ int maxAdded = static_cast<int>(_found) + maxToAdd;
+ assert( maxAdded >= 0 ); // overflow check
+
+ bool isNeighbor = _centerPrefix.constrains();
+
+ // Starting a box expansion
+ if ( _state == START ) {
+
+ // Get the very first hash point, if required
+ if( ! isNeighbor )
+ _prefix = expandStartHash();
+
+ GEODEBUG( "initializing btree" );
+
+#ifdef GEODEBUGGING
+ log() << "Initializing from b-tree with hash of " << _prefix << " @ " << Box( _g, _prefix ) << endl;
+#endif
+
+ if ( ! BtreeLocation::initial( *_id , _spec , _min , _max , _prefix , _foundInExp , this ) )
+ _state = isNeighbor ? DONE_NEIGHBOR : DONE;
+ else {
+ _state = DOING_EXPAND;
+ _lastPrefix.reset();
+ }
+
+                GEODEBUG( (_state == DONE_NEIGHBOR || _state == DONE ? "not initialized" : "initialized") );
+
+ }
+
+ // Doing the actual box expansion
+ if ( _state == DOING_EXPAND ) {
+
+ while ( true ) {
+
+ GEODEBUG( "box prefix [" << _prefix << "]" );
+#ifdef GEODEBUGGING
+ if( _prefix.constrains() ) {
+ log() << "current expand box : " << Box( _g, _prefix ).toString() << endl;
+ }
+ else {
+ log() << "max expand box." << endl;
+ }
+#endif
+
+ GEODEBUG( "expanding box points... ");
+
+ // Record the prefix we're actively exploring...
+ _expPrefix.reset( new GeoHash( _prefix ) );
+
+ // Find points inside this prefix
+ while ( _min.checkAndAdvance( _prefix, _foundInExp, this ) && _foundInExp < maxFound && _found < maxAdded );
+ while ( _max.checkAndAdvance( _prefix, _foundInExp, this ) && _foundInExp < maxFound && _found < maxAdded );
+
+#ifdef GEODEBUGGING
+
+ log() << "finished expand, checked : " << ( maxToCheck - ( maxFound - _foundInExp ) )
+ << " found : " << ( maxToAdd - ( maxAdded - _found ) )
+ << " max : " << maxToCheck << " / " << maxToAdd << endl;
+
+#endif
+
+ GEODEBUG( "finished expand, found : " << ( maxToAdd - ( maxAdded - _found ) ) );
+ if( _foundInExp >= maxFound || _found >= maxAdded ) return;
+
+ // We've searched this prefix fully, remember
+ _lastPrefix.reset( new GeoHash( _prefix ));
+
+ // If we've searched the entire space, we're finished.
+ if ( ! _prefix.constrains() ) {
+ GEODEBUG( "box exhausted" );
+ _state = DONE;
+ notePrefix();
+ return;
+ }
+
+ // If we won't fit in the box, and we're not doing a sub-scan, increase the size
+ if ( ! fitsInBox( _g->sizeEdge( _prefix ) ) && _fringe.size() == 0 ) {
+
+                        // If we still haven't expanded past the box size, expand again
+ // TODO: Is there an advantage to scanning prior to expanding?
+ _prefix = _prefix.up();
+ continue;
+
+ }
+
+ // log() << "finished box prefix [" << _prefix << "]" << endl;
+
+ // We're done and our size is large enough
+ _state = DONE_NEIGHBOR;
+
+ // Go to the next sub-box, if applicable
+ if( _fringe.size() > 0 ) _fringe.pop_back();
+ // Go to the next neighbor if this was the last sub-search
+ if( _fringe.size() == 0 ) _neighbor++;
+
+ break;
+
+ }
+
+ notePrefix();
+ }
+
+            // If we're only expanding, don't examine neighbor boxes
+ if( onlyExpand ) return;
+
+ // If we're done expanding the current box...
+ if( _state == DONE_NEIGHBOR ) {
+
+ // Iterate to the next neighbor
+ // Loop is useful for cases where we want to skip over boxes entirely,
+ // otherwise recursion increments the neighbors.
+ for ( ; _neighbor < 9; _neighbor++ ) {
+
+ // If we have no fringe for the neighbor, make sure we have the default fringe
+ if( _fringe.size() == 0 ) _fringe.push_back( "" );
+
+ if( ! isNeighbor ) {
+ _centerPrefix = _prefix;
+ _centerBox = Box( _g, _centerPrefix );
+ isNeighbor = true;
+ }
+
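+                    // Map _neighbor (0..8) onto x/y offsets (i, j) in {-1, 0, 1} around the center box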
+ int i = (_neighbor / 3) - 1;
+ int j = (_neighbor % 3) - 1;
+
+ if ( ( i == 0 && j == 0 ) ||
+ ( i < 0 && _centerPrefix.atMinX() ) ||
+ ( i > 0 && _centerPrefix.atMaxX() ) ||
+ ( j < 0 && _centerPrefix.atMinY() ) ||
+ ( j > 0 && _centerPrefix.atMaxY() ) ) {
+
+ //log() << "not moving to neighbor " << _neighbor << " @ " << i << " , " << j << " fringe : " << _fringe.size() << " " << _centerPrefix << endl;
+ //log() << _centerPrefix.atMinX() << " "
+ // << _centerPrefix.atMinY() << " "
+ // << _centerPrefix.atMaxX() << " "
+ // << _centerPrefix.atMaxY() << " " << endl;
+
+ continue; // main box or wrapped edge
+ // TODO: We may want to enable wrapping in future, probably best as layer on top of
+ // this search.
+ }
+
+ // Make sure we've got a reasonable center
+ assert( _centerPrefix.constrains() );
+
+ GeoHash _neighborPrefix = _centerPrefix;
+ _neighborPrefix.move( i, j );
+
+ //log() << "moving to neighbor " << _neighbor << " @ " << i << " , " << j << " fringe : " << _fringe.size() << " " << _centerPrefix << " " << _neighborPrefix << endl;
+
+ GEODEBUG( "moving to neighbor " << _neighbor << " @ " << i << " , " << j << " fringe : " << _fringe.size() );
+ PREFIXDEBUG( _centerPrefix, _g );
+ PREFIXDEBUG( _neighborPrefix , _g );
+ while( _fringe.size() > 0 ) {
+
+ _prefix = _neighborPrefix + _fringe.back();
+ Box cur( _g , _prefix );
+
+ PREFIXDEBUG( _prefix, _g );
+
+ double intAmt = intersectsBox( cur );
+
+ // No intersection
+ if( intAmt <= 0 ) {
+ GEODEBUG( "skipping box" << cur.toString() );
+ _fringe.pop_back();
+ continue;
+ }
+ // Small intersection, refine search
+ else if( intAmt < 0.5 && _prefix.canRefine() && _fringe.back().size() < 4 /* two bits */ ) {
+
+ GEODEBUG( "Intersection small : " << intAmt << ", adding to fringe: " << _fringe.back() << " curr prefix : " << _prefix << " bits : " << _prefix.getBits() );
+
+ // log() << "Diving to level : " << ( _fringe.back().size() / 2 + 1 ) << endl;
+
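+                            // Each two-bit suffix picks one of the four child quadrants of the current
+                            // prefix, so replace this fringe entry with its four children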
+ string lastSuffix = _fringe.back();
+ _fringe.pop_back();
+ _fringe.push_back( lastSuffix + "00" );
+ _fringe.push_back( lastSuffix + "01" );
+ _fringe.push_back( lastSuffix + "11" );
+ _fringe.push_back( lastSuffix + "10" );
+
+ continue;
+ }
+
+                        // Restart our search from a different box.
+ _state = START;
+
+ assert( ! onlyExpand );
+
+ assert( _found <= 0x7fffffff );
+ fillStack( maxFound - _foundInExp, maxAdded - static_cast<int>(_found) );
+
+ // When we return from the recursive fillStack call, we'll either have checked enough points or
+ // be entirely done. Max recurse depth is < 8 * 16.
+
+ // If we're maxed out on points, return
+ if( _foundInExp >= maxFound || _found >= maxAdded ) {
+ // Make sure we'll come back to add more points
+ assert( _state == DOING_EXPAND );
+ return;
+ }
+
+                            // Otherwise, we must be entirely finished
+ assert( _state == DONE );
+ return;
+
+ }
+
+ }
+
+ // Finished with neighbors
+ _state = DONE;
+ }
+
+ }
+
+ // The initial geo hash box for our first expansion
+ virtual GeoHash expandStartHash() = 0;
+
+ // Whether the current box width is big enough for our search area
+ virtual bool fitsInBox( double width ) = 0;
+
+ // The amount the current box overlaps our search area
+ virtual double intersectsBox( Box& cur ) = 0;
+
+ bool remembered( BSONObj o ){
+ BSONObj seenId = o["_id"].wrap("").getOwned();
+ if( _seenIds.find( seenId ) != _seenIds.end() ){
+ LOG( CDEBUG + 1 ) << "Object " << o["_id"] << " already seen." << endl;
+ return true;
+ }
+ else{
+ _seenIds.insert( seenId );
+ LOG( CDEBUG + 1 ) << "Object " << o["_id"] << " remembered." << endl;
+ return false;
+ }
+ }
+
+ virtual int addSpecific( const GeoKeyNode& node , const Point& keyP , bool onBounds , double keyD , bool potentiallyNewDoc ) {
+
+ int found = 0;
+
+            // We need to handle every possible point in this method, even those not in the key value,
+            // so that we don't have to track which hashes we've already seen.
+ if( ! potentiallyNewDoc ){
+ // log() << "Already handled doc!" << endl;
+ return 0;
+ }
+
+ // Final check for new doc
+ // OK to touch, since we're probably returning this object now
+ if( remembered( node.recordLoc.obj() ) ) return 0;
+
+ if( _uniqueDocs && ! onBounds ) {
+ //log() << "Added ind to " << _type << endl;
+ _stack.push_front( GeoPoint( node ) );
+ found++;
+ }
+ else {
+ // We now handle every possible point in the document, even those not in the key value,
+ // since we're iterating through them anyway - prevents us from having to save the hashes
+ // we've seen per-doc
+
+ // If we're filtering by hash, get the original
+ bool expensiveExact = expensiveExactCheck();
+
+ vector< BSONObj > locs;
+ getPointsFor( node._key, node.recordLoc.obj(), locs, true );
+ for( vector< BSONObj >::iterator i = locs.begin(); i != locs.end(); ++i ){
+
+ double d = -1;
+ Point p( *i );
+
+ // We can avoid exact document checks by redoing approx checks,
+ // if the exact checks are more expensive.
+ bool needExact = true;
+ if( expensiveExact ){
+ assert( false );
+ KeyResult result = approxKeyCheck( p, d );
+ if( result == BAD ) continue;
+ else if( result == GOOD ) needExact = false;
+ }
+
+ if( ! needExact || exactDocCheck( p, d ) ){
+ //log() << "Added mult to " << _type << endl;
+ _stack.push_front( GeoPoint( node ) );
+ found++;
+ // If returning unique, just exit after first point is added
+ if( _uniqueDocs ) break;
+ }
+ }
+ }
+
+ while( _cur.isCleanAndEmpty() && _stack.size() > 0 ){
+ _cur = _stack.front();
+ _stack.pop_front();
+ }
+
+ return found;
+ }
+
+ virtual long long nscanned() {
+ if ( _firstCall ) {
+ ok();
+ }
+ return _nscanned;
+ }
+
+ virtual void explainDetails( BSONObjBuilder& b ){
+ b << "lookedAt" << _lookedAt;
+ b << "matchesPerfd" << _matchesPerfd;
+ b << "objectsLoaded" << _objectsLoaded;
+ b << "pointsLoaded" << _pointsLoaded;
+ b << "pointsSavedForYield" << _nDirtied;
+ b << "pointsChangedOnYield" << _nChangedOnYield;
+ b << "pointsRemovedOnYield" << _nRemovedOnYield;
+ }
+
+ virtual BSONObj prettyIndexBounds() const {
+
+ vector<GeoHash>::const_iterator i = _expPrefixes.end();
+ if( _expPrefixes.size() > 0 && *(--i) != *( _expPrefix.get() ) )
+ _expPrefixes.push_back( *( _expPrefix.get() ) );
+
+ BSONObjBuilder bob;
+ BSONArrayBuilder bab;
+ for( i = _expPrefixes.begin(); i != _expPrefixes.end(); ++i ){
+ bab << Box( _g, *i ).toBSON();
+ }
+ bob << _g->_geo << bab.arr();
+
+ return bob.obj();
+
+ }
+
+ void notePrefix() {
+ _expPrefixes.push_back( _prefix );
+ }
+
+ string _type;
+ BSONObj _filter;
+ list<GeoPoint> _stack;
+ set<BSONObj> _seenIds;
+
+ GeoPoint _cur;
+ bool _firstCall;
+ bool _noted;
+
+ long long _nscanned;
+ long long _nDirtied;
+ long long _nChangedOnYield;
+ long long _nRemovedOnYield;
+
+ // The current box we're expanding (-1 is first/center box)
+ int _neighbor;
+
+ // The points we've found so far
+ // TODO: Long long?
+ int _foundInExp;
+
+ // The current hash prefix we're expanding and the center-box hash prefix
+ GeoHash _prefix;
+ shared_ptr<GeoHash> _lastPrefix;
+ GeoHash _centerPrefix;
+ list<string> _fringe;
+ int recurseDepth;
+ Box _centerBox;
+
+ // Start and end of our search range in the current box
+ BtreeLocation _min;
+ BtreeLocation _max;
+
+ shared_ptr<GeoHash> _expPrefix;
+ mutable vector<GeoHash> _expPrefixes;
+
+ };
+
+
+ class GeoHopper : public GeoBrowse {
+ public:
+ typedef multiset<GeoPoint> Holder;
+
+ GeoHopper( const Geo2dType * g , unsigned max , const Point& n , const BSONObj& filter = BSONObj() , double maxDistance = numeric_limits<double>::max() , GeoDistType type=GEO_PLAIN, bool uniqueDocs = false, bool needDistance = true )
+ : GeoBrowse( g, "search", filter, uniqueDocs, needDistance ), _max( max ) , _near( n ), _maxDistance( maxDistance ), _type( type ), _distError( type == GEO_PLAIN ? g->_error : g->_errorSphere ), _farthest(0)
+ {}
+
+ virtual KeyResult approxKeyCheck( const Point& p, double& d ) {
+
+ // Always check approximate distance, since it lets us avoid doing
+ // checks of the rest of the object if it succeeds
+
+ switch (_type) {
+ case GEO_PLAIN:
+ d = _near.distance( p );
+ break;
+ case GEO_SPHERE:
+ checkEarthBounds( p );
+ d = spheredist_deg( _near, p );
+ break;
+ default: assert( false );
+ }
+ assert( d >= 0 );
+
+ GEODEBUG( "\t\t\t\t\t\t\t checkDistance " << _near.toString()
+ << "\t" << p.toString() << "\t" << d
+ << " farthest: " << farthest() );
+
+ // If we need more points
+ double borderDist = ( _points.size() < _max ? _maxDistance : farthest() );
+
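+            // Within 2 * _distError of the cutoff the hash is too coarse to decide either way,
+            // so return BORDER and let the exact document check resolve it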
+ if( d >= borderDist - 2 * _distError && d <= borderDist + 2 * _distError ) return BORDER;
+ else return d < borderDist ? GOOD : BAD;
+
+ }
+
+ virtual bool exactDocCheck( const Point& p, double& d ){
+
+ bool within = false;
+
+ // Get the appropriate distance for the type
+ switch ( _type ) {
+ case GEO_PLAIN:
+ d = _near.distance( p );
+ within = _near.distanceWithin( p, _maxDistance );
+ break;
+ case GEO_SPHERE:
+ checkEarthBounds( p );
+ d = spheredist_deg( _near, p );
+ within = ( d <= _maxDistance );
+ break;
+ default: assert( false );
+ }
+
+ return within;
+ }
+
+ // Always in distance units, whether radians or normal
+ double farthest() const {
+ return _farthest;
+ }
+
+ virtual int addSpecific( const GeoKeyNode& node, const Point& keyP, bool onBounds, double keyD, bool potentiallyNewDoc ) {
+
+ // Unique documents
+
+ GeoPoint newPoint( node, keyD, false );
+
+ int prevSize = _points.size();
+
+ // STEP 1 : Remove old duplicate points from the set if needed
+ if( _uniqueDocs ){
+
+ // Lookup old point with same doc
+ map< DiskLoc , Holder::iterator >::iterator oldPointIt = _seenPts.find( newPoint.loc() );
+
+ if( oldPointIt != _seenPts.end() ){
+ const GeoPoint& oldPoint = *(oldPointIt->second);
+ // We don't need to care if we've already seen this same approx pt or better,
+ // or we've already gone to disk once for the point
+ if( oldPoint < newPoint ){
+ GEODEBUG( "\t\tOld point closer than new point" );
+ return 0;
+ }
+ GEODEBUG( "\t\tErasing old point " << oldPointIt->first.obj() );
+ _points.erase( oldPointIt->second );
+ }
+ }
+
+ Holder::iterator newIt = _points.insert( newPoint );
+ if( _uniqueDocs ) _seenPts[ newPoint.loc() ] = newIt;
+
+ GEODEBUG( "\t\tInserted new point " << newPoint.toString() << " approx : " << keyD );
+
+ assert( _max > 0 );
+
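+            // Track the farthest kept distance, padded by the hash error, so that points near the
+            // cutoff aren't discarded prematurely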
+ Holder::iterator lastPtIt = _points.end();
+ lastPtIt--;
+ _farthest = lastPtIt->distance() + 2 * _distError;
+
+ return _points.size() - prevSize;
+
+ }
+
+ // Removes extra points from end of _points set.
+        // The check can be a bit costly if we have lots of exact points near borders,
+        // so we only do this every once in a while.
+ void processExtraPoints(){
+
+ if( _points.size() == 0 ) return;
+
+ int prevSize = _points.size();
+
+ // Erase all points from the set with a position >= _max *and*
+ // whose distance isn't close to the _max - 1 position distance
+
+ int numToErase = _points.size() - _max;
+ if( numToErase < 0 ) numToErase = 0;
+
+ // Get the first point definitely in the _points array
+ Holder::iterator startErase = _points.end();
+ for( int i = 0; i < numToErase + 1; i++ ) startErase--;
+ _farthest = startErase->distance() + 2 * _distError;
+
+ GEODEBUG( "\t\tPotentially erasing " << numToErase << " points, " << " size : " << _points.size() << " max : " << _max << " dist : " << startErase->distance() << " farthest dist : " << _farthest << " from error : " << _distError );
+
+ startErase++;
+ while( numToErase > 0 && startErase->distance() <= _farthest ){
+ GEODEBUG( "\t\tNot erasing point " << startErase->toString() );
+ numToErase--;
+ startErase++;
+ assert( startErase != _points.end() || numToErase == 0 );
+ }
+
+ if( _uniqueDocs ){
+ for( Holder::iterator i = startErase; i != _points.end(); ++i )
+ _seenPts.erase( i->loc() );
+ }
+
+ _points.erase( startErase, _points.end() );
+
+ int diff = _points.size() - prevSize;
+ if( diff > 0 ) _found += diff;
+ else _found -= -diff;
+
+ }
+
+ unsigned _max;
+ Point _near;
+ Holder _points;
+ double _maxDistance;
+ GeoDistType _type;
+ double _distError;
+ double _farthest;
+
+ // Safe to use currently since we don't yield in $near searches. If we do start to yield, we may need to
+ // replace dirtied disklocs in our holder / ensure our logic is correct.
+ map< DiskLoc , Holder::iterator > _seenPts;
+
+ };
+
+
+
+ class GeoSearch : public GeoHopper {
+ public:
+ GeoSearch( const Geo2dType * g , const Point& startPt , int numWanted=100 , BSONObj filter=BSONObj() , double maxDistance = numeric_limits<double>::max() , GeoDistType type=GEO_PLAIN, bool uniqueDocs = false, bool needDistance = false )
+ : GeoHopper( g , numWanted , startPt , filter , maxDistance, type, uniqueDocs, needDistance ),
+ _start( g->hash( startPt._x, startPt._y ) ),
+ // TODO: Remove numWanted...
+ _numWanted( numWanted ),
+ _type(type)
+ {
+
+ assert( g->getDetails() );
+ _nscanned = 0;
+ _found = 0;
+
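+            // _scanDistance is how far, in index coordinates, the expansion must reach to guarantee
+            // we see every document within maxDistance, including the hash error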
+ if( _maxDistance < 0 ){
+ _scanDistance = numeric_limits<double>::max();
+ }
+ else if (type == GEO_PLAIN) {
+ _scanDistance = maxDistance + _spec->_error;
+ }
+ else if (type == GEO_SPHERE) {
+ checkEarthBounds( startPt );
+ // TODO: consider splitting into x and y scan distances
+ _scanDistance = computeXScanDistance( startPt._y, rad2deg( _maxDistance ) + _spec->_error );
+ }
+
+ assert( _scanDistance > 0 );
+
+ }
+
+
+ /** Check if we've already looked at a key. ALSO marks as seen, anticipating a follow-up call
+ to add(). This is broken out to avoid some work extracting the key bson if it's an
+ already seen point.
+ */
+ private:
+ set< pair<DiskLoc,int> > _seen;
+ public:
+
+ void exec() {
+
+ if( _numWanted == 0 ) return;
+
+ /*
+ * Search algorithm
+ * 1) use geohash prefix to find X items
+ * 2) compute max distance from want to an item
+ * 3) find optimal set of boxes that complete circle
+ * 4) use regular btree cursors to scan those boxes
+ */
+
+#ifdef GEODEBUGGING
+
+ log() << "start near search for " << _numWanted << " points near " << _near << " (max dist " << _maxDistance << ")" << endl;
+
+#endif
+
+ // Part 1
+ {
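+                    // Expand outward from the start hash until we have enough points, the index is
+                    // exhausted, or the current box already covers the whole scan distance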
+ do {
+ long long f = found();
+ assert( f <= 0x7fffffff );
+ fillStack( maxPointsHeuristic, _numWanted - static_cast<int>(f) , true );
+ processExtraPoints();
+ } while( _state != DONE && _state != DONE_NEIGHBOR &&
+ found() < _numWanted &&
+ (! _prefix.constrains() || _g->sizeEdge( _prefix ) <= _scanDistance ) );
+
+ // If we couldn't scan or scanned everything, we're done
+ if( _state == DONE ){
+ expandEndPoints();
+ return;
+ }
+ }
+
+#ifdef GEODEBUGGING
+
+ log() << "part 1 of near search completed, found " << found() << " points (out of " << _foundInExp << " scanned)"
+ << " in expanded region " << _prefix << " @ " << Box( _g, _prefix )
+ << " with furthest distance " << farthest() << endl;
+
+#endif
+
+ // Part 2
+ {
+
+ // Find farthest distance for completion scan
+ double farDist = farthest();
+ if( found() < _numWanted ) {
+ // Not enough found in Phase 1
+ farDist = _scanDistance;
+ }
+ else if ( _type == GEO_PLAIN ) {
+ // Enough found, but need to search neighbor boxes
+ farDist += _spec->_error;
+ }
+ else if ( _type == GEO_SPHERE ) {
+ // Enough found, but need to search neighbor boxes
+ farDist = std::min( _scanDistance, computeXScanDistance( _near._y, rad2deg( farDist ) ) + 2 * _spec->_error );
+ }
+ assert( farDist >= 0 );
+ GEODEBUGPRINT( farDist );
+
+ // Find the box that includes all the points we need to return
+ _want = Box( _near._x - farDist , _near._y - farDist , farDist * 2 );
+ GEODEBUGPRINT( _want.toString() );
+
+ // log() << "Found : " << found() << " wanted : " << _numWanted << " Far distance : " << farDist << " box : " << _want << endl;
+
+ // Remember the far distance for further scans
+ _scanDistance = farDist;
+
+ // Reset the search, our distances have probably changed
+ if( _state == DONE_NEIGHBOR ){
+ _state = DOING_EXPAND;
+ _neighbor = -1;
+ }
+
+#ifdef GEODEBUGGING
+
+ log() << "resetting search with start at " << _start << " (edge length " << _g->sizeEdge( _start ) << ")" << endl;
+
+#endif
+
+ // Do regular search in the full region
+ do {
+ fillStack( maxPointsHeuristic );
+ processExtraPoints();
+ }
+ while( _state != DONE );
+
+ }
+
+ GEODEBUG( "done near search with " << _points.size() << " points " );
+
+ expandEndPoints();
+
+ }
+
+ void addExactPoints( const GeoPoint& pt, Holder& points, bool force ){
+ int before, after;
+ addExactPoints( pt, points, before, after, force );
+ }
+
+ void addExactPoints( const GeoPoint& pt, Holder& points, int& before, int& after, bool force ){
+
+ before = 0;
+ after = 0;
+
+ GEODEBUG( "Adding exact points for " << pt.toString() );
+
+ if( pt.isExact() ){
+ if( force ) points.insert( pt );
+ return;
+ }
+
+ vector<BSONObj> locs;
+ getPointsFor( pt.key(), pt.obj(), locs, _uniqueDocs );
+
+ GeoPoint nearestPt( pt, -1, true );
+
+ for( vector<BSONObj>::iterator i = locs.begin(); i != locs.end(); i++ ){
+
+ Point loc( *i );
+
+ double d;
+ if( ! exactDocCheck( loc, d ) ) continue;
+
+ if( _uniqueDocs && ( nearestPt.distance() < 0 || d < nearestPt.distance() ) ){
+ nearestPt._distance = d;
+ nearestPt._pt = *i;
+ continue;
+ }
+ else if( ! _uniqueDocs ){
+ GeoPoint exactPt( pt, d, true );
+ exactPt._pt = *i;
+ GEODEBUG( "Inserting exact pt " << exactPt.toString() << " for " << pt.toString() << " exact : " << d << " is less? " << ( exactPt < pt ) << " bits : " << _g->_bits );
+ points.insert( exactPt );
+ exactPt < pt ? before++ : after++;
+ }
+
+ }
+
+ if( _uniqueDocs && nearestPt.distance() >= 0 ){
+ GEODEBUG( "Inserting unique exact pt " << nearestPt.toString() << " for " << pt.toString() << " exact : " << nearestPt.distance() << " is less? " << ( nearestPt < pt ) << " bits : " << _g->_bits );
+ points.insert( nearestPt );
+ if( nearestPt < pt ) before++;
+ else after++;
+ }
+
+ }
+
+ // TODO: Refactor this back into holder class, allow to run periodically when we are seeing a lot of pts
+ void expandEndPoints( bool finish = true ){
+
+ processExtraPoints();
+
+ // All points in array *could* be in maxDistance
+
+ // Step 1 : Trim points to max size
+            // TODO: This check will do little for now, but is a skeleton for future work on
+            // incremental $near searches
+ if( _max > 0 ){
+
+ int numToErase = _points.size() - _max;
+
+ if( numToErase > 0 ){
+
+ Holder tested;
+
+ // Work backward through all points we're not sure belong in the set
+ Holder::iterator maybePointIt = _points.end();
+ maybePointIt--;
+ double approxMin = maybePointIt->distance() - 2 * _distError;
+
+ GEODEBUG( "\t\tNeed to erase " << numToErase << " max : " << _max << " min dist " << approxMin << " error : " << _distError << " starting from : " << (*maybePointIt).toString() );
+
+                    // Expand each uncertain point exactly into 'tested', erasing it from the main set
+ int erased = 0;
+ while( _points.size() > 0 && ( maybePointIt->distance() >= approxMin || erased < numToErase ) ){
+
+ Holder::iterator current = maybePointIt--;
+
+ addExactPoints( *current, tested, true );
+ _points.erase( current );
+ erased++;
+
+ if( tested.size() )
+ approxMin = tested.begin()->distance() - 2 * _distError;
+
+ }
+
+ GEODEBUG( "\t\tEnding search at point " << ( _points.size() == 0 ? "(beginning)" : maybePointIt->toString() ) );
+
+ int numToAddBack = erased - numToErase;
+ assert( numToAddBack >= 0 );
+
+ GEODEBUG( "\t\tNum tested valid : " << tested.size() << " erased : " << erased << " added back : " << numToAddBack );
+
+#ifdef GEODEBUGGING
+ for( Holder::iterator it = tested.begin(); it != tested.end(); it++ ){
+ log() << "Tested Point: " << *it << endl;
+ }
+#endif
+ Holder::iterator testedIt = tested.begin();
+ for( int i = 0; i < numToAddBack && testedIt != tested.end(); i++ ){
+ _points.insert( *testedIt );
+ testedIt++;
+ }
+ }
+ }
+
+#ifdef GEODEBUGGING
+ for( Holder::iterator it = _points.begin(); it != _points.end(); it++ ){
+ log() << "Point: " << *it << endl;
+ }
+#endif
+            // We've now trimmed the first set of unneeded points
+
+ GEODEBUG( "\t\t Start expanding, num points : " << _points.size() << " max : " << _max );
+
+ // Step 2: iterate through all points and add as needed
+
+ unsigned expandedPoints = 0;
+ Holder::iterator it = _points.begin();
+ double expandWindowEnd = -1;
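+                // expandWindowEnd marks how far past the last counted point an approximate point may
+                // lie and still, within 2 * _distError, end up among the best _max once expanded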
+ while( it != _points.end() ){
+ const GeoPoint& currPt = *it;
+
+ // TODO: If one point is exact, maybe not 2 * _distError
+
+ // See if we're in an expand window
+ bool inWindow = currPt.distance() <= expandWindowEnd;
+ // If we're not, and we're done with points, break
+ if( ! inWindow && expandedPoints >= _max ) break;
+
+ bool expandApprox = ! currPt.isExact() && ( ! _uniqueDocs || ( finish && _needDistance ) || inWindow );
+
+ if( expandApprox ){
+
+ // Add new point(s)
+ // These will only be added in a radius of 2 * _distError around the current point,
+ // so should not affect previously valid points.
+ int before, after;
+ addExactPoints( currPt, _points, before, after, false );
+ expandedPoints += before;
+
+ if( _max > 0 && expandedPoints < _max )
+ expandWindowEnd = currPt.distance() + 2 * _distError;
+
+ // Iterate to the next point
+ Holder::iterator current = it++;
+ // Erase the current point
+ _points.erase( current );
+
+ }
+ else{
+ expandedPoints++;
+ it++;
+ }
+ }
+
+ GEODEBUG( "\t\tFinished expanding, num points : " << _points.size() << " max : " << _max );
+
+ // Finish
+ // TODO: Don't really need to trim?
+ for( ; expandedPoints > _max; expandedPoints-- ) it--;
+ _points.erase( it, _points.end() );
+
+#ifdef GEODEBUGGING
+ for( Holder::iterator it = _points.begin(); it != _points.end(); it++ ){
+ log() << "Point: " << *it << endl;
+ }
+#endif
+ }
+
+ virtual GeoHash expandStartHash(){
+ return _start;
+ }
+
+ // Whether the current box width is big enough for our search area
+ virtual bool fitsInBox( double width ){
+ return width >= _scanDistance;
+ }
+
+ // Whether the current box overlaps our search area
+ virtual double intersectsBox( Box& cur ){
+ return cur.intersects( _want );
+ }
+
+ GeoHash _start;
+ int _numWanted;
+ double _scanDistance;
+
+ long long _nscanned;
+ int _found;
+ GeoDistType _type;
+
+ Box _want;
+ };
+
+ class GeoSearchCursor : public GeoCursorBase {
+ public:
+
+ GeoSearchCursor( shared_ptr<GeoSearch> s )
+ : GeoCursorBase( s->_spec ) ,
+ _s( s ) , _cur( s->_points.begin() ) , _end( s->_points.end() ), _nscanned() {
+ if ( _cur != _end ) {
+ ++_nscanned;
+ }
+ }
+
+ virtual ~GeoSearchCursor() {}
+
+ virtual bool ok() {
+ return _cur != _end;
+ }
+
+ virtual Record* _current() { assert(ok()); return _cur->_loc.rec(); }
+ virtual BSONObj current() { assert(ok()); return _cur->_o; }
+ virtual DiskLoc currLoc() { assert(ok()); return _cur->_loc; }
+ virtual bool advance() {
+ if( ok() ){
+ _cur++;
+ incNscanned();
+ return ok();
+ }
+ return false;
+ }
+ virtual BSONObj currKey() const { return _cur->_key; }
+
+ virtual string toString() {
+ return "GeoSearchCursor";
+ }
+
+
+ virtual BSONObj prettyStartKey() const {
+ return BSON( _s->_g->_geo << _s->_prefix.toString() );
+ }
+ virtual BSONObj prettyEndKey() const {
+ GeoHash temp = _s->_prefix;
+ temp.move( 1 , 1 );
+ return BSON( _s->_g->_geo << temp.toString() );
+ }
+
+ virtual long long nscanned() { return _nscanned; }
+
+ virtual CoveredIndexMatcher* matcher() const {
+ if( _s->_matcher.get() ) return _s->_matcher.get();
+ else return emptyMatcher.get();
+ }
+
+ virtual shared_ptr< CoveredIndexMatcher > matcherPtr() const {
+ if( _s->_matcher.get() ) return _s->_matcher;
+ else return emptyMatcher;
+ }
+
+ shared_ptr<GeoSearch> _s;
+ GeoHopper::Holder::iterator _cur;
+ GeoHopper::Holder::iterator _end;
+
+ void incNscanned() { if ( ok() ) { ++_nscanned; } }
+ long long _nscanned;
+ };
+
+ class GeoCircleBrowse : public GeoBrowse {
+ public:
+
+ GeoCircleBrowse( const Geo2dType * g , const BSONObj& circle , BSONObj filter = BSONObj() , const string& type="$center", bool uniqueDocs = true )
+ : GeoBrowse( g , "circle" , filter, uniqueDocs ) {
+
+ uassert( 13060 , "$center needs 2 fields (middle,max distance)" , circle.nFields() == 2 );
+
+ BSONObjIterator i(circle);
+ BSONElement center = i.next();
+
+ uassert( 13656 , "the first field of $center object must be a location object" , center.isABSONObj() );
+
+ // Get geohash and exact center point
+ // TODO: For wrapping search, may be useful to allow center points outside-of-bounds here.
+ // Calculating the nearest point as a hash start inside the region would then be required.
+ _start = g->_tohash(center);
+ _startPt = Point(center);
+
+ _maxDistance = i.next().numberDouble();
+ uassert( 13061 , "need a max distance >= 0 " , _maxDistance >= 0 );
+
+ if (type == "$center") {
+ // Look in box with bounds of maxDistance in either direction
+ _type = GEO_PLAIN;
+ _xScanDistance = _maxDistance + _g->_error;
+ _yScanDistance = _maxDistance + _g->_error;
+ }
+ else if (type == "$centerSphere") {
+ // Same, but compute maxDistance using spherical transform
+
+ uassert(13461, "Spherical MaxDistance > PI. Are you sure you are using radians?", _maxDistance < M_PI);
+ checkEarthBounds( _startPt );
+
+ _type = GEO_SPHERE;
+ _yScanDistance = rad2deg( _maxDistance ) + _g->_error;
+ _xScanDistance = computeXScanDistance(_startPt._y, _yScanDistance);
+
+ uassert(13462, "Spherical distance would require wrapping, which isn't implemented yet",
+ (_startPt._x + _xScanDistance < 180) && (_startPt._x - _xScanDistance > -180) &&
+ (_startPt._y + _yScanDistance < 90) && (_startPt._y - _yScanDistance > -90));
+ }
+ else {
+ uassert(13460, "invalid $center query type: " + type, false);
+ }
+
+ // Bounding box includes fudge factor.
+ // TODO: Is this correct, since fudge factor may be spherically transformed?
+ _bBox._min = Point( _startPt._x - _xScanDistance, _startPt._y - _yScanDistance );
+ _bBox._max = Point( _startPt._x + _xScanDistance, _startPt._y + _yScanDistance );
+
+ GEODEBUG( "Bounding box for circle query : " << _bBox.toString() << " (max distance : " << _maxDistance << ")" << " starting from " << _startPt.toString() );
+
+ ok();
+ }
+
+ virtual GeoHash expandStartHash() {
+ return _start;
+ }
+
+ virtual bool fitsInBox( double width ) {
+ return width >= std::max(_xScanDistance, _yScanDistance);
+ }
+
+ virtual double intersectsBox( Box& cur ) {
+ return cur.intersects( _bBox );
+ }
+
+ virtual KeyResult approxKeyCheck( const Point& p, double& d ) {
+
+ // Inexact hash distance checks.
+ double error = 0;
+ switch (_type) {
+ case GEO_PLAIN:
+ d = _startPt.distance( p );
+ error = _g->_error;
+ break;
+ case GEO_SPHERE: {
+ checkEarthBounds( p );
+ d = spheredist_deg( _startPt, p );
+ error = _g->_errorSphere;
+ break;
+ }
+ default: assert( false );
+ }
+
+ // If our distance is in the error bounds...
+ if( d >= _maxDistance - error && d <= _maxDistance + error ) return BORDER;
+ return d > _maxDistance ? BAD : GOOD;
+ }
+
+ virtual bool exactDocCheck( const Point& p, double& d ){
+
+ switch (_type) {
+ case GEO_PLAIN: {
+ if( _startPt.distanceWithin( p, _maxDistance ) ) return true;
+ break;
+ }
+ case GEO_SPHERE:
+ checkEarthBounds( p );
+ if( spheredist_deg( _startPt , p ) <= _maxDistance ) return true;
+ break;
+ default: assert( false );
+ }
+
+ return false;
+ }
+
+ GeoDistType _type;
+ GeoHash _start;
+ Point _startPt;
+ double _maxDistance; // user input
+        double _xScanDistance; // affected by GeoDistType
+        double _yScanDistance; // affected by GeoDistType
+ Box _bBox;
+
+ };
+
+ class GeoBoxBrowse : public GeoBrowse {
+ public:
+
+ GeoBoxBrowse( const Geo2dType * g , const BSONObj& box , BSONObj filter = BSONObj(), bool uniqueDocs = true )
+ : GeoBrowse( g , "box" , filter, uniqueDocs ) {
+
+ uassert( 13063 , "$box needs 2 fields (bottomLeft,topRight)" , box.nFields() == 2 );
+
+ // Initialize an *exact* box from the given obj.
+ BSONObjIterator i(box);
+ _want._min = Point( i.next() );
+ _want._max = Point( i.next() );
+
+ _wantRegion = _want;
+ _wantRegion.fudge( g ); // Need to make sure we're checking regions within error bounds of where we want
+ fixBox( g, _wantRegion );
+ fixBox( g, _want );
+
+ uassert( 13064 , "need an area > 0 " , _want.area() > 0 );
+
+ Point center = _want.center();
+ _start = _g->hash( center._x , center._y );
+
+ GEODEBUG( "center : " << center.toString() << "\t" << _prefix );
+
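+            // The expansion box is wide enough once its edge spans half the larger side of the
+            // wanted box (measured from its center), plus the hash error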
+ _fudge = _g->_error;
+ _wantLen = _fudge +
+ std::max( ( _want._max._x - _want._min._x ) ,
+ ( _want._max._y - _want._min._y ) ) / 2;
+
+ ok();
+ }
+
+ void fixBox( const Geo2dType* g, Box& box ) {
+ if( box._min._x > box._max._x )
+ swap( box._min._x, box._max._x );
+ if( box._min._y > box._max._y )
+ swap( box._min._y, box._max._y );
+
+ double gMin = g->_min;
+ double gMax = g->_max;
+
+ if( box._min._x < gMin ) box._min._x = gMin;
+ if( box._min._y < gMin ) box._min._y = gMin;
+ if( box._max._x > gMax) box._max._x = gMax;
+ if( box._max._y > gMax ) box._max._y = gMax;
+ }
+
+ void swap( double& a, double& b ) {
+ double swap = a;
+ a = b;
+ b = swap;
+ }
+
+ virtual GeoHash expandStartHash() {
+ return _start;
+ }
+
+ virtual bool fitsInBox( double width ) {
+ return width >= _wantLen;
+ }
+
+ virtual double intersectsBox( Box& cur ) {
+ return cur.intersects( _wantRegion );
+ }
+
+ virtual KeyResult approxKeyCheck( const Point& p, double& d ) {
+ if( _want.onBoundary( p, _fudge ) ) return BORDER;
+ else return _want.inside( p, _fudge ) ? GOOD : BAD;
+
+ }
+
+ virtual bool exactDocCheck( const Point& p, double& d ){
+ return _want.inside( p );
+ }
+
+ Box _want;
+ Box _wantRegion;
+ double _wantLen;
+ double _fudge;
+
+ GeoHash _start;
+
+ };
+
+ class GeoPolygonBrowse : public GeoBrowse {
+ public:
+
+ GeoPolygonBrowse( const Geo2dType* g , const BSONObj& polyPoints ,
+ BSONObj filter = BSONObj(), bool uniqueDocs = true ) : GeoBrowse( g , "polygon" , filter, uniqueDocs ) {
+
+ GEODEBUG( "In Polygon" )
+
+ BSONObjIterator i( polyPoints );
+ BSONElement first = i.next();
+ _poly.add( Point( first ) );
+
+ while ( i.more() ) {
+ _poly.add( Point( i.next() ) );
+ }
+
+ uassert( 14030, "polygon must be defined by three points or more", _poly.size() >= 3 );
+
+ _bounds = _poly.bounds();
+ _bounds.fudge( g ); // We need to check regions within the error bounds of these bounds
+ _bounds.truncate( g ); // We don't need to look anywhere outside the space
+
+ _maxDim = _g->_error + _bounds.maxDim() / 2;
+
+ ok();
+ }
+
+ // The initial geo hash box for our first expansion
+ virtual GeoHash expandStartHash() {
+ return _g->hash( _bounds.center() );
+ }
+
+ // Whether the current box width is big enough for our search area
+ virtual bool fitsInBox( double width ) {
+ return _maxDim <= width;
+ }
+
+ // Whether the current box overlaps our search area
+ virtual double intersectsBox( Box& cur ) {
+ return cur.intersects( _bounds );
+ }
+
+ virtual KeyResult approxKeyCheck( const Point& p, double& d ) {
+
+ int in = _poly.contains( p, _g->_error );
+
+ if( in == 0 ) return BORDER;
+ else return in > 0 ? GOOD : BAD;
+
+ }
+
+ virtual bool exactDocCheck( const Point& p, double& d ){
+ return _poly.contains( p );
+ }
+
+ private:
+
+ Polygon _poly;
+ Box _bounds;
+ double _maxDim;
+
+ GeoHash _start;
+ };
+
+ shared_ptr<Cursor> Geo2dType::newCursor( const BSONObj& query , const BSONObj& order , int numWanted ) const {
+ if ( numWanted < 0 )
+ numWanted = numWanted * -1;
+ else if ( numWanted == 0 )
+ numWanted = 100;
+
+ BSONObjIterator i(query);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+
+ if ( _geo != e.fieldName() )
+ continue;
+
+ if ( e.type() == Array ) {
+ // If we get an array query, assume it is a location, and do a $within { $center : [[x, y], 0] } search
+ shared_ptr<Cursor> c( new GeoCircleBrowse( this , BSON( "0" << e.embeddedObjectUserCheck() << "1" << 0 ), query.filterFieldsUndotted( BSON( _geo << "" ), false ), "$center", true ) );
+ return c;
+ }
+ else if ( e.type() == Object ) {
+
+ // TODO: Filter out _geo : { $special... } field so it doesn't get matched accidentally,
+ // if matcher changes
+
+ switch ( e.embeddedObject().firstElement().getGtLtOp() ) {
+ case BSONObj::opNEAR: {
+ BSONObj n = e.embeddedObject();
+ e = n.firstElement();
+
+ const char* suffix = e.fieldName() + 5; // strlen("$near") == 5;
+ GeoDistType type;
+ if (suffix[0] == '\0') {
+ type = GEO_PLAIN;
+ }
+ else if (strcmp(suffix, "Sphere") == 0) {
+ type = GEO_SPHERE;
+ }
+ else {
+ uassert(13464, string("invalid $near search type: ") + e.fieldName(), false);
+ type = GEO_PLAIN; // prevents uninitialized warning
+ }
+
+ double maxDistance = numeric_limits<double>::max();
+ if ( e.isABSONObj() && e.embeddedObject().nFields() > 2 ) {
+ BSONObjIterator i(e.embeddedObject());
+ i.next();
+ i.next();
+ BSONElement e = i.next();
+ if ( e.isNumber() )
+ maxDistance = e.numberDouble();
+ }
+ {
+ BSONElement e = n["$maxDistance"];
+ if ( e.isNumber() )
+ maxDistance = e.numberDouble();
+ }
+
+ bool uniqueDocs = false;
+ if( ! n["$uniqueDocs"].eoo() ) uniqueDocs = n["$uniqueDocs"].trueValue();
+
+ shared_ptr<GeoSearch> s( new GeoSearch( this , Point( e ) , numWanted , query , maxDistance, type, uniqueDocs ) );
+ s->exec();
+ shared_ptr<Cursor> c;
+ c.reset( new GeoSearchCursor( s ) );
+ return c;
+ }
+ case BSONObj::opWITHIN: {
+
+ e = e.embeddedObject().firstElement();
+ uassert( 13057 , "$within has to take an object or array" , e.isABSONObj() );
+
+ BSONObj context = e.embeddedObject();
+ e = e.embeddedObject().firstElement();
+ string type = e.fieldName();
+
+ bool uniqueDocs = true;
+ if( ! context["$uniqueDocs"].eoo() ) uniqueDocs = context["$uniqueDocs"].trueValue();
+
+ if ( startsWith(type, "$center") ) {
+ uassert( 13059 , "$center has to take an object or array" , e.isABSONObj() );
+ shared_ptr<Cursor> c( new GeoCircleBrowse( this , e.embeddedObjectUserCheck() , query , type, uniqueDocs ) );
+ return c;
+ }
+ else if ( type == "$box" ) {
+ uassert( 13065 , "$box has to take an object or array" , e.isABSONObj() );
+ shared_ptr<Cursor> c( new GeoBoxBrowse( this , e.embeddedObjectUserCheck() , query, uniqueDocs ) );
+ return c;
+ }
+ else if ( startsWith( type, "$poly" ) ) {
+ uassert( 14029 , "$polygon has to take an object or array" , e.isABSONObj() );
+ shared_ptr<Cursor> c( new GeoPolygonBrowse( this , e.embeddedObjectUserCheck() , query, uniqueDocs ) );
+ return c;
+ }
+ throw UserException( 13058 , str::stream() << "unknown $within information : " << context << ", a shape must be specified." );
+ }
+ default:
+ // Otherwise... assume the object defines a point, and we want to do a zero-radius $within $center
+ shared_ptr<Cursor> c( new GeoCircleBrowse( this , BSON( "0" << e.embeddedObjectUserCheck() << "1" << 0 ), query.filterFieldsUndotted( BSON( _geo << "" ), false ) ) );
+ return c;
+ }
+ }
+ }
+
+ throw UserException( 13042 , (string)"missing geo field (" + _geo + ") in : " + query.toString() );
+ }
+
+ // ------
+ // commands
+ // ------
+
+ class Geo2dFindNearCmd : public Command {
+ public:
+ Geo2dFindNearCmd() : Command( "geoNear" ) {}
+ virtual LockType locktype() const { return READ; }
+ bool slaveOk() const { return true; }
+ void help(stringstream& h) const { h << "http://www.mongodb.org/display/DOCS/Geospatial+Indexing#GeospatialIndexing-geoNearCommand"; }
+ bool slaveOverrideOk() { return true; }
+ bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ string ns = dbname + "." + cmdObj.firstElement().valuestr();
+
+ NamespaceDetails * d = nsdetails( ns.c_str() );
+ if ( ! d ) {
+ errmsg = "can't find ns";
+ return false;
+ }
+
+ vector<int> idxs;
+ d->findIndexByType( GEO2DNAME , idxs );
+
+ if ( idxs.size() > 1 ) {
+                errmsg = "more than 1 geo index :(";
+ return false;
+ }
+
+ if ( idxs.size() == 0 ) {
+ errmsg = "no geo index :(";
+ return false;
+ }
+
+ int geoIdx = idxs[0];
+
+ result.append( "ns" , ns );
+
+ IndexDetails& id = d->idx( geoIdx );
+ Geo2dType * g = (Geo2dType*)id.getSpec().getType();
+ assert( &id == g->getDetails() );
+
+ int numWanted = 100;
+ if ( cmdObj["num"].isNumber() ) {
+ numWanted = cmdObj["num"].numberInt();
+ assert( numWanted >= 0 );
+ }
+
+ bool uniqueDocs = false;
+ if( ! cmdObj["uniqueDocs"].eoo() ) uniqueDocs = cmdObj["uniqueDocs"].trueValue();
+
+ bool includeLocs = false;
+ if( ! cmdObj["includeLocs"].eoo() ) includeLocs = cmdObj["includeLocs"].trueValue();
+
+ uassert(13046, "'near' param missing/invalid", !cmdObj["near"].eoo());
+ const Point n( cmdObj["near"] );
+ result.append( "near" , g->_tohash( cmdObj["near"] ).toString() );
+
+ BSONObj filter;
+ if ( cmdObj["query"].type() == Object )
+ filter = cmdObj["query"].embeddedObject();
+
+ double maxDistance = numeric_limits<double>::max();
+ if ( cmdObj["maxDistance"].isNumber() )
+ maxDistance = cmdObj["maxDistance"].number();
+
+ GeoDistType type = GEO_PLAIN;
+ if ( cmdObj["spherical"].trueValue() )
+ type = GEO_SPHERE;
+
+ GeoSearch gs( g , n , numWanted , filter , maxDistance , type, uniqueDocs, true );
+
+ if ( cmdObj["start"].type() == String) {
+ GeoHash start ((string) cmdObj["start"].valuestr());
+ gs._start = start;
+ }
+
+ gs.exec();
+
+ double distanceMultiplier = 1;
+ if ( cmdObj["distanceMultiplier"].isNumber() )
+ distanceMultiplier = cmdObj["distanceMultiplier"].number();
+
+ double totalDistance = 0;
+
+ BSONObjBuilder arr( result.subarrayStart( "results" ) );
+ int x = 0;
+ for ( GeoHopper::Holder::iterator i=gs._points.begin(); i!=gs._points.end(); i++ ) {
+
+ const GeoPoint& p = *i;
+ double dis = distanceMultiplier * p.distance();
+ totalDistance += dis;
+
+ BSONObjBuilder bb( arr.subobjStart( BSONObjBuilder::numStr( x++ ) ) );
+ bb.append( "dis" , dis );
+ if( includeLocs ){
+ if( p._pt.couldBeArray() ) bb.append( "loc", BSONArray( p._pt ) );
+ else bb.append( "loc" , p._pt );
+ }
+ bb.append( "obj" , p._o );
+ bb.done();
+
+ if ( arr.len() > BSONObjMaxUserSize ) {
+                    warning() << "Too many results to fit in a single document. Truncating..." << endl;
+ break;
+ }
+ }
+ arr.done();
+
+ BSONObjBuilder stats( result.subobjStart( "stats" ) );
+ stats.append( "time" , cc().curop()->elapsedMillis() );
+ stats.appendNumber( "btreelocs" , gs._nscanned );
+ stats.appendNumber( "nscanned" , gs._lookedAt );
+ stats.appendNumber( "objectsLoaded" , gs._objectsLoaded );
+ stats.append( "avgDistance" , totalDistance / x );
+ stats.append( "maxDistance" , gs.farthest() );
+ stats.done();
+
+ return true;
+ }
+
+ } geo2dFindNearCmd;
+
+ class GeoWalkCmd : public Command {
+ public:
+ GeoWalkCmd() : Command( "geoWalk" ) {}
+ virtual LockType locktype() const { return READ; }
+ bool slaveOk() const { return true; }
+ bool slaveOverrideOk() { return true; }
+ bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ string ns = dbname + "." + cmdObj.firstElement().valuestr();
+
+ NamespaceDetails * d = nsdetails( ns.c_str() );
+ if ( ! d ) {
+ errmsg = "can't find ns";
+ return false;
+ }
+
+ int geoIdx = -1;
+ {
+ NamespaceDetails::IndexIterator ii = d->ii();
+ while ( ii.more() ) {
+ IndexDetails& id = ii.next();
+ if ( id.getSpec().getTypeName() == GEO2DNAME ) {
+ if ( geoIdx >= 0 ) {
+ errmsg = "2 geo indexes :(";
+ return false;
+ }
+ geoIdx = ii.pos() - 1;
+ }
+ }
+ }
+
+ if ( geoIdx < 0 ) {
+ errmsg = "no geo index :(";
+ return false;
+ }
+
+
+ IndexDetails& id = d->idx( geoIdx );
+ Geo2dType * g = (Geo2dType*)id.getSpec().getType();
+ assert( &id == g->getDetails() );
+
+ int max = 100000;
+
+ auto_ptr<BtreeCursor> bc( BtreeCursor::make( d , geoIdx , id , BSONObj() , BSONObj() , true , 1 ) );
+ BtreeCursor &c = *bc;
+ while ( c.ok() && max-- ) {
+ GeoHash h( c.currKey().firstElement() );
+ int len;
+ cout << "\t" << h.toString()
+ << "\t" << c.current()[g->_geo]
+ << "\t" << hex << h.getHash()
+ << "\t" << hex << ((long long*)c.currKey().firstElement().binData(len))[0]
+ << "\t" << c.current()["_id"]
+ << endl;
+ c.advance();
+ }
+
+ return true;
+ }
+
+ } geoWalkCmd;
+
+ struct GeoUnitTest : public UnitTest {
+
+ int round( double d ) {
+ return (int)(.5+(d*1000));
+ }
+
+#define GEOHEQ(a,b) if ( a.toString() != b ){ cout << "[" << a.toString() << "] != [" << b << "]" << endl; assert( a == GeoHash(b) ); }
+
+ void run() {
+ assert( ! GeoHash::isBitSet( 0 , 0 ) );
+ assert( ! GeoHash::isBitSet( 0 , 31 ) );
+ assert( GeoHash::isBitSet( 1 , 31 ) );
+
+ IndexSpec i( BSON( "loc" << "2d" ) );
+ Geo2dType g( &geo2dplugin , &i );
+ {
+ double x = 73.01212;
+ double y = 41.352964;
+ BSONObj in = BSON( "x" << x << "y" << y );
+ GeoHash h = g._hash( in );
+ BSONObj out = g._unhash( h );
+ assert( round(x) == round( out["x"].number() ) );
+ assert( round(y) == round( out["y"].number() ) );
+ assert( round( in["x"].number() ) == round( out["x"].number() ) );
+ assert( round( in["y"].number() ) == round( out["y"].number() ) );
+ }
+
+ {
+ double x = -73.01212;
+ double y = 41.352964;
+ BSONObj in = BSON( "x" << x << "y" << y );
+ GeoHash h = g._hash( in );
+ BSONObj out = g._unhash( h );
+ assert( round(x) == round( out["x"].number() ) );
+ assert( round(y) == round( out["y"].number() ) );
+ assert( round( in["x"].number() ) == round( out["x"].number() ) );
+ assert( round( in["y"].number() ) == round( out["y"].number() ) );
+ }
+
+ {
+ GeoHash h( "0000" );
+ h.move( 0 , 1 );
+ GEOHEQ( h , "0001" );
+ h.move( 0 , -1 );
+ GEOHEQ( h , "0000" );
+
+ h.init( "0001" );
+ h.move( 0 , 1 );
+ GEOHEQ( h , "0100" );
+ h.move( 0 , -1 );
+ GEOHEQ( h , "0001" );
+
+
+ h.init( "0000" );
+ h.move( 1 , 0 );
+ GEOHEQ( h , "0010" );
+ }
+
+ {
+ Box b( 5 , 5 , 2 );
+ assert( "(5,5) -->> (7,7)" == b.toString() );
+ }
+
+ {
+ GeoHash a = g.hash( 1 , 1 );
+ GeoHash b = g.hash( 4 , 5 );
+ assert( 5 == (int)(g.distance( a , b ) ) );
+ a = g.hash( 50 , 50 );
+ b = g.hash( 42 , 44 );
+ assert( round(10) == round(g.distance( a , b )) );
+ }
+
+ {
+ GeoHash x("0000");
+ assert( 0 == x.getHash() );
+ x.init( 0 , 1 , 32 );
+ GEOHEQ( x , "0000000000000000000000000000000000000000000000000000000000000001" )
+
+ assert( GeoHash( "1100").hasPrefix( GeoHash( "11" ) ) );
+ assert( ! GeoHash( "1000").hasPrefix( GeoHash( "11" ) ) );
+ }
+
+ {
+ GeoHash x("1010");
+ GEOHEQ( x , "1010" );
+ GeoHash y = x + "01";
+ GEOHEQ( y , "101001" );
+ }
+
+ {
+
+ GeoHash a = g.hash( 5 , 5 );
+ GeoHash b = g.hash( 5 , 7 );
+ GeoHash c = g.hash( 100 , 100 );
+ /*
+ cout << "a: " << a << endl;
+ cout << "b: " << b << endl;
+ cout << "c: " << c << endl;
+
+ cout << "a: " << a.toStringHex1() << endl;
+ cout << "b: " << b.toStringHex1() << endl;
+ cout << "c: " << c.toStringHex1() << endl;
+ */
+ BSONObj oa = a.wrap();
+ BSONObj ob = b.wrap();
+ BSONObj oc = c.wrap();
+ /*
+ cout << "a: " << oa.hexDump() << endl;
+ cout << "b: " << ob.hexDump() << endl;
+ cout << "c: " << oc.hexDump() << endl;
+ */
+ assert( oa.woCompare( ob ) < 0 );
+ assert( oa.woCompare( oc ) < 0 );
+
+ }
+
+ {
+ GeoHash x( "000000" );
+ x.move( -1 , 0 );
+ GEOHEQ( x , "101010" );
+ x.move( 1 , -1 );
+ GEOHEQ( x , "010101" );
+ x.move( 0 , 1 );
+ GEOHEQ( x , "000000" );
+ }
+
+ {
+ GeoHash prefix( "110011000000" );
+ GeoHash entry( "1100110000011100000111000001110000011100000111000001000000000000" );
+ assert( ! entry.hasPrefix( prefix ) );
+
+ entry = GeoHash("1100110000001100000111000001110000011100000111000001000000000000");
+ assert( entry.toString().find( prefix.toString() ) == 0 );
+ assert( entry.hasPrefix( GeoHash( "1100" ) ) );
+ assert( entry.hasPrefix( prefix ) );
+ }
+
+ {
+ GeoHash a = g.hash( 50 , 50 );
+ GeoHash b = g.hash( 48 , 54 );
+ assert( round( 4.47214 ) == round( g.distance( a , b ) ) );
+ }
+
+
+ {
+ Box b( Point( 29.762283 , -95.364271 ) , Point( 29.764283000000002 , -95.36227099999999 ) );
+ assert( b.inside( 29.763 , -95.363 ) );
+ assert( ! b.inside( 32.9570255 , -96.1082497 ) );
+ assert( ! b.inside( 32.9570255 , -96.1082497 , .01 ) );
+ }
+
+ {
+ GeoHash a( "11001111" );
+ assert( GeoHash( "11" ) == a.commonPrefix( GeoHash("11") ) );
+ assert( GeoHash( "11" ) == a.commonPrefix( GeoHash("11110000") ) );
+ }
+
+ {
+ int N = 10000;
+ {
+ Timer t;
+ for ( int i=0; i<N; i++ ) {
+ unsigned x = (unsigned)rand();
+ unsigned y = (unsigned)rand();
+ GeoHash h( x , y );
+ unsigned a,b;
+ h.unhash_slow( a,b );
+ assert( a == x );
+ assert( b == y );
+ }
+ //cout << "slow: " << t.millis() << endl;
+ }
+
+ {
+ Timer t;
+ for ( int i=0; i<N; i++ ) {
+ unsigned x = (unsigned)rand();
+ unsigned y = (unsigned)rand();
+ GeoHash h( x , y );
+ unsigned a,b;
+ h.unhash_fast( a,b );
+ assert( a == x );
+ assert( b == y );
+ }
+ //cout << "fast: " << t.millis() << endl;
+ }
+
+ }
+
+ {
+ // see http://en.wikipedia.org/wiki/Great-circle_distance#Worked_example
+
+ {
+ Point BNA (-86.67, 36.12);
+ Point LAX (-118.40, 33.94);
+
+ double dist1 = spheredist_deg(BNA, LAX);
+ double dist2 = spheredist_deg(LAX, BNA);
+
+ // target is 0.45306
+ assert( 0.45305 <= dist1 && dist1 <= 0.45307 );
+ assert( 0.45305 <= dist2 && dist2 <= 0.45307 );
+ }
+ {
+ Point BNA (-1.5127, 0.6304);
+ Point LAX (-2.0665, 0.5924);
+
+ double dist1 = spheredist_rad(BNA, LAX);
+ double dist2 = spheredist_rad(LAX, BNA);
+
+ // target is 0.45306
+ assert( 0.45305 <= dist1 && dist1 <= 0.45307 );
+ assert( 0.45305 <= dist2 && dist2 <= 0.45307 );
+ }
+ {
+ Point JFK (-73.77694444, 40.63861111 );
+ Point LAX (-118.40, 33.94);
+
+ double dist = spheredist_deg(JFK, LAX) * EARTH_RADIUS_MILES;
+ assert( dist > 2469 && dist < 2470 );
+ }
+
+ {
+ Point BNA (-86.67, 36.12);
+ Point LAX (-118.40, 33.94);
+ Point JFK (-73.77694444, 40.63861111 );
+ assert( spheredist_deg(BNA, BNA) < 1e-6);
+ assert( spheredist_deg(LAX, LAX) < 1e-6);
+ assert( spheredist_deg(JFK, JFK) < 1e-6);
+
+ Point zero (0, 0);
+ Point antizero (0,-180);
+
+ // these were known to cause NaN
+ assert( spheredist_deg(zero, zero) < 1e-6);
+ assert( fabs(M_PI-spheredist_deg(zero, antizero)) < 1e-6);
+ assert( fabs(M_PI-spheredist_deg(antizero, zero)) < 1e-6);
+ }
+ }
+ }
+ } geoUnitTest;
+
+
+}
+
diff --git a/src/mongo/db/geo/core.h b/src/mongo/db/geo/core.h
new file mode 100644
index 00000000000..c49131e0162
--- /dev/null
+++ b/src/mongo/db/geo/core.h
@@ -0,0 +1,550 @@
+// core.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../../pch.h"
+#include "../jsobj.h"
+
+#include <cmath>
+
+#ifndef M_PI
+# define M_PI 3.14159265358979323846
+#endif
+
+namespace mongo {
+
+ class GeoBitSets {
+ public:
+ GeoBitSets() {
+ for ( int i=0; i<32; i++ ) {
+ masks32[i] = ( 1 << ( 31 - i ) );
+ }
+ for ( int i=0; i<64; i++ ) {
+ masks64[i] = ( 1LL << ( 63 - i ) );
+ }
+
+ for ( unsigned i=0; i<16; i++ ) {
+ unsigned fixed = 0;
+ for ( int j=0; j<4; j++ ) {
+ if ( i & ( 1 << j ) )
+ fixed |= ( 1 << ( j * 2 ) );
+ }
+ hashedToNormal[fixed] = i;
+ }
+
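+            // allX[b] / allY[b] collect the x (even) and y (odd) interleaved bit positions of the
+            // first b levels; GeoHash uses them to test whether a hash lies on a grid edge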
+ long long currAllX = 0, currAllY = 0;
+ for ( int i = 0; i < 64; i++ ){
+ if( i % 2 == 0 ){
+ allX[ i / 2 ] = currAllX;
+ currAllX = currAllX + ( 1LL << ( 63 - i ) );
+ }
+ else{
+ allY[ i / 2 ] = currAllY;
+ currAllY = currAllY + ( 1LL << ( 63 - i ) );
+ }
+ }
+ }
+ int masks32[32];
+ long long masks64[64];
+ long long allX[32];
+ long long allY[32];
+
+ unsigned hashedToNormal[256];
+ };
+
+ extern GeoBitSets geoBitSets;
+
+ class GeoHash {
+ public:
+
+ GeoHash()
+ : _hash(0),_bits(0) {
+ }
+
+ explicit GeoHash( const char * hash ) {
+ init( hash );
+ }
+
+ explicit GeoHash( const string& hash ) {
+ init( hash );
+ }
+
+ static GeoHash makeFromBinData(const char *bindata, unsigned bits) {
+ GeoHash h;
+ h._bits = bits;
+ h._copy( (char*)&h._hash , bindata );
+ h._fix();
+ return h;
+ }
+
+ explicit GeoHash( const BSONElement& e , unsigned bits=32 ) {
+ _bits = bits;
+ if ( e.type() == BinData ) {
+ int len = 0;
+ _copy( (char*)&_hash , e.binData( len ) );
+ assert( len == 8 );
+ _bits = bits;
+ }
+ else {
+ cout << "GeoHash bad element: " << e << endl;
+ uassert(13047,"wrong type for geo index. if you're using a pre-release version, need to rebuild index",0);
+ }
+ _fix();
+ }
+
+ GeoHash( unsigned x , unsigned y , unsigned bits=32) {
+ init( x , y , bits );
+ }
+
+ GeoHash( const GeoHash& old ) {
+ _hash = old._hash;
+ _bits = old._bits;
+ }
+
+ GeoHash( long long hash , unsigned bits )
+ : _hash( hash ) , _bits( bits ) {
+ _fix();
+ }
+
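+        // Interleave the top 'bits' bits of x and y: x bits land at even hash positions,
+        // y bits at odd positions, most significant first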
+ void init( unsigned x , unsigned y , unsigned bits ) {
+ assert( bits <= 32 );
+ _hash = 0;
+ _bits = bits;
+ for ( unsigned i=0; i<bits; i++ ) {
+ if ( isBitSet( x , i ) ) _hash |= geoBitSets.masks64[i*2];
+ if ( isBitSet( y , i ) ) _hash |= geoBitSets.masks64[(i*2)+1];
+ }
+ }
+
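+        // Fast de-interleave: the hashedToNormal table collapses the interleaved bits of each byte
+        // back into four contiguous x bits and four contiguous y bits, avoiding a per-bit loop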
+ void unhash_fast( unsigned& x , unsigned& y ) const {
+ x = 0;
+ y = 0;
+ char * c = (char*)(&_hash);
+ for ( int i=0; i<8; i++ ) {
+ unsigned t = (unsigned)(c[i]) & 0x55;
+ y |= ( geoBitSets.hashedToNormal[t] << (4*(i)) );
+
+ t = ( (unsigned)(c[i]) >> 1 ) & 0x55;
+ x |= ( geoBitSets.hashedToNormal[t] << (4*(i)) );
+ }
+ }
+
+ void unhash_slow( unsigned& x , unsigned& y ) const {
+ x = 0;
+ y = 0;
+ for ( unsigned i=0; i<_bits; i++ ) {
+ if ( getBitX(i) )
+ x |= geoBitSets.masks32[i];
+ if ( getBitY(i) )
+ y |= geoBitSets.masks32[i];
+ }
+ }
+
+ void unhash( unsigned& x , unsigned& y ) const {
+ unhash_fast( x , y );
+ }
+
+ /**
+ * @param 0 = high
+ */
+ static bool isBitSet( unsigned val , unsigned bit ) {
+ return geoBitSets.masks32[bit] & val;
+ }
+
+ GeoHash up() const {
+ return GeoHash( _hash , _bits - 1 );
+ }
+
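+        // 'other' is a prefix iff the leading 2 * other._bits bits match, i.e. the xor of the two
+        // hashes shifted right by the remaining bits is zero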
+ bool hasPrefix( const GeoHash& other ) const {
+ assert( other._bits <= _bits );
+ if ( other._bits == 0 )
+ return true;
+ long long x = other._hash ^ _hash;
+ x = x >> (64-(other._bits*2));
+ return x == 0;
+ }
+
+
+ string toString() const {
+ StringBuilder buf( _bits * 2 );
+ for ( unsigned x=0; x<_bits*2; x++ )
+ buf.append( _hash & geoBitSets.masks64[x] ? "1" : "0" );
+ return buf.str();
+ }
+
+ string toStringHex1() const {
+ stringstream ss;
+ ss << hex << _hash;
+ return ss.str();
+ }
+
+ void init( const string& s ) {
+ _hash = 0;
+ _bits = s.size() / 2;
+ for ( unsigned pos=0; pos<s.size(); pos++ )
+ if ( s[pos] == '1' )
+ setBit( pos , 1 );
+ }
+
+ void setBit( unsigned pos , bool one ) {
+ assert( pos < _bits * 2 );
+ if ( one )
+ _hash |= geoBitSets.masks64[pos];
+ else if ( _hash & geoBitSets.masks64[pos] )
+ _hash &= ~geoBitSets.masks64[pos];
+ }
+
+ bool getBit( unsigned pos ) const {
+ return _hash & geoBitSets.masks64[pos];
+ }
+
+ bool getBitX( unsigned pos ) const {
+ assert( pos < 32 );
+ return getBit( pos * 2 );
+ }
+
+ bool getBitY( unsigned pos ) const {
+ assert( pos < 32 );
+ return getBit( ( pos * 2 ) + 1 );
+ }
+
+ BSONObj wrap( const char* name = "" ) const {
+ BSONObjBuilder b(20);
+ append( b , name );
+ BSONObj o = b.obj();
+ if( ! strlen( name ) ) assert( o.objsize() == 20 );
+ return o;
+ }
+
+ bool constrains() const {
+ return _bits > 0;
+ }
+
+ bool canRefine() const {
+ return _bits < 32;
+ }
+
+ bool atMinX() const {
+ return ( _hash & geoBitSets.allX[ _bits ] ) == 0;
+ }
+
+ bool atMinY() const {
+ //log() << " MinY : " << hex << (unsigned long long) _hash << " " << _bits << " " << hex << (unsigned long long) geoBitSets.allY[ _bits ] << endl;
+ return ( _hash & geoBitSets.allY[ _bits ] ) == 0;
+ }
+
+ bool atMaxX() const {
+ return ( _hash & geoBitSets.allX[ _bits ] ) == geoBitSets.allX[ _bits ];
+ }
+
+ bool atMaxY() const {
+ return ( _hash & geoBitSets.allY[ _bits ] ) == geoBitSets.allY[ _bits ];
+ }
+
+ void move( int x , int y ) {
+ assert( _bits );
+ _move( 0 , x );
+ _move( 1 , y );
+ }
+
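+        // Step one cell along x (offset 0) or y (offset 1) by rippling a carry/borrow through that
+        // coordinate's interleaved bits, wrapping around on overflow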
+ void _move( unsigned offset , int d ) {
+ if ( d == 0 )
+ return;
+ assert( d <= 1 && d>= -1 ); // TEMP
+
+ bool from, to;
+ if ( d > 0 ) {
+ from = 0;
+ to = 1;
+ }
+ else {
+ from = 1;
+ to = 0;
+ }
+
+ unsigned pos = ( _bits * 2 ) - 1;
+ if ( offset == 0 )
+ pos--;
+ while ( true ) {
+ if ( getBit(pos) == from ) {
+ setBit( pos , to );
+ return;
+ }
+
+ if ( pos < 2 ) {
+ // overflow
+ for ( ; pos < ( _bits * 2 ) ; pos += 2 ) {
+ setBit( pos , from );
+ }
+ return;
+ }
+
+ setBit( pos , from );
+ pos -= 2;
+ }
+
+ assert(0);
+ }
+
+ GeoHash& operator=(const GeoHash& h) {
+ _hash = h._hash;
+ _bits = h._bits;
+ return *this;
+ }
+
+ bool operator==(const GeoHash& h ) const {
+ return _hash == h._hash && _bits == h._bits;
+ }
+
+ bool operator!=(const GeoHash& h ) const {
+ return !( *this == h );
+ }
+
+ bool operator<(const GeoHash& h ) const {
+ if( _hash != h._hash ) return _hash < h._hash;
+ return _bits < h._bits;
+ }
+
+ GeoHash& operator+=( const char * s ) {
+ unsigned pos = _bits * 2;
+ _bits += strlen(s) / 2;
+ assert( _bits <= 32 );
+ while ( s[0] ) {
+ if ( s[0] == '1' )
+ setBit( pos , 1 );
+ pos++;
+ s++;
+ }
+
+ return *this;
+ }
+
+ GeoHash operator+( const char * s ) const {
+ GeoHash n = *this;
+ n+=s;
+ return n;
+ }
+
+ GeoHash operator+( string s ) const {
+ return operator+( s.c_str() );
+ }
+
+ void _fix() {
+ static long long FULL = 0xFFFFFFFFFFFFFFFFLL;
+ long long mask = FULL << ( 64 - ( _bits * 2 ) );
+ _hash &= mask;
+ }
+
+ void append( BSONObjBuilder& b , const char * name ) const {
+ char buf[8];
+ _copy( buf , (char*)&_hash );
+ b.appendBinData( name , 8 , bdtCustom , buf );
+ }
+
+ long long getHash() const {
+ return _hash;
+ }
+
+ unsigned getBits() const {
+ return _bits;
+ }
+
+ GeoHash commonPrefix( const GeoHash& other ) const {
+ unsigned i=0;
+ for ( ; i<_bits && i<other._bits; i++ ) {
+ if ( getBitX( i ) == other.getBitX( i ) &&
+ getBitY( i ) == other.getBitY( i ) )
+ continue;
+ break;
+ }
+ return GeoHash(_hash,i);
+ }
+
+ private:
+
+ static void _copy( char * dst , const char * src ) {
+ for ( unsigned a=0; a<8; a++ ) {
+ dst[a] = src[7-a];
+ }
+ }
+
+ long long _hash;
+ unsigned _bits; // bits per field, so 1 to 32
+ };
+
+ inline ostream& operator<<( ostream &s, const GeoHash &h ) {
+ s << h.toString();
+ return s;
+ }
+
+ class GeoConvert {
+ public:
+ virtual ~GeoConvert() {}
+
+ virtual void unhash( const GeoHash& h , double& x , double& y ) const = 0;
+ virtual GeoHash hash( double x , double y ) const = 0;
+ };
+
+ class Point {
+ public:
+
+ Point( const GeoConvert * g , const GeoHash& hash ) {
+ g->unhash( hash , _x , _y );
+ }
+
+ explicit Point( const BSONElement& e ) {
+ BSONObjIterator i(e.Obj());
+ _x = i.next().number();
+ _y = i.next().number();
+ }
+
+ explicit Point( const BSONObj& o ) {
+ BSONObjIterator i(o);
+ _x = i.next().number();
+ _y = i.next().number();
+ }
+
+ Point( double x , double y )
+ : _x( x ) , _y( y ) {
+ }
+
+ Point() : _x(0),_y(0) {
+ }
+
+ GeoHash hash( const GeoConvert * g ) {
+ return g->hash( _x , _y );
+ }
+
+ double distance( const Point& p ) const {
+ double a = _x - p._x;
+ double b = _y - p._y;
+
+ // Avoid numerical error if possible...
+ if( a == 0 ) return abs( _y - p._y );
+ if( b == 0 ) return abs( _x - p._x );
+
+ return sqrt( ( a * a ) + ( b * b ) );
+ }
+
+ /**
+         * Distance method that compares x or y coords directly when the other direction is zero;
+         * this avoids numerical error when distances are very close to the radius but axis-aligned.
+ *
+ * An example of the problem is:
+ * (52.0 - 51.9999) - 0.0001 = 3.31965e-15 and 52.0 - 51.9999 > 0.0001 in double arithmetic
+ * but:
+ * 51.9999 + 0.0001 <= 52.0
+ *
+         * This avoids some (but not all!) surprising results in $center queries where points are
+ * ( radius + center.x, center.y ) or vice-versa.
+ */
+ bool distanceWithin( const Point& p, double radius ) const {
+ double a = _x - p._x;
+ double b = _y - p._y;
+
+ if( a == 0 ) {
+ //
+                // Note: For some unknown reason, when a 32-bit g++ optimizes this call, the sum is
+ // calculated imprecisely. We need to force the compiler to always evaluate it correctly,
+ // hence the weirdness.
+ //
+ // On some 32-bit linux machines, removing the volatile keyword or calculating the sum inline
+ // will make certain geo tests fail. Of course this check will force volatile for all 32-bit systems,
+ // not just affected systems.
+ if( sizeof(void*) <= 4 ){
+ volatile double sum = _y > p._y ? p._y + radius : _y + radius;
+ return _y > p._y ? sum >= _y : sum >= p._y;
+ }
+ else {
+ // Original math, correct for most systems
+ return _y > p._y ? p._y + radius >= _y : _y + radius >= p._y;
+ }
+ }
+ if( b == 0 ) {
+ if( sizeof(void*) <= 4 ){
+ volatile double sum = _x > p._x ? p._x + radius : _x + radius;
+ return _x > p._x ? sum >= _x : sum >= p._x;
+ }
+ else {
+ return _x > p._x ? p._x + radius >= _x : _x + radius >= p._x;
+ }
+ }
+
+ return sqrt( ( a * a ) + ( b * b ) ) <= radius;
+ }
+
+ string toString() const {
+ StringBuilder buf(32);
+ buf << "(" << _x << "," << _y << ")";
+ return buf.str();
+
+ }
+
+ double _x;
+ double _y;
+ };
+
+
+ extern const double EARTH_RADIUS_KM;
+ extern const double EARTH_RADIUS_MILES;
+
+ // Technically lat/long bounds, not really tied to earth radius.
+ inline void checkEarthBounds( Point p ) {
+ uassert( 14808, str::stream() << "point " << p.toString() << " must be in earth-like bounds of long : [-180, 180), lat : [-90, 90] ",
+ p._x >= -180 && p._x < 180 && p._y >= -90 && p._y <= 90 );
+ }
+
+ inline double deg2rad(double deg) { return deg * (M_PI/180); }
+ inline double rad2deg(double rad) { return rad * (180/M_PI); }
+
+ // WARNING: _x and _y MUST be longitude and latitude in that order
+ // note: multiply by earth radius for distance
+ inline double spheredist_rad( const Point& p1, const Point& p2 ) {
+ // this uses the n-vector formula: http://en.wikipedia.org/wiki/N-vector
+        // If you try to match the code to the formula, note that cross_prod below is actually the
+        // dot product of the two unit n-vectors; the central angle is its acos.
+ // TODO: optimize with SSE
+
+ double sin_x1(sin(p1._x)), cos_x1(cos(p1._x));
+ double sin_y1(sin(p1._y)), cos_y1(cos(p1._y));
+ double sin_x2(sin(p2._x)), cos_x2(cos(p2._x));
+ double sin_y2(sin(p2._y)), cos_y2(cos(p2._y));
+
+ double cross_prod =
+ (cos_y1*cos_x1 * cos_y2*cos_x2) +
+ (cos_y1*sin_x1 * cos_y2*sin_x2) +
+ (sin_y1 * sin_y2);
+
+ if (cross_prod >= 1 || cross_prod <= -1) {
+ // fun with floats
+ assert( fabs(cross_prod)-1 < 1e-6 );
+ return cross_prod > 0 ? 0 : M_PI;
+ }
+
+ return acos(cross_prod);
+ }
+
+ // note: return is still in radians as that can be multiplied by radius to get arc length
+ inline double spheredist_deg( const Point& p1, const Point& p2 ) {
+ return spheredist_rad(
+ Point( deg2rad(p1._x), deg2rad(p1._y) ),
+ Point( deg2rad(p2._x), deg2rad(p2._y) )
+ );
+ }
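+
+    /* usage sketch: the return value is a central angle in radians, so multiply by a radius for
+       arc length (the coordinates below are illustrative (longitude, latitude) pairs):
+
+         double rad = spheredist_deg( Point( -74.0060 , 40.7128 ) ,    // New York
+                                      Point(   2.3522 , 48.8566 ) );   // Paris
+         double km  = rad * EARTH_RADIUS_KM;                           // roughly 5800 km
+    */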
+
+}
diff --git a/src/mongo/db/geo/haystack.cpp b/src/mongo/db/geo/haystack.cpp
new file mode 100644
index 00000000000..104665087f6
--- /dev/null
+++ b/src/mongo/db/geo/haystack.cpp
@@ -0,0 +1,318 @@
+// db/geo/haystack.cpp
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../namespace-inl.h"
+#include "../jsobj.h"
+#include "../index.h"
+#include "../../util/unittest.h"
+#include "../commands.h"
+#include "../pdfile.h"
+#include "../btree.h"
+#include "../curop-inl.h"
+#include "../matcher.h"
+#include "core.h"
+#include "../../util/timer.h"
+
+#define GEOQUADDEBUG(x)
+//#define GEOQUADDEBUG(x) cout << x << endl
+
+/**
+ * this is a geo-based search piece, which is different from a regular geo lookup
+ * this is useful when you want to look for something within a region where the ratio of matches is low
+ * works well for searches like "restaurants within 25 miles with a certain name"
+ * should not be used for finding the closest restaurants that are open
+ */
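+
+/**
+ * sketch of a typical invocation (illustrative values; the field names mirror the checks in
+ * GeoHaystackSearchCommand below, and "foo" / the search criteria are placeholders):
+ *   db.runCommand( { geoSearch : "foo" , near : [ -73.97 , 40.77 ] ,
+ *                    maxDistance : 10 , search : { name : "pizza" } , limit : 30 } )
+ */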
+namespace mongo {
+
+ string GEOSEARCHNAME = "geoHaystack";
+
+ class GeoHaystackSearchHopper {
+ public:
+ GeoHaystackSearchHopper( const BSONObj& n , double maxDistance , unsigned limit , const string& geoField )
+ : _near( n ) , _maxDistance( maxDistance ) , _limit( limit ) , _geoField(geoField) {
+
+ }
+
+ void got( const DiskLoc& loc ) {
+ Point p( loc.obj().getFieldDotted( _geoField ) );
+ if ( _near.distance( p ) > _maxDistance )
+ return;
+ _locs.push_back( loc );
+ }
+
+ int append( BSONArrayBuilder& b ) {
+ for ( unsigned i=0; i<_locs.size() && i<_limit; i++ )
+ b.append( _locs[i].obj() );
+ return _locs.size();
+ }
+
+ Point _near;
+ double _maxDistance;
+ unsigned _limit;
+ string _geoField;
+
+ vector<DiskLoc> _locs;
+ };
+
+ class GeoHaystackSearchIndex : public IndexType {
+
+ public:
+
+ GeoHaystackSearchIndex( const IndexPlugin* plugin , const IndexSpec* spec )
+ : IndexType( plugin , spec ) {
+
+ BSONElement e = spec->info["bucketSize"];
+ uassert( 13321 , "need bucketSize" , e.isNumber() );
+ _bucketSize = e.numberDouble();
+
+ BSONObjBuilder orderBuilder;
+
+ BSONObjIterator i( spec->keyPattern );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( e.type() == String && GEOSEARCHNAME == e.valuestr() ) {
+ uassert( 13314 , "can't have 2 geo fields" , _geo.size() == 0 );
+ uassert( 13315 , "2d has to be first in index" , _other.size() == 0 );
+ _geo = e.fieldName();
+ }
+ else {
+ _other.push_back( e.fieldName() );
+ }
+ orderBuilder.append( "" , 1 );
+ }
+
+ uassert( 13316 , "no geo field specified" , _geo.size() );
+ uassert( 13317 , "no other fields specified" , _other.size() );
+ uassert( 13326 , "quadrant search can only have 1 other field for now" , _other.size() == 1 );
+ _order = orderBuilder.obj();
+ }
+
+ int hash( const BSONElement& e ) const {
+ uassert( 13322 , "not a number" , e.isNumber() );
+ return hash( e.numberDouble() );
+ }
+
+ int hash( double d ) const {
+ d += 180;
+ d /= _bucketSize;
+ return (int)d;
+ }
+
+ string makeString( int hashedX , int hashedY ) const {
+ stringstream ss;
+ ss << hashedX << "_" << hashedY;
+ return ss.str();
+ }
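+
+        /* worked example (a sketch, assuming bucketSize = 1): a point at ( -73.97 , 40.77 )
+           buckets to x = (int)( ( -73.97 + 180 ) / 1 ) = 106 and y = (int)( ( 40.77 + 180 ) / 1 ) = 220,
+           so makeString( 106 , 220 ) yields the key root "106_220"
+        */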
+
+ void _add( const BSONObj& obj, const string& root , const BSONElement& e , BSONObjSet& keys ) const {
+ BSONObjBuilder buf;
+ buf.append( "" , root );
+ if ( e.eoo() )
+ buf.appendNull( "" );
+ else
+ buf.appendAs( e , "" );
+
+ BSONObj key = buf.obj();
+ GEOQUADDEBUG( obj << "\n\t" << root << "\n\t" << key );
+ keys.insert( key );
+ }
+
+ void getKeys( const BSONObj &obj, BSONObjSet &keys ) const {
+
+ BSONElement loc = obj.getFieldDotted( _geo );
+ if ( loc.eoo() )
+ return;
+
+ uassert( 13323 , "latlng not an array" , loc.isABSONObj() );
+ string root;
+ {
+ BSONObjIterator i( loc.Obj() );
+ BSONElement x = i.next();
+ BSONElement y = i.next();
+ root = makeString( hash(x) , hash(y) );
+ }
+
+
+ assert( _other.size() == 1 );
+
+ BSONElementSet all;
+ obj.getFieldsDotted( _other[0] , all );
+
+ if ( all.size() == 0 ) {
+ _add( obj , root , BSONElement() , keys );
+ }
+ else {
+ for ( BSONElementSet::iterator i=all.begin(); i!=all.end(); ++i ) {
+ _add( obj , root , *i , keys );
+ }
+ }
+
+ }
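+
+        /* a sketch of the keys produced above: with _geo = "loc", _other = [ "name" ] and
+           bucketSize 1, the document { loc : [ -73.97 , 40.77 ] , name : "pizza" } produces
+           the single key { "" : "106_220" , "" : "pizza" }
+        */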
+
+ shared_ptr<Cursor> newCursor( const BSONObj& query , const BSONObj& order , int numWanted ) const {
+ shared_ptr<Cursor> c;
+ assert(0);
+ return c;
+ }
+
+ void searchCommand( NamespaceDetails* nsd , int idxNo ,
+ const BSONObj& n /*near*/ , double maxDistance , const BSONObj& search ,
+ BSONObjBuilder& result , unsigned limit ) {
+
+ Timer t;
+
+ log(1) << "SEARCH near:" << n << " maxDistance:" << maxDistance << " search: " << search << endl;
+ int x,y;
+ {
+ BSONObjIterator i( n );
+ x = hash( i.next() );
+ y = hash( i.next() );
+ }
+ int scale = (int)ceil( maxDistance / _bucketSize );
+
+ GeoHaystackSearchHopper hopper(n,maxDistance,limit,_geo);
+
+ long long btreeMatches = 0;
+
+ for ( int a=-scale; a<=scale; a++ ) {
+ for ( int b=-scale; b<=scale; b++ ) {
+
+ BSONObjBuilder bb;
+ bb.append( "" , makeString( x + a , y + b ) );
+ for ( unsigned i=0; i<_other.size(); i++ ) {
+ BSONElement e = search.getFieldDotted( _other[i] );
+ if ( e.eoo() )
+ bb.appendNull( "" );
+ else
+ bb.appendAs( e , "" );
+ }
+
+ BSONObj key = bb.obj();
+
+ GEOQUADDEBUG( "KEY: " << key );
+
+ set<DiskLoc> thisPass;
+ scoped_ptr<BtreeCursor> cursor( BtreeCursor::make( nsd , idxNo , *getDetails() , key , key , true , 1 ) );
+ while ( cursor->ok() ) {
+ pair<set<DiskLoc>::iterator, bool> p = thisPass.insert( cursor->currLoc() );
+ if ( p.second ) {
+ hopper.got( cursor->currLoc() );
+ GEOQUADDEBUG( "\t" << cursor->current() );
+ btreeMatches++;
+ }
+ cursor->advance();
+ }
+ }
+
+ }
+
+ BSONArrayBuilder arr( result.subarrayStart( "results" ) );
+ int num = hopper.append( arr );
+ arr.done();
+
+ {
+ BSONObjBuilder b( result.subobjStart( "stats" ) );
+ b.append( "time" , t.millis() );
+ b.appendNumber( "btreeMatches" , btreeMatches );
+ b.append( "n" , num );
+ b.done();
+ }
+ }
+
+ const IndexDetails* getDetails() const {
+ return _spec->getDetails();
+ }
+
+ string _geo;
+ vector<string> _other;
+
+ BSONObj _order;
+
+ double _bucketSize;
+ };
+
+ class GeoHaystackSearchIndexPlugin : public IndexPlugin {
+ public:
+ GeoHaystackSearchIndexPlugin() : IndexPlugin( GEOSEARCHNAME ) {
+ }
+
+ virtual IndexType* generate( const IndexSpec* spec ) const {
+ return new GeoHaystackSearchIndex( this , spec );
+ }
+
+ } nameIndexPlugin;
+
+
+ class GeoHaystackSearchCommand : public Command {
+ public:
+ GeoHaystackSearchCommand() : Command( "geoSearch" ) {}
+ virtual LockType locktype() const { return READ; }
+ bool slaveOk() const { return true; }
+ bool slaveOverrideOk() const { return true; }
+ bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+
+ string ns = dbname + "." + cmdObj.firstElement().valuestr();
+
+ NamespaceDetails * d = nsdetails( ns.c_str() );
+ if ( ! d ) {
+ errmsg = "can't find ns";
+ return false;
+ }
+
+ vector<int> idxs;
+ d->findIndexByType( GEOSEARCHNAME , idxs );
+ if ( idxs.size() == 0 ) {
+ errmsg = "no geoSearch index";
+ return false;
+ }
+ if ( idxs.size() > 1 ) {
+ errmsg = "more than 1 geosearch index";
+ return false;
+ }
+
+ int idxNum = idxs[0];
+
+ IndexDetails& id = d->idx( idxNum );
+ GeoHaystackSearchIndex * si = (GeoHaystackSearchIndex*)id.getSpec().getType();
+ assert( &id == si->getDetails() );
+
+ BSONElement n = cmdObj["near"];
+ BSONElement maxDistance = cmdObj["maxDistance"];
+ BSONElement search = cmdObj["search"];
+
+ uassert( 13318 , "near needs to be an array" , n.isABSONObj() );
+ uassert( 13319 , "maxDistance needs a number" , maxDistance.isNumber() );
+ uassert( 13320 , "search needs to be an object" , search.type() == Object );
+
+ unsigned limit = 50;
+ if ( cmdObj["limit"].isNumber() )
+ limit = (unsigned)cmdObj["limit"].numberInt();
+
+ si->searchCommand( d , idxNum , n.Obj() , maxDistance.numberDouble() , search.Obj() , result , limit );
+
+ return 1;
+ }
+
+ } nameSearchCommand;
+
+
+
+
+
+}
diff --git a/src/mongo/db/globals.h b/src/mongo/db/globals.h
new file mode 100644
index 00000000000..093bec76a0e
--- /dev/null
+++ b/src/mongo/db/globals.h
@@ -0,0 +1,54 @@
+// @file globals.h
+// grouping of global variables to make concurrency work clearer
+
+#pragma once
+
+namespace mongo {
+
+ void assertStartingUp();
+
+    // this is a prototype for now; we'll see if it is helpful
+
+ /** "value is Const After Server Init" helper
+ *
+ * Example:
+ *
+ * casi<int> foo = 3;
+ * foo.ref() = 4; // asserts if not still in server init
+ * int x = foo+1; // ok anytime
+ *
+ */
+ template< class T >
+ class casi : boost::noncopyable {
+ T val;
+ public:
+ casi(const T& t) : val(t) {
+ DEV assertStartingUp();
+ }
+ operator const T& () { return val; }
+ T& ref() {
+ DEV assertStartingUp();
+ return val;
+ }
+ };
+
+    /** partially specialized for cases where our global variable is a pointer -- we want the value
+ * pointed at to be constant, not just the pointer itself
+ */
+ template< typename T >
+ class casi<T*> : boost::noncopyable {
+ T * val;
+ void operator=(T*);
+ public:
+ casi(T* t) : val(t) {
+ DEV assertStartingUp();
+ }
+ operator const T* () { return val; }
+ const T* get() { return val; }
+ T*& ref() {
+ DEV assertStartingUp();
+ return val;
+ }
+ };
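+
+    /* usage sketch for the pointer specialization (Settings / otherSettings are hypothetical names):
+
+         casi<Settings*> theSettings( new Settings() );
+         theSettings.ref() = otherSettings;   // asserts in DEV builds if not still in server init
+         const Settings* s = theSettings;     // read access is fine anytime
+    */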
+
+}
diff --git a/src/mongo/db/helpers/dblogger.h b/src/mongo/db/helpers/dblogger.h
new file mode 100644
index 00000000000..4d6ee6d78c4
--- /dev/null
+++ b/src/mongo/db/helpers/dblogger.h
@@ -0,0 +1,31 @@
+// @file dblogger.h
+
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+namespace mongo {
+
+    /** helper for logging to (and reading the log of) a capped collection in the database */
+ class DBLogger {
+ bool _inited;
+ public:
+ const string _ns;
+ DBLogger(string ns) : _inited(false), _ns(ns) { }
+ };
+
+}
diff --git a/src/mongo/db/index.cpp b/src/mongo/db/index.cpp
new file mode 100644
index 00000000000..5eaeab551df
--- /dev/null
+++ b/src/mongo/db/index.cpp
@@ -0,0 +1,446 @@
+/** @file index.cpp */
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "namespace-inl.h"
+#include "index.h"
+#include "btree.h"
+#include "background.h"
+#include "repl/rs.h"
+#include "ops/delete.h"
+
+
+namespace mongo {
+
+ template< class V >
+ class IndexInterfaceImpl : public IndexInterface {
+ public:
+ typedef typename V::KeyOwned KeyOwned;
+ typedef Continuation<V> Cont;
+ virtual int keyCompare(const BSONObj& l,const BSONObj& r, const Ordering &ordering);
+
+ Cont *c[NamespaceDetails::NIndexesMax];
+ int n;
+
+ public:
+ IndexInterfaceImpl() { n = 0; }
+
+        /* concurrency note: this supports only one writer at a time */
+ void _phasedBegin() {
+ // we do this here as phasedFinish can throw exceptions (we could catch there, but just as easy to do here)
+ for( int i = 0; i < n; i++ ) {
+ delete c[i];
+ c[i] = 0; // defensive
+ }
+ n = 0;
+ }
+ void phasedQueueItemToInsert(
+ int idxNo,
+ DiskLoc thisLoc, DiskLoc _recordLoc, const BSONObj &_key,
+ const Ordering& _order, IndexDetails& _idx, bool dupsAllowed)
+ {
+ if( idxNo >= n )
+ n = idxNo + 1;
+ Cont *C = c[idxNo] = new Cont(thisLoc, _recordLoc, _key, _order, _idx);
+ thisLoc.btree<V>()->twoStepInsert(thisLoc, *C, dupsAllowed);
+ }
+ void _phasedFinish() {
+ for( int i = 0; i < n; i++ ) {
+                // when mixing v0 and v1 indexes (and only then) there could be nulls in the list
+ if( c[i] ) {
+ c[i]->stepTwo();
+ }
+ }
+ }
+
+/* virtual DiskLoc locate(const IndexDetails &idx , const DiskLoc& thisLoc, const BSONObj& key, const Ordering &order,
+ int& pos, bool& found, const DiskLoc &recordLoc, int direction) {
+ return thisLoc.btree<V>()->locate(idx, thisLoc, key, order, pos, found, recordLoc, direction);
+ }
+ */
+ virtual long long fullValidate(const DiskLoc& thisLoc, const BSONObj &order) {
+ return thisLoc.btree<V>()->fullValidate(thisLoc, order);
+ }
+ virtual DiskLoc findSingle(const IndexDetails &indexdetails , const DiskLoc& thisLoc, const BSONObj& key) const {
+ return thisLoc.btree<V>()->findSingle(indexdetails,thisLoc,key);
+ }
+ virtual bool unindex(const DiskLoc thisLoc, IndexDetails& id, const BSONObj& key, const DiskLoc recordLoc) const {
+ return thisLoc.btree<V>()->unindex(thisLoc, id, key, recordLoc);
+ }
+ virtual int bt_insert(const DiskLoc thisLoc, const DiskLoc recordLoc,
+ const BSONObj& key, const Ordering &order, bool dupsAllowed,
+ IndexDetails& idx, bool toplevel = true) const {
+ return thisLoc.btree<V>()->bt_insert(thisLoc, recordLoc, key, order, dupsAllowed, idx, toplevel);
+ }
+ virtual DiskLoc addBucket(const IndexDetails& id) {
+ return BtreeBucket<V>::addBucket(id);
+ }
+ virtual void uassertIfDups(IndexDetails& idx, vector<BSONObj*>& addedKeys, DiskLoc head, DiskLoc self, const Ordering& ordering) {
+ const BtreeBucket<V> *h = head.btree<V>();
+ for( vector<BSONObj*>::iterator i = addedKeys.begin(); i != addedKeys.end(); i++ ) {
+ KeyOwned k(**i);
+ bool dup = h->wouldCreateDup(idx, head, k, ordering, self);
+ uassert( 11001 , h->dupKeyError( idx , k ) , !dup);
+ }
+ }
+
+ // for geo:
+ virtual bool isUsed(DiskLoc thisLoc, int pos) { return thisLoc.btree<V>()->isUsed(pos); }
+ virtual void keyAt(DiskLoc thisLoc, int pos, BSONObj& key, DiskLoc& recordLoc) {
+ recordLoc = DiskLoc();
+ const BtreeBucket<V>* bucket = thisLoc.btree<V>();
+ int n = bucket->nKeys();
+
+ if( pos < 0 || pos >= n || n == 0xffff /* bucket deleted */ || ! bucket->isUsed( pos ) ){
+ // log() << "Pos: " << pos << " n " << n << endl;
+ return;
+ }
+
+ typename BtreeBucket<V>::KeyNode kn = bucket->keyNode(pos);
+ key = kn.key.toBson();
+ recordLoc = kn.recordLoc;
+ }
+ virtual BSONObj keyAt(DiskLoc thisLoc, int pos) {
+ return thisLoc.btree<V>()->keyAt(pos).toBson();
+ }
+ virtual DiskLoc locate(const IndexDetails &idx , const DiskLoc& thisLoc, const BSONObj& key, const Ordering &order,
+ int& pos, bool& found, const DiskLoc &recordLoc, int direction=1) {
+ return thisLoc.btree<V>()->locate(idx, thisLoc, key, order, pos, found, recordLoc, direction);
+ }
+ virtual DiskLoc advance(const DiskLoc& thisLoc, int& keyOfs, int direction, const char *caller) {
+ return thisLoc.btree<V>()->advance(thisLoc,keyOfs,direction,caller);
+ }
+ };
+
+ int oldCompare(const BSONObj& l,const BSONObj& r, const Ordering &o); // key.cpp
+
+ template <>
+ int IndexInterfaceImpl< V0 >::keyCompare(const BSONObj& l, const BSONObj& r, const Ordering &ordering) {
+ return oldCompare(l, r, ordering);
+ }
+
+ template <>
+ int IndexInterfaceImpl< V1 >::keyCompare(const BSONObj& l, const BSONObj& r, const Ordering &ordering) {
+ return l.woCompare(r, ordering, /*considerfieldname*/false);
+ }
+
+ IndexInterfaceImpl<V0> iii_v0;
+ IndexInterfaceImpl<V1> iii_v1;
+
+ IndexInterface *IndexDetails::iis[] = { &iii_v0, &iii_v1 };
+
+ void IndexInterface::phasedBegin() {
+ iii_v0._phasedBegin();
+ iii_v1._phasedBegin();
+ }
+ void IndexInterface::phasedFinish() {
+ iii_v0._phasedFinish();
+ iii_v1._phasedFinish();
+ }
+
+ int removeFromSysIndexes(const char *ns, const char *idxName) {
+ string system_indexes = cc().database()->name + ".system.indexes";
+ BSONObjBuilder b;
+ b.append("ns", ns);
+ b.append("name", idxName); // e.g.: { name: "ts_1", ns: "foo.coll" }
+ BSONObj cond = b.done();
+ return (int) deleteObjects(system_indexes.c_str(), cond, false, false, true);
+ }
+
+ /* this is just an attempt to clean up old orphaned stuff on a delete all indexes
+ call. repair database is the clean solution, but this gives one a lighter weight
+ partial option. see dropIndexes()
+ */
+ void assureSysIndexesEmptied(const char *ns, IndexDetails *idIndex) {
+ string system_indexes = cc().database()->name + ".system.indexes";
+ BSONObjBuilder b;
+ b.append("ns", ns);
+ if( idIndex ) {
+ b.append("name", BSON( "$ne" << idIndex->indexName().c_str() ));
+ }
+ BSONObj cond = b.done();
+ int n = (int) deleteObjects(system_indexes.c_str(), cond, false, false, true);
+ if( n ) {
+ log() << "info: assureSysIndexesEmptied cleaned up " << n << " entries" << endl;
+ }
+ }
+
+ int IndexDetails::keyPatternOffset( const string& key ) const {
+ BSONObjIterator i( keyPattern() );
+ int n = 0;
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( key == e.fieldName() )
+ return n;
+ n++;
+ }
+ return -1;
+ }
+
+ const IndexSpec& IndexDetails::getSpec() const {
+ SimpleMutex::scoped_lock lk(NamespaceDetailsTransient::_qcMutex);
+ return NamespaceDetailsTransient::get_inlock( info.obj()["ns"].valuestr() ).getIndexSpec( this );
+ }
+
+ /* delete this index. does NOT clean up the system catalog
+ (system.indexes or system.namespaces) -- only NamespaceIndex.
+ */
+ void IndexDetails::kill_idx() {
+ string ns = indexNamespace(); // e.g. foo.coll.$ts_1
+ try {
+
+ string pns = parentNS(); // note we need a copy, as parentNS() won't work after the drop() below
+
+ // clean up parent namespace index cache
+ NamespaceDetailsTransient::get( pns.c_str() ).deletedIndex();
+
+ string name = indexName();
+
+ /* important to catch exception here so we can finish cleanup below. */
+ try {
+ dropNS(ns.c_str());
+ }
+ catch(DBException& ) {
+ log(2) << "IndexDetails::kill(): couldn't drop ns " << ns << endl;
+ }
+ head.setInvalid();
+ info.setInvalid();
+
+ // clean up in system.indexes. we do this last on purpose.
+ int n = removeFromSysIndexes(pns.c_str(), name.c_str());
+ wassert( n == 1 );
+
+ }
+ catch ( DBException &e ) {
+ log() << "exception in kill_idx: " << e << ", ns: " << ns << endl;
+ }
+ }
+
+ void IndexDetails::getKeysFromObject( const BSONObj& obj, BSONObjSet& keys) const {
+ getSpec().getKeys( obj, keys );
+ }
+
+ void setDifference(BSONObjSet &l, BSONObjSet &r, vector<BSONObj*> &diff) {
+ // l and r must use the same ordering spec.
+ verify( 14819, l.key_comp().order() == r.key_comp().order() );
+ BSONObjSet::iterator i = l.begin();
+ BSONObjSet::iterator j = r.begin();
+ while ( 1 ) {
+ if ( i == l.end() )
+ break;
+ while ( j != r.end() && j->woCompare( *i ) < 0 )
+ j++;
+ if ( j == r.end() || i->woCompare(*j) != 0 ) {
+ const BSONObj *jo = &*i;
+ diff.push_back( (BSONObj *) jo );
+ }
+ i++;
+ }
+ }
+
+ void getIndexChanges(vector<IndexChanges>& v, NamespaceDetails& d, BSONObj newObj, BSONObj oldObj, bool &changedId) {
+ int z = d.nIndexesBeingBuilt();
+ v.resize(z);
+ for( int i = 0; i < z; i++ ) {
+ IndexDetails& idx = d.idx(i);
+ BSONObj idxKey = idx.info.obj().getObjectField("key"); // eg { ts : 1 }
+ IndexChanges& ch = v[i];
+ idx.getKeysFromObject(oldObj, ch.oldkeys);
+ idx.getKeysFromObject(newObj, ch.newkeys);
+ if( ch.newkeys.size() > 1 )
+ d.setIndexIsMultikey(i);
+ setDifference(ch.oldkeys, ch.newkeys, ch.removed);
+ setDifference(ch.newkeys, ch.oldkeys, ch.added);
+ if ( ch.removed.size() > 0 && ch.added.size() > 0 && idx.isIdIndex() ) {
+ changedId = true;
+ }
+ }
+ }
+
+ void dupCheck(vector<IndexChanges>& v, NamespaceDetails& d, DiskLoc curObjLoc) {
+ int z = d.nIndexesBeingBuilt();
+ for( int i = 0; i < z; i++ ) {
+ IndexDetails& idx = d.idx(i);
+ v[i].dupCheck(idx, curObjLoc);
+ }
+ }
+
+    // should be { <something> : <simpletype[1|-1]>, ... }
+ static bool validKeyPattern(BSONObj kp) {
+ BSONObjIterator i(kp);
+ while( i.moreWithEOO() ) {
+ BSONElement e = i.next();
+ if( e.type() == Object || e.type() == Array )
+ return false;
+ }
+ return true;
+ }
+
+ /* Prepare to build an index. Does not actually build it (except for a special _id case).
+       - Validates that the params are good
+       - Checks that the index does not already exist
+       - Creates the source collection if it does not already exist
+
+ example of 'io':
+ { ns : 'test.foo', name : 'z', key : { z : 1 } }
+
+ throws DBException
+
+ @param sourceNS - source NS we are indexing
+ @param sourceCollection - its details ptr
+ @return true if ok to continue. when false we stop/fail silently (index already exists)
+ */
+ bool prepareToBuildIndex(const BSONObj& io, bool god, string& sourceNS, NamespaceDetails *&sourceCollection, BSONObj& fixedIndexObject ) {
+ sourceCollection = 0;
+
+ // logical name of the index. todo: get rid of the name, we don't need it!
+ const char *name = io.getStringField("name");
+ uassert(12523, "no index name specified", *name);
+
+ // the collection for which we are building an index
+ sourceNS = io.getStringField("ns");
+ uassert(10096, "invalid ns to index", sourceNS.find( '.' ) != string::npos);
+ uassert(10097, "bad table to index name on add index attempt",
+ cc().database()->name == nsToDatabase(sourceNS.c_str()));
+
+ BSONObj key = io.getObjectField("key");
+ uassert(12524, "index key pattern too large", key.objsize() <= 2048);
+ if( !validKeyPattern(key) ) {
+ string s = string("bad index key pattern ") + key.toString();
+ uasserted(10098 , s.c_str());
+ }
+
+ if ( sourceNS.empty() || key.isEmpty() ) {
+ log(2) << "bad add index attempt name:" << (name?name:"") << "\n ns:" <<
+ sourceNS << "\n idxobj:" << io.toString() << endl;
+ string s = "bad add index attempt " + sourceNS + " key:" + key.toString();
+ uasserted(12504, s);
+ }
+
+ sourceCollection = nsdetails(sourceNS.c_str());
+ if( sourceCollection == 0 ) {
+ // try to create it
+ string err;
+ if ( !userCreateNS(sourceNS.c_str(), BSONObj(), err, false) ) {
+ problem() << "ERROR: failed to create collection while adding its index. " << sourceNS << endl;
+ return false;
+ }
+ sourceCollection = nsdetails(sourceNS.c_str());
+ tlog() << "info: creating collection " << sourceNS << " on add index" << endl;
+ assert( sourceCollection );
+ }
+
+ if ( sourceCollection->findIndexByName(name) >= 0 ) {
+ // index already exists.
+ return false;
+ }
+ if( sourceCollection->findIndexByKeyPattern(key) >= 0 ) {
+ log(2) << "index already exists with diff name " << name << ' ' << key.toString() << endl;
+ return false;
+ }
+
+ if ( sourceCollection->nIndexes >= NamespaceDetails::NIndexesMax ) {
+ stringstream ss;
+ ss << "add index fails, too many indexes for " << sourceNS << " key:" << key.toString();
+ string s = ss.str();
+ log() << s << '\n';
+ uasserted(12505,s);
+ }
+
+ /* we can't build a new index for the ns if a build is already in progress in the background -
+ EVEN IF this is a foreground build.
+ */
+ uassert(12588, "cannot add index with a background operation in progress",
+ !BackgroundOperation::inProgForNs(sourceNS.c_str()));
+
+ /* this is because we want key patterns like { _id : 1 } and { _id : <someobjid> } to
+ all be treated as the same pattern.
+ */
+ if ( IndexDetails::isIdIndexPattern(key) ) {
+ if( !god ) {
+ ensureHaveIdIndex( sourceNS.c_str() );
+ return false;
+ }
+ }
+ else {
+ /* is buildIndexes:false set for this replica set member?
+ if so we don't build any indexes except _id
+ */
+ if( theReplSet && !theReplSet->buildIndexes() )
+ return false;
+ }
+
+ string pluginName = IndexPlugin::findPluginName( key );
+ IndexPlugin * plugin = pluginName.size() ? IndexPlugin::get( pluginName ) : 0;
+
+
+ {
+ BSONObj o = io;
+ if ( plugin ) {
+ o = plugin->adjustIndexSpec(o);
+ }
+ BSONObjBuilder b;
+ int v = DefaultIndexVersionNumber;
+ if( !o["v"].eoo() ) {
+ double vv = o["v"].Number();
+                // note (one day) we may be able to fresh-build fewer index versions than we can use;
+                // isASupportedIndexVersionNumber() reflects what we can use
+ uassert(14803, str::stream() << "this version of mongod cannot build new indexes of version number " << vv,
+ vv == 0 || vv == 1);
+ v = (int) vv;
+ }
+ // idea is to put things we use a lot earlier
+ b.append("v", v);
+ b.append(o["key"]);
+ if( o["unique"].trueValue() )
+                b.appendBool("unique", true); // normalize to bool true in case it was int 1 or something...
+ b.append(o["ns"]);
+
+ {
+                // copy the remaining fields, skipping those already appended above (and _id)
+ BSONObjIterator i(o);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ string s = e.fieldName();
+ if( s != "_id" && s != "v" && s != "ns" && s != "unique" && s != "key" )
+ b.append(e);
+ }
+ }
+
+ fixedIndexObject = b.obj();
+ }
+
+ return true;
+ }
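+
+    /* a sketch of the normalization above: given
+         io = { ns : 'test.foo', name : 'z', key : { z : 1 }, unique : 1 }
+       and no plugin adjustment, fixedIndexObject comes out as
+         { v : 1, key : { z : 1 }, unique : true, ns : 'test.foo', name : 'z' }
+       i.e. v/key/unique/ns are placed first and unique is normalized to a bool.
+    */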
+
+ void IndexSpec::reset( const IndexDetails * details ) {
+ _details = details;
+ reset( details->info );
+ }
+
+ void IndexSpec::reset( const BSONObj& _info ) {
+ info = _info;
+ keyPattern = info["key"].embeddedObjectUserCheck();
+ if ( keyPattern.objsize() == 0 ) {
+ out() << info.toString() << endl;
+ assert(false);
+ }
+ _init();
+ }
+
+}
diff --git a/src/mongo/db/index.h b/src/mongo/db/index.h
new file mode 100644
index 00000000000..d297f8a4ca1
--- /dev/null
+++ b/src/mongo/db/index.h
@@ -0,0 +1,237 @@
+// index.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../pch.h"
+#include "diskloc.h"
+#include "jsobj.h"
+#include "indexkey.h"
+#include "key.h"
+
+namespace mongo {
+
+ class IndexInterface {
+ protected:
+ virtual ~IndexInterface() { }
+ public:
+ static void phasedBegin();
+ virtual void phasedQueueItemToInsert(
+ int idxNo,
+ DiskLoc thisLoc, DiskLoc _recordLoc, const BSONObj &_key,
+ const Ordering& _order, IndexDetails& _idx, bool dupsAllowed) = 0;
+ static void phasedFinish();
+
+ virtual int keyCompare(const BSONObj& l,const BSONObj& r, const Ordering &ordering) = 0;
+ virtual long long fullValidate(const DiskLoc& thisLoc, const BSONObj &order) = 0;
+ virtual DiskLoc findSingle(const IndexDetails &indexdetails , const DiskLoc& thisLoc, const BSONObj& key) const = 0;
+ virtual bool unindex(const DiskLoc thisLoc, IndexDetails& id, const BSONObj& key, const DiskLoc recordLoc) const = 0;
+ virtual int bt_insert(const DiskLoc thisLoc, const DiskLoc recordLoc,
+ const BSONObj& key, const Ordering &order, bool dupsAllowed,
+ IndexDetails& idx, bool toplevel = true) const = 0;
+ virtual DiskLoc addBucket(const IndexDetails&) = 0;
+ virtual void uassertIfDups(IndexDetails& idx, vector<BSONObj*>& addedKeys, DiskLoc head,
+ DiskLoc self, const Ordering& ordering) = 0;
+
+ // these are for geo
+ virtual bool isUsed(DiskLoc thisLoc, int pos) = 0;
+ virtual void keyAt(DiskLoc thisLoc, int pos, BSONObj&, DiskLoc& recordLoc) = 0;
+ virtual BSONObj keyAt(DiskLoc thisLoc, int pos) = 0;
+ virtual DiskLoc locate(const IndexDetails &idx , const DiskLoc& thisLoc, const BSONObj& key, const Ordering &order,
+ int& pos, bool& found, const DiskLoc &recordLoc, int direction=1) = 0;
+ virtual DiskLoc advance(const DiskLoc& thisLoc, int& keyOfs, int direction, const char *caller) = 0;
+ };
+
+ /* Details about a particular index. There is one of these effectively for each object in
+ system.namespaces (although this also includes the head pointer, which is not in that
+ collection).
+
+ ** MemoryMapped Record ** (i.e., this is on disk data)
+ */
+ class IndexDetails {
+ public:
+ /**
+ * btree head disk location
+ * TODO We should make this variable private, since btree operations
+ * may change its value and we don't want clients to rely on an old
+ * value. If we create a btree class, we can provide a btree object
+ * to clients instead of 'head'.
+ */
+ DiskLoc head;
+
+ /* Location of index info object. Format:
+
+ { name:"nameofindex", ns:"parentnsname", key: {keypattobject}
+ [, unique: <bool>, background: <bool>, v:<version>]
+ }
+
+ This object is in the system.indexes collection. Note that since we
+ have a pointer to the object here, the object in system.indexes MUST NEVER MOVE.
+ */
+ DiskLoc info;
+
+ /* extract key value from the query object
+ e.g., if key() == { x : 1 },
+ { x : 70, y : 3 } -> { x : 70 }
+ */
+ BSONObj getKeyFromQuery(const BSONObj& query) const {
+ BSONObj k = keyPattern();
+ BSONObj res = query.extractFieldsUnDotted(k);
+ return res;
+ }
+
+ /* pull out the relevant key objects from obj, so we
+ can index them. Note that the set is multiple elements
+ only when it's a "multikey" array.
+ keys will be left empty if key not found in the object.
+ */
+ void getKeysFromObject( const BSONObj& obj, BSONObjSet& keys) const;
+
+ /* get the key pattern for this object.
+ e.g., { lastname:1, firstname:1 }
+ */
+ BSONObj keyPattern() const {
+ return info.obj().getObjectField("key");
+ }
+
+ /**
+ * @return offset into keyPattern for key
+          -1 if it doesn't exist
+ */
+ int keyPatternOffset( const string& key ) const;
+ bool inKeyPattern( const string& key ) const { return keyPatternOffset( key ) >= 0; }
+
+ /* true if the specified key is in the index */
+ bool hasKey(const BSONObj& key);
+
+ // returns name of this index's storage area
+ // database.table.$index
+ string indexNamespace() const {
+ BSONObj io = info.obj();
+ string s;
+ s.reserve(Namespace::MaxNsLen);
+ s = io.getStringField("ns");
+ assert( !s.empty() );
+ s += ".$";
+ s += io.getStringField("name");
+ return s;
+ }
+
+ string indexName() const { // e.g. "ts_1"
+ BSONObj io = info.obj();
+ return io.getStringField("name");
+ }
+
+ static bool isIdIndexPattern( const BSONObj &pattern ) {
+ BSONObjIterator i(pattern);
+ BSONElement e = i.next();
+ if( strcmp(e.fieldName(), "_id") != 0 ) return false;
+ return i.next().eoo();
+ }
+
+ /* returns true if this is the _id index. */
+ bool isIdIndex() const {
+ return isIdIndexPattern( keyPattern() );
+ }
+
+        /* returns the name of the collection we index
+           (not our own namespace name; see indexNamespace() for that).
+        */
+ string parentNS() const {
+ BSONObj io = info.obj();
+ return io.getStringField("ns");
+ }
+
+ static int versionForIndexObj( const BSONObj &obj ) {
+ BSONElement e = obj["v"];
+ if( e.type() == NumberInt )
+ return e._numberInt();
+ // should normally be an int. this is for backward compatibility
+ int v = e.numberInt();
+ uassert(14802, "index v field should be Integer type", v == 0);
+ return v;
+ }
+
+ int version() const {
+ return versionForIndexObj( info.obj() );
+ }
+
+ /** @return true if index has unique constraint */
+ bool unique() const {
+ BSONObj io = info.obj();
+ return io["unique"].trueValue() ||
+                /* temp: can we just make unique:true always be there for _id and get rid of this? */
+ isIdIndex();
+ }
+
+        /** return true if dropDups was set when building the index (if there are any duplicates, dropDups drops the duplicate objects) */
+ bool dropDups() const {
+ return info.obj().getBoolField( "dropDups" );
+ }
+
+ /** delete this index. does NOT clean up the system catalog
+ (system.indexes or system.namespaces) -- only NamespaceIndex.
+ */
+ void kill_idx();
+
+ const IndexSpec& getSpec() const;
+
+ string toString() const {
+ return info.obj().toString();
+ }
+
+ /** @return true if supported. supported means we can use the index, including adding new keys.
+ it may not mean we can build the index version in question: we may not maintain building
+ of indexes in old formats in the future.
+ */
+ static bool isASupportedIndexVersionNumber(int v) { return (v&1)==v; } // v == 0 || v == 1
+
+        /** @return the interface for this index, which varies with the index version.
+ used for backward compatibility of index versions/formats.
+ */
+ IndexInterface& idxInterface() const {
+ int v = version();
+ dassert( isASupportedIndexVersionNumber(v) );
+ return *iis[v&1];
+ }
+
+ static IndexInterface *iis[];
+ };
+
+ struct IndexChanges { /*on an update*/
+ BSONObjSet oldkeys;
+ BSONObjSet newkeys;
+ vector<BSONObj*> removed; // these keys were removed as part of the change
+ vector<BSONObj*> added; // these keys were added as part of the change
+
+        /** @param curObjLoc - the location of the object we want to add.  if it is already in the
+            index, that is allowed here (for the bg indexing case).
+ */
+ void dupCheck(IndexDetails& idx, DiskLoc curObjLoc) {
+ if( added.empty() || !idx.unique() )
+ return;
+ const Ordering ordering = Ordering::make(idx.keyPattern());
+ idx.idxInterface().uassertIfDups(idx, added, idx.head, curObjLoc, ordering); // "E11001 duplicate key on update"
+ }
+ };
+
+ class NamespaceDetails;
+ // changedId should be initialized to false
+    void getIndexChanges(vector<IndexChanges>& v, NamespaceDetails& d, BSONObj newObj, BSONObj oldObj, bool &changedId);
+ void dupCheck(vector<IndexChanges>& v, NamespaceDetails& d, DiskLoc curObjLoc);
+} // namespace mongo
diff --git a/src/mongo/db/indexkey.cpp b/src/mongo/db/indexkey.cpp
new file mode 100644
index 00000000000..18dfcb079b9
--- /dev/null
+++ b/src/mongo/db/indexkey.cpp
@@ -0,0 +1,462 @@
+// index_key.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "namespace-inl.h"
+#include "index.h"
+#include "btree.h"
+#include "ops/query.h"
+#include "background.h"
+#include "../util/text.h"
+
+namespace mongo {
+
+    /** index version numbers: 0 is the old (<= v1.8) format;
+        1 is the new version
+ */
+ const int DefaultIndexVersionNumber = 1;
+
+ map<string,IndexPlugin*> * IndexPlugin::_plugins;
+
+ IndexType::IndexType( const IndexPlugin * plugin , const IndexSpec * spec )
+ : _plugin( plugin ) , _spec( spec ) {
+
+ }
+
+ IndexType::~IndexType() {
+ }
+
+ const BSONObj& IndexType::keyPattern() const {
+ return _spec->keyPattern;
+ }
+
+ IndexPlugin::IndexPlugin( const string& name )
+ : _name( name ) {
+ if ( ! _plugins )
+ _plugins = new map<string,IndexPlugin*>();
+ (*_plugins)[name] = this;
+ }
+
+ string IndexPlugin::findPluginName( const BSONObj& keyPattern ) {
+ string pluginName = "";
+
+ BSONObjIterator i( keyPattern );
+
+ while( i.more() ) {
+ BSONElement e = i.next();
+ if ( e.type() != String )
+ continue;
+
+ uassert( 13007 , "can only have 1 index plugin / bad index key pattern" , pluginName.size() == 0 || pluginName == e.String() );
+ pluginName = e.String();
+ }
+
+ return pluginName;
+ }
+
+ int IndexType::compare( const BSONObj& l , const BSONObj& r ) const {
+ return l.woCompare( r , _spec->keyPattern );
+ }
+
+ void IndexSpec::_init() {
+ assert( keyPattern.objsize() );
+
+ // some basics
+ _nFields = keyPattern.nFields();
+ _sparse = info["sparse"].trueValue();
+ uassert( 13529 , "sparse only works for single field keys" , ! _sparse || _nFields );
+
+
+ {
+ // build _nullKey
+
+ BSONObjBuilder b;
+ BSONObjIterator i( keyPattern );
+
+ while( i.more() ) {
+ BSONElement e = i.next();
+ _fieldNames.push_back( e.fieldName() );
+ _fixed.push_back( BSONElement() );
+ b.appendNull( "" );
+ }
+ _nullKey = b.obj();
+ }
+
+ {
+ // _nullElt
+ BSONObjBuilder b;
+ b.appendNull( "" );
+ _nullObj = b.obj();
+ _nullElt = _nullObj.firstElement();
+ }
+
+ {
+ // _undefinedElt
+ BSONObjBuilder b;
+ b.appendUndefined( "" );
+ _undefinedObj = b.obj();
+ _undefinedElt = _undefinedObj.firstElement();
+ }
+
+ {
+ // handle plugins
+ string pluginName = IndexPlugin::findPluginName( keyPattern );
+ if ( pluginName.size() ) {
+ IndexPlugin * plugin = IndexPlugin::get( pluginName );
+ if ( ! plugin ) {
+ log() << "warning: can't find plugin [" << pluginName << "]" << endl;
+ }
+ else {
+ _indexType.reset( plugin->generate( this ) );
+ }
+ }
+ }
+
+ _finishedInit = true;
+ }
+
+ void assertParallelArrays( const char *first, const char *second ) {
+ stringstream ss;
+ ss << "cannot index parallel arrays [" << first << "] [" << second << "]";
+ uasserted( ParallelArraysCode , ss.str() );
+ }
+
+ class KeyGeneratorV0 {
+ public:
+ KeyGeneratorV0( const IndexSpec &spec ) : _spec( spec ) {}
+
+ void getKeys( const BSONObj &obj, BSONObjSet &keys ) const {
+ if ( _spec._indexType.get() ) { //plugin (eg geo)
+ _spec._indexType->getKeys( obj , keys );
+ return;
+ }
+ vector<const char*> fieldNames( _spec._fieldNames );
+ vector<BSONElement> fixed( _spec._fixed );
+ _getKeys( fieldNames , fixed , obj, keys );
+ if ( keys.empty() && ! _spec._sparse )
+ keys.insert( _spec._nullKey );
+ }
+
+ private:
+ void _getKeys( vector<const char*> fieldNames , vector<BSONElement> fixed , const BSONObj &obj, BSONObjSet &keys ) const {
+ BSONElement arrElt;
+ unsigned arrIdx = ~0;
+ int numNotFound = 0;
+
+ for( unsigned i = 0; i < fieldNames.size(); ++i ) {
+ if ( *fieldNames[ i ] == '\0' )
+ continue;
+
+ BSONElement e = obj.getFieldDottedOrArray( fieldNames[ i ] );
+
+ if ( e.eoo() ) {
+ e = _spec._nullElt; // no matching field
+ numNotFound++;
+ }
+
+ if ( e.type() != Array )
+ fieldNames[ i ] = ""; // no matching field or non-array match
+
+ if ( *fieldNames[ i ] == '\0' )
+ fixed[ i ] = e; // no need for further object expansion (though array expansion still possible)
+
+ if ( e.type() == Array && arrElt.eoo() ) { // we only expand arrays on a single path -- track the path here
+ arrIdx = i;
+ arrElt = e;
+ }
+
+ // enforce single array path here
+ if ( e.type() == Array && e.rawdata() != arrElt.rawdata() ) {
+ assertParallelArrays( e.fieldName(), arrElt.fieldName() );
+ }
+ }
+
+ bool allFound = true; // have we found elements for all field names in the key spec?
+ for( vector<const char*>::const_iterator i = fieldNames.begin(); i != fieldNames.end(); ++i ) {
+ if ( **i != '\0' ) {
+ allFound = false;
+ break;
+ }
+ }
+
+ if ( _spec._sparse && numNotFound == _spec._nFields ) {
+ // we didn't find any fields
+ // so we're not going to index this document
+ return;
+ }
+
+ bool insertArrayNull = false;
+
+ if ( allFound ) {
+ if ( arrElt.eoo() ) {
+ // no terminal array element to expand
+ BSONObjBuilder b(_spec._sizeTracker);
+ for( vector< BSONElement >::iterator i = fixed.begin(); i != fixed.end(); ++i )
+ b.appendAs( *i, "" );
+ keys.insert( b.obj() );
+ }
+ else {
+ // terminal array element to expand, so generate all keys
+ BSONObjIterator i( arrElt.embeddedObject() );
+ if ( i.more() ) {
+ while( i.more() ) {
+ BSONObjBuilder b(_spec._sizeTracker);
+ for( unsigned j = 0; j < fixed.size(); ++j ) {
+ if ( j == arrIdx )
+ b.appendAs( i.next(), "" );
+ else
+ b.appendAs( fixed[ j ], "" );
+ }
+ keys.insert( b.obj() );
+ }
+ }
+ else if ( fixed.size() > 1 ) {
+ insertArrayNull = true;
+ }
+ }
+ }
+ else {
+ // nonterminal array element to expand, so recurse
+ assert( !arrElt.eoo() );
+ BSONObjIterator i( arrElt.embeddedObject() );
+ if ( i.more() ) {
+ while( i.more() ) {
+ BSONElement e = i.next();
+ if ( e.type() == Object ) {
+ _getKeys( fieldNames, fixed, e.embeddedObject(), keys );
+ }
+ }
+ }
+ else {
+ insertArrayNull = true;
+ }
+ }
+
+ if ( insertArrayNull ) {
+ // x : [] - need to insert undefined
+ BSONObjBuilder b(_spec._sizeTracker);
+ for( unsigned j = 0; j < fixed.size(); ++j ) {
+ if ( j == arrIdx ) {
+ b.appendUndefined( "" );
+ }
+ else {
+ BSONElement e = fixed[j];
+ if ( e.eoo() )
+ b.appendNull( "" );
+ else
+ b.appendAs( e , "" );
+ }
+ }
+ keys.insert( b.obj() );
+ }
+ }
+
+ const IndexSpec &_spec;
+ };
+
+ class KeyGeneratorV1 {
+ public:
+ KeyGeneratorV1( const IndexSpec &spec ) : _spec( spec ) {}
+
+ void getKeys( const BSONObj &obj, BSONObjSet &keys ) const {
+ if ( _spec._indexType.get() ) { //plugin (eg geo)
+ _spec._indexType->getKeys( obj , keys );
+ return;
+ }
+ vector<const char*> fieldNames( _spec._fieldNames );
+ vector<BSONElement> fixed( _spec._fixed );
+ _getKeys( fieldNames , fixed , obj, keys );
+ if ( keys.empty() && ! _spec._sparse )
+ keys.insert( _spec._nullKey );
+ }
+
+ private:
+ /**
+ * @param arrayNestedArray - set if the returned element is an array nested directly within arr.
+ */
+ BSONElement extractNextElement( const BSONObj &obj, const BSONObj &arr, const char *&field, bool &arrayNestedArray ) const {
+ string firstField = mongoutils::str::before( field, '.' );
+ bool haveObjField = !obj.getField( firstField ).eoo();
+ BSONElement arrField = arr.getField( firstField );
+ bool haveArrField = !arrField.eoo();
+
+ // An index component field name cannot exist in both a document array and one of that array's children.
+ uassert( 15855 , str::stream() << "Ambiguous field name found in array (do not use numeric field names in embedded elements in an array), field: '" << arrField.fieldName() << "' for array: " << arr, !haveObjField || !haveArrField );
+
+ arrayNestedArray = false;
+ if ( haveObjField ) {
+ return obj.getFieldDottedOrArray( field );
+ }
+ else if ( haveArrField ) {
+ if ( arrField.type() == Array ) {
+ arrayNestedArray = true;
+ }
+ return arr.getFieldDottedOrArray( field );
+ }
+ return BSONElement();
+ }
+
+ void _getKeysArrEltFixed( vector<const char*> &fieldNames , vector<BSONElement> &fixed , const BSONElement &arrEntry, BSONObjSet &keys, int numNotFound, const BSONElement &arrObjElt, const set< unsigned > &arrIdxs, bool mayExpandArrayUnembedded ) const {
+ // set up any terminal array values
+ for( set<unsigned>::const_iterator j = arrIdxs.begin(); j != arrIdxs.end(); ++j ) {
+ if ( *fieldNames[ *j ] == '\0' ) {
+ fixed[ *j ] = mayExpandArrayUnembedded ? arrEntry : arrObjElt;
+ }
+ }
+ // recurse
+ _getKeys( fieldNames, fixed, ( arrEntry.type() == Object ) ? arrEntry.embeddedObject() : BSONObj(), keys, numNotFound, arrObjElt.embeddedObject() );
+ }
+
+ /**
+ * @param fieldNames - fields to index, may be postfixes in recursive calls
+ * @param fixed - values that have already been identified for their index fields
+ * @param obj - object from which keys should be extracted, based on names in fieldNames
+ * @param keys - set where index keys are written
+ * @param numNotFound - number of index fields that have already been identified as missing
+ * @param array - array from which keys should be extracted, based on names in fieldNames
+ * If obj and array are both nonempty, obj will be one of the elements of array.
+ */
+ void _getKeys( vector<const char*> fieldNames , vector<BSONElement> fixed , const BSONObj &obj, BSONObjSet &keys, int numNotFound = 0, const BSONObj &array = BSONObj() ) const {
+ BSONElement arrElt;
+ set<unsigned> arrIdxs;
+ bool mayExpandArrayUnembedded = true;
+ for( unsigned i = 0; i < fieldNames.size(); ++i ) {
+ if ( *fieldNames[ i ] == '\0' ) {
+ continue;
+ }
+
+ bool arrayNestedArray;
+ // Extract element matching fieldName[ i ] from object xor array.
+ BSONElement e = extractNextElement( obj, array, fieldNames[ i ], arrayNestedArray );
+
+ if ( e.eoo() ) {
+ // if field not present, set to null
+ fixed[ i ] = _spec._nullElt;
+ // done expanding this field name
+ fieldNames[ i ] = "";
+ numNotFound++;
+ }
+ else if ( e.type() == Array ) {
+ arrIdxs.insert( i );
+ if ( arrElt.eoo() ) {
+ // we only expand arrays on a single path -- track the path here
+ arrElt = e;
+ }
+ else if ( e.rawdata() != arrElt.rawdata() ) {
+ // enforce single array path here
+ assertParallelArrays( e.fieldName(), arrElt.fieldName() );
+ }
+ if ( arrayNestedArray ) {
+ mayExpandArrayUnembedded = false;
+ }
+ }
+ else {
+ // not an array - no need for further expansion
+ fixed[ i ] = e;
+ }
+ }
+
+ if ( arrElt.eoo() ) {
+ // No array, so generate a single key.
+ if ( _spec._sparse && numNotFound == _spec._nFields ) {
+ return;
+ }
+ BSONObjBuilder b(_spec._sizeTracker);
+ for( vector< BSONElement >::iterator i = fixed.begin(); i != fixed.end(); ++i ) {
+ b.appendAs( *i, "" );
+ }
+ keys.insert( b.obj() );
+ }
+ else if ( arrElt.embeddedObject().firstElement().eoo() ) {
+ // Empty array, so set matching fields to undefined.
+ _getKeysArrEltFixed( fieldNames, fixed, _spec._undefinedElt, keys, numNotFound, arrElt, arrIdxs, true );
+ }
+ else {
+ // Non empty array that can be expanded, so generate a key for each member.
+ BSONObj arrObj = arrElt.embeddedObject();
+ BSONObjIterator i( arrObj );
+ while( i.more() ) {
+ _getKeysArrEltFixed( fieldNames, fixed, i.next(), keys, numNotFound, arrElt, arrIdxs, mayExpandArrayUnembedded );
+ }
+ }
+ }
+
+ const IndexSpec &_spec;
+ };
+
+ void IndexSpec::getKeys( const BSONObj &obj, BSONObjSet &keys ) const {
+ switch( indexVersion() ) {
+ case 0: {
+ KeyGeneratorV0 g( *this );
+ g.getKeys( obj, keys );
+ break;
+ }
+ case 1: {
+ KeyGeneratorV1 g( *this );
+ g.getKeys( obj, keys );
+ break;
+ }
+ default:
+ massert( 15869, "Invalid index version for key generation.", false );
+ }
+ }
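+
+    /* worked example (a sketch): for keyPattern { a : 1 } and document { a : [ 1, 2 ] },
+       both generators produce the keys { "" : 1 } and { "" : 2 } (field names are stripped and
+       the single array path is expanded); for keyPattern { a : 1, b : 1 } and document { b : 3 }
+       with sparse off, the missing field becomes null and one key { "" : null, "" : 3 } results.
+    */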
+
+ bool anyElementNamesMatch( const BSONObj& a , const BSONObj& b ) {
+ BSONObjIterator x(a);
+ while ( x.more() ) {
+ BSONElement e = x.next();
+ BSONObjIterator y(b);
+ while ( y.more() ) {
+ BSONElement f = y.next();
+ FieldCompareResult res = compareDottedFieldNames( e.fieldName() , f.fieldName() );
+ if ( res == SAME || res == LEFT_SUBFIELD || res == RIGHT_SUBFIELD )
+ return true;
+ }
+ }
+ return false;
+ }
+
+ IndexSuitability IndexSpec::suitability( const BSONObj& query , const BSONObj& order ) const {
+ if ( _indexType.get() )
+ return _indexType->suitability( query , order );
+ return _suitability( query , order );
+ }
+
+ IndexSuitability IndexSpec::_suitability( const BSONObj& query , const BSONObj& order ) const {
+ // TODO: optimize
+ if ( anyElementNamesMatch( keyPattern , query ) == 0 && anyElementNamesMatch( keyPattern , order ) == 0 )
+ return USELESS;
+ return HELPFUL;
+ }
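+
+    /* for example (a sketch): with keyPattern { a : 1 }, the query { b : 5 } with no sort shares
+       no field names with the key pattern and is USELESS, while { a : 5 } (or a sort on { a : 1 })
+       makes the index HELPFUL; index plugins may override suitability() entirely.
+    */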
+
+ IndexSuitability IndexType::suitability( const BSONObj& query , const BSONObj& order ) const {
+ return _spec->_suitability( query , order );
+ }
+
+ int IndexSpec::indexVersion() const {
+ if ( !info.hasField( "v" ) ) {
+ return DefaultIndexVersionNumber;
+ }
+ return IndexDetails::versionForIndexObj( info );
+ }
+
+ bool IndexType::scanAndOrderRequired( const BSONObj& query , const BSONObj& order ) const {
+ return ! order.isEmpty();
+ }
+
+}
diff --git a/src/mongo/db/indexkey.h b/src/mongo/db/indexkey.h
new file mode 100644
index 00000000000..12cd755e8a0
--- /dev/null
+++ b/src/mongo/db/indexkey.h
@@ -0,0 +1,198 @@
+// index_key.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../pch.h"
+#include "diskloc.h"
+#include "jsobj.h"
+#include <map>
+
+namespace mongo {
+
+ extern const int DefaultIndexVersionNumber;
+
+ const int ParallelArraysCode = 10088;
+
+ class Cursor;
+ class IndexSpec;
+ class IndexType; // TODO: this name sucks
+ class IndexPlugin;
+ class IndexDetails;
+
+ enum IndexSuitability { USELESS = 0 , HELPFUL = 1 , OPTIMAL = 2 };
+
+ /**
+     * this represents an instance of an index plugin
+ * done this way so parsing, etc... can be cached
+     * so if there is an FTS IndexPlugin, then for each index using FTS
+ * there will be 1 of these, and it can have things pre-parsed, etc...
+ */
+ class IndexType : boost::noncopyable {
+ public:
+ IndexType( const IndexPlugin * plugin , const IndexSpec * spec );
+ virtual ~IndexType();
+
+ virtual void getKeys( const BSONObj &obj, BSONObjSet &keys ) const = 0;
+ virtual shared_ptr<Cursor> newCursor( const BSONObj& query , const BSONObj& order , int numWanted ) const = 0;
+
+ /** optional op : changes query to match what's in the index */
+ virtual BSONObj fixKey( const BSONObj& in ) { return in; }
+
+ /** optional op : compare 2 objects with regards to this index */
+ virtual int compare( const BSONObj& l , const BSONObj& r ) const;
+
+ /** @return plugin */
+ const IndexPlugin * getPlugin() const { return _plugin; }
+
+ const BSONObj& keyPattern() const;
+
+ virtual IndexSuitability suitability( const BSONObj& query , const BSONObj& order ) const ;
+
+ virtual bool scanAndOrderRequired( const BSONObj& query , const BSONObj& order ) const ;
+
+ protected:
+ const IndexPlugin * _plugin;
+ const IndexSpec * _spec;
+ };
+
+ /**
+ * this represents a plugin
+ * a plugin could be something like full text search, sparse index, etc...
+ * 1 of these exists per type of index per server
+ * 1 IndexType is created per index using this plugin
+ */
+ class IndexPlugin : boost::noncopyable {
+ public:
+ IndexPlugin( const string& name );
+ virtual ~IndexPlugin() {}
+
+ virtual IndexType* generate( const IndexSpec * spec ) const = 0;
+
+ string getName() const { return _name; }
+
+ /**
+ * @return new keyPattern
+ * if nothing changes, should return keyPattern
+ */
+ virtual BSONObj adjustIndexSpec( const BSONObj& spec ) const { return spec; }
+
+ // ------- static below -------
+
+ static IndexPlugin* get( const string& name ) {
+ if ( ! _plugins )
+ return 0;
+ map<string,IndexPlugin*>::iterator i = _plugins->find( name );
+ if ( i == _plugins->end() )
+ return 0;
+ return i->second;
+ }
+
+ /**
+ * @param keyPattern { x : "fts" }
+ * @return "" or the name
+ */
+ static string findPluginName( const BSONObj& keyPattern );
+
+ private:
+ string _name;
+ static map<string,IndexPlugin*> * _plugins;
+ };
+
+    /* precomputed details about an index, used for inserting keys on updates;
+ stored/cached in NamespaceDetailsTransient, or can be used standalone
+ */
+ class IndexSpec {
+ public:
+ BSONObj keyPattern; // e.g., { name : 1 }
+ BSONObj info; // this is the same as IndexDetails::info.obj()
+
+ IndexSpec()
+ : _details(0) , _finishedInit(false) {
+ }
+
+ explicit IndexSpec( const BSONObj& k , const BSONObj& m = BSONObj() )
+ : keyPattern(k) , info(m) , _details(0) , _finishedInit(false) {
+ _init();
+ }
+
+ /**
+         this is a DiskLoc of an IndexDetails info
+ should have a key field
+ */
+ explicit IndexSpec( const DiskLoc& loc ) {
+ reset( loc );
+ }
+
+ void reset( const BSONObj& info );
+ void reset( const DiskLoc& infoLoc ) { reset(infoLoc.obj()); }
+ void reset( const IndexDetails * details );
+
+ void getKeys( const BSONObj &obj, BSONObjSet &keys ) const;
+
+ BSONElement missingField() const { return _nullElt; }
+
+ string getTypeName() const {
+ if ( _indexType.get() )
+ return _indexType->getPlugin()->getName();
+ return "";
+ }
+
+ IndexType* getType() const {
+ return _indexType.get();
+ }
+
+ const IndexDetails * getDetails() const {
+ return _details;
+ }
+
+ IndexSuitability suitability( const BSONObj& query , const BSONObj& order ) const ;
+
+ protected:
+
+ int indexVersion() const;
+
+ IndexSuitability _suitability( const BSONObj& query , const BSONObj& order ) const ;
+
+ BSONSizeTracker _sizeTracker;
+ vector<const char*> _fieldNames;
+ vector<BSONElement> _fixed;
+
+ BSONObj _nullKey; // a full key with all fields null
+ BSONObj _nullObj; // only used for _nullElt
+ BSONElement _nullElt; // jstNull
+
+ BSONObj _undefinedObj; // only used for _undefinedElt
+ BSONElement _undefinedElt; // undefined
+
+ int _nFields; // number of fields in the index
+ bool _sparse; // if the index is sparse
+ shared_ptr<IndexType> _indexType;
+ const IndexDetails * _details;
+
+ void _init();
+
+ friend class IndexType;
+ friend class KeyGeneratorV0;
+ friend class KeyGeneratorV1;
+ public:
+ bool _finishedInit;
+ };
+
+
+} // namespace mongo
diff --git a/src/mongo/db/instance.cpp b/src/mongo/db/instance.cpp
new file mode 100644
index 00000000000..c8f8c6ea85b
--- /dev/null
+++ b/src/mongo/db/instance.cpp
@@ -0,0 +1,1148 @@
+// instance.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "db.h"
+#include "../bson/util/atomic_int.h"
+#include "introspect.h"
+#include "repl.h"
+#include "dbmessage.h"
+#include "instance.h"
+#include "lasterror.h"
+#include "security.h"
+#include "json.h"
+#include "replutil.h"
+#include "../s/d_logic.h"
+#include "../util/file_allocator.h"
+#include "../util/goodies.h"
+#include "cmdline.h"
+#if !defined(_WIN32)
+#include <sys/file.h>
+#endif
+#include "stats/counters.h"
+#include "background.h"
+#include "dur_journal.h"
+#include "dur_recover.h"
+#include "d_concurrency.h"
+#include "ops/count.h"
+#include "ops/delete.h"
+#include "ops/query.h"
+#include "ops/update.h"
+#include "pagefault.h"
+
+namespace mongo {
+
+ // "diaglog"
+ inline void opread(Message& m) { if( _diaglog.getLevel() & 2 ) _diaglog.readop((char *) m.singleData(), m.header()->len); }
+ inline void opwrite(Message& m) { if( _diaglog.getLevel() & 1 ) _diaglog.write((char *) m.singleData(), m.header()->len); }
+
+ void receivedKillCursors(Message& m);
+ void receivedUpdate(Message& m, CurOp& op);
+ void receivedDelete(Message& m, CurOp& op);
+ void receivedInsert(Message& m, CurOp& op);
+ bool receivedGetMore(DbResponse& dbresponse, Message& m, CurOp& curop );
+
+ int nloggedsome = 0;
+#define LOGWITHRATELIMIT if( ++nloggedsome < 1000 || nloggedsome % 100 == 0 )
+
+ string dbExecCommand;
+
+ DiagLog _diaglog;
+
+ bool useCursors = true;
+ bool useHints = true;
+
+ KillCurrentOp killCurrentOp;
+
+ int lockFile = 0;
+#ifdef _WIN32
+ HANDLE lockFileHandle;
+#endif
+
+ // see FSyncCommand:
+ extern bool lockedForWriting;
+
+ OpTime OpTime::now() {
+ DEV d.dbMutex.assertWriteLocked();
+ return now_inlock();
+ }
+ OpTime OpTime::last_inlock(){
+ DEV d.dbMutex.assertAtLeastReadLocked();
+ return last;
+ }
+
+ // OpTime::now() uses dbMutex, thus it is in this file not in the cpp files used by drivers and such
+ void BSONElementManipulator::initTimestamp() {
+ massert( 10332 , "Expected CurrentTime type", _element.type() == Timestamp );
+ unsigned long long &timestamp = *( reinterpret_cast< unsigned long long* >( value() ) );
+ if ( timestamp == 0 )
+ timestamp = OpTime::now().asDate();
+ }
+ void BSONElementManipulator::SetNumber(double d) {
+ if ( _element.type() == NumberDouble )
+ *getDur().writing( reinterpret_cast< double * >( value() ) ) = d;
+ else if ( _element.type() == NumberInt )
+ *getDur().writing( reinterpret_cast< int * >( value() ) ) = (int) d;
+ else assert(0);
+ }
+ void BSONElementManipulator::SetLong(long long n) {
+ assert( _element.type() == NumberLong );
+ *getDur().writing( reinterpret_cast< long long * >(value()) ) = n;
+ }
+ void BSONElementManipulator::SetInt(int n) {
+ assert( _element.type() == NumberInt );
+ getDur().writingInt( *reinterpret_cast< int * >( value() ) ) = n;
+ }
+ /* dur:: version */
+ void BSONElementManipulator::ReplaceTypeAndValue( const BSONElement &e ) {
+ char *d = data();
+ char *v = value();
+ int valsize = e.valuesize();
+ int ofs = (int) (v-d);
+ dassert( ofs > 0 );
+ char *p = (char *) getDur().writingPtr(d, valsize + ofs);
+ *p = e.type();
+ memcpy( p + ofs, e.value(), valsize );
+ }
+
+ void inProgCmd( Message &m, DbResponse &dbresponse ) {
+ BSONObjBuilder b;
+
+ if( ! cc().isAdmin() ) {
+ b.append("err", "unauthorized");
+ }
+ else {
+ DbMessage d(m);
+ QueryMessage q(d);
+ bool all = q.query["$all"].trueValue();
+ vector<BSONObj> vals;
+ {
+ Client& me = cc();
+ scoped_lock bl(Client::clientsMutex);
+ auto_ptr<Matcher> m(new Matcher(q.query));
+ for( set<Client*>::iterator i = Client::clients.begin(); i != Client::clients.end(); i++ ) {
+ Client *c = *i;
+ assert( c );
+ CurOp* co = c->curop();
+ if ( c == &me && !co ) {
+ continue;
+ }
+ assert( co );
+ if( all || co->active() ) {
+ BSONObj info = co->infoNoauth();
+ if ( all || m->matches( info )) {
+ vals.push_back( info );
+ }
+ }
+ }
+ }
+ b.append("inprog", vals);
+ unsigned x = lockedForWriting;
+ if( x ) {
+ b.append("fsyncLock", x);
+ b.append("info", "use db.fsyncUnlock() to terminate the fsync write/snapshot lock");
+ }
+ }
+
+ replyToQuery(0, m, dbresponse, b.obj());
+ }
+
+ void killOp( Message &m, DbResponse &dbresponse ) {
+ BSONObj obj;
+ if( ! cc().isAdmin() ) {
+ obj = fromjson("{\"err\":\"unauthorized\"}");
+ }
+ /*else if( !dbMutexInfo.isLocked() )
+ obj = fromjson("{\"info\":\"no op in progress/not locked\"}");
+ */
+ else {
+ DbMessage d(m);
+ QueryMessage q(d);
+ BSONElement e = q.query.getField("op");
+ if( !e.isNumber() ) {
+ obj = fromjson("{\"err\":\"no op number field specified?\"}");
+ }
+ else {
+ log() << "going to kill op: " << e << endl;
+ obj = fromjson("{\"info\":\"attempting to kill op\"}");
+ killCurrentOp.kill( (unsigned) e.number() );
+ }
+ }
+ replyToQuery(0, m, dbresponse, obj);
+ }
+
+ void unlockFsyncAndWait();
+ void unlockFsync(const char *ns, Message& m, DbResponse &dbresponse) {
+ BSONObj obj;
+ if ( ! cc().isAdmin() ) { // checks auth
+ obj = fromjson("{\"err\":\"unauthorized\"}");
+ }
+ else if (strncmp(ns, "admin.", 6) != 0 ) {
+ obj = fromjson("{\"err\":\"unauthorized - this command must be run against the admin DB\"}");
+ }
+ else {
+ if( lockedForWriting ) {
+ log() << "command: unlock requested" << endl;
+ obj = fromjson("{ok:1,\"info\":\"unlock completed\"}");
+ unlockFsyncAndWait();
+ }
+ else {
+ obj = fromjson("{ok:0,\"errmsg\":\"not locked\"}");
+ }
+ }
+ replyToQuery(0, m, dbresponse, obj);
+ }
+
+ static bool receivedQuery(Client& c, DbResponse& dbresponse, Message& m ) {
+ bool ok = true;
+ MSGID responseTo = m.header()->id;
+
+ DbMessage d(m);
+ QueryMessage q(d);
+ auto_ptr< Message > resp( new Message() );
+
+ CurOp& op = *(c.curop());
+
+ shared_ptr<AssertionException> ex;
+
+ try {
+ dbresponse.exhaust = runQuery(m, q, op, *resp);
+ assert( !resp->empty() );
+ }
+ catch ( SendStaleConfigException& e ){
+ ex.reset( new SendStaleConfigException( e.getns(), e.getInfo().msg ) );
+ ok = false;
+ }
+ catch ( AssertionException& e ) {
+ ex.reset( new AssertionException( e.getInfo().msg, e.getCode() ) );
+ ok = false;
+ }
+
+ if( ex ){
+
+ op.debug().exceptionInfo = ex->getInfo();
+ LOGWITHRATELIMIT {
+ log() << "assertion " << ex->toString() << " ns:" << q.ns << " query:" <<
+ (q.query.valid() ? q.query.toString() : "query object is corrupt") << endl;
+ if( q.ntoskip || q.ntoreturn )
+ log() << " ntoskip:" << q.ntoskip << " ntoreturn:" << q.ntoreturn << endl;
+ }
+
+ SendStaleConfigException* scex = NULL;
+ if ( ex->getCode() == SendStaleConfigCode ) scex = static_cast<SendStaleConfigException*>( ex.get() );
+
+ BSONObjBuilder err;
+ ex->getInfo().append( err );
+ if( scex ) err.append( "ns", scex->getns() );
+ BSONObj errObj = err.done();
+
+ log() << errObj << endl;
+
+ BufBuilder b;
+ b.skip(sizeof(QueryResult));
+ b.appendBuf((void*) errObj.objdata(), errObj.objsize());
+
+ // todo: call replyToQuery() from here instead of this!!! see dbmessage.h
+ QueryResult * msgdata = (QueryResult *) b.buf();
+ b.decouple();
+ QueryResult *qr = msgdata;
+ qr->_resultFlags() = ResultFlag_ErrSet;
+ if( scex ) qr->_resultFlags() |= ResultFlag_ShardConfigStale;
+ qr->len = b.len();
+ qr->setOperation(opReply);
+ qr->cursorId = 0;
+ qr->startingFrom = 0;
+ qr->nReturned = 1;
+ resp.reset( new Message() );
+ resp->setData( msgdata, true );
+
+ }
+
+ op.debug().responseLength = resp->header()->dataLen();
+
+ dbresponse.response = resp.release();
+ dbresponse.responseTo = responseTo;
+
+ return ok;
+ }
+
+ void (*reportEventToSystem)(const char *msg) = 0;
+
+ void mongoAbort(const char *msg) {
+ if( reportEventToSystem )
+ reportEventToSystem(msg);
+ rawOut(msg);
+ ::abort();
+ }
+
+    // dispatch a single client operation to the appropriate handler
+ void _assembleResponse( Message &m, DbResponse &dbresponse, const HostAndPort& remote ) {
+
+ // before we lock...
+ int op = m.operation();
+ bool isCommand = false;
+ const char *ns = m.singleData()->_data + 4;
+ if ( op == dbQuery ) {
+ if( strstr(ns, ".$cmd") ) {
+ isCommand = true;
+ opwrite(m);
+ if( strstr(ns, ".$cmd.sys.") ) {
+ if( strstr(ns, "$cmd.sys.inprog") ) {
+ inProgCmd(m, dbresponse);
+ return;
+ }
+ if( strstr(ns, "$cmd.sys.killop") ) {
+ killOp(m, dbresponse);
+ return;
+ }
+ if( strstr(ns, "$cmd.sys.unlock") ) {
+ unlockFsync(ns, m, dbresponse);
+ return;
+ }
+ }
+ }
+ else {
+ opread(m);
+ }
+ }
+ else if( op == dbGetMore ) {
+ opread(m);
+ }
+ else {
+ opwrite(m);
+ }
+
+ globalOpCounters.gotOp( op , isCommand );
+
+ Client& c = cc();
+
+ auto_ptr<CurOp> nestedOp;
+ CurOp* currentOpP = c.curop();
+ if ( currentOpP->active() ) {
+ nestedOp.reset( new CurOp( &c , currentOpP ) );
+ currentOpP = nestedOp.get();
+ }
+ CurOp& currentOp = *currentOpP;
+ currentOp.reset(remote,op);
+
+ OpDebug& debug = currentOp.debug();
+ debug.op = op;
+
+ int logThreshold = cmdLine.slowMS;
+ bool log = logLevel >= 1;
+
+ if ( op == dbQuery ) {
+ if ( handlePossibleShardedMessage( m , &dbresponse ) )
+ return;
+ receivedQuery(c , dbresponse, m );
+ }
+ else if ( op == dbGetMore ) {
+ if ( ! receivedGetMore(dbresponse, m, currentOp) )
+ log = true;
+ }
+ else if ( op == dbMsg ) {
+ // deprecated - replaced by commands
+ char *p = m.singleData()->_data;
+ int len = strlen(p);
+ if ( len > 400 )
+ out() << curTimeMillis64() % 10000 <<
+ " long msg received, len:" << len << endl;
+
+ Message *resp = new Message();
+ if ( strcmp( "end" , p ) == 0 )
+ resp->setData( opReply , "dbMsg end no longer supported" );
+ else
+ resp->setData( opReply , "i am fine - dbMsg deprecated");
+
+ dbresponse.response = resp;
+ dbresponse.responseTo = m.header()->id;
+ }
+ else {
+ const char *ns = m.singleData()->_data + 4;
+ char cl[256];
+ nsToDatabase(ns, cl);
+ if( ! c.getAuthenticationInfo()->isAuthorized(cl) ) {
+ uassert_nothrow("unauthorized");
+ }
+ else {
+ try {
+ if ( op == dbInsert ) {
+ receivedInsert(m, currentOp);
+ }
+ else if ( op == dbUpdate ) {
+ receivedUpdate(m, currentOp);
+ }
+ else if ( op == dbDelete ) {
+ receivedDelete(m, currentOp);
+ }
+ else if ( op == dbKillCursors ) {
+ currentOp.ensureStarted();
+ logThreshold = 10;
+ receivedKillCursors(m);
+ }
+ else {
+ mongo::log() << " operation isn't supported: " << op << endl;
+ currentOp.done();
+ log = true;
+ }
+ }
+ catch ( UserException& ue ) {
+ tlog(3) << " Caught Assertion in " << opToString(op) << ", continuing " << ue.toString() << endl;
+ debug.exceptionInfo = ue.getInfo();
+ }
+ catch ( AssertionException& e ) {
+ tlog(3) << " Caught Assertion in " << opToString(op) << ", continuing " << e.toString() << endl;
+ debug.exceptionInfo = e.getInfo();
+ log = true;
+ }
+ }
+ }
+ currentOp.ensureStarted();
+ currentOp.done();
+ debug.executionTime = currentOp.totalTimeMillis();
+
+ //DEV log = true;
+ if ( log || debug.executionTime > logThreshold ) {
+ if( logLevel < 3 && op == dbGetMore && strstr(ns, ".oplog.") && debug.executionTime < 4300 && !log ) {
+ /* it's normal for getMore on the oplog to be slow because of use of awaitdata flag. */
+ }
+ else {
+ mongo::tlog() << debug << endl;
+ }
+ }
+
+ if ( currentOp.shouldDBProfile( debug.executionTime ) ) {
+ // performance profiling is on
+ if ( d.dbMutex.getState() < 0 ) {
+ mongo::log(1) << "note: not profiling because recursive read lock" << endl;
+ }
+ else {
+ writelock lk;
+ if ( dbHolder()._isLoaded( nsToDatabase( currentOp.getNS() ) , dbpath ) ) {
+ Client::Context cx( currentOp.getNS() );
+ profile(c , currentOp );
+ }
+ else {
+ mongo::log() << "note: not profiling because db went away - probably a close on: " << currentOp.getNS() << endl;
+ }
+ }
+ }
+
+ debug.reset();
+ } /* _assembleResponse() */
+
+ void assembleResponse( Message &m, DbResponse &dbresponse, const HostAndPort& remote ) {
+ PageFaultRetryableSection s;
+ while( 1 ) {
+ try {
+ _assembleResponse( m, dbresponse, remote );
+ break;
+ }
+ catch( PageFaultException& e ) {
+ DEV log() << "TEMP PageFaultException touch and retry" << endl;
+ e.touch();
+ }
+ }
+ }
+
+ void receivedKillCursors(Message& m) {
+ int *x = (int *) m.singleData()->_data;
+ x++; // reserved
+ int n = *x++;
+
+ uassert( 13659 , "sent 0 cursors to kill" , n != 0 );
+ massert( 13658 , str::stream() << "bad kill cursors size: " << m.dataSize() , m.dataSize() == 8 + ( 8 * n ) );
+ uassert( 13004 , str::stream() << "sent negative cursors to kill: " << n , n >= 1 );
+
+ if ( n > 2000 ) {
+ log( n < 30000 ? LL_WARNING : LL_ERROR ) << "receivedKillCursors, n=" << n << endl;
+ assert( n < 30000 );
+ }
+
+ int found = ClientCursor::erase(n, (long long *) x);
+
+ if ( logLevel > 0 || found != n ) {
+ log( found == n ) << "killcursors: found " << found << " of " << n << endl;
+ }
+
+ }
+
+ /* db - database name
+ path - db directory
+ */
+ /*static*/ void Database::closeDatabase( const char *db, const string& path ) {
+ assertInWriteLock();
+
+ Client::Context * ctx = cc().getContext();
+ assert( ctx );
+ assert( ctx->inDB( db , path ) );
+ Database *database = ctx->db();
+ assert( database->name == db );
+
+ oplogCheckCloseDatabase( database ); // oplog caches some things, dirty its caches
+
+ if( BackgroundOperation::inProgForDb(db) ) {
+ log() << "warning: bg op in prog during close db? " << db << endl;
+ }
+
+ /* important: kill all open cursors on the database */
+ string prefix(db);
+ prefix += '.';
+ ClientCursor::invalidate(prefix.c_str());
+
+ NamespaceDetailsTransient::clearForPrefix( prefix.c_str() );
+
+ dbHolderW().erase( db, path );
+ ctx->_clear();
+ delete database; // closes files
+ }
+
+ void receivedUpdate(Message& m, CurOp& op) {
+ DbMessage d(m);
+ const char *ns = d.getns();
+ op.debug().ns = ns;
+ int flags = d.pullInt();
+ BSONObj query = d.nextJsObj();
+
+ assert( d.moreJSObjs() );
+ assert( query.objsize() < m.header()->dataLen() );
+ BSONObj toupdate = d.nextJsObj();
+ uassert( 10055 , "update object too large", toupdate.objsize() <= BSONObjMaxUserSize);
+ assert( toupdate.objsize() < m.header()->dataLen() );
+ assert( query.objsize() + toupdate.objsize() < m.header()->dataLen() );
+ bool upsert = flags & UpdateOption_Upsert;
+ bool multi = flags & UpdateOption_Multi;
+ bool broadcast = flags & UpdateOption_Broadcast;
+
+ op.debug().query = query;
+ op.setQuery(query);
+
+ writelock lk;
+
+        // void ReplSetImpl::relinquish() uses the big write lock, so
+        // this is synchronized given our lock above.
+ uassert( 10054 , "not master", isMasterNs( ns ) );
+
+        // if this ever moves to outside of the lock, need to adjust the check in Client::Context::_finishInit
+ if ( ! broadcast && handlePossibleShardedMessage( m , 0 ) )
+ return;
+
+ Client::Context ctx( ns );
+
+ UpdateResult res = updateObjects(ns, toupdate, query, upsert, multi, true, op.debug() );
+ lastError.getSafe()->recordUpdate( res.existing , res.num , res.upserted ); // for getlasterror
+ }
+
+ void receivedDelete(Message& m, CurOp& op) {
+ DbMessage d(m);
+ const char *ns = d.getns();
+ op.debug().ns = ns;
+ int flags = d.pullInt();
+ bool justOne = flags & RemoveOption_JustOne;
+ bool broadcast = flags & RemoveOption_Broadcast;
+ assert( d.moreJSObjs() );
+ BSONObj pattern = d.nextJsObj();
+
+ op.debug().query = pattern;
+ op.setQuery(pattern);
+
+ writelock lk(ns);
+
+ // writelock is used to synchronize stepdowns w/ writes
+ uassert( 10056 , "not master", isMasterNs( ns ) );
+
+        // if this ever moves to outside of the lock, need to adjust the check in Client::Context::_finishInit
+ if ( ! broadcast && handlePossibleShardedMessage( m , 0 ) )
+ return;
+
+ Client::Context ctx(ns);
+
+ long long n = deleteObjects(ns, pattern, justOne, true);
+ lastError.getSafe()->recordDelete( n );
+ }
+
+ QueryResult* emptyMoreResult(long long);
+
+ void OpTime::waitForDifferent(unsigned millis){
+ DEV d.dbMutex.assertAtLeastReadLocked();
+
+ if (*this != last) return; // check early
+
+ boost::xtime timeout;
+ boost::xtime_get(&timeout, boost::TIME_UTC);
+
+ timeout.nsec += millis * 1000*1000;
+ if (timeout.nsec >= 1000*1000*1000){
+ timeout.nsec -= 1000*1000*1000;
+ timeout.sec += 1;
+ }
+
+ do {
+ dbtemprelease tmp;
+ boost::mutex::scoped_lock lk(notifyMutex());
+ if (!notifier().timed_wait(lk, timeout))
+ return; // timed out
+ } while (*this != last);
+ }
+
+ bool receivedGetMore(DbResponse& dbresponse, Message& m, CurOp& curop ) {
+ bool ok = true;
+
+ DbMessage d(m);
+
+ const char *ns = d.getns();
+ int ntoreturn = d.pullInt();
+ long long cursorid = d.pullInt64();
+
+ curop.debug().ns = ns;
+ curop.debug().ntoreturn = ntoreturn;
+ curop.debug().cursorid = cursorid;
+
+ time_t start = 0;
+ int pass = 0;
+ bool exhaust = false;
+ QueryResult* msgdata;
+ OpTime last;
+ while( 1 ) {
+ try {
+ Client::ReadContext ctx(ns);
+ if (str::startsWith(ns, "local.oplog.")){
+ if (pass == 0)
+ last = OpTime::last_inlock();
+ else
+ last.waitForDifferent(1000/*ms*/);
+ }
+ msgdata = processGetMore(ns, ntoreturn, cursorid, curop, pass, exhaust);
+ }
+ catch ( AssertionException& e ) {
+ exhaust = false;
+ curop.debug().exceptionInfo = e.getInfo();
+ msgdata = emptyMoreResult(cursorid);
+ ok = false;
+ }
+ if (msgdata == 0) {
+ exhaust = false;
+ massert(13073, "shutting down", !inShutdown() );
+ if( pass == 0 ) {
+ start = time(0);
+ }
+ else {
+ if( time(0) - start >= 4 ) {
+ // after about 4 seconds, return. pass stops at 1000 normally.
+ // we want to return occasionally so slave can checkpoint.
+ pass = 10000;
+ }
+ }
+ pass++;
+ if (debug)
+ sleepmillis(20);
+ else
+ sleepmillis(2);
+ continue;
+ }
+ break;
+ };
+
+ Message *resp = new Message();
+ resp->setData(msgdata, true);
+ curop.debug().responseLength = resp->header()->dataLen();
+ curop.debug().nreturned = msgdata->nReturned;
+
+ dbresponse.response = resp;
+ dbresponse.responseTo = m.header()->id;
+
+ if( exhaust ) {
+ curop.debug().exhaust = true;
+ dbresponse.exhaust = ns;
+ }
+
+ return ok;
+ }
+
+ void checkAndInsert(const char *ns, /*modifies*/BSONObj& js) {
+ uassert( 10059 , "object to insert too large", js.objsize() <= BSONObjMaxUserSize);
+ {
+ // check no $ modifiers. note we only check top level. (scanning deep would be quite expensive)
+ BSONObjIterator i( js );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ uassert( 13511 , "document to insert can't have $ fields" , e.fieldName()[0] != '$' );
+ }
+ }
+ theDataFileMgr.insertWithObjMod(ns, js, false); // js may be modified in the call to add an _id field.
+ logOp("i", ns, js);
+ }
+
+ NOINLINE_DECL void insertMulti(bool keepGoing, const char *ns, vector<BSONObj>& objs) {
+ size_t i;
+ for (i=0; i<objs.size(); i++){
+ try {
+ checkAndInsert(ns, objs[i]);
+ getDur().commitIfNeeded();
+ } catch (const UserException&) {
+ if (!keepGoing || i == objs.size()-1){
+ globalOpCounters.incInsertInWriteLock(i);
+ throw;
+ }
+ // otherwise ignore and keep going
+ }
+ }
+
+ globalOpCounters.incInsertInWriteLock(i);
+ }
+
+ void receivedInsert(Message& m, CurOp& op) {
+ DbMessage d(m);
+ const char *ns = d.getns();
+ op.debug().ns = ns;
+
+ if( !d.moreJSObjs() ) {
+ // strange. should we complain?
+ return;
+ }
+ BSONObj first = d.nextJsObj();
+
+ vector<BSONObj> multi;
+ while (d.moreJSObjs()){
+ if (multi.empty()) // first pass
+ multi.push_back(first);
+ multi.push_back( d.nextJsObj() );
+ }
+
+ writelock lk(ns);
+ //LockCollectionExclusively lk(ns);
+
+        // CONCURRENCY TODO: is being read locked in the big (global) lock sufficient here?
+ // writelock is used to synchronize stepdowns w/ writes
+ uassert( 10058 , "not master", isMasterNs(ns) );
+
+ if ( handlePossibleShardedMessage( m , 0 ) )
+ return;
+
+ Client::Context ctx(ns);
+
+ if( !multi.empty() ) {
+ const bool keepGoing = d.reservedField() & InsertOption_ContinueOnError;
+ insertMulti(keepGoing, ns, multi);
+ return;
+ }
+
+ checkAndInsert(ns, first);
+ globalOpCounters.incInsertInWriteLock(1);
+ }
+
+ void getDatabaseNames( vector< string > &names , const string& usePath ) {
+ boost::filesystem::path path( usePath );
+ for ( boost::filesystem::directory_iterator i( path );
+ i != boost::filesystem::directory_iterator(); ++i ) {
+ if ( directoryperdb ) {
+ boost::filesystem::path p = *i;
+ string dbName = p.leaf();
+ p /= ( dbName + ".ns" );
+ if ( MMF::exists( p ) )
+ names.push_back( dbName );
+ }
+ else {
+ string fileName = boost::filesystem::path(*i).leaf();
+ if ( fileName.length() > 3 && fileName.substr( fileName.length() - 3, 3 ) == ".ns" )
+ names.push_back( fileName.substr( 0, fileName.length() - 3 ) );
+ }
+ }
+ }
+
+ /* returns true if there is data on this server. useful when starting replication.
+ local database does NOT count except for rsoplog collection.
+ used to set the hasData field on replset heartbeat command response
+ */
+ bool replHasDatabases() {
+ vector<string> names;
+ getDatabaseNames(names);
+ if( names.size() >= 2 ) return true;
+ if( names.size() == 1 ) {
+ if( names[0] != "local" )
+ return true;
+ // we have a local database. return true if oplog isn't empty
+ {
+ readlock lk(rsoplog);
+ BSONObj o;
+ if( Helpers::getFirst(rsoplog, o) )
+ return true;
+ }
+ }
+ return false;
+ }
+
+ bool DBDirectClient::call( Message &toSend, Message &response, bool assertOk , string * actualServer ) {
+ if ( lastError._get() )
+ lastError.startRequest( toSend, lastError._get() );
+ DbResponse dbResponse;
+ assembleResponse( toSend, dbResponse , _clientHost );
+ assert( dbResponse.response );
+ dbResponse.response->concat(); // can get rid of this if we make response handling smarter
+ response = *dbResponse.response;
+ getDur().commitIfNeeded();
+ return true;
+ }
+
+ void DBDirectClient::say( Message &toSend, bool isRetry ) {
+ if ( lastError._get() )
+ lastError.startRequest( toSend, lastError._get() );
+ DbResponse dbResponse;
+ assembleResponse( toSend, dbResponse , _clientHost );
+ getDur().commitIfNeeded();
+ }
+
+ auto_ptr<DBClientCursor> DBDirectClient::query(const string &ns, Query query, int nToReturn , int nToSkip ,
+ const BSONObj *fieldsToReturn , int queryOptions ) {
+
+ //if ( ! query.obj.isEmpty() || nToReturn != 0 || nToSkip != 0 || fieldsToReturn || queryOptions )
+ return DBClientBase::query( ns , query , nToReturn , nToSkip , fieldsToReturn , queryOptions );
+ //
+ //assert( query.obj.isEmpty() );
+ //throw UserException( (string)"yay:" + ns );
+ }
+
+ void DBDirectClient::killCursor( long long id ) {
+ ClientCursor::erase( id );
+ }
+
+ HostAndPort DBDirectClient::_clientHost = HostAndPort( "0.0.0.0" , 0 );
+
+ unsigned long long DBDirectClient::count(const string &ns, const BSONObj& query, int options, int limit, int skip ) {
+ LockCollectionForReading lk( ns );
+ string errmsg;
+ long long res = runCount( ns.c_str() , _countCmd( ns , query , options , limit , skip ) , errmsg );
+ if ( res == -1 )
+ return 0;
+ uassert( 13637 , str::stream() << "count failed in DBDirectClient: " << errmsg , res >= 0 );
+ return (unsigned long long )res;
+ }
+
+ DBClientBase * createDirectClient() {
+ return new DBDirectClient();
+ }
+
+ mongo::mutex exitMutex("exit");
+ AtomicUInt numExitCalls = 0;
+
+ bool inShutdown() {
+ return numExitCalls > 0;
+ }
+
+ void tryToOutputFatal( const string& s ) {
+ try {
+ rawOut( s );
+ return;
+ }
+ catch ( ... ) {}
+
+ try {
+ cerr << s << endl;
+ return;
+ }
+ catch ( ... ) {}
+
+ // uh - oh, not sure there is anything else we can do...
+ }
+
+ /** also called by ntservice.cpp */
+ void shutdownServer() {
+
+ log() << "shutdown: going to close listening sockets..." << endl;
+ ListeningSockets::get()->closeAll();
+
+ log() << "shutdown: going to flush diaglog..." << endl;
+ _diaglog.flush();
+
+ /* must do this before unmapping mem or you may get a seg fault */
+ log() << "shutdown: going to close sockets..." << endl;
+ boost::thread close_socket_thread( boost::bind(MessagingPort::closeAllSockets, 0) );
+
+ // wait until file preallocation finishes
+ // we would only hang here if the file_allocator code generates a
+ // synchronous signal, which we don't expect
+ log() << "shutdown: waiting for fs preallocator..." << endl;
+ FileAllocator::get()->waitUntilFinished();
+
+ if( cmdLine.dur ) {
+ log() << "shutdown: lock for final commit..." << endl;
+ {
+ int n = 10;
+ while( 1 ) {
+ // we may already be in a read lock from earlier in the call stack, so do read lock here
+ // to be consistent with that.
+ readlocktry w("", 20000);
+ if( w.got() ) {
+ log() << "shutdown: final commit..." << endl;
+ getDur().commitNow();
+ break;
+ }
+ if( --n <= 0 ) {
+ log() << "shutdown: couldn't acquire write lock, aborting" << endl;
+ mongoAbort("couldn't acquire write lock");
+ }
+ log() << "shutdown: waiting for write lock..." << endl;
+ }
+ }
+ MemoryMappedFile::flushAll(true);
+ }
+
+ log() << "shutdown: closing all files..." << endl;
+ stringstream ss3;
+ MemoryMappedFile::closeAllFiles( ss3 );
+ log() << ss3.str() << endl;
+
+ if( cmdLine.dur ) {
+ dur::journalCleanup(true);
+ }
+
+#if !defined(__sunos__)
+ if ( lockFile ) {
+ log() << "shutdown: removing fs lock..." << endl;
+ /* This ought to be an unlink(), but Eliot says the last
+ time that was attempted, there was a race condition
+ with acquirePathLock(). */
+#ifdef _WIN32
+ if( _chsize( lockFile , 0 ) )
+ log() << "couldn't remove fs lock " << WSAGetLastError() << endl;
+ CloseHandle(lockFileHandle);
+#else
+ if( ftruncate( lockFile , 0 ) )
+ log() << "couldn't remove fs lock " << errnoWithDescription() << endl;
+ flock( lockFile, LOCK_UN );
+#endif
+ }
+#endif
+ }
+
+ void exitCleanly( ExitCode code ) {
+ killCurrentOp.killAll();
+ {
+ dblock lk;
+ log() << "now exiting" << endl;
+ dbexit( code );
+ }
+ }
+
+
+ namespace dur {
+ extern mutex groupCommitMutex;
+ }
+
+ /* not using log() herein in case we are already locked */
+ NOINLINE_DECL void dbexit( ExitCode rc, const char *why, bool tryToGetLock ) {
+
+ auto_ptr<writelocktry> wlt;
+ if ( tryToGetLock ) {
+ wlt.reset( new writelocktry( "" , 2 * 60 * 1000 ) );
+ uassert( 13455 , "dbexit timed out getting lock" , wlt->got() );
+ }
+
+ Client * c = currentClient.get();
+ {
+ scoped_lock lk( exitMutex );
+ if ( numExitCalls++ > 0 ) {
+ if ( numExitCalls > 5 ) {
+ // this means something horrible has happened
+ ::_exit( rc );
+ }
+ stringstream ss;
+ ss << "dbexit: " << why << "; exiting immediately";
+ tryToOutputFatal( ss.str() );
+ if ( c ) c->shutdown();
+ ::exit( rc );
+ }
+ }
+
+ {
+ stringstream ss;
+ ss << "dbexit: " << why;
+ tryToOutputFatal( ss.str() );
+ }
+
+ try {
+ shutdownServer(); // gracefully shutdown instance
+ }
+ catch ( ... ) {
+ tryToOutputFatal( "shutdown failed with exception" );
+ }
+
+#if defined(_DEBUG)
+ try {
+ mutexDebugger.programEnding();
+ }
+ catch (...) { }
+#endif
+
+ // block the dur thread from doing any work for the rest of the run
+ log(2) << "shutdown: groupCommitMutex" << endl;
+ scoped_lock lk(dur::groupCommitMutex);
+
+#ifdef _WIN32
+ // Windows Service Controller wants to be told when we are down,
+ // so don't call ::exit() yet, or say "really exiting now"
+ //
+ if ( rc == EXIT_WINDOWS_SERVICE_STOP ) {
+ if ( c ) c->shutdown();
+ return;
+ }
+#endif
+ tryToOutputFatal( "dbexit: really exiting now" );
+ if ( c ) c->shutdown();
+ ::exit(rc);
+ }
+
+#if !defined(__sunos__)
+ void writePid(int fd) {
+ stringstream ss;
+ ss << getpid() << endl;
+ string s = ss.str();
+ const char * data = s.c_str();
+#ifdef _WIN32
+ assert ( _write( fd, data, strlen( data ) ) );
+#else
+ assert ( write( fd, data, strlen( data ) ) );
+#endif
+ }
+
+ void acquirePathLock(bool doingRepair) {
+ string name = ( boost::filesystem::path( dbpath ) / "mongod.lock" ).native_file_string();
+
+ bool oldFile = false;
+
+ if ( boost::filesystem::exists( name ) && boost::filesystem::file_size( name ) > 0 ) {
+ oldFile = true;
+ }
+
+#ifdef _WIN32
+ lockFileHandle = CreateFileA( name.c_str(), GENERIC_READ | GENERIC_WRITE,
+ 0 /* do not allow anyone else access */, NULL,
+ OPEN_ALWAYS /* success if fh can open */, 0, NULL );
+
+ if (lockFileHandle == INVALID_HANDLE_VALUE) {
+ DWORD code = GetLastError();
+ char *msg;
+ FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM,
+ NULL, code, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ (LPSTR)&msg, 0, NULL);
+ string m = msg;
+ str::stripTrailing(m, "\r\n");
+ uasserted( 13627 , str::stream() << "Unable to create/open lock file: " << name << ' ' << m << ". Is a mongod instance already running?" );
+ }
+ lockFile = _open_osfhandle((intptr_t)lockFileHandle, 0);
+#else
+ lockFile = open( name.c_str(), O_RDWR | O_CREAT , S_IRWXU | S_IRWXG | S_IRWXO );
+ if( lockFile <= 0 ) {
+ uasserted( 10309 , str::stream() << "Unable to create/open lock file: " << name << ' ' << errnoWithDescription() << " Is a mongod instance already running?" );
+ }
+ if (flock( lockFile, LOCK_EX | LOCK_NB ) != 0) {
+ close ( lockFile );
+ lockFile = 0;
+ uassert( 10310 , "Unable to lock file: " + name + ". Is a mongod instance already running?", 0 );
+ }
+#endif
+
+ if ( oldFile ) {
+ // we check this here because we want to see if we can get the lock
+        // if we can't, then it's probably just another mongod running
+
+ string errmsg;
+ if (cmdLine.dur) {
+ if (!dur::haveJournalFiles()) {
+
+ vector<string> dbnames;
+ getDatabaseNames( dbnames );
+
+ if ( dbnames.size() == 0 ) {
+ // this means that mongod crashed
+ // between initial startup and when journaling was initialized
+ // it is safe to continue
+ }
+ else {
+ errmsg = str::stream()
+ << "************** \n"
+ << "old lock file: " << name << ". probably means unclean shutdown,\n"
+ << "but there are no journal files to recover.\n"
+ << "this is likely human error or filesystem corruption.\n"
+ << "found " << dbnames.size() << " dbs.\n"
+ << "see: http://dochub.mongodb.org/core/repair for more information\n"
+ << "*************";
+ }
+
+
+ }
+ }
+ else {
+ if (!dur::haveJournalFiles() && !doingRepair) {
+ errmsg = str::stream()
+ << "************** \n"
+ << "Unclean shutdown detected.\n"
+ << "Please visit http://dochub.mongodb.org/core/repair for recovery instructions.\n"
+ << "*************";
+ }
+ }
+
+ if (!errmsg.empty()) {
+ cout << errmsg << endl;
+#ifdef _WIN32
+ CloseHandle( lockFileHandle );
+#else
+ close ( lockFile );
+#endif
+ lockFile = 0;
+ uassert( 12596 , "old lock file" , 0 );
+ }
+ }
+
+ // Not related to lock file, but this is where we handle unclean shutdown
+ if( !cmdLine.dur && dur::haveJournalFiles() ) {
+ cout << "**************" << endl;
+ cout << "Error: journal files are present in journal directory, yet starting without journaling enabled." << endl;
+ cout << "It is recommended that you start with journaling enabled so that recovery may occur." << endl;
+ cout << "**************" << endl;
+ uasserted(13597, "can't start without --journal enabled when journal/ files are present");
+ }
+
+#ifdef _WIN32
+ uassert( 13625, "Unable to truncate lock file", _chsize(lockFile, 0) == 0);
+ writePid( lockFile );
+ _commit( lockFile );
+#else
+ uassert( 13342, "Unable to truncate lock file", ftruncate(lockFile, 0) == 0);
+ writePid( lockFile );
+ fsync( lockFile );
+ flushMyDirectory(name);
+#endif
+ }
+#else
+ void acquirePathLock(bool) {
+        // TODO - it is very bad that the code above is not running here.
+
+ // Not related to lock file, but this is where we handle unclean shutdown
+ if( !cmdLine.dur && dur::haveJournalFiles() ) {
+ cout << "**************" << endl;
+ cout << "Error: journal files are present in journal directory, yet starting without --journal enabled." << endl;
+ cout << "It is recommended that you start with journaling enabled so that recovery may occur." << endl;
+ cout << "Alternatively (not recommended), you can backup everything, then delete the journal files, and run --repair" << endl;
+ cout << "**************" << endl;
+ uasserted(13618, "can't start without --journal enabled when journal/ files are present");
+ }
+ }
+#endif
+
+} // namespace mongo
diff --git a/src/mongo/db/instance.h b/src/mongo/db/instance.h
new file mode 100644
index 00000000000..9dde729997d
--- /dev/null
+++ b/src/mongo/db/instance.h
@@ -0,0 +1,174 @@
+// instance.h : Global state functions.
+//
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+
+#include "../client/dbclient.h"
+#include "curop-inl.h"
+#include "security.h"
+#include "cmdline.h"
+#include "client.h"
+
+namespace mongo {
+
+ extern string dbExecCommand;
+
+ /** a high level recording of operations to the database - sometimes used for diagnostics
+ and debugging.
+ */
+ class DiagLog {
+ ofstream *f; // note this is never freed
+ /* 0 = off; 1 = writes, 2 = reads, 3 = both
+ 7 = log a few reads, and all writes.
+ */
+ int level;
+ mongo::mutex mutex;
+ void openFile() {
+ assert( f == 0 );
+ stringstream ss;
+ ss << dbpath << "/diaglog." << hex << time(0);
+ string name = ss.str();
+ f = new ofstream(name.c_str(), ios::out | ios::binary);
+ if ( ! f->good() ) {
+ problem() << "diagLogging couldn't open " << name << endl;
+ // todo what is this? :
+ throw 1717;
+ }
+ else {
+ log() << "diagLogging using file " << name << endl;
+ }
+ }
+ public:
+ DiagLog() : f(0) , level(0), mutex("DiagLog") { }
+ int getLevel() const { return level; }
+ /**
+ * @return old
+ */
+ int setLevel( int newLevel ) {
+ scoped_lock lk(mutex);
+ int old = level;
+ log() << "diagLogging level=" << newLevel << endl;
+ if( f == 0 ) {
+ openFile();
+ }
+ level = newLevel; // must be done AFTER f is set
+ return old;
+ }
+ void flush() {
+ if ( level ) {
+ log() << "flushing diag log" << endl;
+ scoped_lock lk(mutex);
+ f->flush();
+ }
+ }
+ void write(char *data,int len) {
+ if ( level & 1 ) {
+ scoped_lock lk(mutex);
+ f->write(data,len);
+ }
+ }
+ void readop(char *data, int len) {
+ if ( level & 2 ) {
+ bool log = (level & 4) == 0;
+ OCCASIONALLY log = true;
+ if ( log ) {
+ scoped_lock lk(mutex);
+ assert( f );
+ f->write(data,len);
+ }
+ }
+ }
+ };
+
+ extern DiagLog _diaglog;
+
+ /* we defer response until we unlock. don't want a blocked socket to
+ keep things locked.
+ */
+ struct DbResponse {
+ Message *response;
+ MSGID responseTo;
+ const char *exhaust; /* points to ns if exhaust mode. 0=normal mode*/
+ DbResponse(Message *r, MSGID rt) : response(r), responseTo(rt), exhaust(0) { }
+ DbResponse() {
+ response = 0;
+ exhaust = 0;
+ }
+ ~DbResponse() { delete response; }
+ };
+
+ void assembleResponse( Message &m, DbResponse &dbresponse, const HostAndPort &client );
+
+ void getDatabaseNames( vector< string > &names , const string& usePath = dbpath );
+
+    /* returns true if there is data on this server. useful when starting replication.
+       local database does NOT count, except for the rsoplog collection.
+    */
+ bool replHasDatabases();
+
+ /** "embedded" calls to the local server directly.
+ Caller does not need to lock, that is handled within.
+ */
+ class DBDirectClient : public DBClientBase {
+ public:
+ virtual auto_ptr<DBClientCursor> query(const string &ns, Query query, int nToReturn = 0, int nToSkip = 0,
+ const BSONObj *fieldsToReturn = 0, int queryOptions = 0);
+
+ virtual bool isFailed() const {
+ return false;
+ }
+ virtual string toString() {
+ return "DBDirectClient";
+ }
+ virtual string getServerAddress() const {
+ return "localhost"; // TODO: should this have the port?
+ }
+ virtual bool call( Message &toSend, Message &response, bool assertOk=true , string * actualServer = 0 );
+ virtual void say( Message &toSend, bool isRetry = false );
+ virtual void sayPiggyBack( Message &toSend ) {
+ // don't need to piggy back when connected locally
+ return say( toSend );
+ }
+
+ virtual void killCursor( long long cursorID );
+
+ virtual bool callRead( Message& toSend , Message& response ) {
+ return call( toSend , response );
+ }
+
+ virtual unsigned long long count(const string &ns, const BSONObj& query = BSONObj(), int options=0, int limit=0, int skip=0 );
+
+ virtual ConnectionString::ConnectionType type() const { return ConnectionString::MASTER; }
+
+ double getSoTimeout() const { return 0; }
+
+ virtual bool lazySupported() const { return true; }
+ private:
+ static HostAndPort _clientHost;
+ };
+
+ extern int lockFile;
+#ifdef _WIN32
+ extern HANDLE lockFileHandle;
+#endif
+ void acquirePathLock(bool doingRepair=false); // if doingRepair=true don't consider unclean shutdown an error
+ void maybeCreatePidFile();
+
+} // namespace mongo
diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp
new file mode 100644
index 00000000000..7e1d19ce2f3
--- /dev/null
+++ b/src/mongo/db/introspect.cpp
@@ -0,0 +1,88 @@
+// introspect.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "introspect.h"
+#include "../bson/util/builder.h"
+#include "../util/goodies.h"
+#include "pdfile.h"
+#include "jsobj.h"
+#include "pdfile.h"
+#include "curop.h"
+
+namespace mongo {
+
+ BufBuilder profileBufBuilder; // reused, instead of allocated every time - avoids a malloc/free cycle
+
+ void profile( const Client& c , CurOp& currentOp ) {
+ assertInWriteLock();
+
+ Database *db = c.database();
+ DEV assert( db );
+ const char *ns = db->profileName.c_str();
+
+ // build object
+ profileBufBuilder.reset();
+ BSONObjBuilder b(profileBufBuilder);
+ b.appendDate("ts", jsTime());
+ currentOp.debug().append( currentOp , b );
+
+ b.append("client", c.clientAddress() );
+
+ if ( c.getAuthenticationInfo() )
+ b.append( "user" , c.getAuthenticationInfo()->getUser( nsToDatabase( ns ) ) );
+
+ BSONObj p = b.done();
+
+ if (p.objsize() > 100*1024){
+ string small = p.toString(/*isArray*/false, /*full*/false);
+
+ warning() << "can't add full line to system.profile: " << small;
+
+ // rebuild with limited info
+ BSONObjBuilder b(profileBufBuilder);
+ b.appendDate("ts", jsTime());
+ b.append("client", c.clientAddress() );
+ if ( c.getAuthenticationInfo() )
+ b.append( "user" , c.getAuthenticationInfo()->getUser( nsToDatabase( ns ) ) );
+
+ b.append("err", "profile line too large (max is 100KB)");
+ if (small.size() < 100*1024){ // should be much smaller but if not don't break anything
+ b.append("abbreviated", small);
+ }
+
+ p = b.done();
+ }
+
+ // write: not replicated
+ NamespaceDetails *d = db->namespaceIndex.details(ns);
+ if( d ) {
+ int len = p.objsize();
+ Record *r = theDataFileMgr.fast_oplog_insert(d, ns, len);
+ memcpy(getDur().writingPtr(r->data, len), p.objdata(), len);
+ }
+ else {
+ static time_t last;
+ if( time(0) > last+10 ) {
+ log() << "profile: warning ns " << ns << " does not exist" << endl;
+ last = time(0);
+ }
+ }
+ }
+
+} // namespace mongo
diff --git a/src/mongo/db/introspect.h b/src/mongo/db/introspect.h
new file mode 100644
index 00000000000..209eeacab7c
--- /dev/null
+++ b/src/mongo/db/introspect.h
@@ -0,0 +1,34 @@
+// introspect.h
+// system management stuff.
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../pch.h"
+#include "jsobj.h"
+#include "pdfile.h"
+
+namespace mongo {
+
+ /* --- profiling --------------------------------------------
+ do when database->profile is set
+ */
+
+ void profile( const Client& c , CurOp& currentOp );
+
+} // namespace mongo
diff --git a/src/mongo/db/javatest.cpp b/src/mongo/db/javatest.cpp
new file mode 100644
index 00000000000..22f2bdf8d3c
--- /dev/null
+++ b/src/mongo/db/javatest.cpp
@@ -0,0 +1,24 @@
+// javatest.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "javajs.h"
+
+int main() {
+ JavaJS = new JavaJSImpl();
+ javajstest();
+}
diff --git a/src/mongo/db/jsobj.cpp b/src/mongo/db/jsobj.cpp
new file mode 100644
index 00000000000..1e850982396
--- /dev/null
+++ b/src/mongo/db/jsobj.cpp
@@ -0,0 +1,1268 @@
+/** @file jsobj.cpp - BSON implementation
+ http://www.mongodb.org/display/DOCS/BSON
+*/
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "../bson/oid.h"
+#include "jsobj.h"
+#include "nonce.h"
+#include "../bson/util/atomic_int.h"
+#include "../util/base64.h"
+#include "../util/md5.hpp"
+#include <limits>
+#include <cmath>
+#include "../util/unittest.h"
+#include "../util/embedded_builder.h"
+#include "../util/stringutils.h"
+#include "../util/mongoutils/str.h"
+#include "json.h"
+#include "jsobjmanipulator.h"
+#include "../util/optime.h"
+#include <boost/static_assert.hpp>
+#undef assert
+#define assert MONGO_assert
+
+// make sure our assumptions are valid
+BOOST_STATIC_ASSERT( sizeof(short) == 2 );
+BOOST_STATIC_ASSERT( sizeof(int) == 4 );
+BOOST_STATIC_ASSERT( sizeof(long long) == 8 );
+BOOST_STATIC_ASSERT( sizeof(double) == 8 );
+BOOST_STATIC_ASSERT( sizeof(mongo::Date_t) == 8 );
+BOOST_STATIC_ASSERT( sizeof(mongo::OID) == 12 );
+
+namespace mongo {
+
+ BSONElement eooElement;
+
+ GENOIDLabeler GENOID;
+
+ DateNowLabeler DATENOW;
+ NullLabeler BSONNULL;
+
+ MinKeyLabeler MINKEY;
+ MaxKeyLabeler MAXKEY;
+
+ // need to move to bson/, but has dependency on base64 so move that to bson/util/ first.
+ inline string BSONElement::jsonString( JsonStringFormat format, bool includeFieldNames, int pretty ) const {
+ BSONType t = type();
+ int sign;
+ if ( t == Undefined )
+ return "undefined";
+
+ stringstream s;
+ if ( includeFieldNames )
+ s << '"' << escape( fieldName() ) << "\" : ";
+ switch ( type() ) {
+ case mongo::String:
+ case Symbol:
+ s << '"' << escape( string(valuestr(), valuestrsize()-1) ) << '"';
+ break;
+ case NumberLong:
+ s << _numberLong();
+ break;
+ case NumberInt:
+ case NumberDouble:
+ if ( number() >= -numeric_limits< double >::max() &&
+ number() <= numeric_limits< double >::max() ) {
+ s.precision( 16 );
+ s << number();
+ }
+ else if ( mongo::isNaN(number()) ) {
+ s << "NaN";
+ }
+ else if ( mongo::isInf(number(), &sign) ) {
+ s << ( sign == 1 ? "Infinity" : "-Infinity");
+ }
+ else {
+ StringBuilder ss;
+ ss << "Number " << number() << " cannot be represented in JSON";
+ string message = ss.str();
+ massert( 10311 , message.c_str(), false );
+ }
+ break;
+ case mongo::Bool:
+ s << ( boolean() ? "true" : "false" );
+ break;
+ case jstNULL:
+ s << "null";
+ break;
+ case Object:
+ s << embeddedObject().jsonString( format, pretty );
+ break;
+ case mongo::Array: {
+ if ( embeddedObject().isEmpty() ) {
+ s << "[]";
+ break;
+ }
+ s << "[ ";
+ BSONObjIterator i( embeddedObject() );
+ BSONElement e = i.next();
+ if ( !e.eoo() ) {
+ int count = 0;
+ while ( 1 ) {
+ if( pretty ) {
+ s << '\n';
+ for( int x = 0; x < pretty; x++ )
+ s << " ";
+ }
+
+ if (strtol(e.fieldName(), 0, 10) > count) {
+ s << "undefined";
+ }
+ else {
+ s << e.jsonString( format, false, pretty?pretty+1:0 );
+ e = i.next();
+ }
+ count++;
+ if ( e.eoo() )
+ break;
+ s << ", ";
+ }
+ }
+ s << " ]";
+ break;
+ }
+ case DBRef: {
+ mongo::OID *x = (mongo::OID *) (valuestr() + valuestrsize());
+ if ( format == TenGen )
+ s << "Dbref( ";
+ else
+ s << "{ \"$ref\" : ";
+ s << '"' << valuestr() << "\", ";
+ if ( format != TenGen )
+ s << "\"$id\" : ";
+ s << '"' << *x << "\" ";
+ if ( format == TenGen )
+ s << ')';
+ else
+ s << '}';
+ break;
+ }
+ case jstOID:
+ if ( format == TenGen ) {
+ s << "ObjectId( ";
+ }
+ else {
+ s << "{ \"$oid\" : ";
+ }
+ s << '"' << __oid() << '"';
+ if ( format == TenGen ) {
+ s << " )";
+ }
+ else {
+ s << " }";
+ }
+ break;
+ case BinData: {
+ int len = *(int *)( value() );
+ BinDataType type = BinDataType( *(char *)( (int *)( value() ) + 1 ) );
+ s << "{ \"$binary\" : \"";
+ char *start = ( char * )( value() ) + sizeof( int ) + 1;
+ base64::encode( s , start , len );
+ s << "\", \"$type\" : \"" << hex;
+ s.width( 2 );
+ s.fill( '0' );
+ s << type << dec;
+ s << "\" }";
+ break;
+ }
+ case mongo::Date:
+ if ( format == Strict )
+ s << "{ \"$date\" : ";
+ else
+ s << "Date( ";
+ if( pretty ) {
+ Date_t d = date();
+ if( d == 0 ) s << '0';
+ else
+ s << '"' << date().toString() << '"';
+ }
+ else
+ s << date();
+ if ( format == Strict )
+ s << " }";
+ else
+ s << " )";
+ break;
+ case RegEx:
+ if ( format == Strict ) {
+ s << "{ \"$regex\" : \"" << escape( regex() );
+ s << "\", \"$options\" : \"" << regexFlags() << "\" }";
+ }
+ else {
+ s << "/" << escape( regex() , true ) << "/";
+ // FIXME Worry about alpha order?
+ for ( const char *f = regexFlags(); *f; ++f ) {
+ switch ( *f ) {
+ case 'g':
+ case 'i':
+ case 'm':
+ s << *f;
+ default:
+ break;
+ }
+ }
+ }
+ break;
+
+ case CodeWScope: {
+ BSONObj scope = codeWScopeObject();
+ if ( ! scope.isEmpty() ) {
+ s << "{ \"$code\" : " << _asCode() << " , "
+ << " \"$scope\" : " << scope.jsonString() << " }";
+ break;
+ }
+ }
+
+ case Code:
+ s << _asCode();
+ break;
+
+ case Timestamp:
+ s << "{ \"t\" : " << timestampTime() << " , \"i\" : " << timestampInc() << " }";
+ break;
+
+ case MinKey:
+ s << "{ \"$minKey\" : 1 }";
+ break;
+
+ case MaxKey:
+ s << "{ \"$maxKey\" : 1 }";
+ break;
+
+ default:
+ StringBuilder ss;
+ ss << "Cannot create a properly formatted JSON string with "
+ << "element: " << toString() << " of type: " << type();
+ string message = ss.str();
+ massert( 10312 , message.c_str(), false );
+ }
+ return s.str();
+ }
+
+ int BSONElement::getGtLtOp( int def ) const {
+ const char *fn = fieldName();
+ if ( fn[0] == '$' && fn[1] ) {
+ if ( fn[2] == 't' ) {
+ if ( fn[1] == 'g' ) {
+ if ( fn[3] == 0 ) return BSONObj::GT;
+ else if ( fn[3] == 'e' && fn[4] == 0 ) return BSONObj::GTE;
+ }
+ else if ( fn[1] == 'l' ) {
+ if ( fn[3] == 0 ) return BSONObj::LT;
+ else if ( fn[3] == 'e' && fn[4] == 0 ) return BSONObj::LTE;
+ }
+ }
+ else if ( fn[1] == 'n' && fn[2] == 'e' ) {
+ if ( fn[3] == 0 )
+ return BSONObj::NE;
+ if ( fn[3] == 'a' && fn[4] == 'r') // matches anything with $near prefix
+ return BSONObj::opNEAR;
+ }
+ else if ( fn[1] == 'm' ) {
+ if ( fn[2] == 'o' && fn[3] == 'd' && fn[4] == 0 )
+ return BSONObj::opMOD;
+ if ( fn[2] == 'a' && fn[3] == 'x' && fn[4] == 'D' && fn[5] == 'i' && fn[6] == 's' && fn[7] == 't' && fn[8] == 'a' && fn[9] == 'n' && fn[10] == 'c' && fn[11] == 'e' && fn[12] == 0 )
+ return BSONObj::opMAX_DISTANCE;
+ }
+ else if ( fn[1] == 't' && fn[2] == 'y' && fn[3] == 'p' && fn[4] == 'e' && fn[5] == 0 )
+ return BSONObj::opTYPE;
+ else if ( fn[1] == 'i' && fn[2] == 'n' && fn[3] == 0 )
+ return BSONObj::opIN;
+ else if ( fn[1] == 'n' && fn[2] == 'i' && fn[3] == 'n' && fn[4] == 0 )
+ return BSONObj::NIN;
+ else if ( fn[1] == 'a' && fn[2] == 'l' && fn[3] == 'l' && fn[4] == 0 )
+ return BSONObj::opALL;
+ else if ( fn[1] == 's' && fn[2] == 'i' && fn[3] == 'z' && fn[4] == 'e' && fn[5] == 0 )
+ return BSONObj::opSIZE;
+ else if ( fn[1] == 'e' ) {
+ if ( fn[2] == 'x' && fn[3] == 'i' && fn[4] == 's' && fn[5] == 't' && fn[6] == 's' && fn[7] == 0 )
+ return BSONObj::opEXISTS;
+ if ( fn[2] == 'l' && fn[3] == 'e' && fn[4] == 'm' && fn[5] == 'M' && fn[6] == 'a' && fn[7] == 't' && fn[8] == 'c' && fn[9] == 'h' && fn[10] == 0 )
+ return BSONObj::opELEM_MATCH;
+ }
+ else if ( fn[1] == 'r' && fn[2] == 'e' && fn[3] == 'g' && fn[4] == 'e' && fn[5] == 'x' && fn[6] == 0 )
+ return BSONObj::opREGEX;
+ else if ( fn[1] == 'o' && fn[2] == 'p' && fn[3] == 't' && fn[4] == 'i' && fn[5] == 'o' && fn[6] == 'n' && fn[7] == 's' && fn[8] == 0 )
+ return BSONObj::opOPTIONS;
+ else if ( fn[1] == 'w' && fn[2] == 'i' && fn[3] == 't' && fn[4] == 'h' && fn[5] == 'i' && fn[6] == 'n' && fn[7] == 0 )
+ return BSONObj::opWITHIN;
+ }
+ return def;
+ }
+
+ /* Matcher --------------------------------------*/
+
+// If the element is something like:
+// a : { $gt : 3 }
+// we append
+// a : 3
+// else we just append the element.
+//
+ void appendElementHandlingGtLt(BSONObjBuilder& b, const BSONElement& e) {
+ if ( e.type() == Object ) {
+ BSONElement fe = e.embeddedObject().firstElement();
+ const char *fn = fe.fieldName();
+ if ( fn[0] == '$' && fn[1] && fn[2] == 't' ) {
+ b.appendAs(fe, e.fieldName());
+ return;
+ }
+ }
+ b.append(e);
+ }
+
+ int getGtLtOp(const BSONElement& e) {
+ if ( e.type() != Object )
+ return BSONObj::Equality;
+
+ BSONElement fe = e.embeddedObject().firstElement();
+ return fe.getGtLtOp();
+ }
+
+ FieldCompareResult compareDottedFieldNames( const string& l , const string& r ) {
+ static int maxLoops = 1024 * 1024;
+
+ size_t lstart = 0;
+ size_t rstart = 0;
+
+ for ( int i=0; i<maxLoops; i++ ) {
+
+ size_t a = l.find( '.' , lstart );
+ size_t b = r.find( '.' , rstart );
+
+ size_t lend = a == string::npos ? l.size() : a;
+ size_t rend = b == string::npos ? r.size() : b;
+
+ const string& c = l.substr( lstart , lend - lstart );
+ const string& d = r.substr( rstart , rend - rstart );
+
+ int x = lexNumCmp( c.c_str(), d.c_str() );
+
+ if ( x < 0 )
+ return LEFT_BEFORE;
+ if ( x > 0 )
+ return RIGHT_BEFORE;
+
+ lstart = lend + 1;
+ rstart = rend + 1;
+
+ if ( lstart >= l.size() ) {
+ if ( rstart >= r.size() )
+ return SAME;
+ return RIGHT_SUBFIELD;
+ }
+ if ( rstart >= r.size() )
+ return LEFT_SUBFIELD;
+ }
+
+ log() << "compareDottedFieldNames ERROR l: " << l << " r: " << r << " TOO MANY LOOPS" << endl;
+ assert(0);
+ return SAME; // will never get here
+ }
+
+ /* BSONObj ------------------------------------------------------------*/
+
+ string BSONObj::md5() const {
+ md5digest d;
+ md5_state_t st;
+ md5_init(&st);
+ md5_append( &st , (const md5_byte_t*)_objdata , objsize() );
+ md5_finish(&st, d);
+ return digestToString( d );
+ }
+
+ string BSONObj::jsonString( JsonStringFormat format, int pretty ) const {
+
+ if ( isEmpty() ) return "{}";
+
+ StringBuilder s;
+ s << "{ ";
+ BSONObjIterator i(*this);
+ BSONElement e = i.next();
+ if ( !e.eoo() )
+ while ( 1 ) {
+ s << e.jsonString( format, true, pretty?pretty+1:0 );
+ e = i.next();
+ if ( e.eoo() )
+ break;
+ s << ",";
+ if ( pretty ) {
+ s << '\n';
+ for( int x = 0; x < pretty; x++ )
+ s << " ";
+ }
+ else {
+ s << " ";
+ }
+ }
+ s << " }";
+ return s.str();
+ }
+
+ bool BSONObj::valid() const {
+ try {
+ BSONObjIterator it(*this);
+ while( it.moreWithEOO() ) {
+ // both throw exception on failure
+ BSONElement e = it.next(true);
+ e.validate();
+
+ if (e.eoo()) {
+ if (it.moreWithEOO())
+ return false;
+ return true;
+ }
+ else if (e.isABSONObj()) {
+ if(!e.embeddedObject().valid())
+ return false;
+ }
+ else if (e.type() == CodeWScope) {
+ if(!e.codeWScopeObject().valid())
+ return false;
+ }
+ }
+ }
+ catch (...) {
+ }
+ return false;
+ }
+
+ int BSONObj::woCompare(const BSONObj& r, const Ordering &o, bool considerFieldName) const {
+ if ( isEmpty() )
+ return r.isEmpty() ? 0 : -1;
+ if ( r.isEmpty() )
+ return 1;
+
+ BSONObjIterator i(*this);
+ BSONObjIterator j(r);
+ unsigned mask = 1;
+ while ( 1 ) {
+ // so far, equal...
+
+ BSONElement l = i.next();
+ BSONElement r = j.next();
+ if ( l.eoo() )
+ return r.eoo() ? 0 : -1;
+ if ( r.eoo() )
+ return 1;
+
+ int x;
+ {
+ x = l.woCompare( r, considerFieldName );
+ if( o.descending(mask) )
+ x = -x;
+ }
+ if ( x != 0 )
+ return x;
+ mask <<= 1;
+ }
+ return -1;
+ }
+
+ /* well ordered compare */
+ int BSONObj::woCompare(const BSONObj &r, const BSONObj &idxKey,
+ bool considerFieldName) const {
+ if ( isEmpty() )
+ return r.isEmpty() ? 0 : -1;
+ if ( r.isEmpty() )
+ return 1;
+
+ bool ordered = !idxKey.isEmpty();
+
+ BSONObjIterator i(*this);
+ BSONObjIterator j(r);
+ BSONObjIterator k(idxKey);
+ while ( 1 ) {
+ // so far, equal...
+
+ BSONElement l = i.next();
+ BSONElement r = j.next();
+ BSONElement o;
+ if ( ordered )
+ o = k.next();
+ if ( l.eoo() )
+ return r.eoo() ? 0 : -1;
+ if ( r.eoo() )
+ return 1;
+
+ int x;
+ /*
+ if( ordered && o.type() == String && strcmp(o.valuestr(), "ascii-proto") == 0 &&
+ l.type() == String && r.type() == String ) {
+ // note: no negative support yet, as this is just sort of a POC
+ x = _stricmp(l.valuestr(), r.valuestr());
+ }
+ else*/ {
+ x = l.woCompare( r, considerFieldName );
+ if ( ordered && o.number() < 0 )
+ x = -x;
+ }
+ if ( x != 0 )
+ return x;
+ }
+ return -1;
+ }
+
+ BSONObj staticNull = fromjson( "{'':null}" );
+ BSONObj makeUndefined() {
+ BSONObjBuilder b;
+ b.appendUndefined( "" );
+ return b.obj();
+ }
+ BSONObj staticUndefined = makeUndefined();
+
+ /* well ordered compare */
+ int BSONObj::woSortOrder(const BSONObj& other, const BSONObj& sortKey , bool useDotted ) const {
+ if ( isEmpty() )
+ return other.isEmpty() ? 0 : -1;
+ if ( other.isEmpty() )
+ return 1;
+
+ uassert( 10060 , "woSortOrder needs a non-empty sortKey" , ! sortKey.isEmpty() );
+
+ BSONObjIterator i(sortKey);
+ while ( 1 ) {
+ BSONElement f = i.next();
+ if ( f.eoo() )
+ return 0;
+
+ BSONElement l = useDotted ? getFieldDotted( f.fieldName() ) : getField( f.fieldName() );
+ if ( l.eoo() )
+ l = staticNull.firstElement();
+ BSONElement r = useDotted ? other.getFieldDotted( f.fieldName() ) : other.getField( f.fieldName() );
+ if ( r.eoo() )
+ r = staticNull.firstElement();
+
+ int x = l.woCompare( r, false );
+ if ( f.number() < 0 )
+ x = -x;
+ if ( x != 0 )
+ return x;
+ }
+ return -1;
+ }
+
+ template <typename BSONElementColl>
+ void _getFieldsDotted( const BSONObj* obj, const StringData& name, BSONElementColl &ret, bool expandLastArray ) {
+ BSONElement e = obj->getField( name );
+
+ if ( e.eoo() ) {
+ const char *p = strchr(name.data(), '.');
+ if ( p ) {
+ string left(name.data(), p-name.data());
+ const char* next = p+1;
+ BSONElement e = obj->getField( left.c_str() );
+
+ if (e.type() == Object) {
+ e.embeddedObject().getFieldsDotted(next, ret, expandLastArray );
+ }
+ else if (e.type() == Array) {
+ bool allDigits = false;
+ if ( isdigit( *next ) ) {
+ const char * temp = next + 1;
+ while ( isdigit( *temp ) )
+ temp++;
+ allDigits = (*temp == '.' || *temp == '\0');
+ }
+ if (allDigits) {
+ e.embeddedObject().getFieldsDotted(next, ret, expandLastArray );
+ }
+ else {
+ BSONObjIterator i(e.embeddedObject());
+ while ( i.more() ) {
+ BSONElement e2 = i.next();
+ if (e2.type() == Object || e2.type() == Array)
+ e2.embeddedObject().getFieldsDotted(next, ret, expandLastArray );
+ }
+ }
+ }
+ else {
+ // do nothing: no match
+ }
+ }
+ }
+ else {
+ if (e.type() == Array && expandLastArray) {
+ BSONObjIterator i(e.embeddedObject());
+ while ( i.more() )
+ ret.insert(i.next());
+ }
+ else {
+ ret.insert(e);
+ }
+ }
+ }
+
+ void BSONObj::getFieldsDotted(const StringData& name, BSONElementSet &ret, bool expandLastArray ) const {
+ _getFieldsDotted( this, name, ret, expandLastArray );
+ }
+ void BSONObj::getFieldsDotted(const StringData& name, BSONElementMSet &ret, bool expandLastArray ) const {
+ _getFieldsDotted( this, name, ret, expandLastArray );
+ }
+
+ BSONElement BSONObj::getFieldDottedOrArray(const char *&name) const {
+ const char *p = strchr(name, '.');
+
+ BSONElement sub;
+
+ if ( p ) {
+ sub = getField( string(name, p-name) );
+ name = p + 1;
+ }
+ else {
+ sub = getField( name );
+ name = name + strlen(name);
+ }
+
+ if ( sub.eoo() )
+ return eooElement;
+ else if ( sub.type() == Array || name[0] == '\0' )
+ return sub;
+ else if ( sub.type() == Object )
+ return sub.embeddedObject().getFieldDottedOrArray( name );
+ else
+ return eooElement;
+ }
+
+ /**
+     Sets element field names to the empty string.
+     If a field named in pattern is missing from this object, it is omitted
+     from the returned object.
+ */
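+    // e.g. given this = {a:1, b:"x"} and pattern {b:1, c:1}, the result is { "" : "x" };
+    // "c" is not present in this object, so it is skipped.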
+ BSONObj BSONObj::extractFieldsUnDotted(BSONObj pattern) const {
+ BSONObjBuilder b;
+ BSONObjIterator i(pattern);
+ while ( i.moreWithEOO() ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ BSONElement x = getField(e.fieldName());
+ if ( !x.eoo() )
+ b.appendAs(x, "");
+ }
+ return b.obj();
+ }
+
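+    // e.g. with this = {a:{b:2}} and pattern {"a.b":1} the result is {"a.b": 2};
+    // with fillWithNull set, a missing path yields {"a.b": null} instead.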
+ BSONObj BSONObj::extractFields(const BSONObj& pattern , bool fillWithNull ) const {
+ BSONObjBuilder b(32); // scanandorder.h can make a zillion of these, so we start the allocation very small
+ BSONObjIterator i(pattern);
+ while ( i.moreWithEOO() ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ BSONElement x = getFieldDotted(e.fieldName());
+ if ( ! x.eoo() )
+ b.appendAs( x, e.fieldName() );
+ else if ( fillWithNull )
+ b.appendNull( e.fieldName() );
+ }
+ return b.obj();
+ }
+
+ BSONObj BSONObj::filterFieldsUndotted( const BSONObj &filter, bool inFilter ) const {
+ BSONObjBuilder b;
+ BSONObjIterator i( *this );
+ while( i.moreWithEOO() ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ BSONElement x = filter.getField( e.fieldName() );
+ if ( ( x.eoo() && !inFilter ) ||
+ ( !x.eoo() && inFilter ) )
+ b.append( e );
+ }
+ return b.obj();
+ }
+
+ BSONElement BSONObj::getFieldUsingIndexNames(const char *fieldName, const BSONObj &indexKey) const {
+ BSONObjIterator i( indexKey );
+ int j = 0;
+ while( i.moreWithEOO() ) {
+ BSONElement f = i.next();
+ if ( f.eoo() )
+ return BSONElement();
+ if ( strcmp( f.fieldName(), fieldName ) == 0 )
+ break;
+ ++j;
+ }
+ BSONObjIterator k( *this );
+ while( k.moreWithEOO() ) {
+ BSONElement g = k.next();
+ if ( g.eoo() )
+ return BSONElement();
+ if ( j == 0 ) {
+ return g;
+ }
+ --j;
+ }
+ return BSONElement();
+ }
+
+ /* grab names of all the fields in this object */
+ int BSONObj::getFieldNames(set<string>& fields) const {
+ int n = 0;
+ BSONObjIterator i(*this);
+ while ( i.moreWithEOO() ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ fields.insert(e.fieldName());
+ n++;
+ }
+ return n;
+ }
+
+    /* note: addFields always adds _id even if not specified.
+       Returns the number of fields added, not counting _id unless it was requested.
+ */
+ int BSONObj::addFields(BSONObj& from, set<string>& fields) {
+ assert( isEmpty() && !isOwned() ); /* partial implementation for now... */
+
+ BSONObjBuilder b;
+
+ int N = fields.size();
+ int n = 0;
+ BSONObjIterator i(from);
+ bool gotId = false;
+ while ( i.moreWithEOO() ) {
+ BSONElement e = i.next();
+ const char *fname = e.fieldName();
+ if ( fields.count(fname) ) {
+ b.append(e);
+ ++n;
+ gotId = gotId || strcmp(fname, "_id")==0;
+ if ( n == N && gotId )
+ break;
+ }
+ else if ( strcmp(fname, "_id")==0 ) {
+ b.append(e);
+ gotId = true;
+ if ( n == N && gotId )
+ break;
+ }
+ }
+
+ if ( n ) {
+ *this = b.obj();
+ }
+
+ return n;
+ }
+
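+    // e.g. {"0": "a", "1": "b"} could be an array, while {"0": "a", "2": "b"} could not.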
+ bool BSONObj::couldBeArray() const {
+ BSONObjIterator i( *this );
+ int index = 0;
+ while( i.moreWithEOO() ){
+ BSONElement e = i.next();
+ if( e.eoo() ) break;
+
+ // TODO: If actually important, may be able to do int->char* much faster
+ if( strcmp( e.fieldName(), ((string)( mongoutils::str::stream() << index )).c_str() ) != 0 )
+ return false;
+ index++;
+ }
+ return true;
+ }
+
+ BSONObj BSONObj::clientReadable() const {
+ BSONObjBuilder b;
+ BSONObjIterator i( *this );
+ while( i.moreWithEOO() ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ switch( e.type() ) {
+ case MinKey: {
+ BSONObjBuilder m;
+ m.append( "$minElement", 1 );
+ b.append( e.fieldName(), m.done() );
+ break;
+ }
+ case MaxKey: {
+ BSONObjBuilder m;
+ m.append( "$maxElement", 1 );
+ b.append( e.fieldName(), m.done() );
+ break;
+ }
+ default:
+ b.append( e );
+ }
+ }
+ return b.obj();
+ }
+
+ BSONObj BSONObj::replaceFieldNames( const BSONObj &names ) const {
+ BSONObjBuilder b;
+ BSONObjIterator i( *this );
+ BSONObjIterator j( names );
+ BSONElement f = j.moreWithEOO() ? j.next() : BSONObj().firstElement();
+ while( i.moreWithEOO() ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ if ( !f.eoo() ) {
+ b.appendAs( e, f.fieldName() );
+ f = j.next();
+ }
+ else {
+ b.append( e );
+ }
+ }
+ return b.obj();
+ }
+
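+    // e.g. {"a.b": 1} and {"$inc": {...}} are not ok for storage, while a DBRef-style
+    // subobject beginning with "$ref"/"$id" is allowed.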
+ bool BSONObj::okForStorage() const {
+ BSONObjIterator i( *this );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ const char * name = e.fieldName();
+
+ if ( strchr( name , '.' ) ||
+ strchr( name , '$' ) ) {
+ return
+ strcmp( name , "$ref" ) == 0 ||
+ strcmp( name , "$id" ) == 0
+ ;
+ }
+
+ if ( e.mayEncapsulate() ) {
+ switch ( e.type() ) {
+ case Object:
+ case Array:
+ if ( ! e.embeddedObject().okForStorage() )
+ return false;
+ break;
+ case CodeWScope:
+ if ( ! e.codeWScopeObject().okForStorage() )
+ return false;
+ break;
+ default:
+ uassert( 12579, "unhandled cases in BSONObj okForStorage" , 0 );
+ }
+
+ }
+ }
+ return true;
+ }
+
+ void BSONObj::dump() const {
+ out() << hex;
+ const char *p = objdata();
+ for ( int i = 0; i < objsize(); i++ ) {
+ out() << i << '\t' << ( 0xff & ( (unsigned) *p ) );
+ if ( *p >= 'A' && *p <= 'z' )
+ out() << '\t' << *p;
+ out() << endl;
+ p++;
+ }
+ }
+
+ void nested2dotted(BSONObjBuilder& b, const BSONObj& obj, const string& base) {
+ BSONObjIterator it(obj);
+ while (it.more()) {
+ BSONElement e = it.next();
+ if (e.type() == Object) {
+ string newbase = base + e.fieldName() + ".";
+ nested2dotted(b, e.embeddedObject(), newbase);
+ }
+ else {
+ string newbase = base + e.fieldName();
+ b.appendAs(e, newbase);
+ }
+ }
+ }
+
+ void dotted2nested(BSONObjBuilder& b, const BSONObj& obj) {
+ //use map to sort fields
+ BSONMap sorted = bson2map(obj);
+ EmbeddedBuilder eb(&b);
+ for(BSONMap::const_iterator it=sorted.begin(); it!=sorted.end(); ++it) {
+ eb.appendAs(it->second, it->first);
+ }
+ eb.done();
+ }
+
+ /*-- test things ----------------------------------------------------*/
+
+#pragma pack(1)
+ struct MaxKeyData {
+ MaxKeyData() {
+ totsize=7;
+ maxkey=MaxKey;
+ name=0;
+ eoo=EOO;
+ }
+ int totsize;
+ char maxkey;
+ char name;
+ char eoo;
+ } maxkeydata;
+ BSONObj maxKey((const char *) &maxkeydata);
+
+ struct MinKeyData {
+ MinKeyData() {
+ totsize=7;
+ minkey=MinKey;
+ name=0;
+ eoo=EOO;
+ }
+ int totsize;
+ char minkey;
+ char name;
+ char eoo;
+ } minkeydata;
+ BSONObj minKey((const char *) &minkeydata);
+
+ /*
+ struct JSObj0 {
+ JSObj0() {
+ totsize = 5;
+ eoo = EOO;
+ }
+ int totsize;
+ char eoo;
+ } js0;
+ */
+#pragma pack()
+
+ struct BsonUnitTest : public UnitTest {
+ void testRegex() {
+
+ BSONObjBuilder b;
+ b.appendRegex("x", "foo");
+ BSONObj o = b.done();
+
+ BSONObjBuilder c;
+ c.appendRegex("x", "goo");
+ BSONObj p = c.done();
+
+ assert( !o.binaryEqual( p ) );
+ assert( o.woCompare( p ) < 0 );
+
+ }
+ void testoid() {
+ OID id;
+ id.init();
+ // sleepsecs(3);
+
+ OID b;
+ // goes with sleep above...
+ // b.init();
+ // assert( memcmp(id.getData(), b.getData(), 12) < 0 );
+
+ b.init( id.str() );
+ assert( b == id );
+ }
+
+ void testbounds() {
+ BSONObj l , r;
+ {
+ BSONObjBuilder b;
+ b.append( "x" , numeric_limits<long long>::max() );
+ l = b.obj();
+ }
+ {
+ BSONObjBuilder b;
+ b.append( "x" , numeric_limits<double>::max() );
+ r = b.obj();
+ }
+ assert( l.woCompare( r ) < 0 );
+ assert( r.woCompare( l ) > 0 );
+ {
+ BSONObjBuilder b;
+ b.append( "x" , numeric_limits<int>::max() );
+ l = b.obj();
+ }
+ assert( l.woCompare( r ) < 0 );
+ assert( r.woCompare( l ) > 0 );
+ }
+
+ void testorder() {
+ {
+ BSONObj x,y,z;
+ { BSONObjBuilder b; b.append( "x" , (long long)2 ); x = b.obj(); }
+ { BSONObjBuilder b; b.append( "x" , (int)3 ); y = b.obj(); }
+ { BSONObjBuilder b; b.append( "x" , (long long)4 ); z = b.obj(); }
+ assert( x.woCompare( y ) < 0 );
+ assert( x.woCompare( z ) < 0 );
+ assert( y.woCompare( x ) > 0 );
+ assert( z.woCompare( x ) > 0 );
+ assert( y.woCompare( z ) < 0 );
+ assert( z.woCompare( y ) > 0 );
+ }
+
+ {
+ BSONObj ll,d,i,n,u;
+ { BSONObjBuilder b; b.append( "x" , (long long)2 ); ll = b.obj(); }
+ { BSONObjBuilder b; b.append( "x" , (double)2 ); d = b.obj(); }
+ { BSONObjBuilder b; b.append( "x" , (int)2 ); i = b.obj(); }
+ { BSONObjBuilder b; b.appendNull( "x" ); n = b.obj(); }
+ { BSONObjBuilder b; u = b.obj(); }
+
+ assert( ll.woCompare( u ) == d.woCompare( u ) );
+ assert( ll.woCompare( u ) == i.woCompare( u ) );
+ BSONObj k = BSON( "x" << 1 );
+ assert( ll.woCompare( u , k ) == d.woCompare( u , k ) );
+ assert( ll.woCompare( u , k ) == i.woCompare( u , k ) );
+
+ assert( u.woCompare( ll ) == u.woCompare( d ) );
+ assert( u.woCompare( ll ) == u.woCompare( i ) );
+ assert( u.woCompare( ll , k ) == u.woCompare( d , k ) );
+                assert( u.woCompare( ll , k ) == u.woCompare( i , k ) );
+
+ assert( i.woCompare( n ) == d.woCompare( n ) );
+
+ assert( ll.woCompare( n ) == d.woCompare( n ) );
+ assert( ll.woCompare( n ) == i.woCompare( n ) );
+ assert( ll.woCompare( n , k ) == d.woCompare( n , k ) );
+ assert( ll.woCompare( n , k ) == i.woCompare( n , k ) );
+
+ assert( n.woCompare( ll ) == n.woCompare( d ) );
+ assert( n.woCompare( ll ) == n.woCompare( i ) );
+ assert( n.woCompare( ll , k ) == n.woCompare( d , k ) );
+                assert( n.woCompare( ll , k ) == n.woCompare( i , k ) );
+ }
+
+ {
+ BSONObj l,r;
+ { BSONObjBuilder b; b.append( "x" , "eliot" ); l = b.obj(); }
+ { BSONObjBuilder b; b.appendSymbol( "x" , "eliot" ); r = b.obj(); }
+ assert( l.woCompare( r ) == 0 );
+ assert( r.woCompare( l ) == 0 );
+ }
+ }
+
+ void run() {
+ testRegex();
+ BSONObjBuilder A,B,C;
+ A.append("x", 2);
+ B.append("x", 2.0);
+ C.append("x", 2.1);
+ BSONObj a = A.done();
+ BSONObj b = B.done();
+ BSONObj c = C.done();
+            assert( !a.binaryEqual( b ) ); // see the comments on operator==
+ int cmp = a.woCompare(b);
+ assert( cmp == 0 );
+ cmp = a.woCompare(c);
+ assert( cmp < 0 );
+ testoid();
+ testbounds();
+ testorder();
+ }
+ } bson_unittest;
+
+ Labeler::Label GT( "$gt" );
+ Labeler::Label GTE( "$gte" );
+ Labeler::Label LT( "$lt" );
+ Labeler::Label LTE( "$lte" );
+ Labeler::Label NE( "$ne" );
+ Labeler::Label SIZE( "$size" );
+
+ void BSONObjBuilder::appendMinForType( const StringData& fieldName , int t ) {
+ switch ( t ) {
+
+ // Shared canonical types
+ case NumberInt:
+ case NumberDouble:
+ case NumberLong:
+ append( fieldName , - numeric_limits<double>::max() ); return;
+ case Symbol:
+ case String:
+ append( fieldName , "" ); return;
+ case Date:
+ // min varies with V0 and V1 indexes, so we go one type lower.
+ appendBool(fieldName, true);
+ //appendDate( fieldName , numeric_limits<long long>::min() );
+ return;
+ case Timestamp: // TODO integrate with Date SERVER-3304
+ appendTimestamp( fieldName , 0 ); return;
+ case Undefined: // shared with EOO
+ appendUndefined( fieldName ); return;
+
+ // Separate canonical types
+ case MinKey:
+ appendMinKey( fieldName ); return;
+ case MaxKey:
+ appendMaxKey( fieldName ); return;
+ case jstOID: {
+ OID o;
+ memset(&o, 0, sizeof(o));
+ appendOID( fieldName , &o);
+ return;
+ }
+ case Bool:
+ appendBool( fieldName , false); return;
+ case jstNULL:
+ appendNull( fieldName ); return;
+ case Object:
+ append( fieldName , BSONObj() ); return;
+ case Array:
+ appendArray( fieldName , BSONObj() ); return;
+ case BinData:
+ appendBinData( fieldName , 0 , BinDataGeneral , (const char *) 0 ); return;
+ case RegEx:
+ appendRegex( fieldName , "" ); return;
+ case DBRef: {
+ OID o;
+ memset(&o, 0, sizeof(o));
+ appendDBRef( fieldName , "" , o );
+ return;
+ }
+ case Code:
+ appendCode( fieldName , "" ); return;
+ case CodeWScope:
+ appendCodeWScope( fieldName , "" , BSONObj() ); return;
+ };
+ log() << "type not supported for appendMinElementForType: " << t << endl;
+ uassert( 10061 , "type not supported for appendMinElementForType" , false );
+ }
+
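+    // Example use (illustrative):
+    //   BSONObjBuilder b;
+    //   b.appendMinForType( "x" , NumberInt );   // appends x : -numeric_limits<double>::max()
+    //   b.appendMaxForType( "x" , NumberInt );   // appends x :  numeric_limits<double>::max()
+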
+ void BSONObjBuilder::appendMaxForType( const StringData& fieldName , int t ) {
+ switch ( t ) {
+
+ // Shared canonical types
+ case NumberInt:
+ case NumberDouble:
+ case NumberLong:
+ append( fieldName , numeric_limits<double>::max() ); return;
+ case Symbol:
+ case String:
+ appendMinForType( fieldName, Object ); return;
+ case Date:
+ appendDate( fieldName , numeric_limits<long long>::max() ); return;
+ case Timestamp: // TODO integrate with Date SERVER-3304
+ appendTimestamp( fieldName , numeric_limits<unsigned long long>::max() ); return;
+ case Undefined: // shared with EOO
+ appendUndefined( fieldName ); return;
+
+ // Separate canonical types
+ case MinKey:
+ appendMinKey( fieldName ); return;
+ case MaxKey:
+ appendMaxKey( fieldName ); return;
+ case jstOID: {
+ OID o;
+ memset(&o, 0xFF, sizeof(o));
+ appendOID( fieldName , &o);
+ return;
+ }
+ case Bool:
+ appendBool( fieldName , true ); return;
+ case jstNULL:
+ appendNull( fieldName ); return;
+ case Object:
+ appendMinForType( fieldName, Array ); return;
+ case Array:
+ appendMinForType( fieldName, BinData ); return;
+ case BinData:
+ appendMinForType( fieldName, jstOID ); return;
+ case RegEx:
+ appendMinForType( fieldName, DBRef ); return;
+ case DBRef:
+ appendMinForType( fieldName, Code ); return;
+ case Code:
+ appendMinForType( fieldName, CodeWScope ); return;
+ case CodeWScope:
+ // This upper bound may change if a new bson type is added.
+ appendMinForType( fieldName , MaxKey ); return;
+ }
+ log() << "type not supported for appendMaxElementForType: " << t << endl;
+ uassert( 14853 , "type not supported for appendMaxElementForType" , false );
+ }
+
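+    // qsort comparator over raw element pointers: skip the type byte, then compare field
+    // names with lexNumCmp so embedded numbers order numerically (e.g. "a2" before "a10").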
+ int BSONElementFieldSorter( const void * a , const void * b ) {
+ const char * x = *((const char**)a);
+ const char * y = *((const char**)b);
+ x++; y++;
+ return lexNumCmp( x , y );
+ }
+
+ bool fieldsMatch(const BSONObj& lhs, const BSONObj& rhs) {
+ BSONObjIterator l(lhs);
+ BSONObjIterator r(rhs);
+
+ while (l.more() && r.more()){
+ if (strcmp(l.next().fieldName(), r.next().fieldName())) {
+ return false;
+ }
+ }
+
+ return !(l.more() || r.more()); // false if lhs and rhs have diff nFields()
+ }
+
+ BSONObjIteratorSorted::BSONObjIteratorSorted( const BSONObj& o ) {
+ _nfields = o.nFields();
+ _fields = new const char*[_nfields];
+ int x = 0;
+ BSONObjIterator i( o );
+ while ( i.more() ) {
+ _fields[x++] = i.next().rawdata();
+ assert( _fields[x-1] );
+ }
+ assert( x == _nfields );
+ qsort( _fields , _nfields , sizeof(char*) , BSONElementFieldSorter );
+ _cur = 0;
+ }
+
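+    // e.g. appendAsNumber( "n" , "3.5" ) appends a double, "42" an int,
+    // "123456789012" a long long; "12a" appends nothing and returns false.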
+ bool BSONObjBuilder::appendAsNumber( const StringData& fieldName , const string& data ) {
+ if ( data.size() == 0 || data == "-" || data == ".")
+ return false;
+
+ unsigned int pos=0;
+ if ( data[0] == '-' )
+ pos++;
+
+ bool hasDec = false;
+
+ for ( ; pos<data.size(); pos++ ) {
+ if ( isdigit(data[pos]) )
+ continue;
+
+ if ( data[pos] == '.' ) {
+ if ( hasDec )
+ return false;
+ hasDec = true;
+ continue;
+ }
+
+ return false;
+ }
+
+ if ( hasDec ) {
+ double d = atof( data.c_str() );
+ append( fieldName , d );
+ return true;
+ }
+
+ if ( data.size() < 8 ) {
+ append( fieldName , atoi( data.c_str() ) );
+ return true;
+ }
+
+ try {
+ long long num = boost::lexical_cast<long long>( data );
+ append( fieldName , num );
+ return true;
+ }
+ catch(bad_lexical_cast &) {
+ return false;
+ }
+ }
+
+} // namespace mongo
diff --git a/src/mongo/db/jsobj.h b/src/mongo/db/jsobj.h
new file mode 100644
index 00000000000..ae039529fbf
--- /dev/null
+++ b/src/mongo/db/jsobj.h
@@ -0,0 +1,47 @@
+/** @file jsobj.h
+ BSON classes
+*/
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ BSONObj and its helpers
+
+ "BSON" stands for "binary JSON" -- ie a binary way to represent objects that would be
+ represented in JSON (plus a few extensions useful for databases & other languages).
+
+ http://www.bsonspec.org/
+*/
+
+#pragma once
+
+#include "../pch.h"
+#include "../bson/util/builder.h"
+#include "../util/optime.h"
+//#include "boost/utility.hpp"
+//#include <set>
+#include "../bson/bsontypes.h"
+#include "../bson/oid.h"
+#include "../bson/bsonelement.h"
+#include "../bson/bsonobj.h"
+#include "../bson/bsonmisc.h"
+#include "../bson/bsonobjbuilder.h"
+#include "../bson/bsonobjiterator.h"
+#include "../bson/bson-inl.h"
+#include "../bson/ordering.h"
+#include "../bson/stringdata.h"
+#include "../bson/bson_db.h"
+
diff --git a/src/mongo/db/jsobjmanipulator.h b/src/mongo/db/jsobjmanipulator.h
new file mode 100644
index 00000000000..860e575940e
--- /dev/null
+++ b/src/mongo/db/jsobjmanipulator.h
@@ -0,0 +1,94 @@
+/** jsobjManipulator.h */
+
+/**
+ * Copyright (C) 2009 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "jsobj.h"
+//#include "dur.h"
+
+namespace mongo {
+
+ /** Manipulate the binary representation of a BSONElement in-place.
+ Careful, this casts away const.
+ */
+ class BSONElementManipulator {
+ public:
+ BSONElementManipulator( const BSONElement &element ) :
+ _element( element ) {
+ assert( !_element.eoo() );
+ }
+ /** Replace a Timestamp type with a Date type initialized to
+ OpTime::now().asDate()
+ */
+ void initTimestamp();
+
+        // Note: the methods beginning with a capital letter call getDur().writing() and journal the change
+
+ /** Change the value, in place, of the number. */
+ void setNumber(double d) {
+ if ( _element.type() == NumberDouble ) *reinterpret_cast< double * >( value() ) = d;
+ else if ( _element.type() == NumberInt ) *reinterpret_cast< int * >( value() ) = (int) d;
+ else assert(0);
+ }
+ void SetNumber(double d);
+ void setLong(long long n) {
+ assert( _element.type() == NumberLong );
+ *reinterpret_cast< long long * >( value() ) = n;
+ }
+ void SetLong(long long n);
+ void setInt(int n) {
+ assert( _element.type() == NumberInt );
+ *reinterpret_cast< int * >( value() ) = n;
+ }
+ void SetInt(int n);
+
+ /** Replace the type and value of the element with the type and value of e,
+ preserving the original fieldName */
+ void replaceTypeAndValue( const BSONElement &e ) {
+ *data() = e.type();
+ memcpy( value(), e.value(), e.valuesize() );
+ }
+
+ /* dur:: version */
+ void ReplaceTypeAndValue( const BSONElement &e );
+
+ static void lookForTimestamps( const BSONObj& obj ) {
+            // If there is a Timestamp field as the first or second element,
+ // update it to a Date field set to OpTime::now().asDate(). The
+ // replacement policy is a work in progress.
+
+ BSONObjIterator i( obj );
+ for( int j = 0; i.moreWithEOO() && j < 2; ++j ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ if ( e.type() == Timestamp ) {
+ BSONElementManipulator( e ).initTimestamp();
+ break;
+ }
+ }
+ }
+ private:
+ char *data() { return nonConst( _element.rawdata() ); }
+ char *value() { return nonConst( _element.value() ); }
+ static char *nonConst( const char *s ) { return const_cast< char * >( s ); }
+
+ const BSONElement _element;
+ };
+
+} // namespace mongo
diff --git a/src/mongo/db/json.cpp b/src/mongo/db/json.cpp
new file mode 100644
index 00000000000..73457a2bfbb
--- /dev/null
+++ b/src/mongo/db/json.cpp
@@ -0,0 +1,651 @@
+// json.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+
+#define BOOST_SPIRIT_THREADSAFE
+#if BOOST_VERSION >= 103800
+#define BOOST_SPIRIT_USE_OLD_NAMESPACE
+#include <boost/spirit/include/classic_core.hpp>
+#include <boost/spirit/include/classic_loops.hpp>
+#include <boost/spirit/include/classic_lists.hpp>
+#else
+#include <boost/spirit/core.hpp>
+#include <boost/spirit/utility/loops.hpp>
+#include <boost/spirit/utility/lists.hpp>
+#endif
+#undef assert
+#define assert MONGO_assert
+
+#include "json.h"
+#include "../bson/util/builder.h"
+#include "../util/base64.h"
+#include "../util/hex.h"
+
+
+using namespace boost::spirit;
+
+namespace mongo {
+
+ struct ObjectBuilder : boost::noncopyable {
+ ~ObjectBuilder() {
+ unsigned i = builders.size();
+ if ( i ) {
+ i--;
+ for ( ; i>=1; i-- ) {
+ if ( builders[i] ) {
+ builders[i]->done();
+ }
+ }
+ }
+ }
+ BSONObjBuilder *back() {
+ return builders.back().get();
+ }
+ // Storage for field names of elements within builders.back().
+ const char *fieldName() {
+ return fieldNames.back().c_str();
+ }
+ bool empty() const {
+ return builders.size() == 0;
+ }
+ void init() {
+ boost::shared_ptr< BSONObjBuilder > b( new BSONObjBuilder() );
+ builders.push_back( b );
+ fieldNames.push_back( "" );
+ indexes.push_back( 0 );
+ }
+ void pushObject( const char *fieldName ) {
+ boost::shared_ptr< BSONObjBuilder > b( new BSONObjBuilder( builders.back()->subobjStart( fieldName ) ) );
+ builders.push_back( b );
+ fieldNames.push_back( "" );
+ indexes.push_back( 0 );
+ }
+ void pushArray( const char *fieldName ) {
+ boost::shared_ptr< BSONObjBuilder > b( new BSONObjBuilder( builders.back()->subarrayStart( fieldName ) ) );
+ builders.push_back( b );
+ fieldNames.push_back( "" );
+ indexes.push_back( 0 );
+ }
+ BSONObj pop() {
+ BSONObj ret;
+ if ( back()->owned() )
+ ret = back()->obj();
+ else
+ ret = back()->done();
+ builders.pop_back();
+ fieldNames.pop_back();
+ indexes.pop_back();
+ return ret;
+ }
+ void nameFromIndex() {
+ fieldNames.back() = BSONObjBuilder::numStr( indexes.back() );
+ }
+ string popString() {
+ string ret = ss.str();
+ ss.str( "" );
+ return ret;
+ }
+ // Cannot use auto_ptr because its copy constructor takes a non const reference.
+ vector< boost::shared_ptr< BSONObjBuilder > > builders;
+ vector< string > fieldNames;
+ vector< int > indexes;
+ stringstream ss;
+ string ns;
+ OID oid;
+ string binData;
+ BinDataType binDataType;
+ string regex;
+ string regexOptions;
+ Date_t date;
+ OpTime timestamp;
+ };
+
+ struct objectStart {
+ objectStart( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char &c ) const {
+ if ( b.empty() )
+ b.init();
+ else
+ b.pushObject( b.fieldName() );
+ }
+ ObjectBuilder &b;
+ };
+
+ struct arrayStart {
+ arrayStart( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char &c ) const {
+ b.pushArray( b.fieldName() );
+ b.nameFromIndex();
+ }
+ ObjectBuilder &b;
+ };
+
+ struct arrayNext {
+ arrayNext( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char &c ) const {
+ ++b.indexes.back();
+ b.nameFromIndex();
+ }
+ ObjectBuilder &b;
+ };
+
+ struct ch {
+ ch( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char c ) const {
+ b.ss << c;
+ }
+ ObjectBuilder &b;
+ };
+
+ struct chE {
+ chE( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char c ) const {
+ char o = '\0';
+ switch ( c ) {
+ case '\"':
+ o = '\"';
+ break;
+ case '\'':
+ o = '\'';
+ break;
+ case '\\':
+ o = '\\';
+ break;
+ case '/':
+ o = '/';
+ break;
+ case 'b':
+ o = '\b';
+ break;
+ case 'f':
+ o = '\f';
+ break;
+ case 'n':
+ o = '\n';
+ break;
+ case 'r':
+ o = '\r';
+ break;
+ case 't':
+ o = '\t';
+ break;
+ case 'v':
+ o = '\v';
+ break;
+ default:
+ assert( false );
+ }
+ b.ss << o;
+ }
+ ObjectBuilder &b;
+ };
+
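+    // Converts a \uXXXX escape to UTF-8, e.g. "\u00e9" (first=0x00, second=0xe9)
+    // is emitted as the two bytes 0xc3 0xa9.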
+ struct chU {
+ chU( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ unsigned char first = fromHex( start );
+ unsigned char second = fromHex( start + 2 );
+ if ( first == 0 && second < 0x80 )
+ b.ss << second;
+ else if ( first < 0x08 ) {
+ b.ss << char( 0xc0 | ( ( first << 2 ) | ( second >> 6 ) ) );
+ b.ss << char( 0x80 | ( ~0xc0 & second ) );
+ }
+ else {
+ b.ss << char( 0xe0 | ( first >> 4 ) );
+ b.ss << char( 0x80 | ( ~0xc0 & ( ( first << 2 ) | ( second >> 6 ) ) ) );
+ b.ss << char( 0x80 | ( ~0xc0 & second ) );
+ }
+ }
+ ObjectBuilder &b;
+ };
+
+ struct chClear {
+ chClear( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char c ) const {
+ b.popString();
+ }
+ ObjectBuilder &b;
+ };
+
+ struct fieldNameEnd {
+ fieldNameEnd( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ string name = b.popString();
+ massert( 10338 , "Invalid use of reserved field name: " + name,
+ name != "$oid" &&
+ name != "$binary" &&
+ name != "$type" &&
+ name != "$date" &&
+ name != "$timestamp" &&
+ name != "$regex" &&
+ name != "$options" );
+ b.fieldNames.back() = name;
+ }
+ ObjectBuilder &b;
+ };
+
+ struct unquotedFieldNameEnd {
+ unquotedFieldNameEnd( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ string name( start, end );
+ b.fieldNames.back() = name;
+ }
+ ObjectBuilder &b;
+ };
+
+ struct stringEnd {
+ stringEnd( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ b.back()->append( b.fieldName(), b.popString() );
+ }
+ ObjectBuilder &b;
+ };
+
+ struct numberValue {
+ numberValue( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ string raw(start);
+ double val;
+
+ // strtod isn't able to deal with NaN and inf in a portable way.
+ // Correspondingly, we perform the conversions explicitly.
+
+ if ( ! raw.compare(0, 3, "NaN" ) ) {
+ val = std::numeric_limits<double>::quiet_NaN();
+ }
+ else if ( ! raw.compare(0, 8, "Infinity" ) ) {
+ val = std::numeric_limits<double>::infinity();
+ }
+ else if ( ! raw.compare(0, 9, "-Infinity" ) ) {
+ val = -std::numeric_limits<double>::infinity();
+ }
+ else {
+ // We re-parse the numeric string here because spirit parsing of strings
+ // to doubles produces different results from strtod in some cases and
+ // we want to use strtod to ensure consistency with other string to
+ // double conversions in our code.
+
+ val = strtod( start, 0 );
+ }
+
+ b.back()->append( b.fieldName(), val );
+ }
+ ObjectBuilder &b;
+ };
+
+ struct intValue {
+ intValue( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( long long num ) const {
+ if (num >= numeric_limits<int>::min() && num <= numeric_limits<int>::max())
+ b.back()->append( b.fieldName(), (int)num );
+ else
+ b.back()->append( b.fieldName(), num );
+ }
+ ObjectBuilder &b;
+ };
+
+ struct subobjectEnd {
+ subobjectEnd( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ b.pop();
+ }
+ ObjectBuilder &b;
+ };
+
+ struct arrayEnd {
+ arrayEnd( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ b.pop();
+ }
+ ObjectBuilder &b;
+ };
+
+ struct trueValue {
+ trueValue( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ b.back()->appendBool( b.fieldName(), true );
+ }
+ ObjectBuilder &b;
+ };
+
+ struct falseValue {
+ falseValue( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ b.back()->appendBool( b.fieldName(), false );
+ }
+ ObjectBuilder &b;
+ };
+
+ struct nullValue {
+ nullValue( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ b.back()->appendNull( b.fieldName() );
+ }
+ ObjectBuilder &b;
+ };
+
+ struct undefinedValue {
+ undefinedValue( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ b.back()->appendUndefined( b.fieldName() );
+ }
+ ObjectBuilder &b;
+ };
+
+ struct dbrefNS {
+ dbrefNS( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ b.ns = b.popString();
+ }
+ ObjectBuilder &b;
+ };
+
+// NOTE s must be 24 characters.
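+// e.g. "000102030405060708090a0b" yields the OID whose 12 bytes are 0x00, 0x01, ..., 0x0b.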
+ OID stringToOid( const char *s ) {
+ OID oid;
+ char *oidP = (char *)( &oid );
+ for ( int i = 0; i < 12; ++i )
+ oidP[ i ] = fromHex( s + ( i * 2 ) );
+ return oid;
+ }
+
+ struct oidValue {
+ oidValue( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ b.oid = stringToOid( start );
+ }
+ ObjectBuilder &b;
+ };
+
+ struct dbrefEnd {
+ dbrefEnd( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ b.back()->appendDBRef( b.fieldName(), b.ns, b.oid );
+ }
+ ObjectBuilder &b;
+ };
+
+ struct oidEnd {
+ oidEnd( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ b.back()->appendOID( b.fieldName(), &b.oid );
+ }
+ ObjectBuilder &b;
+ };
+
+ struct timestampEnd {
+ timestampEnd( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ b.back()->appendTimestamp( b.fieldName(), b.timestamp.asDate() );
+ }
+ ObjectBuilder &b;
+ };
+
+ struct binDataBinary {
+ binDataBinary( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ massert( 10339 , "Badly formatted bindata", ( end - start ) % 4 == 0 );
+ string encoded( start, end );
+ b.binData = base64::decode( encoded );
+ }
+ ObjectBuilder &b;
+ };
+
+ struct binDataType {
+ binDataType( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ b.binDataType = BinDataType( fromHex( start ) );
+ }
+ ObjectBuilder &b;
+ };
+
+ struct binDataEnd {
+ binDataEnd( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ b.back()->appendBinData( b.fieldName(), b.binData.length(),
+ b.binDataType, b.binData.data() );
+ }
+ ObjectBuilder &b;
+ };
+
+ struct timestampSecs {
+ timestampSecs( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( unsigned long long x) const {
+ b.timestamp = OpTime( (unsigned) (x/1000) , 0);
+ }
+ ObjectBuilder &b;
+ };
+
+ struct timestampInc {
+ timestampInc( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( unsigned x) const {
+ b.timestamp = OpTime(b.timestamp.getSecs(), x);
+ }
+ ObjectBuilder &b;
+ };
+
+ struct dateValue {
+ dateValue( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( Date_t v ) const {
+ b.date = v;
+ }
+ ObjectBuilder &b;
+ };
+
+ struct dateEnd {
+ dateEnd( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ b.back()->appendDate( b.fieldName(), b.date );
+ }
+ ObjectBuilder &b;
+ };
+
+ struct regexValue {
+ regexValue( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ b.regex = b.popString();
+ }
+ ObjectBuilder &b;
+ };
+
+ struct regexOptions {
+ regexOptions( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ b.regexOptions = string( start, end );
+ }
+ ObjectBuilder &b;
+ };
+
+ struct regexEnd {
+ regexEnd( ObjectBuilder &_b ) : b( _b ) {}
+ void operator() ( const char *start, const char *end ) const {
+ b.back()->appendRegex( b.fieldName(), b.regex, b.regexOptions );
+ }
+ ObjectBuilder &b;
+ };
+
+// One gotcha with this parsing library is probably best illustrated with an
+// example. Say we have a production like this:
+// z = ( ch_p( 'a' )[ foo ] >> ch_p( 'b' ) ) | ( ch_p( 'a' )[ foo ] >> ch_p( 'c' ) );
+// On input "ac", action foo() will be called twice -- once as the parser tries
+// to match "ab", again as the parser successfully matches "ac". Sometimes
+// the grammar can be modified to eliminate these situations. Here, for example:
+// z = ch_p( 'a' )[ foo ] >> ( ch_p( 'b' ) | ch_p( 'c' ) );
+// However, this is not always possible. In my implementation I've tried to
+// stick to the following pattern: store fields fed to action callbacks
+// temporarily as ObjectBuilder members, then append to a BSONObjBuilder once
+// the parser has completely matched a nonterminal and won't backtrack. It's
+// worth noting here that this parser follows a short-circuit convention. So,
+// in the original z example on line 3, if the input was "ab", foo() would only
+// be called once.
+ struct JsonGrammar : public grammar< JsonGrammar > {
+ public:
+ JsonGrammar( ObjectBuilder &_b ) : b( _b ) {}
+
+ template < typename ScannerT >
+ struct definition {
+ definition( JsonGrammar const &self ) {
+ object = ch_p( '{' )[ objectStart( self.b ) ] >> !members >> '}';
+ members = list_p((fieldName >> ':' >> value) , ',');
+ fieldName =
+ str[ fieldNameEnd( self.b ) ] |
+ singleQuoteStr[ fieldNameEnd( self.b ) ] |
+ unquotedFieldName[ unquotedFieldNameEnd( self.b ) ];
+ array = ch_p( '[' )[ arrayStart( self.b ) ] >> !elements >> ']';
+ elements = list_p(value, ch_p(',')[arrayNext( self.b )]);
+ value =
+ str[ stringEnd( self.b ) ] |
+ number[ numberValue( self.b ) ] |
+ integer |
+ array[ arrayEnd( self.b ) ] |
+ lexeme_d[ str_p( "true" ) ][ trueValue( self.b ) ] |
+ lexeme_d[ str_p( "false" ) ][ falseValue( self.b ) ] |
+ lexeme_d[ str_p( "null" ) ][ nullValue( self.b ) ] |
+ lexeme_d[ str_p( "undefined" ) ][ undefinedValue( self.b ) ] |
+ singleQuoteStr[ stringEnd( self.b ) ] |
+ date[ dateEnd( self.b ) ] |
+ oid[ oidEnd( self.b ) ] |
+ bindata[ binDataEnd( self.b ) ] |
+ dbref[ dbrefEnd( self.b ) ] |
+ timestamp[ timestampEnd( self.b ) ] |
+ regex[ regexEnd( self.b ) ] |
+ object[ subobjectEnd( self.b ) ] ;
+ // NOTE lexeme_d and rules don't mix well, so we have this mess.
+ // NOTE We use range_p rather than cntrl_p, because the latter is locale dependent.
+ str = lexeme_d[ ch_p( '"' )[ chClear( self.b ) ] >>
+ *( ( ch_p( '\\' ) >>
+ (
+ ch_p( 'b' )[ chE( self.b ) ] |
+ ch_p( 'f' )[ chE( self.b ) ] |
+ ch_p( 'n' )[ chE( self.b ) ] |
+ ch_p( 'r' )[ chE( self.b ) ] |
+ ch_p( 't' )[ chE( self.b ) ] |
+ ch_p( 'v' )[ chE( self.b ) ] |
+ ( ch_p( 'u' ) >> ( repeat_p( 4 )[ xdigit_p ][ chU( self.b ) ] ) ) |
+ ( ~ch_p('x') & (~range_p('0','9'))[ ch( self.b ) ] ) // hex and octal aren't supported
+ )
+ ) |
+ ( ~range_p( 0x00, 0x1f ) & ~ch_p( '"' ) & ( ~ch_p( '\\' ) )[ ch( self.b ) ] ) ) >> '"' ];
+
+ singleQuoteStr = lexeme_d[ ch_p( '\'' )[ chClear( self.b ) ] >>
+ *( ( ch_p( '\\' ) >>
+ (
+ ch_p( 'b' )[ chE( self.b ) ] |
+ ch_p( 'f' )[ chE( self.b ) ] |
+ ch_p( 'n' )[ chE( self.b ) ] |
+ ch_p( 'r' )[ chE( self.b ) ] |
+ ch_p( 't' )[ chE( self.b ) ] |
+ ch_p( 'v' )[ chE( self.b ) ] |
+ ( ch_p( 'u' ) >> ( repeat_p( 4 )[ xdigit_p ][ chU( self.b ) ] ) ) |
+ ( ~ch_p('x') & (~range_p('0','9'))[ ch( self.b ) ] ) // hex and octal aren't supported
+ )
+ ) |
+ ( ~range_p( 0x00, 0x1f ) & ~ch_p( '\'' ) & ( ~ch_p( '\\' ) )[ ch( self.b ) ] ) ) >> '\'' ];
+
+ // real_p accepts numbers with nonsignificant zero prefixes, which
+ // aren't allowed in JSON. Oh well.
+ number = strict_real_p | str_p( "NaN" ) | str_p( "Infinity" ) | str_p( "-Infinity" );
+
+ static int_parser<long long, 10, 1, numeric_limits<long long>::digits10 + 1> long_long_p;
+ integer = long_long_p[ intValue(self.b) ];
+
+ // We allow a subset of valid js identifier names here.
+ unquotedFieldName = lexeme_d[ ( alpha_p | ch_p( '$' ) | ch_p( '_' ) ) >> *( ( alnum_p | ch_p( '$' ) | ch_p( '_' )) ) ];
+
+ dbref = dbrefS | dbrefT;
+ dbrefS = ch_p( '{' ) >> "\"$ref\"" >> ':' >>
+ str[ dbrefNS( self.b ) ] >> ',' >> "\"$id\"" >> ':' >> quotedOid >> '}';
+ dbrefT = str_p( "Dbref" ) >> '(' >> str[ dbrefNS( self.b ) ] >> ',' >>
+ quotedOid >> ')';
+
+ timestamp = ch_p( '{' ) >> "\"$timestamp\"" >> ':' >> '{' >>
+ "\"t\"" >> ':' >> uint_parser<unsigned long long, 10, 1, -1>()[ timestampSecs(self.b) ] >> ',' >>
+ "\"i\"" >> ':' >> uint_parser<unsigned int, 10, 1, -1>()[ timestampInc(self.b) ] >> '}' >>'}';
+
+ oid = oidS | oidT;
+ oidS = ch_p( '{' ) >> "\"$oid\"" >> ':' >> quotedOid >> '}';
+ oidT = str_p( "ObjectId" ) >> '(' >> quotedOid >> ')';
+
+ quotedOid = lexeme_d[ '"' >> ( repeat_p( 24 )[ xdigit_p ] )[ oidValue( self.b ) ] >> '"' ];
+
+ bindata = ch_p( '{' ) >> "\"$binary\"" >> ':' >>
+ lexeme_d[ '"' >> ( *( range_p( 'A', 'Z' ) | range_p( 'a', 'z' ) | range_p( '0', '9' ) | ch_p( '+' ) | ch_p( '/' ) ) >> *ch_p( '=' ) )[ binDataBinary( self.b ) ] >> '"' ] >> ',' >> "\"$type\"" >> ':' >>
+ lexeme_d[ '"' >> ( repeat_p( 2 )[ xdigit_p ] )[ binDataType( self.b ) ] >> '"' ] >> '}';
+
+ // TODO: this will need to use a signed parser at some point
+ date = dateS | dateT;
+ dateS = ch_p( '{' ) >> "\"$date\"" >> ':' >> uint_parser< Date_t >()[ dateValue( self.b ) ] >> '}';
+ dateT = !str_p("new") >> str_p( "Date" ) >> '(' >> uint_parser< Date_t >()[ dateValue( self.b ) ] >> ')';
+
+ regex = regexS | regexT;
+ regexS = ch_p( '{' ) >> "\"$regex\"" >> ':' >> str[ regexValue( self.b ) ] >> ',' >> "\"$options\"" >> ':' >> lexeme_d[ '"' >> ( *( alpha_p ) )[ regexOptions( self.b ) ] >> '"' ] >> '}';
+ // FIXME Obviously it would be nice to unify this with str.
+ regexT = lexeme_d[ ch_p( '/' )[ chClear( self.b ) ] >>
+ *( ( ch_p( '\\' ) >>
+ ( ch_p( '"' )[ chE( self.b ) ] |
+ ch_p( '\\' )[ chE( self.b ) ] |
+ ch_p( '/' )[ chE( self.b ) ] |
+ ch_p( 'b' )[ chE( self.b ) ] |
+ ch_p( 'f' )[ chE( self.b ) ] |
+ ch_p( 'n' )[ chE( self.b ) ] |
+ ch_p( 'r' )[ chE( self.b ) ] |
+ ch_p( 't' )[ chE( self.b ) ] |
+ ( ch_p( 'u' ) >> ( repeat_p( 4 )[ xdigit_p ][ chU( self.b ) ] ) ) ) ) |
+ ( ~range_p( 0x00, 0x1f ) & ~ch_p( '/' ) & ( ~ch_p( '\\' ) )[ ch( self.b ) ] ) ) >> str_p( "/" )[ regexValue( self.b ) ]
+ >> ( *( ch_p( 'i' ) | ch_p( 'g' ) | ch_p( 'm' ) ) )[ regexOptions( self.b ) ] ];
+ }
+ rule< ScannerT > object, members, array, elements, value, str, number, integer,
+ dbref, dbrefS, dbrefT, timestamp, timestampS, timestampT, oid, oidS, oidT,
+ bindata, date, dateS, dateT, regex, regexS, regexT, quotedOid, fieldName,
+ unquotedFieldName, singleQuoteStr;
+ const rule< ScannerT > &start() const {
+ return object;
+ }
+ };
+ ObjectBuilder &b;
+ };
+
+ BSONObj fromjson( const char *str , int* len) {
+ if ( str[0] == '\0' ) {
+ if (len) *len = 0;
+ return BSONObj();
+ }
+
+ ObjectBuilder b;
+ JsonGrammar parser( b );
+ parse_info<> result = parse( str, parser, space_p );
+ if (len) {
+ *len = result.stop - str;
+ }
+ else if ( !result.full ) {
+ int limit = strnlen(result.stop , 10);
+ if (limit == -1) limit = 10;
+ msgasserted(10340, "Failure parsing JSON string near: " + string( result.stop, limit ));
+ }
+ BSONObj ret = b.pop();
+ assert( b.empty() );
+ return ret;
+ }
+
+ BSONObj fromjson( const string &str ) {
+ return fromjson( str.c_str() );
+ }
+
+} // namespace mongo
diff --git a/src/mongo/db/json.h b/src/mongo/db/json.h
new file mode 100644
index 00000000000..68dae042574
--- /dev/null
+++ b/src/mongo/db/json.h
@@ -0,0 +1,41 @@
+/** @file json.h */
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../pch.h"
+#include "jsobj.h"
+
+namespace mongo {
+
+ /** Create a BSONObj from a JSON <http://www.json.org> string. In addition
+     to the JSON extensions described here
+ <http://mongodb.onconfluence.com/display/DOCS/Mongo+Extended+JSON>,
+ this function accepts certain unquoted field names and allows single quotes
+ to optionally be used when specifying field names and string values instead
+ of double quotes. JSON unicode escape sequences (of the form \uXXXX) are
+ converted to utf8.
+ \throws MsgAssertionException if parsing fails. The message included with
+ this assertion includes a rough indication of where parsing failed.
+ */
+ BSONObj fromjson(const string &str);
+
+ /** len will be size of JSON object in text chars. */
+ BSONObj fromjson(const char *str, int* len=NULL);
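+
+    /* Usage sketch (values illustrative):
+         BSONObj o = fromjson( "{ name: 'mongo', n: 1 }" );                               // unquoted names, single quotes
+         BSONObj p = fromjson( "{ \"_id\" : { \"$oid\" : \"4c8a331bd5d7e1e2f3a4b5c6\" } }" );  // extended JSON
+    */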
+
+} // namespace mongo
diff --git a/src/mongo/db/key.cpp b/src/mongo/db/key.cpp
new file mode 100644
index 00000000000..47449986d21
--- /dev/null
+++ b/src/mongo/db/key.cpp
@@ -0,0 +1,678 @@
+// @file key.cpp
+
+/**
+* Copyright (C) 2011 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "key.h"
+#include "../util/unittest.h"
+
+namespace mongo {
+
+ extern const Ordering nullOrdering = Ordering::make(BSONObj());
+
+ // KeyBson is for V0 (version #0) indexes
+
+ int oldCompare(const BSONObj& l,const BSONObj& r, const Ordering &o);
+
+ // "old" = pre signed dates & such; i.e. btree V0
+ /* must be same canon type when called */
+ int oldCompareElementValues(const BSONElement& l, const BSONElement& r) {
+ dassert( l.canonicalType() == r.canonicalType() );
+ int f;
+ double x;
+
+ switch ( l.type() ) {
+ case EOO:
+ case Undefined: // EOO and Undefined are same canonicalType
+ case jstNULL:
+ case MaxKey:
+ case MinKey:
+ return 0;
+ case Bool:
+ return *l.value() - *r.value();
+ case Timestamp:
+ case Date:
+ // unsigned dates for old version
+ if ( l.date() < r.date() )
+ return -1;
+ return l.date() == r.date() ? 0 : 1;
+ case NumberLong:
+ if( r.type() == NumberLong ) {
+ long long L = l._numberLong();
+ long long R = r._numberLong();
+ if( L < R ) return -1;
+ if( L == R ) return 0;
+ return 1;
+ }
+ // else fall through
+ case NumberInt:
+ case NumberDouble: {
+ double left = l.number();
+ double right = r.number();
+ bool lNan = !( left <= numeric_limits< double >::max() &&
+ left >= -numeric_limits< double >::max() );
+ bool rNan = !( right <= numeric_limits< double >::max() &&
+ right >= -numeric_limits< double >::max() );
+ if ( lNan ) {
+ if ( rNan ) {
+ return 0;
+ }
+ else {
+ return -1;
+ }
+ }
+ else if ( rNan ) {
+ return 1;
+ }
+ x = left - right;
+ if ( x < 0 ) return -1;
+ return x == 0 ? 0 : 1;
+ }
+ case jstOID:
+ return memcmp(l.value(), r.value(), 12);
+ case Code:
+ case Symbol:
+ case String:
+ // nulls not allowed in the middle of strings in the old version
+ return strcmp(l.valuestr(), r.valuestr());
+ case Object:
+ case Array:
+ return oldCompare(l.embeddedObject(), r.embeddedObject(), nullOrdering);
+ case DBRef: {
+ int lsz = l.valuesize();
+ int rsz = r.valuesize();
+ if ( lsz - rsz != 0 ) return lsz - rsz;
+ return memcmp(l.value(), r.value(), lsz);
+ }
+ case BinData: {
+ int lsz = l.objsize(); // our bin data size in bytes, not including the subtype byte
+ int rsz = r.objsize();
+ if ( lsz - rsz != 0 ) return lsz - rsz;
+ return memcmp(l.value()+4, r.value()+4, lsz+1);
+ }
+ case RegEx: {
+ int c = strcmp(l.regex(), r.regex());
+ if ( c )
+ return c;
+ return strcmp(l.regexFlags(), r.regexFlags());
+ }
+ case CodeWScope : {
+ f = l.canonicalType() - r.canonicalType();
+ if ( f )
+ return f;
+ f = strcmp( l.codeWScopeCode() , r.codeWScopeCode() );
+ if ( f )
+ return f;
+ f = strcmp( l.codeWScopeScopeData() , r.codeWScopeScopeData() );
+ if ( f )
+ return f;
+ return 0;
+ }
+ default:
+ out() << "oldCompareElementValues: bad type " << (int) l.type() << endl;
+ assert(false);
+ }
+ return -1;
+ }
+
+ int oldElemCompare(const BSONElement&l , const BSONElement& r) {
+ int lt = (int) l.canonicalType();
+ int rt = (int) r.canonicalType();
+ int x = lt - rt;
+ if( x )
+ return x;
+ return oldCompareElementValues(l, r);
+ }
+
+ // pre signed dates & such
+ int oldCompare(const BSONObj& l,const BSONObj& r, const Ordering &o) {
+ BSONObjIterator i(l);
+ BSONObjIterator j(r);
+ unsigned mask = 1;
+ while ( 1 ) {
+ // so far, equal...
+
+ BSONElement l = i.next();
+ BSONElement r = j.next();
+ if ( l.eoo() )
+ return r.eoo() ? 0 : -1;
+ if ( r.eoo() )
+ return 1;
+
+ int x;
+ {
+ x = oldElemCompare(l, r);
+ if( o.descending(mask) )
+ x = -x;
+ }
+ if ( x != 0 )
+ return x;
+ mask <<= 1;
+ }
+ return -1;
+ }
+
+ /* old style compares:
+ - dates are unsigned
+ - strings no nulls
+ */
+ int KeyBson::woCompare(const KeyBson& r, const Ordering &o) const {
+ return oldCompare(_o, r._o, o);
+ }
+
+ // woEqual could be made faster than woCompare but this is for backward compatibility so not worth a big effort
+ bool KeyBson::woEqual(const KeyBson& r) const {
+ return oldCompare(_o, r._o, nullOrdering) == 0;
+ }
+
+ // [ ][HASMORE][x][y][canontype_4bits]
+ enum CanonicalsEtc {
+ cminkey=1,
+ cnull=2,
+ cdouble=4,
+ cstring=6,
+ cbindata=7,
+ coid=8,
+ cfalse=10,
+ ctrue=11,
+ cdate=12,
+ cmaxkey=14,
+ cCANONTYPEMASK = 0xf,
+ cY = 0x10,
+ cint = cY | cdouble,
+ cX = 0x20,
+ clong = cX | cdouble,
+ cHASMORE = 0x40,
+ cNOTUSED = 0x80 // but see IsBSON sentinel - this bit not usable without great care
+ };
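+
+    // e.g. the first element of a two-field key holding an int has lead byte
+    // cHASMORE | cint (0x40 | 0x14 == 0x54), followed by the value stored as an 8-byte double.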
+
+ // bindata bson type
+ const unsigned BinDataLenMask = 0xf0; // lengths are powers of 2 of this value
+ const unsigned BinDataTypeMask = 0x0f; // 0-7 as you would expect, 8-15 are 128+value. see BinDataType.
+ const int BinDataLenMax = 32;
+ const int BinDataLengthToCode[] = {
+ 0x00, 0x10, 0x20, 0x30, 0x40, 0x50, 0x60, 0x70,
+ 0x80, -1/*9*/, 0x90/*10*/, -1/*11*/, 0xa0/*12*/, -1/*13*/, 0xb0/*14*/, -1/*15*/,
+ 0xc0/*16*/, -1, -1, -1, 0xd0/*20*/, -1, -1, -1,
+ 0xe0/*24*/, -1, -1, -1, -1, -1, -1, -1,
+ 0xf0/*32*/
+ };
+ const int BinDataCodeToLength[] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 32
+ };
+
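+    // e.g. BinDataLengthToCode[10] == 0x90 and binDataCodeToLength(0x90) == 10; lengths with
+    // no code (such as 9 or 11) are stored in the traditional BSON format instead.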
+ int binDataCodeToLength(int codeByte) {
+ return BinDataCodeToLength[codeByte >> 4];
+ }
+
+ /** object cannot be represented in compact format. so store in traditional bson format
+ with a leading sentinel byte IsBSON to indicate it's in that format.
+
+ Given that the KeyV1Owned constructor already grabbed a bufbuilder, we reuse it here
+ so that we don't have to do an extra malloc.
+ */
+ void KeyV1Owned::traditional(const BSONObj& obj) {
+ b.reset();
+ b.appendUChar(IsBSON);
+ b.appendBuf(obj.objdata(), obj.objsize());
+ _keyData = (const unsigned char *) b.buf();
+ }
+
+ KeyV1Owned::KeyV1Owned(const KeyV1& rhs) {
+ b.appendBuf( rhs.data(), rhs.dataSize() );
+ _keyData = (const unsigned char *) b.buf();
+ dassert( b.len() == dataSize() ); // check datasize method is correct
+ dassert( (*_keyData & cNOTUSED) == 0 );
+ }
+
+ // fromBSON to Key format
+ KeyV1Owned::KeyV1Owned(const BSONObj& obj) {
+ BSONObj::iterator i(obj);
+ unsigned char bits = 0;
+ while( 1 ) {
+ BSONElement e = i.next();
+ if( i.more() )
+ bits |= cHASMORE;
+ switch( e.type() ) {
+ case MinKey:
+ b.appendUChar(cminkey|bits);
+ break;
+ case jstNULL:
+ b.appendUChar(cnull|bits);
+ break;
+ case MaxKey:
+ b.appendUChar(cmaxkey|bits);
+ break;
+ case Bool:
+ b.appendUChar( (e.boolean()?ctrue:cfalse) | bits );
+ break;
+ case jstOID:
+ b.appendUChar(coid|bits);
+ b.appendBuf(&e.__oid(), sizeof(OID));
+ break;
+ case BinData:
+ {
+ int t = e.binDataType();
+ // 0-7 and 0x80 to 0x87 are supported by Key
+ if( (t & 0x78) == 0 && t != ByteArrayDeprecated ) {
+ int len;
+ const char * d = e.binData(len);
+ if( len <= BinDataLenMax ) {
+ int code = BinDataLengthToCode[len];
+ if( code >= 0 ) {
+ if( t >= 128 )
+ t = (t-128) | 0x08;
+ dassert( (code&t) == 0 );
+ b.appendUChar( cbindata|bits );
+ b.appendUChar( code | t );
+ b.appendBuf(d, len);
+ break;
+ }
+ }
+ }
+ traditional(obj);
+ return;
+ }
+ case Date:
+ b.appendUChar(cdate|bits);
+ b.appendStruct(e.date());
+ break;
+ case String:
+ {
+ b.appendUChar(cstring|bits);
+ // note we do not store the terminating null, to save space.
+ unsigned x = (unsigned) e.valuestrsize() - 1;
+ if( x > 255 ) {
+ traditional(obj);
+ return;
+ }
+ b.appendUChar(x);
+ b.appendBuf(e.valuestr(), x);
+ break;
+ }
+ case NumberInt:
+ b.appendUChar(cint|bits);
+ b.appendNum((double) e._numberInt());
+ break;
+ case NumberLong:
+ {
+ long long n = e._numberLong();
+ long long m = 2LL << 52;
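+                    // 2LL << 52 == 2^53, the largest integer magnitude a double represents exactly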
+ DEV {
+ long long d = m-1;
+ assert( ((long long) ((double) -d)) == -d );
+ }
+ if( n >= m || n <= -m ) {
+ // can't represent exactly as a double
+ traditional(obj);
+ return;
+ }
+ b.appendUChar(clong|bits);
+ b.appendNum((double) n);
+ break;
+ }
+ case NumberDouble:
+ {
+ double d = e._numberDouble();
+ if( isNaN(d) ) {
+ traditional(obj);
+ return;
+ }
+ b.appendUChar(cdouble|bits);
+ b.appendNum(d);
+ break;
+ }
+ default:
+ // if other types involved, store as traditional BSON
+ traditional(obj);
+ return;
+ }
+ if( !i.more() )
+ break;
+ bits = 0;
+ }
+ _keyData = (const unsigned char *) b.buf();
+ dassert( b.len() == dataSize() ); // check datasize method is correct
+ dassert( (*_keyData & cNOTUSED) == 0 );
+ }
+
+ BSONObj KeyV1::toBson() const {
+ assert( _keyData != 0 );
+ if( !isCompactFormat() )
+ return bson();
+
+ BSONObjBuilder b(512);
+ const unsigned char *p = _keyData;
+ while( 1 ) {
+ unsigned bits = *p++;
+
+ switch( bits & 0x3f ) {
+ case cminkey: b.appendMinKey(""); break;
+ case cnull: b.appendNull(""); break;
+ case cfalse: b.appendBool("", false); break;
+ case ctrue: b.appendBool("", true); break;
+ case cmaxkey:
+ b.appendMaxKey("");
+ break;
+ case cstring:
+ {
+ unsigned sz = *p++;
+ // we build the element ourself as we have to null terminate it
+ BufBuilder &bb = b.bb();
+ bb.appendNum((char) String);
+ bb.appendUChar(0); // fieldname ""
+ bb.appendNum(sz+1);
+ bb.appendBuf(p, sz);
+ bb.appendUChar(0); // null char at end of string
+ p += sz;
+ break;
+ }
+ case coid:
+ b.appendOID("", (OID *) p);
+ p += sizeof(OID);
+ break;
+ case cbindata:
+ {
+ int len = binDataCodeToLength(*p);
+ int subtype = (*p) & BinDataTypeMask;
+ if( subtype & 0x8 ) {
+ subtype = (subtype & 0x7) | 0x80;
+ }
+ b.appendBinData("", len, (BinDataType) subtype, ++p);
+ p += len;
+ break;
+ }
+ case cdate:
+ b.appendDate("", (Date_t&) *p);
+ p += 8;
+ break;
+ case cdouble:
+ b.append("", (double&) *p);
+ p += sizeof(double);
+ break;
+ case cint:
+ b.append("", (int) ((double&) *p));
+ p += sizeof(double);
+ break;
+ case clong:
+ b.append("", (long long) ((double&) *p));
+ p += sizeof(double);
+ break;
+ default:
+ assert(false);
+ }
+
+ if( (bits & cHASMORE) == 0 )
+ break;
+ }
+ return b.obj();
+ }
+
+ static int compare(const unsigned char *&l, const unsigned char *&r) {
+ int lt = (*l & cCANONTYPEMASK);
+ int rt = (*r & cCANONTYPEMASK);
+ int x = lt - rt;
+ if( x )
+ return x;
+
+ l++; r++;
+
+ // same type
+ switch( lt ) {
+ case cdouble:
+ {
+ double L = *((double *) l);
+ double R = *((double *) r);
+ if( L < R )
+ return -1;
+ if( L != R )
+ return 1;
+ l += 8; r += 8;
+ break;
+ }
+ case cstring:
+ {
+ int lsz = *l;
+ int rsz = *r;
+ int common = min(lsz, rsz);
+ l++; r++; // skip the size byte
+ // use memcmp as we (will) allow zeros in UTF8 strings
+ int res = memcmp(l, r, common);
+ if( res )
+ return res;
+ // longer string is the greater one
+ int diff = lsz-rsz;
+ if( diff )
+ return diff;
+ l += lsz; r += lsz;
+ break;
+ }
+ case cbindata:
+ {
+ int L = *l;
+ int R = *r;
+ int llen = binDataCodeToLength(L);
+ int diff = L-R; // checks length and subtype simultaneously
+ if( diff ) {
+ // unfortunately nibbles are backwards to do subtype and len in one check (could bit swap...)
+ int rlen = binDataCodeToLength(R);
+ if( llen != rlen )
+ return llen - rlen;
+ return diff;
+ }
+ // same length, same type
+ l++; r++;
+ int res = memcmp(l, r, llen);
+ if( res )
+ return res;
+ l += llen; r += llen;
+ break;
+ }
+ case cdate:
+ {
+ long long L = *((long long *) l);
+ long long R = *((long long *) r);
+ if( L < R )
+ return -1;
+ if( L > R )
+ return 1;
+ l += 8; r += 8;
+ break;
+ }
+ case coid:
+ {
+ int res = memcmp(l, r, sizeof(OID));
+ if( res )
+ return res;
+ l += 12; r += 12;
+ break;
+ }
+ default:
+ // all the others are a match -- e.g. null == null
+ ;
+ }
+
+ return 0;
+ }
+
+ // at least one of this and right are traditional BSON format
+ int NOINLINE_DECL KeyV1::compareHybrid(const KeyV1& right, const Ordering& order) const {
+ BSONObj L = toBson();
+ BSONObj R = right.toBson();
+ return L.woCompare(R, order, /*considerfieldname*/false);
+ }
+
+ int KeyV1::woCompare(const KeyV1& right, const Ordering &order) const {
+ const unsigned char *l = _keyData;
+ const unsigned char *r = right._keyData;
+
+ if( (*l|*r) == IsBSON ) // only can do this if cNOTUSED maintained
+ return compareHybrid(right, order);
+
+ unsigned mask = 1;
+ while( 1 ) {
+ char lval = *l;
+ char rval = *r;
+ {
+ int x = compare(l, r); // updates l and r pointers
+ if( x ) {
+ if( order.descending(mask) )
+ x = -x;
+ return x;
+ }
+ }
+
+ {
+ int x = ((int)(lval & cHASMORE)) - ((int)(rval & cHASMORE));
+ if( x )
+ return x;
+ if( (lval & cHASMORE) == 0 )
+ break;
+ }
+
+ mask <<= 1;
+ }
+
+ return 0;
+ }
+
+ static unsigned sizes[] = {
+ 0,
+ 1, //cminkey=1,
+ 1, //cnull=2,
+ 0,
+ 9, //cdouble=4,
+ 0,
+ 0, //cstring=6,
+ 0,
+ 13, //coid=8,
+ 0,
+ 1, //cfalse=10,
+ 1, //ctrue=11,
+ 9, //cdate=12,
+ 0,
+ 1, //cmaxkey=14,
+ 0
+ };
+
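+    // e.g. a ctrue element occupies 1 byte; a cstring element holding "ab" occupies
+    // 1 (lead byte) + 1 (length byte) + 2 (characters) = 4 bytes.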
+ inline unsigned sizeOfElement(const unsigned char *p) {
+ unsigned type = *p & cCANONTYPEMASK;
+ unsigned sz = sizes[type];
+ if( sz == 0 ) {
+ if( type == cstring ) {
+ sz = ((unsigned) p[1]) + 2;
+ }
+ else {
+ assert( type == cbindata );
+ sz = binDataCodeToLength(p[1]) + 2;
+ }
+ }
+ return sz;
+ }
+
+ int KeyV1::dataSize() const {
+ const unsigned char *p = _keyData;
+ if( !isCompactFormat() ) {
+ return bson().objsize() + 1;
+ }
+
+ bool more;
+ do {
+ unsigned z = sizeOfElement(p);
+ more = (*p & cHASMORE) != 0;
+ p += z;
+ } while( more );
+ return p - _keyData;
+ }
+
+ bool KeyV1::woEqual(const KeyV1& right) const {
+ const unsigned char *l = _keyData;
+ const unsigned char *r = right._keyData;
+
+ if( (*l|*r) == IsBSON ) {
+ return toBson().equal(right.toBson());
+ }
+
+ while( 1 ) {
+ char lval = *l;
+ char rval = *r;
+ if( (lval&(cCANONTYPEMASK|cHASMORE)) != (rval&(cCANONTYPEMASK|cHASMORE)) )
+ return false;
+ l++; r++;
+ switch( lval&cCANONTYPEMASK ) {
+ case coid:
+ if( *((unsigned*) l) != *((unsigned*) r) )
+ return false;
+ l += 4; r += 4;
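+                // no break: an OID is 12 bytes, so fall through and compare the
+                // remaining 8 bytes exactly as the cdate case does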
+ case cdate:
+ if( *((unsigned long long *) l) != *((unsigned long long *) r) )
+ return false;
+ l += 8; r += 8;
+ break;
+ case cdouble:
+ if( *((double *) l) != *((double *) r) )
+ return false;
+ l += 8; r += 8;
+ break;
+ case cstring:
+ {
+ if( *l != *r )
+ return false; // not same length
+ unsigned sz = ((unsigned) *l) + 1;
+ if( memcmp(l, r, sz) )
+ return false;
+ l += sz; r += sz;
+ break;
+ }
+ case cbindata:
+ {
+ if( *l != *r )
+ return false; // len or subtype mismatch
+ int len = binDataCodeToLength(*l) + 1;
+ if( memcmp(l, r, len) )
+ return false;
+ l += len; r += len;
+ break;
+ }
+ case cminkey:
+ case cnull:
+ case cfalse:
+ case ctrue:
+ case cmaxkey:
+ break;
+ default:
+ assert(false);
+ }
+ if( (lval&cHASMORE) == 0 )
+ break;
+ }
+ return true;
+ }
+
+ struct CmpUnitTest : public UnitTest {
+ void run() {
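+            // sanity check: string/byte comparisons must treat chars as unsigned,
+            // so 0xFD (-3 as a signed char) sorts after 0x03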
+ char a[2];
+ char b[2];
+ a[0] = -3;
+ a[1] = 0;
+ b[0] = 3;
+ b[1] = 0;
+ assert( strcmp(a,b)>0 && memcmp(a,b,2)>0 );
+ }
+ } cunittest;
+
+}
diff --git a/src/mongo/db/key.h b/src/mongo/db/key.h
new file mode 100644
index 00000000000..9284cdc7422
--- /dev/null
+++ b/src/mongo/db/key.h
@@ -0,0 +1,115 @@
+// @file key.h class(es) representing individual keys in a btree
+
+/**
+* Copyright (C) 2011 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "jsobj.h"
+
+namespace mongo {
+
+ /** Key class for precomputing a small format index key that is denser than a traditional BSONObj.
+
+ KeyBson is a legacy wrapper implementation for old BSONObj style keys for v:0 indexes.
+
+ KeyV1 is the new implementation.
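+
+        A rough usage sketch (the Ordering value and buffer name below are illustrative):
+
+            KeyV1Owned a( BSON( "" << 1 ) );       // build a compact key from a BSONObj
+            KeyV1 b( bucketKeyData );              // wrap key bytes already stored in a btree bucket
+            int cmp = a.woCompare( b, ordering );  // negative / zero / positive, like memcmp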
+ */
+ class KeyBson /* "KeyV0" */ {
+ public:
+ KeyBson() { }
+ explicit KeyBson(const char *keyData) : _o(keyData) { }
+ explicit KeyBson(const BSONObj& obj) : _o(obj) { }
+ int woCompare(const KeyBson& r, const Ordering &o) const;
+ BSONObj toBson() const { return _o; }
+ string toString() const { return _o.toString(); }
+ int dataSize() const { return _o.objsize(); }
+ const char * data() const { return _o.objdata(); }
+ BSONElement _firstElement() const { return _o.firstElement(); }
+ bool isCompactFormat() const { return false; }
+ bool woEqual(const KeyBson& r) const;
+ void assign(const KeyBson& rhs) { *this = rhs; }
+ private:
+ BSONObj _o;
+ };
+
+ class KeyV1Owned;
+
+ // corresponding to BtreeData_V1
+ class KeyV1 {
+        void operator=(const KeyV1&); // disallowed just to make people be careful, since we don't own the buffer
+        KeyV1(const KeyV1Owned&); // disallowed: a KeyV1Owned's buffer would likely go out of scope while this KeyV1 still pointed into it
+ public:
+ KeyV1() { _keyData = 0; }
+ ~KeyV1() { DEV _keyData = (const unsigned char *) 1; }
+
+ KeyV1(const KeyV1& rhs) : _keyData(rhs._keyData) {
+ dassert( _keyData > (const unsigned char *) 1 );
+ }
+
+ // explicit version of operator= to be safe
+ void assign(const KeyV1& rhs) {
+ _keyData = rhs._keyData;
+ }
+
+ /** @param keyData can be a buffer containing data in either BSON format, OR in KeyV1 format.
+ when BSON, we are just a wrapper
+ */
+ explicit KeyV1(const char *keyData) : _keyData((unsigned char *) keyData) { }
+
+ int woCompare(const KeyV1& r, const Ordering &o) const;
+ bool woEqual(const KeyV1& r) const;
+ BSONObj toBson() const;
+ string toString() const { return toBson().toString(); }
+
+ /** get the key data we want to store in the btree bucket */
+ const char * data() const { return (const char *) _keyData; }
+
+ /** @return size of data() */
+ int dataSize() const;
+
+ /** only used by geo, which always has bson keys */
+ BSONElement _firstElement() const { return bson().firstElement(); }
+ bool isCompactFormat() const { return *_keyData != IsBSON; }
+ protected:
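+        // first byte of _keyData: 0xff (IsBSON) means a traditional BSONObj follows;
+        // any other value is the first type byte of the compact encoding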
+ enum { IsBSON = 0xff };
+ const unsigned char *_keyData;
+ BSONObj bson() const {
+ dassert( !isCompactFormat() );
+ return BSONObj((const char *) _keyData+1);
+ }
+ private:
+ int compareHybrid(const KeyV1& right, const Ordering& order) const;
+ };
+
+ class KeyV1Owned : public KeyV1 {
+ void operator=(const KeyV1Owned&);
+ public:
+        /** @param obj a BSON object to be translated to KeyV1 format. If the object isn't
+ representable in KeyV1 format (which happens, intentionally, at times)
+ it will stay as bson herein.
+ */
+ KeyV1Owned(const BSONObj& obj);
+
+ /** makes a copy (memcpy's the whole thing) */
+ KeyV1Owned(const KeyV1& rhs);
+
+ private:
+ StackBufBuilder b;
+ void traditional(const BSONObj& obj); // store as traditional bson not as compact format
+ };
+
+};
diff --git a/src/mongo/db/lasterror.cpp b/src/mongo/db/lasterror.cpp
new file mode 100644
index 00000000000..4ed4dfb0571
--- /dev/null
+++ b/src/mongo/db/lasterror.cpp
@@ -0,0 +1,142 @@
+// lasterror.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+
+#include "../util/unittest.h"
+#include "../util/net/message.h"
+
+
+#include "lasterror.h"
+#include "jsobj.h"
+
+namespace mongo {
+
+ LastError LastError::noError;
+ LastErrorHolder lastError;
+
+ bool isShell = false;
+ void raiseError(int code , const char *msg) {
+ LastError *le = lastError.get();
+ if ( le == 0 ) {
+ /* might be intentional (non-user thread) */
+ DEV {
+ static unsigned n;
+ if( ++n < 4 && !isShell ) log() << "dev: lastError==0 won't report:" << msg << endl;
+ }
+ }
+ else if ( le->disabled ) {
+ log() << "lastError disabled, can't report: " << code << ":" << msg << endl;
+ }
+ else {
+ le->raiseError(code, msg);
+ }
+ }
+
+ bool LastError::appendSelf( BSONObjBuilder &b , bool blankErr ) {
+ if ( !valid ) {
+ if ( blankErr )
+ b.appendNull( "err" );
+ b.append( "n", 0 );
+ return false;
+ }
+
+ if ( msg.empty() ) {
+ if ( blankErr ) {
+ b.appendNull( "err" );
+ }
+ }
+ else {
+ b.append( "err", msg );
+ }
+
+ if ( code )
+ b.append( "code" , code );
+ if ( updatedExisting != NotUpdate )
+ b.appendBool( "updatedExisting", updatedExisting == True );
+ if ( upsertedId.isSet() )
+ b.append( "upserted" , upsertedId );
+ if ( writebackId.isSet() ) {
+ b.append( "writeback" , writebackId );
+ b.append( "instanceIdent" , prettyHostName() ); // this can be any unique string
+ }
+ b.appendNumber( "n", nObjects );
+
+ return ! msg.empty();
+ }
+
+ LastErrorHolder::~LastErrorHolder() {
+ }
+
+
+ LastError * LastErrorHolder::disableForCommand() {
+ LastError *le = _get();
+ uassert(13649, "no operation yet", le);
+ le->disabled = true;
+ le->nPrev--; // caller is a command that shouldn't count as an operation
+ return le;
+ }
+
+ LastError * LastErrorHolder::get( bool create ) {
+ LastError *ret = _get( create );
+ if ( ret && !ret->disabled )
+ return ret;
+ return 0;
+ }
+
+ LastError * LastErrorHolder::_get( bool create ) {
+ LastError * le = _tl.get();
+ if ( ! le && create ) {
+ le = new LastError();
+ _tl.reset( le );
+ }
+ return le;
+ }
+
+ void LastErrorHolder::release() {
+ _tl.release();
+ }
+
+ /** ok to call more than once. */
+ void LastErrorHolder::initThread() {
+ if( ! _tl.get() )
+ _tl.reset( new LastError() );
+ }
+
+ void LastErrorHolder::reset( LastError * le ) {
+ _tl.reset( le );
+ }
+
+ void prepareErrForNewRequest( Message &m, LastError * err ) {
+ // a killCursors message shouldn't affect last error
+ assert( err );
+ if ( m.operation() == dbKillCursors ) {
+ err->disabled = true;
+ }
+ else {
+ err->disabled = false;
+ err->nPrev++;
+ }
+ }
+
+ LastError * LastErrorHolder::startRequest( Message& m , LastError * le ) {
+ assert( le );
+ prepareErrForNewRequest( m, le );
+ return le;
+ }
+
+} // namespace mongo
diff --git a/src/mongo/db/lasterror.h b/src/mongo/db/lasterror.h
new file mode 100644
index 00000000000..86250e496a8
--- /dev/null
+++ b/src/mongo/db/lasterror.h
@@ -0,0 +1,146 @@
+// lasterror.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "../bson/oid.h"
+
+namespace mongo {
+ class BSONObjBuilder;
+ class Message;
+
+ struct LastError {
+ int code;
+ string msg;
+ enum UpdatedExistingType { NotUpdate, True, False } updatedExisting;
+ OID upsertedId;
+ OID writebackId;
+ long long nObjects;
+ int nPrev;
+ bool valid;
+ bool disabled;
+ void writeback( OID& oid ) {
+ reset( true );
+ writebackId = oid;
+ }
+ void raiseError(int _code , const char *_msg) {
+ reset( true );
+ code = _code;
+ msg = _msg;
+ }
+ void recordUpdate( bool _updateObjects , long long _nObjects , OID _upsertedId ) {
+ reset( true );
+ nObjects = _nObjects;
+ updatedExisting = _updateObjects ? True : False;
+ if ( _upsertedId.isSet() )
+ upsertedId = _upsertedId;
+
+ }
+ void recordDelete( long long nDeleted ) {
+ reset( true );
+ nObjects = nDeleted;
+ }
+ LastError() {
+ reset();
+ }
+ void reset( bool _valid = false ) {
+ code = 0;
+ msg.clear();
+ updatedExisting = NotUpdate;
+ nObjects = 0;
+ nPrev = 1;
+ valid = _valid;
+ disabled = false;
+ upsertedId.clear();
+ writebackId.clear();
+ }
+
+ /**
+ * @return if there is an err
+ */
+ bool appendSelf( BSONObjBuilder &b , bool blankErr = true );
+
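+        /** scoped guard that disables lastError reporting for its lifetime and
+            restores the previous 'disabled' state on destruction */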
+ struct Disabled : boost::noncopyable {
+ Disabled( LastError * le ) {
+ _le = le;
+ if ( _le ) {
+ _prev = _le->disabled;
+ _le->disabled = true;
+ }
+ else {
+ _prev = false;
+ }
+ }
+
+ ~Disabled() {
+ if ( _le )
+ _le->disabled = _prev;
+ }
+
+ LastError * _le;
+ bool _prev;
+ };
+
+ static LastError noError;
+ };
+
+ extern class LastErrorHolder {
+ public:
+ LastErrorHolder(){}
+ ~LastErrorHolder();
+
+ LastError * get( bool create = false );
+ LastError * getSafe() {
+ LastError * le = get(false);
+ if ( ! le ) {
+ error() << " no LastError!" << endl;
+ assert( le );
+ }
+ return le;
+ }
+
+ LastError * _get( bool create = false ); // may return a disabled LastError
+
+ void reset( LastError * le );
+
+ /** ok to call more than once. */
+ void initThread();
+
+ int getID();
+
+ void release();
+
+ /** when db receives a message/request, call this */
+ LastError * startRequest( Message& m , LastError * connectionOwned );
+
+ void disconnect( int clientId );
+
+ // used to disable lastError reporting while processing a killCursors message
+ // disable causes get() to return 0.
+ LastError *disableForCommand(); // only call once per command invocation!
+ private:
+ boost::thread_specific_ptr<LastError> _tl;
+
+ struct Status {
+ time_t time;
+ LastError *lerr;
+ };
+ } lastError;
+
+ void raiseError(int code , const char *msg);
+
+} // namespace mongo
diff --git a/src/mongo/db/matcher.cpp b/src/mongo/db/matcher.cpp
new file mode 100755
index 00000000000..2631845a757
--- /dev/null
+++ b/src/mongo/db/matcher.cpp
@@ -0,0 +1,1128 @@
+// matcher.cpp
+
+/* Matcher is our boolean expression evaluator for "where" clauses */
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "matcher.h"
+#include "../util/goodies.h"
+#include "../util/unittest.h"
+#include "diskloc.h"
+#include "../scripting/engine.h"
+#include "db.h"
+#include "queryutil.h"
+#include "client.h"
+
+#include "pdfile.h"
+
+namespace {
+ inline pcrecpp::RE_Options flags2options(const char* flags) {
+ pcrecpp::RE_Options options;
+ options.set_utf8(true);
+ while ( flags && *flags ) {
+ if ( *flags == 'i' )
+ options.set_caseless(true);
+ else if ( *flags == 'm' )
+ options.set_multiline(true);
+ else if ( *flags == 'x' )
+ options.set_extended(true);
+ else if ( *flags == 's' )
+ options.set_dotall(true);
+ flags++;
+ }
+ return options;
+ }
+}
+
+//#define DEBUGMATCHER(x) cout << x << endl;
+#define DEBUGMATCHER(x)
+
+namespace mongo {
+
+ extern BSONObj staticNull;
+
+ class Where {
+ public:
+ Where() {
+ jsScope = 0;
+ func = 0;
+ }
+ ~Where() {
+
+ if ( scope.get() ){
+ try {
+ scope->execSetup( "_mongo.readOnly = false;" , "make not read only" );
+ }
+ catch( DBException& e ){
+ warning() << "javascript scope cleanup interrupted" << causedBy( e ) << endl;
+ }
+ }
+
+ if ( jsScope ) {
+ delete jsScope;
+ jsScope = 0;
+ }
+ func = 0;
+ }
+
+ auto_ptr<Scope> scope;
+ ScriptingFunction func;
+ BSONObj *jsScope;
+
+ void setFunc(const char *code) {
+ massert( 10341 , "scope has to be created first!" , scope.get() );
+ func = scope->createFunction( code );
+ }
+
+ };
+
+ Matcher::~Matcher() {
+ delete _where;
+ _where = 0;
+ }
+
+ ElementMatcher::ElementMatcher( BSONElement e , int op, bool isNot )
+ : _toMatch( e ) , _compareOp( op ), _isNot( isNot ), _subMatcherOnPrimitives(false) {
+ if ( op == BSONObj::opMOD ) {
+ BSONObj o = e.embeddedObject();
+ _mod = o["0"].numberInt();
+ _modm = o["1"].numberInt();
+
+ uassert( 10073 , "mod can't be 0" , _mod );
+ }
+ else if ( op == BSONObj::opTYPE ) {
+ _type = (BSONType)(e.numberInt());
+ }
+ else if ( op == BSONObj::opELEM_MATCH ) {
+ BSONElement m = e;
+ uassert( 12517 , "$elemMatch needs an Object" , m.type() == Object );
+ BSONObj x = m.embeddedObject();
+ if ( x.firstElement().getGtLtOp() == 0 ) {
+ _subMatcher.reset( new Matcher( x ) );
+ _subMatcherOnPrimitives = false;
+ }
+ else {
+ // meant to act on primitives
+ _subMatcher.reset( new Matcher( BSON( "" << x ) ) );
+ _subMatcherOnPrimitives = true;
+ }
+ }
+ }
+
+ ElementMatcher::ElementMatcher( BSONElement e , int op , const BSONObj& array, bool isNot )
+ : _toMatch( e ) , _compareOp( op ), _isNot( isNot ), _subMatcherOnPrimitives(false) {
+
+ _myset.reset( new set<BSONElement,element_lt>() );
+
+ BSONObjIterator i( array );
+ while ( i.more() ) {
+ BSONElement ie = i.next();
+ if ( op == BSONObj::opALL && ie.type() == Object && ie.embeddedObject().firstElement().getGtLtOp() == BSONObj::opELEM_MATCH ) {
+ shared_ptr<Matcher> s;
+ s.reset( new Matcher( ie.embeddedObject().firstElement().embeddedObjectUserCheck() ) );
+ _allMatchers.push_back( s );
+ }
+ else if ( ie.type() == RegEx ) {
+ if ( !_myregex.get() ) {
+ _myregex.reset( new vector< RegexMatcher >() );
+ }
+ _myregex->push_back( RegexMatcher() );
+ RegexMatcher &rm = _myregex->back();
+ rm._re.reset( new pcrecpp::RE( ie.regex(), flags2options( ie.regexFlags() ) ) );
+ rm._fieldName = 0; // no need for field name
+ rm._regex = ie.regex();
+ rm._flags = ie.regexFlags();
+ rm._isNot = false;
+ bool purePrefix;
+ string prefix = simpleRegex(rm._regex, rm._flags, &purePrefix);
+ if (purePrefix)
+ rm._prefix = prefix;
+ }
+ else {
+ uassert( 15882, "$elemMatch not allowed within $in",
+ ie.type() != Object ||
+ ie.embeddedObject().firstElement().getGtLtOp() != BSONObj::opELEM_MATCH );
+ _myset->insert(ie);
+ }
+ }
+
+ if ( _allMatchers.size() ) {
+ uassert( 13020 , "with $all, can't mix $elemMatch and others" , _myset->size() == 0 && !_myregex.get());
+ }
+
+ }
+
+ int ElementMatcher::inverseOfNegativeCompareOp() const {
+ verify( 15892, negativeCompareOp() );
+ return _compareOp == BSONObj::NE ? BSONObj::Equality : BSONObj::opIN;
+ }
+
+ bool ElementMatcher::negativeCompareOpContainsNull() const {
+ verify( 15893, negativeCompareOp() );
+ return (_compareOp == BSONObj::NE && _toMatch.type() != jstNULL) ||
+ (_compareOp == BSONObj::NIN && _myset->count( staticNull.firstElement()) == 0 );
+ }
+
+ void Matcher::addRegex(const char *fieldName, const char *regex, const char *flags, bool isNot) {
+
+ RegexMatcher rm;
+ rm._re.reset( new pcrecpp::RE(regex, flags2options(flags)) );
+ rm._fieldName = fieldName;
+ rm._regex = regex;
+ rm._flags = flags;
+ rm._isNot = isNot;
+        if (!isNot) { //TODO something smarter
+            bool purePrefix;
+            string prefix = simpleRegex(regex, flags, &purePrefix);
+            if (purePrefix)
+                rm._prefix = prefix;
+        }
+
+        // push only after _prefix is set; pushing earlier stored a copy that never got the prefix optimization
+        _regexs.push_back(rm);
+ }
+
+ bool Matcher::addOp( const BSONElement &e, const BSONElement &fe, bool isNot, const char *& regex, const char *&flags ) {
+ const char *fn = fe.fieldName();
+ int op = fe.getGtLtOp( -1 );
+ if ( op == -1 ) {
+ if ( !isNot && fn[1] == 'r' && fn[2] == 'e' && fn[3] == 'f' && fn[4] == 0 ) {
+ return false; // { $ref : xxx } - treat as normal object
+ }
+ uassert( 10068 , (string)"invalid operator: " + fn , op != -1 );
+ }
+
+ switch ( op ) {
+ case BSONObj::GT:
+ case BSONObj::GTE:
+ case BSONObj::LT:
+ case BSONObj::LTE: {
+ shared_ptr< BSONObjBuilder > b( new BSONObjBuilder() );
+ _builders.push_back( b );
+ b->appendAs(fe, e.fieldName());
+ addBasic(b->done().firstElement(), op, isNot);
+ break;
+ }
+ case BSONObj::NE: {
+ _haveNeg = true;
+ shared_ptr< BSONObjBuilder > b( new BSONObjBuilder() );
+ _builders.push_back( b );
+ b->appendAs(fe, e.fieldName());
+ addBasic(b->done().firstElement(), BSONObj::NE, isNot);
+ break;
+ }
+ case BSONObj::opALL:
+ _all = true;
+ case BSONObj::opIN: {
+ uassert( 13276 , "$in needs an array" , fe.isABSONObj() );
+ _basics.push_back( ElementMatcher( e , op , fe.embeddedObject(), isNot ) );
+ BSONObjIterator i( fe.embeddedObject() );
+ while( i.more() ) {
+ if ( i.next().type() == Array ) {
+ _hasArray = true;
+ }
+ }
+ break;
+ }
+ case BSONObj::NIN:
+ uassert( 13277 , "$nin needs an array" , fe.isABSONObj() );
+ _haveNeg = true;
+ _basics.push_back( ElementMatcher( e , op , fe.embeddedObject(), isNot ) );
+ break;
+ case BSONObj::opMOD:
+ case BSONObj::opTYPE:
+ case BSONObj::opELEM_MATCH: {
+ shared_ptr< BSONObjBuilder > b( new BSONObjBuilder() );
+ _builders.push_back( b );
+ b->appendAs(fe, e.fieldName());
+ // these are types where ElementMatcher has all the info
+ _basics.push_back( ElementMatcher( b->done().firstElement() , op, isNot ) );
+ break;
+ }
+ case BSONObj::opSIZE: {
+ shared_ptr< BSONObjBuilder > b( new BSONObjBuilder() );
+ _builders.push_back( b );
+ b->appendAs(fe, e.fieldName());
+ addBasic(b->done().firstElement(), BSONObj::opSIZE, isNot);
+ _haveSize = true;
+ break;
+ }
+ case BSONObj::opEXISTS: {
+ shared_ptr< BSONObjBuilder > b( new BSONObjBuilder() );
+ _builders.push_back( b );
+ b->appendAs(fe, e.fieldName());
+ addBasic(b->done().firstElement(), BSONObj::opEXISTS, isNot);
+ break;
+ }
+ case BSONObj::opREGEX: {
+ uassert( 13032, "can't use $not with $regex, use BSON regex type instead", !isNot );
+ if ( fe.type() == RegEx ) {
+ regex = fe.regex();
+ flags = fe.regexFlags();
+ }
+ else {
+ regex = fe.valuestrsafe();
+ }
+ break;
+ }
+ case BSONObj::opOPTIONS: {
+ uassert( 13029, "can't use $not with $options, use BSON regex type instead", !isNot );
+ flags = fe.valuestrsafe();
+ break;
+ }
+ case BSONObj::opNEAR:
+ case BSONObj::opWITHIN:
+ case BSONObj::opMAX_DISTANCE:
+ break;
+ default:
+            uassert( 10069 , (string)"BUG - can't handle operator: " + fn , 0 );
+ }
+ return true;
+ }
+
+ void Matcher::parseExtractedClause( const BSONElement &e, list< shared_ptr< Matcher > > &matchers ) {
+ uassert( 13086, "$and/$or/$nor must be a nonempty array", e.type() == Array && e.embeddedObject().nFields() > 0 );
+ BSONObjIterator j( e.embeddedObject() );
+ while( j.more() ) {
+ BSONElement f = j.next();
+ uassert( 13087, "$and/$or/$nor match element must be an object", f.type() == Object );
+ matchers.push_back( shared_ptr< Matcher >( new Matcher( f.embeddedObject(), true ) ) );
+ }
+ }
+
+ bool Matcher::parseClause( const BSONElement &e ) {
+ const char *ef = e.fieldName();
+
+ if ( ef[ 0 ] != '$' )
+ return false;
+
+ // $and
+ if ( ef[ 1 ] == 'a' && ef[ 2 ] == 'n' && ef[ 3 ] == 'd' ) {
+ parseExtractedClause( e, _andMatchers );
+ return true;
+ }
+
+ // $or
+ if ( ef[ 1 ] == 'o' && ef[ 2 ] == 'r' && ef[ 3 ] == 0 ) {
+ parseExtractedClause( e, _orMatchers );
+ return true;
+ }
+
+ // $nor
+ if ( ef[ 1 ] == 'n' && ef[ 2 ] == 'o' && ef[ 3 ] == 'r' && ef[ 4 ] == 0 ) {
+ parseExtractedClause( e, _norMatchers );
+ return true;
+ }
+
+ // $comment
+ if ( ef[ 1 ] == 'c' && ef[ 2 ] == 'o' && ef[ 3 ] == 'm' && str::equals( ef , "$comment" ) ) {
+ return true;
+ }
+
+ return false;
+ }
+
+ // $where: function()...
+ NOINLINE_DECL void Matcher::parseWhere( const BSONElement &e ) {
+ uassert(15902 , "$where expression has an unexpected type", e.type() == String || e.type() == CodeWScope || e.type() == Code );
+ uassert( 10066 , "$where may only appear once in query", _where == 0 );
+ uassert( 10067 , "$where query, but no script engine", globalScriptEngine );
+ massert( 13089 , "no current client needed for $where" , haveClient() );
+ _where = new Where();
+ _where->scope = globalScriptEngine->getPooledScope( cc().ns() );
+ _where->scope->localConnect( cc().database()->name.c_str() );
+
+ if ( e.type() == CodeWScope ) {
+ _where->setFunc( e.codeWScopeCode() );
+ _where->jsScope = new BSONObj( e.codeWScopeScopeData() );
+ }
+ else {
+ const char *code = e.valuestr();
+ _where->setFunc(code);
+ }
+
+ _where->scope->execSetup( "_mongo.readOnly = true;" , "make read only" );
+ }
+
+ void Matcher::parseMatchExpressionElement( const BSONElement &e, bool nested ) {
+
+ uassert( 13629 , "can't have undefined in a query expression" , e.type() != Undefined );
+
+ if ( parseClause( e ) ) {
+ return;
+ }
+
+ const char *fn = e.fieldName();
+ if ( str::equals(fn, "$where") ) {
+ parseWhere(e);
+ return;
+ }
+
+ if ( e.type() == RegEx ) {
+ addRegex( fn, e.regex(), e.regexFlags() );
+ return;
+ }
+
+ // greater than / less than...
+ // e.g., e == { a : { $gt : 3 } }
+ // or
+ // { a : { $in : [1,2,3] } }
+ if ( e.type() == Object ) {
+ // support {$regex:"a|b", $options:"imx"}
+ const char* regex = NULL;
+ const char* flags = "";
+
+ // e.g., fe == { $gt : 3 }
+ BSONObjIterator j(e.embeddedObject());
+ bool isOperator = false;
+ while ( j.more() ) {
+ BSONElement fe = j.next();
+ const char *fn = fe.fieldName();
+
+ if ( fn[0] == '$' && fn[1] ) {
+ isOperator = true;
+
+ if ( fn[1] == 'n' && fn[2] == 'o' && fn[3] == 't' && fn[4] == 0 ) {
+ _haveNeg = true;
+ switch( fe.type() ) {
+ case Object: {
+ BSONObjIterator k( fe.embeddedObject() );
+ uassert( 13030, "$not cannot be empty", k.more() );
+ while( k.more() ) {
+ addOp( e, k.next(), true, regex, flags );
+ }
+ break;
+ }
+ case RegEx:
+ addRegex( e.fieldName(), fe.regex(), fe.regexFlags(), true );
+ break;
+ default:
+ uassert( 13031, "invalid use of $not", false );
+ }
+ }
+ else {
+ if ( !addOp( e, fe, false, regex, flags ) ) {
+ isOperator = false;
+ break;
+ }
+ }
+ }
+ else {
+ isOperator = false;
+ break;
+ }
+ }
+ if (regex) {
+ addRegex(e.fieldName(), regex, flags);
+ }
+ if ( isOperator )
+ return;
+ }
+
+ if ( e.type() == Array ) {
+ _hasArray = true;
+ }
+ else if( *fn == '$' ) {
+ if( str::equals(fn, "$atomic") || str::equals(fn, "$isolated") ) {
+ uassert( 14844, "$atomic specifier must be a top level field", !nested );
+ _atomic = e.trueValue();
+ return;
+ }
+ }
+
+ // normal, simple case e.g. { a : "foo" }
+ addBasic(e, BSONObj::Equality, false);
+ }
+
+ /* _jsobj - the query pattern
+ */
+ Matcher::Matcher(const BSONObj &jsobj, bool nested) :
+ _where(0), _jsobj(jsobj), _haveSize(), _all(), _hasArray(0), _haveNeg(), _atomic(false) {
+
+ BSONObjIterator i(_jsobj);
+ while ( i.more() ) {
+ parseMatchExpressionElement( i.next(), nested );
+ }
+ }
+
+ Matcher::Matcher( const Matcher &docMatcher, const BSONObj &key ) :
+ _where(0), _constrainIndexKey( key ), _haveSize(), _all(), _hasArray(0), _haveNeg(), _atomic(false) {
+ // Filter out match components that will provide an incorrect result
+ // given a key from a single key index.
+ for( vector< ElementMatcher >::const_iterator i = docMatcher._basics.begin(); i != docMatcher._basics.end(); ++i ) {
+ if ( key.hasField( i->_toMatch.fieldName() ) ) {
+ switch( i->_compareOp ) {
+ case BSONObj::opSIZE:
+ case BSONObj::opALL:
+ case BSONObj::NE:
+ case BSONObj::NIN:
+ case BSONObj::opEXISTS: // We can't match on index in this case.
+ case BSONObj::opTYPE: // For $type:10 (null), a null key could be a missing field or a null value field.
+ break;
+ case BSONObj::opIN: {
+ bool inContainsArray = false;
+ for( set<BSONElement,element_lt>::const_iterator j = i->_myset->begin(); j != i->_myset->end(); ++j ) {
+ if ( j->type() == Array ) {
+ inContainsArray = true;
+ break;
+ }
+ }
+ // Can't match an array to its first indexed element.
+ if ( !i->_isNot && !inContainsArray ) {
+ _basics.push_back( *i );
+ }
+ break;
+ }
+ default: {
+ // Can't match an array to its first indexed element.
+ if ( !i->_isNot && i->_toMatch.type() != Array ) {
+ _basics.push_back( *i );
+ }
+ }
+ }
+ }
+ }
+ for( vector<RegexMatcher>::const_iterator it = docMatcher._regexs.begin();
+ it != docMatcher._regexs.end();
+ ++it) {
+ if ( !it->_isNot && key.hasField( it->_fieldName ) ) {
+ _regexs.push_back(*it);
+ }
+ }
+ // Recursively filter match components for and and or matchers.
+ for( list< shared_ptr< Matcher > >::const_iterator i = docMatcher._andMatchers.begin(); i != docMatcher._andMatchers.end(); ++i ) {
+ _andMatchers.push_back( shared_ptr< Matcher >( new Matcher( **i, key ) ) );
+ }
+ for( list< shared_ptr< Matcher > >::const_iterator i = docMatcher._orMatchers.begin(); i != docMatcher._orMatchers.end(); ++i ) {
+ _orMatchers.push_back( shared_ptr< Matcher >( new Matcher( **i, key ) ) );
+ }
+ }
+
+ inline bool regexMatches(const RegexMatcher& rm, const BSONElement& e) {
+ switch (e.type()) {
+ case String:
+ case Symbol:
+ if (rm._prefix.empty())
+ return rm._re->PartialMatch(e.valuestr());
+ else
+ return !strncmp(e.valuestr(), rm._prefix.c_str(), rm._prefix.size());
+ case RegEx:
+ return !strcmp(rm._regex, e.regex()) && !strcmp(rm._flags, e.regexFlags());
+ default:
+ return false;
+ }
+ }
+
+ inline int Matcher::valuesMatch(const BSONElement& l, const BSONElement& r, int op, const ElementMatcher& bm) const {
+ assert( op != BSONObj::NE && op != BSONObj::NIN );
+
+ if ( op == BSONObj::Equality ) {
+ return l.valuesEqual(r);
+ }
+
+ if ( op == BSONObj::opIN ) {
+ // { $in : [1,2,3] }
+ int count = bm._myset->count(l);
+ if ( count )
+ return count;
+ if ( bm._myregex.get() ) {
+ for( vector<RegexMatcher>::const_iterator i = bm._myregex->begin(); i != bm._myregex->end(); ++i ) {
+ if ( regexMatches( *i, l ) ) {
+ return true;
+ }
+ }
+ }
+ }
+
+ if ( op == BSONObj::opSIZE ) {
+ if ( l.type() != Array )
+ return 0;
+ int count = 0;
+ BSONObjIterator i( l.embeddedObject() );
+ while( i.moreWithEOO() ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ ++count;
+ }
+ return count == r.number();
+ }
+
+ if ( op == BSONObj::opMOD ) {
+ if ( ! l.isNumber() )
+ return false;
+
+ return l.numberLong() % bm._mod == bm._modm;
+ }
+
+ if ( op == BSONObj::opTYPE ) {
+ return bm._type == l.type();
+ }
+
+ /* check LT, GTE, ... */
+ if ( l.canonicalType() != r.canonicalType() )
+ return false;
+ int c = compareElementValues(l, r);
+ if ( c < -1 ) c = -1;
+ if ( c > 1 ) c = 1;
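+        // the relational ops are bitmasks over the outcomes {less, equal, greater};
+        // map the clamped comparison result to its bit and test it against op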
+ int z = 1 << (c+1);
+ return (op & z);
+ }
+
+ int Matcher::inverseMatch(const char *fieldName, const BSONElement &toMatch, const BSONObj &obj, const ElementMatcher& bm , MatchDetails * details ) const {
+ int inverseRet = matchesDotted( fieldName, toMatch, obj, bm.inverseOfNegativeCompareOp(), bm , false , details );
+ if ( bm.negativeCompareOpContainsNull() ) {
+ return ( inverseRet <= 0 ) ? 1 : 0;
+ }
+ return -inverseRet;
+ }
+
+ int retExistsFound( const ElementMatcher &bm ) {
+ return bm._toMatch.trueValue() ? 1 : -1;
+ }
+
+ /* Check if a particular field matches.
+
+       fieldName - field to match, e.g. "a.b" when reaching into an embedded object.
+       toMatch - element we want to match.
+       obj - database object to check against
+       compareOp - Equality, LT, GT, etc. This may be different from, and should supersede, the compare op in em.
+       isArr - true if obj is an array encountered while traversing the dotted field name.
+
+ Special forms:
+
+ { "a.b" : 3 } means obj.a.b == 3
+ { a : { $lt : 3 } } means obj.a < 3
+ { a : { $in : [1,2] } } means [1,2].contains(obj.a)
+
+ return value
+ -1 mismatch
+ 0 missing element
+ 1 match
+ */
+ int Matcher::matchesDotted(const char *fieldName, const BSONElement& toMatch, const BSONObj& obj, int compareOp, const ElementMatcher& em , bool isArr, MatchDetails * details ) const {
+ DEBUGMATCHER( "\t matchesDotted : " << fieldName << " hasDetails: " << ( details ? "yes" : "no" ) );
+
+ if ( compareOp == BSONObj::opALL ) {
+
+ if ( em._allMatchers.size() ) {
+ // $all query matching will not be performed against indexes, so the field
+ // to match is always extracted from the full document.
+ BSONElement e = obj.getFieldDotted( fieldName );
+ // The $all/$elemMatch operator only matches arrays.
+ if ( e.type() != Array ) {
+ return -1;
+ }
+
+ for ( unsigned i=0; i<em._allMatchers.size(); i++ ) {
+ bool found = false;
+ BSONObjIterator x( e.embeddedObject() );
+ while ( x.more() ) {
+ BSONElement f = x.next();
+
+ if ( f.type() != Object )
+ continue;
+ if ( em._allMatchers[i]->matches( f.embeddedObject() ) ) {
+ found = true;
+ break;
+ }
+ }
+
+ if ( ! found )
+ return -1;
+ }
+
+ return 1;
+ }
+
+ if ( em._myset->size() == 0 && !em._myregex.get() )
+ return -1; // is this desired?
+
+ BSONElementSet myValues;
+ obj.getFieldsDotted( fieldName , myValues );
+
+ for( set< BSONElement, element_lt >::const_iterator i = em._myset->begin(); i != em._myset->end(); ++i ) {
+ // ignore nulls
+ if ( i->type() == jstNULL )
+ continue;
+
+ if ( myValues.count( *i ) == 0 )
+ return -1;
+ }
+
+ if ( !em._myregex.get() )
+ return 1;
+
+ for( vector< RegexMatcher >::const_iterator i = em._myregex->begin(); i != em._myregex->end(); ++i ) {
+ bool match = false;
+ for( BSONElementSet::const_iterator j = myValues.begin(); j != myValues.end(); ++j ) {
+ if ( regexMatches( *i, *j ) ) {
+ match = true;
+ break;
+ }
+ }
+ if ( !match )
+ return -1;
+ }
+
+ return 1;
+ } // end opALL
+
+ if ( compareOp == BSONObj::NE || compareOp == BSONObj::NIN ) {
+ return inverseMatch( fieldName, toMatch, obj, em , details );
+ }
+
+ BSONElement e;
+ bool indexed = !_constrainIndexKey.isEmpty();
+ if ( indexed ) {
+ e = obj.getFieldUsingIndexNames(fieldName, _constrainIndexKey);
+ if( e.eoo() ) {
+ cout << "obj: " << obj << endl;
+ cout << "fieldName: " << fieldName << endl;
+ cout << "_constrainIndexKey: " << _constrainIndexKey << endl;
+ assert( !e.eoo() );
+ }
+ }
+ else {
+
+ const char *p = strchr(fieldName, '.');
+ if ( p ) {
+ string left(fieldName, p-fieldName);
+
+ BSONElement se = obj.getField(left.c_str());
+ if ( se.eoo() )
+ ;
+ else if ( se.type() != Object && se.type() != Array )
+ ;
+ else {
+ BSONObj eo = se.embeddedObject();
+ return matchesDotted(p+1, toMatch, eo, compareOp, em, se.type() == Array , details );
+ }
+ }
+
+ // An array was encountered while scanning for components of the field name.
+ if ( isArr ) {
+ DEBUGMATCHER( "\t\t isArr 1 : obj : " << obj );
+ BSONObjIterator ai(obj);
+ bool found = false;
+ while ( ai.moreWithEOO() ) {
+ BSONElement z = ai.next();
+
+ if( strcmp(z.fieldName(),fieldName) == 0 ) {
+ if ( compareOp == BSONObj::opEXISTS ) {
+ return retExistsFound( em );
+ }
+ if (valuesMatch(z, toMatch, compareOp, em) ) {
+ // "field.<n>" array notation was used
+ if ( details )
+ details->_elemMatchKey = z.fieldName();
+ return 1;
+ }
+ }
+
+ if ( z.type() == Object ) {
+ BSONObj eo = z.embeddedObject();
+ int cmp = matchesDotted(fieldName, toMatch, eo, compareOp, em, false, details );
+ if ( cmp > 0 ) {
+ if ( details )
+ details->_elemMatchKey = z.fieldName();
+ return 1;
+ }
+ else if ( cmp < 0 ) {
+ found = true;
+ }
+ }
+ }
+ return found ? -1 : 0;
+ }
+
+ if( p ) {
+ // Left portion of field name was not found or wrong type.
+ return 0;
+ }
+ else {
+ e = obj.getField(fieldName);
+ }
+ }
+
+ if ( compareOp == BSONObj::opEXISTS ) {
+ if( e.eoo() ) {
+ return 0;
+ } else {
+ return retExistsFound( em );
+ }
+ }
+ else if ( ( e.type() != Array || indexed || compareOp == BSONObj::opSIZE ) &&
+ valuesMatch(e, toMatch, compareOp, em ) ) {
+ return 1;
+ }
+ else if ( e.type() == Array && compareOp != BSONObj::opSIZE ) {
+ BSONObjIterator ai(e.embeddedObject());
+
+ while ( ai.moreWithEOO() ) {
+ BSONElement z = ai.next();
+
+ if ( compareOp == BSONObj::opELEM_MATCH ) {
+ if ( z.type() == Object ) {
+ if ( em._subMatcher->matches( z.embeddedObject() ) ) {
+ if ( details )
+ details->_elemMatchKey = z.fieldName();
+ return 1;
+ }
+ }
+ else if ( em._subMatcherOnPrimitives ) {
+ if ( z.type() && em._subMatcher->matches( z.wrap( "" ) ) ) {
+ if ( details )
+ details->_elemMatchKey = z.fieldName();
+ return 1;
+ }
+ }
+ }
+ else {
+ if ( valuesMatch( z, toMatch, compareOp, em) ) {
+ if ( details )
+ details->_elemMatchKey = z.fieldName();
+ return 1;
+ }
+ }
+
+ }
+
+ // match an entire array to itself
+ if ( compareOp == BSONObj::Equality && e.woCompare( toMatch , false ) == 0 ) {
+ return 1;
+ }
+ if ( compareOp == BSONObj::opIN && valuesMatch( e, toMatch, compareOp, em ) ) {
+ return 1;
+ }
+ }
+ else if ( e.eoo() ) {
+ return 0;
+ }
+ return -1;
+ }
+
+ extern int dump;
+
+ /* See if an object matches the query.
+ */
+ bool Matcher::matches(const BSONObj& jsobj , MatchDetails * details ) const {
+ LOG(5) << "Matcher::matches() " << jsobj.toString() << endl;
+
+ /* assuming there is usually only one thing to match. if more this
+ could be slow sometimes. */
+
+ // check normal non-regex cases:
+ for ( unsigned i = 0; i < _basics.size(); i++ ) {
+ const ElementMatcher& bm = _basics[i];
+ const BSONElement& m = bm._toMatch;
+ // -1=mismatch. 0=missing element. 1=match
+ int cmp = matchesDotted(m.fieldName(), m, jsobj, bm._compareOp, bm , false , details );
+ if ( cmp == 0 && bm._compareOp == BSONObj::opEXISTS ) {
+ // If missing, match cmp is opposite of $exists spec.
+ cmp = -retExistsFound(bm);
+ }
+ if ( bm._isNot )
+ cmp = -cmp;
+ if ( cmp < 0 )
+ return false;
+ if ( cmp == 0 ) {
+ /* missing is ok iff we were looking for null */
+ if ( m.type() == jstNULL || m.type() == Undefined ||
+ ( ( bm._compareOp == BSONObj::opIN || bm._compareOp == BSONObj::NIN ) && bm._myset->count( staticNull.firstElement() ) > 0 ) ) {
+ if ( bm.negativeCompareOp() ^ bm._isNot ) {
+ return false;
+ }
+ }
+ else {
+ if ( !bm._isNot ) {
+ return false;
+ }
+ }
+ }
+ }
+
+ for (vector<RegexMatcher>::const_iterator it = _regexs.begin();
+ it != _regexs.end();
+ ++it) {
+ BSONElementSet s;
+ if ( !_constrainIndexKey.isEmpty() ) {
+ BSONElement e = jsobj.getFieldUsingIndexNames(it->_fieldName, _constrainIndexKey);
+
+ // Should only have keys nested one deep here, for geo-indices
+ // TODO: future indices may nest deeper?
+ if( e.type() == Array ){
+ BSONObjIterator i( e.Obj() );
+ while( i.more() ){
+ s.insert( i.next() );
+ }
+ }
+ else if ( !e.eoo() )
+ s.insert( e );
+
+ }
+ else {
+ jsobj.getFieldsDotted( it->_fieldName, s );
+ }
+ bool match = false;
+ for( BSONElementSet::const_iterator i = s.begin(); i != s.end(); ++i )
+ if ( regexMatches(*it, *i) )
+ match = true;
+ if ( !match ^ it->_isNot )
+ return false;
+ }
+
+ if ( _orDedupConstraints.size() > 0 ) {
+ for( vector< shared_ptr< FieldRangeVector > >::const_iterator i = _orDedupConstraints.begin();
+ i != _orDedupConstraints.end(); ++i ) {
+ if ( (*i)->matches( jsobj ) ) {
+ return false;
+ }
+ }
+ }
+
+ if ( _andMatchers.size() > 0 ) {
+ for( list< shared_ptr< Matcher > >::const_iterator i = _andMatchers.begin();
+ i != _andMatchers.end(); ++i ) {
+ // SERVER-3192 Track field matched using details the same as for
+ // top level fields, at least for now.
+ if ( !(*i)->matches( jsobj, details ) ) {
+ return false;
+ }
+ }
+ }
+
+ if ( _orMatchers.size() > 0 ) {
+ bool match = false;
+ for( list< shared_ptr< Matcher > >::const_iterator i = _orMatchers.begin();
+ i != _orMatchers.end(); ++i ) {
+ // SERVER-205 don't submit details - we don't want to track field
+ // matched within $or
+ if ( (*i)->matches( jsobj ) ) {
+ match = true;
+ break;
+ }
+ }
+ if ( !match ) {
+ return false;
+ }
+ }
+
+ if ( _norMatchers.size() > 0 ) {
+ for( list< shared_ptr< Matcher > >::const_iterator i = _norMatchers.begin();
+ i != _norMatchers.end(); ++i ) {
+ // SERVER-205 don't submit details - we don't want to track field
+ // matched within $nor
+ if ( (*i)->matches( jsobj ) ) {
+ return false;
+ }
+ }
+ }
+
+ if ( _where ) {
+ if ( _where->func == 0 ) {
+ uassert( 10070 , "$where compile error", false);
+ return false; // didn't compile
+ }
+
+ if ( _where->jsScope ) {
+ _where->scope->init( _where->jsScope );
+ }
+ _where->scope->setObject( "obj", const_cast< BSONObj & >( jsobj ) );
+ _where->scope->setBoolean( "fullObject" , true ); // this is a hack b/c fullObject used to be relevant
+
+ int err = _where->scope->invoke( _where->func , 0, &jsobj , 1000 * 60 , false );
+ if ( err == -3 ) { // INVOKE_ERROR
+ stringstream ss;
+ ss << "error on invocation of $where function:\n"
+ << _where->scope->getError();
+ uassert( 10071 , ss.str(), false);
+ return false;
+ }
+ else if ( err != 0 ) { // ! INVOKE_SUCCESS
+ uassert( 10072 , "unknown error in invocation of $where function", false);
+ return false;
+ }
+ return _where->scope->getBoolean( "return" ) != 0;
+
+ }
+
+ return true;
+ }
+
+ bool Matcher::keyMatch( const Matcher &docMatcher ) const {
+ // Quick check certain non key match cases.
+ if ( docMatcher._all
+ || docMatcher._haveSize
+ || docMatcher._hasArray // We can't match an array to its first indexed element using keymatch
+ || docMatcher._haveNeg ) {
+ return false;
+ }
+
+ // Check that all match components are available in the index matcher.
+ if ( !( _basics.size() == docMatcher._basics.size() && _regexs.size() == docMatcher._regexs.size() && !docMatcher._where ) ) {
+ return false;
+ }
+ if ( _andMatchers.size() != docMatcher._andMatchers.size() ) {
+ return false;
+ }
+ if ( _orMatchers.size() != docMatcher._orMatchers.size() ) {
+ return false;
+ }
+ if ( docMatcher._norMatchers.size() > 0 ) {
+ return false;
+ }
+ if ( docMatcher._orDedupConstraints.size() > 0 ) {
+ return false;
+ }
+
+ // Recursively check that all submatchers support key match.
+ {
+ list< shared_ptr< Matcher > >::const_iterator i = _andMatchers.begin();
+ list< shared_ptr< Matcher > >::const_iterator j = docMatcher._andMatchers.begin();
+ while( i != _andMatchers.end() ) {
+ if ( !(*i)->keyMatch( **j ) ) {
+ return false;
+ }
+ ++i; ++j;
+ }
+ }
+ {
+ list< shared_ptr< Matcher > >::const_iterator i = _orMatchers.begin();
+ list< shared_ptr< Matcher > >::const_iterator j = docMatcher._orMatchers.begin();
+ while( i != _orMatchers.end() ) {
+ if ( !(*i)->keyMatch( **j ) ) {
+ return false;
+ }
+ ++i; ++j;
+ }
+ }
+ // Nor matchers and or dedup constraints aren't created for index matchers,
+ // so no need to check those here.
+ return true;
+ }
+
+
+ /*- just for testing -- */
+#pragma pack(1)
+ struct JSObj1 {
+ JSObj1() {
+ totsize=sizeof(JSObj1);
+ n = NumberDouble;
+ strcpy_s(nname, 5, "abcd");
+ N = 3.1;
+ s = String;
+ strcpy_s(sname, 7, "abcdef");
+ slen = 10;
+ strcpy_s(sval, 10, "123456789");
+ eoo = EOO;
+ }
+ unsigned totsize;
+
+ char n;
+ char nname[5];
+ double N;
+
+ char s;
+ char sname[7];
+ unsigned slen;
+ char sval[10];
+
+ char eoo;
+ };
+#pragma pack()
+
+ struct JSObj1 js1;
+
+#pragma pack(1)
+ struct JSObj2 {
+ JSObj2() {
+ totsize=sizeof(JSObj2);
+ s = String;
+ strcpy_s(sname, 7, "abcdef");
+ slen = 10;
+ strcpy_s(sval, 10, "123456789");
+ eoo = EOO;
+ }
+ unsigned totsize;
+ char s;
+ char sname[7];
+ unsigned slen;
+ char sval[10];
+ char eoo;
+ } js2;
+
+ struct JSUnitTest : public UnitTest {
+ void run() {
+
+ BSONObj j1((const char *) &js1);
+ BSONObj j2((const char *) &js2);
+ Matcher m(j2);
+ assert( m.matches(j1) );
+ js2.sval[0] = 'z';
+ assert( !m.matches(j1) );
+ Matcher n(j1);
+ assert( n.matches(j1) );
+ assert( !n.matches(j2) );
+
+ BSONObj j0 = BSONObj();
+// BSONObj j0((const char *) &js0);
+ Matcher p(j0);
+ assert( p.matches(j1) );
+ assert( p.matches(j2) );
+ }
+ } jsunittest;
+
+#pragma pack()
+
+ struct RXTest : public UnitTest {
+
+ RXTest() {
+ }
+
+ void run() {
+ /*
+ static const boost::regex e("(\\d{4}[- ]){3}\\d{4}");
+ static const boost::regex b(".....");
+ out() << "regex result: " << regex_match("hello", e) << endl;
+ out() << "regex result: " << regex_match("abcoo", b) << endl;
+ */
+
+ int ret = 0;
+
+ pcre_config( PCRE_CONFIG_UTF8 , &ret );
+ massert( 10342 , "pcre not compiled with utf8 support" , ret );
+
+ pcrecpp::RE re1(")({a}h.*o");
+ pcrecpp::RE re("h.llo");
+ assert( re.FullMatch("hello") );
+ assert( !re1.FullMatch("hello") );
+
+
+ pcrecpp::RE_Options options;
+ options.set_utf8(true);
+ pcrecpp::RE part("dwi", options);
+ assert( part.PartialMatch("dwight") );
+
+ pcre_config( PCRE_CONFIG_UNICODE_PROPERTIES , &ret );
+ if ( ! ret )
+ cout << "warning: some regex utf8 things will not work. pcre build doesn't have --enable-unicode-properties" << endl;
+
+ }
+ } rxtest;
+
+} // namespace mongo
diff --git a/src/mongo/db/matcher.h b/src/mongo/db/matcher.h
new file mode 100644
index 00000000000..b6994a79229
--- /dev/null
+++ b/src/mongo/db/matcher.h
@@ -0,0 +1,276 @@
+// matcher.h
+
+/* Matcher is our boolean expression evaluator for "where" clauses */
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "jsobj.h"
+#include "pcrecpp.h"
+
+namespace mongo {
+
+ class Cursor;
+ class CoveredIndexMatcher;
+ class Matcher;
+ class FieldRangeVector;
+
+ class RegexMatcher {
+ public:
+ const char *_fieldName;
+ const char *_regex;
+ const char *_flags;
+ string _prefix;
+ shared_ptr< pcrecpp::RE > _re;
+ bool _isNot;
+ RegexMatcher() : _isNot() {}
+ };
+
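+    /** orders BSONElements by canonical type, then value, ignoring field names;
+        used for the element sets built for $in / $nin / $all */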
+ struct element_lt {
+ bool operator()(const BSONElement& l, const BSONElement& r) const {
+ int x = (int) l.canonicalType() - (int) r.canonicalType();
+ if ( x < 0 ) return true;
+ else if ( x > 0 ) return false;
+ return compareElementValues(l,r) < 0;
+ }
+ };
+
+
+ class ElementMatcher {
+ public:
+
+ ElementMatcher() {
+ }
+
+ ElementMatcher( BSONElement e , int op, bool isNot );
+
+ ElementMatcher( BSONElement e , int op , const BSONObj& array, bool isNot );
+
+ ~ElementMatcher() { }
+
+ bool negativeCompareOp() const { return _compareOp == BSONObj::NE || _compareOp == BSONObj::NIN; }
+ int inverseOfNegativeCompareOp() const;
+ bool negativeCompareOpContainsNull() const;
+
+ BSONElement _toMatch;
+ int _compareOp;
+ bool _isNot;
+ shared_ptr< set<BSONElement,element_lt> > _myset;
+ shared_ptr< vector<RegexMatcher> > _myregex;
+
+ // these are for specific operators
+ int _mod;
+ int _modm;
+ BSONType _type;
+
+ shared_ptr<Matcher> _subMatcher;
+ bool _subMatcherOnPrimitives ;
+
+ vector< shared_ptr<Matcher> > _allMatchers;
+ };
+
+ class Where; // used for $where javascript eval
+ class DiskLoc;
+
+ struct MatchDetails {
+ MatchDetails() {
+ reset();
+ }
+
+ void reset() {
+ _loadedObject = false;
+ _elemMatchKey = 0;
+ }
+
+ string toString() const {
+ stringstream ss;
+ ss << "loadedObject: " << _loadedObject << " ";
+ ss << "elemMatchKey: " << ( _elemMatchKey ? _elemMatchKey : "NULL" ) << " ";
+ return ss.str();
+ }
+
+ bool _loadedObject;
+ const char * _elemMatchKey; // warning, this may go out of scope if matched object does
+ };
+
+ /* Match BSON objects against a query pattern.
+
+ e.g.
+ db.foo.find( { a : 3 } );
+
+ { a : 3 } is the pattern object. See wiki documentation for full info.
+
+ GT/LT:
+ { a : { $gt : 3 } }
+ Not equal:
+ { a : { $ne : 3 } }
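+
+       A minimal C++ usage sketch (the document passed to matches() is illustrative):
+
+           Matcher m( BSON( "a" << 3 ) );
+           bool found = m.matches( someDocument );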
+
+       TODO: we should rewrite the matcher to be more of an AST style.
+ */
+ class Matcher : boost::noncopyable {
+ int matchesDotted(
+ const char *fieldName,
+ const BSONElement& toMatch, const BSONObj& obj,
+ int compareOp, const ElementMatcher& bm, bool isArr , MatchDetails * details ) const;
+
+ /**
+ * Perform a NE or NIN match by returning the inverse of the opposite matching operation.
+ * Missing values are considered matches unless the match must not equal null.
+ */
+ int inverseMatch(
+ const char *fieldName,
+ const BSONElement &toMatch, const BSONObj &obj,
+ const ElementMatcher&bm, MatchDetails * details ) const;
+
+ public:
+ static int opDirection(int op) {
+ return op <= BSONObj::LTE ? -1 : 1;
+ }
+
+ Matcher(const BSONObj &pattern, bool nested=false);
+
+ ~Matcher();
+
+ bool matches(const BSONObj& j, MatchDetails * details = 0 ) const;
+
+ bool atomic() const { return _atomic; }
+
+ string toString() const {
+ return _jsobj.toString();
+ }
+
+ void addOrDedupConstraint( const shared_ptr< FieldRangeVector > &frv ) {
+ _orDedupConstraints.push_back( frv );
+ }
+
+ void popOrClause() {
+ _orMatchers.pop_front();
+ }
+
+ /**
+ * @return true if this key matcher will return the same true/false
+ * value as the provided doc matcher.
+ */
+ bool keyMatch( const Matcher &docMatcher ) const;
+
+ bool singleSimpleCriterion() const {
+ return false; // TODO SERVER-958
+// // TODO Really check, especially if all basics are ok.
+// // $all, etc
+// // _orConstraints?
+// return ( ( basics.size() + nRegex ) < 2 ) && !where && !_orMatchers.size() && !_norMatchers.size();
+ }
+
+ const BSONObj *getQuery() const { return &_jsobj; };
+
+ private:
+ /**
+ * Generate a matcher for the provided index key format using the
+ * provided full doc matcher.
+ */
+ Matcher( const Matcher &docMatcher, const BSONObj &constrainIndexKey );
+
+ void addBasic(const BSONElement &e, int c, bool isNot) {
+ // TODO May want to selectively ignore these element types based on op type.
+ if ( e.type() == MinKey || e.type() == MaxKey )
+ return;
+ _basics.push_back( ElementMatcher( e , c, isNot ) );
+ }
+
+ void addRegex(const char *fieldName, const char *regex, const char *flags, bool isNot = false);
+ bool addOp( const BSONElement &e, const BSONElement &fe, bool isNot, const char *& regex, const char *&flags );
+
+ int valuesMatch(const BSONElement& l, const BSONElement& r, int op, const ElementMatcher& bm) const;
+
+ bool parseClause( const BSONElement &e );
+ void parseExtractedClause( const BSONElement &e, list< shared_ptr< Matcher > > &matchers );
+
+ void parseWhere( const BSONElement &e );
+ void parseMatchExpressionElement( const BSONElement &e, bool nested );
+
+ Where *_where; // set if query uses $where
+ BSONObj _jsobj; // the query pattern. e.g., { name: "joe" }
+ BSONObj _constrainIndexKey;
+ vector<ElementMatcher> _basics;
+ bool _haveSize;
+ bool _all;
+ bool _hasArray;
+ bool _haveNeg;
+
+ /* $atomic - if true, a multi document operation (some removes, updates)
+ should be done atomically. in that case, we do not yield -
+ i.e. we stay locked the whole time.
+       http://www.mongodb.org/display/DOCS/Removing
+ */
+ bool _atomic;
+
+ vector<RegexMatcher> _regexs;
+
+ // so we delete the mem when we're done:
+ vector< shared_ptr< BSONObjBuilder > > _builders;
+ list< shared_ptr< Matcher > > _andMatchers;
+ list< shared_ptr< Matcher > > _orMatchers;
+ list< shared_ptr< Matcher > > _norMatchers;
+ vector< shared_ptr< FieldRangeVector > > _orDedupConstraints;
+
+ friend class CoveredIndexMatcher;
+ };
+
+ // If match succeeds on index key, then attempt to match full document.
+ class CoveredIndexMatcher : boost::noncopyable {
+ public:
+ CoveredIndexMatcher(const BSONObj &pattern, const BSONObj &indexKeyPattern , bool alwaysUseRecord=false );
+ bool matches(const BSONObj &o) { return _docMatcher->matches( o ); }
+ bool matchesWithSingleKeyIndex(const BSONObj &key, const DiskLoc &recLoc , MatchDetails * details = 0 ) {
+ return matches( key, recLoc, details, true );
+ }
+ /**
+ * This is the preferred method for matching against a cursor, as it
+ * can handle both multi and single key cursors.
+ */
+ bool matchesCurrent( Cursor * cursor , MatchDetails * details = 0 );
+ bool needRecord() { return _needRecord; }
+
+ Matcher& docMatcher() { return *_docMatcher; }
+
+ // once this is called, shouldn't use this matcher for matching any more
+ void advanceOrClause( const shared_ptr< FieldRangeVector > &frv ) {
+ _docMatcher->addOrDedupConstraint( frv );
+ // TODO this is not yet optimal. Since we could skip an entire
+ // or clause (if a match is impossible) between calls to advanceOrClause()
+ // we may not pop all the clauses we can.
+ _docMatcher->popOrClause();
+ }
+
+ CoveredIndexMatcher *nextClauseMatcher( const BSONObj &indexKeyPattern, bool alwaysUseRecord=false ) {
+ return new CoveredIndexMatcher( _docMatcher, indexKeyPattern, alwaysUseRecord );
+ }
+
+ string toString() const;
+
+ private:
+ bool matches(const BSONObj &key, const DiskLoc &recLoc , MatchDetails * details = 0 , bool keyUsable = true );
+ CoveredIndexMatcher(const shared_ptr< Matcher > &docMatcher, const BSONObj &indexKeyPattern , bool alwaysUseRecord=false );
+ void init( bool alwaysUseRecord );
+ shared_ptr< Matcher > _docMatcher;
+ Matcher _keyMatcher;
+
+ bool _needRecord; // if the key itself isn't good enough to determine a positive match
+ };
+
+} // namespace mongo
diff --git a/src/mongo/db/matcher_covered.cpp b/src/mongo/db/matcher_covered.cpp
new file mode 100644
index 00000000000..c6c89d03007
--- /dev/null
+++ b/src/mongo/db/matcher_covered.cpp
@@ -0,0 +1,101 @@
+// matcher_covered.cpp
+
+/* Matcher is our boolean expression evaluator for "where" clauses */
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "matcher.h"
+#include "../util/goodies.h"
+#include "../util/unittest.h"
+#include "diskloc.h"
+#include "../scripting/engine.h"
+#include "db.h"
+#include "client.h"
+
+#include "pdfile.h"
+
+namespace mongo {
+
+ CoveredIndexMatcher::CoveredIndexMatcher( const BSONObj &jsobj, const BSONObj &indexKeyPattern, bool alwaysUseRecord) :
+ _docMatcher( new Matcher( jsobj ) ),
+ _keyMatcher( *_docMatcher, indexKeyPattern ) {
+ init( alwaysUseRecord );
+ }
+
+ CoveredIndexMatcher::CoveredIndexMatcher( const shared_ptr< Matcher > &docMatcher, const BSONObj &indexKeyPattern , bool alwaysUseRecord ) :
+ _docMatcher( docMatcher ),
+ _keyMatcher( *_docMatcher, indexKeyPattern ) {
+ init( alwaysUseRecord );
+ }
+
+ void CoveredIndexMatcher::init( bool alwaysUseRecord ) {
+ _needRecord =
+ alwaysUseRecord ||
+ !_keyMatcher.keyMatch( *_docMatcher );
+ }
+
+ bool CoveredIndexMatcher::matchesCurrent( Cursor * cursor , MatchDetails * details ) {
+ // bool keyUsable = ! cursor->isMultiKey() && check for $orish like conditions in matcher SERVER-1264
+ return matches( cursor->currKey() , cursor->currLoc() , details ,
+ !cursor->indexKeyPattern().isEmpty() // unindexed cursor
+ && !cursor->isMultiKey() // multikey cursor
+ );
+ }
+
+ bool CoveredIndexMatcher::matches(const BSONObj &key, const DiskLoc &recLoc , MatchDetails * details , bool keyUsable ) {
+
+ LOG(5) << "CoveredIndexMatcher::matches() " << key.toString() << ' ' << recLoc.toString() << ' ' << keyUsable << endl;
+
+ dassert( key.isValid() );
+
+ if ( details )
+ details->reset();
+
+ if ( keyUsable ) {
+ if ( !_keyMatcher.matches(key, details ) ) {
+ return false;
+ }
+ if ( ! _needRecord ) {
+ return true;
+ }
+ }
+
+ if ( details )
+ details->_loadedObject = true;
+
+ bool res = _docMatcher->matches(recLoc.obj() , details );
+ LOG(5) << "CoveredIndexMatcher _docMatcher->matches() returns " << res << endl;
+ return res;
+ }
+
+ string CoveredIndexMatcher::toString() const {
+ StringBuilder buf;
+ buf << "(CoveredIndexMatcher ";
+
+ if ( _needRecord )
+ buf << "needRecord ";
+
+ buf << "keyMatcher: " << _keyMatcher.toString() << " ";
+
+ if ( _docMatcher )
+ buf << "docMatcher: " << _docMatcher->toString() << " ";
+
+ buf << ")";
+ return buf.str();
+ }
+}
diff --git a/src/mongo/db/minilex.h b/src/mongo/db/minilex.h
new file mode 100644
index 00000000000..677514aa47c
--- /dev/null
+++ b/src/mongo/db/minilex.h
@@ -0,0 +1,164 @@
+// minilex.h
+// mini js lexical analyzer. idea is to be dumb and fast.
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#error does anything use this?
+
+namespace mongo {
+
+#if defined(_WIN32)
+
+} // namespace mongo
+
+#include <hash_map>
+using namespace stdext;
+
+namespace mongo {
+
+ typedef const char * MyStr;
+ struct less_str {
+ bool operator()(const MyStr & x, const MyStr & y) const {
+ if ( strcmp(x, y) > 0)
+ return true;
+
+ return false;
+ }
+ };
+
+ typedef hash_map<const char*, int, hash_compare<const char *, less_str> > strhashmap;
+
+#else
+
+} // namespace mongo
+
+#include <ext/hash_map>
+
+namespace mongo {
+
+ using namespace __gnu_cxx;
+
+ typedef const char * MyStr;
+ struct eq_str {
+ bool operator()(const MyStr & x, const MyStr & y) const {
+ if ( strcmp(x, y) == 0)
+ return true;
+
+ return false;
+ }
+ };
+
+ typedef hash_map<const char*, int, hash<const char *>, eq_str > strhashmap;
+
+#endif
+
+ /*
+ struct MiniLexNotUsed {
+ strhashmap reserved;
+ bool ic[256]; // ic=Identifier Character
+ bool starter[256];
+
+ // dm: very dumb about comments and escaped quotes -- but we are faster then at least,
+ // albeit returning too much (which is ok for jsbobj current usage).
+ void grabVariables(char *code , strhashmap& vars) { // 'code' modified and must stay in scope*/
+ char *p = code;
+ char last = 0;
+ while ( *p ) {
+ if ( starter[*p] ) {
+ char *q = p+1;
+ while ( *q && ic[*q] ) q++;
+ const char *identifier = p;
+ bool done = *q == 0;
+ *q = 0;
+ if ( !reserved.count(identifier) ) {
+ // we try to be smart about 'obj' but have to be careful as obj.obj
+ // can happen; this is so that nFields is right for simplistic where cases
+ // so we can stop scanning in jsobj when we find the field of interest.
+ if ( strcmp(identifier,"obj")==0 && p>code && p[-1] != '.' )
+ ;
+ else
+ vars[identifier] = 1;
+ }
+ if ( done )
+ break;
+ p = q + 1;
+ continue;
+ }
+
+ if ( *p == '\'' ) {
+ p++;
+ while ( *p && *p != '\'' ) p++;
+ }
+ else if ( *p == '"' ) {
+ p++;
+ while ( *p && *p != '"' ) p++;
+ }
+ p++;
+ }
+}
+
+MiniLex() {
+ strhashmap atest;
+ atest["foo"] = 3;
+ assert( atest.count("bar") == 0 );
+ assert( atest.count("foo") == 1 );
+ assert( atest["foo"] == 3 );
+
+ for ( int i = 0; i < 256; i++ ) {
+ ic[i] = starter[i] = false;
+ }
+ for ( int i = 'a'; i <= 'z'; i++ )
+ ic[i] = starter[i] = true;
+ for ( int i = 'A'; i <= 'Z'; i++ )
+ ic[i] = starter[i] = true;
+ for ( int i = '0'; i <= '9'; i++ )
+ ic[i] = true;
+ for ( int i = 128; i < 256; i++ )
+ ic[i] = starter[i] = true;
+ ic['$'] = starter['$'] = true;
+ ic['_'] = starter['_'] = true;
+
+ reserved["break"] = true;
+ reserved["case"] = true;
+ reserved["catch"] = true;
+ reserved["continue"] = true;
+ reserved["default"] = true;
+ reserved["delete"] = true;
+ reserved["do"] = true;
+ reserved["else"] = true;
+ reserved["finally"] = true;
+ reserved["for"] = true;
+ reserved["function"] = true;
+ reserved["if"] = true;
+ reserved["in"] = true;
+ reserved["instanceof"] = true;
+ reserved["new"] = true;
+ reserved["return"] = true;
+ reserved["switch"] = true;
+ reserved["this"] = true;
+ reserved["throw"] = true;
+ reserved["try"] = true;
+ reserved["typeof"] = true;
+ reserved["var"] = true;
+ reserved["void"] = true;
+ reserved["while"] = true;
+ reserved["with "] = true;
+}
+};
+*/
+
+} // namespace mongo
diff --git a/src/mongo/db/module.cpp b/src/mongo/db/module.cpp
new file mode 100644
index 00000000000..4269c5e99a0
--- /dev/null
+++ b/src/mongo/db/module.cpp
@@ -0,0 +1,68 @@
+// module.cpp
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "pch.h"
+#include "module.h"
+
+namespace mongo {
+
+ std::list<Module*> * Module::_all;
+
+ Module::Module( const string& name )
+ : _name( name ) , _options( (string)"Module " + name + " options" ) {
+ if ( ! _all )
+ _all = new list<Module*>();
+ _all->push_back( this );
+ }
+
+ Module::~Module() {}
+
+ void Module::addOptions( boost::program_options::options_description& options ) {
+ if ( ! _all ) {
+ return;
+ }
+ for ( list<Module*>::iterator i=_all->begin(); i!=_all->end(); i++ ) {
+ Module* m = *i;
+ options.add( m->_options );
+ }
+ }
+
+ void Module::configAll( boost::program_options::variables_map& params ) {
+ if ( ! _all ) {
+ return;
+ }
+ for ( list<Module*>::iterator i=_all->begin(); i!=_all->end(); i++ ) {
+ Module* m = *i;
+ m->config( params );
+ }
+
+ }
+
+
+ void Module::initAll() {
+ if ( ! _all ) {
+ return;
+ }
+ for ( list<Module*>::iterator i=_all->begin(); i!=_all->end(); i++ ) {
+ Module* m = *i;
+ m->init();
+ }
+
+ }
+
+}
diff --git a/src/mongo/db/module.h b/src/mongo/db/module.h
new file mode 100644
index 00000000000..71f276e0585
--- /dev/null
+++ b/src/mongo/db/module.h
@@ -0,0 +1,70 @@
+// module.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../pch.h"
+#include <boost/program_options.hpp>
+#include <list>
+
+namespace mongo {
+
+ /**
+ * Module is the base class for adding modules to MongoDB
+ * modules allow adding hooks and features to mongo
+ * the idea is to add hooks into the main code for module support where needed
+ * some ideas are: monitoring, indexes, full text search
+ */
+ class Module {
+ public:
+ Module( const string& name );
+ virtual ~Module();
+
+ boost::program_options::options_description_easy_init add_options() {
+ return _options.add_options();
+ }
+
+ /**
+ * read config from command line
+ */
+ virtual void config( boost::program_options::variables_map& params ) = 0;
+
+ /**
+         * called after configuration when the server is ready to start
+ */
+ virtual void init() = 0;
+
+ /**
+ * called when the database is about to shutdown
+ */
+ virtual void shutdown() = 0;
+
+ const string& getName() { return _name; }
+
+ // --- static things
+
+ static void addOptions( boost::program_options::options_description& options );
+ static void configAll( boost::program_options::variables_map& params );
+ static void initAll();
+
+ private:
+ static std::list<Module*> * _all;
+ string _name;
+ boost::program_options::options_description _options;
+ };
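+
+    /* Illustrative sketch only (not part of this file): a hypothetical module subclasses
+       Module, registers its flags in the constructor, and is picked up automatically because
+       the base constructor adds it to the static registry. e.g.:
+
+           class StatsModule : public Module {
+           public:
+               StatsModule() : Module( "stats" ) {
+                   add_options()
+                       ( "stats-interval" , boost::program_options::value<int>()->default_value(60) , "reporting interval" )
+                       ;
+               }
+               void config( boost::program_options::variables_map& params ) {
+                   _interval = params["stats-interval"].as<int>();
+               }
+               void init() {}
+               void shutdown() {}
+           private:
+               int _interval;
+           } statsModuleInstance; // a file-scope instance registers the module at startup
+    */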
+}
diff --git a/src/mongo/db/modules/mms.cpp b/src/mongo/db/modules/mms.cpp
new file mode 100644
index 00000000000..418a553f283
--- /dev/null
+++ b/src/mongo/db/modules/mms.cpp
@@ -0,0 +1,170 @@
+// @file mms.cpp
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "pch.h"
+#include "../db.h"
+#include "../instance.h"
+#include "../module.h"
+#include "../../util/net/httpclient.h"
+#include "../../util/background.h"
+#include "../commands.h"
+
+namespace po = boost::program_options;
+
+namespace mongo {
+
+ /** Mongo Monitoring Service
+        if enabled, this runs in the background and pings mms
+ */
+ class MMS : public BackgroundJob , Module {
+ public:
+
+ MMS()
+ : Module( "mms" ) , _baseurl( "" ) ,
+ _secsToSleep(1) , _token( "" ) , _name( "" ) {
+
+ add_options()
+ ( "mms-url" , po::value<string>()->default_value("http://mms.10gen.com/ping") , "url for mongo monitoring server" )
+ ( "mms-token" , po::value<string>() , "account token for mongo monitoring server" )
+ ( "mms-name" , po::value<string>() , "server name for mongo monitoring server" )
+ ( "mms-interval" , po::value<int>()->default_value(30) , "ping interval (in seconds) for mongo monitoring server" )
+ ;
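+            // illustrative invocation (hypothetical values):
+            //   mongod --mms-token <token> --mms-name myhost --mms-interval 30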
+ }
+
+ ~MMS() {}
+
+ void config( boost::program_options::variables_map& params ) {
+ _baseurl = params["mms-url"].as<string>();
+ if ( params.count( "mms-token" ) ) {
+ _token = params["mms-token"].as<string>();
+ }
+ if ( params.count( "mms-name" ) ) {
+ _name = params["mms-name"].as<string>();
+ }
+ _secsToSleep = params["mms-interval"].as<int>();
+ }
+
+ void run() {
+ if ( _token.size() == 0 && _name.size() == 0 ) {
+ log(1) << "mms not configured" << endl;
+ return;
+ }
+
+ if ( _token.size() == 0 ) {
+ log() << "no token for mms - not running" << endl;
+ return;
+ }
+
+ if ( _name.size() == 0 ) {
+ log() << "no name for mms - not running" << endl;
+ return;
+ }
+
+ log() << "mms monitor staring... token:" << _token << " name:" << _name << " interval: " << _secsToSleep << endl;
+ Client::initThread( "mms" );
+ Client& c = cc();
+
+
+ // TODO: using direct client is bad, but easy for now
+
+ while ( ! inShutdown() ) {
+ sleepsecs( _secsToSleep );
+
+ try {
+ stringstream url;
+ url << _baseurl << "?"
+ << "token=" << _token << "&"
+ << "name=" << _name << "&"
+ << "ts=" << time(0)
+ ;
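+                    // e.g. (illustrative values): http://mms.10gen.com/ping?token=abc123&name=myhost&ts=1324760026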
+
+ BSONObjBuilder bb;
+ // duplicated so the post has everything
+ bb.append( "token" , _token );
+ bb.append( "name" , _name );
+ bb.appendDate( "ts" , jsTime() );
+
+ // any commands
+ _add( bb , "buildinfo" );
+ _add( bb , "serverStatus" );
+
+ BSONObj postData = bb.obj();
+
+ log(1) << "mms url: " << url.str() << "\n\t post: " << postData << endl;;
+
+ HttpClient c;
+ HttpClient::Result r;
+ int rc = c.post( url.str() , postData.jsonString() , &r );
+ log(1) << "\t response code: " << rc << endl;
+ if ( rc != 200 ) {
+ log() << "mms error response code:" << rc << endl;
+ log(1) << "mms error body:" << r.getEntireResponse() << endl;
+ }
+ }
+ catch ( std::exception& e ) {
+ log() << "mms exception: " << e.what() << endl;
+ }
+ }
+
+ c.shutdown();
+ }
+
+ void _add( BSONObjBuilder& postData , const char* cmd ) {
+ Command * c = Command::findCommand( cmd );
+ if ( ! c ) {
+ log() << "MMS can't find command: " << cmd << endl;
+ postData.append( cmd , "can't find command" );
+ return;
+ }
+
+ if ( c->locktype() ) {
+ log() << "MMS can only use noLocking commands not: " << cmd << endl;
+ postData.append( cmd , "not noLocking" );
+ return;
+ }
+
+ BSONObj co = BSON( cmd << 1 );
+
+ string errmsg;
+ BSONObjBuilder sub;
+ if ( ! c->run( "admin.$cmd" , co , 0 , errmsg , sub , false ) )
+ postData.append( cmd , errmsg );
+ else
+ postData.append( cmd , sub.obj() );
+ }
+
+
+ void init() { go(); }
+
+ void shutdown() {
+ // TODO
+ }
+
+ private:
+ string _baseurl;
+ int _secsToSleep;
+
+ string _token;
+ string _name;
+
+ } /*mms*/ ;
+
+}
+
+
+
diff --git a/src/mongo/db/mongo.ico b/src/mongo/db/mongo.ico
new file mode 100755
index 00000000000..5258b6e0446
--- /dev/null
+++ b/src/mongo/db/mongo.ico
Binary files differ
diff --git a/src/mongo/db/mongommf.cpp b/src/mongo/db/mongommf.cpp
new file mode 100644
index 00000000000..af2e822404e
--- /dev/null
+++ b/src/mongo/db/mongommf.cpp
@@ -0,0 +1,339 @@
+// @file mongommf.cpp
+
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+/* this module adds some of our layers atop memory mapped files - specifically our handling of private views & such
+ if you don't care about journaling/durability (temp sort files & such) use MemoryMappedFile class, not this.
+*/
+
+#include "pch.h"
+#include "cmdline.h"
+#include "mongommf.h"
+#include "dur.h"
+#include "dur_journalformat.h"
+#include "../util/mongoutils/str.h"
+#include "mongomutex.h"
+#include "d_globals.h"
+
+using namespace mongoutils;
+
+namespace mongo {
+
+#if defined(_WIN32)
+ extern mutex mapViewMutex;
+
+ __declspec(noinline) void makeChunkWritable(size_t chunkno) {
+ scoped_lock lk(mapViewMutex);
+
+ if( writable.get(chunkno) ) // double check lock
+ return;
+
+ // remap all maps in this chunk. common case is a single map, but could have more than one with smallfiles or .ns files
+ size_t chunkStart = chunkno * MemoryMappedFile::ChunkSize;
+ size_t chunkNext = chunkStart + MemoryMappedFile::ChunkSize;
+
+ scoped_lock lk2(privateViews._mutex());
+ map<void*,MongoMMF*>::iterator i = privateViews.finditer_inlock((void*) (chunkNext-1));
+ while( 1 ) {
+ const pair<void*,MongoMMF*> x = *(--i);
+ MongoMMF *mmf = x.second;
+ if( mmf == 0 )
+ break;
+
+ size_t viewStart = (size_t) x.first;
+ size_t viewEnd = (size_t) (viewStart + mmf->length());
+ if( viewEnd <= chunkStart )
+ break;
+
+ size_t protectStart = max(viewStart, chunkStart);
+ dassert(protectStart<chunkNext);
+
+ size_t protectEnd = min(viewEnd, chunkNext);
+ size_t protectSize = protectEnd - protectStart;
+ dassert(protectSize>0&&protectSize<=MemoryMappedFile::ChunkSize);
+
+ DWORD old;
+ bool ok = VirtualProtect((void*)protectStart, protectSize, PAGE_WRITECOPY, &old);
+ if( !ok ) {
+ DWORD e = GetLastError();
+ log() << "VirtualProtect failed (mcw) " << mmf->filename() << ' ' << chunkno << hex << protectStart << ' ' << protectSize << ' ' << errnoWithDescription(e) << endl;
+ assert(false);
+ }
+ }
+
+ writable.set(chunkno);
+ }
+
+ void* MemoryMappedFile::createPrivateMap() {
+ assert( maphandle );
+ scoped_lock lk(mapViewMutex);
+ void *p = MapViewOfFile(maphandle, FILE_MAP_READ, 0, 0, 0);
+ if ( p == 0 ) {
+ DWORD e = GetLastError();
+ log() << "createPrivateMap failed " << filename() << " " <<
+ errnoWithDescription(e) << " filelen:" << len <<
+ ((sizeof(void*) == 4 ) ? " (32 bit build)" : "") <<
+ endl;
+ }
+ else {
+ clearWritableBits(p);
+ views.push_back(p);
+ }
+ return p;
+ }
+
+ void* MemoryMappedFile::remapPrivateView(void *oldPrivateAddr) {
+ d.dbMutex.assertWriteLocked(); // short window where we are unmapped so must be exclusive
+
+ // the mapViewMutex is to assure we get the same address on the remap
+ scoped_lock lk(mapViewMutex);
+
+ clearWritableBits(oldPrivateAddr);
+#if 1
+ // https://jira.mongodb.org/browse/SERVER-2942
+ DWORD old;
+ bool ok = VirtualProtect(oldPrivateAddr, (SIZE_T) len, PAGE_READONLY, &old);
+ if( !ok ) {
+ DWORD e = GetLastError();
+ log() << "VirtualProtect failed in remapPrivateView " << filename() << hex << oldPrivateAddr << ' ' << len << ' ' << errnoWithDescription(e) << endl;
+ assert(false);
+ }
+ return oldPrivateAddr;
+#else
+ if( !UnmapViewOfFile(oldPrivateAddr) ) {
+ DWORD e = GetLastError();
+ log() << "UnMapViewOfFile failed " << filename() << ' ' << errnoWithDescription(e) << endl;
+ assert(false);
+ }
+
+ // we want the new address to be the same as the old address in case things keep pointers around (as namespaceindex does).
+ void *p = MapViewOfFileEx(maphandle, FILE_MAP_READ, 0, 0,
+ /*dwNumberOfBytesToMap 0 means to eof*/0 /*len*/,
+ oldPrivateAddr);
+
+ if ( p == 0 ) {
+ DWORD e = GetLastError();
+ log() << "MapViewOfFileEx failed " << filename() << " " << errnoWithDescription(e) << endl;
+ assert(p);
+ }
+ assert(p == oldPrivateAddr);
+ return p;
+#endif
+ }
+#endif
+
+ void MongoMMF::remapThePrivateView() {
+ assert( cmdLine.dur );
+
+ // todo 1.9 : it turns out we require that we always remap to the same address.
+ // so the remove / add isn't necessary and can be removed
+ privateViews.remove(_view_private);
+ _view_private = remapPrivateView(_view_private);
+ privateViews.add(_view_private, this);
+ }
+
+ /** register view. threadsafe */
+ void PointerToMMF::add(void *view, MongoMMF *f) {
+ assert(view);
+ assert(f);
+ mutex::scoped_lock lk(_m);
+ _views.insert( pair<void*,MongoMMF*>(view,f) );
+ }
+
+ /** de-register view. threadsafe */
+ void PointerToMMF::remove(void *view) {
+ if( view ) {
+ mutex::scoped_lock lk(_m);
+ _views.erase(view);
+ }
+ }
+
+ PointerToMMF::PointerToMMF() : _m("PointerToMMF") {
+#if defined(SIZE_MAX)
+ size_t max = SIZE_MAX;
+#else
+ size_t max = ~((size_t)0);
+#endif
+ assert( max > (size_t) this ); // just checking that no one redef'd SIZE_MAX and that it is sane
+
+ // this way we don't need any boundary checking in _find()
+ _views.insert( pair<void*,MongoMMF*>((void*)0,(MongoMMF*)0) );
+ _views.insert( pair<void*,MongoMMF*>((void*)max,(MongoMMF*)0) );
+ }
+
+ /** underscore version of find is for when you are already locked
+ @param ofs out return our offset in the view
+ @return the MongoMMF to which this pointer belongs
+ */
+ MongoMMF* PointerToMMF::find_inlock(void *p, /*out*/ size_t& ofs) {
+ //
+ // .................memory..........................
+ // v1 p v2
+ // [--------------------] [-------]
+ //
+ // e.g., _find(p) == v1
+ //
+ const pair<void*,MongoMMF*> x = *(--_views.upper_bound(p));
+ MongoMMF *mmf = x.second;
+ if( mmf ) {
+ size_t o = ((char *)p) - ((char*)x.first);
+ if( o < mmf->length() ) {
+ ofs = o;
+ return mmf;
+ }
+ }
+ return 0;
+ }
+
+ /** find associated MMF object for a given pointer.
+ threadsafe
+ @param ofs out returns offset into the view of the pointer, if found.
+ @return the MongoMMF to which this pointer belongs. null if not found.
+ */
+ MongoMMF* PointerToMMF::find(void *p, /*out*/ size_t& ofs) {
+ mutex::scoped_lock lk(_m);
+ return find_inlock(p, ofs);
+ }
+
+ PointerToMMF privateViews;
+
+ /* void* MongoMMF::switchToPrivateView(void *readonly_ptr) {
+ assert( cmdLine.dur );
+ assert( testIntent );
+
+ void *p = readonly_ptr;
+
+ {
+ size_t ofs=0;
+ MongoMMF *mmf = ourReadViews.find(p, ofs);
+ if( mmf ) {
+ void *res = ((char *)mmf->_view_private) + ofs;
+ return res;
+ }
+ }
+
+ {
+ size_t ofs=0;
+ MongoMMF *mmf = privateViews.find(p, ofs);
+ if( mmf ) {
+ log() << "dur: perf warning p=" << p << " is already in the writable view of " << mmf->filename() << endl;
+ return p;
+ }
+ }
+
+ // did you call writing() with a pointer that isn't into a datafile?
+ log() << "dur error switchToPrivateView " << p << endl;
+ return p;
+ }*/
+
+ /* switch to _view_write. normally, this is a bad idea since your changes will not
+ show up in _view_private if there have been changes there; thus the leading underscore
+ as a tad of a "warning". but useful when done with some care, such as during
+ initialization.
+ */
+ void* MongoMMF::_switchToWritableView(void *p) {
+ size_t ofs;
+ MongoMMF *f = privateViews.find(p, ofs);
+ assert( f );
+ return (((char *)f->_view_write)+ofs);
+ }
+
+ extern string dbpath;
+
+ // here so that it is precomputed...
+ void MongoMMF::setPath(string f) {
+ string suffix;
+ string prefix;
+ bool ok = str::rSplitOn(f, '.', prefix, suffix);
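+        // e.g. f == "foo/bar.3" splits into prefix "foo/bar" and suffix "3"; "foo/bar.ns" yields suffix "ns"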
+ uassert(13520, str::stream() << "MongoMMF only supports filenames in a certain format " << f, ok);
+ if( suffix == "ns" )
+ _fileSuffixNo = dur::JEntry::DotNsSuffix;
+ else
+ _fileSuffixNo = (int) str::toUnsigned(suffix);
+
+ _p = RelativePath::fromFullPath(prefix);
+ }
+
+ bool MongoMMF::open(string fname, bool sequentialHint) {
+ LOG(3) << "mmf open " << fname << endl;
+ setPath(fname);
+ _view_write = mapWithOptions(fname.c_str(), sequentialHint ? SEQUENTIAL : 0);
+ return finishOpening();
+ }
+
+ bool MongoMMF::create(string fname, unsigned long long& len, bool sequentialHint) {
+ LOG(3) << "mmf create " << fname << endl;
+ setPath(fname);
+ _view_write = map(fname.c_str(), len, sequentialHint ? SEQUENTIAL : 0);
+ return finishOpening();
+ }
+
+ bool MongoMMF::finishOpening() {
+ LOG(3) << "mmf finishOpening " << (void*) _view_write << ' ' << filename() << " len:" << length() << endl;
+ if( _view_write ) {
+ if( cmdLine.dur ) {
+ _view_private = createPrivateMap();
+ if( _view_private == 0 ) {
+ msgasserted(13636, str::stream() << "file " << filename() << " open/create failed in createPrivateMap (look in log for more information)");
+ }
+ privateViews.add(_view_private, this); // note that testIntent builds use this, even though it points to view_write then...
+ }
+ else {
+ _view_private = _view_write;
+ }
+ return true;
+ }
+ return false;
+ }
+
+ MongoMMF::MongoMMF() : _willNeedRemap(false) {
+ _view_write = _view_private = 0;
+ }
+
+ MongoMMF::~MongoMMF() {
+ try {
+ close();
+ }
+ catch(...) { error() << "exception in ~MongoMMF" << endl; }
+ }
+
+ namespace dur {
+ void closingFileNotification();
+ }
+
+ /*virtual*/ void MongoMMF::close() {
+ LOG(3) << "mmf close " << filename() << endl;
+
+ if( view_write() /*actually was opened*/ ) {
+ if( cmdLine.dur ) {
+ dur::closingFileNotification();
+ }
+ if( !d.dbMutex.isWriteLocked() ) {
+ assert( inShutdown() );
+ DEV {
+ log() << "is it really ok to close a mongommf outside a write lock? dbmutex status:" << d.dbMutex.getState() << " file:" << filename() << endl;
+ }
+ }
+ }
+
+ LockMongoFilesExclusive lk;
+ privateViews.remove(_view_private);
+ _view_write = _view_private = 0;
+ MemoryMappedFile::close();
+ }
+
+}
diff --git a/src/mongo/db/mongommf.h b/src/mongo/db/mongommf.h
new file mode 100644
index 00000000000..62a6cdfd3fd
--- /dev/null
+++ b/src/mongo/db/mongommf.h
@@ -0,0 +1,145 @@
+/** @file mongommf.h
+*
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../util/mmap.h"
+#include "../util/paths.h"
+
+namespace mongo {
+
+ /** MongoMMF adds some layers atop memory mapped files - specifically our handling of private views & such.
+ if you don't care about journaling/durability (temp sort files & such) use MemoryMappedFile class,
+ not this.
+ */
+ class MongoMMF : private MemoryMappedFile {
+ protected:
+ virtual void* viewForFlushing() { return _view_write; }
+
+ public:
+ MongoMMF();
+ virtual ~MongoMMF();
+ virtual void close();
+
+ /** @return true if opened ok. */
+ bool open(string fname, bool sequentialHint /*typically we open with this false*/);
+
+ /** @return file length */
+ unsigned long long length() const { return MemoryMappedFile::length(); }
+
+ string filename() const { return MemoryMappedFile::filename(); }
+
+ void flush(bool sync) { MemoryMappedFile::flush(sync); }
+
+        /* Creates the file with the given length if it does not exist;
+           otherwise opens the existing file and sets len to its actual length.
+ @param sequentialHint if true will be sequentially accessed
+ @return true for ok
+ */
+ bool create(string fname, unsigned long long& len, bool sequentialHint);
+
+ /* Get the "standard" view (which is the private one).
+ @return the private view.
+ */
+ void* getView() const { return _view_private; }
+
+ /* Get the "write" view (which is required for writing).
+ @return the write view.
+ */
+ void* view_write() const { return _view_write; }
+
+
+ /* switch to _view_write. normally, this is a bad idea since your changes will not
+ show up in _view_private if there have been changes there; thus the leading underscore
+ as a tad of a "warning". but useful when done with some care, such as during
+ initialization.
+ */
+ static void* _switchToWritableView(void *private_ptr);
+
+ /** for a filename a/b/c.3
+             relativePath() is "a/b/c"
+             fileSuffixNo() is 3
+             if the suffix is "ns", fileSuffixNo() is -1
+ */
+ const RelativePath& relativePath() const {
+ DEV assert( !_p._p.empty() );
+ return _p;
+ }
+
+ int fileSuffixNo() const { return _fileSuffixNo; }
+
+ /** true if we have written.
+ set in PREPLOGBUFFER, it is NOT set immediately on write intent declaration.
+ reset to false in REMAPPRIVATEVIEW
+ */
+ bool& willNeedRemap() { return _willNeedRemap; }
+
+ void remapThePrivateView();
+
+ virtual bool isMongoMMF() { return true; }
+
+ private:
+
+ void *_view_write;
+ void *_view_private;
+ bool _willNeedRemap;
+ RelativePath _p; // e.g. "somepath/dbname"
+ int _fileSuffixNo; // e.g. 3. -1="ns"
+
+ void setPath(string pathAndFileName);
+ bool finishOpening();
+ };
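+
+    /* usage sketch (illustrative only):
+
+           MongoMMF mmf;
+           unsigned long long len = 16 * 1024 * 1024;
+           if( mmf.create("somedb.0", len, false) ) {
+               void *p = mmf.getView();   // the private view when journaling is enabled
+           }
+    */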
+
+ /** for durability support we want to be able to map pointers to specific MongoMMF objects.
+ */
+ class PointerToMMF : boost::noncopyable {
+ public:
+ PointerToMMF();
+
+ /** register view.
+ threadsafe
+ */
+ void add(void *view, MongoMMF *f);
+
+ /** de-register view.
+ threadsafe
+ */
+ void remove(void *view);
+
+ /** find associated MMF object for a given pointer.
+ threadsafe
+ @param ofs out returns offset into the view of the pointer, if found.
+ @return the MongoMMF to which this pointer belongs. null if not found.
+ */
+ MongoMMF* find(void *p, /*out*/ size_t& ofs);
+
+ /** for doing many finds in a row with one lock operation */
+ mutex& _mutex() { return _m; }
+ MongoMMF* find_inlock(void *p, /*out*/ size_t& ofs);
+
+ map<void*,MongoMMF*>::iterator finditer_inlock(void *p) { return _views.upper_bound(p); }
+
+ unsigned numberOfViews_inlock() const { return _views.size(); }
+
+ private:
+ mutex _m;
+ map<void*, MongoMMF*> _views;
+ };
+
+ // allows a pointer into any private view of a MongoMMF to be resolved to the MongoMMF object
+ extern PointerToMMF privateViews;
+}
diff --git a/src/mongo/db/mongomutex.h b/src/mongo/db/mongomutex.h
new file mode 100644
index 00000000000..08b091cae9c
--- /dev/null
+++ b/src/mongo/db/mongomutex.h
@@ -0,0 +1,388 @@
+// @file mongomutex.h
+
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* Mutex hierarchy (1 = "leaf")
+ name level
+ Logstream::mutex 1
+ ClientCursor::ccmutex 2
+ dblock 3
+
+ End func name with _inlock to indicate "caller must lock before calling".
+*/
+
+#pragma once
+
+#include "../util/concurrency/rwlock.h"
+#include "../util/mmap.h"
+#include "../util/time_support.h"
+#include "d_globals.h"
+
+namespace mongo {
+
+ class Client;
+ Client* curopWaitingForLock( int type );
+ void curopGotLock(Client*);
+
+ /* mongomutex time stats */
+ class MutexInfo {
+ unsigned long long enter, timeLocked; // microseconds
+ int locked;
+ unsigned long long start; // last as we touch this least often
+ public:
+ MutexInfo() : timeLocked(0) , locked(0) {
+ start = curTimeMicros64();
+ }
+ void entered() {
+ if ( locked == 0 )
+ enter = curTimeMicros64();
+ locked++;
+ assert( locked >= 1 );
+ }
+ void leaving() {
+ locked--;
+ assert( locked >= 0 );
+ if ( locked == 0 )
+ timeLocked += curTimeMicros64() - enter;
+ }
+ int isLocked() const { return locked; }
+ void getTimingInfo(unsigned long long &s, unsigned long long &tl) const {
+ s = start;
+ tl = timeLocked;
+ }
+ unsigned long long getTimeLocked() const { return timeLocked; }
+ };
+
+ /** the 'big lock'. a read/write lock.
+ there is one of these, d.dbMutex.
+
+ generally if you need to declare a mutex use the right primitive class, not this.
+
+ use readlock and writelock classes for scoped locks on this rather than direct
+ manipulation.
+ */
+ class MongoMutex {
+ public:
+ MongoMutex(const char * name);
+
+ /** @return
+ * > 0 write lock
+ * = 0 no lock
+ * < 0 read lock
+ */
+ int getState() const { return _state.get(); }
+
+ bool atLeastReadLocked() const { return _state.get() != 0; }
+ void assertAtLeastReadLocked() const { assert(atLeastReadLocked()); }
+ bool isWriteLocked/*by our thread*/() const { return getState() > 0; }
+ void assertWriteLocked() const {
+ assert( getState() > 0 );
+ DEV assert( !_releasedEarly.get() );
+ }
+
+ // write lock. use the writelock scoped lock class, not this directly.
+ void lock() {
+ if ( _writeLockedAlready() )
+ return;
+
+ _state.set(1);
+
+ curopWaitingForLock( 1 ); // stats
+ _m.lock();
+ MongoFile::markAllWritable(); // for _DEBUG validation -- a no op for release build
+ _acquiredWriteLock();
+ }
+
+ // try write lock
+ bool lock_try( int millis ) {
+ if ( _writeLockedAlready() ) // adjusts _state
+ return true;
+
+ curopWaitingForLock( 1 );
+ bool got = _m.lock_try( millis );
+
+ if ( got ) {
+ _state.set(1);
+ MongoFile::markAllWritable(); // for _DEBUG validation -- a no op for release build
+ _acquiredWriteLock();
+ }
+
+ return got;
+ }
+
+ // un write lock
+ void unlock() {
+ int s = _state.get();
+ if( s > 1 ) {
+ _state.set(s-1); // recursive lock case
+ return;
+ }
+ if( s != 1 ) {
+ if( _releasedEarly.get() ) {
+ _releasedEarly.set(false);
+ return;
+ }
+ massert( 12599, "internal error: attempt to unlock when wasn't in a write lock", false);
+ }
+ _releasingWriteLock();
+ MongoFile::unmarkAllWritable(); // _DEBUG validation
+ _state.set(0);
+ _m.unlock();
+ }
+
+ /* unlock (write lock), and when unlock() is called later,
+ be smart then and don't unlock it again.
+ */
+ void releaseEarly() {
+ assert( getState() == 1 ); // must not be recursive
+ assert( !_releasedEarly.get() );
+ _releasedEarly.set(true);
+ unlock();
+ }
+
+ // read lock. don't call directly, use readlock.
+ void lock_shared() {
+ int s = _state.get();
+ if( s ) {
+ if( s > 0 ) {
+ // already in write lock - just be recursive and stay write locked
+ _state.set(s+1);
+ }
+ else {
+ // already in read lock - recurse
+ _state.set(s-1);
+ }
+ }
+ else {
+ _state.set(-1);
+ Client *c = curopWaitingForLock( -1 );
+ _m.lock_shared();
+ curopGotLock(c);
+ }
+ }
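+        // e.g. a thread that already holds the write lock (_state == 1) and calls lock_shared()
+        // just bumps the recursion count to 2; the underlying RWLock is not acquired again.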
+
+ // try read lock
+ bool lock_shared_try( int millis ) {
+ int s = _state.get();
+ if ( s ) {
+ // we already have a lock, so no need to try
+ lock_shared();
+ return true;
+ }
+
+ /* [dm] should there be
+ Client *c = curopWaitingForLock( 1 );
+ here? i think so. seems to be missing.
+ */
+ bool got = _m.lock_shared_try( millis );
+ if ( got )
+ _state.set(-1);
+ return got;
+ }
+
+ void unlock_shared() {
+ int s = _state.get();
+ if( s > 0 ) {
+                wassert( s > 1 ); /* we must have taken the write lock first to have s > 1 */
+ _state.set(s-1);
+ return;
+ }
+ if( s < -1 ) {
+ _state.set(s+1);
+ return;
+ }
+ wassert( s == -1 );
+ _state.set(0);
+ _m.unlock_shared();
+ }
+
+ MutexInfo& info() { return _minfo; }
+
+ private:
+ void lockedExclusively();
+ void unlockingExclusively();
+ void _acquiredWriteLock();
+ void _releasingWriteLock();
+
+ /* @return true if was already write locked. increments recursive lock count. */
+ bool _writeLockedAlready();
+
+ RWLock _m;
+
+ /* > 0 write lock with recurse count
+ < 0 read lock
+ */
+ ThreadLocalValue<int> _state;
+
+ MutexInfo _minfo;
+
+ public:
+ // indicates we need to call dur::REMAPPRIVATEVIEW on the next write lock
+ bool _remapPrivateViewRequested;
+
+ private:
+ /* See the releaseEarly() method.
+ we use a separate TLS value for releasedEarly - that is ok as
+ our normal/common code path, we never even touch it */
+ ThreadLocalValue<bool> _releasedEarly;
+
+ /* this is for fsyncAndLock command. otherwise write lock's greediness will
+           make us block on any attempted write lock while the fsync's lock is held.
+ */
+ //volatile bool _blockWrites;
+ };
+
+ namespace dur {
+ void REMAPPRIVATEVIEW();
+ void releasingWriteLock(); // because it's hard to include dur.h here
+ }
+
+ inline void MongoMutex::_releasingWriteLock() {
+ dur::releasingWriteLock();
+ unlockingExclusively();
+ }
+
+ inline void MongoMutex::_acquiredWriteLock() {
+ lockedExclusively();
+ if( _remapPrivateViewRequested ) {
+ dur::REMAPPRIVATEVIEW();
+ dassert( !_remapPrivateViewRequested );
+ }
+ }
+
+ string sayClientState();
+
+ /* @return true if was already write locked. increments recursive lock count. */
+ inline bool MongoMutex::_writeLockedAlready() {
+ int s = _state.get();
+ if( s > 0 ) {
+ _state.set(s+1);
+ return true;
+ }
+ massert( 10293 , string("internal error: locks are not upgradeable: ") + sayClientState() , s == 0 );
+ return false;
+ }
+
+ struct writelock {
+ writelock() { d.dbMutex.lock(); }
+ writelock(const string& ns) { d.dbMutex.lock(); }
+ ~writelock() {
+ DESTRUCTOR_GUARD(
+ d.dbMutex.unlock();
+ );
+ }
+ };
+
+ struct readlock {
+ readlock(const string& ns) {
+ d.dbMutex.lock_shared();
+ }
+ readlock() { d.dbMutex.lock_shared(); }
+ ~readlock() {
+ DESTRUCTOR_GUARD(
+ d.dbMutex.unlock_shared();
+ );
+ }
+ };
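+
+    /* usage sketch (illustrative only):
+
+           {
+               writelock lk("somedb.somecoll");   // takes d.dbMutex exclusively
+               // ... mutate data ...
+           }                                      // released by the destructor
+
+       readlock works the same way but takes the shared side of the lock.
+    */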
+ struct readlocktry {
+ readlocktry( const string&ns , int tryms ) {
+ _got = d.dbMutex.lock_shared_try( tryms );
+ }
+ ~readlocktry() {
+ if ( _got ) {
+ d.dbMutex.unlock_shared();
+ }
+ }
+ bool got() const { return _got; }
+ private:
+ bool _got;
+ };
+
+ struct writelocktry {
+ writelocktry( const string&ns , int tryms ) {
+ _got = d.dbMutex.lock_try( tryms );
+ }
+ ~writelocktry() {
+ if ( _got ) {
+ d.dbMutex.unlock();
+ }
+ }
+ bool got() const { return _got; }
+ private:
+ bool _got;
+ };
+
+ struct readlocktryassert : public readlocktry {
+ readlocktryassert(const string& ns, int tryms) :
+ readlocktry(ns,tryms) {
+ uassert(13142, "timeout getting readlock", got());
+ }
+ };
+
+    /** assure we have at least a read lock - the key with this being
+ if you have a write lock, that's ok too.
+ */
+ struct atleastreadlock {
+ atleastreadlock( const string& ns = "" ) {
+ _prev = d.dbMutex.getState();
+ if ( _prev == 0 )
+ d.dbMutex.lock_shared();
+ }
+ ~atleastreadlock() {
+ if ( _prev == 0 )
+ d.dbMutex.unlock_shared();
+ }
+ private:
+ int _prev;
+ };
+
+ /* parameterized choice of read or write locking
+ use readlock and writelock instead of this when statically known which you want
+ */
+ class mongolock {
+ bool _writelock;
+ public:
+ mongolock(bool write) : _writelock(write) {
+ if( _writelock ) {
+ d.dbMutex.lock();
+ }
+ else
+ d.dbMutex.lock_shared();
+ }
+ ~mongolock() {
+ DESTRUCTOR_GUARD(
+ if( _writelock ) {
+ d.dbMutex.unlock();
+ }
+ else {
+ d.dbMutex.unlock_shared();
+ }
+ );
+ }
+ /* this unlocks, does NOT upgrade. that works for our current usage */
+ //void releaseAndWriteLock();
+ };
+
+ /* deprecated - use writelock and readlock instead */
+ struct dblock : public writelock {
+ dblock() : writelock("") { }
+ };
+
+ // eliminate this - we should just type "d.dbMutex.assertWriteLocked();" instead
+ inline void assertInWriteLock() { d.dbMutex.assertWriteLocked(); }
+
+}
diff --git a/src/mongo/db/namespace-inl.h b/src/mongo/db/namespace-inl.h
new file mode 100644
index 00000000000..a621a229546
--- /dev/null
+++ b/src/mongo/db/namespace-inl.h
@@ -0,0 +1,132 @@
+// @file namespace-inl.h
+
+/**
+* Copyright (C) 2009 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "namespace.h"
+
+namespace mongo {
+
+ inline Namespace& Namespace::operator=(const char *ns) {
+ // we fill the remaining space with all zeroes here. as the full Namespace struct is in
+ // the datafiles (the .ns files specifically), that is helpful as then they are deterministic
+ // in the bytes they have for a given sequence of operations. that makes testing and debugging
+ // the data files easier.
+ //
+ // if profiling indicates this method is a significant bottleneck, we could have a version we
+ // use for reads which does not fill with zeroes, and keep the zeroing behavior on writes.
+ //
+ unsigned len = strlen(ns);
+ uassert( 10080 , "ns name too long, max size is 128", len < MaxNsLen);
+ memset(buf, 0, MaxNsLen);
+ memcpy(buf, ns, len);
+ return *this;
+ }
+
+ inline string Namespace::extraName(int i) const {
+ char ex[] = "$extra";
+ ex[5] += i;
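+        // e.g. i == 0 yields "$extra", i == 1 yields "$extrb"; the trailing character is bumped per extra block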
+ string s = string(buf) + ex;
+ massert( 10348 , "$extra: ns name too long", s.size() < MaxNsLen);
+ return s;
+ }
+
+ inline bool Namespace::isExtra() const {
+ const char *p = strstr(buf, "$extr");
+ return p && p[5] && p[6] == 0; //==0 important in case an index uses name "$extra_1" for example
+ }
+
+ inline int Namespace::hash() const {
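+        // simple polynomial rolling hash (base 131) over the name bytes; forced positive and nonzero below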
+ unsigned x = 0;
+ const char *p = buf;
+ while ( *p ) {
+ x = x * 131 + *p;
+ p++;
+ }
+ return (x & 0x7fffffff) | 0x8000000; // must be > 0
+ }
+
+ /* future : this doesn't need to be an inline. */
+ inline string Namespace::getSisterNS( const char * local ) const {
+ assert( local && local[0] != '.' );
+ string old(buf);
+ if ( old.find( "." ) != string::npos )
+ old = old.substr( 0 , old.find( "." ) );
+ return old + "." + local;
+ }
+
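+    // idx(): index slots below NIndexesBase live in the inline _indexes array; higher slots spill
+    // into Extra blocks chained off extraOffset (see NamespaceDetails::allocExtra in namespace.cpp).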
+ inline IndexDetails& NamespaceDetails::idx(int idxNo, bool missingExpected ) {
+ if( idxNo < NIndexesBase ) {
+ IndexDetails& id = _indexes[idxNo];
+ return id;
+ }
+ Extra *e = extra();
+ if ( ! e ) {
+ if ( missingExpected )
+ throw MsgAssertionException( 13283 , "Missing Extra" );
+ massert(14045, "missing Extra", e);
+ }
+ int i = idxNo - NIndexesBase;
+ if( i >= NIndexesExtra ) {
+ e = e->next(this);
+ if ( ! e ) {
+ if ( missingExpected )
+ throw MsgAssertionException( 14823 , "missing extra" );
+ massert(14824, "missing Extra", e);
+ }
+ i -= NIndexesExtra;
+ }
+ return e->details[i];
+ }
+
+ inline int NamespaceDetails::idxNo(IndexDetails& idx) {
+ IndexIterator i = ii();
+ while( i.more() ) {
+ if( &i.next() == &idx )
+ return i.pos()-1;
+ }
+ massert( 10349 , "E12000 idxNo fails", false);
+ return -1;
+ }
+
+ inline int NamespaceDetails::findIndexByKeyPattern(const BSONObj& keyPattern) {
+ IndexIterator i = ii();
+ while( i.more() ) {
+ if( i.next().keyPattern() == keyPattern )
+ return i.pos()-1;
+ }
+ return -1;
+ }
+
+ // @return offset in indexes[]
+ inline int NamespaceDetails::findIndexByName(const char *name) {
+ IndexIterator i = ii();
+ while( i.more() ) {
+ if ( strcmp(i.next().info.obj().getStringField("name"),name) == 0 )
+ return i.pos()-1;
+ }
+ return -1;
+ }
+
+ inline NamespaceDetails::IndexIterator::IndexIterator(NamespaceDetails *_d) {
+ d = _d;
+ i = 0;
+ n = d->nIndexes;
+ }
+
+}
diff --git a/src/mongo/db/namespace.cpp b/src/mongo/db/namespace.cpp
new file mode 100644
index 00000000000..af8b5694248
--- /dev/null
+++ b/src/mongo/db/namespace.cpp
@@ -0,0 +1,800 @@
+// namespace.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "pdfile.h"
+#include "db.h"
+#include "mongommf.h"
+#include "../util/hashtab.h"
+#include "../scripting/engine.h"
+#include "btree.h"
+#include <algorithm>
+#include <list>
+#include "queryutil.h"
+#include "json.h"
+#include "ops/delete.h"
+#include "ops/query.h"
+
+namespace mongo {
+
+ BOOST_STATIC_ASSERT( sizeof(Namespace) == 128 );
+
+ BSONObj idKeyPattern = fromjson("{\"_id\":1}");
+
+ /* deleted lists -- linked lists of deleted records -- are placed in 'buckets' of various sizes
+       so you can look for a deleted record of about the right size.
+ */
+ int bucketSizes[] = {
+ 32, 64, 128, 256, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000,
+ 0x8000, 0x10000, 0x20000, 0x40000, 0x80000, 0x100000, 0x200000,
+ 0x400000, 0x800000
+ };
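+    // buckets range from 32 bytes up to 0x800000 (8MB); addDeletedRec below files a deleted record
+    // under bucket(lengthWithHeaders), and __stdAlloc searches from the matching bucket upward.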
+
+ NamespaceDetails::NamespaceDetails( const DiskLoc &loc, bool _capped ) {
+ /* be sure to initialize new fields here -- doesn't default to zeroes the way we use it */
+ firstExtent = lastExtent = capExtent = loc;
+ stats.datasize = stats.nrecords = 0;
+ lastExtentSize = 0;
+ nIndexes = 0;
+ capped = _capped;
+ max = 0x7fffffff;
+ paddingFactor = 1.0;
+ flags = 0;
+ capFirstNewRecord = DiskLoc();
+ // Signal that we are on first allocation iteration through extents.
+ capFirstNewRecord.setInvalid();
+ // For capped case, signal that we are doing initial extent allocation.
+ if ( capped )
+ cappedLastDelRecLastExtent().setInvalid();
+ assert( sizeof(dataFileVersion) == 2 );
+ dataFileVersion = 0;
+ indexFileVersion = 0;
+ multiKeyIndexBits = 0;
+ reservedA = 0;
+ extraOffset = 0;
+ indexBuildInProgress = 0;
+ reservedB = 0;
+ capped2.cc2_ptr = 0;
+ capped2.fileNumber = 0;
+ memset(reserved, 0, sizeof(reserved));
+ }
+
+ bool NamespaceIndex::exists() const {
+ return !MMF::exists(path());
+ }
+
+ boost::filesystem::path NamespaceIndex::path() const {
+ boost::filesystem::path ret( dir_ );
+ if ( directoryperdb )
+ ret /= database_;
+ ret /= ( database_ + ".ns" );
+ return ret;
+ }
+
+ void NamespaceIndex::maybeMkdir() const {
+ if ( !directoryperdb )
+ return;
+ boost::filesystem::path dir( dir_ );
+ dir /= database_;
+ if ( !boost::filesystem::exists( dir ) )
+ MONGO_BOOST_CHECK_EXCEPTION_WITH_MSG( boost::filesystem::create_directory( dir ), "create dir for db " );
+ }
+
+ unsigned lenForNewNsFiles = 16 * 1024 * 1024;
+
+#if defined(_DEBUG)
+ void NamespaceDetails::dump(const Namespace& k) {
+ if( !cmdLine.dur )
+ cout << "ns offsets which follow will not display correctly with --journal disabled" << endl;
+
+ size_t ofs = 1; // 1 is sentinel that the find call below failed
+ privateViews.find(this, /*out*/ofs);
+
+ cout << "ns" << hex << setw(8) << ofs << ' ';
+ cout << k.toString() << '\n';
+
+ if( k.isExtra() ) {
+ cout << "ns\t extra" << endl;
+ return;
+ }
+
+ cout << "ns " << firstExtent.toString() << ' ' << lastExtent.toString() << " nidx:" << nIndexes << '\n';
+ cout << "ns " << stats.datasize << ' ' << stats.nrecords << ' ' << nIndexes << '\n';
+ cout << "ns " << capped << ' ' << paddingFactor << ' ' << flags << ' ' << dataFileVersion << '\n';
+ cout << "ns " << multiKeyIndexBits << ' ' << indexBuildInProgress << '\n';
+ cout << "ns " << (int) reserved[0] << ' ' << (int) reserved[59];
+ cout << endl;
+ }
+#endif
+
+ void NamespaceDetails::onLoad(const Namespace& k) {
+
+ if( k.isExtra() ) {
+ /* overflow storage for indexes - so don't treat as a NamespaceDetails object. */
+ return;
+ }
+
+ if( indexBuildInProgress || capped2.cc2_ptr ) {
+ assertInWriteLock();
+ if( indexBuildInProgress ) {
+ log() << "indexBuildInProgress was " << indexBuildInProgress << " for " << k << ", indicating an abnormal db shutdown" << endl;
+ getDur().writingInt( indexBuildInProgress ) = 0;
+ }
+ if( capped2.cc2_ptr )
+ *getDur().writing(&capped2.cc2_ptr) = 0;
+ }
+ }
+
+ static void namespaceOnLoadCallback(const Namespace& k, NamespaceDetails& v) {
+ v.onLoad(k);
+ }
+
+ bool checkNsFilesOnLoad = true;
+
+ NOINLINE_DECL void NamespaceIndex::_init() {
+ assert( !ht );
+
+ d.dbMutex.assertWriteLocked();
+
+ /* if someone manually deleted the datafiles for a database,
+ we need to be sure to clear any cached info for the database in
+ local.*.
+ */
+ /*
+ if ( "local" != database_ ) {
+ DBInfo i(database_.c_str());
+ i.dbDropped();
+ }
+ */
+
+ unsigned long long len = 0;
+ boost::filesystem::path nsPath = path();
+ string pathString = nsPath.string();
+ void *p = 0;
+ if( MMF::exists(nsPath) ) {
+ if( f.open(pathString, true) ) {
+ len = f.length();
+ if ( len % (1024*1024) != 0 ) {
+ log() << "bad .ns file: " << pathString << endl;
+ uassert( 10079 , "bad .ns file length, cannot open database", len % (1024*1024) == 0 );
+ }
+ p = f.getView();
+ }
+ }
+ else {
+ // use lenForNewNsFiles, we are making a new database
+ massert( 10343, "bad lenForNewNsFiles", lenForNewNsFiles >= 1024*1024 );
+ maybeMkdir();
+ unsigned long long l = lenForNewNsFiles;
+ if( f.create(pathString, l, true) ) {
+ getDur().createdFile(pathString, l); // always a new file
+ len = l;
+ assert( len == lenForNewNsFiles );
+ p = f.getView();
+ }
+ }
+
+ if ( p == 0 ) {
+ /** TODO: this shouldn't terminate? */
+ log() << "error couldn't open file " << pathString << " terminating" << endl;
+ dbexit( EXIT_FS );
+ }
+
+
+ assert( len <= 0x7fffffff );
+ ht = new HashTable<Namespace,NamespaceDetails>(p, (int) len, "namespace index");
+ if( checkNsFilesOnLoad )
+ ht->iterAll(namespaceOnLoadCallback);
+ }
+
+ static void namespaceGetNamespacesCallback( const Namespace& k , NamespaceDetails& v , void * extra ) {
+ list<string> * l = (list<string>*)extra;
+ if ( ! k.hasDollarSign() )
+ l->push_back( (string)k );
+ }
+ void NamespaceIndex::getNamespaces( list<string>& tofill , bool onlyCollections ) const {
+ assert( onlyCollections ); // TODO: need to implement this
+ // need boost::bind or something to make this less ugly
+
+ if ( ht )
+ ht->iterAll( namespaceGetNamespacesCallback , (void*)&tofill );
+ }
+
+ void NamespaceDetails::addDeletedRec(DeletedRecord *d, DiskLoc dloc) {
+ BOOST_STATIC_ASSERT( sizeof(NamespaceDetails::Extra) <= sizeof(NamespaceDetails) );
+
+ {
+ Record *r = (Record *) getDur().writingPtr(d, sizeof(Record));
+ d = &r->asDeleted();
+ // defensive code: try to make us notice if we reference a deleted record
+ (unsigned&) (r->data) = 0xeeeeeeee;
+ }
+ DEBUGGING log() << "TEMP: add deleted rec " << dloc.toString() << ' ' << hex << d->extentOfs << endl;
+ if ( capped ) {
+ if ( !cappedLastDelRecLastExtent().isValid() ) {
+ // Initial extent allocation. Insert at end.
+ d->nextDeleted = DiskLoc();
+ if ( cappedListOfAllDeletedRecords().isNull() )
+ getDur().writingDiskLoc( cappedListOfAllDeletedRecords() ) = dloc;
+ else {
+ DiskLoc i = cappedListOfAllDeletedRecords();
+ for (; !i.drec()->nextDeleted.isNull(); i = i.drec()->nextDeleted )
+ ;
+ i.drec()->nextDeleted.writing() = dloc;
+ }
+ }
+ else {
+ d->nextDeleted = cappedFirstDeletedInCurExtent();
+ getDur().writingDiskLoc( cappedFirstDeletedInCurExtent() ) = dloc;
+ // always compact() after this so order doesn't matter
+ }
+ }
+ else {
+ int b = bucket(d->lengthWithHeaders);
+ DiskLoc& list = deletedList[b];
+ DiskLoc oldHead = list;
+ getDur().writingDiskLoc(list) = dloc;
+ d->nextDeleted = oldHead;
+ }
+ }
+
+ /* predetermine location of the next alloc without actually doing it.
+ if cannot predetermine returns null (so still call alloc() then)
+ */
+ DiskLoc NamespaceDetails::allocWillBeAt(const char *ns, int lenToAlloc) {
+ if ( !capped ) {
+ lenToAlloc = (lenToAlloc + 3) & 0xfffffffc;
+ return __stdAlloc(lenToAlloc, true);
+ }
+ return DiskLoc();
+ }
+
+ /** allocate space for a new record from deleted lists.
+ @param lenToAlloc is WITH header
+ @param extentLoc OUT returns the extent location
+ @return null diskloc if no room - allocate a new extent then
+ */
+ DiskLoc NamespaceDetails::alloc(const char *ns, int lenToAlloc, DiskLoc& extentLoc) {
+ {
+ // align very slightly.
+ // note that if doing more coarse-grained quantization (really just if it isn't always
+ // a constant amount but if it varied by record size) then that quantization should
+ // NOT be done here but rather in __stdAlloc so that we can grab a deletedrecord that
+ // is just big enough if we happen to run into one.
+ lenToAlloc = (lenToAlloc + 3) & 0xfffffffc;
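+            // e.g. a 53 byte request rounds up to 56, the next multiple of 4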
+ }
+
+ DiskLoc loc = _alloc(ns, lenToAlloc);
+ if ( loc.isNull() )
+ return loc;
+
+ const DeletedRecord *r = loc.drec();
+ //r = getDur().writing(r);
+
+ /* note we want to grab from the front so our next pointers on disk tend
+ to go in a forward direction which is important for performance. */
+ int regionlen = r->lengthWithHeaders;
+ extentLoc.set(loc.a(), r->extentOfs);
+ assert( r->extentOfs < loc.getOfs() );
+
+ DEBUGGING out() << "TEMP: alloc() returns " << loc.toString() << ' ' << ns << " lentoalloc:" << lenToAlloc << " ext:" << extentLoc.toString() << endl;
+
+ int left = regionlen - lenToAlloc;
+ if ( capped == 0 ) {
+ if ( left < 24 || left < (lenToAlloc >> 3) ) {
+ // you get the whole thing.
+ return loc;
+ }
+ }
+
+ /* split off some for further use. */
+ getDur().writingInt(r->lengthWithHeaders) = lenToAlloc;
+ DiskLoc newDelLoc = loc;
+ newDelLoc.inc(lenToAlloc);
+ DeletedRecord *newDel = DataFileMgr::makeDeletedRecord(newDelLoc, left);
+ DeletedRecord *newDelW = getDur().writing(newDel);
+ newDelW->extentOfs = r->extentOfs;
+ newDelW->lengthWithHeaders = left;
+ newDelW->nextDeleted.Null();
+
+ addDeletedRec(newDel, newDelLoc);
+
+ return loc;
+ }
+
+ /* for non-capped collections.
+ @param peekOnly just look up where and don't reserve
+ returned item is out of the deleted list upon return
+ */
+ DiskLoc NamespaceDetails::__stdAlloc(int len, bool peekOnly) {
+ DiskLoc *prev;
+ DiskLoc *bestprev = 0;
+ DiskLoc bestmatch;
+ int bestmatchlen = 0x7fffffff;
+ int b = bucket(len);
+ DiskLoc cur = deletedList[b];
+ prev = &deletedList[b];
+ int extra = 5; // look for a better fit, a little.
+ int chain = 0;
+ while ( 1 ) {
+ {
+ int a = cur.a();
+ if ( a < -1 || a >= 100000 ) {
+ problem() << "~~ Assertion - cur out of range in _alloc() " << cur.toString() <<
+ " a:" << a << " b:" << b << " chain:" << chain << '\n';
+ sayDbContext();
+ if ( cur == *prev )
+ prev->Null();
+ cur.Null();
+ }
+ }
+ if ( cur.isNull() ) {
+ // move to next bucket. if we were doing "extra", just break
+ if ( bestmatchlen < 0x7fffffff )
+ break;
+ b++;
+ if ( b > MaxBucket ) {
+ // out of space. alloc a new extent.
+ return DiskLoc();
+ }
+ cur = deletedList[b];
+ prev = &deletedList[b];
+ continue;
+ }
+ DeletedRecord *r = cur.drec();
+ if ( r->lengthWithHeaders >= len &&
+ r->lengthWithHeaders < bestmatchlen ) {
+ bestmatchlen = r->lengthWithHeaders;
+ bestmatch = cur;
+ bestprev = prev;
+ }
+ if ( bestmatchlen < 0x7fffffff && --extra <= 0 )
+ break;
+ if ( ++chain > 30 && b < MaxBucket ) {
+ // too slow, force move to next bucket to grab a big chunk
+ //b++;
+ chain = 0;
+ cur.Null();
+ }
+ else {
+ /*this defensive check only made sense for the mmap storage engine:
+ if ( r->nextDeleted.getOfs() == 0 ) {
+ problem() << "~~ Assertion - bad nextDeleted " << r->nextDeleted.toString() <<
+ " b:" << b << " chain:" << chain << ", fixing.\n";
+ r->nextDeleted.Null();
+ }*/
+ cur = r->nextDeleted;
+ prev = &r->nextDeleted;
+ }
+ }
+
+ /* unlink ourself from the deleted list */
+ if( !peekOnly ) {
+ const DeletedRecord *bmr = bestmatch.drec();
+ *getDur().writing(bestprev) = bmr->nextDeleted;
+ bmr->nextDeleted.writing().setInvalid(); // defensive.
+ assert(bmr->extentOfs < bestmatch.getOfs());
+ }
+
+ return bestmatch;
+ }
+
+ void NamespaceDetails::dumpDeleted(set<DiskLoc> *extents) {
+ for ( int i = 0; i < Buckets; i++ ) {
+ DiskLoc dl = deletedList[i];
+ while ( !dl.isNull() ) {
+ DeletedRecord *r = dl.drec();
+ DiskLoc extLoc(dl.a(), r->extentOfs);
+ if ( extents == 0 || extents->count(extLoc) <= 0 ) {
+ out() << " bucket " << i << endl;
+ out() << " " << dl.toString() << " ext:" << extLoc.toString();
+ if ( extents && extents->count(extLoc) <= 0 )
+ out() << '?';
+ out() << " len:" << r->lengthWithHeaders << endl;
+ }
+ dl = r->nextDeleted;
+ }
+ }
+ }
+
+ DiskLoc NamespaceDetails::firstRecord( const DiskLoc &startExtent ) const {
+ for (DiskLoc i = startExtent.isNull() ? firstExtent : startExtent;
+ !i.isNull(); i = i.ext()->xnext ) {
+ if ( !i.ext()->firstRecord.isNull() )
+ return i.ext()->firstRecord;
+ }
+ return DiskLoc();
+ }
+
+ DiskLoc NamespaceDetails::lastRecord( const DiskLoc &startExtent ) const {
+ for (DiskLoc i = startExtent.isNull() ? lastExtent : startExtent;
+ !i.isNull(); i = i.ext()->xprev ) {
+ if ( !i.ext()->lastRecord.isNull() )
+ return i.ext()->lastRecord;
+ }
+ return DiskLoc();
+ }
+
+ int n_complaints_cap = 0;
+ void NamespaceDetails::maybeComplain( const char *ns, int len ) const {
+ if ( ++n_complaints_cap < 8 ) {
+ out() << "couldn't make room for new record (len: " << len << ") in capped ns " << ns << '\n';
+ int i = 0;
+ for ( DiskLoc e = firstExtent; !e.isNull(); e = e.ext()->xnext, ++i ) {
+ out() << " Extent " << i;
+ if ( e == capExtent )
+ out() << " (capExtent)";
+ out() << '\n';
+ out() << " magic: " << hex << e.ext()->magic << dec << " extent->ns: " << e.ext()->nsDiagnostic.toString() << '\n';
+ out() << " fr: " << e.ext()->firstRecord.toString() <<
+ " lr: " << e.ext()->lastRecord.toString() << " extent->len: " << e.ext()->length << '\n';
+ }
+            assert( len * 5 > lastExtentSize ); // assume it is an unusually large record; if not, something is broken
+ }
+ }
+
+ /* alloc with capped table handling. */
+ DiskLoc NamespaceDetails::_alloc(const char *ns, int len) {
+ if ( !capped )
+ return __stdAlloc(len, false);
+
+ return cappedAlloc(ns,len);
+ }
+
+ void NamespaceIndex::kill_ns(const char *ns) {
+ d.dbMutex.assertWriteLocked();
+ if ( !ht )
+ return;
+ Namespace n(ns);
+ ht->kill(n);
+
+ for( int i = 0; i<=1; i++ ) {
+ try {
+ Namespace extra(n.extraName(i).c_str());
+ ht->kill(extra);
+ }
+ catch(DBException&) {
+ dlog(3) << "caught exception in kill_ns" << endl;
+ }
+ }
+ }
+
+ void NamespaceIndex::add_ns(const char *ns, DiskLoc& loc, bool capped) {
+ NamespaceDetails details( loc, capped );
+ add_ns( ns, details );
+ }
+ void NamespaceIndex::add_ns( const char *ns, const NamespaceDetails &details ) {
+ d.dbMutex.assertWriteLocked();
+ init();
+ Namespace n(ns);
+ uassert( 10081 , "too many namespaces/collections", ht->put(n, details));
+ }
+
+ /* extra space for indexes when more than 10 */
+ NamespaceDetails::Extra* NamespaceIndex::newExtra(const char *ns, int i, NamespaceDetails *d) {
+ mongo::d.dbMutex.assertWriteLocked();
+ assert( i >= 0 && i <= 1 );
+ Namespace n(ns);
+ Namespace extra(n.extraName(i).c_str()); // throws userexception if ns name too long
+
+ massert( 10350 , "allocExtra: base ns missing?", d );
+ massert( 10351 , "allocExtra: extra already exists", ht->get(extra) == 0 );
+
+ NamespaceDetails::Extra temp;
+ temp.init();
+ uassert( 10082 , "allocExtra: too many namespaces/collections", ht->put(extra, (NamespaceDetails&) temp));
+ NamespaceDetails::Extra *e = (NamespaceDetails::Extra *) ht->get(extra);
+ return e;
+ }
+ NamespaceDetails::Extra* NamespaceDetails::allocExtra(const char *ns, int nindexessofar) {
+ NamespaceIndex *ni = nsindex(ns);
+ int i = (nindexessofar - NIndexesBase) / NIndexesExtra;
+ Extra *e = ni->newExtra(ns, i, this);
+ long ofs = e->ofsFrom(this);
+ if( i == 0 ) {
+ assert( extraOffset == 0 );
+ *getDur().writing(&extraOffset) = ofs;
+ assert( extra() == e );
+ }
+ else {
+ Extra *hd = extra();
+ assert( hd->next(this) == 0 );
+ hd->setNext(ofs);
+ }
+ return e;
+ }
+
+    /* you MUST call this when adding an index. see pdfile.cpp */
+ IndexDetails& NamespaceDetails::addIndex(const char *thisns, bool resetTransient) {
+ IndexDetails *id;
+ try {
+ id = &idx(nIndexes,true);
+ }
+ catch(DBException&) {
+ allocExtra(thisns, nIndexes);
+ id = &idx(nIndexes,false);
+ }
+
+ (*getDur().writing(&nIndexes))++;
+ if ( resetTransient )
+ NamespaceDetailsTransient::get(thisns).addedIndex();
+ return *id;
+ }
+
+ // must be called when renaming a NS to fix up extra
+ void NamespaceDetails::copyingFrom(const char *thisns, NamespaceDetails *src) {
+ extraOffset = 0; // we are a copy -- the old value is wrong. fixing it up below.
+ Extra *se = src->extra();
+ int n = NIndexesBase;
+ if( se ) {
+ Extra *e = allocExtra(thisns, n);
+ while( 1 ) {
+ n += NIndexesExtra;
+ e->copy(this, *se);
+ se = se->next(src);
+ if( se == 0 ) break;
+ Extra *nxt = allocExtra(thisns, n);
+ e->setNext( nxt->ofsFrom(this) );
+ e = nxt;
+ }
+ assert( extraOffset );
+ }
+ }
+
+ /* returns index of the first index in which the field is present. -1 if not present.
+ (aug08 - this method not currently used)
+ */
+ int NamespaceDetails::fieldIsIndexed(const char *fieldName) {
+ massert( 10346 , "not implemented", false);
+ /*
+ for ( int i = 0; i < nIndexes; i++ ) {
+ IndexDetails& idx = indexes[i];
+ BSONObj idxKey = idx.info.obj().getObjectField("key"); // e.g., { ts : -1 }
+ if ( !idxKey.getField(fieldName).eoo() )
+ return i;
+ }*/
+ return -1;
+ }
+
+ long long NamespaceDetails::storageSize( int * numExtents , BSONArrayBuilder * extentInfo ) const {
+ Extent * e = firstExtent.ext();
+ assert( e );
+
+ long long total = 0;
+ int n = 0;
+ while ( e ) {
+ total += e->length;
+ n++;
+
+ if ( extentInfo ) {
+                extentInfo->append( BSON( "len" << e->length << "loc" << e->myLoc.toBSONObj() ) );
+ }
+
+ e = e->getNextExtent();
+ }
+
+ if ( numExtents )
+ *numExtents = n;
+
+ return total;
+ }
+
+ NamespaceDetails *NamespaceDetails::writingWithExtra() {
+ vector< pair< long long, unsigned > > writeRanges;
+ writeRanges.push_back( make_pair( 0, sizeof( NamespaceDetails ) ) );
+ for( Extra *e = extra(); e; e = e->next( this ) ) {
+ writeRanges.push_back( make_pair( (char*)e - (char*)this, sizeof( Extra ) ) );
+ }
+ return reinterpret_cast< NamespaceDetails* >( getDur().writingRangesAtOffsets( this, writeRanges ) );
+ }
+
+ /* ------------------------------------------------------------------------- */
+
+ SimpleMutex NamespaceDetailsTransient::_qcMutex("qc");
+ SimpleMutex NamespaceDetailsTransient::_isMutex("is");
+ map< string, shared_ptr< NamespaceDetailsTransient > > NamespaceDetailsTransient::_nsdMap;
+ typedef map< string, shared_ptr< NamespaceDetailsTransient > >::iterator ouriter;
+
+ void NamespaceDetailsTransient::reset() {
+ DEV assertInWriteLock();
+ clearQueryCache();
+ _keysComputed = false;
+ _indexSpecs.clear();
+ }
+
+ /*static*/ NOINLINE_DECL NamespaceDetailsTransient& NamespaceDetailsTransient::make_inlock(const char *ns) {
+ shared_ptr< NamespaceDetailsTransient > &t = _nsdMap[ ns ];
+ assert( t.get() == 0 );
+ Database *database = cc().database();
+ assert( database );
+ if( _nsdMap.size() % 20000 == 10000 ) {
+ // so we notice if insanely large #s
+ log() << "opening namespace " << ns << endl;
+ log() << _nsdMap.size() << " namespaces in nsdMap" << endl;
+ }
+ t.reset( new NamespaceDetailsTransient(database, ns) );
+ return *t;
+ }
+
+ // note with repair there could be two databases with the same ns name.
+ // that is NOT handled here yet! TODO
+ // repair may not use nsdt though not sure. anyway, requires work.
+ NamespaceDetailsTransient::NamespaceDetailsTransient(Database *db, const char *ns) :
+ _ns(ns), _keysComputed(false), _qcWriteCount()
+ {
+ dassert(db);
+ }
+
+ NamespaceDetailsTransient::~NamespaceDetailsTransient() {
+ }
+
+ void NamespaceDetailsTransient::clearForPrefix(const char *prefix) {
+ assertInWriteLock();
+ vector< string > found;
+ for( ouriter i = _nsdMap.begin(); i != _nsdMap.end(); ++i )
+ if ( strncmp( i->first.c_str(), prefix, strlen( prefix ) ) == 0 )
+ found.push_back( i->first );
+ for( vector< string >::iterator i = found.begin(); i != found.end(); ++i ) {
+ _nsdMap[ *i ].reset();
+ }
+ }
+
+ void NamespaceDetailsTransient::eraseForPrefix(const char *prefix) {
+ assertInWriteLock();
+ vector< string > found;
+ for( ouriter i = _nsdMap.begin(); i != _nsdMap.end(); ++i )
+ if ( strncmp( i->first.c_str(), prefix, strlen( prefix ) ) == 0 )
+ found.push_back( i->first );
+ for( vector< string >::iterator i = found.begin(); i != found.end(); ++i ) {
+ _nsdMap.erase(*i);
+ }
+ }
+
+ void NamespaceDetailsTransient::computeIndexKeys() {
+ _keysComputed = true;
+ _indexKeys.clear();
+ NamespaceDetails *d = nsdetails(_ns.c_str());
+ if ( ! d )
+ return;
+ NamespaceDetails::IndexIterator i = d->ii();
+ while( i.more() )
+ i.next().keyPattern().getFieldNames(_indexKeys);
+ }
+
+
+ /* ------------------------------------------------------------------------- */
+
+ /* add a new namespace to the system catalog (<dbname>.system.namespaces).
+ options: { capped : ..., size : ... }
+ */
+ void addNewNamespaceToCatalog(const char *ns, const BSONObj *options = 0) {
+ LOG(1) << "New namespace: " << ns << endl;
+ if ( strstr(ns, "system.namespaces") ) {
+ // system.namespaces holds all the others, so it is not explicitly listed in the catalog.
+ // TODO: fix the check above -- it should not be strstr!
+ return;
+ }
+
+ {
+ BSONObjBuilder b;
+ b.append("name", ns);
+ if ( options )
+ b.append("options", *options);
+ BSONObj j = b.done();
+ char database[256];
+ nsToDatabase(ns, database);
+ string s = database;
+ if( cmdLine.configsvr && (s != "config" && s != "admin") ) {
+ uasserted(14037, "can't create user databases on a --configsvr instance");
+ }
+ s += ".system.namespaces";
+ theDataFileMgr.insert(s.c_str(), j.objdata(), j.objsize(), true);
+ }
+ }
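+
+ /* For illustration (names and values hypothetical): creating a capped collection
+ "test.cap" with options { capped: true, size: 1024 } results in a catalog document
+ shaped like { name: "test.cap", options: { capped: true, size: 1024 } } being inserted
+ into test.system.namespaces. A sketch of building that document with the same builder
+ calls used above: */
+ static inline BSONObj catalogEntrySketch() {
+ BSONObjBuilder b;
+ b.append( "name" , "test.cap" ); // hypothetical namespace
+ b.append( "options" , BSON( "capped" << true << "size" << 1024 ) );
+ return b.obj();
+ }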
+
+ void renameNamespace( const char *from, const char *to ) {
+ NamespaceIndex *ni = nsindex( from );
+ assert( ni );
+ assert( ni->details( from ) );
+ assert( ! ni->details( to ) );
+
+ // Our namespace and index details will move to a different
+ // memory location. The only references to namespace and
+ // index details across commands are in cursors and nsd
+ // transient (including query cache) so clear these.
+ ClientCursor::invalidate( from );
+ NamespaceDetailsTransient::eraseForPrefix( from );
+
+ NamespaceDetails *details = ni->details( from );
+ ni->add_ns( to, *details );
+ NamespaceDetails *todetails = ni->details( to );
+ try {
+ todetails->copyingFrom(to, details); // fixes extraOffset
+ }
+ catch( DBException& ) {
+ // could end up here if .ns is full - if so try to clean up / roll back a little
+ ni->kill_ns(to);
+ throw;
+ }
+ ni->kill_ns( from );
+ details = todetails;
+
+ BSONObj oldSpec;
+ char database[MaxDatabaseNameLen];
+ nsToDatabase(from, database);
+ string s = database;
+ s += ".system.namespaces";
+ assert( Helpers::findOne( s.c_str(), BSON( "name" << from ), oldSpec ) );
+
+ BSONObjBuilder newSpecB;
+ BSONObjIterator i( oldSpec.getObjectField( "options" ) );
+ while( i.more() ) {
+ BSONElement e = i.next();
+ if ( strcmp( e.fieldName(), "create" ) != 0 )
+ newSpecB.append( e );
+ else
+ newSpecB << "create" << to;
+ }
+ BSONObj newSpec = newSpecB.done();
+ addNewNamespaceToCatalog( to, newSpec.isEmpty() ? 0 : &newSpec );
+
+ deleteObjects( s.c_str(), BSON( "name" << from ), false, false, true );
+ // oldSpec variable no longer valid memory
+
+ BSONObj oldIndexSpec;
+ s = database;
+ s += ".system.indexes";
+ while( Helpers::findOne( s.c_str(), BSON( "ns" << from ), oldIndexSpec ) ) {
+ BSONObjBuilder newIndexSpecB;
+ BSONObjIterator i( oldIndexSpec );
+ while( i.more() ) {
+ BSONElement e = i.next();
+ if ( strcmp( e.fieldName(), "ns" ) != 0 )
+ newIndexSpecB.append( e );
+ else
+ newIndexSpecB << "ns" << to;
+ }
+ BSONObj newIndexSpec = newIndexSpecB.done();
+ DiskLoc newIndexSpecLoc = theDataFileMgr.insert( s.c_str(), newIndexSpec.objdata(), newIndexSpec.objsize(), true, false );
+ int indexI = details->findIndexByName( oldIndexSpec.getStringField( "name" ) );
+ IndexDetails &indexDetails = details->idx(indexI);
+ string oldIndexNs = indexDetails.indexNamespace();
+ indexDetails.info = newIndexSpecLoc;
+ string newIndexNs = indexDetails.indexNamespace();
+
+ renameIndexNamespace( oldIndexNs.c_str(), newIndexNs.c_str() );
+ deleteObjects( s.c_str(), oldIndexSpec.getOwned(), true, false, true );
+ }
+ }
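+
+ /* For illustration (names hypothetical): renaming db.from to db.to rewrites an index
+ catalog entry such as { ns: "db.from", name: "x_1", key: { x: 1 } } into
+ { ns: "db.to", name: "x_1", key: { x: 1 } }, inserts the rewritten spec into
+ db.system.indexes, repoints IndexDetails::info at the new record, and renames the
+ underlying index namespace db.from.$x_1 to db.to.$x_1. */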
+
+ bool legalClientSystemNS( const string& ns , bool write ) {
+ if( ns == "local.system.replset" ) return true;
+
+ if ( ns.find( ".system.users" ) != string::npos )
+ return true;
+
+ if ( ns.find( ".system.js" ) != string::npos ) {
+ if ( write )
+ Scope::storedFuncMod();
+ return true;
+ }
+
+ return false;
+ }
+
+} // namespace mongo
diff --git a/src/mongo/db/namespace.h b/src/mongo/db/namespace.h
new file mode 100644
index 00000000000..9ceb6a6f4e9
--- /dev/null
+++ b/src/mongo/db/namespace.h
@@ -0,0 +1,629 @@
+// namespace.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../pch.h"
+#include "namespacestring.h"
+#include "jsobj.h"
+#include "querypattern.h"
+#include "diskloc.h"
+#include "../util/hashtab.h"
+#include "mongommf.h"
+#include "d_concurrency.h"
+
+namespace mongo {
+
+ class Database;
+
+#pragma pack(1)
+ /* This helper class is used to make the HashTable below in NamespaceIndex e.g. see line:
+ HashTable<Namespace,NamespaceDetails> *ht;
+ */
+ class Namespace {
+ public:
+ explicit Namespace(const char *ns) { *this = ns; }
+ Namespace& operator=(const char *ns);
+
+ bool hasDollarSign() const { return strchr( buf , '$' ) != 0; }
+ void kill() { buf[0] = 0x7f; }
+ bool operator==(const char *r) const { return strcmp(buf, r) == 0; }
+ bool operator==(const Namespace& r) const { return strcmp(buf, r.buf) == 0; }
+ int hash() const; // value returned is always > 0
+
+ size_t size() const { return strlen( buf ); }
+
+ string toString() const { return (string) buf; }
+ operator string() const { return (string) buf; }
+
+ /* NamespaceDetails::Extra was added after fact to allow chaining of data blocks to support more than 10 indexes
+ (more than 10 IndexDetails). It's a bit hacky because of this late addition with backward
+ file support. */
+ string extraName(int i) const;
+ bool isExtra() const; /* ends with $extr... -- when true this is an extra block, not a normal NamespaceDetails block */
+
+ /** ( foo.bar ).getSisterNS( "blah" ) == foo.blah
+ perhaps this should move to the NamespaceString helper?
+ */
+ string getSisterNS( const char * local ) const;
+
+ enum MaxNsLenValue { MaxNsLen = 128 };
+ private:
+ char buf[MaxNsLen];
+ };
+#pragma pack()
+
+} // namespace mongo
+
+#include "index.h"
+
+namespace mongo {
+
+ /** @return true if a client can modify this namespace even though it is under ".system."
+ For example <dbname>.system.users is ok for regular clients to update.
+ @param write used when .system.js
+ */
+ bool legalClientSystemNS( const string& ns , bool write );
+
+ /* deleted lists -- linked lists of deleted records -- are placed in 'buckets' of various sizes
+ so you can look for a DeletedRecord of about the right size.
+ */
+ const int Buckets = 19;
+ const int MaxBucket = 18;
+
+ extern int bucketSizes[];
+
+#pragma pack(1)
+ /* NamespaceDetails : this is the "header" for a collection that has all its details.
+ It's in the .ns file and this is a memory mapped region (thus the pack pragma above).
+ */
+ class NamespaceDetails {
+ public:
+ enum { NIndexesMax = 64, NIndexesExtra = 30, NIndexesBase = 10 };
+
+ /*-------- data fields, as present on disk : */
+ DiskLoc firstExtent;
+ DiskLoc lastExtent;
+ /* NOTE: capped collections v1 override the meaning of deletedList.
+ deletedList[0] points to a list of free records (DeletedRecord's) for all extents in
+ the capped namespace.
+ deletedList[1] points to the last record in the prev extent. When the "current extent"
+ changes, this value is updated. !deletedList[1].isValid() when this value is not
+ yet computed.
+ */
+ DiskLoc deletedList[Buckets];
+ // ofs 168 (8 byte aligned)
+ struct Stats {
+ // datasize and nrecords MUST be adjacent -- code assumes this!
+ long long datasize; // this includes padding, but not record headers
+ long long nrecords;
+ } stats;
+ int lastExtentSize;
+ int nIndexes;
+ private:
+ // ofs 192
+ IndexDetails _indexes[NIndexesBase];
+ public:
+ // ofs 352 (16 byte aligned)
+ int capped;
+ int max; // max # of objects for a capped table. TODO: should this be 64 bit?
+ double paddingFactor; // 1.0 = no padding.
+ // ofs 368 (16)
+ int flags;
+ DiskLoc capExtent;
+ DiskLoc capFirstNewRecord;
+ unsigned short dataFileVersion; // NamespaceDetails version. So we can do backward compatibility in the future. See filever.h
+ unsigned short indexFileVersion;
+ unsigned long long multiKeyIndexBits;
+ private:
+ // ofs 400 (16)
+ unsigned long long reservedA;
+ long long extraOffset; // where the $extra info is located (bytes relative to this)
+ public:
+ int indexBuildInProgress; // 1 if in prog
+ unsigned reservedB;
+ // ofs 424 (8)
+ struct Capped2 {
+ unsigned long long cc2_ptr; // see capped.cpp
+ unsigned fileNumber;
+ } capped2;
+ char reserved[60];
+ /*-------- end data 496 bytes */
+
+ explicit NamespaceDetails( const DiskLoc &loc, bool _capped );
+
+ class Extra {
+ long long _next;
+ public:
+ IndexDetails details[NIndexesExtra];
+ private:
+ unsigned reserved2;
+ unsigned reserved3;
+ Extra(const Extra&) { assert(false); }
+ Extra& operator=(const Extra& r) { assert(false); return *this; }
+ public:
+ Extra() { }
+ long ofsFrom(NamespaceDetails *d) {
+ return ((char *) this) - ((char *) d);
+ }
+ void init() { memset(this, 0, sizeof(Extra)); }
+ Extra* next(NamespaceDetails *d) {
+ if( _next == 0 ) return 0;
+ return (Extra*) (((char *) d) + _next);
+ }
+ void setNext(long ofs) { *getDur().writing(&_next) = ofs; }
+ void copy(NamespaceDetails *d, const Extra& e) {
+ memcpy(this, &e, sizeof(Extra));
+ _next = 0;
+ }
+ };
+ Extra* extra() {
+ if( extraOffset == 0 ) return 0;
+ return (Extra *) (((char *) this) + extraOffset);
+ }
+ /* add extra space for indexes when more than 10 */
+ Extra* allocExtra(const char *ns, int nindexessofar);
+ void copyingFrom(const char *thisns, NamespaceDetails *src); // must be called when renaming a NS to fix up extra
+
+ /* called when loaded from disk */
+ void onLoad(const Namespace& k);
+
+ /* dump info on this namespace. for debugging. */
+ void dump(const Namespace& k);
+
+ /* dump info on all extents for this namespace. for debugging. */
+ void dumpExtents();
+
+ private:
+ Extent *theCapExtent() const { return capExtent.ext(); }
+ void advanceCapExtent( const char *ns );
+ DiskLoc __capAlloc(int len);
+ DiskLoc cappedAlloc(const char *ns, int len);
+ DiskLoc &cappedFirstDeletedInCurExtent();
+ bool nextIsInCapExtent( const DiskLoc &dl ) const;
+
+ public:
+ DiskLoc& cappedListOfAllDeletedRecords() { return deletedList[0]; }
+ DiskLoc& cappedLastDelRecLastExtent() { return deletedList[1]; }
+ void cappedDumpDelInfo();
+ bool capLooped() const { return capped && capFirstNewRecord.isValid(); }
+ bool inCapExtent( const DiskLoc &dl ) const;
+ void cappedCheckMigrate();
+ /**
+ * Truncate documents newer than the document at 'end' from the capped
+ * collection. The collection cannot be completely emptied using this
+ * function. An assertion will be thrown if that is attempted.
+ * @param inclusive - Truncate 'end' as well iff true
+ */
+ void cappedTruncateAfter(const char *ns, DiskLoc end, bool inclusive);
+ /** Remove all documents from the capped collection */
+ void emptyCappedCollection(const char *ns);
+
+ /* when a background index build is in progress, we don't count the index in nIndexes until
+ complete, yet need to still use it in _indexRecord() - thus we use this function for that.
+ */
+ int nIndexesBeingBuilt() const { return nIndexes + indexBuildInProgress; }
+
+ /* NOTE: be careful with flags. are we manipulating them in read locks? if so,
+ this isn't thread safe. TODO
+ */
+ enum NamespaceFlags {
+ Flag_HaveIdIndex = 1 << 0 // set when we have _id index (ONLY if ensureIdIndex was called -- 0 if that has never been called)
+ };
+
+ IndexDetails& idx(int idxNo, bool missingExpected = false );
+
+ /** get the IndexDetails for the index currently being built in the background. (there is at most one) */
+ IndexDetails& inProgIdx() {
+ DEV assert(indexBuildInProgress);
+ return idx(nIndexes);
+ }
+
+ class IndexIterator {
+ public:
+ int pos() { return i; } // note this is the next one to come
+ bool more() { return i < n; }
+ IndexDetails& next() { return d->idx(i++); }
+ private:
+ friend class NamespaceDetails;
+ int i, n;
+ NamespaceDetails *d;
+ IndexIterator(NamespaceDetails *_d);
+ };
+
+ IndexIterator ii() { return IndexIterator(this); }
+
+ /* hackish - find our index # in the indexes array */
+ int idxNo(IndexDetails& idx);
+
+ /* multikey indexes are indexes where there is more than one key in the index
+ for a single document. see multikey in wiki.
+ for these, we have to do some dedup work on queries.
+ */
+ bool isMultikey(int i) const { return (multiKeyIndexBits & (((unsigned long long) 1) << i)) != 0; }
+ void setIndexIsMultikey(int i) {
+ dassert( i < NIndexesMax );
+ unsigned long long x = ((unsigned long long) 1) << i;
+ if( multiKeyIndexBits & x ) return;
+ *getDur().writing(&multiKeyIndexBits) |= x;
+ }
+ void clearIndexIsMultikey(int i) {
+ dassert( i < NIndexesMax );
+ unsigned long long x = ((unsigned long long) 1) << i;
+ if( (multiKeyIndexBits & x) == 0 ) return;
+ *getDur().writing(&multiKeyIndexBits) &= ~x;
+ }
+
+ /* add a new index. does not add to system.indexes etc. - just to NamespaceDetails.
+ caller must populate returned object.
+ */
+ IndexDetails& addIndex(const char *thisns, bool resetTransient=true);
+
+ void aboutToDeleteAnIndex() {
+ *getDur().writing(&flags) = flags & ~Flag_HaveIdIndex;
+ }
+
+ /* returns index of the first index in which the field is present. -1 if not present. */
+ int fieldIsIndexed(const char *fieldName);
+
+ /* called to indicate that an update fit in place.
+ paddingFits() is also called on an insert -- the idea being that if you had some mix and then went to
+ pure inserts it would adapt and the padding factor would trend to 1.0. note that update calls insert on a move,
+ so there is a double count there that must be adjusted for below.
+
+ todo: greater sophistication could be helpful and added later. for example the absolute
+ size of documents might be considered -- in some cases smaller ones are more likely
+ to grow than larger ones in the same collection? (not always)
+ */
+ void paddingFits() {
+ MONGO_SOMETIMES(sometimes, 4) { // do this on a sampled basis to journal less
+ double x = paddingFactor - 0.001;
+ if ( x >= 1.0 ) {
+ *getDur().writing(&paddingFactor) = x;
+ //getDur().setNoJournal(&paddingFactor, &x, sizeof(x));
+ }
+ }
+ }
+ void paddingTooSmall() {
+ MONGO_SOMETIMES(sometimes, 4) { // do this on a sampled basis to journal less
+ /* the more indexes we have, the higher the cost of a move, so we take that into
+ account here. note that on a move, insert() calls paddingFits(), thus
+ for example with no plain inserts and nIndexes = 1 we have
+ .001*4-.001, or a 3:1 ratio of moves to non-moves -> 75% non-moves at equilibrium. insert-heavy
+ workloads can push this down considerably. further tweaking will be a good idea, but
+ this should be an adequate starting point.
+ */
+ double N = min(nIndexes,7) + 3;
+ double x = paddingFactor + (0.001 * N);
+ if ( x <= 2.0 ) {
+ *getDur().writing(&paddingFactor) = x;
+ //getDur().setNoJournal(&paddingFactor, &x, sizeof(x));
+ }
+ }
+ }
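+
+ /* Worked example of the heuristic above: with nIndexes == 1, N = min(1,7) + 3 = 4, so a
+ (sampled) document move nets paddingFactor roughly +0.003 (the +0.004 bump here minus
+ the -0.001 from the paddingFits() call made by the insert that performs the move),
+ while a (sampled) in-place update nets -0.001. The factor therefore holds steady when
+ about three of every four updates fit in place, within the [1.0, 2.0] clamp. */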
+
+ // @return offset in indexes[]
+ int findIndexByName(const char *name);
+
+ // @return offset in indexes[]
+ int findIndexByKeyPattern(const BSONObj& keyPattern);
+
+ void findIndexByType( const string& name , vector<int>& matches ) {
+ IndexIterator i = ii();
+ while ( i.more() ) {
+ if ( i.next().getSpec().getTypeName() == name )
+ matches.push_back( i.pos() - 1 );
+ }
+ }
+
+ /* @return -1 = not found
+ generally id is first index, so not that expensive an operation (assuming present).
+ */
+ int findIdIndex() {
+ IndexIterator i = ii();
+ while( i.more() ) {
+ if( i.next().isIdIndex() )
+ return i.pos()-1;
+ }
+ return -1;
+ }
+
+ bool haveIdIndex() {
+ return (flags & NamespaceDetails::Flag_HaveIdIndex) || findIdIndex() >= 0;
+ }
+
+ /* return which "deleted bucket" for this size object */
+ static int bucket(int n) {
+ for ( int i = 0; i < Buckets; i++ )
+ if ( bucketSizes[i] > n )
+ return i;
+ return Buckets-1;
+ }
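+
+ /* Worked example (sizes hypothetical): if bucketSizes began 32, 64, 128, 256, ... then a
+ 100 byte deleted record would land in bucket 2, since 128 is the first listed size
+ greater than 100; records larger than every listed size all share the last bucket. */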
+
+ /* predetermine location of the next alloc without actually doing it.
+ if cannot predetermine returns null (so still call alloc() then)
+ */
+ DiskLoc allocWillBeAt(const char *ns, int lenToAlloc);
+
+ /* allocate a new record. lenToAlloc includes headers. */
+ DiskLoc alloc(const char *ns, int lenToAlloc, DiskLoc& extentLoc);
+
+ /* add a given record to the deleted chains for this NS */
+ void addDeletedRec(DeletedRecord *d, DiskLoc dloc);
+ void dumpDeleted(set<DiskLoc> *extents = 0);
+ // Start from firstExtent by default.
+ DiskLoc firstRecord( const DiskLoc &startExtent = DiskLoc() ) const;
+ // Start from lastExtent by default.
+ DiskLoc lastRecord( const DiskLoc &startExtent = DiskLoc() ) const;
+ long long storageSize( int * numExtents = 0 , BSONArrayBuilder * extentInfo = 0 ) const;
+
+ int averageObjectSize() {
+ if ( stats.nrecords == 0 )
+ return 5;
+ return (int) (stats.datasize / stats.nrecords);
+ }
+
+ NamespaceDetails *writingWithoutExtra() {
+ return ( NamespaceDetails* ) getDur().writingPtr( this, sizeof( NamespaceDetails ) );
+ }
+ /** Make all linked Extra objects writeable as well */
+ NamespaceDetails *writingWithExtra();
+
+ private:
+ DiskLoc _alloc(const char *ns, int len);
+ void maybeComplain( const char *ns, int len ) const;
+ DiskLoc __stdAlloc(int len, bool willBeAt);
+ void compact(); // combine adjacent deleted records
+ friend class NamespaceIndex;
+ struct ExtraOld {
+ // note we could use this field for more chaining later, so don't waste it:
+ unsigned long long reserved1;
+ IndexDetails details[NIndexesExtra];
+ unsigned reserved2;
+ unsigned reserved3;
+ };
+ /** Update cappedLastDelRecLastExtent() after capExtent changed in cappedTruncateAfter() */
+ void cappedTruncateLastDelUpdate();
+ BOOST_STATIC_ASSERT( NIndexesMax <= NIndexesBase + NIndexesExtra*2 );
+ BOOST_STATIC_ASSERT( NIndexesMax <= 64 ); // multiKey bits
+ BOOST_STATIC_ASSERT( sizeof(NamespaceDetails::ExtraOld) == 496 );
+ BOOST_STATIC_ASSERT( sizeof(NamespaceDetails::Extra) == 496 );
+ }; // NamespaceDetails
+#pragma pack()
+
+ /* NamespaceDetailsTransient
+
+ these are things we know / compute about a namespace that are transient -- things
+ we don't actually store in the .ns file. so mainly caching of frequently used
+ information.
+
+ CAUTION: Are you maintaining this properly on a collection drop()? A dropdatabase()? Be careful.
+ The current field "allIndexKeys" may have too many keys in it on such an occurrence;
+ as currently used that does not cause anything terrible to happen.
+
+ todo: cleanup code, need abstractions and separation
+ */
+ // todo: multiple db's with the same name (repairDatabase) is not handled herein. that may be
+ // the way to go, if not used by repair, but need some sort of enforcement / asserts.
+ class NamespaceDetailsTransient : boost::noncopyable {
+ BOOST_STATIC_ASSERT( sizeof(NamespaceDetails) == 496 );
+
+ //Database *database;
+ const string _ns;
+ void reset();
+ static std::map< string, shared_ptr< NamespaceDetailsTransient > > _nsdMap;
+
+ NamespaceDetailsTransient(Database*,const char *ns);
+ public:
+ ~NamespaceDetailsTransient();
+ void addedIndex() { assertInWriteLock(); reset(); }
+ void deletedIndex() { assertInWriteLock(); reset(); }
+ /* Drop cached information on all namespaces beginning with the specified prefix.
+ Can be useful as index namespaces share the same start as the regular collection.
+ SLOW - sequential scan of all NamespaceDetailsTransient objects */
+ static void clearForPrefix(const char *prefix);
+ static void eraseForPrefix(const char *prefix);
+
+ /**
+ * @return a cursor interface to the query optimizer. The implementation may
+ * utilize a single query plan or interleave results from multiple query
+ * plans before settling on a single query plan. Note that the schema of
+ * currKey() documents, the matcher(), and the isMultiKey() nature of the
+ * cursor may change over the course of iteration.
+ *
+ * @param query - Query used to select indexes and populate matchers.
+ *
+ * @param order - Required ordering spec for documents produced by this cursor,
+ * empty object default indicates no order requirement. If no index exists that
+ * satisfies the required sort order, an empty shared_ptr is returned.
+ *
+ * @param requireIndex - If true, no unindexed (ie collection scan) cursors are
+ * used to generate the returned cursor. If an unindexed cursor is required, an
+ * assertion is raised by the cursor during iteration.
+ *
+ * @param simpleEqualityMatch - Set to true for certain simple queries -
+ * see queryoptimizer.cpp.
+ *
+ * The returned cursor may @throw inside of advance() or recoverFromYield() in
+ * certain error cases, for example if a capped overrun occurred during a yield.
+ * This indicates that the cursor was unable to perform a complete scan.
+ *
+ * This is a work in progress. Partial list of features not yet implemented:
+ * - covered indexes
+ * - in memory sorting
+ */
+ static shared_ptr<Cursor> getCursor( const char *ns, const BSONObj &query,
+ const BSONObj &order = BSONObj(), bool requireIndex = false,
+ bool *simpleEqualityMatch = 0 );
+
+ /* indexKeys() cache ---------------------------------------------------- */
+ /* assumed to be in write lock for this */
+ private:
+ bool _keysComputed;
+ set<string> _indexKeys;
+ void computeIndexKeys();
+ public:
+ /* get set of index keys for this namespace. handy to quickly check if a given
+ field is indexed (Note it might be a secondary component of a compound index.)
+ */
+ set<string>& indexKeys() {
+ DEV assertInWriteLock();
+ if ( !_keysComputed )
+ computeIndexKeys();
+ return _indexKeys;
+ }
+
+ /* IndexSpec caching */
+ private:
+ map<const IndexDetails*,IndexSpec> _indexSpecs;
+ static SimpleMutex _isMutex;
+ public:
+ const IndexSpec& getIndexSpec( const IndexDetails * details ) {
+ IndexSpec& spec = _indexSpecs[details];
+ if ( ! spec._finishedInit ) {
+ SimpleMutex::scoped_lock lk(_isMutex);
+ if ( ! spec._finishedInit ) {
+ spec.reset( details );
+ assert( spec._finishedInit );
+ }
+ }
+ return spec;
+ }
+
+ /* query cache (for query optimizer) ------------------------------------- */
+ private:
+ int _qcWriteCount;
+ map< QueryPattern, pair< BSONObj, long long > > _qcCache;
+ static NamespaceDetailsTransient& make_inlock(const char *ns);
+ public:
+ static SimpleMutex _qcMutex;
+
+ /* you must be in the qcMutex when calling this.
+ A NamespaceDetailsTransient object will not go out of scope on you if you are
+ d.dbMutex.atLeastReadLocked(), so you don't have to stay locked.
+ Creates a NamespaceDetailsTransient before returning if one DNE.
+ todo: avoid creating too many on erroneous ns queries.
+ */
+ static NamespaceDetailsTransient& get_inlock(const char *ns);
+
+ static NamespaceDetailsTransient& get(const char *ns) {
+ SimpleMutex::scoped_lock lk(_qcMutex);
+ return get_inlock(ns);
+ }
+
+ void clearQueryCache() { // public for unit tests
+ _qcCache.clear();
+ _qcWriteCount = 0;
+ }
+ /* you must notify the cache if you are doing writes, as query plan optimality will change */
+ void notifyOfWriteOp() {
+ if ( _qcCache.empty() )
+ return;
+ if ( ++_qcWriteCount >= 100 )
+ clearQueryCache();
+ }
+ BSONObj indexForPattern( const QueryPattern &pattern ) {
+ return _qcCache[ pattern ].first;
+ }
+ long long nScannedForPattern( const QueryPattern &pattern ) {
+ return _qcCache[ pattern ].second;
+ }
+ void registerIndexForPattern( const QueryPattern &pattern, const BSONObj &indexKey, long long nScanned ) {
+ _qcCache[ pattern ] = make_pair( indexKey, nScanned );
+ }
+
+ }; /* NamespaceDetailsTransient */
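+
+ /* Illustrative sketch only (not used by the server): the query cache above is dropped
+ wholesale once 100 writes have been observed via notifyOfWriteOp(), on the theory that
+ recorded plan performance goes stale as the data changes. A hypothetical standalone
+ form of that policy: */
+ struct QueryCachePolicySketch {
+ map< string, BSONObj > cache; // query shape -> cached index choice (illustrative)
+ int writeCount;
+ QueryCachePolicySketch() : writeCount(0) {}
+ void noteWrite() {
+ if ( cache.empty() )
+ return;
+ if ( ++writeCount >= 100 ) {
+ cache.clear();
+ writeCount = 0;
+ }
+ }
+ };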
+
+ inline NamespaceDetailsTransient& NamespaceDetailsTransient::get_inlock(const char *ns) {
+ std::map< string, shared_ptr< NamespaceDetailsTransient > >::iterator i = _nsdMap.find(ns);
+ if( i != _nsdMap.end() &&
+ i->second.get() ) { // could be null ptr from clearForPrefix
+ return *i->second;
+ }
+ return make_inlock(ns);
+ }
+
+ /* NamespaceIndex is the ".ns" file you see in the data directory. It is the "system catalog"
+ if you will: at least the core parts. (Additional info in system.* collections.)
+ */
+ class NamespaceIndex {
+ public:
+ NamespaceIndex(const string &dir, const string &database) :
+ ht( 0 ), dir_( dir ), database_( database ) {}
+
+ /* returns true if new db will be created if we init lazily */
+ bool exists() const;
+
+ void init() {
+ if( !ht )
+ _init();
+ }
+
+ void add_ns(const char *ns, DiskLoc& loc, bool capped);
+ void add_ns( const char *ns, const NamespaceDetails &details );
+
+ NamespaceDetails* details(const char *ns) {
+ if ( !ht )
+ return 0;
+ Namespace n(ns);
+ NamespaceDetails *d = ht->get(n);
+ if ( d && d->capped )
+ d->cappedCheckMigrate();
+ return d;
+ }
+
+ void kill_ns(const char *ns);
+
+ bool find(const char *ns, DiskLoc& loc) {
+ NamespaceDetails *l = details(ns);
+ if ( l ) {
+ loc = l->firstExtent;
+ return true;
+ }
+ return false;
+ }
+
+ bool allocated() const { return ht != 0; }
+
+ void getNamespaces( list<string>& tofill , bool onlyCollections = true ) const;
+
+ NamespaceDetails::Extra* newExtra(const char *ns, int n, NamespaceDetails *d);
+
+ boost::filesystem::path path() const;
+
+ unsigned long long fileLength() const { return f.length(); }
+
+ private:
+ void _init();
+ void maybeMkdir() const;
+
+ MongoMMF f;
+ HashTable<Namespace,NamespaceDetails> *ht;
+ string dir_;
+ string database_;
+ };
+
+ extern string dbpath; // --dbpath parm
+ extern bool directoryperdb;
+
+ // Rename a namespace within current 'client' db.
+ // (Arguments should include db name)
+ void renameNamespace( const char *from, const char *to );
+
+
+} // namespace mongo
diff --git a/src/mongo/db/namespacestring.h b/src/mongo/db/namespacestring.h
new file mode 100644
index 00000000000..d982c5fff75
--- /dev/null
+++ b/src/mongo/db/namespacestring.h
@@ -0,0 +1,147 @@
+// @file namespacestring.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include <string>
+
+namespace mongo {
+
+ using std::string;
+
+ /* in the mongo source code, "client" means "database". */
+
+ const int MaxDatabaseNameLen = 256; // max str len for the db name, including null char
+
+ /* e.g.
+ NamespaceString ns("acme.orders");
+ cout << ns.coll; // "orders"
+ */
+ class NamespaceString {
+ public:
+ string db;
+ string coll; // note collection names can have periods in them for organizing purposes (e.g. "system.indexes")
+
+ NamespaceString( const char * ns ) { init(ns); }
+ NamespaceString( const string& ns ) { init(ns.c_str()); }
+
+ string ns() const { return db + '.' + coll; }
+
+ bool isSystem() const { return strncmp(coll.c_str(), "system.", 7) == 0; }
+ bool isCommand() const { return coll == "$cmd"; }
+
+ operator string() const { return ns(); }
+
+ bool operator==( const string& nsIn ) const { return nsIn == ns(); }
+ bool operator==( const char* nsIn ) const { return (string)nsIn == ns(); }
+ bool operator==( const NamespaceString& nsIn ) const { return nsIn.db == db && nsIn.coll == coll; }
+
+ bool operator!=( const string& nsIn ) const { return nsIn != ns(); }
+ bool operator!=( const char* nsIn ) const { return (string)nsIn != ns(); }
+ bool operator!=( const NamespaceString& nsIn ) const { return nsIn.db != db || nsIn.coll != coll; }
+
+ string toString() const { return ns(); }
+
+ /**
+ * @return true if ns is 'normal'. $ used for collections holding index data, which do not contain BSON objects in their records.
+ * special case for the local.oplog.$main ns -- naming it as such was a mistake.
+ */
+ static bool normal(const char* ns) {
+ const char *p = strchr(ns, '$');
+ if( p == 0 )
+ return true;
+ return strcmp( ns, "local.oplog.$main" ) == 0;
+ }
+
+ static bool special(const char *ns) {
+ return !normal(ns) || strstr(ns, ".system.");
+ }
+
+ /**
+ * samples:
+ * good:
+ * foo
+ * bar
+ * foo-bar
+ * bad:
+ * foo bar
+ * foo.bar
+ * foo"bar
+ *
+ * @param db - a possible database name
+ * @return if db is an allowed database name
+ */
+ static bool validDBName( const string& db ) {
+ if ( db.size() == 0 || db.size() > 64 )
+ return false;
+ size_t good = strcspn( db.c_str() , "/\\. \"" );
+ return good == db.size();
+ }
+
+ /**
+ * samples:
+ * good:
+ * foo.bar
+ * bad:
+ * foo.
+ *
+ * @param dbcoll - a possible collection name of the form db.coll
+ * @return if db.coll is an allowed collection name
+ */
+ static bool validCollectionName(const char* dbcoll){
+ // guard against a missing '.' -- strchr returns 0 then, and blindly adding 1 would fault
+ const char *c = strchr( dbcoll, '.' );
+ return c != 0 && *(c+1) != 0 && normal(dbcoll);
+ }
+
+ private:
+ void init(const char *ns) {
+ const char *p = strchr(ns, '.');
+ if( p == 0 ) return;
+ db = string(ns, p - ns);
+ coll = p + 1;
+ }
+ };
+
+ // "database.a.b.c" -> "database"
+ inline void nsToDatabase(const char *ns, char *database) {
+ const char *p = ns;
+ char *q = database;
+ while ( *p != '.' ) {
+ if ( *p == 0 )
+ break;
+ // check before copying so an over-long ns cannot overrun the destination buffer
+ if ( q - database >= MaxDatabaseNameLen - 1 ) {
+ log() << "nsToDatabase: ns too long. terminating, buf overrun condition" << endl;
+ dbexit( EXIT_POSSIBLE_CORRUPTION );
+ }
+ *q++ = *p++;
+ }
+ *q = 0;
+ }
+ inline string nsToDatabase(const char *ns) {
+ char buf[MaxDatabaseNameLen];
+ nsToDatabase(ns, buf);
+ return buf;
+ }
+ inline string nsToDatabase(const string& ns) {
+ size_t i = ns.find( '.' );
+ if ( i == string::npos )
+ return ns;
+ return ns.substr( 0 , i );
+ }
+
+}
diff --git a/src/mongo/db/nonce.cpp b/src/mongo/db/nonce.cpp
new file mode 100644
index 00000000000..379e88f116d
--- /dev/null
+++ b/src/mongo/db/nonce.cpp
@@ -0,0 +1,95 @@
+// nonce.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "nonce.h"
+#include "../util/time_support.h"
+
+extern int do_md5_test(void);
+
+namespace mongo {
+
+ BOOST_STATIC_ASSERT( sizeof(nonce64) == 8 );
+
+ static Security security; // needs to be static so _initialized is preset to false (see initsafe below)
+
+ Security::Security() {
+ static int n;
+ massert( 10352 , "Security is a singleton class", ++n == 1);
+ init();
+ }
+
+ NOINLINE_DECL void Security::init() {
+ if( _initialized ) return;
+ _initialized = true;
+
+#if defined(__linux__) || defined(__sunos__) || defined(__APPLE__)
+ _devrandom = new ifstream("/dev/urandom", ios::binary|ios::in);
+ massert( 10353 , "can't open dev/urandom", _devrandom->is_open() );
+#elif defined(_WIN32)
+ srand(curTimeMicros()); // perhaps not relevant for rand_s but we might want elsewhere anyway
+#else
+ srandomdev();
+#endif
+
+#ifndef NDEBUG
+ if ( do_md5_test() )
+ massert( 10354 , "md5 unit test fails", false);
+#endif
+ }
+
+ nonce64 Security::__getNonce() {
+ dassert( _initialized );
+ nonce64 n;
+#if defined(__linux__) || defined(__sunos__) || defined(__APPLE__)
+ _devrandom->read((char*)&n, sizeof(n));
+ massert(10355 , "devrandom failed", !_devrandom->fail());
+#elif defined(_WIN32)
+ unsigned a=0, b=0;
+ assert( rand_s(&a) == 0 );
+ assert( rand_s(&b) == 0 );
+ n = (((unsigned long long)a)<<32) | b;
+#else
+ n = (((unsigned long long)random())<<32) | random();
+#endif
+ return n;
+ }
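+
+ /* Worked example of the 64-bit composition above (values hypothetical): with
+ a = 0x12345678 and b = 0x9ABCDEF0, the resulting nonce is
+ (0x12345678ULL << 32) | 0x9ABCDEF0 == 0x123456789ABCDEF0. */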
+
+ SimpleMutex nonceMutex("nonce");
+ nonce64 Security::_getNonce() {
+ // not good this is a static as gcc will mutex protect it which costs time
+ SimpleMutex::scoped_lock lk(nonceMutex);
+ if( !_initialized )
+ init();
+ return __getNonce();
+ }
+
+ nonce64 Security::getNonceDuringInit() {
+ // the mutex might not be inited yet. init phase should be one thread anyway (hopefully we don't spawn threads therein)
+ if( !security._initialized )
+ security.init();
+ return security.__getNonce();
+ }
+
+ nonce64 Security::getNonce() {
+ return security._getNonce();
+ }
+
+ // name warns us this might be a little slow (see code above)
+ unsigned goodRandomNumberSlow() { return (unsigned) Security::getNonce(); }
+
+} // namespace mongo
diff --git a/src/mongo/db/nonce.h b/src/mongo/db/nonce.h
new file mode 100644
index 00000000000..d6a147ab1c0
--- /dev/null
+++ b/src/mongo/db/nonce.h
@@ -0,0 +1,36 @@
+// @file nonce.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+namespace mongo {
+
+ typedef unsigned long long nonce64;
+
+ struct Security {
+ Security();
+ static nonce64 getNonce();
+ static nonce64 getNonceDuringInit(); // use this version during global var constructors
+ private:
+ nonce64 _getNonce();
+ nonce64 __getNonce();
+ ifstream *_devrandom;
+ bool _initialized;
+ void init(); // can call more than once
+ };
+
+} // namespace mongo
diff --git a/src/mongo/db/oplog.cpp b/src/mongo/db/oplog.cpp
new file mode 100644
index 00000000000..342f362a28f
--- /dev/null
+++ b/src/mongo/db/oplog.cpp
@@ -0,0 +1,872 @@
+// @file oplog.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "oplog.h"
+#include "repl_block.h"
+#include "repl.h"
+#include "commands.h"
+#include "repl/rs.h"
+#include "stats/counters.h"
+#include "../util/file.h"
+#include "../util/unittest.h"
+#include "queryoptimizer.h"
+#include "ops/update.h"
+#include "ops/delete.h"
+#include "ops/query.h"
+
+namespace mongo {
+
+ void logOpForSharding( const char * opstr , const char * ns , const BSONObj& obj , BSONObj * patt );
+
+ int __findingStartInitialTimeout = 5; // configurable for testing
+
+ // cached copies of these...so don't rename them, drop them, etc.!!!
+ static NamespaceDetails *localOplogMainDetails = 0;
+ static Database *localDB = 0;
+ static NamespaceDetails *rsOplogDetails = 0;
+ void oplogCheckCloseDatabase( Database * db ) {
+ localDB = 0;
+ localOplogMainDetails = 0;
+ rsOplogDetails = 0;
+ resetSlaveCache();
+ }
+
+ static void _logOpUninitialized(const char *opstr, const char *ns, const char *logNS, const BSONObj& obj, BSONObj *o2, bool *bb ) {
+ uassert(13288, "replSet error write op to db before replSet initialized", str::startsWith(ns, "local.") || *opstr == 'n');
+ }
+
+ /** write an op to the oplog that is already built.
+ todo : make _logOpRS() call this so we don't repeat ourselves?
+ */
+ void _logOpObjRS(const BSONObj& op) {
+ DEV assertInWriteLock();
+
+ const OpTime ts = op["ts"]._opTime();
+ long long h = op["h"].numberLong();
+
+ {
+ const char *logns = rsoplog;
+ if ( rsOplogDetails == 0 ) {
+ Client::Context ctx( logns , dbpath, false);
+ localDB = ctx.db();
+ assert( localDB );
+ rsOplogDetails = nsdetails(logns);
+ massert(13389, "local.oplog.rs missing. did you drop it? if so restart server", rsOplogDetails);
+ }
+ Client::Context ctx( logns , localDB, false );
+ {
+ int len = op.objsize();
+ Record *r = theDataFileMgr.fast_oplog_insert(rsOplogDetails, logns, len);
+ memcpy(getDur().writingPtr(r->data, len), op.objdata(), len);
+ }
+ /* todo: now() has code to handle clock skew. but if the server to server skew is large it will get unhappy.
+ this code (or code in now() maybe) should be improved.
+ */
+ if( theReplSet ) {
+ if( !(theReplSet->lastOpTimeWritten<ts) ) {
+ log() << "replSet error possible failover clock skew issue? " << theReplSet->lastOpTimeWritten.toString() << ' ' << endl;
+ }
+ theReplSet->lastOpTimeWritten = ts;
+ theReplSet->lastH = h;
+ ctx.getClient()->setLastOp( ts );
+ }
+ }
+ }
+
+ /** given a BSON object, create a new one at dst which is the existing (partial) object
+ with a new object element appended at the end with fieldname "o".
+
+ @param partial already built object with everything except the o member. e.g. something like:
+ { ts:..., ns:..., os2:... }
+ @param o a bson object to be added with fieldname "o"
+ @param dst where to put the newly built combined object. e.g. ends up as something like:
+ { ts:..., ns:..., os2:..., o:... }
+ */
+ void append_O_Obj(char *dst, const BSONObj& partial, const BSONObj& o) {
+ const int size1 = partial.objsize() - 1; // less the EOO char
+ const int oOfs = size1+3; // 3 = byte BSONOBJTYPE + byte 'o' + byte \0
+
+ void *p = getDur().writingPtr(dst, oOfs+o.objsize()+1);
+
+ memcpy(p, partial.objdata(), size1);
+
+ // adjust overall bson object size for the o: field
+ *(static_cast<unsigned*>(p)) += o.objsize() + 1/*fieldtype byte*/ + 2/*"o" fieldname*/;
+
+ char *b = static_cast<char *>(p);
+ b += size1;
+ *b++ = (char) Object;
+ *b++ = 'o'; // { o : ... }
+ *b++ = 0; // null terminate "o" fieldname
+ memcpy(b, o.objdata(), o.objsize());
+ b += o.objsize();
+ *b = EOO;
+ }
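+
+ /* Equivalent, but double-copying, builder form of append_O_Obj -- a sketch for
+ illustration only. The hand-rolled version above exists so the "o" element is written
+ exactly once, directly into the memory mapped destination record: */
+ static inline BSONObj appendOObjSketch( const BSONObj& partial , const BSONObj& o ) {
+ BSONObjBuilder b;
+ b.appendElements( partial ); // ts and the op/ns/b/o2 fields already present
+ b.append( "o" , o ); // then the payload object
+ return b.obj();
+ }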
+
+ // global is safe as we are in write lock. we put the static outside the function to avoid the implicit mutex
+ // the compiler would use if inside the function. the reason this is static is to avoid a malloc/free for this
+ // on every logop call.
+ static BufBuilder logopbufbuilder(8*1024);
+ static void _logOpRS(const char *opstr, const char *ns, const char *logNS, const BSONObj& obj, BSONObj *o2, bool *bb ) {
+ DEV assertInWriteLock();
+
+ if ( strncmp(ns, "local.", 6) == 0 ) {
+ if ( strncmp(ns, "local.slaves", 12) == 0 )
+ resetSlaveCache();
+ return;
+ }
+
+ const OpTime ts = OpTime::now();
+ long long hashNew;
+ if( theReplSet ) {
+ massert(13312, "replSet error : logOp() but not primary?", theReplSet->box.getState().primary());
+ hashNew = (theReplSet->lastH * 131 + ts.asLL()) * 17 + theReplSet->selfId();
+ }
+ else {
+ // must be initiation
+ assert( *ns == 0 );
+ hashNew = 0;
+ }
+
+ /* we jump through a bunch of hoops here to avoid copying the obj buffer twice --
+ instead we do a single copy to the destination position in the memory mapped file.
+ */
+
+ logopbufbuilder.reset();
+ BSONObjBuilder b(logopbufbuilder);
+ b.appendTimestamp("ts", ts.asDate());
+ b.append("h", hashNew);
+ b.append("op", opstr);
+ b.append("ns", ns);
+ if ( bb )
+ b.appendBool("b", *bb);
+ if ( o2 )
+ b.append("o2", *o2);
+ BSONObj partial = b.done();
+ int posz = partial.objsize();
+ int len = posz + obj.objsize() + 1 + 2 /*o:*/;
+
+ Record *r;
+ DEV assert( logNS == 0 );
+ {
+ const char *logns = rsoplog;
+ if ( rsOplogDetails == 0 ) {
+ Client::Context ctx( logns , dbpath, false);
+ localDB = ctx.db();
+ assert( localDB );
+ rsOplogDetails = nsdetails(logns);
+ massert(13347, "local.oplog.rs missing. did you drop it? if so restart server", rsOplogDetails);
+ }
+ Client::Context ctx( logns , localDB, false );
+ r = theDataFileMgr.fast_oplog_insert(rsOplogDetails, logns, len);
+ /* todo: now() has code to handle clock skew. but if the server to server skew is large it will get unhappy.
+ this code (or code in now() maybe) should be improved.
+ */
+ if( theReplSet ) {
+ if( !(theReplSet->lastOpTimeWritten<ts) ) {
+ log() << "replSet ERROR possible failover clock skew issue? " << theReplSet->lastOpTimeWritten << ' ' << ts << rsLog;
+ log() << "replSet " << theReplSet->isPrimary() << rsLog;
+ }
+ theReplSet->lastOpTimeWritten = ts;
+ theReplSet->lastH = hashNew;
+ ctx.getClient()->setLastOp( ts );
+ }
+ }
+
+ append_O_Obj(r->data, partial, obj);
+
+ if ( logLevel >= 6 ) {
+ BSONObj temp(r);
+ log( 6 ) << "logOp:" << temp << endl;
+ }
+ }
+
+ /* we write to local.oplog.$main:
+ { ts : ..., op: ..., ns: ..., o: ... }
+ ts: an OpTime timestamp
+ op:
+ "i" insert
+ "u" update
+ "d" delete
+ "c" db cmd
+ "db" declares presence of a database (ns is set to the db name + '.')
+ "n" no op
+ logNS - where to log it. 0/null means "local.oplog.$main".
+ bb:
+ if not null, specifies a boolean to pass along to the other side as b: param.
+ used for "justOne" or "upsert" flags on 'd', 'u'
+ first: true
+ when set, indicates this is the first thing we have logged for this database.
+ thus, the slave does not need to copy down all the data when it sees this.
+
+ note this is used for single collection logging even when --replSet is enabled.
+ */
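+ /* For illustration (values hypothetical): an insert of { _id: 1, name: "sue" } into
+ test.foo is logged roughly as
+ { ts: <OpTime>, op: "i", ns: "test.foo", o: { _id: 1, name: "sue" } }
+ and a justOne remove of { _id: 1 } as
+ { ts: <OpTime>, op: "d", ns: "test.foo", b: true, o: { _id: 1 } }. */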
+ static void _logOpOld(const char *opstr, const char *ns, const char *logNS, const BSONObj& obj, BSONObj *o2, bool *bb ) {
+ DEV assertInWriteLock();
+ static BufBuilder bufbuilder(8*1024);
+
+ if ( strncmp(ns, "local.", 6) == 0 ) {
+ if ( strncmp(ns, "local.slaves", 12) == 0 ) {
+ resetSlaveCache();
+ }
+ return;
+ }
+
+ const OpTime ts = OpTime::now();
+ Client::Context context("",0,false);
+
+ /* we jump through a bunch of hoops here to avoid copying the obj buffer twice --
+ instead we do a single copy to the destination position in the memory mapped file.
+ */
+
+ bufbuilder.reset();
+ BSONObjBuilder b(bufbuilder);
+ b.appendTimestamp("ts", ts.asDate());
+ b.append("op", opstr);
+ b.append("ns", ns);
+ if ( bb )
+ b.appendBool("b", *bb);
+ if ( o2 )
+ b.append("o2", *o2);
+ BSONObj partial = b.done(); // partial is everything except the o:... part.
+
+ int po_sz = partial.objsize();
+ int len = po_sz + obj.objsize() + 1 + 2 /*o:*/;
+
+ Record *r;
+ if( logNS == 0 ) {
+ logNS = "local.oplog.$main";
+ if ( localOplogMainDetails == 0 ) {
+ Client::Context ctx( logNS , dbpath, false);
+ localDB = ctx.db();
+ assert( localDB );
+ localOplogMainDetails = nsdetails(logNS);
+ assert( localOplogMainDetails );
+ }
+ Client::Context ctx( logNS , localDB, false );
+ r = theDataFileMgr.fast_oplog_insert(localOplogMainDetails, logNS, len);
+ }
+ else {
+ Client::Context ctx( logNS, dbpath, false );
+ assert( nsdetails( logNS ) );
+ // first we allocate the space, then we fill it below.
+ r = theDataFileMgr.fast_oplog_insert( nsdetails( logNS ), logNS, len);
+ }
+
+ append_O_Obj(r->data, partial, obj);
+
+ context.getClient()->setLastOp( ts );
+
+ if ( logLevel >= 6 ) {
+ BSONObj temp(r);
+ log( 6 ) << "logging op:" << temp << endl;
+ }
+
+ }
+
+ static void (*_logOp)(const char *opstr, const char *ns, const char *logNS, const BSONObj& obj, BSONObj *o2, bool *bb ) = _logOpOld;
+ void newReplUp() {
+ replSettings.master = true;
+ _logOp = _logOpRS;
+ }
+ void newRepl() {
+ replSettings.master = true;
+ _logOp = _logOpUninitialized;
+ }
+ void oldRepl() { _logOp = _logOpOld; }
+
+ void logKeepalive() {
+ _logOp("n", "", 0, BSONObj(), 0, 0);
+ }
+ void logOpComment(const BSONObj& obj) {
+ _logOp("n", "", 0, obj, 0, 0);
+ }
+ void logOpInitiate(const BSONObj& obj) {
+ _logOpRS("n", "", 0, obj, 0, 0);
+ }
+
+ /* @param opstr:
+ c userCreateNS
+ i insert
+ n no-op / keepalive
+ d delete / remove
+ u update
+ */
+ void logOp(const char *opstr, const char *ns, const BSONObj& obj, BSONObj *patt, bool *b) {
+ if ( replSettings.master ) {
+ _logOp(opstr, ns, 0, obj, patt, b);
+ }
+
+ logOpForSharding( opstr , ns , obj , patt );
+ }
+
+ void createOplog() {
+ dblock lk;
+
+ const char * ns = "local.oplog.$main";
+
+ bool rs = !cmdLine._replSet.empty();
+ if( rs )
+ ns = rsoplog;
+
+ Client::Context ctx(ns);
+
+ NamespaceDetails * nsd = nsdetails( ns );
+
+ if ( nsd ) {
+
+ if ( cmdLine.oplogSize != 0 ) {
+ int o = (int)(nsd->storageSize() / ( 1024 * 1024 ) );
+ int n = (int)(cmdLine.oplogSize / ( 1024 * 1024 ) );
+ if ( n != o ) {
+ stringstream ss;
+ ss << "cmdline oplogsize (" << n << ") different than existing (" << o << ") see: http://dochub.mongodb.org/core/increase-oplog";
+ log() << ss.str() << endl;
+ throw UserException( 13257 , ss.str() );
+ }
+ }
+
+ if( rs ) return;
+
+ DBDirectClient c;
+ BSONObj lastOp = c.findOne( ns, Query().sort(reverseNaturalObj) );
+ if ( !lastOp.isEmpty() ) {
+ OpTime::setLast( lastOp[ "ts" ].date() );
+ }
+ return;
+ }
+
+ /* create an oplog collection, if it doesn't yet exist. */
+ BSONObjBuilder b;
+ double sz;
+ if ( cmdLine.oplogSize != 0 )
+ sz = (double)cmdLine.oplogSize;
+ else {
+ /* not specified. pick a default size */
+ sz = 50.0 * 1000 * 1000;
+ if ( sizeof(int *) >= 8 ) {
+#if defined(__APPLE__)
+ // typically these are desktops (dev machines), so keep it smallish
+ sz = (256-64) * 1000 * 1000;
+#else
+ sz = 990.0 * 1000 * 1000;
+ boost::intmax_t free = File::freeSpace(dbpath); //-1 if call not supported.
+ double fivePct = free * 0.05;
+ if ( fivePct > sz )
+ sz = fivePct;
+#endif
+ }
+ }
+
+ log() << "******" << endl;
+ log() << "creating replication oplog of size: " << (int)( sz / ( 1024 * 1024 ) ) << "MB..." << endl;
+
+ b.append("size", sz);
+ b.appendBool("capped", 1);
+ b.appendBool("autoIndexId", false);
+
+ string err;
+ BSONObj o = b.done();
+ userCreateNS(ns, o, err, false);
+ if( !rs )
+ logOp( "n", "", BSONObj() );
+
+ /* sync here so we don't get any surprising lag later when we try to sync */
+ MemoryMappedFile::flushAll(true);
+ log() << "******" << endl;
+ }
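+
+ /* Worked example of the default sizing above (numbers hypothetical): on a 64-bit,
+ non-Apple host with 100GB free at dbpath, 5% of free space (5GB) exceeds the 990MB
+ floor, so the oplog is created at roughly 5GB; with only 10GB free, 5% is about 500MB,
+ the floor wins, and roughly 990MB is used. */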
+
+ // -------------------------------------
+
+ FindingStartCursor::FindingStartCursor( const QueryPlan & qp ) :
+ _qp( qp ),
+ _findingStart( true ),
+ _findingStartMode()
+ { init(); }
+
+ void FindingStartCursor::next() {
+ if ( !_findingStartCursor || !_findingStartCursor->ok() ) {
+ _findingStart = false;
+ _c = _qp.newCursor(); // on error, start from beginning
+ destroyClientCursor();
+ return;
+ }
+ switch( _findingStartMode ) {
+ // Initial mode: scan backwards from end of collection
+ case Initial: {
+ if ( !_matcher->matchesCurrent( _findingStartCursor->c() ) ) {
+ _findingStart = false; // found first record out of query range, so scan normally
+ _c = _qp.newCursor( _findingStartCursor->currLoc() );
+ destroyClientCursor();
+ return;
+ }
+ _findingStartCursor->advance();
+ RARELY {
+ if ( _findingStartTimer.seconds() >= __findingStartInitialTimeout ) {
+ // If we've scanned enough, switch to find extent mode.
+ createClientCursor( extentFirstLoc( _findingStartCursor->currLoc() ) );
+ _findingStartMode = FindExtent;
+ return;
+ }
+ }
+ return;
+ }
+ // FindExtent mode: moving backwards through extents, check first
+ // document of each extent.
+ case FindExtent: {
+ if ( !_matcher->matchesCurrent( _findingStartCursor->c() ) ) {
+ _findingStartMode = InExtent;
+ return;
+ }
+ DiskLoc prev = prevExtentFirstLoc( _findingStartCursor->currLoc() );
+ if ( prev.isNull() ) { // hit beginning, so start scanning from here
+ createClientCursor();
+ _findingStartMode = InExtent;
+ return;
+ }
+ // There might be a more efficient implementation than creating new cursor & client cursor each time,
+ // not worrying about that for now
+ createClientCursor( prev );
+ return;
+ }
+ // InExtent mode: once an extent is chosen, find starting doc in the extent.
+ case InExtent: {
+ if ( _matcher->matchesCurrent( _findingStartCursor->c() ) ) {
+ _findingStart = false; // found first record in query range, so scan normally
+ _c = _qp.newCursor( _findingStartCursor->currLoc() );
+ destroyClientCursor();
+ return;
+ }
+ _findingStartCursor->advance();
+ return;
+ }
+ default: {
+ massert( 14038, "invalid _findingStartMode", false );
+ }
+ }
+ }
+
+ DiskLoc FindingStartCursor::extentFirstLoc( const DiskLoc &rec ) {
+ Extent *e = rec.rec()->myExtent( rec );
+ if ( !_qp.nsd()->capLooped() || ( e->myLoc != _qp.nsd()->capExtent ) )
+ return e->firstRecord;
+ // Likely we are on the fresh side of capExtent, so return first fresh record.
+ // If we are on the stale side of capExtent, then the collection is small and it
+ // doesn't matter if we start the extent scan with capFirstNewRecord.
+ return _qp.nsd()->capFirstNewRecord;
+ }
+
+ void wassertExtentNonempty( const Extent *e ) {
+ // TODO ensure this requirement is clearly enforced, or fix.
+ wassert( !e->firstRecord.isNull() );
+ }
+
+ DiskLoc FindingStartCursor::prevExtentFirstLoc( const DiskLoc &rec ) {
+ Extent *e = rec.rec()->myExtent( rec );
+ if ( _qp.nsd()->capLooped() ) {
+ if ( e->xprev.isNull() ) {
+ e = _qp.nsd()->lastExtent.ext();
+ }
+ else {
+ e = e->xprev.ext();
+ }
+ if ( e->myLoc != _qp.nsd()->capExtent ) {
+ wassertExtentNonempty( e );
+ return e->firstRecord;
+ }
+ }
+ else {
+ if ( !e->xprev.isNull() ) {
+ e = e->xprev.ext();
+ wassertExtentNonempty( e );
+ return e->firstRecord;
+ }
+ }
+ return DiskLoc(); // reached beginning of collection
+ }
+
+ void FindingStartCursor::createClientCursor( const DiskLoc &startLoc ) {
+ shared_ptr<Cursor> c = _qp.newCursor( startLoc );
+ _findingStartCursor.reset( new ClientCursor(QueryOption_NoCursorTimeout, c, _qp.ns()) );
+ }
+
+ bool FindingStartCursor::firstDocMatchesOrEmpty() const {
+ shared_ptr<Cursor> c = _qp.newCursor();
+ return !c->ok() || _matcher->matchesCurrent( c.get() );
+ }
+
+ void FindingStartCursor::init() {
+ BSONElement tsElt = _qp.originalQuery()[ "ts" ];
+ massert( 13044, "no ts field in query", !tsElt.eoo() );
+ BSONObjBuilder b;
+ b.append( tsElt );
+ BSONObj tsQuery = b.obj();
+ _matcher.reset(new CoveredIndexMatcher(tsQuery, _qp.indexKey()));
+ if ( firstDocMatchesOrEmpty() ) {
+ _c = _qp.newCursor();
+ _findingStart = false;
+ return;
+ }
+ // Use a ClientCursor here so we can release db mutex while scanning
+ // oplog (can take quite a while with large oplogs).
+ shared_ptr<Cursor> c = _qp.newReverseCursor();
+ _findingStartCursor.reset( new ClientCursor(QueryOption_NoCursorTimeout, c, _qp.ns(), BSONObj()) );
+ _findingStartTimer.reset();
+ _findingStartMode = Initial;
+ }
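+
+ /* For illustration (values hypothetical): oplog tailing queries take the form
+ { ts: { $gte: <last optime applied> } }. init() extracts the ts clause, and the cursor
+ then searches backwards from the end of the oplog -- record by record (Initial), then
+ extent by extent (FindExtent), then within the chosen extent (InExtent) -- for the
+ first entry in range, after which scanning proceeds forward normally. */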
+
+ // -------------------------------------
+
+ struct TestOpTime : public UnitTest {
+ void run() {
+ OpTime t;
+ for ( int i = 0; i < 10; i++ ) {
+ OpTime s = OpTime::now_inlock();
+ assert( s != t );
+ t = s;
+ }
+ OpTime q = t;
+ assert( q == t );
+ assert( !(q != t) );
+ }
+ } testoptime;
+
+ int _dummy_z;
+
+ void pretouchN(vector<BSONObj>& v, unsigned a, unsigned b) {
+ DEV assert( !d.dbMutex.isWriteLocked() );
+
+ Client *c = currentClient.get();
+ if( c == 0 ) {
+ Client::initThread("pretouchN");
+ c = &cc();
+ }
+
+ readlock lk("");
+ for( unsigned i = a; i <= b; i++ ) {
+ const BSONObj& op = v[i];
+ const char *which = "o";
+ const char *opType = op.getStringField("op");
+ if ( *opType == 'i' )
+ ;
+ else if( *opType == 'u' )
+ which = "o2";
+ else
+ continue;
+ /* todo : other operations */
+
+ try {
+ BSONObj o = op.getObjectField(which);
+ BSONElement _id;
+ if( o.getObjectID(_id) ) {
+ const char *ns = op.getStringField("ns");
+ BSONObjBuilder b;
+ b.append(_id);
+ BSONObj result;
+ Client::Context ctx( ns );
+ if( Helpers::findById(cc(), ns, b.done(), result) )
+ _dummy_z += result.objsize(); // touch
+ }
+ }
+ catch( DBException& e ) {
+ log() << "ignoring assertion in pretouchN() " << a << ' ' << b << ' ' << i << ' ' << e.toString() << endl;
+ }
+ }
+ }
+
+ void pretouchOperation(const BSONObj& op) {
+
+ if( d.dbMutex.isWriteLocked() )
+ return; // no point pretouching if write locked. not sure if this will ever fire, but just in case.
+
+ const char *which = "o";
+ const char *opType = op.getStringField("op");
+ if ( *opType == 'i' )
+ ;
+ else if( *opType == 'u' )
+ which = "o2";
+ else
+ return;
+ /* todo : other operations */
+
+ try {
+ BSONObj o = op.getObjectField(which);
+ BSONElement _id;
+ if( o.getObjectID(_id) ) {
+ const char *ns = op.getStringField("ns");
+ BSONObjBuilder b;
+ b.append(_id);
+ BSONObj result;
+ readlock lk(ns);
+ Client::Context ctx( ns );
+ if( Helpers::findById(cc(), ns, b.done(), result) )
+ _dummy_z += result.objsize(); // touch
+ }
+ }
+ catch( DBException& ) {
+ log() << "ignoring assertion in pretouchOperation()" << endl;
+ }
+ }
+
+ BSONObj Sync::getMissingDoc(const BSONObj& o) {
+ OplogReader missingObjReader;
+
+ uassert(15916, str::stream() << "Can no longer connect to initial sync source: " << hn, missingObjReader.connect(hn));
+
+ const char *ns = o.getStringField("ns");
+ // might be more than just _id in the update criteria
+ BSONObj query = BSONObjBuilder().append(o.getObjectField("o2")["_id"]).obj();
+ BSONObj missingObj;
+ try {
+ missingObj = missingObjReader.findOne(ns, query);
+ } catch(DBException& e) {
+ log() << "replication assertion fetching missing object: " << e.what() << endl;
+ throw;
+ }
+
+ return missingObj;
+ }
+
+ bool Sync::shouldRetry(const BSONObj& o) {
+ // we don't have the object yet, which is possible on initial sync. get it.
+ log() << "replication info adding missing object" << endl; // rare enough we can log
+
+ BSONObj missingObj = getMissingDoc(o);
+
+ if( missingObj.isEmpty() ) {
+ log() << "replication missing object not found on source. presumably deleted later in oplog" << endl;
+ log() << "replication o2: " << o.getObjectField("o2").toString() << endl;
+ log() << "replication o firstfield: " << o.getObjectField("o").firstElementFieldName() << endl;
+
+ return false;
+ }
+ else {
+ const char *ns = o.getStringField("ns");
+ Client::Context ctx(ns);
+ DiskLoc d = theDataFileMgr.insert(ns, (void*) missingObj.objdata(), missingObj.objsize());
+ uassert(15917, "Got bad disk location when attempting to insert", !d.isNull());
+
+ return true;
+ }
+ }
+
+ /** @param fromRepl false if from ApplyOpsCmd
+       @return true if it was an update that should have been applied but the target document does not exist. see replset initial sync code.
+ */
+ bool applyOperation_inlock(const BSONObj& op , bool fromRepl ) {
+ assertInWriteLock();
+ LOG(6) << "applying op: " << op << endl;
+ bool failedUpdate = false;
+
+ OpCounters * opCounters = fromRepl ? &replOpCounters : &globalOpCounters;
+
+ const char *names[] = { "o", "ns", "op", "b" };
+ BSONElement fields[4];
+ op.getFields(4, names, fields);
+
+ BSONObj o;
+ if( fields[0].isABSONObj() )
+ o = fields[0].embeddedObject();
+
+ const char *ns = fields[1].valuestrsafe();
+
+ // operation type -- see logOp() comments for types
+ const char *opType = fields[2].valuestrsafe();
+
+ if ( *opType == 'i' ) {
+ opCounters->gotInsert();
+
+ const char *p = strchr(ns, '.');
+ if ( p && strcmp(p, ".system.indexes") == 0 ) {
+ // updates aren't allowed for indexes -- so we will do a regular insert. if index already
+ // exists, that is ok.
+ theDataFileMgr.insert(ns, (void*) o.objdata(), o.objsize());
+ }
+ else {
+ // do upserts for inserts as we might get replayed more than once
+ OpDebug debug;
+ BSONElement _id;
+ if( !o.getObjectID(_id) ) {
+ /* No _id. This will be very slow. */
+ Timer t;
+ updateObjects(ns, o, o, true, false, false, debug );
+ if( t.millis() >= 2 ) {
+ RARELY OCCASIONALLY log() << "warning, repl doing slow updates (no _id field) for " << ns << endl;
+ }
+ }
+ else {
+                    /* erh 10/16/2009 - this is probably not relevant any more since it's auto-created, but not worth removing */
+ RARELY ensureHaveIdIndex(ns); // otherwise updates will be slow
+
+ /* todo : it may be better to do an insert here, and then catch the dup key exception and do update
+ then. very few upserts will not be inserts...
+ */
+ BSONObjBuilder b;
+ b.append(_id);
+ updateObjects(ns, o, b.done(), true, false, false , debug );
+ }
+ }
+ }
+ else if ( *opType == 'u' ) {
+ opCounters->gotUpdate();
+ // dm do we create this for a capped collection?
+ // - if not, updates would be slow
+            //    - but if they were by _id they would be slow on the primary too, so maybe ok
+            //  - if on the primary it was by another key and there are other indexes, this could be very bad w/out an index
+            //  - if we do create it, it is odd to have it on the secondary but not the primary.  it can also cause the secondary to block for
+            //    quite a while on creation.
+ RARELY ensureHaveIdIndex(ns); // otherwise updates will be super slow
+ OpDebug debug;
+ BSONObj updateCriteria = op.getObjectField("o2");
+ bool upsert = fields[3].booleanSafe();
+ UpdateResult ur = updateObjects(ns, o, updateCriteria, upsert, /*multi*/ false, /*logop*/ false , debug );
+ if( ur.num == 0 ) {
+ if( ur.mod ) {
+ if( updateCriteria.nFields() == 1 ) {
+ // was a simple { _id : ... } update criteria
+ failedUpdate = true;
+ // todo: probably should assert in these failedUpdate cases if not in initialSync
+ }
+                    // need to check whether the document is actually absent so we can set failedUpdate correctly.
+                    // note this adds some overhead for the extra check in some cases, such as an updateCriteria
+                    // of the form
+                    //  { _id : ..., x : { $size : ... } }
+                    // thus this is not ideal.
+ else {
+ NamespaceDetails *nsd = nsdetails(ns);
+
+ if (nsd == NULL ||
+ (nsd->findIdIndex() >= 0 && Helpers::findById(nsd, updateCriteria).isNull()) ||
+ // capped collections won't have an _id index
+ (nsd->findIdIndex() < 0 && Helpers::findOne(ns, updateCriteria, false).isNull())) {
+ failedUpdate = true;
+ }
+
+ // Otherwise, it's present; zero objects were updated because of additional specifiers
+ // in the query for idempotence
+ }
+ }
+ else {
+                    // this could happen benignly on a duplicate oplog replay of an upsert
+                    // (because we are idempotent);
+                    // if a regular (non-mod) update fails, the item is (presumably) missing.
+ if( !upsert ) {
+ failedUpdate = true;
+ }
+ }
+ }
+ }
+ else if ( *opType == 'd' ) {
+ opCounters->gotDelete();
+ if ( opType[1] == 0 )
+ deleteObjects(ns, o, /*justOne*/ fields[3].booleanSafe());
+ else
+ assert( opType[1] == 'b' ); // "db" advertisement
+ }
+ else if ( *opType == 'c' ) {
+ opCounters->gotCommand();
+ BufBuilder bb;
+ BSONObjBuilder ob;
+ _runCommands(ns, o, bb, ob, true, 0);
+ }
+ else if ( *opType == 'n' ) {
+ // no op
+ }
+ else {
+ throw MsgAssertionException( 14825 , ErrorMsg("error in applyOperation : unknown opType ", *opType) );
+ }
+ return failedUpdate;
+ }
+
+ class ApplyOpsCmd : public Command {
+ public:
+ virtual bool slaveOk() const { return false; }
+ virtual LockType locktype() const { return WRITE; }
+ ApplyOpsCmd() : Command( "applyOps" ) {}
+ virtual void help( stringstream &help ) const {
+ help << "internal (sharding)\n{ applyOps : [ ] , preCondition : [ { ns : ... , q : ... , res : ... } ] }";
+ }
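+        /* Illustrative sketch of a typical invocation (values are hypothetical, not from this change):
+               { applyOps : [ { op : "u" , ns : "test.foo" , o2 : { _id : 1 } , o : { $set : { x : 2 } } } ] ,
+                 preCondition : [ { ns : "test.foo" , q : { _id : 1 } , res : { x : 1 } } ] }
+           each element of applyOps is an oplog-format op as consumed by applyOperation_inlock() above. */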
+ virtual bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+
+ if ( cmdObj.firstElement().type() != Array ) {
+ errmsg = "ops has to be an array";
+ return false;
+ }
+
+ BSONObj ops = cmdObj.firstElement().Obj();
+
+ {
+ // check input
+ BSONObjIterator i( ops );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( e.type() == Object )
+ continue;
+ errmsg = "op not an object: ";
+ errmsg += e.fieldName();
+ return false;
+ }
+ }
+
+ if ( cmdObj["preCondition"].type() == Array ) {
+ BSONObjIterator i( cmdObj["preCondition"].Obj() );
+ while ( i.more() ) {
+ BSONObj f = i.next().Obj();
+
+ BSONObj realres = db.findOne( f["ns"].String() , f["q"].Obj() );
+
+ Matcher m( f["res"].Obj() );
+ if ( ! m.matches( realres ) ) {
+ result.append( "got" , realres );
+ result.append( "whatFailed" , f );
+ errmsg = "pre-condition failed";
+ return false;
+ }
+ }
+ }
+
+ // apply
+ int num = 0;
+ BSONObjIterator i( ops );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ // todo SERVER-4259 ?
+ applyOperation_inlock( e.Obj() , false );
+ num++;
+ }
+
+ result.append( "applied" , num );
+
+ if ( ! fromRepl ) {
+ // We want this applied atomically on slaves
+ // so we re-wrap without the pre-condition for speed
+
+ string tempNS = str::stream() << dbname << ".$cmd";
+
+ logOp( "c" , tempNS.c_str() , cmdObj.firstElement().wrap() );
+ }
+
+ return true;
+ }
+
+ DBDirectClient db;
+
+ } applyOpsCmd;
+
+}
diff --git a/src/mongo/db/oplog.h b/src/mongo/db/oplog.h
new file mode 100644
index 00000000000..6c1644fe3ab
--- /dev/null
+++ b/src/mongo/db/oplog.h
@@ -0,0 +1,149 @@
+// oplog.h - writing to and reading from oplog
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+/*
+
+ local.oplog.$main is the default
+*/
+
+#pragma once
+
+#include "pdfile.h"
+#include "db.h"
+#include "dbhelpers.h"
+#include "clientcursor.h"
+#include "../client/dbclient.h"
+#include "../util/optime.h"
+#include "../util/timer.h"
+
+namespace mongo {
+
+ void createOplog();
+
+ void _logOpObjRS(const BSONObj& op);
+
+ /** Write operation to the log (local.oplog.$main)
+
+ @param opstr
+ "i" insert
+ "u" update
+ "d" delete
+ "c" db cmd
+ "n" no-op
+ "db" declares presence of a database (ns is set to the db name + '.')
+
+ See _logOp() in oplog.cpp for more details.
+ */
+ void logOp(const char *opstr, const char *ns, const BSONObj& obj, BSONObj *patt = 0, bool *b = 0);
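+    /* Rough illustrations of the resulting oplog entry shapes (field values hypothetical; shown only
+       to make the opstr values above concrete):
+           "i" : { ts : ..., op : "i", ns : "test.foo", o : { _id : 1, x : 1 } }
+           "u" : { ts : ..., op : "u", ns : "test.foo", o2 : { _id : 1 }, o : { $set : { x : 2 } } }
+           "d" : { ts : ..., op : "d", ns : "test.foo", b : true, o : { _id : 1 } }
+           "c" : { ts : ..., op : "c", ns : "test.$cmd", o : { applyOps : [ ... ] } }
+    */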
+
+ void logKeepalive();
+
+ /** puts obj in the oplog as a comment (a no-op). Just for diags.
+ convention is
+ { msg : "text", ... }
+ */
+ void logOpComment(const BSONObj& obj);
+
+ void oplogCheckCloseDatabase( Database * db );
+
+ extern int __findingStartInitialTimeout; // configurable for testing
+
+ class QueryPlan;
+
+ /** Implements an optimized procedure for finding the first op in the oplog. */
+ class FindingStartCursor {
+ public:
+
+ /**
+ * The cursor will attempt to find the first op in the oplog matching the
+ * 'ts' field of the qp's query.
+ */
+ FindingStartCursor( const QueryPlan & qp );
+
+ /** @return true if the first matching op in the oplog has been found. */
+ bool done() const { return !_findingStart; }
+
+ /** @return cursor pointing to the first matching op, if done(). */
+ shared_ptr<Cursor> cursor() { verify( 14835, done() ); return _c; }
+
+ /** Iterate the cursor, to continue trying to find matching op. */
+ void next();
+
+ /** Yield cursor, if not done(). */
+ bool prepareToYield() {
+ if ( _findingStartCursor ) {
+ return _findingStartCursor->prepareToYield( _yieldData );
+ }
+ return false;
+ }
+
+ /** Recover from cursor yield. */
+ void recoverFromYield() {
+ if ( _findingStartCursor ) {
+ if ( !ClientCursor::recoverFromYield( _yieldData ) ) {
+ _findingStartCursor.reset( 0 );
+ msgassertedNoTrace( 15889, "FindingStartCursor::recoverFromYield() failed to recover" );
+ }
+ }
+ }
+ private:
+ enum FindingStartMode { Initial, FindExtent, InExtent };
+ const QueryPlan &_qp;
+ bool _findingStart;
+ FindingStartMode _findingStartMode;
+ auto_ptr< CoveredIndexMatcher > _matcher;
+ Timer _findingStartTimer;
+ ClientCursor::CleanupPointer _findingStartCursor;
+ shared_ptr<Cursor> _c;
+ ClientCursor::YieldData _yieldData;
+ DiskLoc extentFirstLoc( const DiskLoc &rec );
+
+ DiskLoc prevExtentFirstLoc( const DiskLoc &rec );
+ void createClientCursor( const DiskLoc &startLoc = DiskLoc() );
+ void destroyClientCursor() {
+ _findingStartCursor.reset( 0 );
+ }
+ void init();
+ bool firstDocMatchesOrEmpty() const;
+ };
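+    /* Minimal usage sketch (illustrative only; the production call site is UserQueryOp in ops/query.cpp):
+
+           FindingStartCursor fsc( qp );          // qp: a QueryPlan over the oplog with a 'ts' query
+           while ( !fsc.done() )
+               fsc.next();                        // may prepareToYield()/recoverFromYield() between steps
+           shared_ptr<Cursor> c = fsc.cursor();   // positioned at the first matching op
+    */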
+
+ class Sync {
+ protected:
+ string hn;
+ public:
+ Sync(const string& hostname) : hn(hostname) {}
+ virtual ~Sync() {}
+ virtual BSONObj getMissingDoc(const BSONObj& o);
+
+ /**
+ * If applyOperation_inlock should be called again after an update fails.
+ */
+ virtual bool shouldRetry(const BSONObj& o);
+ };
+
+ void pretouchOperation(const BSONObj& op);
+ void pretouchN(vector<BSONObj>&, unsigned a, unsigned b);
+
+ /**
+ * take an op and apply locally
+ * used for applying from an oplog
+ * @param fromRepl really from replication or for testing/internal/command/etc...
+     * Returns true if the op was an update that could not be applied (true indicates failure)
+ */
+ bool applyOperation_inlock(const BSONObj& op , bool fromRepl = true );
+}
diff --git a/src/mongo/db/oplogreader.h b/src/mongo/db/oplogreader.h
new file mode 100644
index 00000000000..6efd1469c01
--- /dev/null
+++ b/src/mongo/db/oplogreader.h
@@ -0,0 +1,121 @@
+/** @file oplogreader.h */
+
+#pragma once
+
+#include "../client/dbclient.h"
+#include "../client/constants.h"
+#include "dbhelpers.h"
+
+namespace mongo {
+
+ /* started abstracting out the querying of the primary/master's oplog
+       still fairly awkward, but a start.
+ */
+ class OplogReader {
+ shared_ptr<DBClientConnection> _conn;
+ shared_ptr<DBClientCursor> cursor;
+ public:
+ OplogReader() { }
+ ~OplogReader() { }
+ void resetCursor() { cursor.reset(); }
+ void resetConnection() {
+ cursor.reset();
+ _conn.reset();
+ }
+ DBClientConnection* conn() { return _conn.get(); }
+ BSONObj findOne(const char *ns, const Query& q) {
+ return conn()->findOne(ns, q, 0, QueryOption_SlaveOk);
+ }
+ BSONObj getLastOp(const char *ns) {
+ return findOne(ns, Query().sort(reverseNaturalObj));
+ }
+
+ /* ok to call if already connected */
+ bool connect(string hostname);
+
+ bool connect(const BSONObj& rid, const int from, const string& to);
+
+ void tailCheck() {
+ if( cursor.get() && cursor->isDead() ) {
+ log() << "repl: old cursor isDead, will initiate a new one" << endl;
+ resetCursor();
+ }
+ }
+
+ bool haveCursor() { return cursor.get() != 0; }
+
+        /** this is ok, but commented out: before using it, consider whether QueryOption_OplogReplay
+            is needed. if not, this is fine as-is; if so, that flag needs to be added.
+        *//*
+ void query(const char *ns, const BSONObj& query) {
+ assert( !haveCursor() );
+ cursor.reset( _conn->query(ns, query, 0, 0, 0, QueryOption_SlaveOk).release() );
+ }*/
+
+        /** this can be used; it is commented out because it does not set
+            QueryOption_OplogReplay, which is likely important.  it could be uncommented
+            once that flag is added.
+        */
+ /*
+ void queryGTE(const char *ns, OpTime t) {
+ BSONObjBuilder q;
+ q.appendDate("$gte", t.asDate());
+ BSONObjBuilder q2;
+ q2.append("ts", q.done());
+ query(ns, q2.done());
+ }
+ */
+
+ void tailingQuery(const char *ns, const BSONObj& query, const BSONObj* fields=0) {
+ assert( !haveCursor() );
+ log(2) << "repl: " << ns << ".find(" << query.toString() << ')' << endl;
+ cursor.reset( _conn->query( ns, query, 0, 0, fields,
+ QueryOption_CursorTailable | QueryOption_SlaveOk | QueryOption_OplogReplay |
+ /* TODO: slaveOk maybe shouldn't use? */
+ QueryOption_AwaitData
+ ).release() );
+ }
+
+ void tailingQueryGTE(const char *ns, OpTime t, const BSONObj* fields=0) {
+ BSONObjBuilder q;
+ q.appendDate("$gte", t.asDate());
+ BSONObjBuilder query;
+ query.append("ts", q.done());
+ tailingQuery(ns, query.done(), fields);
+ }
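+        /* For illustration: with some OpTime t, the query built above is { ts : { $gte : <t as Date> } },
+           issued tailable with QueryOption_SlaveOk | QueryOption_OplogReplay | QueryOption_AwaitData. */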
+
+ /* Do a tailing query, but only send the ts field back. */
+ void ghostQueryGTE(const char *ns, OpTime t) {
+ const BSONObj fields = BSON("ts" << 1 << "_id" << 0);
+ return tailingQueryGTE(ns, t, &fields);
+ }
+
+ bool more() {
+ uassert( 15910, "Doesn't have cursor for reading oplog", cursor.get() );
+ return cursor->more();
+ }
+
+ bool moreInCurrentBatch() {
+ uassert( 15911, "Doesn't have cursor for reading oplog", cursor.get() );
+ return cursor->moreInCurrentBatch();
+ }
+
+        /* old mongods can't do the await flag... */
+ bool awaitCapable() {
+ return cursor->hasResultFlag(ResultFlag_AwaitCapable);
+ }
+
+ void peek(vector<BSONObj>& v, int n) {
+ if( cursor.get() )
+ cursor->peek(v,n);
+ }
+ BSONObj nextSafe() { return cursor->nextSafe(); }
+ BSONObj next() { return cursor->next(); }
+ void putBack(BSONObj op) { cursor->putBack(op); }
+
+ private:
+ bool commonConnect(const string& hostName);
+ bool passthroughHandshake(const BSONObj& rid, const int f);
+ };
+
+}
diff --git a/src/mongo/db/ops/count.cpp b/src/mongo/db/ops/count.cpp
new file mode 100644
index 00000000000..3c183596b9d
--- /dev/null
+++ b/src/mongo/db/ops/count.cpp
@@ -0,0 +1,103 @@
+// count.cpp
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "count.h"
+
+#include "../client.h"
+#include "../clientcursor.h"
+#include "../namespace.h"
+#include "../queryutil.h"
+
+namespace mongo {
+
+ long long runCount( const char *ns, const BSONObj &cmd, string &err ) {
+ Client::Context cx(ns);
+ NamespaceDetails *d = nsdetails( ns );
+ if ( !d ) {
+ err = "ns missing";
+ return -1;
+ }
+ BSONObj query = cmd.getObjectField("query");
+
+ // count of all objects
+ if ( query.isEmpty() ) {
+ return applySkipLimit( d->stats.nrecords , cmd );
+ }
+
+ string exceptionInfo;
+ long long count = 0;
+ long long skip = cmd["skip"].numberLong();
+ long long limit = cmd["limit"].numberLong();
+ bool simpleEqualityMatch;
+ shared_ptr<Cursor> cursor = NamespaceDetailsTransient::getCursor( ns, query, BSONObj(), false, &simpleEqualityMatch );
+ ClientCursor::CleanupPointer ccPointer;
+ ElapsedTracker timeToStartYielding( 256, 20 );
+ try {
+ while( cursor->ok() ) {
+ if ( !ccPointer ) {
+ if ( timeToStartYielding.intervalHasElapsed() ) {
+ // Lazily construct a ClientCursor, avoiding a performance regression when scanning a very
+ // small number of documents.
+ ccPointer.reset( new ClientCursor( QueryOption_NoCursorTimeout, cursor, ns ) );
+ }
+ }
+ else if ( !ccPointer->yieldSometimes( simpleEqualityMatch ? ClientCursor::DontNeed : ClientCursor::MaybeCovered ) ||
+ !cursor->ok() ) {
+ break;
+ }
+
+ // With simple equality matching there is no need to use the matcher because the bounds
+ // are enforced by the FieldRangeVectorIterator and only key fields have constraints. There
+ // is no need to do key deduping because an exact value is specified in the query for all key
+ // fields and duplicate keys are not allowed per document.
+ // NOTE In the distant past we used a min/max bounded BtreeCursor with a shallow
+ // equality comparison to check for matches in the simple match case. That may be
+ // more performant, but I don't think we've measured the performance.
+ if ( simpleEqualityMatch ||
+ ( cursor->currentMatches() && !cursor->getsetdup( cursor->currLoc() ) ) ) {
+
+ if ( skip > 0 ) {
+ --skip;
+ }
+ else {
+ ++count;
+ if ( limit > 0 && count >= limit ) {
+ break;
+ }
+ }
+ }
+ cursor->advance();
+ }
+ ccPointer.reset();
+ return count;
+
+ } catch ( const DBException &e ) {
+ exceptionInfo = e.toString();
+ } catch ( const std::exception &e ) {
+ exceptionInfo = e.what();
+ } catch ( ... ) {
+ exceptionInfo = "unknown exception";
+ }
+ // Historically we have returned zero in many count assertion cases - see SERVER-2291.
+ log() << "Count with ns: " << ns << " and query: " << query
+ << " failed with exception: " << exceptionInfo
+ << endl;
+ return 0;
+ }
+
+} // namespace mongo
diff --git a/src/mongo/db/ops/count.h b/src/mongo/db/ops/count.h
new file mode 100644
index 00000000000..807741e1253
--- /dev/null
+++ b/src/mongo/db/ops/count.h
@@ -0,0 +1,30 @@
+// count.h
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "../jsobj.h"
+#include "../diskloc.h"
+
+namespace mongo {
+
+ /**
+ * { count: "collectionname"[, query: <query>] }
+     * @return -1 if the ns does not exist, 0 if an error occurs while counting (see SERVER-2291), otherwise the match count.
+ */
+ long long runCount(const char *ns, const BSONObj& cmd, string& err);
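+    /* Example command object (values hypothetical): { count : "foo", query : { x : 1 }, skip : 10, limit : 100 }
+       runCount reads the optional "query", "skip" and "limit" fields as shown in count.cpp. */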
+
+} // namespace mongo
diff --git a/src/mongo/db/ops/delete.cpp b/src/mongo/db/ops/delete.cpp
new file mode 100644
index 00000000000..e33611c151e
--- /dev/null
+++ b/src/mongo/db/ops/delete.cpp
@@ -0,0 +1,158 @@
+// delete.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "delete.h"
+#include "../queryoptimizer.h"
+#include "../oplog.h"
+
+namespace mongo {
+
+ /* ns: namespace, e.g. <database>.<collection>
+ pattern: the "where" clause / criteria
+ justOne: stop after 1 match
+ god: allow access to system namespaces, and don't yield
+ */
+ long long deleteObjects(const char *ns, BSONObj pattern, bool justOneOrig, bool logop, bool god, RemoveSaver * rs ) {
+ if( !god ) {
+ if ( strstr(ns, ".system.") ) {
+ /* note a delete from system.indexes would corrupt the db
+ if done here, as there are pointers into those objects in
+ NamespaceDetails.
+ */
+ uassert(12050, "cannot delete from system namespace", legalClientSystemNS( ns , true ) );
+ }
+ if ( strchr( ns , '$' ) ) {
+ log() << "cannot delete from collection with reserved $ in name: " << ns << endl;
+ uassert( 10100 , "cannot delete from collection with reserved $ in name", strchr(ns, '$') == 0 );
+ }
+ }
+
+ {
+ NamespaceDetails *d = nsdetails( ns );
+ if ( ! d )
+ return 0;
+ uassert( 10101 , "can't remove from a capped collection" , ! d->capped );
+ }
+
+ long long nDeleted = 0;
+
+ shared_ptr< Cursor > creal = NamespaceDetailsTransient::getCursor( ns, pattern, BSONObj(), false, 0 );
+
+ if( !creal->ok() )
+ return nDeleted;
+
+ shared_ptr< Cursor > cPtr = creal;
+ auto_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout, cPtr, ns) );
+ cc->setDoingDeletes( true );
+
+ CursorId id = cc->cursorid();
+
+ bool justOne = justOneOrig;
+ bool canYield = !god && !(creal->matcher() && creal->matcher()->docMatcher().atomic());
+
+ do {
+ // TODO: we can generalize this I believe
+ //
+ bool willNeedRecord = (creal->matcher() && creal->matcher()->needRecord()) || pattern.isEmpty() || isSimpleIdQuery( pattern );
+ if ( ! willNeedRecord ) {
+ // TODO: this is a total hack right now
+ // check if the index full encompasses query
+
+ if ( pattern.nFields() == 1 &&
+ str::equals( pattern.firstElement().fieldName() , creal->indexKeyPattern().firstElement().fieldName() ) )
+ willNeedRecord = true;
+ }
+
+ if ( canYield && ! cc->yieldSometimes( willNeedRecord ? ClientCursor::WillNeed : ClientCursor::MaybeCovered ) ) {
+ cc.release(); // has already been deleted elsewhere
+ // TODO should we assert or something?
+ break;
+ }
+ if ( !cc->ok() ) {
+ break; // if we yielded, could have hit the end
+ }
+
+ // this way we can avoid calling updateLocation() every time (expensive)
+ // as well as some other nuances handled
+ cc->setDoingDeletes( true );
+
+ DiskLoc rloc = cc->currLoc();
+ BSONObj key = cc->currKey();
+
+ bool match = creal->currentMatches();
+ bool dup = cc->c()->getsetdup(rloc);
+
+ if ( ! cc->advance() )
+ justOne = true;
+
+ if ( ! match )
+ continue;
+
+ assert( !dup ); // can't be a dup, we deleted it!
+
+ if ( !justOne ) {
+                /* NOTE: this is SLOW. this is not good: noteLocation() was designed to be called across getMore
+                   blocks.  here we might call it millions of times, which would be bad.
+ */
+ cc->c()->prepareToTouchEarlierIterate();
+ }
+
+ if ( logop ) {
+ BSONElement e;
+ if( BSONObj( rloc.rec() ).getObjectID( e ) ) {
+ BSONObjBuilder b;
+ b.append( e );
+ bool replJustOne = true;
+ logOp( "d", ns, b.done(), 0, &replJustOne );
+ }
+ else {
+ problem() << "deleted object without id, not logging" << endl;
+ }
+ }
+
+ if ( rs )
+ rs->goingToDelete( rloc.obj() /*cc->c->current()*/ );
+
+ theDataFileMgr.deleteRecord(ns, rloc.rec(), rloc);
+ nDeleted++;
+ if ( justOne ) {
+ break;
+ }
+ cc->c()->recoverFromTouchingEarlierIterate();
+
+ if( !god )
+ getDur().commitIfNeeded();
+
+ if( debug && god && nDeleted == 100 )
+ log() << "warning high number of deletes with god=true which could use significant memory" << endl;
+ }
+ while ( cc->ok() );
+
+ if ( cc.get() && ClientCursor::find( id , false ) == 0 ) {
+ // TODO: remove this and the id declaration above if this doesn't trigger
+ // if it does, then i'm very confused (ERH 06/2011)
+ error() << "this should be impossible" << endl;
+ printStackTrace();
+ cc.release();
+ }
+
+ return nDeleted;
+ }
+
+}
diff --git a/src/mongo/db/ops/delete.h b/src/mongo/db/ops/delete.h
new file mode 100644
index 00000000000..a74b7a664bc
--- /dev/null
+++ b/src/mongo/db/ops/delete.h
@@ -0,0 +1,33 @@
+// delete.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../../pch.h"
+#include "../jsobj.h"
+#include "../clientcursor.h"
+
+namespace mongo {
+
+ class RemoveSaver;
+
+    // Deletes objects from ns matching pattern; if justOne is true, at most one object is deleted.
+ long long deleteObjects(const char *ns, BSONObj pattern, bool justOne, bool logop = false, bool god=false, RemoveSaver * rs=0);
+
+
+}
diff --git a/src/mongo/db/ops/query.cpp b/src/mongo/db/ops/query.cpp
new file mode 100644
index 00000000000..15e3ed9053f
--- /dev/null
+++ b/src/mongo/db/ops/query.cpp
@@ -0,0 +1,870 @@
+// query.cpp
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "query.h"
+#include "../pdfile.h"
+#include "../jsobjmanipulator.h"
+#include "../../bson/util/builder.h"
+#include <time.h>
+#include "../introspect.h"
+#include "../btree.h"
+#include "../../util/lruishmap.h"
+#include "../json.h"
+#include "../repl.h"
+#include "../replutil.h"
+#include "../scanandorder.h"
+#include "../security.h"
+#include "../curop-inl.h"
+#include "../commands.h"
+#include "../queryoptimizer.h"
+#include "../lasterror.h"
+#include "../../s/d_logic.h"
+#include "../repl_block.h"
+#include "../../server.h"
+#include "../d_concurrency.h"
+
+namespace mongo {
+
+ /* We cut off further objects once we cross this threshold; thus, you might get
+ a little bit more than this, it is a threshold rather than a limit.
+       a little bit more than this; it is a threshold rather than a limit.
+ const int MaxBytesToReturnToClientAtOnce = 4 * 1024 * 1024;
+
+ //ns->query->DiskLoc
+// LRUishMap<BSONObj,DiskLoc,5> lrutest(123);
+
+ extern bool useCursors;
+ extern bool useHints;
+
+ bool runCommands(const char *ns, BSONObj& jsobj, CurOp& curop, BufBuilder &b, BSONObjBuilder& anObjBuilder, bool fromRepl, int queryOptions) {
+ try {
+ return _runCommands(ns, jsobj, b, anObjBuilder, fromRepl, queryOptions);
+ }
+ catch( SendStaleConfigException& ){
+ throw;
+ }
+ catch ( AssertionException& e ) {
+ assert( e.getCode() != SendStaleConfigCode && e.getCode() != RecvStaleConfigCode );
+
+ e.getInfo().append( anObjBuilder , "assertion" , "assertionCode" );
+ curop.debug().exceptionInfo = e.getInfo();
+ }
+ anObjBuilder.append("errmsg", "db assertion failure");
+ anObjBuilder.append("ok", 0.0);
+ BSONObj x = anObjBuilder.done();
+ b.appendBuf((void*) x.objdata(), x.objsize());
+ return true;
+ }
+
+
+ BSONObj id_obj = fromjson("{\"_id\":1}");
+ BSONObj empty_obj = fromjson("{}");
+
+
+ //int dump = 0;
+
+ /* empty result for error conditions */
+ QueryResult* emptyMoreResult(long long cursorid) {
+ BufBuilder b(32768);
+ b.skip(sizeof(QueryResult));
+ QueryResult *qr = (QueryResult *) b.buf();
+ qr->cursorId = 0; // 0 indicates no more data to retrieve.
+ qr->startingFrom = 0;
+ qr->len = b.len();
+ qr->setOperation(opReply);
+ qr->initializeResultFlags();
+ qr->nReturned = 0;
+ b.decouple();
+ return qr;
+ }
+
+ QueryResult* processGetMore(const char *ns, int ntoreturn, long long cursorid , CurOp& curop, int pass, bool& exhaust ) {
+ exhaust = false;
+ ClientCursor::Pointer p(cursorid);
+ ClientCursor *cc = p.c();
+
+ int bufSize = 512 + sizeof( QueryResult ) + MaxBytesToReturnToClientAtOnce;
+
+ BufBuilder b( bufSize );
+ b.skip(sizeof(QueryResult));
+ int resultFlags = ResultFlag_AwaitCapable;
+ int start = 0;
+ int n = 0;
+
+ if ( unlikely(!cc) ) {
+ LOGSOME << "getMore: cursorid not found " << ns << " " << cursorid << endl;
+ cursorid = 0;
+ resultFlags = ResultFlag_CursorNotFound;
+ }
+ else {
+ // check for spoofing of the ns such that it does not match the one originally there for the cursor
+ uassert(14833, "auth error", str::equals(ns, cc->ns().c_str()));
+
+ if ( pass == 0 )
+ cc->updateSlaveLocation( curop );
+
+ int queryOptions = cc->queryOptions();
+
+ curop.debug().query = cc->query();
+
+ start = cc->pos();
+ Cursor *c = cc->c();
+ c->checkLocation();
+ DiskLoc last;
+
+ scoped_ptr<Projection::KeyOnly> keyFieldsOnly;
+ if ( cc->modifiedKeys() == false && cc->isMultiKey() == false && cc->fields )
+ keyFieldsOnly.reset( cc->fields->checkKey( cc->indexKeyPattern() ) );
+
+ // This manager may be stale, but it's the state of chunking when the cursor was created.
+ ShardChunkManagerPtr manager = cc->getChunkManager();
+
+ while ( 1 ) {
+ if ( !c->ok() ) {
+ if ( c->tailable() ) {
+ /* when a tailable cursor hits "EOF", ok() goes false, and current() is null. however
+                           advance() can still be retried as a reactivation attempt. when there is new data, it will
+ return true. that's what we are doing here.
+ */
+ if ( c->advance() )
+ continue;
+
+ if( n == 0 && (queryOptions & QueryOption_AwaitData) && pass < 1000 ) {
+ return 0;
+ }
+
+ break;
+ }
+ p.release();
+ bool ok = ClientCursor::erase(cursorid);
+ assert(ok);
+ cursorid = 0;
+ cc = 0;
+ break;
+ }
+
+ // in some cases (clone collection) there won't be a matcher
+ if ( c->matcher() && !c->matcher()->matchesCurrent( c ) ) {
+ }
+ else if ( manager && ! manager->belongsToMe( cc ) ){
+ LOG(2) << "cursor skipping document in un-owned chunk: " << c->current() << endl;
+ }
+ else {
+ if( c->getsetdup(c->currLoc()) ) {
+ //out() << " but it's a dup \n";
+ }
+ else {
+ last = c->currLoc();
+ n++;
+
+ if ( keyFieldsOnly ) {
+ fillQueryResultFromObj(b, 0, keyFieldsOnly->hydrate( c->currKey() ) );
+ }
+ else {
+ BSONObj js = c->current();
+ // show disk loc should be part of the main query, not in an $or clause, so this should be ok
+ fillQueryResultFromObj(b, cc->fields.get(), js, ( cc->pq.get() && cc->pq->showDiskLoc() ? &last : 0));
+ }
+
+ if ( ( ntoreturn && n >= ntoreturn ) || b.len() > MaxBytesToReturnToClientAtOnce ) {
+ c->advance();
+ cc->incPos( n );
+ break;
+ }
+ }
+ }
+ c->advance();
+
+ if ( ! cc->yieldSometimes( ClientCursor::MaybeCovered ) ) {
+ ClientCursor::erase(cursorid);
+ cursorid = 0;
+ cc = 0;
+ p.deleted();
+ break;
+ }
+ }
+
+ if ( cc ) {
+ cc->updateLocation();
+ cc->mayUpgradeStorage();
+ cc->storeOpForSlave( last );
+ exhaust = cc->queryOptions() & QueryOption_Exhaust;
+ }
+ }
+
+ QueryResult *qr = (QueryResult *) b.buf();
+ qr->len = b.len();
+ qr->setOperation(opReply);
+ qr->_resultFlags() = resultFlags;
+ qr->cursorId = cursorid;
+ qr->startingFrom = start;
+ qr->nReturned = n;
+ b.decouple();
+
+ return qr;
+ }
+
+ class ExplainBuilder {
+ // Note: by default we filter out allPlans and oldPlan in the shell's
+ // explain() function. If you add any recursive structures, make sure to
+ // edit the JS to make sure everything gets filtered.
+ public:
+ ExplainBuilder() : _i() {}
+ void ensureStartScan() {
+ if ( !_a.get() ) {
+ _a.reset( new BSONArrayBuilder() );
+ }
+ }
+ void noteCursor( Cursor *c ) {
+ BSONObjBuilder b( _a->subobjStart() );
+ b << "cursor" << c->toString() << "indexBounds" << c->prettyIndexBounds();
+ b.done();
+ }
+ void noteScan( Cursor *c, long long nscanned, long long nscannedObjects, int n, bool scanAndOrder,
+ int millis, bool hint, int nYields , int nChunkSkips , bool indexOnly ) {
+ if ( _i == 1 ) {
+ _c.reset( new BSONArrayBuilder() );
+ *_c << _b->obj();
+ }
+ if ( _i == 0 ) {
+ _b.reset( new BSONObjBuilder() );
+ }
+ else {
+ _b.reset( new BSONObjBuilder( _c->subobjStart() ) );
+ }
+ *_b << "cursor" << c->toString();
+ _b->appendNumber( "nscanned", nscanned );
+ _b->appendNumber( "nscannedObjects", nscannedObjects );
+ *_b << "n" << n;
+
+ if ( scanAndOrder )
+ *_b << "scanAndOrder" << true;
+
+ *_b << "millis" << millis;
+
+ *_b << "nYields" << nYields;
+ *_b << "nChunkSkips" << nChunkSkips;
+ *_b << "isMultiKey" << c->isMultiKey();
+ *_b << "indexOnly" << indexOnly;
+
+ *_b << "indexBounds" << c->prettyIndexBounds();
+
+ c->explainDetails( *_b );
+
+ if ( !hint ) {
+ *_b << "allPlans" << _a->arr();
+ }
+ if ( _i != 0 ) {
+ _b->done();
+ }
+ _a.reset( 0 );
+ ++_i;
+ }
+ BSONObj finishWithSuffix( long long nscanned, long long nscannedObjects, int n, int millis, const BSONObj &suffix ) {
+ if ( _i > 1 ) {
+ BSONObjBuilder b;
+ b << "clauses" << _c->arr();
+ b.appendNumber( "nscanned", nscanned );
+ b.appendNumber( "nscannedObjects", nscannedObjects );
+ b << "n" << n;
+ b << "millis" << millis;
+ b.appendElements( suffix );
+ return b.obj();
+ }
+ else {
+ stringstream host;
+ host << getHostNameCached() << ":" << cmdLine.port;
+ *_b << "server" << host.str();
+ _b->appendElements( suffix );
+ return _b->obj();
+ }
+ }
+ private:
+ auto_ptr< BSONArrayBuilder > _a;
+ auto_ptr< BSONObjBuilder > _b;
+ auto_ptr< BSONArrayBuilder > _c;
+ int _i;
+ };
+
+ // Implements database 'query' requests using the query optimizer's QueryOp interface
+ class UserQueryOp : public QueryOp {
+ public:
+
+ UserQueryOp( const ParsedQuery& pq, Message &response, ExplainBuilder &eb, CurOp &curop ) :
+ _buf( 32768 ) , // TODO be smarter here
+ _pq( pq ) ,
+ _ntoskip( pq.getSkip() ) ,
+ _nscanned(0), _oldNscanned(0), _nscannedObjects(0), _oldNscannedObjects(0),
+ _n(0),
+ _oldN(0),
+ _nYields(),
+ _nChunkSkips(),
+ _chunkManager( shardingState.needShardChunkManager(pq.ns()) ?
+ shardingState.getShardChunkManager(pq.ns()) : ShardChunkManagerPtr() ),
+ _inMemSort(false),
+ _capped(false),
+ _saveClientCursor(false),
+ _wouldSaveClientCursor(false),
+ _oplogReplay( pq.hasOption( QueryOption_OplogReplay) ),
+ _response( response ),
+ _eb( eb ),
+ _curop( curop ),
+ _yieldRecoveryFailed()
+ {}
+
+ virtual void _init() {
+ // only need to put the QueryResult fields there if we're building the first buffer in the message.
+ if ( _response.empty() ) {
+ _buf.skip( sizeof( QueryResult ) );
+ }
+
+ if ( _oplogReplay ) {
+ _findingStartCursor.reset( new FindingStartCursor( qp() ) );
+ _capped = true;
+ }
+ else {
+ _c = qp().newCursor( DiskLoc() , _pq.getNumToReturn() + _pq.getSkip() );
+ _capped = _c->capped();
+
+ // setup check for if we can only use index to extract
+ if ( _c->modifiedKeys() == false && _c->isMultiKey() == false && _pq.getFields() ) {
+ _keyFieldsOnly.reset( _pq.getFields()->checkKey( _c->indexKeyPattern() ) );
+ }
+ }
+
+ if ( qp().scanAndOrderRequired() ) {
+ _inMemSort = true;
+ _so.reset( new ScanAndOrder( _pq.getSkip() , _pq.getNumToReturn() , _pq.getOrder(), qp().multikeyFrs() ) );
+ }
+
+ if ( _pq.isExplain() ) {
+ _eb.noteCursor( _c.get() );
+ }
+
+ }
+
+ virtual bool prepareToYield() {
+ if ( _findingStartCursor.get() ) {
+ return _findingStartCursor->prepareToYield();
+ }
+ else {
+ if ( _c && !_cc ) {
+ _cc.reset( new ClientCursor( QueryOption_NoCursorTimeout , _c , _pq.ns() ) );
+ }
+ if ( _cc ) {
+ return _cc->prepareToYield( _yieldData );
+ }
+ }
+ // no active cursor - ok to yield
+ return true;
+ }
+
+ virtual void recoverFromYield() {
+ _nYields++;
+
+ if ( _findingStartCursor.get() ) {
+ _findingStartCursor->recoverFromYield();
+ }
+ else if ( _cc && !ClientCursor::recoverFromYield( _yieldData ) ) {
+ _yieldRecoveryFailed = true;
+ _c.reset();
+ _cc.reset();
+ _so.reset();
+
+ if ( _capped ) {
+ msgassertedNoTrace( 13338, str::stream() << "capped cursor overrun during query: " << _pq.ns() );
+ }
+ else if ( qp().mustAssertOnYieldFailure() ) {
+ msgassertedNoTrace( 15890, str::stream() << "UserQueryOp::recoverFromYield() failed to recover: " << _pq.ns() );
+ }
+ else {
+ // we don't fail query since we're fine with returning partial data if collection dropped
+
+ // todo: this is wrong. the cursor could be gone if closeAllDatabases command just ran
+ }
+
+ }
+ }
+
+ virtual long long nscanned() {
+ if ( _findingStartCursor.get() ) {
+ return 0; // should only be one query plan, so value doesn't really matter.
+ }
+ return _c.get() ? _c->nscanned() : _nscanned;
+ }
+
+ virtual void next() {
+ if ( _findingStartCursor.get() ) {
+ if ( !_findingStartCursor->done() ) {
+ _findingStartCursor->next();
+ }
+ if ( _findingStartCursor->done() ) {
+ _c = _findingStartCursor->cursor();
+ _findingStartCursor.reset( 0 );
+ }
+ _capped = true;
+ return;
+ }
+
+ if ( !_c || !_c->ok() ) {
+ finish( false );
+ return;
+ }
+
+ bool mayCreateCursor1 = _pq.wantMore() && ! _inMemSort && _pq.getNumToReturn() != 1 && useCursors;
+
+ if( 0 ) {
+ cout << "SCANNING this: " << this << " key: " << _c->currKey() << " obj: " << _c->current() << endl;
+ }
+
+ if ( _pq.getMaxScan() && _nscanned >= _pq.getMaxScan() ) {
+ finish( true ); //?
+ return;
+ }
+
+ _nscanned = _c->nscanned();
+ if ( !matcher( _c )->matchesCurrent(_c.get() , &_details ) ) {
+ // not a match, continue onward
+ if ( _details._loadedObject )
+ _nscannedObjects++;
+ }
+ else {
+ _nscannedObjects++;
+ DiskLoc cl = _c->currLoc();
+ if ( _chunkManager && ! _chunkManager->belongsToMe( cl.obj() ) ) { // TODO: should make this covered at some point
+ _nChunkSkips++;
+ // log() << "TEMP skipping un-owned chunk: " << _c->current() << endl;
+ }
+ else if( _c->getsetdup(cl) ) {
+ // dup
+ }
+ else {
+ // got a match.
+
+ if ( _inMemSort ) {
+ // note: no cursors for non-indexed, ordered results. results must be fairly small.
+ _so->add( _pq.returnKey() ? _c->currKey() : _c->current(), _pq.showDiskLoc() ? &cl : 0 );
+ }
+ else if ( _ntoskip > 0 ) {
+ _ntoskip--;
+ }
+ else {
+ if ( _pq.isExplain() ) {
+ _n++;
+ if ( n() >= _pq.getNumToReturn() && !_pq.wantMore() ) {
+ // .limit() was used, show just that much.
+ finish( true ); //?
+ return;
+ }
+ }
+ else {
+
+ if ( _pq.returnKey() ) {
+ BSONObjBuilder bb( _buf );
+ bb.appendKeys( _c->indexKeyPattern() , _c->currKey() );
+ bb.done();
+ }
+ else if ( _keyFieldsOnly ) {
+ fillQueryResultFromObj( _buf , 0 , _keyFieldsOnly->hydrate( _c->currKey() ) );
+ }
+ else {
+ BSONObj js = _c->current();
+ assert( js.isValid() );
+
+ if ( _oplogReplay ) {
+ BSONElement e = js["ts"];
+ if ( e.type() == Date || e.type() == Timestamp )
+ _slaveReadTill = e._opTime();
+ }
+
+ fillQueryResultFromObj( _buf , _pq.getFields() , js , (_pq.showDiskLoc() ? &cl : 0));
+ }
+ _n++;
+ if ( ! _c->supportGetMore() ) {
+ if ( _pq.enough( n() ) || _buf.len() >= MaxBytesToReturnToClientAtOnce ) {
+ finish( true );
+ return;
+ }
+ }
+ else if ( _pq.enoughForFirstBatch( n() , _buf.len() ) ) {
+ /* if only 1 requested, no cursor saved for efficiency...we assume it is findOne() */
+ if ( mayCreateCursor1 ) {
+ _wouldSaveClientCursor = true;
+ if ( _c->advance() ) {
+ // more...so save a cursor
+ _saveClientCursor = true;
+ }
+ }
+ finish( true );
+ return;
+ }
+ }
+ }
+ }
+ }
+ _c->advance();
+ }
+
+ // this plan won, so set data for response broadly
+ void finish( bool stop ) {
+ massert( 13638, "client cursor dropped during explain query yield", !_pq.isExplain() || _c.get() );
+
+ if ( _pq.isExplain() ) {
+ _n = _inMemSort ? _so->size() : _n;
+ }
+ else if ( _inMemSort ) {
+ if( _so.get() )
+ _so->fill( _buf, _pq.getFields() , _n );
+ }
+
+ if ( _c.get() ) {
+ _nscanned = _c->nscanned();
+
+ if ( _pq.hasOption( QueryOption_CursorTailable ) && _pq.getNumToReturn() != 1 )
+ _c->setTailable();
+
+ // If the tailing request succeeded.
+ if ( _c->tailable() )
+ _saveClientCursor = true;
+ }
+
+ if ( _pq.isExplain() ) {
+ _eb.noteScan( _c.get(), _nscanned, _nscannedObjects, _n, scanAndOrderRequired(),
+ _curop.elapsedMillis(), useHints && !_pq.getHint().eoo(), _nYields ,
+ _nChunkSkips, _keyFieldsOnly.get() > 0 );
+ }
+ else {
+ if ( _buf.len() ) {
+ _response.appendData( _buf.buf(), _buf.len() );
+ _buf.decouple();
+ }
+ }
+
+ if ( stop ) {
+ setStop();
+ }
+ else {
+ setComplete();
+ }
+
+ }
+
+ void finishExplain( const BSONObj &suffix ) {
+ BSONObj obj = _eb.finishWithSuffix( totalNscanned(), nscannedObjects(), n(), _curop.elapsedMillis(), suffix);
+ fillQueryResultFromObj(_buf, 0, obj);
+ _n = 1;
+ _oldN = 0;
+ _response.appendData( _buf.buf(), _buf.len() );
+ _buf.decouple();
+ }
+
+ virtual bool mayRecordPlan() const {
+ return !_yieldRecoveryFailed && ( _pq.getNumToReturn() != 1 ) && ( ( _n > _pq.getNumToReturn() / 2 ) || ( complete() && !stopRequested() ) );
+ }
+
+ virtual QueryOp *_createChild() const {
+ if ( _pq.isExplain() ) {
+ _eb.ensureStartScan();
+ }
+ UserQueryOp *ret = new UserQueryOp( _pq, _response, _eb, _curop );
+ ret->_oldN = n();
+ ret->_oldNscanned = totalNscanned();
+ ret->_oldNscannedObjects = nscannedObjects();
+ ret->_ntoskip = _ntoskip;
+ return ret;
+ }
+
+ bool scanAndOrderRequired() const { return _inMemSort; }
+ shared_ptr<Cursor> cursor() { return _c; }
+ int n() const { return _oldN + _n; }
+ long long totalNscanned() const { return _nscanned + _oldNscanned; }
+ long long nscannedObjects() const { return _nscannedObjects + _oldNscannedObjects; }
+ bool saveClientCursor() const { return _saveClientCursor; }
+ bool wouldSaveClientCursor() const { return _wouldSaveClientCursor; }
+
+ void finishForOplogReplay( ClientCursor * cc ) {
+ if ( _oplogReplay && ! _slaveReadTill.isNull() )
+ cc->slaveReadTill( _slaveReadTill );
+
+ }
+
+ ShardChunkManagerPtr getChunkManager(){ return _chunkManager; }
+
+ private:
+ BufBuilder _buf;
+ const ParsedQuery& _pq;
+ scoped_ptr<Projection::KeyOnly> _keyFieldsOnly;
+
+ long long _ntoskip;
+ long long _nscanned;
+ long long _oldNscanned;
+ long long _nscannedObjects;
+ long long _oldNscannedObjects;
+ int _n; // found so far
+ int _oldN;
+
+ int _nYields;
+ int _nChunkSkips;
+
+ MatchDetails _details;
+
+ ShardChunkManagerPtr _chunkManager;
+
+ bool _inMemSort;
+ auto_ptr< ScanAndOrder > _so;
+
+ shared_ptr<Cursor> _c;
+ ClientCursor::CleanupPointer _cc;
+ ClientCursor::YieldData _yieldData;
+
+ bool _capped;
+ bool _saveClientCursor;
+ bool _wouldSaveClientCursor;
+ bool _oplogReplay;
+ auto_ptr< FindingStartCursor > _findingStartCursor;
+
+ Message &_response;
+ ExplainBuilder &_eb;
+ CurOp &_curop;
+ OpTime _slaveReadTill;
+
+ bool _yieldRecoveryFailed;
+ };
+
+ /* run a query -- includes checking for and running a Command \
+ @return points to ns if exhaust mode. 0=normal mode
+ */
+ const char *runQuery(Message& m, QueryMessage& q, CurOp& curop, Message &result) {
+ shared_ptr<ParsedQuery> pq_shared( new ParsedQuery(q) );
+ ParsedQuery& pq( *pq_shared );
+ int ntoskip = q.ntoskip;
+ BSONObj jsobj = q.query;
+ int queryOptions = q.queryOptions;
+ const char *ns = q.ns;
+
+ if( logLevel >= 2 )
+ log() << "runQuery called " << ns << " " << jsobj << endl;
+
+ curop.debug().ns = ns;
+ curop.debug().ntoreturn = pq.getNumToReturn();
+ curop.setQuery(jsobj);
+
+ if ( pq.couldBeCommand() ) {
+ BufBuilder bb;
+ bb.skip(sizeof(QueryResult));
+ BSONObjBuilder cmdResBuf;
+ if ( runCommands(ns, jsobj, curop, bb, cmdResBuf, false, queryOptions) ) {
+ curop.debug().iscommand = true;
+ curop.debug().query = jsobj;
+ curop.markCommand();
+
+ auto_ptr< QueryResult > qr;
+ qr.reset( (QueryResult *) bb.buf() );
+ bb.decouple();
+ qr->setResultFlagsToOk();
+ qr->len = bb.len();
+ curop.debug().responseLength = bb.len();
+ qr->setOperation(opReply);
+ qr->cursorId = 0;
+ qr->startingFrom = 0;
+ qr->nReturned = 1;
+ result.setData( qr.release(), true );
+ }
+ else {
+ uasserted(13530, "bad or malformed command request?");
+ }
+ return 0;
+ }
+
+ /* --- regular query --- */
+
+ int n = 0;
+ BSONElement hint = useHints ? pq.getHint() : BSONElement();
+ bool explain = pq.isExplain();
+ bool snapshot = pq.isSnapshot();
+ BSONObj order = pq.getOrder();
+ BSONObj query = pq.getFilter();
+
+ /* The ElemIter will not be happy if this isn't really an object. So throw exception
+           here when that is the case.
+ (Which may indicate bad data from client.)
+ */
+ if ( query.objsize() == 0 ) {
+ out() << "Bad query object?\n jsobj:";
+ out() << jsobj.toString() << "\n query:";
+ out() << query.toString() << endl;
+ uassert( 10110 , "bad query object", false);
+ }
+
+ Client::ReadContext ctx( ns , dbpath ); // read locks
+
+ replVerifyReadsOk(pq);
+
+ if ( pq.hasOption( QueryOption_CursorTailable ) ) {
+ NamespaceDetails *d = nsdetails( ns );
+ uassert( 13051, "tailable cursor requested on non capped collection", d && d->capped );
+ const BSONObj nat1 = BSON( "$natural" << 1 );
+ if ( order.isEmpty() ) {
+ order = nat1;
+ }
+ else {
+ uassert( 13052, "only {$natural:1} order allowed for tailable cursor", order == nat1 );
+ }
+ }
+
+ BSONObj snapshotHint; // put here to keep the data in scope
+ if( snapshot ) {
+ NamespaceDetails *d = nsdetails(ns);
+ if ( d ) {
+ int i = d->findIdIndex();
+ if( i < 0 ) {
+ if ( strstr( ns , ".system." ) == 0 )
+ log() << "warning: no _id index on $snapshot query, ns:" << ns << endl;
+ }
+ else {
+ /* [dm] the name of an _id index tends to vary, so we build the hint the hard way here.
+ probably need a better way to specify "use the _id index" as a hint. if someone is
+ in the query optimizer please fix this then!
+ */
+ BSONObjBuilder b;
+ b.append("$hint", d->idx(i).indexName());
+ snapshotHint = b.obj();
+ hint = snapshotHint.firstElement();
+ }
+ }
+ }
+
+ if ( ! (explain || pq.showDiskLoc()) && isSimpleIdQuery( query ) && !pq.hasOption( QueryOption_CursorTailable ) ) {
+
+ bool nsFound = false;
+ bool indexFound = false;
+
+ BSONObj resObject;
+ Client& c = cc();
+ bool found = Helpers::findById( c, ns , query , resObject , &nsFound , &indexFound );
+ if ( nsFound == false || indexFound == true ) {
+ BufBuilder bb(sizeof(QueryResult)+resObject.objsize()+32);
+ bb.skip(sizeof(QueryResult));
+
+ curop.debug().idhack = true;
+ if ( found ) {
+ n = 1;
+ fillQueryResultFromObj( bb , pq.getFields() , resObject );
+ }
+ auto_ptr< QueryResult > qr;
+ qr.reset( (QueryResult *) bb.buf() );
+ bb.decouple();
+ qr->setResultFlagsToOk();
+ qr->len = bb.len();
+
+ curop.debug().responseLength = bb.len();
+ qr->setOperation(opReply);
+ qr->cursorId = 0;
+ qr->startingFrom = 0;
+ qr->nReturned = n;
+ result.setData( qr.release(), true );
+ return NULL;
+ }
+ }
+
+ // regular, not QO bypass query
+
+ BSONObj oldPlan;
+ if ( explain && ! pq.hasIndexSpecifier() ) {
+ MultiPlanScanner mps( ns, query, order );
+ if ( mps.usingCachedPlan() )
+ oldPlan = mps.oldExplain();
+ }
+ auto_ptr< MultiPlanScanner > mps( new MultiPlanScanner( ns, query, order, &hint, !explain, pq.getMin(), pq.getMax(), false, true ) );
+ BSONObj explainSuffix;
+ if ( explain ) {
+ BSONObjBuilder bb;
+ if ( !oldPlan.isEmpty() )
+ bb.append( "oldPlan", oldPlan.firstElement().embeddedObject().firstElement().embeddedObject() );
+ explainSuffix = bb.obj();
+ }
+ ExplainBuilder eb;
+ UserQueryOp original( pq, result, eb, curop );
+ shared_ptr< UserQueryOp > o = mps->runOp( original );
+ UserQueryOp &dqo = *o;
+ if ( ! dqo.complete() )
+ throw MsgAssertionException( dqo.exception() );
+ if ( explain ) {
+ dqo.finishExplain( explainSuffix );
+ }
+ n = dqo.n();
+ long long nscanned = dqo.totalNscanned();
+ curop.debug().scanAndOrder = dqo.scanAndOrderRequired();
+
+ shared_ptr<Cursor> cursor = dqo.cursor();
+ if( logLevel >= 5 )
+ log() << " used cursor: " << cursor.get() << endl;
+ long long cursorid = 0;
+ const char * exhaust = 0;
+ if ( dqo.saveClientCursor() || ( dqo.wouldSaveClientCursor() && mps->mayRunMore() ) ) {
+ ClientCursor *cc;
+ bool moreClauses = mps->mayRunMore();
+ if ( moreClauses ) {
+ // this MultiCursor will use a dumb NoOp to advance(), so no need to specify mayYield
+ shared_ptr< Cursor > multi( new MultiCursor( mps, cursor, dqo.matcher( cursor ), dqo ) );
+ cc = new ClientCursor(queryOptions, multi, ns, jsobj.getOwned());
+ }
+ else {
+ if( ! cursor->matcher() ) cursor->setMatcher( dqo.matcher( cursor ) );
+ cc = new ClientCursor( queryOptions, cursor, ns, jsobj.getOwned() );
+ }
+
+ cc->setChunkManager( dqo.getChunkManager() );
+
+ cursorid = cc->cursorid();
+ DEV tlog(2) << "query has more, cursorid: " << cursorid << endl;
+ cc->setPos( n );
+ cc->pq = pq_shared;
+ cc->fields = pq.getFieldPtr();
+ cc->originalMessage = m;
+ cc->updateLocation();
+ if ( !cc->ok() && cc->c()->tailable() )
+ DEV tlog() << "query has no more but tailable, cursorid: " << cursorid << endl;
+ if( queryOptions & QueryOption_Exhaust ) {
+ exhaust = ns;
+ curop.debug().exhaust = true;
+ }
+ dqo.finishForOplogReplay(cc);
+ }
+
+ QueryResult *qr = (QueryResult *) result.header();
+ qr->cursorId = cursorid;
+ qr->setResultFlagsToOk();
+ // qr->len is updated automatically by appendData()
+ curop.debug().responseLength = qr->len;
+ qr->setOperation(opReply);
+ qr->startingFrom = 0;
+ qr->nReturned = n;
+
+ int duration = curop.elapsedMillis();
+ bool dbprofile = curop.shouldDBProfile( duration );
+ if ( dbprofile || duration >= cmdLine.slowMS ) {
+ curop.debug().nscanned = (int) nscanned;
+ curop.debug().ntoskip = ntoskip;
+ }
+ curop.debug().nreturned = n;
+ return exhaust;
+ }
+
+} // namespace mongo
diff --git a/src/mongo/db/ops/query.h b/src/mongo/db/ops/query.h
new file mode 100644
index 00000000000..3324b75fe16
--- /dev/null
+++ b/src/mongo/db/ops/query.h
@@ -0,0 +1,248 @@
+// query.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../../pch.h"
+#include "../../util/net/message.h"
+#include "../dbmessage.h"
+#include "../jsobj.h"
+#include "../diskloc.h"
+#include "../projection.h"
+
+// struct QueryOptions, QueryResult, QueryResultFlags in:
+#include "../../client/dbclient.h"
+
+namespace mongo {
+
+ extern const int MaxBytesToReturnToClientAtOnce;
+
+ QueryResult* processGetMore(const char *ns, int ntoreturn, long long cursorid , CurOp& op, int pass, bool& exhaust);
+
+ const char * runQuery(Message& m, QueryMessage& q, CurOp& curop, Message &result);
+
+ /* This is for languages whose "objects" are not well ordered (JSON is well ordered).
+ [ { a : ... } , { b : ... } ] -> { a : ..., b : ... }
+ */
+ inline BSONObj transformOrderFromArrayFormat(BSONObj order) {
+ /* note: this is slow, but that is ok as order will have very few pieces */
+ BSONObjBuilder b;
+ char p[2] = "0";
+
+ while ( 1 ) {
+ BSONObj j = order.getObjectField(p);
+ if ( j.isEmpty() )
+ break;
+ BSONElement e = j.firstElement();
+ uassert( 10102 , "bad order array", !e.eoo());
+ uassert( 10103 , "bad order array [2]", e.isNumber());
+ b.append(e);
+ (*p)++;
+ uassert( 10104 , "too many ordering elements", *p <= '9');
+ }
+
+ return b.obj();
+ }
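+    /* e.g. an order of [ { a : 1 } , { b : -1 } ] becomes { a : 1 , b : -1 } (illustrative values). */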
+
+ /**
+ * this represents a total user query
+ * includes fields from the query message, both possible query levels
+ * parses everything up front
+ */
+ class ParsedQuery : boost::noncopyable {
+ public:
+ ParsedQuery( QueryMessage& qm )
+ : _ns( qm.ns ) , _ntoskip( qm.ntoskip ) , _ntoreturn( qm.ntoreturn ) , _options( qm.queryOptions ) {
+ init( qm.query );
+ initFields( qm.fields );
+ }
+ ParsedQuery( const char* ns , int ntoskip , int ntoreturn , int queryoptions , const BSONObj& query , const BSONObj& fields )
+ : _ns( ns ) , _ntoskip( ntoskip ) , _ntoreturn( ntoreturn ) , _options( queryoptions ) {
+ init( query );
+ initFields( fields );
+ }
+
+ const char * ns() const { return _ns; }
+ bool isLocalDB() const { return strncmp(_ns, "local.", 6) == 0; }
+
+ const BSONObj& getFilter() const { return _filter; }
+ Projection* getFields() const { return _fields.get(); }
+ shared_ptr<Projection> getFieldPtr() const { return _fields; }
+
+ int getSkip() const { return _ntoskip; }
+ int getNumToReturn() const { return _ntoreturn; }
+ bool wantMore() const { return _wantMore; }
+ int getOptions() const { return _options; }
+ bool hasOption( int x ) const { return x & _options; }
+
+ bool isExplain() const { return _explain; }
+ bool isSnapshot() const { return _snapshot; }
+ bool returnKey() const { return _returnKey; }
+ bool showDiskLoc() const { return _showDiskLoc; }
+
+ const BSONObj& getMin() const { return _min; }
+ const BSONObj& getMax() const { return _max; }
+ const BSONObj& getOrder() const { return _order; }
+ const BSONElement& getHint() const { return _hint; }
+ int getMaxScan() const { return _maxScan; }
+
+ bool couldBeCommand() const {
+ /* we assume you are using findOne() for running a cmd... */
+ return _ntoreturn == 1 && strstr( _ns , ".$cmd" );
+ }
+
+ bool hasIndexSpecifier() const {
+ return ! _hint.eoo() || ! _min.isEmpty() || ! _max.isEmpty();
+ }
+
+ /* if ntoreturn is zero, we return up to 101 objects. on the subsequent getmore, there
+       is only a size limit.  The idea is that on a find() where one doesn't use many results,
+ we don't return much, but once getmore kicks in, we start pushing significant quantities.
+
+ The n limit (vs. size) is important when someone fetches only one small field from big
+ objects, which causes massive scanning server-side.
+ */
+ bool enoughForFirstBatch( int n , int len ) const {
+ if ( _ntoreturn == 0 )
+ return ( len > 1024 * 1024 ) || n >= 101;
+ return n >= _ntoreturn || len > MaxBytesToReturnToClientAtOnce;
+ }
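+        /* Worked example of the rule above: with the default _ntoreturn == 0, a first batch of many small
+           documents stops once n reaches 101; a batch of few large documents stops once len exceeds 1MB,
+           whichever comes first.  Later getmore batches are bounded only by size. */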
+
+ bool enough( int n ) const {
+ if ( _ntoreturn == 0 )
+ return false;
+ return n >= _ntoreturn;
+ }
+
+ private:
+ void init( const BSONObj& q ) {
+ _reset();
+ uassert( 10105 , "bad skip value in query", _ntoskip >= 0);
+
+ if ( _ntoreturn < 0 ) {
+ /* _ntoreturn greater than zero is simply a hint on how many objects to send back per
+ "cursor batch".
+ A negative number indicates a hard limit.
+ */
+ _wantMore = false;
+ _ntoreturn = -_ntoreturn;
+ }
+
+
+ BSONElement e = q["query"];
+ if ( ! e.isABSONObj() )
+ e = q["$query"];
+
+ if ( e.isABSONObj() ) {
+ _filter = e.embeddedObject();
+ _initTop( q );
+ }
+ else {
+ _filter = q;
+ }
+ }
+
+ void _reset() {
+ _wantMore = true;
+ _explain = false;
+ _snapshot = false;
+ _returnKey = false;
+ _showDiskLoc = false;
+ _maxScan = 0;
+ }
+
+ void _initTop( const BSONObj& top ) {
+ BSONObjIterator i( top );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ const char * name = e.fieldName();
+
+ if ( strcmp( "$orderby" , name ) == 0 ||
+ strcmp( "orderby" , name ) == 0 ) {
+ if ( e.type() == Object ) {
+ _order = e.embeddedObject();
+ }
+ else if ( e.type() == Array ) {
+ _order = transformOrderFromArrayFormat( _order );
+ }
+ else {
+ uasserted(13513, "sort must be an object or array");
+ }
+ continue;
+ }
+
+ if( *name == '$' ) {
+ name++;
+ if ( strcmp( "explain" , name ) == 0 )
+ _explain = e.trueValue();
+ else if ( strcmp( "snapshot" , name ) == 0 )
+ _snapshot = e.trueValue();
+ else if ( strcmp( "min" , name ) == 0 )
+ _min = e.embeddedObject();
+ else if ( strcmp( "max" , name ) == 0 )
+ _max = e.embeddedObject();
+ else if ( strcmp( "hint" , name ) == 0 )
+ _hint = e;
+ else if ( strcmp( "returnKey" , name ) == 0 )
+ _returnKey = e.trueValue();
+ else if ( strcmp( "maxScan" , name ) == 0 )
+ _maxScan = e.numberInt();
+ else if ( strcmp( "showDiskLoc" , name ) == 0 )
+ _showDiskLoc = e.trueValue();
+ else if ( strcmp( "comment" , name ) == 0 ) {
+ ; // no-op
+ }
+ }
+ }
+
+ if ( _snapshot ) {
+ uassert( 12001 , "E12001 can't sort with $snapshot", _order.isEmpty() );
+ uassert( 12002 , "E12002 can't use hint with $snapshot", _hint.eoo() );
+ }
+
+ }
+
+ void initFields( const BSONObj& fields ) {
+ if ( fields.isEmpty() )
+ return;
+ _fields.reset( new Projection() );
+ _fields->init( fields );
+ }
+
+ const char * const _ns;
+ const int _ntoskip;
+ int _ntoreturn;
+ BSONObj _filter;
+ BSONObj _order;
+ const int _options;
+ shared_ptr< Projection > _fields;
+ bool _wantMore;
+ bool _explain;
+ bool _snapshot;
+ bool _returnKey;
+ bool _showDiskLoc;
+ BSONObj _min;
+ BSONObj _max;
+ BSONElement _hint;
+ int _maxScan;
+ };
+
+
+} // namespace mongo
+
+
diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp
new file mode 100644
index 00000000000..2abc6987218
--- /dev/null
+++ b/src/mongo/db/ops/update.cpp
@@ -0,0 +1,1308 @@
+// update.cpp
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "query.h"
+#include "../pdfile.h"
+#include "../jsobjmanipulator.h"
+#include "../queryoptimizer.h"
+#include "../repl.h"
+#include "../btree.h"
+#include "../../util/stringutils.h"
+#include "update.h"
+
+//#define DEBUGUPDATE(x) cout << x << endl;
+#define DEBUGUPDATE(x)
+
+namespace mongo {
+
+ const char* Mod::modNames[] = { "$inc", "$set", "$push", "$pushAll", "$pull", "$pullAll" , "$pop", "$unset" ,
+ "$bitand" , "$bitor" , "$bit" , "$addToSet", "$rename", "$rename"
+ };
+ unsigned Mod::modNamesNum = sizeof(Mod::modNames)/sizeof(char*);
+
+ bool Mod::_pullElementMatch( BSONElement& toMatch ) const {
+
+ if ( elt.type() != Object ) {
+ // if elt isn't an object, then comparison will work
+ return toMatch.woCompare( elt , false ) == 0;
+ }
+
+ if ( matcherOnPrimitive )
+ return matcher->matches( toMatch.wrap( "" ) );
+
+ if ( toMatch.type() != Object ) {
+ // looking for an object, so this can't match
+ return false;
+ }
+
+ // now we have an object on both sides
+ return matcher->matches( toMatch.embeddedObject() );
+ }
+
+ template< class Builder >
+ void Mod::appendIncremented( Builder& bb , const BSONElement& in, ModState& ms ) const {
+ BSONType a = in.type();
+ BSONType b = elt.type();
+
+ if ( a == NumberDouble || b == NumberDouble ) {
+ ms.incType = NumberDouble;
+ ms.incdouble = elt.numberDouble() + in.numberDouble();
+ }
+ else if ( a == NumberLong || b == NumberLong ) {
+ ms.incType = NumberLong;
+ ms.inclong = elt.numberLong() + in.numberLong();
+ }
+ else {
+ int x = elt.numberInt() + in.numberInt();
+ if ( x < 0 && elt.numberInt() > 0 && in.numberInt() > 0 ) {
+ // overflow
+ ms.incType = NumberLong;
+ ms.inclong = elt.numberLong() + in.numberLong();
+ }
+ else {
+ ms.incType = NumberInt;
+ ms.incint = elt.numberInt() + in.numberInt();
+ }
+ }
+
+ ms.appendIncValue( bb , false );
+ }
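+    // Illustrative example of the overflow branch above (hypothetical values): $inc of 1
+    // applied to an int field already holding 2147483647 wraps negative in 32-bit math,
+    // so the sum is recomputed and stored as the NumberLong 2147483648.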
+
+ template< class Builder >
+ void appendUnset( Builder &b ) {
+ }
+
+ template<>
+ void appendUnset( BSONArrayBuilder &b ) {
+ b.appendNull();
+ }
+
+ template< class Builder >
+ void Mod::apply( Builder& b , BSONElement in , ModState& ms ) const {
+ if ( ms.dontApply ) {
+ return;
+ }
+
+ switch ( op ) {
+
+ case INC: {
+ appendIncremented( b , in , ms );
+ break;
+ }
+
+ case SET: {
+ _checkForAppending( elt );
+ b.appendAs( elt , shortFieldName );
+ break;
+ }
+
+ case UNSET: {
+ appendUnset( b );
+ break;
+ }
+
+ case PUSH: {
+ uassert( 10131 , "$push can only be applied to an array" , in.type() == Array );
+ BSONObjBuilder bb( b.subarrayStart( shortFieldName ) );
+ BSONObjIterator i( in.embeddedObject() );
+ int n=0;
+ while ( i.more() ) {
+ bb.append( i.next() );
+ n++;
+ }
+
+ ms.pushStartSize = n;
+
+ bb.appendAs( elt , bb.numStr( n ) );
+ bb.done();
+ break;
+ }
+
+ case ADDTOSET: {
+ uassert( 12592 , "$addToSet can only be applied to an array" , in.type() == Array );
+ BSONObjBuilder bb( b.subarrayStart( shortFieldName ) );
+
+ BSONObjIterator i( in.embeddedObject() );
+ int n=0;
+
+ if ( isEach() ) {
+
+ BSONElementSet toadd;
+ parseEach( toadd );
+
+ while ( i.more() ) {
+ BSONElement cur = i.next();
+ bb.append( cur );
+ n++;
+ toadd.erase( cur );
+ }
+
+ {
+ BSONObjIterator i( getEach() );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( toadd.count(e) ) {
+ bb.appendAs( e , BSONObjBuilder::numStr( n++ ) );
+ toadd.erase( e );
+ }
+ }
+ }
+
+ }
+ else {
+
+ bool found = false;
+
+ while ( i.more() ) {
+ BSONElement cur = i.next();
+ bb.append( cur );
+ n++;
+ if ( elt.woCompare( cur , false ) == 0 )
+ found = true;
+ }
+
+ if ( ! found )
+ bb.appendAs( elt , bb.numStr( n ) );
+
+ }
+
+ bb.done();
+ break;
+ }
+
+
+
+ case PUSH_ALL: {
+ uassert( 10132 , "$pushAll can only be applied to an array" , in.type() == Array );
+ uassert( 10133 , "$pushAll has to be passed an array" , elt.type() );
+
+ BSONObjBuilder bb( b.subarrayStart( shortFieldName ) );
+
+ BSONObjIterator i( in.embeddedObject() );
+ int n=0;
+ while ( i.more() ) {
+ bb.append( i.next() );
+ n++;
+ }
+
+ ms.pushStartSize = n;
+
+ i = BSONObjIterator( elt.embeddedObject() );
+ while ( i.more() ) {
+ bb.appendAs( i.next() , bb.numStr( n++ ) );
+ }
+
+ bb.done();
+ break;
+ }
+
+ case PULL:
+ case PULL_ALL: {
+ uassert( 10134 , "$pull/$pullAll can only be applied to an array" , in.type() == Array );
+ BSONObjBuilder bb( b.subarrayStart( shortFieldName ) );
+
+ int n = 0;
+
+ BSONObjIterator i( in.embeddedObject() );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ bool allowed = true;
+
+ if ( op == PULL ) {
+ allowed = ! _pullElementMatch( e );
+ }
+ else {
+ BSONObjIterator j( elt.embeddedObject() );
+ while( j.more() ) {
+ BSONElement arrJ = j.next();
+ if ( e.woCompare( arrJ, false ) == 0 ) {
+ allowed = false;
+ break;
+ }
+ }
+ }
+
+ if ( allowed )
+ bb.appendAs( e , bb.numStr( n++ ) );
+ }
+
+ bb.done();
+ break;
+ }
+
+ case POP: {
+ uassert( 10135 , "$pop can only be applied to an array" , in.type() == Array );
+ BSONObjBuilder bb( b.subarrayStart( shortFieldName ) );
+
+ int n = 0;
+
+ BSONObjIterator i( in.embeddedObject() );
+ if ( elt.isNumber() && elt.number() < 0 ) {
+ // pop from front
+ if ( i.more() ) {
+ i.next();
+ n++;
+ }
+
+ while( i.more() ) {
+ bb.appendAs( i.next() , bb.numStr( n - 1 ) );
+ n++;
+ }
+ }
+ else {
+ // pop from back
+ while( i.more() ) {
+ n++;
+ BSONElement arrI = i.next();
+ if ( i.more() ) {
+ bb.append( arrI );
+ }
+ }
+ }
+
+ ms.pushStartSize = n;
+ assert( ms.pushStartSize == in.embeddedObject().nFields() );
+ bb.done();
+ break;
+ }
+
+ case BIT: {
+ uassert( 10136 , "$bit needs an array" , elt.type() == Object );
+ uassert( 10137 , "$bit can only be applied to numbers" , in.isNumber() );
+ uassert( 10138 , "$bit cannot update a value of type double" , in.type() != NumberDouble );
+
+ int x = in.numberInt();
+ long long y = in.numberLong();
+
+ BSONObjIterator it( elt.embeddedObject() );
+ while ( it.more() ) {
+ BSONElement e = it.next();
+ uassert( 10139 , "$bit field must be number" , e.isNumber() );
+ if ( str::equals(e.fieldName(), "and") ) {
+ switch( in.type() ) {
+ case NumberInt: x = x&e.numberInt(); break;
+ case NumberLong: y = y&e.numberLong(); break;
+ default: assert( 0 );
+ }
+ }
+ else if ( str::equals(e.fieldName(), "or") ) {
+ switch( in.type() ) {
+ case NumberInt: x = x|e.numberInt(); break;
+ case NumberLong: y = y|e.numberLong(); break;
+ default: assert( 0 );
+ }
+ }
+ else {
+ uasserted(9016, str::stream() << "unknown $bit operation: " << e.fieldName());
+ }
+ }
+
+ switch( in.type() ) {
+ case NumberInt: b.append( shortFieldName , x ); break;
+ case NumberLong: b.append( shortFieldName , y ); break;
+ default: assert( 0 );
+ }
+
+ break;
+ }
+
+ case RENAME_FROM: {
+ break;
+ }
+
+ case RENAME_TO: {
+ ms.handleRename( b, shortFieldName );
+ break;
+ }
+
+ default:
+ stringstream ss;
+ ss << "Mod::apply can't handle type: " << op;
+ throw UserException( 9017, ss.str() );
+ }
+ }
+
+ // -1 inside a non-object (non-object could be array)
+ // 0 missing
+ // 1 found
+ int validRenamePath( BSONObj obj, const char *path ) {
+ while( const char *p = strchr( path, '.' ) ) {
+ string left( path, p - path );
+ BSONElement e = obj.getField( left );
+ if ( e.eoo() ) {
+ return 0;
+ }
+ if ( e.type() != Object ) {
+ return -1;
+ }
+ obj = e.embeddedObject();
+ path = p + 1;
+ }
+ return !obj.getField( path ).eoo();
+ }
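+    // Worked example (hypothetical document): for obj = { a: { b: 1 } },
+    //   validRenamePath( obj, "a.b" )   returns 1  (found),
+    //   validRenamePath( obj, "a.c" )   returns 0  (missing),
+    //   validRenamePath( obj, "a.b.c" ) returns -1 (descends into the non-object value 1).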
+
+ auto_ptr<ModSetState> ModSet::prepare(const BSONObj &obj) const {
+ DEBUGUPDATE( "\t start prepare" );
+ auto_ptr<ModSetState> mss( new ModSetState( obj ) );
+
+
+ // Perform this check first, so that we don't leave a partially modified object on uassert.
+ for ( ModHolder::const_iterator i = _mods.begin(); i != _mods.end(); ++i ) {
+ DEBUGUPDATE( "\t\t prepare : " << i->first );
+ ModState& ms = mss->_mods[i->first];
+
+ const Mod& m = i->second;
+ BSONElement e = obj.getFieldDotted(m.fieldName);
+
+ ms.m = &m;
+ ms.old = e;
+
+ if ( m.op == Mod::RENAME_FROM ) {
+ int source = validRenamePath( obj, m.fieldName );
+ uassert( 13489, "$rename source field invalid", source != -1 );
+ if ( source != 1 ) {
+ ms.dontApply = true;
+ }
+ continue;
+ }
+
+ if ( m.op == Mod::RENAME_TO ) {
+ int source = validRenamePath( obj, m.renameFrom() );
+ if ( source == 1 ) {
+ int target = validRenamePath( obj, m.fieldName );
+ uassert( 13490, "$rename target field invalid", target != -1 );
+ ms.newVal = obj.getFieldDotted( m.renameFrom() );
+ mss->amIInPlacePossible( false );
+ }
+ else {
+ ms.dontApply = true;
+ }
+ continue;
+ }
+
+ if ( e.eoo() ) {
+ mss->amIInPlacePossible( m.op == Mod::UNSET );
+ continue;
+ }
+
+ switch( m.op ) {
+ case Mod::INC:
+ uassert( 10140 , "Cannot apply $inc modifier to non-number", e.isNumber() || e.eoo() );
+ if ( mss->amIInPlacePossible( e.isNumber() ) ) {
+ // check more typing info here
+ if ( m.elt.type() != e.type() ) {
+ // if i'm incrementing with a double, then the storage has to be a double
+ mss->amIInPlacePossible( m.elt.type() != NumberDouble );
+ }
+
+ // check for overflow
+ if ( e.type() == NumberInt && e.numberLong() + m.elt.numberLong() > numeric_limits<int>::max() ) {
+ mss->amIInPlacePossible( false );
+ }
+ }
+ break;
+
+ case Mod::SET:
+ mss->amIInPlacePossible( m.elt.type() == e.type() &&
+ m.elt.valuesize() == e.valuesize() );
+ break;
+
+ case Mod::PUSH:
+ case Mod::PUSH_ALL:
+ uassert( 10141 , "Cannot apply $push/$pushAll modifier to non-array", e.type() == Array || e.eoo() );
+ mss->amIInPlacePossible( false );
+ break;
+
+ case Mod::PULL:
+ case Mod::PULL_ALL: {
+ uassert( 10142 , "Cannot apply $pull/$pullAll modifier to non-array", e.type() == Array || e.eoo() );
+ BSONObjIterator i( e.embeddedObject() );
+ while( mss->_inPlacePossible && i.more() ) {
+ BSONElement arrI = i.next();
+ if ( m.op == Mod::PULL ) {
+ mss->amIInPlacePossible( ! m._pullElementMatch( arrI ) );
+ }
+ else if ( m.op == Mod::PULL_ALL ) {
+ BSONObjIterator j( m.elt.embeddedObject() );
+ while( mss->_inPlacePossible && j.moreWithEOO() ) {
+ BSONElement arrJ = j.next();
+ if ( arrJ.eoo() )
+ break;
+ mss->amIInPlacePossible( arrI.woCompare( arrJ, false ) );
+ }
+ }
+ }
+ break;
+ }
+
+ case Mod::POP: {
+ uassert( 10143 , "Cannot apply $pop modifier to non-array", e.type() == Array || e.eoo() );
+ mss->amIInPlacePossible( e.embeddedObject().isEmpty() );
+ break;
+ }
+
+ case Mod::ADDTOSET: {
+ uassert( 12591 , "Cannot apply $addToSet modifier to non-array", e.type() == Array || e.eoo() );
+
+ BSONObjIterator i( e.embeddedObject() );
+ if ( m.isEach() ) {
+ BSONElementSet toadd;
+ m.parseEach( toadd );
+ while( i.more() ) {
+ BSONElement arrI = i.next();
+ toadd.erase( arrI );
+ }
+ mss->amIInPlacePossible( toadd.size() == 0 );
+ }
+ else {
+ bool found = false;
+ while( i.more() ) {
+ BSONElement arrI = i.next();
+ if ( arrI.woCompare( m.elt , false ) == 0 ) {
+ found = true;
+ break;
+ }
+ }
+ mss->amIInPlacePossible( found );
+ }
+ break;
+ }
+
+ default:
+ // mods we don't know about shouldn't be done in place
+ mss->amIInPlacePossible( false );
+ }
+ }
+
+ DEBUGUPDATE( "\t mss\n" << mss->toString() << "\t--" );
+
+ return mss;
+ }
+
+ void ModState::appendForOpLog( BSONObjBuilder& b ) const {
+ if ( dontApply ) {
+ return;
+ }
+
+ if ( incType ) {
+ DEBUGUPDATE( "\t\t\t\t\t appendForOpLog inc fieldname: " << m->fieldName << " short:" << m->shortFieldName );
+ BSONObjBuilder bb( b.subobjStart( "$set" ) );
+ appendIncValue( bb , true );
+ bb.done();
+ return;
+ }
+
+ if ( m->op == Mod::RENAME_FROM ) {
+ DEBUGUPDATE( "\t\t\t\t\t appendForOpLog RENAME_FROM fieldName:" << m->fieldName );
+ BSONObjBuilder bb( b.subobjStart( "$unset" ) );
+ bb.append( m->fieldName, 1 );
+ bb.done();
+ return;
+ }
+
+ if ( m->op == Mod::RENAME_TO ) {
+ DEBUGUPDATE( "\t\t\t\t\t appendForOpLog RENAME_TO fieldName:" << m->fieldName );
+ BSONObjBuilder bb( b.subobjStart( "$set" ) );
+ bb.appendAs( newVal, m->fieldName );
+ return;
+ }
+
+ const char * name = fixedOpName ? fixedOpName : Mod::modNames[op()];
+
+ DEBUGUPDATE( "\t\t\t\t\t appendForOpLog name:" << name << " fixed: " << fixed << " fn: " << m->fieldName );
+
+ BSONObjBuilder bb( b.subobjStart( name ) );
+ if ( fixed ) {
+ bb.appendAs( *fixed , m->fieldName );
+ }
+ else {
+ bb.appendAs( m->elt , m->fieldName );
+ }
+ bb.done();
+ }
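+    // Illustrative example (hypothetical values): { $inc: { x: 1 } } applied to a document
+    // where x == 5 is rewritten for the oplog as { $set: { x: 6 } }, so replaying the entry
+    // is idempotent; the original $inc is not logged verbatim.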
+
+ string ModState::toString() const {
+ stringstream ss;
+ if ( fixedOpName )
+ ss << " fixedOpName: " << fixedOpName;
+ if ( fixed )
+ ss << " fixed: " << fixed;
+ return ss.str();
+ }
+
+ template< class Builder >
+ void ModState::handleRename( Builder &newObjBuilder, const char *shortFieldName ) {
+ newObjBuilder.appendAs( newVal , shortFieldName );
+ BSONObjBuilder b;
+ b.appendAs( newVal, shortFieldName );
+ assert( _objData.isEmpty() );
+ _objData = b.obj();
+ newVal = _objData.firstElement();
+ }
+
+ void ModSetState::applyModsInPlace( bool isOnDisk ) {
+        // TODO: I think this assert means that we can get rid of the isOnDisk param
+ // and just use isOwned as the determination
+ DEV assert( isOnDisk == ! _obj.isOwned() );
+
+ for ( ModStateHolder::iterator i = _mods.begin(); i != _mods.end(); ++i ) {
+ ModState& m = i->second;
+
+ if ( m.dontApply ) {
+ continue;
+ }
+
+ switch ( m.m->op ) {
+ case Mod::UNSET:
+ case Mod::ADDTOSET:
+ case Mod::RENAME_FROM:
+ case Mod::RENAME_TO:
+ // this should have been handled by prepare
+ break;
+ case Mod::PULL:
+ case Mod::PULL_ALL:
+ // this should have been handled by prepare
+ break;
+ case Mod::POP:
+ assert( m.old.eoo() || ( m.old.isABSONObj() && m.old.Obj().isEmpty() ) );
+ break;
+ // [dm] the BSONElementManipulator statements below are for replication (correct?)
+ case Mod::INC:
+ if ( isOnDisk )
+ m.m->IncrementMe( m.old );
+ else
+ m.m->incrementMe( m.old );
+ m.fixedOpName = "$set";
+ m.fixed = &(m.old);
+ break;
+ case Mod::SET:
+ if ( isOnDisk )
+ BSONElementManipulator( m.old ).ReplaceTypeAndValue( m.m->elt );
+ else
+ BSONElementManipulator( m.old ).replaceTypeAndValue( m.m->elt );
+ break;
+ default:
+ uassert( 13478 , "can't apply mod in place - shouldn't have gotten here" , 0 );
+ }
+ }
+ }
+
+ void ModSet::extractFields( map< string, BSONElement > &fields, const BSONElement &top, const string &base ) {
+ if ( top.type() != Object ) {
+ fields[ base + top.fieldName() ] = top;
+ return;
+ }
+ BSONObjIterator i( top.embeddedObject() );
+ bool empty = true;
+ while( i.moreWithEOO() ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ extractFields( fields, e, base + top.fieldName() + "." );
+ empty = false;
+ }
+ if ( empty )
+ fields[ base + top.fieldName() ] = top;
+ }
+
+ template< class Builder >
+ void ModSetState::_appendNewFromMods( const string& root , ModState& m , Builder& b , set<string>& onedownseen ) {
+ const char * temp = m.fieldName();
+ temp += root.size();
+ const char * dot = strchr( temp , '.' );
+ if ( dot ) {
+ string nr( m.fieldName() , 0 , 1 + ( dot - m.fieldName() ) );
+ string nf( temp , 0 , dot - temp );
+ if ( onedownseen.count( nf ) )
+ return;
+ onedownseen.insert( nf );
+ BSONObjBuilder bb ( b.subobjStart( nf ) );
+ createNewFromMods( nr , bb , BSONObj() ); // don't infer an array from name
+ bb.done();
+ }
+ else {
+ appendNewFromMod( m , b );
+ }
+
+ }
+
+ template< class Builder >
+ void ModSetState::createNewFromMods( const string& root , Builder& b , const BSONObj &obj ) {
+ DEBUGUPDATE( "\t\t createNewFromMods root: " << root );
+ BSONObjIteratorSorted es( obj );
+ BSONElement e = es.next();
+
+ ModStateHolder::iterator m = _mods.lower_bound( root );
+ StringBuilder buf(root.size() + 2 );
+ buf << root << (char)255;
+ ModStateHolder::iterator mend = _mods.lower_bound( buf.str() );
+
+ set<string> onedownseen;
+
+ while ( e.type() && m != mend ) {
+ string field = root + e.fieldName();
+ FieldCompareResult cmp = compareDottedFieldNames( m->second.m->fieldName , field );
+
+ DEBUGUPDATE( "\t\t\t field:" << field << "\t mod:" << m->second.m->fieldName << "\t cmp:" << cmp << "\t short: " << e.fieldName() );
+
+ switch ( cmp ) {
+
+ case LEFT_SUBFIELD: { // Mod is embedded under this element
+ uassert( 10145 , str::stream() << "LEFT_SUBFIELD only supports Object: " << field << " not: " << e.type() , e.type() == Object || e.type() == Array );
+ if ( onedownseen.count( e.fieldName() ) == 0 ) {
+ onedownseen.insert( e.fieldName() );
+ if ( e.type() == Object ) {
+ BSONObjBuilder bb( b.subobjStart( e.fieldName() ) );
+ stringstream nr; nr << root << e.fieldName() << ".";
+ createNewFromMods( nr.str() , bb , e.embeddedObject() );
+ bb.done();
+ }
+ else {
+ BSONArrayBuilder ba( b.subarrayStart( e.fieldName() ) );
+ stringstream nr; nr << root << e.fieldName() << ".";
+ createNewFromMods( nr.str() , ba , e.embeddedObject() );
+ ba.done();
+ }
+ // inc both as we handled both
+ e = es.next();
+ m++;
+ }
+ else {
+ // this is a very weird case
+ // have seen it in production, but can't reproduce
+ // this assert prevents an inf. loop
+ // but likely isn't the correct solution
+ assert(0);
+ }
+ continue;
+ }
+ case LEFT_BEFORE: // Mod on a field that doesn't exist
+ DEBUGUPDATE( "\t\t\t\t creating new field for: " << m->second.m->fieldName );
+ _appendNewFromMods( root , m->second , b , onedownseen );
+ m++;
+ continue;
+ case SAME:
+ DEBUGUPDATE( "\t\t\t\t applying mod on: " << m->second.m->fieldName );
+ m->second.apply( b , e );
+ e = es.next();
+ m++;
+ continue;
+ case RIGHT_BEFORE: // field that doesn't have a MOD
+ DEBUGUPDATE( "\t\t\t\t just copying" );
+ b.append( e ); // if array, ignore field name
+ e = es.next();
+ continue;
+ case RIGHT_SUBFIELD:
+ massert( 10399 , "ModSet::createNewFromMods - RIGHT_SUBFIELD should be impossible" , 0 );
+ break;
+ default:
+ massert( 10400 , "unhandled case" , 0 );
+ }
+ }
+
+ // finished looping the mods, just adding the rest of the elements
+ while ( e.type() ) {
+ DEBUGUPDATE( "\t\t\t copying: " << e.fieldName() );
+ b.append( e ); // if array, ignore field name
+ e = es.next();
+ }
+
+ // do mods that don't have fields already
+ for ( ; m != mend; m++ ) {
+ DEBUGUPDATE( "\t\t\t\t appending from mod at end: " << m->second.m->fieldName );
+ _appendNewFromMods( root , m->second , b , onedownseen );
+ }
+ }
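+    // Illustrative sketch of the merge above (hypothetical document and mods): for
+    // obj = { a: 1, b: 2 } with mods { $set: { b: 3, c: 4 } }, field a is copied
+    // unchanged (RIGHT_BEFORE), b is rewritten by its mod (SAME), and c is appended
+    // at the end from its mod since no such field exists, yielding { a: 1, b: 3, c: 4 }.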
+
+ BSONObj ModSetState::createNewFromMods() {
+ BSONObjBuilder b( (int)(_obj.objsize() * 1.1) );
+ createNewFromMods( "" , b , _obj );
+ return _newFromMods = b.obj();
+ }
+
+ string ModSetState::toString() const {
+ stringstream ss;
+ for ( ModStateHolder::const_iterator i=_mods.begin(); i!=_mods.end(); ++i ) {
+ ss << "\t\t" << i->first << "\t" << i->second.toString() << "\n";
+ }
+ return ss.str();
+ }
+
+ bool ModSetState::FieldCmp::operator()( const string &l, const string &r ) const {
+ return lexNumCmp( l.c_str(), r.c_str() ) < 0;
+ }
+
+ BSONObj ModSet::createNewFromQuery( const BSONObj& query ) {
+ BSONObj newObj;
+
+ {
+ BSONObjBuilder bb;
+ EmbeddedBuilder eb( &bb );
+ BSONObjIteratorSorted i( query );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( e.fieldName()[0] == '$' ) // for $atomic and anything else we add
+ continue;
+
+ if ( e.type() == Object && e.embeddedObject().firstElementFieldName()[0] == '$' ) {
+ // this means this is a $gt type filter, so don't make part of the new object
+ continue;
+ }
+
+ eb.appendAs( e , e.fieldName() );
+ }
+ eb.done();
+ newObj = bb.obj();
+ }
+
+ auto_ptr<ModSetState> mss = prepare( newObj );
+
+ if ( mss->canApplyInPlace() )
+ mss->applyModsInPlace( false );
+ else
+ newObj = mss->createNewFromMods();
+
+ return newObj;
+ }
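+    // Illustrative sketch (hypothetical query and mods): an upsert with
+    //   query { name: "x", n: { $gt: 5 } } and mods { $set: { y: 1 } }
+    // copies the equality predicate, skips the $gt clause, then applies the mods,
+    // producing an insert object along the lines of { name: "x", y: 1 }.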
+
+ /* get special operations like $inc
+ { $inc: { a:1, b:1 } }
+ { $set: { a:77 } }
+ { $push: { a:55 } }
+ { $pushAll: { a:[77,88] } }
+ { $pull: { a:66 } }
+ { $pullAll : { a:[99,1010] } }
+ NOTE: MODIFIES source from object!
+ */
+ ModSet::ModSet(
+ const BSONObj &from ,
+ const set<string>& idxKeys,
+ const set<string> *backgroundKeys)
+ : _isIndexed(0) , _hasDynamicArray( false ) {
+
+ BSONObjIterator it(from);
+
+ while ( it.more() ) {
+ BSONElement e = it.next();
+ const char *fn = e.fieldName();
+
+ uassert( 10147 , "Invalid modifier specified: " + string( fn ), e.type() == Object );
+ BSONObj j = e.embeddedObject();
+ DEBUGUPDATE( "\t" << j );
+
+ BSONObjIterator jt(j);
+ Mod::Op op = opFromStr( fn );
+
+ while ( jt.more() ) {
+ BSONElement f = jt.next(); // x:44
+
+ const char * fieldName = f.fieldName();
+
+ uassert( 15896 , "Modified field name may not start with $", fieldName[0] != '$' || op == Mod::UNSET ); // allow remove of invalid field name in case it was inserted before this check was added (~ version 2.1)
+ uassert( 10148 , "Mod on _id not allowed", strcmp( fieldName, "_id" ) != 0 );
+ uassert( 10149 , "Invalid mod field name, may not end in a period", fieldName[ strlen( fieldName ) - 1 ] != '.' );
+ uassert( 10150 , "Field name duplication not allowed with modifiers", ! haveModForField( fieldName ) );
+ uassert( 10151 , "have conflicting mods in update" , ! haveConflictingMod( fieldName ) );
+ uassert( 10152 , "Modifier $inc allowed for numbers only", f.isNumber() || op != Mod::INC );
+ uassert( 10153 , "Modifier $pushAll/pullAll allowed for arrays only", f.type() == Array || ( op != Mod::PUSH_ALL && op != Mod::PULL_ALL ) );
+
+ if ( op == Mod::RENAME_TO ) {
+ uassert( 13494, "$rename target must be a string", f.type() == String );
+ const char *target = f.valuestr();
+ uassert( 13495, "$rename source must differ from target", strcmp( fieldName, target ) != 0 );
+ uassert( 13496, "invalid mod field name, source may not be empty", fieldName[0] );
+ uassert( 13479, "invalid mod field name, target may not be empty", target[0] );
+ uassert( 13480, "invalid mod field name, source may not begin or end in period", fieldName[0] != '.' && fieldName[ strlen( fieldName ) - 1 ] != '.' );
+ uassert( 13481, "invalid mod field name, target may not begin or end in period", target[0] != '.' && target[ strlen( target ) - 1 ] != '.' );
+ uassert( 13482, "$rename affecting _id not allowed", !( fieldName[0] == '_' && fieldName[1] == 'i' && fieldName[2] == 'd' && ( !fieldName[3] || fieldName[3] == '.' ) ) );
+ uassert( 13483, "$rename affecting _id not allowed", !( target[0] == '_' && target[1] == 'i' && target[2] == 'd' && ( !target[3] || target[3] == '.' ) ) );
+ uassert( 13484, "field name duplication not allowed with $rename target", !haveModForField( target ) );
+ uassert( 13485, "conflicting mods not allowed with $rename target", !haveConflictingMod( target ) );
+ uassert( 13486, "$rename target may not be a parent of source", !( strncmp( fieldName, target, strlen( target ) ) == 0 && fieldName[ strlen( target ) ] == '.' ) );
+ uassert( 13487, "$rename source may not be dynamic array", strstr( fieldName , ".$" ) == 0 );
+ uassert( 13488, "$rename target may not be dynamic array", strstr( target , ".$" ) == 0 );
+
+ Mod from;
+ from.init( Mod::RENAME_FROM, f );
+ from.setFieldName( fieldName );
+ updateIsIndexed( from, idxKeys, backgroundKeys );
+ _mods[ from.fieldName ] = from;
+
+ Mod to;
+ to.init( Mod::RENAME_TO, f );
+ to.setFieldName( target );
+ updateIsIndexed( to, idxKeys, backgroundKeys );
+ _mods[ to.fieldName ] = to;
+
+ DEBUGUPDATE( "\t\t " << fieldName << "\t" << from.fieldName << "\t" << to.fieldName );
+ continue;
+ }
+
+ _hasDynamicArray = _hasDynamicArray || strstr( fieldName , ".$" ) > 0;
+
+ Mod m;
+ m.init( op , f );
+ m.setFieldName( f.fieldName() );
+ updateIsIndexed( m, idxKeys, backgroundKeys );
+ _mods[m.fieldName] = m;
+
+ DEBUGUPDATE( "\t\t " << fieldName << "\t" << m.fieldName << "\t" << _hasDynamicArray );
+ }
+ }
+
+ }
+
+ ModSet * ModSet::fixDynamicArray( const char * elemMatchKey ) const {
+ ModSet * n = new ModSet();
+ n->_isIndexed = _isIndexed;
+ n->_hasDynamicArray = _hasDynamicArray;
+ for ( ModHolder::const_iterator i=_mods.begin(); i!=_mods.end(); i++ ) {
+ string s = i->first;
+ size_t idx = s.find( ".$" );
+ if ( idx == string::npos ) {
+ n->_mods[s] = i->second;
+ continue;
+ }
+ StringBuilder buf(s.size()+strlen(elemMatchKey));
+ buf << s.substr(0,idx+1) << elemMatchKey << s.substr(idx+2);
+ string fixed = buf.str();
+ DEBUGUPDATE( "fixed dynamic: " << s << " -->> " << fixed );
+ n->_mods[fixed] = i->second;
+ ModHolder::iterator temp = n->_mods.find( fixed );
+ temp->second.setFieldName( temp->first.c_str() );
+ }
+ return n;
+ }
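+    // Illustrative example (hypothetical field names): with elemMatchKey "2", a mod keyed
+    // on "comments.$.votes" is re-keyed to operate on "comments.2.votes" in the returned ModSet.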
+
+ void checkNoMods( BSONObj o ) {
+ BSONObjIterator i( o );
+ while( i.moreWithEOO() ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ uassert( 10154 , "Modifiers and non-modifiers cannot be mixed", e.fieldName()[ 0 ] != '$' );
+ }
+ }
+
+ static void checkTooLarge(const BSONObj& newObj) {
+ uassert( 12522 , "$ operator made object too large" , newObj.objsize() <= BSONObjMaxUserSize );
+ }
+
+ /* note: this is only (as-is) called for
+
+ - not multi
+ - not mods is indexed
+ - not upsert
+ */
+ static UpdateResult _updateById(bool isOperatorUpdate, int idIdxNo, ModSet *mods, int profile, NamespaceDetails *d,
+ NamespaceDetailsTransient *nsdt,
+ bool god, const char *ns,
+ const BSONObj& updateobj, BSONObj patternOrig, bool logop, OpDebug& debug) {
+
+ DiskLoc loc;
+ {
+ IndexDetails& i = d->idx(idIdxNo);
+ BSONObj key = i.getKeyFromQuery( patternOrig );
+ loc = i.idxInterface().findSingle(i, i.head, key);
+ if( loc.isNull() ) {
+ // no upsert support in _updateById yet, so we are done.
+ return UpdateResult(0, 0, 0);
+ }
+ }
+ Record *r = loc.rec();
+
+ if ( ! r->likelyInPhysicalMemory() ) {
+ {
+ scoped_ptr<LockMongoFilesShared> lk( new LockMongoFilesShared() );
+ dbtempreleasewritelock t;
+ r->touch();
+ lk.reset(0); // we have to release mmmutex before we can re-acquire dbmutex
+ }
+
+ {
+ // we need to re-find in case something changed
+ d = nsdetails( ns );
+ if ( ! d ) {
+ // dropped
+ return UpdateResult(0, 0, 0);
+ }
+ nsdt = &NamespaceDetailsTransient::get(ns);
+ IndexDetails& i = d->idx(idIdxNo);
+ BSONObj key = i.getKeyFromQuery( patternOrig );
+ loc = i.idxInterface().findSingle(i, i.head, key);
+ if( loc.isNull() ) {
+ // no upsert support in _updateById yet, so we are done.
+ return UpdateResult(0, 0, 0);
+ }
+
+ r = loc.rec();
+ }
+ }
+
+        /* look for $inc etc.  note: as written, every modified field must use a $ operator;
+           you can't mix in plain (non-operator) assignments at the moment. */
+ if ( isOperatorUpdate ) {
+ const BSONObj& onDisk = loc.obj();
+ auto_ptr<ModSetState> mss = mods->prepare( onDisk );
+
+ if( mss->canApplyInPlace() ) {
+ mss->applyModsInPlace(true);
+ DEBUGUPDATE( "\t\t\t updateById doing in place update" );
+ }
+ else {
+ BSONObj newObj = mss->createNewFromMods();
+ checkTooLarge(newObj);
+ assert(nsdt);
+ theDataFileMgr.updateRecord(ns, d, nsdt, r, loc , newObj.objdata(), newObj.objsize(), debug);
+ }
+
+ if ( logop ) {
+ DEV assert( mods->size() );
+
+ BSONObj pattern = patternOrig;
+ if ( mss->haveArrayDepMod() ) {
+ BSONObjBuilder patternBuilder;
+ patternBuilder.appendElements( pattern );
+ mss->appendSizeSpecForArrayDepMods( patternBuilder );
+ pattern = patternBuilder.obj();
+ }
+
+ if( mss->needOpLogRewrite() ) {
+ DEBUGUPDATE( "\t rewrite update: " << mss->getOpLogRewrite() );
+ logOp("u", ns, mss->getOpLogRewrite() , &pattern );
+ }
+ else {
+ logOp("u", ns, updateobj, &pattern );
+ }
+ }
+ return UpdateResult( 1 , 1 , 1);
+ } // end $operator update
+
+ // regular update
+ BSONElementManipulator::lookForTimestamps( updateobj );
+ checkNoMods( updateobj );
+ assert(nsdt);
+ theDataFileMgr.updateRecord(ns, d, nsdt, r, loc , updateobj.objdata(), updateobj.objsize(), debug );
+ if ( logop ) {
+ logOp("u", ns, updateobj, &patternOrig );
+ }
+ return UpdateResult( 1 , 0 , 1 );
+ }
+
+ UpdateResult _updateObjects(bool god, const char *ns, const BSONObj& updateobj, BSONObj patternOrig, bool upsert, bool multi, bool logop , OpDebug& debug, RemoveSaver* rs ) {
+ DEBUGUPDATE( "update: " << ns << " update: " << updateobj << " query: " << patternOrig << " upsert: " << upsert << " multi: " << multi );
+ Client& client = cc();
+ int profile = client.database()->profile;
+
+ debug.updateobj = updateobj;
+
+        // idea with these here is to make them loop invariant for multi updates, and thus be a bit faster for that case
+ // The pointers may be left invalid on a failed or terminal yield recovery.
+ NamespaceDetails *d = nsdetails(ns); // can be null if an upsert...
+ NamespaceDetailsTransient *nsdt = &NamespaceDetailsTransient::get(ns);
+
+ auto_ptr<ModSet> mods;
+ bool isOperatorUpdate = updateobj.firstElementFieldName()[0] == '$';
+ int modsIsIndexed = false; // really the # of indexes
+ if ( isOperatorUpdate ) {
+ if( d && d->indexBuildInProgress ) {
+ set<string> bgKeys;
+ d->inProgIdx().keyPattern().getFieldNames(bgKeys);
+ mods.reset( new ModSet(updateobj, nsdt->indexKeys(), &bgKeys) );
+ }
+ else {
+ mods.reset( new ModSet(updateobj, nsdt->indexKeys()) );
+ }
+ modsIsIndexed = mods->isIndexed();
+ }
+
+ if( !multi && isSimpleIdQuery(patternOrig) && d && !modsIsIndexed ) {
+ int idxNo = d->findIdIndex();
+ if( idxNo >= 0 ) {
+ debug.idhack = true;
+ UpdateResult result = _updateById(isOperatorUpdate, idxNo, mods.get(), profile, d, nsdt, god, ns, updateobj, patternOrig, logop, debug);
+ if ( result.existing || ! upsert ) {
+ return result;
+ }
+ else if ( upsert && ! isOperatorUpdate && ! logop) {
+ // this handles repl inserts
+ checkNoMods( updateobj );
+ debug.upsert = true;
+ BSONObj no = updateobj;
+ theDataFileMgr.insertWithObjMod(ns, no, god);
+ return UpdateResult( 0 , 0 , 1 , no );
+ }
+ }
+ }
+
+ int numModded = 0;
+ long long nscanned = 0;
+ shared_ptr< Cursor > c = NamespaceDetailsTransient::getCursor( ns, patternOrig );
+
+ d = nsdetails(ns);
+ nsdt = &NamespaceDetailsTransient::get(ns);
+ bool autoDedup = c->autoDedup();
+
+ if( c->ok() ) {
+ set<DiskLoc> seenObjects;
+ MatchDetails details;
+ auto_ptr<ClientCursor> cc;
+ do {
+ nscanned++;
+
+ bool atomic = c->matcher() && c->matcher()->docMatcher().atomic();
+
+ if ( !atomic ) {
+ // *****************
+ if ( cc.get() == 0 ) {
+ shared_ptr< Cursor > cPtr = c;
+ cc.reset( new ClientCursor( QueryOption_NoCursorTimeout , cPtr , ns ) );
+ }
+
+ bool didYield;
+ if ( ! cc->yieldSometimes( ClientCursor::WillNeed, &didYield ) ) {
+ cc.release();
+ break;
+ }
+ if ( !c->ok() ) {
+ break;
+ }
+
+ if ( didYield ) {
+ d = nsdetails(ns);
+ nsdt = &NamespaceDetailsTransient::get(ns);
+ }
+ // *****************
+ }
+
+ if ( !c->currentMatches( &details ) ) {
+ c->advance();
+
+ if ( nscanned % 256 == 0 && ! atomic ) {
+ if ( cc.get() == 0 ) {
+ shared_ptr< Cursor > cPtr = c;
+ cc.reset( new ClientCursor( QueryOption_NoCursorTimeout , cPtr , ns ) );
+ }
+ if ( ! cc->yield() ) {
+ cc.release();
+ // TODO should we assert or something?
+ break;
+ }
+ if ( !c->ok() ) {
+ break;
+ }
+ d = nsdetails(ns);
+ nsdt = &NamespaceDetailsTransient::get(ns);
+ }
+ continue;
+ }
+
+ Record *r = c->_current();
+ DiskLoc loc = c->currLoc();
+
+ // TODO Maybe this is unnecessary since we have seenObjects
+ if ( c->getsetdup( loc ) && autoDedup ) {
+ c->advance();
+ continue;
+ }
+
+ BSONObj js(r);
+
+ BSONObj pattern = patternOrig;
+
+ if ( logop ) {
+ BSONObjBuilder idPattern;
+ BSONElement id;
+ // NOTE: If the matching object lacks an id, we'll log
+ // with the original pattern. This isn't replay-safe.
+ // It might make sense to suppress the log instead
+ // if there's no id.
+ if ( js.getObjectID( id ) ) {
+ idPattern.append( id );
+ pattern = idPattern.obj();
+ }
+ else {
+ uassert( 10157 , "multi-update requires all modified objects to have an _id" , ! multi );
+ }
+ }
+
+ if ( profile && !multi )
+ debug.nscanned = (int) nscanned;
+
+                /* look for $inc etc.  note: as written, every modified field must use a $ operator;
+                   you can't mix in plain (non-operator) assignments at the moment. */
+ if ( isOperatorUpdate ) {
+
+ if ( multi ) {
+ c->advance(); // go to next record in case this one moves
+ if ( autoDedup && seenObjects.count( loc ) )
+ continue;
+ }
+
+ const BSONObj& onDisk = loc.obj();
+
+ ModSet * useMods = mods.get();
+ bool forceRewrite = false;
+
+ auto_ptr<ModSet> mymodset;
+ if ( details._elemMatchKey && mods->hasDynamicArray() ) {
+ useMods = mods->fixDynamicArray( details._elemMatchKey );
+ mymodset.reset( useMods );
+ forceRewrite = true;
+ }
+
+ auto_ptr<ModSetState> mss = useMods->prepare( onDisk );
+
+ bool willAdvanceCursor = multi && c->ok() && ( modsIsIndexed || ! mss->canApplyInPlace() );
+
+ if ( willAdvanceCursor ) {
+ if ( cc.get() ) {
+ cc->setDoingDeletes( true );
+ }
+ c->prepareToTouchEarlierIterate();
+ }
+
+ if ( modsIsIndexed <= 0 && mss->canApplyInPlace() ) {
+ mss->applyModsInPlace( true );// const_cast<BSONObj&>(onDisk) );
+
+ DEBUGUPDATE( "\t\t\t doing in place update" );
+ if ( profile && !multi )
+ debug.fastmod = true;
+
+ if ( modsIsIndexed ) {
+ seenObjects.insert( loc );
+ }
+
+ d->paddingFits();
+ }
+ else {
+ if ( rs )
+ rs->goingToDelete( onDisk );
+
+ BSONObj newObj = mss->createNewFromMods();
+ checkTooLarge(newObj);
+ DiskLoc newLoc = theDataFileMgr.updateRecord(ns, d, nsdt, r, loc , newObj.objdata(), newObj.objsize(), debug);
+ if ( newLoc != loc || modsIsIndexed ){
+ // log() << "Moved obj " << newLoc.obj()["_id"] << " from " << loc << " to " << newLoc << endl;
+                        // object moved, need to make sure we don't get it again
+ seenObjects.insert( newLoc );
+ }
+
+ }
+
+ if ( logop ) {
+ DEV assert( mods->size() );
+
+ if ( mss->haveArrayDepMod() ) {
+ BSONObjBuilder patternBuilder;
+ patternBuilder.appendElements( pattern );
+ mss->appendSizeSpecForArrayDepMods( patternBuilder );
+ pattern = patternBuilder.obj();
+ }
+
+ if ( forceRewrite || mss->needOpLogRewrite() ) {
+ DEBUGUPDATE( "\t rewrite update: " << mss->getOpLogRewrite() );
+ logOp("u", ns, mss->getOpLogRewrite() , &pattern );
+ }
+ else {
+ logOp("u", ns, updateobj, &pattern );
+ }
+ }
+ numModded++;
+ if ( ! multi )
+ return UpdateResult( 1 , 1 , numModded );
+ if ( willAdvanceCursor )
+ c->recoverFromTouchingEarlierIterate();
+
+ if ( nscanned % 64 == 0 && ! atomic ) {
+ if ( cc.get() == 0 ) {
+ shared_ptr< Cursor > cPtr = c;
+ cc.reset( new ClientCursor( QueryOption_NoCursorTimeout , cPtr , ns ) );
+ }
+ if ( ! cc->yield() ) {
+ cc.release();
+ break;
+ }
+ if ( !c->ok() ) {
+ break;
+ }
+ d = nsdetails(ns);
+ nsdt = &NamespaceDetailsTransient::get(ns);
+ }
+
+ getDur().commitIfNeeded();
+
+ continue;
+ }
+
+ uassert( 10158 , "multi update only works with $ operators" , ! multi );
+
+ BSONElementManipulator::lookForTimestamps( updateobj );
+ checkNoMods( updateobj );
+ theDataFileMgr.updateRecord(ns, d, nsdt, r, loc , updateobj.objdata(), updateobj.objsize(), debug, god);
+ if ( logop ) {
+ DEV wassert( !god ); // god doesn't get logged, this would be bad.
+ logOp("u", ns, updateobj, &pattern );
+ }
+ return UpdateResult( 1 , 0 , 1 );
+ } while ( c->ok() );
+ } // endif
+
+ if ( numModded )
+ return UpdateResult( 1 , 1 , numModded );
+
+ // todo: no need for "if( profile )" here as that probably just makes things slower?
+ if ( profile )
+ debug.nscanned = (int) nscanned;
+
+ if ( upsert ) {
+ if ( updateobj.firstElementFieldName()[0] == '$' ) {
+ // upsert of an $operation. build a default object
+ BSONObj newObj = mods->createNewFromQuery( patternOrig );
+ checkNoMods( newObj );
+ debug.fastmodinsert = true;
+ theDataFileMgr.insertWithObjMod(ns, newObj, god);
+ if ( logop )
+ logOp( "i", ns, newObj );
+
+ return UpdateResult( 0 , 1 , 1 , newObj );
+ }
+ uassert( 10159 , "multi update only works with $ operators" , ! multi );
+ checkNoMods( updateobj );
+ debug.upsert = true;
+ BSONObj no = updateobj;
+ theDataFileMgr.insertWithObjMod(ns, no, god);
+ if ( logop )
+ logOp( "i", ns, no );
+ return UpdateResult( 0 , 0 , 1 , no );
+ }
+
+ return UpdateResult( 0 , isOperatorUpdate , 0 );
+ }
+
+ UpdateResult updateObjects(const char *ns, const BSONObj& updateobj, BSONObj patternOrig, bool upsert, bool multi, bool logop , OpDebug& debug ) {
+ uassert( 10155 , "cannot update reserved $ collection", strchr(ns, '$') == 0 );
+ if ( strstr(ns, ".system.") ) {
+ /* dm: it's very important that system.indexes is never updated as IndexDetails has pointers into it */
+ uassert( 10156 , str::stream() << "cannot update system collection: " << ns << " q: " << patternOrig << " u: " << updateobj , legalClientSystemNS( ns , true ) );
+ }
+ return _updateObjects(false, ns, updateobj, patternOrig, upsert, multi, logop, debug);
+ }
+
+}
diff --git a/src/mongo/db/ops/update.h b/src/mongo/db/ops/update.h
new file mode 100644
index 00000000000..9446db06d36
--- /dev/null
+++ b/src/mongo/db/ops/update.h
@@ -0,0 +1,700 @@
+// update.h
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "../../pch.h"
+#include "../jsobj.h"
+#include "../../util/embedded_builder.h"
+#include "../matcher.h"
+
+namespace mongo {
+
+ // ---------- public -------------
+
+ struct UpdateResult {
+ const bool existing; // if existing objects were modified
+ const bool mod; // was this a $ mod
+ const long long num; // how many objects touched
+ OID upserted; // if something was upserted, the new _id of the object
+
+ UpdateResult( bool e, bool m, unsigned long long n , const BSONObj& upsertedObject = BSONObj() )
+ : existing(e) , mod(m), num(n) {
+ upserted.clear();
+ BSONElement id = upsertedObject["_id"];
+ if ( ! e && n == 1 && id.type() == jstOID ) {
+ upserted = id.OID();
+ }
+ }
+ };
+
+ class RemoveSaver;
+
+ /* returns true if an existing object was updated, false if no existing object was found.
+ multi - update multiple objects - mostly useful with things like $set
+ god - allow access to system namespaces
+ */
+ UpdateResult updateObjects(const char *ns, const BSONObj& updateobj, BSONObj pattern, bool upsert, bool multi , bool logop , OpDebug& debug );
+ UpdateResult _updateObjects(bool god, const char *ns, const BSONObj& updateobj, BSONObj pattern,
+ bool upsert, bool multi , bool logop , OpDebug& debug , RemoveSaver * rs = 0 );
+
+
+
+ // ---------- private -------------
+
+ class ModState;
+ class ModSetState;
+
+ /* Used for modifiers such as $inc, $set, $push, ...
+ * stores the info about a single operation
+ * once created should never be modified
+ */
+ struct Mod {
+ // See opFromStr below
+ // 0 1 2 3 4 5 6 7 8 9 10 11 12 13
+ enum Op { INC, SET, PUSH, PUSH_ALL, PULL, PULL_ALL , POP, UNSET, BITAND, BITOR , BIT , ADDTOSET, RENAME_FROM, RENAME_TO } op;
+
+ static const char* modNames[];
+ static unsigned modNamesNum;
+
+ const char *fieldName;
+ const char *shortFieldName;
+
+ BSONElement elt; // x:5 note: this is the actual element from the updateobj
+ boost::shared_ptr<Matcher> matcher;
+ bool matcherOnPrimitive;
+
+ void init( Op o , BSONElement& e ) {
+ op = o;
+ elt = e;
+ if ( op == PULL && e.type() == Object ) {
+ BSONObj t = e.embeddedObject();
+ if ( t.firstElement().getGtLtOp() == 0 ) {
+ matcher.reset( new Matcher( t ) );
+ matcherOnPrimitive = false;
+ }
+ else {
+ matcher.reset( new Matcher( BSON( "" << t ) ) );
+ matcherOnPrimitive = true;
+ }
+ }
+ }
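+        // Illustrative examples of the $pull handling above (hypothetical specs):
+        //   { $pull: { a: { $gt: 3 } } }  -> matcher over the wrapped value, matcherOnPrimitive = true
+        //   { $pull: { a: { b: 1 } } }    -> matcher over whole array elements, matcherOnPrimitive = false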
+
+ void setFieldName( const char * s ) {
+ fieldName = s;
+ shortFieldName = strrchr( fieldName , '.' );
+ if ( shortFieldName )
+ shortFieldName++;
+ else
+ shortFieldName = fieldName;
+ }
+
+ /**
+         * @param in element whose actual value is incremented in place
+ */
+ void incrementMe( BSONElement& in ) const {
+ BSONElementManipulator manip( in );
+ switch ( in.type() ) {
+ case NumberDouble:
+ manip.setNumber( elt.numberDouble() + in.numberDouble() );
+ break;
+ case NumberLong:
+ manip.setLong( elt.numberLong() + in.numberLong() );
+ break;
+ case NumberInt:
+ manip.setInt( elt.numberInt() + in.numberInt() );
+ break;
+ default:
+ assert(0);
+ }
+ }
+ void IncrementMe( BSONElement& in ) const {
+ BSONElementManipulator manip( in );
+ switch ( in.type() ) {
+ case NumberDouble:
+ manip.SetNumber( elt.numberDouble() + in.numberDouble() );
+ break;
+ case NumberLong:
+ manip.SetLong( elt.numberLong() + in.numberLong() );
+ break;
+ case NumberInt:
+ manip.SetInt( elt.numberInt() + in.numberInt() );
+ break;
+ default:
+ assert(0);
+ }
+ }
+
+ template< class Builder >
+ void appendIncremented( Builder& bb , const BSONElement& in, ModState& ms ) const;
+
+ bool operator<( const Mod &other ) const {
+ return strcmp( fieldName, other.fieldName ) < 0;
+ }
+
+ bool arrayDep() const {
+ switch (op) {
+ case PUSH:
+ case PUSH_ALL:
+ case POP:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ static bool isIndexed( const string& fullName , const set<string>& idxKeys ) {
+ const char * fieldName = fullName.c_str();
+ // check if there is an index key that is a parent of mod
+ for( const char *dot = strchr( fieldName, '.' ); dot; dot = strchr( dot + 1, '.' ) )
+ if ( idxKeys.count( string( fieldName, dot - fieldName ) ) )
+ return true;
+
+ // check if there is an index key equal to mod
+ if ( idxKeys.count(fullName) )
+ return true;
+ // check if there is an index key that is a child of mod
+ set< string >::const_iterator j = idxKeys.upper_bound( fullName );
+ if ( j != idxKeys.end() && j->find( fullName ) == 0 && (*j)[fullName.size()] == '.' )
+ return true;
+
+ return false;
+ }
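+        // Worked example (hypothetical keys): with idxKeys = { "a.b" },
+        //   isIndexed( "a.b.c", idxKeys ) is true  (an index key is a parent of the mod),
+        //   isIndexed( "a.b",   idxKeys ) is true  (exact match),
+        //   isIndexed( "a",     idxKeys ) is true  (an index key is a child of the mod),
+        //   isIndexed( "a.x",   idxKeys ) is false.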
+
+ bool isIndexed( const set<string>& idxKeys ) const {
+ string fullName = fieldName;
+
+ if ( isIndexed( fullName , idxKeys ) )
+ return true;
+
+ if ( strstr( fieldName , "." ) ) {
+ // check for a.0.1
+ StringBuilder buf( fullName.size() + 1 );
+ for ( size_t i=0; i<fullName.size(); i++ ) {
+ char c = fullName[i];
+
+ if ( c == '$' &&
+ i > 0 && fullName[i-1] == '.' &&
+ i+1<fullName.size() &&
+ fullName[i+1] == '.' ) {
+ i++;
+ continue;
+ }
+
+ buf << c;
+
+ if ( c != '.' )
+ continue;
+
+ if ( ! isdigit( fullName[i+1] ) )
+ continue;
+
+ bool possible = true;
+ size_t j=i+2;
+ for ( ; j<fullName.size(); j++ ) {
+ char d = fullName[j];
+ if ( d == '.' )
+ break;
+ if ( isdigit( d ) )
+ continue;
+ possible = false;
+ break;
+ }
+
+ if ( possible )
+ i = j;
+ }
+ string x = buf.str();
+ if ( isIndexed( x , idxKeys ) )
+ return true;
+ }
+
+ return false;
+ }
+
+ template< class Builder >
+ void apply( Builder& b , BSONElement in , ModState& ms ) const;
+
+ /**
+ * @return true iff toMatch should be removed from the array
+ */
+ bool _pullElementMatch( BSONElement& toMatch ) const;
+
+ void _checkForAppending( const BSONElement& e ) const {
+ if ( e.type() == Object ) {
+ // this is a tiny bit slow, but rare and important
+ // only when setting something TO an object, not setting something in an object
+ // and it checks for { $set : { x : { 'a.b' : 1 } } }
+                // which we feel has been common
+ uassert( 12527 , "not okForStorage" , e.embeddedObject().okForStorage() );
+ }
+ }
+
+ bool isEach() const {
+ if ( elt.type() != Object )
+ return false;
+ BSONElement e = elt.embeddedObject().firstElement();
+ if ( e.type() != Array )
+ return false;
+ return strcmp( e.fieldName() , "$each" ) == 0;
+ }
+
+ BSONObj getEach() const {
+ return elt.embeddedObjectUserCheck().firstElement().embeddedObjectUserCheck();
+ }
+
+ void parseEach( BSONElementSet& s ) const {
+ BSONObjIterator i(getEach());
+ while ( i.more() ) {
+ s.insert( i.next() );
+ }
+ }
+
+ const char *renameFrom() const {
+ massert( 13492, "mod must be RENAME_TO type", op == Mod::RENAME_TO );
+ return elt.fieldName();
+ }
+ };
+
+ /**
+ * stores a set of Mods
+ * once created, should never be changed
+ */
+ class ModSet : boost::noncopyable {
+ typedef map<string,Mod> ModHolder;
+ ModHolder _mods;
+ int _isIndexed;
+ bool _hasDynamicArray;
+
+ static void extractFields( map< string, BSONElement > &fields, const BSONElement &top, const string &base );
+
+ FieldCompareResult compare( const ModHolder::iterator &m, map< string, BSONElement >::iterator &p, const map< string, BSONElement >::iterator &pEnd ) const {
+ bool mDone = ( m == _mods.end() );
+ bool pDone = ( p == pEnd );
+ assert( ! mDone );
+ assert( ! pDone );
+ if ( mDone && pDone )
+ return SAME;
+ // If one iterator is done we want to read from the other one, so say the other one is lower.
+ if ( mDone )
+ return RIGHT_BEFORE;
+ if ( pDone )
+ return LEFT_BEFORE;
+
+ return compareDottedFieldNames( m->first, p->first.c_str() );
+ }
+
+ bool mayAddEmbedded( map< string, BSONElement > &existing, string right ) {
+ for( string left = EmbeddedBuilder::splitDot( right );
+ left.length() > 0 && left[ left.length() - 1 ] != '.';
+ left += "." + EmbeddedBuilder::splitDot( right ) ) {
+ if ( existing.count( left ) > 0 && existing[ left ].type() != Object )
+ return false;
+ if ( haveModForField( left.c_str() ) )
+ return false;
+ }
+ return true;
+ }
+ static Mod::Op opFromStr( const char *fn ) {
+ assert( fn[0] == '$' );
+ switch( fn[1] ) {
+ case 'i': {
+ if ( fn[2] == 'n' && fn[3] == 'c' && fn[4] == 0 )
+ return Mod::INC;
+ break;
+ }
+ case 's': {
+ if ( fn[2] == 'e' && fn[3] == 't' && fn[4] == 0 )
+ return Mod::SET;
+ break;
+ }
+ case 'p': {
+ if ( fn[2] == 'u' ) {
+ if ( fn[3] == 's' && fn[4] == 'h' ) {
+ if ( fn[5] == 0 )
+ return Mod::PUSH;
+ if ( fn[5] == 'A' && fn[6] == 'l' && fn[7] == 'l' && fn[8] == 0 )
+ return Mod::PUSH_ALL;
+ }
+ else if ( fn[3] == 'l' && fn[4] == 'l' ) {
+ if ( fn[5] == 0 )
+ return Mod::PULL;
+ if ( fn[5] == 'A' && fn[6] == 'l' && fn[7] == 'l' && fn[8] == 0 )
+ return Mod::PULL_ALL;
+ }
+ }
+ else if ( fn[2] == 'o' && fn[3] == 'p' && fn[4] == 0 )
+ return Mod::POP;
+ break;
+ }
+ case 'u': {
+ if ( fn[2] == 'n' && fn[3] == 's' && fn[4] == 'e' && fn[5] == 't' && fn[6] == 0 )
+ return Mod::UNSET;
+ break;
+ }
+ case 'b': {
+ if ( fn[2] == 'i' && fn[3] == 't' ) {
+ if ( fn[4] == 0 )
+ return Mod::BIT;
+ if ( fn[4] == 'a' && fn[5] == 'n' && fn[6] == 'd' && fn[7] == 0 )
+ return Mod::BITAND;
+ if ( fn[4] == 'o' && fn[5] == 'r' && fn[6] == 0 )
+ return Mod::BITOR;
+ }
+ break;
+ }
+ case 'a': {
+ if ( fn[2] == 'd' && fn[3] == 'd' ) {
+ // add
+ if ( fn[4] == 'T' && fn[5] == 'o' && fn[6] == 'S' && fn[7] == 'e' && fn[8] == 't' && fn[9] == 0 )
+ return Mod::ADDTOSET;
+
+ }
+ break;
+ }
+ case 'r': {
+ if ( fn[2] == 'e' && fn[3] == 'n' && fn[4] == 'a' && fn[5] == 'm' && fn[6] =='e' ) {
+ return Mod::RENAME_TO; // with this return code we handle both RENAME_TO and RENAME_FROM
+ }
+ break;
+ }
+ default: break;
+ }
+ uassert( 10161 , "Invalid modifier specified " + string( fn ), false );
+ return Mod::INC;
+ }
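+        // Illustrative behaviour (hypothetical inputs): opFromStr( "$inc" ) yields Mod::INC,
+        // opFromStr( "$rename" ) yields Mod::RENAME_TO (the matching RENAME_FROM mod is
+        // synthesized later), and an unrecognized name such as "$frobnicate" trips uassert 10161.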
+
+ ModSet() {}
+
+ void updateIsIndexed( const Mod &m, const set<string> &idxKeys, const set<string> *backgroundKeys ) {
+ if ( m.isIndexed( idxKeys ) ||
+ (backgroundKeys && m.isIndexed(*backgroundKeys)) ) {
+ _isIndexed++;
+ }
+ }
+
+ public:
+
+ ModSet( const BSONObj &from ,
+ const set<string>& idxKeys = set<string>(),
+ const set<string>* backgroundKeys = 0
+ );
+
+ // TODO: this is inefficient - should probably just handle when iterating
+ ModSet * fixDynamicArray( const char * elemMatchKey ) const;
+
+ bool hasDynamicArray() const { return _hasDynamicArray; }
+
+ /**
+ * creates a ModSetState suitable for operation on obj
+         * doesn't change or modify this ModSet or any underlying Mod
+ */
+ auto_ptr<ModSetState> prepare( const BSONObj& obj ) const;
+
+ /**
+ * given a query pattern, builds an object suitable for an upsert
+ * will take the query spec and combine all $ operators
+ */
+ BSONObj createNewFromQuery( const BSONObj& query );
+
+        /**
+         * @return the number of mods in this set that touch an indexed field
+         */
+ int isIndexed() const {
+ return _isIndexed;
+ }
+
+ unsigned size() const { return _mods.size(); }
+
+ bool haveModForField( const char *fieldName ) const {
+ return _mods.find( fieldName ) != _mods.end();
+ }
+
+ bool haveConflictingMod( const string& fieldName ) {
+ size_t idx = fieldName.find( '.' );
+ if ( idx == string::npos )
+ idx = fieldName.size();
+
+ ModHolder::const_iterator start = _mods.lower_bound(fieldName.substr(0,idx));
+ for ( ; start != _mods.end(); start++ ) {
+ FieldCompareResult r = compareDottedFieldNames( fieldName , start->first );
+ switch ( r ) {
+ case LEFT_SUBFIELD: return true;
+ case LEFT_BEFORE: return false;
+ case SAME: return true;
+ case RIGHT_BEFORE: return false;
+ case RIGHT_SUBFIELD: return true;
+ }
+ }
+ return false;
+
+
+ }
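+        // Illustrative example (hypothetical field names): with an existing mod on "a.b",
+        // haveConflictingMod( "a" ), haveConflictingMod( "a.b" ) and haveConflictingMod( "a.b.c" )
+        // are all true, while haveConflictingMod( "a.c" ) is false.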
+
+ };
+
+ /**
+ * stores any information about a single Mod operating on a single Object
+ */
+ class ModState {
+ public:
+ const Mod * m;
+ BSONElement old;
+ BSONElement newVal;
+ BSONObj _objData;
+
+ const char * fixedOpName;
+ BSONElement * fixed;
+ int pushStartSize;
+
+ BSONType incType;
+ int incint;
+ double incdouble;
+ long long inclong;
+
+ bool dontApply;
+
+ ModState() {
+ fixedOpName = 0;
+ fixed = 0;
+ pushStartSize = -1;
+ incType = EOO;
+ dontApply = false;
+ }
+
+ Mod::Op op() const {
+ return m->op;
+ }
+
+ const char * fieldName() const {
+ return m->fieldName;
+ }
+
+ bool needOpLogRewrite() const {
+ if ( dontApply )
+ return false;
+
+ if ( fixed || fixedOpName || incType )
+ return true;
+
+ switch( op() ) {
+ case Mod::RENAME_FROM:
+ case Mod::RENAME_TO:
+ return true;
+ case Mod::BIT:
+ case Mod::BITAND:
+ case Mod::BITOR:
+ // TODO: should we convert this to $set?
+ return false;
+ default:
+ return false;
+ }
+ }
+
+ void appendForOpLog( BSONObjBuilder& b ) const;
+
+ template< class Builder >
+ void apply( Builder& b , BSONElement in ) {
+ m->apply( b , in , *this );
+ }
+
+ template< class Builder >
+ void appendIncValue( Builder& b , bool useFullName ) const {
+ const char * n = useFullName ? m->fieldName : m->shortFieldName;
+
+ switch ( incType ) {
+ case NumberDouble:
+ b.append( n , incdouble ); break;
+ case NumberLong:
+ b.append( n , inclong ); break;
+ case NumberInt:
+ b.append( n , incint ); break;
+ default:
+ assert(0);
+ }
+ }
+
+ string toString() const;
+
+ template< class Builder >
+ void handleRename( Builder &newObjBuilder, const char *shortFieldName );
+ };
+
+ /**
+ * this is used to hold state, meta data while applying a ModSet to a BSONObj
+ * the goal is to make ModSet const so its re-usable
+ */
+ class ModSetState : boost::noncopyable {
+ struct FieldCmp {
+ bool operator()( const string &l, const string &r ) const;
+ };
+ typedef map<string,ModState,FieldCmp> ModStateHolder;
+ const BSONObj& _obj;
+ ModStateHolder _mods;
+ bool _inPlacePossible;
+ BSONObj _newFromMods; // keep this data alive, as oplog generation may depend on it
+
+ ModSetState( const BSONObj& obj )
+ : _obj( obj ) , _inPlacePossible(true) {
+ }
+
+ /**
+ * @return if in place is still possible
+ */
+ bool amIInPlacePossible( bool inPlacePossible ) {
+ if ( ! inPlacePossible )
+ _inPlacePossible = false;
+ return _inPlacePossible;
+ }
+
+ template< class Builder >
+ void createNewFromMods( const string& root , Builder& b , const BSONObj &obj );
+
+ template< class Builder >
+ void _appendNewFromMods( const string& root , ModState& m , Builder& b , set<string>& onedownseen );
+
+ template< class Builder >
+ void appendNewFromMod( ModState& ms , Builder& b ) {
+ if ( ms.dontApply ) {
+ return;
+ }
+
+ //const Mod& m = *(ms.m); // HACK
+ Mod& m = *((Mod*)(ms.m)); // HACK
+
+ switch ( m.op ) {
+
+ case Mod::PUSH: {
+ if ( m.isEach() ) {
+ b.appendArray( m.shortFieldName, m.getEach() );
+ } else {
+ BSONObjBuilder arr( b.subarrayStart( m.shortFieldName ) );
+ arr.appendAs( m.elt, "0" );
+ arr.done();
+ }
+ break;
+ }
+ case Mod::ADDTOSET: {
+ if ( m.isEach() ) {
+ // Remove any duplicates in given array
+ BSONObjBuilder arr( b.subarrayStart( m.shortFieldName ) );
+ BSONElementSet toadd;
+ m.parseEach( toadd );
+ BSONObjIterator i( m.getEach() );
+ int n = 0;
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( toadd.count(e) ) {
+ arr.appendAs( e , BSONObjBuilder::numStr( n++ ) );
+ toadd.erase( e );
+ }
+ }
+ arr.done();
+ }
+ else {
+ BSONObjBuilder arr( b.subarrayStart( m.shortFieldName ) );
+ arr.appendAs( m.elt, "0" );
+ arr.done();
+ }
+ break;
+ }
+
+ case Mod::PUSH_ALL: {
+ b.appendAs( m.elt, m.shortFieldName );
+ break;
+ }
+
+ case Mod::UNSET:
+ case Mod::PULL:
+ case Mod::PULL_ALL:
+ // no-op b/c unset/pull of nothing does nothing
+ break;
+
+ case Mod::INC:
+ ms.fixedOpName = "$set";
+ case Mod::SET: {
+ m._checkForAppending( m.elt );
+ b.appendAs( m.elt, m.shortFieldName );
+ break;
+ }
+ // shouldn't see RENAME_FROM here
+ case Mod::RENAME_TO:
+ ms.handleRename( b, m.shortFieldName );
+ break;
+ default:
+ stringstream ss;
+ ss << "unknown mod in appendNewFromMod: " << m.op;
+ throw UserException( 9015, ss.str() );
+ }
+
+ }
+
+ public:
+
+ bool canApplyInPlace() const {
+ return _inPlacePossible;
+ }
+
+ /**
+         * modifies the underlying _obj
+ * @param isOnDisk - true means this is an on disk object, and this update needs to be made durable
+ */
+ void applyModsInPlace( bool isOnDisk );
+
+ BSONObj createNewFromMods();
+
+ // re-writing for oplog
+
+ bool needOpLogRewrite() const {
+ for ( ModStateHolder::const_iterator i = _mods.begin(); i != _mods.end(); i++ )
+ if ( i->second.needOpLogRewrite() )
+ return true;
+ return false;
+ }
+
+ BSONObj getOpLogRewrite() const {
+ BSONObjBuilder b;
+ for ( ModStateHolder::const_iterator i = _mods.begin(); i != _mods.end(); i++ )
+ i->second.appendForOpLog( b );
+ return b.obj();
+ }
+
+ bool haveArrayDepMod() const {
+ for ( ModStateHolder::const_iterator i = _mods.begin(); i != _mods.end(); i++ )
+ if ( i->second.m->arrayDep() )
+ return true;
+ return false;
+ }
+
+ void appendSizeSpecForArrayDepMods( BSONObjBuilder &b ) const {
+ for ( ModStateHolder::const_iterator i = _mods.begin(); i != _mods.end(); i++ ) {
+ const ModState& m = i->second;
+ if ( m.m->arrayDep() ) {
+ if ( m.pushStartSize == -1 )
+ b.appendNull( m.fieldName() );
+ else
+ b << m.fieldName() << BSON( "$size" << m.pushStartSize );
+ }
+ }
+ }
+
+ string toString() const;
+
+ friend class ModSet;
+ };
+
+}
+
diff --git a/src/mongo/db/pagefault.cpp b/src/mongo/db/pagefault.cpp
new file mode 100644
index 00000000000..4b9b1b23e02
--- /dev/null
+++ b/src/mongo/db/pagefault.cpp
@@ -0,0 +1,55 @@
+// @file pagefault.cpp
+
+#include "pch.h"
+#include "diskloc.h"
+#include "pagefault.h"
+#include "client.h"
+#include "pdfile.h"
+#include "server.h"
+
+namespace mongo {
+
+ PageFaultException::PageFaultException(Record *_r)
+ {
+ assert( cc()._pageFaultRetryableSection != 0 );
+ cc()._pageFaultRetryableSection->_laps++;
+ assert( cc()._pageFaultRetryableSection->_laps < 1000 );
+ r = _r;
+ era = LockMongoFilesShared::getEra();
+ }
+
+ void PageFaultException::touch() {
+ assert( !d.dbMutex.atLeastReadLocked() );
+ LockMongoFilesShared lk;
+ if( LockMongoFilesShared::getEra() != era ) {
+            // files were opened and closed; we don't try to handle that, we just bail out. this is much simpler
+ // and less error prone and saves us from taking a dbmutex readlock.
+ dlog(2) << "era changed" << endl;
+ return;
+ }
+ r->touch();
+ }
+
+ PageFaultRetryableSection::~PageFaultRetryableSection() {
+ cc()._pageFaultRetryableSection = old;
+ }
+ PageFaultRetryableSection::PageFaultRetryableSection() {
+ _laps = 0;
+ old = cc()._pageFaultRetryableSection;
+ if( d.dbMutex.atLeastReadLocked() ) {
+ cc()._pageFaultRetryableSection = 0;
+ if( debug || logLevel > 2 ) {
+ LOGSOME << "info PageFaultRetryableSection will not yield, already locked upon reaching" << endl;
+ }
+ }
+ else if( cc()._pageFaultRetryableSection ) {
+ cc()._pageFaultRetryableSection = 0;
+ dlog(2) << "info nested PageFaultRetryableSection will not yield on fault" << endl;
+ }
+ else {
+ cc()._pageFaultRetryableSection = this;
+ cc()._hasWrittenThisPass = false;
+ }
+ }
+
+}
diff --git a/src/mongo/db/pagefault.h b/src/mongo/db/pagefault.h
new file mode 100644
index 00000000000..8bbf4ecab52
--- /dev/null
+++ b/src/mongo/db/pagefault.h
@@ -0,0 +1,46 @@
+// @file pagefault.h
+
+// define this : _PAGEFAULTEXCEPTION
+
+#pragma once
+
+namespace mongo {
+
+ class Record;
+
+ class PageFaultException /*: public DBException*/ {
+ unsigned era;
+ Record *r;
+ public:
+ PageFaultException(const PageFaultException& rhs) : era(rhs.era), r(rhs.r) { }
+ explicit PageFaultException(Record*);
+ void touch();
+ };
+
+ class PageFaultRetryableSection : boost::noncopyable {
+ PageFaultRetryableSection *old;
+ public:
+ unsigned _laps;
+ PageFaultRetryableSection();
+ ~PageFaultRetryableSection();
+ };
+#if 0
+ inline void how_to_use_example() {
+ // ...
+ {
+ PageFaultRetryableSection s;
+ while( 1 ) {
+ try {
+ writelock lk; // or readlock
+ // do work
+ break;
+ }
+ catch( PageFaultException& e ) {
+ e.touch();
+ }
+ }
+ }
+ // ...
+ }
+#endif
+}
diff --git a/src/mongo/db/pcre.txt b/src/mongo/db/pcre.txt
new file mode 100644
index 00000000000..3e21047eabc
--- /dev/null
+++ b/src/mongo/db/pcre.txt
@@ -0,0 +1,15 @@
+
+
+You need to install pcre.
+
+This could be scripted:
+
+cd /tmp
+curl -O ftp://ftp.csx.cam.ac.uk/pub/software/programming/pcre/pcre-7.4.tar.gz
+tar -xzf pcre-7.4.tar.gz
+cd pcre-7.4
+./configure --enable-utf8 --with-match-limit=200000 --with-match-limit-recursion=4000
+make
+make install
+
+
+At that point it will be installed in /usr/*. The version in p/pcre-7.4 is for VC++.
diff --git a/src/mongo/db/pdfile.cpp b/src/mongo/db/pdfile.cpp
new file mode 100644
index 00000000000..069eeadec37
--- /dev/null
+++ b/src/mongo/db/pdfile.cpp
@@ -0,0 +1,2425 @@
+// pdfile.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+/*
+todo:
+_ table scans must be sequential, not next/prev pointers
+_ coalesce deleted
+_ disallow system* manipulations from the database.
+*/
+
+#include "pch.h"
+#include "pdfile.h"
+#include "db.h"
+#include "../util/mmap.h"
+#include "../util/hashtab.h"
+#include "../util/file_allocator.h"
+#include "../util/processinfo.h"
+#include "../util/file.h"
+#include "btree.h"
+#include "btreebuilder.h"
+#include <algorithm>
+#include <list>
+#include "repl.h"
+#include "dbhelpers.h"
+#include "namespace-inl.h"
+#include "queryutil.h"
+#include "extsort.h"
+#include "curop-inl.h"
+#include "background.h"
+#include "compact.h"
+#include "ops/delete.h"
+#include "instance.h"
+#include "replutil.h"
+
+namespace mongo {
+
+ BOOST_STATIC_ASSERT( sizeof(Extent)-4 == 48+128 );
+ BOOST_STATIC_ASSERT( sizeof(DataFileHeader)-4 == 8192 );
+
+ void printMemInfo( const char * where ) {
+ cout << "mem info: ";
+ if ( where )
+ cout << where << " ";
+ ProcessInfo pi;
+ if ( ! pi.supported() ) {
+ cout << " not supported" << endl;
+ return;
+ }
+
+ cout << "vsize: " << pi.getVirtualMemorySize() << " resident: " << pi.getResidentSize() << " mapped: " << ( MemoryMappedFile::totalMappedLength() / ( 1024 * 1024 ) ) << endl;
+ }
+
+ bool isValidNS( const StringData& ns ) {
+ // TODO: should check for invalid characters
+
+ const char * x = strchr( ns.data() , '.' );
+ if ( ! x )
+ return false;
+
+ x++;
+ return *x > 0;
+ }
+
+ bool inDBRepair = false;
+ struct doingRepair {
+ doingRepair() {
+ assert( ! inDBRepair );
+ inDBRepair = true;
+ }
+ ~doingRepair() {
+ inDBRepair = false;
+ }
+ };
+
+ map<string, unsigned> BackgroundOperation::dbsInProg;
+ set<string> BackgroundOperation::nsInProg;
+
+ bool BackgroundOperation::inProgForDb(const char *db) {
+ assertInWriteLock();
+ return dbsInProg[db] != 0;
+ }
+
+ bool BackgroundOperation::inProgForNs(const char *ns) {
+ assertInWriteLock();
+ return nsInProg.count(ns) != 0;
+ }
+
+ void BackgroundOperation::assertNoBgOpInProgForDb(const char *db) {
+ uassert(12586, "cannot perform operation: a background operation is currently running for this database",
+ !inProgForDb(db));
+ }
+
+ void BackgroundOperation::assertNoBgOpInProgForNs(const char *ns) {
+ uassert(12587, "cannot perform operation: a background operation is currently running for this collection",
+ !inProgForNs(ns));
+ }
+
+ BackgroundOperation::BackgroundOperation(const char *ns) : _ns(ns) {
+ assertInWriteLock();
+ dbsInProg[_ns.db]++;
+ assert( nsInProg.count(_ns.ns()) == 0 );
+ nsInProg.insert(_ns.ns());
+ }
+
+ BackgroundOperation::~BackgroundOperation() {
+ wassert( d.dbMutex.isWriteLocked() );
+ dbsInProg[_ns.db]--;
+ nsInProg.erase(_ns.ns());
+ }
+
+ void BackgroundOperation::dump(stringstream& ss) {
+ if( nsInProg.size() ) {
+ ss << "\n<b>Background Jobs in Progress</b>\n";
+ for( set<string>::iterator i = nsInProg.begin(); i != nsInProg.end(); i++ )
+ ss << " " << *i << '\n';
+ }
+ for( map<string,unsigned>::iterator i = dbsInProg.begin(); i != dbsInProg.end(); i++ ) {
+ if( i->second )
+ ss << "database " << i->first << ": " << i->second << '\n';
+ }
+ }
+
+ /* ----------------------------------------- */
+
+ string dbpath = "/data/db/";
+ const char FREELIST_NS[] = ".$freelist";
+ bool directoryperdb = false;
+ string repairpath;
+ string pidfilepath;
+
+ DataFileMgr theDataFileMgr;
+ DatabaseHolder _dbHolder;
+ int MAGIC = 0x1000;
+
+ DatabaseHolder& dbHolderUnchecked() {
+ return _dbHolder;
+ }
+
+ void addNewNamespaceToCatalog(const char *ns, const BSONObj *options = 0);
+ void ensureIdIndexForNewNs(const char *ns) {
+ if ( ( strstr( ns, ".system." ) == 0 || legalClientSystemNS( ns , false ) ) &&
+ strstr( ns, FREELIST_NS ) == 0 ) {
+ log( 1 ) << "adding _id index for collection " << ns << endl;
+ ensureHaveIdIndex( ns );
+ }
+ }
+
+ string getDbContext() {
+ stringstream ss;
+ Client * c = currentClient.get();
+ if ( c ) {
+ Client::Context * cx = c->getContext();
+ if ( cx ) {
+ Database *database = cx->db();
+ if ( database ) {
+ ss << database->name << ' ';
+ ss << cx->ns() << ' ';
+ }
+ }
+ }
+ return ss.str();
+ }
+
+ /*---------------------------------------------------------------------*/
+
+ // inheritable class to implement an operation that may be applied to all
+ // files in a database using _applyOpToDataFiles()
+ class FileOp {
+ public:
+ virtual ~FileOp() {}
+ // Return true if file exists and operation successful
+ virtual bool apply( const boost::filesystem::path &p ) = 0;
+ virtual const char * op() const = 0;
+ };
+
+ void _applyOpToDataFiles( const char *database, FileOp &fo, bool afterAllocator = false, const string& path = dbpath );
+
+ void _deleteDataFiles(const char *database) {
+ if ( directoryperdb ) {
+ FileAllocator::get()->waitUntilFinished();
+ MONGO_BOOST_CHECK_EXCEPTION_WITH_MSG( boost::filesystem::remove_all( boost::filesystem::path( dbpath ) / database ), "delete data files with a directoryperdb" );
+ return;
+ }
+ class : public FileOp {
+ virtual bool apply( const boost::filesystem::path &p ) {
+ return boost::filesystem::remove( p );
+ }
+ virtual const char * op() const {
+ return "remove";
+ }
+ } deleter;
+ _applyOpToDataFiles( database, deleter, true );
+ }
+
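+    // sizing sketch (worked from the code below): len=500 gives 500*64 = 32000, which is already
+    // 256-byte aligned; len=5000 gives 5000*16 = 80000, masked down to 79872 by the & 0xffffff00 step.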
+ int Extent::initialSize(int len) {
+ long long sz = len * 16;
+ if ( len < 1000 ) sz = len * 64;
+ if ( sz > 1000000000 )
+ sz = 1000000000;
+ int z = ((int)sz) & 0xffffff00;
+ assert( z > len );
+ return z;
+ }
+
+ bool _userCreateNS(const char *ns, const BSONObj& options, string& err, bool *deferIdIndex) {
+ if ( nsdetails(ns) ) {
+ err = "collection already exists";
+ return false;
+ }
+
+ log(1) << "create collection " << ns << ' ' << options << endl;
+
+ /* todo: do this only when we have allocated space successfully? or we could insert with a { ok: 0 } field
+ and then go back and set to ok : 1 after we are done.
+ */
+ bool isFreeList = strstr(ns, FREELIST_NS) != 0;
+ if( !isFreeList )
+ addNewNamespaceToCatalog(ns, options.isEmpty() ? 0 : &options);
+
+ long long size = Extent::initialSize(128);
+ {
+ BSONElement e = options.getField("size");
+ if ( e.isNumber() ) {
+ size = e.numberLong();
+ size += 256;
+ size &= 0xffffffffffffff00LL;
+ }
+ }
+
+ uassert( 10083 , "create collection invalid size spec", size > 0 );
+
+ bool newCapped = false;
+ int mx = 0;
+ if( options["capped"].trueValue() ) {
+ newCapped = true;
+ BSONElement e = options.getField("max");
+ if ( e.isNumber() ) {
+ mx = e.numberInt();
+ }
+ }
+
+ // $nExtents just for debug/testing.
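+        // e.g. { $nExtents: [ 1048576, 2097152 ] } creates one extent per array entry with the given
+        // sizes, while { $nExtents: 3 } creates three extents of 'size' bytes each.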
+ BSONElement e = options.getField( "$nExtents" );
+ Database *database = cc().database();
+ if ( e.type() == Array ) {
+ // We create one extent per array entry, with size specified
+ // by the array value.
+ BSONObjIterator i( e.embeddedObject() );
+ while( i.more() ) {
+ BSONElement e = i.next();
+ int size = int( e.number() );
+ assert( size <= 0x7fffffff );
+ // $nExtents is just for testing - always allocate new extents
+                // rather than reuse existing extents so we have some predictability
+ // in the extent size used by our tests
+ database->suitableFile( ns, (int) size, false, false )->createExtent( ns, (int) size, newCapped );
+ }
+ }
+ else if ( int( e.number() ) > 0 ) {
+ // We create '$nExtents' extents, each of size 'size'.
+ int nExtents = int( e.number() );
+ assert( size <= 0x7fffffff );
+ for ( int i = 0; i < nExtents; ++i ) {
+ assert( size <= 0x7fffffff );
+ // $nExtents is just for testing - always allocate new extents
+                // rather than reuse existing extents so we have some predictability
+ // in the extent size used by our tests
+ database->suitableFile( ns, (int) size, false, false )->createExtent( ns, (int) size, newCapped );
+ }
+ }
+ else {
+ // This is the non test case, where we don't have a $nExtents spec.
+ while ( size > 0 ) {
+ int max = MongoDataFile::maxSize() - DataFileHeader::HeaderSize;
+ int desiredExtentSize = (int) (size > max ? max : size);
+ if ( desiredExtentSize < Extent::minSize() ) {
+ desiredExtentSize = Extent::minSize();
+ }
+ desiredExtentSize &= 0xffffff00;
+ Extent *e = database->allocExtent( ns, desiredExtentSize, newCapped, true );
+ size -= e->length;
+ }
+ }
+
+ NamespaceDetails *d = nsdetails(ns);
+ assert(d);
+
+ bool ensure = false;
+ if ( options.getField( "autoIndexId" ).type() ) {
+ if ( options["autoIndexId"].trueValue() ) {
+ ensure = true;
+ }
+ }
+ else {
+ if ( !newCapped ) {
+ ensure=true;
+ }
+ }
+ if( ensure ) {
+ if( deferIdIndex )
+ *deferIdIndex = true;
+ else
+ ensureIdIndexForNewNs( ns );
+ }
+
+ if ( mx > 0 )
+ getDur().writingInt( d->max ) = mx;
+
+ return true;
+ }
+
+ /** { ..., capped: true, size: ..., max: ... }
+       @param deferIdIndex - if not null, defers id index creation; sets the bool value to true if we wanted to create the id index.
+ @return true if successful
+ */
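+    /* usage sketch (hypothetical namespace and sizes, for illustration):
+         string err;
+         BSONObj opts = BSON( "capped" << true << "size" << 1048576 << "max" << 5000 );
+         bool ok = userCreateNS( "test.capped_demo", opts, err, true, 0 );
+       creates a 1MB capped collection holding at most 5000 documents and, on success, logs the
+       create op for replication.
+    */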
+ bool userCreateNS(const char *ns, BSONObj options, string& err, bool logForReplication, bool *deferIdIndex) {
+ const char *coll = strchr( ns, '.' ) + 1;
+ massert( 10356 , str::stream() << "invalid ns: " << ns , NamespaceString::validCollectionName(ns));
+ char cl[ 256 ];
+ nsToDatabase( ns, cl );
+ bool ok = _userCreateNS(ns, options, err, deferIdIndex);
+ if ( logForReplication && ok ) {
+ if ( options.getField( "create" ).eoo() ) {
+ BSONObjBuilder b;
+ b << "create" << coll;
+ b.appendElements( options );
+ options = b.obj();
+ }
+ string logNs = string( cl ) + ".$cmd";
+ logOp("c", logNs.c_str(), options);
+ }
+ return ok;
+ }
+
+ /*---------------------------------------------------------------------*/
+
+ int MongoDataFile::maxSize() {
+ if ( sizeof( int* ) == 4 ) {
+ return 512 * 1024 * 1024;
+ }
+ else if ( cmdLine.smallfiles ) {
+ return 0x7ff00000 >> 2;
+ }
+ else {
+ return 0x7ff00000;
+ }
+ }
+
+ NOINLINE_DECL void MongoDataFile::badOfs2(int ofs) const {
+ stringstream ss;
+ ss << "bad offset:" << ofs << " accessing file: " << mmf.filename() << " - consider repairing database";
+ uasserted(13441, ss.str());
+ }
+
+ NOINLINE_DECL void MongoDataFile::badOfs(int ofs) const {
+ stringstream ss;
+ ss << "bad offset:" << ofs << " accessing file: " << mmf.filename() << " - consider repairing database";
+ uasserted(13440, ss.str());
+ }
+
+ int MongoDataFile::defaultSize( const char *filename ) const {
+ int size;
+ if ( fileNo <= 4 )
+ size = (64*1024*1024) << fileNo;
+ else
+ size = 0x7ff00000;
+ if ( cmdLine.smallfiles ) {
+ size = size >> 2;
+ }
+ return size;
+ }
+
+ static void check(void *_mb) {
+ if( sizeof(char *) == 4 )
+ uassert( 10084 , "can't map file memory - mongo requires 64 bit build for larger datasets", _mb != 0);
+ else
+ uassert( 10085 , "can't map file memory", _mb != 0);
+ }
+
+ /** @return true if found and opened. if uninitialized (prealloc only) does not open. */
+ bool MongoDataFile::openExisting( const char *filename ) {
+ assert( _mb == 0 );
+ if( !exists(filename) )
+ return false;
+ if( !mmf.open(filename,false) ) {
+ dlog(2) << "info couldn't open " << filename << " probably end of datafile list" << endl;
+ return false;
+ }
+ _mb = mmf.getView(); assert(_mb);
+ unsigned long long sz = mmf.length();
+ assert( sz <= 0x7fffffff );
+ assert( sz % 4096 == 0 );
+ if( sz < 64*1024*1024 && !cmdLine.smallfiles ) {
+ if( sz >= 16*1024*1024 && sz % (1024*1024) == 0 ) {
+ log() << "info openExisting file size " << sz << " but cmdLine.smallfiles=false" << endl;
+ }
+ else {
+ log() << "openExisting size " << sz << " less then minimum file size expectation " << filename << endl;
+ assert(false);
+ }
+ }
+ check(_mb);
+ if( header()->uninitialized() )
+ return false;
+ return true;
+ }
+
+ void MongoDataFile::open( const char *filename, int minSize, bool preallocateOnly ) {
+ long size = defaultSize( filename );
+ while ( size < minSize ) {
+ if ( size < maxSize() / 2 )
+ size *= 2;
+ else {
+ size = maxSize();
+ break;
+ }
+ }
+ if ( size > maxSize() )
+ size = maxSize();
+
+ assert( size >= 64*1024*1024 || cmdLine.smallfiles );
+ assert( size % 4096 == 0 );
+
+ if ( preallocateOnly ) {
+ if ( cmdLine.prealloc ) {
+ FileAllocator::get()->requestAllocation( filename, size );
+ }
+ return;
+ }
+
+ {
+ assert( _mb == 0 );
+ unsigned long long sz = size;
+ if( mmf.create(filename, sz, false) )
+ _mb = mmf.getView();
+ assert( sz <= 0x7fffffff );
+ size = (int) sz;
+ }
+ check(_mb);
+ header()->init(fileNo, size, filename);
+ }
+
+ void MongoDataFile::flush( bool sync ) {
+ mmf.flush( sync );
+ }
+
+ void addNewExtentToNamespace(const char *ns, Extent *e, DiskLoc eloc, DiskLoc emptyLoc, bool capped) {
+ NamespaceIndex *ni = nsindex(ns);
+ NamespaceDetails *details = ni->details(ns);
+ if ( details ) {
+ assert( !details->lastExtent.isNull() );
+ assert( !details->firstExtent.isNull() );
+ getDur().writingDiskLoc(e->xprev) = details->lastExtent;
+ getDur().writingDiskLoc(details->lastExtent.ext()->xnext) = eloc;
+ assert( !eloc.isNull() );
+ getDur().writingDiskLoc(details->lastExtent) = eloc;
+ }
+ else {
+ ni->add_ns(ns, eloc, capped);
+ details = ni->details(ns);
+ }
+
+ {
+ NamespaceDetails *dw = details->writingWithoutExtra();
+ dw->lastExtentSize = e->length;
+ }
+ details->addDeletedRec(emptyLoc.drec(), emptyLoc);
+ }
+
+ Extent* MongoDataFile::createExtent(const char *ns, int approxSize, bool newCapped, int loops) {
+ {
+ // make sizes align with VM page size
+ int newSize = (approxSize + 0xfff) & 0xfffff000;
+ assert( newSize >= 0 );
+ if( newSize < Extent::maxSize() )
+ approxSize = newSize;
+ }
+ massert( 10357 , "shutdown in progress", ! inShutdown() );
+ massert( 10358 , "bad new extent size", approxSize >= Extent::minSize() && approxSize <= Extent::maxSize() );
+ massert( 10359 , "header==0 on new extent: 32 bit mmap space exceeded?", header() ); // null if file open failed
+ int ExtentSize = min(header()->unusedLength, approxSize);
+ DiskLoc loc;
+ if ( ExtentSize < Extent::minSize() ) {
+            /* note there could be a lot of looping here if the db just started and
+ no files are open yet. we might want to do something about that. */
+ if ( loops > 8 ) {
+ assert( loops < 10000 );
+ out() << "warning: loops=" << loops << " fileno:" << fileNo << ' ' << ns << '\n';
+ }
+ log() << "newExtent: " << ns << " file " << fileNo << " full, adding a new file\n";
+ return cc().database()->addAFile( 0, true )->createExtent(ns, approxSize, newCapped, loops+1);
+ }
+ int offset = header()->unused.getOfs();
+
+ DataFileHeader *h = header();
+ h->unused.writing().set( fileNo, offset + ExtentSize );
+ getDur().writingInt(h->unusedLength) = h->unusedLength - ExtentSize;
+ loc.set(fileNo, offset);
+ Extent *e = _getExtent(loc);
+ DiskLoc emptyLoc = getDur().writing(e)->init(ns, ExtentSize, fileNo, offset, newCapped);
+
+ addNewExtentToNamespace(ns, e, loc, emptyLoc, newCapped);
+
+ DEV tlog(1) << "new extent " << ns << " size: 0x" << hex << ExtentSize << " loc: 0x" << hex << offset
+ << " emptyLoc:" << hex << emptyLoc.getOfs() << dec << endl;
+ return e;
+ }
+
+ Extent* DataFileMgr::allocFromFreeList(const char *ns, int approxSize, bool capped) {
+ string s = cc().database()->name + FREELIST_NS;
+ NamespaceDetails *f = nsdetails(s.c_str());
+ if( f ) {
+ int low, high;
+ if( capped ) {
+ // be strict about the size
+ low = approxSize;
+ if( low > 2048 ) low -= 256;
+ high = (int) (approxSize * 1.05) + 256;
+ }
+ else {
+ low = (int) (approxSize * 0.8);
+ high = (int) (approxSize * 1.4);
+ }
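+            // e.g. a non-capped request with approxSize=100000 searches the freelist for an extent
+            // between roughly 0.8x and 1.4x of the request (about 80000 to 140000 bytes).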
+ if( high <= 0 ) {
+ // overflowed
+ high = max(approxSize, Extent::maxSize());
+ }
+ int n = 0;
+ Extent *best = 0;
+ int bestDiff = 0x7fffffff;
+ {
+ Timer t;
+ DiskLoc L = f->firstExtent;
+ while( !L.isNull() ) {
+ Extent * e = L.ext();
+ if( e->length >= low && e->length <= high ) {
+ int diff = abs(e->length - approxSize);
+ if( diff < bestDiff ) {
+ bestDiff = diff;
+ best = e;
+ if( ((double) diff) / approxSize < 0.1 ) {
+ // close enough
+ break;
+ }
+ if( t.seconds() >= 2 ) {
+ // have spent lots of time in write lock, and we are in [low,high], so close enough
+ // could come into play if extent freelist is very long
+ break;
+ }
+ }
+ else {
+ OCCASIONALLY {
+ if( high < 64 * 1024 && t.seconds() >= 2 ) {
+ // be less picky if it is taking a long time
+ high = 64 * 1024;
+ }
+ }
+ }
+ }
+ L = e->xnext;
+ ++n;
+ }
+ if( t.seconds() >= 10 ) {
+ log() << "warning: slow scan in allocFromFreeList (in write lock)" << endl;
+ }
+ }
+
+ if( n > 128 ) log( n < 512 ) << "warning: newExtent " << n << " scanned\n";
+
+ if( best ) {
+ Extent *e = best;
+ // remove from the free list
+ if( !e->xprev.isNull() )
+ e->xprev.ext()->xnext.writing() = e->xnext;
+ if( !e->xnext.isNull() )
+ e->xnext.ext()->xprev.writing() = e->xprev;
+ if( f->firstExtent == e->myLoc )
+ f->firstExtent.writing() = e->xnext;
+ if( f->lastExtent == e->myLoc )
+ f->lastExtent.writing() = e->xprev;
+
+ // use it
+ OCCASIONALLY if( n > 512 ) log() << "warning: newExtent " << n << " scanned\n";
+ DiskLoc emptyLoc = e->reuse(ns, capped);
+ addNewExtentToNamespace(ns, e, e->myLoc, emptyLoc, capped);
+ return e;
+ }
+ }
+
+ return 0;
+ // return createExtent(ns, approxSize, capped);
+ }
+
+ /*---------------------------------------------------------------------*/
+
+ void Extent::markEmpty() {
+ xnext.Null();
+ xprev.Null();
+ firstRecord.Null();
+ lastRecord.Null();
+ }
+
+ DiskLoc Extent::reuse(const char *nsname, bool capped) {
+ return getDur().writing(this)->_reuse(nsname, capped);
+ }
+
+ void getEmptyLoc(const char *ns, const DiskLoc extentLoc, int extentLength, bool capped, /*out*/DiskLoc& emptyLoc, /*out*/int& delRecLength) {
+ emptyLoc = extentLoc;
+ emptyLoc.inc( Extent::HeaderSize() );
+ delRecLength = extentLength - Extent::HeaderSize();
+ if( delRecLength >= 32*1024 && str::contains(ns, '$') && !capped ) {
+ // probably an index. so skip forward to keep its records page aligned
+ int& ofs = emptyLoc.GETOFS();
+ int newOfs = (ofs + 0xfff) & ~0xfff;
+ delRecLength -= (newOfs-ofs);
+ dassert( delRecLength > 0 );
+ ofs = newOfs;
+ }
+ }
+
+ DiskLoc Extent::_reuse(const char *nsname, bool capped) {
+ LOG(3) << "reset extent was:" << nsDiagnostic.toString() << " now:" << nsname << '\n';
+ massert( 10360 , "Extent::reset bad magic value", magic == 0x41424344 );
+ nsDiagnostic = nsname;
+ markEmpty();
+
+ DiskLoc emptyLoc;
+ int delRecLength;
+ getEmptyLoc(nsname, myLoc, length, capped, emptyLoc, delRecLength);
+
+ // todo: some dup code here and below in Extent::init
+ DeletedRecord *empty = DataFileMgr::makeDeletedRecord(emptyLoc, delRecLength);
+ empty = getDur().writing(empty);
+ empty->lengthWithHeaders = delRecLength;
+ empty->extentOfs = myLoc.getOfs();
+ empty->nextDeleted.Null();
+
+ return emptyLoc;
+ }
+
+ /* assumes already zeroed -- insufficient for block 'reuse' perhaps */
+ DiskLoc Extent::init(const char *nsname, int _length, int _fileNo, int _offset, bool capped) {
+ magic = 0x41424344;
+ myLoc.set(_fileNo, _offset);
+ xnext.Null();
+ xprev.Null();
+ nsDiagnostic = nsname;
+ length = _length;
+ firstRecord.Null();
+ lastRecord.Null();
+
+ DiskLoc emptyLoc;
+ int delRecLength;
+ getEmptyLoc(nsname, myLoc, _length, capped, emptyLoc, delRecLength);
+
+ DeletedRecord *empty = getDur().writing( DataFileMgr::makeDeletedRecord(emptyLoc, delRecLength) );
+ empty->lengthWithHeaders = delRecLength;
+ empty->extentOfs = myLoc.getOfs();
+
+ return emptyLoc;
+ }
+
+ /*
+ Record* Extent::newRecord(int len) {
+    if( firstEmptyRegion.isNull() )
+ return 0;
+
+ assert(len > 0);
+ int newRecSize = len + Record::HeaderSize;
+ DiskLoc newRecordLoc = firstEmptyRegion;
+ Record *r = getRecord(newRecordLoc);
+ int left = r->netLength() - len;
+ if( left < 0 ) {
+ //
+ firstEmptyRegion.Null();
+ return 0;
+ }
+
+ DiskLoc nextEmpty = r->next.getNextEmpty(firstEmptyRegion);
+ r->lengthWithHeaders = newRecSize;
+ r->next.markAsFirstOrLastInExtent(this); // we're now last in the extent
+ if( !lastRecord.isNull() ) {
+ assert(getRecord(lastRecord)->next.lastInExtent()); // it was the last one
+ getRecord(lastRecord)->next.set(newRecordLoc); // until now
+ r->prev.set(lastRecord);
+ }
+ else {
+ r->prev.markAsFirstOrLastInExtent(this); // we are the first in the extent
+ assert( firstRecord.isNull() );
+ firstRecord = newRecordLoc;
+ }
+ lastRecord = newRecordLoc;
+
+ if( left < Record::HeaderSize + 32 ) {
+ firstEmptyRegion.Null();
+ }
+ else {
+ firstEmptyRegion.inc(newRecSize);
+ Record *empty = getRecord(firstEmptyRegion);
+ empty->next.set(nextEmpty); // not for empty records, unless in-use records, next and prev can be null.
+ empty->prev.Null();
+ empty->lengthWithHeaders = left;
+ }
+
+ return r;
+ }
+ */
+
+ int Extent::maxSize() {
+ int maxExtentSize = 0x7ff00000;
+ if ( cmdLine.smallfiles ) {
+ maxExtentSize >>= 2;
+ }
+ return maxExtentSize;
+ }
+
+ /*---------------------------------------------------------------------*/
+
+ shared_ptr<Cursor> DataFileMgr::findAll(const char *ns, const DiskLoc &startLoc) {
+ NamespaceDetails * d = nsdetails( ns );
+ if ( ! d )
+ return shared_ptr<Cursor>(new BasicCursor(DiskLoc()));
+
+ DiskLoc loc = d->firstExtent;
+ Extent *e = getExtent(loc);
+
+ DEBUGGING {
+ out() << "listing extents for " << ns << endl;
+ DiskLoc tmp = loc;
+ set<DiskLoc> extents;
+
+ while ( 1 ) {
+ Extent *f = getExtent(tmp);
+ out() << "extent: " << tmp.toString() << endl;
+ extents.insert(tmp);
+ tmp = f->xnext;
+ if ( tmp.isNull() )
+ break;
+ f = f->getNextExtent();
+ }
+
+ out() << endl;
+ d->dumpDeleted(&extents);
+ }
+
+ if ( d->capped )
+ return shared_ptr<Cursor>( new ForwardCappedCursor( d , startLoc ) );
+
+ if ( !startLoc.isNull() )
+ return shared_ptr<Cursor>(new BasicCursor( startLoc ));
+
+ while ( e->firstRecord.isNull() && !e->xnext.isNull() ) {
+ /* todo: if extent is empty, free it for reuse elsewhere.
+               that is a bit complicated; we'd have to clean up the freelists.
+ */
+ RARELY out() << "info DFM::findAll(): extent " << loc.toString() << " was empty, skipping ahead. ns:" << ns << endl;
+ // find a nonempty extent
+ // it might be nice to free the whole extent here! but have to clean up free recs then.
+ e = e->getNextExtent();
+ }
+ return shared_ptr<Cursor>(new BasicCursor( e->firstRecord ));
+ }
+
+ /* get a table scan cursor, but can be forward or reverse direction.
+ order.$natural - if set, > 0 means forward (asc), < 0 backward (desc).
+ */
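+    //   e.g. (hypothetical namespace) findTableScan( "test.foo", BSON( "$natural" << -1 ), DiskLoc() )
+    //   returns a cursor over the collection in reverse natural (reverse disk) order.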
+ shared_ptr<Cursor> findTableScan(const char *ns, const BSONObj& order, const DiskLoc &startLoc) {
+ BSONElement el = order.getField("$natural"); // e.g., { $natural : -1 }
+
+ if ( el.number() >= 0 )
+ return DataFileMgr::findAll(ns, startLoc);
+
+ // "reverse natural order"
+ NamespaceDetails *d = nsdetails(ns);
+
+ if ( !d )
+ return shared_ptr<Cursor>(new BasicCursor(DiskLoc()));
+
+ if ( !d->capped ) {
+ if ( !startLoc.isNull() )
+ return shared_ptr<Cursor>(new ReverseCursor( startLoc ));
+ Extent *e = d->lastExtent.ext();
+ while ( e->lastRecord.isNull() && !e->xprev.isNull() ) {
+ OCCASIONALLY out() << " findTableScan: extent empty, skipping ahead" << endl;
+ e = e->getPrevExtent();
+ }
+ return shared_ptr<Cursor>(new ReverseCursor( e->lastRecord ));
+ }
+ else {
+ return shared_ptr<Cursor>( new ReverseCappedCursor( d, startLoc ) );
+ }
+ }
+
+ void printFreeList() {
+ string s = cc().database()->name + FREELIST_NS;
+ log() << "dump freelist " << s << endl;
+ NamespaceDetails *freeExtents = nsdetails(s.c_str());
+ if( freeExtents == 0 ) {
+ log() << " freeExtents==0" << endl;
+ return;
+ }
+ DiskLoc a = freeExtents->firstExtent;
+ while( !a.isNull() ) {
+ Extent *e = a.ext();
+ log() << " extent " << a.toString() << " len:" << e->length << " prev:" << e->xprev.toString() << endl;
+ a = e->xnext;
+ }
+
+ log() << "end freelist" << endl;
+ }
+
+    /** free a list of extents that are no longer in use. this is a doubly linked list of extents
+ (could be just one in the list)
+ */
+ void freeExtents(DiskLoc firstExt, DiskLoc lastExt) {
+ {
+ assert( !firstExt.isNull() && !lastExt.isNull() );
+ Extent *f = firstExt.ext();
+ Extent *l = lastExt.ext();
+ assert( f->xprev.isNull() );
+ assert( l->xnext.isNull() );
+ assert( f==l || !f->xnext.isNull() );
+ assert( f==l || !l->xprev.isNull() );
+ }
+
+ string s = cc().database()->name + FREELIST_NS;
+ NamespaceDetails *freeExtents = nsdetails(s.c_str());
+ if( freeExtents == 0 ) {
+ string err;
+ _userCreateNS(s.c_str(), BSONObj(), err, 0); // todo: this actually allocates an extent, which is bad!
+ freeExtents = nsdetails(s.c_str());
+ massert( 10361 , "can't create .$freelist", freeExtents);
+ }
+ if( freeExtents->firstExtent.isNull() ) {
+ freeExtents->firstExtent.writing() = firstExt;
+ freeExtents->lastExtent.writing() = lastExt;
+ }
+ else {
+ DiskLoc a = freeExtents->firstExtent;
+ assert( a.ext()->xprev.isNull() );
+ getDur().writingDiskLoc( a.ext()->xprev ) = lastExt;
+ getDur().writingDiskLoc( lastExt.ext()->xnext ) = a;
+ getDur().writingDiskLoc( freeExtents->firstExtent ) = firstExt;
+ }
+
+ //printFreeList();
+ }
+
+ /* drop a collection/namespace */
+ void dropNS(const string& nsToDrop) {
+ NamespaceDetails* d = nsdetails(nsToDrop.c_str());
+ uassert( 10086 , (string)"ns not found: " + nsToDrop , d );
+
+ BackgroundOperation::assertNoBgOpInProgForNs(nsToDrop.c_str());
+
+ NamespaceString s(nsToDrop);
+ assert( s.db == cc().database()->name );
+ if( s.isSystem() ) {
+ if( s.coll == "system.profile" )
+ uassert( 10087 , "turn off profiling before dropping system.profile collection", cc().database()->profile == 0 );
+ else
+ uasserted( 12502, "can't drop system ns" );
+ }
+
+ {
+ // remove from the system catalog
+ BSONObj cond = BSON( "name" << nsToDrop ); // { name: "colltodropname" }
+ string system_namespaces = cc().database()->name + ".system.namespaces";
+ /*int n = */ deleteObjects(system_namespaces.c_str(), cond, false, false, true);
+ // no check of return code as this ns won't exist for some of the new storage engines
+ }
+
+ // free extents
+ if( !d->firstExtent.isNull() ) {
+ freeExtents(d->firstExtent, d->lastExtent);
+ getDur().writingDiskLoc( d->firstExtent ).setInvalid();
+ getDur().writingDiskLoc( d->lastExtent ).setInvalid();
+ }
+
+ // remove from the catalog hashtable
+ cc().database()->namespaceIndex.kill_ns(nsToDrop.c_str());
+ }
+
+ void dropCollection( const string &name, string &errmsg, BSONObjBuilder &result ) {
+ log(1) << "dropCollection: " << name << endl;
+ NamespaceDetails *d = nsdetails(name.c_str());
+ if( d == 0 )
+ return;
+
+ BackgroundOperation::assertNoBgOpInProgForNs(name.c_str());
+
+ if ( d->nIndexes != 0 ) {
+ try {
+ assert( dropIndexes(d, name.c_str(), "*", errmsg, result, true) );
+ }
+ catch( DBException& e ) {
+ stringstream ss;
+ ss << "drop: dropIndexes for collection failed - consider trying repair ";
+ ss << " cause: " << e.what();
+ uasserted(12503,ss.str());
+ }
+ assert( d->nIndexes == 0 );
+ }
+ log(1) << "\t dropIndexes done" << endl;
+ result.append("ns", name.c_str());
+ ClientCursor::invalidate(name.c_str());
+ Top::global.collectionDropped( name );
+ NamespaceDetailsTransient::eraseForPrefix( name.c_str() );
+ dropNS(name);
+ }
+
+ /* unindex all keys in index for this record. */
+ static void _unindexRecord(IndexDetails& id, BSONObj& obj, const DiskLoc& dl, bool logMissing = true) {
+ BSONObjSet keys;
+ id.getKeysFromObject(obj, keys);
+ IndexInterface& ii = id.idxInterface();
+ for ( BSONObjSet::iterator i=keys.begin(); i != keys.end(); i++ ) {
+ BSONObj j = *i;
+
+ bool ok = false;
+ try {
+ ok = ii.unindex(id.head, id, j, dl);
+ }
+ catch (AssertionException& e) {
+ problem() << "Assertion failure: _unindex failed " << id.indexNamespace() << endl;
+ out() << "Assertion failure: _unindex failed: " << e.what() << '\n';
+ out() << " obj:" << obj.toString() << '\n';
+ out() << " key:" << j.toString() << '\n';
+ out() << " dl:" << dl.toString() << endl;
+ sayDbContext();
+ }
+
+ if ( !ok && logMissing ) {
+ log() << "unindex failed (key too big?) " << id.indexNamespace() << " key: " << j << " " << obj["_id"] << endl;
+ }
+ }
+ }
+ /* unindex all keys in all indexes for this record. */
+ static void unindexRecord(NamespaceDetails *d, Record *todelete, const DiskLoc& dl, bool noWarn = false) {
+ BSONObj obj(todelete);
+ int n = d->nIndexes;
+ for ( int i = 0; i < n; i++ )
+ _unindexRecord(d->idx(i), obj, dl, !noWarn);
+ if( d->indexBuildInProgress ) { // background index
+            // always pass nowarn here, since this key may be missing for valid reasons while we are concurrently building the index
+ _unindexRecord(d->idx(n), obj, dl, false);
+ }
+ }
+
+ /* deletes a record, just the pdfile portion -- no index cleanup, no cursor cleanup, etc.
+ caller must check if capped
+ */
+ void DataFileMgr::_deleteRecord(NamespaceDetails *d, const char *ns, Record *todelete, const DiskLoc& dl) {
+ /* remove ourself from the record next/prev chain */
+ {
+ if ( todelete->prevOfs != DiskLoc::NullOfs )
+ getDur().writingInt( todelete->getPrev(dl).rec()->nextOfs ) = todelete->nextOfs;
+ if ( todelete->nextOfs != DiskLoc::NullOfs )
+ getDur().writingInt( todelete->getNext(dl).rec()->prevOfs ) = todelete->prevOfs;
+ }
+
+ /* remove ourself from extent pointers */
+ {
+ Extent *e = getDur().writing( todelete->myExtent(dl) );
+ if ( e->firstRecord == dl ) {
+ if ( todelete->nextOfs == DiskLoc::NullOfs )
+ e->firstRecord.Null();
+ else
+ e->firstRecord.set(dl.a(), todelete->nextOfs);
+ }
+ if ( e->lastRecord == dl ) {
+ if ( todelete->prevOfs == DiskLoc::NullOfs )
+ e->lastRecord.Null();
+ else
+ e->lastRecord.set(dl.a(), todelete->prevOfs);
+ }
+ }
+
+ /* add to the free list */
+ {
+ {
+ NamespaceDetails::Stats *s = getDur().writing(&d->stats);
+ s->datasize -= todelete->netLength();
+ s->nrecords--;
+ }
+
+ if ( strstr(ns, ".system.indexes") ) {
+ /* temp: if in system.indexes, don't reuse, and zero out: we want to be
+ careful until validated more, as IndexDetails has pointers
+ to this disk location. so an incorrectly done remove would cause
+ a lot of problems.
+ */
+ memset(getDur().writingPtr(todelete, todelete->lengthWithHeaders), 0, todelete->lengthWithHeaders);
+ }
+ else {
+ DEV {
+ unsigned long long *p = (unsigned long long *) todelete->data;
+ *getDur().writing(p) = 0;
+ //DEV memset(todelete->data, 0, todelete->netLength()); // attempt to notice invalid reuse.
+ }
+ d->addDeletedRec((DeletedRecord*)todelete, dl);
+ }
+ }
+ }
+
+ void DataFileMgr::deleteRecord(const char *ns, Record *todelete, const DiskLoc& dl, bool cappedOK, bool noWarn, bool doLog ) {
+ dassert( todelete == dl.rec() );
+
+ NamespaceDetails* d = nsdetails(ns);
+ if ( d->capped && !cappedOK ) {
+ out() << "failing remove on a capped ns " << ns << endl;
+ uassert( 10089 , "can't remove from a capped collection" , 0 );
+ return;
+ }
+
+ BSONObj toDelete;
+ if ( doLog ) {
+ BSONElement e = dl.obj()["_id"];
+ if ( e.type() ) {
+ toDelete = e.wrap();
+ }
+ }
+
+ /* check if any cursors point to us. if so, advance them. */
+ ClientCursor::aboutToDelete(dl);
+
+ unindexRecord(d, todelete, dl, noWarn);
+
+ _deleteRecord(d, ns, todelete, dl);
+ NamespaceDetailsTransient::get( ns ).notifyOfWriteOp();
+
+ if ( ! toDelete.isEmpty() ) {
+ logOp( "d" , ns , toDelete );
+ }
+ }
+
+
+ /** Note: if the object shrinks a lot, we don't free up space, we leave extra at end of the record.
+ */
+ const DiskLoc DataFileMgr::updateRecord(
+ const char *ns,
+ NamespaceDetails *d,
+ NamespaceDetailsTransient *nsdt,
+ Record *toupdate, const DiskLoc& dl,
+ const char *_buf, int _len, OpDebug& debug, bool god) {
+
+ dassert( toupdate == dl.rec() );
+
+ BSONObj objOld(toupdate);
+ BSONObj objNew(_buf);
+ DEV assert( objNew.objsize() == _len );
+ DEV assert( objNew.objdata() == _buf );
+
+ if( !objNew.hasElement("_id") && objOld.hasElement("_id") ) {
+ /* add back the old _id value if the update removes it. Note this implementation is slow
+ (copies entire object multiple times), but this shouldn't happen often, so going for simple
+ code, not speed.
+ */
+ BSONObjBuilder b;
+ BSONElement e;
+ assert( objOld.getObjectID(e) );
+ b.append(e); // put _id first, for best performance
+ b.appendElements(objNew);
+ objNew = b.obj();
+ }
+
+ /* duplicate key check. we descend the btree twice - once for this check, and once for the actual inserts, further
+ below. that is suboptimal, but it's pretty complicated to do it the other way without rollbacks...
+ */
+ vector<IndexChanges> changes;
+ bool changedId = false;
+ getIndexChanges(changes, *d, objNew, objOld, changedId);
+ uassert( 13596 , str::stream() << "cannot change _id of a document old:" << objOld << " new:" << objNew , ! changedId );
+ dupCheck(changes, *d, dl);
+
+ if ( toupdate->netLength() < objNew.objsize() ) {
+ // doesn't fit. reallocate -----------------------------------------------------
+ uassert( 10003 , "failing update: objects in a capped ns cannot grow", !(d && d->capped));
+ d->paddingTooSmall();
+ debug.moved = true;
+ deleteRecord(ns, toupdate, dl);
+ return insert(ns, objNew.objdata(), objNew.objsize(), god);
+ }
+
+ nsdt->notifyOfWriteOp();
+ d->paddingFits();
+
+ /* have any index keys changed? */
+ {
+ int keyUpdates = 0;
+ int z = d->nIndexesBeingBuilt();
+ for ( int x = 0; x < z; x++ ) {
+ IndexDetails& idx = d->idx(x);
+ IndexInterface& ii = idx.idxInterface();
+ for ( unsigned i = 0; i < changes[x].removed.size(); i++ ) {
+ try {
+ bool found = ii.unindex(idx.head, idx, *changes[x].removed[i], dl);
+ if ( ! found ) {
+ RARELY warning() << "ns: " << ns << " couldn't unindex key: " << *changes[x].removed[i]
+ << " for doc: " << objOld["_id"] << endl;
+ }
+ }
+ catch (AssertionException&) {
+ debug.extra << " exception update unindex ";
+ problem() << " caught assertion update unindex " << idx.indexNamespace() << endl;
+ }
+ }
+ assert( !dl.isNull() );
+ BSONObj idxKey = idx.info.obj().getObjectField("key");
+ Ordering ordering = Ordering::make(idxKey);
+ keyUpdates += changes[x].added.size();
+ for ( unsigned i = 0; i < changes[x].added.size(); i++ ) {
+ try {
+ /* we did the dupCheck() above. so we don't have to worry about it here. */
+ ii.bt_insert(
+ idx.head,
+ dl, *changes[x].added[i], ordering, /*dupsAllowed*/true, idx);
+ }
+ catch (AssertionException& e) {
+ debug.extra << " exception update index ";
+ problem() << " caught assertion update index " << idx.indexNamespace() << " " << e << " " << objNew["_id"] << endl;
+ }
+ }
+ }
+
+ debug.keyUpdates = keyUpdates;
+ }
+
+ // update in place
+ int sz = objNew.objsize();
+ memcpy(getDur().writingPtr(toupdate->data, sz), objNew.objdata(), sz);
+ return dl;
+ }
+
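+    // growth sketch (worked from the code below): with len=500 and lastExtentLen=1,000,000 the next
+    // extent is max(initialSize(500)=32000, 4*1,000,000) = 4,000,000 bytes; once the previous extent
+    // passes 4MB the 1.35x factor takes over, and the result is always capped at Extent::maxSize().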
+ int Extent::followupSize(int len, int lastExtentLen) {
+ assert( len < Extent::maxSize() );
+ int x = initialSize(len);
+ // changed from 1.20 to 1.35 in v2.1.x to get to larger extent size faster
+ int y = (int) (lastExtentLen < 4000000 ? lastExtentLen * 4.0 : lastExtentLen * 1.35);
+ int sz = y > x ? y : x;
+
+ if ( sz < lastExtentLen ) {
+ // this means there was an int overflow
+ // so we should turn it into maxSize
+ sz = Extent::maxSize();
+ }
+ else if ( sz > Extent::maxSize() ) {
+ sz = Extent::maxSize();
+ }
+
+ sz = ((int)sz) & 0xffffff00;
+ assert( sz > len );
+
+ return sz;
+ }
+
+    /* step one of adding keys to index idxNo for a new record.
+       only the first key of the set is queued here; the caller finishes any remaining (multikey) work.
+    */
+ static void _addKeysToIndexStepOneOfTwo(BSONObjSet & /*out*/keys, NamespaceDetails *d, int idxNo, BSONObj& obj, DiskLoc recordLoc, IndexDetails& idx) {
+ idx.getKeysFromObject(obj, keys);
+ if( keys.empty() )
+ return;
+ bool dupsAllowed = !idx.unique();
+ BSONObj order = idx.keyPattern();
+ IndexInterface& ii = idx.idxInterface();
+ Ordering ordering = Ordering::make(order);
+
+ assert( !recordLoc.isNull() );
+
+ try {
+            // we can't do the two-step method with multikeys, as insertion of one key changes the index's
+            // structure. however, we can do the first key of the set, so we go ahead and do that FWIW
+ ii.phasedQueueItemToInsert(idxNo, idx.head, recordLoc, *keys.begin(), ordering, idx, dupsAllowed);
+ }
+ catch (AssertionException& e) {
+ if( e.getCode() == 10287 && idxNo == d->nIndexes ) {
+ DEV log() << "info: caught key already in index on bg indexing (ok)" << endl;
+ }
+ else {
+ throw;
+ }
+ }
+ }
+
+ namespace dur {
+ extern unsigned notesThisLock;
+ }
+
+ void upgradeToWritable(bool shouldBeUnlocked) {
+ // todo upgrade!
+ DEV {
+ // verify we haven't written yet (usually)
+
+ // test binary does special things so this would assert there so don't check there
+ if( shouldBeUnlocked && !cmdLine.binaryName.empty() && cmdLine.binaryName != "test" ) {
+ static unsigned long long zeroes;
+ static unsigned long long tot;
+ tot++;
+ if( dur::notesThisLock == 0 )
+ zeroes++;
+ if( tot > 1000 ) {
+ static int n;
+ DEV if( n++ == 0 )
+ log() << "warning upgradeToWritable: already in writable too often" << endl;
+ }
+ }
+ }
+ }
+
+ /** add index keys for a newly inserted record
+ done in two steps/phases to defer write lock portion
+ */
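+    // rough flow (as in the code below): step one queues at most one key per index via
+    // phasedQueueItemToInsert() without writing; after upgradeToWritable(), phasedFinish() performs
+    // the queued inserts; indexes that produced multiple keys are then finished with bt_insert().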
+ static void indexRecordUsingTwoSteps(NamespaceDetails *d, BSONObj obj, DiskLoc loc, bool shouldBeUnlocked) {
+ vector<int> multi;
+ vector<BSONObjSet> multiKeys;
+
+ IndexInterface::phasedBegin();
+
+ int n = d->nIndexesBeingBuilt();
+ {
+ BSONObjSet keys;
+ for ( int i = 0; i < n; i++ ) {
+ IndexDetails& idx = d->idx(i);
+ // this call throws on unique constraint violation. we haven't done any writes yet so that is fine.
+ _addKeysToIndexStepOneOfTwo(/*out*/keys, d, i, obj, loc, idx);
+ if( keys.size() > 1 ) {
+ multi.push_back(i);
+ multiKeys.push_back(BSONObjSet());
+ multiKeys[multiKeys.size()-1].swap(keys);
+ }
+ keys.clear();
+ }
+ }
+
+ // update lock to writable here. TODO
+
+ upgradeToWritable(shouldBeUnlocked);
+
+ IndexInterface::phasedFinish(); // step 2
+
+ // now finish adding multikeys
+ for( unsigned j = 0; j < multi.size(); j++ ) {
+ unsigned i = multi[j];
+ BSONObjSet& keys = multiKeys[j];
+ IndexDetails& idx = d->idx(i);
+ IndexInterface& ii = idx.idxInterface();
+ Ordering ordering = Ordering::make(idx.keyPattern());
+ d->setIndexIsMultikey(i);
+ for( BSONObjSet::iterator k = ++keys.begin()/*skip 1*/; k != keys.end(); k++ ) {
+ try {
+ ii.bt_insert(idx.head, loc, *k, ordering, !idx.unique(), idx);
+ } catch (AssertionException& e) {
+ if( e.getCode() == 10287 && (int) i == d->nIndexes ) {
+ DEV log() << "info: caught key already in index on bg indexing (ok)" << endl;
+ }
+ else {
+ /* roll back previously added index entries
+ note must do self index as it is multikey and could require some cleanup itself
+ */
+ for( int j = 0; j < n; j++ ) {
+ try {
+ _unindexRecord(d->idx(j), obj, loc, false);
+ }
+ catch(...) {
+ log(3) << "unindex fails on rollback after unique key constraint prevented insert\n";
+ }
+ }
+ throw;
+ }
+ }
+ }
+ }
+ }
+
+ /* add keys to index idxNo for a new record */
+ static void addKeysToIndex(NamespaceDetails *d, int idxNo, BSONObj& obj, DiskLoc recordLoc, bool dupsAllowed) {
+ IndexDetails& idx = d->idx(idxNo);
+ BSONObjSet keys;
+ idx.getKeysFromObject(obj, keys);
+ if( keys.empty() )
+ return;
+ BSONObj order = idx.keyPattern();
+ IndexInterface& ii = idx.idxInterface();
+ Ordering ordering = Ordering::make(order);
+ int n = 0;
+ for ( BSONObjSet::iterator i=keys.begin(); i != keys.end(); i++ ) {
+ if( ++n == 2 ) {
+ d->setIndexIsMultikey(idxNo);
+ }
+ assert( !recordLoc.isNull() );
+ try {
+ ii.bt_insert(idx.head, recordLoc, *i, ordering, dupsAllowed, idx);
+ }
+ catch (AssertionException& e) {
+ if( e.getCode() == 10287 && idxNo == d->nIndexes ) {
+ DEV log() << "info: caught key already in index on bg indexing (ok)" << endl;
+ continue;
+ }
+ if( !dupsAllowed ) {
+ // dup key exception, presumably.
+ throw;
+ }
+ problem() << " caught assertion addKeysToIndex " << idx.indexNamespace() << " " << obj["_id"] << endl;
+ }
+ }
+ }
+
+#if 0
+ void testSorting() {
+ BSONObjBuilder b;
+ b.appendNull("");
+ BSONObj x = b.obj();
+
+ BSONObjExternalSorter sorter(*IndexDetails::iis[1]);
+
+ sorter.add(x, DiskLoc(3,7));
+ sorter.add(x, DiskLoc(4,7));
+ sorter.add(x, DiskLoc(2,7));
+ sorter.add(x, DiskLoc(1,7));
+ sorter.add(x, DiskLoc(3,77));
+
+ sorter.sort();
+
+ auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
+ while( i->more() ) {
+ BSONObjExternalSorter::Data d = i->next();
+ /*cout << d.second.toString() << endl;
+ cout << d.first.objsize() << endl;
+ cout<<"SORTER next:" << d.first.toString() << endl;*/
+ }
+ }
+#endif
+
+ SortPhaseOne *precalced = 0;
+
+ template< class V >
+ void buildBottomUpPhases2And3(bool dupsAllowed, IndexDetails& idx, BSONObjExternalSorter& sorter,
+ bool dropDups, list<DiskLoc> &dupsToDrop, CurOp * op, SortPhaseOne *phase1, ProgressMeterHolder &pm,
+ Timer& t
+ )
+ {
+ BtreeBuilder<V> btBuilder(dupsAllowed, idx);
+ BSONObj keyLast;
+ auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
+ assert( pm == op->setMessage( "index: (2/3) btree bottom up" , phase1->nkeys , 10 ) );
+ while( i->more() ) {
+ RARELY killCurrentOp.checkForInterrupt();
+ BSONObjExternalSorter::Data d = i->next();
+
+ try {
+ if ( !dupsAllowed && dropDups ) {
+ LastError::Disabled led( lastError.get() );
+ btBuilder.addKey(d.first, d.second);
+ }
+ else {
+ btBuilder.addKey(d.first, d.second);
+ }
+ }
+ catch( AssertionException& e ) {
+ if ( dupsAllowed ) {
+                    // unknown exception??
+ throw;
+ }
+
+ if( e.interrupted() ) {
+ killCurrentOp.checkForInterrupt();
+ }
+
+ if ( ! dropDups )
+ throw;
+
+ /* we could queue these on disk, but normally there are very few dups, so instead we
+ keep in ram and have a limit.
+ */
+ dupsToDrop.push_back(d.second);
+ uassert( 10092 , "too may dups on index build with dropDups=true", dupsToDrop.size() < 1000000 );
+ }
+ pm.hit();
+ }
+ pm.finished();
+ op->setMessage( "index: (3/3) btree-middle" );
+ log(t.seconds() > 10 ? 0 : 1 ) << "\t done building bottom layer, going to commit" << endl;
+ btBuilder.commit();
+ if ( btBuilder.getn() != phase1->nkeys && ! dropDups ) {
+ warning() << "not all entries were added to the index, probably some keys were too large" << endl;
+ }
+ }
+
+ // throws DBException
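+    // phase overview (mirroring the progress messages below): (1/3) scan the collection and
+    // externally sort all index keys, (2/3) bulk-load the btree bottom-up from the sorted stream,
+    // (3/3) build the interior ("middle") levels and commit.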
+ unsigned long long fastBuildIndex(const char *ns, NamespaceDetails *d, IndexDetails& idx, int idxNo) {
+ CurOp * op = cc().curop();
+
+ Timer t;
+
+ tlog(1) << "fastBuildIndex " << ns << " idxNo:" << idxNo << ' ' << idx.info.obj().toString() << endl;
+
+ bool dupsAllowed = !idx.unique();
+ bool dropDups = idx.dropDups() || inDBRepair;
+ BSONObj order = idx.keyPattern();
+
+ getDur().writingDiskLoc(idx.head).Null();
+
+ if ( logLevel > 1 ) printMemInfo( "before index start" );
+
+ /* get and sort all the keys ----- */
+ ProgressMeterHolder pm( op->setMessage( "index: (1/3) external sort" , d->stats.nrecords , 10 ) );
+ SortPhaseOne _ours;
+ SortPhaseOne *phase1 = precalced;
+ if( phase1 == 0 ) {
+ phase1 = &_ours;
+ SortPhaseOne& p1 = *phase1;
+ shared_ptr<Cursor> c = theDataFileMgr.findAll(ns);
+ p1.sorter.reset( new BSONObjExternalSorter(idx.idxInterface(), order) );
+ p1.sorter->hintNumObjects( d->stats.nrecords );
+ const IndexSpec& spec = idx.getSpec();
+ while ( c->ok() ) {
+ BSONObj o = c->current();
+ DiskLoc loc = c->currLoc();
+ p1.addKeys(spec, o, loc);
+ c->advance();
+ pm.hit();
+ if ( logLevel > 1 && p1.n % 10000 == 0 ) {
+ printMemInfo( "\t iterating objects" );
+ }
+ };
+ }
+ pm.finished();
+
+ BSONObjExternalSorter& sorter = *(phase1->sorter);
+
+ if( phase1->multi )
+ d->setIndexIsMultikey(idxNo);
+
+ if ( logLevel > 1 ) printMemInfo( "before final sort" );
+ phase1->sorter->sort();
+ if ( logLevel > 1 ) printMemInfo( "after final sort" );
+
+ log(t.seconds() > 5 ? 0 : 1) << "\t external sort used : " << sorter.numFiles() << " files " << " in " << t.seconds() << " secs" << endl;
+
+ list<DiskLoc> dupsToDrop;
+
+ /* build index --- */
+ if( idx.version() == 0 )
+ buildBottomUpPhases2And3<V0>(dupsAllowed, idx, sorter, dropDups, dupsToDrop, op, phase1, pm, t);
+ else if( idx.version() == 1 )
+ buildBottomUpPhases2And3<V1>(dupsAllowed, idx, sorter, dropDups, dupsToDrop, op, phase1, pm, t);
+ else
+ assert(false);
+
+ log(1) << "\t fastBuildIndex dupsToDrop:" << dupsToDrop.size() << endl;
+
+ for( list<DiskLoc>::iterator i = dupsToDrop.begin(); i != dupsToDrop.end(); i++ ){
+ theDataFileMgr.deleteRecord( ns, i->rec(), *i, false /* cappedOk */ , true /* noWarn */ , isMaster( ns ) /* logOp */ );
+ getDur().commitIfNeeded();
+ }
+
+ return phase1->n;
+ }
+
+ class BackgroundIndexBuildJob : public BackgroundOperation {
+
+ unsigned long long addExistingToIndex(const char *ns, NamespaceDetails *d, IndexDetails& idx, int idxNo) {
+ bool dupsAllowed = !idx.unique();
+ bool dropDups = idx.dropDups();
+
+ ProgressMeter& progress = cc().curop()->setMessage( "bg index build" , d->stats.nrecords );
+
+ unsigned long long n = 0;
+ auto_ptr<ClientCursor> cc;
+ {
+ shared_ptr<Cursor> c = theDataFileMgr.findAll(ns);
+ cc.reset( new ClientCursor(QueryOption_NoCursorTimeout, c, ns) );
+ }
+ CursorId id = cc->cursorid();
+
+ while ( cc->ok() ) {
+ BSONObj js = cc->current();
+ try {
+ {
+ if ( !dupsAllowed && dropDups ) {
+ LastError::Disabled led( lastError.get() );
+ addKeysToIndex(d, idxNo, js, cc->currLoc(), dupsAllowed);
+ }
+ else {
+ addKeysToIndex(d, idxNo, js, cc->currLoc(), dupsAllowed);
+ }
+ }
+ cc->advance();
+ }
+ catch( AssertionException& e ) {
+ if( e.interrupted() ) {
+ killCurrentOp.checkForInterrupt();
+ }
+
+ if ( dropDups ) {
+ DiskLoc toDelete = cc->currLoc();
+ bool ok = cc->advance();
+ cc->updateLocation();
+ theDataFileMgr.deleteRecord( ns, toDelete.rec(), toDelete, false, true , true );
+ if( ClientCursor::find(id, false) == 0 ) {
+ cc.release();
+ if( !ok ) {
+ /* we were already at the end. normal. */
+ }
+ else {
+ uasserted(12585, "cursor gone during bg index; dropDups");
+ }
+ break;
+ }
+ }
+ else {
+ log() << "background addExistingToIndex exception " << e.what() << endl;
+ throw;
+ }
+ }
+ n++;
+ progress.hit();
+
+ getDur().commitIfNeeded();
+
+ if ( cc->yieldSometimes( ClientCursor::WillNeed ) ) {
+ progress.setTotalWhileRunning( d->stats.nrecords );
+ }
+ else {
+ cc.release();
+ uasserted(12584, "cursor gone during bg index");
+ break;
+ }
+ }
+ progress.finished();
+ return n;
+ }
+
+ /* we do set a flag in the namespace for quick checking, but this is our authoritative info -
+ that way on a crash/restart, we don't think we are still building one. */
+ set<NamespaceDetails*> bgJobsInProgress;
+
+ void prep(const char *ns, NamespaceDetails *d) {
+ assertInWriteLock();
+ uassert( 13130 , "can't start bg index b/c in recursive lock (db.eval?)" , mongo::d.dbMutex.getState() == 1 );
+ bgJobsInProgress.insert(d);
+ }
+ void done(const char *ns, NamespaceDetails *d) {
+ NamespaceDetailsTransient::get(ns).addedIndex(); // clear query optimizer cache
+ assertInWriteLock();
+ }
+
+ public:
+ BackgroundIndexBuildJob(const char *ns) : BackgroundOperation(ns) { }
+
+ unsigned long long go(string ns, NamespaceDetails *d, IndexDetails& idx, int idxNo) {
+ unsigned long long n = 0;
+
+ prep(ns.c_str(), d);
+ assert( idxNo == d->nIndexes );
+ try {
+ idx.head.writing() = idx.idxInterface().addBucket(idx);
+ n = addExistingToIndex(ns.c_str(), d, idx, idxNo);
+ }
+ catch(...) {
+ if( cc().database() && nsdetails(ns.c_str()) == d ) {
+ assert( idxNo == d->nIndexes );
+ done(ns.c_str(), d);
+ }
+ else {
+ log() << "ERROR: db gone during bg index?" << endl;
+ }
+ throw;
+ }
+ assert( idxNo == d->nIndexes );
+ done(ns.c_str(), d);
+ return n;
+ }
+ };
+
+ /**
+ * For the lifetime of this object, an index build is indicated on the specified
+ * namespace and the newest index is marked as absent. This simplifies
+ * the cleanup required on recovery.
+ */
+ class RecoverableIndexState {
+ public:
+ RecoverableIndexState( NamespaceDetails *d ) : _d( d ) {
+ indexBuildInProgress() = 1;
+ nIndexes()--;
+ }
+ ~RecoverableIndexState() {
+ DESTRUCTOR_GUARD (
+ nIndexes()++;
+ indexBuildInProgress() = 0;
+ )
+ }
+ private:
+ int &nIndexes() { return getDur().writingInt( _d->nIndexes ); }
+ int &indexBuildInProgress() { return getDur().writingInt( _d->indexBuildInProgress ); }
+ NamespaceDetails *_d;
+ };
+
+ // throws DBException
+ static void buildAnIndex(string ns, NamespaceDetails *d, IndexDetails& idx, int idxNo, bool background) {
+ tlog() << "build index " << ns << ' ' << idx.keyPattern() << ( background ? " background" : "" ) << endl;
+ Timer t;
+ unsigned long long n;
+
+ assert( !BackgroundOperation::inProgForNs(ns.c_str()) ); // should have been checked earlier, better not be...
+ assert( d->indexBuildInProgress == 0 );
+ assertInWriteLock();
+ RecoverableIndexState recoverable( d );
+
+ // Build index spec here in case the collection is empty and the index details are invalid
+ idx.getSpec();
+
+ if( inDBRepair || !background ) {
+ n = fastBuildIndex(ns.c_str(), d, idx, idxNo);
+ assert( !idx.head.isNull() );
+ }
+ else {
+ BackgroundIndexBuildJob j(ns.c_str());
+ n = j.go(ns, d, idx, idxNo);
+ }
+ tlog() << "build index done " << n << " records " << t.millis() / 1000.0 << " secs" << endl;
+ }
+
+ /* add keys to indexes for a new record */
+#if 0
+ static void oldIndexRecord__notused(NamespaceDetails *d, BSONObj obj, DiskLoc loc) {
+ int n = d->nIndexesBeingBuilt();
+ for ( int i = 0; i < n; i++ ) {
+ try {
+ bool unique = d->idx(i).unique();
+ addKeysToIndex(d, i, obj, loc, /*dupsAllowed*/!unique);
+ }
+ catch( DBException& ) {
+ /* try to roll back previously added index entries
+ note <= i (not < i) is important here as the index we were just attempted
+ may be multikey and require some cleanup.
+ */
+ for( int j = 0; j <= i; j++ ) {
+ try {
+ _unindexRecord(d->idx(j), obj, loc, false);
+ }
+ catch(...) {
+ log(3) << "unindex fails on rollback after unique failure\n";
+ }
+ }
+ throw;
+ }
+ }
+ }
+#endif
+
+ extern BSONObj id_obj; // { _id : 1 }
+
+ void ensureHaveIdIndex(const char *ns) {
+ NamespaceDetails *d = nsdetails(ns);
+ if ( d == 0 || (d->flags & NamespaceDetails::Flag_HaveIdIndex) )
+ return;
+
+ *getDur().writing(&d->flags) |= NamespaceDetails::Flag_HaveIdIndex;
+
+ {
+ NamespaceDetails::IndexIterator i = d->ii();
+ while( i.more() ) {
+ if( i.next().isIdIndex() )
+ return;
+ }
+ }
+
+ string system_indexes = cc().database()->name + ".system.indexes";
+
+ BSONObjBuilder b;
+ b.append("name", "_id_");
+ b.append("ns", ns);
+ b.append("key", id_obj);
+ BSONObj o = b.done();
+
+ /* edge case: note the insert could fail if we have hit maxindexes already */
+ theDataFileMgr.insert(system_indexes.c_str(), o.objdata(), o.objsize(), true);
+ }
+
+#pragma pack(1)
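+    // a hand-packed raw BSON element, used when an inserted document needs an _id added:
+    // 1 type byte (jstOID) + 4 bytes for the field name "_id\0" + a 12-byte OID = 17 bytes total.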
+ struct IDToInsert_ {
+ char type;
+ char _id[4];
+ OID oid;
+ IDToInsert_() {
+ type = (char) jstOID;
+ strcpy(_id, "_id");
+ assert( sizeof(IDToInsert_) == 17 );
+ }
+ } idToInsert_;
+ struct IDToInsert : public BSONElement {
+ IDToInsert() : BSONElement( ( char * )( &idToInsert_ ) ) {}
+ } idToInsert;
+#pragma pack()
+
+ void DataFileMgr::insertAndLog( const char *ns, const BSONObj &o, bool god ) {
+ BSONObj tmp = o;
+ insertWithObjMod( ns, tmp, god );
+ logOp( "i", ns, tmp );
+ }
+
+ /** @param o the object to insert. can be modified to add _id and thus be an in/out param
+ */
+ DiskLoc DataFileMgr::insertWithObjMod(const char *ns, BSONObj &o, bool god) {
+ bool addedID = false;
+ DiskLoc loc = insert( ns, o.objdata(), o.objsize(), god, true, &addedID );
+ if( addedID && !loc.isNull() )
+ o = BSONObj( loc.rec() );
+ return loc;
+ }
+
+ bool prepareToBuildIndex(const BSONObj& io, bool god, string& sourceNS, NamespaceDetails *&sourceCollection, BSONObj& fixedIndexObject );
+
+    // We are now doing two btree scans for all unique indexes (one here, and one when we've
+    // written the record to the collection). This could be made more efficient by inserting
+ // dummy data here, keeping pointers to the btree nodes holding the dummy data and then
+ // updating the dummy data with the DiskLoc of the real record.
+ void checkNoIndexConflicts( NamespaceDetails *d, const BSONObj &obj ) {
+ for ( int idxNo = 0; idxNo < d->nIndexes; idxNo++ ) {
+ if( d->idx(idxNo).unique() ) {
+ IndexDetails& idx = d->idx(idxNo);
+ BSONObjSet keys;
+ idx.getKeysFromObject(obj, keys);
+ BSONObj order = idx.keyPattern();
+ IndexInterface& ii = idx.idxInterface();
+ for ( BSONObjSet::iterator i=keys.begin(); i != keys.end(); i++ ) {
+ // WARNING: findSingle may not be compound index safe. this may need to change. see notes in
+ // findSingle code.
+ uassert( 12582, "duplicate key insert for unique index of capped collection",
+ ii.findSingle(idx, idx.head, *i ).isNull() );
+ }
+ }
+ }
+ }
+
+ /** add a record to the end of the linked list chain within this extent.
+ require: you must have already declared write intent for the record header.
+ */
+ void addRecordToRecListInExtent(Record *r, DiskLoc loc) {
+ dassert( loc.rec() == r );
+ Extent *e = r->myExtent(loc);
+ if ( e->lastRecord.isNull() ) {
+ Extent::FL *fl = getDur().writing(e->fl());
+ fl->firstRecord = fl->lastRecord = loc;
+ r->prevOfs = r->nextOfs = DiskLoc::NullOfs;
+ }
+ else {
+ Record *oldlast = e->lastRecord.rec();
+ r->prevOfs = e->lastRecord.getOfs();
+ r->nextOfs = DiskLoc::NullOfs;
+ getDur().writingInt(oldlast->nextOfs) = loc.getOfs();
+ getDur().writingDiskLoc(e->lastRecord) = loc;
+ }
+ }
+
+ NOINLINE_DECL DiskLoc outOfSpace(const char *ns, NamespaceDetails *d, int lenWHdr, bool god, DiskLoc extentLoc) {
+ DiskLoc loc;
+ if ( d->capped == 0 ) { // size capped doesn't grow
+ log(1) << "allocating new extent for " << ns << " padding:" << d->paddingFactor << " lenWHdr: " << lenWHdr << endl;
+ cc().database()->allocExtent(ns, Extent::followupSize(lenWHdr, d->lastExtentSize), false, !god);
+ loc = d->alloc(ns, lenWHdr, extentLoc);
+ if ( loc.isNull() ) {
+ log() << "warning: alloc() failed after allocating new extent. lenWHdr: " << lenWHdr << " last extent size:" << d->lastExtentSize << "; trying again\n";
+ for ( int z=0; z<10 && lenWHdr > d->lastExtentSize; z++ ) {
+ log() << "try #" << z << endl;
+ cc().database()->allocExtent(ns, Extent::followupSize(lenWHdr, d->lastExtentSize), false, !god);
+ loc = d->alloc(ns, lenWHdr, extentLoc);
+ if ( ! loc.isNull() )
+ break;
+ }
+ }
+ }
+ return loc;
+ }
+
+ /** used by insert and also compact
+ * @return null loc if out of space
+ */
+ DiskLoc allocateSpaceForANewRecord(const char *ns, NamespaceDetails *d, int lenWHdr, bool god) {
+ DiskLoc extentLoc;
+ DiskLoc loc = d->alloc(ns, lenWHdr, extentLoc);
+ if ( loc.isNull() ) {
+ loc = outOfSpace(ns, d, lenWHdr, god, extentLoc);
+ }
+ return loc;
+ }
+
+ bool NOINLINE_DECL insert_checkSys(const char *sys, const char *ns, bool& wouldAddIndex, const void *obuf, bool god) {
+ uassert( 10095 , "attempt to insert in reserved database name 'system'", sys != ns);
+ if ( strstr(ns, ".system.") ) {
+ // later:check for dba-type permissions here if have that at some point separate
+ if ( strstr(ns, ".system.indexes" ) )
+ wouldAddIndex = true;
+ else if ( legalClientSystemNS( ns , true ) ) {
+ if ( obuf && strstr( ns , ".system.users" ) ) {
+ BSONObj t( reinterpret_cast<const char *>( obuf ) );
+ uassert( 14051 , "system.user entry needs 'user' field to be a string" , t["user"].type() == String );
+ uassert( 14052 , "system.user entry needs 'pwd' field to be a string" , t["pwd"].type() == String );
+ uassert( 14053 , "system.user entry needs 'user' field to be non-empty" , t["user"].String().size() );
+ uassert( 14054 , "system.user entry needs 'pwd' field to be non-empty" , t["pwd"].String().size() );
+ }
+ }
+ else if ( !god ) {
+                // todo: this should probably uassert rather than doing this:
+ log() << "ERROR: attempt to insert in system namespace " << ns << endl;
+ return false;
+ }
+ }
+ return true;
+ }
+
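+    /** handle the first insert into a namespace that does not exist yet: register it in the
+        catalog, allocate its first extent (using a special size when the namespace looks like
+        an index), and create the _id index for non-god inserts.
+    */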
+ NOINLINE_DECL NamespaceDetails* insert_newNamespace(const char *ns, int len, bool god) {
+ addNewNamespaceToCatalog(ns);
+ /* todo: shouldn't be in the namespace catalog until after the allocations here work.
+ also if this is an addIndex, those checks should happen before this!
+ */
+ // This may create first file in the database.
+ int ies = Extent::initialSize(len);
+ if( str::contains(ns, '$') && len + Record::HeaderSize >= BtreeData_V1::BucketSize - 256 && len + Record::HeaderSize <= BtreeData_V1::BucketSize + 256 ) {
+ // probably an index. so we pick a value here for the first extent instead of using initialExtentSize() which is more
+ // for user collections. TODO: we could look at the # of records in the parent collection to be smarter here.
+ ies = (32+4) * 1024;
+ }
+ cc().database()->allocExtent(ns, ies, false, false);
+ NamespaceDetails *d = nsdetails(ns);
+ if ( !god )
+ ensureIdIndexForNewNs(ns);
+ return d;
+ }
+
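+    /** the document just inserted was an index spec (an insert into system.indexes): build that
+        index now. if the build fails, the partially created index is dropped, the original
+        error is restored, and the exception is rethrown.
+    */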
+ void NOINLINE_DECL insert_makeIndex(NamespaceDetails *tableToIndex, const string& tabletoidxns, const DiskLoc& loc) {
+ uassert( 13143 , "can't create index on system.indexes" , tabletoidxns.find( ".system.indexes" ) == string::npos );
+
+ BSONObj info = loc.obj();
+ bool background = info["background"].trueValue();
+ // if this is not readable, let's move things along
+ if (background && ((!theReplSet && cc().isSyncThread()) || (theReplSet && !theReplSet->isSecondary()))) {
+ log() << "info: indexing in foreground on this replica; was a background index build on the primary" << endl;
+ background = false;
+ }
+
+ int idxNo = tableToIndex->nIndexes;
+ IndexDetails& idx = tableToIndex->addIndex(tabletoidxns.c_str(), !background); // clear transient info caches so they refresh; increments nIndexes
+ getDur().writingDiskLoc(idx.info) = loc;
+ try {
+ buildAnIndex(tabletoidxns, tableToIndex, idx, idxNo, background);
+ }
+ catch( DBException& e ) {
+ // save our error msg string as an exception or dropIndexes will overwrite our message
+ LastError *le = lastError.get();
+ int savecode = 0;
+ string saveerrmsg;
+ if ( le ) {
+ savecode = le->code;
+ saveerrmsg = le->msg;
+ }
+ else {
+ savecode = e.getCode();
+ saveerrmsg = e.what();
+ }
+
+ // roll back this index
+ string name = idx.indexName();
+ BSONObjBuilder b;
+ string errmsg;
+ bool ok = dropIndexes(tableToIndex, tabletoidxns.c_str(), name.c_str(), errmsg, b, true);
+ if( !ok ) {
+ log() << "failed to drop index after a unique key error building it: " << errmsg << ' ' << tabletoidxns << ' ' << name << endl;
+ }
+
+ assert( le && !saveerrmsg.empty() );
+ raiseError(savecode,saveerrmsg.c_str());
+ throw;
+ }
+ }
+
+ /* if god==true, you may pass in obuf of NULL and then populate the returned DiskLoc
+ after the call -- that will prevent a double buffer copy in some cases (btree.cpp).
+
+ @param mayAddIndex almost always true, except for invocation from rename namespace command.
+       @param addedID if not null, set to true if an _id element is added. if used, the caller must
+       set it to false before calling.
+ */
+
+ DiskLoc DataFileMgr::insert(const char *ns, const void *obuf, int len, bool god, bool mayAddIndex, bool *addedID) {
+ bool wouldAddIndex = false;
+ massert( 10093 , "cannot insert into reserved $ collection", god || NamespaceString::normal( ns ) );
+ uassert( 10094 , str::stream() << "invalid ns: " << ns , isValidNS( ns ) );
+ {
+ const char *sys = strstr(ns, "system.");
+ if ( sys && !insert_checkSys(sys, ns, wouldAddIndex, obuf, god) )
+ return DiskLoc();
+ }
+ bool addIndex = wouldAddIndex && mayAddIndex;
+
+ NamespaceDetails *d = nsdetails(ns);
+ if ( d == 0 ) {
+ d = insert_newNamespace(ns, len, god);
+ }
+
+ NamespaceDetails *tableToIndex = 0;
+
+ string tabletoidxns;
+ BSONObj fixedIndexObject;
+ if ( addIndex ) {
+ assert( obuf );
+ BSONObj io((const char *) obuf);
+ if( !prepareToBuildIndex(io, god, tabletoidxns, tableToIndex, fixedIndexObject ) ) {
+ // prepare creates _id itself, or this indicates to fail the build silently (such
+ // as if index already exists)
+ return DiskLoc();
+ }
+ if ( ! fixedIndexObject.isEmpty() ) {
+ obuf = fixedIndexObject.objdata();
+ len = fixedIndexObject.objsize();
+ }
+ }
+
+ int addID = 0; // 0 if not adding _id; if adding, the length of that new element
+ if( !god ) {
+ /* Check if we have an _id field. If we don't, we'll add it.
+ Note that btree buckets which we insert aren't BSONObj's, but in that case god==true.
+ */
+ BSONObj io((const char *) obuf);
+ BSONElement idField = io.getField( "_id" );
+ uassert( 10099 , "_id cannot be an array", idField.type() != Array );
+ // we don't add _id for capped collections as they don't have an _id index
+ if( idField.eoo() && !wouldAddIndex && strstr(ns, ".local.") == 0 && d->haveIdIndex() ) {
+ if( addedID )
+ *addedID = true;
+ addID = len;
+ idToInsert_.oid.init();
+ len += idToInsert.size();
+ }
+
+ BSONElementManipulator::lookForTimestamps( io );
+ }
+
+ int lenWHdr = len + Record::HeaderSize;
+ lenWHdr = (int) (lenWHdr * d->paddingFactor);
+ if ( lenWHdr == 0 ) {
+ // old datafiles, backward compatible here.
+ assert( d->paddingFactor == 0 );
+ *getDur().writing(&d->paddingFactor) = 1.0;
+ lenWHdr = len + Record::HeaderSize;
+ }
+
+ // If the collection is capped, check if the new object will violate a unique index
+ // constraint before allocating space.
+ if ( d->nIndexes && d->capped && !god ) {
+ checkNoIndexConflicts( d, BSONObj( reinterpret_cast<const char *>( obuf ) ) );
+ }
+
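+        // decide whether to index "early": when the record's location can be predicted with
+        // allocWillBeAt(), the index keys are inserted first and the real allocation is done
+        // afterwards (and verified to match). not done when adding an _id, when building an
+        // index, or for capped collections.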
+ bool earlyIndex = true;
+ DiskLoc loc;
+ if( addID || tableToIndex || d->capped ) {
+            // if we need to add an _id, we don't do the early indexing. this is not the common case, so that is acceptable
+ earlyIndex = false;
+ loc = allocateSpaceForANewRecord(ns, d, lenWHdr, god);
+ }
+ else {
+ loc = d->allocWillBeAt(ns, lenWHdr);
+ if( loc.isNull() ) {
+ // need to get a new extent so we have to do the true alloc now (not common case)
+ earlyIndex = false;
+ loc = allocateSpaceForANewRecord(ns, d, lenWHdr, god);
+ }
+ }
+ if ( loc.isNull() ) {
+ log() << "insert: couldn't alloc space for object ns:" << ns << " capped:" << d->capped << endl;
+ assert(d->capped);
+ return DiskLoc();
+ }
+
+ if( earlyIndex ) {
+ // add record to indexes using two step method so we can do the reading outside a write lock
+ if ( d->nIndexes ) {
+ assert( obuf );
+ BSONObj obj((const char *) obuf);
+ try {
+ indexRecordUsingTwoSteps(d, obj, loc, true);
+ }
+ catch( AssertionException& ) {
+ // should be a dup key error on _id index
+ dassert( !tableToIndex && !d->capped );
+ // no need to delete/rollback the record as it was not added yet
+ throw;
+ }
+ }
+ // really allocate now
+ DiskLoc real = allocateSpaceForANewRecord(ns, d, lenWHdr, god);
+ assert( real == loc );
+ }
+
+ Record *r = loc.rec();
+ {
+ assert( r->lengthWithHeaders >= lenWHdr );
+ r = (Record*) getDur().writingPtr(r, lenWHdr);
+ if( addID ) {
+ /* a little effort was made here to avoid a double copy when we add an ID */
+ ((int&)*r->data) = *((int*) obuf) + idToInsert.size();
+ memcpy(r->data+4, idToInsert.rawdata(), idToInsert.size());
+ memcpy(r->data+4+idToInsert.size(), ((char *)obuf)+4, addID-4);
+ }
+ else {
+ if( obuf ) // obuf can be null from internal callers
+ memcpy(r->data, obuf, len);
+ }
+ }
+
+ addRecordToRecListInExtent(r, loc);
+
+ /* durability todo : this could be a bit annoying / slow to record constantly */
+ {
+ NamespaceDetails::Stats *s = getDur().writing(&d->stats);
+ s->datasize += r->netLength();
+ s->nrecords++;
+ }
+
+ // we don't bother resetting query optimizer stats for the god tables - also god is true when adding a btree bucket
+ if ( !god )
+ NamespaceDetailsTransient::get( ns ).notifyOfWriteOp();
+
+ if ( tableToIndex ) {
+ insert_makeIndex(tableToIndex, tabletoidxns, loc);
+ }
+
+ /* add this record to our indexes */
+ if ( !earlyIndex && d->nIndexes ) {
+ try {
+ BSONObj obj(r->data);
+ // not sure which of these is better -- either can be used. oldIndexRecord may be faster,
+ // but twosteps handles dup key errors more efficiently.
+ //oldIndexRecord(d, obj, loc);
+ indexRecordUsingTwoSteps(d, obj, loc, false);
+
+ }
+ catch( AssertionException& e ) {
+ // should be a dup key error on _id index
+ if( tableToIndex || d->capped ) {
+ massert( 12583, "unexpected index insertion failure on capped collection", !d->capped );
+ string s = e.toString();
+ s += " : on addIndex/capped - collection and its index will not match";
+ uassert_nothrow(s.c_str());
+ error() << s << endl;
+ }
+ else {
+ // normal case -- we can roll back
+ _deleteRecord(d, ns, r, loc);
+ throw;
+ }
+ }
+ }
+
+ d->paddingFits();
+
+ return loc;
+ }
+
+ /* special version of insert for transaction logging -- streamlined a bit.
+ assumes ns is capped and no indexes
+ */
+ Record* DataFileMgr::fast_oplog_insert(NamespaceDetails *d, const char *ns, int len) {
+ assert( d );
+ RARELY assert( d == nsdetails(ns) );
+ DEV assert( d == nsdetails(ns) );
+
+ DiskLoc extentLoc;
+ int lenWHdr = len + Record::HeaderSize;
+ DiskLoc loc = d->alloc(ns, lenWHdr, extentLoc);
+ assert( !loc.isNull() );
+
+ Record *r = loc.rec();
+ assert( r->lengthWithHeaders >= lenWHdr );
+
+ Extent *e = r->myExtent(loc);
+ if ( e->lastRecord.isNull() ) {
+ Extent::FL *fl = getDur().writing( e->fl() );
+ fl->firstRecord = fl->lastRecord = loc;
+
+ Record::NP *np = getDur().writing(r->np());
+ np->nextOfs = np->prevOfs = DiskLoc::NullOfs;
+ }
+ else {
+ Record *oldlast = e->lastRecord.rec();
+ Record::NP *np = getDur().writing(r->np());
+ np->prevOfs = e->lastRecord.getOfs();
+ np->nextOfs = DiskLoc::NullOfs;
+ getDur().writingInt( oldlast->nextOfs ) = loc.getOfs();
+ e->lastRecord.writing() = loc;
+ }
+
+ /* todo: don't update for oplog? seems wasteful. */
+ {
+ NamespaceDetails::Stats *s = getDur().writing(&d->stats);
+ s->datasize += r->netLength();
+ s->nrecords++;
+ }
+
+ return r;
+ }
+
+} // namespace mongo
+
+#include "clientcursor.h"
+
+namespace mongo {
+
+ void dropAllDatabasesExceptLocal() {
+ writelock lk("");
+
+ vector<string> n;
+ getDatabaseNames(n);
+ if( n.size() == 0 ) return;
+ log() << "dropAllDatabasesExceptLocal " << n.size() << endl;
+ for( vector<string>::iterator i = n.begin(); i != n.end(); i++ ) {
+ if( *i != "local" ) {
+ Client::Context ctx(*i);
+ dropDatabase(*i);
+ }
+ }
+ }
+
+ void dropDatabase(string db) {
+ log(1) << "dropDatabase " << db << endl;
+ Database *d = cc().database();
+ assert( d );
+ assert( d->name == db );
+
+ BackgroundOperation::assertNoBgOpInProgForDb(d->name.c_str());
+
+ mongo::d.dbMutex.assertWriteLocked();
+
+        // Not sure we need this here, so removed. If we do, we need to move it down into
+        // the other calls, both (1) because they could be called from elsewhere and
+        // (2) to keep the lock order right - groupcommitmutex must be locked before
+        // mmmutex (if both are locked).
+ //
+ // RWLockRecursive::Exclusive lk(MongoFile::mmmutex);
+
+ getDur().syncDataAndTruncateJournal();
+
+ Database::closeDatabase( d->name.c_str(), d->path );
+ d = 0; // d is now deleted
+
+ _deleteDataFiles( db.c_str() );
+ }
+
+ typedef boost::filesystem::path Path;
+
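+    /** rename that falls back to copy + remove when the rename fails (e.g. across partitions) */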
+ void boostRenameWrapper( const Path &from, const Path &to ) {
+ try {
+ boost::filesystem::rename( from, to );
+ }
+ catch ( const boost::filesystem::filesystem_error & ) {
+ // boost rename doesn't work across partitions
+ boost::filesystem::copy_file( from, to);
+ boost::filesystem::remove( from );
+ }
+ }
+
+ // back up original database files to 'temp' dir
+ void _renameForBackup( const char *database, const Path &reservedPath ) {
+ Path newPath( reservedPath );
+ if ( directoryperdb )
+ newPath /= database;
+ class Renamer : public FileOp {
+ public:
+ Renamer( const Path &newPath ) : newPath_( newPath ) {}
+ private:
+ const boost::filesystem::path &newPath_;
+ virtual bool apply( const Path &p ) {
+ if ( !boost::filesystem::exists( p ) )
+ return false;
+ boostRenameWrapper( p, newPath_ / ( p.leaf() + ".bak" ) );
+ return true;
+ }
+ virtual const char * op() const {
+ return "renaming";
+ }
+ } renamer( newPath );
+ _applyOpToDataFiles( database, renamer, true );
+ }
+
+ // move temp files to standard data dir
+ void _replaceWithRecovered( const char *database, const char *reservedPathString ) {
+ Path newPath( dbpath );
+ if ( directoryperdb )
+ newPath /= database;
+ class Replacer : public FileOp {
+ public:
+ Replacer( const Path &newPath ) : newPath_( newPath ) {}
+ private:
+ const boost::filesystem::path &newPath_;
+ virtual bool apply( const Path &p ) {
+ if ( !boost::filesystem::exists( p ) )
+ return false;
+ boostRenameWrapper( p, newPath_ / p.leaf() );
+ return true;
+ }
+ virtual const char * op() const {
+ return "renaming";
+ }
+ } replacer( newPath );
+ _applyOpToDataFiles( database, replacer, true, reservedPathString );
+ }
+
+ // generate a directory name for storing temp data files
+ Path uniqueReservedPath( const char *prefix ) {
+ Path repairPath = Path( repairpath );
+ Path reservedPath;
+ int i = 0;
+ bool exists = false;
+ do {
+ stringstream ss;
+ ss << prefix << "_repairDatabase_" << i++;
+ reservedPath = repairPath / ss.str();
+ BOOST_CHECK_EXCEPTION( exists = boost::filesystem::exists( reservedPath ) );
+ }
+ while ( exists );
+ return reservedPath;
+ }
+
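+    /** total on-disk size, in bytes, of a database's .ns file and numbered data files */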
+ boost::intmax_t dbSize( const char *database ) {
+ class SizeAccumulator : public FileOp {
+ public:
+ SizeAccumulator() : totalSize_( 0 ) {}
+ boost::intmax_t size() const {
+ return totalSize_;
+ }
+ private:
+ virtual bool apply( const boost::filesystem::path &p ) {
+ if ( !boost::filesystem::exists( p ) )
+ return false;
+ totalSize_ += boost::filesystem::file_size( p );
+ return true;
+ }
+ virtual const char *op() const {
+ return "checking size";
+ }
+ boost::intmax_t totalSize_;
+ };
+ SizeAccumulator sa;
+ _applyOpToDataFiles( database, sa );
+ return sa.size();
+ }
+
+ bool repairDatabase( string dbNameS , string &errmsg,
+ bool preserveClonedFilesOnFailure, bool backupOriginalFiles ) {
+ doingRepair dr;
+ dbNameS = nsToDatabase( dbNameS );
+ const char * dbName = dbNameS.c_str();
+
+ stringstream ss;
+ ss << "localhost:" << cmdLine.port;
+ string localhost = ss.str();
+
+ problem() << "repairDatabase " << dbName << endl;
+ assert( cc().database()->name == dbName );
+ assert( cc().database()->path == dbpath );
+
+ BackgroundOperation::assertNoBgOpInProgForDb(dbName);
+
+ getDur().syncDataAndTruncateJournal(); // Must be done before and after repair
+
+ boost::intmax_t totalSize = dbSize( dbName );
+ boost::intmax_t freeSize = File::freeSpace(repairpath);
+ if ( freeSize > -1 && freeSize < totalSize ) {
+ stringstream ss;
+ ss << "Cannot repair database " << dbName << " having size: " << totalSize
+ << " (bytes) because free disk space is: " << freeSize << " (bytes)";
+ errmsg = ss.str();
+ problem() << errmsg << endl;
+ return false;
+ }
+
+ Path reservedPath =
+ uniqueReservedPath( ( preserveClonedFilesOnFailure || backupOriginalFiles ) ?
+ "backup" : "_tmp" );
+ BOOST_CHECK_EXCEPTION( boost::filesystem::create_directory( reservedPath ) );
+ string reservedPathString = reservedPath.native_directory_string();
+
+ bool res;
+ {
+ // clone to temp location, which effectively does repair
+ Client::Context ctx( dbName, reservedPathString );
+ assert( ctx.justCreated() );
+
+ res = cloneFrom(localhost.c_str(), errmsg, dbName,
+ /*logForReplication=*/false, /*slaveOk*/false, /*replauth*/false,
+ /*snapshot*/false, /*mayYield*/false, /*mayBeInterrupted*/true);
+ Database::closeDatabase( dbName, reservedPathString.c_str() );
+ }
+
+ if ( !res ) {
+ errmsg = str::stream() << "clone failed for " << dbName << " with error: " << errmsg;
+ problem() << errmsg << endl;
+
+ if ( !preserveClonedFilesOnFailure )
+ BOOST_CHECK_EXCEPTION( boost::filesystem::remove_all( reservedPath ) );
+
+ getDur().syncDataAndTruncateJournal(); // Must be done before and after repair
+
+ return false;
+ }
+
+ MongoFile::flushAll(true);
+
+ Client::Context ctx( dbName );
+ Database::closeDatabase( dbName, dbpath );
+
+ if ( backupOriginalFiles ) {
+ _renameForBackup( dbName, reservedPath );
+ }
+ else {
+ _deleteDataFiles( dbName );
+ BOOST_CHECK_EXCEPTION( boost::filesystem::create_directory( Path( dbpath ) / dbName ) );
+ }
+
+ _replaceWithRecovered( dbName, reservedPathString.c_str() );
+
+ if ( !backupOriginalFiles )
+ BOOST_CHECK_EXCEPTION( boost::filesystem::remove_all( reservedPath ) );
+
+ getDur().syncDataAndTruncateJournal(); // Must be done before and after repair
+
+ return true;
+ }
+
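+    /** apply 'fo' to the database's .ns file and to each numbered data file under 'path'.
+        tolerates a few gaps in the file numbering (see 'extra') before stopping.
+    */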
+ void _applyOpToDataFiles( const char *database, FileOp &fo, bool afterAllocator, const string& path ) {
+ if ( afterAllocator )
+ FileAllocator::get()->waitUntilFinished();
+ string c = database;
+ c += '.';
+ boost::filesystem::path p(path);
+ if ( directoryperdb )
+ p /= database;
+ boost::filesystem::path q;
+ q = p / (c+"ns");
+ bool ok = false;
+ BOOST_CHECK_EXCEPTION( ok = fo.apply( q ) );
+ if ( ok )
+ log(2) << fo.op() << " file " << q.string() << endl;
+ int i = 0;
+ int extra = 10; // should not be necessary, this is defensive in case there are missing files
+ while ( 1 ) {
+ assert( i <= DiskLoc::MaxFiles );
+ stringstream ss;
+ ss << c << i;
+ q = p / ss.str();
+ BOOST_CHECK_EXCEPTION( ok = fo.apply(q) );
+ if ( ok ) {
+ if ( extra != 10 ) {
+ log(1) << fo.op() << " file " << q.string() << endl;
+ log() << " _applyOpToDataFiles() warning: extra == " << extra << endl;
+ }
+ }
+ else if ( --extra <= 0 )
+ break;
+ i++;
+ }
+ }
+
+ NamespaceDetails* nsdetails_notinline(const char *ns) { return nsdetails(ns); }
+
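+    /** close every open database under 'path'. databases with a background operation in progress
+        are skipped (and counted in nNotClosed) unless 'force' is set.
+    */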
+ bool DatabaseHolder::closeAll( const string& path , BSONObjBuilder& result , bool force ) {
+ log() << "DatabaseHolder::closeAll path:" << path << endl;
+ d.dbMutex.assertWriteLocked();
+
+ map<string,Database*>& m = _paths[path];
+ _size -= m.size();
+
+ set< string > dbs;
+ for ( map<string,Database*>::iterator i = m.begin(); i != m.end(); i++ ) {
+ wassert( i->second->path == path );
+ dbs.insert( i->first );
+ }
+
+ currentClient.get()->getContext()->_clear();
+
+ BSONObjBuilder bb( result.subarrayStart( "dbs" ) );
+ int n = 0;
+ int nNotClosed = 0;
+ for( set< string >::iterator i = dbs.begin(); i != dbs.end(); ++i ) {
+ string name = *i;
+ log(2) << "DatabaseHolder::closeAll path:" << path << " name:" << name << endl;
+ Client::Context ctx( name , path );
+ if( !force && BackgroundOperation::inProgForDb(name.c_str()) ) {
+ log() << "WARNING: can't close database " << name << " because a bg job is in progress - try killOp command" << endl;
+ nNotClosed++;
+ }
+ else {
+ Database::closeDatabase( name.c_str() , path );
+ bb.append( bb.numStr( n++ ) , name );
+ }
+ }
+ bb.done();
+ if( nNotClosed )
+ result.append("nNotClosed", nNotClosed);
+ else {
+ ClientCursor::assertNoCursors();
+ }
+
+ return true;
+ }
+
+} // namespace mongo
diff --git a/src/mongo/db/pdfile.h b/src/mongo/db/pdfile.h
new file mode 100644
index 00000000000..cd6062b1a48
--- /dev/null
+++ b/src/mongo/db/pdfile.h
@@ -0,0 +1,546 @@
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+/* pdfile.h
+
+ Files:
+ database.ns - namespace index
+ database.1 - data files
+ database.2
+ ...
+*/
+
+#pragma once
+
+#include "../pch.h"
+#include "../util/mmap.h"
+#include "diskloc.h"
+#include "jsobjmanipulator.h"
+#include "namespace-inl.h"
+#include "client.h"
+#include "mongommf.h"
+
+namespace mongo {
+
+ class DataFileHeader;
+ class Extent;
+ class Record;
+ class Cursor;
+ class OpDebug;
+
+ void dropDatabase(string db);
+ bool repairDatabase(string db, string &errmsg, bool preserveClonedFilesOnFailure = false, bool backupOriginalFiles = false);
+
+ /* low level - only drops this ns */
+ void dropNS(const string& dropNs);
+
+ /* deletes this ns, indexes and cursors */
+ void dropCollection( const string &name, string &errmsg, BSONObjBuilder &result );
+ bool userCreateNS(const char *ns, BSONObj j, string& err, bool logForReplication, bool *deferIdIndex = 0);
+ shared_ptr<Cursor> findTableScan(const char *ns, const BSONObj& order, const DiskLoc &startLoc=DiskLoc());
+
+ bool isValidNS( const StringData& ns );
+
+ /*---------------------------------------------------------------------*/
+
+ class MongoDataFile {
+ friend class DataFileMgr;
+ friend class BasicCursor;
+ public:
+ MongoDataFile(int fn) : _mb(0), fileNo(fn) { }
+
+ /** @return true if found and opened. if uninitialized (prealloc only) does not open. */
+ bool openExisting( const char *filename );
+
+ /** creates if DNE */
+ void open(const char *filename, int requestedDataSize = 0, bool preallocateOnly = false);
+
+ /* allocate a new extent from this datafile.
+ @param capped - true if capped collection
+ @param loops is our recursion check variable - you want to pass in zero
+ */
+ Extent* createExtent(const char *ns, int approxSize, bool capped = false, int loops = 0);
+
+ DataFileHeader *getHeader() { return header(); }
+
+ unsigned long long length() const { return mmf.length(); }
+
+ /* return max size an extent may be */
+ static int maxSize();
+
+ /** fsync */
+ void flush( bool sync );
+
+        /** only use for debugging */
+ Extent* debug_getExtent(DiskLoc loc) { return _getExtent( loc ); }
+ private:
+ void badOfs(int) const;
+ void badOfs2(int) const;
+ int defaultSize( const char *filename ) const;
+
+ Extent* getExtent(DiskLoc loc) const;
+ Extent* _getExtent(DiskLoc loc) const;
+ Record* recordAt(DiskLoc dl);
+ Record* makeRecord(DiskLoc dl, int size);
+ void grow(DiskLoc dl, int size);
+
+ char* p() const { return (char *) _mb; }
+ DataFileHeader* header() { return (DataFileHeader*) _mb; }
+
+ MongoMMF mmf;
+ void *_mb; // the memory mapped view
+ int fileNo;
+ };
+
+ class DataFileMgr {
+ friend class BasicCursor;
+ public:
+ void init(const string& path );
+
+ /* see if we can find an extent of the right size in the freelist. */
+ static Extent* allocFromFreeList(const char *ns, int approxSize, bool capped = false);
+
+ /** @return DiskLoc where item ends up */
+ // changedId should be initialized to false
+ const DiskLoc updateRecord(
+ const char *ns,
+ NamespaceDetails *d,
+ NamespaceDetailsTransient *nsdt,
+ Record *toupdate, const DiskLoc& dl,
+ const char *buf, int len, OpDebug& debug, bool god=false);
+
+ // The object o may be updated if modified on insert.
+ void insertAndLog( const char *ns, const BSONObj &o, bool god = false );
+
+ /** insert will add an _id to the object if not present. if you would like to see the final object
+ after such an addition, use this method.
+           @param o both an in and out param
+ */
+ DiskLoc insertWithObjMod(const char *ns, BSONObj & /*out*/o, bool god = false);
+
+ /** @param obj in value only for this version. */
+ void insertNoReturnVal(const char *ns, BSONObj o, bool god = false);
+
+ DiskLoc insert(const char *ns, const void *buf, int len, bool god = false, bool mayAddIndex = true, bool *addedID = 0);
+ static shared_ptr<Cursor> findAll(const char *ns, const DiskLoc &startLoc = DiskLoc());
+
+ /* special version of insert for transaction logging -- streamlined a bit.
+ assumes ns is capped and no indexes
+ no _id field check
+ */
+ Record* fast_oplog_insert(NamespaceDetails *d, const char *ns, int len);
+
+ static Extent* getExtent(const DiskLoc& dl);
+ static Record* getRecord(const DiskLoc& dl);
+ static DeletedRecord* makeDeletedRecord(const DiskLoc& dl, int len);
+
+ void deleteRecord(const char *ns, Record *todelete, const DiskLoc& dl, bool cappedOK = false, bool noWarn = false, bool logOp=false);
+
+ /* does not clean up indexes, etc. : just deletes the record in the pdfile. use deleteRecord() to unindex */
+ void _deleteRecord(NamespaceDetails *d, const char *ns, Record *todelete, const DiskLoc& dl);
+
+ private:
+ vector<MongoDataFile *> files;
+ };
+
+ extern DataFileMgr theDataFileMgr;
+
+#pragma pack(1)
+
+ class DeletedRecord {
+ public:
+ int lengthWithHeaders;
+ int extentOfs;
+ DiskLoc nextDeleted;
+ DiskLoc myExtentLoc(const DiskLoc& myLoc) const {
+ return DiskLoc(myLoc.a(), extentOfs);
+ }
+ Extent* myExtent(const DiskLoc& myLoc) {
+ return DataFileMgr::getExtent(DiskLoc(myLoc.a(), extentOfs));
+ }
+ };
+
+ /* Record is a record in a datafile. DeletedRecord is similar but for deleted space.
+
+    (11:03:20 AM) dm10gen: regarding extentOfs...
+    (11:03:42 AM) dm10gen: an extent is a contiguous disk area, which contains many Records and DeletedRecords
+    (11:03:56 AM) dm10gen: a DiskLoc has two pieces, the fileno and ofs. (64 bit total)
+    (11:04:16 AM) dm10gen: to keep the headers small, instead of storing a 64 bit ptr to the full extent address, we keep just the offset
+ (11:04:29 AM) dm10gen: we can do this as we know the record's address, and it has the same fileNo
+ (11:04:33 AM) dm10gen: see class DiskLoc for more info
+ (11:04:43 AM) dm10gen: so that is how Record::myExtent() works
+ (11:04:53 AM) dm10gen: on an alloc(), when we build a new Record, we must populate its extentOfs then
+ */
+ class Record {
+ public:
+ enum HeaderSizeValue { HeaderSize = 16 };
+ int lengthWithHeaders;
+ int extentOfs;
+ int nextOfs;
+ int prevOfs;
+
+ /** be careful when referencing this that your write intent was correct */
+ char data[4];
+
+ int netLength() {
+ return lengthWithHeaders - HeaderSize;
+ }
+ //void setNewLength(int netlen) { lengthWithHeaders = netlen + HeaderSize; }
+
+ /* use this when a record is deleted. basically a union with next/prev fields */
+ DeletedRecord& asDeleted() { return *((DeletedRecord*) this); }
+
+ Extent* myExtent(const DiskLoc& myLoc) { return DataFileMgr::getExtent(DiskLoc(myLoc.a(), extentOfs)); }
+
+ /* get the next record in the namespace, traversing extents as necessary */
+ DiskLoc getNext(const DiskLoc& myLoc);
+ DiskLoc getPrev(const DiskLoc& myLoc);
+
+ DiskLoc nextInExtent(const DiskLoc& myLoc) {
+ if ( nextOfs == DiskLoc::NullOfs )
+ return DiskLoc();
+ assert( nextOfs );
+ return DiskLoc(myLoc.a(), nextOfs);
+ }
+
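+        /** nextOfs/prevOfs viewed as one struct, so both can be updated with a single
+            getDur().writing() call */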
+ struct NP {
+ int nextOfs;
+ int prevOfs;
+ };
+ NP* np() { return (NP*) &nextOfs; }
+
+ // ---------------------
+ // memory cache
+ // ---------------------
+
+ /**
+         * touches the data so that it is in physical memory
+         * @param entireRecord if false, only the header and first byte are touched
+         *                     if true, the entire record is touched
+         * */
+        void touch( bool entireRecord = false );
+
+ /**
+ * @return if this record is likely in physical memory
+         * it's not guaranteed because it's possible it gets swapped out in a very unlucky window
+ */
+ bool likelyInPhysicalMemory();
+
+ /**
+ * tell the cache this Record was accessed
+ * @return this, for simple chaining
+ */
+ Record* accessed();
+
+ static bool MemoryTrackingEnabled;
+ };
+
+ /* extents are datafile regions where all the records within the region
+ belong to the same namespace.
+
+ (11:12:35 AM) dm10gen: when the extent is allocated, all its empty space is stuck into one big DeletedRecord
+ (11:12:55 AM) dm10gen: and that is placed on the free list
+ */
+ class Extent {
+ public:
+ unsigned magic;
+ DiskLoc myLoc;
+ DiskLoc xnext, xprev; /* next/prev extent for this namespace */
+
+ /* which namespace this extent is for. this is just for troubleshooting really
+ and won't even be correct if the collection were renamed!
+ */
+ Namespace nsDiagnostic;
+
+ int length; /* size of the extent, including these fields */
+ DiskLoc firstRecord;
+ DiskLoc lastRecord;
+ char _extentData[4];
+
+ static int HeaderSize() { return sizeof(Extent)-4; }
+
+ bool validates() {
+ return !(firstRecord.isNull() ^ lastRecord.isNull()) &&
+ length >= 0 && !myLoc.isNull();
+ }
+
+ BSONObj dump() {
+ return BSON( "loc" << myLoc.toString() << "xnext" << xnext.toString() << "xprev" << xprev.toString()
+ << "nsdiag" << nsDiagnostic.toString()
+ << "size" << length << "firstRecord" << firstRecord.toString() << "lastRecord" << lastRecord.toString());
+ }
+
+ void dump(iostream& s) {
+ s << " loc:" << myLoc.toString() << " xnext:" << xnext.toString() << " xprev:" << xprev.toString() << '\n';
+ s << " nsdiag:" << nsDiagnostic.toString() << '\n';
+ s << " size:" << length << " firstRecord:" << firstRecord.toString() << " lastRecord:" << lastRecord.toString() << '\n';
+ }
+
+ /* assumes already zeroed -- insufficient for block 'reuse' perhaps
+ Returns a DeletedRecord location which is the data in the extent ready for us.
+           Caller will need to add that to the freelist structure in NamespaceDetails.
+ */
+ DiskLoc init(const char *nsname, int _length, int _fileNo, int _offset, bool capped);
+
+ /* like init(), but for a reuse case */
+ DiskLoc reuse(const char *nsname, bool newUseIsAsCapped);
+
+ bool isOk() const { return magic == 0x41424344; }
+ void assertOk() const { assert(isOk()); }
+
+ Record* newRecord(int len);
+
+ Record* getRecord(DiskLoc dl) {
+ assert( !dl.isNull() );
+ assert( dl.sameFile(myLoc) );
+ int x = dl.getOfs() - myLoc.getOfs();
+ assert( x > 0 );
+ return (Record *) (((char *) this) + x);
+ }
+
+ Extent* getNextExtent() { return xnext.isNull() ? 0 : DataFileMgr::getExtent(xnext); }
+ Extent* getPrevExtent() { return xprev.isNull() ? 0 : DataFileMgr::getExtent(xprev); }
+
+ static int maxSize();
+ static int minSize() { return 0x100; }
+ /**
+         * @param len length of the record we need
+         * @param lastExtentLen size of the last extent, which is a factor in the next extent size
+ */
+ static int followupSize(int len, int lastExtentLen);
+
+ /** get a suggested size for the first extent in a namespace
+ * @param len length of record we need to insert
+ */
+ static int initialSize(int len);
+
+ struct FL {
+ DiskLoc firstRecord;
+ DiskLoc lastRecord;
+ };
+ /** often we want to update just the firstRecord and lastRecord fields.
+ this helper is for that -- for use with getDur().writing() method
+ */
+ FL* fl() { return (FL*) &firstRecord; }
+
+ /** caller must declare write intent first */
+ void markEmpty();
+ private:
+ DiskLoc _reuse(const char *nsname, bool newUseIsAsCapped); // recycle an extent and reuse it for a different ns
+ };
+
+ /* a datafile - i.e. the "dbname.<#>" files :
+
+ ----------------------
+ DataFileHeader
+ ----------------------
+ Extent (for a particular namespace)
+ Record
+ ...
+ Record (some chained for unused space)
+ ----------------------
+ more Extents...
+ ----------------------
+ */
+ class DataFileHeader {
+ public:
+ int version;
+ int versionMinor;
+ int fileLength;
+ DiskLoc unused; /* unused is the portion of the file that doesn't belong to any allocated extents. -1 = no more */
+ int unusedLength;
+ char reserved[8192 - 4*4 - 8];
+
+ char data[4]; // first extent starts here
+
+ enum { HeaderSize = 8192 };
+
+ bool isCurrentVersion() const { return ( version == PDFILE_VERSION ) && ( versionMinor == PDFILE_VERSION_MINOR ); }
+
+ bool uninitialized() const { return version == 0; }
+
+ void init(int fileno, int filelength, const char* filename) {
+ if ( uninitialized() ) {
+ DEV log() << "datafileheader::init initializing " << filename << " n:" << fileno << endl;
+ if( !(filelength > 32768 ) ) {
+ massert(13640, str::stream() << "DataFileHeader looks corrupt at file open filelength:" << filelength << " fileno:" << fileno, false);
+ }
+
+ {
+ if( !d.dbMutex.isWriteLocked() ) {
+ log() << "*** TEMP NOT INITIALIZING FILE " << filename << ", not in a write lock." << endl;
+ log() << "temp bypass until more elaborate change - case that is manifesting is benign anyway" << endl;
+ return;
+/**
+ log() << "ERROR can't create outside a write lock" << endl;
+ printStackTrace();
+ ::abort();
+**/
+ }
+ }
+
+ getDur().createdFile(filename, filelength);
+ assert( HeaderSize == 8192 );
+ DataFileHeader *h = getDur().writing(this);
+ h->fileLength = filelength;
+ h->version = PDFILE_VERSION;
+ h->versionMinor = PDFILE_VERSION_MINOR;
+ h->unused.set( fileno, HeaderSize );
+ assert( (data-(char*)this) == HeaderSize );
+ h->unusedLength = fileLength - HeaderSize - 16;
+ }
+ }
+
+ bool isEmpty() const {
+ return uninitialized() || ( unusedLength == fileLength - HeaderSize - 16 );
+ }
+ };
+
+#pragma pack()
+
+ inline Extent* MongoDataFile::_getExtent(DiskLoc loc) const {
+ loc.assertOk();
+ Extent *e = (Extent *) (p()+loc.getOfs());
+ return e;
+ }
+
+ inline Extent* MongoDataFile::getExtent(DiskLoc loc) const {
+ Extent *e = _getExtent(loc);
+ e->assertOk();
+ return e;
+ }
+
+} // namespace mongo
+
+#include "cursor.h"
+
+namespace mongo {
+
+ inline Record* MongoDataFile::recordAt(DiskLoc dl) {
+ int ofs = dl.getOfs();
+ if( ofs < DataFileHeader::HeaderSize ) badOfs(ofs); // will uassert - external call to keep out of the normal code path
+ return (Record*) (p()+ofs);
+ }
+
+ inline Record* MongoDataFile::makeRecord(DiskLoc dl, int size) {
+ int ofs = dl.getOfs();
+ if( ofs < DataFileHeader::HeaderSize ) badOfs(ofs); // will uassert - external call to keep out of the normal code path
+ return (Record*) (p()+ofs);
+ }
+
+ inline DiskLoc Record::getNext(const DiskLoc& myLoc) {
+ if ( nextOfs != DiskLoc::NullOfs ) {
+ /* defensive */
+ if ( nextOfs >= 0 && nextOfs < 10 ) {
+ sayDbContext("Assertion failure - Record::getNext() referencing a deleted record?");
+ return DiskLoc();
+ }
+
+ return DiskLoc(myLoc.a(), nextOfs);
+ }
+ Extent *e = myExtent(myLoc);
+ while ( 1 ) {
+ if ( e->xnext.isNull() )
+ return DiskLoc(); // end of table.
+ e = e->xnext.ext();
+ if ( !e->firstRecord.isNull() )
+ break;
+ // entire extent could be empty, keep looking
+ }
+ return e->firstRecord;
+ }
+ inline DiskLoc Record::getPrev(const DiskLoc& myLoc) {
+ if ( prevOfs != DiskLoc::NullOfs )
+ return DiskLoc(myLoc.a(), prevOfs);
+ Extent *e = myExtent(myLoc);
+ if ( e->xprev.isNull() )
+ return DiskLoc();
+ return e->xprev.ext()->lastRecord;
+ }
+
+ inline BSONObj DiskLoc::obj() const {
+ return BSONObj(rec()->accessed());
+ }
+ inline DeletedRecord* DiskLoc::drec() const {
+ assert( _a != -1 );
+ return (DeletedRecord*) rec();
+ }
+ inline Extent* DiskLoc::ext() const {
+ return DataFileMgr::getExtent(*this);
+ }
+
+ template< class V >
+ inline
+ const BtreeBucket<V> * DiskLoc::btree() const {
+ assert( _a != -1 );
+ return (const BtreeBucket<V> *) rec()->data;
+ }
+
+} // namespace mongo
+
+#include "database.h"
+
+namespace mongo {
+
+ boost::intmax_t dbSize( const char *database );
+
+ inline NamespaceIndex* nsindex(const char *ns) {
+ Database *database = cc().database();
+ assert( database );
+ DEV {
+ char buf[256];
+ nsToDatabase(ns, buf);
+ if ( database->name != buf ) {
+ out() << "ERROR: attempt to write to wrong database\n";
+ out() << " ns:" << ns << '\n';
+ out() << " database->name:" << database->name << endl;
+ assert( database->name == buf );
+ }
+ }
+ return &database->namespaceIndex;
+ }
+
+ inline NamespaceDetails* nsdetails(const char *ns) {
+ // if this faults, did you set the current db first? (Client::Context + dblock)
+ return nsindex(ns)->details(ns);
+ }
+
+ inline Extent* DataFileMgr::getExtent(const DiskLoc& dl) {
+ assert( dl.a() != -1 );
+ return cc().database()->getFile(dl.a())->getExtent(dl);
+ }
+
+ inline Record* DataFileMgr::getRecord(const DiskLoc& dl) {
+ assert( dl.a() != -1 );
+ return cc().database()->getFile(dl.a())->recordAt(dl);
+ }
+
+ BOOST_STATIC_ASSERT( 16 == sizeof(DeletedRecord) );
+
+ inline DeletedRecord* DataFileMgr::makeDeletedRecord(const DiskLoc& dl, int len) {
+ assert( dl.a() != -1 );
+ return (DeletedRecord*) cc().database()->getFile(dl.a())->makeRecord(dl, sizeof(DeletedRecord));
+ }
+
+ void ensureHaveIdIndex(const char *ns);
+
+ bool dropIndexes( NamespaceDetails *d, const char *ns, const char *name, string &errmsg, BSONObjBuilder &anObjBuilder, bool maydeleteIdIndex );
+
+ inline BSONObj::BSONObj(const Record *r) {
+ init(r->data);
+ }
+
+} // namespace mongo
diff --git a/src/mongo/db/pipeline/accumulator.cpp b/src/mongo/db/pipeline/accumulator.cpp
new file mode 100755
index 00000000000..9ef8aa39470
--- /dev/null
+++ b/src/mongo/db/pipeline/accumulator.cpp
@@ -0,0 +1,92 @@
+/**
+ * Copyright (c) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "db/pipeline/accumulator.h"
+
+#include "db/jsobj.h"
+#include "util/mongoutils/str.h"
+
+namespace mongo {
+ using namespace mongoutils;
+
+ void Accumulator::addOperand(
+ const intrusive_ptr<Expression> &pExpression) {
+ uassert(15943, str::stream() << "group accumulator " <<
+ getOpName() << " only accepts one operand",
+ vpOperand.size() < 1);
+
+ ExpressionNary::addOperand(pExpression);
+ }
+
+ Accumulator::Accumulator():
+ ExpressionNary() {
+ }
+
+ void Accumulator::opToBson(
+ BSONObjBuilder *pBuilder, string opName,
+ string fieldName, unsigned depth) const {
+ assert(vpOperand.size() == 1);
+ BSONObjBuilder builder;
+ vpOperand[0]->addToBsonObj(&builder, opName, depth);
+ pBuilder->append(fieldName, builder.done());
+ }
+
+ void Accumulator::addToBsonObj(
+ BSONObjBuilder *pBuilder, string fieldName, unsigned depth) const {
+ opToBson(pBuilder, getOpName(), fieldName, depth);
+ }
+
+ void Accumulator::addToBsonArray(
+ BSONArrayBuilder *pBuilder, unsigned depth) const {
+ assert(false); // these can't appear in arrays
+ }
+
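+    /* placeholder uasserts that reserve these error codes for future use by the aggregation framework */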
+ void agg_framework_reservedErrors() {
+ uassert(16017, "reserved error", false);
+ uassert(16018, "reserved error", false);
+ uassert(16019, "reserved error", false);
+ uassert(16020, "reserved error", false);
+ uassert(16021, "reserved error", false);
+ uassert(16022, "reserved error", false);
+ uassert(16023, "reserved error", false);
+ uassert(16024, "reserved error", false);
+ uassert(16025, "reserved error", false);
+ uassert(16026, "reserved error", false);
+ uassert(16027, "reserved error", false);
+ uassert(16028, "reserved error", false);
+ uassert(16029, "reserved error", false);
+ uassert(16030, "reserved error", false);
+ uassert(16031, "reserved error", false);
+ uassert(16032, "reserved error", false);
+ uassert(16033, "reserved error", false);
+
+ uassert(16036, "reserved error", false);
+ uassert(16037, "reserved error", false);
+ uassert(16038, "reserved error", false);
+ uassert(16039, "reserved error", false);
+ uassert(16040, "reserved error", false);
+ uassert(16041, "reserved error", false);
+ uassert(16042, "reserved error", false);
+ uassert(16043, "reserved error", false);
+ uassert(16044, "reserved error", false);
+ uassert(16045, "reserved error", false);
+ uassert(16046, "reserved error", false);
+ uassert(16047, "reserved error", false);
+ uassert(16048, "reserved error", false);
+ uassert(16049, "reserved error", false);
+ }
+}
diff --git a/src/mongo/db/pipeline/accumulator.h b/src/mongo/db/pipeline/accumulator.h
new file mode 100755
index 00000000000..a75b2c9abaa
--- /dev/null
+++ b/src/mongo/db/pipeline/accumulator.h
@@ -0,0 +1,259 @@
+/**
+ * Copyright (c) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "pch.h"
+
+#include <boost/unordered_set.hpp>
+#include "db/pipeline/value.h"
+#include "db/pipeline/expression.h"
+#include "bson/bsontypes.h"
+
+namespace mongo {
+ class ExpressionContext;
+
+ class Accumulator :
+ public ExpressionNary {
+ public:
+ // virtuals from ExpressionNary
+ virtual void addOperand(const intrusive_ptr<Expression> &pExpression);
+ virtual void addToBsonObj(
+ BSONObjBuilder *pBuilder, string fieldName, unsigned depth) const;
+ virtual void addToBsonArray(
+ BSONArrayBuilder *pBuilder, unsigned depth) const;
+
+ /*
+ Get the accumulated value.
+
+ @returns the accumulated value
+ */
+ virtual intrusive_ptr<const Value> getValue() const = 0;
+
+ protected:
+ Accumulator();
+
+ /*
+          Convenience method for serializing accumulators to BSON. The pattern
+          is always the same, so a common implementation works, but requires
+          knowing the operator name.
+
+          @param pBuilder the builder to add to
+          @param opName the operator name
+          @param fieldName the projected name
+        */
+        void opToBson(
+            BSONObjBuilder *pBuilder, string opName, string fieldName,
+ unsigned depth) const;
+ };
+
+
+ class AccumulatorAddToSet :
+ public Accumulator {
+ public:
+ // virtuals from Expression
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual intrusive_ptr<const Value> getValue() const;
+ virtual const char *getOpName() const;
+
+ /*
+ Create an appending accumulator.
+
+ @param pCtx the expression context
+ @returns the created accumulator
+ */
+ static intrusive_ptr<Accumulator> create(
+ const intrusive_ptr<ExpressionContext> &pCtx);
+
+ private:
+ AccumulatorAddToSet(const intrusive_ptr<ExpressionContext> &pTheCtx);
+ typedef boost::unordered_set<intrusive_ptr<const Value>, Value::Hash > SetType;
+ mutable SetType set;
+ mutable SetType::iterator itr;
+ intrusive_ptr<ExpressionContext> pCtx;
+ };
+
+
+ /*
+ This isn't a finished accumulator, but rather a convenient base class
+ for others such as $first, $last, $max, $min, and similar. It just
+ provides a holder for a single Value, and the getter for that. The
+ holder is protected so derived classes can manipulate it.
+ */
+ class AccumulatorSingleValue :
+ public Accumulator {
+ public:
+ // virtuals from Expression
+ virtual intrusive_ptr<const Value> getValue() const;
+
+ protected:
+ AccumulatorSingleValue();
+
+ mutable intrusive_ptr<const Value> pValue; /* current min/max */
+ };
+
+
+ class AccumulatorFirst :
+ public AccumulatorSingleValue {
+ public:
+ // virtuals from Expression
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual const char *getOpName() const;
+
+ /*
+ Create the accumulator.
+
+ @returns the created accumulator
+ */
+ static intrusive_ptr<Accumulator> create(
+ const intrusive_ptr<ExpressionContext> &pCtx);
+
+ private:
+ AccumulatorFirst();
+ };
+
+
+ class AccumulatorLast :
+ public AccumulatorSingleValue {
+ public:
+ // virtuals from Expression
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual const char *getOpName() const;
+
+ /*
+ Create the accumulator.
+
+ @returns the created accumulator
+ */
+ static intrusive_ptr<Accumulator> create(
+ const intrusive_ptr<ExpressionContext> &pCtx);
+
+ private:
+ AccumulatorLast();
+ };
+
+
+ class AccumulatorSum :
+ public Accumulator {
+ public:
+ // virtuals from Accumulator
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual intrusive_ptr<const Value> getValue() const;
+ virtual const char *getOpName() const;
+
+ /*
+ Create a summing accumulator.
+
+ @param pCtx the expression context
+ @returns the created accumulator
+ */
+ static intrusive_ptr<Accumulator> create(
+ const intrusive_ptr<ExpressionContext> &pCtx);
+
+ protected: /* reused by AccumulatorAvg */
+ AccumulatorSum();
+
+ mutable BSONType totalType;
+ mutable long long longTotal;
+ mutable double doubleTotal;
+ };
+
+
+ class AccumulatorMinMax :
+ public AccumulatorSingleValue {
+ public:
+ // virtuals from Expression
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual const char *getOpName() const;
+
+ /*
+ Create either the max or min accumulator.
+
+ @returns the created accumulator
+ */
+ static intrusive_ptr<Accumulator> createMin(
+ const intrusive_ptr<ExpressionContext> &pCtx);
+ static intrusive_ptr<Accumulator> createMax(
+ const intrusive_ptr<ExpressionContext> &pCtx);
+
+ private:
+ AccumulatorMinMax(int theSense);
+
+ int sense; /* 1 for min, -1 for max; used to "scale" comparison */
+ };
+
+
+ class AccumulatorPush :
+ public Accumulator {
+ public:
+ // virtuals from Expression
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual intrusive_ptr<const Value> getValue() const;
+ virtual const char *getOpName() const;
+
+ /*
+ Create an appending accumulator.
+
+ @param pCtx the expression context
+ @returns the created accumulator
+ */
+ static intrusive_ptr<Accumulator> create(
+ const intrusive_ptr<ExpressionContext> &pCtx);
+
+ private:
+ AccumulatorPush(const intrusive_ptr<ExpressionContext> &pTheCtx);
+
+ mutable vector<intrusive_ptr<const Value> > vpValue;
+ intrusive_ptr<ExpressionContext> pCtx;
+ };
+
+
+ class AccumulatorAvg :
+ public AccumulatorSum {
+ typedef AccumulatorSum Super;
+ public:
+ // virtuals from Accumulator
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual intrusive_ptr<const Value> getValue() const;
+ virtual const char *getOpName() const;
+
+ /*
+ Create an averaging accumulator.
+
+ @param pCtx the expression context
+ @returns the created accumulator
+ */
+ static intrusive_ptr<Accumulator> create(
+ const intrusive_ptr<ExpressionContext> &pCtx);
+
+ private:
+ static const char subTotalName[];
+ static const char countName[];
+
+ AccumulatorAvg(const intrusive_ptr<ExpressionContext> &pCtx);
+
+ mutable long long count;
+ intrusive_ptr<ExpressionContext> pCtx;
+ };
+
+}
diff --git a/src/mongo/db/pipeline/accumulator_add_to_set.cpp b/src/mongo/db/pipeline/accumulator_add_to_set.cpp
new file mode 100755
index 00000000000..94df0293de4
--- /dev/null
+++ b/src/mongo/db/pipeline/accumulator_add_to_set.cpp
@@ -0,0 +1,79 @@
+/**
+ * Copyright (c) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "accumulator.h"
+
+#include "db/pipeline/expression_context.h"
+#include "db/pipeline/value.h"
+
+namespace mongo {
+ intrusive_ptr<const Value> AccumulatorAddToSet::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ assert(vpOperand.size() == 1);
+ intrusive_ptr<const Value> prhs(vpOperand[0]->evaluate(pDocument));
+
+ if (prhs->getType() == Undefined)
+ ; /* nothing to add to the array */
+ else if (!pCtx->getInRouter())
+ set.insert(prhs);
+ else {
+ /*
+ If we're in the router, we need to take apart the arrays we
+ receive and put their elements into the array we are collecting.
+ If we didn't, then we'd get an array of arrays, with one array
+ from each shard that responds.
+ */
+ assert(prhs->getType() == Array);
+
+ intrusive_ptr<ValueIterator> pvi(prhs->getArray());
+ while(pvi->more()) {
+ intrusive_ptr<const Value> pElement(pvi->next());
+ set.insert(pElement);
+ }
+ }
+
+ return Value::getNull();
+ }
+
+ intrusive_ptr<const Value> AccumulatorAddToSet::getValue() const {
+ vector<intrusive_ptr<const Value> > valVec;
+
+ for (itr = set.begin(); itr != set.end(); ++itr) {
+ valVec.push_back(*itr);
+ }
+ /* there is no issue of scope since createArray copy constructs */
+ return Value::createArray(valVec);
+ }
+
+ AccumulatorAddToSet::AccumulatorAddToSet(
+ const intrusive_ptr<ExpressionContext> &pTheCtx):
+ Accumulator(),
+ set(),
+ pCtx(pTheCtx) {
+ }
+
+ intrusive_ptr<Accumulator> AccumulatorAddToSet::create(
+ const intrusive_ptr<ExpressionContext> &pCtx) {
+ intrusive_ptr<AccumulatorAddToSet> pAccumulator(
+ new AccumulatorAddToSet(pCtx));
+ return pAccumulator;
+ }
+
+ const char *AccumulatorAddToSet::getOpName() const {
+ return "$addToSet";
+ }
+}
diff --git a/src/mongo/db/pipeline/accumulator_avg.cpp b/src/mongo/db/pipeline/accumulator_avg.cpp
new file mode 100755
index 00000000000..9f18b1820c8
--- /dev/null
+++ b/src/mongo/db/pipeline/accumulator_avg.cpp
@@ -0,0 +1,123 @@
+/**
+ * Copyright (c) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "accumulator.h"
+
+#include "db/pipeline/document.h"
+#include "db/pipeline/expression_context.h"
+#include "db/pipeline/value.h"
+
+namespace mongo {
+
+ const char AccumulatorAvg::subTotalName[] = "subTotal";
+ const char AccumulatorAvg::countName[] = "count";
+
+ intrusive_ptr<const Value> AccumulatorAvg::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ if (!pCtx->getInRouter()) {
+ Super::evaluate(pDocument);
+ ++count;
+ }
+ else {
+ /*
+ If we're in the router, we expect an object that contains
+ both a subtotal and a count. This is what getValue() produced
+ below.
+ */
+ intrusive_ptr<const Value> prhs(
+ vpOperand[0]->evaluate(pDocument));
+ assert(prhs->getType() == Object);
+ intrusive_ptr<Document> pShardDoc(prhs->getDocument());
+
+ intrusive_ptr<const Value> pSubTotal(
+ pShardDoc->getValue(subTotalName));
+ assert(pSubTotal.get());
+ BSONType subTotalType = pSubTotal->getType();
+ if ((totalType == NumberLong) || (subTotalType == NumberLong))
+ totalType = NumberLong;
+ if ((totalType == NumberDouble) || (subTotalType == NumberDouble))
+ totalType = NumberDouble;
+
+ if (subTotalType == NumberInt) {
+ int v = pSubTotal->getInt();
+ longTotal += v;
+ doubleTotal += v;
+ }
+ else if (subTotalType == NumberLong) {
+ long long v = pSubTotal->getLong();
+ longTotal += v;
+ doubleTotal += v;
+ }
+ else {
+ double v = pSubTotal->getDouble();
+ doubleTotal += v;
+ }
+
+ intrusive_ptr<const Value> pCount(pShardDoc->getValue(countName));
+ count += pCount->getLong();
+ }
+
+ return Value::getZero();
+ }
+
+ intrusive_ptr<Accumulator> AccumulatorAvg::create(
+ const intrusive_ptr<ExpressionContext> &pCtx) {
+ intrusive_ptr<AccumulatorAvg> pA(new AccumulatorAvg(pCtx));
+ return pA;
+ }
+
+ intrusive_ptr<const Value> AccumulatorAvg::getValue() const {
+ if (!pCtx->getInShard()) {
+ double avg = 0;
+ if (count) {
+ if (totalType != NumberDouble)
+ avg = static_cast<double>(longTotal / count);
+ else
+ avg = doubleTotal / count;
+ }
+
+ return Value::createDouble(avg);
+ }
+
+ intrusive_ptr<Document> pDocument(Document::create());
+
+ intrusive_ptr<const Value> pSubTotal;
+ if (totalType == NumberInt)
+ pSubTotal = Value::createInt((int)longTotal);
+ else if (totalType == NumberLong)
+ pSubTotal = Value::createLong(longTotal);
+ else
+ pSubTotal = Value::createDouble(doubleTotal);
+ pDocument->addField(subTotalName, pSubTotal);
+
+ intrusive_ptr<const Value> pCount(Value::createLong(count));
+ pDocument->addField(countName, pCount);
+
+ return Value::createDocument(pDocument);
+ }
+
+ AccumulatorAvg::AccumulatorAvg(
+ const intrusive_ptr<ExpressionContext> &pTheCtx):
+ AccumulatorSum(),
+ count(0),
+ pCtx(pTheCtx) {
+ }
+
+ const char *AccumulatorAvg::getOpName() const {
+ return "$avg";
+ }
+}
diff --git a/src/mongo/db/pipeline/accumulator_first.cpp b/src/mongo/db/pipeline/accumulator_first.cpp
new file mode 100755
index 00000000000..c947aa83996
--- /dev/null
+++ b/src/mongo/db/pipeline/accumulator_first.cpp
@@ -0,0 +1,49 @@
+/**
+ * Copyright (c) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "accumulator.h"
+
+#include "db/pipeline/value.h"
+
+namespace mongo {
+
+ intrusive_ptr<const Value> AccumulatorFirst::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ assert(vpOperand.size() == 1);
+
+ /* only remember the first value seen */
+ if (!pValue.get())
+ pValue = vpOperand[0]->evaluate(pDocument);
+
+ return pValue;
+ }
+
+ AccumulatorFirst::AccumulatorFirst():
+ AccumulatorSingleValue() {
+ }
+
+ intrusive_ptr<Accumulator> AccumulatorFirst::create(
+ const intrusive_ptr<ExpressionContext> &pCtx) {
+ intrusive_ptr<AccumulatorFirst> pAccumulator(
+ new AccumulatorFirst());
+ return pAccumulator;
+ }
+
+ const char *AccumulatorFirst::getOpName() const {
+ return "$first";
+ }
+}
diff --git a/src/mongo/db/pipeline/accumulator_last.cpp b/src/mongo/db/pipeline/accumulator_last.cpp
new file mode 100755
index 00000000000..c134fc83159
--- /dev/null
+++ b/src/mongo/db/pipeline/accumulator_last.cpp
@@ -0,0 +1,48 @@
+/**
+ * Copyright (c) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "accumulator.h"
+
+#include "db/pipeline/value.h"
+
+namespace mongo {
+
+ intrusive_ptr<const Value> AccumulatorLast::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ assert(vpOperand.size() == 1);
+
+ /* always remember the last value seen */
+ pValue = vpOperand[0]->evaluate(pDocument);
+
+ return pValue;
+ }
+
+ AccumulatorLast::AccumulatorLast():
+ AccumulatorSingleValue() {
+ }
+
+ intrusive_ptr<Accumulator> AccumulatorLast::create(
+ const intrusive_ptr<ExpressionContext> &pCtx) {
+ intrusive_ptr<AccumulatorLast> pAccumulator(
+ new AccumulatorLast());
+ return pAccumulator;
+ }
+
+ const char *AccumulatorLast::getOpName() const {
+ return "$last";
+ }
+}
diff --git a/src/mongo/db/pipeline/accumulator_min_max.cpp b/src/mongo/db/pipeline/accumulator_min_max.cpp
new file mode 100755
index 00000000000..6f078187b44
--- /dev/null
+++ b/src/mongo/db/pipeline/accumulator_min_max.cpp
@@ -0,0 +1,67 @@
+/**
+ * Copyright (c) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "accumulator.h"
+
+#include "db/pipeline/value.h"
+
+namespace mongo {
+
+ intrusive_ptr<const Value> AccumulatorMinMax::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ assert(vpOperand.size() == 1);
+ intrusive_ptr<const Value> prhs(vpOperand[0]->evaluate(pDocument));
+
+ /* if this is the first value, just use it */
+ if (!pValue.get())
+ pValue = prhs;
+ else {
+ /* compare with the current value; swap if appropriate */
+ int cmp = Value::compare(pValue, prhs) * sense;
+ if (cmp > 0)
+ pValue = prhs;
+ }
+
+ return pValue;
+ }
+
+ AccumulatorMinMax::AccumulatorMinMax(int theSense):
+ AccumulatorSingleValue(),
+ sense(theSense) {
+ assert((sense == 1) || (sense == -1));
+ }
+
+ intrusive_ptr<Accumulator> AccumulatorMinMax::createMin(
+ const intrusive_ptr<ExpressionContext> &pCtx) {
+ intrusive_ptr<AccumulatorMinMax> pAccumulator(
+ new AccumulatorMinMax(1));
+ return pAccumulator;
+ }
+
+ intrusive_ptr<Accumulator> AccumulatorMinMax::createMax(
+ const intrusive_ptr<ExpressionContext> &pCtx) {
+ intrusive_ptr<AccumulatorMinMax> pAccumulator(
+ new AccumulatorMinMax(-1));
+ return pAccumulator;
+ }
+
+ const char *AccumulatorMinMax::getOpName() const {
+ if (sense == 1)
+ return "$min";
+ return "$max";
+ }
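+
+    /*
+      Illustrative example of the "sense" mechanism above (not invoked by
+      this code): with sense == 1, Value::compare(pValue, prhs) * sense is
+      positive when the current value is greater than the new one, so the
+      smaller value is kept ($min); with sense == -1 the test is inverted
+      and the larger value is kept ($max).  Over the inputs 5, 2, 9:
+
+          $min: 5 -> 2 -> 2    (result 2)
+          $max: 5 -> 5 -> 9    (result 9)
+    */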
+}
diff --git a/src/mongo/db/pipeline/accumulator_push.cpp b/src/mongo/db/pipeline/accumulator_push.cpp
new file mode 100755
index 00000000000..2640bc4ecfd
--- /dev/null
+++ b/src/mongo/db/pipeline/accumulator_push.cpp
@@ -0,0 +1,73 @@
+/**
+ * Copyright (c) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "accumulator.h"
+
+#include "db/pipeline/expression_context.h"
+#include "db/pipeline/value.h"
+
+namespace mongo {
+ intrusive_ptr<const Value> AccumulatorPush::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ assert(vpOperand.size() == 1);
+ intrusive_ptr<const Value> prhs(vpOperand[0]->evaluate(pDocument));
+
+ if (prhs->getType() == Undefined)
+ ; /* nothing to add to the array */
+ else if (!pCtx->getInRouter())
+ vpValue.push_back(prhs);
+ else {
+ /*
+ If we're in the router, we need to take apart the arrays we
+ receive and put their elements into the array we are collecting.
+ If we didn't, then we'd get an array of arrays, with one array
+ from each shard that responds.
+ */
+ assert(prhs->getType() == Array);
+
+ intrusive_ptr<ValueIterator> pvi(prhs->getArray());
+ while(pvi->more()) {
+ intrusive_ptr<const Value> pElement(pvi->next());
+ vpValue.push_back(pElement);
+ }
+ }
+
+ return Value::getNull();
+ }
+
+ intrusive_ptr<const Value> AccumulatorPush::getValue() const {
+ return Value::createArray(vpValue);
+ }
+
+ AccumulatorPush::AccumulatorPush(
+ const intrusive_ptr<ExpressionContext> &pTheCtx):
+ Accumulator(),
+ vpValue(),
+ pCtx(pTheCtx) {
+ }
+
+ intrusive_ptr<Accumulator> AccumulatorPush::create(
+ const intrusive_ptr<ExpressionContext> &pCtx) {
+ intrusive_ptr<AccumulatorPush> pAccumulator(
+ new AccumulatorPush(pCtx));
+ return pAccumulator;
+ }
+
+ const char *AccumulatorPush::getOpName() const {
+ return "$push";
+ }
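+
+    /*
+      Illustrative sketch of the shard/router behavior above (assumes the
+      usual sharded $group flow): each shard's $push accumulator produces an
+      array, e.g. shard A -> [1, 2] and shard B -> [3].  When running in the
+      router (pCtx->getInRouter()), those arrays arrive as the operand
+      values, and their elements are appended individually, yielding
+      [1, 2, 3] rather than [[1, 2], [3]].
+    */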
+}
diff --git a/src/mongo/db/pipeline/accumulator_single_value.cpp b/src/mongo/db/pipeline/accumulator_single_value.cpp
new file mode 100755
index 00000000000..bfec80387d3
--- /dev/null
+++ b/src/mongo/db/pipeline/accumulator_single_value.cpp
@@ -0,0 +1,32 @@
+/**
+ * Copyright (c) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "accumulator.h"
+
+#include "db/pipeline/value.h"
+
+namespace mongo {
+
+ intrusive_ptr<const Value> AccumulatorSingleValue::getValue() const {
+ return pValue;
+ }
+
+ AccumulatorSingleValue::AccumulatorSingleValue():
+ pValue(intrusive_ptr<const Value>()) {
+ }
+
+}
diff --git a/src/mongo/db/pipeline/accumulator_sum.cpp b/src/mongo/db/pipeline/accumulator_sum.cpp
new file mode 100755
index 00000000000..e6526ac254a
--- /dev/null
+++ b/src/mongo/db/pipeline/accumulator_sum.cpp
@@ -0,0 +1,74 @@
+/**
+ * Copyright (c) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "accumulator.h"
+
+#include "db/pipeline/value.h"
+
+namespace mongo {
+
+ intrusive_ptr<const Value> AccumulatorSum::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ assert(vpOperand.size() == 1);
+ intrusive_ptr<const Value> prhs(vpOperand[0]->evaluate(pDocument));
+
+ /* upgrade to the widest type required to hold the result */
+ totalType = Value::getWidestNumeric(totalType, prhs->getType());
+
+ if (totalType == NumberInt) {
+ int v = prhs->coerceToInt();
+ longTotal += v;
+ doubleTotal += v;
+ }
+ else if (totalType == NumberLong) {
+ long long v = prhs->coerceToLong();
+ longTotal += v;
+ doubleTotal += v;
+ }
+ else { /* (totalType == NumberDouble) */
+ double v = prhs->coerceToDouble();
+ doubleTotal += v;
+ }
+
+ return Value::getZero();
+ }
+
+ intrusive_ptr<Accumulator> AccumulatorSum::create(
+ const intrusive_ptr<ExpressionContext> &pCtx) {
+ intrusive_ptr<AccumulatorSum> pSummer(new AccumulatorSum());
+ return pSummer;
+ }
+
+ intrusive_ptr<const Value> AccumulatorSum::getValue() const {
+ if (totalType == NumberInt)
+ return Value::createInt((int)longTotal);
+ if (totalType == NumberLong)
+ return Value::createLong(longTotal);
+ return Value::createDouble(doubleTotal);
+ }
+
+ AccumulatorSum::AccumulatorSum():
+ Accumulator(),
+ totalType(NumberInt),
+ longTotal(0),
+ doubleTotal(0) {
+ }
+
+ const char *AccumulatorSum::getOpName() const {
+ return "$sum";
+ }
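+
+    /*
+      Illustrative example of the widening above (not invoked by this code):
+      totalType starts as NumberInt and is widened by getWidestNumeric() as
+      values arrive.  Summing 1, 2, 3 stays NumberInt and getValue() returns
+      createInt(6); summing 1, 2.5 widens to NumberDouble and getValue()
+      returns createDouble(3.5).  longTotal and doubleTotal are both
+      maintained for the integer cases so the widening can happen at any
+      point without losing the running total.
+    */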
+}
diff --git a/src/mongo/db/pipeline/builder.cpp b/src/mongo/db/pipeline/builder.cpp
new file mode 100755
index 00000000000..cbde3705656
--- /dev/null
+++ b/src/mongo/db/pipeline/builder.cpp
@@ -0,0 +1,117 @@
+/**
+ * Copyright (c) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+
+#include "db/jsobj.h"
+#include "db/pipeline/builder.h"
+
+
+namespace mongo {
+
+ void BuilderObj::append() {
+ pBuilder->appendNull(fieldName);
+ }
+
+ void BuilderObj::append(bool b) {
+ pBuilder->append(fieldName, b);
+ }
+
+ void BuilderObj::append(int i) {
+ pBuilder->append(fieldName, i);
+ }
+
+ void BuilderObj::append(long long ll) {
+ pBuilder->append(fieldName, ll);
+ }
+
+ void BuilderObj::append(double d) {
+ pBuilder->append(fieldName, d);
+ }
+
+ void BuilderObj::append(string s) {
+ pBuilder->append(fieldName, s);
+ }
+
+ void BuilderObj::append(const OID &o) {
+ pBuilder->append(fieldName, o);
+ }
+
+ void BuilderObj::append(const Date_t &d) {
+ pBuilder->append(fieldName, d);
+ }
+
+ void BuilderObj::append(BSONObjBuilder *pDone) {
+ pBuilder->append(fieldName, pDone->done());
+ }
+
+ void BuilderObj::append(BSONArrayBuilder *pDone) {
+ pBuilder->append(fieldName, pDone->arr());
+ }
+
+ BuilderObj::BuilderObj(
+ BSONObjBuilder *pObjBuilder, string theFieldName):
+ pBuilder(pObjBuilder),
+ fieldName(theFieldName) {
+ }
+
+
+ void BuilderArray::append() {
+ pBuilder->appendNull();
+ }
+
+ void BuilderArray::append(bool b) {
+ pBuilder->append(b);
+ }
+
+ void BuilderArray::append(int i) {
+ pBuilder->append(i);
+ }
+
+ void BuilderArray::append(long long ll) {
+ pBuilder->append(ll);
+ }
+
+ void BuilderArray::append(double d) {
+ pBuilder->append(d);
+ }
+
+ void BuilderArray::append(string s) {
+ pBuilder->append(s);
+ }
+
+ void BuilderArray::append(const OID &o) {
+ pBuilder->append(o);
+ }
+
+ void BuilderArray::append(const Date_t &d) {
+ pBuilder->append(d);
+ }
+
+ void BuilderArray::append(BSONObjBuilder *pDone) {
+ pBuilder->append(pDone->done());
+ }
+
+ void BuilderArray::append(BSONArrayBuilder *pDone) {
+ pBuilder->append(pDone->arr());
+ }
+
+ BuilderArray::BuilderArray(
+ BSONArrayBuilder *pArrayBuilder):
+ pBuilder(pArrayBuilder) {
+ }
+
+}
diff --git a/src/mongo/db/pipeline/builder.h b/src/mongo/db/pipeline/builder.h
new file mode 100755
index 00000000000..bdf71cd784c
--- /dev/null
+++ b/src/mongo/db/pipeline/builder.h
@@ -0,0 +1,95 @@
+/**
+ * Copyright (c) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "pch.h"
+
+namespace mongo {
+
+ class BSONArrayBuilder;
+ class BSONObjBuilder;
+
+ /*
+ Generic Builder.
+
+ The methods to append items to an object (on BSONObjBuilder) and an array
+ (on BSONArrayBuilder) differ only by their inclusion of a field name.
+      For more complicated implementations of addToBsonObj() and
+      addToBsonArray(), it makes sense to abstract that difference away:
+      implement both in terms of a common method that writes through this
+      generic Builder, which presents the same interface in either case.
+ */
+ class Builder :
+ boost::noncopyable {
+ public:
+ virtual ~Builder() {};
+
+ virtual void append() = 0; // append a null
+ virtual void append(bool b) = 0;
+ virtual void append(int i) = 0;
+ virtual void append(long long ll) = 0;
+ virtual void append(double d) = 0;
+ virtual void append(string s) = 0;
+ virtual void append(const OID &o) = 0;
+ virtual void append(const Date_t &d) = 0;
+ virtual void append(BSONObjBuilder *pDone) = 0;
+ virtual void append(BSONArrayBuilder *pDone) = 0;
+ };
+
+ class BuilderObj :
+ public Builder {
+ public:
+ // virtuals from Builder
+ virtual void append();
+ virtual void append(bool b);
+ virtual void append(int i);
+ virtual void append(long long ll);
+ virtual void append(double d);
+ virtual void append(string s);
+ virtual void append(const OID &o);
+ virtual void append(const Date_t &d);
+ virtual void append(BSONObjBuilder *pDone);
+ virtual void append(BSONArrayBuilder *pDone);
+
+ BuilderObj(BSONObjBuilder *pBuilder, string fieldName);
+
+ private:
+ BSONObjBuilder *pBuilder;
+ string fieldName;
+ };
+
+ class BuilderArray :
+ public Builder {
+ public:
+ // virtuals from Builder
+ virtual void append();
+ virtual void append(bool b);
+ virtual void append(int i);
+ virtual void append(long long ll);
+ virtual void append(double d);
+ virtual void append(string s);
+ virtual void append(const OID &o);
+ virtual void append(const Date_t &d);
+ virtual void append(BSONObjBuilder *pDone);
+ virtual void append(BSONArrayBuilder *pDone);
+
+ BuilderArray(BSONArrayBuilder *pBuilder);
+
+ private:
+ BSONArrayBuilder *pBuilder;
+ };
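+
+    /*
+      Usage sketch (illustrative only; the builders and field name here are
+      hypothetical):
+
+          BSONObjBuilder objBuilder;
+          BuilderObj objTarget(&objBuilder, "total");
+          objTarget.append(5);              // yields { total: 5 }
+
+          BSONArrayBuilder arrBuilder;
+          BuilderArray arrTarget(&arrBuilder);
+          arrTarget.append(5);              // yields [ 5 ]
+
+      Code that writes through a Builder reference need not know which of
+      the two it is appending to.
+    */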
+}
diff --git a/src/mongo/db/pipeline/doc_mem_monitor.cpp b/src/mongo/db/pipeline/doc_mem_monitor.cpp
new file mode 100755
index 00000000000..ffbe9c88854
--- /dev/null
+++ b/src/mongo/db/pipeline/doc_mem_monitor.cpp
@@ -0,0 +1,68 @@
+/**
+ * Copyright (c) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "db/pipeline/doc_mem_monitor.h"
+#include "util/systeminfo.h"
+
+namespace mongo {
+
+ DocMemMonitor::DocMemMonitor(StringWriter *pW) {
+ /*
+ Use the default values.
+
+          Currently, we log a warning at 5%, and assert at 10%.
+ */
+ size_t errorRam = SystemInfo::getPhysicalRam() / 10;
+ size_t warnRam = errorRam / 2;
+
+ init(pW, warnRam, errorRam);
+ }
+
+ DocMemMonitor::DocMemMonitor(StringWriter *pW,
+ size_t warnLimit, size_t errorLimit) {
+ init(pW, warnLimit, errorLimit);
+ }
+
+ void DocMemMonitor::addToTotal(size_t amount) {
+ totalUsed += amount;
+
+ if (!warned) {
+ if (warnLimit && (totalUsed > warnLimit)) {
+ stringstream ss;
+ ss << "warning, 5% of physical RAM used for ";
+ pWriter->writeString(ss);
+ ss << endl;
+ warning() << ss.str();
+ warned = true;
+ }
+ }
+
+ if (errorLimit) {
+ uassert(15944, "terminating request: request heap use exceeded 10% of physical RAM", (totalUsed <= errorLimit));
+ }
+ }
+
+ void DocMemMonitor::init(StringWriter *pW,
+ size_t warnLimit, size_t errorLimit) {
+ this->pWriter = pW;
+ this->warnLimit = warnLimit;
+ this->errorLimit = errorLimit;
+
+ warned = false;
+ totalUsed = 0;
+ }
+}
diff --git a/src/mongo/db/pipeline/doc_mem_monitor.h b/src/mongo/db/pipeline/doc_mem_monitor.h
new file mode 100755
index 00000000000..e368acc906a
--- /dev/null
+++ b/src/mongo/db/pipeline/doc_mem_monitor.h
@@ -0,0 +1,94 @@
+/**
+ * Copyright 2011 (c) 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "pch.h"
+#include "util/string_writer.h"
+
+
+namespace mongo {
+
+ /*
+ This utility class provides an easy way to total up, monitor, warn, and
+ signal an error when the amount of memory used for an operation exceeds
+ given thresholds.
+
+ Create a local instance of this class, and then inform it of any memory
+ that you consume using addToTotal().
+
+ Warnings or errors are issued as usage exceeds certain fractions of
+ physical memory on the host, as determined by SystemInfo.
+
+ This class is not guaranteed to warn or signal errors if the host system
+ does not support the ability to report its memory, as per the warnings
+ for SystemInfo in systeminfo.h.
+ */
+ class DocMemMonitor {
+ public:
+ /*
+ Constructor.
+
+ Uses default limits for warnings and errors.
+
+ The StringWriter parameter must outlive the DocMemMonitor instance.
+
+ @param pWriter string writer that provides information about the
+ operation being monitored
+ */
+ DocMemMonitor(StringWriter *pWriter);
+
+ /*
+ Constructor.
+
+ This variant allows explicit selection of the limits. Note that
+ limits of zero are treated as infinite.
+
+ The StringWriter parameter must outlive the DocMemMonitor instance.
+
+ @param pWriter string writer that provides information about the
+ operation being monitored
+          @param warnLimit the amount of RAM use at which to issue (log) a warning
+          @param errorLimit the amount of RAM use at which to throw an error
+ */
+ DocMemMonitor(StringWriter *pWriter, size_t warnLimit,
+ size_t errorLimit);
+
+ /*
+ Increment the total amount of memory used by the given amount. If
+ the warning threshold is exceeded, a warning will be logged. If the
+ error threshold is exceeded, an error will be thrown.
+
+ @param amount the amount of memory to add to the current total
+ */
+ void addToTotal(size_t amount);
+
+ private:
+ /*
+ Real constructor body.
+
+ Provides common construction for all the variant constructors.
+ */
+ void init(StringWriter *pW, size_t warnLimit, size_t errorLimit);
+
+ bool warned;
+ size_t totalUsed;
+ size_t warnLimit;
+ size_t errorLimit;
+ StringWriter *pWriter;
+ };
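+
+    /*
+      Usage sketch (illustrative only; pWriter and pDocument are assumed to
+      exist in the caller):
+
+          DocMemMonitor memMonitor(pWriter);  // default 5%/10% thresholds
+          // for each document retained by the operation:
+          memMonitor.addToTotal(pDocument->getApproximateSize());
+
+      addToTotal() logs a warning the first time the warning threshold is
+      crossed, and uasserts once the error threshold is exceeded.
+    */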
+
+}
diff --git a/src/mongo/db/pipeline/document.cpp b/src/mongo/db/pipeline/document.cpp
new file mode 100755
index 00000000000..a49c7e303c1
--- /dev/null
+++ b/src/mongo/db/pipeline/document.cpp
@@ -0,0 +1,219 @@
+/**
+ * Copyright (c) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "db/jsobj.h"
+#include "db/pipeline/document.h"
+#include "db/pipeline/value.h"
+#include "util/mongoutils/str.h"
+
+namespace mongo {
+ using namespace mongoutils;
+
+ string Document::idName("_id");
+
+ intrusive_ptr<Document> Document::createFromBsonObj(BSONObj *pBsonObj) {
+ intrusive_ptr<Document> pDocument(new Document(pBsonObj));
+ return pDocument;
+ }
+
+ Document::Document(BSONObj *pBsonObj):
+ vFieldName(),
+ vpValue() {
+ BSONObjIterator bsonIterator(pBsonObj->begin());
+ while(bsonIterator.more()) {
+ BSONElement bsonElement(bsonIterator.next());
+ string fieldName(bsonElement.fieldName());
+ intrusive_ptr<const Value> pValue(
+ Value::createFromBsonElement(&bsonElement));
+
+ vFieldName.push_back(fieldName);
+ vpValue.push_back(pValue);
+ }
+ }
+
+ void Document::toBson(BSONObjBuilder *pBuilder) {
+ const size_t n = vFieldName.size();
+ for(size_t i = 0; i < n; ++i)
+ vpValue[i]->addToBsonObj(pBuilder, vFieldName[i]);
+ }
+
+ intrusive_ptr<Document> Document::create(size_t sizeHint) {
+ intrusive_ptr<Document> pDocument(new Document(sizeHint));
+ return pDocument;
+ }
+
+ Document::Document(size_t sizeHint):
+ vFieldName(),
+ vpValue() {
+ if (sizeHint) {
+ vFieldName.reserve(sizeHint);
+ vpValue.reserve(sizeHint);
+ }
+ }
+
+ intrusive_ptr<Document> Document::clone() {
+ const size_t n = vFieldName.size();
+ intrusive_ptr<Document> pNew(Document::create(n));
+ for(size_t i = 0; i < n; ++i)
+ pNew->addField(vFieldName[i], vpValue[i]);
+
+ return pNew;
+ }
+
+ Document::~Document() {
+ }
+
+ FieldIterator *Document::createFieldIterator() {
+ return new FieldIterator(intrusive_ptr<Document>(this));
+ }
+
+ intrusive_ptr<const Value> Document::getValue(const string &fieldName) {
+ /*
+ For now, assume the number of fields is small enough that iteration
+ is ok. Later, if this gets large, we can create a map into the
+ vector for these lookups.
+
+ Note that because of the schema-less nature of this data, we always
+ have to look, and can't assume that the requested field is always
+ in a particular place as we would with a statically compilable
+ reference.
+ */
+ const size_t n = vFieldName.size();
+ for(size_t i = 0; i < n; ++i) {
+ if (fieldName.compare(vFieldName[i]) == 0)
+ return vpValue[i];
+ }
+
+ return(intrusive_ptr<const Value>());
+ }
+
+ void Document::addField(const string &fieldName,
+ const intrusive_ptr<const Value> &pValue) {
+ uassert(15945, str::stream() << "cannot add undefined field " <<
+ fieldName << " to document", pValue->getType() != Undefined);
+
+ vFieldName.push_back(fieldName);
+ vpValue.push_back(pValue);
+ }
+
+ void Document::setField(size_t index,
+ const string &fieldName,
+ const intrusive_ptr<const Value> &pValue) {
+ /* special case: should this field be removed? */
+ if (!pValue.get()) {
+ vFieldName.erase(vFieldName.begin() + index);
+ vpValue.erase(vpValue.begin() + index);
+ return;
+ }
+
+ /* make sure we have a valid value */
+ uassert(15968, str::stream() << "cannot set undefined field " <<
+ fieldName << " to document", pValue->getType() != Undefined);
+
+ /* set the indicated field */
+ vFieldName[index] = fieldName;
+ vpValue[index] = pValue;
+ }
+
+ intrusive_ptr<const Value> Document::getField(const string &fieldName) const {
+ const size_t n = vFieldName.size();
+ for(size_t i = 0; i < n; ++i) {
+ if (fieldName.compare(vFieldName[i]) == 0)
+ return vpValue[i];
+ }
+
+ /* if we got here, there's no such field */
+ return intrusive_ptr<const Value>();
+ }
+
+ size_t Document::getApproximateSize() const {
+ size_t size = sizeof(Document);
+ const size_t n = vpValue.size();
+ for(size_t i = 0; i < n; ++i)
+ size += vpValue[i]->getApproximateSize();
+
+ return size;
+ }
+
+ size_t Document::getFieldIndex(const string &fieldName) const {
+ const size_t n = vFieldName.size();
+ size_t i = 0;
+ for(; i < n; ++i) {
+ if (fieldName.compare(vFieldName[i]) == 0)
+ break;
+ }
+
+ return i;
+ }
+
+ void Document::hash_combine(size_t &seed) const {
+ const size_t n = vFieldName.size();
+ for(size_t i = 0; i < n; ++i) {
+ boost::hash_combine(seed, vFieldName[i]);
+ vpValue[i]->hash_combine(seed);
+ }
+ }
+
+ int Document::compare(const intrusive_ptr<Document> &rL,
+ const intrusive_ptr<Document> &rR) {
+ const size_t lSize = rL->vFieldName.size();
+ const size_t rSize = rR->vFieldName.size();
+
+ for(size_t i = 0; true; ++i) {
+ if (i >= lSize) {
+ if (i >= rSize)
+ return 0; // documents are the same length
+
+ return -1; // left document is shorter
+ }
+
+ if (i >= rSize)
+ return 1; // right document is shorter
+
+ const int nameCmp = rL->vFieldName[i].compare(rR->vFieldName[i]);
+ if (nameCmp)
+ return nameCmp; // field names are unequal
+
+ const int valueCmp = Value::compare(rL->vpValue[i], rR->vpValue[i]);
+ if (valueCmp)
+ return valueCmp; // fields are unequal
+ }
+
+ /* NOTREACHED */
+ assert(false);
+ return 0;
+ }
+
+ /* ----------------------- FieldIterator ------------------------------- */
+
+ FieldIterator::FieldIterator(const intrusive_ptr<Document> &pTheDocument):
+ pDocument(pTheDocument),
+ index(0) {
+ }
+
+ bool FieldIterator::more() const {
+ return (index < pDocument->vFieldName.size());
+ }
+
+ pair<string, intrusive_ptr<const Value> > FieldIterator::next() {
+ assert(more());
+ pair<string, intrusive_ptr<const Value> > result(
+ pDocument->vFieldName[index], pDocument->vpValue[index]);
+ ++index;
+ return result;
+ }
+}
diff --git a/src/mongo/db/pipeline/document.h b/src/mongo/db/pipeline/document.h
new file mode 100755
index 00000000000..f11a825151e
--- /dev/null
+++ b/src/mongo/db/pipeline/document.h
@@ -0,0 +1,246 @@
+/**
+ * Copyright 2011 (c) 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "pch.h"
+
+#include "util/intrusive_counter.h"
+
+namespace mongo {
+ class BSONObj;
+ class FieldIterator;
+ class Value;
+
+ class Document :
+ public IntrusiveCounterUnsigned {
+ public:
+ ~Document();
+
+ /*
+ Create a new Document from the given BSONObj.
+
+ Document field values may be pointed to in the BSONObj, so it
+ must live at least as long as the resulting Document.
+
+ @returns shared pointer to the newly created Document
+ */
+ static intrusive_ptr<Document> createFromBsonObj(BSONObj *pBsonObj);
+
+ /*
+ Create a new empty Document.
+
+ @param sizeHint a hint at what the number of fields will be; if
+ known, this can be used to increase memory allocation efficiency
+ @returns shared pointer to the newly created Document
+ */
+ static intrusive_ptr<Document> create(size_t sizeHint = 0);
+
+ /*
+ Clone a document.
+
+ The new document shares all the fields' values with the original.
+
+ This is not a deep copy. Only the fields on the top-level document
+ are cloned.
+
+ @returns the shallow clone of the document
+ */
+ intrusive_ptr<Document> clone();
+
+ /*
+ Add this document to the BSONObj under construction with the
+ given BSONObjBuilder.
+ */
+ void toBson(BSONObjBuilder *pBsonObjBuilder);
+
+ /*
+ Create a new FieldIterator that can be used to examine the
+ Document's fields.
+ */
+ FieldIterator *createFieldIterator();
+
+ /*
+ Get the value of the specified field.
+
+ @param fieldName the name of the field
+          @returns a pointer to the requested field's Value
+ */
+ intrusive_ptr<const Value> getValue(const string &fieldName);
+
+ /*
+ Add the given field to the Document.
+
+          BSON documents' fields are ordered; the new field will be
+          appended to the current list of fields.
+
+ It is an error to add a field that has the same name as another
+ field.
+ */
+ void addField(const string &fieldName,
+ const intrusive_ptr<const Value> &pValue);
+
+ /*
+ Set the given field to be at the specified position in the
+ Document. This will replace any field that is currently in that
+ position. The index must be within the current range of field
+ indices.
+
+ pValue.get() may be NULL, in which case the field will be
+ removed. fieldName is ignored in this case.
+
+ @param index the field index in the list of fields
+ @param fieldName the new field name
+ @param pValue the new Value
+ */
+ void setField(size_t index,
+ const string &fieldName,
+ const intrusive_ptr<const Value> &pValue);
+
+ /*
+ Convenience type for dealing with fields.
+ */
+ typedef pair<string, intrusive_ptr<const Value> > FieldPair;
+
+ /*
+ Get the indicated field.
+
+ @param index the field index in the list of fields
+ @returns the field name and value of the field
+ */
+ FieldPair getField(size_t index) const;
+
+ /*
+ Get the number of fields in the Document.
+
+ @returns the number of fields in the Document
+ */
+ size_t getFieldCount() const;
+
+ /*
+ Get the index of the given field.
+
+ @param fieldName the name of the field
+ @returns the index of the field, or if it does not exist, the number
+ of fields (getFieldCount())
+ */
+ size_t getFieldIndex(const string &fieldName) const;
+
+ /*
+ Get a field by name.
+
+ @param fieldName the name of the field
+ @returns the value of the field
+ */
+ intrusive_ptr<const Value> getField(const string &fieldName) const;
+
+ /*
+ Get the approximate storage size of the document, in bytes.
+
+ Under the assumption that field name strings are shared, they are
+ not included in the total.
+
+          @returns the approximate storage size, in bytes
+ */
+ size_t getApproximateSize() const;
+
+ /*
+ Compare two documents.
+
+ BSON document field order is significant, so this just goes through
+ the fields in order. The comparison is done in roughly the same way
+ as strings are compared, but comparing one field at a time instead
+ of one character at a time.
+ */
+ static int compare(const intrusive_ptr<Document> &rL,
+ const intrusive_ptr<Document> &rR);
+
+ static string idName; // shared "_id"
+
+ /*
+ Calculate a hash value.
+
+ Meant to be used to create composite hashes suitable for
+ boost classes such as unordered_map<>.
+
+ @param seed value to augment with this' hash
+ */
+ void hash_combine(size_t &seed) const;
+
+ private:
+ friend class FieldIterator;
+
+ Document(size_t sizeHint);
+ Document(BSONObj *pBsonObj);
+
+ /* these two vectors parallel each other */
+ vector<string> vFieldName;
+ vector<intrusive_ptr<const Value> > vpValue;
+ };
+
+
+ class FieldIterator :
+ boost::noncopyable {
+ public:
+ /*
+ Ask if there are more fields to return.
+
+ @return true if there are more fields, false otherwise
+ */
+ bool more() const;
+
+ /*
+ Move the iterator to point to the next field and return it.
+
+ @return the next field's <name, Value>
+ */
+ Document::FieldPair next();
+
+ private:
+ friend class Document;
+
+ /*
+ Constructor.
+
+ @param pDocument points to the document whose fields are being
+ iterated
+ */
+ FieldIterator(const intrusive_ptr<Document> &pDocument);
+
+ /*
+ We'll hang on to the original document to ensure we keep the
+          field name and value vectors alive.
+ */
+ intrusive_ptr<Document> pDocument;
+ size_t index; // current field in iteration
+ };
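+
+    /*
+      Usage sketch (illustrative only; bsonObj is a hypothetical source
+      object that must outlive the Document, per createFromBsonObj()):
+
+          intrusive_ptr<Document> pDoc(Document::createFromBsonObj(&bsonObj));
+          boost::scoped_ptr<FieldIterator> pIter(pDoc->createFieldIterator());
+          while(pIter->more()) {
+              Document::FieldPair field(pIter->next());
+              // field.first is the field name, field.second is its Value
+          }
+    */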
+}
+
+
+/* ======================= INLINED IMPLEMENTATIONS ========================== */
+
+namespace mongo {
+
+ inline size_t Document::getFieldCount() const {
+ return vFieldName.size();
+ }
+
+ inline Document::FieldPair Document::getField(size_t index) const {
+ assert( index < vFieldName.size() );
+ return FieldPair(vFieldName[index], vpValue[index]);
+ }
+
+}
diff --git a/src/mongo/db/pipeline/document_source.cpp b/src/mongo/db/pipeline/document_source.cpp
new file mode 100755
index 00000000000..813852e35c6
--- /dev/null
+++ b/src/mongo/db/pipeline/document_source.cpp
@@ -0,0 +1,52 @@
+/**
+* Copyright (C) 2011 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+
+#include "db/pipeline/document_source.h"
+
+namespace mongo {
+ DocumentSource::~DocumentSource() {
+ }
+
+ void DocumentSource::setSource(
+ const intrusive_ptr<DocumentSource> &pTheSource) {
+ assert(!pSource.get());
+ pSource = pTheSource;
+ }
+
+ bool DocumentSource::coalesce(
+ const intrusive_ptr<DocumentSource> &pNextSource) {
+ return false;
+ }
+
+ void DocumentSource::optimize() {
+ }
+
+ void DocumentSource::addToBsonArray(BSONArrayBuilder *pBuilder) const {
+ BSONObjBuilder insides;
+ sourceToBson(&insides);
+ pBuilder->append(insides.done());
+ }
+
+ void DocumentSource::writeString(stringstream &ss) const {
+ BSONArrayBuilder bab;
+ addToBsonArray(&bab);
+ BSONArray ba(bab.arr());
+ ss << ba.toString(/* isArray */true);
+ // our toString should use standard string types.....
+ }
+}
diff --git a/src/mongo/db/pipeline/document_source.h b/src/mongo/db/pipeline/document_source.h
new file mode 100755
index 00000000000..8d5f0f70847
--- /dev/null
+++ b/src/mongo/db/pipeline/document_source.h
@@ -0,0 +1,985 @@
+/**
+ * Copyright (c) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "pch.h"
+
+#include <boost/unordered_map.hpp>
+#include "util/intrusive_counter.h"
+#include "client/parallel.h"
+#include "db/jsobj.h"
+#include "db/pipeline/document.h"
+#include "db/pipeline/expression.h"
+#include "db/pipeline/value.h"
+#include "util/string_writer.h"
+
+namespace mongo {
+ class Accumulator;
+ class Cursor;
+ class Document;
+ class Expression;
+ class ExpressionContext;
+ class ExpressionFieldPath;
+ class ExpressionObject;
+ class Matcher;
+
+ class DocumentSource :
+ public IntrusiveCounterUnsigned,
+ public StringWriter {
+ public:
+ virtual ~DocumentSource();
+
+ // virtuals from StringWriter
+ /*
+ Write out a string representation of this pipeline operator.
+
+ @param ss string stream to write the string representation to
+ */
+ virtual void writeString(stringstream &ss) const;
+
+
+ /*
+ Is the source at EOF?
+
+ @returns true if the source has no more Documents to return.
+ */
+ virtual bool eof() = 0;
+
+ /*
+ Advance the state of the DocumentSource so that it will return the
+ next Document.
+
+ @returns whether there is another document to fetch, i.e., whether or
+ not getCurrent() will succeed.
+ */
+ virtual bool advance() = 0;
+
+ /*
+          Return the Document at the source's current position.
+
+          @returns the current Document
+          TODO: throw an exception if there are no more documents to return.
+ */
+ virtual intrusive_ptr<Document> getCurrent() = 0;
+
+ /*
+ Set the underlying source this source should use to get Documents
+ from.
+
+ It is an error to set the source more than once. This is to
+ prevent changing sources once the original source has been started;
+ this could break the state maintained by the DocumentSource.
+
+ @param pSource the underlying source to use
+ */
+ virtual void setSource(const intrusive_ptr<DocumentSource> &pSource);
+
+ /*
+ Attempt to coalesce this DocumentSource with its successor in the
+ document processing pipeline. If successful, the successor
+ DocumentSource should be removed from the pipeline and discarded.
+
+ If successful, this operation can be applied repeatedly, in an
+ attempt to coalesce several sources together.
+
+ The default implementation is to do nothing, and return false.
+
+ @param pNextSource the next source in the document processing chain.
+          @returns whether the attempt to coalesce was successful;
+ if the attempt was not successful, nothing has been changed
+ */
+ virtual bool coalesce(const intrusive_ptr<DocumentSource> &pNextSource);
+
+ /*
+ Optimize the pipeline operation, if possible. This is a local
+ optimization that only looks within this DocumentSource. For best
+ results, first coalesce compatible sources using coalesce().
+
+ This is intended for any operations that include expressions, and
+ provides a hook for those to optimize those operations.
+
+ The default implementation is to do nothing.
+ */
+ virtual void optimize();
+
+ /*
+ Add the DocumentSource to the array builder.
+
+ The default implementation calls sourceToBson() in order to
+ convert the inner part of the object which will be added to the
+ array being built here.
+
+ @param pBuilder the array builder to add the operation to.
+ */
+ virtual void addToBsonArray(BSONArrayBuilder *pBuilder) const;
+
+ protected:
+ /*
+ Create an object that represents the document source. The object
+ will have a single field whose name is the source's name. This
+ will be used by the default implementation of addToBsonArray()
+ to add this object to a pipeline being represented in BSON.
+
+ @param pBuilder a blank object builder to write to
+ */
+ virtual void sourceToBson(BSONObjBuilder *pBuilder) const = 0;
+
+ /*
+ Most DocumentSources have an underlying source they get their data
+ from. This is a convenience for them.
+
+ The default implementation of setSource() sets this; if you don't
+ need a source, override that to assert(). The default is to
+ assert() if this has already been set.
+ */
+ intrusive_ptr<DocumentSource> pSource;
+ };
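+
+    /*
+      Consumption sketch (illustrative only): a downstream consumer
+      typically drains a source with a loop of the form
+
+          for(bool hasNext = !pSource->eof(); hasNext;
+                  hasNext = pSource->advance()) {
+              intrusive_ptr<Document> pDocument(pSource->getCurrent());
+              // ... use pDocument ...
+          }
+
+      so that getCurrent() is only called when eof()/advance() report that
+      a document is available.
+    */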
+
+
+ class DocumentSourceBsonArray :
+ public DocumentSource {
+ public:
+ // virtuals from DocumentSource
+ virtual ~DocumentSourceBsonArray();
+ virtual bool eof();
+ virtual bool advance();
+ virtual intrusive_ptr<Document> getCurrent();
+ virtual void setSource(const intrusive_ptr<DocumentSource> &pSource);
+
+ /*
+ Create a document source based on a BSON array.
+
+ This is usually put at the beginning of a chain of document sources
+ in order to fetch data from the database.
+
+ CAUTION: the BSON is not read until the source is used. Any
+ elements that appear after these documents must not be read until
+ this source is exhausted.
+
+ @param pBsonElement the BSON array to treat as a document source
+ @returns the newly created document source
+ */
+ static intrusive_ptr<DocumentSourceBsonArray> create(
+ BSONElement *pBsonElement);
+
+ protected:
+ // virtuals from DocumentSource
+ virtual void sourceToBson(BSONObjBuilder *pBuilder) const;
+
+ private:
+ DocumentSourceBsonArray(BSONElement *pBsonElement);
+
+ BSONObj embeddedObject;
+ BSONObjIterator arrayIterator;
+ BSONElement currentElement;
+ bool haveCurrent;
+ };
+
+
+ class DocumentSourceCommandFutures :
+ public DocumentSource {
+ public:
+ // virtuals from DocumentSource
+ virtual ~DocumentSourceCommandFutures();
+ virtual bool eof();
+ virtual bool advance();
+ virtual intrusive_ptr<Document> getCurrent();
+ virtual void setSource(const intrusive_ptr<DocumentSource> &pSource);
+
+ /* convenient shorthand for a commonly used type */
+ typedef list<shared_ptr<Future::CommandResult> > FuturesList;
+
+ /*
+ Create a DocumentSource that wraps a list of Command::Futures.
+
+ @param errmsg place to write error messages to; must exist for the
+ lifetime of the created DocumentSourceCommandFutures
+ @param pList the list of futures
+ */
+ static intrusive_ptr<DocumentSourceCommandFutures> create(
+ string &errmsg, FuturesList *pList);
+
+ protected:
+ // virtuals from DocumentSource
+ virtual void sourceToBson(BSONObjBuilder *pBuilder) const;
+
+ private:
+ DocumentSourceCommandFutures(string &errmsg, FuturesList *pList);
+
+ /*
+ Advance to the next document, setting pCurrent appropriately.
+
+ Adjusts pCurrent, pBsonSource, and iterator, as needed. On exit,
+ pCurrent is the Document to return, or NULL. If NULL, this
+ indicates there is nothing more to return.
+ */
+ void getNextDocument();
+
+ bool newSource; // set to true for the first item of a new source
+ intrusive_ptr<DocumentSourceBsonArray> pBsonSource;
+ intrusive_ptr<Document> pCurrent;
+ FuturesList::iterator iterator;
+ FuturesList::iterator listEnd;
+ string &errmsg;
+ };
+
+
+ class DocumentSourceCursor :
+ public DocumentSource {
+ public:
+ // virtuals from DocumentSource
+ virtual ~DocumentSourceCursor();
+ virtual bool eof();
+ virtual bool advance();
+ virtual intrusive_ptr<Document> getCurrent();
+ virtual void setSource(const intrusive_ptr<DocumentSource> &pSource);
+
+ /*
+ Create a document source based on a cursor.
+
+ This is usually put at the beginning of a chain of document sources
+ in order to fetch data from the database.
+
+ @param pCursor the cursor to use to fetch data
+ */
+ static intrusive_ptr<DocumentSourceCursor> create(
+ const shared_ptr<Cursor> &pCursor);
+
+ protected:
+ // virtuals from DocumentSource
+ virtual void sourceToBson(BSONObjBuilder *pBuilder) const;
+
+ private:
+ DocumentSourceCursor(const shared_ptr<Cursor> &pTheCursor);
+
+ void findNext();
+ shared_ptr<Cursor> pCursor;
+ intrusive_ptr<Document> pCurrent;
+ };
+
+
+ /*
+ This contains all the basic mechanics for filtering a stream of
+ Documents, except for the actual predicate evaluation itself. This was
+ factored out so we could create DocumentSources that use both Matcher
+ style predicates as well as full Expressions.
+ */
+ class DocumentSourceFilterBase :
+ public DocumentSource {
+ public:
+ // virtuals from DocumentSource
+ virtual ~DocumentSourceFilterBase();
+ virtual bool eof();
+ virtual bool advance();
+ virtual intrusive_ptr<Document> getCurrent();
+
+ /*
+ Create a BSONObj suitable for Matcher construction.
+
+ This is used after filter analysis has moved as many filters to
+ as early a point as possible in the document processing pipeline.
+ See db/Matcher.h and the associated wiki documentation for the
+ format. This conversion is used to move back to the low-level
+ find() Cursor mechanism.
+
+ @param pBuilder the builder to write to
+ */
+ virtual void toMatcherBson(BSONObjBuilder *pBuilder) const = 0;
+
+ protected:
+ DocumentSourceFilterBase();
+
+ /*
+ Test the given document against the predicate and report if it
+ should be accepted or not.
+
+ @param pDocument the document to test
+ @returns true if the document matches the filter, false otherwise
+ */
+ virtual bool accept(const intrusive_ptr<Document> &pDocument) const = 0;
+
+ private:
+
+ void findNext();
+
+ bool unstarted;
+ bool hasNext;
+ intrusive_ptr<Document> pCurrent;
+ };
+
+
+ class DocumentSourceFilter :
+ public DocumentSourceFilterBase {
+ public:
+ // virtuals from DocumentSource
+ virtual ~DocumentSourceFilter();
+ virtual bool coalesce(const intrusive_ptr<DocumentSource> &pNextSource);
+ virtual void optimize();
+
+ /*
+ Create a filter.
+
+ @param pBsonElement the raw BSON specification for the filter
+ @returns the filter
+ */
+ static intrusive_ptr<DocumentSource> createFromBson(
+ BSONElement *pBsonElement,
+ const intrusive_ptr<ExpressionContext> &pCtx);
+
+ /*
+ Create a filter.
+
+ @param pFilter the expression to use to filter
+ @returns the filter
+ */
+ static intrusive_ptr<DocumentSourceFilter> create(
+ const intrusive_ptr<Expression> &pFilter);
+
+ /*
+ Create a BSONObj suitable for Matcher construction.
+
+ This is used after filter analysis has moved as many filters to
+ as early a point as possible in the document processing pipeline.
+ See db/Matcher.h and the associated wiki documentation for the
+ format. This conversion is used to move back to the low-level
+ find() Cursor mechanism.
+
+ @param pBuilder the builder to write to
+ */
+ void toMatcherBson(BSONObjBuilder *pBuilder) const;
+
+ static const char filterName[];
+
+ protected:
+ // virtuals from DocumentSource
+ virtual void sourceToBson(BSONObjBuilder *pBuilder) const;
+
+ // virtuals from DocumentSourceFilterBase
+ virtual bool accept(const intrusive_ptr<Document> &pDocument) const;
+
+ private:
+ DocumentSourceFilter(const intrusive_ptr<Expression> &pFilter);
+
+ intrusive_ptr<Expression> pFilter;
+ };
+
+
+ class DocumentSourceGroup :
+ public DocumentSource {
+ public:
+ // virtuals from DocumentSource
+ virtual ~DocumentSourceGroup();
+ virtual bool eof();
+ virtual bool advance();
+ virtual intrusive_ptr<Document> getCurrent();
+
+ /*
+ Create a new grouping DocumentSource.
+
+ @param pCtx the expression context
+ @returns the DocumentSource
+ */
+ static intrusive_ptr<DocumentSourceGroup> create(
+ const intrusive_ptr<ExpressionContext> &pCtx);
+
+ /*
+ Set the Id Expression.
+
+          Documents that pass through the grouping DocumentSource are grouped
+          according to this key. This will generate the _id field in the
+ result documents.
+
+ @param pExpression the group key
+ */
+ void setIdExpression(const intrusive_ptr<Expression> &pExpression);
+
+ /*
+ Add an accumulator.
+
+ Accumulators become fields in the Documents that result from
+          grouping. Each unique group document must have its own
+ accumulator; the accumulator factory is used to create that.
+
+ @param fieldName the name the accumulator result will have in the
+ result documents
+ @param pAccumulatorFactory used to create the accumulator for the
+ group field
+ */
+ void addAccumulator(string fieldName,
+ intrusive_ptr<Accumulator> (*pAccumulatorFactory)(
+ const intrusive_ptr<ExpressionContext> &),
+ const intrusive_ptr<Expression> &pExpression);
+
+ /*
+ Create a grouping DocumentSource from BSON.
+
+ This is a convenience method that uses the above, and operates on
+          a BSONElement that has been determined to be an Object with an
+ element named $group.
+
+          @param pBsonElement the BSONElement that defines the group
+ @param pCtx the expression context
+ @returns the grouping DocumentSource
+ */
+ static intrusive_ptr<DocumentSource> createFromBson(
+ BSONElement *pBsonElement,
+ const intrusive_ptr<ExpressionContext> &pCtx);
+
+
+ /*
+ Create a unifying group that can be used to combine group results
+ from shards.
+
+ @returns the grouping DocumentSource
+ */
+ intrusive_ptr<DocumentSource> createMerger();
+
+ static const char groupName[];
+
+ protected:
+ // virtuals from DocumentSource
+ virtual void sourceToBson(BSONObjBuilder *pBuilder) const;
+
+ private:
+ DocumentSourceGroup(const intrusive_ptr<ExpressionContext> &pCtx);
+
+ /*
+ Before returning anything, this source must fetch everything from
+ the underlying source and group it. populate() is used to do that
+ on the first call to any method on this source. The populated
+ boolean indicates that this has been done.
+ */
+ void populate();
+ bool populated;
+
+ intrusive_ptr<Expression> pIdExpression;
+
+ typedef boost::unordered_map<intrusive_ptr<const Value>,
+ vector<intrusive_ptr<Accumulator> >, Value::Hash> GroupsType;
+ GroupsType groups;
+
+ /*
+ The field names for the result documents and the accumulator
+ factories for the result documents. The Expressions are the
+ common expressions used by each instance of each accumulator
+ in order to find the right-hand side of what gets added to the
+ accumulator. Note that each of those is the same for each group,
+ so we can share them across all groups by adding them to the
+ accumulators after we use the factories to make a new set of
+ accumulators for each new group.
+
+ These three vectors parallel each other.
+ */
+ vector<string> vFieldName;
+ vector<intrusive_ptr<Accumulator> (*)(
+ const intrusive_ptr<ExpressionContext> &)> vpAccumulatorFactory;
+ vector<intrusive_ptr<Expression> > vpExpression;
+
+
+ intrusive_ptr<Document> makeDocument(
+ const GroupsType::iterator &rIter);
+
+ GroupsType::iterator groupsIterator;
+ intrusive_ptr<Document> pCurrent;
+
+ intrusive_ptr<ExpressionContext> pCtx;
+ };
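+
+    /*
+      Assembly sketch (illustrative only; pKeyExpr, pTotalExpr, and
+      pUpstreamSource are hypothetical and built elsewhere):
+
+          intrusive_ptr<DocumentSourceGroup> pGroup(
+              DocumentSourceGroup::create(pCtx));
+          pGroup->setIdExpression(pKeyExpr);      // becomes the _id key
+          pGroup->addAccumulator(
+              "total", AccumulatorSum::create, pTotalExpr);
+          pGroup->setSource(pUpstreamSource);
+
+      Each distinct key value then yields one result document containing
+      _id and total fields.
+    */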
+
+
+ class DocumentSourceMatch :
+ public DocumentSourceFilterBase {
+ public:
+ // virtuals from DocumentSource
+ virtual ~DocumentSourceMatch();
+
+ /*
+ Create a filter.
+
+ @param pBsonElement the raw BSON specification for the filter
+ @returns the filter
+ */
+ static intrusive_ptr<DocumentSource> createFromBson(
+ BSONElement *pBsonElement,
+ const intrusive_ptr<ExpressionContext> &pCtx);
+
+ /*
+ Create a BSONObj suitable for Matcher construction.
+
+ This is used after filter analysis has moved as many filters to
+ as early a point as possible in the document processing pipeline.
+ See db/Matcher.h and the associated wiki documentation for the
+ format. This conversion is used to move back to the low-level
+ find() Cursor mechanism.
+
+ @param pBuilder the builder to write to
+ */
+ void toMatcherBson(BSONObjBuilder *pBuilder) const;
+
+ static const char matchName[];
+
+ protected:
+ // virtuals from DocumentSource
+ virtual void sourceToBson(BSONObjBuilder *pBuilder) const;
+
+ // virtuals from DocumentSourceFilterBase
+ virtual bool accept(const intrusive_ptr<Document> &pDocument) const;
+
+ private:
+ DocumentSourceMatch(const BSONObj &query);
+
+ Matcher matcher;
+ };
+
+
+ class DocumentSourceOut :
+ public DocumentSource {
+ public:
+ // virtuals from DocumentSource
+ virtual ~DocumentSourceOut();
+ virtual bool eof();
+ virtual bool advance();
+ virtual intrusive_ptr<Document> getCurrent();
+
+ /*
+ Create a document source for output and pass-through.
+
+ This can be put anywhere in a pipeline and will store content as
+ well as pass it on.
+
+ @returns the newly created document source
+ */
+ static intrusive_ptr<DocumentSourceOut> createFromBson(
+ BSONElement *pBsonElement);
+
+ static const char outName[];
+
+ protected:
+ // virtuals from DocumentSource
+ virtual void sourceToBson(BSONObjBuilder *pBuilder) const;
+
+ private:
+ DocumentSourceOut(BSONElement *pBsonElement);
+ };
+
+
+ class DocumentSourceProject :
+ public DocumentSource,
+ public boost::enable_shared_from_this<DocumentSourceProject> {
+ public:
+ // virtuals from DocumentSource
+ virtual ~DocumentSourceProject();
+ virtual bool eof();
+ virtual bool advance();
+ virtual intrusive_ptr<Document> getCurrent();
+ virtual void optimize();
+
+ /*
+ Create a new DocumentSource that can implement projection.
+
+ @returns the projection DocumentSource
+ */
+ static intrusive_ptr<DocumentSourceProject> create();
+
+ /*
+ Include a field path in a projection.
+
+ @param fieldPath the path of the field to include
+ */
+ void includePath(const string &fieldPath);
+
+ /*
+ Exclude a field path from the projection.
+
+ @param fieldPath the path of the field to exclude
+ */
+ void excludePath(const string &fieldPath);
+
+ /*
+ Add an output Expression in the projection.
+
+ BSON document fields are ordered, so the new field will be
+ appended to the existing set.
+
+ @param fieldName the name of the field as it will appear
+ @param pExpression the expression used to compute the field
+ */
+ void addField(const string &fieldName,
+ const intrusive_ptr<Expression> &pExpression);
+
+ /*
+ Create a new projection DocumentSource from BSON.
+
+ This is a convenience for directly handling BSON, and relies on the
+ above methods.
+
+ @param pBsonElement the BSONElement with an object named $project
+ @returns the created projection
+ */
+ static intrusive_ptr<DocumentSource> createFromBson(
+ BSONElement *pBsonElement,
+ const intrusive_ptr<ExpressionContext> &pCtx);
+
+ static const char projectName[];
+
+ protected:
+ // virtuals from DocumentSource
+ virtual void sourceToBson(BSONObjBuilder *pBuilder) const;
+
+ private:
+ DocumentSourceProject();
+
+ // configuration state
+ bool excludeId;
+ intrusive_ptr<ExpressionObject> pEO;
+ };
+
+
+ class DocumentSourceSort :
+ public DocumentSource {
+ public:
+ // virtuals from DocumentSource
+ virtual ~DocumentSourceSort();
+ virtual bool eof();
+ virtual bool advance();
+ virtual intrusive_ptr<Document> getCurrent();
+ /*
+ TODO
+ Adjacent sorts should reduce to the last sort.
+ virtual bool coalesce(const intrusive_ptr<DocumentSource> &pNextSource);
+ */
+
+ /*
+ Create a new sorting DocumentSource.
+
+ @param pCtx the expression context
+ @returns the DocumentSource
+ */
+ static intrusive_ptr<DocumentSourceSort> create(
+ const intrusive_ptr<ExpressionContext> &pCtx);
+
+ /*
+ Add sort key field.
+
+ Adds a sort key field to the key being built up. A concatenated
+ key is built up by calling this repeatedly.
+
+ @param fieldPath the field path to the key component
+ @param ascending if true, use the key for an ascending sort,
+ otherwise, use it for descending
+ */
+ void addKey(const string &fieldPath, bool ascending);
+
+ /*
+ Write out an object whose contents are the sort key.
+
+ @param pBuilder initialized object builder.
+          @param usePrefix whether or not to include the field prefix
+ */
+ void sortKeyToBson(BSONObjBuilder *pBuilder, bool usePrefix) const;
+
+ /*
+ Create a sorting DocumentSource from BSON.
+
+ This is a convenience method that uses the above, and operates on
+          a BSONElement that has been determined to be an Object with an
+          element named $sort.
+
+          @param pBsonElement the BSONElement that defines the sort
+          @param pCtx the expression context
+          @returns the sorting DocumentSource
+ */
+ static intrusive_ptr<DocumentSource> createFromBson(
+ BSONElement *pBsonElement,
+ const intrusive_ptr<ExpressionContext> &pCtx);
+
+
+ static const char sortName[];
+
+ protected:
+ // virtuals from DocumentSource
+ virtual void sourceToBson(BSONObjBuilder *pBuilder) const;
+
+ private:
+ DocumentSourceSort(const intrusive_ptr<ExpressionContext> &pCtx);
+
+ /*
+ Before returning anything, this source must fetch everything from
+ the underlying source and group it. populate() is used to do that
+ on the first call to any method on this source. The populated
+ boolean indicates that this has been done.
+ */
+ void populate();
+ bool populated;
+ long long count;
+
+ /* these two parallel each other */
+ vector<intrusive_ptr<ExpressionFieldPath> > vSortKey;
+ vector<bool> vAscending;
+
+ class Carrier {
+ public:
+ /*
+ We need access to the key for compares, so we have to carry
+ this around.
+ */
+ DocumentSourceSort *pSort;
+
+ intrusive_ptr<Document> pDocument;
+
+ Carrier(DocumentSourceSort *pSort,
+ const intrusive_ptr<Document> &pDocument);
+
+ static bool lessThan(const Carrier &rL, const Carrier &rR);
+ };
+
+ /*
+ Compare two documents according to the specified sort key.
+
+          @param pL reference to the left document
+          @param pR reference to the right document
+ @returns a number less than, equal to, or greater than zero,
+ indicating pL < pR, pL == pR, or pL > pR, respectively
+ */
+ int compare(const intrusive_ptr<Document> &pL,
+ const intrusive_ptr<Document> &pR);
+
+ typedef list<Carrier> ListType;
+ ListType documents;
+
+ ListType::iterator listIterator;
+ intrusive_ptr<Document> pCurrent;
+
+ intrusive_ptr<ExpressionContext> pCtx;
+ };
+
+
+ class DocumentSourceLimit :
+ public DocumentSource {
+ public:
+ // virtuals from DocumentSource
+ virtual ~DocumentSourceLimit();
+ virtual bool eof();
+ virtual bool advance();
+ virtual intrusive_ptr<Document> getCurrent();
+
+ /*
+ Create a new limiting DocumentSource.
+
+ @param pCtx the expression context
+ @returns the DocumentSource
+ */
+ static intrusive_ptr<DocumentSourceLimit> create(
+ const intrusive_ptr<ExpressionContext> &pCtx);
+
+ /*
+ Create a limiting DocumentSource from BSON.
+
+ This is a convenience method that uses the above, and operates on
+          a BSONElement that has been determined to be an Object with an
+          element named $limit.
+
+          @param pBsonElement the BSONElement that defines the limit
+          @param pCtx the expression context
+          @returns the limiting DocumentSource
+ */
+ static intrusive_ptr<DocumentSource> createFromBson(
+ BSONElement *pBsonElement,
+ const intrusive_ptr<ExpressionContext> &pCtx);
+
+
+ static const char limitName[];
+
+ protected:
+ // virtuals from DocumentSource
+ virtual void sourceToBson(BSONObjBuilder *pBuilder) const;
+
+ private:
+ DocumentSourceLimit(const intrusive_ptr<ExpressionContext> &pCtx);
+
+ long long limit;
+ long long count;
+ intrusive_ptr<Document> pCurrent;
+
+ intrusive_ptr<ExpressionContext> pCtx;
+ };
+
+ class DocumentSourceSkip :
+ public DocumentSource {
+ public:
+ // virtuals from DocumentSource
+ virtual ~DocumentSourceSkip();
+ virtual bool eof();
+ virtual bool advance();
+ virtual intrusive_ptr<Document> getCurrent();
+
+ /*
+ Create a new skipping DocumentSource.
+
+ @param pCtx the expression context
+ @returns the DocumentSource
+ */
+ static intrusive_ptr<DocumentSourceSkip> create(
+ const intrusive_ptr<ExpressionContext> &pCtx);
+
+ /*
+ Create a skipping DocumentSource from BSON.
+
+ This is a convenience method that uses the above, and operates on
+ a BSONElement that has been determined to be an Object with an
+ element named $skip.
+
+ @param pBsonElement the BSONElement that defines the skip
+ @param pCtx the expression context
+ @returns the skipping DocumentSource
+ */
+ static intrusive_ptr<DocumentSource> createFromBson(
+ BSONElement *pBsonElement,
+ const intrusive_ptr<ExpressionContext> &pCtx);
+
+
+ static const char skipName[];
+
+ protected:
+ // virtuals from DocumentSource
+ virtual void sourceToBson(BSONObjBuilder *pBuilder) const;
+
+ private:
+ DocumentSourceSkip(const intrusive_ptr<ExpressionContext> &pCtx);
+
+ /*
+ Skips initial documents.
+ */
+ void skipper();
+
+ long long skip;
+ long long count;
+ intrusive_ptr<Document> pCurrent;
+
+ intrusive_ptr<ExpressionContext> pCtx;
+ };
+
+
+ class DocumentSourceUnwind :
+ public DocumentSource,
+ public boost::enable_shared_from_this<DocumentSourceUnwind> {
+ public:
+ // virtuals from DocumentSource
+ virtual ~DocumentSourceUnwind();
+ virtual bool eof();
+ virtual bool advance();
+ virtual intrusive_ptr<Document> getCurrent();
+
+ /*
+ Create a new DocumentSource that can implement unwind.
+
+ @returns the unwinding DocumentSource
+ */
+ static intrusive_ptr<DocumentSourceUnwind> create();
+
+ /*
+ Specify the field to unwind. There must be exactly one before
+ the pipeline begins execution.
+
+ @param rFieldPath - path to the field to unwind
+ */
+ void unwindField(const FieldPath &rFieldPath);
+
+ /*
+ Create a new unwinding DocumentSource from BSON.
+
+ This is a convenience for directly handling BSON, and relies on the
+ above methods.
+
+ @param pBsonElement the BSONElement with an object named $unwind
+ @param pCtx the expression context
+ @returns the created DocumentSource
+ */
+ static intrusive_ptr<DocumentSource> createFromBson(
+ BSONElement *pBsonElement,
+ const intrusive_ptr<ExpressionContext> &pCtx);
+
+ static const char unwindName[];
+
+ protected:
+ // virtuals from DocumentSource
+ virtual void sourceToBson(BSONObjBuilder *pBuilder) const;
+
+ private:
+ DocumentSourceUnwind();
+
+ // configuration state
+ FieldPath unwindPath;
+
+ vector<int> fieldIndex; /* for the current document, the indices
+ leading down to the field being unwound */
+
+ // iteration state
+ intrusive_ptr<Document> pNoUnwindDocument;
+ // document to return, pre-unwind
+ intrusive_ptr<const Value> pUnwindArray; // field being unwound
+ intrusive_ptr<ValueIterator> pUnwinder; // iterator used for unwinding
+ intrusive_ptr<const Value> pUnwindValue; // current value
+
+ /*
+ Clear all the state related to unwinding an array.
+ */
+ void resetArray();
+
+ /*
+ Clone the current document being unwound.
+
+ This is a partial deep clone. Because we're going to replace the
+ value at the end, we have to replace everything along the path
+ leading to that in order to not share that change with any other
+ clones (or the original) that we've made.
+
+ This expects pUnwindValue to have been set by a prior call to
+ advance(). However, pUnwindValue may also be NULL, in which case
+ the field will be removed -- this is the action for an empty
+ array.
+
+ @returns a partial deep clone of pNoUnwindDocument
+ */
+ intrusive_ptr<Document> clonePath() const;
+
+ };
+
+}
+
+
+/* ======================= INLINED IMPLEMENTATIONS ========================== */
+
+namespace mongo {
+
+ inline void DocumentSourceGroup::setIdExpression(
+ const intrusive_ptr<Expression> &pExpression) {
+ pIdExpression = pExpression;
+ }
+
+ inline void DocumentSourceUnwind::resetArray() {
+ pNoUnwindDocument.reset();
+ pUnwindArray.reset();
+ pUnwinder.reset();
+ pUnwindValue.reset();
+ }
+
+ inline DocumentSourceSort::Carrier::Carrier(
+ DocumentSourceSort *pTheSort,
+ const intrusive_ptr<Document> &pTheDocument):
+ pSort(pTheSort),
+ pDocument(pTheDocument) {
+ }
+}
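The Carrier comments above describe why each list element carries a back-pointer to its owning sort stage: list::sort() wants a stateless comparator, but the comparison needs the stage's sort-key definitions. The following is a minimal standalone sketch of that pattern; Doc, Sorter, and the field names are hypothetical stand-ins, not MongoDB types.

    #include <cassert>
    #include <iostream>
    #include <list>
    #include <vector>

    struct Doc { int a; int b; };   // hypothetical stand-in for Document

    class Sorter {
    public:
        // parallel vectors, like vSortKey/vAscending: which field, which direction
        std::vector<char> keys;       // 'a' or 'b'
        std::vector<bool> ascending;

        int compare(const Doc& l, const Doc& r) const {
            for (size_t i = 0; i < keys.size(); ++i) {
                int lv = (keys[i] == 'a') ? l.a : l.b;
                int rv = (keys[i] == 'a') ? r.a : r.b;
                int cmp = (lv < rv) ? -1 : (lv > rv) ? 1 : 0;
                if (cmp)
                    return ascending[i] ? cmp : -cmp;
            }
            return 0;
        }

        struct Carrier {
            Sorter* pSort;   // back-pointer gives the comparator access to the keys
            Doc doc;
        };

        static bool lessThan(const Carrier& rL, const Carrier& rR) {
            assert(rL.pSort == rR.pSort);            // same owning sorter
            return rL.pSort->compare(rL.doc, rR.doc) < 0;
        }
    };

    int main() {
        Sorter s;
        s.keys = {'a', 'b'};
        s.ascending = {true, false};
        std::list<Sorter::Carrier> docs{{&s, {1, 2}}, {&s, {1, 5}}, {&s, {0, 9}}};
        docs.sort(Sorter::lessThan);                 // list::sort takes the static comparator
        for (const auto& c : docs)
            std::cout << c.doc.a << "," << c.doc.b << "\n";  // 0,9  1,5  1,2
        return 0;
    }

The assert that both carriers point at the same owner mirrors the same-list check in Carrier::lessThan() above.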
diff --git a/src/mongo/db/pipeline/document_source_bson_array.cpp b/src/mongo/db/pipeline/document_source_bson_array.cpp
new file mode 100755
index 00000000000..5d187b03ef9
--- /dev/null
+++ b/src/mongo/db/pipeline/document_source_bson_array.cpp
@@ -0,0 +1,83 @@
+/**
+ * Copyright 2011 (c) 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+
+#include "db/pipeline/document_source.h"
+
+#include "db/pipeline/document.h"
+
+namespace mongo {
+
+ DocumentSourceBsonArray::~DocumentSourceBsonArray() {
+ }
+
+ bool DocumentSourceBsonArray::eof() {
+ return !haveCurrent;
+ }
+
+ bool DocumentSourceBsonArray::advance() {
+ if (eof())
+ return false;
+
+ if (!arrayIterator.more()) {
+ haveCurrent = false;
+ return false;
+ }
+
+ currentElement = arrayIterator.next();
+ return true;
+ }
+
+ intrusive_ptr<Document> DocumentSourceBsonArray::getCurrent() {
+ assert(haveCurrent);
+ BSONObj documentObj(currentElement.Obj());
+ intrusive_ptr<Document> pDocument(
+ Document::createFromBsonObj(&documentObj));
+ return pDocument;
+ }
+
+ void DocumentSourceBsonArray::setSource(
+ const intrusive_ptr<DocumentSource> &pSource) {
+ /* this doesn't take a source */
+ assert(false);
+ }
+
+ DocumentSourceBsonArray::DocumentSourceBsonArray(
+ BSONElement *pBsonElement):
+ embeddedObject(pBsonElement->embeddedObject()),
+ arrayIterator(embeddedObject),
+ haveCurrent(false) {
+ if (arrayIterator.more()) {
+ currentElement = arrayIterator.next();
+ haveCurrent = true;
+ }
+ }
+
+ intrusive_ptr<DocumentSourceBsonArray> DocumentSourceBsonArray::create(
+ BSONElement *pBsonElement) {
+
+ assert(pBsonElement->type() == Array);
+ intrusive_ptr<DocumentSourceBsonArray> pSource(
+ new DocumentSourceBsonArray(pBsonElement));
+
+ return pSource;
+ }
+
+ void DocumentSourceBsonArray::sourceToBson(BSONObjBuilder *pBuilder) const {
+ assert(false); // this has no analog in the BSON world
+ }
+}
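DocumentSourceBsonArray's constructor pre-fetches the first array element so that getCurrent() is usable before the first advance(), and eof() reduces to a flag check. A self-contained sketch of the same cursor shape follows, using a std::vector of strings in place of a BSON array; ArraySource is a hypothetical type, not the MongoDB API.

    #include <cassert>
    #include <iostream>
    #include <string>
    #include <vector>

    class ArraySource {
    public:
        explicit ArraySource(std::vector<std::string> elems)
            : elements(std::move(elems)), pos(0), haveCurrent(!elements.empty()) {}

        bool eof() const { return !haveCurrent; }

        // Move to the next element; false once the array is exhausted.
        bool advance() {
            if (eof())
                return false;
            if (pos + 1 >= elements.size()) {
                haveCurrent = false;
                return false;
            }
            ++pos;
            return true;
        }

        const std::string& getCurrent() const {
            assert(haveCurrent);          // mirrors the assert(haveCurrent) above
            return elements[pos];
        }

    private:
        std::vector<std::string> elements;
        size_t pos;
        bool haveCurrent;
    };

    int main() {
        ArraySource src({"{a:1}", "{a:2}", "{a:3}"});
        // Typical consumer loop: getCurrent() first, then advance().
        for (bool more = !src.eof(); more; more = src.advance())
            std::cout << src.getCurrent() << "\n";
        return 0;
    }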
diff --git a/src/mongo/db/pipeline/document_source_command_futures.cpp b/src/mongo/db/pipeline/document_source_command_futures.cpp
new file mode 100755
index 00000000000..61a257cf16f
--- /dev/null
+++ b/src/mongo/db/pipeline/document_source_command_futures.cpp
@@ -0,0 +1,132 @@
+/**
+ * Copyright 2011 (c) 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+
+#include "db/pipeline/document_source.h"
+
+namespace mongo {
+
+ DocumentSourceCommandFutures::~DocumentSourceCommandFutures() {
+ }
+
+ bool DocumentSourceCommandFutures::eof() {
+ /* if we haven't even started yet, do so */
+ if (!pCurrent.get())
+ getNextDocument();
+
+ return (pCurrent.get() == NULL);
+ }
+
+ bool DocumentSourceCommandFutures::advance() {
+ if (eof())
+ return false;
+
+ /* advance */
+ getNextDocument();
+
+ return (pCurrent.get() != NULL);
+ }
+
+ intrusive_ptr<Document> DocumentSourceCommandFutures::getCurrent() {
+ assert(!eof());
+ return pCurrent;
+ }
+
+ void DocumentSourceCommandFutures::setSource(
+ const intrusive_ptr<DocumentSource> &pSource) {
+ /* this doesn't take a source */
+ assert(false);
+ }
+
+ void DocumentSourceCommandFutures::sourceToBson(
+ BSONObjBuilder *pBuilder) const {
+ /* this has no BSON equivalent */
+ assert(false);
+ }
+
+ DocumentSourceCommandFutures::DocumentSourceCommandFutures(
+ string &theErrmsg, FuturesList *pList):
+ newSource(false),
+ pBsonSource(),
+ pCurrent(),
+ iterator(pList->begin()),
+ listEnd(pList->end()),
+ errmsg(theErrmsg) {
+ }
+
+ intrusive_ptr<DocumentSourceCommandFutures>
+ DocumentSourceCommandFutures::create(
+ string &errmsg, FuturesList *pList) {
+ intrusive_ptr<DocumentSourceCommandFutures> pSource(
+ new DocumentSourceCommandFutures(errmsg, pList));
+ return pSource;
+ }
+
+ void DocumentSourceCommandFutures::getNextDocument() {
+ while(true) {
+ if (!pBsonSource.get()) {
+ /* if there aren't any more futures, we're done */
+ if (iterator == listEnd) {
+ pCurrent.reset();
+ return;
+ }
+
+ /* grab the next command result */
+ shared_ptr<Future::CommandResult> pResult(*iterator);
+ ++iterator;
+
+ /* try to wait for it */
+ if (!pResult->join()) {
+ error() << "sharded pipeline failed on shard: " <<
+ pResult->getServer() << " error: " <<
+ pResult->result() << endl;
+ errmsg += "-- mongod pipeline failed: ";
+ errmsg += pResult->result().toString();
+
+ /* move on to the next command future */
+ continue;
+ }
+
+ /* grab the result array out of the shard server's response */
+ BSONObj shardResult(pResult->result());
+ BSONObjIterator objIterator(shardResult);
+ while(objIterator.more()) {
+ BSONElement element(objIterator.next());
+ const char *pFieldName = element.fieldName();
+
+ /* find the result array and quit this loop */
+ if (strcmp(pFieldName, "result") == 0) {
+ pBsonSource = DocumentSourceBsonArray::create(&element);
+ newSource = true;
+ break;
+ }
+ }
+ }
+
+ /* if we're done with this shard's results, try the next */
+ if (pBsonSource->eof() ||
+ (!newSource && !pBsonSource->advance())) {
+ pBsonSource.reset();
+ continue;
+ }
+
+ pCurrent = pBsonSource->getCurrent();
+ newSource = false;
+ return;
+ }
+ }
+}
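getNextDocument() above drains one shard's result array at a time, skipping shards whose futures failed while appending to errmsg. The following simplified, self-contained sketch shows the same merge-and-skip control flow over plain containers; ShardResult and MergedStream are hypothetical stand-ins, not the MongoDB classes.

    #include <iostream>
    #include <optional>
    #include <string>
    #include <vector>

    struct ShardResult {
        bool ok;                          // did the shard-side pipeline succeed?
        std::string server;
        std::vector<std::string> docs;    // stand-in for the "result" array
    };

    class MergedStream {
    public:
        explicit MergedStream(std::vector<ShardResult> results)
            : shards(std::move(results)), shardIdx(0), docIdx(0) {}

        // Returns the next document across all shards, or nothing when done.
        std::optional<std::string> next() {
            while (shardIdx < shards.size()) {
                const ShardResult& shard = shards[shardIdx];
                if (!shard.ok) {
                    errmsg += "-- shard pipeline failed: " + shard.server + " ";
                    ++shardIdx;                       // move on to the next shard
                    docIdx = 0;
                    continue;
                }
                if (docIdx < shard.docs.size())
                    return shard.docs[docIdx++];
                ++shardIdx;                           // this shard is drained
                docIdx = 0;
            }
            return std::nullopt;
        }

        std::string errmsg;                           // accumulated failure text
    private:
        std::vector<ShardResult> shards;
        size_t shardIdx;
        size_t docIdx;
    };

    int main() {
        MergedStream stream({{true, "shard0", {"d0", "d1"}},
                             {false, "shard1", {}},
                             {true, "shard2", {"d2"}}});
        while (auto doc = stream.next())
            std::cout << *doc << "\n";                // d0 d1 d2
        std::cout << stream.errmsg << "\n";
        return 0;
    }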
diff --git a/src/mongo/db/pipeline/document_source_filter.cpp b/src/mongo/db/pipeline/document_source_filter.cpp
new file mode 100755
index 00000000000..66e57ba2e93
--- /dev/null
+++ b/src/mongo/db/pipeline/document_source_filter.cpp
@@ -0,0 +1,98 @@
+/**
+* Copyright (C) 2011 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+
+#include "db/pipeline/document_source.h"
+
+#include "db/jsobj.h"
+#include "db/pipeline/expression.h"
+#include "db/pipeline/value.h"
+
+namespace mongo {
+
+ const char DocumentSourceFilter::filterName[] = "$filter";
+
+ DocumentSourceFilter::~DocumentSourceFilter() {
+ }
+
+ bool DocumentSourceFilter::coalesce(
+ const intrusive_ptr<DocumentSource> &pNextSource) {
+
+ /* we only know how to coalesce other filters */
+ DocumentSourceFilter *pDocFilter =
+ dynamic_cast<DocumentSourceFilter *>(pNextSource.get());
+ if (!pDocFilter)
+ return false;
+
+ /*
+ Two adjacent filters can be combined by creating a conjunction of
+ their predicates.
+ */
+ intrusive_ptr<ExpressionNary> pAnd(ExpressionAnd::create());
+ pAnd->addOperand(pFilter);
+ pAnd->addOperand(pDocFilter->pFilter);
+ pFilter = pAnd;
+
+ return true;
+ }
+
+ void DocumentSourceFilter::optimize() {
+ pFilter = pFilter->optimize();
+ }
+
+ void DocumentSourceFilter::sourceToBson(BSONObjBuilder *pBuilder) const {
+ pFilter->addToBsonObj(pBuilder, filterName, 0);
+ }
+
+ bool DocumentSourceFilter::accept(
+ const intrusive_ptr<Document> &pDocument) const {
+ intrusive_ptr<const Value> pValue(pFilter->evaluate(pDocument));
+ return pValue->coerceToBool();
+ }
+
+ intrusive_ptr<DocumentSource> DocumentSourceFilter::createFromBson(
+ BSONElement *pBsonElement,
+ const intrusive_ptr<ExpressionContext> &pCtx) {
+ uassert(15946, "a document filter expression must be an object",
+ pBsonElement->type() == Object);
+
+ Expression::ObjectCtx oCtx(0);
+ intrusive_ptr<Expression> pExpression(
+ Expression::parseObject(pBsonElement, &oCtx));
+ intrusive_ptr<DocumentSourceFilter> pFilter(
+ DocumentSourceFilter::create(pExpression));
+
+ return pFilter;
+ }
+
+ intrusive_ptr<DocumentSourceFilter> DocumentSourceFilter::create(
+ const intrusive_ptr<Expression> &pFilter) {
+ intrusive_ptr<DocumentSourceFilter> pSource(
+ new DocumentSourceFilter(pFilter));
+ return pSource;
+ }
+
+ DocumentSourceFilter::DocumentSourceFilter(
+ const intrusive_ptr<Expression> &pTheFilter):
+ DocumentSourceFilterBase(),
+ pFilter(pTheFilter) {
+ }
+
+ void DocumentSourceFilter::toMatcherBson(BSONObjBuilder *pBuilder) const {
+ pFilter->toMatcherBson(pBuilder, 0);
+ }
+}
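coalesce() folds an adjacent filter into this one by replacing the predicate with the conjunction of both predicates, so only one filter stage remains in the pipeline. A minimal sketch of that idea with std::function predicates follows; FilterStage, Doc, and conjunction() are hypothetical, not the MongoDB expression classes.

    #include <functional>
    #include <iostream>

    struct Doc { int a; int b; };

    using Predicate = std::function<bool(const Doc&)>;

    // Build the conjunction of two predicates, analogous to wrapping the two
    // filter expressions in an ExpressionAnd.
    Predicate conjunction(Predicate lhs, Predicate rhs) {
        return [lhs, rhs](const Doc& d) { return lhs(d) && rhs(d); };
    }

    struct FilterStage {
        Predicate pred;

        // Try to absorb the following stage; true if it was absorbed.
        bool coalesce(const FilterStage& next) {
            pred = conjunction(pred, next.pred);
            return true;
        }
    };

    int main() {
        FilterStage first{[](const Doc& d) { return d.a > 0; }};
        FilterStage second{[](const Doc& d) { return d.b < 10; }};
        first.coalesce(second);           // one combined pass instead of two stages

        Doc in{1, 3}, out{1, 42};
        std::cout << first.pred(in) << " " << first.pred(out) << "\n";   // 1 0
        return 0;
    }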
diff --git a/src/mongo/db/pipeline/document_source_filter_base.cpp b/src/mongo/db/pipeline/document_source_filter_base.cpp
new file mode 100755
index 00000000000..dbda34b7151
--- /dev/null
+++ b/src/mongo/db/pipeline/document_source_filter_base.cpp
@@ -0,0 +1,85 @@
+/**
+* Copyright (C) 2011 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+
+#include "db/pipeline/document_source.h"
+
+#include "db/jsobj.h"
+#include "db/pipeline/expression.h"
+#include "db/pipeline/value.h"
+
+namespace mongo {
+
+ DocumentSourceFilterBase::~DocumentSourceFilterBase() {
+ }
+
+ void DocumentSourceFilterBase::findNext() {
+ /* only do this the first time */
+ if (unstarted) {
+ hasNext = !pSource->eof();
+ unstarted = false;
+ }
+
+ while(hasNext) {
+ boost::intrusive_ptr<Document> pDocument(pSource->getCurrent());
+ hasNext = pSource->advance();
+
+ if (accept(pDocument)) {
+ pCurrent = pDocument;
+ return;
+ }
+ }
+
+ pCurrent.reset();
+ }
+
+ bool DocumentSourceFilterBase::eof() {
+ if (unstarted)
+ findNext();
+
+ return (pCurrent.get() == NULL);
+ }
+
+ bool DocumentSourceFilterBase::advance() {
+ if (unstarted)
+ findNext();
+
+ /*
+ This looks weird after the above, but is correct. Note that calling
+ getCurrent() when first starting already yields the first document
+ in the collection. Calling advance() without using getCurrent()
+ first will skip over the first item.
+ */
+ findNext();
+
+ return (pCurrent.get() != NULL);
+ }
+
+ boost::intrusive_ptr<Document> DocumentSourceFilterBase::getCurrent() {
+ if (unstarted)
+ findNext();
+
+ assert(pCurrent.get() != NULL);
+ return pCurrent;
+ }
+
+ DocumentSourceFilterBase::DocumentSourceFilterBase():
+ unstarted(true),
+ hasNext(false),
+ pCurrent() {
+ }
+}
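The comment in advance() describes the cursor contract these stages share: getCurrent() on a fresh stage already yields the first accepted document, and advance() moves past it, so calling advance() before getCurrent() skips the first item. A small standalone sketch of a filtering cursor that obeys that contract; FilteredCursor is a hypothetical type.

    #include <cassert>
    #include <functional>
    #include <iostream>
    #include <optional>
    #include <vector>

    class FilteredCursor {
    public:
        FilteredCursor(std::vector<int> in, std::function<bool(int)> acceptFn)
            : input(std::move(in)), accept(std::move(acceptFn)),
              pos(0), unstarted(true) {}

        bool eof() {
            if (unstarted) findNext();
            return !current.has_value();
        }

        bool advance() {
            if (unstarted) findNext();     // lazily position on the first match
            findNext();                    // then move past it
            return current.has_value();
        }

        int getCurrent() {
            if (unstarted) findNext();
            assert(current.has_value());
            return *current;
        }

    private:
        // Scan forward for the next document the predicate accepts.
        void findNext() {
            unstarted = false;
            current.reset();
            while (pos < input.size()) {
                int candidate = input[pos++];
                if (accept(candidate)) {
                    current = candidate;
                    return;
                }
            }
        }

        std::vector<int> input;
        std::function<bool(int)> accept;
        size_t pos;
        bool unstarted;
        std::optional<int> current;
    };

    int main() {
        FilteredCursor cur({1, 2, 3, 4, 5, 6}, [](int v) { return v % 2 == 0; });
        for (bool more = !cur.eof(); more; more = cur.advance())
            std::cout << cur.getCurrent() << "\n";   // 2 4 6
        return 0;
    }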
diff --git a/src/mongo/db/pipeline/document_source_group.cpp b/src/mongo/db/pipeline/document_source_group.cpp
new file mode 100755
index 00000000000..244561589da
--- /dev/null
+++ b/src/mongo/db/pipeline/document_source_group.cpp
@@ -0,0 +1,391 @@
+/**
+* Copyright (C) 2011 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+
+#include "db/pipeline/document_source.h"
+
+#include "db/jsobj.h"
+#include "db/pipeline/accumulator.h"
+#include "db/pipeline/document.h"
+#include "db/pipeline/expression.h"
+#include "db/pipeline/expression_context.h"
+#include "db/pipeline/value.h"
+
+namespace mongo {
+ const char DocumentSourceGroup::groupName[] = "$group";
+
+ DocumentSourceGroup::~DocumentSourceGroup() {
+ }
+
+ bool DocumentSourceGroup::eof() {
+ if (!populated)
+ populate();
+
+ return (groupsIterator == groups.end());
+ }
+
+ bool DocumentSourceGroup::advance() {
+ if (!populated)
+ populate();
+
+ assert(groupsIterator != groups.end());
+
+ ++groupsIterator;
+ if (groupsIterator == groups.end()) {
+ pCurrent.reset();
+ return false;
+ }
+
+ pCurrent = makeDocument(groupsIterator);
+ return true;
+ }
+
+ intrusive_ptr<Document> DocumentSourceGroup::getCurrent() {
+ if (!populated)
+ populate();
+
+ return pCurrent;
+ }
+
+ void DocumentSourceGroup::sourceToBson(BSONObjBuilder *pBuilder) const {
+ BSONObjBuilder insides;
+
+ /* add the _id */
+ pIdExpression->addToBsonObj(&insides, Document::idName.c_str(), 0);
+
+ /* add the remaining fields */
+ const size_t n = vFieldName.size();
+ for(size_t i = 0; i < n; ++i) {
+ intrusive_ptr<Accumulator> pA((*vpAccumulatorFactory[i])(pCtx));
+ pA->addOperand(vpExpression[i]);
+ pA->addToBsonObj(&insides, vFieldName[i], 0);
+ }
+
+ pBuilder->append(groupName, insides.done());
+ }
+
+ intrusive_ptr<DocumentSourceGroup> DocumentSourceGroup::create(
+ const intrusive_ptr<ExpressionContext> &pCtx) {
+ intrusive_ptr<DocumentSourceGroup> pSource(
+ new DocumentSourceGroup(pCtx));
+ return pSource;
+ }
+
+ DocumentSourceGroup::DocumentSourceGroup(
+ const intrusive_ptr<ExpressionContext> &pTheCtx):
+ populated(false),
+ pIdExpression(),
+ groups(),
+ vFieldName(),
+ vpAccumulatorFactory(),
+ vpExpression(),
+ pCtx(pTheCtx) {
+ }
+
+ void DocumentSourceGroup::addAccumulator(
+ string fieldName,
+ intrusive_ptr<Accumulator> (*pAccumulatorFactory)(
+ const intrusive_ptr<ExpressionContext> &),
+ const intrusive_ptr<Expression> &pExpression) {
+ vFieldName.push_back(fieldName);
+ vpAccumulatorFactory.push_back(pAccumulatorFactory);
+ vpExpression.push_back(pExpression);
+ }
+
+
+ struct GroupOpDesc {
+ const char *pName;
+ intrusive_ptr<Accumulator> (*pFactory)(
+ const intrusive_ptr<ExpressionContext> &);
+ };
+
+ static int GroupOpDescCmp(const void *pL, const void *pR) {
+ return strcmp(((const GroupOpDesc *)pL)->pName,
+ ((const GroupOpDesc *)pR)->pName);
+ }
+
+ /*
+ Keep these sorted alphabetically so we can bsearch() them using
+ GroupOpDescCmp() above.
+ */
+ static const GroupOpDesc GroupOpTable[] = {
+ {"$addToSet", AccumulatorAddToSet::create},
+ {"$avg", AccumulatorAvg::create},
+ {"$first", AccumulatorFirst::create},
+ {"$last", AccumulatorLast::create},
+ {"$max", AccumulatorMinMax::createMax},
+ {"$min", AccumulatorMinMax::createMin},
+ {"$push", AccumulatorPush::create},
+ {"$sum", AccumulatorSum::create},
+ };
+
+ static const size_t NGroupOp = sizeof(GroupOpTable)/sizeof(GroupOpTable[0]);
+
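The operator lookup below relies on GroupOpTable staying alphabetized so bsearch() can locate an entry with the same strcmp-based comparator. A self-contained sketch of that sorted-table lookup; OpDesc and the toy apply functions are hypothetical, whereas the real table maps operator names to accumulator factories.

    #include <cstdlib>
    #include <cstring>
    #include <iostream>

    struct OpDesc {
        const char* name;
        double (*apply)(double, double);   // hypothetical stand-in for a factory
    };

    static int OpDescCmp(const void* l, const void* r) {
        return std::strcmp(static_cast<const OpDesc*>(l)->name,
                           static_cast<const OpDesc*>(r)->name);
    }

    // must stay sorted alphabetically so bsearch() works
    static const OpDesc OpTable[] = {
        {"$max", [](double a, double b) { return a > b ? a : b; }},
        {"$min", [](double a, double b) { return a < b ? a : b; }},
        {"$sum", [](double a, double b) { return a + b; }},
    };
    static const size_t NOp = sizeof(OpTable) / sizeof(OpTable[0]);

    const OpDesc* findOp(const char* name) {
        OpDesc key;
        key.name = name;
        return static_cast<const OpDesc*>(
            std::bsearch(&key, OpTable, NOp, sizeof(OpDesc), OpDescCmp));
    }

    int main() {
        const OpDesc* op = findOp("$sum");
        std::cout << (op ? op->apply(2, 3) : -1) << "\n";        // 5
        std::cout << (findOp("$nope") ? "found" : "unknown") << "\n";
        return 0;
    }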
+ intrusive_ptr<DocumentSource> DocumentSourceGroup::createFromBson(
+ BSONElement *pBsonElement,
+ const intrusive_ptr<ExpressionContext> &pCtx) {
+ uassert(15947, "a group's fields must be specified in an object",
+ pBsonElement->type() == Object);
+
+ intrusive_ptr<DocumentSourceGroup> pGroup(
+ DocumentSourceGroup::create(pCtx));
+ bool idSet = false;
+
+ BSONObj groupObj(pBsonElement->Obj());
+ BSONObjIterator groupIterator(groupObj);
+ while(groupIterator.more()) {
+ BSONElement groupField(groupIterator.next());
+ const char *pFieldName = groupField.fieldName();
+
+ if (strcmp(pFieldName, Document::idName.c_str()) == 0) {
+ uassert(15948, "a group's _id may only be specified once",
+ !idSet);
+
+ BSONType groupType = groupField.type();
+
+ if (groupType == Object) {
+ /*
+ Use the projection-like set of field paths to create the
+ group-by key.
+ */
+ Expression::ObjectCtx oCtx(
+ Expression::ObjectCtx::DOCUMENT_OK);
+ intrusive_ptr<Expression> pId(
+ Expression::parseObject(&groupField, &oCtx));
+
+ pGroup->setIdExpression(pId);
+ idSet = true;
+ }
+ else if (groupType == String) {
+ string groupString(groupField.String());
+ const char *pGroupString = groupString.c_str();
+ if ((groupString.length() == 0) ||
+ (pGroupString[0] != '$'))
+ goto StringConstantId;
+
+ string pathString(
+ Expression::removeFieldPrefix(groupString));
+ intrusive_ptr<ExpressionFieldPath> pFieldPath(
+ ExpressionFieldPath::create(pathString));
+ pGroup->setIdExpression(pFieldPath);
+ idSet = true;
+ }
+ else {
+ /* pick out the constant types that are allowed */
+ switch(groupType) {
+ case NumberDouble:
+ case String:
+ case Object:
+ case Array:
+ case jstOID:
+ case Bool:
+ case Date:
+ case NumberInt:
+ case Timestamp:
+ case NumberLong:
+ case jstNULL:
+ StringConstantId: // from string case above
+ {
+ intrusive_ptr<const Value> pValue(
+ Value::createFromBsonElement(&groupField));
+ intrusive_ptr<ExpressionConstant> pConstant(
+ ExpressionConstant::create(pValue));
+ pGroup->setIdExpression(pConstant);
+ idSet = true;
+ break;
+ }
+
+ default:
+ uassert(15949, str::stream() <<
+ "a group's _id may not include fields of BSON type " << groupType,
+ false);
+ }
+ }
+ }
+ else {
+ /*
+ Treat as a projection field with the additional ability to
+ add aggregation operators.
+ */
+ uassert(15950, str::stream() <<
+ "the group aggregate field name " <<
+ pFieldName << " cannot be an operator name",
+ *pFieldName != '$');
+
+ uassert(15951, str::stream() <<
+ "the group aggregate field " << *pFieldName <<
+ "must be defined as an expression inside an object",
+ groupField.type() == Object);
+
+ BSONObj subField(groupField.Obj());
+ BSONObjIterator subIterator(subField);
+ size_t subCount = 0;
+ for(; subIterator.more(); ++subCount) {
+ BSONElement subElement(subIterator.next());
+
+ /* look for the specified operator */
+ GroupOpDesc key;
+ key.pName = subElement.fieldName();
+ const GroupOpDesc *pOp =
+ (const GroupOpDesc *)bsearch(
+ &key, GroupOpTable, NGroupOp, sizeof(GroupOpDesc),
+ GroupOpDescCmp);
+
+ uassert(15952, str::stream() <<
+ "unknown group operator \"" <<
+ key.pName << "\"",
+ pOp);
+
+ intrusive_ptr<Expression> pGroupExpr;
+
+ BSONType elementType = subElement.type();
+ if (elementType == Object) {
+ Expression::ObjectCtx oCtx(
+ Expression::ObjectCtx::DOCUMENT_OK);
+ pGroupExpr = Expression::parseObject(
+ &subElement, &oCtx);
+ }
+ else if (elementType == Array) {
+ uassert(15953, str::stream() <<
+ "aggregating group operators are unary (" <<
+ key.pName << ")", false);
+ }
+ else { /* assume it's an atomic single operand */
+ pGroupExpr = Expression::parseOperand(&subElement);
+ }
+
+ pGroup->addAccumulator(
+ pFieldName, pOp->pFactory, pGroupExpr);
+ }
+
+ uassert(15954, str::stream() <<
+ "the computed aggregate \"" <<
+ pFieldName << "\" must specify exactly one operator",
+ subCount == 1);
+ }
+ }
+
+ uassert(15955, "a group specification must include an _id", idSet);
+
+ return pGroup;
+ }
+
+ void DocumentSourceGroup::populate() {
+ for(bool hasNext = !pSource->eof(); hasNext;
+ hasNext = pSource->advance()) {
+ intrusive_ptr<Document> pDocument(pSource->getCurrent());
+
+ /* get the _id document */
+ intrusive_ptr<const Value> pId(pIdExpression->evaluate(pDocument));
+ uassert(15956, "the _id field for a group must not be undefined",
+ pId->getType() != Undefined);
+
+ /*
+ Look for the _id value in the map; if it's not there, add a
+ new entry with a blank accumulator.
+ */
+ vector<intrusive_ptr<Accumulator> > *pGroup;
+ GroupsType::iterator it(groups.find(pId));
+ if (it != groups.end()) {
+ /* point at the existing accumulators */
+ pGroup = &it->second;
+ }
+ else {
+ /* insert a new group into the map */
+ groups.insert(it,
+ pair<intrusive_ptr<const Value>,
+ vector<intrusive_ptr<Accumulator> > >(
+ pId, vector<intrusive_ptr<Accumulator> >()));
+
+ /* find the accumulator vector (the map value) */
+ it = groups.find(pId);
+ pGroup = &it->second;
+
+ /* add the accumulators */
+ const size_t n = vpAccumulatorFactory.size();
+ pGroup->reserve(n);
+ for(size_t i = 0; i < n; ++i) {
+ intrusive_ptr<Accumulator> pAccumulator(
+ (*vpAccumulatorFactory[i])(pCtx));
+ pAccumulator->addOperand(vpExpression[i]);
+ pGroup->push_back(pAccumulator);
+ }
+ }
+
+ /* point at the existing key */
+ // unneeded atm // pId = it.first;
+
+ /* tickle all the accumulators for the group we found */
+ const size_t n = pGroup->size();
+ for(size_t i = 0; i < n; ++i)
+ (*pGroup)[i]->evaluate(pDocument);
+ }
+
+ /* start the group iterator */
+ groupsIterator = groups.begin();
+ if (groupsIterator != groups.end())
+ pCurrent = makeDocument(groupsIterator);
+ populated = true;
+ }
+
+ intrusive_ptr<Document> DocumentSourceGroup::makeDocument(
+ const GroupsType::iterator &rIter) {
+ vector<intrusive_ptr<Accumulator> > *pGroup = &rIter->second;
+ const size_t n = vFieldName.size();
+ intrusive_ptr<Document> pResult(Document::create(1 + n));
+
+ /* add the _id field */
+ pResult->addField(Document::idName, rIter->first);
+
+ /* add the rest of the fields */
+ for(size_t i = 0; i < n; ++i) {
+ intrusive_ptr<const Value> pValue((*pGroup)[i]->getValue());
+ if (pValue->getType() != Undefined)
+ pResult->addField(vFieldName[i], pValue);
+ }
+
+ return pResult;
+ }
+
+ intrusive_ptr<DocumentSource> DocumentSourceGroup::createMerger() {
+ intrusive_ptr<DocumentSourceGroup> pMerger(
+ DocumentSourceGroup::create(pCtx));
+
+ /* the merger will use the same grouping key */
+ pMerger->setIdExpression(ExpressionFieldPath::create(
+ Document::idName.c_str()));
+
+ const size_t n = vFieldName.size();
+ for(size_t i = 0; i < n; ++i) {
+ /*
+ The merger's output field names will be the same, as will the
+ accumulator factories. However, for some accumulators, the
+ expression to be accumulated will be different. The original
+ accumulator may be collecting an expression based on a field
+ expression or constant. Here, we accumulate the output of the
+ same name from the prior group.
+ */
+ pMerger->addAccumulator(
+ vFieldName[i], vpAccumulatorFactory[i],
+ ExpressionFieldPath::create(vFieldName[i]));
+ }
+
+ return pMerger;
+ }
+}
+
+
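populate() and createMerger() together implement a two-phase group: fold every input document into a map keyed by the _id value, creating a fresh accumulator vector the first time a key is seen, and (for sharded pipelines) re-group the partial outputs by field name in a second stage. A simplified sketch of the fold step follows, with toy accumulators standing in for the Accumulator classes; all names are hypothetical, and the max accumulator assumes non-negative values for brevity.

    #include <functional>
    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    struct Doc { std::string key; double value; };

    // A toy accumulator: fed one document at a time, asked for its state at the end.
    struct Accumulator {
        std::function<void(double&, const Doc&)> step;
        double state;
        void evaluate(const Doc& d) { step(state, d); }
    };

    int main() {
        std::vector<Doc> input{{"a", 1}, {"b", 2}, {"a", 3}, {"b", 4}, {"b", 5}};

        // accumulator "factories", analogous to the $sum / $max table entries
        auto makeSum = [] {
            return Accumulator{[](double& s, const Doc& d) { s += d.value; }, 0.0};
        };
        auto makeMax = [] {
            return Accumulator{[](double& s, const Doc& d) { if (d.value > s) s = d.value; }, 0.0};
        };

        std::map<std::string, std::vector<Accumulator>> groups;
        for (const Doc& doc : input) {
            auto it = groups.find(doc.key);
            if (it == groups.end()) {
                // first time this _id is seen: insert a blank accumulator vector
                it = groups.emplace(doc.key,
                                    std::vector<Accumulator>{makeSum(), makeMax()}).first;
            }
            for (Accumulator& acc : it->second)      // tickle every accumulator
                acc.evaluate(doc);
        }

        for (const auto& g : groups)
            std::cout << g.first << ": sum=" << g.second[0].state
                      << " max=" << g.second[1].state << "\n";
        // a: sum=4 max=3
        // b: sum=11 max=5
        return 0;
    }

createMerger() above builds the second phase the same way: the merging group keeps the output field names and accumulator factories, but each accumulator now reads the like-named field of the first phase's output documents.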
diff --git a/src/mongo/db/pipeline/document_source_limit.cpp b/src/mongo/db/pipeline/document_source_limit.cpp
new file mode 100644
index 00000000000..a73d4da2005
--- /dev/null
+++ b/src/mongo/db/pipeline/document_source_limit.cpp
@@ -0,0 +1,83 @@
+/**
+* Copyright (C) 2011 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+
+#include "db/pipeline/document_source.h"
+
+#include "db/jsobj.h"
+#include "db/pipeline/document.h"
+#include "db/pipeline/expression.h"
+#include "db/pipeline/expression_context.h"
+#include "db/pipeline/value.h"
+
+namespace mongo {
+ const char DocumentSourceLimit::limitName[] = "$limit";
+
+ DocumentSourceLimit::DocumentSourceLimit(const intrusive_ptr<ExpressionContext> &pTheCtx):
+ limit(0),
+ count(0),
+ pCtx(pTheCtx) {
+ }
+
+ DocumentSourceLimit::~DocumentSourceLimit() {
+ }
+
+ bool DocumentSourceLimit::eof() {
+ return pSource->eof() || count >= limit;
+ }
+
+ bool DocumentSourceLimit::advance() {
+ ++count;
+ if (count >= limit) {
+ pCurrent.reset();
+ return false;
+ }
+ pCurrent = pSource->getCurrent();
+ return pSource->advance();
+ }
+
+ intrusive_ptr<Document> DocumentSourceLimit::getCurrent() {
+ return pSource->getCurrent();
+ }
+
+ void DocumentSourceLimit::sourceToBson(BSONObjBuilder *pBuilder) const {
+ pBuilder->append("$limit", limit);
+ }
+
+ intrusive_ptr<DocumentSourceLimit> DocumentSourceLimit::create(
+ const intrusive_ptr<ExpressionContext> &pCtx) {
+ intrusive_ptr<DocumentSourceLimit> pSource(
+ new DocumentSourceLimit(pCtx));
+ return pSource;
+ }
+
+ intrusive_ptr<DocumentSource> DocumentSourceLimit::createFromBson(
+ BSONElement *pBsonElement,
+ const intrusive_ptr<ExpressionContext> &pCtx) {
+ uassert(15957, "the limit must be specified as a number",
+ pBsonElement->isNumber());
+
+ intrusive_ptr<DocumentSourceLimit> pLimit(
+ DocumentSourceLimit::create(pCtx));
+
+ pLimit->limit = (int)pBsonElement->numberLong();
+ uassert(15958, "the limit must be positive",
+ pLimit->limit > 0);
+
+ return pLimit;
+ }
+}
diff --git a/src/mongo/db/pipeline/document_source_match.cpp b/src/mongo/db/pipeline/document_source_match.cpp
new file mode 100755
index 00000000000..bedac3ef717
--- /dev/null
+++ b/src/mongo/db/pipeline/document_source_match.cpp
@@ -0,0 +1,80 @@
+/**
+* Copyright (C) 2011 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+
+#include "db/pipeline/document_source.h"
+
+#include "db/jsobj.h"
+#include "db/matcher.h"
+#include "db/pipeline/document.h"
+#include "db/pipeline/expression.h"
+
+namespace mongo {
+
+ const char DocumentSourceMatch::matchName[] = "$match";
+
+ DocumentSourceMatch::~DocumentSourceMatch() {
+ }
+
+ void DocumentSourceMatch::sourceToBson(BSONObjBuilder *pBuilder) const {
+ const BSONObj *pQuery = matcher.getQuery();
+ pBuilder->append(matchName, *pQuery);
+ }
+
+ bool DocumentSourceMatch::accept(
+ const intrusive_ptr<Document> &pDocument) const {
+
+ /*
+ The matcher only takes BSON documents, so we have to make one.
+
+ LATER
+ We could optimize this by making a document with only the
+ fields referenced by the Matcher. We could do this by looking inside
+ the Matcher's BSON before it is created, and recording those. The
+ easiest implementation might be to hold onto an ExpressionDocument
+ in here, use it to build the reduced subset of fields from
+ pDocument, and then convert that subset instead.
+ */
+ BSONObjBuilder objBuilder;
+ pDocument->toBson(&objBuilder);
+ BSONObj obj(objBuilder.done());
+
+ return matcher.matches(obj);
+ }
+
+ intrusive_ptr<DocumentSource> DocumentSourceMatch::createFromBson(
+ BSONElement *pBsonElement,
+ const intrusive_ptr<ExpressionContext> &pCtx) {
+ uassert(15959, "the match filter must be an expression in an object",
+ pBsonElement->type() == Object);
+
+ intrusive_ptr<DocumentSourceMatch> pMatcher(
+ new DocumentSourceMatch(pBsonElement->Obj()));
+
+ return pMatcher;
+ }
+
+ void DocumentSourceMatch::toMatcherBson(BSONObjBuilder *pBuilder) const {
+ const BSONObj *pQuery = matcher.getQuery();
+ pBuilder->appendElements(*pQuery);
+ }
+
+ DocumentSourceMatch::DocumentSourceMatch(const BSONObj &query):
+ DocumentSourceFilterBase(),
+ matcher(query) {
+ }
+}
diff --git a/src/mongo/db/pipeline/document_source_out.cpp b/src/mongo/db/pipeline/document_source_out.cpp
new file mode 100755
index 00000000000..5a30342d25c
--- /dev/null
+++ b/src/mongo/db/pipeline/document_source_out.cpp
@@ -0,0 +1,56 @@
+/**
+ * Copyright 2011 (c) 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+
+#include "db/pipeline/document_source.h"
+
+
+namespace mongo {
+
+ const char DocumentSourceOut::outName[] = "$out";
+
+ DocumentSourceOut::~DocumentSourceOut() {
+ }
+
+ bool DocumentSourceOut::eof() {
+ return pSource->eof();
+ }
+
+ bool DocumentSourceOut::advance() {
+ return pSource->advance();
+ }
+
+ boost::intrusive_ptr<Document> DocumentSourceOut::getCurrent() {
+ return pSource->getCurrent();
+ }
+
+ DocumentSourceOut::DocumentSourceOut(BSONElement *pBsonElement) {
+ assert(false && "unimplemented");
+ }
+
+ intrusive_ptr<DocumentSourceOut> DocumentSourceOut::createFromBson(
+ BSONElement *pBsonElement) {
+ intrusive_ptr<DocumentSourceOut> pSource(
+ new DocumentSourceOut(pBsonElement));
+
+ return pSource;
+ }
+
+ void DocumentSourceOut::sourceToBson(BSONObjBuilder *pBuilder) const {
+ assert(false); // CW TODO
+ }
+}
diff --git a/src/mongo/db/pipeline/document_source_project.cpp b/src/mongo/db/pipeline/document_source_project.cpp
new file mode 100755
index 00000000000..bb7a0b5a6d9
--- /dev/null
+++ b/src/mongo/db/pipeline/document_source_project.cpp
@@ -0,0 +1,201 @@
+/**
+ * Copyright 2011 (c) 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "db/pipeline/document_source.h"
+
+#include "db/jsobj.h"
+#include "db/pipeline/document.h"
+#include "db/pipeline/expression.h"
+#include "db/pipeline/value.h"
+
+namespace mongo {
+
+ const char DocumentSourceProject::projectName[] = "$project";
+
+ DocumentSourceProject::~DocumentSourceProject() {
+ }
+
+ DocumentSourceProject::DocumentSourceProject():
+ excludeId(false),
+ pEO(ExpressionObject::create()) {
+ }
+
+ bool DocumentSourceProject::eof() {
+ return pSource->eof();
+ }
+
+ bool DocumentSourceProject::advance() {
+ return pSource->advance();
+ }
+
+ intrusive_ptr<Document> DocumentSourceProject::getCurrent() {
+ intrusive_ptr<Document> pInDocument(pSource->getCurrent());
+
+ /* create the result document */
+ const size_t sizeHint =
+ pEO->getSizeHint(pInDocument) + (excludeId ? 0 : 1);
+ intrusive_ptr<Document> pResultDocument(Document::create(sizeHint));
+
+ if (!excludeId) {
+ intrusive_ptr<const Value> pId(
+ pInDocument->getField(Document::idName));
+ pResultDocument->addField(Document::idName, pId);
+ }
+
+ /* use the ExpressionObject to create the base result */
+ pEO->addToDocument(pResultDocument, pInDocument);
+
+ return pResultDocument;
+ }
+
+ void DocumentSourceProject::optimize() {
+ intrusive_ptr<Expression> pE(pEO->optimize());
+ pEO = dynamic_pointer_cast<ExpressionObject>(pE);
+ }
+
+ void DocumentSourceProject::sourceToBson(BSONObjBuilder *pBuilder) const {
+ BSONObjBuilder insides;
+ if (excludeId)
+ insides.append(Document::idName, false);
+ pEO->documentToBson(&insides, 0);
+ pBuilder->append(projectName, insides.done());
+ }
+
+ intrusive_ptr<DocumentSourceProject> DocumentSourceProject::create() {
+ intrusive_ptr<DocumentSourceProject> pSource(
+ new DocumentSourceProject());
+ return pSource;
+ }
+
+ void DocumentSourceProject::addField(
+ const string &fieldName, const intrusive_ptr<Expression> &pExpression) {
+ uassert(15960,
+ "projection fields must be defined by non-empty expressions",
+ pExpression);
+
+ pEO->addField(fieldName, pExpression);
+ }
+
+ void DocumentSourceProject::includePath(const string &fieldPath) {
+ if (Document::idName.compare(fieldPath) == 0) {
+ uassert(15961, str::stream() << projectName <<
+ ": _id cannot be included once it has been excluded",
+ !excludeId);
+
+ return;
+ }
+
+ pEO->includePath(fieldPath);
+ }
+
+ void DocumentSourceProject::excludePath(const string &fieldPath) {
+ if (Document::idName.compare(fieldPath) == 0) {
+ excludeId = true;
+ return;
+ }
+
+ pEO->excludePath(fieldPath);
+ }
+
+ intrusive_ptr<DocumentSource> DocumentSourceProject::createFromBson(
+ BSONElement *pBsonElement,
+ const intrusive_ptr<ExpressionContext> &pCtx) {
+ /* validate */
+ uassert(15969, str::stream() << projectName <<
+ " specification must be an object",
+ pBsonElement->type() == Object);
+
+ /* chain the projection onto the original source */
+ intrusive_ptr<DocumentSourceProject> pProject(
+ DocumentSourceProject::create());
+
+ /*
+ Pull out the $project object. This should just be a list of
+ field inclusion or exclusion specifications. Note you can't do
+ both, except for the case of _id.
+ */
+ BSONObj projectObj(pBsonElement->Obj());
+ BSONObjIterator fieldIterator(projectObj);
+ Expression::ObjectCtx objectCtx(
+ Expression::ObjectCtx::DOCUMENT_OK);
+ while(fieldIterator.more()) {
+ BSONElement outFieldElement(fieldIterator.next());
+ string outFieldPath(outFieldElement.fieldName());
+ string inFieldName(outFieldPath);
+ BSONType specType = outFieldElement.type();
+ int fieldInclusion = -1;
+
+ switch(specType) {
+ case NumberDouble: {
+ double inclusion = outFieldElement.numberDouble();
+ fieldInclusion = static_cast<int>(inclusion);
+ goto IncludeExclude;
+ }
+
+ case NumberInt:
+ /* just a plain integer include/exclude specification */
+ fieldInclusion = outFieldElement.numberInt();
+
+IncludeExclude:
+ uassert(15970, str::stream() <<
+ "field inclusion or exclusion specification for \"" <<
+ outFieldPath <<
+ "\" must be true, 1, false, or zero",
+ ((fieldInclusion == 0) || (fieldInclusion == 1)));
+
+ if (fieldInclusion == 0)
+ pProject->excludePath(outFieldPath);
+ else
+ pProject->includePath(outFieldPath);
+ break;
+
+ case Bool:
+ /* just a plain boolean include/exclude specification */
+ fieldInclusion = (outFieldElement.Bool() ? 1 : 0);
+ goto IncludeExclude;
+
+ case String:
+ /* include a field, with rename */
+ fieldInclusion = 1;
+ inFieldName = outFieldElement.String();
+ pProject->addField(
+ outFieldPath,
+ ExpressionFieldPath::create(
+ Expression::removeFieldPrefix(inFieldName)));
+ break;
+
+ case Object: {
+ intrusive_ptr<Expression> pDocument(
+ Expression::parseObject(&outFieldElement, &objectCtx));
+
+ /* add the document expression to the projection */
+ pProject->addField(outFieldPath, pDocument);
+ break;
+ }
+
+ default:
+ uassert(15971, str::stream() <<
+ "invalid BSON type (" << specType <<
+ ") for " << projectName <<
+ " field " << outFieldPath, false);
+ }
+
+ }
+
+ return pProject;
+ }
+}
diff --git a/src/mongo/db/pipeline/document_source_skip.cpp b/src/mongo/db/pipeline/document_source_skip.cpp
new file mode 100644
index 00000000000..74bf2360ce9
--- /dev/null
+++ b/src/mongo/db/pipeline/document_source_skip.cpp
@@ -0,0 +1,99 @@
+/**
+* Copyright (C) 2011 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+
+#include "db/pipeline/document_source.h"
+
+#include "db/jsobj.h"
+#include "db/pipeline/document.h"
+#include "db/pipeline/expression.h"
+#include "db/pipeline/expression_context.h"
+#include "db/pipeline/value.h"
+
+namespace mongo {
+ const char DocumentSourceSkip::skipName[] = "$skip";
+
+ DocumentSourceSkip::DocumentSourceSkip(const intrusive_ptr<ExpressionContext> &pTheCtx):
+ skip(0),
+ count(0),
+ pCtx(pTheCtx) {
+ }
+
+ DocumentSourceSkip::~DocumentSourceSkip() {
+ }
+
+ void DocumentSourceSkip::skipper() {
+ if (count == 0) {
+ while (!pSource->eof() && count++ < skip) {
+ pSource->advance();
+ }
+ }
+
+ if (pSource->eof()) {
+ pCurrent.reset();
+ return;
+ }
+
+ pCurrent = pSource->getCurrent();
+ }
+
+ bool DocumentSourceSkip::eof() {
+ skipper();
+ return pSource->eof();
+ }
+
+ bool DocumentSourceSkip::advance() {
+ if (eof()) {
+ pCurrent.reset();
+ return false;
+ }
+
+ pCurrent = pSource->getCurrent();
+ return pSource->advance();
+ }
+
+ intrusive_ptr<Document> DocumentSourceSkip::getCurrent() {
+ skipper();
+ return pCurrent;
+ }
+
+ void DocumentSourceSkip::sourceToBson(BSONObjBuilder *pBuilder) const {
+ pBuilder->append("$skip", skip);
+ }
+
+ intrusive_ptr<DocumentSourceSkip> DocumentSourceSkip::create(
+ const intrusive_ptr<ExpressionContext> &pCtx) {
+ intrusive_ptr<DocumentSourceSkip> pSource(
+ new DocumentSourceSkip(pCtx));
+ return pSource;
+ }
+
+ intrusive_ptr<DocumentSource> DocumentSourceSkip::createFromBson(
+ BSONElement *pBsonElement,
+ const intrusive_ptr<ExpressionContext> &pCtx) {
+ uassert(15972, str::stream() << "the value to " <<
+ skipName << " must be a number", pBsonElement->isNumber());
+
+ intrusive_ptr<DocumentSourceSkip> pSkip(
+ DocumentSourceSkip::create(pCtx));
+
+ pSkip->skip = (int)pBsonElement->numberLong();
+ assert(pSkip->skip > 0); // CW TODO error code
+
+ return pSkip;
+ }
+}
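Both $skip and $limit reduce to a counter layered over the upstream cursor: skip consumes and discards its first N documents exactly once, and limit stops returning documents after M. A standalone sketch of those two counters over a toy cursor; VectorCursor is hypothetical, not the MongoDB API.

    #include <iostream>
    #include <vector>

    class VectorCursor {
    public:
        explicit VectorCursor(std::vector<int> v) : data(std::move(v)), pos(0) {}
        bool eof() const { return pos >= data.size(); }
        bool advance() { if (eof()) return false; ++pos; return !eof(); }
        int getCurrent() const { return data[pos]; }
    private:
        std::vector<int> data;
        size_t pos;
    };

    int main() {
        VectorCursor source({10, 11, 12, 13, 14, 15, 16});
        const long long skip = 2, limit = 3;

        // $skip: discard the first `skip` documents exactly once
        long long skipped = 0;
        while (!source.eof() && skipped < skip) {
            source.advance();
            ++skipped;
        }

        // $limit: pass documents through until `limit` have been returned
        long long returned = 0;
        for (bool more = !source.eof(); more && returned < limit;
             more = source.advance()) {
            std::cout << source.getCurrent() << "\n";   // 12 13 14
            ++returned;
        }
        return 0;
    }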
diff --git a/src/mongo/db/pipeline/document_source_sort.cpp b/src/mongo/db/pipeline/document_source_sort.cpp
new file mode 100755
index 00000000000..bf4739af7d1
--- /dev/null
+++ b/src/mongo/db/pipeline/document_source_sort.cpp
@@ -0,0 +1,216 @@
+/**
+* Copyright (C) 2011 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+
+#include "db/pipeline/document_source.h"
+
+#include "db/jsobj.h"
+#include "db/pipeline/doc_mem_monitor.h"
+#include "db/pipeline/document.h"
+#include "db/pipeline/expression.h"
+#include "db/pipeline/expression_context.h"
+#include "db/pipeline/value.h"
+
+
+namespace mongo {
+ const char DocumentSourceSort::sortName[] = "$sort";
+
+ DocumentSourceSort::~DocumentSourceSort() {
+ }
+
+ bool DocumentSourceSort::eof() {
+ if (!populated)
+ populate();
+
+ return (listIterator == documents.end());
+ }
+
+ bool DocumentSourceSort::advance() {
+ if (!populated)
+ populate();
+
+ assert(listIterator != documents.end());
+
+ ++listIterator;
+ if (listIterator == documents.end()) {
+ pCurrent.reset();
+ count = 0;
+ return false;
+ }
+ pCurrent = listIterator->pDocument;
+
+ return true;
+ }
+
+ intrusive_ptr<Document> DocumentSourceSort::getCurrent() {
+ if (!populated)
+ populate();
+
+ return pCurrent;
+ }
+
+ void DocumentSourceSort::sourceToBson(BSONObjBuilder *pBuilder) const {
+ BSONObjBuilder insides;
+ sortKeyToBson(&insides, false);
+ pBuilder->append(sortName, insides.done());
+ }
+
+ intrusive_ptr<DocumentSourceSort> DocumentSourceSort::create(
+ const intrusive_ptr<ExpressionContext> &pCtx) {
+ intrusive_ptr<DocumentSourceSort> pSource(
+ new DocumentSourceSort(pCtx));
+ return pSource;
+ }
+
+ DocumentSourceSort::DocumentSourceSort(
+ const intrusive_ptr<ExpressionContext> &pTheCtx):
+ populated(false),
+ pCtx(pTheCtx) {
+ }
+
+ void DocumentSourceSort::addKey(const string &fieldPath, bool ascending) {
+ intrusive_ptr<ExpressionFieldPath> pE(
+ ExpressionFieldPath::create(fieldPath));
+ vSortKey.push_back(pE);
+ vAscending.push_back(ascending);
+ }
+
+ void DocumentSourceSort::sortKeyToBson(
+ BSONObjBuilder *pBuilder, bool usePrefix) const {
+ /* add the key fields */
+ const size_t n = vSortKey.size();
+ for(size_t i = 0; i < n; ++i) {
+ /* create the "field name" */
+ stringstream ss;
+ vSortKey[i]->writeFieldPath(ss, usePrefix);
+
+ /* append a named integer based on the sort order */
+ pBuilder->append(ss.str(), (vAscending[i] ? 1 : -1));
+ }
+ }
+
+ intrusive_ptr<DocumentSource> DocumentSourceSort::createFromBson(
+ BSONElement *pBsonElement,
+ const intrusive_ptr<ExpressionContext> &pCtx) {
+ uassert(15973, str::stream() << " the " <<
+ sortName << " key specification must be an object",
+ pBsonElement->type() == Object);
+
+ intrusive_ptr<DocumentSourceSort> pSort(
+ DocumentSourceSort::create(pCtx));
+
+ /* check for, then iterate over, the sort object */
+ size_t sortKeys = 0;
+ for(BSONObjIterator keyIterator(pBsonElement->Obj().begin());
+ keyIterator.more();) {
+ BSONElement keyField(keyIterator.next());
+ const char *pKeyFieldName = keyField.fieldName();
+ int sortOrder = 0;
+
+ uassert(15974, str::stream() << sortName <<
+ " key ordering must be specified using a number",
+ keyField.isNumber());
+ sortOrder = (int)keyField.numberInt();
+
+ uassert(15975, str::stream() << sortName <<
+ " key ordering must be 1 (for ascending) or -1 (for descending",
+ ((sortOrder == 1) || (sortOrder == -1)));
+
+ pSort->addKey(pKeyFieldName, (sortOrder > 0));
+ ++sortKeys;
+ }
+
+ uassert(15976, str::stream() << sortName <<
+ " must have at least one sort key", (sortKeys > 0));
+
+ return pSort;
+ }
+
+ void DocumentSourceSort::populate() {
+ /* make sure we've got a sort key */
+ assert(vSortKey.size());
+
+ /* track and warn about how much physical memory has been used */
+ DocMemMonitor dmm(this);
+
+ /* pull everything from the underlying source */
+ for(bool hasNext = !pSource->eof(); hasNext;
+ hasNext = pSource->advance()) {
+ intrusive_ptr<Document> pDocument(pSource->getCurrent());
+ documents.push_back(Carrier(this, pDocument));
+
+ dmm.addToTotal(pDocument->getApproximateSize());
+ }
+
+ /* sort the list */
+ documents.sort(Carrier::lessThan);
+
+ /* start the sort iterator */
+ listIterator = documents.begin();
+
+ if (listIterator != documents.end())
+ pCurrent = listIterator->pDocument;
+ populated = true;
+ }
+
+ int DocumentSourceSort::compare(
+ const intrusive_ptr<Document> &pL, const intrusive_ptr<Document> &pR) {
+
+ /*
+ populate() already checked that there is a non-empty sort key,
+ so we shouldn't have to worry about that here.
+
+ However, the tricky part is what to do if none of the sort keys are
+ present. In this case, consider the document less.
+ */
+ const size_t n = vSortKey.size();
+ for(size_t i = 0; i < n; ++i) {
+ /* evaluate the sort keys */
+ ExpressionFieldPath *pE = vSortKey[i].get();
+ intrusive_ptr<const Value> pLeft(pE->evaluate(pL));
+ intrusive_ptr<const Value> pRight(pE->evaluate(pR));
+
+ /*
+ Compare the two values; if they differ, return. If they are
+ the same, move on to the next key.
+ */
+ int cmp = Value::compare(pLeft, pRight);
+ if (cmp) {
+ /* if necessary, adjust the return value by the key ordering */
+ if (!vAscending[i])
+ cmp = -cmp;
+
+ return cmp;
+ }
+ }
+
+ /*
+ If we got here, everything matched (or didn't exist), so we'll
+ consider the documents equal for purposes of this sort.
+ */
+ return 0;
+ }
+
+ bool DocumentSourceSort::Carrier::lessThan(
+ const Carrier &rL, const Carrier &rR) {
+ /* make sure these aren't from different lists */
+ assert(rL.pSort == rR.pSort);
+
+ /* compare the documents according to the sort key */
+ return (rL.pSort->compare(rL.pDocument, rR.pDocument) < 0);
+ }
+}
diff --git a/src/mongo/db/pipeline/document_source_unwind.cpp b/src/mongo/db/pipeline/document_source_unwind.cpp
new file mode 100755
index 00000000000..bb231451113
--- /dev/null
+++ b/src/mongo/db/pipeline/document_source_unwind.cpp
@@ -0,0 +1,234 @@
+/**
+ * Copyright 2011 (c) 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "db/pipeline/document_source.h"
+
+#include "db/jsobj.h"
+#include "db/pipeline/document.h"
+#include "db/pipeline/expression.h"
+#include "db/pipeline/value.h"
+
+namespace mongo {
+
+ const char DocumentSourceUnwind::unwindName[] = "$unwind";
+
+ DocumentSourceUnwind::~DocumentSourceUnwind() {
+ }
+
+ DocumentSourceUnwind::DocumentSourceUnwind():
+ unwindPath(),
+ pNoUnwindDocument(),
+ pUnwindArray(),
+ pUnwinder(),
+ pUnwindValue() {
+ }
+
+ bool DocumentSourceUnwind::eof() {
+ /*
+ If we're unwinding an array, and there are more elements, then we
+ can return more documents.
+ */
+ if (pUnwinder.get() && pUnwinder->more())
+ return false;
+
+ return pSource->eof();
+ }
+
+ bool DocumentSourceUnwind::advance() {
+ if (pUnwinder.get() && pUnwinder->more()) {
+ pUnwindValue = pUnwinder->next();
+ return true;
+ }
+
+ /* release the last document and advance */
+ resetArray();
+ return pSource->advance();
+ }
+
+ intrusive_ptr<Document> DocumentSourceUnwind::getCurrent() {
+ if (!pNoUnwindDocument.get()) {
+ intrusive_ptr<Document> pInDocument(pSource->getCurrent());
+
+ /* create the result document */
+ pNoUnwindDocument = pInDocument;
+ fieldIndex.clear();
+
+ /*
+ First we'll look to see if the path is there. If it isn't,
+ we'll pass this document through. If it is, we record the
+ indexes of the fields down the field path so that we can
+ quickly replace them as we clone the documents along the
+ field path.
+
+ We have to clone all the documents along the field path so
+ that we don't share the end value across documents that have
+ come out of this pipeline operator.
+ */
+ intrusive_ptr<Document> pCurrent(pInDocument);
+ const size_t pathLength = unwindPath.getPathLength();
+ for(size_t i = 0; i < pathLength; ++i) {
+ size_t idx = pCurrent->getFieldIndex(
+ unwindPath.getFieldName(i));
+ if (idx == pCurrent->getFieldCount() ) {
+ /* this document doesn't contain the target field */
+ resetArray();
+ return pInDocument;
+ break;
+ }
+
+ fieldIndex.push_back(idx);
+ Document::FieldPair fp(pCurrent->getField(idx));
+ intrusive_ptr<const Value> pPathValue(fp.second);
+ if (i < pathLength - 1) {
+ if (pPathValue->getType() != Object) {
+ /* can't walk down the field path */
+ resetArray();
+ uassert(15977, str::stream() << unwindName <<
+ ": cannot traverse field path past scalar value for \"" <<
+ fp.first << "\"", false);
+ break;
+ }
+
+ /* move down the object tree */
+ pCurrent = pPathValue->getDocument();
+ }
+ else /* (i == pathLength - 1) */ {
+ if (pPathValue->getType() != Array) {
+ /* last item on path must be an array to unwind */
+ resetArray();
+ uassert(15978, str::stream() << unwindName <<
+ ": value at end of field path must be an array",
+ false);
+ break;
+ }
+
+ /* keep track of the array we're unwinding */
+ pUnwindArray = pPathValue;
+ if (pUnwindArray->getArrayLength() == 0) {
+ /*
+ The $unwind of an empty array is a NULL value. If we
+ encounter this, use the non-unwind path, but clear the
+ unwind value first so that the array field is removed
+ from the clone.
+ */
+ pUnwindValue.reset();
+ intrusive_ptr<Document> pClone(clonePath());
+ resetArray();
+ return pClone;
+ }
+
+ /* get the iterator we'll use to unwind the array */
+ pUnwinder = pUnwindArray->getArray();
+ assert(pUnwinder->more()); // we just checked above...
+ pUnwindValue = pUnwinder->next();
+ }
+ }
+ }
+
+ /*
+ If we're unwinding a field, create an alternate document. In the
+ alternate (clone), replace the unwound array field with the element
+ at the appropriate index.
+ */
+ if (pUnwindArray.get()) {
+ /* clone the document with an array we're unwinding */
+ intrusive_ptr<Document> pUnwindDocument(clonePath());
+
+ return pUnwindDocument;
+ }
+
+ return pNoUnwindDocument;
+ }
+
+ intrusive_ptr<Document> DocumentSourceUnwind::clonePath() const {
+ /*
+ For this to be valid, we must already have pNoUnwindDocument set,
+ and have set up the vector of indices for that document in fieldIndex.
+ */
+ assert(pNoUnwindDocument.get());
+ assert(pUnwinder.get());
+
+ intrusive_ptr<Document> pClone(pNoUnwindDocument->clone());
+ intrusive_ptr<Document> pCurrent(pClone);
+ const size_t n = fieldIndex.size();
+ assert(n);
+ for(size_t i = 0; i < n; ++i) {
+ const size_t fi = fieldIndex[i];
+ Document::FieldPair fp(pCurrent->getField(fi));
+ if (i + 1 < n) {
+ /*
+ For every object in the path but the last, clone it and
+ continue on down.
+ */
+ intrusive_ptr<Document> pNext(
+ fp.second->getDocument()->clone());
+ pCurrent->setField(fi, fp.first, Value::createDocument(pNext));
+ pCurrent = pNext;
+ }
+ else {
+ /* for the last, substitute the next unwound value */
+ pCurrent->setField(fi, fp.first, pUnwindValue);
+ }
+ }
+
+ return pClone;
+ }
+
+ void DocumentSourceUnwind::sourceToBson(BSONObjBuilder *pBuilder) const {
+ pBuilder->append(unwindName, unwindPath.getPath(true));
+ }
+
+ intrusive_ptr<DocumentSourceUnwind> DocumentSourceUnwind::create() {
+ intrusive_ptr<DocumentSourceUnwind> pSource(
+ new DocumentSourceUnwind());
+ return pSource;
+ }
+
+ void DocumentSourceUnwind::unwindField(const FieldPath &rFieldPath) {
+ /* can't set more than one unwind field */
+ uassert(15979, str::stream() << unwindName <<
+ ": can't unwind more than one path at once",
+ !unwindPath.getPathLength());
+
+ uassert(15980, "the path of the field to unwind cannot be empty",
+ rFieldPath.getPathLength());
+
+ /* record the field path */
+ unwindPath = rFieldPath;
+ }
+
+ intrusive_ptr<DocumentSource> DocumentSourceUnwind::createFromBson(
+ BSONElement *pBsonElement,
+ const intrusive_ptr<ExpressionContext> &pCtx) {
+ /*
+ The value of $unwind should just be a field path.
+ */
+ uassert(15981, str::stream() << "the " << unwindName <<
+ " field path must be specified as a string",
+ pBsonElement->type() == String);
+
+ string prefixedPathString(pBsonElement->String());
+ string pathString(Expression::removeFieldPrefix(prefixedPathString));
+ intrusive_ptr<DocumentSourceUnwind> pUnwind(
+ DocumentSourceUnwind::create());
+ pUnwind->unwindPath = FieldPath(pathString);
+
+ return pUnwind;
+ }
+}
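clonePath() avoids mutating documents that have already been handed downstream: it clones only the nodes along the unwind path and substitutes the current array element at the leaf, so untouched subtrees stay shared between the clones and the original. A simplified sketch of that partial deep clone over a toy nested-document type; Node and cloneWithElement are hypothetical, not the MongoDB Document API.

    #include <iostream>
    #include <map>
    #include <memory>
    #include <string>
    #include <vector>

    struct Node;
    using NodePtr = std::shared_ptr<Node>;

    struct Node {
        std::map<std::string, NodePtr> fields;   // sub-documents
        std::vector<int> array;                  // leaf array (toy value type)
        int scalar = 0;                          // leaf scalar
    };

    // Clone the nodes along `path` only, replacing the leaf with `element`.
    NodePtr cloneWithElement(const NodePtr& root,
                             const std::vector<std::string>& path,
                             int element) {
        NodePtr cloneRoot = std::make_shared<Node>(*root);   // shallow copy: children shared
        Node* current = cloneRoot.get();
        for (size_t i = 0; i + 1 < path.size(); ++i) {
            NodePtr child = std::make_shared<Node>(*current->fields[path[i]]);
            current->fields[path[i]] = child;                 // re-point this level only
            current = child.get();
        }
        NodePtr leaf = std::make_shared<Node>();
        leaf->scalar = element;                               // the unwound element
        current->fields[path.back()] = leaf;
        return cloneRoot;
    }

    int main() {
        // document shaped like {x: {tags: [1,2,3]}, y: {...}}
        NodePtr doc = std::make_shared<Node>();
        doc->fields["x"] = std::make_shared<Node>();
        doc->fields["x"]->fields["tags"] = std::make_shared<Node>();
        doc->fields["x"]->fields["tags"]->array = {1, 2, 3};
        doc->fields["y"] = std::make_shared<Node>();

        std::vector<std::string> path{"x", "tags"};
        for (int element : doc->fields["x"]->fields["tags"]->array) {
            NodePtr out = cloneWithElement(doc, path, element);
            std::cout << "x.tags=" << out->fields["x"]->fields["tags"]->scalar
                      << " y shared: "
                      << (out->fields["y"] == doc->fields["y"]) << "\n";
        }
        return 0;
    }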
diff --git a/src/mongo/db/pipeline/expression.cpp b/src/mongo/db/pipeline/expression.cpp
new file mode 100755
index 00000000000..b3caefcf899
--- /dev/null
+++ b/src/mongo/db/pipeline/expression.cpp
@@ -0,0 +1,2815 @@
+/**
+ * Copyright (c) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "db/pipeline/expression.h"
+
+#include <cstdio>
+#include "db/jsobj.h"
+#include "db/pipeline/builder.h"
+#include "db/pipeline/document.h"
+#include "db/pipeline/expression_context.h"
+#include "db/pipeline/value.h"
+#include "util/mongoutils/str.h"
+
+namespace mongo {
+ using namespace mongoutils;
+
+ /* --------------------------- Expression ------------------------------ */
+
+ void Expression::toMatcherBson(
+ BSONObjBuilder *pBuilder, unsigned depth) const {
+ assert(false && "Expression::toMatcherBson()");
+ }
+
+ Expression::ObjectCtx::ObjectCtx(int theOptions):
+ options(theOptions),
+ unwindField() {
+ }
+
+ void Expression::ObjectCtx::unwind(string fieldName) {
+ assert(unwindOk());
+ assert(!unwindUsed());
+ assert(fieldName.size());
+ unwindField = fieldName;
+ }
+
+ bool Expression::ObjectCtx::documentOk() const {
+ return ((options & DOCUMENT_OK) != 0);
+ }
+
+ const char Expression::unwindName[] = "$unwind";
+
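+    /* strip the leading '$' from a field path reference, e.g. "$a.b" becomes "a.b" */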
+ string Expression::removeFieldPrefix(const string &prefixedField) {
+ const char *pPrefixedField = prefixedField.c_str();
+ uassert(15982, str::stream() <<
+ "field path references must be prefixed with a '$' (\"" <<
+ prefixedField << "\"", pPrefixedField[0] == '$');
+
+ return string(pPrefixedField + 1);
+ }
+
+ intrusive_ptr<Expression> Expression::parseObject(
+ BSONElement *pBsonElement, ObjectCtx *pCtx) {
+ /*
+ An object expression can take any of the following forms:
+
+ f0: {f1: ..., f2: ..., f3: ...}
+ f0: {$operator:[operand1, operand2, ...]}
+ f0: {$unwind:"fieldpath"}
+
+ We handle $unwind as a special case, because this is done by the
+ projection source. For any other expression, we hand over control to
+ code that parses the expression and returns an expression.
+ */
+
+ intrusive_ptr<Expression> pExpression; // the result
+ intrusive_ptr<ExpressionObject> pExpressionObject; // alt result
+ int isOp = -1; /* -1 -> unknown, 0 -> not an operator, 1 -> operator */
+ enum { UNKNOWN, NOTOPERATOR, OPERATOR } kind = UNKNOWN;
+
+ BSONObj obj(pBsonElement->Obj());
+ BSONObjIterator iter(obj);
+ for(size_t fieldCount = 0; iter.more(); ++fieldCount) {
+ BSONElement fieldElement(iter.next());
+ const char *pFieldName = fieldElement.fieldName();
+
+ if (pFieldName[0] == '$') {
+ uassert(15983, str::stream() <<
+ "the operator must be the only field in a pipeline object (at \""
+ << pFieldName << "\"",
+ fieldCount == 0);
+
+ /* we've determined this "object" is an operator expression */
+ isOp = 1;
+ kind = OPERATOR;
+
+ if (strcmp(pFieldName, unwindName) != 0) {
+ pExpression = parseExpression(pFieldName, &fieldElement);
+ }
+ else {
+ assert(pCtx->unwindOk());
+ // CW TODO error: it's not OK to unwind in this context
+
+ assert(!pCtx->unwindUsed());
+ // CW TODO error: this projection already has an unwind
+
+ assert(fieldElement.type() == String);
+ // CW TODO $unwind operand must be single field name
+
+ string fieldPath(removeFieldPrefix(fieldElement.String()));
+ pExpression = ExpressionFieldPath::create(fieldPath);
+ pCtx->unwind(fieldPath);
+ }
+ }
+ else {
+ uassert(15984, str::stream() << "this object is already an operator expression, and can't be used as a document expression (at \"" <<
+ pFieldName << "\")",
+ isOp != 1);
+ uassert(15990, str::stream() << "this object is already an operator expression, and can't be used as a document expression (at \"" <<
+ pFieldName << "\")",
+ kind != OPERATOR);
+
+ /* if it's our first time, create the document expression */
+ if (!pExpression.get()) {
+ assert(pCtx->documentOk());
+ // CW TODO error: document not allowed in this context
+
+ pExpressionObject = ExpressionObject::create();
+ pExpression = pExpressionObject;
+
+ /* this "object" is not an operator expression */
+ isOp = 0;
+ kind = NOTOPERATOR;
+ }
+
+ BSONType fieldType = fieldElement.type();
+ string fieldName(pFieldName);
+ if (fieldType == Object) {
+ /* it's a nested document */
+ ObjectCtx oCtx(
+ (pCtx->documentOk() ? ObjectCtx::DOCUMENT_OK : 0));
+ intrusive_ptr<Expression> pNested(
+ parseObject(&fieldElement, &oCtx));
+ pExpressionObject->addField(fieldName, pNested);
+ }
+ else if (fieldType == String) {
+ /* it's a renamed field */
+ // CW TODO could also be a constant
+ intrusive_ptr<Expression> pPath(
+ ExpressionFieldPath::create(
+ removeFieldPrefix(fieldElement.String())));
+ pExpressionObject->addField(fieldName, pPath);
+ }
+ else if (fieldType == NumberDouble) {
+ /* it's an inclusion specification */
+ int inclusion = static_cast<int>(fieldElement.Double());
+ if (inclusion == 0)
+ pExpressionObject->excludePath(fieldName);
+ else if (inclusion == 1)
+ pExpressionObject->includePath(fieldName);
+ else
+ uassert(15991, str::stream() <<
+ "\"" << fieldName <<
+ "\" numeric inclusion or exclusion must be 1 or 0 (or boolean)",
+ false);
+ }
+ else if (fieldType == Bool) {
+ bool inclusion = fieldElement.Bool();
+ if (!inclusion)
+ pExpressionObject->excludePath(fieldName);
+ else
+ pExpressionObject->includePath(fieldName);
+ }
+ else { /* nothing else is allowed */
+ uassert(15992, str::stream() <<
+ "disallowed field type " << fieldType <<
+ " in object expression (at \"" <<
+ fieldName << "\")", false);
+ }
+ }
+ }
+
+ return pExpression;
+ }
+
+
+ struct OpDesc {
+ const char *pName;
+ intrusive_ptr<ExpressionNary> (*pFactory)(void);
+ };
+
+ static int OpDescCmp(const void *pL, const void *pR) {
+ return strcmp(((const OpDesc *)pL)->pName, ((const OpDesc *)pR)->pName);
+ }
+
+ /*
+ Keep these sorted alphabetically so we can bsearch() them using
+ OpDescCmp() above.
+ */
+ static const OpDesc OpTable[] = {
+ {"$add", ExpressionAdd::create},
+ {"$and", ExpressionAnd::create},
+ {"$cmp", ExpressionCompare::createCmp},
+ {"$cond", ExpressionCond::create},
+ {"$const", ExpressionNoOp::create},
+ {"$dayOfMonth", ExpressionDayOfMonth::create},
+ {"$dayOfWeek", ExpressionDayOfWeek::create},
+ {"$dayOfYear", ExpressionDayOfYear::create},
+ {"$divide", ExpressionDivide::create},
+ {"$eq", ExpressionCompare::createEq},
+ {"$gt", ExpressionCompare::createGt},
+ {"$gte", ExpressionCompare::createGte},
+ {"$hour", ExpressionHour::create},
+ {"$ifNull", ExpressionIfNull::create},
+ {"$lt", ExpressionCompare::createLt},
+ {"$lte", ExpressionCompare::createLte},
+ {"$minute", ExpressionMinute::create},
+ {"$mod", ExpressionMod::create},
+ {"$month", ExpressionMonth::create},
+ {"$multiply", ExpressionMultiply::create},
+ {"$ne", ExpressionCompare::createNe},
+ {"$not", ExpressionNot::create},
+ {"$or", ExpressionOr::create},
+ {"$second", ExpressionSecond::create},
+ {"$strcasecmp", ExpressionStrcasecmp::create},
+ {"$substr", ExpressionSubstr::create},
+ {"$subtract", ExpressionSubtract::create},
+ {"$toLower", ExpressionToLower::create},
+ {"$toUpper", ExpressionToUpper::create},
+ {"$week", ExpressionWeek::create},
+ {"$year", ExpressionYear::create},
+ };
+
+ static const size_t NOp = sizeof(OpTable)/sizeof(OpTable[0]);
+
+ intrusive_ptr<Expression> Expression::parseExpression(
+ const char *pOpName, BSONElement *pBsonElement) {
+ /* look for the specified operator */
+ OpDesc key;
+ key.pName = pOpName;
+ const OpDesc *pOp = (const OpDesc *)bsearch(
+ &key, OpTable, NOp, sizeof(OpDesc), OpDescCmp);
+
+ uassert(15999, str::stream() << "invalid operator \"" <<
+ pOpName << "\"", pOp);
+
+ /* make the expression node */
+ intrusive_ptr<ExpressionNary> pExpression((*pOp->pFactory)());
+
+ /* add the operands to the expression node */
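+        /*
+          Operands may appear in any of three forms, e.g.:
+            {$not: {$eq: ["$a", 1]}}   a single object operand
+            {$add: [1, "$a"]}          an array of operands
+            {$toUpper: "$name"}        a single atomic operand
+        */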
+ BSONType elementType = pBsonElement->type();
+ if (elementType == Object) {
+ /* the operator must be unary and accept an object argument */
+ BSONObj objOperand(pBsonElement->Obj());
+ ObjectCtx oCtx(ObjectCtx::DOCUMENT_OK);
+ intrusive_ptr<Expression> pOperand(
+ Expression::parseObject(pBsonElement, &oCtx));
+ pExpression->addOperand(pOperand);
+ }
+ else if (elementType == Array) {
+ /* multiple operands - an n-ary operator */
+ vector<BSONElement> bsonArray(pBsonElement->Array());
+ const size_t n = bsonArray.size();
+ for(size_t i = 0; i < n; ++i) {
+ BSONElement *pBsonOperand = &bsonArray[i];
+ intrusive_ptr<Expression> pOperand(
+ Expression::parseOperand(pBsonOperand));
+ pExpression->addOperand(pOperand);
+ }
+ }
+ else { /* assume it's an atomic operand */
+ intrusive_ptr<Expression> pOperand(
+ Expression::parseOperand(pBsonElement));
+ pExpression->addOperand(pOperand);
+ }
+
+ return pExpression;
+ }
+
+ intrusive_ptr<Expression> Expression::parseOperand(BSONElement *pBsonElement) {
+ BSONType type = pBsonElement->type();
+
+ switch(type) {
+ case String: {
+ /*
+ This could be a field path, or it could be a constant
+ string.
+
+ We make a copy of the BSONElement reader so we can read its
+ value without advancing its state, in case we need to read it
+ again in the constant code path.
+ */
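+            /*
+                For example, "$a.b" is parsed as a field path expression for
+                the path "a.b", while "hello" is treated as a constant string.
+            */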
+ BSONElement opCopy(*pBsonElement);
+ string value(opCopy.String());
+
+ /* check for a field path */
+ if (value[0] != '$')
+ goto ExpectConstant; // assume plain string constant
+
+ /* if we got here, this is a field path expression */
+ string fieldPath(removeFieldPrefix(value));
+ intrusive_ptr<Expression> pFieldExpr(
+ ExpressionFieldPath::create(fieldPath));
+ return pFieldExpr;
+ }
+
+ case Object: {
+ ObjectCtx oCtx(ObjectCtx::DOCUMENT_OK);
+ intrusive_ptr<Expression> pSubExpression(
+ Expression::parseObject(pBsonElement, &oCtx));
+ return pSubExpression;
+ }
+
+ default:
+ ExpectConstant: {
+ intrusive_ptr<Expression> pOperand(
+ ExpressionConstant::createFromBsonElement(pBsonElement));
+ return pOperand;
+ }
+
+ } // switch(type)
+
+ /* NOTREACHED */
+ assert(false);
+ return intrusive_ptr<Expression>();
+ }
+
+ /* ------------------------- ExpressionAdd ----------------------------- */
+
+ ExpressionAdd::~ExpressionAdd() {
+ }
+
+ intrusive_ptr<Expression> ExpressionAdd::optimize() {
+ intrusive_ptr<Expression> pE(ExpressionNary::optimize());
+ ExpressionAdd *pA = dynamic_cast<ExpressionAdd *>(pE.get());
+ if (pA) {
+ /* don't create a circular reference */
+ if (pA != this)
+ pA->pAdd = this;
+ }
+
+ return pE;
+ }
+
+ intrusive_ptr<ExpressionNary> ExpressionAdd::create() {
+ intrusive_ptr<ExpressionAdd> pExpression(new ExpressionAdd());
+ return pExpression;
+ }
+
+ ExpressionAdd::ExpressionAdd():
+ ExpressionNary(),
+ useOriginal(false) {
+ }
+
+ intrusive_ptr<const Value> ExpressionAdd::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ unsigned stringCount = 0;
+ unsigned nonConstStringCount = 0;
+ unsigned dateCount = 0;
+ const size_t n = vpOperand.size();
+ vector<intrusive_ptr<const Value> > vpValue; /* evaluated operands */
+
+ /* use the original, if we've been told to do so */
+ if (useOriginal) {
+ return pAdd->evaluate(pDocument);
+ }
+
+ for (size_t i = 0; i < n; ++i) {
+ intrusive_ptr<const Value> pValue(
+ vpOperand[i]->evaluate(pDocument));
+ vpValue.push_back(pValue);
+
+ BSONType valueType = pValue->getType();
+ if (valueType == String) {
+ ++stringCount;
+ if (!dynamic_cast<ExpressionConstant *>(vpOperand[i].get()))
+ ++nonConstStringCount;
+ }
+ else if (valueType == Date)
+ ++dateCount;
+ }
+
+ /*
+          We don't allow adding two dates together because it doesn't make
+          sense, especially since they are in epoch time. However, if a
+          string is present, the dates are simply appended to that string,
+          so having more than one date is not a problem.
+ */
+ if ((dateCount > 1) && !stringCount) {
+ uassert(16000, "can't add two dates together", false);
+ return Value::getNull();
+ }
+
+ /*
+ If there are non-constant strings, and we've got a copy of the
+ original, then use that from this point forward. This is necessary
+ to keep the order of strings the same for string concatenation;
+ constant-folding would violate the order preservation.
+
+ This is a one-way conversion we do if we see one of these. It is
+ possible that these could vary from document to document, but any
+ sane schema probably isn't going to do that, so once we see a string,
+ we can probably assume they're going to be strings all the way down.
+ */
+ if (nonConstStringCount && pAdd.get()) {
+ useOriginal = true;
+ return pAdd->evaluate(pDocument);
+ }
+
+ if (stringCount) {
+ stringstream stringTotal;
+ for (size_t i = 0; i < n; ++i) {
+ intrusive_ptr<const Value> pValue(vpValue[i]);
+ stringTotal << pValue->coerceToString();
+ }
+
+ return Value::createString(stringTotal.str());
+ }
+
+ if (dateCount) {
+ long long dateTotal = 0;
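+            /*
+              Non-date operands are interpreted as a number of days and are
+              converted to milliseconds before being added to the total.
+            */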
+ for (size_t i = 0; i < n; ++i) {
+ intrusive_ptr<const Value> pValue(vpValue[i]);
+ if (pValue->getType() == Date)
+ dateTotal += pValue->coerceToDate();
+ else
+ dateTotal += static_cast<long long>(pValue->coerceToDouble()*24*60*60*1000);
+ }
+
+ return Value::createDate(Date_t(dateTotal));
+ }
+
+ /*
+ We'll try to return the narrowest possible result value. To do that
+ without creating intermediate Values, do the arithmetic for double
+ and integral types in parallel, tracking the current narrowest
+ type.
+ */
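+        /*
+          For example, {$add: [1, 2]} yields the int 3, while
+          {$add: [1, 2.5]} widens the result to the double 3.5.
+        */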
+ double doubleTotal = 0;
+ long long longTotal = 0;
+ BSONType totalType = NumberInt;
+ for(size_t i = 0; i < n; ++i) {
+ intrusive_ptr<const Value> pValue(vpValue[i]);
+
+ totalType = Value::getWidestNumeric(totalType, pValue->getType());
+ doubleTotal += pValue->coerceToDouble();
+ longTotal += pValue->coerceToLong();
+ }
+
+ if (totalType == NumberDouble)
+ return Value::createDouble(doubleTotal);
+ if (totalType == NumberLong)
+ return Value::createLong(longTotal);
+ return Value::createInt((int)longTotal);
+ }
+
+ const char *ExpressionAdd::getOpName() const {
+ return "$add";
+ }
+
+ intrusive_ptr<ExpressionNary> (*ExpressionAdd::getFactory() const)() {
+ return ExpressionAdd::create;
+ }
+
+ void ExpressionAdd::toBson(
+ BSONObjBuilder *pBuilder, const char *pOpName, unsigned depth) const {
+
+ if (pAdd)
+ pAdd->toBson(pBuilder, pOpName, depth);
+ else
+ ExpressionNary::toBson(pBuilder, pOpName, depth);
+ }
+
+
+ /* ------------------------- ExpressionAnd ----------------------------- */
+
+ ExpressionAnd::~ExpressionAnd() {
+ }
+
+ intrusive_ptr<ExpressionNary> ExpressionAnd::create() {
+ intrusive_ptr<ExpressionNary> pExpression(new ExpressionAnd());
+ return pExpression;
+ }
+
+ ExpressionAnd::ExpressionAnd():
+ ExpressionNary() {
+ }
+
+ intrusive_ptr<Expression> ExpressionAnd::optimize() {
+ /* optimize the conjunction as much as possible */
+ intrusive_ptr<Expression> pE(ExpressionNary::optimize());
+
+ /* if the result isn't a conjunction, we can't do anything */
+ ExpressionAnd *pAnd = dynamic_cast<ExpressionAnd *>(pE.get());
+ if (!pAnd)
+ return pE;
+
+ /*
+          Check the last argument of the result; if it's not constant (as
+          promised by ExpressionNary::optimize()), then there's nothing
+          we can do.
+ */
+ const size_t n = pAnd->vpOperand.size();
+ intrusive_ptr<Expression> pLast(pAnd->vpOperand[n - 1]);
+ const ExpressionConstant *pConst =
+ dynamic_cast<ExpressionConstant *>(pLast.get());
+ if (!pConst)
+ return pE;
+
+ /*
+ Evaluate and coerce the last argument to a boolean. If it's false,
+ then we can replace this entire expression.
+ */
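+        /*
+          For example, once constants have been folded to the end,
+          {$and: ["$a", false]} reduces to the constant false, while
+          {$and: ["$a", true]} reduces to a boolean coercion of "$a".
+        */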
+ bool last = pLast->evaluate(intrusive_ptr<Document>())->coerceToBool();
+ if (!last) {
+ intrusive_ptr<ExpressionConstant> pFinal(
+ ExpressionConstant::create(Value::getFalse()));
+ return pFinal;
+ }
+
+ /*
+ If we got here, the final operand was true, so we don't need it
+ anymore. If there was only one other operand, we don't need the
+ conjunction either. Note we still need to keep the promise that
+ the result will be a boolean.
+ */
+ if (n == 2) {
+ intrusive_ptr<Expression> pFinal(
+ ExpressionCoerceToBool::create(pAnd->vpOperand[0]));
+ return pFinal;
+ }
+
+ /*
+ Remove the final "true" value, and return the new expression.
+
+ CW TODO:
+ Note that because of any implicit conversions, we may need to
+ apply an implicit boolean conversion.
+ */
+ pAnd->vpOperand.resize(n - 1);
+ return pE;
+ }
+
+ intrusive_ptr<const Value> ExpressionAnd::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ const size_t n = vpOperand.size();
+ for(size_t i = 0; i < n; ++i) {
+ intrusive_ptr<const Value> pValue(vpOperand[i]->evaluate(pDocument));
+ if (!pValue->coerceToBool())
+ return Value::getFalse();
+ }
+
+ return Value::getTrue();
+ }
+
+ const char *ExpressionAnd::getOpName() const {
+ return "$and";
+ }
+
+ void ExpressionAnd::toMatcherBson(
+ BSONObjBuilder *pBuilder, unsigned depth) const {
+ /*
+ There are two patterns we can handle:
+ (1) one or two comparisons on the same field: { a:{$gte:3, $lt:7} }
+ (2) multiple field comparisons: {a:7, b:{$lte:6}, c:2}
+ This can be recognized as a conjunction of a set of range
+ expressions. Direct equality is a degenerate range expression;
+ range expressions can be open-ended.
+ */
+ assert(false && "unimplemented");
+ }
+
+ intrusive_ptr<ExpressionNary> (*ExpressionAnd::getFactory() const)() {
+ return ExpressionAnd::create;
+ }
+
+ /* -------------------- ExpressionCoerceToBool ------------------------- */
+
+ ExpressionCoerceToBool::~ExpressionCoerceToBool() {
+ }
+
+ intrusive_ptr<ExpressionCoerceToBool> ExpressionCoerceToBool::create(
+ const intrusive_ptr<Expression> &pExpression) {
+ intrusive_ptr<ExpressionCoerceToBool> pNew(
+ new ExpressionCoerceToBool(pExpression));
+ return pNew;
+ }
+
+ ExpressionCoerceToBool::ExpressionCoerceToBool(
+ const intrusive_ptr<Expression> &pTheExpression):
+ Expression(),
+ pExpression(pTheExpression) {
+ }
+
+ intrusive_ptr<Expression> ExpressionCoerceToBool::optimize() {
+ /* optimize the operand */
+ pExpression = pExpression->optimize();
+
+ /* if the operand already produces a boolean, then we don't need this */
+ /* LATER - Expression to support a "typeof" query? */
+ Expression *pE = pExpression.get();
+ if (dynamic_cast<ExpressionAnd *>(pE) ||
+ dynamic_cast<ExpressionOr *>(pE) ||
+ dynamic_cast<ExpressionNot *>(pE) ||
+ dynamic_cast<ExpressionCoerceToBool *>(pE))
+ return pExpression;
+
+ return intrusive_ptr<Expression>(this);
+ }
+
+ intrusive_ptr<const Value> ExpressionCoerceToBool::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+
+ intrusive_ptr<const Value> pResult(pExpression->evaluate(pDocument));
+ bool b = pResult->coerceToBool();
+ if (b)
+ return Value::getTrue();
+ return Value::getFalse();
+ }
+
+ void ExpressionCoerceToBool::addToBsonObj(
+ BSONObjBuilder *pBuilder, string fieldName, unsigned depth) const {
+ assert(false && "not possible"); // no equivalent of this
+ }
+
+ void ExpressionCoerceToBool::addToBsonArray(
+ BSONArrayBuilder *pBuilder, unsigned depth) const {
+ assert(false && "not possible"); // no equivalent of this
+ }
+
+ /* ----------------------- ExpressionCompare --------------------------- */
+
+ ExpressionCompare::~ExpressionCompare() {
+ }
+
+ intrusive_ptr<ExpressionNary> ExpressionCompare::createEq() {
+ intrusive_ptr<ExpressionCompare> pExpression(
+ new ExpressionCompare(EQ));
+ return pExpression;
+ }
+
+ intrusive_ptr<ExpressionNary> ExpressionCompare::createNe() {
+ intrusive_ptr<ExpressionCompare> pExpression(
+ new ExpressionCompare(NE));
+ return pExpression;
+ }
+
+ intrusive_ptr<ExpressionNary> ExpressionCompare::createGt() {
+ intrusive_ptr<ExpressionCompare> pExpression(
+ new ExpressionCompare(GT));
+ return pExpression;
+ }
+
+ intrusive_ptr<ExpressionNary> ExpressionCompare::createGte() {
+ intrusive_ptr<ExpressionCompare> pExpression(
+ new ExpressionCompare(GTE));
+ return pExpression;
+ }
+
+ intrusive_ptr<ExpressionNary> ExpressionCompare::createLt() {
+ intrusive_ptr<ExpressionCompare> pExpression(
+ new ExpressionCompare(LT));
+ return pExpression;
+ }
+
+ intrusive_ptr<ExpressionNary> ExpressionCompare::createLte() {
+ intrusive_ptr<ExpressionCompare> pExpression(
+ new ExpressionCompare(LTE));
+ return pExpression;
+ }
+
+ intrusive_ptr<ExpressionNary> ExpressionCompare::createCmp() {
+ intrusive_ptr<ExpressionCompare> pExpression(
+ new ExpressionCompare(CMP));
+ return pExpression;
+ }
+
+ ExpressionCompare::ExpressionCompare(CmpOp theCmpOp):
+ ExpressionNary(),
+ cmpOp(theCmpOp) {
+ }
+
+ void ExpressionCompare::addOperand(
+ const intrusive_ptr<Expression> &pExpression) {
+ checkArgLimit(2);
+ ExpressionNary::addOperand(pExpression);
+ }
+
+ /*
+ Lookup table for truth value returns
+ */
+ struct CmpLookup {
+ bool truthValue[3]; /* truth value for -1, 0, 1 */
+ Expression::CmpOp reverse; /* reverse comparison operator */
+ char name[5]; /* string name (w/trailing '\0') */
+ };
+ static const CmpLookup cmpLookup[7] = {
+ /* -1 0 1 reverse name */
+ /* EQ */ { { false, true, false }, Expression::EQ, "$eq" },
+ /* NE */ { { true, false, true }, Expression::NE, "$ne" },
+ /* GT */ { { false, false, true }, Expression::LTE, "$gt" },
+ /* GTE */ { { false, true, true }, Expression::LT, "$gte" },
+ /* LT */ { { true, false, false }, Expression::GTE, "$lt" },
+ /* LTE */ { { true, true, false }, Expression::GT, "$lte" },
+ /* CMP */ { { false, false, false }, Expression::CMP, "$cmp" },
+ };
+
+ intrusive_ptr<Expression> ExpressionCompare::optimize() {
+ /* first optimize the comparison operands */
+ intrusive_ptr<Expression> pE(ExpressionNary::optimize());
+
+ /*
+ If the result of optimization is no longer a comparison, there's
+ nothing more we can do.
+ */
+ ExpressionCompare *pCmp = dynamic_cast<ExpressionCompare *>(pE.get());
+ if (!pCmp)
+ return pE;
+
+ /* check to see if optimizing comparison operator is supported */
+ CmpOp newOp = pCmp->cmpOp;
+ if (newOp == CMP)
+ return pE; // not reversible: there's nothing more we can do
+
+ /*
+ There's one localized optimization we recognize: a comparison
+ between a field and a constant. If we recognize that pattern,
+ replace it with an ExpressionFieldRange.
+
+ When looking for this pattern, note that the operands could appear
+ in any order. If we need to reverse the sense of the comparison to
+ put it into the required canonical form, do so.
+ */
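+        /*
+          For example, {$gt: ["$a", 5]} becomes a field range on "a" with an
+          exclusive lower bound of 5 and no upper bound.
+        */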
+ intrusive_ptr<Expression> pLeft(pCmp->vpOperand[0]);
+ intrusive_ptr<Expression> pRight(pCmp->vpOperand[1]);
+ intrusive_ptr<ExpressionFieldPath> pFieldPath(
+ dynamic_pointer_cast<ExpressionFieldPath>(pLeft));
+ intrusive_ptr<ExpressionConstant> pConstant;
+ if (pFieldPath.get()) {
+ pConstant = dynamic_pointer_cast<ExpressionConstant>(pRight);
+ if (!pConstant.get())
+ return pE; // there's nothing more we can do
+ }
+ else {
+ /* if the first operand wasn't a path, see if it's a constant */
+ pConstant = dynamic_pointer_cast<ExpressionConstant>(pLeft);
+ if (!pConstant.get())
+ return pE; // there's nothing more we can do
+
+ /* the left operand was a constant; see if the right is a path */
+ pFieldPath = dynamic_pointer_cast<ExpressionFieldPath>(pRight);
+ if (!pFieldPath.get())
+ return pE; // there's nothing more we can do
+
+ /* these were not in canonical order, so reverse the sense */
+ newOp = cmpLookup[newOp].reverse;
+ }
+
+ return ExpressionFieldRange::create(
+ pFieldPath, newOp, pConstant->getValue());
+ }
+
+ intrusive_ptr<const Value> ExpressionCompare::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ checkArgCount(2);
+ intrusive_ptr<const Value> pLeft(vpOperand[0]->evaluate(pDocument));
+ intrusive_ptr<const Value> pRight(vpOperand[1]->evaluate(pDocument));
+
+ BSONType leftType = pLeft->getType();
+ BSONType rightType = pRight->getType();
+ uassert(15994, str::stream() << getOpName() <<
+ ": no automatic conversion for types " <<
+ leftType << " and " << rightType,
+ leftType == rightType);
+ // CW TODO at least for now. later, handle automatic conversions
+
+ int cmp = 0;
+ switch(leftType) {
+ case NumberDouble: {
+ double left = pLeft->getDouble();
+ double right = pRight->getDouble();
+
+ if (left < right)
+ cmp = -1;
+ else if (left > right)
+ cmp = 1;
+ break;
+ }
+
+ case NumberInt: {
+ int left = pLeft->getInt();
+ int right = pRight->getInt();
+
+ if (left < right)
+ cmp = -1;
+ else if (left > right)
+ cmp = 1;
+ break;
+ }
+
+ case String: {
+ string left(pLeft->getString());
+ string right(pRight->getString());
+ cmp = signum(left.compare(right));
+ break;
+ }
+
+ default:
+ uassert(15995, str::stream() <<
+ "can't compare values of type " << leftType, false);
+ break;
+ }
+
+ if (cmpOp == CMP) {
+ switch(cmp) {
+ case -1:
+ return Value::getMinusOne();
+ case 0:
+ return Value::getZero();
+ case 1:
+ return Value::getOne();
+
+ default:
+ assert(false); // CW TODO internal error
+ return Value::getNull();
+ }
+ }
+
+ bool returnValue = cmpLookup[cmpOp].truthValue[cmp + 1];
+ if (returnValue)
+ return Value::getTrue();
+ return Value::getFalse();
+ }
+
+ const char *ExpressionCompare::getOpName() const {
+ return cmpLookup[cmpOp].name;
+ }
+
+ /* ----------------------- ExpressionCond ------------------------------ */
+
+ ExpressionCond::~ExpressionCond() {
+ }
+
+ intrusive_ptr<ExpressionNary> ExpressionCond::create() {
+ intrusive_ptr<ExpressionCond> pExpression(new ExpressionCond());
+ return pExpression;
+ }
+
+ ExpressionCond::ExpressionCond():
+ ExpressionNary() {
+ }
+
+ void ExpressionCond::addOperand(
+ const intrusive_ptr<Expression> &pExpression) {
+ checkArgLimit(3);
+ ExpressionNary::addOperand(pExpression);
+ }
+
+ intrusive_ptr<const Value> ExpressionCond::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ checkArgCount(3);
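+        /* operands are [<condition>, <then-value>, <else-value>] */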
+ intrusive_ptr<const Value> pCond(vpOperand[0]->evaluate(pDocument));
+ int idx = pCond->coerceToBool() ? 1 : 2;
+ return vpOperand[idx]->evaluate(pDocument);
+ }
+
+ const char *ExpressionCond::getOpName() const {
+ return "$cond";
+ }
+
+ /* ---------------------- ExpressionConstant --------------------------- */
+
+ ExpressionConstant::~ExpressionConstant() {
+ }
+
+ intrusive_ptr<ExpressionConstant> ExpressionConstant::createFromBsonElement(
+ BSONElement *pBsonElement) {
+ intrusive_ptr<ExpressionConstant> pEC(
+ new ExpressionConstant(pBsonElement));
+ return pEC;
+ }
+
+ ExpressionConstant::ExpressionConstant(BSONElement *pBsonElement):
+ pValue(Value::createFromBsonElement(pBsonElement)) {
+ }
+
+ intrusive_ptr<ExpressionConstant> ExpressionConstant::create(
+ const intrusive_ptr<const Value> &pValue) {
+ intrusive_ptr<ExpressionConstant> pEC(new ExpressionConstant(pValue));
+ return pEC;
+ }
+
+ ExpressionConstant::ExpressionConstant(
+ const intrusive_ptr<const Value> &pTheValue):
+ pValue(pTheValue) {
+ }
+
+
+ intrusive_ptr<Expression> ExpressionConstant::optimize() {
+ /* nothing to do */
+ return intrusive_ptr<Expression>(this);
+ }
+
+ intrusive_ptr<const Value> ExpressionConstant::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ return pValue;
+ }
+
+ void ExpressionConstant::addToBsonObj(
+ BSONObjBuilder *pBuilder, string fieldName, unsigned depth) const {
+
+ /*
+          For depth greater than one, do the regular thing.
+
+          This will normally be the case, because any top-level expression
+          will actually be an operator node, so by the time we get to an
+          expression constant, we're at level 1 (counting up as we go down
+          the expression tree).
+
+ See the comment below for more on why this happens.
+ */
+ if (depth > 1) {
+ pValue->addToBsonObj(pBuilder, fieldName);
+ return;
+ }
+
+ /*
+ If this happens at the top level, we don't have any direct way
+ to express it. However, we may need to if constant folding
+ reduced expressions to constants, and we need to re-materialize
+ the pipeline in order to ship it to a shard server. This has
+ forced the introduction of {$const: ...}.
+ */
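+        /*
+          For example, a field whose expression was constant-folded to 7 is
+          re-emitted as {<fieldName>: {$const: 7}} so it can be parsed again
+          downstream.
+        */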
+ BSONObjBuilder constBuilder;
+ pValue->addToBsonObj(&constBuilder, "$const");
+ pBuilder->append(fieldName, constBuilder.done());
+ }
+
+ void ExpressionConstant::addToBsonArray(
+ BSONArrayBuilder *pBuilder, unsigned depth) const {
+ pValue->addToBsonArray(pBuilder);
+ }
+
+ const char *ExpressionConstant::getOpName() const {
+ assert(false); // this has no name
+ return NULL;
+ }
+
+ /* ---------------------- ExpressionDayOfMonth ------------------------- */
+
+ ExpressionDayOfMonth::~ExpressionDayOfMonth() {
+ }
+
+ intrusive_ptr<ExpressionNary> ExpressionDayOfMonth::create() {
+ intrusive_ptr<ExpressionDayOfMonth> pExpression(new ExpressionDayOfMonth());
+ return pExpression;
+ }
+
+ ExpressionDayOfMonth::ExpressionDayOfMonth():
+ ExpressionNary() {
+ }
+
+ void ExpressionDayOfMonth::addOperand(const intrusive_ptr<Expression> &pExpression) {
+ checkArgLimit(1);
+
+ ExpressionNary::addOperand(pExpression);
+ }
+
+ intrusive_ptr<const Value> ExpressionDayOfMonth::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ checkArgCount(1);
+ intrusive_ptr<const Value> pDate(vpOperand[0]->evaluate(pDocument));
+ tm date;
+ (pDate->coerceToDate()).toTm(&date);
+ return Value::createInt(date.tm_mday);
+ }
+
+ const char *ExpressionDayOfMonth::getOpName() const {
+ return "$dayOfMonth";
+ }
+
+ /* ------------------------- ExpressionDayOfWeek ----------------------------- */
+
+ ExpressionDayOfWeek::~ExpressionDayOfWeek() {
+ }
+
+ intrusive_ptr<ExpressionNary> ExpressionDayOfWeek::create() {
+ intrusive_ptr<ExpressionDayOfWeek> pExpression(new ExpressionDayOfWeek());
+ return pExpression;
+ }
+
+ ExpressionDayOfWeek::ExpressionDayOfWeek():
+ ExpressionNary() {
+ }
+
+ void ExpressionDayOfWeek::addOperand(const intrusive_ptr<Expression> &pExpression) {
+ checkArgLimit(1);
+ ExpressionNary::addOperand(pExpression);
+ }
+
+ intrusive_ptr<const Value> ExpressionDayOfWeek::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ checkArgCount(1);
+ intrusive_ptr<const Value> pDate(vpOperand[0]->evaluate(pDocument));
+ tm date;
+ (pDate->coerceToDate()).toTm(&date);
+ return Value::createInt(date.tm_wday+1); // MySQL uses 1-7 tm uses 0-6
+ }
+
+ const char *ExpressionDayOfWeek::getOpName() const {
+ return "$dayOfWeek";
+ }
+
+ /* ------------------------- ExpressionDayOfYear ----------------------------- */
+
+ ExpressionDayOfYear::~ExpressionDayOfYear() {
+ }
+
+ intrusive_ptr<ExpressionNary> ExpressionDayOfYear::create() {
+ intrusive_ptr<ExpressionDayOfYear> pExpression(new ExpressionDayOfYear());
+ return pExpression;
+ }
+
+ ExpressionDayOfYear::ExpressionDayOfYear():
+ ExpressionNary() {
+ }
+
+ void ExpressionDayOfYear::addOperand(const intrusive_ptr<Expression> &pExpression) {
+ checkArgLimit(1);
+ ExpressionNary::addOperand(pExpression);
+ }
+
+ intrusive_ptr<const Value> ExpressionDayOfYear::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ checkArgCount(1);
+ intrusive_ptr<const Value> pDate(vpOperand[0]->evaluate(pDocument));
+ tm date;
+ (pDate->coerceToDate()).toTm(&date);
+ return Value::createInt(date.tm_yday+1); // MySQL uses 1-366 tm uses 0-365
+ }
+
+ const char *ExpressionDayOfYear::getOpName() const {
+ return "$dayOfYear";
+ }
+
+ /* ----------------------- ExpressionDivide ---------------------------- */
+
+ ExpressionDivide::~ExpressionDivide() {
+ }
+
+ intrusive_ptr<ExpressionNary> ExpressionDivide::create() {
+ intrusive_ptr<ExpressionDivide> pExpression(new ExpressionDivide());
+ return pExpression;
+ }
+
+ ExpressionDivide::ExpressionDivide():
+ ExpressionNary() {
+ }
+
+ void ExpressionDivide::addOperand(
+ const intrusive_ptr<Expression> &pExpression) {
+ checkArgLimit(2);
+ ExpressionNary::addOperand(pExpression);
+ }
+
+ intrusive_ptr<const Value> ExpressionDivide::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ checkArgCount(2);
+ intrusive_ptr<const Value> pLeft(vpOperand[0]->evaluate(pDocument));
+ intrusive_ptr<const Value> pRight(vpOperand[1]->evaluate(pDocument));
+
+ double right = pRight->coerceToDouble();
+ if (right == 0)
+ return Value::getUndefined();
+
+ double left = pLeft->coerceToDouble();
+
+ return Value::createDouble(left / right);
+ }
+
+ const char *ExpressionDivide::getOpName() const {
+ return "$divide";
+ }
+
+ /* ---------------------- ExpressionObject --------------------------- */
+
+ ExpressionObject::~ExpressionObject() {
+ }
+
+ intrusive_ptr<ExpressionObject> ExpressionObject::create() {
+ intrusive_ptr<ExpressionObject> pExpression(new ExpressionObject());
+ return pExpression;
+ }
+
+ ExpressionObject::ExpressionObject():
+ excludePaths(false),
+ path(),
+ vFieldName(),
+ vpExpression() {
+ }
+
+ intrusive_ptr<Expression> ExpressionObject::optimize() {
+ const size_t n = vpExpression.size();
+ for(size_t i = 0; i < n; ++i) {
+ intrusive_ptr<Expression> pE(vpExpression[i]->optimize());
+ vpExpression[i] = pE;
+ }
+
+ return intrusive_ptr<Expression>(this);
+ }
+
+ void ExpressionObject::addToDocument(
+ const intrusive_ptr<Document> &pResult,
+ const intrusive_ptr<Document> &pDocument) const {
+ const size_t pathSize = path.size();
+ set<string>::const_iterator end(path.end());
+
+ /*
+          Take care of inclusions or exclusions. Note that _id is special,
+          in that it is always included unless it is specifically excluded;
+          we use excludeId for that case when excludePaths is false, which
+          means to include paths.
+ */
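+        /*
+          For example, with excludePaths set and path = {"b"}, the document
+          {a: 1, b: 2} yields {a: 1}; with inclusions and path = {"a"}, the
+          same document also yields {a: 1}.
+        */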
+ if (pathSize) {
+ auto_ptr<FieldIterator> pIter(pDocument->createFieldIterator());
+ if (excludePaths) {
+ while(pIter->more()) {
+ pair<string, intrusive_ptr<const Value> > field(pIter->next());
+
+ /*
+ If the field in the document is not in the exclusion set,
+ add it to the result document.
+
+ Note that exclusions are only allowed on leaves, so we
+ can assume we don't have to descend recursively here.
+ */
+ if (path.find(field.first) != end)
+ continue; // we found it, so don't add it
+
+ pResult->addField(field.first, field.second);
+ }
+ }
+ else { /* !excludePaths */
+ while(pIter->more()) {
+ pair<string, intrusive_ptr<const Value> > field(
+ pIter->next());
+ /*
+ If the field in the document is in the inclusion set,
+ add it to the result document. Or, if we're not
+ excluding _id, and it is _id, include it.
+
+ Note that this could be an inclusion along a pathway,
+ so we look for an ExpressionObject in vpExpression; when
+ we find one, we populate the result with the evaluation
+ of that on the nested object, yielding relative paths.
+ This also allows us to handle intermediate arrays; if we
+ encounter one, we repeat this for each array element.
+ */
+ if (path.find(field.first) != end) {
+ /* find the Expression */
+ const size_t n = vFieldName.size();
+ size_t i;
+ Expression *pE = NULL;
+ for(i = 0; i < n; ++i) {
+ if (field.first.compare(vFieldName[i]) == 0) {
+ pE = vpExpression[i].get();
+ break;
+ }
+ }
+
+ /*
+ If we didn't find an expression, it's the last path
+ element to include.
+ */
+ if (!pE) {
+ pResult->addField(field.first, field.second);
+ continue;
+ }
+
+ ExpressionObject *pChild =
+ dynamic_cast<ExpressionObject *>(pE);
+ assert(pChild);
+
+ /*
+ Check on the type of the result object. If it's an
+ object, just walk down into that recursively, and
+ add it to the result.
+ */
+ BSONType valueType = field.second->getType();
+ if (valueType == Object) {
+ intrusive_ptr<Document> pD(
+ pChild->evaluateDocument(
+ field.second->getDocument()));
+ pResult->addField(vFieldName[i],
+ Value::createDocument(pD));
+ }
+ else if (valueType == Array) {
+ /*
+ If it's an array, we have to do the same thing,
+ but to each array element. Then, add the array
+ of results to the current document.
+ */
+ vector<intrusive_ptr<const Value> > result;
+ intrusive_ptr<ValueIterator> pVI(
+ field.second->getArray());
+ while(pVI->more()) {
+ intrusive_ptr<Document> pD(
+ pChild->evaluateDocument(
+ pVI->next()->getDocument()));
+ result.push_back(Value::createDocument(pD));
+ }
+
+ pResult->addField(vFieldName[i],
+ Value::createArray(result));
+ }
+ }
+ }
+ }
+ }
+
+ /* add any remaining fields we haven't already taken care of */
+ const size_t n = vFieldName.size();
+ for(size_t i = 0; i < n; ++i) {
+ string fieldName(vFieldName[i]);
+
+ /* if we've already dealt with this field, above, do nothing */
+ if (path.find(fieldName) != end)
+ continue;
+
+ intrusive_ptr<const Value> pValue(
+ vpExpression[i]->evaluate(pDocument));
+
+ /*
+ Don't add non-existent values (note: different from NULL);
+ this is consistent with existing selection syntax which doesn't
+              force the appearance of non-existent fields.
+ */
+ if (pValue->getType() == Undefined)
+ continue;
+
+ pResult->addField(fieldName, pValue);
+ }
+ }
+
+ size_t ExpressionObject::getSizeHint(
+ const intrusive_ptr<Document> &pDocument) const {
+ size_t sizeHint = pDocument->getFieldCount();
+ const size_t pathSize = path.size();
+ if (!excludePaths)
+ sizeHint += pathSize;
+ else {
+ size_t excludeCount = pathSize;
+ if (sizeHint > excludeCount)
+ sizeHint -= excludeCount;
+ else
+ sizeHint = 0;
+ }
+
+ /* account for the additional computed fields */
+ sizeHint += vFieldName.size();
+
+ return sizeHint;
+ }
+
+ intrusive_ptr<Document> ExpressionObject::evaluateDocument(
+ const intrusive_ptr<Document> &pDocument) const {
+ /* create and populate the result */
+ intrusive_ptr<Document> pResult(
+ Document::create(getSizeHint(pDocument)));
+ addToDocument(pResult, pDocument);
+ return pResult;
+ }
+
+ intrusive_ptr<const Value> ExpressionObject::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ return Value::createDocument(evaluateDocument(pDocument));
+ }
+
+ void ExpressionObject::addField(const string &fieldName,
+ const intrusive_ptr<Expression> &pExpression) {
+ /* must have an expression */
+ assert(pExpression.get());
+
+ /* parse the field path */
+ FieldPath fieldPath(fieldName);
+ uassert(16008, str::stream() <<
+ "an expression object's field names cannot be field paths (at \"" <<
+ fieldName << "\")", fieldPath.getPathLength() == 1);
+
+ /* make sure it isn't a name we've included or excluded */
+ set<string>::iterator ex(path.find(fieldName));
+ uassert(16009, str::stream() <<
+ "can't add a field to an object expression that has already been excluded (at \"" <<
+ fieldName << "\")", ex == path.end());
+
+ /* make sure it isn't a name we've already got */
+ const size_t n = vFieldName.size();
+ for(size_t i = 0; i < n; ++i) {
+ uassert(16010, str::stream() <<
+ "can't add the same field to an object expression more than once (at \"" <<
+ fieldName << "\")",
+ fieldName.compare(vFieldName[i]) != 0);
+ }
+
+ vFieldName.push_back(fieldName);
+ vpExpression.push_back(pExpression);
+ }
+
+ void ExpressionObject::includePath(
+ const FieldPath *pPath, size_t pathi, size_t pathn, bool excludeLast) {
+
+ /* get the current path field name */
+ string fieldName(pPath->getFieldName(pathi));
+ uassert(16011,
+ "an object expression can't include an empty field-name",
+ fieldName.length());
+
+ const size_t pathCount = path.size();
+
+ /* if this is the leaf-most object, stop */
+ if (pathi == pathn - 1) {
+ /*
+ Make sure the exclusion configuration of this node matches
+ the requested result. Or, that this is the first (determining)
+ specification.
+ */
+ uassert(16012, str::stream() <<
+ "incompatible exclusion for \"" <<
+ pPath->getPath(false) <<
+ "\" because of a prior inclusion that includes a common sub-path",
+ ((excludePaths == excludeLast) || !pathCount));
+
+ excludePaths = excludeLast; // if (!pathCount), set this
+ path.insert(fieldName);
+ return;
+ }
+
+ /* this level had better be about inclusions */
+ uassert(16013, str::stream() <<
+ "incompatible inclusion for \"" << pPath->getPath(false) <<
+ "\" because of a prior exclusion that includes a common sub-path",
+ !excludePaths);
+
+ /* see if we already know about this field */
+ const size_t n = vFieldName.size();
+ size_t i;
+ for(i = 0; i < n; ++i) {
+ if (fieldName.compare(vFieldName[i]) == 0)
+ break;
+ }
+
+ /* find the right object, and continue */
+ ExpressionObject *pChild;
+ if (i < n) {
+ /* the intermediate child already exists */
+ pChild = dynamic_cast<ExpressionObject *>(vpExpression[i].get());
+ assert(pChild);
+ }
+ else {
+ /*
+ If we get here, the intervening child isn't already there,
+ so create it.
+ */
+ intrusive_ptr<ExpressionObject> pSharedChild(
+ ExpressionObject::create());
+ path.insert(fieldName);
+ vFieldName.push_back(fieldName);
+ vpExpression.push_back(pSharedChild);
+ pChild = pSharedChild.get();
+ }
+
+ // LATER CW TODO turn this into a loop
+ pChild->includePath(pPath, pathi + 1, pathn, excludeLast);
+ }
+
+ void ExpressionObject::includePath(const string &theFieldPath) {
+ /* parse the field path */
+ FieldPath fieldPath(theFieldPath);
+ includePath(&fieldPath, 0, fieldPath.getPathLength(), false);
+ }
+
+ void ExpressionObject::excludePath(const string &theFieldPath) {
+ /* parse the field path */
+ FieldPath fieldPath(theFieldPath);
+ includePath(&fieldPath, 0, fieldPath.getPathLength(), true);
+ }
+
+ intrusive_ptr<Expression> ExpressionObject::getField(
+ const string &fieldName) const {
+ const size_t n = vFieldName.size();
+ for(size_t i = 0; i < n; ++i) {
+ if (fieldName.compare(vFieldName[i]) == 0)
+ return vpExpression[i];
+ }
+
+ /* if we got here, we didn't find it */
+ return intrusive_ptr<Expression>();
+ }
+
+ void ExpressionObject::emitPaths(
+ BSONObjBuilder *pBuilder, vector<string> *pvPath) const {
+ if (!path.size())
+ return;
+
+ /* we use these for loops */
+ const size_t nField = vFieldName.size();
+ const size_t nPath = pvPath->size();
+
+ /*
+ We can iterate over the inclusion/exclusion paths in their
+ (random) set order because they don't affect the order that
+ fields are listed in the result. That comes from the underlying
+ Document they are fetched from.
+ */
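+        /*
+          Leaf paths are emitted in dotted form, e.g. an included path "a.b"
+          produces the pair "a.b": true (or false for an exclusion tree).
+        */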
+ for(set<string>::const_iterator end(path.end()),
+ iter(path.begin()); iter != end; ++iter) {
+
+ /* find the matching field description */
+ size_t iField = 0;
+ for(; iField < nField; ++iField) {
+ if (iter->compare(vFieldName[iField]) == 0)
+ break;
+ }
+
+ if (iField == nField) {
+ /*
+ If we didn't find a matching field description, this is the
+ leaf, so add the path.
+ */
+ stringstream ss;
+
+ for(size_t iPath = 0; iPath < nPath; ++iPath)
+ ss << (*pvPath)[iPath] << ".";
+ ss << *iter;
+
+ pBuilder->append(ss.str(), !excludePaths);
+ }
+ else {
+ /*
+ If we found a matching field description, then we need to
+ descend into the next level.
+ */
+ Expression *pE = vpExpression[iField].get();
+ ExpressionObject *pEO = dynamic_cast<ExpressionObject *>(pE);
+ assert(pEO);
+
+ /*
+ Add the current field name to the path being built up,
+ then go down into the next level.
+ */
+ PathPusher pathPusher(pvPath, vFieldName[iField]);
+ pEO->emitPaths(pBuilder, pvPath);
+ }
+ }
+ }
+
+ void ExpressionObject::documentToBson(
+ BSONObjBuilder *pBuilder, unsigned depth) const {
+
+ /* emit any inclusion/exclusion paths */
+ vector<string> vPath;
+ emitPaths(pBuilder, &vPath);
+
+ /* then add any expressions */
+ const size_t nField = vFieldName.size();
+ const set<string>::const_iterator pathEnd(path.end());
+ for(size_t iField = 0; iField < nField; ++iField) {
+ string fieldName(vFieldName[iField]);
+
+ /* if we already took care of this, don't repeat it */
+ if (path.find(fieldName) != pathEnd)
+ continue;
+
+ vpExpression[iField]->addToBsonObj(pBuilder, fieldName, depth + 1);
+ }
+ }
+
+ void ExpressionObject::addToBsonObj(
+ BSONObjBuilder *pBuilder, string fieldName, unsigned depth) const {
+
+ BSONObjBuilder objBuilder;
+ documentToBson(&objBuilder, depth);
+ pBuilder->append(fieldName, objBuilder.done());
+ }
+
+ void ExpressionObject::addToBsonArray(
+ BSONArrayBuilder *pBuilder, unsigned depth) const {
+
+ BSONObjBuilder objBuilder;
+ documentToBson(&objBuilder, depth);
+ pBuilder->append(objBuilder.done());
+ }
+
+ /* --------------------- ExpressionFieldPath --------------------------- */
+
+ ExpressionFieldPath::~ExpressionFieldPath() {
+ }
+
+ intrusive_ptr<ExpressionFieldPath> ExpressionFieldPath::create(
+ const string &fieldPath) {
+ intrusive_ptr<ExpressionFieldPath> pExpression(
+ new ExpressionFieldPath(fieldPath));
+ return pExpression;
+ }
+
+ ExpressionFieldPath::ExpressionFieldPath(
+ const string &theFieldPath):
+ fieldPath(theFieldPath) {
+ }
+
+ intrusive_ptr<Expression> ExpressionFieldPath::optimize() {
+ /* nothing can be done for these */
+ return intrusive_ptr<Expression>(this);
+ }
+
+ intrusive_ptr<const Value> ExpressionFieldPath::evaluatePath(
+ size_t index, const size_t pathLength,
+ intrusive_ptr<Document> pDocument) const {
+ intrusive_ptr<const Value> pValue; /* the return value */
+
+ pValue = pDocument->getValue(fieldPath.getFieldName(index));
+
+ /* if the field doesn't exist, quit with an undefined value */
+ if (!pValue.get())
+ return Value::getUndefined();
+
+ /* if we've hit the end of the path, stop */
+ ++index;
+ if (index >= pathLength)
+ return pValue;
+
+ /*
+ We're diving deeper. If the value was null, return null.
+ */
+ BSONType type = pValue->getType();
+ if ((type == Undefined) || (type == jstNULL))
+ return Value::getUndefined();
+
+ if (type == Object) {
+ /* extract from the next level down */
+ return evaluatePath(index, pathLength, pValue->getDocument());
+ }
+
+ if (type == Array) {
+ /*
+ We're going to repeat this for each member of the array,
+ building up a new array as we go.
+ */
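+            /*
+              For example, evaluating the path "a.b" against
+              {a: [{b: 1}, {b: 2}]} yields the array [1, 2].
+            */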
+ vector<intrusive_ptr<const Value> > result;
+ intrusive_ptr<ValueIterator> pIter(pValue->getArray());
+ while(pIter->more()) {
+ intrusive_ptr<const Value> pItem(pIter->next());
+ BSONType iType = pItem->getType();
+ if ((iType == Undefined) || (iType == jstNULL)) {
+ result.push_back(pItem);
+ continue;
+ }
+
+ uassert(16014, str::stream() <<
+ "the element \"" << fieldPath.getFieldName(index) <<
+ "\" along the dotted path \"" <<
+ fieldPath.getPath(false) <<
+ "\" is not an object, and cannot be navigated",
+ iType == Object);
+ intrusive_ptr<const Value> itemResult(
+ evaluatePath(index, pathLength, pItem->getDocument()));
+ result.push_back(itemResult);
+ }
+
+ return Value::createArray(result);
+ }
+
+ uassert(16015, str::stream() <<
+ "can't navigate into value of type " << type <<
+ "at \"" << fieldPath.getFieldName(index) <<
+ "\" in dotted path \"" << fieldPath.getPath(false),
+ false);
+ return intrusive_ptr<const Value>();
+ }
+
+ intrusive_ptr<const Value> ExpressionFieldPath::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ return evaluatePath(0, fieldPath.getPathLength(), pDocument);
+ }
+
+ void ExpressionFieldPath::addToBsonObj(
+ BSONObjBuilder *pBuilder, string fieldName, unsigned depth) const {
+ pBuilder->append(fieldName, fieldPath.getPath(true));
+ }
+
+ void ExpressionFieldPath::addToBsonArray(
+ BSONArrayBuilder *pBuilder, unsigned depth) const {
+ pBuilder->append(getFieldPath(true));
+ }
+
+    /* --------------------- ExpressionFieldRange -------------------------- */
+
+ ExpressionFieldRange::~ExpressionFieldRange() {
+ }
+
+ intrusive_ptr<Expression> ExpressionFieldRange::optimize() {
+ /* if there is no range to match, this will never evaluate true */
+ if (!pRange.get())
+ return ExpressionConstant::create(Value::getFalse());
+
+ /*
+ If we ended up with a double un-ended range, anything matches. I
+ don't know how that can happen, given intersect()'s interface, but
+ here it is, just in case.
+ */
+ if (!pRange->pBottom.get() && !pRange->pTop.get())
+ return ExpressionConstant::create(Value::getTrue());
+
+ /*
+ In all other cases, we have to test candidate values. The
+ intersect() method has already optimized those tests, so there
+ aren't any more optimizations to look for here.
+ */
+ return intrusive_ptr<Expression>(this);
+ }
+
+ intrusive_ptr<const Value> ExpressionFieldRange::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ /* if there's no range, there can't be a match */
+ if (!pRange.get())
+ return Value::getFalse();
+
+ /* get the value of the specified field */
+ intrusive_ptr<const Value> pValue(pFieldPath->evaluate(pDocument));
+
+ /* see if it fits within any of the ranges */
+ if (pRange->contains(pValue))
+ return Value::getTrue();
+
+ return Value::getFalse();
+ }
+
+ void ExpressionFieldRange::addToBson(
+ Builder *pBuilder, unsigned depth) const {
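+        /*
+          Re-serialize the range as comparison operators; e.g. a range with
+          an inclusive lower bound of 3 and an exclusive upper bound of 7 on
+          field "a" would emit (assuming the field path serializes with its
+          "$" prefix): {$and: [{$gte: ["$a", 3]}, {$lt: ["$a", 7]}]}
+        */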
+ if (!pRange.get()) {
+ /* nothing will satisfy this predicate */
+ pBuilder->append(false);
+ return;
+ }
+
+ if (!pRange->pTop.get() && !pRange->pBottom.get()) {
+ /* any value will satisfy this predicate */
+ pBuilder->append(true);
+ return;
+ }
+
+ if (pRange->pTop.get() == pRange->pBottom.get()) {
+ BSONArrayBuilder operands;
+ pFieldPath->addToBsonArray(&operands, depth);
+ pRange->pTop->addToBsonArray(&operands);
+
+ BSONObjBuilder equals;
+ equals.append("$eq", operands.arr());
+ pBuilder->append(&equals);
+ return;
+ }
+
+ BSONObjBuilder leftOperator;
+ if (pRange->pBottom.get()) {
+ BSONArrayBuilder leftOperands;
+ pFieldPath->addToBsonArray(&leftOperands, depth);
+ pRange->pBottom->addToBsonArray(&leftOperands);
+ leftOperator.append(
+ (pRange->bottomOpen ? "$gt" : "$gte"),
+ leftOperands.arr());
+
+ if (!pRange->pTop.get()) {
+ pBuilder->append(&leftOperator);
+ return;
+ }
+ }
+
+ BSONObjBuilder rightOperator;
+ if (pRange->pTop.get()) {
+ BSONArrayBuilder rightOperands;
+ pFieldPath->addToBsonArray(&rightOperands, depth);
+ pRange->pTop->addToBsonArray(&rightOperands);
+ rightOperator.append(
+ (pRange->topOpen ? "$lt" : "$lte"),
+ rightOperands.arr());
+
+ if (!pRange->pBottom.get()) {
+ pBuilder->append(&rightOperator);
+ return;
+ }
+ }
+
+ BSONArrayBuilder andOperands;
+ andOperands.append(leftOperator.done());
+ andOperands.append(rightOperator.done());
+ BSONObjBuilder andOperator;
+ andOperator.append("$and", andOperands.arr());
+ pBuilder->append(&andOperator);
+ }
+
+ void ExpressionFieldRange::addToBsonObj(
+ BSONObjBuilder *pBuilder, string fieldName, unsigned depth) const {
+ BuilderObj builder(pBuilder, fieldName);
+ addToBson(&builder, depth);
+ }
+
+ void ExpressionFieldRange::addToBsonArray(
+ BSONArrayBuilder *pBuilder, unsigned depth) const {
+ BuilderArray builder(pBuilder);
+ addToBson(&builder, depth);
+ }
+
+ void ExpressionFieldRange::toMatcherBson(
+ BSONObjBuilder *pBuilder, unsigned depth) const {
+ assert(pRange.get()); // otherwise, we can't do anything
+
+ /* if there are no endpoints, then every value is accepted */
+ if (!pRange->pBottom.get() && !pRange->pTop.get())
+ return; // nothing to add to the predicate
+
+ /* we're going to need the field path */
+ string fieldPath(pFieldPath->getFieldPath(false));
+
+ BSONObjBuilder range;
+ if (pRange->pBottom.get()) {
+ /* the test for equality doesn't generate a subobject */
+ if (pRange->pBottom.get() == pRange->pTop.get()) {
+ pRange->pBottom->addToBsonObj(pBuilder, fieldPath);
+ return;
+ }
+
+ pRange->pBottom->addToBsonObj(
+                &range, (pRange->bottomOpen ? "$gt" : "$gte"));
+ }
+
+ if (pRange->pTop.get()) {
+ pRange->pTop->addToBsonObj(
+                &range, (pRange->topOpen ? "$lt" : "$lte"));
+ }
+
+ pBuilder->append(fieldPath, range.done());
+ }
+
+ intrusive_ptr<ExpressionFieldRange> ExpressionFieldRange::create(
+ const intrusive_ptr<ExpressionFieldPath> &pFieldPath, CmpOp cmpOp,
+ const intrusive_ptr<const Value> &pValue) {
+ intrusive_ptr<ExpressionFieldRange> pE(
+ new ExpressionFieldRange(pFieldPath, cmpOp, pValue));
+ return pE;
+ }
+
+ ExpressionFieldRange::ExpressionFieldRange(
+ const intrusive_ptr<ExpressionFieldPath> &pTheFieldPath, CmpOp cmpOp,
+ const intrusive_ptr<const Value> &pValue):
+ pFieldPath(pTheFieldPath),
+ pRange(new Range(cmpOp, pValue)) {
+ }
+
+ void ExpressionFieldRange::intersect(
+ CmpOp cmpOp, const intrusive_ptr<const Value> &pValue) {
+
+ /* create the new range */
+ scoped_ptr<Range> pNew(new Range(cmpOp, pValue));
+
+ /*
+ Go through the range list. For every range, either add the
+ intersection of that to the range list, or if there is none, the
+ original range. This has the effect of restricting overlapping
+ ranges, but leaving non-overlapping ones as-is.
+ */
+ pRange.reset(pRange->intersect(pNew.get()));
+ }
+
+ ExpressionFieldRange::Range::Range(
+ CmpOp cmpOp, const intrusive_ptr<const Value> &pValue):
+ bottomOpen(false),
+ topOpen(false),
+ pBottom(),
+ pTop() {
+ switch(cmpOp) {
+ case NE:
+ bottomOpen = topOpen = true;
+ /* FALLTHROUGH */
+ case EQ:
+ pBottom = pTop = pValue;
+ break;
+
+ case GT:
+ bottomOpen = true;
+ /* FALLTHROUGH */
+ case GTE:
+ topOpen = true;
+ pBottom = pValue;
+ break;
+
+ case LT:
+ topOpen = true;
+ /* FALLTHROUGH */
+ case LTE:
+ bottomOpen = true;
+ pTop = pValue;
+ break;
+
+ case CMP:
+ assert(false); // not allowed
+ break;
+ }
+ }
+
+ ExpressionFieldRange::Range::Range(const Range &rRange):
+ bottomOpen(rRange.bottomOpen),
+ topOpen(rRange.topOpen),
+ pBottom(rRange.pBottom),
+ pTop(rRange.pTop) {
+ }
+
+ ExpressionFieldRange::Range::Range(
+ const intrusive_ptr<const Value> &pTheBottom, bool theBottomOpen,
+ const intrusive_ptr<const Value> &pTheTop, bool theTopOpen):
+ bottomOpen(theBottomOpen),
+ topOpen(theTopOpen),
+ pBottom(pTheBottom),
+ pTop(pTheTop) {
+ }
+
+ ExpressionFieldRange::Range *ExpressionFieldRange::Range::intersect(
+ const Range *pRange) const {
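+        /*
+          For example, intersecting the range from {$gte: 3} with the range
+          from {$lt: 7} yields [3, 7), while intersecting {$gt: 5} with
+          {$lt: 2} yields no range (NULL), since the result would be empty.
+        */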
+ /*
+ Find the max of the bottom end of the ranges.
+
+ Start by assuming the maximum is from pRange. Then, if we have
+ values of our own, see if they're greater.
+ */
+ intrusive_ptr<const Value> pMaxBottom(pRange->pBottom);
+ bool maxBottomOpen = pRange->bottomOpen;
+ if (pBottom.get()) {
+ if (!pRange->pBottom.get()) {
+ pMaxBottom = pBottom;
+ maxBottomOpen = bottomOpen;
+ }
+ else {
+ const int cmp = Value::compare(pBottom, pRange->pBottom);
+ if (cmp == 0)
+ maxBottomOpen = bottomOpen || pRange->bottomOpen;
+ else if (cmp > 0) {
+ pMaxBottom = pBottom;
+ maxBottomOpen = bottomOpen;
+ }
+ }
+ }
+
+ /*
+ Find the minimum of the tops of the ranges.
+
+ Start by assuming the minimum is from pRange. Then, if we have
+ values of our own, see if they are less.
+ */
+ intrusive_ptr<const Value> pMinTop(pRange->pTop);
+ bool minTopOpen = pRange->topOpen;
+ if (pTop.get()) {
+ if (!pRange->pTop.get()) {
+ pMinTop = pTop;
+ minTopOpen = topOpen;
+ }
+ else {
+ const int cmp = Value::compare(pTop, pRange->pTop);
+ if (cmp == 0)
+ minTopOpen = topOpen || pRange->topOpen;
+ else if (cmp < 0) {
+ pMinTop = pTop;
+ minTopOpen = topOpen;
+ }
+ }
+ }
+
+ /*
+ If the intersections didn't create a disjoint set, create the
+ new range.
+ */
+ if (Value::compare(pMaxBottom, pMinTop) <= 0)
+ return new Range(pMaxBottom, maxBottomOpen, pMinTop, minTopOpen);
+
+ /* if we got here, the intersection is empty */
+ return NULL;
+ }
+
+ bool ExpressionFieldRange::Range::contains(
+ const intrusive_ptr<const Value> &pValue) const {
+ if (pBottom.get()) {
+ const int cmp = Value::compare(pValue, pBottom);
+ if (cmp < 0)
+ return false;
+ if (bottomOpen && (cmp == 0))
+ return false;
+ }
+
+ if (pTop.get()) {
+ const int cmp = Value::compare(pValue, pTop);
+ if (cmp > 0)
+ return false;
+ if (topOpen && (cmp == 0))
+ return false;
+ }
+
+ return true;
+ }
+
+ /* ------------------------- ExpressionMinute ----------------------------- */
+
+ ExpressionMinute::~ExpressionMinute() {
+ }
+
+ intrusive_ptr<ExpressionNary> ExpressionMinute::create() {
+ intrusive_ptr<ExpressionMinute> pExpression(new ExpressionMinute());
+ return pExpression;
+ }
+
+ ExpressionMinute::ExpressionMinute():
+ ExpressionNary() {
+ }
+
+ void ExpressionMinute::addOperand(const intrusive_ptr<Expression> &pExpression) {
+ checkArgLimit(1);
+ ExpressionNary::addOperand(pExpression);
+ }
+
+ intrusive_ptr<const Value> ExpressionMinute::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ checkArgCount(1);
+ intrusive_ptr<const Value> pDate(vpOperand[0]->evaluate(pDocument));
+ tm date;
+ (pDate->coerceToDate()).toTm(&date);
+ return Value::createInt(date.tm_min);
+ }
+
+ const char *ExpressionMinute::getOpName() const {
+ return "$minute";
+ }
+
+ /* ----------------------- ExpressionMod ---------------------------- */
+
+ ExpressionMod::~ExpressionMod() {
+ }
+
+ intrusive_ptr<ExpressionNary> ExpressionMod::create() {
+ intrusive_ptr<ExpressionMod> pExpression(new ExpressionMod());
+ return pExpression;
+ }
+
+ ExpressionMod::ExpressionMod():
+ ExpressionNary() {
+ }
+
+ void ExpressionMod::addOperand(
+ const intrusive_ptr<Expression> &pExpression) {
+ checkArgLimit(2);
+ ExpressionNary::addOperand(pExpression);
+ }
+
+ intrusive_ptr<const Value> ExpressionMod::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ BSONType productType;
+ checkArgCount(2);
+ intrusive_ptr<const Value> pLeft(vpOperand[0]->evaluate(pDocument));
+ intrusive_ptr<const Value> pRight(vpOperand[1]->evaluate(pDocument));
+
+ productType = Value::getWidestNumeric(pRight->getType(), pLeft->getType());
+
+ long long right = pRight->coerceToLong();
+ if (right == 0)
+ return Value::getUndefined();
+
+ long long left = pLeft->coerceToLong();
+ if (productType == NumberLong)
+ return Value::createLong(left % right);
+ return Value::createInt((int)left % right);
+ }
+
+ const char *ExpressionMod::getOpName() const {
+ return "$mod";
+ }
+
+ /* ------------------------- ExpressionMonth ----------------------------- */
+
+ ExpressionMonth::~ExpressionMonth() {
+ }
+
+ intrusive_ptr<ExpressionNary> ExpressionMonth::create() {
+ intrusive_ptr<ExpressionMonth> pExpression(new ExpressionMonth());
+ return pExpression;
+ }
+
+ ExpressionMonth::ExpressionMonth():
+ ExpressionNary() {
+ }
+
+ void ExpressionMonth::addOperand(const intrusive_ptr<Expression> &pExpression) {
+ checkArgLimit(1);
+ ExpressionNary::addOperand(pExpression);
+ }
+
+ intrusive_ptr<const Value> ExpressionMonth::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ checkArgCount(1);
+ intrusive_ptr<const Value> pDate(vpOperand[0]->evaluate(pDocument));
+ tm date;
+ (pDate->coerceToDate()).toTm(&date);
+ return Value::createInt(date.tm_mon+1); // tm_mon is 0-11; $month, like MySQL, reports 1-12
+ }
+
+ const char *ExpressionMonth::getOpName() const {
+ return "$month";
+ }
+
+ /* ------------------------- ExpressionMultiply ----------------------------- */
+
+ ExpressionMultiply::~ExpressionMultiply() {
+ }
+
+ intrusive_ptr<ExpressionNary> ExpressionMultiply::create() {
+ intrusive_ptr<ExpressionMultiply> pExpression(new ExpressionMultiply());
+ return pExpression;
+ }
+
+ ExpressionMultiply::ExpressionMultiply():
+ ExpressionNary() {
+ }
+
+ intrusive_ptr<const Value> ExpressionMultiply::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ /*
+ We'll try to return the narrowest possible result value. To do that
+ without creating intermediate Values, do the arithmetic for double
+ and integral types in parallel, tracking the current narrowest
+ type.
+ */
+ double doubleProduct = 1;
+ long long longProduct = 1;
+ BSONType productType = NumberInt;
+
+ const size_t n = vpOperand.size();
+ for(size_t i = 0; i < n; ++i) {
+ intrusive_ptr<const Value> pValue(vpOperand[i]->evaluate(pDocument));
+
+ productType = Value::getWidestNumeric(productType, pValue->getType());
+ doubleProduct *= pValue->coerceToDouble();
+ longProduct *= pValue->coerceToLong();
+ }
+
+ if (productType == NumberDouble)
+ return Value::createDouble(doubleProduct);
+ if (productType == NumberLong)
+ return Value::createLong(longProduct);
+ return Value::createInt((int)longProduct);
+ }
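+
+ /*
+   Editor's note: illustrative sketch, not part of the original commit.
+   Because both running products are kept, the result type is decided
+   only by the widest operand type seen, e.g.:
+
+     intrusive_ptr<ExpressionNary> pMul(ExpressionMultiply::create());
+     pMul->addOperand(ExpressionConstant::create(Value::createInt(2)));
+     pMul->addOperand(ExpressionConstant::create(Value::createDouble(3.5)));
+     // evaluates to NumberDouble 7.0; with createInt(3) instead of the
+     // double operand it would evaluate to NumberInt 6
+     intrusive_ptr<const Value> pV(pMul->evaluate(intrusive_ptr<Document>()));
+ */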
+
+ const char *ExpressionMultiply::getOpName() const {
+ return "$multiply";
+ }
+
+ intrusive_ptr<ExpressionNary> (*ExpressionMultiply::getFactory() const)() {
+ return ExpressionMultiply::create;
+ }
+
+ /* ------------------------- ExpressionHour ----------------------------- */
+
+ ExpressionHour::~ExpressionHour() {
+ }
+
+ intrusive_ptr<ExpressionNary> ExpressionHour::create() {
+ intrusive_ptr<ExpressionHour> pExpression(new ExpressionHour());
+ return pExpression;
+ }
+
+ ExpressionHour::ExpressionHour():
+ ExpressionNary() {
+ }
+
+ void ExpressionHour::addOperand(const intrusive_ptr<Expression> &pExpression) {
+ checkArgLimit(1);
+ ExpressionNary::addOperand(pExpression);
+ }
+
+ intrusive_ptr<const Value> ExpressionHour::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ checkArgCount(1);
+ intrusive_ptr<const Value> pDate(vpOperand[0]->evaluate(pDocument));
+ tm date;
+ (pDate->coerceToDate()).toTm(&date);
+ return Value::createInt(date.tm_hour);
+ }
+
+ const char *ExpressionHour::getOpName() const {
+ return "$hour";
+ }
+
+ /* ----------------------- ExpressionIfNull ---------------------------- */
+
+ ExpressionIfNull::~ExpressionIfNull() {
+ }
+
+ intrusive_ptr<ExpressionNary> ExpressionIfNull::create() {
+ intrusive_ptr<ExpressionIfNull> pExpression(new ExpressionIfNull());
+ return pExpression;
+ }
+
+ ExpressionIfNull::ExpressionIfNull():
+ ExpressionNary() {
+ }
+
+ void ExpressionIfNull::addOperand(
+ const intrusive_ptr<Expression> &pExpression) {
+ checkArgLimit(2);
+ ExpressionNary::addOperand(pExpression);
+ }
+
+ intrusive_ptr<const Value> ExpressionIfNull::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ checkArgCount(2);
+ intrusive_ptr<const Value> pLeft(vpOperand[0]->evaluate(pDocument));
+ BSONType leftType = pLeft->getType();
+
+ if ((leftType != Undefined) && (leftType != jstNULL))
+ return pLeft;
+
+ intrusive_ptr<const Value> pRight(vpOperand[1]->evaluate(pDocument));
+ return pRight;
+ }
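+
+ /*
+   Editor's note: illustrative sketch, not part of the original commit.
+   $ifNull returns its first operand unless that operand evaluates to
+   Undefined or null, e.g. (field name "maybeMissing" is hypothetical):
+
+     intrusive_ptr<ExpressionNary> pIfNull(ExpressionIfNull::create());
+     pIfNull->addOperand(ExpressionFieldPath::create("maybeMissing"));
+     pIfNull->addOperand(
+         ExpressionConstant::create(Value::createString("fallback")));
+     // documents lacking "maybeMissing" evaluate to the string "fallback";
+     // documents that have it evaluate to its value
+ */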
+
+ const char *ExpressionIfNull::getOpName() const {
+ return "$ifNull";
+ }
+
+ /* ------------------------ ExpressionNary ----------------------------- */
+
+ ExpressionNary::ExpressionNary():
+ vpOperand() {
+ }
+
+ intrusive_ptr<Expression> ExpressionNary::optimize() {
+ unsigned constCount = 0; // count of constant operands
+ unsigned stringCount = 0; // count of constant string operands
+ const size_t n = vpOperand.size();
+ for(size_t i = 0; i < n; ++i) {
+ intrusive_ptr<Expression> pNew(vpOperand[i]->optimize());
+
+ /* substitute the optimized expression */
+ vpOperand[i] = pNew;
+
+ /* check to see if the result was a constant */
+ const ExpressionConstant *pConst =
+ dynamic_cast<ExpressionConstant *>(pNew.get());
+ if (pConst) {
+ ++constCount;
+ if (pConst->getValue()->getType() == String)
+ ++stringCount;
+ }
+ }
+
+ /*
+ If all the operands are constant, we can replace this expression
+ with a constant. We can find the value by evaluating this
+ expression over a NULL Document because evaluating the
+ ExpressionConstant never refers to the argument Document.
+ */
+ if (constCount == n) {
+ intrusive_ptr<const Value> pResult(
+ evaluate(intrusive_ptr<Document>()));
+ intrusive_ptr<Expression> pReplacement(
+ ExpressionConstant::create(pResult));
+ return pReplacement;
+ }
+
+ /*
+ If there are any strings, we can't re-arrange anything, so stop
+ now.
+
+ LATER: we could concatenate adjacent strings as a special case.
+ */
+ if (stringCount)
+ return intrusive_ptr<Expression>(this);
+
+ /*
+ If there's no more than one constant, then we can't do any
+ constant folding, so don't bother going any further.
+ */
+ if (constCount <= 1)
+ return intrusive_ptr<Expression>(this);
+
+ /*
+ If the operator isn't commutative or associative, there's nothing
+ more we can do. We test that by seeing if we can get a factory;
+ if we can, we can use it to construct a temporary expression which
+ we'll evaluate to collapse as many constants as we can down to
+ a single one.
+ */
+ intrusive_ptr<ExpressionNary> (*const pFactory)() = getFactory();
+ if (!pFactory)
+ return intrusive_ptr<Expression>(this);
+
+ /*
+ Create a new Expression that will be the replacement for this one.
+ We actually create two: one to hold constant expressions, and
+ one to hold non-constants. Once we've got these, we evaluate
+ the constant expression to produce a single value, as above.
+ We then add this operand to the end of the non-constant expression,
+ and return that.
+ */
+ intrusive_ptr<ExpressionNary> pNew((*pFactory)());
+ intrusive_ptr<ExpressionNary> pConst((*pFactory)());
+ for(size_t i = 0; i < n; ++i) {
+ intrusive_ptr<Expression> pE(vpOperand[i]);
+ if (dynamic_cast<ExpressionConstant *>(pE.get()))
+ pConst->addOperand(pE);
+ else {
+ /*
+ If the child operand is the same type as this, then we can
+ extract its operands and inline them here because we already
+ know this is commutative and associative because it has a
+ factory. We can detect sameness of the child operator by
+ checking for equality of the factory
+
+ Note we don't have to do this recursively, because we
+ called optimize() on all the children first thing in
+ this call to optimize().
+ */
+ ExpressionNary *pNary =
+ dynamic_cast<ExpressionNary *>(pE.get());
+ if (!pNary)
+ pNew->addOperand(pE);
+ else {
+ intrusive_ptr<ExpressionNary> (*const pChildFactory)() =
+ pNary->getFactory();
+ if (pChildFactory != pFactory)
+ pNew->addOperand(pE);
+ else {
+ /* same factory, so flatten */
+ size_t nChild = pNary->vpOperand.size();
+ for(size_t iChild = 0; iChild < nChild; ++iChild) {
+ intrusive_ptr<Expression> pCE(
+ pNary->vpOperand[iChild]);
+ if (dynamic_cast<ExpressionConstant *>(pCE.get()))
+ pConst->addOperand(pCE);
+ else
+ pNew->addOperand(pCE);
+ }
+ }
+ }
+ }
+ }
+
+ /*
+ If there was only one constant, add it to the end of the expression
+ operand vector.
+ */
+ if (pConst->vpOperand.size() == 1)
+ pNew->addOperand(pConst->vpOperand[0]);
+ else if (pConst->vpOperand.size() > 1) {
+ /*
+ If there was more than one constant, collapse all the constants
+ together before adding the result to the end of the expression
+ operand vector.
+ */
+ intrusive_ptr<const Value> pResult(
+ pConst->evaluate(intrusive_ptr<Document>()));
+ pNew->addOperand(ExpressionConstant::create(pResult));
+ }
+
+ return pNew;
+ }
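+
+ /*
+   Editor's note: illustrative sketch, not part of the original commit.
+   For an associative and commutative operator such as $add (one that
+   returns a factory from getFactory()), this pass flattens same-typed
+   children and folds the constants down to one trailing operand, e.g.
+   in aggregation shell syntax:
+
+     { $add: [ 1, "$a", 2, { $add: [ "$b", 3 ] } ] }
+       is rewritten to
+     { $add: [ "$a", "$b", 6 ] }
+
+   The folded constant is always last in vpOperand, which is the
+   property ExpressionAnd::optimize() and ExpressionOr::optimize() rely on.
+ */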
+
+ void ExpressionNary::addOperand(
+ const intrusive_ptr<Expression> &pExpression) {
+ vpOperand.push_back(pExpression);
+ }
+
+ intrusive_ptr<ExpressionNary> (*ExpressionNary::getFactory() const)() {
+ return NULL;
+ }
+
+ void ExpressionNary::toBson(
+ BSONObjBuilder *pBuilder, const char *pOpName, unsigned depth) const {
+ const size_t nOperand = vpOperand.size();
+ assert(nOperand > 0);
+ if (nOperand == 1) {
+ vpOperand[0]->addToBsonObj(pBuilder, pOpName, depth + 1);
+ return;
+ }
+
+ /* build up the array */
+ BSONArrayBuilder arrBuilder;
+ for(size_t i = 0; i < nOperand; ++i)
+ vpOperand[i]->addToBsonArray(&arrBuilder, depth + 1);
+
+ pBuilder->append(pOpName, arrBuilder.arr());
+ }
+
+ void ExpressionNary::addToBsonObj(
+ BSONObjBuilder *pBuilder, string fieldName, unsigned depth) const {
+ BSONObjBuilder exprBuilder;
+ toBson(&exprBuilder, getOpName(), depth);
+ pBuilder->append(fieldName, exprBuilder.done());
+ }
+
+ void ExpressionNary::addToBsonArray(
+ BSONArrayBuilder *pBuilder, unsigned depth) const {
+ BSONObjBuilder exprBuilder;
+ toBson(&exprBuilder, getOpName(), depth);
+ pBuilder->append(exprBuilder.done());
+ }
+
+ void ExpressionNary::checkArgLimit(unsigned maxArgs) const {
+ uassert(15993, str::stream() << getOpName() <<
+ " only takes " << maxArgs <<
+ " operand" << (maxArgs == 1 ? "" : "s"),
+ vpOperand.size() < maxArgs);
+ }
+
+ void ExpressionNary::checkArgCount(unsigned reqArgs) const {
+ uassert(15997, str::stream() << getOpName() <<
+ ": insufficient operands; " << reqArgs <<
+ " required, only got " << vpOperand.size(),
+ vpOperand.size() == reqArgs);
+ }
+
+ /* ----------------------- ExpressionNoOp ------------------------------ */
+
+ ExpressionNoOp::~ExpressionNoOp() {
+ }
+
+ intrusive_ptr<ExpressionNary> ExpressionNoOp::create() {
+ intrusive_ptr<ExpressionNoOp> pExpression(new ExpressionNoOp());
+ return pExpression;
+ }
+
+ intrusive_ptr<Expression> ExpressionNoOp::optimize() {
+ checkArgCount(1);
+ intrusive_ptr<Expression> pR(vpOperand[0]->optimize());
+ return pR;
+ }
+
+ ExpressionNoOp::ExpressionNoOp():
+ ExpressionNary() {
+ }
+
+ void ExpressionNoOp::addOperand(const intrusive_ptr<Expression> &pExpression) {
+ checkArgLimit(1);
+ ExpressionNary::addOperand(pExpression);
+ }
+
+ intrusive_ptr<const Value> ExpressionNoOp::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ checkArgCount(1);
+ intrusive_ptr<const Value> pValue(vpOperand[0]->evaluate(pDocument));
+ return pValue;
+ }
+
+ const char *ExpressionNoOp::getOpName() const {
+ return "$noOp";
+ }
+
+ /* ------------------------- ExpressionNot ----------------------------- */
+
+ ExpressionNot::~ExpressionNot() {
+ }
+
+ intrusive_ptr<ExpressionNary> ExpressionNot::create() {
+ intrusive_ptr<ExpressionNot> pExpression(new ExpressionNot());
+ return pExpression;
+ }
+
+ ExpressionNot::ExpressionNot():
+ ExpressionNary() {
+ }
+
+ void ExpressionNot::addOperand(const intrusive_ptr<Expression> &pExpression) {
+ checkArgLimit(1);
+ ExpressionNary::addOperand(pExpression);
+ }
+
+ intrusive_ptr<const Value> ExpressionNot::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ checkArgCount(1);
+ intrusive_ptr<const Value> pOp(vpOperand[0]->evaluate(pDocument));
+
+ bool b = pOp->coerceToBool();
+ if (b)
+ return Value::getFalse();
+ return Value::getTrue();
+ }
+
+ const char *ExpressionNot::getOpName() const {
+ return "$not";
+ }
+
+ /* -------------------------- ExpressionOr ----------------------------- */
+
+ ExpressionOr::~ExpressionOr() {
+ }
+
+ intrusive_ptr<ExpressionNary> ExpressionOr::create() {
+ intrusive_ptr<ExpressionNary> pExpression(new ExpressionOr());
+ return pExpression;
+ }
+
+ ExpressionOr::ExpressionOr():
+ ExpressionNary() {
+ }
+
+ intrusive_ptr<const Value> ExpressionOr::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ const size_t n = vpOperand.size();
+ for(size_t i = 0; i < n; ++i) {
+ intrusive_ptr<const Value> pValue(vpOperand[i]->evaluate(pDocument));
+ if (pValue->coerceToBool())
+ return Value::getTrue();
+ }
+
+ return Value::getFalse();
+ }
+
+ void ExpressionOr::toMatcherBson(
+ BSONObjBuilder *pBuilder, unsigned depth) const {
+ BSONObjBuilder opArray;
+ const size_t n = vpOperand.size();
+ for(size_t i = 0; i < n; ++i)
+ vpOperand[i]->toMatcherBson(&opArray, depth + 1);
+
+ pBuilder->append("$or", opArray.done());
+ }
+
+ intrusive_ptr<ExpressionNary> (*ExpressionOr::getFactory() const)() {
+ return ExpressionOr::create;
+ }
+
+ intrusive_ptr<Expression> ExpressionOr::optimize() {
+ /* optimize the disjunction as much as possible */
+ intrusive_ptr<Expression> pE(ExpressionNary::optimize());
+
+ /* if the result isn't a disjunction, we can't do anything */
+ ExpressionOr *pOr = dynamic_cast<ExpressionOr *>(pE.get());
+ if (!pOr)
+ return pE;
+
+ /*
+ Check the last argument on the result; if it's not constant (as
+ promised by ExpressionNary::optimize()), then there's nothing
+ we can do.
+ */
+ const size_t n = pOr->vpOperand.size();
+ intrusive_ptr<Expression> pLast(pOr->vpOperand[n - 1]);
+ const ExpressionConstant *pConst =
+ dynamic_cast<ExpressionConstant *>(pLast.get());
+ if (!pConst)
+ return pE;
+
+ /*
+ Evaluate and coerce the last argument to a boolean. If it's true,
+ then we can replace this entire expression.
+ */
+ bool last = pLast->evaluate(intrusive_ptr<Document>())->coerceToBool();
+ if (last) {
+ intrusive_ptr<ExpressionConstant> pFinal(
+ ExpressionConstant::create(Value::getTrue()));
+ return pFinal;
+ }
+
+ /*
+ If we got here, the final operand was false, so we don't need it
+ anymore. If there was only one other operand, we don't need the
+ disjunction either. Note we still need to keep the promise that
+ the result will be a boolean.
+ */
+ if (n == 2) {
+ intrusive_ptr<Expression> pFinal(
+ ExpressionCoerceToBool::create(pOr->vpOperand[0]));
+ return pFinal;
+ }
+
+ /*
+ Remove the final "false" value, and return the new expression.
+ */
+ pOr->vpOperand.resize(n - 1);
+ return pE;
+ }
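+
+ /*
+   Editor's note: illustrative sketch, not part of the original commit.
+   After ExpressionNary::optimize() any surviving constant is the last
+   operand, so in aggregation shell syntax this pass rewrites:
+
+     { $or: [ "$a", "$b", true  ] }  ->  constant true
+     { $or: [ "$a", "$b", false ] }  ->  { $or: [ "$a", "$b" ] }
+     { $or: [ "$a", false ] }        ->  "$a" coerced to a boolean
+ */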
+
+ const char *ExpressionOr::getOpName() const {
+ return "$or";
+ }
+
+ /* ------------------------- ExpressionSecond ----------------------------- */
+
+ ExpressionSecond::~ExpressionSecond() {
+ }
+
+ intrusive_ptr<ExpressionNary> ExpressionSecond::create() {
+ intrusive_ptr<ExpressionSecond> pExpression(new ExpressionSecond());
+ return pExpression;
+ }
+
+ ExpressionSecond::ExpressionSecond():
+ ExpressionNary() {
+ }
+
+ void ExpressionSecond::addOperand(const intrusive_ptr<Expression> &pExpression) {
+ checkArgLimit(1);
+ ExpressionNary::addOperand(pExpression);
+ }
+
+ intrusive_ptr<const Value> ExpressionSecond::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ checkArgCount(1);
+ intrusive_ptr<const Value> pDate(vpOperand[0]->evaluate(pDocument));
+ tm date;
+ (pDate->coerceToDate()).toTm(&date);
+ return Value::createInt(date.tm_sec);
+ }
+
+ const char *ExpressionSecond::getOpName() const {
+ return "$second";
+ }
+
+ /* ----------------------- ExpressionStrcasecmp ---------------------------- */
+
+ ExpressionStrcasecmp::~ExpressionStrcasecmp() {
+ }
+
+ intrusive_ptr<ExpressionNary> ExpressionStrcasecmp::create() {
+ intrusive_ptr<ExpressionStrcasecmp> pExpression(new ExpressionStrcasecmp());
+ return pExpression;
+ }
+
+ ExpressionStrcasecmp::ExpressionStrcasecmp():
+ ExpressionNary() {
+ }
+
+ void ExpressionStrcasecmp::addOperand(
+ const intrusive_ptr<Expression> &pExpression) {
+ checkArgLimit(2);
+ ExpressionNary::addOperand(pExpression);
+ }
+
+ intrusive_ptr<const Value> ExpressionStrcasecmp::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ checkArgCount(2);
+ intrusive_ptr<const Value> pString1(vpOperand[0]->evaluate(pDocument));
+ intrusive_ptr<const Value> pString2(vpOperand[1]->evaluate(pDocument));
+
+ /* boost::iequals() only reports equality, not ordering, so we allocate upper-cased copies and compare those */
+ string str1 = boost::to_upper_copy( pString1->coerceToString() );
+ string str2 = boost::to_upper_copy( pString2->coerceToString() );
+ int result = str1.compare(str2);
+
+ if (result == 0)
+ return Value::getZero();
+ if (result > 0)
+ return Value::getOne();
+ return Value::getMinusOne();
+ }
+
+ const char *ExpressionStrcasecmp::getOpName() const {
+ return "$strcasecmp";
+ }
+
+ /* ----------------------- ExpressionSubstr ---------------------------- */
+
+ ExpressionSubstr::~ExpressionSubstr() {
+ }
+
+ intrusive_ptr<ExpressionNary> ExpressionSubstr::create() {
+ intrusive_ptr<ExpressionSubstr> pExpression(new ExpressionSubstr());
+ return pExpression;
+ }
+
+ ExpressionSubstr::ExpressionSubstr():
+ ExpressionNary() {
+ }
+
+ void ExpressionSubstr::addOperand(
+ const intrusive_ptr<Expression> &pExpression) {
+ checkArgLimit(3);
+ ExpressionNary::addOperand(pExpression);
+ }
+
+ intrusive_ptr<const Value> ExpressionSubstr::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ checkArgCount(3);
+ intrusive_ptr<const Value> pString(vpOperand[0]->evaluate(pDocument));
+ intrusive_ptr<const Value> pLower(vpOperand[1]->evaluate(pDocument));
+ intrusive_ptr<const Value> pLength(vpOperand[2]->evaluate(pDocument));
+
+ string str = pString->coerceToString();
+ uassert(16034, str::stream() << getOpName() <<
+ ": starting index must be a numeric type (is BSON type " <<
+ pLower->getType() << ")",
+ (pLower->getType() == NumberInt
+ || pLower->getType() == NumberLong
+ || pLower->getType() == NumberDouble));
+ uassert(16035, str::stream() << getOpName() <<
+ ": length must be a numeric type (is BSON type " <<
+ pLength->getType() << ")",
+ (pLength->getType() == NumberInt
+ || pLength->getType() == NumberLong
+ || pLength->getType() == NumberDouble));
+ string::size_type lower = static_cast< string::size_type >( pLower->coerceToLong() );
+ string::size_type length = static_cast< string::size_type >( pLength->coerceToLong() );
+ return Value::createString( str.substr(lower, length) );
+ }
+
+ const char *ExpressionSubstr::getOpName() const {
+ return "$substr";
+ }
+
+ /* ----------------------- ExpressionSubtract ---------------------------- */
+
+ ExpressionSubtract::~ExpressionSubtract() {
+ }
+
+ intrusive_ptr<ExpressionNary> ExpressionSubtract::create() {
+ intrusive_ptr<ExpressionSubtract> pExpression(new ExpressionSubtract());
+ return pExpression;
+ }
+
+ ExpressionSubtract::ExpressionSubtract():
+ ExpressionNary() {
+ }
+
+ void ExpressionSubtract::addOperand(
+ const intrusive_ptr<Expression> &pExpression) {
+ checkArgLimit(2);
+ ExpressionNary::addOperand(pExpression);
+ }
+
+ intrusive_ptr<const Value> ExpressionSubtract::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ BSONType productType;
+ checkArgCount(2);
+ intrusive_ptr<const Value> pLeft(vpOperand[0]->evaluate(pDocument));
+ intrusive_ptr<const Value> pRight(vpOperand[1]->evaluate(pDocument));
+ if (pLeft->getType() == Date) {
+ long long right;
+ long long left = pLeft->coerceToDate();
+ if (pRight->getType() == Date)
+ right = pRight->coerceToDate();
+ else
+ right = static_cast<long long>(pRight->coerceToDouble()*24*60*60*1000);
+ return Value::createDate(Date_t(left-right));
+ }
+
+ uassert(15996, "cannot subtract one date from another",
+ pRight->getType() != Date);
+
+ productType = Value::getWidestNumeric(
+ pRight->getType(), pLeft->getType());
+
+
+ if (productType == NumberDouble) {
+ double right = pRight->coerceToDouble();
+ double left = pLeft->coerceToDouble();
+ return Value::createDouble(left - right);
+ }
+
+ long long right = pRight->coerceToLong();
+ long long left = pLeft->coerceToLong();
+ if (productType == NumberLong)
+ return Value::createLong(left - right);
+ return Value::createInt((int)(left - right));
+ }
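+
+ /*
+   Editor's note: illustrative sketch, not part of the original commit.
+   A Date on the left is special-cased: a numeric right operand is read
+   as a number of days, and a Date right operand yields the difference
+   in milliseconds (still wrapped as a Date value).  Plain numbers
+   follow the usual widening rules, e.g.:
+
+     intrusive_ptr<ExpressionNary> pSub(ExpressionSubtract::create());
+     pSub->addOperand(ExpressionConstant::create(Value::createDouble(7.5)));
+     pSub->addOperand(ExpressionConstant::create(Value::createInt(2)));
+     // evaluates to NumberDouble 5.5, since double is the widest type
+ */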
+
+ const char *ExpressionSubtract::getOpName() const {
+ return "$subtract";
+ }
+
+ /* ------------------------- ExpressionToLower ----------------------------- */
+
+ ExpressionToLower::~ExpressionToLower() {
+ }
+
+ intrusive_ptr<ExpressionNary> ExpressionToLower::create() {
+ intrusive_ptr<ExpressionToLower> pExpression(new ExpressionToLower());
+ return pExpression;
+ }
+
+ ExpressionToLower::ExpressionToLower():
+ ExpressionNary() {
+ }
+
+ void ExpressionToLower::addOperand(const intrusive_ptr<Expression> &pExpression) {
+ checkArgLimit(1);
+ ExpressionNary::addOperand(pExpression);
+ }
+
+ intrusive_ptr<const Value> ExpressionToLower::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ checkArgCount(1);
+ intrusive_ptr<const Value> pString(vpOperand[0]->evaluate(pDocument));
+ string str = pString->coerceToString();
+ boost::to_lower(str);
+ return Value::createString(str);
+ }
+
+ const char *ExpressionToLower::getOpName() const {
+ return "$toLower";
+ }
+
+ /* ------------------------- ExpressionToUpper -------------------------- */
+
+ ExpressionToUpper::~ExpressionToUpper() {
+ }
+
+ intrusive_ptr<ExpressionNary> ExpressionToUpper::create() {
+ intrusive_ptr<ExpressionToUpper> pExpression(new ExpressionToUpper());
+ return pExpression;
+ }
+
+ ExpressionToUpper::ExpressionToUpper():
+ ExpressionNary() {
+ }
+
+ void ExpressionToUpper::addOperand(
+ const intrusive_ptr<Expression> &pExpression) {
+ checkArgLimit(1);
+ ExpressionNary::addOperand(pExpression);
+ }
+
+ intrusive_ptr<const Value> ExpressionToUpper::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ checkArgCount(1);
+ intrusive_ptr<const Value> pString(vpOperand[0]->evaluate(pDocument));
+ string str(pString->coerceToString());
+ boost::to_upper(str);
+ return Value::createString(str);
+ }
+
+ const char *ExpressionToUpper::getOpName() const {
+ return "$toUpper";
+ }
+
+ /* ------------------------- ExpressionWeek ----------------------------- */
+
+ ExpressionWeek::~ExpressionWeek() {
+ }
+
+ intrusive_ptr<ExpressionNary> ExpressionWeek::create() {
+ intrusive_ptr<ExpressionWeek> pExpression(new ExpressionWeek());
+ return pExpression;
+ }
+
+ ExpressionWeek::ExpressionWeek():
+ ExpressionNary() {
+ }
+
+ void ExpressionWeek::addOperand(const intrusive_ptr<Expression> &pExpression) {
+ checkArgLimit(1);
+ ExpressionNary::addOperand(pExpression);
+ }
+
+ intrusive_ptr<const Value> ExpressionWeek::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ checkArgCount(1);
+ intrusive_ptr<const Value> pDate(vpOperand[0]->evaluate(pDocument));
+ tm date;
+ (pDate->coerceToDate()).toTm(&date);
+ int dayOfWeek = date.tm_wday+1;
+ int dayOfYear = date.tm_yday;
+ int week = 0;
+ int janFirst = 0;
+ int offset = 0;
+
+ janFirst = dayOfWeek - dayOfYear % 7;
+ offset = (janFirst + 6) % 7;
+ week = (dayOfYear + offset) / 7;
+ return Value::createInt(week);
+ }
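+
+ /*
+   Editor's note: illustrative worked example, not part of the original
+   commit.  For 2011-01-04, a Tuesday: tm_wday == 2 and tm_yday == 3, so
+   dayOfWeek == 3, janFirst == 3 - 3 % 7 == 0 (congruent mod 7 to Jan 1
+   falling on a Saturday), offset == (0 + 6) % 7 == 6, and
+   week == (3 + 6) / 7 == 1.  Days before the year's first Sunday fall
+   in week 0, so weeks are counted as starting on Sunday.
+ */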
+
+ const char *ExpressionWeek::getOpName() const {
+ return "$week";
+ }
+
+ /* ------------------------- ExpressionYear ----------------------------- */
+
+ ExpressionYear::~ExpressionYear() {
+ }
+
+ intrusive_ptr<ExpressionNary> ExpressionYear::create() {
+ intrusive_ptr<ExpressionYear> pExpression(new ExpressionYear());
+ return pExpression;
+ }
+
+ ExpressionYear::ExpressionYear():
+ ExpressionNary() {
+ }
+
+ void ExpressionYear::addOperand(
+ const intrusive_ptr<Expression> &pExpression) {
+ checkArgLimit(1);
+ ExpressionNary::addOperand(pExpression);
+ }
+
+ intrusive_ptr<const Value> ExpressionYear::evaluate(
+ const intrusive_ptr<Document> &pDocument) const {
+ checkArgCount(1);
+ intrusive_ptr<const Value> pDate(vpOperand[0]->evaluate(pDocument));
+ tm date;
+ (pDate->coerceToDate()).toTm(&date);
+ return Value::createInt(date.tm_year+1900); // tm_year is years since 1900
+ }
+
+ const char *ExpressionYear::getOpName() const {
+ return "$year";
+ }
+}
diff --git a/src/mongo/db/pipeline/expression.h b/src/mongo/db/pipeline/expression.h
new file mode 100755
index 00000000000..c49e385a3c7
--- /dev/null
+++ b/src/mongo/db/pipeline/expression.h
@@ -0,0 +1,1223 @@
+/**
+ * Copyright (c) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "pch.h"
+
+#include "db/pipeline/field_path.h"
+#include "util/intrusive_counter.h"
+
+
+namespace mongo {
+ class BSONArrayBuilder;
+ class BSONElement;
+ class BSONObjBuilder;
+ class Builder;
+ class Document;
+ class ExpressionContext;
+ class Value;
+
+ class Expression :
+ public IntrusiveCounterUnsigned {
+ public:
+ virtual ~Expression() {};
+
+ /*
+ Optimize the Expression.
+
+ This provides an opportunity to do constant folding, or to
+ collapse nested operators that have the same precedence, such as
+ $add, $and, or $or.
+
+ The Expression should be replaced with the return value, which may
+ or may not be the same object. In the case of constant folding,
+ a computed expression may be replaced by a constant.
+
+ @returns the optimized Expression
+ */
+ virtual intrusive_ptr<Expression> optimize() = 0;
+
+ /*
+ Evaluate the Expression using the given document as input.
+
+ @returns the computed value
+ */
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const = 0;
+
+ /*
+ Add the Expression (and any descendant Expressions) into a BSON
+ object that is under construction.
+
+ Unevaluated Expressions always materialize as objects. Evaluation
+ may produce a scalar or another object, either of which will be
+ substituted inline.
+
+ @param pBuilder the builder to add the expression to
+ @param fieldName the name the object should be given
+ */
+ virtual void addToBsonObj(
+ BSONObjBuilder *pBuilder, string fieldName,
+ unsigned depth) const = 0;
+
+ /*
+ Add the Expression (and any descendant Expressions) into a BSON
+ array that is under construction.
+
+ Unevaluated Expressions always materialize as objects. Evaluation
+ may produce a scalar or another object, either of which will be
+ substituted inline.
+
+ @param pBuilder the builder to add the expression to
+ */
+ virtual void addToBsonArray(BSONArrayBuilder *pBuilder,
+ unsigned depth) const = 0;
+
+ /*
+ Convert the expression into a BSONObj that corresponds to the
+ db.collection.find() predicate language. This is intended for
+ use by DocumentSourceFilter.
+
+ This is more limited than the full expression language supported
+ by all available expressions in a DocumentSource processing
+ pipeline, and will fail with an assertion if an attempt is made
+ to go outside the bounds of the recognized patterns, which don't
+ include full computed expressions. There are other methods available
+ on DocumentSourceFilter which can be used to analyze a filter
+ predicate and break it up into appropriate expressions which can
+ be translated within these constraints. As a result, the default
+ implementation is to fail with an assertion; only a subset of
+ operators will be able to fulfill this request.
+
+ @param pBuilder the builder to add the expression to.
+ */
+ virtual void toMatcherBson(
+ BSONObjBuilder *pBuilder, unsigned depth) const;
+
+ /*
+ Utility class for parseObject() below.
+
+ Only one array can be unwound in a processing pipeline. If the
+ UNWIND_OK option is used, unwindOk() will return true, and a field
+ can be declared as unwound using unwind(), after which unwindUsed()
+ will return true. Only specify UNWIND_OK if it is OK to unwind an
+ array in the current context.
+
+ DOCUMENT_OK indicates that it is OK to use a Document in the current
+ context.
+ */
+ class ObjectCtx {
+ public:
+ ObjectCtx(int options);
+ static const int UNWIND_OK = 0x0001;
+ static const int DOCUMENT_OK = 0x0002;
+
+ bool unwindOk() const;
+ bool unwindUsed() const;
+ void unwind(string fieldName);
+
+ bool documentOk() const;
+
+ private:
+ int options;
+ string unwindField;
+ };
+
+ /*
+ Parse a BSONElement Object. The object could represent a functional
+ expression or a Document expression.
+
+ @param pBsonElement the element representing the object
+ @param pCtx an ObjectCtx representing the options above
+ @returns the parsed Expression
+ */
+ static intrusive_ptr<Expression> parseObject(
+ BSONElement *pBsonElement, ObjectCtx *pCtx);
+
+ static const char unwindName[];
+
+ /*
+ Parse a BSONElement Object which has already been determined to be
+ a functional expression.
+
+ @param pOpName the name of the (prefix) operator
+ @param pBsonElement the BSONElement to parse
+ @returns the parsed Expression
+ */
+ static intrusive_ptr<Expression> parseExpression(
+ const char *pOpName, BSONElement *pBsonElement);
+
+
+ /*
+ Parse a BSONElement which is an operand in an Expression.
+
+ @param pBsonElement the expected operand's BSONElement
+ @returns the parsed operand, as an Expression
+ */
+ static intrusive_ptr<Expression> parseOperand(
+ BSONElement *pBsonElement);
+
+ /*
+ Produce a field path string with the field prefix removed.
+
+ Throws an error if the field prefix is not present.
+
+ @param prefixedField the prefixed field
+ @returns the field path with the prefix removed
+ */
+ static string removeFieldPrefix(const string &prefixedField);
+
+ /*
+ Enumeration of comparison operators. These are shared between a
+ few expression implementations, so they are factored out here.
+
+ Any changes to these values require adjustment of the lookup
+ table in the implementation.
+ */
+ enum CmpOp {
+ EQ = 0, // return true for a == b, false otherwise
+ NE = 1, // return true for a != b, false otherwise
+ GT = 2, // return true for a > b, false otherwise
+ GTE = 3, // return true for a >= b, false otherwise
+ LT = 4, // return true for a < b, false otherwise
+ LTE = 5, // return true for a <= b, false otherwise
+ CMP = 6, // return -1, 0, 1 for a < b, a == b, a > b
+ };
+
+ static int signum(int i);
+ };
+
+
+ class ExpressionNary :
+ public Expression,
+ public boost::enable_shared_from_this<ExpressionNary> {
+ public:
+ // virtuals from Expression
+ virtual intrusive_ptr<Expression> optimize();
+ virtual void addToBsonObj(
+ BSONObjBuilder *pBuilder, string fieldName, unsigned depth) const;
+ virtual void addToBsonArray(
+ BSONArrayBuilder *pBuilder, unsigned depth) const;
+
+ /*
+ Add an operand to the n-ary expression.
+
+ @param pExpression the expression to add
+ */
+ virtual void addOperand(const intrusive_ptr<Expression> &pExpression);
+
+ /*
+ Return a factory function that will make Expression nodes of
+ the same type as this. This will be used to create constant
+ expressions for constant folding for optimize(). Only return
+ a factory function if this operator is both associative and
+ commutative. The default implementation returns NULL; optimize()
+ will recognize that and stop.
+
+ Note that ExpressionNary::optimize() promises that if it uses this
+ to fold constants, then if optimize() returns an ExpressionNary,
+ any remaining constant will be the last one in vpOperand. Derived
+ classes may take advantage of this to do further optimizations in
+ their optimize().
+
+ @returns pointer to a factory function or NULL
+ */
+ virtual intrusive_ptr<ExpressionNary> (*getFactory() const)();
+
+ /*
+ Get the name of the operator.
+
+ @returns the name of the operator; this string belongs to the class
+ implementation, and should not be deleted
+ */
+ virtual const char *getOpName() const = 0;
+
+ protected:
+ ExpressionNary();
+
+ vector<intrusive_ptr<Expression> > vpOperand;
+
+ /*
+ Add the expression to the builder.
+
+ If there is only one operand (a unary operator), then the operand
+ is added directly, without an array. For more than one operand,
+ a named array is created. In both cases, the result is an object.
+
+ @param pBuilder the (blank) builder to add the expression to
+ @param pOpName the name of the operator
+ */
+ virtual void toBson(BSONObjBuilder *pBuilder,
+ const char *pOpName, unsigned depth) const;
+
+ /*
+ Checks the current size of vpOperand; if the size is equal to or
+ greater than maxArgs, fires a user assertion indicating that this
+ operator cannot have this many arguments.
+
+ The equality case is included because this is intended to be used in
+ addOperand() to check for the limit *before* adding the requested
+ argument.
+
+ @param maxArgs the maximum number of arguments the operator accepts
+ */
+ void checkArgLimit(unsigned maxArgs) const;
+
+ /*
+ Checks the current size of vpOperand; if the size is not equal to
+ reqArgs, fires a user assertion indicating that this must have
+ exactly reqArgs arguments.
+
+ This is meant to be used in evaluate(), *before* the evaluation
+ takes place.
+
+ @param reqArgs the number of arguments this operator requires
+ */
+ void checkArgCount(unsigned reqArgs) const;
+ };
+
+
+ class ExpressionAdd :
+ public ExpressionNary {
+ public:
+ // virtuals from Expression
+ virtual ~ExpressionAdd();
+ virtual intrusive_ptr<Expression> optimize();
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual const char *getOpName() const;
+
+ // virtuals from ExpressionNary
+ virtual intrusive_ptr<ExpressionNary> (*getFactory() const)();
+
+ /*
+ Create an expression that finds the sum of n operands.
+
+ @returns addition expression
+ */
+ static intrusive_ptr<ExpressionNary> create();
+
+ protected:
+ // virtuals from ExpressionNary
+ virtual void toBson(BSONObjBuilder *pBuilder,
+ const char *pOpName, unsigned depth) const;
+
+ private:
+ ExpressionAdd();
+
+ /*
+ If the operator can be optimized, we save the original here.
+
+ This is necessary because addition must follow its original operand
+ ordering strictly if a string is detected, otherwise string
+ concatenation may appear to have re-ordered the operands.
+ */
+ intrusive_ptr<ExpressionAdd> pAdd;
+ mutable bool useOriginal;
+ };
+
+
+ class ExpressionAnd :
+ public ExpressionNary {
+ public:
+ // virtuals from Expression
+ virtual ~ExpressionAnd();
+ virtual intrusive_ptr<Expression> optimize();
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual const char *getOpName() const;
+ virtual void toMatcherBson(
+ BSONObjBuilder *pBuilder, unsigned depth) const;
+
+ // virtuals from ExpressionNary
+ virtual intrusive_ptr<ExpressionNary> (*getFactory() const)();
+
+ /*
+ Create an expression that finds the conjunction of n operands.
+ The conjunction uses short-circuit logic; the expressions are
+ evaluated in the order they were added to the conjunction, and
+ the evaluation stops and returns false on the first operand that
+ evaluates to false.
+
+ @returns conjunction expression
+ */
+ static intrusive_ptr<ExpressionNary> create();
+
+ private:
+ ExpressionAnd();
+ };
+
+
+ class ExpressionCoerceToBool :
+ public Expression,
+ public boost::enable_shared_from_this<ExpressionCoerceToBool> {
+ public:
+ // virtuals from Expression
+ virtual ~ExpressionCoerceToBool();
+ virtual intrusive_ptr<Expression> optimize();
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual void addToBsonObj(
+ BSONObjBuilder *pBuilder, string fieldName, unsigned depth) const;
+ virtual void addToBsonArray(
+ BSONArrayBuilder *pBuilder, unsigned depth) const;
+
+ static intrusive_ptr<ExpressionCoerceToBool> create(
+ const intrusive_ptr<Expression> &pExpression);
+
+ private:
+ ExpressionCoerceToBool(const intrusive_ptr<Expression> &pExpression);
+
+ intrusive_ptr<Expression> pExpression;
+ };
+
+
+ class ExpressionCompare :
+ public ExpressionNary {
+ public:
+ // virtuals from ExpressionNary
+ virtual ~ExpressionCompare();
+ virtual intrusive_ptr<Expression> optimize();
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual const char *getOpName() const;
+ virtual void addOperand(const intrusive_ptr<Expression> &pExpression);
+
+ /*
+ Shorthands for creating various comparison expressions.
+ Provided for conformance with the uniform function pointer
+ signature required for parsing.
+
+ These create a particular comparison expression without any
+ operands. Those must be added via ExpressionNary::addOperand().
+ */
+ static intrusive_ptr<ExpressionNary> createCmp();
+ static intrusive_ptr<ExpressionNary> createEq();
+ static intrusive_ptr<ExpressionNary> createNe();
+ static intrusive_ptr<ExpressionNary> createGt();
+ static intrusive_ptr<ExpressionNary> createGte();
+ static intrusive_ptr<ExpressionNary> createLt();
+ static intrusive_ptr<ExpressionNary> createLte();
+
+ private:
+ friend class ExpressionFieldRange;
+ ExpressionCompare(CmpOp cmpOp);
+
+ CmpOp cmpOp;
+ };
+
+
+ class ExpressionCond :
+ public ExpressionNary {
+ public:
+ // virtuals from ExpressionNary
+ virtual ~ExpressionCond();
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual const char *getOpName() const;
+ virtual void addOperand(const intrusive_ptr<Expression> &pExpression);
+
+ static intrusive_ptr<ExpressionNary> create();
+
+ private:
+ ExpressionCond();
+ };
+
+
+ class ExpressionConstant :
+ public Expression,
+ public boost::enable_shared_from_this<ExpressionConstant> {
+ public:
+ // virtuals from Expression
+ virtual ~ExpressionConstant();
+ virtual intrusive_ptr<Expression> optimize();
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual const char *getOpName() const;
+ virtual void addToBsonObj(
+ BSONObjBuilder *pBuilder, string fieldName, unsigned depth) const;
+ virtual void addToBsonArray(
+ BSONArrayBuilder *pBuilder, unsigned depth) const;
+
+ static intrusive_ptr<ExpressionConstant> createFromBsonElement(
+ BSONElement *pBsonElement);
+ static intrusive_ptr<ExpressionConstant> create(
+ const intrusive_ptr<const Value> &pValue);
+
+ /*
+ Get the constant value represented by this Expression.
+
+ @returns the value
+ */
+ intrusive_ptr<const Value> getValue() const;
+
+ private:
+ ExpressionConstant(BSONElement *pBsonElement);
+ ExpressionConstant(const intrusive_ptr<const Value> &pValue);
+
+ intrusive_ptr<const Value> pValue;
+ };
+
+
+ class ExpressionDayOfMonth :
+ public ExpressionNary {
+ public:
+ // virtuals from ExpressionNary
+ virtual ~ExpressionDayOfMonth();
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual const char *getOpName() const;
+ virtual void addOperand(const intrusive_ptr<Expression> &pExpression);
+
+ static intrusive_ptr<ExpressionNary> create();
+
+ private:
+ ExpressionDayOfMonth();
+ };
+
+
+ class ExpressionDayOfWeek :
+ public ExpressionNary {
+ public:
+ // virtuals from ExpressionNary
+ virtual ~ExpressionDayOfWeek();
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual const char *getOpName() const;
+ virtual void addOperand(const intrusive_ptr<Expression> &pExpression);
+
+ static intrusive_ptr<ExpressionNary> create();
+
+ private:
+ ExpressionDayOfWeek();
+ };
+
+
+ class ExpressionDayOfYear :
+ public ExpressionNary {
+ public:
+ // virtuals from ExpressionNary
+ virtual ~ExpressionDayOfYear();
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual const char *getOpName() const;
+ virtual void addOperand(const intrusive_ptr<Expression> &pExpression);
+
+ static intrusive_ptr<ExpressionNary> create();
+
+ private:
+ ExpressionDayOfYear();
+ };
+
+
+ class ExpressionDivide :
+ public ExpressionNary {
+ public:
+ // virtuals from ExpressionNary
+ virtual ~ExpressionDivide();
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual const char *getOpName() const;
+ virtual void addOperand(const intrusive_ptr<Expression> &pExpression);
+
+ static intrusive_ptr<ExpressionNary> create();
+
+ private:
+ ExpressionDivide();
+ };
+
+
+ class ExpressionFieldPath :
+ public Expression,
+ public boost::enable_shared_from_this<ExpressionFieldPath> {
+ public:
+ // virtuals from Expression
+ virtual ~ExpressionFieldPath();
+ virtual intrusive_ptr<Expression> optimize();
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual void addToBsonObj(
+ BSONObjBuilder *pBuilder, string fieldName, unsigned depth) const;
+ virtual void addToBsonArray(
+ BSONArrayBuilder *pBuilder, unsigned depth) const;
+
+ /*
+ Create a field path expression.
+
+ Evaluation will extract the value associated with the given field
+ path from the source document.
+
+ @param fieldPath the field path string, without any leading document
+ indicator
+ @returns the newly created field path expression
+ */
+ static intrusive_ptr<ExpressionFieldPath> create(
+ const string &fieldPath);
+
+ /*
+ Return a string representation of the field path.
+
+ @param fieldPrefix whether or not to include the document field
+ indicator prefix
+ @returns the dot-delimited field path
+ */
+ string getFieldPath(bool fieldPrefix) const;
+
+ /*
+ Write a string representation of the field path to a stream.
+
+ @param outStream the stream to write to
+ @param fieldPrefix whether or not to include the document field
+ indicator prefix
+ */
+ void writeFieldPath(ostream &outStream, bool fieldPrefix) const;
+
+ private:
+ ExpressionFieldPath(const string &fieldPath);
+
+ /*
+ Internal implementation of evaluate(), used recursively.
+
+ The internal implementation doesn't just use a loop because of
+ the possibility that we need to skip over an array. If the path
+ is "a.b.c", and a is an array, then we fan out from there, and
+ traverse "b.c" for each element of a:[...]. This requires that
+ a be an array of objects in order to navigate more deeply.
+
+ @param index current path field index to extract
+ @param pathLength maximum number of fields on field path
+ @param pDocument current document traversed to (not the top-level one)
+ @returns the field found; could be an array
+ */
+ intrusive_ptr<const Value> evaluatePath(
+ size_t index, const size_t pathLength,
+ intrusive_ptr<Document> pDocument) const;
+
+ FieldPath fieldPath;
+ };
+
+
+ class ExpressionFieldRange :
+ public Expression,
+ public boost::enable_shared_from_this<ExpressionFieldRange> {
+ public:
+ // virtuals from Expression
+ virtual ~ExpressionFieldRange();
+ virtual intrusive_ptr<Expression> optimize();
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual void addToBsonObj(
+ BSONObjBuilder *pBuilder, string fieldName, unsigned depth) const;
+ virtual void addToBsonArray(
+ BSONArrayBuilder *pBuilder, unsigned depth) const;
+ virtual void toMatcherBson(
+ BSONObjBuilder *pBuilder, unsigned depth) const;
+
+ /*
+ Create a field range expression.
+
+ Field ranges are meant to match up with classic Matcher semantics,
+ and therefore are conjunctions. For example, these appear in
+ mongo shell predicates in one of these forms:
+ { a : C } -> (a == C) // degenerate "point" range
+ { a : { $lt : C } } -> (a < C) // open range
+ { a : { $gt : C1, $lte : C2 } } -> ((a > C1) && (a <= C2)) // closed
+
+ When initially created, a field range only includes one end of
+ the range. Additional points may be added via intersect().
+
+ Note that NE and CMP are not supported.
+
+ @param pFieldPath the field path for extracting the field value
+ @param cmpOp the comparison operator
+ @param pValue the value to compare against
+ @returns the newly created field range expression
+ */
+ static intrusive_ptr<ExpressionFieldRange> create(
+ const intrusive_ptr<ExpressionFieldPath> &pFieldPath,
+ CmpOp cmpOp, const intrusive_ptr<const Value> &pValue);
+
+ /*
+ Add an intersecting range.
+
+ This can be done any number of times after creation. The
+ range is internally optimized for each new addition. If the new
+ intersection extends or reduces the values within the range, the
+ internal representation is adjusted to reflect that.
+
+ Note that NE and CMP are not supported.
+
+ @param cmpOp the comparison operator
+ @param pValue the value to compare against
+ */
+ void intersect(CmpOp cmpOp, const intrusive_ptr<const Value> &pValue);
+
+ private:
+ ExpressionFieldRange(const intrusive_ptr<ExpressionFieldPath> &pFieldPath,
+ CmpOp cmpOp,
+ const intrusive_ptr<const Value> &pValue);
+
+ intrusive_ptr<ExpressionFieldPath> pFieldPath;
+
+ class Range {
+ public:
+ Range(CmpOp cmpOp, const intrusive_ptr<const Value> &pValue);
+ Range(const Range &rRange);
+
+ Range *intersect(const Range *pRange) const;
+ bool contains(const intrusive_ptr<const Value> &pValue) const;
+
+ Range(const intrusive_ptr<const Value> &pBottom, bool bottomOpen,
+ const intrusive_ptr<const Value> &pTop, bool topOpen);
+
+ bool bottomOpen;
+ bool topOpen;
+ intrusive_ptr<const Value> pBottom;
+ intrusive_ptr<const Value> pTop;
+ };
+
+ scoped_ptr<Range> pRange;
+
+ /*
+ Add to a generic Builder.
+
+ The methods to append items to an object and an array differ by
+ their inclusion of a field name. For more complicated objects,
+ it makes sense to abstract that out and use a generic builder that
+ always looks the same, and then implement addToBsonObj() and
+ addToBsonArray() by using the common method.
+ */
+ void addToBson(Builder *pBuilder, unsigned depth) const;
+ };
+
+
+ class ExpressionHour :
+ public ExpressionNary {
+ public:
+ // virtuals from ExpressionNary
+ virtual ~ExpressionHour();
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual const char *getOpName() const;
+ virtual void addOperand(const intrusive_ptr<Expression> &pExpression);
+
+ static intrusive_ptr<ExpressionNary> create();
+
+ private:
+ ExpressionHour();
+ };
+
+
+ class ExpressionIfNull :
+ public ExpressionNary {
+ public:
+ // virtuals from ExpressionNary
+ virtual ~ExpressionIfNull();
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual const char *getOpName() const;
+ virtual void addOperand(const intrusive_ptr<Expression> &pExpression);
+
+ static intrusive_ptr<ExpressionNary> create();
+
+ private:
+ ExpressionIfNull();
+ };
+
+
+ class ExpressionMinute :
+ public ExpressionNary {
+ public:
+ // virtuals from ExpressionNary
+ virtual ~ExpressionMinute();
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual const char *getOpName() const;
+ virtual void addOperand(const intrusive_ptr<Expression> &pExpression);
+
+ static intrusive_ptr<ExpressionNary> create();
+
+ private:
+ ExpressionMinute();
+ };
+
+
+ class ExpressionMod :
+ public ExpressionNary {
+ public:
+ // virtuals from ExpressionNary
+ virtual ~ExpressionMod();
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual const char *getOpName() const;
+ virtual void addOperand(const intrusive_ptr<Expression> &pExpression);
+
+ static intrusive_ptr<ExpressionNary> create();
+
+ private:
+ ExpressionMod();
+ };
+
+
+ class ExpressionMultiply :
+ public ExpressionNary {
+ public:
+ // virtuals from Expression
+ virtual ~ExpressionMultiply();
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual const char *getOpName() const;
+
+ // virtuals from ExpressionNary
+ virtual intrusive_ptr<ExpressionNary> (*getFactory() const)();
+
+ /*
+ Create an expression that finds the product of n operands.
+
+ @returns multiplication expression
+ */
+ static intrusive_ptr<ExpressionNary> create();
+
+ private:
+ ExpressionMultiply();
+ };
+
+
+ class ExpressionMonth :
+ public ExpressionNary {
+ public:
+ // virtuals from ExpressionNary
+ virtual ~ExpressionMonth();
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual const char *getOpName() const;
+ virtual void addOperand(const intrusive_ptr<Expression> &pExpression);
+
+ static intrusive_ptr<ExpressionNary> create();
+
+ private:
+ ExpressionMonth();
+ };
+
+
+ class ExpressionNoOp :
+ public ExpressionNary {
+ public:
+ // virtuals from ExpressionNary
+ virtual ~ExpressionNoOp();
+ virtual intrusive_ptr<Expression> optimize();
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual const char *getOpName() const;
+ virtual void addOperand(const intrusive_ptr<Expression> &pExpression);
+
+ static intrusive_ptr<ExpressionNary> create();
+
+ private:
+ ExpressionNoOp();
+ };
+
+
+ class ExpressionNot :
+ public ExpressionNary {
+ public:
+ // virtuals from ExpressionNary
+ virtual ~ExpressionNot();
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual const char *getOpName() const;
+ virtual void addOperand(const intrusive_ptr<Expression> &pExpression);
+
+ static intrusive_ptr<ExpressionNary> create();
+
+ private:
+ ExpressionNot();
+ };
+
+
+ class ExpressionObject :
+ public Expression,
+ public boost::enable_shared_from_this<ExpressionObject> {
+ public:
+ // virtuals from Expression
+ virtual ~ExpressionObject();
+ virtual intrusive_ptr<Expression> optimize();
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual void addToBsonObj(
+ BSONObjBuilder *pBuilder, string fieldName, unsigned depth) const;
+ virtual void addToBsonArray(
+ BSONArrayBuilder *pBuilder, unsigned depth) const;
+
+ /*
+ evaluate(), but return a Document instead of a Value-wrapped
+ Document.
+
+ @param pDocument the input Document
+ @returns the result document
+ */
+ intrusive_ptr<Document> evaluateDocument(
+ const intrusive_ptr<Document> &pDocument) const;
+
+ /*
+ evaluate(), but add the evaluated fields to a given document
+ instead of creating a new one.
+
+ @param pResult the Document to add the evaluated expressions to
+ @param pDocument the input Document
+ */
+ void addToDocument(const intrusive_ptr<Document> &pResult,
+ const intrusive_ptr<Document> &pDocument) const;
+
+ /*
+ Estimate the number of fields that will result from evaluating
+ this over pDocument. Does not include _id. This is an estimate
+ (really an upper bound) because we can't account for undefined
+ fields without actually doing the evaluation. But this is still
+ useful as an argument to Document::create(), if you plan to use
+ addToDocument().
+
+ @param pDocument the input document
+ @returns estimated number of fields that will result
+ */
+ size_t getSizeHint(const intrusive_ptr<Document> &pDocument) const;
+
+ /*
+ Create an empty expression. Until fields are added, this
+ will evaluate to an empty document (object).
+ */
+ static intrusive_ptr<ExpressionObject> create();
+
+ /*
+ Add a field to the document expression.
+
+ @param fieldPath the path the evaluated expression will have in the
+ result Document
+ @param pExpression the expression to evaluate obtain this field's
+ Value in the result Document
+ */
+ void addField(const string &fieldPath,
+ const intrusive_ptr<Expression> &pExpression);
+
+ /*
+ Add a field path to the set of those to be included.
+
+ Note that including a nested field implies including everything on
+ the path leading down to it.
+
+ @param fieldPath the name of the field to be included
+ */
+ void includePath(const string &fieldPath);
+
+ /*
+ Add a field path to the set of those to be excluded.
+
+ Note that excluding a nested field implies including everything on
+ the path leading down to it (because you're stating you want to see
+ all the other fields that aren't being excluded).
+
+ @param fieldPath the field path to be excluded
+ */
+ void excludePath(const string &fieldPath);
+
+ /*
+ Return the expression for a field.
+
+ @param fieldName the field name for the expression to return
+ @returns the expression used to compute the field, if it is present,
+ otherwise NULL.
+ */
+ intrusive_ptr<Expression> getField(const string &fieldName) const;
+
+ /*
+ Get a count of the added fields.
+
+ @returns how many fields have been added
+ */
+ size_t getFieldCount() const;
+
+ /*
+ Get a count of the exclusions.
+
+ @returns how many fields have been excluded.
+ */
+ size_t getExclusionCount() const;
+
+ /*
+ Specialized BSON conversion that allows for writing out a
+ $project specification. This creates a standalone object, which must
+ be added to a containing object with a name.
+
+ @param pBuilder where to write the object to
+ */
+ void documentToBson(BSONObjBuilder *pBuilder, unsigned depth) const;
+
+ private:
+ ExpressionObject();
+
+ void includePath(
+ const FieldPath *pPath, size_t pathi, size_t pathn,
+ bool excludeLast);
+
+ bool excludePaths;
+ set<string> path;
+
+ /* these two vectors are maintained in parallel */
+ vector<string> vFieldName;
+ vector<intrusive_ptr<Expression> > vpExpression;
+
+ /*
+ Utility function used by documentToBson(). Emits inclusion
+ and exclusion paths by recursively walking down the nested
+ ExpressionObject trees these have created.
+
+ @param pBuilder the builder to write boolean valued path "fields" to
+ @param pvPath pointer to a vector of strings describing the path on
+ descent; the top-level call should pass an empty vector
+ */
+ void emitPaths(BSONObjBuilder *pBuilder, vector<string> *pvPath) const;
+
+ /* utility class used by emitPaths() */
+ class PathPusher :
+ boost::noncopyable {
+ public:
+ PathPusher(vector<string> *pvPath, const string &s);
+ ~PathPusher();
+
+ private:
+ vector<string> *pvPath;
+ };
+ };
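+
+ /*
+   Editor's note: illustrative sketch, not part of the original commit.
+   Building something equivalent to
+   { $project: { a: 1, total: { $add: [ "$price", "$tax" ] } } }
+   by hand looks roughly like this (field names are hypothetical):
+
+     intrusive_ptr<ExpressionObject> pObj(ExpressionObject::create());
+     pObj->includePath("a");
+     intrusive_ptr<ExpressionNary> pAdd(ExpressionAdd::create());
+     pAdd->addOperand(ExpressionFieldPath::create("price"));
+     pAdd->addOperand(ExpressionFieldPath::create("tax"));
+     pObj->addField("total", pAdd);
+ */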
+
+
+ class ExpressionOr :
+ public ExpressionNary {
+ public:
+ // virtuals from Expression
+ virtual ~ExpressionOr();
+ virtual intrusive_ptr<Expression> optimize();
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual const char *getOpName() const;
+ virtual void toMatcherBson(
+ BSONObjBuilder *pBuilder, unsigned depth) const;
+
+ // virtuals from ExpressionNary
+ virtual intrusive_ptr<ExpressionNary> (*getFactory() const)();
+
+ /*
+ Create an expression that finds the disjunction of n operands.
+ The disjunction uses short-circuit logic; the expressions are
+ evaluated in the order they were added to the disjunction, and
+ the evaluation stops and returns true on the first operand that
+ evaluates to true.
+
+ @returns disjunction expression
+ */
+ static intrusive_ptr<ExpressionNary> create();
+
+ private:
+ ExpressionOr();
+ };
+
+
+ class ExpressionSecond :
+ public ExpressionNary {
+ public:
+ // virtuals from ExpressionNary
+ virtual ~ExpressionSecond();
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual const char *getOpName() const;
+ virtual void addOperand(const intrusive_ptr<Expression> &pExpression);
+
+ static intrusive_ptr<ExpressionNary> create();
+
+ private:
+ ExpressionSecond();
+ };
+
+
+ class ExpressionStrcasecmp :
+ public ExpressionNary {
+ public:
+ // virtuals from ExpressionNary
+ virtual ~ExpressionStrcasecmp();
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual const char *getOpName() const;
+ virtual void addOperand(const intrusive_ptr<Expression> &pExpression);
+
+ static intrusive_ptr<ExpressionNary> create();
+
+ private:
+ ExpressionStrcasecmp();
+ };
+
+
+ class ExpressionSubstr :
+ public ExpressionNary {
+ public:
+ // virtuals from ExpressionNary
+ virtual ~ExpressionSubstr();
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual const char *getOpName() const;
+ virtual void addOperand(const intrusive_ptr<Expression> &pExpression);
+
+ static intrusive_ptr<ExpressionNary> create();
+
+ private:
+ ExpressionSubstr();
+ };
+
+
+ class ExpressionSubtract :
+ public ExpressionNary {
+ public:
+ // virtuals from ExpressionNary
+ virtual ~ExpressionSubtract();
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual const char *getOpName() const;
+ virtual void addOperand(const intrusive_ptr<Expression> &pExpression);
+
+ static intrusive_ptr<ExpressionNary> create();
+
+ private:
+ ExpressionSubtract();
+ };
+
+
+ class ExpressionToLower :
+ public ExpressionNary {
+ public:
+ // virtuals from ExpressionNary
+ virtual ~ExpressionToLower();
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual const char *getOpName() const;
+ virtual void addOperand(const intrusive_ptr<Expression> &pExpression);
+
+ static intrusive_ptr<ExpressionNary> create();
+
+ private:
+ ExpressionToLower();
+ };
+
+
+ class ExpressionToUpper :
+ public ExpressionNary {
+ public:
+ // virtuals from ExpressionNary
+ virtual ~ExpressionToUpper();
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual const char *getOpName() const;
+ virtual void addOperand(const intrusive_ptr<Expression> &pExpression);
+
+ static intrusive_ptr<ExpressionNary> create();
+
+ private:
+ ExpressionToUpper();
+ };
+
+
+ class ExpressionWeek :
+ public ExpressionNary {
+ public:
+ // virtuals from ExpressionNary
+ virtual ~ExpressionWeek();
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual const char *getOpName() const;
+ virtual void addOperand(const intrusive_ptr<Expression> &pExpression);
+
+ static intrusive_ptr<ExpressionNary> create();
+
+ private:
+ ExpressionWeek();
+ };
+
+
+ class ExpressionYear :
+ public ExpressionNary {
+ public:
+ // virtuals from ExpressionNary
+ virtual ~ExpressionYear();
+ virtual intrusive_ptr<const Value> evaluate(
+ const intrusive_ptr<Document> &pDocument) const;
+ virtual const char *getOpName() const;
+ virtual void addOperand(const intrusive_ptr<Expression> &pExpression);
+
+ static intrusive_ptr<ExpressionNary> create();
+
+ private:
+ ExpressionYear();
+ };
+}
+
+
+/* ======================= INLINED IMPLEMENTATIONS ========================== */
+
+namespace mongo {
+
+ inline bool Expression::ObjectCtx::unwindOk() const {
+ return ((options & UNWIND_OK) != 0);
+ }
+
+ inline bool Expression::ObjectCtx::unwindUsed() const {
+ return (unwindField.size() != 0);
+ }
+
+ inline int Expression::signum(int i) {
+ if (i < 0)
+ return -1;
+ if (i > 0)
+ return 1;
+ return 0;
+ }
+
+ inline intrusive_ptr<const Value> ExpressionConstant::getValue() const {
+ return pValue;
+ }
+
+ inline string ExpressionFieldPath::getFieldPath(bool fieldPrefix) const {
+ return fieldPath.getPath(fieldPrefix);
+ }
+
+ inline void ExpressionFieldPath::writeFieldPath(
+ ostream &outStream, bool fieldPrefix) const {
+ return fieldPath.writePath(outStream, fieldPrefix);
+ }
+
+ inline size_t ExpressionObject::getFieldCount() const {
+ return vFieldName.size();
+ }
+
+ inline ExpressionObject::PathPusher::PathPusher(
+ vector<string> *pTheVPath, const string &s):
+ pvPath(pTheVPath) {
+ pvPath->push_back(s);
+ }
+
+ inline ExpressionObject::PathPusher::~PathPusher() {
+ pvPath->pop_back();
+ }
+
+}
diff --git a/src/mongo/db/pipeline/expression_context.cpp b/src/mongo/db/pipeline/expression_context.cpp
new file mode 100755
index 00000000000..4835dcfa5a9
--- /dev/null
+++ b/src/mongo/db/pipeline/expression_context.cpp
@@ -0,0 +1,35 @@
+/**
+ * Copyright (c) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+
+#include "db/pipeline/expression_context.h"
+
+namespace mongo {
+
+ ExpressionContext::~ExpressionContext() {
+ }
+
+ inline ExpressionContext::ExpressionContext():
+ inShard(false),
+ inRouter(false) {
+ }
+
+ ExpressionContext *ExpressionContext::create() {
+ return new ExpressionContext();
+ }
+
+}
diff --git a/src/mongo/db/pipeline/expression_context.h b/src/mongo/db/pipeline/expression_context.h
new file mode 100755
index 00000000000..0277039c80b
--- /dev/null
+++ b/src/mongo/db/pipeline/expression_context.h
@@ -0,0 +1,67 @@
+/**
+ * Copyright (c) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "pch.h"
+
+#include "util/intrusive_counter.h"
+
+namespace mongo {
+
+ class ExpressionContext :
+ public IntrusiveCounterUnsigned {
+ public:
+ virtual ~ExpressionContext();
+
+ void setInShard(bool b);
+ void setInRouter(bool b);
+
+ bool getInShard() const;
+ bool getInRouter() const;
+
+ static ExpressionContext *create();
+
+ private:
+ ExpressionContext();
+
+ bool inShard;
+ bool inRouter;
+ };
+}
+
+
+/* ======================= INLINED IMPLEMENTATIONS ========================== */
+
+namespace mongo {
+
+ inline void ExpressionContext::setInShard(bool b) {
+ inShard = b;
+ }
+
+ inline void ExpressionContext::setInRouter(bool b) {
+ inRouter = b;
+ }
+
+ inline bool ExpressionContext::getInShard() const {
+ return inShard;
+ }
+
+ inline bool ExpressionContext::getInRouter() const {
+ return inRouter;
+ }
+
+}
diff --git a/src/mongo/db/pipeline/field_path.cpp b/src/mongo/db/pipeline/field_path.cpp
new file mode 100755
index 00000000000..96e1fc92f83
--- /dev/null
+++ b/src/mongo/db/pipeline/field_path.cpp
@@ -0,0 +1,87 @@
+/**
+ * Copyright (c) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "db/pipeline/field_path.h"
+#include "util/mongoutils/str.h"
+
+namespace mongo {
+ using namespace mongoutils;
+
+ FieldPath::~FieldPath() {
+ }
+
+ FieldPath::FieldPath():
+ vFieldName() {
+ }
+
+ FieldPath::FieldPath(const string &fieldPath):
+ vFieldName() {
+ /*
+ The field path could be using dot notation.
+ Break the field path up by peeling off successive pieces.
+ */
+ size_t startpos = 0;
+ while(true) {
+ /* find the next dot */
+ const size_t dotpos = fieldPath.find('.', startpos);
+
+ /* if there are no more dots, use the remainder of the string */
+ if (dotpos == fieldPath.npos) {
+ vFieldName.push_back(fieldPath.substr(startpos, dotpos));
+ break;
+ }
+
+ /* use the string up to the dot */
+ const size_t length = dotpos - startpos;
+ uassert(15998, str::stream() <<
+ "field names cannot be zero length (in path \"" <<
+ fieldPath << "\")",
+ length > 0);
+
+ vFieldName.push_back(fieldPath.substr(startpos, length));
+
+ /* next time, search starting one spot after that */
+ startpos = dotpos + 1;
+ }
+ }
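+    /*
+      Illustrative example (not part of the original source): constructing
+      FieldPath("a.b.c") yields getPathLength() == 3, getFieldName(1) == "b",
+      getPath(false) == "a.b.c", and getPath(true) == "$a.b.c".
+    */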
+
+ string FieldPath::getPath(bool fieldPrefix) const {
+ stringstream ss;
+ writePath(ss, fieldPrefix);
+ return ss.str();
+ }
+
+ void FieldPath::writePath(ostream &outStream, bool fieldPrefix) const {
+ if (fieldPrefix)
+ outStream << "$";
+
+ outStream << vFieldName[0];
+
+ const size_t n = vFieldName.size();
+ for(size_t i = 1; i < n; ++i)
+ outStream << "." << vFieldName[i];
+ }
+
+ FieldPath &FieldPath::operator=(const FieldPath &rRHS) {
+ if (this != &rRHS) {
+ vFieldName = rRHS.vFieldName;
+ }
+
+ return *this;
+ }
+
+}
diff --git a/src/mongo/db/pipeline/field_path.h b/src/mongo/db/pipeline/field_path.h
new file mode 100755
index 00000000000..810c5d0c7ea
--- /dev/null
+++ b/src/mongo/db/pipeline/field_path.h
@@ -0,0 +1,82 @@
+/**
+ * Copyright (c) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "pch.h"
+
+namespace mongo {
+
+ class FieldPath {
+ public:
+ virtual ~FieldPath();
+
+ FieldPath(const string &fieldPath);
+ FieldPath();
+
+ /*
+ Get the number of path elements in the field path.
+
+ @returns the number of path elements
+ */
+ size_t getPathLength() const;
+
+ /*
+ Get a particular path element from the path.
+
+ @param i the index of the path element
+ @returns the path element
+ */
+ string getFieldName(size_t i) const;
+
+ /*
+ Get the full path.
+
+ @param fieldPrefix whether or not to include the field prefix
+ @returns the complete field path
+ */
+ string getPath(bool fieldPrefix) const;
+
+ /*
+ Write the full path.
+
+ @param outStream where to write the path to
+ @param fieldPrefix whether or not to include the field prefix
+ */
+ void writePath(ostream &outStream, bool fieldPrefix) const;
+
+ FieldPath &operator=(const FieldPath &rRHS);
+
+ private:
+ vector<string> vFieldName;
+ };
+}
+
+
+/* ======================= INLINED IMPLEMENTATIONS ========================== */
+
+namespace mongo {
+
+ inline size_t FieldPath::getPathLength() const {
+ return vFieldName.size();
+ }
+
+ inline string FieldPath::getFieldName(size_t i) const {
+ return vFieldName[i];
+ }
+
+}
+
diff --git a/src/mongo/db/pipeline/value.cpp b/src/mongo/db/pipeline/value.cpp
new file mode 100755
index 00000000000..b83dec359cf
--- /dev/null
+++ b/src/mongo/db/pipeline/value.cpp
@@ -0,0 +1,1034 @@
+/**
+ * Copyright (c) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "db/pipeline/value.h"
+
+#include <boost/functional/hash.hpp>
+#include "db/jsobj.h"
+#include "db/pipeline/builder.h"
+#include "db/pipeline/document.h"
+#include "util/mongoutils/str.h"
+
+namespace mongo {
+ using namespace mongoutils;
+
+ const intrusive_ptr<const Value> Value::pFieldUndefined(
+ new ValueStatic(Undefined));
+ const intrusive_ptr<const Value> Value::pFieldNull(new ValueStatic());
+ const intrusive_ptr<const Value> Value::pFieldTrue(new ValueStatic(true));
+ const intrusive_ptr<const Value> Value::pFieldFalse(new ValueStatic(false));
+ const intrusive_ptr<const Value> Value::pFieldMinusOne(new ValueStatic(-1));
+ const intrusive_ptr<const Value> Value::pFieldZero(new ValueStatic(0));
+ const intrusive_ptr<const Value> Value::pFieldOne(new ValueStatic(1));
+
+ Value::~Value() {
+ }
+
+ Value::Value():
+ type(jstNULL),
+ oidValue(),
+ dateValue(),
+ stringValue(),
+ pDocumentValue(),
+ vpValue() {
+ }
+
+ Value::Value(BSONType theType):
+ type(theType),
+ oidValue(),
+ dateValue(),
+ stringValue(),
+ pDocumentValue(),
+ vpValue() {
+ switch(type) {
+ case Undefined:
+ case jstNULL:
+ case Object: // empty
+ case Array: // empty
+ break;
+
+ case NumberDouble:
+ simple.doubleValue = 0;
+ break;
+
+ case Bool:
+ simple.boolValue = false;
+ break;
+
+ case NumberInt:
+ simple.intValue = 0;
+ break;
+
+ case Timestamp:
+ simple.timestampValue = 0;
+ break;
+
+ case NumberLong:
+ simple.longValue = 0;
+ break;
+
+ default:
+ // nothing else is allowed
+ uassert(16001, str::stream() <<
+ "can't create empty Value of type " << type, false);
+ break;
+ }
+ }
+
+ Value::Value(bool boolValue):
+ type(Bool),
+ pDocumentValue(),
+ vpValue() {
+ simple.boolValue = boolValue;
+ }
+
+ intrusive_ptr<const Value> Value::createFromBsonElement(
+ BSONElement *pBsonElement) {
+ intrusive_ptr<const Value> pValue(new Value(pBsonElement));
+ return pValue;
+ }
+
+ Value::Value(BSONElement *pBsonElement):
+ type(pBsonElement->type()),
+ pDocumentValue(),
+ vpValue() {
+ switch(type) {
+ case NumberDouble:
+ simple.doubleValue = pBsonElement->Double();
+ break;
+
+ case String:
+ stringValue = pBsonElement->String();
+ break;
+
+ case Object: {
+ BSONObj document(pBsonElement->embeddedObject());
+ pDocumentValue = Document::createFromBsonObj(&document);
+ break;
+ }
+
+ case Array: {
+ vector<BSONElement> vElement(pBsonElement->Array());
+ const size_t n = vElement.size();
+
+ vpValue.reserve(n); // save on realloc()ing
+
+ for(size_t i = 0; i < n; ++i) {
+ vpValue.push_back(
+ Value::createFromBsonElement(&vElement[i]));
+ }
+ break;
+ }
+
+ case jstOID:
+ oidValue = pBsonElement->OID();
+ break;
+
+ case Bool:
+ simple.boolValue = pBsonElement->Bool();
+ break;
+
+ case Date:
+ dateValue = pBsonElement->Date();
+ break;
+
+ case RegEx:
+ stringValue = pBsonElement->regex();
+ // TODO pBsonElement->regexFlags();
+ break;
+
+ case NumberInt:
+ simple.intValue = pBsonElement->numberInt();
+ break;
+
+ case Timestamp:
+ dateValue = pBsonElement->timestampTime();
+ break;
+
+ case NumberLong:
+ simple.longValue = pBsonElement->numberLong();
+ break;
+
+ case jstNULL:
+ break;
+
+ case BinData:
+ case Symbol:
+ case CodeWScope:
+ uassert(16002, str::stream() <<
+ "can't create Value of type " << type, false);
+ break;
+
+ /* these shouldn't happen in this context */
+ case MinKey:
+ case EOO:
+ case Undefined:
+ case DBRef:
+ case Code:
+ case MaxKey:
+ assert(false); // CW TODO better message
+ break;
+ }
+ }
+
+ Value::Value(int intValue):
+ type(NumberInt),
+ pDocumentValue(),
+ vpValue() {
+ simple.intValue = intValue;
+ }
+
+ intrusive_ptr<const Value> Value::createInt(int value) {
+ intrusive_ptr<const Value> pValue(new Value(value));
+ return pValue;
+ }
+
+ Value::Value(long long longValue):
+ type(NumberLong),
+ pDocumentValue(),
+ vpValue() {
+ simple.longValue = longValue;
+ }
+
+ intrusive_ptr<const Value> Value::createLong(long long value) {
+ intrusive_ptr<const Value> pValue(new Value(value));
+ return pValue;
+ }
+
+ Value::Value(double value):
+ type(NumberDouble),
+ pDocumentValue(),
+ vpValue() {
+ simple.doubleValue = value;
+ }
+
+ intrusive_ptr<const Value> Value::createDouble(double value) {
+ intrusive_ptr<const Value> pValue(new Value(value));
+ return pValue;
+ }
+
+ Value::Value(const Date_t &value):
+ type(Date),
+ pDocumentValue(),
+ vpValue() {
+ dateValue = value;
+ }
+
+ intrusive_ptr<const Value> Value::createDate(const Date_t &value) {
+ intrusive_ptr<const Value> pValue(new Value(value));
+ return pValue;
+ }
+
+ Value::Value(const string &value):
+ type(String),
+ pDocumentValue(),
+ vpValue() {
+ stringValue = value;
+ }
+
+ intrusive_ptr<const Value> Value::createString(const string &value) {
+ intrusive_ptr<const Value> pValue(new Value(value));
+ return pValue;
+ }
+
+ Value::Value(const intrusive_ptr<Document> &pDocument):
+ type(Object),
+ pDocumentValue(pDocument),
+ vpValue() {
+ }
+
+ intrusive_ptr<const Value> Value::createDocument(
+ const intrusive_ptr<Document> &pDocument) {
+ intrusive_ptr<const Value> pValue(new Value(pDocument));
+ return pValue;
+ }
+
+ Value::Value(const vector<intrusive_ptr<const Value> > &thevpValue):
+ type(Array),
+ pDocumentValue(),
+ vpValue(thevpValue) {
+ }
+
+ intrusive_ptr<const Value> Value::createArray(
+ const vector<intrusive_ptr<const Value> > &vpValue) {
+ intrusive_ptr<const Value> pValue(new Value(vpValue));
+ return pValue;
+ }
+
+ double Value::getDouble() const {
+ BSONType type = getType();
+ if (type == NumberInt)
+ return simple.intValue;
+ if (type == NumberLong)
+ return static_cast< double >( simple.longValue );
+
+ assert(type == NumberDouble);
+ return simple.doubleValue;
+ }
+
+ string Value::getString() const {
+ assert(getType() == String);
+ return stringValue;
+ }
+
+ intrusive_ptr<Document> Value::getDocument() const {
+ assert(getType() == Object);
+ return pDocumentValue;
+ }
+
+ ValueIterator::~ValueIterator() {
+ }
+
+ Value::vi::~vi() {
+ }
+
+ bool Value::vi::more() const {
+ return (nextIndex < size);
+ }
+
+ intrusive_ptr<const Value> Value::vi::next() {
+ assert(more());
+ return (*pvpValue)[nextIndex++];
+ }
+
+ Value::vi::vi(const intrusive_ptr<const Value> &pValue,
+ const vector<intrusive_ptr<const Value> > *thepvpValue):
+ size(thepvpValue->size()),
+ nextIndex(0),
+ pvpValue(thepvpValue) {
+ }
+
+ intrusive_ptr<ValueIterator> Value::getArray() const {
+ assert(getType() == Array);
+ intrusive_ptr<ValueIterator> pVI(
+ new vi(intrusive_ptr<const Value>(this), &vpValue));
+ return pVI;
+ }
+
+ OID Value::getOid() const {
+ assert(getType() == jstOID);
+ return oidValue;
+ }
+
+ bool Value::getBool() const {
+ assert(getType() == Bool);
+ return simple.boolValue;
+ }
+
+ Date_t Value::getDate() const {
+ assert(getType() == Date);
+ return dateValue;
+ }
+
+ string Value::getRegex() const {
+ assert(getType() == RegEx);
+ return stringValue;
+ }
+
+ string Value::getSymbol() const {
+ assert(getType() == Symbol);
+ return stringValue;
+ }
+
+ int Value::getInt() const {
+ assert(getType() == NumberInt);
+ return simple.intValue;
+ }
+
+ unsigned long long Value::getTimestamp() const {
+ assert(getType() == Timestamp);
+ return dateValue;
+ }
+
+ long long Value::getLong() const {
+ BSONType type = getType();
+ if (type == NumberInt)
+ return simple.intValue;
+
+ assert(type == NumberLong);
+ return simple.longValue;
+ }
+
+ void Value::addToBson(Builder *pBuilder) const {
+ switch(getType()) {
+ case NumberDouble:
+ pBuilder->append(getDouble());
+ break;
+
+ case String:
+ pBuilder->append(getString());
+ break;
+
+ case Object: {
+ intrusive_ptr<Document> pDocument(getDocument());
+ BSONObjBuilder subBuilder;
+ pDocument->toBson(&subBuilder);
+ subBuilder.done();
+ pBuilder->append(&subBuilder);
+ break;
+ }
+
+ case Array: {
+ const size_t n = vpValue.size();
+ BSONArrayBuilder arrayBuilder(n);
+ for(size_t i = 0; i < n; ++i) {
+ vpValue[i]->addToBsonArray(&arrayBuilder);
+ }
+
+ pBuilder->append(&arrayBuilder);
+ break;
+ }
+
+ case BinData:
+ // pBuilder->appendBinData(fieldName, ...);
+ assert(false); // CW TODO unimplemented
+ break;
+
+ case jstOID:
+ pBuilder->append(getOid());
+ break;
+
+ case Bool:
+ pBuilder->append(getBool());
+ break;
+
+ case Date:
+ pBuilder->append(getDate());
+ break;
+
+ case RegEx:
+ pBuilder->append(getRegex());
+ break;
+
+ case Symbol:
+ pBuilder->append(getSymbol());
+ break;
+
+ case CodeWScope:
+ assert(false); // CW TODO unimplemented
+ break;
+
+ case NumberInt:
+ pBuilder->append(getInt());
+ break;
+
+ case Timestamp:
+ pBuilder->append((long long)getTimestamp());
+ break;
+
+ case NumberLong:
+ pBuilder->append(getLong());
+ break;
+
+ case jstNULL:
+ pBuilder->append();
+ break;
+
+ /* these shouldn't appear in this context */
+ case MinKey:
+ case EOO:
+ case Undefined:
+ case DBRef:
+ case Code:
+ case MaxKey:
+ assert(false); // CW TODO better message
+ break;
+ }
+ }
+
+ void Value::addToBsonObj(BSONObjBuilder *pBuilder, string fieldName) const {
+ BuilderObj objBuilder(pBuilder, fieldName);
+ addToBson(&objBuilder);
+ }
+
+ void Value::addToBsonArray(BSONArrayBuilder *pBuilder) const {
+ BuilderArray arrBuilder(pBuilder);
+ addToBson(&arrBuilder);
+ }
+
+ bool Value::coerceToBool() const {
+ BSONType type = getType();
+ switch(type) {
+ case NumberDouble:
+ if (simple.doubleValue != 0)
+ return true;
+ break;
+
+ case String:
+ case Object:
+ case Array:
+ case BinData:
+ case jstOID:
+ case Date:
+ case RegEx:
+ case Symbol:
+ case Timestamp:
+ return true;
+
+ case Bool:
+ if (simple.boolValue)
+ return true;
+ break;
+
+ case CodeWScope:
+ assert(false); // CW TODO unimplemented
+ break;
+
+ case NumberInt:
+ if (simple.intValue != 0)
+ return true;
+ break;
+
+ case NumberLong:
+ if (simple.longValue != 0)
+ return true;
+ break;
+
+ case jstNULL:
+ case Undefined:
+ /* nothing to do */
+ break;
+
+ /* these shouldn't happen in this context */
+ case MinKey:
+ case EOO:
+ case DBRef:
+ case Code:
+ case MaxKey:
+ assert(false); // CW TODO better message
+ break;
+ }
+
+ return false;
+ }
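+    /*
+      Illustrative behavior (per the cases above): numbers and booleans
+      coerce to false only when zero/false; null and Undefined coerce to
+      false; strings (including ""), objects, arrays, OIDs, dates, regexes,
+      symbols, and timestamps always coerce to true.
+    */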
+
+ intrusive_ptr<const Value> Value::coerceToBoolean() const {
+ bool result = coerceToBool();
+
+ /* always normalize to the singletons */
+ if (result)
+ return Value::getTrue();
+ return Value::getFalse();
+ }
+
+ int Value::coerceToInt() const {
+ switch(type) {
+ case NumberDouble:
+ return (int)simple.doubleValue;
+
+ case NumberInt:
+ return simple.intValue;
+
+ case NumberLong:
+ return (int)simple.longValue;
+
+ case jstNULL:
+ case Undefined:
+ break;
+
+ case String:
+ default:
+ uassert(16003, str::stream() <<
+ "can't convert from BSON type " << type <<
+ " to int",
+ false);
+ } // switch(type)
+
+ return (int)0;
+ }
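+    /*
+      Illustrative behavior (per the cases above): doubles and longs are
+      narrowed to int by a C-style cast (so 3.9 coerces to 3), null and
+      Undefined coerce to 0, and other types trigger a uassert.
+    */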
+
+ long long Value::coerceToLong() const {
+ switch(type) {
+ case NumberDouble:
+ return (long long)simple.doubleValue;
+
+ case NumberInt:
+ return simple.intValue;
+
+ case NumberLong:
+ return simple.longValue;
+
+ case jstNULL:
+ case Undefined:
+ break;
+
+ case String:
+ default:
+ uassert(16004, str::stream() <<
+ "can't convert from BSON type " << type <<
+ " to long",
+ false);
+ } // switch(type)
+
+ return (long long)0;
+ }
+
+ double Value::coerceToDouble() const {
+ switch(type) {
+ case NumberDouble:
+ return simple.doubleValue;
+
+ case NumberInt:
+ return (double)simple.intValue;
+
+ case NumberLong:
+ return (double)simple.longValue;
+
+ case jstNULL:
+ case Undefined:
+ break;
+
+ case String:
+ default:
+ uassert(16005, str::stream() <<
+ "can't convert from BSON type " << type <<
+ " to double",
+ false);
+ } // switch(type)
+
+ return (double)0;
+ }
+
+ Date_t Value::coerceToDate() const {
+ switch(type) {
+
+ case Date:
+ return dateValue;
+
+ case jstNULL:
+ case Undefined:
+ break;
+
+ default:
+ uassert(16006, str::stream() <<
+ "can't convert from BSON type " << type <<
+                    " to date",
+ false);
+ } // switch(type)
+
+ assert(false); // CW TODO no conversion available
+        return Date_t();
+ }
+
+ string Value::coerceToString() const {
+ stringstream ss;
+ switch(type) {
+ case NumberDouble:
+ ss << simple.doubleValue;
+ return ss.str();
+
+ case NumberInt:
+ ss << simple.intValue;
+ return ss.str();
+
+ case NumberLong:
+ ss << simple.longValue;
+ return ss.str();
+
+ case String:
+ return stringValue;
+
+ case Date:
+ return dateValue.toString();
+
+ case jstNULL:
+ case Undefined:
+ break;
+
+ default:
+ uassert(16007, str::stream() <<
+ "can't convert from BSON type " << type <<
+                    " to string",
+ false);
+ } // switch(type)
+
+ return "";
+ }
+
+ int Value::compare(const intrusive_ptr<const Value> &rL,
+ const intrusive_ptr<const Value> &rR) {
+ BSONType lType = rL->getType();
+ BSONType rType = rR->getType();
+
+ /*
+ Special handling for Undefined and NULL values; these are types,
+ so it's easier to handle them here before we go below to handle
+ values of the same types. This allows us to compare Undefined and
+ NULL values with everything else. As coded now:
+ (*) Undefined is less than everything except itself (which is equal)
+ (*) NULL is less than everything except Undefined and itself
+ */
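+        /*
+          Illustrative consequences of the rules above: compare(Undefined,
+          null) returns -1, compare(null, <any other type>) returns -1, and
+          comparing two Undefined or two null values returns 0.
+        */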
+ if (lType == Undefined) {
+ if (rType == Undefined)
+ return 0;
+
+ /* if rType is anything else, the left value is less */
+ return -1;
+ }
+
+ if (lType == jstNULL) {
+ if (rType == Undefined)
+ return 1;
+ if (rType == jstNULL)
+ return 0;
+
+ return -1;
+ }
+
+ if ((rType == Undefined) || (rType == jstNULL)) {
+ /*
+ We know the left value isn't Undefined, because of the above.
+ Count a NULL value as greater than an undefined one.
+ */
+ return 1;
+ }
+
+ // CW TODO for now, only compare like values
+ uassert(16016, str::stream() <<
+ "can't compare values of BSON types " << lType <<
+ " and " << rType,
+ lType == rType);
+
+ switch(lType) {
+ case NumberDouble:
+ if (rL->simple.doubleValue < rR->simple.doubleValue)
+ return -1;
+ if (rL->simple.doubleValue > rR->simple.doubleValue)
+ return 1;
+ return 0;
+
+ case String:
+ return rL->stringValue.compare(rR->stringValue);
+
+ case Object:
+ return Document::compare(rL->getDocument(), rR->getDocument());
+
+ case Array: {
+ intrusive_ptr<ValueIterator> pli(rL->getArray());
+ intrusive_ptr<ValueIterator> pri(rR->getArray());
+
+ while(true) {
+ /* have we run out of left array? */
+ if (!pli->more()) {
+ if (!pri->more())
+ return 0; // the arrays are the same length
+
+ return -1; // the left array is shorter
+ }
+
+ /* have we run out of right array? */
+ if (!pri->more())
+ return 1; // the right array is shorter
+
+ /* compare the two corresponding elements */
+ intrusive_ptr<const Value> plv(pli->next());
+ intrusive_ptr<const Value> prv(pri->next());
+ const int cmp = Value::compare(plv, prv);
+ if (cmp)
+ return cmp; // values are unequal
+ }
+
+ /* NOTREACHED */
+ assert(false);
+ break;
+ }
+
+ case BinData:
+ // pBuilder->appendBinData(fieldName, ...);
+ assert(false); // CW TODO unimplemented
+ break;
+
+ case jstOID:
+ if (rL->oidValue < rR->oidValue)
+ return -1;
+ if (rL->oidValue == rR->oidValue)
+ return 0;
+ return 1;
+
+ case Bool:
+ if (rL->simple.boolValue == rR->simple.boolValue)
+ return 0;
+ if (rL->simple.boolValue)
+ return 1;
+ return -1;
+
+ case Date:
+ if (rL->dateValue < rR->dateValue)
+ return -1;
+ if (rL->dateValue > rR->dateValue)
+ return 1;
+ return 0;
+
+ case RegEx:
+ return rL->stringValue.compare(rR->stringValue);
+
+ case Symbol:
+ assert(false); // CW TODO unimplemented
+ break;
+
+ case CodeWScope:
+ assert(false); // CW TODO unimplemented
+ break;
+
+ case NumberInt:
+ if (rL->simple.intValue < rR->simple.intValue)
+ return -1;
+ if (rL->simple.intValue > rR->simple.intValue)
+ return 1;
+ return 0;
+
+ case Timestamp:
+ if (rL->dateValue < rR->dateValue)
+ return -1;
+ if (rL->dateValue > rR->dateValue)
+ return 1;
+ return 0;
+
+ case NumberLong:
+ if (rL->simple.longValue < rR->simple.longValue)
+ return -1;
+ if (rL->simple.longValue > rR->simple.longValue)
+ return 1;
+ return 0;
+
+ case Undefined:
+ case jstNULL:
+ return 0; // treat two Undefined or NULL values as equal
+
+ /* these shouldn't happen in this context */
+ case MinKey:
+ case EOO:
+ case DBRef:
+ case Code:
+ case MaxKey:
+ assert(false); // CW TODO better message
+ break;
+ } // switch(lType)
+
+ /* NOTREACHED */
+ return 0;
+ }
+
+ void Value::hash_combine(size_t &seed) const {
+ BSONType type = getType();
+ boost::hash_combine(seed, (int)type);
+
+ switch(type) {
+ case NumberDouble:
+ boost::hash_combine(seed, simple.doubleValue);
+ break;
+
+ case String:
+ boost::hash_combine(seed, stringValue);
+ break;
+
+ case Object:
+ getDocument()->hash_combine(seed);
+ break;
+
+ case Array: {
+ intrusive_ptr<ValueIterator> pIter(getArray());
+ while(pIter->more()) {
+ intrusive_ptr<const Value> pValue(pIter->next());
+ pValue->hash_combine(seed);
+ };
+ break;
+ }
+
+ case BinData:
+ // pBuilder->appendBinData(fieldName, ...);
+ assert(false); // CW TODO unimplemented
+ break;
+
+ case jstOID:
+ oidValue.hash_combine(seed);
+ break;
+
+ case Bool:
+ boost::hash_combine(seed, simple.boolValue);
+ break;
+
+ case Date:
+ boost::hash_combine(seed, (unsigned long long)dateValue);
+ break;
+
+ case RegEx:
+ boost::hash_combine(seed, stringValue);
+ break;
+
+ case Symbol:
+ assert(false); // CW TODO unimplemented
+ break;
+
+ case CodeWScope:
+ assert(false); // CW TODO unimplemented
+ break;
+
+ case NumberInt:
+ boost::hash_combine(seed, simple.intValue);
+ break;
+
+ case Timestamp:
+ boost::hash_combine(seed, (unsigned long long)dateValue);
+ break;
+
+ case NumberLong:
+ boost::hash_combine(seed, simple.longValue);
+ break;
+
+ case Undefined:
+ case jstNULL:
+ break;
+
+ /* these shouldn't happen in this context */
+ case MinKey:
+ case EOO:
+ case DBRef:
+ case Code:
+ case MaxKey:
+ assert(false); // CW TODO better message
+ break;
+ } // switch(type)
+ }
+
+ BSONType Value::getWidestNumeric(BSONType lType, BSONType rType) {
+ if (lType == NumberDouble) {
+ switch(rType) {
+ case NumberDouble:
+ case NumberLong:
+ case NumberInt:
+ case jstNULL:
+ case Undefined:
+ return NumberDouble;
+
+ default:
+ break;
+ }
+ }
+ else if (lType == NumberLong) {
+ switch(rType) {
+ case NumberDouble:
+ return NumberDouble;
+
+ case NumberLong:
+ case NumberInt:
+ case jstNULL:
+ case Undefined:
+ return NumberLong;
+
+ default:
+ break;
+ }
+ }
+ else if (lType == NumberInt) {
+ switch(rType) {
+ case NumberDouble:
+ return NumberDouble;
+
+ case NumberLong:
+ return NumberLong;
+
+ case NumberInt:
+ case jstNULL:
+ case Undefined:
+ return NumberInt;
+
+ default:
+ break;
+ }
+ }
+ else if ((lType == jstNULL) || (lType == Undefined)) {
+ switch(rType) {
+ case NumberDouble:
+ return NumberDouble;
+
+ case NumberLong:
+ return NumberLong;
+
+ case NumberInt:
+ return NumberInt;
+
+ default:
+ break;
+ }
+ }
+
+ /* NOTREACHED */
+ return Undefined;
+ }
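+    /*
+      Illustrative examples (per the cases above): (NumberInt, NumberLong)
+      widens to NumberLong, (NumberLong, NumberDouble) widens to NumberDouble,
+      and a jstNULL or Undefined operand defers to the other operand's type.
+    */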
+
+ size_t Value::getApproximateSize() const {
+ switch(type) {
+ case String:
+ return sizeof(Value) + stringValue.length();
+
+ case Object:
+ return sizeof(Value) + pDocumentValue->getApproximateSize();
+
+ case Array: {
+ size_t size = sizeof(Value);
+ const size_t n = vpValue.size();
+ for(size_t i = 0; i < n; ++i) {
+ size += vpValue[i]->getApproximateSize();
+ }
+ return size;
+ }
+
+ case NumberDouble:
+ case BinData:
+ case jstOID:
+ case Bool:
+ case Date:
+ case RegEx:
+ case Symbol:
+ case CodeWScope:
+ case NumberInt:
+ case Timestamp:
+ case NumberLong:
+ case jstNULL:
+ case Undefined:
+ return sizeof(Value);
+
+ /* these shouldn't happen in this context */
+ case MinKey:
+ case EOO:
+ case DBRef:
+ case Code:
+ case MaxKey:
+ assert(false); // CW TODO better message
+ return sizeof(Value);
+ }
+
+ /*
+ We shouldn't get here. In order to make the implementor think about
+ these cases, they are all listed explicitly, above. The compiler
+ should complain if they aren't all listed, because there's no
+ default. However, not all the compilers seem to do that. Therefore,
+ this final catch-all is here.
+ */
+ assert(false);
+ return sizeof(Value);
+ }
+
+
+ void ValueStatic::addRef() const {
+ }
+
+ void ValueStatic::release() const {
+ }
+
+}
diff --git a/src/mongo/db/pipeline/value.h b/src/mongo/db/pipeline/value.h
new file mode 100755
index 00000000000..8bd1bcbbbfd
--- /dev/null
+++ b/src/mongo/db/pipeline/value.h
@@ -0,0 +1,468 @@
+/**
+ * Copyright (c) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "pch.h"
+#include "bson/bsontypes.h"
+#include "util/intrusive_counter.h"
+
+namespace mongo {
+ class BSONElement;
+ class Builder;
+ class Document;
+ class Value;
+
+ class ValueIterator :
+ public IntrusiveCounterUnsigned {
+ public:
+ virtual ~ValueIterator();
+
+ /*
+ Ask if there are more fields to return.
+
+ @returns true if there are more fields, false otherwise
+ */
+ virtual bool more() const = 0;
+
+ /*
+ Move the iterator to point to the next field and return it.
+
+          @returns the next Value
+ */
+ virtual intrusive_ptr<const Value> next() = 0;
+ };
+
+
+ /*
+ Values are immutable, so these are passed around as
+ intrusive_ptr<const Value>.
+ */
+ class Value :
+ public IntrusiveCounterUnsigned {
+ public:
+ ~Value();
+
+ /*
+ Construct a Value from a BSONElement.
+
+ This ignores the name of the element, and only uses the value,
+ whatever type it is.
+
+ @returns a new Value initialized from the bsonElement
+ */
+ static intrusive_ptr<const Value> createFromBsonElement(
+ BSONElement *pBsonElement);
+
+ /*
+ Construct an integer-valued Value.
+
+ For commonly used values, consider using one of the singleton
+ instances defined below.
+
+ @param value the value
+ @returns a Value with the given value
+ */
+ static intrusive_ptr<const Value> createInt(int value);
+
+ /*
+          Construct a long long-valued Value.
+
+ For commonly used values, consider using one of the singleton
+ instances defined below.
+
+ @param value the value
+ @returns a Value with the given value
+ */
+ static intrusive_ptr<const Value> createLong(long long value);
+
+ /*
+ Construct a double-valued Value.
+
+ @param value the value
+ @returns a Value with the given value
+ */
+ static intrusive_ptr<const Value> createDouble(double value);
+
+ /*
+ Construct a string-valued Value.
+
+ @param value the value
+ @returns a Value with the given value
+ */
+ static intrusive_ptr<const Value> createString(const string &value);
+
+ /*
+ Construct a date-valued Value.
+
+ @param value the value
+ @returns a Value with the given value
+ */
+ static intrusive_ptr<const Value> createDate(const Date_t &value);
+
+ /*
+ Construct a document-valued Value.
+
+ @param value the value
+ @returns a Value with the given value
+ */
+ static intrusive_ptr<const Value> createDocument(
+ const intrusive_ptr<Document> &pDocument);
+
+ /*
+ Construct an array-valued Value.
+
+ @param value the value
+ @returns a Value with the given value
+ */
+ static intrusive_ptr<const Value> createArray(
+ const vector<intrusive_ptr<const Value> > &vpValue);
+
+ /*
+ Get the BSON type of the field.
+
+ If the type is jstNULL, no value getter will work.
+
+ @return the BSON type of the field.
+ */
+ BSONType getType() const;
+
+ /*
+ Getters.
+
+ @returns the Value's value; asserts if the requested value type is
+ incorrect.
+ */
+ double getDouble() const;
+ string getString() const;
+ intrusive_ptr<Document> getDocument() const;
+ intrusive_ptr<ValueIterator> getArray() const;
+ OID getOid() const;
+ bool getBool() const;
+ Date_t getDate() const;
+ string getRegex() const;
+ string getSymbol() const;
+ int getInt() const;
+ unsigned long long getTimestamp() const;
+ long long getLong() const;
+
+ /*
+ Get the length of an array value.
+
+          @returns the length of the array, if this is array-valued; otherwise
+          asserts
+ */
+ size_t getArrayLength() const;
+
+ /*
+ Add this value to the BSON object under construction.
+ */
+ void addToBsonObj(BSONObjBuilder *pBuilder, string fieldName) const;
+
+ /*
+          Add this value to the BSON array under construction.
+
+          As an array element, the value is appended without a field name.
+ */
+ void addToBsonArray(BSONArrayBuilder *pBuilder) const;
+
+ /*
+ Get references to singleton instances of commonly used field values.
+ */
+ static intrusive_ptr<const Value> getUndefined();
+ static intrusive_ptr<const Value> getNull();
+ static intrusive_ptr<const Value> getTrue();
+ static intrusive_ptr<const Value> getFalse();
+ static intrusive_ptr<const Value> getMinusOne();
+ static intrusive_ptr<const Value> getZero();
+ static intrusive_ptr<const Value> getOne();
+
+ /*
+ Coerce (cast) a value to a native bool, using JSON rules.
+
+ @returns the bool value
+ */
+ bool coerceToBool() const;
+
+ /*
+ Coerce (cast) a value to a Boolean Value, using JSON rules.
+
+          @returns the Boolean Value
+ */
+ intrusive_ptr<const Value> coerceToBoolean() const;
+
+ /*
+ Coerce (cast) a value to an int, using JSON rules.
+
+ @returns the int value
+ */
+ int coerceToInt() const;
+
+ /*
+ Coerce (cast) a value to a long long, using JSON rules.
+
+ @returns the long value
+ */
+ long long coerceToLong() const;
+
+ /*
+ Coerce (cast) a value to a double, using JSON rules.
+
+ @returns the double value
+ */
+ double coerceToDouble() const;
+
+ /*
+ Coerce (cast) a value to a date, using JSON rules.
+
+ @returns the date value
+ */
+ Date_t coerceToDate() const;
+
+ /*
+ Coerce (cast) a value to a string, using JSON rules.
+
+          @returns the string value
+ */
+ string coerceToString() const;
+
+ /*
+ Compare two Values.
+
+ @param rL left value
+ @param rR right value
+ @returns an integer less than zero, zero, or an integer greater than
+ zero, depending on whether rL < rR, rL == rR, or rL > rR
+ */
+ static int compare(const intrusive_ptr<const Value> &rL,
+ const intrusive_ptr<const Value> &rR);
+
+
+ /*
+ Figure out what the widest of two numeric types is.
+
+ Widest can be thought of as "most capable," or "able to hold the
+ largest or most precise value." The progression is Int, Long, Double.
+
+ @param rL left value
+ @param rR right value
+ @returns a BSONType of NumberInt, NumberLong, or NumberDouble
+ */
+ static BSONType getWidestNumeric(BSONType lType, BSONType rType);
+
+ /*
+ Get the approximate storage size of the value, in bytes.
+
+ @returns approximate storage size of the value.
+ */
+ size_t getApproximateSize() const;
+
+ /*
+ Calculate a hash value.
+
+ Meant to be used to create composite hashes suitable for
+ boost classes such as unordered_map<>.
+
+          @param seed value to augment with this Value's hash
+ */
+ void hash_combine(size_t &seed) const;
+
+ /*
+ struct Hash is defined to enable the use of Values as
+ keys in boost::unordered_map<>.
+
+ Values are always referenced as immutables in the form
+ intrusive_ptr<const Value>, so these operate on that construction.
+ */
+ struct Hash :
+ unary_function<intrusive_ptr<const Value>, size_t> {
+ size_t operator()(const intrusive_ptr<const Value> &rV) const;
+ };
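+        /*
+          Illustrative (assumed) usage: together with the equality operator
+          defined below, this allows declarations such as
+            boost::unordered_map<intrusive_ptr<const Value>, size_t,
+                                 Value::Hash> seen;
+          where "seen" is a hypothetical name.
+        */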
+
+ protected:
+ Value(); // creates null value
+        Value(BSONType type); // creates an empty (uninitialized) value of the given type;
+                              // mostly useful for Undefined
+ Value(bool boolValue);
+ Value(int intValue);
+
+ private:
+ Value(BSONElement *pBsonElement);
+
+ Value(long long longValue);
+ Value(double doubleValue);
+ Value(const Date_t &dateValue);
+ Value(const string &stringValue);
+ Value(const intrusive_ptr<Document> &pDocument);
+ Value(const vector<intrusive_ptr<const Value> > &vpValue);
+
+ void addToBson(Builder *pBuilder) const;
+
+ BSONType type;
+
+ /* store value in one of these */
+ union {
+ double doubleValue;
+ bool boolValue;
+ int intValue;
+ unsigned long long timestampValue;
+ long long longValue;
+
+ } simple; // values that don't need a ctor/dtor
+ OID oidValue;
+ Date_t dateValue;
+ string stringValue; // String, Regex, Symbol
+ intrusive_ptr<Document> pDocumentValue;
+ vector<intrusive_ptr<const Value> > vpValue; // for arrays
+
+
+ /*
+ These are often used as the result of boolean or comparison
+ expressions.
+
+ These are obtained via public static getters defined above.
+ */
+ static const intrusive_ptr<const Value> pFieldUndefined;
+ static const intrusive_ptr<const Value> pFieldNull;
+ static const intrusive_ptr<const Value> pFieldTrue;
+ static const intrusive_ptr<const Value> pFieldFalse;
+ static const intrusive_ptr<const Value> pFieldMinusOne;
+ static const intrusive_ptr<const Value> pFieldZero;
+ static const intrusive_ptr<const Value> pFieldOne;
+
+ /* this implementation is used for getArray() */
+ class vi :
+ public ValueIterator {
+ public:
+ // virtuals from ValueIterator
+ virtual ~vi();
+ virtual bool more() const;
+ virtual intrusive_ptr<const Value> next();
+
+ private:
+ friend class Value;
+ vi(const intrusive_ptr<const Value> &pSource,
+ const vector<intrusive_ptr<const Value> > *pvpValue);
+
+ size_t size;
+ size_t nextIndex;
+ const vector<intrusive_ptr<const Value> > *pvpValue;
+ }; /* class vi */
+
+ };
+
+ /*
+ Equality operator for values.
+
+ Useful for unordered_map<>, etc.
+ */
+ inline bool operator==(const intrusive_ptr<const Value> &v1,
+ const intrusive_ptr<const Value> &v2) {
+ return (Value::compare(v1, v2) == 0);
+ }
+
+ /*
+ For performance reasons, there are various sharable static values
+ defined in class Value, obtainable by methods such as getUndefined(),
+ getTrue(), getOne(), etc. We don't want these to go away as they are
+ used by a multitude of threads evaluating pipelines. In order to avoid
+ having to use atomic integers in the intrusive reference counter, this
+ class overrides the reference counting methods to do nothing, making it
+ safe to use for static Values.
+
+ At this point, only the constructors necessary for the static Values in
+ common use have been defined. The remainder can be defined if necessary.
+ */
+ class ValueStatic :
+ public Value {
+ public:
+ // virtuals from IntrusiveCounterUnsigned
+ virtual void addRef() const;
+ virtual void release() const;
+
+ // constructors
+ ValueStatic();
+ ValueStatic(BSONType type);
+ ValueStatic(bool boolValue);
+ ValueStatic(int intValue);
+ };
+}
+
+/* ======================= INLINED IMPLEMENTATIONS ========================== */
+
+namespace mongo {
+
+ inline BSONType Value::getType() const {
+ return type;
+ }
+
+ inline size_t Value::getArrayLength() const {
+ assert(getType() == Array);
+ return vpValue.size();
+ }
+
+ inline intrusive_ptr<const Value> Value::getUndefined() {
+ return pFieldUndefined;
+ }
+
+ inline intrusive_ptr<const Value> Value::getNull() {
+ return pFieldNull;
+ }
+
+ inline intrusive_ptr<const Value> Value::getTrue() {
+ return pFieldTrue;
+ }
+
+ inline intrusive_ptr<const Value> Value::getFalse() {
+ return pFieldFalse;
+ }
+
+ inline intrusive_ptr<const Value> Value::getMinusOne() {
+ return pFieldMinusOne;
+ }
+
+ inline intrusive_ptr<const Value> Value::getZero() {
+ return pFieldZero;
+ }
+
+ inline intrusive_ptr<const Value> Value::getOne() {
+ return pFieldOne;
+ }
+
+ inline size_t Value::Hash::operator()(
+ const intrusive_ptr<const Value> &rV) const {
+ size_t seed = 0xf0afbeef;
+ rV->hash_combine(seed);
+ return seed;
+ }
+
+ inline ValueStatic::ValueStatic():
+ Value() {
+ }
+
+ inline ValueStatic::ValueStatic(BSONType type):
+ Value(type) {
+ }
+
+ inline ValueStatic::ValueStatic(bool boolValue):
+ Value(boolValue) {
+ }
+
+ inline ValueStatic::ValueStatic(int intValue):
+ Value(intValue) {
+ }
+
+}
diff --git a/src/mongo/db/projection.cpp b/src/mongo/db/projection.cpp
new file mode 100644
index 00000000000..d07e56527af
--- /dev/null
+++ b/src/mongo/db/projection.cpp
@@ -0,0 +1,301 @@
+// projection.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "projection.h"
+#include "../util/mongoutils/str.h"
+
+namespace mongo {
+
+ void Projection::init( const BSONObj& o ) {
+ massert( 10371 , "can only add to Projection once", _source.isEmpty());
+ _source = o;
+
+ BSONObjIterator i( o );
+ int true_false = -1;
+ while ( i.more() ) {
+ BSONElement e = i.next();
+
+ if ( ! e.isNumber() )
+ _hasNonSimple = true;
+
+ if (e.type() == Object) {
+ BSONObj obj = e.embeddedObject();
+ BSONElement e2 = obj.firstElement();
+ if ( strcmp(e2.fieldName(), "$slice") == 0 ) {
+ if (e2.isNumber()) {
+ int i = e2.numberInt();
+ if (i < 0)
+ add(e.fieldName(), i, -i); // limit is now positive
+ else
+ add(e.fieldName(), 0, i);
+
+ }
+ else if (e2.type() == Array) {
+ BSONObj arr = e2.embeddedObject();
+ uassert(13099, "$slice array wrong size", arr.nFields() == 2 );
+
+ BSONObjIterator it(arr);
+ int skip = it.next().numberInt();
+ int limit = it.next().numberInt();
+ uassert(13100, "$slice limit must be positive", limit > 0 );
+ add(e.fieldName(), skip, limit);
+
+ }
+ else {
+ uassert(13098, "$slice only supports numbers and [skip, limit] arrays", false);
+ }
+ }
+ else {
+ uassert(13097, string("Unsupported projection option: ") + obj.firstElementFieldName(), false);
+ }
+
+ }
+ else if (!strcmp(e.fieldName(), "_id") && !e.trueValue()) {
+ _includeID = false;
+
+ }
+ else {
+
+ add (e.fieldName(), e.trueValue());
+
+ // validate input
+ if (true_false == -1) {
+ true_false = e.trueValue();
+ _include = !e.trueValue();
+ }
+ else {
+ uassert( 10053 , "You cannot currently mix including and excluding fields. Contact us if this is an issue." ,
+ (bool)true_false == e.trueValue() );
+ }
+ }
+ }
+ }
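+    /*
+      Illustrative examples (not part of the original source): a spec of
+      { "x" : 1 } includes only _id and x in the output; { "_id" : 0, "x" : 1 }
+      drops _id as well; { "comments" : { "$slice" : -5 } } keeps only the
+      last five elements of the "comments" array.
+    */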
+
+ void Projection::add(const string& field, bool include) {
+ if (field.empty()) { // this is the field the user referred to
+ _include = include;
+ }
+ else {
+ _include = !include;
+
+ const size_t dot = field.find('.');
+ const string subfield = field.substr(0,dot);
+ const string rest = (dot == string::npos ? "" : field.substr(dot+1,string::npos));
+
+ boost::shared_ptr<Projection>& fm = _fields[subfield];
+ if (!fm)
+ fm.reset(new Projection());
+
+ fm->add(rest, include);
+ }
+ }
+
+ void Projection::add(const string& field, int skip, int limit) {
+ _special = true; // can't include or exclude whole object
+
+ if (field.empty()) { // this is the field the user referred to
+ _skip = skip;
+ _limit = limit;
+ }
+ else {
+ const size_t dot = field.find('.');
+ const string subfield = field.substr(0,dot);
+ const string rest = (dot == string::npos ? "" : field.substr(dot+1,string::npos));
+
+ boost::shared_ptr<Projection>& fm = _fields[subfield];
+ if (!fm)
+ fm.reset(new Projection());
+
+ fm->add(rest, skip, limit);
+ }
+ }
+
+ void Projection::transform( const BSONObj& in , BSONObjBuilder& b ) const {
+ BSONObjIterator i(in);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( mongoutils::str::equals( "_id" , e.fieldName() ) ) {
+ if ( _includeID )
+ b.append( e );
+ }
+ else {
+ append( b , e );
+ }
+ }
+ }
+
+ BSONObj Projection::transform( const BSONObj& in ) const {
+ BSONObjBuilder b;
+ transform( in , b );
+ return b.obj();
+ }
+
+
+    // "b" is built up as the value part of an array-typed BSONElement; "a" supplies the source array
+ void Projection::appendArray( BSONObjBuilder& b , const BSONObj& a , bool nested) const {
+ int skip = nested ? 0 : _skip;
+ int limit = nested ? -1 : _limit;
+
+ if (skip < 0) {
+ skip = max(0, skip + a.nFields());
+ }
+
+ int i=0;
+ BSONObjIterator it(a);
+ while (it.more()) {
+ BSONElement e = it.next();
+
+ if (skip) {
+ skip--;
+ continue;
+ }
+
+ if (limit != -1 && (limit-- == 0)) {
+ break;
+ }
+
+ switch(e.type()) {
+ case Array: {
+ BSONObjBuilder subb;
+ appendArray(subb , e.embeddedObject(), true);
+ b.appendArray(b.numStr(i++), subb.obj());
+ break;
+ }
+ case Object: {
+ BSONObjBuilder subb;
+ BSONObjIterator jt(e.embeddedObject());
+ while (jt.more()) {
+ append(subb , jt.next());
+ }
+ b.append(b.numStr(i++), subb.obj());
+ break;
+ }
+ default:
+ if (_include)
+ b.appendAs(e, b.numStr(i++));
+ }
+ }
+ }
+
+ void Projection::append( BSONObjBuilder& b , const BSONElement& e ) const {
+ FieldMap::const_iterator field = _fields.find( e.fieldName() );
+
+ if (field == _fields.end()) {
+ if (_include)
+ b.append(e);
+ }
+ else {
+ Projection& subfm = *field->second;
+
+ if ((subfm._fields.empty() && !subfm._special) || !(e.type()==Object || e.type()==Array) ) {
+ if (subfm._include)
+ b.append(e);
+ }
+ else if (e.type() == Object) {
+ BSONObjBuilder subb;
+ BSONObjIterator it(e.embeddedObject());
+ while (it.more()) {
+ subfm.append(subb, it.next());
+ }
+ b.append(e.fieldName(), subb.obj());
+
+ }
+ else { //Array
+ BSONObjBuilder subb;
+ subfm.appendArray(subb, e.embeddedObject());
+ b.appendArray(e.fieldName(), subb.obj());
+ }
+ }
+ }
+
+ Projection::KeyOnly* Projection::checkKey( const BSONObj& keyPattern ) const {
+ if ( _include ) {
+ // if we default to including then we can't
+ // use an index because we don't know what we're missing
+ return 0;
+ }
+
+ if ( _hasNonSimple )
+ return 0;
+
+ if ( _includeID && keyPattern["_id"].eoo() )
+ return 0;
+
+        // at this point we know it's all { x : 1 } style
+
+ auto_ptr<KeyOnly> p( new KeyOnly() );
+
+ int got = 0;
+ BSONObjIterator i( keyPattern );
+ while ( i.more() ) {
+ BSONElement k = i.next();
+
+ if ( _source[k.fieldName()].type() ) {
+
+ if ( strchr( k.fieldName() , '.' ) ) {
+ // TODO we currently don't support dotted fields
+ // SERVER-2104
+ return 0;
+ }
+
+ if ( ! _includeID && mongoutils::str::equals( k.fieldName() , "_id" ) ) {
+ p->addNo();
+ }
+ else {
+ p->addYes( k.fieldName() );
+ got++;
+ }
+ }
+ else if ( mongoutils::str::equals( "_id" , k.fieldName() ) && _includeID ) {
+ p->addYes( "_id" );
+ }
+ else {
+ p->addNo();
+ }
+
+ }
+
+ int need = _source.nFields();
+ if ( ! _includeID )
+ need--;
+
+ if ( got == need )
+ return p.release();
+
+ return 0;
+ }
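+    /*
+      Illustrative sketch (assumed usage): for a projection initialized with
+      { "a" : 1, "_id" : 0 } and an index key pattern { "a" : 1 }, checkKey()
+      returns a KeyOnly whose hydrate() can rebuild the projected document
+      directly from index keys, without fetching the full record.
+    */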
+
+ BSONObj Projection::KeyOnly::hydrate( const BSONObj& key ) const {
+ assert( _include.size() == _names.size() );
+
+ BSONObjBuilder b( key.objsize() + _stringSize + 16 );
+
+ BSONObjIterator i(key);
+ unsigned n=0;
+ while ( i.more() ) {
+ assert( n < _include.size() );
+ BSONElement e = i.next();
+ if ( _include[n] ) {
+ b.appendAs( e , _names[n] );
+ }
+ n++;
+ }
+
+ return b.obj();
+ }
+}
diff --git a/src/mongo/db/projection.h b/src/mongo/db/projection.h
new file mode 100644
index 00000000000..b5e0a0c4289
--- /dev/null
+++ b/src/mongo/db/projection.h
@@ -0,0 +1,129 @@
+// projection.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "pch.h"
+#include "jsobj.h"
+
+namespace mongo {
+
+ /**
+     * Given a document and a projection specification, transforms the
+     * document accordingly. Currently supports selecting which fields to
+     * include/exclude and the $slice operator.
+ */
+ class Projection {
+ public:
+
+ class KeyOnly {
+ public:
+
+ KeyOnly() : _stringSize(0) {}
+
+ BSONObj hydrate( const BSONObj& key ) const;
+
+ void addNo() { _add( false , "" ); }
+ void addYes( const string& name ) { _add( true , name ); }
+
+ private:
+
+ void _add( bool b , const string& name ) {
+ _include.push_back( b );
+ _names.push_back( name );
+ _stringSize += name.size();
+ }
+
+ vector<bool> _include; // one entry per field in key. true iff should be in output
+ vector<string> _names; // name of field since key doesn't have names
+
+ int _stringSize;
+ };
+
+ Projection() :
+ _include(true) ,
+ _special(false) ,
+ _includeID(true) ,
+ _skip(0) ,
+ _limit(-1) ,
+ _hasNonSimple(false) {
+ }
+
+ /**
+ * called once per lifetime
+ * e.g. { "x" : 1 , "a.y" : 1 }
+ */
+ void init( const BSONObj& spec );
+
+ /**
+ * @return the spec init was called with
+ */
+ BSONObj getSpec() const { return _source; }
+
+ /**
+ * transforms in according to spec
+ */
+ BSONObj transform( const BSONObj& in ) const;
+
+
+ /**
+ * transforms in according to spec
+ */
+ void transform( const BSONObj& in , BSONObjBuilder& b ) const;
+
+
+ /**
+         * @return a new KeyOnly if the keyPattern contains all the information
+         *         needed to build the result, otherwise null
+         * NOTE: an index key may store a modified form of the actual data
+         *       (arrays, geo), which has to be handled above this level
+ */
+ KeyOnly* checkKey( const BSONObj& keyPattern ) const;
+
+ bool includeID() const { return _includeID; }
+
+ private:
+
+ /**
+ * appends e to b if user wants it
+ * will descend into e if needed
+ */
+ void append( BSONObjBuilder& b , const BSONElement& e ) const;
+
+
+ void add( const string& field, bool include );
+ void add( const string& field, int skip, int limit );
+ void appendArray( BSONObjBuilder& b , const BSONObj& a , bool nested=false) const;
+
+ bool _include; // true if default at this level is to include
+ bool _special; // true if this level can't be skipped or included without recursing
+
+ //TODO: benchmark vector<pair> vs map
+ typedef map<string, boost::shared_ptr<Projection> > FieldMap;
+ FieldMap _fields;
+ BSONObj _source;
+ bool _includeID;
+
+ // used for $slice operator
+ int _skip;
+ int _limit;
+
+ bool _hasNonSimple;
+ };
+
+
+}
diff --git a/src/mongo/db/queryoptimizer.cpp b/src/mongo/db/queryoptimizer.cpp
new file mode 100644
index 00000000000..9d9040d51e2
--- /dev/null
+++ b/src/mongo/db/queryoptimizer.cpp
@@ -0,0 +1,1337 @@
+// @file queryoptimizer.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+
+#include "db.h"
+#include "btree.h"
+#include "pdfile.h"
+#include "queryoptimizer.h"
+#include "cmdline.h"
+#include "clientcursor.h"
+
+//#define DEBUGQO(x) cout << x << endl;
+#define DEBUGQO(x)
+
+namespace mongo {
+
+ void checkTableScanAllowed( const char * ns ) {
+ if ( ! cmdLine.noTableScan )
+ return;
+
+ if ( strstr( ns , ".system." ) ||
+ strstr( ns , "local." ) )
+ return;
+
+ if ( ! nsdetails( ns ) )
+ return;
+
+ uassert( 10111 , (string)"table scans not allowed:" + ns , ! cmdLine.noTableScan );
+ }
+
+ double elementDirection( const BSONElement &e ) {
+ if ( e.isNumber() )
+ return e.number();
+ return 1;
+ }
+
+ QueryPlan::QueryPlan(
+ NamespaceDetails *d, int idxNo,
+ const FieldRangeSetPair &frsp, const FieldRangeSetPair *originalFrsp, const BSONObj &originalQuery, const BSONObj &order, bool mustAssertOnYieldFailure, const BSONObj &startKey, const BSONObj &endKey , string special ) :
+ _d(d), _idxNo(idxNo),
+ _frs( frsp.frsForIndex( _d, _idxNo ) ),
+ _frsMulti( frsp.frsForIndex( _d, -1 ) ),
+ _originalQuery( originalQuery ),
+ _order( order ),
+ _index( 0 ),
+ _optimal( false ),
+ _scanAndOrderRequired( true ),
+ _exactKeyMatch( false ),
+ _direction( 0 ),
+ _endKeyInclusive( endKey.isEmpty() ),
+ _unhelpful( false ),
+ _impossible( false ),
+ _special( special ),
+ _type(0),
+ _startOrEndSpec( !startKey.isEmpty() || !endKey.isEmpty() ),
+ _mustAssertOnYieldFailure( mustAssertOnYieldFailure ) {
+
+ BSONObj idxKey = _idxNo < 0 ? BSONObj() : d->idx( _idxNo ).keyPattern();
+
+ if ( !_frs.matchPossibleForIndex( idxKey ) ) {
+ _impossible = true;
+ _scanAndOrderRequired = false;
+ return;
+ }
+
+ if ( willScanTable() ) {
+ if ( _order.isEmpty() || !strcmp( _order.firstElementFieldName(), "$natural" ) )
+ _scanAndOrderRequired = false;
+ return;
+ }
+
+ _index = &d->idx(_idxNo);
+
+ // If the parsing or index indicates this is a special query, don't continue the processing
+ if ( _special.size() ||
+ ( _index->getSpec().getType() && _index->getSpec().getType()->suitability( originalQuery, order ) != USELESS ) ) {
+
+ if( _special.size() ) _optimal = true;
+
+ _type = _index->getSpec().getType();
+ if( !_special.size() ) _special = _index->getSpec().getType()->getPlugin()->getName();
+
+ massert( 13040 , (string)"no type for special: " + _special , _type );
+ // hopefully safe to use original query in these contexts - don't think we can mix special with $or clause separation yet
+ _scanAndOrderRequired = _type->scanAndOrderRequired( _originalQuery , order );
+ return;
+ }
+
+ const IndexSpec &idxSpec = _index->getSpec();
+ BSONObjIterator o( order );
+ BSONObjIterator k( idxKey );
+ if ( !o.moreWithEOO() )
+ _scanAndOrderRequired = false;
+ while( o.moreWithEOO() ) {
+ BSONElement oe = o.next();
+ if ( oe.eoo() ) {
+ _scanAndOrderRequired = false;
+ break;
+ }
+ if ( !k.moreWithEOO() )
+ break;
+ BSONElement ke;
+ while( 1 ) {
+ ke = k.next();
+ if ( ke.eoo() )
+ goto doneCheckOrder;
+ if ( strcmp( oe.fieldName(), ke.fieldName() ) == 0 )
+ break;
+ if ( !_frs.range( ke.fieldName() ).equality() )
+ goto doneCheckOrder;
+ }
+ int d = elementDirection( oe ) == elementDirection( ke ) ? 1 : -1;
+ if ( _direction == 0 )
+ _direction = d;
+ else if ( _direction != d )
+ break;
+ }
+doneCheckOrder:
+ if ( _scanAndOrderRequired )
+ _direction = 0;
+ BSONObjIterator i( idxKey );
+ int exactIndexedQueryCount = 0;
+ int optimalIndexedQueryCount = 0;
+ bool stillOptimalIndexedQueryCount = true;
+ set<string> orderFieldsUnindexed;
+ order.getFieldNames( orderFieldsUnindexed );
+ while( i.moreWithEOO() ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ const FieldRange &fr = _frs.range( e.fieldName() );
+ if ( stillOptimalIndexedQueryCount ) {
+ if ( fr.nontrivial() )
+ ++optimalIndexedQueryCount;
+ if ( !fr.equality() )
+ stillOptimalIndexedQueryCount = false;
+ }
+ else {
+ if ( fr.nontrivial() )
+ optimalIndexedQueryCount = -1;
+ }
+ if ( fr.equality() ) {
+ BSONElement e = fr.max();
+ if ( !e.isNumber() && !e.mayEncapsulate() && e.type() != RegEx )
+ ++exactIndexedQueryCount;
+ }
+ orderFieldsUnindexed.erase( e.fieldName() );
+ }
+ if ( !_scanAndOrderRequired &&
+ ( optimalIndexedQueryCount == _frs.nNontrivialRanges() ) )
+ _optimal = true;
+ if ( exactIndexedQueryCount == _frs.nNontrivialRanges() &&
+ orderFieldsUnindexed.size() == 0 &&
+ exactIndexedQueryCount == idxKey.nFields() &&
+ exactIndexedQueryCount == _originalQuery.nFields() ) {
+ _exactKeyMatch = true;
+ }
+ _frv.reset( new FieldRangeVector( _frs, idxSpec, _direction ) );
+ if ( originalFrsp ) {
+ _originalFrv.reset( new FieldRangeVector( originalFrsp->frsForIndex( _d, _idxNo ), idxSpec, _direction ) );
+ }
+ else {
+ _originalFrv = _frv;
+ }
+ if ( _startOrEndSpec ) {
+ BSONObj newStart, newEnd;
+ if ( !startKey.isEmpty() )
+ _startKey = startKey;
+ else
+ _startKey = _frv->startKey();
+ if ( !endKey.isEmpty() )
+ _endKey = endKey;
+ else
+ _endKey = _frv->endKey();
+ }
+
+ if ( ( _scanAndOrderRequired || _order.isEmpty() ) &&
+ !_frs.range( idxKey.firstElementFieldName() ).nontrivial() ) {
+ _unhelpful = true;
+ }
+ }
+
+ shared_ptr<Cursor> QueryPlan::newCursor( const DiskLoc &startLoc , int numWanted ) const {
+
+ if ( _type ) {
+ // hopefully safe to use original query in these contexts - don't think we can mix type with $or clause separation yet
+ return _type->newCursor( _originalQuery , _order , numWanted );
+ }
+
+ if ( _impossible ) {
+ // TODO We might want to allow this dummy table scan even in no table
+ // scan mode, since it won't scan anything.
+ if ( _frs.nNontrivialRanges() )
+ checkTableScanAllowed( _frs.ns() );
+ return shared_ptr<Cursor>( new BasicCursor( DiskLoc() ) );
+ }
+
+ if ( willScanTable() ) {
+ if ( _frs.nNontrivialRanges() ) {
+ checkTableScanAllowed( _frs.ns() );
+
+ // if we are doing a table scan on _id
+ // and it's a capped collection
+                // we warn (rather than disallow) as it's a common user error
+ // .system. and local collections are exempt
+ if ( _d && _d->capped && _frs.range( "_id" ).nontrivial() ) {
+ if ( cc().isSyncThread() ||
+ str::contains( _frs.ns() , ".system." ) ||
+ str::startsWith( _frs.ns() , "local." ) ) {
+ // ok
+ }
+ else {
+                        warning() << "_id query on capped collection without an _id index; performance will be poor. collection: " << _frs.ns() << endl;
+ //uassert( 14820, str::stream() << "doing _id query on a capped collection without an index is not allowed: " << _frs.ns() ,
+ }
+ }
+ }
+ return findTableScan( _frs.ns(), _order, startLoc );
+ }
+
+ massert( 10363 , "newCursor() with start location not implemented for indexed plans", startLoc.isNull() );
+
+ if ( _startOrEndSpec ) {
+            // _endKeyInclusive was already set appropriately in the constructor when the start/end keys were specified
+ return shared_ptr<Cursor>( BtreeCursor::make( _d, _idxNo, *_index, _startKey, _endKey, _endKeyInclusive, _direction >= 0 ? 1 : -1 ) );
+ }
+ else if ( _index->getSpec().getType() ) {
+ return shared_ptr<Cursor>( BtreeCursor::make( _d, _idxNo, *_index, _frv->startKey(), _frv->endKey(), true, _direction >= 0 ? 1 : -1 ) );
+ }
+ else {
+ return shared_ptr<Cursor>( BtreeCursor::make( _d, _idxNo, *_index, _frv, _direction >= 0 ? 1 : -1 ) );
+ }
+ }
+
+ shared_ptr<Cursor> QueryPlan::newReverseCursor() const {
+ if ( willScanTable() ) {
+ int orderSpec = _order.getIntField( "$natural" );
+ if ( orderSpec == INT_MIN )
+ orderSpec = 1;
+ return findTableScan( _frs.ns(), BSON( "$natural" << -orderSpec ) );
+ }
+ massert( 10364 , "newReverseCursor() not implemented for indexed plans", false );
+ return shared_ptr<Cursor>();
+ }
+
+ BSONObj QueryPlan::indexKey() const {
+ if ( !_index )
+ return BSON( "$natural" << 1 );
+ return _index->keyPattern();
+ }
+
+ void QueryPlan::registerSelf( long long nScanned ) const {
+ // Impossible query constraints can be detected before scanning, and we
+ // don't have a reserved pattern enum value for impossible constraints.
+ if ( _impossible ) {
+ return;
+ }
+
+ SimpleMutex::scoped_lock lk(NamespaceDetailsTransient::_qcMutex);
+ NamespaceDetailsTransient::get_inlock( ns() ).registerIndexForPattern( _frs.pattern( _order ), indexKey(), nScanned );
+ }
+
+ /**
+ * @return a copy of the inheriting class, which will be run with its own
+ * query plan. If multiple plan sets are required for an $or query, the
+ * QueryOp of the winning plan from a given set will be cloned to generate
+ * QueryOps for the subsequent plan set. This function should only be called
+ * after the query op has completed executing.
+ */
+ QueryOp *QueryOp::createChild() {
+ if( _orConstraint.get() ) {
+ _matcher->advanceOrClause( _orConstraint );
+ _orConstraint.reset();
+ }
+ QueryOp *ret = _createChild();
+ ret->_oldMatcher = _matcher;
+ return ret;
+ }
+
+ bool QueryPlan::isMultiKey() const {
+ if ( _idxNo < 0 )
+ return false;
+ return _d->isMultikey( _idxNo );
+ }
+
+ void QueryOp::init() {
+ if ( _oldMatcher.get() ) {
+ _matcher.reset( _oldMatcher->nextClauseMatcher( qp().indexKey() ) );
+ }
+ else {
+ _matcher.reset( new CoveredIndexMatcher( qp().originalQuery(), qp().indexKey(), alwaysUseRecord() ) );
+ }
+ _init();
+ }
+
+ QueryPlanSet::QueryPlanSet( const char *ns, auto_ptr<FieldRangeSetPair> frsp, auto_ptr<FieldRangeSetPair> originalFrsp, const BSONObj &originalQuery, const BSONObj &order, bool mustAssertOnYieldFailure, const BSONElement *hint, bool honorRecordedPlan, const BSONObj &min, const BSONObj &max, bool bestGuessOnly, bool mayYield ) :
+ _ns(ns),
+ _originalQuery( originalQuery ),
+ _frsp( frsp ),
+ _originalFrsp( originalFrsp ),
+ _mayRecordPlan( false ),
+ _usingCachedPlan( false ),
+ _hint( BSONObj() ),
+ _order( order.getOwned() ),
+ _oldNScanned( 0 ),
+ _honorRecordedPlan( honorRecordedPlan ),
+ _min( min.getOwned() ),
+ _max( max.getOwned() ),
+ _bestGuessOnly( bestGuessOnly ),
+ _mayYield( mayYield ),
+ _yieldSometimesTracker( 256, 20 ),
+ _mustAssertOnYieldFailure( mustAssertOnYieldFailure ) {
+ if ( hint && !hint->eoo() ) {
+ _hint = hint->wrap();
+ }
+ init();
+ }
+
+ bool QueryPlanSet::modifiedKeys() const {
+ for( PlanSet::const_iterator i = _plans.begin(); i != _plans.end(); ++i )
+ if ( (*i)->isMultiKey() )
+ return true;
+ return false;
+ }
+
+ bool QueryPlanSet::hasMultiKey() const {
+ for( PlanSet::const_iterator i = _plans.begin(); i != _plans.end(); ++i )
+ if ( (*i)->isMultiKey() )
+ return true;
+ return false;
+ }
+
+
+ void QueryPlanSet::addHint( IndexDetails &id ) {
+ if ( !_min.isEmpty() || !_max.isEmpty() ) {
+ string errmsg;
+ BSONObj keyPattern = id.keyPattern();
+ // This reformats _min and _max to be used for index lookup.
+ massert( 10365 , errmsg, indexDetailsForRange( _frsp->ns(), errmsg, _min, _max, keyPattern ) );
+ }
+ NamespaceDetails *d = nsdetails(_ns);
+ _plans.push_back( QueryPlanPtr( new QueryPlan( d, d->idxNo(id), *_frsp, _originalFrsp.get(), _originalQuery, _order, _mustAssertOnYieldFailure, _min, _max ) ) );
+ }
+
+ // returns an IndexDetails * for a hint, 0 if hint is $natural.
+ // hint must not be eoo()
+ IndexDetails *parseHint( const BSONElement &hint, NamespaceDetails *d ) {
+ massert( 13292, "hint eoo", !hint.eoo() );
+ if( hint.type() == String ) {
+ string hintstr = hint.valuestr();
+ NamespaceDetails::IndexIterator i = d->ii();
+ while( i.more() ) {
+ IndexDetails& ii = i.next();
+ if ( ii.indexName() == hintstr ) {
+ return &ii;
+ }
+ }
+ }
+ else if( hint.type() == Object ) {
+ BSONObj hintobj = hint.embeddedObject();
+ uassert( 10112 , "bad hint", !hintobj.isEmpty() );
+ if ( !strcmp( hintobj.firstElementFieldName(), "$natural" ) ) {
+ return 0;
+ }
+ NamespaceDetails::IndexIterator i = d->ii();
+ while( i.more() ) {
+ IndexDetails& ii = i.next();
+ if( ii.keyPattern().woCompare(hintobj) == 0 ) {
+ return &ii;
+ }
+ }
+ }
+ uassert( 10113 , "bad hint", false );
+ return 0;
+ }
+
+ void QueryPlanSet::init() {
+        DEBUGQO( "QueryPlanSet::init " << _ns << "\t" << _originalQuery );
+ _runner.reset();
+ _plans.clear();
+ _usingCachedPlan = false;
+
+ const char *ns = _frsp->ns();
+ NamespaceDetails *d = nsdetails( ns );
+ if ( !d || !_frsp->matchPossible() ) {
+ // Table scan plan, when no matches are possible
+ _plans.push_back( QueryPlanPtr( new QueryPlan( d, -1, *_frsp, _originalFrsp.get(), _originalQuery, _order, _mustAssertOnYieldFailure ) ) );
+ return;
+ }
+
+ BSONElement hint = _hint.firstElement();
+ if ( !hint.eoo() ) {
+ IndexDetails *id = parseHint( hint, d );
+ if ( id ) {
+ addHint( *id );
+ }
+ else {
+ massert( 10366 , "natural order cannot be specified with $min/$max", _min.isEmpty() && _max.isEmpty() );
+ // Table scan plan
+ _plans.push_back( QueryPlanPtr( new QueryPlan( d, -1, *_frsp, _originalFrsp.get(), _originalQuery, _order, _mustAssertOnYieldFailure ) ) );
+ }
+ return;
+ }
+
+ if ( !_min.isEmpty() || !_max.isEmpty() ) {
+ string errmsg;
+ BSONObj keyPattern;
+ IndexDetails *idx = indexDetailsForRange( ns, errmsg, _min, _max, keyPattern );
+ massert( 10367 , errmsg, idx );
+ _plans.push_back( QueryPlanPtr( new QueryPlan( d, d->idxNo(*idx), *_frsp, _originalFrsp.get(), _originalQuery, _order, _mustAssertOnYieldFailure, _min, _max ) ) );
+ return;
+ }
+
+ if ( isSimpleIdQuery( _originalQuery ) ) {
+ int idx = d->findIdIndex();
+ if ( idx >= 0 ) {
+ _plans.push_back( QueryPlanPtr( new QueryPlan( d , idx , *_frsp , _originalFrsp.get() , _originalQuery, _order, _mustAssertOnYieldFailure ) ) );
+ return;
+ }
+ }
+
+ if ( _originalQuery.isEmpty() && _order.isEmpty() ) {
+ _plans.push_back( QueryPlanPtr( new QueryPlan( d, -1, *_frsp, _originalFrsp.get(), _originalQuery, _order, _mustAssertOnYieldFailure ) ) );
+ return;
+ }
+
+ DEBUGQO( "\t special : " << _frsp->getSpecial() );
+ if ( _frsp->getSpecial().size() ) {
+ _special = _frsp->getSpecial();
+ NamespaceDetails::IndexIterator i = d->ii();
+ while( i.more() ) {
+ int j = i.pos();
+ IndexDetails& ii = i.next();
+ const IndexSpec& spec = ii.getSpec();
+ if ( spec.getTypeName() == _special && spec.suitability( _originalQuery , _order ) ) {
+ _plans.push_back( QueryPlanPtr( new QueryPlan( d , j , *_frsp , _originalFrsp.get() , _originalQuery, _order ,
+ _mustAssertOnYieldFailure , BSONObj() , BSONObj() , _special ) ) );
+ return;
+ }
+ }
+ uassert( 13038 , (string)"can't find special index: " + _special + " for: " + _originalQuery.toString() , 0 );
+ }
+
+ if ( _honorRecordedPlan ) {
+ pair< BSONObj, long long > best = QueryUtilIndexed::bestIndexForPatterns( *_frsp, _order );
+ BSONObj bestIndex = best.first;
+ long long oldNScanned = best.second;
+ if ( !bestIndex.isEmpty() ) {
+ QueryPlanPtr p;
+ _oldNScanned = oldNScanned;
+ if ( !strcmp( bestIndex.firstElementFieldName(), "$natural" ) ) {
+ // Table scan plan
+ p.reset( new QueryPlan( d, -1, *_frsp, _originalFrsp.get(), _originalQuery, _order, _mustAssertOnYieldFailure ) );
+ }
+
+ NamespaceDetails::IndexIterator i = d->ii();
+ while( i.more() ) {
+ int j = i.pos();
+ IndexDetails& ii = i.next();
+ if( ii.keyPattern().woCompare(bestIndex) == 0 ) {
+ p.reset( new QueryPlan( d, j, *_frsp, _originalFrsp.get(), _originalQuery, _order, _mustAssertOnYieldFailure ) );
+ }
+ }
+
+ massert( 10368 , "Unable to locate previously recorded index", p.get() );
+ if ( !( _bestGuessOnly && p->scanAndOrderRequired() ) ) {
+ _usingCachedPlan = true;
+ _plans.push_back( p );
+ return;
+ }
+ }
+ }
+
+ addOtherPlans( false );
+ }
+
+ void QueryPlanSet::addOtherPlans( bool checkFirst ) {
+ const char *ns = _frsp->ns();
+ NamespaceDetails *d = nsdetails( ns );
+ if ( !d )
+ return;
+
+ // If table scan is optimal or natural order requested or tailable cursor requested
+ if ( !_frsp->matchPossible() || ( _frsp->noNontrivialRanges() && _order.isEmpty() ) ||
+ ( !_order.isEmpty() && !strcmp( _order.firstElementFieldName(), "$natural" ) ) ) {
+ // Table scan plan
+ addPlan( QueryPlanPtr( new QueryPlan( d, -1, *_frsp, _originalFrsp.get(), _originalQuery, _order, _mustAssertOnYieldFailure ) ), checkFirst );
+ return;
+ }
+
+ bool normalQuery = _hint.isEmpty() && _min.isEmpty() && _max.isEmpty();
+
+ PlanSet plans;
+ QueryPlanPtr optimalPlan;
+ QueryPlanPtr specialPlan;
+ for( int i = 0; i < d->nIndexes; ++i ) {
+ if ( normalQuery ) {
+ BSONObj keyPattern = d->idx( i ).keyPattern();
+ if ( !_frsp->matchPossibleForIndex( d, i, keyPattern ) ) {
+                    // If no match is possible, only generate a trivial plan that won't
+ // scan any documents.
+ QueryPlanPtr p( new QueryPlan( d, i, *_frsp, _originalFrsp.get(), _originalQuery, _order, _mustAssertOnYieldFailure ) );
+ addPlan( p, checkFirst );
+ return;
+ }
+ if ( !QueryUtilIndexed::indexUseful( *_frsp, d, i, _order ) ) {
+ continue;
+ }
+ }
+
+ QueryPlanPtr p( new QueryPlan( d, i, *_frsp, _originalFrsp.get(), _originalQuery, _order, _mustAssertOnYieldFailure ) );
+ if ( p->optimal() ) {
+ if ( !optimalPlan.get() ) {
+ optimalPlan = p;
+ }
+ }
+ else if ( !p->unhelpful() ) {
+ if ( p->special().empty() ) {
+ plans.push_back( p );
+ }
+ else {
+ specialPlan = p;
+ }
+ }
+ }
+ if ( optimalPlan.get() ) {
+ addPlan( optimalPlan, checkFirst );
+ return;
+ }
+ for( PlanSet::const_iterator i = plans.begin(); i != plans.end(); ++i ) {
+ addPlan( *i, checkFirst );
+ }
+
+ // Only add a special plan if no standard btree plans have been added. SERVER-4531
+ if ( plans.empty() && specialPlan ) {
+ addPlan( specialPlan, checkFirst );
+ return;
+ }
+
+ // Table scan plan
+ addPlan( QueryPlanPtr( new QueryPlan( d, -1, *_frsp, _originalFrsp.get(), _originalQuery, _order, _mustAssertOnYieldFailure ) ), checkFirst );
+ _mayRecordPlan = true;
+ }
+
+ shared_ptr<QueryOp> QueryPlanSet::runOp( QueryOp &op ) {
+ if ( _usingCachedPlan ) {
+ Runner r( *this, op );
+ shared_ptr<QueryOp> res = r.runUntilFirstCompletes();
+ // _plans.size() > 1 if addOtherPlans was called in Runner::runUntilFirstCompletes().
+ if ( _bestGuessOnly || res->complete() || _plans.size() > 1 )
+ return res;
+ // A cached plan was used, so clear the plan for this query pattern and retry the query without a cached plan.
+            // Careful here, as the namespace may have been dropped.
+ QueryUtilIndexed::clearIndexesForPatterns( *_frsp, _order );
+ init();
+ }
+ Runner r( *this, op );
+ return r.runUntilFirstCompletes();
+ }
+
+ shared_ptr<QueryOp> QueryPlanSet::nextOp( QueryOp &originalOp, bool retried ) {
+ if ( !_runner ) {
+ _runner.reset( new Runner( *this, originalOp ) );
+ shared_ptr<QueryOp> op = _runner->init();
+ if ( op->complete() ) {
+ return op;
+ }
+ }
+ shared_ptr<QueryOp> op = _runner->nextNonError();
+ if ( !op->error() ) {
+ return op;
+ }
+ if ( !_usingCachedPlan || _bestGuessOnly || _plans.size() > 1 ) {
+ return op;
+ }
+
+ // Avoid an infinite loop here - this should never occur.
+ verify( 15878, !retried );
+
+ // A cached plan was used, so clear the plan for this query pattern and retry the query without a cached plan.
+ QueryUtilIndexed::clearIndexesForPatterns( *_frsp, _order );
+ init();
+ return nextOp( originalOp, true );
+ }
+
+ bool QueryPlanSet::prepareToYield() {
+ return _runner ? _runner->prepareToYield() : true;
+ }
+
+ void QueryPlanSet::recoverFromYield() {
+ if ( _runner ) {
+ _runner->recoverFromYield();
+ }
+ }
+
+ void QueryPlanSet::clearRunner() {
+ if ( _runner ) {
+ _runner.reset();
+ }
+ }
+
+ BSONObj QueryPlanSet::explain() const {
+ vector<BSONObj> arr;
+ for( PlanSet::const_iterator i = _plans.begin(); i != _plans.end(); ++i ) {
+ shared_ptr<Cursor> c = (*i)->newCursor();
+ BSONObjBuilder explain;
+ explain.append( "cursor", c->toString() );
+ explain.append( "indexBounds", c->prettyIndexBounds() );
+ arr.push_back( explain.obj() );
+ }
+ BSONObjBuilder b;
+ b.append( "allPlans", arr );
+ return b.obj();
+ }
+
+ QueryPlanSet::QueryPlanPtr QueryPlanSet::getBestGuess() const {
+ assert( _plans.size() );
+ if ( _plans[ 0 ]->scanAndOrderRequired() ) {
+ for ( unsigned i=1; i<_plans.size(); i++ ) {
+ if ( ! _plans[i]->scanAndOrderRequired() )
+ return _plans[i];
+ }
+
+ warning() << "best guess query plan requested, but scan and order are required for all plans "
+ << " query: " << _originalQuery
+ << " order: " << _order
+ << " choices: ";
+
+ for ( unsigned i=0; i<_plans.size(); i++ )
+ warning() << _plans[i]->indexKey() << " ";
+ warning() << endl;
+
+ return QueryPlanPtr();
+ }
+ return _plans[0];
+ }
+
+ QueryPlanSet::Runner::Runner( QueryPlanSet &plans, QueryOp &op ) :
+ _op( op ),
+ _plans( plans ) {
+ }
+
+ bool QueryPlanSet::Runner::prepareToYield() {
+ for( vector<shared_ptr<QueryOp> >::const_iterator i = _ops.begin(); i != _ops.end(); ++i ) {
+ if ( !prepareToYieldOp( **i ) ) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ void QueryPlanSet::Runner::recoverFromYield() {
+ for( vector<shared_ptr<QueryOp> >::const_iterator i = _ops.begin(); i != _ops.end(); ++i ) {
+ recoverFromYieldOp( **i );
+ }
+ }
+
+ void QueryPlanSet::Runner::mayYield() {
+ if ( ! _plans._mayYield )
+ return;
+
+ if ( ! _plans._yieldSometimesTracker.intervalHasElapsed() )
+ return;
+
+ int micros = ClientCursor::suggestYieldMicros();
+ if ( micros <= 0 )
+ return;
+
+ if ( !prepareToYield() )
+ return;
+
+ ClientCursor::staticYield( micros , _plans._ns , 0 );
+ recoverFromYield();
+ }
+
+ shared_ptr<QueryOp> QueryPlanSet::Runner::init() {
+ massert( 10369 , "no plans", _plans._plans.size() > 0 );
+
+ if ( _plans._bestGuessOnly ) {
+ shared_ptr<QueryOp> op( _op.createChild() );
+ shared_ptr<QueryPlan> plan = _plans.getBestGuess();
+ massert( 15894, "no index matches QueryPlanSet's sort with _bestGuessOnly", plan.get() );
+ op->setQueryPlan( plan.get() );
+ _ops.push_back( op );
+ }
+ else {
+ if ( _plans._plans.size() > 1 )
+ log(1) << " running multiple plans" << endl;
+ for( PlanSet::iterator i = _plans._plans.begin(); i != _plans._plans.end(); ++i ) {
+ shared_ptr<QueryOp> op( _op.createChild() );
+ op->setQueryPlan( i->get() );
+ _ops.push_back( op );
+ }
+ }
+
+ // Initialize ops.
+ for( vector<shared_ptr<QueryOp> >::iterator i = _ops.begin(); i != _ops.end(); ++i ) {
+ initOp( **i );
+ if ( (*i)->complete() )
+ return *i;
+ }
+
+ // Put runnable ops in the priority queue.
+ for( vector<shared_ptr<QueryOp> >::iterator i = _ops.begin(); i != _ops.end(); ++i ) {
+ if ( !(*i)->error() ) {
+ _queue.push( *i );
+ }
+ }
+
+ return *_ops.begin();
+ }
+
+ shared_ptr<QueryOp> QueryPlanSet::Runner::nextNonError() {
+ if ( _queue.empty() ) {
+ return *_ops.begin();
+ }
+ shared_ptr<QueryOp> ret;
+ do {
+ ret = next();
+ } while( ret->error() && !_queue.empty() );
+ return ret;
+ }
+
+ shared_ptr<QueryOp> QueryPlanSet::Runner::next() {
+ mayYield();
+ dassert( !_queue.empty() );
+ OpHolder holder = _queue.pop();
+ QueryOp &op = *holder._op;
+ nextOp( op );
+ if ( op.complete() ) {
+ if ( _plans._mayRecordPlan && op.mayRecordPlan() ) {
+ op.qp().registerSelf( op.nscanned() );
+ }
+ return holder._op;
+ }
+ if ( op.error() ) {
+ return holder._op;
+ }
+ if ( !_plans._bestGuessOnly && _plans._usingCachedPlan && op.nscanned() > _plans._oldNScanned * 10 && _plans._special.empty() ) {
+ holder._offset = -op.nscanned();
+ _plans.addOtherPlans( /* avoid duplicating the initial plan */ true );
+ PlanSet::iterator i = _plans._plans.begin();
+ ++i;
+ for( ; i != _plans._plans.end(); ++i ) {
+ shared_ptr<QueryOp> op( _op.createChild() );
+ op->setQueryPlan( i->get() );
+ _ops.push_back( op );
+ initOp( *op );
+ if ( op->complete() )
+ return op;
+ _queue.push( op );
+ }
+ _plans._usingCachedPlan = false;
+ }
+ _queue.push( holder );
+ return holder._op;
+ }
+
+ shared_ptr<QueryOp> QueryPlanSet::Runner::runUntilFirstCompletes() {
+ shared_ptr<QueryOp> potentialFinisher = init();
+ if ( potentialFinisher->complete() ) {
+ return potentialFinisher;
+ }
+
+ while( !_queue.empty() ) {
+ shared_ptr<QueryOp> potentialFinisher = next();
+ if ( potentialFinisher->complete() ) {
+ return potentialFinisher;
+ }
+ }
+ return _ops[ 0 ];
+ }
+
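+// Evaluates 'expression', converting any exception thrown into an error recorded on 'op'
+// via setException(), so that a failure in one candidate plan does not abort the others.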
+#define GUARD_OP_EXCEPTION( op, expression ) \
+ try { \
+ expression; \
+ } \
+ catch ( DBException& e ) { \
+ op.setException( e.getInfo() ); \
+ } \
+ catch ( const std::exception &e ) { \
+ op.setException( ExceptionInfo( e.what() , 0 ) ); \
+ } \
+ catch ( ... ) { \
+ op.setException( ExceptionInfo( "Caught unknown exception" , 0 ) ); \
+ }
+
+
+ void QueryPlanSet::Runner::initOp( QueryOp &op ) {
+ GUARD_OP_EXCEPTION( op, op.init() );
+ }
+
+ void QueryPlanSet::Runner::nextOp( QueryOp &op ) {
+ GUARD_OP_EXCEPTION( op, if ( !op.error() ) { op.next(); } );
+ }
+
+ bool QueryPlanSet::Runner::prepareToYieldOp( QueryOp &op ) {
+ GUARD_OP_EXCEPTION( op,
+ if ( op.error() ) {
+ return true;
+ }
+ else {
+ return op.prepareToYield();
+ } );
+ return true;
+ }
+
+ void QueryPlanSet::Runner::recoverFromYieldOp( QueryOp &op ) {
+ GUARD_OP_EXCEPTION( op, if ( !op.error() ) { op.recoverFromYield(); } );
+ }
+
+ /**
+ * NOTE on our $or implementation: In our current qo implementation we don't
+ * keep statistics on our data, but we can conceptualize the problem of
+ * selecting an index when statistics exist for all index ranges. The
+ * d-hitting set problem on k sets and n elements can be reduced to the
+ * problem of index selection on k $or clauses and n index ranges (where
+ * d is the max number of indexes, and the number of ranges n is unbounded).
+ * In light of the fact that d-hitting set is np complete, and we don't even
+ * track statistics (so cost calculations are expensive) our first
+ * implementation uses the following greedy approach: We take one $or clause
+ * at a time and treat each as a separate query for index selection purposes.
+ * But if an index range is scanned for a particular $or clause, we eliminate
+ * that range from all subsequent clauses. One could imagine an opposite
+ * implementation where we select indexes based on the union of index ranges
+ * for all $or clauses, but this can have much poorer worst case behavior.
+ * (An index range that suits one $or clause may not suit another, and this
+ * is worse than the typical case of index range choice staleness because
+ * with $or the clauses may likely be logically distinct.) The greedy
+ * implementation won't do any worse than all the $or clauses individually,
+ * and it can often do better. In the first cut we are intentionally using
+ * QueryPattern tracking to record successful plans on $or clauses for use by
+ * subsequent $or clauses, even though there may be a significant aggregate
+ * $nor component that would not be represented in QueryPattern.
+ */
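+
+    /*
+     * Illustrative sketch of the greedy approach described above (added for
+     * documentation only; the collection, indexes and values are hypothetical).
+     * Suppose a collection has indexes { a: 1 } and { b: 1 } and we run:
+     *
+     *     find( { $or: [ { a: { $gt: 0, $lt: 10 } }, { a: { $gt: 5, $lt: 20 } } ] } )
+     *
+     * Clause 1 is planned in isolation and may scan index { a: 1 } over (0,10).
+     * Before clause 2 is planned, the (0,10) range already scanned on { a: 1 } is
+     * subtracted from its constraints, so clause 2 only needs to cover a in [10,20)
+     * and never rescans keys already returned for clause 1.
+     */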
+
+ MultiPlanScanner::MultiPlanScanner( const char *ns,
+ const BSONObj &query,
+ const BSONObj &order,
+ const BSONElement *hint,
+ bool honorRecordedPlan,
+ const BSONObj &min,
+ const BSONObj &max,
+ bool bestGuessOnly,
+ bool mayYield ) :
+ _ns( ns ),
+ _or( !query.getField( "$or" ).eoo() ),
+ _query( query.getOwned() ),
+ _i(),
+ _honorRecordedPlan( honorRecordedPlan ),
+ _bestGuessOnly( bestGuessOnly ),
+ _hint( ( hint && !hint->eoo() ) ? hint->wrap() : BSONObj() ),
+ _mayYield( mayYield ),
+ _tableScanned() {
+ if ( !order.isEmpty() || !min.isEmpty() || !max.isEmpty() ) {
+ _or = false;
+ }
+ if ( _or ) {
+ // Only construct an OrRangeGenerator if we may handle $or clauses.
+ _org.reset( new OrRangeGenerator( ns, _query ) );
+ if ( !_org->getSpecial().empty() ) {
+ _or = false;
+ }
+ else if ( uselessOr( _hint.firstElement() ) ) {
+ _or = false;
+ }
+ }
+ // if _or == false, don't use or clauses for index selection
+ if ( !_or ) {
+ auto_ptr<FieldRangeSetPair> frsp( new FieldRangeSetPair( ns, _query, true ) );
+ _currentQps.reset( new QueryPlanSet( ns, frsp, auto_ptr<FieldRangeSetPair>(), _query, order, false, hint, honorRecordedPlan, min, max, _bestGuessOnly, _mayYield ) );
+ }
+ else {
+ BSONElement e = _query.getField( "$or" );
+ massert( 13268, "invalid $or spec", e.type() == Array && e.embeddedObject().nFields() > 0 );
+ }
+ }
+
+ shared_ptr<QueryOp> MultiPlanScanner::runOpOnce( QueryOp &op ) {
+ assertMayRunMore();
+ if ( !_or ) {
+ ++_i;
+ return _currentQps->runOp( op );
+ }
+ ++_i;
+ auto_ptr<FieldRangeSetPair> frsp( _org->topFrsp() );
+ auto_ptr<FieldRangeSetPair> originalFrsp( _org->topFrspOriginal() );
+ BSONElement hintElt = _hint.firstElement();
+ _currentQps.reset( new QueryPlanSet( _ns, frsp, originalFrsp, _query, BSONObj(), true, &hintElt, _honorRecordedPlan, BSONObj(), BSONObj(), _bestGuessOnly, _mayYield ) );
+ shared_ptr<QueryOp> ret( _currentQps->runOp( op ) );
+ if ( ! ret->complete() )
+ throw MsgAssertionException( ret->exception() );
+ if ( ret->qp().willScanTable() ) {
+ _tableScanned = true;
+ } else {
+            // The table was not scanned in full, so pop the $or clause covered by this plan.
+ _org->popOrClause( ret->qp().nsd(), ret->qp().idxNo(), ret->qp().indexed() ? ret->qp().indexKey() : BSONObj() );
+ }
+ return ret;
+ }
+
+ shared_ptr<QueryOp> MultiPlanScanner::runOp( QueryOp &op ) {
+ shared_ptr<QueryOp> ret = runOpOnce( op );
+ while( !ret->stopRequested() && mayRunMore() ) {
+ ret = runOpOnce( *ret );
+ }
+ return ret;
+ }
+
+ shared_ptr<QueryOp> MultiPlanScanner::nextOpHandleEndOfClause() {
+ shared_ptr<QueryOp> op = _currentQps->nextOp( *_baseOp );
+ if ( !op->complete() ) {
+ return op;
+ }
+ if ( op->qp().willScanTable() ) {
+ _tableScanned = true;
+ } else {
+ _org->popOrClause( op->qp().nsd(), op->qp().idxNo(), op->qp().indexed() ? op->qp().indexKey() : BSONObj() );
+ }
+ return op;
+ }
+
+ shared_ptr<QueryOp> MultiPlanScanner::nextOpBeginningClause() {
+ assertMayRunMore();
+ shared_ptr<QueryOp> op;
+ while( mayRunMore() ) {
+ ++_i;
+ auto_ptr<FieldRangeSetPair> frsp( _org->topFrsp() );
+ auto_ptr<FieldRangeSetPair> originalFrsp( _org->topFrspOriginal() );
+ BSONElement hintElt = _hint.firstElement();
+ _currentQps.reset( new QueryPlanSet( _ns, frsp, originalFrsp, _query, BSONObj(), true, &hintElt, _honorRecordedPlan, BSONObj(), BSONObj(), _bestGuessOnly, _mayYield ) );
+ op = nextOpHandleEndOfClause();
+ if ( !op->complete() ) {
+ return op;
+ }
+ _baseOp = op;
+ }
+ return op;
+ }
+
+ shared_ptr<QueryOp> MultiPlanScanner::nextOp() {
+ if ( !_or ) {
+ if ( _i == 0 ) {
+ assertMayRunMore();
+ ++_i;
+ }
+ return _currentQps->nextOp( *_baseOp );
+ }
+ if ( _i == 0 ) {
+ return nextOpBeginningClause();
+ }
+ shared_ptr<QueryOp> op = nextOpHandleEndOfClause();
+ if ( !op->complete() ) {
+ return op;
+ }
+ if ( !op->stopRequested() && mayRunMore() ) {
+ // Finished scanning the clause, but stop hasn't been requested.
+ // Start scanning the next clause.
+ _baseOp = op;
+ return nextOpBeginningClause();
+ }
+ return op;
+ }
+
+ bool MultiPlanScanner::prepareToYield() {
+ return _currentQps.get() ? _currentQps->prepareToYield() : true;
+ }
+
+ void MultiPlanScanner::recoverFromYield() {
+ if ( _currentQps.get() ) {
+ _currentQps->recoverFromYield();
+ }
+ }
+
+ void MultiPlanScanner::clearRunner() {
+ if ( _currentQps.get() ) {
+ _currentQps->clearRunner();
+ }
+ }
+
+ int MultiPlanScanner::currentNPlans() const {
+ return _currentQps.get() ? _currentQps->nPlans() : 0;
+ }
+
+ shared_ptr<Cursor> MultiPlanScanner::singleCursor() const {
+ const QueryPlan *qp = singlePlan();
+ if ( !qp ) {
+ return shared_ptr<Cursor>();
+ }
+ // If there is only one plan and it does not require an in memory
+ // sort, we do not expect its cursor op to throw an exception and
+ // so do not need a QueryOptimizerCursor to handle this case.
+ return qp->newCursor();
+ }
+
+ const QueryPlan *MultiPlanScanner::singlePlan() const {
+ if ( _or || _currentQps->nPlans() != 1 || _currentQps->firstPlan()->scanAndOrderRequired() || _currentQps->usingCachedPlan() ) {
+ return 0;
+ }
+ return _currentQps->firstPlan().get();
+ }
+
+ bool MultiPlanScanner::uselessOr( const BSONElement &hint ) const {
+ NamespaceDetails *nsd = nsdetails( _ns );
+ if ( !nsd ) {
+ return true;
+ }
+ if ( !hint.eoo() ) {
+ IndexDetails *id = parseHint( hint, nsd );
+ if ( !id ) {
+ return true;
+ }
+ return QueryUtilIndexed::uselessOr( *_org, nsd, nsd->idxNo( *id ) );
+ }
+ return QueryUtilIndexed::uselessOr( *_org, nsd, -1 );
+ }
+
+ MultiCursor::MultiCursor( const char *ns, const BSONObj &pattern, const BSONObj &order, shared_ptr<CursorOp> op, bool mayYield )
+ : _mps( new MultiPlanScanner( ns, pattern, order, 0, true, BSONObj(), BSONObj(), !op.get(), mayYield ) ), _nscanned() {
+ if ( op.get() ) {
+ _op = op;
+ }
+ else {
+ _op.reset( new NoOp() );
+ }
+ if ( _mps->mayRunMore() ) {
+ nextClause();
+ if ( !ok() ) {
+ advance();
+ }
+ }
+ else {
+ _c.reset( new BasicCursor( DiskLoc() ) );
+ }
+ }
+
+ MultiCursor::MultiCursor( auto_ptr<MultiPlanScanner> mps, const shared_ptr<Cursor> &c, const shared_ptr<CoveredIndexMatcher> &matcher, const QueryOp &op, long long nscanned )
+ : _op( new NoOp( op ) ), _c( c ), _mps( mps ), _matcher( matcher ), _nscanned( nscanned ) {
+ _mps->setBestGuessOnly();
+ _mps->mayYield( false ); // with a NoOp, there's no need to yield in QueryPlanSet
+ if ( !ok() ) {
+ // would have been advanced by UserQueryOp if possible
+ advance();
+ }
+ }
+
+ void MultiCursor::nextClause() {
+ if ( _nscanned >= 0 && _c.get() ) {
+ _nscanned += _c->nscanned();
+ }
+ shared_ptr<CursorOp> best = _mps->runOpOnce( *_op );
+ if ( ! best->complete() )
+ throw MsgAssertionException( best->exception() );
+ _c = best->newCursor();
+ _matcher = best->matcher( _c );
+ _op = best;
+ }
+
+ bool indexWorks( const BSONObj &idxPattern, const BSONObj &sampleKey, int direction, int firstSignificantField ) {
+ BSONObjIterator p( idxPattern );
+ BSONObjIterator k( sampleKey );
+ int i = 0;
+ while( 1 ) {
+ BSONElement pe = p.next();
+ BSONElement ke = k.next();
+ if ( pe.eoo() && ke.eoo() )
+ return true;
+ if ( pe.eoo() || ke.eoo() )
+ return false;
+ if ( strcmp( pe.fieldName(), ke.fieldName() ) != 0 )
+ return false;
+ if ( ( i == firstSignificantField ) && !( ( direction > 0 ) == ( pe.number() > 0 ) ) )
+ return false;
+ ++i;
+ }
+ return false;
+ }
+
+ BSONObj extremeKeyForIndex( const BSONObj &idxPattern, int baseDirection ) {
+ BSONObjIterator i( idxPattern );
+ BSONObjBuilder b;
+ while( i.moreWithEOO() ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ int idxDirection = e.number() >= 0 ? 1 : -1;
+ int direction = idxDirection * baseDirection;
+ switch( direction ) {
+ case 1:
+ b.appendMaxKey( e.fieldName() );
+ break;
+ case -1:
+ b.appendMinKey( e.fieldName() );
+ break;
+ default:
+ assert( false );
+ }
+ }
+ return b.obj();
+ }
+
+ pair<int,int> keyAudit( const BSONObj &min, const BSONObj &max ) {
+ int direction = 0;
+ int firstSignificantField = 0;
+ BSONObjIterator i( min );
+ BSONObjIterator a( max );
+ while( 1 ) {
+ BSONElement ie = i.next();
+ BSONElement ae = a.next();
+ if ( ie.eoo() && ae.eoo() )
+ break;
+ if ( ie.eoo() || ae.eoo() || strcmp( ie.fieldName(), ae.fieldName() ) != 0 ) {
+ return make_pair( -1, -1 );
+ }
+ int cmp = ie.woCompare( ae );
+ if ( cmp < 0 )
+ direction = 1;
+ if ( cmp > 0 )
+ direction = -1;
+ if ( direction != 0 )
+ break;
+ ++firstSignificantField;
+ }
+ return make_pair( direction, firstSignificantField );
+ }
+
+ pair<int,int> flexibleKeyAudit( const BSONObj &min, const BSONObj &max ) {
+ if ( min.isEmpty() || max.isEmpty() ) {
+ return make_pair( 1, -1 );
+ }
+ else {
+ return keyAudit( min, max );
+ }
+ }
+
+ // NOTE min, max, and keyPattern will be updated to be consistent with the selected index.
+ IndexDetails *indexDetailsForRange( const char *ns, string &errmsg, BSONObj &min, BSONObj &max, BSONObj &keyPattern ) {
+ if ( min.isEmpty() && max.isEmpty() ) {
+ errmsg = "one of min or max must be specified";
+ return 0;
+ }
+
+ Client::Context ctx( ns );
+ IndexDetails *id = 0;
+ NamespaceDetails *d = nsdetails( ns );
+ if ( !d ) {
+ errmsg = "ns not found";
+ return 0;
+ }
+
+ pair<int,int> ret = flexibleKeyAudit( min, max );
+ if ( ret == make_pair( -1, -1 ) ) {
+ errmsg = "min and max keys do not share pattern";
+ return 0;
+ }
+ if ( keyPattern.isEmpty() ) {
+ NamespaceDetails::IndexIterator i = d->ii();
+ while( i.more() ) {
+ IndexDetails& ii = i.next();
+ if ( indexWorks( ii.keyPattern(), min.isEmpty() ? max : min, ret.first, ret.second ) ) {
+ if ( ii.getSpec().getType() == 0 ) {
+ id = &ii;
+ keyPattern = ii.keyPattern();
+ break;
+ }
+ }
+ }
+
+ }
+ else {
+ if ( !indexWorks( keyPattern, min.isEmpty() ? max : min, ret.first, ret.second ) ) {
+ errmsg = "requested keyPattern does not match specified keys";
+ return 0;
+ }
+ NamespaceDetails::IndexIterator i = d->ii();
+ while( i.more() ) {
+ IndexDetails& ii = i.next();
+ if( ii.keyPattern().woCompare(keyPattern) == 0 ) {
+ id = &ii;
+ break;
+ }
+ if ( keyPattern.nFields() == 1 && ii.keyPattern().nFields() == 1 &&
+ IndexDetails::isIdIndexPattern( keyPattern ) &&
+ ii.isIdIndex() ) {
+ id = &ii;
+ break;
+ }
+
+ }
+ }
+
+ if ( min.isEmpty() ) {
+ min = extremeKeyForIndex( keyPattern, -1 );
+ }
+ else if ( max.isEmpty() ) {
+ max = extremeKeyForIndex( keyPattern, 1 );
+ }
+
+ if ( !id ) {
+ errmsg = str::stream() << "no index found for specified keyPattern: " << keyPattern.toString()
+ << " min: " << min << " max: " << max;
+ return 0;
+ }
+
+ min = min.extractFieldsUnDotted( keyPattern );
+ max = max.extractFieldsUnDotted( keyPattern );
+
+ return id;
+ }
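+
+    // Illustrative behavior of indexDetailsForRange() (documentation sketch; the
+    // values are hypothetical): given min = { a: 5 }, an empty max and an empty
+    // keyPattern, an index such as { a: 1 } is selected, keyPattern becomes
+    // { a: 1 }, max is filled in as { a: MaxKey }, and both bounds are reduced to
+    // index-key form with extractFieldsUnDotted().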
+
+ bool isSimpleIdQuery( const BSONObj& query ) {
+ BSONObjIterator i(query);
+
+ if( !i.more() )
+ return false;
+
+ BSONElement e = i.next();
+
+ if( i.more() )
+ return false;
+
+ if( strcmp("_id", e.fieldName()) != 0 )
+ return false;
+
+ if ( e.isSimpleType() ) // e.g. not something like { _id : { $gt : ...
+ return true;
+
+ if ( e.type() == Object )
+ return e.Obj().firstElementFieldName()[0] != '$';
+
+ return false;
+ }
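+
+    // Illustrative results for isSimpleIdQuery() (examples added for documentation,
+    // not present in the original source):
+    //     { _id: 3 }            -> true   (single field, simple value)
+    //     { _id: { $gt: 3 } }   -> false  (operator expression)
+    //     { _id: 3, a: 1 }      -> false  (more than one field)
+    //     { _id: { a: 1 } }     -> true   (embedded object whose first field is not an operator)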
+
+ shared_ptr<Cursor> bestGuessCursor( const char *ns, const BSONObj &query, const BSONObj &sort ) {
+ if( !query.getField( "$or" ).eoo() ) {
+ return shared_ptr<Cursor>( new MultiCursor( ns, query, sort ) );
+ }
+ else {
+ auto_ptr<FieldRangeSetPair> frsp( new FieldRangeSetPair( ns, query, true ) );
+ auto_ptr<FieldRangeSetPair> origFrsp( new FieldRangeSetPair( *frsp ) );
+
+ QueryPlanSet qps( ns, frsp, origFrsp, query, sort, false );
+ QueryPlanSet::QueryPlanPtr qpp = qps.getBestGuess();
+ if( ! qpp.get() ) return shared_ptr<Cursor>();
+
+ shared_ptr<Cursor> ret = qpp->newCursor();
+
+ // If we don't already have a matcher, supply one.
+ if ( !query.isEmpty() && ! ret->matcher() ) {
+ shared_ptr<CoveredIndexMatcher> matcher( new CoveredIndexMatcher( query, ret->indexKeyPattern() ) );
+ ret->setMatcher( matcher );
+ }
+ return ret;
+ }
+ }
+
+ bool QueryUtilIndexed::indexUseful( const FieldRangeSetPair &frsp, NamespaceDetails *d, int idxNo, const BSONObj &order ) {
+ DEV frsp.assertValidIndex( d, idxNo );
+ BSONObj keyPattern = d->idx( idxNo ).keyPattern();
+ if ( !frsp.matchPossibleForIndex( d, idxNo, keyPattern ) ) {
+            // No matches are possible using this index, so the index is trivially useful: a plan over it returns an empty result without scanning.
+ return true;
+ }
+ return d->idx( idxNo ).getSpec().suitability( frsp.simplifiedQueryForIndex( d, idxNo, keyPattern ), order ) != USELESS;
+ }
+
+ void QueryUtilIndexed::clearIndexesForPatterns( const FieldRangeSetPair &frsp, const BSONObj &order ) {
+ SimpleMutex::scoped_lock lk(NamespaceDetailsTransient::_qcMutex);
+ NamespaceDetailsTransient& nsd = NamespaceDetailsTransient::get_inlock( frsp.ns() );
+ nsd.registerIndexForPattern( frsp._singleKey.pattern( order ), BSONObj(), 0 );
+ nsd.registerIndexForPattern( frsp._multiKey.pattern( order ), BSONObj(), 0 );
+ }
+
+ pair< BSONObj, long long > QueryUtilIndexed::bestIndexForPatterns( const FieldRangeSetPair &frsp, const BSONObj &order ) {
+ SimpleMutex::scoped_lock lk(NamespaceDetailsTransient::_qcMutex);
+ NamespaceDetailsTransient& nsd = NamespaceDetailsTransient::get_inlock( frsp.ns() );
+ // TODO Maybe it would make sense to return the index with the lowest
+ // nscanned if there are two possibilities.
+ if ( frsp._singleKey.matchPossible() ) {
+ QueryPattern pattern = frsp._singleKey.pattern( order );
+ BSONObj oldIdx = nsd.indexForPattern( pattern );
+ if ( !oldIdx.isEmpty() ) {
+ long long oldNScanned = nsd.nScannedForPattern( pattern );
+ return make_pair( oldIdx, oldNScanned );
+ }
+ }
+ if ( frsp._multiKey.matchPossible() ) {
+ QueryPattern pattern = frsp._multiKey.pattern( order );
+ BSONObj oldIdx = nsd.indexForPattern( pattern );
+ if ( !oldIdx.isEmpty() ) {
+ long long oldNScanned = nsd.nScannedForPattern( pattern );
+ return make_pair( oldIdx, oldNScanned );
+ }
+ }
+ return make_pair( BSONObj(), 0 );
+ }
+
+ bool QueryUtilIndexed::uselessOr( const OrRangeGenerator &org, NamespaceDetails *d, int hintIdx ) {
+ for( list<FieldRangeSetPair>::const_iterator i = org._originalOrSets.begin(); i != org._originalOrSets.end(); ++i ) {
+ if ( hintIdx != -1 ) {
+ if ( !indexUseful( *i, d, hintIdx, BSONObj() ) ) {
+ return true;
+ }
+ }
+ else {
+ bool useful = false;
+ for( int j = 0; j < d->nIndexes; ++j ) {
+ if ( indexUseful( *i, d, j, BSONObj() ) ) {
+ useful = true;
+ break;
+ }
+ }
+ if ( !useful ) {
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+} // namespace mongo
diff --git a/src/mongo/db/queryoptimizer.h b/src/mongo/db/queryoptimizer.h
new file mode 100644
index 00000000000..297c6fe9505
--- /dev/null
+++ b/src/mongo/db/queryoptimizer.h
@@ -0,0 +1,599 @@
+// @file queryoptimizer.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "cursor.h"
+#include "jsobj.h"
+#include "queryutil.h"
+#include "matcher.h"
+#include "../util/net/listen.h"
+#include <queue>
+
+namespace mongo {
+
+ class IndexDetails;
+ class IndexType;
+ class ElapsedTracker;
+
+ /** A plan for executing a query using the given index spec and FieldRangeSet. */
+ class QueryPlan : boost::noncopyable {
+ public:
+
+ /**
+ * @param originalFrsp - original constraints for this query clause. If null, frsp will be used instead.
+ */
+ QueryPlan(NamespaceDetails *d,
+ int idxNo, // -1 = no index
+ const FieldRangeSetPair &frsp,
+ const FieldRangeSetPair *originalFrsp,
+ const BSONObj &originalQuery,
+ const BSONObj &order,
+ bool mustAssertOnYieldFailure = true,
+ const BSONObj &startKey = BSONObj(),
+ const BSONObj &endKey = BSONObj(),
+ string special="" );
+
+ /** @return true iff no other plans should be considered. */
+ bool optimal() const { return _optimal; }
+        /** @return true iff this plan should not be considered at all. */
+ bool unhelpful() const { return _unhelpful; }
+ /** @return true iff ScanAndOrder processing will be required for result set. */
+ bool scanAndOrderRequired() const { return _scanAndOrderRequired; }
+ /**
+ * @return true iff the index we are using has keys such that it can completely resolve the
+ * query expression to match by itself without ever checking the main object.
+ */
+ bool exactKeyMatch() const { return _exactKeyMatch; }
+ /** @return true iff this QueryPlan would perform an unindexed scan. */
+ bool willScanTable() const { return _idxNo < 0 && !_impossible; }
+ /** @return 'special' attribute of the plan, which was either set explicitly or generated from the index. */
+ const string &special() const { return _special; }
+
+ /** @return a new cursor based on this QueryPlan's index and FieldRangeSet. */
+ shared_ptr<Cursor> newCursor( const DiskLoc &startLoc = DiskLoc() , int numWanted=0 ) const;
+ /** @return a new reverse cursor if this is an unindexed plan. */
+ shared_ptr<Cursor> newReverseCursor() const;
+ /** Register this plan as a winner for its QueryPattern, with specified 'nscanned'. */
+ void registerSelf( long long nScanned ) const;
+
+ int direction() const { return _direction; }
+ BSONObj indexKey() const;
+ bool indexed() const { return _index; }
+ int idxNo() const { return _idxNo; }
+ const char *ns() const { return _frs.ns(); }
+ NamespaceDetails *nsd() const { return _d; }
+ BSONObj originalQuery() const { return _originalQuery; }
+ BSONObj simplifiedQuery( const BSONObj& fields = BSONObj() ) const { return _frs.simplifiedQuery( fields ); }
+ const FieldRange &range( const char *fieldName ) const { return _frs.range( fieldName ); }
+ shared_ptr<FieldRangeVector> originalFrv() const { return _originalFrv; }
+
+ const FieldRangeSet &multikeyFrs() const { return _frsMulti; }
+
+ bool mustAssertOnYieldFailure() const { return _mustAssertOnYieldFailure; }
+
+ /** The following member functions are just for testing. */
+
+ shared_ptr<FieldRangeVector> frv() const { return _frv; }
+ bool isMultiKey() const;
+
+ private:
+ NamespaceDetails * _d;
+ int _idxNo;
+ const FieldRangeSet &_frs;
+ const FieldRangeSet &_frsMulti;
+ const BSONObj &_originalQuery;
+ const BSONObj &_order;
+ const IndexDetails * _index;
+ bool _optimal;
+ bool _scanAndOrderRequired;
+ bool _exactKeyMatch;
+ int _direction;
+ shared_ptr<FieldRangeVector> _frv;
+ shared_ptr<FieldRangeVector> _originalFrv;
+ BSONObj _startKey;
+ BSONObj _endKey;
+ bool _endKeyInclusive;
+ bool _unhelpful;
+ bool _impossible;
+ string _special;
+ IndexType * _type;
+ bool _startOrEndSpec;
+ bool _mustAssertOnYieldFailure;
+ };
+
+ /**
+ * Inherit from this interface to implement a new query operation.
+ * The query optimizer will clone the QueryOp that is provided, giving
+ * each clone its own query plan.
+ *
+ * Normal sequence of events:
+ * 1) A new QueryOp is generated using createChild().
+ * 2) A QueryPlan is assigned to this QueryOp with setQueryPlan().
+     * 3) _init() is called on the QueryOp.
+ * 4) next() is called repeatedly, with nscanned() checked after each call.
+ * 5) In one of these calls to next(), setComplete() is called.
+ * 6) The QueryPattern for the QueryPlan may be recorded as a winner.
+ */
+ class QueryOp {
+ public:
+ QueryOp() : _complete(), _stopRequested(), _qp(), _error() {}
+
+ /** Used when handing off from one QueryOp to another. */
+ QueryOp( const QueryOp &other ) :
+ _complete(), _stopRequested(), _qp(), _error(), _matcher( other._matcher ),
+ _orConstraint( other._orConstraint ) {}
+
+ virtual ~QueryOp() {}
+
+ /** @return QueryPlan assigned to this QueryOp by the query optimizer. */
+ const QueryPlan &qp() const { return *_qp; }
+
+ /** Advance to next potential matching document (eg using a cursor). */
+ virtual void next() = 0;
+ /**
+ * @return current 'nscanned' metric for this QueryOp. Used to compare
+ * cost to other QueryOps.
+ */
+ virtual long long nscanned() = 0;
+ /** Take any steps necessary before the db mutex is yielded. */
+ virtual bool prepareToYield() { massert( 13335, "yield not supported", false ); return false; }
+ /** Recover once the db mutex is regained. */
+ virtual void recoverFromYield() { massert( 13336, "yield not supported", false ); }
+
+ /**
+ * @return true iff the QueryPlan for this QueryOp may be registered
+ * as a winning plan.
+ */
+ virtual bool mayRecordPlan() const = 0;
+
+ /** @return true iff the implementation called setComplete() or setStop(). */
+ bool complete() const { return _complete; }
+        /** @return true iff the implementation called setStop(). */
+ bool stopRequested() const { return _stopRequested; }
+ /** @return true iff the implementation threw an exception. */
+ bool error() const { return _error; }
+ /** @return the exception thrown by implementation if one was thrown. */
+ ExceptionInfo exception() const { return _exception; }
+
+ /** To be called by QueryPlanSet::Runner only. */
+
+ QueryOp *createChild();
+ void setQueryPlan( const QueryPlan *qp ) { _qp = qp; assert( _qp != NULL ); }
+ void init();
+ void setException( const DBException &e ) {
+ _error = true;
+ _exception = e.getInfo();
+ }
+
+ shared_ptr<CoveredIndexMatcher> matcher( const shared_ptr<Cursor>& c ) const {
+ return matcher( c.get() );
+ }
+ shared_ptr<CoveredIndexMatcher> matcher( Cursor* c ) const {
+ if( ! c ) return _matcher;
+ return c->matcher() ? c->matcherPtr() : _matcher;
+ }
+
+ protected:
+ /** Call if all results have been found. */
+ void setComplete() {
+ _orConstraint = qp().originalFrv();
+ _complete = true;
+ }
+ /** Call if the scan is complete even if not all results have been found. */
+ void setStop() { setComplete(); _stopRequested = true; }
+
+ /** Handle initialization after a QueryPlan has been set. */
+ virtual void _init() = 0;
+
+ /** @return a copy of the inheriting class, which will be run with its own query plan. */
+ virtual QueryOp *_createChild() const = 0;
+
+ virtual bool alwaysUseRecord() const { return false; }
+
+ private:
+ bool _complete;
+ bool _stopRequested;
+ ExceptionInfo _exception;
+ const QueryPlan *_qp;
+ bool _error;
+ shared_ptr<CoveredIndexMatcher> _matcher;
+ shared_ptr<CoveredIndexMatcher> _oldMatcher;
+ shared_ptr<FieldRangeVector> _orConstraint;
+ };
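+
+    /*
+     * Minimal illustrative QueryOp subclass (a documentation sketch, not part of the
+     * original interface), following the lifecycle described above:
+     *
+     *     class WalkOp : public QueryOp {
+     *     public:
+     *         virtual void _init() { _c = qp().newCursor(); }      // after setQueryPlan()
+     *         virtual void next() {
+     *             if ( !_c->ok() ) { setComplete(); return; }      // all results found
+     *             _c->advance();
+     *         }
+     *         virtual long long nscanned() { return _c.get() ? _c->nscanned() : 0; }
+     *         virtual bool mayRecordPlan() const { return true; }  // allow the winner to be cached
+     *         virtual QueryOp *_createChild() const { return new WalkOp(); }
+     *     private:
+     *         shared_ptr<Cursor> _c;
+     *     };
+     */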
+
+    // Temporary. Unlike a regular STL priority queue, this class still works if T::operator<
+    // gives varying results over time. It is very slow (a linear scan per pop), but if v.size()
+    // is always very small it is fine, and maybe even faster than a smarter implementation
+    // that does more memory allocations.
+ template<class T>
+ class our_priority_queue : boost::noncopyable {
+ vector<T> v;
+ public:
+ our_priority_queue() {
+ v.reserve(4);
+ }
+ int size() const { return v.size(); }
+ bool empty() const { return v.empty(); }
+ void push(const T & x) {
+ v.push_back(x);
+ }
+ T pop() {
+ size_t t = 0;
+ for( size_t i = 1; i < v.size(); i++ ) {
+ if( v[t] < v[i] )
+ t = i;
+ }
+ T ret = v[t];
+ v.erase(v.begin()+t);
+ return ret;
+ }
+ };
+
+ /**
+     * A set of candidate query plans for a query. This class can return a best guess plan or run a
+ * QueryOp on all the plans.
+ */
+ class QueryPlanSet {
+ public:
+
+ typedef boost::shared_ptr<QueryPlan> QueryPlanPtr;
+ typedef vector<QueryPlanPtr> PlanSet;
+
+ /**
+ * @param originalFrsp - original constraints for this query clause; if null, frsp will be used.
+ */
+ QueryPlanSet( const char *ns,
+ auto_ptr<FieldRangeSetPair> frsp,
+ auto_ptr<FieldRangeSetPair> originalFrsp,
+ const BSONObj &originalQuery,
+ const BSONObj &order,
+ bool mustAssertOnYieldFailure = true,
+ const BSONElement *hint = 0,
+ bool honorRecordedPlan = true,
+ const BSONObj &min = BSONObj(),
+ const BSONObj &max = BSONObj(),
+ bool bestGuessOnly = false,
+ bool mayYield = false);
+
+ /** @return number of candidate plans. */
+ int nPlans() const { return _plans.size(); }
+
+ /**
+ * Clone op for each query plan, and @return the first cloned op to call
+ * setComplete() or setStop().
+ */
+
+ shared_ptr<QueryOp> runOp( QueryOp &op );
+ template<class T>
+ shared_ptr<T> runOp( T &op ) {
+ return dynamic_pointer_cast<T>( runOp( static_cast<QueryOp&>( op ) ) );
+ }
+
+ /** Initialize or iterate a runner generated from @param originalOp. */
+ shared_ptr<QueryOp> nextOp( QueryOp &originalOp, bool retried = false );
+
+ /** Yield the runner member. */
+
+ bool prepareToYield();
+ void recoverFromYield();
+
+ /** Clear the runner member. */
+ void clearRunner();
+
+ QueryPlanPtr firstPlan() const { return _plans[ 0 ]; }
+
+ /** @return metadata about cursors and index bounds for all plans, suitable for explain output. */
+ BSONObj explain() const;
+ /** @return true iff a plan is selected based on previous success of this plan. */
+ bool usingCachedPlan() const { return _usingCachedPlan; }
+ /** @return a single plan that may work well for the specified query. */
+ QueryPlanPtr getBestGuess() const;
+
+ //for testing
+ const FieldRangeSetPair &frsp() const { return *_frsp; }
+ const FieldRangeSetPair *originalFrsp() const { return _originalFrsp.get(); }
+ bool modifiedKeys() const;
+ bool hasMultiKey() const;
+
+ private:
+ void addOtherPlans( bool checkFirst );
+ void addPlan( QueryPlanPtr plan, bool checkFirst ) {
+ if ( checkFirst && plan->indexKey().woCompare( _plans[ 0 ]->indexKey() ) == 0 )
+ return;
+ _plans.push_back( plan );
+ }
+ void init();
+ void addHint( IndexDetails &id );
+ class Runner {
+ public:
+ Runner( QueryPlanSet &plans, QueryOp &op );
+
+ /**
+ * Iterate interactively through candidate documents on all plans.
+ * QueryOp objects are returned at each interleaved step.
+ */
+
+            /** @return an op that has completed, otherwise an arbitrary op. */
+ shared_ptr<QueryOp> init();
+ /**
+             * Move the Runner forward one iteration, and @return the op for
+             * this iteration.
+ */
+ shared_ptr<QueryOp> next();
+ /** @return next non error op if there is one, otherwise an error op. */
+ shared_ptr<QueryOp> nextNonError();
+
+ bool prepareToYield();
+ void recoverFromYield();
+
+ /** Run until first op completes. */
+ shared_ptr<QueryOp> runUntilFirstCompletes();
+
+ void mayYield();
+ QueryOp &_op;
+ QueryPlanSet &_plans;
+ static void initOp( QueryOp &op );
+ static void nextOp( QueryOp &op );
+ static bool prepareToYieldOp( QueryOp &op );
+ static void recoverFromYieldOp( QueryOp &op );
+ private:
+ vector<shared_ptr<QueryOp> > _ops;
+ struct OpHolder {
+ OpHolder( const shared_ptr<QueryOp> &op ) : _op( op ), _offset() {}
+ shared_ptr<QueryOp> _op;
+ long long _offset;
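+                // Note: the comparison is intentionally inverted -- the op with the
+                // fewest documents scanned (nscanned + offset) compares as "largest",
+                // so our_priority_queue::pop() always advances the least-progressed
+                // plan next.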
+ bool operator<( const OpHolder &other ) const {
+ return _op->nscanned() + _offset > other._op->nscanned() + other._offset;
+ }
+ };
+ our_priority_queue<OpHolder> _queue;
+ };
+
+ const char *_ns;
+ BSONObj _originalQuery;
+ auto_ptr<FieldRangeSetPair> _frsp;
+ auto_ptr<FieldRangeSetPair> _originalFrsp;
+ PlanSet _plans;
+ bool _mayRecordPlan;
+ bool _usingCachedPlan;
+ BSONObj _hint;
+ BSONObj _order;
+ long long _oldNScanned;
+ bool _honorRecordedPlan;
+ BSONObj _min;
+ BSONObj _max;
+ string _special;
+ bool _bestGuessOnly;
+ bool _mayYield;
+ ElapsedTracker _yieldSometimesTracker;
+ shared_ptr<Runner> _runner;
+ bool _mustAssertOnYieldFailure;
+ };
+
+ /** Handles $or type queries by generating a QueryPlanSet for each $or clause. */
+ class MultiPlanScanner {
+ public:
+ MultiPlanScanner( const char *ns,
+ const BSONObj &query,
+ const BSONObj &order,
+ const BSONElement *hint = 0,
+ bool honorRecordedPlan = true,
+ const BSONObj &min = BSONObj(),
+ const BSONObj &max = BSONObj(),
+ bool bestGuessOnly = false,
+ bool mayYield = false);
+
+ /**
+ * Clone op for each query plan of a single $or clause, and @return the first cloned op
+ * to call setComplete() or setStop().
+ */
+
+ shared_ptr<QueryOp> runOpOnce( QueryOp &op );
+ template<class T>
+ shared_ptr<T> runOpOnce( T &op ) {
+ return dynamic_pointer_cast<T>( runOpOnce( static_cast<QueryOp&>( op ) ) );
+ }
+
+ /**
+ * For each $or clause, calls runOpOnce on the child QueryOp cloned from the winning QueryOp
+ * of the previous $or clause (or from the supplied 'op' for the first $or clause).
+ */
+
+ shared_ptr<QueryOp> runOp( QueryOp &op );
+ template<class T>
+ shared_ptr<T> runOp( T &op ) {
+ return dynamic_pointer_cast<T>( runOp( static_cast<QueryOp&>( op ) ) );
+ }
+
+ /** Initialize or iterate a runner generated from @param originalOp. */
+
+ void initialOp( const shared_ptr<QueryOp> &originalOp ) { _baseOp = originalOp; }
+ shared_ptr<QueryOp> nextOp();
+
+ /** Yield the runner member. */
+
+ bool prepareToYield();
+ void recoverFromYield();
+
+ /** Clear the runner member. */
+ void clearRunner();
+
+ int currentNPlans() const;
+
+ /**
+ * @return a single simple cursor if the scanner would run a single cursor
+ * for this query, otherwise return an empty shared_ptr.
+ */
+ shared_ptr<Cursor> singleCursor() const;
+
+ /**
+ * @return the query plan that would be used if the scanner would run a single
+ * cursor for this query, otherwise 0. The returned plan is invalid if this
+ * MultiPlanScanner is destroyed, hence we return a raw pointer.
+ */
+ const QueryPlan *singlePlan() const;
+
+ /** @return true iff more $or clauses need to be scanned. */
+ bool mayRunMore() const { return _or ? ( !_tableScanned && !_org->orFinished() ) : _i == 0; }
+ /** @return non-$or version of explain output. */
+ BSONObj oldExplain() const { assertNotOr(); return _currentQps->explain(); }
+ /** @return true iff this is not a $or query and a plan is selected based on previous success of this plan. */
+ bool usingCachedPlan() const { return !_or && _currentQps->usingCachedPlan(); }
+ /** Don't attempt to scan multiple plans, just use the best guess. */
+ void setBestGuessOnly() { _bestGuessOnly = true; }
+        /** Set whether yielding is allowed while running each QueryPlan. */
+ void mayYield( bool val ) { _mayYield = val; }
+ bool modifiedKeys() const { return _currentQps->modifiedKeys(); }
+ bool hasMultiKey() const { return _currentQps->hasMultiKey(); }
+
+ private:
+ void assertNotOr() const {
+ massert( 13266, "not implemented for $or query", !_or );
+ }
+ void assertMayRunMore() const {
+ massert( 13271, "can't run more ops", mayRunMore() );
+ }
+ shared_ptr<QueryOp> nextOpBeginningClause();
+ shared_ptr<QueryOp> nextOpHandleEndOfClause();
+ bool uselessOr( const BSONElement &hint ) const;
+ const char * _ns;
+ bool _or;
+ BSONObj _query;
+ shared_ptr<OrRangeGenerator> _org; // May be null in certain non $or query cases.
+ auto_ptr<QueryPlanSet> _currentQps;
+ int _i;
+ bool _honorRecordedPlan;
+ bool _bestGuessOnly;
+ BSONObj _hint;
+ bool _mayYield;
+ bool _tableScanned;
+ shared_ptr<QueryOp> _baseOp;
+ };
+
+ /** Provides a cursor interface for certain limited uses of a MultiPlanScanner. */
+ class MultiCursor : public Cursor {
+ public:
+ class CursorOp : public QueryOp {
+ public:
+ CursorOp() {}
+ CursorOp( const QueryOp &other ) : QueryOp( other ) {}
+ virtual shared_ptr<Cursor> newCursor() const = 0;
+ };
+ /** takes ownership of 'op' */
+ MultiCursor( const char *ns, const BSONObj &pattern, const BSONObj &order, shared_ptr<CursorOp> op = shared_ptr<CursorOp>(), bool mayYield = false );
+ /**
+ * Used
+         * 1. To hand off a query to a getMore()
+         * 2. To hand off a QueryOptimizerCursor
+         * @param nscanned is an optional initial value; if not supplied, nscanned()
+         * will always return -1
+ */
+ MultiCursor( auto_ptr<MultiPlanScanner> mps, const shared_ptr<Cursor> &c, const shared_ptr<CoveredIndexMatcher> &matcher, const QueryOp &op, long long nscanned = -1 );
+
+ virtual bool ok() { return _c->ok(); }
+ virtual Record* _current() { return _c->_current(); }
+ virtual BSONObj current() { return _c->current(); }
+ virtual DiskLoc currLoc() { return _c->currLoc(); }
+ virtual bool advance() {
+ _c->advance();
+ while( !ok() && _mps->mayRunMore() ) {
+ nextClause();
+ }
+ return ok();
+ }
+ virtual BSONObj currKey() const { return _c->currKey(); }
+ virtual DiskLoc refLoc() { return _c->refLoc(); }
+ virtual void noteLocation() { _c->noteLocation(); }
+ virtual void checkLocation() { _c->checkLocation(); }
+ virtual bool supportGetMore() { return true; }
+ virtual bool supportYields() { return _c->supportYields(); }
+ virtual BSONObj indexKeyPattern() { return _c->indexKeyPattern(); }
+
+ /**
+ * with update we could potentially get the same document on multiple
+ * indexes, but update appears to already handle this with seenObjects
+ * so we don't have to do anything special here.
+ */
+ virtual bool getsetdup(DiskLoc loc) { return _c->getsetdup( loc ); }
+
+ virtual bool autoDedup() const { return _c->autoDedup(); }
+
+ virtual bool modifiedKeys() const { return _mps->modifiedKeys(); }
+
+ virtual bool isMultiKey() const { return _mps->hasMultiKey(); }
+
+ virtual shared_ptr< CoveredIndexMatcher > matcherPtr() const { return _matcher; }
+ virtual CoveredIndexMatcher* matcher() const { return _matcher.get(); }
+
+ virtual bool capped() const { return _c->capped(); }
+
+ /** return -1 if we're a getmore handoff */
+ virtual long long nscanned() { return _nscanned >= 0 ? _nscanned + _c->nscanned() : _nscanned; }
+ /** just for testing */
+ shared_ptr<Cursor> sub_c() const { return _c; }
+ private:
+ class NoOp : public CursorOp {
+ public:
+ NoOp() {}
+ NoOp( const QueryOp &other ) : CursorOp( other ) {}
+ virtual void _init() { setComplete(); }
+ virtual void next() {}
+ virtual bool mayRecordPlan() const { return false; }
+ virtual QueryOp *_createChild() const { return new NoOp(); }
+ virtual shared_ptr<Cursor> newCursor() const { return qp().newCursor(); }
+ virtual long long nscanned() { assert( false ); return 0; }
+ };
+ void nextClause();
+ shared_ptr<CursorOp> _op;
+ shared_ptr<Cursor> _c;
+ auto_ptr<MultiPlanScanner> _mps;
+ shared_ptr<CoveredIndexMatcher> _matcher;
+ long long _nscanned;
+ };
+
+ /** NOTE min, max, and keyPattern will be updated to be consistent with the selected index. */
+ IndexDetails *indexDetailsForRange( const char *ns, string &errmsg, BSONObj &min, BSONObj &max, BSONObj &keyPattern );
+
+ bool isSimpleIdQuery( const BSONObj& query );
+
+ /**
+ * @return a single cursor that may work well for the given query.
+ * It is possible that no cursor is returned if the sort is not supported by an index. Clients that are
+ * not sure a suitable index exists for the sort are responsible for checking this and falling back to
+ * an unsorted plan when it is not.
+ */
+ shared_ptr<Cursor> bestGuessCursor( const char *ns, const BSONObj &query, const BSONObj &sort );
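+
+ /* Hypothetical caller-side check, per the note above (names illustrative only):
+ *
+ *   shared_ptr<Cursor> c = bestGuessCursor( ns, query, sort );
+ *   if ( !c ) {
+ *       // no index supports the requested sort - fall back to an unsorted plan
+ *   }
+ */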
+
+ /**
+ * Add-on functionality for queryutil classes requiring access to indexing
+ * functionality not currently linked to mongos.
+ * TODO Clean this up a bit, possibly with separate sharded and non sharded
+ * implementations for the appropriate queryutil classes or by pulling index
+ * related functionality into separate wrapper classes.
+ */
+ struct QueryUtilIndexed {
+ /** @return true if the index may be useful according to its KeySpec. */
+ static bool indexUseful( const FieldRangeSetPair &frsp, NamespaceDetails *d, int idxNo, const BSONObj &order );
+ /** Clear any indexes recorded as the best for either the single or multi key pattern. */
+ static void clearIndexesForPatterns( const FieldRangeSetPair &frsp, const BSONObj &order );
+ /** Return a recorded best index for the single or multi key pattern. */
+ static pair< BSONObj, long long > bestIndexForPatterns( const FieldRangeSetPair &frsp, const BSONObj &order );
+ static bool uselessOr( const OrRangeGenerator& org, NamespaceDetails *d, int hintIdx );
+ };
+
+} // namespace mongo
diff --git a/src/mongo/db/queryoptimizercursor.cpp b/src/mongo/db/queryoptimizercursor.cpp
new file mode 100644
index 00000000000..07f8df12815
--- /dev/null
+++ b/src/mongo/db/queryoptimizercursor.cpp
@@ -0,0 +1,530 @@
+// @file queryoptimizercursor.cpp
+
+/**
+ * Copyright (C) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "queryoptimizer.h"
+#include "pdfile.h"
+#include "clientcursor.h"
+#include "btree.h"
+#include "queryoptimizercursor.h"
+
+namespace mongo {
+
+ static const int OutOfOrderDocumentsAssertionCode = 14810;
+
+ /**
+ * A QueryOp implementation utilized by the QueryOptimizerCursor
+ */
+ class QueryOptimizerCursorOp : public QueryOp {
+ public:
+ /**
+ * @param aggregateNscanned - shared long long counting total nscanned for
+ * query ops for all cursors.
+ * @param requireIndex - if unindexed scans should be prohibited.
+ */
+ QueryOptimizerCursorOp( long long &aggregateNscanned, bool requireIndex, int cumulativeCount = 0 ) : _matchCounter( aggregateNscanned, cumulativeCount ), _countingMatches(), _mustAdvance(), _capped(), _yieldRecoveryFailed(), _requireIndex( requireIndex ) {}
+
+ virtual void _init() {
+ if ( qp().scanAndOrderRequired() ) {
+ throw MsgAssertionException( OutOfOrderDocumentsAssertionCode, "order spec cannot be satisfied with index" );
+ }
+ if ( _requireIndex && strcmp( qp().indexKey().firstElementFieldName(), "$natural" ) == 0 ) {
+ throw MsgAssertionException( 9011, "Not an index cursor" );
+ }
+ _c = qp().newCursor();
+
+ // The QueryOptimizerCursor::prepareToTouchEarlierIterate() implementation requires _c->prepareToYield() to work.
+ verify( 15940, _c->supportYields() );
+ _capped = _c->capped();
+
+ // TODO This violates the current Cursor interface abstraction, but for now it's simpler to keep our own set of
+ // dups rather than avoid poisoning the cursor's dup set with unreturned documents. Deduping documents
+ // matched in this QueryOptimizerCursorOp will run against the takeover cursor.
+ _matchCounter.setCheckDups( _c->isMultiKey() );
+
+ _matchCounter.updateNscanned( _c->nscanned() );
+ }
+
+ virtual long long nscanned() {
+ return _c ? _c->nscanned() : _matchCounter.nscanned();
+ }
+
+ virtual bool prepareToYield() {
+ if ( _c && !_cc ) {
+ _cc.reset( new ClientCursor( QueryOption_NoCursorTimeout , _c , qp().ns() ) );
+ }
+ if ( _cc ) {
+ recordCursorLocation();
+ return _cc->prepareToYield( _yieldData );
+ }
+ // no active cursor - ok to yield
+ return true;
+ }
+
+ virtual void recoverFromYield() {
+ if ( _cc && !ClientCursor::recoverFromYield( _yieldData ) ) {
+ _yieldRecoveryFailed = true;
+ _c.reset();
+ _cc.reset();
+
+ if ( _capped ) {
+ msgassertedNoTrace( 13338, str::stream() << "capped cursor overrun: " << qp().ns() );
+ }
+ else if ( qp().mustAssertOnYieldFailure() ) {
+ msgassertedNoTrace( 15892, str::stream() << "QueryOptimizerCursorOp::recoverFromYield() failed to recover" );
+ }
+ else {
+ // we don't fail query since we're fine with returning partial data if collection dropped
+ // also, see SERVER-2454
+ }
+ }
+ else {
+ checkCursorAdvanced();
+ }
+ }
+
+ void prepareToTouchEarlierIterate() {
+ recordCursorLocation();
+ if ( _c ) {
+ _c->prepareToTouchEarlierIterate();
+ }
+ }
+
+ void recoverFromTouchingEarlierIterate() {
+ if ( _c ) {
+ _c->recoverFromTouchingEarlierIterate();
+ }
+ checkCursorAdvanced();
+ }
+
+ virtual void next() {
+ mayAdvance();
+
+ if ( _matchCounter.enoughCumulativeMatchesToChooseAPlan() ) {
+ setStop();
+ return;
+ }
+ if ( !_c || !_c->ok() ) {
+ setComplete();
+ return;
+ }
+
+ _mustAdvance = true;
+ }
+ virtual QueryOp *_createChild() const {
+ return new QueryOptimizerCursorOp( _matchCounter.aggregateNscanned(), _requireIndex, _matchCounter.cumulativeCount() );
+ }
+ DiskLoc currLoc() const { return _c ? _c->currLoc() : DiskLoc(); }
+ BSONObj currKey() const { return _c ? _c->currKey() : BSONObj(); }
+ bool currentMatches( MatchDetails *details ) {
+ bool ret = ( _c && _c->ok() ) ? matcher( _c.get() )->matchesCurrent( _c.get(), details ) : false;
+ // Cache the match, so we can count it in mayAdvance().
+ _matchCounter.setMatch( ret );
+ return ret;
+ }
+ virtual bool mayRecordPlan() const {
+ return !_yieldRecoveryFailed && complete() && ( !stopRequested() || _matchCounter.enoughMatchesToRecordPlan() );
+ }
+ shared_ptr<Cursor> cursor() const { return _c; }
+ private:
+ void mayAdvance() {
+ if ( !_c ) {
+ return;
+ }
+ if ( countingMatches() ) {
+ // Check match if not yet known.
+ if ( !_matchCounter.knowMatch() ) {
+ currentMatches( 0 );
+ }
+ _matchCounter.countMatch( currLoc() );
+ }
+ if ( _mustAdvance ) {
+ _c->advance();
+ handleCursorAdvanced();
+ }
+ _matchCounter.updateNscanned( _c->nscanned() );
+ }
+ // Don't count matches on the first call to next(), which occurs before the first result is returned.
+ bool countingMatches() {
+ if ( _countingMatches ) {
+ return true;
+ }
+ _countingMatches = true;
+ return false;
+ }
+
+ void recordCursorLocation() {
+ _posBeforeYield = currLoc();
+ }
+ void checkCursorAdvanced() {
+ // This check will not correctly determine if we are looking at a different document in
+ // all cases, but it is adequate for updating the query plan's match count (just used to pick
+ // plans, not returned to the client) and for adjusting iteration via _mustAdvance.
+ if ( _posBeforeYield != currLoc() ) {
+ // If the yield advanced our position, the next next() will be a no op.
+ handleCursorAdvanced();
+ }
+ }
+ void handleCursorAdvanced() {
+ _mustAdvance = false;
+ _matchCounter.resetMatch();
+ }
+
+ CachedMatchCounter _matchCounter;
+ bool _countingMatches;
+ bool _mustAdvance;
+ bool _capped;
+ shared_ptr<Cursor> _c;
+ ClientCursor::CleanupPointer _cc;
+ DiskLoc _posBeforeYield;
+ ClientCursor::YieldData _yieldData;
+ bool _yieldRecoveryFailed;
+ bool _requireIndex;
+ };
+
+ /**
+ * This cursor runs a MultiPlanScanner iteratively and returns results from
+ * the scanner's cursors as they become available. Once the scanner chooses
+ * a single plan, this cursor becomes a simple wrapper around that single
+ * plan's cursor (called the 'takeover' cursor).
+ */
+ class QueryOptimizerCursor : public Cursor {
+ public:
+ QueryOptimizerCursor( auto_ptr<MultiPlanScanner> &mps, bool requireIndex ) :
+ _mps( mps ),
+ _originalOp( new QueryOptimizerCursorOp( _nscanned, requireIndex ) ),
+ _currOp(),
+ _nscanned() {
+ _mps->initialOp( _originalOp );
+ shared_ptr<QueryOp> op = _mps->nextOp();
+ rethrowOnError( op );
+ if ( !op->complete() ) {
+ _currOp = dynamic_cast<QueryOptimizerCursorOp*>( op.get() );
+ }
+ }
+
+ virtual bool ok() { return _takeover ? _takeover->ok() : !currLoc().isNull(); }
+
+ virtual Record* _current() {
+ if ( _takeover ) {
+ return _takeover->_current();
+ }
+ assertOk();
+ return currLoc().rec();
+ }
+
+ virtual BSONObj current() {
+ if ( _takeover ) {
+ return _takeover->current();
+ }
+ assertOk();
+ return currLoc().obj();
+ }
+
+ virtual DiskLoc currLoc() { return _takeover ? _takeover->currLoc() : _currLoc(); }
+
+ DiskLoc _currLoc() const {
+ dassert( !_takeover );
+ return _currOp ? _currOp->currLoc() : DiskLoc();
+ }
+
+ virtual bool advance() {
+ return _advance( false );
+ }
+
+ virtual BSONObj currKey() const {
+ if ( _takeover ) {
+ return _takeover->currKey();
+ }
+ assertOk();
+ return _currOp->currKey();
+ }
+
+ /**
+ * When the return value isNull(), our cursor will be ignored for yielding by the client cursor implementation.
+ * In such cases, an internal ClientCursor will update the position of component cursors when necessary.
+ */
+ virtual DiskLoc refLoc() { return _takeover ? _takeover->refLoc() : DiskLoc(); }
+
+ virtual BSONObj indexKeyPattern() {
+ if ( _takeover ) {
+ return _takeover->indexKeyPattern();
+ }
+ assertOk();
+ return _currOp->cursor()->indexKeyPattern();
+ }
+
+ virtual bool supportGetMore() { return false; }
+
+ virtual bool supportYields() { return _takeover ? _takeover->supportYields() : true; }
+
+ virtual void prepareToTouchEarlierIterate() {
+ if ( _takeover ) {
+ _takeover->prepareToTouchEarlierIterate();
+ }
+ else if ( _currOp ) {
+ if ( _mps->currentNPlans() == 1 ) {
+ // This single plan version is a bit more performant, so we use it when possible.
+ _currOp->prepareToTouchEarlierIterate();
+ }
+ else {
+ // With multiple plans, the 'earlier iterate' could be the current iterate of one of
+ // the component plans. We do a full yield of all plans, using ClientCursors.
+ verify( 15941, _mps->prepareToYield() );
+ }
+ }
+ }
+
+ virtual void recoverFromTouchingEarlierIterate() {
+ if ( _takeover ) {
+ _takeover->recoverFromTouchingEarlierIterate();
+ }
+ else if ( _currOp ) {
+ if ( _mps->currentNPlans() == 1 ) {
+ _currOp->recoverFromTouchingEarlierIterate();
+ }
+ else {
+ recoverFromYield();
+ }
+ }
+ }
+
+ virtual bool prepareToYield() {
+ if ( _takeover ) {
+ return _takeover->prepareToYield();
+ }
+ else if ( _currOp ) {
+ return _mps->prepareToYield();
+ }
+ else {
+ // No state needs to be protected, so yielding is fine.
+ return true;
+ }
+ }
+
+ virtual void recoverFromYield() {
+ if ( _takeover ) {
+ _takeover->recoverFromYield();
+ return;
+ }
+ if ( _currOp ) {
+ _mps->recoverFromYield();
+ if ( _currOp->error() || !ok() ) {
+ // Advance to a non-error op if one of the ops errored out.
+ // Advance to a following $or clause if the $or clause returned all results.
+ _advance( true );
+ }
+ }
+ }
+
+ virtual string toString() { return "QueryOptimizerCursor"; }
+
+ virtual bool getsetdup(DiskLoc loc) {
+ if ( _takeover ) {
+ if ( getdupInternal( loc ) ) {
+ return true;
+ }
+ return _takeover->getsetdup( loc );
+ }
+ assertOk();
+ return getsetdupInternal( loc );
+ }
+
+ /** Matcher needs to know if the cursor being forwarded to is multikey. */
+ virtual bool isMultiKey() const {
+ if ( _takeover ) {
+ return _takeover->isMultiKey();
+ }
+ assertOk();
+ return _currOp->cursor()->isMultiKey();
+ }
+
+ virtual bool modifiedKeys() const { return true; }
+
+ /** Initial capped wrapping cases (before takeover) are handled internally by a component ClientCursor. */
+ virtual bool capped() const { return _takeover ? _takeover->capped() : false; }
+
+ virtual long long nscanned() { return _takeover ? _takeover->nscanned() : _nscanned; }
+
+ virtual shared_ptr<CoveredIndexMatcher> matcherPtr() const {
+ if ( _takeover ) {
+ return _takeover->matcherPtr();
+ }
+ assertOk();
+ return _currOp->matcher( _currOp->cursor() );
+ }
+
+ virtual CoveredIndexMatcher* matcher() const {
+ if ( _takeover ) {
+ return _takeover->matcher();
+ }
+ assertOk();
+ return _currOp->matcher( _currOp->cursor() ).get();
+ }
+
+ virtual bool currentMatches( MatchDetails *details = 0 ) {
+ if ( _takeover ) {
+ return _takeover->currentMatches( details );
+ }
+ assertOk();
+ return _currOp->currentMatches( details );
+ }
+
+ private:
+ /**
+ * Advances the QueryPlanSet::Runner.
+ * @param force - advance even if the current query op is not valid. The 'force' param should only be specified
+ * when there are plans left in the runner.
+ */
+ bool _advance( bool force ) {
+ if ( _takeover ) {
+ return _takeover->advance();
+ }
+
+ if ( !force && !ok() ) {
+ return false;
+ }
+
+ DiskLoc prevLoc = _currLoc();
+
+ _currOp = 0;
+ shared_ptr<QueryOp> op = _mps->nextOp();
+ rethrowOnError( op );
+
+ // Avoiding dynamic_cast here for performance. Soon we won't need to
+ // do a cast at all.
+ QueryOptimizerCursorOp *qocop = (QueryOptimizerCursorOp*)( op.get() );
+
+ if ( !op->complete() ) {
+ // The 'qocop' will be valid until we call _mps->nextOp() again. We return 'current' values from this op.
+ _currOp = qocop;
+ }
+ else if ( op->stopRequested() ) {
+ if ( qocop->cursor() ) {
+ // Ensure that prepareToTouchEarlierIterate() may be called safely when a BasicCursor takes over.
+ if ( !prevLoc.isNull() && prevLoc == qocop->currLoc() ) {
+ qocop->cursor()->advance();
+ }
+ // Clear the Runner and any unnecessary QueryOps and their ClientCursors.
+ _mps->clearRunner();
+ _takeover.reset( new MultiCursor( _mps,
+ qocop->cursor(),
+ op->matcher( qocop->cursor() ),
+ *op,
+ _nscanned - qocop->cursor()->nscanned() ) );
+ }
+ }
+
+ return ok();
+ }
+ /** Forward an exception when the runner errs out. */
+ void rethrowOnError( const shared_ptr< QueryOp > &op ) {
+ if ( op->error() ) {
+ throw MsgAssertionException( op->exception() );
+ }
+ }
+
+ void assertOk() const {
+ massert( 14809, "Invalid access for cursor that is not ok()", !_currLoc().isNull() );
+ }
+
+ /** Insert and check for dups before takeover occurs */
+ bool getsetdupInternal(const DiskLoc &loc) {
+ return _dups.getsetdup( loc );
+ }
+
+ /** Just check for dups - after takeover occurs */
+ bool getdupInternal(const DiskLoc &loc) {
+ dassert( _takeover );
+ return _dups.getdup( loc );
+ }
+
+ auto_ptr<MultiPlanScanner> _mps;
+ shared_ptr<QueryOptimizerCursorOp> _originalOp;
+ QueryOptimizerCursorOp *_currOp;
+ shared_ptr<Cursor> _takeover;
+ long long _nscanned;
+ // Using a SmallDupSet seems a bit hokey, but I've measured a 5% performance improvement with ~100 document non multi key scans.
+ SmallDupSet _dups;
+ };
+
+ shared_ptr<Cursor> newQueryOptimizerCursor( auto_ptr<MultiPlanScanner> mps, bool requireIndex ) {
+ try {
+ return shared_ptr<Cursor>( new QueryOptimizerCursor( mps, requireIndex ) );
+ } catch( const AssertionException &e ) {
+ if ( e.getCode() == OutOfOrderDocumentsAssertionCode ) {
+ // If no indexes follow the requested sort order, return an
+ // empty pointer. This is legacy behavior based on bestGuessCursor().
+ return shared_ptr<Cursor>();
+ }
+ throw;
+ }
+ return shared_ptr<Cursor>();
+ }
+
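+ /* Illustrative sketch only (not part of this change): a hypothetical caller driving
+ * the cursor returned by newQueryOptimizerCursor() above. Yielding and error handling
+ * are omitted, and 'mps' is assumed to have been built for the query at hand:
+ *
+ *   shared_ptr<Cursor> c = newQueryOptimizerCursor( mps, false );
+ *   while( c && c->ok() ) {
+ *       if ( c->currentMatches() && !c->getsetdup( c->currLoc() ) ) {
+ *           BSONObj obj = c->current();   // matched, deduped result
+ *       }
+ *       c->advance();
+ *   }
+ */
+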
+ shared_ptr<Cursor> NamespaceDetailsTransient::getCursor( const char *ns, const BSONObj &query,
+ const BSONObj &order, bool requireIndex,
+ bool *simpleEqualityMatch ) {
+ if ( simpleEqualityMatch ) {
+ *simpleEqualityMatch = false;
+ }
+ if ( query.isEmpty() && order.isEmpty() && !requireIndex ) {
+ // TODO This will not use a covered index currently.
+ return theDataFileMgr.findAll( ns );
+ }
+ if ( isSimpleIdQuery( query ) ) {
+ Database *database = cc().database();
+ verify( 15985, database );
+ NamespaceDetails *d = database->namespaceIndex.details(ns);
+ if ( d ) {
+ int idxNo = d->findIdIndex();
+ if ( idxNo >= 0 ) {
+ IndexDetails& i = d->idx( idxNo );
+ BSONObj key = i.getKeyFromQuery( query );
+ return shared_ptr<Cursor>( BtreeCursor::make( d, idxNo, i, key, key, true, 1 ) );
+ }
+ }
+ }
+ auto_ptr<MultiPlanScanner> mps( new MultiPlanScanner( ns, query, order ) ); // mayYield == false
+ shared_ptr<Cursor> single = mps->singleCursor();
+ if ( single ) {
+ if ( !( requireIndex &&
+ dynamic_cast<BasicCursor*>( single.get() ) /* May not use an unindexed cursor */ ) ) {
+ if ( !query.isEmpty() && !single->matcher() ) {
+ shared_ptr<CoveredIndexMatcher> matcher( new CoveredIndexMatcher( query, single->indexKeyPattern() ) );
+ single->setMatcher( matcher );
+ }
+ if ( simpleEqualityMatch ) {
+ const QueryPlan *qp = mps->singlePlan();
+ if ( qp->exactKeyMatch() && !single->matcher()->needRecord() ) {
+ *simpleEqualityMatch = true;
+ }
+ }
+ return single;
+ }
+ }
+ return newQueryOptimizerCursor( mps, requireIndex );
+ }
+
+ /** This interface just available for testing. */
+ shared_ptr<Cursor> newQueryOptimizerCursor( const char *ns, const BSONObj &query, const BSONObj &order, bool requireIndex ) {
+ auto_ptr<MultiPlanScanner> mps( new MultiPlanScanner( ns, query, order ) ); // mayYield == false
+ return newQueryOptimizerCursor( mps, requireIndex );
+ }
+
+} // namespace mongo
diff --git a/src/mongo/db/queryoptimizercursor.h b/src/mongo/db/queryoptimizercursor.h
new file mode 100644
index 00000000000..ee5a1663370
--- /dev/null
+++ b/src/mongo/db/queryoptimizercursor.h
@@ -0,0 +1,150 @@
+// @file queryoptimizercursor.h
+
+/**
+ * Copyright (C) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+namespace mongo {
+
+ /** Helper class for caching and counting matches during execution of a QueryPlan. */
+ class CachedMatchCounter {
+ public:
+ /**
+ * @param aggregateNscanned - shared count of nscanned for this and other plans.
+ * @param cumulativeCount - starting point for accumulated count over a series of plans.
+ */
+ CachedMatchCounter( long long &aggregateNscanned, int cumulativeCount ) : _aggregateNscanned( aggregateNscanned ), _nscanned(), _cumulativeCount( cumulativeCount ), _count(), _checkDups(), _match( Unknown ), _counted() {}
+
+ /** Set whether dup checking is enabled when counting. */
+ void setCheckDups( bool checkDups ) { _checkDups = checkDups; }
+
+ /**
+ * Usual sequence of events:
+ * 1) resetMatch() - reset stored match value to Unknown.
+ * 2) setMatch() - set match value to a definite true/false value.
+ * 3) knowMatch() - check if setMatch() has been called.
+ * 4) countMatch() - increment count if match is true.
+ */
+
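+ /* Minimal sketch of that sequence for one scanned document, assuming a counter
+ * 'mc' and a positioned cursor 'c' (names are illustrative only):
+ *
+ *   mc.resetMatch();                       // new document, match unknown
+ *   mc.setMatch( matcherSaysMatch );       // record a definite true/false
+ *   if ( mc.knowMatch() )
+ *       mc.countMatch( c->currLoc() );     // count once, with dup checking
+ *   mc.updateNscanned( c->nscanned() );
+ */
+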
+ void resetMatch() {
+ _match = Unknown;
+ _counted = false;
+ }
+ void setMatch( bool match ) { _match = match ? True : False; }
+ bool knowMatch() const { return _match != Unknown; }
+ void countMatch( const DiskLoc &loc ) {
+ if ( !_counted && _match == True && !getsetdup( loc ) ) {
+ ++_cumulativeCount;
+ ++_count;
+ _counted = true;
+ }
+ }
+
+ bool enoughCumulativeMatchesToChooseAPlan() const {
+ // This is equivalent to the default condition for switching from
+ // a query to a getMore, which was the historical default match count for
+ // choosing a plan.
+ return _cumulativeCount >= 101;
+ }
+ bool enoughMatchesToRecordPlan() const {
+ // Recording after 50 matches is a historical default (101 default limit / 2).
+ return _count > 50;
+ }
+
+ int cumulativeCount() const { return _cumulativeCount; }
+ int count() const { return _count; }
+
+ /** Update local and aggregate nscanned counts. */
+ void updateNscanned( long long nscanned ) {
+ _aggregateNscanned += ( nscanned - _nscanned );
+ _nscanned = nscanned;
+ }
+ long long nscanned() const { return _nscanned; }
+ long long &aggregateNscanned() const { return _aggregateNscanned; }
+ private:
+ bool getsetdup( const DiskLoc &loc ) {
+ if ( !_checkDups ) {
+ return false;
+ }
+ pair<set<DiskLoc>::iterator, bool> p = _dups.insert( loc );
+ return !p.second;
+ }
+ long long &_aggregateNscanned;
+ long long _nscanned;
+ int _cumulativeCount;
+ int _count;
+ bool _checkDups;
+ enum MatchState { Unknown, False, True };
+ MatchState _match;
+ bool _counted;
+ set<DiskLoc> _dups;
+ };
+
+ /** Dup tracking class, optimizing one common case with small set and few initial reads. */
+ class SmallDupSet {
+ public:
+ SmallDupSet() : _accesses() {
+ _vec.reserve( 250 );
+ }
+ /** @return true if @param 'loc' already added to the set, false if adding to the set in this call. */
+ bool getsetdup( const DiskLoc &loc ) {
+ access();
+ return vec() ? getsetdupVec( loc ) : getsetdupSet( loc );
+ }
+ /** @return true when @param loc in the set. */
+ bool getdup( const DiskLoc &loc ) {
+ access();
+ return vec() ? getdupVec( loc ) : getdupSet( loc );
+ }
+ private:
+ void access() {
+ ++_accesses;
+ mayUpgrade();
+ }
+ void mayUpgrade() {
+ if ( vec() && _accesses > 500 ) {
+ _set.insert( _vec.begin(), _vec.end() );
+ }
+ }
+ bool vec() const {
+ return _set.size() == 0;
+ }
+ bool getsetdupVec( const DiskLoc &loc ) {
+ if ( getdupVec( loc ) ) {
+ return true;
+ }
+ _vec.push_back( loc );
+ return false;
+ }
+ bool getdupVec( const DiskLoc &loc ) const {
+ for( vector<DiskLoc>::const_iterator i = _vec.begin(); i != _vec.end(); ++i ) {
+ if ( *i == loc ) {
+ return true;
+ }
+ }
+ return false;
+ }
+ bool getsetdupSet( const DiskLoc &loc ) {
+ pair<set<DiskLoc>::iterator, bool> p = _set.insert(loc);
+ return !p.second;
+ }
+ bool getdupSet( const DiskLoc &loc ) {
+ return _set.count( loc ) > 0;
+ }
+ vector<DiskLoc> _vec;
+ set<DiskLoc> _set;
+ long long _accesses;
+ };
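+
+ /* Illustrative behaviour (hypothetical DiskLocs), showing the getsetdup()/getdup()
+ * contract rather than the internal vector/set switchover:
+ *
+ *   SmallDupSet dups;
+ *   dups.getsetdup( locA );   // false - first sighting, locA is recorded
+ *   dups.getsetdup( locA );   // true  - already recorded
+ *   dups.getdup( locB );      // false - getdup() never records anything
+ */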
+} // namespace mongo
diff --git a/src/mongo/db/querypattern.cpp b/src/mongo/db/querypattern.cpp
new file mode 100644
index 00000000000..e20e2b6a6ae
--- /dev/null
+++ b/src/mongo/db/querypattern.cpp
@@ -0,0 +1,99 @@
+// @file querypattern.cpp - Query pattern matching for selecting similar plans given similar queries.
+
+/* Copyright 2011 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "querypattern.h"
+
+namespace mongo {
+
+ QueryPattern::QueryPattern( const FieldRangeSet &frs, const BSONObj &sort ) {
+ for( map<string,FieldRange>::const_iterator i = frs.ranges().begin(); i != frs.ranges().end(); ++i ) {
+ if ( i->second.equality() ) {
+ _fieldTypes[ i->first ] = QueryPattern::Equality;
+ }
+ else if ( i->second.empty() ) {
+ // This case generally results from an upper and lower bound that are inconsistent for a single key index.
+ _fieldTypes[ i->first ] = QueryPattern::UpperAndLowerBound;
+ }
+ else if ( i->second.nontrivial() ) {
+ bool upper = i->second.max().type() != MaxKey;
+ bool lower = i->second.min().type() != MinKey;
+ if ( upper && lower )
+ _fieldTypes[ i->first ] = QueryPattern::UpperAndLowerBound;
+ else if ( upper )
+ _fieldTypes[ i->first ] = QueryPattern::UpperBound;
+ else if ( lower )
+ _fieldTypes[ i->first ] = QueryPattern::LowerBound;
+ }
+ }
+ setSort( sort );
+ }
+
+ /** for testing only - speed unimportant */
+ bool QueryPattern::operator==( const QueryPattern &other ) const {
+ bool less = operator<( other );
+ bool more = other.operator<( *this );
+ assert( !( less && more ) );
+ return !( less || more );
+ }
+
+ /** for testing only - speed unimportant */
+ bool QueryPattern::operator!=( const QueryPattern &other ) const {
+ return !operator==( other );
+ }
+
+ string typeToString( enum QueryPattern::Type t ) {
+ switch (t) {
+ case QueryPattern::Equality:
+ return "Equality";
+ case QueryPattern::LowerBound:
+ return "LowerBound";
+ case QueryPattern::UpperBound:
+ return "UpperBound";
+ case QueryPattern::UpperAndLowerBound:
+ return "UpperAndLowerBound";
+ }
+ return "";
+ }
+
+ string QueryPattern::toString() const {
+ BSONObjBuilder b;
+ for( map<string,Type>::const_iterator i = _fieldTypes.begin(); i != _fieldTypes.end(); ++i ) {
+ b << i->first << typeToString( i->second );
+ }
+ return BSON( "query" << b.done() << "sort" << _sort ).toString();
+ }
+
+ void QueryPattern::setSort( const BSONObj sort ) {
+ _sort = normalizeSort( sort );
+ }
+
+ BSONObj QueryPattern::normalizeSort( const BSONObj &spec ) {
+ if ( spec.isEmpty() )
+ return spec;
+ int direction = ( spec.firstElement().number() >= 0 ) ? 1 : -1;
+ BSONObjIterator i( spec );
+ BSONObjBuilder b;
+ while( i.moreWithEOO() ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ b.append( e.fieldName(), direction * ( ( e.number() >= 0 ) ? -1 : 1 ) );
+ }
+ return b.obj();
+ }
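+
+ /* Illustrative property of normalizeSort() (hypothetical specs): a sort spec and
+ * its exact reverse normalize to the same object, so they share a QueryPattern:
+ *
+ *   normalizeSort( BSON( "a" << 1 << "b" << -1 ) ) and
+ *   normalizeSort( BSON( "a" << -1 << "b" << 1 ) ) produce equal objects.
+ */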
+
+} // namespace mongo
diff --git a/src/mongo/db/querypattern.h b/src/mongo/db/querypattern.h
new file mode 100644
index 00000000000..000c301a0de
--- /dev/null
+++ b/src/mongo/db/querypattern.h
@@ -0,0 +1,78 @@
+// @file querypattern.h - Query pattern matching for selecting similar plans given similar queries.
+
+/* Copyright 2011 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "jsobj.h"
+#include "queryutil.h"
+
+namespace mongo {
+
+ /**
+ * Implements query pattern matching, used to determine if a query is
+ * similar to an earlier query and should use the same plan.
+ *
+ * Two queries will generate the same QueryPattern, and therefore match each
+ * other, if their fields have the same Types and they have the same sort
+ * spec.
+ */
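+ /* For illustration (hypothetical queries): with the same empty sort spec,
+ *   { a: 5, b: { $gt: 3 } }   and   { a: 12, b: { $gt: 70 } }
+ * produce equal QueryPatterns - each field maps to the same Type even though the
+ * constants differ - so a plan recorded for one may be reused for the other.
+ */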
+ class QueryPattern {
+ public:
+ QueryPattern( const FieldRangeSet &frs, const BSONObj &sort );
+ enum Type {
+ Equality,
+ LowerBound,
+ UpperBound,
+ UpperAndLowerBound
+ };
+ bool operator<( const QueryPattern &other ) const;
+ /** for testing only */
+ bool operator==( const QueryPattern &other ) const;
+ /** for testing only */
+ bool operator!=( const QueryPattern &other ) const;
+ /** for development / debugging */
+ string toString() const;
+ private:
+ void setSort( const BSONObj sort );
+ static BSONObj normalizeSort( const BSONObj &spec );
+ map<string,Type> _fieldTypes;
+ BSONObj _sort;
+ };
+
+ inline bool QueryPattern::operator<( const QueryPattern &other ) const {
+ map<string,Type>::const_iterator i = _fieldTypes.begin();
+ map<string,Type>::const_iterator j = other._fieldTypes.begin();
+ while( i != _fieldTypes.end() ) {
+ if ( j == other._fieldTypes.end() )
+ return false;
+ if ( i->first < j->first )
+ return true;
+ else if ( i->first > j->first )
+ return false;
+ if ( i->second < j->second )
+ return true;
+ else if ( i->second > j->second )
+ return false;
+ ++i;
+ ++j;
+ }
+ if ( j != other._fieldTypes.end() )
+ return true;
+ return _sort.woCompare( other._sort ) < 0;
+ }
+
+} // namespace mongo
diff --git a/src/mongo/db/queryutil-inl.h b/src/mongo/db/queryutil-inl.h
new file mode 100644
index 00000000000..08d3b1fac52
--- /dev/null
+++ b/src/mongo/db/queryutil-inl.h
@@ -0,0 +1,153 @@
+// @file queryutil-inl.h - Inline definitions for frequently called queryutil.h functions
+
+/* Copyright 2011 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace mongo {
+
+ inline bool FieldInterval::equality() const {
+ if ( _cachedEquality == -1 ) {
+ _cachedEquality = ( _lower._inclusive && _upper._inclusive && _lower._bound.woCompare( _upper._bound, false ) == 0 );
+ }
+ return _cachedEquality != 0;
+ }
+
+ inline bool FieldRange::equality() const {
+ return
+ !empty() &&
+ min().woCompare( max(), false ) == 0 &&
+ maxInclusive() &&
+ minInclusive();
+ }
+
+ inline bool FieldRange::inQuery() const {
+ if ( equality() ) {
+ return true;
+ }
+ for( vector<FieldInterval>::const_iterator i = _intervals.begin(); i != _intervals.end(); ++i ) {
+ if ( !i->equality() ) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ /**
+ * TODO Assumes intervals are contiguous and minKey/maxKey will not be
+ * matched against.
+ */
+ inline bool FieldRange::nontrivial() const {
+ return
+ ! empty() &&
+ ( _intervals.size() != 1 ||
+ minKey.firstElement().woCompare( min(), false ) != 0 ||
+ maxKey.firstElement().woCompare( max(), false ) != 0 );
+ }
+
+ inline const FieldRange &FieldRangeSet::range( const char *fieldName ) const {
+ map<string,FieldRange>::const_iterator f = _ranges.find( fieldName );
+ if ( f == _ranges.end() )
+ return trivialRange();
+ return f->second;
+ }
+
+ inline FieldRange &FieldRangeSet::range( const char *fieldName ) {
+ map<string,FieldRange>::iterator f = _ranges.find( fieldName );
+ if ( f == _ranges.end() ) {
+ _ranges.insert( make_pair( string( fieldName ), trivialRange() ) );
+ return _ranges.find( fieldName )->second;
+ }
+ return f->second;
+ }
+
+ inline int FieldRangeSet::nNontrivialRanges() const {
+ int count = 0;
+ for( map<string,FieldRange>::const_iterator i = _ranges.begin(); i != _ranges.end(); ++i ) {
+ if ( i->second.nontrivial() )
+ ++count;
+ }
+ return count;
+ }
+
+ inline bool FieldRangeSet::matchPossible() const {
+ for( map<string,FieldRange>::const_iterator i = _ranges.begin(); i != _ranges.end(); ++i ) {
+ if ( i->second.empty() ) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ inline bool FieldRangeSet::matchPossibleForIndex( const BSONObj &keyPattern ) const {
+ if ( !_singleKey ) {
+ return matchPossible();
+ }
+ BSONObjIterator i( keyPattern );
+ while( i.more() ) {
+ BSONElement e = i.next();
+ if ( e.fieldName() == string( "$natural" ) ) {
+ return true;
+ }
+ if ( range( e.fieldName() ).empty() ) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ inline long long FieldRangeVector::size() {
+ long long ret = 1;
+ for( vector<FieldRange>::const_iterator i = _ranges.begin(); i != _ranges.end(); ++i ) {
+ ret *= i->intervals().size();
+ }
+ return ret;
+ }
+
+ inline FieldRangeSetPair *OrRangeGenerator::topFrsp() const {
+ FieldRangeSetPair *ret = new FieldRangeSetPair( _baseSet );
+ if (_orSets.size()) {
+ *ret &= _orSets.front();
+ }
+ return ret;
+ }
+
+ inline FieldRangeSetPair *OrRangeGenerator::topFrspOriginal() const {
+ FieldRangeSetPair *ret = new FieldRangeSetPair( _baseSet );
+ if (_originalOrSets.size()) {
+ *ret &= _originalOrSets.front();
+ }
+ return ret;
+ }
+
+ inline bool FieldRangeSetPair::matchPossibleForIndex( NamespaceDetails *d, int idxNo, const BSONObj &keyPattern ) const {
+ assertValidIndexOrNoIndex( d, idxNo );
+ if ( !matchPossible() ) {
+ return false;
+ }
+ if ( idxNo < 0 ) {
+ // multi key matchPossible() is true, so return true.
+ return true;
+ }
+ return frsForIndex( d, idxNo ).matchPossibleForIndex( keyPattern );
+ }
+
+ inline void FieldRangeSetPair::assertValidIndexOrNoIndex( const NamespaceDetails *d, int idxNo ) const {
+ massert( 14049, "FieldRangeSetPair invalid index specified", idxNo >= -1 );
+ if ( idxNo >= 0 ) {
+ assertValidIndex( d, idxNo );
+ }
+ }
+
+} // namespace mongo
diff --git a/src/mongo/db/queryutil.cpp b/src/mongo/db/queryutil.cpp
new file mode 100644
index 00000000000..e6748c4bc2e
--- /dev/null
+++ b/src/mongo/db/queryutil.cpp
@@ -0,0 +1,1551 @@
+// @file queryutil.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+
+#include "btree.h"
+#include "matcher.h"
+#include "pdfile.h"
+#include "queryoptimizer.h"
+#include "../util/unittest.h"
+#include "dbmessage.h"
+#include "indexkey.h"
+#include "../util/mongoutils/str.h"
+
+namespace mongo {
+ extern BSONObj staticNull;
+ extern BSONObj staticUndefined;
+
+ /** returns a string that, when used as a matcher, would match a superset of regex()
+ returns "" for complex regular expressions
+ used to optimize queries in some simple regex cases that start with '^'
+
+ if purePrefix != NULL, sets it to whether the regex can be converted to a range query
+ */
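+ /* Illustrative results (not exhaustive), assuming empty flags:
+ *
+ *   simpleRegex( "^foo",   "", &pure )  ->  "foo"   ( pure == true  )
+ *   simpleRegex( "^foo.*", "", &pure )  ->  "foo"   ( pure == false )
+ *   simpleRegex( "foo",    "", &pure )  ->  ""      ( not anchored  )
+ */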
+ string simpleRegex(const char* regex, const char* flags, bool* purePrefix) {
+ string r = "";
+
+ if (purePrefix) *purePrefix = false;
+
+ bool multilineOK;
+ if ( regex[0] == '\\' && regex[1] == 'A') {
+ multilineOK = true;
+ regex += 2;
+ }
+ else if (regex[0] == '^') {
+ multilineOK = false;
+ regex += 1;
+ }
+ else {
+ return r;
+ }
+
+ bool extended = false;
+ while (*flags) {
+ switch (*(flags++)) {
+ case 'm': // multiline
+ if (multilineOK)
+ continue;
+ else
+ return r;
+ case 'x': // extended
+ extended = true;
+ break;
+ default:
+ return r; // can't use index
+ }
+ }
+
+ stringstream ss;
+
+ while(*regex) {
+ char c = *(regex++);
+ if ( c == '*' || c == '?' ) {
+ // These are the only two symbols that make the last char optional
+ r = ss.str();
+ r = r.substr( 0 , r.size() - 1 );
+ return r; //breaking here fails with /^a?/
+ }
+ else if (c == '|') {
+ // whole match so far is optional. Nothing we can do here.
+ return string();
+ }
+ else if (c == '\\') {
+ c = *(regex++);
+ if (c == 'Q'){
+ // \Q...\E quotes everything inside
+ while (*regex) {
+ c = (*regex++);
+ if (c == '\\' && (*regex == 'E')){
+ regex++; //skip the 'E'
+ break; // go back to start of outer loop
+ }
+ else {
+ ss << c; // character should match itself
+ }
+ }
+ }
+ else if ((c >= 'A' && c <= 'Z') ||
+ (c >= 'a' && c <= 'z') ||
+ (c >= '0' && c <= '9') ||
+ (c == '\0')) {
+ // don't know what to do with these
+ r = ss.str();
+ break;
+ }
+ else {
+ // slash followed by non-alphanumeric represents the following char
+ ss << c;
+ }
+ }
+ else if (strchr("^$.[()+{", c)) {
+ // list of "metacharacters" from man pcrepattern
+ r = ss.str();
+ break;
+ }
+ else if (extended && c == '#') {
+ // comment
+ r = ss.str();
+ break;
+ }
+ else if (extended && isspace(c)) {
+ continue;
+ }
+ else {
+ // self-matching char
+ ss << c;
+ }
+ }
+
+ if ( r.empty() && *regex == 0 ) {
+ r = ss.str();
+ if (purePrefix) *purePrefix = !r.empty();
+ }
+
+ return r;
+ }
+ inline string simpleRegex(const BSONElement& e) {
+ switch(e.type()) {
+ case RegEx:
+ return simpleRegex(e.regex(), e.regexFlags());
+ case Object: {
+ BSONObj o = e.embeddedObject();
+ return simpleRegex(o["$regex"].valuestrsafe(), o["$options"].valuestrsafe());
+ }
+ default: assert(false); return ""; //return squashes compiler warning
+ }
+ }
+
+ string simpleRegexEnd( string regex ) {
+ ++regex[ regex.length() - 1 ];
+ return regex;
+ }
+
+
+ FieldRange::FieldRange( const BSONElement &e, bool singleKey, bool isNot, bool optimize )
+ : _singleKey( singleKey ) {
+ int op = e.getGtLtOp();
+
+ // NOTE with $not, we could potentially form a complementary set of intervals.
+ if ( !isNot && !e.eoo() && e.type() != RegEx && op == BSONObj::opIN ) {
+ set<BSONElement,element_lt> vals;
+ vector<FieldRange> regexes;
+ uassert( 12580 , "invalid query" , e.isABSONObj() );
+ BSONObjIterator i( e.embeddedObject() );
+ while( i.more() ) {
+ BSONElement ie = i.next();
+ uassert( 15881, "$elemMatch not allowed within $in",
+ ie.type() != Object ||
+ ie.embeddedObject().firstElement().getGtLtOp() != BSONObj::opELEM_MATCH );
+ if ( ie.type() == RegEx ) {
+ regexes.push_back( FieldRange( ie, singleKey, false, optimize ) );
+ }
+ else {
+ // A document array may be indexed by its first element, by undefined
+ // if it is empty, or as a full array if it is embedded within another
+ // array.
+ vals.insert( ie );
+ if ( ie.type() == Array ) {
+ BSONElement temp = ie.embeddedObject().firstElement();
+ if ( temp.eoo() ) {
+ temp = staticUndefined.firstElement();
+ }
+ vals.insert( temp );
+ }
+ }
+ }
+
+ for( set<BSONElement,element_lt>::const_iterator i = vals.begin(); i != vals.end(); ++i )
+ _intervals.push_back( FieldInterval(*i) );
+
+ for( vector<FieldRange>::const_iterator i = regexes.begin(); i != regexes.end(); ++i )
+ *this |= *i;
+
+ return;
+ }
+
+ // A document array may be indexed by its first element, by undefined
+ // if it is empty, or as a full array if it is embedded within another
+ // array.
+ if ( e.type() == Array && op == BSONObj::Equality ) {
+
+ _intervals.push_back( FieldInterval(e) );
+ BSONElement temp = e.embeddedObject().firstElement();
+ if ( temp.eoo() ) {
+ temp = staticUndefined.firstElement();
+ }
+ if ( temp < e ) {
+ _intervals.insert( _intervals.begin() , temp );
+ }
+ else {
+ _intervals.push_back( FieldInterval(temp) );
+ }
+
+ return;
+ }
+
+ _intervals.push_back( FieldInterval() );
+ FieldInterval &initial = _intervals[ 0 ];
+ BSONElement &lower = initial._lower._bound;
+ bool &lowerInclusive = initial._lower._inclusive;
+ BSONElement &upper = initial._upper._bound;
+ bool &upperInclusive = initial._upper._inclusive;
+ lower = minKey.firstElement();
+ lowerInclusive = true;
+ upper = maxKey.firstElement();
+ upperInclusive = true;
+
+ if ( e.eoo() )
+ return;
+
+ bool existsSpec = false;
+ if ( op == BSONObj::opEXISTS ) {
+ existsSpec = e.trueValue();
+ }
+
+ if ( e.type() == RegEx
+ || (e.type() == Object && !e.embeddedObject()["$regex"].eoo())
+ ) {
+ uassert( 13454, "invalid regular expression operator", op == BSONObj::Equality || op == BSONObj::opREGEX );
+ if ( !isNot ) { // no optimization for negated regex - we could consider creating 2 intervals comprising all nonmatching prefixes
+ const string r = simpleRegex(e);
+ if ( r.size() ) {
+ lower = addObj( BSON( "" << r ) ).firstElement();
+ upper = addObj( BSON( "" << simpleRegexEnd( r ) ) ).firstElement();
+ upperInclusive = false;
+ }
+ else {
+ BSONObjBuilder b1(32), b2(32);
+ b1.appendMinForType( "" , String );
+ lower = addObj( b1.obj() ).firstElement();
+
+ b2.appendMaxForType( "" , String );
+ upper = addObj( b2.obj() ).firstElement();
+ upperInclusive = false; //MaxForType String is an empty Object
+ }
+
+ // regex matches self - regex type > string type
+ if (e.type() == RegEx) {
+ BSONElement re = addObj( BSON( "" << e ) ).firstElement();
+ _intervals.push_back( FieldInterval(re) );
+ }
+ else {
+ BSONObj orig = e.embeddedObject();
+ BSONObjBuilder b;
+ b.appendRegex("", orig["$regex"].valuestrsafe(), orig["$options"].valuestrsafe());
+ BSONElement re = addObj( b.obj() ).firstElement();
+ _intervals.push_back( FieldInterval(re) );
+ }
+
+ }
+ return;
+ }
+ if ( isNot ) {
+ switch( op ) {
+ case BSONObj::Equality:
+ return;
+// op = BSONObj::NE;
+// break;
+ case BSONObj::opALL:
+ case BSONObj::opMOD: // NOTE for mod and type, we could consider having 1-2 intervals comprising the complementary types (multiple intervals already possible with $in)
+ case BSONObj::opTYPE:
+ // no bound calculation
+ return;
+ case BSONObj::NE:
+ op = BSONObj::Equality;
+ break;
+ case BSONObj::LT:
+ op = BSONObj::GTE;
+ break;
+ case BSONObj::LTE:
+ op = BSONObj::GT;
+ break;
+ case BSONObj::GT:
+ op = BSONObj::LTE;
+ break;
+ case BSONObj::GTE:
+ op = BSONObj::LT;
+ break;
+ case BSONObj::opEXISTS:
+ existsSpec = !existsSpec;
+ break;
+ default: // otherwise doesn't matter
+ break;
+ }
+ }
+ switch( op ) {
+ case BSONObj::Equality:
+ lower = upper = e;
+ break;
+ case BSONObj::NE: {
+ // this will invalidate the upper/lower references above
+ _intervals.push_back( FieldInterval() );
+ // optimize doesn't make sense for negative ranges
+ _intervals[ 0 ]._upper._bound = e;
+ _intervals[ 0 ]._upper._inclusive = false;
+ _intervals[ 1 ]._lower._bound = e;
+ _intervals[ 1 ]._lower._inclusive = false;
+ _intervals[ 1 ]._upper._bound = maxKey.firstElement();
+ _intervals[ 1 ]._upper._inclusive = true;
+ optimize = false; // don't run optimize code below
+ break;
+ }
+ case BSONObj::LT:
+ upperInclusive = false;
+ case BSONObj::LTE:
+ upper = e;
+ break;
+ case BSONObj::GT:
+ lowerInclusive = false;
+ case BSONObj::GTE:
+ lower = e;
+ break;
+ case BSONObj::opALL: {
+ uassert( 10370 , "$all requires array", e.type() == Array );
+ BSONObjIterator i( e.embeddedObject() );
+ bool bound = false;
+ while ( i.more() ) {
+ BSONElement x = i.next();
+ if ( x.type() == Object && x.embeddedObject().firstElement().getGtLtOp() == BSONObj::opELEM_MATCH ) {
+ // taken care of elsewhere
+ }
+ else if ( x.type() != RegEx ) {
+ lower = upper = x;
+ bound = true;
+ break;
+ }
+ }
+ if ( !bound ) { // if no good non regex bound found, try regex bounds
+ BSONObjIterator i( e.embeddedObject() );
+ while( i.more() ) {
+ BSONElement x = i.next();
+ if ( x.type() != RegEx )
+ continue;
+ string simple = simpleRegex( x.regex(), x.regexFlags() );
+ if ( !simple.empty() ) {
+ lower = addObj( BSON( "" << simple ) ).firstElement();
+ upper = addObj( BSON( "" << simpleRegexEnd( simple ) ) ).firstElement();
+ break;
+ }
+ }
+ }
+ break;
+ }
+ case BSONObj::opMOD: {
+ {
+ BSONObjBuilder b;
+ b.appendMinForType( "" , NumberDouble );
+ lower = addObj( b.obj() ).firstElement();
+ }
+ {
+ BSONObjBuilder b;
+ b.appendMaxForType( "" , NumberDouble );
+ upper = addObj( b.obj() ).firstElement();
+ }
+ break;
+ }
+ case BSONObj::opTYPE: {
+ BSONType t = (BSONType)e.numberInt();
+ {
+ BSONObjBuilder b;
+ b.appendMinForType( "" , t );
+ lower = addObj( b.obj() ).firstElement();
+ }
+ {
+ BSONObjBuilder b;
+ b.appendMaxForType( "" , t );
+ upper = addObj( b.obj() ).firstElement();
+ }
+
+ break;
+ }
+ case BSONObj::opREGEX:
+ case BSONObj::opOPTIONS:
+ // do nothing
+ break;
+ case BSONObj::opELEM_MATCH: {
+ log() << "warning: shouldn't get here?" << endl;
+ break;
+ }
+ case BSONObj::opNEAR:
+ case BSONObj::opWITHIN:
+ _special = "2d";
+ break;
+ case BSONObj::opEXISTS: {
+ if ( !existsSpec ) {
+ lower = upper = staticNull.firstElement();
+ }
+ optimize = false;
+ break;
+ }
+ default:
+ break;
+ }
+
+ if ( optimize ) {
+ if ( lower.type() != MinKey && upper.type() == MaxKey && lower.isSimpleType() ) { // TODO: get rid of isSimpleType
+ BSONObjBuilder b;
+ b.appendMaxForType( lower.fieldName() , lower.type() );
+ upper = addObj( b.obj() ).firstElement();
+ }
+ else if ( lower.type() == MinKey && upper.type() != MaxKey && upper.isSimpleType() ) { // TODO: get rid of isSimpleType
+ if( upper.type() == Date )
+ lowerInclusive = false;
+ BSONObjBuilder b;
+ b.appendMinForType( upper.fieldName() , upper.type() );
+ lower = addObj( b.obj() ).firstElement();
+ }
+ }
+
+ }
+
+ void FieldRange::finishOperation( const vector<FieldInterval> &newIntervals, const FieldRange &other ) {
+ _intervals = newIntervals;
+ for( vector<BSONObj>::const_iterator i = other._objData.begin(); i != other._objData.end(); ++i )
+ _objData.push_back( *i );
+ if ( _special.size() == 0 && other._special.size() )
+ _special = other._special;
+ }
+
+ // as called, these functions find the max/min of a bound in the
+ // opposite direction, so inclusive bounds are considered less
+ // superlative
+ FieldBound maxFieldBound( const FieldBound &a, const FieldBound &b ) {
+ int cmp = a._bound.woCompare( b._bound, false );
+ if ( ( cmp == 0 && !b._inclusive ) || cmp < 0 )
+ return b;
+ return a;
+ }
+
+ FieldBound minFieldBound( const FieldBound &a, const FieldBound &b ) {
+ int cmp = a._bound.woCompare( b._bound, false );
+ if ( ( cmp == 0 && !b._inclusive ) || cmp > 0 )
+ return b;
+ return a;
+ }
+
+ bool fieldIntervalOverlap( const FieldInterval &one, const FieldInterval &two, FieldInterval &result ) {
+ result._lower = maxFieldBound( one._lower, two._lower );
+ result._upper = minFieldBound( one._upper, two._upper );
+ return result.strictValid();
+ }
+
+ const FieldRange &FieldRange::operator&=( const FieldRange &other ) {
+ if ( !_singleKey && nontrivial() ) {
+ if ( other <= *this ) {
+ *this = other;
+ }
+ return *this;
+ }
+ vector<FieldInterval> newIntervals;
+ vector<FieldInterval>::const_iterator i = _intervals.begin();
+ vector<FieldInterval>::const_iterator j = other._intervals.begin();
+ while( i != _intervals.end() && j != other._intervals.end() ) {
+ FieldInterval overlap;
+ if ( fieldIntervalOverlap( *i, *j, overlap ) ) {
+ newIntervals.push_back( overlap );
+ }
+ if ( i->_upper == minFieldBound( i->_upper, j->_upper ) ) {
+ ++i;
+ }
+ else {
+ ++j;
+ }
+ }
+ finishOperation( newIntervals, other );
+ return *this;
+ }
+
+ void handleInterval( const FieldInterval &lower, FieldBound &low, FieldBound &high, vector<FieldInterval> &newIntervals ) {
+ if ( low._bound.eoo() ) {
+ low = lower._lower; high = lower._upper;
+ }
+ else {
+ int cmp = high._bound.woCompare( lower._lower._bound, false );
+ if ( ( cmp < 0 ) || ( cmp == 0 && !high._inclusive && !lower._lower._inclusive ) ) {
+ FieldInterval tmp;
+ tmp._lower = low;
+ tmp._upper = high;
+ newIntervals.push_back( tmp );
+ low = lower._lower; high = lower._upper;
+ }
+ else {
+ high = lower._upper;
+ }
+ }
+ }
+
+ const FieldRange &FieldRange::operator|=( const FieldRange &other ) {
+ vector<FieldInterval> newIntervals;
+ FieldBound low;
+ FieldBound high;
+ vector<FieldInterval>::const_iterator i = _intervals.begin();
+ vector<FieldInterval>::const_iterator j = other._intervals.begin();
+ while( i != _intervals.end() && j != other._intervals.end() ) {
+ int cmp = i->_lower._bound.woCompare( j->_lower._bound, false );
+ if ( ( cmp == 0 && i->_lower._inclusive ) || cmp < 0 ) {
+ handleInterval( *i, low, high, newIntervals );
+ ++i;
+ }
+ else {
+ handleInterval( *j, low, high, newIntervals );
+ ++j;
+ }
+ }
+ while( i != _intervals.end() ) {
+ handleInterval( *i, low, high, newIntervals );
+ ++i;
+ }
+ while( j != other._intervals.end() ) {
+ handleInterval( *j, low, high, newIntervals );
+ ++j;
+ }
+ FieldInterval tmp;
+ tmp._lower = low;
+ tmp._upper = high;
+ newIntervals.push_back( tmp );
+ finishOperation( newIntervals, other );
+ return *this;
+ }
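+
+ /* Conceptual examples of the interval union performed above (hypothetical bounds):
+ *
+ *   [1,3) | [2,5]   ->  [1,5]           overlapping intervals are merged
+ *   [1,2] | [4,5]   ->  [1,2], [4,5]    disjoint intervals are kept separate
+ */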
+
+ const FieldRange &FieldRange::operator-=( const FieldRange &other ) {
+ vector<FieldInterval> newIntervals;
+ vector<FieldInterval>::iterator i = _intervals.begin();
+ vector<FieldInterval>::const_iterator j = other._intervals.begin();
+ while( i != _intervals.end() && j != other._intervals.end() ) {
+ int cmp = i->_lower._bound.woCompare( j->_lower._bound, false );
+ if ( cmp < 0 ||
+ ( cmp == 0 && i->_lower._inclusive && !j->_lower._inclusive ) ) {
+ int cmp2 = i->_upper._bound.woCompare( j->_lower._bound, false );
+ if ( cmp2 < 0 ) {
+ newIntervals.push_back( *i );
+ ++i;
+ }
+ else if ( cmp2 == 0 ) {
+ newIntervals.push_back( *i );
+ if ( newIntervals.back()._upper._inclusive && j->_lower._inclusive ) {
+ newIntervals.back()._upper._inclusive = false;
+ }
+ ++i;
+ }
+ else {
+ newIntervals.push_back( *i );
+ newIntervals.back()._upper = j->_lower;
+ newIntervals.back()._upper.flipInclusive();
+ int cmp3 = i->_upper._bound.woCompare( j->_upper._bound, false );
+ if ( cmp3 < 0 ||
+ ( cmp3 == 0 && ( !i->_upper._inclusive || j->_upper._inclusive ) ) ) {
+ ++i;
+ }
+ else {
+ i->_lower = j->_upper;
+ i->_lower.flipInclusive();
+ ++j;
+ }
+ }
+ }
+ else {
+ int cmp2 = i->_lower._bound.woCompare( j->_upper._bound, false );
+ if ( cmp2 > 0 ||
+ ( cmp2 == 0 && ( !i->_lower._inclusive || !j->_upper._inclusive ) ) ) {
+ ++j;
+ }
+ else {
+ int cmp3 = i->_upper._bound.woCompare( j->_upper._bound, false );
+ if ( cmp3 < 0 ||
+ ( cmp3 == 0 && ( !i->_upper._inclusive || j->_upper._inclusive ) ) ) {
+ ++i;
+ }
+ else {
+ i->_lower = j->_upper;
+ i->_lower.flipInclusive();
+ ++j;
+ }
+ }
+ }
+ }
+ while( i != _intervals.end() ) {
+ newIntervals.push_back( *i );
+ ++i;
+ }
+ finishOperation( newIntervals, other );
+ return *this;
+ }
+
+ // TODO write a proper implementation that doesn't do a full copy
+ bool FieldRange::operator<=( const FieldRange &other ) const {
+ FieldRange temp = *this;
+ temp -= other;
+ return temp.empty();
+ }
+
+ void FieldRange::setExclusiveBounds() {
+ for( vector<FieldInterval>::iterator i = _intervals.begin(); i != _intervals.end(); ++i ) {
+ i->_lower._inclusive = false;
+ i->_upper._inclusive = false;
+ }
+ }
+
+ void FieldRange::reverse( FieldRange &ret ) const {
+ assert( _special.empty() );
+ ret._intervals.clear();
+ ret._objData = _objData;
+ for( vector<FieldInterval>::const_reverse_iterator i = _intervals.rbegin(); i != _intervals.rend(); ++i ) {
+ FieldInterval fi;
+ fi._lower = i->_upper;
+ fi._upper = i->_lower;
+ ret._intervals.push_back( fi );
+ }
+ }
+
+ BSONObj FieldRange::addObj( const BSONObj &o ) {
+ _objData.push_back( o );
+ return o;
+ }
+
+ string FieldInterval::toString() const {
+ StringBuilder buf;
+ buf << ( _lower._inclusive ? "[" : "(" );
+ buf << _lower._bound;
+ buf << " , ";
+ buf << _upper._bound;
+ buf << ( _upper._inclusive ? "]" : ")" );
+ return buf.str();
+ }
+
+ string FieldRange::toString() const {
+ StringBuilder buf;
+ buf << "(FieldRange special: " << _special << " singleKey: " << _special << " intervals: ";
+ for( vector<FieldInterval>::const_iterator i = _intervals.begin(); i != _intervals.end(); ++i ) {
+ buf << i->toString();
+ }
+
+ buf << ")";
+ return buf.str();
+ }
+
+ string FieldRangeSet::getSpecial() const {
+ string s = "";
+ for ( map<string,FieldRange>::const_iterator i=_ranges.begin(); i!=_ranges.end(); i++ ) {
+ if ( i->second.getSpecial().size() == 0 )
+ continue;
+ uassert( 13033 , "can't have 2 special fields" , s.size() == 0 );
+ s = i->second.getSpecial();
+ }
+ return s;
+ }
+
+ /**
+ * Btree scanning for a multidimensional key range will yield a
+ * multidimensional box. The idea here is that if an 'other'
+ * multidimensional box contains the current box we don't have to scan
+ * the current box. If the 'other' box contains the current box in
+ * all dimensions but one, we can safely subtract the values of 'other'
+ * along that one dimension from the values for the current box on the
+ * same dimension. In other situations, subtracting the 'other'
+ * box from the current box yields a result that is not a box (but
+ * rather can be expressed as a union of boxes). We don't support
+ * such splitting currently in calculating index ranges. Note that
+ * where I have said 'box' above, I actually mean sets of boxes because
+ * a field range can consist of multiple intervals.
+ */
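+ /* Sketch with hypothetical single-interval ranges: if
+ *     *this = { a: [2,4], b: [1,10] }   and   other = { a: [0,10], b: [1,3] }
+ * then 'other' contains *this on 'a' only, so the subtraction is applied to 'b'
+ * alone, leaving *this = { a: [2,4], b: (3,10] }.  Had *this escaped containment
+ * on two or more fields, *this would be returned unchanged.
+ */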
+ const FieldRangeSet &FieldRangeSet::operator-=( const FieldRangeSet &other ) {
+ int nUnincluded = 0;
+ string unincludedKey;
+ map<string,FieldRange>::iterator i = _ranges.begin();
+ map<string,FieldRange>::const_iterator j = other._ranges.begin();
+ while( nUnincluded < 2 && i != _ranges.end() && j != other._ranges.end() ) {
+ int cmp = i->first.compare( j->first );
+ if ( cmp == 0 ) {
+ if ( i->second <= j->second ) {
+ // nothing
+ }
+ else {
+ ++nUnincluded;
+ unincludedKey = i->first;
+ }
+ ++i;
+ ++j;
+ }
+ else if ( cmp < 0 ) {
+ ++i;
+ }
+ else {
+ // other has a bound we don't, nothing can be done
+ return *this;
+ }
+ }
+ if ( j != other._ranges.end() ) {
+ // other has a bound we don't, nothing can be done
+ return *this;
+ }
+ if ( nUnincluded > 1 ) {
+ return *this;
+ }
+ if ( nUnincluded == 0 ) {
+ makeEmpty();
+ return *this;
+ }
+ // nUnincluded == 1
+ range( unincludedKey.c_str() ) -= other.range( unincludedKey.c_str() );
+ appendQueries( other );
+ return *this;
+ }
+
+ const FieldRangeSet &FieldRangeSet::operator&=( const FieldRangeSet &other ) {
+ map<string,FieldRange>::iterator i = _ranges.begin();
+ map<string,FieldRange>::const_iterator j = other._ranges.begin();
+ while( i != _ranges.end() && j != other._ranges.end() ) {
+ int cmp = i->first.compare( j->first );
+ if ( cmp == 0 ) {
+ // Same field name, so find range intersection.
+ i->second &= j->second;
+ ++i;
+ ++j;
+ }
+ else if ( cmp < 0 ) {
+ // Field present in *this.
+ ++i;
+ }
+ else {
+ // Field not present in *this, so add it.
+ range( j->first.c_str() ) = j->second;
+ ++j;
+ }
+ }
+ while( j != other._ranges.end() ) {
+ // Field not present in *this, add it.
+ range( j->first.c_str() ) = j->second;
+ ++j;
+ }
+ appendQueries( other );
+ return *this;
+ }
+
+ void FieldRangeSet::appendQueries( const FieldRangeSet &other ) {
+ for( vector<BSONObj>::const_iterator i = other._queries.begin(); i != other._queries.end(); ++i ) {
+ _queries.push_back( *i );
+ }
+ }
+
+ void FieldRangeSet::makeEmpty() {
+ for( map<string,FieldRange>::iterator i = _ranges.begin(); i != _ranges.end(); ++i ) {
+ i->second.makeEmpty();
+ }
+ }
+
+ void FieldRangeSet::processOpElement( const char *fieldName, const BSONElement &f, bool isNot, bool optimize ) {
+ BSONElement g = f;
+ int op2 = g.getGtLtOp();
+ if ( op2 == BSONObj::opALL ) {
+ BSONElement h = g;
+ uassert( 13050 , "$all requires array", h.type() == Array );
+ BSONObjIterator i( h.embeddedObject() );
+ if( i.more() ) {
+ BSONElement x = i.next();
+ if ( x.type() == Object && x.embeddedObject().firstElement().getGtLtOp() == BSONObj::opELEM_MATCH ) {
+ g = x.embeddedObject().firstElement();
+ op2 = g.getGtLtOp();
+ }
+ }
+ }
+ if ( op2 == BSONObj::opELEM_MATCH ) {
+ BSONObjIterator k( g.embeddedObjectUserCheck() );
+ while ( k.more() ) {
+ BSONElement h = k.next();
+ StringBuilder buf(32);
+ buf << fieldName << "." << h.fieldName();
+ string fullname = buf.str();
+
+ int op3 = getGtLtOp( h );
+ if ( op3 == BSONObj::Equality ) {
+ range( fullname.c_str() ) &= FieldRange( h , _singleKey , isNot , optimize );
+ }
+ else {
+ BSONObjIterator l( h.embeddedObject() );
+ while ( l.more() ) {
+ range( fullname.c_str() ) &= FieldRange( l.next() , _singleKey , isNot , optimize );
+ }
+ }
+ }
+ }
+ else {
+ range( fieldName ) &= FieldRange( f , _singleKey , isNot , optimize );
+ }
+ }
+
+ void FieldRangeSet::processQueryField( const BSONElement &e, bool optimize ) {
+ if ( e.fieldName()[ 0 ] == '$' ) {
+ if ( strcmp( e.fieldName(), "$and" ) == 0 ) {
+ uassert( 14816 , "$and expression must be a nonempty array" , e.type() == Array && e.embeddedObject().nFields() > 0 );
+ BSONObjIterator i( e.embeddedObject() );
+ while( i.more() ) {
+ BSONElement e = i.next();
+ uassert( 14817 , "$and elements must be objects" , e.type() == Object );
+ BSONObjIterator j( e.embeddedObject() );
+ while( j.more() ) {
+ processQueryField( j.next(), optimize );
+ }
+ }
+ }
+
+ if ( strcmp( e.fieldName(), "$where" ) == 0 ) {
+ return;
+ }
+
+ if ( strcmp( e.fieldName(), "$or" ) == 0 ) {
+ return;
+ }
+
+ if ( strcmp( e.fieldName(), "$nor" ) == 0 ) {
+ return;
+ }
+ }
+
+ bool equality = ( getGtLtOp( e ) == BSONObj::Equality );
+ if ( equality && e.type() == Object ) {
+ equality = ( strcmp( e.embeddedObject().firstElementFieldName(), "$not" ) != 0 );
+ }
+
+ if ( equality || ( e.type() == Object && !e.embeddedObject()[ "$regex" ].eoo() ) ) {
+ range( e.fieldName() ) &= FieldRange( e , _singleKey , false , optimize );
+ }
+ if ( !equality ) {
+ BSONObjIterator j( e.embeddedObject() );
+ while( j.more() ) {
+ BSONElement f = j.next();
+ if ( strcmp( f.fieldName(), "$not" ) == 0 ) {
+ switch( f.type() ) {
+ case Object: {
+ BSONObjIterator k( f.embeddedObject() );
+ while( k.more() ) {
+ BSONElement g = k.next();
+ uassert( 13034, "invalid use of $not", g.getGtLtOp() != BSONObj::Equality );
+ processOpElement( e.fieldName(), g, true, optimize );
+ }
+ break;
+ }
+ case RegEx:
+ processOpElement( e.fieldName(), f, true, optimize );
+ break;
+ default:
+ uassert( 13041, "invalid use of $not", false );
+ }
+ }
+ else {
+ processOpElement( e.fieldName(), f, false, optimize );
+ }
+ }
+ }
+ }
+
+ FieldRangeSet::FieldRangeSet( const char *ns, const BSONObj &query, bool singleKey, bool optimize )
+ : _ns( ns ), _queries( 1, query.getOwned() ), _singleKey( singleKey ) {
+ BSONObjIterator i( _queries[ 0 ] );
+
+ while( i.more() ) {
+ processQueryField( i.next(), optimize );
+ }
+ }
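+
+    // Minimal usage sketch (hypothetical namespace and query, for illustration only):
+    //   FieldRangeSet frs( "test.coll", BSON( "a" << GTE << 3 << LT << 7 << "b" << 5 ), true );
+    //   // frs.range( "a" ) now covers [3,7) and frs.range( "b" ) is the single point 5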
+
+ FieldRangeVector::FieldRangeVector( const FieldRangeSet &frs, const IndexSpec &indexSpec, int direction )
+ :_indexSpec( indexSpec ), _direction( direction >= 0 ? 1 : -1 ) {
+ _queries = frs._queries;
+ BSONObjIterator i( _indexSpec.keyPattern );
+ set< string > baseObjectNontrivialPrefixes;
+ while( i.more() ) {
+ BSONElement e = i.next();
+ const FieldRange *range = &frs.range( e.fieldName() );
+ if ( !frs.singleKey() ) {
+ string prefix = str::before( e.fieldName(), '.' );
+ if ( baseObjectNontrivialPrefixes.count( prefix ) > 0 ) {
+ // A field with the same parent field has already been
+                    // constrained, and with a multikey index we cannot
+ // constrain this field.
+ range = &frs.trivialRange();
+ } else {
+ if ( range->nontrivial() ) {
+ baseObjectNontrivialPrefixes.insert( prefix );
+ }
+ }
+ }
+            int number = (int) e.number(); // e.number() returns 0 if the element is not numeric
+ bool forward = ( ( number >= 0 ? 1 : -1 ) * ( direction >= 0 ? 1 : -1 ) > 0 );
+ if ( forward ) {
+ _ranges.push_back( *range );
+ }
+ else {
+ _ranges.push_back( FieldRange( BSONObj().firstElement(), frs.singleKey(), false, true ) );
+ range->reverse( _ranges.back() );
+ }
+ assert( !_ranges.back().empty() );
+ }
+ uassert( 13385, "combinatorial limit of $in partitioning of result set exceeded", size() < 1000000 );
+ }
+
+ BSONObj FieldRangeVector::startKey() const {
+ BSONObjBuilder b;
+ for( vector<FieldRange>::const_iterator i = _ranges.begin(); i != _ranges.end(); ++i ) {
+ const FieldInterval &fi = i->intervals().front();
+ b.appendAs( fi._lower._bound, "" );
+ }
+ return b.obj();
+ }
+
+ BSONObj FieldRangeVector::endKey() const {
+ BSONObjBuilder b;
+ for( vector<FieldRange>::const_iterator i = _ranges.begin(); i != _ranges.end(); ++i ) {
+ const FieldInterval &fi = i->intervals().back();
+ b.appendAs( fi._upper._bound, "" );
+ }
+ return b.obj();
+ }
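+
+    // For example (hypothetical ranges): with a range of [1,5] on the first indexed field and
+    // [2,8] on the second, startKey() is { '': 1, '': 2 } and endKey() is { '': 5, '': 8 }.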
+
+ BSONObj FieldRangeVector::obj() const {
+ BSONObjBuilder b;
+ BSONObjIterator k( _indexSpec.keyPattern );
+ for( int i = 0; i < (int)_ranges.size(); ++i ) {
+ BSONArrayBuilder a( b.subarrayStart( k.next().fieldName() ) );
+ for( vector<FieldInterval>::const_iterator j = _ranges[ i ].intervals().begin();
+ j != _ranges[ i ].intervals().end(); ++j ) {
+ a << BSONArray( BSON_ARRAY( j->_lower._bound << j->_upper._bound ).clientReadable() );
+ }
+ a.done();
+ }
+ return b.obj();
+ }
+
+ FieldRange *FieldRangeSet::__singleKeyTrivialRange = 0;
+ FieldRange *FieldRangeSet::__multiKeyTrivialRange = 0;
+ const FieldRange &FieldRangeSet::trivialRange() const {
+ FieldRange *&ret = _singleKey ? __singleKeyTrivialRange : __multiKeyTrivialRange;
+ if ( ret == 0 ) {
+ ret = new FieldRange( BSONObj().firstElement(), _singleKey, false, true );
+ }
+ return *ret;
+ }
+
+ BSONObj FieldRangeSet::simplifiedQuery( const BSONObj &_fields ) const {
+ BSONObj fields = _fields;
+ if ( fields.isEmpty() ) {
+ BSONObjBuilder b;
+ for( map<string,FieldRange>::const_iterator i = _ranges.begin(); i != _ranges.end(); ++i ) {
+ b.append( i->first, 1 );
+ }
+ fields = b.obj();
+ }
+ BSONObjBuilder b;
+ BSONObjIterator i( fields );
+ while( i.more() ) {
+ BSONElement e = i.next();
+ const char *name = e.fieldName();
+ const FieldRange &eRange = range( name );
+ assert( !eRange.empty() );
+ if ( eRange.equality() )
+ b.appendAs( eRange.min(), name );
+ else if ( eRange.nontrivial() ) {
+ BSONObj o;
+ BSONObjBuilder c;
+ if ( eRange.min().type() != MinKey )
+ c.appendAs( eRange.min(), eRange.minInclusive() ? "$gte" : "$gt" );
+ if ( eRange.max().type() != MaxKey )
+ c.appendAs( eRange.max(), eRange.maxInclusive() ? "$lte" : "$lt" );
+ o = c.obj();
+ b.append( name, o );
+ }
+ }
+ return b.obj();
+ }
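+
+    // For illustration (hypothetical ranges): with a range of (3,7] on "x" and an equality of
+    // 5 on "y", simplifiedQuery() produces roughly { x: { $gt: 3, $lte: 7 }, y: 5 }.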
+
+ QueryPattern FieldRangeSet::pattern( const BSONObj &sort ) const {
+ return QueryPattern( *this, sort );
+ }
+
+ // TODO get rid of this
+ BoundList FieldRangeSet::indexBounds( const BSONObj &keyPattern, int direction ) const {
+ typedef vector<pair<shared_ptr<BSONObjBuilder>, shared_ptr<BSONObjBuilder> > > BoundBuilders;
+ BoundBuilders builders;
+ builders.push_back( make_pair( shared_ptr<BSONObjBuilder>( new BSONObjBuilder() ), shared_ptr<BSONObjBuilder>( new BSONObjBuilder() ) ) );
+ BSONObjIterator i( keyPattern );
+ bool ineq = false; // until ineq is true, we are just dealing with equality and $in bounds
+ while( i.more() ) {
+ BSONElement e = i.next();
+ const FieldRange &fr = range( e.fieldName() );
+            int number = (int) e.number(); // e.number() returns 0 if the element is not numeric
+ bool forward = ( ( number >= 0 ? 1 : -1 ) * ( direction >= 0 ? 1 : -1 ) > 0 );
+ if ( !ineq ) {
+ if ( fr.equality() ) {
+ for( BoundBuilders::const_iterator j = builders.begin(); j != builders.end(); ++j ) {
+ j->first->appendAs( fr.min(), "" );
+ j->second->appendAs( fr.min(), "" );
+ }
+ }
+ else {
+ if ( !fr.inQuery() ) {
+ ineq = true;
+ }
+ BoundBuilders newBuilders;
+ const vector<FieldInterval> &intervals = fr.intervals();
+ for( BoundBuilders::const_iterator i = builders.begin(); i != builders.end(); ++i ) {
+ BSONObj first = i->first->obj();
+ BSONObj second = i->second->obj();
+
+ const unsigned maxCombinations = 4000000;
+ if ( forward ) {
+ for( vector<FieldInterval>::const_iterator j = intervals.begin(); j != intervals.end(); ++j ) {
+ uassert( 13303, "combinatorial limit of $in partitioning of result set exceeded", newBuilders.size() < maxCombinations );
+ newBuilders.push_back( make_pair( shared_ptr<BSONObjBuilder>( new BSONObjBuilder() ), shared_ptr<BSONObjBuilder>( new BSONObjBuilder() ) ) );
+ newBuilders.back().first->appendElements( first );
+ newBuilders.back().second->appendElements( second );
+ newBuilders.back().first->appendAs( j->_lower._bound, "" );
+ newBuilders.back().second->appendAs( j->_upper._bound, "" );
+ }
+ }
+ else {
+ for( vector<FieldInterval>::const_reverse_iterator j = intervals.rbegin(); j != intervals.rend(); ++j ) {
+ uassert( 13304, "combinatorial limit of $in partitioning of result set exceeded", newBuilders.size() < maxCombinations );
+ newBuilders.push_back( make_pair( shared_ptr<BSONObjBuilder>( new BSONObjBuilder() ), shared_ptr<BSONObjBuilder>( new BSONObjBuilder() ) ) );
+ newBuilders.back().first->appendElements( first );
+ newBuilders.back().second->appendElements( second );
+ newBuilders.back().first->appendAs( j->_upper._bound, "" );
+ newBuilders.back().second->appendAs( j->_lower._bound, "" );
+ }
+ }
+ }
+ builders = newBuilders;
+ }
+ }
+ else {
+ for( BoundBuilders::const_iterator j = builders.begin(); j != builders.end(); ++j ) {
+ j->first->appendAs( forward ? fr.min() : fr.max(), "" );
+ j->second->appendAs( forward ? fr.max() : fr.min(), "" );
+ }
+ }
+ }
+ BoundList ret;
+ for( BoundBuilders::const_iterator i = builders.begin(); i != builders.end(); ++i )
+ ret.push_back( make_pair( i->first->obj(), i->second->obj() ) );
+ return ret;
+ }
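+
+    // Illustrative example (hypothetical query): for { a: { $in: [ 1, 4 ] }, b: { $gt: 0 } }
+    // with key pattern { a: 1, b: 1 } and direction 1, this produces roughly the bound pairs
+    //   ( { '': 1, '': 0 }, { '': 1, '': MaxKey } ) and ( { '': 4, '': 0 }, { '': 4, '': MaxKey } ),
+    // i.e. the $in values are expanded combinatorially ahead of the inequality on b.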
+
+ FieldRangeSet *FieldRangeSet::subset( const BSONObj &fields ) const {
+ FieldRangeSet *ret = new FieldRangeSet( _ns, BSONObj(), _singleKey, true );
+ BSONObjIterator i( fields );
+ while( i.more() ) {
+ BSONElement e = i.next();
+ if ( range( e.fieldName() ).nontrivial() ) {
+ ret->range( e.fieldName() ) = range( e.fieldName() );
+ }
+ }
+ ret->_queries = _queries;
+ return ret;
+ }
+
+ bool FieldRangeSetPair::noNontrivialRanges() const {
+ return _singleKey.matchPossible() && _singleKey.nNontrivialRanges() == 0 &&
+ _multiKey.matchPossible() && _multiKey.nNontrivialRanges() == 0;
+ }
+
+ FieldRangeSetPair &FieldRangeSetPair::operator&=( const FieldRangeSetPair &other ) {
+ _singleKey &= other._singleKey;
+ _multiKey &= other._multiKey;
+ return *this;
+ }
+
+ FieldRangeSetPair &FieldRangeSetPair::operator-=( const FieldRangeSet &scanned ) {
+ _singleKey -= scanned;
+ _multiKey -= scanned;
+ return *this;
+ }
+
+ BSONObj FieldRangeSetPair::simplifiedQueryForIndex( NamespaceDetails *d, int idxNo, const BSONObj &keyPattern ) const {
+ return frsForIndex( d, idxNo ).simplifiedQuery( keyPattern );
+ }
+
+ void FieldRangeSetPair::assertValidIndex( const NamespaceDetails *d, int idxNo ) const {
+ massert( 14048, "FieldRangeSetPair invalid index specified", idxNo >= 0 && idxNo < d->nIndexes );
+ }
+
+ const FieldRangeSet &FieldRangeSetPair::frsForIndex( const NamespaceDetails* nsd, int idxNo ) const {
+ assertValidIndexOrNoIndex( nsd, idxNo );
+ if ( idxNo < 0 ) {
+ // An unindexed cursor cannot have a "single key" constraint.
+ return _multiKey;
+ }
+ return nsd->isMultikey( idxNo ) ? _multiKey : _singleKey;
+ }
+
+ bool FieldRangeVector::matchesElement( const BSONElement &e, int i, bool forward ) const {
+ bool eq;
+ int l = matchingLowElement( e, i, forward, eq );
+ return ( l % 2 == 0 ); // if we're inside an interval
+ }
+
+ // binary search for interval containing the specified element
+ // an even return value indicates that the element is contained within a valid interval
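+    // e.g. (illustrative values) for a field whose intervals are [2,4] and [7,9] the probe
+    // positions are ordered  -1 | 0:lower(2)  1:upper(4) | 2:lower(7)  3:upper(9) |, so an
+    // element of 3 returns 0 (even - inside [2,4]), 5 returns 1 (odd - between intervals),
+    // and 10 returns 3 (odd - past the last interval)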
+ int FieldRangeVector::matchingLowElement( const BSONElement &e, int i, bool forward, bool &lowEquality ) const {
+ lowEquality = false;
+ int l = -1;
+ int h = _ranges[ i ].intervals().size() * 2;
+ while( l + 1 < h ) {
+ int m = ( l + h ) / 2;
+ BSONElement toCmp;
+ bool toCmpInclusive;
+ const FieldInterval &interval = _ranges[ i ].intervals()[ m / 2 ];
+ if ( m % 2 == 0 ) {
+ toCmp = interval._lower._bound;
+ toCmpInclusive = interval._lower._inclusive;
+ }
+ else {
+ toCmp = interval._upper._bound;
+ toCmpInclusive = interval._upper._inclusive;
+ }
+ int cmp = toCmp.woCompare( e, false );
+ if ( !forward ) {
+ cmp = -cmp;
+ }
+ if ( cmp < 0 ) {
+ l = m;
+ }
+ else if ( cmp > 0 ) {
+ h = m;
+ }
+ else {
+ if ( m % 2 == 0 ) {
+ lowEquality = true;
+ }
+ int ret = m;
+ // if left match and inclusive, all good
+ // if left match and not inclusive, return right before left bound
+ // if right match and inclusive, return left bound
+ // if right match and not inclusive, return right bound
+ if ( ( m % 2 == 0 && !toCmpInclusive ) || ( m % 2 == 1 && toCmpInclusive ) ) {
+ --ret;
+ }
+ return ret;
+ }
+ }
+ assert( l + 1 == h );
+ return l;
+ }
+
+ bool FieldRangeVector::matchesKey( const BSONObj &key ) const {
+ BSONObjIterator j( key );
+ BSONObjIterator k( _indexSpec.keyPattern );
+ for( int l = 0; l < (int)_ranges.size(); ++l ) {
+ int number = (int) k.next().number();
+ bool forward = ( number >= 0 ? 1 : -1 ) * ( _direction >= 0 ? 1 : -1 ) > 0;
+ if ( !matchesElement( j.next(), l, forward ) ) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ bool FieldRangeVector::matches( const BSONObj &obj ) const {
+
+ bool ok = false;
+
+ // TODO The representation of matching keys could potentially be optimized
+ // more for the case at hand. (For example, we can potentially consider
+ // fields individually instead of constructing several bson objects using
+ // multikey arrays.) But getKeys() canonically defines the key set for a
+ // given object and for now we are using it as is.
+ BSONObjSet keys;
+ _indexSpec.getKeys( obj, keys );
+ for( BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i ) {
+ if ( matchesKey( *i ) ) {
+ ok = true;
+ break;
+ }
+ }
+
+ LOG(5) << "FieldRangeVector::matches() returns " << ok << endl;
+
+ return ok;
+ }
+
+ BSONObj FieldRangeVector::firstMatch( const BSONObj &obj ) const {
+ // NOTE Only works in forward direction.
+ assert( _direction >= 0 );
+ BSONObjSet keys( BSONObjCmp( _indexSpec.keyPattern ) );
+ _indexSpec.getKeys( obj, keys );
+ for( BSONObjSet::const_iterator i = keys.begin(); i != keys.end(); ++i ) {
+ if ( matchesKey( *i ) ) {
+ return *i;
+ }
+ }
+ return BSONObj();
+ }
+
+ // TODO optimize more
+ int FieldRangeVectorIterator::advance( const BSONObj &curr ) {
+ BSONObjIterator j( curr );
+ BSONObjIterator o( _v._indexSpec.keyPattern );
+        // track the latest field for which we are not at the end of the valid values,
+ // since we may need to advance from the key prefix ending with this field
+ int latestNonEndpoint = -1;
+ // iterate over fields to determine appropriate advance method
+ for( int i = 0; i < (int)_i.size(); ++i ) {
+ if ( i > 0 && !_v._ranges[ i - 1 ].intervals()[ _i[ i - 1 ] ].equality() ) {
+ // if last bound was inequality, we don't know anything about where we are for this field
+                // TODO if possible avoid this in certain cases when the value in the previous field of the
+                // previous key is the same as the value of the previous field in the current key
+ setMinus( i );
+ }
+ bool eq = false;
+ BSONElement oo = o.next();
+ bool reverse = ( ( oo.number() < 0 ) ^ ( _v._direction < 0 ) );
+ BSONElement jj = j.next();
+ if ( _i[ i ] == -1 ) { // unknown position for this field, do binary search
+ bool lowEquality;
+ int l = _v.matchingLowElement( jj, i, !reverse, lowEquality );
+ if ( l % 2 == 0 ) { // we are in a valid range for this field
+ _i[ i ] = l / 2;
+ int diff = (int)_v._ranges[ i ].intervals().size() - _i[ i ];
+ if ( diff > 1 ) {
+ latestNonEndpoint = i;
+ }
+ else if ( diff == 1 ) {
+ int x = _v._ranges[ i ].intervals()[ _i[ i ] ]._upper._bound.woCompare( jj, false );
+ if ( x != 0 ) {
+ latestNonEndpoint = i;
+ }
+ }
+ continue;
+ }
+ else { // not in a valid range for this field - determine if and how to advance
+ // check if we're after the last interval for this field
+ if ( l == (int)_v._ranges[ i ].intervals().size() * 2 - 1 ) {
+ if ( latestNonEndpoint == -1 ) {
+ return -2;
+ }
+ setZero( latestNonEndpoint + 1 );
+ // skip to curr / latestNonEndpoint + 1 / superlative
+ _after = true;
+ return latestNonEndpoint + 1;
+ }
+ _i[ i ] = ( l + 1 ) / 2;
+ if ( lowEquality ) {
+ // skip to curr / i + 1 / superlative
+ _after = true;
+ return i + 1;
+ }
+ // skip to curr / i / nextbounds
+ _cmp[ i ] = &_v._ranges[ i ].intervals()[ _i[ i ] ]._lower._bound;
+ _inc[ i ] = _v._ranges[ i ].intervals()[ _i[ i ] ]._lower._inclusive;
+ for( int j = i + 1; j < (int)_i.size(); ++j ) {
+ _cmp[ j ] = &_v._ranges[ j ].intervals().front()._lower._bound;
+ _inc[ j ] = _v._ranges[ j ].intervals().front()._lower._inclusive;
+ }
+ _after = false;
+ return i;
+ }
+ }
+ bool first = true;
+ // _i[ i ] != -1, so we have a starting interval for this field
+ // which serves as a lower/equal bound on the first iteration -
+ // we advance from this interval to find a matching interval
+ while( _i[ i ] < (int)_v._ranges[ i ].intervals().size() ) {
+ // compare to current interval's upper bound
+ int x = _v._ranges[ i ].intervals()[ _i[ i ] ]._upper._bound.woCompare( jj, false );
+ if ( reverse ) {
+ x = -x;
+ }
+ if ( x == 0 && _v._ranges[ i ].intervals()[ _i[ i ] ]._upper._inclusive ) {
+ eq = true;
+ break;
+ }
+ // see if we're less than the upper bound
+ if ( x > 0 ) {
+ if ( i == 0 && first ) {
+ // the value of 1st field won't go backward, so don't check lower bound
+ // TODO maybe we can check first only?
+ break;
+ }
+ // if it's an equality interval, don't need to compare separately to lower bound
+ if ( !_v._ranges[ i ].intervals()[ _i[ i ] ].equality() ) {
+ // compare to current interval's lower bound
+ x = _v._ranges[ i ].intervals()[ _i[ i ] ]._lower._bound.woCompare( jj, false );
+ if ( reverse ) {
+ x = -x;
+ }
+ }
+ // if we're equal to and not inclusive the lower bound, advance
+ if ( ( x == 0 && !_v._ranges[ i ].intervals()[ _i[ i ] ]._lower._inclusive ) ) {
+ setZero( i + 1 );
+ // skip to curr / i + 1 / superlative
+ _after = true;
+ return i + 1;
+ }
+ // if we're less than the lower bound, advance
+ if ( x > 0 ) {
+ setZero( i + 1 );
+ // skip to curr / i / nextbounds
+ _cmp[ i ] = &_v._ranges[ i ].intervals()[ _i[ i ] ]._lower._bound;
+ _inc[ i ] = _v._ranges[ i ].intervals()[ _i[ i ] ]._lower._inclusive;
+ for( int j = i + 1; j < (int)_i.size(); ++j ) {
+ _cmp[ j ] = &_v._ranges[ j ].intervals().front()._lower._bound;
+ _inc[ j ] = _v._ranges[ j ].intervals().front()._lower._inclusive;
+ }
+ _after = false;
+ return i;
+ }
+ else {
+ break;
+ }
+ }
+ // we're above the upper bound, so try next interval and reset remaining fields
+ ++_i[ i ];
+ setZero( i + 1 );
+ first = false;
+ }
+ int diff = (int)_v._ranges[ i ].intervals().size() - _i[ i ];
+ if ( diff > 1 || ( !eq && diff == 1 ) ) {
+ // check if we're not at the end of valid values for this field
+ latestNonEndpoint = i;
+ }
+ else if ( diff == 0 ) { // check if we're past the last interval for this field
+ if ( latestNonEndpoint == -1 ) {
+ return -2;
+ }
+ // more values possible, skip...
+ setZero( latestNonEndpoint + 1 );
+ // skip to curr / latestNonEndpoint + 1 / superlative
+ _after = true;
+ return latestNonEndpoint + 1;
+ }
+ }
+ return -1;
+ }
+
+ void FieldRangeVectorIterator::prepDive() {
+ for( int j = 0; j < (int)_i.size(); ++j ) {
+ _cmp[ j ] = &_v._ranges[ j ].intervals().front()._lower._bound;
+ _inc[ j ] = _v._ranges[ j ].intervals().front()._lower._inclusive;
+ }
+ }
+
+ BSONObj FieldRangeVectorIterator::startKey() {
+ BSONObjBuilder b;
+        for( unsigned int i = 0; i < _i.size(); ++i ) {
+ const FieldInterval &fi = _v._ranges[ i ].intervals()[ _i[ i ] ];
+ b.appendAs( fi._lower._bound, "" );
+ }
+ return b.obj();
+ }
+
+ // temp
+ BSONObj FieldRangeVectorIterator::endKey() {
+ BSONObjBuilder b;
+        for( unsigned int i = 0; i < _i.size(); ++i ) {
+ const FieldInterval &fi = _v._ranges[ i ].intervals()[ _i[ i ] ];
+ b.appendAs( fi._upper._bound, "" );
+ }
+ return b.obj();
+ }
+
+ OrRangeGenerator::OrRangeGenerator( const char *ns, const BSONObj &query , bool optimize )
+ : _baseSet( ns, query, optimize ), _orFound() {
+
+ BSONObjIterator i( _baseSet.originalQuery() );
+
+ while( i.more() ) {
+ BSONElement e = i.next();
+ if ( strcmp( e.fieldName(), "$or" ) == 0 ) {
+ uassert( 13262, "$or requires nonempty array", e.type() == Array && e.embeddedObject().nFields() > 0 );
+ BSONObjIterator j( e.embeddedObject() );
+ while( j.more() ) {
+ BSONElement f = j.next();
+ uassert( 13263, "$or array must contain objects", f.type() == Object );
+ _orSets.push_back( FieldRangeSetPair( ns, f.embeddedObject(), optimize ) );
+ uassert( 13291, "$or may not contain 'special' query", _orSets.back().getSpecial().empty() );
+ _originalOrSets.push_back( _orSets.back() );
+ }
+ _orFound = true;
+ continue;
+ }
+ }
+ }
+
+ void OrRangeGenerator::assertMayPopOrClause() {
+ massert( 13274, "no or clause to pop", !orFinished() );
+ }
+
+ void OrRangeGenerator::popOrClause( NamespaceDetails *nsd, int idxNo, const BSONObj &keyPattern ) {
+ assertMayPopOrClause();
+ auto_ptr<FieldRangeSet> holder;
+ const FieldRangeSet *toDiff = &_originalOrSets.front().frsForIndex( nsd, idxNo );
+ BSONObj indexSpec = keyPattern;
+ if ( !indexSpec.isEmpty() && toDiff->matchPossibleForIndex( indexSpec ) ) {
+ holder.reset( toDiff->subset( indexSpec ) );
+ toDiff = holder.get();
+ }
+ popOrClause( toDiff, nsd, idxNo, keyPattern );
+ }
+
+ void OrRangeGenerator::popOrClauseSingleKey() {
+ assertMayPopOrClause();
+ FieldRangeSet *toDiff = &_originalOrSets.front()._singleKey;
+ popOrClause( toDiff );
+ }
+
+ /**
+ * Removes the top or clause, which would have been recently scanned, and
+ * removes the field ranges it covers from all subsequent or clauses. As a
+ * side effect, this function may invalidate the return values of topFrs()
+ * calls made before this function was called.
+     * @param keyPattern - Keys of the index that was used to satisfy the last or
+ * clause. Used to determine the range of keys that were scanned. If
+ * empty we do not constrain the previous clause's ranges using index keys,
+ * which may reduce opportunities for range elimination.
+ */
+ void OrRangeGenerator::popOrClause( const FieldRangeSet *toDiff, NamespaceDetails *d, int idxNo, const BSONObj &keyPattern ) {
+ list<FieldRangeSetPair>::iterator i = _orSets.begin();
+ list<FieldRangeSetPair>::iterator j = _originalOrSets.begin();
+ ++i;
+ ++j;
+ while( i != _orSets.end() ) {
+ *i -= *toDiff;
+ // Check if match is possible at all, and if it is possible for the recently scanned index.
+ if( !i->matchPossible() || ( d && !i->matchPossibleForIndex( d, idxNo, keyPattern ) ) ) {
+ i = _orSets.erase( i );
+ j = _originalOrSets.erase( j );
+ }
+ else {
+ ++i;
+ ++j;
+ }
+ }
+ _oldOrSets.push_front( _orSets.front() );
+ _orSets.pop_front();
+ _originalOrSets.pop_front();
+ }
+
+ struct SimpleRegexUnitTest : UnitTest {
+ void run() {
+ {
+ BSONObjBuilder b;
+ b.appendRegex("r", "^foo");
+ BSONObj o = b.done();
+ assert( simpleRegex(o.firstElement()) == "foo" );
+ }
+ {
+ BSONObjBuilder b;
+ b.appendRegex("r", "^f?oo");
+ BSONObj o = b.done();
+ assert( simpleRegex(o.firstElement()) == "" );
+ }
+ {
+ BSONObjBuilder b;
+ b.appendRegex("r", "^fz?oo");
+ BSONObj o = b.done();
+ assert( simpleRegex(o.firstElement()) == "f" );
+ }
+ {
+ BSONObjBuilder b;
+ b.appendRegex("r", "^f", "");
+ BSONObj o = b.done();
+ assert( simpleRegex(o.firstElement()) == "f" );
+ }
+ {
+ BSONObjBuilder b;
+ b.appendRegex("r", "\\Af", "");
+ BSONObj o = b.done();
+ assert( simpleRegex(o.firstElement()) == "f" );
+ }
+ {
+ BSONObjBuilder b;
+ b.appendRegex("r", "^f", "m");
+ BSONObj o = b.done();
+ assert( simpleRegex(o.firstElement()) == "" );
+ }
+ {
+ BSONObjBuilder b;
+ b.appendRegex("r", "\\Af", "m");
+ BSONObj o = b.done();
+ assert( simpleRegex(o.firstElement()) == "f" );
+ }
+ {
+ BSONObjBuilder b;
+ b.appendRegex("r", "\\Af", "mi");
+ BSONObj o = b.done();
+ assert( simpleRegex(o.firstElement()) == "" );
+ }
+ {
+ BSONObjBuilder b;
+ b.appendRegex("r", "\\Af \t\vo\n\ro \\ \\# #comment", "mx");
+ BSONObj o = b.done();
+ assert( simpleRegex(o.firstElement()) == "foo #" );
+ }
+ {
+ assert( simpleRegex("^\\Qasdf\\E", "", NULL) == "asdf" );
+ assert( simpleRegex("^\\Qasdf\\E.*", "", NULL) == "asdf" );
+ assert( simpleRegex("^\\Qasdf", "", NULL) == "asdf" ); // PCRE supports this
+ assert( simpleRegex("^\\Qasdf\\\\E", "", NULL) == "asdf\\" );
+ assert( simpleRegex("^\\Qas.*df\\E", "", NULL) == "as.*df" );
+ assert( simpleRegex("^\\Qas\\Q[df\\E", "", NULL) == "as\\Q[df" );
+ assert( simpleRegex("^\\Qas\\E\\\\E\\Q$df\\E", "", NULL) == "as\\E$df" ); // quoted string containing \E
+ }
+
+ }
+ } simple_regex_unittest;
+
+
+ long long applySkipLimit( long long num , const BSONObj& cmd ) {
+ BSONElement s = cmd["skip"];
+ BSONElement l = cmd["limit"];
+
+ if ( s.isNumber() ) {
+ num = num - s.numberLong();
+ if ( num < 0 ) {
+ num = 0;
+ }
+ }
+
+ if ( l.isNumber() ) {
+ long long limit = l.numberLong();
+ if ( limit < num ) {
+ num = limit;
+ }
+ }
+
+ return num;
+ }
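+
+    // e.g. (hypothetical values) applySkipLimit( 100, BSON( "skip" << 20 << "limit" << 50 ) )
+    // first reduces 100 to 80 via the skip, then caps the result at the limit, returning 50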
+
+
+} // namespace mongo
diff --git a/src/mongo/db/queryutil.h b/src/mongo/db/queryutil.h
new file mode 100644
index 00000000000..aefef27cc8b
--- /dev/null
+++ b/src/mongo/db/queryutil.h
@@ -0,0 +1,443 @@
+// @file queryutil.h - Utility classes representing ranges of valid BSONElement values for a query.
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "jsobj.h"
+#include "indexkey.h"
+
+namespace mongo {
+
+ /**
+ * One side of an interval of valid BSONElements, specified by a value and a
+ * boolean indicating whether the interval includes the value.
+ */
+ struct FieldBound {
+ BSONElement _bound;
+ bool _inclusive;
+ bool operator==( const FieldBound &other ) const {
+ return _bound.woCompare( other._bound ) == 0 &&
+ _inclusive == other._inclusive;
+ }
+ void flipInclusive() { _inclusive = !_inclusive; }
+ };
+
+ /** A closed interval composed of a lower and an upper FieldBound. */
+ struct FieldInterval {
+ FieldInterval() : _cachedEquality( -1 ) {}
+ FieldInterval( const BSONElement& e ) : _cachedEquality( -1 ) {
+ _lower._bound = _upper._bound = e;
+ _lower._inclusive = _upper._inclusive = true;
+ }
+ FieldBound _lower;
+ FieldBound _upper;
+        /** @return true iff the interval is valid, ie the lower bound does not sort after the upper bound (equal bounds must both be inclusive). */
+ bool strictValid() const {
+ int cmp = _lower._bound.woCompare( _upper._bound, false );
+ return ( cmp < 0 || ( cmp == 0 && _lower._inclusive && _upper._inclusive ) );
+ }
+ /** @return true iff the interval is an equality constraint. */
+ bool equality() const;
+ mutable int _cachedEquality;
+
+ string toString() const;
+ };
+
+ /**
+ * An ordered list of FieldIntervals expressing constraints on valid
+ * BSONElement values for a field.
+ */
+ class FieldRange {
+ public:
+ FieldRange( const BSONElement &e , bool singleKey , bool isNot=false , bool optimize=true );
+
+ /** @return Range intersection with 'other'. */
+ const FieldRange &operator&=( const FieldRange &other );
+ /** @return Range union with 'other'. */
+ const FieldRange &operator|=( const FieldRange &other );
+        /** @return Range of elements included in 'this' but not 'other'. */
+ const FieldRange &operator-=( const FieldRange &other );
+ /** @return true iff this range is a subset of 'other'. */
+ bool operator<=( const FieldRange &other ) const;
+
+ /**
+ * If there are any valid values for this range, the extreme values can
+ * be extracted.
+ */
+
+ BSONElement min() const { assert( !empty() ); return _intervals[ 0 ]._lower._bound; }
+ BSONElement max() const { assert( !empty() ); return _intervals[ _intervals.size() - 1 ]._upper._bound; }
+ bool minInclusive() const { assert( !empty() ); return _intervals[ 0 ]._lower._inclusive; }
+ bool maxInclusive() const { assert( !empty() ); return _intervals[ _intervals.size() - 1 ]._upper._inclusive; }
+
+ /** @return true iff this range expresses a single equality interval. */
+ bool equality() const;
+ /** @return true if all the intervals for this range are equalities */
+ bool inQuery() const;
+ /** @return true iff this range does not include every BSONElement */
+ bool nontrivial() const;
+ /** @return true iff this range matches no BSONElements. */
+ bool empty() const { return _intervals.empty(); }
+
+ /** Empty the range so it matches no BSONElements. */
+ void makeEmpty() { _intervals.clear(); }
+ const vector<FieldInterval> &intervals() const { return _intervals; }
+ string getSpecial() const { return _special; }
+ /** Make component intervals noninclusive. */
+ void setExclusiveBounds();
+ /**
+ * Constructs a range where all FieldIntervals and FieldBounds are in
+ * the opposite order of the current range.
+ * NOTE the resulting intervals might not be strictValid().
+ */
+ void reverse( FieldRange &ret ) const;
+
+ string toString() const;
+ private:
+ BSONObj addObj( const BSONObj &o );
+ void finishOperation( const vector<FieldInterval> &newIntervals, const FieldRange &other );
+ vector<FieldInterval> _intervals;
+ // Owns memory for our BSONElements.
+ vector<BSONObj> _objData;
+ string _special;
+ bool _singleKey;
+ };
+
+ /**
+ * A BoundList contains intervals specified by inclusive start
+ * and end bounds. The intervals should be nonoverlapping and occur in
+ * the specified direction of traversal. For example, given a simple index {i:1}
+ * and direction +1, one valid BoundList is: (1, 2); (4, 6). The same BoundList
+ * would be valid for index {i:-1} with direction -1.
+ */
+ typedef vector<pair<BSONObj,BSONObj> > BoundList;
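+    // A sketch of the example above (hypothetical values), using the empty field names that
+    // index bound objects conventionally carry:
+    //   BoundList bounds;
+    //   bounds.push_back( make_pair( BSON( "" << 1 ), BSON( "" << 2 ) ) );
+    //   bounds.push_back( make_pair( BSON( "" << 4 ), BSON( "" << 6 ) ) );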
+
+ class QueryPattern;
+
+ /**
+ * A set of FieldRanges determined from constraints on the fields of a query,
+ * that may be used to determine index bounds.
+ */
+ class FieldRangeSet {
+ public:
+ friend class OrRangeGenerator;
+ friend class FieldRangeVector;
+ FieldRangeSet( const char *ns, const BSONObj &query , bool singleKey , bool optimize=true );
+
+ /** @return true if there is a nontrivial range for the given field. */
+ bool hasRange( const char *fieldName ) const {
+ map<string, FieldRange>::const_iterator f = _ranges.find( fieldName );
+ return f != _ranges.end();
+ }
+ /** @return range for the given field. */
+ const FieldRange &range( const char *fieldName ) const;
+ /** @return range for the given field. */
+ FieldRange &range( const char *fieldName );
+ /** @return the number of nontrivial ranges. */
+ int nNontrivialRanges() const;
+ /** @return the field ranges comprising this set. */
+ const map<string,FieldRange> &ranges() const { return _ranges; }
+ /**
+ * @return true if a match could be possible on every field. Generally this
+ * is not useful information for a single key FieldRangeSet and
+ * matchPossibleForIndex() should be used instead.
+ */
+ bool matchPossible() const;
+ /**
+ * @return true if a match could be possible given the value of _singleKey
+ * and index key 'keyPattern'.
+ * @param keyPattern May be {} or {$natural:1} for a non index scan.
+ */
+ bool matchPossibleForIndex( const BSONObj &keyPattern ) const;
+
+ const char *ns() const { return _ns; }
+
+ /**
+ * @return a simplified query from the extreme values of the nontrivial
+ * fields.
+ * @param fields If specified, the fields of the returned object are
+ * ordered to match those of 'fields'.
+ */
+ BSONObj simplifiedQuery( const BSONObj &fields = BSONObj() ) const;
+
+ QueryPattern pattern( const BSONObj &sort = BSONObj() ) const;
+ string getSpecial() const;
+
+ /**
+ * @return a FieldRangeSet approximation of the documents in 'this' but
+ * not in 'other'. The approximation will be a superset of the documents
+ * in 'this' but not 'other'.
+ */
+ const FieldRangeSet &operator-=( const FieldRangeSet &other );
+ /** @return intersection of 'this' with 'other'. */
+ const FieldRangeSet &operator&=( const FieldRangeSet &other );
+
+ /**
+ * @return an ordered list of bounds generated using an index key pattern
+ * and traversal direction.
+ *
+ * NOTE This function is deprecated in the query optimizer and only
+ * currently used by the sharding code.
+ */
+ BoundList indexBounds( const BSONObj &keyPattern, int direction ) const;
+
+ /**
+ * @return - A new FieldRangeSet based on this FieldRangeSet, but with only
+ * a subset of the fields.
+ * @param fields - Only fields which are represented as field names in this object
+ * will be included in the returned FieldRangeSet.
+ */
+ FieldRangeSet *subset( const BSONObj &fields ) const;
+
+ bool singleKey() const { return _singleKey; }
+
+ BSONObj originalQuery() const { return _queries[ 0 ]; }
+ private:
+ void appendQueries( const FieldRangeSet &other );
+ void makeEmpty();
+ void processQueryField( const BSONElement &e, bool optimize );
+ void processOpElement( const char *fieldName, const BSONElement &f, bool isNot, bool optimize );
+ static FieldRange *__singleKeyTrivialRange;
+ static FieldRange *__multiKeyTrivialRange;
+ const FieldRange &trivialRange() const;
+ map<string,FieldRange> _ranges;
+ const char *_ns;
+ // Owns memory for FieldRange BSONElements.
+ vector<BSONObj> _queries;
+ bool _singleKey;
+ };
+
+ class NamespaceDetails;
+
+ /**
+ * A pair of FieldRangeSets, one representing constraints for single key
+ * indexes and the other representing constraints for multi key indexes and
+ * unindexed scans. In several member functions the caller is asked to
+ * supply an index so that the implementation may utilize the proper
+ * FieldRangeSet and return results that are appropriate with respect to that
+ * supplied index.
+ */
+ class FieldRangeSetPair {
+ public:
+ FieldRangeSetPair( const char *ns, const BSONObj &query, bool optimize=true )
+ :_singleKey( ns, query, true, optimize ), _multiKey( ns, query, false, optimize ) {}
+
+ /**
+ * @return the appropriate single or multi key FieldRangeSet for the specified index.
+ * @param idxNo -1 for non index scan.
+ */
+ const FieldRangeSet &frsForIndex( const NamespaceDetails* nsd, int idxNo ) const;
+
+ /** @return a field range in the single key FieldRangeSet. */
+ const FieldRange &singleKeyRange( const char *fieldName ) const {
+ return _singleKey.range( fieldName );
+ }
+ /** @return true if the range limits are equivalent to an empty query. */
+ bool noNontrivialRanges() const;
+ /** @return false if a match is impossible regardless of index. */
+ bool matchPossible() const { return _multiKey.matchPossible(); }
+ /**
+ * @return false if a match is impossible on the specified index.
+ * @param idxNo -1 for non index scan.
+ */
+ bool matchPossibleForIndex( NamespaceDetails *d, int idxNo, const BSONObj &keyPattern ) const;
+
+ const char *ns() const { return _singleKey.ns(); }
+
+ string getSpecial() const { return _singleKey.getSpecial(); }
+
+ /** Intersect with another FieldRangeSetPair. */
+ FieldRangeSetPair &operator&=( const FieldRangeSetPair &other );
+ /**
+ * Subtract a FieldRangeSet, generally one expressing a range that has
+ * already been scanned.
+ */
+ FieldRangeSetPair &operator-=( const FieldRangeSet &scanned );
+
+ BoundList singleKeyIndexBounds( const BSONObj &keyPattern, int direction ) const {
+ return _singleKey.indexBounds( keyPattern, direction );
+ }
+
+ BSONObj originalQuery() const { return _singleKey.originalQuery(); }
+
+ private:
+ FieldRangeSetPair( const FieldRangeSet &singleKey, const FieldRangeSet &multiKey )
+ :_singleKey( singleKey ), _multiKey( multiKey ) {}
+ void assertValidIndex( const NamespaceDetails *d, int idxNo ) const;
+ void assertValidIndexOrNoIndex( const NamespaceDetails *d, int idxNo ) const;
+ /** matchPossibleForIndex() must be true. */
+ BSONObj simplifiedQueryForIndex( NamespaceDetails *d, int idxNo, const BSONObj &keyPattern ) const;
+ FieldRangeSet _singleKey;
+ FieldRangeSet _multiKey;
+ friend class OrRangeGenerator;
+ friend struct QueryUtilIndexed;
+ };
+
+ class IndexSpec;
+
+ /**
+ * An ordered list of fields and their FieldRanges, corresponding to valid
+ * index keys for a given index spec.
+ */
+ class FieldRangeVector {
+ public:
+ /**
+ * @param frs The valid ranges for all fields, as defined by the query spec
+ * @param indexSpec The index spec (key pattern and info)
+ * @param direction The direction of index traversal
+ */
+ FieldRangeVector( const FieldRangeSet &frs, const IndexSpec &indexSpec, int direction );
+
+ /** @return the number of index ranges represented by 'this' */
+ long long size();
+ /** @return starting point for an index traversal. */
+ BSONObj startKey() const;
+ /** @return end point for an index traversal. */
+ BSONObj endKey() const;
+ /** @return a client readable representation of 'this' */
+ BSONObj obj() const;
+
+ const IndexSpec& getSpec(){ return _indexSpec; }
+
+ /**
+ * @return true iff the provided document matches valid ranges on all
+ * of this FieldRangeVector's fields, which is the case iff this document
+ * would be returned while scanning the index corresponding to this
+ * FieldRangeVector. This function is used for $or clause deduping.
+ */
+ bool matches( const BSONObj &obj ) const;
+
+ /**
+ * @return first key of 'obj' that would be encountered by a forward
+ * index scan using this FieldRangeVector, BSONObj() if no such key.
+ */
+ BSONObj firstMatch( const BSONObj &obj ) const;
+
+ private:
+ int matchingLowElement( const BSONElement &e, int i, bool direction, bool &lowEquality ) const;
+ bool matchesElement( const BSONElement &e, int i, bool direction ) const;
+ bool matchesKey( const BSONObj &key ) const;
+ vector<FieldRange> _ranges;
+ const IndexSpec _indexSpec;
+ int _direction;
+ vector<BSONObj> _queries; // make sure mem owned
+ friend class FieldRangeVectorIterator;
+ };
+
+ /**
+ * Helper class for iterating through an ordered representation of keys
+ * to find those keys that match a specified FieldRangeVector.
+ */
+ class FieldRangeVectorIterator {
+ public:
+ FieldRangeVectorIterator( const FieldRangeVector &v ) : _v( v ), _i( _v._ranges.size(), -1 ), _cmp( _v._ranges.size(), 0 ), _inc( _v._ranges.size(), false ), _after() {
+ }
+ static BSONObj minObject() {
+ BSONObjBuilder b; b.appendMinKey( "" );
+ return b.obj();
+ }
+ static BSONObj maxObject() {
+ BSONObjBuilder b; b.appendMaxKey( "" );
+ return b.obj();
+ }
+ /**
+ * @return Suggested advance method, based on current key.
+ * -2 Iteration is complete, no need to advance.
+ * -1 Advance to the next key, without skipping.
+ * >=0 Skip parameter. If @return is r, skip to the key comprised
+ * of the first r elements of curr followed by the (r+1)th and
+ * remaining elements of cmp() (with inclusivity specified by
+ * the (r+1)th and remaining elements of inc()). If after() is
+ * true, skip past this key not to it.
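+         *
+         * Illustration only (hypothetical bounds, not taken from the original source):
+         * with an {a:1,b:1} index, bounds a:[2,2] and b:[5,10], and a forward scan,
+         * calls on a freshly constructed iterator behave as follows:
+         *   advance( {'':2,'':3} )  -> 1, with cmp()[1] pointing at the lower bound 5,
+         *                              inc()[1] true, after() false (skip to {a:2, b:5})
+         *   advance( {'':2,'':7} )  -> -1 (the key is inside the bounds; just advance normally)
+         *   advance( {'':2,'':12} ) -> -2 (no further keys can match)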
+ */
+ int advance( const BSONObj &curr );
+ const vector<const BSONElement *> &cmp() const { return _cmp; }
+ const vector<bool> &inc() const { return _inc; }
+ bool after() const { return _after; }
+ void prepDive();
+ void setZero( int i ) { for( int j = i; j < (int)_i.size(); ++j ) _i[ j ] = 0; }
+ void setMinus( int i ) { for( int j = i; j < (int)_i.size(); ++j ) _i[ j ] = -1; }
+ bool ok() { return _i[ 0 ] < (int)_v._ranges[ 0 ].intervals().size(); }
+ BSONObj startKey();
+ // temp
+ BSONObj endKey();
+ private:
+ const FieldRangeVector &_v;
+ vector<int> _i;
+ vector<const BSONElement*> _cmp;
+ vector<bool> _inc;
+ bool _after;
+ };
+
+ /**
+ * As we iterate through $or clauses this class generates a FieldRangeSetPair
+ * for the current $or clause, in some cases by excluding ranges that were
+ * included in a previous clause.
+ */
+ class OrRangeGenerator {
+ public:
+ OrRangeGenerator( const char *ns, const BSONObj &query , bool optimize=true );
+
+ /**
+ * @return true iff we are done scanning $or clauses. if there's a
+ * useless or clause, we won't use or index ranges to help with scanning.
+ */
+ bool orFinished() const { return _orFound && _orSets.empty(); }
+ /** Iterates to the next $or clause by removing the current $or clause. */
+ void popOrClause( NamespaceDetails *nsd, int idxNo, const BSONObj &keyPattern );
+ void popOrClauseSingleKey();
+ /** @return FieldRangeSetPair for the current $or clause. */
+ FieldRangeSetPair *topFrsp() const;
+ /**
+ * @return original FieldRangeSetPair for the current $or clause. While the
+ * original bounds are looser, they are composed of fewer ranges and it
+ * is faster to do operations with them; when they can be used instead of
+ * more precise bounds, they should.
+ */
+ FieldRangeSetPair *topFrspOriginal() const;
+
+ string getSpecial() const { return _baseSet.getSpecial(); }
+
+ bool moreOrClauses() const { return !_orSets.empty(); }
+ private:
+ void assertMayPopOrClause();
+ void popOrClause( const FieldRangeSet *toDiff, NamespaceDetails *d = 0, int idxNo = -1, const BSONObj &keyPattern = BSONObj() );
+ FieldRangeSetPair _baseSet;
+ list<FieldRangeSetPair> _orSets;
+ list<FieldRangeSetPair> _originalOrSets;
+ // ensure memory is owned
+ list<FieldRangeSetPair> _oldOrSets;
+ bool _orFound;
+ friend struct QueryUtilIndexed;
+ };
+
+    /** returns a string that, when used as a matcher, would match a superset of regex()
+ returns "" for complex regular expressions
+ used to optimize queries in some simple regex cases that start with '^'
+
+ if purePrefix != NULL, sets it to whether the regex can be converted to a range query
+ */
+ string simpleRegex(const char* regex, const char* flags, bool* purePrefix=NULL);
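+    // e.g. simpleRegex( "^foo", "", NULL ) == "foo", while an unanchored pattern such as
+    // simpleRegex( "foo", "", NULL ) yields "" (no usable prefix)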
+
+ /** returns the upper bound of a query that matches prefix */
+ string simpleRegexEnd( string prefix );
+
+ long long applySkipLimit( long long num , const BSONObj& cmd );
+
+} // namespace mongo
+
+#include "queryutil-inl.h"
diff --git a/src/mongo/db/record.cpp b/src/mongo/db/record.cpp
new file mode 100644
index 00000000000..17987002efc
--- /dev/null
+++ b/src/mongo/db/record.cpp
@@ -0,0 +1,267 @@
+// record.cpp
+
+#include "pch.h"
+#include "pdfile.h"
+#include "../util/processinfo.h"
+#include "../util/net/listen.h"
+#include "pagefault.h"
+
+namespace mongo {
+
+ namespace ps {
+
+ enum State {
+ In , Out, Unk
+ };
+
+ enum Constants {
+ SliceSize = 65536 ,
+ MaxChain = 20 , // intentionally very low
+ NumSlices = 10 ,
+ RotateTimeSecs = 90
+ };
+
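+        // spreads a memory region id over the SliceSize buckets by mixing its 16 bit chunks;
+        // collisions are handled by the (intentionally short) chains in Slice::_get()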
+ int hash( size_t region ) {
+ return
+ abs( ( ( 7 + (int)(region & 0xFFFF) )
+ * ( 11 + (int)( ( region >> 16 ) & 0xFFFF ) )
+#if defined(_WIN64) || defined(__amd64__)
+ * ( 13 + (int)( ( region >> 32 ) & 0xFFFF ) )
+ * ( 17 + (int)( ( region >> 48 ) & 0xFFFF ) )
+#endif
+ ) % SliceSize );
+ }
+
+
+ /**
+ * simple hash map for region -> status
+         * this constitutes a single slice of time
+ * it does chaining, but very short chains
+ */
+ class Slice {
+
+ struct Entry {
+ size_t region;
+ unsigned long long value;
+ };
+
+ public:
+
+ Slice() {
+ reset();
+ }
+
+ void reset() {
+ memset( _data , 0 , SliceSize * sizeof(Entry) );
+ }
+
+ State get( int regionHash , size_t region , short offset ) {
+ DEV assert( hash( region ) == regionHash );
+
+ Entry * e = _get( regionHash , region , false );
+ if ( ! e )
+ return Unk;
+
+ return ( e->value & ( ((unsigned long long)1) << offset ) ) ? In : Out;
+ }
+
+ /**
+ * @return true if added, false if full
+ */
+ bool in( int regionHash , size_t region , short offset ) {
+ DEV assert( hash( region ) == regionHash );
+
+ Entry * e = _get( regionHash , region , true );
+ if ( ! e )
+ return false;
+
+ e->value |= ((unsigned long long)1) << offset;
+ return true;
+ }
+
+ private:
+
+ Entry* _get( int start , size_t region , bool add ) {
+ for ( int i=0; i<MaxChain; i++ ) {
+
+ int bucket = ( start + i ) % SliceSize;
+
+ if ( _data[bucket].region == 0 ) {
+ if ( ! add )
+ return 0;
+
+ _data[bucket].region = region;
+ return &_data[bucket];
+ }
+
+ if ( _data[bucket].region == region ) {
+ return &_data[bucket];
+ }
+ }
+ return 0;
+ }
+
+ Entry _data[SliceSize];
+ };
+
+
+ /**
+         * this contains many slices of time
+         * the idea is that you put mem status in the current time slice
+ * and then after a certain period of time, it rolls off so we check again
+ */
+ class Rolling {
+
+ public:
+ Rolling()
+ : _lock( "ps::Rolling" ){
+ _curSlice = 0;
+ _lastRotate = Listener::getElapsedTimeMillis();
+ }
+
+
+ /**
+ * after this call, we assume the page is in ram
+ * @param doHalf if this is a known good access, want to put in first half
+ * @return whether we know the page is in ram
+ */
+ bool access( size_t region , short offset , bool doHalf ) {
+ int regionHash = hash(region);
+
+ SimpleMutex::scoped_lock lk( _lock );
+
+ static int rarely_count = 0;
+ if ( rarely_count++ % 2048 == 0 ) {
+ long long now = Listener::getElapsedTimeMillis();
+ RARELY if ( now == 0 ) {
+ tlog() << "warning Listener::getElapsedTimeMillis returning 0ms" << endl;
+ }
+
+ if ( now - _lastRotate > ( 1000 * RotateTimeSecs ) ) {
+ _rotate();
+ }
+ }
+
+ for ( int i=0; i<NumSlices / ( doHalf ? 2 : 1 ); i++ ) {
+ int pos = (_curSlice+i)%NumSlices;
+ State s = _slices[pos].get( regionHash , region , offset );
+
+ if ( s == In )
+ return true;
+
+ if ( s == Out ) {
+ _slices[pos].in( regionHash , region , offset );
+ return false;
+ }
+ }
+
+ // we weren't in any slice
+ // so add to cur
+ if ( ! _slices[_curSlice].in( regionHash , region , offset ) ) {
+ _rotate();
+ _slices[_curSlice].in( regionHash , region , offset );
+ }
+ return false;
+ }
+
+ private:
+
+ void _rotate() {
+ _curSlice = ( _curSlice + 1 ) % NumSlices;
+ _slices[_curSlice].reset();
+ _lastRotate = Listener::getElapsedTimeMillis();
+ }
+
+ int _curSlice;
+ long long _lastRotate;
+ Slice _slices[NumSlices];
+
+ SimpleMutex _lock;
+ } rolling;
+
+ }
+
+ bool Record::MemoryTrackingEnabled = true;
+
+ volatile int __record_touch_dummy = 1; // this is used to make sure the compiler doesn't get too smart on us
+ void Record::touch( bool entireRecrd ) {
+ if ( lengthWithHeaders > HeaderSize ) { // this also makes sure lengthWithHeaders is in memory
+ char * addr = data;
+ char * end = data + netLength();
+ for ( ; addr <= end ; addr += 2048 ) {
+ __record_touch_dummy += addr[0];
+
+ break; // TODO: remove this, pending SERVER-3711
+
+                // note: if this is a touch of a deletedrecord, we don't want to touch more than the first part. we may simply
+                // be updating the linked list, and a deletedrecord could be gigantic. a similar but less extreme circumstance
+                // exists for any record if we are just updating its header, say on a remove(); some sort of hints might be
+                // useful.
+
+ if ( ! entireRecrd )
+ break;
+ }
+ }
+ }
+
+ const bool blockSupported = ProcessInfo::blockCheckSupported();
+
+ bool Record::likelyInPhysicalMemory() {
+ if ( ! MemoryTrackingEnabled )
+ return true;
+
+        const size_t page = (size_t)data >> 12;   // 4KB page containing the record
+        const size_t region = page >> 6;          // group of 64 pages tracked by one hash entry
+        const size_t offset = page & 0x3f;        // this page's bit within the region's 64-bit mask
+
+ if ( ps::rolling.access( region , offset , false ) )
+ return true;
+
+ if ( ! blockSupported ) {
+ // this means we don't fallback to system call
+ // and assume things aren't in memory
+ // possible we yield too much - but better than not yielding through a fault
+ return false;
+ }
+
+ return ProcessInfo::blockInMemory( data );
+ }
+
+
+ Record* Record::accessed() {
+ const size_t page = (size_t)data >> 12;
+ const size_t region = page >> 6;
+ const size_t offset = page & 0x3f;
+ ps::rolling.access( region , offset , true );
+ return this;
+ }
+
+ Record* DiskLoc::rec() const {
+ Record *r = DataFileMgr::getRecord(*this);
+#if defined(_PAGEFAULTEXCEPTION)
+ DEV ONCE {
+ log() << "_DEBUG info _PAGEFAULTEXCEPTION is ON -- experimental at this time" << endl;
+ }
+ bool fault = !r->likelyInPhysicalMemory();
+ DEV if( rand() % 100 == 0 )
+ fault = true;
+ if( fault &&
+ !cc()._hasWrittenThisPass &&
+ cc()._pageFaultRetryableSection )
+ {
+ if( cc()._pageFaultRetryableSection->_laps > 100 ) {
+ log() << "info pagefaultexception _laps > 100" << endl;
+ }
+ else {
+ throw PageFaultException(r);
+ }
+ }
+#else
+ DEV ONCE {
+ log() << "_DEBUG info _PAGEFAULTEXCEPTION is off" << endl;
+ }
+#endif
+ return r;
+ }
+
+}
diff --git a/src/mongo/db/repl.cpp b/src/mongo/db/repl.cpp
new file mode 100644
index 00000000000..25ecb6b455f
--- /dev/null
+++ b/src/mongo/db/repl.cpp
@@ -0,0 +1,1516 @@
+// repl.cpp
+
+/* TODO
+ PAIRING
+ _ on a syncexception, don't allow going back to master state?
+*/
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+/* Collections we use:
+
+ local.sources - indicates what sources we pull from as a "slave", and the last update of each
+ local.oplog.$main - our op log as "master"
+ local.dbinfo.<dbname> - no longer used???
+ local.pair.startup - [deprecated] can contain a special value indicating for a pair that we have the master copy.
+ used when replacing other half of the pair which has permanently failed.
+ local.pair.sync - [deprecated] { initialsynccomplete: 1 }
+*/
+
+#include "pch.h"
+#include "jsobj.h"
+#include "../util/goodies.h"
+#include "repl.h"
+#include "../util/net/message.h"
+#include "../util/background.h"
+#include "../client/dbclient.h"
+#include "../client/connpool.h"
+#include "pdfile.h"
+#include "ops/query.h"
+#include "db.h"
+#include "commands.h"
+#include "security.h"
+#include "cmdline.h"
+#include "repl_block.h"
+#include "repl/rs.h"
+#include "replutil.h"
+#include "repl/connections.h"
+#include "ops/update.h"
+
+namespace mongo {
+
+ // our config from command line etc.
+ ReplSettings replSettings;
+
+ /* if 1 sync() is running */
+ volatile int syncing = 0;
+ static volatile int relinquishSyncingSome = 0;
+
+ /* "dead" means something really bad happened like replication falling completely out of sync.
+ when non-null, we are dead and the string is informational
+ */
+ const char *replAllDead = 0;
+
+ time_t lastForcedResync = 0;
+
+} // namespace mongo
+
+namespace mongo {
+
+ /* output by the web console */
+ const char *replInfo = "";
+ struct ReplInfo {
+ ReplInfo(const char *msg) {
+ replInfo = msg;
+ }
+ ~ReplInfo() {
+ replInfo = "?";
+ }
+ };
+
+ /* operator requested resynchronization of replication (on the slave). { resync : 1 } */
+ class CmdResync : public Command {
+ public:
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ virtual bool logTheOp() { return false; }
+ virtual LockType locktype() const { return WRITE; }
+ void help(stringstream&h) const { h << "resync (from scratch) an out of date replica slave.\nhttp://www.mongodb.org/display/DOCS/Master+Slave"; }
+ CmdResync() : Command("resync") { }
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ if( cmdLine.usingReplSets() ) {
+                errmsg = "resync command not currently supported with replica sets. See RS102 info in the mongodb documentation";
+ result.append("info", "http://www.mongodb.org/display/DOCS/Resyncing+a+Very+Stale+Replica+Set+Member");
+ return false;
+ }
+
+ if ( cmdObj.getBoolField( "force" ) ) {
+ if ( !waitForSyncToFinish( errmsg ) )
+ return false;
+ replAllDead = "resync forced";
+ }
+ if ( !replAllDead ) {
+ errmsg = "not dead, no need to resync";
+ return false;
+ }
+ if ( !waitForSyncToFinish( errmsg ) )
+ return false;
+
+ ReplSource::forceResyncDead( "client" );
+ result.append( "info", "triggered resync for all sources" );
+ return true;
+ }
+ bool waitForSyncToFinish( string &errmsg ) const {
+            // Wait for slave thread to finish syncing, so sources will be
+ // reloaded with new saved state on next pass.
+ Timer t;
+ while ( 1 ) {
+ if ( syncing == 0 || t.millis() > 30000 )
+ break;
+ {
+ dbtemprelease t;
+ relinquishSyncingSome = 1;
+ sleepmillis(1);
+ }
+ }
+ if ( syncing ) {
+ errmsg = "timeout waiting for sync() to finish";
+ return false;
+ }
+ return true;
+ }
+ } cmdResync;
+
+ bool anyReplEnabled() {
+ return replSettings.slave || replSettings.master || theReplSet;
+ }
+
+ bool replAuthenticate(DBClientBase *conn);
+
+ void appendReplicationInfo( BSONObjBuilder& result , bool authed , int level ) {
+
+ if ( replSet ) {
+ if( theReplSet == 0 ) {
+ result.append("ismaster", false);
+ result.append("secondary", false);
+ result.append("info", ReplSet::startupStatusMsg.get());
+ result.append( "isreplicaset" , true );
+ return;
+ }
+
+ theReplSet->fillIsMaster(result);
+ return;
+ }
+
+ if ( replAllDead ) {
+ result.append("ismaster", 0);
+ string s = string("dead: ") + replAllDead;
+ result.append("info", s);
+ }
+ else {
+ result.appendBool("ismaster", _isMaster() );
+ }
+
+ if ( level && replSet ) {
+ result.append( "info" , "is replica set" );
+ }
+ else if ( level ) {
+ BSONObjBuilder sources( result.subarrayStart( "sources" ) );
+
+ readlock lk( "local.sources" );
+ Client::Context ctx( "local.sources", dbpath, authed );
+ shared_ptr<Cursor> c = findTableScan("local.sources", BSONObj());
+ int n = 0;
+ while ( c->ok() ) {
+ BSONObj s = c->current();
+
+ BSONObjBuilder bb;
+ bb.append( s["host"] );
+ string sourcename = s["source"].valuestr();
+ if ( sourcename != "main" )
+ bb.append( s["source"] );
+
+ {
+ BSONElement e = s["syncedTo"];
+ BSONObjBuilder t( bb.subobjStart( "syncedTo" ) );
+ t.appendDate( "time" , e.timestampTime() );
+ t.append( "inc" , e.timestampInc() );
+ t.done();
+ }
+
+ if ( level > 1 ) {
+ dbtemprelease unlock;
+                    // note: there is no socket (SO_*-style) timeout on this connection; perhaps we should have one.
+ ScopedDbConnection conn( s["host"].valuestr() );
+ DBClientConnection *cliConn = dynamic_cast< DBClientConnection* >( &conn.conn() );
+ if ( cliConn && replAuthenticate( cliConn ) ) {
+ BSONObj first = conn->findOne( (string)"local.oplog.$" + sourcename , Query().sort( BSON( "$natural" << 1 ) ) );
+ BSONObj last = conn->findOne( (string)"local.oplog.$" + sourcename , Query().sort( BSON( "$natural" << -1 ) ) );
+ bb.appendDate( "masterFirst" , first["ts"].timestampTime() );
+ bb.appendDate( "masterLast" , last["ts"].timestampTime() );
+ double lag = (double) (last["ts"].timestampTime() - s["syncedTo"].timestampTime());
+ bb.append( "lagSeconds" , lag / 1000 );
+ }
+ conn.done();
+ }
+
+ sources.append( BSONObjBuilder::numStr( n++ ) , bb.obj() );
+ c->advance();
+ }
+
+ sources.done();
+ }
+ }
+
+ class CmdIsMaster : public Command {
+ public:
+ virtual bool requiresAuth() { return false; }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual void help( stringstream &help ) const {
+ help << "Check if this server is primary for a replica pair/set; also if it is --master or --slave in simple master/slave setups.\n";
+ help << "{ isMaster : 1 }";
+ }
+ virtual LockType locktype() const { return NONE; }
+ CmdIsMaster() : Command("isMaster", true, "ismaster") { }
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
+ /* currently request to arbiter is (somewhat arbitrarily) an ismaster request that is not
+ authenticated.
+ we allow unauthenticated ismaster but we aren't as verbose informationally if
+ one is not authenticated for admin db to be safe.
+ */
+ bool authed = cc().getAuthenticationInfo()->isAuthorizedReads("admin");
+ appendReplicationInfo( result , authed );
+
+ result.appendNumber("maxBsonObjectSize", BSONObjMaxUserSize);
+ return true;
+ }
+ } cmdismaster;
+
+ ReplSource::ReplSource() {
+ nClonedThisPass = 0;
+ }
+
+ ReplSource::ReplSource(BSONObj o) : nClonedThisPass(0) {
+ only = o.getStringField("only");
+ hostName = o.getStringField("host");
+ _sourceName = o.getStringField("source");
+ uassert( 10118 , "'host' field not set in sources collection object", !hostName.empty() );
+ uassert( 10119 , "only source='main' allowed for now with replication", sourceName() == "main" );
+ BSONElement e = o.getField("syncedTo");
+ if ( !e.eoo() ) {
+ uassert( 10120 , "bad sources 'syncedTo' field value", e.type() == Date || e.type() == Timestamp );
+ OpTime tmp( e.date() );
+ syncedTo = tmp;
+ }
+
+ BSONObj dbsObj = o.getObjectField("dbsNextPass");
+ if ( !dbsObj.isEmpty() ) {
+ BSONObjIterator i(dbsObj);
+ while ( 1 ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ addDbNextPass.insert( e.fieldName() );
+ }
+ }
+
+ dbsObj = o.getObjectField("incompleteCloneDbs");
+ if ( !dbsObj.isEmpty() ) {
+ BSONObjIterator i(dbsObj);
+ while ( 1 ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ incompleteCloneDbs.insert( e.fieldName() );
+ }
+ }
+ }
+
+ /* Turn our C++ Source object into a BSONObj */
+ BSONObj ReplSource::jsobj() {
+ BSONObjBuilder b;
+ b.append("host", hostName);
+ b.append("source", sourceName());
+ if ( !only.empty() )
+ b.append("only", only);
+ if ( !syncedTo.isNull() )
+ b.appendTimestamp("syncedTo", syncedTo.asDate());
+
+ BSONObjBuilder dbsNextPassBuilder;
+ int n = 0;
+ for ( set<string>::iterator i = addDbNextPass.begin(); i != addDbNextPass.end(); i++ ) {
+ n++;
+ dbsNextPassBuilder.appendBool(*i, 1);
+ }
+ if ( n )
+ b.append("dbsNextPass", dbsNextPassBuilder.done());
+
+ BSONObjBuilder incompleteCloneDbsBuilder;
+ n = 0;
+ for ( set<string>::iterator i = incompleteCloneDbs.begin(); i != incompleteCloneDbs.end(); i++ ) {
+ n++;
+ incompleteCloneDbsBuilder.appendBool(*i, 1);
+ }
+ if ( n )
+ b.append("incompleteCloneDbs", incompleteCloneDbsBuilder.done());
+
+ return b.obj();
+ }
+
+ void ReplSource::save() {
+ BSONObjBuilder b;
+ assert( !hostName.empty() );
+ b.append("host", hostName);
+ // todo: finish allowing multiple source configs.
+ // this line doesn't work right when source is null, if that is allowed as it is now:
+ //b.append("source", _sourceName);
+ BSONObj pattern = b.done();
+
+ BSONObj o = jsobj();
+ log( 1 ) << "Saving repl source: " << o << endl;
+
+ {
+ OpDebug debug;
+ Client::Context ctx("local.sources");
+ UpdateResult res = updateObjects("local.sources", o, pattern, true/*upsert for pair feature*/, false,false,debug);
+ assert( ! res.mod );
+ assert( res.num == 1 );
+ }
+ }
+
+ static void addSourceToList(ReplSource::SourceVector &v, ReplSource& s, ReplSource::SourceVector &old) {
+ if ( !s.syncedTo.isNull() ) { // Don't reuse old ReplSource if there was a forced resync.
+ for ( ReplSource::SourceVector::iterator i = old.begin(); i != old.end(); ) {
+ if ( s == **i ) {
+ v.push_back(*i);
+ old.erase(i);
+ return;
+ }
+ i++;
+ }
+ }
+
+ v.push_back( shared_ptr< ReplSource >( new ReplSource( s ) ) );
+ }
+
+ /* we reuse our existing objects so that we can keep our existing connection
+ and cursor in effect.
+ */
+ void ReplSource::loadAll(SourceVector &v) {
+ Client::Context ctx("local.sources");
+ SourceVector old = v;
+ v.clear();
+
+ if ( !cmdLine.source.empty() ) {
+ // --source <host> specified.
+ // check that no items are in sources other than that
+ // add if missing
+ shared_ptr<Cursor> c = findTableScan("local.sources", BSONObj());
+ int n = 0;
+ while ( c->ok() ) {
+ n++;
+ ReplSource tmp(c->current());
+ if ( tmp.hostName != cmdLine.source ) {
+ log() << "repl: --source " << cmdLine.source << " != " << tmp.hostName << " from local.sources collection" << endl;
+ log() << "repl: for instructions on changing this slave's source, see:" << endl;
+ log() << "http://dochub.mongodb.org/core/masterslave" << endl;
+ log() << "repl: terminating mongod after 30 seconds" << endl;
+ sleepsecs(30);
+ dbexit( EXIT_REPLICATION_ERROR );
+ }
+ if ( tmp.only != cmdLine.only ) {
+ log() << "--only " << cmdLine.only << " != " << tmp.only << " from local.sources collection" << endl;
+ log() << "terminating after 30 seconds" << endl;
+ sleepsecs(30);
+ dbexit( EXIT_REPLICATION_ERROR );
+ }
+ c->advance();
+ }
+ uassert( 10002 , "local.sources collection corrupt?", n<2 );
+ if ( n == 0 ) {
+ // source missing. add.
+ ReplSource s;
+ s.hostName = cmdLine.source;
+ s.only = cmdLine.only;
+ s.save();
+ }
+ }
+ else {
+ try {
+ massert( 10384 , "--only requires use of --source", cmdLine.only.empty());
+ }
+ catch ( ... ) {
+ dbexit( EXIT_BADOPTIONS );
+ }
+ }
+
+ shared_ptr<Cursor> c = findTableScan("local.sources", BSONObj());
+ while ( c->ok() ) {
+ ReplSource tmp(c->current());
+ if ( tmp.syncedTo.isNull() ) {
+ DBDirectClient c;
+ if ( c.exists( "local.oplog.$main" ) ) {
+ BSONObj op = c.findOne( "local.oplog.$main", QUERY( "op" << NE << "n" ).sort( BSON( "$natural" << -1 ) ) );
+ if ( !op.isEmpty() ) {
+ tmp.syncedTo = op[ "ts" ].date();
+ }
+ }
+ }
+ addSourceToList(v, tmp, old);
+ c->advance();
+ }
+ }
+
+ BSONObj opTimeQuery = fromjson("{\"getoptime\":1}");
+
+ bool ReplSource::throttledForceResyncDead( const char *requester ) {
+ if ( time( 0 ) - lastForcedResync > 600 ) {
+ forceResyncDead( requester );
+ lastForcedResync = time( 0 );
+ return true;
+ }
+ return false;
+ }
+
+ void ReplSource::forceResyncDead( const char *requester ) {
+ if ( !replAllDead )
+ return;
+ SourceVector sources;
+ ReplSource::loadAll(sources);
+ for( SourceVector::iterator i = sources.begin(); i != sources.end(); ++i ) {
+ log() << requester << " forcing resync from " << (*i)->hostName << endl;
+ (*i)->forceResync( requester );
+ }
+ replAllDead = 0;
+ }
+
+ void ReplSource::forceResync( const char *requester ) {
+ BSONObj info;
+ {
+ dbtemprelease t;
+ if (!oplogReader.connect(hostName)) {
+ msgassertedNoTrace( 14051 , "unable to connect to resync");
+ }
+ /* todo use getDatabaseNames() method here */
+ bool ok = oplogReader.conn()->runCommand( "admin", BSON( "listDatabases" << 1 ), info );
+ massert( 10385 , "Unable to get database list", ok );
+ }
+ BSONObjIterator i( info.getField( "databases" ).embeddedObject() );
+ while( i.moreWithEOO() ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ string name = e.embeddedObject().getField( "name" ).valuestr();
+ if ( !e.embeddedObject().getBoolField( "empty" ) ) {
+ if ( name != "local" ) {
+ if ( only.empty() || only == name ) {
+ resyncDrop( name.c_str(), requester );
+ }
+ }
+ }
+ }
+ syncedTo = OpTime();
+ addDbNextPass.clear();
+ save();
+ }
+
+ string ReplSource::resyncDrop( const char *db, const char *requester ) {
+ log() << "resync: dropping database " << db << endl;
+ Client::Context ctx(db);
+ dropDatabase(db);
+ return db;
+ }
+
+ /* grab initial copy of a database from the master */
+ void ReplSource::resync(string db) {
+ string dummyNs = resyncDrop( db.c_str(), "internal" );
+ Client::Context ctx( dummyNs );
+ {
+ log() << "resync: cloning database " << db << " to get an initial copy" << endl;
+ ReplInfo r("resync: cloning a database");
+ string errmsg;
+ int errCode = 0;
+ bool ok = cloneFrom(hostName.c_str(), errmsg, cc().database()->name, false, /*slaveOk*/ true, /*replauth*/ true, /*snapshot*/false, /*mayYield*/true, /*mayBeInterrupted*/false, &errCode);
+ if ( !ok ) {
+ if ( errCode == DatabaseDifferCaseCode ) {
+ resyncDrop( db.c_str(), "internal" );
+ log() << "resync: database " << db << " not valid on the master due to a name conflict, dropping." << endl;
+ return;
+ }
+ else {
+ problem() << "resync of " << db << " from " << hostName << " failed " << errmsg << endl;
+ throw SyncException();
+ }
+ }
+ }
+
+ log() << "resync: done with initial clone for db: " << db << endl;
+
+ return;
+ }
+
+ DatabaseIgnorer ___databaseIgnorer;
+
+ void DatabaseIgnorer::doIgnoreUntilAfter( const string &db, const OpTime &futureOplogTime ) {
+ if ( futureOplogTime > _ignores[ db ] ) {
+ _ignores[ db ] = futureOplogTime;
+ }
+ }
+
+ bool DatabaseIgnorer::ignoreAt( const string &db, const OpTime &currentOplogTime ) {
+ if ( _ignores[ db ].isNull() ) {
+ return false;
+ }
+ if ( _ignores[ db ] >= currentOplogTime ) {
+ return true;
+ } else {
+ // The ignore state has expired, so clear it.
+ _ignores.erase( db );
+ return false;
+ }
+ }
+
+ bool ReplSource::handleDuplicateDbName( const BSONObj &op, const char *ns, const char *db ) {
+ if ( dbHolder()._isLoaded( ns, dbpath ) ) {
+ // Database is already present.
+ return true;
+ }
+ BSONElement ts = op.getField( "ts" );
+ if ( ( ts.type() == Date || ts.type() == Timestamp ) && ___databaseIgnorer.ignoreAt( db, ts.date() ) ) {
+ // Database is ignored due to a previous indication that it is
+ // missing from master after optime "ts".
+ return false;
+ }
+ if ( Database::duplicateUncasedName( false, db, dbpath ).empty() ) {
+ // No duplicate database names are present.
+ return true;
+ }
+
+ OpTime lastTime;
+ bool dbOk = false;
+ {
+ dbtemprelease release;
+
+ // We always log an operation after executing it (never before), so
+ // a database list will always be valid as of an oplog entry generated
+ // before it was retrieved.
+
+ BSONObj last = oplogReader.findOne( this->ns().c_str(), Query().sort( BSON( "$natural" << -1 ) ) );
+ if ( !last.isEmpty() ) {
+ BSONElement ts = last.getField( "ts" );
+ massert( 14032, "Invalid 'ts' in remote log", ts.type() == Date || ts.type() == Timestamp );
+ lastTime = OpTime( ts.date() );
+ }
+
+ BSONObj info;
+ bool ok = oplogReader.conn()->runCommand( "admin", BSON( "listDatabases" << 1 ), info );
+ massert( 14033, "Unable to get database list", ok );
+ BSONObjIterator i( info.getField( "databases" ).embeddedObject() );
+ while( i.more() ) {
+ BSONElement e = i.next();
+
+ const char * name = e.embeddedObject().getField( "name" ).valuestr();
+ if ( strcasecmp( name, db ) != 0 )
+ continue;
+
+ if ( strcmp( name, db ) == 0 ) {
+ // The db exists on master, still need to check that no conflicts exist there.
+ dbOk = true;
+ continue;
+ }
+
+ // The master has a db name that conflicts with the requested name.
+ dbOk = false;
+ break;
+ }
+ }
+
+ if ( !dbOk ) {
+ ___databaseIgnorer.doIgnoreUntilAfter( db, lastTime );
+ incompleteCloneDbs.erase(db);
+ addDbNextPass.erase(db);
+ return false;
+ }
+
+ // Check for duplicates again, since we released the lock above.
+ set< string > duplicates;
+ Database::duplicateUncasedName( false, db, dbpath, &duplicates );
+
+ // The database is present on the master and no conflicting databases
+ // are present on the master. Drop any local conflicts.
+ for( set< string >::const_iterator i = duplicates.begin(); i != duplicates.end(); ++i ) {
+ ___databaseIgnorer.doIgnoreUntilAfter( *i, lastTime );
+ incompleteCloneDbs.erase(*i);
+ addDbNextPass.erase(*i);
+ Client::Context ctx(*i);
+ dropDatabase(*i);
+ }
+
+ massert( 14034, "Duplicate database names present after attempting to delete duplicates",
+ Database::duplicateUncasedName( false, db, dbpath ).empty() );
+ return true;
+ }
+
+ void ReplSource::applyOperation(const BSONObj& op) {
+ try {
+ bool failedUpdate = applyOperation_inlock( op );
+ if (failedUpdate) {
+ Sync sync(hostName);
+ if (sync.shouldRetry(op)) {
+ uassert(15914, "Failure retrying initial sync update", !applyOperation_inlock(op));
+ }
+ }
+ }
+ catch ( UserException& e ) {
+ log() << "sync: caught user assertion " << e << " while applying op: " << op << endl;;
+ }
+ catch ( DBException& e ) {
+ log() << "sync: caught db exception " << e << " while applying op: " << op << endl;;
+ }
+
+ }
+
+ /* local.oplog.$main is of the form:
+ { ts: ..., op: <optype>, ns: ..., o: <obj> , o2: <extraobj>, b: <boolflag> }
+ ...
+ see logOp() comments.
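+ for example (values illustrative only), a single insert might be logged as:
+ { ts: Timestamp(1324750000, 1), op: "i", ns: "test.foo", o: { _id: 1, x: "hello" } }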
+
+ @param alreadyLocked caller already put us in write lock if true
+ */
+ void ReplSource::sync_pullOpLog_applyOperation(BSONObj& op, bool alreadyLocked) {
+ if( logLevel >= 6 ) // op.tostring is expensive so doing this check explicitly
+ log(6) << "processing op: " << op << endl;
+
+ if( op.getStringField("op")[0] == 'n' )
+ return;
+
+ char clientName[MaxDatabaseNameLen];
+ const char *ns = op.getStringField("ns");
+ nsToDatabase(ns, clientName);
+
+ if ( *ns == '.' ) {
+ problem() << "skipping bad op in oplog: " << op.toString() << endl;
+ return;
+ }
+ else if ( *ns == 0 ) {
+ /*if( op.getStringField("op")[0] != 'n' )*/ {
+ problem() << "halting replication, bad op in oplog:\n " << op.toString() << endl;
+ replAllDead = "bad object in oplog";
+ throw SyncException();
+ }
+ //ns = "local.system.x";
+ //nsToDatabase(ns, clientName);
+ }
+
+ if ( !only.empty() && only != clientName )
+ return;
+
+ if( cmdLine.pretouch && !alreadyLocked/*doesn't make sense if in write lock already*/ ) {
+ if( cmdLine.pretouch > 1 ) {
+ /* note: this is bad - should be put in ReplSource. but this is first test... */
+ static int countdown;
+ assert( countdown >= 0 );
+ if( countdown > 0 ) {
+ countdown--; // was pretouched on a prev pass
+ }
+ else {
+ const int m = 4;
+ if( tp.get() == 0 ) {
+ int nthr = min(8, cmdLine.pretouch);
+ nthr = max(nthr, 1);
+ tp.reset( new ThreadPool(nthr) );
+ }
+ vector<BSONObj> v;
+ oplogReader.peek(v, cmdLine.pretouch);
+ unsigned a = 0;
+ while( 1 ) {
+ if( a >= v.size() ) break;
+ unsigned b = a + m - 1; // v[a..b]
+ if( b >= v.size() ) b = v.size() - 1;
+ tp->schedule(pretouchN, v, a, b);
+ DEV cout << "pretouch task: " << a << ".." << b << endl;
+ a += m;
+ }
+ // we do one too...
+ pretouchOperation(op);
+ tp->join();
+ countdown = v.size();
+ }
+ }
+ else {
+ pretouchOperation(op);
+ }
+ }
+
+ scoped_ptr<writelock> lk( alreadyLocked ? 0 : new writelock() );
+
+ if ( replAllDead ) {
+ // hmmm why is this check here and not at top of this function? does it get set between top and here?
+ log() << "replAllDead, throwing SyncException: " << replAllDead << endl;
+ throw SyncException();
+ }
+
+ if ( !handleDuplicateDbName( op, ns, clientName ) ) {
+ return;
+ }
+
+ Client::Context ctx( ns );
+ ctx.getClient()->curop()->reset();
+
+ bool empty = ctx.db()->isEmpty();
+ bool incompleteClone = incompleteCloneDbs.count( clientName ) != 0;
+
+ if( logLevel >= 6 )
+ log(6) << "ns: " << ns << ", justCreated: " << ctx.justCreated() << ", empty: " << empty << ", incompleteClone: " << incompleteClone << endl;
+
+ // always apply admin command command
+ // this is a bit hacky -- the semantics of replication/commands aren't well specified
+ if ( strcmp( clientName, "admin" ) == 0 && *op.getStringField( "op" ) == 'c' ) {
+ applyOperation( op );
+ return;
+ }
+
+ if ( ctx.justCreated() || empty || incompleteClone ) {
+ // we must add to incomplete list now that setClient has been called
+ incompleteCloneDbs.insert( clientName );
+ if ( nClonedThisPass ) {
+ /* we only clone one database per pass, even if a lot need to be done. This helps us
+ avoid overflowing the master's transaction log by doing too much work before going
+ back to read more transactions. (Imagine a scenario of slave startup where we try to
+ clone 100 databases in one pass.)
+ */
+ addDbNextPass.insert( clientName );
+ }
+ else {
+ if ( incompleteClone ) {
+ log() << "An earlier initial clone of '" << clientName << "' did not complete, now resyncing." << endl;
+ }
+ save();
+ Client::Context ctx(ns);
+ nClonedThisPass++;
+ resync(ctx.db()->name);
+ addDbNextPass.erase(clientName);
+ incompleteCloneDbs.erase( clientName );
+ }
+ save();
+ }
+ else {
+ applyOperation( op );
+ addDbNextPass.erase( clientName );
+ }
+ }
+
+ void ReplSource::syncToTailOfRemoteLog() {
+ string _ns = ns();
+ BSONObjBuilder b;
+ if ( !only.empty() ) {
+ b.appendRegex("ns", string("^") + only);
+ }
+ BSONObj last = oplogReader.findOne( _ns.c_str(), Query( b.done() ).sort( BSON( "$natural" << -1 ) ) );
+ if ( !last.isEmpty() ) {
+ BSONElement ts = last.getField( "ts" );
+ massert( 10386 , "non Date ts found: " + last.toString(), ts.type() == Date || ts.type() == Timestamp );
+ syncedTo = OpTime( ts.date() );
+ }
+ }
+
+ extern unsigned replApplyBatchSize;
+
+ /* slave: pull some data from the master's oplog
+ note: not yet in db mutex at this point.
+ @return -1 error
+ 0 ok, don't sleep
+ 1 ok, sleep
+ */
+ int ReplSource::sync_pullOpLog(int& nApplied) {
+ int okResultCode = 1;
+ string ns = string("local.oplog.$") + sourceName();
+ log(2) << "repl: sync_pullOpLog " << ns << " syncedTo:" << syncedTo.toStringLong() << '\n';
+
+ bool tailing = true;
+ oplogReader.tailCheck();
+
+ bool initial = syncedTo.isNull();
+
+ if ( !oplogReader.haveCursor() || initial ) {
+ if ( initial ) {
+ // Important to grab last oplog timestamp before listing databases.
+ syncToTailOfRemoteLog();
+ BSONObj info;
+ bool ok = oplogReader.conn()->runCommand( "admin", BSON( "listDatabases" << 1 ), info );
+ massert( 10389 , "Unable to get database list", ok );
+ BSONObjIterator i( info.getField( "databases" ).embeddedObject() );
+ while( i.moreWithEOO() ) {
+ BSONElement e = i.next();
+ if ( e.eoo() )
+ break;
+ string name = e.embeddedObject().getField( "name" ).valuestr();
+ if ( !e.embeddedObject().getBoolField( "empty" ) ) {
+ if ( name != "local" ) {
+ if ( only.empty() || only == name ) {
+ log( 2 ) << "adding to 'addDbNextPass': " << name << endl;
+ addDbNextPass.insert( name );
+ }
+ }
+ }
+ }
+ dblock lk;
+ save();
+ }
+
+ BSONObjBuilder q;
+ q.appendDate("$gte", syncedTo.asDate());
+ BSONObjBuilder query;
+ query.append("ts", q.done());
+ if ( !only.empty() ) {
+ // note we may skip a LOT of data table scanning here - a lot of work for the master.
+ query.appendRegex("ns", string("^") + only); // maybe append "\\." here?
+ }
+ BSONObj queryObj = query.done();
+ // e.g. queryObj = { ts: { $gte: syncedTo } }
+
+ oplogReader.tailingQuery(ns.c_str(), queryObj);
+ tailing = false;
+ }
+ else {
+ log(2) << "repl: tailing=true\n";
+ }
+
+ if( !oplogReader.haveCursor() ) {
+ problem() << "repl: dbclient::query returns null (conn closed?)" << endl;
+ oplogReader.resetConnection();
+ return -1;
+ }
+
+ // show any deferred database creates from a previous pass
+ {
+ set<string>::iterator i = addDbNextPass.begin();
+ if ( i != addDbNextPass.end() ) {
+ BSONObjBuilder b;
+ b.append("ns", *i + '.');
+ b.append("op", "db");
+ BSONObj op = b.done();
+ sync_pullOpLog_applyOperation(op, false);
+ }
+ }
+
+ if ( !oplogReader.more() ) {
+ if ( tailing ) {
+ log(2) << "repl: tailing & no new activity\n";
+ if( oplogReader.awaitCapable() )
+ okResultCode = 0; // don't sleep
+
+ }
+ else {
+ log() << "repl: " << ns << " oplog is empty\n";
+ }
+ {
+ dblock lk;
+ save();
+ }
+ return okResultCode;
+ }
+
+ OpTime nextOpTime;
+ {
+ BSONObj op = oplogReader.next();
+ BSONElement ts = op.getField("ts");
+ if ( ts.type() != Date && ts.type() != Timestamp ) {
+ string err = op.getStringField("$err");
+ if ( !err.empty() ) {
+ // 13051 is "tailable cursor requested on non capped collection"
+ if (op.getIntField("code") == 13051) {
+ problem() << "trying to slave off of a non-master" << '\n';
+ massert( 13344 , "trying to slave off of a non-master", false );
+ }
+ else {
+ problem() << "repl: $err reading remote oplog: " + err << '\n';
+ massert( 10390 , "got $err reading remote oplog", false );
+ }
+ }
+ else {
+ problem() << "repl: bad object read from remote oplog: " << op.toString() << '\n';
+ massert( 10391 , "repl: bad object read from remote oplog", false);
+ }
+ }
+
+ nextOpTime = OpTime( ts.date() );
+ log(2) << "repl: first op time received: " << nextOpTime.toString() << '\n';
+ if ( initial ) {
+ log(1) << "repl: initial run\n";
+ }
+ if( tailing ) {
+ if( !( syncedTo < nextOpTime ) ) {
+ log() << "repl ASSERTION failed : syncedTo < nextOpTime" << endl;
+ log() << "repl syncTo: " << syncedTo.toStringLong() << endl;
+ log() << "repl nextOpTime: " << nextOpTime.toStringLong() << endl;
+ assert(false);
+ }
+ oplogReader.putBack( op ); // op will be processed in the loop below
+ nextOpTime = OpTime(); // will reread the op below
+ }
+ else if ( nextOpTime != syncedTo ) { // didn't get what we queried for - error
+ Nullstream& l = log();
+ l << "repl: nextOpTime " << nextOpTime.toStringLong() << ' ';
+ if ( nextOpTime < syncedTo )
+ l << "<??";
+ else
+ l << ">";
+
+ l << " syncedTo " << syncedTo.toStringLong() << '\n';
+ log() << "repl: time diff: " << (nextOpTime.getSecs() - syncedTo.getSecs()) << "sec\n";
+ log() << "repl: tailing: " << tailing << '\n';
+ log() << "repl: data too stale, halting replication" << endl;
+ replInfo = replAllDead = "data too stale halted replication";
+ assert( syncedTo < nextOpTime );
+ throw SyncException();
+ }
+ else {
+ /* t == syncedTo, so the first op was applied previously or it is the first op of initial query and need not be applied. */
+ }
+ }
+
+ // apply operations
+ {
+ int n = 0;
+ time_t saveLast = time(0);
+ while ( 1 ) {
+
+ bool moreInitialSyncsPending = !addDbNextPass.empty() && n; // we need "&& n" to ensure we actually process at least one op to get a sync point recorded in the first place.
+
+ if ( moreInitialSyncsPending || !oplogReader.more() ) {
+ dblock lk;
+
+ // NOTE aaron 2011-03-29 This block may be unnecessary, but I'm leaving it in place to avoid changing timing behavior.
+ {
+ dbtemprelease t;
+ if ( !moreInitialSyncsPending && oplogReader.more() ) {
+ continue;
+ }
+ // otherwise, break out of loop so we can set to completed or clone more dbs
+ }
+
+ if( oplogReader.awaitCapable() && tailing )
+ okResultCode = 0; // don't sleep
+ syncedTo = nextOpTime;
+ save(); // note how far we are synced up to now
+ log() << "repl: applied " << n << " operations" << endl;
+ nApplied = n;
+ log() << "repl: end sync_pullOpLog syncedTo: " << syncedTo.toStringLong() << endl;
+ break;
+ }
+ else {
+ }
+
+ OCCASIONALLY if( n > 0 && ( n > 100000 || time(0) - saveLast > 60 ) ) {
+ // periodically note our progress, in case we are doing a lot of work and crash
+ dblock lk;
+ syncedTo = nextOpTime;
+ // can't update local log ts since there are pending operations from our peer
+ save();
+ log() << "repl: checkpoint applied " << n << " operations" << endl;
+ log() << "repl: syncedTo: " << syncedTo.toStringLong() << endl;
+ saveLast = time(0);
+ n = 0;
+ }
+
+ BSONObj op = oplogReader.next();
+
+ unsigned b = replApplyBatchSize;
+ bool justOne = b == 1;
+ scoped_ptr<writelock> lk( justOne ? 0 : new writelock() );
+ while( 1 ) {
+
+ BSONElement ts = op.getField("ts");
+ if( !( ts.type() == Date || ts.type() == Timestamp ) ) {
+ log() << "sync error: problem querying remote oplog record" << endl;
+ log() << "op: " << op.toString() << endl;
+ log() << "halting replication" << endl;
+ replInfo = replAllDead = "sync error: no ts found querying remote oplog record";
+ throw SyncException();
+ }
+ OpTime last = nextOpTime;
+ nextOpTime = OpTime( ts.date() );
+ if ( !( last < nextOpTime ) ) {
+ log() << "sync error: last applied optime at slave >= nextOpTime from master" << endl;
+ log() << " last: " << last.toStringLong() << endl;
+ log() << " nextOpTime: " << nextOpTime.toStringLong() << endl;
+ log() << " halting replication" << endl;
+ replInfo = replAllDead = "sync error last >= nextOpTime";
+ uassert( 10123 , "replication error last applied optime at slave >= nextOpTime from master", false);
+ }
+ if ( replSettings.slavedelay && ( unsigned( time( 0 ) ) < nextOpTime.getSecs() + replSettings.slavedelay ) ) {
+ assert( justOne );
+ oplogReader.putBack( op );
+ _sleepAdviceTime = nextOpTime.getSecs() + replSettings.slavedelay + 1;
+ dblock lk;
+ if ( n > 0 ) {
+ syncedTo = last;
+ save();
+ }
+ log() << "repl: applied " << n << " operations" << endl;
+ log() << "repl: syncedTo: " << syncedTo.toStringLong() << endl;
+ log() << "waiting until: " << _sleepAdviceTime << " to continue" << endl;
+ return okResultCode;
+ }
+
+ sync_pullOpLog_applyOperation(op, !justOne);
+ n++;
+
+ if( --b == 0 )
+ break;
+ // if we get here, we are doing multiple applications in a single write lock acquisition
+ if( !oplogReader.moreInCurrentBatch() ) {
+ // break if no more in batch so we release lock while reading from the master
+ break;
+ }
+ op = oplogReader.next();
+
+ getDur().commitIfNeeded();
+ }
+ }
+ }
+
+ return okResultCode;
+ }
+
+ BSONObj userReplQuery = fromjson("{\"user\":\"repl\"}");
+
+ bool replAuthenticate(DBClientBase *conn) {
+ if( noauth ) {
+ return true;
+ }
+ if( ! cc().isAdmin() ) {
+ log() << "replauthenticate: requires admin permissions, failing\n";
+ return false;
+ }
+
+ string u;
+ string p;
+ if (internalSecurity.pwd.length() > 0) {
+ u = internalSecurity.user;
+ p = internalSecurity.pwd;
+ }
+ else {
+ BSONObj user;
+ {
+ dblock lk;
+ Client::Context ctxt("local.");
+ if( !Helpers::findOne("local.system.users", userReplQuery, user) ||
+ // try the first user in local
+ !Helpers::getSingleton("local.system.users", user) ) {
+ log() << "replauthenticate: no user in local.system.users to use for authentication\n";
+ return false;
+ }
+ }
+ u = user.getStringField("user");
+ p = user.getStringField("pwd");
+ massert( 10392 , "bad user object? [1]", !u.empty());
+ massert( 10393 , "bad user object? [2]", !p.empty());
+ }
+
+ string err;
+ if( !conn->auth("local", u.c_str(), p.c_str(), err, false) ) {
+ log() << "replauthenticate: can't authenticate to master server, user:" << u << endl;
+ return false;
+ }
+ return true;
+ }
+
+ bool replHandshake(DBClientConnection *conn) {
+
+ string myname = getHostName();
+
+ BSONObj me;
+ {
+
+ dblock l;
+ // local.me is an identifier for a server for getLastError w:2+
+ if ( ! Helpers::getSingleton( "local.me" , me ) ||
+ ! me.hasField("host") ||
+ me["host"].String() != myname ) {
+
+ // clean out local.me
+ Helpers::emptyCollection("local.me");
+
+ // repopulate
+ BSONObjBuilder b;
+ b.appendOID( "_id" , 0 , true );
+ b.append( "host", myname );
+ me = b.obj();
+ Helpers::putSingleton( "local.me" , me );
+ }
+ }
+
+ BSONObjBuilder cmd;
+ cmd.appendAs( me["_id"] , "handshake" );
+ if (theReplSet) {
+ cmd.append("member", theReplSet->selfId());
+ }
+
+ BSONObj res;
+ bool ok = conn->runCommand( "admin" , cmd.obj() , res );
+ // ignoring for now on purpose for older versions
+ log(ok) << "replHandshake res not: " << ok << " res: " << res << endl;
+ return true;
+ }
+
+ bool OplogReader::commonConnect(const string& hostName) {
+ if( conn() == 0 ) {
+ _conn = shared_ptr<DBClientConnection>(new DBClientConnection( false, 0, 0 /* tcp timeout */));
+ string errmsg;
+ ReplInfo r("trying to connect to sync source");
+ if ( !_conn->connect(hostName.c_str(), errmsg) ||
+ (!noauth && !replAuthenticate(_conn.get())) ) {
+ resetConnection();
+ log() << "repl: " << errmsg << endl;
+ return false;
+ }
+ }
+ return true;
+ }
+
+ bool OplogReader::connect(string hostName) {
+ if (conn() != 0) {
+ return true;
+ }
+
+ if (commonConnect(hostName)) {
+ return replHandshake(_conn.get());
+ }
+ return false;
+ }
+
+ bool OplogReader::connect(const BSONObj& rid, const int from, const string& to) {
+ if (conn() != 0) {
+ return true;
+ }
+ if (commonConnect(to)) {
+ log() << "handshake between " << from << " and " << to << endl;
+ return passthroughHandshake(rid, from);
+ }
+ return false;
+ }
+
+ bool OplogReader::passthroughHandshake(const BSONObj& rid, const int f) {
+ BSONObjBuilder cmd;
+ cmd.appendAs( rid["_id"], "handshake" );
+ cmd.append( "member" , f );
+
+ BSONObj res;
+ return conn()->runCommand( "admin" , cmd.obj() , res );
+ }
+
+ /* note: not yet in mutex at this point.
+ returns >= 0 if ok. return -1 if you want to reconnect.
+ return value of zero indicates no sleep necessary before next call
+ */
+ int ReplSource::sync(int& nApplied) {
+ _sleepAdviceTime = 0;
+ ReplInfo r("sync");
+ if ( !cmdLine.quiet ) {
+ Nullstream& l = log();
+ l << "repl: syncing from ";
+ if( sourceName() != "main" ) {
+ l << "source:" << sourceName() << ' ';
+ }
+ l << "host:" << hostName << endl;
+ }
+ nClonedThisPass = 0;
+
+ // FIXME Handle cases where this db isn't on default port, or default port is spec'd in hostName.
+ if ( (string("localhost") == hostName || string("127.0.0.1") == hostName) && cmdLine.port == CmdLine::DefaultDBPort ) {
+ log() << "repl: can't sync from self (localhost). sources configuration may be wrong." << endl;
+ sleepsecs(5);
+ return -1;
+ }
+
+ if ( !oplogReader.connect(hostName) ) {
+ log(4) << "repl: can't connect to sync source" << endl;
+ return -1;
+ }
+
+ /*
+ // get current mtime at the server.
+ BSONObj o = conn->findOne("admin.$cmd", opTimeQuery);
+ BSONElement e = o.getField("optime");
+ if( e.eoo() ) {
+ log() << "repl: failed to get cur optime from master" << endl;
+ log() << " " << o.toString() << endl;
+ return false;
+ }
+ uassert( 10124 , e.type() == Date );
+ OpTime serverCurTime;
+ serverCurTime.asDate() = e.date();
+ */
+ return sync_pullOpLog(nApplied);
+ }
+
+ /* --------------------------------------------------------------*/
+
+ /*
+ TODO:
+ _ source has autoptr to the cursor
+ _ reuse that cursor when we can
+ */
+
+ /* returns: # of seconds to sleep before next pass
+ 0 = no sleep recommended
+ 1 = special sentinel indicating adaptive sleep recommended
+ */
+ int _replMain(ReplSource::SourceVector& sources, int& nApplied) {
+ {
+ ReplInfo r("replMain load sources");
+ dblock lk;
+ ReplSource::loadAll(sources);
+ replSettings.fastsync = false; // only need this param for initial reset
+ }
+
+ if ( sources.empty() ) {
+ /* replication is not configured yet (for --slave) in local.sources. Poll for config
+ every 20 seconds.
+ */
+ log() << "no source given, add a master to local.sources to start replication" << endl;
+ return 20;
+ }
+
+ int sleepAdvice = 1;
+ for ( ReplSource::SourceVector::iterator i = sources.begin(); i != sources.end(); i++ ) {
+ ReplSource *s = i->get();
+ int res = -1;
+ try {
+ res = s->sync(nApplied);
+ bool moreToSync = s->haveMoreDbsToSync();
+ if( res < 0 ) {
+ sleepAdvice = 3;
+ }
+ else if( moreToSync ) {
+ sleepAdvice = 0;
+ }
+ else if ( s->sleepAdvice() ) {
+ sleepAdvice = s->sleepAdvice();
+ }
+ else
+ sleepAdvice = res;
+ }
+ catch ( const SyncException& ) {
+ log() << "caught SyncException" << endl;
+ return 10;
+ }
+ catch ( AssertionException& e ) {
+ if ( e.severe() ) {
+ log() << "replMain AssertionException " << e.what() << endl;
+ return 60;
+ }
+ else {
+ log() << "repl: AssertionException " << e.what() << '\n';
+ }
+ replInfo = "replMain caught AssertionException";
+ }
+ catch ( const DBException& e ) {
+ log() << "repl: DBException " << e.what() << endl;
+ replInfo = "replMain caught DBException";
+ }
+ catch ( const std::exception &e ) {
+ log() << "repl: std::exception " << e.what() << endl;
+ replInfo = "replMain caught std::exception";
+ }
+ catch ( ... ) {
+ log() << "unexpected exception during replication. replication will halt" << endl;
+ replAllDead = "caught unexpected exception during replication";
+ }
+ if ( res < 0 )
+ s->oplogReader.resetConnection();
+ }
+ return sleepAdvice;
+ }
+
+ void replMain() {
+ ReplSource::SourceVector sources;
+ while ( 1 ) {
+ int s = 0;
+ {
+ dblock lk;
+ if ( replAllDead ) {
+ // throttledForceResyncDead can throw
+ if ( !replSettings.autoresync || !ReplSource::throttledForceResyncDead( "auto" ) ) {
+ log() << "all sources dead: " << replAllDead << ", sleeping for 5 seconds" << endl;
+ break;
+ }
+ }
+ assert( syncing == 0 ); // i.e., there is only one sync thread running. we will want to change/fix this.
+ syncing++;
+ }
+ try {
+ int nApplied = 0;
+ s = _replMain(sources, nApplied);
+ if( s == 1 ) {
+ if( nApplied == 0 ) s = 2;
+ else if( nApplied > 100 ) {
+ // sleep very little - just enough that we aren't truly hammering the master
+ sleepmillis(75);
+ s = 0;
+ }
+ }
+ }
+ catch (...) {
+ out() << "caught exception in _replMain" << endl;
+ s = 4;
+ }
+ {
+ dblock lk;
+ assert( syncing == 1 );
+ syncing--;
+ }
+
+ if( relinquishSyncingSome ) {
+ relinquishSyncingSome = 0;
+ s = 1; // sleep before going back in to syncing=1
+ }
+
+ if ( s ) {
+ stringstream ss;
+ ss << "repl: sleep " << s << " sec before next pass";
+ string msg = ss.str();
+ if ( ! cmdLine.quiet )
+ log() << msg << endl;
+ ReplInfo r(msg.c_str());
+ sleepsecs(s);
+ }
+ }
+ }
+
+ static void replMasterThread() {
+ sleepsecs(4);
+ Client::initThread("replmaster");
+ int toSleep = 10;
+ while( 1 ) {
+
+ sleepsecs( toSleep );
+ /* write a keep-alive like entry to the log. this will make things like
+ printReplicationStatus() and printSlaveReplicationStatus() stay up-to-date
+ even when things are idle.
+ */
+ {
+ writelocktry lk("",1);
+ if ( lk.got() ) {
+ toSleep = 10;
+
+ replLocalAuth();
+
+ try {
+ logKeepalive();
+ }
+ catch(...) {
+ log() << "caught exception in replMasterThread()" << endl;
+ }
+ }
+ else {
+ log(5) << "couldn't logKeepalive" << endl;
+ toSleep = 1;
+ }
+ }
+ }
+ }
+
+ void replSlaveThread() {
+ sleepsecs(1);
+ Client::initThread("replslave");
+ cc().iAmSyncThread();
+
+ {
+ dblock lk;
+ replLocalAuth();
+ }
+
+ while ( 1 ) {
+ try {
+ replMain();
+ sleepsecs(5);
+ }
+ catch ( AssertionException& ) {
+ ReplInfo r("Assertion in replSlaveThread(): sleeping 5 minutes before retry");
+ problem() << "Assertion in replSlaveThread(): sleeping 5 minutes before retry" << endl;
+ sleepsecs(300);
+ }
+ catch ( DBException& e ) {
+ problem() << "exception in replSlaveThread(): " << e.what()
+ << ", sleeping 5 minutes before retry" << endl;
+ sleepsecs(300);
+ }
+ catch ( ... ) {
+ problem() << "error in replSlaveThread(): sleeping 5 minutes before retry" << endl;
+ sleepsecs(300);
+ }
+ }
+ }
+
+ void tempThread() {
+ while ( 1 ) {
+ out() << d.dbMutex.info().isLocked() << endl;
+ sleepmillis(100);
+ }
+ }
+
+ void newRepl();
+ void oldRepl();
+ void startReplSets(ReplSetCmdline*);
+ void startReplication() {
+ /* if we are going to be a replica set, we aren't doing other forms of replication. */
+ if( !cmdLine._replSet.empty() ) {
+ if( replSettings.slave || replSettings.master ) {
+ log() << "***" << endl;
+ log() << "ERROR: can't use --slave or --master replication options with --replSet" << endl;
+ log() << "***" << endl;
+ }
+ newRepl();
+
+ replSet = true;
+ ReplSetCmdline *replSetCmdline = new ReplSetCmdline(cmdLine._replSet);
+ boost::thread t( boost::bind( &startReplSets, replSetCmdline) );
+
+ return;
+ }
+
+ oldRepl();
+
+ /* this was just to see if anything locks for longer than it should -- we need to be careful
+ not to be locked when trying to connect() or query() the other side.
+ */
+ //boost::thread tempt(tempThread);
+
+ if( !replSettings.slave && !replSettings.master )
+ return;
+
+ {
+ dblock lk;
+ replLocalAuth();
+ }
+
+ if ( replSettings.slave ) {
+ assert( replSettings.slave == SimpleSlave );
+ log(1) << "slave=true" << endl;
+ boost::thread repl_thread(replSlaveThread);
+ }
+
+ if ( replSettings.master ) {
+ log(1) << "master=true" << endl;
+ replSettings.master = true;
+ createOplog();
+ boost::thread t(replMasterThread);
+ }
+
+ while( replSettings.fastsync ) // don't allow writes until we've set up from log
+ sleepmillis( 50 );
+ }
+
+ void testPretouch() {
+ int nthr = min(8, 8);
+ nthr = max(nthr, 1);
+ int m = 8 / nthr;
+ ThreadPool tp(nthr);
+ vector<BSONObj> v;
+
+ BSONObj x = BSON( "ns" << "test.foo" << "o" << BSON( "_id" << 1 ) << "op" << "i" );
+
+ v.push_back(x);
+ v.push_back(x);
+ v.push_back(x);
+
+ unsigned a = 0;
+ while( 1 ) {
+ if( a >= v.size() ) break;
+ unsigned b = a + m - 1; // v[a..b]
+ if( b >= v.size() ) b = v.size() - 1;
+ tp.schedule(pretouchN, v, a, b);
+ DEV cout << "pretouch task: " << a << ".." << b << endl;
+ a += m;
+ }
+ tp.join();
+ }
+
+ class ReplApplyBatchSizeValidator : public ParameterValidator {
+ public:
+ ReplApplyBatchSizeValidator() : ParameterValidator( "replApplyBatchSize" ) {}
+
+ virtual bool isValid( BSONElement e , string& errmsg ) const {
+ int b = e.numberInt();
+ if( b < 1 || b > 1024 ) {
+ errmsg = "replApplyBatchSize has to be >= 1 and < 1024";
+ return false;
+ }
+
+ if ( replSettings.slavedelay != 0 && b > 1 ) {
+ errmsg = "can't use a batch size > 1 with slavedelay";
+ return false;
+ }
+ if ( ! replSettings.slave ) {
+ errmsg = "can't set replApplyBatchSize on a non-slave machine";
+ return false;
+ }
+
+ return true;
+ }
+ } replApplyBatchSizeValidator;
+
+} // namespace mongo
diff --git a/src/mongo/db/repl.h b/src/mongo/db/repl.h
new file mode 100644
index 00000000000..83242d0a4ce
--- /dev/null
+++ b/src/mongo/db/repl.h
@@ -0,0 +1,199 @@
+// repl.h - replication
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+/* replication data overview
+
+ at the slave:
+ local.sources { host: ..., source: ..., only: ..., syncedTo: ..., localLogTs: ..., dbsNextPass: { ... }, incompleteCloneDbs: { ... } }
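+
+ an illustrative local.sources document (example values only):
+ { host: "db1.example.net:27017", source: "main", syncedTo: Timestamp(1324750000, 1) }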
+
+ at the master:
+ local.oplog.$<source>
+*/
+
+#pragma once
+
+#include "pdfile.h"
+#include "db.h"
+#include "dbhelpers.h"
+#include "../client/dbclient.h"
+#include "../util/optime.h"
+#include "oplog.h"
+#include "../util/concurrency/thread_pool.h"
+#include "oplogreader.h"
+#include "cloner.h"
+
+namespace mongo {
+
+ /* replication slave? (possibly with slave)
+ --slave cmd line setting -> SimpleSlave
+ */
+ typedef enum { NotSlave=0, SimpleSlave } SlaveTypes;
+
+ class ReplSettings {
+ public:
+ SlaveTypes slave;
+
+ /** true means we are master and doing replication. if we are not writing to oplog, this won't be true. */
+ bool master;
+
+ bool fastsync;
+
+ bool autoresync;
+
+ int slavedelay;
+
+ set<string> discoveredSeeds;
+ mutex discoveredSeeds_mx;
+
+ BSONObj reconfig;
+
+ ReplSettings()
+ : slave(NotSlave),
+ master(false),
+ fastsync(),
+ autoresync(false),
+ slavedelay(),
+ discoveredSeeds(),
+ discoveredSeeds_mx("ReplSettings::discoveredSeeds") {
+ }
+
+ };
+
+ extern ReplSettings replSettings;
+
+ /* A replication exception */
+ class SyncException : public DBException {
+ public:
+ SyncException() : DBException( "sync exception" , 10001 ) {}
+ };
+
+ /* A Source is a source from which we can pull (replicate) data.
+ stored in collection local.sources.
+
+ Can be a group of things to replicate for several databases.
+
+ { host: ..., source: ..., only: ..., syncedTo: ..., dbsNextPass: { ... }, incompleteCloneDbs: { ... } }
+
+ 'source' defaults to 'main'; support for multiple source names is
+ not done (always use main for now).
+ */
+ class ReplSource {
+ shared_ptr<ThreadPool> tp;
+
+ void resync(string db);
+
+ /** @param alreadyLocked caller already put us in write lock if true */
+ void sync_pullOpLog_applyOperation(BSONObj& op, bool alreadyLocked);
+
+ /* pull some operations from the master's oplog, and apply them.
+ calls sync_pullOpLog_applyOperation
+ */
+ int sync_pullOpLog(int& nApplied);
+
+ /* we only clone one database per pass, even if a lot need to be done. This helps us
+ avoid overflowing the master's transaction log by doing too much work before going
+ back to read more transactions. (Imagine a scenario of slave startup where we try to
+ clone 100 databases in one pass.)
+ */
+ set<string> addDbNextPass;
+
+ set<string> incompleteCloneDbs;
+
+ ReplSource();
+
+ // returns the dummy ns used to do the drop
+ string resyncDrop( const char *db, const char *requester );
+ // call without the db mutex
+ void syncToTailOfRemoteLog();
+ string ns() const { return string( "local.oplog.$" ) + sourceName(); }
+ unsigned _sleepAdviceTime;
+
+ /**
+ * If 'db' is a new database and its name would conflict with that of
+ * an existing database, synchronize these database names with the
+ * master.
+ * @return true iff an op with the specified ns may be applied.
+ */
+ bool handleDuplicateDbName( const BSONObj &op, const char *ns, const char *db );
+
+ public:
+ OplogReader oplogReader;
+
+ void applyOperation(const BSONObj& op);
+ string hostName; // ip addr or hostname plus optionally, ":<port>"
+ string _sourceName; // a logical source name.
+ string sourceName() const { return _sourceName.empty() ? "main" : _sourceName; }
+ string only; // only a certain db. note that in the sources collection, this may not be changed once you start replicating.
+
+ /* the last time point we have already synced up to (in the remote/master's oplog). */
+ OpTime syncedTo;
+
+ int nClonedThisPass;
+
+ typedef vector< shared_ptr< ReplSource > > SourceVector;
+ static void loadAll(SourceVector&);
+ explicit ReplSource(BSONObj);
+
+ /* -1 = error */
+ int sync(int& nApplied);
+
+ void save(); // write ourself to local.sources
+
+ // make a jsobj from our member fields of the form
+ // { host: ..., source: ..., syncedTo: ... }
+ BSONObj jsobj();
+
+ bool operator==(const ReplSource&r) const {
+ return hostName == r.hostName && sourceName() == r.sourceName();
+ }
+ string toString() const { return sourceName() + "@" + hostName; }
+
+ bool haveMoreDbsToSync() const { return !addDbNextPass.empty(); }
+ int sleepAdvice() const {
+ if ( !_sleepAdviceTime )
+ return 0;
+ int wait = _sleepAdviceTime - unsigned( time( 0 ) );
+ return wait > 0 ? wait : 0;
+ }
+
+ static bool throttledForceResyncDead( const char *requester );
+ static void forceResyncDead( const char *requester );
+ void forceResync( const char *requester );
+ };
+
+ bool anyReplEnabled();
+ void appendReplicationInfo( BSONObjBuilder& result , bool authed , int level = 0 );
+
+ /**
+ * Helper class used to set and query an ignore state for a named database.
+ * The ignore state will expire after a specified OpTime.
+ */
+ class DatabaseIgnorer {
+ public:
+ /** Indicate that operations for 'db' should be ignored until after 'futureOplogTime' */
+ void doIgnoreUntilAfter( const string &db, const OpTime &futureOplogTime );
+ /**
+ * Query ignore state of 'db'; if 'currentOplogTime' is after the ignore
+ * limit, the ignore state will be cleared.
+ */
+ bool ignoreAt( const string &db, const OpTime &currentOplogTime );
+ private:
+ map< string, OpTime > _ignores;
+ };
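+
+ /* sketch of intended use (illustrative): after learning that db "foo" is missing on the
+ master as of optime t, call doIgnoreUntilAfter( "foo", t ); ops for "foo" are then skipped
+ while ignoreAt( "foo", currentOplogTime ) returns true, i.e. until an op with ts > t is seen. */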
+
+} // namespace mongo
diff --git a/src/mongo/db/repl/connections.h b/src/mongo/db/repl/connections.h
new file mode 100644
index 00000000000..3e08f80b047
--- /dev/null
+++ b/src/mongo/db/repl/connections.h
@@ -0,0 +1,128 @@
+// @file
+
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include <map>
+#include "../../client/dbclient.h"
+#include "../security_common.h"
+
+namespace mongo {
+
+ /** here we keep a single connection (with reconnect) for a set of hosts,
+ one each, and allow one user at a time per host. if in use already for that
+ host, we block. so this is an easy way to keep a 1-deep pool of connections
+ that many threads can share.
+
+ thread-safe.
+
+ Example:
+ {
+ ScopedConn c("foo.acme.com:9999");
+ c->runCommand(...);
+ }
+
+ throws exception on connect error (but fine to try again later with a new
+ scopedconn object for same host).
+ */
+ class ScopedConn {
+ public:
+ /** throws assertions if connect failure etc. */
+ ScopedConn(string hostport);
+ ~ScopedConn() {
+ // conLock releases...
+ }
+ void reconnect() {
+ conn()->port().shutdown();
+ connect();
+ }
+
+ /* If we were to run a query and not exhaust the cursor, future use of the connection would be problematic.
+ So here what we do is wrap known safe methods and not allow cursor-style queries at all. This makes
+ ScopedConn limited in functionality but very safe. More non-cursor wrappers can be added here if needed.
+ */
+ bool runCommand(const string &dbname, const BSONObj& cmd, BSONObj &info, int options=0) {
+ return conn()->runCommand(dbname, cmd, info, options);
+ }
+ unsigned long long count(const string &ns) {
+ return conn()->count(ns);
+ }
+ BSONObj findOne(const string &ns, const Query& q, const BSONObj *fieldsToReturn = 0, int queryOptions = 0) {
+ return conn()->findOne(ns, q, fieldsToReturn, queryOptions);
+ }
+
+ private:
+ auto_ptr<scoped_lock> connLock;
+ static mongo::mutex mapMutex;
+ struct X {
+ mongo::mutex z;
+ DBClientConnection cc;
+ bool connected;
+ X() : z("X"), cc(/*reconnect*/ true, 0, /*timeout*/ 10.0), connected(false) {
+ cc._logLevel = 2;
+ }
+ } *x;
+ typedef map<string,ScopedConn::X*> M;
+ static M& _map;
+ DBClientConnection* conn() { return &x->cc; }
+ const string _hostport;
+
+ // we should already be locked...
+ bool connect() {
+ string err;
+ if (!x->cc.connect(_hostport, err)) {
+ log() << "couldn't connect to " << _hostport << ": " << err << rsLog;
+ return false;
+ }
+ x->connected = true;
+
+ // if we cannot authenticate against a member, then either its key file
+ // or our key file has to change. if our key file has to change, we'll
+ // be rebooting. if their file has to change, they'll be rebooted so the
+ // connection created above will go dead, reconnect, and reauth.
+ if (!noauth && !x->cc.auth("local", internalSecurity.user, internalSecurity.pwd, err, false)) {
+ log() << "could not authenticate against " << _hostport << ", " << err << rsLog;
+ return false;
+ }
+
+ return true;
+ }
+ };
+
+ inline ScopedConn::ScopedConn(string hostport) : _hostport(hostport) {
+ bool first = false;
+ {
+ scoped_lock lk(mapMutex);
+ x = _map[_hostport];
+ if( x == 0 ) {
+ x = _map[_hostport] = new X();
+ first = true;
+ connLock.reset( new scoped_lock(x->z) );
+ }
+ }
+
+ // Keep trying to connect if we're not yet connected
+ if( !first && x->connected ) {
+ connLock.reset( new scoped_lock(x->z) );
+ return;
+ }
+
+ connect();
+ }
+
+}
diff --git a/src/mongo/db/repl/consensus.cpp b/src/mongo/db/repl/consensus.cpp
new file mode 100644
index 00000000000..3995373f5ef
--- /dev/null
+++ b/src/mongo/db/repl/consensus.cpp
@@ -0,0 +1,449 @@
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "../commands.h"
+#include "rs.h"
+#include "multicmd.h"
+
+namespace mongo {
+
+ class CmdReplSetFresh : public ReplSetCommand {
+ public:
+ CmdReplSetFresh() : ReplSetCommand("replSetFresh") { }
+ private:
+
+ bool shouldVeto(const BSONObj& cmdObj, string& errmsg) {
+ unsigned id = cmdObj["id"].Int();
+ const Member* primary = theReplSet->box.getPrimary();
+ const Member* hopeful = theReplSet->findById(id);
+ const Member *highestPriority = theReplSet->getMostElectable();
+
+ if( !hopeful ) {
+ errmsg = str::stream() << "replSet couldn't find member with id " << id;
+ return true;
+ }
+ else if( theReplSet->isPrimary() && theReplSet->lastOpTimeWritten >= hopeful->hbinfo().opTime ) {
+ // hbinfo is not updated, so we have to check the primary's last optime separately
+ errmsg = str::stream() << "I am already primary, " << hopeful->fullName() <<
+ " can try again once I've stepped down";
+ return true;
+ }
+ else if( primary && primary->hbinfo().opTime >= hopeful->hbinfo().opTime ) {
+ // other members might be aware of more up-to-date nodes
+ errmsg = str::stream() << hopeful->fullName() << " is trying to elect itself but " <<
+ primary->fullName() << " is already primary and more up-to-date";
+ return true;
+ }
+ else if( highestPriority && highestPriority->config().priority > hopeful->config().priority) {
+ errmsg = str::stream() << hopeful->fullName() << " has lower priority than " << highestPriority->fullName();
+ return true;
+ }
+
+ // don't veto older versions
+ if (cmdObj["id"].eoo()) {
+ // they won't be looking for the veto field
+ return false;
+ }
+
+ if ( !theReplSet->isElectable(id) ||
+ (highestPriority && highestPriority->config().priority > hopeful->config().priority)) {
+ return true;
+ }
+
+ return false;
+ }
+
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ if( !check(errmsg, result) )
+ return false;
+
+ if( cmdObj["set"].String() != theReplSet->name() ) {
+ errmsg = "wrong repl set name";
+ return false;
+ }
+ string who = cmdObj["who"].String();
+ int cfgver = cmdObj["cfgver"].Int();
+ OpTime opTime(cmdObj["opTime"].Date());
+
+ bool weAreFresher = false;
+ if( theReplSet->config().version > cfgver ) {
+ log() << "replSet member " << who << " is not yet aware its cfg version " << cfgver << " is stale" << rsLog;
+ result.append("info", "config version stale");
+ weAreFresher = true;
+ }
+ // check not only our own optime, but any other member we can reach
+ else if( opTime < theReplSet->lastOpTimeWritten ||
+ opTime < theReplSet->lastOtherOpTime()) {
+ weAreFresher = true;
+ }
+ result.appendDate("opTime", theReplSet->lastOpTimeWritten.asDate());
+ result.append("fresher", weAreFresher);
+ result.append("veto", shouldVeto(cmdObj, errmsg));
+
+ return true;
+ }
+ } cmdReplSetFresh;
+
+ class CmdReplSetElect : public ReplSetCommand {
+ public:
+ CmdReplSetElect() : ReplSetCommand("replSetElect") { }
+ private:
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ if( !check(errmsg, result) )
+ return false;
+ theReplSet->elect.electCmdReceived(cmdObj, &result);
+ return true;
+ }
+ } cmdReplSetElect;
+
+ int Consensus::totalVotes() const {
+ static int complain = 0;
+ int vTot = rs._self->config().votes;
+ for( Member *m = rs.head(); m; m=m->next() )
+ vTot += m->config().votes;
+ if( vTot % 2 == 0 && vTot && complain++ == 0 )
+ log() << "replSet " /*buildbot! warning */ "total number of votes is even - add arbiter or give one member an extra vote" << rsLog;
+ return vTot;
+ }
+
+ bool Consensus::aMajoritySeemsToBeUp() const {
+ int vUp = rs._self->config().votes;
+ for( Member *m = rs.head(); m; m=m->next() )
+ vUp += m->hbinfo().up() ? m->config().votes : 0;
+ return vUp * 2 > totalVotes();
+ }
+
+ bool Consensus::shouldRelinquish() const {
+ int vUp = rs._self->config().votes;
+ const long long T = rs.config().ho.heartbeatTimeoutMillis * rs.config().ho.heartbeatConnRetries;
+ for( Member *m = rs.head(); m; m=m->next() ) {
+ long long dt = m->hbinfo().timeDown();
+ if( dt < T )
+ vUp += m->config().votes;
+ }
+
+ // the manager will handle calling stepdown if another node should be
+ // primary due to priority
+
+ return !( vUp * 2 > totalVotes() );
+ }
+
+ static const int VETO = -10000;
+
+ const time_t LeaseTime = 30;
+
+ SimpleMutex Consensus::lyMutex("ly");
+
+ unsigned Consensus::yea(unsigned memberId) { /* throws VoteException */
+ SimpleMutex::scoped_lock lk(lyMutex);
+ LastYea &L = this->ly.ref(lk);
+ time_t now = time(0);
+ if( L.when + LeaseTime >= now && L.who != memberId ) {
+ LOG(1) << "replSet not voting yea for " << memberId <<
+ " voted for " << L.who << ' ' << now-L.when << " secs ago" << rsLog;
+ throw VoteException();
+ }
+ L.when = now;
+ L.who = memberId;
+ return rs._self->config().votes;
+ }
+
+ /* we vote for ourself at start of election. once it fails, we can cancel the lease we had in
+ place instead of leaving it for a long time.
+ */
+ void Consensus::electionFailed(unsigned meid) {
+ SimpleMutex::scoped_lock lk(lyMutex);
+ LastYea &L = ly.ref(lk);
+ DEV assert( L.who == meid ); // this may not always hold, so be aware, but adding for now as a quick sanity test
+ if( L.who == meid )
+ L.when = 0;
+ }
+
+ /* todo: threading **************** !!!!!!!!!!!!!!!! */
+ void Consensus::electCmdReceived(BSONObj cmd, BSONObjBuilder* _b) {
+ BSONObjBuilder& b = *_b;
+ DEV log() << "replSet received elect msg " << cmd.toString() << rsLog;
+ else LOG(2) << "replSet received elect msg " << cmd.toString() << rsLog;
+ string set = cmd["set"].String();
+ unsigned whoid = cmd["whoid"].Int();
+ int cfgver = cmd["cfgver"].Int();
+ OID round = cmd["round"].OID();
+ int myver = rs.config().version;
+
+ const Member* primary = rs.box.getPrimary();
+ const Member* hopeful = rs.findById(whoid);
+ const Member* highestPriority = rs.getMostElectable();
+
+ int vote = 0;
+ if( set != rs.name() ) {
+ log() << "replSet error received an elect request for '" << set << "' but our set name is '" << rs.name() << "'" << rsLog;
+ }
+ else if( myver < cfgver ) {
+ // we are stale. don't vote
+ }
+ else if( myver > cfgver ) {
+ // they are stale!
+ log() << "replSet electCmdReceived info got stale version # during election" << rsLog;
+ vote = -10000;
+ }
+ else if( !hopeful ) {
+ log() << "replSet electCmdReceived couldn't find member with id " << whoid << rsLog;
+ vote = -10000;
+ }
+ else if( primary && primary == rs._self && rs.lastOpTimeWritten >= hopeful->hbinfo().opTime ) {
+ // hbinfo is not updated, so we have to check the primary's last optime separately
+ log() << "I am already primary, " << hopeful->fullName()
+ << " can try again once I've stepped down" << rsLog;
+ vote = -10000;
+ }
+ else if( primary && primary->hbinfo().opTime >= hopeful->hbinfo().opTime ) {
+ // other members might be aware of more up-to-date nodes
+ log() << hopeful->fullName() << " is trying to elect itself but " <<
+ primary->fullName() << " is already primary and more up-to-date" << rsLog;
+ vote = -10000;
+ }
+ else if( highestPriority && highestPriority->config().priority > hopeful->config().priority) {
+ log() << hopeful->fullName() << " has lower priority than " << highestPriority->fullName();
+ vote = -10000;
+ }
+ else {
+ try {
+ vote = yea(whoid);
+ dassert( hopeful->id() == whoid );
+ rs.relinquish();
+ log() << "replSet info voting yea for " << hopeful->fullName() << " (" << whoid << ')' << rsLog;
+ }
+ catch(VoteException&) {
+ log() << "replSet voting no for " << hopeful->fullName() << " already voted for another" << rsLog;
+ }
+ }
+
+ b.append("vote", vote);
+ b.append("round", round);
+ }
+
+ void ReplSetImpl::_getTargets(list<Target>& L, int& configVersion) {
+ configVersion = config().version;
+ for( Member *m = head(); m; m=m->next() )
+ if( m->hbinfo().maybeUp() )
+ L.push_back( Target(m->fullName()) );
+ }
+
+ /* config version is returned as it is ok to use this unlocked. BUT, if unlocked, you would need
+ to check later that the config didn't change. */
+ void ReplSetImpl::getTargets(list<Target>& L, int& configVersion) {
+ if( lockedByMe() ) {
+ _getTargets(L, configVersion);
+ return;
+ }
+ lock lk(this);
+ _getTargets(L, configVersion);
+ }
+
+ /* Do we have the newest data of them all?
+ @param allUp - set to true if all members are up. Only set if true returned.
+ @return true if we are freshest. Note we may tie.
+ */
+ bool Consensus::weAreFreshest(bool& allUp, int& nTies) {
+ const OpTime ord = theReplSet->lastOpTimeWritten;
+ nTies = 0;
+ assert( !ord.isNull() );
+ BSONObj cmd = BSON(
+ "replSetFresh" << 1 <<
+ "set" << rs.name() <<
+ "opTime" << Date_t(ord.asDate()) <<
+ "who" << rs._self->fullName() <<
+ "cfgver" << rs._cfg->version <<
+ "id" << rs._self->id());
+ list<Target> L;
+ int ver;
+ /* the following queries arbiters, even though they are never fresh. wonder if that makes sense.
+ it doesn't, but it could, if they come to "know" what freshness is one day. so consider removing
+ arbiters from getTargets() here. although getTargets is used elsewhere for elections; there
+ arbiters are certainly targets - so an "includeArbs" bool would be necessary if we want to
+ avoid fetching them here.
+ */
+ rs.getTargets(L, ver);
+ multiCommand(cmd, L);
+ int nok = 0;
+ allUp = true;
+ for( list<Target>::iterator i = L.begin(); i != L.end(); i++ ) {
+ if( i->ok ) {
+ nok++;
+ if( i->result["fresher"].trueValue() ) {
+ log() << "not electing self, we are not freshest" << rsLog;
+ return false;
+ }
+ OpTime remoteOrd( i->result["opTime"].Date() );
+ if( remoteOrd == ord )
+ nTies++;
+ assert( remoteOrd <= ord );
+
+ if( i->result["veto"].trueValue() ) {
+ BSONElement msg = i->result["errmsg"];
+ if (!msg.eoo()) {
+ log() << "not electing self, " << i->toHost << " would veto with '" <<
+ msg.String() << "'" << rsLog;
+ }
+ else {
+ log() << "not electing self, " << i->toHost << " would veto" << rsLog;
+ }
+ return false;
+ }
+ }
+ else {
+ DEV log() << "replSet freshest returns " << i->result.toString() << rsLog;
+ allUp = false;
+ }
+ }
+ LOG(1) << "replSet dev we are freshest of up nodes, nok:" << nok << " nTies:" << nTies << rsLog;
+ assert( ord <= theReplSet->lastOpTimeWritten ); // <= as this may change while we are working...
+ return true;
+ }
+
+ extern time_t started;
+
+ void Consensus::multiCommand(BSONObj cmd, list<Target>& L) {
+ assert( !rs.lockedByMe() );
+ mongo::multiCommand(cmd, L);
+ }
+
+ void Consensus::_electSelf() {
+ if( time(0) < steppedDown )
+ return;
+
+ {
+ const OpTime ord = theReplSet->lastOpTimeWritten;
+ if( ord == 0 ) {
+ log() << "replSet info not trying to elect self, do not yet have a complete set of data from any point in time" << rsLog;
+ return;
+ }
+ }
+
+ bool allUp;
+ int nTies;
+ if( !weAreFreshest(allUp, nTies) ) {
+ return;
+ }
+
+ rs.sethbmsg("",9);
+
+ if( !allUp && time(0) - started < 60 * 5 ) {
+ /* the idea here is that if a bunch of nodes bounce all at once, we don't want to drop data
+ if we don't have to -- we'd rather be offline and wait a little longer instead
+ todo: make this configurable.
+ */
+ rs.sethbmsg("not electing self, not all members up and we have been up less than 5 minutes");
+ return;
+ }
+
+ Member& me = *rs._self;
+
+ if( nTies ) {
+ /* tie? we then randomly sleep to try to not collide on our voting. */
+ /* todo: smarter. */
+ if( me.id() == 0 || sleptLast ) {
+ // would be fine for one node not to sleep
+ // todo: biggest / highest priority nodes should be the ones that get to not sleep
+ }
+ else {
+ assert( !rs.lockedByMe() ); // bad to go to sleep locked
+ unsigned ms = ((unsigned) rand()) % 1000 + 50;
+ DEV log() << "replSet tie " << nTies << " sleeping a little " << ms << "ms" << rsLog;
+ sleptLast = true;
+ sleepmillis(ms);
+ throw RetryAfterSleepException();
+ }
+ }
+ sleptLast = false;
+
+ time_t start = time(0);
+ unsigned meid = me.id();
+ int tally = yea( meid );
+ bool success = false;
+ try {
+ log() << "replSet info electSelf " << meid << rsLog;
+
+ BSONObj electCmd = BSON(
+ "replSetElect" << 1 <<
+ "set" << rs.name() <<
+ "who" << me.fullName() <<
+ "whoid" << me.hbinfo().id() <<
+ "cfgver" << rs._cfg->version <<
+ "round" << OID::gen() /* this is just for diagnostics */
+ );
+
+ int configVersion;
+ list<Target> L;
+ rs.getTargets(L, configVersion);
+ multiCommand(electCmd, L);
+
+ {
+ for( list<Target>::iterator i = L.begin(); i != L.end(); i++ ) {
+ DEV log() << "replSet elect res: " << i->result.toString() << rsLog;
+ if( i->ok ) {
+ int v = i->result["vote"].Int();
+ tally += v;
+ }
+ }
+ if( tally*2 <= totalVotes() ) {
+ log() << "replSet couldn't elect self, only received " << tally << " votes" << rsLog;
+ }
+ else if( time(0) - start > 30 ) {
+ // defensive; should never happen as we have timeouts on connection and operation for our conn
+ log() << "replSet too much time passed during our election, ignoring result" << rsLog;
+ }
+ else if( configVersion != rs.config().version ) {
+ log() << "replSet config version changed during our election, ignoring result" << rsLog;
+ }
+ else {
+ /* succeeded. */
+ log(1) << "replSet election succeeded, assuming primary role" << rsLog;
+ success = true;
+ rs.assumePrimary();
+ }
+ }
+ }
+ catch( std::exception& ) {
+ if( !success ) electionFailed(meid);
+ throw;
+ }
+ if( !success ) electionFailed(meid);
+ }
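+
+ /* a rough worked example of the majority arithmetic above (numbers hypothetical): in a
+ 3-member set where every member has 1 vote, totalVotes() is 3. assuming yea() grants our
+ own single vote, tally starts at 1; one "vote":1 response from another member makes
+ tally 2, and 2*2 > 3, so the election can succeed. with only our own vote, 1*2 <= 3 and
+ we log that we couldn't elect self. */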
+
+ void Consensus::electSelf() {
+ assert( !rs.lockedByMe() );
+ assert( !rs.myConfig().arbiterOnly );
+ assert( rs.myConfig().slaveDelay == 0 );
+ try {
+ _electSelf();
+ }
+ catch(RetryAfterSleepException&) {
+ throw;
+ }
+ catch(VoteException& ) {
+ log() << "replSet not trying to elect self as responded yea to someone else recently" << rsLog;
+ }
+ catch(DBException& e) {
+ log() << "replSet warning caught unexpected exception in electSelf() " << e.toString() << rsLog;
+ }
+ catch(...) {
+ log() << "replSet warning caught unexpected exception in electSelf()" << rsLog;
+ }
+ }
+
+}
diff --git a/src/mongo/db/repl/health.cpp b/src/mongo/db/repl/health.cpp
new file mode 100644
index 00000000000..0b7ed87eac3
--- /dev/null
+++ b/src/mongo/db/repl/health.cpp
@@ -0,0 +1,449 @@
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "rs.h"
+#include "health.h"
+#include "../../util/background.h"
+#include "../../client/dbclient.h"
+#include "../../client/connpool.h"
+#include "../commands.h"
+#include "../../util/concurrency/value.h"
+#include "../../util/concurrency/task.h"
+#include "../../util/mongoutils/html.h"
+#include "../../util/goodies.h"
+#include "../../util/ramlog.h"
+#include "../helpers/dblogger.h"
+#include "connections.h"
+#include "../../util/unittest.h"
+#include "../dbhelpers.h"
+
+namespace mongo {
+ /* decls for connections.h */
+ ScopedConn::M& ScopedConn::_map = *(new ScopedConn::M());
+ mutex ScopedConn::mapMutex("ScopedConn::mapMutex");
+}
+
+namespace mongo {
+
+ using namespace mongoutils::html;
+ using namespace bson;
+
+ static RamLog * _rsLog = new RamLog( "rs" );
+ Tee *rsLog = _rsLog;
+ extern bool replSetBlind; // for testing
+
+ string ago(time_t t) {
+ if( t == 0 ) return "";
+
+ time_t x = time(0) - t;
+ stringstream s;
+ if( x < 180 ) {
+ s << x << " sec";
+ if( x != 1 ) s << 's';
+ }
+ else if( x < 3600 ) {
+ s.precision(2);
+ s << x / 60.0 << " mins";
+ }
+ else {
+ s.precision(2);
+ s << x / 3600.0 << " hrs";
+ }
+ return s.str();
+ }
+
+ void Member::summarizeMember(stringstream& s) const {
+ s << tr();
+ {
+ stringstream u;
+ u << "http://" << h().host() << ':' << (h().port() + 1000) << "/_replSet";
+ s << td( a(u.str(), "", fullName()) );
+ }
+ s << td( id() );
+ double h = hbinfo().health;
+ bool ok = h > 0;
+ s << td(red(str::stream() << h,h == 0));
+ s << td(ago(hbinfo().upSince));
+ bool never = false;
+ {
+ string h;
+ time_t hb = hbinfo().lastHeartbeat;
+ if( hb == 0 ) {
+ h = "never";
+ never = true;
+ }
+ else h = ago(hb) + " ago";
+ s << td(h);
+ }
+ s << td(config().votes);
+ s << td(config().priority);
+ {
+ string stateText = state().toString();
+ if( _config.hidden )
+ stateText += " (hidden)";
+ if( ok || stateText.empty() )
+ s << td(stateText); // text blank if we've never connected
+ else
+ s << td( grey(str::stream() << "(was " << state().toString() << ')', true) );
+ }
+ s << td( grey(hbinfo().lastHeartbeatMsg,!ok) );
+ stringstream q;
+ q << "/_replSetOplog?_id=" << id();
+ s << td( a(q.str(), "", never ? "?" : hbinfo().opTime.toString()) );
+ if( hbinfo().skew > INT_MIN ) {
+ s << td( grey(str::stream() << hbinfo().skew,!ok) );
+ }
+ else
+ s << td("");
+ s << _tr();
+ }
+
+ string ReplSetImpl::stateAsHtml(MemberState s) {
+ if( s.s == MemberState::RS_STARTUP ) return a("", "server still starting up, or still trying to initiate the set", "STARTUP");
+ if( s.s == MemberState::RS_PRIMARY ) return a("", "this server thinks it is primary", "PRIMARY");
+ if( s.s == MemberState::RS_SECONDARY ) return a("", "this server thinks it is a secondary (slave mode)", "SECONDARY");
+ if( s.s == MemberState::RS_RECOVERING ) return a("", "recovering/resyncing; after recovery usually auto-transitions to secondary", "RECOVERING");
+ if( s.s == MemberState::RS_FATAL ) return a("", "something bad has occurred and server is not completely offline with regard to the replica set. fatal error.", "FATAL");
+ if( s.s == MemberState::RS_STARTUP2 ) return a("", "loaded config, still determining who is primary", "STARTUP2");
+ if( s.s == MemberState::RS_ARBITER ) return a("", "this server is an arbiter only", "ARBITER");
+ if( s.s == MemberState::RS_DOWN ) return a("", "member is down, slow, or unreachable", "DOWN");
+ if( s.s == MemberState::RS_ROLLBACK ) return a("", "rolling back operations to get in sync", "ROLLBACK");
+ return "";
+ }
+
+ extern time_t started;
+
+ // oplogdiags in web ui
+ static void say(stringstream&ss, const bo& op) {
+ ss << "<tr>";
+
+ set<string> skip;
+ be e = op["ts"];
+ if( e.type() == Date || e.type() == Timestamp ) {
+ OpTime ot = e._opTime();
+ ss << td( time_t_to_String_short( ot.getSecs() ) );
+ ss << td( ot.toString() );
+ skip.insert("ts");
+ }
+ else ss << td("?") << td("?");
+
+ e = op["h"];
+ if( e.type() == NumberLong ) {
+ ss << "<td>" << hex << e.Long() << "</td>\n";
+ skip.insert("h");
+ }
+ else
+ ss << td("?");
+
+ ss << td(op["op"].valuestrsafe());
+ ss << td(op["ns"].valuestrsafe());
+ skip.insert("op");
+ skip.insert("ns");
+
+ ss << "<td>";
+ for( bo::iterator i(op); i.more(); ) {
+ be e = i.next();
+ if( skip.count(e.fieldName()) ) continue;
+ ss << e.toString() << ' ';
+ }
+ ss << "</td></tr>\n";
+ }
+
+ void ReplSetImpl::_getOplogDiagsAsHtml(unsigned server_id, stringstream& ss) const {
+ const Member *m = findById(server_id);
+ if( m == 0 ) {
+ ss << "Error : can't find a member with id: " << server_id << '\n';
+ return;
+ }
+
+ ss << p("Server : " + m->fullName() + "<br>ns : " + rsoplog );
+
+ //const bo fields = BSON( "o" << false << "o2" << false );
+ const bo fields;
+
+ /** todo fix: we might want a socket (SO) timeout here */
+ DBClientConnection conn(false, 0, /*timeout*/ 20);
+ {
+ string errmsg;
+ if( !conn.connect(m->fullName(), errmsg) ) {
+ ss << "couldn't connect to " << m->fullName() << ' ' << errmsg;
+ return;
+ }
+ }
+
+ auto_ptr<DBClientCursor> c = conn.query(rsoplog, Query().sort("$natural",1), 20, 0, &fields);
+ if( c.get() == 0 ) {
+ ss << "couldn't query " << rsoplog;
+ return;
+ }
+ static const char *h[] = {"ts","optime", "h","op","ns","rest",0};
+
+ ss << "<style type=\"text/css\" media=\"screen\">"
+ "table { font-size:75% }\n"
+ // "th { background-color:#bbb; color:#000 }\n"
+ // "td,th { padding:.25em }\n"
+ "</style>\n";
+
+ ss << table(h, true);
+ //ss << "<pre>\n";
+ int n = 0;
+ OpTime otFirst;
+ OpTime otLast;
+ OpTime otEnd;
+ while( c->more() ) {
+ bo o = c->next();
+ otLast = o["ts"]._opTime();
+ if( otFirst.isNull() )
+ otFirst = otLast;
+ say(ss, o);
+ n++;
+ }
+ if( n == 0 ) {
+ ss << rsoplog << " is empty\n";
+ }
+ else {
+ auto_ptr<DBClientCursor> c = conn.query(rsoplog, Query().sort("$natural",-1), 20, 0, &fields);
+ if( c.get() == 0 ) {
+ ss << "couldn't query [2] " << rsoplog;
+ return;
+ }
+ string x;
+ bo o = c->next();
+ otEnd = o["ts"]._opTime();
+ while( 1 ) {
+ stringstream z;
+ if( o["ts"]._opTime() == otLast )
+ break;
+ say(z, o);
+ x = z.str() + x;
+ if( !c->more() )
+ break;
+ o = c->next();
+ }
+ if( !x.empty() ) {
+ ss << "<tr><td>...</td><td>...</td><td>...</td><td>...</td><td>...</td></tr>\n" << x;
+ //ss << "\n...\n\n" << x;
+ }
+ }
+ ss << _table();
+ ss << p(time_t_to_String_short(time(0)) + " current time");
+
+ if( !otEnd.isNull() ) {
+ ss << "<p>Log length in time: ";
+ unsigned d = otEnd.getSecs() - otFirst.getSecs();
+ double h = d / 3600.0;
+ ss.precision(3);
+ if( h < 72 )
+ ss << h << " hours";
+ else
+ ss << h / 24.0 << " days";
+ ss << "</p>\n";
+ }
+ }
+
+ void ReplSetImpl::_summarizeAsHtml(stringstream& s) const {
+ s << table(0, false);
+ s << tr("Set name:", _name);
+ s << tr("Majority up:", elect.aMajoritySeemsToBeUp()?"yes":"no" );
+ s << _table();
+
+ const char *h[] = {"Member",
+ "<a title=\"member id in the replset config\">id</a>",
+ "Up",
+ "<a title=\"length of time we have been continuously connected to the other member with no reconnects (for self, shows uptime)\">cctime</a>",
+ "<a title=\"when this server last received a heartbeat response - includes error code responses\">Last heartbeat</a>",
+ "Votes", "Priority", "State", "Messages",
+ "<a title=\"how up to date this server is. this value polled every few seconds so actually lag is typically much lower than value shown here.\">optime</a>",
+ "<a title=\"Clock skew in seconds relative to this server. Informational; server clock variances will make the diagnostics hard to read, but otherwise are benign..\">skew</a>",
+ 0
+ };
+ s << table(h);
+
+ /* this is to sort the member rows by their ordinal _id, so they show up in the same
+ order on all the different web ui's; that is less confusing for the operator. */
+ map<int,string> mp;
+
+ string myMinValid;
+ try {
+ readlocktry lk("local.replset.minvalid", 300);
+ if( lk.got() ) {
+ BSONObj mv;
+ if( Helpers::getSingleton("local.replset.minvalid", mv) ) {
+ myMinValid = "minvalid:" + mv["ts"]._opTime().toString();
+ }
+ }
+ else myMinValid = ".";
+ }
+ catch(...) {
+ myMinValid = "exception fetching minvalid";
+ }
+
+ const Member *_self = this->_self;
+ assert(_self);
+ {
+ stringstream s;
+ /* self row */
+ s << tr() << td(_self->fullName() + " (me)") <<
+ td(_self->id()) <<
+ td("1") << //up
+ td(ago(started)) <<
+ td("") << // last heartbeat
+ td(ToString(_self->config().votes)) <<
+ td(ToString(_self->config().priority)) <<
+ td( stateAsHtml(box.getState()) + (_self->config().hidden?" (hidden)":"") );
+ s << td( _hbmsg );
+ stringstream q;
+ q << "/_replSetOplog?_id=" << _self->id();
+ s << td( a(q.str(), myMinValid, theReplSet->lastOpTimeWritten.toString()) );
+ s << td(""); // skew
+ s << _tr();
+ mp[_self->hbinfo().id()] = s.str();
+ }
+ Member *m = head();
+ while( m ) {
+ stringstream s;
+ m->summarizeMember(s);
+ mp[m->hbinfo().id()] = s.str();
+ m = m->next();
+ }
+
+ for( map<int,string>::const_iterator i = mp.begin(); i != mp.end(); i++ )
+ s << i->second;
+ s << _table();
+ }
+
+
+ void fillRsLog(stringstream& s) {
+ _rsLog->toHTML( s );
+ }
+
+ const Member* ReplSetImpl::findById(unsigned id) const {
+ if( _self && id == _self->id() ) return _self;
+
+ for( Member *m = head(); m; m = m->next() )
+ if( m->id() == id )
+ return m;
+ return 0;
+ }
+
+ const OpTime ReplSetImpl::lastOtherOpTime() const {
+ OpTime closest(0,0);
+
+ for( Member *m = _members.head(); m; m=m->next() ) {
+ if (!m->hbinfo().up()) {
+ continue;
+ }
+
+ if (m->hbinfo().opTime > closest) {
+ closest = m->hbinfo().opTime;
+ }
+ }
+
+ return closest;
+ }
+
+ void ReplSetImpl::_summarizeStatus(BSONObjBuilder& b) const {
+ vector<BSONObj> v;
+
+ const Member *_self = this->_self;
+ assert( _self );
+
+ MemberState myState = box.getState();
+
+ // add self
+ {
+ BSONObjBuilder bb;
+ bb.append("_id", (int) _self->id());
+ bb.append("name", _self->fullName());
+ bb.append("health", 1.0);
+ bb.append("state", (int)myState.s);
+ bb.append("stateStr", myState.toString());
+ bb.append("uptime", (unsigned)(time(0) - cmdLine.started));
+ if (!_self->config().arbiterOnly) {
+ bb.appendTimestamp("optime", lastOpTimeWritten.asDate());
+ bb.appendDate("optimeDate", lastOpTimeWritten.getSecs() * 1000LL);
+ }
+
+ int maintenance = _maintenanceMode;
+ if (maintenance) {
+ bb.append("maintenanceMode", maintenance);
+ }
+
+ if (theReplSet) {
+ string s = theReplSet->hbmsg();
+ if( !s.empty() )
+ bb.append("errmsg", s);
+ }
+ bb.append("self", true);
+ v.push_back(bb.obj());
+ }
+
+ Member *m =_members.head();
+ while( m ) {
+ BSONObjBuilder bb;
+ bb.append("_id", (int) m->id());
+ bb.append("name", m->fullName());
+ double h = m->hbinfo().health;
+ bb.append("health", h);
+ bb.append("state", (int) m->state().s);
+ if( h == 0 ) {
+ // if we can't connect the state info is from the past and could be confusing to show
+ bb.append("stateStr", "(not reachable/healthy)");
+ }
+ else {
+ bb.append("stateStr", m->state().toString());
+ }
+ bb.append("uptime", (unsigned) (m->hbinfo().upSince ? (time(0)-m->hbinfo().upSince) : 0));
+ if (!m->config().arbiterOnly) {
+ bb.appendTimestamp("optime", m->hbinfo().opTime.asDate());
+ bb.appendDate("optimeDate", m->hbinfo().opTime.getSecs() * 1000LL);
+ }
+ bb.appendTimeT("lastHeartbeat", m->hbinfo().lastHeartbeat);
+ bb.append("pingMs", m->hbinfo().ping);
+ string s = m->lhb();
+ if( !s.empty() )
+ bb.append("errmsg", s);
+
+ if (m->hbinfo().authIssue) {
+ bb.append("authenticated", false);
+ }
+
+ v.push_back(bb.obj());
+ m = m->next();
+ }
+ sort(v.begin(), v.end());
+ b.append("set", name());
+ b.appendTimeT("date", time(0));
+ b.append("myState", myState.s);
+ const Member *syncTarget = _currentSyncTarget;
+ if (syncTarget && myState != MemberState::RS_PRIMARY) {
+ b.append("syncingTo", syncTarget->fullName());
+ }
+ b.append("members", v);
+ if( replSetBlind )
+ b.append("blind",true); // to avoid confusion if set...normally never set except for testing.
+ }
+
+ static struct Test : public UnitTest {
+ void run() {
+ HealthOptions a,b;
+ assert( a == b );
+ assert( a.isDefault() );
+ }
+ } test;
+
+}
diff --git a/src/mongo/db/repl/health.h b/src/mongo/db/repl/health.h
new file mode 100644
index 00000000000..55cca93a27e
--- /dev/null
+++ b/src/mongo/db/repl/health.h
@@ -0,0 +1,50 @@
+// replset.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+namespace mongo {
+
+ /* throws */
+ bool requestHeartbeat(string setname, string fromHost, string memberFullName, BSONObj& result, int myConfigVersion, int& theirConfigVersion, bool checkEmpty = false);
+
+ struct HealthOptions {
+ HealthOptions() :
+ heartbeatSleepMillis(2000),
+ heartbeatTimeoutMillis( 10000 ),
+ heartbeatConnRetries(2)
+ { }
+
+ bool isDefault() const { return *this == HealthOptions(); }
+
+ // see http://www.mongodb.org/display/DOCS/Replica+Set+Internals
+ unsigned heartbeatSleepMillis;
+ unsigned heartbeatTimeoutMillis;
+ unsigned heartbeatConnRetries;
+
+ void check() {
+ uassert(13112, "bad replset heartbeat option", heartbeatSleepMillis >= 10);
+ uassert(13113, "bad replset heartbeat option", heartbeatTimeoutMillis >= 10);
+ }
+
+ bool operator==(const HealthOptions& r) const {
+ return heartbeatSleepMillis==r.heartbeatSleepMillis && heartbeatTimeoutMillis==r.heartbeatTimeoutMillis && heartbeatConnRetries==r.heartbeatConnRetries;
+ }
+ };
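+
+ /* sketch: with the defaults above, HealthOptions().isDefault() is true
+ (2000 ms sleep, 10000 ms timeout, 2 connect retries); check() uasserts if either
+ millisecond value is configured below 10. */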
+
+}
diff --git a/src/mongo/db/repl/heartbeat.cpp b/src/mongo/db/repl/heartbeat.cpp
new file mode 100644
index 00000000000..331812af85a
--- /dev/null
+++ b/src/mongo/db/repl/heartbeat.cpp
@@ -0,0 +1,382 @@
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "rs.h"
+#include "health.h"
+#include "../../util/background.h"
+#include "../../client/dbclient.h"
+#include "../commands.h"
+#include "../../util/concurrency/value.h"
+#include "../../util/concurrency/task.h"
+#include "../../util/concurrency/msg.h"
+#include "../../util/mongoutils/html.h"
+#include "../../util/goodies.h"
+#include "../../util/ramlog.h"
+#include "../helpers/dblogger.h"
+#include "connections.h"
+#include "../../util/unittest.h"
+#include "../instance.h"
+#include "../repl.h"
+
+namespace mongo {
+
+ using namespace bson;
+
+ extern bool replSetBlind;
+ extern ReplSettings replSettings;
+
+ unsigned int HeartbeatInfo::numPings;
+
+ long long HeartbeatInfo::timeDown() const {
+ if( up() ) return 0;
+ if( downSince == 0 )
+ return 0; // still waiting on first heartbeat
+ return jsTime() - downSince;
+ }
+
+ /* { replSetHeartbeat : <setname> } */
+ class CmdReplSetHeartbeat : public ReplSetCommand {
+ public:
+ CmdReplSetHeartbeat() : ReplSetCommand("replSetHeartbeat") { }
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ if( replSetBlind ) {
+ if (theReplSet) {
+ errmsg = str::stream() << theReplSet->selfFullName() << " is blind";
+ }
+ return false;
+ }
+
+ /* we don't call ReplSetCommand::check() here because heartbeat
+ checks many things that are pre-initialization. */
+ if( !replSet ) {
+ errmsg = "not running with --replSet";
+ return false;
+ }
+
+ if (!checkAuth(errmsg, result)) {
+ return false;
+ }
+
+ /* we want to keep heartbeat connections open when relinquishing primary. tag them here. */
+ {
+ AbstractMessagingPort *mp = cc().port();
+ if( mp )
+ mp->tag |= 1;
+ }
+
+ if( cmdObj["pv"].Int() != 1 ) {
+ errmsg = "incompatible replset protocol version";
+ return false;
+ }
+ {
+ string s = string(cmdObj.getStringField("replSetHeartbeat"));
+ if( cmdLine.ourSetName() != s ) {
+ errmsg = "repl set names do not match";
+ log() << "replSet set names do not match, our cmdline: " << cmdLine._replSet << rsLog;
+ log() << "replSet s: " << s << rsLog;
+ result.append("mismatch", true);
+ return false;
+ }
+ }
+
+ result.append("rs", true);
+ if( cmdObj["checkEmpty"].trueValue() ) {
+ result.append("hasData", replHasDatabases());
+ }
+ if( theReplSet == 0 ) {
+ string from( cmdObj.getStringField("from") );
+ if( !from.empty() ) {
+ scoped_lock lck( replSettings.discoveredSeeds_mx );
+ replSettings.discoveredSeeds.insert(from);
+ }
+ result.append("hbmsg", "still initializing");
+ return true;
+ }
+
+ if( theReplSet->name() != cmdObj.getStringField("replSetHeartbeat") ) {
+ errmsg = "repl set names do not match (2)";
+ result.append("mismatch", true);
+ return false;
+ }
+ result.append("set", theReplSet->name());
+ result.append("state", theReplSet->state().s);
+ result.append("e", theReplSet->iAmElectable());
+ result.append("hbmsg", theReplSet->hbmsg());
+ result.append("time", (long long) time(0));
+ result.appendDate("opTime", theReplSet->lastOpTimeWritten.asDate());
+ int v = theReplSet->config().version;
+ result.append("v", v);
+ if( v > cmdObj["v"].Int() )
+ result << "config" << theReplSet->config().asBson();
+
+ return true;
+ }
+ } cmdReplSetHeartbeat;
+
+ bool requestHeartbeat(string setName, string from, string memberFullName, BSONObj& result,
+ int myCfgVersion, int& theirCfgVersion, bool checkEmpty) {
+ if( replSetBlind ) {
+ return false;
+ }
+
+ BSONObj cmd = BSON( "replSetHeartbeat" << setName <<
+ "v" << myCfgVersion <<
+ "pv" << 1 <<
+ "checkEmpty" << checkEmpty <<
+ "from" << from );
+
+ // generally not a great idea to do outbound waiting calls in a
+ // write lock. heartbeats can be slow (multisecond to respond), so
+ // generally we don't want to be locked, at least not without
+ // thinking carefully about it first.
+ uassert(15900, "can't heartbeat: too much lock",
+ !d.dbMutex.isWriteLocked() || theReplSet == 0 || !theReplSet->lockedByMe() );
+
+ ScopedConn conn(memberFullName);
+ return conn.runCommand("admin", cmd, result, 0);
+ }
+
+ /**
+ * Poll every other set member to check its status.
+ *
+ * A detail about local machines and authentication: suppose we have 2
+ * members, A and B, on the same machine using different keyFiles. A is
+ * primary. If we're just starting the set, there are no admin users, so A
+ * and B can access each other because it's local access.
+ *
+ * Then we add a user to A. B cannot sync this user from A, because as soon
+ * as we add an admin user, A requires auth. However, A can still
+ * heartbeat B, because B *doesn't* have an admin user. So A can reach B
+ * but B cannot reach A.
+ *
+ * Once B is restarted with the correct keyFile, everything should work as
+ * expected.
+ */
+ class ReplSetHealthPollTask : public task::Task {
+ private:
+ HostAndPort h;
+ HeartbeatInfo m;
+ int tries;
+ const int threshold;
+ public:
+ ReplSetHealthPollTask(const HostAndPort& hh, const HeartbeatInfo& mm)
+ : h(hh), m(mm), tries(0), threshold(15) { }
+
+ string name() const { return "rsHealthPoll"; }
+ void doWork() {
+ if ( !theReplSet ) {
+ LOG(2) << "replSet not initialized yet, skipping health poll this round" << rsLog;
+ return;
+ }
+
+ HeartbeatInfo mem = m;
+ HeartbeatInfo old = mem;
+ try {
+ BSONObj info;
+ int theirConfigVersion = -10000;
+
+ bool ok = _requestHeartbeat(mem, info, theirConfigVersion);
+
+ // weight new ping with old pings
+ // on the first ping, just use the ping value
+ if (old.ping != 0) {
+ mem.ping = (unsigned int)((old.ping * .8) + (mem.ping * .2));
+ }
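+ // for example (hypothetical numbers): old.ping = 100 ms and a new sample of 200 ms
+ // yields (100 * .8) + (200 * .2) = 120 ms, so a single slow round trip only nudges
+ // the smoothed value rather than replacing it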
+
+ if( ok ) {
+ up(info, mem);
+ }
+ else if (!info["errmsg"].eoo() &&
+ info["errmsg"].str() == "need to login") {
+ authIssue(mem);
+ }
+ else {
+ down(mem, info.getStringField("errmsg"));
+ }
+ }
+ catch(DBException& e) {
+ down(mem, e.what());
+ }
+ catch(...) {
+ down(mem, "replSet unexpected exception in ReplSetHealthPollTask");
+ }
+ m = mem;
+
+ theReplSet->mgr->send( boost::bind(&ReplSet::msgUpdateHBInfo, theReplSet, mem) );
+
+ static time_t last = 0;
+ time_t now = time(0);
+ bool changed = mem.changed(old);
+ if( changed ) {
+ if( old.hbstate != mem.hbstate )
+ log() << "replSet member " << h.toString() << " is now in state " << mem.hbstate.toString() << rsLog;
+ }
+ if( changed || now-last>4 ) {
+ last = now;
+ theReplSet->mgr->send( boost::bind(&Manager::msgCheckNewState, theReplSet->mgr) );
+ }
+ }
+
+ private:
+ bool _requestHeartbeat(HeartbeatInfo& mem, BSONObj& info, int& theirConfigVersion) {
+ if (tries++ % threshold == (threshold - 1)) {
+ ScopedConn conn(h.toString());
+ conn.reconnect();
+ }
+
+ Timer timer;
+ time_t before = curTimeMicros64() / 1000000;
+
+ bool ok = requestHeartbeat(theReplSet->name(), theReplSet->selfFullName(),
+ h.toString(), info, theReplSet->config().version, theirConfigVersion);
+
+ mem.ping = (unsigned int)timer.millis();
+
+ // we set this on any response - we don't get this far if we
+ // couldn't connect, because an exception is thrown in that case
+ time_t after = mem.lastHeartbeat = before + (mem.ping / 1000);
+
+ if ( info["time"].isNumber() ) {
+ long long t = info["time"].numberLong();
+ if( t > after )
+ mem.skew = (int) (t - after);
+ else if( t < before )
+ mem.skew = (int) (t - before); // negative
+ }
+ else {
+ // it won't be there if remote hasn't initialized yet
+ if( info.hasElement("time") )
+ warning() << "heatbeat.time isn't a number: " << info << endl;
+ mem.skew = INT_MIN;
+ }
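+ // e.g. (hypothetical times) if we sent the heartbeat at before=1000s and the ping was
+ // 2000 ms, then after=1002s; a remote "time" of 1005s yields skew = +3s, while a
+ // remote "time" of 998s yields skew = -2s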
+
+ {
+ be state = info["state"];
+ if( state.ok() )
+ mem.hbstate = MemberState(state.Int());
+ }
+
+ return ok;
+ }
+
+ void authIssue(HeartbeatInfo& mem) {
+ mem.authIssue = true;
+ mem.hbstate = MemberState::RS_UNKNOWN;
+
+ // set health to 0 so that this doesn't count towards majority
+ mem.health = 0.0;
+ theReplSet->rmFromElectable(mem.id());
+ }
+
+ void down(HeartbeatInfo& mem, string msg) {
+ mem.authIssue = false;
+ mem.health = 0.0;
+ mem.ping = 0;
+ if( mem.upSince || mem.downSince == 0 ) {
+ mem.upSince = 0;
+ mem.downSince = jsTime();
+ mem.hbstate = MemberState::RS_DOWN;
+ log() << "replSet info " << h.toString() << " is down (or slow to respond): " << msg << rsLog;
+ }
+ mem.lastHeartbeatMsg = msg;
+ theReplSet->rmFromElectable(mem.id());
+ }
+
+ void up(const BSONObj& info, HeartbeatInfo& mem) {
+ HeartbeatInfo::numPings++;
+ mem.authIssue = false;
+
+ if( mem.upSince == 0 ) {
+ log() << "replSet member " << h.toString() << " is up" << rsLog;
+ mem.upSince = mem.lastHeartbeat;
+ }
+ mem.health = 1.0;
+ mem.lastHeartbeatMsg = info["hbmsg"].String();
+ if( info.hasElement("opTime") )
+ mem.opTime = info["opTime"].Date();
+
+ // see if this member is in the electable set
+ if( info["e"].eoo() ) {
+ // for backwards compatibility
+ const Member *member = theReplSet->findById(mem.id());
+ if (member && member->config().potentiallyHot()) {
+ theReplSet->addToElectable(mem.id());
+ }
+ else {
+ theReplSet->rmFromElectable(mem.id());
+ }
+ }
+ // add this server to the electable set if it is within 10
+ // seconds of the latest optime we know of
+ else if( info["e"].trueValue() &&
+ mem.opTime >= theReplSet->lastOpTimeWritten.getSecs() - 10) {
+ unsigned lastOp = theReplSet->lastOtherOpTime().getSecs();
+ if (lastOp > 0 && mem.opTime >= lastOp - 10) {
+ theReplSet->addToElectable(mem.id());
+ }
+ }
+ else {
+ theReplSet->rmFromElectable(mem.id());
+ }
+
+ be cfg = info["config"];
+ if( cfg.ok() ) {
+ // received a new config
+ boost::function<void()> f =
+ boost::bind(&Manager::msgReceivedNewConfig, theReplSet->mgr, cfg.Obj().copy());
+ theReplSet->mgr->send(f);
+ }
+ }
+ };
+
+ void ReplSetImpl::endOldHealthTasks() {
+ unsigned sz = healthTasks.size();
+ for( set<ReplSetHealthPollTask*>::iterator i = healthTasks.begin(); i != healthTasks.end(); i++ )
+ (*i)->halt();
+ healthTasks.clear();
+ if( sz )
+ DEV log() << "replSet debug: cleared old tasks " << sz << endl;
+ }
+
+ void ReplSetImpl::startHealthTaskFor(Member *m) {
+ ReplSetHealthPollTask *task = new ReplSetHealthPollTask(m->h(), m->hbinfo());
+ healthTasks.insert(task);
+ task::repeat(task, 2000);
+ }
+
+ void startSyncThread();
+
+ /** called during repl set startup. caller expects it to return fairly quickly.
+ note ReplSet object is only created once we get a config - so this won't run
+ until the initiation.
+ */
+ void ReplSetImpl::startThreads() {
+ task::fork(mgr);
+ mgr->send( boost::bind(&Manager::msgCheckNewState, theReplSet->mgr) );
+
+ boost::thread t(startSyncThread);
+
+ task::fork(ghost);
+
+ // member heartbeats are started in ReplSetImpl::initFromConfig
+ }
+
+}
+
+/* todo:
+ stop bg job and delete on removefromset
+*/
diff --git a/src/mongo/db/repl/manager.cpp b/src/mongo/db/repl/manager.cpp
new file mode 100644
index 00000000000..91648a1b506
--- /dev/null
+++ b/src/mongo/db/repl/manager.cpp
@@ -0,0 +1,274 @@
+/* @file manager.cpp
+*/
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "rs.h"
+#include "connections.h"
+#include "../client.h"
+
+namespace mongo {
+
+ enum {
+ NOPRIMARY = -2,
+ SELFPRIMARY = -1
+ };
+
+ /* check members OTHER THAN US to see if they think they are primary */
+ const Member * Manager::findOtherPrimary(bool& two) {
+ two = false;
+ Member *m = rs->head();
+ Member *p = 0;
+ while( m ) {
+ DEV assert( m != rs->_self );
+ if( m->state().primary() && m->hbinfo().up() ) {
+ if( p ) {
+ two = true;
+ return 0;
+ }
+ p = m;
+ }
+ m = m->next();
+ }
+ if( p )
+ noteARemoteIsPrimary(p);
+ return p;
+ }
+
+ Manager::Manager(ReplSetImpl *_rs) :
+ task::Server("rsMgr"), rs(_rs), busyWithElectSelf(false), _primary(NOPRIMARY) {
+ }
+
+ Manager::~Manager() {
+ /* we don't destroy the replset object we sit in; however, the destructor could have thrown on init.
+ the log message below is just a reminder to come back one day and review this code more, and to
+ make it cleaner.
+ */
+ log() << "info: ~Manager called" << rsLog;
+ rs->mgr = 0;
+ }
+
+ void Manager::starting() {
+ Client::initThread("rsMgr");
+ replLocalAuth();
+ }
+
+ void Manager::noteARemoteIsPrimary(const Member *m) {
+ if( rs->box.getPrimary() == m )
+ return;
+ rs->_self->lhb() = "";
+ if( rs->iAmArbiterOnly() ) {
+ rs->box.set(MemberState::RS_ARBITER, m);
+ }
+ else {
+ rs->box.noteRemoteIsPrimary(m);
+ }
+ }
+
+ void Manager::checkElectableSet() {
+ unsigned otherOp = rs->lastOtherOpTime().getSecs();
+
+ // make sure the electable set is up-to-date
+ if (rs->elect.aMajoritySeemsToBeUp() &&
+ rs->iAmPotentiallyHot() &&
+ (otherOp == 0 || rs->lastOpTimeWritten.getSecs() >= otherOp - 10)) {
+ theReplSet->addToElectable(rs->selfId());
+ }
+ else {
+ theReplSet->rmFromElectable(rs->selfId());
+ }
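+ // e.g. (hypothetical optimes) if the freshest other member's last op is at t=500 and
+ // our lastOpTimeWritten is at t=495, then 495 >= 500-10 holds and, provided a majority
+ // seems up and we are potentially hot, we stay in the electable set; at t=485 we would
+ // be removed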
+
+ // check if we should ask the primary (possibly ourselves) to step down
+ const Member *highestPriority = theReplSet->getMostElectable();
+ const Member *primary = rs->box.getPrimary();
+
+ if (primary && highestPriority &&
+ highestPriority->config().priority > primary->config().priority) {
+ log() << "stepping down " << primary->fullName() << endl;
+
+ if (primary->h().isSelf()) {
+ // replSetStepDown tries to acquire the same lock
+ // msgCheckNewState takes, so we can't call replSetStepDown on
+ // ourselves.
+ rs->relinquish();
+ }
+ else {
+ BSONObj cmd = BSON( "replSetStepDown" << 1 );
+ ScopedConn conn(primary->fullName());
+ BSONObj result;
+ if (!conn.runCommand("admin", cmd, result, 0)) {
+ log() << "stepping down " << primary->fullName()
+ << " failed: " << result << endl;
+ }
+ }
+ }
+ }
+
+ void Manager::checkAuth() {
+ int down = 0, authIssue = 0, total = 0;
+
+ for( Member *m = rs->head(); m; m=m->next() ) {
+ total++;
+
+ // all authIssue servers will also be not up
+ if (!m->hbinfo().up()) {
+ down++;
+ if (m->hbinfo().authIssue) {
+ authIssue++;
+ }
+ }
+ }
+
+ // if all nodes are down or failed auth AND at least one failed
+ // auth, go into recovering. If all nodes are down, stay a
+ // secondary.
+ if (authIssue > 0 && down == total) {
+ log() << "replset error could not reach/authenticate against any members" << endl;
+
+ if (rs->box.getPrimary() == rs->_self) {
+ log() << "auth problems, relinquishing primary" << rsLog;
+ rs->relinquish();
+ }
+
+ rs->blockSync(true);
+ }
+ else {
+ rs->blockSync(false);
+ }
+ }
+
+ /** called as the health threads get new results */
+ void Manager::msgCheckNewState() {
+ {
+ theReplSet->assertValid();
+ rs->assertValid();
+
+ RSBase::lock lk(rs);
+
+ if( busyWithElectSelf ) return;
+
+ checkElectableSet();
+ checkAuth();
+
+ const Member *p = rs->box.getPrimary();
+ if( p && p != rs->_self ) {
+ if( !p->hbinfo().up() ||
+ !p->hbinfo().hbstate.primary() ) {
+ p = 0;
+ rs->box.setOtherPrimary(0);
+ }
+ }
+
+ const Member *p2;
+ {
+ bool two;
+ p2 = findOtherPrimary(two);
+ if( two ) {
+ /* two other nodes think they are primary (asynchronously polled) -- wait for things to settle down. */
+ log() << "replSet info two primaries (transiently)" << rsLog;
+ return;
+ }
+ }
+
+ if( p2 ) {
+ /* someone else thinks they are primary. */
+ if( p == p2 ) {
+ // we thought the same; all set.
+ return;
+ }
+ if( p == 0 ) {
+ noteARemoteIsPrimary(p2);
+ return;
+ }
+ // todo xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+ if( p != rs->_self ) {
+ // switch primary from oldremotep->newremotep2
+ noteARemoteIsPrimary(p2);
+ return;
+ }
+ /* we thought we were primary, yet now someone else thinks they are. */
+ if( !rs->elect.aMajoritySeemsToBeUp() ) {
+ /* we can't see a majority. so the other node is probably the right choice. */
+ noteARemoteIsPrimary(p2);
+ return;
+ }
+ /* ignore for now, keep thinking we are master.
+ this could just be timing (we poll every couple seconds) or could indicate
+ a problem? if it happens consistently for a duration of time we should
+ alert the sysadmin.
+ */
+ return;
+ }
+
+ /* didn't find anyone who wants to be primary */
+
+ if( p ) {
+ /* we are already primary */
+
+ if( p != rs->_self ) {
+ rs->sethbmsg("error p != rs->self in checkNewState");
+ log() << "replSet " << p->fullName() << rsLog;
+ log() << "replSet " << rs->_self->fullName() << rsLog;
+ return;
+ }
+
+ if( rs->elect.shouldRelinquish() ) {
+ log() << "can't see a majority of the set, relinquishing primary" << rsLog;
+ rs->relinquish();
+ }
+
+ return;
+ }
+
+ if( !rs->iAmPotentiallyHot() ) { // if not we never try to be primary
+ OCCASIONALLY log() << "replSet I don't see a primary and I can't elect myself" << endl;
+ return;
+ }
+
+ /* no one seems to be primary. shall we try to elect ourself? */
+ if( !rs->elect.aMajoritySeemsToBeUp() ) {
+ static time_t last;
+ static int n;
+ int ll = 0;
+ if( ++n > 5 ) ll++;
+ if( last + 60 > time(0) ) ll++;
+ log(ll) << "replSet can't see a majority, will not try to elect self" << rsLog;
+ last = time(0);
+ return;
+ }
+
+ if( !rs->iAmElectable() ) {
+ return;
+ }
+
+ busyWithElectSelf = true; // don't try to do further elections & such while we are already working on one.
+ }
+ try {
+ rs->elect.electSelf();
+ }
+ catch(RetryAfterSleepException&) {
+ /* we want to process new inbounds before trying this again. so we just put a checkNewstate in the queue for eval later. */
+ requeue();
+ }
+ catch(...) {
+ log() << "replSet error unexpected assertion in rs manager" << rsLog;
+ }
+ busyWithElectSelf = false;
+ }
+
+}
diff --git a/src/mongo/db/repl/multicmd.h b/src/mongo/db/repl/multicmd.h
new file mode 100644
index 00000000000..2d70c551f64
--- /dev/null
+++ b/src/mongo/db/repl/multicmd.h
@@ -0,0 +1,75 @@
+// @file multicmd.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../../util/background.h"
+#include "connections.h"
+
+namespace mongo {
+
+ struct Target {
+ Target(string hostport) : toHost(hostport), ok(false) { }
+ //Target() : ok(false) { }
+ const string toHost;
+ bool ok;
+ BSONObj result;
+ };
+
+ /** send a command to several servers in parallel. waits for all to complete before
+ returning.
+
+ in: Target::toHost
+ out: Target::result and Target::ok
+ */
+ void multiCommand(BSONObj cmd, list<Target>& L);
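+
+ /* usage sketch (host names and command are hypothetical):
+
+ list<Target> targets;
+ targets.push_back(Target("a.example.net:27017"));
+ targets.push_back(Target("b.example.net:27017"));
+ multiCommand(BSON("ping" << 1), targets);
+ for( list<Target>::iterator i = targets.begin(); i != targets.end(); i++ )
+ if( i->ok )
+ log() << i->toHost << " -> " << i->result.toString() << rsLog;
+ */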
+
+ class _MultiCommandJob : public BackgroundJob {
+ public:
+ BSONObj& cmd;
+ Target& d;
+ _MultiCommandJob(BSONObj& _cmd, Target& _d) : cmd(_cmd), d(_d) { }
+
+ private:
+ string name() const { return "MultiCommandJob"; }
+ void run() {
+ try {
+ ScopedConn c(d.toHost);
+ d.ok = c.runCommand("admin", cmd, d.result);
+ }
+ catch(DBException&) {
+ DEV log() << "dev caught dbexception on multiCommand " << d.toHost << rsLog;
+ }
+ }
+ };
+
+ inline void multiCommand(BSONObj cmd, list<Target>& L) {
+ list< shared_ptr<BackgroundJob> > jobs;
+
+ for( list<Target>::iterator i = L.begin(); i != L.end(); i++ ) {
+ Target& d = *i;
+ _MultiCommandJob *j = new _MultiCommandJob(cmd, d);
+ jobs.push_back( shared_ptr<BackgroundJob>(j) );
+ j->go();
+ }
+
+ for( list< shared_ptr<BackgroundJob> >::iterator i = jobs.begin(); i != jobs.end(); i++ ) {
+ (*i)->wait();
+ }
+ }
+}
diff --git a/src/mongo/db/repl/replset_commands.cpp b/src/mongo/db/repl/replset_commands.cpp
new file mode 100644
index 00000000000..84f16e53466
--- /dev/null
+++ b/src/mongo/db/repl/replset_commands.cpp
@@ -0,0 +1,404 @@
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "../cmdline.h"
+#include "../commands.h"
+#include "../repl.h"
+#include "health.h"
+#include "rs.h"
+#include "rs_config.h"
+#include "../dbwebserver.h"
+#include "../../util/mongoutils/html.h"
+#include "../../client/dbclient.h"
+#include "../repl_block.h"
+
+using namespace bson;
+
+namespace mongo {
+
+ void checkMembersUpForConfigChange(const ReplSetConfig& cfg, BSONObjBuilder& result, bool initial);
+
+ /* commands in other files:
+ replSetHeartbeat - health.cpp
+ replSetInitiate - rs_mod.cpp
+ */
+
+ bool replSetBlind = false;
+ unsigned replSetForceInitialSyncFailure = 0;
+
+ class CmdReplSetTest : public ReplSetCommand {
+ public:
+ virtual void help( stringstream &help ) const {
+ help << "Just for regression tests.\n";
+ }
+ CmdReplSetTest() : ReplSetCommand("replSetTest") { }
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ log() << "replSet replSetTest command received: " << cmdObj.toString() << rsLog;
+
+ if (!checkAuth(errmsg, result)) {
+ return false;
+ }
+
+ if( cmdObj.hasElement("forceInitialSyncFailure") ) {
+ replSetForceInitialSyncFailure = (unsigned) cmdObj["forceInitialSyncFailure"].Number();
+ return true;
+ }
+
+ if( !check(errmsg, result) )
+ return false;
+
+ if( cmdObj.hasElement("blind") ) {
+ replSetBlind = cmdObj.getBoolField("blind");
+ return true;
+ }
+
+ if (cmdObj.hasElement("sethbmsg")) {
+ replset::sethbmsg(cmdObj["sethbmsg"].String());
+ return true;
+ }
+
+ return false;
+ }
+ } cmdReplSetTest;
+
+ /** get rollback id. used to check if a rollback happened during some interval of time.
+ as consumed by callers, the rollback id has no particular ordering; it simply changes on each rollback.
+ @see incRBID()
+ */
+ class CmdReplSetGetRBID : public ReplSetCommand {
+ public:
+ /* todo: ideally this should only change on rollbacks NOT on mongod restarts also. fix... */
+ int rbid;
+ virtual void help( stringstream &help ) const {
+ help << "internal";
+ }
+ CmdReplSetGetRBID() : ReplSetCommand("replSetGetRBID") {
+ // this is ok, but microseconds or a combination with rand() and/or 64 bits might be better --
+ // imagine a restart and a clock correction happening simultaneously (very unlikely, but possible...)
+ rbid = (int) curTimeMillis64();
+ }
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ if( !check(errmsg, result) )
+ return false;
+ result.append("rbid",rbid);
+ return true;
+ }
+ } cmdReplSetRBID;
+
+ /** we increment the rollback id on every rollback event. */
+ void incRBID() {
+ cmdReplSetRBID.rbid++;
+ }
+
+ /** helper to get rollback id from another server. */
+ int getRBID(DBClientConnection *c) {
+ bo info;
+ c->simpleCommand("admin", &info, "replSetGetRBID");
+ return info["rbid"].numberInt();
+ }
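+
+ /* usage sketch: a caller that wants to notice whether a rollback happened on a remote
+ server during some interval can compare ids ('conn' is a hypothetical
+ DBClientConnection to that server):
+
+ int before = getRBID(&conn);
+ // ... work against that server ...
+ if( getRBID(&conn) != before ) {
+ // a rollback occurred in the interval; re-verify anything read earlier
+ }
+ */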
+
+ class CmdReplSetGetStatus : public ReplSetCommand {
+ public:
+ virtual void help( stringstream &help ) const {
+ help << "Report status of a replica set from the POV of this server\n";
+ help << "{ replSetGetStatus : 1 }";
+ help << "\nhttp://www.mongodb.org/display/DOCS/Replica+Set+Commands";
+ }
+ CmdReplSetGetStatus() : ReplSetCommand("replSetGetStatus", true) { }
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ if ( cmdObj["forShell"].trueValue() )
+ lastError.disableForCommand();
+
+ if( !check(errmsg, result) )
+ return false;
+ theReplSet->summarizeStatus(result);
+ return true;
+ }
+ } cmdReplSetGetStatus;
+
+ class CmdReplSetReconfig : public ReplSetCommand {
+ RWLock mutex; /* we don't need rw but we wanted try capability. :-( */
+ public:
+ virtual void help( stringstream &help ) const {
+ help << "Adjust configuration of a replica set\n";
+ help << "{ replSetReconfig : config_object }";
+ help << "\nhttp://www.mongodb.org/display/DOCS/Replica+Set+Commands";
+ }
+ CmdReplSetReconfig() : ReplSetCommand("replSetReconfig"), mutex("rsreconfig") { }
+ virtual bool run(const string& a, BSONObj& b, int e, string& errmsg, BSONObjBuilder& c, bool d) {
+ try {
+ rwlock_try_write lk(mutex);
+ return _run(a,b,e,errmsg,c,d);
+ }
+ catch(rwlock_try_write::exception&) { }
+ errmsg = "a replSetReconfig is already in progress";
+ return false;
+ }
+ private:
+ bool _run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ if ( !checkAuth(errmsg, result) ) {
+ return false;
+ }
+
+ if( cmdObj["replSetReconfig"].type() != Object ) {
+ errmsg = "no configuration specified";
+ return false;
+ }
+
+ bool force = cmdObj.hasField("force") && cmdObj["force"].trueValue();
+ if( force && !theReplSet ) {
+ replSettings.reconfig = cmdObj["replSetReconfig"].Obj().getOwned();
+ result.append("msg", "will try this config momentarily, try running rs.conf() again in a few seconds");
+ return true;
+ }
+
+ if ( !check(errmsg, result) ) {
+ return false;
+ }
+
+ if( !force && !theReplSet->box.getState().primary() ) {
+ errmsg = "replSetReconfig command must be sent to the current replica set primary.";
+ return false;
+ }
+
+ {
+ // just make sure we can get a write lock before doing anything else. we'll reacquire one
+ // later. of course it could be stuck then, but this check lowers the risk if weird things
+ // are up - we probably don't want a change to apply 30 minutes after the initial attempt.
+ time_t t = time(0);
+ writelock lk("");
+ if( time(0)-t > 20 ) {
+ errmsg = "took a long time to get write lock, so not initiating. Initiate when server less busy?";
+ return false;
+ }
+ }
+
+ try {
+ ReplSetConfig newConfig(cmdObj["replSetReconfig"].Obj(), force);
+
+ log() << "replSet replSetReconfig config object parses ok, " << newConfig.members.size() << " members specified" << rsLog;
+
+ if( !ReplSetConfig::legalChange(theReplSet->getConfig(), newConfig, errmsg) ) {
+ return false;
+ }
+
+ checkMembersUpForConfigChange(newConfig, result, false);
+
+ log() << "replSet replSetReconfig [2]" << rsLog;
+
+ theReplSet->haveNewConfig(newConfig, true);
+ ReplSet::startupStatusMsg.set("replSetReconfig'd");
+ }
+ catch( DBException& e ) {
+ log() << "replSet replSetReconfig exception: " << e.what() << rsLog;
+ throw;
+ }
+ catch( string& se ) {
+ log() << "replSet reconfig exception: " << se << rsLog;
+ errmsg = se;
+ return false;
+ }
+
+ resetSlaveCache();
+ return true;
+ }
+ } cmdReplSetReconfig;
+
+ class CmdReplSetFreeze : public ReplSetCommand {
+ public:
+ virtual void help( stringstream &help ) const {
+ help << "{ replSetFreeze : <seconds> }";
+ help << "'freeze' state of member to the extent we can do that. What this really means is that\n";
+ help << "this node will not attempt to become primary until the time period specified expires.\n";
+ help << "You can call again with {replSetFreeze:0} to unfreeze sooner.\n";
+ help << "A process restart unfreezes the member also.\n";
+ help << "\nhttp://www.mongodb.org/display/DOCS/Replica+Set+Commands";
+ }
+
+ CmdReplSetFreeze() : ReplSetCommand("replSetFreeze") { }
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ if( !check(errmsg, result) )
+ return false;
+ int secs = (int) cmdObj.firstElement().numberInt();
+ if( theReplSet->freeze(secs) ) {
+ if( secs == 0 )
+ result.append("info","unfreezing");
+ }
+ if( secs == 1 )
+ result.append("warning", "you really want to freeze for only 1 second?");
+ return true;
+ }
+ } cmdReplSetFreeze;
+
+ class CmdReplSetStepDown: public ReplSetCommand {
+ public:
+ virtual void help( stringstream &help ) const {
+ help << "{ replSetStepDown : <seconds> }\n";
+ help << "Step down as primary. Will not try to reelect self for the specified time period (1 minute if no numeric secs value specified).\n";
+ help << "(If another member with same priority takes over in the meantime, it will stay primary.)\n";
+ help << "http://www.mongodb.org/display/DOCS/Replica+Set+Commands";
+ }
+
+ CmdReplSetStepDown() : ReplSetCommand("replSetStepDown") { }
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ if( !check(errmsg, result) )
+ return false;
+ if( !theReplSet->box.getState().primary() ) {
+ errmsg = "not primary so can't step down";
+ return false;
+ }
+
+ bool force = cmdObj.hasField("force") && cmdObj["force"].trueValue();
+
+ // only step down if there is another node synced to within 10
+ // seconds of this node
+ if (!force) {
+ long long int lastOp = (long long int)theReplSet->lastOpTimeWritten.getSecs();
+ long long int closest = (long long int)theReplSet->lastOtherOpTime().getSecs();
+
+ long long int diff = lastOp - closest;
+ result.append("closest", closest);
+ result.append("difference", diff);
+
+ if (diff < 0) {
+ // not our problem, but we'll wait until things settle down
+ errmsg = "someone is ahead of the primary?";
+ return false;
+ }
+
+ if (diff > 10) {
+ errmsg = "no secondaries within 10 seconds of my optime";
+ return false;
+ }
+ }
+
+ int secs = (int) cmdObj.firstElement().numberInt();
+ if( secs == 0 )
+ secs = 60;
+ return theReplSet->stepDown(secs);
+ }
+ } cmdReplSetStepDown;
+
+ class CmdReplSetMaintenance: public ReplSetCommand {
+ public:
+ virtual void help( stringstream &help ) const {
+ help << "{ replSetMaintenance : bool }\n";
+ help << "Enable or disable maintenance mode.";
+ }
+
+ CmdReplSetMaintenance() : ReplSetCommand("replSetMaintenance") { }
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ if( !check(errmsg, result) )
+ return false;
+ if( theReplSet->box.getState().primary() ) {
+ errmsg = "primaries can't modify maintenance mode";
+ return false;
+ }
+
+ theReplSet->setMaintenanceMode(cmdObj["replSetMaintenance"].trueValue());
+ return true;
+ }
+ } cmdReplSetMaintenance;
+
+ using namespace bson;
+ using namespace mongoutils::html;
+ extern void fillRsLog(stringstream&);
+
+ class ReplSetHandler : public DbWebHandler {
+ public:
+ ReplSetHandler() : DbWebHandler( "_replSet" , 1 , true ) {}
+
+ virtual bool handles( const string& url ) const {
+ return startsWith( url , "/_replSet" );
+ }
+
+ virtual void handle( const char *rq, string url, BSONObj params,
+ string& responseMsg, int& responseCode,
+ vector<string>& headers, const SockAddr &from ) {
+
+ if( url == "/_replSetOplog" ) {
+ responseMsg = _replSetOplog(params);
+ }
+ else
+ responseMsg = _replSet();
+ responseCode = 200;
+ }
+
+ string _replSetOplog(bo parms) {
+ int _id = (int) str::toUnsigned( parms["_id"].String() );
+
+ stringstream s;
+ string t = "Replication oplog";
+ s << start(t);
+ s << p(t);
+
+ if( theReplSet == 0 ) {
+ if( cmdLine._replSet.empty() )
+ s << p("Not using --replSet");
+ else {
+ s << p("Still starting up, or else set is not yet " + a("http://www.mongodb.org/display/DOCS/Replica+Set+Configuration#InitialSetup", "", "initiated")
+ + ".<br>" + ReplSet::startupStatusMsg.get());
+ }
+ }
+ else {
+ try {
+ theReplSet->getOplogDiagsAsHtml(_id, s);
+ }
+ catch(std::exception& e) {
+ s << "error querying oplog: " << e.what() << '\n';
+ }
+ }
+
+ s << _end();
+ return s.str();
+ }
+
+ /* /_replSet show replica set status in html format */
+ string _replSet() {
+ stringstream s;
+ s << start("Replica Set Status " + prettyHostName());
+ s << p( a("/", "back", "Home") + " | " +
+ a("/local/system.replset/?html=1", "", "View Replset Config") + " | " +
+ a("/replSetGetStatus?text=1", "", "replSetGetStatus") + " | " +
+ a("http://www.mongodb.org/display/DOCS/Replica+Sets", "", "Docs")
+ );
+
+ if( theReplSet == 0 ) {
+ if( cmdLine._replSet.empty() )
+ s << p("Not using --replSet");
+ else {
+ s << p("Still starting up, or else set is not yet " + a("http://www.mongodb.org/display/DOCS/Replica+Set+Configuration#InitialSetup", "", "initiated")
+ + ".<br>" + ReplSet::startupStatusMsg.get());
+ }
+ }
+ else {
+ try {
+ theReplSet->summarizeAsHtml(s);
+ }
+ catch(...) { s << "error summarizing replset status\n"; }
+ }
+ s << p("Recent replset log activity:");
+ fillRsLog(s);
+ s << _end();
+ return s.str();
+ }
+
+
+
+ } replSetHandler;
+
+}
diff --git a/src/mongo/db/repl/rs.cpp b/src/mongo/db/repl/rs.cpp
new file mode 100644
index 00000000000..fff5d72bcc0
--- /dev/null
+++ b/src/mongo/db/repl/rs.cpp
@@ -0,0 +1,778 @@
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "../cmdline.h"
+#include "../../util/net/sock.h"
+#include "../client.h"
+#include "../../client/dbclient.h"
+#include "../dbhelpers.h"
+#include "../../s/d_logic.h"
+#include "rs.h"
+#include "connections.h"
+#include "../repl.h"
+#include "../instance.h"
+
+using namespace std;
+
+namespace mongo {
+
+ using namespace bson;
+
+ bool replSet = false;
+ ReplSet *theReplSet = 0;
+
+ bool isCurrentlyAReplSetPrimary() {
+ return theReplSet && theReplSet->isPrimary();
+ }
+
+ void replset::sethbmsg(const string& s, const int level) {
+ if (theReplSet) {
+ theReplSet->sethbmsg(s, level);
+ }
+ }
+
+ void ReplSetImpl::sethbmsg(string s, int logLevel) {
+ static time_t lastLogged;
+ _hbmsgTime = time(0);
+
+ if( s == _hbmsg ) {
+ // unchanged
+ if( _hbmsgTime - lastLogged < 60 )
+ return;
+ }
+
+ unsigned sz = s.size();
+ if( sz >= 256 )
+ memcpy(_hbmsg, s.c_str(), 255);
+ else {
+ _hbmsg[sz] = 0;
+ memcpy(_hbmsg, s.c_str(), sz);
+ }
+ if( !s.empty() ) {
+ lastLogged = _hbmsgTime;
+ log(logLevel) << "replSet " << s << rsLog;
+ }
+ }
+
+ void ReplSetImpl::assumePrimary() {
+ LOG(2) << "replSet assuming primary" << endl;
+ assert( iAmPotentiallyHot() );
+ writelock lk("admin."); // so we are synchronized with _logOp()
+
+ // Make sure that new OpTimes are higher than existing ones even with clock skew
+ DBDirectClient c;
+ BSONObj lastOp = c.findOne( "local.oplog.rs", Query().sort(reverseNaturalObj), NULL, QueryOption_SlaveOk );
+ if ( !lastOp.isEmpty() ) {
+ OpTime::setLast( lastOp[ "ts" ].date() );
+ }
+
+ changeState(MemberState::RS_PRIMARY);
+ }
+
+ void ReplSetImpl::changeState(MemberState s) { box.change(s, _self); }
+
+ void ReplSetImpl::setMaintenanceMode(const bool inc) {
+ lock lk(this);
+
+ if (inc) {
+ log() << "replSet going into maintenance mode (" << _maintenanceMode << " other tasks)" << rsLog;
+
+ _maintenanceMode++;
+ changeState(MemberState::RS_RECOVERING);
+ }
+ else {
+ _maintenanceMode--;
+ // no need to change state, syncTail will try to go live as a secondary soon
+
+ log() << "leaving maintenance mode (" << _maintenanceMode << " other tasks)" << rsLog;
+ }
+ }
+
+ Member* ReplSetImpl::getMostElectable() {
+ lock lk(this);
+
+ Member *max = 0;
+
+ for (set<unsigned>::iterator it = _electableSet.begin(); it != _electableSet.end(); it++) {
+ const Member *temp = findById(*it);
+ if (!temp) {
+ log() << "couldn't find member: " << *it << endl;
+ _electableSet.erase(*it);
+ continue;
+ }
+ if (!max || max->config().priority < temp->config().priority) {
+ max = (Member*)temp;
+ }
+ }
+
+ return max;
+ }
+
+ const bool closeOnRelinquish = true;
+
+ void ReplSetImpl::relinquish() {
+ LOG(2) << "replSet attempting to relinquish" << endl;
+ if( box.getState().primary() ) {
+ {
+ writelock lk("admin."); // so we are synchronized with _logOp()
+
+ log() << "replSet relinquishing primary state" << rsLog;
+ changeState(MemberState::RS_SECONDARY);
+ }
+
+ if( closeOnRelinquish ) {
+ /* close sockets that were talking to us so they don't blithely send many writes that will fail
+ with "not master" (of course the client could check the result code, but in case they do not)
+ */
+ log() << "replSet closing client sockets after relinquishing primary" << rsLog;
+ MessagingPort::closeAllSockets(1);
+ }
+
+ // now that all connections were closed, strip this mongod from all sharding details
+ // if and when it gets promoted to a primary again, only then it should reload the sharding state
+ // the rationale here is that this mongod won't bring stale state when it regains primaryhood
+ shardingState.resetShardingState();
+
+ }
+ else if( box.getState().startup2() ) {
+ // ? add comment
+ changeState(MemberState::RS_RECOVERING);
+ }
+ }
+
+ /* look freshly for who is primary - includes relinquishing ourself. */
+ void ReplSetImpl::forgetPrimary() {
+ if( box.getState().primary() )
+ relinquish();
+ else {
+ box.setOtherPrimary(0);
+ }
+ }
+
+ // for the replSetStepDown command
+ bool ReplSetImpl::_stepDown(int secs) {
+ lock lk(this);
+ if( box.getState().primary() ) {
+ elect.steppedDown = time(0) + secs;
+ log() << "replSet info stepping down as primary secs=" << secs << rsLog;
+ relinquish();
+ return true;
+ }
+ return false;
+ }
+
+ bool ReplSetImpl::_freeze(int secs) {
+ lock lk(this);
+ /* note if we are primary we remain primary but won't try to elect ourself again until
+ this time period expires.
+ */
+ if( secs == 0 ) {
+ elect.steppedDown = 0;
+ log() << "replSet info 'unfreezing'" << rsLog;
+ }
+ else {
+ if( !box.getState().primary() ) {
+ elect.steppedDown = time(0) + secs;
+ log() << "replSet info 'freezing' for " << secs << " seconds" << rsLog;
+ }
+ else {
+ log() << "replSet info received freeze command but we are primary" << rsLog;
+ }
+ }
+ return true;
+ }
+
+ void ReplSetImpl::msgUpdateHBInfo(HeartbeatInfo h) {
+ for( Member *m = _members.head(); m; m=m->next() ) {
+ if( m->id() == h.id() ) {
+ m->_hbinfo = h;
+ return;
+ }
+ }
+ }
+
+ list<HostAndPort> ReplSetImpl::memberHostnames() const {
+ list<HostAndPort> L;
+ L.push_back(_self->h());
+ for( Member *m = _members.head(); m; m = m->next() )
+ L.push_back(m->h());
+ return L;
+ }
+
+ void ReplSetImpl::_fillIsMasterHost(const Member *m, vector<string>& hosts, vector<string>& passives, vector<string>& arbiters) {
+ assert( m );
+ if( m->config().hidden )
+ return;
+
+ if( m->potentiallyHot() ) {
+ hosts.push_back(m->h().toString());
+ }
+ else if( !m->config().arbiterOnly ) {
+ if( m->config().slaveDelay ) {
+ /* hmmm - we don't list these as they are stale. */
+ }
+ else {
+ passives.push_back(m->h().toString());
+ }
+ }
+ else {
+ arbiters.push_back(m->h().toString());
+ }
+ }
+
+ void ReplSetImpl::_fillIsMaster(BSONObjBuilder& b) {
+ lock lk(this);
+
+ const StateBox::SP sp = box.get();
+ bool isp = sp.state.primary();
+ b.append("setName", name());
+ b.append("ismaster", isp);
+ b.append("secondary", sp.state.secondary());
+ {
+ vector<string> hosts, passives, arbiters;
+ _fillIsMasterHost(_self, hosts, passives, arbiters);
+
+ for( Member *m = _members.head(); m; m = m->next() ) {
+ assert( m );
+ _fillIsMasterHost(m, hosts, passives, arbiters);
+ }
+
+ if( hosts.size() > 0 ) {
+ b.append("hosts", hosts);
+ }
+ if( passives.size() > 0 ) {
+ b.append("passives", passives);
+ }
+ if( arbiters.size() > 0 ) {
+ b.append("arbiters", arbiters);
+ }
+ }
+
+ if( !isp ) {
+ const Member *m = sp.primary;
+ if( m )
+ b.append("primary", m->h().toString());
+ }
+ else {
+ b.append("primary", _self->fullName());
+ }
+
+ if( myConfig().arbiterOnly )
+ b.append("arbiterOnly", true);
+ if( myConfig().priority == 0 && !myConfig().arbiterOnly)
+ b.append("passive", true);
+ if( myConfig().slaveDelay )
+ b.append("slaveDelay", myConfig().slaveDelay);
+ if( myConfig().hidden )
+ b.append("hidden", true);
+ if( !myConfig().buildIndexes )
+ b.append("buildIndexes", false);
+ if( !myConfig().tags.empty() ) {
+ BSONObjBuilder a;
+ for( map<string,string>::const_iterator i = myConfig().tags.begin(); i != myConfig().tags.end(); i++ )
+ a.append((*i).first, (*i).second);
+ b.append("tags", a.done());
+ }
+ b.append("me", myConfig().h.toString());
+ }
+
+ /** @param cfgString <setname>/<seedhost1>,<seedhost2> */
+
+ void parseReplsetCmdLine(string cfgString, string& setname, vector<HostAndPort>& seeds, set<HostAndPort>& seedSet ) {
+ const char *p = cfgString.c_str();
+ const char *slash = strchr(p, '/');
+ if( slash )
+ setname = string(p, slash-p);
+ else
+ setname = p;
+ uassert(13093, "bad --replSet config string format is: <setname>[/<seedhost1>,<seedhost2>,...]", !setname.empty());
+
+ if( slash == 0 )
+ return;
+
+ p = slash + 1;
+ while( 1 ) {
+ const char *comma = strchr(p, ',');
+ if( comma == 0 ) comma = strchr(p,0);
+ if( p == comma )
+ break;
+ {
+ HostAndPort m;
+ try {
+ m = HostAndPort( string(p, comma-p) );
+ }
+ catch(...) {
+ uassert(13114, "bad --replSet seed hostname", false);
+ }
+ uassert(13096, "bad --replSet command line config string - dups?", seedSet.count(m) == 0 );
+ seedSet.insert(m);
+ //uassert(13101, "can't use localhost in replset host list", !m.isLocalHost());
+ if( m.isSelf() ) {
+ log(1) << "replSet ignoring seed " << m.toString() << " (=self)" << rsLog;
+ }
+ else
+ seeds.push_back(m);
+ if( *comma == 0 )
+ break;
+ p = comma + 1;
+ }
+ }
+ }
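+
+    /* Illustrative usage sketch (editor's note, not part of the original source): given
+       --replSet "rs0/hostA:27017,hostB:27017", a call such as
+
+           string setname;
+           vector<HostAndPort> seeds;
+           set<HostAndPort> seedSet;
+           parseReplsetCmdLine("rs0/hostA:27017,hostB:27017", setname, seeds, seedSet);
+
+       would leave setname == "rs0" and both hosts in seedSet, while seeds holds only the
+       entries that do not resolve to this host itself. Host names here are hypothetical.
+    */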
+
+ ReplSetImpl::ReplSetImpl(ReplSetCmdline& replSetCmdline) : elect(this),
+ _currentSyncTarget(0),
+ _blockSync(false),
+ _hbmsgTime(0),
+ _self(0),
+ _maintenanceMode(0),
+ mgr( new Manager(this) ),
+ ghost( new GhostSync(this) ) {
+
+ _cfg = 0;
+ memset(_hbmsg, 0, sizeof(_hbmsg));
+ strcpy( _hbmsg , "initial startup" );
+ lastH = 0;
+ changeState(MemberState::RS_STARTUP);
+
+ _seeds = &replSetCmdline.seeds;
+
+ LOG(1) << "replSet beginning startup..." << rsLog;
+
+ loadConfig();
+
+ unsigned sss = replSetCmdline.seedSet.size();
+ for( Member *m = head(); m; m = m->next() ) {
+ replSetCmdline.seedSet.erase(m->h());
+ }
+ for( set<HostAndPort>::iterator i = replSetCmdline.seedSet.begin(); i != replSetCmdline.seedSet.end(); i++ ) {
+ if( i->isSelf() ) {
+ if( sss == 1 ) {
+                    LOG(1) << "replSet warning self is listed in the seed list and there are no other seeds listed; did you intend that?" << rsLog;
+ }
+ }
+ else {
+ log() << "replSet warning command line seed " << i->toString() << " is not present in the current repl set config" << rsLog;
+ }
+ }
+ }
+
+ void newReplUp();
+
+ void ReplSetImpl::loadLastOpTimeWritten(bool quiet) {
+ readlock lk(rsoplog);
+ BSONObj o;
+ if( Helpers::getLast(rsoplog, o) ) {
+ lastH = o["h"].numberLong();
+ lastOpTimeWritten = o["ts"]._opTime();
+ uassert(13290, "bad replSet oplog entry?", quiet || !lastOpTimeWritten.isNull());
+ }
+ }
+
+ /* call after constructing to start - returns fairly quickly after launching its threads */
+ void ReplSetImpl::_go() {
+ try {
+ loadLastOpTimeWritten();
+ }
+ catch(std::exception& e) {
+ log() << "replSet error fatal couldn't query the local " << rsoplog << " collection. Terminating mongod after 30 seconds." << rsLog;
+ log() << e.what() << rsLog;
+ sleepsecs(30);
+ dbexit( EXIT_REPLICATION_ERROR );
+ return;
+ }
+
+ changeState(MemberState::RS_STARTUP2);
+ startThreads();
+ newReplUp(); // oplog.cpp
+ }
+
+ ReplSetImpl::StartupStatus ReplSetImpl::startupStatus = PRESTART;
+ DiagStr ReplSetImpl::startupStatusMsg;
+
+ extern BSONObj *getLastErrorDefault;
+
+ void ReplSetImpl::setSelfTo(Member *m) {
+ // already locked in initFromConfig
+ _self = m;
+ _id = m->id();
+ _config = m->config();
+ if( m ) _buildIndexes = m->config().buildIndexes;
+ else _buildIndexes = true;
+ }
+
+ /** @param reconf true if this is a reconfiguration and not an initial load of the configuration.
+ @return true if ok; throws if config really bad; false if config doesn't include self
+ */
+ bool ReplSetImpl::initFromConfig(ReplSetConfig& c, bool reconf) {
+ /* NOTE: haveNewConfig() writes the new config to disk before we get here. So
+ we cannot error out at this point, except fatally. Check errors earlier.
+ */
+ lock lk(this);
+
+ if( getLastErrorDefault || !c.getLastErrorDefaults.isEmpty() ) {
+ // see comment in dbcommands.cpp for getlasterrordefault
+ getLastErrorDefault = new BSONObj( c.getLastErrorDefaults );
+ }
+
+ list<ReplSetConfig::MemberCfg*> newOnes;
+ // additive short-cuts the new config setup. If we are just adding a
+ // node/nodes and nothing else is changing, this is additive. If it's
+ // not a reconfig, we're not adding anything
+ bool additive = reconf;
+ {
+ unsigned nfound = 0;
+ int me = 0;
+ for( vector<ReplSetConfig::MemberCfg>::iterator i = c.members.begin(); i != c.members.end(); i++ ) {
+
+ ReplSetConfig::MemberCfg& m = *i;
+ if( m.h.isSelf() ) {
+ me++;
+ }
+
+ if( reconf ) {
+ if (m.h.isSelf() && (!_self || (int)_self->id() != m._id)) {
+ log() << "self doesn't match: " << m._id << rsLog;
+ assert(false);
+ }
+
+ const Member *old = findById(m._id);
+ if( old ) {
+ nfound++;
+ assert( (int) old->id() == m._id );
+ if( old->config() != m ) {
+ additive = false;
+ }
+ }
+ else {
+ newOnes.push_back(&m);
+ }
+ }
+ }
+ if( me == 0 ) {
+ _members.orphanAll();
+
+ // sending hbs must continue to pick up new config, so we leave
+ // hb threads alone
+
+ // close sockets to force clients to re-evaluate this member
+ MessagingPort::closeAllSockets(0);
+
+ // stop sync thread
+ box.set(MemberState::RS_STARTUP, 0);
+
+ // go into holding pattern
+ log() << "replSet error self not present in the repl set configuration:" << rsLog;
+ log() << c.toString() << rsLog;
+ return false;
+ }
+ uassert( 13302, "replSet error self appears twice in the repl set configuration", me<=1 );
+
+            // if we found different members than the original config, reload everything
+ if( reconf && config().members.size() != nfound )
+ additive = false;
+ }
+
+ _cfg = new ReplSetConfig(c);
+ assert( _cfg->ok() );
+ assert( _name.empty() || _name == _cfg->_id );
+ _name = _cfg->_id;
+ assert( !_name.empty() );
+
+ // this is a shortcut for simple changes
+ if( additive ) {
+ log() << "replSet info : additive change to configuration" << rsLog;
+ for( list<ReplSetConfig::MemberCfg*>::const_iterator i = newOnes.begin(); i != newOnes.end(); i++ ) {
+ ReplSetConfig::MemberCfg *m = *i;
+ Member *mi = new Member(m->h, m->_id, m, false);
+
+ /** we will indicate that new members are up() initially so that we don't relinquish our
+ primary state because we can't (transiently) see a majority. they should be up as we
+ check that new members are up before getting here on reconfig anyway.
+ */
+ mi->get_hbinfo().health = 0.1;
+
+ _members.push(mi);
+ startHealthTaskFor(mi);
+ }
+
+ // if we aren't creating new members, we may have to update the
+ // groups for the current ones
+ _cfg->updateMembers(_members);
+
+ return true;
+ }
+
+ // start with no members. if this is a reconfig, drop the old ones.
+ _members.orphanAll();
+
+ endOldHealthTasks();
+
+ int oldPrimaryId = -1;
+ {
+ const Member *p = box.getPrimary();
+ if( p )
+ oldPrimaryId = p->id();
+ }
+ forgetPrimary();
+
+ // not setting _self to 0 as other threads use _self w/o locking
+ int me = 0;
+
+ // For logging
+ string members = "";
+
+ for( vector<ReplSetConfig::MemberCfg>::iterator i = _cfg->members.begin(); i != _cfg->members.end(); i++ ) {
+ ReplSetConfig::MemberCfg& m = *i;
+ Member *mi;
+ members += ( members == "" ? "" : ", " ) + m.h.toString();
+ if( m.h.isSelf() ) {
+ assert( me++ == 0 );
+ mi = new Member(m.h, m._id, &m, true);
+ if (!reconf) {
+ log() << "replSet I am " << m.h.toString() << rsLog;
+ }
+ setSelfTo(mi);
+
+ if( (int)mi->id() == oldPrimaryId )
+ box.setSelfPrimary(mi);
+ }
+ else {
+ mi = new Member(m.h, m._id, &m, false);
+ _members.push(mi);
+ startHealthTaskFor(mi);
+ if( (int)mi->id() == oldPrimaryId )
+ box.setOtherPrimary(mi);
+ }
+ }
+
+ if( me == 0 ){
+ log() << "replSet warning did not detect own host in full reconfig, members " << members << " config: " << c << rsLog;
+ }
+
+ return true;
+ }
+
+ // Our own config must be the first one.
+ bool ReplSetImpl::_loadConfigFinish(vector<ReplSetConfig>& cfgs) {
+ int v = -1;
+ ReplSetConfig *highest = 0;
+ int myVersion = -2000;
+ int n = 0;
+ for( vector<ReplSetConfig>::iterator i = cfgs.begin(); i != cfgs.end(); i++ ) {
+ ReplSetConfig& cfg = *i;
+ if( ++n == 1 ) myVersion = cfg.version;
+ if( cfg.ok() && cfg.version > v ) {
+ highest = &cfg;
+ v = cfg.version;
+ }
+ }
+ assert( highest );
+
+ if( !initFromConfig(*highest) )
+ return false;
+
+ if( highest->version > myVersion && highest->version >= 0 ) {
+ log() << "replSet got config version " << highest->version << " from a remote, saving locally" << rsLog;
+ highest->saveConfigLocally(BSONObj());
+ }
+ return true;
+ }
+
+ void ReplSetImpl::loadConfig() {
+ while( 1 ) {
+ startupStatus = LOADINGCONFIG;
+ startupStatusMsg.set("loading " + rsConfigNs + " config (LOADINGCONFIG)");
+ LOG(1) << "loadConfig() " << rsConfigNs << endl;
+ try {
+ vector<ReplSetConfig> configs;
+ try {
+ configs.push_back( ReplSetConfig(HostAndPort::me()) );
+ }
+ catch(DBException& e) {
+ log() << "replSet exception loading our local replset configuration object : " << e.toString() << rsLog;
+ }
+ for( vector<HostAndPort>::const_iterator i = _seeds->begin(); i != _seeds->end(); i++ ) {
+ try {
+ configs.push_back( ReplSetConfig(*i) );
+ }
+ catch( DBException& e ) {
+ log() << "replSet exception trying to load config from " << *i << " : " << e.toString() << rsLog;
+ }
+ }
+ {
+ scoped_lock lck( replSettings.discoveredSeeds_mx );
+ if( replSettings.discoveredSeeds.size() > 0 ) {
+ for (set<string>::iterator i = replSettings.discoveredSeeds.begin();
+ i != replSettings.discoveredSeeds.end();
+ i++) {
+ try {
+ configs.push_back( ReplSetConfig(HostAndPort(*i)) );
+ }
+ catch( DBException& ) {
+ log(1) << "replSet exception trying to load config from discovered seed " << *i << rsLog;
+ replSettings.discoveredSeeds.erase(*i);
+ }
+ }
+ }
+ }
+
+ if (!replSettings.reconfig.isEmpty()) {
+ try {
+ configs.push_back(ReplSetConfig(replSettings.reconfig, true));
+ }
+ catch( DBException& re) {
+ log() << "replSet couldn't load reconfig: " << re.what() << rsLog;
+ replSettings.reconfig = BSONObj();
+ }
+ }
+
+ int nok = 0;
+ int nempty = 0;
+ for( vector<ReplSetConfig>::iterator i = configs.begin(); i != configs.end(); i++ ) {
+ if( i->ok() )
+ nok++;
+ if( i->empty() )
+ nempty++;
+ }
+ if( nok == 0 ) {
+
+ if( nempty == (int) configs.size() ) {
+ startupStatus = EMPTYCONFIG;
+ startupStatusMsg.set("can't get " + rsConfigNs + " config from self or any seed (EMPTYCONFIG)");
+ log() << "replSet can't get " << rsConfigNs << " config from self or any seed (EMPTYCONFIG)" << rsLog;
+ static unsigned once;
+ if( ++once == 1 ) {
+ log() << "replSet info you may need to run replSetInitiate -- rs.initiate() in the shell -- if that is not already done" << rsLog;
+ }
+ if( _seeds->size() == 0 ) {
+ LOG(1) << "replSet info no seed hosts were specified on the --replSet command line" << rsLog;
+ }
+ }
+ else {
+ startupStatus = EMPTYUNREACHABLE;
+ startupStatusMsg.set("can't currently get " + rsConfigNs + " config from self or any seed (EMPTYUNREACHABLE)");
+ log() << "replSet can't get " << rsConfigNs << " config from self or any seed (yet)" << rsLog;
+ }
+
+ sleepsecs(10);
+ continue;
+ }
+
+ if( !_loadConfigFinish(configs) ) {
+ log() << "replSet info Couldn't load config yet. Sleeping 20sec and will try again." << rsLog;
+ sleepsecs(20);
+ continue;
+ }
+ }
+ catch(DBException& e) {
+ startupStatus = BADCONFIG;
+ startupStatusMsg.set("replSet error loading set config (BADCONFIG)");
+ log() << "replSet error loading configurations " << e.toString() << rsLog;
+ log() << "replSet error replication will not start" << rsLog;
+ sethbmsg("error loading set config");
+ _fatal();
+ throw;
+ }
+ break;
+ }
+ startupStatusMsg.set("? started");
+ startupStatus = STARTED;
+ }
+
+ void ReplSetImpl::_fatal() {
+ box.set(MemberState::RS_FATAL, 0);
+ log() << "replSet error fatal, stopping replication" << rsLog;
+ }
+
+ void ReplSet::haveNewConfig(ReplSetConfig& newConfig, bool addComment) {
+ bo comment;
+ if( addComment )
+ comment = BSON( "msg" << "Reconfig set" << "version" << newConfig.version );
+
+ newConfig.saveConfigLocally(comment);
+
+ try {
+ if (initFromConfig(newConfig, true)) {
+ log() << "replSet replSetReconfig new config saved locally" << rsLog;
+ }
+ }
+ catch(DBException& e) {
+ if( e.getCode() == 13497 /* removed from set */ ) {
+ cc().shutdown();
+ dbexit( EXIT_CLEAN , "removed from replica set" ); // never returns
+ assert(0);
+ }
+ log() << "replSet error unexpected exception in haveNewConfig() : " << e.toString() << rsLog;
+ _fatal();
+ }
+ catch(...) {
+ log() << "replSet error unexpected exception in haveNewConfig()" << rsLog;
+ _fatal();
+ }
+ }
+
+ void Manager::msgReceivedNewConfig(BSONObj o) {
+ log() << "replset msgReceivedNewConfig version: " << o["version"].toString() << rsLog;
+ ReplSetConfig c(o);
+ if( c.version > rs->config().version )
+ theReplSet->haveNewConfig(c, false);
+ else {
+ log() << "replSet info msgReceivedNewConfig but version isn't higher " <<
+ c.version << ' ' << rs->config().version << rsLog;
+ }
+ }
+
+ /* forked as a thread during startup
+ it can run quite a while looking for config. but once found,
+ a separate thread takes over as ReplSetImpl::Manager, and this thread
+ terminates.
+ */
+ void startReplSets(ReplSetCmdline *replSetCmdline) {
+ Client::initThread("rsStart");
+ try {
+ assert( theReplSet == 0 );
+ if( replSetCmdline == 0 ) {
+ assert(!replSet);
+ return;
+ }
+ replLocalAuth();
+ (theReplSet = new ReplSet(*replSetCmdline))->go();
+ }
+ catch(std::exception& e) {
+ log() << "replSet caught exception in startReplSets thread: " << e.what() << rsLog;
+ if( theReplSet )
+ theReplSet->fatal();
+ }
+ cc().shutdown();
+ }
+
+ void replLocalAuth() {
+ if ( noauth )
+ return;
+ cc().getAuthenticationInfo()->authorize("local","_repl");
+ }
+
+
+}
+
+namespace boost {
+
+ void assertion_failed(char const * expr, char const * function, char const * file, long line) {
+ mongo::log() << "boost assertion failure " << expr << ' ' << function << ' ' << file << ' ' << line << endl;
+ }
+
+}
diff --git a/src/mongo/db/repl/rs.h b/src/mongo/db/repl/rs.h
new file mode 100644
index 00000000000..8e43204be3b
--- /dev/null
+++ b/src/mongo/db/repl/rs.h
@@ -0,0 +1,667 @@
+// /db/repl/rs.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../../util/concurrency/list.h"
+#include "../../util/concurrency/value.h"
+#include "../../util/concurrency/msg.h"
+#include "../../util/net/hostandport.h"
+#include "../commands.h"
+#include "../oplog.h"
+#include "../oplogreader.h"
+#include "rs_exception.h"
+#include "rs_optime.h"
+#include "rs_member.h"
+#include "rs_config.h"
+
+/**
+ * Order of Events
+ *
+ * On startup, if the --replSet option is present, startReplSets is called.
+ * startReplSets forks off a new thread for replica set activities. It creates
+ * the global theReplSet variable and calls go() on it.
+ *
+ * theReplSet's constructor changes the replica set's state to RS_STARTUP,
+ * starts the replica set manager, and loads the config (if the replica set
+ * has been initialized).
+ */
+
+namespace mongo {
+
+ struct HowToFixUp;
+ struct Target;
+ class DBClientConnection;
+ class ReplSetImpl;
+ class OplogReader;
+ extern bool replSet; // true if using repl sets
+ extern class ReplSet *theReplSet; // null until initialized
+ extern Tee *rsLog;
+
+ /* member of a replica set */
+ class Member : public List1<Member>::Base {
+ private:
+ ~Member(); // intentionally unimplemented as should never be called -- see List1<>::Base.
+ Member(const Member&);
+ public:
+ Member(HostAndPort h, unsigned ord, ReplSetConfig::MemberCfg *c, bool self);
+
+ string fullName() const { return h().toString(); }
+ const ReplSetConfig::MemberCfg& config() const { return _config; }
+ ReplSetConfig::MemberCfg& configw() { return _config; }
+ const HeartbeatInfo& hbinfo() const { return _hbinfo; }
+ HeartbeatInfo& get_hbinfo() { return _hbinfo; }
+ string lhb() const { return _hbinfo.lastHeartbeatMsg; }
+ MemberState state() const { return _hbinfo.hbstate; }
+ const HostAndPort& h() const { return _h; }
+ unsigned id() const { return _hbinfo.id(); }
+
+ bool potentiallyHot() const { return _config.potentiallyHot(); } // not arbiter, not priority 0
+ void summarizeMember(stringstream& s) const;
+
+ private:
+ friend class ReplSetImpl;
+ ReplSetConfig::MemberCfg _config;
+ const HostAndPort _h;
+ HeartbeatInfo _hbinfo;
+ };
+
+ namespace replset {
+ /**
+ * "Normal" replica set syncing
+ */
+ class SyncTail : public Sync {
+ public:
+ virtual ~SyncTail() {}
+ SyncTail(const string& host) : Sync(host) {}
+ virtual bool syncApply(const BSONObj &o);
+ };
+
+ /**
+ * Initial clone and sync
+ */
+ class InitialSync : public SyncTail {
+ public:
+ InitialSync(const string& host) : SyncTail(host) {}
+ virtual ~InitialSync() {}
+ bool oplogApplication(OplogReader& r, const Member* source, const OpTime& applyGTE, const OpTime& minValid);
+ virtual void applyOp(const BSONObj& o, const OpTime& minvalid);
+ };
+
+ // TODO: move hbmsg into an error-keeping class (SERVER-4444)
+ void sethbmsg(const string& s, const int logLevel=0);
+
+ } // namespace replset
+
+ class Manager : public task::Server {
+ ReplSetImpl *rs;
+ bool busyWithElectSelf;
+ int _primary;
+
+ /** @param two - if true two primaries were seen. this can happen transiently, in addition to our
+ polling being only occasional. in this case null is returned, but the caller should
+ not assume primary itself in that situation.
+ */
+ const Member* findOtherPrimary(bool& two);
+
+ void noteARemoteIsPrimary(const Member *);
+ void checkElectableSet();
+ void checkAuth();
+ virtual void starting();
+ public:
+ Manager(ReplSetImpl *rs);
+ virtual ~Manager();
+ void msgReceivedNewConfig(BSONObj);
+ void msgCheckNewState();
+ };
+
+ class GhostSync : public task::Server {
+ struct GhostSlave : boost::noncopyable {
+ GhostSlave() : last(0), slave(0), init(false) { }
+ OplogReader reader;
+ OpTime last;
+ Member* slave;
+ bool init;
+ };
+ /**
+ * This is a cache of ghost slaves
+ */
+ typedef map< mongo::OID,shared_ptr<GhostSlave> > MAP;
+ MAP _ghostCache;
+ RWLock _lock; // protects _ghostCache
+ ReplSetImpl *rs;
+ virtual void starting();
+ public:
+ GhostSync(ReplSetImpl *_rs) : task::Server("rsGhostSync"), _lock("GhostSync"), rs(_rs) {}
+ ~GhostSync() {
+ log() << "~GhostSync() called" << rsLog;
+ }
+
+ /**
+ * Replica sets can sync in a hierarchical fashion, which throws off w
+ * calculation on the master. percolate() faux-syncs from an upstream
+ * node so that the primary will know what the slaves are up to.
+ *
+ * We can't just directly sync to the primary because it could be
+ * unreachable, e.g., S1--->S2--->S3--->P. S2 should ghost sync from S3
+ * and S3 can ghost sync from the primary.
+ *
+ * Say we have an S1--->S2--->P situation and this node is S2. rid
+ * would refer to S1. S2 would create a ghost slave of S1 and connect
+ * it to P (_currentSyncTarget). Then it would use this connection to
+ * pretend to be S1, replicating off of P.
+ */
+ void percolate(const BSONObj& rid, const OpTime& last);
+ void associateSlave(const BSONObj& rid, const int memberId);
+ void updateSlave(const mongo::OID& id, const OpTime& last);
+ };
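+
+    /* Illustrative sketch (editor's note, not part of the original source): in an
+       S1--->S2--->S3--->P chain, S2 forwards S1's replication progress roughly as
+
+           theReplSet->ghost->associateSlave(s1Rid, s1MemberId);   // once, when S1 starts syncing from S2
+           theReplSet->ghost->percolate(s1Rid, lastOpAppliedByS1); // on each progress update
+
+       where s1Rid, s1MemberId and lastOpAppliedByS1 are hypothetical placeholders for S1's
+       rid document, member _id, and latest applied optime.
+    */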
+
+ struct Target;
+
+ class Consensus {
+ ReplSetImpl &rs;
+ struct LastYea {
+ LastYea() : when(0), who(0xffffffff) { }
+ time_t when;
+ unsigned who;
+ };
+ static SimpleMutex lyMutex;
+ Guarded<LastYea,lyMutex> ly;
+ unsigned yea(unsigned memberId); // throws VoteException
+ void electionFailed(unsigned meid);
+ void _electSelf();
+ bool weAreFreshest(bool& allUp, int& nTies);
+ bool sleptLast; // slept last elect() pass
+ public:
+ Consensus(ReplSetImpl *t) : rs(*t) {
+ sleptLast = false;
+ steppedDown = 0;
+ }
+
+ /* if we've stepped down, this is when we are allowed to try to elect ourself again.
+ todo: handle possible weirdnesses at clock skews etc.
+ */
+ time_t steppedDown;
+
+ int totalVotes() const;
+ bool aMajoritySeemsToBeUp() const;
+ bool shouldRelinquish() const;
+ void electSelf();
+ void electCmdReceived(BSONObj, BSONObjBuilder*);
+ void multiCommand(BSONObj cmd, list<Target>& L);
+ };
+
+ /**
+ * most operations on a ReplSet object should be done while locked. that
+     * logic is implemented here.
+ *
+ * Order of locking: lock the replica set, then take a rwlock.
+ */
+ class RSBase : boost::noncopyable {
+ public:
+ const unsigned magic;
+ void assertValid() { assert( magic == 0x12345677 ); }
+ private:
+ mongo::mutex m;
+ int _locked;
+ ThreadLocalValue<bool> _lockedByMe;
+ protected:
+ RSBase() : magic(0x12345677), m("RSBase"), _locked(0) { }
+ ~RSBase() {
+ /* this can happen if we throw in the constructor; otherwise never happens. thus we log it as it is quite unusual. */
+ log() << "replSet ~RSBase called" << rsLog;
+ }
+
+ public:
+ class lock {
+ RSBase& rsbase;
+ auto_ptr<scoped_lock> sl;
+ public:
+ lock(RSBase* b) : rsbase(*b) {
+ if( rsbase._lockedByMe.get() )
+ return; // recursive is ok...
+
+ sl.reset( new scoped_lock(rsbase.m) );
+ DEV assert(rsbase._locked == 0);
+ rsbase._locked++;
+ rsbase._lockedByMe.set(true);
+ }
+ ~lock() {
+ if( sl.get() ) {
+ assert( rsbase._lockedByMe.get() );
+ DEV assert(rsbase._locked == 1);
+ rsbase._lockedByMe.set(false);
+ rsbase._locked--;
+ }
+ }
+ };
+
+ /* for asserts */
+ bool locked() const { return _locked != 0; }
+
+        /* if true, is locked, and was locked by this thread. note if false, it could still be locked by another thread.
+ just for asserts & such so we can make the contracts clear on who locks what when.
+ we don't use these locks that frequently, so the little bit of overhead is fine.
+ */
+ bool lockedByMe() { return _lockedByMe.get(); }
+ };
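+
+    /* Illustrative usage sketch (editor's note, not part of the original source): methods on
+       the derived ReplSetImpl typically take the set-wide mutex through a scoped lock, which
+       is recursion-safe within a single thread, e.g.
+
+           void ReplSetImpl::someMethod() {   // hypothetical method
+               lock lk(this);                 // no-op if this thread already holds the lock
+               // ... read or mutate set state ...
+           }
+    */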
+
+ class ReplSetHealthPollTask;
+
+ /* safe container for our state that keeps member pointer and state variables always aligned */
+ class StateBox : boost::noncopyable {
+ public:
+ struct SP { // SP is like pair<MemberState,const Member *> but nicer
+ SP() : state(MemberState::RS_STARTUP), primary(0) { }
+ MemberState state;
+ const Member *primary;
+ };
+ const SP get() {
+ rwlock lk(m, false);
+ return sp;
+ }
+ MemberState getState() const {
+ rwlock lk(m, false);
+ return sp.state;
+ }
+ const Member* getPrimary() const {
+ rwlock lk(m, false);
+ return sp.primary;
+ }
+ void change(MemberState s, const Member *self) {
+ rwlock lk(m, true);
+ if( sp.state != s ) {
+ log() << "replSet " << s.toString() << rsLog;
+ }
+ sp.state = s;
+ if( s.primary() ) {
+ sp.primary = self;
+ }
+ else {
+ if( self == sp.primary )
+ sp.primary = 0;
+ }
+ }
+ void set(MemberState s, const Member *p) {
+ rwlock lk(m, true);
+ sp.state = s;
+ sp.primary = p;
+ }
+ void setSelfPrimary(const Member *self) { change(MemberState::RS_PRIMARY, self); }
+ void setOtherPrimary(const Member *mem) {
+ rwlock lk(m, true);
+ assert( !sp.state.primary() );
+ sp.primary = mem;
+ }
+ void noteRemoteIsPrimary(const Member *remote) {
+ rwlock lk(m, true);
+ if( !sp.state.secondary() && !sp.state.fatal() )
+ sp.state = MemberState::RS_RECOVERING;
+ sp.primary = remote;
+ }
+ StateBox() : m("StateBox") { }
+ private:
+ RWLock m;
+ SP sp;
+ };
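+
+    /* Illustrative usage sketch (editor's note, not part of the original source): readers take
+       a single consistent snapshot of (state, primary) rather than two separate reads, e.g.
+
+           const StateBox::SP sp = box.get();
+           bool isp = sp.state.primary();   // if true, sp.primary refers to this node
+
+       which matches how ReplSetImpl::_fillIsMaster() uses it.
+    */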
+
+ void parseReplsetCmdLine(string cfgString, string& setname, vector<HostAndPort>& seeds, set<HostAndPort>& seedSet );
+
+ /** Parameter given to the --replSet command line option (parsed).
+ Syntax is "<setname>/<seedhost1>,<seedhost2>"
+ where setname is a name and seedhost is "<host>[:<port>]" */
+ class ReplSetCmdline {
+ public:
+ ReplSetCmdline(string cfgString) { parseReplsetCmdLine(cfgString, setname, seeds, seedSet); }
+ string setname;
+ vector<HostAndPort> seeds;
+ set<HostAndPort> seedSet;
+ };
+
+ /* information about the entire repl set, such as the various servers in the set, and their state */
+ /* note: We currently do not free mem when the set goes away - it is assumed the replset is a
+ singleton and long lived.
+ */
+ class ReplSetImpl : protected RSBase {
+ public:
+ /** info on our state if the replset isn't yet "up". for example, if we are pre-initiation. */
+ enum StartupStatus {
+ PRESTART=0, LOADINGCONFIG=1, BADCONFIG=2, EMPTYCONFIG=3,
+ EMPTYUNREACHABLE=4, STARTED=5, SOON=6
+ };
+ static StartupStatus startupStatus;
+ static DiagStr startupStatusMsg;
+ static string stateAsHtml(MemberState state);
+
+ /* todo thread */
+ void msgUpdateHBInfo(HeartbeatInfo);
+
+ StateBox box;
+
+ OpTime lastOpTimeWritten;
+ long long lastH; // hash we use to make sure we are reading the right flow of ops and aren't on an out-of-date "fork"
+ private:
+ set<ReplSetHealthPollTask*> healthTasks;
+ void endOldHealthTasks();
+ void startHealthTaskFor(Member *m);
+
+ Consensus elect;
+ void relinquish();
+ void forgetPrimary();
+ protected:
+ bool _stepDown(int secs);
+ bool _freeze(int secs);
+ private:
+ void assumePrimary();
+ void loadLastOpTimeWritten(bool quiet=false);
+ void changeState(MemberState s);
+
+ /**
+ * Find the closest member (using ping time) with a higher latest optime.
+ */
+ Member* getMemberToSyncTo();
+ void veto(const string& host, unsigned secs=10);
+ Member* _currentSyncTarget;
+
+ bool _blockSync;
+ void blockSync(bool block);
+
+ // set of electable members' _ids
+ set<unsigned> _electableSet;
+ protected:
+ // "heartbeat message"
+        // sent in the requestHeartbeat response, in field "hbm"
+        char _hbmsg[256]; // we change this unlocked, thus not a std::string
+ time_t _hbmsgTime; // when it was logged
+ public:
+ void sethbmsg(string s, int logLevel = 0);
+
+ /**
+ * Election with Priorities
+ *
+ * Each node (n) keeps a set of nodes that could be elected primary.
+ * Each node in this set:
+ *
+ * 1. can connect to a majority of the set
+ * 2. has a priority greater than 0
+ * 3. has an optime within 10 seconds of the most up-to-date node
+ * that n can reach
+ *
+ * If a node fails to meet one or more of these criteria, it is removed
+ * from the list. This list is updated whenever the node receives a
+ * heartbeat.
+ *
+ * When a node sends an "am I freshest?" query, the node receiving the
+ * query checks their electable list to make sure that no one else is
+ * electable AND higher priority. If this check passes, the node will
+ * return an "ok" response, if not, it will veto.
+ *
+ * If a node is primary and there is another node with higher priority
+ * on the electable list (i.e., it must be synced to within 10 seconds
+ * of the current primary), the node (or nodes) with connections to both
+ * the primary and the secondary with higher priority will issue
+ * replSetStepDown requests to the primary to allow the higher-priority
+ * node to take over.
+ */
+ void addToElectable(const unsigned m) { lock lk(this); _electableSet.insert(m); }
+ void rmFromElectable(const unsigned m) { lock lk(this); _electableSet.erase(m); }
+ bool iAmElectable() { lock lk(this); return _electableSet.find(_self->id()) != _electableSet.end(); }
+ bool isElectable(const unsigned id) { lock lk(this); return _electableSet.find(id) != _electableSet.end(); }
+ Member* getMostElectable();
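+
+        /* Editor's note (not part of the original source), an illustrative view of the helpers
+           above: the electable set is refreshed as heartbeats arrive (cf. Manager::checkElectableSet), e.g.
+
+               if( stillMeetsCriteria ) addToElectable(m->id());
+               else                     rmFromElectable(m->id());
+
+           where stillMeetsCriteria and m are hypothetical placeholders; getMostElectable()
+           then picks the highest-priority member of the resulting set.
+        */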
+ protected:
+ /**
+ * Load a new config as the replica set's main config.
+ *
+ * If there is a "simple" change (just adding a node), this shortcuts
+         * the full config reload. Returns true if the config was changed. Returns false
+         * if the config doesn't include this node. Throws an exception if
+ * something goes very wrong.
+ *
+ * Behavior to note:
+ * - locks this
+ * - intentionally leaks the old _cfg and any old _members (if the
+ * change isn't strictly additive)
+ */
+ bool initFromConfig(ReplSetConfig& c, bool reconf=false);
+ void _fillIsMaster(BSONObjBuilder&);
+ void _fillIsMasterHost(const Member*, vector<string>&, vector<string>&, vector<string>&);
+ const ReplSetConfig& config() { return *_cfg; }
+ string name() const { return _name; } /* @return replica set's logical name */
+ MemberState state() const { return box.getState(); }
+ void _fatal();
+ void _getOplogDiagsAsHtml(unsigned server_id, stringstream& ss) const;
+ void _summarizeAsHtml(stringstream&) const;
+ void _summarizeStatus(BSONObjBuilder&) const; // for replSetGetStatus command
+
+        /* throws an exception if there is a problem initializing. */
+ ReplSetImpl(ReplSetCmdline&);
+
+        /* call after constructing to start - returns fairly quickly after launching its threads */
+ void _go();
+
+ private:
+ string _name;
+ const vector<HostAndPort> *_seeds;
+ ReplSetConfig *_cfg;
+
+ /**
+ * Finds the configuration with the highest version number and attempts
+         * to load it.
+ */
+ bool _loadConfigFinish(vector<ReplSetConfig>& v);
+ /**
+ * Gather all possible configs (from command line seeds, our own config
+ * doc, and any hosts listed therein) and try to initiate from the most
+ * recent config we find.
+ */
+ void loadConfig();
+
+ list<HostAndPort> memberHostnames() const;
+ const ReplSetConfig::MemberCfg& myConfig() const { return _config; }
+ bool iAmArbiterOnly() const { return myConfig().arbiterOnly; }
+ bool iAmPotentiallyHot() const {
+ return myConfig().potentiallyHot() && // not an arbiter
+ elect.steppedDown <= time(0) && // not stepped down/frozen
+ state() == MemberState::RS_SECONDARY; // not stale
+ }
+ protected:
+ Member *_self;
+ bool _buildIndexes; // = _self->config().buildIndexes
+ void setSelfTo(Member *); // use this as it sets buildIndexes var
+ private:
+ List1<Member> _members; // all members of the set EXCEPT _self.
+ ReplSetConfig::MemberCfg _config; // config of _self
+ unsigned _id; // _id of _self
+
+ int _maintenanceMode; // if we should stay in recovering state
+ public:
+ // this is called from within a writelock in logOpRS
+ unsigned selfId() const { return _id; }
+ Manager *mgr;
+ GhostSync *ghost;
+ /**
+ * This forces a secondary to go into recovering state and stay there
+ * until this is called again, passing in "false". Multiple threads can
+ * call this and it will leave maintenance mode once all of the callers
+ * have called it again, passing in false.
+ */
+ void setMaintenanceMode(const bool inc);
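+
+        /* Illustrative usage sketch (editor's note, not part of the original source):
+
+               theReplSet->setMaintenanceMode(true);   // enter RS_RECOVERING, bump the counter
+               // ... perform maintenance ...
+               theReplSet->setMaintenanceMode(false);  // decrement; normal secondary operation
+                                                       // resumes once all callers have passed false
+        */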
+ private:
+ Member* head() const { return _members.head(); }
+ public:
+ const Member* findById(unsigned id) const;
+ private:
+ void _getTargets(list<Target>&, int &configVersion);
+ void getTargets(list<Target>&, int &configVersion);
+ void startThreads();
+ friend class FeedbackThread;
+ friend class CmdReplSetElect;
+ friend class Member;
+ friend class Manager;
+ friend class GhostSync;
+ friend class Consensus;
+
+ private:
+ bool initialSyncOplogApplication(const OpTime& applyGTE, const OpTime& minValid);
+ void _syncDoInitialSync();
+ void syncDoInitialSync();
+ void _syncThread();
+ bool tryToGoLiveAsASecondary(OpTime&); // readlocks
+ void syncTail();
+ unsigned _syncRollback(OplogReader& r);
+ void syncRollback(OplogReader& r);
+ void syncFixUp(HowToFixUp& h, OplogReader& r);
+
+ // get an oplog reader for a server with an oplog entry timestamp greater
+ // than or equal to minTS, if set.
+ Member* _getOplogReader(OplogReader& r, const OpTime& minTS);
+
+ // check lastOpTimeWritten against the remote's earliest op, filling in
+ // remoteOldestOp.
+ bool _isStale(OplogReader& r, const OpTime& minTS, BSONObj& remoteOldestOp);
+
+ // keep a list of hosts that we've tried recently that didn't work
+ map<string,time_t> _veto;
+ public:
+ void syncThread();
+ const OpTime lastOtherOpTime() const;
+ };
+
+ class ReplSet : public ReplSetImpl {
+ public:
+ ReplSet(ReplSetCmdline& replSetCmdline) : ReplSetImpl(replSetCmdline) { }
+
+ // for the replSetStepDown command
+ bool stepDown(int secs) { return _stepDown(secs); }
+
+ // for the replSetFreeze command
+ bool freeze(int secs) { return _freeze(secs); }
+
+ string selfFullName() {
+ assert( _self );
+ return _self->fullName();
+ }
+
+ bool buildIndexes() const { return _buildIndexes; }
+
+        /* call after constructing to start - returns fairly quickly after launching its threads */
+ void go() { _go(); }
+
+ void fatal() { _fatal(); }
+ bool isPrimary() { return box.getState().primary(); }
+ bool isSecondary() { return box.getState().secondary(); }
+ MemberState state() const { return ReplSetImpl::state(); }
+ string name() const { return ReplSetImpl::name(); }
+ const ReplSetConfig& config() { return ReplSetImpl::config(); }
+ void getOplogDiagsAsHtml(unsigned server_id, stringstream& ss) const { _getOplogDiagsAsHtml(server_id,ss); }
+ void summarizeAsHtml(stringstream& ss) const { _summarizeAsHtml(ss); }
+ void summarizeStatus(BSONObjBuilder& b) const { _summarizeStatus(b); }
+ void fillIsMaster(BSONObjBuilder& b) { _fillIsMaster(b); }
+
+ /**
+ * We have a new config (reconfig) - apply it.
+ * @param comment write a no-op comment to the oplog about it. only
+ * makes sense if one is primary and initiating the reconf.
+ *
+ * The slaves are updated when they get a heartbeat indicating the new
+ * config. The comment is a no-op.
+ */
+ void haveNewConfig(ReplSetConfig& c, bool comment);
+
+ /**
+ * Pointer assignment isn't necessarily atomic, so this needs to assure
+ * locking, even though we don't delete old configs.
+ */
+ const ReplSetConfig& getConfig() { return config(); }
+
+ bool lockedByMe() { return RSBase::lockedByMe(); }
+
+ // heartbeat msg to send to others; descriptive diagnostic info
+ string hbmsg() const {
+ if( time(0)-_hbmsgTime > 120 ) return "";
+ return _hbmsg;
+ }
+ };
+
+ /**
+     * Base class for repl set commands. Checks basic things such as whether we're in
+ * rs mode before the command does its real work.
+ */
+ class ReplSetCommand : public Command {
+ protected:
+ ReplSetCommand(const char * s, bool show=false) : Command(s, show) { }
+ virtual bool slaveOk() const { return true; }
+ virtual bool adminOnly() const { return true; }
+ virtual bool logTheOp() { return false; }
+ virtual LockType locktype() const { return NONE; }
+ virtual void help( stringstream &help ) const { help << "internal"; }
+
+ /**
+ * Some replica set commands call this and then call check(). This is
+ * intentional, as they might do things before theReplSet is initialized
+ * that still need to be checked for auth.
+ */
+ bool checkAuth(string& errmsg, BSONObjBuilder& result) {
+ if( !noauth ) {
+ AuthenticationInfo *ai = cc().getAuthenticationInfo();
+ if (!ai->isAuthorizedForLock("admin", locktype())) {
+ errmsg = "replSet command unauthorized";
+ return false;
+ }
+ }
+ return true;
+ }
+
+ bool check(string& errmsg, BSONObjBuilder& result) {
+ if( !replSet ) {
+ errmsg = "not running with --replSet";
+ if( cmdLine.configsvr ) {
+ result.append("info", "configsvr"); // for shell prompt
+ }
+ return false;
+ }
+
+ if( theReplSet == 0 ) {
+ result.append("startupStatus", ReplSet::startupStatus);
+ string s;
+ errmsg = ReplSet::startupStatusMsg.empty() ? "replset unknown error 2" : ReplSet::startupStatusMsg.get();
+ if( ReplSet::startupStatus == 3 )
+ result.append("info", "run rs.initiate(...) if not yet done for the set");
+ return false;
+ }
+
+ return checkAuth(errmsg, result);
+ }
+ };
+
+ /**
+ * does local authentication
+ * directly authorizes against AuthenticationInfo
+ */
+ void replLocalAuth();
+
+ /** inlines ----------------- */
+
+ inline Member::Member(HostAndPort h, unsigned ord, ReplSetConfig::MemberCfg *c, bool self) :
+ _config(*c), _h(h), _hbinfo(ord) {
+ assert(c);
+ if( self )
+ _hbinfo.health = 1.0;
+ }
+
+}
diff --git a/src/mongo/db/repl/rs_config.cpp b/src/mongo/db/repl/rs_config.cpp
new file mode 100644
index 00000000000..22137773aec
--- /dev/null
+++ b/src/mongo/db/repl/rs_config.cpp
@@ -0,0 +1,662 @@
+// rs_config.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "rs.h"
+#include "../../client/dbclient.h"
+#include "../../client/syncclusterconnection.h"
+#include "../../util/net/hostandport.h"
+#include "../dbhelpers.h"
+#include "connections.h"
+#include "../oplog.h"
+#include "../instance.h"
+#include "../../util/text.h"
+#include <boost/algorithm/string.hpp>
+
+using namespace bson;
+
+namespace mongo {
+
+ void logOpInitiate(const bo&);
+
+ void assertOnlyHas(BSONObj o, const set<string>& fields) {
+ BSONObj::iterator i(o);
+ while( i.more() ) {
+ BSONElement e = i.next();
+ if( !fields.count( e.fieldName() ) ) {
+ uasserted(13434, str::stream() << "unexpected field '" << e.fieldName() << "' in object");
+ }
+ }
+ }
+
+ list<HostAndPort> ReplSetConfig::otherMemberHostnames() const {
+ list<HostAndPort> L;
+ for( vector<MemberCfg>::const_iterator i = members.begin(); i != members.end(); i++ ) {
+ if( !i->h.isSelf() )
+ L.push_back(i->h);
+ }
+ return L;
+ }
+
+ /* comment MUST only be set when initiating the set by the initiator */
+ void ReplSetConfig::saveConfigLocally(bo comment) {
+ checkRsConfig();
+ log() << "replSet info saving a newer config version to local.system.replset" << rsLog;
+ {
+ writelock lk("");
+ Client::Context cx( rsConfigNs );
+ cx.db()->flushFiles(true);
+
+ //theReplSet->lastOpTimeWritten = ??;
+ //rather than above, do a logOp()? probably
+ BSONObj o = asBson();
+ Helpers::putSingletonGod(rsConfigNs.c_str(), o, false/*logOp=false; local db so would work regardless...*/);
+ if( !comment.isEmpty() && (!theReplSet || theReplSet->isPrimary()) )
+ logOpInitiate(comment);
+
+ cx.db()->flushFiles(true);
+ }
+ log() << "replSet saveConfigLocally done" << rsLog;
+ }
+
+ bo ReplSetConfig::MemberCfg::asBson() const {
+ bob b;
+ b << "_id" << _id;
+ b.append("host", h.dynString());
+ if( votes != 1 ) b << "votes" << votes;
+ if( priority != 1.0 ) b << "priority" << priority;
+ if( arbiterOnly ) b << "arbiterOnly" << true;
+ if( slaveDelay ) b << "slaveDelay" << slaveDelay;
+ if( hidden ) b << "hidden" << hidden;
+ if( !buildIndexes ) b << "buildIndexes" << buildIndexes;
+ if( !tags.empty() ) {
+ BSONObjBuilder a;
+ for( map<string,string>::const_iterator i = tags.begin(); i != tags.end(); i++ )
+ a.append((*i).first, (*i).second);
+ b.append("tags", a.done());
+ }
+ return b.obj();
+ }
+
+ void ReplSetConfig::updateMembers(List1<Member> &dest) {
+ for (vector<MemberCfg>::iterator source = members.begin(); source < members.end(); source++) {
+ for( Member *d = dest.head(); d; d = d->next() ) {
+ if (d->fullName() == (*source).h.toString()) {
+ d->configw().groupsw() = (*source).groups();
+ }
+ }
+ }
+ }
+
+ bo ReplSetConfig::asBson() const {
+ bob b;
+ b.append("_id", _id).append("version", version);
+
+ BSONArrayBuilder a;
+ for( unsigned i = 0; i < members.size(); i++ )
+ a.append( members[i].asBson() );
+ b.append("members", a.arr());
+
+ if( !ho.isDefault() || !getLastErrorDefaults.isEmpty() || !rules.empty()) {
+ bob settings;
+ if( !rules.empty() ) {
+ bob modes;
+ for (map<string,TagRule*>::const_iterator it = rules.begin(); it != rules.end(); it++) {
+ bob clauses;
+ vector<TagClause*> r = (*it).second->clauses;
+ for (vector<TagClause*>::iterator it2 = r.begin(); it2 < r.end(); it2++) {
+ clauses << (*it2)->name << (*it2)->target;
+ }
+ modes << (*it).first << clauses.obj();
+ }
+ settings << "getLastErrorModes" << modes.obj();
+ }
+ if( !getLastErrorDefaults.isEmpty() )
+ settings << "getLastErrorDefaults" << getLastErrorDefaults;
+ b << "settings" << settings.obj();
+ }
+
+ return b.obj();
+ }
+
+ static inline void mchk(bool expr) {
+ uassert(13126, "bad Member config", expr);
+ }
+
+ void ReplSetConfig::MemberCfg::check() const {
+ mchk(_id >= 0 && _id <= 255);
+ mchk(priority >= 0 && priority <= 1000);
+ mchk(votes <= 100); // votes >= 0 because it is unsigned
+ uassert(13419, "priorities must be between 0.0 and 100.0", priority >= 0.0 && priority <= 100.0);
+ uassert(13437, "slaveDelay requires priority be zero", slaveDelay == 0 || priority == 0);
+ uassert(13438, "bad slaveDelay value", slaveDelay >= 0 && slaveDelay <= 3600 * 24 * 366);
+ uassert(13439, "priority must be 0 when hidden=true", priority == 0 || !hidden);
+ uassert(13477, "priority must be 0 when buildIndexes=false", buildIndexes || priority == 0);
+ }
+/*
+ string ReplSetConfig::TagSubgroup::toString() const {
+ bool first = true;
+ string result = "\""+name+"\": [";
+ for (set<const MemberCfg*>::const_iterator i = m.begin(); i != m.end(); i++) {
+ if (!first) {
+ result += ", ";
+ }
+ first = false;
+ result += (*i)->h.toString();
+ }
+ return result+"]";
+ }
+ */
+ string ReplSetConfig::TagClause::toString() const {
+ string result = name+": {";
+ for (map<string,TagSubgroup*>::const_iterator i = subgroups.begin(); i != subgroups.end(); i++) {
+//TEMP? result += (*i).second->toString()+", ";
+ }
+ result += "TagClause toString TEMPORARILY DISABLED";
+ return result + "}";
+ }
+
+ string ReplSetConfig::TagRule::toString() const {
+ string result = "{";
+ for (vector<TagClause*>::const_iterator it = clauses.begin(); it < clauses.end(); it++) {
+ result += ((TagClause*)(*it))->toString()+",";
+ }
+ return result+"}";
+ }
+
+ void ReplSetConfig::TagSubgroup::updateLast(const OpTime& op) {
+ RACECHECK
+ if (last < op) {
+ last = op;
+
+ for (vector<TagClause*>::iterator it = clauses.begin(); it < clauses.end(); it++) {
+ (*it)->updateLast(op);
+ }
+ }
+ }
+
+ void ReplSetConfig::TagClause::updateLast(const OpTime& op) {
+ RACECHECK
+ if (last >= op) {
+ return;
+ }
+
+ // check at least n subgroups greater than clause.last
+ int count = 0;
+ map<string,TagSubgroup*>::iterator it;
+ for (it = subgroups.begin(); it != subgroups.end(); it++) {
+ if ((*it).second->last >= op) {
+ count++;
+ }
+ }
+
+ if (count >= actualTarget) {
+ last = op;
+ rule->updateLast(op);
+ }
+ }
+
+ void ReplSetConfig::TagRule::updateLast(const OpTime& op) {
+ OpTime *earliest = (OpTime*)&op;
+ vector<TagClause*>::iterator it;
+
+ for (it = clauses.begin(); it < clauses.end(); it++) {
+ if ((*it)->last < *earliest) {
+ earliest = &(*it)->last;
+ }
+ }
+
+ // rules are simply and-ed clauses, so whatever the most-behind
+ // clause is at is what the rule is at
+ last = *earliest;
+ }
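+
+    /* Worked example (editor's note, not part of the original source): a rule whose clauses
+       have last optimes of, say, 5 and 9 ends up with last == 5 -- the rule only advances
+       once its slowest (most-behind) clause has advanced.
+    */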
+
+ /** @param o old config
+ @param n new config
+ */
+ /*static*/
+ bool ReplSetConfig::legalChange(const ReplSetConfig& o, const ReplSetConfig& n, string& errmsg) {
+ assert( theReplSet );
+
+ if( o._id != n._id ) {
+ errmsg = "set name may not change";
+ return false;
+ }
+ /* TODO : wonder if we need to allow o.version < n.version only, which is more lenient.
+           if someone had some intermediate config this node doesn't have, that could be
+ necessary. but then how did we become primary? so perhaps we are fine as-is.
+ */
+ if( o.version >= n.version ) {
+ errmsg = str::stream() << "version number must increase, old: "
+ << o.version << " new: " << n.version;
+ return false;
+ }
+
+ map<HostAndPort,const ReplSetConfig::MemberCfg*> old;
+ bool isLocalHost = false;
+ for( vector<ReplSetConfig::MemberCfg>::const_iterator i = o.members.begin(); i != o.members.end(); i++ ) {
+ if (i->h.isLocalHost()) {
+ isLocalHost = true;
+ }
+ old[i->h] = &(*i);
+ }
+ int me = 0;
+ for( vector<ReplSetConfig::MemberCfg>::const_iterator i = n.members.begin(); i != n.members.end(); i++ ) {
+ const ReplSetConfig::MemberCfg& m = *i;
+ if ( (isLocalHost && !m.h.isLocalHost()) || (!isLocalHost && m.h.isLocalHost())) {
+ log() << "reconfig error, cannot switch between localhost and hostnames: "
+ << m.h.toString() << rsLog;
+ uasserted(13645, "hosts cannot switch between localhost and hostname");
+ }
+ if( old.count(m.h) ) {
+ const ReplSetConfig::MemberCfg& oldCfg = *old[m.h];
+ if( oldCfg._id != m._id ) {
+ log() << "replSet reconfig error with member: " << m.h.toString() << rsLog;
+ uasserted(13432, "_id may not change for members");
+ }
+ if( oldCfg.buildIndexes != m.buildIndexes ) {
+ log() << "replSet reconfig error with member: " << m.h.toString() << rsLog;
+ uasserted(13476, "buildIndexes may not change for members");
+ }
+ /* are transitions to and from arbiterOnly guaranteed safe? if not, we should disallow here.
+ there is a test at replsets/replsetarb3.js */
+ if( oldCfg.arbiterOnly != m.arbiterOnly ) {
+ log() << "replSet reconfig error with member: " << m.h.toString() << " arbiterOnly cannot change. remove and readd the member instead " << rsLog;
+ uasserted(13510, "arbiterOnly may not change for members");
+ }
+ }
+ if( m.h.isSelf() )
+ me++;
+ }
+
+ uassert(13433, "can't find self in new replset config", me == 1);
+
+ return true;
+ }
+
+ void ReplSetConfig::clear() {
+ version = -5;
+ _ok = false;
+ }
+
+ void ReplSetConfig::setMajority() {
+ int total = members.size();
+ int nonArbiters = total;
+ int strictMajority = total/2+1;
+
+ for (vector<MemberCfg>::iterator it = members.begin(); it < members.end(); it++) {
+ if ((*it).arbiterOnly) {
+ nonArbiters--;
+ }
+ }
+
+ // majority should be all "normal" members if we have something like 4
+ // arbiters & 3 normal members
+ _majority = (strictMajority > nonArbiters) ? nonArbiters : strictMajority;
+ }
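+
+    /* Worked example (editor's note, not part of the original source): with 3 data-bearing
+       members and 4 arbiters, total = 7, strictMajority = 7/2 + 1 = 4, nonArbiters = 3, so
+       _majority = 3 (all "normal" members). With a plain 3-member set, strictMajority = 2 is
+       not greater than nonArbiters = 3, so _majority = 2.
+    */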
+
+ int ReplSetConfig::getMajority() const {
+ return _majority;
+ }
+
+ void ReplSetConfig::checkRsConfig() const {
+ uassert(13132,
+ str::stream() << "nonmatching repl set name in _id field: " << _id << " vs. " << cmdLine.ourSetName(),
+ _id == cmdLine.ourSetName());
+ uassert(13308, "replSet bad config version #", version > 0);
+ uassert(13133, "replSet bad config no members", members.size() >= 1);
+ uassert(13309, "replSet bad config maximum number of members is 12", members.size() <= 12);
+ {
+ unsigned voters = 0;
+ for( vector<MemberCfg>::const_iterator i = members.begin(); i != members.end(); ++i ) {
+ if( i->votes )
+ voters++;
+ }
+ uassert(13612, "replSet bad config maximum number of voting members is 7", voters <= 7);
+ uassert(13613, "replSet bad config no voting members", voters > 0);
+ }
+ }
+
+ void ReplSetConfig::_populateTagMap(map<string,TagClause> &tagMap) {
+ // create subgroups for each server corresponding to each of
+ // its tags. E.g.:
+ //
+ // A is tagged with {"server" : "A", "dc" : "ny"}
+ // B is tagged with {"server" : "B", "dc" : "ny"}
+ //
+ // At the end of this step, tagMap will contain:
+ //
+ // "server" => {"A" : [A], "B" : [B]}
+ // "dc" => {"ny" : [A,B]}
+
+ for (unsigned i=0; i<members.size(); i++) {
+ MemberCfg member = members[i];
+
+ for (map<string,string>::iterator tag = member.tags.begin(); tag != member.tags.end(); tag++) {
+ string label = (*tag).first;
+ string value = (*tag).second;
+
+ TagClause& clause = tagMap[label];
+ clause.name = label;
+
+ TagSubgroup* subgroup;
+ // search for "ny" in "dc"'s clause
+ if (clause.subgroups.find(value) == clause.subgroups.end()) {
+ clause.subgroups[value] = subgroup = new TagSubgroup(value);
+ }
+ else {
+ subgroup = clause.subgroups[value];
+ }
+
+ subgroup->m.insert(&members[i]);
+ }
+ }
+ }
+
+ void ReplSetConfig::parseRules(const BSONObj& modes) {
+ map<string,TagClause> tagMap;
+ _populateTagMap(tagMap);
+
+ for (BSONObj::iterator i = modes.begin(); i.more(); ) {
+ unsigned int primaryOnly = 0;
+
+ // ruleName : {dc : 2, m : 3}
+ BSONElement rule = i.next();
+ uassert(14046, "getLastErrorMode rules must be objects", rule.type() == mongo::Object);
+
+ TagRule* r = new TagRule();
+
+ BSONObj clauseObj = rule.Obj();
+ for (BSONObj::iterator c = clauseObj.begin(); c.more(); ) {
+ BSONElement clauseElem = c.next();
+ uassert(14829, "getLastErrorMode criteria must be numeric", clauseElem.isNumber());
+
+ // get the clause, e.g., "x.y" : 3
+ const char *criteria = clauseElem.fieldName();
+ int value = clauseElem.numberInt();
+ uassert(14828, str::stream() << "getLastErrorMode criteria must be greater than 0: " << clauseElem, value > 0);
+
+ TagClause* node = new TagClause(tagMap[criteria]);
+
+ int numGroups = node->subgroups.size();
+ uassert(14831, str::stream() << "mode " << clauseObj << " requires "
+ << value << " tagged with " << criteria << ", but only "
+ << numGroups << " with this tag were found", numGroups >= value);
+
+ node->name = criteria;
+ node->target = value;
+ // if any subgroups contain "me", we can decrease the target
+ node->actualTarget = node->target;
+
+ // then we want to add pointers between clause & subgroup
+ for (map<string,TagSubgroup*>::iterator sgs = node->subgroups.begin();
+ sgs != node->subgroups.end(); sgs++) {
+ bool foundMe = false;
+ (*sgs).second->clauses.push_back(node);
+
+ // if this subgroup contains the primary, it's automatically always up-to-date
+ for( set<MemberCfg*>::const_iterator cfg = (*sgs).second->m.begin();
+ cfg != (*sgs).second->m.end();
+ cfg++)
+ {
+ if ((*cfg)->h.isSelf()) {
+ node->actualTarget--;
+ foundMe = true;
+ }
+ }
+
+ for (set<MemberCfg *>::iterator cfg = (*sgs).second->m.begin();
+ !foundMe && cfg != (*sgs).second->m.end(); cfg++) {
+ (*cfg)->groupsw().insert((*sgs).second);
+ }
+ }
+
+ // if all of the members of this clause involve the primary, it's always up-to-date
+ if (node->actualTarget == 0) {
+ node->last = OpTime(INT_MAX, INT_MAX);
+ primaryOnly++;
+ }
+
+ // this is a valid clause, so we want to add it to its rule
+ node->rule = r;
+ r->clauses.push_back(node);
+ }
+
+ // if all of the clauses are satisfied by the primary, this rule is trivially true
+ if (primaryOnly == r->clauses.size()) {
+ r->last = OpTime(INT_MAX, INT_MAX);
+ }
+
+ // if we got here, this is a valid rule
+ LOG(1) << "replSet new rule " << rule.fieldName() << ": " << r->toString() << rsLog;
+ rules[rule.fieldName()] = r;
+ }
+ }
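+
+    /* Illustrative sketch (editor's note, not part of the original source): with members
+       tagged e.g. {"dc" : "ny"} and {"dc" : "sf"}, a config containing
+
+           "settings" : { "getLastErrorModes" : { "multiDC" : { "dc" : 2 } } }
+
+       is parsed here into one TagRule named "multiDC" holding a single TagClause that is
+       satisfied once members in at least 2 distinct "dc" subgroups have the write.
+    */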
+
+ void ReplSetConfig::from(BSONObj o) {
+ static const string legal[] = {"_id","version", "members","settings"};
+ static const set<string> legals(legal, legal + 4);
+ assertOnlyHas(o, legals);
+
+ md5 = o.md5();
+ _id = o["_id"].String();
+ if( o["version"].ok() ) {
+ version = o["version"].numberInt();
+ uassert(13115, "bad " + rsConfigNs + " config: version", version > 0);
+ }
+
+ set<string> hosts;
+ set<int> ords;
+ vector<BSONElement> members;
+ try {
+ members = o["members"].Array();
+ }
+ catch(...) {
+ uasserted(13131, "replSet error parsing (or missing) 'members' field in config object");
+ }
+
+ unsigned localhosts = 0;
+ for( unsigned i = 0; i < members.size(); i++ ) {
+ BSONObj mobj = members[i].Obj();
+ MemberCfg m;
+ try {
+ static const string legal[] = {
+ "_id","votes","priority","host", "hidden","slaveDelay",
+ "arbiterOnly","buildIndexes","tags","initialSync" // deprecated
+ };
+ static const set<string> legals(legal, legal + 10);
+ assertOnlyHas(mobj, legals);
+
+ try {
+ m._id = (int) mobj["_id"].Number();
+ }
+ catch(...) {
+ /* TODO: use of string exceptions may be problematic for reconfig case! */
+ throw "_id must be numeric";
+ }
+ try {
+ string s = mobj["host"].String();
+ boost::trim(s);
+ m.h = HostAndPort(s);
+ if ( !m.h.hasPort() ) {
+ // make port explicit even if default
+ m.h.setPort(m.h.port());
+ }
+ }
+ catch(...) {
+ throw string("bad or missing host field? ") + mobj.toString();
+ }
+ if( m.h.isLocalHost() )
+ localhosts++;
+ m.arbiterOnly = mobj["arbiterOnly"].trueValue();
+ m.slaveDelay = mobj["slaveDelay"].numberInt();
+ if( mobj.hasElement("hidden") )
+ m.hidden = mobj["hidden"].trueValue();
+ if( mobj.hasElement("buildIndexes") )
+ m.buildIndexes = mobj["buildIndexes"].trueValue();
+ if( mobj.hasElement("priority") )
+ m.priority = mobj["priority"].Number();
+ if( mobj.hasElement("votes") )
+ m.votes = (unsigned) mobj["votes"].Number();
+ if( mobj.hasElement("tags") ) {
+ const BSONObj &t = mobj["tags"].Obj();
+ for (BSONObj::iterator c = t.begin(); c.more(); c.next()) {
+ m.tags[(*c).fieldName()] = (*c).String();
+ }
+ uassert(14827, "arbiters cannot have tags", !m.arbiterOnly || m.tags.empty() );
+ }
+ m.check();
+ }
+ catch( const char * p ) {
+ log() << "replSet cfg parsing exception for members[" << i << "] " << p << rsLog;
+ stringstream ss;
+ ss << "replSet members[" << i << "] " << p;
+ uassert(13107, ss.str(), false);
+ }
+ catch(DBException& e) {
+ log() << "replSet cfg parsing exception for members[" << i << "] " << e.what() << rsLog;
+ stringstream ss;
+ ss << "bad config for member[" << i << "] " << e.what();
+ uassert(13135, ss.str(), false);
+ }
+ if( !(ords.count(m._id) == 0 && hosts.count(m.h.toString()) == 0) ) {
+ log() << "replSet " << o.toString() << rsLog;
+ uassert(13108, "bad replset config -- duplicate hosts in the config object?", false);
+ }
+ hosts.insert(m.h.dynString());
+ ords.insert(m._id);
+ this->members.push_back(m);
+ }
+ uassert(13393, "can't use localhost in repl set member names except when using it for all members", localhosts == 0 || localhosts == members.size());
+ uassert(13117, "bad " + rsConfigNs + " config", !_id.empty());
+
+ if( o["settings"].ok() ) {
+ BSONObj settings = o["settings"].Obj();
+ if( settings["getLastErrorModes"].ok() ) {
+ parseRules(settings["getLastErrorModes"].Obj());
+ }
+ ho.check();
+ try { getLastErrorDefaults = settings["getLastErrorDefaults"].Obj().copy(); }
+ catch(...) { }
+ }
+
+ // figure out the majority for this config
+ setMajority();
+ }
+
+ static inline void configAssert(bool expr) {
+ uassert(13122, "bad repl set config?", expr);
+ }
+
+ ReplSetConfig::ReplSetConfig(BSONObj cfg, bool force) {
+ _constructed = false;
+ clear();
+ from(cfg);
+ if( force ) {
+ version += rand() % 100000 + 10000;
+ }
+ configAssert( version < 0 /*unspecified*/ || (version >= 1) );
+ if( version < 1 )
+ version = 1;
+ _ok = true;
+ _constructed = true;
+ }
+
+ ReplSetConfig::ReplSetConfig(const HostAndPort& h) {
+ LOG(2) << "ReplSetConfig load " << h.toStringLong() << rsLog;
+
+ _constructed = false;
+ clear();
+ int level = 2;
+ DEV level = 0;
+
+ BSONObj cfg;
+ int v = -5;
+ try {
+ if( h.isSelf() ) {
+ ;
+ }
+ else {
+ /* first, make sure other node is configured to be a replset. just to be safe. */
+ string setname = cmdLine.ourSetName();
+ BSONObj cmd = BSON( "replSetHeartbeat" << setname );
+ int theirVersion;
+ BSONObj info;
+ log() << "trying to contact " << h.toString() << rsLog;
+ bool ok = requestHeartbeat(setname, "", h.toString(), info, -2, theirVersion);
+ if( info["rs"].trueValue() ) {
+                    // yes, it is a replica set, although perhaps not yet initialized
+ }
+ else {
+ if( !ok ) {
+ log() << "replSet TEMP !ok heartbeating " << h.toString() << " on cfg load" << rsLog;
+ if( !info.isEmpty() )
+ log() << "replSet info " << h.toString() << " : " << info.toString() << rsLog;
+ return;
+ }
+ {
+ stringstream ss;
+ ss << "replSet error: member " << h.toString() << " is not in --replSet mode";
+ msgassertedNoTrace(13260, ss.str().c_str()); // not a user exception, so it is not caught - which is what we want
+ //for python err# checker: uassert(13260, "", false);
+ }
+ }
+ }
+
+ v = -4;
+ unsigned long long count = 0;
+ try {
+ ScopedConn conn(h.toString());
+ v = -3;
+ cfg = conn.findOne(rsConfigNs, Query()).getOwned();
+ count = conn.count(rsConfigNs);
+ }
+ catch ( DBException& ) {
+ if ( !h.isSelf() ) {
+ throw;
+ }
+
+ // on startup, socket is not listening yet
+ DBDirectClient cli;
+ cfg = cli.findOne( rsConfigNs, Query() ).getOwned();
+ count = cli.count(rsConfigNs);
+ }
+
+ if( count > 1 )
+ uasserted(13109, str::stream() << "multiple rows in " << rsConfigNs << " not supported host: " << h.toString());
+
+ if( cfg.isEmpty() ) {
+ version = EMPTYCONFIG;
+ return;
+ }
+ version = -1;
+ }
+ catch( DBException& e) {
+ version = v;
+ log(level) << "replSet load config couldn't get from " << h.toString() << ' ' << e.what() << rsLog;
+ return;
+ }
+
+ from(cfg);
+ checkRsConfig();
+ _ok = true;
+ log(level) << "replSet load config ok from " << (h.isSelf() ? "self" : h.toString()) << rsLog;
+ _constructed = true;
+ }
+
+}
diff --git a/src/mongo/db/repl/rs_config.h b/src/mongo/db/repl/rs_config.h
new file mode 100644
index 00000000000..cfe2e86a568
--- /dev/null
+++ b/src/mongo/db/repl/rs_config.h
@@ -0,0 +1,251 @@
+// rs_config.h
+// repl set configuration
+//
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../../util/net/hostandport.h"
+#include "../../util/concurrency/race.h"
+#include "health.h"
+
+namespace mongo {
+ class Member;
+ const string rsConfigNs = "local.system.replset";
+
+ class ReplSetConfig {
+ enum { EMPTYCONFIG = -2 };
+ struct TagSubgroup;
+ public:
+ /**
+ * This contacts the given host and tries to get a config from them.
+ *
+ * This sends a test heartbeat to the host and, if all goes well and the
+ * host has a more recent config, fetches the config and loads it (see
+ * from()).
+ *
+ * If it's contacting itself, it skips the heartbeat (for obvious
+ * reasons.) If something is misconfigured, throws an exception. If the
+ * host couldn't be queried or is just blank, ok() will be false.
+ */
+ ReplSetConfig(const HostAndPort& h);
+
+ ReplSetConfig(BSONObj cfg, bool force=false);
+
+ bool ok() const { return _ok; }
+
+ struct TagRule;
+
+ struct MemberCfg {
+ MemberCfg() : _id(-1), votes(1), priority(1.0), arbiterOnly(false), slaveDelay(0), hidden(false), buildIndexes(true) { }
+ int _id; /* ordinal */
+ unsigned votes; /* how many votes this node gets. default 1. */
+ HostAndPort h;
+ double priority; /* 0 means can never be primary */
+ bool arbiterOnly;
+ int slaveDelay; /* seconds. int rather than unsigned for convenient to/from bson conversion. */
+ bool hidden; /* if set, don't advertise to drivers in isMaster. for non-primaries (priority 0) */
+ bool buildIndexes; /* if false, do not create any non-_id indexes */
+ map<string,string> tags; /* tagging for data center, rack, etc. */
+ private:
+ set<TagSubgroup*> _groups; // the subgroups this member belongs to
+ public:
+ const set<TagSubgroup*>& groups() const {
+ return _groups;
+ }
+ set<TagSubgroup*>& groupsw() {
+ return _groups;
+ }
+ void check() const; /* check validity, assert if not. */
+ BSONObj asBson() const;
+ bool potentiallyHot() const { return !arbiterOnly && priority > 0; }
+ void updateGroups(const OpTime& last) {
+ RACECHECK
+ for (set<TagSubgroup*>::const_iterator it = groups().begin(); it != groups().end(); it++) {
+ ((TagSubgroup*)(*it))->updateLast(last);
+ }
+ }
+ bool operator==(const MemberCfg& r) const {
+ if (!tags.empty() || !r.tags.empty()) {
+ if (tags.size() != r.tags.size()) {
+ return false;
+ }
+
+ // if they are the same size and not equal, at least one
+ // element in A must be different in B
+ for (map<string,string>::const_iterator lit = tags.begin(); lit != tags.end(); lit++) {
+ map<string,string>::const_iterator rit = r.tags.find((*lit).first);
+
+ if (rit == r.tags.end() || (*lit).second != (*rit).second) {
+ return false;
+ }
+ }
+ }
+
+ return _id==r._id && votes == r.votes && h == r.h && priority == r.priority &&
+ arbiterOnly == r.arbiterOnly && slaveDelay == r.slaveDelay && hidden == r.hidden &&
+ buildIndexes == r.buildIndexes;
+ }
+ bool operator!=(const MemberCfg& r) const { return !(*this == r); }
+ };
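+ /* Illustrative example (hypothetical host and tag values): a member document of the
+ shape that from() parses into a MemberCfg:
+ { _id: 0, host: "host1:27017", priority: 1, votes: 1, arbiterOnly: false,
+ slaveDelay: 0, hidden: false, buildIndexes: true, tags: { "dc": "nyc", "rack": "rk1" } }
+ */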
+
+ vector<MemberCfg> members;
+ string _id;
+ int version;
+ HealthOptions ho;
+ string md5;
+ BSONObj getLastErrorDefaults;
+ map<string,TagRule*> rules;
+
+ list<HostAndPort> otherMemberHostnames() const; // except self
+
+ /** @return true if we could connect and there is no cfg object there at all */
+ bool empty() const { return version == EMPTYCONFIG; }
+
+ string toString() const { return asBson().toString(); }
+
+ /** validate the settings. does not call check() on each member, you have to do that separately. */
+ void checkRsConfig() const;
+
+ /** check if modification makes sense */
+ static bool legalChange(const ReplSetConfig& old, const ReplSetConfig& n, string& errmsg);
+
+ //static void receivedNewConfig(BSONObj);
+ void saveConfigLocally(BSONObj comment); // to local db
+ string saveConfigEverywhere(); // returns textual info on what happened
+
+ /**
+ * Update members' groups when the config changes but members stay the same.
+ */
+ void updateMembers(List1<Member> &dest);
+
+ BSONObj asBson() const;
+
+ /**
+ * Getter and setter for _majority. This is almost always
+ * members.size()/2+1, but can be the number of non-arbiter members if
+ * there are more arbiters than non-arbiters (writing to 3 out of 7
+ * servers is safe if 4 of the servers are arbiters).
+ */
+ void setMajority();
+ int getMajority() const;
+
+ bool _constructed;
+ private:
+ bool _ok;
+ int _majority;
+
+ void from(BSONObj);
+ void clear();
+
+ struct TagClause;
+
+ /**
+ * This is a logical grouping of servers. It is pointed to by a set of
+ * servers with a certain tag.
+ *
+ * For example, suppose servers A, B, and C have the tag "dc" : "nyc". If we
+ * have a rule {"dc" : 2}, then we want A _or_ B _or_ C to have the
+ * write for one of the "dc" criteria to be fulfilled, so all three will
+ * point to this subgroup. When one of their oplog-tailing cursors is
+ * updated, this subgroup is updated.
+ */
+ struct TagSubgroup : boost::noncopyable {
+ ~TagSubgroup(); // never called; not defined
+ TagSubgroup(string nm) : name(nm) { }
+ const string name;
+ OpTime last;
+ vector<TagClause*> clauses;
+
+ // this probably won't actually point to valid members after the
+ // subgroup is created, as initFromConfig() makes a copy of the
+ // config
+ set<MemberCfg*> m;
+
+ void updateLast(const OpTime& op);
+
+ //string toString() const;
+
+ /**
+ * If two tags have the same name, they should compare as equal so
+ * that members don't have to update two identical groups on writes.
+ */
+ bool operator() (TagSubgroup& lhs, TagSubgroup& rhs) const {
+ return lhs.name < rhs.name;
+ }
+ };
+
+ /**
+ * An argument in a rule. For example, if we had the rule {dc : 2,
+ * machines : 3}, "dc" : 2 and "machines" : 3 would be two TagClauses.
+ *
+ * Each tag clause has a set of associated subgroups. For example, if
+ * we had "dc" : 2, our subgroups might be "nyc", "sf", and "hk".
+ */
+ struct TagClause {
+ OpTime last;
+ map<string,TagSubgroup*> subgroups;
+ TagRule *rule;
+ string name;
+ /**
+ * If we get a clause like {machines : 3} and this server is
+ * tagged with "machines", then it's really {machines : 2}, as we
+ * will always be up-to-date. So, target would be 3 and
+ * actualTarget would be 2, in that example.
+ */
+ int target;
+ int actualTarget;
+
+ void updateLast(const OpTime& op);
+ string toString() const;
+ };
+
+ /**
+ * Parses getLastErrorModes.
+ */
+ void parseRules(const BSONObj& modes);
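+ /* Illustrative example: parseRules() receives the getLastErrorModes sub-document of settings,
+ e.g. { multiDC: { "dc": 2 } } (the mode name "multiDC" is hypothetical), which would yield
+ rules["multiDC"] containing a single "dc" clause with target 2. */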
+
+ /**
+ * Create a hash containing every possible clause that could be used in a
+ * rule and the servers related to that clause.
+ *
+ * For example, suppose we have the following servers:
+ * A {"dc" : "ny", "ny" : "rk1"}
+ * B {"dc" : "ny", "ny" : "rk1"}
+ * C {"dc" : "ny", "ny" : "rk2"}
+ * D {"dc" : "sf", "sf" : "rk1"}
+ * E {"dc" : "sf", "sf" : "rk2"}
+ *
+ * This would give us the possible criteria:
+ * "dc" -> {A, B, C},{D, E}
+ * "ny" -> {A, B},{C}
+ * "sf" -> {D},{E}
+ */
+ void _populateTagMap(map<string,TagClause> &tagMap);
+
+ public:
+ struct TagRule {
+ vector<TagClause*> clauses;
+ OpTime last;
+
+ void updateLast(const OpTime& op);
+ string toString() const;
+ };
+ };
+
+}
diff --git a/src/mongo/db/repl/rs_exception.h b/src/mongo/db/repl/rs_exception.h
new file mode 100644
index 00000000000..fc372fc241c
--- /dev/null
+++ b/src/mongo/db/repl/rs_exception.h
@@ -0,0 +1,17 @@
+// @file rs_exception.h
+
+#pragma once
+
+namespace mongo {
+
+ class VoteException : public std::exception {
+ public:
+ const char * what() const throw () { return "VoteException"; }
+ };
+
+ class RetryAfterSleepException : public std::exception {
+ public:
+ const char * what() const throw () { return "RetryAfterSleepException"; }
+ };
+
+}
diff --git a/src/mongo/db/repl/rs_initialsync.cpp b/src/mongo/db/repl/rs_initialsync.cpp
new file mode 100644
index 00000000000..b67c0d71b83
--- /dev/null
+++ b/src/mongo/db/repl/rs_initialsync.cpp
@@ -0,0 +1,271 @@
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "../repl.h"
+#include "../client.h"
+#include "../../client/dbclient.h"
+#include "rs.h"
+#include "../oplogreader.h"
+#include "../../util/mongoutils/str.h"
+#include "../dbhelpers.h"
+#include "rs_optime.h"
+#include "../oplog.h"
+
+namespace mongo {
+
+ using namespace mongoutils;
+ using namespace bson;
+
+ void dropAllDatabasesExceptLocal();
+
+ // add try/catch with sleep
+
+ void isyncassert(const string& msg, bool expr) {
+ if( !expr ) {
+ string m = str::stream() << "initial sync " << msg;
+ theReplSet->sethbmsg(m, 0);
+ uasserted(13404, m);
+ }
+ }
+
+ void ReplSetImpl::syncDoInitialSync() {
+ createOplog();
+
+ while( 1 ) {
+ try {
+ _syncDoInitialSync();
+ break;
+ }
+ catch(DBException& e) {
+ sethbmsg("initial sync exception " + e.toString(), 0);
+ sleepsecs(30);
+ }
+ }
+ }
+
+ /* todo : progress metering to sethbmsg. */
+ static bool clone(const char *master, string db) {
+ string err;
+ return cloneFrom(master, err, db, false,
+ /* slave_ok */ true, true, false, /*mayYield*/true, /*mayBeInterrupted*/false);
+ }
+
+ void _logOpObjRS(const BSONObj& op);
+
+ static void emptyOplog() {
+ writelock lk(rsoplog);
+ Client::Context ctx(rsoplog);
+ NamespaceDetails *d = nsdetails(rsoplog);
+
+ // temp
+ if( d && d->stats.nrecords == 0 )
+ return; // already empty, ok.
+
+ LOG(1) << "replSet empty oplog" << rsLog;
+ d->emptyCappedCollection(rsoplog);
+ }
+
+ Member* ReplSetImpl::getMemberToSyncTo() {
+ Member *closest = 0;
+ time_t now = 0;
+ bool buildIndexes = true;
+
+ // wait for 2N pings before choosing a sync target
+ if (_cfg) {
+ int needMorePings = config().members.size()*2 - HeartbeatInfo::numPings;
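+ // e.g. (illustrative): with 3 members, numPings must reach 6 before we pick a sync target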
+
+ if (needMorePings > 0) {
+ OCCASIONALLY log() << "waiting for " << needMorePings << " pings from other members before syncing" << endl;
+ return NULL;
+ }
+
+ buildIndexes = myConfig().buildIndexes;
+ }
+
+ // find the member with the lowest ping time that has more data than me
+ for (Member *m = _members.head(); m; m = m->next()) {
+ if (m->hbinfo().up() &&
+ // make sure members with buildIndexes sync from other members w/indexes
+ (!buildIndexes || (buildIndexes && m->config().buildIndexes)) &&
+ (m->state() == MemberState::RS_PRIMARY ||
+ (m->state() == MemberState::RS_SECONDARY && m->hbinfo().opTime > lastOpTimeWritten)) &&
+ (!closest || m->hbinfo().ping < closest->hbinfo().ping)) {
+
+ map<string,time_t>::iterator vetoed = _veto.find(m->fullName());
+ if (vetoed == _veto.end()) {
+ closest = m;
+ break;
+ }
+
+ if (now == 0) {
+ now = time(0);
+ }
+
+ // if this was on the veto list, check if it was vetoed in the last "while"
+ if ((*vetoed).second < now) {
+ _veto.erase(vetoed);
+ closest = m;
+ break;
+ }
+
+ // if it was recently vetoed, skip
+ log() << "replSet not trying to sync from " << (*vetoed).first
+ << ", it is vetoed for " << ((*vetoed).second - now) << " more seconds" << rsLog;
+ }
+ }
+
+ {
+ lock lk(this);
+
+ if (!closest) {
+ _currentSyncTarget = NULL;
+ return NULL;
+ }
+
+ _currentSyncTarget = closest;
+ }
+
+ sethbmsg( str::stream() << "syncing to: " << closest->fullName(), 0);
+
+ return closest;
+ }
+
+ void ReplSetImpl::veto(const string& host, const unsigned secs) {
+ _veto[host] = time(0)+secs;
+ }
+
+ /**
+ * Do the initial sync for this member.
+ */
+ void ReplSetImpl::_syncDoInitialSync() {
+ sethbmsg("initial sync pending",0);
+
+ // if this is the first node, it may have already become primary
+ if ( box.getState().primary() ) {
+ sethbmsg("I'm already primary, no need for initial sync",0);
+ return;
+ }
+
+ const Member *source = getMemberToSyncTo();
+ if (!source) {
+ sethbmsg("initial sync need a member to be primary or secondary to do our initial sync", 0);
+ sleepsecs(15);
+ return;
+ }
+
+ string sourceHostname = source->h().toString();
+ OplogReader r;
+ if( !r.connect(sourceHostname) ) {
+ sethbmsg( str::stream() << "initial sync couldn't connect to " << source->h().toString() , 0);
+ sleepsecs(15);
+ return;
+ }
+
+ BSONObj lastOp = r.getLastOp(rsoplog);
+ if( lastOp.isEmpty() ) {
+ sethbmsg("initial sync couldn't read remote oplog", 0);
+ sleepsecs(15);
+ return;
+ }
+ OpTime startingTS = lastOp["ts"]._opTime();
+
+ if (replSettings.fastsync) {
+ log() << "fastsync: skipping database clone" << rsLog;
+ }
+ else {
+ sethbmsg("initial sync drop all databases", 0);
+ dropAllDatabasesExceptLocal();
+
+ sethbmsg("initial sync clone all databases", 0);
+
+ list<string> dbs = r.conn()->getDatabaseNames();
+ for( list<string>::iterator i = dbs.begin(); i != dbs.end(); i++ ) {
+ string db = *i;
+ if( db != "local" ) {
+ sethbmsg( str::stream() << "initial sync cloning db: " << db , 0);
+ bool ok;
+ {
+ writelock lk(db);
+ Client::Context ctx(db);
+ ok = clone(sourceHostname.c_str(), db);
+ }
+ if( !ok ) {
+ sethbmsg( str::stream() << "initial sync error clone of " << db << " failed sleeping 5 minutes" ,0);
+ veto(source->fullName(), 600);
+ sleepsecs(300);
+ return;
+ }
+ }
+ }
+ }
+
+ sethbmsg("initial sync query minValid",0);
+
+ /* our cloned copy will be strange until we apply oplog events that occurred
+ during the cloning process. we note that time point here. */
+ BSONObj minValid = r.getLastOp(rsoplog);
+ isyncassert( "getLastOp is empty ", !minValid.isEmpty() );
+ OpTime mvoptime = minValid["ts"]._opTime();
+ assert( !mvoptime.isNull() );
+ assert( mvoptime >= startingTS );
+
+ // apply startingTS..mvoptime portion of the oplog
+ {
+ // note we assume here that this call does not throw
+ if( ! initialSyncOplogApplication(startingTS, mvoptime) ) {
+ log() << "replSet initial sync failed during oplog application phase" << rsLog;
+
+ emptyOplog(); // otherwise we'll be up!
+
+ lastOpTimeWritten = OpTime();
+ lastH = 0;
+
+ log() << "replSet cleaning up [1]" << rsLog;
+ {
+ writelock lk("local.");
+ Client::Context cx( "local." );
+ cx.db()->flushFiles(true);
+ }
+ log() << "replSet cleaning up [2]" << rsLog;
+
+ log() << "replSet initial sync failed will try again" << endl;
+
+ sleepsecs(5);
+ return;
+ }
+ }
+
+ sethbmsg("initial sync finishing up",0);
+
+ assert( !box.getState().primary() ); // wouldn't make sense if we were.
+
+ {
+ writelock lk("local.");
+ Client::Context cx( "local." );
+ cx.db()->flushFiles(true);
+ try {
+ log() << "replSet set minValid=" << minValid["ts"]._opTime().toString() << rsLog;
+ }
+ catch(...) { }
+ Helpers::putSingleton("local.replset.minvalid", minValid);
+ cx.db()->flushFiles(true);
+ }
+
+ sethbmsg("initial sync done",0);
+ }
+
+}
diff --git a/src/mongo/db/repl/rs_initiate.cpp b/src/mongo/db/repl/rs_initiate.cpp
new file mode 100644
index 00000000000..77bc6c03938
--- /dev/null
+++ b/src/mongo/db/repl/rs_initiate.cpp
@@ -0,0 +1,269 @@
+/* @file rs_initiate.cpp
+ */
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "../cmdline.h"
+#include "../commands.h"
+#include "../../util/mmap.h"
+#include "../../util/mongoutils/str.h"
+#include "health.h"
+#include "rs.h"
+#include "rs_config.h"
+#include "../dbhelpers.h"
+#include "../oplog.h"
+
+using namespace bson;
+using namespace mongoutils;
+
+namespace mongo {
+
+ /* called on a reconfig AND on initiate
+ throws
+ @param initial true when initiating
+ */
+ void checkMembersUpForConfigChange(const ReplSetConfig& cfg, BSONObjBuilder& result, bool initial) {
+ int failures = 0, allVotes = 0, allowableFailures = 0;
+ int me = 0;
+ stringstream selfs;
+ for( vector<ReplSetConfig::MemberCfg>::const_iterator i = cfg.members.begin(); i != cfg.members.end(); i++ ) {
+ if( i->h.isSelf() ) {
+ me++;
+ if( me > 1 )
+ selfs << ',';
+ selfs << i->h.toString();
+ if( !i->potentiallyHot() ) {
+ uasserted(13420, "initiation and reconfiguration of a replica set must be sent to a node that can become primary");
+ }
+ }
+ allVotes += i->votes;
+ }
+ allowableFailures = allVotes - (allVotes/2 + 1);
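+ // e.g. (illustrative): with allVotes == 5 the majority is 3, so allowableFailures == 2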
+
+ uassert(13278, "bad config: isSelf is true for multiple hosts: " + selfs.str(), me <= 1); // dups?
+ if( me != 1 ) {
+ stringstream ss;
+ ss << "can't find self in the replset config";
+ if( !cmdLine.isDefaultPort() ) ss << " my port: " << cmdLine.port;
+ if( me != 0 ) ss << " found: " << me;
+ uasserted(13279, ss.str());
+ }
+
+ vector<string> down;
+ for( vector<ReplSetConfig::MemberCfg>::const_iterator i = cfg.members.begin(); i != cfg.members.end(); i++ ) {
+ // we know we're up
+ if (i->h.isSelf()) {
+ continue;
+ }
+
+ BSONObj res;
+ {
+ bool ok = false;
+ try {
+ int theirVersion = -1000;
+ ok = requestHeartbeat(cfg._id, "", i->h.toString(), res, -1, theirVersion, initial/*check if empty*/);
+ if( theirVersion >= cfg.version ) {
+ stringstream ss;
+ ss << "replSet member " << i->h.toString() << " has too new a config version (" << theirVersion << ") to reconfigure";
+ uasserted(13259, ss.str());
+ }
+ }
+ catch(DBException& e) {
+ log() << "replSet cmufcc requestHeartbeat " << i->h.toString() << " : " << e.toString() << rsLog;
+ }
+ catch(...) {
+ log() << "replSet cmufcc error exception in requestHeartbeat?" << rsLog;
+ }
+ if( res.getBoolField("mismatch") )
+ uasserted(13145, "set name does not match the set name host " + i->h.toString() + " expects");
+ if( *res.getStringField("set") ) {
+ if( cfg.version <= 1 ) {
+ // this was supposed to be an initiation; no one should already be initiated.
+ uasserted(13256, "member " + i->h.toString() + " is already initiated");
+ }
+ else {
+ // Assure no one has a newer config.
+ if( res["v"].Int() >= cfg.version ) {
+ uasserted(13341, "member " + i->h.toString() + " has a config version >= the new cfg version; cannot change config");
+ }
+ }
+ }
+ if( !ok && !res["rs"].trueValue() ) {
+ down.push_back(i->h.toString());
+
+ if( !res.isEmpty() ) {
+ /* strange. got a response, but not "ok". log it. */
+ log() << "replSet warning " << i->h.toString() << " replied: " << res.toString() << rsLog;
+ }
+
+ bool allowFailure = false;
+ failures += i->votes;
+ if( !initial && failures <= allowableFailures ) {
+ const Member* m = theReplSet->findById( i->_id );
+ if( m ) {
+ assert( m->h().toString() == i->h.toString() );
+ }
+ // it's okay if the down member isn't part of the config,
+ // we might be adding a new member that isn't up yet
+ allowFailure = true;
+ }
+
+ if( !allowFailure ) {
+ string msg = string("need all members up to initiate, not ok : ") + i->h.toStringLong();
+ if( !initial )
+ msg = string("need most members up to reconfigure, not ok : ") + i->h.toString();
+ uasserted(13144, msg);
+ }
+ }
+ }
+ if( initial ) {
+ bool hasData = res["hasData"].Bool();
+ uassert(13311, "member " + i->h.toString() + " has data already, cannot initiate set. All members except initiator must be empty.",
+ !hasData || i->h.isSelf());
+ }
+ }
+ if (down.size() > 0) {
+ result.append("down", down);
+ }
+ }
+
+ class CmdReplSetInitiate : public ReplSetCommand {
+ public:
+ virtual LockType locktype() const { return NONE; }
+ CmdReplSetInitiate() : ReplSetCommand("replSetInitiate") { }
+ virtual void help(stringstream& h) const {
+ h << "Initiate/christen a replica set.";
+ h << "\nhttp://www.mongodb.org/display/DOCS/Replica+Set+Commands";
+ }
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ log() << "replSet replSetInitiate admin command received from client" << rsLog;
+
+ if( !replSet ) {
+ errmsg = "server is not running with --replSet";
+ return false;
+ }
+ if( theReplSet ) {
+ errmsg = "already initialized";
+ result.append("info", "try querying " + rsConfigNs + " to see current configuration");
+ return false;
+ }
+
+ {
+ // just make sure we can get a write lock before doing anything else. we'll reacquire one
+ // later. of course it could be stuck then, but this check lowers the risk if weird things
+ // are up.
+ time_t t = time(0);
+ writelock lk("");
+ if( time(0)-t > 10 ) {
+ errmsg = "took a long time to get write lock, so not initiating. Initiate when server less busy?";
+ return false;
+ }
+
+ /* check that we don't already have an oplog. that could cause issues.
+ it is ok if the initiating member has *other* data than that.
+ */
+ BSONObj o;
+ if( Helpers::getFirst(rsoplog, o) ) {
+ errmsg = rsoplog + string(" is not empty on the initiating member. cannot initiate.");
+ return false;
+ }
+ }
+
+ if( ReplSet::startupStatus == ReplSet::BADCONFIG ) {
+ errmsg = "server already in BADCONFIG state (check logs); not initiating";
+ result.append("info", ReplSet::startupStatusMsg.get());
+ return false;
+ }
+ if( ReplSet::startupStatus != ReplSet::EMPTYCONFIG ) {
+ result.append("startupStatus", ReplSet::startupStatus);
+ errmsg = "all members and seeds must be reachable to initiate set";
+ result.append("info", cmdLine._replSet);
+ return false;
+ }
+
+ BSONObj configObj;
+
+ if( cmdObj["replSetInitiate"].type() != Object ) {
+ result.append("info2", "no configuration explicitly specified -- making one");
+ log() << "replSet info initiate : no configuration specified. Using a default configuration for the set" << rsLog;
+
+ string name;
+ vector<HostAndPort> seeds;
+ set<HostAndPort> seedSet;
+ parseReplsetCmdLine(cmdLine._replSet, name, seeds, seedSet); // may throw...
+
+ bob b;
+ b.append("_id", name);
+ bob members;
+ members.append("0", BSON( "_id" << 0 << "host" << HostAndPort::Me().dynString() ));
+ result.append("me", HostAndPort::Me().toString());
+ for( unsigned i = 0; i < seeds.size(); i++ )
+ members.append(bob::numStr(i+1), BSON( "_id" << i+1 << "host" << seeds[i].toString()));
+ b.appendArray("members", members.obj());
+ configObj = b.obj();
+ log() << "replSet created this configuration for initiation : " << configObj.toString() << rsLog;
+ }
+ else {
+ configObj = cmdObj["replSetInitiate"].Obj();
+ }
+
+ bool parsed = false;
+ try {
+ ReplSetConfig newConfig(configObj);
+ parsed = true;
+
+ if( newConfig.version > 1 ) {
+ errmsg = "can't initiate with a version number greater than 1";
+ return false;
+ }
+
+ log() << "replSet replSetInitiate config object parses ok, " << newConfig.members.size() << " members specified" << rsLog;
+
+ checkMembersUpForConfigChange(newConfig, result, true);
+
+ log() << "replSet replSetInitiate all members seem up" << rsLog;
+
+ createOplog();
+
+ writelock lk("");
+ bo comment = BSON( "msg" << "initiating set");
+ newConfig.saveConfigLocally(comment);
+ log() << "replSet replSetInitiate config now saved locally. Should come online in about a minute." << rsLog;
+ result.append("info", "Config now saved locally. Should come online in about a minute.");
+ ReplSet::startupStatus = ReplSet::SOON;
+ ReplSet::startupStatusMsg.set("Received replSetInitiate - should come online shortly.");
+ }
+ catch( DBException& e ) {
+ log() << "replSet replSetInitiate exception: " << e.what() << rsLog;
+ if( !parsed )
+ errmsg = string("couldn't parse cfg object ") + e.what();
+ else
+ errmsg = string("couldn't initiate : ") + e.what();
+ return false;
+ }
+ catch( string& e2 ) {
+ log() << e2 << rsLog;
+ errmsg = e2;
+ return false;
+ }
+
+ return true;
+ }
+ } cmdReplSetInitiate;
+
+}
diff --git a/src/mongo/db/repl/rs_member.h b/src/mongo/db/repl/rs_member.h
new file mode 100644
index 00000000000..24e593392b6
--- /dev/null
+++ b/src/mongo/db/repl/rs_member.h
@@ -0,0 +1,131 @@
+// @file rs_member.h
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/** replica set member */
+
+#pragma once
+
+#include "../../util/concurrency/value.h"
+
+namespace mongo {
+
+
+ /*
+ RS_STARTUP serving still starting up, or still trying to initiate the set
+ RS_PRIMARY this server thinks it is primary
+ RS_SECONDARY this server thinks it is a secondary (slave mode)
+ RS_RECOVERING recovering/resyncing; after recovery usually auto-transitions to secondary
+ RS_FATAL something bad has occurred and the server is now offline with regard to the replica set. fatal error.
+ RS_STARTUP2 loaded config, still determining who is primary
+ */
+ struct MemberState {
+ enum MS {
+ RS_STARTUP = 0,
+ RS_PRIMARY = 1,
+ RS_SECONDARY = 2,
+ RS_RECOVERING = 3,
+ RS_FATAL = 4,
+ RS_STARTUP2 = 5,
+ RS_UNKNOWN = 6, /* remote node not yet reached */
+ RS_ARBITER = 7,
+ RS_DOWN = 8, /* node not reachable for a report */
+ RS_ROLLBACK = 9
+ } s;
+
+ MemberState(MS ms = RS_UNKNOWN) : s(ms) { }
+ explicit MemberState(int ms) : s((MS) ms) { }
+
+ bool startup() const { return s == RS_STARTUP; }
+ bool primary() const { return s == RS_PRIMARY; }
+ bool secondary() const { return s == RS_SECONDARY; }
+ bool recovering() const { return s == RS_RECOVERING; }
+ bool startup2() const { return s == RS_STARTUP2; }
+ bool fatal() const { return s == RS_FATAL; }
+ bool rollback() const { return s == RS_ROLLBACK; }
+ bool readable() const { return s == RS_PRIMARY || s == RS_SECONDARY; }
+
+ string toString() const;
+
+ bool operator==(const MemberState& r) const { return s == r.s; }
+ bool operator!=(const MemberState& r) const { return s != r.s; }
+ };
+
+ /* this is supposed to be just basic information on a member,
+ and copy constructable. */
+ class HeartbeatInfo {
+ unsigned _id;
+ public:
+ HeartbeatInfo() : _id(0xffffffff), hbstate(MemberState::RS_UNKNOWN), health(-1.0),
+ downSince(0), skew(INT_MIN), authIssue(false), ping(0) { }
+ HeartbeatInfo(unsigned id);
+ unsigned id() const { return _id; }
+ MemberState hbstate;
+ double health;
+ time_t upSince;
+ long long downSince;
+ time_t lastHeartbeat;
+ DiagStr lastHeartbeatMsg;
+ OpTime opTime;
+ int skew;
+ bool authIssue;
+ unsigned int ping; // milliseconds
+ static unsigned int numPings;
+
+ bool up() const { return health > 0; }
+
+ /** health is set to -1 on startup. that means we haven't even checked yet. 0 means we checked and it failed. */
+ bool maybeUp() const { return health != 0; }
+
+ long long timeDown() const; // ms
+
+ /* true if changed in a way of interest to the repl set manager. */
+ bool changed(const HeartbeatInfo& old) const;
+ };
+
+ inline HeartbeatInfo::HeartbeatInfo(unsigned id) :
+ _id(id),
+ authIssue(false),
+ ping(0) {
+ hbstate = MemberState::RS_UNKNOWN;
+ health = -1.0;
+ downSince = 0;
+ lastHeartbeat = upSince = 0;
+ skew = INT_MIN;
+ }
+
+ inline bool HeartbeatInfo::changed(const HeartbeatInfo& old) const {
+ return health != old.health ||
+ hbstate != old.hbstate;
+ }
+
+ inline string MemberState::toString() const {
+ switch ( s ) {
+ case RS_STARTUP: return "STARTUP";
+ case RS_PRIMARY: return "PRIMARY";
+ case RS_SECONDARY: return "SECONDARY";
+ case RS_RECOVERING: return "RECOVERING";
+ case RS_FATAL: return "FATAL";
+ case RS_STARTUP2: return "STARTUP2";
+ case RS_ARBITER: return "ARBITER";
+ case RS_DOWN: return "DOWN";
+ case RS_ROLLBACK: return "ROLLBACK";
+ case RS_UNKNOWN: return "UNKNOWN";
+ }
+ return "";
+ }
+
+}
diff --git a/src/mongo/db/repl/rs_optime.h b/src/mongo/db/repl/rs_optime.h
new file mode 100644
index 00000000000..f0ca56927ad
--- /dev/null
+++ b/src/mongo/db/repl/rs_optime.h
@@ -0,0 +1,58 @@
+// @file rs_optime.h
+
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "../../util/optime.h"
+
+namespace mongo {
+
+ const char rsoplog[] = "local.oplog.rs";
+
+ /*
+ class RSOpTime : public OpTime {
+ public:
+ bool initiated() const { return getSecs() != 0; }
+ };*/
+
+ /*struct RSOpTime {
+ unsigned long long ord;
+
+ RSOpTime() : ord(0) { }
+
+ bool initiated() const { return ord > 0; }
+
+ void initiate() {
+ assert( !initiated() );
+ ord = 1000000;
+ }
+
+ ReplTime inc() {
+ DEV assertInWriteLock();
+ return ++ord;
+ }
+
+ string toString() const { return str::stream() << ord; }
+
+ // query the oplog and set the highest value herein. acquires a db read lock. throws.
+ void load();
+ };
+
+ extern RSOpTime rsOpTime;*/
+
+}
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
new file mode 100644
index 00000000000..10727c59669
--- /dev/null
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -0,0 +1,667 @@
+/* @file rs_rollback.cpp
+*
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "../client.h"
+#include "../../client/dbclient.h"
+#include "rs.h"
+#include "../repl.h"
+#include "../ops/query.h"
+#include "../cloner.h"
+#include "../ops/update.h"
+#include "../ops/delete.h"
+
+/* Scenarios
+
+ We went offline with ops not replicated out.
+
+ F = node that failed and coming back.
+ P = node that took over, new primary
+
+ #1:
+ F : a b c d e f g
+ P : a b c d q
+
+ The design is "keep P". One could argue here that "keep F" has some merits, however, in most cases P
+ will have significantly more data. Also note that P may have a proper subset of F's stream if there were
+ no subsequent writes.
+
+ For now the model is simply : get F back in sync with P. If P was really behind or something, we should have
+ just chosen not to fail over anyway.
+
+ #2:
+ F : a b c d e f g -> a b c d
+ P : a b c d
+
+ #3:
+ F : a b c d e f g -> a b c d q r s t u v w x z
+ P : a b c d.q r s t u v w x z
+
+ Steps
+ find an event in common. 'd'.
+ undo our events beyond that by:
+ (1) taking copy from other server of those objects
+ (2) do not consider the copy valid until we reach an optime after when we fetched the new version of the object
+ -- i.e., reset minvalid.
+ (3) we could skip operations on objects that are previous in time to our capture of the object as an optimization.
+
+*/
+
+namespace mongo {
+
+ using namespace bson;
+
+ void incRBID();
+
+ class rsfatal : public std::exception {
+ public:
+ virtual const char* what() const throw() { return "replica set fatal exception"; }
+ };
+
+ struct DocID {
+ const char *ns;
+ be _id;
+ bool operator<(const DocID& d) const {
+ int c = strcmp(ns, d.ns);
+ if( c < 0 ) return true;
+ if( c > 0 ) return false;
+ return _id < d._id;
+ }
+ };
+
+ struct HowToFixUp {
+ /* note this is a set -- if there are many $inc's on a single document we need to rollback, we only
+ need to refetch it once. */
+ set<DocID> toRefetch;
+
+ /* collections to drop */
+ set<string> toDrop;
+
+ set<string> collectionsToResync;
+
+ OpTime commonPoint;
+ DiskLoc commonPointOurDiskloc;
+
+ int rbid; // remote server's current rollback sequence #
+ };
+
+ static void refetch(HowToFixUp& h, const BSONObj& ourObj) {
+ const char *op = ourObj.getStringField("op");
+ if( *op == 'n' )
+ return;
+
+ unsigned long long totSize = 0;
+ totSize += ourObj.objsize();
+ if( totSize > 512 * 1024 * 1024 )
+ throw "rollback too large";
+
+ DocID d;
+ // NOTE The assigned ns value may become invalid if we yield.
+ d.ns = ourObj.getStringField("ns");
+ if( *d.ns == 0 ) {
+ log() << "replSet WARNING ignoring op on rollback no ns TODO : " << ourObj.toString() << rsLog;
+ return;
+ }
+
+ bo o = ourObj.getObjectField(*op=='u' ? "o2" : "o");
+ if( o.isEmpty() ) {
+ log() << "replSet warning ignoring op on rollback : " << ourObj.toString() << rsLog;
+ return;
+ }
+
+ if( *op == 'c' ) {
+ be first = o.firstElement();
+ NamespaceString s(d.ns); // foo.$cmd
+ string cmdname = first.fieldName();
+ Command *cmd = Command::findCommand(cmdname.c_str());
+ if( cmd == 0 ) {
+ log() << "replSet warning rollback no suchcommand " << first.fieldName() << " - different mongod versions perhaps?" << rsLog;
+ return;
+ }
+ else {
+ /* findandmodify - translated?
+ godinsert?,
+ renamecollection a->b. just resync a & b
+ */
+ if( cmdname == "create" ) {
+ /* Create collection operation
+ { ts: ..., h: ..., op: "c", ns: "foo.$cmd", o: { create: "abc", ... } }
+ */
+ string ns = s.db + '.' + o["create"].String(); // -> foo.abc
+ h.toDrop.insert(ns);
+ return;
+ }
+ else if( cmdname == "drop" ) {
+ string ns = s.db + '.' + first.valuestr();
+ h.collectionsToResync.insert(ns);
+ return;
+ }
+ else if( cmdname == "dropIndexes" || cmdname == "deleteIndexes" ) {
+ /* TODO: this is bad. we simply full resync the collection here, which could be very slow. */
+ log() << "replSet info rollback of dropIndexes is slow in this version of mongod" << rsLog;
+ string ns = s.db + '.' + first.valuestr();
+ h.collectionsToResync.insert(ns);
+ return;
+ }
+ else if( cmdname == "renameCollection" ) {
+ /* TODO: slow. */
+ log() << "replSet info rollback of renameCollection is slow in this version of mongod" << rsLog;
+ string from = first.valuestr();
+ string to = o["to"].String();
+ h.collectionsToResync.insert(from);
+ h.collectionsToResync.insert(to);
+ return;
+ }
+ else if( cmdname == "reIndex" ) {
+ return;
+ }
+ else if( cmdname == "dropDatabase" ) {
+ log() << "replSet error rollback : can't rollback drop database full resync will be required" << rsLog;
+ log() << "replSet " << o.toString() << rsLog;
+ throw rsfatal();
+ }
+ else {
+ log() << "replSet error can't rollback this command yet: " << o.toString() << rsLog;
+ log() << "replSet cmdname=" << cmdname << rsLog;
+ throw rsfatal();
+ }
+ }
+ }
+
+ d._id = o["_id"];
+ if( d._id.eoo() ) {
+ log() << "replSet WARNING ignoring op on rollback no _id TODO : " << d.ns << ' '<< ourObj.toString() << rsLog;
+ return;
+ }
+
+ h.toRefetch.insert(d);
+ }
+
+ int getRBID(DBClientConnection*);
+
+ static void syncRollbackFindCommonPoint(DBClientConnection *them, HowToFixUp& h) {
+ static time_t last;
+ if( time(0)-last < 60 ) {
+ throw "findcommonpoint waiting a while before trying again";
+ }
+ last = time(0);
+
+ assert( d.dbMutex.atLeastReadLocked() );
+ Client::Context c(rsoplog);
+ NamespaceDetails *nsd = nsdetails(rsoplog);
+ assert(nsd);
+ ReverseCappedCursor u(nsd);
+ if( !u.ok() )
+ throw "our oplog empty or unreadable";
+
+ const Query q = Query().sort(reverseNaturalObj);
+ const bo fields = BSON( "ts" << 1 << "h" << 1 );
+
+ //auto_ptr<DBClientCursor> u = us->query(rsoplog, q, 0, 0, &fields, 0, 0);
+
+ h.rbid = getRBID(them);
+ auto_ptr<DBClientCursor> t = them->query(rsoplog, q, 0, 0, &fields, 0, 0);
+
+ if( t.get() == 0 || !t->more() ) throw "remote oplog empty or unreadable";
+
+ BSONObj ourObj = u.current();
+ OpTime ourTime = ourObj["ts"]._opTime();
+ BSONObj theirObj = t->nextSafe();
+ OpTime theirTime = theirObj["ts"]._opTime();
+
+ {
+ long long diff = (long long) ourTime.getSecs() - ((long long) theirTime.getSecs());
+ /* diff could be positive, negative, or zero */
+ log() << "replSet info rollback our last optime: " << ourTime.toStringPretty() << rsLog;
+ log() << "replSet info rollback their last optime: " << theirTime.toStringPretty() << rsLog;
+ log() << "replSet info rollback diff in end of log times: " << diff << " seconds" << rsLog;
+ if( diff > 1800 ) {
+ log() << "replSet rollback too long a time period for a rollback." << rsLog;
+ throw "error not willing to roll back more than 30 minutes of data";
+ }
+ }
+
+ unsigned long long scanned = 0;
+ while( 1 ) {
+ scanned++;
+ /* todo add code to assure no excessive scanning for too long */
+ if( ourTime == theirTime ) {
+ if( ourObj["h"].Long() == theirObj["h"].Long() ) {
+ // found the point back in time where we match.
+ // todo : check a few more just to be careful about hash collisions.
+ log() << "replSet rollback found matching events at " << ourTime.toStringPretty() << rsLog;
+ log() << "replSet rollback findcommonpoint scanned : " << scanned << rsLog;
+ h.commonPoint = ourTime;
+ h.commonPointOurDiskloc = u.currLoc();
+ return;
+ }
+
+ refetch(h, ourObj);
+
+ if( !t->more() ) {
+ log() << "replSet rollback error RS100 reached beginning of remote oplog" << rsLog;
+ log() << "replSet them: " << them->toString() << " scanned: " << scanned << rsLog;
+ log() << "replSet theirTime: " << theirTime.toStringLong() << rsLog;
+ log() << "replSet ourTime: " << ourTime.toStringLong() << rsLog;
+ throw "RS100 reached beginning of remote oplog [2]";
+ }
+ theirObj = t->nextSafe();
+ theirTime = theirObj["ts"]._opTime();
+
+ u.advance();
+ if( !u.ok() ) {
+ log() << "replSet rollback error RS101 reached beginning of local oplog" << rsLog;
+ log() << "replSet them: " << them->toString() << " scanned: " << scanned << rsLog;
+ log() << "replSet theirTime: " << theirTime.toStringLong() << rsLog;
+ log() << "replSet ourTime: " << ourTime.toStringLong() << rsLog;
+ throw "RS101 reached beginning of local oplog [1]";
+ }
+ ourObj = u.current();
+ ourTime = ourObj["ts"]._opTime();
+ }
+ else if( theirTime > ourTime ) {
+ if( !t->more() ) {
+ log() << "replSet rollback error RS100 reached beginning of remote oplog" << rsLog;
+ log() << "replSet them: " << them->toString() << " scanned: " << scanned << rsLog;
+ log() << "replSet theirTime: " << theirTime.toStringLong() << rsLog;
+ log() << "replSet ourTime: " << ourTime.toStringLong() << rsLog;
+ throw "RS100 reached beginning of remote oplog [1]";
+ }
+ theirObj = t->nextSafe();
+ theirTime = theirObj["ts"]._opTime();
+ }
+ else {
+ // theirTime < ourTime
+ refetch(h, ourObj);
+ u.advance();
+ if( !u.ok() ) {
+ log() << "replSet rollback error RS101 reached beginning of local oplog" << rsLog;
+ log() << "replSet them: " << them->toString() << " scanned: " << scanned << rsLog;
+ log() << "replSet theirTime: " << theirTime.toStringLong() << rsLog;
+ log() << "replSet ourTime: " << ourTime.toStringLong() << rsLog;
+ throw "RS101 reached beginning of local oplog [2]";
+ }
+ ourObj = u.current();
+ ourTime = ourObj["ts"]._opTime();
+ }
+ }
+ }
+
+ struct X {
+ const bson::bo *op;
+ bson::bo goodVersionOfObject;
+ };
+
+ static void setMinValid(bo newMinValid) {
+ try {
+ log() << "replSet minvalid=" << newMinValid["ts"]._opTime().toStringLong() << rsLog;
+ }
+ catch(...) { }
+ {
+ Helpers::putSingleton("local.replset.minvalid", newMinValid);
+ Client::Context cx( "local." );
+ cx.db()->flushFiles(true);
+ }
+ }
+
+ void ReplSetImpl::syncFixUp(HowToFixUp& h, OplogReader& r) {
+ DBClientConnection *them = r.conn();
+
+ // fetch all first so we needn't handle interruption in a fancy way
+
+ unsigned long long totSize = 0;
+
+ list< pair<DocID,bo> > goodVersions;
+
+ bo newMinValid;
+
+ /* fetch all the goodVersions of each document from current primary */
+ DocID d;
+ unsigned long long n = 0;
+ try {
+ for( set<DocID>::iterator i = h.toRefetch.begin(); i != h.toRefetch.end(); i++ ) {
+ d = *i;
+
+ assert( !d._id.eoo() );
+
+ {
+ /* TODO : slow. lots of round trips. */
+ n++;
+ bo good= them->findOne(d.ns, d._id.wrap(), NULL, QueryOption_SlaveOk).getOwned();
+ totSize += good.objsize();
+ uassert( 13410, "replSet too much data to roll back", totSize < 300 * 1024 * 1024 );
+
+ // note good might be eoo, indicating we should delete it
+ goodVersions.push_back(pair<DocID,bo>(d,good));
+ }
+ }
+ newMinValid = r.getLastOp(rsoplog);
+ if( newMinValid.isEmpty() ) {
+ sethbmsg("rollback error newMinValid empty?");
+ return;
+ }
+ }
+ catch(DBException& e) {
+ sethbmsg(str::stream() << "rollback re-get objects: " << e.toString(),0);
+ log() << "rollback couldn't re-get ns:" << d.ns << " _id:" << d._id << ' ' << n << '/' << h.toRefetch.size() << rsLog;
+ throw e;
+ }
+
+ MemoryMappedFile::flushAll(true);
+
+ sethbmsg("rollback 3.5");
+ if( h.rbid != getRBID(r.conn()) ) {
+ // our source rolled back itself. so the data we received isn't necessarily consistent.
+ sethbmsg("rollback rbid on source changed during rollback, cancelling this attempt");
+ return;
+ }
+
+ // update them
+ sethbmsg(str::stream() << "rollback 4 n:" << goodVersions.size());
+
+ bool warn = false;
+
+ assert( !h.commonPointOurDiskloc.isNull() );
+
+ mongo::d.dbMutex.assertWriteLocked();
+
+ /* we have items we are writing that aren't from a point-in-time. thus best not to come online
+ until we get to that point in freshness. */
+ setMinValid(newMinValid);
+
+ /** any full collection resyncs required? */
+ if( !h.collectionsToResync.empty() ) {
+ for( set<string>::iterator i = h.collectionsToResync.begin(); i != h.collectionsToResync.end(); i++ ) {
+ string ns = *i;
+ sethbmsg(str::stream() << "rollback 4.1 coll resync " << ns);
+
+ Client::Context c(ns);
+ {
+ bob res;
+ string errmsg;
+ dropCollection(ns, errmsg, res);
+ {
+ dbtemprelease r;
+ bool ok = copyCollectionFromRemote(them->getServerAddress(), ns, errmsg);
+ uassert(15909, str::stream() << "replSet rollback error resyncing collection " << ns << ' ' << errmsg, ok);
+ }
+ }
+ }
+
+ /* we did more reading from primary, so check it again for a rollback (which would mess us up), and
+ make minValid newer.
+ */
+ sethbmsg("rollback 4.2");
+ {
+ string err;
+ try {
+ newMinValid = r.getLastOp(rsoplog);
+ if( newMinValid.isEmpty() ) {
+ err = "can't get minvalid from primary";
+ }
+ else {
+ setMinValid(newMinValid);
+ }
+ }
+ catch (DBException&) {
+ err = "can't get/set minvalid";
+ }
+ if( h.rbid != getRBID(r.conn()) ) {
+ // our source rolled back itself. so the data we received isn't necessarily consistent.
+ // however, we've now done writes. thus we have a problem.
+ err += "rbid at primary changed during resync/rollback";
+ }
+ if( !err.empty() ) {
+ log() << "replSet error rolling back : " << err << ". A full resync will be necessary." << rsLog;
+ /* todo: reset minvalid so that we are permanently in fatal state */
+ /* todo: don't be fatal, but rather, get all the data first. */
+ sethbmsg("rollback error");
+ throw rsfatal();
+ }
+ }
+ sethbmsg("rollback 4.3");
+ }
+
+ sethbmsg("rollback 4.6");
+ /** drop the collections slated for dropping before doing individual fixups - that might actually make things faster below if there were subsequent inserts to roll back */
+ for( set<string>::iterator i = h.toDrop.begin(); i != h.toDrop.end(); i++ ) {
+ Client::Context c(*i);
+ try {
+ bob res;
+ string errmsg;
+ log(1) << "replSet rollback drop: " << *i << rsLog;
+ dropCollection(*i, errmsg, res);
+ }
+ catch(...) {
+ log() << "replset rollback error dropping collection " << *i << rsLog;
+ }
+ }
+
+ sethbmsg("rollback 4.7");
+ Client::Context c(rsoplog);
+ NamespaceDetails *oplogDetails = nsdetails(rsoplog);
+ uassert(13423, str::stream() << "replSet error in rollback can't find " << rsoplog, oplogDetails);
+
+ map<string,shared_ptr<RemoveSaver> > removeSavers;
+
+ unsigned deletes = 0, updates = 0;
+ for( list<pair<DocID,bo> >::iterator i = goodVersions.begin(); i != goodVersions.end(); i++ ) {
+ const DocID& d = i->first;
+ bo pattern = d._id.wrap(); // { _id : ... }
+ try {
+ assert( d.ns && *d.ns );
+ if( h.collectionsToResync.count(d.ns) ) {
+ /* we just synced this entire collection */
+ continue;
+ }
+
+ getDur().commitIfNeeded();
+
+ /* keep an archive of items rolled back */
+ shared_ptr<RemoveSaver>& rs = removeSavers[d.ns];
+ if ( ! rs )
+ rs.reset( new RemoveSaver( "rollback" , "" , d.ns ) );
+
+ // todo: lots of overhead in context, this can be faster
+ Client::Context c(d.ns);
+ if( i->second.isEmpty() ) {
+ // wasn't on the primary; delete.
+ /* TODO1.6 : can't delete from a capped collection. need to handle that here. */
+ deletes++;
+
+ NamespaceDetails *nsd = nsdetails(d.ns);
+ if( nsd ) {
+ if( nsd->capped ) {
+ /* can't delete from a capped collection - so we truncate instead. if this item must go,
+ so must all successors!!! */
+ try {
+ /** todo: IIRC cappedTruncateAfter does not handle a completely empty collection. todo. */
+ // this will be crazy slow if there is no _id index.
+ long long start = Listener::getElapsedTimeMillis();
+ DiskLoc loc = Helpers::findOne(d.ns, pattern, false);
+ if( Listener::getElapsedTimeMillis() - start > 200 )
+ log() << "replSet warning roll back slow no _id index for " << d.ns << " perhaps?" << rsLog;
+ //would be faster but requires index: DiskLoc loc = Helpers::findById(nsd, pattern);
+ if( !loc.isNull() ) {
+ try {
+ nsd->cappedTruncateAfter(d.ns, loc, true);
+ }
+ catch(DBException& e) {
+ if( e.getCode() == 13415 ) {
+ // hack: need to just make cappedTruncate do this...
+ nsd->emptyCappedCollection(d.ns);
+ }
+ else {
+ throw;
+ }
+ }
+ }
+ }
+ catch(DBException& e) {
+ log() << "replSet error rolling back capped collection rec " << d.ns << ' ' << e.toString() << rsLog;
+ }
+ }
+ else {
+ try {
+ deletes++;
+ deleteObjects(d.ns, pattern, /*justone*/true, /*logop*/false, /*god*/true, rs.get() );
+ }
+ catch(...) {
+ log() << "replSet error rollback delete failed ns:" << d.ns << rsLog;
+ }
+ }
+ // did we just empty the collection? if so let's check if it even exists on the source.
+ if( nsd->stats.nrecords == 0 ) {
+ try {
+ string sys = cc().database()->name + ".system.namespaces";
+ bo o = them->findOne(sys, QUERY("name"<<d.ns));
+ if( o.isEmpty() ) {
+ // we should drop
+ try {
+ bob res;
+ string errmsg;
+ dropCollection(d.ns, errmsg, res);
+ }
+ catch(...) {
+ log() << "replset error rolling back collection " << d.ns << rsLog;
+ }
+ }
+ }
+ catch(DBException& ) {
+ /* this isn't *that* big a deal, but is bad. */
+ log() << "replSet warning rollback error querying for existence of " << d.ns << " at the primary, ignoring" << rsLog;
+ }
+ }
+ }
+ }
+ else {
+ // todo faster...
+ OpDebug debug;
+ updates++;
+ _updateObjects(/*god*/true, d.ns, i->second, pattern, /*upsert=*/true, /*multi=*/false , /*logtheop=*/false , debug, rs.get() );
+ }
+ }
+ catch(DBException& e) {
+ log() << "replSet exception in rollback ns:" << d.ns << ' ' << pattern.toString() << ' ' << e.toString() << " ndeletes:" << deletes << rsLog;
+ warn = true;
+ }
+ }
+
+ removeSavers.clear(); // this effectively closes all of them
+
+ sethbmsg(str::stream() << "rollback 5 d:" << deletes << " u:" << updates);
+ MemoryMappedFile::flushAll(true);
+ sethbmsg("rollback 6");
+
+ // clean up oplog
+ LOG(2) << "replSet rollback truncate oplog after " << h.commonPoint.toStringPretty() << rsLog;
+ // todo: fatal error if this throws?
+ oplogDetails->cappedTruncateAfter(rsoplog, h.commonPointOurDiskloc, false);
+
+ /* reset cached lastoptimewritten and h value */
+ loadLastOpTimeWritten();
+
+ sethbmsg("rollback 7");
+ MemoryMappedFile::flushAll(true);
+
+ // done
+ if( warn )
+ sethbmsg("issues during syncRollback, see log");
+ else
+ sethbmsg("rollback done");
+ }
+
+ void ReplSetImpl::syncRollback(OplogReader&r) {
+ unsigned s = _syncRollback(r);
+ if( s )
+ sleepsecs(s);
+ }
+
+ unsigned ReplSetImpl::_syncRollback(OplogReader&r) {
+ assert( !lockedByMe() );
+ assert( !d.dbMutex.atLeastReadLocked() );
+
+ sethbmsg("rollback 0");
+
+ writelocktry lk(rsoplog, 20000);
+ if( !lk.got() ) {
+ sethbmsg("rollback couldn't get write lock in a reasonable time");
+ return 2;
+ }
+
+ if( state().secondary() ) {
+ /* by doing this, we will not service reads (we return an error as we aren't in secondary state).
+ that perhaps is moot because of the write lock above, but that write lock probably gets deferred
+ or removed or yielded later anyway.
+
+ also, this is better for status reporting - we know what is happening.
+ */
+ changeState(MemberState::RS_ROLLBACK);
+ }
+
+ HowToFixUp how;
+ sethbmsg("rollback 1");
+ {
+ r.resetCursor();
+
+ sethbmsg("rollback 2 FindCommonPoint");
+ try {
+ syncRollbackFindCommonPoint(r.conn(), how);
+ }
+ catch( const char *p ) {
+ sethbmsg(string("rollback 2 error ") + p);
+ return 10;
+ }
+ catch( rsfatal& ) {
+ _fatal();
+ return 2;
+ }
+ catch( DBException& e ) {
+ sethbmsg(string("rollback 2 exception ") + e.toString() + "; sleeping 1 min");
+ dbtemprelease r;
+ sleepsecs(60);
+ throw;
+ }
+ }
+
+ sethbmsg("replSet rollback 3 fixup");
+
+ {
+ incRBID();
+ try {
+ syncFixUp(how, r);
+ }
+ catch( rsfatal& ) {
+ sethbmsg("rollback fixup error");
+ _fatal();
+ return 2;
+ }
+ catch(...) {
+ incRBID(); throw;
+ }
+ incRBID();
+
+ /* success - leave "ROLLBACK" state
+ can go to SECONDARY once minvalid is achieved
+ */
+ changeState(MemberState::RS_RECOVERING);
+ }
+
+ return 0;
+ }
+
+}
diff --git a/src/mongo/db/repl/rs_sync.cpp b/src/mongo/db/repl/rs_sync.cpp
new file mode 100644
index 00000000000..8bac981d951
--- /dev/null
+++ b/src/mongo/db/repl/rs_sync.cpp
@@ -0,0 +1,701 @@
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "../client.h"
+#include "../../client/dbclient.h"
+#include "rs.h"
+#include "../repl.h"
+#include "connections.h"
+
+namespace mongo {
+
+ using namespace bson;
+ extern unsigned replSetForceInitialSyncFailure;
+
+ void NOINLINE_DECL blank(const BSONObj& o) {
+ if( *o.getStringField("op") != 'n' ) {
+ log() << "replSet skipping bad op in oplog: " << o.toString() << rsLog;
+ }
+ }
+
+ /* apply the log op that is in param o
+ @return bool success (true) or failure (false)
+ */
+ bool replset::SyncTail::syncApply(const BSONObj &o) {
+ const char *ns = o.getStringField("ns");
+ if ( *ns == '.' || *ns == 0 ) {
+ blank(o);
+ return true;
+ }
+
+ Client::Context ctx(ns);
+ ctx.getClient()->curop()->reset();
+ return !applyOperation_inlock(o);
+ }
+
+ /* initial oplog application, during initial sync, after cloning.
+ @return false on failure.
+ this method returns an error and doesn't throw exceptions (i think).
+ */
+ bool ReplSetImpl::initialSyncOplogApplication(const OpTime& applyGTE, const OpTime& minValid) {
+ Member *source = 0;
+ OplogReader r;
+
+ // keep trying to initial sync from oplog until we run out of targets
+ while ((source = _getOplogReader(r, applyGTE)) != 0) {
+ replset::InitialSync init(source->fullName());
+ if (init.oplogApplication(r, source, applyGTE, minValid)) {
+ return true;
+ }
+
+ r.resetConnection();
+ veto(source->fullName(), 60);
+ log() << "replSet applying oplog from " << source->fullName() << " failed, trying again" << endl;
+ }
+
+ log() << "replSet initial sync error: couldn't find oplog to sync from" << rsLog;
+ return false;
+ }
+
+ bool replset::InitialSync::oplogApplication(OplogReader& r, const Member* source,
+ const OpTime& applyGTE, const OpTime& minValid) {
+
+ const string hn = source->fullName();
+ try {
+ r.tailingQueryGTE( rsoplog, applyGTE );
+ if ( !r.haveCursor() ) {
+ log() << "replSet initial sync oplog query error" << rsLog;
+ return false;
+ }
+
+ {
+ if( !r.more() ) {
+ sethbmsg("replSet initial sync error reading remote oplog");
+ log() << "replSet initial sync error remote oplog (" << rsoplog << ") on host " << hn << " is empty?" << rsLog;
+ return false;
+ }
+ bo op = r.next();
+ OpTime t = op["ts"]._opTime();
+ r.putBack(op);
+
+ if( op.firstElementFieldName() == string("$err") ) {
+ log() << "replSet initial sync error querying " << rsoplog << " on " << hn << " : " << op.toString() << rsLog;
+ return false;
+ }
+
+ uassert( 13508 , str::stream() << "no 'ts' in first op in oplog: " << op , !t.isNull() );
+ if( t > applyGTE ) {
+ sethbmsg(str::stream() << "error " << hn << " oplog wrapped during initial sync");
+ log() << "replSet initial sync expected first optime of " << applyGTE << rsLog;
+ log() << "replSet initial sync but received a first optime of " << t << " from " << hn << rsLog;
+ return false;
+ }
+
+ sethbmsg(str::stream() << "initial oplog application from " << hn << " starting at "
+ << t.toStringPretty() << " to " << minValid.toStringPretty());
+ }
+ }
+ catch(DBException& e) {
+ log() << "replSet initial sync failing: " << e.toString() << rsLog;
+ return false;
+ }
+
+ /* we lock outside the loop to avoid the overhead of locking on every operation. */
+ writelock lk("");
+
+ // todo : use exhaust
+ OpTime ts;
+ time_t start = time(0);
+ unsigned long long n = 0;
+ int fails = 0;
+ while( ts < minValid ) {
+ try {
+ // There are some special cases with initial sync (see the catch block), so we
+ // don't want to break out of this while until we've reached minvalid. Thus, we'll
+ // keep trying to requery.
+ if( !r.more() ) {
+ OCCASIONALLY log() << "replSet initial sync oplog: no more records" << endl;
+ sleepsecs(1);
+
+ r.resetCursor();
+ r.tailingQueryGTE(rsoplog, theReplSet->lastOpTimeWritten);
+ if ( !r.haveCursor() ) {
+ if (fails++ > 30) {
+ log() << "replSet initial sync tried to query oplog 30 times, giving up" << endl;
+ return false;
+ }
+ }
+
+ continue;
+ }
+
+ BSONObj o = r.nextSafe(); /* note we might get "not master" at some point */
+ ts = o["ts"]._opTime();
+
+ {
+ if( (source->state() != MemberState::RS_PRIMARY &&
+ source->state() != MemberState::RS_SECONDARY) ||
+ replSetForceInitialSyncFailure ) {
+
+ int f = replSetForceInitialSyncFailure;
+ if( f > 0 ) {
+ replSetForceInitialSyncFailure = f-1;
+ log() << "replSet test code invoked, replSetForceInitialSyncFailure" << rsLog;
+ throw DBException("forced error",0);
+ }
+ log() << "replSet we are now primary" << rsLog;
+ throw DBException("primary changed",0);
+ }
+
+ applyOp(o, applyGTE);
+ }
+
+ if ( ++n % 1000 == 0 ) {
+ time_t now = time(0);
+ if (now - start > 10) {
+ // simple progress metering
+ log() << "replSet initialSyncOplogApplication applied " << n << " operations, synced to "
+ << ts.toStringPretty() << rsLog;
+ start = now;
+ }
+ }
+
+ getDur().commitIfNeeded();
+ }
+ catch (DBException& e) {
+ // Skip duplicate key exceptions.
+ // These are relatively common on initial sync: if a document is inserted
+ // early in the clone step, the insert will be replayed but the document
+ // will probably already have been cloned over.
+ if( e.getCode() == 11000 || e.getCode() == 11001 || e.getCode() == 12582) {
+ continue;
+ }
+
+ // handle cursor not found (just requery)
+ if( e.getCode() == 13127 ) {
+ log() << "replSet requerying oplog after cursor not found condition, ts: " << ts.toStringPretty() << endl;
+ r.resetCursor();
+ r.tailingQueryGTE(rsoplog, ts);
+ if( r.haveCursor() ) {
+ continue;
+ }
+ }
+
+ // TODO: handle server restart
+
+ if( ts <= minValid ) {
+ // didn't make it far enough
+ log() << "replSet initial sync failing, error applying oplog : " << e.toString() << rsLog;
+ return false;
+ }
+
+ // otherwise, whatever, we'll break out of the loop and catch
+ // anything that's really wrong in syncTail
+ }
+ }
+ return true;
+ }
+
+ void replset::InitialSync::applyOp(const BSONObj& o, const OpTime& applyGTE) {
+ OpTime ts = o["ts"]._opTime();
+
+ // optimes before we started copying need not be applied.
+ if( ts >= applyGTE ) {
+ if (!syncApply(o)) {
+ if (shouldRetry(o)) {
+ uassert(15915, "replSet update still fails after adding missing object", syncApply(o));
+ }
+ }
+ }
+
+ // with repl sets we write the ops to our oplog, too
+ _logOpObjRS(o);
+ }
+
+ /* should be in RECOVERING state on arrival here.
+ readlocks
+ @return true if transitioned to SECONDARY
+ */
+ bool ReplSetImpl::tryToGoLiveAsASecondary(OpTime& /*out*/ minvalid) {
+ bool golive = false;
+
+ {
+ lock lk( this );
+
+ if (_maintenanceMode > 0) {
+ // we're not actually going live
+ return true;
+ }
+ }
+
+ {
+ readlock lk("local.replset.minvalid");
+ BSONObj mv;
+ if( Helpers::getSingleton("local.replset.minvalid", mv) ) {
+ minvalid = mv["ts"]._opTime();
+ if( minvalid <= lastOpTimeWritten ) {
+ golive=true;
+ }
+ }
+ else
+ golive = true; /* must have been the original member */
+ }
+ if( golive ) {
+ sethbmsg("");
+ changeState(MemberState::RS_SECONDARY);
+ }
+ return golive;
+ }
+
+ bool ReplSetImpl::_isStale(OplogReader& r, const OpTime& startTs, BSONObj& remoteOldestOp) {
+ remoteOldestOp = r.findOne(rsoplog, Query());
+ OpTime remoteTs = remoteOldestOp["ts"]._opTime();
+ DEV log() << "replSet remoteOldestOp: " << remoteTs.toStringLong() << rsLog;
+ else LOG(3) << "replSet remoteOldestOp: " << remoteTs.toStringLong() << rsLog;
+ DEV {
+ log() << "replSet lastOpTimeWritten: " << lastOpTimeWritten.toStringLong() << rsLog;
+ log() << "replSet our state: " << state().toString() << rsLog;
+ }
+ if( startTs >= remoteTs ) {
+ return false;
+ }
+
+ return true;
+ }
+
+ Member* ReplSetImpl::_getOplogReader(OplogReader& r, const OpTime& minTS) {
+ Member *target = 0, *stale = 0;
+ BSONObj oldest;
+
+ assert(r.conn() == 0);
+
+ while ((target = getMemberToSyncTo()) != 0) {
+ string current = target->fullName();
+
+ if( !r.connect(current) ) {
+ log(2) << "replSet can't connect to " << current << " to read operations" << rsLog;
+ r.resetConnection();
+ veto(current);
+ continue;
+ }
+
+ if( !minTS.isNull() && _isStale(r, minTS, oldest) ) {
+ r.resetConnection();
+ veto(current, 600);
+ stale = target;
+ continue;
+ }
+
+ // if we made it here, the target is up and not stale
+ return target;
+ }
+
+ // the only viable sync target was stale
+ if (stale) {
+ log() << "replSet error RS102 too stale to catch up, at least from " << stale->fullName() << rsLog;
+ log() << "replSet our last optime : " << lastOpTimeWritten.toStringLong() << rsLog;
+ log() << "replSet oldest at " << stale->fullName() << " : " << oldest["ts"]._opTime().toStringLong() << rsLog;
+ log() << "replSet See http://www.mongodb.org/display/DOCS/Resyncing+a+Very+Stale+Replica+Set+Member" << rsLog;
+
+ // reset minvalid so that we can't become primary prematurely
+ {
+ writelock lk("local.replset.minvalid");
+ Helpers::putSingleton("local.replset.minvalid", oldest);
+ }
+
+ sethbmsg("error RS102 too stale to catch up");
+ changeState(MemberState::RS_RECOVERING);
+ sleepsecs(120);
+ }
+
+ return 0;
+ }
+
+ /* tail an oplog. ok to return, will be re-called. */
+ void ReplSetImpl::syncTail() {
+ // todo : locking vis a vis the mgr...
+ OplogReader r;
+ string hn;
+
+ // find a target to sync from the last op time written
+ Member* target = _getOplogReader(r, lastOpTimeWritten);
+
+ // no server found
+ if (target == 0) {
+ // if there is no one to sync from
+ OpTime minvalid;
+ tryToGoLiveAsASecondary(minvalid);
+ return;
+ }
+
+ r.tailingQueryGTE(rsoplog, lastOpTimeWritten);
+ // if target cut connections between connecting and querying (for
+ // example, because it stepped down) we might not have a cursor
+ if ( !r.haveCursor() ) {
+ return;
+ }
+
+ uassert(1000, "replSet source for syncing doesn't seem to be await capable -- is it an older version of mongodb?", r.awaitCapable() );
+
+ {
+ if( !r.more() ) {
+ /* maybe we are ahead and need to roll back? */
+ try {
+ bo theirLastOp = r.getLastOp(rsoplog);
+ if( theirLastOp.isEmpty() ) {
+ log() << "replSet error empty query result from " << hn << " oplog" << rsLog;
+ sleepsecs(2);
+ return;
+ }
+ OpTime theirTS = theirLastOp["ts"]._opTime();
+ if( theirTS < lastOpTimeWritten ) {
+ log() << "replSet we are ahead of the primary, will try to roll back" << rsLog;
+ syncRollback(r);
+ return;
+ }
+ /* we're not ahead? maybe our new query got fresher data. best to come back and try again */
+ log() << "replSet syncTail condition 1" << rsLog;
+ sleepsecs(1);
+ }
+ catch(DBException& e) {
+ log() << "replSet error querying " << hn << ' ' << e.toString() << rsLog;
+ veto(target->fullName());
+ sleepsecs(2);
+ }
+ return;
+ }
+
+ BSONObj o = r.nextSafe();
+ OpTime ts = o["ts"]._opTime();
+ long long h = o["h"].numberLong();
+ if( ts != lastOpTimeWritten || h != lastH ) {
+ log() << "replSet our last op time written: " << lastOpTimeWritten.toStringPretty() << rsLog;
+ log() << "replset source's GTE: " << ts.toStringPretty() << rsLog;
+ syncRollback(r);
+ return;
+ }
+ }
+
+ /* we have now checked if we need to rollback and we either don't have to or did it. */
+ {
+ OpTime minvalid;
+ tryToGoLiveAsASecondary(minvalid);
+ }
+
+ while( 1 ) {
+ {
+ Timer timeInWriteLock;
+ writelock lk("");
+ while( 1 ) {
+ if( !r.moreInCurrentBatch() ) {
+ dbtemprelease tempRelease;
+ {
+ // we need to occasionally check some things. between
+ // batches is probably a good time.
+ if( state().recovering() ) { // perhaps we should check this earlier? but not before the rollback checks.
+ /* can we go to RS_SECONDARY state? we can if not too old and if minvalid achieved */
+ OpTime minvalid;
+ bool golive = ReplSetImpl::tryToGoLiveAsASecondary(minvalid);
+ if( golive ) {
+ ;
+ }
+ else {
+ sethbmsg(str::stream() << "still syncing, not yet to minValid optime" << minvalid.toString());
+ }
+ // todo: too stale capability
+ }
+ if( !target->hbinfo().hbstate.readable() ) {
+ return;
+ }
+ }
+ r.more(); // to make the requestmore outside the db lock, which obviously is quite important
+ }
+ if( timeInWriteLock.micros() > 1000 ) {
+ dbtemprelease tempRelease;
+ timeInWriteLock.reset();
+ }
+ if( !r.more() )
+ break;
+ {
+ BSONObj o = r.nextSafe(); // note we might get "not master" at some point
+
+ int sd = myConfig().slaveDelay;
+ // ignore slaveDelay if the box is still initializing. once
+ // it becomes secondary we can worry about it.
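+                    // illustrative numbers (not from the code): with slaveDelay=3600 and an
+                    // op whose ts is 600 seconds old, sleeptime works out to 3000s, so we
+                    // take the 6-second-slice wait below and keep re-checking heartbeat
+                    // state and reconfig while we wait.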
+ if( sd && box.getState().secondary() ) {
+ const OpTime ts = o["ts"]._opTime();
+ long long a = ts.getSecs();
+ long long b = time(0);
+ long long lag = b - a;
+ long long sleeptime = sd - lag;
+ if( sleeptime > 0 ) {
+ dbtemprelease tempRelease;
+ uassert(12000, "rs slaveDelay differential too big check clocks and systems", sleeptime < 0x40000000);
+ if( sleeptime < 60 ) {
+ sleepsecs((int) sleeptime);
+ }
+ else {
+ log() << "replSet slavedelay sleep long time: " << sleeptime << rsLog;
+ // sleep(hours) would prevent reconfigs from taking effect & such!
+ long long waitUntil = b + sleeptime;
+ while( 1 ) {
+ sleepsecs(6);
+ if( time(0) >= waitUntil )
+ break;
+
+ if( !target->hbinfo().hbstate.readable() ) {
+ break;
+ }
+
+ if( myConfig().slaveDelay != sd ) // reconf
+ break;
+ }
+ }
+ }
+ } // endif slaveDelay
+
+ d.dbMutex.assertWriteLocked();
+ try {
+                        /* if we have become primary, we don't want to apply things from elsewhere
+ anymore. assumePrimary is in the db lock so we are safe as long as
+ we check after we locked above. */
+ if( box.getState().primary() ) {
+ log(0) << "replSet stopping syncTail we are now primary" << rsLog;
+ return;
+ }
+
+ // TODO: make this whole method a member of SyncTail (SERVER-4444)
+ replset::SyncTail tail("");
+ tail.syncApply(o);
+ _logOpObjRS(o); // with repl sets we write the ops to our oplog too
+ }
+ catch (DBException& e) {
+ sethbmsg(str::stream() << "syncTail: " << e.toString() << ", syncing: " << o);
+ veto(target->fullName(), 300);
+ sleepsecs(30);
+ return;
+ }
+ }
+ } // end while
+ } // end writelock scope
+
+ r.tailCheck();
+ if( !r.haveCursor() ) {
+ LOG(1) << "replSet end syncTail pass with " << hn << rsLog;
+ // TODO : reuse our connection to the primary.
+ return;
+ }
+
+ if( !target->hbinfo().hbstate.readable() ) {
+ return;
+ }
+ // looping back is ok because this is a tailable cursor
+ }
+ }
+
+ void ReplSetImpl::_syncThread() {
+ StateBox::SP sp = box.get();
+ if( sp.state.primary() ) {
+ sleepsecs(1);
+ return;
+ }
+ if( _blockSync || sp.state.fatal() || sp.state.startup() ) {
+ sleepsecs(5);
+ return;
+ }
+
+ /* do we have anything at all? */
+ if( lastOpTimeWritten.isNull() ) {
+ syncDoInitialSync();
+ return; // _syncThread will be recalled, starts from top again in case sync failed.
+ }
+
+ /* we have some data. continue tailing. */
+ syncTail();
+ }
+
+ void ReplSetImpl::syncThread() {
+ while( 1 ) {
+ // After a reconfig, we may not be in the replica set anymore, so
+ // check that we are in the set (and not an arbiter) before
+ // trying to sync with other replicas.
+ if( ! _self ) {
+ log() << "replSet warning did not detect own host and port, not syncing, config: " << theReplSet->config() << rsLog;
+ return;
+ }
+ if( myConfig().arbiterOnly ) {
+ return;
+ }
+
+ try {
+ _syncThread();
+ }
+ catch(DBException& e) {
+ sethbmsg(str::stream() << "syncThread: " << e.toString());
+ sleepsecs(10);
+ }
+ catch(...) {
+ sethbmsg("unexpected exception in syncThread()");
+ // TODO : SET NOT SECONDARY here?
+ sleepsecs(60);
+ }
+ sleepsecs(1);
+
+ /* normally msgCheckNewState gets called periodically, but in a single node repl set there
+ are no heartbeat threads, so we do it here to be sure. this is relevant if the singleton
+ member has done a stepDown() and needs to come back up.
+ */
+ OCCASIONALLY {
+ mgr->send( boost::bind(&Manager::msgCheckNewState, theReplSet->mgr) );
+ }
+ }
+ }
+
+ void startSyncThread() {
+ static int n;
+ if( n != 0 ) {
+ log() << "replSet ERROR : more than one sync thread?" << rsLog;
+ assert( n == 0 );
+ }
+ n++;
+
+ Client::initThread("rsSync");
+        cc().iAmSyncThread(); // for isSyncThread() (which is not used much; it is used in the secondary create-index code)
+ replLocalAuth();
+ theReplSet->syncThread();
+ cc().shutdown();
+ }
+
+ void GhostSync::starting() {
+ Client::initThread("rsGhostSync");
+ replLocalAuth();
+ }
+
+ void ReplSetImpl::blockSync(bool block) {
+ _blockSync = block;
+ if (_blockSync) {
+ // syncing is how we get into SECONDARY state, so we'll be stuck in
+ // RECOVERING until we unblock
+ changeState(MemberState::RS_RECOVERING);
+ }
+ }
+
+ void GhostSync::associateSlave(const BSONObj& id, const int memberId) {
+ const OID rid = id["_id"].OID();
+ rwlock lk( _lock , true );
+ shared_ptr<GhostSlave> &g = _ghostCache[rid];
+ if( g.get() == 0 ) {
+ g.reset( new GhostSlave() );
+ wassert( _ghostCache.size() < 10000 );
+ }
+ GhostSlave &slave = *g;
+ if (slave.init) {
+ LOG(1) << "tracking " << slave.slave->h().toString() << " as " << rid << rsLog;
+ return;
+ }
+
+ slave.slave = (Member*)rs->findById(memberId);
+ if (slave.slave != 0) {
+ slave.init = true;
+ }
+ else {
+ log() << "replset couldn't find a slave with id " << memberId
+ << ", not tracking " << rid << rsLog;
+ }
+ }
+
+ void GhostSync::updateSlave(const mongo::OID& rid, const OpTime& last) {
+ rwlock lk( _lock , false );
+ MAP::iterator i = _ghostCache.find( rid );
+ if ( i == _ghostCache.end() ) {
+ OCCASIONALLY warning() << "couldn't update slave " << rid << " no entry" << rsLog;
+ return;
+ }
+
+ GhostSlave& slave = *(i->second);
+ if (!slave.init) {
+ OCCASIONALLY log() << "couldn't update slave " << rid << " not init" << rsLog;
+ return;
+ }
+
+ ((ReplSetConfig::MemberCfg)slave.slave->config()).updateGroups(last);
+ }
+
+ void GhostSync::percolate(const BSONObj& id, const OpTime& last) {
+ const OID rid = id["_id"].OID();
+ GhostSlave* slave;
+ {
+ rwlock lk( _lock , false );
+
+ MAP::iterator i = _ghostCache.find( rid );
+ if ( i == _ghostCache.end() ) {
+ OCCASIONALLY log() << "couldn't percolate slave " << rid << " no entry" << rsLog;
+ return;
+ }
+
+ slave = i->second.get();
+ if (!slave->init) {
+ OCCASIONALLY log() << "couldn't percolate slave " << rid << " not init" << rsLog;
+ return;
+ }
+ }
+
+ assert(slave->slave);
+
+ const Member *target = rs->_currentSyncTarget;
+ if (!target || rs->box.getState().primary()
+ // we are currently syncing from someone who's syncing from us
+            // the target might end up with a new Member, but slave->slave never
+            // changes so we'll compare the names
+ || target == slave->slave || target->fullName() == slave->slave->fullName()) {
+ LOG(1) << "replica set ghost target no good" << endl;
+ return;
+ }
+
+ try {
+ if (!slave->reader.haveCursor()) {
+ if (!slave->reader.connect(id, slave->slave->id(), target->fullName())) {
+ // error message logged in OplogReader::connect
+ return;
+ }
+ slave->reader.ghostQueryGTE(rsoplog, last);
+ }
+
+ LOG(1) << "replSet last: " << slave->last.toString() << " to " << last.toString() << rsLog;
+ if (slave->last > last) {
+ return;
+ }
+
+ while (slave->last <= last) {
+ if (!slave->reader.more()) {
+ // we'll be back
+ return;
+ }
+
+ BSONObj o = slave->reader.nextSafe();
+ slave->last = o["ts"]._opTime();
+ }
+ LOG(2) << "now last is " << slave->last.toString() << rsLog;
+ }
+ catch (DBException& e) {
+ // we'll be back
+ LOG(2) << "replSet ghost sync error: " << e.what() << " for "
+ << slave->slave->fullName() << rsLog;
+ slave->reader.resetConnection();
+ }
+ }
+}
diff --git a/src/mongo/db/repl/test.html b/src/mongo/db/repl/test.html
new file mode 100644
index 00000000000..295ad2ef0e0
--- /dev/null
+++ b/src/mongo/db/repl/test.html
@@ -0,0 +1,11 @@
+<HTML>
+<BODY>
+<!-- see also jstests/rs/ -->
+<iframe src="http://127.0.0.1:28000/_replSet" width="100%" height="50%" frameborder=1>
+</iframe>
+
+<iframe src="http://127.0.0.1:28001/_replSet" width="100%" height="50%" frameborder=1>
+</iframe>
+
+</BODY>
+</HTML>
diff --git a/src/mongo/db/repl/testing.js b/src/mongo/db/repl/testing.js
new file mode 100644
index 00000000000..d741cf3a644
--- /dev/null
+++ b/src/mongo/db/repl/testing.js
@@ -0,0 +1,42 @@
+// helpers for testing repl sets
+// run
+// mongo --shell <host:port> testing.js
+
+cfg = {
+ _id: 'asdf',
+ members: [
+ { _id : 0, host : "dm_hp" },
+ { _id : 2, host : "dm_hp:27002" }
+ ]
+};
+c2 = {
+ _id: 'asdf',
+ members: [
+ { _id: 0, host: "dmthink" },
+ { _id: 2, host: "dmthink:27002" }
+ ]
+};
+
+db = db.getSisterDB("admin");
+local = db.getSisterDB("local");
+
+print("\n\ndb = admin db on localhost:27017");
+print("b = admin on localhost:27002");
+print("rc(x) = db.runCommand(x)");
+print("cfg = samp replset config");
+print("i() = replSetInitiate(cfg)");
+print("ism() = rc('ismaster')");
+print("\n\n");
+
+function rc(c) { return db.runCommand(c); }
+function i() { return rc({ replSetInitiate: cfg }); }
+function ism() { return rc("isMaster"); }
+
+b = 0;
+try {
+ b = new Mongo("localhost:27002").getDB("admin");
+}
+catch (e) {
+ print("\nCouldn't connect to b mongod instance\n");
+}
+
diff --git a/src/mongo/db/repl_block.cpp b/src/mongo/db/repl_block.cpp
new file mode 100644
index 00000000000..1776225505c
--- /dev/null
+++ b/src/mongo/db/repl_block.cpp
@@ -0,0 +1,256 @@
+// repl_block.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "repl.h"
+#include "repl_block.h"
+#include "instance.h"
+#include "dbhelpers.h"
+#include "../util/background.h"
+#include "../util/mongoutils/str.h"
+#include "../client/dbclient.h"
+#include "replutil.h"
+
+//#define REPLDEBUG(x) log() << "replBlock: " << x << endl;
+#define REPLDEBUG(x)
+
+namespace mongo {
+
+ using namespace mongoutils;
+
+ class SlaveTracking : public BackgroundJob {
+ public:
+ string name() const { return "SlaveTracking"; }
+
+ static const char * NS;
+
+ struct Ident {
+
+ Ident(const BSONObj& r, const string& h, const string& n) {
+ BSONObjBuilder b;
+ b.appendElements( r );
+ b.append( "host" , h );
+ b.append( "ns" , n );
+ obj = b.obj();
+ }
+
+ bool operator<( const Ident& other ) const {
+ return obj["_id"].OID() < other.obj["_id"].OID();
+ }
+
+ BSONObj obj;
+ };
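+        // illustrative shape of a local.slaves document maintained from an Ident
+        // (field values hypothetical):
+        //   { _id: ObjectId(...), host: "<slave host>", ns: "local.oplog.rs",
+        //     syncedTo: Timestamp(...) }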
+
+ struct Info {
+ Info() : loc(0) {}
+ ~Info() {
+ if ( loc && owned ) {
+ delete loc;
+ }
+ }
+ bool owned; // true if loc is a pointer of our creation (and not a pointer into a MMF)
+ OpTime * loc;
+ };
+
+ SlaveTracking() : _mutex("SlaveTracking") {
+ _dirty = false;
+ _started = false;
+ }
+
+ void run() {
+ Client::initThread( "slaveTracking" );
+ DBDirectClient db;
+ while ( ! inShutdown() ) {
+ sleepsecs( 1 );
+
+ if ( ! _dirty )
+ continue;
+
+ writelock lk(NS);
+
+ list< pair<BSONObj,BSONObj> > todo;
+
+ {
+ scoped_lock mylk(_mutex);
+
+ for ( map<Ident,Info>::iterator i=_slaves.begin(); i!=_slaves.end(); i++ ) {
+ BSONObjBuilder temp;
+ temp.appendTimestamp( "syncedTo" , i->second.loc[0].asDate() );
+ todo.push_back( pair<BSONObj,BSONObj>( i->first.obj.getOwned() ,
+ BSON( "$set" << temp.obj() ).getOwned() ) );
+ }
+ }
+
+ for ( list< pair<BSONObj,BSONObj> >::iterator i=todo.begin(); i!=todo.end(); i++ ) {
+ db.update( NS , i->first , i->second , true );
+ }
+
+ _dirty = false;
+ }
+ }
+
+ void reset() {
+ scoped_lock mylk(_mutex);
+ _slaves.clear();
+ }
+
+ void update( const BSONObj& rid , const string& host , const string& ns , OpTime last ) {
+ REPLDEBUG( host << " " << rid << " " << ns << " " << last );
+
+ scoped_lock mylk(_mutex);
+
+#ifdef _DEBUG
+ MongoFileAllowWrites allowWrites;
+#endif
+
+ Ident ident(rid,host,ns);
+ Info& i = _slaves[ ident ];
+
+ if (theReplSet && theReplSet->isPrimary()) {
+ theReplSet->ghost->updateSlave(ident.obj["_id"].OID(), last);
+ }
+
+ if ( i.loc ) {
+ if( i.owned )
+ i.loc[0] = last;
+ else
+ getDur().setNoJournal(i.loc, &last, sizeof(last));
+ return;
+ }
+
+ d.dbMutex.assertAtLeastReadLocked();
+
+ BSONObj res;
+ if ( Helpers::findOne( NS , ident.obj , res ) ) {
+ assert( res["syncedTo"].type() );
+ i.owned = false;
+ i.loc = (OpTime*)res["syncedTo"].value();
+ getDur().setNoJournal(i.loc, &last, sizeof(last));
+ return;
+ }
+
+ i.owned = true;
+ i.loc = new OpTime(last);
+ _dirty = true;
+
+ if ( ! _started ) {
+ // start background thread here since we definitely need it
+ _started = true;
+ go();
+ }
+
+ }
+
+ bool opReplicatedEnough( OpTime op , BSONElement w ) {
+ RARELY {
+ REPLDEBUG( "looking for : " << op << " w=" << w );
+ }
+
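+            // illustrative w values (a sketch, not exhaustive): a number such as 2 means
+            // the primary plus one slave must have the op; "majority" uses getMajority()
+            // over the whole config (arbiters included, per the comment below); any other
+            // string is looked up as a getLastError mode (tag rule) in the replset config.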
+ if (w.isNumber()) {
+ return replicatedToNum(op, w.numberInt());
+ }
+
+ if (!theReplSet) {
+ return false;
+ }
+
+ string wStr = w.String();
+ if (wStr == "majority") {
+ // use the entire set, including arbiters, to prevent writing
+ // to a majority of the set but not a majority of voters
+ return replicatedToNum(op, theReplSet->config().getMajority());
+ }
+
+ map<string,ReplSetConfig::TagRule*>::const_iterator it = theReplSet->config().rules.find(wStr);
+ uassert(14830, str::stream() << "unrecognized getLastError mode: " << wStr,
+ it != theReplSet->config().rules.end());
+
+ return op <= (*it).second->last;
+ }
+
+ bool replicatedToNum(OpTime& op, int w) {
+ if ( w <= 1 || ! _isMaster() )
+ return true;
+
+ w--; // now this is the # of slaves i need
+ scoped_lock mylk(_mutex);
+ for ( map<Ident,Info>::iterator i=_slaves.begin(); i!=_slaves.end(); i++) {
+ OpTime s = *(i->second.loc);
+ if ( s < op ) {
+ continue;
+ }
+ if ( --w == 0 )
+ return true;
+ }
+ return w <= 0;
+ }
+
+ unsigned getSlaveCount() const {
+ scoped_lock mylk(_mutex);
+
+ return _slaves.size();
+ }
+
+ // need to be careful not to deadlock with this
+ mutable mongo::mutex _mutex;
+ map<Ident,Info> _slaves;
+ bool _dirty;
+ bool _started;
+
+ } slaveTracking;
+
+ const char * SlaveTracking::NS = "local.slaves";
+
+ void updateSlaveLocation( CurOp& curop, const char * ns , OpTime lastOp ) {
+ if ( lastOp.isNull() )
+ return;
+
+ assert( str::startsWith(ns, "local.oplog.") );
+
+ Client * c = curop.getClient();
+ assert(c);
+ BSONObj rid = c->getRemoteID();
+ if ( rid.isEmpty() )
+ return;
+
+ slaveTracking.update( rid , curop.getRemoteString( false ) , ns , lastOp );
+
+ if (theReplSet && !theReplSet->isPrimary()) {
+ // we don't know the slave's port, so we make the replica set keep
+ // a map of rids to slaves
+ log(2) << "percolating " << lastOp.toString() << " from " << rid << endl;
+ theReplSet->ghost->send( boost::bind(&GhostSync::percolate, theReplSet->ghost, rid, lastOp) );
+ }
+ }
+
+ bool opReplicatedEnough( OpTime op , BSONElement w ) {
+ return slaveTracking.opReplicatedEnough( op , w );
+ }
+
+ bool opReplicatedEnough( OpTime op , int w ) {
+ return slaveTracking.replicatedToNum( op , w );
+ }
+
+ void resetSlaveCache() {
+ slaveTracking.reset();
+ }
+
+ unsigned getSlaveCount() {
+ return slaveTracking.getSlaveCount();
+ }
+}
diff --git a/src/mongo/db/repl_block.h b/src/mongo/db/repl_block.h
new file mode 100644
index 00000000000..bb74deea10f
--- /dev/null
+++ b/src/mongo/db/repl_block.h
@@ -0,0 +1,39 @@
+// repl_block.h - blocking on writes for replication
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../pch.h"
+#include "client.h"
+#include "curop.h"
+
+/**
+ local.slaves - current location for all slaves
+
+ */
+namespace mongo {
+
+ void updateSlaveLocation( CurOp& curop, const char * oplog_ns , OpTime lastOp );
+
+ /** @return true if op has made it to w servers */
+ bool opReplicatedEnough( OpTime op , int w );
+ bool opReplicatedEnough( OpTime op , BSONElement w );
+
+ void resetSlaveCache();
+ unsigned getSlaveCount();
+}
diff --git a/src/mongo/db/replutil.h b/src/mongo/db/replutil.h
new file mode 100644
index 00000000000..6f4dbb875d2
--- /dev/null
+++ b/src/mongo/db/replutil.h
@@ -0,0 +1,102 @@
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "db.h"
+#include "dbhelpers.h"
+#include "json.h"
+#include "../client/dbclient.h"
+#include "repl.h"
+#include "cmdline.h"
+#include "repl/rs.h"
+#include "ops/query.h"
+
+namespace mongo {
+
+ extern const char *replAllDead;
+
+ /* note we always return true for the "local" namespace.
+
+ we should not allow most operations when not the master
+ also we report not master if we are "dead".
+
+ See also CmdIsMaster.
+
+ If 'client' is not specified, the current client is used.
+ */
+ inline bool _isMaster() {
+ if( replSet ) {
+ if( theReplSet )
+ return theReplSet->isPrimary();
+ return false;
+ }
+
+ if( ! replSettings.slave )
+ return true;
+
+ if ( replAllDead )
+ return false;
+
+ if( replSettings.master ) {
+ // if running with --master --slave, allow.
+ return true;
+ }
+
+ if ( cc().isGod() )
+ return true;
+
+ return false;
+ }
+ inline bool isMaster(const char * dbname = 0) {
+ if( _isMaster() )
+ return true;
+ if ( ! dbname ) {
+ Database *database = cc().database();
+ assert( database );
+ dbname = database->name.c_str();
+ }
+ return strcmp( dbname , "local" ) == 0;
+ }
+ inline bool isMasterNs( const char *ns ) {
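+        // illustrative: "local" and "local.oplog.rs" always pass here, even on a
+        // secondary; something like "localizations.foo" fails the ns[5] check below.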
+ if ( _isMaster() )
+ return true;
+ assert( ns );
+ if ( ! str::startsWith( ns , "local" ) )
+ return false;
+ return ns[5] == 0 || ns[5] == '.';
+ }
+
+ inline void notMasterUnless(bool expr) {
+ uassert( 10107 , "not master" , expr );
+ }
+
+ /** we allow queries to SimpleSlave's */
+ inline void replVerifyReadsOk(ParsedQuery& pq) {
+ if( replSet ) {
+            /* todo: speed up the secondary case. as written here there are 2 mutex entries, it can be 1. */
+ if( isMaster() ) return;
+ uassert(13435, "not master and slaveOk=false", pq.hasOption(QueryOption_SlaveOk));
+ uassert(13436, "not master or secondary; cannot currently read from this replSet member", theReplSet && theReplSet->isSecondary() );
+ }
+ else {
+ notMasterUnless(isMaster() || pq.hasOption(QueryOption_SlaveOk) || replSettings.slave == SimpleSlave );
+ }
+ }
+
+
+
+} // namespace mongo
diff --git a/src/mongo/db/resource.h b/src/mongo/db/resource.h
new file mode 100644
index 00000000000..9ba1ed26a0c
--- /dev/null
+++ b/src/mongo/db/resource.h
@@ -0,0 +1,16 @@
+//{{NO_DEPENDENCIES}}
+// Microsoft Visual C++ generated include file.
+// Used by db.rc
+//
+#define IDI_ICON2 102
+
+// Next default values for new objects
+//
+#ifdef APSTUDIO_INVOKED
+#ifndef APSTUDIO_READONLY_SYMBOLS
+#define _APS_NEXT_RESOURCE_VALUE 104
+#define _APS_NEXT_COMMAND_VALUE 40001
+#define _APS_NEXT_CONTROL_VALUE 1001
+#define _APS_NEXT_SYMED_VALUE 101
+#endif
+#endif
diff --git a/src/mongo/db/restapi.cpp b/src/mongo/db/restapi.cpp
new file mode 100644
index 00000000000..370051354a2
--- /dev/null
+++ b/src/mongo/db/restapi.cpp
@@ -0,0 +1,294 @@
+/** @file restapi.cpp
+ web rest api
+*/
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "../util/net/miniwebserver.h"
+#include "../util/mongoutils/html.h"
+#include "../util/md5.hpp"
+#include "instance.h"
+#include "dbwebserver.h"
+#include "dbhelpers.h"
+#include "repl.h"
+#include "replutil.h"
+#include "clientcursor.h"
+#include "background.h"
+
+#include "restapi.h"
+
+namespace mongo {
+
+ extern const char *replInfo;
+ bool getInitialSyncCompleted();
+
+ using namespace bson;
+ using namespace mongoutils::html;
+
+ class RESTHandler : public DbWebHandler {
+ public:
+ RESTHandler() : DbWebHandler( "DUMMY REST" , 1000 , true ) {}
+
+ virtual bool handles( const string& url ) const {
+ return
+ url[0] == '/' &&
+ url.find_last_of( '/' ) > 0;
+ }
+
+ virtual void handle( const char *rq, string url, BSONObj params,
+ string& responseMsg, int& responseCode,
+ vector<string>& headers, const SockAddr &from ) {
+
+ string::size_type first = url.find( "/" , 1 );
+ if ( first == string::npos ) {
+ responseCode = 400;
+ return;
+ }
+
+ string method = MiniWebServer::parseMethod( rq );
+ string dbname = url.substr( 1 , first - 1 );
+ string coll = url.substr( first + 1 );
+ string action = "";
+
+ string::size_type last = coll.find_last_of( "/" );
+ if ( last == string::npos ) {
+ action = coll;
+ coll = "_defaultCollection";
+ }
+ else {
+ action = coll.substr( last + 1 );
+ coll = coll.substr( 0 , last );
+ }
+
+ for ( string::size_type i=0; i<coll.size(); i++ )
+ if ( coll[i] == '/' )
+ coll[i] = '.';
+
+ string fullns = MiniWebServer::urlDecode(dbname + "." + coll);
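+            // worked example (hypothetical URL): "/foo/bar/baz/someAction" parses to
+            // dbname "foo", coll "bar/baz" -> "bar.baz", action "someAction" and
+            // fullns "foo.bar.baz"; "/foo/someAction" falls back to "_defaultCollection".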
+
+ headers.push_back( (string)"x-action: " + action );
+ headers.push_back( (string)"x-ns: " + fullns );
+
+ bool html = false;
+
+ stringstream ss;
+
+ if ( method == "GET" ) {
+ responseCode = 200;
+ html = handleRESTQuery( fullns , action , params , responseCode , ss );
+ }
+ else if ( method == "POST" ) {
+ responseCode = 201;
+ handlePost( fullns , MiniWebServer::body( rq ) , params , responseCode , ss );
+ }
+ else {
+ responseCode = 400;
+ headers.push_back( "X_err: bad request" );
+ ss << "don't know how to handle a [" << method << "]";
+ out() << "don't know how to handle a [" << method << "]" << endl;
+ }
+
+ if( html )
+ headers.push_back("Content-Type: text/html;charset=utf-8");
+ else
+ headers.push_back("Content-Type: text/plain;charset=utf-8");
+
+ responseMsg = ss.str();
+ }
+
+ bool handleRESTQuery( string ns , string action , BSONObj & params , int & responseCode , stringstream & out ) {
+ Timer t;
+
+ int html = _getOption( params["html"] , 0 );
+ int skip = _getOption( params["skip"] , 0 );
+ int num = _getOption( params["limit"] , _getOption( params["count" ] , 1000 ) ); // count is old, limit is new
+
+ int one = 0;
+ if ( params["one"].type() == String && tolower( params["one"].valuestr()[0] ) == 't' ) {
+ num = 1;
+ one = 1;
+ }
+
+ BSONObjBuilder queryBuilder;
+
+ BSONObjIterator i(params);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ string name = e.fieldName();
+                if ( name.find( "filter_" ) != 0 )
+ continue;
+
+ string field = name.substr(7);
+ const char * val = e.valuestr();
+
+ char * temp;
+
+                // TODO: this is how I guess if something is a number. pretty lame right now
+ double number = strtod( val , &temp );
+ if ( temp != val )
+ queryBuilder.append( field , number );
+ else
+ queryBuilder.append( field , val );
+ }
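+            // e.g. (hypothetical request) ?filter_age=25&filter_name=bob builds the query
+            // { age: 25.0, name: "bob" } -- numeric-looking values are appended as doubles.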
+
+ BSONObj query = queryBuilder.obj();
+ auto_ptr<DBClientCursor> cursor = db.query( ns.c_str() , query, num , skip );
+ uassert( 13085 , "query failed for dbwebserver" , cursor.get() );
+
+ if ( one ) {
+ if ( cursor->more() ) {
+ BSONObj obj = cursor->next();
+ out << obj.jsonString(Strict,html?1:0) << '\n';
+ }
+ else {
+ responseCode = 404;
+ }
+ return html != 0;
+ }
+
+ if( html ) {
+ string title = string("query ") + ns;
+ out << start(title)
+ << p(title)
+ << "<pre>";
+ }
+ else {
+ out << "{\n";
+ out << " \"offset\" : " << skip << ",\n";
+ out << " \"rows\": [\n";
+ }
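+            // the non-html response assembled here and closed further down looks roughly
+            // like (values illustrative):
+            //   { "offset" : 0, "rows": [ {...}, {...} ], "total_rows" : 2,
+            //     "query" : {...}, "millis" : 3 }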
+
+ int howMany = 0;
+ while ( cursor->more() ) {
+ if ( howMany++ && html == 0 )
+ out << " ,\n";
+ BSONObj obj = cursor->next();
+ if( html ) {
+ if( out.tellp() > 4 * 1024 * 1024 ) {
+ out << "Stopping output: more than 4MB returned and in html mode\n";
+ break;
+ }
+ out << obj.jsonString(Strict, html?1:0) << "\n\n";
+ }
+ else {
+ if( out.tellp() > 50 * 1024 * 1024 ) // 50MB limit - we are using ram
+ break;
+ out << " " << obj.jsonString();
+ }
+ }
+
+ if( html ) {
+ out << "</pre>\n";
+ if( howMany == 0 ) out << p("Collection is empty");
+ out << _end();
+ }
+ else {
+ out << "\n ],\n\n";
+ out << " \"total_rows\" : " << howMany << " ,\n";
+ out << " \"query\" : " << query.jsonString() << " ,\n";
+ out << " \"millis\" : " << t.millis() << '\n';
+ out << "}\n";
+ }
+
+ return html != 0;
+ }
+
+ // TODO Generate id and revision per couch POST spec
+ void handlePost( string ns, const char *body, BSONObj& params, int & responseCode, stringstream & out ) {
+ try {
+ BSONObj obj = fromjson( body );
+ db.insert( ns.c_str(), obj );
+ }
+ catch ( ... ) {
+ responseCode = 400; // Bad Request. Seems reasonable for now.
+ out << "{ \"ok\" : false }";
+ return;
+ }
+
+ responseCode = 201;
+ out << "{ \"ok\" : true }";
+ }
+
+ int _getOption( BSONElement e , int def ) {
+ if ( e.isNumber() )
+ return e.numberInt();
+ if ( e.type() == String )
+ return atoi( e.valuestr() );
+ return def;
+ }
+
+ DBDirectClient db;
+
+ } restHandler;
+
+ bool RestAdminAccess::haveAdminUsers() const {
+ readlocktryassert rl("admin.system.users", 10000);
+ Client::Context cx( "admin.system.users", dbpath, false );
+ return ! Helpers::isEmpty("admin.system.users", false);
+ }
+
+ BSONObj RestAdminAccess::getAdminUser( const string& username ) const {
+ Client::GodScope gs;
+ readlocktryassert rl("admin.system.users", 10000);
+ Client::Context cx( "admin.system.users" );
+ BSONObj user;
+ if ( Helpers::findOne( "admin.system.users" , BSON( "user" << username ) , user ) )
+ return user.copy();
+ return BSONObj();
+ }
+
+ class LowLevelMongodStatus : public WebStatusPlugin {
+ public:
+ LowLevelMongodStatus() : WebStatusPlugin( "overview" , 5 , "(only reported if can acquire read lock quickly)" ) {}
+
+ virtual void init() {}
+
+ void _gotLock( int millis , stringstream& ss ) {
+ ss << "<pre>\n";
+ ss << "time to get readlock: " << millis << "ms\n";
+ ss << "# databases: " << dbHolder().sizeInfo() << '\n';
+ ss << "# Cursors: " << ClientCursor::numCursors() << '\n';
+ ss << "replication: ";
+ if( *replInfo )
+ ss << "\nreplInfo: " << replInfo << "\n\n";
+ if( replSet ) {
+ ss << a("", "see replSetGetStatus link top of page") << "--replSet </a>" << cmdLine._replSet;
+ }
+ if ( replAllDead )
+ ss << "\n<b>replication replAllDead=" << replAllDead << "</b>\n";
+ else {
+ ss << "\nmaster: " << replSettings.master << '\n';
+ ss << "slave: " << replSettings.slave << '\n';
+ ss << '\n';
+ }
+
+ BackgroundOperation::dump(ss);
+ ss << "</pre>\n";
+ }
+
+ virtual void run( stringstream& ss ) {
+ Timer t;
+ readlocktry lk( "" , 300 );
+ if ( lk.got() ) {
+ _gotLock( t.millis() , ss );
+ }
+ else {
+ ss << "\n<b>timed out getting lock</b>\n";
+ }
+ }
+ } lowLevelMongodStatus;
+}
diff --git a/src/mongo/db/restapi.h b/src/mongo/db/restapi.h
new file mode 100644
index 00000000000..e5ac52083fe
--- /dev/null
+++ b/src/mongo/db/restapi.h
@@ -0,0 +1,34 @@
+/** @file restapi.h
+ */
+
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../util/admin_access.h"
+
+namespace mongo {
+
+ class RestAdminAccess : public AdminAccess {
+ public:
+ virtual ~RestAdminAccess() { }
+
+ virtual bool haveAdminUsers() const;
+ virtual BSONObj getAdminUser( const string& username ) const;
+ };
+
+} // namespace mongo
diff --git a/src/mongo/db/scanandorder.cpp b/src/mongo/db/scanandorder.cpp
new file mode 100644
index 00000000000..b5e282a5866
--- /dev/null
+++ b/src/mongo/db/scanandorder.cpp
@@ -0,0 +1,105 @@
+/* scanandorder.cpp
+   Order results (that aren't already indexed and in order).
+*/
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "scanandorder.h"
+
+namespace mongo {
+
+ const unsigned ScanAndOrder::MaxScanAndOrderBytes = 32 * 1024 * 1024;
+
+ void ScanAndOrder::_add(BSONObj& k, BSONObj o, DiskLoc* loc) {
+ if (!loc) {
+ _best.insert(make_pair(k.getOwned(),o.getOwned()));
+ }
+ else {
+ BSONObjBuilder b;
+ b.appendElements(o);
+ b.append("$diskLoc", loc->toBSONObj());
+ _best.insert(make_pair(k.getOwned(), b.obj().getOwned()));
+ }
+ }
+
+ void ScanAndOrder::_addIfBetter(BSONObj& k, BSONObj o, BestMap::iterator i, DiskLoc* loc) {
+ /* todo : we don't correct _approxSize here. */
+ const BSONObj& worstBestKey = i->first;
+ int c = worstBestKey.woCompare(k, _order._spec.keyPattern);
+ if ( c > 0 ) {
+ // k is better, 'upgrade'
+ _best.erase(i);
+ _add(k, o, loc);
+ }
+ }
+
+
+ void ScanAndOrder::add(BSONObj o, DiskLoc* loc) {
+ assert( o.isValid() );
+ BSONObj k;
+ try {
+ k = _order.getKeyFromObject(o);
+ }
+ catch (UserException &e) {
+ if ( e.getCode() == ParallelArraysCode ) { // cannot get keys for parallel arrays
+ // fix lasterror text to be more accurate.
+ uasserted( 15925, "cannot sort with keys that are parallel arrays" );
+ }
+ else
+ throw;
+ }
+
+ if ( k.isEmpty() ) {
+ return;
+ }
+ if ( (int) _best.size() < _limit ) {
+ _approxSize += k.objsize();
+ _approxSize += o.objsize();
+
+ /* note : adjust when bson return limit adjusts. note this limit should be a bit higher. */
+ uassert( 10128 , "too much data for sort() with no index. add an index or specify a smaller limit", _approxSize < MaxScanAndOrderBytes );
+
+ _add(k, o, loc);
+ return;
+ }
+ BestMap::iterator i;
+ assert( _best.end() != _best.begin() );
+ i = _best.end();
+ i--;
+ _addIfBetter(k, o, i, loc);
+ }
+
+
+ void ScanAndOrder::fill(BufBuilder& b, Projection *filter, int& nout ) const {
+ int n = 0;
+ int nFilled = 0;
+ for ( BestMap::const_iterator i = _best.begin(); i != _best.end(); i++ ) {
+ n++;
+ if ( n <= _startFrom )
+ continue;
+ const BSONObj& o = i->second;
+ fillQueryResultFromObj(b, filter, o);
+ nFilled++;
+ if ( nFilled >= _limit )
+ break;
+ uassert( 10129 , "too much data for sort() with no index", b.len() < (int)MaxScanAndOrderBytes ); // appserver limit
+ }
+ nout = nFilled;
+ }
+
+} // namespace mongo
diff --git a/src/mongo/db/scanandorder.h b/src/mongo/db/scanandorder.h
new file mode 100644
index 00000000000..33e76f61f67
--- /dev/null
+++ b/src/mongo/db/scanandorder.h
@@ -0,0 +1,111 @@
+/* scanandorder.h
+   Order results (that aren't already indexed and in order).
+*/
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "indexkey.h"
+#include "queryutil.h"
+#include "projection.h"
+
+namespace mongo {
+
+ /* todo:
+ _ limit amount of data
+ */
+
+ class KeyType : boost::noncopyable {
+ public:
+ IndexSpec _spec;
+ FieldRangeVector _keyCutter;
+ public:
+ KeyType(BSONObj pattern, const FieldRangeSet &frs):
+ _spec((assert(!pattern.isEmpty()),pattern)),
+ _keyCutter(frs, _spec, 1) {
+ }
+
+ /**
+ * @return first key of the object that would be encountered while
+ * scanning index with keySpec 'pattern' using constraints 'frs', or
+ * BSONObj() if no such key.
+ */
+ BSONObj getKeyFromObject(BSONObj o) {
+ return _keyCutter.firstMatch(o);
+ }
+ };
+
+ /* todo:
+ _ respect limit
+ _ check for excess mem usage
+ _ response size limit from runquery; push it up a bit.
+ */
+
+ inline void fillQueryResultFromObj(BufBuilder& bb, Projection *filter, const BSONObj& js, DiskLoc* loc=NULL) {
+ if ( filter ) {
+ BSONObjBuilder b( bb );
+ filter->transform( js , b );
+ if (loc)
+ b.append("$diskLoc", loc->toBSONObj());
+ b.done();
+ }
+ else if (loc) {
+ BSONObjBuilder b( bb );
+ b.appendElements(js);
+ b.append("$diskLoc", loc->toBSONObj());
+ b.done();
+ }
+ else {
+ bb.appendBuf((void*) js.objdata(), js.objsize());
+ }
+ }
+
+ typedef multimap<BSONObj,BSONObj,BSONObjCmp> BestMap;
+ class ScanAndOrder {
+ public:
+ static const unsigned MaxScanAndOrderBytes;
+
+ ScanAndOrder(int startFrom, int limit, BSONObj order, const FieldRangeSet &frs) :
+ _best( BSONObjCmp( order ) ),
+ _startFrom(startFrom), _order(order, frs) {
+ _limit = limit > 0 ? limit + _startFrom : 0x7fffffff;
+ _approxSize = 0;
+ }
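+        // illustrative: with startFrom=100 and limit=10 the map keeps at most the 110
+        // best entries; fill() below then skips the first 100 and emits the remaining 10.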
+
+ int size() const { return _best.size(); }
+
+ void add(BSONObj o, DiskLoc* loc);
+
+ /* scanning complete. stick the query result in b for n objects. */
+ void fill(BufBuilder& b, Projection *filter, int& nout ) const;
+
+ private:
+
+ void _add(BSONObj& k, BSONObj o, DiskLoc* loc);
+
+ void _addIfBetter(BSONObj& k, BSONObj o, BestMap::iterator i, DiskLoc* loc);
+
+ BestMap _best; // key -> full object
+ int _startFrom;
+ int _limit; // max to send back.
+ KeyType _order;
+ unsigned _approxSize;
+
+ };
+
+} // namespace mongo
diff --git a/src/mongo/db/security.cpp b/src/mongo/db/security.cpp
new file mode 100644
index 00000000000..c9b9bb40326
--- /dev/null
+++ b/src/mongo/db/security.cpp
@@ -0,0 +1,106 @@
+// security.cpp
+
+/**
+ * Copyright (C) 2009 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "security.h"
+#include "security_common.h"
+#include "instance.h"
+#include "client.h"
+#include "curop-inl.h"
+#include "db.h"
+#include "dbhelpers.h"
+
+// this is the _mongod only_ implementation of security.h
+
+namespace mongo {
+
+ bool AuthenticationInfo::_warned = false;
+ /*
+ void AuthenticationInfo::print() const {
+ cout << "AuthenticationInfo: " << this << '\n';
+ for ( MA::const_iterator i=_dbs.begin(); i!=_dbs.end(); i++ ) {
+ cout << "\t" << i->first << "\t" << i->second.level << '\n';
+ }
+ cout << "END" << endl;
+ }
+ */
+
+ string AuthenticationInfo::getUser( const string& dbname ) const {
+ scoped_spinlock lk(_lock);
+
+ MA::const_iterator i = _dbs.find(dbname);
+ if ( i == _dbs.end() )
+ return "";
+
+ return i->second.user;
+ }
+
+
+ bool AuthenticationInfo::_isAuthorizedSpecialChecks( const string& dbname ) const {
+ if ( cc().isGod() )
+ return true;
+
+ if ( isLocalHost ) {
+ Client::GodScope gs;
+ Client::ReadContext ctx("admin.system.users");
+ BSONObj result;
+ if( ! Helpers::getSingleton("admin.system.users", result) ) {
+ if( ! _warned ) {
+ // you could get a few of these in a race, but that's ok
+ _warned = true;
+ log() << "note: no users configured in admin.system.users, allowing localhost access" << endl;
+ }
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ bool CmdAuthenticate::getUserObj(const string& dbname, const string& user, BSONObj& userObj, string& pwd) {
+ if (user == internalSecurity.user) {
+ uassert(15889, "key file must be used to log in with internal user", cmdLine.keyFile);
+ pwd = internalSecurity.pwd;
+ }
+ else {
+ // static BSONObj userPattern = fromjson("{\"user\":1}");
+ string systemUsers = dbname + ".system.users";
+ // OCCASIONALLY Helpers::ensureIndex(systemUsers.c_str(), userPattern, false, "user_1");
+ {
+ BSONObjBuilder b;
+ b << "user" << user;
+ BSONObj query = b.done();
+ if( !Helpers::findOne(systemUsers.c_str(), query, userObj) ) {
+ log() << "auth: couldn't find user " << user << ", " << systemUsers << endl;
+ return false;
+ }
+ }
+
+ pwd = userObj.getStringField("pwd");
+ }
+ return true;
+ }
+
+ bool CmdLogout::run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ AuthenticationInfo *ai = cc().getAuthenticationInfo();
+ ai->logout(dbname);
+ return true;
+ }
+
+} // namespace mongo
+
diff --git a/src/mongo/db/security.h b/src/mongo/db/security.h
new file mode 100755
index 00000000000..f193f305def
--- /dev/null
+++ b/src/mongo/db/security.h
@@ -0,0 +1,113 @@
+// security.h
+
+/**
+* Copyright (C) 2009 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "nonce.h"
+#include "concurrency.h"
+#include "security_common.h"
+#include "../util/concurrency/spin_lock.h"
+
+// this is used by both mongos and mongod
+
+namespace mongo {
+
+ /*
+ * for a particular db
+ * levels
+ * 0 : none
+ * 1 : read
+ * 2 : write
+ */
+ struct Auth {
+
+ enum Level { NONE = 0 , READ = 1 , WRITE = 2 };
+
+ Auth() { level = NONE; }
+ Level level;
+ string user;
+ };
+
+ class AuthenticationInfo : boost::noncopyable {
+ public:
+ bool isLocalHost;
+
+ AuthenticationInfo(){ isLocalHost = false; }
+ ~AuthenticationInfo() {}
+
+ // -- modifiers ----
+
+ void logout(const string& dbname ) {
+ scoped_spinlock lk(_lock);
+ _dbs.erase(dbname);
+ }
+ void authorize(const string& dbname , const string& user ) {
+ scoped_spinlock lk(_lock);
+ _dbs[dbname].level = Auth::WRITE;
+ _dbs[dbname].user = user;
+ }
+ void authorizeReadOnly(const string& dbname , const string& user ) {
+ scoped_spinlock lk(_lock);
+ _dbs[dbname].level = Auth::READ;
+ _dbs[dbname].user = user;
+ }
+
+ // -- accessors ---
+
+ bool isAuthorized(const string& dbname) const {
+ return _isAuthorized( dbname, Auth::WRITE );
+ }
+
+ bool isAuthorizedReads(const string& dbname) const {
+ return _isAuthorized( dbname, Auth::READ );
+ }
+
+ /**
+ * @param lockType - this is from dbmutex 1 is write, 0 is read
+ */
+ bool isAuthorizedForLock(const string& dbname, int lockType ) const {
+ return _isAuthorized( dbname , lockType > 0 ? Auth::WRITE : Auth::READ );
+ }
+
+ bool isAuthorizedForLevel( const string& dbname , Auth::Level level ) const {
+ return _isAuthorized( dbname , level );
+ }
+
+ string getUser( const string& dbname ) const;
+
+ void print() const;
+
+ protected:
+ /** takes a lock */
+ bool _isAuthorized(const string& dbname, Auth::Level level) const;
+
+ bool _isAuthorizedSingle_inlock(const string& dbname, Auth::Level level) const;
+
+ /** cannot call this locked */
+ bool _isAuthorizedSpecialChecks( const string& dbname ) const ;
+
+ private:
+ mutable SpinLock _lock;
+
+ typedef map<string,Auth> MA;
+ MA _dbs; // dbname -> auth
+
+ static bool _warned;
+ };
+
+} // namespace mongo
diff --git a/src/mongo/db/security_commands.cpp b/src/mongo/db/security_commands.cpp
new file mode 100644
index 00000000000..33dbd597c83
--- /dev/null
+++ b/src/mongo/db/security_commands.cpp
@@ -0,0 +1,150 @@
+// security_commands.cpp
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+// security.cpp links with both dbgrid and db. this file db only -- at least for now.
+
+// security.cpp
+
+#include "pch.h"
+#include "security.h"
+#include "../util/md5.hpp"
+#include "json.h"
+#include "pdfile.h"
+#include "db.h"
+#include "dbhelpers.h"
+#include "commands.h"
+#include "jsobj.h"
+#include "client.h"
+
+namespace mongo {
+
+ /* authentication
+
+ system.users contains
+ { user : <username>, pwd : <pwd_digest>, ... }
+
+ getnonce sends nonce to client
+
+ client then sends { authenticate:1, nonce64:<nonce_str>, user:<username>, key:<key> }
+
+ where <key> is md5(<nonce_str><username><pwd_digest_str>) as a string
+ */
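+    /* rough client-side sketch of the handshake above (md5hex() is a hypothetical
+       helper standing in for whatever hex-md5 routine a driver uses; note the command
+       handler below reads the nonce from the "nonce" field of the command object):
+
+           string pwd_digest = md5hex(user + ":mongo:" + password);
+           string key        = md5hex(nonce_str + user + pwd_digest);
+           // send { authenticate: 1, nonce: nonce_str, user: user, key: key }
+    */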
+
+ boost::thread_specific_ptr<nonce64> lastNonce;
+
+ class CmdGetNonce : public Command {
+ public:
+ virtual bool requiresAuth() { return false; }
+ virtual bool logTheOp() { return false; }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ void help(stringstream& h) const { h << "internal"; }
+ virtual LockType locktype() const { return NONE; }
+ CmdGetNonce() : Command("getnonce") {}
+ bool run(const string&, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ nonce64 *n = new nonce64(Security::getNonce());
+ stringstream ss;
+ ss << hex << *n;
+ result.append("nonce", ss.str() );
+ lastNonce.reset(n);
+ return true;
+ }
+ } cmdGetNonce;
+
+ CmdLogout cmdLogout;
+
+ bool CmdAuthenticate::run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ log() << " authenticate: " << cmdObj << endl;
+
+ string user = cmdObj.getStringField("user");
+ string key = cmdObj.getStringField("key");
+ string received_nonce = cmdObj.getStringField("nonce");
+
+ if( user.empty() || key.empty() || received_nonce.empty() ) {
+ log() << "field missing/wrong type in received authenticate command "
+ << dbname
+ << endl;
+ errmsg = "auth fails";
+ sleepmillis(10);
+ return false;
+ }
+
+ stringstream digestBuilder;
+
+ {
+ bool reject = false;
+ nonce64 *ln = lastNonce.release();
+ if ( ln == 0 ) {
+ reject = true;
+ log(1) << "auth: no lastNonce" << endl;
+ }
+ else {
+ digestBuilder << hex << *ln;
+ reject = digestBuilder.str() != received_nonce;
+ if ( reject ) log(1) << "auth: different lastNonce" << endl;
+ }
+
+ if ( reject ) {
+ log() << "auth: bad nonce received or getnonce not called. could be a driver bug or a security attack. db:" << dbname << endl;
+ errmsg = "auth fails";
+ sleepmillis(30);
+ return false;
+ }
+ }
+
+ BSONObj userObj;
+ string pwd;
+ if (!getUserObj(dbname, user, userObj, pwd)) {
+ errmsg = "auth fails";
+ return false;
+ }
+
+ md5digest d;
+ {
+ digestBuilder << user << pwd;
+ string done = digestBuilder.str();
+
+ md5_state_t st;
+ md5_init(&st);
+ md5_append(&st, (const md5_byte_t *) done.c_str(), done.size());
+ md5_finish(&st, d);
+ }
+
+ string computed = digestToString( d );
+
+ if ( key != computed ) {
+ log() << "auth: key mismatch " << user << ", ns:" << dbname << endl;
+ errmsg = "auth fails";
+ return false;
+ }
+
+ bool readOnly = userObj["readOnly"].trueValue();
+ authenticate(dbname, user, readOnly );
+
+
+ result.append( "dbname" , dbname );
+ result.append( "user" , user );
+ result.appendBool( "readOnly" , readOnly );
+
+
+ return true;
+ }
+
+ CmdAuthenticate cmdAuthenticate;
+
+} // namespace mongo
diff --git a/src/mongo/db/security_common.cpp b/src/mongo/db/security_common.cpp
new file mode 100644
index 00000000000..a480919c27e
--- /dev/null
+++ b/src/mongo/db/security_common.cpp
@@ -0,0 +1,148 @@
+// security_common.cpp
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/**
+ * This file contains inter-mongo instance security helpers. Due to the
+ * requirement that it be possible to compile this into mongos and mongod, it
+ * should not depend on much external stuff.
+ */
+
+#include "pch.h"
+#include "security.h"
+#include "security_common.h"
+#include "../client/dbclient.h"
+#include "commands.h"
+#include "nonce.h"
+#include "../util/md5.hpp"
+#include "client_common.h"
+#include <sys/stat.h>
+
+namespace mongo {
+
+ bool noauth = true;
+ AuthInfo internalSecurity;
+
+ bool setUpSecurityKey(const string& filename) {
+ struct stat stats;
+
+ // check obvious file errors
+ if (stat(filename.c_str(), &stats) == -1) {
+ log() << "error getting file " << filename << ": " << strerror(errno) << endl;
+ return false;
+ }
+
+#if !defined(_WIN32)
+ // check permissions: must be X00, where X is >= 4
+ if ((stats.st_mode & (S_IRWXG|S_IRWXO)) != 0) {
+ log() << "permissions on " << filename << " are too open" << endl;
+ return false;
+ }
+#endif
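+    // illustrative: a key file created with e.g. `chmod 600 keyfile` passes this check,
+    // since no group or other permission bits may be set.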
+
+ const unsigned long long fileLength = stats.st_size;
+ if (fileLength < 6 || fileLength > 1024) {
+ log() << " key file " << filename << " has length " << stats.st_size
+ << ", must be between 6 and 1024 chars" << endl;
+ return false;
+ }
+
+ FILE* file = fopen( filename.c_str(), "rb" );
+ if (!file) {
+ log() << "error opening file: " << filename << ": " << strerror(errno) << endl;
+ return false;
+ }
+
+ string str = "";
+
+ // strip key file
+ unsigned long long read = 0;
+ while (read < fileLength) {
+ char buf;
+ int readLength = fread(&buf, 1, 1, file);
+ if (readLength < 1) {
+ log() << "error reading file " << filename << endl;
+ return false;
+ }
+ read++;
+
+ // check for whitespace
+ if ((buf >= '\x09' && buf <= '\x0D') || buf == ' ') {
+ continue;
+ }
+
+ // check valid base64
+ if ((buf < 'A' || buf > 'Z') && (buf < 'a' || buf > 'z') && (buf < '0' || buf > '9') && buf != '+' && buf != '/') {
+ log() << "invalid char in key file " << filename << ": " << buf << endl;
+ return false;
+ }
+
+ str += buf;
+ }
+
+ if (str.size() < 6) {
+ log() << "security key must be at least 6 characters" << endl;
+ return false;
+ }
+
+ log(1) << "security key: " << str << endl;
+
+ // createPWDigest should really not be a member func
+ DBClientConnection conn;
+ internalSecurity.pwd = conn.createPasswordDigest(internalSecurity.user, str);
+
+ return true;
+ }
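+    // Illustrative only: a key file that passes the checks above holds 6-1024 base64
+    // characters and is not readable by group/other, e.g. (assuming openssl is available):
+    //   openssl rand -base64 90 > /etc/mongo-keyfile && chmod 600 /etc/mongo-keyfile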
+
+ void CmdAuthenticate::authenticate(const string& dbname, const string& user, const bool readOnly) {
+ ClientBasic* c = ClientBasic::getCurrent();
+ assert(c);
+ AuthenticationInfo *ai = c->getAuthenticationInfo();
+
+ if ( readOnly ) {
+ ai->authorizeReadOnly( dbname , user );
+ }
+ else {
+ ai->authorize( dbname , user );
+ }
+ }
+
+
+ bool AuthenticationInfo::_isAuthorized(const string& dbname, Auth::Level level) const {
+ {
+ scoped_spinlock lk(_lock);
+
+ if ( _isAuthorizedSingle_inlock( dbname , level ) )
+ return true;
+
+ if ( noauth )
+ return true;
+
+ if ( _isAuthorizedSingle_inlock( "admin" , level ) )
+ return true;
+
+ if ( _isAuthorizedSingle_inlock( "local" , level ) )
+ return true;
+ }
+ return _isAuthorizedSpecialChecks( dbname );
+ }
+
+ bool AuthenticationInfo::_isAuthorizedSingle_inlock(const string& dbname, Auth::Level level) const {
+ MA::const_iterator i = _dbs.find(dbname);
+ return i != _dbs.end() && i->second.level >= level;
+ }
+
+} // namespace mongo
diff --git a/src/mongo/db/security_common.h b/src/mongo/db/security_common.h
new file mode 100644
index 00000000000..6615c6e573e
--- /dev/null
+++ b/src/mongo/db/security_common.h
@@ -0,0 +1,85 @@
+// security_common.h
+
+/**
+* Copyright (C) 2009 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "commands.h"
+#include "concurrency.h"
+#include "../util/concurrency/spin_lock.h"
+
+namespace mongo {
+
+ /**
+ * Internal secret key info.
+ */
+ struct AuthInfo {
+ AuthInfo() {
+ user = "__system";
+ }
+ string user;
+ string pwd;
+ };
+
+ // --noauth cmd line option
+ extern bool noauth;
+ extern AuthInfo internalSecurity;
+
+ /**
+ * This method checks the validity of filename as a security key, hashes its
+ * contents, and stores it in the internalSecurity variable. Prints an
+ * error message to the logs if there's an error.
+ * @param filename the file containing the key
+ * @return if the key was successfully stored
+ */
+ bool setUpSecurityKey(const string& filename);
+
+ class CmdAuthenticate : public Command {
+ public:
+ virtual bool requiresAuth() { return false; }
+ virtual bool logTheOp() {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual LockType locktype() const { return READ; }
+ virtual void help(stringstream& ss) const { ss << "internal"; }
+ CmdAuthenticate() : Command("authenticate") {}
+ bool run(const string& dbname , BSONObj& cmdObj, int options, string& errmsg, BSONObjBuilder& result, bool fromRepl);
+ void authenticate(const string& dbname, const string& user, const bool readOnly);
+ private:
+ bool getUserObj(const string& dbname, const string& user, BSONObj& userObj, string& pwd);
+ };
+
+ extern CmdAuthenticate cmdAuthenticate;
+
+ class CmdLogout : public Command {
+ public:
+ virtual bool logTheOp() {
+ return false;
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ void help(stringstream& h) const { h << "de-authenticate"; }
+ virtual LockType locktype() const { return NONE; }
+ CmdLogout() : Command("logout") {}
+ bool run(const string& dbname , BSONObj& cmdObj, int options, string& errmsg, BSONObjBuilder& result, bool fromRepl);
+ };
+
+} // namespace mongo
diff --git a/src/mongo/db/stats/counters.cpp b/src/mongo/db/stats/counters.cpp
new file mode 100644
index 00000000000..889e8a86c4c
--- /dev/null
+++ b/src/mongo/db/stats/counters.cpp
@@ -0,0 +1,207 @@
+// counters.cpp
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "pch.h"
+#include "../jsobj.h"
+#include "counters.h"
+
+namespace mongo {
+
+ OpCounters::OpCounters() {
+ int zero = 0;
+
+ BSONObjBuilder b;
+ b.append( "insert" , zero );
+ b.append( "query" , zero );
+ b.append( "update" , zero );
+ b.append( "delete" , zero );
+ b.append( "getmore" , zero );
+ b.append( "command" , zero );
+ _obj = b.obj();
+
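+        // the counters live inside _obj's own storage: each pointer below aliases the
+        // 32-bit value bytes of the matching element, so bumping an AtomicUInt mutates
+        // the BSONObj in place and getObj() can return it without any copying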
+ _insert = (AtomicUInt*)_obj["insert"].value();
+ _query = (AtomicUInt*)_obj["query"].value();
+ _update = (AtomicUInt*)_obj["update"].value();
+ _delete = (AtomicUInt*)_obj["delete"].value();
+ _getmore = (AtomicUInt*)_obj["getmore"].value();
+ _command = (AtomicUInt*)_obj["command"].value();
+ }
+
+ void OpCounters::gotOp( int op , bool isCommand ) {
+ switch ( op ) {
+ case dbInsert: /*gotInsert();*/ break; // need to handle multi-insert
+ case dbQuery:
+ if ( isCommand )
+ gotCommand();
+ else
+ gotQuery();
+ break;
+
+ case dbUpdate: gotUpdate(); break;
+ case dbDelete: gotDelete(); break;
+ case dbGetMore: gotGetMore(); break;
+ case dbKillCursors:
+ case opReply:
+ case dbMsg:
+ break;
+ default: log() << "OpCounters::gotOp unknown op: " << op << endl;
+ }
+ }
+
+ BSONObj& OpCounters::getObj() {
+ const unsigned MAX = 1 << 30;
+ RARELY {
+ bool wrap =
+ _insert->get() > MAX ||
+ _query->get() > MAX ||
+ _update->get() > MAX ||
+ _delete->get() > MAX ||
+ _getmore->get() > MAX ||
+ _command->get() > MAX;
+
+ if ( wrap ) {
+ _insert->zero();
+ _query->zero();
+ _update->zero();
+ _delete->zero();
+ _getmore->zero();
+ _command->zero();
+ }
+
+ }
+ return _obj;
+ }
+
+ IndexCounters::IndexCounters() {
+ _memSupported = _pi.blockCheckSupported();
+
+ _btreeMemHits = 0;
+ _btreeMemMisses = 0;
+ _btreeAccesses = 0;
+
+
+ _maxAllowed = ( numeric_limits< long long >::max() ) / 2;
+ _resets = 0;
+
+ _sampling = 0;
+ _samplingrate = 100;
+ }
+
+ void IndexCounters::append( BSONObjBuilder& b ) {
+ if ( ! _memSupported ) {
+ b.append( "note" , "not supported on this platform" );
+ return;
+ }
+
+ BSONObjBuilder bb( b.subobjStart( "btree" ) );
+ bb.appendNumber( "accesses" , _btreeAccesses );
+ bb.appendNumber( "hits" , _btreeMemHits );
+ bb.appendNumber( "misses" , _btreeMemMisses );
+
+ bb.append( "resets" , _resets );
+
+ bb.append( "missRatio" , (_btreeAccesses ? (_btreeMemMisses / (double)_btreeAccesses) : 0) );
+
+ bb.done();
+
+ if ( _btreeAccesses > _maxAllowed ) {
+ _btreeAccesses = 0;
+ _btreeMemMisses = 0;
+ _btreeMemHits = 0;
+ _resets++;
+ }
+ }
+
+ FlushCounters::FlushCounters()
+ : _total_time(0)
+ , _flushes(0)
+ , _last()
+ {}
+
+ void FlushCounters::flushed(int ms) {
+ _flushes++;
+ _total_time += ms;
+ _last_time = ms;
+ _last = jsTime();
+ }
+
+ void FlushCounters::append( BSONObjBuilder& b ) {
+ b.appendNumber( "flushes" , _flushes );
+ b.appendNumber( "total_ms" , _total_time );
+ b.appendNumber( "average_ms" , (_flushes ? (_total_time / double(_flushes)) : 0.0) );
+ b.appendNumber( "last_ms" , _last_time );
+ b.append("last_finished", _last);
+ }
+
+
+ void GenericCounter::hit( const string& name , int count ) {
+ scoped_lock lk( _mutex );
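+        // note: the 'count' argument is currently ignored; every call bumps the counter by one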
+ _counts[name]++;
+ }
+
+ BSONObj GenericCounter::getObj() {
+ BSONObjBuilder b(128);
+ {
+ mongo::mutex::scoped_lock lk( _mutex );
+ for ( map<string,long long>::iterator i=_counts.begin(); i!=_counts.end(); i++ ) {
+ b.appendNumber( i->first , i->second );
+ }
+ }
+ return b.obj();
+ }
+
+
+ void NetworkCounter::hit( long long bytesIn , long long bytesOut ) {
+ const long long MAX = 1ULL << 60;
+
+        // don't care about the race as it's just a counter
+ bool overflow = _bytesIn > MAX || _bytesOut > MAX;
+
+ if ( overflow ) {
+ _lock.lock();
+ _overflows++;
+ _bytesIn = bytesIn;
+ _bytesOut = bytesOut;
+ _requests = 1;
+ _lock.unlock();
+ }
+ else {
+ _lock.lock();
+ _bytesIn += bytesIn;
+ _bytesOut += bytesOut;
+ _requests++;
+ _lock.unlock();
+ }
+ }
+
+ void NetworkCounter::append( BSONObjBuilder& b ) {
+ _lock.lock();
+ b.appendNumber( "bytesIn" , _bytesIn );
+ b.appendNumber( "bytesOut" , _bytesOut );
+ b.appendNumber( "numRequests" , _requests );
+ _lock.unlock();
+ }
+
+
+ OpCounters globalOpCounters;
+ OpCounters replOpCounters;
+ IndexCounters globalIndexCounters;
+ FlushCounters globalFlushCounters;
+ NetworkCounter networkCounter;
+
+}
diff --git a/src/mongo/db/stats/counters.h b/src/mongo/db/stats/counters.h
new file mode 100644
index 00000000000..0cb29aa49aa
--- /dev/null
+++ b/src/mongo/db/stats/counters.h
@@ -0,0 +1,159 @@
+// counters.h
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "../../pch.h"
+#include "../jsobj.h"
+#include "../../util/net/message.h"
+#include "../../util/processinfo.h"
+#include "../../util/concurrency/spin_lock.h"
+
+namespace mongo {
+
+ /**
+ * for storing operation counters
+ * note: not thread safe. ok with that for speed
+ */
+ class OpCounters {
+ public:
+
+ OpCounters();
+
+ AtomicUInt * getInsert() { return _insert; }
+ AtomicUInt * getQuery() { return _query; }
+ AtomicUInt * getUpdate() { return _update; }
+ AtomicUInt * getDelete() { return _delete; }
+ AtomicUInt * getGetMore() { return _getmore; }
+ AtomicUInt * getCommand() { return _command; }
+
+ void incInsertInWriteLock(int n) { _insert->x += n; }
+ void gotInsert() { _insert[0]++; }
+ void gotQuery() { _query[0]++; }
+ void gotUpdate() { _update[0]++; }
+ void gotDelete() { _delete[0]++; }
+ void gotGetMore() { _getmore[0]++; }
+ void gotCommand() { _command[0]++; }
+
+ void gotOp( int op , bool isCommand );
+
+ BSONObj& getObj();
+
+ private:
+ BSONObj _obj;
+
+ // todo: there will be a lot of cache line contention on these. need to do something
+ // else eventually.
+ AtomicUInt * _insert;
+ AtomicUInt * _query;
+ AtomicUInt * _update;
+ AtomicUInt * _delete;
+ AtomicUInt * _getmore;
+ AtomicUInt * _command;
+ };
+
+ extern OpCounters globalOpCounters;
+ extern OpCounters replOpCounters;
+
+
+ class IndexCounters {
+ public:
+ IndexCounters();
+
+ // used without a mutex intentionally (can race)
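+        // only ~1 in _samplingrate (default 100) calls pays for the blockInMemory() probe;
+        // the rest return early, so the hit/miss ratio is a sampled estimate, not exact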
+ void btree( char * node ) {
+ if ( ! _memSupported )
+ return;
+ if ( _sampling++ % _samplingrate )
+ return;
+ btree( _pi.blockInMemory( node ) );
+ }
+
+ void btree( bool memHit ) {
+ if ( memHit )
+ _btreeMemHits++;
+ else
+ _btreeMemMisses++;
+ _btreeAccesses++;
+ }
+ void btreeHit() { _btreeMemHits++; _btreeAccesses++; }
+ void btreeMiss() { _btreeMemMisses++; _btreeAccesses++; }
+
+ void append( BSONObjBuilder& b );
+
+ private:
+ ProcessInfo _pi;
+ bool _memSupported;
+
+ int _sampling;
+ int _samplingrate;
+
+ int _resets;
+ long long _maxAllowed;
+
+ long long _btreeMemMisses;
+ long long _btreeMemHits;
+ long long _btreeAccesses;
+ };
+
+ extern IndexCounters globalIndexCounters;
+
+ class FlushCounters {
+ public:
+ FlushCounters();
+
+ void flushed(int ms);
+
+ void append( BSONObjBuilder& b );
+
+ private:
+ long long _total_time;
+ long long _flushes;
+ int _last_time;
+ Date_t _last;
+ };
+
+ extern FlushCounters globalFlushCounters;
+
+
+ class GenericCounter {
+ public:
+ GenericCounter() : _mutex("GenericCounter") { }
+ void hit( const string& name , int count=0 );
+ BSONObj getObj();
+ private:
+ map<string,long long> _counts; // TODO: replace with thread safe map
+ mongo::mutex _mutex;
+ };
+
+ class NetworkCounter {
+ public:
+ NetworkCounter() : _bytesIn(0), _bytesOut(0), _requests(0), _overflows(0) {}
+ void hit( long long bytesIn , long long bytesOut );
+ void append( BSONObjBuilder& b );
+ private:
+ long long _bytesIn;
+ long long _bytesOut;
+ long long _requests;
+
+ long long _overflows;
+
+ SpinLock _lock;
+ };
+
+ extern NetworkCounter networkCounter;
+}
diff --git a/src/mongo/db/stats/fine_clock.h b/src/mongo/db/stats/fine_clock.h
new file mode 100644
index 00000000000..02600e718c4
--- /dev/null
+++ b/src/mongo/db/stats/fine_clock.h
@@ -0,0 +1,67 @@
+// fine_clock.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef DB_STATS_FINE_CLOCK_HEADER
+#define DB_STATS_FINE_CLOCK_HEADER
+
+#include <time.h> // struct timespec
+
+namespace mongo {
+
+ /**
+ * This is a nano-second precision clock. We're skipping the
+     * hardware TSC in favor of clock_gettime() which on some systems
+ * does not involve a trip to the OS (VDSO).
+ *
+ * We're exporting a type WallTime that is and should remain
+ * opaque. The business of getting accurate time is still ongoing
+ * and we may change the internal representation of this class.
+ * (http://lwn.net/Articles/388188/)
+ *
+     * Really, you shouldn't be using this class in hot code paths on
+     * platforms where you're not sure the overhead is low.
+ */
+ class FineClock {
+ public:
+
+ typedef timespec WallTime;
+
+ static WallTime now() {
+ struct timespec ts;
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ return ts;
+ }
+
+ static uint64_t diffInNanos( WallTime end, WallTime start ) {
+ uint64_t diff;
+ if ( end.tv_nsec < start.tv_nsec ) {
+ diff = 1000000000 * ( end.tv_sec - start.tv_sec - 1);
+ diff += 1000000000 + end.tv_nsec - start.tv_nsec;
+ }
+ else {
+ diff = 1000000000 * ( end.tv_sec - start.tv_sec );
+ diff += end.tv_nsec - start.tv_nsec;
+ }
+ return diff;
+ }
+
+ };
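+    /* Illustrative use (names here are just an example, not part of this header):
+
+           FineClock::WallTime start = FineClock::now();
+           doExpensiveThing();
+           uint64_t nanos = FineClock::diffInNanos( FineClock::now(), start );
+    */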
+}
+
+#endif // DB_STATS_FINE_CLOCK_HEADER
+
diff --git a/src/mongo/db/stats/service_stats.cpp b/src/mongo/db/stats/service_stats.cpp
new file mode 100644
index 00000000000..d69147fe969
--- /dev/null
+++ b/src/mongo/db/stats/service_stats.cpp
@@ -0,0 +1,68 @@
+// service_stats.cpp
+
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include <sstream>
+
+#include "../../util/histogram.h"
+#include "service_stats.h"
+
+namespace mongo {
+
+ using std::ostringstream;
+
+ ServiceStats::ServiceStats() {
+ // Time histogram covers up to 128msec in exponential intervals
+ // starting at 125usec.
+ Histogram::Options timeOpts;
+ timeOpts.numBuckets = 12;
+ timeOpts.bucketSize = 125;
+ timeOpts.exponential = true;
+ _timeHistogram = new Histogram( timeOpts );
+
+        // Space histogram covers up to 1MB in exponential intervals starting
+ // at 1K.
+ Histogram::Options spaceOpts;
+ spaceOpts.numBuckets = 12;
+ spaceOpts.bucketSize = 1024;
+ spaceOpts.exponential = true;
+ _spaceHistogram = new Histogram( spaceOpts );
+ }
+
+ ServiceStats::~ServiceStats() {
+ delete _timeHistogram;
+ delete _spaceHistogram;
+ }
+
+ void ServiceStats::logResponse( uint64_t duration, uint64_t bytes ) {
+ _spinLock.lock();
+ _timeHistogram->insert( duration / 1000 /* in usecs */ );
+ _spaceHistogram->insert( bytes );
+ _spinLock.unlock();
+ }
+
+ string ServiceStats::toHTML() const {
+        ostringstream res;
+ res << "Cumulative wire stats\n"
+ << "Response times\n" << _timeHistogram->toHTML()
+ << "Response sizes\n" << _spaceHistogram->toHTML()
+ << '\n';
+
+ return res.str();
+ }
+
+} // mongo
diff --git a/src/mongo/db/stats/service_stats.h b/src/mongo/db/stats/service_stats.h
new file mode 100644
index 00000000000..5b0e75fdcb9
--- /dev/null
+++ b/src/mongo/db/stats/service_stats.h
@@ -0,0 +1,66 @@
+// service_stats.h
+
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef DB_STATS_SERVICE_STATS_HEADER
+#define DB_STATS_SERVICE_STATS_HEADER
+
+#include <string>
+
+#include "../../util/concurrency/spin_lock.h"
+
+namespace mongo {
+
+ using std::string;
+
+ class Histogram;
+
+ /**
+ * ServiceStats keeps track of the time a request/response message
+ * took inside a service as well as the size of the response
+ * generated.
+ */
+ class ServiceStats {
+ public:
+ ServiceStats();
+ ~ServiceStats();
+
+ /**
+ * Record the 'duration' in microseconds a request/response
+ * message took and the size in bytes of the generated
+ * response.
+ */
+ void logResponse( uint64_t duration, uint64_t bytes );
+
+ /**
+ * Render the histogram as string that can be used inside an
+ * HTML doc.
+ */
+ string toHTML() const;
+
+ private:
+ SpinLock _spinLock; // protects state below
+ Histogram* _timeHistogram;
+ Histogram* _spaceHistogram;
+
+ ServiceStats( const ServiceStats& );
+ ServiceStats operator=( const ServiceStats& );
+ };
+
+} // namespace mongo
+
+#endif // DB_STATS_SERVICE_STATS_HEADER
diff --git a/src/mongo/db/stats/snapshots.cpp b/src/mongo/db/stats/snapshots.cpp
new file mode 100644
index 00000000000..900cc4ff1ad
--- /dev/null
+++ b/src/mongo/db/stats/snapshots.cpp
@@ -0,0 +1,227 @@
+// snapshots.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "snapshots.h"
+#include "../client.h"
+#include "../clientcursor.h"
+#include "../dbwebserver.h"
+#include "../../util/mongoutils/html.h"
+
+/**
+ handles snapshotting performance metrics and other such things
+ */
+namespace mongo {
+
+ void SnapshotData::takeSnapshot() {
+ _created = curTimeMicros64();
+ _globalUsage = Top::global.getGlobalData();
+// _totalWriteLockedTime = d.dbMutex.info().getTimeLocked();
+ Top::global.cloneMap(_usage);
+ }
+
+ SnapshotDelta::SnapshotDelta( const SnapshotData& older , const SnapshotData& newer )
+ : _older( older ) , _newer( newer ) {
+ assert( _newer._created > _older._created );
+ _elapsed = _newer._created - _older._created;
+ }
+
+ Top::CollectionData SnapshotDelta::globalUsageDiff() {
+ return Top::CollectionData( _older._globalUsage , _newer._globalUsage );
+ }
+ Top::UsageMap SnapshotDelta::collectionUsageDiff() {
+ assert( _newer._created > _older._created );
+ Top::UsageMap u;
+
+ for ( Top::UsageMap::const_iterator i=_newer._usage.begin(); i != _newer._usage.end(); i++ ) {
+ Top::UsageMap::const_iterator j = _older._usage.find(i->first);
+ if (j != _older._usage.end())
+ u[i->first] = Top::CollectionData( j->second , i->second );
+ else
+ u[i->first] = i->second;
+ }
+ return u;
+ }
+
+ Snapshots::Snapshots(int n)
+ : _lock("Snapshots"), _n(n)
+ , _snapshots(new SnapshotData[n])
+ , _loc(0)
+ , _stored(0)
+ {}
+
+ const SnapshotData* Snapshots::takeSnapshot() {
+ scoped_lock lk(_lock);
+ _loc = ( _loc + 1 ) % _n;
+ _snapshots[_loc].takeSnapshot();
+ if ( _stored < _n )
+ _stored++;
+ return &_snapshots[_loc];
+ }
+
+ auto_ptr<SnapshotDelta> Snapshots::computeDelta( int numBack ) {
+ scoped_lock lk(_lock);
+ auto_ptr<SnapshotDelta> p;
+ if ( numBack < numDeltas() )
+ p.reset( new SnapshotDelta( getPrev(numBack+1) , getPrev(numBack) ) );
+ return p;
+ }
+
+ const SnapshotData& Snapshots::getPrev( int numBack ) {
+ int x = _loc - numBack;
+ if ( x < 0 )
+ x += _n;
+ return _snapshots[x];
+ }
+
+ void Snapshots::outputLockInfoHTML( stringstream& ss ) {
+ scoped_lock lk(_lock);
+ ss << "\n<div>";
+ for ( int i=0; i<numDeltas(); i++ ) {
+ SnapshotDelta d( getPrev(i+1) , getPrev(i) );
+ unsigned e = (unsigned) d.elapsed() / 1000;
+ ss << (unsigned)(100*d.percentWriteLocked());
+ if( e < 3900 || e > 4100 )
+ ss << '(' << e / 1000.0 << "s)";
+ ss << ' ';
+ }
+ ss << "</div>\n";
+ }
+
+ void SnapshotThread::run() {
+ Client::initThread("snapshotthread");
+ Client& client = cc();
+
+ long long numLoops = 0;
+
+ const SnapshotData* prev = 0;
+
+ while ( ! inShutdown() ) {
+ try {
+ const SnapshotData* s = statsSnapshots.takeSnapshot();
+
+ if ( prev && cmdLine.cpu ) {
+ unsigned long long elapsed = s->_created - prev->_created;
+ SnapshotDelta d( *prev , *s );
+ log() << "cpu: elapsed:" << (elapsed/1000) <<" writelock: " << (int)(100*d.percentWriteLocked()) << "%" << endl;
+ }
+
+ prev = s;
+ }
+ catch ( std::exception& e ) {
+ log() << "ERROR in SnapshotThread: " << e.what() << endl;
+ }
+
+ numLoops++;
+ sleepsecs(4);
+ }
+
+ client.shutdown();
+ }
+
+ using namespace mongoutils::html;
+
+ class WriteLockStatus : public WebStatusPlugin {
+ public:
+ WriteLockStatus() : WebStatusPlugin( "write lock" , 51 , "% time in write lock, by 4 sec periods" ) {}
+ virtual void init() {}
+
+ virtual void run( stringstream& ss ) {
+ statsSnapshots.outputLockInfoHTML( ss );
+
+ ss << "<a "
+ "href=\"http://www.mongodb.org/pages/viewpage.action?pageId=7209296\" "
+ "title=\"snapshot: was the db in the write lock when this page was generated?\">";
+ ss << "write locked now:</a> " << (d.dbMutex.info().isLocked() ? "true" : "false") << "\n";
+ }
+
+ } writeLockStatus;
+
+ class DBTopStatus : public WebStatusPlugin {
+ public:
+ DBTopStatus() : WebStatusPlugin( "dbtop" , 50 , "(occurrences|percent of elapsed)" ) {}
+
+ void display( stringstream& ss , double elapsed , const Top::UsageData& usage ) {
+ ss << "<td>";
+ ss << usage.count;
+ ss << "</td><td>";
+ double per = 100 * ((double)usage.time)/elapsed;
+ if( per == (int) per )
+ ss << (int) per;
+ else
+ ss << setprecision(1) << fixed << per;
+ ss << '%';
+ ss << "</td>";
+ }
+
+ void display( stringstream& ss , double elapsed , const string& ns , const Top::CollectionData& data ) {
+ if ( ns != "TOTAL" && data.total.count == 0 )
+ return;
+ ss << "<tr><th>" << ns << "</th>";
+
+ display( ss , elapsed , data.total );
+
+ display( ss , elapsed , data.readLock );
+ display( ss , elapsed , data.writeLock );
+
+ display( ss , elapsed , data.queries );
+ display( ss , elapsed , data.getmore );
+ display( ss , elapsed , data.insert );
+ display( ss , elapsed , data.update );
+ display( ss , elapsed , data.remove );
+
+ ss << "</tr>\n";
+ }
+
+ void run( stringstream& ss ) {
+ auto_ptr<SnapshotDelta> delta = statsSnapshots.computeDelta();
+ if ( ! delta.get() )
+ return;
+
+ ss << "<table border=1 cellpadding=2 cellspacing=0>";
+ ss << "<tr align='left'><th>";
+ ss << a("http://www.mongodb.org/display/DOCS/Developer+FAQ#DeveloperFAQ-What%27sa%22namespace%22%3F", "namespace") <<
+ "NS</a></th>"
+ "<th colspan=2>total</th>"
+ "<th colspan=2>Reads</th>"
+ "<th colspan=2>Writes</th>"
+ "<th colspan=2>Queries</th>"
+ "<th colspan=2>GetMores</th>"
+ "<th colspan=2>Inserts</th>"
+ "<th colspan=2>Updates</th>"
+ "<th colspan=2>Removes</th>";
+ ss << "</tr>\n";
+
+ display( ss , (double) delta->elapsed() , "TOTAL" , delta->globalUsageDiff() );
+
+ Top::UsageMap usage = delta->collectionUsageDiff();
+ for ( Top::UsageMap::iterator i=usage.begin(); i != usage.end(); i++ ) {
+ display( ss , (double) delta->elapsed() , i->first , i->second );
+ }
+
+ ss << "</table>";
+
+ }
+
+ virtual void init() {}
+ } dbtopStatus;
+
+ Snapshots statsSnapshots;
+ SnapshotThread snapshotThread;
+
+}
diff --git a/src/mongo/db/stats/snapshots.h b/src/mongo/db/stats/snapshots.h
new file mode 100644
index 00000000000..d9b8e5eb901
--- /dev/null
+++ b/src/mongo/db/stats/snapshots.h
@@ -0,0 +1,114 @@
+// snapshots.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+#include "../../pch.h"
+#include "../jsobj.h"
+#include "top.h"
+#include "../../util/background.h"
+
+/**
+ handles snapshotting performance metrics and other such things
+ */
+namespace mongo {
+
+ class SnapshotThread;
+
+ /**
+ * stores a point in time snapshot
+ * i.e. all counters at a given time
+ */
+ class SnapshotData {
+ void takeSnapshot();
+
+ unsigned long long _created;
+ Top::CollectionData _globalUsage;
+ unsigned long long _totalWriteLockedTime; // micros of total time locked
+ Top::UsageMap _usage;
+
+ friend class SnapshotThread;
+ friend class SnapshotDelta;
+ friend class Snapshots;
+ };
+
+ /**
+ * contains performance information for a time period
+ */
+ class SnapshotDelta {
+ public:
+ SnapshotDelta( const SnapshotData& older , const SnapshotData& newer );
+
+ unsigned long long start() const {
+ return _older._created;
+ }
+
+ unsigned long long elapsed() const {
+ return _elapsed;
+ }
+
+ unsigned long long timeInWriteLock() const {
+ return _newer._totalWriteLockedTime - _older._totalWriteLockedTime;
+ }
+ double percentWriteLocked() const {
+ double e = (double) elapsed();
+ double w = (double) timeInWriteLock();
+ return w/e;
+ }
+
+ Top::CollectionData globalUsageDiff();
+ Top::UsageMap collectionUsageDiff();
+
+ private:
+ const SnapshotData& _older;
+ const SnapshotData& _newer;
+
+ unsigned long long _elapsed;
+ };
+
+ class Snapshots {
+ public:
+ Snapshots(int n=100);
+
+ const SnapshotData* takeSnapshot();
+
+ int numDeltas() const { return _stored-1; }
+
+ const SnapshotData& getPrev( int numBack = 0 );
+ auto_ptr<SnapshotDelta> computeDelta( int numBack = 0 );
+
+
+ void outputLockInfoHTML( stringstream& ss );
+ private:
+ mongo::mutex _lock;
+ int _n;
+ boost::scoped_array<SnapshotData> _snapshots;
+ int _loc;
+ int _stored;
+ };
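+    /* Illustrative read of the ring buffer (hypothetical caller; SnapshotThread below
+       normally drives takeSnapshot() every 4 seconds):
+
+           auto_ptr<SnapshotDelta> d = statsSnapshots.computeDelta();
+           if ( d.get() )
+               log() << (int)(100 * d->percentWriteLocked()) << "% write locked" << endl;
+    */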
+
+ class SnapshotThread : public BackgroundJob {
+ public:
+ virtual string name() const { return "snapshot"; }
+ void run();
+ };
+
+ extern Snapshots statsSnapshots;
+ extern SnapshotThread snapshotThread;
+
+
+}
diff --git a/src/mongo/db/stats/top.cpp b/src/mongo/db/stats/top.cpp
new file mode 100644
index 00000000000..f5b6ee42f1c
--- /dev/null
+++ b/src/mongo/db/stats/top.cpp
@@ -0,0 +1,183 @@
+// top.cpp
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "pch.h"
+#include "top.h"
+#include "../../util/net/message.h"
+#include "../commands.h"
+
+namespace mongo {
+
+ Top::UsageData::UsageData( const UsageData& older , const UsageData& newer ) {
+ // this won't be 100% accurate on rollovers and drop(), but at least it won't be negative
+ time = (newer.time >= older.time) ? (newer.time - older.time) : newer.time;
+ count = (newer.count >= older.count) ? (newer.count - older.count) : newer.count;
+ }
+
+ Top::CollectionData::CollectionData( const CollectionData& older , const CollectionData& newer )
+ : total( older.total , newer.total ) ,
+ readLock( older.readLock , newer.readLock ) ,
+ writeLock( older.writeLock , newer.writeLock ) ,
+ queries( older.queries , newer.queries ) ,
+ getmore( older.getmore , newer.getmore ) ,
+ insert( older.insert , newer.insert ) ,
+ update( older.update , newer.update ) ,
+ remove( older.remove , newer.remove ),
+ commands( older.commands , newer.commands ) {
+
+ }
+
+ void Top::record( const string& ns , int op , int lockType , long long micros , bool command ) {
+ if ( ns[0] == '?' )
+ return;
+
+ //cout << "record: " << ns << "\t" << op << "\t" << command << endl;
+ scoped_lock lk(_lock);
+
+ if ( ( command || op == dbQuery ) && ns == _lastDropped ) {
+ _lastDropped = "";
+ return;
+ }
+
+ CollectionData& coll = _usage[ns];
+ _record( coll , op , lockType , micros , command );
+ _record( _global , op , lockType , micros , command );
+ }
+
+ void Top::_record( CollectionData& c , int op , int lockType , long long micros , bool command ) {
+ c.total.inc( micros );
+
+ if ( lockType > 0 )
+ c.writeLock.inc( micros );
+ else if ( lockType < 0 )
+ c.readLock.inc( micros );
+
+ switch ( op ) {
+ case 0:
+ // use 0 for unknown, non-specific
+ break;
+ case dbUpdate:
+ c.update.inc( micros );
+ break;
+ case dbInsert:
+ c.insert.inc( micros );
+ break;
+ case dbQuery:
+ if ( command )
+ c.commands.inc( micros );
+ else
+ c.queries.inc( micros );
+ break;
+ case dbGetMore:
+ c.getmore.inc( micros );
+ break;
+ case dbDelete:
+ c.remove.inc( micros );
+ break;
+ case dbKillCursors:
+ break;
+ case opReply:
+ case dbMsg:
+ log() << "unexpected op in Top::record: " << op << endl;
+ break;
+ default:
+ log() << "unknown op in Top::record: " << op << endl;
+ }
+
+ }
+
+ void Top::collectionDropped( const string& ns ) {
+ //cout << "collectionDropped: " << ns << endl;
+ scoped_lock lk(_lock);
+ _usage.erase(ns);
+ _lastDropped = ns;
+ }
+
+ void Top::cloneMap(Top::UsageMap& out) const {
+ scoped_lock lk(_lock);
+ out = _usage;
+ }
+
+ void Top::append( BSONObjBuilder& b ) {
+ scoped_lock lk( _lock );
+ _appendToUsageMap( b , _usage );
+ }
+
+ void Top::_appendToUsageMap( BSONObjBuilder& b , const UsageMap& map ) const {
+ for ( UsageMap::const_iterator i=map.begin(); i!=map.end(); i++ ) {
+ BSONObjBuilder bb( b.subobjStart( i->first ) );
+
+ const CollectionData& coll = i->second;
+
+ _appendStatsEntry( b , "total" , coll.total );
+
+ _appendStatsEntry( b , "readLock" , coll.readLock );
+ _appendStatsEntry( b , "writeLock" , coll.writeLock );
+
+ _appendStatsEntry( b , "queries" , coll.queries );
+ _appendStatsEntry( b , "getmore" , coll.getmore );
+ _appendStatsEntry( b , "insert" , coll.insert );
+ _appendStatsEntry( b , "update" , coll.update );
+ _appendStatsEntry( b , "remove" , coll.remove );
+ _appendStatsEntry( b , "commands" , coll.commands );
+
+ bb.done();
+ }
+ }
+
+ void Top::_appendStatsEntry( BSONObjBuilder& b , const char * statsName , const UsageData& map ) const {
+ BSONObjBuilder bb( b.subobjStart( statsName ) );
+ bb.appendNumber( "time" , map.time );
+ bb.appendNumber( "count" , map.count );
+ bb.done();
+ }
+
+ class TopCmd : public Command {
+ public:
+ TopCmd() : Command( "top", true ) {}
+
+ virtual bool slaveOk() const { return true; }
+ virtual bool adminOnly() const { return true; }
+ virtual LockType locktype() const { return READ; }
+ virtual void help( stringstream& help ) const { help << "usage by collection, in micros "; }
+
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ {
+ BSONObjBuilder b( result.subobjStart( "totals" ) );
+ b.append( "note" , "all times in microseconds" );
+ Top::global.append( b );
+ b.done();
+ }
+ return true;
+ }
+
+ } topCmd;
+
+ Top Top::global;
+
+ TopOld::T TopOld::_snapshotStart = TopOld::currentTime();
+ TopOld::D TopOld::_snapshotDuration;
+ TopOld::UsageMap TopOld::_totalUsage;
+ TopOld::UsageMap TopOld::_snapshotA;
+ TopOld::UsageMap TopOld::_snapshotB;
+ TopOld::UsageMap &TopOld::_snapshot = TopOld::_snapshotA;
+ TopOld::UsageMap &TopOld::_nextSnapshot = TopOld::_snapshotB;
+ mongo::mutex TopOld::topMutex("topMutex");
+
+
+}
diff --git a/src/mongo/db/stats/top.h b/src/mongo/db/stats/top.h
new file mode 100644
index 00000000000..9645ed1a3a6
--- /dev/null
+++ b/src/mongo/db/stats/top.h
@@ -0,0 +1,247 @@
+// top.h : DB usage monitor.
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <boost/date_time/posix_time/posix_time.hpp>
+#undef assert
+#define assert MONGO_assert
+
+namespace mongo {
+
+ /**
+ * tracks usage by collection
+ */
+ class Top {
+
+ public:
+ Top() : _lock("Top") { }
+
+ struct UsageData {
+ UsageData() : time(0) , count(0) {}
+ UsageData( const UsageData& older , const UsageData& newer );
+ long long time;
+ long long count;
+
+ void inc( long long micros ) {
+ count++;
+ time += micros;
+ }
+ };
+
+ struct CollectionData {
+ /**
+ * constructs a diff
+ */
+ CollectionData() {}
+ CollectionData( const CollectionData& older , const CollectionData& newer );
+
+ UsageData total;
+
+ UsageData readLock;
+ UsageData writeLock;
+
+ UsageData queries;
+ UsageData getmore;
+ UsageData insert;
+ UsageData update;
+ UsageData remove;
+ UsageData commands;
+ };
+
+ typedef map<string,CollectionData> UsageMap;
+
+ public:
+ void record( const string& ns , int op , int lockType , long long micros , bool command );
+ void append( BSONObjBuilder& b );
+ void cloneMap(UsageMap& out) const;
+ CollectionData getGlobalData() const { return _global; }
+ void collectionDropped( const string& ns );
+
+ public: // static stuff
+ static Top global;
+
+ private:
+ void _appendToUsageMap( BSONObjBuilder& b , const UsageMap& map ) const;
+ void _appendStatsEntry( BSONObjBuilder& b , const char * statsName , const UsageData& map ) const;
+ void _record( CollectionData& c , int op , int lockType , long long micros , bool command );
+
+ mutable mongo::mutex _lock;
+ CollectionData _global;
+ UsageMap _usage;
+ string _lastDropped;
+ };
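+    /* Illustrative call site (hypothetical values): after servicing a query on
+       "test.foo" under a read lock (lockType -1) that took 1200 micros:
+
+           Top::global.record( "test.foo" , dbQuery , -1 , 1200 , false );
+    */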
+
+ /* Records per namespace utilization of the mongod process.
+ No two functions of this class may be called concurrently.
+ */
+ class TopOld {
+ typedef boost::posix_time::ptime T;
+ typedef boost::posix_time::time_duration D;
+ typedef boost::tuple< D, int, int, int > UsageData;
+ public:
+ TopOld() : _read(false), _write(false) { }
+
+ /* these are used to record activity: */
+
+ void clientStart( const char *client ) {
+ clientStop();
+ _currentStart = currentTime();
+ _current = client;
+ }
+
+ /* indicate current request is a read operation. */
+ void setRead() { _read = true; }
+
+ void setWrite() { _write = true; }
+
+ void clientStop() {
+ if ( _currentStart == T() )
+ return;
+ D d = currentTime() - _currentStart;
+
+ {
+ scoped_lock L(topMutex);
+ recordUsage( _current, d );
+ }
+
+ _currentStart = T();
+ _read = false;
+ _write = false;
+ }
+
+ /* these are used to fetch the stats: */
+
+ struct Usage {
+ string ns;
+ D time;
+ double pct;
+ int reads, writes, calls;
+ };
+
+ static void usage( vector< Usage > &res ) {
+ scoped_lock L(topMutex);
+
+ // Populate parent namespaces
+ UsageMap snapshot;
+ UsageMap totalUsage;
+ fillParentNamespaces( snapshot, _snapshot );
+ fillParentNamespaces( totalUsage, _totalUsage );
+
+ multimap< D, string, more > sorted;
+ for( UsageMap::iterator i = snapshot.begin(); i != snapshot.end(); ++i )
+ sorted.insert( make_pair( i->second.get<0>(), i->first ) );
+ for( multimap< D, string, more >::iterator i = sorted.begin(); i != sorted.end(); ++i ) {
+ if ( trivialNs( i->second.c_str() ) )
+ continue;
+ Usage u;
+ u.ns = i->second;
+ u.time = totalUsage[ u.ns ].get<0>();
+ u.pct = _snapshotDuration != D() ? 100.0 * i->first.ticks() / _snapshotDuration.ticks() : 0;
+ u.reads = snapshot[ u.ns ].get<1>();
+ u.writes = snapshot[ u.ns ].get<2>();
+ u.calls = snapshot[ u.ns ].get<3>();
+ res.push_back( u );
+ }
+ for( UsageMap::iterator i = totalUsage.begin(); i != totalUsage.end(); ++i ) {
+ if ( snapshot.count( i->first ) != 0 || trivialNs( i->first.c_str() ) )
+ continue;
+ Usage u;
+ u.ns = i->first;
+ u.time = i->second.get<0>();
+ u.pct = 0;
+ u.reads = 0;
+ u.writes = 0;
+ u.calls = 0;
+ res.push_back( u );
+ }
+ }
+
+ static void completeSnapshot() {
+ scoped_lock L(topMutex);
+
+ if ( &_snapshot == &_snapshotA ) {
+ _snapshot = _snapshotB;
+ _nextSnapshot = _snapshotA;
+ }
+ else {
+ _snapshot = _snapshotA;
+ _nextSnapshot = _snapshotB;
+ }
+ _snapshotDuration = currentTime() - _snapshotStart;
+ _snapshotStart = currentTime();
+ _nextSnapshot.clear();
+ }
+
+ private:
+ static mongo::mutex topMutex;
+ static bool trivialNs( const char *ns ) {
+ const char *ret = strrchr( ns, '.' );
+ return ret && ret[ 1 ] == '\0';
+ }
+ typedef map<string,UsageData> UsageMap; // duration, # reads, # writes, # total calls
+ static T currentTime() {
+ return boost::posix_time::microsec_clock::universal_time();
+ }
+ void recordUsage( const string &client, D duration ) {
+ recordUsageForMap( _totalUsage, client, duration );
+ recordUsageForMap( _nextSnapshot, client, duration );
+ }
+ void recordUsageForMap( UsageMap &map, const string &client, D duration ) {
+ UsageData& g = map[client];
+ g.get< 0 >() += duration;
+ if ( _read && !_write )
+ g.get< 1 >()++;
+ else if ( !_read && _write )
+ g.get< 2 >()++;
+ g.get< 3 >()++;
+ }
+ static void fillParentNamespaces( UsageMap &to, const UsageMap &from ) {
+ for( UsageMap::const_iterator i = from.begin(); i != from.end(); ++i ) {
+ string current = i->first;
+ size_t dot = current.rfind( "." );
+ if ( dot == string::npos || dot != current.length() - 1 ) {
+ inc( to[ current ], i->second );
+ }
+ while( dot != string::npos ) {
+ current = current.substr( 0, dot );
+ inc( to[ current ], i->second );
+ dot = current.rfind( "." );
+ }
+ }
+ }
+ static void inc( UsageData &to, const UsageData &from ) {
+ to.get<0>() += from.get<0>();
+ to.get<1>() += from.get<1>();
+ to.get<2>() += from.get<2>();
+ to.get<3>() += from.get<3>();
+ }
+ struct more { bool operator()( const D &a, const D &b ) { return a > b; } };
+ string _current;
+ T _currentStart;
+ static T _snapshotStart;
+ static D _snapshotDuration;
+ static UsageMap _totalUsage;
+ static UsageMap _snapshotA;
+ static UsageMap _snapshotB;
+ static UsageMap &_snapshot;
+ static UsageMap &_nextSnapshot;
+ bool _read;
+ bool _write;
+ };
+
+} // namespace mongo
diff --git a/src/mongo/db/taskqueue.h b/src/mongo/db/taskqueue.h
new file mode 100644
index 00000000000..005bd986f11
--- /dev/null
+++ b/src/mongo/db/taskqueue.h
@@ -0,0 +1,106 @@
+// @file taskqueue.h
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "mongomutex.h"
+
+namespace mongo {
+
+    /** defer work items by queueing them for invocation by another thread. presumption is that
+        the consumer thread is outside of locks more than the source thread. Additional presumption
+        is that several objects or micro-tasks will be queued and that having a single thread
+        processing them in batch is helpful as they (in the first use case) use a common data
+        structure that can then stay in local cpu caches.
+
+ this class is in db/ as it is dbMutex (mongomutex) specific (so far).
+
+ using a functor instead of go() might be more elegant too, once again, would like to test any
+ performance differential. also worry that operator() hides things?
+
+ MT - copyable "micro task" object we can queue
+ must have a static method void MT::go(const MT&)
+
+ see DefInvoke in dbtests/ for an example.
+ */
+ template< class MT >
+ class TaskQueue {
+ public:
+ TaskQueue() : _which(0), _invokeMutex("deferredinvoker") { }
+
+ void defer(MT mt) {
+ // only one writer allowed. however the invoke processing below can occur concurrently with
+ // writes (for the most part)
+ DEV d.dbMutex.assertWriteLocked();
+
+ _queues[_which].push_back(mt);
+ }
+
+ /** call to process deferrals.
+
+ concurrency: handled herein. multiple threads could call invoke(), but their efforts will be
+ serialized. the common case is that there is a single processor calling invoke().
+
+ normally, you call this outside of any lock. but if you want to fully drain the queue,
+ call from within a read lock. for example:
+ {
+ // drain with minimal time in lock
+ d.invoke();
+ readlock lk;
+ d.invoke();
+ ...
+ }
+ you can also call invoke periodically to do some work and then pick up later on more.
+ */
+ void invoke() {
+ mutex::scoped_lock lk2(_invokeMutex);
+ int toDrain = 0;
+ {
+ // flip queueing to the other queue (we are double buffered)
+ readlocktry lk("", 5);
+ if( !lk.got() )
+ return;
+ toDrain = _which;
+ _which = _which ^ 1;
+                wassert( _queues[_which].empty() ); // we are in dbMutex, so it should be/stay empty until we exit dbMutex
+ }
+
+ _drain( _queues[toDrain] );
+ assert( _queues[toDrain].empty() );
+ }
+
+ private:
+ int _which; // 0 or 1
+ typedef vector< MT > Queue;
+ Queue _queues[2];
+
+ // lock order when multiple locks: dbMutex, _invokeMutex
+ mongo::mutex _invokeMutex;
+
+ void _drain(Queue& queue) {
+ unsigned oldCap = queue.capacity();
+ for( typename Queue::iterator i = queue.begin(); i != queue.end(); i++ ) {
+ const MT& v = *i;
+ MT::go(v);
+ }
+ queue.clear();
+ DEV assert( queue.capacity() == oldCap ); // just checking that clear() doesn't deallocate, we don't want that
+ }
+ };
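+    /* Illustrative micro-task (hypothetical; mirrors the DefInvoke example mentioned above):
+
+           struct RecordName {
+               string name;
+               static void go( const RecordName& t ) { log() << t.name << endl; }
+           };
+
+           TaskQueue<RecordName> q;
+           RecordName r;  r.name = "foo";
+           q.defer( r );   // producer side, inside the dbMutex write lock
+           q.invoke();     // consumer side, normally outside the lock
+    */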
+
+}
diff --git a/src/mongo/db/tests.cpp b/src/mongo/db/tests.cpp
new file mode 100644
index 00000000000..00f299e1bb6
--- /dev/null
+++ b/src/mongo/db/tests.cpp
@@ -0,0 +1,68 @@
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+/* tests.cpp
+
+ unit test & such
+*/
+
+#include "pch.h"
+#include "../util/mmap.h"
+
+namespace mongo {
+
+ int test2_old9() {
+ out() << "test2" << endl;
+ printStackTrace();
+ if ( 1 )
+ return 1;
+
+ MemoryMappedFile f;
+
+ unsigned long long len = 64*1024*1024;
+ char *p = (char *) f.map("/tmp/test.dat", len);
+ char *start = p;
+ char *end = p + 64*1024*1024-2;
+ end[1] = 'z';
+        int i = 0;
+ while ( p < end ) {
+ *p++ = ' ';
+ if ( ++i%64 == 0 ) {
+ *p++ = '\n';
+ *p++ = 'x';
+ }
+ }
+ *p = 'a';
+
+ f.flush(true);
+ out() << "done" << endl;
+
+ char *x = start + 32 * 1024 * 1024;
+ char *y = start + 48 * 1024 * 1024;
+ char *z = start + 62 * 1024 * 1024;
+
+ strcpy(z, "zfoo");
+ out() << "y" << endl;
+ strcpy(y, "yfoo");
+ strcpy(x, "xfoo");
+ strcpy(start, "xfoo");
+
+ dbexit( EXIT_TEST );
+
+ return 1;
+ }
+
+} // namespace mongo
diff --git a/src/mongo/dbtests/background_job_test.cpp b/src/mongo/dbtests/background_job_test.cpp
new file mode 100644
index 00000000000..f2bf7d86244
--- /dev/null
+++ b/src/mongo/dbtests/background_job_test.cpp
@@ -0,0 +1,109 @@
+// @file background_job_test.cpp
+
+/**
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "../pch.h"
+#include <boost/thread/thread.hpp>
+
+#include "dbtests.h"
+#include "../util/time_support.h"
+#include "../util/background.h"
+
+namespace BackgroundJobTests {
+
+    // a global variable that can be accessed independently of the IncTester object below;
+    // IncTester keeps it up to date
+ int GLOBAL_val;
+
+ class IncTester : public mongo::BackgroundJob {
+ public:
+ explicit IncTester( long long millis , bool selfDelete = false )
+ : BackgroundJob(selfDelete), _val(0), _millis(millis) { GLOBAL_val = 0; }
+
+ void waitAndInc( long long millis ) {
+ if ( millis )
+ mongo::sleepmillis( millis );
+ ++_val;
+ ++GLOBAL_val;
+ }
+
+ int getVal() { return _val; }
+
+ /* --- BackgroundJob virtuals --- */
+
+ string name() const { return "IncTester"; }
+
+ void run() { waitAndInc( _millis ); }
+
+ private:
+ int _val;
+ long long _millis;
+ };
+
+
+ class NormalCase {
+ public:
+ void run() {
+ IncTester tester( 0 /* inc without wait */ );
+ tester.go();
+ ASSERT( tester.wait() );
+ ASSERT_EQUALS( tester.getVal() , 1 );
+ }
+ };
+
+ class TimeOutCase {
+ public:
+ void run() {
+ IncTester tester( 1000 /* wait 1sec before inc-ing */ );
+ tester.go();
+ ASSERT( ! tester.wait( 100 /* ms */ ) ); // should time out
+ ASSERT_EQUALS( tester.getVal() , 0 );
+
+ // if we wait longer than the IncTester, we should see the increment
+ ASSERT( tester.wait( 1500 /* ms */ ) ); // should not time out
+ ASSERT_EQUALS( tester.getVal() , 1 );
+ }
+ };
+
+ class SelfDeletingCase {
+ public:
+ void run() {
+ BackgroundJob* j = new IncTester( 0 /* inc without wait */ , true /* self delete */ );
+ j->go();
+
+
+ // the background thread should have continued running and this test should pass the
+ // heap-checker as well
+ mongo::sleepmillis( 1000 );
+ ASSERT_EQUALS( GLOBAL_val, 1 );
+ }
+ };
+
+
+ class BackgroundJobSuite : public Suite {
+ public:
+ BackgroundJobSuite() : Suite( "background_job" ) {}
+
+ void setupTests() {
+ add< NormalCase >();
+ add< TimeOutCase >();
+ add< SelfDeletingCase >();
+ }
+
+ } backgroundJobSuite;
+
+} // namespace BackgroundJobTests
diff --git a/src/mongo/dbtests/balancer_policy_tests.cpp b/src/mongo/dbtests/balancer_policy_tests.cpp
new file mode 100644
index 00000000000..6f7c4a5dcd3
--- /dev/null
+++ b/src/mongo/dbtests/balancer_policy_tests.cpp
@@ -0,0 +1,203 @@
+// @file balancer_policy_test.cpp
+
+/**
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "dbtests.h"
+
+// TODO SERVER-1822
+//#include "../s/config.h" // for ShardFields
+//#include "../s/balancer_policy.h"
+
+namespace BalancerPolicyTests {
+
+//
+// TODO SERVER-1822
+//
+#if 0
+
+    typedef mongo::ShardFields sf; // fields from 'shards' collection
+ typedef mongo::LimitsFields lf; // fields from the balancer's limits map
+
+ class SizeMaxedShardTest {
+ public:
+ void run() {
+ BSONObj shard0 = BSON( sf::maxSize(0LL) << lf::currSize(0LL) );
+ ASSERT( ! BalancerPolicy::isSizeMaxed( shard0 ) );
+
+ BSONObj shard1 = BSON( sf::maxSize(100LL) << lf::currSize(80LL) );
+ ASSERT( ! BalancerPolicy::isSizeMaxed( shard1 ) );
+
+ BSONObj shard2 = BSON( sf::maxSize(100LL) << lf::currSize(110LL) );
+ ASSERT( BalancerPolicy::isSizeMaxed( shard2 ) );
+
+ BSONObj empty;
+ ASSERT( ! BalancerPolicy::isSizeMaxed( empty ) );
+ }
+ };
+
+ class DrainingShardTest {
+ public:
+ void run() {
+ BSONObj shard0 = BSON( sf::draining(true) );
+ ASSERT( BalancerPolicy::isDraining( shard0 ) );
+
+ BSONObj shard1 = BSON( sf::draining(false) );
+ ASSERT( ! BalancerPolicy::isDraining( shard1 ) );
+
+ BSONObj empty;
+ ASSERT( ! BalancerPolicy::isDraining( empty ) );
+ }
+ };
+
+ class BalanceNormalTest {
+ public:
+ void run() {
+ // 2 chunks and 0 chunk shards
+ BalancerPolicy::ShardToChunksMap chunkMap;
+ vector<BSONObj> chunks;
+ chunks.push_back(BSON( "min" << BSON( "x" << BSON( "$minKey"<<1) ) <<
+ "max" << BSON( "x" << 49 )));
+ chunks.push_back(BSON( "min" << BSON( "x" << 49 ) <<
+ "max" << BSON( "x" << BSON( "$maxkey"<<1 ))));
+ chunkMap["shard0"] = chunks;
+ chunks.clear();
+ chunkMap["shard1"] = chunks;
+
+ // no limits
+ BalancerPolicy::ShardToLimitsMap limitsMap;
+ BSONObj limits0 = BSON( sf::maxSize(0LL) << lf::currSize(2LL) << sf::draining(false) << lf::hasOpsQueued(false) );
+ BSONObj limits1 = BSON( sf::maxSize(0LL) << lf::currSize(0LL) << sf::draining(false) << lf::hasOpsQueued(false) );
+ limitsMap["shard0"] = limits0;
+ limitsMap["shard1"] = limits1;
+
+ BalancerPolicy::ChunkInfo* c = NULL;
+ c = BalancerPolicy::balance( "ns", limitsMap, chunkMap, 1 );
+ ASSERT( c );
+ }
+ };
+
+ class BalanceDrainingTest {
+ public:
+ void run() {
+ // one normal, one draining
+ // 2 chunks and 0 chunk shards
+ BalancerPolicy::ShardToChunksMap chunkMap;
+ vector<BSONObj> chunks;
+ chunks.push_back(BSON( "min" << BSON( "x" << BSON( "$minKey"<<1) ) <<
+ "max" << BSON( "x" << 49 )));
+ chunkMap["shard0"] = chunks;
+ chunks.clear();
+ chunks.push_back(BSON( "min" << BSON( "x" << 49 ) <<
+ "max" << BSON( "x" << BSON( "$maxkey"<<1 ))));
+ chunkMap["shard1"] = chunks;
+
+ // shard0 is draining
+ BalancerPolicy::ShardToLimitsMap limitsMap;
+ BSONObj limits0 = BSON( sf::maxSize(0LL) << lf::currSize(2LL) << sf::draining(true) );
+ BSONObj limits1 = BSON( sf::maxSize(0LL) << lf::currSize(0LL) << sf::draining(false) );
+ limitsMap["shard0"] = limits0;
+ limitsMap["shard1"] = limits1;
+
+ BalancerPolicy::ChunkInfo* c = NULL;
+ c = BalancerPolicy::balance( "ns", limitsMap, chunkMap, 0 );
+ ASSERT( c );
+ ASSERT_EQUALS( c->to , "shard1" );
+ ASSERT_EQUALS( c->from , "shard0" );
+ ASSERT( ! c->chunk.isEmpty() );
+ }
+ };
+
+ class BalanceEndedDrainingTest {
+ public:
+ void run() {
+ // 2 chunks and 0 chunk (drain completed) shards
+ BalancerPolicy::ShardToChunksMap chunkMap;
+ vector<BSONObj> chunks;
+ chunks.push_back(BSON( "min" << BSON( "x" << BSON( "$minKey"<<1) ) <<
+ "max" << BSON( "x" << 49 )));
+ chunks.push_back(BSON( "min" << BSON( "x" << 49 ) <<
+ "max" << BSON( "x" << BSON( "$maxkey"<<1 ))));
+ chunkMap["shard0"] = chunks;
+ chunks.clear();
+ chunkMap["shard1"] = chunks;
+
+ // no limits
+ BalancerPolicy::ShardToLimitsMap limitsMap;
+ BSONObj limits0 = BSON( sf::maxSize(0LL) << lf::currSize(2LL) << sf::draining(false) );
+ BSONObj limits1 = BSON( sf::maxSize(0LL) << lf::currSize(0LL) << sf::draining(true) );
+ limitsMap["shard0"] = limits0;
+ limitsMap["shard1"] = limits1;
+
+ BalancerPolicy::ChunkInfo* c = NULL;
+ c = BalancerPolicy::balance( "ns", limitsMap, chunkMap, 0 );
+ ASSERT( ! c );
+ }
+ };
+
+ class BalanceImpasseTest {
+ public:
+ void run() {
+ // one maxed out, one draining
+ // 2 chunks and 0 chunk shards
+ BalancerPolicy::ShardToChunksMap chunkMap;
+ vector<BSONObj> chunks;
+ chunks.push_back(BSON( "min" << BSON( "x" << BSON( "$minKey"<<1) ) <<
+ "max" << BSON( "x" << 49 )));
+ chunkMap["shard0"] = chunks;
+ chunks.clear();
+ chunks.push_back(BSON( "min" << BSON( "x" << 49 ) <<
+ "max" << BSON( "x" << BSON( "$maxkey"<<1 ))));
+ chunkMap["shard1"] = chunks;
+
+ // shard0 is draining, shard1 is maxed out, shard2 has writebacks pending
+ BalancerPolicy::ShardToLimitsMap limitsMap;
+ BSONObj limits0 = BSON( sf::maxSize(0LL) << lf::currSize(2LL) << sf::draining(true) );
+ BSONObj limits1 = BSON( sf::maxSize(1LL) << lf::currSize(1LL) << sf::draining(false) );
+ BSONObj limits2 = BSON( sf::maxSize(0LL) << lf::currSize(1LL) << lf::hasOpsQueued(true) );
+ limitsMap["shard0"] = limits0;
+ limitsMap["shard1"] = limits1;
+ limitsMap["shard2"] = limits2;
+
+ BalancerPolicy::ChunkInfo* c = NULL;
+ c = BalancerPolicy::balance( "ns", limitsMap, chunkMap, 0 );
+ ASSERT( ! c );
+ }
+ };
+
+//
+// TODO SERVER-1822
+//
+#endif // #if 0
+
+ class All : public Suite {
+ public:
+ All() : Suite( "balancer_policy" ) {
+ }
+
+ void setupTests() {
+ // TODO SERVER-1822
+ // add< SizeMaxedShardTest >();
+ // add< DrainingShardTest >();
+ // add< BalanceNormalTest >();
+ // add< BalanceDrainingTest >();
+ // add< BalanceEndedDrainingTest >();
+ // add< BalanceImpasseTest >();
+ }
+ } allTests;
+
+} // namespace BalancerPolicyTests
diff --git a/src/mongo/dbtests/basictests.cpp b/src/mongo/dbtests/basictests.cpp
new file mode 100644
index 00000000000..46a7dbc22bd
--- /dev/null
+++ b/src/mongo/dbtests/basictests.cpp
@@ -0,0 +1,695 @@
+// basictests.cpp : basic unit tests
+//
+
+/**
+ * Copyright (C) 2009 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+
+#include "dbtests.h"
+#include "../util/base64.h"
+#include "../util/array.h"
+#include "../util/text.h"
+#include "../util/queue.h"
+#include "../util/paths.h"
+#include "../util/stringutils.h"
+#include "../util/compress.h"
+#include "../db/db.h"
+
+namespace BasicTests {
+
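+    // RARELY and ONCE keep per-call-site state: over the 128 iterations below,
+    // each of the three counters is expected to have been incremented exactly once.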
+ class Rarely {
+ public:
+ void run() {
+ int first = 0;
+ int second = 0;
+ int third = 0;
+ for( int i = 0; i < 128; ++i ) {
+ incRarely( first );
+ incRarely2( second );
+ ONCE ++third;
+ }
+ ASSERT_EQUALS( 1, first );
+ ASSERT_EQUALS( 1, second );
+ ASSERT_EQUALS( 1, third );
+ }
+ private:
+ void incRarely( int &c ) {
+ RARELY ++c;
+ }
+ void incRarely2( int &c ) {
+ RARELY ++c;
+ }
+ };
+
+ class Base64Tests {
+ public:
+
+ void roundTrip( string s ) {
+ ASSERT_EQUALS( s , base64::decode( base64::encode( s ) ) );
+ }
+
+ void roundTrip( const unsigned char * _data , int len ) {
+ const char *data = (const char *) _data;
+ string s = base64::encode( data , len );
+ string out = base64::decode( s );
+ ASSERT_EQUALS( out.size() , static_cast<size_t>(len) );
+ bool broke = false;
+ for ( int i=0; i<len; i++ ) {
+ if ( data[i] != out[i] )
+ broke = true;
+ }
+ if ( ! broke )
+ return;
+
+ cout << s << endl;
+ for ( int i=0; i<len; i++ )
+ cout << hex << ( data[i] & 0xFF ) << dec << " ";
+ cout << endl;
+ for ( int i=0; i<len; i++ )
+ cout << hex << ( out[i] & 0xFF ) << dec << " ";
+ cout << endl;
+
+ ASSERT(0);
+ }
+
+ void run() {
+
+ ASSERT_EQUALS( "ZWxp" , base64::encode( "eli" , 3 ) );
+ ASSERT_EQUALS( "ZWxpb3Rz" , base64::encode( "eliots" , 6 ) );
+ ASSERT_EQUALS( "ZWxpb3Rz" , base64::encode( "eliots" ) );
+
+ ASSERT_EQUALS( "ZQ==" , base64::encode( "e" , 1 ) );
+ ASSERT_EQUALS( "ZWw=" , base64::encode( "el" , 2 ) );
+
+ roundTrip( "e" );
+ roundTrip( "el" );
+ roundTrip( "eli" );
+ roundTrip( "elio" );
+ roundTrip( "eliot" );
+ roundTrip( "eliots" );
+ roundTrip( "eliotsz" );
+
+ unsigned char z[] = { 0x1 , 0x2 , 0x3 , 0x4 };
+ roundTrip( z , 4 );
+
+ unsigned char y[] = {
+ 0x01, 0x10, 0x83, 0x10, 0x51, 0x87, 0x20, 0x92, 0x8B, 0x30,
+ 0xD3, 0x8F, 0x41, 0x14, 0x93, 0x51, 0x55, 0x97, 0x61, 0x96,
+ 0x9B, 0x71, 0xD7, 0x9F, 0x82, 0x18, 0xA3, 0x92, 0x59, 0xA7,
+ 0xA2, 0x9A, 0xAB, 0xB2, 0xDB, 0xAF, 0xC3, 0x1C, 0xB3, 0xD3,
+ 0x5D, 0xB7, 0xE3, 0x9E, 0xBB, 0xF3, 0xDF, 0xBF
+ };
+ roundTrip( y , 4 );
+ roundTrip( y , 40 );
+ }
+ };
+
+ namespace stringbuildertests {
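+        // SBTGB streams the same value into both the std::stringstream and the
+        // StringBuilder, so Base::run() can assert that their output matches.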
+#define SBTGB(x) ss << (x); sb << (x);
+
+ class Base {
+ virtual void pop() = 0;
+
+ public:
+ Base() {}
+ virtual ~Base() {}
+
+ void run() {
+ pop();
+ ASSERT_EQUALS( ss.str() , sb.str() );
+ }
+
+ stringstream ss;
+ StringBuilder sb;
+ };
+
+ class simple1 : public Base {
+ void pop() {
+ SBTGB(1);
+ SBTGB("yo");
+ SBTGB(2);
+ }
+ };
+
+ class simple2 : public Base {
+ void pop() {
+ SBTGB(1);
+ SBTGB("yo");
+ SBTGB(2);
+ SBTGB( 12123123123LL );
+ SBTGB( "xxx" );
+ SBTGB( 5.4 );
+ SBTGB( 5.4312 );
+ SBTGB( "yyy" );
+ SBTGB( (short)5 );
+ SBTGB( (short)(1231231231231LL) );
+ }
+ };
+
+ class reset1 {
+ public:
+ void run() {
+ StringBuilder sb;
+ sb << "1" << "abc" << "5.17";
+ ASSERT_EQUALS( "1abc5.17" , sb.str() );
+ ASSERT_EQUALS( "1abc5.17" , sb.str() );
+ sb.reset();
+ ASSERT_EQUALS( "" , sb.str() );
+ sb << "999";
+ ASSERT_EQUALS( "999" , sb.str() );
+ }
+ };
+
+ class reset2 {
+ public:
+ void run() {
+ StringBuilder sb;
+ sb << "1" << "abc" << "5.17";
+ ASSERT_EQUALS( "1abc5.17" , sb.str() );
+ ASSERT_EQUALS( "1abc5.17" , sb.str() );
+ sb.reset(1);
+ ASSERT_EQUALS( "" , sb.str() );
+ sb << "999";
+ ASSERT_EQUALS( "999" , sb.str() );
+ }
+ };
+
+ }
+
+ class sleeptest {
+ public:
+
+ void run() {
+ Timer t;
+ int matches = 0;
+ for( int p = 0; p < 3; p++ ) {
+ sleepsecs( 1 );
+ int sec = (t.millis() + 2)/1000;
+ if( sec == 1 )
+ matches++;
+ else
+ log() << "temp millis: " << t.millis() << endl;
+ ASSERT( sec >= 0 && sec <= 2 );
+ t.reset();
+ }
+ if ( matches < 2 )
+ log() << "matches:" << matches << endl;
+ ASSERT( matches >= 2 );
+
+ sleepmicros( 1527123 );
+ ASSERT( t.micros() > 1000000 );
+ ASSERT( t.micros() < 2000000 );
+
+ t.reset();
+ sleepmillis( 1727 );
+ ASSERT( t.millis() >= 1000 );
+ ASSERT( t.millis() <= 2500 );
+
+ {
+ int total = 1200;
+ int ms = 2;
+ t.reset();
+ for ( int i=0; i<(total/ms); i++ ) {
+ sleepmillis( ms );
+ }
+ {
+ int x = t.millis();
+ if ( x < 1000 || x > 2500 ) {
+ cout << "sleeptest finds sleep accuracy to be not great. x: " << x << endl;
+ ASSERT( x >= 1000 );
+ ASSERT( x <= 20000 );
+ }
+ }
+ }
+
+#ifdef __linux__
+ {
+ int total = 1200;
+ int micros = 100;
+ t.reset();
+ int numSleeps = 1000*(total/micros);
+ for ( int i=0; i<numSleeps; i++ ) {
+ sleepmicros( micros );
+ }
+ {
+ int y = t.millis();
+ if ( y < 1000 || y > 2500 ) {
+ cout << "sleeptest y: " << y << endl;
+ ASSERT( y >= 1000 );
+ /* ASSERT( y <= 100000 ); */
+ }
+ }
+ }
+#endif
+
+ }
+
+ };
+
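+    // uassert should evaluate its message argument lazily: foo() (which increments
+    // x) must not run when the condition is true, and must run exactly once when
+    // the condition is false.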
+ class AssertTests {
+ public:
+
+ int x;
+
+ AssertTests() {
+ x = 0;
+ }
+
+ string foo() {
+ x++;
+ return "";
+ }
+ void run() {
+ uassert( -1 , foo() , 1 );
+ if( x != 0 ) {
+ ASSERT_EQUALS( 0 , x );
+ }
+ try {
+ uassert( -1 , foo() , 0 );
+ }
+ catch ( ... ) {}
+ ASSERT_EQUALS( 1 , x );
+ }
+ };
+
+ namespace ArrayTests {
+ class basic1 {
+ public:
+ void run() {
+ FastArray<int> a(100);
+ a.push_back( 5 );
+ a.push_back( 6 );
+
+ ASSERT_EQUALS( 2 , a.size() );
+
+ FastArray<int>::iterator i = a.begin();
+ ASSERT( i != a.end() );
+ ASSERT_EQUALS( 5 , *i );
+ ++i;
+ ASSERT( i != a.end() );
+ ASSERT_EQUALS( 6 , *i );
+ ++i;
+ ASSERT( i == a.end() );
+ }
+ };
+    } // namespace ArrayTests
+
+ class ThreadSafeStringTest {
+ public:
+ void run() {
+ ThreadSafeString s;
+ s = "eliot";
+ ASSERT_EQUALS( s , "eliot" );
+ ASSERT( s != "eliot2" );
+
+ ThreadSafeString s2 = s;
+ ASSERT_EQUALS( s2 , "eliot" );
+
+
+ {
+ string foo;
+ {
+ ThreadSafeString bar;
+ bar = "eliot2";
+ foo = bar.toString();
+ }
+ ASSERT_EQUALS( "eliot2" , foo );
+ }
+ }
+ };
+
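+    // lexNumCmp compares embedded runs of digits numerically (ignoring leading
+    // zeros) rather than character by character, e.g. "100" > "50" and
+    // "238" == "000238" below.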
+ class LexNumCmp {
+ public:
+ void run() {
+
+ ASSERT( ! isNumber( (char)255 ) );
+
+ ASSERT_EQUALS( 0, lexNumCmp( "a", "a" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "a", "aa" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "aa", "a" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "a", "b" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "100", "50" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "50", "100" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "b", "a" ) );
+ ASSERT_EQUALS( 0, lexNumCmp( "aa", "aa" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "aa", "ab" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "ab", "aa" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "0", "a" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "a0", "aa" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "a", "0" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "aa", "a0" ) );
+ ASSERT_EQUALS( 0, lexNumCmp( "0", "0" ) );
+ ASSERT_EQUALS( 0, lexNumCmp( "10", "10" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "1", "10" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "10", "1" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "11", "10" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "10", "11" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "f11f", "f10f" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "f10f", "f11f" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "f11f", "f111" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "f111", "f11f" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "f12f", "f12g" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "f12g", "f12f" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "aa{", "aab" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "aa{", "aa1" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "a1{", "a11" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "a1{a", "a1{" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "a1{", "a1{a" ) );
+ ASSERT_EQUALS( 1, lexNumCmp("21", "11") );
+ ASSERT_EQUALS( -1, lexNumCmp("11", "21") );
+
+ ASSERT_EQUALS( -1 , lexNumCmp( "a.0" , "a.1" ) );
+ ASSERT_EQUALS( -1 , lexNumCmp( "a.0.b" , "a.1" ) );
+
+ ASSERT_EQUALS( -1 , lexNumCmp( "b." , "b.|" ) );
+ ASSERT_EQUALS( -1 , lexNumCmp( "b.0e" , (string("b.") + (char)255).c_str() ) );
+ ASSERT_EQUALS( -1 , lexNumCmp( "b." , "b.0e" ) );
+
+ ASSERT_EQUALS( 0, lexNumCmp( "238947219478347782934718234", "238947219478347782934718234"));
+ ASSERT_EQUALS( 0, lexNumCmp( "000238947219478347782934718234", "238947219478347782934718234"));
+ ASSERT_EQUALS( 1, lexNumCmp( "000238947219478347782934718235", "238947219478347782934718234"));
+ ASSERT_EQUALS( -1, lexNumCmp( "238947219478347782934718234", "238947219478347782934718234.1"));
+ ASSERT_EQUALS( 0, lexNumCmp( "238", "000238"));
+ ASSERT_EQUALS( 0, lexNumCmp( "002384", "0002384"));
+ ASSERT_EQUALS( 0, lexNumCmp( "00002384", "0002384"));
+ ASSERT_EQUALS( 0, lexNumCmp( "0", "0"));
+ ASSERT_EQUALS( 0, lexNumCmp( "0000", "0"));
+ ASSERT_EQUALS( 0, lexNumCmp( "0", "000"));
+ ASSERT_EQUALS( -1, lexNumCmp( "0000", "0.0"));
+ ASSERT_EQUALS( 1, lexNumCmp( "2380", "238"));
+ ASSERT_EQUALS( 1, lexNumCmp( "2385", "2384"));
+ ASSERT_EQUALS( 1, lexNumCmp( "2385", "02384"));
+ ASSERT_EQUALS( 1, lexNumCmp( "2385", "002384"));
+ ASSERT_EQUALS( -1, lexNumCmp( "123.234.4567", "00238"));
+ ASSERT_EQUALS( 0, lexNumCmp( "123.234", "00123.234"));
+ ASSERT_EQUALS( 0, lexNumCmp( "a.123.b", "a.00123.b"));
+ ASSERT_EQUALS( 1, lexNumCmp( "a.123.b", "a.b.00123.b"));
+ ASSERT_EQUALS( -1, lexNumCmp( "a.00.0", "a.0.1"));
+ ASSERT_EQUALS( 0, lexNumCmp( "01.003.02", "1.3.2"));
+ ASSERT_EQUALS( -1, lexNumCmp( "1.3.2", "10.300.20"));
+ ASSERT_EQUALS( 0, lexNumCmp( "10.300.20", "000000000000010.0000300.000000020"));
+ ASSERT_EQUALS( 0, lexNumCmp( "0000a", "0a"));
+ ASSERT_EQUALS( -1, lexNumCmp( "a", "0a"));
+ ASSERT_EQUALS( -1, lexNumCmp( "000a", "001a"));
+ ASSERT_EQUALS( 0, lexNumCmp( "010a", "0010a"));
+
+ ASSERT_EQUALS( -1 , lexNumCmp( "a0" , "a00" ) );
+ ASSERT_EQUALS( 0 , lexNumCmp( "a.0" , "a.00" ) );
+ ASSERT_EQUALS( -1 , lexNumCmp( "a.b.c.d0" , "a.b.c.d00" ) );
+ ASSERT_EQUALS( 1 , lexNumCmp( "a.b.c.0.y" , "a.b.c.00.x" ) );
+
+ ASSERT_EQUALS( -1, lexNumCmp( "a", "a-" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "a-", "a" ) );
+ ASSERT_EQUALS( 0, lexNumCmp( "a-", "a-" ) );
+
+ ASSERT_EQUALS( -1, lexNumCmp( "a", "a-c" ) );
+ ASSERT_EQUALS( 1, lexNumCmp( "a-c", "a" ) );
+ ASSERT_EQUALS( 0, lexNumCmp( "a-c", "a-c" ) );
+
+ ASSERT_EQUALS( 1, lexNumCmp( "a-c.t", "a.t" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "a.t", "a-c.t" ) );
+ ASSERT_EQUALS( 0, lexNumCmp( "a-c.t", "a-c.t" ) );
+
+ ASSERT_EQUALS( 1, lexNumCmp( "ac.t", "a.t" ) );
+ ASSERT_EQUALS( -1, lexNumCmp( "a.t", "ac.t" ) );
+ ASSERT_EQUALS( 0, lexNumCmp( "ac.t", "ac.t" ) );
+ }
+ };
+
+ class DatabaseValidNames {
+ public:
+ void run() {
+ ASSERT( NamespaceString::validDBName( "foo" ) );
+ ASSERT( ! NamespaceString::validDBName( "foo/bar" ) );
+ ASSERT( ! NamespaceString::validDBName( "foo bar" ) );
+ ASSERT( ! NamespaceString::validDBName( "foo.bar" ) );
+
+ ASSERT( NamespaceString::normal( "asdads" ) );
+ ASSERT( ! NamespaceString::normal( "asda$ds" ) );
+ ASSERT( NamespaceString::normal( "local.oplog.$main" ) );
+ }
+ };
+
+ class DatabaseOwnsNS {
+ public:
+ void run() {
+ dblock lk;
+ bool isNew = false;
+ // this leaks as ~Database is private
+ // if that changes, should put this on the stack
+ {
+ Database * db = new Database( "dbtests_basictests_ownsns" , isNew );
+ assert( isNew );
+
+ ASSERT( db->ownsNS( "dbtests_basictests_ownsns.x" ) );
+ ASSERT( db->ownsNS( "dbtests_basictests_ownsns.x.y" ) );
+ ASSERT( ! db->ownsNS( "dbtests_basictests_ownsn.x.y" ) );
+ ASSERT( ! db->ownsNS( "dbtests_basictests_ownsnsa.x.y" ) );
+ }
+ }
+ };
+
+ class NSValidNames {
+ public:
+ void run() {
+ ASSERT( isValidNS( "test.foo" ) );
+ ASSERT( ! isValidNS( "test." ) );
+ ASSERT( ! isValidNS( "test" ) );
+ }
+ };
+
+ class PtrTests {
+ public:
+ void run() {
+ scoped_ptr<int> p1 (new int(1));
+ boost::shared_ptr<int> p2 (new int(2));
+ scoped_ptr<const int> p3 (new int(3));
+ boost::shared_ptr<const int> p4 (new int(4));
+
+ //non-const
+ ASSERT_EQUALS( p1.get() , ptr<int>(p1) );
+ ASSERT_EQUALS( p2.get() , ptr<int>(p2) );
+ ASSERT_EQUALS( p2.get() , ptr<int>(p2.get()) ); // T* constructor
+ ASSERT_EQUALS( p2.get() , ptr<int>(ptr<int>(p2)) ); // copy constructor
+ ASSERT_EQUALS( *p2 , *ptr<int>(p2));
+ ASSERT_EQUALS( p2.get() , ptr<boost::shared_ptr<int> >(&p2)->get() ); // operator->
+
+ //const
+ ASSERT_EQUALS( p1.get() , ptr<const int>(p1) );
+ ASSERT_EQUALS( p2.get() , ptr<const int>(p2) );
+ ASSERT_EQUALS( p2.get() , ptr<const int>(p2.get()) );
+ ASSERT_EQUALS( p3.get() , ptr<const int>(p3) );
+ ASSERT_EQUALS( p4.get() , ptr<const int>(p4) );
+ ASSERT_EQUALS( p4.get() , ptr<const int>(p4.get()) );
+ ASSERT_EQUALS( p2.get() , ptr<const int>(ptr<const int>(p2)) );
+ ASSERT_EQUALS( p2.get() , ptr<const int>(ptr<int>(p2)) ); // constizing copy constructor
+ ASSERT_EQUALS( *p2 , *ptr<int>(p2));
+ ASSERT_EQUALS( p2.get() , ptr<const boost::shared_ptr<int> >(&p2)->get() );
+
+ //bool context
+ ASSERT( ptr<int>(p1) );
+ ASSERT( !ptr<int>(NULL) );
+ ASSERT( !ptr<int>() );
+
+#if 0
+ // These shouldn't compile
+ ASSERT_EQUALS( p3.get() , ptr<int>(p3) );
+ ASSERT_EQUALS( p4.get() , ptr<int>(p4) );
+ ASSERT_EQUALS( p2.get() , ptr<int>(ptr<const int>(p2)) );
+#endif
+ }
+ };
+
+ struct StringSplitterTest {
+
+ void test( string s ) {
+ vector<string> v = StringSplitter::split( s , "," );
+ ASSERT_EQUALS( s , StringSplitter::join( v , "," ) );
+ }
+
+ void run() {
+ test( "a" );
+ test( "a,b" );
+ test( "a,b,c" );
+ }
+ };
+
+ struct IsValidUTF8Test {
+// macros used to get valid line numbers
+#define good(s) ASSERT(isValidUTF8(s));
+#define bad(s) ASSERT(!isValidUTF8(s));
+
+ void run() {
+ good("A");
+ good("\xC2\xA2"); // cent: ¢
+ good("\xE2\x82\xAC"); // euro: €
+ good("\xF0\x9D\x90\x80"); // Blackboard A: 𝐀
+
+ //abrupt end
+ bad("\xC2");
+ bad("\xE2\x82");
+ bad("\xF0\x9D\x90");
+ bad("\xC2 ");
+ bad("\xE2\x82 ");
+ bad("\xF0\x9D\x90 ");
+
+ //too long
+ bad("\xF8\x80\x80\x80\x80");
+ bad("\xFC\x80\x80\x80\x80\x80");
+ bad("\xFE\x80\x80\x80\x80\x80\x80");
+ bad("\xFF\x80\x80\x80\x80\x80\x80\x80");
+
+ bad("\xF5\x80\x80\x80"); // U+140000 > U+10FFFF
+            bad("\x80"); // can't start with a continuation byte
+ bad("\xC0\x80"); // 2-byte version of ASCII NUL
+#undef good
+#undef bad
+ }
+ };
+
+
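+    // blockingPop( x, 5 ) should time out after roughly 5 seconds on an empty
+    // queue, hence the generous 3-9 second window on the elapsed time.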
+ class QueueTest {
+ public:
+ void run() {
+ BlockingQueue<int> q;
+ Timer t;
+ int x;
+ ASSERT( ! q.blockingPop( x , 5 ) );
+ ASSERT( t.seconds() > 3 && t.seconds() < 9 );
+
+ }
+ };
+
+ class StrTests {
+ public:
+
+ void run() {
+ ASSERT_EQUALS( 1u , str::count( "abc" , 'b' ) );
+ ASSERT_EQUALS( 3u , str::count( "babab" , 'b' ) );
+ }
+
+ };
+
+ class HostAndPortTests {
+ public:
+ void run() {
+ HostAndPort a( "x1" , 1000 );
+ HostAndPort b( "x1" , 1000 );
+ HostAndPort c( "x1" , 1001 );
+ HostAndPort d( "x2" , 1000 );
+
+ ASSERT( a == b );
+ ASSERT( a != c );
+ ASSERT( a != d );
+
+ }
+ };
+
+ class RelativePathTest {
+ public:
+ void run() {
+ RelativePath a = RelativePath::fromRelativePath( "a" );
+ RelativePath b = RelativePath::fromRelativePath( "a" );
+ RelativePath c = RelativePath::fromRelativePath( "b" );
+ RelativePath d = RelativePath::fromRelativePath( "a/b" );
+
+
+ ASSERT( a == b );
+ ASSERT( a != c );
+ ASSERT( a != d );
+ ASSERT( c != d );
+ }
+ };
+
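+    // Exercises CmdLine::parseConfigFile: per the expected output, comment lines
+    // and the "this = false" option are filtered out, while the quoted password
+    // and the " other = monkeys " line pass through unchanged.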
+ class CmdLineParseConfigTest {
+ public:
+ void run() {
+ stringstream ss1;
+ istringstream iss1("");
+ CmdLine::parseConfigFile( iss1, ss1 );
+ stringstream ss2;
+ istringstream iss2("password=\'foo bar baz\'");
+ CmdLine::parseConfigFile( iss2, ss2 );
+ stringstream ss3;
+ istringstream iss3("\t this = false \n#that = true\n #another = whocares\n\n other = monkeys ");
+ CmdLine::parseConfigFile( iss3, ss3 );
+
+ ASSERT( ss1.str().compare("\n") == 0 );
+ ASSERT( ss2.str().compare("password=\'foo bar baz\'\n\n") == 0 );
+ ASSERT( ss3.str().compare("\n other = monkeys \n\n") == 0 );
+ }
+ };
+
+ struct CompressionTest1 {
+ void run() {
+ const char * c = "this is a test";
+ std::string s;
+ size_t len = compress(c, strlen(c)+1, &s);
+ assert( len > 0 );
+
+ std::string out;
+ bool ok = uncompress(s.c_str(), s.size(), &out);
+ assert(ok);
+ assert( strcmp(out.c_str(), c) == 0 );
+ }
+ } ctest1;
+
+
+ class All : public Suite {
+ public:
+ All() : Suite( "basic" ) {
+ }
+
+ void setupTests() {
+ add< Rarely >();
+ add< Base64Tests >();
+
+ add< stringbuildertests::simple1 >();
+ add< stringbuildertests::simple2 >();
+ add< stringbuildertests::reset1 >();
+ add< stringbuildertests::reset2 >();
+
+ add< sleeptest >();
+ add< AssertTests >();
+
+ add< ArrayTests::basic1 >();
+ add< LexNumCmp >();
+
+ add< DatabaseValidNames >();
+ add< DatabaseOwnsNS >();
+
+ add< NSValidNames >();
+
+ add< PtrTests >();
+
+ add< StringSplitterTest >();
+ add< IsValidUTF8Test >();
+
+ add< QueueTest >();
+
+ add< StrTests >();
+
+ add< HostAndPortTests >();
+ add< RelativePathTest >();
+ add< CmdLineParseConfigTest >();
+
+ add< CompressionTest1 >();
+ }
+ } myall;
+
+} // namespace BasicTests
+
diff --git a/src/mongo/dbtests/btreetests.cpp b/src/mongo/dbtests/btreetests.cpp
new file mode 100644
index 00000000000..efa42b1d5c1
--- /dev/null
+++ b/src/mongo/dbtests/btreetests.cpp
@@ -0,0 +1,59 @@
+// btreetests.cpp : Btree unit tests
+//
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+
+#include "../db/db.h"
+#include "../db/btree.h"
+
+#include "dbtests.h"
+
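+// btreetests.inl is textually included three times below. The macros retarget
+// BtreeBucket/btree/btreemod/Continuation (and BTVERSION) to the V0 or V1 btree
+// format, and TESTTWOSTEP selects the twoStepInsert() path, so the same test
+// bodies run against each configuration under a distinct suite name (testName).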
+#define BtreeBucket BtreeBucket<V0>
+#define btree btree<V0>
+#define btreemod btreemod<V0>
+#define Continuation Continuation<V0>
+#define testName "btree"
+#define BTVERSION 0
+namespace BtreeTests0 {
+ #include "btreetests.inl"
+}
+
+#undef BtreeBucket
+#undef btree
+#undef btreemod
+#undef Continuation
+#define BtreeBucket BtreeBucket<V1>
+#define btree btree<V1>
+#define btreemod btreemod<V1>
+#define Continuation Continuation<V1>
+#undef testName
+#define testName "btree1"
+#undef BTVERSION
+#define BTVERSION 1
+namespace BtreeTests1 {
+ #include "btreetests.inl"
+}
+
+#undef testName
+#define testName "btree1_twostep"
+#define TESTTWOSTEP 1
+
+namespace BtreeTests2 {
+ #include "btreetests.inl"
+}
diff --git a/src/mongo/dbtests/btreetests.inl b/src/mongo/dbtests/btreetests.inl
new file mode 100644
index 00000000000..824313e6a54
--- /dev/null
+++ b/src/mongo/dbtests/btreetests.inl
@@ -0,0 +1,1713 @@
+ typedef BtreeBucket::_KeyNode _KeyNode;
+
+ const char* ns() {
+ return "unittests.btreetests";
+ }
+
+ // dummy, valid record loc
+ const DiskLoc recordLoc() {
+ return DiskLoc( 0, 2 );
+ }
+
+ class Ensure {
+ public:
+ Ensure() {
+ _c.ensureIndex( ns(), BSON( "a" << 1 ), false, "testIndex",
+                            false, // given the two btree versions under test, not sure if cache=true would mess us up...
+ false, BTVERSION);
+ }
+ ~Ensure() {
+ _c.dropCollection( ns() );
+ //_c.dropIndexes( ns() );
+ }
+ private:
+ DBDirectClient _c;
+ };
+
+ class Base : public Ensure {
+ public:
+ Base() :
+ _context( ns() ) {
+ {
+ bool f = false;
+ assert( f = true );
+ massert( 10402 , "assert is misdefined", f);
+ }
+ }
+ virtual ~Base() {}
+ static string bigNumString( long long n, int len = 800 ) {
+ char sub[17];
+ sprintf( sub, "%.16llx", n );
+ string val( len, ' ' );
+ for( int i = 0; i < len; ++i ) {
+ val[ i ] = sub[ i % 16 ];
+ }
+ return val;
+ }
+ protected:
+ const BtreeBucket* bt() {
+ return id().head.btree();
+ }
+ DiskLoc dl() {
+ return id().head;
+ }
+ IndexDetails& id() {
+ NamespaceDetails *nsd = nsdetails( ns() );
+ assert( nsd );
+ return nsd->idx( 1 );
+ }
+ void checkValid( int nKeys ) {
+ ASSERT( bt() );
+ ASSERT( bt()->isHead() );
+ bt()->assertValid( order(), true );
+ ASSERT_EQUALS( nKeys, bt()->fullValidate( dl(), order(), 0, true ) );
+ }
+ void dump() {
+ bt()->dumpTree( dl(), order() );
+ }
+ void insert( BSONObj &key ) {
+ const BtreeBucket *b = bt();
+
+#if defined(TESTTWOSTEP)
+ {
+ Continuation c(dl(), recordLoc(), key, Ordering::make(order()), id());
+ b->twoStepInsert(dl(), c, true);
+ c.stepTwo();
+ }
+#else
+ {
+ b->bt_insert( dl(), recordLoc(), key, Ordering::make(order()), true, id(), true );
+ }
+#endif
+ getDur().commitIfNeeded();
+ }
+ bool unindex( BSONObj &key ) {
+ getDur().commitIfNeeded();
+ return bt()->unindex( dl(), id(), key, recordLoc() );
+ }
+ static BSONObj simpleKey( char c, int n = 1 ) {
+ BSONObjBuilder builder;
+ string val( n, c );
+ builder.append( "a", val );
+ return builder.obj();
+ }
+ void locate( BSONObj &key, int expectedPos,
+ bool expectedFound, const DiskLoc &expectedLocation,
+ int direction = 1 ) {
+ int pos;
+ bool found;
+ DiskLoc location =
+ bt()->locate( id(), dl(), key, Ordering::make(order()), pos, found, recordLoc(), direction );
+ ASSERT_EQUALS( expectedFound, found );
+ ASSERT( location == expectedLocation );
+ ASSERT_EQUALS( expectedPos, pos );
+ }
+ bool present( BSONObj &key, int direction ) {
+ int pos;
+ bool found;
+ bt()->locate( id(), dl(), key, Ordering::make(order()), pos, found, recordLoc(), direction );
+ return found;
+ }
+ BSONObj order() {
+ return id().keyPattern();
+ }
+ const BtreeBucket *child( const BtreeBucket *b, int i ) {
+ assert( i <= b->nKeys() );
+ DiskLoc d;
+ if ( i == b->nKeys() ) {
+ d = b->getNextChild();
+ }
+ else {
+ d = b->keyNode( i ).prevChildBucket;
+ }
+ assert( !d.isNull() );
+ return d.btree();
+ }
+ void checkKey( char i ) {
+ stringstream ss;
+ ss << i;
+ checkKey( ss.str() );
+ }
+ void checkKey( const string &k ) {
+ BSONObj key = BSON( "" << k );
+// log() << "key: " << key << endl;
+ ASSERT( present( key, 1 ) );
+ ASSERT( present( key, -1 ) );
+ }
+ private:
+ dblock lk_;
+ Client::Context _context;
+ };
+
+ class Create : public Base {
+ public:
+ void run() {
+ checkValid( 0 );
+ }
+ };
+
+ class SimpleInsertDelete : public Base {
+ public:
+ void run() {
+ BSONObj key = simpleKey( 'z' );
+ insert( key );
+
+ checkValid( 1 );
+ locate( key, 0, true, dl() );
+
+ unindex( key );
+
+ checkValid( 0 );
+ locate( key, 0, false, DiskLoc() );
+ }
+ };
+
+ class SplitUnevenBucketBase : public Base {
+ public:
+ virtual ~SplitUnevenBucketBase() {}
+ void run() {
+ for ( int i = 0; i < 10; ++i ) {
+ BSONObj shortKey = simpleKey( shortToken( i ), 1 );
+ insert( shortKey );
+ BSONObj longKey = simpleKey( longToken( i ), 800 );
+ insert( longKey );
+ }
+ checkValid( 20 );
+ ASSERT_EQUALS( 1, bt()->nKeys() );
+ checkSplit();
+ }
+ protected:
+ virtual char shortToken( int i ) const = 0;
+ virtual char longToken( int i ) const = 0;
+ static char leftToken( int i ) {
+ return 'a' + i;
+ }
+ static char rightToken( int i ) {
+ return 'z' - i;
+ }
+ virtual void checkSplit() = 0;
+ };
+
+ class SplitRightHeavyBucket : public SplitUnevenBucketBase {
+ private:
+ virtual char shortToken( int i ) const {
+ return leftToken( i );
+ }
+ virtual char longToken( int i ) const {
+ return rightToken( i );
+ }
+ virtual void checkSplit() {
+ ASSERT_EQUALS( 15, child( bt(), 0 )->nKeys() );
+ ASSERT_EQUALS( 4, child( bt(), 1 )->nKeys() );
+ }
+ };
+
+ class SplitLeftHeavyBucket : public SplitUnevenBucketBase {
+ private:
+ virtual char shortToken( int i ) const {
+ return rightToken( i );
+ }
+ virtual char longToken( int i ) const {
+ return leftToken( i );
+ }
+ virtual void checkSplit() {
+ ASSERT_EQUALS( 4, child( bt(), 0 )->nKeys() );
+ ASSERT_EQUALS( 15, child( bt(), 1 )->nKeys() );
+ }
+ };
+
+ class MissingLocate : public Base {
+ public:
+ void run() {
+ for ( int i = 0; i < 3; ++i ) {
+ BSONObj k = simpleKey( 'b' + 2 * i );
+ insert( k );
+ }
+
+ locate( 1, 'a', 'b', dl() );
+ locate( 1, 'c', 'd', dl() );
+ locate( 1, 'e', 'f', dl() );
+ locate( 1, 'g', 'g' + 1, DiskLoc() ); // of course, 'h' isn't in the index.
+
+ // old behavior
+ // locate( -1, 'a', 'b', dl() );
+ // locate( -1, 'c', 'd', dl() );
+ // locate( -1, 'e', 'f', dl() );
+ // locate( -1, 'g', 'f', dl() );
+
+ locate( -1, 'a', 'a' - 1, DiskLoc() ); // of course, 'a' - 1 isn't in the index
+ locate( -1, 'c', 'b', dl() );
+ locate( -1, 'e', 'd', dl() );
+ locate( -1, 'g', 'f', dl() );
+ }
+ private:
+ void locate( int direction, char token, char expectedMatch,
+ DiskLoc expectedLocation ) {
+ BSONObj k = simpleKey( token );
+ int expectedPos = ( expectedMatch - 'b' ) / 2;
+ Base::locate( k, expectedPos, false, expectedLocation, direction );
+ }
+ };
+
+ class MissingLocateMultiBucket : public Base {
+ public:
+ void run() {
+ for ( int i = 0; i < 8; ++i ) {
+ insert( i );
+ }
+ insert( 9 );
+ insert( 8 );
+// dump();
+ BSONObj straddle = key( 'i' );
+ locate( straddle, 0, false, dl(), 1 );
+ straddle = key( 'k' );
+ locate( straddle, 0, false, dl(), -1 );
+ }
+ private:
+ BSONObj key( char c ) {
+ return simpleKey( c, 800 );
+ }
+ void insert( int i ) {
+ BSONObj k = key( 'b' + 2 * i );
+ Base::insert( k );
+ }
+ };
+
+ class SERVER983 : public Base {
+ public:
+ void run() {
+ for ( int i = 0; i < 10; ++i ) {
+ insert( i );
+ }
+// dump();
+ BSONObj straddle = key( 'o' );
+ locate( straddle, 0, false, dl(), 1 );
+ straddle = key( 'q' );
+ locate( straddle, 0, false, dl(), -1 );
+ }
+ private:
+ BSONObj key( char c ) {
+ return simpleKey( c, 800 );
+ }
+ void insert( int i ) {
+ BSONObj k = key( 'b' + 2 * i );
+ Base::insert( k );
+ }
+ };
+
+ class DontReuseUnused : public Base {
+ public:
+ void run() {
+ for ( int i = 0; i < 10; ++i ) {
+ insert( i );
+ }
+// dump();
+ BSONObj root = key( 'p' );
+ unindex( root );
+ Base::insert( root );
+ locate( root, 0, true, bt()->getNextChild(), 1 );
+ }
+ private:
+ BSONObj key( char c ) {
+ return simpleKey( c, 800 );
+ }
+ void insert( int i ) {
+ BSONObj k = key( 'b' + 2 * i );
+ Base::insert( k );
+ }
+ };
+
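+    // Fills the index with widely spaced keys, deletes every key (keys with child
+    // buckets first, then the rest), reinserts a small range, and checks that the
+    // number of unused keys reported by fullValidate() does not grow.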
+ class PackUnused : public Base {
+ public:
+ void run() {
+ for ( long long i = 0; i < 1000000; i += 1000 ) {
+ insert( i );
+ }
+ string orig, after;
+ {
+ stringstream ss;
+ bt()->shape( ss );
+ orig = ss.str();
+ }
+ vector< string > toDel;
+ vector< string > other;
+ BSONObjBuilder start;
+ start.appendMinKey( "a" );
+ BSONObjBuilder end;
+ end.appendMaxKey( "a" );
+ auto_ptr< BtreeCursor > c( BtreeCursor::make( nsdetails( ns() ), 1, id(), start.done(), end.done(), false, 1 ) );
+ while( c->ok() ) {
+ if ( c->curKeyHasChild() ) {
+ toDel.push_back( c->currKey().firstElement().valuestr() );
+ }
+ else {
+ other.push_back( c->currKey().firstElement().valuestr() );
+ }
+ c->advance();
+ }
+ ASSERT( toDel.size() > 0 );
+ for( vector< string >::const_iterator i = toDel.begin(); i != toDel.end(); ++i ) {
+ BSONObj o = BSON( "a" << *i );
+ unindex( o );
+ }
+ ASSERT( other.size() > 0 );
+ for( vector< string >::const_iterator i = other.begin(); i != other.end(); ++i ) {
+ BSONObj o = BSON( "a" << *i );
+ unindex( o );
+ }
+
+ long long unused = 0;
+ ASSERT_EQUALS( 0, bt()->fullValidate( dl(), order(), &unused, true ) );
+
+ for ( long long i = 50000; i < 50100; ++i ) {
+ insert( i );
+ }
+
+ long long unused2 = 0;
+ ASSERT_EQUALS( 100, bt()->fullValidate( dl(), order(), &unused2, true ) );
+
+// log() << "old unused: " << unused << ", new unused: " << unused2 << endl;
+//
+ ASSERT( unused2 <= unused );
+ }
+ protected:
+ void insert( long long n ) {
+ string val = bigNumString( n );
+ BSONObj k = BSON( "a" << val );
+ Base::insert( k );
+ }
+ };
+
+ class DontDropReferenceKey : public PackUnused {
+ public:
+ void run() {
+ // with 80 root node is full
+ for ( long long i = 0; i < 80; i += 1 ) {
+ insert( i );
+ }
+
+ BSONObjBuilder start;
+ start.appendMinKey( "a" );
+ BSONObjBuilder end;
+ end.appendMaxKey( "a" );
+ BSONObj l = bt()->keyNode( 0 ).key.toBson();
+ string toInsert;
+ auto_ptr< BtreeCursor > c( BtreeCursor::make( nsdetails( ns() ), 1, id(), start.done(), end.done(), false, 1 ) );
+ while( c->ok() ) {
+ if ( c->currKey().woCompare( l ) > 0 ) {
+ toInsert = c->currKey().firstElement().valuestr();
+ break;
+ }
+ c->advance();
+ }
+ // too much work to try to make this happen through inserts and deletes
+ // we are intentionally manipulating the btree bucket directly here
+ BtreeBucket::Loc* L = const_cast< BtreeBucket::Loc* >( &bt()->keyNode( 1 ).prevChildBucket );
+ getDur().writing(L)->Null();
+ getDur().writingInt( const_cast< BtreeBucket::Loc& >( bt()->keyNode( 1 ).recordLoc ).GETOFS() ) |= 1; // make unused
+ BSONObj k = BSON( "a" << toInsert );
+ Base::insert( k );
+ }
+ };
+
+ class MergeBuckets : public Base {
+ public:
+ virtual ~MergeBuckets() {}
+ void run() {
+ for ( int i = 0; i < 10; ++i ) {
+ insert( i );
+ }
+// dump();
+ string ns = id().indexNamespace();
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ int expectedCount = 10 - unindexKeys();
+// dump();
+ ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords );
+ long long unused = 0;
+ ASSERT_EQUALS( expectedCount, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ }
+ protected:
+ BSONObj key( char c ) {
+ return simpleKey( c, 800 );
+ }
+ void insert( int i ) {
+ BSONObj k = key( 'b' + 2 * i );
+ Base::insert( k );
+ }
+ virtual int unindexKeys() = 0;
+ };
+
+ class MergeBucketsLeft : public MergeBuckets {
+ virtual int unindexKeys() {
+ BSONObj k = key( 'b' );
+ unindex( k );
+ k = key( 'b' + 2 );
+ unindex( k );
+ k = key( 'b' + 4 );
+ unindex( k );
+ k = key( 'b' + 6 );
+ unindex( k );
+ return 4;
+ }
+ };
+
+ class MergeBucketsRight : public MergeBuckets {
+ virtual int unindexKeys() {
+ BSONObj k = key( 'b' + 2 * 9 );
+ unindex( k );
+ return 1;
+ }
+ };
+
+ // deleting from head won't coalesce yet
+// class MergeBucketsHead : public MergeBuckets {
+// virtual BSONObj unindexKey() { return key( 'p' ); }
+// };
+
+ class MergeBucketsDontReplaceHead : public Base {
+ public:
+ void run() {
+ for ( int i = 0; i < 18; ++i ) {
+ insert( i );
+ }
+ // dump();
+ string ns = id().indexNamespace();
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = key( 'a' + 17 );
+ unindex( k );
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ long long unused = 0;
+ ASSERT_EQUALS( 17, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ }
+ private:
+ BSONObj key( char c ) {
+ return simpleKey( c, 800 );
+ }
+ void insert( int i ) {
+ BSONObj k = key( 'a' + i );
+ Base::insert( k );
+ }
+ };
+
+ // Tool to construct custom trees for tests.
+ class ArtificialTree : public BtreeBucket {
+ public:
+ void push( const BSONObj &key, const DiskLoc &child ) {
+ KeyOwned k(key);
+ pushBack( dummyDiskLoc(), k, Ordering::make( BSON( "a" << 1 ) ), child );
+ }
+ void setNext( const DiskLoc &child ) {
+ nextChild = child;
+ }
+ static DiskLoc make( IndexDetails &id ) {
+ DiskLoc ret = addBucket( id );
+ is( ret )->init();
+ getDur().commitIfNeeded();
+ return ret;
+ }
+ static ArtificialTree *is( const DiskLoc &l ) {
+ return static_cast< ArtificialTree * >( l.btreemod() );
+ }
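+        // Tree specs are JSON objects: each field name becomes a key in this
+        // bucket, an object value becomes that key's left child bucket, and the
+        // special field name "_" supplies the rightmost child. Names beginning
+        // with '$' are expanded into long keys by expectedKey()/bigNumString().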
+ static DiskLoc makeTree( const string &spec, IndexDetails &id ) {
+ return makeTree( fromjson( spec ), id );
+ }
+ static DiskLoc makeTree( const BSONObj &spec, IndexDetails &id ) {
+ DiskLoc node = make( id );
+ ArtificialTree *n = ArtificialTree::is( node );
+ BSONObjIterator i( spec );
+ while( i.more() ) {
+ BSONElement e = i.next();
+ DiskLoc child;
+ if ( e.type() == Object ) {
+ child = makeTree( e.embeddedObject(), id );
+ }
+ if ( e.fieldName() == string( "_" ) ) {
+ n->setNext( child );
+ }
+ else {
+ n->push( BSON( "" << expectedKey( e.fieldName() ) ), child );
+ }
+ }
+ n->fixParentPtrs( node );
+ return node;
+ }
+ static void setTree( const string &spec, IndexDetails &id ) {
+ set( makeTree( spec, id ), id );
+ }
+ static void set( const DiskLoc &l, IndexDetails &id ) {
+ ArtificialTree::is( id.head )->deallocBucket( id.head, id );
+ getDur().writingDiskLoc(id.head) = l;
+ }
+ static string expectedKey( const char *spec ) {
+ if ( spec[ 0 ] != '$' ) {
+ return spec;
+ }
+ char *endPtr;
+ // parsing a long long is a pain, so just allow shorter keys for now
+ unsigned long long num = strtol( spec + 1, &endPtr, 16 );
+ int len = 800;
+ if( *endPtr == '$' ) {
+ len = strtol( endPtr + 1, 0, 16 );
+ }
+ return Base::bigNumString( num, len );
+ }
+ static void checkStructure( const BSONObj &spec, const IndexDetails &id, const DiskLoc node ) {
+ ArtificialTree *n = ArtificialTree::is( node );
+ BSONObjIterator j( spec );
+ for( int i = 0; i < n->n; ++i ) {
+ ASSERT( j.more() );
+ BSONElement e = j.next();
+ KeyNode kn = n->keyNode( i );
+ string expected = expectedKey( e.fieldName() );
+ ASSERT( present( id, BSON( "" << expected ), 1 ) );
+ ASSERT( present( id, BSON( "" << expected ), -1 ) );
+ ASSERT_EQUALS( expected, kn.key.toBson().firstElement().valuestr() );
+ if ( kn.prevChildBucket.isNull() ) {
+ ASSERT( e.type() == jstNULL );
+ }
+ else {
+ ASSERT( e.type() == Object );
+ checkStructure( e.embeddedObject(), id, kn.prevChildBucket );
+ }
+ }
+ if ( n->nextChild.isNull() ) {
+ // maybe should allow '_' field with null value?
+ ASSERT( !j.more() );
+ }
+ else {
+ BSONElement e = j.next();
+ ASSERT_EQUALS( string( "_" ), e.fieldName() );
+ ASSERT( e.type() == Object );
+ checkStructure( e.embeddedObject(), id, n->nextChild );
+ }
+ ASSERT( !j.more() );
+ }
+ static void checkStructure( const string &spec, const IndexDetails &id ) {
+ checkStructure( fromjson( spec ), id, id.head );
+ }
+ static bool present( const IndexDetails &id, const BSONObj &key, int direction ) {
+ int pos;
+ bool found;
+ id.head.btree()->locate( id, id.head, key, Ordering::make(id.keyPattern()), pos, found, recordLoc(), direction );
+ return found;
+ }
+ int headerSize() const { return BtreeBucket::headerSize(); }
+ int packedDataSize( int pos ) const { return BtreeBucket::packedDataSize( pos ); }
+ void fixParentPtrs( const DiskLoc &thisLoc ) { BtreeBucket::fixParentPtrs( thisLoc ); }
+ void forcePack() {
+ topSize += emptySize;
+ emptySize = 0;
+ setNotPacked();
+ }
+ private:
+ DiskLoc dummyDiskLoc() const { return DiskLoc( 0, 2 ); }
+ };
+
+ /**
+ * We could probably refactor the following tests, but it's easier to debug
+ * them in the present state.
+ */
+
+ class MergeBucketsDelInternal : public Base {
+ public:
+ void run() {
+ ArtificialTree::setTree( "{d:{b:{a:null},bb:null,_:{c:null}},_:{f:{e:null},_:{g:null}}}", id() );
+// dump();
+ string ns = id().indexNamespace();
+ ASSERT_EQUALS( 8, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
+
+ BSONObj k = BSON( "" << "bb" );
+ assert( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 7, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 5, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{b:{a:null},d:{c:null},f:{e:null},_:{g:null}}", id() );
+ }
+ };
+
+ class MergeBucketsRightNull : public Base {
+ public:
+ void run() {
+ ArtificialTree::setTree( "{d:{b:{a:null},bb:null,cc:{c:null}},_:{f:{e:null},h:{g:null}}}", id() );
+// dump();
+ string ns = id().indexNamespace();
+ ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
+
+ BSONObj k = BSON( "" << "bb" );
+ assert( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 5, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{b:{a:null},cc:{c:null},d:null,f:{e:null},h:{g:null}}", id() );
+ }
+ };
+
+ // not yet handling this case
+ class DontMergeSingleBucket : public Base {
+ public:
+ void run() {
+ ArtificialTree::setTree( "{d:{b:{a:null},c:null}}", id() );
+// dump();
+ string ns = id().indexNamespace();
+ ASSERT_EQUALS( 4, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << "c" );
+ assert( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{d:{b:{a:null}}}", id() );
+ }
+ };
+
+ class ParentMergeNonRightToLeft : public Base {
+ public:
+ void run() {
+ ArtificialTree::setTree( "{d:{b:{a:null},bb:null,cc:{c:null}},i:{f:{e:null},h:{g:null}}}", id() );
+// dump();
+ string ns = id().indexNamespace();
+ ASSERT_EQUALS( 11, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
+
+ BSONObj k = BSON( "" << "bb" );
+ assert( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
+ // child does not currently replace parent in this case
+ ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{i:{b:{a:null},cc:{c:null},d:null,f:{e:null},h:{g:null}}}", id() );
+ }
+ };
+
+ class ParentMergeNonRightToRight : public Base {
+ public:
+ void run() {
+ ArtificialTree::setTree( "{d:{b:{a:null},cc:{c:null}},i:{f:{e:null},ff:null,h:{g:null}}}", id() );
+// dump();
+ string ns = id().indexNamespace();
+ ASSERT_EQUALS( 11, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
+
+ BSONObj k = BSON( "" << "ff" );
+ assert( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
+ // child does not currently replace parent in this case
+ ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{i:{b:{a:null},cc:{c:null},d:null,f:{e:null},h:{g:null}}}", id() );
+ }
+ };
+
+ class CantMergeRightNoMerge : public Base {
+ public:
+ void run() {
+ ArtificialTree::setTree( "{d:{b:{a:null},bb:null,cc:{c:null}},dd:null,_:{f:{e:null},h:{g:null}}}", id() );
+// dump();
+ string ns = id().indexNamespace();
+ ASSERT_EQUALS( 11, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
+
+ BSONObj k = BSON( "" << "bb" );
+ assert( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{d:{b:{a:null},cc:{c:null}},dd:null,_:{f:{e:null},h:{g:null}}}", id() );
+ }
+ };
+
+ class CantMergeLeftNoMerge : public Base {
+ public:
+ void run() {
+ ArtificialTree::setTree( "{c:{b:{a:null}},d:null,_:{f:{e:null},g:null}}", id() );
+// dump();
+ string ns = id().indexNamespace();
+ ASSERT_EQUALS( 7, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 5, nsdetails( ns.c_str() )->stats.nrecords );
+
+ BSONObj k = BSON( "" << "g" );
+ assert( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 6, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 5, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{c:{b:{a:null}},d:null,_:{f:{e:null}}}", id() );
+ }
+ };
+
+ class MergeOption : public Base {
+ public:
+ void run() {
+ ArtificialTree::setTree( "{c:{b:{a:null}},f:{e:{d:null},ee:null},_:{h:{g:null}}}", id() );
+// dump();
+ string ns = id().indexNamespace();
+ ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
+
+ BSONObj k = BSON( "" << "ee" );
+ assert( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 8, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{c:{b:{a:null}},_:{e:{d:null},f:null,h:{g:null}}}", id() );
+ }
+ };
+
+ class ForceMergeLeft : public Base {
+ public:
+ void run() {
+ ArtificialTree::setTree( "{c:{b:{a:null}},f:{e:{d:null},ee:null},ff:null,_:{h:{g:null}}}", id() );
+// dump();
+ string ns = id().indexNamespace();
+ ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
+
+ BSONObj k = BSON( "" << "ee" );
+ assert( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{f:{b:{a:null},c:null,e:{d:null}},ff:null,_:{h:{g:null}}}", id() );
+ }
+ };
+
+ class ForceMergeRight : public Base {
+ public:
+ void run() {
+ ArtificialTree::setTree( "{c:{b:{a:null}},cc:null,f:{e:{d:null},ee:null},_:{h:{g:null}}}", id() );
+// dump();
+ string ns = id().indexNamespace();
+ ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 7, nsdetails( ns.c_str() )->stats.nrecords );
+
+ BSONObj k = BSON( "" << "ee" );
+ assert( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{c:{b:{a:null}},cc:null,_:{e:{d:null},f:null,h:{g:null}}}", id() );
+ }
+ };
+
+ class RecursiveMerge : public Base {
+ public:
+ void run() {
+ ArtificialTree::setTree( "{h:{e:{b:{a:null},c:null,d:null},g:{f:null}},j:{i:null}}", id() );
+// dump();
+ string ns = id().indexNamespace();
+ ASSERT_EQUALS( 10, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
+
+ BSONObj k = BSON( "" << "c" );
+ assert( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ // height is not currently reduced in this case
+ ArtificialTree::checkStructure( "{j:{g:{b:{a:null},d:null,e:null,f:null},h:null,i:null}}", id() );
+ }
+ };
+
+ class RecursiveMergeRightBucket : public Base {
+ public:
+ void run() {
+ ArtificialTree::setTree( "{h:{e:{b:{a:null},c:null,d:null},g:{f:null}},_:{i:null}}", id() );
+// dump();
+ string ns = id().indexNamespace();
+ ASSERT_EQUALS( 9, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
+
+ BSONObj k = BSON( "" << "c" );
+ assert( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 8, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{g:{b:{a:null},d:null,e:null,f:null},h:null,i:null}", id() );
+ }
+ };
+
+ class RecursiveMergeDoubleRightBucket : public Base {
+ public:
+ void run() {
+ ArtificialTree::setTree( "{h:{e:{b:{a:null},c:null,d:null},_:{f:null}},_:{i:null}}", id() );
+ string ns = id().indexNamespace();
+ ASSERT_EQUALS( 8, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << "c" );
+ assert( unindex( k ) );
+ long long keyCount = bt()->fullValidate( dl(), order(), 0, true );
+ ASSERT_EQUALS( 7, keyCount );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ // no recursion currently in this case
+ ArtificialTree::checkStructure( "{h:{b:{a:null},d:null,e:null,f:null},_:{i:null}}", id() );
+ }
+ };
+
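+    // Builds a root holding a single separator key over two children, fills each
+    // child to an exact packed size, deletes the keys named by delKeys(), and then
+    // checks via merge() whether the children were expected to coalesce into one
+    // bucket (1 record) or stay separate (3 records). Subclasses vary
+    // leftSize()/rightSize() around BtreeBucket::lowWaterMark().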
+ class MergeSizeBase : public Base {
+ public:
+ MergeSizeBase() : _count() {}
+ virtual ~MergeSizeBase() {}
+ void run() {
+ typedef ArtificialTree A;
+ A::set( A::make( id() ), id() );
+ A* root = A::is( dl() );
+ DiskLoc left = A::make( id() );
+ root->push( biggestKey( 'm' ), left );
+ _count = 1;
+ A* l = A::is( left );
+ DiskLoc right = A::make( id() );
+ root->setNext( right );
+ A* r = A::is( right );
+ root->fixParentPtrs( dl() );
+
+ //ASSERT_EQUALS( bigSize(), bigSize() / 2 * 2 );
+ fillToExactSize( l, leftSize(), 'a' );
+ fillToExactSize( r, rightSize(), 'n' );
+ ASSERT( leftAdditional() <= 2 );
+ if ( leftAdditional() >= 2 ) {
+ l->push( bigKey( 'k' ), DiskLoc() );
+ }
+ if ( leftAdditional() >= 1 ) {
+ l->push( bigKey( 'l' ), DiskLoc() );
+ }
+ ASSERT( rightAdditional() <= 2 );
+ if ( rightAdditional() >= 2 ) {
+ r->push( bigKey( 'y' ), DiskLoc() );
+ }
+ if ( rightAdditional() >= 1 ) {
+ r->push( bigKey( 'z' ), DiskLoc() );
+ }
+ _count += leftAdditional() + rightAdditional();
+
+// dump();
+
+ initCheck();
+ string ns = id().indexNamespace();
+ const char *keys = delKeys();
+ for( const char *i = keys; *i; ++i ) {
+ long long unused = 0;
+ ASSERT_EQUALS( _count, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = bigKey( *i );
+ unindex( k );
+// dump();
+ --_count;
+ }
+
+// dump();
+
+ long long unused = 0;
+ ASSERT_EQUALS( _count, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ validate();
+ if ( !merge() ) {
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ }
+ else {
+ ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords );
+ }
+ }
+ protected:
+ virtual int leftAdditional() const { return 2; }
+ virtual int rightAdditional() const { return 2; }
+ virtual void initCheck() {}
+ virtual void validate() {}
+ virtual int leftSize() const = 0;
+ virtual int rightSize() const = 0;
+ virtual const char * delKeys() const { return "klyz"; }
+ virtual bool merge() const { return true; }
+ void fillToExactSize( ArtificialTree *t, int targetSize, char startKey ) {
+ int size = 0;
+ while( size < targetSize ) {
+ int space = targetSize - size;
+ int nextSize = space - sizeof( _KeyNode );
+ assert( nextSize > 0 );
+ BSONObj newKey = key( startKey++, nextSize );
+ t->push( newKey, DiskLoc() );
+ size += BtreeBucket::KeyOwned(newKey).dataSize() + sizeof( _KeyNode );
+ _count += 1;
+ }
+ if( t->packedDataSize( 0 ) != targetSize ) {
+ ASSERT_EQUALS( t->packedDataSize( 0 ), targetSize );
+ }
+ }
+ static BSONObj key( char a, int size ) {
+ if ( size >= bigSize() ) {
+ return bigKey( a );
+ }
+ return simpleKey( a, size - ( bigSize() - 801 ) );
+ }
+ static BSONObj bigKey( char a ) {
+ return simpleKey( a, 801 );
+ }
+ static BSONObj biggestKey( char a ) {
+ int size = BtreeBucket::getKeyMax() - bigSize() + 801;
+ return simpleKey( a, size );
+ }
+ static int bigSize() {
+ return BtreeBucket::KeyOwned(bigKey( 'a' )).dataSize();
+ }
+ static int biggestSize() {
+ return BtreeBucket::KeyOwned(biggestKey( 'a' )).dataSize();
+ }
+ int _count;
+ };
+
+ class MergeSizeJustRightRight : public MergeSizeBase {
+ protected:
+ virtual int rightSize() const { return BtreeBucket::lowWaterMark() - 1; }
+ virtual int leftSize() const { return BtreeBucket::bodySize() - biggestSize() - sizeof( _KeyNode ) - ( BtreeBucket::lowWaterMark() - 1 ); }
+ };
+
+ class MergeSizeJustRightLeft : public MergeSizeBase {
+ protected:
+ virtual int leftSize() const { return BtreeBucket::lowWaterMark() - 1; }
+ virtual int rightSize() const { return BtreeBucket::bodySize() - biggestSize() - sizeof( _KeyNode ) - ( BtreeBucket::lowWaterMark() - 1 ); }
+ virtual const char * delKeys() const { return "yzkl"; }
+ };
+
+ class MergeSizeRight : public MergeSizeJustRightRight {
+ virtual int rightSize() const { return MergeSizeJustRightRight::rightSize() - 1; }
+ virtual int leftSize() const { return MergeSizeJustRightRight::leftSize() + 1; }
+ };
+
+ class MergeSizeLeft : public MergeSizeJustRightLeft {
+ virtual int rightSize() const { return MergeSizeJustRightLeft::rightSize() + 1; }
+ virtual int leftSize() const { return MergeSizeJustRightLeft::leftSize() - 1; }
+ };
+
+ class NoMergeBelowMarkRight : public MergeSizeJustRightRight {
+ virtual int rightSize() const { return MergeSizeJustRightRight::rightSize() + 1; }
+ virtual int leftSize() const { return MergeSizeJustRightRight::leftSize() - 1; }
+ virtual bool merge() const { return false; }
+ };
+
+ class NoMergeBelowMarkLeft : public MergeSizeJustRightLeft {
+ virtual int rightSize() const { return MergeSizeJustRightLeft::rightSize() - 1; }
+ virtual int leftSize() const { return MergeSizeJustRightLeft::leftSize() + 1; }
+ virtual bool merge() const { return false; }
+ };
+
+ class MergeSizeRightTooBig : public MergeSizeJustRightLeft {
+ virtual int rightSize() const { return MergeSizeJustRightLeft::rightSize() + 1; }
+ virtual bool merge() const { return false; }
+ };
+
+ class MergeSizeLeftTooBig : public MergeSizeJustRightRight {
+ virtual int leftSize() const { return MergeSizeJustRightRight::leftSize() + 1; }
+ virtual bool merge() const { return false; }
+ };
+
+ class BalanceOneLeftToRight : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},b:{$20:null,$30:null,$40:null,$50:null,a:null},_:{c:null}}", id() );
+ ASSERT_EQUALS( 14, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << bigNumString( 0x40 ) );
+// dump();
+ ASSERT( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 13, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{$6:{$1:null,$2:null,$3:null,$4:null,$5:null},b:{$10:null,$20:null,$30:null,$50:null,a:null},_:{c:null}}", id() );
+ }
+ };
+
+ class BalanceOneRightToLeft : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{$10:{$1:null,$2:null,$3:null,$4:null},b:{$20:null,$30:null,$40:null,$50:null,$60:null,$70:null},_:{c:null}}", id() );
+ ASSERT_EQUALS( 13, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << bigNumString( 0x3 ) );
+// dump();
+ ASSERT( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{$20:{$1:null,$2:null,$4:null,$10:null},b:{$30:null,$40:null,$50:null,$60:null,$70:null},_:{c:null}}", id() );
+ }
+ };
+
+ class BalanceThreeLeftToRight : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{$20:{$1:{$0:null},$3:{$2:null},$5:{$4:null},$7:{$6:null},$9:{$8:null},$11:{$10:null},$13:{$12:null},_:{$14:null}},b:{$30:null,$40:{$35:null},$50:{$45:null}},_:{c:null}}", id() );
+ ASSERT_EQUALS( 23, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 14, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << bigNumString( 0x30 ) );
+ // dump();
+ ASSERT( unindex( k ) );
+ // dump();
+ ASSERT_EQUALS( 22, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 14, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{$9:{$1:{$0:null},$3:{$2:null},$5:{$4:null},$7:{$6:null},_:{$8:null}},b:{$11:{$10:null},$13:{$12:null},$20:{$14:null},$40:{$35:null},$50:{$45:null}},_:{c:null}}", id() );
+ }
+ };
+
+ class BalanceThreeRightToLeft : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{$20:{$1:{$0:null},$3:{$2:null},$5:null,_:{$14:null}},b:{$30:{$25:null},$40:{$35:null},$50:{$45:null},$60:{$55:null},$70:{$65:null},$80:{$75:null},$90:{$85:null},$100:{$95:null}},_:{c:null}}", id() );
+ ASSERT_EQUALS( 25, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 15, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << bigNumString( 0x5 ) );
+// dump();
+ ASSERT( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 24, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 15, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{$50:{$1:{$0:null},$3:{$2:null},$20:{$14:null},$30:{$25:null},$40:{$35:null},_:{$45:null}},b:{$60:{$55:null},$70:{$65:null},$80:{$75:null},$90:{$85:null},$100:{$95:null}},_:{c:null}}", id() );
+ }
+ };
+
+ class BalanceSingleParentKey : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},_:{$20:null,$30:null,$40:null,$50:null,a:null}}", id() );
+ ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << bigNumString( 0x40 ) );
+// dump();
+ ASSERT( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 11, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{$6:{$1:null,$2:null,$3:null,$4:null,$5:null},_:{$10:null,$20:null,$30:null,$50:null,a:null}}", id() );
+ }
+ };
+
+ class PackEmpty : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{a:null}", id() );
+ BSONObj k = BSON( "" << "a" );
+ ASSERT( unindex( k ) );
+ ArtificialTree *t = ArtificialTree::is( dl() );
+ t->forcePack();
+ Tester::checkEmpty( t, id() );
+ }
+ class Tester : public ArtificialTree {
+ public:
+ static void checkEmpty( ArtificialTree *a, const IndexDetails &id ) {
+ Tester *t = static_cast< Tester * >( a );
+ ASSERT_EQUALS( 0, t->n );
+ ASSERT( !( t->flags & Packed ) );
+ Ordering o = Ordering::make( id.keyPattern() );
+ int zero = 0;
+ t->_packReadyForMod( o, zero );
+ ASSERT_EQUALS( 0, t->n );
+ ASSERT_EQUALS( 0, t->topSize );
+ ASSERT_EQUALS( BtreeBucket::bodySize(), t->emptySize );
+ ASSERT( t->flags & Packed );
+ }
+ };
+ };
+
+ class PackedDataSizeEmpty : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{a:null}", id() );
+ BSONObj k = BSON( "" << "a" );
+ ASSERT( unindex( k ) );
+ ArtificialTree *t = ArtificialTree::is( dl() );
+ t->forcePack();
+ Tester::checkEmpty( t, id() );
+ }
+ class Tester : public ArtificialTree {
+ public:
+ static void checkEmpty( ArtificialTree *a, const IndexDetails &id ) {
+ Tester *t = static_cast< Tester * >( a );
+ ASSERT_EQUALS( 0, t->n );
+ ASSERT( !( t->flags & Packed ) );
+ int zero = 0;
+ ASSERT_EQUALS( 0, t->packedDataSize( zero ) );
+ ASSERT( !( t->flags & Packed ) );
+ }
+ };
+ };
+
+ class BalanceSingleParentKeyPackParent : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},_:{$20:null,$30:null,$40:null,$50:null,a:null}}", id() );
+ ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ // force parent pack
+ ArtificialTree::is( dl() )->forcePack();
+ BSONObj k = BSON( "" << bigNumString( 0x40 ) );
+// dump();
+ ASSERT( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 11, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{$6:{$1:null,$2:null,$3:null,$4:null,$5:null},_:{$10:null,$20:null,$30:null,$50:null,a:null}}", id() );
+ }
+ };
+
+ class BalanceSplitParent : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{$10$10:{$1:null,$2:null,$3:null,$4:null},$100:{$20:null,$30:null,$40:null,$50:null,$60:null,$70:null,$80:null},$200:null,$300:null,$400:null,$500:null,$600:null,$700:null,$800:null,$900:null,_:{c:null}}", id() );
+ ASSERT_EQUALS( 22, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << bigNumString( 0x3 ) );
+// dump();
+ ASSERT( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 21, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 6, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{$500:{$30:{$1:null,$2:null,$4:null,$10$10:null,$20:null},$100:{$40:null,$50:null,$60:null,$70:null,$80:null},$200:null,$300:null,$400:null},_:{$600:null,$700:null,$800:null,$900:null,_:{c:null}}}", id() );
+ }
+ };
+
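+    // Builds a tree from treeSpec(), optionally deletes keys via modTree(), and
+    // checks which key index rebalancedSeparatorPos() would pick as the new
+    // separator between the root's two children.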
+ class RebalancedSeparatorBase : public Base {
+ public:
+ void run() {
+ ArtificialTree::setTree( treeSpec(), id() );
+ modTree();
+ Tester::checkSeparator( id(), expectedSeparator() );
+ }
+ virtual string treeSpec() const = 0;
+ virtual int expectedSeparator() const = 0;
+ virtual void modTree() {}
+ struct Tester : public ArtificialTree {
+ static void checkSeparator( const IndexDetails& id, int expected ) {
+ ASSERT_EQUALS( expected, static_cast< Tester * >( id.head.btreemod() )->rebalancedSeparatorPos( id.head, 0 ) );
+ }
+ };
+ };
+
+ class EvenRebalanceLeft : public RebalancedSeparatorBase {
+ virtual string treeSpec() const { return "{$7:{$1:null,$2$31f:null,$3:null,$4$31f:null,$5:null,$6:null},_:{$8:null,$9:null,$10$31e:null}}"; }
+ virtual int expectedSeparator() const { return 4; }
+ };
+
+ class EvenRebalanceLeftCusp : public RebalancedSeparatorBase {
+ virtual string treeSpec() const { return "{$6:{$1:null,$2$31f:null,$3:null,$4$31f:null,$5:null},_:{$7:null,$8:null,$9$31e:null,$10:null}}"; }
+ virtual int expectedSeparator() const { return 4; }
+ };
+
+ class EvenRebalanceRight : public RebalancedSeparatorBase {
+ virtual string treeSpec() const { return "{$3:{$1:null,$2$31f:null},_:{$4$31f:null,$5:null,$6:null,$7:null,$8$31e:null,$9:null,$10:null}}"; }
+ virtual int expectedSeparator() const { return 4; }
+ };
+
+ class EvenRebalanceRightCusp : public RebalancedSeparatorBase {
+ virtual string treeSpec() const { return "{$4$31f:{$1:null,$2$31f:null,$3:null},_:{$5:null,$6:null,$7$31e:null,$8:null,$9:null,$10:null}}"; }
+ virtual int expectedSeparator() const { return 4; }
+ };
+
+ class EvenRebalanceCenter : public RebalancedSeparatorBase {
+ virtual string treeSpec() const { return "{$5:{$1:null,$2$31f:null,$3:null,$4$31f:null},_:{$6:null,$7$31e:null,$8:null,$9:null,$10:null}}"; }
+ virtual int expectedSeparator() const { return 4; }
+ };
+
+ class OddRebalanceLeft : public RebalancedSeparatorBase {
+ virtual string treeSpec() const { return "{$6$31f:{$1:null,$2:null,$3:null,$4:null,$5:null},_:{$7:null,$8:null,$9:null,$10:null}}"; }
+ virtual int expectedSeparator() const { return 4; }
+ };
+
+ class OddRebalanceRight : public RebalancedSeparatorBase {
+ virtual string treeSpec() const { return "{$4:{$1:null,$2:null,$3:null},_:{$5:null,$6:null,$7:null,$8$31f:null,$9:null,$10:null}}"; }
+ virtual int expectedSeparator() const { return 4; }
+ };
+
+ class OddRebalanceCenter : public RebalancedSeparatorBase {
+ virtual string treeSpec() const { return "{$5:{$1:null,$2:null,$3:null,$4:null},_:{$6:null,$7:null,$8:null,$9:null,$10$31f:null}}"; }
+ virtual int expectedSeparator() const { return 4; }
+ };
+
+ class RebalanceEmptyRight : public RebalancedSeparatorBase {
+ virtual string treeSpec() const { return "{$a:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null,$7:null,$8:null,$9:null},_:{$b:null}}"; }
+ virtual void modTree() {
+ BSONObj k = BSON( "" << bigNumString( 0xb ) );
+ ASSERT( unindex( k ) );
+ }
+ virtual int expectedSeparator() const { return 4; }
+ };
+
+ class RebalanceEmptyLeft : public RebalancedSeparatorBase {
+ virtual string treeSpec() const { return "{$a:{$1:null},_:{$11:null,$12:null,$13:null,$14:null,$15:null,$16:null,$17:null,$18:null,$19:null}}"; }
+ virtual void modTree() {
+ BSONObj k = BSON( "" << bigNumString( 0x1 ) );
+ ASSERT( unindex( k ) );
+ }
+ virtual int expectedSeparator() const { return 4; }
+ };
+
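+        // With the right bucket one byte above the low water mark, neither a merge
+        // nor a rebalance should occur, so the parent's top key stays unchanged.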
+ class NoMoveAtLowWaterMarkRight : public MergeSizeJustRightRight {
+ virtual int rightSize() const { return MergeSizeJustRightRight::rightSize() + 1; }
+ virtual void initCheck() { _oldTop = bt()->keyNode( 0 ).key.toBson(); }
+ virtual void validate() { ASSERT_EQUALS( _oldTop, bt()->keyNode( 0 ).key.toBson() ); }
+ virtual bool merge() const { return false; }
+ protected:
+ BSONObj _oldTop;
+ };
+
+ class MoveBelowLowWaterMarkRight : public NoMoveAtLowWaterMarkRight {
+ virtual int rightSize() const { return MergeSizeJustRightRight::rightSize(); }
+ virtual int leftSize() const { return MergeSizeJustRightRight::leftSize() + 1; }
+ // different top means we rebalanced
+ virtual void validate() { ASSERT( !( _oldTop == bt()->keyNode( 0 ).key.toBson() ) ); }
+ };
+
+ class NoMoveAtLowWaterMarkLeft : public MergeSizeJustRightLeft {
+ virtual int leftSize() const { return MergeSizeJustRightLeft::leftSize() + 1; }
+ virtual void initCheck() { _oldTop = bt()->keyNode( 0 ).key.toBson(); }
+ virtual void validate() { ASSERT_EQUALS( _oldTop, bt()->keyNode( 0 ).key.toBson() ); }
+ virtual bool merge() const { return false; }
+ protected:
+ BSONObj _oldTop;
+ };
+
+ class MoveBelowLowWaterMarkLeft : public NoMoveAtLowWaterMarkLeft {
+ virtual int leftSize() const { return MergeSizeJustRightLeft::leftSize(); }
+ virtual int rightSize() const { return MergeSizeJustRightLeft::rightSize() + 1; }
+ // different top means we rebalanced
+ virtual void validate() { ASSERT( !( _oldTop == bt()->keyNode( 0 ).key.toBson() ) ); }
+ };
+
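+        // When the underfull bucket has siblings on both sides, balancing borrows
+        // from the fuller sibling -- the left one here, the right one in the next test.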
+ class PreferBalanceLeft : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{$10:{$1:null,$2:null,$3:null,$4:null,$5:null,$6:null},$20:{$11:null,$12:null,$13:null,$14:null},_:{$30:null}}", id() );
+ ASSERT_EQUALS( 13, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << bigNumString( 0x12 ) );
+// dump();
+ ASSERT( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{$5:{$1:null,$2:null,$3:null,$4:null},$20:{$6:null,$10:null,$11:null,$13:null,$14:null},_:{$30:null}}", id() );
+ }
+ };
+
+ class PreferBalanceRight : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{$10:{$1:null},$20:{$11:null,$12:null,$13:null,$14:null},_:{$31:null,$32:null,$33:null,$34:null,$35:null,$36:null}}", id() );
+ ASSERT_EQUALS( 13, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << bigNumString( 0x12 ) );
+ // dump();
+ ASSERT( unindex( k ) );
+ // dump();
+ ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{$10:{$1:null},$31:{$11:null,$13:null,$14:null,$20:null},_:{$32:null,$33:null,$34:null,$35:null,$36:null}}", id() );
+ }
+ };
+
+ class RecursiveMergeThenBalance : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{$10:{$5:{$1:null,$2:null},$8:{$6:null,$7:null}},_:{$20:null,$30:null,$40:null,$50:null,$60:null,$70:null,$80:null,$90:null}}", id() );
+ ASSERT_EQUALS( 15, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 5, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << bigNumString( 0x7 ) );
+ // dump();
+ ASSERT( unindex( k ) );
+ // dump();
+ ASSERT_EQUALS( 14, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{$40:{$8:{$1:null,$2:null,$5:null,$6:null},$10:null,$20:null,$30:null},_:{$50:null,$60:null,$70:null,$80:null,$90:null}}", id() );
+ }
+ };
+
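+        // The next four cases exercise merges where one sibling bucket is completely
+        // empty.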
+ class MergeRightEmpty : public MergeSizeBase {
+ protected:
+ virtual int rightAdditional() const { return 1; }
+ virtual int leftAdditional() const { return 1; }
+ virtual const char * delKeys() const { return "lz"; }
+ virtual int rightSize() const { return 0; }
+ virtual int leftSize() const { return BtreeBucket::bodySize() - biggestSize() - sizeof( _KeyNode ); }
+ };
+
+ class MergeMinRightEmpty : public MergeSizeBase {
+ protected:
+ virtual int rightAdditional() const { return 1; }
+ virtual int leftAdditional() const { return 0; }
+ virtual const char * delKeys() const { return "z"; }
+ virtual int rightSize() const { return 0; }
+ virtual int leftSize() const { return bigSize() + sizeof( _KeyNode ); }
+ };
+
+ class MergeLeftEmpty : public MergeSizeBase {
+ protected:
+ virtual int rightAdditional() const { return 1; }
+ virtual int leftAdditional() const { return 1; }
+ virtual const char * delKeys() const { return "zl"; }
+ virtual int leftSize() const { return 0; }
+ virtual int rightSize() const { return BtreeBucket::bodySize() - biggestSize() - sizeof( _KeyNode ); }
+ };
+
+ class MergeMinLeftEmpty : public MergeSizeBase {
+ protected:
+ virtual int leftAdditional() const { return 1; }
+ virtual int rightAdditional() const { return 0; }
+ virtual const char * delKeys() const { return "l"; }
+ virtual int leftSize() const { return 0; }
+ virtual int rightSize() const { return bigSize() + sizeof( _KeyNode ); }
+ };
+
+ class BalanceRightEmpty : public MergeRightEmpty {
+ protected:
+ virtual int leftSize() const { return BtreeBucket::bodySize() - biggestSize() - sizeof( _KeyNode ) + 1; }
+ virtual bool merge() const { return false; }
+ virtual void initCheck() { _oldTop = bt()->keyNode( 0 ).key.toBson(); }
+ virtual void validate() { ASSERT( !( _oldTop == bt()->keyNode( 0 ).key.toBson() ) ); }
+ private:
+ BSONObj _oldTop;
+ };
+
+ class BalanceLeftEmpty : public MergeLeftEmpty {
+ protected:
+ virtual int rightSize() const { return BtreeBucket::bodySize() - biggestSize() - sizeof( _KeyNode ) + 1; }
+ virtual bool merge() const { return false; }
+ virtual void initCheck() { _oldTop = bt()->keyNode( 0 ).key.toBson(); }
+ virtual void validate() { ASSERT( !( _oldTop == bt()->keyNode( 0 ).key.toBson() ) ); }
+ private:
+ BSONObj _oldTop;
+ };
+
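+        // Deleting the only key of a leaf that has no siblings removes that bucket
+        // entirely (nrecords drops from 2 to 1).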
+ class DelEmptyNoNeighbors : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{b:{a:null}}", id() );
+ ASSERT_EQUALS( 2, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 2, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << "a" );
+ // dump();
+ ASSERT( unindex( k ) );
+ // dump();
+ ASSERT_EQUALS( 1, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{b:null}", id() );
+ }
+ };
+
+ class DelEmptyEmptyNeighbors : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{a:null,c:{b:null},d:null}", id() );
+ ASSERT_EQUALS( 4, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 2, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << "b" );
+ // dump();
+ ASSERT( unindex( k ) );
+ // dump();
+ ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), 0, true ) );
+ ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{a:null,c:null,d:null}", id() );
+ }
+ };
+
+ class DelInternal : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{a:null,c:{b:null},d:null}", id() );
+ long long unused = 0;
+ ASSERT_EQUALS( 4, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ ASSERT_EQUALS( 2, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << "c" );
+// dump();
+ ASSERT( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{a:null,b:null,d:null}", id() );
+ }
+ };
+
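+        // Mark the record of the key that will replace the deleted internal key as
+        // unused (low bit of its DiskLoc offset), then verify the unused count is
+        // preserved across the internal delete.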
+ class DelInternalReplaceWithUnused : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{a:null,c:{b:null},d:null}", id() );
+ getDur().writingInt( const_cast< BtreeBucket::Loc& >( bt()->keyNode( 1 ).prevChildBucket.btree()->keyNode( 0 ).recordLoc ).GETOFS() ) |= 1; // make unused
+ long long unused = 0;
+ ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 1, unused );
+ ASSERT_EQUALS( 2, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << "c" );
+// dump();
+ ASSERT( unindex( k ) );
+// dump();
+ unused = 0;
+ ASSERT_EQUALS( 2, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 1, unused );
+ ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords );
+                // checkStructure() doesn't discriminate between used and unused keys
+ ArtificialTree::checkStructure( "{a:null,b:null,d:null}", id() );
+ }
+ };
+
+ class DelInternalReplaceRight : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{a:null,_:{b:null}}", id() );
+ long long unused = 0;
+ ASSERT_EQUALS( 2, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ ASSERT_EQUALS( 2, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << "a" );
+// dump();
+ ASSERT( unindex( k ) );
+// dump();
+ unused = 0;
+ ASSERT_EQUALS( 1, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ ASSERT_EQUALS( 1, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{b:null}", id() );
+ }
+ };
+
+ class DelInternalPromoteKey : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{a:null,y:{d:{c:{b:null}},_:{e:null}},z:null}", id() );
+ long long unused = 0;
+ ASSERT_EQUALS( 7, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ ASSERT_EQUALS( 5, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << "y" );
+// dump();
+ ASSERT( unindex( k ) );
+// dump();
+ unused = 0;
+ ASSERT_EQUALS( 6, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{a:null,e:{c:{b:null},d:null},z:null}", id() );
+ }
+ };
+
+ class DelInternalPromoteRightKey : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{a:null,_:{e:{c:null},_:{f:null}}}", id() );
+ long long unused = 0;
+ ASSERT_EQUALS( 4, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << "a" );
+// dump();
+ ASSERT( unindex( k ) );
+// dump();
+ unused = 0;
+ ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ ASSERT_EQUALS( 2, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{c:null,_:{e:null,f:null}}", id() );
+ }
+ };
+
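+        // When the candidate replacement key cannot be promoted (its child pointer
+        // is non-null), the deleted internal key is left in place and merely marked
+        // unused, as the offset low-bit check below confirms.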
+ class DelInternalReplacementPrevNonNull : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{a:null,d:{c:{b:null}},e:null}", id() );
+ long long unused = 0;
+ ASSERT_EQUALS( 5, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << "d" );
+ // dump();
+ ASSERT( unindex( k ) );
+ // dump();
+ ASSERT_EQUALS( 4, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 1, unused );
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{a:null,d:{c:{b:null}},e:null}", id() );
+ ASSERT( bt()->keyNode( 1 ).recordLoc.getOfs() & 1 ); // check 'unused' key
+ }
+ };
+
+ class DelInternalReplacementNextNonNull : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{a:null,_:{c:null,_:{d:null}}}", id() );
+ long long unused = 0;
+ ASSERT_EQUALS( 3, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << "a" );
+ // dump();
+ ASSERT( unindex( k ) );
+ // dump();
+ ASSERT_EQUALS( 2, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 1, unused );
+ ASSERT_EQUALS( 3, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{a:null,_:{c:null,_:{d:null}}}", id() );
+ ASSERT( bt()->keyNode( 0 ).recordLoc.getOfs() & 1 ); // check 'unused' key
+ }
+ };
+
+ class DelInternalSplitPromoteLeft : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{$10:null,$20:null,$30$10:{$25:{$23:null},_:{$27:null}},$40:null,$50:null,$60:null,$70:null,$80:null,$90:null,$100:null}", id() );
+ long long unused = 0;
+ ASSERT_EQUALS( 13, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << bigNumString( 0x30, 0x10 ) );
+// dump();
+ ASSERT( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{$60:{$10:null,$20:null,$27:{$23:null,$25:null},$40:null,$50:null},_:{$70:null,$80:null,$90:null,$100:null}}", id() );
+ }
+ };
+
+ class DelInternalSplitPromoteRight : public Base {
+ public:
+ void run() {
+ string ns = id().indexNamespace();
+ ArtificialTree::setTree( "{$10:null,$20:null,$30:null,$40:null,$50:null,$60:null,$70:null,$80:null,$90:null,$100$10:{$95:{$93:null},_:{$97:null}}}", id() );
+ long long unused = 0;
+ ASSERT_EQUALS( 13, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ BSONObj k = BSON( "" << bigNumString( 0x100, 0x10 ) );
+// dump();
+ ASSERT( unindex( k ) );
+// dump();
+ ASSERT_EQUALS( 12, bt()->fullValidate( dl(), order(), &unused, true ) );
+ ASSERT_EQUALS( 0, unused );
+ ASSERT_EQUALS( 4, nsdetails( ns.c_str() )->stats.nrecords );
+ ArtificialTree::checkStructure( "{$80:{$10:null,$20:null,$30:null,$40:null,$50:null,$60:null,$70:null},_:{$90:null,$97:{$93:null,$95:null}}}", id() );
+ }
+ };
+
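+        // 0.0 and -0.0 compare equal, so a unique index must treat them as the same
+        // key; updating a document to -0.0 must not create a duplicate index entry.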
+ class SignedZeroDuplication : public Base {
+ public:
+ void run() {
+ ASSERT_EQUALS( 0.0, -0.0 );
+ DBDirectClient c;
+ c.ensureIndex( ns(), BSON( "b" << 1 ), true );
+ c.insert( ns(), BSON( "b" << 0.0 ) );
+ c.insert( ns(), BSON( "b" << 1.0 ) );
+ c.update( ns(), BSON( "b" << 1.0 ), BSON( "b" << -0.0 ) );
+ ASSERT_EQUALS( 1U, c.count( ns(), BSON( "b" << 0.0 ) ) );
+ }
+ };
+
+ class All : public Suite {
+ public:
+ All() : Suite( testName ) {
+ }
+
+ void setupTests() {
+ add< Create >();
+ add< SimpleInsertDelete >();
+ add< SplitRightHeavyBucket >();
+ add< SplitLeftHeavyBucket >();
+ add< MissingLocate >();
+ add< MissingLocateMultiBucket >();
+ add< SERVER983 >();
+ add< DontReuseUnused >();
+ add< PackUnused >();
+ add< DontDropReferenceKey >();
+ add< MergeBucketsLeft >();
+ add< MergeBucketsRight >();
+// add< MergeBucketsHead >();
+ add< MergeBucketsDontReplaceHead >();
+ add< MergeBucketsDelInternal >();
+ add< MergeBucketsRightNull >();
+ add< DontMergeSingleBucket >();
+ add< ParentMergeNonRightToLeft >();
+ add< ParentMergeNonRightToRight >();
+ add< CantMergeRightNoMerge >();
+ add< CantMergeLeftNoMerge >();
+ add< MergeOption >();
+ add< ForceMergeLeft >();
+ add< ForceMergeRight >();
+ add< RecursiveMerge >();
+ add< RecursiveMergeRightBucket >();
+ add< RecursiveMergeDoubleRightBucket >();
+ add< MergeSizeJustRightRight >();
+ add< MergeSizeJustRightLeft >();
+ add< MergeSizeRight >();
+ add< MergeSizeLeft >();
+ add< NoMergeBelowMarkRight >();
+ add< NoMergeBelowMarkLeft >();
+ add< MergeSizeRightTooBig >();
+ add< MergeSizeLeftTooBig >();
+ add< BalanceOneLeftToRight >();
+ add< BalanceOneRightToLeft >();
+ add< BalanceThreeLeftToRight >();
+ add< BalanceThreeRightToLeft >();
+ add< BalanceSingleParentKey >();
+ add< PackEmpty >();
+ add< PackedDataSizeEmpty >();
+ add< BalanceSingleParentKeyPackParent >();
+ add< BalanceSplitParent >();
+ add< EvenRebalanceLeft >();
+ add< EvenRebalanceLeftCusp >();
+ add< EvenRebalanceRight >();
+ add< EvenRebalanceRightCusp >();
+ add< EvenRebalanceCenter >();
+ add< OddRebalanceLeft >();
+ add< OddRebalanceRight >();
+ add< OddRebalanceCenter >();
+ add< RebalanceEmptyRight >();
+ add< RebalanceEmptyLeft >();
+ add< NoMoveAtLowWaterMarkRight >();
+ add< MoveBelowLowWaterMarkRight >();
+ add< NoMoveAtLowWaterMarkLeft >();
+ add< MoveBelowLowWaterMarkLeft >();
+ add< PreferBalanceLeft >();
+ add< PreferBalanceRight >();
+ add< RecursiveMergeThenBalance >();
+ add< MergeRightEmpty >();
+ add< MergeMinRightEmpty >();
+ add< MergeLeftEmpty >();
+ add< MergeMinLeftEmpty >();
+ add< BalanceRightEmpty >();
+ add< BalanceLeftEmpty >();
+ add< DelEmptyNoNeighbors >();
+ add< DelEmptyEmptyNeighbors >();
+ add< DelInternal >();
+ add< DelInternalReplaceWithUnused >();
+ add< DelInternalReplaceRight >();
+ add< DelInternalPromoteKey >();
+ add< DelInternalPromoteRightKey >();
+ add< DelInternalReplacementPrevNonNull >();
+ add< DelInternalReplacementNextNonNull >();
+ add< DelInternalSplitPromoteLeft >();
+ add< DelInternalSplitPromoteRight >();
+ add< SignedZeroDuplication >();
+ }
+ } myall;
diff --git a/src/mongo/dbtests/clienttests.cpp b/src/mongo/dbtests/clienttests.cpp
new file mode 100644
index 00000000000..a64894b43c1
--- /dev/null
+++ b/src/mongo/dbtests/clienttests.cpp
@@ -0,0 +1,197 @@
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+// clienttests.cpp
+
+#include "pch.h"
+#include "../client/dbclient.h"
+#include "dbtests.h"
+#include "../db/concurrency.h"
+
+namespace ClientTests {
+
+ class Base {
+ public:
+
+ Base( string coll ) {
+ _ns = (string)"test." + coll;
+ }
+
+ virtual ~Base() {
+ db.dropCollection( _ns );
+ }
+
+ const char * ns() { return _ns.c_str(); }
+
+ string _ns;
+ DBDirectClient db;
+ };
+
+
+ class DropIndex : public Base {
+ public:
+ DropIndex() : Base( "dropindex" ) {}
+ void run() {
+ db.insert( ns() , BSON( "x" << 2 ) );
+ ASSERT_EQUALS( 1 , db.getIndexes( ns() )->itcount() );
+
+ db.ensureIndex( ns() , BSON( "x" << 1 ) );
+ ASSERT_EQUALS( 2 , db.getIndexes( ns() )->itcount() );
+
+ db.dropIndex( ns() , BSON( "x" << 1 ) );
+ ASSERT_EQUALS( 1 , db.getIndexes( ns() )->itcount() );
+
+ db.ensureIndex( ns() , BSON( "x" << 1 ) );
+ ASSERT_EQUALS( 2 , db.getIndexes( ns() )->itcount() );
+
+ db.dropIndexes( ns() );
+ ASSERT_EQUALS( 1 , db.getIndexes( ns() )->itcount() );
+ }
+ };
+
+ class ReIndex : public Base {
+ public:
+ ReIndex() : Base( "reindex" ) {}
+ void run() {
+
+ db.insert( ns() , BSON( "x" << 2 ) );
+ ASSERT_EQUALS( 1 , db.getIndexes( ns() )->itcount() );
+
+ db.ensureIndex( ns() , BSON( "x" << 1 ) );
+ ASSERT_EQUALS( 2 , db.getIndexes( ns() )->itcount() );
+
+ db.reIndex( ns() );
+ ASSERT_EQUALS( 2 , db.getIndexes( ns() )->itcount() );
+ }
+
+ };
+
+ class ReIndex2 : public Base {
+ public:
+ ReIndex2() : Base( "reindex2" ) {}
+ void run() {
+
+ db.insert( ns() , BSON( "x" << 2 ) );
+ ASSERT_EQUALS( 1 , db.getIndexes( ns() )->itcount() );
+
+ db.ensureIndex( ns() , BSON( "x" << 1 ) );
+ ASSERT_EQUALS( 2 , db.getIndexes( ns() )->itcount() );
+
+ BSONObj out;
+ ASSERT( db.runCommand( "test" , BSON( "reIndex" << "reindex2" ) , out ) );
+ ASSERT_EQUALS( 2 , out["nIndexes"].number() );
+ ASSERT_EQUALS( 2 , db.getIndexes( ns() )->itcount() );
+ }
+
+ };
+
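+    // Regression test for CS-10: sorting on a compound index whose entries carry
+    // large (770-byte) string keys must still return all 1111 documents.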
+ class CS_10 : public Base {
+ public:
+ CS_10() : Base( "CS_10" ) {}
+ void run() {
+ string longs( 770, 'c' );
+ for( int i = 0; i < 1111; ++i )
+ db.insert( ns(), BSON( "a" << i << "b" << longs ) );
+ db.ensureIndex( ns(), BSON( "a" << 1 << "b" << 1 ) );
+
+ auto_ptr< DBClientCursor > c = db.query( ns(), Query().sort( BSON( "a" << 1 << "b" << 1 ) ) );
+ ASSERT_EQUALS( 1111, c->itcount() );
+ }
+ };
+
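+    // putBack() returns documents to the front of the cursor's current batch so
+    // that subsequent next() calls yield them again, in order.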
+ class PushBack : public Base {
+ public:
+ PushBack() : Base( "PushBack" ) {}
+ void run() {
+ for( int i = 0; i < 10; ++i )
+ db.insert( ns(), BSON( "i" << i ) );
+ auto_ptr< DBClientCursor > c = db.query( ns(), Query().sort( BSON( "i" << 1 ) ) );
+
+ BSONObj o = c->next();
+ ASSERT( c->more() );
+ ASSERT_EQUALS( 9 , c->objsLeftInBatch() );
+ ASSERT( c->moreInCurrentBatch() );
+
+ c->putBack( o );
+ ASSERT( c->more() );
+ ASSERT_EQUALS( 10, c->objsLeftInBatch() );
+ ASSERT( c->moreInCurrentBatch() );
+
+ o = c->next();
+ BSONObj o2 = c->next();
+ BSONObj o3 = c->next();
+ c->putBack( o3 );
+ c->putBack( o2 );
+ c->putBack( o );
+ for( int i = 0; i < 10; ++i ) {
+ o = c->next();
+ ASSERT_EQUALS( i, o[ "i" ].number() );
+ }
+ ASSERT( !c->more() );
+ ASSERT_EQUALS( 0, c->objsLeftInBatch() );
+ ASSERT( !c->moreInCurrentBatch() );
+
+ c->putBack( o );
+ ASSERT( c->more() );
+ ASSERT_EQUALS( 1, c->objsLeftInBatch() );
+ ASSERT( c->moreInCurrentBatch() );
+ ASSERT_EQUALS( 1, c->itcount() );
+ }
+ };
+
+ class Create : public Base {
+ public:
+ Create() : Base( "Create" ) {}
+ void run() {
+ db.createCollection( "unittests.clienttests.create", 4096, true );
+ BSONObj info;
+ ASSERT( db.runCommand( "unittests", BSON( "collstats" << "clienttests.create" ), info ) );
+ }
+ };
+
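+    // "a/b,c,d" parses as a replica-set connection string: set name "a" with the
+    // three seed hosts b, c and d.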
+ class ConnectionStringTests {
+ public:
+ void run() {
+ {
+ ConnectionString s( "a/b,c,d" , ConnectionString::SET );
+ ASSERT_EQUALS( ConnectionString::SET , s.type() );
+ ASSERT_EQUALS( "a" , s.getSetName() );
+ vector<HostAndPort> v = s.getServers();
+ ASSERT_EQUALS( 3U , v.size() );
+ ASSERT_EQUALS( "b" , v[0].host() );
+ ASSERT_EQUALS( "c" , v[1].host() );
+ ASSERT_EQUALS( "d" , v[2].host() );
+ }
+ }
+ };
+
+ class All : public Suite {
+ public:
+ All() : Suite( "client" ) {
+ }
+
+ void setupTests() {
+ add<DropIndex>();
+ add<ReIndex>();
+ add<ReIndex2>();
+ add<CS_10>();
+ add<PushBack>();
+ add<Create>();
+ add<ConnectionStringTests>();
+ }
+
+ } all;
+}
diff --git a/src/mongo/dbtests/commandtests.cpp b/src/mongo/dbtests/commandtests.cpp
new file mode 100644
index 00000000000..fa6204d25fd
--- /dev/null
+++ b/src/mongo/dbtests/commandtests.cpp
@@ -0,0 +1,98 @@
+/**
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../client/dbclient.h"
+#include "dbtests.h"
+#include "../db/concurrency.h"
+
+using namespace mongo;
+
+namespace CommandTests {
+ // one namespace per command
+ namespace FileMD5 {
+ struct Base {
+ Base() {
+ db.dropCollection(ns());
+ db.ensureIndex(ns(), BSON( "files_id" << 1 << "n" << 1 ));
+ }
+
+ const char* ns() { return "test.fs.chunks"; }
+
+ DBDirectClient db;
+ };
+ struct Type0 : Base {
+ void run() {
+ {
+ BSONObjBuilder b;
+ b.genOID();
+ b.append("files_id", 0);
+ b.append("n", 0);
+ b.appendBinData("data", 6, BinDataGeneral, "hello ");
+ db.insert(ns(), b.obj());
+ }
+ {
+ BSONObjBuilder b;
+ b.genOID();
+ b.append("files_id", 0);
+ b.append("n", 1);
+ b.appendBinData("data", 5, BinDataGeneral, "world");
+ db.insert(ns(), b.obj());
+ }
+
+ BSONObj result;
+ ASSERT( db.runCommand("test", BSON("filemd5" << 0), result) );
+ ASSERT_EQUALS( string("5eb63bbbe01eeed093cb22bb8f5acdc3") , result["md5"].valuestr() );
+ }
+ };
+ struct Type2 : Base {
+ void run() {
+ {
+ BSONObjBuilder b;
+ b.genOID();
+ b.append("files_id", 0);
+ b.append("n", 0);
+ b.appendBinDataArrayDeprecated("data", "hello ", 6);
+ db.insert(ns(), b.obj());
+ }
+ {
+ BSONObjBuilder b;
+ b.genOID();
+ b.append("files_id", 0);
+ b.append("n", 1);
+ b.appendBinDataArrayDeprecated("data", "world", 5);
+ db.insert(ns(), b.obj());
+ }
+
+ BSONObj result;
+ ASSERT( db.runCommand("test", BSON("filemd5" << 0), result) );
+ ASSERT_EQUALS( string("5eb63bbbe01eeed093cb22bb8f5acdc3") , result["md5"].valuestr() );
+ }
+ };
+ }
+
+ class All : public Suite {
+ public:
+ All() : Suite( "commands" ) {
+ }
+
+ void setupTests() {
+ add< FileMD5::Type0 >();
+ add< FileMD5::Type2 >();
+ }
+
+ } all;
+}
diff --git a/src/mongo/dbtests/counttests.cpp b/src/mongo/dbtests/counttests.cpp
new file mode 100644
index 00000000000..0d2575f14e3
--- /dev/null
+++ b/src/mongo/dbtests/counttests.cpp
@@ -0,0 +1,142 @@
+// counttests.cpp : count.{h,cpp} unit tests.
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "../db/ops/count.h"
+
+#include "../db/cursor.h"
+#include "../db/pdfile.h"
+
+#include "dbtests.h"
+
+namespace CountTests {
+
+ class Base {
+ dblock lk;
+ Client::Context _context;
+ public:
+ Base() : _context( ns() ) {
+ addIndex( fromjson( "{\"a\":1}" ) );
+ }
+ ~Base() {
+ try {
+ boost::shared_ptr<Cursor> c = theDataFileMgr.findAll( ns() );
+ vector< DiskLoc > toDelete;
+ for(; c->ok(); c->advance() )
+ toDelete.push_back( c->currLoc() );
+ for( vector< DiskLoc >::iterator i = toDelete.begin(); i != toDelete.end(); ++i )
+ theDataFileMgr.deleteRecord( ns(), i->rec(), *i, false );
+ DBDirectClient cl;
+ cl.dropIndexes( ns() );
+ }
+ catch ( ... ) {
+ FAIL( "Exception while cleaning up collection" );
+ }
+ }
+ protected:
+ static const char *ns() {
+ return "unittests.counttests";
+ }
+ static void addIndex( const BSONObj &key ) {
+ BSONObjBuilder b;
+ b.append( "name", key.firstElementFieldName() );
+ b.append( "ns", ns() );
+ b.append( "key", key );
+ BSONObj o = b.done();
+ stringstream indexNs;
+ indexNs << "unittests.system.indexes";
+ theDataFileMgr.insert( indexNs.str().c_str(), o.objdata(), o.objsize() );
+ }
+ static void insert( const char *s ) {
+ insert( fromjson( s ) );
+ }
+ static void insert( const BSONObj &o ) {
+ theDataFileMgr.insert( ns(), o.objdata(), o.objsize() );
+ }
+ };
+
+ class CountBasic : public Base {
+ public:
+ void run() {
+ insert( "{\"a\":\"b\"}" );
+ BSONObj cmd = fromjson( "{\"query\":{}}" );
+ string err;
+ ASSERT_EQUALS( 1, runCount( ns(), cmd, err ) );
+ }
+ };
+
+ class CountQuery : public Base {
+ public:
+ void run() {
+ insert( "{\"a\":\"b\"}" );
+ insert( "{\"a\":\"b\",\"x\":\"y\"}" );
+ insert( "{\"a\":\"c\"}" );
+ BSONObj cmd = fromjson( "{\"query\":{\"a\":\"b\"}}" );
+ string err;
+ ASSERT_EQUALS( 2, runCount( ns(), cmd, err ) );
+ }
+ };
+
+ class CountFields : public Base {
+ public:
+ void run() {
+ insert( "{\"a\":\"b\"}" );
+ insert( "{\"c\":\"d\"}" );
+ BSONObj cmd = fromjson( "{\"query\":{},\"fields\":{\"a\":1}}" );
+ string err;
+ ASSERT_EQUALS( 2, runCount( ns(), cmd, err ) );
+ }
+ };
+
+ class CountQueryFields : public Base {
+ public:
+ void run() {
+ insert( "{\"a\":\"b\"}" );
+ insert( "{\"a\":\"c\"}" );
+ insert( "{\"d\":\"e\"}" );
+ BSONObj cmd = fromjson( "{\"query\":{\"a\":\"b\"},\"fields\":{\"a\":1}}" );
+ string err;
+ ASSERT_EQUALS( 1, runCount( ns(), cmd, err ) );
+ }
+ };
+
+ class CountIndexedRegex : public Base {
+ public:
+ void run() {
+ insert( "{\"a\":\"b\"}" );
+ insert( "{\"a\":\"c\"}" );
+ BSONObj cmd = fromjson( "{\"query\":{\"a\":/^b/}}" );
+ string err;
+ ASSERT_EQUALS( 1, runCount( ns(), cmd, err ) );
+ }
+ };
+
+ class All : public Suite {
+ public:
+ All() : Suite( "count" ) {
+ }
+
+ void setupTests() {
+ add< CountBasic >();
+ add< CountQuery >();
+ add< CountFields >();
+ add< CountQueryFields >();
+ add< CountIndexedRegex >();
+ }
+ } myall;
+
+} // namespace CountTests
diff --git a/src/mongo/dbtests/cursortests.cpp b/src/mongo/dbtests/cursortests.cpp
new file mode 100644
index 00000000000..a7b52aada12
--- /dev/null
+++ b/src/mongo/dbtests/cursortests.cpp
@@ -0,0 +1,305 @@
+// cursortests.cpp : cursor related unit tests
+//
+
+/**
+ * Copyright (C) 2009 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../db/clientcursor.h"
+#include "../db/instance.h"
+#include "../db/btree.h"
+#include "../db/queryutil.h"
+#include "dbtests.h"
+
+namespace CursorTests {
+
+ namespace BtreeCursorTests {
+
+        // The ranges expressed in these tests are impossible given our query
+        // syntax, so we construct the FieldRangeVectors directly, which is a bit hacky.
+
+ class Base {
+ protected:
+ static const char *ns() { return "unittests.cursortests.Base"; }
+ FieldRangeVector *vec( int *vals, int len, int direction = 1 ) {
+ FieldRangeSet s( "", BSON( "a" << 1 ), true );
+ for( int i = 0; i < len; i += 2 ) {
+ _objs.push_back( BSON( "a" << BSON( "$gte" << vals[ i ] << "$lte" << vals[ i + 1 ] ) ) );
+ FieldRangeSet s2( "", _objs.back(), true );
+ if ( i == 0 ) {
+ s.range( "a" ) = s2.range( "a" );
+ }
+ else {
+ s.range( "a" ) |= s2.range( "a" );
+ }
+ }
+ // orphan idxSpec for this test
+ IndexSpec *idxSpec = new IndexSpec( BSON( "a" << 1 ) );
+ return new FieldRangeVector( s, *idxSpec, direction );
+ }
+ DBDirectClient _c;
+ private:
+ vector< BSONObj > _objs;
+ };
+
+ class MultiRange : public Base {
+ public:
+ void run() {
+ dblock lk;
+ const char *ns = "unittests.cursortests.BtreeCursorTests.MultiRange";
+ {
+ DBDirectClient c;
+ for( int i = 0; i < 10; ++i )
+ c.insert( ns, BSON( "a" << i ) );
+ ASSERT( c.ensureIndex( ns, BSON( "a" << 1 ) ) );
+ }
+ int v[] = { 1, 2, 4, 6 };
+ boost::shared_ptr< FieldRangeVector > frv( vec( v, 4 ) );
+ Client::Context ctx( ns );
+ scoped_ptr<BtreeCursor> _c( BtreeCursor::make( nsdetails( ns ), 1, nsdetails( ns )->idx(1), frv, 1 ) );
+ BtreeCursor &c = *_c.get();
+ ASSERT_EQUALS( "BtreeCursor a_1 multi", c.toString() );
+ double expected[] = { 1, 2, 4, 5, 6 };
+ for( int i = 0; i < 5; ++i ) {
+ ASSERT( c.ok() );
+ ASSERT_EQUALS( expected[ i ], c.currKey().firstElement().number() );
+ c.advance();
+ }
+ ASSERT( !c.ok() );
+ }
+ };
+
+ class MultiRangeGap : public Base {
+ public:
+ void run() {
+ dblock lk;
+ const char *ns = "unittests.cursortests.BtreeCursorTests.MultiRangeGap";
+ {
+ DBDirectClient c;
+ for( int i = 0; i < 10; ++i )
+ c.insert( ns, BSON( "a" << i ) );
+ for( int i = 100; i < 110; ++i )
+ c.insert( ns, BSON( "a" << i ) );
+ ASSERT( c.ensureIndex( ns, BSON( "a" << 1 ) ) );
+ }
+ int v[] = { -50, 2, 40, 60, 109, 200 };
+ boost::shared_ptr< FieldRangeVector > frv( vec( v, 6 ) );
+ Client::Context ctx( ns );
+ scoped_ptr<BtreeCursor> _c( BtreeCursor::make(nsdetails( ns ), 1, nsdetails( ns )->idx(1), frv, 1 ) );
+ BtreeCursor &c = *_c.get();
+ ASSERT_EQUALS( "BtreeCursor a_1 multi", c.toString() );
+ double expected[] = { 0, 1, 2, 109 };
+ for( int i = 0; i < 4; ++i ) {
+ ASSERT( c.ok() );
+ ASSERT_EQUALS( expected[ i ], c.currKey().firstElement().number() );
+ c.advance();
+ }
+ ASSERT( !c.ok() );
+ }
+ };
+
+ class MultiRangeReverse : public Base {
+ public:
+ void run() {
+ dblock lk;
+ const char *ns = "unittests.cursortests.BtreeCursorTests.MultiRangeReverse";
+ {
+ DBDirectClient c;
+ for( int i = 0; i < 10; ++i )
+ c.insert( ns, BSON( "a" << i ) );
+ ASSERT( c.ensureIndex( ns, BSON( "a" << 1 ) ) );
+ }
+ int v[] = { 1, 2, 4, 6 };
+ boost::shared_ptr< FieldRangeVector > frv( vec( v, 4, -1 ) );
+ Client::Context ctx( ns );
+ scoped_ptr<BtreeCursor> _c( BtreeCursor::make( nsdetails( ns ), 1, nsdetails( ns )->idx(1), frv, -1 ) );
+ BtreeCursor& c = *_c.get();
+ ASSERT_EQUALS( "BtreeCursor a_1 reverse multi", c.toString() );
+ double expected[] = { 6, 5, 4, 2, 1 };
+ for( int i = 0; i < 5; ++i ) {
+ ASSERT( c.ok() );
+ ASSERT_EQUALS( expected[ i ], c.currKey().firstElement().number() );
+ c.advance();
+ }
+ ASSERT( !c.ok() );
+ }
+ };
+
+ class Base2 {
+ public:
+ virtual ~Base2() { _c.dropCollection( ns() ); }
+ protected:
+ static const char *ns() { return "unittests.cursortests.Base2"; }
+ DBDirectClient _c;
+ virtual BSONObj idx() const = 0;
+ virtual int direction() const { return 1; }
+ void insert( const BSONObj &o ) {
+ _objs.push_back( o );
+ _c.insert( ns(), o );
+ }
+ void check( const BSONObj &spec ) {
+ {
+ BSONObj keypat = idx();
+ //cout << keypat.toString() << endl;
+ _c.ensureIndex( ns(), idx() );
+ }
+
+ Client::Context ctx( ns() );
+ FieldRangeSet frs( ns(), spec, true );
+ // orphan spec for this test.
+ IndexSpec *idxSpec = new IndexSpec( idx() );
+ boost::shared_ptr< FieldRangeVector > frv( new FieldRangeVector( frs, *idxSpec, direction() ) );
+ scoped_ptr<BtreeCursor> c( BtreeCursor::make( nsdetails( ns() ), 1, nsdetails( ns() )->idx( 1 ), frv, direction() ) );
+ Matcher m( spec );
+ int count = 0;
+ while( c->ok() ) {
+ ASSERT( m.matches( c->current() ) );
+ c->advance();
+ ++count;
+ }
+ int expectedCount = 0;
+ for( vector< BSONObj >::const_iterator i = _objs.begin(); i != _objs.end(); ++i ) {
+ if ( m.matches( *i ) ) {
+ ++expectedCount;
+ }
+ }
+ ASSERT_EQUALS( expectedCount, count );
+ }
+ private:
+ dblock _lk;
+ vector< BSONObj > _objs;
+ };
+
+ class EqEq : public Base2 {
+ public:
+ void run() {
+ insert( BSON( "a" << 4 << "b" << 5 ) );
+ insert( BSON( "a" << 4 << "b" << 5 ) );
+ insert( BSON( "a" << 4 << "b" << 4 ) );
+ insert( BSON( "a" << 5 << "b" << 4 ) );
+ check( BSON( "a" << 4 << "b" << 5 ) );
+ }
+ virtual BSONObj idx() const { return BSON( "a" << 1 << "b" << 1 ); }
+ };
+
+ class EqRange : public Base2 {
+ public:
+ void run() {
+ insert( BSON( "a" << 3 << "b" << 5 ) );
+ insert( BSON( "a" << 4 << "b" << 0 ) );
+ insert( BSON( "a" << 4 << "b" << 5 ) );
+ insert( BSON( "a" << 4 << "b" << 6 ) );
+ insert( BSON( "a" << 4 << "b" << 6 ) );
+ insert( BSON( "a" << 4 << "b" << 10 ) );
+ insert( BSON( "a" << 4 << "b" << 11 ) );
+ insert( BSON( "a" << 5 << "b" << 5 ) );
+ check( BSON( "a" << 4 << "b" << BSON( "$gte" << 1 << "$lte" << 10 ) ) );
+ }
+ virtual BSONObj idx() const { return BSON( "a" << 1 << "b" << 1 ); }
+ };
+
+ class EqIn : public Base2 {
+ public:
+ void run() {
+ insert( BSON( "a" << 3 << "b" << 5 ) );
+ insert( BSON( "a" << 4 << "b" << 0 ) );
+ insert( BSON( "a" << 4 << "b" << 5 ) );
+ insert( BSON( "a" << 4 << "b" << 6 ) );
+ insert( BSON( "a" << 4 << "b" << 6 ) );
+ insert( BSON( "a" << 4 << "b" << 10 ) );
+ insert( BSON( "a" << 4 << "b" << 11 ) );
+ insert( BSON( "a" << 5 << "b" << 5 ) );
+ check( BSON( "a" << 4 << "b" << BSON( "$in" << BSON_ARRAY( 5 << 6 << 11 ) ) ) );
+ }
+ virtual BSONObj idx() const { return BSON( "a" << 1 << "b" << 1 ); }
+ };
+
+ class RangeEq : public Base2 {
+ public:
+ void run() {
+ insert( BSON( "a" << 0 << "b" << 4 ) );
+ insert( BSON( "a" << 1 << "b" << 4 ) );
+ insert( BSON( "a" << 4 << "b" << 3 ) );
+ insert( BSON( "a" << 5 << "b" << 4 ) );
+ insert( BSON( "a" << 7 << "b" << 4 ) );
+ insert( BSON( "a" << 4 << "b" << 4 ) );
+ insert( BSON( "a" << 9 << "b" << 6 ) );
+ insert( BSON( "a" << 11 << "b" << 1 ) );
+ insert( BSON( "a" << 11 << "b" << 4 ) );
+ check( BSON( "a" << BSON( "$gte" << 1 << "$lte" << 10 ) << "b" << 4 ) );
+ }
+ virtual BSONObj idx() const { return BSON( "a" << 1 << "b" << 1 ); }
+ };
+
+ class RangeIn : public Base2 {
+ public:
+ void run() {
+ insert( BSON( "a" << 0 << "b" << 4 ) );
+ insert( BSON( "a" << 1 << "b" << 5 ) );
+ insert( BSON( "a" << 4 << "b" << 3 ) );
+ insert( BSON( "a" << 5 << "b" << 4 ) );
+ insert( BSON( "a" << 7 << "b" << 5 ) );
+ insert( BSON( "a" << 4 << "b" << 4 ) );
+ insert( BSON( "a" << 9 << "b" << 6 ) );
+ insert( BSON( "a" << 11 << "b" << 1 ) );
+ insert( BSON( "a" << 11 << "b" << 4 ) );
+ check( BSON( "a" << BSON( "$gte" << 1 << "$lte" << 10 ) << "b" << BSON( "$in" << BSON_ARRAY( 4 << 6 ) ) ) );
+ }
+ virtual BSONObj idx() const { return BSON( "a" << 1 << "b" << 1 ); }
+ };
+
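+        // The query constrains only 'b', so the cursor must step through 'a' values
+        // implicitly; nscanned is asserted to stay under 200 of the 300 documents,
+        // i.e. the implicit scan does not visit every index entry.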
+ class AbortImplicitScan : public Base {
+ public:
+ void run() {
+ dblock lk;
+ IndexSpec idx( BSON( "a" << 1 << "b" << 1 ) );
+ _c.ensureIndex( ns(), idx.keyPattern );
+ for( int i = 0; i < 300; ++i ) {
+ _c.insert( ns(), BSON( "a" << i << "b" << 5 ) );
+ }
+ FieldRangeSet frs( ns(), BSON( "b" << 3 ), true );
+ boost::shared_ptr<FieldRangeVector> frv( new FieldRangeVector( frs, idx, 1 ) );
+ Client::Context ctx( ns() );
+ scoped_ptr<BtreeCursor> c( BtreeCursor::make( nsdetails( ns() ), 1, nsdetails( ns() )->idx(1), frv, 1 ) );
+ long long initialNscanned = c->nscanned();
+ ASSERT( initialNscanned < 200 );
+ ASSERT( c->ok() );
+ c->advance();
+ ASSERT( c->nscanned() > initialNscanned );
+ ASSERT( c->nscanned() < 200 );
+ ASSERT( c->ok() );
+ }
+ };
+
+ } // namespace BtreeCursorTests
+
+ class All : public Suite {
+ public:
+ All() : Suite( "cursor" ) {}
+
+ void setupTests() {
+ add< BtreeCursorTests::MultiRange >();
+ add< BtreeCursorTests::MultiRangeGap >();
+ add< BtreeCursorTests::MultiRangeReverse >();
+ add< BtreeCursorTests::EqEq >();
+ add< BtreeCursorTests::EqRange >();
+ add< BtreeCursorTests::EqIn >();
+ add< BtreeCursorTests::RangeEq >();
+ add< BtreeCursorTests::RangeIn >();
+ add< BtreeCursorTests::AbortImplicitScan >();
+ }
+ } myall;
+} // namespace CursorTests
diff --git a/src/mongo/dbtests/d_chunk_manager_tests.cpp b/src/mongo/dbtests/d_chunk_manager_tests.cpp
new file mode 100644
index 00000000000..2bcc90faf7a
--- /dev/null
+++ b/src/mongo/dbtests/d_chunk_manager_tests.cpp
@@ -0,0 +1,467 @@
+// @file d_chunk_manager_tests.cpp : s/d_chunk_manager.{h,cpp} tests
+
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "dbtests.h"
+
+#include "../s/d_chunk_manager.h"
+
+namespace {
+
+ class BasicTests {
+ public:
+ void run() {
+ BSONObj collection = BSON( "_id" << "test.foo" <<
+ "dropped" << false <<
+ "key" << BSON( "a" << 1 ) <<
+ "unique" << false );
+
+ // single-chunk collection
+ BSONArray chunks = BSON_ARRAY( BSON( "_id" << "test.foo-a_MinKey" <<
+ "ns" << "test.foo" <<
+ "min" << BSON( "a" << MINKEY ) <<
+ "max" << BSON( "a" << MAXKEY ) ) );
+
+ ShardChunkManager s ( collection , chunks );
+
+ BSONObj k1 = BSON( "a" << MINKEY );
+ ASSERT( s.belongsToMe( k1 ) );
+ BSONObj k2 = BSON( "a" << MAXKEY );
+ ASSERT( ! s.belongsToMe( k2 ) );
+ BSONObj k3 = BSON( "a" << 1 << "b" << 2 );
+ ASSERT( s.belongsToMe( k3 ) );
+ }
+ };
+
+ class BasicCompoundTests {
+ public:
+ void run() {
+ BSONObj collection = BSON( "_id" << "test.foo" <<
+ "dropped" << false <<
+ "key" << BSON( "a" << 1 << "b" << 1) <<
+ "unique" << false );
+
+ // single-chunk collection
+ BSONArray chunks = BSON_ARRAY( BSON( "_id" << "test.foo-a_MinKeyb_MinKey" <<
+ "ns" << "test.foo" <<
+ "min" << BSON( "a" << MINKEY << "b" << MINKEY ) <<
+ "max" << BSON( "a" << MAXKEY << "b" << MAXKEY ) ) );
+
+ ShardChunkManager s ( collection , chunks );
+
+ BSONObj k1 = BSON( "a" << MINKEY << "b" << MINKEY );
+ ASSERT( s.belongsToMe( k1 ) );
+ BSONObj k2 = BSON( "a" << MAXKEY << "b" << MAXKEY );
+ ASSERT( ! s.belongsToMe( k2 ) );
+ BSONObj k3 = BSON( "a" << MINKEY << "b" << 10 );
+ ASSERT( s.belongsToMe( k3 ) );
+ BSONObj k4 = BSON( "a" << 10 << "b" << 20 );
+ ASSERT( s.belongsToMe( k4 ) );
+ }
+ };
+
+ class RangeTests {
+ public:
+ void run() {
+ BSONObj collection = BSON( "_id" << "x.y" <<
+ "dropped" << false <<
+ "key" << BSON( "a" << 1 ) <<
+ "unique" << false );
+
+ // 3-chunk collection, 2 of them being contiguous
+ // [min->10) , [10->20) , <gap> , [30->max)
+ BSONArray chunks = BSON_ARRAY( BSON( "_id" << "x.y-a_MinKey" <<
+ "ns" << "x.y" <<
+ "min" << BSON( "a" << MINKEY ) <<
+ "max" << BSON( "a" << 10 ) ) <<
+ BSON( "_id" << "x.y-a_10" <<
+ "ns" << "x.y" <<
+ "min" << BSON( "a" << 10 ) <<
+ "max" << BSON( "a" << 20 ) ) <<
+ BSON( "_id" << "x.y-a_30" <<
+ "ns" << "x.y" <<
+ "min" << BSON( "a" << 30 ) <<
+ "max" << BSON( "a" << MAXKEY ) ) );
+
+ ShardChunkManager s ( collection , chunks );
+
+ BSONObj k1 = BSON( "a" << 5 );
+ ASSERT( s.belongsToMe( k1 ) );
+ BSONObj k2 = BSON( "a" << 10 );
+ ASSERT( s.belongsToMe( k2 ) );
+ BSONObj k3 = BSON( "a" << 25 );
+ ASSERT( ! s.belongsToMe( k3 ) );
+ BSONObj k4 = BSON( "a" << 30 );
+ ASSERT( s.belongsToMe( k4 ) );
+ BSONObj k5 = BSON( "a" << 40 );
+ ASSERT( s.belongsToMe( k5 ) );
+ }
+ };
+
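+    // getNextChunk() fills in the bounds of the chunk following the given key and
+    // returns true when that chunk is the last one (or the manager is empty),
+    // false while more chunks remain.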
+ class GetNextTests {
+ public:
+ void run() {
+
+ BSONObj collection = BSON( "_id" << "x.y" <<
+ "dropped" << false <<
+ "key" << BSON( "a" << 1 ) <<
+ "unique" << false );
+ // empty collection
+ BSONArray chunks1 = BSONArray();
+ ShardChunkManager s1( collection , chunks1 );
+
+ BSONObj empty;
+ BSONObj arbitraryKey = BSON( "a" << 10 );
+ BSONObj foundMin, foundMax;
+
+ ASSERT( s1.getNextChunk( empty , &foundMin , &foundMax ) );
+ ASSERT( foundMin.isEmpty() );
+ ASSERT( foundMax.isEmpty() );
+ ASSERT( s1.getNextChunk( arbitraryKey , &foundMin , &foundMax ) );
+ ASSERT( foundMin.isEmpty() );
+ ASSERT( foundMax.isEmpty() );
+
+ // single-chunk collection
+ // [10->20]
+ BSONObj key_a10 = BSON( "a" << 10 );
+ BSONObj key_a20 = BSON( "a" << 20 );
+ BSONArray chunks2 = BSON_ARRAY( BSON( "_id" << "x.y-a_10" <<
+ "ns" << "x.y" <<
+ "min" << key_a10 <<
+ "max" << key_a20 ) );
+ ShardChunkManager s2( collection , chunks2 );
+ ASSERT( s2.getNextChunk( empty , &foundMin , &foundMax ) );
+ ASSERT( foundMin.woCompare( key_a10 ) == 0 );
+ ASSERT( foundMax.woCompare( key_a20 ) == 0 );
+
+ // 3-chunk collection, 2 of them being contiguous
+ // [min->10) , [10->20) , <gap> , [30->max)
+ BSONObj key_a30 = BSON( "a" << 30 );
+ BSONObj key_min = BSON( "a" << MINKEY );
+ BSONObj key_max = BSON( "a" << MAXKEY );
+ BSONArray chunks3 = BSON_ARRAY( BSON( "_id" << "x.y-a_MinKey" <<
+ "ns" << "x.y" <<
+ "min" << key_min <<
+ "max" << key_a10 ) <<
+ BSON( "_id" << "x.y-a_10" <<
+ "ns" << "x.y" <<
+ "min" << key_a10 <<
+ "max" << key_a20 ) <<
+ BSON( "_id" << "x.y-a_30" <<
+ "ns" << "x.y" <<
+ "min" << key_a30 <<
+ "max" << key_max ) );
+ ShardChunkManager s3( collection , chunks3 );
+ ASSERT( ! s3.getNextChunk( empty , &foundMin , &foundMax ) ); // not eof
+ ASSERT( foundMin.woCompare( key_min ) == 0 );
+ ASSERT( foundMax.woCompare( key_a10 ) == 0 );
+ ASSERT( ! s3.getNextChunk( key_a10 , &foundMin , &foundMax ) );
+ ASSERT( foundMin.woCompare( key_a30 ) == 0 );
+ ASSERT( foundMax.woCompare( key_max ) == 0 );
+ ASSERT( s3.getNextChunk( key_a30 , &foundMin , &foundMax ) );
+ }
+ };
+
+ class DeletedTests {
+ public:
+ void run() {
+ BSONObj collection = BSON( "_id" << "test.foo" <<
+ "dropped" << "true" );
+
+ BSONArray chunks = BSONArray();
+
+ ASSERT_THROWS( ShardChunkManager s ( collection , chunks ) , UserException );
+ }
+ };
+
+ class ClonePlusTests {
+ public:
+ void run() {
+ BSONObj collection = BSON( "_id" << "test.foo" <<
+ "dropped" << false <<
+ "key" << BSON( "a" << 1 << "b" << 1 ) <<
+ "unique" << false );
+ // 1-chunk collection
+ // [10,0-20,0)
+ BSONArray chunks = BSON_ARRAY( BSON( "_id" << "test.foo-a_MinKey" <<
+ "ns" << "test.foo" <<
+ "min" << BSON( "a" << 10 << "b" << 0 ) <<
+ "max" << BSON( "a" << 20 << "b" << 0 ) ) );
+
+ ShardChunkManager s ( collection , chunks );
+
+ // new chunk [20,0-30,0)
+ BSONObj min = BSON( "a" << 20 << "b" << 0 );
+ BSONObj max = BSON( "a" << 30 << "b" << 0 );
+ ShardChunkManagerPtr cloned( s.clonePlus( min , max , 1 /* TODO test version */ ) );
+
+ BSONObj k1 = BSON( "a" << 5 << "b" << 0 );
+ ASSERT( ! cloned->belongsToMe( k1 ) );
+ BSONObj k2 = BSON( "a" << 20 << "b" << 0 );
+ ASSERT( cloned->belongsToMe( k2 ) );
+ BSONObj k3 = BSON( "a" << 25 << "b" << 0 );
+ ASSERT( cloned->belongsToMe( k3 ) );
+ BSONObj k4 = BSON( "a" << 30 << "b" << 0 );
+ ASSERT( ! cloned->belongsToMe( k4 ) );
+ }
+ };
+
+ class ClonePlusExceptionTests {
+ public:
+ void run() {
+ BSONObj collection = BSON( "_id" << "test.foo" <<
+ "dropped" << false <<
+ "key" << BSON( "a" << 1 << "b" << 1 ) <<
+ "unique" << false );
+ // 1-chunk collection
+ // [10,0-20,0)
+ BSONArray chunks = BSON_ARRAY( BSON( "_id" << "test.foo-a_MinKey" <<
+ "ns" << "test.foo" <<
+ "min" << BSON( "a" << 10 << "b" << 0 ) <<
+ "max" << BSON( "a" << 20 << "b" << 0 ) ) );
+
+ ShardChunkManager s ( collection , chunks );
+
+ // [15,0-25,0) overlaps [10,0-20,0)
+ BSONObj min = BSON( "a" << 15 << "b" << 0 );
+ BSONObj max = BSON( "a" << 25 << "b" << 0 );
+ ASSERT_THROWS( s.clonePlus ( min , max , 1 /* TODO test version */ ) , UserException );
+ }
+ };
+
+ class CloneMinusTests {
+ public:
+ void run() {
+ BSONObj collection = BSON( "_id" << "x.y" <<
+ "dropped" << false <<
+ "key" << BSON( "a" << 1 << "b" << 1 ) <<
+ "unique" << false );
+
+ // 2-chunk collection
+ // [10,0->20,0) , <gap> , [30,0->40,0)
+ BSONArray chunks = BSON_ARRAY( BSON( "_id" << "x.y-a_10b_0" <<
+ "ns" << "x.y" <<
+ "min" << BSON( "a" << 10 << "b" << 0 ) <<
+ "max" << BSON( "a" << 20 << "b" << 0 ) ) <<
+ BSON( "_id" << "x.y-a_30b_0" <<
+ "ns" << "x.y" <<
+ "min" << BSON( "a" << 30 << "b" << 0 ) <<
+ "max" << BSON( "a" << 40 << "b" << 0 ) ) );
+
+ ShardChunkManager s ( collection , chunks );
+
+ // deleting chunk [10,0-20,0)
+ BSONObj min = BSON( "a" << 10 << "b" << 0 );
+ BSONObj max = BSON( "a" << 20 << "b" << 0 );
+ ShardChunkManagerPtr cloned( s.cloneMinus( min , max , 1 /* TODO test version */ ) );
+
+ BSONObj k1 = BSON( "a" << 5 << "b" << 0 );
+ ASSERT( ! cloned->belongsToMe( k1 ) );
+ BSONObj k2 = BSON( "a" << 15 << "b" << 0 );
+ ASSERT( ! cloned->belongsToMe( k2 ) );
+ BSONObj k3 = BSON( "a" << 30 << "b" << 0 );
+ ASSERT( cloned->belongsToMe( k3 ) );
+ BSONObj k4 = BSON( "a" << 35 << "b" << 0 );
+ ASSERT( cloned->belongsToMe( k4 ) );
+ BSONObj k5 = BSON( "a" << 40 << "b" << 0 );
+ ASSERT( ! cloned->belongsToMe( k5 ) );
+ }
+ };
+
+ class CloneMinusExceptionTests {
+ public:
+ void run() {
+ BSONObj collection = BSON( "_id" << "x.y" <<
+ "dropped" << false <<
+ "key" << BSON( "a" << 1 << "b" << 1 ) <<
+ "unique" << false );
+
+ // 2-chunk collection
+ // [10,0->20,0) , <gap> , [30,0->40,0)
+ BSONArray chunks = BSON_ARRAY( BSON( "_id" << "x.y-a_10b_0" <<
+ "ns" << "x.y" <<
+ "min" << BSON( "a" << 10 << "b" << 0 ) <<
+ "max" << BSON( "a" << 20 << "b" << 0 ) ) <<
+ BSON( "_id" << "x.y-a_30b_0" <<
+ "ns" << "x.y" <<
+ "min" << BSON( "a" << 30 << "b" << 0 ) <<
+ "max" << BSON( "a" << 40 << "b" << 0 ) ) );
+
+ ShardChunkManager s ( collection , chunks );
+
+ // deleting non-existing chunk [25,0-28,0)
+ BSONObj min1 = BSON( "a" << 25 << "b" << 0 );
+ BSONObj max1 = BSON( "a" << 28 << "b" << 0 );
+ ASSERT_THROWS( s.cloneMinus( min1 , max1 , 1 /* TODO test version */ ) , UserException );
+
+
+            // deleting an overlapping range (not exactly a chunk) [15,0-25,0)
+ BSONObj min2 = BSON( "a" << 15 << "b" << 0 );
+ BSONObj max2 = BSON( "a" << 25 << "b" << 0 );
+ ASSERT_THROWS( s.cloneMinus( min2 , max2 , 1 /* TODO test version */ ) , UserException );
+ }
+ };
+
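+    // cloneSplit() replaces one chunk with the pieces defined by the split keys;
+    // each split point bumps the minor version, so the clone ends at 1|101 while
+    // the original manager keeps its single chunk.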
+ class CloneSplitTests {
+ public:
+ void run() {
+ BSONObj collection = BSON( "_id" << "test.foo" <<
+ "dropped" << false <<
+ "key" << BSON( "a" << 1 << "b" << 1 ) <<
+ "unique" << false );
+ // 1-chunk collection
+ // [10,0-20,0)
+ BSONObj min = BSON( "a" << 10 << "b" << 0 );
+ BSONObj max = BSON( "a" << 20 << "b" << 0 );
+ BSONArray chunks = BSON_ARRAY( BSON( "_id" << "test.foo-a_MinKey"
+ << "ns" << "test.foo"
+ << "min" << min
+ << "max" << max ) );
+
+ ShardChunkManager s ( collection , chunks );
+
+ BSONObj split1 = BSON( "a" << 15 << "b" << 0 );
+ BSONObj split2 = BSON( "a" << 18 << "b" << 0 );
+ vector<BSONObj> splitKeys;
+ splitKeys.push_back( split1 );
+ splitKeys.push_back( split2 );
+ ShardChunkVersion version( 1 , 99 ); // first chunk 1|99 , second 1|100
+ ShardChunkManagerPtr cloned( s.cloneSplit( min , max , splitKeys , version ) );
+
+ version.incMinor(); /* second chunk 1|100, first split point */
+ version.incMinor(); /* third chunk 1|101, second split point */
+ ASSERT_EQUALS( cloned->getVersion() , version /* 1|101 */ );
+ ASSERT_EQUALS( s.getNumChunks() , 1u );
+ ASSERT_EQUALS( cloned->getNumChunks() , 3u );
+ ASSERT( cloned->belongsToMe( min ) );
+ ASSERT( cloned->belongsToMe( split1 ) );
+ ASSERT( cloned->belongsToMe( split2 ) );
+ ASSERT( ! cloned->belongsToMe( max ) );
+ }
+ };
+
+ class CloneSplitExceptionTests {
+ public:
+ void run() {
+ BSONObj collection = BSON( "_id" << "test.foo" <<
+ "dropped" << false <<
+ "key" << BSON( "a" << 1 << "b" << 1 ) <<
+ "unique" << false );
+ // 1-chunk collection
+ // [10,0-20,0)
+ BSONObj min = BSON( "a" << 10 << "b" << 0 );
+ BSONObj max = BSON( "a" << 20 << "b" << 0 );
+ BSONArray chunks = BSON_ARRAY( BSON( "_id" << "test.foo-a_MinKey"
+ << "ns" << "test.foo"
+ << "min" << min
+ << "max" << max ) );
+
+ ShardChunkManager s ( collection , chunks );
+
+ BSONObj badSplit = BSON( "a" << 5 << "b" << 0 );
+ vector<BSONObj> splitKeys;
+ splitKeys.push_back( badSplit );
+ ASSERT_THROWS( s.cloneSplit( min , max , splitKeys , ShardChunkVersion( 1 ) ) , UserException );
+
+ BSONObj badMax = BSON( "a" << 25 << "b" << 0 );
+ BSONObj split = BSON( "a" << 15 << "b" << 0 );
+ splitKeys.clear();
+ splitKeys.push_back( split );
+ ASSERT_THROWS( s.cloneSplit( min , badMax, splitKeys , ShardChunkVersion( 1 ) ) , UserException );
+ }
+ };
+
+ class EmptyShardTests {
+ public:
+ void run() {
+ BSONObj collection = BSON( "_id" << "test.foo" <<
+ "dropped" << false <<
+ "key" << BSON( "a" << 1 ) <<
+ "unique" << false );
+
+ // no chunks on this shard
+ BSONArray chunks;
+
+ // shard can have zero chunks for an existing collection
+ // version should be 0, though
+ ShardChunkManager s( collection , chunks );
+ ASSERT_EQUALS( s.getVersion() , ShardChunkVersion( 0 ) );
+ ASSERT_EQUALS( s.getNumChunks() , 0u );
+ }
+ };
+
+ class LastChunkTests {
+ public:
+ void run() {
+ BSONObj collection = BSON( "_id" << "test.foo" <<
+ "dropped" << false <<
+ "key" << BSON( "a" << 1 ) <<
+ "unique" << false );
+
+ // 1-chunk collection
+ // [10->20)
+ BSONArray chunks = BSON_ARRAY( BSON( "_id" << "test.foo-a_10" <<
+ "ns" << "test.foo" <<
+ "min" << BSON( "a" << 10 ) <<
+ "max" << BSON( "a" << 20 ) ) );
+
+ ShardChunkManager s( collection , chunks );
+ BSONObj min = BSON( "a" << 10 );
+ BSONObj max = BSON( "a" << 20 );
+
+ // if we remove the only chunk, the only version accepted is 0
+ ShardChunkVersion nonZero = 99;
+ ASSERT_THROWS( s.cloneMinus( min , max , nonZero ) , UserException );
+ ShardChunkManagerPtr empty( s.cloneMinus( min , max , 0 ) );
+ ASSERT_EQUALS( empty->getVersion() , ShardChunkVersion( 0 ) );
+ ASSERT_EQUALS( empty->getNumChunks() , 0u );
+ BSONObj k = BSON( "a" << 15 << "b" << 0 );
+ ASSERT( ! empty->belongsToMe( k ) );
+
+ // we can add a chunk to an empty manager
+ // version should be provided
+ ASSERT_THROWS( empty->clonePlus( min , max , 0 ) , UserException );
+ ShardChunkManagerPtr cloned( empty->clonePlus( min , max , nonZero ) );
+ ASSERT_EQUALS( cloned->getVersion(), nonZero );
+ ASSERT_EQUALS( cloned->getNumChunks() , 1u );
+ ASSERT( cloned->belongsToMe( k ) );
+ }
+ };
+
+ class ShardChunkManagerSuite : public Suite {
+ public:
+ ShardChunkManagerSuite() : Suite ( "shard_chunk_manager" ) {}
+
+ void setupTests() {
+ add< BasicTests >();
+ add< BasicCompoundTests >();
+ add< RangeTests >();
+ add< GetNextTests >();
+ add< DeletedTests >();
+ add< ClonePlusTests >();
+ add< ClonePlusExceptionTests >();
+ add< CloneMinusTests >();
+ add< CloneMinusExceptionTests >();
+ add< CloneSplitTests >();
+ add< CloneSplitExceptionTests >();
+ add< EmptyShardTests >();
+ add< LastChunkTests >();
+ }
+ } shardChunkManagerSuite;
+
+} // anonymous namespace
diff --git a/src/mongo/dbtests/dbtests.cpp b/src/mongo/dbtests/dbtests.cpp
new file mode 100644
index 00000000000..fde0f669c98
--- /dev/null
+++ b/src/mongo/dbtests/dbtests.cpp
@@ -0,0 +1,29 @@
+// @file dbtests.cpp : Runs db unit tests.
+//
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "dbtests.h"
+#include "../util/unittest.h"
+
+int main( int argc, char** argv ) {
+    static StaticObserver staticObserver;
+ doPreServerStartupInits();
+ UnitTest::runTests();
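+    // the third argument is the default --dbpath for this test run (see framework.cpp); its contents are wiped before the suites start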
+ return Suite::run(argc, argv, "/tmp/unittest");
+}
diff --git a/src/mongo/dbtests/dbtests.h b/src/mongo/dbtests/dbtests.h
new file mode 100644
index 00000000000..dbaeea1d180
--- /dev/null
+++ b/src/mongo/dbtests/dbtests.h
@@ -0,0 +1,25 @@
+// dbtests.h : Test suite generator headers.
+//
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "framework.h"
+
+using namespace mongo;
+using namespace mongo::regression;
+using boost::shared_ptr;
+
diff --git a/src/mongo/dbtests/directclienttests.cpp b/src/mongo/dbtests/directclienttests.cpp
new file mode 100644
index 00000000000..860eb7e7e5c
--- /dev/null
+++ b/src/mongo/dbtests/directclienttests.cpp
@@ -0,0 +1,103 @@
+/** @file directclienttests.cpp
+*/
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../db/ops/query.h"
+#include "../db/db.h"
+#include "../db/instance.h"
+#include "../db/json.h"
+#include "../db/lasterror.h"
+#include "../db/ops/update.h"
+#include "../util/timer.h"
+#include "dbtests.h"
+
+namespace DirectClientTests {
+
+ class ClientBase {
+ public:
+        // NOTE: Not bothering to back up the old error record.
+ ClientBase() { mongo::lastError.reset( new LastError() ); }
+ virtual ~ClientBase() { }
+ protected:
+ static bool error() {
+ return !_client.getPrevError().getField( "err" ).isNull();
+ }
+ DBDirectClient &client() const { return _client; }
+ private:
+ static DBDirectClient _client;
+ };
+ DBDirectClient ClientBase::_client;
+
+ const char *ns = "a.b";
+
+ class Capped : public ClientBase {
+ public:
+ virtual void run() {
+ for( int pass=0; pass < 3; pass++ ) {
+ client().createCollection(ns, 1024 * 1024, true, 999);
+ for( int j =0; j < pass*3; j++ )
+ client().insert(ns, BSON("x" << j));
+
+ // test truncation of a capped collection
+ if( pass ) {
+ BSONObj info;
+ BSONObj cmd = BSON( "captrunc" << "b" << "n" << 1 << "inc" << true );
+ //cout << cmd.toString() << endl;
+ bool ok = client().runCommand("a", cmd, info);
+ //cout << info.toString() << endl;
+ assert(ok);
+ }
+
+ assert( client().dropCollection(ns) );
+ }
+ }
+ };
+
+ class InsertMany : ClientBase {
+ public:
+ virtual void run(){
+ vector<BSONObj> objs;
+ objs.push_back(BSON("_id" << 1));
+ objs.push_back(BSON("_id" << 1));
+ objs.push_back(BSON("_id" << 2));
+
+
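+            // the second object reuses _id 1, so a plain insert stops at the duplicate key error (11000)
+            // and leaves one document, while InsertOption_ContinueOnError keeps going and inserts two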
+ client().dropCollection(ns);
+ client().insert(ns, objs);
+ ASSERT_EQUALS(client().getLastErrorDetailed()["code"].numberInt(), 11000);
+ ASSERT_EQUALS((int)client().count(ns), 1);
+
+ client().dropCollection(ns);
+ client().insert(ns, objs, InsertOption_ContinueOnError);
+ ASSERT_EQUALS(client().getLastErrorDetailed()["code"].numberInt(), 11000);
+ ASSERT_EQUALS((int)client().count(ns), 2);
+ }
+
+ };
+
+ class All : public Suite {
+ public:
+ All() : Suite( "directclient" ) {
+ }
+ void setupTests() {
+ add< Capped >();
+ add< InsertMany >();
+ }
+ } myall;
+}
diff --git a/src/mongo/dbtests/framework.cpp b/src/mongo/dbtests/framework.cpp
new file mode 100644
index 00000000000..95ed8b33668
--- /dev/null
+++ b/src/mongo/dbtests/framework.cpp
@@ -0,0 +1,446 @@
+// framework.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "../util/version.h"
+#include <boost/program_options.hpp>
+
+#undef assert
+#define assert MONGO_assert
+
+#include "framework.h"
+#include "../util/file_allocator.h"
+#include "../db/dur.h"
+#include "../util/background.h"
+
+#ifndef _WIN32
+#include <cxxabi.h>
+#include <sys/file.h>
+#endif
+
+namespace po = boost::program_options;
+
+namespace mongo {
+
+ CmdLine cmdLine;
+
+ namespace regression {
+
+ map<string,Suite*> * mongo::regression::Suite::_suites = 0;
+
+ class Result {
+ public:
+ Result( string name ) : _name( name ) , _rc(0) , _tests(0) , _fails(0) , _asserts(0) {
+ }
+
+ string toString() {
+ stringstream ss;
+
+ char result[128];
+ sprintf(result, "%-20s | tests: %4d | fails: %4d | assert calls: %6d\n", _name.c_str(), _tests, _fails, _asserts);
+ ss << result;
+
+ for ( list<string>::iterator i=_messages.begin(); i!=_messages.end(); i++ ) {
+ ss << "\t" << *i << '\n';
+ }
+
+ return ss.str();
+ }
+
+ int rc() {
+ return _rc;
+ }
+
+ string _name;
+
+ int _rc;
+ int _tests;
+ int _fails;
+ int _asserts;
+ list<string> _messages;
+
+ static Result * cur;
+ };
+
+ Result * Result::cur = 0;
+
+ int minutesRunning = 0; // reset to 0 each time a new test starts
+ mutex minutesRunningMutex("minutesRunningMutex");
+ string currentTestName;
+
+ Result * Suite::run( const string& filter ) {
+ // set tlogLevel to -1 to suppress tlog() output in a test program
+ tlogLevel = -1;
+
+ log(1) << "\t about to setupTests" << endl;
+ setupTests();
+ log(1) << "\t done setupTests" << endl;
+
+ Result * r = new Result( _name );
+ Result::cur = r;
+
+ /* see note in SavedContext */
+ //writelock lk("");
+
+ for ( list<TestCase*>::iterator i=_tests.begin(); i!=_tests.end(); i++ ) {
+ TestCase * tc = *i;
+ if ( filter.size() && tc->getName().find( filter ) == string::npos ) {
+                    log(1) << "\t skipping test: " << tc->getName() << " because it doesn't match the filter" << endl;
+ continue;
+ }
+
+ r->_tests++;
+
+ bool passes = false;
+
+ log(1) << "\t going to run test: " << tc->getName() << endl;
+
+ stringstream err;
+ err << tc->getName() << "\t";
+
+ {
+ scoped_lock lk(minutesRunningMutex);
+ minutesRunning = 0;
+ currentTestName = tc->getName();
+ }
+
+ try {
+ tc->run();
+ passes = true;
+ }
+ catch ( MyAssertionException * ae ) {
+ err << ae->ss.str();
+ delete( ae );
+ }
+ catch ( std::exception& e ) {
+ err << " exception: " << e.what();
+ }
+ catch ( int x ) {
+ err << " caught int : " << x << endl;
+ }
+ catch ( ... ) {
+ cerr << "unknown exception in test: " << tc->getName() << endl;
+ }
+
+ if ( ! passes ) {
+ string s = err.str();
+ log() << "FAIL: " << s << endl;
+ r->_fails++;
+ r->_messages.push_back( s );
+ }
+ }
+
+ if ( r->_fails )
+ r->_rc = 17;
+
+ log(1) << "\t DONE running tests" << endl;
+
+ return r;
+ }
+
+ void show_help_text(const char* name, po::options_description options) {
+ cout << "usage: " << name << " [options] [suite]..." << endl
+ << options << "suite: run the specified test suite(s) only" << endl;
+ }
+
+ class TestWatchDog : public BackgroundJob {
+ public:
+ virtual string name() const { return "TestWatchDog"; }
+ virtual void run(){
+
+ while (true) {
+ sleepsecs(60);
+
+ scoped_lock lk(minutesRunningMutex);
+                minutesRunning++; // reset to 0 in Suite::run() when a new test starts
+
+ if (minutesRunning > 30){
+ log() << currentTestName << " has been running for more than 30 minutes. aborting." << endl;
+ ::abort();
+ }
+ else if (minutesRunning > 1){
+ warning() << currentTestName << " has been running for more than " << minutesRunning-1 << " minutes." << endl;
+ }
+ }
+ }
+ };
+
+ unsigned perfHist = 1;
+
+ int Suite::run( int argc , char** argv , string default_dbpath ) {
+ unsigned long long seed = time( 0 );
+ string dbpathSpec;
+
+ po::options_description shell_options("options");
+ po::options_description hidden_options("Hidden options");
+ po::options_description cmdline_options("Command line options");
+ po::positional_options_description positional_options;
+
+ shell_options.add_options()
+ ("help,h", "show this usage information")
+ ("dbpath", po::value<string>(&dbpathSpec)->default_value(default_dbpath),
+ "db data path for this test run. NOTE: the contents of this "
+ "directory will be overwritten if it already exists")
+ ("debug", "run tests with verbose output")
+ ("list,l", "list available test suites")
+ ("bigfiles", "use big datafiles instead of smallfiles which is the default")
+ ("filter,f" , po::value<string>() , "string substring filter on test name" )
+ ("verbose,v", "verbose")
+ ("dur", "enable journaling")
+ ("nodur", "disable journaling (currently the default)")
+ ("seed", po::value<unsigned long long>(&seed), "random number seed")
+ ("perfHist", po::value<unsigned>(&perfHist), "number of back runs of perf stats to display")
+ ;
+
+ hidden_options.add_options()
+ ("suites", po::value< vector<string> >(), "test suites to run")
+ ("nopreallocj", "disable journal prealloc")
+ ;
+
+ positional_options.add("suites", -1);
+
+ cmdline_options.add(shell_options).add(hidden_options);
+
+ po::variables_map params;
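+            // presumably: start from boost's unix_style, drop option-name guessing and "sticky" short
+            // options, and additionally allow long options written with a single leading dash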
+ int command_line_style = (((po::command_line_style::unix_style ^
+ po::command_line_style::allow_guessing) |
+ po::command_line_style::allow_long_disguise) ^
+ po::command_line_style::allow_sticky);
+
+ try {
+ po::store(po::command_line_parser(argc, argv).options(cmdline_options).
+ positional(positional_options).
+ style(command_line_style).run(), params);
+ po::notify(params);
+ }
+ catch (po::error &e) {
+ cout << "ERROR: " << e.what() << endl << endl;
+ show_help_text(argv[0], shell_options);
+ return EXIT_BADOPTIONS;
+ }
+
+ if (params.count("help")) {
+ show_help_text(argv[0], shell_options);
+ return EXIT_CLEAN;
+ }
+
+ bool nodur = false;
+ if( params.count("nodur") ) {
+ nodur = true;
+ cmdLine.dur = false;
+ }
+ if( params.count("dur") || cmdLine.dur ) {
+ cmdLine.dur = true;
+ }
+
+ if( params.count("nopreallocj") ) {
+ cmdLine.preallocj = false;
+ }
+
+ if (params.count("debug") || params.count("verbose") ) {
+ logLevel = 1;
+ }
+
+ if (params.count("list")) {
+ for ( map<string,Suite*>::iterator i = _suites->begin() ; i != _suites->end(); i++ )
+ cout << i->first << endl;
+ return 0;
+ }
+
+ boost::filesystem::path p(dbpathSpec);
+
+ /* remove the contents of the test directory if it exists. */
+ if (boost::filesystem::exists(p)) {
+ if (!boost::filesystem::is_directory(p)) {
+ cout << "ERROR: path \"" << p.string() << "\" is not a directory" << endl << endl;
+ show_help_text(argv[0], shell_options);
+ return EXIT_BADOPTIONS;
+ }
+ boost::filesystem::directory_iterator end_iter;
+ for (boost::filesystem::directory_iterator dir_iter(p);
+ dir_iter != end_iter; ++dir_iter) {
+ boost::filesystem::remove_all(*dir_iter);
+ }
+ }
+ else {
+ boost::filesystem::create_directory(p);
+ }
+
+ string dbpathString = p.native_directory_string();
+ dbpath = dbpathString.c_str();
+
+ cmdLine.prealloc = false;
+
+ // dbtest defaults to smallfiles
+ cmdLine.smallfiles = true;
+ if( params.count("bigfiles") ) {
+                cmdLine.smallfiles = false;
+ }
+
+ cmdLine.oplogSize = 10 * 1024 * 1024;
+ Client::initThread("testsuite");
+ acquirePathLock();
+
+ srand( (unsigned) seed );
+ printGitVersion();
+ printSysInfo();
+ DEV log() << "_DEBUG build" << endl;
+ if( sizeof(void*)==4 )
+ log() << "32bit" << endl;
+ log() << "random seed: " << seed << endl;
+
+ if( time(0) % 3 == 0 && !nodur ) {
+ cmdLine.dur = true;
+ log() << "****************" << endl;
+ log() << "running with journaling enabled to test that. dbtests will do this occasionally even if --dur is not specified." << endl;
+ log() << "****************" << endl;
+ }
+
+ FileAllocator::get()->start();
+
+ vector<string> suites;
+ if (params.count("suites")) {
+ suites = params["suites"].as< vector<string> >();
+ }
+
+ string filter = "";
+ if ( params.count( "filter" ) ) {
+ filter = params["filter"].as<string>();
+ }
+
+ dur::startup();
+
+ if( debug && cmdLine.dur ) {
+ log() << "_DEBUG: automatically enabling cmdLine.durOptions=8 (DurParanoid)" << endl;
+ // this was commented out. why too slow or something? :
+ cmdLine.durOptions |= 8;
+ }
+
+ TestWatchDog twd;
+ twd.go();
+
+ int ret = run(suites,filter);
+
+#if !defined(_WIN32) && !defined(__sunos__)
+ flock( lockFile, LOCK_UN );
+#endif
+
+ cc().shutdown();
+ dbexit( (ExitCode)ret ); // so everything shuts down cleanly
+ return ret;
+ }
+
+ int Suite::run( vector<string> suites , const string& filter ) {
+ for ( unsigned int i = 0; i < suites.size(); i++ ) {
+ if ( _suites->find( suites[i] ) == _suites->end() ) {
+ cout << "invalid test [" << suites[i] << "], use --list to see valid names" << endl;
+ return -1;
+ }
+ }
+
+ list<string> torun(suites.begin(), suites.end());
+
+ if ( torun.size() == 0 )
+ for ( map<string,Suite*>::iterator i=_suites->begin() ; i!=_suites->end(); i++ )
+ torun.push_back( i->first );
+
+ list<Result*> results;
+
+ for ( list<string>::iterator i=torun.begin(); i!=torun.end(); i++ ) {
+ string name = *i;
+ Suite * s = (*_suites)[name];
+ assert( s );
+
+ log() << "going to run suite: " << name << endl;
+ results.push_back( s->run( filter ) );
+ }
+
+ Logstream::get().flush();
+
+ cout << "**************************************************" << endl;
+
+ int rc = 0;
+
+ int tests = 0;
+ int fails = 0;
+ int asserts = 0;
+
+ for ( list<Result*>::iterator i=results.begin(); i!=results.end(); i++ ) {
+ Result * r = *i;
+ cout << r->toString();
+ if ( abs( r->rc() ) > abs( rc ) )
+ rc = r->rc();
+
+ tests += r->_tests;
+ fails += r->_fails;
+ asserts += r->_asserts;
+ }
+
+ Result totals ("TOTALS");
+ totals._tests = tests;
+ totals._fails = fails;
+ totals._asserts = asserts;
+
+ cout << totals.toString(); // includes endl
+
+ return rc;
+ }
+
+ void Suite::registerSuite( string name , Suite * s ) {
+ if ( ! _suites )
+ _suites = new map<string,Suite*>();
+ Suite*& m = (*_suites)[name];
+ uassert( 10162 , "already have suite with that name" , ! m );
+ m = s;
+ }
+
+ void assert_pass() {
+ Result::cur->_asserts++;
+ }
+
+ void assert_fail( const char * exp , const char * file , unsigned line ) {
+ Result::cur->_asserts++;
+
+ MyAssertionException * e = new MyAssertionException();
+ e->ss << "ASSERT FAILED! " << file << ":" << line << endl;
+ throw e;
+ }
+
+ void fail( const char * exp , const char * file , unsigned line ) {
+ assert(0);
+ }
+
+ MyAssertionException * MyAsserts::getBase() {
+ MyAssertionException * e = new MyAssertionException();
+ e->ss << _file << ":" << _line << " " << _aexp << " != " << _bexp << " ";
+ return e;
+ }
+
+ void MyAsserts::printLocation() {
+ log() << _file << ":" << _line << " " << _aexp << " != " << _bexp << " ";
+ }
+
+ void MyAsserts::_gotAssert() {
+ Result::cur->_asserts++;
+ }
+
+ }
+
+ void setupSignals( bool inFork ) {}
+
+}
diff --git a/src/mongo/dbtests/framework.h b/src/mongo/dbtests/framework.h
new file mode 100644
index 00000000000..adf610a05eb
--- /dev/null
+++ b/src/mongo/dbtests/framework.h
@@ -0,0 +1,199 @@
+// framework.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+/*
+
+ simple portable regression system
+ */
+
+#include "../pch.h"
+
+#define ASSERT_THROWS(a,b) \
+ try { \
+ a; \
+ mongo::regression::assert_fail( #a , __FILE__ , __LINE__ ); \
+ } catch ( b& ){ \
+ mongo::regression::assert_pass(); \
+ }
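+// ASSERT_THROWS runs statement a and records a pass only if an exception of type b is caught;
+// if a completes without throwing, assert_fail() turns that into a test failure instead.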
+
+
+
+#define ASSERT_EQUALS(a,b) (mongo::regression::MyAsserts( #a , #b , __FILE__ , __LINE__ ) ).ae( (a) , (b) )
+#define ASSERT_NOT_EQUALS(a,b) (mongo::regression::MyAsserts( #a , #b , __FILE__ , __LINE__ ) ).nae( (a) , (b) )
+
+#define ASSERT(x) (void)( (!(!(x))) ? mongo::regression::assert_pass() : mongo::regression::assert_fail( #x , __FILE__ , __LINE__ ) )
+#define FAIL(x) mongo::regression::fail( #x , __FILE__ , __LINE__ )
+
+#include "../db/instance.h"
+
+namespace mongo {
+
+ namespace regression {
+
+ class Result;
+
+ class TestCase {
+ public:
+ virtual ~TestCase() {}
+ virtual void run() = 0;
+ virtual string getName() = 0;
+ };
+
+ template< class T >
+ class TestHolderBase : public TestCase {
+ public:
+ TestHolderBase() {}
+ virtual ~TestHolderBase() {}
+ virtual void run() {
+ auto_ptr<T> t;
+ t.reset( create() );
+ t->run();
+ }
+ virtual T * create() = 0;
+ virtual string getName() {
+ return demangleName( typeid(T) );
+ }
+ };
+
+ template< class T >
+ class TestHolder0 : public TestHolderBase<T> {
+ public:
+ virtual T * create() {
+ return new T();
+ }
+ };
+
+ template< class T , typename A >
+ class TestHolder1 : public TestHolderBase<T> {
+ public:
+ TestHolder1( const A& a ) : _a(a) {}
+ virtual T * create() {
+ return new T( _a );
+ }
+ const A& _a;
+ };
+
+ class Suite {
+ public:
+ Suite( string name ) : _name( name ) {
+ registerSuite( name , this );
+ _ran = 0;
+ }
+
+ virtual ~Suite() {
+ if ( _ran ) {
+ DBDirectClient c;
+ c.dropDatabase( "unittests" );
+ }
+ }
+
+ template<class T>
+ void add() {
+ _tests.push_back( new TestHolder0<T>() );
+ }
+
+ template<class T , typename A >
+ void add( const A& a ) {
+ _tests.push_back( new TestHolder1<T,A>(a) );
+ }
+
+ Result * run( const string& filter );
+
+ static int run( vector<string> suites , const string& filter );
+ static int run( int argc , char ** argv , string default_dbpath );
+
+
+ protected:
+ virtual void setupTests() = 0;
+
+ private:
+ string _name;
+ list<TestCase*> _tests;
+ bool _ran;
+
+ static map<string,Suite*> * _suites;
+
+ void registerSuite( string name , Suite * s );
+ };
+
+ void assert_pass();
+ void assert_fail( const char * exp , const char * file , unsigned line );
+ void fail( const char * exp , const char * file , unsigned line );
+
+ class MyAssertionException : boost::noncopyable {
+ public:
+ MyAssertionException() {
+ ss << "assertion: ";
+ }
+ stringstream ss;
+ };
+
+
+
+ class MyAsserts {
+ public:
+ MyAsserts( const char * aexp , const char * bexp , const char * file , unsigned line )
+ : _aexp( aexp ) , _bexp( bexp ) , _file( file ) , _line( line ) {
+
+ }
+
+ template<typename A,typename B>
+ void ae( A a , B b ) {
+ _gotAssert();
+ if ( a == b )
+ return;
+
+ printLocation();
+
+ MyAssertionException * e = getBase();
+ e->ss << a << " != " << b << endl;
+ log() << e->ss.str() << endl;
+ throw e;
+ }
+
+ template<typename A,typename B>
+ void nae( A a , B b ) {
+ _gotAssert();
+ if ( a != b )
+ return;
+
+ printLocation();
+
+ MyAssertionException * e = getBase();
+ e->ss << a << " == " << b << endl;
+ log() << e->ss.str() << endl;
+ throw e;
+ }
+
+
+ void printLocation();
+
+ private:
+
+ void _gotAssert();
+
+ MyAssertionException * getBase();
+
+ string _aexp;
+ string _bexp;
+ string _file;
+ unsigned _line;
+ };
+
+ }
+}
diff --git a/src/mongo/dbtests/histogram_test.cpp b/src/mongo/dbtests/histogram_test.cpp
new file mode 100644
index 00000000000..e9cbb5bdf25
--- /dev/null
+++ b/src/mongo/dbtests/histogram_test.cpp
@@ -0,0 +1,94 @@
+// histogramtests.cpp : histogram.{h,cpp} unit tests
+
+/**
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "../pch.h"
+
+#include "dbtests.h"
+#include "../util/histogram.h"
+
+namespace mongo {
+
+ using mongo::Histogram;
+
+ class BoundariesInit {
+ public:
+ void run() {
+ Histogram::Options opts;
+ opts.numBuckets = 3;
+ opts.bucketSize = 10;
+ Histogram h( opts );
+
+ ASSERT_EQUALS( h.getBucketsNum(), 3u );
+
+ ASSERT_EQUALS( h.getCount( 0 ), 0u );
+ ASSERT_EQUALS( h.getCount( 1 ), 0u );
+ ASSERT_EQUALS( h.getCount( 2 ), 0u );
+
+ ASSERT_EQUALS( h.getBoundary( 0 ), 10u );
+ ASSERT_EQUALS( h.getBoundary( 1 ), 20u );
+ ASSERT_EQUALS( h.getBoundary( 2 ), numeric_limits<uint32_t>::max() );
+ }
+ };
+
+ class BoundariesExponential {
+ public:
+ void run() {
+ Histogram::Options opts;
+ opts.numBuckets = 4;
+ opts.bucketSize = 125;
+ opts.exponential = true;
+ Histogram h( opts );
+
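+                // with exponential buckets each boundary doubles the previous one: 125, 250, 500, then max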
+ ASSERT_EQUALS( h.getBoundary( 0 ), 125u );
+ ASSERT_EQUALS( h.getBoundary( 1 ), 250u );
+ ASSERT_EQUALS( h.getBoundary( 2 ), 500u );
+ ASSERT_EQUALS( h.getBoundary( 3 ), numeric_limits<uint32_t>::max() );
+ }
+ };
+
+ class BoundariesFind {
+ public:
+ void run() {
+ Histogram::Options opts;
+ opts.numBuckets = 3;
+ opts.bucketSize = 10;
+ Histogram h( opts );
+
+ h.insert( 10 ); // end of first bucket
+ h.insert( 15 ); // second bucket
+ h.insert( 18 ); // second bucket
+
+ ASSERT_EQUALS( h.getCount( 0 ), 1u );
+ ASSERT_EQUALS( h.getCount( 1 ), 2u );
+ ASSERT_EQUALS( h.getCount( 2 ), 0u );
+ }
+ };
+
+ class HistogramSuite : public Suite {
+ public:
+ HistogramSuite() : Suite( "histogram" ) {}
+
+ void setupTests() {
+ add< BoundariesInit >();
+ add< BoundariesExponential >();
+ add< BoundariesFind >();
+ // TODO: complete the test suite
+ }
+ } histogramSuite;
+
+} // anonymous namespace
diff --git a/src/mongo/dbtests/jsobjtests.cpp b/src/mongo/dbtests/jsobjtests.cpp
new file mode 100644
index 00000000000..709c013f6d8
--- /dev/null
+++ b/src/mongo/dbtests/jsobjtests.cpp
@@ -0,0 +1,2208 @@
+// jsobjtests.cpp - Tests for jsobj.{h,cpp} code
+//
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../bson/util/builder.h"
+#include "../db/jsobj.h"
+#include "../db/jsobjmanipulator.h"
+#include "../db/json.h"
+#include "../db/repl.h"
+#include "../db/extsort.h"
+#include "dbtests.h"
+#include "../util/mongoutils/checksum.h"
+#include "../db/key.h"
+#include "../db/btree.h"
+
+namespace JsobjTests {
+
+ IndexInterface& indexInterfaceForTheseTests = (time(0)%2) ? *IndexDetails::iis[0] : *IndexDetails::iis[1];
+
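+    // keyTest round-trips o through the KeyV1 index-key format and checks that the result compares
+    // consistently with the original BSON, both against itself and against the previously tested object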
+ void keyTest(const BSONObj& o, bool mustBeCompact = false) {
+ static KeyV1Owned *kLast;
+ static BSONObj last;
+
+ KeyV1Owned *key = new KeyV1Owned(o);
+ KeyV1Owned& k = *key;
+
+ ASSERT( !mustBeCompact || k.isCompactFormat() );
+
+ BSONObj x = k.toBson();
+ int res = o.woCompare(x, BSONObj(), /*considerfieldname*/false);
+ if( res ) {
+ cout << o.toString() << endl;
+ k.toBson();
+ cout << x.toString() << endl;
+ o.woCompare(x, BSONObj(), /*considerfieldname*/false);
+ ASSERT( res == 0 );
+ }
+ ASSERT( k.woEqual(k) );
+ ASSERT( !k.isCompactFormat() || k.dataSize() < o.objsize() );
+
+ {
+ // check BSONObj::equal. this part not a KeyV1 test.
+ int res = o.woCompare(last);
+ ASSERT( (res==0) == o.equal(last) );
+ }
+
+ if( kLast ) {
+ int r1 = o.woCompare(last, BSONObj(), false);
+ int r2 = k.woCompare(*kLast, Ordering::make(BSONObj()));
+ bool ok = (r1<0 && r2<0) || (r1>0&&r2>0) || r1==r2;
+ if( !ok ) {
+ cout << "r1r2 " << r1 << ' ' << r2 << endl;
+ cout << "o:" << o.toString() << endl;
+ cout << "last:" << last.toString() << endl;
+ cout << "k:" << k.toString() << endl;
+ cout << "kLast:" << kLast->toString() << endl;
+ int r3 = k.woCompare(*kLast, Ordering::make(BSONObj()));
+ cout << r3 << endl;
+ }
+ ASSERT(ok);
+ if( k.isCompactFormat() && kLast->isCompactFormat() ) { // only check if not bson as bson woEqual is broken! (or was may2011)
+ if( k.woEqual(*kLast) != (r2 == 0) ) { // check woEqual matches
+ cout << r2 << endl;
+ cout << k.toString() << endl;
+ cout << kLast->toString() << endl;
+ k.woEqual(*kLast);
+ ASSERT(false);
+ }
+ }
+ }
+
+ delete kLast;
+ kLast = key;
+ last = o.getOwned();
+ }
+
+ class BufBuilderBasic {
+ public:
+ void run() {
+ {
+ BufBuilder b( 0 );
+ b.appendStr( "foo" );
+ ASSERT_EQUALS( 4, b.len() );
+ ASSERT( strcmp( "foo", b.buf() ) == 0 );
+ }
+ {
+ mongo::StackBufBuilder b;
+ b.appendStr( "foo" );
+ ASSERT_EQUALS( 4, b.len() );
+ ASSERT( strcmp( "foo", b.buf() ) == 0 );
+ }
+ }
+ };
+
+ class BSONElementBasic {
+ public:
+ void run() {
+ ASSERT_EQUALS( 1, BSONElement().size() );
+
+ BSONObj x;
+ ASSERT_EQUALS( 1, x.firstElement().size() );
+ }
+ };
+
+ namespace BSONObjTests {
+ class Create {
+ public:
+ void run() {
+ BSONObj b;
+ ASSERT_EQUALS( 0, b.nFields() );
+ }
+ };
+
+ class Base {
+ protected:
+ static BSONObj basic( const char *name, int val ) {
+ BSONObjBuilder b;
+ b.append( name, val );
+ return b.obj();
+ }
+ static BSONObj basic( const char *name, vector< int > val ) {
+ BSONObjBuilder b;
+ b.append( name, val );
+ return b.obj();
+ }
+ template< class T >
+ static BSONObj basic( const char *name, T val ) {
+ BSONObjBuilder b;
+ b.append( name, val );
+ return b.obj();
+ }
+ };
+
+ class WoCompareBasic : public Base {
+ public:
+ void run() {
+ ASSERT( basic( "a", 1 ).woCompare( basic( "a", 1 ) ) == 0 );
+ ASSERT( basic( "a", 2 ).woCompare( basic( "a", 1 ) ) > 0 );
+ ASSERT( basic( "a", 1 ).woCompare( basic( "a", 2 ) ) < 0 );
+ // field name comparison
+ ASSERT( basic( "a", 1 ).woCompare( basic( "b", 1 ) ) < 0 );
+ }
+ };
+
+ class NumericCompareBasic : public Base {
+ public:
+ void run() {
+ ASSERT( basic( "a", 1 ).woCompare( basic( "a", 1.0 ) ) == 0 );
+ }
+ };
+
+ class WoCompareEmbeddedObject : public Base {
+ public:
+ void run() {
+ ASSERT( basic( "a", basic( "b", 1 ) ).woCompare
+ ( basic( "a", basic( "b", 1.0 ) ) ) == 0 );
+ ASSERT( basic( "a", basic( "b", 1 ) ).woCompare
+ ( basic( "a", basic( "b", 2 ) ) ) < 0 );
+ }
+ };
+
+ class WoCompareEmbeddedArray : public Base {
+ public:
+ void run() {
+ vector< int > i;
+ i.push_back( 1 );
+ i.push_back( 2 );
+ vector< double > d;
+ d.push_back( 1 );
+ d.push_back( 2 );
+ ASSERT( basic( "a", i ).woCompare( basic( "a", d ) ) == 0 );
+
+ vector< int > j;
+ j.push_back( 1 );
+ j.push_back( 3 );
+ ASSERT( basic( "a", i ).woCompare( basic( "a", j ) ) < 0 );
+ }
+ };
+
+ class WoCompareOrdered : public Base {
+ public:
+ void run() {
+ ASSERT( basic( "a", 1 ).woCompare( basic( "a", 1 ), basic( "a", 1 ) ) == 0 );
+ ASSERT( basic( "a", 2 ).woCompare( basic( "a", 1 ), basic( "a", 1 ) ) > 0 );
+ ASSERT( basic( "a", 1 ).woCompare( basic( "a", 2 ), basic( "a", 1 ) ) < 0 );
+ ASSERT( basic( "a", 1 ).woCompare( basic( "a", 1 ), basic( "a", -1 ) ) == 0 );
+ ASSERT( basic( "a", 2 ).woCompare( basic( "a", 1 ), basic( "a", -1 ) ) < 0 );
+ ASSERT( basic( "a", 1 ).woCompare( basic( "a", 2 ), basic( "a", -1 ) ) > 0 );
+ }
+ };
+
+ class WoCompareDifferentLength : public Base {
+ public:
+ void run() {
+ ASSERT( BSON( "a" << 1 ).woCompare( BSON( "a" << 1 << "b" << 1 ) ) < 0 );
+ ASSERT( BSON( "a" << 1 << "b" << 1 ).woCompare( BSON( "a" << 1 ) ) > 0 );
+ }
+ };
+
+ class WoSortOrder : public Base {
+ public:
+ void run() {
+ ASSERT( BSON( "a" << 1 ).woSortOrder( BSON( "a" << 2 ), BSON( "b" << 1 << "a" << 1 ) ) < 0 );
+ ASSERT( fromjson( "{a:null}" ).woSortOrder( BSON( "b" << 1 ), BSON( "a" << 1 ) ) == 0 );
+ }
+ };
+
+ class MultiKeySortOrder : public Base {
+ public:
+ void run() {
+ ASSERT( BSON( "x" << "a" ).woCompare( BSON( "x" << "b" ) ) < 0 );
+ ASSERT( BSON( "x" << "b" ).woCompare( BSON( "x" << "a" ) ) > 0 );
+
+ ASSERT( BSON( "x" << "a" << "y" << "a" ).woCompare( BSON( "x" << "a" << "y" << "b" ) ) < 0 );
+ ASSERT( BSON( "x" << "a" << "y" << "a" ).woCompare( BSON( "x" << "b" << "y" << "a" ) ) < 0 );
+ ASSERT( BSON( "x" << "a" << "y" << "a" ).woCompare( BSON( "x" << "b" ) ) < 0 );
+
+ ASSERT( BSON( "x" << "c" ).woCompare( BSON( "x" << "b" << "y" << "h" ) ) > 0 );
+ ASSERT( BSON( "x" << "b" << "y" << "b" ).woCompare( BSON( "x" << "c" ) ) < 0 );
+
+ BSONObj key = BSON( "x" << 1 << "y" << 1 );
+
+ ASSERT( BSON( "x" << "c" ).woSortOrder( BSON( "x" << "b" << "y" << "h" ) , key ) > 0 );
+ ASSERT( BSON( "x" << "b" << "y" << "b" ).woCompare( BSON( "x" << "c" ) , key ) < 0 );
+
+ key = BSON( "" << 1 << "" << 1 );
+
+ ASSERT( BSON( "" << "c" ).woSortOrder( BSON( "" << "b" << "" << "h" ) , key ) > 0 );
+ ASSERT( BSON( "" << "b" << "" << "b" ).woCompare( BSON( "" << "c" ) , key ) < 0 );
+
+ {
+ // test a big key
+ string x(2000, 'z');
+ BSONObj o = BSON( "q" << x );
+ keyTest(o, false);
+ }
+ {
+ string y(200, 'w');
+ BSONObjBuilder b;
+ for( int i = 0; i < 10; i++ ) {
+ b.append("x", y);
+ }
+ keyTest(b.obj(), true);
+ }
+ {
+ double nan = numeric_limits<double>::quiet_NaN();
+ BSONObj o = BSON( "y" << nan );
+ keyTest(o);
+ }
+
+ {
+ BSONObjBuilder b;
+ b.append( "" , "c" );
+ b.appendNull( "" );
+ BSONObj o = b.obj();
+ keyTest(o);
+ ASSERT( o.woSortOrder( BSON( "" << "b" << "" << "h" ) , key ) > 0 );
+ ASSERT( BSON( "" << "b" << "" << "h" ).woSortOrder( o , key ) < 0 );
+
+ }
+
+ ASSERT( BSON( "" << "a" ).woCompare( BSON( "" << "a" << "" << "c" ) ) < 0 );
+ {
+ BSONObjBuilder b;
+ b.append( "" , "a" );
+ b.appendNull( "" );
+ ASSERT( b.obj().woCompare( BSON( "" << "a" << "" << "c" ) ) < 0 ); // SERVER-282
+ }
+
+ }
+ };
+
+ class TimestampTest : public Base {
+ public:
+ void run() {
+ Client *c = currentClient.get();
+ if( c == 0 ) {
+ Client::initThread("pretouchN");
+ c = &cc();
+ }
+ writelock lk(""); // for initTimestamp
+
+ BSONObjBuilder b;
+ b.appendTimestamp( "a" );
+ BSONObj o = b.done();
+ o.toString();
+ ASSERT( o.valid() );
+ ASSERT_EQUALS( Timestamp, o.getField( "a" ).type() );
+ BSONObjIterator i( o );
+ ASSERT( i.moreWithEOO() );
+ ASSERT( i.more() );
+
+ BSONElement e = i.next();
+ ASSERT_EQUALS( Timestamp, e.type() );
+ ASSERT( i.moreWithEOO() );
+ ASSERT( ! i.more() );
+
+ e = i.next();
+ ASSERT( e.eoo() );
+
+ OpTime before = OpTime::now();
+ BSONElementManipulator( o.firstElement() ).initTimestamp();
+ OpTime after = OpTime::now();
+
+ OpTime test = OpTime( o.firstElement().date() );
+ ASSERT( before < test && test < after );
+
+ BSONElementManipulator( o.firstElement() ).initTimestamp();
+ test = OpTime( o.firstElement().date() );
+ ASSERT( before < test && test < after );
+
+ OpTime x(123,456);
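+                // OpTime packs the seconds into the high 32 bits and the increment into the low 32 bits,
+                // so asLL() here is (123LL << 32) | 456 == 528280977864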
+ ASSERT_EQUALS( 528280977864LL , x.asLL() );
+ }
+ };
+
+ class Nan : public Base {
+ public:
+ void run() {
+ double inf = numeric_limits< double >::infinity();
+ double nan = numeric_limits< double >::quiet_NaN();
+ double nan2 = numeric_limits< double >::signaling_NaN();
+ ASSERT( isNaN(nan) );
+ ASSERT( isNaN(nan2) );
+ ASSERT( !isNaN(inf) );
+
+ ASSERT( BSON( "a" << inf ).woCompare( BSON( "a" << inf ) ) == 0 );
+ ASSERT( BSON( "a" << inf ).woCompare( BSON( "a" << 1 ) ) > 0 );
+ ASSERT( BSON( "a" << 1 ).woCompare( BSON( "a" << inf ) ) < 0 );
+
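+                // as checked below, NaN compares equal to NaN and sorts below finite numbers, while infinity sorts above NaN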
+ ASSERT( BSON( "a" << nan ).woCompare( BSON( "a" << nan ) ) == 0 );
+ ASSERT( BSON( "a" << nan ).woCompare( BSON( "a" << 1 ) ) < 0 );
+
+ ASSERT( BSON( "a" << nan ).woCompare( BSON( "a" << 5000000000LL ) ) < 0 );
+
+ {
+ KeyV1Owned a( BSON( "a" << nan ) );
+ KeyV1Owned b( BSON( "a" << 1 ) );
+ Ordering o = Ordering::make(BSON("a"<<1));
+ ASSERT( a.woCompare(b, o) < 0 );
+ }
+
+ ASSERT( BSON( "a" << 1 ).woCompare( BSON( "a" << nan ) ) > 0 );
+
+ ASSERT( BSON( "a" << nan2 ).woCompare( BSON( "a" << nan2 ) ) == 0 );
+ ASSERT( BSON( "a" << nan2 ).woCompare( BSON( "a" << 1 ) ) < 0 );
+ ASSERT( BSON( "a" << 1 ).woCompare( BSON( "a" << nan2 ) ) > 0 );
+
+ ASSERT( BSON( "a" << inf ).woCompare( BSON( "a" << nan ) ) > 0 );
+ ASSERT( BSON( "a" << inf ).woCompare( BSON( "a" << nan2 ) ) > 0 );
+ ASSERT( BSON( "a" << nan ).woCompare( BSON( "a" << nan2 ) ) == 0 );
+ }
+ };
+
+ class AsTempObj {
+ public:
+ void run() {
+ {
+ BSONObjBuilder bb;
+ bb << "a" << 1;
+ BSONObj tmp = bb.asTempObj();
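+                    // expected size: 4-byte object length + (1 type byte + 2 bytes for "a\0" + 4-byte int) + 1 EOO byte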
+ ASSERT(tmp.objsize() == 4+(1+2+4)+1);
+ ASSERT(tmp.valid());
+ ASSERT(tmp.hasField("a"));
+ ASSERT(!tmp.hasField("b"));
+ ASSERT(tmp == BSON("a" << 1));
+
+ bb << "b" << 2;
+ BSONObj obj = bb.obj();
+ ASSERT_EQUALS(obj.objsize() , 4+(1+2+4)+(1+2+4)+1);
+ ASSERT(obj.valid());
+ ASSERT(obj.hasField("a"));
+ ASSERT(obj.hasField("b"));
+ ASSERT(obj == BSON("a" << 1 << "b" << 2));
+ }
+ {
+ BSONObjBuilder bb;
+ bb << "a" << GT << 1;
+ BSONObj tmp = bb.asTempObj();
+ ASSERT(tmp.objsize() == 4+(1+2+(4+1+4+4+1))+1);
+ ASSERT(tmp.valid());
+ ASSERT(tmp.hasField("a"));
+ ASSERT(!tmp.hasField("b"));
+ ASSERT(tmp == BSON("a" << BSON("$gt" << 1)));
+
+ bb << "b" << LT << 2;
+ BSONObj obj = bb.obj();
+ ASSERT(obj.objsize() == 4+(1+2+(4+1+4+4+1))+(1+2+(4+1+4+4+1))+1);
+ ASSERT(obj.valid());
+ ASSERT(obj.hasField("a"));
+ ASSERT(obj.hasField("b"));
+ ASSERT(obj == BSON("a" << BSON("$gt" << 1)
+ << "b" << BSON("$lt" << 2)));
+ }
+ {
+ BSONObjBuilder bb(32);
+ bb << "a" << 1;
+ BSONObj tmp = bb.asTempObj();
+ ASSERT(tmp.objsize() == 4+(1+2+4)+1);
+ ASSERT(tmp.valid());
+ ASSERT(tmp.hasField("a"));
+ ASSERT(!tmp.hasField("b"));
+ ASSERT(tmp == BSON("a" << 1));
+
+ //force a realloc
+ BSONArrayBuilder arr;
+ for (int i=0; i < 10000; i++) {
+ arr << i;
+ }
+ bb << "b" << arr.arr();
+ BSONObj obj = bb.obj();
+ ASSERT(obj.valid());
+ ASSERT(obj.hasField("a"));
+ ASSERT(obj.hasField("b"));
+ }
+ }
+ };
+
+ struct AppendIntOrLL {
+ void run() {
+ const long long billion = 1000*1000*1000;
+
+ long long n = 0x3333111122224444LL;
+ {
+ double d = (double) n;
+ BSONObj a = BSON( "x" << n );
+ BSONObj b = BSON( "x" << d );
+
+ long long back = (long long) d;
+//3717
+////// int res = a.woCompare(b);
+
+ ASSERT( n > back );
+ //ASSERT( res > 0 ); // SERVER-3717
+
+ keyTest(a, false);
+
+ KeyV1Owned A(a);
+ KeyV1Owned B(b);
+//3717
+////// int res2 = A.woCompare(B, Ordering::make(BSONObj()));
+ // ASSERT( res2 > 0 ); // SERVER-3717
+
+ // fixing requires an index v# change.
+
+ cout << "todo fix SERVER-3717 and uncomment test in AppendIntOrLL" << endl;
+
+ n++;
+ }
+
+ {
+ BSONObjBuilder b;
+ b.appendIntOrLL("L4", -4*billion);
+ keyTest(b.obj());
+ keyTest( BSON("" << billion) );
+ }
+
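+            // appendIntOrLL stores values that fit in 32 bits as NumberInt and wider values as
+            // NumberLong, as the type checks below verify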
+ BSONObjBuilder b;
+ b.appendIntOrLL("i1", 1);
+ b.appendIntOrLL("i2", -1);
+ b.appendIntOrLL("i3", 1*billion);
+ b.appendIntOrLL("i4", -1*billion);
+
+ b.appendIntOrLL("L1", 2*billion);
+ b.appendIntOrLL("L2", -2*billion);
+ b.appendIntOrLL("L3", 4*billion);
+ b.appendIntOrLL("L4", -4*billion);
+ b.appendIntOrLL("L5", 16*billion);
+ b.appendIntOrLL("L6", -16*billion);
+
+ BSONObj o = b.obj();
+ keyTest(o);
+
+ ASSERT(o["i1"].type() == NumberInt);
+ ASSERT(o["i1"].number() == 1);
+ ASSERT(o["i2"].type() == NumberInt);
+ ASSERT(o["i2"].number() == -1);
+ ASSERT(o["i3"].type() == NumberInt);
+ ASSERT(o["i3"].number() == 1*billion);
+ ASSERT(o["i4"].type() == NumberInt);
+ ASSERT(o["i4"].number() == -1*billion);
+
+ ASSERT(o["L1"].type() == NumberLong);
+ ASSERT(o["L1"].number() == 2*billion);
+ ASSERT(o["L2"].type() == NumberLong);
+ ASSERT(o["L2"].number() == -2*billion);
+ ASSERT(o["L3"].type() == NumberLong);
+ ASSERT(o["L3"].number() == 4*billion);
+ ASSERT(o["L4"].type() == NumberLong);
+ ASSERT(o["L4"].number() == -4*billion);
+ ASSERT(o["L5"].type() == NumberLong);
+ ASSERT(o["L5"].number() == 16*billion);
+ ASSERT(o["L6"].type() == NumberLong);
+ ASSERT(o["L6"].number() == -16*billion);
+ }
+ };
+
+ struct AppendNumber {
+ void run() {
+ BSONObjBuilder b;
+ b.appendNumber( "a" , 5 );
+ b.appendNumber( "b" , 5.5 );
+ b.appendNumber( "c" , (1024LL*1024*1024)-1 );
+ b.appendNumber( "d" , (1024LL*1024*1024*1024)-1 );
+ b.appendNumber( "e" , 1024LL*1024*1024*1024*1024*1024 );
+
+ BSONObj o = b.obj();
+ keyTest(o);
+
+ ASSERT( o["a"].type() == NumberInt );
+ ASSERT( o["b"].type() == NumberDouble );
+ ASSERT( o["c"].type() == NumberInt );
+ ASSERT( o["d"].type() == NumberDouble );
+ ASSERT( o["e"].type() == NumberLong );
+
+ }
+ };
+
+ class ToStringArray {
+ public:
+ void run() {
+ string spec = "{ a: [ \"a\", \"b\" ] }";
+ ASSERT_EQUALS( spec, fromjson( spec ).toString() );
+
+ BSONObj x = BSON( "a" << "astring" << "b" << "str" );
+ keyTest(x);
+ keyTest(x);
+ BSONObj y = BSON( "a" << "astring" << "b" << "stra" );
+ keyTest(y);
+ y = BSON( "a" << "" );
+ keyTest(y);
+
+ keyTest( BSON("abc" << true ) );
+ keyTest( BSON("abc" << false ) );
+ keyTest( BSON("abc" << false << "b" << true ) );
+
+ Date_t now = jsTime();
+ keyTest( BSON("" << now << "" << 3 << "" << jstNULL << "" << true) );
+ keyTest( BSON("" << now << "" << 3 << "" << BSONObj() << "" << true) );
+
+ {
+ {
+ // check signed dates with new key format
+ KeyV1Owned a( BSONObjBuilder().appendDate("", -50).obj() );
+ KeyV1Owned b( BSONObjBuilder().appendDate("", 50).obj() );
+ ASSERT( a.woCompare(b, Ordering::make(BSONObj())) < 0 );
+ }
+ {
+ // backward compatibility
+ KeyBson a( BSONObjBuilder().appendDate("", -50).obj() );
+ KeyBson b( BSONObjBuilder().appendDate("", 50).obj() );
+ ASSERT( a.woCompare(b, Ordering::make(BSONObj())) > 0 );
+ }
+ {
+ // this is an uncompactible key:
+ BSONObj uc1 = BSONObjBuilder().appendDate("", -50).appendCode("", "abc").obj();
+ BSONObj uc2 = BSONObjBuilder().appendDate("", 55).appendCode("", "abc").obj();
+ ASSERT( uc1.woCompare(uc2, Ordering::make(BSONObj())) < 0 );
+ {
+ KeyV1Owned a(uc1);
+ KeyV1Owned b(uc2);
+ ASSERT( !a.isCompactFormat() );
+ ASSERT( a.woCompare(b, Ordering::make(BSONObj())) < 0 );
+ }
+ {
+ KeyBson a(uc1);
+ KeyBson b(uc2);
+ ASSERT( !a.isCompactFormat() );
+ ASSERT( a.woCompare(b, Ordering::make(BSONObj())) > 0 );
+ }
+ }
+ }
+
+ {
+ BSONObjBuilder b;
+ b.appendBinData("f", 8, (BinDataType) 1, "aaaabbbb");
+ b.appendBinData("e", 3, (BinDataType) 1, "aaa");
+ b.appendBinData("b", 1, (BinDataType) 1, "x");
+ BSONObj o = b.obj();
+ keyTest( o, true );
+ }
+
+ {
+ // check (non)equality
+ BSONObj a = BSONObjBuilder().appendBinData("", 8, (BinDataType) 1, "abcdefgh").obj();
+ BSONObj b = BSONObjBuilder().appendBinData("", 8, (BinDataType) 1, "abcdefgj").obj();
+ ASSERT( !a.equal(b) );
+ int res_ab = a.woCompare(b);
+ ASSERT( res_ab != 0 );
+ keyTest( a, true );
+ keyTest( b, true );
+
+ // check subtypes do not equal
+ BSONObj c = BSONObjBuilder().appendBinData("", 8, (BinDataType) 4, "abcdefgh").obj();
+ BSONObj d = BSONObjBuilder().appendBinData("", 8, (BinDataType) 0x81, "abcdefgh").obj();
+ ASSERT( !a.equal(c) );
+ int res_ac = a.woCompare(c);
+ ASSERT( res_ac != 0 );
+ keyTest( c, true );
+ ASSERT( !a.equal(d) );
+ int res_ad = a.woCompare(d);
+ ASSERT( res_ad != 0 );
+ keyTest( d, true );
+
+ KeyV1Owned A(a);
+ KeyV1Owned B(b);
+ KeyV1Owned C(c);
+ KeyV1Owned D(d);
+ ASSERT( !A.woEqual(B) );
+ ASSERT( A.woCompare(B, Ordering::make(BSONObj())) < 0 && res_ab < 0 );
+ ASSERT( !A.woEqual(C) );
+ ASSERT( A.woCompare(C, Ordering::make(BSONObj())) < 0 && res_ac < 0 );
+ ASSERT( !A.woEqual(D) );
+ ASSERT( A.woCompare(D, Ordering::make(BSONObj())) < 0 && res_ad < 0 );
+ }
+
+ {
+ BSONObjBuilder b;
+ b.appendBinData("f", 33, (BinDataType) 1, "123456789012345678901234567890123");
+ BSONObj o = b.obj();
+ keyTest( o, false );
+ }
+
+ {
+ for( int i = 1; i <= 3; i++ ) {
+ for( int j = 1; j <= 3; j++ ) {
+ BSONObjBuilder b;
+ b.appendBinData("f", i, (BinDataType) j, "abc");
+ BSONObj o = b.obj();
+ keyTest( o, j != ByteArrayDeprecated );
+ }
+ }
+ }
+
+ {
+ BSONObjBuilder b;
+ b.appendBinData("f", 1, (BinDataType) 133, "a");
+ BSONObj o = b.obj();
+ keyTest( o, true );
+ }
+
+ {
+ BSONObjBuilder b;
+ b.append("AA", 3);
+ b.appendBinData("f", 0, (BinDataType) 0, "");
+ b.appendBinData("e", 3, (BinDataType) 7, "aaa");
+ b.appendBinData("b", 1, (BinDataType) 128, "x");
+ b.append("z", 3);
+ b.appendBinData("bb", 0, (BinDataType) 129, "x");
+ BSONObj o = b.obj();
+ keyTest( o, true );
+ }
+
+ {
+ // 9 is not supported in compact format. so test a non-compact case here.
+ BSONObjBuilder b;
+ b.appendBinData("f", 9, (BinDataType) 0, "aaaabbbbc");
+ BSONObj o = b.obj();
+ keyTest( o );
+ }
+ }
+ };
+
+ class ToStringNumber {
+ public:
+
+ void run() {
+ BSONObjBuilder b;
+ b.append( "a" , (int)4 );
+ b.append( "b" , (double)5 );
+ b.append( "c" , (long long)6 );
+
+ b.append( "d" , 123.456789123456789123456789123456789 );
+ b.append( "e" , 123456789.123456789123456789123456789 );
+ b.append( "f" , 1234567891234567891234.56789123456789 );
+
+ b.append( "g" , -123.456 );
+
+ BSONObj x = b.obj();
+ keyTest(x);
+
+ ASSERT_EQUALS( "4", x["a"].toString( false , true ) );
+ ASSERT_EQUALS( "5.0", x["b"].toString( false , true ) );
+ ASSERT_EQUALS( "6", x["c"].toString( false , true ) );
+
+ ASSERT_EQUALS( "123.4567891234568" , x["d"].toString( false , true ) );
+ ASSERT_EQUALS( "123456789.1234568" , x["e"].toString( false , true ) );
+ // ASSERT_EQUALS( "1.234567891234568e+21" , x["f"].toString( false , true ) ); // windows and *nix are different - TODO, work around for test or not bother?
+
+ ASSERT_EQUALS( "-123.456" , x["g"].toString( false , true ) );
+
+ }
+ };
+
+ class NullString {
+ public:
+ void run() {
+ {
+ BSONObjBuilder b;
+ const char x[] = {'a', 0, 'b', 0};
+ b.append("field", x, 4);
+ b.append("z", true);
+ BSONObj B = b.obj();
+ //cout << B.toString() << endl;
+
+ BSONObjBuilder a;
+ const char xx[] = {'a', 0, 'c', 0};
+ a.append("field", xx, 4);
+ a.append("z", true);
+ BSONObj A = a.obj();
+
+ BSONObjBuilder c;
+ const char xxx[] = {'a', 0, 'c', 0, 0};
+ c.append("field", xxx, 5);
+ c.append("z", true);
+ BSONObj C = c.obj();
+
+ // test that nulls are ok within bson strings
+ ASSERT( !(A == B) );
+ ASSERT( A > B );
+
+ ASSERT( !(B == C) );
+ ASSERT( C > B );
+
+ // check iteration is ok
+ ASSERT( B["z"].Bool() && A["z"].Bool() && C["z"].Bool() );
+ }
+
+ BSONObjBuilder b;
+ b.append("a", "a\0b", 4);
+ string z("a\0b", 3);
+ b.append("b", z);
+ b.appendAs(b.asTempObj()["a"], "c");
+ BSONObj o = b.obj();
+ keyTest(o);
+
+ stringstream ss;
+ ss << 'a' << '\0' << 'b';
+
+ ASSERT_EQUALS(o["a"].valuestrsize(), 3+1);
+ ASSERT_EQUALS(o["a"].str(), ss.str());
+
+ ASSERT_EQUALS(o["b"].valuestrsize(), 3+1);
+ ASSERT_EQUALS(o["b"].str(), ss.str());
+
+ ASSERT_EQUALS(o["c"].valuestrsize(), 3+1);
+ ASSERT_EQUALS(o["c"].str(), ss.str());
+
+ }
+
+ };
+
+ class AppendAs {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ {
+ BSONObj foo = BSON( "foo" << 1 );
+ b.appendAs( foo.firstElement(), "bar" );
+ }
+ ASSERT_EQUALS( BSON( "bar" << 1 ), b.done() );
+ }
+ };
+
+ class ArrayAppendAs {
+ public:
+ void run() {
+ BSONArrayBuilder b;
+ {
+ BSONObj foo = BSON( "foo" << 1 );
+ b.appendAs( foo.firstElement(), "3" );
+ }
+ BSONArray a = b.arr();
+ BSONObj expected = BSON( "3" << 1 );
+ ASSERT_EQUALS( expected.firstElement(), a[ 3 ] );
+ ASSERT_EQUALS( 4, a.nFields() );
+ }
+ };
+
+ class GetField {
+ public:
+ void run(){
+ BSONObj o = BSON( "a" << 1 <<
+ "b" << BSON( "a" << 2 ) <<
+ "c" << BSON_ARRAY( BSON( "a" << 3 ) << BSON( "a" << 4 ) ) );
+ ASSERT_EQUALS( 1 , o.getFieldDotted( "a" ).numberInt() );
+ ASSERT_EQUALS( 2 , o.getFieldDotted( "b.a" ).numberInt() );
+ ASSERT_EQUALS( 3 , o.getFieldDotted( "c.0.a" ).numberInt() );
+ ASSERT_EQUALS( 4 , o.getFieldDotted( "c.1.a" ).numberInt() );
+ keyTest(o);
+ }
+ };
+
+ namespace Validation {
+
+ class Base {
+ public:
+ virtual ~Base() {}
+ void run() {
+ ASSERT( valid().valid() );
+ ASSERT( !invalid().valid() );
+ }
+ protected:
+ virtual BSONObj valid() const { return BSONObj(); }
+ virtual BSONObj invalid() const { return BSONObj(); }
+ static char get( const BSONObj &o, int i ) {
+ return o.objdata()[ i ];
+ }
+ static void set( BSONObj &o, int i, char c ) {
+ const_cast< char * >( o.objdata() )[ i ] = c;
+ }
+ };
+
+ class BadType : public Base {
+ BSONObj valid() const {
+ return fromjson( "{\"a\":1}" );
+ }
+ BSONObj invalid() const {
+ BSONObj ret = valid();
+ set( ret, 4, 50 );
+ return ret;
+ }
+ };
+
+ class EooBeforeEnd : public Base {
+ BSONObj valid() const {
+ return fromjson( "{\"a\":1}" );
+ }
+ BSONObj invalid() const {
+ BSONObj ret = valid();
+ // (first byte of size)++
+ set( ret, 0, get( ret, 0 ) + 1 );
+ // re-read size for BSONObj::details
+ return ret.copy();
+ }
+ };
+
+ class Undefined : public Base {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendNull( "a" );
+ BSONObj o = b.done();
+ set( o, 4, mongo::Undefined );
+ ASSERT( o.valid() );
+ }
+ };
+
+ class TotalSizeTooSmall : public Base {
+ BSONObj valid() const {
+ return fromjson( "{\"a\":1}" );
+ }
+ BSONObj invalid() const {
+ BSONObj ret = valid();
+ // (first byte of size)--
+ set( ret, 0, get( ret, 0 ) - 1 );
+ // re-read size for BSONObj::details
+ return ret.copy();
+ }
+ };
+
+ class EooMissing : public Base {
+ BSONObj valid() const {
+ return fromjson( "{\"a\":1}" );
+ }
+ BSONObj invalid() const {
+ BSONObj ret = valid();
+ set( ret, ret.objsize() - 1, (char) 0xff );
+ // (first byte of size)--
+ set( ret, 0, get( ret, 0 ) - 1 );
+ // re-read size for BSONObj::details
+ return ret.copy();
+ }
+ };
+
+ class WrongStringSize : public Base {
+ BSONObj valid() const {
+ return fromjson( "{\"a\":\"b\"}" );
+ }
+ BSONObj invalid() const {
+ BSONObj ret = valid();
+ ASSERT_EQUALS( ret.firstElement().valuestr()[0] , 'b' );
+ ASSERT_EQUALS( ret.firstElement().valuestr()[1] , 0 );
+ ((char*)ret.firstElement().valuestr())[1] = 1;
+ return ret.copy();
+ }
+ };
+
+ class ZeroStringSize : public Base {
+ BSONObj valid() const {
+ return fromjson( "{\"a\":\"b\"}" );
+ }
+ BSONObj invalid() const {
+ BSONObj ret = valid();
+ set( ret, 7, 0 );
+ return ret;
+ }
+ };
+
+ class NegativeStringSize : public Base {
+ BSONObj valid() const {
+ return fromjson( "{\"a\":\"b\"}" );
+ }
+ BSONObj invalid() const {
+ BSONObj ret = valid();
+ set( ret, 10, -100 );
+ return ret;
+ }
+ };
+
+ class WrongSubobjectSize : public Base {
+ BSONObj valid() const {
+ return fromjson( "{\"a\":{\"b\":1}}" );
+ }
+ BSONObj invalid() const {
+ BSONObj ret = valid();
+ set( ret, 0, get( ret, 0 ) + 1 );
+ set( ret, 7, get( ret, 7 ) + 1 );
+ return ret.copy();
+ }
+ };
+
+ class WrongDbrefNsSize : public Base {
+ BSONObj valid() const {
+ return fromjson( "{ \"a\": Dbref( \"b\", \"ffffffffffffffffffffffff\" ) }" );
+ }
+ BSONObj invalid() const {
+ BSONObj ret = valid();
+ set( ret, 0, get( ret, 0 ) + 1 );
+ set( ret, 7, get( ret, 7 ) + 1 );
+ return ret.copy();
+                }
+ };
+
+ class NoFieldNameEnd : public Base {
+ BSONObj valid() const {
+ return fromjson( "{\"a\":1}" );
+ }
+ BSONObj invalid() const {
+ BSONObj ret = valid();
+ memset( const_cast< char * >( ret.objdata() ) + 5, 0xff, ret.objsize() - 5 );
+ return ret;
+ }
+ };
+
+ class BadRegex : public Base {
+ BSONObj valid() const {
+ return fromjson( "{\"a\":/c/i}" );
+ }
+ BSONObj invalid() const {
+ BSONObj ret = valid();
+ memset( const_cast< char * >( ret.objdata() ) + 7, 0xff, ret.objsize() - 7 );
+ return ret;
+ }
+ };
+
+ class BadRegexOptions : public Base {
+ BSONObj valid() const {
+ return fromjson( "{\"a\":/c/i}" );
+ }
+ BSONObj invalid() const {
+ BSONObj ret = valid();
+ memset( const_cast< char * >( ret.objdata() ) + 9, 0xff, ret.objsize() - 9 );
+ return ret;
+ }
+ };
+
+ class CodeWScopeBase : public Base {
+ BSONObj valid() const {
+ BSONObjBuilder b;
+ BSONObjBuilder scope;
+ scope.append( "a", "b" );
+ b.appendCodeWScope( "c", "d", scope.done() );
+ return b.obj();
+ }
+ BSONObj invalid() const {
+ BSONObj ret = valid();
+ modify( ret );
+ return ret;
+ }
+ protected:
+ virtual void modify( BSONObj &o ) const = 0;
+ };
+
+ class CodeWScopeSmallSize : public CodeWScopeBase {
+ void modify( BSONObj &o ) const {
+ set( o, 7, 7 );
+ }
+ };
+
+ class CodeWScopeZeroStrSize : public CodeWScopeBase {
+ void modify( BSONObj &o ) const {
+ set( o, 11, 0 );
+ }
+ };
+
+ class CodeWScopeSmallStrSize : public CodeWScopeBase {
+ void modify( BSONObj &o ) const {
+ set( o, 11, 1 );
+ }
+ };
+
+ class CodeWScopeNoSizeForObj : public CodeWScopeBase {
+ void modify( BSONObj &o ) const {
+ set( o, 7, 13 );
+ }
+ };
+
+ class CodeWScopeSmallObjSize : public CodeWScopeBase {
+ void modify( BSONObj &o ) const {
+ set( o, 17, 1 );
+ }
+ };
+
+ class CodeWScopeBadObject : public CodeWScopeBase {
+ void modify( BSONObj &o ) const {
+ set( o, 21, JSTypeMax + 1 );
+ }
+ };
+
+ class NoSize {
+ public:
+ NoSize( BSONType type ) : type_( type ) {}
+ void run() {
+ const char data[] = { 0x07, 0x00, 0x00, 0x00, char( type_ ), 'a', 0x00 };
+ BSONObj o( data );
+ ASSERT( !o.valid() );
+ }
+ private:
+ BSONType type_;
+ };
+
+ // Randomized BSON parsing test. See if we seg fault.
+ class Fuzz {
+ public:
+ Fuzz( double frequency ) : frequency_( frequency ) {}
+ void run() {
+ BSONObj a = fromjson( "{\"a\": 1, \"b\": \"c\"}" );
+ fuzz( a );
+ a.valid();
+
+ BSONObj b = fromjson( "{\"one\":2, \"two\":5, \"three\": {},"
+ "\"four\": { \"five\": { \"six\" : 11 } },"
+ "\"seven\": [ \"a\", \"bb\", \"ccc\", 5 ],"
+ "\"eight\": Dbref( \"rrr\", \"01234567890123456789aaaa\" ),"
+ "\"_id\": ObjectId( \"deadbeefdeadbeefdeadbeef\" ),"
+ "\"nine\": { \"$binary\": \"abc=\", \"$type\": \"00\" },"
+ "\"ten\": Date( 44 ), \"eleven\": /foooooo/i }" );
+ fuzz( b );
+ b.valid();
+ }
+ private:
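+                // flips each bit of the object data (past the 4-byte length) independently with
+                // probability frequency_; the caller then re-validates the mangled object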
+ void fuzz( BSONObj &o ) const {
+ for( int i = 4; i < o.objsize(); ++i )
+ for( unsigned char j = 1; j; j <<= 1 )
+ if ( rand() < int( RAND_MAX * frequency_ ) ) {
+ char *c = const_cast< char * >( o.objdata() ) + i;
+ if ( *c & j )
+ *c &= ~j;
+ else
+ *c |= j;
+ }
+ }
+ double frequency_;
+ };
+
+ } // namespace Validation
+
+ } // namespace BSONObjTests
+
+ namespace OIDTests {
+
+ class init1 {
+ public:
+ void run() {
+ OID a;
+ OID b;
+
+ a.init();
+ b.init();
+
+ ASSERT( a != b );
+ }
+ };
+
+ class initParse1 {
+ public:
+ void run() {
+
+ OID a;
+ OID b;
+
+ a.init();
+ b.init( a.str() );
+
+ ASSERT( a == b );
+ }
+ };
+
+ class append {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendOID( "a" , 0 );
+ b.appendOID( "b" , 0 , false );
+ b.appendOID( "c" , 0 , true );
+ BSONObj o = b.obj();
+ keyTest(o);
+
+ ASSERT( o["a"].__oid().str() == "000000000000000000000000" );
+ ASSERT( o["b"].__oid().str() == "000000000000000000000000" );
+ ASSERT( o["c"].__oid().str() != "000000000000000000000000" );
+
+ }
+ };
+
+ class increasing {
+ public:
+ BSONObj g() {
+ BSONObjBuilder b;
+ b.appendOID( "_id" , 0 , true );
+ return b.obj();
+ }
+ void run() {
+ BSONObj a = g();
+ BSONObj b = g();
+
+ ASSERT( a.woCompare( b ) < 0 );
+
+                // yes, there is a 1/1000 chance this won't increase time(0)
+                // and therefore incorrectly report that the function is behaving,
+                // but if it's broken, it will fail 999 times out of 1000, so that's good enough
+ sleepsecs( 1 );
+ BSONObj c = g();
+ ASSERT( a.woCompare( c ) < 0 );
+ }
+ };
+
+ class ToDate {
+ public:
+ void run() {
+ OID oid;
+
+ {
+ time_t before = ::time(0);
+ oid.init();
+ time_t after = ::time(0);
+ ASSERT( oid.asTimeT() >= before );
+ ASSERT( oid.asTimeT() <= after );
+ }
+
+ {
+ Date_t before = jsTime();
+ sleepsecs(1);
+ oid.init();
+ Date_t after = jsTime();
+ ASSERT( oid.asDateT() >= before );
+ ASSERT( oid.asDateT() <= after );
+ }
+ }
+ };
+
+ class FromDate {
+ public:
+ void run() {
+ OID min, oid, max;
+ Date_t now = jsTime();
+ oid.init(); // slight chance this has different time. If its a problem, can change.
+ min.init(now);
+ max.init(now, true);
+
+ ASSERT_EQUALS( (unsigned)oid.asTimeT() , now/1000 );
+ ASSERT_EQUALS( (unsigned)min.asTimeT() , now/1000 );
+ ASSERT_EQUALS( (unsigned)max.asTimeT() , now/1000 );
+ ASSERT( BSON("" << min).woCompare( BSON("" << oid) ) < 0 );
+ ASSERT( BSON("" << max).woCompare( BSON("" << oid) )> 0 );
+ }
+ };
+ } // namespace OIDTests
+
+
+ namespace ValueStreamTests {
+
+ class LabelBase {
+ public:
+ virtual ~LabelBase() {}
+ void run() {
+ ASSERT( !expected().woCompare( actual() ) );
+ }
+ protected:
+ virtual BSONObj expected() = 0;
+ virtual BSONObj actual() = 0;
+ };
+
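+        // each Label* case below checks that streaming a relational label (GT, LTE, NE, SIZE, ...)
+        // into BSON() expands to the equivalent $-operator subdocument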
+ class LabelBasic : public LabelBase {
+ BSONObj expected() {
+ return BSON( "a" << ( BSON( "$gt" << 1 ) ) );
+ }
+ BSONObj actual() {
+ return BSON( "a" << GT << 1 );
+ }
+ };
+
+ class LabelShares : public LabelBase {
+ BSONObj expected() {
+ return BSON( "z" << "q" << "a" << ( BSON( "$gt" << 1 ) ) << "x" << "p" );
+ }
+ BSONObj actual() {
+ return BSON( "z" << "q" << "a" << GT << 1 << "x" << "p" );
+ }
+ };
+
+ class LabelDouble : public LabelBase {
+ BSONObj expected() {
+ return BSON( "a" << ( BSON( "$gt" << 1 << "$lte" << "x" ) ) );
+ }
+ BSONObj actual() {
+ return BSON( "a" << GT << 1 << LTE << "x" );
+ }
+ };
+
+ class LabelDoubleShares : public LabelBase {
+ BSONObj expected() {
+ return BSON( "z" << "q" << "a" << ( BSON( "$gt" << 1 << "$lte" << "x" ) ) << "x" << "p" );
+ }
+ BSONObj actual() {
+ return BSON( "z" << "q" << "a" << GT << 1 << LTE << "x" << "x" << "p" );
+ }
+ };
+
+ class LabelSize : public LabelBase {
+ BSONObj expected() {
+ return BSON( "a" << BSON( "$size" << 4 ) );
+ }
+ BSONObj actual() {
+ return BSON( "a" << mongo::SIZE << 4 );
+ }
+ };
+
+ class LabelMulti : public LabelBase {
+ BSONObj expected() {
+ return BSON( "z" << "q"
+ << "a" << BSON( "$gt" << 1 << "$lte" << "x" )
+ << "b" << BSON( "$ne" << 1 << "$ne" << "f" << "$ne" << 22.3 )
+ << "x" << "p" );
+ }
+ BSONObj actual() {
+ return BSON( "z" << "q"
+ << "a" << GT << 1 << LTE << "x"
+ << "b" << NE << 1 << NE << "f" << NE << 22.3
+ << "x" << "p" );
+ }
+ };
+ class LabelishOr : public LabelBase {
+ BSONObj expected() {
+ return BSON( "$or" << BSON_ARRAY(
+ BSON("a" << BSON( "$gt" << 1 << "$lte" << "x" ))
+ << BSON("b" << BSON( "$ne" << 1 << "$ne" << "f" << "$ne" << 22.3 ))
+ << BSON("x" << "p" )));
+ }
+ BSONObj actual() {
+ return OR( BSON( "a" << GT << 1 << LTE << "x"),
+ BSON( "b" << NE << 1 << NE << "f" << NE << 22.3),
+ BSON( "x" << "p" ) );
+ }
+ };
+
+ class Unallowed {
+ public:
+ void run() {
+ ASSERT_THROWS( BSON( GT << 4 ), MsgAssertionException );
+ ASSERT_THROWS( BSON( "a" << 1 << GT << 4 ), MsgAssertionException );
+ }
+ };
+
+ class ElementAppend {
+ public:
+ void run() {
+ BSONObj a = BSON( "a" << 17 );
+ BSONObj b = BSON( "b" << a["a"] );
+ ASSERT_EQUALS( NumberInt , a["a"].type() );
+ ASSERT_EQUALS( NumberInt , b["b"].type() );
+ ASSERT_EQUALS( 17 , b["b"].number() );
+ }
+ };
+
+ } // namespace ValueStreamTests
+
+ class SubObjectBuilder {
+ public:
+ void run() {
+ BSONObjBuilder b1;
+ b1.append( "a", "bcd" );
+ BSONObjBuilder b2( b1.subobjStart( "foo" ) );
+ b2.append( "ggg", 44.0 );
+ b2.done();
+ b1.append( "f", 10.0 );
+ BSONObj ret = b1.done();
+ ASSERT( ret.valid() );
+ ASSERT( ret.woCompare( fromjson( "{a:'bcd',foo:{ggg:44},f:10}" ) ) == 0 );
+ }
+ };
+
+ class DateBuilder {
+ public:
+ void run() {
+ BSONObj o = BSON("" << Date_t(1234567890));
+ ASSERT( o.firstElement().type() == Date );
+ ASSERT( o.firstElement().date() == Date_t(1234567890) );
+ }
+ };
+
+ class DateNowBuilder {
+ public:
+ void run() {
+ Date_t before = jsTime();
+ BSONObj o = BSON("now" << DATENOW);
+ Date_t after = jsTime();
+
+ ASSERT( o.valid() );
+
+ BSONElement e = o["now"];
+ ASSERT( e.type() == Date );
+ ASSERT( e.date() >= before );
+ ASSERT( e.date() <= after );
+ }
+ };
+
+ class TimeTBuilder {
+ public:
+ void run() {
+ Date_t before = jsTime();
+ sleepmillis(1);
+ time_t now = time(NULL);
+ sleepmillis(1);
+ Date_t after = jsTime();
+
+ BSONObjBuilder b;
+ b.appendTimeT("now", now);
+ BSONObj o = b.obj();
+
+ ASSERT( o.valid() );
+
+ BSONElement e = o["now"];
+ ASSERT( e.type() == Date );
+ ASSERT( e.date()/1000 >= before/1000 );
+ ASSERT( e.date()/1000 <= after/1000 );
+ }
+ };
+
+ class MinMaxKeyBuilder {
+ public:
+ void run() {
+ BSONObj min = BSON( "a" << MINKEY );
+ BSONObj max = BSON( "b" << MAXKEY );
+
+ ASSERT( min.valid() );
+ ASSERT( max.valid() );
+
+ BSONElement minElement = min["a"];
+ BSONElement maxElement = max["b"];
+ ASSERT( minElement.type() == MinKey );
+ ASSERT( maxElement.type() == MaxKey );
+ }
+ };
+
+ class MinMaxElementTest {
+ public:
+
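+ // For every BSON type, the value produced by appendMinForType() must compare <=
+ // the value produced by appendMaxForType(), and each must compare equal to itself.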
+ BSONObj min( int t ) {
+ BSONObjBuilder b;
+ b.appendMinForType( "a" , t );
+ return b.obj();
+ }
+
+ BSONObj max( int t ) {
+ BSONObjBuilder b;
+ b.appendMaxForType( "a" , t );
+ return b.obj();
+ }
+
+ void run() {
+ for ( int t=1; t<JSTypeMax; t++ ) {
+ stringstream ss;
+ ss << "type: " << t;
+ string s = ss.str();
+ ASSERT( min( t ).woCompare( max( t ) ) <= 0 );
+ ASSERT( max( t ).woCompare( min( t ) ) >= 0 );
+ ASSERT( min( t ).woCompare( min( t ) ) == 0 );
+ ASSERT( max( t ).woCompare( max( t ) ) == 0 );
+ }
+ }
+ };
+
+ class ExtractFieldsTest {
+ public:
+ void run() {
+ BSONObj x = BSON( "a" << 10 << "b" << 11 );
+ assert( BSON( "a" << 10 ).woCompare( x.extractFields( BSON( "a" << 1 ) ) ) == 0 );
+ assert( BSON( "b" << 11 ).woCompare( x.extractFields( BSON( "b" << 1 ) ) ) == 0 );
+ assert( x.woCompare( x.extractFields( BSON( "a" << 1 << "b" << 1 ) ) ) == 0 );
+
+ assert( (string)"a" == x.extractFields( BSON( "a" << 1 << "c" << 1 ) ).firstElementFieldName() );
+ }
+ };
+
+ class ComparatorTest {
+ public:
+ BSONObj one( string s ) {
+ return BSON( "x" << s );
+ }
+ BSONObj two( string x , string y ) {
+ BSONObjBuilder b;
+ b.append( "x" , x );
+ if ( y.size() )
+ b.append( "y" , y );
+ else
+ b.appendNull( "y" );
+ return b.obj();
+ }
+
+ void test( BSONObj order , BSONObj l , BSONObj r , bool wanted ) {
+ BSONObjCmp c( order );
+ bool got = c(l,r);
+ if ( got == wanted )
+ return;
+ cout << " order: " << order << " l: " << l << "r: " << r << " wanted: " << wanted << " got: " << got << endl;
+ }
+
+ void lt( BSONObj order , BSONObj l , BSONObj r ) {
+ test( order , l , r , 1 );
+ }
+
+ void run() {
+ BSONObj s = BSON( "x" << 1 );
+ BSONObj c = BSON( "x" << 1 << "y" << 1 );
+ test( s , one( "A" ) , one( "B" ) , 1 );
+ test( s , one( "B" ) , one( "A" ) , 0 );
+
+ test( c , two( "A" , "A" ) , two( "A" , "B" ) , 1 );
+ test( c , two( "A" , "A" ) , two( "B" , "A" ) , 1 );
+ test( c , two( "B" , "A" ) , two( "A" , "B" ) , 0 );
+
+ lt( c , one("A") , two( "A" , "A" ) );
+ lt( c , one("A") , one( "B" ) );
+ lt( c , two("A","") , two( "B" , "A" ) );
+
+ lt( c , two("B","A") , two( "C" , "A" ) );
+ lt( c , two("B","A") , one( "C" ) );
+ lt( c , two("B","A") , two( "C" , "" ) );
+
+ }
+ };
+
+ namespace external_sort {
+ class Basic1 {
+ public:
+ void run() {
+ BSONObjExternalSorter sorter(indexInterfaceForTheseTests);
+
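+ // add( key, a, b ) records the key together with a DiskLoc built from the two
+ // trailing ints (see the toString() checks in Basic2); after sort() the iterator
+ // should return the keys in ascending order of "x".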
+ sorter.add( BSON( "x" << 10 ) , 5 , 1);
+ sorter.add( BSON( "x" << 2 ) , 3 , 1 );
+ sorter.add( BSON( "x" << 5 ) , 6 , 1 );
+ sorter.add( BSON( "x" << 5 ) , 7 , 1 );
+
+ sorter.sort();
+
+ auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
+ int num=0;
+ while ( i->more() ) {
+ pair<BSONObj,DiskLoc> p = i->next();
+ if ( num == 0 )
+ assert( p.first["x"].number() == 2 );
+ else if ( num <= 2 ) {
+ assert( p.first["x"].number() == 5 );
+ }
+ else if ( num == 3 )
+ assert( p.first["x"].number() == 10 );
+ else
+ ASSERT( 0 );
+ num++;
+ }
+
+
+ ASSERT_EQUALS( 0 , sorter.numFiles() );
+ }
+ };
+
+ class Basic2 {
+ public:
+ void run() {
+ BSONObjExternalSorter sorter( indexInterfaceForTheseTests, BSONObj() , 10 );
+ sorter.add( BSON( "x" << 10 ) , 5 , 11 );
+ sorter.add( BSON( "x" << 2 ) , 3 , 1 );
+ sorter.add( BSON( "x" << 5 ) , 6 , 1 );
+ sorter.add( BSON( "x" << 5 ) , 7 , 1 );
+
+ sorter.sort();
+
+ auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
+ int num=0;
+ while ( i->more() ) {
+ pair<BSONObj,DiskLoc> p = i->next();
+ if ( num == 0 ) {
+ assert( p.first["x"].number() == 2 );
+ ASSERT_EQUALS( p.second.toString() , "3:1" );
+ }
+ else if ( num <= 2 )
+ assert( p.first["x"].number() == 5 );
+ else if ( num == 3 ) {
+ assert( p.first["x"].number() == 10 );
+ ASSERT_EQUALS( p.second.toString() , "5:b" );
+ }
+ else
+ ASSERT( 0 );
+ num++;
+ }
+
+ }
+ };
+
+ class Basic3 {
+ public:
+ void run() {
+ BSONObjExternalSorter sorter( indexInterfaceForTheseTests, BSONObj() , 10 );
+ sorter.sort();
+
+ auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
+ assert( ! i->more() );
+
+ }
+ };
+
+
+ class ByDiskLock {
+ public:
+ void run() {
+ BSONObjExternalSorter sorter(indexInterfaceForTheseTests);
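+ // Entries with equal keys (x == 5) should come back ordered by DiskLoc, so the
+ // offsets passed below (4, 0, 2, 3, 1) double as each entry's expected position.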
+ sorter.add( BSON( "x" << 10 ) , 5 , 4);
+ sorter.add( BSON( "x" << 2 ) , 3 , 0 );
+ sorter.add( BSON( "x" << 5 ) , 6 , 2 );
+ sorter.add( BSON( "x" << 5 ) , 7 , 3 );
+ sorter.add( BSON( "x" << 5 ) , 2 , 1 );
+
+ sorter.sort();
+
+ auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
+ int num=0;
+ while ( i->more() ) {
+ pair<BSONObj,DiskLoc> p = i->next();
+ if ( num == 0 )
+ assert( p.first["x"].number() == 2 );
+ else if ( num <= 3 ) {
+ assert( p.first["x"].number() == 5 );
+ }
+ else if ( num == 4 )
+ assert( p.first["x"].number() == 10 );
+ else
+ ASSERT( 0 );
+ ASSERT_EQUALS( num , p.second.getOfs() );
+ num++;
+ }
+
+
+ }
+ };
+
+
+ class Big1 {
+ public:
+ void run() {
+ BSONObjExternalSorter sorter( indexInterfaceForTheseTests, BSONObj() , 2000 );
+ for ( int i=0; i<10000; i++ ) {
+ sorter.add( BSON( "x" << rand() % 10000 ) , 5 , i );
+ }
+
+ sorter.sort();
+
+ auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
+ int num=0;
+ double prev = 0;
+ while ( i->more() ) {
+ pair<BSONObj,DiskLoc> p = i->next();
+ num++;
+ double cur = p.first["x"].number();
+ assert( cur >= prev );
+ prev = cur;
+ }
+ assert( num == 10000 );
+ }
+ };
+
+ class Big2 {
+ public:
+ void run() {
+ const int total = 100000;
+ BSONObjExternalSorter sorter( indexInterfaceForTheseTests, BSONObj() , total * 2 );
+ for ( int i=0; i<total; i++ ) {
+ sorter.add( BSON( "a" << "b" ) , 5 , i );
+ }
+
+ sorter.sort();
+
+ auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
+ int num=0;
+ double prev = 0;
+ while ( i->more() ) {
+ pair<BSONObj,DiskLoc> p = i->next();
+ num++;
+ double cur = p.first["x"].number();
+ assert( cur >= prev );
+ prev = cur;
+ }
+ assert( num == total );
+ ASSERT( sorter.numFiles() > 2 );
+ }
+ };
+
+ class D1 {
+ public:
+ void run() {
+
+ BSONObjBuilder b;
+ b.appendNull("");
+ BSONObj x = b.obj();
+
+ BSONObjExternalSorter sorter(indexInterfaceForTheseTests);
+ sorter.add(x, DiskLoc(3,7));
+ sorter.add(x, DiskLoc(4,7));
+ sorter.add(x, DiskLoc(2,7));
+ sorter.add(x, DiskLoc(1,7));
+ sorter.add(x, DiskLoc(3,77));
+
+ sorter.sort();
+
+ auto_ptr<BSONObjExternalSorter::Iterator> i = sorter.iterator();
+ while( i->more() ) {
+ BSONObjExternalSorter::Data d = i->next();
+ /*cout << d.second.toString() << endl;
+ cout << d.first.objsize() << endl;
+ cout<<"SORTER next:" << d.first.toString() << endl;*/
+ }
+ }
+ };
+ } // namespace external_sort
+
+ class CompatBSON {
+ public:
+
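+ // Golden-value checks: each document must keep the expected objsize() and md5(),
+ // guarding against accidental changes to the BSON encoding.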
+#define JSONBSONTEST(j,s,m) ASSERT_EQUALS( fromjson( j ).objsize() , s ); ASSERT_EQUALS( fromjson( j ).md5() , m );
+#define RAWBSONTEST(j,s,m) ASSERT_EQUALS( j.objsize() , s ); ASSERT_EQUALS( j.md5() , m );
+
+ void run() {
+
+ JSONBSONTEST( "{ 'x' : true }" , 9 , "6fe24623e4efc5cf07f027f9c66b5456" );
+ JSONBSONTEST( "{ 'x' : null }" , 8 , "12d43430ff6729af501faf0638e68888" );
+ JSONBSONTEST( "{ 'x' : 5.2 }" , 16 , "aaeeac4a58e9c30eec6b0b0319d0dff2" );
+ JSONBSONTEST( "{ 'x' : 'eliot' }" , 18 , "331a3b8b7cbbe0706c80acdb45d4ebbe" );
+ JSONBSONTEST( "{ 'x' : 5.2 , 'y' : 'truth' , 'z' : 1.1 }" , 40 , "7c77b3a6e63e2f988ede92624409da58" );
+ JSONBSONTEST( "{ 'a' : { 'b' : 1.1 } }" , 24 , "31887a4b9d55cd9f17752d6a8a45d51f" );
+ JSONBSONTEST( "{ 'x' : 5.2 , 'y' : { 'a' : 'eliot' , b : true } , 'z' : null }" , 44 , "b3de8a0739ab329e7aea138d87235205" );
+ JSONBSONTEST( "{ 'x' : 5.2 , 'y' : [ 'a' , 'eliot' , 'b' , true ] , 'z' : null }" , 62 , "cb7bad5697714ba0cbf51d113b6a0ee8" );
+
+ RAWBSONTEST( BSON( "x" << 4 ) , 12 , "d1ed8dbf79b78fa215e2ded74548d89d" );
+
+ }
+ };
+
+ class CompareDottedFieldNamesTest {
+ public:
+ void t( FieldCompareResult res , const string& l , const string& r ) {
+ ASSERT_EQUALS( res , compareDottedFieldNames( l , r ) );
+ ASSERT_EQUALS( -1 * res , compareDottedFieldNames( r , l ) );
+ }
+
+ void run() {
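+ // compareDottedFieldNames() relates two dotted paths: SAME for identical paths,
+ // LEFT_BEFORE / RIGHT_BEFORE for ordering, and LEFT_SUBFIELD when the left path
+ // extends the right one (e.g. "a.x" vs "a"); t() also checks the reversed call.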
+ t( SAME , "x" , "x" );
+ t( SAME , "x.a" , "x.a" );
+ t( LEFT_BEFORE , "a" , "b" );
+ t( RIGHT_BEFORE , "b" , "a" );
+
+ t( LEFT_SUBFIELD , "a.x" , "a" );
+ }
+ };
+
+ struct NestedDottedConversions {
+ void t(const BSONObj& nest, const BSONObj& dot) {
+ ASSERT_EQUALS( nested2dotted(nest), dot);
+ ASSERT_EQUALS( nest, dotted2nested(dot));
+ }
+
+ void run() {
+ t( BSON("a" << BSON("b" << 1)), BSON("a.b" << 1) );
+ t( BSON("a" << BSON("b" << 1 << "c" << 1)), BSON("a.b" << 1 << "a.c" << 1) );
+ t( BSON("a" << BSON("b" << 1 << "c" << 1) << "d" << 1), BSON("a.b" << 1 << "a.c" << 1 << "d" << 1) );
+ t( BSON("a" << BSON("b" << 1 << "c" << 1 << "e" << BSON("f" << 1)) << "d" << 1), BSON("a.b" << 1 << "a.c" << 1 << "a.e.f" << 1 << "d" << 1) );
+ }
+ };
+
+ struct BSONArrayBuilderTest {
+ void run() {
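+ // Appending the same sequence of values through a BSONObjBuilder (using numeric
+ // field names from numStr()) and through a BSONArrayBuilder should yield equal objects.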
+ int i = 0;
+ BSONObjBuilder objb;
+ BSONArrayBuilder arrb;
+
+ objb << objb.numStr(i++) << 100;
+ arrb << 100;
+
+ objb << objb.numStr(i++) << 1.0;
+ arrb << 1.0;
+
+ objb << objb.numStr(i++) << "Hello";
+ arrb << "Hello";
+
+ objb << objb.numStr(i++) << string("World");
+ arrb << string("World");
+
+ objb << objb.numStr(i++) << BSON( "a" << 1 << "b" << "foo" );
+ arrb << BSON( "a" << 1 << "b" << "foo" );
+
+ objb << objb.numStr(i++) << BSON( "a" << 1)["a"];
+ arrb << BSON( "a" << 1)["a"];
+
+ OID oid;
+ oid.init();
+ objb << objb.numStr(i++) << oid;
+ arrb << oid;
+
+ BSONObj obj = objb.obj();
+ BSONArray arr = arrb.arr();
+
+ ASSERT_EQUALS(obj, arr);
+
+ BSONObj o = BSON( "obj" << obj << "arr" << arr << "arr2" << BSONArray(obj) );
+ keyTest(o);
+
+ ASSERT_EQUALS(o["obj"].type(), Object);
+ ASSERT_EQUALS(o["arr"].type(), Array);
+ ASSERT_EQUALS(o["arr2"].type(), Array);
+ }
+ };
+
+ struct ArrayMacroTest {
+ void run() {
+ BSONArray arr = BSON_ARRAY( "hello" << 1 << BSON( "foo" << BSON_ARRAY( "bar" << "baz" << "qux" ) ) );
+ BSONObj obj = BSON( "0" << "hello"
+ << "1" << 1
+ << "2" << BSON( "foo" << BSON_ARRAY( "bar" << "baz" << "qux" ) ) );
+
+ ASSERT_EQUALS(arr, obj);
+ ASSERT_EQUALS(arr["2"].type(), Object);
+ ASSERT_EQUALS(arr["2"].embeddedObject()["foo"].type(), Array);
+ }
+ };
+
+ class NumberParsing {
+ public:
+ void run() {
+ BSONObjBuilder a;
+ BSONObjBuilder b;
+
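+ // appendAsNumber() should parse each string into the matching numeric type so that
+ // builder b ends up equal to builder a, and should return false for strings that
+ // are not valid numbers ("zz", "5zz", "zz5").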
+ a.append( "a" , (int)1 );
+ ASSERT( b.appendAsNumber( "a" , "1" ) );
+
+ a.append( "b" , 1.1 );
+ ASSERT( b.appendAsNumber( "b" , "1.1" ) );
+
+ a.append( "c" , (int)-1 );
+ ASSERT( b.appendAsNumber( "c" , "-1" ) );
+
+ a.append( "d" , -1.1 );
+ ASSERT( b.appendAsNumber( "d" , "-1.1" ) );
+
+ a.append( "e" , (long long)32131231231232313LL );
+ ASSERT( b.appendAsNumber( "e" , "32131231231232313" ) );
+
+ ASSERT( ! b.appendAsNumber( "f" , "zz" ) );
+ ASSERT( ! b.appendAsNumber( "f" , "5zz" ) );
+ ASSERT( ! b.appendAsNumber( "f" , "zz5" ) );
+
+ ASSERT_EQUALS( a.obj() , b.obj() );
+ }
+ };
+
+ class bson2settest {
+ public:
+ void run() {
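+ // BSONObjIteratorSorted visits fields in sorted field-name order rather than
+ // insertion order, hence "acmz" below; the block that follows is a rough timing check.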
+ BSONObj o = BSON( "z" << 1 << "a" << 2 << "m" << 3 << "c" << 4 );
+ BSONObjIteratorSorted i( o );
+ stringstream ss;
+ while ( i.more() )
+ ss << i.next().fieldName();
+ ASSERT_EQUALS( "acmz" , ss.str() );
+
+ {
+ Timer t;
+ for ( int i=0; i<10000; i++ ) {
+ BSONObjIteratorSorted j( o );
+ int l = 0;
+ while ( j.more() )
+ l += strlen( j.next().fieldName() );
+ }
+ //unsigned long long tm = t.micros();
+ //cout << "time: " << tm << endl;
+ }
+ }
+
+ };
+
+ class checkForStorageTests {
+ public:
+
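+ // okForStorage() should reject documents that contain '.' in a field name or a
+ // field name starting with '$', as good()/bad() verify below.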
+ void good( string s ) {
+ BSONObj o = fromjson( s );
+ if ( o.okForStorage() )
+ return;
+ throw UserException( 12528 , (string)"should be ok for storage:" + s );
+ }
+
+ void bad( string s ) {
+ BSONObj o = fromjson( s );
+ if ( ! o.okForStorage() )
+ return;
+ throw UserException( 12529 , (string)"should NOT be ok for storage:" + s );
+ }
+
+ void run() {
+ good( "{x:1}" );
+ bad( "{'x.y':1}" );
+
+ good( "{x:{a:2}}" );
+ bad( "{x:{'$a':2}}" );
+ }
+ };
+
+ class InvalidIDFind {
+ public:
+ void run() {
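+ // A byte-for-byte copy of the object constructs fine; corrupting the leading size
+ // word must make the BSONObj constructor throw, with the document's _id in the message.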
+ BSONObj x = BSON( "_id" << 5 << "t" << 2 );
+ {
+ char * crap = (char*)malloc( x.objsize() );
+ memcpy( crap , x.objdata() , x.objsize() );
+ BSONObj y( crap );
+ ASSERT_EQUALS( x , y );
+ free( crap );
+ }
+
+ {
+ char * crap = (char*)malloc( x.objsize() );
+ memcpy( crap , x.objdata() , x.objsize() );
+ int * foo = (int*)crap;
+ foo[0] = 123123123;
+ int state = 0;
+ try {
+ BSONObj y( crap );
+ state = 1;
+ }
+ catch ( std::exception& e ) {
+ state = 2;
+ ASSERT( strstr( e.what() , "_id: 5" ) > 0 );
+ }
+ free( crap );
+ ASSERT_EQUALS( 2 , state );
+ }
+
+
+ }
+ };
+
+ class ElementSetTest {
+ public:
+ void run() {
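+ // BSONElementSet compares element values while ignoring field names, so the "a"
+ // and "b" elements (both 1) collapse to a single entry while "c" (2) stays distinct.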
+ BSONObj x = BSON( "a" << 1 << "b" << 1 << "c" << 2 );
+ BSONElement a = x["a"];
+ BSONElement b = x["b"];
+ BSONElement c = x["c"];
+ //cout << "c: " << c << endl;
+ ASSERT( a.woCompare( b ) != 0 );
+ ASSERT( a.woCompare( b , false ) == 0 );
+
+ BSONElementSet s;
+ s.insert( a );
+ ASSERT_EQUALS( 1U , s.size() );
+ s.insert( b );
+ ASSERT_EQUALS( 1U , s.size() );
+ ASSERT( ! s.count( c ) );
+
+ ASSERT( s.find( a ) != s.end() );
+ ASSERT( s.find( b ) != s.end() );
+ ASSERT( s.find( c ) == s.end() );
+
+
+ s.insert( c );
+ ASSERT_EQUALS( 2U , s.size() );
+
+
+ ASSERT( s.find( a ) != s.end() );
+ ASSERT( s.find( b ) != s.end() );
+ ASSERT( s.find( c ) != s.end() );
+
+ ASSERT( s.count( a ) );
+ ASSERT( s.count( b ) );
+ ASSERT( s.count( c ) );
+
+ {
+ BSONElementSet x;
+ BSONObj o = fromjson( "{ 'a' : [ 1 , 2 , 1 ] }" );
+ BSONObjIterator i( o["a"].embeddedObjectUserCheck() );
+ while ( i.more() ) {
+ x.insert( i.next() );
+ }
+ ASSERT_EQUALS( 2U , x.size() );
+ }
+ }
+ };
+
+ class EmbeddedNumbers {
+ public:
+ void run() {
+ BSONObj x = BSON( "a" << BSON( "b" << 1 ) );
+ BSONObj y = BSON( "a" << BSON( "b" << 1.0 ) );
+ keyTest(x); keyTest(y);
+ ASSERT_EQUALS( x , y );
+ ASSERT_EQUALS( 0 , x.woCompare( y ) );
+ }
+ };
+
+ class BuilderPartialIterate {
+ public:
+ void run() {
+ {
+ BSONObjBuilder b;
+ b.append( "x" , 1 );
+ b.append( "y" , 2 );
+
+ BSONObjIterator i = b.iterator();
+ ASSERT( i.more() );
+ ASSERT_EQUALS( 1 , i.next().numberInt() );
+ ASSERT( i.more() );
+ ASSERT_EQUALS( 2 , i.next().numberInt() );
+ ASSERT( ! i.more() );
+
+ b.append( "z" , 3 );
+
+ i = b.iterator();
+ ASSERT( i.more() );
+ ASSERT_EQUALS( 1 , i.next().numberInt() );
+ ASSERT( i.more() );
+ ASSERT_EQUALS( 2 , i.next().numberInt() );
+ ASSERT( i.more() );
+ ASSERT_EQUALS( 3 , i.next().numberInt() );
+ ASSERT( ! i.more() );
+
+ ASSERT_EQUALS( BSON( "x" << 1 << "y" << 2 << "z" << 3 ) , b.obj() );
+ }
+
+ }
+ };
+
+ class BSONFieldTests {
+ public:
+ void run() {
+ {
+ BSONField<int> x("x");
+ BSONObj o = BSON( x << 5 );
+ ASSERT_EQUALS( BSON( "x" << 5 ) , o );
+ }
+
+ {
+ BSONField<int> x("x");
+ BSONObj o = BSON( x.make(5) );
+ ASSERT_EQUALS( BSON( "x" << 5 ) , o );
+ }
+
+ {
+ BSONField<int> x("x");
+ BSONObj o = BSON( x(5) );
+ ASSERT_EQUALS( BSON( "x" << 5 ) , o );
+
+ o = BSON( x.gt(5) );
+ ASSERT_EQUALS( BSON( "x" << BSON( "$gt" << 5 ) ) , o );
+ }
+
+ }
+ };
+
+ class BSONForEachTest {
+ public:
+ void run() {
+ BSONObj obj = BSON("a" << 1 << "a" << 2 << "a" << 3);
+
+ int count = 0;
+ BSONForEach(e, obj) {
+ ASSERT_EQUALS( e.fieldName() , string("a") );
+ count += e.Int();
+ }
+
+ ASSERT_EQUALS( count , 1+2+3 );
+ }
+ };
+
+ class StringDataTest {
+ public:
+ void run() {
+ StringData a( string( "aaa" ) );
+ ASSERT_EQUALS( 3u , a.size() );
+
+ StringData b( string( "bbb" ).c_str() );
+ ASSERT_EQUALS( 3u , b.size() );
+
+ StringData c( "ccc", StringData::LiteralTag() );
+ ASSERT_EQUALS( 3u , c.size() );
+
+ // TODO update test when second param takes StringData too
+ BSONObjBuilder builder;
+ builder.append( c, "value");
+ ASSERT_EQUALS( builder.obj() , BSON( c.data() << "value" ) );
+
+ }
+ };
+
+ class CompareOps {
+ public:
+ void run() {
+
+ BSONObj a = BSON("a"<<1);
+ BSONObj b = BSON("a"<<1);
+ BSONObj c = BSON("a"<<2);
+ BSONObj d = BSON("a"<<3);
+ BSONObj e = BSON("a"<<4);
+ BSONObj f = BSON("a"<<4);
+
+ ASSERT( ! ( a < b ) );
+ ASSERT( a <= b );
+ ASSERT( a < c );
+
+ ASSERT( f > d );
+ ASSERT( f >= e );
+ ASSERT( ! ( f > e ) );
+ }
+ };
+
+ class HashingTest {
+ public:
+ void run() {
+ int N = 100000;
+ BSONObj x = BSON( "name" << "eliot was here"
+ << "x" << 5
+ << "asdasdasdas" << "asldkasldjasldjasldjlasjdlasjdlasdasdasdasdasdasdasd" );
+
+ {
+ //Timer t;
+ for ( int i=0; i<N; i++ )
+ x.md5();
+ //int millis = t.millis();
+ //cout << "md5 : " << millis << endl;
+ }
+
+ {
+ //Timer t;
+ for ( int i=0; i<N; i++ )
+ x.toString();
+ //int millis = t.millis();
+ //cout << "toString : " << millis << endl;
+ }
+
+ {
+ //Timer t;
+ for ( int i=0; i<N; i++ )
+ checksum( x.objdata() , x.objsize() );
+ //int millis = t.millis();
+ //cout << "checksum : " << millis << endl;
+ }
+
+ }
+ };
+
+ class All : public Suite {
+ public:
+ All() : Suite( "jsobj" ) {
+ }
+
+ void setupTests() {
+ add< BufBuilderBasic >();
+ add< BSONElementBasic >();
+ add< BSONObjTests::NullString >();
+ add< BSONObjTests::Create >();
+ add< BSONObjTests::WoCompareBasic >();
+ add< BSONObjTests::NumericCompareBasic >();
+ add< BSONObjTests::WoCompareEmbeddedObject >();
+ add< BSONObjTests::WoCompareEmbeddedArray >();
+ add< BSONObjTests::WoCompareOrdered >();
+ add< BSONObjTests::WoCompareDifferentLength >();
+ add< BSONObjTests::WoSortOrder >();
+ add< BSONObjTests::MultiKeySortOrder > ();
+ add< BSONObjTests::TimestampTest >();
+ add< BSONObjTests::Nan >();
+ add< BSONObjTests::AsTempObj >();
+ add< BSONObjTests::AppendIntOrLL >();
+ add< BSONObjTests::AppendNumber >();
+ add< BSONObjTests::ToStringArray >();
+ add< BSONObjTests::ToStringNumber >();
+ add< BSONObjTests::AppendAs >();
+ add< BSONObjTests::ArrayAppendAs >();
+ add< BSONObjTests::GetField >();
+
+ add< BSONObjTests::Validation::BadType >();
+ add< BSONObjTests::Validation::EooBeforeEnd >();
+ add< BSONObjTests::Validation::Undefined >();
+ add< BSONObjTests::Validation::TotalSizeTooSmall >();
+ add< BSONObjTests::Validation::EooMissing >();
+ add< BSONObjTests::Validation::WrongStringSize >();
+ add< BSONObjTests::Validation::ZeroStringSize >();
+ add< BSONObjTests::Validation::NegativeStringSize >();
+ add< BSONObjTests::Validation::WrongSubobjectSize >();
+ add< BSONObjTests::Validation::WrongDbrefNsSize >();
+ add< BSONObjTests::Validation::NoFieldNameEnd >();
+ add< BSONObjTests::Validation::BadRegex >();
+ add< BSONObjTests::Validation::BadRegexOptions >();
+ add< BSONObjTests::Validation::CodeWScopeSmallSize >();
+ add< BSONObjTests::Validation::CodeWScopeZeroStrSize >();
+ add< BSONObjTests::Validation::CodeWScopeSmallStrSize >();
+ add< BSONObjTests::Validation::CodeWScopeNoSizeForObj >();
+ add< BSONObjTests::Validation::CodeWScopeSmallObjSize >();
+ add< BSONObjTests::Validation::CodeWScopeBadObject >();
+ add< BSONObjTests::Validation::NoSize >( Symbol );
+ add< BSONObjTests::Validation::NoSize >( Code );
+ add< BSONObjTests::Validation::NoSize >( String );
+ add< BSONObjTests::Validation::NoSize >( CodeWScope );
+ add< BSONObjTests::Validation::NoSize >( DBRef );
+ add< BSONObjTests::Validation::NoSize >( Object );
+ add< BSONObjTests::Validation::NoSize >( Array );
+ add< BSONObjTests::Validation::NoSize >( BinData );
+ add< BSONObjTests::Validation::Fuzz >( .5 );
+ add< BSONObjTests::Validation::Fuzz >( .1 );
+ add< BSONObjTests::Validation::Fuzz >( .05 );
+ add< BSONObjTests::Validation::Fuzz >( .01 );
+ add< BSONObjTests::Validation::Fuzz >( .001 );
+ add< OIDTests::init1 >();
+ add< OIDTests::initParse1 >();
+ add< OIDTests::append >();
+ add< OIDTests::increasing >();
+ add< OIDTests::ToDate >();
+ add< OIDTests::FromDate >();
+ add< ValueStreamTests::LabelBasic >();
+ add< ValueStreamTests::LabelShares >();
+ add< ValueStreamTests::LabelDouble >();
+ add< ValueStreamTests::LabelDoubleShares >();
+ add< ValueStreamTests::LabelSize >();
+ add< ValueStreamTests::LabelMulti >();
+ add< ValueStreamTests::LabelishOr >();
+ add< ValueStreamTests::Unallowed >();
+ add< ValueStreamTests::ElementAppend >();
+ add< SubObjectBuilder >();
+ add< DateBuilder >();
+ add< DateNowBuilder >();
+ add< TimeTBuilder >();
+ add< MinMaxKeyBuilder >();
+ add< MinMaxElementTest >();
+ add< ComparatorTest >();
+ add< ExtractFieldsTest >();
+ add< external_sort::Basic1 >();
+ add< external_sort::Basic2 >();
+ add< external_sort::Basic3 >();
+ add< external_sort::ByDiskLock >();
+ add< external_sort::Big1 >();
+ add< external_sort::Big2 >();
+ add< external_sort::D1 >();
+ add< CompatBSON >();
+ add< CompareDottedFieldNamesTest >();
+ add< NestedDottedConversions >();
+ add< BSONArrayBuilderTest >();
+ add< ArrayMacroTest >();
+ add< NumberParsing >();
+ add< bson2settest >();
+ add< checkForStorageTests >();
+ add< InvalidIDFind >();
+ add< ElementSetTest >();
+ add< EmbeddedNumbers >();
+ add< BuilderPartialIterate >();
+ add< BSONFieldTests >();
+ add< BSONForEachTest >();
+ add< StringDataTest >();
+ add< CompareOps >();
+ add< HashingTest >();
+ }
+ } myall;
+
+} // namespace JsobjTests
+
diff --git a/src/mongo/dbtests/jsontests.cpp b/src/mongo/dbtests/jsontests.cpp
new file mode 100644
index 00000000000..36c204a1011
--- /dev/null
+++ b/src/mongo/dbtests/jsontests.cpp
@@ -0,0 +1,1185 @@
+// jsontests.cpp - Tests for json.{h,cpp} code and BSONObj::jsonString()
+//
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../db/jsobj.h"
+#include "../db/json.h"
+
+#include "dbtests.h"
+
+#include <limits>
+
+namespace JsonTests {
+ namespace JsonStringTests {
+
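+ // Tests for BSONObj::jsonString(). Several cases exercise the three output modes
+ // (Strict, TenGen, JS), which differ mainly in how non-JSON types such as ObjectId,
+ // Date and regular expressions are rendered.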
+ class Empty {
+ public:
+ void run() {
+ ASSERT_EQUALS( "{}", BSONObj().jsonString( Strict ) );
+ }
+ };
+
+ class SingleStringMember {
+ public:
+ void run() {
+ ASSERT_EQUALS( "{ \"a\" : \"b\" }", BSON( "a" << "b" ).jsonString( Strict ) );
+ }
+ };
+
+ class EscapedCharacters {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.append( "a", "\" \\ / \b \f \n \r \t" );
+ ASSERT_EQUALS( "{ \"a\" : \"\\\" \\\\ / \\b \\f \\n \\r \\t\" }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ // per http://www.ietf.org/rfc/rfc4627.txt, control characters are
+ // (U+0000 through U+001F). U+007F is not mentioned as a control character.
+ class AdditionalControlCharacters {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.append( "a", "\x1 \x1f" );
+ ASSERT_EQUALS( "{ \"a\" : \"\\u0001 \\u001f\" }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class ExtendedAscii {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.append( "a", "\x80" );
+ ASSERT_EQUALS( "{ \"a\" : \"\x80\" }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class EscapeFieldName {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.append( "\t", "b" );
+ ASSERT_EQUALS( "{ \"\\t\" : \"b\" }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class SingleIntMember {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.append( "a", 1 );
+ ASSERT_EQUALS( "{ \"a\" : 1 }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class SingleNumberMember {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.append( "a", 1.5 );
+ ASSERT_EQUALS( "{ \"a\" : 1.5 }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class InvalidNumbers {
+ public:
+ void run() {
+ BSONObjBuilder c;
+ c.append( "a", numeric_limits< double >::quiet_NaN() );
+ string s = c.done().jsonString( Strict );
+ // Note there is no NaN in the JSON RFC but what would be the alternative?
+ ASSERT( str::contains(s, "NaN") );
+
+ // commented out assertion as it doesn't throw anymore:
+ //ASSERT_THROWS( c.done().jsonString( Strict ), AssertionException );
+
+ BSONObjBuilder d;
+ d.append( "a", numeric_limits< double >::signaling_NaN() );
+ //ASSERT_THROWS( d.done().jsonString( Strict ), AssertionException );
+ s = d.done().jsonString( Strict );
+ ASSERT( str::contains(s, "NaN") );
+ }
+ };
+
+ class NumberPrecision {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.append( "a", 123456789 );
+ ASSERT_EQUALS( "{ \"a\" : 123456789 }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class NegativeNumber {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.append( "a", -1 );
+ ASSERT_EQUALS( "{ \"a\" : -1 }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class SingleBoolMember {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendBool( "a", true );
+ ASSERT_EQUALS( "{ \"a\" : true }", b.done().jsonString( Strict ) );
+
+ BSONObjBuilder c;
+ c.appendBool( "a", false );
+ ASSERT_EQUALS( "{ \"a\" : false }", c.done().jsonString( Strict ) );
+ }
+ };
+
+ class SingleNullMember {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendNull( "a" );
+ ASSERT_EQUALS( "{ \"a\" : null }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class SingleObjectMember {
+ public:
+ void run() {
+ BSONObjBuilder b, c;
+ b.append( "a", c.done() );
+ ASSERT_EQUALS( "{ \"a\" : {} }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class TwoMembers {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.append( "a", 1 );
+ b.append( "b", 2 );
+ ASSERT_EQUALS( "{ \"a\" : 1, \"b\" : 2 }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class EmptyArray {
+ public:
+ void run() {
+ vector< int > arr;
+ BSONObjBuilder b;
+ b.append( "a", arr );
+ ASSERT_EQUALS( "{ \"a\" : [] }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class Array {
+ public:
+ void run() {
+ vector< int > arr;
+ arr.push_back( 1 );
+ arr.push_back( 2 );
+ BSONObjBuilder b;
+ b.append( "a", arr );
+ ASSERT_EQUALS( "{ \"a\" : [ 1, 2 ] }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class DBRef {
+ public:
+ void run() {
+ OID oid;
+ memset( &oid, 0xff, 12 );
+ BSONObjBuilder b;
+ b.appendDBRef( "a", "namespace", oid );
+ BSONObj built = b.done();
+ ASSERT_EQUALS( "{ \"a\" : { \"$ref\" : \"namespace\", \"$id\" : \"ffffffffffffffffffffffff\" } }",
+ built.jsonString( Strict ) );
+ ASSERT_EQUALS( "{ \"a\" : { \"$ref\" : \"namespace\", \"$id\" : \"ffffffffffffffffffffffff\" } }",
+ built.jsonString( JS ) );
+ ASSERT_EQUALS( "{ \"a\" : Dbref( \"namespace\", \"ffffffffffffffffffffffff\" ) }",
+ built.jsonString( TenGen ) );
+ }
+ };
+
+ class DBRefZero {
+ public:
+ void run() {
+ OID oid;
+ memset( &oid, 0, 12 );
+ BSONObjBuilder b;
+ b.appendDBRef( "a", "namespace", oid );
+ ASSERT_EQUALS( "{ \"a\" : { \"$ref\" : \"namespace\", \"$id\" : \"000000000000000000000000\" } }",
+ b.done().jsonString( Strict ) );
+ }
+ };
+
+ class ObjectId {
+ public:
+ void run() {
+ OID oid;
+ memset( &oid, 0xff, 12 );
+ BSONObjBuilder b;
+ b.appendOID( "a", &oid );
+ BSONObj built = b.done();
+ ASSERT_EQUALS( "{ \"a\" : { \"$oid\" : \"ffffffffffffffffffffffff\" } }",
+ built.jsonString( Strict ) );
+ ASSERT_EQUALS( "{ \"a\" : ObjectId( \"ffffffffffffffffffffffff\" ) }",
+ built.jsonString( TenGen ) );
+ }
+ };
+
+ class BinData {
+ public:
+ void run() {
+ char z[ 3 ];
+ z[ 0 ] = 'a';
+ z[ 1 ] = 'b';
+ z[ 2 ] = 'c';
+ BSONObjBuilder b;
+ b.appendBinData( "a", 3, BinDataGeneral, z );
+
+ string o = b.done().jsonString( Strict );
+
+ ASSERT_EQUALS( "{ \"a\" : { \"$binary\" : \"YWJj\", \"$type\" : \"00\" } }",
+ o );
+
+ BSONObjBuilder c;
+ c.appendBinData( "a", 2, BinDataGeneral, z );
+ ASSERT_EQUALS( "{ \"a\" : { \"$binary\" : \"YWI=\", \"$type\" : \"00\" } }",
+ c.done().jsonString( Strict ) );
+
+ BSONObjBuilder d;
+ d.appendBinData( "a", 1, BinDataGeneral, z );
+ ASSERT_EQUALS( "{ \"a\" : { \"$binary\" : \"YQ==\", \"$type\" : \"00\" } }",
+ d.done().jsonString( Strict ) );
+ }
+ };
+
+ class Symbol {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendSymbol( "a", "b" );
+ ASSERT_EQUALS( "{ \"a\" : \"b\" }", b.done().jsonString( Strict ) );
+ }
+ };
+
+ class Date {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendDate( "a", 0 );
+ BSONObj built = b.done();
+ ASSERT_EQUALS( "{ \"a\" : { \"$date\" : 0 } }", built.jsonString( Strict ) );
+ ASSERT_EQUALS( "{ \"a\" : Date( 0 ) }", built.jsonString( TenGen ) );
+ ASSERT_EQUALS( "{ \"a\" : Date( 0 ) }", built.jsonString( JS ) );
+ }
+ };
+
+ class Regex {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendRegex( "a", "abc", "i" );
+ BSONObj built = b.done();
+ ASSERT_EQUALS( "{ \"a\" : { \"$regex\" : \"abc\", \"$options\" : \"i\" } }",
+ built.jsonString( Strict ) );
+ ASSERT_EQUALS( "{ \"a\" : /abc/i }", built.jsonString( TenGen ) );
+ ASSERT_EQUALS( "{ \"a\" : /abc/i }", built.jsonString( JS ) );
+ }
+ };
+
+ class RegexEscape {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendRegex( "a", "/\"", "i" );
+ BSONObj built = b.done();
+ ASSERT_EQUALS( "{ \"a\" : { \"$regex\" : \"/\\\"\", \"$options\" : \"i\" } }",
+ built.jsonString( Strict ) );
+ ASSERT_EQUALS( "{ \"a\" : /\\/\\\"/i }", built.jsonString( TenGen ) );
+ ASSERT_EQUALS( "{ \"a\" : /\\/\\\"/i }", built.jsonString( JS ) );
+ }
+ };
+
+ class RegexManyOptions {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendRegex( "a", "z", "abcgimx" );
+ BSONObj built = b.done();
+ ASSERT_EQUALS( "{ \"a\" : { \"$regex\" : \"z\", \"$options\" : \"abcgimx\" } }",
+ built.jsonString( Strict ) );
+ ASSERT_EQUALS( "{ \"a\" : /z/gim }", built.jsonString( TenGen ) );
+ ASSERT_EQUALS( "{ \"a\" : /z/gim }", built.jsonString( JS ) );
+ }
+ };
+
+ class CodeTests {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendCode( "x" , "function(){ return 1; }" );
+ BSONObj o = b.obj();
+ ASSERT_EQUALS( "{ \"x\" : function(){ return 1; } }" , o.jsonString() );
+ }
+ };
+
+ class TimestampTests {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendTimestamp( "x" , 4000 , 10 );
+ BSONObj o = b.obj();
+ ASSERT_EQUALS( "{ \"x\" : { \"t\" : 4000 , \"i\" : 10 } }" , o.jsonString() );
+ }
+ };
+
+ class NullString {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.append( "x" , "a\0b" , 4 );
+ BSONObj o = b.obj();
+ ASSERT_EQUALS( "{ \"x\" : \"a\\u0000b\" }" , o.jsonString() );
+ }
+ };
+
+ class AllTypes {
+ public:
+ void run() {
+ OID oid;
+ oid.init();
+
+ BSONObjBuilder b;
+ b.appendMinKey( "a" );
+ b.append( "b" , 5.5 );
+ b.append( "c" , "abc" );
+ b.append( "e" , BSON( "x" << 1 ) );
+ b.append( "f" , BSON_ARRAY( 1 << 2 << 3 ) );
+ b.appendBinData( "g" , 5 , bdtCustom , (const char*)this );
+ b.appendUndefined( "h" );
+ b.append( "i" , oid );
+ b.appendBool( "j" , 1 );
+ b.appendDate( "k" , 123 );
+ b.appendNull( "l" );
+ b.appendRegex( "m" , "a" );
+ b.appendDBRef( "n" , "foo" , oid );
+ b.appendCode( "o" , "function(){}" );
+ b.appendSymbol( "p" , "foo" );
+ b.appendCodeWScope( "q" , "function(){}" , BSON("x" << 1 ) );
+ b.append( "r" , (int)5 );
+ b.appendTimestamp( "s" , 123123123123123LL );
+ b.append( "t" , 12321312312LL );
+ b.appendMaxKey( "u" );
+
+ BSONObj o = b.obj();
+ o.jsonString();
+ //cout << o.jsonString() << endl;
+ }
+ };
+
+ } // namespace JsonStringTests
+
+ namespace FromJsonTests {
+
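+ // Round-trip tests for fromjson(): each Base subclass supplies an expected BSONObj
+ // and a json string; run() parses the string and also re-parses the object's own
+ // jsonString() output in Strict, TenGen and JS modes. Bad subclasses must throw.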
+ class Base {
+ public:
+ virtual ~Base() {}
+ void run() {
+ ASSERT( fromjson( json() ).valid() );
+ assertEquals( bson(), fromjson( json() ) );
+ assertEquals( bson(), fromjson( bson().jsonString( Strict ) ) );
+ assertEquals( bson(), fromjson( bson().jsonString( TenGen ) ) );
+ assertEquals( bson(), fromjson( bson().jsonString( JS ) ) );
+ }
+ protected:
+ virtual BSONObj bson() const = 0;
+ virtual string json() const = 0;
+ private:
+ static void assertEquals( const BSONObj &expected, const BSONObj &actual ) {
+ if ( expected.woCompare( actual ) ) {
+ out() << "want:" << expected.jsonString() << " size: " << expected.objsize() << endl;
+ out() << "got :" << actual.jsonString() << " size: " << actual.objsize() << endl;
+ out() << expected.hexDump() << endl;
+ out() << actual.hexDump() << endl;
+ }
+ ASSERT( !expected.woCompare( actual ) );
+ }
+ };
+
+ class Bad {
+ public:
+ virtual ~Bad() {}
+ void run() {
+ ASSERT_THROWS( fromjson( json() ), MsgAssertionException );
+ }
+ protected:
+ virtual string json() const = 0;
+ };
+
+ class Empty : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{}";
+ }
+ };
+
+ class EmptyWithSpace : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ }";
+ }
+ };
+
+ class SingleString : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "a", "b" );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"b\" }";
+ }
+ };
+
+ class EmptyStrings : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "", "" );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"\" : \"\" }";
+ }
+ };
+
+ class ReservedFieldName : public Bad {
+ virtual string json() const {
+ return "{ \"$oid\" : \"b\" }";
+ }
+ };
+
+ class OkDollarFieldName : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "$where", 1 );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"$where\" : 1 }";
+ }
+ };
+
+ class SingleNumber : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "a", 1 );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : 1 }";
+ }
+ };
+
+ class RealNumber : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "a", strtod( "0.7", 0 ) );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : 0.7 }";
+ }
+ };
+
+ class FancyNumber : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "a", strtod( "-4.4433e-2", 0 ) );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : -4.4433e-2 }";
+ }
+ };
+
+ class TwoElements : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "a", 1 );
+ b.append( "b", "foo" );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : 1, \"b\" : \"foo\" }";
+ }
+ };
+
+ class Subobject : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "a", 1 );
+ BSONObjBuilder c;
+ c.append( "z", b.done() );
+ return c.obj();
+ }
+ virtual string json() const {
+ return "{ \"z\" : { \"a\" : 1 } }";
+ }
+ };
+
+ class ArrayEmpty : public Base {
+ virtual BSONObj bson() const {
+ vector< int > arr;
+ BSONObjBuilder b;
+ b.append( "a", arr );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : [] }";
+ }
+ };
+
+ class Array : public Base {
+ virtual BSONObj bson() const {
+ vector< int > arr;
+ arr.push_back( 1 );
+ arr.push_back( 2 );
+ arr.push_back( 3 );
+ BSONObjBuilder b;
+ b.append( "a", arr );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : [ 1, 2, 3 ] }";
+ }
+ };
+
+ class True : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendBool( "a", true );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : true }";
+ }
+ };
+
+ class False : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendBool( "a", false );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : false }";
+ }
+ };
+
+ class Null : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendNull( "a" );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : null }";
+ }
+ };
+
+ class EscapedCharacters : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "a", "\" \\ / \b \f \n \r \t \v" );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"\\\" \\\\ \\/ \\b \\f \\n \\r \\t \\v\" }";
+ }
+ };
+
+ class NonEscapedCharacters : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "a", "% { a z $ # ' " );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"\\% \\{ \\a \\z \\$ \\# \\' \\ \" }";
+ }
+ };
+
+ class AllowedControlCharacter : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "a", "\x7f" );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"\x7f\" }";
+ }
+ };
+
+ class EscapeFieldName : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "\n", "b" );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"\\n\" : \"b\" }";
+ }
+ };
+
+ class EscapedUnicodeToUtf8 : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ unsigned char u[ 7 ];
+ u[ 0 ] = 0xe0 | 0x0a;
+ u[ 1 ] = 0x80;
+ u[ 2 ] = 0x80;
+ u[ 3 ] = 0xe0 | 0x0a;
+ u[ 4 ] = 0x80;
+ u[ 5 ] = 0x80;
+ u[ 6 ] = 0;
+ b.append( "a", (char *) u );
+ BSONObj built = b.obj();
+ ASSERT_EQUALS( string( (char *) u ), built.firstElement().valuestr() );
+ return built;
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"\\ua000\\uA000\" }";
+ }
+ };
+
+ class Utf8AllOnes : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
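+ // Hand-assembled UTF-8 for U+0001, U+007F, U+07FF and U+FFFF, matching the
+ // \u escapes in json().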
+ unsigned char u[ 8 ];
+ u[ 0 ] = 0x01;
+
+ u[ 1 ] = 0x7f;
+
+ u[ 2 ] = 0xdf;
+ u[ 3 ] = 0xbf;
+
+ u[ 4 ] = 0xef;
+ u[ 5 ] = 0xbf;
+ u[ 6 ] = 0xbf;
+
+ u[ 7 ] = 0;
+
+ b.append( "a", (char *) u );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"\\u0001\\u007f\\u07ff\\uffff\" }";
+ }
+ };
+
+ class Utf8FirstByteOnes : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ unsigned char u[ 6 ];
+ u[ 0 ] = 0xdc;
+ u[ 1 ] = 0x80;
+
+ u[ 2 ] = 0xef;
+ u[ 3 ] = 0xbc;
+ u[ 4 ] = 0x80;
+
+ u[ 5 ] = 0;
+
+ b.append( "a", (char *) u );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : \"\\u0700\\uff00\" }";
+ }
+ };
+
+ class DBRef : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ OID o;
+ memset( &o, 0, 12 );
+ b.appendDBRef( "a", "foo", o );
+ return b.obj();
+ }
+ // NOTE Testing of other formats is handled by the Base class.
+ virtual string json() const {
+ return "{ \"a\" : { \"$ref\" : \"foo\", \"$id\" : \"000000000000000000000000\" } }";
+ }
+ };
+
+ class NewDBRef : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ OID o;
+ memset( &o, 0, 12 );
+ b.append( "$ref", "items" );
+ b.appendOID( "$id", &o );
+ BSONObjBuilder c;
+ c.append( "refval", b.done() );
+ return c.obj();
+ }
+ virtual string json() const {
+ return "{ \"refval\" : { \"$ref\" : \"items\", \"$id\" : ObjectId( \"000000000000000000000000\" ) } }";
+ }
+ };
+
+ class Oid : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendOID( "_id" );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"_id\" : { \"$oid\" : \"000000000000000000000000\" } }";
+ }
+ };
+
+ class Oid2 : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ OID o;
+ memset( &o, 0x0f, 12 );
+ b.appendOID( "_id", &o );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"_id\" : ObjectId( \"0f0f0f0f0f0f0f0f0f0f0f0f\" ) }";
+ }
+ };
+
+ class StringId : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append("_id", "000000000000000000000000");
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"_id\" : \"000000000000000000000000\" }";
+ }
+ };
+
+ class BinData : public Base {
+ virtual BSONObj bson() const {
+ char z[ 3 ];
+ z[ 0 ] = 'a';
+ z[ 1 ] = 'b';
+ z[ 2 ] = 'c';
+ BSONObjBuilder b;
+ b.appendBinData( "a", 3, BinDataGeneral, z );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$binary\" : \"YWJj\", \"$type\" : \"00\" } }";
+ }
+ };
+
+ class BinDataPaddedSingle : public Base {
+ virtual BSONObj bson() const {
+ char z[ 2 ];
+ z[ 0 ] = 'a';
+ z[ 1 ] = 'b';
+ BSONObjBuilder b;
+ b.appendBinData( "a", 2, BinDataGeneral, z );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$binary\" : \"YWI=\", \"$type\" : \"00\" } }";
+ }
+ };
+
+ class BinDataPaddedDouble : public Base {
+ virtual BSONObj bson() const {
+ char z[ 1 ];
+ z[ 0 ] = 'a';
+ BSONObjBuilder b;
+ b.appendBinData( "a", 1, BinDataGeneral, z );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$binary\" : \"YQ==\", \"$type\" : \"00\" } }";
+ }
+ };
+
+ class BinDataAllChars : public Base {
+ virtual BSONObj bson() const {
+ unsigned char z[] = {
+ 0x00, 0x10, 0x83, 0x10, 0x51, 0x87, 0x20, 0x92, 0x8B, 0x30,
+ 0xD3, 0x8F, 0x41, 0x14, 0x93, 0x51, 0x55, 0x97, 0x61, 0x96,
+ 0x9B, 0x71, 0xD7, 0x9F, 0x82, 0x18, 0xA3, 0x92, 0x59, 0xA7,
+ 0xA2, 0x9A, 0xAB, 0xB2, 0xDB, 0xAF, 0xC3, 0x1C, 0xB3, 0xD3,
+ 0x5D, 0xB7, 0xE3, 0x9E, 0xBB, 0xF3, 0xDF, 0xBF
+ };
+ BSONObjBuilder b;
+ b.appendBinData( "a", 48, BinDataGeneral, z );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$binary\" : \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/\", \"$type\" : \"00\" } }";
+ }
+ };
+
+ class Date : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendDate( "a", 0 );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$date\" : 0 } }";
+ }
+ };
+
+ class DateNonzero : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendDate( "a", 100 );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$date\" : 100 } }";
+ }
+ };
+
+ class DateTooLong : public Bad {
+ virtual string json() const {
+ stringstream ss;
+ ss << "{ \"a\" : { \"$date\" : " << ~(0LL) << "0" << " } }";
+ return ss.str();
+ }
+ };
+
+ class Regex : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendRegex( "a", "b", "i" );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$regex\" : \"b\", \"$options\" : \"i\" } }";
+ }
+ };
+
+ class RegexEscape : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendRegex( "a", "\t", "i" );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : { \"$regex\" : \"\\t\", \"$options\" : \"i\" } }";
+ }
+ };
+
+ class RegexWithQuotes : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.appendRegex( "a", "\"", "" );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"a\" : /\"/ }";
+ }
+ };
+
+ class RegexInvalidOption : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : { \"$regex\" : \"b\", \"$options\" : \"1\" } }";
+ }
+ };
+
+ class RegexInvalidOption2 : public Bad {
+ virtual string json() const {
+ return "{ \"a\" : /b/c }";
+ }
+ };
+
+ class Malformed : public Bad {
+ string json() const {
+ return "{";
+ }
+ };
+
+ class UnquotedFieldName : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "a_b", 1 );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ a_b : 1 }";
+ }
+ };
+
+ class UnquotedFieldNameDollar : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "$a_b", 1 );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ $a_b : 1 }";
+ }
+ };
+
+ class SingleQuotes : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "ab'c\"", "bb\b '\"" );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ 'ab\\'c\"' : 'bb\\b \\'\"' }";
+ }
+ };
+
+ class ObjectId : public Base {
+ virtual BSONObj bson() const {
+ OID id;
+ id.init( "deadbeeff00ddeadbeeff00d" );
+ BSONObjBuilder b;
+ b.appendOID( "_id", &id );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"_id\": ObjectId( \"deadbeeff00ddeadbeeff00d\" ) }";
+ }
+ };
+
+ class ObjectId2 : public Base {
+ virtual BSONObj bson() const {
+ OID id;
+ id.init( "deadbeeff00ddeadbeeff00d" );
+ BSONObjBuilder b;
+ b.appendOID( "foo", &id );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"foo\": ObjectId( \"deadbeeff00ddeadbeeff00d\" ) }";
+ }
+ };
+
+ class NumericTypes : public Base {
+ public:
+ void run() {
+ Base::run();
+
+ BSONObj o = fromjson(json());
+
+ ASSERT(o["int"].type() == NumberInt);
+ ASSERT(o["long"].type() == NumberLong);
+ ASSERT(o["double"].type() == NumberDouble);
+
+ ASSERT(o["long"].numberLong() == 9223372036854775807ll);
+ }
+
+ virtual BSONObj bson() const {
+ return BSON( "int" << 123
+ << "long" << 9223372036854775807ll // 2**63 - 1
+ << "double" << 3.14
+ );
+ }
+ virtual string json() const {
+ return "{ \"int\": 123, \"long\": 9223372036854775807, \"double\": 3.14 }";
+ }
+ };
+
+ class NegativeNumericTypes : public Base {
+ public:
+ void run() {
+ Base::run();
+
+ BSONObj o = fromjson(json());
+
+ ASSERT(o["int"].type() == NumberInt);
+ ASSERT(o["long"].type() == NumberLong);
+ ASSERT(o["double"].type() == NumberDouble);
+
+ ASSERT(o["long"].numberLong() == -9223372036854775807ll);
+ }
+
+ virtual BSONObj bson() const {
+ return BSON( "int" << -123
+ << "long" << -9223372036854775807ll // -1 * (2**63 - 1)
+ << "double" << -3.14
+ );
+ }
+ virtual string json() const {
+ return "{ \"int\": -123, \"long\": -9223372036854775807, \"double\": -3.14 }";
+ }
+ };
+
+ class EmbeddedDatesBase : public Base {
+ public:
+
+ virtual void run() {
+ BSONObj o = fromjson( json() );
+ ASSERT_EQUALS( 3 , (o["time.valid"].type()) );
+ BSONObj e = o["time.valid"].embeddedObjectUserCheck();
+ ASSERT_EQUALS( 9 , e["$gt"].type() );
+ ASSERT_EQUALS( 9 , e["$lt"].type() );
+ Base::run();
+ }
+
+ BSONObj bson() const {
+ BSONObjBuilder e;
+ e.appendDate( "$gt" , 1257829200000LL );
+ e.appendDate( "$lt" , 1257829200100LL );
+
+ BSONObjBuilder b;
+ b.append( "time.valid" , e.obj() );
+ return b.obj();
+ }
+ virtual string json() const = 0;
+ };
+
+ struct EmbeddedDatesFormat1 : EmbeddedDatesBase {
+ string json() const {
+ return "{ \"time.valid\" : { $gt : { \"$date\" : 1257829200000 } , $lt : { \"$date\" : 1257829200100 } } }";
+ }
+ };
+ struct EmbeddedDatesFormat2 : EmbeddedDatesBase {
+ string json() const {
+ return "{ \"time.valid\" : { $gt : Date(1257829200000) , $lt : Date( 1257829200100 ) } }";
+ }
+ };
+ struct EmbeddedDatesFormat3 : EmbeddedDatesBase {
+ string json() const {
+ return "{ \"time.valid\" : { $gt : new Date(1257829200000) , $lt : new Date( 1257829200100 ) } }";
+ }
+ };
+
+ class NullString : public Base {
+ virtual BSONObj bson() const {
+ BSONObjBuilder b;
+ b.append( "x" , "a\0b" , 4 );
+ return b.obj();
+ }
+ virtual string json() const {
+ return "{ \"x\" : \"a\\u0000b\" }";
+ }
+ };
+
+ } // namespace FromJsonTests
+
+ class All : public Suite {
+ public:
+ All() : Suite( "json" ) {
+ }
+
+ void setupTests() {
+ add< JsonStringTests::Empty >();
+ add< JsonStringTests::SingleStringMember >();
+ add< JsonStringTests::EscapedCharacters >();
+ add< JsonStringTests::AdditionalControlCharacters >();
+ add< JsonStringTests::ExtendedAscii >();
+ add< JsonStringTests::EscapeFieldName >();
+ add< JsonStringTests::SingleIntMember >();
+ add< JsonStringTests::SingleNumberMember >();
+ add< JsonStringTests::InvalidNumbers >();
+ add< JsonStringTests::NumberPrecision >();
+ add< JsonStringTests::NegativeNumber >();
+ add< JsonStringTests::SingleBoolMember >();
+ add< JsonStringTests::SingleNullMember >();
+ add< JsonStringTests::SingleObjectMember >();
+ add< JsonStringTests::TwoMembers >();
+ add< JsonStringTests::EmptyArray >();
+ add< JsonStringTests::Array >();
+ add< JsonStringTests::DBRef >();
+ add< JsonStringTests::DBRefZero >();
+ add< JsonStringTests::ObjectId >();
+ add< JsonStringTests::BinData >();
+ add< JsonStringTests::Symbol >();
+ add< JsonStringTests::Date >();
+ add< JsonStringTests::Regex >();
+ add< JsonStringTests::RegexEscape >();
+ add< JsonStringTests::RegexManyOptions >();
+ add< JsonStringTests::CodeTests >();
+ add< JsonStringTests::TimestampTests >();
+ add< JsonStringTests::NullString >();
+ add< JsonStringTests::AllTypes >();
+
+ add< FromJsonTests::Empty >();
+ add< FromJsonTests::EmptyWithSpace >();
+ add< FromJsonTests::SingleString >();
+ add< FromJsonTests::EmptyStrings >();
+ add< FromJsonTests::ReservedFieldName >();
+ add< FromJsonTests::OkDollarFieldName >();
+ add< FromJsonTests::SingleNumber >();
+ add< FromJsonTests::RealNumber >();
+ add< FromJsonTests::FancyNumber >();
+ add< FromJsonTests::TwoElements >();
+ add< FromJsonTests::Subobject >();
+ add< FromJsonTests::ArrayEmpty >();
+ add< FromJsonTests::Array >();
+ add< FromJsonTests::True >();
+ add< FromJsonTests::False >();
+ add< FromJsonTests::Null >();
+ add< FromJsonTests::EscapedCharacters >();
+ add< FromJsonTests::NonEscapedCharacters >();
+ add< FromJsonTests::AllowedControlCharacter >();
+ add< FromJsonTests::EscapeFieldName >();
+ add< FromJsonTests::EscapedUnicodeToUtf8 >();
+ add< FromJsonTests::Utf8AllOnes >();
+ add< FromJsonTests::Utf8FirstByteOnes >();
+ add< FromJsonTests::DBRef >();
+ add< FromJsonTests::NewDBRef >();
+ add< FromJsonTests::Oid >();
+ add< FromJsonTests::Oid2 >();
+ add< FromJsonTests::StringId >();
+ add< FromJsonTests::BinData >();
+ add< FromJsonTests::BinDataPaddedSingle >();
+ add< FromJsonTests::BinDataPaddedDouble >();
+ add< FromJsonTests::BinDataAllChars >();
+ add< FromJsonTests::Date >();
+ add< FromJsonTests::DateNonzero >();
+ add< FromJsonTests::DateTooLong >();
+ add< FromJsonTests::Regex >();
+ add< FromJsonTests::RegexEscape >();
+ add< FromJsonTests::RegexWithQuotes >();
+ add< FromJsonTests::RegexInvalidOption >();
+ add< FromJsonTests::RegexInvalidOption2 >();
+ add< FromJsonTests::Malformed >();
+ add< FromJsonTests::UnquotedFieldName >();
+ add< FromJsonTests::UnquotedFieldNameDollar >();
+ add< FromJsonTests::SingleQuotes >();
+ add< FromJsonTests::ObjectId >();
+ add< FromJsonTests::ObjectId2 >();
+ add< FromJsonTests::NumericTypes >();
+ add< FromJsonTests::NegativeNumericTypes >();
+ add< FromJsonTests::EmbeddedDatesFormat1 >();
+ add< FromJsonTests::EmbeddedDatesFormat2 >();
+ add< FromJsonTests::EmbeddedDatesFormat3 >();
+ add< FromJsonTests::NullString >();
+ }
+ } myall;
+
+} // namespace JsonTests
+
diff --git a/src/mongo/dbtests/jstests.cpp b/src/mongo/dbtests/jstests.cpp
new file mode 100644
index 00000000000..9782eedaacb
--- /dev/null
+++ b/src/mongo/dbtests/jstests.cpp
@@ -0,0 +1,1052 @@
+// jstests.cpp
+//
+
+/**
+ * Copyright (C) 2009 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../db/instance.h"
+
+#include "../pch.h"
+#include "../scripting/engine.h"
+#include "../util/timer.h"
+
+#include "dbtests.h"
+
+namespace mongo {
+ bool dbEval(const string& dbName , BSONObj& cmd, BSONObjBuilder& result, string& errmsg);
+} // namespace mongo
+
+namespace JSTests {
+
+ class Fundamental {
+ public:
+ void run() {
+ // By calling JavaJSImpl() inside run(), we ensure the unit test framework's
+ // signal handlers are pre-installed from JNI's perspective. This allows
+ // JNI to catch signals generated within the JVM and forward other signals
+ // as appropriate.
+ ScriptEngine::setup();
+ globalScriptEngine->runTest();
+ }
+ };
+
+ class BasicScope {
+ public:
+ void run() {
+ auto_ptr<Scope> s;
+ s.reset( globalScriptEngine->newScope() );
+
+ s->setNumber( "x" , 5 );
+ ASSERT( 5 == s->getNumber( "x" ) );
+
+ s->setNumber( "x" , 1.67 );
+ ASSERT( 1.67 == s->getNumber( "x" ) );
+
+ s->setString( "s" , "eliot was here" );
+ ASSERT( "eliot was here" == s->getString( "s" ) );
+
+ s->setBoolean( "b" , true );
+ ASSERT( s->getBoolean( "b" ) );
+
+ if ( 0 ) {
+ s->setBoolean( "b" , false );
+ ASSERT( ! s->getBoolean( "b" ) );
+ }
+ }
+ };
+
+ class ResetScope {
+ public:
+ void run() {
+ // Not worrying about this for now SERVER-446.
+ /*
+ auto_ptr<Scope> s;
+ s.reset( globalScriptEngine->newScope() );
+
+ s->setBoolean( "x" , true );
+ ASSERT( s->getBoolean( "x" ) );
+
+ s->reset();
+ ASSERT( !s->getBoolean( "x" ) );
+ */
+ }
+ };
+
+ class FalseTests {
+ public:
+ void run() {
+ Scope * s = globalScriptEngine->newScope();
+
+ ASSERT( ! s->getBoolean( "x" ) );
+
+ s->setString( "z" , "" );
+ ASSERT( ! s->getBoolean( "z" ) );
+
+
+ delete s ;
+ }
+ };
+
+ class SimpleFunctions {
+ public:
+ void run() {
+ Scope * s = globalScriptEngine->newScope();
+
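+ // invoke() leaves the value of the evaluated expression (or the invoked function's
+ // return value) in the scope variable "return", which the assertions read back
+ // via getNumber()/getBoolean().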
+ s->invoke( "x=5;" , 0, 0 );
+ ASSERT( 5 == s->getNumber( "x" ) );
+
+ s->invoke( "return 17;" , 0, 0 );
+ ASSERT( 17 == s->getNumber( "return" ) );
+
+ s->invoke( "function(){ return 17; }" , 0, 0 );
+ ASSERT( 17 == s->getNumber( "return" ) );
+
+ s->setNumber( "x" , 1.76 );
+ s->invoke( "return x == 1.76; " , 0, 0 );
+ ASSERT( s->getBoolean( "return" ) );
+
+ s->setNumber( "x" , 1.76 );
+ s->invoke( "return x == 1.79; " , 0, 0 );
+ ASSERT( ! s->getBoolean( "return" ) );
+
+ BSONObj obj = BSON( "" << 11.0 );
+ s->invoke( "function( z ){ return 5 + z; }" , &obj, 0 );
+ ASSERT_EQUALS( 16 , s->getNumber( "return" ) );
+
+ delete s;
+ }
+ };
+
+ class ObjectMapping {
+ public:
+ void run() {
+ Scope * s = globalScriptEngine->newScope();
+
+ BSONObj o = BSON( "x" << 17.0 << "y" << "eliot" << "z" << "sara" );
+ s->setObject( "blah" , o );
+
+ s->invoke( "return blah.x;" , 0, 0 );
+ ASSERT_EQUALS( 17 , s->getNumber( "return" ) );
+ s->invoke( "return blah.y;" , 0, 0 );
+ ASSERT_EQUALS( "eliot" , s->getString( "return" ) );
+
+ s->invoke( "return this.z;" , 0, &o );
+ ASSERT_EQUALS( "sara" , s->getString( "return" ) );
+
+ s->invoke( "return this.z == 'sara';" , 0, &o );
+ ASSERT_EQUALS( true , s->getBoolean( "return" ) );
+
+ s->invoke( "this.z == 'sara';" , 0, &o );
+ ASSERT_EQUALS( true , s->getBoolean( "return" ) );
+
+ s->invoke( "this.z == 'asara';" , 0, &o );
+ ASSERT_EQUALS( false , s->getBoolean( "return" ) );
+
+ s->invoke( "return this.x == 17;" , 0, &o );
+ ASSERT_EQUALS( true , s->getBoolean( "return" ) );
+
+ s->invoke( "return this.x == 18;" , 0, &o );
+ ASSERT_EQUALS( false , s->getBoolean( "return" ) );
+
+ s->invoke( "function(){ return this.x == 17; }" , 0, &o );
+ ASSERT_EQUALS( true , s->getBoolean( "return" ) );
+
+ s->invoke( "function(){ return this.x == 18; }" , 0, &o );
+ ASSERT_EQUALS( false , s->getBoolean( "return" ) );
+
+ s->invoke( "function (){ return this.x == 17; }" , 0, &o );
+ ASSERT_EQUALS( true , s->getBoolean( "return" ) );
+
+ s->invoke( "function z(){ return this.x == 18; }" , 0, &o );
+ ASSERT_EQUALS( false , s->getBoolean( "return" ) );
+
+ s->invoke( "function (){ this.x == 17; }" , 0, &o );
+ ASSERT_EQUALS( false , s->getBoolean( "return" ) );
+
+ s->invoke( "function z(){ this.x == 18; }" , 0, &o );
+ ASSERT_EQUALS( false , s->getBoolean( "return" ) );
+
+ s->invoke( "x = 5; for( ; x <10; x++){ a = 1; }" , 0, &o );
+ ASSERT_EQUALS( 10 , s->getNumber( "x" ) );
+
+ delete s;
+ }
+ };
+
+ class ObjectDecoding {
+ public:
+ void run() {
+ Scope * s = globalScriptEngine->newScope();
+
+ s->invoke( "z = { num : 1 };" , 0, 0 );
+ BSONObj out = s->getObject( "z" );
+ ASSERT_EQUALS( 1 , out["num"].number() );
+ ASSERT_EQUALS( 1 , out.nFields() );
+
+ s->invoke( "z = { x : 'eliot' };" , 0, 0 );
+ out = s->getObject( "z" );
+ ASSERT_EQUALS( (string)"eliot" , out["x"].valuestr() );
+ ASSERT_EQUALS( 1 , out.nFields() );
+
+ BSONObj o = BSON( "x" << 17 );
+ s->setObject( "blah" , o );
+ out = s->getObject( "blah" );
+ ASSERT_EQUALS( 17 , out["x"].number() );
+
+ delete s;
+ }
+ };
+
+ class JSOIDTests {
+ public:
+ void run() {
+#ifdef MOZJS
+ Scope * s = globalScriptEngine->newScope();
+
+ s->localConnect( "blah" );
+
+ s->invoke( "z = { _id : new ObjectId() , a : 123 };" , 0, 0 );
+ BSONObj out = s->getObject( "z" );
+ ASSERT_EQUALS( 123 , out["a"].number() );
+ ASSERT_EQUALS( jstOID , out["_id"].type() );
+
+ OID save = out["_id"].__oid();
+
+ s->setObject( "a" , out );
+
+ s->invoke( "y = { _id : a._id , a : 124 };" , 0, 0 );
+ out = s->getObject( "y" );
+ ASSERT_EQUALS( 124 , out["a"].number() );
+ ASSERT_EQUALS( jstOID , out["_id"].type() );
+ ASSERT_EQUALS( out["_id"].__oid().str() , save.str() );
+
+ s->invoke( "y = { _id : new ObjectId( a._id ) , a : 125 };" , 0, 0 );
+ out = s->getObject( "y" );
+ ASSERT_EQUALS( 125 , out["a"].number() );
+ ASSERT_EQUALS( jstOID , out["_id"].type() );
+ ASSERT_EQUALS( out["_id"].__oid().str() , save.str() );
+
+ delete s;
+#endif
+ }
+ };
+
+ class SetImplicit {
+ public:
+ void run() {
+ Scope *s = globalScriptEngine->newScope();
+
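+            // setObject() with a dotted name does not implicitly create intermediate objects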
+ BSONObj o = BSON( "foo" << "bar" );
+ s->setObject( "a.b", o );
+ ASSERT( s->getObject( "a" ).isEmpty() );
+
+ BSONObj o2 = BSONObj();
+ s->setObject( "a", o2 );
+ s->setObject( "a.b", o );
+ ASSERT( s->getObject( "a" ).isEmpty() );
+
+ o2 = fromjson( "{b:{}}" );
+ s->setObject( "a", o2 );
+ s->setObject( "a.b", o );
+ ASSERT( !s->getObject( "a" ).isEmpty() );
+ }
+ };
+
+ class ObjectModReadonlyTests {
+ public:
+ void run() {
+ Scope * s = globalScriptEngine->newScope();
+
+ BSONObj o = BSON( "x" << 17 << "y" << "eliot" << "z" << "sara" << "zz" << BSONObj() );
+ s->setObject( "blah" , o , true );
+
+ s->invoke( "blah.y = 'e'", 0, 0 );
+ BSONObj out = s->getObject( "blah" );
+ ASSERT( strlen( out["y"].valuestr() ) > 1 );
+
+ s->invoke( "blah.a = 19;" , 0, 0 );
+ out = s->getObject( "blah" );
+ ASSERT( out["a"].eoo() );
+
+ s->invoke( "blah.zz.a = 19;" , 0, 0 );
+ out = s->getObject( "blah" );
+ ASSERT( out["zz"].embeddedObject()["a"].eoo() );
+
+ s->setObject( "blah.zz", BSON( "a" << 19 ) );
+ out = s->getObject( "blah" );
+ ASSERT( out["zz"].embeddedObject()["a"].eoo() );
+
+ s->invoke( "delete blah['x']" , 0, 0 );
+ out = s->getObject( "blah" );
+ ASSERT( !out["x"].eoo() );
+
+            // the read-only object itself can still be overwritten by assignment
+ s->invoke( "blah = {}", 0, 0 );
+ out = s->getObject( "blah" );
+ ASSERT( out.isEmpty() );
+
+ // test array - can't implement this in v8
+// o = fromjson( "{a:[1,2,3]}" );
+// s->setObject( "blah", o, true );
+// out = s->getObject( "blah" );
+// s->invoke( "blah.a[ 0 ] = 4;", BSONObj() );
+// s->invoke( "delete blah['a'][ 2 ];", BSONObj() );
+// out = s->getObject( "blah" );
+// ASSERT_EQUALS( 1.0, out[ "a" ].embeddedObject()[ 0 ].number() );
+// ASSERT_EQUALS( 3.0, out[ "a" ].embeddedObject()[ 2 ].number() );
+
+ delete s;
+ }
+ };
+
+ class OtherJSTypes {
+ public:
+ void run() {
+ Scope * s = globalScriptEngine->newScope();
+
+ {
+ // date
+ BSONObj o;
+ {
+ BSONObjBuilder b;
+ b.appendDate( "d" , 123456789 );
+ o = b.obj();
+ }
+ s->setObject( "x" , o );
+
+ s->invoke( "return x.d.getTime() != 12;" , 0, 0 );
+ ASSERT_EQUALS( true, s->getBoolean( "return" ) );
+
+ s->invoke( "z = x.d.getTime();" , 0, 0 );
+ ASSERT_EQUALS( 123456789 , s->getNumber( "z" ) );
+
+ s->invoke( "z = { z : x.d }" , 0, 0 );
+ BSONObj out = s->getObject( "z" );
+ ASSERT( out["z"].type() == Date );
+ }
+
+ {
+ // regex
+ BSONObj o;
+ {
+ BSONObjBuilder b;
+ b.appendRegex( "r" , "^a" , "i" );
+ o = b.obj();
+ }
+ s->setObject( "x" , o );
+
+ s->invoke( "z = x.r.test( 'b' );" , 0, 0 );
+ ASSERT_EQUALS( false , s->getBoolean( "z" ) );
+
+ s->invoke( "z = x.r.test( 'a' );" , 0, 0 );
+ ASSERT_EQUALS( true , s->getBoolean( "z" ) );
+
+ s->invoke( "z = x.r.test( 'ba' );" , 0, 0 );
+ ASSERT_EQUALS( false , s->getBoolean( "z" ) );
+
+ s->invoke( "z = { a : x.r };" , 0, 0 );
+
+ BSONObj out = s->getObject("z");
+ ASSERT_EQUALS( (string)"^a" , out["a"].regex() );
+ ASSERT_EQUALS( (string)"i" , out["a"].regexFlags() );
+
+ }
+
+ // array
+ {
+ BSONObj o = fromjson( "{r:[1,2,3]}" );
+ s->setObject( "x", o, false );
+ BSONObj out = s->getObject( "x" );
+ ASSERT_EQUALS( Array, out.firstElement().type() );
+
+ s->setObject( "x", o, true );
+ out = s->getObject( "x" );
+ ASSERT_EQUALS( Array, out.firstElement().type() );
+ }
+
+ delete s;
+ }
+ };
+
+ class SpecialDBTypes {
+ public:
+ void run() {
+ Scope * s = globalScriptEngine->newScope();
+
+ BSONObjBuilder b;
+ b.appendTimestamp( "a" , 123456789 );
+ b.appendMinKey( "b" );
+ b.appendMaxKey( "c" );
+ b.appendTimestamp( "d" , 1234000 , 9876 );
+
+
+ {
+ BSONObj t = b.done();
+ ASSERT_EQUALS( 1234000U , t["d"].timestampTime() );
+ ASSERT_EQUALS( 9876U , t["d"].timestampInc() );
+ }
+
+ s->setObject( "z" , b.obj() );
+
+ ASSERT( s->invoke( "y = { a : z.a , b : z.b , c : z.c , d: z.d }" , 0, 0 ) == 0 );
+
+ BSONObj out = s->getObject( "y" );
+ ASSERT_EQUALS( Timestamp , out["a"].type() );
+ ASSERT_EQUALS( MinKey , out["b"].type() );
+ ASSERT_EQUALS( MaxKey , out["c"].type() );
+ ASSERT_EQUALS( Timestamp , out["d"].type() );
+
+ ASSERT_EQUALS( 9876U , out["d"].timestampInc() );
+ ASSERT_EQUALS( 1234000U , out["d"].timestampTime() );
+ ASSERT_EQUALS( 123456789U , out["a"].date() );
+
+ delete s;
+ }
+ };
+
+ class TypeConservation {
+ public:
+ void run() {
+ Scope * s = globalScriptEngine->newScope();
+
+ // -- A --
+
+ BSONObj o;
+ {
+ BSONObjBuilder b ;
+ b.append( "a" , (int)5 );
+ b.append( "b" , 5.6 );
+ o = b.obj();
+ }
+ ASSERT_EQUALS( NumberInt , o["a"].type() );
+ ASSERT_EQUALS( NumberDouble , o["b"].type() );
+
+ s->setObject( "z" , o );
+ s->invoke( "return z" , 0, 0 );
+ BSONObj out = s->getObject( "return" );
+ ASSERT_EQUALS( 5 , out["a"].number() );
+ ASSERT_EQUALS( 5.6 , out["b"].number() );
+
+ ASSERT_EQUALS( NumberDouble , out["b"].type() );
+ ASSERT_EQUALS( NumberInt , out["a"].type() );
+
+ // -- B --
+
+ {
+ BSONObjBuilder b ;
+ b.append( "a" , (int)5 );
+ b.append( "b" , 5.6 );
+ o = b.obj();
+ }
+
+ s->setObject( "z" , o , false );
+ s->invoke( "return z" , 0, 0 );
+ out = s->getObject( "return" );
+ ASSERT_EQUALS( 5 , out["a"].number() );
+ ASSERT_EQUALS( 5.6 , out["b"].number() );
+
+ ASSERT_EQUALS( NumberDouble , out["b"].type() );
+ ASSERT_EQUALS( NumberInt , out["a"].type() );
+
+
+ // -- C --
+
+ {
+ BSONObjBuilder b ;
+
+ {
+ BSONObjBuilder c;
+ c.append( "0" , 5.5 );
+ c.append( "1" , 6 );
+ b.appendArray( "a" , c.obj() );
+ }
+
+ o = b.obj();
+ }
+
+ ASSERT_EQUALS( NumberDouble , o["a"].embeddedObjectUserCheck()["0"].type() );
+ ASSERT_EQUALS( NumberInt , o["a"].embeddedObjectUserCheck()["1"].type() );
+
+ s->setObject( "z" , o , false );
+ out = s->getObject( "z" );
+
+ ASSERT_EQUALS( NumberDouble , out["a"].embeddedObjectUserCheck()["0"].type() );
+ ASSERT_EQUALS( NumberInt , out["a"].embeddedObjectUserCheck()["1"].type() );
+
+ s->invokeSafe( "z.z = 5;" , 0, 0 );
+ out = s->getObject( "z" );
+ ASSERT_EQUALS( 5 , out["z"].number() );
+ ASSERT_EQUALS( NumberDouble , out["a"].embeddedObjectUserCheck()["0"].type() );
+            // Commented out so that the v8 tests will pass
+//            ASSERT_EQUALS( NumberDouble , out["a"].embeddedObjectUserCheck()["1"].type() ); // TODO: this is technically bad, but here to make sure that I understand the behavior
+
+
+ // Eliot says I don't have to worry about this case
+
+// // -- D --
+//
+// o = fromjson( "{a:3.0,b:4.5}" );
+// ASSERT_EQUALS( NumberDouble , o["a"].type() );
+// ASSERT_EQUALS( NumberDouble , o["b"].type() );
+//
+// s->setObject( "z" , o , false );
+// s->invoke( "return z" , BSONObj() );
+// out = s->getObject( "return" );
+// ASSERT_EQUALS( 3 , out["a"].number() );
+// ASSERT_EQUALS( 4.5 , out["b"].number() );
+//
+// ASSERT_EQUALS( NumberDouble , out["b"].type() );
+// ASSERT_EQUALS( NumberDouble , out["a"].type() );
+//
+
+ delete s;
+ }
+
+ };
+
+ class NumberLong {
+ public:
+ void run() {
+ auto_ptr<Scope> s( globalScriptEngine->newScope() );
+ s->localConnect( "blah" );
+ BSONObjBuilder b;
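+            // a value above 2^53, too large to be represented exactly as a double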
+ long long val = (long long)( 0xbabadeadbeefbaddULL );
+ b.append( "a", val );
+ BSONObj in = b.obj();
+ s->setObject( "a", in );
+ BSONObj out = s->getObject( "a" );
+ ASSERT_EQUALS( mongo::NumberLong, out.firstElement().type() );
+
+ ASSERT( s->exec( "b = {b:a.a}", "foo", false, true, false ) );
+ out = s->getObject( "b" );
+ ASSERT_EQUALS( mongo::NumberLong, out.firstElement().type() );
+ if( val != out.firstElement().numberLong() ) {
+ cout << val << endl;
+ cout << out.firstElement().numberLong() << endl;
+ cout << out.toString() << endl;
+ ASSERT_EQUALS( val, out.firstElement().numberLong() );
+ }
+
+ ASSERT( s->exec( "c = {c:a.a.toString()}", "foo", false, true, false ) );
+ out = s->getObject( "c" );
+ stringstream ss;
+ ss << "NumberLong(\"" << val << "\")";
+ ASSERT_EQUALS( ss.str(), out.firstElement().valuestr() );
+
+ ASSERT( s->exec( "d = {d:a.a.toNumber()}", "foo", false, true, false ) );
+ out = s->getObject( "d" );
+ ASSERT_EQUALS( NumberDouble, out.firstElement().type() );
+ ASSERT_EQUALS( double( val ), out.firstElement().number() );
+
+ ASSERT( s->exec( "e = {e:a.a.floatApprox}", "foo", false, true, false ) );
+ out = s->getObject( "e" );
+ ASSERT_EQUALS( NumberDouble, out.firstElement().type() );
+ ASSERT_EQUALS( double( val ), out.firstElement().number() );
+
+ ASSERT( s->exec( "f = {f:a.a.top}", "foo", false, true, false ) );
+ out = s->getObject( "f" );
+ ASSERT( NumberDouble == out.firstElement().type() || NumberInt == out.firstElement().type() );
+
+ s->setObject( "z", BSON( "z" << (long long)( 4 ) ) );
+ ASSERT( s->exec( "y = {y:z.z.top}", "foo", false, true, false ) );
+ out = s->getObject( "y" );
+ ASSERT_EQUALS( Undefined, out.firstElement().type() );
+
+ ASSERT( s->exec( "x = {x:z.z.floatApprox}", "foo", false, true, false ) );
+ out = s->getObject( "x" );
+ ASSERT( NumberDouble == out.firstElement().type() || NumberInt == out.firstElement().type() );
+ ASSERT_EQUALS( double( 4 ), out.firstElement().number() );
+
+ ASSERT( s->exec( "w = {w:z.z}", "foo", false, true, false ) );
+ out = s->getObject( "w" );
+ ASSERT_EQUALS( mongo::NumberLong, out.firstElement().type() );
+ ASSERT_EQUALS( 4, out.firstElement().numberLong() );
+
+ }
+ };
+
+ class NumberLong2 {
+ public:
+ void run() {
+ auto_ptr<Scope> s( globalScriptEngine->newScope() );
+ s->localConnect( "blah" );
+
+ BSONObj in;
+ {
+ BSONObjBuilder b;
+ b.append( "a" , 5 );
+ b.append( "b" , (long long)5 );
+ b.append( "c" , (long long)pow( 2.0, 29 ) );
+ b.append( "d" , (long long)pow( 2.0, 30 ) );
+ b.append( "e" , (long long)pow( 2.0, 31 ) );
+ b.append( "f" , (long long)pow( 2.0, 45 ) );
+ in = b.obj();
+ }
+ s->setObject( "a" , in );
+
+ ASSERT( s->exec( "x = tojson( a ); " ,"foo" , false , true , false ) );
+ string outString = s->getString( "x" );
+
+ ASSERT( s->exec( (string)"y = " + outString , "foo2" , false , true , false ) );
+ BSONObj out = s->getObject( "y" );
+ ASSERT_EQUALS( in , out );
+ }
+ };
+
+ class NumberLongUnderLimit {
+ public:
+ void run() {
+ auto_ptr<Scope> s( globalScriptEngine->newScope() );
+ s->localConnect( "blah" );
+ BSONObjBuilder b;
+            // 2^53 - 1: just under the largest integer exactly representable as a double
+ long long val = (long long)( 9007199254740991ULL );
+ b.append( "a", val );
+ BSONObj in = b.obj();
+ s->setObject( "a", in );
+ BSONObj out = s->getObject( "a" );
+ ASSERT_EQUALS( mongo::NumberLong, out.firstElement().type() );
+
+ ASSERT( s->exec( "b = {b:a.a}", "foo", false, true, false ) );
+ out = s->getObject( "b" );
+ ASSERT_EQUALS( mongo::NumberLong, out.firstElement().type() );
+ if( val != out.firstElement().numberLong() ) {
+ cout << val << endl;
+ cout << out.firstElement().numberLong() << endl;
+ cout << out.toString() << endl;
+ ASSERT_EQUALS( val, out.firstElement().numberLong() );
+ }
+
+ ASSERT( s->exec( "c = {c:a.a.toString()}", "foo", false, true, false ) );
+ out = s->getObject( "c" );
+ stringstream ss;
+ ss << "NumberLong(\"" << val << "\")";
+ ASSERT_EQUALS( ss.str(), out.firstElement().valuestr() );
+
+ ASSERT( s->exec( "d = {d:a.a.toNumber()}", "foo", false, true, false ) );
+ out = s->getObject( "d" );
+ ASSERT_EQUALS( NumberDouble, out.firstElement().type() );
+ ASSERT_EQUALS( double( val ), out.firstElement().number() );
+
+ ASSERT( s->exec( "e = {e:a.a.floatApprox}", "foo", false, true, false ) );
+ out = s->getObject( "e" );
+ ASSERT_EQUALS( NumberDouble, out.firstElement().type() );
+ ASSERT_EQUALS( double( val ), out.firstElement().number() );
+
+ ASSERT( s->exec( "f = {f:a.a.top}", "foo", false, true, false ) );
+ out = s->getObject( "f" );
+ ASSERT( Undefined == out.firstElement().type() );
+ }
+ };
+
+ class WeirdObjects {
+ public:
+
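+        // recursively builds a document nested 'depth' levels deep via appendArray()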
+ BSONObj build( int depth ) {
+ BSONObjBuilder b;
+ b.append( "0" , depth );
+ if ( depth > 0 )
+ b.appendArray( "1" , build( depth - 1 ) );
+ return b.obj();
+ }
+
+ void run() {
+ Scope * s = globalScriptEngine->newScope();
+
+ s->localConnect( "blah" );
+
+ for ( int i=5; i<100 ; i += 10 ) {
+ s->setObject( "a" , build(i) , false );
+ s->invokeSafe( "tojson( a )" , 0, 0 );
+
+ s->setObject( "a" , build(5) , true );
+ s->invokeSafe( "tojson( a )" , 0, 0 );
+ }
+
+ delete s;
+ }
+ };
+
+
+ void dummy_function_to_force_dbeval_cpp_linking() {
+ BSONObj cmd;
+ BSONObjBuilder result;
+ string errmsg;
+ dbEval( "test", cmd, result, errmsg);
+ assert(0);
+ }
+
+ DBDirectClient client;
+
+ class Utf8Check {
+ public:
+ Utf8Check() { reset(); }
+ ~Utf8Check() { reset(); }
+ void run() {
+ if( !globalScriptEngine->utf8Ok() ) {
+ log() << "warning: utf8 not supported" << endl;
+ return;
+ }
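+            // round-trip a utf8 _id through server-side js and check that it comes back unchanged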
+ string utf8ObjSpec = "{'_id':'\\u0001\\u007f\\u07ff\\uffff'}";
+ BSONObj utf8Obj = fromjson( utf8ObjSpec );
+ client.insert( ns(), utf8Obj );
+ client.eval( "unittest", "v = db.jstests.utf8check.findOne(); db.jstests.utf8check.remove( {} ); db.jstests.utf8check.insert( v );" );
+ check( utf8Obj, client.findOne( ns(), BSONObj() ) );
+ }
+ private:
+ void check( const BSONObj &one, const BSONObj &two ) {
+ if ( one.woCompare( two ) != 0 ) {
+ static string fail = string( "Assertion failure expected " ) + one.toString() + ", got " + two.toString();
+ FAIL( fail.c_str() );
+ }
+ }
+ void reset() {
+ client.dropCollection( ns() );
+ }
+ static const char *ns() { return "unittest.jstests.utf8check"; }
+ };
+
+ class LongUtf8String {
+ public:
+ LongUtf8String() { reset(); }
+ ~LongUtf8String() { reset(); }
+ void run() {
+ if( !globalScriptEngine->utf8Ok() )
+ return;
+ client.eval( "unittest", "db.jstests.longutf8string.save( {_id:'\\uffff\\uffff\\uffff\\uffff'} )" );
+ }
+ private:
+ void reset() {
+ client.dropCollection( ns() );
+ }
+ static const char *ns() { return "unittest.jstests.longutf8string"; }
+ };
+
+ class InvalidUTF8Check {
+ public:
+ void run() {
+ if( !globalScriptEngine->utf8Ok() )
+ return;
+
+ auto_ptr<Scope> s;
+ s.reset( globalScriptEngine->newScope() );
+
+ BSONObj b;
+ {
+ char crap[5];
+
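+                // 0x80 is not a valid utf8 lead byte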
+ crap[0] = (char) 128;
+ crap[1] = 17;
+ crap[2] = (char) 128;
+ crap[3] = 17;
+ crap[4] = 0;
+
+ BSONObjBuilder bb;
+ bb.append( "x" , crap );
+ b = bb.obj();
+ }
+
+ //cout << "ELIOT: " << b.jsonString() << endl;
+            // it's ok if js handles this; it just can't raise a c++ exception
+ s->invoke( "x=this.x.length;" , 0, &b );
+ }
+ };
+
+ class CodeTests {
+ public:
+ void run() {
+ Scope * s = globalScriptEngine->newScope();
+
+ {
+ BSONObjBuilder b;
+ b.append( "a" , 1 );
+ b.appendCode( "b" , "function(){ out.b = 11; }" );
+ b.appendCodeWScope( "c" , "function(){ out.c = 12; }" , BSONObj() );
+ b.appendCodeWScope( "d" , "function(){ out.d = 13 + bleh; }" , BSON( "bleh" << 5 ) );
+ s->setObject( "foo" , b.obj() );
+ }
+
+ s->invokeSafe( "out = {}; out.a = foo.a; foo.b(); foo.c();" , 0, 0 );
+ BSONObj out = s->getObject( "out" );
+
+ ASSERT_EQUALS( 1 , out["a"].number() );
+ ASSERT_EQUALS( 11 , out["b"].number() );
+ ASSERT_EQUALS( 12 , out["c"].number() );
+
+ // Guess we don't care about this
+ //s->invokeSafe( "foo.d() " , BSONObj() );
+ //out = s->getObject( "out" );
+ //ASSERT_EQUALS( 18 , out["d"].number() );
+
+
+ delete s;
+ }
+ };
+
+ class DBRefTest {
+ public:
+ DBRefTest() {
+ _a = "unittest.dbref.a";
+ _b = "unittest.dbref.b";
+ reset();
+ }
+ ~DBRefTest() {
+ //reset();
+ }
+
+ void run() {
+
+ client.insert( _a , BSON( "a" << "17" ) );
+
+ {
+ BSONObj fromA = client.findOne( _a , BSONObj() );
+ assert( fromA.valid() );
+ //cout << "Froma : " << fromA << endl;
+ BSONObjBuilder b;
+ b.append( "b" , 18 );
+ b.appendDBRef( "c" , "dbref.a" , fromA["_id"].__oid() );
+ client.insert( _b , b.obj() );
+ }
+
+ ASSERT( client.eval( "unittest" , "x = db.dbref.b.findOne(); assert.eq( 17 , x.c.fetch().a , 'ref working' );" ) );
+
+ // BSON DBRef <=> JS DBPointer
+ ASSERT( client.eval( "unittest", "x = db.dbref.b.findOne(); db.dbref.b.drop(); x.c = new DBPointer( x.c.ns, x.c.id ); db.dbref.b.insert( x );" ) );
+ ASSERT_EQUALS( DBRef, client.findOne( "unittest.dbref.b", "" )[ "c" ].type() );
+
+ // BSON Object <=> JS DBRef
+ ASSERT( client.eval( "unittest", "x = db.dbref.b.findOne(); db.dbref.b.drop(); x.c = new DBRef( x.c.ns, x.c.id ); db.dbref.b.insert( x );" ) );
+ ASSERT_EQUALS( Object, client.findOne( "unittest.dbref.b", "" )[ "c" ].type() );
+ ASSERT_EQUALS( string( "dbref.a" ), client.findOne( "unittest.dbref.b", "" )[ "c" ].embeddedObject().getStringField( "$ref" ) );
+ }
+
+ void reset() {
+ client.dropCollection( _a );
+ client.dropCollection( _b );
+ }
+
+ const char * _a;
+ const char * _b;
+ };
+
+ class InformalDBRef {
+ public:
+ void run() {
+ client.insert( ns(), BSON( "i" << 1 ) );
+ BSONObj obj = client.findOne( ns(), BSONObj() );
+ client.remove( ns(), BSONObj() );
+ client.insert( ns(), BSON( "r" << BSON( "$ref" << "jstests.informaldbref" << "$id" << obj["_id"].__oid() << "foo" << "bar" ) ) );
+ obj = client.findOne( ns(), BSONObj() );
+ ASSERT_EQUALS( "bar", obj[ "r" ].embeddedObject()[ "foo" ].str() );
+
+ ASSERT( client.eval( "unittest", "x = db.jstests.informaldbref.findOne(); y = { r:x.r }; db.jstests.informaldbref.drop(); y.r[ \"a\" ] = \"b\"; db.jstests.informaldbref.save( y );" ) );
+ obj = client.findOne( ns(), BSONObj() );
+ ASSERT_EQUALS( "bar", obj[ "r" ].embeddedObject()[ "foo" ].str() );
+ ASSERT_EQUALS( "b", obj[ "r" ].embeddedObject()[ "a" ].str() );
+ }
+ private:
+ static const char *ns() { return "unittest.jstests.informaldbref"; }
+ };
+
+ class BinDataType {
+ public:
+
+ void pp( const char * s , BSONElement e ) {
+ int len;
+ const char * data = e.binData( len );
+ cout << s << ":" << e.binDataType() << "\t" << len << endl;
+ cout << "\t";
+ for ( int i=0; i<len; i++ )
+ cout << (int)(data[i]) << " ";
+ cout << endl;
+ }
+
+ void run() {
+ Scope * s = globalScriptEngine->newScope();
+ s->localConnect( "asd" );
+ const char * foo = "asdas\0asdasd";
+ const char * base64 = "YXNkYXMAYXNkYXNk";
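+            // base64 is the encoding of the 12 bytes in foo, including the embedded NUL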
+
+ BSONObj in;
+ {
+ BSONObjBuilder b;
+ b.append( "a" , 7 );
+ b.appendBinData( "b" , 12 , BinDataGeneral , foo );
+ in = b.obj();
+ s->setObject( "x" , in );
+ }
+
+ s->invokeSafe( "myb = x.b; print( myb ); printjson( myb );" , 0, 0 );
+ s->invokeSafe( "y = { c : myb };" , 0, 0 );
+
+ BSONObj out = s->getObject( "y" );
+ ASSERT_EQUALS( BinData , out["c"].type() );
+// pp( "in " , in["b"] );
+// pp( "out" , out["c"] );
+ ASSERT_EQUALS( 0 , in["b"].woCompare( out["c"] , false ) );
+
+ // check that BinData js class is utilized
+ s->invokeSafe( "q = x.b.toString();", 0, 0 );
+ stringstream expected;
+ expected << "BinData(" << BinDataGeneral << ",\"" << base64 << "\")";
+ ASSERT_EQUALS( expected.str(), s->getString( "q" ) );
+
+ stringstream scriptBuilder;
+ scriptBuilder << "z = { c : new BinData( " << BinDataGeneral << ", \"" << base64 << "\" ) };";
+ string script = scriptBuilder.str();
+ s->invokeSafe( script.c_str(), 0, 0 );
+ out = s->getObject( "z" );
+// pp( "out" , out["c"] );
+ ASSERT_EQUALS( 0 , in["b"].woCompare( out["c"] , false ) );
+
+ s->invokeSafe( "a = { f: new BinData( 128, \"\" ) };", 0, 0 );
+ out = s->getObject( "a" );
+ int len = -1;
+ out[ "f" ].binData( len );
+ ASSERT_EQUALS( 0, len );
+ ASSERT_EQUALS( 128, out[ "f" ].binDataType() );
+
+ delete s;
+ }
+ };
+
+ class VarTests {
+ public:
+ void run() {
+ Scope * s = globalScriptEngine->newScope();
+
+ ASSERT( s->exec( "a = 5;" , "a" , false , true , false ) );
+ ASSERT_EQUALS( 5 , s->getNumber("a" ) );
+
+ ASSERT( s->exec( "var b = 6;" , "b" , false , true , false ) );
+ ASSERT_EQUALS( 6 , s->getNumber("b" ) );
+ delete s;
+ }
+ };
+
+ class Speed1 {
+ public:
+ void run() {
+ BSONObj start = BSON( "x" << 5.0 );
+ BSONObj empty;
+
+ auto_ptr<Scope> s;
+ s.reset( globalScriptEngine->newScope() );
+
+ ScriptingFunction f = s->createFunction( "return this.x + 6;" );
+
+ Timer t;
+ double n = 0;
+ for ( ; n < 100000; n++ ) {
+ s->invoke( f , &empty, &start );
+ ASSERT_EQUALS( 11 , s->getNumber( "return" ) );
+ }
+ //cout << "speed1: " << ( n / t.millis() ) << " ops/ms" << endl;
+ }
+ };
+
+ class ScopeOut {
+ public:
+ void run() {
+ auto_ptr<Scope> s;
+ s.reset( globalScriptEngine->newScope() );
+
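+            // append() copies the scope variable "x" into the builder under the field name "z"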
+ s->invokeSafe( "x = 5;" , 0, 0 );
+ {
+ BSONObjBuilder b;
+ s->append( b , "z" , "x" );
+ ASSERT_EQUALS( BSON( "z" << 5 ) , b.obj() );
+ }
+
+ s->invokeSafe( "x = function(){ return 17; }" , 0, 0 );
+ BSONObj temp;
+ {
+ BSONObjBuilder b;
+ s->append( b , "z" , "x" );
+ temp = b.obj();
+ }
+
+ s->invokeSafe( "foo = this.z();" , 0, &temp );
+ ASSERT_EQUALS( 17 , s->getNumber( "foo" ) );
+ }
+ };
+
+ class RenameTest {
+ public:
+ void run() {
+ auto_ptr<Scope> s;
+ s.reset( globalScriptEngine->newScope() );
+
+ s->setNumber( "x" , 5 );
+ ASSERT_EQUALS( 5 , s->getNumber( "x" ) );
+ ASSERT_EQUALS( Undefined , s->type( "y" ) );
+
+ s->rename( "x" , "y" );
+ ASSERT_EQUALS( 5 , s->getNumber( "y" ) );
+ ASSERT_EQUALS( Undefined , s->type( "x" ) );
+
+ s->rename( "y" , "x" );
+ ASSERT_EQUALS( 5 , s->getNumber( "x" ) );
+ ASSERT_EQUALS( Undefined , s->type( "y" ) );
+ }
+ };
+
+
+ class All : public Suite {
+ public:
+ All() : Suite( "js" ) {
+ }
+
+ void setupTests() {
+ add< Fundamental >();
+ add< BasicScope >();
+ add< ResetScope >();
+ add< FalseTests >();
+ add< SimpleFunctions >();
+
+ add< ObjectMapping >();
+ add< ObjectDecoding >();
+ add< JSOIDTests >();
+ add< SetImplicit >();
+ add< ObjectModReadonlyTests >();
+ add< OtherJSTypes >();
+ add< SpecialDBTypes >();
+ add< TypeConservation >();
+ add< NumberLong >();
+ add< NumberLong2 >();
+ add< RenameTest >();
+
+ add< WeirdObjects >();
+ add< CodeTests >();
+ add< DBRefTest >();
+ add< InformalDBRef >();
+ add< BinDataType >();
+
+ add< VarTests >();
+
+ add< Speed1 >();
+
+ add< InvalidUTF8Check >();
+ add< Utf8Check >();
+ add< LongUtf8String >();
+
+ add< ScopeOut >();
+ }
+ } myall;
+
+} // namespace JavaJSTests
+
diff --git a/src/mongo/dbtests/macrotests.cpp b/src/mongo/dbtests/macrotests.cpp
new file mode 100644
index 00000000000..f547c851677
--- /dev/null
+++ b/src/mongo/dbtests/macrotests.cpp
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
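+// With MONGO_EXPOSE_MACROS undefined, including the client headers should not
+// leak macros such as malloc or assert; redef_macros.h re-exposes them and
+// undef_macros.h hides them again.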
+#undef MONGO_EXPOSE_MACROS
+
+#include "../client/dbclient.h"
+
+#ifdef malloc
+# error malloc defined 0
+#endif
+
+#ifdef assert
+# error assert defined 1
+#endif
+
+#include "../client/parallel.h" //uses assert
+
+#ifdef assert
+# error assert defined 2
+#endif
+
+#include "../client/redef_macros.h"
+
+#ifndef assert
+# error assert not defined 3
+#endif
+
+#include "../client/undef_macros.h"
+
+#ifdef assert
+# error assert defined 3
+#endif
+
+
diff --git a/src/mongo/dbtests/matchertests.cpp b/src/mongo/dbtests/matchertests.cpp
new file mode 100644
index 00000000000..380b8b802d4
--- /dev/null
+++ b/src/mongo/dbtests/matchertests.cpp
@@ -0,0 +1,163 @@
+// matchertests.cpp : matcher unit tests
+//
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../util/timer.h"
+
+#include "../db/matcher.h"
+#include "../db/json.h"
+
+#include "dbtests.h"
+
+
+
+namespace MatcherTests {
+
+ class Basic {
+ public:
+ void run() {
+ BSONObj query = fromjson( "{\"a\":\"b\"}" );
+ Matcher m( query );
+ ASSERT( m.matches( fromjson( "{\"a\":\"b\"}" ) ) );
+ }
+ };
+
+ class DoubleEqual {
+ public:
+ void run() {
+ BSONObj query = fromjson( "{\"a\":5}" );
+ Matcher m( query );
+ ASSERT( m.matches( fromjson( "{\"a\":5}" ) ) );
+ }
+ };
+
+ class MixedNumericEqual {
+ public:
+ void run() {
+ BSONObjBuilder query;
+ query.append( "a", 5 );
+ Matcher m( query.done() );
+ ASSERT( m.matches( fromjson( "{\"a\":5}" ) ) );
+ }
+ };
+
+ class MixedNumericGt {
+ public:
+ void run() {
+ BSONObj query = fromjson( "{\"a\":{\"$gt\":4}}" );
+ Matcher m( query );
+ BSONObjBuilder b;
+ b.append( "a", 5 );
+ ASSERT( m.matches( b.done() ) );
+ }
+ };
+
+ class MixedNumericIN {
+ public:
+ void run() {
+ BSONObj query = fromjson( "{ a : { $in : [4,6] } }" );
+ ASSERT_EQUALS( 4 , query["a"].embeddedObject()["$in"].embeddedObject()["0"].number() );
+ ASSERT_EQUALS( NumberInt , query["a"].embeddedObject()["$in"].embeddedObject()["0"].type() );
+
+ Matcher m( query );
+
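+            // the double 4.0 should match the NumberInt 4 in the $in list; 5 should not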
+ {
+ BSONObjBuilder b;
+ b.append( "a" , 4.0 );
+ ASSERT( m.matches( b.done() ) );
+ }
+
+ {
+ BSONObjBuilder b;
+ b.append( "a" , 5 );
+ ASSERT( ! m.matches( b.done() ) );
+ }
+
+
+ {
+ BSONObjBuilder b;
+ b.append( "a" , 4 );
+ ASSERT( m.matches( b.done() ) );
+ }
+
+ }
+ };
+
+ class MixedNumericEmbedded {
+ public:
+ void run() {
+ Matcher m( BSON( "a" << BSON( "x" << 1 ) ) );
+ ASSERT( m.matches( BSON( "a" << BSON( "x" << 1 ) ) ) );
+ ASSERT( m.matches( BSON( "a" << BSON( "x" << 1.0 ) ) ) );
+ }
+ };
+
+ class Size {
+ public:
+ void run() {
+ Matcher m( fromjson( "{a:{$size:4}}" ) );
+ ASSERT( m.matches( fromjson( "{a:[1,2,3,4]}" ) ) );
+ ASSERT( !m.matches( fromjson( "{a:[1,2,3]}" ) ) );
+ ASSERT( !m.matches( fromjson( "{a:[1,2,3,'a','b']}" ) ) );
+ ASSERT( !m.matches( fromjson( "{a:[[1,2,3,4]]}" ) ) );
+ }
+ };
+
+
+ class TimingBase {
+ public:
+ long time( const BSONObj& patt , const BSONObj& obj ) {
+ Matcher m( patt );
+ Timer t;
+ for ( int i=0; i<10000; i++ ) {
+ ASSERT( m.matches( obj ) );
+ }
+ return t.millis();
+ }
+ };
+
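+    // compares the cost of matching a plain equality predicate against an equivalent $all predicate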
+ class AllTiming : public TimingBase {
+ public:
+ void run() {
+ long normal = time( BSON( "x" << 5 ) , BSON( "x" << 5 ) );
+ long all = time( BSON( "x" << BSON( "$all" << BSON_ARRAY( 5 ) ) ) , BSON( "x" << 5 ) );
+
+ cout << "normal: " << normal << " all: " << all << endl;
+ }
+ };
+
+ class All : public Suite {
+ public:
+ All() : Suite( "matcher" ) {
+ }
+
+ void setupTests() {
+ add< Basic >();
+ add< DoubleEqual >();
+ add< MixedNumericEqual >();
+ add< MixedNumericGt >();
+ add< MixedNumericIN >();
+ add< Size >();
+ add< MixedNumericEmbedded >();
+ add< AllTiming >();
+ }
+ } dball;
+
+} // namespace MatcherTests
+
diff --git a/src/mongo/dbtests/mmaptests.cpp b/src/mongo/dbtests/mmaptests.cpp
new file mode 100644
index 00000000000..7fb6eee98fc
--- /dev/null
+++ b/src/mongo/dbtests/mmaptests.cpp
@@ -0,0 +1,219 @@
+// @file mmaptests.cpp
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../db/mongommf.h"
+#include "../util/timer.h"
+#include "dbtests.h"
+
+namespace MMapTests {
+
+ class LeakTest {
+ const string fn;
+ const int optOld;
+ public:
+ LeakTest() :
+ fn( (path(dbpath) / "testfile.map").string() ), optOld(cmdLine.durOptions)
+ {
+ cmdLine.durOptions = 0; // DurParanoid doesn't make sense with this test
+ }
+ ~LeakTest() {
+ cmdLine.durOptions = optOld;
+ try { boost::filesystem::remove(fn); }
+ catch(...) { }
+ }
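+        // map and unmap the same file many times to check that MongoMMF does not leak views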
+ void run() {
+
+ try { boost::filesystem::remove(fn); }
+ catch(...) { }
+
+ writelock lk;
+
+ {
+ MongoMMF f;
+ unsigned long long len = 256 * 1024 * 1024;
+ assert( f.create(fn, len, /*sequential*/false) );
+ {
+ char *p = (char *) f.getView();
+ assert(p);
+ // write something to the private view as a test
+ if( cmdLine.dur )
+ MemoryMappedFile::makeWritable(p, 6);
+ strcpy(p, "hello");
+ }
+ if( cmdLine.dur ) {
+ char *w = (char *) f.view_write();
+ strcpy(w + 6, "world");
+ }
+ MongoFileFinder ff;
+ ASSERT( ff.findByPath(fn) );
+ ASSERT( ff.findByPath("asdf") == 0 );
+ }
+ {
+ MongoFileFinder ff;
+ ASSERT( ff.findByPath(fn) == 0 );
+ }
+
+ int N = 10000;
+#if !defined(_WIN32) && !defined(__linux__)
+ // seems this test is slow on OS X.
+ N = 100;
+#endif
+
+            // we map the file many times here; if we were leaking, presumably this many iterations would fail.
+ Timer t;
+ for( int i = 0; i < N; i++ ) {
+ MongoMMF f;
+ assert( f.open(fn, i%4==1) );
+ {
+ char *p = (char *) f.getView();
+ assert(p);
+ if( cmdLine.dur )
+ MemoryMappedFile::makeWritable(p, 4);
+ strcpy(p, "zzz");
+ }
+ if( cmdLine.dur ) {
+ char *w = (char *) f.view_write();
+ if( i % 2 == 0 )
+ ++(*w);
+ assert( w[6] == 'w' );
+ }
+ }
+ if( t.millis() > 10000 ) {
+ log() << "warning: MMap LeakTest is unusually slow N:" << N << ' ' << t.millis() << "ms" << endl;
+ }
+
+ }
+ };
+
+ class All : public Suite {
+ public:
+ All() : Suite( "mmap" ) {}
+ void setupTests() {
+ add< LeakTest >();
+ }
+ } myall;
+
+#if 0
+
+ class CopyOnWriteSpeedTest {
+ public:
+ void run() {
+
+ string fn = "/tmp/testfile.map";
+ boost::filesystem::remove(fn);
+
+ MemoryMappedFile f;
+ char *p = (char *) f.create(fn, 1024 * 1024 * 1024, true);
+ assert(p);
+ strcpy(p, "hello");
+
+ {
+ void *x = f.testGetCopyOnWriteView();
+ Timer tt;
+ for( int i = 11; i < 1000000000; i++ )
+ p[i] = 'z';
+ cout << "fill 1GB time: " << tt.millis() << "ms" << endl;
+ f.testCloseCopyOnWriteView(x);
+ }
+
+ /* test a lot of view/unviews */
+ {
+ Timer t;
+
+ char *q;
+ for( int i = 0; i < 1000; i++ ) {
+ q = (char *) f.testGetCopyOnWriteView();
+ assert( q );
+ if( i == 999 ) {
+ strcpy(q+2, "there");
+ }
+ f.testCloseCopyOnWriteView(q);
+ }
+
+ cout << "view unview: " << t.millis() << "ms" << endl;
+ }
+
+ f.flush(true);
+
+ /* plain old mmaped writes */
+ {
+ Timer t;
+ for( int i = 0; i < 10; i++ ) {
+ memset(p+100, 'c', 200 * 1024 * 1024);
+ }
+ cout << "traditional writes: " << t.millis() << "ms" << endl;
+ }
+
+ f.flush(true);
+
+ /* test doing some writes */
+ {
+ Timer t;
+ char *q = (char *) f.testGetCopyOnWriteView();
+ for( int i = 0; i < 10; i++ ) {
+ assert( q );
+ memset(q+100, 'c', 200 * 1024 * 1024);
+ }
+ f.testCloseCopyOnWriteView(q);
+
+ cout << "inc style some writes: " << t.millis() << "ms" << endl;
+ }
+
+ /* test doing some writes */
+ {
+ Timer t;
+ for( int i = 0; i < 10; i++ ) {
+ char *q = (char *) f.testGetCopyOnWriteView();
+ assert( q );
+ memset(q+100, 'c', 200 * 1024 * 1024);
+ f.testCloseCopyOnWriteView(q);
+ }
+
+ cout << "some writes: " << t.millis() << "ms" << endl;
+ }
+
+ /* more granular */
+ {
+ Timer t;
+ for( int i = 0; i < 100; i++ ) {
+ char *q = (char *) f.testGetCopyOnWriteView();
+ assert( q );
+ memset(q+100, 'c', 20 * 1024 * 1024);
+ f.testCloseCopyOnWriteView(q);
+ }
+
+ cout << "more granular some writes: " << t.millis() << "ms" << endl;
+ }
+
+ p[10] = 0;
+ cout << p << endl;
+ }
+ };
+
+ class All : public Suite {
+ public:
+ All() : Suite( "mmap" ) {}
+ void setupTests() {
+ add< CopyOnWriteSpeedTest >();
+ }
+ } myall;
+
+#endif
+
+}
diff --git a/src/mongo/dbtests/namespacetests.cpp b/src/mongo/dbtests/namespacetests.cpp
new file mode 100644
index 00000000000..792baf2ccfa
--- /dev/null
+++ b/src/mongo/dbtests/namespacetests.cpp
@@ -0,0 +1,1244 @@
+// namespacetests.cpp : namespace.{h,cpp} unit tests.
+//
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+// Where IndexDetails is defined.
+#include "pch.h"
+#include "../db/namespace.h"
+
+#include "../db/db.h"
+#include "../db/json.h"
+
+#include "dbtests.h"
+
+namespace NamespaceTests {
+
+ const int MinExtentSize = 4096;
+
+ namespace IndexDetailsTests {
+ class Base {
+ dblock lk;
+ Client::Context _context;
+ public:
+ Base() : _context(ns()) {
+ }
+ virtual ~Base() {
+ if ( id_.info.isNull() )
+ return;
+ theDataFileMgr.deleteRecord( ns(), id_.info.rec(), id_.info );
+ ASSERT( theDataFileMgr.findAll( ns() )->eof() );
+ }
+ protected:
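+            // builds a minimal index spec { ns, name, key, sparse } and stores it as the IndexDetails info record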
+ void create( bool sparse = false ) {
+ NamespaceDetailsTransient::get( ns() ).deletedIndex();
+ BSONObjBuilder builder;
+ builder.append( "ns", ns() );
+ builder.append( "name", "testIndex" );
+ builder.append( "key", key() );
+ builder.append( "sparse", sparse );
+ BSONObj bobj = builder.done();
+ id_.info = theDataFileMgr.insert( ns(), bobj.objdata(), bobj.objsize() );
+ // head not needed for current tests
+ // idx_.head = BtreeBucket::addHead( id_ );
+ }
+ static const char* ns() {
+ return "unittests.indexdetailstests";
+ }
+ IndexDetails& id() {
+ return id_;
+ }
+ virtual BSONObj key() const {
+ BSONObjBuilder k;
+ k.append( "a", 1 );
+ return k.obj();
+ }
+ BSONObj aDotB() const {
+ BSONObjBuilder k;
+ k.append( "a.b", 1 );
+ return k.obj();
+ }
+ BSONObj aAndB() const {
+ BSONObjBuilder k;
+ k.append( "a", 1 );
+ k.append( "b", 1 );
+ return k.obj();
+ }
+ static vector< int > shortArray() {
+ vector< int > a;
+ a.push_back( 1 );
+ a.push_back( 2 );
+ a.push_back( 3 );
+ return a;
+ }
+ static BSONObj simpleBC( int i ) {
+ BSONObjBuilder b;
+ b.append( "b", i );
+ b.append( "c", 4 );
+ return b.obj();
+ }
+ static void checkSize( int expected, const BSONObjSet &objs ) {
+ ASSERT_EQUALS( BSONObjSet::size_type( expected ), objs.size() );
+ }
+ static void assertEquals( const BSONObj &a, const BSONObj &b ) {
+ if ( a.woCompare( b ) != 0 ) {
+ out() << "expected: " << a.toString()
+ << ", got: " << b.toString() << endl;
+ }
+ ASSERT( a.woCompare( b ) == 0 );
+ }
+ BSONObj nullObj() const {
+ BSONObjBuilder b;
+ b.appendNull( "" );
+ return b.obj();
+ }
+ private:
+ dblock lk_;
+ IndexDetails id_;
+ };
+
+ class Create : public Base {
+ public:
+ void run() {
+ create();
+ ASSERT_EQUALS( "testIndex", id().indexName() );
+ ASSERT_EQUALS( ns(), id().parentNS() );
+ assertEquals( key(), id().keyPattern() );
+ }
+ };
+
+ class GetKeysFromObjectSimple : public Base {
+ public:
+ void run() {
+ create();
+ BSONObjBuilder b, e;
+ b.append( "b", 4 );
+ b.append( "a", 5 );
+ e.append( "", 5 );
+ BSONObjSet keys;
+ id().getKeysFromObject( b.done(), keys );
+ checkSize( 1, keys );
+ assertEquals( e.obj(), *keys.begin() );
+ }
+ };
+
+ class GetKeysFromObjectDotted : public Base {
+ public:
+ void run() {
+ create();
+ BSONObjBuilder a, e, b;
+ b.append( "b", 4 );
+ a.append( "a", b.done() );
+ a.append( "c", "foo" );
+ e.append( "", 4 );
+ BSONObjSet keys;
+ id().getKeysFromObject( a.done(), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( e.obj(), *keys.begin() );
+ }
+ private:
+ virtual BSONObj key() const {
+ return aDotB();
+ }
+ };
+
+ class GetKeysFromArraySimple : public Base {
+ public:
+ void run() {
+ create();
+ BSONObjBuilder b;
+ b.append( "a", shortArray()) ;
+
+ BSONObjSet keys;
+ id().getKeysFromObject( b.done(), keys );
+ checkSize( 3, keys );
+ int j = 1;
+ for ( BSONObjSet::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
+ BSONObjBuilder b;
+ b.append( "", j );
+ assertEquals( b.obj(), *i );
+ }
+ }
+ };
+
+ class GetKeysFromArrayFirstElement : public Base {
+ public:
+ void run() {
+ create();
+ BSONObjBuilder b;
+ b.append( "a", shortArray() );
+ b.append( "b", 2 );
+
+ BSONObjSet keys;
+ id().getKeysFromObject( b.done(), keys );
+ checkSize( 3, keys );
+ int j = 1;
+ for ( BSONObjSet::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
+ BSONObjBuilder b;
+ b.append( "", j );
+ b.append( "", 2 );
+ assertEquals( b.obj(), *i );
+ }
+ }
+ private:
+ virtual BSONObj key() const {
+ return aAndB();
+ }
+ };
+
+ class GetKeysFromArraySecondElement : public Base {
+ public:
+ void run() {
+ create();
+ BSONObjBuilder b;
+ b.append( "first", 5 );
+ b.append( "a", shortArray()) ;
+
+ BSONObjSet keys;
+ id().getKeysFromObject( b.done(), keys );
+ checkSize( 3, keys );
+ int j = 1;
+ for ( BSONObjSet::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
+ BSONObjBuilder b;
+ b.append( "", 5 );
+ b.append( "", j );
+ assertEquals( b.obj(), *i );
+ }
+ }
+ private:
+ virtual BSONObj key() const {
+ BSONObjBuilder k;
+ k.append( "first", 1 );
+ k.append( "a", 1 );
+ return k.obj();
+ }
+ };
+
+ class GetKeysFromSecondLevelArray : public Base {
+ public:
+ void run() {
+ create();
+ BSONObjBuilder b;
+ b.append( "b", shortArray() );
+ BSONObjBuilder a;
+ a.append( "a", b.done() );
+
+ BSONObjSet keys;
+ id().getKeysFromObject( a.done(), keys );
+ checkSize( 3, keys );
+ int j = 1;
+ for ( BSONObjSet::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
+ BSONObjBuilder b;
+ b.append( "", j );
+ assertEquals( b.obj(), *i );
+ }
+ }
+ private:
+ virtual BSONObj key() const {
+ return aDotB();
+ }
+ };
+
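+        // indexing two parallel arrays is not supported; getKeysFromObject() should throw a UserException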
+ class ParallelArraysBasic : public Base {
+ public:
+ void run() {
+ create();
+ BSONObjBuilder b;
+ b.append( "a", shortArray() );
+ b.append( "b", shortArray() );
+
+ BSONObjSet keys;
+ ASSERT_THROWS( id().getKeysFromObject( b.done(), keys ),
+ UserException );
+ }
+ private:
+ virtual BSONObj key() const {
+ return aAndB();
+ }
+ };
+
+ class ArraySubobjectBasic : public Base {
+ public:
+ void run() {
+ create();
+ vector< BSONObj > elts;
+ for ( int i = 1; i < 4; ++i )
+ elts.push_back( simpleBC( i ) );
+ BSONObjBuilder b;
+ b.append( "a", elts );
+
+ BSONObjSet keys;
+ id().getKeysFromObject( b.done(), keys );
+ checkSize( 3, keys );
+ int j = 1;
+ for ( BSONObjSet::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
+ BSONObjBuilder b;
+ b.append( "", j );
+ assertEquals( b.obj(), *i );
+ }
+ }
+ private:
+ virtual BSONObj key() const {
+ return aDotB();
+ }
+ };
+
+ class ArraySubobjectMultiFieldIndex : public Base {
+ public:
+ void run() {
+ create();
+ vector< BSONObj > elts;
+ for ( int i = 1; i < 4; ++i )
+ elts.push_back( simpleBC( i ) );
+ BSONObjBuilder b;
+ b.append( "a", elts );
+ b.append( "d", 99 );
+
+ BSONObjSet keys;
+ id().getKeysFromObject( b.done(), keys );
+ checkSize( 3, keys );
+ int j = 1;
+ for ( BSONObjSet::iterator i = keys.begin(); i != keys.end(); ++i, ++j ) {
+ BSONObjBuilder c;
+ c.append( "", j );
+ c.append( "", 99 );
+ assertEquals( c.obj(), *i );
+ }
+ }
+ private:
+ virtual BSONObj key() const {
+ BSONObjBuilder k;
+ k.append( "a.b", 1 );
+ k.append( "d", 1 );
+ return k.obj();
+ }
+ };
+
+ class ArraySubobjectSingleMissing : public Base {
+ public:
+ void run() {
+ create();
+ vector< BSONObj > elts;
+ BSONObjBuilder s;
+ s.append( "foo", 41 );
+ elts.push_back( s.obj() );
+ for ( int i = 1; i < 4; ++i )
+ elts.push_back( simpleBC( i ) );
+ BSONObjBuilder b;
+ b.append( "a", elts );
+ BSONObj obj = b.obj();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( obj, keys );
+ checkSize( 4, keys );
+ BSONObjSet::iterator i = keys.begin();
+ assertEquals( nullObj(), *i++ ); // see SERVER-3377
+ for ( int j = 1; j < 4; ++i, ++j ) {
+ BSONObjBuilder b;
+ b.append( "", j );
+ assertEquals( b.obj(), *i );
+ }
+ }
+ private:
+ virtual BSONObj key() const {
+ return aDotB();
+ }
+ };
+
+ class ArraySubobjectMissing : public Base {
+ public:
+ void run() {
+ create();
+ vector< BSONObj > elts;
+ BSONObjBuilder s;
+ s.append( "foo", 41 );
+ for ( int i = 1; i < 4; ++i )
+ elts.push_back( s.done() );
+ BSONObjBuilder b;
+ b.append( "a", elts );
+
+ BSONObjSet keys;
+ id().getKeysFromObject( b.done(), keys );
+ checkSize( 1, keys );
+ assertEquals( nullObj(), *keys.begin() );
+ }
+ private:
+ virtual BSONObj key() const {
+ return aDotB();
+ }
+ };
+
+ class MissingField : public Base {
+ public:
+ void run() {
+ create();
+ BSONObjSet keys;
+ id().getKeysFromObject( BSON( "b" << 1 ), keys );
+ checkSize( 1, keys );
+ assertEquals( nullObj(), *keys.begin() );
+ }
+ private:
+ virtual BSONObj key() const {
+ return BSON( "a" << 1 );
+ }
+ };
+
+ class SubobjectMissing : public Base {
+ public:
+ void run() {
+ create();
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[1,2]}" ), keys );
+ checkSize( 1, keys );
+ assertEquals( nullObj(), *keys.begin() );
+ }
+ private:
+ virtual BSONObj key() const {
+ return aDotB();
+ }
+ };
+
+ class CompoundMissing : public Base {
+ public:
+ void run() {
+ create();
+
+ {
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{x:'a',y:'b'}" ) , keys );
+ checkSize( 1 , keys );
+ assertEquals( BSON( "" << "a" << "" << "b" ) , *keys.begin() );
+ }
+
+ {
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{x:'a'}" ) , keys );
+ checkSize( 1 , keys );
+ BSONObjBuilder b;
+ b.append( "" , "a" );
+ b.appendNull( "" );
+ assertEquals( b.obj() , *keys.begin() );
+ }
+
+ }
+
+ private:
+ virtual BSONObj key() const {
+ return BSON( "x" << 1 << "y" << 1 );
+ }
+
+ };
+
+ class ArraySubelementComplex : public Base {
+ public:
+ void run() {
+ create();
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[{b:[2]}]}" ), keys );
+ checkSize( 1, keys );
+ assertEquals( BSON( "" << 2 ), *keys.begin() );
+ }
+ private:
+ virtual BSONObj key() const {
+ return aDotB();
+ }
+ };
+
+ class ParallelArraysComplex : public Base {
+ public:
+ void run() {
+ create();
+ BSONObjSet keys;
+ ASSERT_THROWS( id().getKeysFromObject( fromjson( "{a:[{b:[1],c:[2]}]}" ), keys ),
+ UserException );
+ }
+ private:
+ virtual BSONObj key() const {
+ return fromjson( "{'a.b':1,'a.c':1}" );
+ }
+ };
+
+ class AlternateMissing : public Base {
+ public:
+ void run() {
+ create();
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[{b:1},{c:2}]}" ), keys );
+ checkSize( 2, keys );
+ BSONObjSet::iterator i = keys.begin();
+ {
+ BSONObjBuilder e;
+ e.appendNull( "" );
+ e.append( "", 2 );
+ assertEquals( e.obj(), *i++ );
+ }
+
+ {
+ BSONObjBuilder e;
+ e.append( "", 1 );
+ e.appendNull( "" );
+ assertEquals( e.obj(), *i++ );
+ }
+ }
+ private:
+ virtual BSONObj key() const {
+ return fromjson( "{'a.b':1,'a.c':1}" );
+ }
+ };
+
+ class MultiComplex : public Base {
+ public:
+ void run() {
+ create();
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[{b:1},{b:[1,2,3]}]}" ), keys );
+ checkSize( 3, keys );
+ }
+ private:
+ virtual BSONObj key() const {
+ return aDotB();
+ }
+ };
+
+ class EmptyArray : Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[1,2]}" ), keys );
+ checkSize(2, keys );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[1]}" ), keys );
+ checkSize(1, keys );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:null}" ), keys );
+ checkSize(1, keys );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize(1, keys );
+ ASSERT_EQUALS( Undefined, keys.begin()->firstElement().type() );
+ keys.clear();
+ }
+ };
+
+ class DoubleArray : Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[1,2]}" ), keys );
+ checkSize(2, keys );
+ BSONObjSet::const_iterator i = keys.begin();
+ ASSERT_EQUALS( BSON( "" << 1 << "" << 1 ), *i );
+ ++i;
+ ASSERT_EQUALS( BSON( "" << 2 << "" << 2 ), *i );
+ keys.clear();
+ }
+
+ protected:
+ BSONObj key() const {
+ return BSON( "a" << 1 << "a" << 1 );
+ }
+ };
+
+ class DoubleEmptyArray : Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize(1, keys );
+ ASSERT_EQUALS( fromjson( "{'':undefined,'':undefined}" ), *keys.begin() );
+ keys.clear();
+ }
+
+ protected:
+ BSONObj key() const {
+ return BSON( "a" << 1 << "a" << 1 );
+ }
+ };
+
+ class MultiEmptyArray : Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:1,b:[1,2]}" ), keys );
+ checkSize(2, keys );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:1,b:[1]}" ), keys );
+ checkSize(1, keys );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:1,b:null}" ), keys );
+ //cout << "YO : " << *(keys.begin()) << endl;
+ checkSize(1, keys );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:1,b:[]}" ), keys );
+ checkSize(1, keys );
+ //cout << "YO : " << *(keys.begin()) << endl;
+ BSONObjIterator i( *keys.begin() );
+ ASSERT_EQUALS( NumberInt , i.next().type() );
+ ASSERT_EQUALS( Undefined , i.next().type() );
+ keys.clear();
+ }
+
+ protected:
+ BSONObj key() const {
+ return aAndB();
+ }
+ };
+
+ class NestedEmptyArray : Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':null}" ), *keys.begin() );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "a.b" << 1 ); }
+ };
+
+ class MultiNestedEmptyArray : Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':null,'':null}" ), *keys.begin() );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "a.b" << 1 << "a.c" << 1 ); }
+ };
+
+ class UnevenNestedEmptyArray : public Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':undefined,'':null}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[{b:1}]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':{b:1},'':1}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[{b:[]}]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':{b:[]},'':undefined}" ), *keys.begin() );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "a" << 1 << "a.b" << 1 ); }
+ };
+
+ class ReverseUnevenNestedEmptyArray : public Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':null,'':undefined}" ), *keys.begin() );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "a.b" << 1 << "a" << 1 ); }
+ };
+
+ class SparseReverseUnevenNestedEmptyArray : public Base {
+ public:
+ void run() {
+ create( true );
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':null,'':undefined}" ), *keys.begin() );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "a.b" << 1 << "a" << 1 ); }
+ };
+
+ class SparseEmptyArray : public Base {
+ public:
+ void run() {
+ create( true );
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:1}" ), keys );
+ checkSize( 0, keys );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize( 0, keys );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[{c:1}]}" ), keys );
+ checkSize( 0, keys );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "a.b" << 1 ); }
+ };
+
+ class SparseEmptyArraySecond : public Base {
+ public:
+ void run() {
+ create( true );
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:1}" ), keys );
+ checkSize( 0, keys );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize( 0, keys );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[{c:1}]}" ), keys );
+ checkSize( 0, keys );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "z" << 1 << "a.b" << 1 ); }
+ };
+
+ class NonObjectMissingNestedField : public Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':null}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[1]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':null}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[1,{b:1}]}" ), keys );
+ checkSize( 2, keys );
+ BSONObjSet::const_iterator c = keys.begin();
+ ASSERT_EQUALS( fromjson( "{'':null}" ), *c );
+ ++c;
+ ASSERT_EQUALS( fromjson( "{'':1}" ), *c );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "a.b" << 1 ); }
+ };
+
+ class SparseNonObjectMissingNestedField : public Base {
+ public:
+ void run() {
+ create( true );
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize( 0, keys );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[1]}" ), keys );
+ checkSize( 0, keys );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[1,{b:1}]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':1}" ), *keys.begin() );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "a.b" << 1 ); }
+ };
+
+ class IndexedArrayIndex : public Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[1]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( BSON( "" << 1 ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[[1]]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':[1]}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[[]]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':undefined}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:{'0':1}}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( BSON( "" << 1 ), *keys.begin() );
+ keys.clear();
+
+ ASSERT_THROWS( id().getKeysFromObject( fromjson( "{a:[{'0':1}]}" ), keys ), UserException );
+
+ ASSERT_THROWS( id().getKeysFromObject( fromjson( "{a:[1,{'0':2}]}" ), keys ), UserException );
+ }
+ protected:
+ BSONObj key() const { return BSON( "a.0" << 1 ); }
+ };
+
+ class DoubleIndexedArrayIndex : public Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[[1]]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':1}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[[]]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':null}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':null}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[[[]]]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':undefined}" ), *keys.begin() );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "a.0.0" << 1 ); }
+ };
+
+ class ObjectWithinArray : public Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[{b:1}]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':1}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[{b:[1]}]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':1}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[{b:[[1]]}]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':[1]}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[[{b:1}]]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':1}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[[{b:[1]}]]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':1}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[[{b:[[1]]}]]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':[1]}" ), *keys.begin() );
+ keys.clear();
+
+ id().getKeysFromObject( fromjson( "{a:[[{b:[]}]]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':undefined}" ), *keys.begin() );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "a.0.b" << 1 ); }
+ };
+
+ class ArrayWithinObjectWithinArray : public Base {
+ public:
+ void run() {
+ create();
+
+ BSONObjSet keys;
+ id().getKeysFromObject( fromjson( "{a:[{b:[1]}]}" ), keys );
+ checkSize( 1, keys );
+ ASSERT_EQUALS( fromjson( "{'':1}" ), *keys.begin() );
+ keys.clear();
+ }
+ protected:
+ BSONObj key() const { return BSON( "a.0.b.0" << 1 ); }
+ };
+
+ // also test numeric string field names
+
+ } // namespace IndexDetailsTests
+
+ namespace NamespaceDetailsTests {
+
+ class Base {
+ const char *ns_;
+ dblock lk;
+ Client::Context _context;
+ public:
+ Base( const char *ns = "unittests.NamespaceDetailsTests" ) : ns_( ns ) , _context( ns ) {}
+ virtual ~Base() {
+ if ( !nsd() )
+ return;
+ string s( ns() );
+ string errmsg;
+ BSONObjBuilder result;
+ dropCollection( s, errmsg, result );
+ }
+ protected:
+ void create() {
+ dblock lk;
+ string err;
+ ASSERT( userCreateNS( ns(), fromjson( spec() ), err, false ) );
+ }
+ virtual string spec() const {
+ return "{\"capped\":true,\"size\":512,\"$nExtents\":1}";
+ }
+ int nRecords() const {
+ int count = 0;
+ for ( DiskLoc i = nsd()->firstExtent; !i.isNull(); i = i.ext()->xnext ) {
+ int fileNo = i.ext()->firstRecord.a();
+ if ( fileNo == -1 )
+ continue;
+ for ( int j = i.ext()->firstRecord.getOfs(); j != DiskLoc::NullOfs;
+ j = DiskLoc( fileNo, j ).rec()->nextOfs ) {
+ ++count;
+ }
+ }
+ ASSERT_EQUALS( count, nsd()->stats.nrecords );
+ return count;
+ }
+ int nExtents() const {
+ int count = 0;
+ for ( DiskLoc i = nsd()->firstExtent; !i.isNull(); i = i.ext()->xnext )
+ ++count;
+ return count;
+ }
+ static int min( int a, int b ) {
+ return a < b ? a : b;
+ }
+ const char *ns() const {
+ return ns_;
+ }
+ NamespaceDetails *nsd() const {
+ return nsdetails( ns() )->writingWithExtra();
+ }
+ static BSONObj bigObj(bool bGenID=false) {
+ BSONObjBuilder b;
+ if (bGenID)
+ b.appendOID("_id", 0, true);
+ string as( 187, 'a' );
+ b.append( "a", as );
+ return b.obj();
+ }
+ };
+
+ class Create : public Base {
+ public:
+ void run() {
+ create();
+ ASSERT( nsd() );
+ ASSERT_EQUALS( 0, nRecords() );
+ ASSERT( nsd()->firstExtent == nsd()->capExtent );
+ DiskLoc initial = DiskLoc();
+ initial.setInvalid();
+ ASSERT( initial == nsd()->capFirstNewRecord );
+ }
+ };
+
+ class SingleAlloc : public Base {
+ public:
+ void run() {
+ create();
+ BSONObj b = bigObj();
+ ASSERT( !theDataFileMgr.insert( ns(), b.objdata(), b.objsize() ).isNull() );
+ ASSERT_EQUALS( 1, nRecords() );
+ }
+ };
+
+ class Realloc : public Base {
+ public:
+ void run() {
+ create();
+
+ const int N = 20;
+                const int Q = 16; // these constants depend on the size of the bson object and on the extent size allocated by the system
+ DiskLoc l[ N ];
+ for ( int i = 0; i < N; ++i ) {
+ BSONObj b = bigObj(true);
+ l[ i ] = theDataFileMgr.insert( ns(), b.objdata(), b.objsize() );
+ ASSERT( !l[ i ].isNull() );
+ ASSERT( nRecords() <= Q );
+ //ASSERT_EQUALS( 1 + i % 2, nRecords() );
+ if ( i >= 16 )
+ ASSERT( l[ i ] == l[ i - Q] );
+ }
+ }
+ };
+
+ class TwoExtent : public Base {
+ public:
+ void run() {
+ create();
+ ASSERT_EQUALS( 2, nExtents() );
+
+ BSONObj b = bigObj();
+
+ DiskLoc l[ 8 ];
+ for ( int i = 0; i < 8; ++i ) {
+ l[ i ] = theDataFileMgr.insert( ns(), b.objdata(), b.objsize() );
+ ASSERT( !l[ i ].isNull() );
+ //ASSERT_EQUALS( i < 2 ? i + 1 : 3 + i % 2, nRecords() );
+ //if ( i > 3 )
+ // ASSERT( l[ i ] == l[ i - 4 ] );
+ }
+ ASSERT( nRecords() == 8 );
+
+ // Too big
+ BSONObjBuilder bob;
+ bob.append( "a", string( MinExtentSize + 500, 'a' ) ); // min extent size is now 4096
+ BSONObj bigger = bob.done();
+ ASSERT( theDataFileMgr.insert( ns(), bigger.objdata(), bigger.objsize() ).isNull() );
+ ASSERT_EQUALS( 0, nRecords() );
+ }
+ private:
+ virtual string spec() const {
+ return "{\"capped\":true,\"size\":512,\"$nExtents\":2}";
+ }
+ };
+
+ /* test NamespaceDetails::cappedTruncateAfter(const char *ns, DiskLoc loc)
+ */
+ class TruncateCapped : public Base {
+ virtual string spec() const {
+ return "{\"capped\":true,\"size\":512,\"$nExtents\":2}";
+ }
+ void pass(int p) {
+ create();
+ ASSERT_EQUALS( 2, nExtents() );
+
+ BSONObj b = bigObj(true);
+
+ int N = MinExtentSize / b.objsize() * nExtents() + 5;
+ int T = N - 4;
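+ // N inserts are enough to wrap the capped collection (checked below via
+ // nRecords() < N); T marks a truncation point near the end of the inserts.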
+
+ DiskLoc truncAt;
+ //DiskLoc l[ 8 ];
+ for ( int i = 0; i < N; ++i ) {
+ BSONObj bb = bigObj(true);
+ DiskLoc a = theDataFileMgr.insert( ns(), bb.objdata(), bb.objsize() );
+ if( T == i )
+ truncAt = a;
+ ASSERT( !a.isNull() );
+ /*ASSERT_EQUALS( i < 2 ? i + 1 : 3 + i % 2, nRecords() );
+ if ( i > 3 )
+ ASSERT( l[ i ] == l[ i - 4 ] );*/
+ }
+ ASSERT( nRecords() < N );
+
+ NamespaceDetails *nsd = nsdetails(ns());
+
+ DiskLoc last, first;
+ {
+ ReverseCappedCursor c(nsd);
+ last = c.currLoc();
+ ASSERT( !last.isNull() );
+ }
+ {
+ ForwardCappedCursor c(nsd);
+ first = c.currLoc();
+ ASSERT( !first.isNull() );
+ ASSERT( first != last ) ;
+ }
+
+ nsd->cappedTruncateAfter(ns(), truncAt, false);
+ ASSERT_EQUALS( nsd->stats.nrecords , 28 );
+
+ {
+ ForwardCappedCursor c(nsd);
+ ASSERT( first == c.currLoc() );
+ }
+ {
+ ReverseCappedCursor c(nsd);
+ ASSERT( last != c.currLoc() ); // old last should be deleted
+ ASSERT( !last.isNull() );
+ }
+
+ // Too big
+ BSONObjBuilder bob;
+ bob.appendOID("_id", 0, true);
+ bob.append( "a", string( MinExtentSize + 300, 'a' ) );
+ BSONObj bigger = bob.done();
+ ASSERT( theDataFileMgr.insert( ns(), bigger.objdata(), bigger.objsize() ).isNull() );
+ ASSERT_EQUALS( 0, nRecords() );
+ }
+ public:
+ void run() {
+// log() << "******** NOT RUNNING TruncateCapped test yet ************" << endl;
+ pass(0);
+ }
+ };
+
+ class Migrate : public Base {
+ public:
+ void run() {
+ create();
+ nsd()->deletedList[ 2 ] = nsd()->cappedListOfAllDeletedRecords().drec()->nextDeleted.drec()->nextDeleted;
+ nsd()->cappedListOfAllDeletedRecords().drec()->nextDeleted.drec()->nextDeleted.writing() = DiskLoc();
+ nsd()->cappedLastDelRecLastExtent().Null();
+ NamespaceDetails *d = nsd();
+ zero( &d->capExtent );
+ zero( &d->capFirstNewRecord );
+
+ nsd();
+
+ ASSERT( nsd()->firstExtent == nsd()->capExtent );
+ ASSERT( nsd()->capExtent.getOfs() != 0 );
+ ASSERT( !nsd()->capFirstNewRecord.isValid() );
+ int nDeleted = 0;
+ for ( DiskLoc i = nsd()->cappedListOfAllDeletedRecords(); !i.isNull(); i = i.drec()->nextDeleted, ++nDeleted );
+ ASSERT_EQUALS( 10, nDeleted );
+ ASSERT( nsd()->cappedLastDelRecLastExtent().isNull() );
+ }
+ private:
+ static void zero( DiskLoc *d ) {
+ memset( d, 0, sizeof( DiskLoc ) );
+ }
+ virtual string spec() const {
+ return "{\"capped\":true,\"size\":512,\"$nExtents\":10}";
+ }
+ };
+
+ // This isn't a particularly useful test, and because it doesn't clean up
+ // after itself, /tmp/unittest needs to be cleared after running.
+ // class BigCollection : public Base {
+ // public:
+ // BigCollection() : Base( "NamespaceDetailsTests_BigCollection" ) {}
+ // void run() {
+ // create();
+ // ASSERT_EQUALS( 2, nExtents() );
+ // }
+ // private:
+ // virtual string spec() const {
+ // // NOTE 256 added to size in _userCreateNS()
+ // long long big = MongoDataFile::maxSize() - DataFileHeader::HeaderSize;
+ // stringstream ss;
+ // ss << "{\"capped\":true,\"size\":" << big << "}";
+ // return ss.str();
+ // }
+ // };
+
+ class Size {
+ public:
+ void run() {
+ ASSERT_EQUALS( 496U, sizeof( NamespaceDetails ) );
+ }
+ };
+
+ } // namespace NamespaceDetailsTests
+
+ class All : public Suite {
+ public:
+ All() : Suite( "namespace" ) {
+ }
+
+ void setupTests() {
+ add< IndexDetailsTests::Create >();
+ add< IndexDetailsTests::GetKeysFromObjectSimple >();
+ add< IndexDetailsTests::GetKeysFromObjectDotted >();
+ add< IndexDetailsTests::GetKeysFromArraySimple >();
+ add< IndexDetailsTests::GetKeysFromArrayFirstElement >();
+ add< IndexDetailsTests::GetKeysFromArraySecondElement >();
+ add< IndexDetailsTests::GetKeysFromSecondLevelArray >();
+ add< IndexDetailsTests::ParallelArraysBasic >();
+ add< IndexDetailsTests::ArraySubobjectBasic >();
+ add< IndexDetailsTests::ArraySubobjectMultiFieldIndex >();
+ add< IndexDetailsTests::ArraySubobjectSingleMissing >();
+ add< IndexDetailsTests::ArraySubobjectMissing >();
+ add< IndexDetailsTests::ArraySubelementComplex >();
+ add< IndexDetailsTests::ParallelArraysComplex >();
+ add< IndexDetailsTests::AlternateMissing >();
+ add< IndexDetailsTests::MultiComplex >();
+ add< IndexDetailsTests::EmptyArray >();
+ add< IndexDetailsTests::DoubleArray >();
+ add< IndexDetailsTests::DoubleEmptyArray >();
+ add< IndexDetailsTests::MultiEmptyArray >();
+ add< IndexDetailsTests::NestedEmptyArray >();
+ add< IndexDetailsTests::MultiNestedEmptyArray >();
+ add< IndexDetailsTests::UnevenNestedEmptyArray >();
+ add< IndexDetailsTests::ReverseUnevenNestedEmptyArray >();
+ add< IndexDetailsTests::SparseReverseUnevenNestedEmptyArray >();
+ add< IndexDetailsTests::SparseEmptyArray >();
+ add< IndexDetailsTests::SparseEmptyArraySecond >();
+ add< IndexDetailsTests::NonObjectMissingNestedField >();
+ add< IndexDetailsTests::SparseNonObjectMissingNestedField >();
+ add< IndexDetailsTests::IndexedArrayIndex >();
+ add< IndexDetailsTests::DoubleIndexedArrayIndex >();
+ add< IndexDetailsTests::ObjectWithinArray >();
+ add< IndexDetailsTests::ArrayWithinObjectWithinArray >();
+ add< IndexDetailsTests::MissingField >();
+ add< IndexDetailsTests::SubobjectMissing >();
+ add< IndexDetailsTests::CompoundMissing >();
+ add< NamespaceDetailsTests::Create >();
+ add< NamespaceDetailsTests::SingleAlloc >();
+ add< NamespaceDetailsTests::Realloc >();
+ add< NamespaceDetailsTests::TwoExtent >();
+ add< NamespaceDetailsTests::TruncateCapped >();
+ add< NamespaceDetailsTests::Migrate >();
+ // add< NamespaceDetailsTests::BigCollection >();
+ add< NamespaceDetailsTests::Size >();
+ }
+ } myall;
+} // namespace NamespaceTests
+
diff --git a/src/mongo/dbtests/pdfiletests.cpp b/src/mongo/dbtests/pdfiletests.cpp
new file mode 100644
index 00000000000..e07ccb42aa6
--- /dev/null
+++ b/src/mongo/dbtests/pdfiletests.cpp
@@ -0,0 +1,407 @@
+// pdfiletests.cpp : pdfile unit tests.
+//
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../db/pdfile.h"
+
+#include "../db/db.h"
+#include "../db/json.h"
+
+#include "dbtests.h"
+
+namespace PdfileTests {
+
+ namespace ScanCapped {
+
+ class Base {
+ public:
+ Base() : _context( ns() ) {
+ }
+ virtual ~Base() {
+ if ( !nsd() )
+ return;
+ string n( ns() );
+ dropNS( n );
+ }
+ void run() {
+ stringstream spec;
+ spec << "{\"capped\":true,\"size\":2000,\"$nExtents\":" << nExtents() << "}";
+ string err;
+ ASSERT( userCreateNS( ns(), fromjson( spec.str() ), err, false ) );
+ prepare();
+ int j = 0;
+ for ( boost::shared_ptr<Cursor> i = theDataFileMgr.findAll( ns() );
+ i->ok(); i->advance(), ++j )
+ ASSERT_EQUALS( j, i->current().firstElement().number() );
+ ASSERT_EQUALS( count(), j );
+
+ j = count() - 1;
+ for ( boost::shared_ptr<Cursor> i =
+ findTableScan( ns(), fromjson( "{\"$natural\":-1}" ) );
+ i->ok(); i->advance(), --j )
+ ASSERT_EQUALS( j, i->current().firstElement().number() );
+ ASSERT_EQUALS( -1, j );
+ }
+ protected:
+ virtual void prepare() = 0;
+ virtual int count() const = 0;
+ virtual int nExtents() const {
+ return 0;
+ }
+ // bypass standard alloc/insert routines to use the extent we want.
+ static DiskLoc insert( DiskLoc ext, int i ) {
+ BSONObjBuilder b;
+ b.append( "a", i );
+ BSONObj o = b.done();
+ int len = o.objsize();
+ Extent *e = ext.ext();
+ e = getDur().writing(e);
+ int ofs;
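+ // Place the new record immediately after the extent header when the extent
+ // is empty, otherwise immediately after the current last record.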
+ if ( e->lastRecord.isNull() )
+ ofs = ext.getOfs() + ( e->_extentData - (char *)e );
+ else
+ ofs = e->lastRecord.getOfs() + e->lastRecord.rec()->lengthWithHeaders;
+ DiskLoc dl( ext.a(), ofs );
+ Record *r = dl.rec();
+ r = (Record*) getDur().writingPtr(r, Record::HeaderSize + len);
+ r->lengthWithHeaders = Record::HeaderSize + len;
+ r->extentOfs = e->myLoc.getOfs();
+ r->nextOfs = DiskLoc::NullOfs;
+ r->prevOfs = e->lastRecord.isNull() ? DiskLoc::NullOfs : e->lastRecord.getOfs();
+ memcpy( r->data, o.objdata(), len );
+ if ( e->firstRecord.isNull() )
+ e->firstRecord = dl;
+ else
+ getDur().writingInt(e->lastRecord.rec()->nextOfs) = ofs;
+ e->lastRecord = dl;
+ return dl;
+ }
+ static const char *ns() {
+ return "unittests.ScanCapped";
+ }
+ static NamespaceDetails *nsd() {
+ return nsdetails( ns() );
+ }
+ private:
+ dblock lk_;
+ Client::Context _context;
+ };
+
+ class Empty : public Base {
+ virtual void prepare() {}
+ virtual int count() const {
+ return 0;
+ }
+ };
+
+ class EmptyLooped : public Base {
+ virtual void prepare() {
+ nsd()->writingWithExtra()->capFirstNewRecord = DiskLoc();
+ }
+ virtual int count() const {
+ return 0;
+ }
+ };
+
+ class EmptyMultiExtentLooped : public Base {
+ virtual void prepare() {
+ nsd()->writingWithExtra()->capFirstNewRecord = DiskLoc();
+ }
+ virtual int count() const {
+ return 0;
+ }
+ virtual int nExtents() const {
+ return 3;
+ }
+ };
+
+ class Single : public Base {
+ virtual void prepare() {
+ nsd()->writingWithExtra()->capFirstNewRecord = insert( nsd()->capExtent, 0 );
+ }
+ virtual int count() const {
+ return 1;
+ }
+ };
+
+ class NewCapFirst : public Base {
+ virtual void prepare() {
+ DiskLoc x = insert( nsd()->capExtent, 0 );
+ nsd()->writingWithExtra()->capFirstNewRecord = x;
+ insert( nsd()->capExtent, 1 );
+ }
+ virtual int count() const {
+ return 2;
+ }
+ };
+
+ class NewCapLast : public Base {
+ virtual void prepare() {
+ insert( nsd()->capExtent, 0 );
+ nsd()->capFirstNewRecord.writing() = insert( nsd()->capExtent, 1 );
+ }
+ virtual int count() const {
+ return 2;
+ }
+ };
+
+ class NewCapMiddle : public Base {
+ virtual void prepare() {
+ insert( nsd()->capExtent, 0 );
+ nsd()->capFirstNewRecord.writing() = insert( nsd()->capExtent, 1 );
+ insert( nsd()->capExtent, 2 );
+ }
+ virtual int count() const {
+ return 3;
+ }
+ };
+
+ class FirstExtent : public Base {
+ virtual void prepare() {
+ insert( nsd()->capExtent, 0 );
+ insert( nsd()->lastExtent, 1 );
+ nsd()->capFirstNewRecord.writing() = insert( nsd()->capExtent, 2 );
+ insert( nsd()->capExtent, 3 );
+ }
+ virtual int count() const {
+ return 4;
+ }
+ virtual int nExtents() const {
+ return 2;
+ }
+ };
+
+ class LastExtent : public Base {
+ virtual void prepare() {
+ nsd()->capExtent.writing() = nsd()->lastExtent;
+ insert( nsd()->capExtent, 0 );
+ insert( nsd()->firstExtent, 1 );
+ nsd()->capFirstNewRecord.writing() = insert( nsd()->capExtent, 2 );
+ insert( nsd()->capExtent, 3 );
+ }
+ virtual int count() const {
+ return 4;
+ }
+ virtual int nExtents() const {
+ return 2;
+ }
+ };
+
+ class MidExtent : public Base {
+ virtual void prepare() {
+ nsd()->capExtent.writing() = nsd()->firstExtent.ext()->xnext;
+ insert( nsd()->capExtent, 0 );
+ insert( nsd()->lastExtent, 1 );
+ insert( nsd()->firstExtent, 2 );
+ nsd()->capFirstNewRecord.writing() = insert( nsd()->capExtent, 3 );
+ insert( nsd()->capExtent, 4 );
+ }
+ virtual int count() const {
+ return 5;
+ }
+ virtual int nExtents() const {
+ return 3;
+ }
+ };
+
+ class AloneInExtent : public Base {
+ virtual void prepare() {
+ nsd()->capExtent.writing() = nsd()->firstExtent.ext()->xnext;
+ insert( nsd()->lastExtent, 0 );
+ insert( nsd()->firstExtent, 1 );
+ nsd()->capFirstNewRecord.writing() = insert( nsd()->capExtent, 2 );
+ }
+ virtual int count() const {
+ return 3;
+ }
+ virtual int nExtents() const {
+ return 3;
+ }
+ };
+
+ class FirstInExtent : public Base {
+ virtual void prepare() {
+ nsd()->capExtent.writing() = nsd()->firstExtent.ext()->xnext;
+ insert( nsd()->lastExtent, 0 );
+ insert( nsd()->firstExtent, 1 );
+ nsd()->capFirstNewRecord.writing() = insert( nsd()->capExtent, 2 );
+ insert( nsd()->capExtent, 3 );
+ }
+ virtual int count() const {
+ return 4;
+ }
+ virtual int nExtents() const {
+ return 3;
+ }
+ };
+
+ class LastInExtent : public Base {
+ virtual void prepare() {
+ nsd()->capExtent.writing() = nsd()->firstExtent.ext()->xnext;
+ insert( nsd()->capExtent, 0 );
+ insert( nsd()->lastExtent, 1 );
+ insert( nsd()->firstExtent, 2 );
+ nsd()->capFirstNewRecord.writing() = insert( nsd()->capExtent, 3 );
+ }
+ virtual int count() const {
+ return 4;
+ }
+ virtual int nExtents() const {
+ return 3;
+ }
+ };
+
+ } // namespace ScanCapped
+
+ namespace Insert {
+ class Base {
+ public:
+ Base() : _context( ns() ) {
+ }
+ virtual ~Base() {
+ if ( !nsd() )
+ return;
+ string n( ns() );
+ dropNS( n );
+ }
+ protected:
+ static const char *ns() {
+ return "unittests.pdfiletests.Insert";
+ }
+ static NamespaceDetails *nsd() {
+ return nsdetails( ns() );
+ }
+ private:
+ dblock lk_;
+ Client::Context _context;
+ };
+
+ class UpdateDate : public Base {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendTimestamp( "a" );
+ BSONObj o = b.done();
+ ASSERT( 0 == o.getField( "a" ).date() );
+ theDataFileMgr.insertWithObjMod( ns(), o );
+ ASSERT( 0 != o.getField( "a" ).date() );
+ }
+ };
+ } // namespace Insert
+
+ class ExtentSizing {
+ public:
+ struct SmallFilesControl {
+ SmallFilesControl() {
+ old = cmdLine.smallfiles;
+ cmdLine.smallfiles = false;
+ }
+ ~SmallFilesControl() {
+ cmdLine.smallfiles = old;
+ }
+ bool old;
+ };
+ void run() {
+ SmallFilesControl c;
+ // test that no matter what we start with, we always get to max extent size
+ for ( int obj=16; obj<BSONObjMaxUserSize; obj += 111 ) {
+ int sz = Extent::initialSize( obj );
+ for ( int i=0; i<100; i++ ) {
+ sz = Extent::followupSize( obj , sz );
+ }
+ ASSERT_EQUALS( Extent::maxSize() , sz );
+ }
+ }
+ };
+
+ class ExtentAllocOrder {
+ public:
+ void run() {
+ string dbname = "unittest_ex";
+
+ string c1 = dbname + ".x1";
+ string c2 = dbname + ".x2";
+
+ {
+ DBDirectClient db;
+ db.dropDatabase( dbname );
+ }
+
+ dblock mylock;
+ Client::Context cx( dbname );
+
+ bool isnew;
+ Database * d = dbHolderW().getOrCreate( dbname , dbpath , isnew );
+ assert( d );
+
+ int big = 10 * 1024;
+ //int small = 1024;
+
+ unsigned long long l = 0;
+ int n = 0;
+ while ( 1 ) {
+ n++;
+ if( n == 5 && sizeof(void*)==4 )
+ break;
+ MongoDataFile * f = d->addAFile( big , false );
+ //cout << f->length() << ' ' << n << endl;
+ if ( f->length() == l )
+ break;
+ l = f->length();
+ }
+
+ int start = d->numFiles();
+ for ( int i=0; i<start; i++ )
+ d->allocExtent( c1.c_str() , d->getFile( i )->getHeader()->unusedLength , false, false );
+ ASSERT_EQUALS( start , d->numFiles() );
+
+ {
+ DBDirectClient db;
+ db.dropDatabase( dbname );
+ }
+ }
+ };
+
+
+ class All : public Suite {
+ public:
+ All() : Suite( "pdfile" ) {}
+
+ void setupTests() {
+ add< ScanCapped::Empty >();
+ add< ScanCapped::EmptyLooped >();
+ add< ScanCapped::EmptyMultiExtentLooped >();
+ add< ScanCapped::Single >();
+ add< ScanCapped::NewCapFirst >();
+ add< ScanCapped::NewCapLast >();
+ add< ScanCapped::NewCapMiddle >();
+ add< ScanCapped::FirstExtent >();
+ add< ScanCapped::LastExtent >();
+ add< ScanCapped::MidExtent >();
+ add< ScanCapped::AloneInExtent >();
+ add< ScanCapped::FirstInExtent >();
+ add< ScanCapped::LastInExtent >();
+ add< Insert::UpdateDate >();
+ add< ExtentSizing >();
+ add< ExtentAllocOrder >();
+ }
+ } myall;
+
+} // namespace PdfileTests
+
diff --git a/src/mongo/dbtests/perf/btreeperf.cpp b/src/mongo/dbtests/perf/btreeperf.cpp
new file mode 100644
index 00000000000..7d68d8f5cc7
--- /dev/null
+++ b/src/mongo/dbtests/perf/btreeperf.cpp
@@ -0,0 +1,442 @@
+// btreeperf.cpp
+
+/* Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Performance timing and space utilization testing for btree indexes.
+ */
+
+#include <iostream>
+
+#include <boost/random/bernoulli_distribution.hpp>
+#include <boost/random/geometric_distribution.hpp>
+#include <boost/random/mersenne_twister.hpp>
+#include <boost/random/variate_generator.hpp>
+#include <boost/random/uniform_int.hpp>
+
+#include "client/dbclient.h"
+#include "../../util/timer.h"
+
+using namespace std;
+using namespace mongo;
+using namespace boost;
+
+const char *ns = "test.btreeperf";
+const char *db = "test";
+const char *index_collection = "btreeperf.$_id_";
+
+// This random number generator has a much larger period than the default
+// generator and is half as fast as the default. Given that we intend to
+// generate large numbers of documents and will utilize more than one random
+// sample per document, choosing this generator seems like a worthwhile tradeoff.
+mt19937 randomNumberGenerator;
+
+/**
+ * An interface for generating documents to be inserted and document specs for
+ * remove requests.
+ */
+class InsertAndRemoveStrategy {
+public:
+ virtual ~InsertAndRemoveStrategy() {}
+ virtual BSONObj insertObj() = 0;
+ virtual BSONObj removeObj() = 0;
+protected:
+ /**
+ * Helper functions for converting a sample value to a sample object with
+ * specified _id, to be inserted or removed.
+ */
+
+ template< class T >
+ BSONObj insertObjWithVal( const T &val ) {
+ BSONObjBuilder b;
+ b.append( "_id", val );
+ return b.obj();
+ }
+ template< class T >
+ BSONObj removeObjWithVal( const T &val ) {
+ BSONObjBuilder b;
+ b.append( "_id", val );
+ return b.obj();
+ }
+};
+
+/**
+ * Manages a set of elements of type T. Supports inserting unique elements and
+ * sampling a random element without replacement.
+ *
+ * TODO In the contexts where this class is currently used, duplicate keys are
+ * either impossible or highly unlikely. And an occasional duplicate value will
+ * not much affect the procedure by which a random element is chosen. We could
+ * stop checking for duplicates in push(), eliminate _set from the implementation,
+ * and potentially improve performance and memory requirements somewhat.
+ */
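+// Minimal usage sketch (illustrative, not part of the tests):
+//   SetSampler< int > s;
+//   s.push( 3 ); s.push( 7 );
+//   int x = s.pull();   // removes and returns either 3 or 7, chosen uniformly at random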
+template< class T >
+class SetSampler {
+public:
+ /** @param val Insert this value in the set if not already present. */
+ void push( const T& val ) {
+ if ( _set.insert( val ).second ) {
+ _vector.push_back( val );
+ }
+ }
+ /** @return a random element removed from the set */
+ T pull() {
+ if ( _vector.size() == 0 ) {
+ return T();
+ }
+ uniform_int< size_t > sizeRange( 0, _vector.size() - 1 );
+ variate_generator< mt19937&, uniform_int< size_t > > sizeGenerator( randomNumberGenerator, sizeRange );
+ size_t toRemove = sizeGenerator();
+ T val = _vector[ toRemove ];
+ // Replace the random element with the last element, then remove the
+ // last element.
+ _vector[ toRemove ] = _vector.back();
+ _vector.pop_back();
+ _set.erase( val );
+ return val;
+ }
+private:
+ vector< T > _vector;
+ set< T > _set;
+};
+
+/**
+ * Tracks values that have been specified for insertion by the derived class's
+ * implementation of insertVal() and selects the next value to remove uniformly
+ * from among the values that have been inserted but not yet removed.
+ *
+ * The implementation is probabilistically sound, but may be resource intensive
+ * and slow due to the use of a SetSampler.
+ */
+template< class T >
+class InsertAndUniformRemoveStrategy : public InsertAndRemoveStrategy {
+public:
+ virtual BSONObj insertObj() {
+ T val = insertVal();
+ _sampler.push( val );
+ return insertObjWithVal( val );
+ }
+ virtual BSONObj removeObj() { return removeObjWithVal( _sampler.pull() ); }
+protected:
+ /** @return value to insert. This is the only function a derived class need implement. */
+ virtual T insertVal() = 0;
+private:
+ SetSampler< T > _sampler;
+};
+
+/**
+ * The derived class supplies keys to be inserted and removed. The key removal
+ * strategy is similar to the strategy for selecting a random element described
+ * in the MongoDB cookbook: the first key in the collection greater than or
+ * equal to the supplied removal key is removed. This allows selecting an
+ * existing key for removal without the overhead required by a SetSampler.
+ *
+ * While this ranged selection strategy can work well for selecting a random
+ * element, there are some theoretical and empirically observed shortcomings
+ * when the strategy is applied to removing nodes for btree performance measurement:
+ * 1 The likelihood that a given key is removed is proportional to the difference
+ * in value between it and the previous key. Because key deletion increases
+ * the difference in value between adjacent keys, neighboring keys will be
+ * more likely to be deleted than they would be in a true uniform distribution.
+ * 2 MongoDB 1.6 uses 'unused' nodes in the btree implementation. With a ranged
+ * removal strategy, those nodes must be traversed to find a node available
+ * for removal.
+ * 3 Ranged removal was observed to be biased against the balancing policy of
+ * MongoDB 1.7 in some cases, in terms of storage size. This may be a
+ * consequence of point 1 above.
+ * 4 Ranged removal was observed to be significantly biased against the btree
+ * implementation in MongoDB 1.6 in terms of performance. This is likely a
+ * consequence of point 2 above.
+ * 5 In some cases the biases described above were not evident in tests lasting
+ * several minutes, but were evident in tests lasting several hours.
+ */
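+// For illustration only: given an integer removal value of 42, rangedRemoveObjWithVal()
+// below builds a removal spec of the form { "_id" : { "$gte" : 42 } }, so the first
+// existing key at or above 42 is the one removed.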
+template< class T >
+class InsertAndRangedRemoveStrategy : public InsertAndRemoveStrategy {
+public:
+ virtual BSONObj insertObj() { return insertObjWithVal( insertVal() ); }
+ virtual BSONObj removeObj() { return rangedRemoveObjWithVal( removeVal() ); }
+protected:
+ /** Small likelihood that this removal spec will not match any document */
+ template< class U >
+ BSONObj rangedRemoveObjWithVal( const U &val ) {
+ BSONObjBuilder b1;
+ BSONObjBuilder b2( b1.subobjStart( "_id" ) );
+ b2.append( "$gte", val );
+ b2.done();
+ return b1.obj();
+ }
+ virtual T insertVal() = 0;
+ virtual T removeVal() = 0;
+};
+
+/**
+ * Integer Keys
+ * Uniform Inserts
+ * Uniform Removes
+ */
+class UniformInsertRangedUniformRemoveInteger : public InsertAndRangedRemoveStrategy< long long > {
+public:
+ UniformInsertRangedUniformRemoveInteger() :
+ _uniform_int( 0ULL, ~0ULL ),
+ _nextLongLong( randomNumberGenerator, _uniform_int ) {
+ }
+ /** Small likelihood of duplicates */
+ virtual long long insertVal() { return _nextLongLong(); }
+ virtual long long removeVal() { return _nextLongLong(); }
+private:
+ uniform_int< unsigned long long > _uniform_int;
+ variate_generator< mt19937&, uniform_int< unsigned long long > > _nextLongLong;
+};
+
+class UniformInsertUniformRemoveInteger : public InsertAndUniformRemoveStrategy< long long > {
+public:
+ virtual long long insertVal() { return _gen.insertVal(); }
+private:
+ UniformInsertRangedUniformRemoveInteger _gen;
+};
+
+/**
+ * String Keys
+ * Uniform Inserts
+ * Uniform Removes
+ */
+class UniformInsertRangedUniformRemoveString : public InsertAndRangedRemoveStrategy< string > {
+public:
+ UniformInsertRangedUniformRemoveString() :
+ _geometric_distribution( 0.9 ),
+ _nextLength( randomNumberGenerator, _geometric_distribution ),
+ _uniform_char( 'a', 'z' ),
+ _nextChar( randomNumberGenerator, _uniform_char ) {
+ }
+ /** Small likelihood of duplicates */
+ virtual string insertVal() { return nextString(); }
+ virtual string removeVal() { return nextString(); }
+private:
+ string nextString() {
+ // The longer the minimum string length, the lower the likelihood of duplicates
+ int len = _nextLength() + 5;
+ len = len > 100 ? 100 : len;
+ string ret( len, 'x' );
+ for( int i = 0; i < len; ++i ) {
+ ret[ i ] = _nextChar();
+ }
+ return ret;
+ }
+ geometric_distribution<> _geometric_distribution;
+ variate_generator< mt19937&, geometric_distribution<> > _nextLength;
+ uniform_int< char > _uniform_char;
+ variate_generator< mt19937&, uniform_int< char > > _nextChar;
+};
+
+class UniformInsertUniformRemoveString : public InsertAndUniformRemoveStrategy< string > {
+public:
+ virtual string insertVal() { return _gen.insertVal(); }
+private:
+ UniformInsertRangedUniformRemoveString _gen;
+};
+
+/**
+ * OID Keys
+ * Increasing Inserts
+ * Uniform Removes
+ */
+class IncreasingInsertRangedUniformRemoveOID : public InsertAndRangedRemoveStrategy< OID > {
+public:
+ IncreasingInsertRangedUniformRemoveOID() :
+ _max( -1 ) {
+ }
+ virtual OID insertVal() { return oidFromULL( ++_max ); }
+ virtual OID removeVal() {
+ uniform_int< unsigned long long > distribution( 0, _max > 0 ? _max : 0 );
+ variate_generator< mt19937&, uniform_int< unsigned long long > > generator( randomNumberGenerator, distribution );
+ return oidFromULL( generator() );
+ }
+private:
+ static OID oidFromULL( unsigned long long val ) {
+ val = __builtin_bswap64( val );
+ OID oid;
+ oid.clear();
+ memcpy( (char*)&oid + 4, &val, 8 );
+ return oid;
+ }
+ long long _max;
+};
+
+class IncreasingInsertUniformRemoveOID : public InsertAndUniformRemoveStrategy< OID > {
+public:
+ virtual OID insertVal() { return _gen.insertVal(); }
+private:
+ IncreasingInsertRangedUniformRemoveOID _gen;
+};
+
+/**
+ * Integer Keys
+ * Increasing Inserts
+ * Increasing Removes (on remove, the lowest key is always removed)
+ */
+class IncreasingInsertIncreasingRemoveInteger : public InsertAndRemoveStrategy {
+public:
+ IncreasingInsertIncreasingRemoveInteger() :
+ // Start with a large value so data type will be preserved if we round
+ // trip through json.
+ _min( 1LL << 32 ),
+ _max( 1LL << 32 ) {
+ }
+ virtual BSONObj insertObj() { return insertObjWithVal( ++_max ); }
+ virtual BSONObj removeObj() { return removeObjWithVal( _min < _max ? ++_min : _min ); }
+private:
+ long long _min;
+ long long _max;
+};
+
+/** Generate a random boolean value. */
+class BernoulliGenerator {
+public:
+ /**
+ * @param excessFalsePercent This specifies the desired rate of false values
+ * vs true values. If we want false to be 5% more likely than true, we
+ * specify 5 for this argument.
+ */
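+ // Worked example (derived from the initializer below, for illustration): with
+ // excessFalsePercent == 5, P(true) = 1 / 2.05 ~= 0.488 and P(false) ~= 0.512,
+ // i.e. false comes up about 5% more often than true.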
+ BernoulliGenerator( int excessFalsePercent ) :
+ _bernoulli_distribution( 1.0 / ( 2.0 + excessFalsePercent / 100.0 ) ),
+ _generator( randomNumberGenerator, _bernoulli_distribution ) {
+ }
+ bool operator()() { return _generator(); }
+private:
+ bernoulli_distribution<> _bernoulli_distribution;
+ variate_generator< mt19937&, bernoulli_distribution<> > _generator;
+};
+
+/** Runs a strategy on a connection, with specified mix of inserts and removes. */
+class InsertAndRemoveRunner {
+public:
+ InsertAndRemoveRunner( DBClientConnection &conn, InsertAndRemoveStrategy &strategy, int excessInsertPercent ) :
+ _conn( conn ),
+ _strategy( strategy ),
+ _nextOpTypeRemove( excessInsertPercent ) {
+ }
+ void writeOne() {
+ if ( _nextOpTypeRemove() ) {
+ _conn.remove( ns, _strategy.removeObj(), true );
+ }
+ else {
+ _conn.insert( ns, _strategy.insertObj() );
+ }
+ }
+private:
+ DBClientConnection &_conn;
+ InsertAndRemoveStrategy &_strategy;
+ BernoulliGenerator _nextOpTypeRemove;
+};
+
+/**
+ * Writes a test script to cout based on a strategy and specified mix of inserts
+ * and removes. The script can be subsequently executed by InsertAndRemoveRunner.
+ * Script generation is intended for strategies that are memory or cpu intensive
+ * and might either divert resources from a mongod instance being analyzed on the
+ * same machine or fail to generate requests as quickly as the mongod might
+ * accept them.
+ * The script contains one line per operation. Each line begins
+ * with a letter indicating the operation type, followed by a space. Next
+ * follows the json representation of a document for the specified operation
+ * type.
+ */
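+// Illustrative example of the script format (hypothetical values), as consumed
+// by InsertAndRemoveScriptRunner below:
+//   i { "_id" : 1234567890 }
+//   r { "_id" : { "$gte" : 987654321 } }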
+class InsertAndRemoveScriptGenerator {
+public:
+ InsertAndRemoveScriptGenerator( InsertAndRemoveStrategy &strategy, int excessInsertPercent ) :
+ _strategy( strategy ),
+ _nextOpTypeRemove( excessInsertPercent ) {
+ }
+ void writeOne() {
+ if ( _nextOpTypeRemove() ) {
+ cout << "r " << _strategy.removeObj().jsonString() << endl;
+ }
+ else {
+ cout << "i " << _strategy.insertObj().jsonString() << endl;
+ }
+ }
+private:
+ InsertAndRemoveStrategy &_strategy;
+ BernoulliGenerator _nextOpTypeRemove;
+};
+
+/**
+ * Run a test script from cin that was generated by
+ * InsertAndRemoveScriptGenerator. Running the script is intended to be
+ * lightweight in terms of memory and cpu usage, and fast.
+ */
+class InsertAndRemoveScriptRunner {
+public:
+ InsertAndRemoveScriptRunner( DBClientConnection &conn ) :
+ _conn( conn ) {
+ }
+ void writeOne() {
+ cin.getline( _buf, 1024 );
+ BSONObj val = fromjson( _buf + 2 );
+ if ( _buf[ 0 ] == 'r' ) {
+ _conn.remove( ns, val, true );
+ }
+ else {
+ _conn.insert( ns, val );
+ }
+ }
+private:
+ DBClientConnection &_conn;
+ char _buf[ 1024 ];
+};
+
+int main( int argc, const char **argv ) {
+
+ DBClientConnection conn;
+ conn.connect( "127.0.0.1:27017" );
+ conn.dropCollection( ns );
+
+// UniformInsertRangedUniformRemoveInteger strategy;
+// UniformInsertUniformRemoveInteger strategy;
+// UniformInsertRangedUniformRemoveString strategy;
+// UniformInsertUniformRemoveString strategy;
+// IncreasingInsertRangedUniformRemoveOID strategy;
+// IncreasingInsertUniformRemoveOID strategy;
+// IncreasingInsertIncreasingRemoveInteger strategy;
+// InsertAndRemoveScriptGenerator runner( strategy, 5 );
+ InsertAndRemoveScriptRunner runner( conn );
+
+ Timer t;
+ BSONObj statsCmd = BSON( "collstats" << index_collection );
+
+ // Print header, unless we are generating a script (in that case, comment this out).
+ cout << "ops,milliseconds,docs,totalBucketSize" << endl;
+
+ long long i = 0;
+ long long n = 10000000000;
+ while( i < n ) {
+ runner.writeOne();
+ // Print statistics, unless we are generating a script (in that case, comment this out).
+ // The stats collection requests below provide regular read operations,
+ // ensuring we are caught up with the progress being made by the mongod
+ // under analysis.
+ if ( ++i % 50000 == 0 ) {
+ // The total number of documents present.
+ long long docs = conn.count( ns );
+ BSONObj result;
+ conn.runCommand( db, statsCmd, result );
+ // The total number of bytes used for all allocated 8K buckets of the
+ // btree.
+ long long totalBucketSize = result.getField( "count" ).numberLong() * 8192;
+ cout << i << ',' << t.millis() << ',' << docs << ',' << totalBucketSize << endl;
+ }
+ }
+}
diff --git a/src/mongo/dbtests/perf/perftest.cpp b/src/mongo/dbtests/perf/perftest.cpp
new file mode 100644
index 00000000000..b6219f7f5d9
--- /dev/null
+++ b/src/mongo/dbtests/perf/perftest.cpp
@@ -0,0 +1,761 @@
+// perftest.cpp : Run db performance tests.
+//
+
+/**
+ * Copyright (C) 2009 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+
+#include "../../client/dbclient.h"
+#include "../../db/instance.h"
+#include "../../db/ops/query.h"
+#include "../../db/queryoptimizer.h"
+#include "../../util/file_allocator.h"
+
+#include "../framework.h"
+#include <boost/date_time/posix_time/posix_time.hpp>
+
+namespace mongo {
+ extern string dbpath;
+} // namespace mongo
+
+
+using namespace mongo;
+using namespace mongo::regression;
+
+DBClientBase *client_;
+
+// Each test runs with a separate db, so no test does any of the startup
+// (ie allocation) work for another test.
+template< class T >
+string testDb( T *t = 0 ) {
+ string name = mongo::demangleName( typeid( T ) );
+ // Make filesystem safe.
+ for( string::iterator i = name.begin(); i != name.end(); ++i )
+ if ( *i == ':' )
+ *i = '_';
+ return name;
+}
+
+template< class T >
+string testNs( T *t ) {
+ stringstream ss;
+ ss << testDb( t ) << ".perftest";
+ return ss.str();
+}
+
+template <class T>
+class Runner {
+public:
+ void run() {
+ T test;
+ string name = testDb( &test );
+ boost::posix_time::ptime start = boost::posix_time::microsec_clock::universal_time();
+ test.run();
+ boost::posix_time::ptime end = boost::posix_time::microsec_clock::universal_time();
+ long long micro = ( end - start ).total_microseconds();
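+ // Report elapsed time as seconds.microseconds; an output line looks like
+ // (illustrative): {'<TestDbName>': 2.345678}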
+ cout << "{'" << name << "': "
+ << micro / 1000000
+ << "."
+ << setw( 6 ) << setfill( '0' ) << micro % 1000000
+ << "}" << endl;
+ }
+ ~Runner() {
+ FileAllocator::get()->waitUntilFinished();
+ client_->dropDatabase( testDb< T >().c_str() );
+ }
+};
+
+class RunnerSuite : public Suite {
+public:
+ RunnerSuite( string name ) : Suite( name ) {}
+protected:
+ template< class T >
+ void add() {
+ Suite::add< Runner< T > >();
+ }
+};
+
+namespace Insert {
+ class IdIndex {
+ public:
+ void run() {
+ string ns = testNs( this );
+ for( int i = 0; i < 100000; ++i ) {
+ client_->insert( ns.c_str(), BSON( "_id" << i ) );
+ }
+ }
+ };
+
+ class TwoIndex {
+ public:
+ TwoIndex() : ns_( testNs( this ) ) {
+ client_->ensureIndex( ns_, BSON( "_id" << 1 ), "my_id" );
+ }
+ void run() {
+ for( int i = 0; i < 100000; ++i )
+ client_->insert( ns_.c_str(), BSON( "_id" << i ) );
+ }
+ string ns_;
+ };
+
+ class TenIndex {
+ public:
+ TenIndex() : ns_( testNs( this ) ) {
+ const char *names = "aaaaaaaaa";
+ for( int i = 0; i < 9; ++i ) {
+ client_->resetIndexCache();
+ client_->ensureIndex( ns_.c_str(), BSON( "_id" << 1 ), false, names + i );
+ }
+ }
+ void run() {
+ for( int i = 0; i < 100000; ++i )
+ client_->insert( ns_.c_str(), BSON( "_id" << i ) );
+ }
+ string ns_;
+ };
+
+ class Capped {
+ public:
+ Capped() : ns_( testNs( this ) ) {
+ client_->createCollection( ns_.c_str(), 100000, true );
+ }
+ void run() {
+ for( int i = 0; i < 100000; ++i )
+ client_->insert( ns_.c_str(), BSON( "_id" << i ) );
+ }
+ string ns_;
+ };
+
+ class OneIndexReverse {
+ public:
+ OneIndexReverse() : ns_( testNs( this ) ) {
+ client_->ensureIndex( ns_, BSON( "_id" << 1 ) );
+ }
+ void run() {
+ for( int i = 0; i < 100000; ++i )
+ client_->insert( ns_.c_str(), BSON( "_id" << ( 100000 - 1 - i ) ) );
+ }
+ string ns_;
+ };
+
+ class OneIndexHighLow {
+ public:
+ OneIndexHighLow() : ns_( testNs( this ) ) {
+ client_->ensureIndex( ns_, BSON( "_id" << 1 ) );
+ }
+ void run() {
+ for( int i = 0; i < 100000; ++i ) {
+ int j = 50000 + ( ( i % 2 == 0 ) ? 1 : -1 ) * ( i / 2 + 1 );
+ client_->insert( ns_.c_str(), BSON( "_id" << j ) );
+ }
+ }
+ string ns_;
+ };
+
+ class All : public RunnerSuite {
+ public:
+ All() : RunnerSuite( "insert" ) {}
+
+ void setupTests() {
+ add< IdIndex >();
+ add< TwoIndex >();
+ add< TenIndex >();
+ add< Capped >();
+ add< OneIndexReverse >();
+ add< OneIndexHighLow >();
+ }
+ } all;
+} // namespace Insert
+
+namespace Update {
+ class Smaller {
+ public:
+ Smaller() : ns_( testNs( this ) ) {
+ for( int i = 0; i < 100000; ++i )
+ client_->insert( ns_.c_str(), BSON( "_id" << i << "b" << 2 ) );
+ }
+ void run() {
+ for( int i = 0; i < 100000; ++i )
+ client_->update( ns_.c_str(), QUERY( "_id" << i ), BSON( "_id" << i ) );
+ }
+ string ns_;
+ };
+
+ class Bigger {
+ public:
+ Bigger() : ns_( testNs( this ) ) {
+ for( int i = 0; i < 100000; ++i )
+ client_->insert( ns_.c_str(), BSON( "_id" << i ) );
+ }
+ void run() {
+ for( int i = 0; i < 100000; ++i )
+ client_->update( ns_.c_str(), QUERY( "_id" << i ), BSON( "_id" << i << "b" << 2 ) );
+ }
+ string ns_;
+ };
+
+ class Inc {
+ public:
+ Inc() : ns_( testNs( this ) ) {
+ for( int i = 0; i < 10000; ++i )
+ client_->insert( ns_.c_str(), BSON( "_id" << i << "i" << 0 ) );
+ }
+ void run() {
+ for( int j = 0; j < 10; ++j )
+ for( int i = 0; i < 10000; ++i )
+ client_->update( ns_.c_str(), QUERY( "_id" << i ), BSON( "$inc" << BSON( "i" << 1 ) ) );
+ }
+ string ns_;
+ };
+
+ class Set {
+ public:
+ Set() : ns_( testNs( this ) ) {
+ for( int i = 0; i < 10000; ++i )
+ client_->insert( ns_.c_str(), BSON( "_id" << i << "i" << 0 ) );
+ }
+ void run() {
+ for( int j = 1; j < 11; ++j )
+ for( int i = 0; i < 10000; ++i )
+ client_->update( ns_.c_str(), QUERY( "_id" << i ), BSON( "$set" << BSON( "i" << j ) ) );
+ }
+ string ns_;
+ };
+
+ class SetGrow {
+ public:
+ SetGrow() : ns_( testNs( this ) ) {
+ for( int i = 0; i < 10000; ++i )
+ client_->insert( ns_.c_str(), BSON( "_id" << i << "i" << "" ) );
+ }
+ void run() {
+ for( int j = 9; j > -1; --j )
+ for( int i = 0; i < 10000; ++i )
+ client_->update( ns_.c_str(), QUERY( "_id" << i ), BSON( "$set" << BSON( "i" << "aaaaaaaaaa"[j] ) ) );
+ }
+ string ns_;
+ };
+
+ class All : public RunnerSuite {
+ public:
+ All() : RunnerSuite( "update" ) {}
+ void setupTests() {
+ add< Smaller >();
+ add< Bigger >();
+ add< Inc >();
+ add< Set >();
+ add< SetGrow >();
+ }
+ } all;
+} // namespace Update
+
+namespace BSON {
+
+ const char *sample =
+ "{\"one\":2, \"two\":5, \"three\": {},"
+ "\"four\": { \"five\": { \"six\" : 11 } },"
+ "\"seven\": [ \"a\", \"bb\", \"ccc\", 5 ],"
+ "\"eight\": Dbref( \"rrr\", \"01234567890123456789aaaa\" ),"
+ "\"_id\": ObjectId( \"deadbeefdeadbeefdeadbeef\" ),"
+ "\"nine\": { \"$binary\": \"abc=\", \"$type\": \"02\" },"
+ "\"ten\": Date( 44 ), \"eleven\": /foooooo/i }";
+
+ const char *shopwikiSample =
+ "{ '_id' : '289780-80f85380b5c1d4a0ad75d1217673a4a2' , 'site_id' : 289780 , 'title'"
+ ": 'Jubilee - Margaret Walker' , 'image_url' : 'http://www.heartlanddigsandfinds.c"
+ "om/store/graphics/Product_Graphics/Product_8679.jpg' , 'url' : 'http://www.heartla"
+ "nddigsandfinds.com/store/store_product_detail.cfm?Product_ID=8679&Category_ID=2&Su"
+ "b_Category_ID=910' , 'url_hash' : 3450626119933116345 , 'last_update' : null , '"
+ "features' : { '$imagePrefetchDate' : '2008Aug30 22:39' , '$image.color.rgb' : '5a7"
+ "574' , 'Price' : '$10.99' , 'Description' : 'Author--s 1st Novel. A Houghton Miffl"
+ "in Literary Fellowship Award novel by the esteemed poet and novelist who has demon"
+ "strated a lifelong commitment to the heritage of black culture. An acclaimed story"
+ "of Vyry, a negro slave during the 19th Century, facing the biggest challenge of h"
+ "er lifetime - that of gaining her freedom, fighting for all the things she had nev"
+ "er known before. The author, great-granddaughter of Vyry, reveals what the Civil W"
+ "ar in America meant to the Negroes. Slavery W' , '$priceHistory-1' : '2008Dec03 $1"
+ "0.99' , 'Brand' : 'Walker' , '$brands_in_title' : 'Walker' , '--path' : '//HTML[1]"
+ "/BODY[1]/TABLE[1]/TR[1]/TD[1]/P[1]/TABLE[1]/TR[1]/TD[1]/TABLE[1]/TR[2]/TD[2]/TABLE"
+ "[1]/TR[1]/TD[1]/P[1]/TABLE[1]/TR[1]' , '~location' : 'en_US' , '$crawled' : '2009J"
+ "an11 03:22' , '$priceHistory-2' : '2008Nov15 $10.99' , '$priceHistory-0' : '2008De"
+ "c24 $10.99'}}";
+
+ class Parse {
+ public:
+ void run() {
+ for( int i = 0; i < 10000; ++i )
+ fromjson( sample );
+ }
+ };
+
+ class ShopwikiParse {
+ public:
+ void run() {
+ for( int i = 0; i < 10000; ++i )
+ fromjson( shopwikiSample );
+ }
+ };
+
+ class Json {
+ public:
+ Json() : o_( fromjson( sample ) ) {}
+ void run() {
+ for( int i = 0; i < 10000; ++i )
+ o_.jsonString();
+ }
+ BSONObj o_;
+ };
+
+ class ShopwikiJson {
+ public:
+ ShopwikiJson() : o_( fromjson( shopwikiSample ) ) {}
+ void run() {
+ for( int i = 0; i < 10000; ++i )
+ o_.jsonString();
+ }
+ BSONObj o_;
+ };
+
+ template <int LEN>
+ class Copy {
+ public:
+ Copy(){
+ // putting it in a subobject to force copy on getOwned
+ BSONObjBuilder outer;
+ BSONObjBuilder b (outer.subobjStart("inner"));
+ while (b.len() < LEN)
+ b.append(BSONObjBuilder::numStr(b.len()), b.len());
+ b.done();
+ _base = outer.obj();
+ }
+
+ void run() {
+ int iterations = 1000*1000;
+ while (iterations--){
+ BSONObj temp = copy(_base.firstElement().embeddedObject().getOwned());
+ }
+ }
+
+ private:
+ // noinline should force copying even when optimized
+ NOINLINE_DECL BSONObj copy(BSONObj x){
+ return x;
+ }
+
+ BSONObj _base;
+ };
+
+
+
+ class All : public RunnerSuite {
+ public:
+ All() : RunnerSuite( "bson" ) {}
+ void setupTests() {
+ add< Parse >();
+ add< ShopwikiParse >();
+ add< Json >();
+ add< ShopwikiJson >();
+ add< Copy<10> >();
+ add< Copy<100> >();
+ add< Copy<1000> >();
+ add< Copy<10*1000> >();
+ }
+ } all;
+
+} // namespace BSON
+
+namespace Index {
+
+ class Int {
+ public:
+ Int() : ns_( testNs( this ) ) {
+ for( int i = 0; i < 100000; ++i )
+ client_->insert( ns_.c_str(), BSON( "a" << i ) );
+ }
+ void run() {
+ client_->ensureIndex( ns_, BSON( "a" << 1 ) );
+ }
+ string ns_;
+ };
+
+ class ObjectId {
+ public:
+ ObjectId() : ns_( testNs( this ) ) {
+ OID id;
+ for( int i = 0; i < 100000; ++i ) {
+ id.init();
+ client_->insert( ns_.c_str(), BSON( "a" << id ) );
+ }
+ }
+ void run() {
+ client_->ensureIndex( ns_, BSON( "a" << 1 ) );
+ }
+ string ns_;
+ };
+
+ class String {
+ public:
+ String() : ns_( testNs( this ) ) {
+ for( int i = 0; i < 100000; ++i ) {
+ stringstream ss;
+ ss << i;
+ client_->insert( ns_.c_str(), BSON( "a" << ss.str() ) );
+ }
+ }
+ void run() {
+ client_->ensureIndex( ns_, BSON( "a" << 1 ) );
+ }
+ string ns_;
+ };
+
+ class Object {
+ public:
+ Object() : ns_( testNs( this ) ) {
+ for( int i = 0; i < 100000; ++i ) {
+ client_->insert( ns_.c_str(), BSON( "a" << BSON( "a" << i ) ) );
+ }
+ }
+ void run() {
+ client_->ensureIndex( ns_, BSON( "a" << 1 ) );
+ }
+ string ns_;
+ };
+
+ class All : public RunnerSuite {
+ public:
+ All() : RunnerSuite( "index" ) {}
+ void setupTests() {
+ add< Int >();
+ add< ObjectId >();
+ add< String >();
+ add< Object >();
+ }
+ } all;
+
+} // namespace Index
+
+namespace QueryTests {
+
+ class NoMatch {
+ public:
+ NoMatch() : ns_( testNs( this ) ) {
+ for( int i = 0; i < 100000; ++i )
+ client_->insert( ns_.c_str(), BSON( "_id" << i ) );
+ }
+ void run() {
+ client_->findOne( ns_.c_str(), QUERY( "_id" << 100000 ) );
+ }
+ string ns_;
+ };
+
+ class NoMatchIndex {
+ public:
+ NoMatchIndex() : ns_( testNs( this ) ) {
+ for( int i = 0; i < 100000; ++i )
+ client_->insert( ns_.c_str(), BSON( "_id" << i ) );
+ }
+ void run() {
+ client_->findOne( ns_.c_str(),
+ QUERY( "a" << "b" ).hint( BSON( "_id" << 1 ) ) );
+ }
+ string ns_;
+ };
+
+ class NoMatchLong {
+ public:
+ NoMatchLong() : ns_( testNs( this ) ) {
+ const char *names = "aaaaaaaaaa";
+ for( int i = 0; i < 100000; ++i ) {
+ BSONObjBuilder b;
+ for( int j = 0; j < 10; ++j )
+ b << ( names + j ) << i;
+ client_->insert( ns_.c_str(), b.obj() );
+ }
+ }
+ void run() {
+ client_->findOne( ns_.c_str(), QUERY( "a" << 100000 ) );
+ }
+ string ns_;
+ };
+
+ class SortOrdered {
+ public:
+ SortOrdered() : ns_( testNs( this ) ) {
+ for( int i = 0; i < 50000; ++i )
+ client_->insert( ns_.c_str(), BSON( "_id" << i ) );
+ }
+ void run() {
+ auto_ptr< DBClientCursor > c =
+ client_->query( ns_.c_str(), Query( BSONObj() ).sort( BSON( "_id" << 1 ) ) );
+ int i = 0;
+ for( ; c->more(); c->nextSafe(), ++i );
+ ASSERT_EQUALS( 50000, i );
+ }
+ string ns_;
+ };
+
+ class SortReverse {
+ public:
+ SortReverse() : ns_( testNs( this ) ) {
+ for( int i = 0; i < 50000; ++i )
+ client_->insert( ns_.c_str(), BSON( "_id" << ( 50000 - 1 - i ) ) );
+ }
+ void run() {
+ auto_ptr< DBClientCursor > c =
+ client_->query( ns_.c_str(), Query( BSONObj() ).sort( BSON( "_id" << 1 ) ) );
+ int i = 0;
+ for( ; c->more(); c->nextSafe(), ++i );
+ ASSERT_EQUALS( 50000, i );
+ }
+ string ns_;
+ };
+
+ class GetMore {
+ public:
+ GetMore() : ns_( testNs( this ) ) {
+ for( int i = 0; i < 100000; ++i )
+ client_->insert( ns_.c_str(), BSON( "a" << i ) );
+ c_ = client_->query( ns_.c_str(), Query() );
+ }
+ void run() {
+ int i = 0;
+ for( ; c_->more(); c_->nextSafe(), ++i );
+ ASSERT_EQUALS( 100000, i );
+ }
+ string ns_;
+ auto_ptr< DBClientCursor > c_;
+ };
+
+ class GetMoreIndex {
+ public:
+ GetMoreIndex() : ns_( testNs( this ) ) {
+ for( int i = 0; i < 100000; ++i )
+ client_->insert( ns_.c_str(), BSON( "a" << i ) );
+ client_->ensureIndex( ns_, BSON( "a" << 1 ) );
+ c_ = client_->query( ns_.c_str(), QUERY( "a" << GT << -1 ).hint( BSON( "a" << 1 ) ) );
+ }
+ void run() {
+ int i = 0;
+ for( ; c_->more(); c_->nextSafe(), ++i );
+ ASSERT_EQUALS( 100000, i );
+ }
+ string ns_;
+ auto_ptr< DBClientCursor > c_;
+ };
+
+ class GetMoreKeyMatchHelps {
+ public:
+ GetMoreKeyMatchHelps() : ns_( testNs( this ) ) {
+ for( int i = 0; i < 1000000; ++i )
+ client_->insert( ns_.c_str(), BSON( "a" << i << "b" << i % 10 << "c" << "d" ) );
+ client_->ensureIndex( ns_, BSON( "a" << 1 << "b" << 1 ) );
+ c_ = client_->query( ns_.c_str(), QUERY( "a" << GT << -1 << "b" << 0 ).hint( BSON( "a" << 1 << "b" << 1 ) ) );
+ }
+ void run() {
+ int i = 0;
+ for( ; c_->more(); c_->nextSafe(), ++i );
+ ASSERT_EQUALS( 100000, i );
+ }
+ string ns_;
+ auto_ptr< DBClientCursor > c_;
+ };
+
+ class All : public RunnerSuite {
+ public:
+ All() : RunnerSuite( "query" ) {}
+ void setupTests() {
+ add< NoMatch >();
+ add< NoMatchIndex >();
+ add< NoMatchLong >();
+ add< SortOrdered >();
+ add< SortReverse >();
+ add< GetMore >();
+ add< GetMoreIndex >();
+ add< GetMoreKeyMatchHelps >();
+ }
+ } all;
+
+} // namespace QueryTests
+
+namespace Count {
+
+ class Count {
+ public:
+ Count() : ns_( testNs( this ) ) {
+ BSONObj obj = BSON( "a" << 1 );
+ for( int i = 0; i < 100000; ++i )
+ client_->insert( ns_, obj );
+ }
+ void run() {
+ ASSERT_EQUALS( 100000U, client_->count( ns_, BSON( "a" << 1 ) ) );
+ }
+ string ns_;
+ };
+
+ class CountIndex {
+ public:
+ CountIndex() : ns_( testNs( this ) ) {
+ BSONObj obj = BSON( "a" << 1 );
+ for( int i = 0; i < 100000; ++i )
+ client_->insert( ns_, obj );
+ client_->ensureIndex( ns_, obj );
+ }
+ void run() {
+ // 'simple' match does not work for numbers
+ ASSERT_EQUALS( 100000U, client_->count( ns_, BSON( "a" << 1 ) ) );
+ }
+ string ns_;
+ };
+
+ class CountSimpleIndex {
+ public:
+ CountSimpleIndex() : ns_( testNs( this ) ) {
+ BSONObj obj = BSON( "a" << "b" );
+ for( int i = 0; i < 100000; ++i )
+ client_->insert( ns_, obj );
+ client_->ensureIndex( ns_, obj );
+ }
+ void run() {
+ ASSERT_EQUALS( 100000U, client_->count( ns_, BSON( "a" << "b" ) ) );
+ }
+ string ns_;
+ };
+
+ class All : public RunnerSuite {
+ public:
+ All() : RunnerSuite( "count" ) {}
+ void setupTests() {
+ add< Count >();
+ add< CountIndex >();
+ add< CountSimpleIndex >();
+ }
+ } all;
+
+} // namespace Count
+
+namespace Plan {
+
+ class Hint {
+ public:
+ Hint() : ns_( testNs( this ) ) {
+ const char *names = "aaaaaaaaa";
+ for( int i = 0; i < 9; ++i ) {
+ client_->resetIndexCache();
+ client_->ensureIndex( ns_.c_str(), BSON( ( names + i ) << 1 ), false, names + i );
+ }
+ lk_.reset( new dblock );
+ Client::Context ctx( ns_ );
+ hint_ = BSON( "hint" << BSON( "a" << 1 ) );
+ hintElt_ = hint_.firstElement();
+ }
+ void run() {
+ for( int i = 0; i < 10000; ++i )
+ MultiPlanScanner s( ns_.c_str(), BSONObj(), BSONObj(), &hintElt_ );
+ }
+ string ns_;
+ auto_ptr< dblock > lk_;
+ BSONObj hint_;
+ BSONElement hintElt_;
+ };
+
+ class Sort {
+ public:
+ Sort() : ns_( testNs( this ) ) {
+ const char *names = "aaaaaaaaaa";
+ for( int i = 0; i < 10; ++i ) {
+ client_->resetIndexCache();
+ client_->ensureIndex( ns_.c_str(), BSON( ( names + i ) << 1 ), false, names + i );
+ }
+ lk_.reset( new dblock );
+ }
+ void run() {
+ Client::Context ctx( ns_ );
+ for( int i = 0; i < 10000; ++i )
+ MultiPlanScanner s( ns_.c_str(), BSONObj(), BSON( "a" << 1 ) );
+ }
+ string ns_;
+ auto_ptr< dblock > lk_;
+ };
+
+ class Query {
+ public:
+ Query() : ns_( testNs( this ) ) {
+ const char *names = "aaaaaaaaaa";
+ for( int i = 0; i < 10; ++i ) {
+ client_->resetIndexCache();
+ client_->ensureIndex( ns_.c_str(), BSON( ( names + i ) << 1 ), false, names + i );
+ }
+ lk_.reset( new dblock );
+ }
+ void run() {
+ Client::Context ctx( ns_.c_str() );
+ for( int i = 0; i < 10000; ++i )
+ MultiPlanScanner s( ns_.c_str(), BSON( "a" << 1 ), BSONObj() );
+ }
+ string ns_;
+ auto_ptr< dblock > lk_;
+ };
+
+ class All : public RunnerSuite {
+ public:
+ All() : RunnerSuite("plan" ) {}
+ void setupTests() {
+ add< Hint >();
+ add< Sort >();
+ add< Query >();
+ }
+ } all;
+} // namespace Plan
+
+namespace Misc {
+ class TimeMicros64 {
+ public:
+ void run() {
+ int iterations = 1000*1000;
+ while(iterations--){
+ curTimeMicros64();
+ }
+ }
+ };
+
+ class JSTime {
+ public:
+ void run() {
+ int iterations = 1000*1000;
+ while(iterations--){
+ jsTime();
+ }
+ }
+ };
+
+ class All : public RunnerSuite {
+ public:
+ All() : RunnerSuite("misc") {}
+ void setupTests() {
+ add< TimeMicros64 >();
+ add< JSTime >();
+ }
+ } all;
+}
+
+int main( int argc, char **argv ) {
+ logLevel = -1;
+ client_ = new DBDirectClient();
+
+ return Suite::run(argc, argv, "/data/db/perftest");
+}
+
diff --git a/src/mongo/dbtests/perftests.cpp b/src/mongo/dbtests/perftests.cpp
new file mode 100644
index 00000000000..284e3991f15
--- /dev/null
+++ b/src/mongo/dbtests/perftests.cpp
@@ -0,0 +1,1029 @@
+/** @file perftests.cpp : unit tests relating to performance
+
+ The idea herein is tests that run fast and can be part of the normal CI suite, so no test herein should take
+ a long time to run. Obviously we need long-running tests too, but they will be separate.
+
+ These tests use DBDirectClient; they are a bit white-boxish.
+*/
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include <fstream>
+#include "../db/ops/query.h"
+#include "../db/db.h"
+#include "../db/instance.h"
+#include "../db/json.h"
+#include "../db/lasterror.h"
+#include "../db/ops/update.h"
+#include "../db/taskqueue.h"
+#include "../util/timer.h"
+#include "dbtests.h"
+#include "../db/dur_stats.h"
+#include "../util/checksum.h"
+#include "../util/version.h"
+#include "../db/key.h"
+#include "../util/compress.h"
+
+using namespace bson;
+
+namespace mongo {
+ namespace regression {
+ extern unsigned perfHist;
+ }
+}
+
+namespace PerfTests {
+
+ const bool profiling = false;
+
+ typedef DBDirectClient DBClientType;
+ //typedef DBClientConnection DBClientType;
+
+ class ClientBase {
+ public:
+ // NOTE: Not bothering to back up the old error record.
+ ClientBase() {
+ //_client.connect("localhost");
+ mongo::lastError.reset( new LastError() );
+ }
+ virtual ~ClientBase() {
+ //mongo::lastError.release();
+ }
+ protected:
+ static void insert( const char *ns, BSONObj o ) {
+ _client.insert( ns, o );
+ }
+ static void update( const char *ns, BSONObj q, BSONObj o, bool upsert = 0 ) {
+ _client.update( ns, Query( q ), o, upsert );
+ }
+ static bool error() {
+ return !_client.getPrevError().getField( "err" ).isNull();
+ }
+ DBClientBase &client() const { return _client; }
+ private:
+ static DBClientType _client;
+ };
+ DBClientType ClientBase::_client;
+
+ // todo: use a couple threads. not a very good test yet.
+ class TaskQueueTest {
+ static int tot;
+ struct V {
+ int val;
+ static void go(const V &v) { tot += v.val; }
+ };
+ public:
+ void run() {
+ tot = 0;
+ TaskQueue<V> d;
+ int x = 0;
+ for( int i = 0; i < 100; i++ ) {
+ if( i % 30 == 0 )
+ d.invoke();
+
+ x += i;
+ writelock lk;
+ V v;
+ v.val = i;
+ d.defer(v);
+ }
+ d.invoke();
+ assert( x == tot );
+ }
+ };
+ int TaskQueueTest::tot;
+
+ class B : public ClientBase {
+ string _ns;
+ protected:
+ const char *ns() { return _ns.c_str(); }
+
+ // anything you want to do before being timed
+ virtual void prep() { }
+
+ virtual void timed() = 0;
+
+ // optional 2nd test phase to be timed separately
+ // return name of it
+ virtual string timed2(DBClientBase&) { return ""; }
+
+ virtual void post() { }
+
+ virtual string name() = 0;
+
+ // how long to run the test. 0 is a sentinel which means just run the timed() method once and time it.
+ virtual int howLongMillis() { return profiling ? 60000 : 5000; }
+
+ /* override to return false if your test output doesn't need dur stats */
+ virtual bool showDurStats() { return true; }
+
+ static boost::shared_ptr<DBClientConnection> conn;
+ static string _perfhostname;
+ static unsigned once;
+
+ public:
+ /* if you want recording of the timings, place the password for the perf database
+ in ./../settings.py:
+ pstatspassword="<pwd>"
+ */
+ void connect() {
+ if( once )
+ return;
+ ++once;
+
+ // no writing to perf db if _DEBUG
+ DEV return;
+
+ const char *fn = "../../settings.py";
+ if( !exists(fn) ) {
+ if( exists("settings.py") )
+ fn = "settings.py";
+ else {
+ cout << "no ../../settings.py or ./settings.py file found. will not write perf stats to pstats db." << endl;
+ cout << "it is recommended this be enabled even on dev boxes" << endl;
+ return;
+ }
+ }
+
+ try {
+ if( conn == 0 ) {
+ MemoryMappedFile f;
+ const char *p = (const char *) f.mapWithOptions(fn, MongoFile::READONLY);
+ string pwd;
+
+ {
+ const char *q = str::after(p, "pstatspassword=\"");
+ if( *q == 0 ) {
+ cout << "info perftests.cpp: no pstatspassword= in settings.py" << endl;
+ return;
+ }
+ else {
+ pwd = str::before(q, '\"');
+ }
+ }
+
+ boost::shared_ptr<DBClientConnection> c(new DBClientConnection(false, 0, 60));
+ string err;
+ if( c->connect("perfdb.10gen.cc", err) ) {
+ if( !c->auth("perf", "perf", pwd, err) ) {
+ cout << "info: authentication with stats db failed: " << err << endl;
+ assert(false);
+ }
+ conn = c;
+
+ // override the hostname with the buildbot hostname, if present
+ ifstream hostf( "../../info/host" );
+ if ( hostf.good() ) {
+ char buf[1024];
+ hostf.getline(buf, sizeof(buf));
+ _perfhostname = buf;
+ }
+ else {
+ _perfhostname = getHostName();
+ }
+ }
+ else {
+ cout << err << " (to log perfstats)" << endl;
+ }
+ }
+ }
+ catch(...) { }
+ }
+
+ virtual unsigned batchSize() { return 50; }
+
+ void say(unsigned long long n, int ms, string s) {
+ unsigned long long rps = n*1000/ms;
+ cout << "stats " << setw(33) << left << s << ' ' << right << setw(9) << rps << ' ' << right << setw(5) << ms << "ms ";
+ if( showDurStats() )
+ cout << dur::stats.curr->_asCSV();
+ cout << endl;
+
+ connect();
+
+ if( conn && !conn->isFailed() ) {
+ const char *ns = "perf.pstats";
+ if( perfHist ) {
+ static bool needver = true;
+ try {
+ // try to report rps from last time
+ Query q;
+ {
+ BSONObjBuilder b;
+ b.append("host",_perfhostname).append("test",s).append("dur",cmdLine.dur);
+ DEV { b.append("info.DEBUG",true); }
+ else b.appendNull("info.DEBUG");
+ if( sizeof(int*) == 4 )
+ b.append("info.bits", 32);
+ else
+ b.appendNull("info.bits");
+ q = Query(b.obj()).sort("when",-1);
+ }
+ BSONObj fields = BSON( "rps" << 1 << "info" << 1 );
+ vector<BSONObj> v;
+ conn->findN(v, ns, q, perfHist, 0, &fields);
+ for( vector<BSONObj>::iterator i = v.begin(); i != v.end(); i++ ) {
+ BSONObj o = *i;
+ double lastrps = o["rps"].Number();
+ if( lastrps ) {
+ cout << "stats " << setw(33) << right << "new/old:" << ' ' << setw(9);
+ cout << fixed << setprecision(2) << rps / lastrps;
+ if( needver ) {
+ cout << " " << o.getFieldDotted("info.git").toString();
+ }
+ cout << '\n';
+ }
+ }
+ } catch(...) { }
+ cout.flush();
+ needver = false;
+ }
+ {
+ bob b;
+ b.append("host", _perfhostname);
+ b.appendTimeT("when", time(0));
+ b.append("test", s);
+ b.append("rps", (int) rps);
+ b.append("millis", ms);
+ b.appendBool("dur", cmdLine.dur);
+ if( showDurStats() && cmdLine.dur )
+ b.append("durStats", dur::stats.curr->_asObj());
+ {
+ bob inf;
+ inf.append("version", versionString);
+ if( sizeof(int*) == 4 ) inf.append("bits", 32);
+ DEV inf.append("DEBUG", true);
+#if defined(_WIN32)
+ inf.append("os", "win");
+#endif
+ inf.append("git", gitVersion());
+ inf.append("boost", BOOST_VERSION);
+ b.append("info", inf.obj());
+ }
+ BSONObj o = b.obj();
+ //cout << "inserting " << o.toString() << endl;
+ try {
+ conn->insert(ns, o);
+ }
+ catch ( std::exception& e ) {
+ warning() << "couldn't save perf results: " << e.what() << endl;
+ }
+ }
+ }
+ }
+
+ virtual bool testThreaded() { return false; }
+
+ unsigned long long n;
+
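+        // test driver: drop the collection, prep(), run timed() in batches for roughly howLongMillis(),
+        // report via say(), then optionally time timed2() and a threaded pass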
+ void run() {
+ _ns = string("perftest.") + name();
+ client().dropCollection(ns());
+
+ prep();
+
+ int hlm = howLongMillis();
+ DEV {
+ // don't run very long with _DEBUG - not very meaningful anyway on that build
+ hlm = min(hlm, 500);
+ }
+
+ dur::stats._intervalMicros = 0; // no auto rotate
+ dur::stats.curr->reset();
+ mongo::Timer t;
+ n = 0;
+ const unsigned Batch = batchSize();
+
+ if( hlm == 0 ) {
+ // means just do once
+ timed();
+ }
+ else {
+ do {
+ unsigned i;
+ for( i = 0; i < Batch; i++ )
+ timed();
+ n += i;
+ } while( t.micros() < (unsigned) hlm * 1000 );
+ }
+
+ client().getLastError(); // block until all ops are finished
+ int ms = t.millis();
+
+ say(n, ms, name());
+
+ post();
+
+ string test2name = timed2(client());
+ {
+ if( test2name.size() != 0 ) {
+ dur::stats.curr->reset();
+ mongo::Timer t;
+ unsigned long long n = 0;
+ while( 1 ) {
+ unsigned i;
+ for( i = 0; i < Batch; i++ )
+ timed2(client());
+ n += i;
+ if( t.millis() > hlm )
+ break;
+ }
+ int ms = t.millis();
+ say(n, ms, test2name);
+ }
+ }
+
+ if( testThreaded() ) {
+ cout << "testThreaded" << endl;
+ mongo::Timer t;
+ launchThreads(8);
+ //cout << "threaded done " << t.millis() << "ms" << endl;
+ //cout << n * 1000 / t.millis() << " per second" << endl;
+ say(n, t.millis(), test2name+"-threaded");
+
+ }
+ }
+
+ void thread() {
+ DBClientType c;
+ Client::initThreadIfNotAlready("perftestthr");
+ for( unsigned long long i = 0; i < n/8; i++ ) {
+ timed2(c);
+ }
+ cc().shutdown();
+ }
+
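+        // recursively spawn 'remaining' threads running thread(); each frame joins its thread only
+        // after the deeper frames return, so all threads execute concurrently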
+ void launchThreads(int remaining) {
+ if (!remaining)
+ return;
+ boost::thread athread(boost::bind(&B::thread, this));
+ launchThreads(remaining - 1);
+ athread.join();
+ }
+ };
+
+ boost::shared_ptr<DBClientConnection> B::conn;
+ string B::_perfhostname;
+ unsigned B::once;
+
+ unsigned dontOptimizeOutHopefully;
+
+ class NonDurTest : public B {
+ public:
+ virtual int howLongMillis() { return 3000; }
+ virtual bool showDurStats() { return false; }
+ };
+
+ class BSONIter : public NonDurTest {
+ public:
+ int n;
+ bo b, sub;
+ string name() { return "BSONIter"; }
+ BSONIter() {
+ n = 0;
+            sub = bob().appendTimeT("t", time(0)).appendBool("abool", true).appendBinData("somebin", 3, BinDataGeneral, "abc").appendNull("anullone").obj();
+ b = BSON( "_id" << OID() << "x" << 3 << "yaaaaaa" << 3.00009 << "zz" << 1 << "q" << false << "obj" << sub << "zzzzzzz" << "a string a string" );
+ }
+ void timed() {
+ for( bo::iterator i = b.begin(); i.more(); )
+ if( i.next().fieldName() )
+ n++;
+ for( bo::iterator i = sub.begin(); i.more(); )
+ if( i.next().fieldName() )
+ n++;
+ }
+ };
+
+ class BSONGetFields1 : public NonDurTest {
+ public:
+ int n;
+ bo b, sub;
+ string name() { return "BSONGetFields1By1"; }
+ BSONGetFields1() {
+ n = 0;
+            sub = bob().appendTimeT("t", time(0)).appendBool("abool", true).appendBinData("somebin", 3, BinDataGeneral, "abc").appendNull("anullone").obj();
+ b = BSON( "_id" << OID() << "x" << 3 << "yaaaaaa" << 3.00009 << "zz" << 1 << "q" << false << "obj" << sub << "zzzzzzz" << "a string a string" );
+ }
+ void timed() {
+ if( b["x"].eoo() )
+ n++;
+ if( b["q"].eoo() )
+ n++;
+ if( b["zzz"].eoo() )
+ n++;
+ }
+ };
+
+ class BSONGetFields2 : public BSONGetFields1 {
+ public:
+ string name() { return "BSONGetFields"; }
+ void timed() {
+ static const char *names[] = { "x", "q", "zzz" };
+ BSONElement elements[3];
+ b.getFields(3, names, elements);
+ if( elements[0].eoo() )
+ n++;
+ if( elements[1].eoo() )
+ n++;
+ if( elements[2].eoo() )
+ n++;
+ }
+ };
+
+ class KeyTest : public B {
+ public:
+ KeyV1Owned a,b,c;
+ string name() { return "Key-woequal"; }
+ virtual int howLongMillis() { return 3000; }
+ KeyTest() :
+ a(BSON("a"<<1<<"b"<<3.0<<"c"<<"qqq")),
+ b(BSON("a"<<1<<"b"<<3.0<<"c"<<"qqq")),
+ c(BSON("a"<<1<<"b"<<3.0<<"c"<<"qqqb"))
+ {}
+ virtual bool showDurStats() { return false; }
+ void timed() {
+ assert( a.woEqual(b) );
+ assert( !a.woEqual(c) );
+ }
+ };
+
+ unsigned long long aaa;
+
+ class Timer : public B {
+ public:
+ string name() { return "Timer"; }
+ virtual int howLongMillis() { return 1000; }
+ virtual bool showDurStats() { return false; }
+ void timed() {
+ mongo::Timer t;
+ aaa += t.millis();
+ }
+ };
+
+ class Sleep0Ms : public B {
+ public:
+ string name() { return "Sleep0Ms"; }
+ virtual int howLongMillis() { return 400; }
+ virtual bool showDurStats() { return false; }
+ void timed() {
+ sleepmillis(0);
+ mongo::Timer t;
+ aaa++;
+ }
+ };
+
+ RWLock lk("testrw");
+ SimpleMutex m("simptst");
+ mongo::mutex mtest("mtest");
+ SpinLock s;
+
+ class mutexspeed : public B {
+ public:
+ string name() { return "mutex"; }
+ virtual int howLongMillis() { return 500; }
+ virtual bool showDurStats() { return false; }
+ void timed() {
+ mongo::mutex::scoped_lock lk(mtest);
+ }
+ };
+ class simplemutexspeed : public B {
+ public:
+ string name() { return "simplemutex"; }
+ virtual int howLongMillis() { return 500; }
+ virtual bool showDurStats() { return false; }
+ void timed() {
+ SimpleMutex::scoped_lock lk(m);
+ }
+ };
+ class spinlockspeed : public B {
+ public:
+ string name() { return "spinlock"; }
+ virtual int howLongMillis() { return 500; }
+ virtual bool showDurStats() { return false; }
+ void timed() {
+ mongo::scoped_spinlock lk(s);
+ }
+ };
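+    // shared word targeted by the compare-and-swap benchmark below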
+ int cas;
+ class casspeed : public B {
+ public:
+ string name() { return "compareandswap"; }
+ virtual int howLongMillis() { return 500; }
+ virtual bool showDurStats() { return false; }
+ void timed() {
+#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4
+#define RUNCOMPARESWAP 1
+ __sync_bool_compare_and_swap(&cas, 0, 0);
+#endif
+ }
+ };
+ class rlock : public B {
+ public:
+ string name() { return "rlock"; }
+ virtual int howLongMillis() { return 500; }
+ virtual bool showDurStats() { return false; }
+ void timed() {
+ lk.lock_shared();
+ lk.unlock_shared();
+ }
+ };
+ class wlock : public B {
+ public:
+ string name() { return "wlock"; }
+ virtual int howLongMillis() { return 500; }
+ virtual bool showDurStats() { return false; }
+ void timed() {
+ lk.lock();
+ lk.unlock();
+ }
+ };
+
+#if 0
+ class ulock : public B {
+ public:
+ string name() { return "ulock"; }
+ virtual int howLongMillis() { return 500; }
+ virtual bool showDurStats() { return false; }
+ void timed() {
+ lk.lockAsUpgradable();
+ lk.unlockFromUpgradable();
+ }
+ };
+#endif
+
+ class CTM : public B {
+ public:
+ CTM() : last(0), delts(0), n(0) { }
+ string name() { return "curTimeMillis64"; }
+ virtual int howLongMillis() { return 500; }
+ virtual bool showDurStats() { return false; }
+ unsigned long long last;
+ unsigned long long delts;
+ unsigned n;
+ void timed() {
+ unsigned long long x = curTimeMillis64();
+ aaa += x;
+ if( last ) {
+ unsigned long long delt = x-last;
+ if( delt ) {
+ delts += delt;
+ n++;
+ }
+ }
+ last = x;
+ }
+ void post() {
+            // we need to know if the timer granularity is coarse - that could be relevant in some places
+ if( n )
+ cout << " avg timer granularity: " << ((double)delts)/n << "ms " << endl;
+ }
+ };
+
+ class Bldr : public B {
+ public:
+ int n;
+ string name() { return "BufBuilder"; }
+ Bldr() {
+ }
+ virtual int howLongMillis() { return 3000; }
+ virtual bool showDurStats() { return false; }
+ void timed() {
+ BufBuilder b;
+ b.appendNum(3);
+ b.appendUChar(' ');
+ b.appendStr("abcd");
+ n += b.len();
+ }
+ };
+
+ class StkBldr : public B {
+ public:
+ virtual int howLongMillis() { return 3000; }
+ int n;
+ string name() { return "StackBufBuilder"; }
+ virtual bool showDurStats() { return false; }
+ void timed() {
+ StackBufBuilder b;
+ b.appendNum(3);
+ b.appendUChar(' ');
+ b.appendStr("abcd");
+ n += b.len();
+ }
+ };
+
+ // if a test is this fast, it was optimized out
+ class Dummy : public B {
+ public:
+ Dummy() { }
+ virtual int howLongMillis() { return 3000; }
+ string name() { return "dummy"; }
+ void timed() {
+ dontOptimizeOutHopefully++;
+ }
+ virtual bool showDurStats() { return false; }
+ };
+
+ // test thread local speed
+#if defined(_WIN32)
+ __declspec( thread ) int x;
+ class TLS2 : public B {
+ public:
+ virtual int howLongMillis() { return 3000; }
+ string name() { return "thread-local-storage2"; }
+ void timed() {
+ if( x )
+ dontOptimizeOutHopefully++;
+ }
+ virtual bool showDurStats() { return false; }
+ };
+#endif
+
+ // test thread local speed
+ class TLS : public B {
+ public:
+ virtual int howLongMillis() { return 3000; }
+ string name() { return "thread-local-storage"; }
+ void timed() {
+ if( &cc() )
+ dontOptimizeOutHopefully++;
+ }
+ virtual bool showDurStats() { return false; }
+ };
+
+ bool dummy1 = false;
+
+ class TestException : public DBException {
+ public:
+ TestException() : DBException("testexception",3) { }
+ };
+
+ void foo_throws() {
+ if( dontOptimizeOutHopefully ) {
+ throw TestException();
+ }
+ log() << "hmmm" << endl;
+ }
+
+ class Throw : public B {
+ public:
+ virtual int howLongMillis() { return 2000; }
+ string name() { return "throw"; }
+ void timed() {
+ try {
+ foo_throws();
+ dontOptimizeOutHopefully += 2;
+ }
+ catch(DBException& e) {
+ e.getCode();
+ dontOptimizeOutHopefully++;
+ }
+ }
+ virtual bool showDurStats() { return false; }
+ };
+
+ class New128 : public B {
+ public:
+ virtual int howLongMillis() { return 2000; }
+ string name() { return "new128"; }
+ void timed() {
+ char *p = new char[128];
+ if( dontOptimizeOutHopefully++ > 0 )
+                delete[] p;
+ }
+ virtual bool showDurStats() { return false; }
+ };
+
+ class New8 : public B {
+ public:
+ virtual int howLongMillis() { return 2000; }
+ string name() { return "new8"; }
+ void timed() {
+ char *p = new char[8];
+ if( dontOptimizeOutHopefully++ > 0 )
+                delete[] p;
+ }
+ virtual bool showDurStats() { return false; }
+ };
+
+ class Compress : public B {
+ public:
+ const unsigned sz;
+ void *p;
+ Compress() : sz(1024*1024*100+3) { }
+ virtual unsigned batchSize() { return 1; }
+ string name() { return "compress"; }
+ virtual bool showDurStats() { return false; }
+ virtual int howLongMillis() { return 4000; }
+ void prep() {
+ p = malloc(sz);
+ // this isn't a fair test as it is mostly rands but we just want a rough perf check
+ static int last;
+ for (unsigned i = 0; i<sz; i++) {
+ int r = rand();
+ if( (r & 0x300) == 0x300 )
+ r = last;
+ ((char*)p)[i] = r;
+ last = r;
+ }
+ }
+ size_t last;
+ string res;
+ void timed() {
+ mongo::Timer t;
+ string out;
+ size_t len = compress((const char *) p, sz, &out);
+ bool ok = uncompress(out.c_str(), out.size(), &res);
+ ASSERT(ok);
+ static unsigned once;
+ if( once++ == 0 )
+ cout << "compress round trip " << sz/(1024.0*1024) / (t.millis()/1000.0) << "MB/sec\n";
+ //cout << len / (1024.0/1024) << " compressed" << endl;
+ (void)len; //fix unused error while above line is commented out
+ }
+ void post() {
+ ASSERT( memcmp(res.c_str(), p, sz) == 0 );
+ free(p);
+ }
+ };
+
+ // test speed of checksum method
+ class ChecksumTest : public B {
+ public:
+ const unsigned sz;
+ ChecksumTest() : sz(1024*1024*100+3) { }
+ string name() { return "checksum"; }
+ virtual int howLongMillis() { return 2000; }
+ virtual bool showDurStats() { return false; }
+ virtual unsigned batchSize() { return 1; }
+
+ void *p;
+
+ void prep() {
+ {
+ // the checksum code assumes 'standard' rollover on addition overflows. let's check that:
+ unsigned long long x = 0xffffffffffffffffULL;
+ ASSERT( x+2 == 1 );
+ }
+
+ p = malloc(sz);
+ for (unsigned i = 0; i<sz; i++)
+ ((char*)p)[i] = rand();
+ }
+
+ Checksum last;
+
+ void timed() {
+ static int i;
+ Checksum c;
+ c.gen(p, sz);
+ if( i == 0 )
+ last = c;
+ else if( i == 1 ) {
+ ASSERT( c == last );
+ }
+ }
+ void post() {
+ {
+ mongo::Checksum c;
+ c.gen(p, sz-1);
+ ASSERT( c != last );
+ ((char *&)p)[0]++; // check same data, different order, doesn't give same checksum
+ ((char *&)p)[1]--;
+ c.gen(p, sz);
+ ASSERT( c != last );
+ ((char *&)p)[1]++; // check same data, different order, doesn't give same checksum (different longwords case)
+ ((char *&)p)[8]--;
+ c.gen(p, sz);
+ ASSERT( c != last );
+ }
+ free(p);
+ }
+ };
+
+ class InsertDup : public B {
+ const BSONObj o;
+ public:
+ InsertDup() : o( BSON("_id" << 1) ) { } // dup keys
+ string name() {
+ return "insert-duplicate-_ids";
+ }
+ void prep() {
+ client().insert( ns(), o );
+ }
+ void timed() {
+ client().insert( ns(), o );
+ }
+ void post() {
+ assert( client().count(ns()) == 1 );
+ }
+ };
+
+ class Insert1 : public B {
+ const BSONObj x;
+ OID oid;
+ BSONObj query;
+ public:
+ virtual int howLongMillis() { return 30000; }
+ Insert1() : x( BSON("x" << 99) ) {
+ oid.init();
+ query = BSON("_id" << oid);
+ i = 0;
+ }
+ string name() { return "insert-simple"; }
+ unsigned i;
+ void timed() {
+ BSONObj o = BSON( "_id" << i++ << "x" << 99 );
+ client().insert( ns(), o );
+ //client().insert( ns(), x );
+ }
+ virtual bool testThreaded() { return true; }
+ string timed2(DBClientBase& c) {
+ Query q = QUERY( "_id" << (unsigned) Security::getNonce() % i );
+ c.findOne(ns(), q);
+ //client().findOne(ns(), query);
+ return "findOne_by_id";
+ }
+ void post() {
+#if !defined(_DEBUG)
+ assert( client().count(ns()) > 50 );
+#endif
+ }
+ };
+
+ class InsertBig : public B {
+ BSONObj x;
+ virtual int howLongMillis() {
+ if( sizeof(void*) == 4 )
+                return 1000; // could exhaust the 32 bit mmap address space if run too long, as this test adds data fast
+ return 5000;
+ }
+ public:
+ InsertBig() {
+ char buf[200000];
+ BSONObjBuilder b;
+ b.append("x", 99);
+ b.appendBinData("bin", 200000, (BinDataType) 129, buf);
+ x = b.obj();
+ }
+ string name() { return "insert-big"; }
+ void timed() {
+ client().insert( ns(), x );
+ }
+ };
+
+ class InsertRandom : public B {
+ public:
+ virtual int howLongMillis() { return profiling ? 30000 : 5000; }
+ string name() { return "random-inserts"; }
+ void prep() {
+ client().insert( ns(), BSONObj() );
+ client().ensureIndex(ns(), BSON("x"<<1));
+ }
+ void timed() {
+ int x = rand();
+ BSONObj y = BSON("x" << x << "y" << rand() << "z" << 33);
+ client().insert(ns(), y);
+ }
+ };
+
+    /** upserts about 32k records and then keeps updating them;
+        the collection has 2 indexes (_id and {x:1})
+    */
+ class Update1 : public B {
+ public:
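+        // limit x to [0, 32767] so upserts soon begin hitting existing documents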
+ static int rand() {
+ return std::rand() & 0x7fff;
+ }
+ virtual string name() { return "random-upserts"; }
+ void prep() {
+ client().insert( ns(), BSONObj() );
+ client().ensureIndex(ns(), BSON("x"<<1));
+ }
+ void timed() {
+ int x = rand();
+ BSONObj q = BSON("x" << x);
+ BSONObj y = BSON("x" << x << "y" << rand() << "z" << 33);
+ client().update(ns(), q, y, /*upsert*/true);
+ }
+
+ virtual string timed2(DBClientBase& c) {
+ static BSONObj I = BSON( "$inc" << BSON( "y" << 1 ) );
+
+ // test some $inc's
+
+ int x = rand();
+ BSONObj q = BSON("x" << x);
+ c.update(ns(), q, I);
+
+ return name()+"-inc";
+ }
+ };
+
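+    /** Wraps another test T, adding secondary indexes on y and z during prep() so the same
+        workload is measured with extra indexes to maintain. */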
+ template <typename T>
+ class MoreIndexes : public T {
+ public:
+ string name() { return T::name() + "-more-indexes"; }
+ void prep() {
+ T::prep();
+ this->client().ensureIndex(this->ns(), BSON("y"<<1));
+ this->client().ensureIndex(this->ns(), BSON("z"<<1));
+ }
+ };
+
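+    // runs on a separate thread alongside the suite: repeatedly creates a small memory mapped file
+    // and writes to its private view (and to the write view when durability is enabled)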
+ void t() {
+ for( int i = 0; i < 20; i++ ) {
+ sleepmillis(21);
+ string fn = "/tmp/t1";
+ MongoMMF f;
+ unsigned long long len = 1 * 1024 * 1024;
+ assert( f.create(fn, len, /*sequential*/rand()%2==0) );
+ {
+ char *p = (char *) f.getView();
+ assert(p);
+ // write something to the private view as a test
+ strcpy(p, "hello");
+ }
+ if( cmdLine.dur ) {
+ char *w = (char *) f.view_write();
+ strcpy(w + 6, "world");
+ }
+ MongoFileFinder ff;
+ ASSERT( ff.findByPath(fn) );
+ }
+ }
+
+ class All : public Suite {
+ public:
+ All() : Suite( "perf" ) { }
+
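+        /** Run the suite while t() exercises memory mapped files on a concurrent thread. */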
+ Result * run( const string& filter ) {
+ boost::thread a(t);
+ Result * res = Suite::run(filter);
+ a.join();
+ return res;
+ }
+
+ void setupTests() {
+ cout
+ << "stats test rps------ time-- "
+ << dur::stats.curr->_CSVHeader() << endl;
+ if( profiling ) {
+ add< New8 >();
+ add< New128 >();
+ }
+ else {
+ add< Dummy >();
+ add< ChecksumTest >();
+ add< Compress >();
+ add< TLS >();
+#if defined(_WIN32)
+ add< TLS2 >();
+#endif
+ add< New8 >();
+ add< New128 >();
+ add< Throw >();
+ add< Timer >();
+ add< Sleep0Ms >();
+ add< rlock >();
+ add< wlock >();
+ //add< ulock >();
+ add< mutexspeed >();
+ add< simplemutexspeed >();
+ add< spinlockspeed >();
+#ifdef RUNCOMPARESWAP
+ add< casspeed >();
+#endif
+ add< CTM >();
+ add< KeyTest >();
+ add< Bldr >();
+ add< StkBldr >();
+ add< BSONIter >();
+ add< BSONGetFields1 >();
+ add< BSONGetFields2 >();
+ add< TaskQueueTest >();
+ add< InsertDup >();
+ add< Insert1 >();
+ add< InsertRandom >();
+ add< MoreIndexes<InsertRandom> >();
+ add< Update1 >();
+ add< MoreIndexes<Update1> >();
+ add< InsertBig >();
+ }
+ }
+ } myall;
+}
diff --git a/src/mongo/dbtests/queryoptimizercursortests.cpp b/src/mongo/dbtests/queryoptimizercursortests.cpp
new file mode 100644
index 00000000000..2d5590db3b7
--- /dev/null
+++ b/src/mongo/dbtests/queryoptimizercursortests.cpp
@@ -0,0 +1,2521 @@
+// queryoptimizercursortests.cpp : query optimizer cursor unit tests
+//
+
+/**
+ * Copyright (C) 2009 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../db/queryoptimizer.h"
+#include "../db/queryoptimizercursor.h"
+#include "../db/instance.h"
+#include "../db/ops/delete.h"
+#include "dbtests.h"
+
+namespace mongo {
+ void __forceLinkGeoPlugin();
+ shared_ptr<Cursor> newQueryOptimizerCursor( const char *ns, const BSONObj &query, const BSONObj &order = BSONObj(), bool requireIndex = false );
+} // namespace mongo
+
+namespace QueryOptimizerCursorTests {
+
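+    /** Convenience overload that discards the error message and result of the three argument dropCollection(). */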
+ void dropCollection( const char *ns ) {
+ string errmsg;
+ BSONObjBuilder result;
+ dropCollection( ns, errmsg, result );
+ }
+
+ using boost::shared_ptr;
+
+ class CachedMatchCounterCount {
+ public:
+ void run() {
+ long long aggregateNscanned;
+ CachedMatchCounter c( aggregateNscanned, 0 );
+ ASSERT_EQUALS( 0, c.count() );
+ ASSERT_EQUALS( 0, c.cumulativeCount() );
+
+ c.resetMatch();
+ ASSERT( !c.knowMatch() );
+
+ c.setMatch( false );
+ ASSERT( c.knowMatch() );
+
+ c.countMatch( DiskLoc() );
+ ASSERT_EQUALS( 0, c.count() );
+ ASSERT_EQUALS( 0, c.cumulativeCount() );
+
+ c.resetMatch();
+ ASSERT( !c.knowMatch() );
+
+ c.setMatch( true );
+ ASSERT( c.knowMatch() );
+
+ c.countMatch( DiskLoc() );
+ ASSERT_EQUALS( 1, c.count() );
+ ASSERT_EQUALS( 1, c.cumulativeCount() );
+
+            // A match is only counted once per resetMatch(), regardless of document location.
+ c.countMatch( DiskLoc( 1, 1 ) );
+ ASSERT_EQUALS( 1, c.count() );
+ ASSERT_EQUALS( 1, c.cumulativeCount() );
+
+ // Reset and count another match.
+ c.resetMatch();
+ c.setMatch( true );
+ c.countMatch( DiskLoc( 1, 1 ) );
+ ASSERT_EQUALS( 2, c.count() );
+ ASSERT_EQUALS( 2, c.cumulativeCount() );
+ }
+ };
+
+ class CachedMatchCounterAccumulate {
+ public:
+ void run() {
+ long long aggregateNscanned;
+ CachedMatchCounter c( aggregateNscanned, 10 );
+ ASSERT_EQUALS( 0, c.count() );
+ ASSERT_EQUALS( 10, c.cumulativeCount() );
+
+ c.setMatch( true );
+ c.countMatch( DiskLoc() );
+ ASSERT_EQUALS( 1, c.count() );
+ ASSERT_EQUALS( 11, c.cumulativeCount() );
+ }
+ };
+
+ class CachedMatchCounterDedup {
+ public:
+ void run() {
+ long long aggregateNscanned;
+ CachedMatchCounter c( aggregateNscanned, 0 );
+
+ c.setCheckDups( true );
+ c.setMatch( true );
+ c.countMatch( DiskLoc() );
+ ASSERT_EQUALS( 1, c.count() );
+
+ c.resetMatch();
+ c.setMatch( true );
+ c.countMatch( DiskLoc() );
+ ASSERT_EQUALS( 1, c.count() );
+ }
+ };
+
+ class CachedMatchCounterNscanned {
+ public:
+ void run() {
+ long long aggregateNscanned = 5;
+ CachedMatchCounter c( aggregateNscanned, 0 );
+ ASSERT_EQUALS( 0, c.nscanned() );
+ ASSERT_EQUALS( 5, c.aggregateNscanned() );
+
+ c.updateNscanned( 4 );
+ ASSERT_EQUALS( 4, c.nscanned() );
+ ASSERT_EQUALS( 9, c.aggregateNscanned() );
+ }
+ };
+
+ class SmallDupSetUpgrade {
+ public:
+ void run() {
+ SmallDupSet d;
+ for( int i = 0; i < 100; ++i ) {
+ ASSERT( !d.getsetdup( DiskLoc( 0, i ) ) );
+ for( int j = 0; j <= i; ++j ) {
+ ASSERT( d.getdup( DiskLoc( 0, j ) ) );
+ }
+ }
+ }
+ };
+
+ class SmallDupSetUpgradeRead {
+ public:
+ void run() {
+ SmallDupSet d;
+ d.getsetdup( DiskLoc( 0, 0 ) );
+ for( int i = 0; i < 550; ++i ) {
+ ASSERT( d.getdup( DiskLoc( 0, 0 ) ) );
+ }
+ ASSERT( d.getsetdup( DiskLoc( 0, 0 ) ) );
+ }
+ };
+
+ class SmallDupSetUpgradeWrite {
+ public:
+ void run() {
+ SmallDupSet d;
+ for( int i = 0; i < 550; ++i ) {
+ ASSERT( !d.getsetdup( DiskLoc( 0, i ) ) );
+ }
+ for( int i = 0; i < 550; ++i ) {
+ ASSERT( d.getsetdup( DiskLoc( 0, i ) ) );
+ }
+ }
+ };
+
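+    /** Fixture that resets the test collection and provides helpers for iterating a query
+        optimizer cursor with client-side matching and deduping. */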
+ class Base {
+ public:
+ Base() {
+ dblock lk;
+ Client::Context ctx( ns() );
+ string err;
+ userCreateNS( ns(), BSONObj(), err, false );
+ dropCollection( ns() );
+ }
+ ~Base() {
+ cc().curop()->reset();
+ }
+ protected:
+ DBDirectClient _cli;
+ static const char *ns() { return "unittests.QueryOptimizerTests"; }
+ void setQueryOptimizerCursor( const BSONObj &query, const BSONObj &order = BSONObj() ) {
+ setQueryOptimizerCursorWithoutAdvancing( query, order );
+ if ( ok() && !mayReturnCurrent() ) {
+ advance();
+ }
+ }
+ void setQueryOptimizerCursorWithoutAdvancing( const BSONObj &query, const BSONObj &order = BSONObj() ) {
+ _c = newQueryOptimizerCursor( ns(), query, order, false );
+ }
+ bool ok() const { return _c->ok(); }
+ /** Handles matching and deduping. */
+ bool advance() {
+ while( _c->advance() && !mayReturnCurrent() );
+ return ok();
+ }
+ int itcount() {
+ int ret = 0;
+ while( ok() ) {
+ ++ret;
+ advance();
+ }
+ return ret;
+ }
+ BSONObj current() const { return _c->current(); }
+ DiskLoc currLoc() const { return _c->currLoc(); }
+ void prepareToTouchEarlierIterate() { _c->prepareToTouchEarlierIterate(); }
+ void recoverFromTouchingEarlierIterate() { _c->recoverFromTouchingEarlierIterate(); }
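+        /** Whether the current document matches the query (or there is no matcher) and is not a dup; getsetdup() also marks the location as seen. */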
+ bool mayReturnCurrent() {
+// return _c->currentMatches() && !_c->getsetdup( _c->currLoc() );
+ return ( !_c->matcher() || _c->matcher()->matchesCurrent( _c.get() ) ) && !_c->getsetdup( _c->currLoc() );
+ }
+ bool prepareToYield() const { return _c->prepareToYield(); }
+ void recoverFromYield() {
+ _c->recoverFromYield();
+ if ( ok() && !mayReturnCurrent() ) {
+ advance();
+ }
+ }
+ shared_ptr<Cursor> c() { return _c; }
+ long long nscanned() const { return _c->nscanned(); }
+ private:
+ shared_ptr<Cursor> _c;
+ };
+
+ /** No results for empty collection. */
+ class Empty : public Base {
+ public:
+ void run() {
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr<Cursor> c = newQueryOptimizerCursor( ns(), BSONObj() );
+ ASSERT( !c->ok() );
+ ASSERT_THROWS( c->_current(), AssertionException );
+ ASSERT_THROWS( c->current(), AssertionException );
+ ASSERT( c->currLoc().isNull() );
+ ASSERT( !c->advance() );
+ ASSERT_THROWS( c->currKey(), AssertionException );
+ ASSERT_THROWS( c->getsetdup( DiskLoc() ), AssertionException );
+ ASSERT_THROWS( c->isMultiKey(), AssertionException );
+ ASSERT_THROWS( c->matcher(), AssertionException );
+ }
+ };
+
+ /** Simple table scan. */
+ class Unindexed : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSONObj() );
+ ASSERT_EQUALS( 2, itcount() );
+ }
+ };
+
+ /** Basic test with two indexes and deduping requirement. */
+ class Basic : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 2 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 << "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 << "a" << GT << 0 ) );
+ ASSERT( ok() );
+ ASSERT_EQUALS( BSON( "_id" << 1 << "a" << 2 ), current() );
+ ASSERT( advance() );
+ ASSERT_EQUALS( BSON( "_id" << 2 << "a" << 1 ), current() );
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ }
+ };
+
+ class NoMatch : public Base {
+ public:
+ void run() {
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 5 << LT << 4 << "a" << GT << 0 ) );
+ ASSERT( !ok() );
+ }
+ };
+
+ /** Order of results indicates that interleaving is occurring. */
+ class Interleaved : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 2 ) );
+ _cli.insert( ns(), BSON( "_id" << 3 << "a" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 << "a" << 2 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 << "a" << GT << 0 ) );
+ ASSERT( ok() );
+ ASSERT_EQUALS( BSON( "_id" << 1 << "a" << 2 ), current() );
+ ASSERT( advance() );
+ ASSERT_EQUALS( BSON( "_id" << 3 << "a" << 1 ), current() );
+ ASSERT( advance() );
+ ASSERT_EQUALS( BSON( "_id" << 2 << "a" << 2 ), current() );
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ }
+ };
+
+ /** Some values on each index do not match. */
+ class NotMatch : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 0 << "a" << 10 ) );
+ _cli.insert( ns(), BSON( "_id" << 10 << "a" << 0 ) );
+ _cli.insert( ns(), BSON( "_id" << 11 << "a" << 12 ) );
+ _cli.insert( ns(), BSON( "_id" << 12 << "a" << 11 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 5 << "a" << GT << 5 ) );
+ ASSERT( ok() );
+ ASSERT_EQUALS( BSON( "_id" << 11 << "a" << 12 ), current() );
+ ASSERT( advance() );
+ ASSERT_EQUALS( BSON( "_id" << 12 << "a" << 11 ), current() );
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ }
+ };
+
+ /** After the first 101 matches for a plan, we stop interleaving the plans. */
+ class StopInterleaving : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 101; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << i ) );
+ }
+ for( int i = 101; i < 200; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << (301-i) ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << -1 << "a" << GT << -1 ) );
+ for( int i = 0; i < 200; ++i ) {
+ ASSERT( ok() );
+ ASSERT_EQUALS( i, current().getIntField( "_id" ) );
+ advance();
+ }
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ }
+ };
+
+ /** Test correct deduping with the takeover cursor. */
+ class TakeoverWithDup : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 101; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << i ) );
+ }
+ _cli.insert( ns(), BSON( "_id" << 500 << "a" << BSON_ARRAY( 0 << 300 ) ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << -1 << "a" << GT << -1 ) );
+ ASSERT_EQUALS( 102, itcount() );
+ }
+ };
+
+ /** Test usage of matcher with takeover cursor. */
+ class TakeoverWithNonMatches : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 101; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << i ) );
+ }
+ _cli.insert( ns(), BSON( "_id" << 101 << "a" << 600 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << -1 << "a" << LT << 500 ) );
+ ASSERT_EQUALS( 101, itcount() );
+ }
+ };
+
+ /** Check deduping of dups within just the takeover cursor. */
+ class TakeoverWithTakeoverDup : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 101; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i*2 << "a" << 0 ) );
+ _cli.insert( ns(), BSON( "_id" << i*2+1 << "a" << 1 ) );
+ }
+ _cli.insert( ns(), BSON( "_id" << 202 << "a" << BSON_ARRAY( 2 << 3 ) ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << -1 << "a" << GT << 0) );
+ ASSERT_EQUALS( 102, itcount() );
+ }
+ };
+
+ /** Basic test with $or query. */
+ class BasicOr : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 0 << "a" << 0 ) );
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "$or" << BSON_ARRAY( BSON( "_id" << 0 ) << BSON( "a" << 1 ) ) ) );
+ ASSERT_EQUALS( BSON( "_id" << 0 << "a" << 0 ), current() );
+ ASSERT( advance() );
+ ASSERT_EQUALS( BSON( "_id" << 1 << "a" << 1 ), current() );
+ ASSERT( !advance() );
+ }
+ };
+
+ /** $or first clause empty. */
+ class OrFirstClauseEmpty : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 0 << "a" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "$or" << BSON_ARRAY( BSON( "_id" << -1 ) << BSON( "a" << 1 ) ) ) );
+ ASSERT_EQUALS( BSON( "_id" << 0 << "a" << 1 ), current() );
+ ASSERT( advance() );
+ ASSERT_EQUALS( BSON( "_id" << 1 << "a" << 1 ), current() );
+ ASSERT( !advance() );
+ }
+ };
+
+ /** $or second clause empty. */
+ class OrSecondClauseEmpty : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 0 << "a" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "$or" << BSON_ARRAY( BSON( "_id" << 0 ) << BSON( "_id" << -1 ) << BSON( "a" << 1 ) ) ) );
+ ASSERT_EQUALS( BSON( "_id" << 0 << "a" << 1 ), current() );
+ ASSERT( advance() );
+ ASSERT_EQUALS( BSON( "_id" << 1 << "a" << 1 ), current() );
+ ASSERT( !advance() );
+ }
+ };
+
+    /** $or with multiple empty clauses. */
+ class OrMultipleClausesEmpty : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 0 << "a" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "$or" << BSON_ARRAY( BSON( "_id" << 2 ) << BSON( "_id" << 4 ) << BSON( "_id" << 0 ) << BSON( "_id" << -1 ) << BSON( "_id" << 6 ) << BSON( "a" << 1 ) << BSON( "_id" << 9 ) ) ) );
+ ASSERT_EQUALS( BSON( "_id" << 0 << "a" << 1 ), current() );
+ ASSERT( advance() );
+ ASSERT_EQUALS( BSON( "_id" << 1 << "a" << 1 ), current() );
+ ASSERT( !advance() );
+ }
+ };
+
+ /** Check that takeover occurs at proper match count with $or clauses */
+ class TakeoverCountOr : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 60; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << 0 ) );
+ }
+ for( int i = 60; i < 120; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << 1 ) );
+ }
+ for( int i = 120; i < 150; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << (200-i) ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "$or" << BSON_ARRAY( BSON( "a" << 0 ) << BSON( "a" << 1 ) << BSON( "_id" << GTE << 120 << "a" << GT << 1 ) ) ) );
+ for( int i = 0; i < 120; ++i ) {
+ ASSERT( ok() );
+ advance();
+ }
+ // Expect to be scanning on _id index only.
+ for( int i = 120; i < 150; ++i ) {
+ ASSERT_EQUALS( i, current().getIntField( "_id" ) );
+ advance();
+ }
+ ASSERT( !ok() );
+ }
+ };
+
+ /** Takeover just at end of clause. */
+ class TakeoverEndOfOrClause : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 102; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i ) );
+ }
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "$or" << BSON_ARRAY( BSON( "_id" << LT << 101 ) << BSON( "_id" << 101 ) ) ) );
+ for( int i = 0; i < 102; ++i ) {
+ ASSERT_EQUALS( i, current().getIntField( "_id" ) );
+ advance();
+ }
+ ASSERT( !ok() );
+ }
+ };
+
+ class TakeoverBeforeEndOfOrClause : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 101; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i ) );
+ }
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "$or" << BSON_ARRAY( BSON( "_id" << LT << 100 ) << BSON( "_id" << 100 ) ) ) );
+ for( int i = 0; i < 101; ++i ) {
+ ASSERT_EQUALS( i, current().getIntField( "_id" ) );
+ advance();
+ }
+ ASSERT( !ok() );
+ }
+ };
+
+ class TakeoverAfterEndOfOrClause : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 103; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i ) );
+ }
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "$or" << BSON_ARRAY( BSON( "_id" << LT << 102 ) << BSON( "_id" << 102 ) ) ) );
+ for( int i = 0; i < 103; ++i ) {
+ ASSERT_EQUALS( i, current().getIntField( "_id" ) );
+ advance();
+ }
+ ASSERT( !ok() );
+ }
+ };
+
+ /** Test matching and deduping done manually by cursor client. */
+ class ManualMatchingDeduping : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 0 << "a" << 10 ) );
+ _cli.insert( ns(), BSON( "_id" << 10 << "a" << 0 ) );
+ _cli.insert( ns(), BSON( "_id" << 11 << "a" << 12 ) );
+ _cli.insert( ns(), BSON( "_id" << 12 << "a" << 11 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr< Cursor > c = newQueryOptimizerCursor( ns(), BSON( "_id" << GT << 5 << "a" << GT << 5 ) );
+ ASSERT( c->ok() );
+
+ // _id 10 {_id:1}
+ ASSERT_EQUALS( 10, c->current().getIntField( "_id" ) );
+ ASSERT( !c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->advance() );
+
+ // _id 0 {a:1}
+ ASSERT_EQUALS( 0, c->current().getIntField( "_id" ) );
+ ASSERT( !c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->advance() );
+
+ // _id 0 {$natural:1}
+ ASSERT_EQUALS( 0, c->current().getIntField( "_id" ) );
+ ASSERT( !c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->advance() );
+
+ // _id 11 {_id:1}
+ ASSERT_EQUALS( BSON( "_id" << 11 << "a" << 12 ), c->current() );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+ ASSERT( c->advance() );
+
+ // _id 12 {a:1}
+ ASSERT_EQUALS( BSON( "_id" << 12 << "a" << 11 ), c->current() );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+ ASSERT( c->advance() );
+
+ // _id 10 {$natural:1}
+ ASSERT_EQUALS( 10, c->current().getIntField( "_id" ) );
+ ASSERT( !c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->advance() );
+
+ // _id 12 {_id:1}
+ ASSERT_EQUALS( BSON( "_id" << 12 << "a" << 11 ), c->current() );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->getsetdup( c->currLoc() ) );
+ ASSERT( c->advance() );
+
+ // _id 11 {a:1}
+ ASSERT_EQUALS( BSON( "_id" << 11 << "a" << 12 ), c->current() );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->getsetdup( c->currLoc() ) );
+ ASSERT( c->advance() );
+
+ // _id 11 {$natural:1}
+ ASSERT_EQUALS( 11, c->current().getIntField( "_id" ) );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->getsetdup( c->currLoc() ) );
+
+ // {_id:1} scan is complete.
+ ASSERT( !c->advance() );
+ ASSERT( !c->ok() );
+
+ // Scan the results again - this time the winning plan has been
+ // recorded.
+ c = newQueryOptimizerCursor( ns(), BSON( "_id" << GT << 5 << "a" << GT << 5 ) );
+ ASSERT( c->ok() );
+
+ // _id 10 {_id:1}
+ ASSERT_EQUALS( 10, c->current().getIntField( "_id" ) );
+ ASSERT( !c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->advance() );
+
+ // _id 11 {_id:1}
+ ASSERT_EQUALS( BSON( "_id" << 11 << "a" << 12 ), c->current() );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+ ASSERT( c->advance() );
+
+ // _id 12 {_id:1}
+ ASSERT_EQUALS( BSON( "_id" << 12 << "a" << 11 ), c->current() );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+
+ // {_id:1} scan complete
+ ASSERT( !c->advance() );
+ ASSERT( !c->ok() );
+ }
+ };
+
+    /** The current key must correspond to currLoc for matching to work correctly. */
+ class ManualMatchingUsingCurrKey : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << "a" ) );
+ _cli.insert( ns(), BSON( "_id" << "b" ) );
+ _cli.insert( ns(), BSON( "_id" << "ba" ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr< Cursor > c = newQueryOptimizerCursor( ns(), fromjson( "{_id:/a/}" ) );
+ ASSERT( c->ok() );
+ // "a"
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+ ASSERT( c->advance() );
+ ASSERT( c->ok() );
+
+ // "b"
+ ASSERT( !c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->advance() );
+ ASSERT( c->ok() );
+
+ // "ba"
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+ ASSERT( !c->advance() );
+ }
+ };
+
+ /** Test matching and deduping done manually by cursor client. */
+ class ManualMatchingDedupingTakeover : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 150; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << 0 ) );
+ }
+ _cli.insert( ns(), BSON( "_id" << 300 << "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr< Cursor > c = newQueryOptimizerCursor( ns(), BSON( "$or" << BSON_ARRAY( BSON( "_id" << LT << 300 ) << BSON( "a" << 1 ) ) ) );
+ for( int i = 0; i < 151; ++i ) {
+ ASSERT( c->ok() );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+ c->advance();
+ }
+ ASSERT( !c->ok() );
+ }
+ };
+
+ /** Test single key matching bounds. */
+ class Singlekey : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "a" << "10" ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr< Cursor > c = newQueryOptimizerCursor( ns(), BSON( "a" << GT << 1 << LT << 5 ) );
+ // Two sided bounds work.
+ ASSERT( !c->ok() );
+ }
+ };
+
+ /** Test multi key matching bounds. */
+ class Multikey : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "a" << BSON_ARRAY( 1 << 10 ) ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "a" << GT << 5 << LT << 3 ) );
+ // Multi key bounds work.
+ ASSERT( ok() );
+ }
+ };
+
+ /** Add other plans when the recorded one is doing more poorly than expected. */
+ class AddOtherPlans : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 0 << "a" << 0 << "b" << 0 ) );
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 1 << "b" << 0 ) );
+ for( int i = 100; i < 150; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << 100 << "b" << i ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr<Cursor> c = newQueryOptimizerCursor( ns(), BSON( "a" << 0 << "b" << 0 ) );
+
+ ASSERT_EQUALS( BSON( "_id" << 0 << "a" << 0 << "b" << 0 ), c->current() );
+ ASSERT_EQUALS( BSON( "a" << 1 ), c->indexKeyPattern() );
+
+ ASSERT( c->advance() );
+ ASSERT_EQUALS( BSON( "_id" << 0 << "a" << 0 << "b" << 0 ), c->current() );
+ ASSERT_EQUALS( BSON( "b" << 1 ), c->indexKeyPattern() );
+
+ ASSERT( c->advance() );
+ ASSERT_EQUALS( BSON( "_id" << 0 << "a" << 0 << "b" << 0 ), c->current() );
+ // Unindexed plan
+ ASSERT_EQUALS( BSONObj(), c->indexKeyPattern() );
+ ASSERT( !c->advance() );
+
+ c = newQueryOptimizerCursor( ns(), BSON( "a" << 100 << "b" << 149 ) );
+ // Try {a:1}, which was successful previously.
+ for( int i = 0; i < 12; ++i ) {
+ ASSERT( 149 != c->current().getIntField( "b" ) );
+ ASSERT( c->advance() );
+ }
+ bool sawB1Index = false;
+ do {
+ if ( c->indexKeyPattern() == BSON( "b" << 1 ) ) {
+ ASSERT_EQUALS( 149, c->current().getIntField( "b" ) );
+ // We should try the {b:1} index and only see one result from it.
+ ASSERT( !sawB1Index );
+ sawB1Index = true;
+ }
+ } while ( c->advance() );
+ ASSERT( sawB1Index );
+ }
+ };
+
+ /** Add other plans when the recorded one is doing more poorly than expected, with deletion. */
+ class AddOtherPlansDelete : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 0 << "a" << 0 << "b" << 0 ) );
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 1 << "b" << 0 ) );
+ for( int i = 100; i < 120; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << 100 << "b" << i ) );
+ }
+ for( int i = 199; i >= 150; --i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << 100 << "b" << 150 ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr<Cursor> c = newQueryOptimizerCursor( ns(), BSON( "a" << 0 << "b" << 0 ) );
+
+ ASSERT_EQUALS( BSON( "_id" << 0 << "a" << 0 << "b" << 0 ), c->current() );
+ ASSERT_EQUALS( BSON( "a" << 1 ), c->indexKeyPattern() );
+
+ ASSERT( c->advance() );
+ ASSERT_EQUALS( BSON( "_id" << 0 << "a" << 0 << "b" << 0 ), c->current() );
+ ASSERT_EQUALS( BSON( "b" << 1 ), c->indexKeyPattern() );
+
+ ASSERT( c->advance() );
+ ASSERT_EQUALS( BSON( "_id" << 0 << "a" << 0 << "b" << 0 ), c->current() );
+ // Unindexed plan
+ ASSERT_EQUALS( BSONObj(), c->indexKeyPattern() );
+ ASSERT( !c->advance() );
+
+ c = newQueryOptimizerCursor( ns(), BSON( "a" << 100 << "b" << 150 ) );
+ // Try {a:1}, which was successful previously.
+ for( int i = 0; i < 12; ++i ) {
+ ASSERT( 150 != c->current().getIntField( "b" ) );
+ ASSERT_EQUALS( BSON( "a" << 1 ), c->indexKeyPattern() );
+ ASSERT( c->advance() );
+ }
+ // Now try {b:1} plan.
+ ASSERT_EQUALS( BSON( "b" << 1 ), c->indexKeyPattern() );
+ ASSERT_EQUALS( 150, c->current().getIntField( "b" ) );
+ ASSERT( c->currentMatches() );
+ int id = c->current().getIntField( "_id" );
+ c->advance();
+ c->prepareToTouchEarlierIterate();
+ _cli.remove( ns(), BSON( "_id" << id ) );
+ c->recoverFromTouchingEarlierIterate();
+ int count = 1;
+ while( c->ok() ) {
+ if ( c->currentMatches() ) {
+ ++count;
+ int id = c->current().getIntField( "_id" );
+ c->advance();
+ c->prepareToTouchEarlierIterate();
+ _cli.remove( ns(), BSON( "_id" << id ) );
+ c->recoverFromTouchingEarlierIterate();
+ }
+ else {
+ c->advance();
+ }
+ }
+ ASSERT_EQUALS( 50, count );
+ }
+ };
+
+ /**
+ * Add other plans when the recorded one is doing more poorly than expected, with deletion before
+ * and after adding the additional plans.
+ */
+ class AddOtherPlansContinuousDelete : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 0 << "a" << 0 << "b" << 0 ) );
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 1 << "b" << 0 ) );
+ for( int i = 100; i < 400; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << i << "b" << ( 499 - i ) ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr<Cursor> c = newQueryOptimizerCursor( ns(), BSON( "a" << GTE << -1 << LTE << 0 << "b" << GTE << -1 << LTE << 0 ) );
+ while( c->advance() );
+ // {a:1} plan should be recorded now.
+
+ c = newQueryOptimizerCursor( ns(), BSON( "a" << GTE << 100 << LTE << 400 << "b" << GTE << 100 << LTE << 400 ) );
+ int count = 0;
+ while( c->ok() ) {
+ if ( c->currentMatches() ) {
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+ ++count;
+ int id = c->current().getIntField( "_id" );
+ c->advance();
+ c->prepareToTouchEarlierIterate();
+ _cli.remove( ns(), BSON( "_id" << id ) );
+ c->recoverFromTouchingEarlierIterate();
+ } else {
+ c->advance();
+ }
+ }
+ ASSERT_EQUALS( 300, count );
+ ASSERT_EQUALS( 2U, _cli.count( ns(), BSONObj() ) );
+ }
+ };
+
+ /** Check $or clause range elimination. */
+ class OrRangeElimination : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr<Cursor> c = newQueryOptimizerCursor( ns(), BSON( "$or" << BSON_ARRAY( BSON( "_id" << GT << 0 ) << BSON( "_id" << 1 ) ) ) );
+ ASSERT( c->ok() );
+ ASSERT( !c->advance() );
+ }
+ };
+
+ /** Check $or match deduping - in takeover cursor. */
+ class OrDedup : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 150; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << i ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr<Cursor> c = newQueryOptimizerCursor( ns(), BSON( "$or" << BSON_ARRAY( BSON( "_id" << LT << 140 ) << BSON( "_id" << 145 ) << BSON( "a" << 145 ) ) ) );
+
+ while( c->current().getIntField( "_id" ) < 140 ) {
+ ASSERT( c->advance() );
+ }
+ // Match from second $or clause.
+ ASSERT_EQUALS( 145, c->current().getIntField( "_id" ) );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->advance() );
+ // Match from third $or clause.
+ ASSERT_EQUALS( 145, c->current().getIntField( "_id" ) );
+ // $or deduping is handled by the matcher.
+ ASSERT( !c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( !c->advance() );
+ }
+ };
+
+ /** Standard dups with a multikey cursor. */
+ class EarlyDups : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "a" << BSON_ARRAY( 0 << 1 << 200 ) ) );
+ for( int i = 2; i < 150; ++i ) {
+ _cli.insert( ns(), BSON( "a" << i ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "a" << GT << -1 ) );
+ ASSERT_EQUALS( 149, itcount() );
+ }
+ };
+
+    /** Pop an $or clause in the takeover cursor. */
+ class OrPopInTakeover : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 150; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i ) );
+ }
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr<Cursor> c = newQueryOptimizerCursor( ns(), BSON( "$or" << BSON_ARRAY( BSON( "_id" << LTE << 147 ) << BSON( "_id" << 148 ) << BSON( "_id" << 149 ) ) ) );
+ for( int i = 0; i < 150; ++i ) {
+ ASSERT( c->ok() );
+ ASSERT_EQUALS( i, c->current().getIntField( "_id" ) );
+ c->advance();
+ }
+ ASSERT( !c->ok() );
+ }
+ };
+
+    /** $or clause iteration is abandoned once a full collection scan is performed. */
+ class OrCollectionScanAbort : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 0 << "a" << BSON_ARRAY( 1 << 2 << 3 << 4 << 5 ) << "b" << 4 ) );
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << BSON_ARRAY( 6 << 7 << 8 << 9 << 10 ) << "b" << 4 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr<Cursor> c = newQueryOptimizerCursor( ns(), BSON( "$or" << BSON_ARRAY( BSON( "a" << LT << 6 << "b" << 4 ) << BSON( "a" << GTE << 6 << "b" << 4 ) ) ) );
+
+ ASSERT( c->ok() );
+
+ // _id 0 on {a:1}
+ ASSERT_EQUALS( 0, c->current().getIntField( "_id" ) );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+ c->advance();
+
+ // _id 0 on {$natural:1}
+ ASSERT_EQUALS( 0, c->current().getIntField( "_id" ) );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->getsetdup( c->currLoc() ) );
+ c->advance();
+
+ // _id 0 on {a:1}
+ ASSERT_EQUALS( 0, c->current().getIntField( "_id" ) );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->getsetdup( c->currLoc() ) );
+ c->advance();
+
+ // _id 1 on {$natural:1}
+ ASSERT_EQUALS( 1, c->current().getIntField( "_id" ) );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+ c->advance();
+
+ // _id 0 on {a:1}
+ ASSERT_EQUALS( 0, c->current().getIntField( "_id" ) );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->getsetdup( c->currLoc() ) );
+ c->advance();
+
+ // {$natural:1} finished
+ ASSERT( !c->ok() );
+ }
+ };
+
+    /** Yield the cursor with no intervening modification, then continue iteration. */
+ class YieldNoOp : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ ASSERT( ok() );
+ ASSERT_EQUALS( 2, current().getIntField( "_id" ) );
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ ASSERT( prepareToYield() );
+ recoverFromYield();
+ }
+ }
+ };
+
+ /** Yield cursor and delete current entry. */
+ class YieldDelete : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << 1 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ _cli.remove( ns(), BSON( "_id" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ ASSERT( !ok() );
+ ASSERT( !advance() );
+ }
+ }
+ };
+
+ /** Yield cursor and delete current entry, then continue iteration. */
+ class YieldDeleteContinue : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ _cli.remove( ns(), BSON( "_id" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ ASSERT( ok() );
+ ASSERT_EQUALS( 2, current().getIntField( "_id" ) );
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ }
+ }
+ };
+
+ /** Yield cursor and delete current entry, then continue iteration. */
+ class YieldDeleteContinueFurther : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 ) );
+ _cli.insert( ns(), BSON( "_id" << 3 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ _cli.remove( ns(), BSON( "_id" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ ASSERT( ok() );
+ ASSERT_EQUALS( 2, current().getIntField( "_id" ) );
+ ASSERT( advance() );
+ ASSERT_EQUALS( 3, current().getIntField( "_id" ) );
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ }
+ }
+ };
+
+ /** Yield and update current. */
+ class YieldUpdate : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "a" << 1 ) );
+ _cli.insert( ns(), BSON( "a" << 2 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "a" << GT << 0 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "a" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ _cli.update( ns(), BSON( "a" << 1 ), BSON( "$set" << BSON( "a" << 3 ) ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ ASSERT( ok() );
+ ASSERT_EQUALS( 2, current().getIntField( "a" ) );
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ }
+ }
+ };
+
+ /** Yield and drop collection. */
+ class YieldDrop : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ _cli.dropCollection( ns() );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ ASSERT( !ok() );
+ }
+ }
+ };
+
+ /** Yield and drop collection with $or query. */
+ class YieldDropOr : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "$or" << BSON_ARRAY( BSON( "_id" << 1 ) << BSON( "_id" << 2 ) ) ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ _cli.dropCollection( ns() );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ ASSERT_THROWS( recoverFromYield(), MsgAssertionException );
+ ASSERT( !ok() );
+ }
+ }
+ };
+
+ /** Yield and remove document with $or query. */
+ class YieldRemoveOr : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "$or" << BSON_ARRAY( BSON( "_id" << 1 ) << BSON( "_id" << 2 ) ) ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ _cli.remove( ns(), BSON( "_id" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ ASSERT( ok() );
+ ASSERT_EQUALS( 2, current().getIntField( "_id" ) );
+ }
+ }
+ };
+
+ /** Yield and overwrite current in capped collection. */
+ class YieldCappedOverwrite : public Base {
+ public:
+ void run() {
+ _cli.createCollection( ns(), 1000, true );
+ _cli.insert( ns(), BSON( "x" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "x" << GT << 0 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "x" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ int x = 2;
+ while( _cli.count( ns(), BSON( "x" << 1 ) ) > 0 ) {
+ _cli.insert( ns(), BSON( "x" << x++ ) );
+ }
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ ASSERT_THROWS( recoverFromYield(), MsgAssertionException );
+ ASSERT( !ok() );
+ }
+ }
+ };
+
+ /** Yield and drop unrelated index - see SERVER-2454. */
+ class YieldDropIndex : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << 1 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ _cli.dropIndex( ns(), BSON( "a" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ ASSERT( !ok() );
+ }
+ }
+ };
+
+ /** Yielding with multiple plans active. */
+ class YieldMultiplePlansNoOp : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 2 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 << "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 << "a" << GT << 0 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ ASSERT( ok() );
+ ASSERT_EQUALS( 2, current().getIntField( "_id" ) );
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ }
+ }
+ };
+
+ /** Yielding with advance and multiple plans active. */
+ class YieldMultiplePlansAdvanceNoOp : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 2 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 << "a" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 3 << "a" << 3 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 << "a" << GT << 0 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ advance();
+ ASSERT_EQUALS( 2, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ ASSERT( ok() );
+ ASSERT_EQUALS( 3, current().getIntField( "_id" ) );
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ }
+ }
+ };
+
+ /** Yielding with delete and multiple plans active. */
+ class YieldMultiplePlansDelete : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 2 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 << "a" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 3 << "a" << 4 ) );
+ _cli.insert( ns(), BSON( "_id" << 4 << "a" << 3 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 << "a" << GT << 0 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ advance();
+ ASSERT_EQUALS( 2, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ _cli.remove( ns(), BSON( "_id" << 2 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ c()->recoverFromYield();
+ ASSERT( ok() );
+ // index {a:1} active during yield
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( advance() );
+ ASSERT_EQUALS( 3, current().getIntField( "_id" ) );
+ ASSERT( advance() );
+ ASSERT_EQUALS( 4, current().getIntField( "_id" ) );
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ }
+ }
+ };
+
+ /** Yielding with delete, multiple plans active, and $or clause. */
+ class YieldMultiplePlansDeleteOr : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 2 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 << "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "$or" << BSON_ARRAY( BSON( "_id" << 1 << "a" << 2 ) << BSON( "_id" << 2 << "a" << 1 ) ) ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ _cli.remove( ns(), BSON( "_id" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ c()->recoverFromYield();
+ ASSERT( ok() );
+ ASSERT_EQUALS( 2, current().getIntField( "_id" ) );
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ }
+ }
+ };
+
+ /** Yielding with delete, multiple plans active with advancement to the second, and $or clause. */
+ class YieldMultiplePlansDeleteOrAdvance : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 2 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 << "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "$or" << BSON_ARRAY( BSON( "_id" << 1 << "a" << 2 ) << BSON( "_id" << 2 << "a" << 1 ) ) ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ c()->advance();
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ }
+
+ _cli.remove( ns(), BSON( "_id" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ c()->recoverFromYield();
+ ASSERT( ok() );
+ ASSERT_EQUALS( 2, current().getIntField( "_id" ) );
+ ASSERT( !advance() );
+ ASSERT( !ok() );
+ }
+ }
+ };
+
+ /** Yielding with multiple plans and capped overwrite. */
+ class YieldMultiplePlansCappedOverwrite : public Base {
+ public:
+ void run() {
+ _cli.createCollection( ns(), 1000, true );
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "_id" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 << "a" << GT << 0 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ int i = 1;
+ while( _cli.count( ns(), BSON( "_id" << 1 ) ) > 0 ) {
+ ++i;
+ _cli.insert( ns(), BSON( "_id" << i << "a" << i ) );
+ }
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ ASSERT( ok() );
+ // {$natural:1} plan does not recover, {_id:1} plan does.
+ ASSERT( 1 < current().getIntField( "_id" ) );
+ }
+ }
+ };
+
+ /**
+ * Yielding with multiple plans and capped overwrite with unrecoverable cursor
+ * active at time of yield.
+ */
+ class YieldMultiplePlansCappedOverwriteManual : public Base {
+ public:
+ void run() {
+ _cli.createCollection( ns(), 1000, true );
+ _cli.insert( ns(), BSON( "a" << 1 << "b" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ shared_ptr<Cursor> c;
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ c = newQueryOptimizerCursor( ns(), BSON( "a" << GT << 0 << "b" << GT << 0 ) );
+ ASSERT_EQUALS( 1, c->current().getIntField( "a" ) );
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+ c->advance();
+ ASSERT_EQUALS( 1, c->current().getIntField( "a" ) );
+ ASSERT( c->getsetdup( c->currLoc() ) );
+ ASSERT( c->prepareToYield() );
+ }
+
+ int i = 1;
+ while( _cli.count( ns(), BSON( "a" << 1 ) ) > 0 ) {
+ ++i;
+ _cli.insert( ns(), BSON( "a" << i << "b" << i ) );
+ }
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ c->recoverFromYield();
+ ASSERT( c->ok() );
+                // {$natural:1} plan does not recover, {a:1} plan does.
+ ASSERT( 1 < c->current().getIntField( "a" ) );
+ }
+ }
+ };
+
+ /**
+ * Yielding with multiple plans and capped overwrite with unrecoverable cursor
+         * inactive at time of yield.
+ */
+ class YieldMultiplePlansCappedOverwriteManual2 : public Base {
+ public:
+ void run() {
+ _cli.createCollection( ns(), 1000, true );
+ _cli.insert( ns(), BSON( "_id" << 1 << "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "_id" << 1 ) );
+
+ shared_ptr<Cursor> c;
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ c = newQueryOptimizerCursor( ns(), BSON( "_id" << GT << 0 << "a" << GT << 0 ) );
+ ASSERT_EQUALS( 1, c->current().getIntField( "_id" ) );
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+ ASSERT( c->prepareToYield() );
+ }
+
+ int n = 1;
+ while( _cli.count( ns(), BSON( "_id" << 1 ) ) > 0 ) {
+ ++n;
+ _cli.insert( ns(), BSON( "_id" << n << "a" << n ) );
+ }
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ c->recoverFromYield();
+ ASSERT( c->ok() );
+ // {$natural:1} plan does not recover, {_id:1} plan does.
+ ASSERT( 1 < c->current().getIntField( "_id" ) );
+ ASSERT( !c->getsetdup( c->currLoc() ) );
+ int i = c->current().getIntField( "_id" );
+ ASSERT( c->advance() );
+ ASSERT( c->getsetdup( c->currLoc() ) );
+ while( i < n ) {
+ ASSERT( c->advance() );
+ ++i;
+ ASSERT_EQUALS( i, c->current().getIntField( "_id" ) );
+ }
+ }
+ }
+ };
+
+ /** Yield with takeover cursor. */
+ class YieldTakeover : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 150; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << i ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GTE << 0 << "a" << GTE << 0 ) );
+ for( int i = 0; i < 120; ++i ) {
+ ASSERT( advance() );
+ }
+ ASSERT( ok() );
+ ASSERT_EQUALS( 120, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ _cli.remove( ns(), BSON( "_id" << 120 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ ASSERT( ok() );
+ ASSERT_EQUALS( 121, current().getIntField( "_id" ) );
+ ASSERT( advance() );
+ ASSERT_EQUALS( 122, current().getIntField( "_id" ) );
+ }
+ }
+ };
+
+        /** Yield with BasicCursor takeover cursor. */
+ class YieldTakeoverBasic : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 150; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << BSON_ARRAY( i << i+1 ) ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ auto_ptr<ClientCursor> cc;
+ auto_ptr<ClientCursor::YieldData> data( new ClientCursor::YieldData() );
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "b" << NE << 0 << "a" << GTE << 0 ) );
+ cc.reset( new ClientCursor( QueryOption_NoCursorTimeout, c(), ns() ) );
+ for( int i = 0; i < 120; ++i ) {
+ ASSERT( advance() );
+ }
+ ASSERT( ok() );
+ ASSERT_EQUALS( 120, current().getIntField( "_id" ) );
+ cc->prepareToYield( *data );
+ }
+ _cli.remove( ns(), BSON( "_id" << 120 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ ASSERT( ClientCursor::recoverFromYield( *data ) );
+ ASSERT( ok() );
+ ASSERT_EQUALS( 121, current().getIntField( "_id" ) );
+ ASSERT( advance() );
+ ASSERT_EQUALS( 122, current().getIntField( "_id" ) );
+ }
+ }
+ };
+
+ /** Yield with advance of inactive cursor. */
+ class YieldInactiveCursorAdvance : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 10; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << 10 - i ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 << "a" << GT << 0 ) );
+ ASSERT( ok() );
+ ASSERT_EQUALS( 1, current().getIntField( "_id" ) );
+ ASSERT( advance() );
+ ASSERT_EQUALS( 9, current().getIntField( "_id" ) );
+ ASSERT( advance() );
+ ASSERT_EQUALS( 2, current().getIntField( "_id" ) );
+ ASSERT( prepareToYield() );
+ }
+
+ _cli.remove( ns(), BSON( "_id" << 9 ) );
+
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ recoverFromYield();
+ ASSERT( ok() );
+ ASSERT_EQUALS( 8, current().getIntField( "_id" ) );
+ ASSERT( advance() );
+ ASSERT_EQUALS( 3, current().getIntField( "_id" ) );
+ ASSERT( advance() );
+ ASSERT_EQUALS( 7, current().getIntField( "_id" ) );
+ }
+ }
+ };
+
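+        /** An empty query with an {_id:1} sort returns documents in _id order. */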
+ class OrderId : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 10; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i ) );
+ }
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSONObj(), BSON( "_id" << 1 ) );
+
+ for( int i = 0; i < 10; ++i, advance() ) {
+ ASSERT( ok() );
+ ASSERT_EQUALS( i, current().getIntField( "_id" ) );
+ }
+ }
+ };
+
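+        /** Documents are returned in the requested {_id:1} order when a compound index is also available. */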
+ class OrderMultiIndex : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 10; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << 1 ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "_id" << 1 << "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GTE << 0 << "a" << GTE << 0 ), BSON( "_id" << 1 ) );
+
+ for( int i = 0; i < 10; ++i, advance() ) {
+ ASSERT( ok() );
+ ASSERT_EQUALS( i, current().getIntField( "_id" ) );
+ }
+ }
+ };
+
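+        /** Matching documents are returned in {_id:1} order even though the {a:1} index matches the query. */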
+ class OrderReject : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 10; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << i % 5 ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "a" << GTE << 3 ), BSON( "_id" << 1 ) );
+
+ ASSERT( ok() );
+ ASSERT_EQUALS( 3, current().getIntField( "_id" ) );
+ ASSERT( advance() );
+ ASSERT_EQUALS( 4, current().getIntField( "_id" ) );
+ ASSERT( advance() );
+ ASSERT_EQUALS( 8, current().getIntField( "_id" ) );
+ ASSERT( advance() );
+ ASSERT_EQUALS( 9, current().getIntField( "_id" ) );
+ ASSERT( !advance() );
+ }
+ };
+
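+        /** A {$natural:1} sort returns documents in insertion order. */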
+ class OrderNatural : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 5 ) );
+ _cli.insert( ns(), BSON( "_id" << 4 ) );
+ _cli.insert( ns(), BSON( "_id" << 6 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 ), BSON( "$natural" << 1 ) );
+
+ ASSERT( ok() );
+ ASSERT_EQUALS( 5, current().getIntField( "_id" ) );
+ ASSERT( advance() );
+ ASSERT_EQUALS( 4, current().getIntField( "_id" ) );
+ ASSERT( advance() );
+ ASSERT_EQUALS( 6, current().getIntField( "_id" ) );
+ ASSERT( !advance() );
+ }
+ };
+
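+        /** No cursor is returned when the requested sort cannot be provided by an index. */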
+ class OrderUnindexed : public Base {
+ public:
+ void run() {
+ dblock lk;
+ Client::Context ctx( ns() );
+ ASSERT( !newQueryOptimizerCursor( ns(), BSONObj(), BSON( "a" << 1 ) ).get() );
+ }
+ };
+
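+        /** The ordered {b:1} plan is used even though a plan was previously recorded for this query pattern. */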
+ class RecordedOrderInvalid : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "a" << 1 << "b" << 1 ) );
+ _cli.insert( ns(), BSON( "a" << 2 << "b" << 2 ) );
+ _cli.insert( ns(), BSON( "a" << 3 << "b" << 3 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+ ASSERT( _cli.query( ns(), QUERY( "a" << 2 ).sort( "b" ) )->more() );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr<Cursor> c = newQueryOptimizerCursor( ns(), BSON( "a" << 2 ), BSON( "b" << 1 ) );
+ // Check that we are scanning {b:1} not {a:1}.
+ for( int i = 0; i < 3; ++i ) {
+ ASSERT( c->ok() );
+ c->advance();
+ }
+ ASSERT( !c->ok() );
+ }
+ };
+
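+        /** Advancing the cursor after the current operation is killed throws. */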
+ class KillOp : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 << "b" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 << "b" << 2 ) );
+ _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+
+ Client::ReadContext ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 << "b" << GT << 0 ) );
+ ASSERT( ok() );
+ cc().curop()->kill();
+ // First advance() call throws, subsequent calls just fail.
+ ASSERT_THROWS( advance(), MsgAssertionException );
+ ASSERT( !advance() );
+ }
+ };
+
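+        /** Advancing an $or cursor in its first clause after the current operation is killed throws. */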
+ class KillOpFirstClause : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 << "b" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 << "b" << 2 ) );
+ _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+
+ Client::ReadContext ctx( ns() );
+ shared_ptr<Cursor> c = newQueryOptimizerCursor( ns(), BSON( "$or" << BSON_ARRAY( BSON( "_id" << GT << 0 ) << BSON( "b" << GT << 0 ) ) ) );
+ ASSERT( c->ok() );
+ cc().curop()->kill();
+ // First advance() call throws, subsequent calls just fail.
+ ASSERT_THROWS( c->advance(), MsgAssertionException );
+ ASSERT( !c->advance() );
+ }
+ };
+
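+        /** Check nscanned() counts while iterating a query optimizer cursor. */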
+ class Nscanned : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 120; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "a" << i ) );
+ }
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr<Cursor> c = newQueryOptimizerCursor( ns(), BSON( "_id" << GTE << 0 << "a" << GTE << 0 ) );
+ ASSERT( c->ok() );
+ ASSERT_EQUALS( 2, c->nscanned() );
+ c->advance();
+ ASSERT( c->ok() );
+ ASSERT_EQUALS( 2, c->nscanned() );
+ c->advance();
+ for( int i = 3; i < 222; ++i ) {
+ ASSERT( c->ok() );
+ c->advance();
+ }
+ ASSERT( !c->ok() );
+ }
+ };
+
+ /* Test 'touching earlier iterate' without doc modifications. */
+ class TouchEarlierIterate : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 << "b" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 << "b" << 2 ) );
+ _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+
+ Client::ReadContext ctx( ns() );
+ shared_ptr<Cursor> c = newQueryOptimizerCursor( ns(), BSON( "_id" << GT << 0 << "b" << GT << 0 ) );
+
+ ASSERT( c->ok() );
+ while( c->ok() ) {
+ DiskLoc loc = c->currLoc();
+ BSONObj obj = c->current();
+ c->prepareToTouchEarlierIterate();
+ c->recoverFromTouchingEarlierIterate();
+ ASSERT( loc == c->currLoc() );
+ ASSERT_EQUALS( obj, c->current() );
+ c->advance();
+ }
+ }
+ };
+
+ /* Test 'touching earlier iterate' with doc modifications. */
+ class TouchEarlierIterateDelete : public Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 << "b" << 1 ) );
+ _cli.insert( ns(), BSON( "_id" << 2 << "b" << 2 ) );
+ _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+
+ DiskLoc firstLoc;
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 << "b" << GT << 0 ) );
+ ASSERT( ok() );
+ firstLoc = currLoc();
+ ASSERT( c()->advance() );
+ prepareToTouchEarlierIterate();
+
+ _cli.remove( ns(), BSON( "_id" << 1 ), true );
+
+ recoverFromTouchingEarlierIterate();
+ ASSERT( ok() );
+ while( ok() ) {
+ ASSERT( firstLoc != currLoc() );
+ c()->advance();
+ }
+ }
+ };
+
+ /* Test 'touch earlier iterate' with several doc modifications. */
+ class TouchEarlierIterateDeleteMultiple : public Base {
+ public:
+ void run() {
+ for( int i = 1; i < 10; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "b" << i ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+
+ set<DiskLoc> deleted;
+ int id = 0;
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 << "b" << GT << 0 ) );
+ while( 1 ) {
+ if ( !ok() ) {
+ break;
+ }
+ ASSERT( deleted.count( currLoc() ) == 0 );
+ id = current()["_id"].Int();
+ deleted.insert( currLoc() );
+ c()->advance();
+ prepareToTouchEarlierIterate();
+
+ _cli.remove( ns(), BSON( "_id" << id ), true );
+
+ recoverFromTouchingEarlierIterate();
+ }
+ ASSERT_EQUALS( 9U, deleted.size() );
+ }
+ };
+
+ /* Test 'touch earlier iterate' with takeover. */
+ class TouchEarlierIterateTakeover : public Base {
+ public:
+ void run() {
+ for( int i = 1; i < 600; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "b" << i ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+
+ Client::ReadContext ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 << "b" << GT << 0 ) );
+
+ ASSERT( ok() );
+ int count = 1;
+ while( ok() ) {
+ DiskLoc loc = currLoc();
+ BSONObj obj = current();
+ prepareToTouchEarlierIterate();
+ recoverFromTouchingEarlierIterate();
+ ASSERT( loc == currLoc() );
+ ASSERT_EQUALS( obj, current() );
+ count += mayReturnCurrent();
+ c()->advance();
+ }
+ ASSERT_EQUALS( 599, count );
+ }
+ };
+
+ /* Test 'touch earlier iterate' with takeover and deletes. */
+ class TouchEarlierIterateTakeoverDeleteMultiple : public Base {
+ public:
+ void run() {
+ for( int i = 1; i < 600; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "b" << i ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+
+ set<DiskLoc> deleted;
+ int id = 0;
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursorWithoutAdvancing( BSON( "_id" << GT << 0 << "b" << GT << 0 ) );
+ while( 1 ) {
+ if ( !ok() ) {
+ break;
+ }
+ ASSERT( deleted.count( currLoc() ) == 0 );
+ id = current()["_id"].Int();
+ ASSERT( c()->currentMatches() );
+ ASSERT( !c()->getsetdup( currLoc() ) );
+ deleted.insert( currLoc() );
+ c()->advance();
+ prepareToTouchEarlierIterate();
+
+ _cli.remove( ns(), BSON( "_id" << id ), true );
+
+ recoverFromTouchingEarlierIterate();
+ }
+ ASSERT_EQUALS( 599U, deleted.size() );
+ }
+ };
+
+        /* Test 'touch earlier iterate' with unindexed cursor takeover and deletes. */
+ class TouchEarlierIterateUnindexedTakeoverDeleteMultiple : public Base {
+ public:
+ void run() {
+ for( int i = 1; i < 600; ++i ) {
+ _cli.insert( ns(), BSON( "a" << BSON_ARRAY( i << i+1 ) << "b" << BSON_ARRAY( i << i+1 ) << "_id" << i ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+
+ set<DiskLoc> deleted;
+ int id = 0;
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursorWithoutAdvancing( BSON( "a" << GT << 0 << "b" << GT << 0 ) );
+ while( 1 ) {
+ if ( !ok() ) {
+ break;
+ }
+ ASSERT( deleted.count( currLoc() ) == 0 );
+ id = current()["_id"].Int();
+ ASSERT( c()->currentMatches() );
+ ASSERT( !c()->getsetdup( currLoc() ) );
+ deleted.insert( currLoc() );
+ c()->advance();
+ prepareToTouchEarlierIterate();
+
+ _cli.remove( ns(), BSON( "_id" << id ), true );
+
+ recoverFromTouchingEarlierIterate();
+ }
+ ASSERT_EQUALS( 599U, deleted.size() );
+ }
+ };
+
+ /* Test 'touch earlier iterate' with takeover and deletes, with multiple advances in a row. */
+ class TouchEarlierIterateTakeoverDeleteMultipleMultiAdvance : public Base {
+ public:
+ void run() {
+ for( int i = 1; i < 600; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "b" << i ) );
+ }
+ _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+
+ set<DiskLoc> deleted;
+ int id = 0;
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "_id" << GT << 0 << "b" << GT << 0 ) );
+ while( 1 ) {
+ if ( !ok() ) {
+ break;
+ }
+ ASSERT( deleted.count( currLoc() ) == 0 );
+ id = current()["_id"].Int();
+ ASSERT( c()->currentMatches() );
+ deleted.insert( currLoc() );
+ advance();
+ prepareToTouchEarlierIterate();
+
+ _cli.remove( ns(), BSON( "_id" << id ), true );
+
+ recoverFromTouchingEarlierIterate();
+ }
+ ASSERT_EQUALS( 599U, deleted.size() );
+ }
+ };
+
+ /* Test yield recovery failure of component capped cursor. */
+ class InitialCappedWrapYieldRecoveryFailure : public Base {
+ public:
+ void run() {
+ _cli.createCollection( ns(), 1000, true );
+ _cli.insert( ns(), BSON( "_id" << 1 << "x" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "x" << GT << 0 ) );
+ ASSERT_EQUALS( 1, current().getIntField( "x" ) );
+
+ ClientCursor::CleanupPointer p;
+ p.reset( new ClientCursor( QueryOption_NoCursorTimeout, c(), ns() ) );
+ ClientCursor::YieldData yieldData;
+ p->prepareToYield( yieldData );
+
+ int x = 2;
+ while( _cli.count( ns(), BSON( "x" << 1 ) ) > 0 ) {
+ _cli.insert( ns(), BSON( "_id" << x << "x" << x ) );
+ ++x;
+ }
+
+ // TODO - Might be preferable to return false rather than assert here.
+ ASSERT_THROWS( ClientCursor::recoverFromYield( yieldData ), AssertionException );
+ }
+ };
+
+ /* Test yield recovery failure of takeover capped cursor. */
+ class TakeoverCappedWrapYieldRecoveryFailure : public Base {
+ public:
+ void run() {
+ _cli.createCollection( ns(), 10000, true );
+ for( int i = 0; i < 300; ++i ) {
+ _cli.insert( ns(), BSON( "_id" << i << "x" << i ) );
+ }
+
+ ClientCursor::CleanupPointer p;
+ ClientCursor::YieldData yieldData;
+ {
+ dblock lk;
+ Client::Context ctx( ns() );
+ setQueryOptimizerCursor( BSON( "x" << GTE << 0 ) );
+ for( int i = 0; i < 299; ++i ) {
+ advance();
+ }
+ ASSERT_EQUALS( 299, current().getIntField( "x" ) );
+
+ p.reset( new ClientCursor( QueryOption_NoCursorTimeout, c(), ns() ) );
+ p->prepareToYield( yieldData );
+ }
+
+ int i = 300;
+ while( _cli.count( ns(), BSON( "x" << 299 ) ) > 0 ) {
+ _cli.insert( ns(), BSON( "_id" << i << "x" << i ) );
+ ++i;
+ }
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ ASSERT( !ClientCursor::recoverFromYield( yieldData ) );
+ }
+ };
+
+ namespace GetCursor {
+
+ class Base : public QueryOptimizerCursorTests::Base {
+ public:
+ Base() {
+ // create collection
+ _cli.insert( ns(), BSON( "_id" << 5 ) );
+ }
+ virtual ~Base() {}
+ void run() {
+ dblock lk;
+ Client::Context ctx( ns() );
+ bool simpleEqualityMatch;
+ if ( expectException() ) {
+ ASSERT_THROWS( NamespaceDetailsTransient::getCursor( ns(), query(), order(), requireIndex(), &simpleEqualityMatch ), MsgAssertionException );
+ return;
+ }
+ shared_ptr<Cursor> c = NamespaceDetailsTransient::getCursor( ns(), query(), order(), requireIndex(), &simpleEqualityMatch );
+ ASSERT_EQUALS( expectSimpleEquality(), simpleEqualityMatch );
+ string type = c->toString().substr( 0, expectedType().length() );
+ ASSERT_EQUALS( expectedType(), type );
+ check( c );
+ }
+ protected:
+ virtual string expectedType() const { return "TESTDUMMY"; }
+ virtual bool expectException() const { return false; }
+ virtual bool expectSimpleEquality() const { return false; }
+ virtual BSONObj query() const { return BSONObj(); }
+ virtual BSONObj order() const { return BSONObj(); }
+ virtual bool requireIndex() const { return false; }
+ virtual void check( const shared_ptr<Cursor> &c ) {
+ ASSERT( c->ok() );
+ ASSERT( !c->matcher() );
+ ASSERT_EQUALS( 5, c->current().getIntField( "_id" ) );
+ ASSERT( !c->advance() );
+ }
+ };
+
+ class NoConstraints : public Base {
+ string expectedType() const { return "BasicCursor"; }
+ };
+
+ class SimpleId : public Base {
+ public:
+ SimpleId() {
+ _cli.insert( ns(), BSON( "_id" << 0 ) );
+ _cli.insert( ns(), BSON( "_id" << 10 ) );
+ }
+ string expectedType() const { return "BtreeCursor _id_"; }
+ BSONObj query() const { return BSON( "_id" << 5 ); }
+ };
+
+ class OptimalIndex : public Base {
+ public:
+ OptimalIndex() {
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ _cli.insert( ns(), BSON( "a" << 5 ) );
+ _cli.insert( ns(), BSON( "a" << 6 ) );
+ }
+ string expectedType() const { return "BtreeCursor a_1"; }
+ BSONObj query() const { return BSON( "a" << GTE << 5 ); }
+ void check( const shared_ptr<Cursor> &c ) {
+ ASSERT( c->ok() );
+ ASSERT( c->matcher() );
+ ASSERT_EQUALS( 5, c->current().getIntField( "a" ) );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( c->advance() );
+ ASSERT_EQUALS( 6, c->current().getIntField( "a" ) );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT( !c->advance() );
+ }
+ };
+
+ class SimpleKeyMatch : public Base {
+ public:
+ SimpleKeyMatch() {
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ _cli.update( ns(), BSONObj(), BSON( "$set" << BSON( "a" << true ) ) );
+ }
+ string expectedType() const { return "BtreeCursor a_1"; }
+ bool expectSimpleEquality() const { return true; }
+ BSONObj query() const { return BSON( "a" << true ); }
+ virtual void check( const shared_ptr<Cursor> &c ) {
+ ASSERT( c->ok() );
+ ASSERT_EQUALS( 5, c->current().getIntField( "_id" ) );
+ ASSERT( !c->advance() );
+ }
+ };
+
+ class Geo : public Base {
+ public:
+ Geo() {
+ _cli.insert( ns(), BSON( "_id" << 44 << "loc" << BSON_ARRAY( 44 << 45 ) ) );
+ _cli.ensureIndex( ns(), BSON( "loc" << "2d" ) );
+ }
+ string expectedType() const { return "GeoSearchCursor"; }
+ BSONObj query() const { return fromjson( "{ loc : { $near : [50,50] } }" ); }
+ void check( const shared_ptr<Cursor> &c ) {
+ ASSERT( c->ok() );
+ ASSERT( c->matcher() );
+ ASSERT( c->matcher()->matchesCurrent( c.get() ) );
+ ASSERT_EQUALS( 44, c->current().getIntField( "_id" ) );
+ ASSERT( !c->advance() );
+ }
+ };
+
+ class OutOfOrder : public QueryOptimizerCursorTests::Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 5 ) );
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr<Cursor> c = NamespaceDetailsTransient::getCursor( ns(), BSONObj(), BSON( "b" << 1 ) );
+ ASSERT( !c );
+ }
+ };
+
+ class BestSavedOutOfOrder : public QueryOptimizerCursorTests::Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 5 << "b" << BSON_ARRAY( 1 << 2 << 3 << 4 << 5 ) ) );
+ _cli.insert( ns(), BSON( "_id" << 1 << "b" << 6 ) );
+ _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+ // record {_id:1} index for this query
+ ASSERT( _cli.query( ns(), QUERY( "_id" << GT << 0 << "b" << GT << 0 ).sort( "b" ) )->more() );
+ dblock lk;
+ Client::Context ctx( ns() );
+ shared_ptr<Cursor> c = NamespaceDetailsTransient::getCursor( ns(), BSON( "_id" << GT << 0 << "b" << GT << 0 ), BSON( "b" << 1 ) );
+ // {_id:1} requires scan and order, so {b:1} must be chosen.
+ ASSERT( c );
+ ASSERT_EQUALS( 5, c->current().getIntField( "_id" ) );
+ }
+ };
+
+ /**
+ * If an optimal plan is a candidate, return a cursor for it rather than a QueryOptimizerCursor. Avoid
+ * caching optimal plans since simple cursors will not save a plan anyway (so in the most common case optimal
+ * plans won't be cached) and because this simplifies the implementation for selecting a simple cursor.
+ */
+ class BestSavedOptimal : public QueryOptimizerCursorTests::Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "_id" << 1 << "q" << 1 ) );
+ // {_id:1} index not recorded for these queries since it is an optimal index.
+ ASSERT( _cli.query( ns(), QUERY( "_id" << GT << 0 ) )->more() );
+ ASSERT( _cli.query( ns(), QUERY( "$or" << BSON_ARRAY( BSON( "_id" << GT << 0 ) ) ) )->more() );
+ dblock lk;
+ Client::Context ctx( ns() );
+ // Check that no plan was recorded for this query.
+ ASSERT( BSONObj().woCompare( NamespaceDetailsTransient::get_inlock( ns() ).indexForPattern( FieldRangeSet( ns(), BSON( "_id" << GT << 0 ), true ).pattern() ) ) == 0 );
+ shared_ptr<Cursor> c = NamespaceDetailsTransient::getCursor( ns(), BSON( "_id" << GT << 0 ) );
+ // No need for query optimizer cursor since the plan is optimal.
+ ASSERT_EQUALS( "BtreeCursor _id_", c->toString() );
+ }
+ };
+
+            /** If no optimal plan is a candidate, a QueryOptimizerCursor should be returned, even if a plan has been recorded. */
+ class BestSavedNotOptimal : public QueryOptimizerCursorTests::Base {
+ public:
+ void run() {
+ _cli.insert( ns(), BSON( "_id" << 1 << "q" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "q" << 1 ) );
+ // Record {_id:1} index for this query
+ ASSERT( _cli.query( ns(), QUERY( "q" << 1 << "_id" << 1 ) )->more() );
+ dblock lk;
+ Client::Context ctx( ns() );
+ ASSERT( BSON( "_id" << 1 ).woCompare( NamespaceDetailsTransient::get_inlock( ns() ).indexForPattern( FieldRangeSet( ns(), BSON( "q" << 1 << "_id" << 1 ), true ).pattern() ) ) == 0 );
+ shared_ptr<Cursor> c = NamespaceDetailsTransient::getCursor( ns(), BSON( "q" << 1 << "_id" << 1 ) );
+ // Need query optimizer cursor since the cached plan is not optimal.
+ ASSERT_EQUALS( "QueryOptimizerCursor", c->toString() );
+ }
+ };
+
+ class MultiIndex : public Base {
+ public:
+ MultiIndex() {
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ }
+ string expectedType() const { return "QueryOptimizerCursor"; }
+ BSONObj query() const { return BSON( "_id" << GT << 0 << "a" << GT << 0 ); }
+ void check( const shared_ptr<Cursor> &c ) {}
+ };
+
+ class RequireIndexNoConstraints : public Base {
+ bool requireIndex() const { return true; }
+ bool expectException() const { return true; }
+ };
+
+ class RequireIndexSimpleId : public Base {
+ bool requireIndex() const { return true; }
+ string expectedType() const { return "BtreeCursor _id_"; }
+ BSONObj query() const { return BSON( "_id" << 5 ); }
+ };
+
+ class RequireIndexUnindexedQuery : public Base {
+ bool requireIndex() const { return true; }
+ bool expectException() const { return true; }
+ BSONObj query() const { return BSON( "a" << GTE << 5 ); }
+ };
+
+ class RequireIndexIndexedQuery : public Base {
+ public:
+ RequireIndexIndexedQuery() {
+ _cli.insert( ns(), BSON( "_id" << 6 << "a" << 6 << "c" << 4 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 << "b" << 1 << "c" << 1 ) );
+ }
+ string expectedType() const { return "QueryOptimizerCursor"; }
+ bool requireIndex() const { return true; }
+ BSONObj query() const { return BSON( "a" << GTE << 5 << "c" << 4 ); }
+ void check( const shared_ptr<Cursor> &c ) {
+ ASSERT( c->ok() );
+ ASSERT( c->matcher() );
+ ASSERT_EQUALS( 6, c->current().getIntField( "_id" ) );
+ ASSERT( !c->advance() );
+ }
+ };
+
+ class RequireIndexSecondOrClauseIndexed : public Base {
+ public:
+ RequireIndexSecondOrClauseIndexed() {
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+ _cli.insert( ns(), BSON( "a" << 1 ) );
+ _cli.insert( ns(), BSON( "b" << 1 ) );
+ }
+ bool requireIndex() const { return true; }
+ string expectedType() const { return "QueryOptimizerCursor"; }
+ BSONObj query() const { return fromjson( "{$or:[{a:1},{b:1}]}" ); }
+ void check( const shared_ptr<Cursor> &c ) {
+ ASSERT( c->ok() );
+ ASSERT( c->matcher() );
+ ASSERT( c->advance() );
+ ASSERT( !c->advance() ); // 2 matches exactly
+ }
+ };
+
+ class RequireIndexSecondOrClauseUnindexed : public Base {
+ public:
+ RequireIndexSecondOrClauseUnindexed() {
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ _cli.insert( ns(), BSON( "a" << 1 ) );
+ }
+ bool requireIndex() const { return true; }
+ bool expectException() const { return true; }
+ BSONObj query() const { return fromjson( "{$or:[{a:1},{b:1}]}" ); }
+ };
+
+ class RequireIndexSecondOrClauseUnindexedUndetected : public Base {
+ public:
+ RequireIndexSecondOrClauseUnindexedUndetected() {
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "a" << 1 << "b" << 1 ) );
+ _cli.insert( ns(), BSON( "a" << 1 ) );
+ _cli.insert( ns(), BSON( "b" << 1 ) );
+ }
+ bool requireIndex() const { return true; }
+ string expectedType() const { return "QueryOptimizerCursor"; }
+ BSONObj query() const { return fromjson( "{$or:[{a:1},{b:1}]}" ); }
+ void check( const shared_ptr<Cursor> &c ) {
+ ASSERT( c->ok() );
+ ASSERT( c->matcher() );
+ // An unindexed cursor is required for the second clause, but is not allowed.
+ ASSERT_THROWS( c->advance(), MsgAssertionException );
+ }
+ };
+
+ } // namespace GetCursor
+
+ class All : public Suite {
+ public:
+ All() : Suite( "queryoptimizercursor" ) {}
+
+ void setupTests() {
+ __forceLinkGeoPlugin();
+ add<QueryOptimizerCursorTests::CachedMatchCounterCount>();
+ add<QueryOptimizerCursorTests::CachedMatchCounterAccumulate>();
+ add<QueryOptimizerCursorTests::CachedMatchCounterDedup>();
+ add<QueryOptimizerCursorTests::CachedMatchCounterNscanned>();
+ add<QueryOptimizerCursorTests::SmallDupSetUpgrade>();
+ add<QueryOptimizerCursorTests::SmallDupSetUpgradeRead>();
+ add<QueryOptimizerCursorTests::SmallDupSetUpgradeWrite>();
+ add<QueryOptimizerCursorTests::Empty>();
+ add<QueryOptimizerCursorTests::Unindexed>();
+ add<QueryOptimizerCursorTests::Basic>();
+ add<QueryOptimizerCursorTests::NoMatch>();
+ add<QueryOptimizerCursorTests::Interleaved>();
+ add<QueryOptimizerCursorTests::NotMatch>();
+ add<QueryOptimizerCursorTests::StopInterleaving>();
+ add<QueryOptimizerCursorTests::TakeoverWithDup>();
+ add<QueryOptimizerCursorTests::TakeoverWithNonMatches>();
+ add<QueryOptimizerCursorTests::TakeoverWithTakeoverDup>();
+ add<QueryOptimizerCursorTests::BasicOr>();
+ add<QueryOptimizerCursorTests::OrFirstClauseEmpty>();
+ add<QueryOptimizerCursorTests::OrSecondClauseEmpty>();
+ add<QueryOptimizerCursorTests::OrMultipleClausesEmpty>();
+ add<QueryOptimizerCursorTests::TakeoverCountOr>();
+ add<QueryOptimizerCursorTests::TakeoverEndOfOrClause>();
+ add<QueryOptimizerCursorTests::TakeoverBeforeEndOfOrClause>();
+ add<QueryOptimizerCursorTests::TakeoverAfterEndOfOrClause>();
+ add<QueryOptimizerCursorTests::ManualMatchingDeduping>();
+ add<QueryOptimizerCursorTests::ManualMatchingUsingCurrKey>();
+ add<QueryOptimizerCursorTests::ManualMatchingDedupingTakeover>();
+ add<QueryOptimizerCursorTests::Singlekey>();
+ add<QueryOptimizerCursorTests::Multikey>();
+ add<QueryOptimizerCursorTests::AddOtherPlans>();
+ add<QueryOptimizerCursorTests::AddOtherPlansDelete>();
+ add<QueryOptimizerCursorTests::AddOtherPlansContinuousDelete>();
+ add<QueryOptimizerCursorTests::OrRangeElimination>();
+ add<QueryOptimizerCursorTests::OrDedup>();
+ add<QueryOptimizerCursorTests::EarlyDups>();
+ add<QueryOptimizerCursorTests::OrPopInTakeover>();
+ add<QueryOptimizerCursorTests::OrCollectionScanAbort>();
+ add<QueryOptimizerCursorTests::YieldNoOp>();
+ add<QueryOptimizerCursorTests::YieldDelete>();
+ add<QueryOptimizerCursorTests::YieldDeleteContinue>();
+ add<QueryOptimizerCursorTests::YieldDeleteContinueFurther>();
+ add<QueryOptimizerCursorTests::YieldUpdate>();
+ add<QueryOptimizerCursorTests::YieldDrop>();
+ add<QueryOptimizerCursorTests::YieldDropOr>();
+ add<QueryOptimizerCursorTests::YieldRemoveOr>();
+ add<QueryOptimizerCursorTests::YieldCappedOverwrite>();
+ add<QueryOptimizerCursorTests::YieldDropIndex>();
+ add<QueryOptimizerCursorTests::YieldMultiplePlansNoOp>();
+ add<QueryOptimizerCursorTests::YieldMultiplePlansAdvanceNoOp>();
+ add<QueryOptimizerCursorTests::YieldMultiplePlansDelete>();
+ add<QueryOptimizerCursorTests::YieldMultiplePlansDeleteOr>();
+ add<QueryOptimizerCursorTests::YieldMultiplePlansDeleteOrAdvance>();
+ add<QueryOptimizerCursorTests::YieldMultiplePlansCappedOverwrite>();
+ add<QueryOptimizerCursorTests::YieldMultiplePlansCappedOverwriteManual>();
+ add<QueryOptimizerCursorTests::YieldMultiplePlansCappedOverwriteManual2>();
+ add<QueryOptimizerCursorTests::YieldTakeover>();
+ add<QueryOptimizerCursorTests::YieldTakeoverBasic>();
+ add<QueryOptimizerCursorTests::YieldInactiveCursorAdvance>();
+ add<QueryOptimizerCursorTests::OrderId>();
+ add<QueryOptimizerCursorTests::OrderMultiIndex>();
+ add<QueryOptimizerCursorTests::OrderReject>();
+ add<QueryOptimizerCursorTests::OrderNatural>();
+ add<QueryOptimizerCursorTests::OrderUnindexed>();
+ add<QueryOptimizerCursorTests::RecordedOrderInvalid>();
+ add<QueryOptimizerCursorTests::KillOp>();
+ add<QueryOptimizerCursorTests::KillOpFirstClause>();
+ add<QueryOptimizerCursorTests::Nscanned>();
+ add<QueryOptimizerCursorTests::TouchEarlierIterate>();
+ add<QueryOptimizerCursorTests::TouchEarlierIterateDelete>();
+ add<QueryOptimizerCursorTests::TouchEarlierIterateDeleteMultiple>();
+ add<QueryOptimizerCursorTests::TouchEarlierIterateTakeover>();
+ add<QueryOptimizerCursorTests::TouchEarlierIterateTakeoverDeleteMultiple>();
+ add<QueryOptimizerCursorTests::TouchEarlierIterateUnindexedTakeoverDeleteMultiple>();
+ add<QueryOptimizerCursorTests::TouchEarlierIterateTakeoverDeleteMultipleMultiAdvance>();
+ add<QueryOptimizerCursorTests::InitialCappedWrapYieldRecoveryFailure>();
+ add<QueryOptimizerCursorTests::TakeoverCappedWrapYieldRecoveryFailure>();
+ add<QueryOptimizerCursorTests::GetCursor::NoConstraints>();
+ add<QueryOptimizerCursorTests::GetCursor::SimpleId>();
+ add<QueryOptimizerCursorTests::GetCursor::OptimalIndex>();
+ add<QueryOptimizerCursorTests::GetCursor::SimpleKeyMatch>();
+ add<QueryOptimizerCursorTests::GetCursor::Geo>();
+ add<QueryOptimizerCursorTests::GetCursor::OutOfOrder>();
+ add<QueryOptimizerCursorTests::GetCursor::BestSavedOutOfOrder>();
+ add<QueryOptimizerCursorTests::GetCursor::BestSavedOptimal>();
+ add<QueryOptimizerCursorTests::GetCursor::BestSavedNotOptimal>();
+ add<QueryOptimizerCursorTests::GetCursor::MultiIndex>();
+ add<QueryOptimizerCursorTests::GetCursor::RequireIndexNoConstraints>();
+ add<QueryOptimizerCursorTests::GetCursor::RequireIndexSimpleId>();
+ add<QueryOptimizerCursorTests::GetCursor::RequireIndexUnindexedQuery>();
+ add<QueryOptimizerCursorTests::GetCursor::RequireIndexIndexedQuery>();
+ add<QueryOptimizerCursorTests::GetCursor::RequireIndexSecondOrClauseIndexed>();
+ add<QueryOptimizerCursorTests::GetCursor::RequireIndexSecondOrClauseUnindexed>();
+ add<QueryOptimizerCursorTests::GetCursor::RequireIndexSecondOrClauseUnindexedUndetected>();
+ }
+ } myall;
+
+} // namespace QueryOptimizerCursorTests
+
diff --git a/src/mongo/dbtests/queryoptimizertests.cpp b/src/mongo/dbtests/queryoptimizertests.cpp
new file mode 100644
index 00000000000..8da13578b45
--- /dev/null
+++ b/src/mongo/dbtests/queryoptimizertests.cpp
@@ -0,0 +1,1063 @@
+// queryoptimizertests.cpp : query optimizer unit tests
+//
+
+/**
+ * Copyright (C) 2009 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../db/queryoptimizer.h"
+#include "../db/instance.h"
+#include "../db/ops/count.h"
+#include "../db/ops/query.h"
+#include "../db/ops/delete.h"
+#include "dbtests.h"
+
+
+namespace mongo {
+ extern BSONObj id_obj;
+ void runQuery(Message& m, QueryMessage& q, Message &response ) {
+ CurOp op( &(cc()) );
+ op.ensureStarted();
+ runQuery( m , q , op, response );
+ }
+ void runQuery(Message& m, QueryMessage& q ) {
+ Message response;
+ runQuery( m, q, response );
+ }
+ void __forceLinkGeoPlugin();
+} // namespace mongo
+
+namespace QueryOptimizerTests {
+
+ void dropCollection( const char *ns ) {
+ string errmsg;
+ BSONObjBuilder result;
+ dropCollection( ns, errmsg, result );
+ }
+
+ namespace QueryPlanTests {
+
+ using boost::shared_ptr;
+
+ class Base {
+ public:
+ Base() : _ctx( ns() ) , indexNum_( 0 ) {
+ string err;
+ userCreateNS( ns(), BSONObj(), err, false );
+ }
+ ~Base() {
+ if ( !nsd() )
+ return;
+ dropCollection( ns() );
+ }
+ protected:
+ static const char *ns() { return "unittests.QueryPlanTests"; }
+ static NamespaceDetails *nsd() { return nsdetails( ns() ); }
+ IndexDetails *index( const BSONObj &key ) {
+ stringstream ss;
+ ss << indexNum_++;
+ string name = ss.str();
+ client_.resetIndexCache();
+ client_.ensureIndex( ns(), key, false, name.c_str() );
+ NamespaceDetails *d = nsd();
+ for( int i = 0; i < d->nIndexes; ++i ) {
+ if ( d->idx(i).keyPattern() == key /*indexName() == name*/ || ( d->idx(i).isIdIndex() && IndexDetails::isIdIndexPattern( key ) ) )
+ return &d->idx(i);
+ }
+ assert( false );
+ return 0;
+ }
+ int indexno( const BSONObj &key ) {
+ return nsd()->idxNo( *index(key) );
+ }
+ BSONObj startKey( const QueryPlan &p ) const {
+ return p.frv()->startKey();
+ }
+ BSONObj endKey( const QueryPlan &p ) const {
+ return p.frv()->endKey();
+ }
+ private:
+ dblock lk_;
+ Client::Context _ctx;
+ int indexNum_;
+ static DBDirectClient client_;
+ };
+ DBDirectClient Base::client_;
+
+// There's a limit of 10 indexes total; make sure not to exceed this in a given test.
+#define INDEXNO(x) nsd()->idxNo( *this->index( BSON(x) ) )
+#define INDEX(x) this->index( BSON(x) )
+ auto_ptr< FieldRangeSetPair > FieldRangeSetPair_GLOBAL;
+#define FRSP(x) ( FieldRangeSetPair_GLOBAL.reset( new FieldRangeSetPair( ns(), x ) ), *FieldRangeSetPair_GLOBAL )
+ auto_ptr< FieldRangeSetPair > FieldRangeSetPair_GLOBAL2;
+#define FRSP2(x) ( FieldRangeSetPair_GLOBAL2.reset( new FieldRangeSetPair( ns(), x ) ), FieldRangeSetPair_GLOBAL2.get() )
+
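+            /** A plan with no index is neither optimal nor an exact key match. */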
+ class NoIndex : public Base {
+ public:
+ void run() {
+ QueryPlan p( nsd(), -1, FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSONObj() );
+ ASSERT( !p.optimal() );
+ ASSERT( !p.scanAndOrderRequired() );
+ ASSERT( !p.exactKeyMatch() );
+ }
+ };
+
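+            /** Check scanAndOrderRequired() and start/end keys for simple sort specifications. */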
+ class SimpleOrder : public Base {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendMinKey( "" );
+ BSONObj start = b.obj();
+ BSONObjBuilder b2;
+ b2.appendMaxKey( "" );
+ BSONObj end = b2.obj();
+
+ QueryPlan p( nsd(), INDEXNO( "a" << 1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << 1 ) );
+ ASSERT( !p.scanAndOrderRequired() );
+ ASSERT( !startKey( p ).woCompare( start ) );
+ ASSERT( !endKey( p ).woCompare( end ) );
+ QueryPlan p2( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << 1 << "b" << 1 ) );
+ ASSERT( !p2.scanAndOrderRequired() );
+ QueryPlan p3( nsd(), INDEXNO( "a" << 1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "b" << 1 ) );
+ ASSERT( p3.scanAndOrderRequired() );
+ ASSERT( !startKey( p3 ).woCompare( start ) );
+ ASSERT( !endKey( p3 ).woCompare( end ) );
+ }
+ };
+
+ class MoreIndexThanNeeded : public Base {
+ public:
+ void run() {
+ QueryPlan p( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << 1 ) );
+ ASSERT( !p.scanAndOrderRequired() );
+ }
+ };
+
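+            /** Traversal direction depends on agreement between index and sort directions. */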
+ class IndexSigns : public Base {
+ public:
+ void run() {
+ QueryPlan p( nsd(), INDEXNO( "a" << 1 << "b" << -1 ) , FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << 1 << "b" << -1 ) );
+ ASSERT( !p.scanAndOrderRequired() );
+ ASSERT_EQUALS( 1, p.direction() );
+ QueryPlan p2( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << 1 << "b" << -1 ) );
+ ASSERT( p2.scanAndOrderRequired() );
+ ASSERT_EQUALS( 0, p2.direction() );
+ QueryPlan p3( nsd(), indexno( id_obj ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "_id" << 1 ) );
+ ASSERT( !p3.scanAndOrderRequired() );
+ ASSERT_EQUALS( 1, p3.direction() );
+ }
+ };
+
+ class IndexReverse : public Base {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.appendMinKey( "" );
+ b.appendMaxKey( "" );
+ BSONObj start = b.obj();
+ BSONObjBuilder b2;
+ b2.appendMaxKey( "" );
+ b2.appendMinKey( "" );
+ BSONObj end = b2.obj();
+ QueryPlan p( nsd(), INDEXNO( "a" << -1 << "b" << 1 ),FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << 1 << "b" << -1 ) );
+ ASSERT( !p.scanAndOrderRequired() );
+ ASSERT_EQUALS( -1, p.direction() );
+ ASSERT( !startKey( p ).woCompare( start ) );
+ ASSERT( !endKey( p ).woCompare( end ) );
+ QueryPlan p2( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << -1 << "b" << -1 ) );
+ ASSERT( !p2.scanAndOrderRequired() );
+ ASSERT_EQUALS( -1, p2.direction() );
+ QueryPlan p3( nsd(), INDEXNO( "a" << 1 << "b" << -1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << -1 << "b" << -1 ) );
+ ASSERT( p3.scanAndOrderRequired() );
+ ASSERT_EQUALS( 0, p3.direction() );
+ }
+ };
+
+ class NoOrder : public Base {
+ public:
+ void run() {
+ BSONObjBuilder b;
+ b.append( "", 3 );
+ b.appendMinKey( "" );
+ BSONObj start = b.obj();
+ BSONObjBuilder b2;
+ b2.append( "", 3 );
+ b2.appendMaxKey( "" );
+ BSONObj end = b2.obj();
+ QueryPlan p( nsd(), INDEXNO( "a" << -1 << "b" << 1 ), FRSP( BSON( "a" << 3 ) ), FRSP2( BSON( "a" << 3 ) ), BSON( "a" << 3 ), BSONObj() );
+ ASSERT( !p.scanAndOrderRequired() );
+ ASSERT( !startKey( p ).woCompare( start ) );
+ ASSERT( !endKey( p ).woCompare( end ) );
+ QueryPlan p2( nsd(), INDEXNO( "a" << -1 << "b" << 1 ), FRSP( BSON( "a" << 3 ) ), FRSP2( BSON( "a" << 3 ) ), BSON( "a" << 3 ), BSONObj() );
+ ASSERT( !p2.scanAndOrderRequired() );
+ ASSERT( !startKey( p ).woCompare( start ) );
+ ASSERT( !endKey( p ).woCompare( end ) );
+ }
+ };
+
+ class EqualWithOrder : public Base {
+ public:
+ void run() {
+ QueryPlan p( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "a" << 4 ) ), FRSP2( BSON( "a" << 4 ) ), BSON( "a" << 4 ), BSON( "b" << 1 ) );
+ ASSERT( !p.scanAndOrderRequired() );
+ QueryPlan p2( nsd(), INDEXNO( "a" << 1 << "b" << 1 << "c" << 1 ), FRSP( BSON( "b" << 4 ) ), FRSP2( BSON( "b" << 4 ) ), BSON( "b" << 4 ), BSON( "a" << 1 << "c" << 1 ) );
+ ASSERT( !p2.scanAndOrderRequired() );
+ QueryPlan p3( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "b" << 4 ) ), FRSP2( BSON( "b" << 4 ) ), BSON( "b" << 4 ), BSON( "a" << 1 << "c" << 1 ) );
+ ASSERT( p3.scanAndOrderRequired() );
+ }
+ };
+
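+            /** Check which index / query / order combinations are reported optimal(). */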
+ class Optimal : public Base {
+ public:
+ void run() {
+ QueryPlan p( nsd(), INDEXNO( "a" << 1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << 1 ) );
+ ASSERT( p.optimal() );
+ QueryPlan p2( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << 1 ) );
+ ASSERT( p2.optimal() );
+ QueryPlan p3( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "a" << 1 ) ), FRSP2( BSON( "a" << 1 ) ), BSON( "a" << 1 ), BSON( "a" << 1 ) );
+ ASSERT( p3.optimal() );
+ QueryPlan p4( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "b" << 1 ) ), FRSP2( BSON( "b" << 1 ) ), BSON( "b" << 1 ), BSON( "a" << 1 ) );
+ ASSERT( !p4.optimal() );
+ QueryPlan p5( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "a" << 1 ) ), FRSP2( BSON( "a" << 1 ) ), BSON( "a" << 1 ), BSON( "b" << 1 ) );
+ ASSERT( p5.optimal() );
+ QueryPlan p6( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "b" << 1 ) ), FRSP2( BSON( "b" << 1 ) ), BSON( "b" << 1 ), BSON( "b" << 1 ) );
+ ASSERT( !p6.optimal() );
+ QueryPlan p7( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "a" << 1 << "b" << 1 ) ), FRSP2( BSON( "a" << 1 << "b" << 1 ) ), BSON( "a" << 1 << "b" << 1 ), BSON( "a" << 1 ) );
+ ASSERT( p7.optimal() );
+ QueryPlan p8( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "a" << 1 << "b" << LT << 1 ) ), FRSP2( BSON( "a" << 1 << "b" << LT << 1 ) ), BSON( "a" << 1 << "b" << LT << 1 ), BSON( "a" << 1 ) );
+ ASSERT( p8.optimal() );
+ QueryPlan p9( nsd(), INDEXNO( "a" << 1 << "b" << 1 << "c" << 1 ), FRSP( BSON( "a" << 1 << "b" << LT << 1 ) ), FRSP2( BSON( "a" << 1 << "b" << LT << 1 ) ), BSON( "a" << 1 << "b" << LT << 1 ), BSON( "a" << 1 ) );
+ ASSERT( p9.optimal() );
+ }
+ };
+
+ class MoreOptimal : public Base {
+ public:
+ void run() {
+ QueryPlan p10( nsd(), INDEXNO( "a" << 1 << "b" << 1 << "c" << 1 ), FRSP( BSON( "a" << 1 ) ), FRSP2( BSON( "a" << 1 ) ), BSON( "a" << 1 ), BSONObj() );
+ ASSERT( p10.optimal() );
+ QueryPlan p11( nsd(), INDEXNO( "a" << 1 << "b" << 1 << "c" << 1 ), FRSP( BSON( "a" << 1 << "b" << LT << 1 ) ), FRSP2( BSON( "a" << 1 << "b" << LT << 1 ) ), BSON( "a" << 1 << "b" << LT << 1 ), BSONObj() );
+ ASSERT( p11.optimal() );
+ QueryPlan p12( nsd(), INDEXNO( "a" << 1 << "b" << 1 << "c" << 1 ), FRSP( BSON( "a" << LT << 1 ) ), FRSP2( BSON( "a" << LT << 1 ) ), BSON( "a" << LT << 1 ), BSONObj() );
+ ASSERT( p12.optimal() );
+ QueryPlan p13( nsd(), INDEXNO( "a" << 1 << "b" << 1 << "c" << 1 ), FRSP( BSON( "a" << LT << 1 ) ), FRSP2( BSON( "a" << LT << 1 ) ), BSON( "a" << LT << 1 ), BSON( "a" << 1 ) );
+ ASSERT( p13.optimal() );
+ }
+ };
+
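+            /** Check exactKeyMatch() for various index / query / order combinations. */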
+ class KeyMatch : public Base {
+ public:
+ void run() {
+ QueryPlan p( nsd(), INDEXNO( "a" << 1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << 1 ) );
+ ASSERT( !p.exactKeyMatch() );
+ QueryPlan p2( nsd(), INDEXNO( "b" << 1 << "a" << 1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << 1 ) );
+ ASSERT( !p2.exactKeyMatch() );
+ QueryPlan p3( nsd(), INDEXNO( "b" << 1 << "a" << 1 ), FRSP( BSON( "b" << "z" ) ), FRSP2( BSON( "b" << "z" ) ), BSON( "b" << "z" ), BSON( "a" << 1 ) );
+ ASSERT( !p3.exactKeyMatch() );
+ QueryPlan p4( nsd(), INDEXNO( "b" << 1 << "a" << 1 << "c" << 1 ), FRSP( BSON( "c" << "y" << "b" << "z" ) ), FRSP2( BSON( "c" << "y" << "b" << "z" ) ), BSON( "c" << "y" << "b" << "z" ), BSON( "a" << 1 ) );
+ ASSERT( !p4.exactKeyMatch() );
+ QueryPlan p5( nsd(), INDEXNO( "b" << 1 << "a" << 1 << "c" << 1 ), FRSP( BSON( "c" << "y" << "b" << "z" ) ), FRSP2( BSON( "c" << "y" << "b" << "z" ) ), BSON( "c" << "y" << "b" << "z" ), BSONObj() );
+ ASSERT( !p5.exactKeyMatch() );
+ QueryPlan p6( nsd(), INDEXNO( "b" << 1 << "a" << 1 << "c" << 1 ), FRSP( BSON( "c" << LT << "y" << "b" << GT << "z" ) ), FRSP2( BSON( "c" << LT << "y" << "b" << GT << "z" ) ), BSON( "c" << LT << "y" << "b" << GT << "z" ), BSONObj() );
+ ASSERT( !p6.exactKeyMatch() );
+ QueryPlan p7( nsd(), INDEXNO( "b" << 1 ), FRSP( BSONObj() ), FRSP2( BSONObj() ), BSONObj(), BSON( "a" << 1 ) );
+ ASSERT( !p7.exactKeyMatch() );
+ QueryPlan p8( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "b" << "y" << "a" << "z" ) ), FRSP2( BSON( "b" << "y" << "a" << "z" ) ), BSON( "b" << "y" << "a" << "z" ), BSONObj() );
+ ASSERT( p8.exactKeyMatch() );
+ QueryPlan p9( nsd(), INDEXNO( "a" << 1 ), FRSP( BSON( "a" << "z" ) ), FRSP2( BSON( "a" << "z" ) ), BSON( "a" << "z" ), BSON( "a" << 1 ) );
+ ASSERT( p9.exactKeyMatch() );
+ }
+ };
+
+ class MoreKeyMatch : public Base {
+ public:
+ void run() {
+ QueryPlan p( nsd(), INDEXNO( "a" << 1 ), FRSP( BSON( "a" << "r" << "b" << NE << "q" ) ), FRSP2( BSON( "a" << "r" << "b" << NE << "q" ) ), BSON( "a" << "r" << "b" << NE << "q" ), BSON( "a" << 1 ) );
+ ASSERT( !p.exactKeyMatch() );
+ }
+ };
+
+ class ExactKeyQueryTypes : public Base {
+ public:
+ void run() {
+ QueryPlan p( nsd(), INDEXNO( "a" << 1 ), FRSP( BSON( "a" << "b" ) ), FRSP2( BSON( "a" << "b" ) ), BSON( "a" << "b" ), BSONObj() );
+ ASSERT( p.exactKeyMatch() );
+ QueryPlan p2( nsd(), INDEXNO( "a" << 1 ), FRSP( BSON( "a" << 4 ) ), FRSP2( BSON( "a" << 4 ) ), BSON( "a" << 4 ), BSONObj() );
+ ASSERT( !p2.exactKeyMatch() );
+ QueryPlan p3( nsd(), INDEXNO( "a" << 1 ), FRSP( BSON( "a" << BSON( "c" << "d" ) ) ), FRSP2( BSON( "a" << BSON( "c" << "d" ) ) ), BSON( "a" << BSON( "c" << "d" ) ), BSONObj() );
+ ASSERT( !p3.exactKeyMatch() );
+ BSONObjBuilder b;
+ b.appendRegex( "a", "^ddd" );
+ BSONObj q = b.obj();
+ QueryPlan p4( nsd(), INDEXNO( "a" << 1 ), FRSP( q ), FRSP2( q ), q, BSONObj() );
+ ASSERT( !p4.exactKeyMatch() );
+ QueryPlan p5( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "a" << "z" << "b" << 4 ) ), FRSP2( BSON( "a" << "z" << "b" << 4 ) ), BSON( "a" << "z" << "b" << 4 ), BSONObj() );
+ ASSERT( !p5.exactKeyMatch() );
+ }
+ };
+
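+            /** Check unhelpful() for indexes providing no useful range or ordering for a query. */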
+ class Unhelpful : public Base {
+ public:
+ void run() {
+ QueryPlan p( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "b" << 1 ) ), FRSP2( BSON( "b" << 1 ) ), BSON( "b" << 1 ), BSONObj() );
+ ASSERT( !p.range( "a" ).nontrivial() );
+ ASSERT( p.unhelpful() );
+ QueryPlan p2( nsd(), INDEXNO( "a" << 1 << "b" << 1 ), FRSP( BSON( "b" << 1 << "c" << 1 ) ), FRSP2( BSON( "b" << 1 << "c" << 1 ) ), BSON( "b" << 1 << "c" << 1 ), BSON( "a" << 1 ) );
+ ASSERT( !p2.scanAndOrderRequired() );
+ ASSERT( !p2.range( "a" ).nontrivial() );
+ ASSERT( !p2.unhelpful() );
+ QueryPlan p3( nsd(), INDEXNO( "b" << 1 ), FRSP( BSON( "b" << 1 << "c" << 1 ) ), FRSP2( BSON( "b" << 1 << "c" << 1 ) ), BSON( "b" << 1 << "c" << 1 ), BSONObj() );
+ ASSERT( p3.range( "b" ).nontrivial() );
+ ASSERT( !p3.unhelpful() );
+ QueryPlan p4( nsd(), INDEXNO( "b" << 1 << "c" << 1 ), FRSP( BSON( "c" << 1 << "d" << 1 ) ), FRSP2( BSON( "c" << 1 << "d" << 1 ) ), BSON( "c" << 1 << "d" << 1 ), BSONObj() );
+ ASSERT( !p4.range( "b" ).nontrivial() );
+ ASSERT( p4.unhelpful() );
+ }
+ };
+
+ } // namespace QueryPlanTests
+
+ namespace QueryPlanSetTests {
+ class Base {
+ public:
+ Base() : _context( ns() ) {
+ string err;
+ userCreateNS( ns(), BSONObj(), err, false );
+ }
+ virtual ~Base() {
+ if ( !nsd() )
+ return;
+ NamespaceDetailsTransient::get_inlock( ns() ).clearQueryCache();
+ dropCollection( ns() );
+ }
+ static void assembleRequest( const string &ns, BSONObj query, int nToReturn, int nToSkip, BSONObj *fieldsToReturn, int queryOptions, Message &toSend ) {
+ // see query.h for the protocol we are using here.
+ BufBuilder b;
+ int opts = queryOptions;
+ b.appendNum(opts);
+ b.appendStr(ns);
+ b.appendNum(nToSkip);
+ b.appendNum(nToReturn);
+ query.appendSelfToBufBuilder(b);
+ if ( fieldsToReturn )
+ fieldsToReturn->appendSelfToBufBuilder(b);
+ toSend.setData(dbQuery, b.buf(), b.len());
+ }
+ protected:
+ static const char *ns() { return "unittests.QueryPlanSetTests"; }
+ static NamespaceDetails *nsd() { return nsdetails( ns() ); }
+ private:
+ dblock lk_;
+ Client::Context _context;
+ };
+
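+            /** Only a single plan is generated when no useful indexes exist. */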
+ class NoIndexes : public Base {
+ public:
+ void run() {
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 4 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
+ ASSERT_EQUALS( 1, s.nPlans() );
+ }
+ };
+
+ class Optimal : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "b_2" );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 4 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 4 ), BSONObj() );
+ ASSERT_EQUALS( 1, s.nPlans() );
+ }
+ };
+
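+            /** Multiple candidate plans are generated when no plan is optimal. */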
+ class NoOptimal : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ Helpers::ensureIndex( ns(), BSON( "b" << 1 ), false, "b_1" );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 4 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
+ ASSERT_EQUALS( 3, s.nPlans() );
+ }
+ };
+
+ class NoSpec : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ Helpers::ensureIndex( ns(), BSON( "b" << 1 ), false, "b_1" );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSONObj() ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSONObj(), BSONObj() );
+ ASSERT_EQUALS( 1, s.nPlans() );
+ }
+ };
+
+ class HintSpec : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ Helpers::ensureIndex( ns(), BSON( "b" << 1 ), false, "b_1" );
+ BSONObj b = BSON( "hint" << BSON( "a" << 1 ) );
+ BSONElement e = b.firstElement();
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 1 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 1 ), BSON( "b" << 1 ), true, &e );
+ ASSERT_EQUALS( 1, s.nPlans() );
+ }
+ };
+
+ class HintName : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ Helpers::ensureIndex( ns(), BSON( "b" << 1 ), false, "b_1" );
+ BSONObj b = BSON( "hint" << "a_1" );
+ BSONElement e = b.firstElement();
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 1 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 1 ), BSON( "b" << 1 ), true, &e );
+ ASSERT_EQUALS( 1, s.nPlans() );
+ }
+ };
+
+ class NaturalHint : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ Helpers::ensureIndex( ns(), BSON( "b" << 1 ), false, "b_1" );
+ BSONObj b = BSON( "hint" << BSON( "$natural" << 1 ) );
+ BSONElement e = b.firstElement();
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 1 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 1 ), BSON( "b" << 1 ), true, &e );
+ ASSERT_EQUALS( 1, s.nPlans() );
+ }
+ };
+
+ class NaturalSort : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "b_2" );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 1 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 1 ), BSON( "$natural" << 1 ) );
+ ASSERT_EQUALS( 1, s.nPlans() );
+ }
+ };
+
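+            /** A hint naming a nonexistent index triggers an assertion. */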
+ class BadHint : public Base {
+ public:
+ void run() {
+ BSONObj b = BSON( "hint" << "a_1" );
+ BSONElement e = b.firstElement();
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 1 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ ASSERT_THROWS( QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 1 ), BSON( "b" << 1 ), true, &e ),
+ AssertionException );
+ }
+ };
+

+ class Count : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ Helpers::ensureIndex( ns(), BSON( "b" << 1 ), false, "b_1" );
+ string err;
+ ASSERT_EQUALS( 0, runCount( ns(), BSON( "query" << BSON( "a" << 4 ) ), err ) );
+ BSONObj one = BSON( "a" << 1 );
+ BSONObj fourA = BSON( "a" << 4 );
+ BSONObj fourB = BSON( "a" << 4 );
+ theDataFileMgr.insertWithObjMod( ns(), one );
+ ASSERT_EQUALS( 0, runCount( ns(), BSON( "query" << BSON( "a" << 4 ) ), err ) );
+ theDataFileMgr.insertWithObjMod( ns(), fourA );
+ ASSERT_EQUALS( 1, runCount( ns(), BSON( "query" << BSON( "a" << 4 ) ), err ) );
+ theDataFileMgr.insertWithObjMod( ns(), fourB );
+ ASSERT_EQUALS( 2, runCount( ns(), BSON( "query" << BSON( "a" << 4 ) ), err ) );
+ ASSERT_EQUALS( 3, runCount( ns(), BSON( "query" << BSONObj() ), err ) );
+ ASSERT_EQUALS( 3, runCount( ns(), BSON( "query" << BSON( "a" << GT << 0 ) ), err ) );
+ // A count against a missing namespace returns -1.
+ ASSERT_EQUALS( -1, runCount( "unittests.missingNS", BSONObj(), err ) );
+ // An impossible match (a > 0 and a < -1) yields a count of 0.
+ ASSERT_EQUALS( 0, runCount( ns(), BSON( "query" << BSON( "a" << GT << 0 << LT << -1 ) ), err ) );
+ }
+ };
+
+ class QueryMissingNs : public Base {
+ public:
+ QueryMissingNs() { log() << "querymissingns starts" << endl; }
+ ~QueryMissingNs() {
+ log() << "end QueryMissingNs" << endl;
+ }
+ void run() {
+ Message m;
+ assembleRequest( "unittests.missingNS", BSONObj(), 0, 0, 0, 0, m );
+ DbMessage d(m);
+ QueryMessage q(d);
+ Message ret;
+ runQuery( m, q, ret );
+ ASSERT_EQUALS( 0, ((QueryResult*)ret.header())->nReturned );
+ }
+
+ };
+
+ class UnhelpfulIndex : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ Helpers::ensureIndex( ns(), BSON( "b" << 1 ), false, "b_1" );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 1 << "c" << 2 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 1 << "c" << 2 ), BSONObj() );
+ ASSERT_EQUALS( 2, s.nPlans() );
+ }
+ };
+
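+ /** One candidate plan throws; the op returned should be one that completed, with no error recorded. */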
+ class SingleException : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ Helpers::ensureIndex( ns(), BSON( "b" << 1 ), false, "b_1" );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 4 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
+ ASSERT_EQUALS( 3, s.nPlans() );
+ bool threw = false;
+ auto_ptr< TestOp > t( new TestOp( true, threw ) );
+ boost::shared_ptr< TestOp > done = s.runOp( *t );
+ ASSERT( threw );
+ ASSERT( done->complete() );
+ ASSERT( done->exception().empty() );
+ ASSERT( !done->error() );
+ }
+ private:
+ class TestOp : public QueryOp {
+ public:
+ TestOp( bool iThrow, bool &threw ) : iThrow_( iThrow ), threw_( threw ), i_(), youThrow_( false ) {}
+ virtual void _init() {}
+ virtual void next() {
+ if ( iThrow_ )
+ threw_ = true;
+ massert( 10408 , "throw", !iThrow_ );
+ if ( ++i_ > 10 )
+ setComplete();
+ }
+ virtual QueryOp *_createChild() const {
+ QueryOp *op = new TestOp( youThrow_, threw_ );
+ youThrow_ = !youThrow_;
+ return op;
+ }
+ virtual bool mayRecordPlan() const { return true; }
+ virtual long long nscanned() { return 0; }
+ private:
+ bool iThrow_;
+ bool &threw_;
+ int i_;
+ mutable bool youThrow_;
+ };
+ };
+
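+ /** Every candidate plan throws; the returned op reports the error instead of completing. */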
+ class AllException : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ Helpers::ensureIndex( ns(), BSON( "b" << 1 ), false, "b_1" );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 4 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
+ ASSERT_EQUALS( 3, s.nPlans() );
+ auto_ptr< TestOp > t( new TestOp() );
+ boost::shared_ptr< TestOp > done = s.runOp( *t );
+ ASSERT( !done->complete() );
+ ASSERT_EQUALS( "throw", done->exception().msg );
+ ASSERT( done->error() );
+ }
+ private:
+ class TestOp : public QueryOp {
+ public:
+ virtual void _init() {}
+ virtual void next() {
+ massert( 10409 , "throw", false );
+ }
+ virtual QueryOp *_createChild() const {
+ return new TestOp();
+ }
+ virtual bool mayRecordPlan() const { return true; }
+ virtual long long nscanned() { return 0; }
+ };
+ };
+
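+ /**
+ * Lifecycle of the recorded ("best") plan: saved after a successful query, cleared when an
+ * index is added or after roughly a thousand writes, and not saved when the op can't record
+ * a plan, when a hint is used, or when the query pattern differs.
+ */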
+ class SaveGoodIndex : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ Helpers::ensureIndex( ns(), BSON( "b" << 1 ), false, "b_1" );
+ // No best plan - all must be tried.
+ nPlans( 3 );
+ runQuery();
+ // Best plan selected by query.
+ nPlans( 1 );
+ nPlans( 1 );
+ Helpers::ensureIndex( ns(), BSON( "c" << 1 ), false, "c_1" );
+ // Best plan cleared when new index added.
+ nPlans( 3 );
+ runQuery();
+ // Best plan selected by query.
+ nPlans( 1 );
+
+ {
+ DBDirectClient client;
+ for( int i = 0; i < 334; ++i ) {
+ client.insert( ns(), BSON( "i" << i ) );
+ client.update( ns(), QUERY( "i" << i ), BSON( "i" << i + 1 ) );
+ client.remove( ns(), BSON( "i" << i + 1 ) );
+ }
+ }
+ // Best plan cleared by ~1000 writes.
+ nPlans( 3 );
+
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 4 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
+ NoRecordTestOp original;
+ s.runOp( original );
+ // NoRecordTestOp doesn't record a best plan (tests the case where mayRecordPlan() is false).
+ nPlans( 3 );
+
+ BSONObj hint = fromjson( "{hint:{$natural:1}}" );
+ BSONElement hintElt = hint.firstElement();
+ auto_ptr< FieldRangeSetPair > frsp2( new FieldRangeSetPair( ns(), BSON( "a" << 4 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig2( new FieldRangeSetPair( *frsp2 ) );
+ QueryPlanSet s2( ns(), frsp2, frspOrig2, BSON( "a" << 4 ), BSON( "b" << 1 ), true, &hintElt );
+ TestOp newOriginal;
+ s2.runOp( newOriginal );
+ // No plan recorded when a hint is used.
+ nPlans( 3 );
+
+ auto_ptr< FieldRangeSetPair > frsp3( new FieldRangeSetPair( ns(), BSON( "a" << 4 ), true ) );
+ auto_ptr< FieldRangeSetPair > frspOrig3( new FieldRangeSetPair( *frsp3 ) );
+ QueryPlanSet s3( ns(), frsp3, frspOrig3, BSON( "a" << 4 ), BSON( "b" << 1 << "c" << 1 ) );
+ TestOp newerOriginal;
+ s3.runOp( newerOriginal );
+ // Plan recorded was for a different query pattern (different sort spec).
+ nPlans( 3 );
+
+ // Best plan still selected by query after all these other tests.
+ runQuery();
+ nPlans( 1 );
+ }
+ private:
+ void nPlans( int n ) {
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 4 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
+ ASSERT_EQUALS( n, s.nPlans() );
+ }
+ void runQuery() {
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 4 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
+ TestOp original;
+ s.runOp( original );
+ }
+ class TestOp : public QueryOp {
+ public:
+ virtual void _init() {}
+ virtual void next() {
+ setComplete();
+ }
+ virtual QueryOp *_createChild() const {
+ return new TestOp();
+ }
+ virtual bool mayRecordPlan() const { return true; }
+ virtual long long nscanned() { return 0; }
+ };
+ class NoRecordTestOp : public TestOp {
+ virtual bool mayRecordPlan() const { return false; }
+ virtual QueryOp *_createChild() const { return new NoRecordTestOp(); }
+ };
+ };
+
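+ /**
+ * The first op only makes progress on the $natural plan, so the collection scan is recorded
+ * as best; a later op that errors out on $natural still completes because the other plans
+ * are tried as well.
+ */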
+ class TryAllPlansOnErr : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), BSON( "a" << 4 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, BSON( "a" << 4 ), BSON( "b" << 1 ) );
+ ScanOnlyTestOp op;
+ s.runOp( op );
+ pair< BSONObj, long long > best = QueryUtilIndexed::bestIndexForPatterns( s.frsp(), BSON( "b" << 1 ) );
+ ASSERT( fromjson( "{$natural:1}" ).woCompare( best.first ) == 0 );
+ ASSERT_EQUALS( 1, best.second );
+
+ auto_ptr< FieldRangeSetPair > frsp2( new FieldRangeSetPair( ns(), BSON( "a" << 4 ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig2( new FieldRangeSetPair( *frsp2 ) );
+ QueryPlanSet s2( ns(), frsp2, frspOrig2, BSON( "a" << 4 ), BSON( "b" << 1 ) );
+ TestOp op2;
+ ASSERT( s2.runOp( op2 )->complete() );
+ }
+ private:
+ class TestOp : public QueryOp {
+ public:
+ TestOp() {}
+ virtual void _init() {}
+ virtual void next() {
+ if ( qp().indexKey().firstElementFieldName() == string( "$natural" ) )
+ massert( 10410 , "throw", false );
+ setComplete();
+ }
+ virtual QueryOp *_createChild() const {
+ return new TestOp();
+ }
+ virtual bool mayRecordPlan() const { return true; }
+ virtual long long nscanned() { return 1; }
+ };
+ class ScanOnlyTestOp : public TestOp {
+ virtual void next() {
+ if ( qp().indexKey().firstElement().fieldName() == string( "$natural" ) )
+ setComplete();
+ massert( 10411 , "throw", false );
+ }
+ virtual QueryOp *_createChild() const {
+ return new ScanOnlyTestOp();
+ }
+ };
+ };
+
+ class FindOne : public Base {
+ public:
+ void run() {
+ BSONObj one = BSON( "a" << 1 );
+ theDataFileMgr.insertWithObjMod( ns(), one );
+ BSONObj result;
+ ASSERT( Helpers::findOne( ns(), BSON( "a" << 1 ), result ) );
+ ASSERT_THROWS( Helpers::findOne( ns(), BSON( "a" << 1 ), result, true ), AssertionException );
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ ASSERT( Helpers::findOne( ns(), BSON( "a" << 1 ), result, true ) );
+ }
+ };
+
+ class Delete : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ for( int i = 0; i < 200; ++i ) {
+ BSONObj two = BSON( "a" << 2 );
+ theDataFileMgr.insertWithObjMod( ns(), two );
+ }
+ BSONObj one = BSON( "a" << 1 );
+ theDataFileMgr.insertWithObjMod( ns(), one );
+ BSONObj delSpec = BSON( "a" << 1 << "_id" << NE << 0 );
+ deleteObjects( ns(), delSpec, false );
+ ASSERT( BSON( "a" << 1 ).woCompare( NamespaceDetailsTransient::get_inlock( ns() ).indexForPattern( FieldRangeSet( ns(), delSpec, true ).pattern() ) ) == 0 );
+ ASSERT_EQUALS( 1, NamespaceDetailsTransient::get_inlock( ns() ).nScannedForPattern( FieldRangeSet( ns(), delSpec, true ).pattern() ) );
+ }
+ };
+
+ class DeleteOneScan : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "_id" << 1 ), false, "_id_1" );
+ BSONObj one = BSON( "_id" << 3 << "a" << 1 );
+ BSONObj two = BSON( "_id" << 2 << "a" << 1 );
+ BSONObj three = BSON( "_id" << 1 << "a" << -1 );
+ theDataFileMgr.insertWithObjMod( ns(), one );
+ theDataFileMgr.insertWithObjMod( ns(), two );
+ theDataFileMgr.insertWithObjMod( ns(), three );
+ deleteObjects( ns(), BSON( "_id" << GT << 0 << "a" << GT << 0 ), true );
+ for( boost::shared_ptr<Cursor> c = theDataFileMgr.findAll( ns() ); c->ok(); c->advance() )
+ ASSERT( 3 != c->current().getIntField( "_id" ) );
+ }
+ };
+
+ class DeleteOneIndex : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a" );
+ BSONObj one = BSON( "a" << 2 << "_id" << 0 );
+ BSONObj two = BSON( "a" << 1 << "_id" << 1 );
+ BSONObj three = BSON( "a" << 0 << "_id" << 2 );
+ theDataFileMgr.insertWithObjMod( ns(), one );
+ theDataFileMgr.insertWithObjMod( ns(), two );
+ theDataFileMgr.insertWithObjMod( ns(), three );
+ deleteObjects( ns(), BSON( "a" << GTE << 0 ), true );
+ for( boost::shared_ptr<Cursor> c = theDataFileMgr.findAll( ns() ); c->ok(); c->advance() )
+ ASSERT( 2 != c->current().getIntField( "_id" ) );
+ }
+ };
+
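+ /**
+ * The recorded plan for a query pattern can change: a query whose matches turn up early in a
+ * collection scan records $natural, while a later query on the same pattern that fares better
+ * with the a_1 index records that index instead.
+ */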
+ class TryOtherPlansBeforeFinish : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ for( int i = 0; i < 100; ++i ) {
+ for( int j = 0; j < 2; ++j ) {
+ BSONObj temp = BSON( "a" << 100 - i - 1 << "b" << i );
+ theDataFileMgr.insertWithObjMod( ns(), temp );
+ }
+ }
+ Message m;
+ // Need to return at least 2 records to cause the plan to be recorded.
+ assembleRequest( ns(), QUERY( "b" << 0 << "a" << GTE << 0 ).obj, 2, 0, 0, 0, m );
+ stringstream ss;
+ {
+ DbMessage d(m);
+ QueryMessage q(d);
+ runQuery( m, q);
+ }
+ ASSERT( BSON( "$natural" << 1 ).woCompare( NamespaceDetailsTransient::get_inlock( ns() ).indexForPattern( FieldRangeSet( ns(), BSON( "b" << 0 << "a" << GTE << 0 ), true ).pattern() ) ) == 0 );
+
+ Message m2;
+ assembleRequest( ns(), QUERY( "b" << 99 << "a" << GTE << 0 ).obj, 2, 0, 0, 0, m2 );
+ {
+ DbMessage d(m2);
+ QueryMessage q(d);
+ runQuery( m2, q);
+ }
+ ASSERT( BSON( "a" << 1 ).woCompare( NamespaceDetailsTransient::get_inlock( ns() ).indexForPattern( FieldRangeSet( ns(), BSON( "b" << 0 << "a" << GTE << 0 ), true ).pattern() ) ) == 0 );
+ ASSERT_EQUALS( 3, NamespaceDetailsTransient::get_inlock( ns() ).nScannedForPattern( FieldRangeSet( ns(), BSON( "b" << 0 << "a" << GTE << 0 ), true ).pattern() ) );
+ }
+ };
+
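+ /**
+ * An $in query over an indexed field visits only the intervals for the listed values,
+ * in index order, and in reverse when the sort is descending.
+ */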
+ class InQueryIntervals : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ for( int i = 0; i < 10; ++i ) {
+ BSONObj temp = BSON( "a" << i );
+ theDataFileMgr.insertWithObjMod( ns(), temp );
+ }
+ BSONObj hint = fromjson( "{$hint:{a:1}}" );
+ BSONElement hintElt = hint.firstElement();
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), fromjson( "{a:{$in:[2,3,6,9,11]}}" ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, fromjson( "{a:{$in:[2,3,6,9,11]}}" ), BSONObj(), true, &hintElt );
+ QueryPlan qp( nsd(), 1, s.frsp(), s.originalFrsp(), fromjson( "{a:{$in:[2,3,6,9,11]}}" ), BSONObj() );
+ boost::shared_ptr<Cursor> c = qp.newCursor();
+ double expected[] = { 2, 3, 6, 9 };
+ for( int i = 0; i < 4; ++i, c->advance() ) {
+ ASSERT_EQUALS( expected[ i ], c->current().getField( "a" ).number() );
+ }
+ ASSERT( !c->ok() );
+
+ // now check reverse
+ {
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), fromjson( "{a:{$in:[2,3,6,9,11]}}" ) ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, fromjson( "{a:{$in:[2,3,6,9,11]}}" ), BSON( "a" << -1 ), true, &hintElt );
+ QueryPlan qp( nsd(), 1, s.frsp(), s.originalFrsp(), fromjson( "{a:{$in:[2,3,6,9,11]}}" ), BSON( "a" << -1 ) );
+ boost::shared_ptr<Cursor> c = qp.newCursor();
+ double expected[] = { 9, 6, 3, 2 };
+ for( int i = 0; i < 4; ++i, c->advance() ) {
+ ASSERT_EQUALS( expected[ i ], c->current().getField( "a" ).number() );
+ }
+ ASSERT( !c->ok() );
+ }
+ }
+ };
+
+ class EqualityThenIn : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 << "b" << 1 ), false, "a_1_b_1" );
+ for( int i = 0; i < 10; ++i ) {
+ BSONObj temp = BSON( "a" << 5 << "b" << i );
+ theDataFileMgr.insertWithObjMod( ns(), temp );
+ }
+ BSONObj hint = fromjson( "{$hint:{a:1,b:1}}" );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), fromjson( "{a:5,b:{$in:[2,3,6,9,11]}}" ) ) );
+ QueryPlan qp( nsd(), 1, *frsp, frsp.get(), fromjson( "{a:5,b:{$in:[2,3,6,9,11]}}" ), BSONObj() );
+ boost::shared_ptr<Cursor> c = qp.newCursor();
+ double expected[] = { 2, 3, 6, 9 };
+ ASSERT( c->ok() );
+ for( int i = 0; i < 4; ++i, c->advance() ) {
+ ASSERT( c->ok() );
+ ASSERT_EQUALS( expected[ i ], c->current().getField( "b" ).number() );
+ }
+ ASSERT( !c->ok() );
+ }
+ };
+
+ class NotEqualityThenIn : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 << "b" << 1 ), false, "a_1_b_1" );
+ for( int i = 0; i < 10; ++i ) {
+ BSONObj temp = BSON( "a" << 5 << "b" << i );
+ theDataFileMgr.insertWithObjMod( ns(), temp );
+ }
+ BSONObj hint = fromjson( "{$hint:{a:1,b:1}}" );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), fromjson( "{a:{$gte:5},b:{$in:[2,3,6,9,11]}}" ) ) );
+ QueryPlan qp( nsd(), 1, *frsp, frsp.get(), fromjson( "{a:{$gte:5},b:{$in:[2,3,6,9,11]}}" ), BSONObj() );
+ boost::shared_ptr<Cursor> c = qp.newCursor();
+ int matches[] = { 2, 3, 6, 9 };
+ for( int i = 0; i < 4; ++i, c->advance() ) {
+ ASSERT_EQUALS( matches[ i ], c->current().getField( "b" ).number() );
+ }
+ ASSERT( !c->ok() );
+ }
+ };
+
+ /** Exclude special plan candidate if there are btree plan candidates. SERVER-4531 */
+ class ExcludeSpecialPlanWhenBtreePlan : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << "2d" ), false, "a_2d" );
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ BSONObj query = BSON( "a" << BSON_ARRAY( 0 << 0 ) << "b" << 1 );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), query ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, query, BSONObj() );
+ // Two query plans, btree and collection scan.
+ ASSERT_EQUALS( 2, s.nPlans() );
+ // Not the geo plan.
+ ASSERT( s.firstPlan()->special().empty() );
+ }
+ };
+
+ /** Exclude unindexed plan candidate if there is a special plan candidate. SERVER-4531 */
+ class ExcludeUnindexedPlanWhenSpecialPlan : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << "2d" ), false, "a_2d" );
+ BSONObj query = BSON( "a" << BSON_ARRAY( 0 << 0 ) << "b" << 1 );
+ auto_ptr< FieldRangeSetPair > frsp( new FieldRangeSetPair( ns(), query ) );
+ auto_ptr< FieldRangeSetPair > frspOrig( new FieldRangeSetPair( *frsp ) );
+ QueryPlanSet s( ns(), frsp, frspOrig, query, BSONObj() );
+ // Single query plan.
+ ASSERT_EQUALS( 1, s.nPlans() );
+ // It's the geo plan.
+ ASSERT( !s.firstPlan()->special().empty() );
+ }
+ };
+
+ } // namespace QueryPlanSetTests
+
+ class Base {
+ public:
+ Base() : _ctx( ns() ) {
+ string err;
+ userCreateNS( ns(), BSONObj(), err, false );
+ }
+ ~Base() {
+ if ( !nsd() )
+ return;
+ string s( ns() );
+ dropCollection( ns() );
+ }
+ protected:
+ static const char *ns() { return "unittests.QueryOptimizerTests"; }
+ static NamespaceDetails *nsd() { return nsdetails( ns() ); }
+ private:
+ dblock lk_;
+ Client::Context _ctx;
+ };
+
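+ /**
+ * bestGuessCursor() picks the index matching the sort spec, for plain queries and for $or
+ * queries served through a MultiCursor, even after a different index has been registered
+ * for the query pattern.
+ */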
+ class BestGuess : public Base {
+ public:
+ void run() {
+ Helpers::ensureIndex( ns(), BSON( "a" << 1 ), false, "a_1" );
+ Helpers::ensureIndex( ns(), BSON( "b" << 1 ), false, "b_1" );
+ BSONObj temp = BSON( "a" << 1 );
+ theDataFileMgr.insertWithObjMod( ns(), temp );
+ temp = BSON( "b" << 1 );
+ theDataFileMgr.insertWithObjMod( ns(), temp );
+
+ boost::shared_ptr< Cursor > c = bestGuessCursor( ns(), BSON( "b" << 1 ), BSON( "a" << 1 ) );
+ ASSERT_EQUALS( string( "a" ), c->indexKeyPattern().firstElement().fieldName() );
+ c = bestGuessCursor( ns(), BSON( "a" << 1 ), BSON( "b" << 1 ) );
+ ASSERT_EQUALS( string( "b" ), c->indexKeyPattern().firstElementFieldName() );
+ boost::shared_ptr< MultiCursor > m = dynamic_pointer_cast< MultiCursor >( bestGuessCursor( ns(), fromjson( "{b:1,$or:[{z:1}]}" ), BSON( "a" << 1 ) ) );
+ ASSERT_EQUALS( string( "a" ), m->sub_c()->indexKeyPattern().firstElement().fieldName() );
+ m = dynamic_pointer_cast< MultiCursor >( bestGuessCursor( ns(), fromjson( "{a:1,$or:[{y:1}]}" ), BSON( "b" << 1 ) ) );
+ ASSERT_EQUALS( string( "b" ), m->sub_c()->indexKeyPattern().firstElementFieldName() );
+
+ FieldRangeSet frs( "ns", BSON( "a" << 1 ), true );
+ {
+ SimpleMutex::scoped_lock lk(NamespaceDetailsTransient::_qcMutex);
+ NamespaceDetailsTransient::get_inlock( ns() ).registerIndexForPattern( frs.pattern( BSON( "b" << 1 ) ), BSON( "a" << 1 ), 0 );
+ }
+ m = dynamic_pointer_cast< MultiCursor >( bestGuessCursor( ns(), fromjson( "{a:1,$or:[{y:1}]}" ), BSON( "b" << 1 ) ) );
+ ASSERT_EQUALS( string( "b" ), m->sub_c()->indexKeyPattern().firstElement().fieldName() );
+ }
+ };
+
+ class BestGuessOrSortAssertion : public Base {
+ public:
+ void run() {
+ ASSERT_THROWS( bestGuessCursor( ns(), BSON( "$or" << BSON_ARRAY( BSON( "b" << 1 ) ) ), BSON( "a" << 1 ) ), MsgAssertionException );
+ }
+ };
+
+ class All : public Suite {
+ public:
+ All() : Suite( "queryoptimizer" ) {}
+
+ void setupTests() {
+ __forceLinkGeoPlugin();
+ add<QueryPlanTests::NoIndex>();
+ add<QueryPlanTests::SimpleOrder>();
+ add<QueryPlanTests::MoreIndexThanNeeded>();
+ add<QueryPlanTests::IndexSigns>();
+ add<QueryPlanTests::IndexReverse>();
+ add<QueryPlanTests::NoOrder>();
+ add<QueryPlanTests::EqualWithOrder>();
+ add<QueryPlanTests::Optimal>();
+ add<QueryPlanTests::MoreOptimal>();
+ add<QueryPlanTests::KeyMatch>();
+ add<QueryPlanTests::MoreKeyMatch>();
+ add<QueryPlanTests::ExactKeyQueryTypes>();
+ add<QueryPlanTests::Unhelpful>();
+ add<QueryPlanSetTests::NoIndexes>();
+ add<QueryPlanSetTests::Optimal>();
+ add<QueryPlanSetTests::NoOptimal>();
+ add<QueryPlanSetTests::NoSpec>();
+ add<QueryPlanSetTests::HintSpec>();
+ add<QueryPlanSetTests::HintName>();
+ add<QueryPlanSetTests::NaturalHint>();
+ add<QueryPlanSetTests::NaturalSort>();
+ add<QueryPlanSetTests::BadHint>();
+ add<QueryPlanSetTests::Count>();
+ add<QueryPlanSetTests::QueryMissingNs>();
+ add<QueryPlanSetTests::UnhelpfulIndex>();
+ add<QueryPlanSetTests::SingleException>();
+ add<QueryPlanSetTests::AllException>();
+ add<QueryPlanSetTests::SaveGoodIndex>();
+ add<QueryPlanSetTests::TryAllPlansOnErr>();
+ add<QueryPlanSetTests::FindOne>();
+ add<QueryPlanSetTests::Delete>();
+ add<QueryPlanSetTests::DeleteOneScan>();
+ add<QueryPlanSetTests::DeleteOneIndex>();
+ add<QueryPlanSetTests::TryOtherPlansBeforeFinish>();
+ add<QueryPlanSetTests::InQueryIntervals>();
+ add<QueryPlanSetTests::EqualityThenIn>();
+ add<QueryPlanSetTests::NotEqualityThenIn>();
+ add<QueryPlanSetTests::ExcludeSpecialPlanWhenBtreePlan>();
+ add<QueryPlanSetTests::ExcludeUnindexedPlanWhenSpecialPlan>();
+ add<BestGuess>();
+ add<BestGuessOrSortAssertion>();
+ }
+ } myall;
+
+} // namespace QueryOptimizerTests
+
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
new file mode 100644
index 00000000000..9416ae20723
--- /dev/null
+++ b/src/mongo/dbtests/querytests.cpp
@@ -0,0 +1,1408 @@
+// querytests.cpp : query.{h,cpp} unit tests.
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../db/ops/query.h"
+#include "../db/dbhelpers.h"
+#include "../db/clientcursor.h"
+
+#include "../db/instance.h"
+#include "../db/json.h"
+#include "../db/lasterror.h"
+
+#include "../util/timer.h"
+
+#include "dbtests.h"
+
+namespace mongo {
+ extern int __findingStartInitialTimeout;
+}
+
+namespace QueryTests {
+
+ class Base {
+ dblock lk;
+ Client::Context _context;
+ public:
+ Base() : _context( ns() ) {
+ addIndex( fromjson( "{\"a\":1}" ) );
+ }
+ ~Base() {
+ try {
+ boost::shared_ptr<Cursor> c = theDataFileMgr.findAll( ns() );
+ vector< DiskLoc > toDelete;
+ for(; c->ok(); c->advance() )
+ toDelete.push_back( c->currLoc() );
+ for( vector< DiskLoc >::iterator i = toDelete.begin(); i != toDelete.end(); ++i )
+ theDataFileMgr.deleteRecord( ns(), i->rec(), *i, false );
+ DBDirectClient cl;
+ cl.dropIndexes( ns() );
+ }
+ catch ( ... ) {
+ FAIL( "Exception while cleaning up collection" );
+ }
+ }
+ protected:
+ static const char *ns() {
+ return "unittests.querytests";
+ }
+ static void addIndex( const BSONObj &key ) {
+ BSONObjBuilder b;
+ b.append( "name", key.firstElementFieldName() );
+ b.append( "ns", ns() );
+ b.append( "key", key );
+ BSONObj o = b.done();
+ stringstream indexNs;
+ indexNs << "unittests.system.indexes";
+ theDataFileMgr.insert( indexNs.str().c_str(), o.objdata(), o.objsize() );
+ }
+ static void insert( const char *s ) {
+ insert( fromjson( s ) );
+ }
+ static void insert( const BSONObj &o ) {
+ theDataFileMgr.insert( ns(), o.objdata(), o.objsize() );
+ }
+ };
+
+ class FindOne : public Base {
+ public:
+ void run() {
+ addIndex( BSON( "b" << 1 ) );
+ addIndex( BSON( "c" << 1 ) );
+ insert( BSON( "b" << 2 << "_id" << 0 ) );
+ insert( BSON( "c" << 3 << "_id" << 1 ) );
+ BSONObj query = fromjson( "{$or:[{b:2},{c:3}]}" );
+ BSONObj ret;
+ // Check findOne() returning object.
+ ASSERT( Helpers::findOne( ns(), query, ret, true ) );
+ ASSERT_EQUALS( string( "b" ), ret.firstElement().fieldName() );
+ // Cross check with findOne() returning location.
+ ASSERT_EQUALS( ret, Helpers::findOne( ns(), query, true ).obj() );
+ }
+ };
+
+ class FindOneRequireIndex : public Base {
+ public:
+ void run() {
+ insert( BSON( "b" << 2 << "_id" << 0 ) );
+ BSONObj query = fromjson( "{b:2}" );
+ BSONObj ret;
+
+ // Check findOne() returning object, allowing unindexed scan.
+ ASSERT( Helpers::findOne( ns(), query, ret, false ) );
+ // Check findOne() returning location, allowing unindexed scan.
+ ASSERT_EQUALS( ret, Helpers::findOne( ns(), query, false ).obj() );
+
+ // Check findOne() returning object, requiring indexed scan without index.
+ ASSERT_THROWS( Helpers::findOne( ns(), query, ret, true ), MsgAssertionException );
+ // Check findOne() returning location, requiring indexed scan without index.
+ ASSERT_THROWS( Helpers::findOne( ns(), query, true ), MsgAssertionException );
+
+ addIndex( BSON( "b" << 1 ) );
+ // Check findOne() returning object, requiring indexed scan with index.
+ ASSERT( Helpers::findOne( ns(), query, ret, false ) );
+ // Check findOne() returning location, requiring indexed scan with index.
+ ASSERT_EQUALS( ret, Helpers::findOne( ns(), query, false ).obj() );
+ }
+ };
+
+ class FindOneEmptyObj : public Base {
+ public:
+ void run() {
+ // We don't normally allow empty objects in the database, but test that we can find
+ // an empty object (one might be allowed inside a reserved namespace at some point).
+ dblock lk;
+ Client::Context ctx( "unittests.querytests" );
+ // Set up security so godinsert command can run.
+ cc().getAuthenticationInfo()->isLocalHost = true;
+ DBDirectClient cl;
+ BSONObj info;
+ ASSERT( cl.runCommand( "unittests", BSON( "godinsert" << "querytests" << "obj" << BSONObj() ), info ) );
+ insert( BSONObj() );
+ BSONObj query;
+ BSONObj ret;
+ ASSERT( Helpers::findOne( ns(), query, ret, false ) );
+ ASSERT( ret.isEmpty() );
+ ASSERT_EQUALS( ret, Helpers::findOne( ns(), query, false ).obj() );
+ }
+ };
+
+ class ClientBase {
+ public:
+ ClientBase() {
+ mongo::lastError.reset( new LastError() );
+ }
+ ~ClientBase() {
+ //mongo::lastError.release();
+ }
+ protected:
+ static void insert( const char *ns, BSONObj o ) {
+ client_.insert( ns, o );
+ }
+ static void update( const char *ns, BSONObj q, BSONObj o, bool upsert = 0 ) {
+ client_.update( ns, Query( q ), o, upsert );
+ }
+ static bool error() {
+ return !client_.getPrevError().getField( "err" ).isNull();
+ }
+ DBDirectClient &client() const { return client_; }
+
+ static DBDirectClient client_;
+ };
+ DBDirectClient ClientBase::client_;
+
+ class BoundedKey : public ClientBase {
+ public:
+ ~BoundedKey() {
+ client().dropCollection( "unittests.querytests.BoundedKey" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.BoundedKey";
+ insert( ns, BSON( "a" << 1 ) );
+ BSONObjBuilder a;
+ a.appendMaxKey( "$lt" );
+ BSONObj limit = a.done();
+ ASSERT( !client().findOne( ns, QUERY( "a" << limit ) ).isEmpty() );
+ client().ensureIndex( ns, BSON( "a" << 1 ) );
+ ASSERT( !client().findOne( ns, QUERY( "a" << limit ).hint( BSON( "a" << 1 ) ) ).isEmpty() );
+ }
+ };
+
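+ /** A cursor id obtained from a decoupled cursor can be passed to getMore() to continue iterating. */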
+ class GetMore : public ClientBase {
+ public:
+ ~GetMore() {
+ client().dropCollection( "unittests.querytests.GetMore" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.GetMore";
+ insert( ns, BSON( "a" << 1 ) );
+ insert( ns, BSON( "a" << 2 ) );
+ insert( ns, BSON( "a" << 3 ) );
+ auto_ptr< DBClientCursor > cursor = client().query( ns, BSONObj(), 2 );
+ long long cursorId = cursor->getCursorId();
+ cursor->decouple();
+ cursor.reset();
+ cursor = client().getMore( ns, cursorId );
+ ASSERT( cursor->more() );
+ ASSERT_EQUALS( 3, cursor->next().getIntField( "a" ) );
+ }
+ };
+
+ class PositiveLimit : public ClientBase {
+ public:
+ const char* ns;
+ PositiveLimit() : ns("unittests.querytests.PositiveLimit") {}
+ ~PositiveLimit() {
+ client().dropCollection( ns );
+ }
+
+ void testLimit(int limit) {
+ ASSERT_EQUALS(client().query( ns, BSONObj(), limit )->itcount(), limit);
+ }
+ void run() {
+ for(int i=0; i<1000; i++)
+ insert( ns, BSON( GENOID << "i" << i ) );
+
+ ASSERT_EQUALS( client().query(ns, BSONObj(), 1 )->itcount(), 1);
+ ASSERT_EQUALS( client().query(ns, BSONObj(), 10 )->itcount(), 10);
+ ASSERT_EQUALS( client().query(ns, BSONObj(), 101 )->itcount(), 101);
+ ASSERT_EQUALS( client().query(ns, BSONObj(), 999 )->itcount(), 999);
+ ASSERT_EQUALS( client().query(ns, BSONObj(), 1000 )->itcount(), 1000);
+ ASSERT_EQUALS( client().query(ns, BSONObj(), 1001 )->itcount(), 1000);
+ ASSERT_EQUALS( client().query(ns, BSONObj(), 0 )->itcount(), 1000);
+ }
+ };
+
+ class ReturnOneOfManyAndTail : public ClientBase {
+ public:
+ ~ReturnOneOfManyAndTail() {
+ client().dropCollection( "unittests.querytests.ReturnOneOfManyAndTail" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.ReturnOneOfManyAndTail";
+ client().createCollection( ns, 1024, true );
+ insert( ns, BSON( "a" << 0 ) );
+ insert( ns, BSON( "a" << 1 ) );
+ insert( ns, BSON( "a" << 2 ) );
+ auto_ptr< DBClientCursor > c = client().query( ns, QUERY( "a" << GT << 0 ).hint( BSON( "$natural" << 1 ) ), 1, 0, 0, QueryOption_CursorTailable );
+ // If only one result requested, a cursor is not saved.
+ ASSERT_EQUALS( 0, c->getCursorId() );
+ ASSERT( c->more() );
+ ASSERT_EQUALS( 1, c->next().getIntField( "a" ) );
+ }
+ };
+
+ class TailNotAtEnd : public ClientBase {
+ public:
+ ~TailNotAtEnd() {
+ client().dropCollection( "unittests.querytests.TailNotAtEnd" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.TailNotAtEnd";
+ client().createCollection( ns, 2047, true );
+ insert( ns, BSON( "a" << 0 ) );
+ insert( ns, BSON( "a" << 1 ) );
+ insert( ns, BSON( "a" << 2 ) );
+ auto_ptr< DBClientCursor > c = client().query( ns, Query().hint( BSON( "$natural" << 1 ) ), 2, 0, 0, QueryOption_CursorTailable );
+ ASSERT( 0 != c->getCursorId() );
+ while( c->more() )
+ c->next();
+ ASSERT( 0 != c->getCursorId() );
+ insert( ns, BSON( "a" << 3 ) );
+ insert( ns, BSON( "a" << 4 ) );
+ insert( ns, BSON( "a" << 5 ) );
+ insert( ns, BSON( "a" << 6 ) );
+ ASSERT( c->more() );
+ ASSERT_EQUALS( 3, c->next().getIntField( "a" ) );
+ }
+ };
+
+ class EmptyTail : public ClientBase {
+ public:
+ ~EmptyTail() {
+ client().dropCollection( "unittests.querytests.EmptyTail" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.EmptyTail";
+ client().createCollection( ns, 1900, true );
+ auto_ptr< DBClientCursor > c = client().query( ns, Query().hint( BSON( "$natural" << 1 ) ), 2, 0, 0, QueryOption_CursorTailable );
+ ASSERT_EQUALS( 0, c->getCursorId() );
+ ASSERT( c->isDead() );
+ insert( ns, BSON( "a" << 0 ) );
+ c = client().query( ns, QUERY( "a" << 1 ).hint( BSON( "$natural" << 1 ) ), 2, 0, 0, QueryOption_CursorTailable );
+ ASSERT( 0 != c->getCursorId() );
+ ASSERT( !c->isDead() );
+ }
+ };
+
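+ /**
+ * When the document a tailable cursor is positioned at is removed by capped collection
+ * wrap-around, the cursor dies rather than continuing.
+ */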
+ class TailableDelete : public ClientBase {
+ public:
+ ~TailableDelete() {
+ client().dropCollection( "unittests.querytests.TailableDelete" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.TailableDelete";
+ client().createCollection( ns, 8192, true, 2 );
+ insert( ns, BSON( "a" << 0 ) );
+ insert( ns, BSON( "a" << 1 ) );
+ auto_ptr< DBClientCursor > c = client().query( ns, Query().hint( BSON( "$natural" << 1 ) ), 2, 0, 0, QueryOption_CursorTailable );
+ c->next();
+ c->next();
+ ASSERT( !c->more() );
+ insert( ns, BSON( "a" << 2 ) );
+ insert( ns, BSON( "a" << 3 ) );
+ ASSERT( !c->more() );
+ ASSERT_EQUALS( 0, c->getCursorId() );
+ }
+ };
+
+ class TailableInsertDelete : public ClientBase {
+ public:
+ ~TailableInsertDelete() {
+ client().dropCollection( "unittests.querytests.TailableInsertDelete" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.TailableInsertDelete";
+ client().createCollection( ns, 1330, true );
+ insert( ns, BSON( "a" << 0 ) );
+ insert( ns, BSON( "a" << 1 ) );
+ auto_ptr< DBClientCursor > c = client().query( ns, Query().hint( BSON( "$natural" << 1 ) ), 2, 0, 0, QueryOption_CursorTailable );
+ c->next();
+ c->next();
+ ASSERT( !c->more() );
+ insert( ns, BSON( "a" << 2 ) );
+ client().remove( ns, QUERY( "a" << 1 ) );
+ ASSERT( c->more() );
+ ASSERT_EQUALS( 2, c->next().getIntField( "a" ) );
+ ASSERT( !c->more() );
+ }
+ };
+
+ class TailCappedOnly : public ClientBase {
+ public:
+ ~TailCappedOnly() {
+ client().dropCollection( "unittest.querytests.TailCappedOnly" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.TailCappedOnly";
+ client().insert( ns, BSONObj() );
+ auto_ptr< DBClientCursor > c = client().query( ns, BSONObj(), 0, 0, 0, QueryOption_CursorTailable );
+ ASSERT( c->isDead() );
+ ASSERT( !client().getLastError().empty() );
+ }
+ };
+
+ class TailableQueryOnId : public ClientBase {
+ public:
+ ~TailableQueryOnId() {
+ client().dropCollection( "unittests.querytests.TailableQueryOnId" );
+ }
+
+ void insertA(const char* ns, int a) {
+ BSONObjBuilder b;
+ b.appendOID("_id", 0, true);
+ b.appendOID("value", 0, true);
+ b.append("a", a);
+ insert(ns, b.obj());
+ }
+
+ void run() {
+ const char *ns = "unittests.querytests.TailableQueryOnId";
+ BSONObj info;
+ client().runCommand( "unittests", BSON( "create" << "querytests.TailableQueryOnId" << "capped" << true << "size" << 8192 << "autoIndexId" << true ), info );
+ insertA( ns, 0 );
+ insertA( ns, 1 );
+ auto_ptr< DBClientCursor > c1 = client().query( ns, QUERY( "a" << GT << -1 ), 0, 0, 0, QueryOption_CursorTailable );
+ OID id;
+ id.init("000000000000000000000000");
+ auto_ptr< DBClientCursor > c2 = client().query( ns, QUERY( "value" << GT << id ), 0, 0, 0, QueryOption_CursorTailable );
+ c1->next();
+ c1->next();
+ ASSERT( !c1->more() );
+ c2->next();
+ c2->next();
+ ASSERT( !c2->more() );
+ insertA( ns, 2 );
+ ASSERT( c1->more() );
+ ASSERT_EQUALS( 2, c1->next().getIntField( "a" ) );
+ ASSERT( !c1->more() );
+ ASSERT( c2->more() );
+ ASSERT_EQUALS( 2, c2->next().getIntField( "a" ) ); // SERVER-645
+ ASSERT( !c2->more() );
+ ASSERT( !c2->isDead() );
+ }
+ };
+
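+ /**
+ * With QueryOption_OplogReplay a ts query skips ahead to the first matching entry, and a
+ * reissued query sees entries added since.
+ */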
+ class OplogReplayMode : public ClientBase {
+ public:
+ ~OplogReplayMode() {
+ client().dropCollection( "unittests.querytests.OplogReplayMode" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.OplogReplayMode";
+ insert( ns, BSON( "ts" << 0 ) );
+ insert( ns, BSON( "ts" << 1 ) );
+ insert( ns, BSON( "ts" << 2 ) );
+ auto_ptr< DBClientCursor > c = client().query( ns, QUERY( "ts" << GT << 1 ).hint( BSON( "$natural" << 1 ) ), 0, 0, 0, QueryOption_OplogReplay );
+ ASSERT( c->more() );
+ ASSERT_EQUALS( 2, c->next().getIntField( "ts" ) );
+ ASSERT( !c->more() );
+
+ insert( ns, BSON( "ts" << 3 ) );
+ c = client().query( ns, QUERY( "ts" << GT << 1 ).hint( BSON( "$natural" << 1 ) ), 0, 0, 0, QueryOption_OplogReplay );
+ ASSERT( c->more() );
+ ASSERT_EQUALS( 2, c->next().getIntField( "ts" ) );
+ ASSERT( c->more() );
+ }
+ };
+
+ class BasicCount : public ClientBase {
+ public:
+ ~BasicCount() {
+ client().dropCollection( "unittests.querytests.BasicCount" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.BasicCount";
+ client().ensureIndex( ns, BSON( "a" << 1 ) );
+ count( 0 );
+ insert( ns, BSON( "a" << 3 ) );
+ count( 0 );
+ insert( ns, BSON( "a" << 4 ) );
+ count( 1 );
+ insert( ns, BSON( "a" << 5 ) );
+ count( 1 );
+ insert( ns, BSON( "a" << 4 ) );
+ count( 2 );
+ }
+ private:
+ void count( unsigned long long c ) const {
+ ASSERT_EQUALS( c, client().count( "unittests.querytests.BasicCount", BSON( "a" << 4 ) ) );
+ }
+ };
+
+ class ArrayId : public ClientBase {
+ public:
+ ~ArrayId() {
+ client().dropCollection( "unittests.querytests.ArrayId" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.ArrayId";
+ client().ensureIndex( ns, BSON( "_id" << 1 ) );
+ ASSERT( !error() );
+ client().insert( ns, fromjson( "{'_id':[1,2]}" ) );
+ ASSERT( error() );
+ }
+ };
+
+ class UnderscoreNs : public ClientBase {
+ public:
+ ~UnderscoreNs() {
+ client().dropCollection( "unittests.querytests._UnderscoreNs" );
+ }
+ void run() {
+ ASSERT( !error() );
+ const char *ns = "unittests.querytests._UnderscoreNs";
+ ASSERT( client().findOne( ns, "{}" ).isEmpty() );
+ client().insert( ns, BSON( "a" << 1 ) );
+ ASSERT_EQUALS( 1, client().findOne( ns, "{}" ).getIntField( "a" ) );
+ ASSERT( !error() );
+ }
+ };
+
+ class EmptyFieldSpec : public ClientBase {
+ public:
+ ~EmptyFieldSpec() {
+ client().dropCollection( "unittests.querytests.EmptyFieldSpec" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.EmptyFieldSpec";
+ client().insert( ns, BSON( "a" << 1 ) );
+ ASSERT( !client().findOne( ns, "" ).isEmpty() );
+ BSONObj empty;
+ ASSERT( !client().findOne( ns, "", &empty ).isEmpty() );
+ }
+ };
+
+ class MultiNe : public ClientBase {
+ public:
+ ~MultiNe() {
+ client().dropCollection( "unittests.querytests.Ne" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.Ne";
+ client().insert( ns, fromjson( "{a:[1,2]}" ) );
+ ASSERT( client().findOne( ns, fromjson( "{a:{$ne:1}}" ) ).isEmpty() );
+ BSONObj spec = fromjson( "{a:{$ne:1,$ne:2}}" );
+ ASSERT( client().findOne( ns, spec ).isEmpty() );
+ }
+ };
+
+ class EmbeddedNe : public ClientBase {
+ public:
+ ~EmbeddedNe() {
+ client().dropCollection( "unittests.querytests.NestedNe" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.NestedNe";
+ client().insert( ns, fromjson( "{a:[{b:1},{b:2}]}" ) );
+ ASSERT( client().findOne( ns, fromjson( "{'a.b':{$ne:1}}" ) ).isEmpty() );
+ }
+ };
+
+ class EmbeddedNumericTypes : public ClientBase {
+ public:
+ ~EmbeddedNumericTypes() {
+ client().dropCollection( "unittests.querytests.NumericEmbedded" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.NumericEmbedded";
+ client().insert( ns, BSON( "a" << BSON ( "b" << 1 ) ) );
+ ASSERT( ! client().findOne( ns, BSON( "a" << BSON ( "b" << 1.0 ) ) ).isEmpty() );
+ client().ensureIndex( ns , BSON( "a" << 1 ) );
+ ASSERT( ! client().findOne( ns, BSON( "a" << BSON ( "b" << 1.0 ) ) ).isEmpty() );
+ }
+ };
+
+ class AutoResetIndexCache : public ClientBase {
+ public:
+ ~AutoResetIndexCache() {
+ client().dropCollection( "unittests.querytests.AutoResetIndexCache" );
+ }
+ static const char *ns() { return "unittests.querytests.AutoResetIndexCache"; }
+ static const char *idxNs() { return "unittests.system.indexes"; }
+ void index() const { ASSERT( !client().findOne( idxNs(), BSON( "name" << NE << "_id_" ) ).isEmpty() ); }
+ void noIndex() const {
+ BSONObj o = client().findOne( idxNs(), BSON( "name" << NE << "_id_" ) );
+ if( !o.isEmpty() ) {
+ cout << o.toString() << endl;
+ ASSERT( false );
+ }
+ }
+ void checkIndex() {
+ client().ensureIndex( ns(), BSON( "a" << 1 ) );
+ index();
+ }
+ void run() {
+ client().dropDatabase( "unittests" );
+ noIndex();
+ checkIndex();
+ client().dropCollection( ns() );
+ noIndex();
+ checkIndex();
+ client().dropDatabase( "unittests" );
+ noIndex();
+ checkIndex();
+ }
+ };
+
+ class UniqueIndex : public ClientBase {
+ public:
+ ~UniqueIndex() {
+ client().dropCollection( "unittests.querytests.UniqueIndex" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.UniqueIndex";
+ client().ensureIndex( ns, BSON( "a" << 1 ), true );
+ client().insert( ns, BSON( "a" << 4 << "b" << 2 ) );
+ client().insert( ns, BSON( "a" << 4 << "b" << 3 ) );
+ ASSERT_EQUALS( 1U, client().count( ns, BSONObj() ) );
+ client().dropCollection( ns );
+ client().ensureIndex( ns, BSON( "b" << 1 ), true );
+ client().insert( ns, BSON( "a" << 4 << "b" << 2 ) );
+ client().insert( ns, BSON( "a" << 4 << "b" << 3 ) );
+ ASSERT_EQUALS( 2U, client().count( ns, BSONObj() ) );
+ }
+ };
+
+ class UniqueIndexPreexistingData : public ClientBase {
+ public:
+ ~UniqueIndexPreexistingData() {
+ client().dropCollection( "unittests.querytests.UniqueIndexPreexistingData" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.UniqueIndexPreexistingData";
+ client().insert( ns, BSON( "a" << 4 << "b" << 2 ) );
+ client().insert( ns, BSON( "a" << 4 << "b" << 3 ) );
+ client().ensureIndex( ns, BSON( "a" << 1 ), true );
+ ASSERT_EQUALS( 0U, client().count( "unittests.system.indexes", BSON( "ns" << ns << "name" << NE << "_id_" ) ) );
+ }
+ };
+
+ class SubobjectInArray : public ClientBase {
+ public:
+ ~SubobjectInArray() {
+ client().dropCollection( "unittests.querytests.SubobjectInArray" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.SubobjectInArray";
+ client().insert( ns, fromjson( "{a:[{b:{c:1}}]}" ) );
+ ASSERT( !client().findOne( ns, BSON( "a.b.c" << 1 ) ).isEmpty() );
+ ASSERT( !client().findOne( ns, fromjson( "{'a.c':null}" ) ).isEmpty() );
+ }
+ };
+
+ class Size : public ClientBase {
+ public:
+ ~Size() {
+ client().dropCollection( "unittests.querytests.Size" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.Size";
+ client().insert( ns, fromjson( "{a:[1,2,3]}" ) );
+ client().ensureIndex( ns, BSON( "a" << 1 ) );
+ ASSERT( client().query( ns, QUERY( "a" << mongo::SIZE << 3 ).hint( BSON( "a" << 1 ) ) )->more() );
+ }
+ };
+
+ class FullArray : public ClientBase {
+ public:
+ ~FullArray() {
+ client().dropCollection( "unittests.querytests.IndexedArray" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.IndexedArray";
+ client().insert( ns, fromjson( "{a:[1,2,3]}" ) );
+ ASSERT( client().query( ns, Query( "{a:[1,2,3]}" ) )->more() );
+ client().ensureIndex( ns, BSON( "a" << 1 ) );
+ ASSERT( client().query( ns, Query( "{a:{$in:[1,[1,2,3]]}}" ).hint( BSON( "a" << 1 ) ) )->more() );
+ ASSERT( client().query( ns, Query( "{a:[1,2,3]}" ).hint( BSON( "a" << 1 ) ) )->more() ); // SERVER-146
+ }
+ };
+
+ class InsideArray : public ClientBase {
+ public:
+ ~InsideArray() {
+ client().dropCollection( "unittests.querytests.InsideArray" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.InsideArray";
+ client().insert( ns, fromjson( "{a:[[1],2]}" ) );
+ check( "$natural" );
+ client().ensureIndex( ns, BSON( "a" << 1 ) );
+ check( "a" ); // SERVER-146
+ }
+ private:
+ void check( const string &hintField ) {
+ const char *ns = "unittests.querytests.InsideArray";
+ ASSERT( client().query( ns, Query( "{a:[[1],2]}" ).hint( BSON( hintField << 1 ) ) )->more() );
+ ASSERT( client().query( ns, Query( "{a:[1]}" ).hint( BSON( hintField << 1 ) ) )->more() );
+ ASSERT( client().query( ns, Query( "{a:2}" ).hint( BSON( hintField << 1 ) ) )->more() );
+ ASSERT( !client().query( ns, Query( "{a:1}" ).hint( BSON( hintField << 1 ) ) )->more() );
+ }
+ };
+
+ class IndexInsideArrayCorrect : public ClientBase {
+ public:
+ ~IndexInsideArrayCorrect() {
+ client().dropCollection( "unittests.querytests.IndexInsideArrayCorrect" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.IndexInsideArrayCorrect";
+ client().insert( ns, fromjson( "{'_id':1,a:[1]}" ) );
+ client().insert( ns, fromjson( "{'_id':2,a:[[1]]}" ) );
+ client().ensureIndex( ns, BSON( "a" << 1 ) );
+ ASSERT_EQUALS( 1, client().query( ns, Query( "{a:[1]}" ).hint( BSON( "a" << 1 ) ) )->next().getIntField( "_id" ) );
+ }
+ };
+
+ class SubobjArr : public ClientBase {
+ public:
+ ~SubobjArr() {
+ client().dropCollection( "unittests.querytests.SubobjArr" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.SubobjArr";
+ client().insert( ns, fromjson( "{a:[{b:[1]}]}" ) );
+ check( "$natural" );
+ client().ensureIndex( ns, BSON( "a" << 1 ) );
+ check( "a" );
+ }
+ private:
+ void check( const string &hintField ) {
+ const char *ns = "unittests.querytests.SubobjArr";
+ ASSERT( client().query( ns, Query( "{'a.b':1}" ).hint( BSON( hintField << 1 ) ) )->more() );
+ ASSERT( client().query( ns, Query( "{'a.b':[1]}" ).hint( BSON( hintField << 1 ) ) )->more() );
+ }
+ };
+
+ class MinMax : public ClientBase {
+ public:
+ MinMax() : ns( "unittests.querytests.MinMax" ) {}
+ ~MinMax() {
+ client().dropCollection( "unittests.querytests.MinMax" );
+ }
+ void run() {
+ client().ensureIndex( ns, BSON( "a" << 1 << "b" << 1 ) );
+ client().insert( ns, BSON( "a" << 1 << "b" << 1 ) );
+ client().insert( ns, BSON( "a" << 1 << "b" << 2 ) );
+ client().insert( ns, BSON( "a" << 2 << "b" << 1 ) );
+ client().insert( ns, BSON( "a" << 2 << "b" << 2 ) );
+
+ ASSERT_EQUALS( 4, count( client().query( ns, BSONObj() ) ) );
+ BSONObj hints[] = { BSONObj(), BSON( "a" << 1 << "b" << 1 ) };
+ for( int i = 0; i < 2; ++i ) {
+ check( 0, 0, 3, 3, 4, hints[ i ] );
+ check( 1, 1, 2, 2, 3, hints[ i ] );
+ check( 1, 2, 2, 2, 2, hints[ i ] );
+ check( 1, 2, 2, 1, 1, hints[ i ] );
+
+ auto_ptr< DBClientCursor > c = query( 1, 2, 2, 2, hints[ i ] );
+ BSONObj obj = c->next();
+ ASSERT_EQUALS( 1, obj.getIntField( "a" ) );
+ ASSERT_EQUALS( 2, obj.getIntField( "b" ) );
+ obj = c->next();
+ ASSERT_EQUALS( 2, obj.getIntField( "a" ) );
+ ASSERT_EQUALS( 1, obj.getIntField( "b" ) );
+ ASSERT( !c->more() );
+ }
+ }
+ private:
+ auto_ptr< DBClientCursor > query( int minA, int minB, int maxA, int maxB, const BSONObj &hint ) {
+ Query q;
+ q = q.minKey( BSON( "a" << minA << "b" << minB ) ).maxKey( BSON( "a" << maxA << "b" << maxB ) );
+ if ( !hint.isEmpty() )
+ q.hint( hint );
+ return client().query( ns, q );
+ }
+ void check( int minA, int minB, int maxA, int maxB, int expectedCount, const BSONObj &hint = empty_ ) {
+ ASSERT_EQUALS( expectedCount, count( query( minA, minB, maxA, maxB, hint ) ) );
+ }
+ int count( auto_ptr< DBClientCursor > c ) {
+ int ret = 0;
+ while( c->more() ) {
+ ++ret;
+ c->next();
+ }
+ return ret;
+ }
+ const char *ns;
+ static BSONObj empty_;
+ };
+ BSONObj MinMax::empty_;
+
+ class MatchCodeCodeWScope : public ClientBase {
+ public:
+ MatchCodeCodeWScope() : _ns( "unittests.querytests.MatchCodeCodeWScope" ) {}
+ ~MatchCodeCodeWScope() {
+ client().dropCollection( "unittests.querytests.MatchCodeCodeWScope" );
+ }
+ void run() {
+ checkMatch();
+ client().ensureIndex( _ns, BSON( "a" << 1 ) );
+ checkMatch();
+ // Use explain queries to check index bounds.
+ {
+ BSONObj explain = client().findOne( _ns, QUERY( "a" << BSON( "$type" << (int)Code ) ).explain() );
+ BSONObjBuilder lower;
+ lower.appendCode( "", "" );
+ BSONObjBuilder upper;
+ upper.appendCodeWScope( "", "", BSONObj() );
+ ASSERT( lower.done().firstElement().valuesEqual( explain[ "indexBounds" ].Obj()[ "a" ].Array()[ 0 ].Array()[ 0 ] ) );
+ ASSERT( upper.done().firstElement().valuesEqual( explain[ "indexBounds" ].Obj()[ "a" ].Array()[ 0 ].Array()[ 1 ] ) );
+ }
+ {
+ BSONObj explain = client().findOne( _ns, QUERY( "a" << BSON( "$type" << (int)CodeWScope ) ).explain() );
+ BSONObjBuilder lower;
+ lower.appendCodeWScope( "", "", BSONObj() );
+ // This upper bound may change if a new bson type is added.
+ BSONObjBuilder upper;
+ upper << "" << BSON( "$maxElement" << 1 );
+ ASSERT( lower.done().firstElement().valuesEqual( explain[ "indexBounds" ].Obj()[ "a" ].Array()[ 0 ].Array()[ 0 ] ) );
+ ASSERT( upper.done().firstElement().valuesEqual( explain[ "indexBounds" ].Obj()[ "a" ].Array()[ 0 ].Array()[ 1 ] ) );
+ }
+ }
+ private:
+ void checkMatch() {
+ client().remove( _ns, BSONObj() );
+
+ client().insert( _ns, code() );
+ client().insert( _ns, codeWScope() );
+
+ ASSERT_EQUALS( 1U, client().count( _ns, code() ) );
+ ASSERT_EQUALS( 1U, client().count( _ns, codeWScope() ) );
+
+ ASSERT_EQUALS( 1U, client().count( _ns, BSON( "a" << BSON( "$type" << (int)Code ) ) ) );
+ ASSERT_EQUALS( 1U, client().count( _ns, BSON( "a" << BSON( "$type" << (int)CodeWScope ) ) ) );
+ }
+ BSONObj code() const {
+ BSONObjBuilder codeBuilder;
+ codeBuilder.appendCode( "a", "return 1;" );
+ return codeBuilder.obj();
+ }
+ BSONObj codeWScope() const {
+ BSONObjBuilder codeWScopeBuilder;
+ codeWScopeBuilder.appendCodeWScope( "a", "return 1;", BSONObj() );
+ return codeWScopeBuilder.obj();
+ }
+ const char *_ns;
+ };
+
+ class MatchDBRefType : public ClientBase {
+ public:
+ MatchDBRefType() : _ns( "unittests.querytests.MatchDBRefType" ) {}
+ ~MatchDBRefType() {
+ client().dropCollection( "unittests.querytests.MatchDBRefType" );
+ }
+ void run() {
+ checkMatch();
+ client().ensureIndex( _ns, BSON( "a" << 1 ) );
+ checkMatch();
+ }
+ private:
+ void checkMatch() {
+ client().remove( _ns, BSONObj() );
+ client().insert( _ns, dbref() );
+ ASSERT_EQUALS( 1U, client().count( _ns, dbref() ) );
+ ASSERT_EQUALS( 1U, client().count( _ns, BSON( "a" << BSON( "$type" << (int)DBRef ) ) ) );
+ }
+ BSONObj dbref() const {
+ BSONObjBuilder b;
+ OID oid;
+ b.appendDBRef( "a", "ns", oid );
+ return b.obj();
+ }
+ const char *_ns;
+ };
+
+ class DirectLocking : public ClientBase {
+ public:
+ void run() {
+ dblock lk;
+ Client::Context ctx( "unittests.DirectLocking" );
+ client().remove( "a.b", BSONObj() );
+ ASSERT_EQUALS( "unittests", cc().database()->name );
+ }
+ const char *ns;
+ };
+
+ class FastCountIn : public ClientBase {
+ public:
+ ~FastCountIn() {
+ client().dropCollection( "unittests.querytests.FastCountIn" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.FastCountIn";
+ client().insert( ns, BSON( "i" << "a" ) );
+ client().ensureIndex( ns, BSON( "i" << 1 ) );
+ ASSERT_EQUALS( 1U, client().count( ns, fromjson( "{i:{$in:['a']}}" ) ) );
+ }
+ };
+
+ class EmbeddedArray : public ClientBase {
+ public:
+ ~EmbeddedArray() {
+ client().dropCollection( "unittests.querytests.EmbeddedArray" );
+ }
+ void run() {
+ const char *ns = "unittests.querytests.EmbeddedArray";
+ client().insert( ns, fromjson( "{foo:{bar:['spam']}}" ) );
+ client().insert( ns, fromjson( "{foo:{bar:['spam','eggs']}}" ) );
+ client().insert( ns, fromjson( "{bar:['spam']}" ) );
+ client().insert( ns, fromjson( "{bar:['spam','eggs']}" ) );
+ ASSERT_EQUALS( 2U, client().count( ns, BSON( "bar" << "spam" ) ) );
+ ASSERT_EQUALS( 2U, client().count( ns, BSON( "foo.bar" << "spam" ) ) );
+ }
+ };
+
+ class DifferentNumbers : public ClientBase {
+ public:
+ ~DifferentNumbers() {
+ client().dropCollection( "unittests.querytests.DifferentNumbers" );
+ }
+ void t( const char * ns ) {
+ auto_ptr< DBClientCursor > cursor = client().query( ns, Query().sort( "7" ) );
+ while ( cursor->more() ) {
+ BSONObj o = cursor->next();
+ assert( o.valid() );
+ //cout << " foo " << o << endl;
+ }
+
+ }
+ void run() {
+ const char *ns = "unittests.querytests.DifferentNumbers";
+ { BSONObjBuilder b; b.append( "7" , (int)4 ); client().insert( ns , b.obj() ); }
+ { BSONObjBuilder b; b.append( "7" , (long long)2 ); client().insert( ns , b.obj() ); }
+ { BSONObjBuilder b; b.appendNull( "7" ); client().insert( ns , b.obj() ); }
+ { BSONObjBuilder b; b.append( "7" , "b" ); client().insert( ns , b.obj() ); }
+ { BSONObjBuilder b; b.appendNull( "8" ); client().insert( ns , b.obj() ); }
+ { BSONObjBuilder b; b.append( "7" , (double)3.7 ); client().insert( ns , b.obj() ); }
+
+ t(ns);
+ client().ensureIndex( ns , BSON( "7" << 1 ) );
+ t(ns);
+ }
+ };
+
+ class CollectionBase : public ClientBase {
+ public:
+
+ CollectionBase( string leaf ) {
+ _ns = "unittests.querytests.";
+ _ns += leaf;
+ client().dropCollection( ns() );
+ }
+
+ virtual ~CollectionBase() {
+ client().dropCollection( ns() );
+ }
+
+ int count() {
+ return (int) client().count( ns() );
+ }
+
+ const char * ns() {
+ return _ns.c_str();
+ }
+
+ private:
+ string _ns;
+ };
+
+ class SymbolStringSame : public CollectionBase {
+ public:
+ SymbolStringSame() : CollectionBase( "symbolstringsame" ) {}
+
+ void run() {
+ { BSONObjBuilder b; b.appendSymbol( "x" , "eliot" ); b.append( "z" , 17 ); client().insert( ns() , b.obj() ); }
+ ASSERT_EQUALS( 17 , client().findOne( ns() , BSONObj() )["z"].number() );
+ {
+ BSONObjBuilder b;
+ b.appendSymbol( "x" , "eliot" );
+ ASSERT_EQUALS( 17 , client().findOne( ns() , b.obj() )["z"].number() );
+ }
+ ASSERT_EQUALS( 17 , client().findOne( ns() , BSON( "x" << "eliot" ) )["z"].number() );
+ client().ensureIndex( ns() , BSON( "x" << 1 ) );
+ ASSERT_EQUALS( 17 , client().findOne( ns() , BSON( "x" << "eliot" ) )["z"].number() );
+ }
+ };
+
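+ /**
+ * Tail a small capped collection while it is written to heavily; the cursor keeps seeing new
+ * documents until it is finally overrun and reported dead.
+ */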
+ class TailableCappedRaceCondition : public CollectionBase {
+ public:
+
+ TailableCappedRaceCondition() : CollectionBase( "tailablecappedrace" ) {
+ client().dropCollection( ns() );
+ _n = 0;
+ }
+ void run() {
+ string err;
+
+ writelock lk("");
+ Client::Context ctx( "unittests" );
+
+ // note that extents are always at least 4KB now - so this will get rounded up a bit.
+ ASSERT( userCreateNS( ns() , fromjson( "{ capped : true , size : 2000 }" ) , err , false ) );
+ for ( int i=0; i<200; i++ ) {
+ insertNext();
+// cout << count() << endl;
+ ASSERT( count() < 90 );
+ }
+
+ int a = count();
+
+ auto_ptr< DBClientCursor > c = client().query( ns() , QUERY( "i" << GT << 0 ).hint( BSON( "$natural" << 1 ) ), 0, 0, 0, QueryOption_CursorTailable );
+ int n=0;
+ while ( c->more() ) {
+ BSONObj z = c->next();
+ n++;
+ }
+
+ ASSERT_EQUALS( a , n );
+
+ insertNext();
+ ASSERT( c->more() );
+
+ for ( int i=0; i<90; i++ ) {
+ insertNext();
+ }
+
+ while ( c->more() ) { c->next(); }
+ ASSERT( c->isDead() );
+ }
+
+ void insertNext() {
+ BSONObjBuilder b;
+ b.appendOID("_id", 0, true);
+ b.append("i", _n++);
+ insert( ns() , b.obj() );
+ }
+
+ int _n;
+ };
+
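+ /** Cross check Helpers::findOne() against Helpers::findById(), and roughly time the two lookup paths. */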
+ class HelperTest : public CollectionBase {
+ public:
+
+ HelperTest() : CollectionBase( "helpertest" ) {
+ }
+
+ void run() {
+ writelock lk("");
+ Client::Context ctx( "unittests" );
+
+ for ( int i=0; i<50; i++ ) {
+ insert( ns() , BSON( "_id" << i << "x" << i * 2 ) );
+ }
+
+ ASSERT_EQUALS( 50 , count() );
+
+ BSONObj res;
+ ASSERT( Helpers::findOne( ns() , BSON( "_id" << 20 ) , res , true ) );
+ ASSERT_EQUALS( 40 , res["x"].numberInt() );
+
+ ASSERT( Helpers::findById( cc(), ns() , BSON( "_id" << 20 ) , res ) );
+ ASSERT_EQUALS( 40 , res["x"].numberInt() );
+
+ ASSERT( ! Helpers::findById( cc(), ns() , BSON( "_id" << 200 ) , res ) );
+
+ unsigned long long slow , fast;
+
+ int n = 10000;
+ DEV n = 1000;
+ {
+ Timer t;
+ for ( int i=0; i<n; i++ ) {
+ ASSERT( Helpers::findOne( ns() , BSON( "_id" << 20 ) , res , true ) );
+ }
+ slow = t.micros();
+ }
+ {
+ Timer t;
+ for ( int i=0; i<n; i++ ) {
+ ASSERT( Helpers::findById( cc(), ns() , BSON( "_id" << 20 ) , res ) );
+ }
+ fast = t.micros();
+ }
+
+ cout << "HelperTest slow:" << slow << " fast:" << fast << endl;
+
+ }
+ };
+
+ class HelperByIdTest : public CollectionBase {
+ public:
+
+ HelperByIdTest() : CollectionBase( "helpertestbyid" ) {
+ }
+
+ void run() {
+ writelock lk("");
+ Client::Context ctx( "unittests" );
+
+ for ( int i=0; i<1000; i++ ) {
+ insert( ns() , BSON( "_id" << i << "x" << i * 2 ) );
+ }
+ for ( int i=0; i<1000; i+=2 ) {
+ client_.remove( ns() , BSON( "_id" << i ) );
+ }
+
+ BSONObj res;
+ for ( int i=0; i<1000; i++ ) {
+ bool found = Helpers::findById( cc(), ns() , BSON( "_id" << i ) , res );
+ ASSERT_EQUALS( i % 2 , int(found) );
+ }
+
+ }
+ };
+
+ class ClientCursorTest : public CollectionBase {
+ ClientCursorTest() : CollectionBase( "clientcursortest" ) {
+ }
+
+ void run() {
+ writelock lk("");
+ Client::Context ctx( "unittests" );
+
+ for ( int i=0; i<1000; i++ ) {
+ insert( ns() , BSON( "_id" << i << "x" << i * 2 ) );
+ }
+
+
+ }
+ };
+
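+ /**
+ * Exercise the OplogReplay start point search against a capped collection that has wrapped,
+ * checking that each ts query begins at the correct entry.
+ */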
+ class FindingStart : public CollectionBase {
+ public:
+ FindingStart() : CollectionBase( "findingstart" ), _old( __findingStartInitialTimeout ) {
+ __findingStartInitialTimeout = 0;
+ }
+ ~FindingStart() {
+ __findingStartInitialTimeout = _old;
+ }
+
+ void run() {
+ BSONObj info;
+ ASSERT( client().runCommand( "unittests", BSON( "create" << "querytests.findingstart" << "capped" << true << "$nExtents" << 5 << "autoIndexId" << false ), info ) );
+
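+ // Fill the capped collection until the document count stops growing, i.e. until old
+ // entries start being overwritten.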
+ int i = 0;
+ for( int oldCount = -1;
+ count() != oldCount;
+ oldCount = count(), client().insert( ns(), BSON( "ts" << i++ ) ) );
+
+ for( int k = 0; k < 5; ++k ) {
+ client().insert( ns(), BSON( "ts" << i++ ) );
+ int min = client().query( ns(), Query().sort( BSON( "$natural" << 1 ) ) )->next()[ "ts" ].numberInt();
+ for( int j = -1; j < i; ++j ) {
+ auto_ptr< DBClientCursor > c = client().query( ns(), QUERY( "ts" << GTE << j ), 0, 0, 0, QueryOption_OplogReplay );
+ ASSERT( c->more() );
+ BSONObj next = c->next();
+ ASSERT( !next[ "ts" ].eoo() );
+ ASSERT_EQUALS( ( j > min ? j : min ), next[ "ts" ].numberInt() );
+ }
+ //cout << k << endl;
+ }
+ }
+
+ private:
+ int _old;
+ };
+
+ class FindingStartPartiallyFull : public CollectionBase {
+ public:
+ FindingStartPartiallyFull() : CollectionBase( "findingstart" ), _old( __findingStartInitialTimeout ) {
+ __findingStartInitialTimeout = 0;
+ }
+ ~FindingStartPartiallyFull() {
+ __findingStartInitialTimeout = _old;
+ }
+
+ void run() {
+ unsigned startNumCursors = ClientCursor::numCursors();
+
+ BSONObj info;
+ ASSERT( client().runCommand( "unittests", BSON( "create" << "querytests.findingstart" << "capped" << true << "$nExtents" << 5 << "autoIndexId" << false ), info ) );
+
+ int i = 0;
+ for( ; i < 150; client().insert( ns(), BSON( "ts" << i++ ) ) );
+
+ for( int k = 0; k < 5; ++k ) {
+ client().insert( ns(), BSON( "ts" << i++ ) );
+ int min = client().query( ns(), Query().sort( BSON( "$natural" << 1 ) ) )->next()[ "ts" ].numberInt();
+ for( int j = -1; j < i; ++j ) {
+ auto_ptr< DBClientCursor > c = client().query( ns(), QUERY( "ts" << GTE << j ), 0, 0, 0, QueryOption_OplogReplay );
+ ASSERT( c->more() );
+ BSONObj next = c->next();
+ ASSERT( !next[ "ts" ].eoo() );
+ ASSERT_EQUALS( ( j > min ? j : min ), next[ "ts" ].numberInt() );
+ }
+ }
+
+ ASSERT_EQUALS( startNumCursors, ClientCursor::numCursors() );
+ }
+
+ private:
+ int _old;
+ };
+
+ /**
+ * Check OplogReplay mode where query timestamp is earlier than the earliest
+ * entry in the collection.
+ */
+ class FindingStartStale : public CollectionBase {
+ public:
+ FindingStartStale() : CollectionBase( "findingstart" ) {}
+
+ void run() {
+ unsigned startNumCursors = ClientCursor::numCursors();
+
+ BSONObj info;
+ ASSERT( client().runCommand( "unittests", BSON( "create" << "querytests.findingstart" << "capped" << true << "$nExtents" << 5 << "autoIndexId" << false ), info ) );
+
+ // Check OplogReplay mode with empty collection.
+ auto_ptr< DBClientCursor > c = client().query( ns(), QUERY( "ts" << GTE << 50 ), 0, 0, 0, QueryOption_OplogReplay );
+ ASSERT( !c->more() );
+
+ // Check with some docs in the collection.
+ for( int i = 100; i < 150; client().insert( ns(), BSON( "ts" << i++ ) ) );
+ c = client().query( ns(), QUERY( "ts" << GTE << 50 ), 0, 0, 0, QueryOption_OplogReplay );
+ ASSERT( c->more() );
+ ASSERT_EQUALS( 100, c->next()[ "ts" ].numberInt() );
+
+ // Check that no persistent cursors outlast our queries above.
+ ASSERT_EQUALS( startNumCursors, ClientCursor::numCursors() );
+ }
+ };
+
+ class WhatsMyUri : public CollectionBase {
+ public:
+ WhatsMyUri() : CollectionBase( "whatsmyuri" ) {}
+ void run() {
+ BSONObj result;
+ client().runCommand( "admin", BSON( "whatsmyuri" << 1 ), result );
+ ASSERT_EQUALS( unknownAddress.toString(), result[ "you" ].str() );
+ }
+ };
+
+ namespace parsedtests {
+ class basic1 {
+ public:
+ void _test( const BSONObj& in ) {
+ ParsedQuery q( "a.b" , 5 , 6 , 9 , in , BSONObj() );
+ ASSERT_EQUALS( BSON( "x" << 5 ) , q.getFilter() );
+ }
+ void run() {
+ _test( BSON( "x" << 5 ) );
+ _test( BSON( "query" << BSON( "x" << 5 ) ) );
+ _test( BSON( "$query" << BSON( "x" << 5 ) ) );
+
+ {
+ ParsedQuery q( "a.b" , 5 , 6 , 9 , BSON( "x" << 5 ) , BSONObj() );
+ ASSERT_EQUALS( 6 , q.getNumToReturn() );
+ ASSERT( q.wantMore() );
+ }
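+                // A negative ntoreturn is treated as a single-batch request: numToReturn is
+                // its absolute value and wantMore() is false.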
+ {
+ ParsedQuery q( "a.b" , 5 , -6 , 9 , BSON( "x" << 5 ) , BSONObj() );
+ ASSERT_EQUALS( 6 , q.getNumToReturn() );
+ ASSERT( ! q.wantMore() );
+ }
+ }
+ };
+ };
+
+ namespace queryobjecttests {
+ class names1 {
+ public:
+ void run() {
+ ASSERT_EQUALS( BSON( "x" << 1 ) , QUERY( "query" << BSON( "x" << 1 ) ).getFilter() );
+ ASSERT_EQUALS( BSON( "x" << 1 ) , QUERY( "$query" << BSON( "x" << 1 ) ).getFilter() );
+ }
+
+ };
+ }
+
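+    // Ordering::descending() takes a bit mask: bit i selects key field i of the ordering.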
+ class OrderingTest {
+ public:
+ void run() {
+ {
+ Ordering o = Ordering::make( BSON( "a" << 1 << "b" << -1 << "c" << 1 ) );
+ ASSERT_EQUALS( 1 , o.get(0) );
+ ASSERT_EQUALS( -1 , o.get(1) );
+ ASSERT_EQUALS( 1 , o.get(2) );
+
+ ASSERT( ! o.descending( 1 ) );
+ ASSERT( o.descending( 1 << 1 ) );
+ ASSERT( ! o.descending( 1 << 2 ) );
+ }
+
+ {
+ Ordering o = Ordering::make( BSON( "a.d" << 1 << "a" << 1 << "e" << -1 ) );
+ ASSERT_EQUALS( 1 , o.get(0) );
+ ASSERT_EQUALS( 1 , o.get(1) );
+ ASSERT_EQUALS( -1 , o.get(2) );
+
+ ASSERT( ! o.descending( 1 ) );
+ ASSERT( ! o.descending( 1 << 1 ) );
+ ASSERT( o.descending( 1 << 2 ) );
+ }
+
+ }
+ };
+
+ namespace proj { // Projection tests
+
+ class T1 {
+ public:
+ void run() {
+
+ Projection m;
+ m.init( BSON( "a" << 1 ) );
+ ASSERT_EQUALS( BSON( "a" << 5 ) , m.transform( BSON( "x" << 1 << "a" << 5 ) ) );
+ }
+ };
+
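+        // checkKey() returns a KeyOnly helper when the projection can be satisfied from the
+        // given index key pattern alone; hydrate() then rebuilds the projected document from
+        // the raw key values.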
+ class K1 {
+ public:
+ void run() {
+
+ Projection m;
+ m.init( BSON( "a" << 1 ) );
+
+ scoped_ptr<Projection::KeyOnly> x( m.checkKey( BSON( "a" << 1 ) ) );
+ ASSERT( ! x );
+
+ x.reset( m.checkKey( BSON( "a" << 1 << "_id" << 1 ) ) );
+ ASSERT( x );
+
+ ASSERT_EQUALS( BSON( "a" << 5 << "_id" << 17 ) ,
+ x->hydrate( BSON( "" << 5 << "" << 17 ) ) );
+
+ x.reset( m.checkKey( BSON( "a" << 1 << "x" << 1 << "_id" << 1 ) ) );
+ ASSERT( x );
+
+ ASSERT_EQUALS( BSON( "a" << 5 << "_id" << 17 ) ,
+ x->hydrate( BSON( "" << 5 << "" << 123 << "" << 17 ) ) );
+
+ }
+ };
+
+ class K2 {
+ public:
+ void run() {
+
+ Projection m;
+ m.init( BSON( "a" << 1 << "_id" << 0 ) );
+
+ scoped_ptr<Projection::KeyOnly> x( m.checkKey( BSON( "a" << 1 ) ) );
+ ASSERT( x );
+
+ ASSERT_EQUALS( BSON( "a" << 17 ) ,
+ x->hydrate( BSON( "" << 17 ) ) );
+
+ x.reset( m.checkKey( BSON( "x" << 1 << "a" << 1 << "_id" << 1 ) ) );
+ ASSERT( x );
+
+ ASSERT_EQUALS( BSON( "a" << 123 ) ,
+ x->hydrate( BSON( "" << 5 << "" << 123 << "" << 17 ) ) );
+
+ }
+ };
+
+
+ class K3 {
+ public:
+ void run() {
+
+ {
+ Projection m;
+ m.init( BSON( "a" << 1 << "_id" << 0 ) );
+
+ scoped_ptr<Projection::KeyOnly> x( m.checkKey( BSON( "a" << 1 << "x.a" << 1 ) ) );
+ ASSERT( x );
+ }
+
+
+ {
+                    // TODO: this is temporary; see SERVER-2104
+ Projection m;
+ m.init( BSON( "x.a" << 1 << "_id" << 0 ) );
+
+ scoped_ptr<Projection::KeyOnly> x( m.checkKey( BSON( "a" << 1 << "x.a" << 1 ) ) );
+ ASSERT( ! x );
+ }
+
+ }
+ };
+
+
+ }
+
+ class All : public Suite {
+ public:
+ All() : Suite( "query" ) {
+ }
+
+ void setupTests() {
+ add< FindingStart >();
+ add< FindOne >();
+ add< FindOneRequireIndex >();
+ add< FindOneEmptyObj >();
+ add< BoundedKey >();
+ add< GetMore >();
+ add< PositiveLimit >();
+ add< ReturnOneOfManyAndTail >();
+ add< TailNotAtEnd >();
+ add< EmptyTail >();
+ add< TailableDelete >();
+ add< TailableInsertDelete >();
+ add< TailCappedOnly >();
+ add< TailableQueryOnId >();
+ add< OplogReplayMode >();
+ add< ArrayId >();
+ add< UnderscoreNs >();
+ add< EmptyFieldSpec >();
+ add< MultiNe >();
+ add< EmbeddedNe >();
+ add< EmbeddedNumericTypes >();
+ add< AutoResetIndexCache >();
+ add< UniqueIndex >();
+ add< UniqueIndexPreexistingData >();
+ add< SubobjectInArray >();
+ add< Size >();
+ add< FullArray >();
+ add< InsideArray >();
+ add< IndexInsideArrayCorrect >();
+ add< SubobjArr >();
+ add< MinMax >();
+ add< MatchCodeCodeWScope >();
+ add< MatchDBRefType >();
+ add< DirectLocking >();
+ add< FastCountIn >();
+ add< EmbeddedArray >();
+ add< DifferentNumbers >();
+ add< SymbolStringSame >();
+ add< TailableCappedRaceCondition >();
+ add< HelperTest >();
+ add< HelperByIdTest >();
+ add< FindingStartPartiallyFull >();
+ add< FindingStartStale >();
+ add< WhatsMyUri >();
+
+ add< parsedtests::basic1 >();
+
+ add< queryobjecttests::names1 >();
+
+ add< OrderingTest >();
+
+ add< proj::T1 >();
+ add< proj::K1 >();
+ add< proj::K2 >();
+ add< proj::K3 >();
+ }
+ } myall;
+
+} // namespace QueryTests
+
diff --git a/src/mongo/dbtests/queryutiltests.cpp b/src/mongo/dbtests/queryutiltests.cpp
new file mode 100644
index 00000000000..e825b4f8a9b
--- /dev/null
+++ b/src/mongo/dbtests/queryutiltests.cpp
@@ -0,0 +1,989 @@
+// queryutiltests.cpp : query utility unit tests
+//
+
+/**
+ * Copyright (C) 2009 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../db/queryutil.h"
+#include "../db/querypattern.h"
+#include "../db/instance.h"
+#include "../db/pdfile.h"
+#include "dbtests.h"
+
+namespace QueryUtilTests {
+
+ namespace FieldRangeTests {
+ class Base {
+ public:
+ virtual ~Base() {}
+ void run() {
+ const FieldRangeSet s( "ns", query(), true );
+ checkElt( lower(), s.range( "a" ).min() );
+ checkElt( upper(), s.range( "a" ).max() );
+ ASSERT_EQUALS( lowerInclusive(), s.range( "a" ).minInclusive() );
+ ASSERT_EQUALS( upperInclusive(), s.range( "a" ).maxInclusive() );
+ }
+ protected:
+ virtual BSONObj query() = 0;
+ virtual BSONElement lower() { return minKey.firstElement(); }
+ virtual bool lowerInclusive() { return true; }
+ virtual BSONElement upper() { return maxKey.firstElement(); }
+ virtual bool upperInclusive() { return true; }
+ static void checkElt( BSONElement expected, BSONElement actual ) {
+ if ( expected.woCompare( actual, false ) ) {
+                    log() << "expected: " << expected << ", got: " << actual << endl;
+ ASSERT( false );
+ }
+ }
+ };
+
+
+ class NumericBase : public Base {
+ public:
+ NumericBase() {
+ o = BSON( "min" << -numeric_limits<double>::max() << "max" << numeric_limits<double>::max() );
+ }
+
+ virtual BSONElement lower() { return o["min"]; }
+ virtual BSONElement upper() { return o["max"]; }
+ private:
+ BSONObj o;
+ };
+
+ class Empty : public Base {
+ virtual BSONObj query() { return BSONObj(); }
+ };
+
+ class Eq : public Base {
+ public:
+ Eq() : o_( BSON( "a" << 1 ) ) {}
+ virtual BSONObj query() { return o_; }
+ virtual BSONElement lower() { return o_.firstElement(); }
+ virtual BSONElement upper() { return o_.firstElement(); }
+ BSONObj o_;
+ };
+
+ class DupEq : public Eq {
+ public:
+ virtual BSONObj query() { return BSON( "a" << 1 << "b" << 2 << "a" << 1 ); }
+ };
+
+ class Lt : public NumericBase {
+ public:
+ Lt() : o_( BSON( "-" << 1 ) ) {}
+ virtual BSONObj query() { return BSON( "a" << LT << 1 ); }
+ virtual BSONElement upper() { return o_.firstElement(); }
+ virtual bool upperInclusive() { return false; }
+ BSONObj o_;
+ };
+
+ class Lte : public Lt {
+ virtual BSONObj query() { return BSON( "a" << LTE << 1 ); }
+ virtual bool upperInclusive() { return true; }
+ };
+
+ class Gt : public NumericBase {
+ public:
+ Gt() : o_( BSON( "-" << 1 ) ) {}
+ virtual BSONObj query() { return BSON( "a" << GT << 1 ); }
+ virtual BSONElement lower() { return o_.firstElement(); }
+ virtual bool lowerInclusive() { return false; }
+ BSONObj o_;
+ };
+
+ class Gte : public Gt {
+ virtual BSONObj query() { return BSON( "a" << GTE << 1 ); }
+ virtual bool lowerInclusive() { return true; }
+ };
+
+ class TwoLt : public Lt {
+ virtual BSONObj query() { return BSON( "a" << LT << 1 << LT << 5 ); }
+ };
+
+ class TwoGt : public Gt {
+ virtual BSONObj query() { return BSON( "a" << GT << 0 << GT << 1 ); }
+ };
+
+ class EqGte : public Eq {
+ virtual BSONObj query() { return BSON( "a" << 1 << "a" << GTE << 1 ); }
+ };
+
+ class EqGteInvalid {
+ public:
+ void run() {
+ FieldRangeSet frs( "ns", BSON( "a" << 1 << "a" << GTE << 2 ), true );
+ ASSERT( !frs.matchPossible() );
+ }
+ };
+
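+        // A rooted regex such as /^abc/ produces the half-open string range [ "abc", "abd" );
+        // an unanchored regex only bounds the value to the string type (see UnhelpfulRegex).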
+ struct RegexBase : Base {
+            void run() { // only need to look at the first interval
+ FieldRangeSet s( "ns", query(), true );
+ checkElt( lower(), s.range( "a" ).intervals()[0]._lower._bound );
+ checkElt( upper(), s.range( "a" ).intervals()[0]._upper._bound );
+ ASSERT_EQUALS( lowerInclusive(), s.range( "a" ).intervals()[0]._lower._inclusive );
+ ASSERT_EQUALS( upperInclusive(), s.range( "a" ).intervals()[0]._upper._inclusive );
+ }
+ };
+
+ class Regex : public RegexBase {
+ public:
+ Regex() : o1_( BSON( "" << "abc" ) ), o2_( BSON( "" << "abd" ) ) {}
+ virtual BSONObj query() {
+ BSONObjBuilder b;
+ b.appendRegex( "a", "^abc" );
+ return b.obj();
+ }
+ virtual BSONElement lower() { return o1_.firstElement(); }
+ virtual BSONElement upper() { return o2_.firstElement(); }
+ virtual bool upperInclusive() { return false; }
+ BSONObj o1_, o2_;
+ };
+
+ class RegexObj : public RegexBase {
+ public:
+ RegexObj() : o1_( BSON( "" << "abc" ) ), o2_( BSON( "" << "abd" ) ) {}
+ virtual BSONObj query() { return BSON("a" << BSON("$regex" << "^abc")); }
+ virtual BSONElement lower() { return o1_.firstElement(); }
+ virtual BSONElement upper() { return o2_.firstElement(); }
+ virtual bool upperInclusive() { return false; }
+ BSONObj o1_, o2_;
+ };
+
+ class UnhelpfulRegex : public RegexBase {
+ public:
+ UnhelpfulRegex() {
+ BSONObjBuilder b;
+ b.appendMinForType("lower", String);
+ b.appendMaxForType("upper", String);
+ limits = b.obj();
+ }
+
+ virtual BSONObj query() {
+ BSONObjBuilder b;
+ b.appendRegex( "a", "abc" );
+ return b.obj();
+ }
+ virtual BSONElement lower() { return limits["lower"]; }
+ virtual BSONElement upper() { return limits["upper"]; }
+ virtual bool upperInclusive() { return false; }
+ BSONObj limits;
+ };
+
+ class In : public Base {
+ public:
+ In() : o1_( BSON( "-" << -3 ) ), o2_( BSON( "-" << 44 ) ) {}
+ virtual BSONObj query() {
+ vector< int > vals;
+ vals.push_back( 4 );
+ vals.push_back( 8 );
+ vals.push_back( 44 );
+ vals.push_back( -1 );
+ vals.push_back( -3 );
+ vals.push_back( 0 );
+ BSONObjBuilder bb;
+ bb.append( "$in", vals );
+ BSONObjBuilder b;
+ b.append( "a", bb.done() );
+ return b.obj();
+ }
+ virtual BSONElement lower() { return o1_.firstElement(); }
+ virtual BSONElement upper() { return o2_.firstElement(); }
+ BSONObj o1_, o2_;
+ };
+
+ class Equality {
+ public:
+ void run() {
+ FieldRangeSet s( "ns", BSON( "a" << 1 ), true );
+ ASSERT( s.range( "a" ).equality() );
+ FieldRangeSet s2( "ns", BSON( "a" << GTE << 1 << LTE << 1 ), true );
+ ASSERT( s2.range( "a" ).equality() );
+ FieldRangeSet s3( "ns", BSON( "a" << GT << 1 << LTE << 1 ), true );
+ ASSERT( !s3.range( "a" ).equality() );
+ FieldRangeSet s4( "ns", BSON( "a" << GTE << 1 << LT << 1 ), true );
+ ASSERT( !s4.range( "a" ).equality() );
+ FieldRangeSet s5( "ns", BSON( "a" << GTE << 1 << LTE << 1 << GT << 1 ), true );
+ ASSERT( !s5.range( "a" ).equality() );
+ FieldRangeSet s6( "ns", BSON( "a" << GTE << 1 << LTE << 1 << LT << 1 ), true );
+ ASSERT( !s6.range( "a" ).equality() );
+ }
+ };
+
+ class SimplifiedQuery {
+ public:
+ void run() {
+ FieldRangeSet frs( "ns", BSON( "a" << GT << 1 << GT << 5 << LT << 10 << "b" << 4 << "c" << LT << 4 << LT << 6 << "d" << GTE << 0 << GT << 0 << "e" << GTE << 0 << LTE << 10 ), true );
+ BSONObj simple = frs.simplifiedQuery();
+ cout << "simple: " << simple << endl;
+ ASSERT( !simple.getObjectField( "a" ).woCompare( fromjson( "{$gt:5,$lt:10}" ) ) );
+ ASSERT_EQUALS( 4, simple.getIntField( "b" ) );
+ ASSERT( !simple.getObjectField( "c" ).woCompare( BSON("$gte" << -numeric_limits<double>::max() << "$lt" << 4 ) ) );
+ ASSERT( !simple.getObjectField( "d" ).woCompare( BSON("$gt" << 0 << "$lte" << numeric_limits<double>::max() ) ) );
+ ASSERT( !simple.getObjectField( "e" ).woCompare( fromjson( "{$gte:0,$lte:10}" ) ) );
+ }
+ };
+
+ class QueryPatternTest {
+ public:
+ void run() {
+ ASSERT( p( BSON( "a" << 1 ) ) == p( BSON( "a" << 1 ) ) );
+ ASSERT( p( BSON( "a" << 1 ) ) == p( BSON( "a" << 5 ) ) );
+ ASSERT( p( BSON( "a" << 1 ) ) != p( BSON( "b" << 1 ) ) );
+ ASSERT( p( BSON( "a" << 1 ) ) != p( BSON( "a" << LTE << 1 ) ) );
+ ASSERT( p( BSON( "a" << 1 ) ) != p( BSON( "a" << 1 << "b" << 2 ) ) );
+ ASSERT( p( BSON( "a" << 1 << "b" << 3 ) ) != p( BSON( "a" << 1 ) ) );
+ ASSERT( p( BSON( "a" << LT << 1 ) ) == p( BSON( "a" << LTE << 5 ) ) );
+ ASSERT( p( BSON( "a" << LT << 1 << GTE << 0 ) ) == p( BSON( "a" << LTE << 5 << GTE << 0 ) ) );
+ ASSERT( p( BSON( "a" << 1 ) ) < p( BSON( "a" << 1 << "b" << 1 ) ) );
+ ASSERT( !( p( BSON( "a" << 1 << "b" << 1 ) ) < p( BSON( "a" << 1 ) ) ) );
+ ASSERT( p( BSON( "a" << 1 ), BSON( "b" << 1 ) ) == p( BSON( "a" << 4 ), BSON( "b" << "a" ) ) );
+ ASSERT( p( BSON( "a" << 1 ), BSON( "b" << 1 ) ) == p( BSON( "a" << 4 ), BSON( "b" << -1 ) ) );
+ ASSERT( p( BSON( "a" << 1 ), BSON( "b" << 1 ) ) != p( BSON( "a" << 4 ), BSON( "c" << 1 ) ) );
+ ASSERT( p( BSON( "a" << 1 ), BSON( "b" << 1 << "c" << -1 ) ) == p( BSON( "a" << 4 ), BSON( "b" << -1 << "c" << 1 ) ) );
+ ASSERT( p( BSON( "a" << 1 ), BSON( "b" << 1 << "c" << 1 ) ) != p( BSON( "a" << 4 ), BSON( "b" << 1 ) ) );
+ ASSERT( p( BSON( "a" << 1 ), BSON( "b" << 1 ) ) != p( BSON( "a" << 4 ), BSON( "b" << 1 << "c" << 1 ) ) );
+ }
+ private:
+ static QueryPattern p( const BSONObj &query, const BSONObj &sort = BSONObj() ) {
+ return FieldRangeSet( "", query, true ).pattern( sort );
+ }
+ };
+
+ class NoWhere {
+ public:
+ void run() {
+ ASSERT_EQUALS( 0, FieldRangeSet( "ns", BSON( "$where" << 1 ), true ).nNontrivialRanges() );
+ }
+ };
+
+ class Numeric {
+ public:
+ void run() {
+ FieldRangeSet f( "", BSON( "a" << 1 ), true );
+ ASSERT( f.range( "a" ).min().woCompare( BSON( "a" << 2.0 ).firstElement() ) < 0 );
+ ASSERT( f.range( "a" ).min().woCompare( BSON( "a" << 0.0 ).firstElement() ) > 0 );
+ }
+ };
+
+ class InLowerBound {
+ public:
+ void run() {
+ FieldRangeSet f( "", fromjson( "{a:{$gt:4,$in:[1,2,3,4,5,6]}}" ), true );
+ ASSERT( f.range( "a" ).min().woCompare( BSON( "a" << 5.0 ).firstElement(), false ) == 0 );
+ ASSERT( f.range( "a" ).max().woCompare( BSON( "a" << 6.0 ).firstElement(), false ) == 0 );
+ }
+ };
+
+ class InUpperBound {
+ public:
+ void run() {
+ FieldRangeSet f( "", fromjson( "{a:{$lt:4,$in:[1,2,3,4,5,6]}}" ), true );
+ ASSERT( f.range( "a" ).min().woCompare( BSON( "a" << 1.0 ).firstElement(), false ) == 0 );
+ ASSERT( f.range( "a" ).max().woCompare( BSON( "a" << 3.0 ).firstElement(), false ) == 0 );
+ }
+ };
+
+ class UnionBound {
+ public:
+ void run() {
+ FieldRangeSet frs( "", fromjson( "{a:{$gt:1,$lt:9},b:{$gt:9,$lt:12}}" ), true );
+ FieldRange ret = frs.range( "a" );
+ ret |= frs.range( "b" );
+ ASSERT_EQUALS( 2U, ret.intervals().size() );
+ }
+ };
+
+ class MultiBound {
+ public:
+ void run() {
+ FieldRangeSet frs1( "", fromjson( "{a:{$in:[1,3,5,7,9]}}" ), true );
+ FieldRangeSet frs2( "", fromjson( "{a:{$in:[2,3,5,8,9]}}" ), true );
+ FieldRange fr1 = frs1.range( "a" );
+ FieldRange fr2 = frs2.range( "a" );
+ fr1 &= fr2;
+ ASSERT( fr1.min().woCompare( BSON( "a" << 3.0 ).firstElement(), false ) == 0 );
+ ASSERT( fr1.max().woCompare( BSON( "a" << 9.0 ).firstElement(), false ) == 0 );
+ vector< FieldInterval > intervals = fr1.intervals();
+ vector< FieldInterval >::const_iterator j = intervals.begin();
+ double expected[] = { 3, 5, 9 };
+ for( int i = 0; i < 3; ++i, ++j ) {
+ ASSERT_EQUALS( expected[ i ], j->_lower._bound.number() );
+ ASSERT( j->_lower._inclusive );
+ ASSERT( j->_lower == j->_upper );
+ }
+ ASSERT( j == intervals.end() );
+ }
+ };
+
+ class DiffBase {
+ public:
+ virtual ~DiffBase() {}
+ void run() {
+ FieldRangeSet frs( "", fromjson( obj().toString() ), true );
+ FieldRange ret = frs.range( "a" );
+ ret -= frs.range( "b" );
+ check( ret );
+ }
+ protected:
+ void check( const FieldRange &fr ) {
+ vector< FieldInterval > fi = fr.intervals();
+ ASSERT_EQUALS( len(), fi.size() );
+ int i = 0;
+ for( vector< FieldInterval >::const_iterator j = fi.begin(); j != fi.end(); ++j ) {
+ ASSERT_EQUALS( nums()[ i ], j->_lower._bound.numberInt() );
+ ASSERT_EQUALS( incs()[ i ], j->_lower._inclusive );
+ ++i;
+ ASSERT_EQUALS( nums()[ i ], j->_upper._bound.numberInt() );
+ ASSERT_EQUALS( incs()[ i ], j->_upper._inclusive );
+ ++i;
+ }
+ }
+ virtual unsigned len() const = 0;
+ virtual const int *nums() const = 0;
+ virtual const bool *incs() const = 0;
+ virtual BSONObj obj() const = 0;
+ };
+
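+        // TwoRangeBase: subtracting the range on 'b' from the range on 'a' is expected to
+        // leave a single interval [low, high] with the given bound inclusivities.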
+ class TwoRangeBase : public DiffBase {
+ public:
+ TwoRangeBase( string obj, int low, int high, bool lowI, bool highI )
+ : _obj( obj ) {
+ _n[ 0 ] = low;
+ _n[ 1 ] = high;
+ _b[ 0 ] = lowI;
+ _b[ 1 ] = highI;
+ }
+ private:
+ virtual unsigned len() const { return 1; }
+ virtual const int *nums() const { return _n; }
+ virtual const bool *incs() const { return _b; }
+ virtual BSONObj obj() const { return fromjson( _obj ); }
+ string _obj;
+ int _n[ 2 ];
+ bool _b[ 2 ];
+ };
+
+ struct Diff1 : public TwoRangeBase {
+ Diff1() : TwoRangeBase( "{a:{$gt:1,$lt:2},b:{$gt:3,$lt:4}}", 1, 2, false, false ) {}
+ };
+
+ struct Diff2 : public TwoRangeBase {
+ Diff2() : TwoRangeBase( "{a:{$gt:1,$lt:2},b:{$gt:2,$lt:4}}", 1, 2, false, false ) {}
+ };
+
+ struct Diff3 : public TwoRangeBase {
+ Diff3() : TwoRangeBase( "{a:{$gt:1,$lte:2},b:{$gt:2,$lt:4}}", 1, 2, false, true ) {}
+ };
+
+ struct Diff4 : public TwoRangeBase {
+ Diff4() : TwoRangeBase( "{a:{$gt:1,$lt:2},b:{$gte:2,$lt:4}}", 1, 2, false, false) {}
+ };
+
+ struct Diff5 : public TwoRangeBase {
+ Diff5() : TwoRangeBase( "{a:{$gt:1,$lte:2},b:{$gte:2,$lt:4}}", 1, 2, false, false) {}
+ };
+
+ struct Diff6 : public TwoRangeBase {
+ Diff6() : TwoRangeBase( "{a:{$gt:1,$lte:3},b:{$gte:2,$lt:4}}", 1, 2, false, false) {}
+ };
+
+ struct Diff7 : public TwoRangeBase {
+ Diff7() : TwoRangeBase( "{a:{$gt:1,$lte:3},b:{$gt:2,$lt:4}}", 1, 2, false, true) {}
+ };
+
+ struct Diff8 : public TwoRangeBase {
+ Diff8() : TwoRangeBase( "{a:{$gt:1,$lt:4},b:{$gt:2,$lt:4}}", 1, 2, false, true) {}
+ };
+
+ struct Diff9 : public TwoRangeBase {
+ Diff9() : TwoRangeBase( "{a:{$gt:1,$lt:4},b:{$gt:2,$lte:4}}", 1, 2, false, true) {}
+ };
+
+ struct Diff10 : public TwoRangeBase {
+ Diff10() : TwoRangeBase( "{a:{$gt:1,$lte:4},b:{$gt:2,$lte:4}}", 1, 2, false, true) {}
+ };
+
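+        // SplitRangeBase: the subtracted range falls inside the 'a' range, so the result is
+        // expected to be two disjoint intervals.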
+ class SplitRangeBase : public DiffBase {
+ public:
+ SplitRangeBase( string obj, int low1, bool low1I, int high1, bool high1I, int low2, bool low2I, int high2, bool high2I )
+ : _obj( obj ) {
+ _n[ 0 ] = low1;
+ _n[ 1 ] = high1;
+ _n[ 2 ] = low2;
+ _n[ 3 ] = high2;
+ _b[ 0 ] = low1I;
+ _b[ 1 ] = high1I;
+ _b[ 2 ] = low2I;
+ _b[ 3 ] = high2I;
+ }
+ private:
+ virtual unsigned len() const { return 2; }
+ virtual const int *nums() const { return _n; }
+ virtual const bool *incs() const { return _b; }
+ virtual BSONObj obj() const { return fromjson( _obj ); }
+ string _obj;
+ int _n[ 4 ];
+ bool _b[ 4 ];
+ };
+
+ struct Diff11 : public SplitRangeBase {
+ Diff11() : SplitRangeBase( "{a:{$gt:1,$lte:4},b:{$gt:2,$lt:4}}", 1, false, 2, true, 4, true, 4, true) {}
+ };
+
+ struct Diff12 : public SplitRangeBase {
+ Diff12() : SplitRangeBase( "{a:{$gt:1,$lt:5},b:{$gt:2,$lt:4}}", 1, false, 2, true, 4, true, 5, false) {}
+ };
+
+ struct Diff13 : public TwoRangeBase {
+ Diff13() : TwoRangeBase( "{a:{$gt:1,$lt:5},b:{$gt:1,$lt:4}}", 4, 5, true, false) {}
+ };
+
+ struct Diff14 : public SplitRangeBase {
+ Diff14() : SplitRangeBase( "{a:{$gte:1,$lt:5},b:{$gt:1,$lt:4}}", 1, true, 1, true, 4, true, 5, false) {}
+ };
+
+ struct Diff15 : public TwoRangeBase {
+ Diff15() : TwoRangeBase( "{a:{$gt:1,$lt:5},b:{$gte:1,$lt:4}}", 4, 5, true, false) {}
+ };
+
+ struct Diff16 : public TwoRangeBase {
+ Diff16() : TwoRangeBase( "{a:{$gte:1,$lt:5},b:{$gte:1,$lt:4}}", 4, 5, true, false) {}
+ };
+
+ struct Diff17 : public TwoRangeBase {
+ Diff17() : TwoRangeBase( "{a:{$gt:1,$lt:5},b:{$gt:0,$lt:4}}", 4, 5, true, false) {}
+ };
+
+ struct Diff18 : public TwoRangeBase {
+ Diff18() : TwoRangeBase( "{a:{$gt:1,$lt:5},b:{$gt:0,$lte:4}}", 4, 5, false, false) {}
+ };
+
+ struct Diff19 : public TwoRangeBase {
+ Diff19() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:{$gte:0,$lte:1}}", 1, 5, false, true) {}
+ };
+
+ struct Diff20 : public TwoRangeBase {
+ Diff20() : TwoRangeBase( "{a:{$gt:1,$lte:5},b:{$gte:0,$lte:1}}", 1, 5, false, true) {}
+ };
+
+ struct Diff21 : public TwoRangeBase {
+ Diff21() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:{$gte:0,$lt:1}}", 1, 5, true, true) {}
+ };
+
+ struct Diff22 : public TwoRangeBase {
+ Diff22() : TwoRangeBase( "{a:{$gt:1,$lte:5},b:{$gte:0,$lt:1}}", 1, 5, false, true) {}
+ };
+
+ struct Diff23 : public TwoRangeBase {
+ Diff23() : TwoRangeBase( "{a:{$gt:1,$lte:5},b:{$gte:0,$lt:0.5}}", 1, 5, false, true) {}
+ };
+
+ struct Diff24 : public TwoRangeBase {
+ Diff24() : TwoRangeBase( "{a:{$gt:1,$lte:5},b:0}", 1, 5, false, true) {}
+ };
+
+ struct Diff25 : public TwoRangeBase {
+ Diff25() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:0}", 1, 5, true, true) {}
+ };
+
+ struct Diff26 : public TwoRangeBase {
+ Diff26() : TwoRangeBase( "{a:{$gt:1,$lte:5},b:1}", 1, 5, false, true) {}
+ };
+
+ struct Diff27 : public TwoRangeBase {
+ Diff27() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:1}", 1, 5, false, true) {}
+ };
+
+ struct Diff28 : public SplitRangeBase {
+ Diff28() : SplitRangeBase( "{a:{$gte:1,$lte:5},b:3}", 1, true, 3, false, 3, false, 5, true) {}
+ };
+
+ struct Diff29 : public TwoRangeBase {
+ Diff29() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:5}", 1, 5, true, false) {}
+ };
+
+ struct Diff30 : public TwoRangeBase {
+ Diff30() : TwoRangeBase( "{a:{$gte:1,$lt:5},b:5}", 1, 5, true, false) {}
+ };
+
+ struct Diff31 : public TwoRangeBase {
+ Diff31() : TwoRangeBase( "{a:{$gte:1,$lt:5},b:6}", 1, 5, true, false) {}
+ };
+
+ struct Diff32 : public TwoRangeBase {
+ Diff32() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:6}", 1, 5, true, true) {}
+ };
+
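+        // EmptyBase: the subtracted range covers the 'a' range entirely, so no intervals remain.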
+ class EmptyBase : public DiffBase {
+ public:
+ EmptyBase( string obj )
+ : _obj( obj ) {}
+ private:
+ virtual unsigned len() const { return 0; }
+ virtual const int *nums() const { return 0; }
+ virtual const bool *incs() const { return 0; }
+ virtual BSONObj obj() const { return fromjson( _obj ); }
+ string _obj;
+ };
+
+ struct Diff33 : public EmptyBase {
+ Diff33() : EmptyBase( "{a:{$gte:1,$lte:5},b:{$gt:0,$lt:6}}" ) {}
+ };
+
+ struct Diff34 : public EmptyBase {
+ Diff34() : EmptyBase( "{a:{$gte:1,$lte:5},b:{$gte:1,$lt:6}}" ) {}
+ };
+
+ struct Diff35 : public EmptyBase {
+ Diff35() : EmptyBase( "{a:{$gt:1,$lte:5},b:{$gte:1,$lt:6}}" ) {}
+ };
+
+ struct Diff36 : public EmptyBase {
+ Diff36() : EmptyBase( "{a:{$gt:1,$lte:5},b:{$gt:1,$lt:6}}" ) {}
+ };
+
+ struct Diff37 : public TwoRangeBase {
+ Diff37() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:{$gt:1,$lt:6}}", 1, 1, true, true ) {}
+ };
+
+ struct Diff38 : public EmptyBase {
+ Diff38() : EmptyBase( "{a:{$gt:1,$lt:5},b:{$gt:0,$lt:5}}" ) {}
+ };
+
+ struct Diff39 : public EmptyBase {
+ Diff39() : EmptyBase( "{a:{$gt:1,$lt:5},b:{$gt:0,$lte:5}}" ) {}
+ };
+
+ struct Diff40 : public EmptyBase {
+ Diff40() : EmptyBase( "{a:{$gt:1,$lte:5},b:{$gt:0,$lte:5}}" ) {}
+ };
+
+ struct Diff41 : public TwoRangeBase {
+ Diff41() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:{$gt:0,$lt:5}}", 5, 5, true, true ) {}
+ };
+
+ struct Diff42 : public EmptyBase {
+ Diff42() : EmptyBase( "{a:{$gt:1,$lt:5},b:{$gt:1,$lt:5}}" ) {}
+ };
+
+ struct Diff43 : public EmptyBase {
+ Diff43() : EmptyBase( "{a:{$gt:1,$lt:5},b:{$gt:1,$lte:5}}" ) {}
+ };
+
+ struct Diff44 : public EmptyBase {
+ Diff44() : EmptyBase( "{a:{$gt:1,$lt:5},b:{$gte:1,$lt:5}}" ) {}
+ };
+
+ struct Diff45 : public EmptyBase {
+ Diff45() : EmptyBase( "{a:{$gt:1,$lt:5},b:{$gte:1,$lte:5}}" ) {}
+ };
+
+ struct Diff46 : public TwoRangeBase {
+ Diff46() : TwoRangeBase( "{a:{$gt:1,$lte:5},b:{$gt:1,$lt:5}}", 5, 5, true, true ) {}
+ };
+
+ struct Diff47 : public EmptyBase {
+ Diff47() : EmptyBase( "{a:{$gt:1,$lte:5},b:{$gt:1,$lte:5}}" ) {}
+ };
+
+ struct Diff48 : public TwoRangeBase {
+ Diff48() : TwoRangeBase( "{a:{$gt:1,$lte:5},b:{$gte:1,$lt:5}}", 5, 5, true, true ) {}
+ };
+
+ struct Diff49 : public EmptyBase {
+ Diff49() : EmptyBase( "{a:{$gt:1,$lte:5},b:{$gte:1,$lte:5}}" ) {}
+ };
+
+ struct Diff50 : public TwoRangeBase {
+ Diff50() : TwoRangeBase( "{a:{$gte:1,$lt:5},b:{$gt:1,$lt:5}}", 1, 1, true, true ) {}
+ };
+
+ struct Diff51 : public TwoRangeBase {
+ Diff51() : TwoRangeBase( "{a:{$gte:1,$lt:5},b:{$gt:1,$lte:5}}", 1, 1, true, true ) {}
+ };
+
+ struct Diff52 : public EmptyBase {
+ Diff52() : EmptyBase( "{a:{$gte:1,$lt:5},b:{$gte:1,$lt:5}}" ) {}
+ };
+
+ struct Diff53 : public EmptyBase {
+ Diff53() : EmptyBase( "{a:{$gte:1,$lt:5},b:{$gte:1,$lte:5}}" ) {}
+ };
+
+ struct Diff54 : public SplitRangeBase {
+ Diff54() : SplitRangeBase( "{a:{$gte:1,$lte:5},b:{$gt:1,$lt:5}}", 1, true, 1, true, 5, true, 5, true ) {}
+ };
+
+ struct Diff55 : public TwoRangeBase {
+ Diff55() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:{$gt:1,$lte:5}}", 1, 1, true, true ) {}
+ };
+
+ struct Diff56 : public TwoRangeBase {
+ Diff56() : TwoRangeBase( "{a:{$gte:1,$lte:5},b:{$gte:1,$lt:5}}", 5, 5, true, true ) {}
+ };
+
+ struct Diff57 : public EmptyBase {
+ Diff57() : EmptyBase( "{a:{$gte:1,$lte:5},b:{$gte:1,$lte:5}}" ) {}
+ };
+
+ struct Diff58 : public TwoRangeBase {
+ Diff58() : TwoRangeBase( "{a:1,b:{$gt:1,$lt:5}}", 1, 1, true, true ) {}
+ };
+
+ struct Diff59 : public EmptyBase {
+ Diff59() : EmptyBase( "{a:1,b:{$gte:1,$lt:5}}" ) {}
+ };
+
+ struct Diff60 : public EmptyBase {
+ Diff60() : EmptyBase( "{a:2,b:{$gte:1,$lt:5}}" ) {}
+ };
+
+ struct Diff61 : public EmptyBase {
+ Diff61() : EmptyBase( "{a:5,b:{$gte:1,$lte:5}}" ) {}
+ };
+
+ struct Diff62 : public TwoRangeBase {
+ Diff62() : TwoRangeBase( "{a:5,b:{$gt:1,$lt:5}}", 5, 5, true, true ) {}
+ };
+
+ struct Diff63 : public EmptyBase {
+ Diff63() : EmptyBase( "{a:5,b:5}" ) {}
+ };
+
+ struct Diff64 : public TwoRangeBase {
+ Diff64() : TwoRangeBase( "{a:{$gte:1,$lte:2},b:{$gt:0,$lte:1}}", 1, 2, false, true ) {}
+ };
+
+ class DiffMulti1 : public DiffBase {
+ public:
+ void run() {
+ FieldRangeSet frs( "", fromjson( "{a:{$gt:1,$lt:9},b:{$gt:0,$lt:2},c:3,d:{$gt:4,$lt:5},e:{$gt:7,$lt:10}}" ), true );
+ FieldRange ret = frs.range( "a" );
+ FieldRange other = frs.range( "b" );
+ other |= frs.range( "c" );
+ other |= frs.range( "d" );
+ other |= frs.range( "e" );
+ ret -= other;
+ check( ret );
+ }
+ protected:
+ virtual unsigned len() const { return 3; }
+ virtual const int *nums() const { static int n[] = { 2, 3, 3, 4, 5, 7 }; return n; }
+ virtual const bool *incs() const { static bool b[] = { true, false, false, true, true, true }; return b; }
+ virtual BSONObj obj() const { return BSONObj(); }
+ };
+
+ class DiffMulti2 : public DiffBase {
+ public:
+ void run() {
+ FieldRangeSet frs( "", fromjson( "{a:{$gt:1,$lt:9},b:{$gt:0,$lt:2},c:3,d:{$gt:4,$lt:5},e:{$gt:7,$lt:10}}" ), true );
+ FieldRange mask = frs.range( "a" );
+ FieldRange ret = frs.range( "b" );
+ ret |= frs.range( "c" );
+ ret |= frs.range( "d" );
+ ret |= frs.range( "e" );
+ ret -= mask;
+ check( ret );
+ }
+ protected:
+ virtual unsigned len() const { return 2; }
+ virtual const int *nums() const { static int n[] = { 0, 1, 9, 10 }; return n; }
+ virtual const bool *incs() const { static bool b[] = { false, true, true, false }; return b; }
+ virtual BSONObj obj() const { return BSONObj(); }
+ };
+
+ } // namespace FieldRangeTests
+
+ namespace FieldRangeSetTests {
+
+ class Intersect {
+ public:
+ void run() {
+ FieldRangeSet frs1( "", fromjson( "{b:{$in:[5,6]},c:7,d:{$in:[8,9]}}" ), true );
+ FieldRangeSet frs2( "", fromjson( "{a:1,b:5,c:{$in:[7,8]},d:{$in:[8,9]},e:10}" ), true );
+ frs1 &= frs2;
+ ASSERT_EQUALS( fromjson( "{a:1,b:5,c:7,d:{$gte:8,$lte:9},e:10}" ), frs1.simplifiedQuery( BSONObj() ) );
+ }
+ };
+
+ class MultiKeyIntersect {
+ public:
+ void run() {
+ FieldRangeSet frs1( "", BSONObj(), false );
+ FieldRangeSet frs2( "", BSON( "a" << GT << 4 ), false );
+ FieldRangeSet frs3( "", BSON( "a" << LT << 6 ), false );
+ // An intersection with a trivial range is allowed.
+ frs1 &= frs2;
+ ASSERT_EQUALS( frs2.simplifiedQuery( BSONObj() ), frs1.simplifiedQuery( BSONObj() ) );
+ // An intersection with a nontrivial range is not allowed, as it might prevent a valid
+ // multikey match.
+ frs1 &= frs3;
+ ASSERT_EQUALS( frs2.simplifiedQuery( BSONObj() ), frs1.simplifiedQuery( BSONObj() ) );
+ // Now intersect with a fully contained range.
+ FieldRangeSet frs4( "", BSON( "a" << GT << 6 ), false );
+ frs1 &= frs4;
+ ASSERT_EQUALS( frs4.simplifiedQuery( BSONObj() ), frs1.simplifiedQuery( BSONObj() ) );
+ }
+ };
+
+ class MultiKeyDiff {
+ public:
+ void run() {
+ FieldRangeSet frs1( "", BSON( "a" << GT << 4 ), false );
+ FieldRangeSet frs2( "", BSON( "a" << GT << 6 ), false );
+ // Range subtraction is no different for multikey ranges.
+ frs1 -= frs2;
+ ASSERT_EQUALS( BSON( "a" << GT << 4 << LTE << 6 ), frs1.simplifiedQuery( BSONObj() ) );
+ }
+ };
+
+ class MatchPossible {
+ public:
+ void run() {
+ FieldRangeSet frs1( "", BSON( "a" << GT << 4 ), true );
+ ASSERT( frs1.matchPossible() );
+ // Conflicting constraints invalid for a single key set.
+ FieldRangeSet frs2( "", BSON( "a" << GT << 4 << LT << 2 ), true );
+ ASSERT( !frs2.matchPossible() );
+                // Conflicting constraints do not make a match impossible for a multi key set.
+ FieldRangeSet frs3( "", BSON( "a" << GT << 4 << LT << 2 ), false );
+ ASSERT( frs3.matchPossible() );
+ }
+ };
+
+ class MatchPossibleForIndex {
+ public:
+ void run() {
+                // Conflicting constraints do not make a match impossible for a multi key set.
+ FieldRangeSet frs1( "", BSON( "a" << GT << 4 << LT << 2 ), false );
+ ASSERT( frs1.matchPossibleForIndex( BSON( "a" << 1 ) ) );
+                // Conflicting constraints make a match impossible for a single key set.
+ FieldRangeSet frs2( "", BSON( "a" << GT << 4 << LT << 2 ), true );
+ ASSERT( !frs2.matchPossibleForIndex( BSON( "a" << 1 ) ) );
+                // If the index does not include the conflicting field, the set is not single key invalid for it.
+ ASSERT( frs2.matchPossibleForIndex( BSON( "b" << 1 ) ) );
+                // If the key pattern is not a real index ( $natural or empty ), the set is not single key invalid.
+ ASSERT( frs2.matchPossibleForIndex( BSON( "$natural" << 1 ) ) );
+ ASSERT( frs2.matchPossibleForIndex( BSONObj() ) );
+ }
+ };
+
+ } // namespace FieldRangeSetTests
+
+ namespace FieldRangeSetPairTests {
+
+ class NoNontrivialRanges {
+ public:
+ void run() {
+ FieldRangeSetPair frsp1( "", BSONObj() );
+ ASSERT( frsp1.noNontrivialRanges() );
+ FieldRangeSetPair frsp2( "", BSON( "a" << 1 ) );
+ ASSERT( !frsp2.noNontrivialRanges() );
+ FieldRangeSetPair frsp3( "", BSON( "a" << GT << 1 ) );
+ ASSERT( !frsp3.noNontrivialRanges() );
+ // A single key invalid constraint is still nontrivial.
+ FieldRangeSetPair frsp4( "", BSON( "a" << GT << 1 << LT << 0 ) );
+ ASSERT( !frsp4.noNontrivialRanges() );
+ // Still nontrivial if multikey invalid.
+ frsp4 -= frsp4.frsForIndex( 0, -1 );
+ ASSERT( !frsp4.noNontrivialRanges() );
+ }
+ };
+
+ class MatchPossible {
+ public:
+ void run() {
+ // Match possible for simple query.
+ FieldRangeSetPair frsp1( "", BSON( "a" << 1 ) );
+ ASSERT( frsp1.matchPossible() );
+ // Match possible for single key invalid query.
+ FieldRangeSetPair frsp2( "", BSON( "a" << GT << 1 << LT << 0 ) );
+ ASSERT( frsp2.matchPossible() );
+ // Match not possible for multi key invalid query.
+ frsp1 -= frsp1.frsForIndex( 0, - 1 );
+ ASSERT( !frsp1.matchPossible() );
+ }
+ };
+
+ class IndexBase {
+ public:
+ IndexBase() : _ctx( ns() ) , indexNum_( 0 ) {
+ string err;
+ userCreateNS( ns(), BSONObj(), err, false );
+ }
+ ~IndexBase() {
+ if ( !nsd() )
+ return;
+ string s( ns() );
+ dropNS( s );
+ }
+ protected:
+ static const char *ns() { return "unittests.FieldRangeSetPairTests"; }
+ static NamespaceDetails *nsd() { return nsdetails( ns() ); }
+ IndexDetails *index( const BSONObj &key ) {
+ stringstream ss;
+ ss << indexNum_++;
+ string name = ss.str();
+ client_.resetIndexCache();
+ client_.ensureIndex( ns(), key, false, name.c_str() );
+ NamespaceDetails *d = nsd();
+ for( int i = 0; i < d->nIndexes; ++i ) {
+ if ( d->idx(i).keyPattern() == key /*indexName() == name*/ || ( d->idx(i).isIdIndex() && IndexDetails::isIdIndexPattern( key ) ) )
+ return &d->idx(i);
+ }
+ assert( false );
+ return 0;
+ }
+ int indexno( const BSONObj &key ) {
+ return nsd()->idxNo( *index(key) );
+ }
+ static DBDirectClient client_;
+ private:
+ dblock lk_;
+ Client::Context _ctx;
+ int indexNum_;
+ };
+ DBDirectClient IndexBase::client_;
+
+ class MatchPossibleForIndex : public IndexBase {
+ public:
+ void run() {
+ int a = indexno( BSON( "a" << 1 ) );
+ int b = indexno( BSON( "b" << 1 ) );
+ IndexBase::client_.insert( ns(), BSON( "a" << BSON_ARRAY( 1 << 2 ) << "b" << 1 ) );
+ // Valid ranges match possible for both indexes.
+ FieldRangeSetPair frsp1( ns(), BSON( "a" << GT << 1 << LT << 4 << "b" << GT << 1 << LT << 4 ) );
+ ASSERT( frsp1.matchPossibleForIndex( nsd(), a, BSON( "a" << 1 ) ) );
+ ASSERT( frsp1.matchPossibleForIndex( nsd(), b, BSON( "b" << 1 ) ) );
+ // Single key invalid range means match impossible for single key index.
+ FieldRangeSetPair frsp2( ns(), BSON( "a" << GT << 4 << LT << 1 << "b" << GT << 4 << LT << 1 ) );
+ ASSERT( frsp2.matchPossibleForIndex( nsd(), a, BSON( "a" << 1 ) ) );
+ ASSERT( !frsp2.matchPossibleForIndex( nsd(), b, BSON( "b" << 1 ) ) );
+ }
+ };
+
+ } // namespace FieldRangeSetPairTests
+
+ class All : public Suite {
+ public:
+ All() : Suite( "queryutil" ) {}
+
+ void setupTests() {
+ add< FieldRangeTests::Empty >();
+ add< FieldRangeTests::Eq >();
+ add< FieldRangeTests::DupEq >();
+ add< FieldRangeTests::Lt >();
+ add< FieldRangeTests::Lte >();
+ add< FieldRangeTests::Gt >();
+ add< FieldRangeTests::Gte >();
+ add< FieldRangeTests::TwoLt >();
+ add< FieldRangeTests::TwoGt >();
+ add< FieldRangeTests::EqGte >();
+ add< FieldRangeTests::EqGteInvalid >();
+ add< FieldRangeTests::Regex >();
+ add< FieldRangeTests::RegexObj >();
+ add< FieldRangeTests::UnhelpfulRegex >();
+ add< FieldRangeTests::In >();
+ add< FieldRangeTests::Equality >();
+ add< FieldRangeTests::SimplifiedQuery >();
+ add< FieldRangeTests::QueryPatternTest >();
+ add< FieldRangeTests::NoWhere >();
+ add< FieldRangeTests::Numeric >();
+ add< FieldRangeTests::InLowerBound >();
+ add< FieldRangeTests::InUpperBound >();
+ add< FieldRangeTests::UnionBound >();
+ add< FieldRangeTests::MultiBound >();
+ add< FieldRangeTests::Diff1 >();
+ add< FieldRangeTests::Diff2 >();
+ add< FieldRangeTests::Diff3 >();
+ add< FieldRangeTests::Diff4 >();
+ add< FieldRangeTests::Diff5 >();
+ add< FieldRangeTests::Diff6 >();
+ add< FieldRangeTests::Diff7 >();
+ add< FieldRangeTests::Diff8 >();
+ add< FieldRangeTests::Diff9 >();
+ add< FieldRangeTests::Diff10 >();
+ add< FieldRangeTests::Diff11 >();
+ add< FieldRangeTests::Diff12 >();
+ add< FieldRangeTests::Diff13 >();
+ add< FieldRangeTests::Diff14 >();
+ add< FieldRangeTests::Diff15 >();
+ add< FieldRangeTests::Diff16 >();
+ add< FieldRangeTests::Diff17 >();
+ add< FieldRangeTests::Diff18 >();
+ add< FieldRangeTests::Diff19 >();
+ add< FieldRangeTests::Diff20 >();
+ add< FieldRangeTests::Diff21 >();
+ add< FieldRangeTests::Diff22 >();
+ add< FieldRangeTests::Diff23 >();
+ add< FieldRangeTests::Diff24 >();
+ add< FieldRangeTests::Diff25 >();
+ add< FieldRangeTests::Diff26 >();
+ add< FieldRangeTests::Diff27 >();
+ add< FieldRangeTests::Diff28 >();
+ add< FieldRangeTests::Diff29 >();
+ add< FieldRangeTests::Diff30 >();
+ add< FieldRangeTests::Diff31 >();
+ add< FieldRangeTests::Diff32 >();
+ add< FieldRangeTests::Diff33 >();
+ add< FieldRangeTests::Diff34 >();
+ add< FieldRangeTests::Diff35 >();
+ add< FieldRangeTests::Diff36 >();
+ add< FieldRangeTests::Diff37 >();
+ add< FieldRangeTests::Diff38 >();
+ add< FieldRangeTests::Diff39 >();
+ add< FieldRangeTests::Diff40 >();
+ add< FieldRangeTests::Diff41 >();
+ add< FieldRangeTests::Diff42 >();
+ add< FieldRangeTests::Diff43 >();
+ add< FieldRangeTests::Diff44 >();
+ add< FieldRangeTests::Diff45 >();
+ add< FieldRangeTests::Diff46 >();
+ add< FieldRangeTests::Diff47 >();
+ add< FieldRangeTests::Diff48 >();
+ add< FieldRangeTests::Diff49 >();
+ add< FieldRangeTests::Diff50 >();
+ add< FieldRangeTests::Diff51 >();
+ add< FieldRangeTests::Diff52 >();
+ add< FieldRangeTests::Diff53 >();
+ add< FieldRangeTests::Diff54 >();
+ add< FieldRangeTests::Diff55 >();
+ add< FieldRangeTests::Diff56 >();
+ add< FieldRangeTests::Diff57 >();
+ add< FieldRangeTests::Diff58 >();
+ add< FieldRangeTests::Diff59 >();
+ add< FieldRangeTests::Diff60 >();
+ add< FieldRangeTests::Diff61 >();
+ add< FieldRangeTests::Diff62 >();
+ add< FieldRangeTests::Diff63 >();
+ add< FieldRangeTests::Diff64 >();
+ add< FieldRangeTests::DiffMulti1 >();
+ add< FieldRangeTests::DiffMulti2 >();
+ add< FieldRangeSetTests::Intersect >();
+ add< FieldRangeSetTests::MultiKeyIntersect >();
+ add< FieldRangeSetTests::MultiKeyDiff >();
+ add< FieldRangeSetTests::MatchPossible >();
+ add< FieldRangeSetTests::MatchPossibleForIndex >();
+ add< FieldRangeSetPairTests::NoNontrivialRanges >();
+ add< FieldRangeSetPairTests::MatchPossible >();
+ add< FieldRangeSetPairTests::MatchPossibleForIndex >();
+ }
+ } myall;
+
+} // namespace QueryUtilTests
+
diff --git a/src/mongo/dbtests/replsettests.cpp b/src/mongo/dbtests/replsettests.cpp
new file mode 100644
index 00000000000..c1fca3b1ad6
--- /dev/null
+++ b/src/mongo/dbtests/replsettests.cpp
@@ -0,0 +1,227 @@
+// replsettests.cpp : Unit tests for replica sets
+//
+
+/**
+ * Copyright (C) 2009 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../db/repl.h"
+
+#include "../db/db.h"
+#include "../db/instance.h"
+#include "../db/json.h"
+
+#include "dbtests.h"
+#include "../db/oplog.h"
+#include "../db/queryoptimizer.h"
+
+#include "../db/repl/rs.h"
+
+namespace mongo {
+ void createOplog();
+}
+
+namespace ReplSetTests {
+
+ class Base {
+ static DBDirectClient client_;
+ public:
+ Base() {
+ cmdLine._replSet = "foo";
+ cmdLine.oplogSize = 5;
+ createOplog();
+ }
+
+ static const char *ns() {
+ return "unittests.repltests";
+ }
+
+ DBDirectClient *client() const { return &client_; }
+
+ static void insert( const BSONObj &o, bool god = false ) {
+ dblock lk;
+ Client::Context ctx( ns() );
+ theDataFileMgr.insert( ns(), o.objdata(), o.objsize(), god );
+ }
+ BSONObj findOne( const BSONObj &query = BSONObj() ) const {
+ return client()->findOne( ns(), query );
+ }
+ };
+ DBDirectClient Base::client_;
+
+
+ class MockInitialSync : public replset::InitialSync {
+ int step;
+ public:
+ MockInitialSync() : replset::InitialSync(""), step(0), failOnStep(SUCCEED), retry(true) {}
+
+ enum FailOn {SUCCEED, FAIL_FIRST_APPLY, FAIL_BOTH_APPLY};
+
+ FailOn failOnStep;
+ bool retry;
+
+ // instead of actually applying operations, we return success or failure
+ virtual bool syncApply(const BSONObj& o) {
+ step++;
+
+ if ((failOnStep == FAIL_FIRST_APPLY && step == 1) ||
+ (failOnStep == FAIL_BOTH_APPLY)) {
+ return false;
+ }
+
+ return true;
+ }
+
+ virtual bool shouldRetry(const BSONObj& o) {
+ return retry;
+ }
+ };
+
+ class TestInitApplyOp : public Base {
+ public:
+ void run() {
+ writelock lk("");
+
+ OpTime o1 = OpTime::now();
+ OpTime o2 = OpTime::now();
+
+ BSONObjBuilder b;
+ b.appendTimestamp("ts", o2.asLL());
+ BSONObj obj = b.obj();
+
+ MockInitialSync mock;
+
+ // all three should succeed
+ mock.applyOp(obj, o1);
+
+ mock.failOnStep = MockInitialSync::FAIL_FIRST_APPLY;
+ mock.applyOp(obj, o1);
+
+ mock.retry = false;
+ mock.applyOp(obj, o1);
+
+ // force failure
+ MockInitialSync mock2;
+ mock2.failOnStep = MockInitialSync::FAIL_BOTH_APPLY;
+
+ ASSERT_THROWS(mock2.applyOp(obj, o2), UserException);
+ }
+ };
+
+ class SyncTest2 : public replset::InitialSync {
+ public:
+ bool insertOnRetry;
+ SyncTest2() : replset::InitialSync(""), insertOnRetry(false) {}
+ virtual ~SyncTest2() {}
+ virtual bool shouldRetry(const BSONObj& o) {
+ if (!insertOnRetry) {
+ return true;
+ }
+
+ Base::insert(BSON("_id" << 123));
+ return true;
+ }
+ };
+
+ class TestInitApplyOp2 : public Base {
+ public:
+ void run() {
+ writelock lk("");
+
+ OpTime o1 = OpTime::now();
+ OpTime o2 = OpTime::now();
+
+ BSONObjBuilder b;
+ b.appendTimestamp("ts", o2.asLL());
+ b.append("op", "u");
+ b.append("o", BSON("$set" << BSON("x" << 456)));
+ b.append("o2", BSON("_id" << 123));
+ b.append("ns", ns());
+ BSONObj obj = b.obj();
+
+ SyncTest2 sync;
+ ASSERT_THROWS(sync.applyOp(obj, o1), UserException);
+
+ sync.insertOnRetry = true;
+ // succeeds
+ sync.applyOp(obj, o1);
+
+ BSONObj fin = findOne();
+ assert(fin["x"].Number() == 456);
+ }
+ };
+
+ class CappedInitialSync : public Base {
+ string _ns;
+ dblock lk;
+ Client::Context _context;
+
+ string spec() const {
+ return "{\"capped\":true,\"size\":512}";
+ }
+
+ void create() {
+ dblock lk;
+ string err;
+ ASSERT(userCreateNS( _ns.c_str(), fromjson( spec() ), err, false ));
+ }
+
+ void drop() {
+ string s( _ns );
+ string errmsg;
+ BSONObjBuilder result;
+ dropCollection( s, errmsg, result );
+ }
+ public:
+ CappedInitialSync() : _ns("unittests.foo.bar"), _context(_ns) {
+ if (nsdetails(_ns.c_str()) != NULL) {
+ drop();
+ }
+ }
+ ~CappedInitialSync() {
+ if ( nsdetails(_ns.c_str()) == NULL )
+ return;
+ drop();
+ }
+
+ void run() {
+ create();
+
+ BSONObjBuilder b;
+ b.appendTimestamp("ts", OpTime::now().asLL());
+ b.append("op", "u");
+ b.append("o", BSON("$set" << BSON("x" << 456)));
+ b.append("o2", BSON("_id" << 123 << "x" << 123));
+ b.append("ns", _ns);
+
+            // In an annoying twist of the API, applyOperation_inlock() returns true on failure.
+ assert(applyOperation_inlock(b.obj(), true));
+ }
+ };
+
+
+ class All : public Suite {
+ public:
+ All() : Suite( "replset" ) {
+ }
+
+ void setupTests() {
+ add< TestInitApplyOp >();
+ add< TestInitApplyOp2 >();
+ add< CappedInitialSync >();
+ }
+ } myall;
+}
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
new file mode 100644
index 00000000000..86288ad9426
--- /dev/null
+++ b/src/mongo/dbtests/repltests.cpp
@@ -0,0 +1,1228 @@
+// repltests.cpp : Unit tests for replication
+//
+
+/**
+ * Copyright (C) 2009 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../db/repl.h"
+
+#include "../db/db.h"
+#include "../db/instance.h"
+#include "../db/json.h"
+
+#include "dbtests.h"
+#include "../db/oplog.h"
+#include "../db/queryoptimizer.h"
+
+#include "../db/repl/rs.h"
+
+namespace mongo {
+ void createOplog();
+}
+
+namespace ReplTests {
+
+ BSONObj f( const char *s ) {
+ return fromjson( s );
+ }
+
+ class Base {
+ dblock lk;
+ Client::Context _context;
+ public:
+ Base() : _context( ns() ) {
+ replSettings.master = true;
+ createOplog();
+ ensureHaveIdIndex( ns() );
+ }
+ ~Base() {
+ try {
+ replSettings.master = false;
+ deleteAll( ns() );
+ deleteAll( cllNS() );
+ }
+ catch ( ... ) {
+ FAIL( "Exception while cleaning up test" );
+ }
+ }
+ protected:
+ static const char *ns() {
+ return "unittests.repltests";
+ }
+ static const char *cllNS() {
+ return "local.oplog.$main";
+ }
+ DBDirectClient *client() const { return &client_; }
+ BSONObj one( const BSONObj &query = BSONObj() ) const {
+ return client()->findOne( ns(), query );
+ }
+ void checkOne( const BSONObj &o ) const {
+ check( o, one( o ) );
+ }
+ void checkAll( const BSONObj &o ) const {
+ auto_ptr< DBClientCursor > c = client()->query( ns(), o );
+ assert( c->more() );
+ while( c->more() ) {
+ check( o, c->next() );
+ }
+ }
+ void check( const BSONObj &expected, const BSONObj &got ) const {
+ if ( expected.woCompare( got ) ) {
+ out() << "expected: " << expected.toString()
+ << ", got: " << got.toString() << endl;
+ }
+ ASSERT_EQUALS( expected , got );
+ }
+ BSONObj oneOp() const {
+ return client()->findOne( cllNS(), BSONObj() );
+ }
+ int count() const {
+ int count = 0;
+ dblock lk;
+ Client::Context ctx( ns() );
+ boost::shared_ptr<Cursor> c = theDataFileMgr.findAll( ns() );
+ for(; c->ok(); c->advance(), ++count ) {
+// cout << "obj: " << c->current().toString() << endl;
+ }
+ return count;
+ }
+ static int opCount() {
+ dblock lk;
+ Client::Context ctx( cllNS() );
+ int count = 0;
+ for( boost::shared_ptr<Cursor> c = theDataFileMgr.findAll( cllNS() ); c->ok(); c->advance() )
+ ++count;
+ return count;
+ }
+ static void applyAllOperations() {
+ dblock lk;
+ vector< BSONObj > ops;
+ {
+ Client::Context ctx( cllNS() );
+ for( boost::shared_ptr<Cursor> c = theDataFileMgr.findAll( cllNS() ); c->ok(); c->advance() )
+ ops.push_back( c->current() );
+ }
+ {
+ Client::Context ctx( ns() );
+ BSONObjBuilder b;
+ b.append("host", "localhost");
+ b.appendTimestamp("syncedTo", 0);
+ ReplSource a(b.obj());
+ for( vector< BSONObj >::iterator i = ops.begin(); i != ops.end(); ++i ) {
+ a.applyOperation( *i );
+ }
+ }
+ }
+ static void printAll( const char *ns ) {
+ dblock lk;
+ Client::Context ctx( ns );
+ boost::shared_ptr<Cursor> c = theDataFileMgr.findAll( ns );
+ vector< DiskLoc > toDelete;
+ out() << "all for " << ns << endl;
+ for(; c->ok(); c->advance() ) {
+ out() << c->current().toString() << endl;
+ }
+ }
+ // These deletes don't get logged.
+ static void deleteAll( const char *ns ) {
+ dblock lk;
+ Client::Context ctx( ns );
+ boost::shared_ptr<Cursor> c = theDataFileMgr.findAll( ns );
+ vector< DiskLoc > toDelete;
+ for(; c->ok(); c->advance() ) {
+ toDelete.push_back( c->currLoc() );
+ }
+ for( vector< DiskLoc >::iterator i = toDelete.begin(); i != toDelete.end(); ++i ) {
+ theDataFileMgr.deleteRecord( ns, i->rec(), *i, true );
+ }
+ }
+ static void insert( const BSONObj &o, bool god = false ) {
+ dblock lk;
+ Client::Context ctx( ns() );
+ theDataFileMgr.insert( ns(), o.objdata(), o.objsize(), god );
+ }
+ static BSONObj wid( const char *json ) {
+            BSONObjBuilder b;
+ OID id;
+ id.init();
+ b.appendOID( "_id", &id );
+ b.appendElements( fromjson( json ) );
+ return b.obj();
+ }
+ private:
+ static DBDirectClient client_;
+ };
+ DBDirectClient Base::client_;
+
+ class LogBasic : public Base {
+ public:
+ void run() {
+ ASSERT_EQUALS( 1, opCount() );
+ client()->insert( ns(), fromjson( "{\"a\":\"b\"}" ) );
+ ASSERT_EQUALS( 2, opCount() );
+ }
+ };
+
+ namespace Idempotence {
+
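+        // Each test performs an operation, checks the result, then replays the logged
+        // operations (twice over) and checks that the collection state is unchanged.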
+ class Base : public ReplTests::Base {
+ public:
+ virtual ~Base() {}
+ void run() {
+ reset();
+ doIt();
+ int nOps = opCount();
+ check();
+ applyAllOperations();
+ check();
+ ASSERT_EQUALS( nOps, opCount() );
+
+ reset();
+ applyAllOperations();
+ check();
+ ASSERT_EQUALS( nOps, opCount() );
+ applyAllOperations();
+ check();
+ ASSERT_EQUALS( nOps, opCount() );
+ }
+ protected:
+ virtual void doIt() const = 0;
+ virtual void check() const = 0;
+ virtual void reset() const = 0;
+ };
+
+ class InsertTimestamp : public Base {
+ public:
+ void doIt() const {
+ BSONObjBuilder b;
+ b.append( "a", 1 );
+ b.appendTimestamp( "t" );
+ client()->insert( ns(), b.done() );
+ date_ = client()->findOne( ns(), QUERY( "a" << 1 ) ).getField( "t" ).date();
+ }
+ void check() const {
+ BSONObj o = client()->findOne( ns(), QUERY( "a" << 1 ) );
+ ASSERT( 0 != o.getField( "t" ).date() );
+ ASSERT_EQUALS( date_, o.getField( "t" ).date() );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ }
+ private:
+ mutable Date_t date_;
+ };
+
+ class InsertAutoId : public Base {
+ public:
+ InsertAutoId() : o_( fromjson( "{\"a\":\"b\"}" ) ) {}
+ void doIt() const {
+ client()->insert( ns(), o_ );
+ }
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ }
+ protected:
+ BSONObj o_;
+ };
+
+ class InsertWithId : public InsertAutoId {
+ public:
+ InsertWithId() {
+ o_ = fromjson( "{\"_id\":ObjectId(\"0f0f0f0f0f0f0f0f0f0f0f0f\"),\"a\":\"b\"}" );
+ }
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ checkOne( o_ );
+ }
+ };
+
+ class InsertTwo : public Base {
+ public:
+ InsertTwo() :
+ o_( fromjson( "{'_id':1,a:'b'}" ) ),
+ t_( fromjson( "{'_id':2,c:'d'}" ) ) {}
+ void doIt() const {
+ vector< BSONObj > v;
+ v.push_back( o_ );
+ v.push_back( t_ );
+ client()->insert( ns(), v );
+ }
+ void check() const {
+ ASSERT_EQUALS( 2, count() );
+ checkOne( o_ );
+ checkOne( t_ );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ }
+ private:
+ BSONObj o_;
+ BSONObj t_;
+ };
+
+ class InsertTwoIdentical : public Base {
+ public:
+ InsertTwoIdentical() : o_( fromjson( "{\"a\":\"b\"}" ) ) {}
+ void doIt() const {
+ client()->insert( ns(), o_ );
+ client()->insert( ns(), o_ );
+ }
+ void check() const {
+ ASSERT_EQUALS( 2, count() );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ }
+ private:
+ BSONObj o_;
+ };
+
+ class UpdateTimestamp : public Base {
+ public:
+ void doIt() const {
+ BSONObjBuilder b;
+ b.append( "_id", 1 );
+ b.appendTimestamp( "t" );
+ client()->update( ns(), BSON( "_id" << 1 ), b.done() );
+ date_ = client()->findOne( ns(), QUERY( "_id" << 1 ) ).getField( "t" ).date();
+ }
+ void check() const {
+ BSONObj o = client()->findOne( ns(), QUERY( "_id" << 1 ) );
+ ASSERT( 0 != o.getField( "t" ).date() );
+ ASSERT_EQUALS( date_, o.getField( "t" ).date() );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( BSON( "_id" << 1 ) );
+ }
+ private:
+ mutable Date_t date_;
+ };
+
+ class UpdateSameField : public Base {
+ public:
+ UpdateSameField() :
+ q_( fromjson( "{a:'b'}" ) ),
+ o1_( wid( "{a:'b'}" ) ),
+ o2_( wid( "{a:'b'}" ) ),
+ u_( fromjson( "{a:'c'}" ) ) {}
+ void doIt() const {
+ client()->update( ns(), q_, u_ );
+ }
+ void check() const {
+ ASSERT_EQUALS( 2, count() );
+ ASSERT( !client()->findOne( ns(), q_ ).isEmpty() );
+ ASSERT( !client()->findOne( ns(), u_ ).isEmpty() );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( o1_ );
+ insert( o2_ );
+ }
+ private:
+ BSONObj q_, o1_, o2_, u_;
+ };
+
+ class UpdateSameFieldWithId : public Base {
+ public:
+ UpdateSameFieldWithId() :
+ o_( fromjson( "{'_id':1,a:'b'}" ) ),
+ q_( fromjson( "{a:'b'}" ) ),
+ u_( fromjson( "{'_id':1,a:'c'}" ) ) {}
+ void doIt() const {
+ client()->update( ns(), q_, u_ );
+ }
+ void check() const {
+ ASSERT_EQUALS( 2, count() );
+ ASSERT( !client()->findOne( ns(), q_ ).isEmpty() );
+ ASSERT( !client()->findOne( ns(), u_ ).isEmpty() );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( o_ );
+ insert( fromjson( "{'_id':2,a:'b'}" ) );
+ }
+ private:
+ BSONObj o_, q_, u_;
+ };
+
+ class UpdateSameFieldExplicitId : public Base {
+ public:
+ UpdateSameFieldExplicitId() :
+ o_( fromjson( "{'_id':1,a:'b'}" ) ),
+ u_( fromjson( "{'_id':1,a:'c'}" ) ) {}
+ void doIt() const {
+ client()->update( ns(), o_, u_ );
+ }
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ checkOne( u_ );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( o_ );
+ }
+ protected:
+ BSONObj o_, u_;
+ };
+
+ class UpdateDifferentFieldExplicitId : public Base {
+ public:
+ UpdateDifferentFieldExplicitId() :
+ o_( fromjson( "{'_id':1,a:'b'}" ) ),
+ q_( fromjson( "{'_id':1}" ) ),
+ u_( fromjson( "{'_id':1,a:'c'}" ) ) {}
+ void doIt() const {
+ client()->update( ns(), q_, u_ );
+ }
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ checkOne( u_ );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( o_ );
+ }
+ protected:
+ BSONObj o_, q_, u_;
+ };
+
+ class UpsertUpdateNoMods : public UpdateDifferentFieldExplicitId {
+ void doIt() const {
+ client()->update( ns(), q_, u_, true );
+ }
+ };
+
+ class UpsertInsertNoMods : public InsertAutoId {
+ void doIt() const {
+ client()->update( ns(), fromjson( "{a:'c'}" ), o_, true );
+ }
+ };
+
+ class UpdateSet : public Base {
+ public:
+ UpdateSet() :
+ o_( fromjson( "{'_id':1,a:5}" ) ),
+ q_( fromjson( "{a:5}" ) ),
+ u_( fromjson( "{$set:{a:7}}" ) ),
+ ou_( fromjson( "{'_id':1,a:7}" ) ) {}
+ void doIt() const {
+ client()->update( ns(), q_, u_ );
+ }
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ checkOne( ou_ );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( o_ );
+ }
+ protected:
+ BSONObj o_, q_, u_, ou_;
+ };
+
+ class UpdateInc : public Base {
+ public:
+ UpdateInc() :
+ o_( fromjson( "{'_id':1,a:5}" ) ),
+ q_( fromjson( "{a:5}" ) ),
+ u_( fromjson( "{$inc:{a:3}}" ) ),
+ ou_( fromjson( "{'_id':1,a:8}" ) ) {}
+ void doIt() const {
+ client()->update( ns(), q_, u_ );
+ }
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ checkOne( ou_ );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( o_ );
+ }
+ protected:
+ BSONObj o_, q_, u_, ou_;
+ };
+
+ class UpdateInc2 : public Base {
+ public:
+ UpdateInc2() :
+ o_( fromjson( "{'_id':1,a:5}" ) ),
+ q_( fromjson( "{a:5}" ) ),
+ u_( fromjson( "{$inc:{a:3},$set:{x:5}}" ) ),
+ ou_( fromjson( "{'_id':1,a:8,x:5}" ) ) {}
+ void doIt() const {
+ client()->update( ns(), q_, u_ );
+ }
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ checkOne( ou_ );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( o_ );
+ }
+ protected:
+ BSONObj o_, q_, u_, ou_;
+ };
+
+ class IncEmbedded : public Base {
+ public:
+ IncEmbedded() :
+ o_( fromjson( "{'_id':1,a:{b:3},b:{b:1}}" ) ),
+ q_( fromjson( "{'_id':1}" ) ),
+ u_( fromjson( "{$inc:{'a.b':1,'b.b':1}}" ) ),
+ ou_( fromjson( "{'_id':1,a:{b:4},b:{b:2}}" ) )
+ {}
+ void doIt() const {
+ client()->update( ns(), q_, u_ );
+ }
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ checkOne( ou_ );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( o_ );
+ }
+ protected:
+ BSONObj o_, q_, u_, ou_;
+ };
+
+ class IncCreates : public Base {
+ public:
+ IncCreates() :
+ o_( fromjson( "{'_id':1}" ) ),
+ q_( fromjson( "{'_id':1}" ) ),
+ u_( fromjson( "{$inc:{'a':1}}" ) ),
+ ou_( fromjson( "{'_id':1,a:1}") )
+ {}
+ void doIt() const {
+ client()->update( ns(), q_, u_ );
+ }
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ checkOne( ou_ );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( o_ );
+ }
+ protected:
+ BSONObj o_, q_, u_, ou_;
+ };
+
+
+ class UpsertInsertIdMod : public Base {
+ public:
+ UpsertInsertIdMod() :
+ q_( fromjson( "{'_id':5,a:4}" ) ),
+ u_( fromjson( "{$inc:{a:3}}" ) ),
+ ou_( fromjson( "{'_id':5,a:7}" ) ) {}
+ void doIt() const {
+ client()->update( ns(), q_, u_, true );
+ }
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ checkOne( ou_ );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ }
+ protected:
+ BSONObj q_, u_, ou_;
+ };
+
+ class UpsertInsertSet : public Base {
+ public:
+ UpsertInsertSet() :
+ q_( fromjson( "{a:5}" ) ),
+ u_( fromjson( "{$set:{a:7}}" ) ),
+ ou_( fromjson( "{a:7}" ) ) {}
+ void doIt() const {
+ client()->update( ns(), q_, u_, true );
+ }
+ void check() const {
+ ASSERT_EQUALS( 2, count() );
+ ASSERT( !client()->findOne( ns(), ou_ ).isEmpty() );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':7,a:7}" ) );
+ }
+ protected:
+ BSONObj o_, q_, u_, ou_;
+ };
+
+ class UpsertInsertInc : public Base {
+ public:
+ UpsertInsertInc() :
+ q_( fromjson( "{a:5}" ) ),
+ u_( fromjson( "{$inc:{a:3}}" ) ),
+ ou_( fromjson( "{a:8}" ) ) {}
+ void doIt() const {
+ client()->update( ns(), q_, u_, true );
+ }
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ ASSERT( !client()->findOne( ns(), ou_ ).isEmpty() );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ }
+ protected:
+ BSONObj o_, q_, u_, ou_;
+ };
+
+ class MultiInc : public Base {
+ public:
+
+ string s() const {
+ stringstream ss;
+ auto_ptr<DBClientCursor> cc = client()->query( ns() , Query().sort( BSON( "_id" << 1 ) ) );
+ bool first = true;
+ while ( cc->more() ) {
+ if ( first ) first = false;
+ else ss << ",";
+
+ BSONObj o = cc->next();
+ ss << o["x"].numberInt();
+ }
+ return ss.str();
+ }
+
+ void doIt() const {
+ client()->insert( ns(), BSON( "_id" << 1 << "x" << 1 ) );
+ client()->insert( ns(), BSON( "_id" << 2 << "x" << 5 ) );
+
+ ASSERT_EQUALS( "1,5" , s() );
+
+ client()->update( ns() , BSON( "_id" << 1 ) , BSON( "$inc" << BSON( "x" << 1 ) ) );
+ ASSERT_EQUALS( "2,5" , s() );
+
+ client()->update( ns() , BSONObj() , BSON( "$inc" << BSON( "x" << 1 ) ) );
+ ASSERT_EQUALS( "3,5" , s() );
+
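+            // With multi:true, the $inc below applies to every matching document, not just the first.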
+ client()->update( ns() , BSONObj() , BSON( "$inc" << BSON( "x" << 1 ) ) , false , true );
+ check();
+ }
+
+ void check() const {
+ ASSERT_EQUALS( "4,6" , s() );
+ }
+
+ void reset() const {
+ deleteAll( ns() );
+ }
+ };
+
+ class UpdateWithoutPreexistingId : public Base {
+ public:
+ UpdateWithoutPreexistingId() :
+ o_( fromjson( "{a:5}" ) ),
+ u_( fromjson( "{a:5}" ) ),
+ ot_( fromjson( "{b:4}" ) ) {}
+ void doIt() const {
+ client()->update( ns(), o_, u_ );
+ }
+ void check() const {
+ ASSERT_EQUALS( 2, count() );
+ checkOne( u_ );
+ checkOne( ot_ );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( ot_, true );
+ insert( o_, true );
+ }
+ protected:
+ BSONObj o_, u_, ot_;
+ };
+
+ class Remove : public Base {
+ public:
+ Remove() :
+ o1_( f( "{\"_id\":\"010101010101010101010101\",\"a\":\"b\"}" ) ),
+ o2_( f( "{\"_id\":\"010101010101010101010102\",\"a\":\"b\"}" ) ),
+ q_( f( "{\"a\":\"b\"}" ) ) {}
+ void doIt() const {
+ client()->remove( ns(), q_ );
+ }
+ void check() const {
+ ASSERT_EQUALS( 0, count() );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( o1_ );
+ insert( o2_ );
+ }
+ protected:
+ BSONObj o1_, o2_, q_;
+ };
+
+ class RemoveOne : public Remove {
+ void doIt() const {
+ client()->remove( ns(), q_, true );
+ }
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ }
+ };
+
+ class FailingUpdate : public Base {
+ public:
+ FailingUpdate() :
+ o_( fromjson( "{'_id':1,a:'b'}" ) ),
+ u_( fromjson( "{'_id':1,c:'d'}" ) ) {}
+ void doIt() const {
+ client()->update( ns(), o_, u_ );
+ client()->insert( ns(), o_ );
+ }
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ checkOne( o_ );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ }
+ protected:
+ BSONObj o_, u_;
+ };
+
+ class SetNumToStr : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), BSON( "$set" << BSON( "a" << "bcd" ) ) );
+ }
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ checkOne( BSON( "_id" << 0 << "a" << "bcd" ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( BSON( "_id" << 0 << "a" << 4.0 ) );
+ }
+ };
+
+ class Push : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), BSON( "$push" << BSON( "a" << 5.0 ) ) );
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( fromjson( "{'_id':0,a:[4,5]}" ), one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0,a:[4]}" ) );
+ }
+ };
+
+ class PushUpsert : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), BSON( "$push" << BSON( "a" << 5.0 ) ), true );
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( fromjson( "{'_id':0,a:[4,5]}" ), one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0,a:[4]}" ) );
+ }
+ };
+
+ class MultiPush : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), BSON( "$push" << BSON( "a" << 5.0 ) << "$push" << BSON( "b.c" << 6.0 ) ) );
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( fromjson( "{'_id':0,a:[4,5],b:{c:[6]}}" ), one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0,a:[4]}" ) );
+ }
+ };
+
+ class EmptyPush : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), BSON( "$push" << BSON( "a" << 5.0 ) ) );
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( fromjson( "{'_id':0,a:[5]}" ), one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0}" ) );
+ }
+ };
+
+ class PushAll : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$pushAll:{a:[5.0,6.0]}}" ) );
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( fromjson( "{'_id':0,a:[4,5,6]}" ), one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0,a:[4]}" ) );
+ }
+ };
+
+ class PushAllUpsert : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$pushAll:{a:[5.0,6.0]}}" ), true );
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( fromjson( "{'_id':0,a:[4,5,6]}" ), one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0,a:[4]}" ) );
+ }
+ };
+
+ class EmptyPushAll : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$pushAll:{a:[5.0,6.0]}}" ) );
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( fromjson( "{'_id':0,a:[5,6]}" ), one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0}" ) );
+ }
+ };
+
+ class Pull : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), BSON( "$pull" << BSON( "a" << 4.0 ) ) );
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( fromjson( "{'_id':0,a:[5]}" ), one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0,a:[4,5]}" ) );
+ }
+ };
+
+ class PullNothing : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), BSON( "$pull" << BSON( "a" << 6.0 ) ) );
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( fromjson( "{'_id':0,a:[4,5]}" ), one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0,a:[4,5]}" ) );
+ }
+ };
+
+ class PullAll : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$pullAll:{a:[4,5]}}" ) );
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( fromjson( "{'_id':0,a:[6]}" ), one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0,a:[4,5,6]}" ) );
+ }
+ };
+
+ class Pop : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$pop:{a:1}}" ) );
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( fromjson( "{'_id':0,a:[4,5]}" ), one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0,a:[4,5,6]}" ) );
+ }
+ };
+
+ class PopReverse : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$pop:{a:-1}}" ) );
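+                // $pop with -1 removes the first array element ($pop with 1 removes the last).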
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( fromjson( "{'_id':0,a:[5,6]}" ), one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0,a:[4,5,6]}" ) );
+ }
+ };
+
+ class BitOp : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$bit:{a:{and:2,or:8}}}" ) );
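+                // The expected value below applies the 'and' mask before the 'or' mask: ( 3 & 2 ) | 8.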
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( BSON( "_id" << 0 << "a" << ( ( 3 & 2 ) | 8 ) ) , one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0,a:3}" ) );
+ }
+ };
+
+ class Rename : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$rename:{a:'b'}}" ) );
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$set:{a:50}}" ) );
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( BSON( "_id" << 0 << "a" << 50 << "b" << 3 ) , one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0,a:3}" ) );
+ }
+ };
+
+ class RenameReplace : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$rename:{a:'b'}}" ) );
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$set:{a:50}}" ) );
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( BSON( "_id" << 0 << "a" << 50 << "b" << 3 ) , one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0,a:3,b:100}" ) );
+ }
+ };
+
+ class RenameOverwrite : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$rename:{a:'b'}}" ) );
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( BSON( "_id" << 0 << "b" << 3 << "z" << 1 ) , one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0,z:1,a:3}" ) );
+ }
+ };
+
+ class NoRename : public Base {
+ public:
+ void doIt() const {
+ client()->update( ns(), BSON( "_id" << 0 ), fromjson( "{$rename:{c:'b'},$set:{z:1}}" ) );
+ }
+ using ReplTests::Base::check;
+ void check() const {
+ ASSERT_EQUALS( 1, count() );
+ check( BSON( "_id" << 0 << "a" << 3 << "z" << 1 ) , one( fromjson( "{'_id':0}" ) ) );
+ }
+ void reset() const {
+ deleteAll( ns() );
+ insert( fromjson( "{'_id':0,a:3}" ) );
+ }
+ };
+
+
+ } // namespace Idempotence
+
+ class DeleteOpIsIdBased : public Base {
+ public:
+ void run() {
+ insert( BSON( "_id" << 0 << "a" << 10 ) );
+ insert( BSON( "_id" << 1 << "a" << 11 ) );
+ insert( BSON( "_id" << 3 << "a" << 10 ) );
+ client()->remove( ns(), BSON( "a" << 10 ) );
+ ASSERT_EQUALS( 1U, client()->count( ns(), BSONObj() ) );
+ insert( BSON( "_id" << 0 << "a" << 11 ) );
+ insert( BSON( "_id" << 2 << "a" << 10 ) );
+ insert( BSON( "_id" << 3 << "a" << 10 ) );
+
+ applyAllOperations();
+ ASSERT_EQUALS( 2U, client()->count( ns(), BSONObj() ) );
+ ASSERT( !one( BSON( "_id" << 1 ) ).isEmpty() );
+ ASSERT( !one( BSON( "_id" << 2 ) ).isEmpty() );
+ }
+ };
+
+ class DatabaseIgnorerBasic {
+ public:
+ void run() {
+ DatabaseIgnorer d;
+ ASSERT( !d.ignoreAt( "a", OpTime( 4, 0 ) ) );
+ d.doIgnoreUntilAfter( "a", OpTime( 5, 0 ) );
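+            // Operations on "a" at or before OpTime( 5, 0 ) are now ignored; other databases are unaffected.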
+ ASSERT( d.ignoreAt( "a", OpTime( 4, 0 ) ) );
+ ASSERT( !d.ignoreAt( "b", OpTime( 4, 0 ) ) );
+ ASSERT( d.ignoreAt( "a", OpTime( 4, 10 ) ) );
+ ASSERT( d.ignoreAt( "a", OpTime( 5, 0 ) ) );
+ ASSERT( !d.ignoreAt( "a", OpTime( 5, 1 ) ) );
+ // Ignore state is expired.
+ ASSERT( !d.ignoreAt( "a", OpTime( 4, 0 ) ) );
+ }
+ };
+
+ class DatabaseIgnorerUpdate {
+ public:
+ void run() {
+ DatabaseIgnorer d;
+ d.doIgnoreUntilAfter( "a", OpTime( 5, 0 ) );
+ d.doIgnoreUntilAfter( "a", OpTime( 6, 0 ) );
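+            // A later doIgnoreUntilAfter() call extends the existing ignore window.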
+ ASSERT( d.ignoreAt( "a", OpTime( 5, 5 ) ) );
+ ASSERT( d.ignoreAt( "a", OpTime( 6, 0 ) ) );
+ ASSERT( !d.ignoreAt( "a", OpTime( 6, 1 ) ) );
+
+ d.doIgnoreUntilAfter( "a", OpTime( 5, 0 ) );
+ d.doIgnoreUntilAfter( "a", OpTime( 6, 0 ) );
+ d.doIgnoreUntilAfter( "a", OpTime( 6, 0 ) );
+ d.doIgnoreUntilAfter( "a", OpTime( 5, 0 ) );
+ ASSERT( d.ignoreAt( "a", OpTime( 5, 5 ) ) );
+ ASSERT( d.ignoreAt( "a", OpTime( 6, 0 ) ) );
+ ASSERT( !d.ignoreAt( "a", OpTime( 6, 1 ) ) );
+ }
+ };
+
+ /**
+ * Check against oldest document in the oplog before scanning backward
+ * from the newest document.
+ */
+ class FindingStartCursorStale : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 10; ++i ) {
+ client()->insert( ns(), BSON( "_id" << i ) );
+ }
+ dblock lk;
+ Client::Context ctx( cllNS() );
+ NamespaceDetails *nsd = nsdetails( cllNS() );
+ BSONObjBuilder b;
+ b.appendTimestamp( "$gte" );
+ BSONObj query = BSON( "ts" << b.obj() );
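+                // The $gte bound is an unset (zero) timestamp, so the scan should settle on the oldest oplog entry.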
+ FieldRangeSetPair frsp( cllNS(), query );
+ BSONObj order = BSON( "$natural" << 1 );
+ QueryPlan qp( nsd, -1, frsp, &frsp, query, order );
+ FindingStartCursor fsc( qp );
+ ASSERT( fsc.done() );
+ ASSERT_EQUALS( 0, fsc.cursor()->current()[ "o" ].Obj()[ "_id" ].Int() );
+ }
+ };
+
+ /** Check unsuccessful yield recovery with FindingStartCursor */
+ class FindingStartCursorYield : public Base {
+ public:
+ void run() {
+ for( int i = 0; i < 10; ++i ) {
+ client()->insert( ns(), BSON( "_id" << i ) );
+ }
+ Date_t ts = client()->query( "local.oplog.$main", Query().sort( BSON( "$natural" << 1 ) ), 1, 4 )->next()[ "ts" ].date();
+ Client::Context ctx( cllNS() );
+ NamespaceDetails *nsd = nsdetails( cllNS() );
+ BSONObjBuilder b;
+ b.appendDate( "$gte", ts );
+ BSONObj query = BSON( "ts" << b.obj() );
+ FieldRangeSetPair frsp( cllNS(), query );
+ BSONObj order = BSON( "$natural" << 1 );
+ QueryPlan qp( nsd, -1, frsp, &frsp, query, order );
+ FindingStartCursor fsc( qp );
+ ASSERT( !fsc.done() );
+ fsc.next();
+ ASSERT( !fsc.done() );
+ ASSERT( fsc.prepareToYield() );
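+                // Invalidating all cursors on the oplog namespace forces the subsequent yield recovery to fail.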
+ ClientCursor::invalidate( "local.oplog.$main" );
+ ASSERT_THROWS( fsc.recoverFromYield(), MsgAssertionException );
+ }
+ };
+
+ /** Check ReplSetConfig::MemberCfg equality */
+ class ReplSetMemberCfgEquality : public Base {
+ public:
+ void run() {
+ ReplSetConfig::MemberCfg m1, m2;
+ assert(m1 == m2);
+ m1.tags["x"] = "foo";
+ assert(m1 != m2);
+ m2.tags["y"] = "bar";
+ assert(m1 != m2);
+ m1.tags["y"] = "bar";
+ assert(m1 != m2);
+ m2.tags["x"] = "foo";
+ assert(m1 == m2);
+ m1.tags.clear();
+ assert(m1 != m2);
+ }
+ };
+
+ class SyncTest : public Sync {
+ public:
+ bool returnEmpty;
+ SyncTest() : Sync(""), returnEmpty(false) {}
+ virtual ~SyncTest() {}
+ virtual BSONObj getMissingDoc(const BSONObj& o) {
+            if (returnEmpty) {
+                return BSONObj();
+            }
+ return BSON("_id" << "on remote" << "foo" << "baz");
+ }
+ };
+
+ class ShouldRetry : public Base {
+ public:
+ void run() {
+ bool threw = false;
+ BSONObj o = BSON("ns" << ns() << "o" << BSON("foo" << "bar") << "o2" << BSON("_id" << "in oplog" << "foo" << "bar"));
+
+ // this should fail because we can't connect
+ try {
+ Sync badSource("localhost:123");
+ badSource.getMissingDoc(o);
+ }
+ catch (DBException&) {
+ threw = true;
+ }
+ assert(threw);
+
+ // now this should succeed
+ SyncTest t;
+ assert(t.shouldRetry(o));
+ assert(!client()->findOne(ns(), BSON("_id" << "on remote")).isEmpty());
+
+ // force it not to find an obj
+ t.returnEmpty = true;
+ assert(!t.shouldRetry(o));
+ }
+ };
+
+ class All : public Suite {
+ public:
+ All() : Suite( "repl" ) {
+ }
+
+ void setupTests() {
+ add< LogBasic >();
+ add< Idempotence::InsertTimestamp >();
+ add< Idempotence::InsertAutoId >();
+ add< Idempotence::InsertWithId >();
+ add< Idempotence::InsertTwo >();
+ add< Idempotence::InsertTwoIdentical >();
+ add< Idempotence::UpdateTimestamp >();
+ add< Idempotence::UpdateSameField >();
+ add< Idempotence::UpdateSameFieldWithId >();
+ add< Idempotence::UpdateSameFieldExplicitId >();
+ add< Idempotence::UpdateDifferentFieldExplicitId >();
+ add< Idempotence::UpsertUpdateNoMods >();
+ add< Idempotence::UpsertInsertNoMods >();
+ add< Idempotence::UpdateSet >();
+ add< Idempotence::UpdateInc >();
+ add< Idempotence::UpdateInc2 >();
+ add< Idempotence::IncEmbedded >(); // SERVER-716
+ add< Idempotence::IncCreates >(); // SERVER-717
+ add< Idempotence::UpsertInsertIdMod >();
+ add< Idempotence::UpsertInsertSet >();
+ add< Idempotence::UpsertInsertInc >();
+ add< Idempotence::MultiInc >();
+ // Don't worry about this until someone wants this functionality.
+// add< Idempotence::UpdateWithoutPreexistingId >();
+ add< Idempotence::Remove >();
+ add< Idempotence::RemoveOne >();
+ add< Idempotence::FailingUpdate >();
+ add< Idempotence::SetNumToStr >();
+ add< Idempotence::Push >();
+ add< Idempotence::PushUpsert >();
+ add< Idempotence::MultiPush >();
+ add< Idempotence::EmptyPush >();
+ add< Idempotence::PushAll >();
+ add< Idempotence::PushAllUpsert >();
+ add< Idempotence::EmptyPushAll >();
+ add< Idempotence::Pull >();
+ add< Idempotence::PullNothing >();
+ add< Idempotence::PullAll >();
+ add< Idempotence::Pop >();
+ add< Idempotence::PopReverse >();
+ add< Idempotence::BitOp >();
+ add< Idempotence::Rename >();
+ add< Idempotence::RenameReplace >();
+ add< Idempotence::RenameOverwrite >();
+ add< Idempotence::NoRename >();
+ add< DeleteOpIsIdBased >();
+ add< DatabaseIgnorerBasic >();
+ add< DatabaseIgnorerUpdate >();
+ add< FindingStartCursorStale >();
+ add< FindingStartCursorYield >();
+ add< ReplSetMemberCfgEquality >();
+ add< ShouldRetry >();
+ }
+ } myall;
+
+} // namespace ReplTests
+
diff --git a/src/mongo/dbtests/sharding.cpp b/src/mongo/dbtests/sharding.cpp
new file mode 100644
index 00000000000..19edd5537ab
--- /dev/null
+++ b/src/mongo/dbtests/sharding.cpp
@@ -0,0 +1,56 @@
+// sharding.cpp : some unit tests for sharding internals
+
+/**
+ * Copyright (C) 2009 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+
+#include "dbtests.h"
+
+#include "../client/parallel.h"
+
+namespace ShardingTests {
+
+ namespace serverandquerytests {
+ class test1 {
+ public:
+ void run() {
+ ServerAndQuery a( "foo:1" , BSON( "a" << GT << 0 << LTE << 100 ) );
+ ServerAndQuery b( "foo:1" , BSON( "a" << GT << 200 << LTE << 1000 ) );
+
+ ASSERT( a < b );
+ ASSERT( ! ( b < a ) );
+
+ set<ServerAndQuery> s;
+ s.insert( a );
+ s.insert( b );
+
+ ASSERT_EQUALS( (unsigned int)2 , s.size() );
+ }
+ };
+ }
+
+ class All : public Suite {
+ public:
+ All() : Suite( "sharding" ) {
+ }
+
+ void setupTests() {
+ add< serverandquerytests::test1 >();
+ }
+ } myall;
+
+}
diff --git a/src/mongo/dbtests/socktests.cpp b/src/mongo/dbtests/socktests.cpp
new file mode 100644
index 00000000000..176db8c8e95
--- /dev/null
+++ b/src/mongo/dbtests/socktests.cpp
@@ -0,0 +1,48 @@
+// socktests.cpp : sock.{h,cpp} unit tests.
+//
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../util/net/sock.h"
+#include "dbtests.h"
+
+namespace SockTests {
+
+ class HostByName {
+ public:
+ void run() {
+ ASSERT_EQUALS( "127.0.0.1", hostbyname( "localhost" ) );
+ ASSERT_EQUALS( "127.0.0.1", hostbyname( "127.0.0.1" ) );
+ // ASSERT_EQUALS( "::1", hostbyname( "::1" ) ); // IPv6 disabled at runtime by default.
+
+ HostAndPort h("asdfasdfasdf_no_such_host");
+            // this fails; uncomment when fixed.
+ ASSERT( !h.isSelf() );
+ }
+ };
+
+ class All : public Suite {
+ public:
+ All() : Suite( "sock" ) {}
+ void setupTests() {
+ add< HostByName >();
+ }
+ } myall;
+
+} // namespace SockTests
+
diff --git a/src/mongo/dbtests/spin_lock_test.cpp b/src/mongo/dbtests/spin_lock_test.cpp
new file mode 100644
index 00000000000..ed1f1ae1ca5
--- /dev/null
+++ b/src/mongo/dbtests/spin_lock_test.cpp
@@ -0,0 +1,114 @@
+// spin_lock_test.cpp : spin_lock.{h, cpp} unit tests
+
+/**
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include <boost/thread/thread.hpp>
+#include "dbtests.h"
+#include "../util/concurrency/spin_lock.h"
+#include "../util/timer.h"
+
+namespace {
+
+ using mongo::SpinLock;
+
+ class LockTester {
+ public:
+ LockTester( SpinLock* spin, int* counter )
+            : _spin(spin), _counter(counter), _requests(0), _t(NULL) {}
+
+ ~LockTester() {
+ delete _t;
+ }
+
+ void start( int increments ) {
+ _t = new boost::thread( boost::bind(&LockTester::test, this, increments) );
+ }
+
+ void join() {
+ if ( _t ) _t->join();
+ }
+
+ int requests() const {
+ return _requests;
+ }
+
+ private:
+ SpinLock* _spin; // not owned here
+ int* _counter; // not owned here
+ int _requests;
+ boost::thread* _t;
+
+ void test( int increments ) {
+ while ( increments-- > 0 ) {
+ _spin->lock();
+ ++(*_counter);
+ ++_requests;
+ _spin->unlock();
+ }
+ }
+
+ LockTester( LockTester& );
+ LockTester& operator=( LockTester& );
+ };
+
+ class ConcurrentIncs {
+ public:
+ void run() {
+
+ SpinLock spin;
+ int counter = 0;
+
+ const int threads = 64;
+ const int incs = 50000;
+ LockTester* testers[threads];
+
+ Timer timer;
+
+ for ( int i = 0; i < threads; i++ ) {
+ testers[i] = new LockTester( &spin, &counter );
+ }
+ for ( int i = 0; i < threads; i++ ) {
+ testers[i]->start( incs );
+ }
+ for ( int i = 0; i < threads; i++ ) {
+ testers[i]->join();
+ ASSERT_EQUALS( testers[i]->requests(), incs );
+ delete testers[i];
+ }
+
+ int ms = timer.millis();
+ log() << "spinlock ConcurrentIncs time: " << ms << endl;
+
+ ASSERT_EQUALS( counter, threads*incs );
+#if defined(__linux__)
+ ASSERT( SpinLock::isfast() );
+#endif
+
+ }
+ };
+
+ class SpinLockSuite : public Suite {
+ public:
+ SpinLockSuite() : Suite( "spinlock" ) {}
+
+ void setupTests() {
+ add< ConcurrentIncs >();
+ }
+ } spinLockSuite;
+
+} // anonymous namespace
diff --git a/src/mongo/dbtests/test.sln b/src/mongo/dbtests/test.sln
new file mode 100755
index 00000000000..3a1b741c716
--- /dev/null
+++ b/src/mongo/dbtests/test.sln
@@ -0,0 +1,26 @@
+
+Microsoft Visual Studio Solution File, Format Version 11.00
+# Visual Studio 2010
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test", "test.vcxproj", "{215B2D68-0A70-4D10-8E75-B33010C62A91}"
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|Win32 = Debug|Win32
+ Debug|x64 = Debug|x64
+ Release|Win32 = Release|Win32
+ Release|x64 = Release|x64
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Debug|Win32.ActiveCfg = Debug|Win32
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Debug|Win32.Build.0 = Debug|Win32
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Debug|x64.ActiveCfg = Debug|x64
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Debug|x64.Build.0 = Debug|x64
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Release|Win32.ActiveCfg = Release|Win32
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Release|Win32.Build.0 = Release|Win32
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Release|x64.ActiveCfg = Release|x64
+ {215B2D68-0A70-4D10-8E75-B33010C62A91}.Release|x64.Build.0 = Release|x64
+ EndGlobalSection
+ GlobalSection(SolutionProperties) = preSolution
+ HideSolutionNode = FALSE
+ EndGlobalSection
+EndGlobal
diff --git a/src/mongo/dbtests/test.vcxproj b/src/mongo/dbtests/test.vcxproj
new file mode 100644
index 00000000000..c5d1aad61e9
--- /dev/null
+++ b/src/mongo/dbtests/test.vcxproj
@@ -0,0 +1,776 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup Label="ProjectConfigurations">
+ <ProjectConfiguration Include="Debug|Win32">
+ <Configuration>Debug</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Debug|x64">
+ <Configuration>Debug</Configuration>
+ <Platform>x64</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|Win32">
+ <Configuration>Release</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|x64">
+ <Configuration>Release</Configuration>
+ <Platform>x64</Platform>
+ </ProjectConfiguration>
+ </ItemGroup>
+ <PropertyGroup Label="Globals">
+ <ProjectGuid>{215B2D68-0A70-4D10-8E75-B33010C62A91}</ProjectGuid>
+ <RootNamespace>dbtests</RootNamespace>
+ <Keyword>Win32Proj</Keyword>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <CharacterSet>Unicode</CharacterSet>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <CharacterSet>Unicode</CharacterSet>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseOfMfc>false</UseOfMfc>
+ <UseOfAtl>false</UseOfAtl>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseOfMfc>false</UseOfMfc>
+ <UseOfAtl>false</UseOfAtl>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+ <ImportGroup Label="ExtensionSettings">
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup>
+ <_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(SolutionDir)$(Configuration)\</OutDir>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(SolutionDir)$(Configuration)\</OutDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(Configuration)\</IntDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(Configuration)\</IntDir>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">false</LinkIncremental>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</LinkIncremental>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(SolutionDir)$(Configuration)\</OutDir>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(SolutionDir)$(Configuration)\</OutDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(Configuration)\</IntDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(Configuration)\</IntDir>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">false</LinkIncremental>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|x64'">false</LinkIncremental>
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
+ <IncludePath Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">..;$(IncludePath)</IncludePath>
+ <IncludePath Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">..;$(IncludePath)</IncludePath>
+ <IncludePath Condition="'$(Configuration)|$(Platform)'=='Release|x64'">..;$(IncludePath)</IncludePath>
+ <IncludePath Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">..;$(IncludePath)</IncludePath>
+ </PropertyGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <ClCompile>
+ <Optimization>Disabled</Optimization>
+ <AdditionalIncludeDirectories>..\..\js\src;..\third_party\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <PreprocessorDefinitions>_UNICODE;UNICODE;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <MinimalRebuild>No</MinimalRebuild>
+ <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
+ <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
+ <PrecompiledHeader>Use</PrecompiledHeader>
+ <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+ <DisableSpecificWarnings>4355;4800;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ </ClCompile>
+ <Link>
+ <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>c:\boost\lib\vs2010_32;\boost\lib\vs2010_32;\boost\lib</AdditionalLibraryDirectories>
+ <IgnoreAllDefaultLibraries>false</IgnoreAllDefaultLibraries>
+ <IgnoreSpecificDefaultLibraries>%(IgnoreSpecificDefaultLibraries)</IgnoreSpecificDefaultLibraries>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ <TargetMachine>MachineX86</TargetMachine>
+ <Profile>true</Profile>
+ </Link>
+ <PreBuildEvent>
+ <Command>cscript //Nologo ..\shell\msvc\createCPPfromJavaScriptFiles.js "$(ProjectDir).."</Command>
+ <Message>Create mongo.cpp and mongo-server.cpp from JavaScript source files</Message>
+ </PreBuildEvent>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ <ClCompile>
+ <Optimization>Disabled</Optimization>
+ <AdditionalIncludeDirectories>..\..\js\src;..\third_party\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <PreprocessorDefinitions>_DURABLE;_UNICODE;UNICODE;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
+ <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
+ <PrecompiledHeader>Use</PrecompiledHeader>
+ <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+ <DisableSpecificWarnings>4355;4800;4267;4244;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ <MinimalRebuild>No</MinimalRebuild>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ </ClCompile>
+ <Link>
+ <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>c:\boost\lib\vs2010_64;\boost\lib\vs2010_64;\boost\lib</AdditionalLibraryDirectories>
+ <IgnoreAllDefaultLibraries>false</IgnoreAllDefaultLibraries>
+ <IgnoreSpecificDefaultLibraries>%(IgnoreSpecificDefaultLibraries)</IgnoreSpecificDefaultLibraries>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ </Link>
+ <PreBuildEvent>
+ <Command>cscript //Nologo ..\shell\msvc\createCPPfromJavaScriptFiles.js "$(ProjectDir).."</Command>
+ <Message>Create mongo.cpp and mongo-server.cpp from JavaScript source files</Message>
+ </PreBuildEvent>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <ClCompile>
+ <Optimization>MaxSpeed</Optimization>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <AdditionalIncludeDirectories>..\..\js\src;..\third_party\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <PreprocessorDefinitions>_UNICODE;UNICODE;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <PrecompiledHeader>Use</PrecompiledHeader>
+ <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+ <DisableSpecificWarnings>4355;4800;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ <MinimalRebuild>No</MinimalRebuild>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ </ClCompile>
+ <Link>
+ <AdditionalDependencies>ws2_32.lib;psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>c:\boost\lib\vs2010_32;\boost\lib\vs2010_32;\boost\lib</AdditionalLibraryDirectories>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ <OptimizeReferences>true</OptimizeReferences>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <TargetMachine>MachineX86</TargetMachine>
+ </Link>
+ <PreBuildEvent>
+ <Command>cscript //Nologo ..\shell\msvc\createCPPfromJavaScriptFiles.js "$(ProjectDir).."</Command>
+ <Message>Create mongo.cpp and mongo-server.cpp from JavaScript source files</Message>
+ </PreBuildEvent>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ <ClCompile>
+ <Optimization>MaxSpeed</Optimization>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <AdditionalIncludeDirectories>..\..\js\src;..\third_party\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <PreprocessorDefinitions>_UNICODE;UNICODE;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <PrecompiledHeader>Use</PrecompiledHeader>
+ <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+ <DisableSpecificWarnings>4355;4800;4267;4244;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ <MinimalRebuild>No</MinimalRebuild>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ </ClCompile>
+ <Link>
+ <AdditionalDependencies>ws2_32.lib;psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>c:\boost\lib\vs2010_64;\boost\lib\vs2010_64;\boost\lib</AdditionalLibraryDirectories>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ <OptimizeReferences>true</OptimizeReferences>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ </Link>
+ <PreBuildEvent>
+ <Command>cscript //Nologo ..\shell\msvc\createCPPfromJavaScriptFiles.js "$(ProjectDir).."</Command>
+ <Message>Create mongo.cpp and mongo-server.cpp from JavaScript source files</Message>
+ </PreBuildEvent>
+ </ItemDefinitionGroup>
+ <ItemGroup>
+ <ClInclude Include="..\..\boostw\boost_1_34_1\boost\config\auto_link.hpp" />
+ <ClInclude Include="..\bson\bson-inl.h" />
+ <ClInclude Include="..\bson\bson.h" />
+ <ClInclude Include="..\bson\bsonelement.h" />
+ <ClInclude Include="..\bson\bsonmisc.h" />
+ <ClInclude Include="..\bson\bsonobj.h" />
+ <ClInclude Include="..\bson\bsonobjbuilder.h" />
+ <ClInclude Include="..\bson\bsonobjiterator.h" />
+ <ClInclude Include="..\bson\bsontypes.h" />
+ <ClInclude Include="..\bson\bson_db.h" />
+ <ClInclude Include="..\bson\inline_decls.h" />
+ <ClInclude Include="..\bson\oid.h" />
+ <ClInclude Include="..\bson\ordering.h" />
+ <ClInclude Include="..\bson\stringdata.h" />
+ <ClInclude Include="..\client\dbclientmockcursor.h" />
+ <ClInclude Include="..\db\collection.h" />
+ <ClInclude Include="..\db\databaseholder.h" />
+ <ClInclude Include="..\db\dur.h" />
+ <ClInclude Include="..\db\durop.h" />
+ <ClInclude Include="..\db\dur_journal.h" />
+ <ClInclude Include="..\db\jsobjmanipulator.h" />
+ <ClInclude Include="..\db\mongommf.h" />
+ <ClInclude Include="..\db\mongomutex.h" />
+ <ClInclude Include="..\db\ops\count.h" />
+ <ClInclude Include="..\db\ops\delete.h" />
+ <ClInclude Include="..\db\ops\query.h" />
+ <ClInclude Include="..\db\ops\update.h" />
+ <ClInclude Include="..\db\pagefault.h" />
+ <ClInclude Include="..\third_party\pcre-7.4\pcrecpp.h" />
+ <ClInclude Include="..\server.h" />
+ <ClInclude Include="..\targetver.h" />
+ <ClInclude Include="..\..\boostw\boost_1_34_1\boost\version.hpp" />
+ <ClInclude Include="..\third_party\pcre-7.4\config.h" />
+ <ClInclude Include="..\third_party\pcre-7.4\pcre.h" />
+ <ClInclude Include="..\client\connpool.h" />
+ <ClInclude Include="..\client\dbclient.h" />
+ <ClInclude Include="..\client\model.h" />
+ <ClInclude Include="..\db\btree.h" />
+ <ClInclude Include="..\db\clientcursor.h" />
+ <ClInclude Include="..\db\cmdline.h" />
+ <ClInclude Include="..\db\commands.h" />
+ <ClInclude Include="..\db\concurrency.h" />
+ <ClInclude Include="..\db\curop.h" />
+ <ClInclude Include="..\db\cursor.h" />
+ <ClInclude Include="..\db\database.h" />
+ <ClInclude Include="..\db\db.h" />
+ <ClInclude Include="..\db\dbhelpers.h" />
+ <ClInclude Include="..\db\dbinfo.h" />
+ <ClInclude Include="..\db\dbmessage.h" />
+ <ClInclude Include="..\db\diskloc.h" />
+ <ClInclude Include="..\db\extsort.h" />
+ <ClInclude Include="..\db\introspect.h" />
+ <ClInclude Include="..\db\jsobj.h" />
+ <ClInclude Include="..\db\json.h" />
+ <ClInclude Include="..\db\matcher.h" />
+ <ClInclude Include="..\grid\message.h" />
+ <ClInclude Include="..\db\minilex.h" />
+ <ClInclude Include="..\db\namespace.h" />
+ <ClInclude Include="..\pch.h" />
+ <ClInclude Include="..\db\pdfile.h" />
+ <ClInclude Include="..\grid\protocol.h" />
+ <ClInclude Include="..\db\query.h" />
+ <ClInclude Include="..\db\queryoptimizer.h" />
+ <ClInclude Include="..\db\repl.h" />
+ <ClInclude Include="..\db\replset.h" />
+ <ClInclude Include="..\db\resource.h" />
+ <ClInclude Include="..\db\scanandorder.h" />
+ <ClInclude Include="..\db\security.h" />
+ <ClInclude Include="..\third_party\snappy\config.h" />
+ <ClInclude Include="..\third_party\snappy\snappy-c.h" />
+ <ClInclude Include="..\third_party\snappy\snappy-internal.h" />
+ <ClInclude Include="..\third_party\snappy\snappy-sinksource.h" />
+ <ClInclude Include="..\third_party\snappy\snappy-stubs-internal.h" />
+ <ClInclude Include="..\third_party\snappy\snappy-stubs-public.h" />
+ <ClInclude Include="..\third_party\snappy\snappy.h" />
+ <ClInclude Include="..\util\builder.h" />
+ <ClInclude Include="..\util\checksum.h" />
+ <ClInclude Include="..\util\compress.h" />
+ <ClInclude Include="..\util\concurrency\list.h" />
+ <ClInclude Include="..\util\concurrency\task.h" />
+ <ClInclude Include="..\util\concurrency\value.h" />
+ <ClInclude Include="..\util\file.h" />
+ <ClInclude Include="..\util\goodies.h" />
+ <ClInclude Include="..\util\hashtab.h" />
+ <ClInclude Include="..\db\lasterror.h" />
+ <ClInclude Include="..\util\log.h" />
+ <ClInclude Include="..\util\logfile.h" />
+ <ClInclude Include="..\util\lruishmap.h" />
+ <ClInclude Include="..\util\md5.h" />
+ <ClInclude Include="..\util\md5.hpp" />
+ <ClInclude Include="..\util\miniwebserver.h" />
+ <ClInclude Include="..\util\mmap.h" />
+ <ClInclude Include="..\util\mongoutils\hash.h" />
+ <ClInclude Include="..\util\sock.h" />
+ <ClInclude Include="..\util\unittest.h" />
+ <ClInclude Include="framework.h" />
+ </ItemGroup>
+ <ItemGroup>
+ <ClCompile Include="..\bson\oid.cpp" />
+ <ClCompile Include="..\client\dbclientcursor.cpp" />
+ <ClCompile Include="..\client\dbclient_rs.cpp" />
+ <ClCompile Include="..\client\distlock.cpp" />
+ <ClCompile Include="..\client\gridfs.cpp" />
+ <ClCompile Include="..\client\model.cpp" />
+ <ClCompile Include="..\client\parallel.cpp" />
+ <ClCompile Include="..\db\btreebuilder.cpp" />
+ <ClCompile Include="..\db\cap.cpp" />
+ <ClCompile Include="..\db\commands\isself.cpp" />
+ <ClCompile Include="..\db\compact.cpp" />
+ <ClCompile Include="..\db\curop.cpp" />
+ <ClCompile Include="..\db\dbcommands_admin.cpp" />
+ <ClCompile Include="..\db\dbcommands_generic.cpp" />
+ <ClCompile Include="..\db\dur.cpp" />
+ <ClCompile Include="..\db\durop.cpp" />
+ <ClCompile Include="..\db\dur_commitjob.cpp" />
+ <ClCompile Include="..\db\dur_journal.cpp" />
+ <ClCompile Include="..\db\dur_preplogbuffer.cpp" />
+ <ClCompile Include="..\db\dur_recover.cpp" />
+ <ClCompile Include="..\db\dur_writetodatafiles.cpp" />
+ <ClCompile Include="..\db\d_concurrency.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Use</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">Use</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">Use</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">Use</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\db\d_globals.cpp" />
+ <ClCompile Include="..\db\geo\2d.cpp" />
+ <ClCompile Include="..\db\geo\haystack.cpp" />
+ <ClCompile Include="..\db\key.cpp" />
+ <ClCompile Include="..\db\mongommf.cpp" />
+ <ClCompile Include="..\db\ops\count.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\db\ops\delete.cpp" />
+ <ClCompile Include="..\db\ops\query.cpp" />
+ <ClCompile Include="..\db\ops\update.cpp" />
+ <ClCompile Include="..\db\pagefault.cpp" />
+ <ClCompile Include="..\db\projection.cpp" />
+ <ClCompile Include="..\db\queryoptimizercursor.cpp" />
+ <ClCompile Include="..\db\querypattern.cpp">
+ <PrecompiledHeader />
+ </ClCompile>
+ <ClCompile Include="..\db\record.cpp" />
+ <ClCompile Include="..\db\repl\consensus.cpp" />
+ <ClCompile Include="..\db\repl\heartbeat.cpp" />
+ <ClCompile Include="..\db\repl\manager.cpp" />
+ <ClCompile Include="..\db\repl\rs.cpp" />
+ <ClCompile Include="..\db\repl\rs_initialsync.cpp" />
+ <ClCompile Include="..\db\repl\rs_initiate.cpp" />
+ <ClCompile Include="..\db\repl\rs_rollback.cpp" />
+ <ClCompile Include="..\db\repl\rs_sync.cpp" />
+ <ClCompile Include="..\db\restapi.cpp" />
+ <ClCompile Include="..\db\scanandorder.cpp" />
+ <ClCompile Include="..\db\security_common.cpp" />
+ <ClCompile Include="..\s\default_version.cpp" />
+ <ClCompile Include="..\third_party\pcre-7.4\pcrecpp.cc">
+ <PrecompiledHeader />
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_chartables.c">
+ <PrecompiledHeader />
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_compile.c">
+ <PrecompiledHeader />
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_config.c">
+ <PrecompiledHeader />
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_dfa_exec.c">
+ <PrecompiledHeader />
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_exec.c">
+ <PrecompiledHeader />
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_fullinfo.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_get.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_globals.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_info.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_maketables.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_newline.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_ord2utf8.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_refcount.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_scanner.cc">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_stringpiece.cc">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_study.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_tables.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_try_flipped.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_ucp_searchfuncs.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_valid_utf8.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_version.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_xclass.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcreposix.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\client\connpool.cpp" />
+ <ClCompile Include="..\client\dbclient.cpp" />
+ <ClCompile Include="..\client\syncclusterconnection.cpp" />
+ <ClCompile Include="..\db\btree.cpp" />
+ <ClCompile Include="..\db\btreecursor.cpp" />
+ <ClCompile Include="..\pch.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Create</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">Create</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">Create</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">Create</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\db\client.cpp" />
+ <ClCompile Include="..\db\clientcursor.cpp" />
+ <ClCompile Include="..\db\cloner.cpp" />
+ <ClCompile Include="..\db\commands\cloud.cpp">
+ <PrecompiledHeader />
+ </ClCompile>
+ <ClCompile Include="..\db\commands.cpp" />
+ <ClCompile Include="..\db\common.cpp">
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\db\cursor.cpp" />
+ <ClCompile Include="..\db\database.cpp" />
+ <ClCompile Include="..\db\dbcommands.cpp" />
+ <ClCompile Include="..\db\dbeval.cpp" />
+ <ClCompile Include="..\db\dbhelpers.cpp" />
+ <ClCompile Include="..\db\dbwebserver.cpp" />
+ <ClCompile Include="..\db\extsort.cpp" />
+ <ClCompile Include="..\db\index.cpp" />
+ <ClCompile Include="..\db\indexkey.cpp" />
+ <ClCompile Include="..\db\instance.cpp" />
+ <ClCompile Include="..\db\introspect.cpp" />
+ <ClCompile Include="..\db\jsobj.cpp" />
+ <ClCompile Include="..\db\json.cpp" />
+ <ClCompile Include="..\db\lasterror.cpp" />
+ <ClCompile Include="..\db\matcher.cpp" />
+ <ClCompile Include="..\scripting\bench.cpp" />
+ <ClCompile Include="..\s\chunk.cpp" />
+ <ClCompile Include="..\s\config.cpp" />
+ <ClCompile Include="..\s\d_chunk_manager.cpp" />
+ <ClCompile Include="..\s\d_migrate.cpp" />
+ <ClCompile Include="..\s\d_split.cpp" />
+ <ClCompile Include="..\s\d_state.cpp" />
+ <ClCompile Include="..\s\d_writeback.cpp" />
+ <ClCompile Include="..\s\grid.cpp" />
+ <ClCompile Include="..\s\shard.cpp" />
+ <ClCompile Include="..\s\shardconnection.cpp" />
+ <ClCompile Include="..\s\shardkey.cpp" />
+ <ClCompile Include="..\third_party\snappy\snappy-sinksource.cc">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\snappy\snappy.cc">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\util\alignedbuilder.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\util\compress.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\util\concurrency\spin_lock.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\util\concurrency\synchronization.cpp" />
+ <ClCompile Include="..\util\concurrency\task.cpp" />
+ <ClCompile Include="..\util\concurrency\thread_pool.cpp" />
+ <ClCompile Include="..\util\concurrency\vars.cpp" />
+ <ClCompile Include="..\util\file_allocator.cpp" />
+ <ClCompile Include="..\util\log.cpp" />
+ <ClCompile Include="..\util\logfile.cpp" />
+ <ClCompile Include="..\util\mmap_win.cpp" />
+ <ClCompile Include="..\db\namespace.cpp" />
+ <ClCompile Include="..\db\nonce.cpp" />
+ <ClCompile Include="..\db\pdfile.cpp" />
+ <ClCompile Include="..\db\queryoptimizer.cpp" />
+ <ClCompile Include="..\util\processinfo.cpp" />
+ <ClCompile Include="..\db\repl.cpp" />
+ <ClCompile Include="..\db\security.cpp" />
+ <ClCompile Include="..\db\security_commands.cpp" />
+ <ClCompile Include="..\db\tests.cpp" />
+ <ClCompile Include="..\db\cmdline.cpp" />
+ <ClCompile Include="..\db\dbmessage.cpp" />
+ <ClCompile Include="..\db\matcher_covered.cpp" />
+ <ClCompile Include="..\db\oplog.cpp" />
+ <ClCompile Include="..\db\queryutil.cpp" />
+ <ClCompile Include="..\db\repl_block.cpp" />
+ <ClCompile Include="..\util\assert_util.cpp" />
+ <ClCompile Include="..\util\background.cpp" />
+ <ClCompile Include="..\util\base64.cpp" />
+ <ClCompile Include="..\util\md5.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeaderFile>
+ <PrecompiledHeaderFile Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeaderFile>
+ </ClCompile>
+ <ClCompile Include="..\util\md5main.cpp" />
+ <ClCompile Include="..\util\net\message.cpp" />
+ <ClCompile Include="..\util\net\listen.cpp" />
+ <ClCompile Include="..\util\net\message_server_port.cpp" />
+ <ClCompile Include="..\util\net\message_port.cpp" />
+ <ClCompile Include="..\util\net\miniwebserver.cpp" />
+ <ClCompile Include="..\util\mmap.cpp" />
+ <ClCompile Include="..\util\processinfo_win32.cpp" />
+ <ClCompile Include="..\util\ramlog.cpp" />
+ <ClCompile Include="..\util\net\sock.cpp" />
+ <ClCompile Include="..\util\stringutils.cpp" />
+ <ClCompile Include="..\util\text.cpp" />
+ <ClCompile Include="..\util\util.cpp" />
+ <ClCompile Include="..\s\d_logic.cpp" />
+ <ClCompile Include="..\scripting\engine.cpp" />
+ <ClCompile Include="..\scripting\engine_spidermonkey.cpp" />
+ <ClCompile Include="..\shell\mongo.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\scripting\utils.cpp" />
+ <ClCompile Include="..\util\version.cpp" />
+ <ClCompile Include="basictests.cpp" />
+ <ClCompile Include="btreetests.cpp" />
+ <ClCompile Include="clienttests.cpp" />
+ <ClCompile Include="cursortests.cpp" />
+ <ClCompile Include="dbtests.cpp" />
+ <ClCompile Include="directclienttests.cpp" />
+ <ClCompile Include="d_chunk_manager_tests.cpp" />
+ <ClCompile Include="framework.cpp" />
+ <ClCompile Include="jsobjtests.cpp" />
+ <ClCompile Include="jsontests.cpp" />
+ <ClCompile Include="jstests.cpp" />
+ <ClCompile Include="matchertests.cpp" />
+ <ClCompile Include="mmaptests.cpp" />
+ <ClCompile Include="namespacetests.cpp" />
+ <ClCompile Include="pdfiletests.cpp" />
+ <ClCompile Include="perftests.cpp" />
+ <ClCompile Include="queryoptimizercursortests.cpp" />
+ <ClCompile Include="queryoptimizertests.cpp" />
+ <ClCompile Include="querytests.cpp" />
+ <ClCompile Include="repltests.cpp" />
+ <ClCompile Include="socktests.cpp" />
+ <ClCompile Include="spin_lock_test.cpp" />
+ <ClCompile Include="threadedtests.cpp">
+ <DisableSpecificWarnings Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">4180;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ <DisableSpecificWarnings Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">4180;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ </ClCompile>
+ <ClCompile Include="updatetests.cpp" />
+ <ClCompile Include="..\db\stats\counters.cpp" />
+ <ClCompile Include="..\db\stats\snapshots.cpp" />
+ <ClCompile Include="..\db\stats\top.cpp" />
+ <ClCompile Include="..\db\repl\health.cpp" />
+ <ClCompile Include="..\db\repl\replset_commands.cpp" />
+ <ClCompile Include="..\db\repl\rs_config.cpp" />
+ </ItemGroup>
+ <ItemGroup>
+ <None Include="..\SConstruct" />
+ <None Include="btreetests.inl" />
+ </ItemGroup>
+ <ItemGroup>
+ <Library Include="..\..\js\js32d.lib">
+ <FileType>Document</FileType>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
+ </Library>
+ <Library Include="..\..\js\js32r.lib">
+ <FileType>Document</FileType>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
+ </Library>
+ <Library Include="..\..\js\js64d.lib">
+ <FileType>Document</FileType>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
+ </Library>
+ <Library Include="..\..\js\js64r.lib">
+ <FileType>Document</FileType>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
+ </Library>
+ </ItemGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+ <ImportGroup Label="ExtensionTargets">
+ </ImportGroup>
+</Project> \ No newline at end of file
diff --git a/src/mongo/dbtests/test.vcxproj.filters b/src/mongo/dbtests/test.vcxproj.filters
new file mode 100755
index 00000000000..a692d0ca692
--- /dev/null
+++ b/src/mongo/dbtests/test.vcxproj.filters
@@ -0,0 +1,939 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup>
+ <Filter Include="misc and third party">
+ <UniqueIdentifier>{17c97725-06a4-41a6-bc1c-f0e05eada682}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="misc and third party">
+ <UniqueIdentifier>{0a50fb63-4ac3-4e30-a9d4-b0841878ee73}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="client">
+ <UniqueIdentifier>{45dab36c-864e-45de-bb8e-cf1d87a2c4f6}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="db">
+ <UniqueIdentifier>{69e233b0-5354-4612-8474-d4e4faaee607}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="db\cpp">
+ <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
+ <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
+ </Filter>
+ <Filter Include="db\h">
+ <UniqueIdentifier>{f86d2fc9-fb76-40cf-943d-330feb945ff3}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="util">
+ <UniqueIdentifier>{0ec2e082-aace-46da-9898-a1a7b24d60b7}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="util\cpp">
+ <UniqueIdentifier>{12efa241-3593-4177-a0cb-1eb672491f49}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="shard">
+ <UniqueIdentifier>{3865c5a5-bdb1-4420-a3ae-5a6615d563d4}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="scripting">
+ <UniqueIdentifier>{28893dc5-8a18-429a-b5c9-2cf701d324da}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="dbtests">
+ <UniqueIdentifier>{bc08b47a-daa3-4894-b9af-ae88755838db}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="stats">
+ <UniqueIdentifier>{2b914dc3-a760-4397-a12b-73a0381fa71d}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="replsets">
+ <UniqueIdentifier>{9320a670-3b28-471a-bf92-6c8d881a37a4}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="util\concurrency">
+ <UniqueIdentifier>{d499fdba-b256-4b12-af20-cdd1ae1addff}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="util\h">
+ <UniqueIdentifier>{353b6f01-1cab-4156-a576-bc75ab204776}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="btree">
+ <UniqueIdentifier>{4fff2dbf-30c4-4295-8db8-d513c1e36220}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="dur">
+ <UniqueIdentifier>{c296d097-0d46-46ee-9097-f2df659d9596}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="bson">
+ <UniqueIdentifier>{e6652333-c77f-420c-af8e-72d55bc095fe}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="misc and third party\snappy">
+ <UniqueIdentifier>{fbc4416f-ca67-4e63-a1ea-49027de7e080}</UniqueIdentifier>
+ </Filter>
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="..\..\boostw\boost_1_34_1\boost\config\auto_link.hpp">
+ <Filter>misc and third party</Filter>
+ </ClInclude>
+ <ClInclude Include="..\targetver.h">
+ <Filter>misc and third party</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\boostw\boost_1_34_1\boost\version.hpp">
+ <Filter>misc and third party</Filter>
+ </ClInclude>
+ <ClInclude Include="..\client\connpool.h">
+ <Filter>client</Filter>
+ </ClInclude>
+ <ClInclude Include="..\client\dbclient.h">
+ <Filter>client</Filter>
+ </ClInclude>
+ <ClInclude Include="..\client\model.h">
+ <Filter>client</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\clientcursor.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\cmdline.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\commands.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\concurrency.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\curop.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\cursor.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\database.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\db.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\dbhelpers.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\dbinfo.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\dbmessage.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\diskloc.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\extsort.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\introspect.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\jsobj.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\json.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\matcher.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\grid\message.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\minilex.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\namespace.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\pch.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\pdfile.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\grid\protocol.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\query.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\queryoptimizer.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\repl.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\replset.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\resource.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\scanandorder.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\security.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\btree.h">
+ <Filter>btree</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\concurrency\list.h">
+ <Filter>util\concurrency</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\concurrency\value.h">
+ <Filter>util\concurrency</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\concurrency\task.h">
+ <Filter>util\concurrency</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\builder.h">
+ <Filter>util\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\unittest.h">
+ <Filter>util\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\file.h">
+ <Filter>util\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\goodies.h">
+ <Filter>util\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\hashtab.h">
+ <Filter>util\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\lasterror.h">
+ <Filter>util\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\log.h">
+ <Filter>util\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\lruishmap.h">
+ <Filter>util\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\md5.h">
+ <Filter>util\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\md5.hpp">
+ <Filter>util\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\miniwebserver.h">
+ <Filter>util\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\mmap.h">
+ <Filter>util\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\sock.h">
+ <Filter>util\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\dur.h">
+ <Filter>dur</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\dur_journal.h">
+ <Filter>dur</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\logfile.h">
+ <Filter>dur</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\mongommf.h">
+ <Filter>dur</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\durop.h">
+ <Filter>dur</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\jsobjmanipulator.h">
+ <Filter>db</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\mongomutex.h">
+ <Filter>db</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\mongoutils\hash.h">
+ <Filter>util\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\checksum.h">
+ <Filter>util</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\bson.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\bson_db.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\bsonelement.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\bson-inl.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\bsonmisc.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\bsonobj.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\bsonobjbuilder.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\bsonobjiterator.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\bsontypes.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\inline_decls.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\oid.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\ordering.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\bson\stringdata.h">
+ <Filter>bson</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\ops\delete.h">
+ <Filter>db\cpp</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\ops\update.h">
+ <Filter>db\cpp</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\ops\query.h">
+ <Filter>db\cpp</Filter>
+ </ClInclude>
+ <ClInclude Include="..\server.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\third_party\snappy\config.h">
+ <Filter>misc and third party\snappy</Filter>
+ </ClInclude>
+ <ClInclude Include="..\third_party\snappy\snappy.h">
+ <Filter>misc and third party\snappy</Filter>
+ </ClInclude>
+ <ClInclude Include="..\third_party\snappy\snappy-c.h">
+ <Filter>misc and third party\snappy</Filter>
+ </ClInclude>
+ <ClInclude Include="..\third_party\snappy\snappy-internal.h">
+ <Filter>misc and third party\snappy</Filter>
+ </ClInclude>
+ <ClInclude Include="..\third_party\snappy\snappy-sinksource.h">
+ <Filter>misc and third party\snappy</Filter>
+ </ClInclude>
+ <ClInclude Include="..\third_party\snappy\snappy-stubs-internal.h">
+ <Filter>misc and third party\snappy</Filter>
+ </ClInclude>
+ <ClInclude Include="..\third_party\snappy\snappy-stubs-public.h">
+ <Filter>misc and third party\snappy</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\compress.h">
+ <Filter>misc and third party</Filter>
+ </ClInclude>
+ <ClInclude Include="..\third_party\pcre-7.4\pcrecpp.h" />
+ <ClInclude Include="..\third_party\pcre-7.4\config.h" />
+ <ClInclude Include="..\third_party\pcre-7.4\pcre.h" />
+ <ClInclude Include="..\db\collection.h" />
+ <ClInclude Include="..\db\databaseholder.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="framework.h" />
+ <ClInclude Include="..\db\ops\count.h">
+ <Filter>db\h</Filter>
+ </ClInclude>
+ <ClInclude Include="..\client\dbclientmockcursor.h" />
+ <ClInclude Include="..\db\pagefault.h" />
+ </ItemGroup>
+ <ItemGroup>
+ <Library Include="..\..\js\js64r.lib">
+ <Filter>misc and third party</Filter>
+ </Library>
+ <Library Include="..\..\js\js32d.lib">
+ <Filter>misc and third party</Filter>
+ </Library>
+ <Library Include="..\..\js\js32r.lib">
+ <Filter>misc and third party</Filter>
+ </Library>
+ <Library Include="..\..\js\js64d.lib">
+ <Filter>misc and third party</Filter>
+ </Library>
+ </ItemGroup>
+ <ItemGroup>
+ <ClCompile Include="..\client\connpool.cpp">
+ <Filter>client</Filter>
+ </ClCompile>
+ <ClCompile Include="..\client\dbclient.cpp">
+ <Filter>client</Filter>
+ </ClCompile>
+ <ClCompile Include="..\client\dbclientcursor.cpp">
+ <Filter>client</Filter>
+ </ClCompile>
+ <ClCompile Include="..\client\syncclusterconnection.cpp">
+ <Filter>client</Filter>
+ </ClCompile>
+ <ClCompile Include="..\pch.cpp">
+ <Filter>db</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\client.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\clientcursor.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\cloner.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\commands.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\common.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\cursor.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\database.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\dbcommands.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\dbeval.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\dbhelpers.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\dbwebserver.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\extsort.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\index.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\indexkey.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\instance.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\introspect.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\jsobj.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\json.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\lasterror.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\matcher.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\mmap_win.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\namespace.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\nonce.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\pdfile.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\queryoptimizer.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\repl.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\security.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\security_commands.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\tests.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\cmdline.cpp">
+ <Filter>db\h</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\matcher_covered.cpp">
+ <Filter>db\h</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\oplog.cpp">
+ <Filter>db\h</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\queryutil.cpp">
+ <Filter>db\h</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\repl_block.cpp">
+ <Filter>db\h</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\assert_util.cpp">
+ <Filter>util\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\background.cpp">
+ <Filter>util\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\base64.cpp">
+ <Filter>util\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\md5.c">
+ <Filter>util\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\md5main.cpp">
+ <Filter>util\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\mmap.cpp">
+ <Filter>util\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\processinfo_win32.cpp">
+ <Filter>util\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\util.cpp">
+ <Filter>util\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\s\d_logic.cpp">
+ <Filter>shard</Filter>
+ </ClCompile>
+ <ClCompile Include="..\scripting\engine.cpp">
+ <Filter>scripting</Filter>
+ </ClCompile>
+ <ClCompile Include="..\scripting\engine_spidermonkey.cpp">
+ <Filter>scripting</Filter>
+ </ClCompile>
+ <ClCompile Include="..\shell\mongo.cpp">
+ <Filter>scripting</Filter>
+ </ClCompile>
+ <ClCompile Include="..\scripting\utils.cpp">
+ <Filter>scripting</Filter>
+ </ClCompile>
+ <ClCompile Include="basictests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="btreetests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="clienttests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="cursortests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="dbtests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="framework.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="jsobjtests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="jsontests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="jstests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="matchertests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="namespacetests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="pdfiletests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="queryoptimizertests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="querytests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="repltests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="socktests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="threadedtests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="updatetests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\stats\counters.cpp">
+ <Filter>stats</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\stats\snapshots.cpp">
+ <Filter>stats</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\stats\top.cpp">
+ <Filter>stats</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\repl\consensus.cpp">
+ <Filter>replsets</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\repl\health.cpp">
+ <Filter>replsets</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\repl\replset_commands.cpp">
+ <Filter>replsets</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\repl\rs_config.cpp">
+ <Filter>replsets</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\btree.cpp">
+ <Filter>btree</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\btreecursor.cpp">
+ <Filter>btree</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\repl\manager.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\repl\rs_initiate.cpp">
+ <Filter>replsets</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\concurrency\vars.cpp">
+ <Filter>util\concurrency</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\concurrency\task.cpp">
+ <Filter>util\concurrency</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\repl\heartbeat.cpp">
+ <Filter>replsets</Filter>
+ </ClCompile>
+ <ClCompile Include="..\s\shardconnection.cpp">
+ <Filter>shard</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\concurrency\thread_pool.cpp">
+ <Filter>util\concurrency</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\version.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\repl\rs.cpp">
+ <Filter>replsets</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\text.cpp">
+ <Filter>util\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\client\gridfs.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\s\d_writeback.cpp">
+ <Filter>shard</Filter>
+ </ClCompile>
+ <ClCompile Include="..\s\d_state.cpp">
+ <Filter>shard</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\geo\2d.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\s\chunk.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\s\config.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\s\shardkey.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\s\shard.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\client\model.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\client\parallel.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\stringutils.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\client\distlock.cpp">
+ <Filter>client</Filter>
+ </ClCompile>
+ <ClCompile Include="..\s\d_migrate.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\s\d_split.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\repl\rs_rollback.cpp">
+ <Filter>replsets</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\repl\rs_sync.cpp">
+ <Filter>replsets</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\repl\rs_initialsync.cpp">
+ <Filter>replsets</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\geo\haystack.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\cap.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\log.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\processinfo.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\s\grid.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\restapi.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="mmaptests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="..\scripting\bench.cpp">
+ <Filter>scripting</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\compact.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\commands\isself.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\dur.cpp">
+ <Filter>dur</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\dur_journal.cpp">
+ <Filter>dur</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\logfile.cpp">
+ <Filter>dur</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\mongommf.cpp">
+ <Filter>dur</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\projection.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\s\d_chunk_manager.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\dur_recover.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\durop.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\dbcommands_generic.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\alignedbuilder.cpp">
+ <Filter>util</Filter>
+ </ClCompile>
+ <ClCompile Include="..\bson\oid.cpp">
+ <Filter>db</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\concurrency\synchronization.cpp">
+ <Filter>util</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\dur_commitjob.cpp">
+ <Filter>dur</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\dur_writetodatafiles.cpp">
+ <Filter>dur</Filter>
+ </ClCompile>
+ <ClCompile Include="..\client\dbclient_rs.cpp">
+ <Filter>client</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\dur_preplogbuffer.cpp">
+ <Filter>dur</Filter>
+ </ClCompile>
+ <ClCompile Include="perftests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="directclienttests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\file_allocator.cpp">
+ <Filter>util\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\dbcommands_admin.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\querypattern.cpp">
+ <Filter>db</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\ramlog.cpp">
+ <Filter>util</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\key.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\btreebuilder.cpp">
+ <Filter>btree</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\queryoptimizercursor.cpp">
+ <Filter>db</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\record.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\ops\delete.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\ops\update.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\security_common.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\ops\query.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\dbmessage.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\net\message.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\net\listen.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\net\message_server_port.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\net\message_port.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\net\miniwebserver.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\net\sock.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="spin_lock_test.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\concurrency\spin_lock.cpp">
+ <Filter>util\concurrency</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\snappy\snappy.cc">
+ <Filter>misc and third party\snappy</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\compress.cpp">
+ <Filter>misc and third party</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\snappy\snappy-sinksource.cc">
+ <Filter>misc and third party\snappy</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\scanandorder.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcrecpp.cc">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_chartables.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_compile.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_config.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_dfa_exec.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_exec.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_fullinfo.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_get.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_globals.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_info.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_maketables.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_newline.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_ord2utf8.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_refcount.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_scanner.cc">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_stringpiece.cc">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_study.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_tables.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_try_flipped.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_ucp_searchfuncs.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_valid_utf8.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_version.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_xclass.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcreposix.c">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\commands\cloud.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\d_concurrency.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="d_chunk_manager_tests.cpp">
+ <Filter>shard</Filter>
+ </ClCompile>
+ <ClCompile Include="queryoptimizercursortests.cpp">
+ <Filter>dbtests</Filter>
+ </ClCompile>
+ <ClCompile Include="..\s\default_version.cpp">
+ <Filter>shard</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\ops\count.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\pagefault.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\d_globals.cpp">
+ <Filter>db</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\curop.cpp">
+ <Filter>db\cpp</Filter>
+ </ClCompile>
+ </ItemGroup>
+ <ItemGroup>
+ <None Include="..\SConstruct">
+ <Filter>misc and third party</Filter>
+ </None>
+ <None Include="btreetests.inl">
+ <Filter>dbtests</Filter>
+ </None>
+ </ItemGroup>
+</Project> \ No newline at end of file
diff --git a/src/mongo/dbtests/threadedtests.cpp b/src/mongo/dbtests/threadedtests.cpp
new file mode 100644
index 00000000000..1304a276b7d
--- /dev/null
+++ b/src/mongo/dbtests/threadedtests.cpp
@@ -0,0 +1,649 @@
+// threadedtests.cpp - Tests for threaded code
+//
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../bson/util/atomic_int.h"
+#include "../util/concurrency/mvar.h"
+#include "../util/concurrency/thread_pool.h"
+#include "../util/concurrency/list.h"
+#include "../util/timer.h"
+#include <boost/thread.hpp>
+#include <boost/bind.hpp>
+#include "../db/d_concurrency.h"
+
+#include "dbtests.h"
+
+namespace ThreadedTests {
+
+ template <int nthreads_param=10>
+ class ThreadedTest {
+ public:
+ virtual void setup() {} //optional
+ virtual void subthread(int remaining) = 0; // each thread does whatever test work you want done
+ virtual void validate() = 0; // after work is done
+
+ static const int nthreads = nthreads_param;
+
+ void run() {
+ setup();
+ launch_subthreads(nthreads);
+ validate();
+ }
+
+ virtual ~ThreadedTest() {}; // not necessary, but makes compilers happy
+
+ private:
+ void launch_subthreads(int remaining) {
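+ // recursively spawn one worker thread per remaining count; all threads are
+ // launched before any join, since the joins happen as the recursion unwinds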
+ if (!remaining)
+ return;
+
+ boost::thread athread(boost::bind(&ThreadedTest::subthread, this, remaining));
+ launch_subthreads(remaining - 1);
+ athread.join();
+ }
+ };
+
+ class MongoMutexTest : public ThreadedTest<135> {
+#if defined(_DEBUG)
+ enum { N = 5000 };
+#else
+ enum { N = 40000 };
+#endif
+ MongoMutex *mm;
+ ProgressMeter pm;
+ public:
+ MongoMutexTest() : pm(N * nthreads) {}
+ void run() {
+ DEV {
+ // in _DEBUG builds on Linux we mprotect each time a write lock
+ // is taken. That can greatly slow down this test if there are
+ // many open files.
+ DBDirectClient db;
+ db.simpleCommand("admin", NULL, "closeAllDatabases");
+ }
+
+ Timer t;
+ cout << "MongoMutexTest N:" << N << endl;
+ ThreadedTest<135>::run();
+ cout << "MongoMutexTest " << t.millis() << "ms" << endl;
+ }
+ private:
+ virtual void setup() {
+ mm = &d.dbMutex;
+ }
+ virtual void subthread(int) {
+ Client::initThread("mongomutextest");
+ sleepmillis(0);
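+ // cycle through several lock/unlock patterns keyed off i % 7: nested shared
+ // locks, plain shared, exclusive, exclusive with nested shared, releaseEarly,
+ // and the try-lock variants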
+ for( int i = 0; i < N; i++ ) {
+ if( i % 7 == 0 ) {
+ mm->lock_shared();
+ mm->lock_shared();
+ mm->unlock_shared();
+ mm->unlock_shared();
+ }
+ else if( i % 7 == 1 ) {
+ mm->lock_shared();
+ ASSERT( mm->atLeastReadLocked() );
+ mm->unlock_shared();
+ }
+ else if( i % 7 == 2 ) {
+ mm->lock();
+ ASSERT( mm->isWriteLocked() );
+ mm->unlock();
+ }
+ else if( i % 7 == 3 ) {
+ mm->lock();
+ mm->lock_shared();
+ ASSERT( mm->isWriteLocked() );
+ mm->unlock_shared();
+ mm->unlock();
+ }
+ else if( i % 7 == 4 ) {
+ mm->lock();
+ mm->releaseEarly();
+ mm->unlock();
+ }
+ else if( i % 7 == 5 ) {
+ if( mm->lock_try(1) ) {
+ mm->unlock();
+ }
+ }
+ else if( i % 7 == 6 ) {
+ if( mm->lock_shared_try(0) ) {
+ mm->unlock_shared();
+ }
+ }
+ else {
+ mm->lock_shared();
+ mm->unlock_shared();
+ }
+ pm.hit();
+ }
+ cc().shutdown();
+ }
+ virtual void validate() {
+ ASSERT( !mm->atLeastReadLocked() );
+ mm->lock();
+ mm->unlock();
+ mm->lock_shared();
+ mm->unlock_shared();
+ }
+ };
+
+ // Tested with up to 30k threads
+ class IsAtomicUIntAtomic : public ThreadedTest<> {
+ static const int iterations = 1000000;
+ AtomicUInt target;
+
+ void subthread(int) {
+ for(int i=0; i < iterations; i++) {
+ //target.x++; // verified to fail with this version
+ target++;
+ }
+ }
+ void validate() {
+ ASSERT_EQUALS(target.x , unsigned(nthreads * iterations));
+
+ AtomicUInt u;
+ ASSERT_EQUALS(0u, u);
+ ASSERT_EQUALS(0u, u++);
+ ASSERT_EQUALS(2u, ++u);
+ ASSERT_EQUALS(2u, u--);
+ ASSERT_EQUALS(0u, --u);
+ ASSERT_EQUALS(0u, u);
+
+ u++;
+ ASSERT( u > 0 );
+
+ u--;
+ ASSERT( ! ( u > 0 ) );
+ }
+ };
+
+ class MVarTest : public ThreadedTest<> {
+ static const int iterations = 10000;
+ MVar<int> target;
+
+ public:
+ MVarTest() : target(0) {}
+ void subthread(int) {
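+ // take the current value, yield (when boost supports it) to encourage
+ // interleaving, then put back value+1; validate() expects a total of
+ // nthreads * iterations increments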
+ for(int i=0; i < iterations; i++) {
+ int val = target.take();
+#if BOOST_VERSION >= 103500
+ //increase chances of catching failure
+ boost::this_thread::yield();
+#endif
+ target.put(val+1);
+ }
+ }
+ void validate() {
+ ASSERT_EQUALS(target.take() , nthreads * iterations);
+ }
+ };
+
+ class ThreadPoolTest {
+ static const int iterations = 10000;
+ static const int nThreads = 8;
+
+ AtomicUInt counter;
+ void increment(int n) {
+ for (int i=0; i<n; i++) {
+ counter++;
+ }
+ }
+
+ public:
+ void run() {
+ ThreadPool tp(nThreads);
+
+ for (int i=0; i < iterations; i++) {
+ tp.schedule(&ThreadPoolTest::increment, this, 2);
+ }
+
+ tp.join();
+
+ ASSERT(counter == (unsigned)(iterations * 2));
+ }
+ };
+
+ class LockTest {
+ public:
+ void run() {
+ // quick atomicint wrap test
+ // MSGID likely assumes this semantic
+ AtomicUInt counter = 0xffffffff;
+ counter++;
+ ASSERT( counter == 0 );
+
+ writelocktry lk( "" , 0 );
+ ASSERT( lk.got() );
+ ASSERT( d.dbMutex.isWriteLocked() );
+ }
+ };
+
+ class RWLockTest1 {
+ public:
+ void run() {
+ RWLock lk( "eliot" );
+ {
+ rwlock r( lk , true , 1000 );
+ }
+ }
+ };
+
+ class RWLockTest2 {
+ public:
+
+ static void worker1( RWLockRecursiveNongreedy * lk , AtomicUInt * x ) {
+ (*x)++; // 1
+ //cout << "lock b try" << endl;
+ RWLockRecursiveNongreedy::Exclusive b(*lk);
+ //cout << "lock b got" << endl;
+ (*x)++; // 2
+ }
+
+ static void worker2( RWLockRecursiveNongreedy * lk , AtomicUInt * x ) {
+ //cout << "lock c try" << endl;
+ RWLockRecursiveNongreedy::Shared c(*lk);
+ (*x)++;
+ //cout << "lock c got" << endl;
+ }
+
+ void run() {
+ /**
+ * note: this test will deadlock if the code breaks
+ */
+
+ RWLockRecursiveNongreedy lk( "eliot2" , 120 * 1000 );
+ cout << "RWLock impl: " << lk.implType() << endl;
+
+ auto_ptr<RWLockRecursiveNongreedy::Shared> a( new RWLockRecursiveNongreedy::Shared(lk) );
+
+ AtomicUInt x1 = 0;
+ cout << "A : " << &x1 << endl;
+ boost::thread t1( boost::bind( worker1 , &lk , &x1 ) );
+ while ( ! x1 );
+ assert( x1 == 1 );
+ sleepmillis( 500 );
+ assert( x1 == 1 );
+
+ AtomicUInt x2 = 0;
+
+ boost::thread t2( boost::bind( worker2, &lk , &x2 ) );
+ t2.join();
+ assert( x2 == 1 );
+
+ a.reset();
+
+ for ( int i=0; i<2000; i++ ) {
+ if ( x1 == 2 )
+ break;
+ sleepmillis(1);
+ }
+
+ assert( x1 == 2 );
+ t1.join();
+
+ }
+ };
+
+
+
+ /** test of shared lock */
+ class RWLockTest3 {
+ public:
+
+ static void worker2( RWLockRecursiveNongreedy * lk , AtomicUInt * x ) {
+ assert( ! lk->__lock_try(0) );
+ //cout << "lock c try" << endl;
+ RWLockRecursiveNongreedy::Shared c( *lk );
+ (*x)++;
+ //cout << "lock c got" << endl;
+ }
+
+ void run() {
+ /**
+ * note: this test will deadlock if the code breaks
+ */
+
+ RWLockRecursiveNongreedy lk( "eliot2" , 120 * 1000 );
+
+ auto_ptr<RWLockRecursiveNongreedy::Shared> a( new RWLockRecursiveNongreedy::Shared( lk ) );
+
+ AtomicUInt x2 = 0;
+
+ boost::thread t2( boost::bind( worker2, &lk , &x2 ) );
+ t2.join();
+ assert( x2 == 1 );
+
+ a.reset();
+
+ }
+ };
+
+ class RWLockTest4 {
+ public:
+
+#if defined(__linux__) || defined(__APPLE__)
+ static void worker1( pthread_rwlock_t * lk , AtomicUInt * x ) {
+ (*x)++; // 1
+ cout << "lock b try" << endl;
+ while ( 1 ) {
+ if ( pthread_rwlock_trywrlock( lk ) == 0 )
+ break;
+ sleepmillis(10);
+ }
+ cout << "lock b got" << endl;
+ (*x)++; // 2
+ pthread_rwlock_unlock( lk );
+ }
+
+ static void worker2( pthread_rwlock_t * lk , AtomicUInt * x ) {
+ cout << "lock c try" << endl;
+ pthread_rwlock_rdlock( lk );
+ (*x)++;
+ cout << "lock c got" << endl;
+ pthread_rwlock_unlock( lk );
+ }
+#endif
+ void run() {
+ /**
+ * note: this test will deadlock if the code breaks
+ */
+
+#if defined(__linux__) || defined(__APPLE__)
+
+ // create
+ pthread_rwlock_t lk;
+ assert( pthread_rwlock_init( &lk , 0 ) == 0 );
+
+ // read lock
+ assert( pthread_rwlock_rdlock( &lk ) == 0 );
+
+ AtomicUInt x1 = 0;
+ boost::thread t1( boost::bind( worker1 , &lk , &x1 ) );
+ while ( ! x1 );
+ assert( x1 == 1 );
+ sleepmillis( 500 );
+ assert( x1 == 1 );
+
+ AtomicUInt x2 = 0;
+
+ boost::thread t2( boost::bind( worker2, &lk , &x2 ) );
+ t2.join();
+ assert( x2 == 1 );
+
+ pthread_rwlock_unlock( &lk );
+
+ for ( int i=0; i<2000; i++ ) {
+ if ( x1 == 2 )
+ break;
+ sleepmillis(1);
+ }
+
+ assert( x1 == 2 );
+ t1.join();
+#endif
+ }
+ };
+
+ class List1Test2 : public ThreadedTest<> {
+ static const int iterations = 1000; // note: a lot of iterations will use a lot of memory as List1 leaks on purpose
+ class M : public List1<M>::Base {
+ public:
+ M(int x) : _x(x) { }
+ const int _x;
+ };
+ List1<M> l;
+ public:
+ void validate() { }
+ void subthread(int) {
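+ // randomly mix operations: occasionally orphan the whole list, sometimes
+ // push a new node, otherwise walk the list and maybe orphan a single node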
+ for(int i=0; i < iterations; i++) {
+ int r = std::rand() % 256;
+ if( r == 0 ) {
+ l.orphanAll();
+ }
+ else if( r < 4 ) {
+ l.push(new M(r));
+ }
+ else {
+ M *orph = 0;
+ for( M *m = l.head(); m; m=m->next() ) {
+ ASSERT( m->_x > 0 && m->_x < 4 );
+ if( r > 192 && std::rand() % 8 == 0 )
+ orph = m;
+ }
+ if( orph ) {
+ try {
+ l.orphan(orph);
+ }
+ catch(...) { }
+ }
+ }
+ }
+ }
+ };
+
+ class List1Test {
+ public:
+ class M : public List1<M>::Base {
+ ~M();
+ public:
+ M( int x ) {
+ num = x;
+ }
+ int num;
+ };
+
+ void run(){
+ List1<M> l;
+
+ vector<M*> ms;
+ for ( int i=0; i<5; i++ ) {
+ M * m = new M(i);
+ ms.push_back( m );
+ l.push( m );
+ }
+
+ // must assert as the item is missing
+ ASSERT_THROWS( l.orphan( new M( -3 ) ) , UserException );
+ }
+ };
+
+ class Hierarchical1 {
+ public:
+ void run() {
+ {
+ LockCollectionForReading x("bar");
+ }
+ {
+ LockCollectionForReading x("foo");
+ LockCollectionForReading y("foo"); // recursion is ok
+ }
+ {
+ LockCollectionForReading x("foo");
+ LockCollectionForReading y("foo.$bar");
+ }
+#if defined(CLC)
+ {
+ LockCollectionForWriting x("foo");
+ LockCollectionForWriting y("foo");
+ }
+ {
+ LockCollectionForReading x("foo");
+ ASSERT_THROWS( LockCollectionForWriting y("foo"), DBException )
+ }
+ {
+ LockCollectionForReading x("foo");
+ ASSERT_THROWS( LockCollectionForReading y("bar"), DBException )
+ }
+#endif
+ cout << "temp ok" << endl;
+ }
+ };
+
+#if 1
+ class UpgradableTest : public ThreadedTest<7> {
+ RWLock m;
+ public:
+ UpgradableTest() : m("utest") {}
+ private:
+ virtual void validate() { }
+ virtual void subthread(int x) {
+ Client::initThread("utest");
+
+ /* r = get a read lock
+ R = get a read lock and we expect it to be fast
+ u = get upgradable
+ U = get upgradable and we expect it to be fast
+ w = get a write lock
+ */
+ // /-- verify upgrade can be done instantly while in a read lock already
+ // | /-- verify upgrade acquisition isn't greedy
+ // | | /-- verify writes aren't greedy while in upgradable (or are they?)
+ // v v v
+ const char *what = " RURuRwR";
+
+ sleepmillis(100*x);
+
+ log() << x << ' ' << what[x] << " request" << endl;
+ char ch = what[x];
+ switch( ch ) {
+ case 'w':
+ {
+ m.lock();
+ log() << x << " w got" << endl;
+ sleepmillis(100);
+ log() << x << " w unlock" << endl;
+ m.unlock();
+ }
+ break;
+ case 'u':
+ case 'U':
+ {
+ Timer t;
+ RWLock::Upgradable u(m);
+ log() << x << ' ' << ch << " got" << endl;
+ if( ch == 'U' ) {
+#ifdef MONGO_USE_SRW_ON_WINDOWS
+ if( t.millis() > 200 ) {
+#else
+ if( t.millis() > 20 ) {
+#endif
+ DEV {
+ // a _DEBUG buildbot might be slow, try to avoid false positives
+ log() << "warning lock upgrade was slow " << t.millis() << endl;
+ }
+ else {
+ log() << "assertion failure: lock upgrade was too slow: " << t.millis() << endl;
+ ASSERT( false );
+ }
+ }
+ }
+ sleepsecs(1);
+ log() << x << ' ' << ch << " unlock" << endl;
+ }
+ break;
+ case 'r':
+ case 'R':
+ {
+ Timer t;
+ m.lock_shared();
+ log() << x << ' ' << ch << " got " << endl;
+ if( what[x] == 'R' ) {
+ if( t.millis() > 15 ) {
+ log() << x << " warning: when in upgradable, write locks are still greedy on this platform" << endl;
+ }
+ }
+ sleepmillis(200);
+ log() << x << ' ' << ch << " unlock" << endl;
+ m.unlock_shared();
+ }
+ break;
+ default:
+ ASSERT(false);
+ }
+
+ cc().shutdown();
+ }
+ };
+#endif
+
+ class WriteLocksAreGreedy : public ThreadedTest<3> {
+ public:
+ WriteLocksAreGreedy() : m("gtest") {}
+ private:
+ RWLock m;
+ virtual void validate() { }
+ virtual void subthread(int x) {
+ Client::initThread("utest");
+ if( x == 1 ) {
+ cout << mongo::curTimeMillis64() % 10000 << " 1" << endl;
+ rwlock_shared lk(m);
+ sleepmillis(300);
+ cout << mongo::curTimeMillis64() % 10000 << " 1x" << endl;
+ }
+ if( x == 2 ) {
+ sleepmillis(100);
+ cout << mongo::curTimeMillis64() % 10000 << " 2" << endl;
+ rwlock lk(m, true);
+ //m._lock();
+ cout << mongo::curTimeMillis64() % 10000 << " 2x" << endl;
+ //m.unlock();
+ }
+ if( x == 3 ) {
+ sleepmillis(200);
+ Timer t;
+ cout << mongo::curTimeMillis64() % 10000 << " 3" << endl;
+ rwlock_shared lk(m);
+ cout << mongo::curTimeMillis64() % 10000 << " 3x" << endl;
+ cout << t.millis() << endl;
+ ASSERT( t.millis() > 50 );
+ }
+ cc().shutdown();
+ }
+ };
+
+ class All : public Suite {
+ public:
+ All() : Suite( "threading" ) { }
+
+ void setupTests() {
+ add< Hierarchical1 >();
+
+ add< WriteLocksAreGreedy >();
+ add< UpgradableTest >();
+ add< List1Test >();
+ add< List1Test2 >();
+
+ add< IsAtomicUIntAtomic >();
+ add< MVarTest >();
+ add< ThreadPoolTest >();
+ add< LockTest >();
+
+
+ add< RWLockTest1 >();
+ //add< RWLockTest2 >(); // SERVER-2996
+ add< RWLockTest3 >();
+ add< RWLockTest4 >();
+
+ add< MongoMutexTest >();
+ }
+ } myall;
+}
diff --git a/src/mongo/dbtests/updatetests.cpp b/src/mongo/dbtests/updatetests.cpp
new file mode 100644
index 00000000000..c912bf437d0
--- /dev/null
+++ b/src/mongo/dbtests/updatetests.cpp
@@ -0,0 +1,877 @@
+// updatetests.cpp : unit tests relating to update requests
+//
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../db/ops/query.h"
+
+#include "../db/db.h"
+#include "../db/instance.h"
+#include "../db/json.h"
+#include "../db/lasterror.h"
+#include "../db/ops/update.h"
+
+#include "dbtests.h"
+
+namespace UpdateTests {
+
+ class ClientBase {
+ public:
+ // NOTE: Not bothering to back up the old error record.
+ ClientBase() {
+ mongo::lastError.reset( new LastError() );
+ }
+ ~ClientBase() {
+ mongo::lastError.release();
+ }
+ protected:
+ static void insert( const char *ns, BSONObj o ) {
+ client_.insert( ns, o );
+ }
+ static void update( const char *ns, BSONObj q, BSONObj o, bool upsert = 0 ) {
+ client_.update( ns, Query( q ), o, upsert );
+ }
+ static bool error() {
+ return !client_.getPrevError().getField( "err" ).isNull();
+ }
+ DBDirectClient &client() const { return client_; }
+ private:
+ static DBDirectClient client_;
+ };
+ DBDirectClient ClientBase::client_;
+
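+ // Fail: base class for update tests that are expected to set an error;
+ // prep() seeds the collection and doIt() issues the offending update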
+ class Fail : public ClientBase {
+ public:
+ virtual ~Fail() {}
+ void run() {
+ prep();
+ ASSERT( !error() );
+ doIt();
+ ASSERT( error() );
+ }
+ protected:
+ const char *ns() { return "unittests.UpdateTests_Fail"; }
+ virtual void prep() {
+ insert( ns(), fromjson( "{a:1}" ) );
+ }
+ virtual void doIt() = 0;
+ };
+
+ class ModId : public Fail {
+ void doIt() {
+ update( ns(), BSONObj(), fromjson( "{$set:{'_id':4}}" ) );
+ }
+ };
+
+ class ModNonmodMix : public Fail {
+ void doIt() {
+ update( ns(), BSONObj(), fromjson( "{$set:{a:4},z:3}" ) );
+ }
+ };
+
+ class InvalidMod : public Fail {
+ void doIt() {
+ update( ns(), BSONObj(), fromjson( "{$awk:{a:4}}" ) );
+ }
+ };
+
+ class ModNotFirst : public Fail {
+ void doIt() {
+ update( ns(), BSONObj(), fromjson( "{z:3,$set:{a:4}}" ) );
+ }
+ };
+
+ class ModDuplicateFieldSpec : public Fail {
+ void doIt() {
+ update( ns(), BSONObj(), fromjson( "{$set:{a:4},$inc:{a:1}}" ) );
+ }
+ };
+
+ class IncNonNumber : public Fail {
+ void doIt() {
+ update( ns(), BSONObj(), fromjson( "{$inc:{a:'d'}}" ) );
+ }
+ };
+
+ class PushAllNonArray : public Fail {
+ void doIt() {
+ insert( ns(), fromjson( "{a:[1]}" ) );
+ update( ns(), BSONObj(), fromjson( "{$pushAll:{a:'d'}}" ) );
+ }
+ };
+
+ class PullAllNonArray : public Fail {
+ void doIt() {
+ insert( ns(), fromjson( "{a:[1]}" ) );
+ update( ns(), BSONObj(), fromjson( "{$pullAll:{a:'d'}}" ) );
+ }
+ };
+
+ class IncTargetNonNumber : public Fail {
+ void doIt() {
+ insert( ns(), BSON( "a" << "a" ) );
+ update( ns(), BSON( "a" << "a" ), fromjson( "{$inc:{a:1}}" ) );
+ }
+ };
+
+ class SetBase : public ClientBase {
+ public:
+ ~SetBase() {
+ client().dropCollection( ns() );
+ }
+ protected:
+ const char *ns() { return "unittests.updatetests.SetBase"; }
+ };
+
+ class SetNum : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), BSON( "a" << 1 ) );
+ client().update( ns(), BSON( "a" << 1 ), BSON( "$set" << BSON( "a" << 4 ) ) );
+ ASSERT( !client().findOne( ns(), BSON( "a" << 4 ) ).isEmpty() );
+ }
+ };
+
+ class SetString : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), BSON( "a" << "b" ) );
+ client().update( ns(), BSON( "a" << "b" ), BSON( "$set" << BSON( "a" << "c" ) ) );
+ ASSERT( !client().findOne( ns(), BSON( "a" << "c" ) ).isEmpty() );
+ }
+ };
+
+ class SetStringDifferentLength : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), BSON( "a" << "b" ) );
+ client().update( ns(), BSON( "a" << "b" ), BSON( "$set" << BSON( "a" << "cd" ) ) );
+ ASSERT( !client().findOne( ns(), BSON( "a" << "cd" ) ).isEmpty() );
+ }
+ };
+
+ class SetStringToNum : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), BSON( "a" << "b" ) );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a" << 5 ) ) );
+ ASSERT( !client().findOne( ns(), BSON( "a" << 5 ) ).isEmpty() );
+ }
+ };
+
+ class SetStringToNumInPlace : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), BSON( "a" << "bcd" ) );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a" << 5.0 ) ) );
+ ASSERT( !client().findOne( ns(), BSON( "a" << 5.0 ) ).isEmpty() );
+ }
+ };
+
+ class ModDotted : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{a:{b:4}}" ) );
+ client().update( ns(), Query(), BSON( "$inc" << BSON( "a.b" << 10 ) ) );
+ ASSERT( !client().findOne( ns(), BSON( "a.b" << 14 ) ).isEmpty() );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a.b" << 55 ) ) );
+ ASSERT( !client().findOne( ns(), BSON( "a.b" << 55 ) ).isEmpty() );
+ }
+ };
+
+ class SetInPlaceDotted : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{a:{b:'cdef'}}" ) );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a.b" << "llll" ) ) );
+ ASSERT( !client().findOne( ns(), BSON( "a.b" << "llll" ) ).isEmpty() );
+ }
+ };
+
+ class SetRecreateDotted : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0,a:{b:'cdef'}}" ) );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a.b" << "lllll" ) ) );
+ ASSERT( client().findOne( ns(), BSON( "a.b" << "lllll" ) ).woCompare( fromjson( "{'_id':0,a:{b:'lllll'}}" ) ) == 0 );
+ }
+ };
+
+ class SetMissingDotted : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0}" ) );
+ client().update( ns(), BSONObj(), BSON( "$set" << BSON( "a.b" << "lllll" ) ) );
+ ASSERT( client().findOne( ns(), BSON( "a.b" << "lllll" ) ).woCompare( fromjson( "{'_id':0,a:{b:'lllll'}}" ) ) == 0 );
+ }
+ };
+
+ class SetAdjacentDotted : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0,a:{c:4}}" ) );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a.b" << "lllll" ) ) );
+ ASSERT_EQUALS( client().findOne( ns(), BSON( "a.b" << "lllll" ) ) , fromjson( "{'_id':0,a:{b:'lllll',c:4}}" ) );
+ }
+ };
+
+ class IncMissing : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0}" ) );
+ client().update( ns(), Query(), BSON( "$inc" << BSON( "f" << 3.0 ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,f:3}" ) ) == 0 );
+ }
+ };
+
+ class MultiInc : public SetBase {
+ public:
+
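+        // s() renders the 'x' value of every document, ordered by _id, as a
+        // comma-separated string (e.g. "1,5"), which keeps the assertions below terse.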
+ string s() {
+ stringstream ss;
+ auto_ptr<DBClientCursor> cc = client().query( ns() , Query().sort( BSON( "_id" << 1 ) ) );
+ bool first = true;
+ while ( cc->more() ) {
+ if ( first ) first = false;
+ else ss << ",";
+
+ BSONObj o = cc->next();
+ ss << o["x"].numberInt();
+ }
+ return ss.str();
+ }
+
+ void run() {
+ client().insert( ns(), BSON( "_id" << 1 << "x" << 1 ) );
+ client().insert( ns(), BSON( "_id" << 2 << "x" << 5 ) );
+
+ ASSERT_EQUALS( "1,5" , s() );
+
+ client().update( ns() , BSON( "_id" << 1 ) , BSON( "$inc" << BSON( "x" << 1 ) ) );
+ ASSERT_EQUALS( "2,5" , s() );
+
+ client().update( ns() , BSONObj() , BSON( "$inc" << BSON( "x" << 1 ) ) );
+ ASSERT_EQUALS( "3,5" , s() );
+
+ client().update( ns() , BSONObj() , BSON( "$inc" << BSON( "x" << 1 ) ) , false , true );
+ ASSERT_EQUALS( "4,6" , s() );
+
+ }
+ };
+
+ class UnorderedNewSet : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0}" ) );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "f.g.h" << 3.0 << "f.g.a" << 2.0 ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,f:{g:{a:2,h:3}}}" ) ) == 0 );
+ }
+ };
+
+ class UnorderedNewSetAdjacent : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0}" ) );
+ client().update( ns(), BSONObj(), BSON( "$set" << BSON( "f.g.h.b" << 3.0 << "f.g.a.b" << 2.0 ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,f:{g:{a:{b:2},h:{b:3}}}}" ) ) == 0 );
+ }
+ };
+
+ class ArrayEmbeddedSet : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0,z:[4,'b']}" ) );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "z.0" << "a" ) ) );
+ ASSERT_EQUALS( client().findOne( ns(), Query() ) , fromjson( "{'_id':0,z:['a','b']}" ) );
+ }
+ };
+
+ class AttemptEmbedInExistingNum : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0,a:1}" ) );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a.b" << 1 ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:1}" ) ) == 0 );
+ }
+ };
+
+ class AttemptEmbedConflictsWithOtherSet : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0}" ) );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a" << 2 << "a.b" << 1 ) ) );
+ ASSERT_EQUALS( client().findOne( ns(), Query() ) , fromjson( "{'_id':0}" ) );
+ }
+ };
+
+ class ModMasksEmbeddedConflict : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0,a:{b:2}}" ) );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a" << 2 << "a.b" << 1 ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:{b:2}}" ) ) == 0 );
+ }
+ };
+
+ class ModOverwritesExistingObject : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0,a:{b:2}}" ) );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a" << BSON( "c" << 2 ) ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:{c:2}}" ) ) == 0 );
+ }
+ };
+
+ class InvalidEmbeddedSet : public Fail {
+ public:
+ virtual void doIt() {
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a." << 1 ) ) );
+ }
+ };
+
+ class UpsertMissingEmbedded : public SetBase {
+ public:
+ void run() {
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a.b" << 1 ) ), true );
+ ASSERT( !client().findOne( ns(), QUERY( "a.b" << 1 ) ).isEmpty() );
+ }
+ };
+
+ class Push : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0,a:[1]}" ) );
+ client().update( ns(), Query(), BSON( "$push" << BSON( "a" << 5 ) ) );
+ ASSERT_EQUALS( client().findOne( ns(), Query() ) , fromjson( "{'_id':0,a:[1,5]}" ) );
+ }
+ };
+
+ class PushInvalidEltType : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0,a:1}" ) );
+ client().update( ns(), Query(), BSON( "$push" << BSON( "a" << 5 ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:1}" ) ) == 0 );
+ }
+ };
+
+ class PushConflictsWithOtherMod : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0,a:[1]}" ) );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a" << 1 ) <<"$push" << BSON( "a" << 5 ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:[1]}" ) ) == 0 );
+ }
+ };
+
+ class PushFromNothing : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0}" ) );
+ client().update( ns(), Query(), BSON( "$push" << BSON( "a" << 5 ) ) );
+ ASSERT_EQUALS( client().findOne( ns(), Query() ) , fromjson( "{'_id':0,a:[5]}" ) );
+ }
+ };
+
+ class PushFromEmpty : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0,a:[]}" ) );
+ client().update( ns(), Query(), BSON( "$push" << BSON( "a" << 5 ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:[5]}" ) ) == 0 );
+ }
+ };
+
+ class PushInsideNothing : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0}" ) );
+ client().update( ns(), Query(), BSON( "$push" << BSON( "a.b" << 5 ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:{b:[5]}}" ) ) == 0 );
+ }
+ };
+
+ class CantPushInsideOtherMod : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0}" ) );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a" << BSONObj() ) << "$push" << BSON( "a.b" << 5 ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0}" ) ) == 0 );
+ }
+ };
+
+ class CantPushTwice : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0,a:[]}" ) );
+ client().update( ns(), Query(), BSON( "$push" << BSON( "a" << 4 ) << "$push" << BSON( "a" << 5 ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:[]}" ) ) == 0 );
+ }
+ };
+
+ class SetEncapsulationConflictsWithExistingType : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0,a:{b:4}}" ) );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a.b.c" << 4.0 ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:{b:4}}" ) ) == 0 );
+ }
+ };
+
+ class CantPushToParent : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0,a:{b:4}}" ) );
+ client().update( ns(), Query(), BSON( "$push" << BSON( "a" << 4.0 ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:{b:4}}" ) ) == 0 );
+ }
+ };
+
+ class CantIncParent : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0,a:{b:4}}" ) );
+ client().update( ns(), Query(), BSON( "$inc" << BSON( "a" << 4.0 ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:{b:4}}" ) ) == 0 );
+ }
+ };
+
+ class DontDropEmpty : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0,a:{b:{}}}" ) );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a.c" << 4.0 ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:{b:{},c:4}}" ) ) == 0 );
+ }
+ };
+
+ class InsertInEmpty : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), fromjson( "{'_id':0,a:{b:{}}}" ) );
+ client().update( ns(), Query(), BSON( "$set" << BSON( "a.b.f" << 4.0 ) ) );
+ ASSERT( client().findOne( ns(), Query() ).woCompare( fromjson( "{'_id':0,a:{b:{f:4}}}" ) ) == 0 );
+ }
+ };
+
+ class IndexParentOfMod : public SetBase {
+ public:
+ void run() {
+ client().ensureIndex( ns(), BSON( "a" << 1 ) );
+ client().insert( ns(), fromjson( "{'_id':0}" ) );
+ client().update( ns(), Query(), fromjson( "{$set:{'a.b':4}}" ) );
+ ASSERT_EQUALS( fromjson( "{'_id':0,a:{b:4}}" ) , client().findOne( ns(), Query() ) );
+ ASSERT_EQUALS( fromjson( "{'_id':0,a:{b:4}}" ) , client().findOne( ns(), fromjson( "{'a.b':4}" ) ) ); // make sure the index works
+ }
+ };
+
+ class IndexModSet : public SetBase {
+ public:
+ void run() {
+ client().ensureIndex( ns(), BSON( "a.b" << 1 ) );
+ client().insert( ns(), fromjson( "{'_id':0,a:{b:3}}" ) );
+ client().update( ns(), Query(), fromjson( "{$set:{'a.b':4}}" ) );
+ ASSERT_EQUALS( fromjson( "{'_id':0,a:{b:4}}" ) , client().findOne( ns(), Query() ) );
+ ASSERT_EQUALS( fromjson( "{'_id':0,a:{b:4}}" ) , client().findOne( ns(), fromjson( "{'a.b':4}" ) ) ); // make sure the index works
+ }
+ };
+
+
+ class PreserveIdWithIndex : public SetBase { // Not using $set, but base class is still useful
+ public:
+ void run() {
+ client().insert( ns(), BSON( "_id" << 55 << "i" << 5 ) );
+ client().update( ns(), BSON( "i" << 5 ), BSON( "i" << 6 ) );
+ ASSERT( !client().findOne( ns(), Query( BSON( "_id" << 55 ) ).hint
+ ( "{\"_id\":ObjectId(\"000000000000000000000000\")}" ) ).isEmpty() );
+ }
+ };
+
+ class CheckNoMods : public SetBase {
+ public:
+ void run() {
+ client().update( ns(), BSONObj(), BSON( "i" << 5 << "$set" << BSON( "q" << 3 ) ), true );
+ ASSERT( error() );
+ }
+ };
+
+ class UpdateMissingToNull : public SetBase {
+ public:
+ void run() {
+ client().insert( ns(), BSON( "a" << 5 ) );
+ client().update( ns(), BSON( "a" << 5 ), fromjson( "{$set:{b:null}}" ) );
+ ASSERT_EQUALS( jstNULL, client().findOne( ns(), QUERY( "a" << 5 ) ).getField( "b" ).type() );
+ }
+ };
+
+ namespace ModSetTests {
+
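+        // haveModForField() should match only the exact dotted paths named in the mod,
+        // while haveConflictingMod() should also flag their prefixes (a mod on "a.b"
+        // conflicts with "a"), but not siblings such as "a.c".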
+ class internal1 {
+ public:
+ void run() {
+ BSONObj b = BSON( "$inc" << BSON( "x" << 1 << "a.b" << 1 ) );
+ ModSet m(b);
+
+ ASSERT( m.haveModForField( "x" ) );
+ ASSERT( m.haveModForField( "a.b" ) );
+ ASSERT( ! m.haveModForField( "y" ) );
+ ASSERT( ! m.haveModForField( "a.c" ) );
+ ASSERT( ! m.haveModForField( "a" ) );
+
+ ASSERT( m.haveConflictingMod( "x" ) );
+ ASSERT( m.haveConflictingMod( "a" ) );
+ ASSERT( m.haveConflictingMod( "a.b" ) );
+ ASSERT( ! m.haveConflictingMod( "a.bc" ) );
+ ASSERT( ! m.haveConflictingMod( "a.c" ) );
+ ASSERT( ! m.haveConflictingMod( "a.a" ) );
+ }
+ };
+
+ class Base {
+ public:
+
+ virtual ~Base() {}
+
+
+ void test( BSONObj morig , BSONObj in , BSONObj wanted ) {
+ BSONObj m = morig.copy();
+ ModSet set(m);
+
+ BSONObj out = set.prepare(in)->createNewFromMods();
+ ASSERT_EQUALS( wanted , out );
+ }
+ };
+
+ class inc1 : public Base {
+ public:
+ void run() {
+ BSONObj m = BSON( "$inc" << BSON( "x" << 1 ) );
+ test( m , BSON( "x" << 5 ) , BSON( "x" << 6 ) );
+ test( m , BSON( "a" << 5 ) , BSON( "a" << 5 << "x" << 1 ) );
+ test( m , BSON( "z" << 5 ) , BSON( "x" << 1 << "z" << 5 ) );
+ }
+ };
+
+ class inc2 : public Base {
+ public:
+ void run() {
+ BSONObj m = BSON( "$inc" << BSON( "a.b" << 1 ) );
+ test( m , BSONObj() , BSON( "a" << BSON( "b" << 1 ) ) );
+ test( m , BSON( "a" << BSON( "b" << 2 ) ) , BSON( "a" << BSON( "b" << 3 ) ) );
+
+ m = BSON( "$inc" << BSON( "a.b" << 1 << "a.c" << 1 ) );
+ test( m , BSONObj() , BSON( "a" << BSON( "b" << 1 << "c" << 1 ) ) );
+
+
+ }
+ };
+
+ class set1 : public Base {
+ public:
+ void run() {
+ test( BSON( "$set" << BSON( "x" << 17 ) ) , BSONObj() , BSON( "x" << 17 ) );
+ test( BSON( "$set" << BSON( "x" << 17 ) ) , BSON( "x" << 5 ) , BSON( "x" << 17 ) );
+
+ test( BSON( "$set" << BSON( "x.a" << 17 ) ) , BSON( "z" << 5 ) , BSON( "x" << BSON( "a" << 17 )<< "z" << 5 ) );
+ }
+ };
+
+ class push1 : public Base {
+ public:
+ void run() {
+ test( BSON( "$push" << BSON( "a" << 5 ) ) , fromjson( "{a:[1]}" ) , fromjson( "{a:[1,5]}" ) );
+ }
+ };
+
+ };
+
+ namespace basic {
+ class Base : public ClientBase {
+ protected:
+
+ virtual const char * ns() = 0;
+ virtual void dotest() = 0;
+
+ void insert( const BSONObj& o ) {
+ client().insert( ns() , o );
+ }
+
+ void update( const BSONObj& m ) {
+ client().update( ns() , BSONObj() , m );
+ }
+
+ BSONObj findOne() {
+ return client().findOne( ns() , BSONObj() );
+ }
+
+ void test( const char* initial , const char* mod , const char* after ) {
+ test( fromjson( initial ) , fromjson( mod ) , fromjson( after ) );
+ }
+
+
+ void test( const BSONObj& initial , const BSONObj& mod , const BSONObj& after ) {
+ client().dropCollection( ns() );
+ insert( initial );
+ update( mod );
+ ASSERT_EQUALS( after , findOne() );
+ client().dropCollection( ns() );
+ }
+
+ public:
+
+ Base() {}
+ virtual ~Base() {
+ }
+
+ void run() {
+ client().dropCollection( ns() );
+
+ dotest();
+
+ client().dropCollection( ns() );
+ }
+ };
+
+ class SingleTest : public Base {
+ virtual BSONObj initial() = 0;
+ virtual BSONObj mod() = 0;
+ virtual BSONObj after() = 0;
+
+ void dotest() {
+ test( initial() , mod() , after() );
+ }
+
+ };
+
+ class inc1 : public SingleTest {
+ virtual BSONObj initial() {
+ return BSON( "_id" << 1 << "x" << 1 );
+ }
+ virtual BSONObj mod() {
+ return BSON( "$inc" << BSON( "x" << 2 ) );
+ }
+ virtual BSONObj after() {
+ return BSON( "_id" << 1 << "x" << 3 );
+ }
+ virtual const char * ns() {
+ return "unittests.inc1";
+ }
+
+ };
+
+ class inc2 : public SingleTest {
+ virtual BSONObj initial() {
+ return BSON( "_id" << 1 << "x" << 1 );
+ }
+ virtual BSONObj mod() {
+ return BSON( "$inc" << BSON( "x" << 2.5 ) );
+ }
+ virtual BSONObj after() {
+ return BSON( "_id" << 1 << "x" << 3.5 );
+ }
+ virtual const char * ns() {
+ return "unittests.inc2";
+ }
+
+ };
+
+ class inc3 : public SingleTest {
+ virtual BSONObj initial() {
+ return BSON( "_id" << 1 << "x" << 537142123123LL );
+ }
+ virtual BSONObj mod() {
+ return BSON( "$inc" << BSON( "x" << 2 ) );
+ }
+ virtual BSONObj after() {
+ return BSON( "_id" << 1 << "x" << 537142123125LL );
+ }
+ virtual const char * ns() {
+ return "unittests.inc3";
+ }
+
+ };
+
+ class inc4 : public SingleTest {
+ virtual BSONObj initial() {
+ return BSON( "_id" << 1 << "x" << 537142123123LL );
+ }
+ virtual BSONObj mod() {
+ return BSON( "$inc" << BSON( "x" << 2LL ) );
+ }
+ virtual BSONObj after() {
+ return BSON( "_id" << 1 << "x" << 537142123125LL );
+ }
+ virtual const char * ns() {
+ return "unittests.inc4";
+ }
+
+ };
+
+ class inc5 : public SingleTest {
+ virtual BSONObj initial() {
+ return BSON( "_id" << 1 << "x" << 537142123123LL );
+ }
+ virtual BSONObj mod() {
+ return BSON( "$inc" << BSON( "x" << 2.0 ) );
+ }
+ virtual BSONObj after() {
+ return BSON( "_id" << 1 << "x" << 537142123125LL );
+ }
+ virtual const char * ns() {
+ return "unittests.inc5";
+ }
+
+ };
+
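+        // inc6 walks an int counter past numeric_limits<int>::max() one $inc at a time
+        // and expects the stored element to be promoted to NumberLong rather than
+        // overflowing (SERVER-2005).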
+ class inc6 : public Base {
+
+ virtual const char * ns() {
+ return "unittests.inc6";
+ }
+
+
+ virtual BSONObj initial() { return BSONObj(); }
+ virtual BSONObj mod() { return BSONObj(); }
+ virtual BSONObj after() { return BSONObj(); }
+
+ void dotest() {
+ long long start = numeric_limits<int>::max() - 5;
+ long long max = numeric_limits<int>::max() + 5ll;
+
+ client().insert( ns() , BSON( "x" << (int)start ) );
+ ASSERT( findOne()["x"].type() == NumberInt );
+
+ while ( start < max ) {
+ update( BSON( "$inc" << BSON( "x" << 1 ) ) );
+ start += 1;
+ ASSERT_EQUALS( start , findOne()["x"].numberLong() ); // SERVER-2005
+ }
+
+ ASSERT( findOne()["x"].type() == NumberLong );
+ }
+ };
+
+ class bit1 : public Base {
+ const char * ns() {
+ return "unittests.bit1";
+ }
+ void dotest() {
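+                // $bit applies its sub-operations in the order given, so
+                // { and : 2, or : 8 } and { or : 2, and : 8 } yield different results.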
+ test( BSON( "_id" << 1 << "x" << 3 ) , BSON( "$bit" << BSON( "x" << BSON( "and" << 2 ) ) ) , BSON( "_id" << 1 << "x" << ( 3 & 2 ) ) );
+ test( BSON( "_id" << 1 << "x" << 1 ) , BSON( "$bit" << BSON( "x" << BSON( "or" << 4 ) ) ) , BSON( "_id" << 1 << "x" << ( 1 | 4 ) ) );
+ test( BSON( "_id" << 1 << "x" << 3 ) , BSON( "$bit" << BSON( "x" << BSON( "and" << 2 << "or" << 8 ) ) ) , BSON( "_id" << 1 << "x" << ( ( 3 & 2 ) | 8 ) ) );
+ test( BSON( "_id" << 1 << "x" << 3 ) , BSON( "$bit" << BSON( "x" << BSON( "or" << 2 << "and" << 8 ) ) ) , BSON( "_id" << 1 << "x" << ( ( 3 | 2 ) & 8 ) ) );
+
+ }
+ };
+
+ class unset : public Base {
+ const char * ns() {
+ return "unittests.unset";
+ }
+ void dotest() {
+ test( "{_id:1,x:1}" , "{$unset:{x:1}}" , "{_id:1}" );
+ }
+ };
+
+ class setswitchint : public Base {
+ const char * ns() {
+ return "unittests.int1";
+ }
+ void dotest() {
+ test( BSON( "_id" << 1 << "x" << 1 ) , BSON( "$set" << BSON( "x" << 5.6 ) ) , BSON( "_id" << 1 << "x" << 5.6 ) );
+ test( BSON( "_id" << 1 << "x" << 5.6 ) , BSON( "$set" << BSON( "x" << 1 ) ) , BSON( "_id" << 1 << "x" << 1 ) );
+ }
+ };
+
+
+ };
+
+ class All : public Suite {
+ public:
+ All() : Suite( "update" ) {
+ }
+ void setupTests() {
+ add< ModId >();
+ add< ModNonmodMix >();
+ add< InvalidMod >();
+ add< ModNotFirst >();
+ add< ModDuplicateFieldSpec >();
+ add< IncNonNumber >();
+ add< PushAllNonArray >();
+ add< PullAllNonArray >();
+ add< IncTargetNonNumber >();
+ add< SetNum >();
+ add< SetString >();
+ add< SetStringDifferentLength >();
+ add< SetStringToNum >();
+ add< SetStringToNumInPlace >();
+ add< ModDotted >();
+ add< SetInPlaceDotted >();
+ add< SetRecreateDotted >();
+ add< SetMissingDotted >();
+ add< SetAdjacentDotted >();
+ add< IncMissing >();
+ add< MultiInc >();
+ add< UnorderedNewSet >();
+ add< UnorderedNewSetAdjacent >();
+ add< ArrayEmbeddedSet >();
+ add< AttemptEmbedInExistingNum >();
+ add< AttemptEmbedConflictsWithOtherSet >();
+ add< ModMasksEmbeddedConflict >();
+ add< ModOverwritesExistingObject >();
+ add< InvalidEmbeddedSet >();
+ add< UpsertMissingEmbedded >();
+ add< Push >();
+ add< PushInvalidEltType >();
+ add< PushConflictsWithOtherMod >();
+ add< PushFromNothing >();
+ add< PushFromEmpty >();
+ add< PushInsideNothing >();
+ add< CantPushInsideOtherMod >();
+ add< CantPushTwice >();
+ add< SetEncapsulationConflictsWithExistingType >();
+ add< CantPushToParent >();
+ add< CantIncParent >();
+ add< DontDropEmpty >();
+ add< InsertInEmpty >();
+ add< IndexParentOfMod >();
+ add< IndexModSet >();
+ add< PreserveIdWithIndex >();
+ add< CheckNoMods >();
+ add< UpdateMissingToNull >();
+
+ add< ModSetTests::internal1 >();
+ add< ModSetTests::inc1 >();
+ add< ModSetTests::inc2 >();
+ add< ModSetTests::set1 >();
+ add< ModSetTests::push1 >();
+
+ add< basic::inc1 >();
+ add< basic::inc2 >();
+ add< basic::inc3 >();
+ add< basic::inc4 >();
+ add< basic::inc5 >();
+ add< basic::inc6 >();
+ add< basic::bit1 >();
+ add< basic::unset >();
+ add< basic::setswitchint >();
+ }
+ } myall;
+
+} // namespace UpdateTests
+
diff --git a/src/mongo/pch.cpp b/src/mongo/pch.cpp
new file mode 100644
index 00000000000..afa19a5be6b
--- /dev/null
+++ b/src/mongo/pch.cpp
@@ -0,0 +1,41 @@
+// pch.cpp : helper for using precompiled headers
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+
+#ifndef JSTIME_VIRTUAL_SKEW
+#define JSTIME_VIRTUAL_SKEW
+
+namespace mongo {
+    // jsTime_virtual_skew is just for testing. A test command manipulates it.
+ long long jsTime_virtual_skew = 0;
+ boost::thread_specific_ptr<long long> jsTime_virtual_thread_skew;
+}
+
+#endif
+
+#if defined( __MSVC__ )
+// should probably check VS version here
+#elif defined( __GNUC__ )
+
+#if __GNUC__ < 4
+#error gcc < 4 not supported
+#endif
+
+#else
+// unknown compiler
+#endif
diff --git a/src/mongo/pch.h b/src/mongo/pch.h
new file mode 100644
index 00000000000..162ff48cc69
--- /dev/null
+++ b/src/mongo/pch.h
@@ -0,0 +1,184 @@
+/** @file pch.h : include file for standard system include files,
+ * or project specific include files that are used frequently, but
+ * are changed infrequently
+ */
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MONGO_PCH_H
+#define MONGO_PCH_H
+
+#if defined(MONGO_EXPOSE_MACROS)
+# define JS_C_STRINGS_ARE_UTF8
+# undef SUPPORT_UCP
+# define SUPPORT_UCP
+# undef SUPPORT_UTF8
+# define SUPPORT_UTF8
+# undef _CRT_SECURE_NO_WARNINGS
+# define _CRT_SECURE_NO_WARNINGS
+#endif
+
+#if defined(_WIN32)
+// for rand_s() usage:
+# define _CRT_RAND_S
+# ifndef NOMINMAX
+# define NOMINMAX
+# endif
+#define WIN32_LEAN_AND_MEAN
+# include <winsock2.h> //this must be included before the first windows.h include
+# include <ws2tcpip.h>
+# include <wspiapi.h>
+# include <windows.h>
+#endif
+
+#if defined(__linux__) && defined(MONGO_EXPOSE_MACROS)
+// glibc's optimized versions are better than g++ builtins
+# define __builtin_strcmp strcmp
+# define __builtin_strlen strlen
+# define __builtin_memchr memchr
+# define __builtin_memcmp memcmp
+# define __builtin_memcpy memcpy
+# define __builtin_memset memset
+# define __builtin_memmove memmove
+#endif
+
+
+#include <ctime>
+#include <cstring>
+#include <sstream>
+#include <string>
+#include <memory>
+#include <string>
+#include <iostream>
+#include <fstream>
+#include <map>
+#include <vector>
+#include <set>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sstream>
+#include <signal.h>
+#include "targetver.h"
+#include "time.h"
+#include "string.h"
+#include "limits.h"
+
+//#include <boost/any.hpp>
+#include "boost/thread/once.hpp"
+//#include <boost/archive/iterators/transform_width.hpp>
+#define BOOST_FILESYSTEM_VERSION 2
+#include <boost/filesystem/convenience.hpp>
+#include <boost/filesystem/exception.hpp>
+#include <boost/filesystem/operations.hpp>
+#include <boost/program_options.hpp>
+#include <boost/shared_ptr.hpp>
+#include <boost/smart_ptr.hpp>
+#include <boost/function.hpp>
+#include "boost/bind.hpp"
+#include "boost/function.hpp"
+#include <boost/thread/tss.hpp>
+#include "boost/detail/endian.hpp"
+#define BOOST_SPIRIT_THREADSAFE
+#include <boost/version.hpp>
+#include <boost/tuple/tuple.hpp>
+#include <boost/thread/thread.hpp>
+#include <boost/thread/condition.hpp>
+#include <boost/thread/recursive_mutex.hpp>
+#include <boost/thread/xtime.hpp>
+#undef assert
+#define assert MONGO_assert
+
+namespace mongo {
+
+ using namespace std;
+ using boost::shared_ptr;
+
+#if defined(_DEBUG)
+ const bool debug=true;
+#else
+ const bool debug=false;
+#endif
+
+ // pdfile versions
+ const int PDFILE_VERSION = 4;
+ const int PDFILE_VERSION_MINOR = 5;
+
+ enum ExitCode {
+ EXIT_CLEAN = 0 ,
+ EXIT_BADOPTIONS = 2 ,
+ EXIT_REPLICATION_ERROR = 3 ,
+ EXIT_NEED_UPGRADE = 4 ,
+ EXIT_SHARDING_ERROR = 5 ,
+ EXIT_KILL = 12 ,
+ EXIT_ABRUPT = 14 ,
+ EXIT_NTSERVICE_ERROR = 20 ,
+ EXIT_JAVA = 21 ,
+ EXIT_OOM_MALLOC = 42 ,
+ EXIT_OOM_REALLOC = 43 ,
+ EXIT_FS = 45 ,
+ EXIT_CLOCK_SKEW = 47 ,
+ EXIT_NET_ERROR = 48 ,
+ EXIT_WINDOWS_SERVICE_STOP = 49 ,
+        EXIT_POSSIBLE_CORRUPTION = 60 , // this means we detected a possible corruption situation, like a buffer overflow
+ EXIT_UNCAUGHT = 100 , // top level exception that wasn't caught
+ EXIT_TEST = 101 ,
+
+ };
+
+ void dbexit( ExitCode returnCode, const char *whyMsg = "", bool tryToGetLock = false);
+
+ /**
+ this is here so you can't just type exit() to quit the program
+        you should either use dbexit to shut down cleanly, or ::exit to tell the system to quit
+ if you use this, you'll get a link error since mongo::exit isn't defined
+ */
+ void exit( ExitCode returnCode );
+ bool inShutdown();
+
+ using namespace boost::filesystem;
+ void asserted(const char *msg, const char *file, unsigned line);
+}
+
+
+
+// TODO: Rework the headers so we don't need this craziness
+#include "bson/inline_decls.h"
+#define MONGO_assert(_Expression) (void)( MONGO_likely(!!(_Expression)) || (mongo::asserted(#_Expression, __FILE__, __LINE__), 0) )
+
+#include "util/debug_util.h"
+#include "util/goodies.h"
+#include "util/log.h"
+#include "util/allocator.h"
+#include "util/assert_util.h"
+
+namespace mongo {
+
+ void sayDbContext(const char *msg = 0);
+ void rawOut( const string &s );
+
+ typedef char _TCHAR;
+
+ using boost::uint32_t;
+ using boost::uint64_t;
+
+ /** called by mongos, mongod, test. do not call from clients and such.
+        invoked before just about everything except global var construction.
+ */
+ void doPreServerStartupInits();
+
+} // namespace mongo
+
+#endif // MONGO_PCH_H
diff --git a/src/mongo/s/balance.cpp b/src/mongo/s/balance.cpp
new file mode 100644
index 00000000000..e1c4b65ca0b
--- /dev/null
+++ b/src/mongo/s/balance.cpp
@@ -0,0 +1,348 @@
+//@file balance.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+
+#include "../db/jsobj.h"
+#include "../db/cmdline.h"
+
+#include "../client/distlock.h"
+
+#include "balance.h"
+#include "server.h"
+#include "shard.h"
+#include "config.h"
+#include "chunk.h"
+#include "grid.h"
+
+namespace mongo {
+
+ Balancer balancer;
+
+ Balancer::Balancer() : _balancedLastTime(0), _policy( new BalancerPolicy() ) {}
+
+ Balancer::~Balancer() {
+ }
+
+ int Balancer::_moveChunks( const vector<CandidateChunkPtr>* candidateChunks ) {
+ int movedCount = 0;
+
+ for ( vector<CandidateChunkPtr>::const_iterator it = candidateChunks->begin(); it != candidateChunks->end(); ++it ) {
+ const CandidateChunk& chunkInfo = *it->get();
+
+ DBConfigPtr cfg = grid.getDBConfig( chunkInfo.ns );
+ assert( cfg );
+
+ ChunkManagerPtr cm = cfg->getChunkManager( chunkInfo.ns );
+ assert( cm );
+
+ const BSONObj& chunkToMove = chunkInfo.chunk;
+ ChunkPtr c = cm->findChunk( chunkToMove["min"].Obj() );
+ if ( c->getMin().woCompare( chunkToMove["min"].Obj() ) || c->getMax().woCompare( chunkToMove["max"].Obj() ) ) {
+ // likely a split happened somewhere
+ cm = cfg->getChunkManager( chunkInfo.ns , true /* reload */);
+ assert( cm );
+
+ c = cm->findChunk( chunkToMove["min"].Obj() );
+ if ( c->getMin().woCompare( chunkToMove["min"].Obj() ) || c->getMax().woCompare( chunkToMove["max"].Obj() ) ) {
+ log() << "chunk mismatch after reload, ignoring will retry issue cm: "
+ << c->getMin() << " min: " << chunkToMove["min"].Obj() << endl;
+ continue;
+ }
+ }
+
+ BSONObj res;
+ if ( c->moveAndCommit( Shard::make( chunkInfo.to ) , Chunk::MaxChunkSize , res ) ) {
+ movedCount++;
+ continue;
+ }
+
+ // the move requires acquiring the collection metadata's lock, which can fail
+ log() << "balancer move failed: " << res << " from: " << chunkInfo.from << " to: " << chunkInfo.to
+ << " chunk: " << chunkToMove << endl;
+
+ if ( res["chunkTooBig"].trueValue() ) {
+ // reload just to be safe
+ cm = cfg->getChunkManager( chunkInfo.ns );
+ assert( cm );
+ c = cm->findChunk( chunkToMove["min"].Obj() );
+
+ log() << "forcing a split because migrate failed for size reasons" << endl;
+
+ res = BSONObj();
+ c->singleSplit( true , res );
+ log() << "forced split results: " << res << endl;
+
+ if ( ! res["ok"].trueValue() ) {
+ log() << "marking chunk as jumbo: " << c->toString() << endl;
+ c->markAsJumbo();
+ // we increment moveCount so we do another round right away
+ movedCount++;
+ }
+
+ }
+ }
+
+ return movedCount;
+ }
+
+ void Balancer::_ping( DBClientBase& conn ) {
+ WriteConcern w = conn.getWriteConcern();
+ conn.setWriteConcern( W_NONE );
+
+ conn.update( ShardNS::mongos ,
+ BSON( "_id" << _myid ) ,
+ BSON( "$set" << BSON( "ping" << DATENOW << "up" << (int)(time(0)-_started) ) ) ,
+ true );
+
+ conn.setWriteConcern( w);
+ }
+
+ bool Balancer::_checkOIDs() {
+ vector<Shard> all;
+ Shard::getAllShards( all );
+
+ map<int,Shard> oids;
+
+ for ( vector<Shard>::iterator i=all.begin(); i!=all.end(); ++i ) {
+ Shard s = *i;
+ BSONObj f = s.runCommand( "admin" , "features" );
+ if ( f["oidMachine"].isNumber() ) {
+ int x = f["oidMachine"].numberInt();
+ if ( oids.count(x) == 0 ) {
+ oids[x] = s;
+ }
+ else {
+ log() << "error: 2 machines have " << x << " as oid machine piece " << s.toString() << " and " << oids[x].toString() << endl;
+ s.runCommand( "admin" , BSON( "features" << 1 << "oidReset" << 1 ) );
+ oids[x].runCommand( "admin" , BSON( "features" << 1 << "oidReset" << 1 ) );
+ return false;
+ }
+ }
+ else {
+ log() << "warning: oidMachine not set on: " << s.toString() << endl;
+ }
+ }
+ return true;
+ }
+
+ void Balancer::_doBalanceRound( DBClientBase& conn, vector<CandidateChunkPtr>* candidateChunks ) {
+ assert( candidateChunks );
+
+ //
+ // 1. Check whether there is any sharded collection to be balanced by querying
+ // the ShardsNS::collections collection
+ //
+
+ auto_ptr<DBClientCursor> cursor = conn.query( ShardNS::collection , BSONObj() );
+ vector< string > collections;
+ while ( cursor->more() ) {
+ BSONObj col = cursor->nextSafe();
+
+ // sharded collections will have a shard "key".
+ if ( ! col["key"].eoo() )
+ collections.push_back( col["_id"].String() );
+ }
+ cursor.reset();
+
+ if ( collections.empty() ) {
+ LOG(1) << "no collections to balance" << endl;
+ return;
+ }
+
+ //
+ // 2. Get a list of all the shards that are participating in this balance round
+ // along with any maximum allowed quotas and current utilization. We get the
+ // latter by issuing db.serverStatus() (mem.mapped) to all shards.
+ //
+ // TODO: skip unresponsive shards and mark information as stale.
+ //
+
+ vector<Shard> allShards;
+ Shard::getAllShards( allShards );
+ if ( allShards.size() < 2) {
+ LOG(1) << "can't balance without more active shards" << endl;
+ return;
+ }
+
+ map< string, BSONObj > shardLimitsMap;
+ for ( vector<Shard>::const_iterator it = allShards.begin(); it != allShards.end(); ++it ) {
+ const Shard& s = *it;
+ ShardStatus status = s.getStatus();
+
+ BSONObj limitsObj = BSON( ShardFields::maxSize( s.getMaxSize() ) <<
+ LimitsFields::currSize( status.mapped() ) <<
+ ShardFields::draining( s.isDraining() ) <<
+ LimitsFields::hasOpsQueued( status.hasOpsQueued() )
+ );
+
+ shardLimitsMap[ s.getName() ] = limitsObj;
+ }
+
+ //
+ // 3. For each collection, check if the balancing policy recommends moving anything around.
+ //
+
+ for (vector<string>::const_iterator it = collections.begin(); it != collections.end(); ++it ) {
+ const string& ns = *it;
+
+ map< string,vector<BSONObj> > shardToChunksMap;
+ cursor = conn.query( ShardNS::chunk , QUERY( "ns" << ns ).sort( "min" ) );
+ while ( cursor->more() ) {
+ BSONObj chunk = cursor->nextSafe();
+ if ( chunk["jumbo"].trueValue() )
+ continue;
+ vector<BSONObj>& chunks = shardToChunksMap[chunk["shard"].String()];
+ chunks.push_back( chunk.getOwned() );
+ }
+ cursor.reset();
+
+ if (shardToChunksMap.empty()) {
+ LOG(1) << "skipping empty collection (" << ns << ")";
+ continue;
+ }
+
+ for ( vector<Shard>::iterator i=allShards.begin(); i!=allShards.end(); ++i ) {
+ // this just makes sure there is an entry in shardToChunksMap for every shard
+ Shard s = *i;
+ shardToChunksMap[s.getName()].size();
+ }
+
+ CandidateChunk* p = _policy->balance( ns , shardLimitsMap , shardToChunksMap , _balancedLastTime );
+ if ( p ) candidateChunks->push_back( CandidateChunkPtr( p ) );
+ }
+ }
+
+ bool Balancer::_init() {
+ try {
+
+ log() << "about to contact config servers and shards" << endl;
+
+ // contact the config server and refresh shard information
+ // checks that each shard is indeed a different process (no hostname mixup)
+            // these checks are redundant in that they're redone at every new round, but we want to do them
+            // initially here so as to catch any problems early
+ Shard::reloadShardInfo();
+ _checkOIDs();
+
+ log() << "config servers and shards contacted successfully" << endl;
+
+ StringBuilder buf;
+ buf << getHostNameCached() << ":" << cmdLine.port;
+ _myid = buf.str();
+ _started = time(0);
+
+ log() << "balancer id: " << _myid << " started at " << time_t_to_String_short(_started) << endl;
+
+ return true;
+
+ }
+ catch ( std::exception& e ) {
+ warning() << "could not initialize balancer, please check that all shards and config servers are up: " << e.what() << endl;
+ return false;
+
+ }
+ }
+
+ void Balancer::run() {
+
+ // this is the body of a BackgroundJob so if we throw here we're basically ending the balancer thread prematurely
+ while ( ! inShutdown() ) {
+
+ if ( ! _init() ) {
+ log() << "will retry to initialize balancer in one minute" << endl;
+ sleepsecs( 60 );
+ continue;
+ }
+
+ break;
+ }
+
+        // getConnectionString and the dist lock constructor do not throw, which is what we expect
+        // while on the balancer thread
+ ConnectionString config = configServer.getConnectionString();
+ DistributedLock balanceLock( config , "balancer" );
+
+ while ( ! inShutdown() ) {
+
+ try {
+
+ ScopedDbConnection conn( config );
+
+ // ping has to be first so we keep things in the config server in sync
+ _ping( conn.conn() );
+
+ // now make sure we should even be running
+ if ( ! grid.shouldBalance() ) {
+ LOG(1) << "skipping balancing round because balancing is disabled" << endl;
+ conn.done();
+
+ sleepsecs( 30 );
+ continue;
+ }
+
+ uassert( 13258 , "oids broken after resetting!" , _checkOIDs() );
+
+ // use fresh shard state
+ Shard::reloadShardInfo();
+
+ // refresh chunk size (even though another balancer might be active)
+ Chunk::refreshChunkSize();
+
+ {
+ dist_lock_try lk( &balanceLock , "doing balance round" );
+ if ( ! lk.got() ) {
+ LOG(1) << "skipping balancing round because another balancer is active" << endl;
+ conn.done();
+
+ sleepsecs( 30 ); // no need to wake up soon
+ continue;
+ }
+
+ LOG(1) << "*** start balancing round" << endl;
+
+ vector<CandidateChunkPtr> candidateChunks;
+ _doBalanceRound( conn.conn() , &candidateChunks );
+ if ( candidateChunks.size() == 0 ) {
+ LOG(1) << "no need to move any chunk" << endl;
+ }
+ else {
+ _balancedLastTime = _moveChunks( &candidateChunks );
+ }
+
+ LOG(1) << "*** end of balancing round" << endl;
+ }
+
+ conn.done();
+
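+                // if chunks were moved last round, run the next round sooner (5s instead of 10s)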
+ sleepsecs( _balancedLastTime ? 5 : 10 );
+ }
+ catch ( std::exception& e ) {
+ log() << "caught exception while doing balance: " << e.what() << endl;
+
+                // Just to match the opening statement when running at log level 1
+ LOG(1) << "*** End of balancing round" << endl;
+
+ sleepsecs( 30 ); // sleep a fair amount b/c of error
+ continue;
+ }
+ }
+
+ }
+
+} // namespace mongo
diff --git a/src/mongo/s/balance.h b/src/mongo/s/balance.h
new file mode 100644
index 00000000000..687599610db
--- /dev/null
+++ b/src/mongo/s/balance.h
@@ -0,0 +1,105 @@
+//@file balance.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../pch.h"
+#include "../util/background.h"
+#include "../client/dbclient.h"
+#include "balancer_policy.h"
+
+namespace mongo {
+
+ /**
+     * The balancer is a background task that tries to keep the number of chunks even across all servers of the cluster.
+     * Although every mongos will have one balancer running, only one of them will be active at any given point in time.
+     * The balancer uses a 'DistributedLock' for that coordination.
+     *
+     * The balancer acts continuously but in "rounds". In each round, it decides whether there is an imbalance by
+     * checking the difference in chunk counts between the most and least loaded shards, and issues at most one
+     * chunk migration request per round if it finds one.
+ */
+ class Balancer : public BackgroundJob {
+ public:
+ Balancer();
+ virtual ~Balancer();
+
+ // BackgroundJob methods
+
+ virtual void run();
+
+ virtual string name() const { return "Balancer"; }
+
+ private:
+ typedef BalancerPolicy::ChunkInfo CandidateChunk;
+ typedef shared_ptr<CandidateChunk> CandidateChunkPtr;
+
+ // hostname:port of my mongos
+ string _myid;
+
+ // time the Balancer started running
+ time_t _started;
+
+ // number of moved chunks in last round
+ int _balancedLastTime;
+
+ // decide which chunks to move; owned here.
+ scoped_ptr<BalancerPolicy> _policy;
+
+ /**
+ * Checks that the balancer can connect to all servers it needs to do its job.
+ *
+ * @return true if balancing can be started
+ *
+ * This method throws on a network exception
+ */
+ bool _init();
+
+ /**
+ * Gathers all the necessary information about shards and chunks, and decides whether there are candidate chunks to
+ * be moved.
+ *
+ * @param conn is the connection with the config server(s)
+ * @param candidateChunks (IN/OUT) filled with candidate chunks, one per collection, that could possibly be moved
+ */
+ void _doBalanceRound( DBClientBase& conn, vector<CandidateChunkPtr>* candidateChunks );
+
+ /**
+         * Issues chunk migration requests, one at a time.
+ *
+ * @param candidateChunks possible chunks to move
+ * @return number of chunks effectively moved
+ */
+ int _moveChunks( const vector<CandidateChunkPtr>* candidateChunks );
+
+ /**
+ * Marks this balancer as being live on the config server(s).
+ *
+ * @param conn is the connection with the config server(s)
+ */
+ void _ping( DBClientBase& conn );
+
+ /**
+ * @return true if all the servers listed in configdb as being shards are reachable and are distinct processes
+ */
+ bool _checkOIDs();
+
+ };
+
+ extern Balancer balancer;
+}
diff --git a/src/mongo/s/balancer_policy.cpp b/src/mongo/s/balancer_policy.cpp
new file mode 100644
index 00000000000..03defa5678a
--- /dev/null
+++ b/src/mongo/s/balancer_policy.cpp
@@ -0,0 +1,192 @@
+// balancer_policy.cpp
+
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+
+#include "config.h"
+
+#include "../client/dbclient.h"
+#include "../util/stringutils.h"
+#include "../util/unittest.h"
+
+#include "balancer_policy.h"
+
+namespace mongo {
+
+ // limits map fields
+ BSONField<long long> LimitsFields::currSize( "currSize" );
+ BSONField<bool> LimitsFields::hasOpsQueued( "hasOpsQueued" );
+
+ BalancerPolicy::ChunkInfo* BalancerPolicy::balance( const string& ns,
+ const ShardToLimitsMap& shardToLimitsMap,
+ const ShardToChunksMap& shardToChunksMap,
+ int balancedLastTime ) {
+ pair<string,unsigned> min("",numeric_limits<unsigned>::max());
+ pair<string,unsigned> max("",0);
+ vector<string> drainingShards;
+
+ bool maxOpsQueued = false;
+
+ for (ShardToChunksIter i = shardToChunksMap.begin(); i!=shardToChunksMap.end(); ++i ) {
+
+ // Find whether this shard's capacity or availability are exhausted
+ const string& shard = i->first;
+ BSONObj shardLimits;
+ ShardToLimitsIter it = shardToLimitsMap.find( shard );
+ if ( it != shardToLimitsMap.end() ) shardLimits = it->second;
+ const bool maxedOut = isSizeMaxed( shardLimits );
+ const bool draining = isDraining( shardLimits );
+ const bool opsQueued = hasOpsQueued( shardLimits );
+
+
+            // Is this shard a better chunk receiver than the current one?
+ // Shards that would be bad receiver candidates:
+ // + maxed out shards
+ // + draining shards
+ // + shards with operations queued for writeback
+ const unsigned size = i->second.size();
+ if ( ! maxedOut && ! draining && ! opsQueued ) {
+ if ( size < min.second ) {
+ min = make_pair( shard , size );
+ }
+ }
+ else if ( opsQueued ) {
+ LOG(1) << "won't send a chunk to: " << shard << " because it has ops queued" << endl;
+ }
+ else if ( maxedOut ) {
+ LOG(1) << "won't send a chunk to: " << shard << " because it is maxedOut" << endl;
+ }
+
+
+            // Check whether this shard is a better chunk donor than the current one.
+ // Draining shards take a lower priority than overloaded shards.
+ if ( size > max.second ) {
+ max = make_pair( shard , size );
+ maxOpsQueued = opsQueued;
+ }
+ if ( draining && (size > 0)) {
+ drainingShards.push_back( shard );
+ }
+ }
+
+ // If there is no candidate chunk receiver -- they may have all been maxed out,
+ // draining, ... -- there's not much that the policy can do.
+ if ( min.second == numeric_limits<unsigned>::max() ) {
+ log() << "no available shards to take chunks" << endl;
+ return NULL;
+ }
+
+ if ( maxOpsQueued ) {
+ log() << "biggest shard " << max.first << " has unprocessed writebacks, waiting for completion of migrate" << endl;
+ return NULL;
+ }
+
+ LOG(1) << "collection : " << ns << endl;
+ LOG(1) << "donor : " << max.second << " chunks on " << max.first << endl;
+ LOG(1) << "receiver : " << min.second << " chunks on " << min.first << endl;
+ if ( ! drainingShards.empty() ) {
+ string drainingStr;
+ joinStringDelim( drainingShards, &drainingStr, ',' );
+ LOG(1) << "draining : " << ! drainingShards.empty() << "(" << drainingShards.size() << ")" << endl;
+ }
+
+ // Solving imbalances takes a higher priority than draining shards. Many shards can
+ // be draining at once but we choose only one of them to cater to per round.
+ // Important to start balanced, so when there are few chunks any imbalance must be fixed.
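+        // The migration threshold scales with collection size: 2 when chunks were moved
+        // last round or the biggest shard holds fewer than 20 chunks, 4 below 80 chunks,
+        // and 8 otherwise.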
+ const int imbalance = max.second - min.second;
+ int threshold = 8;
+ if (balancedLastTime || max.second < 20) threshold = 2;
+ else if (max.second < 80) threshold = 4;
+ string from, to;
+ if ( imbalance >= threshold ) {
+ from = max.first;
+ to = min.first;
+
+ }
+ else if ( ! drainingShards.empty() ) {
+ from = drainingShards[ rand() % drainingShards.size() ];
+ to = min.first;
+
+ }
+ else {
+ // Everything is balanced here!
+ return NULL;
+ }
+
+ const vector<BSONObj>& chunksFrom = shardToChunksMap.find( from )->second;
+ const vector<BSONObj>& chunksTo = shardToChunksMap.find( to )->second;
+ BSONObj chunkToMove = pickChunk( chunksFrom , chunksTo );
+ log() << "chose [" << from << "] to [" << to << "] " << chunkToMove << endl;
+
+ return new ChunkInfo( ns, to, from, chunkToMove );
+ }
+
+ BSONObj BalancerPolicy::pickChunk( const vector<BSONObj>& from, const vector<BSONObj>& to ) {
+        // It is possible for a donor ('from') shard to have fewer chunks than a receiver one ('to')
+ // if the donor is in draining mode.
+
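+        // Prefer a donor chunk whose boundary lines up with the receiver's existing range:
+        // the donor's first chunk if its min equals the receiver's max, or the donor's last
+        // chunk if its max equals the receiver's min; otherwise default to the donor's first chunk.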
+ if ( to.size() == 0 )
+ return from[0];
+
+ if ( from[0]["min"].Obj().woCompare( to[to.size()-1]["max"].Obj() , BSONObj() , false ) == 0 )
+ return from[0];
+
+ if ( from[from.size()-1]["max"].Obj().woCompare( to[0]["min"].Obj() , BSONObj() , false ) == 0 )
+ return from[from.size()-1];
+
+ return from[0];
+ }
+
+ bool BalancerPolicy::isSizeMaxed( BSONObj limits ) {
+ // If there's no limit information for the shard, assume it can be a chunk receiver
+        // (i.e., there's no bound on space utilization)
+ if ( limits.isEmpty() ) {
+ return false;
+ }
+
+ long long maxUsage = limits[ ShardFields::maxSize.name() ].Long();
+ if ( maxUsage == 0 ) {
+ return false;
+ }
+
+ long long currUsage = limits[ LimitsFields::currSize.name() ].Long();
+ if ( currUsage < maxUsage ) {
+ return false;
+ }
+
+ return true;
+ }
+
+ bool BalancerPolicy::isDraining( BSONObj limits ) {
+ BSONElement draining = limits[ ShardFields::draining.name() ];
+ if ( draining.eoo() || ! draining.trueValue() ) {
+ return false;
+ }
+
+ return true;
+ }
+
+ bool BalancerPolicy::hasOpsQueued( BSONObj limits ) {
+ BSONElement opsQueued = limits[ LimitsFields::hasOpsQueued.name() ];
+ if ( opsQueued.eoo() || ! opsQueued.trueValue() ) {
+ return false;
+ }
+ return true;
+ }
+
+} // namespace mongo
diff --git a/src/mongo/s/balancer_policy.h b/src/mongo/s/balancer_policy.h
new file mode 100644
index 00000000000..cef5aa64afc
--- /dev/null
+++ b/src/mongo/s/balancer_policy.h
@@ -0,0 +1,98 @@
+// @file balancer_policy.h
+
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef S_BALANCER_POLICY_HEADER
+#define S_BALANCER_POLICY_HEADER
+
+#include "../pch.h"
+
+namespace mongo {
+
+ class BalancerPolicy {
+ public:
+ struct ChunkInfo;
+
+ /**
+         * Returns a suggested chunk to move within a collection's shards, given information about
+         * space usage and number of chunks for that collection. If the policy doesn't recommend
+         * moving, it returns NULL.
+         *
+         * @param ns is the collection's namespace.
+         * @param shardToLimitsMap is a map from shardId to an object that describes (for now) space
+         * cap and usage. E.g.: { "maxSize" : <size_in_MB> , "usedSize" : <size_in_MB> }.
+         * @param shardToChunksMap is a map from shardId to chunks that live there. A chunk's format
+         * is { }.
+         * @param balancedLastTime is the number of chunks effectively moved in the last round.
+         * @returns NULL or ChunkInfo of the best move to make towards balancing the collection.
+ */
+ typedef map< string,BSONObj > ShardToLimitsMap;
+ typedef map< string,vector<BSONObj> > ShardToChunksMap;
+ static ChunkInfo* balance( const string& ns, const ShardToLimitsMap& shardToLimitsMap,
+ const ShardToChunksMap& shardToChunksMap, int balancedLastTime );
+
+ // below exposed for testing purposes only -- treat it as private --
+
+ static BSONObj pickChunk( const vector<BSONObj>& from, const vector<BSONObj>& to );
+
+ /**
+         * Returns true if a shard cannot receive any new chunks because it reached 'shardLimits'.
+         * Expects the optional fields "maxSize", the cap in MB, and "usedSize", the currently used size
+         * in MB, on 'shardLimits'.
+ */
+ static bool isSizeMaxed( BSONObj shardLimits );
+
+ /**
+         * Returns true if 'shardLimits' contains a field "draining". Expects the optional field
+         * "draining" on 'shardLimits'.
+ */
+ static bool isDraining( BSONObj shardLimits );
+
+ /**
+ * Returns true if a shard currently has operations in any of its writeback queues
+ */
+ static bool hasOpsQueued( BSONObj shardLimits );
+
+ private:
+ // Convenience types
+ typedef ShardToChunksMap::const_iterator ShardToChunksIter;
+ typedef ShardToLimitsMap::const_iterator ShardToLimitsIter;
+
+ };
+
+ struct BalancerPolicy::ChunkInfo {
+ const string ns;
+ const string to;
+ const string from;
+ const BSONObj chunk;
+
+ ChunkInfo( const string& a_ns , const string& a_to , const string& a_from , const BSONObj& a_chunk )
+ : ns( a_ns ) , to( a_to ) , from( a_from ), chunk( a_chunk ) {}
+ };
+
+ /**
+ * Field names used in the 'limits' map.
+ */
+ struct LimitsFields {
+ // we use 'draining' and 'maxSize' from the 'shards' collection plus the following
+ static BSONField<long long> currSize; // currently used disk space in bytes
+ static BSONField<bool> hasOpsQueued; // writeback queue is not empty?
+ };
+
+} // namespace mongo
+
+#endif // S_BALANCER_POLICY_HEADER
diff --git a/src/mongo/s/chunk.cpp b/src/mongo/s/chunk.cpp
new file mode 100644
index 00000000000..e0e7edee9bd
--- /dev/null
+++ b/src/mongo/s/chunk.cpp
@@ -0,0 +1,1104 @@
+// @file chunk.cpp
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+
+#include "../client/connpool.h"
+#include "../db/querypattern.h"
+#include "../db/queryutil.h"
+#include "../util/unittest.h"
+#include "../util/timer.h"
+
+#include "chunk.h"
+#include "config.h"
+#include "cursors.h"
+#include "grid.h"
+#include "strategy.h"
+#include "client.h"
+
+namespace mongo {
+
+ inline bool allOfType(BSONType type, const BSONObj& o) {
+ BSONObjIterator it(o);
+ while(it.more()) {
+ if (it.next().type() != type)
+ return false;
+ }
+ return true;
+ }
+
+ // ------- Shard --------
+
+ string Chunk::chunkMetadataNS = "config.chunks";
+
+ int Chunk::MaxChunkSize = 1024 * 1024 * 64;
+ int Chunk::MaxObjectPerChunk = 250000;
+
+
+ Chunk::Chunk(const ChunkManager * manager, BSONObj from)
+ : _manager(manager), _lastmod(0), _dataWritten(mkDataWritten())
+ {
+ string ns = from.getStringField( "ns" );
+ _shard.reset( from.getStringField( "shard" ) );
+
+ _lastmod = from["lastmod"];
+ assert( _lastmod > 0 );
+
+ _min = from.getObjectField( "min" ).getOwned();
+ _max = from.getObjectField( "max" ).getOwned();
+
+ _jumbo = from["jumbo"].trueValue();
+
+ uassert( 10170 , "Chunk needs a ns" , ! ns.empty() );
+ uassert( 13327 , "Chunk ns must match server ns" , ns == _manager->getns() );
+
+ uassert( 10171 , "Chunk needs a server" , _shard.ok() );
+
+ uassert( 10172 , "Chunk needs a min" , ! _min.isEmpty() );
+ uassert( 10173 , "Chunk needs a max" , ! _max.isEmpty() );
+ }
+
+
+ Chunk::Chunk(const ChunkManager * info , const BSONObj& min, const BSONObj& max, const Shard& shard)
+ : _manager(info), _min(min), _max(max), _shard(shard), _lastmod(0), _jumbo(false), _dataWritten(mkDataWritten())
+ {}
+
+ long Chunk::mkDataWritten() {
+ return rand() % ( MaxChunkSize / 5 );
+ }
+
+ string Chunk::getns() const {
+ assert( _manager );
+ return _manager->getns();
+ }
+
+ bool Chunk::contains( const BSONObj& obj ) const {
+ return
+ _manager->getShardKey().compare( getMin() , obj ) <= 0 &&
+ _manager->getShardKey().compare( obj , getMax() ) < 0;
+ }
+
+ bool ChunkRange::contains(const BSONObj& obj) const {
+ // same as Chunk method
+ return
+ _manager->getShardKey().compare( getMin() , obj ) <= 0 &&
+ _manager->getShardKey().compare( obj , getMax() ) < 0;
+ }
+
+ bool Chunk::minIsInf() const {
+ return _manager->getShardKey().globalMin().woCompare( getMin() ) == 0;
+ }
+
+ bool Chunk::maxIsInf() const {
+ return _manager->getShardKey().globalMax().woCompare( getMax() ) == 0;
+ }
+
+ BSONObj Chunk::_getExtremeKey( int sort ) const {
+ ShardConnection conn( getShard().getConnString() , _manager->getns() );
+ Query q;
+ if ( sort == 1 ) {
+ q.sort( _manager->getShardKey().key() );
+ }
+ else {
+ // need to invert shard key pattern to sort backwards
+ // TODO: make a helper in ShardKeyPattern?
+
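+            // e.g. a shard key of { a : 1, b : -1 } becomes the sort spec { a : -1, b : 1 }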
+ BSONObj k = _manager->getShardKey().key();
+ BSONObjBuilder r;
+
+ BSONObjIterator i(k);
+ while( i.more() ) {
+ BSONElement e = i.next();
+ uassert( 10163 , "can only handle numbers here - which i think is correct" , e.isNumber() );
+ r.append( e.fieldName() , -1 * e.number() );
+ }
+
+ q.sort( r.obj() );
+ }
+
+ // find the extreme key
+ BSONObj end = conn->findOne( _manager->getns() , q );
+ conn.done();
+
+ if ( end.isEmpty() )
+ return BSONObj();
+
+ return _manager->getShardKey().extractKey( end );
+ }
+
+ void Chunk::pickMedianKey( BSONObj& medianKey ) const {
+ // Ask the mongod holding this chunk to figure out the split points.
+ ScopedDbConnection conn( getShard().getConnString() );
+ BSONObj result;
+ BSONObjBuilder cmd;
+ cmd.append( "splitVector" , _manager->getns() );
+ cmd.append( "keyPattern" , _manager->getShardKey().key() );
+ cmd.append( "min" , getMin() );
+ cmd.append( "max" , getMax() );
+ cmd.appendBool( "force" , true );
+ BSONObj cmdObj = cmd.obj();
+
+ if ( ! conn->runCommand( "admin" , cmdObj , result )) {
+ conn.done();
+ ostringstream os;
+ os << "splitVector command (median key) failed: " << result;
+ uassert( 13503 , os.str() , 0 );
+ }
+
+ BSONObjIterator it( result.getObjectField( "splitKeys" ) );
+ if ( it.more() ) {
+ medianKey = it.next().Obj().getOwned();
+ }
+
+ conn.done();
+ }
+
+ void Chunk::pickSplitVector( vector<BSONObj>& splitPoints , int chunkSize /* bytes */, int maxPoints, int maxObjs ) const {
+ // Ask the mongod holding this chunk to figure out the split points.
+ ScopedDbConnection conn( getShard().getConnString() );
+ BSONObj result;
+ BSONObjBuilder cmd;
+ cmd.append( "splitVector" , _manager->getns() );
+ cmd.append( "keyPattern" , _manager->getShardKey().key() );
+ cmd.append( "min" , getMin() );
+ cmd.append( "max" , getMax() );
+ cmd.append( "maxChunkSizeBytes" , chunkSize );
+ cmd.append( "maxSplitPoints" , maxPoints );
+ cmd.append( "maxChunkObjects" , maxObjs );
+ BSONObj cmdObj = cmd.obj();
+
+ if ( ! conn->runCommand( "admin" , cmdObj , result )) {
+ conn.done();
+ ostringstream os;
+ os << "splitVector command failed: " << result;
+ uassert( 13345 , os.str() , 0 );
+ }
+
+ BSONObjIterator it( result.getObjectField( "splitKeys" ) );
+ while ( it.more() ) {
+ splitPoints.push_back( it.next().Obj().getOwned() );
+ }
+ conn.done();
+ }
+
+ BSONObj Chunk::singleSplit( bool force , BSONObj& res ) const {
+ vector<BSONObj> splitPoint;
+
+        // if splitting is not obligatory we may return early if there is not enough data
+ // we cap the number of objects that would fall in the first half (before the split point)
+ // the rationale is we'll find a split point without traversing all the data
+ if ( ! force ) {
+ vector<BSONObj> candidates;
+ const int maxPoints = 2;
+ pickSplitVector( candidates , getManager()->getCurrentDesiredChunkSize() , maxPoints , MaxObjectPerChunk );
+ if ( candidates.size() <= 1 ) {
+ // no split points means there isn't enough data to split on
+                // 1 split point means the chunk is between half the chunk size and the full chunk size
+ // so we shouldn't split
+ LOG(1) << "chunk not full enough to trigger auto-split " << ( candidates.size() == 0 ? "no split entry" : candidates[0].toString() ) << endl;
+ return BSONObj();
+ }
+
+ splitPoint.push_back( candidates.front() );
+
+ }
+ else {
+ // if forcing a split, use the chunk's median key
+ BSONObj medianKey;
+ pickMedianKey( medianKey );
+ if ( ! medianKey.isEmpty() )
+ splitPoint.push_back( medianKey );
+ }
+
+ // We assume that if the chunk being split is the first (or last) one on the collection, this chunk is
+ // likely to see more insertions. Instead of splitting mid-chunk, we use the very first (or last) key
+ // as a split point.
+ if ( minIsInf() ) {
+ splitPoint.clear();
+ BSONObj key = _getExtremeKey( 1 );
+ if ( ! key.isEmpty() ) {
+ splitPoint.push_back( key );
+ }
+
+ }
+ else if ( maxIsInf() ) {
+ splitPoint.clear();
+ BSONObj key = _getExtremeKey( -1 );
+ if ( ! key.isEmpty() ) {
+ splitPoint.push_back( key );
+ }
+ }
+
+ // Normally, we'd have a sound split point here if the chunk is not empty. It's also a good place to
+ // sanity check.
+ if ( splitPoint.empty() || _min == splitPoint.front() || _max == splitPoint.front() ) {
+ log() << "want to split chunk, but can't find split point chunk " << toString()
+ << " got: " << ( splitPoint.empty() ? "<empty>" : splitPoint.front().toString() ) << endl;
+ return BSONObj();
+ }
+
+ if (multiSplit( splitPoint , res ))
+ return splitPoint.front();
+ else
+ return BSONObj();
+ }
+
+ bool Chunk::multiSplit( const vector<BSONObj>& m , BSONObj& res ) const {
+ const size_t maxSplitPoints = 8192;
+
+ uassert( 10165 , "can't split as shard doesn't have a manager" , _manager );
+ uassert( 13332 , "need a split key to split chunk" , !m.empty() );
+ uassert( 13333 , "can't split a chunk in that many parts", m.size() < maxSplitPoints );
+ uassert( 13003 , "can't split a chunk with only one distinct value" , _min.woCompare(_max) );
+
+ ScopedDbConnection conn( getShard().getConnString() );
+
+ BSONObjBuilder cmd;
+ cmd.append( "splitChunk" , _manager->getns() );
+ cmd.append( "keyPattern" , _manager->getShardKey().key() );
+ cmd.append( "min" , getMin() );
+ cmd.append( "max" , getMax() );
+ cmd.append( "from" , getShard().getConnString() );
+ cmd.append( "splitKeys" , m );
+ cmd.append( "shardId" , genID() );
+ cmd.append( "configdb" , configServer.modelServer() );
+ BSONObj cmdObj = cmd.obj();
+
+ if ( ! conn->runCommand( "admin" , cmdObj , res )) {
+ warning() << "splitChunk failed - cmd: " << cmdObj << " result: " << res << endl;
+ conn.done();
+
+            // reloading won't strictly solve all problems, e.g. the collection's metadata lock may be taken,
+            // but we issue the reload here so that mongos can refresh without needing a subsequent write/read to trigger it
+ _manager->reload();
+
+ return false;
+ }
+
+ conn.done();
+
+ // force reload of config
+ _manager->reload();
+
+ return true;
+ }
+
+ bool Chunk::moveAndCommit( const Shard& to , long long chunkSize /* bytes */, BSONObj& res ) const {
+ uassert( 10167 , "can't move shard to its current location!" , getShard() != to );
+
+ log() << "moving chunk ns: " << _manager->getns() << " moving ( " << toString() << ") " << _shard.toString() << " -> " << to.toString() << endl;
+
+ Shard from = _shard;
+
+ ScopedDbConnection fromconn( from);
+
+ bool worked = fromconn->runCommand( "admin" ,
+ BSON( "moveChunk" << _manager->getns() <<
+ "from" << from.getConnString() <<
+ "to" << to.getConnString() <<
+ "min" << _min <<
+ "max" << _max <<
+ "maxChunkSizeBytes" << chunkSize <<
+ "shardId" << genID() <<
+ "configdb" << configServer.modelServer()
+ ) ,
+ res
+ );
+
+ fromconn.done();
+
+ log( worked ) << "moveChunk result: " << res << endl;
+
+ // if succeeded, needs to reload to pick up the new location
+ // if failed, mongos may be stale
+        // a reload may be excessive here, as the failure could simply be because the collection metadata lock is taken
+ _manager->reload();
+
+ return worked;
+ }
+
+ bool Chunk::splitIfShould( long dataWritten ) const {
+ LastError::Disabled d( lastError.get() );
+
+ try {
+ _dataWritten += dataWritten;
+ int splitThreshold = getManager()->getCurrentDesiredChunkSize();
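+            // the first and last chunks are expected to absorb more inserts (see singleSplit),
+            // so use a slightly lower threshold for them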
+ if ( minIsInf() || maxIsInf() ) {
+ splitThreshold = (int) ((double)splitThreshold * .9);
+ }
+
+ if ( _dataWritten < splitThreshold / 5 )
+ return false;
+
+ if ( ! getManager()->_splitTickets.tryAcquire() ) {
+                LOG(1) << "won't auto split because not enough tickets: " << getManager()->getns() << endl;
+ return false;
+ }
+ TicketHolderReleaser releaser( &getManager()->_splitTickets );
+
+ // this is a bit ugly
+ // we need it so that mongos blocks for the writes to actually be committed
+ // this does mean mongos has more back pressure than mongod alone
+            // since it's not 100% tcp queue bound
+ // this was implicit before since we did a splitVector on the same socket
+ ShardConnection::sync();
+
+ LOG(1) << "about to initiate autosplit: " << *this << " dataWritten: " << _dataWritten << " splitThreshold: " << splitThreshold << endl;
+
+ BSONObj res;
+ BSONObj splitPoint = singleSplit( false /* does not force a split if not enough data */ , res );
+ if ( splitPoint.isEmpty() ) {
+ // singleSplit would have issued a message if we got here
+                _dataWritten = 0; // this means there wasn't enough data to split, so don't try again until considerably more data is written
+ return false;
+ }
+
+ if ( maxIsInf() || minIsInf() ) {
+ // we don't want to reset _dataWritten since we kind of want to check the other side right away
+ }
+ else {
+ _dataWritten = 0; // we're splitting, so should wait a bit
+ }
+
+
+
+ log() << "autosplitted " << _manager->getns() << " shard: " << toString()
+ << " on: " << splitPoint << " (splitThreshold " << splitThreshold << ")"
+#ifdef _DEBUG
+ << " size: " << getPhysicalSize() // slow - but can be useful when debugging
+#endif
+ << ( res["shouldMigrate"].eoo() ? "" : " (migrate suggested)" ) << endl;
+
+ BSONElement shouldMigrate = res["shouldMigrate"]; // not in mongod < 1.9.1 but that is ok
+ if (!shouldMigrate.eoo() && grid.shouldBalance()){
+ BSONObj range = shouldMigrate.embeddedObject();
+ BSONObj min = range["min"].embeddedObject();
+ BSONObj max = range["max"].embeddedObject();
+
+ Shard newLocation = Shard::pick( getShard() );
+ if ( getShard() == newLocation ) {
+ // if this is the best shard, then we shouldn't do anything (Shard::pick already logged our shard).
+ LOG(1) << "recently split chunk: " << range << " already in the best shard: " << getShard() << endl;
+ return true; // we did split even if we didn't migrate
+ }
+
+                ChunkManagerPtr cm = _manager->reload(false/*just reloaded in multiSplit*/);
+ ChunkPtr toMove = cm->findChunk(min);
+
+ if ( ! (toMove->getMin() == min && toMove->getMax() == max) ){
+ LOG(1) << "recently split chunk: " << range << " modified before we could migrate " << toMove << endl;
+ return true;
+ }
+
+ log() << "moving chunk (auto): " << toMove << " to: " << newLocation.toString() << endl;
+
+ BSONObj res;
+ massert( 10412 ,
+ str::stream() << "moveAndCommit failed: " << res ,
+ toMove->moveAndCommit( newLocation , MaxChunkSize , res ) );
+
+ // update our config
+ _manager->reload();
+ }
+
+ return true;
+
+ }
+ catch ( std::exception& e ) {
+ // if the collection lock is taken (e.g. we're migrating), it is fine for the split to fail.
+ warning() << "could have autosplit on collection: " << _manager->getns() << " but: " << e.what() << endl;
+ return false;
+ }
+ }
+
+ long Chunk::getPhysicalSize() const {
+ ScopedDbConnection conn( getShard().getConnString() );
+
+ BSONObj result;
+ uassert( 10169 , "datasize failed!" , conn->runCommand( "admin" ,
+ BSON( "datasize" << _manager->getns()
+ << "keyPattern" << _manager->getShardKey().key()
+ << "min" << getMin()
+ << "max" << getMax()
+ << "maxSize" << ( MaxChunkSize + 1 )
+ << "estimate" << true
+ ) , result ) );
+
+ conn.done();
+ return (long)result["size"].number();
+ }
+
+ void Chunk::appendShortVersion( const char * name , BSONObjBuilder& b ) const {
+ BSONObjBuilder bb( b.subobjStart( name ) );
+ bb.append( "min" , _min );
+ bb.append( "max" , _max );
+ bb.done();
+ }
+
+ bool Chunk::operator==( const Chunk& s ) const {
+ return
+ _manager->getShardKey().compare( _min , s._min ) == 0 &&
+ _manager->getShardKey().compare( _max , s._max ) == 0
+ ;
+ }
+
+ void Chunk::serialize(BSONObjBuilder& to,ShardChunkVersion myLastMod) {
+
+ to.append( "_id" , genID( _manager->getns() , _min ) );
+
+ if ( myLastMod.isSet() ) {
+ to.appendTimestamp( "lastmod" , myLastMod );
+ }
+ else if ( _lastmod.isSet() ) {
+ assert( _lastmod > 0 && _lastmod < 1000 );
+ to.appendTimestamp( "lastmod" , _lastmod );
+ }
+ else {
+ assert(0);
+ }
+
+ to << "ns" << _manager->getns();
+ to << "min" << _min;
+ to << "max" << _max;
+ to << "shard" << _shard.getName();
+ }
+
+ string Chunk::genID( const string& ns , const BSONObj& o ) {
+ StringBuilder buf( ns.size() + o.objsize() + 16 );
+ buf << ns << "-";
+
+ BSONObjIterator i(o);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ buf << e.fieldName() << "_" << e.toString(false, true);
+ }
+
+ return buf.str();
+ }
+
+ string Chunk::toString() const {
+ stringstream ss;
+ ss << "ns:" << _manager->getns() << " at: " << _shard.toString() << " lastmod: " << _lastmod.toString() << " min: " << _min << " max: " << _max;
+ return ss.str();
+ }
+
+ ShardKeyPattern Chunk::skey() const {
+ return _manager->getShardKey();
+ }
+
+ void Chunk::markAsJumbo() const {
+ // set this first
+ // even if we can't set it in the db
+ // at least this mongos won't try and keep moving
+ _jumbo = true;
+
+ try {
+ ScopedDbConnection conn( configServer.modelServer() );
+ conn->update( chunkMetadataNS , BSON( "_id" << genID() ) , BSON( "$set" << BSON( "jumbo" << true ) ) );
+ conn.done();
+ }
+ catch ( std::exception& ) {
+ warning() << "couldn't set jumbo for chunk: " << genID() << endl;
+ }
+ }
+
+ void Chunk::refreshChunkSize() {
+ BSONObj o = grid.getConfigSetting("chunksize");
+
+ if ( o.isEmpty() ) {
+ return;
+ }
+
+ int csize = o["value"].numberInt();
+
+ // validate chunksize before proceeding
+ if ( csize == 0 ) {
+            // a value of 0 means the setting is unset or invalid; keep the current MaxChunkSize
+ log() << "warning: invalid chunksize (" << csize << ") ignored" << endl;
+ return;
+ }
+
+ LOG(1) << "Refreshing MaxChunkSize: " << csize << endl;
+ Chunk::MaxChunkSize = csize * 1024 * 1024;
+ }
+
+ // ------- ChunkManager --------
+
+ AtomicUInt ChunkManager::NextSequenceNumber = 1;
+
+ ChunkManager::ChunkManager( string ns , ShardKeyPattern pattern , bool unique ) :
+ _ns( ns ) , _key( pattern ) , _unique( unique ) , _chunkRanges(), _mutex("ChunkManager"),
+ _nsLock( ConnectionString( configServer.modelServer() , ConnectionString::SYNC ) , ns ),
+
+        // The shard versioning mechanism hinges on keeping track of the number of times we reloaded ChunkManagers.
+ // Increasing this number here will prompt checkShardVersion() to refresh the connection-level versions to
+ // the most up to date value.
+ _sequenceNumber(++NextSequenceNumber),
+
+ _splitTickets( 5 )
+
+ {
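+        // loading can race with concurrent metadata changes, so retry a few times
+        // with a short backoff before aborting construction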
+ int tries = 3;
+ while (tries--) {
+ ChunkMap chunkMap;
+ set<Shard> shards;
+ ShardVersionMap shardVersions;
+ Timer t;
+ _load(chunkMap, shards, shardVersions);
+ {
+ int ms = t.millis();
+ log() << "ChunkManager: time to load chunks for " << ns << ": " << ms << "ms"
+ << " sequenceNumber: " << _sequenceNumber
+ << " version: " << _version.toString()
+ << endl;
+ }
+
+ if (_isValid(chunkMap)) {
+ // These variables are const for thread-safety. Since the
+ // constructor can only be called from one thread, we don't have
+ // to worry about that here.
+ const_cast<ChunkMap&>(_chunkMap).swap(chunkMap);
+ const_cast<set<Shard>&>(_shards).swap(shards);
+ const_cast<ShardVersionMap&>(_shardVersions).swap(shardVersions);
+ const_cast<ChunkRangeManager&>(_chunkRanges).reloadAll(_chunkMap);
+ return;
+ }
+
+ if (_chunkMap.size() < 10) {
+ _printChunks();
+ }
+
+ warning() << "ChunkManager loaded an invalid config, trying again" << endl;
+
+ sleepmillis(10 * (3-tries));
+ }
+
+ // this will abort construction so we should never have a reference to an invalid config
+ msgasserted(13282, "Couldn't load a valid config for " + _ns + " after 3 attempts. Please try again.");
+ }
+
+ ChunkManagerPtr ChunkManager::reload(bool force) const {
+ return grid.getDBConfig(getns())->getChunkManager(getns(), force);
+ }
+
+ void ChunkManager::_load(ChunkMap& chunkMap, set<Shard>& shards, ShardVersionMap& shardVersions) {
+ ScopedDbConnection conn( configServer.modelServer() );
+
+ // TODO really need the sort?
+ auto_ptr<DBClientCursor> cursor = conn->query( Chunk::chunkMetadataNS, QUERY("ns" << _ns).sort("lastmod",-1), 0, 0, 0, 0,
+ (DEBUG_BUILD ? 2 : 1000000)); // batch size. Try to induce potential race conditions in debug builds
+ assert( cursor.get() );
+ while ( cursor->more() ) {
+ BSONObj d = cursor->next();
+ if ( d["isMaxMarker"].trueValue() ) {
+ continue;
+ }
+
+ ChunkPtr c( new Chunk( this, d ) );
+
+ chunkMap[c->getMax()] = c;
+ shards.insert(c->getShard());
+
+
+ // set global max
+ if ( c->getLastmod() > _version )
+ _version = c->getLastmod();
+
+ // set shard max
+ ShardChunkVersion& shardMax = shardVersions[c->getShard()];
+ if ( c->getLastmod() > shardMax )
+ shardMax = c->getLastmod();
+ }
+ conn.done();
+ }
+
+ bool ChunkManager::_isValid(const ChunkMap& chunkMap) {
+#define ENSURE(x) do { if(!(x)) { log() << "ChunkManager::_isValid failed: " #x << endl; return false; } } while(0)
+
+ if (chunkMap.empty())
+ return true;
+
+ // Check endpoints
+ ENSURE(allOfType(MinKey, chunkMap.begin()->second->getMin()));
+ ENSURE(allOfType(MaxKey, boost::prior(chunkMap.end())->second->getMax()));
+
+ // Make sure there are no gaps or overlaps
+ for (ChunkMap::const_iterator it=boost::next(chunkMap.begin()), end=chunkMap.end(); it != end; ++it) {
+ ChunkMap::const_iterator last = boost::prior(it);
+
+ if (!(it->second->getMin() == last->second->getMax())) {
+ PRINT(it->second->toString());
+ PRINT(it->second->getMin());
+ PRINT(last->second->getMax());
+ }
+ ENSURE(it->second->getMin() == last->second->getMax());
+ }
+
+ return true;
+
+#undef ENSURE
+ }
+
+ void ChunkManager::_printChunks() const {
+ for (ChunkMap::const_iterator it=_chunkMap.begin(), end=_chunkMap.end(); it != end; ++it) {
+ log() << *it->second << endl;
+ }
+ }
+
+ bool ChunkManager::hasShardKey( const BSONObj& obj ) const {
+ return _key.hasShardKey( obj );
+ }
+
+ void ChunkManager::createFirstChunks( const Shard& primary , vector<BSONObj>* initPoints , vector<Shard>* initShards ) const {
+ // TODO distlock?
+ assert( _chunkMap.size() == 0 );
+
+ vector<BSONObj> splitPoints;
+ vector<Shard> shards;
+ unsigned long long numObjects = 0;
+ Chunk c(this, _key.globalMin(), _key.globalMax(), primary);
+
+ if ( !initPoints || !initPoints->size() ) {
+ // discover split points
+ {
+ // get stats to see if there is any data
+ ScopedDbConnection shardConn( primary.getConnString() );
+ numObjects = shardConn->count( getns() );
+ shardConn.done();
+ }
+
+ if ( numObjects > 0 )
+ c.pickSplitVector( splitPoints , Chunk::MaxChunkSize );
+
+            // since docs already exist, must use the primary shard
+ shards.push_back( primary );
+ } else {
+ // make sure points are unique and ordered
+ set<BSONObj> orderedPts;
+ for ( unsigned i = 0; i < initPoints->size(); ++i ) {
+ BSONObj pt = (*initPoints)[i];
+ orderedPts.insert( pt );
+ }
+ for ( set<BSONObj>::iterator it = orderedPts.begin(); it != orderedPts.end(); ++it ) {
+ splitPoints.push_back( *it );
+ }
+
+ if ( !initShards || !initShards->size() ) {
+ // use all shards, starting with primary
+ shards.push_back( primary );
+ vector<Shard> tmp;
+ primary.getAllShards( tmp );
+ for ( unsigned i = 0; i < tmp.size(); ++i ) {
+ if ( tmp[i] != primary )
+ shards.push_back( tmp[i] );
+ }
+ }
+ }
+
+ // this is the first chunk; start the versioning from scratch
+ ShardChunkVersion version;
+ version.incMajor();
+
+ log() << "going to create " << splitPoints.size() + 1 << " chunk(s) for: " << _ns << endl;
+
+ ScopedDbConnection conn( configServer.modelServer() );
+
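+        // n split points produce n+1 chunks, assigned round-robin across the candidate shards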
+ for ( unsigned i=0; i<=splitPoints.size(); i++ ) {
+ BSONObj min = i == 0 ? _key.globalMin() : splitPoints[i-1];
+ BSONObj max = i < splitPoints.size() ? splitPoints[i] : _key.globalMax();
+
+ Chunk temp( this , min , max , shards[ i % shards.size() ] );
+
+ BSONObjBuilder chunkBuilder;
+ temp.serialize( chunkBuilder , version );
+ BSONObj chunkObj = chunkBuilder.obj();
+
+ conn->update( Chunk::chunkMetadataNS, QUERY( "_id" << temp.genID() ), chunkObj, true, false );
+
+ version.incMinor();
+ }
+
+ string errmsg = conn->getLastError();
+ if ( errmsg.size() ) {
+ string ss = str::stream() << "creating first chunks failed. result: " << errmsg;
+ error() << ss << endl;
+ msgasserted( 15903 , ss );
+ }
+
+ conn.done();
+
+ if ( numObjects == 0 ) {
+ // the ensure index will have the (desired) indirect effect of creating the collection on the
+ // assigned shard, as it sets up the index over the sharding keys.
+ ScopedDbConnection shardConn( c.getShard().getConnString() );
+ shardConn->ensureIndex( getns() , getShardKey().key() , _unique , "" , false ); // do not cache ensureIndex SERVER-1691
+ shardConn.done();
+ }
+
+ }
+
+ ChunkPtr ChunkManager::findChunk( const BSONObj & obj ) const {
+ BSONObj key = _key.extractKey(obj);
+
+ {
+ BSONObj foo;
+ ChunkPtr c;
+ {
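+                // _chunkMap is keyed by each chunk's max key, so upper_bound(key) yields the
+                // first chunk whose (exclusive) max is greater than key, i.e. the candidate owner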
+ ChunkMap::const_iterator it = _chunkMap.upper_bound(key);
+ if (it != _chunkMap.end()) {
+ foo = it->first;
+ c = it->second;
+ }
+ }
+
+ if ( c ) {
+ if ( c->contains( key ) ){
+ dassert(c->contains(key)); // doesn't use fast-path in extractKey
+ return c;
+ }
+
+ PRINT(foo);
+ PRINT(*c);
+ PRINT(key);
+
+ reload();
+ massert(13141, "Chunk map pointed to incorrect chunk", false);
+ }
+ }
+
+ throw UserException( 8070 , str::stream() << "couldn't find a chunk which should be impossible: " << key );
+ }
+
+ ChunkPtr ChunkManager::findChunkOnServer( const Shard& shard ) const {
+ for ( ChunkMap::const_iterator i=_chunkMap.begin(); i!=_chunkMap.end(); ++i ) {
+ ChunkPtr c = i->second;
+ if ( c->getShard() == shard )
+ return c;
+ }
+
+ return ChunkPtr();
+ }
+
+ void ChunkManager::getShardsForQuery( set<Shard>& shards , const BSONObj& query ) const {
+ //TODO look into FieldRangeSetOr
+ OrRangeGenerator org(_ns.c_str(), query, false);
+
+ const string special = org.getSpecial();
+ if (special == "2d") {
+ BSONForEach(field, query) {
+ if (getGtLtOp(field) == BSONObj::opNEAR) {
+ uassert(13501, "use geoNear command rather than $near query", false);
+ // TODO: convert to geoNear rather than erroring out
+ }
+ // $within queries are fine
+ }
+ }
+ else if (!special.empty()) {
+ uassert(13502, "unrecognized special query type: " + special, false);
+ }
+
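+        // union the shards owning each $or clause's shard-key range, stopping early
+        // once every shard is already included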
+ do {
+ boost::scoped_ptr<FieldRangeSetPair> frsp (org.topFrsp());
+ {
+ // special case if most-significant field isn't in query
+ FieldRange range = frsp->singleKeyRange(_key.key().firstElementFieldName());
+ if ( !range.nontrivial() ) {
+ DEV PRINT(range.nontrivial());
+ getShardsForRange( shards, _key.globalMin(), _key.globalMax() );
+ return;
+ }
+ }
+
+ BoundList ranges = frsp->singleKeyIndexBounds(_key.key(), 1);
+ for (BoundList::const_iterator it=ranges.begin(), end=ranges.end(); it != end; ++it) {
+
+ BSONObj minObj = it->first.replaceFieldNames(_key.key());
+ BSONObj maxObj = it->second.replaceFieldNames(_key.key());
+
+ getShardsForRange( shards, minObj, maxObj, false );
+
+ // once we know we need to visit all shards no need to keep looping
+ if( shards.size() == _shards.size() ) return;
+ }
+
+ if (org.moreOrClauses())
+ org.popOrClauseSingleKey();
+
+ }
+ while (org.moreOrClauses());
+ }
+
+ void ChunkManager::getShardsForRange(set<Shard>& shards, const BSONObj& min, const BSONObj& max, bool fullKeyReq ) const {
+
+ if( fullKeyReq ){
+ uassert(13405, str::stream() << "min value " << min << " does not have shard key", hasShardKey(min));
+ uassert(13406, str::stream() << "max value " << max << " does not have shard key", hasShardKey(max));
+ }
+
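+        // ranges are keyed by their max key: upper_bound(min) is the first range that can
+        // contain min, and the end iterator is advanced so the range holding max is included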
+ ChunkRangeMap::const_iterator it = _chunkRanges.upper_bound(min);
+ ChunkRangeMap::const_iterator end = _chunkRanges.upper_bound(max);
+
+ massert( 13507 , str::stream() << "no chunks found between bounds " << min << " and " << max , it != _chunkRanges.ranges().end() );
+
+ if( end != _chunkRanges.ranges().end() ) ++end;
+
+ for( ; it != end; ++it ){
+ shards.insert(it->second->getShard());
+
+ // once we know we need to visit all shards no need to keep looping
+ if (shards.size() == _shards.size()) break;
+ }
+ }
+
+ void ChunkManager::getAllShards( set<Shard>& all ) const {
+ all.insert(_shards.begin(), _shards.end());
+ }
+
+ bool ChunkManager::compatibleWith( const ChunkManager& other, const Shard& shard ) const {
+ // TODO: Make this much smarter - currently returns true only if we're the same chunk manager
+ return getns() == other.getns() && getSequenceNumber() == other.getSequenceNumber();
+ }
+
+ void ChunkManager::drop( ChunkManagerPtr me ) const {
+ scoped_lock lk( _mutex );
+
+ configServer.logChange( "dropCollection.start" , _ns , BSONObj() );
+
+ dist_lock_try dlk;
+ try{
+ dlk = dist_lock_try( &_nsLock , "drop" );
+ }
+ catch( LockException& e ){
+ uassert( 14022, str::stream() << "Error locking distributed lock for chunk drop." << causedBy( e ), false);
+ }
+
+ uassert( 13331 , "collection's metadata is undergoing changes. Please try again." , dlk.got() );
+
+ uassert( 10174 , "config servers not all up" , configServer.allUp() );
+
+ set<Shard> seen;
+
+ LOG(1) << "ChunkManager::drop : " << _ns << endl;
+
+ // lock all shards so no one can do a split/migrate
+ for ( ChunkMap::const_iterator i=_chunkMap.begin(); i!=_chunkMap.end(); ++i ) {
+ ChunkPtr c = i->second;
+ seen.insert( c->getShard() );
+ }
+
+ LOG(1) << "ChunkManager::drop : " << _ns << "\t all locked" << endl;
+
+ // delete data from mongod
+ for ( set<Shard>::iterator i=seen.begin(); i!=seen.end(); i++ ) {
+ ScopedDbConnection conn( *i );
+ conn->dropCollection( _ns );
+ conn.done();
+ }
+
+ LOG(1) << "ChunkManager::drop : " << _ns << "\t removed shard data" << endl;
+
+ // remove chunk data
+ ScopedDbConnection conn( configServer.modelServer() );
+ conn->remove( Chunk::chunkMetadataNS , BSON( "ns" << _ns ) );
+ conn.done();
+ LOG(1) << "ChunkManager::drop : " << _ns << "\t removed chunk data" << endl;
+
+ for ( set<Shard>::iterator i=seen.begin(); i!=seen.end(); i++ ) {
+ ScopedDbConnection conn( *i );
+ BSONObj res;
+
+ // this is horrible
+            // we need a special command for dropping on the mongod side
+ // this hack works for the moment
+
+ if ( ! setShardVersion( conn.conn() , _ns , 0 , true , res ) )
+ throw UserException( 8071 , str::stream() << "cleaning up after drop failed: " << res );
+ conn->simpleCommand( "admin", 0, "unsetSharding" );
+ conn.done();
+ }
+
+ LOG(1) << "ChunkManager::drop : " << _ns << "\t DONE" << endl;
+ configServer.logChange( "dropCollection" , _ns , BSONObj() );
+ }
+
+ ShardChunkVersion ChunkManager::getVersion( const Shard& shard ) const {
+ ShardVersionMap::const_iterator i = _shardVersions.find( shard );
+ if ( i == _shardVersions.end() )
+ return 0;
+ return i->second;
+ }
+
+ ShardChunkVersion ChunkManager::getVersion() const {
+ return _version;
+ }
+
+ string ChunkManager::toString() const {
+ stringstream ss;
+ ss << "ChunkManager: " << _ns << " key:" << _key.toString() << '\n';
+ for ( ChunkMap::const_iterator i=_chunkMap.begin(); i!=_chunkMap.end(); ++i ) {
+ const ChunkPtr c = i->second;
+ ss << "\t" << c->toString() << '\n';
+ }
+ return ss.str();
+ }
+
+ void ChunkRangeManager::assertValid() const {
+ if (_ranges.empty())
+ return;
+
+ try {
+ // No Nulls
+ for (ChunkRangeMap::const_iterator it=_ranges.begin(), end=_ranges.end(); it != end; ++it) {
+ assert(it->second);
+ }
+
+ // Check endpoints
+ assert(allOfType(MinKey, _ranges.begin()->second->getMin()));
+ assert(allOfType(MaxKey, boost::prior(_ranges.end())->second->getMax()));
+
+ // Make sure there are no gaps or overlaps
+ for (ChunkRangeMap::const_iterator it=boost::next(_ranges.begin()), end=_ranges.end(); it != end; ++it) {
+ ChunkRangeMap::const_iterator last = boost::prior(it);
+ assert(it->second->getMin() == last->second->getMax());
+ }
+
+ // Check Map keys
+ for (ChunkRangeMap::const_iterator it=_ranges.begin(), end=_ranges.end(); it != end; ++it) {
+ assert(it->first == it->second->getMax());
+ }
+
+ // Make sure we match the original chunks
+ const ChunkMap chunks = _ranges.begin()->second->getManager()->_chunkMap;
+ for ( ChunkMap::const_iterator i=chunks.begin(); i!=chunks.end(); ++i ) {
+ const ChunkPtr chunk = i->second;
+
+ ChunkRangeMap::const_iterator min = _ranges.upper_bound(chunk->getMin());
+ ChunkRangeMap::const_iterator max = _ranges.lower_bound(chunk->getMax());
+
+ assert(min != _ranges.end());
+ assert(max != _ranges.end());
+ assert(min == max);
+ assert(min->second->getShard() == chunk->getShard());
+ assert(min->second->contains( chunk->getMin() ));
+ assert(min->second->contains( chunk->getMax() ) || (min->second->getMax() == chunk->getMax()));
+ }
+
+ }
+ catch (...) {
+ log( LL_ERROR ) << "\t invalid ChunkRangeMap! printing ranges:" << endl;
+
+ for (ChunkRangeMap::const_iterator it=_ranges.begin(), end=_ranges.end(); it != end; ++it)
+ cout << it->first << ": " << *it->second << endl;
+
+ throw;
+ }
+ }
+
+ void ChunkRangeManager::reloadAll(const ChunkMap& chunks) {
+ _ranges.clear();
+ _insertRange(chunks.begin(), chunks.end());
+
+ DEV assertValid();
+ }
+
+ void ChunkRangeManager::_insertRange(ChunkMap::const_iterator begin, const ChunkMap::const_iterator end) {
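+        // coalesce runs of adjacent chunks that live on the same shard into a single
+        // ChunkRange entry, keyed by the run's max key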
+ while (begin != end) {
+ ChunkMap::const_iterator first = begin;
+ Shard shard = first->second->getShard();
+ while (begin != end && (begin->second->getShard() == shard))
+ ++begin;
+
+ shared_ptr<ChunkRange> cr (new ChunkRange(first, begin));
+ _ranges[cr->getMax()] = cr;
+ }
+ }
+
+ int ChunkManager::getCurrentDesiredChunkSize() const {
+        // splitting faster in early chunks helps spread out an initial load better
+ const int minChunkSize = 1 << 20; // 1 MBytes
+
+ int splitThreshold = Chunk::MaxChunkSize;
+
+ int nc = numChunks();
+
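+        // with very few chunks use tiny thresholds (in bytes) so the collection splits quickly,
+        // then ramp up toward the configured MaxChunkSize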
+ if ( nc <= 1 ) {
+ return 1024;
+ }
+ else if ( nc < 3 ) {
+ return minChunkSize / 2;
+ }
+ else if ( nc < 10 ) {
+ splitThreshold = max( splitThreshold / 4 , minChunkSize );
+ }
+ else if ( nc < 20 ) {
+ splitThreshold = max( splitThreshold / 2 , minChunkSize );
+ }
+
+ return splitThreshold;
+ }
+
+ class ChunkObjUnitTest : public UnitTest {
+ public:
+ void runShardChunkVersion() {
+ vector<ShardChunkVersion> all;
+ all.push_back( ShardChunkVersion(1,1) );
+ all.push_back( ShardChunkVersion(1,2) );
+ all.push_back( ShardChunkVersion(2,1) );
+ all.push_back( ShardChunkVersion(2,2) );
+
+ for ( unsigned i=0; i<all.size(); i++ ) {
+ for ( unsigned j=i+1; j<all.size(); j++ ) {
+ assert( all[i] < all[j] );
+ }
+ }
+
+ }
+
+ void run() {
+ runShardChunkVersion();
+ LOG(1) << "shardObjTest passed" << endl;
+ }
+ } shardObjTest;
+
+
+ // ----- to be removed ---
+ extern OID serverID;
+
+ // NOTE (careful when deprecating)
+    // currently, when sharding is enabled because of a write or read (as opposed to a split or migrate), the shard learns
+    // its name through the 'setShardVersion' command call
+ bool setShardVersion( DBClientBase & conn , const string& ns , ShardChunkVersion version , bool authoritative , BSONObj& result ) {
+ BSONObjBuilder cmdBuilder;
+ cmdBuilder.append( "setShardVersion" , ns.c_str() );
+ cmdBuilder.append( "configdb" , configServer.modelServer() );
+ cmdBuilder.appendTimestamp( "version" , version.toLong() );
+ cmdBuilder.appendOID( "serverID" , &serverID );
+ if ( authoritative )
+ cmdBuilder.appendBool( "authoritative" , 1 );
+
+ Shard s = Shard::make( conn.getServerAddress() );
+ cmdBuilder.append( "shard" , s.getName() );
+ cmdBuilder.append( "shardHost" , s.getConnString() );
+ BSONObj cmd = cmdBuilder.obj();
+
+ LOG(1) << " setShardVersion " << s.getName() << " " << conn.getServerAddress() << " " << ns << " " << cmd << " " << &conn << endl;
+
+ return conn.runCommand( "admin" , cmd , result );
+ }
+
+} // namespace mongo
diff --git a/src/mongo/s/chunk.h b/src/mongo/s/chunk.h
new file mode 100644
index 00000000000..0f323514a76
--- /dev/null
+++ b/src/mongo/s/chunk.h
@@ -0,0 +1,420 @@
+// @file chunk.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../pch.h"
+
+#include "../bson/util/atomic_int.h"
+#include "../client/dbclient.h"
+#include "../client/distlock.h"
+
+#include "shardkey.h"
+#include "shard.h"
+#include "util.h"
+
+namespace mongo {
+
+ class DBConfig;
+ class Chunk;
+ class ChunkRange;
+ class ChunkManager;
+ class ChunkObjUnitTest;
+
+ typedef shared_ptr<const Chunk> ChunkPtr;
+
+ // key is max for each Chunk or ChunkRange
+ typedef map<BSONObj,ChunkPtr,BSONObjCmp> ChunkMap;
+ typedef map<BSONObj,shared_ptr<ChunkRange>,BSONObjCmp> ChunkRangeMap;
+
+ typedef shared_ptr<const ChunkManager> ChunkManagerPtr;
+
+ /**
+ config.chunks
+ { ns : "alleyinsider.fs.chunks" , min : {} , max : {} , server : "localhost:30001" }
+
+ x is in a shard iff
+ min <= x < max
+ */
+ class Chunk : boost::noncopyable {
+ public:
+ Chunk( const ChunkManager * info , BSONObj from);
+ Chunk( const ChunkManager * info , const BSONObj& min, const BSONObj& max, const Shard& shard);
+
+ //
+ // serialization support
+ //
+
+ void serialize(BSONObjBuilder& to, ShardChunkVersion myLastMod=0);
+
+ //
+ // chunk boundary support
+ //
+
+ const BSONObj& getMin() const { return _min; }
+ const BSONObj& getMax() const { return _max; }
+
+ // if min/max key is pos/neg infinity
+ bool minIsInf() const;
+ bool maxIsInf() const;
+
+ bool contains( const BSONObj& obj ) const;
+
+ string genID() const;
+ static string genID( const string& ns , const BSONObj& min );
+
+ //
+ // chunk version support
+ //
+
+ void appendShortVersion( const char * name , BSONObjBuilder& b ) const;
+
+ ShardChunkVersion getLastmod() const { return _lastmod; }
+ void setLastmod( ShardChunkVersion v ) { _lastmod = v; }
+
+ //
+ // split support
+ //
+
+ /**
+ * if the amount of data written nears the max size of a shard
+         * then we check the real size, and if it's too big, we split
+ * @return if something was split
+ */
+ bool splitIfShould( long dataWritten ) const;
+
+ /**
+         * Splits this chunk at a non-specified split key to be chosen by the mongod holding this chunk.
+ *
+         * @param force if set to true, will split the chunk regardless of whether the split is really necessary size-wise
+ * if set to false, will only split if the chunk has reached the currently desired maximum size
+ * @param res the object containing details about the split execution
+ * @return splitPoint if found a key and split successfully, else empty BSONObj
+ */
+ BSONObj singleSplit( bool force , BSONObj& res ) const;
+
+ /**
+ * Splits this chunk at the given key (or keys)
+ *
+ * @param splitPoints the vector of keys that should be used to divide this chunk
+ * @param res the object containing details about the split execution
+ * @return if the split was successful
+ */
+ bool multiSplit( const vector<BSONObj>& splitPoints , BSONObj& res ) const;
+
+ /**
+ * Asks the mongod holding this chunk to find a key that approximately divides this chunk in two
+ *
+ * @param medianKey the key that divides this chunk, if there is one, or empty
+ */
+ void pickMedianKey( BSONObj& medianKey ) const;
+
+ /**
+ * @param splitPoints vector to be filled in
+ * @param chunkSize chunk size to target in bytes
+         * @param maxPoints limits the number of split points that are needed, zero means no limit (optional)
+         * @param maxObjs limits the number of objects in each chunk, zero means no limit (optional)
+ */
+ void pickSplitVector( vector<BSONObj>& splitPoints , int chunkSize , int maxPoints = 0, int maxObjs = 0) const;
+
+ //
+ // migration support
+ //
+
+ /**
+ * Issues a migrate request for this chunk
+ *
+ * @param to shard to move this chunk to
+         * @param chunkSize maximum number of bytes beyond which the migrate should not go through
+ * @param res the object containing details about the migrate execution
+ * @return true if move was successful
+ */
+ bool moveAndCommit( const Shard& to , long long chunkSize , BSONObj& res ) const;
+
+ /**
+         * @return size of chunk in bytes
+ * talks to mongod to do this
+ */
+ long getPhysicalSize() const;
+
+ /**
+ * marks this chunk as a jumbo chunk
+         * that means the chunk will be ineligible for migrates
+ */
+ void markAsJumbo() const;
+
+ bool isJumbo() const { return _jumbo; }
+
+ /**
+ * Attempt to refresh maximum chunk size from config.
+ */
+ static void refreshChunkSize();
+
+ //
+ // public constants
+ //
+
+ static string chunkMetadataNS;
+ static int MaxChunkSize;
+ static int MaxObjectPerChunk;
+ //
+ // accessors and helpers
+ //
+
+ string toString() const;
+
+ friend ostream& operator << (ostream& out, const Chunk& c) { return (out << c.toString()); }
+ bool operator==(const Chunk& s) const;
+ bool operator!=(const Chunk& s) const { return ! ( *this == s ); }
+
+ string getns() const;
+ const char * getNS() { return "config.chunks"; }
+ Shard getShard() const { return _shard; }
+ const ChunkManager* getManager() const { return _manager; }
+
+
+ private:
+
+ // main shard info
+
+ const ChunkManager * _manager;
+
+ BSONObj _min;
+ BSONObj _max;
+ Shard _shard;
+ ShardChunkVersion _lastmod;
+ mutable bool _jumbo;
+
+ // transient stuff
+
+ mutable long _dataWritten;
+
+ // methods, etc..
+
+ /**
+ * if sort 1, return lowest key
+ * if sort -1, return highest key
+         * will return an empty object if there is none
+ */
+ BSONObj _getExtremeKey( int sort ) const;
+
+ /** initializes _dataWritten with a random value so that a mongos restart wouldn't cause delay in splitting */
+ static long mkDataWritten();
+
+ ShardKeyPattern skey() const;
+ };
+
+ class ChunkRange {
+ public:
+ const ChunkManager* getManager() const { return _manager; }
+ Shard getShard() const { return _shard; }
+
+ const BSONObj& getMin() const { return _min; }
+ const BSONObj& getMax() const { return _max; }
+
+ // clones of Chunk methods
+ bool contains(const BSONObj& obj) const;
+
+ ChunkRange(ChunkMap::const_iterator begin, const ChunkMap::const_iterator end)
+ : _manager(begin->second->getManager())
+ , _shard(begin->second->getShard())
+ , _min(begin->second->getMin())
+ , _max(boost::prior(end)->second->getMax()) {
+ assert( begin != end );
+
+ DEV while (begin != end) {
+ assert(begin->second->getManager() == _manager);
+ assert(begin->second->getShard() == _shard);
+ ++begin;
+ }
+ }
+
+ // Merge min and max (must be adjacent ranges)
+ ChunkRange(const ChunkRange& min, const ChunkRange& max)
+ : _manager(min.getManager())
+ , _shard(min.getShard())
+ , _min(min.getMin())
+ , _max(max.getMax()) {
+ assert(min.getShard() == max.getShard());
+ assert(min.getManager() == max.getManager());
+ assert(min.getMax() == max.getMin());
+ }
+
+ friend ostream& operator<<(ostream& out, const ChunkRange& cr) {
+ return (out << "ChunkRange(min=" << cr._min << ", max=" << cr._max << ", shard=" << cr._shard <<")");
+ }
+
+ private:
+ const ChunkManager* _manager;
+ const Shard _shard;
+ const BSONObj _min;
+ const BSONObj _max;
+ };
+
+
+ class ChunkRangeManager {
+ public:
+ const ChunkRangeMap& ranges() const { return _ranges; }
+
+ void clear() { _ranges.clear(); }
+
+ void reloadAll(const ChunkMap& chunks);
+
+ // Slow operation -- wrap with DEV
+ void assertValid() const;
+
+ ChunkRangeMap::const_iterator upper_bound(const BSONObj& o) const { return _ranges.upper_bound(o); }
+ ChunkRangeMap::const_iterator lower_bound(const BSONObj& o) const { return _ranges.lower_bound(o); }
+
+ private:
+ // assumes nothing in this range exists in _ranges
+ void _insertRange(ChunkMap::const_iterator begin, const ChunkMap::const_iterator end);
+
+ ChunkRangeMap _ranges;
+ };
+
+ /* config.sharding
+ { ns: 'alleyinsider.fs.chunks' ,
+ key: { ts : 1 } ,
+ shards: [ { min: 1, max: 100, server: a } , { min: 101, max: 200 , server : b } ]
+ }
+ */
+ class ChunkManager {
+ public:
+ typedef map<Shard,ShardChunkVersion> ShardVersionMap;
+
+ ChunkManager( string ns , ShardKeyPattern pattern , bool unique );
+
+ string getns() const { return _ns; }
+
+ int numChunks() const { return _chunkMap.size(); }
+ bool hasShardKey( const BSONObj& obj ) const;
+
+ void createFirstChunks( const Shard& primary , vector<BSONObj>* initPoints , vector<Shard>* initShards ) const; // only call from DBConfig::shardCollection
+ ChunkPtr findChunk( const BSONObj& obj ) const;
+ ChunkPtr findChunkOnServer( const Shard& shard ) const;
+
+ const ShardKeyPattern& getShardKey() const { return _key; }
+ bool isUnique() const { return _unique; }
+
+ void getShardsForQuery( set<Shard>& shards , const BSONObj& query ) const;
+ void getAllShards( set<Shard>& all ) const;
+ void getShardsForRange(set<Shard>& shards, const BSONObj& min, const BSONObj& max, bool fullKeyReq = true) const; // [min, max)
+
+ ChunkMap getChunkMap() const { return _chunkMap; }
+
+ /**
+ * Returns true if, for this shard, the chunks are identical in both chunk managers
+ */
+ bool compatibleWith( const ChunkManager& other, const Shard& shard ) const;
+ bool compatibleWith( ChunkManagerPtr other, const Shard& shard ) const { if( ! other ) return false; return compatibleWith( *other, shard ); }
+
+ string toString() const;
+
+ ShardChunkVersion getVersion( const Shard& shard ) const;
+ ShardChunkVersion getVersion() const;
+
+ /**
+         * this is just an increasing count of how many ChunkManagers have been created, so we know if something has been updated
+ */
+ unsigned long long getSequenceNumber() const { return _sequenceNumber; }
+
+ void getInfo( BSONObjBuilder& b ) const {
+ b.append( "key" , _key.key() );
+ b.appendBool( "unique" , _unique );
+ }
+
+ /**
+         * @param me - so the manager isn't deleted before the drop is done
+ */
+ void drop( ChunkManagerPtr me ) const;
+
+ void _printChunks() const;
+
+ int getCurrentDesiredChunkSize() const;
+
+ private:
+ ChunkManagerPtr reload(bool force=true) const; // doesn't modify self!
+
+ // helpers for constructor
+ void _load(ChunkMap& chunks, set<Shard>& shards, ShardVersionMap& shardVersions);
+ static bool _isValid(const ChunkMap& chunks);
+
+ // All members should be const for thread-safety
+ const string _ns;
+ const ShardKeyPattern _key;
+ const bool _unique;
+
+ const ChunkMap _chunkMap;
+ const ChunkRangeManager _chunkRanges;
+
+ const set<Shard> _shards;
+
+ const ShardVersionMap _shardVersions; // max version per shard
+
+ ShardChunkVersion _version; // max version of any chunk
+
+ mutable mutex _mutex; // only used with _nsLock
+ mutable DistributedLock _nsLock;
+
+ const unsigned long long _sequenceNumber;
+
+ mutable TicketHolder _splitTickets; // number of concurrent splitVector we can do from a splitIfShould per collection
+
+ friend class Chunk;
+ friend class ChunkRangeManager; // only needed for CRM::assertValid()
+ static AtomicUInt NextSequenceNumber;
+ };
+
+ // like BSONObjCmp. for use as an STL comparison functor
+ // key-order in "order" argument must match key-order in shardkey
+ class ChunkCmp {
+ public:
+ ChunkCmp( const BSONObj &order = BSONObj() ) : _cmp( order ) {}
+ bool operator()( const Chunk &l, const Chunk &r ) const {
+ return _cmp(l.getMin(), r.getMin());
+ }
+ bool operator()( const ptr<Chunk> l, const ptr<Chunk> r ) const {
+ return operator()(*l, *r);
+ }
+
+ // Also support ChunkRanges
+ bool operator()( const ChunkRange &l, const ChunkRange &r ) const {
+ return _cmp(l.getMin(), r.getMin());
+ }
+ bool operator()( const shared_ptr<ChunkRange> l, const shared_ptr<ChunkRange> r ) const {
+ return operator()(*l, *r);
+ }
+ private:
+ BSONObjCmp _cmp;
+ };
+
+ /*
+ struct chunk_lock {
+ chunk_lock( const Chunk* c ){
+
+ }
+
+ Chunk _c;
+ };
+ */
+ inline string Chunk::genID() const { return genID(_manager->getns(), _min); }
+
+ bool setShardVersion( DBClientBase & conn , const string& ns , ShardChunkVersion version , bool authoritative , BSONObj& result );
+
+} // namespace mongo
diff --git a/src/mongo/s/client.cpp b/src/mongo/s/client.cpp
new file mode 100644
index 00000000000..36063347d85
--- /dev/null
+++ b/src/mongo/s/client.cpp
@@ -0,0 +1,326 @@
+// s/client.cpp
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "server.h"
+
+#include "../db/commands.h"
+#include "../db/dbmessage.h"
+#include "../db/stats/counters.h"
+
+#include "../client/connpool.h"
+
+#include "client.h"
+#include "request.h"
+#include "config.h"
+#include "chunk.h"
+#include "stats.h"
+#include "cursors.h"
+#include "grid.h"
+#include "s/writeback_listener.h"
+
+namespace mongo {
+
+    /* todo: rename this file; clientinfo.cpp would be more intuitive? */
+
+ ClientInfo::ClientInfo() {
+ _cur = &_a;
+ _prev = &_b;
+ _autoSplitOk = true;
+ newRequest();
+ }
+
+ ClientInfo::~ClientInfo() {
+ }
+
+ void ClientInfo::addShard( const string& shard ) {
+ _cur->insert( shard );
+ _sinceLastGetError.insert( shard );
+ }
+
+ void ClientInfo::newRequest( AbstractMessagingPort* p ) {
+
+ if ( p ) {
+ HostAndPort r = p->remote();
+ if ( ! _remote.hasPort() )
+ _remote = r;
+ else if ( _remote != r ) {
+ stringstream ss;
+ ss << "remotes don't match old [" << _remote.toString() << "] new [" << r.toString() << "]";
+ throw UserException( 13134 , ss.str() );
+ }
+ }
+
+ _lastAccess = (int) time(0);
+
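+        // swap the current and previous shard sets so a follow-up getLastError
+        // (which arrives as a new request) can see the shards this operation touched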
+ set<string> * temp = _cur;
+ _cur = _prev;
+ _prev = temp;
+ _cur->clear();
+ }
+
+ ClientInfo * ClientInfo::get() {
+ ClientInfo * info = _tlInfo.get();
+ if ( ! info ) {
+ info = new ClientInfo();
+ _tlInfo.reset( info );
+ info->newRequest();
+ }
+ return info;
+ }
+
+ ClientBasic* ClientBasic::getCurrent() {
+ return ClientInfo::get();
+ }
+
+
+ void ClientInfo::disconnect() {
+ // should be handled by TL cleanup
+ _lastAccess = 0;
+ }
+
+ void ClientInfo::_addWriteBack( vector<WBInfo>& all , const BSONObj& gle ) {
+ BSONElement w = gle["writeback"];
+
+ if ( w.type() != jstOID )
+ return;
+
+ BSONElement cid = gle["connectionId"];
+
+ if ( cid.eoo() ) {
+ error() << "getLastError writeback can't work because of version mismatch" << endl;
+ return;
+ }
+
+ string ident = "";
+ if ( gle["instanceIdent"].type() == String )
+ ident = gle["instanceIdent"].String();
+
+ all.push_back( WBInfo( WriteBackListener::ConnectionIdent( ident , cid.numberLong() ) , w.OID() ) );
+ }
+
+ vector<BSONObj> ClientInfo::_handleWriteBacks( vector<WBInfo>& all , bool fromWriteBackListener ) {
+ vector<BSONObj> res;
+
+ if ( all.size() == 0 )
+ return res;
+
+ if ( fromWriteBackListener ) {
+ LOG(1) << "not doing recursive writeback" << endl;
+ return res;
+ }
+
+ for ( unsigned i=0; i<all.size(); i++ ) {
+ res.push_back( WriteBackListener::waitFor( all[i].ident , all[i].id ) );
+ }
+
+ return res;
+ }
+
+
+
+ bool ClientInfo::getLastError( const BSONObj& options , BSONObjBuilder& result , bool fromWriteBackListener ) {
+ set<string> * shards = getPrev();
+
+ if ( shards->size() == 0 ) {
+ result.appendNull( "err" );
+ return true;
+ }
+
+ vector<WBInfo> writebacks;
+
+ // handle single server
+ if ( shards->size() == 1 ) {
+ string theShard = *(shards->begin() );
+
+ ShardConnection conn( theShard , "" );
+
+ BSONObj res;
+ bool ok = false;
+ try{
+ ok = conn->runCommand( "admin" , options , res );
+ }
+ catch( std::exception &e ){
+
+ warning() << "could not get last error from shard " << theShard << causedBy( e ) << endl;
+
+ // Catch everything that happens here, since we need to ensure we return our connection when we're
+ // finished.
+ conn.done();
+
+ return false;
+ }
+
+ res = res.getOwned();
+ conn.done();
+
+
+ _addWriteBack( writebacks , res );
+
+ // hit other machines just to block
+ for ( set<string>::const_iterator i=sinceLastGetError().begin(); i!=sinceLastGetError().end(); ++i ) {
+ string temp = *i;
+ if ( temp == theShard )
+ continue;
+
+ ShardConnection conn( temp , "" );
+
+ try {
+ _addWriteBack( writebacks , conn->getLastErrorDetailed() );
+ }
+ catch( std::exception &e ){
+ warning() << "could not clear last error from shard " << temp << causedBy( e ) << endl;
+ }
+
+ conn.done();
+ }
+ clearSinceLastGetError();
+
+ if ( writebacks.size() ){
+ vector<BSONObj> v = _handleWriteBacks( writebacks , fromWriteBackListener );
+ if ( v.size() == 0 && fromWriteBackListener ) {
+ // ok
+ }
+ else {
+ // this will usually be 1
+ // it can be greater than 1 if a write to a different shard
+ // than the last write op had a writeback
+ // all we're going to report is the first
+ // since that's the current write
+ // but we block for all
+ assert( v.size() >= 1 );
+ result.appendElements( v[0] );
+ result.appendElementsUnique( res );
+ result.append( "writebackGLE" , v[0] );
+ result.append( "initialGLEHost" , theShard );
+ }
+ }
+ else {
+ result.append( "singleShard" , theShard );
+ result.appendElements( res );
+ }
+
+ return ok;
+ }
+
+ BSONArrayBuilder bbb( result.subarrayStart( "shards" ) );
+ BSONObjBuilder shardRawGLE;
+
+ long long n = 0;
+
+ int updatedExistingStat = 0; // 0 is none, -1 has but false, 1 has true
+
+ // hit each shard
+ vector<string> errors;
+ vector<BSONObj> errorObjects;
+ for ( set<string>::iterator i = shards->begin(); i != shards->end(); i++ ) {
+ string theShard = *i;
+ bbb.append( theShard );
+ ShardConnection conn( theShard , "" );
+ BSONObj res;
+ bool ok = false;
+ try {
+ ok = conn->runCommand( "admin" , options , res );
+ shardRawGLE.append( theShard , res );
+ }
+ catch( std::exception &e ){
+
+ // Safe to return here, since we haven't started any extra processing yet, just collecting
+ // responses.
+
+ warning() << "could not get last error from a shard " << theShard << causedBy( e ) << endl;
+ conn.done();
+
+ return false;
+ }
+
+ _addWriteBack( writebacks, res );
+
+ string temp = DBClientWithCommands::getLastErrorString( res );
+ if ( conn->type() != ConnectionString::SYNC && ( ok == false || temp.size() ) ) {
+ errors.push_back( temp );
+ errorObjects.push_back( res );
+ }
+
+ n += res["n"].numberLong();
+ if ( res["updatedExisting"].type() ) {
+ if ( res["updatedExisting"].trueValue() )
+ updatedExistingStat = 1;
+ else if ( updatedExistingStat == 0 )
+ updatedExistingStat = -1;
+ }
+
+ conn.done();
+ }
+
+ bbb.done();
+ result.append( "shardRawGLE" , shardRawGLE.obj() );
+
+ result.appendNumber( "n" , n );
+ if ( updatedExistingStat )
+ result.appendBool( "updatedExisting" , updatedExistingStat > 0 );
+
+ // hit other machines just to block
+ for ( set<string>::const_iterator i=sinceLastGetError().begin(); i!=sinceLastGetError().end(); ++i ) {
+ string temp = *i;
+ if ( shards->count( temp ) )
+ continue;
+
+ ShardConnection conn( temp , "" );
+ try {
+ _addWriteBack( writebacks, conn->getLastErrorDetailed() );
+ }
+ catch( std::exception &e ){
+ warning() << "could not clear last error from a shard " << temp << causedBy( e ) << endl;
+ }
+ conn.done();
+ }
+ clearSinceLastGetError();
+
+ if ( errors.size() == 0 ) {
+ result.appendNull( "err" );
+ _handleWriteBacks( writebacks , fromWriteBackListener );
+ return true;
+ }
+
+ result.append( "err" , errors[0].c_str() );
+
+ {
+ // errs
+ BSONArrayBuilder all( result.subarrayStart( "errs" ) );
+ for ( unsigned i=0; i<errors.size(); i++ ) {
+ all.append( errors[i].c_str() );
+ }
+ all.done();
+ }
+
+ {
+ // errObjects
+ BSONArrayBuilder all( result.subarrayStart( "errObjects" ) );
+ for ( unsigned i=0; i<errorObjects.size(); i++ ) {
+ all.append( errorObjects[i] );
+ }
+ all.done();
+ }
+ _handleWriteBacks( writebacks , fromWriteBackListener );
+ return true;
+ }
+
+ boost::thread_specific_ptr<ClientInfo> ClientInfo::_tlInfo;
+
+} // namespace mongo
diff --git a/src/mongo/s/client.h b/src/mongo/s/client.h
new file mode 100644
index 00000000000..1237f66b88a
--- /dev/null
+++ b/src/mongo/s/client.h
@@ -0,0 +1,128 @@
+// @file s/client.h
+
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#pragma once
+
+#include "../pch.h"
+#include "writeback_listener.h"
+#include "../db/security.h"
+#include "../db/client_common.h"
+
+namespace mongo {
+
+ /**
+ * holds information about a client connected to a mongos
+ * 1 per client socket
+ * currently implemented with a thread local
+ */
+ class ClientInfo : public ClientBasic {
+ public:
+ ClientInfo();
+ ~ClientInfo();
+
+ /** new request from client, adjusts internal state */
+ void newRequest( AbstractMessagingPort* p = 0 );
+
+ /** client disconnected */
+ void disconnect();
+
+ bool hasRemote() const { return true; }
+
+ /**
+ * @return remote socket address of the client
+ */
+ HostAndPort getRemote() const { return _remote; }
+
+ /**
+         * notes that this client used this shard
+         * keeps track of all shards accessed during this request
+ */
+ void addShard( const string& shard );
+
+ /**
+ * gets shards used on the previous request
+ */
+ set<string> * getPrev() const { return _prev; };
+
+ /**
+ * gets all shards we've accessed since the last time we called clearSinceLastGetError
+ */
+ const set<string>& sinceLastGetError() const { return _sinceLastGetError; }
+
+ /**
+ * clears list of shards we've talked to
+ */
+ void clearSinceLastGetError() { _sinceLastGetError.clear(); }
+
+
+ /**
+         * resets the list of shards used to process the current request
+ */
+ void clearCurrentShards(){ _cur->clear(); }
+
+ /**
+ * calls getLastError
+ * resets shards since get last error
+ * @return if the command was ok or if there was an error
+ */
+ bool getLastError( const BSONObj& options , BSONObjBuilder& result , bool fromWriteBackListener = false );
+
+        /** @return if it's ok to auto split from this client */
+ bool autoSplitOk() const { return _autoSplitOk; }
+
+ void noAutoSplit() { _autoSplitOk = false; }
+
+ static ClientInfo * get();
+ const AuthenticationInfo* getAuthenticationInfo() const { return (AuthenticationInfo*)&_ai; }
+ AuthenticationInfo* getAuthenticationInfo() { return (AuthenticationInfo*)&_ai; }
+ bool isAdmin() { return _ai.isAuthorized( "admin" ); }
+ private:
+ AuthenticationInfo _ai;
+ struct WBInfo {
+ WBInfo( const WriteBackListener::ConnectionIdent& c , OID o ) : ident( c ) , id( o ) {}
+ WriteBackListener::ConnectionIdent ident;
+ OID id;
+ };
+
+ // for getLastError
+ void _addWriteBack( vector<WBInfo>& all , const BSONObj& o );
+ vector<BSONObj> _handleWriteBacks( vector<WBInfo>& all , bool fromWriteBackListener );
+
+
+ int _id; // unique client id
+ HostAndPort _remote; // server:port of remote socket end
+
+ // we use _a and _b to store shards we've talked to on the current request and the previous
+ // we use 2 so we can flip for getLastError type operations
+
+ set<string> _a; // actual set for _cur or _prev
+ set<string> _b; // "
+
+ set<string> * _cur; // pointer to _a or _b depending on state
+ set<string> * _prev; // ""
+
+
+ set<string> _sinceLastGetError; // all shards accessed since last getLastError
+
+ int _lastAccess;
+ bool _autoSplitOk;
+
+ static boost::thread_specific_ptr<ClientInfo> _tlInfo;
+ };
+
+
+}
diff --git a/src/mongo/s/commands_admin.cpp b/src/mongo/s/commands_admin.cpp
new file mode 100644
index 00000000000..cbe306f47a8
--- /dev/null
+++ b/src/mongo/s/commands_admin.cpp
@@ -0,0 +1,1239 @@
+// s/commands_admin.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+/* TODO
+ _ concurrency control.
+ _ limit() works right?
+ _ KillCursors
+
+ later
+ _ secondary indexes
+*/
+
+#include "pch.h"
+#include "../util/net/message.h"
+#include "../util/net/listen.h"
+#include "../util/processinfo.h"
+#include "../util/stringutils.h"
+#include "../util/version.h"
+#include "../util/timer.h"
+
+#include "../client/connpool.h"
+
+#include "../db/dbmessage.h"
+#include "../db/commands.h"
+#include "../db/stats/counters.h"
+
+#include "config.h"
+#include "chunk.h"
+#include "grid.h"
+#include "strategy.h"
+#include "stats.h"
+#include "writeback_listener.h"
+#include "client.h"
+#include "../util/ramlog.h"
+
+namespace mongo {
+
+ namespace dbgrid_cmds {
+
+ class GridAdminCmd : public Command {
+ public:
+ GridAdminCmd( const char * n ) : Command( n , false, tolowerString(n).c_str() ) {
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+
+ // all grid commands are designed not to lock
+ virtual LockType locktype() const { return NONE; }
+
+ bool okForConfigChanges( string& errmsg ) {
+ string e;
+ if ( ! configServer.allUp(e) ) {
+ errmsg = str::stream() << "not all config servers are up: " << e;
+ return false;
+ }
+ return true;
+ }
+ };
+
+ // --------------- misc commands ----------------------
+
+ class NetStatCmd : public GridAdminCmd {
+ public:
+ NetStatCmd() : GridAdminCmd("netstat") { }
+ virtual void help( stringstream& help ) const {
+ help << " shows status/reachability of servers in the cluster";
+ }
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ result.append("configserver", configServer.getPrimary().getConnString() );
+ result.append("isdbgrid", 1);
+ return true;
+ }
+ } netstat;
+
+ class FlushRouterConfigCmd : public GridAdminCmd {
+ public:
+ FlushRouterConfigCmd() : GridAdminCmd("flushRouterConfig") { }
+ virtual void help( stringstream& help ) const {
+ help << "flush all router config";
+ }
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ grid.flushConfig();
+ result.appendBool( "flushed" , true );
+ return true;
+ }
+ } flushRouterConfigCmd;
+
+
+ class ServerStatusCmd : public Command {
+ public:
+ ServerStatusCmd() : Command( "serverStatus" , true ) {
+ _started = time(0);
+ }
+
+ virtual bool slaveOk() const { return true; }
+ virtual LockType locktype() const { return NONE; }
+
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ result.append( "host" , prettyHostName() );
+ result.append("version", versionString);
+ result.append("process","mongos");
+ result.append("uptime",(double) (time(0)-_started));
+ result.appendDate( "localTime" , jsTime() );
+
+ {
+ BSONObjBuilder t( result.subobjStart( "mem" ) );
+
+ ProcessInfo p;
+ if ( p.supported() ) {
+ t.appendNumber( "resident" , p.getResidentSize() );
+ t.appendNumber( "virtual" , p.getVirtualMemorySize() );
+ t.appendBool( "supported" , true );
+ }
+ else {
+ result.append( "note" , "not all mem info support on this platform" );
+ t.appendBool( "supported" , false );
+ }
+
+ t.done();
+ }
+
+ {
+ BSONObjBuilder bb( result.subobjStart( "connections" ) );
+ bb.append( "current" , connTicketHolder.used() );
+ bb.append( "available" , connTicketHolder.available() );
+ bb.done();
+ }
+
+ {
+ BSONObjBuilder bb( result.subobjStart( "extra_info" ) );
+ bb.append("note", "fields vary by platform");
+ ProcessInfo p;
+ p.getExtraInfo(bb);
+ bb.done();
+ }
+
+ result.append( "opcounters" , globalOpCounters.getObj() );
+ {
+ BSONObjBuilder bb( result.subobjStart( "ops" ) );
+ bb.append( "sharded" , opsSharded.getObj() );
+ bb.append( "notSharded" , opsNonSharded.getObj() );
+ bb.done();
+ }
+
+ result.append( "shardCursorType" , shardedCursorTypes.getObj() );
+
+ {
+ BSONObjBuilder asserts( result.subobjStart( "asserts" ) );
+ asserts.append( "regular" , assertionCount.regular );
+ asserts.append( "warning" , assertionCount.warning );
+ asserts.append( "msg" , assertionCount.msg );
+ asserts.append( "user" , assertionCount.user );
+ asserts.append( "rollovers" , assertionCount.rollovers );
+ asserts.done();
+ }
+
+ {
+ BSONObjBuilder bb( result.subobjStart( "network" ) );
+ networkCounter.append( bb );
+ bb.done();
+ }
+
+ {
+ RamLog* rl = RamLog::get( "warnings" );
+ verify(15879, rl);
+
+ if (rl->lastWrite() >= time(0)-(10*60)){ // only show warnings from last 10 minutes
+ vector<const char*> lines;
+ rl->get( lines );
+
+ BSONArrayBuilder arr( result.subarrayStart( "warnings" ) );
+ for ( unsigned i=std::max(0,(int)lines.size()-10); i<lines.size(); i++ )
+ arr.append( lines[i] );
+ arr.done();
+ }
+ }
+
+ return 1;
+ }
+
+ time_t _started;
+ } cmdServerStatus;
+
+ class FsyncCommand : public GridAdminCmd {
+ public:
+ FsyncCommand() : GridAdminCmd( "fsync" ) {}
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ if ( cmdObj["lock"].trueValue() ) {
+ errmsg = "can't do lock through mongos";
+ return false;
+ }
+
+ BSONObjBuilder sub;
+
+ bool ok = true;
+ int numFiles = 0;
+
+ vector<Shard> shards;
+ Shard::getAllShards( shards );
+ for ( vector<Shard>::iterator i=shards.begin(); i!=shards.end(); i++ ) {
+ Shard s = *i;
+
+ BSONObj x = s.runCommand( "admin" , "fsync" );
+ sub.append( s.getName() , x );
+
+ if ( ! x["ok"].trueValue() ) {
+ ok = false;
+ errmsg = x["errmsg"].String();
+ }
+
+ numFiles += x["numFiles"].numberInt();
+ }
+
+ result.append( "numFiles" , numFiles );
+ result.append( "all" , sub.obj() );
+ return ok;
+ }
+ } fsyncCmd;
+
+ // ------------ database level commands -------------
+
+ class MoveDatabasePrimaryCommand : public GridAdminCmd {
+ public:
+ MoveDatabasePrimaryCommand() : GridAdminCmd("movePrimary") { }
+ virtual void help( stringstream& help ) const {
+ help << " example: { moveprimary : 'foo' , to : 'localhost:9999' }";
+ }
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ string dbname = cmdObj.firstElement().valuestrsafe();
+
+ if ( dbname.size() == 0 ) {
+ errmsg = "no db";
+ return false;
+ }
+
+ if ( dbname == "config" ) {
+ errmsg = "can't move config db";
+ return false;
+ }
+
+ DBConfigPtr config = grid.getDBConfig( dbname , false );
+ if ( ! config ) {
+ errmsg = "can't find db!";
+ return false;
+ }
+
+ string to = cmdObj["to"].valuestrsafe();
+ if ( ! to.size() ) {
+ errmsg = "you have to specify where you want to move it";
+ return false;
+ }
+ Shard s = Shard::make( to );
+
+ if ( config->getPrimary() == s.getConnString() ) {
+ errmsg = "it is already the primary";
+ return false;
+ }
+
+ if ( ! grid.knowAboutShard( s.getConnString() ) ) {
+ errmsg = "that server isn't known to me";
+ return false;
+ }
+
+ log() << "Moving " << dbname << " primary from: " << config->getPrimary().toString()
+ << " to: " << s.toString() << endl;
+
+ // Locking enabled now...
+ DistributedLock lockSetup( configServer.getConnectionString(), dbname + "-movePrimary" );
+ dist_lock_try dlk;
+
+ // Distributed locking added.
+ try{
+ dlk = dist_lock_try( &lockSetup , string("Moving primary shard of ") + dbname );
+ }
+ catch( LockException& e ){
+ errmsg = str::stream() << "error locking distributed lock to move primary shard of " << dbname << causedBy( e );
+ warning() << errmsg << endl;
+ return false;
+ }
+
+ if ( ! dlk.got() ) {
+ errmsg = (string)"metadata lock is already taken for moving " + dbname;
+ return false;
+ }
+
+ ScopedDbConnection toconn( s.getConnString() );
+
+ // TODO ERH - we need a clone command which replays operations from clone start to now
+ // can just use local.oplog.$main
+ BSONObj cloneRes;
+ bool worked = toconn->runCommand( dbname.c_str() , BSON( "clone" << config->getPrimary().getConnString() ) , cloneRes );
+ toconn.done();
+
+ if ( ! worked ) {
+ log() << "clone failed" << cloneRes << endl;
+ errmsg = "clone failed";
+ return false;
+ }
+
+ ScopedDbConnection fromconn( config->getPrimary() );
+
+ config->setPrimary( s.getConnString() );
+
+ log() << "movePrimary: dropping " << dbname << " from old" << endl;
+
+ fromconn->dropDatabase( dbname.c_str() );
+ fromconn.done();
+
+ result << "primary " << s.toString();
+
+ return true;
+ }
+ } movePrimary;
+
+ class EnableShardingCmd : public GridAdminCmd {
+ public:
+ EnableShardingCmd() : GridAdminCmd( "enableSharding" ) {}
+ virtual void help( stringstream& help ) const {
+ help
+ << "Enable sharding for a db. (Use 'shardcollection' command afterwards.)\n"
+ << " { enablesharding : \"<dbname>\" }\n";
+ }
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ string dbname = cmdObj.firstElement().valuestrsafe();
+ if ( dbname.size() == 0 ) {
+ errmsg = "no db";
+ return false;
+ }
+
+ if ( dbname == "admin" ) {
+ errmsg = "can't shard the admin db";
+ return false;
+ }
+ if ( dbname == "local" ) {
+ errmsg = "can't shard the local db";
+ return false;
+ }
+
+ DBConfigPtr config = grid.getDBConfig( dbname );
+ if ( config->isShardingEnabled() ) {
+ errmsg = "already enabled";
+ return false;
+ }
+
+ if ( ! okForConfigChanges( errmsg ) )
+ return false;
+
+ log() << "enabling sharding on: " << dbname << endl;
+
+ config->enableSharding();
+
+ return true;
+ }
+ } enableShardingCmd;
+
+ // ------------ collection level commands -------------
+
+ class ShardCollectionCmd : public GridAdminCmd {
+ public:
+ ShardCollectionCmd() : GridAdminCmd( "shardCollection" ) {}
+
+ virtual void help( stringstream& help ) const {
+ help
+ << "Shard a collection. Requires key. Optional unique. Sharding must already be enabled for the database.\n"
+ << " { enablesharding : \"<dbname>\" }\n";
+ }
+
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ string ns = cmdObj.firstElement().valuestrsafe();
+ if ( ns.size() == 0 ) {
+ errmsg = "no ns";
+ return false;
+ }
+
+ DBConfigPtr config = grid.getDBConfig( ns );
+ if ( ! config->isShardingEnabled() ) {
+ errmsg = "sharding not enabled for db";
+ return false;
+ }
+
+ if ( config->isSharded( ns ) ) {
+ errmsg = "already sharded";
+ return false;
+ }
+
+ BSONObj key = cmdObj.getObjectField( "key" );
+ if ( key.isEmpty() ) {
+ errmsg = "no shard key";
+ return false;
+ }
+
+ BSONForEach(e, key) {
+ if (!e.isNumber() || e.number() != 1.0) {
+ errmsg = "shard keys must all be ascending";
+ return false;
+ }
+ }
+
+ if ( ns.find( ".system." ) != string::npos ) {
+ errmsg = "can't shard system namespaces";
+ return false;
+ }
+
+ if ( ! okForConfigChanges( errmsg ) )
+ return false;
+
+ // Sharding interacts with indexing in at least three ways:
+ //
+ // 1. A unique index must have the sharding key as its prefix. Otherwise maintaining uniqueness would
+ // require coordinated access to all shards. Trying to shard a collection with such an index is not
+ // allowed.
+ //
+ // 2. Sharding a collection requires an index over the sharding key. That index must be created upfront.
+ // The rationale is that sharding a non-empty collection would need to create the index and that could
+ // be slow. Requiring the index upfront allows the admin to plan before sharding and perhaps use
+ // background index construction. One exception to the rule: empty collections. It's fairly easy to
+ // create the index as part of the sharding process.
+ //
+ // 3. If unique : true is specified, we require that the sharding index be unique or created as unique.
+ //
+ // We enforce these conditions in what comes next.
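+ //
+ // For illustration (hypothetical keys): with a proposed shard key { a : 1 },
+ // - a unique index on { a : 1, b : 1 } is acceptable, since the shard key is a prefix of it;
+ // - a unique index on { b : 1 } is rejected, since uniqueness on b can't be checked within one shard;
+ // - an existing index on exactly { a : 1 } satisfies the "index over the shard key" requirement (2).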
+
+ bool careAboutUnique = cmdObj["unique"].trueValue();
+
+ {
+ ShardKeyPattern proposedKey( key );
+ bool hasShardIndex = false;
+ bool hasUniqueShardIndex = false;
+
+ ScopedDbConnection conn( config->getPrimary() );
+ BSONObjBuilder b;
+ b.append( "ns" , ns );
+
+ BSONArrayBuilder allIndexes;
+
+ auto_ptr<DBClientCursor> cursor = conn->query( config->getName() + ".system.indexes" , b.obj() );
+ while ( cursor->more() ) {
+ BSONObj idx = cursor->next();
+
+ allIndexes.append( idx );
+
+ bool idIndex = ! idx["name"].eoo() && idx["name"].String() == "_id_";
+ bool uniqueIndex = ( ! idx["unique"].eoo() && idx["unique"].trueValue() ) ||
+ idIndex;
+
+ // Is index key over the sharding key? Remember that.
+ if ( key.woCompare( idx["key"].embeddedObjectUserCheck() ) == 0 ) {
+
+ if( idx["sparse"].trueValue() ){
+ errmsg = (string)"can't shard collection " + ns + " with sparse shard key index";
+ conn.done();
+ return false;
+ }
+
+ hasShardIndex = true;
+ hasUniqueShardIndex = uniqueIndex;
+ continue;
+ }
+
+ // Not a unique index? Move on.
+ if ( ! uniqueIndex || idIndex )
+ continue;
+
+ // Shard key is prefix of unique index? Move on.
+ if ( proposedKey.isPrefixOf( idx["key"].embeddedObjectUserCheck() ) )
+ continue;
+
+ errmsg = str::stream() << "can't shard collection '" << ns << "' with unique index on: " + idx.toString()
+ << ", uniqueness can't be maintained across unless shard key index is a prefix";
+ conn.done();
+ return false;
+ }
+
+ if( careAboutUnique && hasShardIndex && ! hasUniqueShardIndex ){
+ errmsg = (string)"can't shard collection " + ns + ", shard key index not unique and unique index explicitly specified";
+ conn.done();
+ return false;
+ }
+
+ BSONObj res = conn->findOne( config->getName() + ".system.namespaces" , BSON( "name" << ns ) );
+ if ( res["options"].type() == Object && res["options"].embeddedObject()["capped"].trueValue() ) {
+ errmsg = "can't shard capped collection";
+ conn.done();
+ return false;
+ }
+
+ if ( hasShardIndex ) {
+ // make sure there are no null entries in the sharding index
+ BSONObjBuilder cmd;
+ cmd.append( "checkShardingIndex" , ns );
+ cmd.append( "keyPattern" , key );
+ BSONObj cmdObj = cmd.obj();
+ if ( ! conn->runCommand( "admin" , cmdObj , res )) {
+ errmsg = res["errmsg"].str();
+ conn.done();
+ return false;
+ }
+ }
+
+ if ( ! hasShardIndex && ( conn->count( ns ) != 0 ) ) {
+ errmsg = "please create an index over the sharding key before sharding.";
+ result.append( "proposedKey" , key );
+ result.appendArray( "curIndexes" , allIndexes.done() );
+ conn.done();
+ return false;
+ }
+
+ conn.done();
+ }
+
+ tlog() << "CMD: shardcollection: " << cmdObj << endl;
+
+// vector<BSONObj> pts;
+// if (cmdObj.hasField("splitPoints")) {
+// if ( cmdObj.getField("splitPoints").type() != Array ) {
+// errmsg = "Value of splitPoints must be an array of objects";
+// return false;
+// }
+//
+// vector<BSONElement> elmts = cmdObj.getField("splitPoints").Array();
+// for ( unsigned i = 0 ; i < elmts.size() ; ++i) {
+// if ( elmts[i].type() != Object ) {
+// errmsg = "Elements in the splitPoints array must be objects";
+// return false;
+// }
+// pts.push_back( elmts[i].Obj() );
+// }
+// }
+ config->shardCollection( ns , key , careAboutUnique );
+
+ result << "collectionsharded" << ns;
+ return true;
+ }
+ } shardCollectionCmd;
+
+ class GetShardVersion : public GridAdminCmd {
+ public:
+ GetShardVersion() : GridAdminCmd( "getShardVersion" ) {}
+ virtual void help( stringstream& help ) const {
+ help << " example: { getShardVersion : 'alleyinsider.foo' } ";
+ }
+
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ string ns = cmdObj.firstElement().valuestrsafe();
+ if ( ns.size() == 0 ) {
+ errmsg = "need to specify fully namespace";
+ return false;
+ }
+
+ DBConfigPtr config = grid.getDBConfig( ns );
+ if ( ! config->isSharded( ns ) ) {
+ errmsg = "ns not sharded.";
+ return false;
+ }
+
+ ChunkManagerPtr cm = config->getChunkManagerIfExists( ns );
+ if ( ! cm ) {
+ errmsg = "no chunk manager?";
+ return false;
+ }
+ cm->_printChunks();
+ result.appendTimestamp( "version" , cm->getVersion().toLong() );
+
+ return 1;
+ }
+ } getShardVersionCmd;
+
+ class SplitCollectionCmd : public GridAdminCmd {
+ public:
+ SplitCollectionCmd() : GridAdminCmd( "split" ) {}
+ virtual void help( stringstream& help ) const {
+ help
+ << " example: - split the shard that contains give key \n"
+ << " { split : 'alleyinsider.blog.posts' , find : { ts : 1 } }\n"
+ << " example: - split the shard that contains the key with this as the middle \n"
+ << " { split : 'alleyinsider.blog.posts' , middle : { ts : 1 } }\n"
+ << " NOTE: this does not move move the chunks, it merely creates a logical separation \n"
+ ;
+ }
+
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+
+ if ( ! okForConfigChanges( errmsg ) )
+ return false;
+
+ ShardConnection::sync();
+
+ string ns = cmdObj.firstElement().valuestrsafe();
+ if ( ns.size() == 0 ) {
+ errmsg = "no ns";
+ return false;
+ }
+
+ DBConfigPtr config = grid.getDBConfig( ns );
+ if ( ! config->isSharded( ns ) ) {
+ config->reload();
+ if ( ! config->isSharded( ns ) ) {
+ errmsg = "ns not sharded. have to shard before can split";
+ return false;
+ }
+ }
+
+ BSONObj find = cmdObj.getObjectField( "find" );
+ if ( find.isEmpty() ) {
+ find = cmdObj.getObjectField( "middle" );
+
+ if ( find.isEmpty() ) {
+ errmsg = "need to specify find or middle";
+ return false;
+ }
+ }
+
+ ChunkManagerPtr info = config->getChunkManager( ns );
+ ChunkPtr chunk = info->findChunk( find );
+ BSONObj middle = cmdObj.getObjectField( "middle" );
+
+ assert( chunk.get() );
+ log() << "splitting: " << ns << " shard: " << chunk << endl;
+
+ BSONObj res;
+ bool worked;
+ if ( middle.isEmpty() ) {
+ BSONObj ret = chunk->singleSplit( true /* force a split even if not enough data */ , res );
+ worked = !ret.isEmpty();
+ }
+ else {
+ // sanity check if the key provided is a valid split point
+ if ( ( middle == chunk->getMin() ) || ( middle == chunk->getMax() ) ) {
+ errmsg = "cannot split on initial or final chunk's key";
+ return false;
+ }
+
+ if (!fieldsMatch(middle, info->getShardKey().key())){
+ errmsg = "middle has different fields (or different order) than shard key";
+ return false;
+ }
+
+ vector<BSONObj> splitPoints;
+ splitPoints.push_back( middle );
+ worked = chunk->multiSplit( splitPoints , res );
+ }
+
+ if ( !worked ) {
+ errmsg = "split failed";
+ result.append( "cause" , res );
+ return false;
+ }
+ config->getChunkManager( ns , true );
+ return true;
+ }
+ } splitCollectionCmd;
+
+ class MoveChunkCmd : public GridAdminCmd {
+ public:
+ MoveChunkCmd() : GridAdminCmd( "moveChunk" ) {}
+ virtual void help( stringstream& help ) const {
+ help << "{ movechunk : 'test.foo' , find : { num : 1 } , to : 'localhost:30001' }";
+ }
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+
+ if ( ! okForConfigChanges( errmsg ) )
+ return false;
+
+ ShardConnection::sync();
+
+ Timer t;
+ string ns = cmdObj.firstElement().valuestrsafe();
+ if ( ns.size() == 0 ) {
+ errmsg = "no ns";
+ return false;
+ }
+
+ DBConfigPtr config = grid.getDBConfig( ns );
+ if ( ! config->isSharded( ns ) ) {
+ config->reload();
+ if ( ! config->isSharded( ns ) ) {
+ errmsg = "ns not sharded. have to shard before we can move a chunk";
+ return false;
+ }
+ }
+
+ BSONObj find = cmdObj.getObjectField( "find" );
+ if ( find.isEmpty() ) {
+ errmsg = "need to specify find. see help";
+ return false;
+ }
+
+ string toString = cmdObj["to"].valuestrsafe();
+ if ( ! toString.size() ) {
+ errmsg = "you have to specify where you want to move the chunk";
+ return false;
+ }
+
+ Shard to = Shard::make( toString );
+
+ // so far, chunk size serves test purposes; it may or may not become a supported parameter
+ long long maxChunkSizeBytes = cmdObj["maxChunkSizeBytes"].numberLong();
+ if ( maxChunkSizeBytes == 0 ) {
+ maxChunkSizeBytes = Chunk::MaxChunkSize;
+ }
+
+ tlog() << "CMD: movechunk: " << cmdObj << endl;
+
+ ChunkManagerPtr info = config->getChunkManager( ns );
+ ChunkPtr c = info->findChunk( find );
+ const Shard& from = c->getShard();
+
+ if ( from == to ) {
+ errmsg = "that chunk is already on that shard";
+ return false;
+ }
+
+ BSONObj res;
+ if ( ! c->moveAndCommit( to , maxChunkSizeBytes , res ) ) {
+ errmsg = "move failed";
+ result.append( "cause" , res );
+ return false;
+ }
+
+ // preemptively reload the config to get new version info
+ config->getChunkManager( ns , true );
+
+ result.append( "millis" , t.millis() );
+ return true;
+ }
+ } moveChunkCmd;
+
+ // ------------ server level commands -------------
+
+ class ListShardsCmd : public GridAdminCmd {
+ public:
+ ListShardsCmd() : GridAdminCmd("listShards") { }
+ virtual void help( stringstream& help ) const {
+ help << "list all shards of the system";
+ }
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ ScopedDbConnection conn( configServer.getPrimary() );
+
+ vector<BSONObj> all;
+ auto_ptr<DBClientCursor> cursor = conn->query( "config.shards" , BSONObj() );
+ while ( cursor->more() ) {
+ BSONObj o = cursor->next();
+ all.push_back( o );
+ }
+
+ result.append("shards" , all );
+ conn.done();
+
+ return true;
+ }
+ } listShardsCmd;
+
+ /* a shard is a single mongod server or a replica set. add it (them) to the cluster as a storage partition. */
+ class AddShard : public GridAdminCmd {
+ public:
+ AddShard() : GridAdminCmd("addShard") { }
+ virtual void help( stringstream& help ) const {
+ help << "add a new shard to the system";
+ }
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ errmsg.clear();
+
+ // get replica set component hosts
+ ConnectionString servers = ConnectionString::parse( cmdObj.firstElement().valuestrsafe() , errmsg );
+ if ( ! errmsg.empty() ) {
+ log() << "addshard request " << cmdObj << " failed:" << errmsg << endl;
+ return false;
+ }
+
+ // using localhost in server names implies every other process must use localhost addresses too
+ vector<HostAndPort> serverAddrs = servers.getServers();
+ for ( size_t i = 0 ; i < serverAddrs.size() ; i++ ) {
+ if ( serverAddrs[i].isLocalHost() != grid.allowLocalHost() ) {
+ errmsg = str::stream() <<
+ "can't use localhost as a shard since all shards need to communicate. " <<
+ "either use all shards and configdbs in localhost or all in actual IPs " <<
+ " host: " << serverAddrs[i].toString() << " isLocalHost:" << serverAddrs[i].isLocalHost();
+
+ log() << "addshard request " << cmdObj << " failed: attempt to mix localhosts and IPs" << endl;
+ return false;
+ }
+
+ // it's fine if mongods of a set all use default port
+ if ( ! serverAddrs[i].hasPort() ) {
+ serverAddrs[i].setPort( CmdLine::ShardServerPort );
+ }
+ }
+
+ // name is optional; addShard will provide one if needed
+ string name = "";
+ if ( cmdObj["name"].type() == String ) {
+ name = cmdObj["name"].valuestrsafe();
+ }
+
+ // maxSize is the space usage cap in a shard in MBs
+ long long maxSize = 0;
+ if ( cmdObj[ ShardFields::maxSize.name() ].isNumber() ) {
+ maxSize = cmdObj[ ShardFields::maxSize.name() ].numberLong();
+ }
+
+ if ( ! grid.addShard( &name , servers , maxSize , errmsg ) ) {
+ log() << "addshard request " << cmdObj << " failed: " << errmsg << endl;
+ return false;
+ }
+
+ result << "shardAdded" << name;
+ return true;
+ }
+
+ } addServer;
+
+ /* See usage docs at:
+ * http://www.mongodb.org/display/DOCS/Configuring+Sharding#ConfiguringSharding-Removingashard
+ */
+ class RemoveShardCmd : public GridAdminCmd {
+ public:
+ RemoveShardCmd() : GridAdminCmd("removeShard") { }
+ virtual void help( stringstream& help ) const {
+ help << "remove a shard to the system.";
+ }
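+ // typical flow (illustrative): { removeShard : "<shardname>" } is run repeatedly against admin;
+ // the first call starts draining ("state" : "started"), later calls report "ongoing" with the
+ // remaining chunk/db counts, and the final call removes the shard and reports "completed".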
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ string target = cmdObj.firstElement().valuestrsafe();
+ Shard s = Shard::make( target );
+ if ( ! grid.knowAboutShard( s.getConnString() ) ) {
+ errmsg = "unknown shard";
+ return false;
+ }
+
+ ScopedDbConnection conn( configServer.getPrimary() );
+
+ if (conn->count("config.shards", BSON("_id" << NE << s.getName() << ShardFields::draining(true)))){
+ conn.done();
+ errmsg = "Can't have more than one draining shard at a time";
+ return false;
+ }
+
+ if (conn->count("config.shards", BSON("_id" << NE << s.getName())) == 0){
+ conn.done();
+ errmsg = "Can't remove last shard";
+ return false;
+ }
+
+ BSONObj primaryDoc = BSON( "_id" << NE << "local" << "primary" << s.getName() );
+ BSONObj dbInfo; // appended at end of result on success
+ {
+ boost::scoped_ptr<DBClientCursor> cursor (conn->query("config.databases", primaryDoc));
+ if (cursor->more()) { // skip block and allocations if empty
+ BSONObjBuilder dbInfoBuilder;
+ dbInfoBuilder.append("note", "you need to drop or movePrimary these databases");
+ BSONArrayBuilder dbs(dbInfoBuilder.subarrayStart("dbsToMove"));
+
+ while (cursor->more()){
+ BSONObj db = cursor->nextSafe();
+ dbs.append(db["_id"]);
+ }
+ dbs.doneFast();
+
+ dbInfo = dbInfoBuilder.obj();
+ }
+ }
+
+ // If the server is not yet draining chunks, put it in draining mode.
+ BSONObj searchDoc = BSON( "_id" << s.getName() );
+ BSONObj drainingDoc = BSON( "_id" << s.getName() << ShardFields::draining(true) );
+ BSONObj shardDoc = conn->findOne( "config.shards", drainingDoc );
+ if ( shardDoc.isEmpty() ) {
+
+ // TODO prevent move chunks to this shard.
+
+ log() << "going to start draining shard: " << s.getName() << endl;
+ BSONObj newStatus = BSON( "$set" << BSON( ShardFields::draining(true) ) );
+ conn->update( "config.shards" , searchDoc , newStatus, false /* do not upsert */);
+
+ errmsg = conn->getLastError();
+ if ( errmsg.size() ) {
+ log() << "error starting remove shard: " << s.getName() << " err: " << errmsg << endl;
+ return false;
+ }
+
+ BSONObj primaryLocalDoc = BSON("_id" << "local" << "primary" << s.getName() );
+ PRINT(primaryLocalDoc);
+ if (conn->count("config.databases", primaryLocalDoc)) {
+ log() << "This shard is listed as primary of local db. Removing entry." << endl;
+ conn->remove("config.databases", BSON("_id" << "local"));
+ errmsg = conn->getLastError();
+ if ( errmsg.size() ) {
+ log() << "error removing local db: " << errmsg << endl;
+ return false;
+ }
+ }
+
+ Shard::reloadShardInfo();
+
+ result.append( "msg" , "draining started successfully" );
+ result.append( "state" , "started" );
+ result.append( "shard" , s.getName() );
+ result.appendElements(dbInfo);
+ conn.done();
+ return true;
+ }
+
+ // If the server has been completely drained, remove it from the ConfigDB.
+ // Check not only for chunks but also databases.
+ BSONObj shardIDDoc = BSON( "shard" << shardDoc[ "_id" ].str() );
+ long long chunkCount = conn->count( "config.chunks" , shardIDDoc );
+ long long dbCount = conn->count( "config.databases" , primaryDoc );
+ if ( ( chunkCount == 0 ) && ( dbCount == 0 ) ) {
+ log() << "going to remove shard: " << s.getName() << endl;
+ conn->remove( "config.shards" , searchDoc );
+
+ errmsg = conn->getLastError();
+ if ( errmsg.size() ) {
+ log() << "error concluding remove shard: " << s.getName() << " err: " << errmsg << endl;
+ return false;
+ }
+
+ Shard::removeShard( shardDoc[ "_id" ].str() );
+ Shard::reloadShardInfo();
+
+ result.append( "msg" , "removeshard completed successfully" );
+ result.append( "state" , "completed" );
+ result.append( "shard" , s.getName() );
+ conn.done();
+ return true;
+ }
+
+ // If the server is already in draining mode, just report on its progress.
+ // Report on databases (not just chunks) that are left too.
+ result.append( "msg" , "draining ongoing" );
+ result.append( "state" , "ongoing" );
+ BSONObjBuilder inner;
+ inner.append( "chunks" , chunkCount );
+ inner.append( "dbs" , dbCount );
+ result.append( "remaining" , inner.obj() );
+ result.appendElements(dbInfo);
+
+ conn.done();
+ return true;
+ }
+ } removeShardCmd;
+
+
+ // --------------- public commands ----------------
+
+ class IsDbGridCmd : public Command {
+ public:
+ virtual LockType locktype() const { return NONE; }
+ virtual bool requiresAuth() { return false; }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ IsDbGridCmd() : Command("isdbgrid") { }
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ result.append("isdbgrid", 1);
+ result.append("hostname", getHostNameCached());
+ return true;
+ }
+ } isdbgrid;
+
+ class CmdIsMaster : public Command {
+ public:
+ virtual LockType locktype() const { return NONE; }
+ virtual bool requiresAuth() { return false; }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual void help( stringstream& help ) const {
+ help << "test if this is master half of a replica pair";
+ }
+ CmdIsMaster() : Command("isMaster" , false , "ismaster") { }
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ result.appendBool("ismaster", true );
+ result.append("msg", "isdbgrid");
+ result.appendNumber("maxBsonObjectSize", BSONObjMaxUserSize);
+ return true;
+ }
+ } ismaster;
+
+ class CmdWhatsMyUri : public Command {
+ public:
+ CmdWhatsMyUri() : Command("whatsmyuri") { }
+ virtual bool logTheOp() {
+ return false; // nothing is modified by this command, so there is nothing to log
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual LockType locktype() const { return NONE; }
+ virtual void help( stringstream &help ) const {
+ help << "{whatsmyuri:1}";
+ }
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ result << "you" << ClientInfo::get()->getRemote();
+ return true;
+ }
+ } cmdWhatsMyUri;
+
+
+ class CmdShardingGetPrevError : public Command {
+ public:
+ virtual LockType locktype() const { return NONE; }
+ virtual bool requiresAuth() { return false; }
+
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual void help( stringstream& help ) const {
+ help << "get previous error (since last reseterror command)";
+ }
+ CmdShardingGetPrevError() : Command( "getPrevError" , false , "getpreverror") { }
+ virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ errmsg += "getpreverror not supported for sharded environments";
+ return false;
+ }
+ } cmdGetPrevError;
+
+ class CmdShardingGetLastError : public Command {
+ public:
+ virtual LockType locktype() const { return NONE; }
+ virtual bool requiresAuth() { return false; }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual void help( stringstream& help ) const {
+ help << "check for an error on the last command executed";
+ }
+ CmdShardingGetLastError() : Command("getLastError" , false , "getlasterror") { }
+
+ virtual bool run(const string& dbName, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ LastError *le = lastError.disableForCommand();
+ {
+ assert( le );
+ if ( le->msg.size() && le->nPrev == 1 ) {
+ le->appendSelf( result );
+ return true;
+ }
+ }
+
+ ClientInfo * client = ClientInfo::get();
+ return client->getLastError( cmdObj , result );
+ }
+ } cmdGetLastError;
+
+ }
+
+ class CmdShardingResetError : public Command {
+ public:
+ CmdShardingResetError() : Command( "resetError" , false , "reseterror" ) {}
+
+ virtual LockType locktype() const { return NONE; }
+ virtual bool slaveOk() const {
+ return true;
+ }
+
+ bool run(const string& dbName , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
+ LastError *le = lastError.get();
+ if ( le )
+ le->reset();
+
+ ClientInfo * client = ClientInfo::get();
+ set<string> * shards = client->getPrev();
+
+ for ( set<string>::iterator i = shards->begin(); i != shards->end(); i++ ) {
+ string theShard = *i;
+ ShardConnection conn( theShard , "" );
+ BSONObj res;
+ conn->runCommand( dbName , cmdObj , res );
+ conn.done();
+ }
+
+ return true;
+ }
+ } cmdShardingResetError;
+
+ class CmdListDatabases : public Command {
+ public:
+ CmdListDatabases() : Command("listDatabases", true , "listdatabases" ) {}
+
+ virtual bool logTheOp() { return false; }
+ virtual bool slaveOk() const { return true; }
+ virtual bool slaveOverrideOk() { return true; }
+ virtual bool adminOnly() const { return true; }
+ virtual LockType locktype() const { return NONE; }
+ virtual void help( stringstream& help ) const { help << "list databases on cluster"; }
+
+ bool run(const string& , BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
+ vector<Shard> shards;
+ Shard::getAllShards( shards );
+
+ map<string,long long> sizes;
+ map< string,shared_ptr<BSONObjBuilder> > dbShardInfo;
+
+ for ( vector<Shard>::iterator i=shards.begin(); i!=shards.end(); i++ ) {
+ Shard s = *i;
+ BSONObj x = s.runCommand( "admin" , "listDatabases" );
+
+ BSONObjIterator j( x["databases"].Obj() );
+ while ( j.more() ) {
+ BSONObj theDB = j.next().Obj();
+
+ string name = theDB["name"].String();
+ long long size = theDB["sizeOnDisk"].numberLong();
+
+ long long& totalSize = sizes[name];
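+ // a reported size of 1 is the convention for an empty database (cf. the "empty"
+ // flag below), so only count real sizes toward the total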
+ if ( size == 1 ) {
+ if ( totalSize <= 1 )
+ totalSize = 1;
+ }
+ else
+ totalSize += size;
+
+ shared_ptr<BSONObjBuilder>& bb = dbShardInfo[name];
+ if ( ! bb.get() )
+ bb.reset( new BSONObjBuilder() );
+ bb->appendNumber( s.getName() , size );
+ }
+
+ }
+
+ long long totalSize = 0;
+
+ BSONArrayBuilder bb( result.subarrayStart( "databases" ) );
+ for ( map<string,long long>::iterator i=sizes.begin(); i!=sizes.end(); ++i ) {
+ string name = i->first;
+
+ if ( name == "local" ) {
+ // we don't return local
+ // since all shards have their own independent local
+ continue;
+ }
+
+ long long size = i->second;
+ totalSize += size;
+
+ BSONObjBuilder temp;
+ temp.append( "name" , name );
+ temp.appendNumber( "sizeOnDisk" , size );
+ temp.appendBool( "empty" , size == 1 );
+ temp.append( "shards" , dbShardInfo[name]->obj() );
+
+ bb.append( temp.obj() );
+ }
+
+ if ( sizes.find( "config" ) == sizes.end() ){
+ ScopedDbConnection conn( configServer.getPrimary() );
+ BSONObj x;
+ if ( conn->simpleCommand( "config" , &x , "dbstats" ) ){
+ BSONObjBuilder b;
+ b.append( "name" , "config" );
+ b.appendBool( "empty" , false );
+ if ( x["fileSize"].type() )
+ b.appendAs( x["fileSize"] , "sizeOnDisk" );
+ else
+ b.append( "sizeOnDisk" , 1 );
+ bb.append( b.obj() );
+ }
+ else {
+ bb.append( BSON( "name" << "config" ) );
+ }
+ conn.done();
+ }
+
+ bb.done();
+
+ result.appendNumber( "totalSize" , totalSize );
+ result.appendNumber( "totalSizeMb" , totalSize / ( 1024 * 1024 ) );
+
+ return 1;
+ }
+
+ } cmdListDatabases;
+
+ class CmdCloseAllDatabases : public Command {
+ public:
+ CmdCloseAllDatabases() : Command("closeAllDatabases", false , "closeAllDatabases" ) {}
+ virtual bool logTheOp() { return false; }
+ virtual bool slaveOk() const { return true; }
+ virtual bool slaveOverrideOk() { return true; }
+ virtual bool adminOnly() const { return true; }
+ virtual LockType locktype() const { return NONE; }
+ virtual void help( stringstream& help ) const { help << "Not supported sharded"; }
+
+ bool run(const string& , BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& /*result*/, bool /*fromRepl*/) {
+ errmsg = "closeAllDatabases isn't supported through mongos";
+ return false;
+ }
+ } cmdCloseAllDatabases;
+
+
+ class CmdReplSetGetStatus : public Command {
+ public:
+ CmdReplSetGetStatus() : Command("replSetGetStatus"){}
+ virtual bool logTheOp() { return false; }
+ virtual bool slaveOk() const { return true; }
+ virtual bool adminOnly() const { return true; }
+ virtual LockType locktype() const { return NONE; }
+ virtual void help( stringstream& help ) const { help << "Not supported through mongos"; }
+
+ bool run(const string& , BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool /*fromRepl*/) {
+ if ( jsobj["forShell"].trueValue() )
+ lastError.disableForCommand();
+
+ errmsg = "replSetGetStatus is not supported through mongos";
+ result.append("info", "mongos"); // see sayReplSetMemberState
+ return false;
+ }
+ } cmdReplSetGetStatus;
+
+ CmdShutdown cmdShutdown;
+
+ void CmdShutdown::help( stringstream& help ) const {
+ help << "shutdown the database. must be ran against admin db and "
+ << "either (1) ran from localhost or (2) authenticated.";
+ }
+
+ bool CmdShutdown::run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ return shutdownHelper();
+ }
+
+} // namespace mongo
diff --git a/src/mongo/s/commands_public.cpp b/src/mongo/s/commands_public.cpp
new file mode 100644
index 00000000000..375c4f6feec
--- /dev/null
+++ b/src/mongo/s/commands_public.cpp
@@ -0,0 +1,1565 @@
+// s/commands_public.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "../util/net/message.h"
+#include "../db/dbmessage.h"
+#include "../client/connpool.h"
+#include "../client/parallel.h"
+#include "../db/commands.h"
+#include "../db/commands/pipeline.h"
+#include "../db/pipeline/document_source.h"
+#include "../db/pipeline/expression_context.h"
+#include "../db/queryutil.h"
+#include "../scripting/engine.h"
+#include "../util/timer.h"
+
+
+#include "config.h"
+#include "chunk.h"
+#include "strategy.h"
+#include "grid.h"
+#include "mr_shard.h"
+#include "client.h"
+
+namespace mongo {
+
+ bool setParmsMongodSpecific(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl )
+ {
+ return true;
+ }
+
+ namespace dbgrid_pub_cmds {
+
+ class PublicGridCommand : public Command {
+ public:
+ PublicGridCommand( const char* n, const char* oldname=NULL ) : Command( n, false, oldname ) {
+ }
+ virtual bool slaveOk() const {
+ return true;
+ }
+ virtual bool adminOnly() const {
+ return false;
+ }
+
+ // Override if passthrough should also send query options
+ // Safer as off by default, can slowly enable as we add more tests
+ virtual bool passOptions() const { return false; }
+
+ // all grid commands are designed not to lock
+ virtual LockType locktype() const { return NONE; }
+
+ protected:
+
+ bool passthrough( DBConfigPtr conf, const BSONObj& cmdObj , BSONObjBuilder& result ) {
+ return _passthrough(conf->getName(), conf, cmdObj, 0, result);
+ }
+ bool adminPassthrough( DBConfigPtr conf, const BSONObj& cmdObj , BSONObjBuilder& result ) {
+ return _passthrough("admin", conf, cmdObj, 0, result);
+ }
+
+ bool passthrough( DBConfigPtr conf, const BSONObj& cmdObj , int options, BSONObjBuilder& result ) {
+ return _passthrough(conf->getName(), conf, cmdObj, options, result);
+ }
+ bool adminPassthrough( DBConfigPtr conf, const BSONObj& cmdObj , int options, BSONObjBuilder& result ) {
+ return _passthrough("admin", conf, cmdObj, options, result);
+ }
+
+ private:
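+ // forwards the command to the db's primary shard and copies its reply into 'result';
+ // a stale-config error from the shard is rethrown as RecvStaleConfigException so the
+ // command layer can retry against fresh config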
+ bool _passthrough(const string& db, DBConfigPtr conf, const BSONObj& cmdObj , int options , BSONObjBuilder& result ) {
+ ShardConnection conn( conf->getPrimary() , "" );
+ BSONObj res;
+ bool ok = conn->runCommand( db , cmdObj , res , passOptions() ? options : 0 );
+ if ( ! ok && res["code"].numberInt() == SendStaleConfigCode ) {
+ conn.done();
+ throw RecvStaleConfigException( res["ns"].toString(),"command failed because of stale config");
+ }
+ result.appendElements( res );
+ conn.done();
+ return ok;
+ }
+ };
+
+ class RunOnAllShardsCommand : public Command {
+ public:
+ RunOnAllShardsCommand(const char* n, const char* oldname=NULL) : Command(n, false, oldname) {}
+
+ virtual bool slaveOk() const { return true; }
+ virtual bool adminOnly() const { return false; }
+
+ // all grid commands are designed not to lock
+ virtual LockType locktype() const { return NONE; }
+
+
+ // default impl uses all shards for DB
+ virtual void getShards(const string& dbName , BSONObj& cmdObj, set<Shard>& shards) {
+ DBConfigPtr conf = grid.getDBConfig( dbName , false );
+ conf->getAllShards(shards);
+ }
+
+ virtual void aggregateResults(const vector<BSONObj>& results, BSONObjBuilder& output) {}
+
+ // don't override
+ virtual bool run(const string& dbName , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& output, bool) {
+ LOG(1) << "RunOnAllShardsCommand db: " << dbName << " cmd:" << cmdObj << endl;
+ set<Shard> shards;
+ getShards(dbName, cmdObj, shards);
+
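+ // scatter/gather: spawn the command on every shard in parallel, then join each future,
+ // recording the per-shard reply under "raw" and any errmsg keyed by server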
+ list< shared_ptr<Future::CommandResult> > futures;
+ for ( set<Shard>::const_iterator i=shards.begin(), end=shards.end() ; i != end ; i++ ) {
+ futures.push_back( Future::spawnCommand( i->getConnString() , dbName , cmdObj, 0 ) );
+ }
+
+ vector<BSONObj> results;
+ BSONObjBuilder subobj (output.subobjStart("raw"));
+ BSONObjBuilder errors;
+ for ( list< shared_ptr<Future::CommandResult> >::iterator i=futures.begin(); i!=futures.end(); i++ ) {
+ shared_ptr<Future::CommandResult> res = *i;
+ if ( ! res->join() ) {
+ errors.appendAs(res->result()["errmsg"], res->getServer());
+ }
+ results.push_back( res->result() );
+ subobj.append( res->getServer() , res->result() );
+ }
+
+ subobj.done();
+
+ BSONObj errobj = errors.done();
+ if (! errobj.isEmpty()) {
+ errmsg = errobj.toString(false, true);
+ return false;
+ }
+
+ aggregateResults(results, output);
+ return true;
+ }
+
+ };
+
+ class AllShardsCollectionCommand : public RunOnAllShardsCommand {
+ public:
+ AllShardsCollectionCommand(const char* n, const char* oldname=NULL) : RunOnAllShardsCommand(n, oldname) {}
+
+ virtual void getShards(const string& dbName , BSONObj& cmdObj, set<Shard>& shards) {
+ string fullns = dbName + '.' + cmdObj.firstElement().valuestrsafe();
+
+ DBConfigPtr conf = grid.getDBConfig( dbName , false );
+
+ if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ) {
+ shards.insert(conf->getShard(fullns));
+ }
+ else {
+ conf->getChunkManager(fullns)->getAllShards(shards);
+ }
+ }
+ };
+
+
+ class NotAllowedOnShardedCollectionCmd : public PublicGridCommand {
+ public:
+ NotAllowedOnShardedCollectionCmd( const char * n ) : PublicGridCommand( n ) {}
+
+ virtual string getFullNS( const string& dbName , const BSONObj& cmdObj ) = 0;
+
+ virtual bool run(const string& dbName , BSONObj& cmdObj, int options, string& errmsg, BSONObjBuilder& result, bool) {
+ string fullns = getFullNS( dbName , cmdObj );
+
+ DBConfigPtr conf = grid.getDBConfig( dbName , false );
+
+ if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ) {
+ return passthrough( conf , cmdObj , options, result );
+ }
+ errmsg = "can't do command: " + name + " on sharded collection";
+ return false;
+ }
+ };
+
+ // ----
+
+ class DropIndexesCmd : public AllShardsCollectionCommand {
+ public:
+ DropIndexesCmd() : AllShardsCollectionCommand("dropIndexes", "deleteIndexes") {}
+ } dropIndexesCmd;
+
+ class ReIndexCmd : public AllShardsCollectionCommand {
+ public:
+ ReIndexCmd() : AllShardsCollectionCommand("reIndex") {}
+ } reIndexCmd;
+
+ class ProfileCmd : public PublicGridCommand {
+ public:
+ ProfileCmd() : PublicGridCommand("profile") {}
+ virtual bool run(const string& dbName , BSONObj& cmdObj, int options, string& errmsg, BSONObjBuilder& result, bool) {
+ errmsg = "profile currently not supported via mongos";
+ return false;
+ }
+ } profileCmd;
+
+
+ class ValidateCmd : public AllShardsCollectionCommand {
+ public:
+ ValidateCmd() : AllShardsCollectionCommand("validate") {}
+ virtual void aggregateResults(const vector<BSONObj>& results, BSONObjBuilder& output) {
+ for (vector<BSONObj>::const_iterator it(results.begin()), end(results.end()); it!=end; it++){
+ const BSONObj& result = *it;
+ const BSONElement valid = result["valid"];
+ if (!valid.eoo()){
+ if (!valid.trueValue()) {
+ output.appendBool("valid", false);
+ return;
+ }
+ }
+ else {
+ // Support pre-1.9.0 output with everything in a big string
+ const char* s = result["result"].valuestrsafe();
+ if (strstr(s, "exception") || strstr(s, "corrupt")){
+ output.appendBool("valid", false);
+ return;
+ }
+ }
+ }
+
+ output.appendBool("valid", true);
+ }
+ } validateCmd;
+
+ class RepairDatabaseCmd : public RunOnAllShardsCommand {
+ public:
+ RepairDatabaseCmd() : RunOnAllShardsCommand("repairDatabase") {}
+ } repairDatabaseCmd;
+
+ class DBStatsCmd : public RunOnAllShardsCommand {
+ public:
+ DBStatsCmd() : RunOnAllShardsCommand("dbStats", "dbstats") {}
+
+ virtual void aggregateResults(const vector<BSONObj>& results, BSONObjBuilder& output) {
+ long long objects = 0;
+ long long dataSize = 0;
+ long long storageSize = 0;
+ long long numExtents = 0;
+ long long indexes = 0;
+ long long indexSize = 0;
+ long long fileSize = 0;
+
+ for (vector<BSONObj>::const_iterator it(results.begin()), end(results.end()); it != end; ++it) {
+ const BSONObj& b = *it;
+ objects += b["objects"].numberLong();
+ dataSize += b["dataSize"].numberLong();
+ storageSize += b["storageSize"].numberLong();
+ numExtents += b["numExtents"].numberLong();
+ indexes += b["indexes"].numberLong();
+ indexSize += b["indexSize"].numberLong();
+ fileSize += b["fileSize"].numberLong();
+ }
+
+ //result.appendNumber( "collections" , ncollections ); //TODO: need to find a good way to get this
+ output.appendNumber( "objects" , objects );
+ output.append ( "avgObjSize" , double(dataSize) / double(objects) );
+ output.appendNumber( "dataSize" , dataSize );
+ output.appendNumber( "storageSize" , storageSize);
+ output.appendNumber( "numExtents" , numExtents );
+ output.appendNumber( "indexes" , indexes );
+ output.appendNumber( "indexSize" , indexSize );
+ output.appendNumber( "fileSize" , fileSize );
+ }
+ } DBStatsCmdObj;
+
+ class DropCmd : public PublicGridCommand {
+ public:
+ DropCmd() : PublicGridCommand( "drop" ) {}
+ bool run(const string& dbName , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ string collection = cmdObj.firstElement().valuestrsafe();
+ string fullns = dbName + "." + collection;
+
+ DBConfigPtr conf = grid.getDBConfig( dbName , false );
+
+ log() << "DROP: " << fullns << endl;
+
+ if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ) {
+ return passthrough( conf , cmdObj , result );
+ }
+
+ ChunkManagerPtr cm = conf->getChunkManager( fullns );
+ massert( 10418 , "how could chunk manager be null!" , cm );
+
+ cm->drop( cm );
+ uassert( 13512 , "drop collection attempted on non-sharded collection" , conf->removeSharding( fullns ) );
+
+ return 1;
+ }
+ } dropCmd;
+
+ class DropDBCmd : public PublicGridCommand {
+ public:
+ DropDBCmd() : PublicGridCommand( "dropDatabase" ) {}
+ bool run(const string& dbName , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+
+ BSONElement e = cmdObj.firstElement();
+
+ if ( ! e.isNumber() || e.number() != 1 ) {
+ errmsg = "invalid params";
+ return 0;
+ }
+
+ DBConfigPtr conf = grid.getDBConfig( dbName , false );
+
+ log() << "DROP DATABASE: " << dbName << endl;
+
+ if ( ! conf ) {
+ result.append( "info" , "database didn't exist" );
+ return true;
+ }
+
+ if ( ! conf->dropDatabase( errmsg ) )
+ return false;
+
+ result.append( "dropped" , dbName );
+ return true;
+ }
+ } dropDBCmd;
+
+ class RenameCollectionCmd : public PublicGridCommand {
+ public:
+ RenameCollectionCmd() : PublicGridCommand( "renameCollection" ) {}
+ bool run(const string& dbName, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ string fullnsFrom = cmdObj.firstElement().valuestrsafe();
+ string dbNameFrom = nsToDatabase( fullnsFrom.c_str() );
+ DBConfigPtr confFrom = grid.getDBConfig( dbNameFrom , false );
+
+ string fullnsTo = cmdObj["to"].valuestrsafe();
+ string dbNameTo = nsToDatabase( fullnsTo.c_str() );
+ DBConfigPtr confTo = grid.getDBConfig( dbNameTo , false );
+
+ uassert(13140, "Don't recognize source or target DB", confFrom && confTo);
+ uassert(13138, "You can't rename a sharded collection", !confFrom->isSharded(fullnsFrom));
+ uassert(13139, "You can't rename to a sharded collection", !confTo->isSharded(fullnsTo));
+
+ const Shard& shardTo = confTo->getShard(fullnsTo);
+ const Shard& shardFrom = confFrom->getShard(fullnsFrom);
+
+ uassert(13137, "Source and destination collections must be on same shard", shardFrom == shardTo);
+
+ return adminPassthrough( confFrom , cmdObj , result );
+ }
+ } renameCollectionCmd;
+
+ class CopyDBCmd : public PublicGridCommand {
+ public:
+ CopyDBCmd() : PublicGridCommand( "copydb" ) {}
+ bool run(const string& dbName, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ string todb = cmdObj.getStringField("todb");
+ uassert(13402, "need a todb argument", !todb.empty());
+
+ DBConfigPtr confTo = grid.getDBConfig( todb );
+ uassert(13398, "cant copy to sharded DB", !confTo->isShardingEnabled());
+
+ string fromhost = cmdObj.getStringField("fromhost");
+ if (!fromhost.empty()) {
+ return adminPassthrough( confTo , cmdObj , result );
+ }
+ else {
+ string fromdb = cmdObj.getStringField("fromdb");
+ uassert(13399, "need a fromdb argument", !fromdb.empty());
+
+ DBConfigPtr confFrom = grid.getDBConfig( fromdb , false );
+ uassert(13400, "don't know where source DB is", confFrom);
+ uassert(13401, "cant copy from sharded DB", !confFrom->isShardingEnabled());
+
+ BSONObjBuilder b;
+ BSONForEach(e, cmdObj) {
+ if (strcmp(e.fieldName(), "fromhost") != 0)
+ b.append(e);
+ }
+ b.append("fromhost", confFrom->getPrimary().getConnString());
+ BSONObj fixed = b.obj();
+
+ return adminPassthrough( confTo , fixed , result );
+ }
+
+ }
+ } copyDBCmd;
+
+ class CountCmd : public PublicGridCommand {
+ public:
+ CountCmd() : PublicGridCommand("count") { }
+ virtual bool passOptions() const { return true; }
+ bool run(const string& dbName, BSONObj& cmdObj, int options, string& errmsg, BSONObjBuilder& result, bool) {
+ string collection = cmdObj.firstElement().valuestrsafe();
+ string fullns = dbName + "." + collection;
+
+ BSONObj filter;
+ if ( cmdObj["query"].isABSONObj() )
+ filter = cmdObj["query"].Obj();
+
+ DBConfigPtr conf = grid.getDBConfig( dbName , false );
+ if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ) {
+ ShardConnection conn( conf->getPrimary() , fullns );
+
+ BSONObj temp;
+ bool ok = false;
+ try{
+ ok = conn->runCommand( dbName , cmdObj , temp, options );
+ }
+ catch( RecvStaleConfigException& e ){
+ conn.done();
+ throw e;
+ }
+ conn.done();
+
+ if ( ok ) {
+ result.append( temp["n"] );
+ return true;
+ }
+
+ if ( temp["code"].numberInt() != SendStaleConfigCode ) {
+ errmsg = temp["errmsg"].String();
+ result.appendElements( temp );
+ return false;
+ }
+
+ // this collection got sharded
+ ChunkManagerPtr cm = conf->getChunkManagerIfExists( fullns , true );
+ if ( ! cm ) {
+ errmsg = "should be sharded now";
+ result.append( "root" , temp );
+ return false;
+ }
+ }
+
+ long long total = 0;
+ map<string,long long> shardCounts;
+ int numTries = 0;
+ bool hadToBreak = false;
+
+ ChunkManagerPtr cm = conf->getChunkManagerIfExists( fullns );
+ while ( numTries < 5 ) {
+ numTries++;
+
+ // This all should eventually be replaced by new pcursor framework, but for now match query
+ // retry behavior manually
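+ // (i.e. up to 5 attempts with a simple linear backoff of numTries-1 seconds,
+ // restarting the per-shard totals whenever a shard reports a stale config
+ // or the chunk manager version changes)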
+ if( numTries >= 2 ) sleepsecs( numTries - 1 );
+
+ if ( ! cm ) {
+ // probably unsharded now
+ return run( dbName , cmdObj , options , errmsg , result, false );
+ }
+
+ set<Shard> shards;
+ cm->getShardsForQuery( shards , filter );
+ assert( shards.size() );
+
+ hadToBreak = false;
+
+ for (set<Shard>::iterator it=shards.begin(), end=shards.end(); it != end; ++it) {
+ ShardConnection conn(*it, fullns);
+ if ( conn.setVersion() ){
+ ChunkManagerPtr newCM = conf->getChunkManagerIfExists( fullns );
+ if( newCM->getVersion() != cm->getVersion() ){
+ cm = newCM;
+ total = 0;
+ shardCounts.clear();
+ conn.done();
+ hadToBreak = true;
+ break;
+ }
+ }
+
+ BSONObj temp;
+ bool ok = false;
+ try{
+ ok = conn->runCommand( dbName , BSON( "count" << collection << "query" << filter ) , temp, options );
+ }
+ catch( RecvStaleConfigException& e ){
+ conn.done();
+ throw e;
+ }
+ conn.done();
+
+ if ( ok ) {
+ long long mine = temp["n"].numberLong();
+ total += mine;
+ shardCounts[it->getName()] = mine;
+ continue;
+
+ }
+
+ if ( SendStaleConfigCode == temp["code"].numberInt() ) {
+ // my version is old
+ total = 0;
+ shardCounts.clear();
+ cm = conf->getChunkManagerIfExists( fullns , true, numTries > 2 ); // Force reload on third attempt
+ hadToBreak = true;
+ break;
+ }
+
+ // command failed :(
+ errmsg = "failed on : " + it->getName();
+ result.append( "cause" , temp );
+ return false;
+ }
+ if ( ! hadToBreak )
+ break;
+ }
+ if (hadToBreak) {
+ errmsg = "Tried 5 times without success to get count for " + fullns + " from all shards";
+ return false;
+ }
+
+ total = applySkipLimit( total , cmdObj );
+ result.appendNumber( "n" , total );
+ BSONObjBuilder temp( result.subobjStart( "shards" ) );
+ for ( map<string,long long>::iterator i=shardCounts.begin(); i!=shardCounts.end(); ++i )
+ temp.appendNumber( i->first , i->second );
+ temp.done();
+ return true;
+ }
+ } countCmd;
+
+ class CollectionStats : public PublicGridCommand {
+ public:
+ CollectionStats() : PublicGridCommand("collStats", "collstats") { }
+ bool run(const string& dbName , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ string collection = cmdObj.firstElement().valuestrsafe();
+ string fullns = dbName + "." + collection;
+
+ DBConfigPtr conf = grid.getDBConfig( dbName , false );
+
+ if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ) {
+ result.appendBool("sharded", false);
+ result.append( "primary" , conf->getPrimary().getName() );
+ return passthrough( conf , cmdObj , result);
+ }
+ result.appendBool("sharded", true);
+
+ ChunkManagerPtr cm = conf->getChunkManager( fullns );
+ massert( 12594 , "how could chunk manager be null!" , cm );
+
+ set<Shard> servers;
+ cm->getAllShards(servers);
+
+ BSONObjBuilder shardStats;
+ map<string,long long> counts;
+ map<string,long long> indexSizes;
+ /*
+ long long count=0;
+ long long size=0;
+ long long storageSize=0;
+ */
+ int nindexes=0;
+ bool warnedAboutIndexes = false;
+ for ( set<Shard>::iterator i=servers.begin(); i!=servers.end(); i++ ) {
+ ScopedDbConnection conn( *i );
+ BSONObj res;
+ if ( ! conn->runCommand( dbName , cmdObj , res ) ) {
+ errmsg = "failed on shard: " + res.toString();
+ return false;
+ }
+ conn.done();
+
+ BSONObjIterator j( res );
+ while ( j.more() ) {
+ BSONElement e = j.next();
+
+ if ( str::equals( e.fieldName() , "ns" ) ||
+ str::equals( e.fieldName() , "ok" ) ||
+ str::equals( e.fieldName() , "avgObjSize" ) ||
+ str::equals( e.fieldName() , "lastExtentSize" ) ||
+ str::equals( e.fieldName() , "paddingFactor" ) ) {
+ continue;
+ }
+ else if ( str::equals( e.fieldName() , "count" ) ||
+ str::equals( e.fieldName() , "size" ) ||
+ str::equals( e.fieldName() , "storageSize" ) ||
+ str::equals( e.fieldName() , "numExtents" ) ||
+ str::equals( e.fieldName() , "totalIndexSize" ) ) {
+ counts[e.fieldName()] += e.numberLong();
+ }
+ else if ( str::equals( e.fieldName() , "indexSizes" ) ) {
+ BSONObjIterator k( e.Obj() );
+ while ( k.more() ) {
+ BSONElement temp = k.next();
+ indexSizes[temp.fieldName()] += temp.numberLong();
+ }
+ }
+ else if ( str::equals( e.fieldName() , "flags" ) ) {
+ if ( ! result.hasField( e.fieldName() ) )
+ result.append( e );
+ }
+ else if ( str::equals( e.fieldName() , "nindexes" ) ) {
+ int myIndexes = e.numberInt();
+
+ if ( nindexes == 0 ) {
+ nindexes = myIndexes;
+ }
+ else if ( nindexes == myIndexes ) {
+ // no-op
+ }
+ else {
+ // hopefully this means we're building an index
+
+ if ( myIndexes > nindexes )
+ nindexes = myIndexes;
+
+ if ( ! warnedAboutIndexes ) {
+ result.append( "warning" , "indexes don't all match - ok if ensureIndex is running" );
+ warnedAboutIndexes = true;
+ }
+ }
+ }
+ else {
+ warning() << "mongos collstats doesn't know about: " << e.fieldName() << endl;
+ }
+
+ }
+ shardStats.append(i->getName(), res);
+ }
+
+ result.append("ns", fullns);
+
+ for ( map<string,long long>::iterator i=counts.begin(); i!=counts.end(); ++i )
+ result.appendNumber( i->first , i->second );
+
+ {
+ BSONObjBuilder ib( result.subobjStart( "indexSizes" ) );
+ for ( map<string,long long>::iterator i=indexSizes.begin(); i!=indexSizes.end(); ++i )
+ ib.appendNumber( i->first , i->second );
+ ib.done();
+ }
+
+ if ( counts["count"] > 0 )
+ result.append("avgObjSize", (double)counts["size"] / (double)counts["count"] );
+ else
+ result.append( "avgObjSize", 0.0 );
+
+ result.append("nindexes", nindexes);
+
+ result.append("nchunks", cm->numChunks());
+ result.append("shards", shardStats.obj());
+
+ return true;
+ }
+ } collectionStatsCmd;
+
+ class FindAndModifyCmd : public PublicGridCommand {
+ public:
+ FindAndModifyCmd() : PublicGridCommand("findAndModify", "findandmodify") { }
+ bool run(const string& dbName, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ string collection = cmdObj.firstElement().valuestrsafe();
+ string fullns = dbName + "." + collection;
+
+ DBConfigPtr conf = grid.getDBConfig( dbName , false );
+
+ if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ) {
+ return passthrough( conf , cmdObj , result);
+ }
+
+ ChunkManagerPtr cm = conf->getChunkManager( fullns );
+ massert( 13002 , "shard internal error chunk manager should never be null" , cm );
+
+ BSONObj filter = cmdObj.getObjectField("query");
+ uassert(13343, "query for sharded findAndModify must have shardkey", cm->hasShardKey(filter));
+
+ //TODO with upsert consider tracking for splits
+
+ ChunkPtr chunk = cm->findChunk(filter);
+ ShardConnection conn( chunk->getShard() , fullns );
+ BSONObj res;
+ bool ok = conn->runCommand( conf->getName() , cmdObj , res );
+ conn.done();
+
+ if (!ok && res.getIntField("code") == RecvStaleConfigCode) { // code for RecvStaleConfigException
+ throw RecvStaleConfigException(fullns, "FindAndModify"); // Command code traps this and re-runs
+ }
+
+ result.appendElements(res);
+ return ok;
+ }
+
+ } findAndModifyCmd;
+
+ class DataSizeCmd : public PublicGridCommand {
+ public:
+ DataSizeCmd() : PublicGridCommand("dataSize", "datasize") { }
+ bool run(const string& dbName, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ string fullns = cmdObj.firstElement().String();
+
+ DBConfigPtr conf = grid.getDBConfig( dbName , false );
+
+ if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ) {
+ return passthrough( conf , cmdObj , result);
+ }
+
+ ChunkManagerPtr cm = conf->getChunkManager( fullns );
+ massert( 13407 , "how could chunk manager be null!" , cm );
+
+ BSONObj min = cmdObj.getObjectField( "min" );
+ BSONObj max = cmdObj.getObjectField( "max" );
+ BSONObj keyPattern = cmdObj.getObjectField( "keyPattern" );
+
+ uassert(13408, "keyPattern must equal shard key", cm->getShardKey().key() == keyPattern);
+
+ // yes these are doubles...
+ double size = 0;
+ double numObjects = 0;
+ int millis = 0;
+
+ set<Shard> shards;
+ cm->getShardsForRange(shards, min, max);
+ for ( set<Shard>::iterator i=shards.begin(), end=shards.end() ; i != end; ++i ) {
+ ScopedDbConnection conn( *i );
+ BSONObj res;
+ bool ok = conn->runCommand( conf->getName() , cmdObj , res );
+ conn.done();
+
+ if ( ! ok ) {
+ result.appendElements( res );
+ return false;
+ }
+
+ size += res["size"].number();
+ numObjects += res["numObjects"].number();
+ millis += res["millis"].numberInt();
+
+ }
+
+ result.append( "size", size );
+ result.append( "numObjects" , numObjects );
+ result.append( "millis" , millis );
+ return true;
+ }
+
+    } dataSizeCmd;
+
+ class ConvertToCappedCmd : public NotAllowedOnShardedCollectionCmd {
+ public:
+ ConvertToCappedCmd() : NotAllowedOnShardedCollectionCmd("convertToCapped") {}
+
+ virtual string getFullNS( const string& dbName , const BSONObj& cmdObj ) {
+ return dbName + "." + cmdObj.firstElement().valuestrsafe();
+ }
+
+ } convertToCappedCmd;
+
+
+ class GroupCmd : public NotAllowedOnShardedCollectionCmd {
+ public:
+ GroupCmd() : NotAllowedOnShardedCollectionCmd("group") {}
+ virtual bool passOptions() const { return true; }
+ virtual string getFullNS( const string& dbName , const BSONObj& cmdObj ) {
+ return dbName + "." + cmdObj.firstElement().embeddedObjectUserCheck()["ns"].valuestrsafe();
+ }
+
+ } groupCmd;
+
+ class DistinctCmd : public PublicGridCommand {
+ public:
+ DistinctCmd() : PublicGridCommand("distinct") {}
+ virtual void help( stringstream &help ) const {
+ help << "{ distinct : 'collection name' , key : 'a.b' , query : {} }";
+ }
+ virtual bool passOptions() const { return true; }
+ bool run(const string& dbName , BSONObj& cmdObj, int options, string& errmsg, BSONObjBuilder& result, bool) {
+ string collection = cmdObj.firstElement().valuestrsafe();
+ string fullns = dbName + "." + collection;
+
+ DBConfigPtr conf = grid.getDBConfig( dbName , false );
+
+ if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ) {
+ return passthrough( conf , cmdObj , options, result );
+ }
+
+ ChunkManagerPtr cm = conf->getChunkManager( fullns );
+ massert( 10420 , "how could chunk manager be null!" , cm );
+
+ BSONObj query = getQuery(cmdObj);
+ set<Shard> shards;
+ cm->getShardsForQuery(shards, query);
+
+ set<BSONObj,BSONObjCmp> all;
+ int size = 32;
+
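+            // run distinct on each shard that may own matching chunks; the ordered set de-duplicates values across shards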
+ for ( set<Shard>::iterator i=shards.begin(), end=shards.end() ; i != end; ++i ) {
+ ShardConnection conn( *i , fullns );
+ BSONObj res;
+ bool ok = conn->runCommand( conf->getName() , cmdObj , res, options );
+ conn.done();
+
+ if ( ! ok ) {
+ result.appendElements( res );
+ return false;
+ }
+
+ BSONObjIterator it( res["values"].embeddedObject() );
+ while ( it.more() ) {
+ BSONElement nxt = it.next();
+ BSONObjBuilder temp(32);
+ temp.appendAs( nxt , "" );
+ all.insert( temp.obj() );
+ }
+
+ }
+
+ BSONObjBuilder b( size );
+ int n=0;
+ for ( set<BSONObj,BSONObjCmp>::iterator i = all.begin() ; i != all.end(); i++ ) {
+ b.appendAs( i->firstElement() , b.numStr( n++ ) );
+ }
+
+ result.appendArray( "values" , b.obj() );
+ return true;
+ }
+    } distinctCmd;
+
+ class FileMD5Cmd : public PublicGridCommand {
+ public:
+ FileMD5Cmd() : PublicGridCommand("filemd5") {}
+ virtual void help( stringstream &help ) const {
+ help << " example: { filemd5 : ObjectId(aaaaaaa) , root : \"fs\" }";
+ }
+ bool run(const string& dbName , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ string fullns = dbName;
+ fullns += ".";
+ {
+ string root = cmdObj.getStringField( "root" );
+ if ( root.size() == 0 )
+ root = "fs";
+ fullns += root;
+ }
+ fullns += ".chunks";
+
+ DBConfigPtr conf = grid.getDBConfig( dbName , false );
+
+ if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ) {
+ return passthrough( conf , cmdObj , result );
+ }
+
+ ChunkManagerPtr cm = conf->getChunkManager( fullns );
+ massert( 13091 , "how could chunk manager be null!" , cm );
+ uassert( 13092 , "GridFS chunks collection can only be sharded on files_id", cm->getShardKey().key() == BSON("files_id" << 1));
+
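+            // all GridFS chunks of a file share the same files_id, so they live in a single chunk range owned by one shard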
+ ChunkPtr chunk = cm->findChunk( BSON("files_id" << cmdObj.firstElement()) );
+
+ ShardConnection conn( chunk->getShard() , fullns );
+ BSONObj res;
+ bool ok = conn->runCommand( conf->getName() , cmdObj , res );
+ conn.done();
+
+ result.appendElements(res);
+ return ok;
+ }
+ } fileMD5Cmd;
+
+ class Geo2dFindNearCmd : public PublicGridCommand {
+ public:
+ Geo2dFindNearCmd() : PublicGridCommand( "geoNear" ) {}
+ void help(stringstream& h) const { h << "http://www.mongodb.org/display/DOCS/Geospatial+Indexing#GeospatialIndexing-geoNearCommand"; }
+ virtual bool passOptions() const { return true; }
+ bool run(const string& dbName , BSONObj& cmdObj, int options, string& errmsg, BSONObjBuilder& result, bool) {
+ string collection = cmdObj.firstElement().valuestrsafe();
+ string fullns = dbName + "." + collection;
+
+ DBConfigPtr conf = grid.getDBConfig( dbName , false );
+
+ if ( ! conf || ! conf->isShardingEnabled() || ! conf->isSharded( fullns ) ) {
+ return passthrough( conf , cmdObj , options, result );
+ }
+
+ ChunkManagerPtr cm = conf->getChunkManager( fullns );
+ massert( 13500 , "how could chunk manager be null!" , cm );
+
+ BSONObj query = getQuery(cmdObj);
+ set<Shard> shards;
+ cm->getShardsForQuery(shards, query);
+
+ int limit = 100;
+ if (cmdObj["num"].isNumber())
+ limit = cmdObj["num"].numberInt();
+
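+            // fan the geoNear command out to the relevant shards in parallel; the per-shard results are merged by distance below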
+ list< shared_ptr<Future::CommandResult> > futures;
+ BSONArrayBuilder shardArray;
+ for ( set<Shard>::const_iterator i=shards.begin(), end=shards.end() ; i != end ; i++ ) {
+ futures.push_back( Future::spawnCommand( i->getConnString() , dbName , cmdObj, options ) );
+ shardArray.append(i->getName());
+ }
+
+ multimap<double, BSONObj> results; // TODO: maybe use merge-sort instead
+ string nearStr;
+ double time = 0;
+ double btreelocs = 0;
+ double nscanned = 0;
+ double objectsLoaded = 0;
+ for ( list< shared_ptr<Future::CommandResult> >::iterator i=futures.begin(); i!=futures.end(); i++ ) {
+ shared_ptr<Future::CommandResult> res = *i;
+ if ( ! res->join() ) {
+ errmsg = res->result()["errmsg"].String();
+ return false;
+ }
+
+ nearStr = res->result()["near"].String();
+ time += res->result()["stats"]["time"].Number();
+ btreelocs += res->result()["stats"]["btreelocs"].Number();
+ nscanned += res->result()["stats"]["nscanned"].Number();
+ objectsLoaded += res->result()["stats"]["objectsLoaded"].Number();
+
+ BSONForEach(obj, res->result()["results"].embeddedObject()) {
+ results.insert(make_pair(obj["dis"].Number(), obj.embeddedObject().getOwned()));
+ }
+
+ // TODO: maybe shrink results if size() > limit
+ }
+
+ result.append("ns" , fullns);
+ result.append("near", nearStr);
+
+ int outCount = 0;
+ double totalDistance = 0;
+ double maxDistance = 0;
+ {
+ BSONArrayBuilder sub (result.subarrayStart("results"));
+ for (multimap<double, BSONObj>::const_iterator it(results.begin()), end(results.end()); it!= end && outCount < limit; ++it, ++outCount) {
+ totalDistance += it->first;
+ maxDistance = it->first; // guaranteed to be highest so far
+
+ sub.append(it->second);
+ }
+ sub.done();
+ }
+
+ {
+ BSONObjBuilder sub (result.subobjStart("stats"));
+ sub.append("time", time);
+ sub.append("btreelocs", btreelocs);
+ sub.append("nscanned", nscanned);
+ sub.append("objectsLoaded", objectsLoaded);
+ sub.append("avgDistance", totalDistance / outCount);
+ sub.append("maxDistance", maxDistance);
+ sub.append("shards", shardArray.arr());
+ sub.done();
+ }
+
+ return true;
+ }
+ } geo2dFindNearCmd;
+
+ class MRCmd : public PublicGridCommand {
+ public:
+ AtomicUInt JOB_NUMBER;
+
+ MRCmd() : PublicGridCommand( "mapreduce" ) {}
+
+ string getTmpName( const string& coll ) {
+ stringstream ss;
+ ss << "tmp.mrs." << coll << "_" << time(0) << "_" << JOB_NUMBER++;
+ return ss.str();
+ }
+
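+        // build the command forwarded to each shard: keep only fields that are safe to forward, redirect output to the temporary collection, and optionally ask the shards for split points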
+ BSONObj fixForShards( const BSONObj& orig , const string& output , string& badShardedField , int maxChunkSizeBytes ) {
+ BSONObjBuilder b;
+ BSONObjIterator i( orig );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ string fn = e.fieldName();
+ if ( fn == "map" ||
+ fn == "mapreduce" ||
+ fn == "mapparams" ||
+ fn == "reduce" ||
+ fn == "query" ||
+ fn == "sort" ||
+ fn == "scope" ||
+ fn == "verbose" ) {
+ b.append( e );
+ }
+ else if ( fn == "out" ||
+ fn == "finalize" ) {
+ // we don't want to copy these
+ }
+ else {
+ badShardedField = fn;
+ return BSONObj();
+ }
+ }
+ b.append( "out" , output );
+
+ if ( maxChunkSizeBytes > 0 ) {
+ // will need to figure out chunks, ask shards for points
+ b.append("splitInfo", maxChunkSizeBytes);
+ }
+
+ return b.obj();
+ }
+
+ ChunkPtr insertSharded( ChunkManagerPtr manager, const char* ns, BSONObj& o, int flags, bool safe ) {
+            // note: the MR output process requires that no splitting / migration happen while it runs, hence a StaleConfigException should not occur
+ Strategy* s = SHARDED;
+ ChunkPtr c = manager->findChunk( o );
+ LOG(4) << " server:" << c->getShard().toString() << " " << o << endl;
+ s->insert( c->getShard() , ns , o , flags, safe);
+ return c;
+ }
+
+ bool run(const string& dbName , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ return run( dbName, cmdObj, errmsg, result, 0 );
+ }
+
+ bool run(const string& dbName , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, int retry ) {
+ Timer t;
+
+ string collection = cmdObj.firstElement().valuestrsafe();
+ string fullns = dbName + "." + collection;
+
+ // Abort after two retries, m/r is an expensive operation
+ if( retry > 2 ){
+ errmsg = "shard version errors preventing parallel mapreduce, check logs for further info";
+ return false;
+ }
+ // Re-check shard version after 1st retry
+ if( retry > 0 ){
+ versionManager.forceRemoteCheckShardVersionCB( fullns );
+ }
+
+ const string shardResultCollection = getTmpName( collection );
+
+ BSONObj customOut;
+ string finalColShort;
+ string finalColLong;
+ bool customOutDB = false;
+ string outDB = dbName;
+ BSONElement outElmt = cmdObj.getField("out");
+ if (outElmt.type() == Object) {
+ // check if there is a custom output
+ BSONObj out = outElmt.embeddedObject();
+ customOut = out;
+ // mode must be 1st element
+ finalColShort = out.firstElement().str();
+ if (customOut.hasField( "db" )) {
+ customOutDB = true;
+ outDB = customOut.getField("db").str();
+ }
+ finalColLong = outDB + "." + finalColShort;
+ }
+
+ DBConfigPtr confIn = grid.getDBConfig( dbName , false );
+ DBConfigPtr confOut = confIn;
+ if (customOutDB) {
+ confOut = grid.getDBConfig( outDB , true );
+ }
+
+ bool shardedInput = confIn && confIn->isShardingEnabled() && confIn->isSharded( fullns );
+ bool shardedOutput = customOut.getBoolField("sharded");
+
+ if (!shardedOutput)
+                uassert( 15920 , "cannot use non-sharded output mode: a sharded collection with that name already exists" , !confOut->isSharded(finalColLong) );
+            // should we also prevent going from non-sharded to sharded? during the transition clients may see partial data
+
+ long long maxChunkSizeBytes = 0;
+ if (shardedOutput) {
+ // will need to figure out chunks, ask shards for points
+ maxChunkSizeBytes = cmdObj["maxChunkSizeBytes"].numberLong();
+ if ( maxChunkSizeBytes == 0 ) {
+ maxChunkSizeBytes = Chunk::MaxChunkSize;
+ }
+ }
+
+ // modify command to run on shards with output to tmp collection
+ string badShardedField;
+ assert( maxChunkSizeBytes < 0x7fffffff );
+ BSONObj shardedCommand = fixForShards( cmdObj , shardResultCollection , badShardedField, static_cast<int>(maxChunkSizeBytes) );
+
+ if ( ! shardedInput && ! shardedOutput && ! customOutDB ) {
+ LOG(1) << "simple MR, just passthrough" << endl;
+ return passthrough( confIn , cmdObj , result );
+ }
+
+ if ( badShardedField.size() ) {
+ errmsg = str::stream() << "unknown m/r field for sharding: " << badShardedField;
+ return false;
+ }
+
+ BSONObjBuilder timingBuilder;
+ BSONObj q;
+ if ( cmdObj["query"].type() == Object ) {
+ q = cmdObj["query"].embeddedObjectUserCheck();
+ }
+
+ set<Shard> shards;
+ set<ServerAndQuery> servers;
+ map<Shard,BSONObj> results;
+
+ BSONObjBuilder shardCountsB;
+ BSONObjBuilder aggCountsB;
+ map<string,long long> countsMap;
+ set< BSONObj > splitPts;
+
+ {
+ // take distributed lock to prevent split / migration
+ ConnectionString config = configServer.getConnectionString();
+ DistributedLock lockSetup( config , fullns );
+ dist_lock_try dlk;
+
+ if (shardedInput) {
+ try{
+ int tryc = 0;
+ while ( !dlk.got() ) {
+ dlk = dist_lock_try( &lockSetup , (string)"mr-parallel" );
+ if ( ! dlk.got() ) {
+ if ( ++tryc % 100 == 0 )
+ warning() << "the collection metadata could not be locked for mapreduce, already locked by " << dlk.other() << endl;
+ sleepmillis(100);
+ }
+ }
+ }
+ catch( LockException& e ){
+ errmsg = str::stream() << "error locking distributed lock for mapreduce " << causedBy( e );
+ return false;
+ }
+ }
+
+ try {
+ SHARDED->commandOp( dbName, shardedCommand, 0, fullns, q, results );
+ }
+ catch( DBException& e ){
+ e.addContext( str::stream() << "could not run map command on all shards for ns " << fullns << " and query " << q );
+ throw;
+ }
+
+ for ( map<Shard,BSONObj>::iterator i = results.begin(); i != results.end(); ++i ){
+
+ BSONObj mrResult = i->second;
+ string server = i->first.getConnString();
+
+ BSONObj counts = mrResult["counts"].embeddedObjectUserCheck();
+ shardCountsB.append( server , counts );
+ servers.insert( server );
+
+ // add up the counts for each shard
+ // some of them will be fixed later like output and reduce
+ BSONObjIterator j( counts );
+ while ( j.more() ) {
+ BSONElement temp = j.next();
+ countsMap[temp.fieldName()] += temp.numberLong();
+ }
+
+ if (mrResult.hasField("splitKeys")) {
+ BSONElement splitKeys = mrResult.getField("splitKeys");
+ vector<BSONElement> pts = splitKeys.Array();
+ for (vector<BSONElement>::iterator it = pts.begin(); it != pts.end(); ++it) {
+ splitPts.insert(it->Obj().getOwned());
+ }
+ }
+ }
+ }
+
+ // build the sharded finish command
+ BSONObjBuilder finalCmd;
+ finalCmd.append( "mapreduce.shardedfinish" , cmdObj );
+ finalCmd.append( "inputNS" , dbName + "." + shardResultCollection );
+
+ BSONObj shardCounts = shardCountsB.done();
+ finalCmd.append( "shardCounts" , shardCounts );
+ timingBuilder.append( "shardProcessing" , t.millis() );
+
+ for ( map<string,long long>::iterator i=countsMap.begin(); i!=countsMap.end(); i++ ) {
+ aggCountsB.append( i->first , i->second );
+ }
+ BSONObj aggCounts = aggCountsB.done();
+ finalCmd.append( "counts" , aggCounts );
+
+ Timer t2;
+ BSONObj singleResult;
+ bool ok = false;
+ long long reduceCount = 0;
+ long long outputCount = 0;
+ BSONObjBuilder postCountsB;
+
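+            // final reduce phase: run on the output db's primary shard for unsharded output, or on every shard of the (possibly newly created) sharded output collection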
+ if (!shardedOutput) {
+ LOG(1) << "MR with single shard output, NS=" << finalColLong << " primary=" << confOut->getPrimary() << endl;
+ ShardConnection conn( confOut->getPrimary() , finalColLong );
+ ok = conn->runCommand( outDB , finalCmd.obj() , singleResult );
+
+ BSONObj counts = singleResult.getObjectField("counts");
+ postCountsB.append(conn->getServerAddress(), counts);
+ reduceCount = counts.getIntField("reduce");
+ outputCount = counts.getIntField("output");
+
+ conn.done();
+ } else {
+
+ LOG(1) << "MR with sharded output, NS=" << finalColLong << endl;
+
+ // create the sharded collection if needed
+ if (!confOut->isSharded(finalColLong)) {
+ // enable sharding on db
+ confOut->enableSharding();
+
+ // shard collection according to split points
+ BSONObj sortKey = BSON( "_id" << 1 );
+ vector<BSONObj> sortedSplitPts;
+ // points will be properly sorted using the set
+ for ( set<BSONObj>::iterator it = splitPts.begin() ; it != splitPts.end() ; ++it )
+ sortedSplitPts.push_back( *it );
+ confOut->shardCollection( finalColLong, sortKey, true, &sortedSplitPts );
+ }
+
+ map<BSONObj, int> chunkSizes;
+ {
+ // take distributed lock to prevent split / migration
+ ConnectionString config = configServer.getConnectionString();
+ DistributedLock lockSetup( config , finalColLong );
+ dist_lock_try dlk;
+
+ try{
+ int tryc = 0;
+ while ( !dlk.got() ) {
+ dlk = dist_lock_try( &lockSetup , (string)"mr-post-process" );
+ if ( ! dlk.got() ) {
+ if ( ++tryc % 100 == 0 )
+ warning() << "the collection metadata could not be locked for mapreduce, already locked by " << dlk.other() << endl;
+ sleepmillis(100);
+ }
+ }
+ }
+ catch( LockException& e ){
+ errmsg = str::stream() << "error locking distributed lock for mapreduce " << causedBy( e );
+ return false;
+ }
+
+ BSONObj finalCmdObj = finalCmd.obj();
+ results.clear();
+
+ try {
+ SHARDED->commandOp( outDB, finalCmdObj, 0, finalColLong, BSONObj(), results );
+ ok = true;
+ }
+ catch( DBException& e ){
+ e.addContext( str::stream() << "could not run final reduce command on all shards for ns " << fullns << ", output " << finalColLong );
+ throw;
+ }
+
+ for ( map<Shard,BSONObj>::iterator i = results.begin(); i != results.end(); ++i ){
+
+ string server = i->first.getConnString();
+ singleResult = i->second;
+
+ BSONObj counts = singleResult.getObjectField("counts");
+ reduceCount += counts.getIntField("reduce");
+ outputCount += counts.getIntField("output");
+ postCountsB.append(server, counts);
+
+ // get the size inserted for each chunk
+ // split cannot be called here since we already have the distributed lock
+ if (singleResult.hasField("chunkSizes")) {
+ vector<BSONElement> sizes = singleResult.getField("chunkSizes").Array();
+ for (unsigned int i = 0; i < sizes.size(); i += 2) {
+ BSONObj key = sizes[i].Obj().getOwned();
+ long long size = sizes[i+1].numberLong();
+ assert( size < 0x7fffffff );
+ chunkSizes[key] = static_cast<int>(size);
+ }
+ }
+ }
+ }
+
+ // do the splitting round
+ ChunkManagerPtr cm = confOut->getChunkManagerIfExists( finalColLong );
+ for ( map<BSONObj, int>::iterator it = chunkSizes.begin() ; it != chunkSizes.end() ; ++it ) {
+ BSONObj key = it->first;
+ int size = it->second;
+ assert( size < 0x7fffffff );
+
+ // key reported should be the chunk's minimum
+ ChunkPtr c = cm->findChunk(key);
+ if ( !c ) {
+ warning() << "Mongod reported " << size << " bytes inserted for key " << key << " but can't find chunk" << endl;
+ } else {
+ c->splitIfShould( size );
+ }
+ }
+ }
+
+ try {
+ // drop collections with tmp results on each shard
+ for ( set<ServerAndQuery>::iterator i=servers.begin(); i!=servers.end(); i++ ) {
+ ScopedDbConnection conn( i->_server );
+ conn->dropCollection( dbName + "." + shardResultCollection );
+ conn.done();
+ }
+        } catch ( std::exception& e ) {
+            log() << "Cannot clean up shard results" << causedBy( e ) << endl;
+ }
+
+ if ( ! ok ) {
+ errmsg = "final reduce failed: ";
+ errmsg += singleResult.toString();
+ return 0;
+ }
+
+ // copy some elements from a single result
+ // annoying that we have to copy all results for inline, but no way around it
+ if (singleResult.hasField("result"))
+ result.append(singleResult.getField("result"));
+ else if (singleResult.hasField("results"))
+ result.append(singleResult.getField("results"));
+
+ BSONObjBuilder countsB(32);
+ // input stat is determined by aggregate MR job
+ countsB.append("input", aggCounts.getField("input").numberLong());
+ countsB.append("emit", aggCounts.getField("emit").numberLong());
+
+ // reduce count is sum of all reduces that happened
+ countsB.append("reduce", aggCounts.getField("reduce").numberLong() + reduceCount);
+
+            // output is determined by post processing on each shard
+ countsB.append("output", outputCount);
+ result.append( "counts" , countsB.done() );
+
+ timingBuilder.append( "postProcessing" , t2.millis() );
+
+ result.append( "timeMillis" , t.millis() );
+ result.append( "timing" , timingBuilder.done() );
+ result.append("shardCounts", shardCounts);
+ result.append("postProcessCounts", postCountsB.done());
+ return 1;
+ }
+ } mrCmd;
+
+ class ApplyOpsCmd : public PublicGridCommand {
+ public:
+ ApplyOpsCmd() : PublicGridCommand( "applyOps" ) {}
+ virtual bool run(const string& dbName , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ errmsg = "applyOps not allowed through mongos";
+ return false;
+ }
+ } applyOpsCmd;
+
+
+ class CompactCmd : public PublicGridCommand {
+ public:
+ CompactCmd() : PublicGridCommand( "compact" ) {}
+ virtual bool run(const string& dbName , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ errmsg = "compact not allowed through mongos";
+ return false;
+ }
+ } compactCmd;
+
+
+ /*
+ Note these are in the pub_grid_cmds namespace, so they don't
+ conflict with those in db/commands/pipeline_command.cpp.
+ */
+ class PipelineCommand :
+ public PublicGridCommand {
+ public:
+ PipelineCommand();
+
+ // virtuals from Command
+ virtual bool run(const string &dbName , BSONObj &cmdObj,
+ int options, string &errmsg,
+ BSONObjBuilder &result, bool fromRepl);
+
+ private:
+
+ };
+
+
+ /* -------------------- PipelineCommand ----------------------------- */
+
+ static const PipelineCommand pipelineCommand;
+
+ PipelineCommand::PipelineCommand():
+ PublicGridCommand(Pipeline::commandName) {
+ }
+
+ bool PipelineCommand::run(const string &dbName , BSONObj &cmdObj,
+ int options, string &errmsg,
+ BSONObjBuilder &result, bool fromRepl) {
+ //const string shardedOutputCollection = getTmpName( collection );
+
+ intrusive_ptr<ExpressionContext> pCtx(
+ ExpressionContext::create());
+ pCtx->setInRouter(true);
+
+ /* parse the pipeline specification */
+ boost::shared_ptr<Pipeline> pPipeline(
+ Pipeline::parseCommand(errmsg, cmdObj, pCtx));
+ if (!pPipeline.get())
+ return false; // there was some parsing error
+
+ string fullns(dbName + "." + pPipeline->getCollectionName());
+
+ /*
+ If the system isn't running sharded, or the target collection
+ isn't sharded, pass this on to a mongod.
+ */
+ DBConfigPtr conf(grid.getDBConfig(dbName , false));
+ if (!conf || !conf->isShardingEnabled() || !conf->isSharded(fullns))
+ return passthrough(conf, cmdObj, result);
+
+ /* split the pipeline into pieces for mongods and this mongos */
+ boost::shared_ptr<Pipeline> pShardPipeline(
+ pPipeline->splitForSharded());
+
+ /* create the command for the shards */
+ BSONObjBuilder commandBuilder;
+ pShardPipeline->toBson(&commandBuilder);
+ BSONObj shardedCommand(commandBuilder.done());
+
+ BSONObjBuilder shardQueryBuilder;
+ BSONObjBuilder shardSortBuilder;
+ pShardPipeline->getCursorMods(
+ &shardQueryBuilder, &shardSortBuilder);
+ BSONObj shardQuery(shardQueryBuilder.done());
+ BSONObj shardSort(shardSortBuilder.done());
+
+ ChunkManagerPtr cm(conf->getChunkManager(fullns));
+ set<Shard> shards;
+ cm->getShardsForQuery(shards, shardQuery);
+
+ /*
+ From MRCmd::Run: "we need to use our connections to the shard
+ so filtering is done correctly for un-owned docs so we allocate
+ them in our thread and hand off"
+ */
+ vector<boost::shared_ptr<ShardConnection> > shardConns;
+ list<boost::shared_ptr<Future::CommandResult> > futures;
+ for (set<Shard>::iterator i=shards.begin(), end=shards.end();
+ i != end; i++) {
+ boost::shared_ptr<ShardConnection> temp(
+ new ShardConnection(i->getConnString(), fullns));
+ assert(temp->get());
+ futures.push_back(
+ Future::spawnCommand(i->getConnString(), dbName,
+ shardedCommand , 0, temp->get()));
+ shardConns.push_back(temp);
+ }
+
+ /* wrap the list of futures with a source */
+ intrusive_ptr<DocumentSourceCommandFutures> pSource(
+ DocumentSourceCommandFutures::create(errmsg, &futures));
+
+ /* run the pipeline */
+ bool failed = pPipeline->run(result, errmsg, pSource);
+
+/*
+ BSONObjBuilder shardresults;
+ for (list<boost::shared_ptr<Future::CommandResult> >::iterator i(
+ futures.begin()); i!=futures.end(); ++i) {
+ boost::shared_ptr<Future::CommandResult> res(*i);
+ if (!res->join()) {
+ error() << "sharded pipeline failed on shard: " <<
+ res->getServer() << " error: " << res->result() << endl;
+ result.append( "cause" , res->result() );
+ errmsg = "mongod pipeline failed: ";
+ errmsg += res->result().toString();
+ failed = true;
+ continue;
+ }
+
+ shardresults.append( res->getServer() , res->result() );
+ }
+*/
+
+ for(unsigned i = 0; i < shardConns.size(); ++i)
+ shardConns[i]->done();
+
+ if (failed && (errmsg.length() > 0))
+ return false;
+
+ return true;
+ }
+
+ } // namespace pub_grid_cmds
+
+ bool Command::runAgainstRegistered(const char *ns, BSONObj& jsobj, BSONObjBuilder& anObjBuilder, int queryOptions) {
+ const char *p = strchr(ns, '.');
+ if ( !p ) return false;
+ if ( strcmp(p, ".$cmd") != 0 ) return false;
+
+ bool ok = false;
+
+ BSONElement e = jsobj.firstElement();
+ map<string,Command*>::iterator i;
+
+ if ( e.eoo() )
+ ;
+ // check for properly registered command objects.
+ else if ( (i = _commands->find(e.fieldName())) != _commands->end() ) {
+ string errmsg;
+ Command *c = i->second;
+ ClientInfo *client = ClientInfo::get();
+ AuthenticationInfo *ai = client->getAuthenticationInfo();
+
+ char cl[256];
+ nsToDatabase(ns, cl);
+ if( c->requiresAuth() && !ai->isAuthorizedForLock(cl, c->locktype())) {
+ ok = false;
+ errmsg = "unauthorized";
+                anObjBuilder.append( "note" , str::stream() << "need to be authorized on db: " << cl << " for command: " << e.fieldName() );
+ }
+ else if( c->adminOnly() && c->localHostOnlyIfNoAuth( jsobj ) && noauth && !ai->isLocalHost ) {
+ ok = false;
+ errmsg = "unauthorized: this command must run from localhost when running db without auth";
+ log() << "command denied: " << jsobj.toString() << endl;
+ }
+ else if ( c->adminOnly() && !startsWith(ns, "admin.") ) {
+ ok = false;
+ errmsg = "access denied - use admin db";
+ }
+ else if ( jsobj.getBoolField( "help" ) ) {
+ stringstream help;
+ help << "help for: " << e.fieldName() << " ";
+ c->help( help );
+ anObjBuilder.append( "help" , help.str() );
+ }
+ else {
+ try {
+ ok = c->run( nsToDatabase( ns ) , jsobj, queryOptions, errmsg, anObjBuilder, false );
+ }
+ catch (DBException& e) {
+ int code = e.getCode();
+ if (code == RecvStaleConfigCode) { // code for StaleConfigException
+ throw;
+ }
+
+ {
+ stringstream ss;
+ ss << "exception: " << e.what();
+ anObjBuilder.append( "errmsg" , ss.str() );
+ anObjBuilder.append( "code" , code );
+ }
+ }
+ }
+
+ BSONObj tmp = anObjBuilder.asTempObj();
+ bool have_ok = tmp.hasField("ok");
+ bool have_errmsg = tmp.hasField("errmsg");
+
+ if (!have_ok)
+ anObjBuilder.append( "ok" , ok ? 1.0 : 0.0 );
+
+ if ( !ok && !have_errmsg) {
+ anObjBuilder.append("errmsg", errmsg);
+ uassert_nothrow(errmsg.c_str());
+ }
+ return true;
+ }
+
+ return false;
+ }
+
+} // namespace mongo
+
diff --git a/src/mongo/s/config.cpp b/src/mongo/s/config.cpp
new file mode 100644
index 00000000000..b4923b56a1f
--- /dev/null
+++ b/src/mongo/s/config.cpp
@@ -0,0 +1,879 @@
+// config.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "../util/net/message.h"
+#include "../util/stringutils.h"
+#include "../util/unittest.h"
+#include "../client/connpool.h"
+#include "../client/model.h"
+#include "../db/pdfile.h"
+#include "../db/cmdline.h"
+
+#include "chunk.h"
+#include "config.h"
+#include "grid.h"
+#include "server.h"
+
+namespace mongo {
+
+ int ConfigServer::VERSION = 3;
+ Shard Shard::EMPTY;
+
+ string ShardNS::shard = "config.shards";
+ string ShardNS::database = "config.databases";
+ string ShardNS::collection = "config.collections";
+ string ShardNS::chunk = "config.chunks";
+
+ string ShardNS::mongos = "config.mongos";
+ string ShardNS::settings = "config.settings";
+
+ BSONField<bool> ShardFields::draining("draining");
+ BSONField<long long> ShardFields::maxSize ("maxSize");
+
+ OID serverID;
+
+ /* --- DBConfig --- */
+
+ DBConfig::CollectionInfo::CollectionInfo( const BSONObj& in ) {
+ _dirty = false;
+ _dropped = in["dropped"].trueValue();
+ if ( in["key"].isABSONObj() ) {
+ _key = in["key"].Obj().getOwned();
+            _unique = in["unique"].trueValue();
+            shard( in["_id"].String() , _key , _unique );
+ }
+ _dirty = false;
+ }
+
+
+ void DBConfig::CollectionInfo::shard( const string& ns , const ShardKeyPattern& key , bool unique ) {
+ _cm.reset( new ChunkManager( ns , key , unique ) );
+ _key = key.key().getOwned();
+        _unique = unique;
+ _dirty = true;
+ _dropped = false;
+ }
+
+ void DBConfig::CollectionInfo::unshard() {
+ _cm.reset();
+ _dropped = true;
+ _dirty = true;
+ _key = BSONObj();
+ }
+
+ void DBConfig::CollectionInfo::save( const string& ns , DBClientBase* conn ) {
+ BSONObj key = BSON( "_id" << ns );
+
+ BSONObjBuilder val;
+ val.append( "_id" , ns );
+ val.appendDate( "lastmod" , time(0) );
+ val.appendBool( "dropped" , _dropped );
+ if ( _cm )
+ _cm->getInfo( val );
+
+ conn->update( ShardNS::collection , key , val.obj() , true );
+ string err = conn->getLastError();
+ uassert( 13473 , (string)"failed to save collection (" + ns + "): " + err , err.size() == 0 );
+
+ _dirty = false;
+ }
+
+ bool DBConfig::isSharded( const string& ns ) {
+ if ( ! _shardingEnabled )
+ return false;
+ scoped_lock lk( _lock );
+ return _isSharded( ns );
+ }
+
+ bool DBConfig::_isSharded( const string& ns ) {
+ if ( ! _shardingEnabled )
+ return false;
+ Collections::iterator i = _collections.find( ns );
+ if ( i == _collections.end() )
+ return false;
+ return i->second.isSharded();
+ }
+
+ ShardPtr DBConfig::getShardIfExists( const string& ns ){
+ try{
+ // TODO: this function assumes the _primary will not change under-the-covers, but so does
+ // getShard() in general
+ return ShardPtr( new Shard( getShard( ns ) ) );
+ }
+ catch( AssertionException& e ){
+ warning() << "primary not found for " << ns << causedBy( e ) << endl;
+ return ShardPtr();
+ }
+ }
+
+ const Shard& DBConfig::getShard( const string& ns ) {
+ if ( isSharded( ns ) )
+ return Shard::EMPTY;
+
+ uassert( 10178 , "no primary!" , _primary.ok() );
+ return _primary;
+ }
+
+ void DBConfig::enableSharding() {
+ if ( _shardingEnabled )
+ return;
+
+ assert( _name != "config" );
+
+ scoped_lock lk( _lock );
+ _shardingEnabled = true;
+ _save();
+ }
+
+ /**
+ *
+ */
+ ChunkManagerPtr DBConfig::shardCollection( const string& ns , ShardKeyPattern fieldsAndOrder , bool unique , vector<BSONObj>* initPoints, vector<Shard>* initShards ) {
+ uassert( 8042 , "db doesn't have sharding enabled" , _shardingEnabled );
+ uassert( 13648 , str::stream() << "can't shard collection because not all config servers are up" , configServer.allUp() );
+
+
+ {
+ scoped_lock lk( _lock );
+
+ CollectionInfo& ci = _collections[ns];
+ uassert( 8043 , "collection already sharded" , ! ci.isSharded() );
+
+ log() << "enable sharding on: " << ns << " with shard key: " << fieldsAndOrder << endl;
+
+ ci.shard( ns , fieldsAndOrder , unique );
+ ChunkManagerPtr cm = ci.getCM();
+ uassert( 13449 , "collections already sharded" , (cm->numChunks() == 0) );
+
+ cm->createFirstChunks( getPrimary() , initPoints, initShards );
+ _save();
+ }
+
+ ChunkManagerPtr manager = getChunkManager(ns,true,true);
+
+        // Tell the primary mongod to refresh its data
+ // TODO: Think the real fix here is for mongos to just assume all collections sharded, when we get there
+ for( int i = 0; i < 4; i++ ){
+ if( i == 3 ){
+ warning() << "too many tries updating initial version of " << ns << " on shard primary " << getPrimary() <<
+ ", other mongoses may not see the collection as sharded immediately" << endl;
+ break;
+ }
+ try {
+ ShardConnection conn( getPrimary(), ns );
+ conn.setVersion();
+ conn.done();
+ break;
+ }
+ catch( DBException& e ){
+ warning() << "could not update initial version of " << ns << " on shard primary " << getPrimary() <<
+ causedBy( e ) << endl;
+ }
+ sleepsecs( i );
+ }
+
+ return manager;
+ }
+
+ bool DBConfig::removeSharding( const string& ns ) {
+ if ( ! _shardingEnabled ) {
+ return false;
+ }
+
+ scoped_lock lk( _lock );
+
+ Collections::iterator i = _collections.find( ns );
+
+ if ( i == _collections.end() )
+ return false;
+
+ CollectionInfo& ci = _collections[ns];
+ if ( ! ci.isSharded() )
+ return false;
+
+ ci.unshard();
+ _save( false, true );
+ return true;
+ }
+
+ ChunkManagerPtr DBConfig::getChunkManagerIfExists( const string& ns, bool shouldReload, bool forceReload ){
+ try{
+ return getChunkManager( ns, shouldReload, forceReload );
+ }
+ catch( AssertionException& e ){
+ warning() << "chunk manager not found for " << ns << causedBy( e ) << endl;
+ return ChunkManagerPtr();
+ }
+ }
+
+ ChunkManagerPtr DBConfig::getChunkManager( const string& ns , bool shouldReload, bool forceReload ) {
+ BSONObj key;
+ bool unique;
+ ShardChunkVersion oldVersion;
+
+ {
+ scoped_lock lk( _lock );
+
+ CollectionInfo& ci = _collections[ns];
+
+ bool earlyReload = ! ci.isSharded() && ( shouldReload || forceReload );
+ if ( earlyReload ) {
+                // this is to catch cases where this is a new sharded collection
+ _reload();
+ ci = _collections[ns];
+ }
+ massert( 10181 , (string)"not sharded:" + ns , ci.isSharded() );
+ assert( ! ci.key().isEmpty() );
+
+ if ( ! ( shouldReload || forceReload ) || earlyReload )
+ return ci.getCM();
+
+ key = ci.key().copy();
+ unique = ci.unique();
+ if ( ci.getCM() )
+ oldVersion = ci.getCM()->getVersion();
+ }
+
+ assert( ! key.isEmpty() );
+
+ BSONObj newest;
+ if ( oldVersion > 0 && ! forceReload ) {
+ ScopedDbConnection conn( configServer.modelServer() , 30.0 );
+ newest = conn->findOne( ShardNS::chunk ,
+ Query( BSON( "ns" << ns ) ).sort( "lastmod" , -1 ) );
+ conn.done();
+
+ if ( ! newest.isEmpty() ) {
+ ShardChunkVersion v = newest["lastmod"];
+ if ( v == oldVersion ) {
+ scoped_lock lk( _lock );
+ CollectionInfo& ci = _collections[ns];
+ massert( 15885 , str::stream() << "not sharded after reloading from chunks : " << ns , ci.isSharded() );
+ return ci.getCM();
+ }
+ }
+
+ }
+ else if( oldVersion == 0 ){
+ warning() << "version 0 found when " << ( forceReload ? "reloading" : "checking" ) << " chunk manager"
+ << ", collection '" << ns << "' initially detected as sharded" << endl;
+ }
+
+ // we are not locked now, and want to load a new ChunkManager
+
+ auto_ptr<ChunkManager> temp;
+
+ {
+ scoped_lock lll ( _hitConfigServerLock );
+
+ if ( ! newest.isEmpty() && ! forceReload ) {
+ // if we have a target we're going for
+ // see if we've hit already
+
+ scoped_lock lk( _lock );
+ CollectionInfo& ci = _collections[ns];
+ if ( ci.isSharded() && ci.getCM() ) {
+ ShardChunkVersion currentVersion = newest["lastmod"];
+ if ( currentVersion == ci.getCM()->getVersion() ) {
+ return ci.getCM();
+ }
+ }
+
+ }
+
+ temp.reset( new ChunkManager( ns , key , unique ) );
+ if ( temp->numChunks() == 0 ) {
+ // maybe we're not sharded any more
+ reload(); // this is a full reload
+ return getChunkManager( ns , false );
+ }
+ }
+
+ scoped_lock lk( _lock );
+
+ CollectionInfo& ci = _collections[ns];
+ massert( 14822 , (string)"state changed in the middle: " + ns , ci.isSharded() );
+
+ bool forced = false;
+ if ( temp->getVersion() > ci.getCM()->getVersion() ||
+ (forced = (temp->getVersion() == ci.getCM()->getVersion() && forceReload ) ) ) {
+
+ if( forced ){
+ warning() << "chunk manager reload forced for collection '" << ns << "', config version is " << temp->getVersion() << endl;
+ }
+
+ // we only want to reset if we're newer or equal and forced
+ // otherwise we go into a bad cycle
+ ci.resetCM( temp.release() );
+ }
+
+ massert( 15883 , str::stream() << "not sharded after chunk manager reset : " << ns , ci.isSharded() );
+ return ci.getCM();
+ }
+
+ void DBConfig::setPrimary( string s ) {
+ scoped_lock lk( _lock );
+ _primary.reset( s );
+ _save();
+ }
+
+ void DBConfig::serialize(BSONObjBuilder& to) {
+ to.append("_id", _name);
+ to.appendBool("partitioned", _shardingEnabled );
+ to.append("primary", _primary.getName() );
+ }
+
+ void DBConfig::unserialize(const BSONObj& from) {
+ LOG(1) << "DBConfig unserialize: " << _name << " " << from << endl;
+ assert( _name == from["_id"].String() );
+
+ _shardingEnabled = from.getBoolField("partitioned");
+ _primary.reset( from.getStringField("primary") );
+
+ // In the 1.5.x series, we used to have collection metadata nested in the database entry. The 1.6.x series
+ // had migration code that ported that info to where it belongs now: the 'collections' collection. We now
+ // just assert that we're not migrating from a 1.5.x directly into a 1.7.x without first converting.
+ BSONObj sharded = from.getObjectField( "sharded" );
+ if ( ! sharded.isEmpty() )
+ uasserted( 13509 , "can't migrate from 1.5.x release to the current one; need to upgrade to 1.6.x first");
+ }
+
+ bool DBConfig::load() {
+ scoped_lock lk( _lock );
+ return _load();
+ }
+
+ bool DBConfig::_load() {
+ ScopedDbConnection conn( configServer.modelServer(), 30.0 );
+
+ BSONObj o = conn->findOne( ShardNS::database , BSON( "_id" << _name ) );
+
+ if ( o.isEmpty() ) {
+ conn.done();
+ return false;
+ }
+
+ unserialize( o );
+
+ BSONObjBuilder b;
+ b.appendRegex( "_id" , (string)"^" + _name + "\\." );
+
+ auto_ptr<DBClientCursor> cursor = conn->query( ShardNS::collection ,b.obj() );
+ assert( cursor.get() );
+ while ( cursor->more() ) {
+ BSONObj o = cursor->next();
+ if( o["dropped"].trueValue() ) _collections.erase( o["_id"].String() );
+ else _collections[o["_id"].String()] = CollectionInfo( o );
+ }
+
+ conn.done();
+
+ return true;
+ }
+
+ void DBConfig::_save( bool db, bool coll ) {
+ ScopedDbConnection conn( configServer.modelServer(), 30.0 );
+
+ if( db ){
+
+ BSONObj n;
+ {
+ BSONObjBuilder b;
+ serialize(b);
+ n = b.obj();
+ }
+
+ conn->update( ShardNS::database , BSON( "_id" << _name ) , n , true );
+ string err = conn->getLastError();
+ uassert( 13396 , (string)"DBConfig save failed: " + err , err.size() == 0 );
+
+ }
+
+ if( coll ){
+
+ for ( Collections::iterator i=_collections.begin(); i!=_collections.end(); ++i ) {
+ if ( ! i->second.isDirty() )
+ continue;
+ i->second.save( i->first , conn.get() );
+ }
+
+ }
+
+ conn.done();
+ }
+
+ bool DBConfig::reload() {
+ scoped_lock lk( _lock );
+ return _reload();
+ }
+
+ bool DBConfig::_reload() {
+        // TODO: I don't think this is 100% correct
+ return _load();
+ }
+
+ bool DBConfig::dropDatabase( string& errmsg ) {
+ /**
+ * 1) make sure everything is up
+ * 2) update config server
+ * 3) drop and reset sharded collections
+ * 4) drop and reset primary
+ * 5) drop everywhere to clean up loose ends
+ */
+
+ log() << "DBConfig::dropDatabase: " << _name << endl;
+ configServer.logChange( "dropDatabase.start" , _name , BSONObj() );
+
+ // 1
+ if ( ! configServer.allUp( errmsg ) ) {
+ LOG(1) << "\t DBConfig::dropDatabase not all up" << endl;
+ return 0;
+ }
+
+ // 2
+ grid.removeDB( _name );
+ {
+ ScopedDbConnection conn( configServer.modelServer(), 30.0 );
+ conn->remove( ShardNS::database , BSON( "_id" << _name ) );
+ errmsg = conn->getLastError();
+ if ( ! errmsg.empty() ) {
+ log() << "could not drop '" << _name << "': " << errmsg << endl;
+ conn.done();
+ return false;
+ }
+
+ conn.done();
+ }
+
+ if ( ! configServer.allUp( errmsg ) ) {
+ log() << "error removing from config server even after checking!" << endl;
+ return 0;
+ }
+ LOG(1) << "\t removed entry from config server for: " << _name << endl;
+
+ set<Shard> allServers;
+
+ // 3
+ while ( true ) {
+ int num = 0;
+ if ( ! _dropShardedCollections( num , allServers , errmsg ) )
+ return 0;
+ log() << " DBConfig::dropDatabase: " << _name << " dropped sharded collections: " << num << endl;
+ if ( num == 0 )
+ break;
+ }
+
+ // 4
+ {
+ ScopedDbConnection conn( _primary, 30.0 );
+ BSONObj res;
+ if ( ! conn->dropDatabase( _name , &res ) ) {
+ errmsg = res.toString();
+ return 0;
+ }
+ conn.done();
+ }
+
+ // 5
+ for ( set<Shard>::iterator i=allServers.begin(); i!=allServers.end(); i++ ) {
+ ScopedDbConnection conn( *i, 30.0 );
+ BSONObj res;
+ if ( ! conn->dropDatabase( _name , &res ) ) {
+ errmsg = res.toString();
+ return 0;
+ }
+ conn.done();
+ }
+
+ LOG(1) << "\t dropped primary db for: " << _name << endl;
+
+ configServer.logChange( "dropDatabase" , _name , BSONObj() );
+ return true;
+ }
+
+ bool DBConfig::_dropShardedCollections( int& num, set<Shard>& allServers , string& errmsg ) {
+ num = 0;
+ set<string> seen;
+ while ( true ) {
+ Collections::iterator i = _collections.begin();
+ for ( ; i != _collections.end(); ++i ) {
+ // log() << "coll : " << i->first << " and " << i->second.isSharded() << endl;
+ if ( i->second.isSharded() )
+ break;
+ }
+
+ if ( i == _collections.end() )
+ break;
+
+ if ( seen.count( i->first ) ) {
+ errmsg = "seen a collection twice!";
+ return false;
+ }
+
+ seen.insert( i->first );
+ LOG(1) << "\t dropping sharded collection: " << i->first << endl;
+
+ i->second.getCM()->getAllShards( allServers );
+ i->second.getCM()->drop( i->second.getCM() );
+ uassert( 10176 , str::stream() << "shard state missing for " << i->first , removeSharding( i->first ) );
+
+ num++;
+ uassert( 10184 , "_dropShardedCollections too many collections - bailing" , num < 100000 );
+ LOG(2) << "\t\t dropped " << num << " so far" << endl;
+ }
+
+ return true;
+ }
+
+ void DBConfig::getAllShards(set<Shard>& shards) const {
+ scoped_lock lk( _lock );
+ shards.insert(getPrimary());
+ for (Collections::const_iterator it(_collections.begin()), end(_collections.end()); it != end; ++it) {
+ if (it->second.isSharded()) {
+ it->second.getCM()->getAllShards(shards);
+ } // TODO: handle collections on non-primary shard
+ }
+ }
+
+ /* --- ConfigServer ---- */
+
+ ConfigServer::ConfigServer() : DBConfig( "config" ) {
+ _shardingEnabled = false;
+ }
+
+ ConfigServer::~ConfigServer() {
+ }
+
+ bool ConfigServer::init( string s ) {
+ vector<string> configdbs;
+ splitStringDelim( s, &configdbs, ',' );
+ return init( configdbs );
+ }
+
+ bool ConfigServer::init( vector<string> configHosts ) {
+
+ uassert( 10187 , "need configdbs" , configHosts.size() );
+
+ string hn = getHostName();
+ if ( hn.empty() ) {
+ sleepsecs(5);
+ dbexit( EXIT_BADOPTIONS );
+ }
+
+ set<string> hosts;
+ for ( size_t i=0; i<configHosts.size(); i++ ) {
+ string host = configHosts[i];
+ hosts.insert( getHost( host , false ) );
+ configHosts[i] = getHost( host , true );
+ }
+
+ for ( set<string>::iterator i=hosts.begin(); i!=hosts.end(); i++ ) {
+ string host = *i;
+ bool ok = false;
+ for ( int x=10; x>0; x-- ) {
+ if ( ! hostbyname( host.c_str() ).empty() ) {
+ ok = true;
+ break;
+ }
+ log() << "can't resolve DNS for [" << host << "] sleeping and trying " << x << " more times" << endl;
+ sleepsecs( 10 );
+ }
+ if ( ! ok )
+ return false;
+ }
+
+ _config = configHosts;
+
+ string fullString;
+ joinStringDelim( configHosts, &fullString, ',' );
+ _primary.setAddress( ConnectionString( fullString , ConnectionString::SYNC ) );
+ LOG(1) << " config string : " << fullString << endl;
+
+ return true;
+ }
+
+ bool ConfigServer::checkConfigServersConsistent( string& errmsg , int tries ) const {
+ if ( tries <= 0 )
+ return false;
+
+ unsigned firstGood = 0;
+ int up = 0;
+ vector<BSONObj> res;
+ for ( unsigned i=0; i<_config.size(); i++ ) {
+ BSONObj x;
+ try {
+ ScopedDbConnection conn( _config[i], 30.0 );
+
+ // check auth
+ conn->update("config.foo.bar", BSONObj(), BSON("x" << 1));
+ conn->simpleCommand( "admin", &x, "getlasterror");
+ if (x["err"].type() == String && x["err"].String() == "unauthorized") {
+ errmsg = "not authorized, did you start with --keyFile?";
+ return false;
+ }
+
+ if ( ! conn->simpleCommand( "config" , &x , "dbhash" ) )
+ x = BSONObj();
+ else {
+ x = x.getOwned();
+ if ( up == 0 )
+ firstGood = i;
+ up++;
+ }
+ conn.done();
+ }
+ catch ( SocketException& e ) {
+ warning() << " couldn't check on config server:" << _config[i] << " ok for now : " << e.toString() << endl;
+ }
+ res.push_back(x);
+ }
+
+ if ( _config.size() == 1 )
+ return true;
+
+ if ( up == 0 ) {
+ errmsg = "no config servers reachable";
+ return false;
+ }
+
+ if ( up == 1 ) {
+ log( LL_WARNING ) << "only 1 config server reachable, continuing" << endl;
+ return true;
+ }
+
+ BSONObj base = res[firstGood];
+ for ( unsigned i=firstGood+1; i<res.size(); i++ ) {
+ if ( res[i].isEmpty() )
+ continue;
+
+ string c1 = base.getFieldDotted( "collections.chunks" );
+ string c2 = res[i].getFieldDotted( "collections.chunks" );
+
+ string d1 = base.getFieldDotted( "collections.databases" );
+ string d2 = res[i].getFieldDotted( "collections.databases" );
+
+ if ( c1 == c2 && d1 == d2 )
+ continue;
+
+ stringstream ss;
+ ss << "config servers " << _config[firstGood] << " and " << _config[i] << " differ";
+ log( LL_WARNING ) << ss.str();
+ if ( tries <= 1 ) {
+ ss << "\n" << c1 << "\t" << c2 << "\n" << d1 << "\t" << d2;
+ errmsg = ss.str();
+ return false;
+ }
+
+ return checkConfigServersConsistent( errmsg , tries - 1 );
+ }
+
+ return true;
+ }
+
+ bool ConfigServer::ok( bool checkConsistency ) {
+ if ( ! _primary.ok() )
+ return false;
+
+ if ( checkConsistency ) {
+ string errmsg;
+ if ( ! checkConfigServersConsistent( errmsg ) ) {
+ log( LL_ERROR ) << "config servers not in sync! " << errmsg << warnings;
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ bool ConfigServer::allUp() {
+ string errmsg;
+ return allUp( errmsg );
+ }
+
+ bool ConfigServer::allUp( string& errmsg ) {
+ try {
+ ScopedDbConnection conn( _primary, 30.0 );
+ conn->getLastError();
+ conn.done();
+ return true;
+ }
+ catch ( DBException& ) {
+ log() << "ConfigServer::allUp : " << _primary.toString() << " seems down!" << endl;
+ errmsg = _primary.toString() + " seems down";
+ return false;
+ }
+
+ }
+
+ int ConfigServer::dbConfigVersion() {
+ ScopedDbConnection conn( _primary, 30.0 );
+ int version = dbConfigVersion( conn.conn() );
+ conn.done();
+ return version;
+ }
+
+ int ConfigServer::dbConfigVersion( DBClientBase& conn ) {
+ auto_ptr<DBClientCursor> c = conn.query( "config.version" , BSONObj() );
+ int version = 0;
+ if ( c->more() ) {
+ BSONObj o = c->next();
+ version = o["version"].numberInt();
+ uassert( 10189 , "should only have 1 thing in config.version" , ! c->more() );
+ }
+ else {
+ if ( conn.count( ShardNS::shard ) || conn.count( ShardNS::database ) ) {
+ version = 1;
+ }
+ }
+
+ return version;
+ }
+
+ void ConfigServer::reloadSettings() {
+ set<string> got;
+
+ ScopedDbConnection conn( _primary, 30.0 );
+ auto_ptr<DBClientCursor> c = conn->query( ShardNS::settings , BSONObj() );
+ assert( c.get() );
+ while ( c->more() ) {
+ BSONObj o = c->next();
+ string name = o["_id"].valuestrsafe();
+ got.insert( name );
+ if ( name == "chunksize" ) {
+ int csize = o["value"].numberInt();
+
+ // validate chunksize before proceeding
+ if ( csize == 0 ) {
+ // setting was not modified; mark as such
+ got.erase(name);
+ log() << "warning: invalid chunksize (" << csize << ") ignored" << endl;
+ } else {
+ LOG(1) << "MaxChunkSize: " << csize << endl;
+ Chunk::MaxChunkSize = csize * 1024 * 1024;
+ }
+ }
+ else if ( name == "balancer" ) {
+ // ones we ignore here
+ }
+ else {
+ log() << "warning: unknown setting [" << name << "]" << endl;
+ }
+ }
+
+ if ( ! got.count( "chunksize" ) ) {
+ conn->insert( ShardNS::settings , BSON( "_id" << "chunksize" <<
+ "value" << (Chunk::MaxChunkSize / ( 1024 * 1024 ) ) ) );
+ }
+
+
+ // indexes
+ try {
+ conn->ensureIndex( ShardNS::chunk , BSON( "ns" << 1 << "min" << 1 ) , true );
+ conn->ensureIndex( ShardNS::chunk , BSON( "ns" << 1 << "shard" << 1 << "min" << 1 ) , true );
+ conn->ensureIndex( ShardNS::chunk , BSON( "ns" << 1 << "lastmod" << 1 ) , true );
+ conn->ensureIndex( ShardNS::shard , BSON( "host" << 1 ) , true );
+ }
+ catch ( std::exception& e ) {
+ log( LL_WARNING ) << "couldn't create indexes on config db: " << e.what() << endl;
+ }
+
+ conn.done();
+ }
+
+ string ConfigServer::getHost( string name , bool withPort ) {
+ if ( name.find( ":" ) != string::npos ) {
+ if ( withPort )
+ return name;
+ return name.substr( 0 , name.find( ":" ) );
+ }
+
+ if ( withPort ) {
+ stringstream ss;
+ ss << name << ":" << CmdLine::ConfigServerPort;
+ return ss.str();
+ }
+
+ return name;
+ }
+
+ /* must never throw */
+ void ConfigServer::logChange( const string& what , const string& ns , const BSONObj& detail ) {
+ string changeID;
+
+ try {
+ // get this entry's ID so we can use on the exception code path too
+ stringstream id;
+ static AtomicUInt num;
+ id << getHostNameCached() << "-" << terseCurrentTime() << "-" << num++;
+ changeID = id.str();
+
+ // send a copy of the message to the log in case it doesn't manage to reach config.changelog
+ Client* c = currentClient.get();
+ BSONObj msg = BSON( "_id" << changeID << "server" << getHostNameCached() << "clientAddr" << (c ? c->clientAddress(true) : "N/A")
+ << "time" << DATENOW << "what" << what << "ns" << ns << "details" << detail );
+ log() << "about to log metadata event: " << msg << endl;
+
+ assert( _primary.ok() );
+
+ ScopedDbConnection conn( _primary, 30.0 );
+
+ static bool createdCapped = false;
+ if ( ! createdCapped ) {
+ try {
+ conn->createCollection( "config.changelog" , 1024 * 1024 * 10 , true );
+ }
+ catch ( UserException& e ) {
+ LOG(1) << "couldn't create changelog (like race condition): " << e << endl;
+ // don't care
+ }
+ createdCapped = true;
+ }
+
+ conn->insert( "config.changelog" , msg );
+
+ conn.done();
+
+ }
+
+ catch ( std::exception& e ) {
+ // if we got here, it means the config change is only in the log; it didn't make it to config.changelog
+ log() << "not logging config change: " << changeID << " " << e.what() << endl;
+ }
+ }
+
+ void ConfigServer::replicaSetChange( const ReplicaSetMonitor * monitor ) {
+ try {
+ Shard s = Shard::lookupRSName(monitor->getName());
+ if (s == Shard::EMPTY) {
+ log(1) << "replicaSetChange: shard not found for set: " << monitor->getServerAddress() << endl;
+ return;
+ }
+ ScopedDbConnection conn( configServer.getConnectionString(), 30.0 );
+ conn->update( ShardNS::shard , BSON( "_id" << s.getName() ) , BSON( "$set" << BSON( "host" << monitor->getServerAddress() ) ) );
+ conn.done();
+ }
+ catch ( DBException & ) {
+ error() << "RSChangeWatcher: could not update config db for set: " << monitor->getName() << " to: " << monitor->getServerAddress() << endl;
+ }
+ }
+
+ DBConfigPtr configServerPtr (new ConfigServer());
+ ConfigServer& configServer = dynamic_cast<ConfigServer&>(*configServerPtr);
+
+}
diff --git a/src/mongo/s/config.h b/src/mongo/s/config.h
new file mode 100644
index 00000000000..650371c4fa8
--- /dev/null
+++ b/src/mongo/s/config.h
@@ -0,0 +1,268 @@
+// config.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+/* This file contains things related to the "grid configuration":
+ - what machines make up the db component of our cloud
+ - where various ranges of things live
+*/
+
+#pragma once
+
+#include "../db/namespace.h"
+#include "../client/dbclient.h"
+#include "../client/model.h"
+
+#include "chunk.h"
+#include "shard.h"
+#include "shardkey.h"
+
+namespace mongo {
+
+ struct ShardNS {
+ static string shard;
+
+ static string database;
+ static string collection;
+ static string chunk;
+
+ static string mongos;
+ static string settings;
+ };
+
+ /**
+ * Field names used in the 'shards' collection.
+ */
+ struct ShardFields {
+ static BSONField<bool> draining; // is it draining chunks?
+ static BSONField<long long> maxSize; // max allowed disk space usage
+ };
+
+ class ConfigServer;
+
+ class DBConfig;
+ typedef boost::shared_ptr<DBConfig> DBConfigPtr;
+ typedef shared_ptr<Shard> ShardPtr;
+
+ extern DBConfigPtr configServerPtr;
+ extern ConfigServer& configServer;
+
+ /**
+ * top level configuration for a database
+ */
+ class DBConfig {
+
+ struct CollectionInfo {
+ CollectionInfo() {
+ _dirty = false;
+ _dropped = false;
+ }
+
+ CollectionInfo( const BSONObj& in );
+
+ bool isSharded() const {
+ return _cm.get();
+ }
+
+ ChunkManagerPtr getCM() const {
+ return _cm;
+ }
+
+ void resetCM( ChunkManager * cm ) {
+ assert(cm);
+ assert(_cm); // this has to be already sharded
+ _cm.reset( cm );
+ }
+
+ void shard( const string& ns , const ShardKeyPattern& key , bool unique );
+ void unshard();
+
+ bool isDirty() const { return _dirty; }
+ bool wasDropped() const { return _dropped; }
+
+ void save( const string& ns , DBClientBase* conn );
+
+            bool unique() const { return _unique; }
+ BSONObj key() const { return _key; }
+
+
+ private:
+ BSONObj _key;
+            bool _unique;
+ ChunkManagerPtr _cm;
+ bool _dirty;
+ bool _dropped;
+ };
+
+ typedef map<string,CollectionInfo> Collections;
+
+ public:
+
+ DBConfig( string name )
+ : _name( name ) ,
+ _primary("config","") ,
+ _shardingEnabled(false),
+ _lock("DBConfig") ,
+ _hitConfigServerLock( "DBConfig::_hitConfigServerLock" ) {
+ assert( name.size() );
+ }
+ virtual ~DBConfig() {}
+
+ string getName() { return _name; };
+
+ /**
+ * @return if anything in this db is partitioned or not
+ */
+ bool isShardingEnabled() {
+ return _shardingEnabled;
+ }
+
+ void enableSharding();
+ ChunkManagerPtr shardCollection( const string& ns , ShardKeyPattern fieldsAndOrder , bool unique , vector<BSONObj>* initPoints=0, vector<Shard>* initShards=0 );
+
+ /**
+ @return true if there was sharding info to remove
+ */
+ bool removeSharding( const string& ns );
+
+ /**
+ * @return whether or not the 'ns' collection is partitioned
+ */
+ bool isSharded( const string& ns );
+
+ ChunkManagerPtr getChunkManager( const string& ns , bool reload = false, bool forceReload = false );
+ ChunkManagerPtr getChunkManagerIfExists( const string& ns , bool reload = false, bool forceReload = false );
+
+ const Shard& getShard( const string& ns );
+ /**
+         * @return the correct shard for the ns
+ * if the namespace is sharded, will return NULL
+ */
+ ShardPtr getShardIfExists( const string& ns );
+
+ const Shard& getPrimary() const {
+ uassert( 8041 , (string)"no primary shard configured for db: " + _name , _primary.ok() );
+ return _primary;
+ }
+
+ void setPrimary( string s );
+
+ bool load();
+ bool reload();
+
+ bool dropDatabase( string& errmsg );
+
+ // model stuff
+
+ // lockless loading
+ void serialize(BSONObjBuilder& to);
+
+ void unserialize(const BSONObj& from);
+
+ void getAllShards(set<Shard>& shards) const;
+
+ protected:
+
+ /**
+ lockless
+ */
+ bool _isSharded( const string& ns );
+
+ bool _dropShardedCollections( int& num, set<Shard>& allServers , string& errmsg );
+
+ bool _load();
+ bool _reload();
+ void _save( bool db = true, bool coll = true );
+
+ string _name; // e.g. "alleyinsider"
+ Shard _primary; // e.g. localhost , mongo.foo.com:9999
+ bool _shardingEnabled;
+
+ //map<string,CollectionInfo> _sharded; // { "alleyinsider.blog.posts" : { ts : 1 } , ... ] - all ns that are sharded
+ //map<string,ChunkManagerPtr> _shards; // this will only have entries for things that have been looked at
+
+ Collections _collections;
+
+ mutable mongo::mutex _lock; // TODO: change to r/w lock ??
+ mutable mongo::mutex _hitConfigServerLock;
+ };
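+
+    // Usage sketch (illustrative only; the collection and key names are made up, and a
+    // DBConfigPtr is normally obtained from the grid rather than constructed directly):
+    //
+    //     DBConfigPtr db = ...; // looked up elsewhere for database "test"
+    //     db->enableSharding();
+    //     ChunkManagerPtr cm = db->shardCollection( "test.users" ,
+    //                                               ShardKeyPattern( BSON( "uid" << 1 ) ) ,
+    //                                               false ); // unique = false
+    //     if ( db->isSharded( "test.users" ) )
+    //         cm = db->getChunkManager( "test.users" );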
+
+ class ConfigServer : public DBConfig {
+ public:
+
+ ConfigServer();
+ ~ConfigServer();
+
+ bool ok( bool checkConsistency = false );
+
+ virtual string modelServer() {
+ uassert( 10190 , "ConfigServer not setup" , _primary.ok() );
+ return _primary.getConnString();
+ }
+
+ /**
+ call at startup, this will initiate connection to the grid db
+ */
+ bool init( vector<string> configHosts );
+
+ bool init( string s );
+
+ bool allUp();
+ bool allUp( string& errmsg );
+
+ int dbConfigVersion();
+ int dbConfigVersion( DBClientBase& conn );
+
+ void reloadSettings();
+
+ /**
+ * @return 0 = ok, otherwise error #
+ */
+ int checkConfigVersion( bool upgrade );
+
+ /**
+ * Create a metadata change log entry in the config.changelog collection.
+ *
+ * @param what e.g. "split" , "migrate"
+ * @param ns to which collection the metadata change is being applied
+ * @param msg additional info about the metadata change
+ *
+ * This call is guaranteed never to throw.
+ */
+ void logChange( const string& what , const string& ns , const BSONObj& detail = BSONObj() );
+
+ ConnectionString getConnectionString() const {
+ return ConnectionString( _primary.getConnString() , ConnectionString::SYNC );
+ }
+
+ void replicaSetChange( const ReplicaSetMonitor * monitor );
+
+ static int VERSION;
+
+
+ /**
+ * check to see if all config servers have the same state
+         * will retry up to 'tries' times to make sure we are not catching the servers in a bad state
+ */
+ bool checkConfigServersConsistent( string& errmsg , int tries = 4 ) const;
+
+ private:
+ string getHost( string name , bool withPort );
+ vector<string> _config;
+ };
+
+} // namespace mongo
diff --git a/src/mongo/s/config_migrate.cpp b/src/mongo/s/config_migrate.cpp
new file mode 100644
index 00000000000..fff023cfb5b
--- /dev/null
+++ b/src/mongo/s/config_migrate.cpp
@@ -0,0 +1,196 @@
+// config_migrate.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "../util/net/message.h"
+#include "../util/unittest.h"
+#include "../client/connpool.h"
+#include "../client/model.h"
+#include "../db/pdfile.h"
+#include "../db/cmdline.h"
+
+#include "server.h"
+#include "config.h"
+#include "chunk.h"
+
+namespace mongo {
+
+ int ConfigServer::checkConfigVersion( bool upgrade ) {
+ int cur = dbConfigVersion();
+ if ( cur == VERSION )
+ return 0;
+
+ if ( cur == 0 ) {
+ ScopedDbConnection conn( _primary );
+ conn->insert( "config.version" , BSON( "_id" << 1 << "version" << VERSION ) );
+ pool.flush();
+ assert( VERSION == dbConfigVersion( conn.conn() ) );
+ conn.done();
+ return 0;
+ }
+
+ if ( cur == 2 ) {
+
+ // need to upgrade
+ assert( VERSION == 3 );
+ if ( ! upgrade ) {
+ log() << "newer version of mongo meta data\n"
+ << "need to --upgrade after shutting all mongos down"
+ << endl;
+ return -9;
+ }
+
+ ScopedDbConnection conn( _primary );
+
+ // do a backup
+ string backupName;
+ {
+ stringstream ss;
+ ss << "config-backup-" << terseCurrentTime(false);
+ backupName = ss.str();
+ }
+ log() << "backing up config to: " << backupName << endl;
+ conn->copyDatabase( "config" , backupName );
+
+ map<string,string> hostToShard;
+ set<string> shards;
+ // shards
+ {
+ unsigned n = 0;
+ auto_ptr<DBClientCursor> c = conn->query( ShardNS::shard , BSONObj() );
+ while ( c->more() ) {
+ BSONObj o = c->next();
+ string host = o["host"].String();
+
+ string name = "";
+
+ BSONElement id = o["_id"];
+ if ( id.type() == String ) {
+ name = id.String();
+ }
+ else {
+ stringstream ss;
+ ss << "shard" << hostToShard.size();
+ name = ss.str();
+ }
+
+ hostToShard[host] = name;
+ shards.insert( name );
+ n++;
+ }
+
+ assert( n == hostToShard.size() );
+ assert( n == shards.size() );
+
+ conn->remove( ShardNS::shard , BSONObj() );
+
+ for ( map<string,string>::iterator i=hostToShard.begin(); i != hostToShard.end(); i++ ) {
+ conn->insert( ShardNS::shard , BSON( "_id" << i->second << "host" << i->first ) );
+ }
+ }
+
+ // databases
+ {
+ auto_ptr<DBClientCursor> c = conn->query( ShardNS::database , BSONObj() );
+ map<string,BSONObj> newDBs;
+ unsigned n = 0;
+ while ( c->more() ) {
+ BSONObj old = c->next();
+ n++;
+
+ if ( old["name"].eoo() ) {
+ // already done
+ newDBs[old["_id"].String()] = old;
+ continue;
+ }
+
+ BSONObjBuilder b(old.objsize());
+ b.appendAs( old["name"] , "_id" );
+
+ BSONObjIterator i(old);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( strcmp( "_id" , e.fieldName() ) == 0 ||
+ strcmp( "name" , e.fieldName() ) == 0 ) {
+ continue;
+ }
+
+ b.append( e );
+ }
+
+ BSONObj x = b.obj();
+ log() << old << "\n\t" << x << endl;
+ newDBs[old["name"].String()] = x;
+ }
+
+ assert( n == newDBs.size() );
+
+ conn->remove( ShardNS::database , BSONObj() );
+
+ for ( map<string,BSONObj>::iterator i=newDBs.begin(); i!=newDBs.end(); i++ ) {
+ conn->insert( ShardNS::database , i->second );
+ }
+
+ }
+
+ // chunks
+ {
+ unsigned num = 0;
+ map<string,BSONObj> chunks;
+ auto_ptr<DBClientCursor> c = conn->query( ShardNS::chunk , BSONObj() );
+ while ( c->more() ) {
+ BSONObj x = c->next();
+ BSONObjBuilder b;
+
+ string id = Chunk::genID( x["ns"].String() , x["min"].Obj() );
+ b.append( "_id" , id );
+
+ BSONObjIterator i(x);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( strcmp( e.fieldName() , "_id" ) == 0 )
+ continue;
+ b.append( e );
+ }
+
+ BSONObj n = b.obj();
+ log() << x << "\n\t" << n << endl;
+ chunks[id] = n;
+ num++;
+ }
+
+ assert( num == chunks.size() );
+
+ conn->remove( ShardNS::chunk , BSONObj() );
+ for ( map<string,BSONObj>::iterator i=chunks.begin(); i!=chunks.end(); i++ ) {
+ conn->insert( ShardNS::chunk , i->second );
+ }
+
+ }
+
+ conn->update( "config.version" , BSONObj() , BSON( "_id" << 1 << "version" << VERSION ) );
+ conn.done();
+ pool.flush();
+ return 1;
+ }
+
+ log() << "don't know how to upgrade " << cur << " to " << VERSION << endl;
+ return -8;
+ }
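+
+    // For reference, the config.databases rewrite above turns a v2-style entry into a
+    // v3-style one keyed by database name (field values here are illustrative):
+    //
+    //     v2: { _id: <old id>, name: "test", partitioned: true, primary: "shard0000" }
+    //     v3: { _id: "test", partitioned: true, primary: "shard0000" }
+    //
+    // and each config.chunks entry gets a deterministic _id built from its ns and min key
+    // via Chunk::genID(), with the remaining fields carried over unchanged.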
+
+}
diff --git a/src/mongo/s/cursors.cpp b/src/mongo/s/cursors.cpp
new file mode 100644
index 00000000000..241c2cfdb8d
--- /dev/null
+++ b/src/mongo/s/cursors.cpp
@@ -0,0 +1,316 @@
+// cursors.cpp
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "pch.h"
+#include "cursors.h"
+#include "../client/connpool.h"
+#include "../db/queryutil.h"
+#include "../db/commands.h"
+#include "../util/concurrency/task.h"
+#include "../util/net/listen.h"
+
+namespace mongo {
+
+ // -------- ShardedCursor -----------
+
+ ShardedClientCursor::ShardedClientCursor( QueryMessage& q , ClusteredCursor * cursor ) {
+ assert( cursor );
+ _cursor = cursor;
+
+ _skip = q.ntoskip;
+ _ntoreturn = q.ntoreturn;
+
+ _totalSent = 0;
+ _done = false;
+
+ _id = 0;
+
+ if ( q.queryOptions & QueryOption_NoCursorTimeout ) {
+ _lastAccessMillis = 0;
+ }
+ else
+ _lastAccessMillis = Listener::getElapsedTimeMillis();
+ }
+
+ ShardedClientCursor::~ShardedClientCursor() {
+ assert( _cursor );
+ delete _cursor;
+ _cursor = 0;
+ }
+
+ long long ShardedClientCursor::getId() {
+ if ( _id <= 0 ) {
+ _id = cursorCache.genId();
+ assert( _id >= 0 );
+ }
+ return _id;
+ }
+
+ void ShardedClientCursor::accessed() {
+ if ( _lastAccessMillis > 0 )
+ _lastAccessMillis = Listener::getElapsedTimeMillis();
+ }
+
+ long long ShardedClientCursor::idleTime( long long now ) {
+ if ( _lastAccessMillis == 0 )
+ return 0;
+ return now - _lastAccessMillis;
+ }
+
+ bool ShardedClientCursor::sendNextBatch( Request& r , int ntoreturn ) {
+ uassert( 10191 , "cursor already done" , ! _done );
+
+ int maxSize = 1024 * 1024;
+ if ( _totalSent > 0 )
+ maxSize *= 3;
+
+ BufBuilder b(32768);
+
+ int num = 0;
+ bool sendMore = true;
+
+ while ( _cursor->more() ) {
+ BSONObj o = _cursor->next();
+
+ b.appendBuf( (void*)o.objdata() , o.objsize() );
+ num++;
+
+ if ( b.len() > maxSize ) {
+ break;
+ }
+
+ if ( num == ntoreturn ) {
+ // soft limit aka batch size
+ break;
+ }
+
+ if ( ntoreturn != 0 && ( -1 * num + _totalSent ) == ntoreturn ) {
+ // hard limit - total to send
+ sendMore = false;
+ break;
+ }
+
+ if ( ntoreturn == 0 && _totalSent == 0 && num > 100 ) {
+ // first batch should be max 100 unless batch size specified
+ break;
+ }
+ }
+
+ bool hasMore = sendMore && _cursor->more();
+ LOG(6) << "\t hasMore:" << hasMore << " wouldSendMoreIfHad: " << sendMore << " id:" << getId() << " totalSent: " << _totalSent << endl;
+
+ replyToQuery( 0 , r.p() , r.m() , b.buf() , b.len() , num , _totalSent , hasMore ? getId() : 0 );
+ _totalSent += num;
+ _done = ! hasMore;
+
+ return hasMore;
+ }
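+
+    // Rough summary of the limits applied above (a sketch; the exact meaning of ntoreturn
+    // is defined by the wire protocol, not by this file):
+    //   - a batch is capped at ~1MB of BSON (3MB for batches after the first)
+    //   - a positive ntoreturn acts as a soft per-batch limit
+    //   - a negative ntoreturn acts as a hard limit on the total number of documents sent
+    //   - with no limit given, the first batch is capped at 100 documents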
+
+ // ---- CursorCache -----
+
+ long long CursorCache::TIMEOUT = 600000;
+
+ CursorCache::CursorCache()
+ :_mutex( "CursorCache" ), _shardedTotal(0) {
+ }
+
+ CursorCache::~CursorCache() {
+ // TODO: delete old cursors?
+ bool print = logLevel > 0;
+ if ( _cursors.size() || _refs.size() )
+ print = true;
+
+ if ( print )
+ cout << " CursorCache at shutdown - "
+ << " sharded: " << _cursors.size()
+ << " passthrough: " << _refs.size()
+ << endl;
+ }
+
+ ShardedClientCursorPtr CursorCache::get( long long id ) const {
+ LOG(_myLogLevel) << "CursorCache::get id: " << id << endl;
+ scoped_lock lk( _mutex );
+ MapSharded::const_iterator i = _cursors.find( id );
+ if ( i == _cursors.end() ) {
+ OCCASIONALLY log() << "Sharded CursorCache missing cursor id: " << id << endl;
+ return ShardedClientCursorPtr();
+ }
+ i->second->accessed();
+ return i->second;
+ }
+
+ void CursorCache::store( ShardedClientCursorPtr cursor ) {
+ LOG(_myLogLevel) << "CursorCache::store cursor " << " id: " << cursor->getId() << endl;
+ assert( cursor->getId() );
+ scoped_lock lk( _mutex );
+ _cursors[cursor->getId()] = cursor;
+ _shardedTotal++;
+ }
+ void CursorCache::remove( long long id ) {
+ assert( id );
+ scoped_lock lk( _mutex );
+ _cursors.erase( id );
+ }
+
+ void CursorCache::storeRef( const string& server , long long id ) {
+ LOG(_myLogLevel) << "CursorCache::storeRef server: " << server << " id: " << id << endl;
+ assert( id );
+ scoped_lock lk( _mutex );
+ _refs[id] = server;
+ }
+
+ string CursorCache::getRef( long long id ) const {
+ LOG(_myLogLevel) << "CursorCache::getRef id: " << id << endl;
+ assert( id );
+ scoped_lock lk( _mutex );
+ MapNormal::const_iterator i = _refs.find( id );
+ if ( i == _refs.end() )
+ return "";
+ return i->second;
+ }
+
+
+ long long CursorCache::genId() {
+ while ( true ) {
+ long long x = Security::getNonce();
+ if ( x == 0 )
+ continue;
+ if ( x < 0 )
+ x *= -1;
+
+ scoped_lock lk( _mutex );
+ MapSharded::iterator i = _cursors.find( x );
+ if ( i != _cursors.end() )
+ continue;
+
+ MapNormal::iterator j = _refs.find( x );
+ if ( j != _refs.end() )
+ continue;
+
+ return x;
+ }
+ }
+
+ void CursorCache::gotKillCursors(Message& m ) {
+ int *x = (int *) m.singleData()->_data;
+ x++; // reserved
+ int n = *x++;
+
+ if ( n > 2000 ) {
+ log( n < 30000 ? LL_WARNING : LL_ERROR ) << "receivedKillCursors, n=" << n << endl;
+ }
+
+
+ uassert( 13286 , "sent 0 cursors to kill" , n >= 1 );
+ uassert( 13287 , "too many cursors to kill" , n < 30000 );
+
+ long long * cursors = (long long *)x;
+ for ( int i=0; i<n; i++ ) {
+ long long id = cursors[i];
+ LOG(_myLogLevel) << "CursorCache::gotKillCursors id: " << id << endl;
+
+ if ( ! id ) {
+ log( LL_WARNING ) << " got cursor id of 0 to kill" << endl;
+ continue;
+ }
+
+ string server;
+ {
+ scoped_lock lk( _mutex );
+
+ MapSharded::iterator i = _cursors.find( id );
+ if ( i != _cursors.end() ) {
+ _cursors.erase( i );
+ continue;
+ }
+
+ MapNormal::iterator j = _refs.find( id );
+ if ( j == _refs.end() ) {
+ log( LL_WARNING ) << "can't find cursor: " << id << endl;
+ continue;
+ }
+ server = j->second;
+ _refs.erase( j );
+ }
+
+ LOG(_myLogLevel) << "CursorCache::found gotKillCursors id: " << id << " server: " << server << endl;
+
+ assert( server.size() );
+ ScopedDbConnection conn( server );
+ conn->killCursor( id );
+ conn.done();
+ }
+ }
+
+ void CursorCache::appendInfo( BSONObjBuilder& result ) const {
+ scoped_lock lk( _mutex );
+ result.append( "sharded" , (int)_cursors.size() );
+ result.appendNumber( "shardedEver" , _shardedTotal );
+ result.append( "refs" , (int)_refs.size() );
+ result.append( "totalOpen" , (int)(_cursors.size() + _refs.size() ) );
+ }
+
+ void CursorCache::doTimeouts() {
+ long long now = Listener::getElapsedTimeMillis();
+ scoped_lock lk( _mutex );
+ for ( MapSharded::iterator i=_cursors.begin(); i!=_cursors.end(); ++i ) {
+ long long idleFor = i->second->idleTime( now );
+ if ( idleFor < TIMEOUT ) {
+ continue;
+ }
+ log() << "killing old cursor " << i->second->getId() << " idle for: " << idleFor << "ms" << endl; // TODO: make log(1)
+ _cursors.erase( i );
+            i = _cursors.begin(); // a possible 2nd entry will get skipped; it will be handled on the next pass
+ if ( i == _cursors.end() )
+ break;
+ }
+ }
+
+ CursorCache cursorCache;
+
+ const int CursorCache::_myLogLevel = 3;
+
+ class CursorTimeoutTask : public task::Task {
+ public:
+ virtual string name() const { return "cursorTimeout"; }
+ virtual void doWork() {
+ cursorCache.doTimeouts();
+ }
+ } cursorTimeoutTask;
+
+ void CursorCache::startTimeoutThread() {
+ task::repeat( &cursorTimeoutTask , 400 );
+ }
+
+ class CmdCursorInfo : public Command {
+ public:
+ CmdCursorInfo() : Command( "cursorInfo", true ) {}
+ virtual bool slaveOk() const { return true; }
+ virtual void help( stringstream& help ) const {
+ help << " example: { cursorInfo : 1 }";
+ }
+ virtual LockType locktype() const { return NONE; }
+ bool run(const string&, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ cursorCache.appendInfo( result );
+ if ( jsobj["setTimeout"].isNumber() )
+ CursorCache::TIMEOUT = jsobj["setTimeout"].numberLong();
+ return true;
+ }
+ } cmdCursorInfo;
+
+}
diff --git a/src/mongo/s/cursors.h b/src/mongo/s/cursors.h
new file mode 100644
index 00000000000..862f3731031
--- /dev/null
+++ b/src/mongo/s/cursors.h
@@ -0,0 +1,106 @@
+// cursors.h
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#pragma once
+
+#include "../pch.h"
+
+#include "../db/jsobj.h"
+#include "../db/dbmessage.h"
+#include "../client/dbclient.h"
+#include "../client/parallel.h"
+
+#include "request.h"
+
+namespace mongo {
+
+ class ShardedClientCursor : boost::noncopyable {
+ public:
+ ShardedClientCursor( QueryMessage& q , ClusteredCursor * cursor );
+ virtual ~ShardedClientCursor();
+
+ long long getId();
+
+ /**
+ * @return whether there is more data left
+ */
+ bool sendNextBatch( Request& r ) { return sendNextBatch( r , _ntoreturn ); }
+ bool sendNextBatch( Request& r , int ntoreturn );
+
+ void accessed();
+ /** @return idle time in ms */
+ long long idleTime( long long now );
+
+ protected:
+
+ ClusteredCursor * _cursor;
+
+ int _skip;
+ int _ntoreturn;
+
+ int _totalSent;
+ bool _done;
+
+ long long _id;
+ long long _lastAccessMillis; // 0 means no timeout
+
+ };
+
+ typedef boost::shared_ptr<ShardedClientCursor> ShardedClientCursorPtr;
+
+ class CursorCache {
+ public:
+
+ static long long TIMEOUT;
+
+ typedef map<long long,ShardedClientCursorPtr> MapSharded;
+ typedef map<long long,string> MapNormal;
+
+ CursorCache();
+ ~CursorCache();
+
+ ShardedClientCursorPtr get( long long id ) const;
+ void store( ShardedClientCursorPtr cursor );
+ void remove( long long id );
+
+ void storeRef( const string& server , long long id );
+
+ /** @return the server for id or "" */
+ string getRef( long long id ) const ;
+
+ void gotKillCursors(Message& m );
+
+ void appendInfo( BSONObjBuilder& result ) const ;
+
+ long long genId();
+
+ void doTimeouts();
+ void startTimeoutThread();
+ private:
+ mutable mongo::mutex _mutex;
+
+ MapSharded _cursors;
+ MapNormal _refs;
+
+ long long _shardedTotal;
+
+ static const int _myLogLevel;
+ };
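+
+    // Typical lifecycle (sketch only; 'q' and 'clustered' stand in for a real query
+    // message and ClusteredCursor):
+    //
+    //     ShardedClientCursorPtr scc( new ShardedClientCursor( q , clustered ) );
+    //     cursorCache.store( scc );                 // getId() assigns a unique id on first use
+    //     ShardedClientCursorPtr same = cursorCache.get( scc->getId() );
+    //     cursorCache.remove( scc->getId() );       // or left to doTimeouts() to reap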
+
+ extern CursorCache cursorCache;
+}
diff --git a/src/mongo/s/d_chunk_manager.cpp b/src/mongo/s/d_chunk_manager.cpp
new file mode 100644
index 00000000000..82a06f61f2c
--- /dev/null
+++ b/src/mongo/s/d_chunk_manager.cpp
@@ -0,0 +1,339 @@
+// @file d_chunk_manager.cpp
+
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+
+#include "../client/connpool.h"
+#include "../client/dbclientmockcursor.h"
+#include "../db/instance.h"
+#include "../db/clientcursor.h"
+
+#include "d_chunk_manager.h"
+
+namespace mongo {
+
+ ShardChunkManager::ShardChunkManager( const string& configServer , const string& ns , const string& shardName ) {
+
+ // have to get a connection to the config db
+        // special case: if I'm the configdb, I'm already locked, and connecting to myself
+        // would be a deadlock
+ scoped_ptr<ScopedDbConnection> scoped;
+ scoped_ptr<DBDirectClient> direct;
+ DBClientBase * conn;
+ if ( configServer.empty() ) {
+ direct.reset( new DBDirectClient() );
+ conn = direct.get();
+ }
+ else {
+ scoped.reset( new ScopedDbConnection( configServer ) );
+ conn = scoped->get();
+ }
+
+ // get this collection's sharding key
+ BSONObj collectionDoc = conn->findOne( "config.collections", BSON( "_id" << ns ) );
+ uassert( 13539 , str::stream() << ns << " does not exist" , !collectionDoc.isEmpty() );
+ uassert( 13540 , str::stream() << ns << " collection config entry corrupted" , collectionDoc["dropped"].type() );
+ uassert( 13541 , str::stream() << ns << " dropped. Re-shard collection first." , !collectionDoc["dropped"].Bool() );
+ _fillCollectionKey( collectionDoc );
+
+ // query for all the chunks for 'ns' that live in this shard, sorting so we can efficiently bucket them
+ BSONObj q = BSON( "ns" << ns << "shard" << shardName );
+ auto_ptr<DBClientCursor> cursor = conn->query( "config.chunks" , Query(q).sort( "min" ) );
+ _fillChunks( cursor.get() );
+ _fillRanges();
+
+ if ( scoped.get() )
+ scoped->done();
+
+ if ( _chunksMap.empty() )
+ log() << "no chunk for collection " << ns << " on shard " << shardName << endl;
+ }
+
+ ShardChunkManager::ShardChunkManager( const BSONObj& collectionDoc , const BSONArray& chunksArr ) {
+ _fillCollectionKey( collectionDoc );
+
+ scoped_ptr<DBClientMockCursor> c ( new DBClientMockCursor( chunksArr ) );
+ _fillChunks( c.get() );
+ _fillRanges();
+ }
+
+ void ShardChunkManager::_fillCollectionKey( const BSONObj& collectionDoc ) {
+ BSONElement e = collectionDoc["key"];
+ uassert( 13542 , str::stream() << "collection doesn't have a key: " << collectionDoc , ! e.eoo() && e.isABSONObj() );
+
+ BSONObj keys = e.Obj().getOwned();
+ BSONObjBuilder b;
+ BSONForEach( key , keys ) {
+ b.append( key.fieldName() , 1 );
+ }
+ _key = b.obj();
+ }
+
+ void ShardChunkManager::_fillChunks( DBClientCursorInterface* cursor ) {
+ assert( cursor );
+
+ ShardChunkVersion version;
+ while ( cursor->more() ) {
+ BSONObj d = cursor->next();
+ _chunksMap.insert( make_pair( d["min"].Obj().getOwned() , d["max"].Obj().getOwned() ) );
+
+ ShardChunkVersion currVersion( d["lastmod"] );
+ if ( currVersion > version ) {
+ version = currVersion;
+ }
+ }
+ _version = version;
+ }
+
+ void ShardChunkManager::_fillRanges() {
+ if ( _chunksMap.empty() )
+ return;
+
+        // load the chunk information, coalescing their ranges
+ // the version for this shard would be the highest version for any of the chunks
+ RangeMap::const_iterator it = _chunksMap.begin();
+ BSONObj min,max;
+ while ( it != _chunksMap.end() ) {
+ BSONObj currMin = it->first;
+ BSONObj currMax = it->second;
+ ++it;
+
+            // coalesce the chunks' bounds into ranges if they are adjacent
+ if ( min.isEmpty() ) {
+ min = currMin;
+ max = currMax;
+ continue;
+ }
+ if ( max == currMin ) {
+ max = currMax;
+ continue;
+ }
+
+ _rangesMap.insert( make_pair( min , max ) );
+
+ min = currMin;
+ max = currMax;
+ }
+ assert( ! min.isEmpty() );
+
+ _rangesMap.insert( make_pair( min , max ) );
+ }
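+
+    // For example (sketch, single-field shard key): the chunks
+    //     { a: MinKey } -> { a: 10 } , { a: 10 } -> { a: 20 } , { a: 50 } -> { a: 60 }
+    // coalesce into the two ranges
+    //     { a: MinKey } -> { a: 20 }  and  { a: 50 } -> { a: 60 }
+    // because the first two chunks are adjacent (one's max equals the next's min).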
+
+ static bool contains( const BSONObj& min , const BSONObj& max , const BSONObj& point ) {
+ return point.woCompare( min ) >= 0 && point.woCompare( max ) < 0;
+ }
+
+ bool ShardChunkManager::belongsToMe( ClientCursor* cc ) const {
+ verify( 15851 , cc );
+ if ( _rangesMap.size() == 0 )
+ return false;
+
+ return _belongsToMe( cc->extractFields( _key , true ) );
+ }
+
+ bool ShardChunkManager::belongsToMe( const BSONObj& obj ) const {
+ if ( _rangesMap.size() == 0 )
+ return false;
+
+ return _belongsToMe( obj.extractFields( _key , true ) );
+ }
+
+ bool ShardChunkManager::_belongsToMe( const BSONObj& x ) const {
+ RangeMap::const_iterator it = _rangesMap.upper_bound( x );
+ if ( it != _rangesMap.begin() )
+ it--;
+
+ bool good = contains( it->first , it->second , x );
+
+#if 0
+ if ( ! good ) {
+ log() << "bad: " << x << " " << it->first << " " << x.woCompare( it->first ) << " " << x.woCompare( it->second ) << endl;
+ for ( RangeMap::const_iterator i=_rangesMap.begin(); i!=_rangesMap.end(); ++i ) {
+ log() << "\t" << i->first << "\t" << i->second << "\t" << endl;
+ }
+ }
+#endif
+
+ return good;
+ }
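+
+    // Note on the lookup above: _rangesMap is keyed by range minimums, so upper_bound(x)
+    // points at the first range that starts strictly after x; stepping back one entry (when
+    // possible) gives the only range that could contain x. E.g. with ranges starting at
+    // { a: 0 } and { a: 50 }, a key of { a: 42 } is checked against the range starting at { a: 0 }.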
+
+ bool ShardChunkManager::getNextChunk( const BSONObj& lookupKey, BSONObj* foundMin , BSONObj* foundMax ) const {
+ assert( foundMin );
+ assert( foundMax );
+ *foundMin = BSONObj();
+ *foundMax = BSONObj();
+
+ if ( _chunksMap.empty() ) {
+ return true;
+ }
+
+ RangeMap::const_iterator it;
+ if ( lookupKey.isEmpty() ) {
+ it = _chunksMap.begin();
+ *foundMin = it->first;
+ *foundMax = it->second;
+ return _chunksMap.size() == 1;
+ }
+
+ it = _chunksMap.upper_bound( lookupKey );
+ if ( it != _chunksMap.end() ) {
+ *foundMin = it->first;
+ *foundMax = it->second;
+ return false;
+ }
+
+ return true;
+ }
+
+ void ShardChunkManager::_assertChunkExists( const BSONObj& min , const BSONObj& max ) const {
+ RangeMap::const_iterator it = _chunksMap.find( min );
+ if ( it == _chunksMap.end() ) {
+ uasserted( 13586 , str::stream() << "couldn't find chunk " << min << "->" << max );
+ }
+
+ if ( it->second.woCompare( max ) != 0 ) {
+ ostringstream os;
+ os << "ranges differ, "
+ << "requested: " << min << " -> " << max << " "
+               << "existing: " << ( it == _chunksMap.end() ? string("<empty>") : it->first.toString() + " -> " + it->second.toString() );
+ uasserted( 13587 , os.str() );
+ }
+ }
+
+ ShardChunkManager* ShardChunkManager::cloneMinus( const BSONObj& min, const BSONObj& max, const ShardChunkVersion& version ) {
+
+ // check that we have the exact chunk that will be subtracted
+ _assertChunkExists( min , max );
+
+ auto_ptr<ShardChunkManager> p( new ShardChunkManager );
+ p->_key = this->_key;
+
+ if ( _chunksMap.size() == 1 ) {
+ // if left with no chunks, just reset version
+ uassert( 13590 , str::stream() << "setting version to " << version << " on removing last chunk", version == 0 );
+
+ p->_version = 0;
+
+ }
+ else {
+ // can't move version backwards when subtracting chunks
+ // this is what guarantees that no read or write would be taken once we subtract data from the current shard
+ if ( version <= _version ) {
+ uasserted( 13585 , str::stream() << "version " << version.toString() << " not greater than " << _version.toString() );
+ }
+
+ p->_chunksMap = this->_chunksMap;
+ p->_chunksMap.erase( min );
+ p->_version = version;
+ p->_fillRanges();
+ }
+
+ return p.release();
+ }
+
+ static bool overlap( const BSONObj& l1 , const BSONObj& h1 , const BSONObj& l2 , const BSONObj& h2 ) {
+ return ! ( ( h1.woCompare( l2 ) <= 0 ) || ( h2.woCompare( l1 ) <= 0 ) );
+ }
+
+ ShardChunkManager* ShardChunkManager::clonePlus( const BSONObj& min , const BSONObj& max , const ShardChunkVersion& version ) {
+
+ // it is acceptable to move version backwards (e.g., undoing a migration that went bad during commit)
+ // but only cloning away the last chunk may reset the version to 0
+ uassert( 13591 , "version can't be set to zero" , version > 0 );
+
+ if ( ! _chunksMap.empty() ) {
+
+ // check that there isn't any chunk on the interval to be added
+ RangeMap::const_iterator it = _chunksMap.lower_bound( max );
+ if ( it != _chunksMap.begin() ) {
+ --it;
+ }
+ if ( overlap( min , max , it->first , it->second ) ) {
+ ostringstream os;
+ os << "ranges overlap, "
+ << "requested: " << min << " -> " << max << " "
+ << "existing: " << it->first.toString() + " -> " + it->second.toString();
+ uasserted( 13588 , os.str() );
+ }
+ }
+
+ auto_ptr<ShardChunkManager> p( new ShardChunkManager );
+
+ p->_key = this->_key;
+ p->_chunksMap = this->_chunksMap;
+ p->_chunksMap.insert( make_pair( min.getOwned() , max.getOwned() ) );
+ p->_version = version;
+ p->_fillRanges();
+
+ return p.release();
+ }
+
+ ShardChunkManager* ShardChunkManager::cloneSplit( const BSONObj& min , const BSONObj& max , const vector<BSONObj>& splitKeys ,
+ const ShardChunkVersion& version ) {
+
+ // the version required in both resulting chunks could be simply an increment in the minor portion of the current version
+ // however, we are enforcing uniqueness over the attributes <ns, lastmod> of the configdb collection 'chunks'
+ // so in practice, a migrate somewhere may force this split to pick up a version that has the major portion higher
+ // than the one that this shard has been using
+ //
+        // TODO drop the uniqueness constraint and tighten the check below so that only the minor portion of version changes
+ if ( version <= _version ) {
+ uasserted( 14039 , str::stream() << "version " << version.toString() << " not greater than " << _version.toString() );
+ }
+
+ // check that we have the exact chunk that will be split and that the split point is valid
+ _assertChunkExists( min , max );
+ for ( vector<BSONObj>::const_iterator it = splitKeys.begin() ; it != splitKeys.end() ; ++it ) {
+ if ( ! contains( min , max , *it ) ) {
+                uasserted( 14040 , str::stream() << "can't split " << min << " -> " << max << " on " << *it );
+ }
+ }
+
+ auto_ptr<ShardChunkManager> p( new ShardChunkManager );
+
+ p->_key = this->_key;
+ p->_chunksMap = this->_chunksMap;
+ p->_version = version; // will increment second, third, ... chunks below
+
+ BSONObj startKey = min;
+ for ( vector<BSONObj>::const_iterator it = splitKeys.begin() ; it != splitKeys.end() ; ++it ) {
+ BSONObj split = *it;
+ p->_chunksMap[min] = split.getOwned();
+ p->_chunksMap.insert( make_pair( split.getOwned() , max.getOwned() ) );
+ p->_version.incMinor();
+ startKey = split;
+ }
+ p->_fillRanges();
+
+ return p.release();
+ }
+
+ string ShardChunkManager::toString() const {
+ StringBuilder ss;
+ ss << " ShardChunkManager version: " << _version << " key: " << _key;
+ bool first = true;
+ for ( RangeMap::const_iterator i=_rangesMap.begin(); i!=_rangesMap.end(); ++i ) {
+ if ( first ) first = false;
+ else ss << " , ";
+
+ ss << i->first << " -> " << i->second;
+ }
+ return ss.str();
+ }
+
+} // namespace mongo
diff --git a/src/mongo/s/d_chunk_manager.h b/src/mongo/s/d_chunk_manager.h
new file mode 100644
index 00000000000..fd5974e4953
--- /dev/null
+++ b/src/mongo/s/d_chunk_manager.h
@@ -0,0 +1,167 @@
+// @file d_chunk_manager.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../pch.h"
+
+#include "../db/jsobj.h"
+#include "util.h"
+
+namespace mongo {
+
+ class ClientCursor;
+
+ /**
+ * Controls the boundaries of all the chunks for a given collection that live in this shard.
+ *
+ * ShardChunkManager instances never change after construction. There are methods provided that would generate a
+ * new manager if new chunks are added, subtracted, or split.
+ *
+ * TODO
+ * The responsibility of maintaining the version for a shard is still shared between this class and its caller. The
+ * manager does check corner cases (e.g. cloning out the last chunk generates a manager with version 0) but ultimately
+ * still cannot be responsible to set all versions. Currently, they are a function of the global state as opposed to
+ * the per-shard one.
+ */
+ class ShardChunkManager : public boost::noncopyable {
+ public:
+
+ /**
+ * Loads the ShardChunkManager with all boundaries for chunks of a given collection that live in an given
+ * shard.
+ *
+ * @param configServer name of the server where the configDB currently is. Can be empty to indicate
+ * that the configDB is running locally
+         * @param ns namespace of the collection whose chunks we're interested in
+ * @param shardName name of the shard that this chunk matcher should track
+ *
+ * This constructor throws if collection is dropped/malformed and on connectivity errors
+ */
+ ShardChunkManager( const string& configServer , const string& ns , const string& shardName );
+
+ /**
+ * Same as the regular constructor but used in unittest (no access to configDB required).
+ *
+         * @param collectionDoc simulates config.collections' entry for one collection
+         * @param chunksDoc simulates config.chunks' entries for one collection's shard
+ */
+ ShardChunkManager( const BSONObj& collectionDoc , const BSONArray& chunksDoc );
+
+ ~ShardChunkManager() {}
+
+ /**
+ * Generates a new manager based on 'this's state minus a given chunk.
+ *
+ * @param min max chunk boundaries for the chunk to subtract
+ * @param version that the resulting manager should be at. The version has to be higher than the current one.
+         * When cloning away the last chunk, version must be 0.
+ * @return a new ShardChunkManager, to be owned by the caller
+ */
+ ShardChunkManager* cloneMinus( const BSONObj& min , const BSONObj& max , const ShardChunkVersion& version );
+
+ /**
+ * Generates a new manager based on 'this's state plus a given chunk.
+ *
+ * @param min max chunk boundaries for the chunk to add
+ * @param version that the resulting manager should be at. It can never be 0, though (see CloneMinus).
+ * @return a new ShardChunkManager, to be owned by the caller
+ */
+ ShardChunkManager* clonePlus( const BSONObj& min , const BSONObj& max , const ShardChunkVersion& version );
+
+ /**
+ * Generates a new manager by splitting an existing chunk at one or more points.
+ *
+ * @param min max boundaries of chunk to be split
+ * @param splitKeys points to split original chunk at
+ * @param version to be used in first chunk. The subsequent chunks would increment the minor version.
+ * @return a new ShardChunkManager with the chunk split, to be owned by the caller
+ */
+ ShardChunkManager* cloneSplit( const BSONObj& min , const BSONObj& max , const vector<BSONObj>& splitKeys ,
+ const ShardChunkVersion& version );
+
+ /**
+ * Checks whether a document belongs to this shard.
+ *
+ * @param obj document containing sharding keys (and, optionally, other attributes)
+         * @return true if this shard holds the object
+ */
+ bool belongsToMe( const BSONObj& obj ) const;
+
+ /**
+ * Checks whether a document belongs to this shard.
+ *
+ * @param obj document containing sharding keys (and, optionally, other attributes)
+         * @return true if this shard holds the object
+ */
+ bool belongsToMe( ClientCursor* cc ) const;
+
+ /**
+         * Given a chunk's min key (or an empty doc), gets the boundaries of the chunk following that one (or of the first chunk, if the doc is empty).
+ *
+ * @param lookupKey is the min key for a previously obtained chunk or the empty document
+ * @param foundMin IN/OUT min for chunk following the one starting at lookupKey
+ * @param foundMax IN/OUT max for the above chunk
+ * @return true if the chunk returned is the last one
+ */
+ bool getNextChunk( const BSONObj& lookupKey, BSONObj* foundMin , BSONObj* foundMax ) const;
+
+ // accessors
+
+ ShardChunkVersion getVersion() const { return _version; }
+ BSONObj getKey() const { return _key.getOwned(); }
+ unsigned getNumChunks() const { return _chunksMap.size(); }
+
+ string toString() const;
+ private:
+
+ /**
+         * Same as belongsToMe, but the key must be the shard key
+ */
+ bool _belongsToMe( const BSONObj& key ) const;
+
+
+ // highest ShardChunkVersion for which this ShardChunkManager's information is accurate
+ ShardChunkVersion _version;
+
+ // key pattern for chunks under this range
+ BSONObj _key;
+
+ // a map from a min key into the chunk's (or range's) max boundary
+ typedef map< BSONObj, BSONObj , BSONObjCmp > RangeMap;
+ RangeMap _chunksMap;
+
+        // a map from a min key into a range of contiguous chunks
+        // redundant, but we expect high chunk contiguity, especially in small installations
+ RangeMap _rangesMap;
+
+ /** constructors helpers */
+ void _fillCollectionKey( const BSONObj& collectionDoc );
+ void _fillChunks( DBClientCursorInterface* cursor );
+ void _fillRanges();
+
+ /** throws if the exact chunk is not in the chunks' map */
+ void _assertChunkExists( const BSONObj& min , const BSONObj& max ) const;
+
+ /** can only be used in the cloning calls */
+ ShardChunkManager() {}
+ };
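+
+    // Usage sketch, mirroring the test-only constructor (documents below are made up):
+    //
+    //     BSONObj coll = BSON( "_id" << "test.foo" << "dropped" << false
+    //                          << "key" << BSON( "a" << 1 ) );
+    //     BSONArray chunks = BSON_ARRAY( BSON( "min" << BSON( "a" << MINKEY ) <<
+    //                                          "max" << BSON( "a" << 100 ) <<
+    //                                          "lastmod" << Date_t( 1 ) ) );
+    //     ShardChunkManager m( coll , chunks );
+    //     bool mine = m.belongsToMe( BSON( "a" << 42 ) );   // true: 42 falls in the only chunk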
+
+ typedef shared_ptr<ShardChunkManager> ShardChunkManagerPtr;
+
+} // namespace mongo
diff --git a/src/mongo/s/d_logic.cpp b/src/mongo/s/d_logic.cpp
new file mode 100644
index 00000000000..7350856e91a
--- /dev/null
+++ b/src/mongo/s/d_logic.cpp
@@ -0,0 +1,121 @@
+// @file d_logic.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+
+/**
+ these are commands that live in mongod
+ mostly around shard management and checking
+ */
+
+#include "pch.h"
+#include <map>
+#include <string>
+
+#include "../db/commands.h"
+#include "../db/jsobj.h"
+#include "../db/dbmessage.h"
+#include "../db/ops/query.h"
+
+#include "../client/connpool.h"
+
+#include "../util/queue.h"
+
+#include "shard.h"
+#include "d_logic.h"
+#include "d_writeback.h"
+
+using namespace std;
+
+namespace mongo {
+
+ bool _handlePossibleShardedMessage( Message &m, DbResponse* dbresponse ) {
+ DEV assert( shardingState.enabled() );
+
+ int op = m.operation();
+ if ( op < 2000
+ || op >= 3000
+ || op == dbGetMore // cursors are weird
+ )
+ return false;
+
+ DbMessage d(m);
+ const char *ns = d.getns();
+ string errmsg;
+ if ( shardVersionOk( ns , errmsg ) ) {
+ return false;
+ }
+
+ LOG(1) << "connection meta data too old - will retry ns:(" << ns << ") op:(" << opToString(op) << ") " << errmsg << endl;
+
+ if ( doesOpGetAResponse( op ) ) {
+ assert( dbresponse );
+ BufBuilder b( 32768 );
+ b.skip( sizeof( QueryResult ) );
+ {
+ BSONObj obj = BSON( "$err" << errmsg << "ns" << ns );
+ b.appendBuf( obj.objdata() , obj.objsize() );
+ }
+
+ QueryResult *qr = (QueryResult*)b.buf();
+ qr->_resultFlags() = ResultFlag_ErrSet | ResultFlag_ShardConfigStale;
+ qr->len = b.len();
+ qr->setOperation( opReply );
+ qr->cursorId = 0;
+ qr->startingFrom = 0;
+ qr->nReturned = 1;
+ b.decouple();
+
+ Message * resp = new Message();
+ resp->setData( qr , true );
+
+ dbresponse->response = resp;
+ dbresponse->responseTo = m.header()->id;
+ return true;
+ }
+
+ uassert( 9517 , "writeback" , ( d.reservedField() & DbMessage::Reserved_FromWriteback ) == 0 );
+
+ OID writebackID;
+ writebackID.init();
+ lastError.getSafe()->writeback( writebackID );
+
+ const OID& clientID = ShardedConnectionInfo::get(false)->getID();
+ massert( 10422 , "write with bad shard config and no server id!" , clientID.isSet() );
+
+ LOG(1) << "got write with an old config - writing back ns: " << ns << endl;
+ LOG(1) << m.toString() << endl;
+
+ BSONObjBuilder b;
+ b.appendBool( "writeBack" , true );
+ b.append( "ns" , ns );
+ b.append( "id" , writebackID );
+ b.append( "connectionId" , cc().getConnectionId() );
+ b.append( "instanceIdent" , prettyHostName() );
+ b.appendTimestamp( "version" , shardingState.getVersion( ns ) );
+
+ ShardedConnectionInfo* info = ShardedConnectionInfo::get( false );
+ b.appendTimestamp( "yourVersion" , info ? info->getVersion(ns) : (ConfigVersion)0 );
+
+ b.appendBinData( "msg" , m.header()->len , bdtCustom , (char*)(m.singleData()) );
+ LOG(2) << "writing back msg with len: " << m.header()->len << " op: " << m.operation() << endl;
+ writeBackManager.queueWriteBack( clientID.str() , b.obj() );
+
+ return true;
+ }
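+
+    // The writeback entry queued above looks roughly like (a sketch; values are illustrative):
+    //
+    //     { writeBack: true, ns: "test.foo", id: ObjectId(...), connectionId: 7,
+    //       instanceIdent: "host:27018", version: <shard version>, yourVersion: <client version>,
+    //       msg: BinData(<original message>) }
+    //
+    // The expectation is that mongos picks these entries up and retries the operation with
+    // fresh shard version information.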
+
+}
diff --git a/src/mongo/s/d_logic.h b/src/mongo/s/d_logic.h
new file mode 100644
index 00000000000..6cbdfadf6af
--- /dev/null
+++ b/src/mongo/s/d_logic.h
@@ -0,0 +1,246 @@
+// @file d_logic.h
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#pragma once
+
+#include "../pch.h"
+
+#include "../db/jsobj.h"
+
+#include "d_chunk_manager.h"
+#include "util.h"
+
+namespace mongo {
+
+ class Database;
+ class DiskLoc;
+
+ typedef ShardChunkVersion ConfigVersion;
+
+ // --------------
+ // --- global state ---
+ // --------------
+
+ class ShardingState {
+ public:
+ ShardingState();
+
+ bool enabled() const { return _enabled; }
+ const string& getConfigServer() const { return _configServer; }
+ void enable( const string& server );
+
+ void gotShardName( const string& name );
+ void gotShardHost( string host );
+
+ string getShardName() { return _shardName; }
+ string getShardHost() { return _shardHost; }
+
+ /** Reverts back to a state where this mongod is not sharded. */
+ void resetShardingState();
+
+ // versioning support
+
+ bool hasVersion( const string& ns );
+ bool hasVersion( const string& ns , ConfigVersion& version );
+ const ConfigVersion getVersion( const string& ns ) const;
+
+ /**
+ * Uninstalls the manager for a given collection. This should be used when the collection is dropped.
+ *
+ * NOTE:
+         * An existing collection with no chunks on this shard will have a manager on version 0, which is different from
+         * a dropped collection, which will not have a manager.
+ *
+ * TODO
+ * When sharding state is enabled, absolutely all collections should have a manager. (The non-sharded ones are
+         * a degenerate case of one-chunk collections).
+         * For now, a dropped collection and a non-sharded one are indistinguishable (SERVER-1849)
+ *
+ * @param ns the collection to be dropped
+ */
+ void resetVersion( const string& ns );
+
+ /**
+ * Requests to access a collection at a certain version. If the collection's manager is not at that version it
+ * will try to update itself to the newest version. The request is only granted if the version is the current or
+ * the newest one.
+ *
+ * @param ns collection to be accessed
+         * @param version (IN) the version the client believes this collection is at and (OUT) the version the manager is actually at
+ * @return true if the access can be allowed at the provided version
+ */
+ bool trySetVersion( const string& ns , ConfigVersion& version );
+
+ void appendInfo( BSONObjBuilder& b );
+
+ // querying support
+
+ bool needShardChunkManager( const string& ns ) const;
+ ShardChunkManagerPtr getShardChunkManager( const string& ns );
+
+ // chunk migrate and split support
+
+ /**
+ * Creates and installs a new chunk manager for a given collection by "forgetting" about one of its chunks.
+ * The new manager uses the provided version, which has to be higher than the current manager's.
+ * One exception: if the forgotten chunk is the last one in this shard for the collection, version has to be 0.
+ *
+ * If it runs successfully, clients need to grab the new version to access the collection.
+ *
+ * @param ns the collection
+ * @param min max the chunk to eliminate from the current manager
+ * @param version at which the new manager should be at
+ */
+ void donateChunk( const string& ns , const BSONObj& min , const BSONObj& max , ShardChunkVersion version );
+
+ /**
+ * Creates and installs a new chunk manager for a given collection by reclaiming a previously donated chunk.
+ * The previous manager's version has to be provided.
+ *
+ * If it runs successfully, clients that became stale by the previous donateChunk will be able to access the
+ * collection again.
+ *
+ * @param ns the collection
+ * @param min max the chunk to reclaim and add to the current manager
+ * @param version at which the new manager should be at
+ */
+ void undoDonateChunk( const string& ns , const BSONObj& min , const BSONObj& max , ShardChunkVersion version );
+
+ /**
+ * Creates and installs a new chunk manager for a given collection by splitting one of its chunks in two or more.
+ * The version for the first split chunk should be provided. The subsequent chunks' version would be the latter with the
+ * minor portion incremented.
+ *
+         * The effect on clients will depend on the version used. If the major portion is the same as the current shard's,
+ * clients shouldn't perceive the split.
+ *
+ * @param ns the collection
+ * @param min max the chunk that should be split
+ * @param splitKeys point in which to split
+ * @param version at which the new manager should be at
+ */
+ void splitChunk( const string& ns , const BSONObj& min , const BSONObj& max , const vector<BSONObj>& splitKeys ,
+ ShardChunkVersion version );
+
+ bool inCriticalMigrateSection();
+
+ private:
+ bool _enabled;
+
+ string _configServer;
+
+ string _shardName;
+ string _shardHost;
+
+ // protects state below
+ mutable mongo::mutex _mutex;
+
+ // map from a namespace into the ensemble of chunk ranges that are stored in this mongod
+ // a ShardChunkManager carries all state we need for a collection at this shard, including its version information
+ typedef map<string,ShardChunkManagerPtr> ChunkManagersMap;
+ ChunkManagersMap _chunks;
+ };
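+
+    // A chunk donation from this shard roughly drives the manager like this (sketch;
+    // locking and error handling omitted):
+    //
+    //     shardingState.donateChunk( ns , min , max , newVersion );       // forget [min,max)
+    //     // ... if the migration later fails to commit ...
+    //     shardingState.undoDonateChunk( ns , min , max , oldVersion );   // reclaim it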
+
+ extern ShardingState shardingState;
+
+ /**
+ * one per connection from mongos
+     * holds version state for each namespace
+ */
+ class ShardedConnectionInfo {
+ public:
+ ShardedConnectionInfo();
+
+ const OID& getID() const { return _id; }
+ bool hasID() const { return _id.isSet(); }
+ void setID( const OID& id );
+
+ const ConfigVersion getVersion( const string& ns ) const;
+ void setVersion( const string& ns , const ConfigVersion& version );
+
+ static ShardedConnectionInfo* get( bool create );
+ static void reset();
+ static void addHook();
+
+ bool inForceVersionOkMode() const {
+ return _forceVersionOk;
+ }
+
+ void enterForceVersionOkMode() { _forceVersionOk = true; }
+ void leaveForceVersionOkMode() { _forceVersionOk = false; }
+
+ private:
+
+ OID _id;
+        bool _forceVersionOk; // if this is true, then chunk version #s aren't checked, and all ops are allowed
+
+ typedef map<string,ConfigVersion> NSVersionMap;
+ NSVersionMap _versions;
+
+ static boost::thread_specific_ptr<ShardedConnectionInfo> _tl;
+ };
+
+ struct ShardForceVersionOkModeBlock {
+ ShardForceVersionOkModeBlock() {
+ info = ShardedConnectionInfo::get( false );
+ if ( info )
+ info->enterForceVersionOkMode();
+ }
+ ~ShardForceVersionOkModeBlock() {
+ if ( info )
+ info->leaveForceVersionOkMode();
+ }
+
+ ShardedConnectionInfo * info;
+ };
+
+ // -----------------
+ // --- core ---
+ // -----------------
+
+ unsigned long long extractVersion( BSONElement e , string& errmsg );
+
+
+ /**
+ * @return true if we have any shard info for the ns
+ */
+ bool haveLocalShardingInfo( const string& ns );
+
+ /**
+     * @return true if the current thread's shard version is ok, or if we are not running in a sharded setup
+ */
+ bool shardVersionOk( const string& ns , string& errmsg );
+
+    struct DbResponse;
+
+    /**
+     * @return true if we took care of the message and nothing else should be done
+     */
+    bool _handlePossibleShardedMessage( Message &m, DbResponse * dbresponse );
+
+    /**
+     * Convenience wrapper: if sharding is enabled, delegates to _handlePossibleShardedMessage.
+     * @return true if the message was handled here and nothing else should be done
+     */
+ inline bool handlePossibleShardedMessage( Message &m, DbResponse * dbresponse ) {
+ if( !shardingState.enabled() )
+ return false;
+ return _handlePossibleShardedMessage(m, dbresponse);
+ }
+
+ void logOpForSharding( const char * opstr , const char * ns , const BSONObj& obj , BSONObj * patt );
+ void aboutToDeleteForSharding( const Database* db , const DiskLoc& dl );
+
+}
diff --git a/src/mongo/s/d_migrate.cpp b/src/mongo/s/d_migrate.cpp
new file mode 100644
index 00000000000..5e62661ec7e
--- /dev/null
+++ b/src/mongo/s/d_migrate.cpp
@@ -0,0 +1,1728 @@
+// d_migrate.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+
+/**
+ these are commands that live in mongod
+ mostly around shard management and checking
+ */
+
+#include "pch.h"
+#include <map>
+#include <string>
+#include <algorithm>
+
+#include "../db/commands.h"
+#include "../db/jsobj.h"
+#include "../db/cmdline.h"
+#include "../db/queryoptimizer.h"
+#include "../db/btree.h"
+#include "../db/repl_block.h"
+#include "../db/dur.h"
+#include "../db/clientcursor.h"
+
+#include "../client/connpool.h"
+#include "../client/distlock.h"
+
+#include "../util/queue.h"
+#include "../util/unittest.h"
+#include "../util/processinfo.h"
+#include "../util/ramlog.h"
+
+#include "shard.h"
+#include "d_logic.h"
+#include "config.h"
+#include "chunk.h"
+
+using namespace std;
+
+namespace mongo {
+
+ Tee* migrateLog = new RamLog( "migrate" );
+
+ class MoveTimingHelper {
+ public:
+ MoveTimingHelper( const string& where , const string& ns , BSONObj min , BSONObj max , int total )
+ : _where( where ) , _ns( ns ) , _next( 0 ) , _total( total ) {
+ _nextNote = 0;
+ _b.append( "min" , min );
+ _b.append( "max" , max );
+ }
+
+ ~MoveTimingHelper() {
+ // even if logChange doesn't throw, bson does
+ // sigh
+ try {
+ if ( _next != _total ) {
+ note( "aborted" );
+ }
+ configServer.logChange( (string)"moveChunk." + _where , _ns, _b.obj() );
+ }
+ catch ( const std::exception& e ) {
+ warning() << "couldn't record timing for moveChunk '" << _where << "': " << e.what() << migrateLog;
+ }
+ }
+
+ void done( int step ) {
+ assert( step == ++_next );
+ assert( step <= _total );
+
+ stringstream ss;
+ ss << "step" << step;
+ string s = ss.str();
+
+ CurOp * op = cc().curop();
+ if ( op )
+ op->setMessage( s.c_str() );
+ else
+ warning() << "op is null in MoveTimingHelper::done" << migrateLog;
+
+ _b.appendNumber( s , _t.millis() );
+ _t.reset();
+
+#if 0
+ // debugging for memory leak?
+ ProcessInfo pi;
+ ss << " v:" << pi.getVirtualMemorySize()
+ << " r:" << pi.getResidentSize();
+ log() << ss.str() << migrateLog;
+#endif
+ }
+
+
+ void note( const string& s ) {
+ string field = "note";
+ if ( _nextNote > 0 ) {
+ StringBuilder buf;
+ buf << "note" << _nextNote;
+ field = buf.str();
+ }
+ _nextNote++;
+
+ _b.append( field , s );
+ }
+
+ private:
+ Timer _t;
+
+ string _where;
+ string _ns;
+
+ int _next;
+ int _total; // expected # of steps
+ int _nextNote;
+
+ BSONObjBuilder _b;
+
+ };
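+
+    // Used roughly like this by the migration commands (sketch):
+    //     MoveTimingHelper timing( "from" , ns , min , max , 6 );   // 6 = total steps
+    //     ... timing.done( 1 ); ... timing.done( 6 );
+    // The destructor writes the accumulated step timings to the config changelog via
+    // configServer.logChange(), noting "aborted" if not all steps completed.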
+
+ struct OldDataCleanup {
+ static AtomicUInt _numThreads; // how many threads are doing async cleanup
+
+ string ns;
+ BSONObj min;
+ BSONObj max;
+ set<CursorId> initial;
+
+ OldDataCleanup(){
+ _numThreads++;
+ }
+ OldDataCleanup( const OldDataCleanup& other ) {
+ ns = other.ns;
+ min = other.min.getOwned();
+ max = other.max.getOwned();
+ initial = other.initial;
+ _numThreads++;
+ }
+ ~OldDataCleanup(){
+ _numThreads--;
+ }
+
+ string toString() const {
+ return str::stream() << ns << " from " << min << " -> " << max;
+ }
+
+ void doRemove() {
+ ShardForceVersionOkModeBlock sf;
+ {
+ writelock lk(ns);
+ RemoveSaver rs("moveChunk",ns,"post-cleanup");
+ long long numDeleted = Helpers::removeRange( ns , min , max , true , false , cmdLine.moveParanoia ? &rs : 0 );
+ log() << "moveChunk deleted: " << numDeleted << migrateLog;
+ }
+
+
+ ReplTime lastOpApplied = cc().getLastOp().asDate();
+ Timer t;
+ for ( int i=0; i<3600; i++ ) {
+ if ( opReplicatedEnough( lastOpApplied , ( getSlaveCount() / 2 ) + 1 ) ) {
+ LOG(t.seconds() < 30 ? 1 : 0) << "moveChunk repl sync took " << t.seconds() << " seconds" << migrateLog;
+ return;
+ }
+ sleepsecs(1);
+ }
+
+ warning() << "moveChunk repl sync timed out after " << t.seconds() << " seconds" << migrateLog;
+ }
+
+ };
+
+ AtomicUInt OldDataCleanup::_numThreads = 0;
+
+ static const char * const cleanUpThreadName = "cleanupOldData";
+
+ class ChunkCommandHelper : public Command {
+ public:
+ ChunkCommandHelper( const char * name )
+ : Command( name ) {
+ }
+
+ virtual void help( stringstream& help ) const {
+            help << "internal - should not be called directly";
+ }
+ virtual bool slaveOk() const { return false; }
+ virtual bool adminOnly() const { return true; }
+ virtual LockType locktype() const { return NONE; }
+
+ };
+
+ bool isInRange( const BSONObj& obj , const BSONObj& min , const BSONObj& max ) {
+ BSONObj k = obj.extractFields( min, true );
+
+ return k.woCompare( min ) >= 0 && k.woCompare( max ) < 0;
+ }
+
+
+ class MigrateFromStatus {
+ public:
+
+ MigrateFromStatus() : _m("MigrateFromStatus") , _workLock("MigrateFromStatus::workLock") {
+ _active = false;
+ _inCriticalSection = false;
+ _memoryUsed = 0;
+ }
+
+ void start( string ns , const BSONObj& min , const BSONObj& max ) {
+ scoped_lock ll(_workLock);
+ scoped_lock l(_m); // reads and writes _active
+
+ assert( ! _active );
+
+ assert( ! min.isEmpty() );
+ assert( ! max.isEmpty() );
+ assert( ns.size() );
+
+ _ns = ns;
+ _min = min;
+ _max = max;
+
+ assert( _cloneLocs.size() == 0 );
+ assert( _deleted.size() == 0 );
+ assert( _reload.size() == 0 );
+ assert( _memoryUsed == 0 );
+
+ _active = true;
+ }
+
+ void done() {
+ readlock lk( _ns );
+
+ {
+ scoped_spinlock lk( _trackerLocks );
+ _deleted.clear();
+ _reload.clear();
+ _cloneLocs.clear();
+ }
+ _memoryUsed = 0;
+
+ scoped_lock l(_m);
+ _active = false;
+ _inCriticalSection = false;
+ }
+
+ void logOp( const char * opstr , const char * ns , const BSONObj& obj , BSONObj * patt ) {
+ if ( ! _getActive() )
+ return;
+
+ if ( _ns != ns )
+ return;
+
+ // no need to log if this is not an insertion, an update, or an actual deletion
+ // note: opstr 'db' isn't a deletion but a mention that a database exists (for replication
+ // machinery mostly)
+ char op = opstr[0];
+ if ( op == 'n' || op =='c' || ( op == 'd' && opstr[1] == 'b' ) )
+ return;
+
+ BSONElement ide;
+ if ( patt )
+ ide = patt->getField( "_id" );
+ else
+ ide = obj["_id"];
+
+ if ( ide.eoo() ) {
+ warning() << "logOpForSharding got mod with no _id, ignoring obj: " << obj << migrateLog;
+ return;
+ }
+
+ BSONObj it;
+
+ switch ( opstr[0] ) {
+
+ case 'd': {
+
+ if ( getThreadName() == cleanUpThreadName ) {
+ // we don't want to xfer things we're cleaning
+ // as then they'll be deleted on TO
+ // which is bad
+ return;
+ }
+
+ // can't filter deletes :(
+ _deleted.push_back( ide.wrap() );
+ _memoryUsed += ide.size() + 5;
+ return;
+ }
+
+ case 'i':
+ it = obj;
+ break;
+
+ case 'u':
+ if ( ! Helpers::findById( cc() , _ns.c_str() , ide.wrap() , it ) ) {
+ warning() << "logOpForSharding couldn't find: " << ide << " even though should have" << migrateLog;
+ return;
+ }
+ break;
+
+ }
+
+ if ( ! isInRange( it , _min , _max ) )
+ return;
+
+ _reload.push_back( ide.wrap() );
+ _memoryUsed += ide.size() + 5;
+ }
+
+ void xfer( list<BSONObj> * l , BSONObjBuilder& b , const char * name , long long& size , bool explode ) {
+ const long long maxSize = 1024 * 1024;
+
+ if ( l->size() == 0 || size > maxSize )
+ return;
+
+ BSONArrayBuilder arr(b.subarrayStart(name));
+
+ list<BSONObj>::iterator i = l->begin();
+
+ while ( i != l->end() && size < maxSize ) {
+ BSONObj t = *i;
+ if ( explode ) {
+ BSONObj it;
+ if ( Helpers::findById( cc() , _ns.c_str() , t, it ) ) {
+ arr.append( it );
+ size += it.objsize();
+ }
+ }
+ else {
+ arr.append( t );
+ }
+ i = l->erase( i );
+ size += t.objsize();
+ }
+
+ arr.done();
+ }
+
+ /**
+ * called from the dest of a migrate
+ * transfers mods from src to dest
+ */
+ bool transferMods( string& errmsg , BSONObjBuilder& b ) {
+ if ( ! _getActive() ) {
+ errmsg = "no active migration!";
+ return false;
+ }
+
+ long long size = 0;
+
+ {
+ readlock rl( _ns );
+ Client::Context cx( _ns );
+
+ xfer( &_deleted , b , "deleted" , size , false );
+ xfer( &_reload , b , "reload" , size , true );
+ }
+
+ b.append( "size" , size );
+
+ return true;
+ }
+
+ /**
+ * Get the disklocs that belong to the chunk migrated and sort them in _cloneLocs (to avoid seeking disk later)
+ *
+ * @param maxChunkSize number of bytes beyond which a chunk's base data (no indices) is considered too large to move
+ * @param errmsg filled with textual description of error if this call return false
+ * @return false if approximate chunk size is too big to move or true otherwise
+ */
+ bool storeCurrentLocs( long long maxChunkSize , string& errmsg , BSONObjBuilder& result ) {
+ readlock l( _ns );
+ Client::Context ctx( _ns );
+ NamespaceDetails *d = nsdetails( _ns.c_str() );
+ if ( ! d ) {
+ errmsg = "ns not found, should be impossible";
+ return false;
+ }
+
+ BSONObj keyPattern;
+ // the copies are needed because the indexDetailsForRange destroys the input
+ BSONObj min = _min.copy();
+ BSONObj max = _max.copy();
+ IndexDetails *idx = indexDetailsForRange( _ns.c_str() , errmsg , min , max , keyPattern );
+ if ( idx == NULL ) {
+ errmsg = (string)"can't find index in storeCurrentLocs" + causedBy( errmsg );
+ return false;
+ }
+
+ auto_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout ,
+ shared_ptr<Cursor>( BtreeCursor::make( d , d->idxNo(*idx) , *idx , min , max , false , 1 ) ) ,
+ _ns ) );
+
+ // use the average object size to estimate how many objects a full chunk would carry
+ // do that while traversing the chunk's range using the sharding index, below
+ // there's a fair amount of slack before we determine a chunk is too large because object sizes will vary
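+ // illustrative numbers only (not from the source): with maxChunkSize = 64MB and an average
+ // record size of 1KB, maxRecsWhenFull = 1.3 * (67108864 / 1024) ~= 85196 records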
+ unsigned long long maxRecsWhenFull;
+ long long avgRecSize;
+ const long long totalRecs = d->stats.nrecords;
+ if ( totalRecs > 0 ) {
+ avgRecSize = d->stats.datasize / totalRecs;
+ maxRecsWhenFull = maxChunkSize / avgRecSize;
+ maxRecsWhenFull = 130 * maxRecsWhenFull / 100; // slack
+ }
+ else {
+ avgRecSize = 0;
+ maxRecsWhenFull = numeric_limits<long long>::max();
+ }
+
+ // do a full traversal of the chunk and don't stop even if we think it is a large chunk
+ // we want the full record count so we can report it accurately in that case
+ bool isLargeChunk = false;
+ unsigned long long recCount = 0;
+ while ( cc->ok() ) {
+ DiskLoc dl = cc->currLoc();
+ if ( ! isLargeChunk ) {
+ scoped_spinlock lk( _trackerLocks );
+ _cloneLocs.insert( dl );
+ }
+ cc->advance();
+
+ // we can afford to yield here because any change to the base data that we might miss is already being
+ // queued and will be migrated in the 'transferMods' stage
+ if ( ! cc->yieldSometimes( ClientCursor::DontNeed ) ) {
+ cc.release();
+ break;
+ }
+
+ if ( ++recCount > maxRecsWhenFull ) {
+ isLargeChunk = true;
+ }
+ }
+
+ if ( isLargeChunk ) {
+ warning() << "can't move chunk of size (approximately) " << recCount * avgRecSize
+ << " because maximum size allowed to move is " << maxChunkSize
+ << " ns: " << _ns << " " << _min << " -> " << _max
+ << migrateLog;
+ result.appendBool( "chunkTooBig" , true );
+ result.appendNumber( "estimatedChunkSize" , (long long)(recCount * avgRecSize) );
+ errmsg = "chunk too big to move";
+ return false;
+ }
+
+ {
+ scoped_spinlock lk( _trackerLocks );
+ log() << "moveChunk number of documents: " << _cloneLocs.size() << migrateLog;
+ }
+ return true;
+ }
+
+ bool clone( string& errmsg , BSONObjBuilder& result ) {
+ if ( ! _getActive() ) {
+ errmsg = "not active";
+ return false;
+ }
+
+ ElapsedTracker tracker (128, 10); // same as ClientCursor::_yieldSometimesTracker
+
+ int allocSize;
+ {
+ readlock l(_ns);
+ Client::Context ctx( _ns );
+ NamespaceDetails *d = nsdetails( _ns.c_str() );
+ assert( d );
+ scoped_spinlock lk( _trackerLocks );
+ allocSize = std::min(BSONObjMaxUserSize, (int)((12 + d->averageObjectSize()) * _cloneLocs.size()));
+ }
+ BSONArrayBuilder a (allocSize);
+
+ while ( 1 ) {
+ bool filledBuffer = false;
+
+ readlock l( _ns );
+ Client::Context ctx( _ns );
+ scoped_spinlock lk( _trackerLocks );
+ set<DiskLoc>::iterator i = _cloneLocs.begin();
+ for ( ; i!=_cloneLocs.end(); ++i ) {
+ if (tracker.intervalHasElapsed()) // should I yield?
+ break;
+
+ DiskLoc dl = *i;
+ BSONObj o = dl.obj();
+
+ // use the builder size instead of accumulating 'o's size so that we take into consideration
+ // the overhead of BSONArray indices
+ if ( a.len() + o.objsize() + 1024 > BSONObjMaxUserSize ) {
+ filledBuffer = true; // break out of outer while loop
+ break;
+ }
+
+ a.append( o );
+ }
+
+ _cloneLocs.erase( _cloneLocs.begin() , i );
+
+ if ( _cloneLocs.empty() || filledBuffer )
+ break;
+ }
+
+ result.appendArray( "objects" , a.arr() );
+ return true;
+ }
+
+ void aboutToDelete( const Database* db , const DiskLoc& dl ) {
+ d.dbMutex.assertWriteLocked();
+
+ if ( ! _getActive() )
+ return;
+
+ if ( ! db->ownsNS( _ns ) )
+ return;
+
+
+ // not needed right now
+ // but trying to prevent a future bug
+ scoped_spinlock lk( _trackerLocks );
+
+ _cloneLocs.erase( dl );
+ }
+
+ long long mbUsed() const { return _memoryUsed / ( 1024 * 1024 ); }
+
+ bool getInCriticalSection() const { scoped_lock l(_m); return _inCriticalSection; }
+ void setInCriticalSection( bool b ) { scoped_lock l(_m); _inCriticalSection = b; }
+
+ bool isActive() const { return _getActive(); }
+
+ void doRemove( OldDataCleanup& cleanup ) {
+ int it = 0;
+ while ( true ) {
+ if ( it > 20 && it % 10 == 0 ) log() << "doRemove iteration " << it << " for: " << cleanup << endl;
+ {
+ scoped_lock ll(_workLock);
+ if ( ! _active ) {
+ cleanup.doRemove();
+ return;
+ }
+ }
+ sleepmillis( 1000 );
+ }
+ }
+
+ private:
+ mutable mongo::mutex _m; // protect _inCriticalSection and _active
+ bool _inCriticalSection;
+ bool _active;
+
+ string _ns;
+ BSONObj _min;
+ BSONObj _max;
+
+ // we need the lock in case there is a malicious _migrateClone for example
+ // even though it shouldn't be needed under normal operation
+ SpinLock _trackerLocks;
+
+ // disk locs yet to be transferred from here to the other side
+ // no locking needed because built initially by 1 thread in a read lock
+ // emptied by 1 thread in a read lock
+ // updates applied by 1 thread in a write lock
+ set<DiskLoc> _cloneLocs;
+
+ list<BSONObj> _reload; // objects that were modified that must be recloned
+ list<BSONObj> _deleted; // objects deleted during clone that should be deleted later
+ long long _memoryUsed; // bytes in _reload + _deleted
+
+ mutable mongo::mutex _workLock; // this is used to make sure only 1 thread is doing serious work
+ // for now, this means migrate or removing old chunk data
+
+ bool _getActive() const { scoped_lock l(_m); return _active; }
+ void _setActive( bool b ) { scoped_lock l(_m); _active = b; }
+
+ } migrateFromStatus;
+
+ struct MigrateStatusHolder {
+ MigrateStatusHolder( string ns , const BSONObj& min , const BSONObj& max ) {
+ migrateFromStatus.start( ns , min , max );
+ }
+ ~MigrateStatusHolder() {
+ migrateFromStatus.done();
+ }
+ };
+
+ void _cleanupOldData( OldDataCleanup cleanup ) {
+ Client::initThread( cleanUpThreadName );
+ if (!noauth) {
+ cc().getAuthenticationInfo()->authorize("local", internalSecurity.user);
+ }
+ log() << " (start) waiting to cleanup " << cleanup << " # cursors:" << cleanup.initial.size() << migrateLog;
+
+ int loops = 0;
+ Timer t;
+ while ( t.seconds() < 900 ) { // 15 minutes
+ assert( d.dbMutex.getState() == 0 );
+ sleepmillis( 20 );
+
+ set<CursorId> now;
+ ClientCursor::find( cleanup.ns , now );
+
+ set<CursorId> left;
+ for ( set<CursorId>::iterator i=cleanup.initial.begin(); i!=cleanup.initial.end(); ++i ) {
+ CursorId id = *i;
+ if ( now.count(id) )
+ left.insert( id );
+ }
+
+ if ( left.size() == 0 )
+ break;
+ cleanup.initial = left;
+
+ if ( ( loops++ % 200 ) == 0 ) {
+ log() << " (looping " << loops << ") waiting to cleanup " << cleanup.ns << " from " << cleanup.min << " -> " << cleanup.max << " # cursors:" << cleanup.initial.size() << migrateLog;
+
+ stringstream ss;
+ for ( set<CursorId>::iterator i=cleanup.initial.begin(); i!=cleanup.initial.end(); ++i ) {
+ CursorId id = *i;
+ ss << id << " ";
+ }
+ log() << " cursors: " << ss.str() << migrateLog;
+ }
+ }
+
+ migrateFromStatus.doRemove( cleanup );
+
+ cc().shutdown();
+ }
+
+ void cleanupOldData( OldDataCleanup cleanup ) {
+ try {
+ _cleanupOldData( cleanup );
+ }
+ catch ( std::exception& e ) {
+ log() << " error cleaning old data:" << e.what() << migrateLog;
+ }
+ catch ( ... ) {
+ log() << " unknown error cleaning old data" << migrateLog;
+ }
+ }
+
+ void logOpForSharding( const char * opstr , const char * ns , const BSONObj& obj , BSONObj * patt ) {
+ migrateFromStatus.logOp( opstr , ns , obj , patt );
+ }
+
+ void aboutToDeleteForSharding( const Database* db , const DiskLoc& dl ) {
+ migrateFromStatus.aboutToDelete( db , dl );
+ }
+
+ class TransferModsCommand : public ChunkCommandHelper {
+ public:
+ TransferModsCommand() : ChunkCommandHelper( "_transferMods" ) {}
+
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ return migrateFromStatus.transferMods( errmsg, result );
+ }
+ } transferModsCommand;
+
+
+ class InitialCloneCommand : public ChunkCommandHelper {
+ public:
+ InitialCloneCommand() : ChunkCommandHelper( "_migrateClone" ) {}
+
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ return migrateFromStatus.clone( errmsg, result );
+ }
+ } initialCloneCommand;
+
+
+ /**
+ * this is the main entry for moveChunk
+ * called to initiate a move
+ * usually by a mongos
+ * this is called on the "from" side
+ */
+ class MoveChunkCommand : public Command {
+ public:
+ MoveChunkCommand() : Command( "moveChunk" ) {}
+ virtual void help( stringstream& help ) const {
+ help << "should not be calling this directly" << migrateLog;
+ }
+
+ virtual bool slaveOk() const { return false; }
+ virtual bool adminOnly() const { return true; }
+ virtual LockType locktype() const { return NONE; }
+
+
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ // 1. parse options
+ // 2. make sure my view is complete and lock
+ // 3. start migrate
+ // in a read lock, get all DiskLoc and sort so we can do as little seeking as possible
+ // tell the recipient shard to start transferring
+ // 4. pause till migrate caught up
+ // 5. LOCK
+ // a) update my config, essentially locking
+ // b) finish migrate
+ // c) update config server
+ // d) logChange to config server
+ // 6. wait for all current cursors to expire
+ // 7. remove data locally
+
+ // -------------------------------
+
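+ // For illustration only (values are assumed, not taken from the source): a request from a mongos
+ // might look roughly like
+ //   { moveChunk : "test.foo" , from : "shard0000:27018" , to : "shard0001:27018" ,
+ //     min : { x : 0 } , max : { x : 100 } , shardId : <chunk _id> ,
+ //     maxChunkSizeBytes : 67108864 , configdb : "cfgA:27019" }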
+ // 1.
+ string ns = cmdObj.firstElement().str();
+ string to = cmdObj["to"].str();
+ string from = cmdObj["from"].str(); // my public address, a tad redundant, but safe
+ BSONObj min = cmdObj["min"].Obj();
+ BSONObj max = cmdObj["max"].Obj();
+ BSONElement shardId = cmdObj["shardId"];
+ BSONElement maxSizeElem = cmdObj["maxChunkSizeBytes"];
+
+ if ( ns.empty() ) {
+ errmsg = "need to specify namespace in command";
+ return false;
+ }
+
+ if ( to.empty() ) {
+ errmsg = "need to specify server to move chunk to";
+ return false;
+ }
+ if ( from.empty() ) {
+ errmsg = "need to specify server to move chunk from";
+ return false;
+ }
+
+ if ( min.isEmpty() ) {
+ errmsg = "need to specify a min";
+ return false;
+ }
+
+ if ( max.isEmpty() ) {
+ errmsg = "need to specify a max";
+ return false;
+ }
+
+ if ( shardId.eoo() ) {
+ errmsg = "need shardId";
+ return false;
+ }
+
+ if ( maxSizeElem.eoo() || ! maxSizeElem.isNumber() ) {
+ errmsg = "need to specify maxChunkSizeBytes";
+ return false;
+ }
+ const long long maxChunkSize = maxSizeElem.numberLong(); // in bytes
+
+ if ( ! shardingState.enabled() ) {
+ if ( cmdObj["configdb"].type() != String ) {
+ errmsg = "sharding not enabled";
+ return false;
+ }
+ string configdb = cmdObj["configdb"].String();
+ shardingState.enable( configdb );
+ configServer.init( configdb );
+ }
+
+ MoveTimingHelper timing( "from" , ns , min , max , 6 /* steps */);
+
+ Shard fromShard( from );
+ Shard toShard( to );
+
+ log() << "received moveChunk request: " << cmdObj << migrateLog;
+
+ timing.done(1);
+
+ // 2.
+
+ if ( migrateFromStatus.isActive() ) {
+ errmsg = "migration already in progress";
+ return false;
+ }
+
+ DistributedLock lockSetup( ConnectionString( shardingState.getConfigServer() , ConnectionString::SYNC ) , ns );
+ dist_lock_try dlk;
+
+ try{
+ dlk = dist_lock_try( &lockSetup , (string)"migrate-" + min.toString() );
+ }
+ catch( LockException& e ){
+ errmsg = str::stream() << "error locking distributed lock for migration " << "migrate-" << min.toString() << causedBy( e );
+ return false;
+ }
+
+ if ( ! dlk.got() ) {
+ errmsg = str::stream() << "the collection metadata could not be locked with lock " << "migrate-" << min.toString();
+ result.append( "who" , dlk.other() );
+ return false;
+ }
+
+ BSONObj chunkInfo = BSON("min" << min << "max" << max << "from" << fromShard.getName() << "to" << toShard.getName());
+ configServer.logChange( "moveChunk.start" , ns , chunkInfo );
+
+ ShardChunkVersion maxVersion;
+ string myOldShard;
+ {
+ ScopedDbConnection conn( shardingState.getConfigServer() );
+
+ BSONObj x;
+ BSONObj currChunk;
+ try{
+ x = conn->findOne( ShardNS::chunk , Query( BSON( "ns" << ns ) ).sort( BSON( "lastmod" << -1 ) ) );
+ currChunk = conn->findOne( ShardNS::chunk , shardId.wrap( "_id" ) );
+ }
+ catch( DBException& e ){
+ errmsg = str::stream() << "aborted moveChunk because could not get chunk data from config server " << shardingState.getConfigServer() << causedBy( e );
+ warning() << errmsg << endl;
+ return false;
+ }
+
+ maxVersion = x["lastmod"];
+ assert( currChunk["shard"].type() );
+ assert( currChunk["min"].type() );
+ assert( currChunk["max"].type() );
+ myOldShard = currChunk["shard"].String();
+ conn.done();
+
+ BSONObj currMin = currChunk["min"].Obj();
+ BSONObj currMax = currChunk["max"].Obj();
+ if ( currMin.woCompare( min ) || currMax.woCompare( max ) ) {
+ errmsg = "boundaries are outdated (likely a split occurred)";
+ result.append( "currMin" , currMin );
+ result.append( "currMax" , currMax );
+ result.append( "requestedMin" , min );
+ result.append( "requestedMax" , max );
+
+ warning() << "aborted moveChunk because" << errmsg << ": " << min << "->" << max
+ << " is now " << currMin << "->" << currMax << migrateLog;
+ return false;
+ }
+
+ if ( myOldShard != fromShard.getName() ) {
+ errmsg = "location is outdated (likely balance or migrate occurred)";
+ result.append( "from" , fromShard.getName() );
+ result.append( "official" , myOldShard );
+
+ warning() << "aborted moveChunk because " << errmsg << ": chunk is at " << myOldShard
+ << " and not at " << fromShard.getName() << migrateLog;
+ return false;
+ }
+
+ if ( maxVersion < shardingState.getVersion( ns ) ) {
+ errmsg = "official version less than mine?";
+ result.appendTimestamp( "officialVersion" , maxVersion );
+ result.appendTimestamp( "myVersion" , shardingState.getVersion( ns ) );
+
+ warning() << "aborted moveChunk because " << errmsg << ": official " << maxVersion
+ << " mine: " << shardingState.getVersion(ns) << migrateLog;
+ return false;
+ }
+
+ // since this could be the first call that enables sharding we also make sure to have the chunk manager up to date
+ shardingState.gotShardName( myOldShard );
+ ShardChunkVersion shardVersion;
+ shardingState.trySetVersion( ns , shardVersion /* will return updated */ );
+
+ log() << "moveChunk request accepted at version " << shardVersion << migrateLog;
+ }
+
+ timing.done(2);
+
+ // 3.
+ MigrateStatusHolder statusHolder( ns , min , max );
+ {
+ // this gets a read lock, so we know we have a checkpoint for mods
+ if ( ! migrateFromStatus.storeCurrentLocs( maxChunkSize , errmsg , result ) )
+ return false;
+
+ ScopedDbConnection connTo( to );
+ BSONObj res;
+ bool ok;
+ try{
+ ok = connTo->runCommand( "admin" ,
+ BSON( "_recvChunkStart" << ns <<
+ "from" << from <<
+ "min" << min <<
+ "max" << max <<
+ "configServer" << configServer.modelServer()
+ ) ,
+ res );
+ }
+ catch( DBException& e ){
+ errmsg = str::stream() << "moveChunk could not contact to: shard " << to << " to start transfer" << causedBy( e );
+ warning() << errmsg << endl;
+ return false;
+ }
+
+ connTo.done();
+
+ if ( ! ok ) {
+ errmsg = "moveChunk failed to engage TO-shard in the data transfer: ";
+ assert( res["errmsg"].type() );
+ errmsg += res["errmsg"].String();
+ result.append( "cause" , res );
+ return false;
+ }
+
+ }
+ timing.done( 3 );
+
+ // 4.
+ for ( int i=0; i<86400; i++ ) { // don't want a single chunk move to take more than a day
+ assert( d.dbMutex.getState() == 0 );
+ sleepsecs( 1 );
+ ScopedDbConnection conn( to );
+ BSONObj res;
+ bool ok;
+ try {
+ ok = conn->runCommand( "admin" , BSON( "_recvChunkStatus" << 1 ) , res );
+ res = res.getOwned();
+ }
+ catch( DBException& e ){
+ errmsg = str::stream() << "moveChunk could not contact to: shard " << to << " to monitor transfer" << causedBy( e );
+ warning() << errmsg << endl;
+ return false;
+ }
+
+ conn.done();
+
+ log(0) << "moveChunk data transfer progress: " << res << " my mem used: " << migrateFromStatus.mbUsed() << migrateLog;
+
+ if ( ! ok || res["state"].String() == "fail" ) {
+ warning() << "moveChunk error transferring data caused migration abort: " << res << migrateLog;
+ errmsg = "data transfer error";
+ result.append( "cause" , res );
+ return false;
+ }
+
+ if ( res["state"].String() == "steady" )
+ break;
+
+ if ( migrateFromStatus.mbUsed() > 500 ) { // mbUsed() is in MB, so this caps buffered mods at roughly 500MB
+ // this is too much memory for us to use for this
+ // so we're going to abort the migrate
+ ScopedDbConnection conn( to );
+ BSONObj res;
+ conn->runCommand( "admin" , BSON( "_recvChunkAbort" << 1 ) , res );
+ res = res.getOwned();
+ conn.done();
+ error() << "aborting migrate because too much memory used res: " << res << migrateLog;
+ errmsg = "aborting migrate because too much memory used";
+ result.appendBool( "split" , true );
+ return false;
+ }
+
+ killCurrentOp.checkForInterrupt();
+ }
+ timing.done(4);
+
+ // 5.
+ {
+ // 5.a
+ // we're under the collection lock here, so no other migrate can change maxVersion or ShardChunkManager state
+ migrateFromStatus.setInCriticalSection( true );
+ ShardChunkVersion currVersion = maxVersion;
+ ShardChunkVersion myVersion = currVersion;
+ myVersion.incMajor();
+
+ {
+ writelock lk( ns );
+ assert( myVersion > shardingState.getVersion( ns ) );
+
+ // bump the chunks manager's version up and "forget" about the chunk being moved
+ // this is not the commit point, but in practice the state in this shard won't change until the commit is done
+ shardingState.donateChunk( ns , min , max , myVersion );
+ }
+
+ log() << "moveChunk setting version to: " << myVersion << migrateLog;
+
+ // 5.b
+ // we're under the collection lock here, too, so we can undo the chunk donation because no other state change
+ // could be ongoing
+ {
+ BSONObj res;
+ ScopedDbConnection connTo( to );
+ bool ok;
+
+ try{
+ ok = connTo->runCommand( "admin" ,
+ BSON( "_recvChunkCommit" << 1 ) ,
+ res );
+ }
+ catch( DBException& e ){
+ errmsg = str::stream() << "moveChunk could not contact to: shard " << to << " to commit transfer" << causedBy( e );
+ warning() << errmsg << endl;
+ return false;
+ }
+
+ connTo.done();
+
+ if ( ! ok ) {
+ {
+ writelock lk( ns );
+
+ // revert the chunk manager back to the state before "forgetting" about the chunk
+ shardingState.undoDonateChunk( ns , min , max , currVersion );
+ }
+
+ log() << "moveChunk migrate commit not accepted by TO-shard: " << res
+ << " resetting shard version to: " << currVersion << migrateLog;
+
+ errmsg = "_recvChunkCommit failed!";
+ result.append( "cause" , res );
+ return false;
+ }
+
+ log() << "moveChunk migrate commit accepted by TO-shard: " << res << migrateLog;
+ }
+
+ // 5.c
+
+ // version at which the next highest lastmod will be set
+ // if the chunk being moved is the last in the shard, nextVersion is that chunk's lastmod
+ // otherwise the highest version is from the chunk being bumped on the FROM-shard
+ ShardChunkVersion nextVersion;
+
+ // we want to go to the configDB only once, but perhaps change two chunks: the one being migrated and another
+ // local one (so as to bump the version for the entire shard)
+ // we use the 'applyOps' mechanism to group the two updates and make them safer
+ // TODO pull config update code to a module
+
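+ // For illustration only (an assumed sketch of what cmdBuilder produces below, field values elided):
+ //   { applyOps : [ { op : "u" , b : false , ns : <ShardNS::chunk> ,
+ //                    o : { _id , lastmod , ns , min , max , shard } ,
+ //                    o2 : { _id } } ,
+ //                  <optionally a second update bumping a chunk that stays on this shard> ] ,
+ //     preCondition : [ { ns : <ShardNS::chunk> , q : <query for the highest lastmod> ,
+ //                        res : { lastmod : <maxVersion> } } ] }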
+ BSONObjBuilder cmdBuilder;
+
+ BSONArrayBuilder updates( cmdBuilder.subarrayStart( "applyOps" ) );
+ {
+ // update for the chunk being moved
+ BSONObjBuilder op;
+ op.append( "op" , "u" );
+ op.appendBool( "b" , false /* no upserting */ );
+ op.append( "ns" , ShardNS::chunk );
+
+ BSONObjBuilder n( op.subobjStart( "o" ) );
+ n.append( "_id" , Chunk::genID( ns , min ) );
+ n.appendTimestamp( "lastmod" , myVersion /* same as used on donateChunk */ );
+ n.append( "ns" , ns );
+ n.append( "min" , min );
+ n.append( "max" , max );
+ n.append( "shard" , toShard.getName() );
+ n.done();
+
+ BSONObjBuilder q( op.subobjStart( "o2" ) );
+ q.append( "_id" , Chunk::genID( ns , min ) );
+ q.done();
+
+ updates.append( op.obj() );
+ }
+
+ nextVersion = myVersion;
+
+ // if we have chunks left on the FROM shard, update the version of one of them as well
+ // we can figure that out by grabbing the chunkManager installed on 5.a
+ // TODO expose that manager when installing it
+
+ ShardChunkManagerPtr chunkManager = shardingState.getShardChunkManager( ns );
+ if( chunkManager->getNumChunks() > 0 ) {
+
+ // get another chunk on that shard
+ BSONObj lookupKey;
+ BSONObj bumpMin, bumpMax;
+ do {
+ chunkManager->getNextChunk( lookupKey , &bumpMin , &bumpMax );
+ lookupKey = bumpMin;
+ }
+ while( bumpMin == min );
+
+ BSONObjBuilder op;
+ op.append( "op" , "u" );
+ op.appendBool( "b" , false );
+ op.append( "ns" , ShardNS::chunk );
+
+ nextVersion.incMinor(); // same as used on donateChunk
+ BSONObjBuilder n( op.subobjStart( "o" ) );
+ n.append( "_id" , Chunk::genID( ns , bumpMin ) );
+ n.appendTimestamp( "lastmod" , nextVersion );
+ n.append( "ns" , ns );
+ n.append( "min" , bumpMin );
+ n.append( "max" , bumpMax );
+ n.append( "shard" , fromShard.getName() );
+ n.done();
+
+ BSONObjBuilder q( op.subobjStart( "o2" ) );
+ q.append( "_id" , Chunk::genID( ns , bumpMin ) );
+ q.done();
+
+ updates.append( op.obj() );
+
+ log() << "moveChunk updating self version to: " << nextVersion << " through "
+ << bumpMin << " -> " << bumpMax << " for collection '" << ns << "'" << migrateLog;
+
+ }
+ else {
+
+ log() << "moveChunk moved last chunk out for collection '" << ns << "'" << migrateLog;
+ }
+
+ updates.done();
+
+ BSONArrayBuilder preCond( cmdBuilder.subarrayStart( "preCondition" ) );
+ {
+ BSONObjBuilder b;
+ b.append( "ns" , ShardNS::chunk );
+ b.append( "q" , BSON( "query" << BSON( "ns" << ns ) << "orderby" << BSON( "lastmod" << -1 ) ) );
+ {
+ BSONObjBuilder bb( b.subobjStart( "res" ) );
+ bb.appendTimestamp( "lastmod" , maxVersion );
+ bb.done();
+ }
+ preCond.append( b.obj() );
+ }
+
+ preCond.done();
+
+ BSONObj cmd = cmdBuilder.obj();
+ LOG(7) << "moveChunk update: " << cmd << migrateLog;
+
+ bool ok = false;
+ BSONObj cmdResult;
+ try {
+ ScopedDbConnection conn( shardingState.getConfigServer() );
+ ok = conn->runCommand( "config" , cmd , cmdResult );
+ conn.done();
+ }
+ catch ( DBException& e ) {
+ warning() << e << migrateLog;
+ ok = false;
+ BSONObjBuilder b;
+ e.getInfo().append( b );
+ cmdResult = b.obj();
+ }
+
+ if ( ! ok ) {
+
+ // this could be a blip in the connectivity
+ // wait out a few seconds and check if the commit request made it
+ //
+ // if the commit made it to the config, we'll see the chunk in the new shard and there's no action to take
+ // if the commit did not make it, currently the only way to fix this state is to bounce the mongod so
+ // that the old state (before migrating) is brought back in
+
+ warning() << "moveChunk commit outcome ongoing: " << cmd << " for command :" << cmdResult << migrateLog;
+ sleepsecs( 10 );
+
+ try {
+ ScopedDbConnection conn( shardingState.getConfigServer() );
+
+ // look for the chunk in this shard whose version got bumped
+ // we assume that if that mod made it to the config, the applyOps was successful
+ BSONObj doc = conn->findOne( ShardNS::chunk , Query(BSON( "ns" << ns )).sort( BSON("lastmod" << -1)));
+ ShardChunkVersion checkVersion = doc["lastmod"];
+
+ if ( checkVersion == nextVersion ) {
+ log() << "moveChunk commit confirmed" << migrateLog;
+
+ }
+ else {
+ error() << "moveChunk commit failed: version is at"
+ << checkVersion << " instead of " << nextVersion << migrateLog;
+ error() << "TERMINATING" << migrateLog;
+ dbexit( EXIT_SHARDING_ERROR );
+ }
+
+ conn.done();
+
+ }
+ catch ( ... ) {
+ error() << "moveChunk failed to get confirmation of commit" << migrateLog;
+ error() << "TERMINATING" << migrateLog;
+ dbexit( EXIT_SHARDING_ERROR );
+ }
+ }
+
+ migrateFromStatus.setInCriticalSection( false );
+
+ // 5.d
+ configServer.logChange( "moveChunk.commit" , ns , chunkInfo );
+ }
+
+ migrateFromStatus.done();
+ timing.done(5);
+
+ {
+ // 6.
+ OldDataCleanup c;
+ c.ns = ns;
+ c.min = min.getOwned();
+ c.max = max.getOwned();
+ ClientCursor::find( ns , c.initial );
+ if ( c.initial.size() ) {
+ log() << "forking for cleaning up chunk data" << migrateLog;
+ boost::thread t( boost::bind( &cleanupOldData , c ) );
+ }
+ else {
+ log() << "doing delete inline" << migrateLog;
+ // 7.
+ c.doRemove();
+ }
+
+
+ }
+ timing.done(6);
+
+ return true;
+
+ }
+
+ } moveChunkCmd;
+
+ bool ShardingState::inCriticalMigrateSection() {
+ return migrateFromStatus.getInCriticalSection();
+ }
+
+ /* -----
+ below this are the "to" side commands
+
+ command to initiate
+ worker thread
+ does initial clone
+ pulls initial change set
+ keeps pulling
+ keeps state
+ command to get state
+ command to "commit"
+ */
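+ // A rough sketch of how the pieces below fit together (assumed summary, see the code for details):
+ // the donor shard issues _recvChunkStart against this (recipient) shard, which spawns migrateThread;
+ // MigrateStatus::_go() then walks READY -> CLONE -> CATCHUP -> STEADY -> COMMIT_START -> DONE,
+ // with FAIL/ABORT reachable along the way; _recvChunkStatus reports progress, _recvChunkCommit
+ // triggers COMMIT_START, and _recvChunkAbort sets ABORT.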
+
+ class MigrateStatus {
+ public:
+
+ MigrateStatus() : m_active("MigrateStatus") { active = false; }
+
+ void prepare() {
+ scoped_lock l(m_active); // reading and writing 'active'
+
+ assert( ! active );
+ state = READY;
+ errmsg = "";
+
+ numCloned = 0;
+ clonedBytes = 0;
+ numCatchup = 0;
+ numSteady = 0;
+
+ active = true;
+ }
+
+ void go() {
+ try {
+ _go();
+ }
+ catch ( std::exception& e ) {
+ state = FAIL;
+ errmsg = e.what();
+ error() << "migrate failed: " << e.what() << migrateLog;
+ }
+ catch ( ... ) {
+ state = FAIL;
+ errmsg = "UNKNOWN ERROR";
+ error() << "migrate failed with unknown exception" << migrateLog;
+ }
+ setActive( false );
+ }
+
+ void _go() {
+ assert( getActive() );
+ assert( state == READY );
+ assert( ! min.isEmpty() );
+ assert( ! max.isEmpty() );
+
+ slaveCount = ( getSlaveCount() / 2 ) + 1;
+
+ MoveTimingHelper timing( "to" , ns , min , max , 5 /* steps */ );
+
+ ScopedDbConnection conn( from );
+ conn->getLastError(); // just test connection
+
+ {
+ // 1. copy indexes
+ auto_ptr<DBClientCursor> indexes = conn->getIndexes( ns );
+ vector<BSONObj> all;
+ while ( indexes->more() ) {
+ all.push_back( indexes->next().getOwned() );
+ }
+
+ writelock lk( ns );
+ Client::Context ct( ns );
+
+ string system_indexes = cc().database()->name + ".system.indexes";
+ for ( unsigned i=0; i<all.size(); i++ ) {
+ BSONObj idx = all[i];
+ theDataFileMgr.insertAndLog( system_indexes.c_str() , idx );
+ }
+
+ timing.done(1);
+ }
+
+ {
+ // 2. delete any data already in range
+ writelock lk( ns );
+ RemoveSaver rs( "moveChunk" , ns , "preCleanup" );
+ long long num = Helpers::removeRange( ns , min , max , true , false , cmdLine.moveParanoia ? &rs : 0 );
+ if ( num )
+ warning() << "moveChunkCmd deleted data already in chunk # objects: " << num << migrateLog;
+
+ timing.done(2);
+ }
+
+
+ {
+ // 3. initial bulk clone
+ state = CLONE;
+
+ while ( true ) {
+ BSONObj res;
+ if ( ! conn->runCommand( "admin" , BSON( "_migrateClone" << 1 ) , res ) ) {
+ state = FAIL;
+ errmsg = "_migrateClone failed: ";
+ errmsg += res.toString();
+ error() << errmsg << migrateLog;
+ conn.done();
+ return;
+ }
+
+ BSONObj arr = res["objects"].Obj();
+ int thisTime = 0;
+
+ BSONObjIterator i( arr );
+ while( i.more() ) {
+ BSONObj o = i.next().Obj();
+ {
+ writelock lk( ns );
+ Helpers::upsert( ns , o );
+ }
+ thisTime++;
+ numCloned++;
+ clonedBytes += o.objsize();
+ }
+
+ if ( thisTime == 0 )
+ break;
+ }
+
+ timing.done(3);
+ }
+
+ // if running on a replicated system, we'll need to flush the docs we cloned to the secondaries
+ ReplTime lastOpApplied = cc().getLastOp().asDate();
+
+ {
+ // 4. do bulk of mods
+ state = CATCHUP;
+ while ( true ) {
+ BSONObj res;
+ if ( ! conn->runCommand( "admin" , BSON( "_transferMods" << 1 ) , res ) ) {
+ state = FAIL;
+ errmsg = "_transferMods failed: ";
+ errmsg += res.toString();
+ error() << "_transferMods failed: " << res << migrateLog;
+ conn.done();
+ return;
+ }
+ if ( res["size"].number() == 0 )
+ break;
+
+ apply( res , &lastOpApplied );
+
+ const int maxIterations = 3600*50;
+ int i;
+ for ( i=0;i<maxIterations; i++) {
+ if ( state == ABORT ) {
+ timing.note( "aborted" );
+ return;
+ }
+
+ if ( opReplicatedEnough( lastOpApplied ) )
+ break;
+
+ if ( i > 100 ) {
+ warning() << "secondaries having hard time keeping up with migrate" << migrateLog;
+ }
+
+ sleepmillis( 20 );
+ }
+
+ if ( i == maxIterations ) {
+ errmsg = "secondary can't keep up with migrate";
+ error() << errmsg << migrateLog;
+ conn.done();
+ state = FAIL;
+ return;
+ }
+ }
+
+ timing.done(4);
+ }
+
+ {
+ // pause to wait for replication
+ // this will prevent us from going into the critical section until we're ready
+ Timer t;
+ while ( t.minutes() < 600 ) {
+ if ( flushPendingWrites( lastOpApplied ) )
+ break;
+ sleepsecs(1);
+ }
+ }
+
+ {
+ // 5. wait for commit
+
+ state = STEADY;
+ while ( state == STEADY || state == COMMIT_START ) {
+ BSONObj res;
+ if ( ! conn->runCommand( "admin" , BSON( "_transferMods" << 1 ) , res ) ) {
+ log() << "_transferMods failed in STEADY state: " << res << migrateLog;
+ errmsg = res.toString();
+ state = FAIL;
+ conn.done();
+ return;
+ }
+
+ if ( res["size"].number() > 0 && apply( res , &lastOpApplied ) )
+ continue;
+
+ if ( state == ABORT ) {
+ timing.note( "aborted" );
+ return;
+ }
+
+ if ( state == COMMIT_START ) {
+ if ( flushPendingWrites( lastOpApplied ) )
+ break;
+ }
+
+ sleepmillis( 10 );
+ }
+
+ if ( state == FAIL ) {
+ errmsg = "imted out waiting for commit";
+ return;
+ }
+
+ timing.done(5);
+ }
+
+ state = DONE;
+ conn.done();
+ }
+
+ void status( BSONObjBuilder& b ) {
+ b.appendBool( "active" , getActive() );
+
+ b.append( "ns" , ns );
+ b.append( "from" , from );
+ b.append( "min" , min );
+ b.append( "max" , max );
+
+ b.append( "state" , stateString() );
+ if ( state == FAIL )
+ b.append( "errmsg" , errmsg );
+ {
+ BSONObjBuilder bb( b.subobjStart( "counts" ) );
+ bb.append( "cloned" , numCloned );
+ bb.append( "clonedBytes" , clonedBytes );
+ bb.append( "catchup" , numCatchup );
+ bb.append( "steady" , numSteady );
+ bb.done();
+ }
+
+
+ }
+
+ bool apply( const BSONObj& xfer , ReplTime* lastOpApplied ) {
+ ReplTime dummy;
+ if ( lastOpApplied == NULL ) {
+ lastOpApplied = &dummy;
+ }
+
+ bool didAnything = false;
+
+ if ( xfer["deleted"].isABSONObj() ) {
+ writelock lk(ns);
+ Client::Context cx(ns);
+
+ RemoveSaver rs( "moveChunk" , ns , "removedDuring" );
+
+ BSONObjIterator i( xfer["deleted"].Obj() );
+ while ( i.more() ) {
+ BSONObj id = i.next().Obj();
+
+ // do not apply deletes if they do not belong to the chunk being migrated
+ BSONObj fullObj;
+ if ( Helpers::findById( cc() , ns.c_str() , id, fullObj ) ) {
+ if ( ! isInRange( fullObj , min , max ) ) {
+ log() << "not applying out of range deletion: " << fullObj << migrateLog;
+
+ continue;
+ }
+ }
+
+ Helpers::removeRange( ns , id , id, false , true , cmdLine.moveParanoia ? &rs : 0 );
+
+ *lastOpApplied = cx.getClient()->getLastOp().asDate();
+ didAnything = true;
+ }
+ }
+
+ if ( xfer["reload"].isABSONObj() ) {
+ writelock lk(ns);
+ Client::Context cx(ns);
+
+ BSONObjIterator i( xfer["reload"].Obj() );
+ while ( i.more() ) {
+ BSONObj it = i.next().Obj();
+
+ Helpers::upsert( ns , it );
+
+ *lastOpApplied = cx.getClient()->getLastOp().asDate();
+ didAnything = true;
+ }
+ }
+
+ return didAnything;
+ }
+
+ bool opReplicatedEnough( const ReplTime& lastOpApplied ) {
+ // if replication is on, try to force enough secondaries to catch up
+ // TODO opReplicatedEnough should eventually honor priorities and geo-awareness
+ // for now, we try to replicate to a sensible number of secondaries
+ return mongo::opReplicatedEnough( lastOpApplied , slaveCount );
+ }
+
+ bool flushPendingWrites( const ReplTime& lastOpApplied ) {
+ if ( ! opReplicatedEnough( lastOpApplied ) ) {
+ OpTime op( lastOpApplied );
+ OCCASIONALLY warning() << "migrate commit waiting for " << slaveCount
+ << " slaves for '" << ns << "' " << min << " -> " << max
+ << " waiting for: " << op
+ << migrateLog;
+ return false;
+ }
+
+ log() << "migrate commit succeeded flushing to secondaries for '" << ns << "' " << min << " -> " << max << migrateLog;
+
+ {
+ readlock lk(ns); // commitNow() currently requires it
+
+ // if durability is on, force a write to journal
+ if ( getDur().commitNow() ) {
+ log() << "migrate commit flushed to journal for '" << ns << "' " << min << " -> " << max << migrateLog;
+ }
+ }
+
+ return true;
+ }
+
+ string stateString() {
+ switch ( state ) {
+ case READY: return "ready";
+ case CLONE: return "clone";
+ case CATCHUP: return "catchup";
+ case STEADY: return "steady";
+ case COMMIT_START: return "commitStart";
+ case DONE: return "done";
+ case FAIL: return "fail";
+ case ABORT: return "abort";
+ }
+ assert(0);
+ return "";
+ }
+
+ bool startCommit() {
+ if ( state != STEADY )
+ return false;
+ state = COMMIT_START;
+
+ Timer t;
+ // we wait for the commit to succeed before giving up
+ while ( t.minutes() <= 5 ) {
+ sleepmillis(1);
+ if ( state == DONE )
+ return true;
+ }
+ state = FAIL;
+ log() << "startCommit never finished!" << migrateLog;
+ return false;
+ }
+
+ void abort() {
+ state = ABORT;
+ errmsg = "aborted";
+ }
+
+ bool getActive() const { scoped_lock l(m_active); return active; }
+ void setActive( bool b ) { scoped_lock l(m_active); active = b; }
+
+ mutable mongo::mutex m_active;
+ bool active;
+
+ string ns;
+ string from;
+
+ BSONObj min;
+ BSONObj max;
+
+ long long numCloned;
+ long long clonedBytes;
+ long long numCatchup;
+ long long numSteady;
+
+ int slaveCount;
+
+ enum State { READY , CLONE , CATCHUP , STEADY , COMMIT_START , DONE , FAIL , ABORT } state;
+ string errmsg;
+
+ } migrateStatus;
+
+ void migrateThread() {
+ Client::initThread( "migrateThread" );
+ if (!noauth) {
+ ShardedConnectionInfo::addHook();
+ cc().getAuthenticationInfo()->authorize("local", internalSecurity.user);
+ }
+ migrateStatus.go();
+ cc().shutdown();
+ }
+
+ class RecvChunkStartCommand : public ChunkCommandHelper {
+ public:
+ RecvChunkStartCommand() : ChunkCommandHelper( "_recvChunkStart" ) {}
+
+ virtual LockType locktype() const { return WRITE; } // this is so don't have to do locking internally
+
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+
+ if ( migrateStatus.getActive() ) {
+ errmsg = "migrate already in progress";
+ return false;
+ }
+
+ if ( OldDataCleanup::_numThreads > 0 ) {
+ errmsg =
+ str::stream()
+ << "still waiting for a previous migrates data to get cleaned, can't accept new chunks, num threads: "
+ << OldDataCleanup::_numThreads;
+ return false;
+ }
+
+ if ( ! configServer.ok() )
+ configServer.init( cmdObj["configServer"].String() );
+
+ migrateStatus.prepare();
+
+ migrateStatus.ns = cmdObj.firstElement().String();
+ migrateStatus.from = cmdObj["from"].String();
+ migrateStatus.min = cmdObj["min"].Obj().getOwned();
+ migrateStatus.max = cmdObj["max"].Obj().getOwned();
+
+ boost::thread m( migrateThread );
+
+ result.appendBool( "started" , true );
+ return true;
+ }
+
+ } recvChunkStartCmd;
+
+ class RecvChunkStatusCommand : public ChunkCommandHelper {
+ public:
+ RecvChunkStatusCommand() : ChunkCommandHelper( "_recvChunkStatus" ) {}
+
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ migrateStatus.status( result );
+ return true;
+ }
+
+ } recvChunkStatusCommand;
+
+ class RecvChunkCommitCommand : public ChunkCommandHelper {
+ public:
+ RecvChunkCommitCommand() : ChunkCommandHelper( "_recvChunkCommit" ) {}
+
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ bool ok = migrateStatus.startCommit();
+ migrateStatus.status( result );
+ return ok;
+ }
+
+ } recvChunkCommitCommand;
+
+ class RecvChunkAbortCommand : public ChunkCommandHelper {
+ public:
+ RecvChunkAbortCommand() : ChunkCommandHelper( "_recvChunkAbort" ) {}
+
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ migrateStatus.abort();
+ migrateStatus.status( result );
+ return true;
+ }
+
+ } recvChunkAbortCommand;
+
+
+ class IsInRangeTest : public UnitTest {
+ public:
+ void run() {
+ BSONObj min = BSON( "x" << 1 );
+ BSONObj max = BSON( "x" << 5 );
+
+ assert( ! isInRange( BSON( "x" << 0 ) , min , max ) );
+ assert( isInRange( BSON( "x" << 1 ) , min , max ) );
+ assert( isInRange( BSON( "x" << 3 ) , min , max ) );
+ assert( isInRange( BSON( "x" << 4 ) , min , max ) );
+ assert( ! isInRange( BSON( "x" << 5 ) , min , max ) );
+ assert( ! isInRange( BSON( "x" << 6 ) , min , max ) );
+
+ LOG(1) << "isInRangeTest passed" << migrateLog;
+ }
+ } isInRangeTest;
+}
diff --git a/src/mongo/s/d_split.cpp b/src/mongo/s/d_split.cpp
new file mode 100644
index 00000000000..d0ba7b44c10
--- /dev/null
+++ b/src/mongo/s/d_split.cpp
@@ -0,0 +1,830 @@
+// @file d_split.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include <map>
+#include <string>
+
+#include "../db/btree.h"
+#include "../db/commands.h"
+#include "../db/jsobj.h"
+#include "../db/instance.h"
+#include "../db/queryoptimizer.h"
+#include "../db/clientcursor.h"
+
+#include "../client/connpool.h"
+#include "../client/distlock.h"
+#include "../util/timer.h"
+
+#include "chunk.h" // for static genID only
+#include "config.h"
+#include "d_logic.h"
+
+namespace mongo {
+
+ // TODO: Fold these checks into each command.
+ static IndexDetails *cmdIndexDetailsForRange( const char *ns, string &errmsg, BSONObj &min, BSONObj &max, BSONObj &keyPattern ) {
+ if ( ns[ 0 ] == '\0' || min.isEmpty() || max.isEmpty() ) {
+ errmsg = "invalid command syntax (note: min and max are required)";
+ return 0;
+ }
+ return indexDetailsForRange( ns, errmsg, min, max, keyPattern );
+ }
+
+
+ class CmdMedianKey : public Command {
+ public:
+ CmdMedianKey() : Command( "medianKey" ) {}
+ virtual bool slaveOk() const { return true; }
+ virtual LockType locktype() const { return READ; }
+ virtual void help( stringstream &help ) const {
+ help <<
+ "Internal command.\n"
+ "example: { medianKey:\"blog.posts\", keyPattern:{x:1}, min:{x:10}, max:{x:55} }\n"
+ "NOTE: This command may take a while to run";
+ }
+ bool run(const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+ const char *ns = jsobj.getStringField( "medianKey" );
+ BSONObj min = jsobj.getObjectField( "min" );
+ BSONObj max = jsobj.getObjectField( "max" );
+ BSONObj keyPattern = jsobj.getObjectField( "keyPattern" );
+
+ Client::Context ctx( ns );
+
+ IndexDetails *id = cmdIndexDetailsForRange( ns, errmsg, min, max, keyPattern );
+ if ( id == 0 )
+ return false;
+
+ Timer timer;
+ int num = 0;
+ NamespaceDetails *d = nsdetails(ns);
+ int idxNo = d->idxNo(*id);
+
+ // only yielding on first half for now
+ // after this it should be in ram, so 2nd should be fast
+ {
+ shared_ptr<Cursor> c( BtreeCursor::make( d, idxNo, *id, min, max, false, 1 ) );
+ auto_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout , c , ns ) );
+ while ( c->ok() ) {
+ num++;
+ c->advance();
+ if ( ! cc->yieldSometimes( ClientCursor::DontNeed ) ) {
+ cc.release();
+ break;
+ }
+ }
+ }
+
+ num /= 2;
+
+ auto_ptr<BtreeCursor> _c( BtreeCursor::make( d, idxNo, *id, min, max, false, 1 ) );
+ BtreeCursor& c = *_c;
+ for( ; num; c.advance(), --num );
+
+ ostringstream os;
+ os << "Finding median for index: " << keyPattern << " between " << min << " and " << max;
+ logIfSlow( timer , os.str() );
+
+ if ( !c.ok() ) {
+ errmsg = "no index entries in the specified range";
+ return false;
+ }
+
+ BSONObj median = c.prettyKey( c.currKey() );
+ result.append( "median", median );
+
+ int x = median.woCompare( min , BSONObj() , false );
+ int y = median.woCompare( max , BSONObj() , false );
+ if ( x == 0 || y == 0 ) {
+ // it's on an edge, ok
+ }
+ else if ( x < 0 && y < 0 ) {
+ log( LL_ERROR ) << "median error (1) min: " << min << " max: " << max << " median: " << median << endl;
+ errmsg = "median error 1";
+ return false;
+ }
+ else if ( x > 0 && y > 0 ) {
+ log( LL_ERROR ) << "median error (2) min: " << min << " max: " << max << " median: " << median << endl;
+ errmsg = "median error 2";
+ return false;
+ }
+
+ return true;
+ }
+ } cmdMedianKey;
+
+ class CheckShardingIndex : public Command {
+ public:
+ CheckShardingIndex() : Command( "checkShardingIndex" , false ) {}
+ virtual bool slaveOk() const { return false; }
+ virtual LockType locktype() const { return READ; }
+ virtual void help( stringstream &help ) const {
+ help << "Internal command.\n";
+ }
+
+ bool run(const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+
+ const char* ns = jsobj.getStringField( "checkShardingIndex" );
+ BSONObj keyPattern = jsobj.getObjectField( "keyPattern" );
+
+ if ( keyPattern.nFields() == 1 && str::equals( "_id" , keyPattern.firstElementFieldName() ) ) {
+ result.appendBool( "idskip" , true );
+ return true;
+ }
+
+ // If min and max are not provided use the "minKey" and "maxKey" for the sharding key pattern.
+ BSONObj min = jsobj.getObjectField( "min" );
+ BSONObj max = jsobj.getObjectField( "max" );
+ if ( min.isEmpty() && max.isEmpty() ) {
+ BSONObjBuilder minBuilder;
+ BSONObjBuilder maxBuilder;
+ BSONForEach(key, keyPattern) {
+ minBuilder.appendMinKey( key.fieldName() );
+ maxBuilder.appendMaxKey( key.fieldName() );
+ }
+ min = minBuilder.obj();
+ max = maxBuilder.obj();
+ }
+ else if ( min.isEmpty() || max.isEmpty() ) {
+ errmsg = "either provide both min and max or leave both empty";
+ return false;
+ }
+
+ Client::Context ctx( ns );
+ NamespaceDetails *d = nsdetails( ns );
+ if ( ! d ) {
+ errmsg = "ns not found";
+ return false;
+ }
+
+ IndexDetails *idx = cmdIndexDetailsForRange( ns , errmsg , min , max , keyPattern );
+ if ( idx == NULL ) {
+ errmsg = "couldn't find index over splitting key";
+ return false;
+ }
+
+ if( d->isMultikey( d->idxNo( *idx ) ) ) {
+ errmsg = "index is multikey, cannot use for sharding";
+ return false;
+ }
+
+ BtreeCursor * bc = BtreeCursor::make( d , d->idxNo(*idx) , *idx , min , max , false , 1 );
+ shared_ptr<Cursor> c( bc );
+ auto_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout , c , ns ) );
+ if ( ! cc->ok() ) {
+ // range is empty
+ return true;
+ }
+
+ // for now, the only check is that all shard keys are filled
+ // null is ok,
+ // TODO if $exists for nulls could use the index, it could be used here instead, efficiently
+ while ( cc->ok() ) {
+ BSONObj currKey = c->currKey();
+
+ BSONObjIterator i( currKey );
+ int n = 0;
+ while ( i.more() ) {
+ BSONElement key = i.next();
+ n++;
+
+ if ( key.type() && key.type() != jstNULL )
+ continue;
+
+ BSONObj obj = c->current();
+ BSONObjIterator j( keyPattern );
+ BSONElement real;
+ for ( int x=0; x<n; x++ )
+ real = j.next();
+
+ real = obj.getFieldDotted( real.fieldName() );
+
+ if ( real.type() )
+ continue;
+
+ ostringstream os;
+ os << "found null value in key " << bc->prettyKey( currKey ) << " for doc: " << ( obj["_id"].eoo() ? obj.toString() : obj["_id"].toString() );
+ log() << "checkShardingIndex for '" << ns << "' failed: " << os.str() << endl;
+
+ errmsg = os.str();
+ return false;
+ }
+ cc->advance();
+
+ if ( ! cc->yieldSometimes( ClientCursor::DontNeed ) ) {
+ cc.release();
+ break;
+ }
+ }
+
+ return true;
+ }
+ } cmdCheckShardingIndex;
+
+ class SplitVector : public Command {
+ public:
+ SplitVector() : Command( "splitVector" , false ) {}
+ virtual bool slaveOk() const { return false; }
+ virtual LockType locktype() const { return READ; }
+ virtual void help( stringstream &help ) const {
+ help <<
+ "Internal command.\n"
+ "examples:\n"
+ " { splitVector : \"blog.post\" , keyPattern:{x:1} , min:{x:10} , max:{x:20}, maxChunkSize:200 }\n"
+ " maxChunkSize unit in MBs\n"
+ " May optionally specify 'maxSplitPoints' and 'maxChunkObjects' to avoid traversing the whole chunk\n"
+ " \n"
+ " { splitVector : \"blog.post\" , keyPattern:{x:1} , min:{x:10} , max:{x:20}, force: true }\n"
+ " 'force' will produce one split point even if data is small; defaults to false\n"
+ "NOTE: This command may take a while to run";
+ }
+
+ bool run(const string& dbname, BSONObj& jsobj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+
+ //
+ // 1.a We'll parse the parameters in two steps. First, make sure that we can use the split index to get
+ // a good approximation of the size of the chunk -- without needing to access the actual data.
+ //
+
+ const char* ns = jsobj.getStringField( "splitVector" );
+ BSONObj keyPattern = jsobj.getObjectField( "keyPattern" );
+
+ // If min and max are not provided use the "minKey" and "maxKey" for the sharding key pattern.
+ BSONObj min = jsobj.getObjectField( "min" );
+ BSONObj max = jsobj.getObjectField( "max" );
+ if ( min.isEmpty() && max.isEmpty() ) {
+ BSONObjBuilder minBuilder;
+ BSONObjBuilder maxBuilder;
+ BSONForEach(key, keyPattern) {
+ minBuilder.appendMinKey( key.fieldName() );
+ maxBuilder.appendMaxKey( key.fieldName() );
+ }
+ min = minBuilder.obj();
+ max = maxBuilder.obj();
+ }
+ else if ( min.isEmpty() || max.isEmpty() ) {
+ errmsg = "either provide both min and max or leave both empty";
+ return false;
+ }
+
+ long long maxSplitPoints = 0;
+ BSONElement maxSplitPointsElem = jsobj[ "maxSplitPoints" ];
+ if ( maxSplitPointsElem.isNumber() ) {
+ maxSplitPoints = maxSplitPointsElem.numberLong();
+ }
+
+ long long maxChunkObjects = Chunk::MaxObjectPerChunk;
+ BSONElement MaxChunkObjectsElem = jsobj[ "maxChunkObjects" ];
+ if ( MaxChunkObjectsElem.isNumber() ) {
+ maxChunkObjects = MaxChunkObjectsElem.numberLong();
+ }
+
+ vector<BSONObj> splitKeys;
+
+ {
+ // Get the size estimate for this namespace
+ Client::Context ctx( ns );
+ NamespaceDetails *d = nsdetails( ns );
+ if ( ! d ) {
+ errmsg = "ns not found";
+ return false;
+ }
+
+ IndexDetails *idx = cmdIndexDetailsForRange( ns , errmsg , min , max , keyPattern );
+ if ( idx == NULL ) {
+ errmsg = "couldn't find index over splitting key";
+ return false;
+ }
+
+ const long long recCount = d->stats.nrecords;
+ const long long dataSize = d->stats.datasize;
+
+ //
+ // 1.b Now that we have the size estimate, go over the remaining parameters and apply any maximum size
+ // restrictions specified there.
+ //
+
+ // 'force'-ing a split is equivalent to having maxChunkSize be the size of the current chunk, i.e., the
+ // logic below will split that chunk in half
+ long long maxChunkSize = 0;
+ bool force = false;
+ {
+ BSONElement maxSizeElem = jsobj[ "maxChunkSize" ];
+ BSONElement forceElem = jsobj[ "force" ];
+
+ if ( forceElem.trueValue() ) {
+ force = true;
+ maxChunkSize = dataSize;
+
+ }
+ else if ( maxSizeElem.isNumber() ) {
+ maxChunkSize = maxSizeElem.numberLong() * ( 1 << 20 );
+
+ }
+ else {
+ maxSizeElem = jsobj["maxChunkSizeBytes"];
+ if ( maxSizeElem.isNumber() ) {
+ maxChunkSize = maxSizeElem.numberLong();
+ }
+ }
+
+ if ( maxChunkSize <= 0 ) {
+ errmsg = "need to specify the desired max chunk size (maxChunkSize or maxChunkSizeBytes)";
+ return false;
+ }
+ }
+
+
+ // If there's not enough data for more than one chunk, no point continuing.
+ if ( dataSize < maxChunkSize || recCount == 0 ) {
+ vector<BSONObj> emptyVector;
+ result.append( "splitKeys" , emptyVector );
+ return true;
+ }
+
+ log() << "request split points lookup for chunk " << ns << " " << min << " -->> " << max << endl;
+
+ // We'll use the average object size and number of objects to find approximately how many keys
+ // each chunk should have. We'll split at half the maxChunkSize or maxChunkObjects, if
+ // provided.
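+ // illustrative numbers only (not from the source): dataSize = 1GB over 1M records gives
+ // avgRecSize = 1KB; with maxChunkSize = 64MB, keyCount = 67108864 / (2 * 1024) = 32768,
+ // i.e. roughly every 32768th index key becomes a candidate split point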
+ const long long avgRecSize = dataSize / recCount;
+ long long keyCount = maxChunkSize / (2 * avgRecSize);
+ if ( maxChunkObjects && ( maxChunkObjects < keyCount ) ) {
+ log() << "limiting split vector to " << maxChunkObjects << " (from " << keyCount << ") objects " << endl;
+ keyCount = maxChunkObjects;
+ }
+
+ //
+ // 2. Traverse the index and add the keyCount-th key to the result vector. If that key
+ // appeared in the vector before, we omit it. The invariant here is that all the
+ // instances of a given key value live in the same chunk.
+ //
+
+ Timer timer;
+ long long currCount = 0;
+ long long numChunks = 0;
+
+ BtreeCursor * bc = BtreeCursor::make( d , d->idxNo(*idx) , *idx , min , max , false , 1 );
+ shared_ptr<Cursor> c( bc );
+ auto_ptr<ClientCursor> cc( new ClientCursor( QueryOption_NoCursorTimeout , c , ns ) );
+ if ( ! cc->ok() ) {
+ errmsg = "can't open a cursor for splitting (desired range is possibly empty)";
+ return false;
+ }
+
+ // Use every 'keyCount'-th key as a split point. We add the initial key as a sentinel, to be removed
+ // at the end. If a key appears more times than entries allowed on a chunk, we issue a warning and
+ // split on the following key.
+ set<BSONObj> tooFrequentKeys;
+ splitKeys.push_back( c->currKey().getOwned() );
+ while ( 1 ) {
+ while ( cc->ok() ) {
+ currCount++;
+ BSONObj currKey = c->currKey();
+
+ DEV assert( currKey.woCompare( max ) <= 0 );
+
+ if ( currCount > keyCount ) {
+ // Do not use this split key if it is the same used in the previous split point.
+ if ( currKey.woCompare( splitKeys.back() ) == 0 ) {
+ tooFrequentKeys.insert( currKey.getOwned() );
+
+ }
+ else {
+ splitKeys.push_back( currKey.getOwned() );
+ currCount = 0;
+ numChunks++;
+
+ LOG(4) << "picked a split key: " << bc->prettyKey( currKey ) << endl;
+ }
+
+ }
+
+ cc->advance();
+
+ // Stop if we have enough split points.
+ if ( maxSplitPoints && ( numChunks >= maxSplitPoints ) ) {
+ log() << "max number of requested split points reached (" << numChunks
+ << ") before the end of chunk " << ns << " " << min << " -->> " << max
+ << endl;
+ break;
+ }
+
+ if ( ! cc->yieldSometimes( ClientCursor::DontNeed ) ) {
+ // we were near the end and got pushed to the end
+ // i think returning the splits we've already found is fine
+
+ // don't use the btree cursor pointer to access keys beyond this point, but it is ok
+ // to use it to format the keys we've got already
+ cc.release();
+ break;
+ }
+ }
+
+ if ( splitKeys.size() > 1 || ! force )
+ break;
+
+ force = false;
+ keyCount = currCount / 2;
+ currCount = 0;
+ log() << "splitVector doing another cycle because of force, keyCount now: " << keyCount << endl;
+
+ bc = BtreeCursor::make( d , d->idxNo(*idx) , *idx , min , max , false , 1 );
+ c.reset( bc );
+ cc.reset( new ClientCursor( QueryOption_NoCursorTimeout , c , ns ) );
+ }
+
+ //
+ // 3. Format the result and issue any warnings about the data we gathered while traversing the
+ // index
+ //
+
+ // Warn for keys that are more numerous than maxChunkSize allows.
+ for ( set<BSONObj>::const_iterator it = tooFrequentKeys.begin(); it != tooFrequentKeys.end(); ++it ) {
+ warning() << "chunk is larger than " << maxChunkSize
+ << " bytes because of key " << bc->prettyKey( *it ) << endl;
+ }
+
+ // Remove the sentinel at the beginning before returning and add fieldnames.
+ splitKeys.erase( splitKeys.begin() );
+ assert( c.get() );
+ for ( vector<BSONObj>::iterator it = splitKeys.begin(); it != splitKeys.end() ; ++it ) {
+ *it = bc->prettyKey( *it );
+ }
+
+ if ( timer.millis() > cmdLine.slowMS ) {
+ warning() << "Finding the split vector for " << ns << " over "<< keyPattern
+ << " keyCount: " << keyCount << " numSplits: " << splitKeys.size()
+ << " lookedAt: " << currCount << " took " << timer.millis() << "ms"
+ << endl;
+ }
+
+ // Warning: we are sending back an array of keys but are currently limited to
+ // 4MB worth of 'result' size. This should be okay for now.
+
+ }
+
+ result.append( "splitKeys" , splitKeys );
+
+ return true;
+
+ }
+ } cmdSplitVector;
+
+ // ** temporary ** 2010-10-22
+ // chunkInfo is a helper to collect and log information about the chunks generated in splitChunk.
+ // It should hold the chunk state for this module only, while we don't have min/max key info per chunk on the
+ // mongod side. Do not build on this; it will go away.
+ struct ChunkInfo {
+ BSONObj min;
+ BSONObj max;
+ ShardChunkVersion lastmod;
+
+ ChunkInfo() { }
+ ChunkInfo( BSONObj aMin , BSONObj aMax , ShardChunkVersion aVersion ) : min(aMin) , max(aMax) , lastmod(aVersion) {}
+ void appendShortVersion( const char* name, BSONObjBuilder& b ) const;
+ string toString() const;
+ };
+
+ void ChunkInfo::appendShortVersion( const char * name , BSONObjBuilder& b ) const {
+ BSONObjBuilder bb( b.subobjStart( name ) );
+ bb.append( "min" , min );
+ bb.append( "max" , max );
+ bb.appendTimestamp( "lastmod" , lastmod );
+ bb.done();
+ }
+
+ string ChunkInfo::toString() const {
+ ostringstream os;
+ os << "lastmod: " << lastmod.toString() << " min: " << min << " max: " << endl;
+ return os.str();
+ }
+ // ** end temporary **
+
+ class SplitChunkCommand : public Command {
+ public:
+ SplitChunkCommand() : Command( "splitChunk" ) {}
+ virtual void help( stringstream& help ) const {
+ help <<
+ "internal command usage only\n"
+ "example:\n"
+ " { splitChunk:\"db.foo\" , keyPattern: {a:1} , min : {a:100} , max: {a:200} { splitKeys : [ {a:150} , ... ]}";
+ }
+
+ virtual bool slaveOk() const { return false; }
+ virtual bool adminOnly() const { return true; }
+ virtual LockType locktype() const { return NONE; }
+
+ bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
+
+ //
+ // 1. check whether parameters passed to splitChunk are sound
+ //
+
+ const string ns = cmdObj.firstElement().str();
+ if ( ns.empty() ) {
+ errmsg = "need to specify namespace in command";
+ return false;
+ }
+
+ const BSONObj keyPattern = cmdObj["keyPattern"].Obj();
+ if ( keyPattern.isEmpty() ) {
+ errmsg = "need to specify the key pattern the collection is sharded over";
+ return false;
+ }
+
+ const BSONObj min = cmdObj["min"].Obj();
+ if ( min.isEmpty() ) {
+ errmsg = "need to specify the min key for the chunk";
+ return false;
+ }
+
+ const BSONObj max = cmdObj["max"].Obj();
+ if ( max.isEmpty() ) {
+ errmsg = "need to specify the max key for the chunk";
+ return false;
+ }
+
+ const string from = cmdObj["from"].str();
+ if ( from.empty() ) {
+ errmsg = "need specify server to split chunk at";
+ return false;
+ }
+
+ const BSONObj splitKeysElem = cmdObj["splitKeys"].Obj();
+ if ( splitKeysElem.isEmpty() ) {
+ errmsg = "need to provide the split points to chunk over";
+ return false;
+ }
+ vector<BSONObj> splitKeys;
+ BSONObjIterator it( splitKeysElem );
+ while ( it.more() ) {
+ splitKeys.push_back( it.next().Obj().getOwned() );
+ }
+
+ const BSONElement shardId = cmdObj["shardId"];
+ if ( shardId.eoo() ) {
+ errmsg = "need to provide shardId";
+ return false;
+ }
+
+ // It is possible that this is the first sharded command this mongod is asked to perform. If so,
+ // start the sharding apparatus. We'd still be missing some more shard-related info but we'll get it
+ // in step 2. below.
+ if ( ! shardingState.enabled() ) {
+ if ( cmdObj["configdb"].type() != String ) {
+ errmsg = "sharding not enabled";
+ return false;
+ }
+ string configdb = cmdObj["configdb"].String();
+ shardingState.enable( configdb );
+ configServer.init( configdb );
+ }
+
+ Shard myShard( from );
+
+ log() << "received splitChunk request: " << cmdObj << endl;
+
+ //
+ // 2. lock the collection's metadata and get highest version for the current shard
+ //
+
+ DistributedLock lockSetup( ConnectionString( shardingState.getConfigServer() , ConnectionString::SYNC) , ns );
+ dist_lock_try dlk;
+
+ try{
+ dlk = dist_lock_try( &lockSetup, string("split-") + min.toString() );
+ }
+ catch( LockException& e ){
+ errmsg = str::stream() << "Error locking distributed lock for split." << causedBy( e );
+ return false;
+ }
+
+ if ( ! dlk.got() ) {
+ errmsg = "the collection's metadata lock is taken";
+ result.append( "who" , dlk.other() );
+ return false;
+ }
+
+ // TODO This is a check migrate does to the letter. Factor it out and share. 2010-10-22
+
+ ShardChunkVersion maxVersion;
+ string shard;
+ ChunkInfo origChunk;
+ {
+ ScopedDbConnection conn( shardingState.getConfigServer() );
+
+ BSONObj x = conn->findOne( ShardNS::chunk , Query( BSON( "ns" << ns ) ).sort( BSON( "lastmod" << -1 ) ) );
+ maxVersion = x["lastmod"];
+
+ BSONObj currChunk = conn->findOne( ShardNS::chunk , shardId.wrap( "_id" ) ).getOwned();
+ assert( currChunk["shard"].type() );
+ assert( currChunk["min"].type() );
+ assert( currChunk["max"].type() );
+ shard = currChunk["shard"].String();
+ conn.done();
+
+ BSONObj currMin = currChunk["min"].Obj();
+ BSONObj currMax = currChunk["max"].Obj();
+ if ( currMin.woCompare( min ) || currMax.woCompare( max ) ) {
+ errmsg = "chunk boundaries are outdated (likely a split occurred)";
+ result.append( "currMin" , currMin );
+ result.append( "currMax" , currMax );
+ result.append( "requestedMin" , min );
+ result.append( "requestedMax" , max );
+
+ log( LL_WARNING ) << "aborted split because " << errmsg << ": " << min << "->" << max
+ << " is now " << currMin << "->" << currMax << endl;
+ return false;
+ }
+
+ if ( shard != myShard.getName() ) {
+ errmsg = "location is outdated (likely balance or migrate occurred)";
+ result.append( "from" , myShard.getName() );
+ result.append( "official" , shard );
+
+ log( LL_WARNING ) << "aborted split because " << errmsg << ": chunk is at " << shard
+ << " and not at " << myShard.getName() << endl;
+ return false;
+ }
+
+ if ( maxVersion < shardingState.getVersion( ns ) ) {
+ errmsg = "official version less than mine?";
+ result.appendTimestamp( "officialVersion" , maxVersion );
+ result.appendTimestamp( "myVersion" , shardingState.getVersion( ns ) );
+
+ log( LL_WARNING ) << "aborted split because " << errmsg << ": official " << maxVersion
+ << " mine: " << shardingState.getVersion(ns) << endl;
+ return false;
+ }
+
+ origChunk.min = currMin.getOwned();
+ origChunk.max = currMax.getOwned();
+ origChunk.lastmod = currChunk["lastmod"];
+
+ // since this could be the first call that enables sharding, we also make sure the chunk manager is up to date
+ shardingState.gotShardName( shard );
+ ShardChunkVersion shardVersion;
+ shardingState.trySetVersion( ns , shardVersion /* will return updated */ );
+
+ log() << "splitChunk accepted at version " << shardVersion << endl;
+
+ }
+
+ //
+ // 3. create the batch of updates to metadata ( the new chunks ) to be applied via 'applyOps' command
+ //
+
+ BSONObjBuilder logDetail;
+ origChunk.appendShortVersion( "before" , logDetail );
+ LOG(1) << "before split on " << origChunk << endl;
+ vector<ChunkInfo> newChunks;
+
+ ShardChunkVersion myVersion = maxVersion;
+ BSONObj startKey = min;
+ splitKeys.push_back( max ); // makes it easier to have 'max' in the next loop. remove later.
+
+ BSONObjBuilder cmdBuilder;
+ BSONArrayBuilder updates( cmdBuilder.subarrayStart( "applyOps" ) );
+
+ for ( vector<BSONObj>::const_iterator it = splitKeys.begin(); it != splitKeys.end(); ++it ) {
+ BSONObj endKey = *it;
+
+ // splits only update the 'minor' portion of version
+ myVersion.incMinor();
+
+ // build an update operation against the chunks collection of the config database with
+ // upsert true
+ BSONObjBuilder op;
+ op.append( "op" , "u" );
+ op.appendBool( "b" , true );
+ op.append( "ns" , ShardNS::chunk );
+
+ // add the modified (new) chunk information as the update object
+ BSONObjBuilder n( op.subobjStart( "o" ) );
+ n.append( "_id" , Chunk::genID( ns , startKey ) );
+ n.appendTimestamp( "lastmod" , myVersion );
+ n.append( "ns" , ns );
+ n.append( "min" , startKey );
+ n.append( "max" , endKey );
+ n.append( "shard" , shard );
+ n.done();
+
+ // add the chunk's _id as the query part of the update statement
+ BSONObjBuilder q( op.subobjStart( "o2" ) );
+ q.append( "_id" , Chunk::genID( ns , startKey ) );
+ q.done();
+
+ updates.append( op.obj() );
+
+ // remember this chunk info for logging later
+ newChunks.push_back( ChunkInfo( startKey , endKey, myVersion ) );
+
+ startKey = endKey;
+ }
+
+ updates.done();
+
+ {
+ BSONArrayBuilder preCond( cmdBuilder.subarrayStart( "preCondition" ) );
+ BSONObjBuilder b;
+ b.append( "ns" , ShardNS::chunk );
+ b.append( "q" , BSON( "query" << BSON( "ns" << ns ) << "orderby" << BSON( "lastmod" << -1 ) ) );
+ {
+ BSONObjBuilder bb( b.subobjStart( "res" ) );
+ bb.appendTimestamp( "lastmod" , maxVersion );
+ bb.done();
+ }
+ preCond.append( b.obj() );
+ preCond.done();
+ }
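+
+ // A minimal sketch of the command document built above, assuming two split points k1 < k2 and
+ // that ShardNS::chunk names the config server's chunk collection (values hypothetical):
+ //   { applyOps : [ { op : "u", b : true, ns : <ShardNS::chunk>,
+ //                    o  : { _id : <genID(ns,min)>, lastmod : <minor bumped>, ns : <ns>,
+ //                           min : <min>, max : k1, shard : <shard> },
+ //                    o2 : { _id : <genID(ns,min)> } },
+ //                  ... one such update per new chunk, each bumping the minor version ... ],
+ //     preCondition : [ { ns : <ShardNS::chunk>,
+ //                        q : { query : { ns : <ns> }, orderby : { lastmod : -1 } },
+ //                        res : { lastmod : <maxVersion> } } ] }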
+
+ //
+ // 4. apply the batch of updates to metadata and to the chunk manager
+ //
+
+ BSONObj cmd = cmdBuilder.obj();
+
+ LOG(1) << "splitChunk update: " << cmd << endl;
+
+ bool ok;
+ BSONObj cmdResult;
+ {
+ ScopedDbConnection conn( shardingState.getConfigServer() );
+ ok = conn->runCommand( "config" , cmd , cmdResult );
+ conn.done();
+ }
+
+ if ( ! ok ) {
+ stringstream ss;
+ ss << "saving chunks failed. cmd: " << cmd << " result: " << cmdResult;
+ error() << ss.str() << endl;
+ msgasserted( 13593 , ss.str() ); // assert(13593)
+ }
+
+ // install a chunk manager with knowledge about newly split chunks in this shard's state
+ splitKeys.pop_back(); // 'max' was used as sentinel
+ maxVersion.incMinor();
+ shardingState.splitChunk( ns , min , max , splitKeys , maxVersion );
+
+ //
+ // 5. logChanges
+ //
+
+ // single splits are logged differently than multisplits
+ if ( newChunks.size() == 2 ) {
+ newChunks[0].appendShortVersion( "left" , logDetail );
+ newChunks[1].appendShortVersion( "right" , logDetail );
+ configServer.logChange( "split" , ns , logDetail.obj() );
+
+ }
+ else {
+ BSONObj beforeDetailObj = logDetail.obj();
+ BSONObj firstDetailObj = beforeDetailObj.getOwned();
+ const int newChunksSize = newChunks.size();
+
+ for ( int i=0; i < newChunksSize; i++ ) {
+ BSONObjBuilder chunkDetail;
+ chunkDetail.appendElements( beforeDetailObj );
+ chunkDetail.append( "number", i+1 );
+ chunkDetail.append( "of" , newChunksSize );
+ newChunks[i].appendShortVersion( "chunk" , chunkDetail );
+ configServer.logChange( "multi-split" , ns , chunkDetail.obj() );
+ }
+ }
+
+ if (newChunks.size() == 2){
+ // If one of the chunks has only one object in it we should move it
+ static const BSONObj fields = BSON("_id" << 1 );
+ DBDirectClient conn;
+ for (int i=1; i >= 0 ; i--){ // high chunk more likely to have only one obj
+ ChunkInfo chunk = newChunks[i];
+ Query q = Query().minKey(chunk.min).maxKey(chunk.max);
+ scoped_ptr<DBClientCursor> c (conn.query(ns, q, /*limit*/-2, 0, &fields));
+ if (c && c->itcount() == 1) {
+ result.append("shouldMigrate", BSON("min" << chunk.min << "max" << chunk.max));
+ break;
+ }
+ }
+ }
+
+ return true;
+ }
+ } cmdSplitChunk;
+
+} // namespace mongo
diff --git a/src/mongo/s/d_state.cpp b/src/mongo/s/d_state.cpp
new file mode 100644
index 00000000000..39d84b6ff88
--- /dev/null
+++ b/src/mongo/s/d_state.cpp
@@ -0,0 +1,753 @@
+// @file d_state.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+
+/**
+ these are commands that live in mongod
+ mostly around shard management and checking
+ */
+
+#include "pch.h"
+#include <map>
+#include <string>
+
+#include "../db/commands.h"
+#include "../db/jsobj.h"
+#include "../db/db.h"
+#include "../db/replutil.h"
+#include "../client/connpool.h"
+
+#include "../util/queue.h"
+
+#include "shard.h"
+#include "d_logic.h"
+#include "config.h"
+
+using namespace std;
+
+namespace mongo {
+
+ // -----ShardingState START ----
+
+ ShardingState::ShardingState()
+ : _enabled(false) , _mutex( "ShardingState" ) {
+ }
+
+ void ShardingState::enable( const string& server ) {
+ _enabled = true;
+ assert( server.size() );
+ if ( _configServer.size() == 0 )
+ _configServer = server;
+ else {
+ assert( server == _configServer );
+ }
+ }
+
+ void ShardingState::gotShardName( const string& name ) {
+ scoped_lock lk(_mutex);
+ if ( _shardName.size() == 0 ) {
+ // TODO SERVER-2299 verify the name is sound w.r.t IPs
+ _shardName = name;
+ return;
+ }
+
+ if ( _shardName == name )
+ return;
+
+ stringstream ss;
+ ss << "gotShardName different than what i had before "
+ << " before [" << _shardName << "] "
+ << " got [" << name << "] "
+ ;
+ msgasserted( 13298 , ss.str() );
+ }
+
+ void ShardingState::gotShardHost( string host ) {
+ scoped_lock lk(_mutex);
+ size_t slash = host.find( '/' );
+ if ( slash != string::npos )
+ host = host.substr( 0 , slash );
+
+ if ( _shardHost.size() == 0 ) {
+ _shardHost = host;
+ return;
+ }
+
+ if ( _shardHost == host )
+ return;
+
+ stringstream ss;
+ ss << "gotShardHost different than what i had before "
+ << " before [" << _shardHost << "] "
+ << " got [" << host << "] "
+ ;
+ msgasserted( 13299 , ss.str() );
+ }
+
+ void ShardingState::resetShardingState() {
+ scoped_lock lk(_mutex);
+
+ _enabled = false;
+ _configServer.clear();
+ _shardName.clear();
+ _shardHost.clear();
+ _chunks.clear();
+ }
+
+ // TODO we shouldn't need three ways for checking the version. Fix this.
+ bool ShardingState::hasVersion( const string& ns ) {
+ scoped_lock lk(_mutex);
+
+ ChunkManagersMap::const_iterator it = _chunks.find(ns);
+ return it != _chunks.end();
+ }
+
+ bool ShardingState::hasVersion( const string& ns , ConfigVersion& version ) {
+ scoped_lock lk(_mutex);
+
+ ChunkManagersMap::const_iterator it = _chunks.find(ns);
+ if ( it == _chunks.end() )
+ return false;
+
+ ShardChunkManagerPtr p = it->second;
+ version = p->getVersion();
+ return true;
+ }
+
+ const ConfigVersion ShardingState::getVersion( const string& ns ) const {
+ scoped_lock lk(_mutex);
+
+ ChunkManagersMap::const_iterator it = _chunks.find( ns );
+ if ( it != _chunks.end() ) {
+ ShardChunkManagerPtr p = it->second;
+ return p->getVersion();
+ }
+ else {
+ return 0;
+ }
+ }
+
+ void ShardingState::donateChunk( const string& ns , const BSONObj& min , const BSONObj& max , ShardChunkVersion version ) {
+ scoped_lock lk( _mutex );
+
+ ChunkManagersMap::const_iterator it = _chunks.find( ns );
+ assert( it != _chunks.end() ) ;
+ ShardChunkManagerPtr p = it->second;
+
+ // empty shards should have version 0
+ version = ( p->getNumChunks() > 1 ) ? version : ShardChunkVersion( 0 , 0 );
+
+ ShardChunkManagerPtr cloned( p->cloneMinus( min , max , version ) );
+ _chunks[ns] = cloned;
+ }
+
+ void ShardingState::undoDonateChunk( const string& ns , const BSONObj& min , const BSONObj& max , ShardChunkVersion version ) {
+ scoped_lock lk( _mutex );
+
+ ChunkManagersMap::const_iterator it = _chunks.find( ns );
+ assert( it != _chunks.end() ) ;
+ ShardChunkManagerPtr p( it->second->clonePlus( min , max , version ) );
+ _chunks[ns] = p;
+ }
+
+ void ShardingState::splitChunk( const string& ns , const BSONObj& min , const BSONObj& max , const vector<BSONObj>& splitKeys ,
+ ShardChunkVersion version ) {
+ scoped_lock lk( _mutex );
+
+ ChunkManagersMap::const_iterator it = _chunks.find( ns );
+ assert( it != _chunks.end() ) ;
+ ShardChunkManagerPtr p( it->second->cloneSplit( min , max , splitKeys , version ) );
+ _chunks[ns] = p;
+ }
+
+ void ShardingState::resetVersion( const string& ns ) {
+ scoped_lock lk( _mutex );
+
+ _chunks.erase( ns );
+ }
+
+ bool ShardingState::trySetVersion( const string& ns , ConfigVersion& version /* IN-OUT */ ) {
+
+ // fast path - requested version is at the same version as this chunk manager
+ //
+ // cases:
+ // + this shard updated the version for a migrate's commit (FROM side)
+ // a client reloaded chunk state from config and picked the newest version
+ // + two clients reloaded
+ // one triggered the 'slow path' (below)
+ // when the second's request gets here, the version is already current
+ {
+ scoped_lock lk( _mutex );
+ ChunkManagersMap::const_iterator it = _chunks.find( ns );
+ if ( it != _chunks.end() && it->second->getVersion() == version )
+ return true;
+ }
+
+ // slow path - requested version is different than the current chunk manager's, if one exists, so must check for
+ // newest version in the config server
+ //
+ // cases:
+ // + a chunk moved TO here
+ //     (we don't bump up the version on the TO side but the commit to config does use a higher version)
+ //     a client reloads from config and issues the request
+ //   + there was a takeover from a secondary
+ //     the secondary had no state (managers) at all, so every client request will fall here
+ //   + a stale client requests a version that's not current anymore
+
+ const string c = (_configServer == _shardHost) ? "" /* local */ : _configServer;
+ ShardChunkManagerPtr p( new ShardChunkManager( c , ns , _shardName ) );
+ {
+ scoped_lock lk( _mutex );
+
+ // since we loaded the chunk manager unlocked, other thread may have done the same
+ // make sure we keep the freshest config info only
+ ChunkManagersMap::const_iterator it = _chunks.find( ns );
+ if ( it == _chunks.end() || p->getVersion() >= it->second->getVersion() ) {
+ _chunks[ns] = p;
+ }
+
+ ShardChunkVersion oldVersion = version;
+ version = p->getVersion();
+ return oldVersion == version;
+ }
+ }
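+
+ // A minimal usage sketch for trySetVersion (mirroring how setShardVersion uses it below;
+ // 'wanted' is a hypothetical name): pass in the version the client claims, and on a mismatch
+ // the argument comes back holding the freshly loaded, authoritative version.
+ //   ShardChunkVersion wanted = ...;
+ //   if ( ! shardingState.trySetVersion( ns , wanted /* updated in place */ ) ) {
+ //       // tell the caller to reload config; 'wanted' now holds the config's version
+ //   }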
+
+ void ShardingState::appendInfo( BSONObjBuilder& b ) {
+ b.appendBool( "enabled" , _enabled );
+ if ( ! _enabled )
+ return;
+
+ b.append( "configServer" , _configServer );
+ b.append( "shardName" , _shardName );
+ b.append( "shardHost" , _shardHost );
+
+ {
+ BSONObjBuilder bb( b.subobjStart( "versions" ) );
+
+ scoped_lock lk(_mutex);
+
+ for ( ChunkManagersMap::iterator it = _chunks.begin(); it != _chunks.end(); ++it ) {
+ ShardChunkManagerPtr p = it->second;
+ bb.appendTimestamp( it->first , p->getVersion() );
+ }
+ bb.done();
+ }
+
+ }
+
+ bool ShardingState::needShardChunkManager( const string& ns ) const {
+ if ( ! _enabled )
+ return false;
+
+ if ( ! ShardedConnectionInfo::get( false ) )
+ return false;
+
+ return true;
+ }
+
+ ShardChunkManagerPtr ShardingState::getShardChunkManager( const string& ns ) {
+ scoped_lock lk( _mutex );
+
+ ChunkManagersMap::const_iterator it = _chunks.find( ns );
+ if ( it == _chunks.end() ) {
+ return ShardChunkManagerPtr();
+ }
+ else {
+ return it->second;
+ }
+ }
+
+ ShardingState shardingState;
+
+ // -----ShardingState END ----
+
+ // -----ShardedConnectionInfo START ----
+
+ boost::thread_specific_ptr<ShardedConnectionInfo> ShardedConnectionInfo::_tl;
+
+ ShardedConnectionInfo::ShardedConnectionInfo() {
+ _forceVersionOk = false;
+ _id.clear();
+ }
+
+ ShardedConnectionInfo* ShardedConnectionInfo::get( bool create ) {
+ ShardedConnectionInfo* info = _tl.get();
+ if ( ! info && create ) {
+ LOG(1) << "entering shard mode for connection" << endl;
+ info = new ShardedConnectionInfo();
+ _tl.reset( info );
+ }
+ return info;
+ }
+
+ void ShardedConnectionInfo::reset() {
+ _tl.reset();
+ }
+
+ const ConfigVersion ShardedConnectionInfo::getVersion( const string& ns ) const {
+ NSVersionMap::const_iterator it = _versions.find( ns );
+ if ( it != _versions.end() ) {
+ return it->second;
+ }
+ else {
+ return 0;
+ }
+ }
+
+ void ShardedConnectionInfo::setVersion( const string& ns , const ConfigVersion& version ) {
+ _versions[ns] = version;
+ }
+
+ void ShardedConnectionInfo::addHook() {
+ static bool done = false;
+ if (!done) {
+ LOG(1) << "adding sharding hook" << endl;
+ pool.addHook(new ShardingConnectionHook(false));
+ shardConnectionPool.addHook(new ShardingConnectionHook(true));
+ done = true;
+ }
+ }
+
+ void ShardedConnectionInfo::setID( const OID& id ) {
+ _id = id;
+ }
+
+ // -----ShardedConnectionInfo END ----
+
+ unsigned long long extractVersion( BSONElement e , string& errmsg ) {
+ if ( e.eoo() ) {
+ errmsg = "no version";
+ return 0;
+ }
+
+ if ( e.isNumber() )
+ return (unsigned long long)e.number();
+
+ if ( e.type() == Date || e.type() == Timestamp )
+ return e._numberLong();
+
+
+ errmsg = "version is not a numeric type";
+ return 0;
+ }
+
+ class MongodShardCommand : public Command {
+ public:
+ MongodShardCommand( const char * n ) : Command( n ) {
+ }
+ virtual bool slaveOk() const {
+ return false;
+ }
+ virtual bool adminOnly() const {
+ return true;
+ }
+ };
+
+
+ bool haveLocalShardingInfo( const string& ns ) {
+ if ( ! shardingState.enabled() )
+ return false;
+
+ if ( ! shardingState.hasVersion( ns ) )
+ return false;
+
+ return ShardedConnectionInfo::get(false) > 0;
+ }
+
+ class UnsetShardingCommand : public MongodShardCommand {
+ public:
+ UnsetShardingCommand() : MongodShardCommand("unsetSharding") {}
+
+ virtual void help( stringstream& help ) const {
+ help << " example: { unsetSharding : 1 } ";
+ }
+
+ virtual LockType locktype() const { return NONE; }
+
+ virtual bool slaveOk() const { return true; }
+
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ ShardedConnectionInfo::reset();
+ return true;
+ }
+
+ } unsetShardingCommand;
+
+ class SetShardVersion : public MongodShardCommand {
+ public:
+ SetShardVersion() : MongodShardCommand("setShardVersion") {}
+
+ virtual void help( stringstream& help ) const {
+ help << " example: { setShardVersion : 'alleyinsider.foo' , version : 1 , configdb : '' } ";
+ }
+
+ virtual bool slaveOk() const { return true; }
+ virtual LockType locktype() const { return NONE; }
+
+ bool checkConfigOrInit( const string& configdb , bool authoritative , string& errmsg , BSONObjBuilder& result , bool locked=false ) const {
+ if ( configdb.size() == 0 ) {
+ errmsg = "no configdb";
+ return false;
+ }
+
+ if ( shardingState.enabled() ) {
+ if ( configdb == shardingState.getConfigServer() )
+ return true;
+
+ result.append( "configdb" , BSON( "stored" << shardingState.getConfigServer() <<
+ "given" << configdb ) );
+ errmsg = "specified a different configdb!";
+ return false;
+ }
+
+ if ( ! authoritative ) {
+ result.appendBool( "need_authoritative" , true );
+ errmsg = "first setShardVersion";
+ return false;
+ }
+
+ if ( locked ) {
+ ShardedConnectionInfo::addHook();
+ shardingState.enable( configdb );
+ configServer.init( configdb );
+ return true;
+ }
+
+ dblock lk;
+ return checkConfigOrInit( configdb , authoritative , errmsg , result , true );
+ }
+
+ bool checkMongosID( ShardedConnectionInfo* info, const BSONElement& id, string& errmsg ) {
+ if ( id.type() != jstOID ) {
+ if ( ! info->hasID() ) {
+ warning() << "bad serverID set in setShardVersion and none in info: " << id << endl;
+ }
+ // TODO: fix this
+ //errmsg = "need serverID to be an OID";
+ //return 0;
+ return true;
+ }
+
+ OID clientId = id.__oid();
+ if ( ! info->hasID() ) {
+ info->setID( clientId );
+ return true;
+ }
+
+ if ( clientId != info->getID() ) {
+ errmsg = "server id has changed!";
+ return false;
+ }
+
+ return true;
+ }
+
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+
+ // Steps
+ // 1. check basic config
+ // 2. extract params from command
+ // 3. fast check
+ // 4. slow check (LOCKS)
+
+ // step 1
+
+ lastError.disableForCommand();
+ ShardedConnectionInfo* info = ShardedConnectionInfo::get( true );
+
+ // make sure we have the mongos id for writebacks
+ if ( ! checkMongosID( info , cmdObj["serverID"] , errmsg ) )
+ return false;
+
+ bool authoritative = cmdObj.getBoolField( "authoritative" );
+
+ // check config server is ok or enable sharding
+ if ( ! checkConfigOrInit( cmdObj["configdb"].valuestrsafe() , authoritative , errmsg , result ) )
+ return false;
+
+ // check shard name/hosts are correct
+ if ( cmdObj["shard"].type() == String ) {
+ shardingState.gotShardName( cmdObj["shard"].String() );
+ shardingState.gotShardHost( cmdObj["shardHost"].String() );
+ }
+
+
+ // Handle initial shard connection
+ if( cmdObj["version"].eoo() && cmdObj["init"].trueValue() ){
+ result.append( "initialized", true );
+ return true;
+ }
+
+ // we can run on a slave up to here
+ if ( ! isMaster( "admin" ) ) {
+ result.append( "errmsg" , "not master" );
+ result.append( "note" , "from post init in setShardVersion" );
+ return false;
+ }
+
+ // step 2
+
+ string ns = cmdObj["setShardVersion"].valuestrsafe();
+ if ( ns.size() == 0 ) {
+ errmsg = "need to specify namespace";
+ return false;
+ }
+
+ const ConfigVersion version = extractVersion( cmdObj["version"] , errmsg );
+ if ( errmsg.size() )
+ return false;
+
+ // step 3
+
+ const ConfigVersion oldVersion = info->getVersion(ns);
+ const ConfigVersion globalVersion = shardingState.getVersion(ns);
+
+ result.appendTimestamp( "oldVersion" , oldVersion );
+
+ if ( globalVersion > 0 && version > 0 ) {
+ // this means there is no reset in progress on either side
+ // so it's safe to make some assumptions
+
+ if ( version == globalVersion ) {
+ // mongos and mongod agree!
+ if ( oldVersion != version ) {
+ if ( oldVersion < globalVersion ) {
+ info->setVersion( ns , version );
+ }
+ else if ( authoritative ) {
+ // this means there was a drop and our version is reset
+ info->setVersion( ns , version );
+ }
+ else {
+ result.append( "ns" , ns );
+ result.appendBool( "need_authoritative" , true );
+ errmsg = "verifying drop on '" + ns + "'";
+ return false;
+ }
+ }
+ return true;
+ }
+
+ }
+
+ // step 4
+
+ // this is because of a weird segfault I saw and I can't see why this should ever be set
+ massert( 13647 , str::stream() << "context should be empty here, is: " << cc().getContext()->ns() , cc().getContext() == 0 );
+
+ dblock setShardVersionLock; // TODO: can we get rid of this??
+
+ if ( oldVersion > 0 && globalVersion == 0 ) {
+ // this had been reset
+ info->setVersion( ns , 0 );
+ }
+
+ if ( version == 0 && globalVersion == 0 ) {
+ // this connection is cleaning itself
+ info->setVersion( ns , 0 );
+ return true;
+ }
+
+ if ( version == 0 && globalVersion > 0 ) {
+ if ( ! authoritative ) {
+ result.appendBool( "need_authoritative" , true );
+ result.append( "ns" , ns );
+ result.appendTimestamp( "globalVersion" , globalVersion );
+ errmsg = "dropping needs to be authoritative";
+ return false;
+ }
+ log() << "wiping data for: " << ns << endl;
+ result.appendTimestamp( "beforeDrop" , globalVersion );
+ // only setting global version on purpose
+ // need clients to re-find meta-data
+ shardingState.resetVersion( ns );
+ info->setVersion( ns , 0 );
+ return true;
+ }
+
+ if ( version < oldVersion ) {
+ errmsg = "this connection already had a newer version of collection '" + ns + "'";
+ result.append( "ns" , ns );
+ result.appendTimestamp( "newVersion" , version );
+ result.appendTimestamp( "globalVersion" , globalVersion );
+ return false;
+ }
+
+ if ( version < globalVersion ) {
+ while ( shardingState.inCriticalMigrateSection() ) {
+ dbtemprelease r;
+ sleepmillis(2);
+ OCCASIONALLY log() << "waiting till out of critical section" << endl;
+ }
+ errmsg = "shard global version for collection is higher than trying to set to '" + ns + "'";
+ result.append( "ns" , ns );
+ result.appendTimestamp( "version" , version );
+ result.appendTimestamp( "globalVersion" , globalVersion );
+ result.appendBool( "reloadConfig" , true );
+ return false;
+ }
+
+ if ( globalVersion == 0 && ! authoritative ) {
+ // need authoritative for first look
+ result.append( "ns" , ns );
+ result.appendBool( "need_authoritative" , true );
+ errmsg = "first time for collection '" + ns + "'";
+ return false;
+ }
+
+ Timer relockTime;
+ {
+ dbtemprelease unlock;
+
+ ShardChunkVersion currVersion = version;
+ if ( ! shardingState.trySetVersion( ns , currVersion ) ) {
+ errmsg = str::stream() << "client version differs from config's for collection '" << ns << "'";
+ result.append( "ns" , ns );
+ result.appendTimestamp( "version" , version );
+ result.appendTimestamp( "globalVersion" , currVersion );
+ return false;
+ }
+ }
+ if ( relockTime.millis() >= ( cmdLine.slowMS - 10 ) ) {
+ log() << "setShardVersion - relocking slow: " << relockTime.millis() << endl;
+ }
+
+ info->setVersion( ns , version );
+ return true;
+ }
+
+ } setShardVersionCmd;
+
+ class GetShardVersion : public MongodShardCommand {
+ public:
+ GetShardVersion() : MongodShardCommand("getShardVersion") {}
+
+ virtual void help( stringstream& help ) const {
+ help << " example: { getShardVersion : 'alleyinsider.foo' } ";
+ }
+
+ virtual LockType locktype() const { return NONE; }
+
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ string ns = cmdObj["getShardVersion"].valuestrsafe();
+ if ( ns.size() == 0 ) {
+ errmsg = "need to specify full namespace";
+ return false;
+ }
+
+ result.append( "configServer" , shardingState.getConfigServer() );
+
+ result.appendTimestamp( "global" , shardingState.getVersion(ns) );
+
+ ShardedConnectionInfo* info = ShardedConnectionInfo::get( false );
+ result.appendBool( "inShardedMode" , info != 0 );
+ if ( info )
+ result.appendTimestamp( "mine" , info->getVersion(ns) );
+ else
+ result.appendTimestamp( "mine" , 0 );
+
+ return true;
+ }
+
+ } getShardVersion;
+
+ class ShardingStateCmd : public MongodShardCommand {
+ public:
+ ShardingStateCmd() : MongodShardCommand( "shardingState" ) {}
+
+ virtual LockType locktype() const { return WRITE; } // TODO: figure out how to make this not need to lock
+
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ shardingState.appendInfo( result );
+ return true;
+ }
+
+ } shardingStateCmd;
+
+ /**
+ * @return true if not in sharded mode
+ *         or if the version for this client is ok
+ */
+ bool shardVersionOk( const string& ns , string& errmsg ) {
+ if ( ! shardingState.enabled() )
+ return true;
+
+ if ( ! isMasterNs( ns.c_str() ) ) {
+ // right now connections to secondaries aren't versioned at all
+ return true;
+ }
+
+ ShardedConnectionInfo* info = ShardedConnectionInfo::get( false );
+
+ if ( ! info ) {
+ // this means the client has nothing sharded
+ // so this allows direct connections to do whatever they want
+ // which i think is the correct behavior
+ return true;
+ }
+
+ if ( info->inForceVersionOkMode() ) {
+ return true;
+ }
+
+ // TODO
+ // all collections at some point, be sharded or not, will have a version (and a ShardChunkManager)
+ // for now, we remove the sharding state of dropped collection
+ // so delayed request may come in. This has to be fixed.
+ ConfigVersion clientVersion = info->getVersion(ns);
+ ConfigVersion version;
+ if ( ! shardingState.hasVersion( ns , version ) && clientVersion == 0 ) {
+ return true;
+ }
+
+
+ if ( version == 0 && clientVersion > 0 ) {
+ stringstream ss;
+ ss << "collection was dropped or this shard is no longer valid. version: " << version << " clientVersion: " << clientVersion;
+ errmsg = ss.str();
+ return false;
+ }
+
+ if ( clientVersion >= version )
+ return true;
+
+
+ if ( clientVersion == 0 ) {
+ stringstream ss;
+ ss << "client in sharded mode, but doesn't have version set for this collection: " << ns << " myVersion: " << version;
+ errmsg = ss.str();
+ return false;
+ }
+
+ if ( version.majorVersion() == clientVersion.majorVersion() ) {
+ // this means there was just a split
+ // since on a split w/o a migrate this server is ok
+ // going to accept
+ return true;
+ }
+
+ stringstream ss;
+ ss << "your version is too old ns: " + ns << " global: " << version << " client: " << clientVersion;
+ errmsg = ss.str();
+ return false;
+ }
+
+ void ShardingConnectionHook::onHandedOut( DBClientBase * conn ) {
+ // no-op for mongod
+ }
+}
diff --git a/src/mongo/s/d_writeback.cpp b/src/mongo/s/d_writeback.cpp
new file mode 100644
index 00000000000..01c0c14ac0a
--- /dev/null
+++ b/src/mongo/s/d_writeback.cpp
@@ -0,0 +1,179 @@
+// d_writeback.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+
+#include "../db/commands.h"
+#include "../util/queue.h"
+#include "../util/net/listen.h"
+
+#include "d_writeback.h"
+
+using namespace std;
+
+namespace mongo {
+
+ // ---------- WriteBackManager class ----------
+
+ // TODO init at mongod startup
+ WriteBackManager writeBackManager;
+
+ WriteBackManager::WriteBackManager() : _writebackQueueLock("sharding:writebackQueueLock") {
+ }
+
+ WriteBackManager::~WriteBackManager() {
+ }
+
+ void WriteBackManager::queueWriteBack( const string& remote , const BSONObj& o ) {
+ getWritebackQueue( remote )->queue.push( o );
+ }
+
+ shared_ptr<WriteBackManager::QueueInfo> WriteBackManager::getWritebackQueue( const string& remote ) {
+ scoped_lock lk ( _writebackQueueLock );
+ shared_ptr<QueueInfo>& q = _writebackQueues[remote];
+ if ( ! q )
+ q.reset( new QueueInfo() );
+ q->lastCall = Listener::getElapsedTimeMillis();
+ return q;
+ }
+
+ bool WriteBackManager::queuesEmpty() const {
+ scoped_lock lk( _writebackQueueLock );
+ for ( WriteBackQueuesMap::const_iterator it = _writebackQueues.begin(); it != _writebackQueues.end(); ++it ) {
+ const shared_ptr<QueueInfo> queue = it->second;
+ if (! queue->queue.empty() ) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ void WriteBackManager::appendStats( BSONObjBuilder& b ) const {
+ BSONObjBuilder sub;
+ long long totalQueued = 0;
+ long long now = Listener::getElapsedTimeMillis();
+ {
+ scoped_lock lk( _writebackQueueLock );
+ for ( WriteBackQueuesMap::const_iterator it = _writebackQueues.begin(); it != _writebackQueues.end(); ++it ) {
+ const shared_ptr<QueueInfo> queue = it->second;
+
+ BSONObjBuilder t( sub.subobjStart( it->first ) );
+ t.appendNumber( "n" , queue->queue.size() );
+ t.appendNumber( "minutesSinceLastCall" , ( now - queue->lastCall ) / ( 1000 * 60 ) );
+ t.done();
+
+ totalQueued += queue->queue.size();
+ }
+ }
+
+ b.appendBool( "hasOpsQueued" , totalQueued > 0 );
+ b.appendNumber( "totalOpsQueued" , totalQueued );
+ b.append( "queues" , sub.obj() );
+ }
+
+ bool WriteBackManager::cleanupOldQueues() {
+ long long now = Listener::getElapsedTimeMillis();
+
+ scoped_lock lk( _writebackQueueLock );
+ for ( WriteBackQueuesMap::iterator it = _writebackQueues.begin(); it != _writebackQueues.end(); ++it ) {
+ const shared_ptr<QueueInfo> queue = it->second;
+ long long sinceMinutes = ( now - queue->lastCall ) / ( 1000 * 60 );
+
+ if ( sinceMinutes < 60 ) // minutes of inactivity.
+ continue;
+
+ log() << "deleting queue from: " << it->first
+ << " of size: " << queue->queue.size()
+ << " after " << sinceMinutes << " minutes of inactivity"
+ << " (normal if any mongos has restarted)"
+ << endl;
+
+ _writebackQueues.erase( it );
+ return true;
+ }
+ return false;
+ }
+
+ void WriteBackManager::Cleaner::taskDoWork() {
+ for ( int i=0; i<1000; i++ ) {
+ if ( ! writeBackManager.cleanupOldQueues() )
+ break;
+ }
+ }
+
+ // ---------- admin commands ----------
+
+ // Note, this command will block until there is something to WriteBack
+ class WriteBackCommand : public Command {
+ public:
+ virtual LockType locktype() const { return NONE; }
+ virtual bool slaveOk() const { return true; }
+ virtual bool adminOnly() const { return true; }
+
+ WriteBackCommand() : Command( "writebacklisten" ) {}
+
+ void help(stringstream& h) const { h<<"internal"; }
+
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+
+ BSONElement e = cmdObj.firstElement();
+ if ( e.type() != jstOID ) {
+ errmsg = "need oid as first value";
+ return 0;
+ }
+
+ // get the command issuer's (a mongos) serverID
+ const OID id = e.__oid();
+
+ // the command issuer is blocked awaiting a response
+ // we want to return at least every 5 minutes so sockets don't time out
+ BSONObj z;
+ if ( writeBackManager.getWritebackQueue(id.str())->queue.blockingPop( z, 5 * 60 /* 5 minutes */ ) ) {
+ LOG(1) << "WriteBackCommand got : " << z << endl;
+ result.append( "data" , z );
+ }
+ else {
+ result.appendBool( "noop" , true );
+ }
+
+ return true;
+ }
+ } writeBackCommand;
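+
+ // A minimal sketch of the exchange this command implements (shapes follow the code above;
+ // the serverID value is hypothetical):
+ //   request : { writebacklisten : ObjectId("4f0e...") }
+ //   reply   : { data : { ...queued write-back operation... } }   // an op was queued in time
+ //             { noop : true }                                    // nothing arrived within ~5 minutes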
+
+ class WriteBacksQueuedCommand : public Command {
+ public:
+ virtual LockType locktype() const { return NONE; }
+ virtual bool slaveOk() const { return true; }
+ virtual bool adminOnly() const { return true; }
+
+ WriteBacksQueuedCommand() : Command( "writeBacksQueued" ) {}
+
+ void help(stringstream& help) const {
+ help << "Returns whether there are operations in the writeback queue at the time the command was called. "
+ << "This is an internal command";
+ }
+
+ bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
+ writeBackManager.appendStats( result );
+ return true;
+ }
+
+ } writeBacksQueuedCommand;
+
+
+} // namespace mongo
diff --git a/src/mongo/s/d_writeback.h b/src/mongo/s/d_writeback.h
new file mode 100644
index 00000000000..d3f36a14aca
--- /dev/null
+++ b/src/mongo/s/d_writeback.h
@@ -0,0 +1,106 @@
+// @file d_writeback.h
+
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../pch.h"
+
+#include "../util/queue.h"
+#include "../util/background.h"
+
+namespace mongo {
+
+ /*
+ * The WriteBackManager keeps one queue of pending operations per mongos. The operations get here
+ * if they were directed to a chunk that is no longer in this mongod server. The operations are
+ * "written back" to the mongos server per its request (command 'writebacklisten').
+ *
+ * The class is thread safe.
+ */
+ class WriteBackManager {
+ public:
+
+ class QueueInfo : boost::noncopyable {
+ public:
+ QueueInfo(){}
+
+ BlockingQueue<BSONObj> queue;
+ long long lastCall; // this is elapsed millis since startup
+ };
+
+ // a map from mongos's serverIDs to queues of "rejected" operations
+ // an operation is rejected if it targets data that does not live on this shard anymore
+ typedef map<string,shared_ptr<QueueInfo> > WriteBackQueuesMap;
+
+
+ public:
+ WriteBackManager();
+ ~WriteBackManager();
+
+ /*
+ * @param remote server ID this operation came from
+ * @param op the operation itself
+ *
+ * Enqueues operation 'op' in server 'remote's queue. The operation will be written back to
+ * remote at a later stage.
+ */
+ void queueWriteBack( const string& remote , const BSONObj& op );
+
+ /*
+ * @param remote server ID
+ * @return the queue for operations that came from 'remote'
+ *
+ * Gets access to server 'remote's queue, which is synchronized.
+ */
+ shared_ptr<QueueInfo> getWritebackQueue( const string& remote );
+
+ /*
+ * @return true if there is no operation queued for write back
+ */
+ bool queuesEmpty() const;
+
+ /**
+ * appends a number of statistics
+ */
+ void appendStats( BSONObjBuilder& b ) const;
+
+ /**
+ * removes queues that have been idle
+ * @return if something was removed
+ */
+ bool cleanupOldQueues();
+
+ private:
+
+ // '_writebackQueueLock' protects only the map itself, since each queue is synchronized.
+ mutable mongo::mutex _writebackQueueLock;
+ WriteBackQueuesMap _writebackQueues;
+
+ class Cleaner : public PeriodicTask {
+ public:
+ virtual string taskName() const { return "WriteBackManager::cleaner"; }
+ virtual void taskDoWork();
+ };
+
+ Cleaner _cleaner;
+ };
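+
+ /*
+  * A minimal usage sketch (names mirror d_writeback.cpp; the remote id value is hypothetical):
+  *
+  *   // mongod side, when an operation targeted data this shard no longer owns:
+  *   writeBackManager.queueWriteBack( remoteServerId , op );
+  *
+  *   // 'writebacklisten' handler, blocking up to five minutes for work destined to that mongos:
+  *   BSONObj next;
+  *   writeBackManager.getWritebackQueue( remoteServerId )->queue.blockingPop( next , 5 * 60 );
+  */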
+
+ // TODO collect global state in a central place and init during startup
+ extern WriteBackManager writeBackManager;
+
+} // namespace mongo
diff --git a/src/mongo/s/dbgrid.vcxproj b/src/mongo/s/dbgrid.vcxproj
new file mode 100644
index 00000000000..93edc46211e
--- /dev/null
+++ b/src/mongo/s/dbgrid.vcxproj
@@ -0,0 +1,691 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup Label="ProjectConfigurations">
+ <ProjectConfiguration Include="Debug|Win32">
+ <Configuration>Debug</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Debug|x64">
+ <Configuration>Debug</Configuration>
+ <Platform>x64</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|Win32">
+ <Configuration>Release</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|x64">
+ <Configuration>Release</Configuration>
+ <Platform>x64</Platform>
+ </ProjectConfiguration>
+ </ItemGroup>
+ <PropertyGroup Label="Globals">
+ <ProjectName>mongos</ProjectName>
+ <ProjectGuid>{E03717ED-69B4-4D21-BC55-DF6690B585C6}</ProjectGuid>
+ <RootNamespace>dbgrid</RootNamespace>
+ <Keyword>Win32Proj</Keyword>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <CharacterSet>Unicode</CharacterSet>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <CharacterSet>Unicode</CharacterSet>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+ <ImportGroup Label="ExtensionSettings">
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup>
+ <_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(SolutionDir)$(Configuration)\</OutDir>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(SolutionDir)$(Configuration)\</OutDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">$(Configuration)\</IntDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">$(Configuration)\</IntDir>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</LinkIncremental>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</LinkIncremental>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(SolutionDir)$(Configuration)\</OutDir>
+ <OutDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(SolutionDir)$(Configuration)\</OutDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(Configuration)\</IntDir>
+ <IntDir Condition="'$(Configuration)|$(Platform)'=='Release|x64'">$(Configuration)\</IntDir>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">false</LinkIncremental>
+ <LinkIncremental Condition="'$(Configuration)|$(Platform)'=='Release|x64'">false</LinkIncremental>
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" />
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRuleSet Condition="'$(Configuration)|$(Platform)'=='Release|x64'">AllRules.ruleset</CodeAnalysisRuleSet>
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
+ <CodeAnalysisRules Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" />
+ <CodeAnalysisRuleAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|x64'" />
+ <IncludePath Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">..;$(IncludePath)</IncludePath>
+ <IncludePath Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">..;$(IncludePath)</IncludePath>
+ <IncludePath Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">..;$(IncludePath)</IncludePath>
+ <IncludePath Condition="'$(Configuration)|$(Platform)'=='Release|x64'">..;$(IncludePath)</IncludePath>
+ </PropertyGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <ClCompile>
+ <Optimization>Disabled</Optimization>
+ <AdditionalIncludeDirectories>..\..\js\src;..\third_party\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <PreprocessorDefinitions>_UNICODE;UNICODE;MONGO_EXPOSE_MACROS;XP_WIN;OLDJS;STATIC_JS_API;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <MinimalRebuild>No</MinimalRebuild>
+ <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
+ <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
+ <PrecompiledHeader>Use</PrecompiledHeader>
+ <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>EditAndContinue</DebugInformationFormat>
+ <DisableSpecificWarnings>4355;4800;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ </ClCompile>
+ <Link>
+ <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>c:\boost\lib\vs2010_32;\boost\lib\vs2010_32;\boost\lib</AdditionalLibraryDirectories>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ <TargetMachine>MachineX86</TargetMachine>
+ </Link>
+ <PreBuildEvent>
+ <Command>cscript //Nologo ..\shell\msvc\createCPPfromJavaScriptFiles.js "$(ProjectDir).."</Command>
+ <Message>Create mongo.cpp and mongo-server.cpp from JavaScript source files</Message>
+ </PreBuildEvent>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ <ClCompile>
+ <Optimization>Disabled</Optimization>
+ <AdditionalIncludeDirectories>..\..\js\src;..\third_party\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <PreprocessorDefinitions>_UNICODE;UNICODE;MONGO_EXPOSE_MACROS;XP_WIN;OLDJS;STATIC_JS_API;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
+ <RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
+ <PrecompiledHeader>Use</PrecompiledHeader>
+ <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+ <DisableSpecificWarnings>4355;4800;4267;4244;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ <MinimalRebuild>No</MinimalRebuild>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ </ClCompile>
+ <Link>
+ <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>c:\boost\lib\vs2010_64;\boost\lib\vs2010_64;\boost\lib</AdditionalLibraryDirectories>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ </Link>
+ <PreBuildEvent>
+ <Command>cscript //Nologo ..\shell\msvc\createCPPfromJavaScriptFiles.js "$(ProjectDir).."</Command>
+ <Message>Create mongo.cpp and mongo-server.cpp from JavaScript source files</Message>
+ </PreBuildEvent>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <ClCompile>
+ <Optimization>MaxSpeed</Optimization>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <AdditionalIncludeDirectories>..\..\js\src;..\third_party\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <PreprocessorDefinitions>_UNICODE;UNICODE;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <PrecompiledHeader>Use</PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+ <DisableSpecificWarnings>4355;4800;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ <MinimalRebuild>No</MinimalRebuild>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
+ </ClCompile>
+ <Link>
+ <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>c:\boost\lib\vs2010_32;\boost\lib\vs2010_32;\boost\lib</AdditionalLibraryDirectories>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ <OptimizeReferences>true</OptimizeReferences>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <TargetMachine>MachineX86</TargetMachine>
+ </Link>
+ <PreBuildEvent>
+ <Command>cscript //Nologo ..\shell\msvc\createCPPfromJavaScriptFiles.js "$(ProjectDir).."</Command>
+ <Message>Create mongo.cpp and mongo-server.cpp from JavaScript source files</Message>
+ </PreBuildEvent>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ <ClCompile>
+ <Optimization>MaxSpeed</Optimization>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <AdditionalIncludeDirectories>..\..\js\src;..\third_party\pcre-7.4;C:\boost;\boost;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ <PreprocessorDefinitions>_UNICODE;UNICODE;MONGO_EXPOSE_MACROS;OLDJS;STATIC_JS_API;XP_WIN;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;HAVE_CONFIG_H;PCRE_STATIC;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <PrecompiledHeader>Use</PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
+ <DisableSpecificWarnings>4355;4800;4267;4244;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ <MinimalRebuild>No</MinimalRebuild>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
+ </ClCompile>
+ <Link>
+ <AdditionalDependencies>ws2_32.lib;Psapi.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>c:\boost\lib\vs2010_64;\boost\lib\vs2010_64;\boost\lib</AdditionalLibraryDirectories>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <SubSystem>Console</SubSystem>
+ <OptimizeReferences>true</OptimizeReferences>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ </Link>
+ <PreBuildEvent>
+ <Command>cscript //Nologo ..\shell\msvc\createCPPfromJavaScriptFiles.js "$(ProjectDir).."</Command>
+ <Message>Create mongo.cpp and mongo-server.cpp from JavaScript source files</Message>
+ </PreBuildEvent>
+ </ItemDefinitionGroup>
+ <ItemGroup>
+ <ClCompile Include="..\bson\oid.cpp" />
+ <ClCompile Include="..\client\dbclientcursor.cpp" />
+ <ClCompile Include="..\client\dbclient_rs.cpp" />
+ <ClCompile Include="..\client\distlock.cpp" />
+ <ClCompile Include="..\db\commands\cloud.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\db\commands\pipeline.cpp" />
+ <ClCompile Include="..\db\common.cpp">
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\db\dbmessage.cpp" />
+ <ClCompile Include="..\db\dbcommands_generic.cpp" />
+ <ClCompile Include="..\db\dbwebserver.cpp" />
+ <ClCompile Include="..\db\pipeline\accumulator.cpp" />
+ <ClCompile Include="..\db\pipeline\accumulator_add_to_set.cpp" />
+ <ClCompile Include="..\db\pipeline\accumulator_avg.cpp" />
+ <ClCompile Include="..\db\pipeline\accumulator_first.cpp" />
+ <ClCompile Include="..\db\pipeline\accumulator_last.cpp" />
+ <ClCompile Include="..\db\pipeline\accumulator_min_max.cpp" />
+ <ClCompile Include="..\db\pipeline\accumulator_push.cpp" />
+ <ClCompile Include="..\db\pipeline\accumulator_single_value.cpp" />
+ <ClCompile Include="..\db\pipeline\accumulator_sum.cpp" />
+ <ClCompile Include="..\db\pipeline\builder.cpp" />
+ <ClCompile Include="..\db\pipeline\document.cpp" />
+ <ClCompile Include="..\db\pipeline\document_source.cpp" />
+ <ClCompile Include="..\db\pipeline\document_source_bson_array.cpp" />
+ <ClCompile Include="..\db\pipeline\document_source_command_futures.cpp" />
+ <ClCompile Include="..\db\pipeline\document_source_filter.cpp" />
+ <ClCompile Include="..\db\pipeline\document_source_filter_base.cpp" />
+ <ClCompile Include="..\db\pipeline\document_source_group.cpp" />
+ <ClCompile Include="..\db\pipeline\document_source_limit.cpp" />
+ <ClCompile Include="..\db\pipeline\document_source_match.cpp" />
+ <ClCompile Include="..\db\pipeline\document_source_out.cpp" />
+ <ClCompile Include="..\db\pipeline\document_source_project.cpp" />
+ <ClCompile Include="..\db\pipeline\document_source_skip.cpp" />
+ <ClCompile Include="..\db\pipeline\document_source_sort.cpp" />
+ <ClCompile Include="..\db\pipeline\document_source_unwind.cpp" />
+ <ClCompile Include="..\db\pipeline\doc_mem_monitor.cpp" />
+ <ClCompile Include="..\db\pipeline\expression.cpp" />
+ <ClCompile Include="..\db\pipeline\expression_context.cpp" />
+ <ClCompile Include="..\db\pipeline\field_path.cpp" />
+ <ClCompile Include="..\db\pipeline\value.cpp" />
+ <ClCompile Include="..\db\querypattern.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\db\security_commands.cpp" />
+ <ClCompile Include="..\db\security_common.cpp" />
+ <ClCompile Include="..\db\stats\top.cpp" />
+ <ClCompile Include="..\scripting\bench.cpp" />
+ <ClCompile Include="..\util\alignedbuilder.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\util\concurrency\spin_lock.cpp" />
+ <ClCompile Include="..\util\concurrency\task.cpp" />
+ <ClCompile Include="..\util\concurrency\thread_pool.cpp" />
+ <ClCompile Include="..\util\concurrency\vars.cpp" />
+ <ClCompile Include="..\util\intrusive_counter.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\util\log.cpp" />
+ <ClCompile Include="..\util\net\miniwebserver.cpp" />
+ <ClCompile Include="..\util\net\listen.cpp" />
+ <ClCompile Include="..\util\processinfo.cpp" />
+ <ClCompile Include="..\util\ramlog.cpp" />
+ <ClCompile Include="..\util\signal_handlers.cpp" />
+ <ClCompile Include="..\util\stringutils.cpp" />
+ <ClCompile Include="..\util\systeminfo_win32.cpp" />
+ <ClCompile Include="..\util\text.cpp" />
+ <ClCompile Include="..\util\version.cpp" />
+ <ClCompile Include="balance.cpp" />
+ <ClCompile Include="balancer_policy.cpp" />
+ <ClCompile Include="chunk.cpp" />
+ <ClCompile Include="client.cpp" />
+ <ClCompile Include="commands_admin.cpp" />
+ <ClCompile Include="commands_public.cpp" />
+ <ClCompile Include="config.cpp" />
+ <ClCompile Include="config_migrate.cpp" />
+ <ClCompile Include="cursors.cpp" />
+ <ClCompile Include="..\pch.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Create</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">Create</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">Create</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">Create</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\db\queryutil.cpp" />
+ <ClCompile Include="grid.cpp" />
+ <ClCompile Include="mr_shard.cpp" />
+ <ClCompile Include="request.cpp" />
+ <ClCompile Include="security.cpp" />
+ <ClCompile Include="shardconnection.cpp" />
+ <ClCompile Include="shard_version.cpp" />
+ <ClCompile Include="s_only.cpp" />
+ <ClCompile Include="server.cpp" />
+ <ClCompile Include="shard.cpp" />
+ <ClCompile Include="shardkey.cpp" />
+ <ClCompile Include="stats.cpp" />
+ <ClCompile Include="strategy.cpp" />
+ <ClCompile Include="strategy_shard.cpp" />
+ <ClCompile Include="strategy_single.cpp" />
+ <ClCompile Include="..\scripting\utils.cpp" />
+ <ClCompile Include="..\client\connpool.cpp" />
+ <ClCompile Include="..\third_party\pcre-7.4\pcrecpp.cc">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_chartables.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_compile.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_config.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_dfa_exec.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_exec.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_fullinfo.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_get.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_globals.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_info.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_maketables.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_newline.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_ord2utf8.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_refcount.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_scanner.cc">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_stringpiece.cc">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_study.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_tables.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_try_flipped.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_ucp_searchfuncs.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_valid_utf8.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_version.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_xclass.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcreposix.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\client\dbclient.cpp" />
+ <ClCompile Include="..\client\model.cpp" />
+ <ClCompile Include="..\util\assert_util.cpp" />
+ <ClCompile Include="..\util\background.cpp" />
+ <ClCompile Include="..\util\base64.cpp" />
+ <ClCompile Include="..\db\cmdline.cpp" />
+ <ClCompile Include="..\db\commands.cpp" />
+ <ClCompile Include="..\db\stats\counters.cpp" />
+ <ClCompile Include="..\util\debug_util.cpp" />
+ <ClCompile Include="..\scripting\engine.cpp" />
+ <ClCompile Include="..\scripting\engine_spidermonkey.cpp" />
+ <ClCompile Include="..\db\indexkey.cpp" />
+ <ClCompile Include="..\db\jsobj.cpp" />
+ <ClCompile Include="..\db\json.cpp" />
+ <ClCompile Include="..\db\lasterror.cpp" />
+ <ClCompile Include="..\db\matcher.cpp" />
+ <ClCompile Include="..\util\md5.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\util\md5main.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">Use</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">Use</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\util\net\message.cpp" />
+ <ClCompile Include="..\util\net\message_port.cpp" />
+ <ClCompile Include="..\util\net\message_server_port.cpp" />
+ <ClCompile Include="..\util\mmap.cpp" />
+ <ClCompile Include="..\util\mmap_win.cpp" />
+ <ClCompile Include="..\shell\mongo.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ </PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ </PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\db\nonce.cpp" />
+ <ClCompile Include="..\client\parallel.cpp" />
+ <ClCompile Include="..\util\processinfo_win32.cpp" />
+ <ClCompile Include="..\util\net\sock.cpp" />
+ <ClCompile Include="..\client\syncclusterconnection.cpp" />
+ <ClCompile Include="..\util\util.cpp" />
+ <ClCompile Include="writeback_listener.cpp" />
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="..\db\commands\pipeline.h" />
+ <ClInclude Include="..\db\pipeline\accumulator.h" />
+ <ClInclude Include="..\db\pipeline\builder.h" />
+ <ClInclude Include="..\db\pipeline\document.h" />
+ <ClInclude Include="..\db\pipeline\document_source.h" />
+ <ClInclude Include="..\db\pipeline\doc_mem_monitor.h" />
+ <ClInclude Include="..\db\pipeline\expression.h" />
+ <ClInclude Include="..\db\pipeline\expression_context.h" />
+ <ClInclude Include="..\db\pipeline\field_path.h" />
+ <ClInclude Include="..\db\pipeline\value.h" />
+ <ClInclude Include="..\util\intrusive_counter.h" />
+ <ClInclude Include="..\util\processinfo.h" />
+ <ClInclude Include="..\util\signal_handlers.h" />
+ <ClInclude Include="..\util\systeminfo.h" />
+ <ClInclude Include="..\util\version.h" />
+ <ClInclude Include="balance.h" />
+ <ClInclude Include="balancer_policy.h" />
+ <ClInclude Include="chunk.h" />
+ <ClInclude Include="client.h" />
+ <ClInclude Include="config.h" />
+ <ClInclude Include="cursors.h" />
+ <ClInclude Include="d_chunk_manager.h" />
+ <ClInclude Include="d_logic.h" />
+ <ClInclude Include="d_writeback.h" />
+ <ClInclude Include="grid.h" />
+ <ClInclude Include="gridconfig.h" />
+ <ClInclude Include="griddatabase.h" />
+ <ClInclude Include="request.h" />
+ <ClInclude Include="server.h" />
+ <ClInclude Include="shard.h" />
+ <ClInclude Include="shardkey.h" />
+ <ClInclude Include="shard_version.h" />
+ <ClInclude Include="stats.h" />
+ <ClInclude Include="strategy.h" />
+ <ClInclude Include="..\util\background.h" />
+ <ClInclude Include="..\db\commands.h" />
+ <ClInclude Include="..\db\dbmessage.h" />
+ <ClInclude Include="..\util\goodies.h" />
+ <ClInclude Include="..\db\jsobj.h" />
+ <ClInclude Include="..\db\json.h" />
+ <ClInclude Include="..\pch.h" />
+ <ClInclude Include="..\..\boostw\boost_1_34_1\boost\config\auto_link.hpp" />
+ <ClInclude Include="..\..\boostw\boost_1_34_1\boost\version.hpp" />
+ <ClInclude Include="..\third_party\pcre-7.4\config.h" />
+ <ClInclude Include="..\third_party\pcre-7.4\pcre.h" />
+ <ClInclude Include="..\client\connpool.h" />
+ <ClInclude Include="..\client\dbclient.h" />
+ <ClInclude Include="..\client\model.h" />
+ <ClInclude Include="util.h" />
+ <ClInclude Include="writeback_listener.h" />
+ </ItemGroup>
+ <ItemGroup>
+ <Library Include="..\..\js\js32d.lib">
+ <FileType>Document</FileType>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
+ </Library>
+ <Library Include="..\..\js\js32r.lib">
+ <FileType>Document</FileType>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
+ </Library>
+ <Library Include="..\..\js\js64d.lib">
+ <FileType>Document</FileType>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
+ </Library>
+ <Library Include="..\..\js\js64r.lib">
+ <FileType>Document</FileType>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">true</ExcludedFromBuild>
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
+ </Library>
+ </ItemGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+ <ImportGroup Label="ExtensionTargets">
+ </ImportGroup>
+</Project> \ No newline at end of file
diff --git a/src/mongo/s/dbgrid.vcxproj.filters b/src/mongo/s/dbgrid.vcxproj.filters
new file mode 100755
index 00000000000..02b6e9972e9
--- /dev/null
+++ b/src/mongo/s/dbgrid.vcxproj.filters
@@ -0,0 +1,614 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup>
+ <Filter Include="Source Files">
+ <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
+ <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
+ </Filter>
+ <Filter Include="Header Files">
+ <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>
+ <Extensions>h;hpp;hxx;hm;inl;inc;xsd</Extensions>
+ </Filter>
+ <Filter Include="libs_etc">
+ <UniqueIdentifier>{17d48ddf-5c49-4dfd-bafa-16d5fed290cd}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="libs_etc\pcre">
+ <UniqueIdentifier>{4c2dd526-4a57-4ff7-862f-2bd7ec4955b3}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="client">
+ <UniqueIdentifier>{b4f6635b-8c64-4ceb-8077-43203533d0b9}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="Shared Source Files">
+ <UniqueIdentifier>{e59da087-4433-46b9-862d-746cbed27b97}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="Header Shared">
+ <UniqueIdentifier>{4048b883-7255-40b3-b0e9-4c1044cff049}</UniqueIdentifier>
+ </Filter>
+ </ItemGroup>
+ <ItemGroup>
+ <ClCompile Include="balance.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="chunk.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="commands_admin.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="commands_public.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="config.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="config_migrate.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="cursors.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\queryutil.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="request.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="s_only.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="server.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="shard.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="shardkey.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="stats.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="strategy.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="strategy_shard.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="strategy_single.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\scripting\utils.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\client\dbclient.cpp">
+ <Filter>client</Filter>
+ </ClCompile>
+ <ClCompile Include="..\client\dbclientcursor.cpp">
+ <Filter>client</Filter>
+ </ClCompile>
+ <ClCompile Include="..\client\model.cpp">
+ <Filter>client</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\assert_util.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\background.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\base64.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\cmdline.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\commands.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\stats\counters.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\debug_util.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\scripting\engine.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\scripting\engine_spidermonkey.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\indexkey.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\jsobj.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\json.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\lasterror.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\matcher.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\md5.c">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\md5main.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\mmap.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\mmap_win.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\shell\mongo.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\nonce.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\client\parallel.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\processinfo_win32.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\client\syncclusterconnection.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\util.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\pch.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="shardconnection.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\concurrency\vars.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\concurrency\thread_pool.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\version.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\text.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="balancer_policy.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\stringutils.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\client\distlock.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\log.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="grid.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\processinfo.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\dbwebserver.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\concurrency\task.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\signal_handlers.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\scripting\bench.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="writeback_listener.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="shard_version.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\concurrency\spin_lock.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\alignedbuilder.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\bson\oid.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\client\dbclient_rs.cpp">
+ <Filter>client</Filter>
+ </ClCompile>
+ <ClCompile Include="client.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\dbcommands_generic.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\querypattern.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\ramlog.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="mr_shard.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\common.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\security_common.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="security.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\dbmessage.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\net\miniwebserver.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\net\listen.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcrecpp.cc">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_chartables.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_compile.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_config.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_dfa_exec.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_exec.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_fullinfo.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_get.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_globals.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_info.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_maketables.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_newline.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_ord2utf8.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_refcount.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_scanner.cc">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_stringpiece.cc">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_study.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_tables.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_try_flipped.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_ucp_searchfuncs.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_valid_utf8.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_version.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcre_xclass.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\third_party\pcre-7.4\pcreposix.c">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\net\message.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\net\message_port.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\net\message_server_port.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\net\sock.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\security_commands.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClInclude Include="gridconfig.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ <ClInclude Include="griddatabase.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ <ClInclude Include="shard.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ <ClInclude Include="strategy.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\background.h">
+ <Filter>Header Shared</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\commands.h">
+ <Filter>Header Shared</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\dbmessage.h">
+ <Filter>Header Shared</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\goodies.h">
+ <Filter>Header Shared</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\jsobj.h">
+ <Filter>Header Shared</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\json.h">
+ <Filter>Header Shared</Filter>
+ </ClInclude>
+ <ClInclude Include="..\pch.h">
+ <Filter>Header Shared</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\boostw\boost_1_34_1\boost\config\auto_link.hpp">
+ <Filter>libs_etc</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\boostw\boost_1_34_1\boost\version.hpp">
+ <Filter>libs_etc</Filter>
+ </ClInclude>
+ <ClInclude Include="..\client\connpool.h">
+ <Filter>client</Filter>
+ </ClInclude>
+ <ClInclude Include="..\client\dbclient.h">
+ <Filter>client</Filter>
+ </ClInclude>
+ <ClInclude Include="..\client\model.h">
+ <Filter>client</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\version.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ <ClInclude Include="balancer_policy.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="grid.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\processinfo.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\signal_handlers.h">
+ <Filter>Shared Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="writeback_listener.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="balance.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="chunk.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="client.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="config.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="cursors.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="d_chunk_manager.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="d_logic.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="d_writeback.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="request.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="server.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="shard_version.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="shardkey.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="stats.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="util.h">
+ <Filter>Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\third_party\pcre-7.4\config.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\third_party\pcre-7.4\pcre.h">
+ <Filter>Header Files</Filter>
+ </ClInclude>
+ <ClCompile Include="..\client\connpool.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\commands\cloud.cpp">
+ <Filter>Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\commands\pipeline.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\pipeline\accumulator.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\pipeline\accumulator_add_to_set.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\pipeline\accumulator_avg.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\pipeline\accumulator_first.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\pipeline\accumulator_last.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\pipeline\accumulator_min_max.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\pipeline\accumulator_push.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\pipeline\accumulator_single_value.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\pipeline\accumulator_sum.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\pipeline\builder.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\pipeline\doc_mem_monitor.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\pipeline\document.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\pipeline\document_source.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\pipeline\document_source_bson_array.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\pipeline\document_source_command_futures.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\pipeline\document_source_filter.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\pipeline\document_source_filter_base.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\pipeline\document_source_group.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\pipeline\document_source_limit.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\pipeline\document_source_match.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\pipeline\document_source_out.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\pipeline\document_source_project.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\pipeline\document_source_skip.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\pipeline\document_source_sort.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\pipeline\document_source_unwind.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\pipeline\expression.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\pipeline\expression_context.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\pipeline\field_path.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\pipeline\value.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\intrusive_counter.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\util\systeminfo_win32.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\db\stats\top.cpp">
+ <Filter>Shared Source Files</Filter>
+ </ClCompile>
+ </ItemGroup>
+ <ItemGroup>
+ <Library Include="..\..\js\js32d.lib" />
+ <Library Include="..\..\js\js32r.lib" />
+ <Library Include="..\..\js\js64d.lib" />
+ <Library Include="..\..\js\js64r.lib" />
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="..\db\commands\pipeline.h">
+ <Filter>Shared Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\pipeline\accumulator.h">
+ <Filter>Shared Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\pipeline\builder.h">
+ <Filter>Shared Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\pipeline\doc_mem_monitor.h">
+ <Filter>Shared Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\pipeline\document.h">
+ <Filter>Shared Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\pipeline\document_source.h">
+ <Filter>Shared Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\pipeline\expression.h">
+ <Filter>Shared Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\pipeline\expression_context.h">
+ <Filter>Shared Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\pipeline\field_path.h">
+ <Filter>Shared Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\db\pipeline\value.h">
+ <Filter>Shared Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\intrusive_counter.h">
+ <Filter>Shared Source Files</Filter>
+ </ClInclude>
+ <ClInclude Include="..\util\systeminfo.h">
+ <Filter>Shared Source Files</Filter>
+ </ClInclude>
+ </ItemGroup>
+</Project> \ No newline at end of file
diff --git a/src/mongo/s/default_version.cpp b/src/mongo/s/default_version.cpp
new file mode 100644
index 00000000000..82368672a2e
--- /dev/null
+++ b/src/mongo/s/default_version.cpp
@@ -0,0 +1,52 @@
+// @file default_version.cpp
+
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "s/util.h"
+#include "shard_version.h"
+
+namespace mongo {
+
+ // Global version manager
+ VersionManager versionManager;
+
+ void VersionManager::resetShardVersionCB( DBClientBase * conn ) {
+ return;
+ }
+
+ bool VersionManager::isVersionableCB( DBClientBase* conn ){
+ return false;
+ }
+
+ bool VersionManager::initShardVersionCB( DBClientBase * conn_in, BSONObj& result ){
+ return false;
+ }
+
+ bool VersionManager::forceRemoteCheckShardVersionCB( const string& ns ){
+ return true;
+ }
+
+ bool VersionManager::checkShardVersionCB( DBClientBase* conn_in , const string& ns , bool authoritative , int tryNumber ) {
+ return false;
+ }
+
+ bool VersionManager::checkShardVersionCB( ShardConnection* conn_in , bool authoritative , int tryNumber ) {
+ return false;
+ }
+
+} // namespace mongo
diff --git a/src/mongo/s/grid.cpp b/src/mongo/s/grid.cpp
new file mode 100644
index 00000000000..9d9c2e4555e
--- /dev/null
+++ b/src/mongo/s/grid.cpp
@@ -0,0 +1,531 @@
+// grid.cpp
+
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+
+#include <iomanip>
+#include "../client/connpool.h"
+#include "../util/stringutils.h"
+#include "../util/unittest.h"
+#include "../db/namespacestring.h"
+
+#include "grid.h"
+#include "shard.h"
+
+namespace mongo {
+
+ DBConfigPtr Grid::getDBConfig( string database , bool create , const string& shardNameHint ) {
+ {
+ string::size_type i = database.find( "." );
+ if ( i != string::npos )
+ database = database.substr( 0 , i );
+ }
+
+ if ( database == "config" )
+ return configServerPtr;
+
+ uassert( 15918 , str::stream() << "invalid database name: " << database , NamespaceString::validDBName( database ) );
+
+ scoped_lock l( _lock );
+
+ DBConfigPtr& cc = _databases[database];
+ if ( !cc ) {
+ cc.reset(new DBConfig( database ));
+ if ( ! cc->load() ) {
+ if ( create ) {
+ // note here that cc->primary == 0.
+ log() << "couldn't find database [" << database << "] in config db" << endl;
+
+ {
+                    // let's check whether a db name differing only in case already exists
+ ScopedDbConnection conn( configServer.modelServer() );
+ BSONObjBuilder b;
+ b.appendRegex( "_id" , (string)"^" + database + "$" , "i" );
+ BSONObj d = conn->findOne( ShardNS::database , b.obj() );
+ conn.done();
+
+ if ( ! d.isEmpty() ) {
+ cc.reset();
+ stringstream ss;
+ ss << "can't have 2 databases that just differ on case "
+ << " have: " << d["_id"].String()
+ << " want to add: " << database;
+
+ uasserted( DatabaseDifferCaseCode ,ss.str() );
+ }
+ }
+
+ Shard primary;
+ if ( database == "admin" ) {
+ primary = configServer.getPrimary();
+
+ }
+ else if ( shardNameHint.empty() ) {
+ primary = Shard::pick();
+
+ }
+ else {
+ // use the shard name if provided
+ Shard shard;
+ shard.reset( shardNameHint );
+ primary = shard;
+ }
+
+ if ( primary.ok() ) {
+ cc->setPrimary( primary.getName() ); // saves 'cc' to configDB
+ log() << "\t put [" << database << "] on: " << primary << endl;
+ }
+ else {
+ cc.reset();
+ log() << "\t can't find a shard to put new db on" << endl;
+ uasserted( 10185 , "can't find a shard to put new db on" );
+ }
+ }
+ else {
+ cc.reset();
+ }
+ }
+
+ }
+
+ return cc;
+ }
+
+ void Grid::removeDB( string database ) {
+ uassert( 10186 , "removeDB expects db name" , database.find( '.' ) == string::npos );
+ scoped_lock l( _lock );
+ _databases.erase( database );
+
+ }
+
+ bool Grid::allowLocalHost() const {
+ return _allowLocalShard;
+ }
+
+ void Grid::setAllowLocalHost( bool allow ) {
+ _allowLocalShard = allow;
+ }
+
+ bool Grid::addShard( string* name , const ConnectionString& servers , long long maxSize , string& errMsg ) {
+ // name can be NULL, so provide a dummy one here to avoid testing it elsewhere
+ string nameInternal;
+ if ( ! name ) {
+ name = &nameInternal;
+ }
+
+ ReplicaSetMonitorPtr rsMonitor;
+
+ // Check whether the host (or set) exists and run several sanity checks on this request.
+        // There are two sets of sanity checks: making sure adding this particular shard is consistent
+        // with the replica set state (if it exists) and making sure this shard's databases can be
+ // brought into the grid without conflict.
+
+ vector<string> dbNames;
+ try {
+ ScopedDbConnection newShardConn( servers );
+ newShardConn->getLastError();
+
+ if ( newShardConn->type() == ConnectionString::SYNC ) {
+ newShardConn.done();
+ errMsg = "can't use sync cluster as a shard. for replica set, have to use <setname>/<server1>,<server2>,...";
+ return false;
+ }
+
+ BSONObj resIsMongos;
+ bool ok = newShardConn->runCommand( "admin" , BSON( "isdbgrid" << 1 ) , resIsMongos );
+
+ // should return ok=0, cmd not found if it's a normal mongod
+ if ( ok ) {
+ errMsg = "can't add a mongos process as a shard";
+ newShardConn.done();
+ return false;
+ }
+
+ BSONObj resIsMaster;
+ ok = newShardConn->runCommand( "admin" , BSON( "isMaster" << 1 ) , resIsMaster );
+ if ( !ok ) {
+ ostringstream ss;
+ ss << "failed running isMaster: " << resIsMaster;
+ errMsg = ss.str();
+ newShardConn.done();
+ return false;
+ }
+
+ // if the shard has only one host, make sure it is not part of a replica set
+ string setName = resIsMaster["setName"].str();
+ string commandSetName = servers.getSetName();
+ if ( commandSetName.empty() && ! setName.empty() ) {
+ ostringstream ss;
+ ss << "host is part of set: " << setName << " use replica set url format <setname>/<server1>,<server2>,....";
+ errMsg = ss.str();
+ newShardConn.done();
+ return false;
+ }
+ if ( !commandSetName.empty() && setName.empty() ) {
+ ostringstream ss;
+ ss << "host did not return a set name, is the replica set still initializing? " << resIsMaster;
+ errMsg = ss.str();
+ newShardConn.done();
+ return false;
+ }
+
+ // if the shard is part of replica set, make sure it is the right one
+ if ( ! commandSetName.empty() && ( commandSetName != setName ) ) {
+ ostringstream ss;
+ ss << "host is part of a different set: " << setName;
+ errMsg = ss.str();
+ newShardConn.done();
+ return false;
+ }
+
+ // if the shard is part of a replica set, make sure all the hosts mentioned in 'servers' are part of
+ // the set. It is fine if not all members of the set are present in 'servers'.
+ bool foundAll = true;
+ string offendingHost;
+ if ( ! commandSetName.empty() ) {
+ set<string> hostSet;
+ BSONObjIterator iter( resIsMaster["hosts"].Obj() );
+ while ( iter.more() ) {
+ hostSet.insert( iter.next().String() ); // host:port
+ }
+ if ( resIsMaster["passives"].isABSONObj() ) {
+ BSONObjIterator piter( resIsMaster["passives"].Obj() );
+ while ( piter.more() ) {
+ hostSet.insert( piter.next().String() ); // host:port
+ }
+ }
+ if ( resIsMaster["arbiters"].isABSONObj() ) {
+ BSONObjIterator piter( resIsMaster["arbiters"].Obj() );
+ while ( piter.more() ) {
+ hostSet.insert( piter.next().String() ); // host:port
+ }
+ }
+
+ vector<HostAndPort> hosts = servers.getServers();
+ for ( size_t i = 0 ; i < hosts.size() ; i++ ) {
+ if (!hosts[i].hasPort()) {
+ hosts[i].setPort(CmdLine::DefaultDBPort);
+ }
+ string host = hosts[i].toString(); // host:port
+ if ( hostSet.find( host ) == hostSet.end() ) {
+ offendingHost = host;
+ foundAll = false;
+ break;
+ }
+ }
+ }
+ if ( ! foundAll ) {
+ ostringstream ss;
+ ss << "in seed list " << servers.toString() << ", host " << offendingHost
+ << " does not belong to replica set " << setName;
+ errMsg = ss.str();
+ newShardConn.done();
+ return false;
+ }
+
+ // shard name defaults to the name of the replica set
+ if ( name->empty() && ! setName.empty() )
+ *name = setName;
+
+            // In order to be accepted as a new shard, that mongod must not have any database name that already
+            // exists on any other shard. If that test passes, the new shard's databases are going to be entered as
+ // non-sharded db's whose primary is the newly added shard.
+
+ BSONObj resListDB;
+ ok = newShardConn->runCommand( "admin" , BSON( "listDatabases" << 1 ) , resListDB );
+ if ( !ok ) {
+ ostringstream ss;
+ ss << "failed listing " << servers.toString() << "'s databases:" << resListDB;
+ errMsg = ss.str();
+ newShardConn.done();
+ return false;
+ }
+
+ BSONObjIterator i( resListDB["databases"].Obj() );
+ while ( i.more() ) {
+ BSONObj dbEntry = i.next().Obj();
+ const string& dbName = dbEntry["name"].String();
+ if ( _isSpecialLocalDB( dbName ) ) {
+ // 'local', 'admin', and 'config' are system DBs and should be excluded here
+ continue;
+ }
+ else {
+ dbNames.push_back( dbName );
+ }
+ }
+
+ if ( newShardConn->type() == ConnectionString::SET )
+ rsMonitor = ReplicaSetMonitor::get( setName );
+
+ newShardConn.done();
+ }
+ catch ( DBException& e ) {
+ ostringstream ss;
+ ss << "couldn't connect to new shard ";
+ ss << e.what();
+ errMsg = ss.str();
+ return false;
+ }
+
+        // check that none of the shard candidate's databases already exist elsewhere
+ for ( vector<string>::const_iterator it = dbNames.begin(); it != dbNames.end(); ++it ) {
+ DBConfigPtr config = getDBConfig( *it , false );
+ if ( config.get() != NULL ) {
+ ostringstream ss;
+ ss << "can't add shard " << servers.toString() << " because a local database '" << *it;
+ ss << "' exists in another " << config->getPrimary().toString();
+ errMsg = ss.str();
+ return false;
+ }
+ }
+
+ // if a name for a shard wasn't provided, pick one.
+ if ( name->empty() && ! _getNewShardName( name ) ) {
+ errMsg = "error generating new shard name";
+ return false;
+ }
+
+ // build the ConfigDB shard document
+ BSONObjBuilder b;
+ b.append( "_id" , *name );
+ b.append( "host" , rsMonitor ? rsMonitor->getServerAddress() : servers.toString() );
+ if ( maxSize > 0 ) {
+ b.append( ShardFields::maxSize.name() , maxSize );
+ }
+ BSONObj shardDoc = b.obj();
+
+ {
+ ScopedDbConnection conn( configServer.getPrimary() );
+
+            // check whether the set of hosts (or single host) is not already a known shard
+ BSONObj old = conn->findOne( ShardNS::shard , BSON( "host" << servers.toString() ) );
+ if ( ! old.isEmpty() ) {
+ errMsg = "host already used";
+ conn.done();
+ return false;
+ }
+
+ log() << "going to add shard: " << shardDoc << endl;
+
+ conn->insert( ShardNS::shard , shardDoc );
+ errMsg = conn->getLastError();
+ if ( ! errMsg.empty() ) {
+ log() << "error adding shard: " << shardDoc << " err: " << errMsg << endl;
+ conn.done();
+ return false;
+ }
+
+ conn.done();
+ }
+
+ Shard::reloadShardInfo();
+
+ // add all databases of the new shard
+ for ( vector<string>::const_iterator it = dbNames.begin(); it != dbNames.end(); ++it ) {
+ DBConfigPtr config = getDBConfig( *it , true , *name );
+ if ( ! config ) {
+ log() << "adding shard " << servers << " even though could not add database " << *it << endl;
+ }
+ }
+
+ return true;
+ }
+
+ bool Grid::knowAboutShard( const string& name ) const {
+ ShardConnection conn( configServer.getPrimary() , "" );
+ BSONObj shard = conn->findOne( ShardNS::shard , BSON( "host" << name ) );
+ conn.done();
+ return ! shard.isEmpty();
+ }
+
+ bool Grid::_getNewShardName( string* name ) const {
+ DEV assert( name );
+
+ bool ok = false;
+ int count = 0;
+
+ ShardConnection conn( configServer.getPrimary() , "" );
+ BSONObj o = conn->findOne( ShardNS::shard , Query( fromjson ( "{_id: /^shard/}" ) ).sort( BSON( "_id" << -1 ) ) );
+ if ( ! o.isEmpty() ) {
+ string last = o["_id"].String();
+ istringstream is( last.substr( 5 ) );
+ is >> count;
+ count++;
+ }
+ if (count < 9999) {
+ stringstream ss;
+ ss << "shard" << setfill('0') << setw(4) << count;
+ *name = ss.str();
+ ok = true;
+ }
+ conn.done();
+
+ return ok;
+ }
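+
+    // Note added for clarity (an assumption about intent, not part of the original change): generated names
+    // follow the pattern shard0000, shard0001, ... derived from the highest existing /^shard/ _id plus one.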
+
+ bool Grid::shouldBalance() const {
+ ShardConnection conn( configServer.getPrimary() , "" );
+
+ // look for the stop balancer marker
+ BSONObj balancerDoc = conn->findOne( ShardNS::settings, BSON( "_id" << "balancer" ) );
+ conn.done();
+
+ boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();
+ if ( _balancerStopped( balancerDoc ) || ! _inBalancingWindow( balancerDoc , now ) ) {
+ return false;
+ }
+
+ return true;
+ }
+
+ bool Grid::_balancerStopped( const BSONObj& balancerDoc ) {
+        // check the 'stopped' marker
+ // if present, it is a simple bool
+ BSONElement stoppedElem = balancerDoc["stopped"];
+ return stoppedElem.trueValue();
+ }
+
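+    // Illustrative note (an assumption about the stored document, not part of this change): the 'stopped'
+    // and 'activeWindow' markers checked here live in the settings document with _id "balancer", e.g.
+    //   { _id: "balancer", stopped: false, activeWindow: { start: "23:00" , stop: "06:00" } }
+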
+ bool Grid::_inBalancingWindow( const BSONObj& balancerDoc , const boost::posix_time::ptime& now ) {
+ // check the 'activeWindow' marker
+ // if present, it is an interval during the day when the balancer should be active
+ // { start: "08:00" , stop: "19:30" }, strftime format is %H:%M
+ BSONElement windowElem = balancerDoc["activeWindow"];
+ if ( windowElem.eoo() ) {
+ return true;
+ }
+
+ // check if both 'start' and 'stop' are present
+ if ( ! windowElem.isABSONObj() ) {
+ warning() << "'activeWindow' format is { start: \"hh:mm\" , stop: ... }" << balancerDoc << endl;
+ return true;
+ }
+ BSONObj intervalDoc = windowElem.Obj();
+ const string start = intervalDoc["start"].str();
+ const string stop = intervalDoc["stop"].str();
+ if ( start.empty() || stop.empty() ) {
+ warning() << "must specify both start and end of balancing window: " << intervalDoc << endl;
+ return true;
+ }
+
+ // check that both 'start' and 'stop' are valid time-of-day
+ boost::posix_time::ptime startTime, stopTime;
+ if ( ! toPointInTime( start , &startTime ) || ! toPointInTime( stop , &stopTime ) ) {
+ warning() << "cannot parse active window (use hh:mm 24hs format): " << intervalDoc << endl;
+ return true;
+ }
+
+ if ( logLevel ) {
+ stringstream ss;
+ ss << " now: " << now
+ << " startTime: " << startTime
+ << " stopTime: " << stopTime;
+ log() << "_inBalancingWindow: " << ss.str() << endl;
+ }
+
+ // allow balancing if during the activeWindow
+ // note that a window may be open during the night
+ if ( stopTime > startTime ) {
+ if ( ( now >= startTime ) && ( now <= stopTime ) ) {
+ return true;
+ }
+ }
+ else if ( startTime > stopTime ) {
+ if ( ( now >=startTime ) || ( now <= stopTime ) ) {
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ unsigned long long Grid::getNextOpTime() const {
+ ScopedDbConnection conn( configServer.getPrimary() );
+
+ BSONObj result;
+ massert( 10421 , "getoptime failed" , conn->simpleCommand( "admin" , &result , "getoptime" ) );
+ conn.done();
+
+ return result["optime"]._numberLong();
+ }
+
+ bool Grid::_isSpecialLocalDB( const string& dbName ) {
+ return ( dbName == "local" ) || ( dbName == "admin" ) || ( dbName == "config" );
+ }
+
+ void Grid::flushConfig() {
+ scoped_lock lk( _lock );
+ _databases.clear();
+ }
+
+ BSONObj Grid::getConfigSetting( string name ) const {
+ ScopedDbConnection conn( configServer.getPrimary() );
+ BSONObj result = conn->findOne( ShardNS::settings, BSON( "_id" << name ) );
+ conn.done();
+
+ return result;
+ }
+
+ Grid grid;
+
+
+ // unit tests
+
+ class BalancingWindowUnitTest : public UnitTest {
+ public:
+ void run() {
+
+ if ( ! cmdLine.isMongos() )
+ return;
+
+ // T0 < T1 < now < T2 < T3 and Error
+ const string T0 = "9:00";
+ const string T1 = "11:00";
+ boost::posix_time::ptime now( currentDate(), boost::posix_time::hours( 13 ) + boost::posix_time::minutes( 48 ) );
+ const string T2 = "17:00";
+ const string T3 = "21:30";
+ const string E = "28:35";
+
+ BSONObj w1 = BSON( "activeWindow" << BSON( "start" << T0 << "stop" << T1 ) ); // closed in the past
+ BSONObj w2 = BSON( "activeWindow" << BSON( "start" << T2 << "stop" << T3 ) ); // not opened until the future
+ BSONObj w3 = BSON( "activeWindow" << BSON( "start" << T1 << "stop" << T2 ) ); // open now
+ BSONObj w4 = BSON( "activeWindow" << BSON( "start" << T3 << "stop" << T2 ) ); // open since last day
+
+ assert( ! Grid::_inBalancingWindow( w1 , now ) );
+ assert( ! Grid::_inBalancingWindow( w2 , now ) );
+ assert( Grid::_inBalancingWindow( w3 , now ) );
+ assert( Grid::_inBalancingWindow( w4 , now ) );
+
+ // bad input should not stop the balancer
+
+ BSONObj w5; // empty window
+ BSONObj w6 = BSON( "activeWindow" << BSON( "start" << 1 ) ); // missing stop
+ BSONObj w7 = BSON( "activeWindow" << BSON( "stop" << 1 ) ); // missing start
+ BSONObj w8 = BSON( "wrongMarker" << 1 << "start" << 1 << "stop" << 1 ); // active window marker missing
+ BSONObj w9 = BSON( "activeWindow" << BSON( "start" << T3 << "stop" << E ) ); // garbage in window
+
+ assert( Grid::_inBalancingWindow( w5 , now ) );
+ assert( Grid::_inBalancingWindow( w6 , now ) );
+ assert( Grid::_inBalancingWindow( w7 , now ) );
+ assert( Grid::_inBalancingWindow( w8 , now ) );
+ assert( Grid::_inBalancingWindow( w9 , now ) );
+
+            LOG(1) << "BalancingWindowObjTest passed" << endl;
+ }
+ } BalancingWindowObjTest;
+
+}
diff --git a/src/mongo/s/grid.h b/src/mongo/s/grid.h
new file mode 100644
index 00000000000..9731ada518b
--- /dev/null
+++ b/src/mongo/s/grid.h
@@ -0,0 +1,135 @@
+// grid.h
+
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include <boost/date_time/posix_time/posix_time.hpp>
+
+#include "../util/time_support.h"
+#include "../util/concurrency/mutex.h"
+
+#include "config.h" // DBConfigPtr
+
+namespace mongo {
+
+ /**
+ * stores meta-information about the grid
+     * TODO: use shared_ptr for DBConfig pointers
+ */
+ class Grid {
+ public:
+ Grid() : _lock( "Grid" ) , _allowLocalShard( true ) { }
+
+ /**
+         * gets the config for the db.
+         * will return an empty DBConfig if it is not in the db already
+ */
+ DBConfigPtr getDBConfig( string ns , bool create=true , const string& shardNameHint="" );
+
+ /**
+         * removes the db entry.
+         * the next getDBConfig call will fetch it from the db
+ */
+ void removeDB( string db );
+
+ /**
+ * @return true if shards and config servers are allowed to use 'localhost' in address
+ */
+ bool allowLocalHost() const;
+
+ /**
+ * @param allow whether to allow shards and config servers to use 'localhost' in address
+ */
+ void setAllowLocalHost( bool allow );
+
+ /**
+ *
+ * addShard will create a new shard in the grid. It expects a mongod process to be running
+ * on the provided address. Adding a shard that is a replica set is supported.
+ *
+ * @param name is an optional string with the name of the shard. if omitted, grid will
+ * generate one and update the parameter.
+ * @param servers is the connection string of the shard being added
+ * @param maxSize is the optional space quota in bytes. Zero means there is no limit on
+ * space usage
+ * @param errMsg is the error description in case the operation failed.
+ * @return true if shard was successfully added.
+ */
+ bool addShard( string* name , const ConnectionString& servers , long long maxSize , string& errMsg );
+
+ /**
+ * @return true if the config database knows about a host 'name'
+ */
+ bool knowAboutShard( const string& name ) const;
+
+ /**
+ * @return true if the chunk balancing functionality is enabled
+ */
+ bool shouldBalance() const;
+
+ /**
+ *
+ * Obtain grid configuration and settings data.
+ *
+ * @param name identifies a particular type of configuration data.
+ * @return a BSON object containing the requested data.
+ */
+ BSONObj getConfigSetting( string name ) const;
+
+ unsigned long long getNextOpTime() const;
+
+ void flushConfig();
+
+ // exposed methods below are for testing only
+
+ /**
+ * @param balancerDoc bson that may contain a window of time for the balancer to work
+ * format { ... , activeWindow: { start: "8:30" , stop: "19:00" } , ... }
+ * @return true if there is no window of time specified for the balancer or if we're currently in it
+ */
+ static bool _inBalancingWindow( const BSONObj& balancerDoc , const boost::posix_time::ptime& now );
+
+ private:
+ mongo::mutex _lock; // protects _databases; TODO: change to r/w lock ??
+ map<string, DBConfigPtr > _databases; // maps ns to DBConfig's
+ bool _allowLocalShard; // can 'localhost' be used in shard addresses?
+
+ /**
+ * @param name is the chosen name for the shard. Parameter is mandatory.
+ * @return true if it managed to generate a shard name. May return false if (currently)
+ * 10000 shard names are already in use.
+ */
+ bool _getNewShardName( string* name ) const;
+
+ /**
+ * @return whether a given dbname is used for shard "local" databases (e.g., admin or local)
+ */
+ static bool _isSpecialLocalDB( const string& dbName );
+
+ /**
+ * @param balancerDoc bson that may contain a marker to stop the balancer
+ * format { ... , stopped: [ "true" | "false" ] , ... }
+ * @return true if the marker is present and is set to true
+ */
+ static bool _balancerStopped( const BSONObj& balancerDoc );
+
+ };
+
+ extern Grid grid;
+
+} // namespace mongo
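
To make the addShard() contract concrete, an illustrative sketch of a caller (not part of this changeset; the host name is invented, error handling is reduced to logging, and the single-host ConnectionString constructor from the client library is assumed):

    // illustrative sketch only
    string name;                                   // empty: Grid generates a name and writes it back here
    string errMsg;
    ConnectionString servers( HostAndPort( "shardhost.example.net:27018" ) );

    if ( grid.addShard( &name , servers , 0 /* no space quota */ , errMsg ) )
        log() << "added shard " << name << endl;
    else
        log() << "addShard failed: " << errMsg << endl;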
diff --git a/src/mongo/s/mr_shard.cpp b/src/mongo/s/mr_shard.cpp
new file mode 100644
index 00000000000..5bb83afedae
--- /dev/null
+++ b/src/mongo/s/mr_shard.cpp
@@ -0,0 +1,316 @@
+// mr_shard.cpp
+
+/**
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../util/net/message.h"
+#include "../db/dbmessage.h"
+#include "../scripting/engine.h"
+
+#include "mr_shard.h"
+
+namespace mongo {
+
+ namespace mr_shard {
+
+ AtomicUInt Config::JOB_NUMBER;
+
+ JSFunction::JSFunction( string type , const BSONElement& e ) {
+ _type = type;
+ _code = e._asCode();
+
+ if ( e.type() == CodeWScope )
+ _wantedScope = e.codeWScopeObject();
+ }
+
+ void JSFunction::init( State * state ) {
+ _scope = state->scope();
+ assert( _scope );
+ _scope->init( &_wantedScope );
+
+ _func = _scope->createFunction( _code.c_str() );
+ uassert( 14836 , str::stream() << "couldn't compile code for: " << _type , _func );
+
+ // install in JS scope so that it can be called in JS mode
+ _scope->setFunction(_type.c_str(), _code.c_str());
+ }
+
+ /**
+ * Applies the finalize function to a tuple obj (key, val)
+ * Returns tuple obj {_id: key, value: newval}
+ */
+ BSONObj JSFinalizer::finalize( const BSONObj& o ) {
+ Scope * s = _func.scope();
+
+ Scope::NoDBAccess no = s->disableDBAccess( "can't access db inside finalize" );
+ s->invokeSafe( _func.func() , &o, 0 );
+
+ // don't want to use o.objsize() to size b
+ // since there are many cases where the point of finalize
+ // is converting many fields to 1
+ BSONObjBuilder b;
+ b.append( o.firstElement() );
+ s->append( b , "value" , "return" );
+ return b.obj();
+ }
+
+ void JSReducer::init( State * state ) {
+ _func.init( state );
+ }
+
+ /**
+ * Reduces a list of tuple objects (key, value) to a single tuple {"0": key, "1": value}
+ */
+ BSONObj JSReducer::reduce( const BSONList& tuples ) {
+ if (tuples.size() <= 1)
+ return tuples[0];
+ BSONObj key;
+ int endSizeEstimate = 16;
+ _reduce( tuples , key , endSizeEstimate );
+
+ BSONObjBuilder b(endSizeEstimate);
+ b.appendAs( key.firstElement() , "0" );
+ _func.scope()->append( b , "1" , "return" );
+ return b.obj();
+ }
+
+ /**
+ * Reduces a list of tuple objects (key, value) to a single tuple {_id: key, value: val}
+ * Also applies a finalizer method if present.
+ */
+ BSONObj JSReducer::finalReduce( const BSONList& tuples , Finalizer * finalizer ) {
+
+ BSONObj res;
+ BSONObj key;
+
+ if (tuples.size() == 1) {
+ // 1 obj, just use it
+ key = tuples[0];
+ BSONObjBuilder b(key.objsize());
+ BSONObjIterator it(key);
+ b.appendAs( it.next() , "_id" );
+ b.appendAs( it.next() , "value" );
+ res = b.obj();
+ }
+ else {
+ // need to reduce
+ int endSizeEstimate = 16;
+ _reduce( tuples , key , endSizeEstimate );
+ BSONObjBuilder b(endSizeEstimate);
+ b.appendAs( key.firstElement() , "_id" );
+ _func.scope()->append( b , "value" , "return" );
+ res = b.obj();
+ }
+
+ if ( finalizer ) {
+ res = finalizer->finalize( res );
+ }
+
+ return res;
+ }
+
+ /**
+ * actually applies a reduce to a list of tuples (key, value).
+ * After the call, 'key' holds the wrapped key and the reduced value is left in the scope's "return" slot
+ */
+ void JSReducer::_reduce( const BSONList& tuples , BSONObj& key , int& endSizeEstimate ) {
+ int sizeEstimate = ( tuples.size() * tuples.begin()->getField( "value" ).size() ) + 128;
+
+ // need to build the reduce args: ( key, [values] )
+ BSONObjBuilder reduceArgs( sizeEstimate );
+ boost::scoped_ptr<BSONArrayBuilder> valueBuilder;
+ int sizeSoFar = 0;
+ unsigned n = 0;
+ for ( ; n<tuples.size(); n++ ) {
+ BSONObjIterator j(tuples[n]);
+ BSONElement keyE = j.next();
+ if ( n == 0 ) {
+ reduceArgs.append( keyE );
+ key = keyE.wrap();
+ sizeSoFar = 5 + keyE.size();
+ valueBuilder.reset(new BSONArrayBuilder( reduceArgs.subarrayStart( "tuples" ) ));
+ }
+
+ BSONElement ee = j.next();
+
+ uassert( 14837 , "value too large to reduce" , ee.size() < ( BSONObjMaxUserSize / 2 ) );
+
+ if ( sizeSoFar + ee.size() > BSONObjMaxUserSize ) {
+ assert( n > 1 ); // if not, inf. loop
+ break;
+ }
+
+ valueBuilder->append( ee );
+ sizeSoFar += ee.size();
+ }
+ assert(valueBuilder);
+ valueBuilder->done();
+ BSONObj args = reduceArgs.obj();
+
+ Scope * s = _func.scope();
+
+ s->invokeSafe( _func.func() , &args, 0, 0, false, true, true );
+ ++numReduces;
+
+ if ( s->type( "return" ) == Array ) {
+ uasserted( 14838 , "reduce -> multiple not supported yet");
+ return;
+ }
+
+ endSizeEstimate = key.objsize() + ( args.objsize() / tuples.size() );
+
+ if ( n == tuples.size() )
+ return;
+
+ // the input list was too large, add the rest of the elements to new tuples and reduce again
+ // note: would be better to use loop instead of recursion to avoid stack overflow
+ BSONList x;
+ for ( ; n < tuples.size(); n++ ) {
+ x.push_back( tuples[n] );
+ }
+ BSONObjBuilder temp( endSizeEstimate );
+ temp.append( key.firstElement() );
+ s->append( temp , "1" , "return" );
+ x.push_back( temp.obj() );
+ _reduce( x , key , endSizeEstimate );
+ }
+
+ Config::Config( const string& _dbname , const BSONObj& cmdObj ) {
+
+ dbname = _dbname;
+ ns = dbname + "." + cmdObj.firstElement().valuestr();
+
+ verbose = cmdObj["verbose"].trueValue();
+ jsMode = cmdObj["jsMode"].trueValue();
+
+ jsMaxKeys = 500000;
+ reduceTriggerRatio = 2.0;
+ maxInMemSize = 5 * 1024 * 1024;
+
+ uassert( 14841 , "outType is no longer a valid option" , cmdObj["outType"].eoo() );
+
+ if ( cmdObj["out"].type() == String ) {
+ finalShort = cmdObj["out"].String();
+ outType = REPLACE;
+ }
+ else if ( cmdObj["out"].type() == Object ) {
+ BSONObj o = cmdObj["out"].embeddedObject();
+
+ BSONElement e = o.firstElement();
+ string t = e.fieldName();
+
+ if ( t == "normal" || t == "replace" ) {
+ outType = REPLACE;
+ finalShort = e.String();
+ }
+ else if ( t == "merge" ) {
+ outType = MERGE;
+ finalShort = e.String();
+ }
+ else if ( t == "reduce" ) {
+ outType = REDUCE;
+ finalShort = e.String();
+ }
+ else if ( t == "inline" ) {
+ outType = INMEMORY;
+ }
+ else {
+ uasserted( 14839 , str::stream() << "unknown out specifier [" << t << "]" );
+ }
+
+ if (o.hasElement("db")) {
+ outDB = o["db"].String();
+ }
+
+ if (o.hasElement("nonAtomic")) {
+ outNonAtomic = o["nonAtomic"].Bool();
+ }
+ }
+ else {
+ uasserted( 14840 , "'out' has to be a string or an object" );
+ }
+
+ if ( outType != INMEMORY ) { // setup names
+ tempLong = str::stream() << (outDB.empty() ? dbname : outDB) << ".tmp.mr." << cmdObj.firstElement().String() << "_" << finalShort << "_" << JOB_NUMBER++;
+
+ incLong = tempLong + "_inc";
+
+ finalLong = str::stream() << (outDB.empty() ? dbname : outDB) << "." << finalShort;
+ }
+
+ {
+ // scope and code
+
+ if ( cmdObj["scope"].type() == Object )
+ scopeSetup = cmdObj["scope"].embeddedObjectUserCheck();
+
+ reducer.reset( new JSReducer( cmdObj["reduce"] ) );
+ if ( cmdObj["finalize"].type() && cmdObj["finalize"].trueValue() )
+ finalizer.reset( new JSFinalizer( cmdObj["finalize"] ) );
+
+ }
+
+ {
+ // query options
+ if ( cmdObj["limit"].isNumber() )
+ limit = cmdObj["limit"].numberLong();
+ else
+ limit = 0;
+ }
+ }
+
+ State::State( const Config& c ) : _config( c ) {
+ _onDisk = _config.outType != Config::INMEMORY;
+ }
+
+ State::~State() {
+ if ( _onDisk ) {
+ try {
+// _db.dropCollection( _config.tempLong );
+// _db.dropCollection( _config.incLong );
+ }
+ catch ( std::exception& e ) {
+ error() << "couldn't cleanup after map reduce: " << e.what() << endl;
+ }
+ }
+
+ if (_scope) {
+ // cleanup js objects
+ ScriptingFunction cleanup = _scope->createFunction("delete _emitCt; delete _keyCt; delete _mrMap;");
+ _scope->invoke(cleanup, 0, 0, 0, true);
+ }
+ }
+
+ /**
+ * Initialize the mapreduce operation: set up the JS scope and the reduce/finalize functions
+ */
+ void State::init() {
+ // setup js
+ _scope.reset(globalScriptEngine->getPooledScope( _config.dbname ).release() );
+// _scope->localConnect( _config.dbname.c_str() );
+ _scope->externalSetup();
+
+ if ( ! _config.scopeSetup.isEmpty() )
+ _scope->init( &_config.scopeSetup );
+
+ _config.reducer->init( this );
+ if ( _config.finalizer )
+ _config.finalizer->init( this );
+ _scope->setBoolean("_doFinal", _config.finalizer);
+ }
+ }
+}
+
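
The Config constructor above accepts several shapes for 'out'. For illustration only (collection and database names invented; the map/reduce/finalize fields a real command also carries are omitted):

    // illustrative sketch only
    BSONObj inlineOut  = BSON( "mapreduce" << "events" << "out" << BSON( "inline" << 1 ) );  // INMEMORY
    BSONObj replaceOut = BSON( "mapreduce" << "events" << "out" << "results" );              // REPLACE into <dbname>.results
    BSONObj mergeOut   = BSON( "mapreduce" << "events"
                               << "out" << BSON( "merge" << "results"
                                                 << "db" << "archive"
                                                 << "nonAtomic" << true ) );                 // MERGE into archive.results, no lock during output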
diff --git a/src/mongo/s/mr_shard.h b/src/mongo/s/mr_shard.h
new file mode 100644
index 00000000000..7f96b54587f
--- /dev/null
+++ b/src/mongo/s/mr_shard.h
@@ -0,0 +1,235 @@
+// mr_shard.h
+
+/**
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "pch.h"
+
+namespace mongo {
+
+ namespace mr_shard {
+
+ typedef vector<BSONObj> BSONList;
+
+ class State;
+
+ // ------------ function interfaces -----------
+
+ class Finalizer : boost::noncopyable {
+ public:
+ virtual ~Finalizer() {}
+ virtual void init( State * state ) = 0;
+
+ /**
+ * this takes a tuple and returns a tuple
+ */
+ virtual BSONObj finalize( const BSONObj& tuple ) = 0;
+ };
+
+ class Reducer : boost::noncopyable {
+ public:
+ Reducer() : numReduces(0) {}
+ virtual ~Reducer() {}
+ virtual void init( State * state ) = 0;
+
+ virtual BSONObj reduce( const BSONList& tuples ) = 0;
+ /** this means it's a final reduce, even if there is no finalizer */
+ virtual BSONObj finalReduce( const BSONList& tuples , Finalizer * finalizer ) = 0;
+
+ long long numReduces;
+ };
+
+ // ------------ js function implementations -----------
+
+ /**
+ * used as a holder for Scope and ScriptingFunction
+ * visitor-like pattern, as the Scope is obtained on first access
+ */
+ class JSFunction : boost::noncopyable {
+ public:
+ /**
+ * @param type (map|reduce|finalize)
+ */
+ JSFunction( string type , const BSONElement& e );
+ virtual ~JSFunction() {}
+
+ virtual void init( State * state );
+
+ Scope * scope() const { return _scope; }
+ ScriptingFunction func() const { return _func; }
+
+ private:
+ string _type;
+ string _code; // actual javascript code
+ BSONObj _wantedScope; // this is for CodeWScope
+
+ Scope * _scope; // this is not owned by us, and might be shared
+ ScriptingFunction _func;
+ };
+
+ class JSReducer : public Reducer {
+ public:
+ JSReducer( const BSONElement& code ) : _func( "_reduce" , code ) {}
+ virtual void init( State * state );
+
+ virtual BSONObj reduce( const BSONList& tuples );
+ virtual BSONObj finalReduce( const BSONList& tuples , Finalizer * finalizer );
+
+ private:
+
+ /**
+ * result in "return"
+ * @param key OUT
+ * @param endSizeEstimate OUT
+ */
+ void _reduce( const BSONList& values , BSONObj& key , int& endSizeEstimate );
+
+ JSFunction _func;
+ };
+
+ class JSFinalizer : public Finalizer {
+ public:
+ JSFinalizer( const BSONElement& code ) : _func( "_finalize" , code ) {}
+ virtual BSONObj finalize( const BSONObj& o );
+ virtual void init( State * state ) { _func.init( state ); }
+ private:
+ JSFunction _func;
+
+ };
+
+ // -----------------
+
+ /**
+ * holds map/reduce config information
+ */
+ class Config {
+ public:
+ Config( const string& _dbname , const BSONObj& cmdObj );
+
+ string dbname;
+ string ns;
+
+ // options
+ bool verbose;
+ bool jsMode;
+
+ // query options
+
+ BSONObj filter;
+ BSONObj sort;
+ long long limit;
+
+ // functions
+ scoped_ptr<Reducer> reducer;
+ scoped_ptr<Finalizer> finalizer;
+
+ BSONObj mapParams;
+ BSONObj scopeSetup;
+
+ // output tables
+ string incLong;
+ string tempLong;
+
+ string finalShort;
+ string finalLong;
+
+ string outDB;
+
+ // max number of keys allowed in JS map before switching mode
+ long jsMaxKeys;
+ // ratio of duplicates vs unique keys before reduce is triggered in js mode
+ float reduceTriggerRatio;
+ // maximum size of map before it gets dumped to disk
+ long maxInMemSize;
+
+ enum { REPLACE , // atomically replace the collection
+ MERGE , // merge keys, override dups
+ REDUCE , // merge keys, reduce dups
+ INMEMORY // only store in memory, limited in size
+ } outType;
+
+ // if true, no lock during output operation
+ bool outNonAtomic;
+
+ static AtomicUInt JOB_NUMBER;
+ }; // end Config
+
+ /**
+ * stores information about intermediate map reduce state
+ * controls flow of data from map->reduce->finalize->output
+ */
+ class State {
+ public:
+ State( const Config& c );
+ ~State();
+
+ void init();
+
+ // ---- prep -----
+ bool sourceExists();
+
+ long long incomingDocuments();
+
+ // ---- map stage ----
+
+ /**
+ * stages an object in in-memory storage
+ */
+ void emit( const BSONObj& a );
+
+ /**
+ * if size is big, run a reduce
+ * if it's still big, dump to temp collection
+ */
+ void checkSize();
+
+ /**
+ * run reduce on _temp
+ */
+ void reduceInMemory();
+
+ // ------ reduce stage -----------
+
+ void prepTempCollection();
+
+ void finalReduce( BSONList& values );
+
+ void finalReduce( CurOp * op , ProgressMeterHolder& pm );
+
+ // ------ simple accessors -----
+
+ /** State maintains ownership, do not use past State lifetime */
+ Scope* scope() { return _scope.get(); }
+
+ const Config& config() { return _config; }
+
+ const bool isOnDisk() { return _onDisk; }
+
+ long long numReduces() const { return _config.reducer->numReduces; }
+
+ const Config& _config;
+
+ protected:
+
+ scoped_ptr<Scope> _scope;
+ bool _onDisk; // if the end result of this map reduce is disk or not
+ };
+
+ } // end mr_shard namespace
+}
+
+
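
The Reducer and Finalizer interfaces above pass data around as two-field tuple objects, key first and value second. An illustrative sketch of the shapes involved (values invented, not part of this changeset):

    // illustrative sketch only
    BSONList tuples;
    tuples.push_back( BSON( "0" << "user1" << "1" << 3 ) );
    tuples.push_back( BSON( "0" << "user1" << "1" << 5 ) );

    // reducer->reduce( tuples )                  -> { "0" : "user1", "1" : <reduced value> }
    // reducer->finalReduce( tuples , finalizer ) -> { "_id" : "user1", "value" : <finalized value> }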
diff --git a/src/mongo/s/request.cpp b/src/mongo/s/request.cpp
new file mode 100644
index 00000000000..96cce96685d
--- /dev/null
+++ b/src/mongo/s/request.cpp
@@ -0,0 +1,164 @@
+// s/request.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "server.h"
+
+#include "../db/commands.h"
+#include "../db/dbmessage.h"
+#include "../db/stats/counters.h"
+
+#include "../client/connpool.h"
+
+#include "request.h"
+#include "config.h"
+#include "chunk.h"
+#include "stats.h"
+#include "cursors.h"
+#include "grid.h"
+#include "client.h"
+
+namespace mongo {
+
+ Request::Request( Message& m, AbstractMessagingPort* p ) :
+ _m(m) , _d( m ) , _p(p) , _didInit(false) {
+
+ assert( _d.getns() );
+ _id = _m.header()->id;
+
+ _clientInfo = ClientInfo::get();
+ _clientInfo->newRequest( p );
+ }
+
+ void Request::checkAuth( Auth::Level levelNeeded ) const {
+ char cl[256];
+ nsToDatabase(getns(), cl);
+ uassert( 15845 ,
+ str::stream() << "unauthorized for db:" << cl << " level: " << levelNeeded ,
+ _clientInfo->getAuthenticationInfo()->isAuthorizedForLevel(cl,levelNeeded) );
+ }
+
+ void Request::init() {
+ if ( _didInit )
+ return;
+ _didInit = true;
+ reset();
+ }
+
+ // Deprecated, will move to the strategy itself
+ void Request::reset() {
+ if ( _m.operation() == dbKillCursors ) {
+ return;
+ }
+
+ uassert( 13644 , "can't use 'local' database through mongos" , ! str::startsWith( getns() , "local." ) );
+
+ // TODO: Deprecated, keeping to preserve codepath for now
+ const string nsStr (getns()); // use in functions taking string rather than char*
+
+ _config = grid.getDBConfig( nsStr );
+
+ // TODO: In general, throwing an exception when the cm doesn't exist is really annoying
+ if ( _config->isSharded( nsStr ) ) {
+ _chunkManager = _config->getChunkManagerIfExists( nsStr );
+ }
+ else {
+ _chunkManager.reset();
+ }
+
+ _m.header()->id = _id;
+ _clientInfo->clearCurrentShards();
+ }
+
+ // Deprecated, will move to the strategy itself
+ Shard Request::primaryShard() const {
+ assert( _didInit );
+
+ if ( _chunkManager ) {
+ if ( _chunkManager->numChunks() > 1 )
+ throw UserException( 8060 , "can't call primaryShard on a sharded collection" );
+ return _chunkManager->findChunk( _chunkManager->getShardKey().globalMin() )->getShard();
+ }
+ Shard s = _config->getShard( getns() );
+ uassert( 10194 , "can't call primaryShard on a sharded collection!" , s.ok() );
+ return s;
+ }
+
+ void Request::process( int attempt ) {
+ init();
+ int op = _m.operation();
+ assert( op > dbMsg );
+
+ if ( op == dbKillCursors ) {
+ cursorCache.gotKillCursors( _m );
+ return;
+ }
+
+
+ LOG(3) << "Request::process ns: " << getns() << " msg id:" << (int)(_m.header()->id) << " attempt: " << attempt << endl;
+
+ Strategy * s = SHARDED;
+ _counter = &opsNonSharded;
+
+ _d.markSet();
+
+ bool iscmd = false;
+ if ( op == dbQuery ) {
+ iscmd = isCommand();
+ s->queryOp( *this );
+ }
+ else if ( op == dbGetMore ) {
+ checkAuth( Auth::READ ); // this is important so someone can't steal a cursor
+ s->getMore( *this );
+ }
+ else {
+ checkAuth( Auth::WRITE );
+ s->writeOp( op, *this );
+ }
+
+ globalOpCounters.gotOp( op , iscmd );
+ _counter->gotOp( op , iscmd );
+ }
+
+ bool Request::isCommand() const {
+ int x = _d.getQueryNToReturn();
+ return ( x == 1 || x == -1 ) && strstr( getns() , ".$cmd" );
+ }
+
+ void Request::gotInsert() {
+ globalOpCounters.gotInsert();
+ _counter->gotInsert();
+ }
+
+ void Request::reply( Message & response , const string& fromServer ) {
+ assert( _didInit );
+ long long cursor = response.header()->getCursor();
+ if ( cursor ) {
+ if ( fromServer.size() ) {
+ cursorCache.storeRef( fromServer , cursor );
+ }
+ else {
+ // probably a getMore
+ // make sure we have a ref for this
+ assert( cursorCache.getRef( cursor ).size() );
+ }
+ }
+ _p->reply( _m , response , _id );
+ }
+
+} // namespace mongo
diff --git a/src/mongo/s/request.h b/src/mongo/s/request.h
new file mode 100644
index 00000000000..f41ae6f6a5d
--- /dev/null
+++ b/src/mongo/s/request.h
@@ -0,0 +1,114 @@
+// request.h
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#pragma once
+
+#include "../pch.h"
+#include "../util/net/message.h"
+#include "../db/dbmessage.h"
+#include "config.h"
+#include "util.h"
+
+namespace mongo {
+
+
+ class OpCounters;
+ class ClientInfo;
+
+ class Request : boost::noncopyable {
+ public:
+ Request( Message& m, AbstractMessagingPort* p );
+
+ // ---- message info -----
+
+
+ const char * getns() const {
+ return _d.getns();
+ }
+ int op() const {
+ return _m.operation();
+ }
+ bool expectResponse() const {
+ return op() == dbQuery || op() == dbGetMore;
+ }
+ bool isCommand() const;
+
+ MSGID id() const {
+ return _id;
+ }
+
+ DBConfigPtr getConfig() const {
+ assert( _didInit );
+ return _config;
+ }
+ bool isShardingEnabled() const {
+ assert( _didInit );
+ return _config->isShardingEnabled();
+ }
+
+ ChunkManagerPtr getChunkManager() const {
+ assert( _didInit );
+ return _chunkManager;
+ }
+
+ ClientInfo * getClientInfo() const {
+ return _clientInfo;
+ }
+
+ void checkAuth( Auth::Level levelNeeded ) const;
+
+ // ---- remote location info -----
+
+
+ Shard primaryShard() const ;
+
+ // ---- low level access ----
+
+ void reply( Message & response , const string& fromServer );
+
+ Message& m() { return _m; }
+ DbMessage& d() { return _d; }
+ AbstractMessagingPort* p() const { return _p; }
+
+ void process( int attempt = 0 );
+
+ void gotInsert();
+
+ void init();
+
+ void reset();
+
+ private:
+ Message& _m;
+ DbMessage _d;
+ AbstractMessagingPort* _p;
+
+ MSGID _id;
+ DBConfigPtr _config;
+ ChunkManagerPtr _chunkManager;
+
+ ClientInfo * _clientInfo;
+
+ OpCounters* _counter;
+
+ bool _didInit;
+ };
+
+}
+
+#include "strategy.h"
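
An illustrative sketch of the per-message lifecycle mongos drives through this class (the ShardedMessageHandler in server.cpp later in this diff does essentially this, with error handling added); the function name is invented:

    // illustrative sketch only
    void handleOneMessage( Message& m , AbstractMessagingPort* p ) {
        Request r( m , p );   // records the ns, message id and per-connection ClientInfo
        r.init();             // resolves the DBConfig / ChunkManager for the namespace
        r.process();          // dispatches to the query / getMore / write strategy
    }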
diff --git a/src/mongo/s/s_only.cpp b/src/mongo/s/s_only.cpp
new file mode 100644
index 00000000000..05e652db57e
--- /dev/null
+++ b/src/mongo/s/s_only.cpp
@@ -0,0 +1,111 @@
+// s_only.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "request.h"
+#include "client.h"
+#include "../client/dbclient.h"
+#include "../db/dbhelpers.h"
+#include "../db/matcher.h"
+#include "../db/commands.h"
+
+/*
+ mostly a pile of hacks to make linking nicer
+
+ */
+namespace mongo {
+
+ TSP_DEFINE(Client,currentClient)
+
+ Client::LockStatus::LockStatus() {
+ // why is mongo::Client used in mongos? that is very weird.
+ // commenting this out until that is cleaned up or until someone puts a comment here
+ // explaining why it does make sense.
+ ////dassert(false);
+ }
+
+ Client::Client(const char *desc , AbstractMessagingPort *p) :
+ _context(0),
+ _shutdown(false),
+ _desc(desc),
+ _god(0),
+ _lastOp(0),
+ _mp(p) {
+ }
+ Client::~Client() {}
+ bool Client::shutdown() { return true; }
+
+ static unsigned long long nThreads = 0;
+ void assertStartingUp() {
+ dassert( nThreads <= 1 );
+ }
+ Client& Client::initThread(const char *desc, AbstractMessagingPort *mp) {
+ DEV nThreads++; // never decremented; used by the assertStartingUp() sanity check above
+ setThreadName(desc);
+ assert( currentClient.get() == 0 );
+ Client *c = new Client(desc, mp);
+ currentClient.reset(c);
+ mongo::lastError.initThread();
+ return *c;
+ }
+
+ string Client::clientAddress(bool includePort) const {
+ ClientInfo * ci = ClientInfo::get();
+ if ( ci )
+ return ci->getRemote();
+ return "";
+ }
+
+ bool execCommand( Command * c ,
+ Client& client , int queryOptions ,
+ const char *ns, BSONObj& cmdObj ,
+ BSONObjBuilder& result,
+ bool fromRepl ) {
+ assert(c);
+
+ string dbname = nsToDatabase( ns );
+
+ if ( cmdObj["help"].trueValue() ) {
+ stringstream ss;
+ ss << "help for: " << c->name << " ";
+ c->help( ss );
+ result.append( "help" , ss.str() );
+ result.append( "lockType" , c->locktype() );
+ return true;
+ }
+
+ if ( c->adminOnly() ) {
+ if ( dbname != "admin" ) {
+ result.append( "errmsg" , "access denied- use admin db" );
+ log() << "command denied: " << cmdObj.toString() << endl;
+ return false;
+ }
+ log( 2 ) << "command: " << cmdObj << endl;
+ }
+
+ if (!client.getAuthenticationInfo()->isAuthorized(dbname)) {
+ result.append("errmsg" , "unauthorized");
+ return false;
+ }
+
+ string errmsg;
+ int ok = c->run( dbname , cmdObj , queryOptions, errmsg , result , fromRepl );
+ if ( ! ok )
+ result.append( "errmsg" , errmsg );
+ return ok;
+ }
+}
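
execCommand() above expects the caller to have already looked up the Command object. An illustrative sketch of such a caller (the helper name is invented, Command::findCommand from db/commands.h is assumed, and real callers also build the reply message):

    // illustrative sketch only
    bool runOneCommand( Client& client , BSONObj& cmdObj , BSONObjBuilder& result ) {
        Command * c = Command::findCommand( cmdObj.firstElementFieldName() );
        if ( ! c )
            return false; // unknown command
        return execCommand( c , client , 0 /* queryOptions */ ,
                            "admin.$cmd" , cmdObj , result , false /* fromRepl */ );
    }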
diff --git a/src/mongo/s/security.cpp b/src/mongo/s/security.cpp
new file mode 100644
index 00000000000..a88c36b1d20
--- /dev/null
+++ b/src/mongo/s/security.cpp
@@ -0,0 +1,101 @@
+// security.cpp
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+// security.cpp
+
+#include "pch.h"
+#include "../db/security_common.h"
+#include "../db/security.h"
+#include "config.h"
+#include "client.h"
+#include "grid.h"
+
+// this is the _mongos only_ implementation of security.h
+
+namespace mongo {
+
+ bool AuthenticationInfo::_warned;
+
+ bool CmdAuthenticate::getUserObj(const string& dbname, const string& user, BSONObj& userObj, string& pwd) {
+ if (user == internalSecurity.user) {
+ uassert(15890, "key file must be used to log in with internal user", cmdLine.keyFile);
+ pwd = internalSecurity.pwd;
+ }
+ else {
+ string systemUsers = dbname + ".system.users";
+ DBConfigPtr config = grid.getDBConfig( systemUsers );
+ Shard s = config->getShard( systemUsers );
+
+ static BSONObj userPattern = BSON("user" << 1);
+
+ ShardConnection conn( s, systemUsers );
+ OCCASIONALLY conn->ensureIndex(systemUsers, userPattern, false, "user_1");
+ {
+ BSONObjBuilder b;
+ b << "user" << user;
+ BSONObj query = b.done();
+ userObj = conn->findOne(systemUsers, query);
+ if( userObj.isEmpty() ) {
+ log() << "auth: couldn't find user " << user << ", " << systemUsers << endl;
+ conn.done(); // return to pool
+ return false;
+ }
+ }
+
+ pwd = userObj.getStringField("pwd");
+
+ conn.done(); // return to pool
+ }
+ return true;
+ }
+
+ bool AuthenticationInfo::_isAuthorizedSpecialChecks( const string& dbname ) const {
+ if ( !isLocalHost ) {
+ return false;
+ }
+
+ string adminNs = "admin.system.users";
+
+ DBConfigPtr config = grid.getDBConfig( adminNs );
+ Shard s = config->getShard( adminNs );
+
+ ShardConnection conn( s, adminNs );
+ BSONObj result = conn->findOne("admin.system.users", Query());
+ if( result.isEmpty() ) {
+ if( ! _warned ) {
+ // you could get a few of these in a race, but that's ok
+ _warned = true;
+ log() << "note: no users configured in admin.system.users, allowing localhost access" << endl;
+ }
+
+ // Must return conn to pool
+ // TODO: Check for errors during findOne(), or just let the conn die?
+ conn.done();
+ return true;
+ }
+
+ // Must return conn to pool
+ conn.done();
+ return false;
+ }
+
+ bool CmdLogout::run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
+ AuthenticationInfo *ai = ClientInfo::get()->getAuthenticationInfo();
+ ai->logout(dbname);
+ return true;
+ }
+}
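
For reference, the per-database user document getUserObj() above reads from <dbname>.system.users has this shape (values invented; only the fields the code touches are shown):

    // illustrative sketch only
    BSONObj userDoc = BSON( "user" << "appUser" << "pwd" << "<hashed credential>" ); // what findOne() returns
    BSONObj query   = BSON( "user" << "appUser" );                                   // what getUserObj() sends to findOne()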
diff --git a/src/mongo/s/server.cpp b/src/mongo/s/server.cpp
new file mode 100644
index 00000000000..63b3c368ab6
--- /dev/null
+++ b/src/mongo/s/server.cpp
@@ -0,0 +1,429 @@
+// server.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "../util/net/message.h"
+#include "../util/unittest.h"
+#include "../client/connpool.h"
+#include "../util/net/message_server.h"
+#include "../util/stringutils.h"
+#include "../util/version.h"
+#include "../util/ramlog.h"
+#include "../util/signal_handlers.h"
+#include "../util/admin_access.h"
+#include "../util/concurrency/task.h"
+#include "../db/dbwebserver.h"
+#include "../scripting/engine.h"
+
+#include "server.h"
+#include "request.h"
+#include "client.h"
+#include "config.h"
+#include "chunk.h"
+#include "balance.h"
+#include "grid.h"
+#include "cursors.h"
+#include "shard_version.h"
+
+namespace mongo {
+
+ CmdLine cmdLine;
+ Database *database = 0;
+ string mongosCommand;
+ bool dbexitCalled = false;
+ static bool scriptingEnabled = true;
+
+ bool inShutdown() {
+ return dbexitCalled;
+ }
+
+ string getDbContext() {
+ return "?";
+ }
+
+ bool haveLocalShardingInfo( const string& ns ) {
+ assert( 0 );
+ return false;
+ }
+
+ void usage( char * argv[] ) {
+ out() << argv[0] << " usage:\n\n";
+ out() << " -v+ verbose 1: general 2: more 3: per request 4: more\n";
+ out() << " --port <portno>\n";
+ out() << " --configdb <configdbname>,[<configdbname>,<configdbname>]\n";
+ out() << endl;
+ }
+
+ void ShardingConnectionHook::onHandedOut( DBClientBase * conn ) {
+ ClientInfo::get()->addShard( conn->getServerAddress() );
+ }
+
+ class ShardedMessageHandler : public MessageHandler {
+ public:
+ virtual ~ShardedMessageHandler() {}
+
+ virtual void connected( AbstractMessagingPort* p ) {
+ ClientInfo *c = ClientInfo::get();
+ massert(15849, "client info not defined", c);
+ c->getAuthenticationInfo()->isLocalHost = p->remote().isLocalHost();
+ }
+
+ virtual void process( Message& m , AbstractMessagingPort* p , LastError * le) {
+ assert( p );
+ Request r( m , p );
+
+ assert( le );
+ lastError.startRequest( m , le );
+
+ try {
+ r.init();
+ r.process();
+ }
+ catch ( AssertionException & e ) {
+ log( e.isUserAssertion() ? 1 : 0 ) << "AssertionException while processing op type : " << m.operation() << " to : " << r.getns() << causedBy(e) << endl;
+
+ le->raiseError( e.getCode() , e.what() );
+
+ m.header()->id = r.id();
+
+ if ( r.expectResponse() ) {
+ BSONObj err = BSON( "$err" << e.what() << "code" << e.getCode() );
+ replyToQuery( ResultFlag_ErrSet, p , m , err );
+ }
+ }
+ catch ( DBException& e ) {
+ log() << "DBException in process: " << e.what() << endl;
+
+ le->raiseError( e.getCode() , e.what() );
+
+ m.header()->id = r.id();
+
+ if ( r.expectResponse() ) {
+ BSONObj err = BSON( "$err" << e.what() << "code" << e.getCode() );
+ replyToQuery( ResultFlag_ErrSet, p , m , err );
+ }
+ }
+ }
+
+ virtual void disconnected( AbstractMessagingPort* p ) {
+ // all things are thread local
+ }
+ };
+
+ void sighandler(int sig) {
+ dbexit(EXIT_CLEAN, (string("received signal ") + BSONObjBuilder::numStr(sig)).c_str());
+ }
+
+ // this gets called when new fails to allocate memory
+ void my_new_handler() {
+ rawOut( "out of memory, printing stack and exiting:" );
+ printStackTrace();
+ ::exit(EXIT_ABRUPT);
+ }
+
+ void setupSignals( bool inFork ) {
+ signal(SIGTERM, sighandler);
+ signal(SIGINT, sighandler);
+
+#if defined(SIGQUIT)
+ signal( SIGQUIT , printStackAndExit );
+#endif
+ signal( SIGSEGV , printStackAndExit );
+ signal( SIGABRT , printStackAndExit );
+ signal( SIGFPE , printStackAndExit );
+#if defined(SIGBUS)
+ signal( SIGBUS , printStackAndExit );
+#endif
+
+ set_new_handler( my_new_handler );
+ }
+
+ void init() {
+ serverID.init();
+ setupSIGTRAPforGDB();
+ setupCoreSignals();
+ setupSignals( false );
+ Logstream::get().addGlobalTee( new RamLog("global") );
+ }
+
+ void start( const MessageServer::Options& opts ) {
+ setThreadName( "mongosMain" );
+
+ balancer.go();
+ cursorCache.startTimeoutThread();
+ PeriodicTask::theRunner->go();
+
+ ShardedMessageHandler handler;
+ MessageServer * server = createServer( opts , &handler );
+ server->setAsTimeTracker();
+ server->run();
+ }
+
+ DBClientBase *createDirectClient() {
+ uassert( 10197 , "createDirectClient not implemented for sharding yet" , 0 );
+ return 0;
+ }
+
+ void printShardingVersionInfo(bool out) {
+ if (out) {
+ cout << mongosCommand << " " << mongodVersion() << " starting (--help for usage)" << endl;
+ cout << "git version: " << gitVersion() << endl;
+ cout << "build sys info: " << sysInfo() << endl;
+ } else {
+ log() << mongosCommand << " " << mongodVersion() << " starting (--help for usage)" << endl;
+ printGitVersion();
+ printSysInfo();
+ }
+ }
+
+ void cloudCmdLineParamIs(string cmd);
+
+} // namespace mongo
+
+using namespace mongo;
+
+#include <boost/program_options.hpp>
+
+namespace po = boost::program_options;
+
+int _main(int argc, char* argv[]) {
+ static StaticObserver staticObserver;
+ mongosCommand = argv[0];
+
+ po::options_description options("General options");
+ po::options_description sharding_options("Sharding options");
+ po::options_description hidden("Hidden options");
+ po::positional_options_description positional;
+
+ CmdLine::addGlobalOptions( options , hidden );
+
+ sharding_options.add_options()
+ ( "configdb" , po::value<string>() , "1 or 3 comma separated config servers" )
+ ( "test" , "just run unit tests" )
+ ( "upgrade" , "upgrade meta data version" )
+ ( "chunkSize" , po::value<int>(), "maximum amount of data per chunk" )
+ ( "ipv6", "enable IPv6 support (disabled by default)" )
+ ( "jsonp","allow JSONP access via http (has security implications)" )
+ ("noscripting", "disable scripting engine")
+ ;
+
+ options.add(sharding_options);
+ // parse options
+ po::variables_map params;
+ if ( ! CmdLine::store( argc , argv , options , hidden , positional , params ) )
+ return 0;
+
+ // The default value may vary depending on compile options, but for mongos
+ // we want durability to be disabled.
+ cmdLine.dur = false;
+
+ if ( params.count( "help" ) ) {
+ cout << options << endl;
+ return 0;
+ }
+
+ if ( params.count( "version" ) ) {
+ printShardingVersionInfo(true);
+ return 0;
+ }
+
+ if ( params.count( "chunkSize" ) ) {
+ int csize = params["chunkSize"].as<int>();
+
+ // validate chunksize before proceeding
+ if ( csize == 0 ) {
+ out() << "error: need a non-zero chunksize" << endl;
+ return 11;
+ }
+
+ Chunk::MaxChunkSize = csize * 1024 * 1024;
+ }
+
+ if ( params.count( "ipv6" ) ) {
+ enableIPv6();
+ }
+
+ if ( params.count( "jsonp" ) ) {
+ cmdLine.jsonp = true;
+ }
+
+ if ( params.count( "test" ) ) {
+ logLevel = 5;
+ UnitTest::runTests();
+ cout << "tests passed" << endl;
+ return 0;
+ }
+
+ if (params.count("noscripting")) {
+ scriptingEnabled = false;
+ }
+
+ if ( ! params.count( "configdb" ) ) {
+ out() << "error: no args for --configdb" << endl;
+ return 4;
+ }
+
+ if( params.count("cloud") ) {
+ string s = params["cloud"].as<string>();
+ cloudCmdLineParamIs(s);
+ }
+
+ vector<string> configdbs;
+ splitStringDelim( params["configdb"].as<string>() , &configdbs , ',' );
+ if ( configdbs.size() != 1 && configdbs.size() != 3 ) {
+ out() << "need either 1 or 3 configdbs" << endl;
+ return 5;
+ }
+
+ // we either have a setting where all processes are in localhost or none are
+ for ( vector<string>::const_iterator it = configdbs.begin() ; it != configdbs.end() ; ++it ) {
+ try {
+
+ HostAndPort configAddr( *it ); // will throw if address format is invalid
+
+ if ( it == configdbs.begin() ) {
+ grid.setAllowLocalHost( configAddr.isLocalHost() );
+ }
+
+ if ( configAddr.isLocalHost() != grid.allowLocalHost() ) {
+ out() << "cannot mix localhost and ip addresses in configdbs" << endl;
+ return 10;
+ }
+
+ }
+ catch ( DBException& e) {
+ out() << "configdb: " << e.what() << endl;
+ return 9;
+ }
+ }
+
+ // set some global state
+
+ pool.addHook( new ShardingConnectionHook( false ) );
+ pool.setName( "mongos connectionpool" );
+
+ shardConnectionPool.addHook( new ShardingConnectionHook( true ) );
+ shardConnectionPool.setName( "mongos shardconnection connectionpool" );
+
+
+ DBClientConnection::setLazyKillCursor( false );
+
+ ReplicaSetMonitor::setConfigChangeHook( boost::bind( &ConfigServer::replicaSetChange , &configServer , _1 ) );
+
+ if ( argc <= 1 ) {
+ usage( argv );
+ return 3;
+ }
+
+ bool ok = cmdLine.port != 0 && configdbs.size();
+
+ if ( !ok ) {
+ usage( argv );
+ return 1;
+ }
+
+ printShardingVersionInfo(false);
+
+ if ( ! configServer.init( configdbs ) ) {
+ cout << "couldn't resolve config db address" << endl;
+ return 7;
+ }
+
+ if ( ! configServer.ok( true ) ) {
+ cout << "configServer connection startup check failed" << endl;
+ return 8;
+ }
+
+ {
+ class CheckConfigServers : public task::Task {
+ virtual string name() const { return "CheckConfigServers"; }
+ virtual void doWork() { configServer.ok(true); }
+ };
+ static CheckConfigServers checkConfigServers;
+
+ task::repeat(&checkConfigServers, 60*1000);
+ }
+
+ int configError = configServer.checkConfigVersion( params.count( "upgrade" ) );
+ if ( configError ) {
+ if ( configError > 0 ) {
+ cout << "upgrade success!" << endl;
+ }
+ else {
+ cout << "config server error: " << configError << endl;
+ }
+ return configError;
+ }
+ configServer.reloadSettings();
+
+ init();
+
+#ifndef _WIN32
+ CmdLine::launchOk();
+#endif
+
+ boost::thread web( boost::bind(&webServerThread, new NoAdminAccess() /* takes ownership */) );
+
+ if ( scriptingEnabled ) {
+ ScriptEngine::setup();
+// globalScriptEngine->setCheckInterruptCallback( jsInterruptCallback );
+// globalScriptEngine->setGetInterruptSpecCallback( jsGetInterruptSpecCallback );
+ }
+
+ MessageServer::Options opts;
+ opts.port = cmdLine.port;
+ opts.ipList = cmdLine.bind_ip;
+ start(opts);
+
+ dbexit( EXIT_CLEAN );
+ return 0;
+}
+int main(int argc, char* argv[]) {
+ try {
+ doPreServerStartupInits();
+ return _main(argc, argv);
+ }
+ catch(DBException& e) {
+ cout << "uncaught exception in mongos main:" << endl;
+ cout << e.toString() << endl;
+ }
+ catch(std::exception& e) {
+ cout << "uncaught exception in mongos main:" << endl;
+ cout << e.what() << endl;
+ }
+ catch(...) {
+ cout << "uncaught exception in mongos main" << endl;
+ }
+ return 20;
+}
+
+#undef exit
+
+void mongo::exitCleanly( ExitCode code ) {
+ // TODO: do we need to add anything?
+ mongo::dbexit( code );
+}
+
+void mongo::dbexit( ExitCode rc, const char *why, bool tryToGetLock ) {
+ dbexitCalled = true;
+ log() << "dbexit: " << why
+ << " rc:" << rc
+ << " " << ( why ? why : "" )
+ << endl;
+ ::exit(rc);
+}
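
The option handling above corresponds to invocations of roughly this shape (host names and values illustrative); --configdb is mandatory and takes one or three comma-separated config servers, and --chunkSize is given in megabytes before the MB-to-bytes conversion shown earlier:

    mongos --configdb cfg1.example.net,cfg2.example.net,cfg3.example.net --port 27017 --chunkSize 64
    mongos --test    # runs the built-in unit tests (including BalancingWindowUnitTest) and exits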
diff --git a/src/mongo/s/server.h b/src/mongo/s/server.h
new file mode 100644
index 00000000000..18e91e266fd
--- /dev/null
+++ b/src/mongo/s/server.h
@@ -0,0 +1,29 @@
+// server.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include <string>
+#include "../util/net/message.h"
+#include "../db/jsobj.h"
+
+namespace mongo {
+
+ extern OID serverID;
+
+ // from request.cpp
+ void processRequest(Message& m, MessagingPort& p);
+}
diff --git a/src/mongo/s/shard.cpp b/src/mongo/s/shard.cpp
new file mode 100644
index 00000000000..81b41c7fcbc
--- /dev/null
+++ b/src/mongo/s/shard.cpp
@@ -0,0 +1,410 @@
+// shard.cpp
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "shard.h"
+#include "config.h"
+#include "request.h"
+#include "client.h"
+#include "../db/commands.h"
+#include <set>
+
+namespace mongo {
+
+ typedef shared_ptr<Shard> ShardPtr;
+
+ class StaticShardInfo {
+ public:
+ StaticShardInfo() : _mutex("StaticShardInfo"), _rsMutex("RSNameMap") { }
+ void reload() {
+
+ list<BSONObj> all;
+ {
+ ScopedDbConnection conn( configServer.getPrimary() );
+ auto_ptr<DBClientCursor> c = conn->query( ShardNS::shard , Query() );
+ massert( 13632 , "couldn't get updated shard list from config server" , c.get() );
+ while ( c->more() ) {
+ all.push_back( c->next().getOwned() );
+ }
+ conn.done();
+ }
+
+ scoped_lock lk( _mutex );
+
+ // We use the _lookup table for all shards and for the primary config DB. The config DB info,
+ // however, does not come from the ShardNS::shard. So when cleaning the _lookup table we leave
+ // the config state intact. The rationale is that this way we could drop shards that
+ // were removed without reinitializing the config DB information.
+
+ ShardMap::iterator i = _lookup.find( "config" );
+ if ( i != _lookup.end() ) {
+ ShardPtr config = i->second;
+ _lookup.clear();
+ _lookup[ "config" ] = config;
+ }
+ else {
+ _lookup.clear();
+ }
+ _rsLookup.clear();
+
+ for ( list<BSONObj>::iterator i=all.begin(); i!=all.end(); ++i ) {
+ BSONObj o = *i;
+ string name = o["_id"].String();
+ string host = o["host"].String();
+
+ long long maxSize = 0;
+ BSONElement maxSizeElem = o[ ShardFields::maxSize.name() ];
+ if ( ! maxSizeElem.eoo() ) {
+ maxSize = maxSizeElem.numberLong();
+ }
+
+ bool isDraining = false;
+ BSONElement isDrainingElem = o[ ShardFields::draining.name() ];
+ if ( ! isDrainingElem.eoo() ) {
+ isDraining = isDrainingElem.Bool();
+ }
+
+ ShardPtr s( new Shard( name , host , maxSize , isDraining ) );
+ _lookup[name] = s;
+ _installHost( host , s );
+ }
+
+ }
+
+ ShardPtr find( const string& ident ) {
+ string mykey = ident;
+
+ {
+ scoped_lock lk( _mutex );
+ ShardMap::iterator i = _lookup.find( mykey );
+
+ if ( i != _lookup.end() )
+ return i->second;
+ }
+
+ // not in our maps, re-load all
+ reload();
+
+ scoped_lock lk( _mutex );
+ ShardMap::iterator i = _lookup.find( mykey );
+ massert( 13129 , (string)"can't find shard for: " + mykey , i != _lookup.end() );
+ return i->second;
+ }
+
+ // Lookup shard by replica set name. Returns Shard::EMPTY if the name can't be found.
+ // Note: this doesn't refresh the table if the name isn't found, so it's possible that
+ // a newly added shard/Replica Set may not be found.
+ Shard lookupRSName( const string& name) {
+ scoped_lock lk( _rsMutex );
+ ShardMap::iterator i = _rsLookup.find( name );
+
+ return (i == _rsLookup.end()) ? Shard::EMPTY : i->second.get();
+ }
+
+ // Useful for ensuring our shard data will not be modified while we use it
+ Shard findCopy( const string& ident ){
+ ShardPtr found = find( ident );
+ scoped_lock lk( _mutex );
+ massert( 13128 , (string)"can't find shard for: " + ident , found.get() );
+ return *found.get();
+ }
+
+ void set( const string& name , const Shard& s , bool setName = true , bool setAddr = true ) {
+ scoped_lock lk( _mutex );
+ ShardPtr ss( new Shard( s ) );
+ if ( setName )
+ _lookup[name] = ss;
+ if ( setAddr )
+ _installHost( s.getConnString() , ss );
+ }
+
+ void _installHost( const string& host , const ShardPtr& s ) {
+ _lookup[host] = s;
+
+ const ConnectionString& cs = s->getAddress();
+ if ( cs.type() == ConnectionString::SET ) {
+ if ( cs.getSetName().size() ) {
+ scoped_lock lk( _rsMutex);
+ _rsLookup[ cs.getSetName() ] = s;
+ }
+ vector<HostAndPort> servers = cs.getServers();
+ for ( unsigned i=0; i<servers.size(); i++ ) {
+ _lookup[ servers[i].toString() ] = s;
+ }
+ }
+ }
+
+ void remove( const string& name ) {
+ scoped_lock lk( _mutex );
+ for ( ShardMap::iterator i = _lookup.begin(); i!=_lookup.end(); ) {
+ ShardPtr s = i->second;
+ if ( s->getName() == name ) {
+ _lookup.erase(i++);
+ }
+ else {
+ ++i;
+ }
+ }
+ for ( ShardMap::iterator i = _rsLookup.begin(); i!=_rsLookup.end(); ) {
+ ShardPtr s = i->second;
+ if ( s->getName() == name ) {
+ _rsLookup.erase(i++);
+ }
+ else {
+ ++i;
+ }
+ }
+ }
+
+ void getAllShards( vector<ShardPtr>& all ) const {
+ scoped_lock lk( _mutex );
+ std::set<string> seen;
+ for ( ShardMap::const_iterator i = _lookup.begin(); i!=_lookup.end(); ++i ) {
+ const ShardPtr& s = i->second;
+ if ( s->getName() == "config" )
+ continue;
+ if ( seen.count( s->getName() ) )
+ continue;
+ seen.insert( s->getName() );
+ all.push_back( s );
+ }
+ }
+
+ void getAllShards( vector<Shard>& all ) const {
+ scoped_lock lk( _mutex );
+ std::set<string> seen;
+ for ( ShardMap::const_iterator i = _lookup.begin(); i!=_lookup.end(); ++i ) {
+ const ShardPtr& s = i->second;
+ if ( s->getName() == "config" )
+ continue;
+ if ( seen.count( s->getName() ) )
+ continue;
+ seen.insert( s->getName() );
+ all.push_back( *s );
+ }
+ }
+
+
+ bool isAShardNode( const string& addr ) const {
+ scoped_lock lk( _mutex );
+
+ // check direct nodes or set names
+ ShardMap::const_iterator i = _lookup.find( addr );
+ if ( i != _lookup.end() )
+ return true;
+
+ // check for set nodes
+ for ( ShardMap::const_iterator i = _lookup.begin(); i!=_lookup.end(); ++i ) {
+ if ( i->first == "config" )
+ continue;
+
+ if ( i->second->containsNode( addr ) )
+ return true;
+ }
+
+ return false;
+ }
+
+ bool getShardMap( BSONObjBuilder& result , string& errmsg ) const {
+ scoped_lock lk( _mutex );
+
+ BSONObjBuilder b( _lookup.size() + 50 );
+
+ for ( ShardMap::const_iterator i = _lookup.begin(); i!=_lookup.end(); ++i ) {
+ b.append( i->first , i->second->getConnString() );
+ }
+
+ result.append( "map" , b.obj() );
+
+ return true;
+ }
+
+ private:
+ typedef map<string,ShardPtr> ShardMap;
+ ShardMap _lookup;
+ ShardMap _rsLookup; // Map from ReplSet name to shard
+ mutable mongo::mutex _mutex;
+ mutable mongo::mutex _rsMutex;
+ } staticShardInfo;
+
+
+ class CmdGetShardMap : public Command {
+ public:
+ CmdGetShardMap() : Command( "getShardMap" ){}
+ virtual void help( stringstream &help ) const { help<<"internal"; }
+ virtual LockType locktype() const { return NONE; }
+ virtual bool slaveOk() const { return true; }
+ virtual bool adminOnly() const { return true; }
+
+ virtual bool run(const string&, mongo::BSONObj&, int, std::string& errmsg , mongo::BSONObjBuilder& result, bool) {
+ return staticShardInfo.getShardMap( result , errmsg );
+ }
+ } cmdGetShardMap;
+
+
+ void Shard::_setAddr( const string& addr ) {
+ _addr = addr;
+ if ( !_addr.empty() ) {
+ _cs = ConnectionString( addr , ConnectionString::SET );
+ _rsInit();
+ }
+ }
+
+ void Shard::_rsInit() {
+ if ( _cs.type() == ConnectionString::SET ) {
+ string x = _cs.getSetName();
+ massert( 14807 , str::stream() << "no set name for shard: " << _name << " " << _cs.toString() , x.size() );
+ _rs = ReplicaSetMonitor::get( x , _cs.getServers() );
+ }
+ }
+
+ void Shard::setAddress( const ConnectionString& cs) {
+ assert( _name.size() );
+ _addr = cs.toString();
+ _cs = cs;
+ _rsInit();
+ staticShardInfo.set( _name , *this , true , false );
+ }
+
+ void Shard::reset( const string& ident ) {
+ *this = staticShardInfo.findCopy( ident );
+ _rs.reset();
+ _rsInit();
+ }
+
+ bool Shard::containsNode( const string& node ) const {
+ if ( _addr == node )
+ return true;
+
+ if ( _rs && _rs->contains( node ) )
+ return true;
+
+ return false;
+ }
+
+ void Shard::getAllShards( vector<Shard>& all ) {
+ staticShardInfo.getAllShards( all );
+ }
+
+ bool Shard::isAShardNode( const string& ident ) {
+ return staticShardInfo.isAShardNode( ident );
+ }
+
+ Shard Shard::lookupRSName( const string& name) {
+ return staticShardInfo.lookupRSName(name);
+ }
+
+ void Shard::printShardInfo( ostream& out ) {
+ vector<Shard> all;
+ staticShardInfo.getAllShards( all );
+ for ( unsigned i=0; i<all.size(); i++ )
+ out << all[i].toString() << "\n";
+ out.flush();
+ }
+
+ BSONObj Shard::runCommand( const string& db , const BSONObj& cmd ) const {
+ ScopedDbConnection conn( this );
+ BSONObj res;
+ bool ok = conn->runCommand( db , cmd , res );
+ if ( ! ok ) {
+ stringstream ss;
+ ss << "runCommand (" << cmd << ") on shard (" << _name << ") failed : " << res;
+ throw UserException( 13136 , ss.str() );
+ }
+ res = res.getOwned();
+ conn.done();
+ return res;
+ }
+
+ ShardStatus Shard::getStatus() const {
+ return ShardStatus( *this , runCommand( "admin" , BSON( "serverStatus" << 1 ) ) );
+ }
+
+ void Shard::reloadShardInfo() {
+ staticShardInfo.reload();
+ }
+
+
+ void Shard::removeShard( const string& name ) {
+ staticShardInfo.remove( name );
+ }
+
+ Shard Shard::pick( const Shard& current ) {
+ vector<Shard> all;
+ staticShardInfo.getAllShards( all );
+ if ( all.size() == 0 ) {
+ staticShardInfo.reload();
+ staticShardInfo.getAllShards( all );
+ if ( all.size() == 0 )
+ return EMPTY;
+ }
+
+ // if current shard was provided, pick a different shard only if it is a better choice
+ ShardStatus best = all[0].getStatus();
+ if ( current != EMPTY ) {
+ best = current.getStatus();
+ }
+
+ for ( size_t i=0; i<all.size(); i++ ) {
+ ShardStatus t = all[i].getStatus();
+ if ( t < best )
+ best = t;
+ }
+
+ LOG(1) << "best shard for new allocation is " << best << endl;
+ return best.shard();
+ }
+
+ ShardStatus::ShardStatus( const Shard& shard , const BSONObj& obj )
+ : _shard( shard ) {
+ _mapped = obj.getFieldDotted( "mem.mapped" ).numberLong();
+ _hasOpsQueued = obj["writeBacksQueued"].Bool();
+ _writeLock = 0; // TODO
+ }
+
+ void ShardingConnectionHook::onCreate( DBClientBase * conn ) {
+ if( !noauth ) {
+ string err;
+ LOG(2) << "calling onCreate auth for " << conn->toString() << endl;
+ uassert( 15847, "can't authenticate to shard server",
+ conn->auth("local", internalSecurity.user, internalSecurity.pwd, err, false));
+ }
+
+ if ( _shardedConnections && versionManager.isVersionableCB( conn ) ) {
+
+ // We must initialize sharding on all connections, so that we get exceptions if sharding is enabled on
+ // the collection.
+ BSONObj result;
+ bool ok = versionManager.initShardVersionCB( conn, result );
+
+ // assert that we actually successfully setup sharding
+ uassert( 15907, str::stream() << "could not initialize sharding on connection " << (*conn).toString() <<
+ ( result["errmsg"].type() == String ? causedBy( result["errmsg"].String() ) :
+ causedBy( (string)"unknown failure : " + result.toString() ) ), ok );
+
+ }
+ }
+
+ void ShardingConnectionHook::onDestroy( DBClientBase * conn ) {
+
+ if( _shardedConnections && versionManager.isVersionableCB( conn ) ){
+ versionManager.resetShardVersionCB( conn );
+ }
+
+ }
+}
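
Shard::pick() above orders candidates via ShardStatus::operator<, i.e. by the mem.mapped figure pulled from serverStatus, and only displaces the supplied shard when another one is strictly smaller. An illustrative usage sketch (currentShard is an assumed local variable, not part of this changeset):

    // illustrative sketch only
    Shard target = Shard::pick();                 // emptiest shard overall, or Shard::EMPTY if none exist
    Shard moveTo = Shard::pick( currentShard );   // currentShard, unless another shard maps less memory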
diff --git a/src/mongo/s/shard.h b/src/mongo/s/shard.h
new file mode 100644
index 00000000000..6b52c58a932
--- /dev/null
+++ b/src/mongo/s/shard.h
@@ -0,0 +1,308 @@
+// @file shard.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../pch.h"
+#include "../client/connpool.h"
+
+namespace mongo {
+
+ class ShardConnection;
+ class ShardStatus;
+
+ /*
+ * A "shard" is one partition of the overall database (and typically a replica set).
+ */
+
+ class Shard {
+ public:
+ Shard()
+ : _name("") , _addr("") , _maxSize(0) , _isDraining( false ) {
+ }
+
+ Shard( const string& name , const string& addr, long long maxSize = 0 , bool isDraining = false )
+ : _name(name) , _addr( addr ) , _maxSize( maxSize ) , _isDraining( isDraining ) {
+ _setAddr( addr );
+ }
+
+ Shard( const string& ident ) {
+ reset( ident );
+ }
+
+ Shard( const Shard& other )
+ : _name( other._name ) , _addr( other._addr ) , _cs( other._cs ) ,
+ _maxSize( other._maxSize ) , _isDraining( other._isDraining ) , _rs( other._rs ) {
+ }
+
+ Shard( const Shard* other )
+ : _name( other->_name ) , _addr( other->_addr ), _cs( other->_cs ) ,
+ _maxSize( other->_maxSize ) , _isDraining( other->_isDraining ) , _rs( other->_rs ) {
+ }
+
+ static Shard make( const string& ident ) {
+ Shard s;
+ s.reset( ident );
+ return s;
+ }
+
+ /**
+ * @param ident either name or address
+ */
+ void reset( const string& ident );
+
+ void setAddress( const ConnectionString& cs );
+
+ ConnectionString getAddress() const { return _cs; }
+
+ string getName() const {
+ assert( _name.size() );
+ return _name;
+ }
+
+ string getConnString() const {
+ assert( _addr.size() );
+ return _addr;
+ }
+
+ long long getMaxSize() const {
+ return _maxSize;
+ }
+
+ bool isDraining() const {
+ return _isDraining;
+ }
+
+ string toString() const {
+ return _name + ":" + _addr;
+ }
+
+ friend ostream& operator << (ostream& out, const Shard& s) {
+ return (out << s.toString());
+ }
+
+ bool operator==( const Shard& s ) const {
+ bool n = _name == s._name;
+ bool a = _addr == s._addr;
+
+ assert( n == a ); // names and addresses are 1 to 1
+ return n;
+ }
+
+ bool operator!=( const Shard& s ) const {
+ bool n = _name == s._name;
+ bool a = _addr == s._addr;
+ return ! ( n && a );
+ }
+
+
+ bool operator==( const string& s ) const {
+ return _name == s || _addr == s;
+ }
+
+ bool operator!=( const string& s ) const {
+ return _name != s && _addr != s;
+ }
+
+ bool operator<(const Shard& o) const {
+ return _name < o._name;
+ }
+
+ bool ok() const { return _addr.size() > 0; }
+
+ BSONObj runCommand( const string& db , const string& simple ) const {
+ return runCommand( db , BSON( simple << 1 ) );
+ }
+ BSONObj runCommand( const string& db , const BSONObj& cmd ) const ;
+
+ ShardStatus getStatus() const ;
+
+ /**
+         * mostly for replica sets
+         * returns true if the node is this shard,
+         * or if the replica set contains the node
+ */
+ bool containsNode( const string& node ) const;
+
+ static void getAllShards( vector<Shard>& all );
+ static void printShardInfo( ostream& out );
+ static Shard lookupRSName( const string& name);
+
+ /**
+         * @param current - shard where the chunk/database currently lives
+         * @return the currently emptiest shard; may be current if it is already the best choice, or EMPTY
+ */
+ static Shard pick( const Shard& current = EMPTY );
+
+ static void reloadShardInfo();
+
+ static void removeShard( const string& name );
+
+ static bool isAShardNode( const string& ident );
+
+ static Shard EMPTY;
+
+ private:
+
+ void _rsInit();
+ void _setAddr( const string& addr );
+
+ string _name;
+ string _addr;
+ ConnectionString _cs;
+ long long _maxSize; // in MBytes, 0 is unlimited
+ bool _isDraining; // shard is currently being removed
+ ReplicaSetMonitorPtr _rs;
+ };
+
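+    /**
+     * Point-in-time status of a single shard (mapped memory, write-lock percentage,
+     * pending writebacks), built from a status document for that shard. Instances
+     * order by mapped size, which is presumably what Shard::pick() uses to find the
+     * emptiest shard.
+     */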
+ class ShardStatus {
+ public:
+
+ ShardStatus( const Shard& shard , const BSONObj& obj );
+
+ friend ostream& operator << (ostream& out, const ShardStatus& s) {
+ out << s.toString();
+ return out;
+ }
+
+ string toString() const {
+ stringstream ss;
+ ss << "shard: " << _shard << " mapped: " << _mapped << " writeLock: " << _writeLock;
+ return ss.str();
+ }
+
+ bool operator<( const ShardStatus& other ) const {
+ return _mapped < other._mapped;
+ }
+
+ Shard shard() const {
+ return _shard;
+ }
+
+ long long mapped() const {
+ return _mapped;
+ }
+
+ bool hasOpsQueued() const {
+ return _hasOpsQueued;
+ }
+
+ private:
+ Shard _shard;
+ long long _mapped;
+ bool _hasOpsQueued; // true if 'writebacks' are pending
+ double _writeLock;
+ };
+
+ class ChunkManager;
+ typedef shared_ptr<const ChunkManager> ChunkManagerPtr;
+
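+    /**
+     * Scoped connection to a shard, checked out from a per-thread cache and
+     * shard-versioned for a namespace. Typical use (a sketch, not part of this
+     * changeset):
+     *
+     *   ShardConnection conn( shard , ns );
+     *   conn->insert( ns , obj );   // operator-> finishes init and checks the shard version
+     *   conn.done();                // hand the connection back rather than closing it
+     */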
+ class ShardConnection : public AScopedConnection {
+ public:
+ ShardConnection( const Shard * s , const string& ns, ChunkManagerPtr manager = ChunkManagerPtr() );
+ ShardConnection( const Shard& s , const string& ns, ChunkManagerPtr manager = ChunkManagerPtr() );
+ ShardConnection( const string& addr , const string& ns, ChunkManagerPtr manager = ChunkManagerPtr() );
+
+ ~ShardConnection();
+
+ void done();
+ void kill();
+
+ DBClientBase& conn() {
+ _finishInit();
+ assert( _conn );
+ return *_conn;
+ }
+
+ DBClientBase* operator->() {
+ _finishInit();
+ assert( _conn );
+ return _conn;
+ }
+
+ DBClientBase* get() {
+ _finishInit();
+ assert( _conn );
+ return _conn;
+ }
+
+ string getHost() const {
+ return _addr;
+ }
+
+ string getNS() const {
+ return _ns;
+ }
+
+ ChunkManagerPtr getManager() const {
+ return _manager;
+ }
+
+ bool setVersion() {
+ _finishInit();
+ return _setVersion;
+ }
+
+ static void sync();
+
+ void donotCheckVersion() {
+ _setVersion = false;
+ _finishedInit = true;
+ }
+
+        bool ok() const { return _conn != 0; }
+
+ /**
+           this just passes through except it checks for stale configs
+ */
+ bool runCommand( const string& db , const BSONObj& cmd , BSONObj& res );
+
+ /** checks all of my thread local connections for the version of this ns */
+ static void checkMyConnectionVersions( const string & ns );
+
+ private:
+ void _init();
+ void _finishInit();
+
+ bool _finishedInit;
+
+ string _addr;
+ string _ns;
+ ChunkManagerPtr _manager;
+
+ DBClientBase* _conn;
+ bool _setVersion;
+ };
+
+
+ extern DBConnectionPool shardConnectionPool;
+
+ class ShardingConnectionHook : public DBConnectionHook {
+ public:
+
+ ShardingConnectionHook( bool shardedConnections )
+ : _shardedConnections( shardedConnections ) {
+ }
+
+ virtual void onCreate( DBClientBase * conn );
+ virtual void onHandedOut( DBClientBase * conn );
+ virtual void onDestroy( DBClientBase * conn );
+
+ bool _shardedConnections;
+ };
+}
diff --git a/src/mongo/s/shard_version.cpp b/src/mongo/s/shard_version.cpp
new file mode 100644
index 00000000000..a80b339d858
--- /dev/null
+++ b/src/mongo/s/shard_version.cpp
@@ -0,0 +1,269 @@
+// @file shard_version.cpp
+
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+
+#include "chunk.h"
+#include "config.h"
+#include "grid.h"
+#include "util.h"
+#include "shard.h"
+#include "writeback_listener.h"
+
+#include "shard_version.h"
+
+namespace mongo {
+
+ // Global version manager
+ VersionManager versionManager;
+
+ // when running in sharded mode, use chunk shard version control
+ struct ConnectionShardStatus {
+
+ typedef unsigned long long S;
+
+ ConnectionShardStatus()
+ : _mutex( "ConnectionShardStatus" ) {
+ }
+
+ S getSequence( DBClientBase * conn , const string& ns ) {
+ scoped_lock lk( _mutex );
+ return _map[conn][ns];
+ }
+
+ void setSequence( DBClientBase * conn , const string& ns , const S& s ) {
+ scoped_lock lk( _mutex );
+ _map[conn][ns] = s;
+ }
+
+ void reset( DBClientBase * conn ) {
+ scoped_lock lk( _mutex );
+ _map.erase( conn );
+ }
+
+ // protects _map
+ mongo::mutex _mutex;
+
+ // a map from a connection into ChunkManager's sequence number for each namespace
+ map<DBClientBase*, map<string,unsigned long long> > _map;
+
+ } connectionShardStatus;
+
+ void VersionManager::resetShardVersionCB( DBClientBase * conn ) {
+ connectionShardStatus.reset( conn );
+ }
+
+ bool VersionManager::isVersionableCB( DBClientBase* conn ){
+ return conn->type() == ConnectionString::MASTER || conn->type() == ConnectionString::SET;
+ }
+
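+    /**
+     * Returns the connection setShardVersion should actually be sent to: the
+     * connection itself for a direct (MASTER) connection, the master member for a
+     * replica set (SET) connection; asserts for connection types that cannot be
+     * versioned.
+     */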
+ DBClientBase* getVersionable( DBClientBase* conn ){
+
+ switch ( conn->type() ) {
+ case ConnectionString::INVALID:
+ massert( 15904, str::stream() << "cannot set version on invalid connection " << conn->toString(), false );
+ return NULL;
+ case ConnectionString::MASTER:
+ return conn;
+ case ConnectionString::PAIR:
+ massert( 15905, str::stream() << "cannot set version or shard on pair connection " << conn->toString(), false );
+ return NULL;
+ case ConnectionString::SYNC:
+ massert( 15906, str::stream() << "cannot set version or shard on sync connection " << conn->toString(), false );
+ return NULL;
+ case ConnectionString::SET:
+ DBClientReplicaSet* set = (DBClientReplicaSet*) conn;
+ return &( set->masterConn() );
+ }
+
+ assert( false );
+ return NULL;
+ }
+
+ extern OID serverID;
+
+ bool VersionManager::initShardVersionCB( DBClientBase * conn_in, BSONObj& result ){
+
+ WriteBackListener::init( *conn_in );
+
+ DBClientBase* conn = getVersionable( conn_in );
+ assert( conn ); // errors thrown above
+
+ BSONObjBuilder cmdBuilder;
+
+ cmdBuilder.append( "setShardVersion" , "" );
+ cmdBuilder.appendBool( "init", true );
+ cmdBuilder.append( "configdb" , configServer.modelServer() );
+ cmdBuilder.appendOID( "serverID" , &serverID );
+ cmdBuilder.appendBool( "authoritative" , true );
+
+ BSONObj cmd = cmdBuilder.obj();
+
+ LOG(1) << "initializing shard connection to " << conn->toString() << endl;
+ LOG(2) << "initial sharding settings : " << cmd << endl;
+
+ bool ok = conn->runCommand( "admin" , cmd , result );
+
+ // HACK for backwards compatibility with v1.8.x, v2.0.0 and v2.0.1
+ // Result is false, but will still initialize serverID and configdb
+ if( ! ok && ! result["errmsg"].eoo() && ( result["errmsg"].String() == "need to specify namespace"/* 2.0.1/2 */ ||
+ result["errmsg"].String() == "need to speciy namespace" /* 1.8 */ ))
+ {
+ ok = true;
+ }
+
+ LOG(3) << "initial sharding result : " << result << endl;
+
+ return ok;
+
+ }
+
+ bool VersionManager::forceRemoteCheckShardVersionCB( const string& ns ){
+
+ DBConfigPtr conf = grid.getDBConfig( ns );
+ if ( ! conf ) return false;
+ conf->reload();
+
+ ChunkManagerPtr manager = conf->getChunkManagerIfExists( ns, true, true );
+ if( ! manager ) return false;
+
+ return true;
+
+ }
+
+ /**
+     * @return true if we had to do something (i.e. the connection's shard version needed to be set or refreshed)
+ */
+ bool checkShardVersion( DBClientBase * conn_in , const string& ns , ChunkManagerPtr refManager, bool authoritative , int tryNumber ) {
+ // TODO: cache, optimize, etc...
+
+ WriteBackListener::init( *conn_in );
+
+ DBConfigPtr conf = grid.getDBConfig( ns );
+ if ( ! conf )
+ return false;
+
+ DBClientBase* conn = getVersionable( conn_in );
+ assert(conn); // errors thrown above
+
+ unsigned long long officialSequenceNumber = 0;
+
+ ChunkManagerPtr manager;
+ const bool isSharded = conf->isSharded( ns );
+ if ( isSharded ) {
+ manager = conf->getChunkManagerIfExists( ns , authoritative );
+ // It's possible the chunk manager was reset since we checked whether sharded was true,
+ // so must check this here.
+ if( manager ) officialSequenceNumber = manager->getSequenceNumber();
+ }
+
+ // Check this manager against the reference manager
+ if( isSharded && manager ){
+
+ Shard shard = Shard::make( conn->getServerAddress() );
+ if( refManager && ! refManager->compatibleWith( manager, shard ) ){
+ throw SendStaleConfigException( ns, str::stream() << "manager (" << manager->getVersion( shard ) << " : " << manager->getSequenceNumber() << ") "
+ << "not compatible with reference manager (" << refManager->getVersion( shard ) << " : " << refManager->getSequenceNumber() << ") "
+ << "on shard " << shard.getName() << " (" << shard.getAddress().toString() << ")" );
+ }
+ }
+ else if( refManager ){
+ throw SendStaleConfigException( ns, str::stream() << "not sharded (" << ( (manager.get() == 0) ? ( str::stream() << manager->getSequenceNumber() << ") " ) : (string)"<none>) " ) <<
+ "but has reference manager (" << refManager->getSequenceNumber() << ") "
+ << "on conn " << conn->getServerAddress() << " (" << conn_in->getServerAddress() << ")" );
+ }
+
+ // has the ChunkManager been reloaded since the last time we updated the connection-level version?
+        // (i.e., last time we issued setShardVersion below)
+ unsigned long long sequenceNumber = connectionShardStatus.getSequence(conn,ns);
+ if ( sequenceNumber == officialSequenceNumber ) {
+ return false;
+ }
+
+
+ ShardChunkVersion version = 0;
+ if ( isSharded && manager ) {
+ version = manager->getVersion( Shard::make( conn->getServerAddress() ) );
+ }
+
+ if( version == 0 ){
+ LOG(0) << "resetting shard version of " << ns << " on " << conn->getServerAddress() << ", " <<
+ ( ! isSharded ? "no longer sharded" :
+ ( ! manager ? "no chunk manager found" :
+ "version is zero" ) ) << endl;
+ }
+
+
+ LOG(2) << " have to set shard version for conn: " << conn << " ns:" << ns
+ << " my last seq: " << sequenceNumber << " current: " << officialSequenceNumber
+ << " version: " << version << " manager: " << manager.get()
+ << endl;
+
+ BSONObj result;
+ if ( setShardVersion( *conn , ns , version , authoritative , result ) ) {
+ // success!
+ LOG(1) << " setShardVersion success: " << result << endl;
+ connectionShardStatus.setSequence( conn , ns , officialSequenceNumber );
+ return true;
+ }
+
+ LOG(1) << " setShardVersion failed!\n" << result << endl;
+
+ if ( result["need_authoritative"].trueValue() )
+ massert( 10428 , "need_authoritative set but in authoritative mode already" , ! authoritative );
+
+ if ( ! authoritative ) {
+ checkShardVersion( conn , ns , refManager, 1 , tryNumber + 1 );
+ return true;
+ }
+
+ if ( result["reloadConfig"].trueValue() ) {
+ if( result["version"].timestampTime() == 0 ){
+ // reload db
+ conf->reload();
+ }
+ else {
+ // reload config
+ conf->getChunkManager( ns , true );
+ }
+ }
+
+ const int maxNumTries = 7;
+ if ( tryNumber < maxNumTries ) {
+ LOG( tryNumber < ( maxNumTries / 2 ) ? 1 : 0 )
+ << "going to retry checkShardVersion host: " << conn->getServerAddress() << " " << result << endl;
+ sleepmillis( 10 * tryNumber );
+ checkShardVersion( conn , ns , refManager, true , tryNumber + 1 );
+ return true;
+ }
+
+ string errmsg = str::stream() << "setShardVersion failed host: " << conn->getServerAddress() << " " << result;
+ log() << " " << errmsg << endl;
+ massert( 10429 , errmsg , 0 );
+ return true;
+ }
+
+ bool VersionManager::checkShardVersionCB( DBClientBase* conn_in , const string& ns , bool authoritative , int tryNumber ) {
+ return checkShardVersion( conn_in, ns, ChunkManagerPtr(), authoritative, tryNumber );
+ }
+
+ bool VersionManager::checkShardVersionCB( ShardConnection* conn_in , bool authoritative , int tryNumber ) {
+ return checkShardVersion( conn_in->get(), conn_in->getNS(), conn_in->getManager(), authoritative, tryNumber );
+ }
+
+} // namespace mongo
diff --git a/src/mongo/s/shard_version.h b/src/mongo/s/shard_version.h
new file mode 100644
index 00000000000..98cacf67af2
--- /dev/null
+++ b/src/mongo/s/shard_version.h
@@ -0,0 +1,32 @@
+// @file shard_version.h
+
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+namespace mongo {
+
+ /*
+     * Install chunk shard version callbacks in the shardconnection code. This activates
+ * the chunk shard version control that mongos needs.
+ *
+ * MUST be called before accepting any connections.
+ */
+ void installChunkShardVersioning();
+
+
+} // namespace mongo
diff --git a/src/mongo/s/shardconnection.cpp b/src/mongo/s/shardconnection.cpp
new file mode 100644
index 00000000000..5db8a67b736
--- /dev/null
+++ b/src/mongo/s/shardconnection.cpp
@@ -0,0 +1,248 @@
+// shardconnection.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "shard.h"
+#include "config.h"
+#include "request.h"
+#include <set>
+
+namespace mongo {
+
+ DBConnectionPool shardConnectionPool;
+
+ /**
+ * holds all the actual db connections for a client to various servers
+ * 1 per thread, so doesn't have to be thread safe
+ */
+ class ClientConnections : boost::noncopyable {
+ public:
+ struct Status : boost::noncopyable {
+ Status() : created(0), avail(0) {}
+
+ long long created;
+ DBClientBase* avail;
+ };
+
+
+ ClientConnections() {}
+
+ ~ClientConnections() {
+ for ( HostMap::iterator i=_hosts.begin(); i!=_hosts.end(); ++i ) {
+ string addr = i->first;
+ Status* ss = i->second;
+ assert( ss );
+ if ( ss->avail ) {
+ /* if we're shutting down, don't want to initiate release mechanism as it is slow,
+ and isn't needed since all connections will be closed anyway */
+ if ( inShutdown() ) {
+ if( versionManager.isVersionableCB( ss->avail ) ) versionManager.resetShardVersionCB( ss->avail );
+ delete ss->avail;
+ }
+ else
+ release( addr , ss->avail );
+ ss->avail = 0;
+ }
+ delete ss;
+ }
+ _hosts.clear();
+ }
+
+ DBClientBase * get( const string& addr , const string& ns ) {
+ _check( ns );
+
+ Status* &s = _hosts[addr];
+ if ( ! s )
+ s = new Status();
+
+ if ( s->avail ) {
+ DBClientBase* c = s->avail;
+ s->avail = 0;
+ try {
+ shardConnectionPool.onHandedOut( c );
+ }
+ catch ( std::exception& ) {
+ delete c;
+ throw;
+ }
+ return c;
+ }
+
+ s->created++;
+ return shardConnectionPool.get( addr );
+ }
+
+ void done( const string& addr , DBClientBase* conn ) {
+ Status* s = _hosts[addr];
+ assert( s );
+ if ( s->avail ) {
+ release( addr , conn );
+ return;
+ }
+ s->avail = conn;
+ }
+
+ void sync() {
+ for ( HostMap::iterator i=_hosts.begin(); i!=_hosts.end(); ++i ) {
+ string addr = i->first;
+ Status* ss = i->second;
+ if ( ss->avail )
+ ss->avail->getLastError();
+
+ }
+ }
+
+ void checkVersions( const string& ns ) {
+
+ vector<Shard> all;
+ Shard::getAllShards( all );
+
+ // Now only check top-level shard connections
+ for ( unsigned i=0; i<all.size(); i++ ) {
+
+ string sconnString = all[i].getConnString();
+ Status* &s = _hosts[sconnString];
+
+ if ( ! s ){
+ s = new Status();
+ }
+
+ if( ! s->avail )
+ s->avail = shardConnectionPool.get( sconnString );
+
+ versionManager.checkShardVersionCB( s->avail, ns, false, 1 );
+
+ }
+ }
+
+ void release( const string& addr , DBClientBase * conn ) {
+ shardConnectionPool.release( addr , conn );
+ }
+
+ void _check( const string& ns ) {
+ if ( ns.size() == 0 || _seenNS.count( ns ) )
+ return;
+ _seenNS.insert( ns );
+ checkVersions( ns );
+ }
+
+ typedef map<string,Status*,DBConnectionPool::serverNameCompare> HostMap;
+ HostMap _hosts;
+ set<string> _seenNS;
+ // -----
+
+ static thread_specific_ptr<ClientConnections> _perThread;
+
+ static ClientConnections* threadInstance() {
+ ClientConnections* cc = _perThread.get();
+ if ( ! cc ) {
+ cc = new ClientConnections();
+ _perThread.reset( cc );
+ }
+ return cc;
+ }
+ };
+
+ thread_specific_ptr<ClientConnections> ClientConnections::_perThread;
+
+ ShardConnection::ShardConnection( const Shard * s , const string& ns, ChunkManagerPtr manager )
+ : _addr( s->getConnString() ) , _ns( ns ), _manager( manager ) {
+ _init();
+ }
+
+ ShardConnection::ShardConnection( const Shard& s , const string& ns, ChunkManagerPtr manager )
+ : _addr( s.getConnString() ) , _ns( ns ), _manager( manager ) {
+ _init();
+ }
+
+ ShardConnection::ShardConnection( const string& addr , const string& ns, ChunkManagerPtr manager )
+ : _addr( addr ) , _ns( ns ), _manager( manager ) {
+ _init();
+ }
+
+ void ShardConnection::_init() {
+ assert( _addr.size() );
+ _conn = ClientConnections::threadInstance()->get( _addr , _ns );
+ _finishedInit = false;
+ }
+
+ void ShardConnection::_finishInit() {
+ if ( _finishedInit )
+ return;
+ _finishedInit = true;
+
+ if ( _ns.size() && versionManager.isVersionableCB( _conn ) ) {
+ // Make sure we specified a manager for the correct namespace
+ if( _manager ) assert( _manager->getns() == _ns );
+ _setVersion = versionManager.checkShardVersionCB( this , false , 1 );
+ }
+ else {
+ // Make sure we didn't specify a manager for an empty namespace
+ assert( ! _manager );
+ _setVersion = false;
+ }
+
+ }
+
+ void ShardConnection::done() {
+ if ( _conn ) {
+ ClientConnections::threadInstance()->done( _addr , _conn );
+ _conn = 0;
+ _finishedInit = true;
+ }
+ }
+
+ void ShardConnection::kill() {
+ if ( _conn ) {
+ if( versionManager.isVersionableCB( _conn ) ) versionManager.resetShardVersionCB( _conn );
+ delete _conn;
+ _conn = 0;
+ _finishedInit = true;
+ }
+ }
+
+ void ShardConnection::sync() {
+ ClientConnections::threadInstance()->sync();
+ }
+
+ bool ShardConnection::runCommand( const string& db , const BSONObj& cmd , BSONObj& res ) {
+ assert( _conn );
+ bool ok = _conn->runCommand( db , cmd , res );
+ if ( ! ok ) {
+ if ( res["code"].numberInt() == SendStaleConfigCode ) {
+ done();
+ throw RecvStaleConfigException( res["ns"].String() , res["errmsg"].String() );
+ }
+ }
+ return ok;
+ }
+
+ void ShardConnection::checkMyConnectionVersions( const string & ns ) {
+ ClientConnections::threadInstance()->checkVersions( ns );
+ }
+
+ ShardConnection::~ShardConnection() {
+ if ( _conn ) {
+ if ( ! _conn->isFailed() ) {
+ /* see done() comments above for why we log this line */
+ log() << "~ScopedDBConnection: _conn != null" << endl;
+ }
+ kill();
+ }
+ }
+}
diff --git a/src/mongo/s/shardkey.cpp b/src/mongo/s/shardkey.cpp
new file mode 100644
index 00000000000..365435ef5ea
--- /dev/null
+++ b/src/mongo/s/shardkey.cpp
@@ -0,0 +1,273 @@
+// shardkey.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "chunk.h"
+#include "../db/jsobj.h"
+#include "../util/unittest.h"
+#include "../util/timer.h"
+
+namespace mongo {
+
+ ShardKeyPattern::ShardKeyPattern( BSONObj p ) : pattern( p.getOwned() ) {
+ pattern.getFieldNames(patternfields);
+
+ BSONObjBuilder min;
+ BSONObjBuilder max;
+
+ BSONObjIterator it(p);
+ while (it.more()) {
+ BSONElement e (it.next());
+ min.appendMinKey(e.fieldName());
+ max.appendMaxKey(e.fieldName());
+ }
+
+ gMin = min.obj();
+ gMax = max.obj();
+ }
+
+ int ShardKeyPattern::compare( const BSONObj& lObject , const BSONObj& rObject ) const {
+ BSONObj L = extractKey(lObject);
+ uassert( 10198 , str::stream() << "left object (" << lObject << ") doesn't have full shard key (" << pattern << ')',
+ L.nFields() == (int)patternfields.size());
+ BSONObj R = extractKey(rObject);
+ uassert( 10199 , str::stream() << "right object (" << rObject << ") doesn't have full shard key (" << pattern << ')',
+ R.nFields() == (int)patternfields.size());
+ return L.woCompare(R);
+ }
+
+ bool ShardKeyPattern::hasShardKey( const BSONObj& obj ) const {
+        /* this is written so that if obj has lots of fields and the shard key fields
+           appear early, it is fast; hence a bit more work here to try to be semi-fast.
+ */
+
+ for(set<string>::const_iterator it = patternfields.begin(); it != patternfields.end(); ++it) {
+ BSONElement e = obj.getFieldDotted(it->c_str());
+ if(e.eoo() || e.type() == Array)
+ return false;
+ }
+ return true;
+ }
+
+ bool ShardKeyPattern::isPrefixOf( const BSONObj& otherPattern ) const {
+ BSONObjIterator a( pattern );
+ BSONObjIterator b( otherPattern );
+
+ while ( a.more() && b.more() ) {
+ BSONElement x = a.next();
+ BSONElement y = b.next();
+ if ( strcmp( x.fieldName() , y.fieldName() ) )
+ return false;
+ }
+
+ return ! a.more();
+ }
+
+ string ShardKeyPattern::toString() const {
+ return pattern.toString();
+ }
+
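+    /**
+     * Rebuilds 'obj' with _id and the top-level (non-dotted) shard key fields first:
+     * the moved fields are appended into a fresh buffer, then the remaining field
+     * ranges of the original object are copied over unchanged.
+     */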
+ BSONObj ShardKeyPattern::moveToFront(const BSONObj& obj) const {
+ vector<const char*> keysToMove;
+ keysToMove.push_back("_id");
+ BSONForEach(e, pattern) {
+ if (strchr(e.fieldName(), '.') == NULL && strcmp(e.fieldName(), "_id") != 0)
+ keysToMove.push_back(e.fieldName());
+ }
+
+ if (keysToMove.size() == 1) {
+ return obj;
+
+ }
+ else {
+ BufBuilder buf (obj.objsize());
+ buf.appendNum((unsigned)0); // refcount
+ buf.appendNum(obj.objsize());
+
+ vector<pair<const char*, size_t> > copies;
+ pair<const char*, size_t> toCopy ((const char*)NULL, 0); // C++ NULL isn't a pointer type yet
+
+ BSONForEach(e, obj) {
+ bool moveToFront = false;
+ for (vector<const char*>::const_iterator it(keysToMove.begin()), end(keysToMove.end()); it!=end; ++it) {
+ if (strcmp(e.fieldName(), *it) == 0) {
+ moveToFront = true;
+ break;
+ }
+ }
+
+ if (moveToFront) {
+ buf.appendBuf(e.fieldName()-1, e.size());
+ if (toCopy.first) {
+ copies.push_back(toCopy);
+ toCopy.first = NULL;
+ }
+ }
+ else {
+ if (!toCopy.first) {
+ toCopy.first = e.fieldName()-1;
+ toCopy.second = e.size();
+ }
+ else {
+ toCopy.second += e.size();
+ }
+ }
+ }
+
+ for (vector<pair<const char*, size_t> >::const_iterator it(copies.begin()), end(copies.end()); it!=end; ++it) {
+ buf.appendBuf(it->first, it->second);
+ }
+
+ if (toCopy.first) {
+ buf.appendBuf(toCopy.first, toCopy.second);
+ }
+
+ buf.appendChar('\0');
+
+ BSONObj out ((BSONObj::Holder*)buf.buf());
+ buf.decouple();
+ return out;
+ }
+ }
+
+ /* things to test for compound :
+ \ middle (deprecating?)
+ */
+ class ShardKeyUnitTest : public UnitTest {
+ public:
+
+ void testIsPrefixOf() {
+ {
+ ShardKeyPattern k( BSON( "x" << 1 ) );
+ assert( ! k.isPrefixOf( BSON( "a" << 1 ) ) );
+ assert( k.isPrefixOf( BSON( "x" << 1 ) ) );
+ assert( k.isPrefixOf( BSON( "x" << 1 << "a" << 1 ) ) );
+ assert( ! k.isPrefixOf( BSON( "a" << 1 << "x" << 1 ) ) );
+ }
+ {
+ ShardKeyPattern k( BSON( "x" << 1 << "y" << 1 ) );
+ assert( ! k.isPrefixOf( BSON( "x" << 1 ) ) );
+ assert( ! k.isPrefixOf( BSON( "x" << 1 << "z" << 1 ) ) );
+ assert( k.isPrefixOf( BSON( "x" << 1 << "y" << 1 ) ) );
+ assert( k.isPrefixOf( BSON( "x" << 1 << "y" << 1 << "z" << 1 ) ) );
+ }
+ }
+
+ void hasshardkeytest() {
+ BSONObj x = fromjson("{ zid : \"abcdefg\", num: 1.0, name: \"eliot\" }");
+ ShardKeyPattern k( BSON( "num" << 1 ) );
+ assert( k.hasShardKey(x) );
+ assert( !k.hasShardKey( fromjson("{foo:'a'}") ) );
+
+ // try compound key
+ {
+ ShardKeyPattern k( fromjson("{a:1,b:-1,c:1}") );
+ assert( k.hasShardKey( fromjson("{foo:'a',a:'b',c:'z',b:9,k:99}") ) );
+ assert( !k.hasShardKey( fromjson("{foo:'a',a:'b',c:'z',bb:9,k:99}") ) );
+ assert( !k.hasShardKey( fromjson("{k:99}") ) );
+ }
+
+ }
+
+ void extractkeytest() {
+ ShardKeyPattern k( fromjson("{a:1,'sub.b':-1,'sub.c':1}") );
+
+ BSONObj x = fromjson("{a:1,'sub.b':2,'sub.c':3}");
+ assert( k.extractKey( fromjson("{a:1,sub:{b:2,c:3}}") ).binaryEqual(x) );
+ assert( k.extractKey( fromjson("{sub:{b:2,c:3},a:1}") ).binaryEqual(x) );
+ }
+ void moveToFrontTest() {
+ ShardKeyPattern sk (BSON("a" << 1 << "b" << 1));
+
+ BSONObj ret;
+
+ ret = sk.moveToFront(BSON("z" << 1 << "_id" << 1 << "y" << 1 << "a" << 1 << "x" << 1 << "b" << 1 << "w" << 1));
+ assert(ret.binaryEqual(BSON("_id" << 1 << "a" << 1 << "b" << 1 << "z" << 1 << "y" << 1 << "x" << 1 << "w" << 1)));
+
+ ret = sk.moveToFront(BSON("_id" << 1 << "a" << 1 << "b" << 1 << "z" << 1 << "y" << 1 << "x" << 1 << "w" << 1));
+ assert(ret.binaryEqual(BSON("_id" << 1 << "a" << 1 << "b" << 1 << "z" << 1 << "y" << 1 << "x" << 1 << "w" << 1)));
+
+ ret = sk.moveToFront(BSON("z" << 1 << "y" << 1 << "a" << 1 << "b" << 1 << "Z" << 1 << "Y" << 1));
+ assert(ret.binaryEqual(BSON("a" << 1 << "b" << 1 << "z" << 1 << "y" << 1 << "Z" << 1 << "Y" << 1)));
+
+ }
+
+ void moveToFrontBenchmark(int numFields) {
+ BSONObjBuilder bb;
+ bb.append("_id", 1);
+ for (int i=0; i < numFields; i++)
+ bb.append(BSONObjBuilder::numStr(i), 1);
+ bb.append("key", 1);
+ BSONObj o = bb.obj();
+
+ ShardKeyPattern sk (BSON("key" << 1));
+
+ Timer t;
+ const int iterations = 100*1000;
+ for (int i=0; i< iterations; i++) {
+ sk.moveToFront(o);
+ }
+
+ const double secs = t.micros() / 1000000.0;
+ const double ops_per_sec = iterations / secs;
+
+ cout << "moveToFront (" << numFields << " fields) secs: " << secs << " ops_per_sec: " << ops_per_sec << endl;
+ }
+ void run() {
+ extractkeytest();
+
+ ShardKeyPattern k( BSON( "key" << 1 ) );
+
+ BSONObj min = k.globalMin();
+
+// cout << min.jsonString(TenGen) << endl;
+
+ BSONObj max = k.globalMax();
+
+ BSONObj k1 = BSON( "key" << 5 );
+
+ assert( k.compare( min , max ) < 0 );
+ assert( k.compare( min , k1 ) < 0 );
+ assert( k.compare( max , min ) > 0 );
+ assert( k.compare( min , min ) == 0 );
+
+ hasshardkeytest();
+ assert( k.hasShardKey( k1 ) );
+ assert( ! k.hasShardKey( BSON( "key2" << 1 ) ) );
+
+ BSONObj a = k1;
+ BSONObj b = BSON( "key" << 999 );
+
+ assert( k.compare(a,b) < 0 );
+
+ testIsPrefixOf();
+ // add middle multitype tests
+
+ moveToFrontTest();
+
+ if (0) { // toggle to run benchmark
+ moveToFrontBenchmark(0);
+ moveToFrontBenchmark(10);
+ moveToFrontBenchmark(100);
+ }
+
+ LOG(1) << "shardKeyTest passed" << endl;
+ }
+ } shardKeyTest;
+
+} // namespace mongo
diff --git a/src/mongo/s/shardkey.h b/src/mongo/s/shardkey.h
new file mode 100644
index 00000000000..976cff09591
--- /dev/null
+++ b/src/mongo/s/shardkey.h
@@ -0,0 +1,124 @@
+// shardkey.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../client/dbclient.h"
+
+namespace mongo {
+
+ class Chunk;
+
+    /* A ShardKeyPattern is a pattern indicating which fields to extract from an object to form its shard key.
+ Analogous to an index key pattern.
+ */
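+    /* e.g. (illustrative only):
+     *   ShardKeyPattern p( BSON( "num" << 1 ) );
+     *   p.extractKey( BSON( "name" << "joe" << "num" << 5 ) );   // -> { num : 5 }
+     */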
+ class ShardKeyPattern {
+ public:
+ ShardKeyPattern( BSONObj p = BSONObj() );
+
+ /**
+ global min is the lowest possible value for this key
+ e.g. { num : MinKey }
+ */
+ BSONObj globalMin() const { return gMin; }
+
+ /**
+ global max is the highest possible value for this key
+ */
+ BSONObj globalMax() const { return gMax; }
+
+ bool isGlobalMin( const BSONObj& k ) const {
+ return k.woCompare( globalMin() ) == 0;
+ }
+
+ bool isGlobalMax( const BSONObj& k ) const {
+ return k.woCompare( globalMax() ) == 0;
+ }
+
+ bool isGlobal( const BSONObj& k ) const {
+ return isGlobalMin( k ) || isGlobalMax( k );
+ }
+
+ /** compare shard keys from the objects specified
+ l < r negative
+ l == r 0
+ l > r positive
+ */
+ int compare( const BSONObj& l , const BSONObj& r ) const;
+
+ /**
+ @return whether or not obj has all fields in this shard key pattern
+ e.g.
+ ShardKey({num:1}).hasShardKey({ name:"joe", num:3 }) is true
+ */
+ bool hasShardKey( const BSONObj& obj ) const;
+
+ BSONObj key() const { return pattern; }
+
+ string toString() const;
+
+ BSONObj extractKey(const BSONObj& from) const;
+
+ bool partOfShardKey(const char* key ) const {
+ return pattern.hasField(key);
+ }
+ bool partOfShardKey(const string& key ) const {
+ return pattern.hasField(key.c_str());
+ }
+
+ /**
+ * @return
+ * true if 'this' is a prefix (not necessarily contained) of 'otherPattern'.
+ */
+ bool isPrefixOf( const BSONObj& otherPattern ) const;
+
+ /**
+ * @return BSONObj with _id and shardkey at front. May return original object.
+ */
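+        /* e.g. (illustrative only) with pattern { a : 1 , b : 1 }:
+         *   moveToFront( { z : 1 , _id : 7 , a : 2 , b : 3 } ) -> { _id : 7 , a : 2 , b : 3 , z : 1 }
+         */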
+ BSONObj moveToFront(const BSONObj& obj) const;
+
+ private:
+ BSONObj pattern;
+ BSONObj gMin;
+ BSONObj gMax;
+
+ /* question: better to have patternfields precomputed or not? depends on if we use copy constructor often. */
+ set<string> patternfields;
+ };
+
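+    /**
+     * Returns the shard key fields of 'from'. Fast path: if 'from' already consists of
+     * exactly the pattern's fields in pattern order, it is returned as-is; otherwise
+     * the fields are extracted into a new object.
+     */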
+ inline BSONObj ShardKeyPattern::extractKey(const BSONObj& from) const {
+ BSONObj k = from;
+ bool needExtraction = false;
+
+ BSONObjIterator a(from);
+ BSONObjIterator b(pattern);
+ while (a.more() && b.more()){
+ if (strcmp(a.next().fieldName(), b.next().fieldName()) != 0){
+ needExtraction = true;
+ break;
+ }
+ }
+
+ if (needExtraction || a.more() != b.more())
+ k = from.extractFields(pattern);
+
+ uassert(13334, "Shard Key must be less than 512 bytes", k.objsize() < 512);
+ return k;
+ }
+
+}
diff --git a/src/mongo/s/stats.cpp b/src/mongo/s/stats.cpp
new file mode 100644
index 00000000000..460ada3ccd6
--- /dev/null
+++ b/src/mongo/s/stats.cpp
@@ -0,0 +1,28 @@
+// stats.cpp
+
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "stats.h"
+
+namespace mongo {
+
+ OpCounters opsNonSharded;
+ OpCounters opsSharded;
+
+ GenericCounter shardedCursorTypes;
+}
diff --git a/src/mongo/s/stats.h b/src/mongo/s/stats.h
new file mode 100644
index 00000000000..a7cc784e981
--- /dev/null
+++ b/src/mongo/s/stats.h
@@ -0,0 +1,30 @@
+// stats.h
+
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "../pch.h"
+#include "../db/stats/counters.h"
+
+namespace mongo {
+
+ extern OpCounters opsNonSharded;
+ extern OpCounters opsSharded;
+
+ extern GenericCounter shardedCursorTypes;
+}
diff --git a/src/mongo/s/strategy.cpp b/src/mongo/s/strategy.cpp
new file mode 100644
index 00000000000..6f02c4183b1
--- /dev/null
+++ b/src/mongo/s/strategy.cpp
@@ -0,0 +1,111 @@
+// @file strategy.cpp
+
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+
+#include "../client/connpool.h"
+#include "../db/commands.h"
+
+#include "grid.h"
+#include "request.h"
+#include "server.h"
+#include "writeback_listener.h"
+
+#include "strategy.h"
+
+namespace mongo {
+
+ // ----- Strategy ------
+
+ void Strategy::doWrite( int op , Request& r , const Shard& shard , bool checkVersion ) {
+ ShardConnection conn( shard , r.getns() );
+ if ( ! checkVersion )
+ conn.donotCheckVersion();
+ else if ( conn.setVersion() ) {
+ conn.done();
+ throw RecvStaleConfigException( r.getns() , "doWrite" , true );
+ }
+ conn->say( r.m() );
+ conn.done();
+ }
+
+ void Strategy::doQuery( Request& r , const Shard& shard ) {
+
+ r.checkAuth( Auth::READ );
+
+ ShardConnection dbcon( shard , r.getns() );
+ DBClientBase &c = dbcon.conn();
+
+ string actualServer;
+
+ Message response;
+ bool ok = c.call( r.m(), response, true , &actualServer );
+ uassert( 10200 , "mongos: error calling db", ok );
+
+ {
+ QueryResult *qr = (QueryResult *) response.singleData();
+ if ( qr->resultFlags() & ResultFlag_ShardConfigStale ) {
+ dbcon.done();
+ throw RecvStaleConfigException( r.getns() , "Strategy::doQuery" );
+ }
+ }
+
+ r.reply( response , actualServer.size() ? actualServer : c.getServerAddress() );
+ dbcon.done();
+ }
+
+ void Strategy::insert( const Shard& shard , const char * ns , const BSONObj& obj , int flags, bool safe ) {
+ ShardConnection dbcon( shard , ns );
+ if ( dbcon.setVersion() ) {
+ dbcon.done();
+ throw RecvStaleConfigException( ns , "for insert" );
+ }
+ dbcon->insert( ns , obj , flags);
+ if (safe)
+ dbcon->getLastError();
+ dbcon.done();
+ }
+
+ void Strategy::insert( const Shard& shard , const char * ns , const vector<BSONObj>& v , int flags, bool safe ) {
+ ShardConnection dbcon( shard , ns );
+ if ( dbcon.setVersion() ) {
+ dbcon.done();
+ throw RecvStaleConfigException( ns , "for insert" );
+ }
+ dbcon->insert( ns , v , flags);
+ if (safe)
+ dbcon->getLastError();
+ dbcon.done();
+ }
+
+ void Strategy::update( const Shard& shard , const char * ns , const BSONObj& query , const BSONObj& toupdate , int flags, bool safe ) {
+ bool upsert = flags & UpdateOption_Upsert;
+ bool multi = flags & UpdateOption_Multi;
+
+ ShardConnection dbcon( shard , ns );
+ if ( dbcon.setVersion() ) {
+ dbcon.done();
+ throw RecvStaleConfigException( ns , "for insert" );
+ }
+ dbcon->update( ns , query , toupdate, upsert, multi);
+ if (safe)
+ dbcon->getLastError();
+ dbcon.done();
+ }
+
+}
diff --git a/src/mongo/s/strategy.h b/src/mongo/s/strategy.h
new file mode 100644
index 00000000000..25c9b97630e
--- /dev/null
+++ b/src/mongo/s/strategy.h
@@ -0,0 +1,59 @@
+// strategy.h
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#pragma once
+
+#include "../pch.h"
+#include "chunk.h"
+#include "request.h"
+
+namespace mongo {
+
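+    /**
+     * Base class for mongos request routing. Two concrete strategies are registered
+     * below: SINGLE forwards an operation to a single (primary) shard, while SHARDED
+     * consults the chunk manager and fans out across the owning shards.
+     */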
+ class Strategy {
+ public:
+ Strategy() {}
+ virtual ~Strategy() {}
+ virtual void queryOp( Request& r ) = 0;
+ virtual void getMore( Request& r ) = 0;
+ virtual void writeOp( int op , Request& r ) = 0;
+
+ void insert( const Shard& shard , const char * ns , const BSONObj& obj , int flags=0 , bool safe=false );
+
+ virtual void commandOp( const string& db, const BSONObj& command, int options,
+ const string& versionedNS, const BSONObj& filter,
+ map<Shard,BSONObj>& results )
+ {
+ // Only call this from sharded, for now.
+ // TODO: Refactor all this.
+ assert( false );
+ }
+
+ protected:
+ void doWrite( int op , Request& r , const Shard& shard , bool checkVersion = true );
+ void doQuery( Request& r , const Shard& shard );
+
+ void insert( const Shard& shard , const char * ns , const vector<BSONObj>& v , int flags=0 , bool safe=false );
+ void update( const Shard& shard , const char * ns , const BSONObj& query , const BSONObj& toupdate , int flags=0, bool safe=false );
+
+ };
+
+ extern Strategy * SINGLE;
+ extern Strategy * SHARDED;
+
+}
+
diff --git a/src/mongo/s/strategy_shard.cpp b/src/mongo/s/strategy_shard.cpp
new file mode 100644
index 00000000000..3f4c105204a
--- /dev/null
+++ b/src/mongo/s/strategy_shard.cpp
@@ -0,0 +1,414 @@
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+// strategy_shard.cpp
+
+#include "pch.h"
+#include "request.h"
+#include "chunk.h"
+#include "cursors.h"
+#include "stats.h"
+#include "client.h"
+
+#include "../client/connpool.h"
+#include "../db/commands.h"
+
+// error codes 8010-8040
+
+namespace mongo {
+
+ class ShardStrategy : public Strategy {
+
+ virtual void queryOp( Request& r ) {
+
+ // TODO: These probably should just be handled here.
+ if ( r.isCommand() ) {
+ SINGLE->queryOp( r );
+ return;
+ }
+
+ QueryMessage q( r.d() );
+
+ r.checkAuth( Auth::READ );
+
+ LOG(3) << "shard query: " << q.ns << " " << q.query << endl;
+
+ if ( q.ntoreturn == 1 && strstr(q.ns, ".$cmd") )
+ throw UserException( 8010 , "something is wrong, shouldn't see a command here" );
+
+ QuerySpec qSpec( (string)q.ns, q.query, q.fields, q.ntoskip, q.ntoreturn, q.queryOptions );
+
+ ParallelSortClusteredCursor * cursor = new ParallelSortClusteredCursor( qSpec, CommandInfo() );
+ assert( cursor );
+
+ // TODO: Move out to Request itself, not strategy based
+ try {
+ long long start_millis = 0;
+ if ( qSpec.isExplain() ) start_millis = curTimeMillis64();
+ cursor->init();
+
+ LOG(5) << " cursor type: " << cursor->type() << endl;
+ shardedCursorTypes.hit( cursor->type() );
+
+ if ( qSpec.isExplain() ) {
+ // fetch elapsed time for the query
+ long long elapsed_millis = curTimeMillis64() - start_millis;
+ BSONObjBuilder explain_builder;
+ cursor->explain( explain_builder );
+ explain_builder.appendNumber( "millis", elapsed_millis );
+ BSONObj b = explain_builder.obj();
+
+ replyToQuery( 0 , r.p() , r.m() , b );
+ delete( cursor );
+ return;
+ }
+ }
+ catch(...) {
+ delete cursor;
+ throw;
+ }
+
+ if( cursor->isSharded() ){
+ ShardedClientCursorPtr cc (new ShardedClientCursor( q , cursor ));
+
+ if ( ! cc->sendNextBatch( r ) ) {
+ return;
+ }
+
+ LOG(6) << "storing cursor : " << cc->getId() << endl;
+ cursorCache.store( cc );
+ }
+ else{
+ // TODO: Better merge this logic. We potentially can now use the same cursor logic for everything.
+ ShardPtr primary = cursor->getPrimary();
+ DBClientCursorPtr shardCursor = cursor->getShardCursor( *primary );
+ r.reply( *(shardCursor->getMessage()) , primary->getConnString() );
+ }
+ }
+
+ virtual void commandOp( const string& db, const BSONObj& command, int options,
+ const string& versionedNS, const BSONObj& filter,
+ map<Shard,BSONObj>& results )
+ {
+
+ QuerySpec qSpec( db + ".$cmd", command, BSONObj(), 0, 1, options );
+
+ ParallelSortClusteredCursor cursor( qSpec, CommandInfo( versionedNS, filter ) );
+
+ // Initialize the cursor
+ cursor.init();
+
+ set<Shard> shards;
+ cursor.getQueryShards( shards );
+
+ for( set<Shard>::iterator i = shards.begin(), end = shards.end(); i != end; ++i ){
+ results[ *i ] = cursor.getShardCursor( *i )->peekFirst().getOwned();
+ }
+
+ }
+
+ virtual void getMore( Request& r ) {
+
+ // TODO: Handle stale config exceptions here from coll being dropped or sharded during op
+ // for now has same semantics as legacy request
+ ChunkManagerPtr info = r.getChunkManager();
+
+ if( ! info ){
+ SINGLE->getMore( r );
+ return;
+ }
+ else {
+ int ntoreturn = r.d().pullInt();
+ long long id = r.d().pullInt64();
+
+ LOG(6) << "want cursor : " << id << endl;
+
+ ShardedClientCursorPtr cursor = cursorCache.get( id );
+ if ( ! cursor ) {
+ LOG(6) << "\t invalid cursor :(" << endl;
+ replyToQuery( ResultFlag_CursorNotFound , r.p() , r.m() , 0 , 0 , 0 );
+ return;
+ }
+
+ if ( cursor->sendNextBatch( r , ntoreturn ) ) {
+ // still more data
+ cursor->accessed();
+ return;
+ }
+
+ // we've exhausted the cursor
+ cursorCache.remove( id );
+ }
+ }
+
+ void _insert( Request& r , DbMessage& d, ChunkManagerPtr manager ) {
+ const int flags = d.reservedField() | InsertOption_ContinueOnError; // ContinueOnError is always on when using sharding.
+ map<ChunkPtr, vector<BSONObj> > insertsForChunk; // Group bulk insert for appropriate shards
+ try {
+ while ( d.moreJSObjs() ) {
+ BSONObj o = d.nextJsObj();
+ if ( ! manager->hasShardKey( o ) ) {
+
+ bool bad = true;
+
+ if ( manager->getShardKey().partOfShardKey( "_id" ) ) {
+ BSONObjBuilder b;
+ b.appendOID( "_id" , 0 , true );
+ b.appendElements( o );
+ o = b.obj();
+ bad = ! manager->hasShardKey( o );
+ }
+
+ if ( bad ) {
+ log() << "tried to insert object with no valid shard key: " << r.getns() << " " << o << endl;
+ uasserted( 8011 , "tried to insert object with no valid shard key" );
+ }
+
+ }
+
+ // Many operations benefit from having the shard key early in the object
+ o = manager->getShardKey().moveToFront(o);
+ insertsForChunk[manager->findChunk(o)].push_back(o);
+ }
+ for (map<ChunkPtr, vector<BSONObj> >::iterator it = insertsForChunk.begin(); it != insertsForChunk.end(); ++it) {
+ ChunkPtr c = it->first;
+ vector<BSONObj> objs = it->second;
+ const int maxTries = 30;
+
+ bool gotThrough = false;
+ for ( int i=0; i<maxTries; i++ ) {
+ try {
+ LOG(4) << " server:" << c->getShard().toString() << " bulk insert " << objs.size() << " documents" << endl;
+ insert( c->getShard() , r.getns() , objs , flags);
+
+ int bytesWritten = 0;
+ for (vector<BSONObj>::iterator vecIt = objs.begin(); vecIt != objs.end(); ++vecIt) {
+ r.gotInsert(); // Record the correct number of individual inserts
+ bytesWritten += (*vecIt).objsize();
+ }
+ if ( r.getClientInfo()->autoSplitOk() )
+ c->splitIfShould( bytesWritten );
+ gotThrough = true;
+ break;
+ }
+ catch ( StaleConfigException& e ) {
+ int logLevel = i < ( maxTries / 2 );
+ LOG( logLevel ) << "retrying bulk insert of " << objs.size() << " documents because of StaleConfigException: " << e << endl;
+ r.reset();
+
+ manager = r.getChunkManager();
+ if( ! manager ) {
+ uasserted(14804, "collection no longer sharded");
+ }
+
+ unsigned long long old = manager->getSequenceNumber();
+
+ LOG( logLevel ) << " sequence number - old: " << old << " new: " << manager->getSequenceNumber() << endl;
+ }
+ sleepmillis( i * 20 );
+ }
+
+ assert( inShutdown() || gotThrough ); // not caught below
+ }
+ } catch (const UserException&){
+ if (!d.moreJSObjs()){
+ throw;
+ }
+ // Ignore and keep going. ContinueOnError is implied with sharding.
+ }
+ }
+
+ void _update( Request& r , DbMessage& d, ChunkManagerPtr manager ) {
+ int flags = d.pullInt();
+
+ BSONObj query = d.nextJsObj();
+ uassert( 13506 , "$atomic not supported sharded" , query["$atomic"].eoo() );
+ uassert( 10201 , "invalid update" , d.moreJSObjs() );
+ BSONObj toupdate = d.nextJsObj();
+ BSONObj chunkFinder = query;
+
+ bool upsert = flags & UpdateOption_Upsert;
+ bool multi = flags & UpdateOption_Multi;
+
+ if (upsert) {
+ uassert(8012, "can't upsert something without valid shard key",
+ (manager->hasShardKey(toupdate) ||
+ (toupdate.firstElementFieldName()[0] == '$' && manager->hasShardKey(query))));
+
+ BSONObj key = manager->getShardKey().extractKey(query);
+ BSONForEach(e, key) {
+ uassert(13465, "shard key in upsert query must be an exact match", getGtLtOp(e) == BSONObj::Equality);
+ }
+ }
+
+ bool save = false;
+ if ( ! manager->hasShardKey( query ) ) {
+ if ( multi ) {
+ }
+ else if ( strcmp( query.firstElementFieldName() , "_id" ) || query.nFields() != 1 ) {
+ log() << "Query " << query << endl;
+ throw UserException( 8013 , "can't do non-multi update with query that doesn't have a valid shard key" );
+ }
+ else {
+ save = true;
+ chunkFinder = toupdate;
+ }
+ }
+
+
+ if ( ! save ) {
+ if ( toupdate.firstElementFieldName()[0] == '$' ) {
+ BSONObjIterator ops(toupdate);
+ while(ops.more()) {
+ BSONElement op(ops.next());
+ if (op.type() != Object)
+ continue;
+ BSONObjIterator fields(op.embeddedObject());
+ while(fields.more()) {
+ const string field = fields.next().fieldName();
+ uassert(13123,
+ str::stream() << "Can't modify shard key's value field" << field
+ << " for collection: " << manager->getns(),
+ ! manager->getShardKey().partOfShardKey(field));
+ }
+ }
+ }
+ else if ( manager->hasShardKey( toupdate ) ) {
+ uassert( 8014,
+ str::stream() << "cannot modify shard key for collection: " << manager->getns(),
+ manager->getShardKey().compare( query , toupdate ) == 0 );
+ }
+ else {
+ uasserted(12376,
+ str::stream() << "valid shard key must be in update object for collection: " << manager->getns() );
+ }
+ }
+
+ if ( multi ) {
+ set<Shard> shards;
+ manager->getShardsForQuery( shards , chunkFinder );
+ int * x = (int*)(r.d().afterNS());
+ x[0] |= UpdateOption_Broadcast;
+ for ( set<Shard>::iterator i=shards.begin(); i!=shards.end(); i++) {
+ doWrite( dbUpdate , r , *i , false );
+ }
+ }
+ else {
+ int left = 5;
+ while ( true ) {
+ try {
+ ChunkPtr c = manager->findChunk( chunkFinder );
+ doWrite( dbUpdate , r , c->getShard() );
+ if ( r.getClientInfo()->autoSplitOk() )
+ c->splitIfShould( d.msg().header()->dataLen() );
+ break;
+ }
+ catch ( StaleConfigException& e ) {
+ if ( left <= 0 )
+ throw e;
+ left--;
+ log() << "update will be retried b/c sharding config info is stale, "
+ << " left:" << left << " ns: " << r.getns() << " query: " << query << endl;
+ r.reset();
+ manager = r.getChunkManager();
+ uassert(14806, "collection no longer sharded", manager);
+ }
+ }
+ }
+ }
+
+ void _delete( Request& r , DbMessage& d, ChunkManagerPtr manager ) {
+
+ int flags = d.pullInt();
+ bool justOne = flags & 1;
+
+ uassert( 10203 , "bad delete message" , d.moreJSObjs() );
+ BSONObj pattern = d.nextJsObj();
+ uassert( 13505 , "$atomic not supported sharded" , pattern["$atomic"].eoo() );
+
+ set<Shard> shards;
+ int left = 5;
+
+ while ( true ) {
+ try {
+ manager->getShardsForQuery( shards , pattern );
+ LOG(2) << "delete : " << pattern << " \t " << shards.size() << " justOne: " << justOne << endl;
+ if ( shards.size() == 1 ) {
+ doWrite( dbDelete , r , *shards.begin() );
+ return;
+ }
+ break;
+ }
+ catch ( StaleConfigException& e ) {
+ if ( left <= 0 )
+ throw e;
+ left--;
+ log() << "delete failed b/c of StaleConfigException, retrying "
+ << " left:" << left << " ns: " << r.getns() << " patt: " << pattern << endl;
+ r.reset();
+ shards.clear();
+ manager = r.getChunkManager();
+ uassert(14805, "collection no longer sharded", manager);
+ }
+ }
+
+ if ( justOne && ! pattern.hasField( "_id" ) )
+ throw UserException( 8015 , "can only delete with a non-shard key pattern if can delete as many as we find" );
+
+ for ( set<Shard>::iterator i=shards.begin(); i!=shards.end(); i++) {
+ int * x = (int*)(r.d().afterNS());
+ x[0] |= RemoveOption_Broadcast;
+ doWrite( dbDelete , r , *i , false );
+ }
+ }
+
+ virtual void writeOp( int op , Request& r ) {
+
+ // TODO: Handle stale config exceptions here from coll being dropped or sharded during op
+ // for now has same semantics as legacy request
+ ChunkManagerPtr info = r.getChunkManager();
+
+ if( ! info ){
+ SINGLE->writeOp( op, r );
+ return;
+ }
+ else{
+ const char *ns = r.getns();
+ LOG(3) << "write: " << ns << endl;
+
+ DbMessage& d = r.d();
+
+ if ( op == dbInsert ) {
+ _insert( r , d , info );
+ }
+ else if ( op == dbUpdate ) {
+ _update( r , d , info );
+ }
+ else if ( op == dbDelete ) {
+ _delete( r , d , info );
+ }
+ else {
+ log() << "sharding can't do write op: " << op << endl;
+ throw UserException( 8016 , "can't do this write op on sharded collection" );
+ }
+ return;
+ }
+ }
+
+ };
+
+ Strategy * SHARDED = new ShardStrategy();
+}
diff --git a/src/mongo/s/strategy_single.cpp b/src/mongo/s/strategy_single.cpp
new file mode 100644
index 00000000000..d3cd958b6b1
--- /dev/null
+++ b/src/mongo/s/strategy_single.cpp
@@ -0,0 +1,272 @@
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+// strategy_single.cpp
+
+#include "pch.h"
+#include "request.h"
+#include "cursors.h"
+#include "../client/connpool.h"
+#include "../db/commands.h"
+
+namespace mongo {
+
+ class SingleStrategy : public Strategy {
+
+ public:
+ SingleStrategy() {
+ _commandsSafeToPass.insert( "$eval" );
+ _commandsSafeToPass.insert( "create" );
+ }
+
+ private:
+ virtual void queryOp( Request& r ) {
+ QueryMessage q( r.d() );
+
+ LOG(3) << "single query: " << q.ns << " " << q.query << " ntoreturn: " << q.ntoreturn << " options : " << q.queryOptions << endl;
+
+ if ( r.isCommand() ) {
+
+ if ( handleSpecialNamespaces( r , q ) )
+ return;
+
+ int loops = 5;
+ while ( true ) {
+ BSONObjBuilder builder;
+ try {
+ BSONObj cmdObj = q.query;
+ {
+ BSONElement e = cmdObj.firstElement();
+ if ( e.type() == Object && (e.fieldName()[0] == '$'
+ ? str::equals("query", e.fieldName()+1)
+ : str::equals("query", e.fieldName())))
+ cmdObj = e.embeddedObject();
+ }
+ bool ok = Command::runAgainstRegistered(q.ns, cmdObj, builder, q.queryOptions);
+ if ( ok ) {
+ BSONObj x = builder.done();
+ replyToQuery(0, r.p(), r.m(), x);
+ return;
+ }
+ break;
+ }
+ catch ( StaleConfigException& e ) {
+ if ( loops <= 0 )
+ throw e;
+
+ loops--;
+ log() << "retrying command: " << q.query << endl;
+ ShardConnection::checkMyConnectionVersions( e.getns() );
+ if( loops < 4 ) versionManager.forceRemoteCheckShardVersionCB( e.getns() );
+ }
+ catch ( AssertionException& e ) {
+ e.getInfo().append( builder , "assertion" , "assertionCode" );
+ builder.append( "errmsg" , "db assertion failure" );
+ builder.append( "ok" , 0 );
+ BSONObj x = builder.done();
+ replyToQuery(0, r.p(), r.m(), x);
+ return;
+ }
+ }
+
+ string commandName = q.query.firstElementFieldName();
+
+ uassert(13390, "unrecognized command: " + commandName, _commandsSafeToPass.count(commandName) != 0);
+ }
+
+ doQuery( r , r.primaryShard() );
+ }
+
+ virtual void getMore( Request& r ) {
+ const char *ns = r.getns();
+
+ LOG(3) << "single getmore: " << ns << endl;
+
+ long long id = r.d().getInt64( 4 );
+
+            // we use ScopedDbConnection because we don't care about config versions
+ // not deleting data is handled elsewhere
+ // and we don't want to call setShardVersion
+ ScopedDbConnection conn( cursorCache.getRef( id ) );
+
+ Message response;
+ bool ok = conn->callRead( r.m() , response);
+ uassert( 10204 , "dbgrid: getmore: error calling db", ok);
+ r.reply( response , "" /*conn->getServerAddress() */ );
+
+ conn.done();
+
+ }
+
+ void handleIndexWrite( int op , Request& r ) {
+
+ DbMessage& d = r.d();
+
+ if ( op == dbInsert ) {
+ while( d.moreJSObjs() ) {
+ BSONObj o = d.nextJsObj();
+ const char * ns = o["ns"].valuestr();
+ if ( r.getConfig()->isSharded( ns ) ) {
+ BSONObj newIndexKey = o["key"].embeddedObjectUserCheck();
+
+ uassert( 10205 , (string)"can't use unique indexes with sharding ns:" + ns +
+ " key: " + o["key"].embeddedObjectUserCheck().toString() ,
+ IndexDetails::isIdIndexPattern( newIndexKey ) ||
+ ! o["unique"].trueValue() ||
+ r.getConfig()->getChunkManager( ns )->getShardKey().isPrefixOf( newIndexKey ) );
+
+ ChunkManagerPtr cm = r.getConfig()->getChunkManager( ns );
+ assert( cm );
+
+ set<Shard> shards;
+ cm->getAllShards(shards);
+ for (set<Shard>::const_iterator it=shards.begin(), end=shards.end(); it != end; ++it)
+ doWrite( op , r , *it );
+ }
+ else {
+ doWrite( op , r , r.primaryShard() );
+ }
+ r.gotInsert();
+ }
+ }
+ else if ( op == dbUpdate ) {
+ throw UserException( 8050 , "can't update system.indexes" );
+ }
+ else if ( op == dbDelete ) {
+ // TODO
+ throw UserException( 8051 , "can't delete indexes on sharded collection yet" );
+ }
+ else {
+ log() << "handleIndexWrite invalid write op: " << op << endl;
+ throw UserException( 8052 , "handleIndexWrite invalid write op" );
+ }
+
+ }
+
+ virtual void writeOp( int op , Request& r ) {
+ const char *ns = r.getns();
+
+ if ( r.isShardingEnabled() &&
+ strstr( ns , ".system.indexes" ) == strchr( ns , '.' ) &&
+ strchr( ns , '.' ) ) {
+ LOG(1) << " .system.indexes write for: " << ns << endl;
+ handleIndexWrite( op , r );
+ return;
+ }
+
+ LOG(3) << "single write: " << ns << endl;
+ doWrite( op , r , r.primaryShard() );
+            r.gotInsert(); // Won't handle multi-insert correctly. Not worth parsing the request.
+ }
+
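+        /**
+         * Handles the $cmd.sys.* pseudo-collections across the cluster: "inprog" is
+         * gathered from every shard (opids rewritten as "<shardName>:<opid>"),
+         * "killop" is routed to the shard named in the opid, and "unlock" is rejected.
+         * Returns false if the namespace is not one it knows about.
+         */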
+ bool handleSpecialNamespaces( Request& r , QueryMessage& q ) {
+ const char * ns = r.getns();
+ ns = strstr( r.getns() , ".$cmd.sys." );
+ if ( ! ns )
+ return false;
+ ns += 10;
+
+ r.checkAuth( Auth::WRITE );
+
+ BSONObjBuilder b;
+ vector<Shard> shards;
+
+ if ( strcmp( ns , "inprog" ) == 0 ) {
+ Shard::getAllShards( shards );
+
+ BSONArrayBuilder arr( b.subarrayStart( "inprog" ) );
+
+ for ( unsigned i=0; i<shards.size(); i++ ) {
+ Shard shard = shards[i];
+ ScopedDbConnection conn( shard );
+ BSONObj temp = conn->findOne( r.getns() , BSONObj() );
+ if ( temp["inprog"].isABSONObj() ) {
+ BSONObjIterator i( temp["inprog"].Obj() );
+ while ( i.more() ) {
+ BSONObjBuilder x;
+
+ BSONObjIterator j( i.next().Obj() );
+ while( j.more() ) {
+ BSONElement e = j.next();
+ if ( str::equals( e.fieldName() , "opid" ) ) {
+ stringstream ss;
+ ss << shard.getName() << ':' << e.numberInt();
+ x.append( "opid" , ss.str() );
+ }
+ else if ( str::equals( e.fieldName() , "client" ) ) {
+ x.appendAs( e , "client_s" );
+ }
+ else {
+ x.append( e );
+ }
+ }
+ arr.append( x.obj() );
+ }
+ }
+ conn.done();
+ }
+
+ arr.done();
+ }
+ else if ( strcmp( ns , "killop" ) == 0 ) {
+ BSONElement e = q.query["op"];
+ if ( strstr( r.getns() , "admin." ) == 0 ) {
+ b.append( "err" , "unauthorized" );
+ }
+ else if ( e.type() != String ) {
+ b.append( "err" , "bad op" );
+ b.append( e );
+ }
+ else {
+ b.append( e );
+ string s = e.String();
+ string::size_type i = s.find( ':' );
+ if ( i == string::npos ) {
+ b.append( "err" , "bad opid" );
+ }
+ else {
+ string shard = s.substr( 0 , i );
+ int opid = atoi( s.substr( i + 1 ).c_str() );
+ b.append( "shard" , shard );
+ b.append( "shardid" , opid );
+
+ log() << "want to kill op: " << e << endl;
+ Shard s(shard);
+
+ ScopedDbConnection conn( s );
+ conn->findOne( r.getns() , BSON( "op" << opid ) );
+ conn.done();
+ }
+ }
+ }
+ else if ( strcmp( ns , "unlock" ) == 0 ) {
+ b.append( "err" , "can't do unlock through mongos" );
+ }
+ else {
+ log( LL_WARNING ) << "unknown sys command [" << ns << "]" << endl;
+ return false;
+ }
+
+ BSONObj x = b.done();
+ replyToQuery(0, r.p(), r.m(), x);
+ return true;
+ }
+
+ set<string> _commandsSafeToPass;
+ };
+
+ Strategy * SINGLE = new SingleStrategy();
+}
diff --git a/src/mongo/s/util.h b/src/mongo/s/util.h
new file mode 100644
index 00000000000..cce2131ca55
--- /dev/null
+++ b/src/mongo/s/util.h
@@ -0,0 +1,183 @@
+// util.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../pch.h"
+#include "../client/dbclient.h"
+#include "../db/jsobj.h"
+
+/**
+ some generic sharding utils that can be used in mongod or mongos
+ */
+
+namespace mongo {
+
+ struct ShardChunkVersion {
+ union {
+ struct {
+ int _minor;
+ int _major;
+ };
+ unsigned long long _combined;
+ };
+
+ ShardChunkVersion( int major=0, int minor=0 )
+ : _minor(minor),_major(major) {
+ }
+
+ ShardChunkVersion( unsigned long long ll )
+ : _combined( ll ) {
+ }
+
+ ShardChunkVersion( const BSONElement& e ) {
+ if ( e.type() == Date || e.type() == Timestamp ) {
+ _combined = e._numberLong();
+ }
+ else if ( e.eoo() ) {
+ _combined = 0;
+ }
+ else {
+ _combined = 0;
+ log() << "ShardChunkVersion can't handle type (" << (int)(e.type()) << ") " << e << endl;
+ assert(0);
+ }
+ }
+
+ void inc( bool major ) {
+ if ( major )
+ incMajor();
+ else
+ incMinor();
+ }
+
+ void incMajor() {
+ _major++;
+ _minor = 0;
+ }
+
+ void incMinor() {
+ _minor++;
+ }
+
+ unsigned long long toLong() const {
+ return _combined;
+ }
+
+ bool isSet() const {
+ return _combined > 0;
+ }
+
+ string toString() const {
+ stringstream ss;
+ ss << _major << "|" << _minor;
+ return ss.str();
+ }
+
+ int majorVersion() const { return _major; }
+ int minorVersion() const { return _minor; }
+
+ operator unsigned long long() const { return _combined; }
+
+ ShardChunkVersion& operator=( const BSONElement& elem ) {
+ switch ( elem.type() ) {
+ case Timestamp:
+ case NumberLong:
+ case Date:
+ _combined = elem._numberLong();
+ break;
+ case EOO:
+ _combined = 0;
+ break;
+ default:
+ massert( 13657 , str::stream() << "unknown type for ShardChunkVersion: " << elem , 0 );
+ }
+ return *this;
+ }
+ };
+
+ inline ostream& operator<<( ostream &s , const ShardChunkVersion& v) {
+ s << v._major << "|" << v._minor;
+ return s;
+ }
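+
+ // Illustrative sketch, not part of this header: how the two halves are typically
+ // used. A chunk split bumps the minor version, a migration bumps the major version,
+ // and ordering falls out of the combined 64-bit value:
+ //
+ //     ShardChunkVersion v( 3 , 1 );                    // 3|1
+ //     v.incMinor();                                    // 3|2
+ //     v.incMajor();                                    // 4|0
+ //     bool newer = v > ShardChunkVersion( 3 , 2 );     // true - compares _combined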
+
+ /**
+ * your config info for a given shard/chunk is out of date
+ */
+ class StaleConfigException : public AssertionException {
+ public:
+ StaleConfigException( const string& ns , const string& raw , int code, bool justConnection = false )
+ : AssertionException( (string)"ns: " + ns + " " + raw , code ) ,
+ _justConnection(justConnection) ,
+ _ns(ns) {
+ }
+
+ virtual ~StaleConfigException() throw() {}
+
+ virtual void appendPrefix( stringstream& ss ) const { ss << "stale sharding config exception: "; }
+
+ bool justConnection() const { return _justConnection; }
+
+ string getns() const { return _ns; }
+
+ static bool parse( const string& big , string& ns , string& raw ) {
+ string::size_type start = big.find( '[' );
+ if ( start == string::npos )
+ return false;
+ string::size_type end = big.find( ']' ,start );
+ if ( end == string::npos )
+ return false;
+
+ ns = big.substr( start + 1 , ( end - start ) - 1 );
+ raw = big.substr( end + 1 );
+ return true;
+ }
+ private:
+ bool _justConnection;
+ string _ns;
+ };
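+
+ // Illustrative sketch, not part of this header: recovering ns/raw from an error
+ // string that crossed a process boundary as plain text, using parse() above:
+ //
+ //     string ns, raw;
+ //     if ( StaleConfigException::parse( "stale config [test.foo] version mismatch" , ns , raw ) ) {
+ //         // ns == "test.foo" , raw == " version mismatch"
+ //     }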
+
+ class SendStaleConfigException : public StaleConfigException {
+ public:
+ SendStaleConfigException( const string& ns , const string& raw , bool justConnection = false )
+ : StaleConfigException( ns, raw + "(send)", SendStaleConfigCode, justConnection ) {}
+ };
+
+ class RecvStaleConfigException : public StaleConfigException {
+ public:
+ RecvStaleConfigException( const string& ns , const string& raw , bool justConnection = false )
+ : StaleConfigException( ns, raw + "(recv)", RecvStaleConfigCode, justConnection ) {}
+ };
+
+ class ShardConnection;
+ class VersionManager {
+ public:
+ VersionManager(){};
+
+ bool isVersionableCB( DBClientBase* );
+ bool initShardVersionCB( DBClientBase*, BSONObj& );
+ bool forceRemoteCheckShardVersionCB( const string& );
+ bool checkShardVersionCB( DBClientBase*, const string&, bool, int );
+ bool checkShardVersionCB( ShardConnection*, bool, int );
+ void resetShardVersionCB( DBClientBase* );
+
+ };
+
+ extern VersionManager versionManager;
+
+}
diff --git a/src/mongo/s/writeback_listener.cpp b/src/mongo/s/writeback_listener.cpp
new file mode 100644
index 00000000000..ebdefb05785
--- /dev/null
+++ b/src/mongo/s/writeback_listener.cpp
@@ -0,0 +1,285 @@
+// @file writeback_listener.cpp
+
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+
+#include "../util/timer.h"
+
+#include "config.h"
+#include "grid.h"
+#include "request.h"
+#include "server.h"
+#include "shard.h"
+#include "util.h"
+#include "client.h"
+
+#include "writeback_listener.h"
+
+namespace mongo {
+
+ map<string,WriteBackListener*> WriteBackListener::_cache;
+ set<string> WriteBackListener::_seenSets;
+ mongo::mutex WriteBackListener::_cacheLock("WriteBackListener");
+
+ map<WriteBackListener::ConnectionIdent,WriteBackListener::WBStatus> WriteBackListener::_seenWritebacks;
+ mongo::mutex WriteBackListener::_seenWritebacksLock("WriteBackListener::seen");
+
+ WriteBackListener::WriteBackListener( const string& addr ) : _addr( addr ) {
+ _name = str::stream() << "WriteBackListener-" << addr;
+ log() << "creating WriteBackListener for: " << addr << " serverID: " << serverID << endl;
+ }
+
+ /* static */
+ void WriteBackListener::init( DBClientBase& conn ) {
+
+ if ( conn.type() == ConnectionString::SYNC ) {
+ // don't want write back listeners for config servers
+ return;
+ }
+
+ if ( conn.type() != ConnectionString::SET ) {
+ init( conn.getServerAddress() );
+ return;
+ }
+
+
+ {
+ scoped_lock lk( _cacheLock );
+ if ( _seenSets.count( conn.getServerAddress() ) )
+ return;
+ }
+
+ // we want to do writebacks on all rs nodes
+ string errmsg;
+ ConnectionString cs = ConnectionString::parse( conn.getServerAddress() , errmsg );
+ uassert( 13641 , str::stream() << "can't parse host [" << conn.getServerAddress() << "]" , cs.isValid() );
+
+ vector<HostAndPort> hosts = cs.getServers();
+
+ for ( unsigned i=0; i<hosts.size(); i++ )
+ init( hosts[i].toString() );
+
+ }
+
+ /* static */
+ void WriteBackListener::init( const string& host ) {
+ scoped_lock lk( _cacheLock );
+ WriteBackListener*& l = _cache[host];
+ if ( l )
+ return;
+ l = new WriteBackListener( host );
+ l->go();
+ }
+
+ /* static */
+ BSONObj WriteBackListener::waitFor( const ConnectionIdent& ident, const OID& oid ) {
+ Timer t;
+ for ( int i=0; i<10000; i++ ) {
+ {
+ scoped_lock lk( _seenWritebacksLock );
+ WBStatus s = _seenWritebacks[ident];
+ if ( oid < s.id ) {
+ // this means we're waiting for a GLE that already passed.
+ // it should be impossible because once we call GLE, no other
+ // writebacks should happen with that connection id
+
+ msgasserted( 14041 , str::stream() << "got writeback waitfor for older id " <<
+ " oid: " << oid << " s.id: " << s.id << " ident: " << ident.toString() );
+ }
+ else if ( oid == s.id ) {
+ return s.gle;
+ }
+
+ }
+ sleepmillis( 10 );
+ }
+ uasserted( 13403 , str::stream() << "didn't get writeback for: " << oid << " after: " << t.millis() << " ms" );
+ throw 1; // never gets here
+ }
+
+ void WriteBackListener::run() {
+ int secsToSleep = 0;
+ while ( ! inShutdown() ) {
+
+ if ( ! Shard::isAShardNode( _addr ) ) {
+ LOG(1) << _addr << " is not a shard node" << endl;
+ sleepsecs( 60 );
+ continue;
+ }
+
+ try {
+ ScopedDbConnection conn( _addr );
+
+ BSONObj result;
+
+ {
+ BSONObjBuilder cmd;
+ cmd.appendOID( "writebacklisten" , &serverID ); // Command will block for data
+ if ( ! conn->runCommand( "admin" , cmd.obj() , result ) ) {
+ result = result.getOwned();
+ log() << "writebacklisten command failed! " << result << endl;
+ conn.done();
+ continue;
+ }
+
+ }
+
+ LOG(1) << "writebacklisten result: " << result << endl;
+
+ BSONObj data = result.getObjectField( "data" );
+ if ( data.getBoolField( "writeBack" ) ) {
+ string ns = data["ns"].valuestrsafe();
+
+ ConnectionIdent cid( "" , 0 );
+ OID wid;
+ if ( data["connectionId"].isNumber() && data["id"].type() == jstOID ) {
+ string s = "";
+ if ( data["instanceIdent"].type() == String )
+ s = data["instanceIdent"].String();
+ cid = ConnectionIdent( s , data["connectionId"].numberLong() );
+ wid = data["id"].OID();
+ }
+ else {
+ warning() << "mongos/mongod version mismatch (1.7.5 is the split)" << endl;
+ }
+
+ int len; // not used, but needed for next call
+ Message m( (void*)data["msg"].binData( len ) , false );
+ massert( 10427 , "invalid writeback message" , m.header()->valid() );
+
+ DBConfigPtr db = grid.getDBConfig( ns );
+ ShardChunkVersion needVersion( data["version"] );
+
+ // TODO: The logic here could be refactored, but keeping to the original codepath for safety for now
+ ChunkManagerPtr manager = db->getChunkManagerIfExists( ns );
+
+ LOG(1) << "connectionId: " << cid << " writebackId: " << wid << " needVersion : " << needVersion.toString()
+ << " mine : " << ( manager ? manager->getVersion().toString() : "(unknown)" )
+ << endl;
+
+ LOG(1) << m.toString() << endl;
+
+ if ( needVersion.isSet() && manager && needVersion <= manager->getVersion() ) {
+ // this means when the write went originally, the version was old
+ // if we're here, it means we've already updated the config, so don't need to do again
+ //db->getChunkManager( ns , true ); // SERVER-1349
+ }
+ else {
+ // we received a writeback object that was sent to a previous version of a shard
+ // the actual shard may not have the object the writeback operation is for
+ // we need to reload the chunk manager and get the new shard versions
+ manager = db->getChunkManager( ns , true );
+ }
+
+ // do request and then call getLastError
+ // we have to call getLastError so we can return the right fields to the user if they decide to call getLastError
+
+ BSONObj gle;
+ int attempts = 0;
+ while ( true ) {
+ attempts++;
+
+ try {
+
+ Request r( m , 0 );
+ r.init();
+
+ r.d().reservedField() |= DbMessage::Reserved_FromWriteback;
+
+ ClientInfo * ci = r.getClientInfo();
+ if (!noauth) {
+ ci->getAuthenticationInfo()->authorize("admin", internalSecurity.user);
+ }
+ ci->noAutoSplit();
+
+ r.process();
+
+ ci->newRequest(); // this so we flip prev and cur shards
+
+ BSONObjBuilder b;
+ if ( ! ci->getLastError( BSON( "getLastError" << 1 ) , b , true ) ) {
+ b.appendBool( "commandFailed" , true );
+ }
+ gle = b.obj();
+
+ if ( gle["code"].numberInt() == 9517 ) {
+ log() << "writeback failed because of stale config, retrying attempts: " << attempts << endl;
+ if( ! db->getChunkManagerIfExists( ns , true, attempts > 2 ) ){
+ uassert( 15884, str::stream() << "Could not reload chunk manager after " << attempts << " attempts.", attempts <= 4 );
+ sleepsecs( attempts - 1 );
+ }
+ continue;
+ }
+
+ ci->clearSinceLastGetError();
+ }
+ catch ( DBException& e ) {
+ error() << "error processing writeback: " << e << endl;
+ BSONObjBuilder b;
+ b.append( "err" , e.toString() );
+ e.getInfo().append( b );
+ gle = b.obj();
+ }
+
+ break;
+ }
+
+ {
+ scoped_lock lk( _seenWritebacksLock );
+ WBStatus& s = _seenWritebacks[cid];
+ s.id = wid;
+ s.gle = gle;
+ }
+ }
+ else if ( result["noop"].trueValue() ) {
+ // no-op
+ }
+ else {
+ log() << "unknown writeBack result: " << result << endl;
+ }
+
+ conn.done();
+ secsToSleep = 0;
+ continue;
+ }
+ catch ( std::exception& e ) {
+
+ if ( inShutdown() ) {
+ // we're shutting down, so just clean up
+ return;
+ }
+
+ log() << "WriteBackListener exception : " << e.what() << endl;
+
+ // It's possible this shard was removed
+ Shard::reloadShardInfo();
+ }
+ catch ( ... ) {
+ log() << "WriteBackListener uncaught exception!" << endl;
+ }
+ secsToSleep++;
+ sleepsecs(secsToSleep);
+ if ( secsToSleep > 10 )
+ secsToSleep = 0;
+ }
+
+ log() << "WriteBackListener exiting : address no longer in cluster " << _addr;
+
+ }
+
+} // namespace mongo
diff --git a/src/mongo/s/writeback_listener.h b/src/mongo/s/writeback_listener.h
new file mode 100644
index 00000000000..1ef33dab1ec
--- /dev/null
+++ b/src/mongo/s/writeback_listener.h
@@ -0,0 +1,89 @@
+// @file writeback_listener.h
+
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../pch.h"
+
+#include "../client/connpool.h"
+#include "../util/background.h"
+#include "../db/client.h"
+
+namespace mongo {
+
+ /*
+ * The writeback listener takes back write attempts that were made against the wrong shard.
+ * (Wrong in the sense that the target chunk moved before this mongos had a chance to
+ * learn of it.) It is responsible for reapplying these writes to the correct shard.
+ *
+ * Runs (instantiated) on mongos.
+ * Currently, there is one writebacklistener per shard.
+ */
+ class WriteBackListener : public BackgroundJob {
+ public:
+
+ class ConnectionIdent {
+ public:
+ ConnectionIdent( const string& ii , ConnectionId id )
+ : instanceIdent( ii ) , connectionId( id ) {
+ }
+
+ bool operator<(const ConnectionIdent& other) const {
+ if ( instanceIdent == other.instanceIdent )
+ return connectionId < other.connectionId;
+
+ return instanceIdent < other.instanceIdent;
+ }
+
+ string toString() const { return str::stream() << instanceIdent << ":" << connectionId; }
+
+ string instanceIdent;
+ ConnectionId connectionId;
+ };
+
+ static void init( DBClientBase& conn );
+ static void init( const string& host );
+
+ static BSONObj waitFor( const ConnectionIdent& ident, const OID& oid );
+
+ protected:
+ WriteBackListener( const string& addr );
+
+ string name() const { return _name; }
+ void run();
+
+ private:
+ string _addr;
+ string _name;
+
+ static mongo::mutex _cacheLock; // protects _cache
+ static map<string,WriteBackListener*> _cache; // server to listener
+ static set<string> _seenSets; // cache of replica set connection strings we've seen - note this grows without bound, since the same set may appear with different host order, case, or membership
+
+ struct WBStatus {
+ OID id;
+ BSONObj gle;
+ };
+
+ static mongo::mutex _seenWritebacksLock; // protects _seenWritebacks
+ static map<ConnectionIdent,WBStatus> _seenWritebacks; // connectionId -> last write back GLE
+ };
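+
+ // Illustrative sketch, not part of this header (names below are placeholders): the
+ // intended call pattern, pieced together from the declarations above. mongos
+ // registers a listener per shard host, and a write that was flagged as written
+ // back blocks on the matching getLastError result:
+ //
+ //     WriteBackListener::init( shardConn ); // starts the background job(s)
+ //     ...
+ //     WriteBackListener::ConnectionIdent ident( instanceIdent , connId );
+ //     BSONObj gle = WriteBackListener::waitFor( ident , writebackId );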
+
+ void waitForWriteback( const OID& oid );
+
+} // namespace mongo
diff --git a/src/mongo/scripting/bench.cpp b/src/mongo/scripting/bench.cpp
new file mode 100644
index 00000000000..01291b1e1f0
--- /dev/null
+++ b/src/mongo/scripting/bench.cpp
@@ -0,0 +1,785 @@
+/** @file bench.cpp */
+
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "engine.h"
+#include "../util/md5.hpp"
+#include "../util/version.h"
+#include "../client/dbclient.h"
+#include "../client/connpool.h"
+#include <pcrecpp.h>
+
+// ---------------------------------
+// ---- benchmarking system --------
+// ---------------------------------
+
+// TODO: Maybe extract as library to avoid code duplication?
+namespace {
+ inline pcrecpp::RE_Options flags2options(const char* flags) {
+ pcrecpp::RE_Options options;
+ options.set_utf8(true);
+ while ( flags && *flags ) {
+ if ( *flags == 'i' )
+ options.set_caseless(true);
+ else if ( *flags == 'm' )
+ options.set_multiline(true);
+ else if ( *flags == 'x' )
+ options.set_extended(true);
+ flags++;
+ }
+ return options;
+ }
+}
+
+namespace mongo {
+
+ struct BenchRunConfig {
+ BenchRunConfig() : _mutex( "BenchRunConfig" ) {
+ host = "localhost";
+ db = "test";
+ username = "";
+ password = "";
+
+ parallel = 1;
+ seconds = 1;
+ handleErrors = false;
+ hideErrors = false;
+ hideResults = true;
+
+ active = true;
+ threadsReady = 0;
+ error = false;
+ errCount = 0;
+ throwGLE = false;
+ breakOnTrap = true;
+ }
+
+ string host;
+ string db;
+ string username;
+ string password;
+
+ unsigned parallel;
+ double seconds;
+
+ bool hideResults;
+ bool handleErrors;
+ bool hideErrors;
+
+ shared_ptr< pcrecpp::RE > trapPattern;
+ shared_ptr< pcrecpp::RE > noTrapPattern;
+ shared_ptr< pcrecpp::RE > watchPattern;
+ shared_ptr< pcrecpp::RE > noWatchPattern;
+
+ BSONObj ops;
+
+ volatile bool active; // true at start; set to false when the run should stop
+ AtomicUInt threadsReady;
+
+ bool error;
+ bool throwGLE;
+ bool breakOnTrap;
+
+ AtomicUInt threadsActive;
+
+ mongo::mutex _mutex;
+ long long errCount;
+ BSONArrayBuilder trapped;
+ };
+
+ static bool _hasSpecial( const BSONObj& obj ) {
+ BSONObjIterator i( obj );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( e.fieldName()[0] == '#' )
+ return true;
+
+ if ( ! e.isABSONObj() )
+ continue;
+
+ if ( _hasSpecial( e.Obj() ) )
+ return true;
+ }
+ return false;
+ }
+
+ static void _fixField( BSONObjBuilder& b , const BSONElement& e ) {
+ assert( e.type() == Object );
+
+ BSONObj sub = e.Obj();
+ assert( sub.nFields() == 1 );
+
+ BSONElement f = sub.firstElement();
+ if ( str::equals( "#RAND_INT" , f.fieldName() ) ) {
+ BSONObjIterator i( f.Obj() );
+ int min = i.next().numberInt();
+ int max = i.next().numberInt();
+
+ int x = min + ( rand() % ( max - min ) );
+ b.append( e.fieldName() , x );
+ }
+ else {
+ uasserted( 14811 , str::stream() << "invalid bench dynamic piece: " << f.fieldName() );
+ }
+
+ }
+
+ static void fixQuery( BSONObjBuilder& b , const BSONObj& obj ) {
+ BSONObjIterator i( obj );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+
+ if ( ! e.isABSONObj() ) {
+ b.append( e );
+ continue;
+ }
+
+ BSONObj sub = e.Obj();
+ if ( sub.firstElement().fieldName()[0] == '#' ) {
+ _fixField( b , e );
+ }
+ else {
+ BSONObjBuilder xx( e.type() == Object ? b.subobjStart( e.fieldName() ) : b.subarrayStart( e.fieldName() ) );
+ fixQuery( xx , sub );
+ xx.done();
+ }
+
+ }
+ }
+
+ static BSONObj fixQuery( const BSONObj& obj ) {
+
+ if ( ! _hasSpecial( obj ) )
+ return obj;
+
+ BSONObjBuilder b( obj.objsize() + 128 );
+ fixQuery( b , obj );
+ return b.obj();
+ }
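+
+ // Illustrative only: given the "#RAND_INT" special handled above,
+ //     fixQuery( BSON( "x" << BSON( "#RAND_INT" << BSON_ARRAY( 0 << 100 ) ) ) )
+ // yields something like { x : 42 }, re-rolled on every call; objects without a
+ // '#'-prefixed field are returned unchanged.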
+
+
+ static void _benchThread( BenchRunConfig * config, ScopedDbConnection& conn ){
+
+ long long count = 0;
+ while ( config->active ) {
+ BSONObjIterator i( config->ops );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+
+ string ns = e["ns"].String();
+ string op = e["op"].String();
+
+ int delay = e["delay"].eoo() ? 0 : e["delay"].Int();
+
+ auto_ptr<Scope> scope;
+ ScriptingFunction scopeFunc = 0;
+ BSONObj scopeObj;
+
+ if (config->username != "") {
+ string errmsg;
+ if (!conn.get()->auth(config->db, config->username, config->password, errmsg)) {
+ uasserted(15931, "Authenticating to connection for _benchThread failed: " + errmsg);
+ }
+ }
+
+ bool check = ! e["check"].eoo();
+ if( check ){
+ if ( e["check"].type() == CodeWScope || e["check"].type() == Code || e["check"].type() == String ) {
+ scope = globalScriptEngine->getPooledScope( ns );
+ assert( scope.get() );
+
+ if ( e["check"].type() == CodeWScope ) {
+ scopeFunc = scope->createFunction( e["check"].codeWScopeCode() );
+ scopeObj = BSONObj( e["check"].codeWScopeScopeData() );
+ }
+ else {
+ scopeFunc = scope->createFunction( e["check"].valuestr() );
+ }
+
+ scope->init( &scopeObj );
+ assert( scopeFunc );
+ }
+ else {
+ warning() << "Invalid check type detected in benchRun op : " << e << endl;
+ check = false;
+ }
+ }
+
+ try {
+ if ( op == "findOne" ) {
+
+ BSONObj result = conn->findOne( ns , fixQuery( e["query"].Obj() ) );
+
+ if( check ){
+ int err = scope->invoke( scopeFunc , 0 , &result, 1000 * 60 , false );
+ if( err ){
+ log() << "Error checking in benchRun thread [findOne]" << causedBy( scope->getError() ) << endl;
+ return;
+ }
+ }
+
+ if( ! config->hideResults || e["showResult"].trueValue() ) log() << "Result from benchRun thread [findOne] : " << result << endl;
+
+ }
+ else if ( op == "command" ) {
+
+ BSONObj result;
+ // TODO
+ /* bool ok = */ conn->runCommand( ns , fixQuery( e["command"].Obj() ), result, e["options"].numberInt() );
+
+ if( check ){
+ int err = scope->invoke( scopeFunc , 0 , &result, 1000 * 60 , false );
+ if( err ){
+ log() << "Error checking in benchRun thread [command]" << causedBy( scope->getError() ) << endl;
+ return;
+ }
+ }
+
+ if( ! config->hideResults || e["showResult"].trueValue() ) log() << "Result from benchRun thread [command] : " << result << endl;
+
+ }
+ else if( op == "find" || op == "query" ) {
+
+ int limit = e["limit"].eoo() ? 0 : e["limit"].numberInt();
+ int skip = e["skip"].eoo() ? 0 : e["skip"].Int();
+ int options = e["options"].eoo() ? 0 : e["options"].Int();
+ int batchSize = e["batchSize"].eoo() ? 0 : e["batchSize"].Int();
+ BSONObj filter = e["filter"].eoo() ? BSONObj() : e["filter"].Obj();
+
+ auto_ptr<DBClientCursor> cursor = conn->query( ns, fixQuery( e["query"].Obj() ), limit, skip, &filter, options, batchSize );
+
+ int count = cursor->itcount();
+
+ if( check ){
+ BSONObj thisValue = BSON( "count" << count );
+ int err = scope->invoke( scopeFunc , 0 , &thisValue, 1000 * 60 , false );
+ if( err ){
+ log() << "Error checking in benchRun thread [find]" << causedBy( scope->getError() ) << endl;
+ return;
+ }
+ }
+
+ if( ! config->hideResults || e["showResult"].trueValue() ) log() << "Result from benchRun thread [query] : " << count << endl;
+
+ }
+ else if( op == "update" ) {
+
+ bool multi = e["multi"].trueValue();
+ bool upsert = e["upsert"].trueValue();
+ BSONObj query = e["query"].eoo() ? BSONObj() : e["query"].Obj();
+ BSONObj update = e["update"].Obj();
+
+ conn->update( ns, fixQuery( query ), update, upsert , multi );
+
+ bool safe = e["safe"].trueValue();
+ if( safe ){
+ BSONObj result = conn->getLastErrorDetailed();
+
+ if( check ){
+ int err = scope->invoke( scopeFunc , 0 , &result, 1000 * 60 , false );
+ if( err ){
+ log() << "Error checking in benchRun thread [update]" << causedBy( scope->getError() ) << endl;
+ return;
+ }
+ }
+
+ if( ! config->hideResults || e["showResult"].trueValue() ) log() << "Result from benchRun thread [safe update] : " << result << endl;
+
+ if( ! result["err"].eoo() && result["err"].type() == String && ( config->throwGLE || e["throwGLE"].trueValue() ) )
+ throw DBException( (string)"From benchRun GLE" + causedBy( result["err"].String() ),
+ result["code"].eoo() ? 0 : result["code"].Int() );
+ }
+ }
+ else if( op == "insert" ) {
+
+ conn->insert( ns, fixQuery( e["doc"].Obj() ) );
+
+ bool safe = e["safe"].trueValue();
+ if( safe ){
+ BSONObj result = conn->getLastErrorDetailed();
+
+ if( check ){
+ int err = scope->invoke( scopeFunc , 0 , &result, 1000 * 60 , false );
+ if( err ){
+ log() << "Error checking in benchRun thread [insert]" << causedBy( scope->getError() ) << endl;
+ return;
+ }
+ }
+
+ if( ! config->hideResults || e["showResult"].trueValue() ) log() << "Result from benchRun thread [safe insert] : " << result << endl;
+
+ if( ! result["err"].eoo() && result["err"].type() == String && ( config->throwGLE || e["throwGLE"].trueValue() ) )
+ throw DBException( (string)"From benchRun GLE" + causedBy( result["err"].String() ),
+ result["code"].eoo() ? 0 : result["code"].Int() );
+ }
+ }
+ else if( op == "delete" || op == "remove" ) {
+
+ bool multi = e["multi"].eoo() ? true : e["multi"].trueValue();
+ BSONObj query = e["query"].eoo() ? BSONObj() : e["query"].Obj();
+
+ conn->remove( ns, fixQuery( query ), ! multi );
+
+ bool safe = e["safe"].trueValue();
+ if( safe ){
+ BSONObj result = conn->getLastErrorDetailed();
+
+ if( check ){
+ int err = scope->invoke( scopeFunc , 0 , &result, 1000 * 60 , false );
+ if( err ){
+ log() << "Error checking in benchRun thread [delete]" << causedBy( scope->getError() ) << endl;
+ return;
+ }
+ }
+
+ if( ! config->hideResults || e["showResult"].trueValue() ) log() << "Result from benchRun thread [safe remove] : " << result << endl;
+
+ if( ! result["err"].eoo() && result["err"].type() == String && ( config->throwGLE || e["throwGLE"].trueValue() ) )
+ throw DBException( (string)"From benchRun GLE " + causedBy( result["err"].String() ),
+ result["code"].eoo() ? 0 : result["code"].Int() );
+ }
+ }
+ else if ( op == "createIndex" ) {
+ conn->ensureIndex( ns , e["key"].Obj() , false , "" , false );
+ }
+ else if ( op == "dropIndex" ) {
+ conn->dropIndex( ns , e["key"].Obj() );
+ }
+ else {
+ log() << "don't understand op: " << op << endl;
+ config->error = true;
+ return;
+ }
+ }
+ catch( DBException& ex ){
+ if( ! config->hideErrors || e["showError"].trueValue() ){
+
+ bool yesWatch = ( config->watchPattern && config->watchPattern->FullMatch( ex.what() ) );
+ bool noWatch = ( config->noWatchPattern && config->noWatchPattern->FullMatch( ex.what() ) );
+
+ if( ( ! config->watchPattern && config->noWatchPattern && ! noWatch ) || // If we're just ignoring things
+ ( ! config->noWatchPattern && config->watchPattern && yesWatch ) || // If we're just watching things
+ ( config->watchPattern && config->noWatchPattern && yesWatch && ! noWatch ) )
+ log() << "Error in benchRun thread for op " << e << causedBy( ex ) << endl;
+ }
+
+ bool yesTrap = ( config->trapPattern && config->trapPattern->FullMatch( ex.what() ) );
+ bool noTrap = ( config->noTrapPattern && config->noTrapPattern->FullMatch( ex.what() ) );
+
+ if( ( ! config->trapPattern && config->noTrapPattern && ! noTrap ) ||
+ ( ! config->noTrapPattern && config->trapPattern && yesTrap ) ||
+ ( config->trapPattern && config->noTrapPattern && yesTrap && ! noTrap ) ){
+ {
+ scoped_lock lock( config->_mutex );
+ config->trapped.append( BSON( "error" << ex.what() << "op" << e << "count" << count ) );
+ }
+ if( config->breakOnTrap ) return;
+ }
+ if( ! config->handleErrors && ! e["handleError"].trueValue() ) return;
+
+ {
+ scoped_lock lock( config->_mutex );
+ config->errCount++;
+ }
+ }
+ catch( ... ){
+ if( ! config->hideErrors || e["showError"].trueValue() ) log() << "Error in benchRun thread caused by unknown error for op " << e << endl;
+ if( ! config->handleErrors && ! e["handleError"].trueValue() ) return;
+
+ {
+ scoped_lock lock( config->_mutex );
+ config->errCount++;
+ }
+ }
+
+ if ( ++count % 100 == 0 ) {
+ conn->getLastError();
+ }
+
+ sleepmillis( delay );
+
+ }
+ }
+ }
+
+ static void benchThread( BenchRunConfig * config ) {
+
+ ScopedDbConnection conn( config->host );
+ config->threadsReady++;
+ config->threadsActive++;
+
+ try {
+ if (config->username != "") {
+ string errmsg;
+ if (!conn.get()->auth(config->db, config->username, config->password, errmsg)) {
+ uasserted(15932, "Authenticating to connection for benchThread failed: " + errmsg);
+ }
+ }
+
+ _benchThread( config, conn );
+ }
+ catch( DBException& e ){
+ error() << "DBException not handled in benchRun thread" << causedBy( e ) << endl;
+ }
+ catch( std::exception& e ){
+ error() << "Exception not handled in benchRun thread" << causedBy( e ) << endl;
+ }
+ catch( ... ){
+ error() << "Exception not handled in benchRun thread." << endl;
+ }
+ conn->getLastError();
+ config->threadsActive--;
+ conn.done();
+
+ }
+
+
+ class BenchRunner {
+ public:
+
+ BenchRunner( ) {
+ }
+
+ ~BenchRunner() {
+ }
+
+ void init( BSONObj& args ){
+
+ oid.init();
+ activeRuns[ oid ] = this;
+
+ if ( args["host"].type() == String )
+ config.host = args["host"].String();
+ if ( args["db"].type() == String )
+ config.db = args["db"].String();
+ if ( args["username"].type() == String )
+ config.username = args["username"].String();
+ if ( args["password"].type() == String )
+ config.db = args["password"].String();
+
+ if ( args["parallel"].isNumber() )
+ config.parallel = args["parallel"].numberInt();
+ if ( args["seconds"].isNumber() )
+ config.seconds = args["seconds"].numberInt();
+ if ( ! args["hideResults"].eoo() )
+ config.hideResults = args["hideResults"].trueValue();
+ if ( ! args["handleErrors"].eoo() )
+ config.handleErrors = args["handleErrors"].trueValue();
+ if ( ! args["hideErrors"].eoo() )
+ config.hideErrors = args["hideErrors"].trueValue();
+ if ( ! args["throwGLE"].eoo() )
+ config.throwGLE = args["throwGLE"].trueValue();
+ if ( ! args["breakOnTrap"].eoo() )
+ config.breakOnTrap = args["breakOnTrap"].trueValue();
+
+
+ if ( ! args["trapPattern"].eoo() ){
+ const char* regex = args["trapPattern"].regex();
+ const char* flags = args["trapPattern"].regexFlags();
+ config.trapPattern = shared_ptr< pcrecpp::RE >( new pcrecpp::RE( regex, flags2options( flags ) ) );
+ }
+
+ if ( ! args["noTrapPattern"].eoo() ){
+ const char* regex = args["noTrapPattern"].regex();
+ const char* flags = args["noTrapPattern"].regexFlags();
+ config.noTrapPattern = shared_ptr< pcrecpp::RE >( new pcrecpp::RE( regex, flags2options( flags ) ) );
+ }
+
+ if ( ! args["watchPattern"].eoo() ){
+ const char* regex = args["watchPattern"].regex();
+ const char* flags = args["watchPattern"].regexFlags();
+ config.watchPattern = shared_ptr< pcrecpp::RE >( new pcrecpp::RE( regex, flags2options( flags ) ) );
+ }
+
+ if ( ! args["noWatchPattern"].eoo() ){
+ const char* regex = args["noWatchPattern"].regex();
+ const char* flags = args["noWatchPattern"].regexFlags();
+ config.noWatchPattern = shared_ptr< pcrecpp::RE >( new pcrecpp::RE( regex, flags2options( flags ) ) );
+ }
+
+ config.ops = args["ops"].Obj().getOwned();
+ conn = shared_ptr< ScopedDbConnection >( new ScopedDbConnection( config.host ) );
+
+ // Get initial stats
+ conn->get()->simpleCommand( "admin" , &before , "serverStatus" );
+
+ // Start threads
+ for ( unsigned i = 0; i < config.parallel; i++ )
+ threads.push_back( shared_ptr< boost::thread >( new boost::thread( boost::bind( benchThread , &config ) ) ) );
+
+ // Give them time to init
+ while ( config.threadsReady < config.parallel ) sleepmillis( 1 );
+
+ }
+
+ void done(){
+
+ log() << "Ending! (waiting for " << threads.size() << " threads)" << endl;
+
+ {
+ scoped_lock lock( config._mutex );
+ config.active = false;
+ }
+
+ for ( unsigned i = 0; i < threads.size(); i++ ) threads[i]->join();
+
+ // Get final stats
+ conn->get()->simpleCommand( "admin" , &after , "serverStatus" );
+ after.getOwned();
+
+ conn.get()->done();
+
+ activeRuns.erase( oid );
+
+ }
+
+ BSONObj status(){
+ scoped_lock lock( config._mutex );
+ return BSON( "errCount" << config.errCount <<
+ "trappedCount" << config.trapped.arrSize() <<
+ "threadsActive" << config.threadsActive.get() );
+ }
+
+ static BenchRunner* get( BSONObj args ){
+ BenchRunner* runner = new BenchRunner();
+ runner->init( args );
+ return runner;
+ }
+
+ static BenchRunner* get( OID oid ){
+ return activeRuns[ oid ];
+ }
+
+ static BSONObj finish( BenchRunner* runner ){
+
+ runner->done();
+
+ // vector<BSONObj> errors = runner->config.errors;
+ bool error = runner->config.error;
+
+ if ( error )
+ return BSON( "err" << 1 );
+
+ // compute actual ops/sec
+ BSONObj before = runner->before["opcounters"].Obj();
+ BSONObj after = runner->after["opcounters"].Obj();
+
+ BSONObjBuilder buf;
+ buf.append( "note" , "values per second" );
+ buf.append( "errCount", (long long) runner->config.errCount );
+ buf.append( "trapped", runner->config.trapped.arr() );
+ {
+ BSONObjIterator i( after );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ double x = e.number();
+ x = x - before[e.fieldName()].number();
+ buf.append( e.fieldName() , x / runner->config.seconds );
+ }
+ }
+
+ BSONObj zoo = buf.obj();
+
+ delete runner;
+ return zoo;
+ }
+
+ static map< OID, BenchRunner* > activeRuns;
+
+ OID oid;
+ BenchRunConfig config;
+ vector< shared_ptr< boost::thread > > threads;
+
+ shared_ptr< ScopedDbConnection > conn;
+ BSONObj before;
+ BSONObj after;
+
+ };
+
+ map< OID, BenchRunner* > BenchRunner::activeRuns;
+
+
+ /**
+ * benchRun( { ops : [] , host : XXX , db : XXXX , parallel : 5 , seconds : 5 } )
+ */
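+ /**
+ * Illustrative only -- a typical invocation (namespace and values are made up):
+ *
+ *   benchRun( { host : "localhost:27017" , db : "test" , parallel : 4 , seconds : 5 ,
+ *               ops : [ { ns : "test.bench" , op : "insert" ,
+ *                         doc : { x : { "#RAND_INT" : [ 0 , 100 ] } } } ] } )
+ *
+ * returns per-second opcounter deltas, or raw totals if { totals : true } is passed.
+ */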
+ BSONObj benchRun( const BSONObj& argsFake, void* data ) {
+ assert( argsFake.firstElement().isABSONObj() );
+ BSONObj args = argsFake.firstElement().Obj();
+
+ // setup
+
+ BenchRunConfig config;
+
+ if ( args["host"].type() == String )
+ config.host = args["host"].String();
+ if ( args["db"].type() == String )
+ config.db = args["db"].String();
+ if ( args["username"].type() == String )
+ config.username = args["username"].String();
+ if ( args["password"].type() == String )
+ config.password = args["password"].String();
+
+ if ( args["parallel"].isNumber() )
+ config.parallel = args["parallel"].numberInt();
+ if ( args["seconds"].isNumber() )
+ config.seconds = args["seconds"].number();
+
+
+ config.ops = args["ops"].Obj();
+
+ // execute
+
+ ScopedDbConnection conn( config.host );
+
+ if (config.username != "") {
+ string errmsg;
+ if (!conn.get()->auth(config.db, config.username, config.password, errmsg)) {
+ uasserted(15930, "Authenticating to connection for bench run failed: " + errmsg);
+ }
+ }
+
+
+ // start threads
+ vector<boost::thread*> all;
+ for ( unsigned i=0; i<config.parallel; i++ )
+ all.push_back( new boost::thread( boost::bind( benchThread , &config ) ) );
+
+ // give them time to init
+ while ( config.threadsReady < config.parallel )
+ sleepmillis( 1 );
+
+ BSONObj before;
+ conn->simpleCommand( "admin" , &before , "serverStatus" );
+
+ sleepmillis( (int)(1000.0 * config.seconds) );
+
+ BSONObj after;
+ conn->simpleCommand( "admin" , &after , "serverStatus" );
+
+ conn.done();
+
+ config.active = false;
+
+ for ( unsigned i=0; i<all.size(); i++ )
+ all[i]->join();
+
+ if ( config.error )
+ return BSON( "err" << 1 );
+
+ // compute actual ops/sec
+
+ before = before["opcounters"].Obj().copy();
+ after = after["opcounters"].Obj().copy();
+
+ bool totals = args["totals"].trueValue();
+
+ BSONObjBuilder buf;
+ if ( ! totals )
+ buf.append( "note" , "values per second" );
+
+ {
+ BSONObjIterator i( after );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ double x = e.number();
+ x = x - before[e.fieldName()].number();
+ if ( ! totals )
+ x = x / config.seconds;
+ buf.append( e.fieldName() , x );
+ }
+ }
+ BSONObj zoo = buf.obj();
+ return BSON( "" << zoo );
+ }
+
+ /**
+ * benchRunSync( { ops : [] , host : XXX , db : XXXX , parallel : 5 , seconds : 5 } ) - same arguments as benchRun; runs for 'seconds' and returns the results
+ */
+ BSONObj benchRunSync( const BSONObj& argsFake, void* data ) {
+
+ assert( argsFake.firstElement().isABSONObj() );
+ BSONObj args = argsFake.firstElement().Obj();
+
+ // Get new BenchRunner object
+ BenchRunner* runner = BenchRunner::get( args );
+
+ sleepsecs( static_cast<int>( runner->config.seconds ) );
+
+ return BenchRunner::finish( runner );
+
+ }
+
+ /**
+ * benchStart( { ops : [] , host : XXX , db : XXXX , parallel : 5 , seconds : 5 } ) - starts the test in the background and returns its id
+ */
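+ /**
+ * Illustrative only -- the asynchronous flow these entry points support
+ * (argument shape as above, id value made up):
+ *
+ *   var id = benchStart( { host : "localhost:27017" , db : "test" , parallel : 4 ,
+ *                          seconds : 30 , ops : [ ... ] } );
+ *   benchStatus( id );  // { errCount : ... , trappedCount : ... , threadsActive : ... }
+ *   benchFinish( id );  // stops the run and returns results normalized by 'seconds'
+ */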
+ BSONObj benchStart( const BSONObj& argsFake, void* data ) {
+
+ assert( argsFake.firstElement().isABSONObj() );
+ BSONObj args = argsFake.firstElement().Obj();
+
+ // Get new BenchRunner object
+ BenchRunner* runner = BenchRunner::get( args );
+
+ log() << "Starting benchRun test " << runner->oid << endl;
+
+ return BSON( "" << runner->oid.toString() );
+ }
+
+ /**
+ * benchStatus( benchRunId ) - returns interim stats for a test started with benchStart
+ */
+ BSONObj benchStatus( const BSONObj& argsFake, void* data ) {
+
+ OID oid = OID( argsFake.firstElement().String() );
+
+ log() << "Getting status for benchRun test " << oid << endl;
+
+ // Get new BenchRunner object
+ BenchRunner* runner = BenchRunner::get( oid );
+
+ BSONObj statusObj = runner->status();
+
+ return BSON( "" << statusObj );
+ }
+
+ /**
+ * benchFinish( benchRunId ) - stops a test started with benchStart and returns its results
+ */
+ BSONObj benchFinish( const BSONObj& argsFake, void* data ) {
+
+ OID oid = OID( argsFake.firstElement().String() );
+
+ log() << "Finishing benchRun test " << oid << endl;
+
+ // Get new BenchRunner object
+ BenchRunner* runner = BenchRunner::get( oid );
+
+ BSONObj finalObj = BenchRunner::finish( runner );
+
+ return BSON( "" << finalObj );
+ }
+
+ void installBenchmarkSystem( Scope& scope ) {
+ scope.injectNative( "benchRun" , benchRun );
+ scope.injectNative( "benchRunSync" , benchRunSync );
+ scope.injectNative( "benchStart" , benchStart );
+ scope.injectNative( "benchStatus" , benchStatus );
+ scope.injectNative( "benchFinish" , benchFinish );
+ }
+
+}
diff --git a/src/mongo/scripting/engine.cpp b/src/mongo/scripting/engine.cpp
new file mode 100644
index 00000000000..13fe681ebe5
--- /dev/null
+++ b/src/mongo/scripting/engine.cpp
@@ -0,0 +1,519 @@
+// engine.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "engine.h"
+#include "../util/file.h"
+#include "../client/dbclient.h"
+
+namespace mongo {
+
+ long long Scope::_lastVersion = 1;
+
+ int Scope::_numScopes = 0;
+
+ Scope::Scope() : _localDBName("") , _loadedVersion(0), _numTimeUsed(0) {
+ _numScopes++;
+ }
+
+ Scope::~Scope() {
+ _numScopes--;
+ }
+
+ ScriptEngine::ScriptEngine() : _scopeInitCallback() {
+ }
+
+ ScriptEngine::~ScriptEngine() {
+ }
+
+ void Scope::append( BSONObjBuilder & builder , const char * fieldName , const char * scopeName ) {
+ int t = type( scopeName );
+
+ switch ( t ) {
+ case Object:
+ builder.append( fieldName , getObject( scopeName ) );
+ break;
+ case Array:
+ builder.appendArray( fieldName , getObject( scopeName ) );
+ break;
+ case NumberDouble:
+ builder.append( fieldName , getNumber( scopeName ) );
+ break;
+ case NumberInt:
+ builder.append( fieldName , getNumberInt( scopeName ) );
+ break;
+ case NumberLong:
+ builder.append( fieldName , getNumberLongLong( scopeName ) );
+ break;
+ case String:
+ builder.append( fieldName , getString( scopeName ).c_str() );
+ break;
+ case Bool:
+ builder.appendBool( fieldName , getBoolean( scopeName ) );
+ break;
+ case jstNULL:
+ case Undefined:
+ builder.appendNull( fieldName );
+ break;
+ case Date:
+ // TODO: make signed
+ builder.appendDate( fieldName , Date_t((unsigned long long)getNumber( scopeName )) );
+ break;
+ case Code:
+ builder.appendCode( fieldName , getString( scopeName ) );
+ break;
+ default:
+ stringstream temp;
+ temp << "can't append type from:";
+ temp << t;
+ uassert( 10206 , temp.str() , 0 );
+ }
+
+ }
+
+ int Scope::invoke( const char* code , const BSONObj* args, const BSONObj* recv, int timeoutMs ) {
+ ScriptingFunction func = createFunction( code );
+ uassert( 10207 , "compile failed" , func );
+ return invoke( func , args, recv, timeoutMs );
+ }
+
+ bool Scope::execFile( const string& filename , bool printResult , bool reportError , bool assertOnError, int timeoutMs ) {
+
+ path p( filename );
+
+ if ( ! exists( p ) ) {
+ log() << "file [" << filename << "] doesn't exist" << endl;
+ if ( assertOnError )
+ assert( 0 );
+ return false;
+ }
+
+ // iterate directories and recurse using all *.js files in the directory
+ if ( is_directory( p ) ) {
+ directory_iterator end;
+ bool empty = true;
+ for (directory_iterator it (p); it != end; it++) {
+ empty = false;
+ path sub (*it);
+ if (!endsWith(sub.string().c_str(), ".js"))
+ continue;
+ if (!execFile(sub.string().c_str(), printResult, reportError, assertOnError, timeoutMs))
+ return false;
+ }
+
+ if (empty) {
+ log() << "directory [" << filename << "] doesn't have any *.js files" << endl;
+ if ( assertOnError )
+ assert( 0 );
+ return false;
+ }
+
+ return true;
+ }
+
+ File f;
+ f.open( filename.c_str() , true );
+
+ unsigned L;
+ {
+ fileofs fo = f.len();
+ assert( fo <= 0x7ffffffe );
+ L = (unsigned) fo;
+ }
+ boost::scoped_array<char> data (new char[L+1]);
+ data[L] = 0;
+ f.read( 0 , data.get() , L );
+
+ int offset = 0;
+ if (data[0] == '#' && data[1] == '!') {
+ const char* newline = strchr(data.get(), '\n');
+ if (! newline)
+ return true; // file of just shebang treated same as empty file
+ offset = newline - data.get();
+ }
+
+ StringData code (data.get() + offset, L - offset);
+
+ return exec( code , filename , printResult , reportError , assertOnError, timeoutMs );
+ }
+
+ void Scope::storedFuncMod() {
+ _lastVersion++;
+ }
+
+ void Scope::validateObjectIdString( const string &str ) {
+ massert( 10448 , "invalid object id: length", str.size() == 24 );
+
+ for ( string::size_type i=0; i<str.size(); i++ ) {
+ char c = str[i];
+ if ( ( c >= '0' && c <= '9' ) ||
+ ( c >= 'a' && c <= 'f' ) ||
+ ( c >= 'A' && c <= 'F' ) ) {
+ continue;
+ }
+ massert( 10430 , "invalid object id: not hex", false );
+ }
+ }
+
+ void Scope::loadStored( bool ignoreNotConnected ) {
+ if ( _localDBName.size() == 0 ) {
+ if ( ignoreNotConnected )
+ return;
+ uassert( 10208 , "need to have locallyConnected already" , _localDBName.size() );
+ }
+ if ( _loadedVersion == _lastVersion )
+ return;
+
+ _loadedVersion = _lastVersion;
+
+ string coll = _localDBName + ".system.js";
+
+ static DBClientBase * db = createDirectClient();
+ auto_ptr<DBClientCursor> c = db->query( coll , Query(), 0, 0, NULL, QueryOption_SlaveOk, 0 );
+ assert( c.get() );
+
+ set<string> thisTime;
+
+ while ( c->more() ) {
+ BSONObj o = c->nextSafe();
+
+ BSONElement n = o["_id"];
+ BSONElement v = o["value"];
+
+ uassert( 10209 , str::stream() << "name has to be a string: " << n , n.type() == String );
+ uassert( 10210 , "value has to be set" , v.type() != EOO );
+
+ setElement( n.valuestr() , v );
+
+ thisTime.insert( n.valuestr() );
+ _storedNames.insert( n.valuestr() );
+
+ }
+
+ // --- remove things from scope that were removed
+
+ list<string> toremove;
+
+ for ( set<string>::iterator i=_storedNames.begin(); i!=_storedNames.end(); i++ ) {
+ string n = *i;
+ if ( thisTime.count( n ) == 0 )
+ toremove.push_back( n );
+ }
+
+ for ( list<string>::iterator i=toremove.begin(); i!=toremove.end(); i++ ) {
+ string n = *i;
+ _storedNames.erase( n );
+ execSetup( (string)"delete " + n , "clean up scope" );
+ }
+
+ }
+
+ ScriptingFunction Scope::createFunction( const char * code ) {
+ if ( code[0] == '/' && code[1] == '*' ) {
+ code += 2;
+ while ( code[0] && code[1] ) {
+ if ( code[0] == '*' && code[1] == '/' ) {
+ code += 2;
+ break;
+ }
+ code++;
+ }
+ }
+ map<string,ScriptingFunction>::iterator i = _cachedFunctions.find( code );
+ if ( i != _cachedFunctions.end() )
+ return i->second;
+ ScriptingFunction f = _createFunction( code );
+ _cachedFunctions[code] = f;
+ return f;
+ }
+
+ namespace JSFiles {
+ extern const JSFile collection;
+ extern const JSFile db;
+ extern const JSFile mongo;
+ extern const JSFile mr;
+ extern const JSFile query;
+ extern const JSFile utils;
+ extern const JSFile utils_sh;
+ }
+
+ void Scope::execCoreFiles() {
+ // keeping same order as in SConstruct
+ execSetup(JSFiles::utils);
+ execSetup(JSFiles::utils_sh);
+ execSetup(JSFiles::db);
+ execSetup(JSFiles::mongo);
+ execSetup(JSFiles::mr);
+ execSetup(JSFiles::query);
+ execSetup(JSFiles::collection);
+ }
+
+ typedef map< string , list<Scope*> > PoolToScopes;
+
+ class ScopeCache {
+ public:
+
+ ScopeCache() : _mutex("ScopeCache") {
+ _magic = 17;
+ }
+
+ ~ScopeCache() {
+ assert( _magic == 17 );
+ _magic = 1;
+
+ if ( inShutdown() )
+ return;
+
+ clear();
+ }
+
+ void done( const string& pool , Scope * s ) {
+ scoped_lock lk( _mutex );
+ list<Scope*> & l = _pools[pool];
+ bool oom = s->hasOutOfMemoryException();
+
+ // do not keep too many contexts, or use them for too long
+ if ( l.size() > 10 || s->getTimeUsed() > 100 || oom ) {
+ delete s;
+ }
+ else {
+ l.push_back( s );
+ s->reset();
+ }
+
+ if (oom) {
+ // out of mem, make some room
+ log() << "Clearing all idle JS contexts due to out of memory" << endl;
+ clear();
+ }
+ }
+
+ Scope * get( const string& pool ) {
+ scoped_lock lk( _mutex );
+ list<Scope*> & l = _pools[pool];
+ if ( l.size() == 0 )
+ return 0;
+
+ Scope * s = l.back();
+ l.pop_back();
+ s->reset();
+ s->incTimeUsed();
+ return s;
+ }
+
+ void clear() {
+ set<Scope*> seen;
+
+ for ( PoolToScopes::iterator i=_pools.begin() ; i != _pools.end(); i++ ) {
+ for ( list<Scope*>::iterator j=i->second.begin(); j != i->second.end(); j++ ) {
+ Scope * s = *j;
+ assert( ! seen.count( s ) );
+ delete s;
+ seen.insert( s );
+ }
+ }
+
+ _pools.clear();
+ }
+
+ private:
+ PoolToScopes _pools;
+ mongo::mutex _mutex;
+ int _magic;
+ };
+
+ thread_specific_ptr<ScopeCache> scopeCache;
+
+ class PooledScope : public Scope {
+ public:
+ PooledScope( const string pool , Scope * real ) : _pool( pool ) , _real( real ) {
+ _real->loadStored( true );
+ };
+ virtual ~PooledScope() {
+ ScopeCache * sc = scopeCache.get();
+ if ( sc ) {
+ sc->done( _pool , _real );
+ _real = 0;
+ }
+ else {
+ // this means that the Scope was killed from a different thread
+ // for example, a cursor with a $where clause got timed out
+ log(3) << "warning: scopeCache is empty!" << endl;
+ delete _real;
+ _real = 0;
+ }
+ }
+
+ void reset() {
+ _real->reset();
+ }
+ void init( const BSONObj * data ) {
+ _real->init( data );
+ }
+
+ void localConnect( const char * dbName ) {
+ _real->localConnect( dbName );
+ }
+ void externalSetup() {
+ _real->externalSetup();
+ }
+
+ double getNumber( const char *field ) {
+ return _real->getNumber( field );
+ }
+ string getString( const char *field ) {
+ return _real->getString( field );
+ }
+ bool getBoolean( const char *field ) {
+ return _real->getBoolean( field );
+ }
+ BSONObj getObject( const char *field ) {
+ return _real->getObject( field );
+ }
+
+ int type( const char *field ) {
+ return _real->type( field );
+ }
+
+ void setElement( const char *field , const BSONElement& val ) {
+ _real->setElement( field , val );
+ }
+ void setNumber( const char *field , double val ) {
+ _real->setNumber( field , val );
+ }
+ void setString( const char *field , const char * val ) {
+ _real->setString( field , val );
+ }
+ void setObject( const char *field , const BSONObj& obj , bool readOnly=true ) {
+ _real->setObject( field , obj , readOnly );
+ }
+ void setBoolean( const char *field , bool val ) {
+ _real->setBoolean( field , val );
+ }
+// void setThis( const BSONObj * obj ) {
+// _real->setThis( obj );
+// }
+
+ void setFunction( const char *field , const char * code ) {
+ _real->setFunction(field, code);
+ }
+
+ ScriptingFunction createFunction( const char * code ) {
+ return _real->createFunction( code );
+ }
+
+ ScriptingFunction _createFunction( const char * code ) {
+ return _real->createFunction( code );
+ }
+
+ void rename( const char * from , const char * to ) {
+ _real->rename( from , to );
+ }
+
+ /**
+ * @return 0 on success
+ */
+ int invoke( ScriptingFunction func , const BSONObj* args, const BSONObj* recv, int timeoutMs , bool ignoreReturn, bool readOnlyArgs, bool readOnlyRecv ) {
+ return _real->invoke( func , args , recv, timeoutMs , ignoreReturn, readOnlyArgs, readOnlyRecv );
+ }
+
+ string getError() {
+ return _real->getError();
+ }
+
+ bool hasOutOfMemoryException() {
+ return _real->hasOutOfMemoryException();
+ }
+
+ bool exec( const StringData& code , const string& name , bool printResult , bool reportError , bool assertOnError, int timeoutMs = 0 ) {
+ return _real->exec( code , name , printResult , reportError , assertOnError , timeoutMs );
+ }
+ bool execFile( const string& filename , bool printResult , bool reportError , bool assertOnError, int timeoutMs = 0 ) {
+ return _real->execFile( filename , printResult , reportError , assertOnError , timeoutMs );
+ }
+
+ void injectNative( const char *field, NativeFunction func, void* data ) {
+ _real->injectNative( field , func, data );
+ }
+
+ void gc() {
+ _real->gc();
+ }
+
+ void append( BSONObjBuilder & builder , const char * fieldName , const char * scopeName ) {
+ _real->append(builder, fieldName, scopeName);
+ }
+
+ private:
+ string _pool;
+ Scope * _real;
+ };
+
+ auto_ptr<Scope> ScriptEngine::getPooledScope( const string& pool ) {
+ if ( ! scopeCache.get() ) {
+ scopeCache.reset( new ScopeCache() );
+ }
+
+ Scope * s = scopeCache->get( pool );
+ if ( ! s ) {
+ s = newScope();
+ }
+
+ auto_ptr<Scope> p;
+ p.reset( new PooledScope( pool , s ) );
+ return p;
+ }
+
+ void ScriptEngine::threadDone() {
+ ScopeCache * sc = scopeCache.get();
+ if ( sc ) {
+ sc->clear();
+ }
+ }
+
+ void ( *ScriptEngine::_connectCallback )( DBClientWithCommands & ) = 0;
+ const char * ( *ScriptEngine::_checkInterruptCallback )() = 0;
+ unsigned ( *ScriptEngine::_getInterruptSpecCallback )() = 0;
+
+ ScriptEngine * globalScriptEngine = 0;
+
+ bool hasJSReturn( const string& code ) {
+ size_t x = code.find( "return" );
+ if ( x == string::npos )
+ return false;
+
+ return
+ ( x == 0 || ! isalpha( code[x-1] ) ) &&
+ ! isalpha( code[x+6] );
+ }
+
+ const char * jsSkipWhiteSpace( const char * raw ) {
+ while ( raw[0] ) {
+ while (isspace(*raw)) {
+ raw++;
+ }
+
+ if ( raw[0] != '/' || raw[1] != '/' )
+ break;
+
+ while ( raw[0] && raw[0] != '\n' )
+ raw++;
+ }
+ return raw;
+ }
+}
+
diff --git a/src/mongo/scripting/engine.h b/src/mongo/scripting/engine.h
new file mode 100644
index 00000000000..f4b39740001
--- /dev/null
+++ b/src/mongo/scripting/engine.h
@@ -0,0 +1,235 @@
+// engine.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "../pch.h"
+#include "../db/jsobj.h"
+
+namespace mongo {
+
+ struct JSFile {
+ const char* name;
+ const StringData& source;
+ };
+
+ typedef unsigned long long ScriptingFunction;
+ typedef BSONObj (*NativeFunction) ( const BSONObj &args, void* data );
+
+ class Scope : boost::noncopyable {
+ public:
+ Scope();
+ virtual ~Scope();
+
+ virtual void reset() = 0;
+ virtual void init( const BSONObj * data ) = 0;
+ void init( const char * data ) {
+ BSONObj o( data );
+ init( &o );
+ }
+
+ virtual void localConnect( const char * dbName ) = 0;
+ virtual void externalSetup() = 0;
+
+ class NoDBAccess {
+ Scope * _s;
+ public:
+ NoDBAccess( Scope * s ) {
+ _s = s;
+ }
+ ~NoDBAccess() {
+ _s->rename( "____db____" , "db" );
+ }
+ };
+ NoDBAccess disableDBAccess( const char * why ) {
+ rename( "db" , "____db____" );
+ return NoDBAccess( this );
+ }
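+
+ // Illustrative sketch, not part of the interface contract (reduceFunc/args are
+ // placeholders): callers keep the returned guard alive for the region that must
+ // not see "db"; the destructor renames it back:
+ //
+ //     {
+ //         Scope::NoDBAccess no = scope->disableDBAccess( "reduce function" );
+ //         scope->invokeSafe( reduceFunc , &args , 0 );
+ //     } // "db" restored here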
+
+ virtual double getNumber( const char *field ) = 0;
+ virtual int getNumberInt( const char *field ) { return (int)getNumber( field ); }
+ virtual long long getNumberLongLong( const char *field ) { return (long long)getNumber( field ); }
+ virtual string getString( const char *field ) = 0;
+ virtual bool getBoolean( const char *field ) = 0;
+ virtual BSONObj getObject( const char *field ) = 0;
+
+ virtual int type( const char *field ) = 0;
+
+ virtual void append( BSONObjBuilder & builder , const char * fieldName , const char * scopeName );
+
+ virtual void setElement( const char *field , const BSONElement& e ) = 0;
+ virtual void setNumber( const char *field , double val ) = 0;
+ virtual void setString( const char *field , const char * val ) = 0;
+ virtual void setObject( const char *field , const BSONObj& obj , bool readOnly=true ) = 0;
+ virtual void setBoolean( const char *field , bool val ) = 0;
+ virtual void setFunction( const char *field , const char * code ) = 0;
+// virtual void setThis( const BSONObj * obj ) = 0;
+
+ virtual ScriptingFunction createFunction( const char * code );
+
+ virtual void rename( const char * from , const char * to ) = 0;
+ /**
+ * @return 0 on success
+ */
+ virtual int invoke( ScriptingFunction func , const BSONObj* args, const BSONObj* recv, int timeoutMs = 0 , bool ignoreReturn = false, bool readOnlyArgs = false, bool readOnlyRecv = false ) = 0;
+ void invokeSafe( ScriptingFunction func , const BSONObj* args, const BSONObj* recv, int timeoutMs = 0 , bool ignoreReturn = false, bool readOnlyArgs = false, bool readOnlyRecv = false ) {
+ int res = invoke( func , args , recv, timeoutMs, ignoreReturn, readOnlyArgs, readOnlyRecv );
+ if ( res == 0 )
+ return;
+ throw UserException( 9004 , (string)"invoke failed: " + getError() );
+ }
+ virtual string getError() = 0;
+ virtual bool hasOutOfMemoryException() = 0;
+
+ int invoke( const char* code , const BSONObj* args, const BSONObj* recv, int timeoutMs = 0 );
+ void invokeSafe( const char* code , const BSONObj* args, const BSONObj* recv, int timeoutMs = 0 ) {
+ if ( invoke( code , args , recv, timeoutMs ) == 0 )
+ return;
+ throw UserException( 9005 , (string)"invoke failed: " + getError() );
+ }
+
+ virtual bool exec( const StringData& code , const string& name , bool printResult , bool reportError , bool assertOnError, int timeoutMs = 0 ) = 0;
+ virtual void execSetup( const StringData& code , const string& name = "setup" ) {
+ exec( code , name , false , true , true , 0 );
+ }
+
+ void execSetup( const JSFile& file) {
+ execSetup(file.source, file.name);
+ }
+
+ void execCoreFiles();
+
+ virtual bool execFile( const string& filename , bool printResult , bool reportError , bool assertOnError, int timeoutMs = 0 );
+
+ virtual void injectNative( const char *field, NativeFunction func, void* data = 0 ) = 0;
+
+ virtual void gc() = 0;
+
+ void loadStored( bool ignoreNotConnected = false );
+
+ /**
+ if any changes are made to .system.js, call this
+         right now it's just global - slightly inefficient, but a lot simpler
+ */
+ static void storedFuncMod();
+
+ static int getNumScopes() {
+ return _numScopes;
+ }
+
+ static void validateObjectIdString( const string &str );
+
+ /** increments the number of times a scope was used */
+ void incTimeUsed() { ++_numTimeUsed; }
+ /** gets the number of times a scope was used */
+ int getTimeUsed() { return _numTimeUsed; }
+
+ protected:
+
+ virtual ScriptingFunction _createFunction( const char * code ) = 0;
+
+ string _localDBName;
+ long long _loadedVersion;
+ set<string> _storedNames;
+ static long long _lastVersion;
+ map<string,ScriptingFunction> _cachedFunctions;
+ int _numTimeUsed;
+
+ static int _numScopes;
+ };
+
+ void installGlobalUtils( Scope& scope );
+
+ class DBClientWithCommands;
+
+ class ScriptEngine : boost::noncopyable {
+ public:
+ ScriptEngine();
+ virtual ~ScriptEngine();
+
+ virtual Scope * newScope() {
+ Scope *s = createScope();
+ if ( s && _scopeInitCallback )
+ _scopeInitCallback( *s );
+ installGlobalUtils( *s );
+ return s;
+ }
+
+ virtual void runTest() = 0;
+
+ virtual bool utf8Ok() const = 0;
+
+ static void setup();
+
+ /** gets a scope from the pool or a new one if pool is empty
+ * @param pool An identifier for the pool, usually the db name
+ * @return the scope */
+ auto_ptr<Scope> getPooledScope( const string& pool );
+
+ /** call this method to release some JS resources when a thread is done */
+ void threadDone();
+
+ struct Unlocker { virtual ~Unlocker() {} };
+ virtual auto_ptr<Unlocker> newThreadUnlocker() { return auto_ptr< Unlocker >( new Unlocker ); }
+
+ void setScopeInitCallback( void ( *func )( Scope & ) ) { _scopeInitCallback = func; }
+ static void setConnectCallback( void ( *func )( DBClientWithCommands& ) ) { _connectCallback = func; }
+ static void runConnectCallback( DBClientWithCommands &c ) {
+ if ( _connectCallback )
+ _connectCallback( c );
+ }
+
+ // engine implementation may either respond to interrupt events or
+ // poll for interrupts
+
+ // the interrupt functions must not wait indefinitely on a lock
+ virtual void interrupt( unsigned opSpec ) {}
+ virtual void interruptAll() {}
+
+ static void setGetInterruptSpecCallback( unsigned ( *func )() ) { _getInterruptSpecCallback = func; }
+ static bool haveGetInterruptSpecCallback() { return _getInterruptSpecCallback; }
+ static unsigned getInterruptSpec() {
+ massert( 13474, "no _getInterruptSpecCallback", _getInterruptSpecCallback );
+ return _getInterruptSpecCallback();
+ }
+
+ static void setCheckInterruptCallback( const char * ( *func )() ) { _checkInterruptCallback = func; }
+ static bool haveCheckInterruptCallback() { return _checkInterruptCallback; }
+ static const char * checkInterrupt() {
+ return _checkInterruptCallback ? _checkInterruptCallback() : "";
+ }
+ static bool interrupted() {
+ const char *r = checkInterrupt();
+ return r && r[ 0 ];
+ }
+
+ protected:
+ virtual Scope * createScope() = 0;
+
+ private:
+ void ( *_scopeInitCallback )( Scope & );
+ static void ( *_connectCallback )( DBClientWithCommands & );
+ static const char * ( *_checkInterruptCallback )();
+ static unsigned ( *_getInterruptSpecCallback )();
+ };
+
+ bool hasJSReturn( const string& s );
+
+ const char * jsSkipWhiteSpace( const char * raw );
+
+ extern ScriptEngine * globalScriptEngine;
+}
diff --git a/src/mongo/scripting/engine_java.cpp b/src/mongo/scripting/engine_java.cpp
new file mode 100644
index 00000000000..57388166e98
--- /dev/null
+++ b/src/mongo/scripting/engine_java.cpp
@@ -0,0 +1,764 @@
+// java.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "pch.h"
+#include "engine_java.h"
+#include <iostream>
+#include <map>
+#include <list>
+
+#include "../db/jsobj.h"
+#include "../db/db.h"
+
+using namespace boost::filesystem;
+
+namespace mongo {
+
+//#define JNI_DEBUG 1
+
+#ifdef JNI_DEBUG
+#undef JNI_DEBUG
+#define JNI_DEBUG(x) cout << x << endl
+#else
+#undef JNI_DEBUG
+#define JNI_DEBUG(x)
+#endif
+
+} // namespace mongo
+
+
+
+#include "../util/net/message.h"
+#include "../db/db.h"
+
+using namespace std;
+
+namespace mongo {
+
+#if defined(_WIN32)
+ /* [dm] this being undefined without us adding it here means there is
+ no tss cleanup on windows for boost lib?
+ we don't care for now esp on windows only
+
+ the boost source says:
+
+ This function's sole purpose is to cause a link error in cases where
+ automatic tss cleanup is not implemented by Boost.Threads as a
+ reminder that user code is responsible for calling the necessary
+       functions at the appropriate times (and for implementing a
+ tss_cleanup_implemented() function to eliminate the linker's
+ missing symbol error).
+
+ If Boost.Threads later implements automatic tss cleanup in cases
+ where it currently doesn't (which is the plan), the duplicate
+ symbol error will warn the user that their custom solution is no
+ longer needed and can be removed.
+ */
+ extern "C" void tss_cleanup_implemented(void) {
+ //out() << "tss_cleanup_implemented called" << endl;
+ }
+#endif
+
+ JavaJSImpl * JavaJS = 0;
+ extern string dbExecCommand;
+
+#if !defined(NOJNI)
+
+ void myJNIClean( JNIEnv * env ) {
+ JavaJS->detach( env );
+ }
+
+#if defined(_WIN32)
+ const char SYSTEM_COLON = ';';
+#else
+ const char SYSTEM_COLON = ':';
+#endif
+
+
+ void _addClassPath( const char * ed , stringstream & ss , const char * subdir ) {
+ path includeDir(ed);
+ includeDir /= subdir;
+ directory_iterator end;
+ try {
+ directory_iterator i(includeDir);
+ while ( i != end ) {
+ path p = *i;
+ ss << SYSTEM_COLON << p.string();
+ i++;
+ }
+ }
+ catch (...) {
+ problem() << "exception looking for ed class path includeDir: " << includeDir.string() << endl;
+ sleepsecs(3);
+ dbexit( EXIT_JAVA );
+ }
+ }
+
+
+ JavaJSImpl::JavaJSImpl(const char *appserverPath) {
+ _jvm = 0;
+ _mainEnv = 0;
+ _dbhook = 0;
+
+ stringstream ss;
+ string edTemp;
+
+ const char * ed = 0;
+ ss << "-Djava.class.path=.";
+
+ if ( appserverPath ) {
+ ed = findEd(appserverPath);
+ assert( ed );
+
+ ss << SYSTEM_COLON << ed << "/build/";
+
+ _addClassPath( ed , ss , "include" );
+ _addClassPath( ed , ss , "include/jython/" );
+ _addClassPath( ed , ss , "include/jython/javalib" );
+ }
+ else {
+ const string jars = findJars();
+ _addClassPath( jars.c_str() , ss , "jars" );
+
+ edTemp += (string)jars + "/jars/mongojs-js.jar";
+ ed = edTemp.c_str();
+ }
+
+
+
+#if defined(_WIN32)
+ ss << SYSTEM_COLON << "C:\\Program Files\\Java\\jdk\\lib\\tools.jar";
+#else
+ ss << SYSTEM_COLON << "/opt/java/lib/tools.jar";
+#endif
+
+ if ( getenv( "CLASSPATH" ) )
+ ss << SYSTEM_COLON << getenv( "CLASSPATH" );
+
+ string s = ss.str();
+ char * p = (char *)malloc( s.size() * 4 );
+ strcpy( p , s.c_str() );
+ char *q = p;
+#if defined(_WIN32)
+ while ( *p ) {
+ if ( *p == '/' ) *p = '\\';
+ p++;
+ }
+#endif
+
+ log(1) << "classpath: " << q << endl;
+
+ JavaVMOption * options = new JavaVMOption[4];
+ options[0].optionString = q;
+ options[1].optionString = (char*)"-Djava.awt.headless=true";
+ options[2].optionString = (char*)"-Xmx300m";
+
+ // Prevent JVM from using async signals internally, since on linux the pre-installed handlers for these
+ // signals don't seem to be respected by JNI.
+ options[3].optionString = (char*)"-Xrs";
+ // -Xcheck:jni
+
+ _vmArgs = new JavaVMInitArgs();
+ _vmArgs->version = JNI_VERSION_1_4;
+ _vmArgs->options = options;
+ _vmArgs->nOptions = 4;
+ _vmArgs->ignoreUnrecognized = JNI_FALSE;
+
+ log(1) << "loading JVM" << endl;
+ jint res = JNI_CreateJavaVM( &_jvm, (void**)&_mainEnv, _vmArgs );
+
+ if ( res ) {
+ log() << "using classpath: " << q << endl;
+ log()
+ << " res : " << (unsigned) res << " "
+ << "_jvm : " << _jvm << " "
+ << "_env : " << _mainEnv << " "
+ << endl;
+ problem() << "Couldn't create JVM res:" << (int) res << " terminating" << endl;
+ log() << "(try --nojni if you do not require that functionality)" << endl;
+ exit(22);
+ }
+ jassert( res == 0 );
+ jassert( _jvm > 0 );
+ jassert( _mainEnv > 0 );
+
+ _envs = new boost::thread_specific_ptr<JNIEnv>( myJNIClean );
+ assert( ! _envs->get() );
+ _envs->reset( _mainEnv );
+
+ _dbhook = findClass( "ed/db/JSHook" );
+ if ( _dbhook == 0 ) {
+ log() << "using classpath: " << q << endl;
+ printException();
+ }
+ jassert( _dbhook );
+
+ if ( ed ) {
+ jmethodID init = _mainEnv->GetStaticMethodID( _dbhook , "init" , "(Ljava/lang/String;)V" );
+ jassert( init );
+ _mainEnv->CallStaticVoidMethod( _dbhook , init , _getEnv()->NewStringUTF( ed ) );
+ }
+
+ _dbjni = findClass( "ed/db/DBJni" );
+ jassert( _dbjni );
+
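+        // look up and cache the jmethodIDs for JSHook's static entry points once, at JVM startup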
+ _scopeCreate = _mainEnv->GetStaticMethodID( _dbhook , "scopeCreate" , "()J" );
+ _scopeInit = _mainEnv->GetStaticMethodID( _dbhook , "scopeInit" , "(JLjava/nio/ByteBuffer;)Z" );
+ _scopeSetThis = _mainEnv->GetStaticMethodID( _dbhook , "scopeSetThis" , "(JLjava/nio/ByteBuffer;)Z" );
+ _scopeReset = _mainEnv->GetStaticMethodID( _dbhook , "scopeReset" , "(J)Z" );
+ _scopeFree = _mainEnv->GetStaticMethodID( _dbhook , "scopeFree" , "(J)V" );
+
+ _scopeGetNumber = _mainEnv->GetStaticMethodID( _dbhook , "scopeGetNumber" , "(JLjava/lang/String;)D" );
+ _scopeGetString = _mainEnv->GetStaticMethodID( _dbhook , "scopeGetString" , "(JLjava/lang/String;)Ljava/lang/String;" );
+ _scopeGetBoolean = _mainEnv->GetStaticMethodID( _dbhook , "scopeGetBoolean" , "(JLjava/lang/String;)Z" );
+ _scopeGetType = _mainEnv->GetStaticMethodID( _dbhook , "scopeGetType" , "(JLjava/lang/String;)B" );
+ _scopeGetObject = _mainEnv->GetStaticMethodID( _dbhook , "scopeGetObject" , "(JLjava/lang/String;Ljava/nio/ByteBuffer;)I" );
+ _scopeGuessObjectSize = _mainEnv->GetStaticMethodID( _dbhook , "scopeGuessObjectSize" , "(JLjava/lang/String;)J" );
+
+ _scopeSetNumber = _mainEnv->GetStaticMethodID( _dbhook , "scopeSetNumber" , "(JLjava/lang/String;D)Z" );
+ _scopeSetBoolean = _mainEnv->GetStaticMethodID( _dbhook , "scopeSetBoolean" , "(JLjava/lang/String;Z)Z" );
+ _scopeSetString = _mainEnv->GetStaticMethodID( _dbhook , "scopeSetString" , "(JLjava/lang/String;Ljava/lang/String;)Z" );
+ _scopeSetObject = _mainEnv->GetStaticMethodID( _dbhook , "scopeSetObject" , "(JLjava/lang/String;Ljava/nio/ByteBuffer;)Z" );
+
+ _functionCreate = _mainEnv->GetStaticMethodID( _dbhook , "functionCreate" , "(Ljava/lang/String;)J" );
+ _invoke = _mainEnv->GetStaticMethodID( _dbhook , "invoke" , "(JJ)I" );
+
+ jassert( _scopeCreate );
+ jassert( _scopeInit );
+ jassert( _scopeSetThis );
+ jassert( _scopeReset );
+ jassert( _scopeFree );
+
+ jassert( _scopeGetNumber );
+ jassert( _scopeGetString );
+ jassert( _scopeGetObject );
+ jassert( _scopeGetBoolean );
+ jassert( _scopeGetType );
+ jassert( _scopeGuessObjectSize );
+
+ jassert( _scopeSetNumber );
+ jassert( _scopeSetBoolean );
+ jassert( _scopeSetString );
+ jassert( _scopeSetObject );
+
+ jassert( _functionCreate );
+ jassert( _invoke );
+
+ JNINativeMethod * nativeSay = new JNINativeMethod();
+ nativeSay->name = (char*)"native_say";
+ nativeSay->signature = (char*)"(Ljava/nio/ByteBuffer;)V";
+ nativeSay->fnPtr = (void*)java_native_say;
+ _mainEnv->RegisterNatives( _dbjni , nativeSay , 1 );
+
+
+ JNINativeMethod * nativeCall = new JNINativeMethod();
+ nativeCall->name = (char*)"native_call";
+ nativeCall->signature = (char*)"(Ljava/nio/ByteBuffer;Ljava/nio/ByteBuffer;)I";
+ nativeCall->fnPtr = (void*)java_native_call;
+ _mainEnv->RegisterNatives( _dbjni , nativeCall , 1 );
+
+ }
+
+ JavaJSImpl::~JavaJSImpl() {
+ if ( _jvm ) {
+ _jvm->DestroyJavaVM();
+ cout << "Destroying JVM" << endl;
+ }
+ }
+
+// scope
+
+ jlong JavaJSImpl::scopeCreate() {
+ return _getEnv()->CallStaticLongMethod( _dbhook , _scopeCreate );
+ }
+
+ jboolean JavaJSImpl::scopeReset( jlong id ) {
+        return _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeReset , id );
+ }
+
+ void JavaJSImpl::scopeFree( jlong id ) {
+ _getEnv()->CallStaticVoidMethod( _dbhook , _scopeFree , id );
+ }
+
+// scope setters
+
+ int JavaJSImpl::scopeSetBoolean( jlong id , const char * field , jboolean val ) {
+ jstring fieldString = _getEnv()->NewStringUTF( field );
+        int res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeSetBoolean , id , fieldString , val );
+ _getEnv()->DeleteLocalRef( fieldString );
+ return res;
+ }
+
+ int JavaJSImpl::scopeSetNumber( jlong id , const char * field , double val ) {
+ jstring fieldString = _getEnv()->NewStringUTF( field );
+ int res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeSetNumber , id , fieldString , val );
+ _getEnv()->DeleteLocalRef( fieldString );
+ return res;
+ }
+
+ int JavaJSImpl::scopeSetString( jlong id , const char * field , const char * val ) {
+ jstring s1 = _getEnv()->NewStringUTF( field );
+ jstring s2 = _getEnv()->NewStringUTF( val );
+ int res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeSetString , id , s1 , s2 );
+ _getEnv()->DeleteLocalRef( s1 );
+ _getEnv()->DeleteLocalRef( s2 );
+ return res;
+ }
+
+ int JavaJSImpl::scopeSetObject( jlong id , const char * field , const BSONObj * obj ) {
+ jobject bb = 0;
+ if ( obj ) {
+ bb = _getEnv()->NewDirectByteBuffer( (void*)(obj->objdata()) , (jlong)(obj->objsize()) );
+ jassert( bb );
+ }
+
+ jstring s1 = _getEnv()->NewStringUTF( field );
+ int res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeSetObject , id , s1 , bb );
+ _getEnv()->DeleteLocalRef( s1 );
+ if ( bb )
+ _getEnv()->DeleteLocalRef( bb );
+
+ return res;
+ }
+
+ int JavaJSImpl::scopeInit( jlong id , const BSONObj * obj ) {
+ if ( ! obj )
+ return 0;
+
+ jobject bb = _getEnv()->NewDirectByteBuffer( (void*)(obj->objdata()) , (jlong)(obj->objsize()) );
+ jassert( bb );
+
+ int res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeInit , id , bb );
+ _getEnv()->DeleteLocalRef( bb );
+ return res;
+ }
+
+ int JavaJSImpl::scopeSetThis( jlong id , const BSONObj * obj ) {
+ if ( ! obj )
+ return 0;
+
+ jobject bb = _getEnv()->NewDirectByteBuffer( (void*)(obj->objdata()) , (jlong)(obj->objsize()) );
+ jassert( bb );
+
+ int res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeSetThis , id , bb );
+ _getEnv()->DeleteLocalRef( bb );
+ return res;
+ }
+
+// scope getters
+
+ char JavaJSImpl::scopeGetType( jlong id , const char * field ) {
+ jstring s1 = _getEnv()->NewStringUTF( field );
+ int res =_getEnv()->CallStaticByteMethod( _dbhook , _scopeGetType , id , s1 );
+ _getEnv()->DeleteLocalRef( s1 );
+ return res;
+ }
+
+ double JavaJSImpl::scopeGetNumber( jlong id , const char * field ) {
+ jstring s1 = _getEnv()->NewStringUTF( field );
+ double res = _getEnv()->CallStaticDoubleMethod( _dbhook , _scopeGetNumber , id , s1 );
+ _getEnv()->DeleteLocalRef( s1 );
+ return res;
+ }
+
+ jboolean JavaJSImpl::scopeGetBoolean( jlong id , const char * field ) {
+ jstring s1 = _getEnv()->NewStringUTF( field );
+ jboolean res = _getEnv()->CallStaticBooleanMethod( _dbhook , _scopeGetBoolean , id , s1 );
+ _getEnv()->DeleteLocalRef( s1 );
+ return res;
+ }
+
+ string JavaJSImpl::scopeGetString( jlong id , const char * field ) {
+ jstring s1 = _getEnv()->NewStringUTF( field );
+ jstring s = (jstring)_getEnv()->CallStaticObjectMethod( _dbhook , _scopeGetString , id , s1 );
+ _getEnv()->DeleteLocalRef( s1 );
+
+ if ( ! s )
+ return "";
+
+ const char * c = _getEnv()->GetStringUTFChars( s , 0 );
+ string retStr(c);
+ _getEnv()->ReleaseStringUTFChars( s , c );
+ return retStr;
+ }
+
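+    // Retrieving an object is a two-step protocol: Java first reports a size guess for the
+    // BSON, the C++ side allocates a holder of that size and hands it to Java as a direct
+    // ByteBuffer, and Java then serializes the object into that buffer.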
+ BSONObj JavaJSImpl::scopeGetObject( jlong id , const char * field ) {
+ jstring s1 = _getEnv()->NewStringUTF( field );
+        int guess = _getEnv()->CallStaticIntMethod( _dbhook , _scopeGuessObjectSize , id , s1 );
+ _getEnv()->DeleteLocalRef( s1 );
+
+ if ( guess == 0 )
+ return BSONObj();
+
+ BSONObj::Holder* holder = (BSONObj::Holder*) malloc(guess + sizeof(unsigned));
+        holder->zero();
+
+ jobject bb = _getEnv()->NewDirectByteBuffer( (void*)holder->data , guess );
+ jassert( bb );
+
+        jstring s2 = _getEnv()->NewStringUTF( field );
+        int len = _getEnv()->CallStaticIntMethod( _dbhook , _scopeGetObject , id , s2 , bb );
+        _getEnv()->DeleteLocalRef( s2 );
+        _getEnv()->DeleteLocalRef( bb );
+ jassert( len > 0 && len < guess );
+
+ BSONObj obj(holder);
+ assert( obj.objsize() <= guess );
+ return obj;
+ }
+
+// other
+
+ jlong JavaJSImpl::functionCreate( const char * code ) {
+ jstring s = _getEnv()->NewStringUTF( code );
+ jassert( s );
+ jlong id = _getEnv()->CallStaticLongMethod( _dbhook , _functionCreate , s );
+ _getEnv()->DeleteLocalRef( s );
+ return id;
+ }
+
+ int JavaJSImpl::invoke( jlong scope , jlong function ) {
+ return _getEnv()->CallStaticIntMethod( _dbhook , _invoke , scope , function );
+ }
+
+// --- fun run method
+
+ void JavaJSImpl::run( const char * js ) {
+ jclass c = findClass( "ed/js/JS" );
+ jassert( c );
+
+ jmethodID m = _getEnv()->GetStaticMethodID( c , "eval" , "(Ljava/lang/String;)Ljava/lang/Object;" );
+ jassert( m );
+
+ jstring s = _getEnv()->NewStringUTF( js );
+ log() << _getEnv()->CallStaticObjectMethod( c , m , s ) << endl;
+ _getEnv()->DeleteLocalRef( s );
+ }
+
+ void JavaJSImpl::printException() {
+ jthrowable exc = _getEnv()->ExceptionOccurred();
+ if ( exc ) {
+ _getEnv()->ExceptionDescribe();
+ _getEnv()->ExceptionClear();
+ }
+
+ }
+
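+    // returns the JNIEnv for the calling thread, attaching the thread to the JVM on first
+    // use and caching the pointer in the thread-specific _envs slot (detached via myJNIClean)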
+ JNIEnv * JavaJSImpl::_getEnv() {
+ JNIEnv * env = _envs->get();
+ if ( env )
+ return env;
+
+ int res = _jvm->AttachCurrentThread( (void**)&env , (void*)&_vmArgs );
+ if ( res ) {
+ out() << "ERROR javajs attachcurrentthread fails res:" << res << '\n';
+ assert(false);
+ }
+
+ _envs->reset( env );
+ return env;
+ }
+
+ Scope * JavaJSImpl::createScope() {
+ return new JavaScope();
+ }
+
+ void ScriptEngine::setup() {
+ if ( ! JavaJS ) {
+ JavaJS = new JavaJSImpl();
+ globalScriptEngine = JavaJS;
+ }
+ }
+
+ void jasserted(const char *msg, const char *file, unsigned line) {
+ log() << "jassert failed " << msg << " " << file << " " << line << endl;
+ if ( JavaJS ) JavaJS->printException();
+ throw AssertionException();
+ }
+
+
+ const char* findEd(const char *path) {
+
+#if defined(_WIN32)
+
+ if (!path) {
+ path = findEd();
+ }
+
+ // @TODO check validity
+
+ return path;
+#else
+
+ if (!path) {
+ return findEd();
+ }
+
+ log() << "Appserver location specified : " << path << endl;
+
+ if (!path) {
+ log() << " invalid appserver location : " << path << " : terminating - prepare for bus error" << endl;
+ return 0;
+ }
+
+ DIR *testDir = opendir(path);
+
+ if (testDir) {
+ log(1) << " found directory for appserver : " << path << endl;
+ closedir(testDir);
+ return path;
+ }
+ else {
+ log() << " ERROR : not a directory for specified appserver location : " << path << " - prepare for bus error" << endl;
+            return 0;
+ }
+#endif
+ }
+
+ const char * findEd() {
+
+#if defined(_WIN32)
+ log() << "Appserver location will be WIN32 default : c:/l/ed/" << endl;
+ return "c:/l/ed";
+#else
+
+ static list<const char*> possibleEdDirs;
+ if ( ! possibleEdDirs.size() ) {
+ possibleEdDirs.push_back( "../../ed/ed/" ); // this one for dwight dev box
+ possibleEdDirs.push_back( "../ed/" );
+ possibleEdDirs.push_back( "../../ed/" );
+ possibleEdDirs.push_back( "../babble/" );
+ possibleEdDirs.push_back( "../../babble/" );
+ }
+
+ for ( list<const char*>::iterator i = possibleEdDirs.begin() ; i != possibleEdDirs.end(); i++ ) {
+ const char * temp = *i;
+ DIR * test = opendir( temp );
+ if ( ! test )
+ continue;
+
+ closedir( test );
+ log(1) << "found directory for appserver : " << temp << endl;
+ return temp;
+ }
+
+ return 0;
+#endif
+ };
+
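+    // Searches a list of likely locations (relative to the running binary, resolving any
+    // symlinks and scanning PATH if necessary) for a directory containing "jars/" and
+    // returns the first match; terminates the process if none is found.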
+ const string findJars() {
+
+ static list<string> possible;
+ if ( ! possible.size() ) {
+ possible.push_back( "./" );
+ possible.push_back( "../" );
+
+ log(2) << "dbExecCommand: " << dbExecCommand << endl;
+
+ string dbDir = dbExecCommand;
+#ifdef WIN32
+ if ( dbDir.find( "\\" ) != string::npos ) {
+ dbDir = dbDir.substr( 0 , dbDir.find_last_of( "\\" ) );
+ }
+ else {
+ dbDir = ".";
+ }
+#else
+ if ( dbDir.find( "/" ) != string::npos ) {
+ dbDir = dbDir.substr( 0 , dbDir.find_last_of( "/" ) );
+ }
+ else {
+ bool found = false;
+
+ if ( getenv( "PATH" ) ) {
+ string s = getenv( "PATH" );
+ s += ":";
+ pcrecpp::StringPiece input( s );
+ string dir;
+ pcrecpp::RE re("(.*?):");
+ while ( re.Consume( &input, &dir ) ) {
+ string test = dir + "/" + dbExecCommand;
+ if ( boost::filesystem::exists( test ) ) {
+ while ( boost::filesystem::symbolic_link_exists( test ) ) {
+ char tmp[2048];
+ int len = readlink( test.c_str() , tmp , 2048 );
+ tmp[len] = 0;
+ log(5) << " symlink " << test << " -->> " << tmp << endl;
+ test = tmp;
+
+ dir = test.substr( 0 , test.rfind( "/" ) );
+ }
+ dbDir = dir;
+ found = true;
+ break;
+ }
+ }
+ }
+
+ if ( ! found )
+ dbDir = ".";
+ }
+#endif
+
+ log(2) << "dbDir [" << dbDir << "]" << endl;
+ possible.push_back( ( dbDir + "/../lib/mongo/" ));
+ possible.push_back( ( dbDir + "/../lib64/mongo/" ));
+ possible.push_back( ( dbDir + "/../lib32/mongo/" ));
+ possible.push_back( ( dbDir + "/" ));
+ possible.push_back( ( dbDir + "/lib64/mongo/" ));
+ possible.push_back( ( dbDir + "/lib32/mongo/" ));
+ }
+
+ for ( list<string>::iterator i = possible.begin() ; i != possible.end(); i++ ) {
+ const string temp = *i;
+ const string jarDir = ((string)temp) + "jars/";
+
+ log(5) << "possible jarDir [" << jarDir << "]" << endl;
+
+ path p(jarDir );
+ if ( ! boost::filesystem::exists( p) )
+ continue;
+
+ log(1) << "found directory for jars : " << jarDir << endl;
+ return temp;
+ }
+
+ problem() << "ERROR : can't find directory for jars - terminating" << endl;
+ exit(44);
+ return 0;
+
+ };
+
+
+// ---
+
+ JNIEXPORT void JNICALL java_native_say(JNIEnv * env , jclass, jobject outBuffer ) {
+ JNI_DEBUG( "native say called!" );
+
+ Message out( env->GetDirectBufferAddress( outBuffer ) , false );
+ Message in;
+
+ jniCallback( out , in );
+ assert( ! out.doIFreeIt() );
+ curNs = 0;
+ }
+
+ JNIEXPORT jint JNICALL java_native_call(JNIEnv * env , jclass, jobject outBuffer , jobject inBuffer ) {
+ JNI_DEBUG( "native call called!" );
+
+ Message out( env->GetDirectBufferAddress( outBuffer ) , false );
+ Message in;
+
+ jniCallback( out , in );
+ curNs = 0;
+
+ JNI_DEBUG( "in.data : " << in.data );
+ if ( in.data && in.data->len > 0 ) {
+ JNI_DEBUG( "copying data of len :" << in.data->len );
+ assert( env->GetDirectBufferCapacity( inBuffer ) >= in.data->len );
+ memcpy( env->GetDirectBufferAddress( inBuffer ) , in.data , in.data->len );
+
+ assert( ! out.doIFreeIt() );
+ assert( in.doIFreeIt() );
+ return in.data->len;
+ }
+
+ return 0;
+ }
+
+// ----
+
+ void JavaJSImpl::runTest() {
+
+ const int debug = 0;
+
+ JavaJSImpl& JavaJS = *mongo::JavaJS;
+
+ jlong scope = JavaJS.scopeCreate();
+ jassert( scope );
+ if ( debug ) out() << "got scope" << endl;
+
+
+ jlong func1 = JavaJS.functionCreate( "foo = 5.6; bar = \"eliot\"; abc = { foo : 517 }; " );
+ jassert( ! JavaJS.invoke( scope , func1 ) );
+
+
+ if ( debug ) out() << "func3 start" << endl;
+ jlong func3 = JavaJS.functionCreate( "function(){ z = true; } " );
+ jassert( func3 );
+ jassert( ! JavaJS.invoke( scope , func3 ) );
+ jassert( JavaJS.scopeGetBoolean( scope , "z" ) );
+ if ( debug ) out() << "func3 done" << endl;
+
+ if ( debug ) out() << "going to get object" << endl;
+ BSONObj obj = JavaJS.scopeGetObject( scope , "abc" );
+ if ( debug ) out() << "done getting object" << endl;
+
+ if ( debug ) {
+ out() << "obj : " << obj.toString() << endl;
+ }
+
+ {
+ time_t start = time(0);
+ for ( int i=0; i<5000; i++ ) {
+ JavaJS.scopeSetObject( scope , "obj" , &obj );
+ }
+ time_t end = time(0);
+
+ if ( debug )
+ out() << "time : " << (unsigned) ( end - start ) << endl;
+ }
+
+ if ( debug ) out() << "func4 start" << endl;
+ JavaJS.scopeSetObject( scope , "obj" , &obj );
+ if ( debug ) out() << "\t here 1" << endl;
+ jlong func4 = JavaJS.functionCreate( "tojson( obj );" );
+ if ( debug ) out() << "\t here 2" << endl;
+ jassert( ! JavaJS.invoke( scope , func4 ) );
+ if ( debug ) out() << "func4 end" << endl;
+
+ if ( debug ) out() << "func5 start" << endl;
+ jassert( JavaJS.scopeSetObject( scope , "c" , &obj ) );
+ jlong func5 = JavaJS.functionCreate( "assert.eq( 517 , c.foo );" );
+ jassert( func5 );
+ jassert( ! JavaJS.invoke( scope , func5 ) );
+ if ( debug ) out() << "func5 done" << endl;
+
+ if ( debug ) out() << "func6 start" << endl;
+ for ( int i=0; i<100; i++ ) {
+ double val = i + 5;
+ JavaJS.scopeSetNumber( scope , "zzz" , val );
+ jlong func6 = JavaJS.functionCreate( " xxx = zzz; " );
+ jassert( ! JavaJS.invoke( scope , func6 ) );
+ double n = JavaJS.scopeGetNumber( scope , "xxx" );
+ jassert( val == n );
+ }
+ if ( debug ) out() << "func6 done" << endl;
+
+ jlong func7 = JavaJS.functionCreate( "return 11;" );
+ jassert( ! JavaJS.invoke( scope , func7 ) );
+ assert( 11 == JavaJS.scopeGetNumber( scope , "return" ) );
+
+ scope = JavaJS.scopeCreate();
+ jlong func8 = JavaJS.functionCreate( "function(){ return 12; }" );
+ jassert( ! JavaJS.invoke( scope , func8 ) );
+ assert( 12 == JavaJS.scopeGetNumber( scope , "return" ) );
+
+ }
+
+#endif
+
+} // namespace mongo
diff --git a/src/mongo/scripting/engine_java.h b/src/mongo/scripting/engine_java.h
new file mode 100644
index 00000000000..b8245ba6f22
--- /dev/null
+++ b/src/mongo/scripting/engine_java.h
@@ -0,0 +1,223 @@
+// engine_java.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* this file contains code to call into java (into the 10gen sandbox) from inside the database */
+
+#pragma once
+
+#include "../pch.h"
+
+#include <jni.h>
+#include <errno.h>
+#include <sys/types.h>
+
+#if !defined(_WIN32)
+#include <dirent.h>
+#endif
+
+#include "../db/jsobj.h"
+
+#include "engine.h"
+
+namespace mongo {
+
+ void jasserted(const char *msg, const char *file, unsigned line);
+#define jassert(_Expression) if ( ! ( _Expression ) ){ jasserted(#_Expression, __FILE__, __LINE__); }
+
+ const char * findEd();
+ const char * findEd(const char *);
+ const string findJars();
+
+ class BSONObj;
+
+ class JavaJSImpl : public ScriptEngine {
+ public:
+ JavaJSImpl(const char * = 0);
+ ~JavaJSImpl();
+
+ jlong scopeCreate();
+ int scopeInit( jlong id , const BSONObj * obj );
+ int scopeSetThis( jlong id , const BSONObj * obj );
+ jboolean scopeReset( jlong id );
+ void scopeFree( jlong id );
+
+ double scopeGetNumber( jlong id , const char * field );
+ string scopeGetString( jlong id , const char * field );
+ jboolean scopeGetBoolean( jlong id , const char * field );
+ BSONObj scopeGetObject( jlong id , const char * field );
+ char scopeGetType( jlong id , const char * field );
+
+ int scopeSetNumber( jlong id , const char * field , double val );
+ int scopeSetString( jlong id , const char * field , const char * val );
+ int scopeSetObject( jlong id , const char * field , const BSONObj * obj );
+ int scopeSetBoolean( jlong id , const char * field , jboolean val );
+
+ jlong functionCreate( const char * code );
+
+ /* return values:
+ public static final int NO_SCOPE = -1;
+ public static final int NO_FUNCTION = -2;
+ public static final int INVOKE_ERROR = -3;
+ public static final int INVOKE_SUCCESS = 0;
+ */
+ int invoke( jlong scope , jlong function );
+
+ void printException();
+
+ void run( const char * js );
+
+ void detach( JNIEnv * env ) {
+ _jvm->DetachCurrentThread();
+ }
+
+ Scope * createScope();
+
+ void runTest();
+ private:
+
+ jobject create( const char * name ) {
+ jclass c = findClass( name );
+ if ( ! c )
+ return 0;
+
+ jmethodID cons = _getEnv()->GetMethodID( c , "<init>" , "()V" );
+ if ( ! cons )
+ return 0;
+
+ return _getEnv()->NewObject( c , cons );
+ }
+
+ jclass findClass( const char * name ) {
+ return _getEnv()->FindClass( name );
+ }
+
+
+ private:
+
+ JNIEnv * _getEnv();
+
+ JavaVM * _jvm;
+ JNIEnv * _mainEnv;
+ JavaVMInitArgs * _vmArgs;
+
+ boost::thread_specific_ptr<JNIEnv> * _envs;
+
+ jclass _dbhook;
+ jclass _dbjni;
+
+ jmethodID _scopeCreate;
+ jmethodID _scopeInit;
+ jmethodID _scopeSetThis;
+ jmethodID _scopeReset;
+ jmethodID _scopeFree;
+
+ jmethodID _scopeGetNumber;
+ jmethodID _scopeGetString;
+ jmethodID _scopeGetObject;
+ jmethodID _scopeGetBoolean;
+ jmethodID _scopeGuessObjectSize;
+ jmethodID _scopeGetType;
+
+ jmethodID _scopeSetNumber;
+ jmethodID _scopeSetString;
+ jmethodID _scopeSetObject;
+ jmethodID _scopeSetBoolean;
+
+ jmethodID _functionCreate;
+
+ jmethodID _invoke;
+
+ };
+
+ extern JavaJSImpl *JavaJS;
+
+// a javascript "scope"
+ class JavaScope : public Scope {
+ public:
+ JavaScope() {
+ s = JavaJS->scopeCreate();
+ }
+ virtual ~JavaScope() {
+ JavaJS->scopeFree(s);
+ s = 0;
+ }
+ void reset() {
+ JavaJS->scopeReset(s);
+ }
+
+        void init( const BSONObj * o ) {
+ JavaJS->scopeInit( s , o );
+ }
+
+ void localConnect( const char * dbName ) {
+ setString("$client", dbName );
+ }
+
+ double getNumber(const char *field) {
+ return JavaJS->scopeGetNumber(s,field);
+ }
+ string getString(const char *field) {
+ return JavaJS->scopeGetString(s,field);
+ }
+ bool getBoolean(const char *field) {
+ return JavaJS->scopeGetBoolean(s,field);
+ }
+ BSONObj getObject(const char *field ) {
+ return JavaJS->scopeGetObject(s,field);
+ }
+ int type(const char *field ) {
+ return JavaJS->scopeGetType(s,field);
+ }
+
+ void setThis( const BSONObj * obj ) {
+ JavaJS->scopeSetThis( s , obj );
+ }
+
+ void setNumber(const char *field, double val ) {
+ JavaJS->scopeSetNumber(s,field,val);
+ }
+ void setString(const char *field, const char * val ) {
+ JavaJS->scopeSetString(s,field,val);
+ }
+ void setObject(const char *field, const BSONObj& obj , bool readOnly ) {
+ uassert( 10211 , "only readOnly setObject supported in java" , readOnly );
+ JavaJS->scopeSetObject(s,field,&obj);
+ }
+ void setBoolean(const char *field, bool val ) {
+ JavaJS->scopeSetBoolean(s,field,val);
+ }
+
+ ScriptingFunction createFunction( const char * code ) {
+ return JavaJS->functionCreate( code );
+ }
+
+ int invoke( ScriptingFunction function , const BSONObj& args ) {
+ setObject( "args" , args , true );
+ return JavaJS->invoke(s,function);
+ }
+
+ string getError() {
+ return getString( "error" );
+ }
+
+ jlong s;
+ };
+
+ JNIEXPORT void JNICALL java_native_say(JNIEnv *, jclass, jobject outBuffer );
+ JNIEXPORT jint JNICALL java_native_call(JNIEnv *, jclass, jobject outBuffer , jobject inBuffer );
+
+} // namespace mongo
diff --git a/src/mongo/scripting/engine_none.cpp b/src/mongo/scripting/engine_none.cpp
new file mode 100644
index 00000000000..d13dbecc06e
--- /dev/null
+++ b/src/mongo/scripting/engine_none.cpp
@@ -0,0 +1,24 @@
+// engine_none.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "engine.h"
+
+namespace mongo {
+ void ScriptEngine::setup() {
+ // noop
+ }
+}
diff --git a/src/mongo/scripting/engine_spidermonkey.cpp b/src/mongo/scripting/engine_spidermonkey.cpp
new file mode 100644
index 00000000000..70b89cddbb5
--- /dev/null
+++ b/src/mongo/scripting/engine_spidermonkey.cpp
@@ -0,0 +1,1766 @@
+// engine_spidermonkey.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "engine_spidermonkey.h"
+#include "../client/dbclient.h"
+
+#ifndef _WIN32
+#include <boost/date_time/posix_time/posix_time.hpp>
+#undef assert
+#define assert MONGO_assert
+#endif
+
+#define smuassert( cx , msg , val ) \
+ if ( ! ( val ) ){ \
+ JS_ReportError( cx , msg ); \
+ return JS_FALSE; \
+ }
+
+#define CHECKNEWOBJECT(xx,ctx,w) \
+ if ( ! xx ){ \
+ massert(13072,(string)"JS_NewObject failed: " + w ,xx); \
+ }
+
+#define CHECKJSALLOC( newthing ) \
+ massert( 13615 , "JS allocation failed, either memory leak or using too much memory" , newthing )
+
+namespace mongo {
+
+ class InvalidUTF8Exception : public UserException {
+ public:
+ InvalidUTF8Exception() : UserException( 9006 , "invalid utf8" ) {
+ }
+ };
+
+ string trim( string s ) {
+ while ( s.size() && isspace( s[0] ) )
+ s = s.substr( 1 );
+
+ while ( s.size() && isspace( s[s.size()-1] ) )
+ s = s.substr( 0 , s.size() - 1 );
+
+ return s;
+ }
+
+ boost::thread_specific_ptr<SMScope> currentScope( dontDeleteScope );
+ boost::recursive_mutex &smmutex = *( new boost::recursive_mutex );
+#define smlock recursive_scoped_lock ___lk( smmutex );
+
+#define GETHOLDER(x,o) ((BSONHolder*)JS_GetPrivate( x , o ))
+
+ class BSONFieldIterator;
+
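+    // BSONHolder is the private data attached to bson_class/bson_ro_class JSObjects: it owns
+    // a copy of the BSONObj and tracks fields added (_extra) or removed (_removed) from the
+    // JS side plus a _modified flag, so unmodified objects can be handed back without rebuilding.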
+ class BSONHolder {
+ public:
+
+ BSONHolder( BSONObj obj ) {
+ _obj = obj.getOwned();
+ _inResolve = false;
+ _modified = false;
+ _magic = 17;
+ }
+
+ ~BSONHolder() {
+ _magic = 18;
+ }
+
+ void check() {
+ uassert( 10212 , "holder magic value is wrong" , _magic == 17 && _obj.isValid() );
+ }
+
+ BSONFieldIterator * it();
+
+ BSONObj _obj;
+ bool _inResolve;
+ char _magic;
+ list<string> _extra;
+ set<string> _removed;
+ bool _modified;
+ };
+
+ class BSONFieldIterator {
+ public:
+
+ BSONFieldIterator( BSONHolder * holder ) {
+
+ set<string> added;
+
+ BSONObjIterator it( holder->_obj );
+ while ( it.more() ) {
+ BSONElement e = it.next();
+ if ( holder->_removed.count( e.fieldName() ) )
+ continue;
+ _names.push_back( e.fieldName() );
+ added.insert( e.fieldName() );
+ }
+
+ for ( list<string>::iterator i = holder->_extra.begin(); i != holder->_extra.end(); i++ ) {
+ if ( ! added.count( *i ) )
+ _names.push_back( *i );
+ }
+
+ _it = _names.begin();
+ }
+
+ bool more() {
+ return _it != _names.end();
+ }
+
+ string next() {
+ string s = *_it;
+ _it++;
+ return s;
+ }
+
+ private:
+ list<string> _names;
+ list<string>::iterator _it;
+ };
+
+ BSONFieldIterator * BSONHolder::it() {
+ return new BSONFieldIterator( this );
+ }
+
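+    // TraverseStack records the chain of JSObjects currently being converted by toObject()
+    // so that cycles are detected (dive() asserts the object is not already on the stack).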
+ class TraverseStack {
+ public:
+ TraverseStack() {
+ _o = 0;
+ _parent = 0;
+ }
+
+ TraverseStack( JSObject * o , const TraverseStack * parent ) {
+ _o = o;
+ _parent = parent;
+ }
+
+ TraverseStack dive( JSObject * o ) const {
+ if ( o ) {
+ uassert( 13076 , (string)"recursive toObject" , ! has( o ) );
+ }
+ return TraverseStack( o , this );
+ }
+
+ int depth() const {
+ int d = 0;
+ const TraverseStack * s = _parent;
+ while ( s ) {
+ s = s->_parent;
+ d++;
+ }
+ return d;
+ }
+
+ bool isTop() const {
+ return _parent == 0;
+ }
+
+ bool has( JSObject * o ) const {
+ if ( ! o )
+ return false;
+ const TraverseStack * s = this;
+ while ( s ) {
+ if ( s->_o == o )
+ return true;
+ s = s->_parent;
+ }
+ return false;
+ }
+
+ JSObject * _o;
+ const TraverseStack * _parent;
+ };
+
+ class Convertor : boost::noncopyable {
+ public:
+ Convertor( JSContext * cx ) {
+ _context = cx;
+ }
+
+ string toString( JSString * so ) {
+ jschar * s = JS_GetStringChars( so );
+ size_t srclen = JS_GetStringLength( so );
+ if( srclen == 0 )
+ return "";
+
+ size_t len = srclen * 6; // we only need *3, but see note on len below
+ char * dst = (char*)malloc( len );
+
+ len /= 2;
+ // doc re weird JS_EncodeCharacters api claims len expected in 16bit
+ // units, but experiments suggest 8bit units expected. We allocate
+ // enough memory that either will work.
+
+ if ( !JS_EncodeCharacters( _context , s , srclen , dst , &len) ) {
+ StringBuilder temp;
+ temp << "Not proper UTF-16: ";
+ for ( size_t i=0; i<srclen; i++ ) {
+ if ( i > 0 )
+ temp << ",";
+ temp << s[i];
+ }
+ uasserted( 13498 , temp.str() );
+ }
+
+ string ss( dst , len );
+ free( dst );
+ if ( !JS_CStringsAreUTF8() )
+ for( string::const_iterator i = ss.begin(); i != ss.end(); ++i )
+ uassert( 10213 , "non ascii character detected", (unsigned char)(*i) <= 127 );
+ return ss;
+ }
+
+ string toString( jsval v ) {
+ return toString( JS_ValueToString( _context , v ) );
+ }
+
+ // NOTE No validation of passed in object
+ long long toNumberLongUnsafe( JSObject *o ) {
+ boost::uint64_t val;
+ if ( hasProperty( o, "top" ) ) {
+ val =
+ ( (boost::uint64_t)(boost::uint32_t)getNumber( o , "top" ) << 32 ) +
+ ( boost::uint32_t)( getNumber( o , "bottom" ) );
+ }
+ else {
+ val = (boost::uint64_t)(boost::int64_t) getNumber( o, "floatApprox" );
+ }
+ return val;
+ }
+
+ int toNumberInt( JSObject *o ) {
+ return (boost::uint32_t)(boost::int32_t) getNumber( o, "floatApprox" );
+ }
+
+ double toNumber( jsval v ) {
+ double d;
+ uassert( 10214 , "not a number" , JS_ValueToNumber( _context , v , &d ) );
+ return d;
+ }
+
+ bool toBoolean( jsval v ) {
+ JSBool b;
+ assert( JS_ValueToBoolean( _context, v , &b ) );
+ return b;
+ }
+
+ OID toOID( jsval v ) {
+ JSContext * cx = _context;
+ assert( JSVAL_IS_OID( v ) );
+
+ JSObject * o = JSVAL_TO_OBJECT( v );
+ OID oid;
+ oid.init( getString( o , "str" ) );
+ return oid;
+ }
+
+ BSONObj toObject( JSObject * o , const TraverseStack& stack=TraverseStack() ) {
+ if ( ! o )
+ return BSONObj();
+
+ if ( JS_InstanceOf( _context , o , &bson_ro_class , 0 ) ) {
+ BSONHolder * holder = GETHOLDER( _context , o );
+ assert( holder );
+ return holder->_obj.getOwned();
+ }
+
+ BSONObj orig;
+ if ( JS_InstanceOf( _context , o , &bson_class , 0 ) ) {
+ BSONHolder * holder = GETHOLDER(_context,o);
+ assert( holder );
+ if ( ! holder->_modified ) {
+ return holder->_obj;
+ }
+ orig = holder->_obj;
+ }
+
+ BSONObjBuilder b;
+
+ if ( ! appendSpecialDBObject( this , b , "value" , OBJECT_TO_JSVAL( o ) , o ) ) {
+
+ if ( stack.isTop() ) {
+ jsval theid = getProperty( o , "_id" );
+ if ( ! JSVAL_IS_VOID( theid ) ) {
+ append( b , "_id" , theid , EOO , stack.dive( o ) );
+ }
+ }
+
+ JSIdArray * properties = JS_Enumerate( _context , o );
+ assert( properties );
+
+ for ( jsint i=0; i<properties->length; i++ ) {
+ jsid id = properties->vector[i];
+ jsval nameval;
+ assert( JS_IdToValue( _context ,id , &nameval ) );
+ string name = toString( nameval );
+ if ( stack.isTop() && name == "_id" )
+ continue;
+
+ append( b , name , getProperty( o , name.c_str() ) , orig[name].type() , stack.dive( o ) );
+ }
+
+ JS_DestroyIdArray( _context , properties );
+ }
+
+ return b.obj();
+ }
+
+ BSONObj toObject( jsval v ) {
+ if ( JSVAL_IS_NULL( v ) ||
+ JSVAL_IS_VOID( v ) )
+ return BSONObj();
+
+ uassert( 10215 , "not an object" , JSVAL_IS_OBJECT( v ) );
+ return toObject( JSVAL_TO_OBJECT( v ) );
+ }
+
+ string getFunctionCode( JSFunction * func ) {
+ return toString( JS_DecompileFunction( _context , func , 0 ) );
+ }
+
+ string getFunctionCode( jsval v ) {
+ uassert( 10216 , "not a function" , JS_TypeOfValue( _context , v ) == JSTYPE_FUNCTION );
+ return getFunctionCode( JS_ValueToFunction( _context , v ) );
+ }
+
+ void appendRegex( BSONObjBuilder& b , const string& name , string s ) {
+ assert( s[0] == '/' );
+ s = s.substr(1);
+ string::size_type end = s.rfind( '/' );
+ b.appendRegex( name , s.substr( 0 , end ) , s.substr( end + 1 ) );
+ }
+
+ void append( BSONObjBuilder& b , string name , jsval val , BSONType oldType = EOO , const TraverseStack& stack=TraverseStack() ) {
+ //cout << "name: " << name << "\t" << typeString( val ) << " oldType: " << oldType << endl;
+ switch ( JS_TypeOfValue( _context , val ) ) {
+
+ case JSTYPE_VOID: b.appendUndefined( name ); break;
+ case JSTYPE_NULL: b.appendNull( name ); break;
+
+ case JSTYPE_NUMBER: {
+ double d = toNumber( val );
+ if ( oldType == NumberInt && ((int)d) == d )
+ b.append( name , (int)d );
+ else
+ b.append( name , d );
+ break;
+ }
+ case JSTYPE_STRING: b.append( name , toString( val ) ); break;
+ case JSTYPE_BOOLEAN: b.appendBool( name , toBoolean( val ) ); break;
+
+ case JSTYPE_OBJECT: {
+ JSObject * o = JSVAL_TO_OBJECT( val );
+ if ( ! o || o == JSVAL_NULL ) {
+ b.appendNull( name );
+ }
+ else if ( ! appendSpecialDBObject( this , b , name , val , o ) ) {
+ BSONObj sub = toObject( o , stack );
+ if ( JS_IsArrayObject( _context , o ) ) {
+ b.appendArray( name , sub );
+ }
+ else {
+ b.append( name , sub );
+ }
+ }
+ break;
+ }
+
+ case JSTYPE_FUNCTION: {
+ string s = toString(val);
+ if ( s[0] == '/' ) {
+ appendRegex( b , name , s );
+ }
+ else {
+ b.appendCode( name , getFunctionCode( val ) );
+ }
+ break;
+ }
+
+ default: uassert( 10217 , (string)"can't append field. name:" + name + " type: " + typeString( val ) , 0 );
+ }
+ }
+
+ // ---------- to spider monkey ---------
+
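+        // Code strings handed to compileFunction() may be full "function (...) { ... }"
+        // definitions or bare snippets; bare simple statements get "return " prepended so the
+        // snippet's value becomes the compiled function's return value.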
+ bool hasFunctionIdentifier( const string& code ) {
+ if ( code.size() < 9 || code.find( "function" ) != 0 )
+ return false;
+
+ return code[8] == ' ' || code[8] == '(';
+ }
+
+ bool isSimpleStatement( const string& code ) {
+ if ( hasJSReturn( code ) )
+ return false;
+
+ if ( code.find( ';' ) != string::npos &&
+ code.find( ';' ) != code.rfind( ';' ) )
+ return false;
+
+ if ( code.find( '\n') != string::npos )
+ return false;
+
+ if ( code.find( "for(" ) != string::npos ||
+ code.find( "for (" ) != string::npos ||
+ code.find( "while (" ) != string::npos ||
+ code.find( "while(" ) != string::npos )
+ return false;
+
+ return true;
+ }
+
+ void addRoot( JSFunction * f , const char * name );
+
+ JSFunction * compileFunction( const char * code, JSObject * assoc = 0 ) {
+ const char * gcName = "unknown";
+ JSFunction * f = _compileFunction( code , assoc , gcName );
+ //addRoot( f , gcName );
+ return f;
+ }
+
+ JSFunction * _compileFunction( const char * raw , JSObject * assoc , const char *& gcName ) {
+ if ( ! assoc )
+ assoc = JS_GetGlobalObject( _context );
+
+ raw = jsSkipWhiteSpace( raw );
+
+ //cout << "RAW\n---\n" << raw << "\n---" << endl;
+
+ static int fnum = 1;
+ stringstream fname;
+ fname << "__cf__" << fnum++ << "__";
+
+ if ( ! hasFunctionIdentifier( raw ) ) {
+ string s = raw;
+ if ( isSimpleStatement( s ) ) {
+ s = "return " + s;
+ }
+ gcName = "cf anon";
+ fname << "anon";
+ return JS_CompileFunction( _context , assoc , fname.str().c_str() , 0 , 0 , s.c_str() , s.size() , "nofile_a" , 0 );
+ }
+
+ string code = raw;
+
+ size_t start = code.find( '(' );
+ assert( start != string::npos );
+
+ string fbase;
+ if ( start > 9 ) {
+ fbase = trim( code.substr( 9 , start - 9 ) );
+ }
+ if ( fbase.length() == 0 ) {
+ fbase = "anonymous_function";
+ }
+ fname << "f__" << fbase;
+
+ code = code.substr( start + 1 );
+ size_t end = code.find( ')' );
+ assert( end != string::npos );
+
+ string paramString = trim( code.substr( 0 , end ) );
+ code = code.substr( end + 1 );
+
+ vector<string> params;
+ while ( paramString.size() ) {
+ size_t c = paramString.find( ',' );
+ if ( c == string::npos ) {
+ params.push_back( paramString );
+ break;
+ }
+ params.push_back( trim( paramString.substr( 0 , c ) ) );
+ paramString = trim( paramString.substr( c + 1 ) );
+ paramString = trim( paramString );
+ }
+
+ boost::scoped_array<const char *> paramArray (new const char*[params.size()]);
+ for ( size_t i=0; i<params.size(); i++ )
+ paramArray[i] = params[i].c_str();
+
+ // avoid munging previously munged name (kludge; switching to v8 fixes underlying issue)
+ if ( fbase.find("__cf__") != 0 && fbase.find("__f__") == string::npos ) {
+ fbase = fname.str();
+ }
+
+ JSFunction * func = JS_CompileFunction( _context , assoc , fbase.c_str() , params.size() , paramArray.get() , code.c_str() , code.size() , "nofile_b" , 0 );
+
+ if ( ! func ) {
+ log() << "compile failed for: " << raw << endl;
+ return 0;
+ }
+ gcName = "cf normal";
+ return func;
+ }
+
+ jsval toval( double d ) {
+ jsval val;
+ assert( JS_NewNumberValue( _context, d , &val ) );
+ return val;
+ }
+
+ jsval toval( const char * c ) {
+ JSString * s = JS_NewStringCopyZ( _context , c );
+ if ( s )
+ return STRING_TO_JSVAL( s );
+
+ // possibly unicode, try manual
+
+ size_t len = strlen( c );
+ size_t dstlen = len * 4;
+ jschar * dst = (jschar*)malloc( dstlen );
+
+ JSBool res = JS_DecodeBytes( _context , c , len , dst, &dstlen );
+ if ( res ) {
+ s = JS_NewUCStringCopyN( _context , dst , dstlen );
+ }
+
+ free( dst );
+
+ if ( ! res ) {
+ tlog() << "decode failed. probably invalid utf-8 string [" << c << "]" << endl;
+ jsval v;
+ if ( JS_GetPendingException( _context , &v ) )
+ tlog() << "\t why: " << toString( v ) << endl;
+ throw InvalidUTF8Exception();
+ }
+
+ CHECKJSALLOC( s );
+ return STRING_TO_JSVAL( s );
+ }
+
+ JSObject * toJSObject( const BSONObj * obj , bool readOnly=false ) {
+ static string ref = "$ref";
+ if ( ref == obj->firstElementFieldName() ) {
+ JSObject * o = JS_NewObject( _context , &dbref_class , NULL, NULL);
+ CHECKNEWOBJECT(o,_context,"toJSObject1");
+ assert( JS_SetPrivate( _context , o , (void*)(new BSONHolder( obj->getOwned() ) ) ) );
+ return o;
+ }
+ JSObject * o = JS_NewObject( _context , readOnly ? &bson_ro_class : &bson_class , NULL, NULL);
+ CHECKNEWOBJECT(o,_context,"toJSObject2");
+ assert( JS_SetPrivate( _context , o , (void*)(new BSONHolder( obj->getOwned() ) ) ) );
+ return o;
+ }
+
+ jsval toval( const BSONObj* obj , bool readOnly=false ) {
+ JSObject * o = toJSObject( obj , readOnly );
+ return OBJECT_TO_JSVAL( o );
+ }
+
+ void makeLongObj( long long n, JSObject * o ) {
+ boost::uint64_t val = (boost::uint64_t)n;
+ CHECKNEWOBJECT(o,_context,"NumberLong1");
+ double floatApprox = (double)(boost::int64_t)val;
+ setProperty( o , "floatApprox" , toval( floatApprox ) );
+ if ( (boost::int64_t)val != (boost::int64_t)floatApprox ) {
+ // using 2 doubles here instead of a single double because certain double
+ // bit patterns represent undefined values and sm might trash them
+ setProperty( o , "top" , toval( (double)(boost::uint32_t)( val >> 32 ) ) );
+ setProperty( o , "bottom" , toval( (double)(boost::uint32_t)( val & 0x00000000ffffffff ) ) );
+ }
+ }
+
+ jsval toval( long long n ) {
+ JSObject * o = JS_NewObject( _context , &numberlong_class , 0 , 0 );
+ makeLongObj( n, o );
+ return OBJECT_TO_JSVAL( o );
+ }
+
+ void makeIntObj( int n, JSObject * o ) {
+ boost::uint32_t val = (boost::uint32_t)n;
+ CHECKNEWOBJECT(o,_context,"NumberInt1");
+ double floatApprox = (double)(boost::int32_t)val;
+ setProperty( o , "floatApprox" , toval( floatApprox ) );
+ }
+
+ jsval toval( int n ) {
+ JSObject * o = JS_NewObject( _context , &numberint_class , 0 , 0 );
+ makeIntObj( n, o );
+ return OBJECT_TO_JSVAL( o );
+ }
+
+ jsval toval( const BSONElement& e ) {
+
+ switch( e.type() ) {
+ case EOO:
+ case jstNULL:
+ case Undefined:
+ return JSVAL_NULL;
+ case NumberDouble:
+ case NumberInt:
+ return toval( e.number() );
+// case NumberInt:
+// return toval( e.numberInt() );
+ case Symbol: // TODO: should we make a special class for this
+ case String:
+ return toval( e.valuestr() );
+ case Bool:
+ return e.boolean() ? JSVAL_TRUE : JSVAL_FALSE;
+ case Object: {
+ BSONObj embed = e.embeddedObject().getOwned();
+ return toval( &embed );
+ }
+ case Array: {
+
+ BSONObj embed = e.embeddedObject().getOwned();
+
+ if ( embed.isEmpty() ) {
+ return OBJECT_TO_JSVAL( JS_NewArrayObject( _context , 0 , 0 ) );
+ }
+
+ JSObject * array = JS_NewArrayObject( _context , 1 , 0 );
+ CHECKJSALLOC( array );
+
+ jsval myarray = OBJECT_TO_JSVAL( array );
+
+ BSONObjIterator i( embed );
+ while ( i.more() ){
+ const BSONElement& e = i.next();
+ jsval v = toval( e );
+ assert( JS_SetElement( _context , array , atoi(e.fieldName()) , &v ) );
+ }
+
+ return myarray;
+ }
+ case jstOID: {
+ OID oid = e.__oid();
+ JSObject * o = JS_NewObject( _context , &object_id_class , 0 , 0 );
+ CHECKNEWOBJECT(o,_context,"jstOID");
+ setProperty( o , "str" , toval( oid.str().c_str() ) );
+ return OBJECT_TO_JSVAL( o );
+ }
+ case RegEx: {
+ const char * flags = e.regexFlags();
+ uintN flagNumber = 0;
+ while ( *flags ) {
+ switch ( *flags ) {
+ case 'g': flagNumber |= JSREG_GLOB; break;
+ case 'i': flagNumber |= JSREG_FOLD; break;
+ case 'm': flagNumber |= JSREG_MULTILINE; break;
+ //case 'y': flagNumber |= JSREG_STICKY; break;
+
+ default:
+ log() << "warning: unknown regex flag:" << *flags << endl;
+ }
+ flags++;
+ }
+
+ JSObject * r = JS_NewRegExpObject( _context , (char*)e.regex() , strlen( e.regex() ) , flagNumber );
+ assert( r );
+ return OBJECT_TO_JSVAL( r );
+ }
+ case Code: {
+ JSFunction * func = compileFunction( e.valuestr() );
+ if ( func )
+ return OBJECT_TO_JSVAL( JS_GetFunctionObject( func ) );
+ return JSVAL_NULL;
+ }
+ case CodeWScope: {
+ JSFunction * func = compileFunction( e.codeWScopeCode() );
+ if ( !func )
+ return JSVAL_NULL;
+
+ BSONObj extraScope = e.codeWScopeObject();
+ if ( ! extraScope.isEmpty() ) {
+ log() << "warning: CodeWScope doesn't transfer to db.eval" << endl;
+ }
+
+ return OBJECT_TO_JSVAL( JS_GetFunctionObject( func ) );
+ }
+ case Date:
+ return OBJECT_TO_JSVAL( js_NewDateObjectMsec( _context , (jsdouble) ((long long)e.date().millis) ) );
+
+ case MinKey:
+ return OBJECT_TO_JSVAL( JS_NewObject( _context , &minkey_class , 0 , 0 ) );
+
+ case MaxKey:
+ return OBJECT_TO_JSVAL( JS_NewObject( _context , &maxkey_class , 0 , 0 ) );
+
+ case Timestamp: {
+ JSObject * o = JS_NewObject( _context , &timestamp_class , 0 , 0 );
+ CHECKNEWOBJECT(o,_context,"Timestamp1");
+ setProperty( o , "t" , toval( (double)(e.timestampTime()) ) );
+ setProperty( o , "i" , toval( (double)(e.timestampInc()) ) );
+ return OBJECT_TO_JSVAL( o );
+ }
+ case NumberLong: {
+ return toval( e.numberLong() );
+ }
+ case DBRef: {
+ JSObject * o = JS_NewObject( _context , &dbpointer_class , 0 , 0 );
+ CHECKNEWOBJECT(o,_context,"DBRef1");
+ setProperty( o , "ns" , toval( e.dbrefNS() ) );
+
+ JSObject * oid = JS_NewObject( _context , &object_id_class , 0 , 0 );
+ CHECKNEWOBJECT(oid,_context,"DBRef2");
+ setProperty( oid , "str" , toval( e.dbrefOID().str().c_str() ) );
+
+ setProperty( o , "id" , OBJECT_TO_JSVAL( oid ) );
+ return OBJECT_TO_JSVAL( o );
+ }
+ case BinData: {
+ JSObject * o = JS_NewObject( _context , &bindata_class , 0 , 0 );
+ CHECKNEWOBJECT(o,_context,"Bindata_BinData1");
+ int len;
+ const char * data = e.binData( len );
+ assert( data );
+ assert( JS_SetPrivate( _context , o , new BinDataHolder( data , len ) ) );
+
+ setProperty( o , "len" , toval( (double)len ) );
+ setProperty( o , "type" , toval( (double)e.binDataType() ) );
+ return OBJECT_TO_JSVAL( o );
+ }
+ }
+
+ log() << "toval: unknown type: " << (int) e.type() << endl;
+ uassert( 10218 , "not done: toval" , 0 );
+ return 0;
+ }
+
+ // ------- object helpers ------
+
+ JSObject * getJSObject( JSObject * o , const char * name ) {
+ jsval v;
+ assert( JS_GetProperty( _context , o , name , &v ) );
+ return JSVAL_TO_OBJECT( v );
+ }
+
+ JSObject * getGlobalObject( const char * name ) {
+ return getJSObject( JS_GetGlobalObject( _context ) , name );
+ }
+
+ JSObject * getGlobalPrototype( const char * name ) {
+ return getJSObject( getGlobalObject( name ) , "prototype" );
+ }
+
+ bool hasProperty( JSObject * o , const char * name ) {
+ JSBool res;
+ assert( JS_HasProperty( _context , o , name , & res ) );
+ return res;
+ }
+
+ jsval getProperty( JSObject * o , const char * field ) {
+ uassert( 10219 , "object passed to getPropery is null" , o );
+ jsval v;
+ assert( JS_GetProperty( _context , o , field , &v ) );
+ return v;
+ }
+
+ void setProperty( JSObject * o , const char * field , jsval v ) {
+ assert( JS_SetProperty( _context , o , field , &v ) );
+ }
+
+ string typeString( jsval v ) {
+ JSType t = JS_TypeOfValue( _context , v );
+ return JS_GetTypeName( _context , t );
+ }
+
+ bool getBoolean( JSObject * o , const char * field ) {
+ return toBoolean( getProperty( o , field ) );
+ }
+
+ double getNumber( JSObject * o , const char * field ) {
+ return toNumber( getProperty( o , field ) );
+ }
+
+ string getString( JSObject * o , const char * field ) {
+ return toString( getProperty( o , field ) );
+ }
+
+ JSClass * getClass( JSObject * o , const char * field ) {
+ jsval v;
+ assert( JS_GetProperty( _context , o , field , &v ) );
+ if ( ! JSVAL_IS_OBJECT( v ) )
+ return 0;
+ return JS_GET_CLASS( _context , JSVAL_TO_OBJECT( v ) );
+ }
+
+ JSContext * _context;
+
+
+ };
+
+
+ void bson_finalize( JSContext * cx , JSObject * obj ) {
+ BSONHolder * o = GETHOLDER( cx , obj );
+ if ( o ) {
+ delete o;
+ assert( JS_SetPrivate( cx , obj , 0 ) );
+ }
+ }
+
+ JSBool bson_enumerate( JSContext *cx, JSObject *obj, JSIterateOp enum_op, jsval *statep, jsid *idp ) {
+
+ BSONHolder * o = GETHOLDER( cx , obj );
+
+ if ( enum_op == JSENUMERATE_INIT ) {
+ if ( o ) {
+ BSONFieldIterator * it = o->it();
+ *statep = PRIVATE_TO_JSVAL( it );
+ }
+ else {
+ *statep = 0;
+ }
+ if ( idp )
+ *idp = JSVAL_ZERO;
+ return JS_TRUE;
+ }
+
+ BSONFieldIterator * it = (BSONFieldIterator*)JSVAL_TO_PRIVATE( *statep );
+ if ( ! it ) {
+ *statep = 0;
+ return JS_TRUE;
+ }
+
+ if ( enum_op == JSENUMERATE_NEXT ) {
+ if ( it->more() ) {
+ string name = it->next();
+ Convertor c(cx);
+ assert( JS_ValueToId( cx , c.toval( name.c_str() ) , idp ) );
+ }
+ else {
+ delete it;
+ *statep = 0;
+ }
+ return JS_TRUE;
+ }
+
+ if ( enum_op == JSENUMERATE_DESTROY ) {
+ if ( it )
+ delete it;
+ return JS_TRUE;
+ }
+
+ uassert( 10220 , "don't know what to do with this op" , 0 );
+ return JS_FALSE;
+ }
+
+ JSBool noaccess( JSContext *cx, JSObject *obj, jsval idval, jsval *vp) {
+ BSONHolder * holder = GETHOLDER( cx , obj );
+ if ( ! holder ) {
+ // in init code still
+ return JS_TRUE;
+ }
+ if ( holder->_inResolve )
+ return JS_TRUE;
+ JS_ReportError( cx , "doing write op on read only operation" );
+ return JS_FALSE;
+ }
+
+ JSClass bson_ro_class = {
+ "bson_ro_object" , JSCLASS_HAS_PRIVATE | JSCLASS_NEW_RESOLVE | JSCLASS_NEW_ENUMERATE ,
+ noaccess, noaccess, JS_PropertyStub, noaccess,
+ (JSEnumerateOp)bson_enumerate, (JSResolveOp)(&resolveBSONField) , JS_ConvertStub, bson_finalize ,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+ };
+
+ JSBool bson_cons( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
+ cerr << "bson_cons : shouldn't be here!" << endl;
+ JS_ReportError( cx , "can't construct bson object" );
+ return JS_FALSE;
+ }
+
+ JSFunctionSpec bson_functions[] = {
+ { 0 }
+ };
+
+ JSBool bson_add_prop( JSContext *cx, JSObject *obj, jsval idval, jsval *vp) {
+ BSONHolder * holder = GETHOLDER( cx , obj );
+ if ( ! holder ) {
+ // static init
+ return JS_TRUE;
+ }
+ if ( ! holder->_inResolve ) {
+ Convertor c(cx);
+ string name = c.toString( idval );
+ if ( holder->_obj[name].eoo() ) {
+ holder->_extra.push_back( name );
+ }
+ holder->_modified = true;
+ }
+ return JS_TRUE;
+ }
+
+
+ JSBool mark_modified( JSContext *cx, JSObject *obj, jsval idval, jsval *vp) {
+ Convertor c(cx);
+ BSONHolder * holder = GETHOLDER( cx , obj );
+ if ( !holder ) // needed when we're messing with DBRef.prototype
+ return JS_TRUE;
+ if ( holder->_inResolve )
+ return JS_TRUE;
+ holder->_modified = true;
+ holder->_removed.erase( c.toString( idval ) );
+ return JS_TRUE;
+ }
+
+ JSBool mark_modified_remove( JSContext *cx, JSObject *obj, jsval idval, jsval *vp) {
+ Convertor c(cx);
+ BSONHolder * holder = GETHOLDER( cx , obj );
+ if ( holder->_inResolve )
+ return JS_TRUE;
+ holder->_modified = true;
+ holder->_removed.insert( c.toString( idval ) );
+ return JS_TRUE;
+ }
+
+ JSClass bson_class = {
+ "bson_object" , JSCLASS_HAS_PRIVATE | JSCLASS_NEW_RESOLVE | JSCLASS_NEW_ENUMERATE ,
+ bson_add_prop, mark_modified_remove, JS_PropertyStub, mark_modified,
+ (JSEnumerateOp)bson_enumerate, (JSResolveOp)(&resolveBSONField) , JS_ConvertStub, bson_finalize ,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+ };
+
+ static JSClass global_class = {
+ "global", JSCLASS_GLOBAL_FLAGS,
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+ };
+
+ // --- global helpers ---
+
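+ // Build a BinData JS object of the given subtype from a hex string
+ // (the input is assumed to contain an even number of hex digits).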
+ JSBool hexToBinData(JSContext * cx, jsval *rval, int subtype, string s) {
+ JSObject * o = JS_NewObject( cx , &bindata_class , 0 , 0 );
+ CHECKNEWOBJECT(o,cx,"Bindata_BinData1");
+ int len = s.size() / 2;
+ char * data = new char[len];
+ char *p = data;
+ const char *src = s.c_str();
+ for( size_t i = 0; i+1 < s.size(); i += 2 ) {
+ *p++ = fromHex(src + i);
+ }
+ assert( JS_SetPrivate( cx , o , new BinDataHolder( data , len ) ) );
+ Convertor c(cx);
+ c.setProperty( o, "len", c.toval((double)len) );
+ c.setProperty( o, "type", c.toval((double)subtype) );
+ *rval = OBJECT_TO_JSVAL( o );
+ delete[] data;
+ return JS_TRUE;
+ }
+
+ JSBool _HexData( JSContext * cx , JSObject * obj , uintN argc, jsval *argv, jsval *rval ) {
+ Convertor c( cx );
+ if ( argc != 2 ) {
+ JS_ReportError( cx , "HexData needs 2 arguments -- HexData(subtype,hexstring)" );
+ return JS_FALSE;
+ }
+ int type = (int)c.toNumber( argv[ 0 ] );
+ if ( type == 2 ) {
+ JS_ReportError( cx , "BinData subtype 2 is deprecated" );
+ return JS_FALSE;
+ }
+ string s = c.toString(argv[1]);
+ return hexToBinData(cx, rval, type, s);
+ }
+
+ JSBool _UUID( JSContext * cx , JSObject * obj , uintN argc, jsval *argv, jsval *rval ) {
+ Convertor c( cx );
+ if ( argc != 1 ) {
+ JS_ReportError( cx , "UUID needs argument -- UUID(hexstring)" );
+ return JS_FALSE;
+ }
+ string s = c.toString(argv[0]);
+ if( s.size() != 32 ) {
+ JS_ReportError( cx , "bad UUID hex string len" );
+ return JS_FALSE;
+ }
+ return hexToBinData(cx, rval, 3, s);
+ }
+
+ JSBool _MD5( JSContext * cx , JSObject * obj , uintN argc, jsval *argv, jsval *rval ) {
+ Convertor c( cx );
+ if ( argc != 1 ) {
+ JS_ReportError( cx , "MD5 needs argument -- MD5(hexstring)" );
+ return JS_FALSE;
+ }
+ string s = c.toString(argv[0]);
+ if( s.size() != 32 ) {
+ JS_ReportError( cx , "bad MD5 hex string len" );
+ return JS_FALSE;
+ }
+ return hexToBinData(cx, rval, 5, s);
+ }
+
+ JSBool native_print( JSContext * cx , JSObject * obj , uintN argc, jsval *argv, jsval *rval ) {
+ stringstream ss;
+ Convertor c( cx );
+ for ( uintN i=0; i<argc; i++ ) {
+ if ( i > 0 )
+ ss << " ";
+ ss << c.toString( argv[i] );
+ }
+ ss << "\n";
+ Logstream::logLockless( ss.str() );
+ return JS_TRUE;
+ }
+
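+ // Trampoline used by injectNative(): recover the NativeFunction and data pointers
+ // from the numeric "x"/"y" properties, pack the JS arguments into a BSONObj,
+ // invoke the native function, and return its first element to JS.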
+ JSBool native_helper( JSContext *cx , JSObject *obj , uintN argc, jsval *argv , jsval *rval ) {
+ Convertor c(cx);
+
+ NativeFunction func = (NativeFunction)((long long)c.getNumber( obj , "x" ) );
+ void* data = (void*)((long long)c.getNumber( obj , "y" ) );
+ assert( func );
+
+ BSONObj a;
+ if ( argc > 0 ) {
+ BSONObjBuilder args;
+ for ( uintN i=0; i<argc; i++ ) {
+ c.append( args , args.numStr( i ) , argv[i] );
+ }
+
+ a = args.obj();
+ }
+
+ BSONObj out;
+ try {
+ out = func( a, data );
+ }
+ catch ( std::exception& e ) {
+ JS_ReportError( cx , e.what() );
+ return JS_FALSE;
+ }
+
+ if ( out.isEmpty() ) {
+ *rval = JSVAL_VOID;
+ }
+ else {
+ *rval = c.toval( out.firstElement() );
+ }
+
+ return JS_TRUE;
+ }
+
+ JSBool native_load( JSContext *cx , JSObject *obj , uintN argc, jsval *argv , jsval *rval );
+
+ JSBool native_gc( JSContext *cx , JSObject *obj , uintN argc, jsval *argv , jsval *rval ) {
+ JS_GC( cx );
+ return JS_TRUE;
+ }
+
+ JSFunctionSpec globalHelpers[] = {
+ { "print" , &native_print , 0 , 0 , 0 } ,
+ { "nativeHelper" , &native_helper , 1 , 0 , 0 } ,
+ { "load" , &native_load , 1 , 0 , 0 } ,
+ { "gc" , &native_gc , 1 , 0 , 0 } ,
+ { "UUID", &_UUID, 0, 0, 0 } ,
+ { "MD5", &_MD5, 0, 0, 0 } ,
+ { "HexData", &_HexData, 0, 0, 0 } ,
+ { 0 , 0 , 0 , 0 , 0 }
+ };
+
+ // ----END global helpers ----
+
+ // Object helpers
+
+ JSBool bson_get_size(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
+ if ( argc != 1 || !JSVAL_IS_OBJECT( argv[ 0 ] ) ) {
+ JS_ReportError( cx , "bsonsize requires one valid object" );
+ return JS_FALSE;
+ }
+
+ Convertor c(cx);
+
+ if ( argv[0] == JSVAL_VOID || argv[0] == JSVAL_NULL ) {
+ *rval = c.toval( 0.0 );
+ return JS_TRUE;
+ }
+
+ JSObject * o = JSVAL_TO_OBJECT( argv[0] );
+
+ double size = 0;
+
+ if ( JS_InstanceOf( cx , o , &bson_ro_class , 0 ) ||
+ JS_InstanceOf( cx , o , &bson_class , 0 ) ) {
+ BSONHolder * h = GETHOLDER( cx , o );
+ if ( h ) {
+ size = h->_obj.objsize();
+ }
+ }
+ else {
+ BSONObj temp = c.toObject( o );
+ size = temp.objsize();
+ }
+
+ *rval = c.toval( size );
+ return JS_TRUE;
+ }
+
+ JSFunctionSpec objectHelpers[] = {
+ { "bsonsize" , &bson_get_size , 1 , 0 , 0 } ,
+ { 0 , 0 , 0 , 0 , 0 }
+ };
+
+ // end Object helpers
+
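+ // New-resolve hook: on first access, look the property up in the wrapped BSONObj,
+ // convert it to a jsval and define it on the JS object; _inResolve guards the
+ // property hooks so this definition is not mistaken for a user write.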
+ JSBool resolveBSONField( JSContext *cx, JSObject *obj, jsval id, uintN flags, JSObject **objp ) {
+ assert( JS_EnterLocalRootScope( cx ) );
+ Convertor c( cx );
+
+ BSONHolder * holder = GETHOLDER( cx , obj );
+ if ( ! holder ) {
+ // static init
+ *objp = 0;
+ JS_LeaveLocalRootScope( cx );
+ return JS_TRUE;
+ }
+ holder->check();
+
+ string s = c.toString( id );
+
+ BSONElement e = holder->_obj[ s.c_str() ];
+
+ if ( e.type() == EOO || holder->_removed.count( s ) ) {
+ *objp = 0;
+ JS_LeaveLocalRootScope( cx );
+ return JS_TRUE;
+ }
+
+ jsval val;
+ try {
+ val = c.toval( e );
+ }
+ catch ( InvalidUTF8Exception& ) {
+ JS_LeaveLocalRootScope( cx );
+ JS_ReportError( cx , "invalid utf8" );
+ return JS_FALSE;
+ }
+
+ assert( ! holder->_inResolve );
+ holder->_inResolve = true;
+ assert( JS_SetProperty( cx , obj , s.c_str() , &val ) );
+ holder->_inResolve = false;
+
+ if ( val != JSVAL_NULL && val != JSVAL_VOID && JSVAL_IS_OBJECT( val ) ) {
+ // TODO: this is a hack to get around sub objects being modified
+ // basically right now whenever a sub object is read we mark whole obj as possibly modified
+ JSObject * oo = JSVAL_TO_OBJECT( val );
+ if ( JS_InstanceOf( cx , oo , &bson_class , 0 ) ||
+ JS_IsArrayObject( cx , oo ) ) {
+ holder->_modified = true;
+ }
+ }
+
+ *objp = obj;
+ JS_LeaveLocalRootScope( cx );
+ return JS_TRUE;
+ }
+
+
+ class SMScope;
+
+ class SMEngine : public ScriptEngine {
+ public:
+
+ SMEngine() {
+#ifdef SM18
+ JS_SetCStringsAreUTF8();
+#endif
+
+ _runtime = JS_NewRuntime(64L * 1024L * 1024L);
+ uassert( 10221 , "JS_NewRuntime failed" , _runtime );
+
+ if ( ! utf8Ok() ) {
+ log() << "*** warning: spider monkey built without utf8 support. consider rebuilding with utf8 support" << endl;
+ }
+
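+ // sanity check that assert() really executes its argument (i.e. asserts are not compiled out)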
+ int x = 0;
+ assert( x = 1 );
+ uassert( 10222 , "assert not being executed" , x == 1 );
+ }
+
+ ~SMEngine() {
+ JS_DestroyRuntime( _runtime );
+ JS_ShutDown();
+ }
+
+ Scope * createScope();
+
+ void runTest();
+
+ virtual bool utf8Ok() const { return JS_CStringsAreUTF8(); }
+
+#ifdef XULRUNNER
+ JSClass * _dateClass;
+ JSClass * _regexClass;
+#endif
+
+
+ private:
+ JSRuntime * _runtime;
+ friend class SMScope;
+ };
+
+ SMEngine * globalSMEngine;
+
+
+ void ScriptEngine::setup() {
+ globalSMEngine = new SMEngine();
+ globalScriptEngine = globalSMEngine;
+ }
+
+
+ // ------ scope ------
+
+
+ JSBool no_gc(JSContext *cx, JSGCStatus status) {
+ return JS_FALSE;
+ }
+
+ JSBool yes_gc(JSContext *cx, JSGCStatus status) {
+ return JS_TRUE;
+ }
+
+ class SMScope : public Scope {
+ public:
+ SMScope() : _this( 0 ) , _externalSetup( false ) , _localConnect( false ) {
+ smlock;
+ _context = JS_NewContext( globalSMEngine->_runtime , 8192 );
+ _convertor = new Convertor( _context );
+ massert( 10431 , "JS_NewContext failed" , _context );
+
+ JS_SetOptions( _context , JSOPTION_VAROBJFIX);
+ //JS_SetVersion( _context , JSVERSION_LATEST); TODO
+ JS_SetErrorReporter( _context , errorReporter );
+
+ _global = JS_NewObject( _context , &global_class, NULL, NULL);
+ massert( 10432 , "JS_NewObject failed for global" , _global );
+ JS_SetGlobalObject( _context , _global );
+ massert( 10433 , "js init failed" , JS_InitStandardClasses( _context , _global ) );
+
+ JS_SetOptions( _context , JS_GetOptions( _context ) | JSOPTION_VAROBJFIX );
+
+ JS_DefineFunctions( _context , _global , globalHelpers );
+
+ JS_DefineFunctions( _context , _convertor->getGlobalObject( "Object" ), objectHelpers );
+
+ //JS_SetGCCallback( _context , no_gc ); // this is useful for seeing if something is a gc problem
+
+ _postCreateHacks();
+ }
+
+ ~SMScope() {
+ smlock;
+ uassert( 10223 , "deleted SMScope twice?" , _convertor );
+
+ for ( list<void*>::iterator i=_roots.begin(); i != _roots.end(); i++ ) {
+ JS_RemoveRoot( _context , *i );
+ }
+ _roots.clear();
+
+ if ( _this ) {
+ JS_RemoveRoot( _context , &_this );
+ _this = 0;
+ }
+
+ if ( _convertor ) {
+ delete _convertor;
+ _convertor = 0;
+ }
+
+ if ( _context ) {
+ // This is expected to reclaim _global as well.
+ JS_DestroyContext( _context );
+ _context = 0;
+ }
+
+ }
+
+ void reset() {
+ smlock;
+ assert( _convertor );
+ return;
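+ // NOTE: the code below is currently unreachable because of the early return above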
+ if ( _this ) {
+ JS_RemoveRoot( _context , &_this );
+ _this = 0;
+ }
+ currentScope.reset( this );
+ _error = "";
+ }
+
+ void addRoot( void * root , const char * name ) {
+ JS_AddNamedRoot( _context , root , name );
+ _roots.push_back( root );
+ }
+
+ void init( const BSONObj * data ) {
+ smlock;
+ if ( ! data )
+ return;
+
+ BSONObjIterator i( *data );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ _convertor->setProperty( _global , e.fieldName() , _convertor->toval( e ) );
+ _initFieldNames.insert( e.fieldName() );
+ }
+
+ }
+
+ bool hasOutOfMemoryException() {
+ string err = getError();
+ return err.find("out of memory") != string::npos;
+ }
+
+ void externalSetup() {
+ smlock;
+ uassert( 10224 , "already local connected" , ! _localConnect );
+ if ( _externalSetup )
+ return;
+ initMongoJS( this , _context , _global , false );
+ _externalSetup = true;
+ }
+
+ void localConnect( const char * dbName ) {
+ {
+ smlock;
+ uassert( 10225 , "already setup for external db" , ! _externalSetup );
+ if ( _localConnect ) {
+ uassert( 10226 , "connected to different db" , _localDBName == dbName );
+ return;
+ }
+
+ initMongoJS( this , _context , _global , true );
+
+ exec( "_mongo = new Mongo();" );
+ exec( ((string)"db = _mongo.getDB( \"" + dbName + "\" ); ").c_str() );
+
+ _localConnect = true;
+ _localDBName = dbName;
+ }
+ loadStored();
+ }
+
+ // ----- getters ------
+ double getNumber( const char *field ) {
+ smlock;
+ jsval val;
+ assert( JS_GetProperty( _context , _global , field , &val ) );
+ return _convertor->toNumber( val );
+ }
+
+ string getString( const char *field ) {
+ smlock;
+ jsval val;
+ assert( JS_GetProperty( _context , _global , field , &val ) );
+ JSString * s = JS_ValueToString( _context , val );
+ return _convertor->toString( s );
+ }
+
+ bool getBoolean( const char *field ) {
+ smlock;
+ return _convertor->getBoolean( _global , field );
+ }
+
+ BSONObj getObject( const char *field ) {
+ smlock;
+ return _convertor->toObject( _convertor->getProperty( _global , field ) );
+ }
+
+ JSObject * getJSObject( const char * field ) {
+ smlock;
+ return _convertor->getJSObject( _global , field );
+ }
+
+ int type( const char *field ) {
+ smlock;
+ jsval val;
+ assert( JS_GetProperty( _context , _global , field , &val ) );
+
+ switch ( JS_TypeOfValue( _context , val ) ) {
+ case JSTYPE_VOID: return Undefined;
+ case JSTYPE_NULL: return jstNULL;
+ case JSTYPE_OBJECT: {
+ if ( val == JSVAL_NULL )
+ return jstNULL;
+ JSObject * o = JSVAL_TO_OBJECT( val );
+ if ( JS_IsArrayObject( _context , o ) )
+ return Array;
+ if ( isDate( _context , o ) )
+ return Date;
+ return Object;
+ }
+ case JSTYPE_FUNCTION: return Code;
+ case JSTYPE_STRING: return String;
+ case JSTYPE_NUMBER: return NumberDouble;
+ case JSTYPE_BOOLEAN: return Bool;
+ default:
+ uassert( 10227 , "unknown type" , 0 );
+ }
+ return 0;
+ }
+
+ // ----- setters ------
+
+ void setElement( const char *field , const BSONElement& val ) {
+ smlock;
+ jsval v = _convertor->toval( val );
+ assert( JS_SetProperty( _context , _global , field , &v ) );
+ }
+
+ void setNumber( const char *field , double val ) {
+ smlock;
+ jsval v = _convertor->toval( val );
+ assert( JS_SetProperty( _context , _global , field , &v ) );
+ }
+
+ void setString( const char *field , const char * val ) {
+ smlock;
+ jsval v = _convertor->toval( val );
+ assert( JS_SetProperty( _context , _global , field , &v ) );
+ }
+
+ void setObject( const char *field , const BSONObj& obj , bool readOnly ) {
+ smlock;
+ jsval v = _convertor->toval( &obj , readOnly );
+ JS_SetProperty( _context , _global , field , &v );
+ }
+
+ void setBoolean( const char *field , bool val ) {
+ smlock;
+ jsval v = BOOLEAN_TO_JSVAL( val );
+ assert( JS_SetProperty( _context , _global , field , &v ) );
+ }
+
+ void setThis( const BSONObj * obj ) {
+ smlock;
+ if ( _this ) {
+ JS_RemoveRoot( _context , &_this );
+ _this = 0;
+ }
+
+ if ( obj ) {
+ _this = _convertor->toJSObject( obj );
+ JS_AddNamedRoot( _context , &_this , "scope this" );
+ }
+ }
+
+ void setFunction( const char *field , const char * code ) {
+ smlock;
+ jsval v = OBJECT_TO_JSVAL(JS_GetFunctionObject(_convertor->compileFunction(code)));
+ JS_SetProperty( _context , _global , field , &v );
+ }
+
+ void rename( const char * from , const char * to ) {
+ smlock;
+ jsval v;
+ assert( JS_GetProperty( _context , _global , from , &v ) );
+ assert( JS_SetProperty( _context , _global , to , &v ) );
+ v = JSVAL_VOID;
+ assert( JS_SetProperty( _context , _global , from , &v ) );
+ }
+
+ // ---- functions -----
+
+ ScriptingFunction _createFunction( const char * code ) {
+ smlock;
+ precall();
+ return (ScriptingFunction)_convertor->compileFunction( code );
+ }
+
+ struct TimeoutSpec {
+ boost::posix_time::ptime start;
+ boost::posix_time::time_duration timeout;
+ int count;
+ };
+
+ // should not generate exceptions, as those can be caught in
+ // javascript code; returning false without an exception exits
+ // immediately
+ static JSBool _interrupt( JSContext *cx ) {
+ TimeoutSpec &spec = *(TimeoutSpec *)( JS_GetContextPrivate( cx ) );
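+ // only run the (relatively expensive) interrupt and timeout checks every 1000 callbacks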
+ if ( ++spec.count % 1000 != 0 )
+ return JS_TRUE;
+ const char * interrupt = ScriptEngine::checkInterrupt();
+ if ( interrupt && interrupt[ 0 ] ) {
+ return JS_FALSE;
+ }
+ if ( spec.timeout.ticks() == 0 ) {
+ return JS_TRUE;
+ }
+ boost::posix_time::time_duration elapsed = ( boost::posix_time::microsec_clock::local_time() - spec.start );
+ if ( elapsed < spec.timeout ) {
+ return JS_TRUE;
+ }
+ return JS_FALSE;
+
+ }
+
+ static JSBool interrupt( JSContext *cx, JSScript *script ) {
+ return _interrupt( cx );
+ }
+
+ void installInterrupt( int timeoutMs ) {
+ if ( timeoutMs != 0 || ScriptEngine::haveCheckInterruptCallback() ) {
+ TimeoutSpec *spec = new TimeoutSpec;
+ spec->timeout = boost::posix_time::millisec( timeoutMs );
+ spec->start = boost::posix_time::microsec_clock::local_time();
+ spec->count = 0;
+ JS_SetContextPrivate( _context, (void*)spec );
+#if defined(SM181) && !defined(XULRUNNER190)
+ JS_SetOperationCallback( _context, _interrupt );
+#else
+ JS_SetBranchCallback( _context, interrupt );
+#endif
+ }
+ }
+
+ void uninstallInterrupt( int timeoutMs ) {
+ if ( timeoutMs != 0 || ScriptEngine::haveCheckInterruptCallback() ) {
+#if defined(SM181) && !defined(XULRUNNER190)
+ JS_SetOperationCallback( _context , 0 );
+#else
+ JS_SetBranchCallback( _context, 0 );
+#endif
+ delete (TimeoutSpec *)JS_GetContextPrivate( _context );
+ JS_SetContextPrivate( _context, 0 );
+ }
+ }
+
+ void precall() {
+ _error = "";
+ currentScope.reset( this );
+ }
+
+ bool exec( const StringData& code , const string& name = "(anon)" , bool printResult = false , bool reportError = true , bool assertOnError = true, int timeoutMs = 0 ) {
+ smlock;
+ precall();
+
+ jsval ret = JSVAL_VOID;
+
+ installInterrupt( timeoutMs );
+ JSBool worked = JS_EvaluateScript( _context , _global , code.data() , code.size() , name.c_str() , 1 , &ret );
+ uninstallInterrupt( timeoutMs );
+
+ if ( ! worked && _error.size() == 0 ) {
+ jsval v;
+ if ( JS_GetPendingException( _context , &v ) ) {
+ _error = _convertor->toString( v );
+ if ( reportError )
+ cout << _error << endl;
+ }
+ }
+
+ uassert( 10228 , str::stream() << name + " exec failed: " << _error , worked || ! assertOnError );
+
+ if ( reportError && ! _error.empty() ) {
+ // cout << "exec error: " << _error << endl;
+ // already printed in reportError, so... TODO
+ }
+
+ if ( worked )
+ _convertor->setProperty( _global , "__lastres__" , ret );
+
+ if ( worked && printResult && ! JSVAL_IS_VOID( ret ) )
+ cout << _convertor->toString( ret ) << endl;
+
+ return worked;
+ }
+
+ int invoke( JSFunction * func , const BSONObj* args, const BSONObj* recv, int timeoutMs , bool ignoreReturn, bool readOnlyArgs, bool readOnlyRecv ) {
+ smlock;
+ precall();
+
+ assert( JS_EnterLocalRootScope( _context ) );
+
+ int nargs = args ? args->nFields() : 0;
+ scoped_array<jsval> smargsPtr( new jsval[nargs] );
+ if ( nargs ) {
+ BSONObjIterator it( *args );
+ for ( int i=0; i<nargs; i++ ) {
+ smargsPtr[i] = _convertor->toval( it.next() );
+ }
+ }
+
+ if ( !args ) {
+ _convertor->setProperty( _global , "args" , JSVAL_NULL );
+ }
+ else {
+ setObject( "args" , *args , true ); // this is for backwards compatibility
+ }
+
+ JS_LeaveLocalRootScope( _context );
+
+ installInterrupt( timeoutMs );
+ jsval rval;
+ setThis(recv);
+ JSBool ret = JS_CallFunction( _context , _this ? _this : _global , func , nargs , smargsPtr.get() , &rval );
+ setThis(0);
+ uninstallInterrupt( timeoutMs );
+
+ if ( !ret ) {
+ return -3;
+ }
+
+ if ( ! ignoreReturn ) {
+ assert( JS_SetProperty( _context , _global , "return" , &rval ) );
+ }
+
+ return 0;
+ }
+
+ int invoke( ScriptingFunction funcAddr , const BSONObj* args, const BSONObj* recv, int timeoutMs = 0 , bool ignoreReturn = 0, bool readOnlyArgs = false, bool readOnlyRecv = false ) {
+ return invoke( (JSFunction*)funcAddr , args , recv, timeoutMs , ignoreReturn, readOnlyArgs, readOnlyRecv);
+ }
+
+ void gotError( string s ) {
+ _error = s;
+ }
+
+ string getError() {
+ return _error;
+ }
+
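+ // Expose a C++ NativeFunction to JS: stash the function (and optional data) pointers
+ // as numbers in <field>_ / <field>_data_, then define <field> as a JS wrapper that
+ // forwards its arguments through the nativeHelper global.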
+ void injectNative( const char *field, NativeFunction func, void* data ) {
+ smlock;
+ string name = field;
+ _convertor->setProperty( _global , (name + "_").c_str() , _convertor->toval( (double)(long long)func ) );
+
+ stringstream code;
+ if (data) {
+ _convertor->setProperty( _global , (name + "_data_").c_str() , _convertor->toval( (double)(long long)data ) );
+ code << field << "_" << " = { x : " << field << "_ , y: " << field << "_data_ }; ";
+ } else {
+ code << field << "_" << " = { x : " << field << "_ }; ";
+ }
+ code << field << " = function(){ return nativeHelper.apply( " << field << "_ , arguments ); }";
+ exec( code.str() );
+ }
+
+ virtual void gc() {
+ smlock;
+ JS_GC( _context );
+ }
+
+ JSContext *SavedContext() const { return _context; }
+
+ private:
+
+ void _postCreateHacks() {
+#ifdef XULRUNNER
+ exec( "__x__ = new Date(1);" );
+ globalSMEngine->_dateClass = _convertor->getClass( _global , "__x__" );
+ exec( "__x__ = /abc/i" );
+ globalSMEngine->_regexClass = _convertor->getClass( _global , "__x__" );
+#endif
+ }
+
+ JSContext * _context;
+ Convertor * _convertor;
+
+ JSObject * _global;
+ JSObject * _this;
+
+ string _error;
+ list<void*> _roots;
+
+ bool _externalSetup;
+ bool _localConnect;
+
+ set<string> _initFieldNames;
+
+ };
+
+ /* used to make the logging not overly chatty in the mongo shell. */
+ extern bool isShell;
+
+ void errorReporter( JSContext *cx, const char *message, JSErrorReport *report ) {
+ stringstream ss;
+ if( !isShell )
+ ss << "JS Error: ";
+ ss << message;
+
+ if ( report && report->filename ) {
+ ss << " " << report->filename << ":" << report->lineno;
+ }
+
+ tlog() << ss.str() << endl;
+
+ if ( currentScope.get() ) {
+ currentScope->gotError( ss.str() );
+ }
+ }
+
+ JSBool native_load( JSContext *cx , JSObject *obj , uintN argc, jsval *argv , jsval *rval ) {
+ Convertor c(cx);
+
+ Scope * s = currentScope.get();
+
+ for ( uintN i=0; i<argc; i++ ) {
+ string filename = c.toString( argv[i] );
+ //cout << "load [" << filename << "]" << endl;
+
+ if ( ! s->execFile( filename , false , true , false ) ) {
+ JS_ReportError( cx , ((string)"error loading js file: " + filename ).c_str() );
+ return JS_FALSE;
+ }
+ }
+
+ return JS_TRUE;
+ }
+
+
+
+ void SMEngine::runTest() {
+ SMScope s;
+
+ s.localConnect( "foo" );
+
+ s.exec( "assert( db.getMongo() )" );
+ s.exec( "assert( db.bar , 'collection getting does not work' ); " );
+ s.exec( "assert.eq( db._name , 'foo' );" );
+ s.exec( "assert( _mongo == db.getMongo() ); " );
+ s.exec( "assert( _mongo == db._mongo ); " );
+ s.exec( "assert( typeof DB.bar == 'undefined' ); " );
+ s.exec( "assert( typeof DB.prototype.bar == 'undefined' , 'resolution is happening on prototype, not object' ); " );
+
+ s.exec( "assert( db.bar ); " );
+ s.exec( "assert( typeof db.addUser == 'function' )" );
+ s.exec( "assert( db.addUser == DB.prototype.addUser )" );
+ s.exec( "assert.eq( 'foo.bar' , db.bar._fullName ); " );
+ s.exec( "db.bar.verify();" );
+
+ s.exec( "db.bar.silly.verify();" );
+ s.exec( "assert.eq( 'foo.bar.silly' , db.bar.silly._fullName )" );
+ s.exec( "assert.eq( 'function' , typeof _mongo.find , 'mongo.find is not a function' )" );
+
+ assert( (string)"abc" == trim( "abc" ) );
+ assert( (string)"abc" == trim( " abc" ) );
+ assert( (string)"abc" == trim( "abc " ) );
+ assert( (string)"abc" == trim( " abc " ) );
+
+ }
+
+ Scope * SMEngine::createScope() {
+ return new SMScope();
+ }
+
+ void Convertor::addRoot( JSFunction * f , const char * name ) {
+ if ( ! f )
+ return;
+
+ SMScope * scope = currentScope.get();
+ uassert( 10229 , "need a scope" , scope );
+
+ JSObject * o = JS_GetFunctionObject( f );
+ assert( o );
+ scope->addRoot( &o , name );
+ }
+
+}
+
+#include "sm_db.cpp"
diff --git a/src/mongo/scripting/engine_spidermonkey.h b/src/mongo/scripting/engine_spidermonkey.h
new file mode 100644
index 00000000000..9fd430d853d
--- /dev/null
+++ b/src/mongo/scripting/engine_spidermonkey.h
@@ -0,0 +1,105 @@
+// engine_spidermonkey.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "engine.h"
+
+// START inc hacking
+
+#ifdef WIN32
+#include "jstypes.h"
+#undef JS_PUBLIC_API
+#undef JS_PUBLIC_DATA
+#define JS_PUBLIC_API(t) t __cdecl
+#define JS_PUBLIC_DATA(t) t
+#endif
+
+#include "jsapi.h"
+#include "jsobj.h"
+#include "jsdate.h"
+#include "jsregexp.h"
+
+// END inc hacking
+
+// -- SM 1.6 hacks ---
+#ifndef JSCLASS_GLOBAL_FLAGS
+#error old version of spider monkey ( probably 1.6 ) you should upgrade to at least 1.7
+#endif
+// -- END SM 1.6 hacks ---
+
+#ifdef JSVAL_IS_TRACEABLE
+#define SM18
+#endif
+
+#ifdef XULRUNNER
+#define SM181
+#endif
+
+namespace mongo {
+
+ class SMScope;
+ class Convertor;
+
+ extern JSClass bson_class;
+ extern JSClass bson_ro_class;
+
+ extern JSClass object_id_class;
+ extern JSClass dbpointer_class;
+ extern JSClass dbref_class;
+ extern JSClass bindata_class;
+ extern JSClass timestamp_class;
+ extern JSClass numberlong_class;
+ extern JSClass numberint_class;
+ extern JSClass minkey_class;
+ extern JSClass maxkey_class;
+
+ // internal things
+ void dontDeleteScope( SMScope * s ) {}
+ void errorReporter( JSContext *cx, const char *message, JSErrorReport *report );
+ extern boost::thread_specific_ptr<SMScope> currentScope;
+
+ // bson
+ JSBool resolveBSONField( JSContext *cx, JSObject *obj, jsval id, uintN flags, JSObject **objp );
+
+
+ // mongo
+ void initMongoJS( SMScope * scope , JSContext * cx , JSObject * global , bool local );
+ bool appendSpecialDBObject( Convertor * c , BSONObjBuilder& b , const string& name , jsval val , JSObject * o );
+
+#define JSVAL_IS_OID(v) ( JSVAL_IS_OBJECT( v ) && JS_InstanceOf( cx , JSVAL_TO_OBJECT( v ) , &object_id_class , 0 ) )
+
+ bool isDate( JSContext * cx , JSObject * o );
+
+ // JS private data must be 2-byte aligned, so we use a holder to refer to an unaligned pointer.
+ struct BinDataHolder {
+ BinDataHolder( const char *c, int copyLen = -1 ) :
+ c_( const_cast< char * >( c ) ),
+ iFree_( copyLen != -1 ) {
+ if ( copyLen != -1 ) {
+ c_ = (char*)malloc( copyLen );
+ memcpy( c_, c, copyLen );
+ }
+ }
+ ~BinDataHolder() {
+ if ( iFree_ )
+ free( c_ );
+ }
+ char *c_;
+ bool iFree_;
+ };
+}
diff --git a/src/mongo/scripting/engine_v8.cpp b/src/mongo/scripting/engine_v8.cpp
new file mode 100644
index 00000000000..53539c2f75c
--- /dev/null
+++ b/src/mongo/scripting/engine_v8.cpp
@@ -0,0 +1,1634 @@
+//engine_v8.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined(_WIN32)
+/** this is a hack - v8stdint.h defined uint16_t etc. on _WIN32 only, and that collides with
+ our usage of boost */
+#include "boost/cstdint.hpp"
+using namespace boost;
+#define V8STDINT_H_
+#endif
+
+#include "engine_v8.h"
+
+#include "v8_wrapper.h"
+#include "v8_utils.h"
+#include "v8_db.h"
+
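+ // Convenience macro: enter this scope's isolate, take the v8 lock, and open a handle
+ // scope and context scope for the duration of the current call.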
+#define V8_SIMPLE_HEADER v8::Isolate::Scope iscope(_isolate); v8::Locker l(_isolate); HandleScope handle_scope; Context::Scope context_scope( _context );
+
+namespace mongo {
+
+ // guarded by v8 mutex
+ map< unsigned, int > __interruptSpecToThreadId;
+ map< unsigned, v8::Isolate* > __interruptSpecToIsolate;
+
+ /**
+ * Unwraps a BSONObj from the JS wrapper
+ */
+ static BSONObj* unwrapBSONObj(const Handle<v8::Object>& obj) {
+ Handle<External> field = Handle<External>::Cast(obj->GetInternalField(0));
+ if (field.IsEmpty() || !field->IsExternal())
+ return 0;
+ void* ptr = field->Value();
+ return (BSONObj*)ptr;
+ }
+
+ static void weakRefBSONCallback(v8::Persistent<v8::Value> p, void* scope) {
+ // should we lock here? no idea, and no doc from v8 of course
+ HandleScope handle_scope;
+ if (!p.IsNearDeath())
+ return;
+ Handle<External> field = Handle<External>::Cast(p->ToObject()->GetInternalField(0));
+ BSONObj* data = (BSONObj*) field->Value();
+ delete data;
+ p.Dispose();
+ }
+
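+ // Store the BSONObj pointer in internal field 0 and hand back a weak persistent handle,
+ // so weakRefBSONCallback can delete the BSONObj once v8 collects the wrapper.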
+ Persistent<v8::Object> V8Scope::wrapBSONObject(Local<v8::Object> obj, BSONObj* data) {
+ obj->SetInternalField(0, v8::External::New(data));
+ Persistent<v8::Object> p = Persistent<v8::Object>::New(obj);
+ p.MakeWeak(this, weakRefBSONCallback);
+ return p;
+ }
+
+ static void weakRefArrayCallback(v8::Persistent<v8::Value> p, void* scope) {
+ // should we lock here? no idea, and no doc from v8 of course
+ HandleScope handle_scope;
+ if (!p.IsNearDeath())
+ return;
+ Handle<External> field = Handle<External>::Cast(p->ToObject()->GetInternalField(0));
+ char* data = (char*) field->Value();
+ delete [] data;
+ p.Dispose();
+ }
+
+ Persistent<v8::Object> V8Scope::wrapArrayObject(Local<v8::Object> obj, char* data) {
+ obj->SetInternalField(0, v8::External::New(data));
+ Persistent<v8::Object> p = Persistent<v8::Object>::New(obj);
+ p.MakeWeak(this, weakRefArrayCallback);
+ return p;
+ }
+
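+ // Lazy named-property getter: return the value already cached on the JS object if any,
+ // otherwise fetch the field from the wrapped BSONObj, convert it, and cache it.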
+ static Handle<v8::Value> namedGet(Local<v8::String> name, const v8::AccessorInfo &info) {
+ // all properties should be set, otherwise means builtin or deleted
+ if (!(info.This()->HasRealNamedProperty(name)))
+ return v8::Handle<v8::Value>();
+
+ Handle<v8::Value> val = info.This()->GetRealNamedProperty(name);
+ if (!val->IsUndefined()) {
+ // value already cached
+ return val;
+ }
+
+ string key = toSTLString(name);
+ BSONObj *obj = unwrapBSONObj(info.Holder());
+ BSONElement elmt = obj->getField(key.c_str());
+ if (elmt.eoo())
+ return Handle<Value>();
+ Local< External > scp = External::Cast( *info.Data() );
+ V8Scope* scope = (V8Scope*)(scp->Value());
+ val = scope->mongoToV8Element(elmt, false);
+ info.This()->ForceSet(name, val);
+
+ if (elmt.type() == mongo::Object || elmt.type() == mongo::Array) {
+ // if a subobject is accessed it may be modified without the base object knowing,
+ // so we have to mark the base as modified, which means some optimization is lost
+ info.This()->SetHiddenValue(scope->V8STR_MODIFIED, v8::Boolean::New(true));
+ }
+ return val;
+ }
+
+ static Handle<v8::Value> namedGetRO(Local<v8::String> name, const v8::AccessorInfo &info) {
+ string key = toSTLString(name);
+ BSONObj *obj = unwrapBSONObj(info.Holder());
+ BSONElement elmt = obj->getField(key.c_str());
+ if (elmt.eoo())
+ return Handle<Value>();
+ Local< External > scp = External::Cast( *info.Data() );
+ V8Scope* scope = (V8Scope*)(scp->Value());
+ Handle<v8::Value> val = scope->mongoToV8Element(elmt, true);
+ return val;
+ }
+
+ static Handle<v8::Value> namedSet(Local<v8::String> name, Local<v8::Value> value_obj, const v8::AccessorInfo& info) {
+ Local< External > scp = External::Cast( *info.Data() );
+ V8Scope* scope = (V8Scope*)(scp->Value());
+ info.This()->SetHiddenValue(scope->V8STR_MODIFIED, v8::Boolean::New(true));
+ return Handle<Value>();
+ }
+
+ static Handle<v8::Array> namedEnumerator(const AccessorInfo &info) {
+ BSONObj *obj = unwrapBSONObj(info.Holder());
+ Handle<v8::Array> arr = Handle<v8::Array>(v8::Array::New(obj->nFields()));
+ int i = 0;
+ Local< External > scp = External::Cast( *info.Data() );
+ V8Scope* scope = (V8Scope*)(scp->Value());
+ // note that if keys are parseable numbers, v8 will access them by index
+ for ( BSONObjIterator it(*obj); it.more(); ++i) {
+ const BSONElement& f = it.next();
+// arr->Set(i, v8::String::NewExternal(new ExternalString(f.fieldName())));
+ Handle<v8::String> name = scope->getV8Str(f.fieldName());
+ arr->Set(i, name);
+ }
+ return arr;
+ }
+
+ Handle<Boolean> namedDelete( Local<v8::String> property, const AccessorInfo& info ) {
+ Local< External > scp = External::Cast( *info.Data() );
+ V8Scope* scope = (V8Scope*)(scp->Value());
+ info.This()->SetHiddenValue(scope->V8STR_MODIFIED, v8::Boolean::New(true));
+ return Handle<Boolean>();
+ }
+
+// v8::Handle<v8::Integer> namedQuery(Local<v8::String> property, const AccessorInfo& info) {
+// string key = ToString(property);
+// return v8::Integer::New(None);
+// }
+
+ static Handle<v8::Value> indexedGet(uint32_t index, const v8::AccessorInfo &info) {
+ // all properties should be set, otherwise means builtin or deleted
+ if (!(info.This()->HasRealIndexedProperty(index)))
+ return v8::Handle<v8::Value>();
+
+ StringBuilder ss;
+ ss << index;
+ string key = ss.str();
+ Local< External > scp = External::Cast( *info.Data() );
+ V8Scope* scope = (V8Scope*)(scp->Value());
+ // cannot get v8 to properly cache the indexed val in the js object
+// Handle<v8::String> name = scope->getV8Str(key);
+// // v8 API really confusing here, must check existence on index, but then fetch with name
+// if (info.This()->HasRealIndexedProperty(index)) {
+// Handle<v8::Value> val = info.This()->GetRealNamedProperty(name);
+// if (!val.IsEmpty() && !val->IsNull())
+// return val;
+// }
+ BSONObj *obj = unwrapBSONObj(info.Holder());
+ BSONElement elmt = obj->getField(key);
+ if (elmt.eoo())
+ return Handle<Value>();
+ Handle<Value> val = scope->mongoToV8Element(elmt, false);
+// info.This()->ForceSet(name, val);
+
+ if (elmt.type() == mongo::Object || elmt.type() == mongo::Array) {
+ // if a subobject is accessed it may be modified without the base object knowing,
+ // so we have to mark the base as modified, which means some optimization is lost
+ info.This()->SetHiddenValue(scope->V8STR_MODIFIED, v8::Boolean::New(true));
+ }
+ return val;
+ }
+
+ Handle<Boolean> indexedDelete( uint32_t index, const AccessorInfo& info ) {
+ Local< External > scp = External::Cast( *info.Data() );
+ V8Scope* scope = (V8Scope*)(scp->Value());
+ info.This()->SetHiddenValue(scope->V8STR_MODIFIED, v8::Boolean::New(true));
+ return Handle<Boolean>();
+ }
+
+ static Handle<v8::Value> indexedGetRO(uint32_t index, const v8::AccessorInfo &info) {
+ StringBuilder ss;
+ ss << index;
+ string key = ss.str();
+ Local< External > scp = External::Cast( *info.Data() );
+ V8Scope* scope = (V8Scope*)(scp->Value());
+ // cannot get v8 to properly cache the indexed val in the js object
+// Handle<v8::String> name = scope->getV8Str(key);
+// // v8 API really confusing here, must check existence on index, but then fetch with name
+// if (info.This()->HasRealIndexedProperty(index)) {
+// Handle<v8::Value> val = info.This()->GetRealNamedProperty(name);
+// if (!val.IsEmpty() && !val->IsNull())
+// return val;
+// }
+ BSONObj *obj = unwrapBSONObj(info.Holder());
+ BSONElement elmt = obj->getField(key);
+ if (elmt.eoo())
+ return Handle<Value>();
+ Handle<Value> val = scope->mongoToV8Element(elmt, true);
+// info.This()->ForceSet(name, val);
+ return val;
+ }
+
+ static Handle<v8::Value> indexedSet(uint32_t index, Local<v8::Value> value_obj, const v8::AccessorInfo& info) {
+ Local< External > scp = External::Cast( *info.Data() );
+ V8Scope* scope = (V8Scope*)(scp->Value());
+ info.This()->SetHiddenValue(scope->V8STR_MODIFIED, v8::Boolean::New(true));
+ return Handle<Value>();
+ }
+
+// static Handle<v8::Array> indexedEnumerator(const AccessorInfo &info) {
+// BSONObj *obj = unwrapBSONObj(info.Holder());
+// Handle<v8::Array> arr = Handle<v8::Array>(v8::Array::New(obj->nFields()));
+// Local< External > scp = External::Cast( *info.Data() );
+// V8Scope* scope = (V8Scope*)(scp->Value());
+// int i = 0;
+// for ( BSONObjIterator it(*obj); it.more(); ++i) {
+// const BSONElement& f = it.next();
+//// arr->Set(i, v8::String::NewExternal(new ExternalString(f.fieldName())));
+// arr->Set(i, scope->getV8Str(f.fieldName()));
+// }
+// return arr;
+// }
+
+ Handle<Value> NamedReadOnlySet( Local<v8::String> property, Local<Value> value, const AccessorInfo& info ) {
+ string key = toSTLString(property);
+ cout << "cannot write property " << key << " to read-only object" << endl;
+ return value;
+ }
+
+ Handle<Boolean> NamedReadOnlyDelete( Local<v8::String> property, const AccessorInfo& info ) {
+ string key = toSTLString(property);
+ cout << "cannot delete property " << key << " from read-only object" << endl;
+ return Boolean::New( false );
+ }
+
+ Handle<Value> IndexedReadOnlySet( uint32_t index, Local<Value> value, const AccessorInfo& info ) {
+ cout << "cannot write property " << index << " to read-only array" << endl;
+ return value;
+ }
+
+ Handle<Boolean> IndexedReadOnlyDelete( uint32_t index, const AccessorInfo& info ) {
+ cout << "cannot delete property " << index << " from read-only array" << endl;
+ return Boolean::New( false );
+ }
+
+ // --- engine ---
+
+// void fatalHandler(const char* s1, const char* s2) {
+// cout << "Fatal handler " << s1 << " " << s2;
+// }
+
+ V8ScriptEngine::V8ScriptEngine() {
+ v8::V8::Initialize();
+ v8::Locker l;
+// v8::Locker::StartPreemption( 10 );
+
+ int K = 1024;
+ v8::ResourceConstraints rc;
+ rc.set_max_young_space_size(4 * K * K);
+ rc.set_max_old_space_size(64 * K * K);
+ v8::SetResourceConstraints(&rc);
+// v8::V8::IgnoreOutOfMemoryException();
+// v8::V8::SetFatalErrorHandler(fatalHandler);
+ }
+
+ V8ScriptEngine::~V8ScriptEngine() {
+ }
+
+ void ScriptEngine::setup() {
+ if ( !globalScriptEngine ) {
+ globalScriptEngine = new V8ScriptEngine();
+ }
+ }
+
+ void V8ScriptEngine::interrupt( unsigned opSpec ) {
+ v8::Locker l;
+ v8Locks::InterruptLock il;
+ if ( __interruptSpecToThreadId.count( opSpec ) ) {
+ int thread = __interruptSpecToThreadId[ opSpec ];
+ if ( thread == -2 || thread == -3) {
+ // just mark as interrupted
+ __interruptSpecToThreadId[ opSpec ] = -3;
+ return;
+ }
+
+ V8::TerminateExecution( __interruptSpecToIsolate[ opSpec ] );
+ }
+ }
+
+ void V8ScriptEngine::interruptAll() {
+ v8::Locker l;
+ v8Locks::InterruptLock il;
+ vector< Isolate* > toKill; // v8 mutex could potentially be yielded during the termination call
+
+ for( map< unsigned, Isolate* >::const_iterator i = __interruptSpecToIsolate.begin(); i != __interruptSpecToIsolate.end(); ++i ) {
+ toKill.push_back( i->second );
+ }
+ for( vector< Isolate* >::const_iterator i = toKill.begin(); i != toKill.end(); ++i ) {
+ V8::TerminateExecution( *i );
+ }
+ }
+
+ // --- scope ---
+
+ V8Scope::V8Scope( V8ScriptEngine * engine )
+ : _engine( engine ) ,
+ _connectState( NOT ) {
+
+ _isolate = v8::Isolate::New();
+ v8::Isolate::Scope iscope(_isolate);
+ v8::Locker l(_isolate);
+
+ HandleScope handleScope;
+ _context = Context::New();
+ Context::Scope context_scope( _context );
+ _global = Persistent< v8::Object >::New( _context->Global() );
+ _emptyObj = Persistent< v8::Object >::New( v8::Object::New() );
+
+ // initialize lazy object template
+ lzObjectTemplate = Persistent<ObjectTemplate>::New(ObjectTemplate::New());
+ lzObjectTemplate->SetInternalFieldCount( 1 );
+ lzObjectTemplate->SetNamedPropertyHandler(namedGet, namedSet, 0, namedDelete, 0, v8::External::New(this));
+ lzObjectTemplate->SetIndexedPropertyHandler(indexedGet, indexedSet, 0, indexedDelete, 0, v8::External::New(this));
+
+ roObjectTemplate = Persistent<ObjectTemplate>::New(ObjectTemplate::New());
+ roObjectTemplate->SetInternalFieldCount( 1 );
+ roObjectTemplate->SetNamedPropertyHandler(namedGetRO, NamedReadOnlySet, 0, NamedReadOnlyDelete, namedEnumerator, v8::External::New(this));
+ roObjectTemplate->SetIndexedPropertyHandler(indexedGetRO, IndexedReadOnlySet, 0, IndexedReadOnlyDelete, 0, v8::External::New(this));
+
+ // initialize lazy array template
+ // unfortunately it is not possible to create a true v8 array from a template,
+ // so we use an object template and copy methods over;
+ // this creates issues when calling certain methods that check the array type
+ lzArrayTemplate = Persistent<ObjectTemplate>::New(ObjectTemplate::New());
+ lzArrayTemplate->SetInternalFieldCount( 1 );
+ lzArrayTemplate->SetIndexedPropertyHandler(indexedGet, 0, 0, 0, 0, v8::External::New(this));
+
+ internalFieldObjects = Persistent<ObjectTemplate>::New(ObjectTemplate::New());
+ internalFieldObjects->SetInternalFieldCount( 1 );
+
+ V8STR_CONN = getV8Str( "_conn" );
+ V8STR_ID = getV8Str( "_id" );
+ V8STR_LENGTH = getV8Str( "length" );
+ V8STR_LEN = getV8Str( "len" );
+ V8STR_TYPE = getV8Str( "type" );
+ V8STR_ISOBJECTID = getV8Str( "isObjectId" );
+ V8STR_RETURN = getV8Str( "return" );
+ V8STR_ARGS = getV8Str( "args" );
+ V8STR_T = getV8Str( "t" );
+ V8STR_I = getV8Str( "i" );
+ V8STR_EMPTY = getV8Str( "" );
+ V8STR_MINKEY = getV8Str( "$MinKey" );
+ V8STR_MAXKEY = getV8Str( "$MaxKey" );
+ V8STR_NUMBERLONG = getV8Str( "__NumberLong" );
+ V8STR_NUMBERINT = getV8Str( "__NumberInt" );
+ V8STR_DBPTR = getV8Str( "__DBPointer" );
+ V8STR_BINDATA = getV8Str( "__BinData" );
+ V8STR_NATIVE_FUNC = getV8Str( "_native_function" );
+ V8STR_NATIVE_DATA = getV8Str( "_native_data" );
+ V8STR_V8_FUNC = getV8Str( "_v8_function" );
+ V8STR_RO = getV8Str( "_ro" );
+ V8STR_MODIFIED = getV8Str( "_mod" );
+ V8STR_FULLNAME = getV8Str( "_fullName" );
+
+ injectV8Function("print", Print);
+ injectV8Function("version", Version);
+ injectV8Function("load", load);
+
+ _wrapper = Persistent< v8::Function >::New( getObjectWrapperTemplate(this)->GetFunction() );
+
+ injectV8Function("gc", GCV8);
+
+ installDBTypes( this, _global );
+ }
+
+ V8Scope::~V8Scope() {
+ // make sure to disable interrupt, otherwise can get segfault on race condition
+ disableV8Interrupt();
+
+ {
+ V8_SIMPLE_HEADER
+ _wrapper.Dispose();
+ _emptyObj.Dispose();
+ for( unsigned i = 0; i < _funcs.size(); ++i )
+ _funcs[ i ].Dispose();
+ _funcs.clear();
+ _global.Dispose();
+ std::map <string, v8::Persistent <v8::String> >::iterator it = _strCache.begin();
+ std::map <string, v8::Persistent <v8::String> >::iterator end = _strCache.end();
+ while (it != end) {
+ it->second.Dispose();
+ ++it;
+ }
+ lzObjectTemplate.Dispose();
+ lzArrayTemplate.Dispose();
+ roObjectTemplate.Dispose();
+ internalFieldObjects.Dispose();
+ _context.Dispose();
+ }
+
+ _isolate->Dispose();
+ }
+
+ bool V8Scope::hasOutOfMemoryException() {
+ if (!_context.IsEmpty())
+ return _context->HasOutOfMemoryException();
+ return false;
+ }
+
+ /**
+ * JS Callback that will call a c++ function with BSON arguments.
+ */
+ Handle< Value > V8Scope::nativeCallback( V8Scope* scope, const Arguments &args ) {
+ V8Lock l;
+ HandleScope handle_scope;
+ Local< External > f = External::Cast( *args.Callee()->Get( scope->V8STR_NATIVE_FUNC ) );
+ NativeFunction function = (NativeFunction)(f->Value());
+ Local< External > data = External::Cast( *args.Callee()->Get( scope->V8STR_NATIVE_DATA ) );
+ BSONObjBuilder b;
+ for( int i = 0; i < args.Length(); ++i ) {
+ stringstream ss;
+ ss << i;
+ scope->v8ToMongoElement( b, ss.str(), args[ i ] );
+ }
+ BSONObj nativeArgs = b.obj();
+ BSONObj ret;
+ try {
+ ret = function( nativeArgs, data->Value() );
+ }
+ catch( const std::exception &e ) {
+ return v8::ThrowException(v8::String::New(e.what()));
+ }
+ catch( ... ) {
+ return v8::ThrowException(v8::String::New("unknown exception"));
+ }
+ return handle_scope.Close( scope->mongoToV8Element( ret.firstElement() ) );
+ }
+
+ Handle< Value > V8Scope::load( V8Scope* scope, const Arguments &args ) {
+ Context::Scope context_scope(scope->_context);
+ for (int i = 0; i < args.Length(); ++i) {
+ std::string filename(toSTLString(args[i]));
+ if (!scope->execFile(filename, false , true , false)) {
+ return v8::ThrowException(v8::String::New((std::string("error loading file: ") + filename).c_str()));
+ }
+ }
+ return v8::True();
+ }
+
+ /**
+ * JS Callback that will call a c++ function with the v8 scope and v8 arguments.
+ * Handles interrupts, exception handling, etc
+ *
+ * The implementation below assumes that SERVER-1816 has been fixed - in
+ * particular, interrupted() must return true if an interrupt was ever
+ * sent; currently that is not the case if a new killop overwrites the data
+ * for an old one
+ */
+ v8::Handle< v8::Value > V8Scope::v8Callback( const v8::Arguments &args ) {
+ Local< External > f = External::Cast( *args.Callee()->Get( v8::String::New( "_v8_function" ) ) );
+ v8Function function = (v8Function)(f->Value());
+ Local< External > scp = External::Cast( *args.Data() );
+ V8Scope* scope = (V8Scope*)(scp->Value());
+
+ // originally the v8 interrupt was disabled here because we did not want to audit all v8 calls for termination exceptions,
+ // but we need to keep the interrupt enabled because a lot of time may be spent here (e.g. sleep)
+ bool paused = scope->pauseV8Interrupt();
+
+ v8::Handle< v8::Value > ret;
+ string exception;
+ try {
+ ret = function( scope, args );
+ }
+ catch( const std::exception &e ) {
+ exception = e.what();
+ }
+ catch( ... ) {
+ exception = "unknown exception";
+ }
+ if (paused) {
+ bool resume = scope->resumeV8Interrupt();
+ if ( !resume || globalScriptEngine->interrupted() ) {
+ v8::V8::TerminateExecution(scope->_isolate);
+ return v8::ThrowException( v8::String::New( "Interruption in V8 native callback" ) );
+ }
+ }
+ if ( !exception.empty() ) {
+ return v8::ThrowException( v8::String::New( exception.c_str() ) );
+ }
+ return ret;
+ }
+
+ // ---- global stuff ----
+
+ void V8Scope::init( const BSONObj * data ) {
+ V8Lock l;
+ if ( ! data )
+ return;
+
+ BSONObjIterator i( *data );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ setElement( e.fieldName() , e );
+ }
+ }
+
+ void V8Scope::setNumber( const char * field , double val ) {
+ V8_SIMPLE_HEADER
+ _global->Set( getV8Str( field ) , v8::Number::New( val ) );
+ }
+
+ void V8Scope::setString( const char * field , const char * val ) {
+ V8_SIMPLE_HEADER
+ _global->Set( getV8Str( field ) , v8::String::New( val ) );
+ }
+
+ void V8Scope::setBoolean( const char * field , bool val ) {
+ V8_SIMPLE_HEADER
+ _global->Set( getV8Str( field ) , v8::Boolean::New( val ) );
+ }
+
+ void V8Scope::setElement( const char *field , const BSONElement& e ) {
+ V8_SIMPLE_HEADER
+ _global->Set( getV8Str( field ) , mongoToV8Element( e ) );
+ }
+
+ void V8Scope::setObject( const char *field , const BSONObj& obj , bool readOnly) {
+ V8_SIMPLE_HEADER
+ // Set() accepts a ReadOnly parameter, but this just prevents the field itself
+ // from being overwritten and doesn't protect the object stored in 'field'.
+ _global->Set( getV8Str( field ) , mongoToLZV8( obj, false, readOnly) );
+ }
+
+ int V8Scope::type( const char *field ) {
+ V8_SIMPLE_HEADER
+ Handle<Value> v = get( field );
+ if ( v->IsNull() )
+ return jstNULL;
+ if ( v->IsUndefined() )
+ return Undefined;
+ if ( v->IsString() )
+ return String;
+ if ( v->IsFunction() )
+ return Code;
+ if ( v->IsArray() )
+ return Array;
+ if ( v->IsBoolean() )
+ return Bool;
+ // needs to be explicit NumberInt to use integer
+// if ( v->IsInt32() )
+// return NumberInt;
+ if ( v->IsNumber() )
+ return NumberDouble;
+ if ( v->IsExternal() ) {
+ uassert( 10230 , "can't handle external yet" , 0 );
+ return -1;
+ }
+ if ( v->IsDate() )
+ return Date;
+ if ( v->IsObject() )
+ return Object;
+
+ throw UserException( 12509, (string)"don't know what this is: " + field );
+ }
+
+ v8::Handle<v8::Value> V8Scope::get( const char * field ) {
+ return _global->Get( getV8Str( field ) );
+ }
+
+ double V8Scope::getNumber( const char *field ) {
+ V8_SIMPLE_HEADER
+ return get( field )->ToNumber()->Value();
+ }
+
+ int V8Scope::getNumberInt( const char *field ) {
+ V8_SIMPLE_HEADER
+ return get( field )->ToInt32()->Value();
+ }
+
+ long long V8Scope::getNumberLongLong( const char *field ) {
+ V8_SIMPLE_HEADER
+ return get( field )->ToInteger()->Value();
+ }
+
+ string V8Scope::getString( const char *field ) {
+ V8_SIMPLE_HEADER
+ return toSTLString( get( field ) );
+ }
+
+ bool V8Scope::getBoolean( const char *field ) {
+ V8_SIMPLE_HEADER
+ return get( field )->ToBoolean()->Value();
+ }
+
+ BSONObj V8Scope::getObject( const char * field ) {
+ V8_SIMPLE_HEADER
+ Handle<Value> v = get( field );
+ if ( v->IsNull() || v->IsUndefined() )
+ return BSONObj();
+ uassert( 10231 , "not an object" , v->IsObject() );
+ return v8ToMongo( v->ToObject() );
+ }
+
+ // --- functions -----
+
+ bool hasFunctionIdentifier( const string& code ) {
+ if ( code.size() < 9 || code.find( "function" ) != 0 )
+ return false;
+
+ return code[8] == ' ' || code[8] == '(';
+ }
+
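+ // Compile raw user code into a JS function: bare expressions get an implicit "return"
+ // and are wrapped in "function(){...}", then the result is assigned to a numbered
+ // _funcsN global so it can be rooted and looked up later.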
+ Local< v8::Function > V8Scope::__createFunction( const char * raw ) {
+ raw = jsSkipWhiteSpace( raw );
+ string code = raw;
+ if ( !hasFunctionIdentifier( code ) ) {
+ if ( code.find( "\n" ) == string::npos &&
+ ! hasJSReturn( code ) &&
+ ( code.find( ";" ) == string::npos || code.find( ";" ) == code.size() - 1 ) ) {
+ code = "return " + code;
+ }
+ code = "function(){ " + code + "}";
+ }
+
+ int num = _funcs.size() + 1;
+
+ string fn;
+ {
+ stringstream ss;
+ ss << "_funcs" << num;
+ fn = ss.str();
+ }
+
+ code = fn + " = " + code;
+
+ TryCatch try_catch;
+ // this might be time consuming, consider allowing an interrupt
+ Handle<Script> script = v8::Script::Compile( v8::String::New( code.c_str() ) ,
+ v8::String::New( fn.c_str() ) );
+ if ( script.IsEmpty() ) {
+ _error = (string)"compile error: " + toSTLString( &try_catch );
+ log() << _error << endl;
+ return Local< v8::Function >();
+ }
+
+ Local<Value> result = script->Run();
+ if ( result.IsEmpty() ) {
+ _error = (string)"compile error: " + toSTLString( &try_catch );
+ log() << _error << endl;
+ return Local< v8::Function >();
+ }
+
+ return v8::Function::Cast( *_global->Get( v8::String::New( fn.c_str() ) ) );
+ }
+
+ ScriptingFunction V8Scope::_createFunction( const char * raw ) {
+ V8_SIMPLE_HEADER
+ Local< Value > ret = __createFunction( raw );
+ if ( ret.IsEmpty() )
+ return 0;
+ Persistent<Value> f = Persistent< Value >::New( ret );
+ uassert( 10232, "not a func" , f->IsFunction() );
+ int num = _funcs.size() + 1;
+ _funcs.push_back( f );
+ return num;
+ }
+
+ void V8Scope::setFunction( const char *field , const char * code ) {
+ V8_SIMPLE_HEADER
+ _global->Set( getV8Str( field ) , __createFunction(code) );
+ }
+
+// void V8Scope::setThis( const BSONObj * obj ) {
+// V8_SIMPLE_HEADER
+// if ( ! obj ) {
+// _this = Persistent< v8::Object >::New( v8::Object::New() );
+// return;
+// }
+//
+// //_this = mongoToV8( *obj );
+// v8::Handle<v8::Value> argv[1];
+// argv[0] = v8::External::New( createWrapperHolder( this, obj , true , false ) );
+// _this = Persistent< v8::Object >::New( _wrapper->NewInstance( 1, argv ) );
+// }
+
+ void V8Scope::rename( const char * from , const char * to ) {
+ V8_SIMPLE_HEADER;
+ Handle<v8::String> f = getV8Str( from );
+ Handle<v8::String> t = getV8Str( to );
+ _global->Set( t , _global->Get( f ) );
+ _global->Set( f , v8::Undefined() );
+ }
+
+ int V8Scope::invoke( ScriptingFunction func , const BSONObj* argsObject, const BSONObj* recv, int timeoutMs , bool ignoreReturn, bool readOnlyArgs, bool readOnlyRecv ) {
+ V8_SIMPLE_HEADER
+ Handle<Value> funcValue = _funcs[func-1];
+
+ TryCatch try_catch;
+ int nargs = argsObject ? argsObject->nFields() : 0;
+ scoped_array< Handle<Value> > args;
+ if ( nargs ) {
+ args.reset( new Handle<Value>[nargs] );
+ BSONObjIterator it( *argsObject );
+ for ( int i=0; i<nargs; i++ ) {
+ BSONElement next = it.next();
+ args[i] = mongoToV8Element( next, readOnlyArgs );
+ }
+ setObject( "args", *argsObject, readOnlyArgs); // for backwards compatibility
+ }
+ else {
+ _global->Set( V8STR_ARGS, v8::Undefined() );
+ }
+ if ( globalScriptEngine->interrupted() ) {
+ stringstream ss;
+ ss << "error in invoke: " << globalScriptEngine->checkInterrupt();
+ _error = ss.str();
+ log() << _error << endl;
+ return 1;
+ }
+ Handle<v8::Object> v8recv;
+ if (recv != 0)
+ v8recv = mongoToLZV8(*recv, false, readOnlyRecv);
+ else
+ v8recv = _global;
+
+ enableV8Interrupt(); // because of v8 locker we can check interrupted, then enable
+ Local<Value> result = ((v8::Function*)(*funcValue))->Call( v8recv , nargs , nargs ? args.get() : 0 );
+ disableV8Interrupt();
+
+ if ( result.IsEmpty() ) {
+ stringstream ss;
+ if ( try_catch.HasCaught() && !try_catch.CanContinue() ) {
+ ss << "error in invoke: " << globalScriptEngine->checkInterrupt();
+ }
+ else {
+ ss << "error in invoke: " << toSTLString( &try_catch );
+ }
+ _error = ss.str();
+ log() << _error << endl;
+ return 1;
+ }
+
+ if ( ! ignoreReturn ) {
+ _global->Set( V8STR_RETURN , result );
+ }
+
+ return 0;
+ }
+
+ bool V8Scope::exec( const StringData& code , const string& name , bool printResult , bool reportError , bool assertOnError, int timeoutMs ) {
+ if ( timeoutMs ) {
+ static bool t = 1;
+ if ( t ) {
+ log() << "timeoutMs not supported for v8 yet, code: " << code << endl;
+ t = 0;
+ }
+ }
+
+ V8_SIMPLE_HEADER
+
+ TryCatch try_catch;
+
+ Handle<Script> script = v8::Script::Compile( v8::String::New( code.data() ) ,
+ v8::String::New( name.c_str() ) );
+ if (script.IsEmpty()) {
+ stringstream ss;
+ ss << "compile error: " << toSTLString( &try_catch );
+ _error = ss.str();
+ if (reportError)
+ log() << _error << endl;
+ if ( assertOnError )
+ uassert( 10233 , _error , 0 );
+ return false;
+ }
+
+ if ( globalScriptEngine->interrupted() ) {
+ _error = (string)"exec error: " + globalScriptEngine->checkInterrupt();
+ if ( reportError ) {
+ log() << _error << endl;
+ }
+ if ( assertOnError ) {
+ uassert( 13475 , _error , 0 );
+ }
+ return false;
+ }
+ enableV8Interrupt(); // because of v8 locker we can check interrupted, then enable
+ Handle<v8::Value> result = script->Run();
+ disableV8Interrupt();
+ if ( result.IsEmpty() ) {
+ if ( try_catch.HasCaught() && !try_catch.CanContinue() ) {
+ _error = (string)"exec error: " + globalScriptEngine->checkInterrupt();
+ }
+ else {
+ _error = (string)"exec error: " + toSTLString( &try_catch );
+ }
+ if ( reportError )
+ log() << _error << endl;
+ if ( assertOnError )
+ uassert( 10234 , _error , 0 );
+ return false;
+ }
+
+ _global->Set( getV8Str( "__lastres__" ) , result );
+
+ if ( printResult && ! result->IsUndefined() ) {
+ cout << toSTLString( result ) << endl;
+ }
+
+ return true;
+ }
+
+ void V8Scope::injectNative( const char *field, NativeFunction func, void* data ) {
+ injectNative(field, func, _global, data);
+ }
+
+ void V8Scope::injectNative( const char *field, NativeFunction func, Handle<v8::Object>& obj, void* data ) {
+ V8_SIMPLE_HEADER
+
+ Handle< FunctionTemplate > ft = createV8Function(nativeCallback);
+ ft->Set( this->V8STR_NATIVE_FUNC, External::New( (void*)func ) );
+ ft->Set( this->V8STR_NATIVE_DATA, External::New( data ) );
+ obj->Set( getV8Str( field ), ft->GetFunction() );
+ }
+
+ void V8Scope::injectV8Function( const char *field, v8Function func ) {
+ injectV8Function(field, func, _global);
+ }
+
+ void V8Scope::injectV8Function( const char *field, v8Function func, Handle<v8::Object>& obj ) {
+ V8_SIMPLE_HEADER
+
+ Handle< FunctionTemplate > ft = createV8Function(func);
+ Handle<v8::Function> f = ft->GetFunction();
+ obj->Set( getV8Str( field ), f );
+ }
+
+ void V8Scope::injectV8Function( const char *field, v8Function func, Handle<v8::Template>& t ) {
+ V8_SIMPLE_HEADER
+
+ Handle< FunctionTemplate > ft = createV8Function(func);
+ Handle<v8::Function> f = ft->GetFunction();
+ t->Set( getV8Str( field ), f );
+ }
+
+ Handle<FunctionTemplate> V8Scope::createV8Function( v8Function func ) {
+ Handle< FunctionTemplate > ft = v8::FunctionTemplate::New(v8Callback, External::New( this ));
+ ft->Set( this->V8STR_V8_FUNC, External::New( (void*)func ) );
+ return ft;
+ }
+
+ void V8Scope::gc() {
+ cout << "in gc" << endl;
+ V8Lock l;
+ V8::LowMemoryNotification();
+ }
+
+ // ----- db access -----
+
+ void V8Scope::localConnect( const char * dbName ) {
+ {
+ V8_SIMPLE_HEADER
+
+ if ( _connectState == EXTERNAL )
+ throw UserException( 12510, "externalSetup already called, can't call localConnect" );
+ if ( _connectState == LOCAL ) {
+ if ( _localDBName == dbName )
+ return;
+ throw UserException( 12511, "localConnect called with a different name previously" );
+ }
+
+ //_global->Set( v8::String::New( "Mongo" ) , _engine->_externalTemplate->GetFunction() );
+ _global->Set( getV8Str( "Mongo" ) , getMongoFunctionTemplate( this, true )->GetFunction() );
+ execCoreFiles();
+ exec( "_mongo = new Mongo();" , "local connect 2" , false , true , true , 0 );
+ exec( (string)"db = _mongo.getDB(\"" + dbName + "\");" , "local connect 3" , false , true , true , 0 );
+ _connectState = LOCAL;
+ _localDBName = dbName;
+ }
+ loadStored();
+ }
+
+ void V8Scope::externalSetup() {
+ V8_SIMPLE_HEADER
+ if ( _connectState == EXTERNAL )
+ return;
+ if ( _connectState == LOCAL )
+ throw UserException( 12512, "localConnect already called, can't call externalSetup" );
+
+ installFork( this, _global, _context );
+ _global->Set( getV8Str( "Mongo" ) , getMongoFunctionTemplate( this, false )->GetFunction() );
+ execCoreFiles();
+ _connectState = EXTERNAL;
+ }
+
+ // ----- internal -----
+
+ void V8Scope::reset() {
+ _startCall();
+ }
+
+ void V8Scope::_startCall() {
+ _error = "";
+ }
+
+ Local< v8::Value > newFunction( const char *code ) {
+ stringstream codeSS;
+ codeSS << "____MontoToV8_newFunction_temp = " << code;
+ string codeStr = codeSS.str();
+ Local< Script > compiled = Script::New( v8::String::New( codeStr.c_str() ) );
+ Local< Value > ret = compiled->Run();
+ return ret;
+ }
+
+ Local< v8::Value > V8Scope::newId( const OID &id ) {
+ v8::Function * idCons = this->getObjectIdCons();
+ v8::Handle<v8::Value> argv[1];
+ argv[0] = v8::String::New( id.str().c_str() );
+ return idCons->NewInstance( 1 , argv );
+ }
+
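+ // Eagerly convert a BSONObj into a v8 object (or array); sub-objects are handed to
+ // mongoToLZV8, which appears to wrap them lazily via the object templates above.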
+ Local<v8::Object> V8Scope::mongoToV8( const BSONObj& m , bool array, bool readOnly ) {
+
+ Local<v8::Object> o;
+
+ // handle DBRef. needs to come first, doesn't it? (metagoto)
+ static string ref = "$ref";
+ if ( ref == m.firstElement().fieldName() ) {
+ const BSONElement& id = m["$id"];
+ if (!id.eoo()) { // there's no check on $id existence in the sm implementation. risky?
+ v8::Function* dbRef = getNamedCons( "DBRef" );
+ o = dbRef->NewInstance();
+ }
+ }
+
+ Local< v8::ObjectTemplate > readOnlyObjects;
+
+ if ( !o.IsEmpty() ) {
+ readOnly = false;
+ }
+ else if ( array ) {
+ // NOTE Looks like it's impossible to add interceptors to v8 arrays.
+ // so array itself will never be read only, but its values can be
+ o = v8::Array::New();
+ }
+ else if ( !readOnly ) {
+ o = v8::Object::New();
+ }
+ else {
+            // NOTE Our readOnly implementation relies on undocumented ObjectTemplate
+ // functionality that may be fragile, but it still seems like the best option
+ // for now -- fwiw, the v8 docs are pretty sparse. I've determined experimentally
+ // that when property handlers are set for an object template, they will attach
+ // to objects previously created by that template. To get this to work, though,
+ // it is necessary to initialize the template's property handlers before
+ // creating objects from the template (as I have in the following few lines
+ // of code).
+ // NOTE In my first attempt, I configured the permanent property handlers before
+            // constructing the object and replaced the Set() calls below with ForceSet().
+ // However, it turns out that ForceSet() only bypasses handlers for named
+ // properties and not for indexed properties.
+ readOnlyObjects = v8::ObjectTemplate::New();
+ // NOTE This internal field will store type info for special db types. For
+ // regular objects the field is unnecessary - for simplicity I'm creating just
+ // one readOnlyObjects template for objects where the field is & isn't necessary,
+ // assuming that the overhead of an internal field is slight.
+ readOnlyObjects->SetInternalFieldCount( 1 );
+ readOnlyObjects->SetNamedPropertyHandler( 0 );
+ readOnlyObjects->SetIndexedPropertyHandler( 0 );
+ o = readOnlyObjects->NewInstance();
+ }
+
+ mongo::BSONObj sub;
+
+ for ( BSONObjIterator i(m); i.more(); ) {
+ const BSONElement& f = i.next();
+
+ Local<Value> v;
+ Handle<v8::String> name = getV8Str(f.fieldName());
+
+ switch ( f.type() ) {
+
+ case mongo::Code:
+ o->Set( name, newFunction( f.valuestr() ) );
+ break;
+
+ case CodeWScope:
+ if ( !f.codeWScopeObject().isEmpty() )
+ log() << "warning: CodeWScope doesn't transfer to db.eval" << endl;
+ o->Set( name, newFunction( f.codeWScopeCode() ) );
+ break;
+
+ case mongo::String:
+ o->Set( name , v8::String::New( f.valuestr() ) );
+ break;
+
+ case mongo::jstOID: {
+ v8::Function * idCons = getObjectIdCons();
+ v8::Handle<v8::Value> argv[1];
+ argv[0] = v8::String::New( f.__oid().str().c_str() );
+ o->Set( name ,
+ idCons->NewInstance( 1 , argv ) );
+ break;
+ }
+
+ case mongo::NumberDouble:
+ case mongo::NumberInt:
+ o->Set( name , v8::Number::New( f.number() ) );
+ break;
+
+// case mongo::NumberInt: {
+// Local<v8::Object> sub = readOnly ? readOnlyObjects->NewInstance() : internalFieldObjects->NewInstance();
+// int val = f.numberInt();
+// v8::Function* numberInt = getNamedCons( "NumberInt" );
+// v8::Handle<v8::Value> argv[1];
+// argv[0] = v8::Int32::New( val );
+// o->Set( name, numberInt->NewInstance( 1, argv ) );
+// break;
+// }
+
+ case mongo::Array:
+ sub = f.embeddedObject();
+ o->Set( name , mongoToV8( sub , true, readOnly ) );
+ break;
+ case mongo::Object:
+ sub = f.embeddedObject();
+ o->Set( name , mongoToLZV8( sub , false, readOnly ) );
+ break;
+
+ case mongo::Date:
+ o->Set( name , v8::Date::New( (double) ((long long)f.date().millis) ));
+ break;
+
+ case mongo::Bool:
+ o->Set( name , v8::Boolean::New( f.boolean() ) );
+ break;
+
+ case mongo::jstNULL:
+ case mongo::Undefined: // duplicate sm behavior
+ o->Set( name , v8::Null() );
+ break;
+
+ case mongo::RegEx: {
+ v8::Function * regex = getNamedCons( "RegExp" );
+
+ v8::Handle<v8::Value> argv[2];
+ argv[0] = v8::String::New( f.regex() );
+ argv[1] = v8::String::New( f.regexFlags() );
+
+ o->Set( name , regex->NewInstance( 2 , argv ) );
+ break;
+ }
+
+ case mongo::BinData: {
+ int len;
+ const char *data = f.binData( len );
+
+ v8::Function* binData = getNamedCons( "BinData" );
+ v8::Handle<v8::Value> argv[3];
+ argv[0] = v8::Number::New( len );
+ argv[1] = v8::Number::New( f.binDataType() );
+ argv[2] = v8::String::New( data, len );
+ o->Set( name, binData->NewInstance(3, argv) );
+ break;
+ }
+
+ case mongo::Timestamp: {
+ Local<v8::Object> sub = readOnly ? readOnlyObjects->NewInstance() : internalFieldObjects->NewInstance();
+
+ sub->Set( V8STR_T , v8::Number::New( f.timestampTime() ) );
+ sub->Set( V8STR_I , v8::Number::New( f.timestampInc() ) );
+ sub->SetInternalField( 0, v8::Uint32::New( f.type() ) );
+
+ o->Set( name , sub );
+ break;
+ }
+
+ case mongo::NumberLong: {
+ unsigned long long val = f.numberLong();
+ v8::Function* numberLong = getNamedCons( "NumberLong" );
+ double floatApprox = (double)(long long)val;
+ // values above 2^53 are not accurately represented in JS
+ if ( (long long)val == (long long)floatApprox && val < 9007199254740992ULL ) {
+ v8::Handle<v8::Value> argv[1];
+ argv[0] = v8::Number::New( floatApprox );
+ o->Set( name, numberLong->NewInstance( 1, argv ) );
+ }
+ else {
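+                    // doesn't fit exactly in a double: pass the float approximation plus the
+                    // high ("top") and low ("bottom") 32-bit halves so NumberLong keeps full precision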
+ v8::Handle<v8::Value> argv[3];
+ argv[0] = v8::Number::New( floatApprox );
+ argv[1] = v8::Integer::New( val >> 32 );
+ argv[2] = v8::Integer::New( (unsigned long)(val & 0x00000000ffffffff) );
+ o->Set( name, numberLong->NewInstance(3, argv) );
+ }
+ break;
+ }
+
+ case mongo::MinKey: {
+ Local<v8::Object> sub = readOnly ? readOnlyObjects->NewInstance() : internalFieldObjects->NewInstance();
+ sub->Set( V8STR_MINKEY, v8::Boolean::New( true ) );
+ sub->SetInternalField( 0, v8::Uint32::New( f.type() ) );
+ o->Set( name , sub );
+ break;
+ }
+
+ case mongo::MaxKey: {
+ Local<v8::Object> sub = readOnly ? readOnlyObjects->NewInstance() : internalFieldObjects->NewInstance();
+ sub->Set( V8STR_MAXKEY, v8::Boolean::New( true ) );
+ sub->SetInternalField( 0, v8::Uint32::New( f.type() ) );
+ o->Set( name , sub );
+ break;
+ }
+
+ case mongo::DBRef: {
+ v8::Function* dbPointer = getNamedCons( "DBPointer" );
+ v8::Handle<v8::Value> argv[2];
+ argv[0] = getV8Str( f.dbrefNS() );
+ argv[1] = newId( f.dbrefOID() );
+ o->Set( name, dbPointer->NewInstance(2, argv) );
+ break;
+ }
+
+ default:
+ cout << "can't handle type: ";
+ cout << f.type() << " ";
+ cout << f.toString();
+ cout << endl;
+ break;
+ }
+
+ }
+
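+        // install the real read-only interceptors only after the object has been populated;
+        // per the NOTE above, they also attach to the instance already created from readOnlyObjects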
+ if ( !array && readOnly ) {
+ readOnlyObjects->SetNamedPropertyHandler( 0, NamedReadOnlySet, 0, NamedReadOnlyDelete );
+ readOnlyObjects->SetIndexedPropertyHandler( 0, IndexedReadOnlySet, 0, IndexedReadOnlyDelete );
+ }
+
+ return o;
+ }
+
+ /**
+ * converts a BSONObj to a Lazy V8 object
+ */
+ Handle<v8::Object> V8Scope::mongoToLZV8( const BSONObj& m , bool array, bool readOnly ) {
+ Local<v8::Object> o;
+
+ if (readOnly) {
+ o = roObjectTemplate->NewInstance();
+ o->SetHiddenValue(V8STR_RO, v8::Boolean::New(true));
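+            // the RO marker lets v8ToMongo() hand back the wrapped BSONObj as-is instead of rebuilding it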
+ } else {
+ if (array) {
+ o = lzArrayTemplate->NewInstance();
+ o->SetPrototype(v8::Array::New(1)->GetPrototype());
+ o->Set(V8STR_LENGTH, v8::Integer::New(m.nFields()), DontEnum);
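+                // use an Array prototype so JS array methods work on the lazy wrapper; length stays non-enumerable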
+ // o->Set(ARRAY_STRING, v8::Boolean::New(true), DontEnum);
+ } else {
+ o = lzObjectTemplate->NewInstance();
+
+ static string ref = "$ref";
+ if ( ref == m.firstElement().fieldName() ) {
+ const BSONElement& id = m["$id"];
+ if (!id.eoo()) {
+ v8::Function* dbRef = getNamedCons( "DBRef" );
+ o->SetPrototype(dbRef->NewInstance()->GetPrototype());
+ }
+ }
+ }
+
+            // need to set all keys with dummy values so that key order is correct during enumeration;
+            // otherwise v8 will list any newly set property in JS before the ones of the underlying BSON obj.
+ for (BSONObjIterator it(m); it.more();) {
+ const BSONElement& f = it.next();
+ o->ForceSet(getV8Str(f.fieldName()), v8::Undefined());
+ }
+ }
+
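+        // keep an owned copy so the lazy wrapper doesn't point into memory owned by the caller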
+ BSONObj* own = new BSONObj(m.getOwned());
+// BSONObj* own = new BSONObj(m);
+ Persistent<v8::Object> p = wrapBSONObject(o, own);
+ return p;
+ }
+
+ Handle<v8::Value> V8Scope::mongoToV8Element( const BSONElement &f, bool readOnly ) {
+// Local< v8::ObjectTemplate > internalFieldObjects = v8::ObjectTemplate::New();
+// internalFieldObjects->SetInternalFieldCount( 1 );
+
+ switch ( f.type() ) {
+
+ case mongo::Code:
+ return newFunction( f.valuestr() );
+
+ case CodeWScope:
+ if ( !f.codeWScopeObject().isEmpty() )
+ log() << "warning: CodeWScope doesn't transfer to db.eval" << endl;
+ return newFunction( f.codeWScopeCode() );
+
+ case mongo::String:
+// return v8::String::NewExternal( new ExternalString( f.valuestr() ));
+ return v8::String::New( f.valuestr() );
+// return getV8Str( f.valuestr() );
+
+ case mongo::jstOID:
+ return newId( f.__oid() );
+
+ case mongo::NumberDouble:
+ case mongo::NumberInt:
+ return v8::Number::New( f.number() );
+
+ case mongo::Array:
+ // for arrays it's better to use non lazy object because:
+ // - the lazy array is not a true v8 array and requires some v8 src change for all methods to work
+ // - it made several tests about 1.5x slower
+ // - most times when an array is accessed, all its values will be used
+ return mongoToV8( f.embeddedObject() , true, readOnly );
+ case mongo::Object:
+ return mongoToLZV8( f.embeddedObject() , false, readOnly);
+
+ case mongo::Date:
+ return v8::Date::New( (double) ((long long)f.date().millis) );
+
+ case mongo::Bool:
+ return v8::Boolean::New( f.boolean() );
+
+ case mongo::EOO:
+ case mongo::jstNULL:
+ case mongo::Undefined: // duplicate sm behavior
+ return v8::Null();
+
+ case mongo::RegEx: {
+ v8::Function * regex = getNamedCons( "RegExp" );
+
+ v8::Handle<v8::Value> argv[2];
+ argv[0] = v8::String::New( f.regex() );
+ argv[1] = v8::String::New( f.regexFlags() );
+
+ return regex->NewInstance( 2 , argv );
+ break;
+ }
+
+ case mongo::BinData: {
+ int len;
+ const char *data = f.binData( len );
+
+ v8::Function* binData = getNamedCons( "BinData" );
+ v8::Handle<v8::Value> argv[3];
+ argv[0] = v8::Number::New( len );
+ argv[1] = v8::Number::New( f.binDataType() );
+ argv[2] = v8::String::New( data, len );
+ return binData->NewInstance( 3, argv );
+        }
+
+ case mongo::Timestamp: {
+ Local<v8::Object> sub = internalFieldObjects->NewInstance();
+
+ sub->Set( V8STR_T , v8::Number::New( f.timestampTime() ) );
+ sub->Set( V8STR_I , v8::Number::New( f.timestampInc() ) );
+ sub->SetInternalField( 0, v8::Uint32::New( f.type() ) );
+
+ return sub;
+ }
+
+ case mongo::NumberLong: {
+ unsigned long long val = f.numberLong();
+ v8::Function* numberLong = getNamedCons( "NumberLong" );
+ // values above 2^53 are not accurately represented in JS
+ if ( (long long)val == (long long)(double)(long long)(val) && val < 9007199254740992ULL ) {
+ v8::Handle<v8::Value> argv[1];
+ argv[0] = v8::Number::New( (double)(long long)( val ) );
+ return numberLong->NewInstance( 1, argv );
+ }
+ else {
+ v8::Handle<v8::Value> argv[3];
+ argv[0] = v8::Number::New( (double)(long long)( val ) );
+ argv[1] = v8::Integer::New( val >> 32 );
+ argv[2] = v8::Integer::New( (unsigned long)(val & 0x00000000ffffffff) );
+ return numberLong->NewInstance( 3, argv );
+ }
+ }
+
+// case mongo::NumberInt: {
+// Local<v8::Object> sub = internalFieldObjects->NewInstance();
+// int val = f.numberInt();
+// v8::Function* numberInt = getNamedCons( "NumberInt" );
+// v8::Handle<v8::Value> argv[1];
+// argv[0] = v8::Int32::New(val);
+// return numberInt->NewInstance( 1, argv );
+// }
+
+ case mongo::MinKey: {
+ Local<v8::Object> sub = internalFieldObjects->NewInstance();
+ sub->Set( V8STR_MINKEY, v8::Boolean::New( true ) );
+ sub->SetInternalField( 0, v8::Uint32::New( f.type() ) );
+ return sub;
+ }
+
+ case mongo::MaxKey: {
+ Local<v8::Object> sub = internalFieldObjects->NewInstance();
+ sub->Set( V8STR_MAXKEY, v8::Boolean::New( true ) );
+ sub->SetInternalField( 0, v8::Uint32::New( f.type() ) );
+ return sub;
+ }
+
+ case mongo::DBRef: {
+ v8::Function* dbPointer = getNamedCons( "DBPointer" );
+ v8::Handle<v8::Value> argv[2];
+ argv[0] = getV8Str( f.dbrefNS() );
+ argv[1] = newId( f.dbrefOID() );
+ return dbPointer->NewInstance(2, argv);
+ }
+
+ default:
+ cout << "can't handle type: ";
+ cout << f.type() << " ";
+ cout << f.toString();
+ cout << endl;
+ break;
+ }
+
+ return v8::Undefined();
+ }
+
+ void V8Scope::append( BSONObjBuilder & builder , const char * fieldName , const char * scopeName ) {
+ V8_SIMPLE_HEADER
+ Handle<v8::String> v8name = getV8Str(scopeName);
+ Handle<Value> value = _global->Get( v8name );
+ v8ToMongoElement(builder, fieldName, value);
+ }
+
+ void V8Scope::v8ToMongoElement( BSONObjBuilder & b , const string sname , v8::Handle<v8::Value> value , int depth, BSONObj* originalParent ) {
+
+ if ( value->IsString() ) {
+// Handle<v8::String> str = Handle<v8::String>::Cast(value);
+// ExternalString* es = (ExternalString*) (str->GetExternalAsciiStringResource());
+// b.append( sname , es->data() );
+ b.append( sname , toSTLString( value ).c_str() );
+ return;
+ }
+
+ if ( value->IsFunction() ) {
+ b.appendCode( sname , toSTLString( value ) );
+ return;
+ }
+
+ if ( value->IsNumber() ) {
+ double val = value->ToNumber()->Value();
+ // if previous type was integer, keep it
+ int intval = (int)val;
+ if (val == intval && originalParent) {
+ BSONElement elmt = originalParent->getField(sname);
+ if (elmt.type() == mongo::NumberInt) {
+ b.append( sname , intval );
+ return;
+ }
+ }
+
+ b.append( sname , val );
+ return;
+ }
+
+ if ( value->IsArray() ) {
+ BSONObj sub = v8ToMongo( value->ToObject() , depth );
+ b.appendArray( sname , sub );
+ return;
+ }
+
+ if ( value->IsDate() ) {
+ long long dateval = (long long)(v8::Date::Cast( *value )->NumberValue());
+ b.appendDate( sname , Date_t( (unsigned long long) dateval ) );
+ return;
+ }
+
+ if ( value->IsExternal() )
+ return;
+
+ if ( value->IsObject() ) {
+ // The user could potentially modify the fields of these special objects,
+ // wreaking havoc when we attempt to reinterpret them. Not doing any validation
+ // for now...
+ Local< v8::Object > obj = value->ToObject();
+ if ( obj->InternalFieldCount() && obj->GetInternalField( 0 )->IsNumber() ) {
+ switch( obj->GetInternalField( 0 )->ToInt32()->Value() ) { // NOTE Uint32's Value() gave me a linking error, so going with this instead
+ case Timestamp:
+ b.appendTimestamp( sname,
+ Date_t( (unsigned long long)(obj->Get( V8STR_T )->ToNumber()->Value() )),
+ obj->Get( V8STR_I )->ToInt32()->Value() );
+ return;
+ case MinKey:
+ b.appendMinKey( sname );
+ return;
+ case MaxKey:
+ b.appendMaxKey( sname );
+ return;
+ default:
+ assert( "invalid internal field" == 0 );
+ }
+ }
+ string s = toSTLString( value );
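+            // a stringified RegExp looks like "/pattern/flags"; split at the last '/' to recover pattern and options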
+ if ( s.size() && s[0] == '/' ) {
+ s = s.substr( 1 );
+ string r = s.substr( 0 , s.rfind( "/" ) );
+ string o = s.substr( s.rfind( "/" ) + 1 );
+ b.appendRegex( sname , r , o );
+ }
+ else if ( value->ToObject()->GetPrototype()->IsObject() &&
+ value->ToObject()->GetPrototype()->ToObject()->HasRealNamedProperty( V8STR_ISOBJECTID ) ) {
+ OID oid;
+ oid.init( toSTLString( value->ToObject()->Get(getV8Str("str")) ) );
+ b.appendOID( sname , &oid );
+ }
+ else if ( !value->ToObject()->GetHiddenValue( V8STR_NUMBERLONG ).IsEmpty() ) {
+ // TODO might be nice to potentially speed this up with an indexed internal
+ // field, but I don't yet know how to use an ObjectTemplate with a
+ // constructor.
+ v8::Handle< v8::Object > it = value->ToObject();
+ long long val;
+ if ( !it->Has( getV8Str( "top" ) ) ) {
+ val = (long long)( it->Get( getV8Str( "floatApprox" ) )->NumberValue() );
+ }
+ else {
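+                    // reassemble the 64-bit value from the "top" and "bottom" 32-bit halves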
+ val = (long long)
+ ( (unsigned long long)( it->Get( getV8Str( "top" ) )->ToInt32()->Value() ) << 32 ) +
+ (unsigned)( it->Get( getV8Str( "bottom" ) )->ToInt32()->Value() );
+ }
+
+ b.append( sname, val );
+ }
+ else if ( !value->ToObject()->GetHiddenValue( V8STR_NUMBERINT ).IsEmpty() ) {
+ v8::Handle< v8::Object > it = value->ToObject();
+ b.append(sname, it->GetHiddenValue(V8STR_NUMBERINT)->Int32Value());
+ }
+ else if ( !value->ToObject()->GetHiddenValue( V8STR_DBPTR ).IsEmpty() ) {
+ OID oid;
+ Local<Value> theid = value->ToObject()->Get( getV8Str( "id" ) );
+ oid.init( toSTLString( theid->ToObject()->Get(getV8Str("str")) ) );
+ string ns = toSTLString( value->ToObject()->Get( getV8Str( "ns" ) ) );
+ b.appendDBRef( sname, ns, oid );
+ }
+ else if ( !value->ToObject()->GetHiddenValue( V8STR_BINDATA ).IsEmpty() ) {
+ int len = obj->Get( getV8Str( "len" ) )->ToInt32()->Value();
+ Local<External> c = External::Cast( *(obj->GetInternalField( 0 )) );
+                const char* dataArray = (char*)(c->Value());
+ b.appendBinData( sname,
+ len,
+ mongo::BinDataType( obj->Get( getV8Str( "type" ) )->ToInt32()->Value() ),
+ dataArray );
+ }
+ else {
+ BSONObj sub = v8ToMongo( value->ToObject() , depth );
+ b.append( sname , sub );
+ }
+ return;
+ }
+
+ if ( value->IsBoolean() ) {
+ b.appendBool( sname , value->ToBoolean()->Value() );
+ return;
+ }
+
+ else if ( value->IsUndefined() ) {
+ b.appendUndefined( sname );
+ return;
+ }
+
+ else if ( value->IsNull() ) {
+ b.appendNull( sname );
+ return;
+ }
+
+ cout << "don't know how to convert to mongo field [" << sname << "]\t" << value << endl;
+ }
+
+ BSONObj V8Scope::v8ToMongo( v8::Handle<v8::Object> o , int depth ) {
+ BSONObj* originalBSON = 0;
+ if (o->InternalFieldCount() > 0) {
+ originalBSON = unwrapBSONObj(o);
+
+ if ( !o->GetHiddenValue( V8STR_RO ).IsEmpty() ||
+ (o->HasNamedLookupInterceptor() && o->GetHiddenValue( V8STR_MODIFIED ).IsEmpty()) ) {
+ // object was readonly, use bson as is
+ return *originalBSON;
+ }
+ }
+
+ BSONObjBuilder b;
+
+ if ( depth == 0 ) {
+ if ( o->HasRealNamedProperty( V8STR_ID ) ) {
+ v8ToMongoElement( b , "_id" , o->Get( V8STR_ID ), 0, originalBSON );
+ }
+ }
+
+ Local<v8::Array> names = o->GetPropertyNames();
+ for ( unsigned int i=0; i<names->Length(); i++ ) {
+ v8::Local<v8::String> name = names->Get( i )->ToString();
+
+// if ( o->GetPrototype()->IsObject() &&
+// o->GetPrototype()->ToObject()->HasRealNamedProperty( name ) )
+// continue;
+
+ v8::Local<v8::Value> value = o->Get( name );
+
+ const string sname = toSTLString( name );
+ if ( depth == 0 && sname == "_id" )
+ continue;
+
+ v8ToMongoElement( b , sname , value , depth + 1, originalBSON );
+ }
+ return b.obj();
+ }
+
+ // --- random utils ----
+
+ v8::Function * V8Scope::getNamedCons( const char * name ) {
+ return v8::Function::Cast( *(v8::Context::GetCurrent()->Global()->Get( getV8Str( name ) ) ) );
+ }
+
+ v8::Function * V8Scope::getObjectIdCons() {
+ return getNamedCons( "ObjectId" );
+ }
+
+ Handle<v8::Value> V8Scope::Print(V8Scope* scope, const Arguments& args) {
+ bool first = true;
+ for (int i = 0; i < args.Length(); i++) {
+ HandleScope handle_scope;
+ if (first) {
+ first = false;
+ }
+ else {
+ printf(" ");
+ }
+ v8::String::Utf8Value str(args[i]);
+ printf("%s", *str);
+ }
+ printf("\n");
+ return v8::Undefined();
+ }
+
+ Handle<v8::Value> V8Scope::Version(V8Scope* scope, const Arguments& args) {
+ HandleScope handle_scope;
+ return handle_scope.Close( v8::String::New(v8::V8::GetVersion()) );
+ }
+
+ Handle<v8::Value> V8Scope::GCV8(V8Scope* scope, const Arguments& args) {
+ V8Lock l;
+ v8::V8::LowMemoryNotification();
+ return v8::Undefined();
+ }
+
+ /**
+     * Gets a V8 string from the scope's cache, creating it if needed
+ */
+ v8::Handle<v8::String> V8Scope::getV8Str(string str) {
+ Persistent<v8::String> ptr = _strCache[str];
+ if (ptr.IsEmpty()) {
+ ptr = Persistent<v8::String>::New(v8::String::New(str.c_str()));
+ _strCache[str] = ptr;
+// cout << "Adding str " + str << endl;
+ }
+// cout << "Returning str " + str << endl;
+ return ptr;
+ }
+
+ // to be called with v8 mutex
+ void V8Scope::enableV8Interrupt() {
+ v8Locks::InterruptLock l;
+ if ( globalScriptEngine->haveGetInterruptSpecCallback() ) {
+ unsigned op = globalScriptEngine->getInterruptSpec();
+ __interruptSpecToThreadId[ op ] = v8::V8::GetCurrentThreadId();
+ __interruptSpecToIsolate[ op ] = _isolate;
+ }
+ }
+
+ // to be called with v8 mutex
+ void V8Scope::disableV8Interrupt() {
+ v8Locks::InterruptLock l;
+ if ( globalScriptEngine->haveGetInterruptSpecCallback() ) {
+ unsigned op = globalScriptEngine->getInterruptSpec();
+ __interruptSpecToIsolate.erase( op );
+ __interruptSpecToThreadId.erase( op );
+ }
+ }
+
+ // to be called with v8 mutex
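+    // thread-id sentinels used below: -2 marks the entry as paused, -3 as interrupted (both count as paused)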
+ bool V8Scope::pauseV8Interrupt() {
+ v8Locks::InterruptLock l;
+ if ( globalScriptEngine->haveGetInterruptSpecCallback() ) {
+ unsigned op = globalScriptEngine->getInterruptSpec();
+ int thread = __interruptSpecToThreadId[ op ];
+ if ( thread == -2 || thread == -3) {
+ // already paused
+ return false;
+ }
+ __interruptSpecToThreadId[ op ] = -2;
+ }
+ return true;
+ }
+
+ // to be called with v8 mutex
+ bool V8Scope::resumeV8Interrupt() {
+ v8Locks::InterruptLock l;
+ if ( globalScriptEngine->haveGetInterruptSpecCallback() ) {
+ unsigned op = globalScriptEngine->getInterruptSpec();
+ if (__interruptSpecToThreadId[ op ] == -3) {
+ // was interrupted
+ return false;
+ }
+ __interruptSpecToThreadId[ op ] = v8::V8::GetCurrentThreadId();
+ }
+ return true;
+ }
+
+} // namespace mongo
diff --git a/src/mongo/scripting/engine_v8.h b/src/mongo/scripting/engine_v8.h
new file mode 100644
index 00000000000..48a9858c63b
--- /dev/null
+++ b/src/mongo/scripting/engine_v8.h
@@ -0,0 +1,254 @@
+//engine_v8.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <vector>
+#include "engine.h"
+#include <v8.h>
+
+using namespace v8;
+
+namespace mongo {
+
+ class V8ScriptEngine;
+ class V8Scope;
+
+ typedef Handle< Value > (*v8Function) ( V8Scope* scope, const v8::Arguments& args );
+
+ // Preemption is going to be allowed for the v8 mutex, and some of our v8
+ // usage is not preemption safe. So we are using an additional mutex that
+ // will not be preempted. The V8Lock should be used in place of v8::Locker
+ // except in certain special cases involving interrupts.
+ namespace v8Locks {
+ struct InterruptLock {
+ InterruptLock();
+ ~InterruptLock();
+ };
+
+ // the implementations are quite simple - objects must be destroyed in
+ // reverse of the order created, and should not be shared between threads
+ struct RecursiveLock {
+ RecursiveLock();
+ ~RecursiveLock();
+ bool _unlock;
+ };
+ struct RecursiveUnlock {
+ RecursiveUnlock();
+ ~RecursiveUnlock();
+ bool _lock;
+ };
+ } // namespace v8Locks
+ class V8Lock {
+ public:
+ V8Lock() : _preemptionLock(Isolate::GetCurrent()){}
+
+ private:
+ v8Locks::RecursiveLock _noPreemptionLock;
+ v8::Locker _preemptionLock;
+ };
+ struct V8Unlock {
+ public:
+ V8Unlock() : _preemptionUnlock(Isolate::GetCurrent()){}
+
+ private:
+ v8::Unlocker _preemptionUnlock;
+ v8Locks::RecursiveUnlock _noPreemptionUnlock;
+ };
+
+ class V8Scope : public Scope {
+ public:
+
+ V8Scope( V8ScriptEngine * engine );
+ ~V8Scope();
+
+ virtual void reset();
+ virtual void init( const BSONObj * data );
+
+ virtual void localConnect( const char * dbName );
+ virtual void externalSetup();
+
+ v8::Handle<v8::Value> get( const char * field ); // caller must create context and handle scopes
+ virtual double getNumber( const char *field );
+ virtual int getNumberInt( const char *field );
+ virtual long long getNumberLongLong( const char *field );
+ virtual string getString( const char *field );
+ virtual bool getBoolean( const char *field );
+ virtual BSONObj getObject( const char *field );
+ Handle<v8::Object> getGlobalObject() { return _global; };
+
+ virtual int type( const char *field );
+
+ virtual void setNumber( const char *field , double val );
+ virtual void setString( const char *field , const char * val );
+ virtual void setBoolean( const char *field , bool val );
+ virtual void setElement( const char *field , const BSONElement& e );
+ virtual void setObject( const char *field , const BSONObj& obj , bool readOnly);
+ virtual void setFunction( const char *field , const char * code );
+// virtual void setThis( const BSONObj * obj );
+
+ virtual void rename( const char * from , const char * to );
+
+ virtual ScriptingFunction _createFunction( const char * code );
+ Local< v8::Function > __createFunction( const char * code );
+ virtual int invoke( ScriptingFunction func , const BSONObj* args, const BSONObj* recv, int timeoutMs = 0 , bool ignoreReturn = false, bool readOnlyArgs = false, bool readOnlyRecv = false );
+ virtual bool exec( const StringData& code , const string& name , bool printResult , bool reportError , bool assertOnError, int timeoutMs );
+ virtual string getError() { return _error; }
+ virtual bool hasOutOfMemoryException();
+
+ virtual void injectNative( const char *field, NativeFunction func, void* data = 0 );
+ void injectNative( const char *field, NativeFunction func, Handle<v8::Object>& obj, void* data = 0 );
+ void injectV8Function( const char *field, v8Function func );
+ void injectV8Function( const char *field, v8Function func, Handle<v8::Object>& obj );
+ void injectV8Function( const char *field, v8Function func, Handle<v8::Template>& t );
+ Handle<v8::FunctionTemplate> createV8Function( v8Function func );
+
+ void gc();
+
+ Handle< Context > context() const { return _context; }
+
+        v8::Local<v8::Object> mongoToV8( const mongo::BSONObj & m , bool array = false , bool readOnly = false );
+        v8::Handle<v8::Object> mongoToLZV8( const mongo::BSONObj & m , bool array = false , bool readOnly = false );
+ mongo::BSONObj v8ToMongo( v8::Handle<v8::Object> o , int depth = 0 );
+
+ void v8ToMongoElement( BSONObjBuilder & b , const string sname , v8::Handle<v8::Value> value , int depth = 0, BSONObj* originalParent=0 );
+ v8::Handle<v8::Value> mongoToV8Element( const BSONElement &f, bool readOnly = false );
+ virtual void append( BSONObjBuilder & builder , const char * fieldName , const char * scopeName );
+
+ v8::Function * getNamedCons( const char * name );
+ v8::Function * getObjectIdCons();
+ Local< v8::Value > newId( const OID &id );
+
+ Persistent<v8::Object> wrapBSONObject(Local<v8::Object> obj, BSONObj* data);
+ Persistent<v8::Object> wrapArrayObject(Local<v8::Object> obj, char* data);
+
+ v8::Handle<v8::String> getV8Str(string str);
+// inline v8::Handle<v8::String> getV8Str(string str) { return v8::String::New(str.c_str()); }
+ inline v8::Handle<v8::String> getLocalV8Str(string str) { return v8::String::New(str.c_str()); }
+
+ v8::Isolate* getIsolate() { return _isolate; }
+ Persistent<Context> getContext() { return _context; }
+
+ // call with v8 mutex:
+ void enableV8Interrupt();
+ void disableV8Interrupt();
+ bool pauseV8Interrupt();
+ bool resumeV8Interrupt();
+
+ Handle<v8::String> V8STR_CONN;
+ Handle<v8::String> V8STR_ID;
+ Handle<v8::String> V8STR_LENGTH;
+ Handle<v8::String> V8STR_LEN;
+ Handle<v8::String> V8STR_TYPE;
+ Handle<v8::String> V8STR_ISOBJECTID;
+ Handle<v8::String> V8STR_NATIVE_FUNC;
+ Handle<v8::String> V8STR_NATIVE_DATA;
+ Handle<v8::String> V8STR_V8_FUNC;
+ Handle<v8::String> V8STR_RETURN;
+ Handle<v8::String> V8STR_ARGS;
+ Handle<v8::String> V8STR_T;
+ Handle<v8::String> V8STR_I;
+ Handle<v8::String> V8STR_EMPTY;
+ Handle<v8::String> V8STR_MINKEY;
+ Handle<v8::String> V8STR_MAXKEY;
+ Handle<v8::String> V8STR_NUMBERLONG;
+ Handle<v8::String> V8STR_NUMBERINT;
+ Handle<v8::String> V8STR_DBPTR;
+ Handle<v8::String> V8STR_BINDATA;
+ Handle<v8::String> V8STR_WRAPPER;
+ Handle<v8::String> V8STR_RO;
+ Handle<v8::String> V8STR_MODIFIED;
+ Handle<v8::String> V8STR_FULLNAME;
+
+ private:
+ void _startCall();
+
+ static Handle< Value > nativeCallback( V8Scope* scope, const Arguments &args );
+ static v8::Handle< v8::Value > v8Callback( const v8::Arguments &args );
+ static Handle< Value > load( V8Scope* scope, const Arguments &args );
+ static Handle< Value > Print(V8Scope* scope, const v8::Arguments& args);
+ static Handle< Value > Version(V8Scope* scope, const v8::Arguments& args);
+ static Handle< Value > GCV8(V8Scope* scope, const v8::Arguments& args);
+
+
+ V8ScriptEngine * _engine;
+
+ Persistent<Context> _context;
+ Persistent<v8::Object> _global;
+
+ string _error;
+ vector< Persistent<Value> > _funcs;
+ v8::Persistent<v8::Object> _emptyObj;
+
+ v8::Persistent<v8::Function> _wrapper;
+
+ enum ConnectState { NOT , LOCAL , EXTERNAL };
+ ConnectState _connectState;
+
+ std::map <string, v8::Persistent <v8::String> > _strCache;
+
+ Persistent<v8::ObjectTemplate> lzObjectTemplate;
+ Persistent<v8::ObjectTemplate> roObjectTemplate;
+ Persistent<v8::ObjectTemplate> lzArrayTemplate;
+ Persistent<v8::ObjectTemplate> internalFieldObjects;
+ v8::Isolate* _isolate;
+ };
+
+ class V8ScriptEngine : public ScriptEngine {
+ public:
+ V8ScriptEngine();
+ virtual ~V8ScriptEngine();
+
+ virtual Scope * createScope() { return new V8Scope( this ); }
+
+ virtual void runTest() {}
+
+ bool utf8Ok() const { return true; }
+
+ class V8UnlockForClient : public Unlocker {
+ V8Unlock u_;
+ };
+
+ virtual auto_ptr<Unlocker> newThreadUnlocker() { return auto_ptr< Unlocker >( new V8UnlockForClient ); }
+
+ virtual void interrupt( unsigned opSpec );
+ virtual void interruptAll();
+
+ private:
+ friend class V8Scope;
+ };
+
+ class ExternalString : public v8::String::ExternalAsciiStringResource {
+ public:
+ ExternalString(std::string str) : _data(str) {
+ }
+
+ ~ExternalString() {
+ }
+
+ const char* data () const { return _data.c_str(); }
+ size_t length () const { return _data.length(); }
+ private:
+// string _str;
+// const char* _data;
+ std::string _data;
+// size_t _len;
+ };
+
+ extern ScriptEngine * globalScriptEngine;
+
+}
diff --git a/src/mongo/scripting/sm_db.cpp b/src/mongo/scripting/sm_db.cpp
new file mode 100644
index 00000000000..ea8780fa7c0
--- /dev/null
+++ b/src/mongo/scripting/sm_db.cpp
@@ -0,0 +1,1284 @@
+// sm_db.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// hacked in right now from engine_spidermonkey.cpp
+
+#include "../client/syncclusterconnection.h"
+#include "../util/base64.h"
+#include "../util/text.h"
+#include "../util/hex.h"
+
+#if( BOOST_VERSION >= 104200 )
+//#include <boost/uuid/uuid.hpp>
+#define HAVE_UUID 1
+#else
+;
+#endif
+
+namespace mongo {
+
+ bool haveLocalShardingInfo( const string& ns );
+
+ // ------------ some defs needed ---------------
+
+ JSObject * doCreateCollection( JSContext * cx , JSObject * db , const string& shortName );
+
+ // ------------ utils ------------------
+
+
+ bool isSpecialName( const string& name ) {
+ static set<string> names;
+ if ( names.size() == 0 ) {
+ names.insert( "tojson" );
+ names.insert( "toJson" );
+ names.insert( "toString" );
+ }
+
+ if ( name.length() == 0 )
+ return false;
+
+ if ( name[0] == '_' )
+ return true;
+
+ return names.count( name ) > 0;
+ }
+
+
+ // ------ cursor ------
+
+ class CursorHolder {
+ public:
+ CursorHolder( auto_ptr< DBClientCursor > &cursor, const shared_ptr< DBClientWithCommands > &connection ) :
+ connection_( connection ),
+ cursor_( cursor ) {
+ assert( cursor_.get() );
+ }
+ DBClientCursor *get() const { return cursor_.get(); }
+ private:
+ shared_ptr< DBClientWithCommands > connection_;
+ auto_ptr< DBClientCursor > cursor_;
+ };
+
+ DBClientCursor *getCursor( JSContext *cx, JSObject *obj ) {
+ CursorHolder * holder = (CursorHolder*)JS_GetPrivate( cx , obj );
+ uassert( 10235 , "no cursor!" , holder );
+ return holder->get();
+ }
+
+ JSBool internal_cursor_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
+ uassert( 10236 , "no args to internal_cursor_constructor" , argc == 0 );
+ assert( JS_SetPrivate( cx , obj , 0 ) ); // just for safety
+ return JS_TRUE;
+ }
+
+ void internal_cursor_finalize( JSContext * cx , JSObject * obj ) {
+ CursorHolder * holder = (CursorHolder*)JS_GetPrivate( cx , obj );
+ if ( holder ) {
+ delete holder;
+ assert( JS_SetPrivate( cx , obj , 0 ) );
+ }
+ }
+
+ JSBool internal_cursor_hasNext(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
+ DBClientCursor *cursor = getCursor( cx, obj );
+ try {
+ *rval = cursor->more() ? JSVAL_TRUE : JSVAL_FALSE;
+ }
+ catch ( std::exception& e ) {
+ JS_ReportError( cx , e.what() );
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+ }
+
+ JSBool internal_cursor_objsLeftInBatch(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
+ DBClientCursor *cursor = getCursor( cx, obj );
+ Convertor c(cx);
+ *rval = c.toval((double) cursor->objsLeftInBatch() );
+ return JS_TRUE;
+ }
+
+ JSBool internal_cursor_next(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
+ DBClientCursor *cursor = getCursor( cx, obj );
+
+ BSONObj n;
+
+ try {
+ if ( ! cursor->more() ) {
+ JS_ReportError( cx , "cursor at the end" );
+ return JS_FALSE;
+ }
+
+ n = cursor->next();
+ }
+ catch ( std::exception& e ) {
+ JS_ReportError( cx , e.what() );
+ return JS_FALSE;
+ }
+
+ Convertor c(cx);
+ *rval = c.toval( &n );
+ return JS_TRUE;
+ }
+
+ JSFunctionSpec internal_cursor_functions[] = {
+ { "hasNext" , internal_cursor_hasNext , 0 , JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
+ { "objsLeftInBatch" , internal_cursor_objsLeftInBatch , 0 , JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
+ { "next" , internal_cursor_next , 0 , JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
+ { 0 }
+ };
+
+ JSClass internal_cursor_class = {
+ "InternalCursor" , JSCLASS_HAS_PRIVATE ,
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub , JS_ConvertStub, internal_cursor_finalize,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+ };
+
+
+ // ------ mongo stuff ------
+
+ JSBool mongo_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
+ uassert( 10237 , "mongo_constructor not implemented yet" , 0 );
+ throw -1;
+ }
+
+ JSBool mongo_local_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
+ Convertor c( cx );
+
+ shared_ptr< DBClientWithCommands > client( createDirectClient() );
+ assert( JS_SetPrivate( cx , obj , (void*)( new shared_ptr< DBClientWithCommands >( client ) ) ) );
+
+ jsval host = c.toval( "EMBEDDED" );
+ assert( JS_SetProperty( cx , obj , "host" , &host ) );
+
+ return JS_TRUE;
+ }
+
+ JSBool mongo_external_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
+ Convertor c( cx );
+
+ smuassert( cx , "0 or 1 args to Mongo" , argc <= 1 );
+
+ string host = "127.0.0.1";
+ if ( argc > 0 )
+ host = c.toString( argv[0] );
+
+ string errmsg;
+
+ ConnectionString cs = ConnectionString::parse( host , errmsg );
+ if ( ! cs.isValid() ) {
+ JS_ReportError( cx , errmsg.c_str() );
+ return JS_FALSE;
+ }
+
+ shared_ptr< DBClientWithCommands > conn( cs.connect( errmsg ) );
+ if ( ! conn ) {
+ JS_ReportError( cx , errmsg.c_str() );
+ return JS_FALSE;
+ }
+
+ try{
+ ScriptEngine::runConnectCallback( *conn );
+ }
+ catch( std::exception& e ){
+ // Can happen if connection goes down while we're starting up here
+ // Catch so that we don't get a hard-to-trace segfault from SM
+ JS_ReportError( cx, ((string)( str::stream() << "Error during mongo startup." << causedBy( e ) )).c_str() );
+ return JS_FALSE;
+ }
+
+ assert( JS_SetPrivate( cx , obj , (void*)( new shared_ptr< DBClientWithCommands >( conn ) ) ) );
+ jsval host_val = c.toval( host.c_str() );
+ assert( JS_SetProperty( cx , obj , "host" , &host_val ) );
+ return JS_TRUE;
+
+ }
+
+ DBClientWithCommands *getConnection( JSContext *cx, JSObject *obj ) {
+ shared_ptr< DBClientWithCommands > * connHolder = (shared_ptr< DBClientWithCommands >*)JS_GetPrivate( cx , obj );
+ uassert( 10239 , "no connection!" , connHolder && connHolder->get() );
+ return connHolder->get();
+ }
+
+ void mongo_finalize( JSContext * cx , JSObject * obj ) {
+ shared_ptr< DBClientWithCommands > * connHolder = (shared_ptr< DBClientWithCommands >*)JS_GetPrivate( cx , obj );
+ if ( connHolder ) {
+ delete connHolder;
+ assert( JS_SetPrivate( cx , obj , 0 ) );
+ }
+ }
+
+ JSClass mongo_class = {
+ "Mongo" , JSCLASS_HAS_PRIVATE | JSCLASS_NEW_RESOLVE ,
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub , JS_ConvertStub, mongo_finalize,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+ };
+
+ JSBool mongo_auth(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
+ smuassert( cx , "mongo_auth needs 3 args" , argc == 3 );
+ shared_ptr< DBClientWithCommands > * connHolder = (shared_ptr< DBClientWithCommands >*)JS_GetPrivate( cx , obj );
+ smuassert( cx , "no connection!" , connHolder && connHolder->get() );
+ DBClientWithCommands *conn = connHolder->get();
+
+ Convertor c( cx );
+
+ string db = c.toString( argv[0] );
+ string username = c.toString( argv[1] );
+ string password = c.toString( argv[2] );
+ string errmsg = "";
+
+ try {
+ if (conn->auth(db, username, password, errmsg)) {
+ return JS_TRUE;
+ }
+ JS_ReportError( cx, errmsg.c_str() );
+ }
+ catch ( ... ) {
+ JS_ReportError( cx , "error doing query: unknown" );
+ }
+ return JS_FALSE;
+ }
+
+ JSBool mongo_find(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
+ smuassert( cx , "mongo_find needs 7 args" , argc == 7 );
+ shared_ptr< DBClientWithCommands > * connHolder = (shared_ptr< DBClientWithCommands >*)JS_GetPrivate( cx , obj );
+ smuassert( cx , "no connection!" , connHolder && connHolder->get() );
+ DBClientWithCommands *conn = connHolder->get();
+
+ Convertor c( cx );
+
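+        // args: ns, query, fields, nToReturn, nToSkip, batchSize, options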
+ string ns = c.toString( argv[0] );
+
+ BSONObj q = c.toObject( argv[1] );
+ BSONObj f = c.toObject( argv[2] );
+
+ int nToReturn = (int) c.toNumber( argv[3] );
+ int nToSkip = (int) c.toNumber( argv[4] );
+ int batchSize = (int) c.toNumber( argv[5] );
+ int options = (int)c.toNumber( argv[6] );
+
+ try {
+
+ auto_ptr<DBClientCursor> cursor = conn->query( ns , q , nToReturn , nToSkip , f.nFields() ? &f : 0 , options , batchSize );
+ if ( ! cursor.get() ) {
+ log() << "query failed : " << ns << " " << q << " to: " << conn->toString() << endl;
+ JS_ReportError( cx , "error doing query: failed" );
+ return JS_FALSE;
+ }
+ JSObject * mycursor = JS_NewObject( cx , &internal_cursor_class , 0 , 0 );
+ CHECKNEWOBJECT( mycursor, cx, "internal_cursor_class" );
+ assert( JS_SetPrivate( cx , mycursor , new CursorHolder( cursor, *connHolder ) ) );
+ *rval = OBJECT_TO_JSVAL( mycursor );
+ return JS_TRUE;
+ }
+ catch ( ... ) {
+ JS_ReportError( cx , "error doing query: unknown" );
+ return JS_FALSE;
+ }
+ }
+
+ JSBool mongo_update(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
+ smuassert( cx , "mongo_update needs at least 3 args" , argc >= 3 );
+ smuassert( cx , "2nd param to update has to be an object" , JSVAL_IS_OBJECT( argv[1] ) );
+ smuassert( cx , "3rd param to update has to be an object" , JSVAL_IS_OBJECT( argv[2] ) );
+
+ Convertor c( cx );
+ if ( c.getBoolean( obj , "readOnly" ) ) {
+ JS_ReportError( cx , "js db in read only mode - mongo_update" );
+ return JS_FALSE;
+ }
+
+ DBClientWithCommands * conn = getConnection( cx, obj );
+ uassert( 10245 , "no connection!" , conn );
+
+ string ns = c.toString( argv[0] );
+
+ bool upsert = argc > 3 && c.toBoolean( argv[3] );
+ bool multi = argc > 4 && c.toBoolean( argv[4] );
+
+ try {
+ conn->update( ns , c.toObject( argv[1] ) , c.toObject( argv[2] ) , upsert , multi );
+ return JS_TRUE;
+ }
+ catch ( ... ) {
+ JS_ReportError( cx , "error doing update" );
+ return JS_FALSE;
+ }
+ }
+
+ JSBool mongo_insert(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
+ smuassert( cx , "mongo_insert needs 2 args" , argc == 2 );
+ smuassert( cx , "2nd param to insert has to be an object" , JSVAL_IS_OBJECT( argv[1] ) );
+
+ Convertor c( cx );
+ if ( c.getBoolean( obj , "readOnly" ) ) {
+ JS_ReportError( cx , "js db in read only mode - mongo_insert" );
+ return JS_FALSE;
+ }
+
+ DBClientWithCommands * conn = getConnection( cx, obj );
+ uassert( 10248 , "no connection!" , conn );
+
+ string ns = c.toString( argv[0] );
+
+ try {
+ JSObject * insertObj = JSVAL_TO_OBJECT( argv[1] );
+
+ if( JS_IsArrayObject( cx, insertObj ) ){
+ vector<BSONObj> bos;
+
+ jsuint len;
+ JSBool gotLen = JS_GetArrayLength( cx, insertObj, &len );
+ smuassert( cx, "could not get length of array", gotLen );
+
+ for( jsuint i = 0; i < len; i++ ){
+
+ jsval el;
+ JSBool inserted = JS_GetElement( cx, insertObj, i, &el);
+ smuassert( cx, "could not find element in array object", inserted );
+
+ bos.push_back( c.toObject( el ) );
+ }
+
+ conn->insert( ns, bos );
+
+ return JS_TRUE;
+ }
+ else {
+ BSONObj o = c.toObject( argv[1] );
+ // TODO: add _id
+
+ conn->insert( ns , o );
+ return JS_TRUE;
+ }
+ }
+ catch ( std::exception& e ) {
+ stringstream ss;
+ ss << "error doing insert:" << e.what();
+ string s = ss.str();
+ JS_ReportError( cx , s.c_str() );
+ return JS_FALSE;
+ }
+ catch ( ... ) {
+ JS_ReportError( cx , "error doing insert" );
+ return JS_FALSE;
+ }
+ }
+
+ JSBool mongo_remove(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
+ smuassert( cx , "mongo_remove needs 2 or 3 arguments" , argc == 2 || argc == 3 );
+        smuassert( cx , "2nd param to remove has to be an object" , JSVAL_IS_OBJECT( argv[1] ) );
+
+ Convertor c( cx );
+ if ( c.getBoolean( obj , "readOnly" ) ) {
+ JS_ReportError( cx , "js db in read only mode - mongo_remove" );
+ return JS_FALSE;
+ }
+
+ DBClientWithCommands * conn = getConnection( cx, obj );
+ uassert( 10251 , "no connection!" , conn );
+
+ string ns = c.toString( argv[0] );
+ BSONObj o = c.toObject( argv[1] );
+ bool justOne = false;
+ if ( argc > 2 )
+ justOne = c.toBoolean( argv[2] );
+
+ try {
+ conn->remove( ns , o , justOne );
+ return JS_TRUE;
+ }
+ catch ( std::exception& e ) {
+ JS_ReportError( cx , e.what() );
+ return JS_FALSE;
+ }
+
+ catch ( ... ) {
+ JS_ReportError( cx , "error doing remove" );
+ return JS_FALSE;
+ }
+
+ }
+
+ JSFunctionSpec mongo_functions[] = {
+ { "auth" , mongo_auth , 0 , JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
+ { "find" , mongo_find , 0 , JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
+ { "update" , mongo_update , 0 , JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
+ { "insert" , mongo_insert , 0 , JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
+ { "remove" , mongo_remove , 0 , JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
+ { 0 }
+ };
+
+ // ------------- db_collection -------------
+
+ JSBool db_collection_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
+ smuassert( cx , "db_collection_constructor wrong args" , argc == 4 );
+ assert( JS_SetProperty( cx , obj , "_mongo" , &(argv[0]) ) );
+ assert( JS_SetProperty( cx , obj , "_db" , &(argv[1]) ) );
+ assert( JS_SetProperty( cx , obj , "_shortName" , &(argv[2]) ) );
+ assert( JS_SetProperty( cx , obj , "_fullName" , &(argv[3]) ) );
+
+ Convertor c(cx);
+ if ( haveLocalShardingInfo( c.toString( argv[3] ) ) ) {
+ JS_ReportError( cx , "can't use sharded collection from db.eval" );
+ return JS_FALSE;
+ }
+
+ return JS_TRUE;
+ }
+
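+    // resolve hook: accessing an unknown property on a DBCollection lazily creates the dotted sub-collection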
+ JSBool db_collection_resolve( JSContext *cx, JSObject *obj, jsval id, uintN flags, JSObject **objp ) {
+ if ( flags & JSRESOLVE_ASSIGNING )
+ return JS_TRUE;
+
+ Convertor c( cx );
+ string collname = c.toString( id );
+
+ if ( isSpecialName( collname ) )
+ return JS_TRUE;
+
+ if ( obj == c.getGlobalPrototype( "DBCollection" ) )
+ return JS_TRUE;
+
+ JSObject * proto = JS_GetPrototype( cx , obj );
+ if ( c.hasProperty( obj , collname.c_str() ) || ( proto && c.hasProperty( proto , collname.c_str() ) ) )
+ return JS_TRUE;
+
+ string name = c.toString( c.getProperty( obj , "_shortName" ) );
+ name += ".";
+ name += collname;
+
+ jsval db = c.getProperty( obj , "_db" );
+ if ( ! JSVAL_IS_OBJECT( db ) )
+ return JS_TRUE;
+
+ JSObject * coll = doCreateCollection( cx , JSVAL_TO_OBJECT( db ) , name );
+ if ( ! coll )
+ return JS_FALSE;
+ c.setProperty( obj , collname.c_str() , OBJECT_TO_JSVAL( coll ) );
+ *objp = obj;
+ return JS_TRUE;
+ }
+
+ JSClass db_collection_class = {
+ "DBCollection" , JSCLASS_HAS_PRIVATE | JSCLASS_NEW_RESOLVE ,
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, (JSResolveOp)(&db_collection_resolve) , JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+ };
+
+
+ JSObject * doCreateCollection( JSContext * cx , JSObject * db , const string& shortName ) {
+ Convertor c(cx);
+
+ assert( c.hasProperty( db , "_mongo" ) );
+ assert( c.hasProperty( db , "_name" ) );
+
+ JSObject * coll = JS_NewObject( cx , &db_collection_class , 0 , 0 );
+ CHECKNEWOBJECT( coll, cx, "doCreateCollection" );
+ c.setProperty( coll , "_mongo" , c.getProperty( db , "_mongo" ) );
+ c.setProperty( coll , "_db" , OBJECT_TO_JSVAL( db ) );
+ c.setProperty( coll , "_shortName" , c.toval( shortName.c_str() ) );
+
+ string name = c.toString( c.getProperty( db , "_name" ) );
+ name += "." + shortName;
+ c.setProperty( coll , "_fullName" , c.toval( name.c_str() ) );
+
+ if ( haveLocalShardingInfo( name ) ) {
+ JS_ReportError( cx , "can't use sharded collection from db.eval" );
+ return 0;
+ }
+
+ return coll;
+ }
+
+ // -------------- DB ---------------
+
+
+ JSBool db_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
+ smuassert( cx, "wrong number of arguments to DB" , argc == 2 );
+ assert( JS_SetProperty( cx , obj , "_mongo" , &(argv[0]) ) );
+ assert( JS_SetProperty( cx , obj , "_name" , &(argv[1]) ) );
+
+ return JS_TRUE;
+ }
+
+ JSBool db_resolve( JSContext *cx, JSObject *obj, jsval id, uintN flags, JSObject **objp ) {
+ if ( flags & JSRESOLVE_ASSIGNING )
+ return JS_TRUE;
+
+ Convertor c( cx );
+
+ if ( obj == c.getGlobalPrototype( "DB" ) )
+ return JS_TRUE;
+
+ string collname = c.toString( id );
+
+ if ( isSpecialName( collname ) )
+ return JS_TRUE;
+
+ JSObject * proto = JS_GetPrototype( cx , obj );
+ if ( proto && c.hasProperty( proto , collname.c_str() ) )
+ return JS_TRUE;
+
+ JSObject * coll = doCreateCollection( cx , obj , collname );
+ if ( ! coll )
+ return JS_FALSE;
+ c.setProperty( obj , collname.c_str() , OBJECT_TO_JSVAL( coll ) );
+
+ *objp = obj;
+ return JS_TRUE;
+ }
+
+ JSClass db_class = {
+ "DB" , JSCLASS_HAS_PRIVATE | JSCLASS_NEW_RESOLVE ,
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, (JSResolveOp)(&db_resolve) , JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+ };
+
+
+ // -------------- object id -------------
+
+ JSBool object_id_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
+ Convertor c( cx );
+
+ OID oid;
+ if ( argc == 0 ) {
+ oid.init();
+ }
+ else {
+ smuassert( cx , "object_id_constructor can't take more than 1 param" , argc == 1 );
+ string s = c.toString( argv[0] );
+
+ try {
+ Scope::validateObjectIdString( s );
+ }
+ catch ( const MsgAssertionException &m ) {
+ static string error = m.toString();
+ JS_ReportError( cx, error.c_str() );
+ return JS_FALSE;
+ }
+ oid.init( s );
+ }
+
+ if ( ! JS_InstanceOf( cx , obj , &object_id_class , 0 ) ) {
+ obj = JS_NewObject( cx , &object_id_class , 0 , 0 );
+ CHECKNEWOBJECT( obj, cx, "object_id_constructor" );
+ *rval = OBJECT_TO_JSVAL( obj );
+ }
+
+ jsval v = c.toval( oid.str().c_str() );
+ assert( JS_SetProperty( cx , obj , "str" , &v ) );
+
+ return JS_TRUE;
+ }
+
+ JSClass object_id_class = {
+ "ObjectId" , JSCLASS_HAS_PRIVATE ,
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub , JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+ };
+
+ // dbpointer
+
+ JSBool dbpointer_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
+ Convertor c( cx );
+ if ( ! JS_InstanceOf( cx , obj , &dbpointer_class , 0 ) ) {
+ obj = JS_NewObject( cx , &dbpointer_class , 0 , 0 );
+ CHECKNEWOBJECT( obj, cx, "dbpointer_constructor" );
+ *rval = OBJECT_TO_JSVAL( obj );
+ }
+
+ if ( argc == 2 ) {
+
+ if ( ! JSVAL_IS_OID( argv[1] ) ) {
+ JS_ReportError( cx , "2nd arg to DBPointer needs to be oid" );
+ return JS_FALSE;
+ }
+
+ assert( JS_SetProperty( cx , obj , "ns" , &(argv[0]) ) );
+ assert( JS_SetProperty( cx , obj , "id" , &(argv[1]) ) );
+ return JS_TRUE;
+ }
+ else {
+ JS_ReportError( cx , "DBPointer needs 2 arguments" );
+ return JS_FALSE;
+ }
+ }
+
+ JSClass dbpointer_class = {
+ "DBPointer" , JSCLASS_HAS_PRIVATE ,
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub , JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+ };
+
+ JSFunctionSpec dbpointer_functions[] = {
+ { 0 }
+ };
+
+
+ JSBool dbref_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
+ Convertor c( cx );
+ if ( ! JS_InstanceOf( cx , obj , &dbref_class , 0 ) ) {
+ obj = JS_NewObject( cx , &dbref_class , 0 , 0 );
+ CHECKNEWOBJECT( obj, cx, "dbref_constructor" );
+ *rval = OBJECT_TO_JSVAL( obj );
+ }
+
+ if ( argc == 2 ) {
+ JSObject * o = JS_NewObject( cx , NULL , NULL, NULL );
+ CHECKNEWOBJECT( o, cx, "dbref_constructor" );
+ assert( JS_SetProperty( cx, o , "$ref" , &argv[ 0 ] ) );
+ assert( JS_SetProperty( cx, o , "$id" , &argv[ 1 ] ) );
+ BSONObj bo = c.toObject( o );
+ assert( JS_SetPrivate( cx , obj , (void*)(new BSONHolder( bo.getOwned() ) ) ) );
+ return JS_TRUE;
+ }
+ else {
+ JS_ReportError( cx , "DBRef needs 2 arguments" );
+ assert( JS_SetPrivate( cx , obj , (void*)(new BSONHolder( BSONObj().getOwned() ) ) ) );
+ return JS_FALSE;
+ }
+ }
+
+ JSClass dbref_class = bson_class; // name will be fixed later
+
+ // UUID **************************
+
+#if 0
+ JSBool uuid_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
+ Convertor c( cx );
+
+ if( argc == 0 ) {
+#if defined(HAVE_UUID)
+ //uuids::uuid
+#else
+#endif
+ JS_ReportError( cx , "UUID needs 1 argument -- UUID(hexstr)" );
+ return JS_FALSE;
+ }
+ else if ( argc == 1 ) {
+
+ string encoded = c.toString( argv[ 0 ] );
+ if( encoded.size() != 32 ) {
+ JS_ReportError( cx, "expect 32 char hex string to UUID()" );
+ return JS_FALSE;
+ }
+
+ char buf[16];
+ for( int i = 0; i < 16; i++ ) {
+ buf[i] = fromHex(encoded.c_str() + i * 2);
+ }
+
+zzz
+
+ assert( JS_SetPrivate( cx, obj, new BinDataHolder( buf, 16 ) ) );
+ c.setProperty( obj, "len", c.toval( (double)16 ) );
+ c.setProperty( obj, "type", c.toval( (double)3 ) );
+
+ return JS_TRUE;
+ }
+ else {
+ JS_ReportError( cx , "UUID needs 1 argument -- UUID(hexstr)" );
+ return JS_FALSE;
+ }
+ }
+
+ JSBool uuid_tostring(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
+ Convertor c(cx);
+ void *holder = JS_GetPrivate( cx, obj );
+ assert( holder );
+ const char *data = ( ( BinDataHolder* )( holder ) )->c_;
+ stringstream ss;
+ ss << "UUID(\"" << toHex(data, 16);
+ ss << "\")";
+ string ret = ss.str();
+ return *rval = c.toval( ret.c_str() );
+ }
+
+ void uuid_finalize( JSContext * cx , JSObject * obj ) {
+ Convertor c(cx);
+ void *holder = JS_GetPrivate( cx, obj );
+ if ( holder ) {
+ delete ( BinDataHolder* )holder;
+ assert( JS_SetPrivate( cx , obj , 0 ) );
+ }
+ }
+
+ JSClass uuid_class = {
+ "UUID" , JSCLASS_HAS_PRIVATE ,
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub , JS_ConvertStub, uuid_finalize,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+ };
+
+ JSFunctionSpec uuid_functions[] = {
+ { "toString" , uuid_tostring , 0 , JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
+ { 0 }
+ };
+
+#endif
+
+ // BinData **************************
+
+ JSBool bindata_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
+ Convertor c( cx );
+ if ( ! JS_InstanceOf( cx , obj , &bindata_class , 0 ) ) {
+ obj = JS_NewObject( cx , &bindata_class , 0 , 0 );
+ CHECKNEWOBJECT( obj, cx, "bindata_constructor" );
+ *rval = OBJECT_TO_JSVAL( obj );
+ }
+
+ if ( argc == 2 ) {
+
+ int type = (int)c.toNumber( argv[ 0 ] );
+ if( type < 0 || type > 255 ) {
+ JS_ReportError( cx , "invalid BinData subtype -- range is 0..255 see bsonspec.org" );
+ return JS_FALSE;
+ }
+ string encoded = c.toString( argv[ 1 ] );
+ string decoded;
+ try {
+ decoded = base64::decode( encoded );
+ }
+ catch(...) {
+ JS_ReportError(cx, "BinData could not decode base64 parameter");
+ return JS_FALSE;
+ }
+
+ assert( JS_SetPrivate( cx, obj, new BinDataHolder( decoded.data(), decoded.length() ) ) );
+ c.setProperty( obj, "len", c.toval( (double)decoded.length() ) );
+ c.setProperty( obj, "type", c.toval( (double)type ) );
+
+ return JS_TRUE;
+ }
+ else {
+ JS_ReportError( cx , "BinData needs 2 arguments -- BinData(subtype,data)" );
+ return JS_FALSE;
+ }
+ }
+
+ JSBool bindata_tostring(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
+ Convertor c(cx);
+ int type = (int)c.getNumber( obj , "type" );
+ int len = (int)c.getNumber( obj, "len" );
+ void *holder = JS_GetPrivate( cx, obj );
+ assert( holder );
+ const char *data = ( ( BinDataHolder* )( holder ) )->c_;
+ stringstream ss;
+ ss << "BinData(" << type << ",\"";
+ base64::encode( ss, (const char *)data, len );
+ ss << "\")";
+ string ret = ss.str();
+ return *rval = c.toval( ret.c_str() );
+ }
+
+ JSBool bindataBase64(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
+ Convertor c(cx);
+ int len = (int)c.getNumber( obj, "len" );
+ void *holder = JS_GetPrivate( cx, obj );
+ assert( holder );
+ const char *data = ( ( BinDataHolder* )( holder ) )->c_;
+ stringstream ss;
+ base64::encode( ss, (const char *)data, len );
+ string ret = ss.str();
+ return *rval = c.toval( ret.c_str() );
+ }
+
+ JSBool bindataAsHex(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
+ Convertor c(cx);
+ int len = (int)c.getNumber( obj, "len" );
+ void *holder = JS_GetPrivate( cx, obj );
+ assert( holder );
+ const char *data = ( ( BinDataHolder* )( holder ) )->c_;
+ stringstream ss;
+ ss.setf (ios_base::hex , ios_base::basefield);
+ ss.fill ('0');
+ ss.setf (ios_base::right , ios_base::adjustfield);
+ for( int i = 0; i < len; i++ ) {
+ unsigned v = (unsigned char) data[i];
+ ss << setw(2) << v;
+ }
+ string ret = ss.str();
+ return *rval = c.toval( ret.c_str() );
+ }
+
+ void bindata_finalize( JSContext * cx , JSObject * obj ) {
+ Convertor c(cx);
+ void *holder = JS_GetPrivate( cx, obj );
+ if ( holder ) {
+ delete ( BinDataHolder* )holder;
+ assert( JS_SetPrivate( cx , obj , 0 ) );
+ }
+ }
+
+ JSClass bindata_class = {
+ "BinData" , JSCLASS_HAS_PRIVATE ,
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub , JS_ConvertStub, bindata_finalize,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+ };
+
+ JSFunctionSpec bindata_functions[] = {
+ { "toString" , bindata_tostring , 0 , JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
+ { "hex", bindataAsHex, 0, JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
+ { "base64", bindataBase64, 0, JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
+ { 0 }
+ };
+
+ // Map
+
+ bool specialMapString( const string& s ) {
+ return s == "put" || s == "get" || s == "_get" || s == "values" || s == "_data" || s == "constructor" ;
+ }
+
+ JSBool map_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
+ if ( argc > 0 ) {
+ JS_ReportError( cx , "Map takes no arguments" );
+ return JS_FALSE;
+ }
+
+ JSObject * array = JS_NewObject( cx , 0 , 0 , 0 );
+ CHECKNEWOBJECT( array, cx, "map_constructor" );
+
+ jsval a = OBJECT_TO_JSVAL( array );
+ JS_SetProperty( cx , obj , "_data" , &a );
+
+ return JS_TRUE;
+ }
+
+ JSBool map_prop( JSContext *cx, JSObject *obj, jsval idval, jsval *vp ) {
+ Convertor c(cx);
+ if ( specialMapString( c.toString( idval ) ) )
+ return JS_TRUE;
+
+ log() << "illegal prop access: " << c.toString( idval ) << endl;
+ JS_ReportError( cx , "can't use array access with Map" );
+ return JS_FALSE;
+ }
+
+ JSClass map_class = {
+ "Map" , JSCLASS_HAS_PRIVATE ,
+ map_prop, JS_PropertyStub, map_prop, map_prop,
+ JS_EnumerateStub, JS_ResolveStub , JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+ };
+
+ JSFunctionSpec map_functions[] = {
+ { 0 }
+ };
+
+
+ // -----
+
+ JSClass timestamp_class = {
+ "Timestamp" , JSCLASS_HAS_PRIVATE ,
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub , JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+ };
+
+ JSBool timestamp_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
+ smuassert( cx , "Timestamp needs 0 or 2 args" , argc == 0 || argc == 2 );
+
+ if ( ! JS_InstanceOf( cx , obj , &timestamp_class , 0 ) ) {
+ obj = JS_NewObject( cx , &timestamp_class , 0 , 0 );
+ CHECKNEWOBJECT( obj, cx, "timestamp_constructor" );
+ *rval = OBJECT_TO_JSVAL( obj );
+ }
+
+ Convertor c( cx );
+ if ( argc == 0 ) {
+ c.setProperty( obj, "t", c.toval( 0.0 ) );
+ c.setProperty( obj, "i", c.toval( 0.0 ) );
+ }
+ else {
+ c.setProperty( obj, "t", argv[ 0 ] );
+ c.setProperty( obj, "i", argv[ 1 ] );
+ }
+
+ return JS_TRUE;
+ }
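+
+ // For illustration (assumed usage): new Timestamp() yields { t: 0, i: 0 },
+ // while new Timestamp(1324700000, 5) stores its two arguments as t and i.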
+
+ JSClass numberlong_class = {
+ "NumberLong" , JSCLASS_HAS_PRIVATE ,
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub , JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+ };
+
+ JSBool numberlong_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
+ smuassert( cx , "NumberLong needs 0 or 1 args" , argc == 0 || argc == 1 );
+
+ if ( ! JS_InstanceOf( cx , obj , &numberlong_class , 0 ) ) {
+ obj = JS_NewObject( cx , &numberlong_class , 0 , 0 );
+ CHECKNEWOBJECT( obj, cx, "numberlong_constructor" );
+ *rval = OBJECT_TO_JSVAL( obj );
+ }
+
+ Convertor c( cx );
+ if ( argc == 0 ) {
+ c.setProperty( obj, "floatApprox", c.toval( 0.0 ) );
+ }
+ else if ( JSVAL_IS_NUMBER( argv[ 0 ] ) ) {
+ c.setProperty( obj, "floatApprox", argv[ 0 ] );
+ }
+ else {
+ string num = c.toString( argv[ 0 ] );
+ //PRINT(num);
+ const char *numStr = num.c_str();
+ long long n;
+ try {
+ n = parseLL( numStr );
+ //PRINT(n);
+ }
+ catch ( const AssertionException & ) {
+ smuassert( cx , "could not convert string to long long" , false );
+ }
+ c.makeLongObj( n, obj );
+ }
+
+ return JS_TRUE;
+ }
+
+ JSBool numberlong_valueof(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
+ Convertor c(cx);
+ return *rval = c.toval( double( c.toNumberLongUnsafe( obj ) ) );
+ }
+
+ JSBool numberlong_tonumber(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
+ return numberlong_valueof( cx, obj, argc, argv, rval );
+ }
+
+ JSBool numberlong_tostring(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
+ Convertor c(cx);
+ stringstream ss;
+ long long val = c.toNumberLongUnsafe( obj );
+ const long long limit = 2LL << 30;
+
+ if ( val <= -limit || limit <= val )
+ ss << "NumberLong(\"" << val << "\")";
+ else
+ ss << "NumberLong(" << val << ")";
+
+ string ret = ss.str();
+ return *rval = c.toval( ret.c_str() );
+ }
+
+ JSFunctionSpec numberlong_functions[] = {
+ { "valueOf" , numberlong_valueof , 0 , JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
+ { "toNumber" , numberlong_tonumber , 0 , JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
+ { "toString" , numberlong_tostring , 0 , JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
+ { 0 }
+ };
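+
+ // Note on toString() above: once the magnitude reaches 2^31 the value is
+ // quoted, e.g. NumberLong(5) prints as NumberLong(5) but a value such as
+ // 5000000000 prints as NumberLong("5000000000") so it can be re-parsed
+ // without losing precision (illustrative values).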
+
+ JSClass numberint_class = {
+ "NumberInt" , JSCLASS_HAS_PRIVATE ,
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub , JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+ };
+
+ JSBool numberint_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
+ smuassert( cx , "NumberInt needs 0 or 1 args" , argc == 0 || argc == 1 );
+
+ if ( ! JS_InstanceOf( cx , obj , &numberint_class , 0 ) ) {
+ obj = JS_NewObject( cx , &numberint_class , 0 , 0 );
+ CHECKNEWOBJECT( obj, cx, "numberint_constructor" );
+ *rval = OBJECT_TO_JSVAL( obj );
+ }
+
+ Convertor c( cx );
+ if ( argc == 0 ) {
+ c.setProperty( obj, "floatApprox", c.toval( 0.0 ) );
+ }
+ else if ( JSVAL_IS_NUMBER( argv[ 0 ] ) ) {
+ c.setProperty( obj, "floatApprox", argv[ 0 ] );
+ }
+ else {
+ string num = c.toString( argv[ 0 ] );
+ //PRINT(num);
+ const char *numStr = num.c_str();
+ int n;
+ try {
+ n = (int) parseLL( numStr );
+ //PRINT(n);
+ }
+ catch ( const AssertionException & ) {
+ smuassert( cx , "could not convert string to integer" , false );
+ }
+ c.makeIntObj( n, obj );
+ }
+
+ return JS_TRUE;
+ }
+
+ JSBool numberint_valueof(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
+ Convertor c(cx);
+ return *rval = c.toval( double( c.toNumberInt( obj ) ) );
+ }
+
+ JSBool numberint_tonumber(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
+ return numberint_valueof( cx, obj, argc, argv, rval );
+ }
+
+ JSBool numberint_tostring(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
+ Convertor c(cx);
+ int val = c.toNumberInt( obj );
+ string ret = str::stream() << "NumberInt(" << val << ")";
+ return *rval = c.toval( ret.c_str() );
+ }
+
+ JSFunctionSpec numberint_functions[] = {
+ { "valueOf" , numberint_valueof , 0 , JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
+ { "toNumber" , numberint_tonumber , 0 , JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
+ { "toString" , numberint_tostring , 0 , JSPROP_READONLY | JSPROP_PERMANENT, 0 } ,
+ { 0 }
+ };
+
+ JSClass minkey_class = {
+ "MinKey" , JSCLASS_HAS_PRIVATE ,
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub , JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+ };
+
+ JSClass maxkey_class = {
+ "MaxKey" , JSCLASS_HAS_PRIVATE ,
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub , JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+ };
+
+ // dbquery
+
+ JSBool dbquery_constructor( JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval ) {
+ smuassert( cx , "DDQuery needs at least 4 args" , argc >= 4 );
+
+ Convertor c(cx);
+ c.setProperty( obj , "_mongo" , argv[0] );
+ c.setProperty( obj , "_db" , argv[1] );
+ c.setProperty( obj , "_collection" , argv[2] );
+ c.setProperty( obj , "_ns" , argv[3] );
+
+ if ( argc > 4 && JSVAL_IS_OBJECT( argv[4] ) )
+ c.setProperty( obj , "_query" , argv[4] );
+ else {
+ JSObject * temp = JS_NewObject( cx , 0 , 0 , 0 );
+ CHECKNEWOBJECT( temp, cx, "dbquery_constructor" );
+ c.setProperty( obj , "_query" , OBJECT_TO_JSVAL( temp ) );
+ }
+
+ if ( argc > 5 && JSVAL_IS_OBJECT( argv[5] ) )
+ c.setProperty( obj , "_fields" , argv[5] );
+ else
+ c.setProperty( obj , "_fields" , JSVAL_NULL );
+
+
+ if ( argc > 6 && JSVAL_IS_NUMBER( argv[6] ) )
+ c.setProperty( obj , "_limit" , argv[6] );
+ else
+ c.setProperty( obj , "_limit" , JSVAL_ZERO );
+
+ if ( argc > 7 && JSVAL_IS_NUMBER( argv[7] ) )
+ c.setProperty( obj , "_skip" , argv[7] );
+ else
+ c.setProperty( obj , "_skip" , JSVAL_ZERO );
+
+ if ( argc > 8 && JSVAL_IS_NUMBER( argv[8] ) )
+ c.setProperty( obj , "_batchSize" , argv[8] );
+ else
+ c.setProperty( obj , "_batchSize" , JSVAL_ZERO );
+
+ if ( argc > 9 && JSVAL_IS_NUMBER( argv[9] ) )
+ c.setProperty( obj , "_options" , argv[9] );
+ else
+ c.setProperty( obj , "_options" , JSVAL_ZERO );
+
+
+ c.setProperty( obj , "_cursor" , JSVAL_NULL );
+ c.setProperty( obj , "_numReturned" , JSVAL_ZERO );
+ c.setProperty( obj , "_special" , JSVAL_FALSE );
+
+ return JS_TRUE;
+ }
+
+ JSBool dbquery_resolve( JSContext *cx, JSObject *obj, jsval id, uintN flags, JSObject **objp ) {
+ if ( flags & JSRESOLVE_ASSIGNING )
+ return JS_TRUE;
+
+ if ( ! JSVAL_IS_NUMBER( id ) )
+ return JS_TRUE;
+
+ jsval val = JSVAL_VOID;
+ assert( JS_CallFunctionName( cx , obj , "arrayAccess" , 1 , &id , &val ) );
+ Convertor c(cx);
+ c.setProperty( obj , c.toString( id ).c_str() , val );
+ *objp = obj;
+ return JS_TRUE;
+ }
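+
+ // The resolve hook above is what makes numeric indexing work on DBQuery
+ // objects: an access such as q[3] (illustrative) calls the JS-level
+ // arrayAccess helper and caches the result as a plain property on the object.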
+
+ JSClass dbquery_class = {
+ "DBQuery" , JSCLASS_NEW_RESOLVE ,
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, (JSResolveOp)(&dbquery_resolve) , JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+ };
+
+ // ---- other stuff ----
+
+ void initMongoJS( SMScope * scope , JSContext * cx , JSObject * global , bool local ) {
+
+ assert( JS_InitClass( cx , global , 0 , &mongo_class , local ? mongo_local_constructor : mongo_external_constructor , 0 , 0 , mongo_functions , 0 , 0 ) );
+
+ assert( JS_InitClass( cx , global , 0 , &object_id_class , object_id_constructor , 0 , 0 , 0 , 0 , 0 ) );
+ assert( JS_InitClass( cx , global , 0 , &db_class , db_constructor , 2 , 0 , 0 , 0 , 0 ) );
+ assert( JS_InitClass( cx , global , 0 , &db_collection_class , db_collection_constructor , 4 , 0 , 0 , 0 , 0 ) );
+ assert( JS_InitClass( cx , global , 0 , &internal_cursor_class , internal_cursor_constructor , 0 , 0 , internal_cursor_functions , 0 , 0 ) );
+ assert( JS_InitClass( cx , global , 0 , &dbquery_class , dbquery_constructor , 0 , 0 , 0 , 0 , 0 ) );
+ assert( JS_InitClass( cx , global , 0 , &dbpointer_class , dbpointer_constructor , 0 , 0 , dbpointer_functions , 0 , 0 ) );
+ assert( JS_InitClass( cx , global , 0 , &bindata_class , bindata_constructor , 0 , 0 , bindata_functions , 0 , 0 ) );
+// assert( JS_InitClass( cx , global , 0 , &uuid_class , uuid_constructor , 0 , 0 , uuid_functions , 0 , 0 ) );
+
+ assert( JS_InitClass( cx , global , 0 , &timestamp_class , timestamp_constructor , 0 , 0 , 0 , 0 , 0 ) );
+ assert( JS_InitClass( cx , global , 0 , &numberlong_class , numberlong_constructor , 0 , 0 , numberlong_functions , 0 , 0 ) );
+ assert( JS_InitClass( cx , global , 0 , &numberint_class , numberint_constructor , 0 , 0 , numberint_functions , 0 , 0 ) );
+ assert( JS_InitClass( cx , global , 0 , &minkey_class , 0 , 0 , 0 , 0 , 0 , 0 ) );
+ assert( JS_InitClass( cx , global , 0 , &maxkey_class , 0 , 0 , 0 , 0 , 0 , 0 ) );
+
+ assert( JS_InitClass( cx , global , 0 , &map_class , map_constructor , 0 , 0 , map_functions , 0 , 0 ) );
+
+ assert( JS_InitClass( cx , global , 0 , &bson_ro_class , bson_cons , 0 , 0 , bson_functions , 0 , 0 ) );
+ assert( JS_InitClass( cx , global , 0 , &bson_class , bson_cons , 0 , 0 , bson_functions , 0 , 0 ) );
+
+ static const char *dbrefName = "DBRef";
+ dbref_class.name = dbrefName;
+ assert( JS_InitClass( cx , global , 0 , &dbref_class , dbref_constructor , 2 , 0 , bson_functions , 0 , 0 ) );
+
+ scope->execCoreFiles();
+ }
+
+ bool appendSpecialDBObject( Convertor * c , BSONObjBuilder& b , const string& name , jsval val , JSObject * o ) {
+
+ if ( JS_InstanceOf( c->_context , o , &object_id_class , 0 ) ) {
+ OID oid;
+ oid.init( c->getString( o , "str" ) );
+ b.append( name , oid );
+ return true;
+ }
+
+ if ( JS_InstanceOf( c->_context , o , &minkey_class , 0 ) ) {
+ b.appendMinKey( name );
+ return true;
+ }
+
+ if ( JS_InstanceOf( c->_context , o , &maxkey_class , 0 ) ) {
+ b.appendMaxKey( name );
+ return true;
+ }
+
+ if ( JS_InstanceOf( c->_context , o , &timestamp_class , 0 ) ) {
+ b.appendTimestamp( name , (unsigned long long)c->getNumber( o , "t" ) , (unsigned int )c->getNumber( o , "i" ) );
+ return true;
+ }
+
+ if ( JS_InstanceOf( c->_context , o , &numberlong_class , 0 ) ) {
+ b.append( name , c->toNumberLongUnsafe( o ) );
+ return true;
+ }
+
+ if ( JS_InstanceOf( c->_context , o , &numberint_class , 0 ) ) {
+ b.append( name , c->toNumberInt( o ) );
+ return true;
+ }
+
+ if ( JS_InstanceOf( c->_context , o , &dbpointer_class , 0 ) ) {
+ b.appendDBRef( name , c->getString( o , "ns" ) , c->toOID( c->getProperty( o , "id" ) ) );
+ return true;
+ }
+
+ if ( JS_InstanceOf( c->_context , o , &bindata_class , 0 ) ) {
+ void *holder = JS_GetPrivate( c->_context , o );
+ const char *data = ( ( BinDataHolder * )( holder ) )->c_;
+ b.appendBinData( name ,
+ (int)(c->getNumber( o , "len" )) , (BinDataType)((char)(c->getNumber( o , "type" ) ) ) ,
+ data
+ );
+ return true;
+ }
+
+#if defined( SM16 ) || defined( MOZJS )
+#warning dates do not work in your version of spider monkey
+ {
+ jsdouble d = js_DateGetMsecSinceEpoch( c->_context , o );
+ if ( d ) {
+ b.appendDate( name , Date_t(d) );
+ return true;
+ }
+ }
+#elif defined( XULRUNNER )
+ if ( JS_InstanceOf( c->_context , o, globalSMEngine->_dateClass , 0 ) ) {
+ jsdouble d = js_DateGetMsecSinceEpoch( c->_context , o );
+ b.appendDate( name , Date_t(d) );
+ return true;
+ }
+#else
+ if ( JS_InstanceOf( c->_context , o, &js_DateClass , 0 ) ) {
+ jsdouble d = js_DateGetMsecSinceEpoch( c->_context , o );
+ long long d2 = (long long)d;
+ b.appendDate( name , Date_t((unsigned long long)d2) );
+ return true;
+ }
+#endif
+
+
+ if ( JS_InstanceOf( c->_context , o , &dbquery_class , 0 ) ||
+ JS_InstanceOf( c->_context , o , &mongo_class , 0 ) ||
+ JS_InstanceOf( c->_context , o , &db_collection_class , 0 ) ) {
+ b.append( name , c->toString( val ) );
+ return true;
+ }
+
+#if defined( XULRUNNER )
+ if ( JS_InstanceOf( c->_context , o , globalSMEngine->_regexClass , 0 ) ) {
+ c->appendRegex( b , name , c->toString( val ) );
+ return true;
+ }
+#elif defined( SM18 )
+ if ( JS_InstanceOf( c->_context , o , &js_RegExpClass , 0 ) ) {
+ c->appendRegex( b , name , c->toString( val ) );
+ return true;
+ }
+#endif
+
+ return false;
+ }
+
+ bool isDate( JSContext * cx , JSObject * o ) {
+#if defined( SM16 ) || defined( MOZJS ) || defined( XULRUNNER )
+ return js_DateGetMsecSinceEpoch( cx , o ) != 0;
+#else
+ return JS_InstanceOf( cx , o, &js_DateClass, 0 );
+#endif
+ }
+
+}
diff --git a/src/mongo/scripting/utils.cpp b/src/mongo/scripting/utils.cpp
new file mode 100644
index 00000000000..612b173fdf8
--- /dev/null
+++ b/src/mongo/scripting/utils.cpp
@@ -0,0 +1,77 @@
+// utils.cpp
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "pch.h"
+#include "engine.h"
+#include "../util/md5.hpp"
+#include "../util/version.h"
+
+namespace mongo {
+
+ void installBenchmarkSystem( Scope& scope );
+
+ BSONObj jsmd5( const BSONObj &a, void* data ) {
+ uassert( 10261 , "js md5 needs a string" , a.firstElement().type() == String );
+ const char * s = a.firstElement().valuestrsafe();
+
+ md5digest d;
+ md5_state_t st;
+ md5_init(&st);
+ md5_append( &st , (const md5_byte_t*)s , strlen( s ) );
+ md5_finish(&st, d);
+
+ return BSON( "" << digestToString( d ) );
+ }
+
+ BSONObj JSVersion( const BSONObj& args, void* data ) {
+ cout << "version: " << versionString << endl;
+ if ( strstr( versionString , "+" ) )
+ printGitVersion();
+ return BSONObj();
+ }
+
+
+ BSONObj JSSleep(const mongo::BSONObj &args, void* data) {
+ assert( args.nFields() == 1 );
+ assert( args.firstElement().isNumber() );
+ int ms = int( args.firstElement().number() );
+ {
+ auto_ptr< ScriptEngine::Unlocker > u = globalScriptEngine->newThreadUnlocker();
+ sleepmillis( ms );
+ }
+
+ BSONObjBuilder b;
+ b.appendUndefined( "" );
+ return b.obj();
+ }
+
+ // ---------------------------------
+ // ---- installer --------
+ // ---------------------------------
+
+ void installGlobalUtils( Scope& scope ) {
+ scope.injectNative( "hex_md5" , jsmd5 );
+ scope.injectNative( "version" , JSVersion );
+ scope.injectNative( "sleep" , JSSleep );
+
+ installBenchmarkSystem( scope );
+ }
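+
+ // Once installed these natives are callable from JS, e.g. (illustrative):
+ // hex_md5("abc") returns the md5 hex digest of its argument, version()
+ // prints the server version string, and sleep(1000) pauses for roughly one
+ // second while the engine lock is released.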
+
+}
+
+
diff --git a/src/mongo/scripting/v8_db.cpp b/src/mongo/scripting/v8_db.cpp
new file mode 100644
index 00000000000..de419b368d9
--- /dev/null
+++ b/src/mongo/scripting/v8_db.cpp
@@ -0,0 +1,1128 @@
+// v8_db.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined(_WIN32)
+/** this is a hack - v8stdint.h defined uint16_t etc. on _WIN32 only, and that collides with
+ our usage of boost */
+#include "boost/cstdint.hpp"
+using namespace boost;
+#define V8STDINT_H_
+#endif
+
+#include "v8_wrapper.h"
+#include "v8_utils.h"
+#include "engine_v8.h"
+#include "v8_db.h"
+#include "util/base64.h"
+#include "util/text.h"
+#include "../client/syncclusterconnection.h"
+#include "../s/d_logic.h"
+#include <iostream>
+
+using namespace std;
+using namespace v8;
+
+namespace mongo {
+
+#define DDD(x)
+
+ static v8::Handle<v8::Value> newInstance( v8::Function* f, const v8::Arguments& args ) {
+ // need to translate arguments into an array
+ int argc = args.Length();
+ scoped_array< Handle<Value> > argv( new Handle<Value>[argc] );
+ for (int i = 0; i < argc; ++i) {
+ argv[i] = args[i];
+ }
+ return f->NewInstance(argc, argv.get());
+ }
+
+ v8::Handle<v8::FunctionTemplate> getMongoFunctionTemplate( V8Scope* scope, bool local ) {
+ v8::Handle<v8::FunctionTemplate> mongo;
+ if ( local ) {
+ mongo = scope->createV8Function(mongoConsLocal);
+ }
+ else {
+ mongo = scope->createV8Function(mongoConsExternal);
+ }
+ mongo->InstanceTemplate()->SetInternalFieldCount( 1 );
+ v8::Handle<v8::Template> proto = mongo->PrototypeTemplate();
+ scope->injectV8Function("find", mongoFind, proto);
+ scope->injectV8Function("insert", mongoInsert, proto);
+ scope->injectV8Function("remove", mongoRemove, proto);
+ scope->injectV8Function("update", mongoUpdate, proto);
+ scope->injectV8Function("auth", mongoAuth, proto);
+
+ v8::Handle<FunctionTemplate> ic = scope->createV8Function(internalCursorCons);
+ ic->InstanceTemplate()->SetInternalFieldCount( 1 );
+ v8::Handle<v8::Template> icproto = ic->PrototypeTemplate();
+ scope->injectV8Function("next", internalCursorNext, icproto);
+ scope->injectV8Function("hasNext", internalCursorHasNext, icproto);
+ scope->injectV8Function("objsLeftInBatch", internalCursorObjsLeftInBatch, icproto);
+ scope->injectV8Function("readOnly", internalCursorReadOnly, icproto);
+ proto->Set( scope->getV8Str( "internalCursor" ) , ic );
+
+ return mongo;
+ }
+
+ v8::Handle<v8::FunctionTemplate> getNumberLongFunctionTemplate(V8Scope* scope) {
+ v8::Handle<v8::FunctionTemplate> numberLong = scope->createV8Function(numberLongInit);
+ v8::Local<v8::Template> proto = numberLong->PrototypeTemplate();
+ scope->injectV8Function("valueOf", numberLongValueOf, proto);
+ scope->injectV8Function("toNumber", numberLongToNumber, proto);
+ scope->injectV8Function("toString", numberLongToString, proto);
+
+ return numberLong;
+ }
+
+ v8::Handle<v8::FunctionTemplate> getNumberIntFunctionTemplate(V8Scope* scope) {
+ v8::Handle<v8::FunctionTemplate> numberInt = scope->createV8Function(numberIntInit);
+ v8::Local<v8::Template> proto = numberInt->PrototypeTemplate();
+ scope->injectV8Function("valueOf", numberIntValueOf, proto);
+ scope->injectV8Function("toNumber", numberIntToNumber, proto);
+ scope->injectV8Function("toString", numberIntToString, proto);
+
+ return numberInt;
+ }
+
+ v8::Handle<v8::FunctionTemplate> getBinDataFunctionTemplate(V8Scope* scope) {
+ v8::Handle<v8::FunctionTemplate> binData = scope->createV8Function(binDataInit);
+ binData->InstanceTemplate()->SetInternalFieldCount(1);
+ v8::Local<v8::Template> proto = binData->PrototypeTemplate();
+ scope->injectV8Function("toString", binDataToString, proto);
+ scope->injectV8Function("base64", binDataToBase64, proto);
+ scope->injectV8Function("hex", binDataToHex, proto);
+ return binData;
+ }
+
+ v8::Handle<v8::FunctionTemplate> getUUIDFunctionTemplate(V8Scope* scope) {
+ v8::Handle<v8::FunctionTemplate> templ = scope->createV8Function(uuidInit);
+ return templ;
+ }
+
+ v8::Handle<v8::FunctionTemplate> getMD5FunctionTemplate(V8Scope* scope) {
+ v8::Handle<v8::FunctionTemplate> templ = scope->createV8Function(md5Init);
+ return templ;
+ }
+
+ v8::Handle<v8::FunctionTemplate> getHexDataFunctionTemplate(V8Scope* scope) {
+ v8::Handle<v8::FunctionTemplate> templ = scope->createV8Function(hexDataInit);
+ return templ;
+ }
+
+ v8::Handle<v8::FunctionTemplate> getTimestampFunctionTemplate(V8Scope* scope) {
+ v8::Handle<v8::FunctionTemplate> ts = scope->createV8Function(dbTimestampInit);
+ ts->InstanceTemplate()->SetInternalFieldCount( 1 );
+ return ts;
+ }
+
+// void installDBTypes( V8Scope* scope, Handle<ObjectTemplate>& global ) {
+// v8::Handle<v8::FunctionTemplate> db = scope->createV8Function(dbInit);
+// db->InstanceTemplate()->SetNamedPropertyHandler( collectionFallback );
+// global->Set(v8::String::New("DB") , db );
+//
+// v8::Handle<v8::FunctionTemplate> dbCollection = scope->createV8Function(collectionInit);
+// dbCollection->InstanceTemplate()->SetNamedPropertyHandler( collectionFallback );
+// global->Set(v8::String::New("DBCollection") , dbCollection );
+//
+//
+// v8::Handle<v8::FunctionTemplate> dbQuery = scope->createV8Function(dbQueryInit);
+// dbQuery->InstanceTemplate()->SetIndexedPropertyHandler( dbQueryIndexAccess );
+// global->Set(v8::String::New("DBQuery") , dbQuery );
+//
+// global->Set( v8::String::New("ObjectId") , newV8Function< objectIdInit >(scope) );
+//
+// global->Set( v8::String::New("DBRef") , newV8Function< dbRefInit >(scope) );
+//
+// global->Set( v8::String::New("DBPointer") , newV8Function< dbPointerInit >(scope) );
+//
+// global->Set( v8::String::New("BinData") , getBinDataFunctionTemplate(scope) );
+//
+// global->Set( v8::String::New("NumberLong") , getNumberLongFunctionTemplate(scope) );
+//
+// global->Set( v8::String::New("Timestamp") , getTimestampFunctionTemplate(scope) );
+// }
+
+ void installDBTypes( V8Scope* scope, v8::Handle<v8::Object>& global ) {
+ v8::Handle<v8::FunctionTemplate> db = scope->createV8Function(dbInit);
+ db->InstanceTemplate()->SetNamedPropertyHandler( collectionGetter, collectionSetter );
+ global->Set(scope->getV8Str("DB") , db->GetFunction() );
+ v8::Handle<v8::FunctionTemplate> dbCollection = scope->createV8Function(collectionInit);
+ dbCollection->InstanceTemplate()->SetNamedPropertyHandler( collectionGetter, collectionSetter );
+ global->Set(scope->getV8Str("DBCollection") , dbCollection->GetFunction() );
+
+
+ v8::Handle<v8::FunctionTemplate> dbQuery = scope->createV8Function(dbQueryInit);
+ dbQuery->InstanceTemplate()->SetIndexedPropertyHandler( dbQueryIndexAccess );
+ global->Set(scope->getV8Str("DBQuery") , dbQuery->GetFunction() );
+
+ scope->injectV8Function("ObjectId", objectIdInit, global);
+ scope->injectV8Function("DBRef", dbRefInit, global);
+ scope->injectV8Function("DBPointer", dbPointerInit, global);
+
+ global->Set( scope->getV8Str("BinData") , getBinDataFunctionTemplate(scope)->GetFunction() );
+ global->Set( scope->getV8Str("UUID") , getUUIDFunctionTemplate(scope)->GetFunction() );
+ global->Set( scope->getV8Str("MD5") , getMD5FunctionTemplate(scope)->GetFunction() );
+ global->Set( scope->getV8Str("HexData") , getHexDataFunctionTemplate(scope)->GetFunction() );
+ global->Set( scope->getV8Str("NumberLong") , getNumberLongFunctionTemplate(scope)->GetFunction() );
+ global->Set( scope->getV8Str("NumberInt") , getNumberIntFunctionTemplate(scope)->GetFunction() );
+ global->Set( scope->getV8Str("Timestamp") , getTimestampFunctionTemplate(scope)->GetFunction() );
+
+ BSONObjBuilder b;
+ b.appendMaxKey( "" );
+ b.appendMinKey( "" );
+ BSONObj o = b.obj();
+ BSONObjIterator i( o );
+ global->Set( scope->getV8Str("MaxKey"), scope->mongoToV8Element( i.next() ) );
+ global->Set( scope->getV8Str("MinKey"), scope->mongoToV8Element( i.next() ) );
+
+ global->Get( scope->getV8Str( "Object" ) )->ToObject()->Set( scope->getV8Str("bsonsize") , scope->createV8Function(bsonsize)->GetFunction() );
+ }
+
+ void destroyConnection( Persistent<Value> self, void* parameter) {
+ delete static_cast<DBClientBase*>(parameter);
+ self.Dispose();
+ self.Clear();
+ }
+
+ Handle<Value> mongoConsExternal(V8Scope* scope, const Arguments& args) {
+
+ char host[255];
+
+ if ( args.Length() > 0 && args[0]->IsString() ) {
+ assert( args[0]->ToString()->Utf8Length() < 250 );
+ args[0]->ToString()->WriteAscii( host );
+ }
+ else {
+ strcpy( host , "127.0.0.1" );
+ }
+
+ string errmsg;
+ ConnectionString cs = ConnectionString::parse( host , errmsg );
+ if ( ! cs.isValid() )
+ return v8::ThrowException( v8::String::New( errmsg.c_str() ) );
+
+
+ DBClientWithCommands * conn;
+ {
+ V8Unlock ul;
+ conn = cs.connect( errmsg );
+ }
+ if ( ! conn )
+ return v8::ThrowException( v8::String::New( errmsg.c_str() ) );
+
+ Persistent<v8::Object> self = Persistent<v8::Object>::New( args.Holder() );
+ self.MakeWeak( conn , destroyConnection );
+
+ {
+ V8Unlock ul;
+ ScriptEngine::runConnectCallback( *conn );
+ }
+
+ args.This()->SetInternalField( 0 , External::New( conn ) );
+ args.This()->Set( scope->getV8Str( "slaveOk" ) , Boolean::New( false ) );
+ args.This()->Set( scope->getV8Str( "host" ) , scope->getV8Str( host ) );
+
+ return v8::Undefined();
+ }
+
+ Handle<Value> mongoConsLocal(V8Scope* scope, const Arguments& args) {
+
+ if ( args.Length() > 0 )
+ return v8::ThrowException( v8::String::New( "local Mongo constructor takes no args" ) );
+
+ DBClientBase * conn;
+ {
+ V8Unlock ul;
+ conn = createDirectClient();
+ }
+
+ Persistent<v8::Object> self = Persistent<v8::Object>::New( args.This() );
+ self.MakeWeak( conn , destroyConnection );
+
+ // NOTE I don't believe the conn object will ever be freed.
+ args.This()->SetInternalField( 0 , External::New( conn ) );
+ args.This()->Set( scope->getV8Str( "slaveOk" ) , Boolean::New( false ) );
+ args.This()->Set( scope->getV8Str( "host" ) , scope->getV8Str( "EMBEDDED" ) );
+
+ return v8::Undefined();
+ }
+
+
+ // ---
+
+#ifdef _WIN32
+#define GETNS char * ns = new char[args[0]->ToString()->Utf8Length() + 1]; args[0]->ToString()->WriteUtf8( ns );
+#else
+#define GETNS char ns[args[0]->ToString()->Utf8Length() + 1]; args[0]->ToString()->WriteUtf8( ns );
+#endif
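+
+ // GETNS (above) materializes args[0] as a char buffer named ns holding the
+ // UTF-8 namespace string; the Windows variant heap-allocates because MSVC
+ // has no variable-length arrays.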
+
+ DBClientBase * getConnection( const Arguments& args ) {
+ Local<External> c = External::Cast( *(args.This()->GetInternalField( 0 )) );
+ DBClientBase * conn = (DBClientBase*)(c->Value());
+ assert( conn );
+ return conn;
+ }
+
+ // ---- real methods
+
+ void destroyCursor( Persistent<Value> self, void* parameter) {
+ delete static_cast<mongo::DBClientCursor*>(parameter);
+ self.Dispose();
+ self.Clear();
+ }
+
+ /**
+ 0 - namespace
+ 1 - query
+ 2 - fields
+ 3 - limit
+ 4 - skip
+ 5 - batch size
+ 6 - options
+ */
+ Handle<Value> mongoFind(V8Scope* scope, const Arguments& args) {
+ HandleScope handle_scope;
+
+ jsassert( args.Length() == 7 , "find needs 7 args" );
+ jsassert( args[1]->IsObject() , "needs to be an object" );
+ DBClientBase * conn = getConnection( args );
+ GETNS;
+
+ BSONObj q = scope->v8ToMongo( args[1]->ToObject() );
+ DDD( "query:" << q );
+
+ BSONObj fields;
+ bool haveFields = args[2]->IsObject() && args[2]->ToObject()->GetPropertyNames()->Length() > 0;
+ if ( haveFields )
+ fields = scope->v8ToMongo( args[2]->ToObject() );
+
+ Local<v8::Object> mongo = args.This();
+
+ try {
+ auto_ptr<mongo::DBClientCursor> cursor;
+ int nToReturn = (int)(args[3]->ToNumber()->Value());
+ int nToSkip = (int)(args[4]->ToNumber()->Value());
+ int batchSize = (int)(args[5]->ToNumber()->Value());
+ int options = (int)(args[6]->ToNumber()->Value());
+ {
+ V8Unlock u;
+ cursor = conn->query( ns, q , nToReturn , nToSkip , haveFields ? &fields : 0, options , batchSize );
+ if ( ! cursor.get() )
+ return v8::ThrowException( v8::String::New( "error doing query: failed" ) );
+ }
+ v8::Function * cons = (v8::Function*)( *( mongo->Get( scope->getV8Str( "internalCursor" ) ) ) );
+ if ( !cons ) {
+ // may get here in case of thread termination
+ return v8::ThrowException( v8::String::New( "Could not create a cursor" ) );
+ }
+
+ Persistent<v8::Object> c = Persistent<v8::Object>::New( cons->NewInstance() );
+ c.MakeWeak( cursor.get() , destroyCursor );
+ c->SetInternalField( 0 , External::New( cursor.release() ) );
+ return handle_scope.Close(c);
+ }
+ catch ( ... ) {
+ return v8::ThrowException( v8::String::New( "socket error on query" ) );
+ }
+ }
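+
+ // The cursor object returned above owns the underlying DBClientCursor: the
+ // pointer is stored in internal field 0 and destroyCursor frees it when the
+ // weak persistent handle is garbage collected.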
+
+ v8::Handle<v8::Value> mongoInsert(V8Scope* scope, const v8::Arguments& args) {
+ jsassert( args.Length() == 2 , "insert needs 2 args" );
+ jsassert( args[1]->IsObject() , "have to insert an object" );
+
+ if ( args.This()->Get( scope->getV8Str( "readOnly" ) )->BooleanValue() )
+ return v8::ThrowException( v8::String::New( "js db in read only mode" ) );
+
+ DBClientBase * conn = getConnection( args );
+ GETNS;
+
+ v8::Handle<v8::Object> in = args[1]->ToObject();
+
+ if( args[1]->IsArray() ){
+
+ v8::Local<v8::Array> arr = v8::Array::Cast( *args[1] );
+ vector<BSONObj> bos;
+ uint32_t len = arr->Length();
+
+ for( uint32_t i = 0; i < len; i++ ){
+
+ v8::Local<v8::Object> el = arr->CloneElementAt( i );
+
+ // Set ID on the element if necessary
+ if ( ! el->Has( scope->getV8Str( "_id" ) ) ) {
+ v8::Handle<v8::Value> argv[1];
+ el->Set( scope->getV8Str( "_id" ) , scope->getObjectIdCons()->NewInstance( 0 , argv ) );
+ }
+
+ bos.push_back( scope->v8ToMongo( el ) );
+ }
+
+ DDD( "want to save batch : " << bos.length );
+ try {
+ V8Unlock u;
+ conn->insert( ns , bos );
+ }
+ catch ( ... ) {
+ return v8::ThrowException( v8::String::New( "socket error on bulk insert" ) );
+ }
+
+ }
+ else {
+
+ if ( ! in->Has( scope->getV8Str( "_id" ) ) ) {
+ v8::Handle<v8::Value> argv[1];
+ in->Set( scope->getV8Str( "_id" ) , scope->getObjectIdCons()->NewInstance( 0 , argv ) );
+ }
+
+ BSONObj o = scope->v8ToMongo( in );
+
+ DDD( "want to save : " << o.jsonString() );
+ try {
+ V8Unlock u;
+ conn->insert( ns , o );
+ }
+ catch ( ... ) {
+ return v8::ThrowException( v8::String::New( "socket error on insert" ) );
+ }
+
+ }
+
+ return v8::Undefined();
+ }
+
+ v8::Handle<v8::Value> mongoRemove(V8Scope* scope, const v8::Arguments& args) {
+ jsassert( args.Length() == 2 || args.Length() == 3 , "remove needs 2 or 3 args" );
+ jsassert( args[1]->IsObject() , "have to remove an object template" );
+
+ if ( args.This()->Get( scope->getV8Str( "readOnly" ) )->BooleanValue() )
+ return v8::ThrowException( v8::String::New( "js db in read only mode" ) );
+
+ DBClientBase * conn = getConnection( args );
+ GETNS;
+
+ v8::Handle<v8::Object> in = args[1]->ToObject();
+ BSONObj o = scope->v8ToMongo( in );
+
+ bool justOne = false;
+ if ( args.Length() > 2 ) {
+ justOne = args[2]->BooleanValue();
+ }
+
+ DDD( "want to remove : " << o.jsonString() );
+ try {
+ V8Unlock u;
+ conn->remove( ns , o , justOne );
+ }
+ catch ( ... ) {
+ return v8::ThrowException( v8::String::New( "socket error on remove" ) );
+ }
+
+ return v8::Undefined();
+ }
+
+ v8::Handle<v8::Value> mongoUpdate(V8Scope* scope, const v8::Arguments& args) {
+ jsassert( args.Length() >= 3 , "update needs at least 3 args" );
+ jsassert( args[1]->IsObject() , "1st param to update has to be an object" );
+ jsassert( args[2]->IsObject() , "2nd param to update has to be an object" );
+
+ if ( args.This()->Get( scope->getV8Str( "readOnly" ) )->BooleanValue() )
+ return v8::ThrowException( v8::String::New( "js db in read only mode" ) );
+
+ DBClientBase * conn = getConnection( args );
+ GETNS;
+
+ v8::Handle<v8::Object> q = args[1]->ToObject();
+ v8::Handle<v8::Object> o = args[2]->ToObject();
+
+ bool upsert = args.Length() > 3 && args[3]->IsBoolean() && args[3]->ToBoolean()->Value();
+ bool multi = args.Length() > 4 && args[4]->IsBoolean() && args[4]->ToBoolean()->Value();
+
+ try {
+ BSONObj q1 = scope->v8ToMongo( q );
+ BSONObj o1 = scope->v8ToMongo( o );
+ V8Unlock u;
+ conn->update( ns , q1 , o1 , upsert, multi );
+ }
+ catch ( ... ) {
+ return v8::ThrowException( v8::String::New( "socket error on remove" ) );
+ }
+
+ return v8::Undefined();
+ }
+
+ v8::Handle<v8::Value> mongoAuth(V8Scope* scope, const v8::Arguments& args) {
+ jsassert( args.Length() >= 3 , "update needs at least 3 args" );
+ DBClientBase * conn = getConnection( args );
+ string db = toSTLString(args[0]);
+ string username = toSTLString(args[1]);
+ string password = toSTLString(args[2]);
+ string errmsg = "";
+
+ try {
+ if (conn->auth(db, username, password, errmsg)) {
+ return v8::Boolean::New(true);
+ }
+ } catch ( ... ) {
+ }
+ return v8::ThrowException( v8::String::New( errmsg.c_str() ) );
+ }
+
+// + JSBool mongo_auth(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) {
+// + smuassert( cx , "mongo_auth needs 3 args" , argc == 3 );
+// + shared_ptr< DBClientWithCommands > * connHolder = (shared_ptr< DBClientWithCommands >*)JS_GetPrivate( cx , obj );
+// + smuassert( cx , "no connection!" , connHolder && connHolder->get() );
+// + DBClientWithCommands *conn = connHolder->get();
+// +
+// + Convertor c( cx );
+// +
+// + string db = c.toString( argv[0] );
+// + string username = c.toString( argv[1] );
+// + string password = c.toString( argv[2] );
+// + string errmsg = "";
+// +
+// + try {
+// + if (conn->auth(db, username, password, errmsg)) {
+// + return JS_TRUE;
+// + }
+// + JS_ReportError( cx, errmsg.c_str() );
+// + }
+// + catch ( ... ) {
+// + JS_ReportError( cx , "error doing query: unknown" );
+// + }
+// + return JS_FALSE;
+// + }
+
+
+ // --- cursor ---
+
+ mongo::DBClientCursor * getCursor( const Arguments& args ) {
+ Local<External> c = External::Cast( *(args.This()->GetInternalField( 0 ) ) );
+
+ mongo::DBClientCursor * cursor = (mongo::DBClientCursor*)(c->Value());
+ return cursor;
+ }
+
+ v8::Handle<v8::Value> internalCursorCons(V8Scope* scope, const v8::Arguments& args) {
+ return v8::Undefined();
+ }
+
+ v8::Handle<v8::Value> internalCursorNext(V8Scope* scope, const v8::Arguments& args) {
+ mongo::DBClientCursor * cursor = getCursor( args );
+ if ( ! cursor )
+ return v8::Undefined();
+ BSONObj o;
+ {
+ V8Unlock u;
+ o = cursor->next();
+ }
+ bool ro = false;
+ if (args.This()->Has(scope->V8STR_RO))
+ ro = args.This()->Get(scope->V8STR_RO)->BooleanValue();
+ return scope->mongoToLZV8( o, false, ro );
+ }
+
+ v8::Handle<v8::Value> internalCursorHasNext(V8Scope* scope, const v8::Arguments& args) {
+ mongo::DBClientCursor * cursor = getCursor( args );
+ if ( ! cursor )
+ return Boolean::New( false );
+ bool ret;
+ {
+ V8Unlock u;
+ ret = cursor->more();
+ }
+ return Boolean::New( ret );
+ }
+
+ v8::Handle<v8::Value> internalCursorObjsLeftInBatch(V8Scope* scope, const v8::Arguments& args) {
+ mongo::DBClientCursor * cursor = getCursor( args );
+ if ( ! cursor )
+ return v8::Number::New( (double) 0 );
+ int ret;
+ {
+ V8Unlock u;
+ ret = cursor->objsLeftInBatch();
+ }
+ return v8::Number::New( (double) ret );
+ }
+
+ v8::Handle<v8::Value> internalCursorReadOnly(V8Scope* scope, const v8::Arguments& args) {
+ Local<v8::Object> cursor = args.This();
+ cursor->Set(scope->V8STR_RO, v8::Boolean::New(true));
+ return cursor;
+ }
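+
+ // readOnly() just flags the cursor object; internalCursorNext (above) checks
+ // that flag and asks mongoToLZV8 for read-only wrappers, so documents from
+ // such a cursor are handed to JS as read-only.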
+
+ // --- DB ----
+
+ v8::Handle<v8::Value> dbInit(V8Scope* scope, const v8::Arguments& args) {
+ assert( args.Length() == 2 );
+
+ args.This()->Set( scope->getV8Str( "_mongo" ) , args[0] );
+ args.This()->Set( scope->getV8Str( "_name" ) , args[1] );
+
+ for ( int i=0; i<args.Length(); i++ )
+ assert( ! args[i]->IsUndefined() );
+
+ return v8::Undefined();
+ }
+
+ v8::Handle<v8::Value> collectionInit( V8Scope* scope, const v8::Arguments& args ) {
+ assert( args.Length() == 4 );
+
+ args.This()->Set( scope->getV8Str( "_mongo" ) , args[0] );
+ args.This()->Set( scope->getV8Str( "_db" ) , args[1] );
+ args.This()->Set( scope->getV8Str( "_shortName" ) , args[2] );
+ args.This()->Set( scope->V8STR_FULLNAME , args[3] );
+
+ if ( haveLocalShardingInfo( toSTLString( args[3] ) ) )
+ return v8::ThrowException( v8::String::New( "can't use sharded collection from db.eval" ) );
+
+ for ( int i=0; i<args.Length(); i++ )
+ assert( ! args[i]->IsUndefined() );
+
+ return v8::Undefined();
+ }
+
+ v8::Handle<v8::Value> dbQueryInit( V8Scope* scope, const v8::Arguments& args ) {
+
+ v8::Handle<v8::Object> t = args.This();
+
+ assert( args.Length() >= 4 );
+
+ t->Set( scope->getV8Str( "_mongo" ) , args[0] );
+ t->Set( scope->getV8Str( "_db" ) , args[1] );
+ t->Set( scope->getV8Str( "_collection" ) , args[2] );
+ t->Set( scope->getV8Str( "_ns" ) , args[3] );
+
+ if ( args.Length() > 4 && args[4]->IsObject() )
+ t->Set( scope->getV8Str( "_query" ) , args[4] );
+ else
+ t->Set( scope->getV8Str( "_query" ) , v8::Object::New() );
+
+ if ( args.Length() > 5 && args[5]->IsObject() )
+ t->Set( scope->getV8Str( "_fields" ) , args[5] );
+ else
+ t->Set( scope->getV8Str( "_fields" ) , v8::Null() );
+
+
+ if ( args.Length() > 6 && args[6]->IsNumber() )
+ t->Set( scope->getV8Str( "_limit" ) , args[6] );
+ else
+ t->Set( scope->getV8Str( "_limit" ) , Number::New( 0 ) );
+
+ if ( args.Length() > 7 && args[7]->IsNumber() )
+ t->Set( scope->getV8Str( "_skip" ) , args[7] );
+ else
+ t->Set( scope->getV8Str( "_skip" ) , Number::New( 0 ) );
+
+ if ( args.Length() > 8 && args[8]->IsNumber() )
+ t->Set( scope->getV8Str( "_batchSize" ) , args[8] );
+ else
+ t->Set( scope->getV8Str( "_batchSize" ) , Number::New( 0 ) );
+
+ if ( args.Length() > 9 && args[9]->IsNumber() )
+ t->Set( scope->getV8Str( "_options" ) , args[9] );
+ else
+ t->Set( scope->getV8Str( "_options" ) , Number::New( 0 ) );
+
+
+ t->Set( scope->getV8Str( "_cursor" ) , v8::Null() );
+ t->Set( scope->getV8Str( "_numReturned" ) , v8::Number::New(0) );
+ t->Set( scope->getV8Str( "_special" ) , Boolean::New(false) );
+
+ return v8::Undefined();
+ }
+
+ Handle<Value> collectionSetter( Local<v8::String> name, Local<Value> value, const AccessorInfo& info ) {
+ // a collection name cannot be overwritten by assignment
+ string sname = toSTLString( name );
+ if ( sname.length() == 0 || sname[0] == '_' ) {
+ // empty names and names starting with '_' may be overwritten
+ return Handle<Value>();
+ }
+ // otherwise swallow the assignment so the cached collection is not clobbered
+ return value;
+ }
+
+ v8::Handle<v8::Value> collectionGetter( v8::Local<v8::String> name, const v8::AccessorInfo &info) {
+ DDD( "collectionFallback [" << name << "]" );
+
+ // first look in prototype, may be a function
+ v8::Handle<v8::Value> real = info.This()->GetPrototype()->ToObject()->Get( name );
+ if ( !real->IsUndefined() )
+ return real;
+
+ // second, look at own properties, which may hold a cached collection object
+ string sname = toSTLString( name );
+ if (info.This()->HasRealNamedProperty(name)) {
+ v8::Local<v8::Value> prop = info.This()->GetRealNamedProperty( name );
+ if (prop->IsObject() && prop->ToObject()->HasRealNamedProperty(v8::String::New("_fullName"))) {
+ // need to check every time that the collection did not get sharded
+ if ( haveLocalShardingInfo( toSTLString( prop->ToObject()->GetRealNamedProperty(v8::String::New("_fullName")) ) ) )
+ return v8::ThrowException( v8::String::New( "can't use sharded collection from db.eval" ) );
+ }
+ return prop;
+ } else if ( sname.length() == 0 || sname[0] == '_' ) {
+ // if it starts with '_' we don't return a collection; one must use getCollection()
+ return v8::Undefined();
+ }
+
+ // no hit, create new collection
+ v8::Handle<v8::Value> getCollection = info.This()->GetPrototype()->ToObject()->Get( v8::String::New( "getCollection" ) );
+ assert( getCollection->IsFunction() );
+
+ TryCatch tryCatch;
+ v8::Function * f = (v8::Function*)(*getCollection);
+ v8::Handle<v8::Value> argv[1];
+ argv[0] = name;
+ v8::Local<v8::Value> coll = f->Call( info.This() , 1 , argv );
+ if (coll.IsEmpty()) {
+ if (tryCatch.HasCaught()) {
+ return v8::ThrowException( tryCatch.Exception() );
+ }
+ return Handle<Value>();
+ }
+
+ // cache the collection for reuse, but don't enumerate it
+ info.This()->ForceSet(name, coll, v8::DontEnum);
+ return coll;
+ }
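+
+ // Lookup order in the getter above, e.g. for db.foo (illustrative): first
+ // the prototype (so DB methods win), then an own property (a previously
+ // cached collection, re-checked for sharding), and finally getCollection()
+ // is called and its result cached with DontEnum.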
+
+ v8::Handle<v8::Value> dbQueryIndexAccess( unsigned int index , const v8::AccessorInfo& info ) {
+ v8::Handle<v8::Value> arrayAccess = info.This()->GetPrototype()->ToObject()->Get( v8::String::New( "arrayAccess" ) );
+ assert( arrayAccess->IsFunction() );
+
+ v8::Function * f = (v8::Function*)(*arrayAccess);
+ v8::Handle<v8::Value> argv[1];
+ argv[0] = v8::Number::New( index );
+
+ return f->Call( info.This() , 1 , argv );
+ }
+
+ v8::Handle<v8::Value> objectIdInit( V8Scope* scope, const v8::Arguments& args ) {
+ v8::Handle<v8::Object> it = args.This();
+ if ( it->IsUndefined() || it == v8::Context::GetCurrent()->Global() ) {
+ v8::Function * f = scope->getObjectIdCons();
+ return newInstance(f, args);
+ }
+
+ OID oid;
+
+ if ( args.Length() == 0 ) {
+ oid.init();
+ }
+ else {
+ string s = toSTLString( args[0] );
+ try {
+ Scope::validateObjectIdString( s );
+ }
+ catch ( const MsgAssertionException &m ) {
+ string error = m.toString();
+ return v8::ThrowException( v8::String::New( error.c_str() ) );
+ }
+ oid.init( s );
+ }
+
+ it->Set( scope->getV8Str( "str" ) , v8::String::New( oid.str().c_str() ) );
+
+ return it;
+ }
+
+ v8::Handle<v8::Value> dbRefInit( V8Scope* scope, const v8::Arguments& args ) {
+ v8::Handle<v8::Object> it = args.This();
+ if ( it->IsUndefined() || it == v8::Context::GetCurrent()->Global() ) {
+ v8::Function * f = scope->getNamedCons( "DBRef" );
+ return newInstance(f, args);
+ }
+
+ if (args.Length() != 2 && args.Length() != 0) {
+ return v8::ThrowException( v8::String::New( "DBRef needs 2 arguments" ) );
+ }
+
+ if ( args.Length() == 2 ) {
+ it->Set( scope->getV8Str( "$ref" ) , args[0] );
+ it->Set( scope->getV8Str( "$id" ) , args[1] );
+ }
+
+ return it;
+ }
+
+ v8::Handle<v8::Value> dbPointerInit( V8Scope* scope, const v8::Arguments& args ) {
+ v8::Handle<v8::Object> it = args.This();
+ if ( it->IsUndefined() || it == v8::Context::GetCurrent()->Global() ) {
+ v8::Function * f = scope->getNamedCons( "DBPointer" );
+ return newInstance(f, args);
+ }
+
+ if (args.Length() != 2) {
+ return v8::ThrowException( v8::String::New( "DBPointer needs 2 arguments" ) );
+ }
+
+ it->Set( scope->getV8Str( "ns" ) , args[0] );
+ it->Set( scope->getV8Str( "id" ) , args[1] );
+ it->SetHiddenValue( scope->getV8Str( "__DBPointer" ), v8::Number::New( 1 ) );
+
+ return it;
+ }
+
+ v8::Handle<v8::Value> dbTimestampInit( V8Scope* scope, const v8::Arguments& args ) {
+ v8::Handle<v8::Object> it = args.This();
+ if ( it->IsUndefined() || it == v8::Context::GetCurrent()->Global() ) {
+ v8::Function * f = scope->getNamedCons( "Timestamp" );
+ return newInstance(f, args);
+ }
+
+ if ( args.Length() == 0 ) {
+ it->Set( scope->getV8Str( "t" ) , v8::Number::New( 0 ) );
+ it->Set( scope->getV8Str( "i" ) , v8::Number::New( 0 ) );
+ }
+ else if ( args.Length() == 2 ) {
+ it->Set( scope->getV8Str( "t" ) , args[0] );
+ it->Set( scope->getV8Str( "i" ) , args[1] );
+ }
+ else {
+ return v8::ThrowException( v8::String::New( "Timestamp needs 0 or 2 arguments" ) );
+ }
+
+ it->SetInternalField( 0, v8::Uint32::New( Timestamp ) );
+
+ return it;
+ }
+
+
+ v8::Handle<v8::Value> binDataInit( V8Scope* scope, const v8::Arguments& args ) {
+ v8::Local<v8::Object> it = args.This();
+ if ( it->IsUndefined() || it == v8::Context::GetCurrent()->Global() ) {
+ v8::Function* f = scope->getNamedCons( "BinData" );
+ return newInstance(f, args);
+ }
+
+ Handle<Value> type;
+ Handle<Value> len;
+ int rlen;
+ char* data;
+ if (args.Length() == 3) {
+ // 3 args: len, type, data
+ len = args[0];
+ rlen = len->IntegerValue();
+ type = args[1];
+ v8::String::Utf8Value utf( args[ 2 ] );
+ char* tmp = *utf;
+ data = new char[rlen];
+ memcpy(data, tmp, rlen);
+ }
+ else if ( args.Length() == 2 ) {
+ // 2 args: type, base64 string
+ type = args[0];
+ v8::String::Utf8Value utf( args[ 1 ] );
+ string decoded = base64::decode( *utf );
+ const char* tmp = decoded.data();
+ rlen = decoded.length();
+ data = new char[rlen];
+ memcpy(data, tmp, rlen);
+ len = v8::Number::New(rlen);
+// it->Set( scope->getV8Str( "data" ), v8::String::New( decoded.data(), decoded.length() ) );
+ } else if (args.Length() == 0) {
+ // this is called by subclasses that will fill properties
+ return it;
+ } else {
+ return v8::ThrowException( v8::String::New( "BinData needs 2 or 3 arguments" ) );
+ }
+
+ it->Set( scope->getV8Str( "len" ) , len );
+ it->Set( scope->getV8Str( "type" ) , type );
+ it->SetHiddenValue( scope->V8STR_BINDATA, v8::Number::New( 1 ) );
+ Persistent<v8::Object> res = scope->wrapArrayObject(it, data);
+ return res;
+ }
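+
+ // The heap buffer allocated above is handed to scope->wrapArrayObject, which
+ // stores it in the object's internal field and (by assumption here) is
+ // responsible for freeing it when the wrapper is garbage collected.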
+
+ v8::Handle<v8::Value> binDataToString( V8Scope* scope, const v8::Arguments& args ) {
+ v8::Handle<v8::Object> it = args.This();
+ int len = it->Get( scope->V8STR_LEN )->Int32Value();
+ int type = it->Get( scope->V8STR_TYPE )->Int32Value();
+ Local<External> c = External::Cast( *(it->GetInternalField( 0 )) );
+ char* data = (char*)(c->Value());
+
+ stringstream ss;
+ ss << "BinData(" << type << ",\"";
+ base64::encode( ss, data, len );
+ ss << "\")";
+ string ret = ss.str();
+ return v8::String::New( ret.c_str() );
+ }
+
+ v8::Handle<v8::Value> binDataToBase64( V8Scope* scope, const v8::Arguments& args ) {
+ v8::Handle<v8::Object> it = args.This();
+ int len = Handle<v8::Number>::Cast(it->Get(scope->V8STR_LEN))->Int32Value();
+ Local<External> c = External::Cast( *(it->GetInternalField( 0 )) );
+ char* data = (char*)(c->Value());
+ stringstream ss;
+ base64::encode( ss, (const char *)data, len );
+ return v8::String::New(ss.str().c_str());
+ }
+
+ v8::Handle<v8::Value> binDataToHex( V8Scope* scope, const v8::Arguments& args ) {
+ v8::Handle<v8::Object> it = args.This();
+ int len = Handle<v8::Number>::Cast(it->Get(scope->V8STR_LEN))->Int32Value();
+ Local<External> c = External::Cast( *(it->GetInternalField( 0 )) );
+ char* data = (char*)(c->Value());
+ stringstream ss;
+ ss.setf (ios_base::hex , ios_base::basefield);
+ ss.fill ('0');
+ ss.setf (ios_base::right , ios_base::adjustfield);
+ for( int i = 0; i < len; i++ ) {
+ unsigned v = (unsigned char) data[i];
+ ss << setw(2) << v;
+ }
+ return v8::String::New(ss.str().c_str());
+ }
+
+ static v8::Handle<v8::Value> hexToBinData( V8Scope* scope, v8::Local<v8::Object> it, int type, string hexstr ) {
+ int len = hexstr.length() / 2;
+ char* data = new char[len];
+ const char* src = hexstr.c_str();
+ for( int i = 0; i < len; i++ ) {
+ data[i] = fromHex(src + i * 2);
+ }
+
+ it->Set( scope->V8STR_LEN , v8::Number::New(len) );
+ it->Set( scope->V8STR_TYPE , v8::Number::New(type) );
+ it->SetHiddenValue( scope->V8STR_BINDATA, v8::Number::New( 1 ) );
+ Persistent<v8::Object> res = scope->wrapArrayObject(it, data);
+ return res;
+ }
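+
+ // Example (illustrative): hexToBinData(scope, it, bdtUUID,
+ // "0123456789abcdef0123456789abcdef") yields a 16-byte BinData whose hex()
+ // round-trips to the same 32-character string.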
+
+ v8::Handle<v8::Value> uuidInit( V8Scope* scope, const v8::Arguments& args ) {
+ if (args.Length() != 1) {
+ return v8::ThrowException( v8::String::New( "UUIS needs 1 argument" ) );
+ }
+ v8::String::Utf8Value utf( args[ 0 ] );
+ if( utf.length() != 32 ) {
+ return v8::ThrowException( v8::String::New( "UUIS string must have 32 characters" ) );
+ }
+
+ v8::Function * f = scope->getNamedCons("BinData");
+ Local<v8::Object> it = f->NewInstance();
+ return hexToBinData(scope, it, bdtUUID, *utf);
+ }
+
+ v8::Handle<v8::Value> md5Init( V8Scope* scope, const v8::Arguments& args ) {
+ if (args.Length() != 1) {
+ return v8::ThrowException( v8::String::New( "MD5 needs 1 argument" ) );
+ }
+ v8::String::Utf8Value utf( args[ 0 ] );
+ if( utf.length() != 32 ) {
+ return v8::ThrowException( v8::String::New( "MD5 string must have 32 characters" ) );
+ }
+
+ v8::Function * f = scope->getNamedCons("BinData");
+ Local<v8::Object> it = f->NewInstance();
+ return hexToBinData(scope, it, MD5Type, *utf);
+ }
+
+ v8::Handle<v8::Value> hexDataInit( V8Scope* scope, const v8::Arguments& args ) {
+ if (args.Length() != 2) {
+ return v8::ThrowException( v8::String::New( "HexData needs 2 arguments" ) );
+ }
+ v8::String::Utf8Value utf( args[ 1 ] );
+ v8::Function * f = scope->getNamedCons("BinData");
+ Local<v8::Object> it = f->NewInstance();
+ return hexToBinData(scope, it, args[0]->IntegerValue(), *utf);
+ }
+
+ v8::Handle<v8::Value> numberLongInit( V8Scope* scope, const v8::Arguments& args ) {
+ v8::Handle<v8::Object> it = args.This();
+ if ( it->IsUndefined() || it == v8::Context::GetCurrent()->Global() ) {
+ v8::Function * f = scope->getNamedCons( "NumberLong" );
+ return newInstance(f, args);
+ }
+
+ if (args.Length() != 0 && args.Length() != 1 && args.Length() != 3) {
+ return v8::ThrowException( v8::String::New( "NumberLong needs 0, 1 or 3 arguments" ) );
+ }
+
+ if ( args.Length() == 0 ) {
+ it->Set( scope->getV8Str( "floatApprox" ), v8::Number::New( 0 ) );
+ }
+ else if ( args.Length() == 1 ) {
+ if ( args[ 0 ]->IsNumber() ) {
+ it->Set( scope->getV8Str( "floatApprox" ), args[ 0 ] );
+ }
+ else {
+ v8::String::Utf8Value data( args[ 0 ] );
+ string num = *data;
+ const char *numStr = num.c_str();
+ long long n;
+ try {
+ n = parseLL( numStr );
+ }
+ catch ( const AssertionException & ) {
+ return v8::ThrowException( v8::String::New( "could not convert string to long long" ) );
+ }
+ unsigned long long val = n;
+ // values above 2^53 are not accurately represented in JS
+ if ( (long long)val == (long long)(double)(long long)(val) && val < 9007199254740992ULL ) {
+ it->Set( scope->getV8Str( "floatApprox" ), v8::Number::New( (double)(long long)( val ) ) );
+ }
+ else {
+ it->Set( scope->getV8Str( "floatApprox" ), v8::Number::New( (double)(long long)( val ) ) );
+ it->Set( scope->getV8Str( "top" ), v8::Integer::New( val >> 32 ) );
+ it->Set( scope->getV8Str( "bottom" ), v8::Integer::New( (unsigned long)(val & 0x00000000ffffffff) ) );
+ }
+ }
+ }
+ else {
+ it->Set( scope->getV8Str( "floatApprox" ) , args[0] );
+ it->Set( scope->getV8Str( "top" ) , args[1] );
+ it->Set( scope->getV8Str( "bottom" ) , args[2] );
+ }
+ it->SetHiddenValue( scope->V8STR_NUMBERLONG, v8::Number::New( 1 ) );
+
+ return it;
+ }
+
+ long long numberLongVal( const v8::Handle< v8::Object > &it ) {
+ if ( !it->Has( v8::String::New( "top" ) ) )
+ return (long long)( it->Get( v8::String::New( "floatApprox" ) )->NumberValue() );
+ return
+ (long long)
+ ( (unsigned long long)( it->Get( v8::String::New( "top" ) )->ToInt32()->Value() ) << 32 ) +
+ (unsigned)( it->Get( v8::String::New( "bottom" ) )->ToInt32()->Value() );
+ }
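+
+ // Worked example: an object with top == 1 and bottom == 2 represents
+ // (1 << 32) + 2 == 4294967298; without a top property only floatApprox is
+ // used, which is exact only up to 2^53.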
+
+ v8::Handle<v8::Value> numberLongValueOf( V8Scope* scope, const v8::Arguments& args ) {
+ v8::Handle<v8::Object> it = args.This();
+ long long val = numberLongVal( it );
+ return v8::Number::New( double( val ) );
+ }
+
+ v8::Handle<v8::Value> numberLongToNumber( V8Scope* scope, const v8::Arguments& args ) {
+ return numberLongValueOf( scope, args );
+ }
+
+ v8::Handle<v8::Value> numberLongToString( V8Scope* scope, const v8::Arguments& args ) {
+ v8::Handle<v8::Object> it = args.This();
+
+ stringstream ss;
+ long long val = numberLongVal( it );
+ const long long limit = 2LL << 30;
+
+ if ( val <= -limit || limit <= val )
+ ss << "NumberLong(\"" << val << "\")";
+ else
+ ss << "NumberLong(" << val << ")";
+
+ string ret = ss.str();
+ return v8::String::New( ret.c_str() );
+ }
+
+ v8::Handle<v8::Value> numberIntInit( V8Scope* scope, const v8::Arguments& args ) {
+ v8::Handle<v8::Object> it = args.This();
+ if ( it->IsUndefined() || it == v8::Context::GetCurrent()->Global() ) {
+ v8::Function * f = scope->getNamedCons( "NumberInt" );
+ return newInstance(f, args);
+ }
+
+ if (args.Length() != 0 && args.Length() != 1) {
+ return v8::ThrowException( v8::String::New( "NumberInt needs 0, 1 argument" ) );
+ }
+
+ if ( args.Length() == 0 ) {
+ it->SetHiddenValue( scope->V8STR_NUMBERINT, v8::Number::New( 0 ) );
+ }
+ else if ( args.Length() == 1 ) {
+ it->SetHiddenValue( scope->V8STR_NUMBERINT, args[0]->ToInt32() );
+ }
+
+ return it;
+ }
+
+ v8::Handle<v8::Value> numberIntValueOf( V8Scope* scope, const v8::Arguments& args ) {
+ v8::Handle<v8::Object> it = args.This();
+ int val = it->GetHiddenValue( scope->V8STR_NUMBERINT )->Int32Value();
+ return v8::Number::New( double( val ) );
+ }
+
+ v8::Handle<v8::Value> numberIntToNumber( V8Scope* scope, const v8::Arguments& args ) {
+ return numberIntValueOf( scope, args );
+ }
+
+ v8::Handle<v8::Value> numberIntToString( V8Scope* scope, const v8::Arguments& args ) {
+ v8::Handle<v8::Object> it = args.This();
+
+ stringstream ss;
+ int val = it->GetHiddenValue( scope->V8STR_NUMBERINT )->Int32Value();
+ ss << "NumberInt(" << val << ")";
+
+ string ret = ss.str();
+ return v8::String::New( ret.c_str() );
+ }
+
+ v8::Handle<v8::Value> bsonsize( V8Scope* scope, const v8::Arguments& args ) {
+
+ if ( args.Length() != 1 )
+ return v8::ThrowException( v8::String::New( "bsonsize needs 1 argument" ) );
+
+ if ( args[0]->IsNull() )
+ return v8::Number::New(0);
+
+ if ( ! args[ 0 ]->IsObject() )
+ return v8::ThrowException( v8::String::New( "argument to bsonsize has to be an object" ) );
+
+ return v8::Number::New( scope->v8ToMongo( args[ 0 ]->ToObject() ).objsize() );
+ }
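+
+ // For example, bsonsize({}) returns 5: an empty BSON object is a 4-byte
+ // length prefix plus a terminating NUL byte.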
+
+ namespace v8Locks {
+ boost::mutex& __interruptMutex = *( new boost::mutex );
+
+ InterruptLock::InterruptLock() {
+ __interruptMutex.lock();
+ }
+
+ InterruptLock::~InterruptLock() {
+ __interruptMutex.unlock();
+ }
+
+ boost::mutex& __v8Mutex = *( new boost::mutex );
+ ThreadLocalValue< bool > __locked;
+
+ RecursiveLock::RecursiveLock() : _unlock() {
+ if ( !__locked.get() ) {
+ __v8Mutex.lock();
+ __locked.set( true );
+ _unlock = true;
+ }
+ }
+ RecursiveLock::~RecursiveLock() {
+ if ( _unlock ) {
+ __v8Mutex.unlock();
+ __locked.set( false );
+ }
+ }
+
+ RecursiveUnlock::RecursiveUnlock() : _lock() {
+ if ( __locked.get() ) {
+ __v8Mutex.unlock();
+ __locked.set( false );
+ _lock = true;
+ }
+ }
+ RecursiveUnlock::~RecursiveUnlock() {
+ if ( _lock ) {
+ __v8Mutex.lock();
+ __locked.set( true );
+ }
+ }
+ } // namespace v8Locks
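+
+ // RecursiveLock is re-entrant per thread via the thread-local __locked flag;
+ // RecursiveUnlock temporarily releases the engine mutex inside a locked
+ // region (e.g. around blocking calls) and re-acquires it on destruction.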
+}
diff --git a/src/mongo/scripting/v8_db.h b/src/mongo/scripting/v8_db.h
new file mode 100644
index 00000000000..68946e0ed06
--- /dev/null
+++ b/src/mongo/scripting/v8_db.h
@@ -0,0 +1,94 @@
+// v8_db.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <v8.h>
+#include <cstring>
+#include <cstdio>
+#include <cstdlib>
+
+#include "engine_v8.h"
+#include "../client/dbclient.h"
+
+namespace mongo {
+
+ // These functions may depend on the caller creating a handle scope and context scope.
+
+ v8::Handle<v8::FunctionTemplate> getMongoFunctionTemplate( V8Scope * scope, bool local );
+// void installDBTypes( V8Scope * scope, v8::Handle<v8::ObjectTemplate>& global );
+ void installDBTypes( V8Scope * scope, v8::Handle<v8::Object>& global );
+
+ // the actual globals
+
+ mongo::DBClientBase * getConnection( const v8::Arguments& args );
+
+ // Mongo members
+ v8::Handle<v8::Value> mongoConsLocal(V8Scope* scope, const v8::Arguments& args);
+ v8::Handle<v8::Value> mongoConsExternal(V8Scope* scope, const v8::Arguments& args);
+
+ v8::Handle<v8::Value> mongoFind(V8Scope* scope, const v8::Arguments& args);
+ v8::Handle<v8::Value> mongoInsert(V8Scope* scope, const v8::Arguments& args);
+ v8::Handle<v8::Value> mongoRemove(V8Scope* scope, const v8::Arguments& args);
+ v8::Handle<v8::Value> mongoUpdate(V8Scope* scope, const v8::Arguments& args);
+ v8::Handle<v8::Value> mongoAuth(V8Scope* scope, const v8::Arguments& args);
+
+ v8::Handle<v8::Value> internalCursorCons(V8Scope* scope, const v8::Arguments& args);
+ v8::Handle<v8::Value> internalCursorNext(V8Scope* scope, const v8::Arguments& args);
+ v8::Handle<v8::Value> internalCursorHasNext(V8Scope* scope, const v8::Arguments& args);
+ v8::Handle<v8::Value> internalCursorObjsLeftInBatch(V8Scope* scope, const v8::Arguments& args);
+ v8::Handle<v8::Value> internalCursorReadOnly(V8Scope* scope, const v8::Arguments& args);
+
+ // DB members
+
+ v8::Handle<v8::Value> dbInit(V8Scope* scope, const v8::Arguments& args);
+ v8::Handle<v8::Value> collectionInit(V8Scope* scope, const v8::Arguments& args );
+ v8::Handle<v8::Value> objectIdInit( V8Scope* scope, const v8::Arguments& args );
+
+ v8::Handle<v8::Value> dbRefInit( V8Scope* scope, const v8::Arguments& args );
+ v8::Handle<v8::Value> dbPointerInit( V8Scope* scope, const v8::Arguments& args );
+ v8::Handle<v8::Value> dbTimestampInit( V8Scope* scope, const v8::Arguments& args );
+
+ v8::Handle<v8::Value> binDataInit( V8Scope* scope, const v8::Arguments& args );
+ v8::Handle<v8::Value> binDataToString( V8Scope* scope, const v8::Arguments& args );
+ v8::Handle<v8::Value> binDataToBase64( V8Scope* scope, const v8::Arguments& args );
+ v8::Handle<v8::Value> binDataToHex( V8Scope* scope, const v8::Arguments& args );
+
+ v8::Handle<v8::Value> uuidInit( V8Scope* scope, const v8::Arguments& args );
+ v8::Handle<v8::Value> md5Init( V8Scope* scope, const v8::Arguments& args );
+ v8::Handle<v8::Value> hexDataInit( V8Scope* scope, const v8::Arguments& args );
+
+ v8::Handle<v8::Value> numberLongInit( V8Scope* scope, const v8::Arguments& args );
+ v8::Handle<v8::Value> numberLongToNumber(V8Scope* scope, const v8::Arguments& args);
+ v8::Handle<v8::Value> numberLongValueOf(V8Scope* scope, const v8::Arguments& args);
+ v8::Handle<v8::Value> numberLongToString(V8Scope* scope, const v8::Arguments& args);
+
+ v8::Handle<v8::Value> numberIntInit( V8Scope* scope, const v8::Arguments& args );
+ v8::Handle<v8::Value> numberIntToNumber(V8Scope* scope, const v8::Arguments& args);
+ v8::Handle<v8::Value> numberIntValueOf(V8Scope* scope, const v8::Arguments& args);
+ v8::Handle<v8::Value> numberIntToString(V8Scope* scope, const v8::Arguments& args);
+
+ v8::Handle<v8::Value> dbQueryInit( V8Scope* scope, const v8::Arguments& args );
+ v8::Handle<v8::Value> dbQueryIndexAccess( uint32_t index , const v8::AccessorInfo& info );
+
+ v8::Handle<v8::Value> collectionGetter( v8::Local<v8::String> name, const v8::AccessorInfo &info);
+    v8::Handle<v8::Value> collectionSetter( v8::Local<v8::String> name, v8::Local<v8::Value> value, const v8::AccessorInfo& info );
+
+ v8::Handle<v8::Value> bsonsize( V8Scope* scope, const v8::Arguments& args );
+
+}
+
diff --git a/src/mongo/scripting/v8_utils.cpp b/src/mongo/scripting/v8_utils.cpp
new file mode 100644
index 00000000000..9e7e8072220
--- /dev/null
+++ b/src/mongo/scripting/v8_utils.cpp
@@ -0,0 +1,295 @@
+// v8_utils.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined(_WIN32)
+/** this is a hack - v8stdint.h defined uint16_t etc. on _WIN32 only, and that collides with
+ our usage of boost */
+#include "boost/cstdint.hpp"
+using namespace boost;
+#define V8STDINT_H_
+#endif
+
+#include "v8_utils.h"
+#include "v8_db.h"
+#include <iostream>
+#include <map>
+#include <sstream>
+#include <vector>
+#include <boost/smart_ptr.hpp>
+#include <boost/thread/thread.hpp>
+#include <boost/thread/xtime.hpp>
+#include "engine_v8.h"
+
+using namespace std;
+using namespace v8;
+
+namespace mongo {
+
+ std::string toSTLString( const Handle<v8::Value> & o ) {
+ v8::String::Utf8Value str(o);
+ const char * foo = *str;
+ std::string s(foo);
+ return s;
+ }
+
+ std::string toSTLString( const v8::TryCatch * try_catch ) {
+
+ stringstream ss;
+
+ //while ( try_catch ){ // disabled for v8 bleeding edge
+
+ v8::String::Utf8Value exception(try_catch->Exception());
+ Handle<v8::Message> message = try_catch->Message();
+
+ if (message.IsEmpty()) {
+ ss << *exception << endl;
+ }
+ else {
+
+ v8::String::Utf8Value filename(message->GetScriptResourceName());
+ int linenum = message->GetLineNumber();
+ ss << *filename << ":" << linenum << " " << *exception << endl;
+
+ v8::String::Utf8Value sourceline(message->GetSourceLine());
+ ss << *sourceline << endl;
+
+ int start = message->GetStartColumn();
+ for (int i = 0; i < start; i++)
+ ss << " ";
+
+ int end = message->GetEndColumn();
+ for (int i = start; i < end; i++)
+ ss << "^";
+
+ ss << endl;
+ }
+
+ //try_catch = try_catch->next_;
+ //}
+
+ return ss.str();
+ }
+
+
+ std::ostream& operator<<( std::ostream &s, const Handle<v8::Value> & o ) {
+ v8::String::Utf8Value str(o);
+ s << *str;
+ return s;
+ }
+
+ std::ostream& operator<<( std::ostream &s, const v8::TryCatch * try_catch ) {
+ HandleScope handle_scope;
+ v8::String::Utf8Value exception(try_catch->Exception());
+ Handle<v8::Message> message = try_catch->Message();
+
+ if (message.IsEmpty()) {
+ s << *exception << endl;
+ }
+ else {
+
+            v8::String::Utf8Value filename(message->GetScriptResourceName());
+            int linenum = message->GetLineNumber();
+            s << *filename << ":" << linenum << " " << *exception << endl;
+
+            v8::String::Utf8Value sourceline(message->GetSourceLine());
+            s << *sourceline << endl;
+
+            int start = message->GetStartColumn();
+            for (int i = 0; i < start; i++)
+                s << " ";
+
+            int end = message->GetEndColumn();
+            for (int i = start; i < end; i++)
+                s << "^";
+
+            s << endl;
+ }
+
+ //if ( try_catch->next_ ) // disabled for v8 bleeding edge
+ // s << try_catch->next_;
+
+ return s;
+ }
+
+ void ReportException(v8::TryCatch* try_catch) {
+ cout << try_catch << endl;
+ }
+
+ Handle< Context > baseContext_;
+
+ class JSThreadConfig {
+ public:
+ JSThreadConfig( V8Scope* scope, const Arguments &args, bool newScope = false ) : started_(), done_(), newScope_( newScope ) {
+ jsassert( args.Length() > 0, "need at least one argument" );
+ jsassert( args[ 0 ]->IsFunction(), "first argument must be a function" );
+
+ // arguments need to be copied into the isolate, go through bson
+ BSONObjBuilder b;
+ for( int i = 0; i < args.Length(); ++i ) {
+                // build distinct field names arg0, arg1, ... ("arg" + i would be pointer arithmetic)
+                stringstream ss;
+                ss << "arg" << i;
+                scope->v8ToMongoElement(b, ss.str(), args[i]);
+ }
+ args_ = b.obj();
+ }
+
+ ~JSThreadConfig() {
+ }
+
+ void start() {
+ jsassert( !started_, "Thread already started" );
+ // obtain own scope for execution
+ // do it here, not in constructor, otherwise it creates an infinite recursion from ScopedThread
+ _scope.reset( dynamic_cast< V8Scope * >( globalScriptEngine->newScope() ) );
+
+ JSThread jt( *this );
+ thread_.reset( new boost::thread( jt ) );
+ started_ = true;
+ }
+ void join() {
+ jsassert( started_ && !done_, "Thread not running" );
+ thread_->join();
+ done_ = true;
+ }
+
+ BSONObj returnData() {
+ if ( !done_ )
+ join();
+ return returnData_;
+ }
+
+ private:
+ class JSThread {
+ public:
+ JSThread( JSThreadConfig &config ) : config_( config ) {}
+
+ void operator()() {
+ V8Scope* scope = config_._scope.get();
+ v8::Isolate::Scope iscope(scope->getIsolate());
+ v8::Locker l(scope->getIsolate());
+ HandleScope handle_scope;
+ Context::Scope context_scope( scope->getContext() );
+
+ BSONObj args = config_.args_;
+ Local< v8::Function > f = v8::Function::Cast( *(scope->mongoToV8Element(args.firstElement(), true)) );
+ int argc = args.nFields() - 1;
+
+ boost::scoped_array< Local< Value > > argv( new Local< Value >[ argc ] );
+ BSONObjIterator it(args);
+ it.next();
+ for( int i = 0; i < argc; ++i ) {
+ argv[ i ] = Local< Value >::New( scope->mongoToV8Element(*it, true) );
+ it.next();
+ }
+ TryCatch try_catch;
+ Handle< Value > ret = f->Call( scope->getContext()->Global(), argc, argv.get() );
+ if ( ret.IsEmpty() ) {
+ string e = toSTLString( &try_catch );
+ log() << "js thread raised exception: " << e << endl;
+ // v8 probably does something sane if ret is empty, but not going to assume that for now
+ ret = v8::Undefined();
+ }
+ // ret is translated to BSON to switch isolate
+ BSONObjBuilder b;
+ scope->v8ToMongoElement(b, "ret", ret);
+ config_.returnData_ = b.obj();
+ }
+
+ private:
+ JSThreadConfig &config_;
+ };
+
+ bool started_;
+ bool done_;
+ bool newScope_;
+ BSONObj args_;
+ scoped_ptr< boost::thread > thread_;
+ scoped_ptr< V8Scope > _scope;
+ BSONObj returnData_;
+ };
+
+ Handle< Value > ThreadInit( V8Scope* scope, const Arguments &args ) {
+ Handle<v8::Object> it = args.This();
+ // NOTE I believe the passed JSThreadConfig will never be freed. If this
+ // policy is changed, JSThread may no longer be able to store JSThreadConfig
+ // by reference.
+ it->SetHiddenValue( v8::String::New( "_JSThreadConfig" ), External::New( new JSThreadConfig( scope, args ) ) );
+ return v8::Undefined();
+ }
+
+ Handle< Value > ScopedThreadInit( V8Scope* scope, const Arguments &args ) {
+ Handle<v8::Object> it = args.This();
+ // NOTE I believe the passed JSThreadConfig will never be freed. If this
+ // policy is changed, JSThread may no longer be able to store JSThreadConfig
+ // by reference.
+ it->SetHiddenValue( v8::String::New( "_JSThreadConfig" ), External::New( new JSThreadConfig( scope, args, true ) ) );
+ return v8::Undefined();
+ }
+
+ JSThreadConfig *thisConfig( V8Scope* scope, const Arguments &args ) {
+ Local< External > c = External::Cast( *(args.This()->GetHiddenValue( v8::String::New( "_JSThreadConfig" ) ) ) );
+ JSThreadConfig *config = (JSThreadConfig *)( c->Value() );
+ return config;
+ }
+
+ Handle< Value > ThreadStart( V8Scope* scope, const Arguments &args ) {
+ thisConfig( scope, args )->start();
+ return v8::Undefined();
+ }
+
+ Handle< Value > ThreadJoin( V8Scope* scope, const Arguments &args ) {
+ thisConfig( scope, args )->join();
+ return v8::Undefined();
+ }
+
+ Handle< Value > ThreadReturnData( V8Scope* scope, const Arguments &args ) {
+ BSONObj data = thisConfig( scope, args )->returnData();
+ return scope->mongoToV8Element(data.firstElement(), true);
+ }
+
+ Handle< Value > ThreadInject( V8Scope* scope, const Arguments &args ) {
+ jsassert( args.Length() == 1 , "threadInject takes exactly 1 argument" );
+ jsassert( args[0]->IsObject() , "threadInject needs to be passed a prototype" );
+
+ Local<v8::Object> o = args[0]->ToObject();
+
+ // install method on the Thread object
+ scope->injectV8Function("init", ThreadInit, o);
+ scope->injectV8Function("start", ThreadStart, o);
+ scope->injectV8Function("join", ThreadJoin, o);
+ scope->injectV8Function("returnData", ThreadReturnData, o);
+ return v8::Undefined();
+ }
+
+ Handle< Value > ScopedThreadInject( V8Scope* scope, const Arguments &args ) {
+ jsassert( args.Length() == 1 , "threadInject takes exactly 1 argument" );
+ jsassert( args[0]->IsObject() , "threadInject needs to be passed a prototype" );
+
+ Local<v8::Object> o = args[0]->ToObject();
+
+ scope->injectV8Function("init", ScopedThreadInit, o);
+ // inheritance takes care of other member functions
+
+ return v8::Undefined();
+ }
+
+ void installFork( V8Scope* scope, v8::Handle< v8::Object > &global, v8::Handle< v8::Context > &context ) {
+ if ( baseContext_.IsEmpty() ) // if this is the shell, first call will be with shell context, otherwise don't expect to use fork() anyway
+ baseContext_ = context;
+ scope->injectV8Function("_threadInject", ThreadInject, global);
+ scope->injectV8Function("_scopedThreadInject", ScopedThreadInject, global);
+ }
+
+}
diff --git a/src/mongo/scripting/v8_utils.h b/src/mongo/scripting/v8_utils.h
new file mode 100644
index 00000000000..ca5d317885f
--- /dev/null
+++ b/src/mongo/scripting/v8_utils.h
@@ -0,0 +1,43 @@
+// v8_utils.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <v8.h>
+
+#include <cstring>
+#include <cstdio>
+#include <cstdlib>
+#include <assert.h>
+#include <iostream>
+
+namespace mongo {
+
+ void ReportException(v8::TryCatch* handler);
+
+#define jsassert(x,msg) assert(x)
+
+ std::ostream& operator<<( std::ostream &s, const v8::Handle<v8::Value> & o );
+    std::ostream& operator<<( std::ostream &s, const v8::TryCatch * try_catch );
+
+ std::string toSTLString( const v8::Handle<v8::Value> & o );
+ std::string toSTLString( const v8::TryCatch * try_catch );
+
+ class V8Scope;
+ void installFork( V8Scope* scope, v8::Handle< v8::Object > &global, v8::Handle< v8::Context > &context );
+}
+
diff --git a/src/mongo/scripting/v8_wrapper.cpp b/src/mongo/scripting/v8_wrapper.cpp
new file mode 100644
index 00000000000..7c28a39cceb
--- /dev/null
+++ b/src/mongo/scripting/v8_wrapper.cpp
@@ -0,0 +1,99 @@
+// v8_wrapper.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined(_WIN32)
+/** this is a hack - v8stdint.h defined uint16_t etc. on _WIN32 only, and that collides with
+ our usage of boost */
+#include "boost/cstdint.hpp"
+using namespace boost;
+#define V8STDINT_H_
+#endif
+
+#include "v8_wrapper.h"
+#include "v8_utils.h"
+#include "v8_db.h"
+#include "engine_v8.h"
+
+#include <iostream>
+
+using namespace std;
+using namespace v8;
+
+namespace mongo {
+
+#define DDD(x)
+
+ // --- object wrapper ---
+
+ class WrapperHolder {
+ public:
+ WrapperHolder( V8Scope* scope, const BSONObj * o , bool readOnly , bool iDelete )
+ : _scope(scope), _o(o), _readOnly( readOnly ), _iDelete( iDelete ) {
+ }
+
+ ~WrapperHolder() {
+ if ( _o && _iDelete ) {
+ delete _o;
+ }
+ _o = 0;
+ }
+
+ v8::Handle<v8::Value> get( v8::Local<v8::String> name ) {
+ const string& s = toSTLString( name );
+ const BSONElement& e = _o->getField( s );
+ return _scope->mongoToV8Element(e);
+ }
+
+ V8Scope* _scope;
+ const BSONObj * _o;
+ bool _readOnly;
+ bool _iDelete;
+ };
+
+ WrapperHolder * createWrapperHolder( V8Scope* scope, const BSONObj * o , bool readOnly , bool iDelete ) {
+ return new WrapperHolder( scope, o , readOnly , iDelete );
+ }
+
+ WrapperHolder * getWrapper( v8::Handle<v8::Object> o ) {
+ Handle<v8::Value> t = o->GetRealNamedProperty( v8::String::New( "_wrapper" ) );
+ assert( t->IsExternal() );
+ Local<External> c = External::Cast( *t );
+ WrapperHolder * w = (WrapperHolder*)(c->Value());
+ assert( w );
+ return w;
+ }
+
+
+ Handle<Value> wrapperCons(V8Scope* scope, const Arguments& args) {
+ if ( ! ( args.Length() == 1 && args[0]->IsExternal() ) )
+ return v8::ThrowException( v8::String::New( "wrapperCons needs 1 External arg" ) );
+
+ args.This()->Set( v8::String::New( "_wrapper" ) , args[0] );
+
+ return v8::Undefined();
+ }
+
+ v8::Handle<v8::Value> wrapperGetHandler( v8::Local<v8::String> name, const v8::AccessorInfo &info) {
+ return getWrapper( info.This() )->get( name );
+ }
+
+ v8::Handle<v8::FunctionTemplate> getObjectWrapperTemplate(V8Scope* scope) {
+ v8::Handle<v8::FunctionTemplate> t = scope->createV8Function(wrapperCons);
+ t->InstanceTemplate()->SetNamedPropertyHandler( wrapperGetHandler );
+ return t;
+ }
+}
diff --git a/src/mongo/scripting/v8_wrapper.h b/src/mongo/scripting/v8_wrapper.h
new file mode 100644
index 00000000000..22f14e6ae94
--- /dev/null
+++ b/src/mongo/scripting/v8_wrapper.h
@@ -0,0 +1,34 @@
+// v8_wrapper.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <v8.h>
+#include <cstring>
+#include <cstdio>
+#include <cstdlib>
+#include "../db/jsobj.h"
+#include "engine_v8.h"
+
+namespace mongo {
+
+ v8::Handle<v8::FunctionTemplate> getObjectWrapperTemplate(V8Scope* scope);
+
+ class WrapperHolder;
+ WrapperHolder * createWrapperHolder( V8Scope* scope, const BSONObj * o , bool readOnly , bool iDelete );
+
+}
diff --git a/src/mongo/server.h b/src/mongo/server.h
new file mode 100644
index 00000000000..d9a711ef780
--- /dev/null
+++ b/src/mongo/server.h
@@ -0,0 +1,46 @@
+/** @file server.h
+
+    This file contains includes commonly needed in the server files (mongod, mongos, test). It is *NOT* included in the C++ client; i.e.
+    this is a very good place for global-ish things that don't need to be in the client lib.
+
+ Over time we should move more here, and more out of pch.h. And get rid of pch.h at some point.
+*/
+
+#pragma once
+
+#include <map>
+#include <vector>
+#include <set>
+
+#include "bson/inline_decls.h"
+
+//using namespace std;
+//using namespace bson;
+
+/* Note: do not clutter code with these -- ONLY use in hot spots / significant loops. */
+
+// branch prediction. indicate we expect to be true
+#define likely MONGO_likely
+
+// branch prediction. indicate we expect to be false
+#define unlikely MONGO_unlikely
+
+// prefetch data from memory
+//#define PREFETCH MONGOPREFETCH
+
+#if defined(__GNUC__)
+
+#define CACHEALIGN __attribute__((aligned(64)))
+
+#elif defined(_MSC_VER)
+
+#define CACHEALIGN __declspec(align(64))
+
+#else
+
+#define CACHEALIGN
+
+#endif
+
+// log but not too fast. this is rather simplistic; we can do something fancier later
+#define LOGSOME static time_t __last; time_t __now=time(0); if( __now < __last + 5 ) {} else if( (__last = __now), false ) {} else log()
diff --git a/src/mongo/shell/collection.js b/src/mongo/shell/collection.js
new file mode 100644
index 00000000000..df3fa516f86
--- /dev/null
+++ b/src/mongo/shell/collection.js
@@ -0,0 +1,893 @@
+// @file collection.js - DBCollection support in the mongo shell
+// db.colName is a DBCollection object
+// or db["colName"]
+
+if ( ( typeof DBCollection ) == "undefined" ){
+ DBCollection = function( mongo , db , shortName , fullName ){
+ this._mongo = mongo;
+ this._db = db;
+ this._shortName = shortName;
+ this._fullName = fullName;
+
+ this.verify();
+ }
+}
+
+DBCollection.prototype.verify = function(){
+ assert( this._fullName , "no fullName" );
+ assert( this._shortName , "no shortName" );
+ assert( this._db , "no db" );
+
+ assert.eq( this._fullName , this._db._name + "." + this._shortName , "name mismatch" );
+
+ assert( this._mongo , "no mongo in DBCollection" );
+}
+
+DBCollection.prototype.getName = function(){
+ return this._shortName;
+}
+
+DBCollection.prototype.help = function () {
+ var shortName = this.getName();
+ print("DBCollection help");
+ print("\tdb." + shortName + ".find().help() - show DBCursor help");
+ print("\tdb." + shortName + ".count()");
+ print("\tdb." + shortName + ".copyTo(newColl) - duplicates collection by copying all documents to newColl; no indexes are copied.");
+    print("\tdb." + shortName + ".convertToCapped(maxBytes) - calls {convertToCapped:'" + shortName + "', size:maxBytes} command");
+ print("\tdb." + shortName + ".dataSize()");
+ print("\tdb." + shortName + ".distinct( key ) - eg. db." + shortName + ".distinct( 'x' )");
+ print("\tdb." + shortName + ".drop() drop the collection");
+ print("\tdb." + shortName + ".dropIndex(name)");
+ print("\tdb." + shortName + ".dropIndexes()");
+ print("\tdb." + shortName + ".ensureIndex(keypattern[,options]) - options is an object with these possible fields: name, unique, dropDups");
+ print("\tdb." + shortName + ".reIndex()");
+ print("\tdb." + shortName + ".find([query],[fields]) - query is an optional query filter. fields is optional set of fields to return.");
+ print("\t e.g. db." + shortName + ".find( {x:77} , {name:1, x:1} )");
+ print("\tdb." + shortName + ".find(...).count()");
+ print("\tdb." + shortName + ".find(...).limit(n)");
+ print("\tdb." + shortName + ".find(...).skip(n)");
+ print("\tdb." + shortName + ".find(...).sort(...)");
+ print("\tdb." + shortName + ".findOne([query])");
+ print("\tdb." + shortName + ".findAndModify( { update : ... , remove : bool [, query: {}, sort: {}, 'new': false] } )");
+ print("\tdb." + shortName + ".getDB() get DB object associated with collection");
+ print("\tdb." + shortName + ".getIndexes()");
+ print("\tdb." + shortName + ".group( { key : ..., initial: ..., reduce : ...[, cond: ...] } )");
+ print("\tdb." + shortName + ".insert(obj)");
+ print("\tdb." + shortName + ".mapReduce( mapFunction , reduceFunction , <optional params> )");
+ print("\tdb." + shortName + ".remove(query)");
+ print("\tdb." + shortName + ".renameCollection( newName , <dropTarget> ) renames the collection.");
+ print("\tdb." + shortName + ".runCommand( name , <options> ) runs a db command with the given name where the first param is the collection name");
+ print("\tdb." + shortName + ".save(obj)");
+ print("\tdb." + shortName + ".stats()");
+ print("\tdb." + shortName + ".storageSize() - includes free space allocated to this collection");
+ print("\tdb." + shortName + ".totalIndexSize() - size in bytes of all the indexes");
+ print("\tdb." + shortName + ".totalSize() - storage allocated for all data and indexes");
+ print("\tdb." + shortName + ".update(query, object[, upsert_bool, multi_bool]) - instead of two flags, you can pass an object with fields: upsert, multi");
+    print("\tdb." + shortName + ".validate( <full> ) - SLOW");
+ print("\tdb." + shortName + ".getShardVersion() - only for use with sharding");
+ print("\tdb." + shortName + ".getShardDistribution() - prints statistics about data distribution in the cluster");
+ print("\tdb." + shortName + ".getSplitKeysForChunks( <maxChunkSize> ) - calculates split points over all chunks and returns splitter function");
+ return __magicNoPrint;
+}
+
+DBCollection.prototype.getFullName = function(){
+ return this._fullName;
+}
+DBCollection.prototype.getMongo = function(){
+ return this._db.getMongo();
+}
+DBCollection.prototype.getDB = function(){
+ return this._db;
+}
+
+DBCollection.prototype._dbCommand = function( cmd , params ){
+ if ( typeof( cmd ) == "object" )
+ return this._db._dbCommand( cmd );
+
+ var c = {};
+ c[cmd] = this.getName();
+ if ( params )
+ Object.extend( c , params );
+ return this._db._dbCommand( c );
+}
+
+DBCollection.prototype.runCommand = DBCollection.prototype._dbCommand;
+
+DBCollection.prototype._massageObject = function( q ){
+ if ( ! q )
+ return {};
+
+ var type = typeof q;
+
+ if ( type == "function" )
+ return { $where : q };
+
+ if ( q.isObjectId )
+ return { _id : q };
+
+ if ( type == "object" )
+ return q;
+
+ if ( type == "string" ){
+ if ( q.length == 24 )
+ return { _id : q };
+
+ return { $where : q };
+ }
+
+ throw "don't know how to massage : " + type;
+
+}
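+
+// Illustrative sketch (not part of the API) of how a query argument is massaged; "foo" is a
+// hypothetical collection and the literal values are examples only:
+//   db.foo.find( function(){ return this.x == 3; } )    // function       -> { $where : <function> }
+//   db.foo.find( ObjectId("4f0000000000000000000000") ) // ObjectId       -> { _id : <id> }
+//   db.foo.find( "4f0000000000000000000000" )           // 24-char string -> { _id : <string> }
+//   db.foo.find( "this.x == 3" )                        // other string   -> { $where : <string> }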
+
+
+DBCollection.prototype._validateObject = function( o ){
+ if ( o._ensureSpecial && o._checkModify )
+ throw "can't save a DBQuery object";
+}
+
+DBCollection._allowedFields = { $id : 1 , $ref : 1 , $db : 1 , $MinKey : 1, $MaxKey : 1 };
+
+DBCollection.prototype._validateForStorage = function( o ){
+ this._validateObject( o );
+ for ( var k in o ){
+ if ( k.indexOf( "." ) >= 0 ) {
+ throw "can't have . in field names [" + k + "]" ;
+ }
+
+ if ( k.indexOf( "$" ) == 0 && ! DBCollection._allowedFields[k] ) {
+ throw "field names cannot start with $ [" + k + "]";
+ }
+
+ if ( o[k] !== null && typeof( o[k] ) === "object" ) {
+ this._validateForStorage( o[k] );
+ }
+ }
+};
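+
+// Illustrative sketch: inserts like these would be rejected by the validation above
+// ("foo" is a hypothetical collection):
+//   db.foo.insert( { "a.b" : 1 } );          // throws: can't have . in field names
+//   db.foo.insert( { $set : { a : 1 } } );   // throws: field names cannot start with $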
+
+
+DBCollection.prototype.find = function( query , fields , limit , skip, batchSize, options ){
+ return new DBQuery( this._mongo , this._db , this ,
+ this._fullName , this._massageObject( query ) , fields , limit , skip , batchSize , options || this.getQueryOptions() );
+}
+
+DBCollection.prototype.findOne = function( query , fields, options ){
+ var cursor = this._mongo.find( this._fullName , this._massageObject( query ) || {} , fields ,
+ -1 /* limit */ , 0 /* skip*/, 0 /* batchSize */ , options || this.getQueryOptions() /* options */ );
+ if ( ! cursor.hasNext() )
+ return null;
+ var ret = cursor.next();
+ if ( cursor.hasNext() ) throw "findOne has more than 1 result!";
+ if ( ret.$err )
+ throw "error " + tojson( ret );
+ return ret;
+}
+
+DBCollection.prototype.insert = function( obj , _allow_dot ){
+ if ( ! obj )
+ throw "no object passed to insert!";
+ if ( ! _allow_dot ) {
+ this._validateForStorage( obj );
+ }
+ if ( typeof( obj._id ) == "undefined" && ! Array.isArray( obj ) ){
+ var tmp = obj; // don't want to modify input
+ obj = {_id: new ObjectId()};
+ for (var key in tmp){
+ obj[key] = tmp[key];
+ }
+ }
+ this._db._initExtraInfo();
+ this._mongo.insert( this._fullName , obj );
+ this._lastID = obj._id;
+ this._db._getExtraInfo("Inserted");
+}
+
+DBCollection.prototype.remove = function( t , justOne ){
+ for ( var k in t ){
+ if ( k == "_id" && typeof( t[k] ) == "undefined" ){
+ throw "can't have _id set to undefined in a remove expression"
+ }
+ }
+ this._db._initExtraInfo();
+ this._mongo.remove( this._fullName , this._massageObject( t ) , justOne ? true : false );
+ this._db._getExtraInfo("Removed");
+}
+
+DBCollection.prototype.update = function( query , obj , upsert , multi ){
+ assert( query , "need a query" );
+ assert( obj , "need an object" );
+
+ var firstKey = null;
+ for (var k in obj) { firstKey = k; break; }
+
+ if (firstKey != null && firstKey[0] == '$') {
+ // for mods we only validate partially, for example keys may have dots
+ this._validateObject( obj );
+ } else {
+ // we're basically inserting a brand new object, do full validation
+ this._validateForStorage( obj );
+ }
+
+ // can pass options via object for improved readability
+ if ( typeof(upsert) === 'object' ) {
+ assert( multi === undefined, "Fourth argument must be empty when specifying upsert and multi with an object." );
+
+        var opts = upsert;
+ multi = opts.multi;
+ upsert = opts.upsert;
+ }
+
+ this._db._initExtraInfo();
+ this._mongo.update( this._fullName , query , obj , upsert ? true : false , multi ? true : false );
+ this._db._getExtraInfo("Updated");
+}
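+
+// Usage sketch ("foo" and the fields are hypothetical); both flag styles map to the same
+// this._mongo.update() call above:
+//   db.foo.update( { name : "joe" } , { $inc : { n : 1 } } , true , false );
+//   db.foo.update( { name : "joe" } , { $inc : { n : 1 } } , { upsert : true , multi : false } );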
+
+DBCollection.prototype.save = function( obj ){
+ if ( obj == null || typeof( obj ) == "undefined" )
+ throw "can't save a null";
+
+ if ( typeof( obj ) == "number" || typeof( obj) == "string" )
+ throw "can't save a number or string"
+
+ if ( typeof( obj._id ) == "undefined" ){
+ obj._id = new ObjectId();
+ return this.insert( obj );
+ }
+ else {
+ return this.update( { _id : obj._id } , obj , true );
+ }
+}
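+
+// Usage sketch ("foo" is hypothetical): save() inserts when _id is missing and upserts by _id otherwise:
+//   db.foo.save( { name : "joe" } );             // insert with a generated ObjectId
+//   db.foo.save( { _id : 1 , name : "joe" } );   // upsert on _id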
+
+DBCollection.prototype._genIndexName = function( keys ){
+ var name = "";
+ for ( var k in keys ){
+ var v = keys[k];
+ if ( typeof v == "function" )
+ continue;
+
+ if ( name.length > 0 )
+ name += "_";
+ name += k + "_";
+
+ if ( typeof v == "number" )
+ name += v;
+ }
+ return name;
+}
+
+DBCollection.prototype._indexSpec = function( keys, options ) {
+ var ret = { ns : this._fullName , key : keys , name : this._genIndexName( keys ) };
+
+ if ( ! options ){
+ }
+ else if ( typeof ( options ) == "string" )
+ ret.name = options;
+ else if ( typeof ( options ) == "boolean" )
+ ret.unique = true;
+ else if ( typeof ( options ) == "object" ){
+ if ( options.length ){
+ var nb = 0;
+ for ( var i=0; i<options.length; i++ ){
+ if ( typeof ( options[i] ) == "string" )
+ ret.name = options[i];
+ else if ( typeof( options[i] ) == "boolean" ){
+ if ( options[i] ){
+ if ( nb == 0 )
+ ret.unique = true;
+ if ( nb == 1 )
+ ret.dropDups = true;
+ }
+ nb++;
+ }
+ }
+ }
+ else {
+ Object.extend( ret , options );
+ }
+ }
+ else {
+ throw "can't handle: " + typeof( options );
+ }
+ /*
+ return ret;
+
+ var name;
+ var nTrue = 0;
+
+ if ( ! isObject( options ) ) {
+ options = [ options ];
+ }
+
+ if ( options.length ){
+ for( var i = 0; i < options.length; ++i ) {
+ var o = options[ i ];
+ if ( isString( o ) ) {
+ ret.name = o;
+ } else if ( typeof( o ) == "boolean" ) {
+ if ( o ) {
+ ++nTrue;
+ }
+ }
+ }
+ if ( nTrue > 0 ) {
+ ret.unique = true;
+ }
+ if ( nTrue > 1 ) {
+ ret.dropDups = true;
+ }
+ }
+*/
+ return ret;
+}
+
+DBCollection.prototype.createIndex = function( keys , options ){
+ var o = this._indexSpec( keys, options );
+ this._db.getCollection( "system.indexes" ).insert( o , true );
+}
+
+DBCollection.prototype.ensureIndex = function( keys , options ){
+ var name = this._indexSpec( keys, options ).name;
+ this._indexCache = this._indexCache || {};
+ if ( this._indexCache[ name ] ){
+ return;
+ }
+
+ this.createIndex( keys , options );
+ if ( this.getDB().getLastError() == "" ) {
+ this._indexCache[name] = true;
+ }
+}
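+
+// Usage sketch ("foo" and fields hypothetical); per _indexSpec above, options may be a name
+// string, a boolean (unique), or an object of index options:
+//   db.foo.ensureIndex( { x : 1 } );
+//   db.foo.ensureIndex( { x : 1 , y : -1 } , { unique : true , name : "x_asc_y_desc" } );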
+
+DBCollection.prototype.resetIndexCache = function(){
+ this._indexCache = {};
+}
+
+DBCollection.prototype.reIndex = function() {
+ return this._db.runCommand({ reIndex: this.getName() });
+}
+
+DBCollection.prototype.dropIndexes = function(){
+ this.resetIndexCache();
+
+ var res = this._db.runCommand( { deleteIndexes: this.getName(), index: "*" } );
+    assert( res , "no result from deleteIndexes command" );
+ if ( res.ok )
+ return res;
+
+ if ( res.errmsg.match( /not found/ ) )
+ return res;
+
+ throw "error dropping indexes : " + tojson( res );
+}
+
+
+DBCollection.prototype.drop = function(){
+ if ( arguments.length > 0 )
+ throw "drop takes no argument";
+ this.resetIndexCache();
+ var ret = this._db.runCommand( { drop: this.getName() } );
+ if ( ! ret.ok ){
+ if ( ret.errmsg == "ns not found" )
+ return false;
+ throw "drop failed: " + tojson( ret );
+ }
+ return true;
+}
+
+DBCollection.prototype.findAndModify = function(args){
+ var cmd = { findandmodify: this.getName() };
+ for (var key in args){
+ cmd[key] = args[key];
+ }
+
+ var ret = this._db.runCommand( cmd );
+ if ( ! ret.ok ){
+ if (ret.errmsg == "No matching object found"){
+ return null;
+ }
+        throw "findAndModify failed: " + tojson( ret.errmsg );
+ }
+ return ret.value;
+}
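+
+// Usage sketch ("jobs" and its fields are hypothetical): atomically claim a document and
+// return the updated version:
+//   var job = db.jobs.findAndModify( { query : { state : "ready" } ,
+//                                      sort : { priority : -1 } ,
+//                                      update : { $set : { state : "running" } } ,
+//                                      'new' : true } );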
+
+DBCollection.prototype.renameCollection = function( newName , dropTarget ){
+ return this._db._adminCommand( { renameCollection : this._fullName ,
+ to : this._db._name + "." + newName ,
+ dropTarget : dropTarget } )
+}
+
+DBCollection.prototype.validate = function(full) {
+ var cmd = { validate: this.getName() };
+
+ if (typeof(full) == 'object') // support arbitrary options here
+ Object.extend(cmd, full);
+ else
+ cmd.full = full;
+
+ var res = this._db.runCommand( cmd );
+
+ if (typeof(res.valid) == 'undefined') {
+ // old-style format just put everything in a string. Now using proper fields
+
+ res.valid = false;
+
+ var raw = res.result || res.raw;
+
+ if ( raw ){
+ var str = "-" + tojson( raw );
+ res.valid = ! ( str.match( /exception/ ) || str.match( /corrupt/ ) );
+
+ var p = /lastExtentSize:(\d+)/;
+ var r = p.exec( str );
+ if ( r ){
+ res.lastExtentSize = Number( r[1] );
+ }
+ }
+ }
+
+ return res;
+}
+
+DBCollection.prototype.getShardVersion = function(){
+ return this._db._adminCommand( { getShardVersion : this._fullName } );
+}
+
+DBCollection.prototype.getIndexes = function(){
+ return this.getDB().getCollection( "system.indexes" ).find( { ns : this.getFullName() } ).toArray();
+}
+
+DBCollection.prototype.getIndices = DBCollection.prototype.getIndexes;
+DBCollection.prototype.getIndexSpecs = DBCollection.prototype.getIndexes;
+
+DBCollection.prototype.getIndexKeys = function(){
+ return this.getIndexes().map(
+ function(i){
+ return i.key;
+ }
+ );
+}
+
+
+DBCollection.prototype.count = function( x ){
+ return this.find( x ).count();
+}
+
+/**
+ * Drop free lists. Normally not used.
+ * Note this only does the collection itself, not the namespaces of its indexes (see cleanAll).
+ */
+DBCollection.prototype.clean = function() {
+ return this._dbCommand( { clean: this.getName() } );
+}
+
+
+
+/**
+ * <p>Drop a specified index.</p>
+ *
+ * <p>
+ * Name is the name of the index in the system.indexes name field. (Run db.system.indexes.find() to
+ * see example data.)
+ * </p>
+ *
+ * <p>Note : alpha: space is not reclaimed </p>
+ * @param {String} name of index to delete.
+ * @return A result object. result.ok will be true if successful.
+ */
+DBCollection.prototype.dropIndex = function(index) {
+ assert(index , "need to specify index to dropIndex" );
+
+ if ( ! isString( index ) && isObject( index ) )
+ index = this._genIndexName( index );
+
+ var res = this._dbCommand( "deleteIndexes" ,{ index: index } );
+ this.resetIndexCache();
+ return res;
+}
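+
+// Usage sketch ("foo" hypothetical): the index can be given by name or by its original key
+// pattern, which is converted with _genIndexName above:
+//   db.foo.dropIndex( "x_1" );
+//   db.foo.dropIndex( { x : 1 } );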
+
+DBCollection.prototype.copyTo = function( newName ){
+ return this.getDB().eval(
+ function( collName , newName ){
+ var from = db[collName];
+ var to = db[newName];
+ to.ensureIndex( { _id : 1 } );
+ var count = 0;
+
+ var cursor = from.find();
+ while ( cursor.hasNext() ){
+ var o = cursor.next();
+ count++;
+ to.save( o );
+ }
+
+ return count;
+ } , this.getName() , newName
+ );
+}
+
+DBCollection.prototype.getCollection = function( subName ){
+ return this._db.getCollection( this._shortName + "." + subName );
+}
+
+DBCollection.prototype.stats = function( scale ){
+ return this._db.runCommand( { collstats : this._shortName , scale : scale } );
+}
+
+DBCollection.prototype.dataSize = function(){
+ return this.stats().size;
+}
+
+DBCollection.prototype.storageSize = function(){
+ return this.stats().storageSize;
+}
+
+DBCollection.prototype.totalIndexSize = function( verbose ){
+ var stats = this.stats();
+ if (verbose){
+ for (var ns in stats.indexSizes){
+ print( ns + "\t" + stats.indexSizes[ns] );
+ }
+ }
+ return stats.totalIndexSize;
+}
+
+
+DBCollection.prototype.totalSize = function(){
+ var total = this.storageSize();
+ var mydb = this._db;
+ var shortName = this._shortName;
+ this.getIndexes().forEach(
+ function( spec ){
+ var coll = mydb.getCollection( shortName + ".$" + spec.name );
+ var mysize = coll.storageSize();
+ //print( coll + "\t" + mysize + "\t" + tojson( coll.validate() ) );
+ total += coll.dataSize();
+ }
+ );
+ return total;
+}
+
+
+DBCollection.prototype.convertToCapped = function( bytes ){
+ if ( ! bytes )
+ throw "have to specify # of bytes";
+ return this._dbCommand( { convertToCapped : this._shortName , size : bytes } )
+}
+
+DBCollection.prototype.exists = function(){
+ return this._db.system.namespaces.findOne( { name : this._fullName } );
+}
+
+DBCollection.prototype.isCapped = function(){
+ var e = this.exists();
+ return ( e && e.options && e.options.capped ) ? true : false;
+}
+
+DBCollection.prototype._distinct = function( keyString , query ){
+ return this._dbCommand( { distinct : this._shortName , key : keyString , query : query || {} } );
+}
+
+DBCollection.prototype.distinct = function( keyString , query ){
+ var res = this._distinct( keyString , query );
+ if ( ! res.ok )
+ throw "distinct failed: " + tojson( res );
+ return res.values;
+}
+
+
+DBCollection.prototype.aggregate = function( ops ) {
+
+ var arr = ops;
+
+ if ( ! ops.length ) {
+ arr = [];
+ for ( var i=0; i<arguments.length; i++ ) {
+ arr.push( arguments[i] )
+ }
+ }
+
+ return this.runCommand( "aggregate" , { pipeline : arr } );
+}
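+
+// Usage sketch ("foo" and fields hypothetical): stages may be passed as one array or as
+// separate arguments; both forms build the same pipeline:
+//   db.foo.aggregate( [ { $match : { x : 1 } } , { $group : { _id : "$y" , n : { $sum : 1 } } } ] );
+//   db.foo.aggregate( { $match : { x : 1 } } , { $group : { _id : "$y" , n : { $sum : 1 } } } );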
+
+DBCollection.prototype.group = function( params ){
+ params.ns = this._shortName;
+ return this._db.group( params );
+}
+
+DBCollection.prototype.groupcmd = function( params ){
+ params.ns = this._shortName;
+ return this._db.groupcmd( params );
+}
+
+MapReduceResult = function( db , o ){
+ Object.extend( this , o );
+ this._o = o;
+ this._keys = Object.keySet( o );
+ this._db = db;
+ if ( this.result != null ) {
+ this._coll = this._db.getCollection( this.result );
+ }
+}
+
+MapReduceResult.prototype._simpleKeys = function(){
+ return this._o;
+}
+
+MapReduceResult.prototype.find = function(){
+ if ( this.results )
+ return this.results;
+ return DBCollection.prototype.find.apply( this._coll , arguments );
+}
+
+MapReduceResult.prototype.drop = function(){
+ if ( this._coll ) {
+ return this._coll.drop();
+ }
+}
+
+/**
+* just for debugging really
+*/
+MapReduceResult.prototype.convertToSingleObject = function(){
+ var z = {};
+ var it = this.results != null ? this.results : this._coll.find();
+ it.forEach( function(a){ z[a._id] = a.value; } );
+ return z;
+}
+
+DBCollection.prototype.convertToSingleObject = function(valueField){
+ var z = {};
+ this.find().forEach( function(a){ z[a._id] = a[valueField]; } );
+ return z;
+}
+
+/**
+* @param optional object of optional fields;
+*/
+DBCollection.prototype.mapReduce = function( map , reduce , optionsOrOutString ){
+ var c = { mapreduce : this._shortName , map : map , reduce : reduce };
+ assert( optionsOrOutString , "need to supply an optionsOrOutString" )
+
+ if ( typeof( optionsOrOutString ) == "string" )
+ c["out"] = optionsOrOutString;
+ else
+ Object.extend( c , optionsOrOutString );
+
+ var raw = this._db.runCommand( c );
+ if ( ! raw.ok ){
+ __mrerror__ = raw;
+ throw "map reduce failed:" + tojson(raw);
+ }
+ return new MapReduceResult( this._db , raw );
+
+}
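+
+// Usage sketch ("events", "user" and the output name are hypothetical); the third argument is
+// either an output collection name or an options object:
+//   var res = db.events.mapReduce(
+//       function(){ emit( this.user , 1 ); } ,
+//       function( k , vals ){ return Array.sum( vals ); } ,
+//       { out : "events_per_user" } );
+//   res.find().forEach( printjson );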
+
+DBCollection.prototype.toString = function(){
+ return this.getFullName();
+}
+
+
+DBCollection.prototype.tojson = DBCollection.prototype.toString;
+
+DBCollection.prototype.shellPrint = DBCollection.prototype.toString;
+
+DBCollection.autocomplete = function(obj){
+ var colls = DB.autocomplete(obj.getDB());
+ var ret = [];
+ for (var i=0; i<colls.length; i++){
+ var c = colls[i];
+ if (c.length <= obj.getName().length) continue;
+ if (c.slice(0,obj.getName().length+1) != obj.getName()+'.') continue;
+
+ ret.push(c.slice(obj.getName().length+1));
+ }
+ return ret;
+}
+
+
+// Sharding additions
+
+/*
+Usage :
+
+mongo <mongos>
+> load('path-to-file/shardingAdditions.js')
+Loading custom sharding extensions...
+true
+
+> var collection = db.getMongo().getCollection("foo.bar")
+> collection.getShardDistribution() // prints statistics related to the collection's data distribution
+
+> collection.getSplitKeysForChunks() // generates split points for all chunks in the collection, based on the
+ // default maxChunkSize or alternately a specified chunk size
+> collection.getSplitKeysForChunks( 10 ) // Mb
+
+> var splitter = collection.getSplitKeysForChunks() // by default, the chunks are not split, the keys are just
+ // found. A splitter function is returned which will actually
+ // do the splits.
+
+> splitter() // ! Actually executes the splits on the cluster !
+
+*/
+
+DBCollection.prototype.getShardDistribution = function(){
+
+ var stats = this.stats()
+
+ if( ! stats.sharded ){
+ print( "Collection " + this + " is not sharded." )
+ return
+ }
+
+ var config = this.getMongo().getDB("config")
+
+ var numChunks = 0
+
+ for( var shard in stats.shards ){
+
+ var shardDoc = config.shards.findOne({ _id : shard })
+
+ print( "\nShard " + shard + " at " + shardDoc.host )
+
+ var shardStats = stats.shards[ shard ]
+
+ var chunks = config.chunks.find({ _id : sh._collRE( this ), shard : shard }).toArray()
+
+ numChunks += chunks.length
+
+ var estChunkData = shardStats.size / chunks.length
+ var estChunkCount = Math.floor( shardStats.count / chunks.length )
+
+ print( " data : " + sh._dataFormat( shardStats.size ) +
+ " docs : " + shardStats.count +
+ " chunks : " + chunks.length )
+ print( " estimated data per chunk : " + sh._dataFormat( estChunkData ) )
+ print( " estimated docs per chunk : " + estChunkCount )
+
+ }
+
+ print( "\nTotals" )
+ print( " data : " + sh._dataFormat( stats.size ) +
+ " docs : " + stats.count +
+ " chunks : " + numChunks )
+ for( var shard in stats.shards ){
+
+ var shardStats = stats.shards[ shard ]
+
+ var estDataPercent = Math.floor( shardStats.size / stats.size * 10000 ) / 100
+ var estDocPercent = Math.floor( shardStats.count / stats.count * 10000 ) / 100
+
+ print( " Shard " + shard + " contains " + estDataPercent + "% data, " + estDocPercent + "% docs in cluster, " +
+ "avg obj size on shard : " + sh._dataFormat( stats.shards[ shard ].avgObjSize ) )
+ }
+
+ print( "\n" )
+
+}
+
+
+DBCollection.prototype.getSplitKeysForChunks = function( chunkSize ){
+
+ var stats = this.stats()
+
+ if( ! stats.sharded ){
+ print( "Collection " + this + " is not sharded." )
+ return
+ }
+
+ var config = this.getMongo().getDB("config")
+
+ if( ! chunkSize ){
+ chunkSize = config.settings.findOne({ _id : "chunksize" }).value
+ print( "Chunk size not set, using default of " + chunkSize + "Mb" )
+ }
+ else{
+ print( "Using chunk size of " + chunkSize + "Mb" )
+ }
+
+ var shardDocs = config.shards.find().toArray()
+
+ var allSplitPoints = {}
+ var numSplits = 0
+
+ for( var i = 0; i < shardDocs.length; i++ ){
+
+ var shardDoc = shardDocs[i]
+ var shard = shardDoc._id
+ var host = shardDoc.host
+ var sconn = new Mongo( host )
+
+ var chunks = config.chunks.find({ _id : sh._collRE( this ), shard : shard }).toArray()
+
+ print( "\nGetting split points for chunks on shard " + shard + " at " + host )
+
+ var splitPoints = []
+
+ for( var j = 0; j < chunks.length; j++ ){
+ var chunk = chunks[j]
+ var result = sconn.getDB("admin").runCommand({ splitVector : this + "", min : chunk.min, max : chunk.max, maxChunkSize : chunkSize })
+ if( ! result.ok ){
+ print( " Had trouble getting split keys for chunk " + sh._pchunk( chunk ) + " :\n" )
+ printjson( result )
+ }
+ else{
+ splitPoints = splitPoints.concat( result.splitKeys )
+
+ if( result.splitKeys.length > 0 )
+ print( " Added " + result.splitKeys.length + " split points for chunk " + sh._pchunk( chunk ) )
+ }
+ }
+
+ print( "Total splits for shard " + shard + " : " + splitPoints.length )
+
+ numSplits += splitPoints.length
+ allSplitPoints[ shard ] = splitPoints
+
+ }
+
+ // Get most recent migration
+ var migration = config.changelog.find({ what : /^move.*/ }).sort({ time : -1 }).limit( 1 ).toArray()
+ if( migration.length == 0 )
+ print( "\nNo migrations found in changelog." )
+ else {
+ migration = migration[0]
+ print( "\nMost recent migration activity was on " + migration.ns + " at " + migration.time )
+ }
+
+ var admin = this.getMongo().getDB("admin")
+ var coll = this
+ var splitFunction = function(){
+
+ // Turn off the balancer, just to be safe
+ print( "Turning off balancer..." )
+ config.settings.update({ _id : "balancer" }, { $set : { stopped : true } }, true )
+ print( "Sleeping for 30s to allow balancers to detect change. To be extra safe, check config.changelog" +
+ " for recent migrations." )
+ sleep( 30000 )
+
+ for( shard in allSplitPoints ){
+ for( var i = 0; i < allSplitPoints[ shard ].length; i++ ){
+ var splitKey = allSplitPoints[ shard ][i]
+ print( "Splitting at " + tojson( splitKey ) )
+ printjson( admin.runCommand({ split : coll + "", middle : splitKey }) )
+ }
+ }
+
+ print( "Turning the balancer back on." )
+ config.settings.update({ _id : "balancer" }, { $set : { stopped : false } } )
+ sleep( 1 )
+ }
+
+ splitFunction.getSplitPoints = function(){ return allSplitPoints; }
+
+ print( "\nGenerated " + numSplits + " split keys, run output function to perform splits.\n" +
+ " ex : \n" +
+ " > var splitter = <collection>.getSplitKeysForChunks()\n" +
+ " > splitter() // Execute splits on cluster !\n" )
+
+ return splitFunction
+
+}
+
+DBCollection.prototype.setSlaveOk = function( value ) {
+ if( value == undefined ) value = true;
+ this._slaveOk = value;
+}
+
+DBCollection.prototype.getSlaveOk = function() {
+ if (this._slaveOk != undefined) return this._slaveOk;
+ return this._db.getSlaveOk();
+}
+
+DBCollection.prototype.getQueryOptions = function() {
+ var options = 0;
+ if (this.getSlaveOk()) options |= 4;
+ return options;
+}
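+
+// Usage sketch ("foo" hypothetical): 4 is the slaveOk wire-protocol flag consumed by find() above:
+//   db.foo.setSlaveOk();         // allow reads from a secondary for this collection
+//   db.foo.getQueryOptions();    // -> 4 once slaveOk is set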
+
diff --git a/src/mongo/shell/db.js b/src/mongo/shell/db.js
new file mode 100644
index 00000000000..6414e0351e7
--- /dev/null
+++ b/src/mongo/shell/db.js
@@ -0,0 +1,881 @@
+// db.js
+
+if ( typeof DB == "undefined" ){
+ DB = function( mongo , name ){
+ this._mongo = mongo;
+ this._name = name;
+ }
+}
+
+DB.prototype.getMongo = function(){
+ assert( this._mongo , "why no mongo!" );
+ return this._mongo;
+}
+
+DB.prototype.getSiblingDB = function( name ){
+ return this.getMongo().getDB( name );
+}
+
+DB.prototype.getSisterDB = DB.prototype.getSiblingDB;
+
+DB.prototype.getName = function(){
+ return this._name;
+}
+
+DB.prototype.stats = function(scale){
+ return this.runCommand( { dbstats : 1 , scale : scale } );
+}
+
+DB.prototype.getCollection = function( name ){
+ return new DBCollection( this._mongo , this , name , this._name + "." + name );
+}
+
+DB.prototype.commandHelp = function( name ){
+ var c = {};
+ c[name] = 1;
+ c.help = true;
+ var res = this.runCommand( c );
+ if ( ! res.ok )
+ throw res.errmsg;
+ return res.help;
+}
+
+DB.prototype.runCommand = function( obj ){
+ if ( typeof( obj ) == "string" ){
+ var n = {};
+ n[obj] = 1;
+ obj = n;
+ }
+ return this.getCollection( "$cmd" ).findOne( obj );
+}
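+
+// Usage sketch: a string command name is expanded to { <name> : 1 } before being sent
+// ("foo" is a hypothetical collection):
+//   db.runCommand( "ismaster" );          // same as db.runCommand( { ismaster : 1 } )
+//   db.runCommand( { count : "foo" } );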
+
+DB.prototype._dbCommand = DB.prototype.runCommand;
+
+DB.prototype.adminCommand = function( obj ){
+ if ( this._name == "admin" )
+ return this.runCommand( obj );
+ return this.getSiblingDB( "admin" ).runCommand( obj );
+}
+
+DB.prototype._adminCommand = DB.prototype.adminCommand; // alias old name
+
+DB.prototype.addUser = function( username , pass, readOnly ){
+ if ( pass == null || pass.length == 0 )
+ throw "password can't be empty";
+
+ readOnly = readOnly || false;
+ var c = this.getCollection( "system.users" );
+
+ var u = c.findOne( { user : username } ) || { user : username };
+ u.readOnly = readOnly;
+ u.pwd = hex_md5( username + ":mongo:" + pass );
+
+ c.save( u );
+ print( tojson( u ) );
+
+ // in mongod version 2.1.0-, this worked
+ var le = {};
+ try {
+ le = this.getLastErrorObj();
+ printjson( le )
+ }
+ catch (e) {}
+
+ if ( le.err )
+ throw "couldn't add user: " + le.err
+}
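+
+// Usage sketch (hypothetical credentials): creates or updates a document in system.users:
+//   db.addUser( "reader" , "secret" , true );   // read-only user
+//   db.addUser( "writer" , "secret" );          // read-write user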
+
+DB.prototype.logout = function(){
+ return this.runCommand({logout : 1});
+}
+
+DB.prototype.removeUser = function( username ){
+ this.getCollection( "system.users" ).remove( { user : username } );
+}
+
+DB.prototype.__pwHash = function( nonce, username, pass ) {
+ return hex_md5( nonce + username + hex_md5( username + ":mongo:" + pass ) );
+}
+
+DB.prototype.auth = function( username , pass ){
+ var result = 0;
+ try {
+ result = this.getMongo().auth(this.getName(), username, pass);
+ }
+ catch (e) {
+ print(e);
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ Create a new collection in the database. Normally, collection creation is automatic. You would
+ use this function if you wish to specify special options on creation.
+
+ If the collection already exists, no action occurs.
+
+ <p>Options:</p>
+ <ul>
+ <li>
+ size: desired initial extent size for the collection. Must be <= 1000000000.
+ for fixed size (capped) collections, this size is the total/max size of the
+ collection.
+ </li>
+ <li>
+ capped: if true, this is a capped collection (where old data rolls out).
+ </li>
+ <li> max: maximum number of objects if capped (optional).</li>
+ </ul>
+
+ <p>Example: </p>
+
+ <code>db.createCollection("movies", { size: 10 * 1024 * 1024, capped:true } );</code>
+
+ * @param {String} name Name of new collection to create
+ * @param {Object} options Object with options for call. Options are listed above.
+ * @return SOMETHING_FIXME
+*/
+DB.prototype.createCollection = function(name, opt) {
+ var options = opt || {};
+ var cmd = { create: name, capped: options.capped, size: options.size };
+ if (options.max != undefined)
+ cmd.max = options.max;
+ if (options.autoIndexId != undefined)
+ cmd.autoIndexId = options.autoIndexId;
+ var res = this._dbCommand(cmd);
+ return res;
+}
+
+/**
+ * @deprecated use getProfilingStatus
+ * Returns the current profiling level of this database
+ * @return SOMETHING_FIXME or null on error
+ */
+DB.prototype.getProfilingLevel = function() {
+ var res = this._dbCommand( { profile: -1 } );
+ return res ? res.was : null;
+}
+
+/**
+ * @return the current profiling status
+ * example { was : 0, slowms : 100 }
+ * @return SOMETHING_FIXME or null on error
+ */
+DB.prototype.getProfilingStatus = function() {
+ var res = this._dbCommand( { profile: -1 } );
+ if ( ! res.ok )
+ throw "profile command failed: " + tojson( res );
+ delete res.ok
+ return res;
+}
+
+
+/**
+ Erase the entire database. (!)
+
+ * @return Object returned has member ok set to true if operation succeeds, false otherwise.
+*/
+DB.prototype.dropDatabase = function() {
+ if ( arguments.length )
+ throw "dropDatabase doesn't take arguments";
+ return this._dbCommand( { dropDatabase: 1 } );
+}
+
+/**
+ * Shuts down the database. Must be run while using the admin database.
+ * @param opts Options for shutdown. Possible options are:
+ * - force: (boolean) if the server should shut down, even if there is no
+ * up-to-date slave
+ * - timeoutSecs: (number) the server will continue checking over timeoutSecs
+ * if any other servers have caught up enough for it to shut down.
+ */
+DB.prototype.shutdownServer = function(opts) {
+ if( "admin" != this._name ){
+ return "shutdown command only works with the admin database; try 'use admin'";
+ }
+
+    var cmd = {"shutdown" : 1};
+ opts = opts || {};
+ for (var o in opts) {
+ cmd[o] = opts[o];
+ }
+
+ try {
+ var res = this.runCommand(cmd);
+ if( res )
+ throw "shutdownServer failed: " + res.errmsg;
+ throw "shutdownServer failed";
+ }
+ catch ( e ){
+ assert( tojson( e ).indexOf( "error doing query: failed" ) >= 0 , "unexpected error: " + tojson( e ) );
+ print( "server should be down..." );
+ }
+}
+
+/**
+ Clone database on another server to here.
+ <p>
+ Generally, you should dropDatabase() first as otherwise the cloned information will MERGE
+ into whatever data is already present in this database. (That is however a valid way to use
+ clone if you are trying to do something intentionally, such as union three non-overlapping
+ databases into one.)
+ <p>
+    This is a low level administrative function that is not typically used.
+
+ * @param {String} from Where to clone from (dbhostname[:port]). May not be this database
+ (self) as you cannot clone to yourself.
+ * @return Object returned has member ok set to true if operation succeeds, false otherwise.
+ * See also: db.copyDatabase()
+*/
+DB.prototype.cloneDatabase = function(from) {
+ assert( isString(from) && from.length );
+ //this.resetIndexCache();
+ return this._dbCommand( { clone: from } );
+}
+
+
+/**
+ Clone collection on another server to here.
+ <p>
+ Generally, you should drop() first as otherwise the cloned information will MERGE
+ into whatever data is already present in this collection. (That is however a valid way to use
+ clone if you are trying to do something intentionally, such as union three non-overlapping
+ collections into one.)
+ <p>
+    This is a low level administrative function that is not typically used.
+
+ * @param {String} from mongod instance from which to clone (dbhostname:port). May
+ not be this mongod instance, as clone from self is not allowed.
+ * @param {String} collection name of collection to clone.
+ * @param {Object} query query specifying which elements of collection are to be cloned.
+ * @return Object returned has member ok set to true if operation succeeds, false otherwise.
+ * See also: db.cloneDatabase()
+ */
+DB.prototype.cloneCollection = function(from, collection, query) {
+ assert( isString(from) && from.length );
+ assert( isString(collection) && collection.length );
+ collection = this._name + "." + collection;
+ query = query || {};
+ //this.resetIndexCache();
+ return this._dbCommand( { cloneCollection:collection, from:from, query:query } );
+}
+
+
+/**
+ Copy database from one server or name to another server or name.
+
+ Generally, you should dropDatabase() first as otherwise the copied information will MERGE
+ into whatever data is already present in this database (and you will get duplicate objects
+ in collections potentially.)
+
+ For security reasons this function only works when executed on the "admin" db. However,
+ if you have access to said db, you can copy any database from one place to another.
+
+ This method provides a way to "rename" a database by copying it to a new db name and
+ location. Additionally, it effectively provides a repair facility.
+
+ * @param {String} fromdb database name from which to copy.
+ * @param {String} todb database name to copy to.
+ * @param {String} fromhost hostname of the database (and optionally, ":port") from which to
+ copy the data. default if unspecified is to copy from self.
+ * @return Object returned has member ok set to true if operation succeeds, false otherwise.
+ * See also: db.clone()
+*/
+DB.prototype.copyDatabase = function(fromdb, todb, fromhost, username, password) {
+ assert( isString(fromdb) && fromdb.length );
+ assert( isString(todb) && todb.length );
+ fromhost = fromhost || "";
+ if ( username && password ) {
+ var n = this._adminCommand( { copydbgetnonce : 1, fromhost:fromhost } );
+ return this._adminCommand( { copydb:1, fromhost:fromhost, fromdb:fromdb, todb:todb, username:username, nonce:n.nonce, key:this.__pwHash( n.nonce, username, password ) } );
+ } else {
+ return this._adminCommand( { copydb:1, fromhost:fromhost, fromdb:fromdb, todb:todb } );
+ }
+}
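+
+// Usage sketch (names and host are hypothetical); when credentials are given, the
+// copydbgetnonce handshake above is used:
+//   db.copyDatabase( "olddb" , "newdb" );                        // copy from this server
+//   db.copyDatabase( "olddb" , "newdb" , "otherhost:27017" );
+//   db.copyDatabase( "olddb" , "newdb" , "otherhost:27017" , "user" , "pass" );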
+
+/**
+ Repair database.
+
+ * @return Object returned has member ok set to true if operation succeeds, false otherwise.
+*/
+DB.prototype.repairDatabase = function() {
+ return this._dbCommand( { repairDatabase: 1 } );
+}
+
+
+DB.prototype.help = function() {
+ print("DB methods:");
+ print("\tdb.addUser(username, password[, readOnly=false])");
+ print("\tdb.adminCommand(nameOrDocument) - switches to 'admin' db, and runs command [ just calls db.runCommand(...) ]");
+ print("\tdb.auth(username, password)");
+ print("\tdb.cloneDatabase(fromhost)");
+ print("\tdb.commandHelp(name) returns the help for the command");
+ print("\tdb.copyDatabase(fromdb, todb, fromhost)");
+ print("\tdb.createCollection(name, { size : ..., capped : ..., max : ... } )");
+ print("\tdb.currentOp() displays currently executing operations in the db");
+ print("\tdb.dropDatabase()");
+ print("\tdb.eval(func, args) run code server-side");
+ print("\tdb.fsyncLock() flush data to disk and lock server for backups");
+ print("\tdb.fsyncUnlock() unlocks server following a db.fsyncLock()");
+ print("\tdb.getCollection(cname) same as db['cname'] or db.cname");
+ print("\tdb.getCollectionNames()");
+ print("\tdb.getLastError() - just returns the err msg string");
+ print("\tdb.getLastErrorObj() - return full status object");
+ print("\tdb.getMongo() get the server connection object");
+ print("\tdb.getMongo().setSlaveOk() allow queries on a replication slave server");
+ print("\tdb.getName()");
+ print("\tdb.getPrevError()");
+ print("\tdb.getProfilingLevel() - deprecated");
+    print("\tdb.getProfilingStatus() - returns whether profiling is on and the slow operation threshold");
+ print("\tdb.getReplicationInfo()");
+ print("\tdb.getSiblingDB(name) get the db at the same server as this one");
+ print("\tdb.isMaster() check replica primary status");
+ print("\tdb.killOp(opid) kills the current operation in the db");
+ print("\tdb.listCommands() lists all the db commands");
+ print("\tdb.loadServerScripts() loads all the scripts in db.system.js");
+ print("\tdb.logout()");
+ print("\tdb.printCollectionStats()");
+ print("\tdb.printReplicationInfo()");
+ print("\tdb.printShardingStatus()");
+ print("\tdb.printSlaveReplicationInfo()");
+ print("\tdb.removeUser(username)");
+ print("\tdb.repairDatabase()");
+ print("\tdb.resetError()");
+ print("\tdb.runCommand(cmdObj) run a database command. if cmdObj is a string, turns it into { cmdObj : 1 }");
+ print("\tdb.serverStatus()");
+ print("\tdb.setProfilingLevel(level,<slowms>) 0=off 1=slow 2=all");
+ print("\tdb.setVerboseShell(flag) display extra information in shell output");
+ print("\tdb.shutdownServer()");
+ print("\tdb.stats()");
+ print("\tdb.version() current version of the server");
+
+ return __magicNoPrint;
+}
+
+DB.prototype.printCollectionStats = function(){
+ var mydb = this;
+ this.getCollectionNames().forEach(
+ function(z){
+ print( z );
+ printjson( mydb.getCollection(z).stats() );
+ print( "---" );
+ }
+ );
+}
+
+/**
+ * <p> Set profiling level for your db. Profiling gathers stats on query performance. </p>
+ *
+ * <p>Default is off, and resets to off on a database restart -- so if you want it on,
+ * turn it on periodically. </p>
+ *
+ * <p>Levels :</p>
+ * <ul>
+ * <li>0=off</li>
+ * <li>1=log very slow operations; optional argument slowms specifies slowness threshold</li>
+ * <li>2=log all</li>
+ * </ul>
+ * @param {String} level Desired level of profiling
+ * @param {String} slowms For slow logging, query duration that counts as slow (default 100ms)
+ * @return SOMETHING_FIXME or null on error
+ */
+DB.prototype.setProfilingLevel = function(level,slowms) {
+
+ if (level < 0 || level > 2) {
+ throw { dbSetProfilingException : "input level " + level + " is out of range [0..2]" };
+ }
+
+ var cmd = { profile: level };
+ if ( slowms )
+ cmd["slowms"] = slowms;
+ return this._dbCommand( cmd );
+}
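+
+// Usage sketch: log operations slower than 50ms, then turn profiling off again:
+//   db.setProfilingLevel( 1 , 50 );
+//   db.setProfilingLevel( 0 );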
+
+DB.prototype._initExtraInfo = function() {
+ if ( typeof _verboseShell === 'undefined' || !_verboseShell ) return;
+ this.startTime = new Date().getTime();
+}
+
+DB.prototype._getExtraInfo = function(action) {
+ if ( typeof _verboseShell === 'undefined' || !_verboseShell ) {
+ __callLastError = true;
+ return;
+ }
+
+ // explicit w:1 so that replset getLastErrorDefaults aren't used here which would be bad.
+ var res = this.getLastErrorCmd(1);
+ if (res) {
+ if (res.err != undefined && res.err != null) {
+            // error occurred, display it
+ print(res.err);
+ return;
+ }
+
+ var info = action + " ";
+ // hack for inserted because res.n is 0
+ info += action != "Inserted" ? res.n : 1;
+ if (res.n > 0 && res.updatedExisting != undefined) info += " " + (res.updatedExisting ? "existing" : "new")
+ info += " record(s)";
+ var time = new Date().getTime() - this.startTime;
+ info += " in " + time + "ms";
+ print(info);
+ }
+}
+
+/**
+ * <p> Evaluate a js expression at the database server.</p>
+ *
+ * <p>Useful if you need to touch a lot of data lightly; in such a scenario
+ * the network transfer of the data could be a bottleneck. A good example
+ * is "select count(*)" -- can be done server side via this mechanism.
+ * </p>
+ *
+ * <p>
+ * If the eval fails, an exception is thrown of the form:
+ * </p>
+ * <code>{ dbEvalException: { retval: functionReturnValue, ok: num [, errno: num] [, errmsg: str] } }</code>
+ *
+ * <p>Example: </p>
+ * <code>print( "mycount: " + db.eval( function(){db.mycoll.find({},{_id:ObjId()}).length();} );</code>
+ *
+ * @param {Function} jsfunction Javascript function to run on server. Note this is not a closure, but rather just "code".
+ * @return result of your function, or null if error
+ *
+ */
+DB.prototype.eval = function(jsfunction) {
+ var cmd = { $eval : jsfunction };
+ if ( arguments.length > 1 ) {
+ cmd.args = argumentsToArray( arguments ).slice(1);
+ }
+
+ var res = this._dbCommand( cmd );
+
+ if (!res.ok)
+ throw tojson( res );
+
+ return res.retval;
+}
+
+DB.prototype.dbEval = DB.prototype.eval;
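+
+// Illustrative usage (assumes a collection named "mycoll"): extra arguments after the
+// function are passed to it server-side via the command's "args" field.
+//   db.eval( function( name ){ return db[name].count(); } , "mycoll" );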
+
+
+/**
+ *
+ * <p>
+ * Similar to SQL group by. For example: </p>
+ *
+ * <code>select a,b,sum(c) csum from coll where active=1 group by a,b</code>
+ *
+ * <p>
+ * corresponds to the following in 10gen:
+ * </p>
+ *
+ * <code>
+ db.group(
+ {
+ ns: "coll",
+ key: { a:true, b:true },
+ // keyf: ...,
+ cond: { active:1 },
+ reduce: function(obj,prev) { prev.csum += obj.c; } ,
+ initial: { csum: 0 }
+ });
+ </code>
+ *
+ *
+ * <p>
+ * An array of grouped items is returned. The array must fit in RAM, thus this function is not
+ * suitable when the return set is extremely large.
+ * </p>
+ * <p>
+ * To order the grouped data, simply sort it client side upon return.
+ * </p>
+ * <p>
+ * Defaults:
+ * cond may be null if you want to run against all rows in the collection.
+ * keyf is a function which takes an object and returns the desired key. Set either key or keyf (not both).
+ * </p>
+*/
+DB.prototype.groupeval = function(parmsObj) {
+
+ var groupFunction = function() {
+ var parms = args[0];
+ var c = db[parms.ns].find(parms.cond||{});
+ var map = new Map();
+ var pks = parms.key ? Object.keySet( parms.key ) : null;
+ var pkl = pks ? pks.length : 0;
+ var key = {};
+
+ while( c.hasNext() ) {
+ var obj = c.next();
+ if ( pks ) {
+ for( var i=0; i<pkl; i++ ){
+ var k = pks[i];
+ key[k] = obj[k];
+ }
+ }
+ else {
+ key = parms.$keyf(obj);
+ }
+
+ var aggObj = map.get(key);
+ if( aggObj == null ) {
+ var newObj = Object.extend({}, key); // clone
+ aggObj = Object.extend(newObj, parms.initial)
+ map.put( key , aggObj );
+ }
+ parms.$reduce(obj, aggObj);
+ }
+
+ return map.values();
+ }
+
+ return this.eval(groupFunction, this._groupFixParms( parmsObj ));
+}
+
+DB.prototype.groupcmd = function( parmsObj ){
+ var ret = this.runCommand( { "group" : this._groupFixParms( parmsObj ) } );
+ if ( ! ret.ok ){
+ throw "group command failed: " + tojson( ret );
+ }
+ return ret.retval;
+}
+
+DB.prototype.group = DB.prototype.groupcmd;
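+
+// Illustrative usage, mirroring the example in the comment above (assumes a collection
+// named "coll" with fields a, b, c and active):
+//   db.group( { ns : "coll",
+//               key : { a : true , b : true },
+//               cond : { active : 1 },
+//               reduce : function( obj , prev ){ prev.csum += obj.c; },
+//               initial : { csum : 0 } } );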
+
+DB.prototype._groupFixParms = function( parmsObj ){
+ var parms = Object.extend({}, parmsObj);
+
+ if( parms.reduce ) {
+ parms.$reduce = parms.reduce; // must have $ to pass to db
+ delete parms.reduce;
+ }
+
+ if( parms.keyf ) {
+ parms.$keyf = parms.keyf;
+ delete parms.keyf;
+ }
+
+ return parms;
+}
+
+DB.prototype.resetError = function(){
+ return this.runCommand( { reseterror : 1 } );
+}
+
+DB.prototype.forceError = function(){
+ return this.runCommand( { forceerror : 1 } );
+}
+
+DB.prototype.getLastError = function( w , wtimeout ){
+ var res = this.getLastErrorObj( w , wtimeout );
+ if ( ! res.ok )
+ throw "getlasterror failed: " + tojson( res );
+ return res.err;
+}
+DB.prototype.getLastErrorObj = function( w , wtimeout ){
+ var cmd = { getlasterror : 1 };
+ if ( w ){
+ cmd.w = w;
+ if ( wtimeout )
+ cmd.wtimeout = wtimeout;
+ }
+ var res = this.runCommand( cmd );
+
+ if ( ! res.ok )
+ throw "getlasterror failed: " + tojson( res );
+ return res;
+}
+DB.prototype.getLastErrorCmd = DB.prototype.getLastErrorObj;
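+
+// Illustrative usage (the w/wtimeout values are arbitrary examples): wait for the last
+// write to be acknowledged by 2 members, giving up after 5000ms.
+//   db.getLastError( 2 , 5000 );     // returns just the err message string (null if none)
+//   db.getLastErrorObj( 2 , 5000 );  // returns the full getlasterror result document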
+
+
+/* Return the last error which has occurred, even if not the very last error.
+
+ Returns:
+ { err : <error message>, nPrev : <how_many_ops_back_occurred>, ok : 1 }
+
+ result.err will be null if no error has occurred.
+ */
+DB.prototype.getPrevError = function(){
+ return this.runCommand( { getpreverror : 1 } );
+}
+
+DB.prototype.getCollectionNames = function(){
+ var all = [];
+
+ var nsLength = this._name.length + 1;
+
+ var c = this.getCollection( "system.namespaces" ).find();
+ while ( c.hasNext() ){
+ var name = c.next().name;
+
+ if ( name.indexOf( "$" ) >= 0 && name.indexOf( ".oplog.$" ) < 0 )
+ continue;
+
+ all.push( name.substring( nsLength ) );
+ }
+
+ return all.sort();
+}
+
+DB.prototype.tojson = function(){
+ return this._name;
+}
+
+DB.prototype.toString = function(){
+ return this._name;
+}
+
+DB.prototype.isMaster = function () { return this.runCommand("isMaster"); }
+
+DB.prototype.currentOp = function( arg ){
+ var q = {}
+ if ( arg ) {
+ if ( typeof( arg ) == "object" )
+ Object.extend( q , arg );
+ else if ( arg )
+ q["$all"] = true;
+ }
+ return this.$cmd.sys.inprog.findOne( q );
+}
+DB.prototype.currentOP = DB.prototype.currentOp;
+
+DB.prototype.killOp = function(op) {
+ if( !op )
+ throw "no opNum to kill specified";
+ return this.$cmd.sys.killop.findOne({'op':op});
+}
+DB.prototype.killOP = DB.prototype.killOp;
+
+DB.tsToSeconds = function(x){
+ if ( x.t && x.i )
+ return x.t / 1000;
+ return x / 4294967296; // low 32 bits are ordinal #s within a second
+}
+
+/**
+ Get a replication log information summary.
+ <p>
+ This command is for the database/cloud administrator and not applicable to most databases.
+ It is only used with the local database. One might invoke from the JS shell:
+ <pre>
+ use local
+ db.getReplicationInfo();
+ </pre>
+ It is assumed that this database is a replication master -- the information returned is
+ about the operation log stored at local.oplog.$main on the replication master. (It also
+ works on a machine in a replica pair: for replica pairs, both machines are "masters" from
+ an internal database perspective.)
+ <p>
+ * @return Object describing the time span of the oplog from start to end; if a slave is more out
+ * of date than that, it can't recover without a complete resync
+*/
+DB.prototype.getReplicationInfo = function() {
+ var db = this.getSiblingDB("local");
+
+ var result = { };
+ var oplog;
+ if (db.system.namespaces.findOne({name:"local.oplog.rs"}) != null) {
+ oplog = 'oplog.rs';
+ }
+ else if (db.system.namespaces.findOne({name:"local.oplog.$main"}) != null) {
+ oplog = 'oplog.$main';
+ }
+ else {
+ result.errmsg = "neither master/slave nor replica set replication detected";
+ return result;
+ }
+
+ var ol_entry = db.system.namespaces.findOne({name:"local."+oplog});
+ if( ol_entry && ol_entry.options ) {
+ result.logSizeMB = ol_entry.options.size / ( 1024 * 1024 );
+ } else {
+ result.errmsg = "local."+oplog+", or its options, not found in system.namespaces collection";
+ return result;
+ }
+ var ol = db.getCollection(oplog);
+
+ result.usedMB = ol.stats().size / ( 1024 * 1024 );
+ result.usedMB = Math.ceil( result.usedMB * 100 ) / 100;
+
+ var firstc = ol.find().sort({$natural:1}).limit(1);
+ var lastc = ol.find().sort({$natural:-1}).limit(1);
+ if( !firstc.hasNext() || !lastc.hasNext() ) {
+ result.errmsg = "objects not found in local.oplog.$main -- is this a new and empty db instance?";
+ result.oplogMainRowCount = ol.count();
+ return result;
+ }
+
+ var first = firstc.next();
+ var last = lastc.next();
+ {
+ var tfirst = first.ts;
+ var tlast = last.ts;
+
+ if( tfirst && tlast ) {
+ tfirst = DB.tsToSeconds( tfirst );
+ tlast = DB.tsToSeconds( tlast );
+ result.timeDiff = tlast - tfirst;
+ result.timeDiffHours = Math.round(result.timeDiff / 36)/100;
+ result.tFirst = (new Date(tfirst*1000)).toString();
+ result.tLast = (new Date(tlast*1000)).toString();
+ result.now = Date();
+ }
+ else {
+ result.errmsg = "ts element not found in oplog objects";
+ }
+ }
+
+ return result;
+};
+
+DB.prototype.printReplicationInfo = function() {
+ var result = this.getReplicationInfo();
+ if( result.errmsg ) {
+ if (!this.isMaster().ismaster) {
+ print("this is a slave, printing slave replication info.");
+ this.printSlaveReplicationInfo();
+ return;
+ }
+ print(tojson(result));
+ return;
+ }
+ print("configured oplog size: " + result.logSizeMB + "MB");
+ print("log length start to end: " + result.timeDiff + "secs (" + result.timeDiffHours + "hrs)");
+ print("oplog first event time: " + result.tFirst);
+ print("oplog last event time: " + result.tLast);
+ print("now: " + result.now);
+}
+
+DB.prototype.printSlaveReplicationInfo = function() {
+ function getReplLag(st) {
+ var now = new Date();
+ print("\t syncedTo: " + st.toString() );
+ var ago = (now-st)/1000;
+ var hrs = Math.round(ago/36)/100;
+ print("\t\t = " + Math.round(ago) + " secs ago (" + hrs + "hrs)");
+ };
+
+ function g(x) {
+ assert( x , "how could this be null (printSlaveReplicationInfo gx)" )
+ print("source: " + x.host);
+ if ( x.syncedTo ){
+ var st = new Date( DB.tsToSeconds( x.syncedTo ) * 1000 );
+ getReplLag(st);
+ }
+ else {
+ print( "\t doing initial sync" );
+ }
+ };
+
+ function r(x) {
+ assert( x , "how could this be null (printSlaveReplicationInfo rx)" );
+ if ( x.state == 1 ) {
+ return;
+ }
+
+ print("source: " + x.name);
+ if ( x.optime ) {
+ getReplLag(x.optimeDate);
+ }
+ else {
+ print( "\t no replication info, yet. State: " + x.stateStr );
+ }
+ };
+
+ var L = this.getSiblingDB("local");
+
+ if (L.system.replset.count() != 0) {
+ var status = this.adminCommand({'replSetGetStatus' : 1});
+ status.members.forEach(r);
+ }
+ else if( L.sources.count() != 0 ) {
+ L.sources.find().forEach(g);
+ }
+ else {
+ print("local.sources is empty; is this db a --slave?");
+ return;
+ }
+}
+
+DB.prototype.serverBuildInfo = function(){
+ return this._adminCommand( "buildinfo" );
+}
+
+DB.prototype.serverStatus = function(){
+ return this._adminCommand( "serverStatus" );
+}
+
+DB.prototype.serverCmdLineOpts = function(){
+ return this._adminCommand( "getCmdLineOpts" );
+}
+
+DB.prototype.version = function(){
+ return this.serverBuildInfo().version;
+}
+
+DB.prototype.serverBits = function(){
+ return this.serverBuildInfo().bits;
+}
+
+DB.prototype.listCommands = function(){
+ var x = this.runCommand( "listCommands" );
+ for ( var name in x.commands ){
+ var c = x.commands[name];
+
+ var s = name + ": ";
+
+ switch ( c.lockType ){
+ case -1: s += "read-lock"; break;
+ case 0: s += "no-lock"; break;
+ case 1: s += "write-lock"; break;
+ default: s += c.lockType;
+ }
+
+ if (c.adminOnly) s += " adminOnly ";
+ if (c.adminOnly) s += " slaveOk ";
+
+ s += "\n ";
+ s += c.help.replace(/\n/g, '\n ');
+ s += "\n";
+
+ print( s );
+ }
+}
+
+DB.prototype.printShardingStatus = function( verbose ){
+ printShardingStatus( this.getSiblingDB( "config" ) , verbose );
+}
+
+DB.prototype.fsyncLock = function() {
+ return this.adminCommand({fsync:1, lock:true});
+}
+
+DB.prototype.fsyncUnlock = function() {
+ return this.getSiblingDB("admin").$cmd.sys.unlock.findOne()
+}
+
+DB.autocomplete = function(obj){
+ var colls = obj.getCollectionNames();
+ var ret=[];
+ for (var i=0; i<colls.length; i++){
+ if (colls[i].match(/^[a-zA-Z0-9_.\$]+$/))
+ ret.push(colls[i]);
+ }
+ return ret;
+}
+
+DB.prototype.setSlaveOk = function( value ) {
+ if( value == undefined ) value = true;
+ this._slaveOk = value;
+}
+
+DB.prototype.getSlaveOk = function() {
+ if (this._slaveOk != undefined) return this._slaveOk;
+ return this._mongo.getSlaveOk();
+}
+
+/* Loads any scripts contained in db.system.js into the client shell.
+*/
+DB.prototype.loadServerScripts = function(){
+ this.system.js.find().forEach(function(u){eval(u._id + " = " + u.value);});
+} \ No newline at end of file
diff --git a/src/mongo/shell/dbshell.cpp b/src/mongo/shell/dbshell.cpp
new file mode 100644
index 00000000000..998301d1ee6
--- /dev/null
+++ b/src/mongo/shell/dbshell.cpp
@@ -0,0 +1,962 @@
+// dbshell.cpp
+/*
+ * Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include <stdio.h>
+#include <string.h>
+
+#include "../third_party/linenoise/linenoise.h"
+#include "../scripting/engine.h"
+#include "../client/dbclient.h"
+#include "../util/unittest.h"
+#include "../db/cmdline.h"
+#include "utils.h"
+#include "../util/password.h"
+#include "../util/version.h"
+#include "../util/goodies.h"
+#include "../util/file.h"
+#include "../db/repl/rs_member.h"
+
+using namespace std;
+using namespace boost::filesystem;
+using namespace mongo;
+
+string historyFile;
+bool gotInterrupted = false;
+bool inMultiLine = false;
+static volatile bool atPrompt = false; // can eval before getting to prompt
+bool autoKillOp = false;
+
+#if !defined(__freebsd__) && !defined(__openbsd__) && !defined(_WIN32)
+// this is for ctrl-c handling
+#include <setjmp.h>
+jmp_buf jbuf;
+#endif
+
+namespace mongo {
+
+ Scope * shellMainScope;
+
+ extern bool dbexitCalled;
+}
+
+void generateCompletions( const string& prefix , vector<string>& all ) {
+ if ( prefix.find( '"' ) != string::npos )
+ return;
+
+ try {
+ BSONObj args = BSON( "0" << prefix );
+ shellMainScope->invokeSafe( "function callShellAutocomplete(x) {shellAutocomplete(x)}", &args, 0, 1000 );
+ BSONObjBuilder b;
+ shellMainScope->append( b , "" , "__autocomplete__" );
+ BSONObj res = b.obj();
+ BSONObj arr = res.firstElement().Obj();
+
+ BSONObjIterator i( arr );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ all.push_back( e.String() );
+ }
+ }
+ catch ( ... ) {
+ }
+}
+
+void completionHook( const char* text , linenoiseCompletions* lc ) {
+ vector<string> all;
+ generateCompletions( text , all );
+
+ for ( unsigned i = 0; i < all.size(); ++i )
+ linenoiseAddCompletion( lc , (char*)all[i].c_str() );
+}
+
+void shellHistoryInit() {
+ stringstream ss;
+ const char * h = shellUtils::getUserDir();
+ if ( h )
+ ss << h << "/";
+ ss << ".dbshell";
+ historyFile = ss.str();
+
+ linenoiseHistoryLoad( historyFile.c_str() );
+ linenoiseSetCompletionCallback( completionHook );
+}
+
+void shellHistoryDone() {
+ linenoiseHistorySave( historyFile.c_str() );
+ linenoiseHistoryFree();
+}
+void shellHistoryAdd( const char * line ) {
+ if ( line[0] == '\0' )
+ return;
+
+ // don't record duplicate lines
+ static string lastLine;
+ if ( lastLine == line )
+ return;
+ lastLine = line;
+
+ if ( strstr( line, ".auth") == NULL )
+ linenoiseHistoryAdd( line );
+}
+
+#ifdef CTRLC_HANDLE
+void intr( int sig ) {
+ longjmp( jbuf , 1 );
+}
+#endif
+
+void killOps() {
+ if ( mongo::shellUtils::_nokillop || mongo::shellUtils::_allMyUris.size() == 0 )
+ return;
+
+ if ( atPrompt )
+ return;
+
+ sleepmillis(10); // give current op a chance to finish
+
+ for( map< string, set<string> >::const_iterator i = shellUtils::_allMyUris.begin(); i != shellUtils::_allMyUris.end(); ++i ) {
+ string errmsg;
+ ConnectionString cs = ConnectionString::parse( i->first, errmsg );
+ if (!cs.isValid()) continue;
+ boost::scoped_ptr<DBClientWithCommands> conn( cs.connect( errmsg ) );
+ if (!conn) continue;
+
+ const set<string>& uris = i->second;
+
+ BSONObj inprog = conn->findOne( "admin.$cmd.sys.inprog", Query() )["inprog"].embeddedObject().getOwned();
+ BSONForEach( op, inprog ) {
+ if ( uris.count( op["client"].String() ) ) {
+ ONCE if ( !autoKillOp ) {
+ cout << endl << "do you want to kill the current op(s) on the server? (y/n): ";
+ cout.flush();
+
+ char yn;
+ cin >> yn;
+
+ if ( yn != 'y' && yn != 'Y' )
+ return;
+ }
+
+ conn->findOne( "admin.$cmd.sys.killop", QUERY( "op"<< op["opid"] ) );
+ }
+ }
+ }
+}
+
+void quitNicely( int sig ) {
+ mongo::dbexitCalled = true;
+ if ( sig == SIGINT && inMultiLine ) {
+ gotInterrupted = 1;
+ return;
+ }
+
+#if !defined(_WIN32)
+ if ( sig == SIGPIPE )
+ mongo::rawOut( "mongo got signal SIGPIPE\n" );
+#endif
+
+ killOps();
+ shellHistoryDone();
+ exit(0);
+}
+
+// the returned string is allocated with strdup() or malloc() and must be freed by calling free()
+char * shellReadline( const char * prompt , int handlesigint = 0 ) {
+ atPrompt = true;
+
+#ifdef CTRLC_HANDLE
+ if ( ! handlesigint ) {
+ char* ret = linenoise( prompt );
+ atPrompt = false;
+ return ret;
+ }
+ if ( setjmp( jbuf ) ) {
+ gotInterrupted = 1;
+ sigrelse(SIGINT);
+ signal( SIGINT , quitNicely );
+ return 0;
+ }
+ signal( SIGINT , intr );
+#endif
+
+ char * ret = linenoise( prompt );
+ if ( ! ret ) {
+ gotInterrupted = true; // got ^C, break out of multiline
+ }
+
+ signal( SIGINT , quitNicely );
+ atPrompt = false;
+ return ret;
+}
+
+#ifdef _WIN32
+char * strsignal(int sig){
+ switch (sig){
+ case SIGINT: return "SIGINT";
+ case SIGTERM: return "SIGTERM";
+ case SIGABRT: return "SIGABRT";
+ case SIGSEGV: return "SIGSEGV";
+ case SIGFPE: return "SIGFPE";
+ default: return "unknown";
+ }
+}
+#endif
+
+void quitAbruptly( int sig ) {
+ ostringstream ossSig;
+ ossSig << "mongo got signal " << sig << " (" << strsignal( sig ) << "), stack trace: " << endl;
+ mongo::rawOut( ossSig.str() );
+
+ ostringstream ossBt;
+ mongo::printStackTrace( ossBt );
+ mongo::rawOut( ossBt.str() );
+
+ mongo::shellUtils::KillMongoProgramInstances();
+ exit( 14 );
+}
+
+// this will be called in certain c++ error cases, for example if there are two active
+// exceptions
+void myterminate() {
+ mongo::rawOut( "terminate() called in shell, printing stack:" );
+ mongo::printStackTrace();
+ exit( 14 );
+}
+
+void setupSignals() {
+ signal( SIGINT , quitNicely );
+ signal( SIGTERM , quitNicely );
+ signal( SIGABRT , quitAbruptly );
+ signal( SIGSEGV , quitAbruptly );
+ signal( SIGFPE , quitAbruptly );
+
+#if !defined(_WIN32) // surprisingly these are the only ones that don't work on windows
+ signal( SIGPIPE , quitNicely ); // Maybe just log and continue?
+ signal( SIGBUS , quitAbruptly );
+#endif
+
+ set_terminate( myterminate );
+}
+
+string fixHost( string url , string host , string port ) {
+ //cout << "fixHost url: " << url << " host: " << host << " port: " << port << endl;
+
+ if ( host.size() == 0 && port.size() == 0 ) {
+ if ( url.find( "/" ) == string::npos ) {
+ // check for ips
+ if ( url.find( "." ) != string::npos )
+ return url + "/test";
+
+ if ( url.rfind( ":" ) != string::npos &&
+ isdigit( url[url.rfind(":")+1] ) )
+ return url + "/test";
+ }
+ return url;
+ }
+
+ if ( url.find( "/" ) != string::npos ) {
+ cerr << "url can't have host or port if you specify them individually" << endl;
+ exit(-1);
+ }
+
+ if ( host.size() == 0 )
+ host = "127.0.0.1";
+
+ string newurl = host;
+ if ( port.size() > 0 )
+ newurl += ":" + port;
+ else if ( host.find(':') == string::npos ) {
+ // only append the default port when the host contains no ':' (i.e. is not an IPv6 literal)
+ newurl += ":27017";
+ }
+
+ newurl += "/" + url;
+
+ return newurl;
+}
+
+static string OpSymbols = "~!%^&*-+=|:,<>/?.";
+
+bool isOpSymbol( char c ) {
+ for ( size_t i = 0; i < OpSymbols.size(); i++ )
+ if ( OpSymbols[i] == c ) return true;
+ return false;
+}
+
+bool isUseCmd( string code ) {
+ string cmd = code;
+ if ( cmd.find( " " ) > 0 )
+ cmd = cmd.substr( 0 , cmd.find( " " ) );
+ return cmd == "use";
+}
+
+bool isBalanced( string code ) {
+ if (isUseCmd( code ))
+ return true; // don't balance "use <dbname>" in case dbname contains special chars
+ int brackets = 0;
+ int parens = 0;
+ bool danglingOp = false;
+
+ for ( size_t i=0; i<code.size(); i++ ) {
+ switch( code[i] ) {
+ case '/':
+ if ( i + 1 < code.size() && code[i+1] == '/' ) {
+ while ( i <code.size() && code[i] != '\n' )
+ i++;
+ }
+ continue;
+ case '{': brackets++; break;
+ case '}': if ( brackets <= 0 ) return true; brackets--; break;
+ case '(': parens++; break;
+ case ')': if ( parens <= 0 ) return true; parens--; break;
+ case '"':
+ i++;
+ while ( i < code.size() && code[i] != '"' ) i++;
+ break;
+ case '\'':
+ i++;
+ while ( i < code.size() && code[i] != '\'' ) i++;
+ break;
+ case '\\':
+ if ( i + 1 < code.size() && code[i+1] == '/' ) i++;
+ break;
+ case '+':
+ case '-':
+ if ( i + 1 < code.size() && code[i+1] == code[i] ) {
+ i++;
+ continue; // postfix op (++/--) can't be a dangling op
+ }
+ break;
+ }
+ if ( i >= code.size() ) {
+ danglingOp = false;
+ break;
+ }
+ if ( isOpSymbol( code[i] ) ) danglingOp = true;
+ else if ( !std::isspace( code[i] ) ) danglingOp = false;
+ }
+
+ return brackets == 0 && parens == 0 && !danglingOp;
+}
+
+using mongo::asserted;
+
+struct BalancedTest : public mongo::UnitTest {
+public:
+ void run() {
+ assert( isBalanced( "x = 5" ) );
+ assert( isBalanced( "function(){}" ) );
+ assert( isBalanced( "function(){\n}" ) );
+ assert( ! isBalanced( "function(){" ) );
+ assert( isBalanced( "x = \"{\";" ) );
+ assert( isBalanced( "// {" ) );
+ assert( ! isBalanced( "// \n {" ) );
+ assert( ! isBalanced( "\"//\" {" ) );
+ assert( isBalanced( "{x:/x\\//}" ) );
+ assert( ! isBalanced( "{ \\/// }" ) );
+ assert( isBalanced( "x = 5 + y ") );
+ assert( ! isBalanced( "x = ") );
+ assert( ! isBalanced( "x = // hello") );
+ assert( ! isBalanced( "x = 5 +") );
+ assert( isBalanced( " x ++") );
+ assert( isBalanced( "-- x") );
+ assert( !isBalanced( "a.") );
+ assert( !isBalanced( "a. ") );
+ assert( isBalanced( "a.b") );
+ }
+} balanced_test;
+
+string finishCode( string code ) {
+ while ( ! isBalanced( code ) ) {
+ inMultiLine = true;
+ code += "\n";
+ // cancel multiline if two blank lines are entered
+ if ( code.find( "\n\n\n" ) != string::npos )
+ return ";";
+ char * line = shellReadline( "... " , 1 );
+ if ( gotInterrupted ) {
+ if ( line )
+ free( line );
+ return "";
+ }
+ if ( ! line )
+ return "";
+
+ while ( startsWith( line, "... " ) )
+ line += 4;
+
+ code += line;
+ free( line );
+ }
+ return code;
+}
+
+#include <boost/program_options.hpp>
+namespace po = boost::program_options;
+
+void show_help_text( const char* name, po::options_description options ) {
+ cout << "MongoDB shell version: " << mongo::versionString << endl;
+ cout << "usage: " << name << " [options] [db address] [file names (ending in .js)]" << endl
+ << "db address can be:" << endl
+ << " foo foo database on local machine" << endl
+ << " 192.169.0.5/foo foo database on 192.168.0.5 machine" << endl
+ << " 192.169.0.5:9999/foo foo database on 192.168.0.5 machine on port 9999" << endl
+ << options << endl
+ << "file names: a list of files to run. files have to end in .js and will exit after "
+ << "unless --shell is specified" << endl;
+};
+
+bool fileExists( string file ) {
+ try {
+ path p( file );
+ return boost::filesystem::exists( file );
+ }
+ catch ( ... ) {
+ return false;
+ }
+}
+
+namespace mongo {
+ extern bool isShell;
+ extern DBClientWithCommands *latestConn;
+}
+
+string sayReplSetMemberState() {
+ try {
+ if( latestConn ) {
+ BSONObj info;
+ if( latestConn->runCommand( "admin", BSON( "replSetGetStatus" << 1 << "forShell" << 1 ) , info ) ) {
+ stringstream ss;
+ ss << info["set"].String() << ':';
+
+ int s = info["myState"].Int();
+ MemberState ms( s );
+ ss << ms.toString();
+
+ return ss.str();
+ }
+ else {
+ string s = info.getStringField( "info" );
+ if( s.size() < 20 )
+ return s; // "mongos", "configsvr"
+ }
+ }
+ }
+ catch( std::exception& e ) {
+ log( 1 ) << "error in sayReplSetMemberState:" << e.what() << endl;
+ }
+ return "";
+}
+
+/**
+ * Edit a variable in an external editor -- EDITOR must be defined
+ *
+ * @param var Name of JavaScript variable to be edited
+ */
+static void edit( const string& var ) {
+
+ // EDITOR must be defined in the environment
+ static const char * editor = getenv( "EDITOR" );
+ if ( !editor ) {
+ cout << "please define the EDITOR environment variable" << endl;
+ return;
+ }
+
+ // "var" must look like a variable/property name
+ for ( const char* p=var.c_str(); *p; ++p ) {
+ if ( ! ( isalnum( *p ) || *p == '_' || *p == '.' ) ) {
+ cout << "can only edit variable or property" << endl;
+ return;
+ }
+ }
+
+ // Convert "var" to JavaScript (JSON) text
+ if ( !shellMainScope->exec( "__jsout__ = tojson(" + var + ")", "tojs", false, false, false ) )
+ return; // Error already printed
+
+ const string js = shellMainScope->getString( "__jsout__" );
+
+ if ( strstr( js.c_str(), "[native code]" ) ) {
+ cout << "can't edit native functions" << endl;
+ return;
+ }
+
+ // Pick a name to use for the temp file
+ string filename;
+ const int maxAttempts = 10;
+ int i;
+ for ( i = 0; i < maxAttempts; ++i ) {
+ StringBuilder sb;
+#ifdef _WIN32
+ char tempFolder[MAX_PATH];
+ GetTempPathA( sizeof tempFolder, tempFolder );
+ sb << tempFolder << "mongo_edit" << time( 0 ) + i << ".js";
+#else
+ sb << "/tmp/mongo_edit" << time( 0 ) + i << ".js";
+#endif
+ filename = sb.str();
+ if ( ! fileExists( filename ) )
+ break;
+ }
+ if ( i == maxAttempts ) {
+ cout << "couldn't create unique temp file after " << maxAttempts << " attempts" << endl;
+ return;
+ }
+
+ // Create the temp file
+ FILE * tempFileStream;
+ tempFileStream = fopen( filename.c_str(), "wt" );
+ if ( ! tempFileStream ) {
+ cout << "couldn't create temp file (" << filename << "): " << errnoWithDescription() << endl;
+ return;
+ }
+
+ // Write JSON into the temp file
+ size_t fileSize = js.size();
+ if ( fwrite( js.data(), sizeof( char ), fileSize, tempFileStream ) != fileSize ) {
+ int systemErrno = errno;
+ cout << "failed to write to temp file: " << errnoWithDescription( systemErrno ) << endl;
+ fclose( tempFileStream );
+ remove( filename.c_str() );
+ return;
+ }
+ fclose( tempFileStream );
+
+ // Pass file to editor
+ StringBuilder sb;
+ sb << editor << " " << filename;
+ int ret = ::system( sb.str().c_str() );
+ if ( ret ) {
+ if ( ret == -1 ) {
+ int systemErrno = errno;
+ cout << "failed to launch $EDITOR (" << editor << "): " << errnoWithDescription( systemErrno ) << endl;
+ }
+ else
+ cout << "editor exited with error (" << ret << "), not applying changes" << endl;
+ remove( filename.c_str() );
+ return;
+ }
+
+ // The editor gave return code zero, so read the file back in
+ tempFileStream = fopen( filename.c_str(), "rt" );
+ if ( ! tempFileStream ) {
+ cout << "couldn't open temp file on return from editor: " << errnoWithDescription() << endl;
+ remove( filename.c_str() );
+ return;
+ }
+ sb.reset();
+ sb << var << " = ";
+ int bytes;
+ do {
+ char buf[1024];
+ bytes = fread( buf, sizeof( char ), sizeof buf, tempFileStream );
+ if ( ferror( tempFileStream ) ) {
+ cout << "failed to read temp file: " << errnoWithDescription() << endl;
+ fclose( tempFileStream );
+ remove( filename.c_str() );
+ return;
+ }
+ sb.append( StringData( buf, bytes ) );
+ } while ( bytes );
+
+ // Done with temp file, close and delete it
+ fclose( tempFileStream );
+ remove( filename.c_str() );
+
+ // Try to execute assignment to copy edited value back into the variable
+ const string code = sb.str();
+ if ( !shellMainScope->exec( code, "tojs", false, false, false ) )
+ return; // Error already printed
+}
+
+int _main( int argc, char* argv[] ) {
+ mongo::isShell = true;
+ setupSignals();
+
+ mongo::shellUtils::RecordMyLocation( argv[ 0 ] );
+
+ string url = "test";
+ string dbhost;
+ string port;
+ vector<string> files;
+
+ string username;
+ string password;
+
+ bool runShell = false;
+ bool nodb = false;
+ bool norc = false;
+
+ string script;
+
+ po::options_description shell_options( "options" );
+ po::options_description hidden_options( "Hidden options" );
+ po::options_description cmdline_options( "Command line options" );
+ po::positional_options_description positional_options;
+
+ shell_options.add_options()
+ ( "shell", "run the shell after executing files" )
+ ( "nodb", "don't connect to mongod on startup - no 'db address' arg expected" )
+ ( "norc", "will not run the \".mongorc.js\" file on start up" )
+ ( "quiet", "be less chatty" )
+ ( "port", po::value<string>( &port ), "port to connect to" )
+ ( "host", po::value<string>( &dbhost ), "server to connect to" )
+ ( "eval", po::value<string>( &script ), "evaluate javascript" )
+ ( "username,u", po::value<string>(&username), "username for authentication" )
+ ( "password,p", new mongo::PasswordValue( &password ), "password for authentication" )
+ ( "help,h", "show this usage information" )
+ ( "version", "show version information" )
+ ( "verbose", "increase verbosity" )
+ ( "ipv6", "enable IPv6 support (disabled by default)" )
+#ifdef MONGO_SSL
+ ( "ssl", "use all for connections" )
+#endif
+ ;
+
+ hidden_options.add_options()
+ ( "dbaddress", po::value<string>(), "dbaddress" )
+ ( "files", po::value< vector<string> >(), "files" )
+ ( "nokillop", "nokillop" ) // for testing, kill op will also be disabled automatically if the tests starts a mongo program
+ ( "autokillop", "autokillop" ) // for testing, will kill op without prompting
+ ;
+
+ positional_options.add( "dbaddress", 1 );
+ positional_options.add( "files", -1 );
+
+ cmdline_options.add( shell_options ).add( hidden_options );
+
+ po::variables_map params;
+
+ /* using the same style as db.cpp uses because eventually we're going
+ * to merge some of this stuff. */
+ int command_line_style = (((po::command_line_style::unix_style ^
+ po::command_line_style::allow_guessing) |
+ po::command_line_style::allow_long_disguise) ^
+ po::command_line_style::allow_sticky);
+
+ try {
+ po::store(po::command_line_parser(argc, argv).options(cmdline_options).
+ positional(positional_options).
+ style(command_line_style).run(), params);
+ po::notify( params );
+ }
+ catch ( po::error &e ) {
+ cout << "ERROR: " << e.what() << endl << endl;
+ show_help_text( argv[0], shell_options );
+ return mongo::EXIT_BADOPTIONS;
+ }
+
+ // hide password from ps output
+ for ( int i = 0; i < (argc-1); ++i ) {
+ if ( !strcmp(argv[i], "-p") || !strcmp( argv[i], "--password" ) ) {
+ char* arg = argv[i + 1];
+ while ( *arg ) {
+ *arg++ = 'x';
+ }
+ }
+ }
+
+ if ( params.count( "shell" ) ) {
+ runShell = true;
+ }
+ if ( params.count( "nodb" ) ) {
+ nodb = true;
+ }
+ if ( params.count( "norc" ) ) {
+ norc = true;
+ }
+ if ( params.count( "help" ) ) {
+ show_help_text( argv[0], shell_options );
+ return mongo::EXIT_CLEAN;
+ }
+ if ( params.count( "files" ) ) {
+ files = params["files"].as< vector<string> >();
+ }
+ if ( params.count( "version" ) ) {
+ cout << "MongoDB shell version: " << mongo::versionString << endl;
+ return mongo::EXIT_CLEAN;
+ }
+ if ( params.count( "quiet" ) ) {
+ mongo::cmdLine.quiet = true;
+ }
+#ifdef MONGO_SSL
+ if ( params.count( "ssl" ) ) {
+ mongo::cmdLine.sslOnNormalPorts = true;
+ }
+#endif
+ if ( params.count( "nokillop" ) ) {
+ mongo::shellUtils::_nokillop = true;
+ }
+ if ( params.count( "autokillop" ) ) {
+ autoKillOp = true;
+ }
+
+ /* This is a bit confusing, here are the rules:
+ *
+ * if nodb is set then all positional parameters are files
+ * otherwise the first positional parameter might be a dbaddress, but
+ * only if one of these conditions is met:
+ * - it contains no '.' after the last appearance of '\' or '/'
+ * - it doesn't end in '.js' and it doesn't specify a path to an existing file */
+ if ( params.count( "dbaddress" ) ) {
+ string dbaddress = params["dbaddress"].as<string>();
+ if (nodb) {
+ files.insert( files.begin(), dbaddress );
+ }
+ else {
+ string basename = dbaddress.substr( dbaddress.find_last_of( "/\\" ) + 1 );
+ if (basename.find_first_of( '.' ) == string::npos ||
+ ( basename.find( ".js", basename.size() - 3 ) == string::npos && !fileExists( dbaddress ) ) ) {
+ url = dbaddress;
+ }
+ else {
+ files.insert( files.begin(), dbaddress );
+ }
+ }
+ }
+ if ( params.count( "ipv6" ) ) {
+ mongo::enableIPv6();
+ }
+ if ( params.count( "verbose" ) ) {
+ logLevel = 1;
+ }
+
+ if ( url == "*" ) {
+ cout << "ERROR: " << "\"*\" is an invalid db address" << endl << endl;
+ show_help_text( argv[0], shell_options );
+ return mongo::EXIT_BADOPTIONS;
+ }
+
+ if ( ! mongo::cmdLine.quiet )
+ cout << "MongoDB shell version: " << mongo::versionString << endl;
+
+ mongo::UnitTest::runTests();
+
+ if ( !nodb ) { // connect to db
+ //if ( ! mongo::cmdLine.quiet ) cout << "url: " << url << endl;
+
+ stringstream ss;
+ if ( mongo::cmdLine.quiet )
+ ss << "__quiet = true;";
+ ss << "db = connect( \"" << fixHost( url , dbhost , port ) << "\")";
+
+ mongo::shellUtils::_dbConnect = ss.str();
+
+ if ( params.count( "password" ) && password.empty() )
+ password = mongo::askPassword();
+
+ if ( username.size() && password.size() ) {
+ stringstream ss;
+ ss << "if ( ! db.auth( \"" << username << "\" , \"" << password << "\" ) ){ throw 'login failed'; }";
+ mongo::shellUtils::_dbAuth = ss.str();
+ }
+ }
+
+ mongo::ScriptEngine::setConnectCallback( mongo::shellUtils::onConnect );
+ mongo::ScriptEngine::setup();
+ mongo::globalScriptEngine->setScopeInitCallback( mongo::shellUtils::initScope );
+ auto_ptr< mongo::Scope > scope( mongo::globalScriptEngine->newScope() );
+ shellMainScope = scope.get();
+
+ if( runShell )
+ cout << "type \"help\" for help" << endl;
+
+ if ( !script.empty() ) {
+ mongo::shellUtils::MongoProgramScope s;
+ if ( ! scope->exec( script , "(shell eval)" , true , true , false ) )
+ return -4;
+ }
+
+ for (size_t i = 0; i < files.size(); ++i) {
+ mongo::shellUtils::MongoProgramScope s;
+
+ if ( files.size() > 1 )
+ cout << "loading file: " << files[i] << endl;
+
+ if ( ! scope->execFile( files[i] , false , true , false ) ) {
+ cout << "failed to load: " << files[i] << endl;
+ return -3;
+ }
+ }
+
+ if ( files.size() == 0 && script.empty() )
+ runShell = true;
+
+ if ( runShell ) {
+
+ mongo::shellUtils::MongoProgramScope s;
+
+ if ( !norc ) {
+ string rcLocation;
+#ifndef _WIN32
+ if ( getenv( "HOME" ) != NULL )
+ rcLocation = str::stream() << getenv( "HOME" ) << "/.mongorc.js" ;
+#else
+ if ( getenv( "HOMEDRIVE" ) != NULL && getenv( "HOMEPATH" ) != NULL )
+ rcLocation = str::stream() << getenv( "HOMEDRIVE" ) << getenv( "HOMEPATH" ) << "\\.mongorc.js";
+#endif
+ if ( !rcLocation.empty() && fileExists(rcLocation) ) {
+ if ( ! scope->execFile( rcLocation , false , true , false , 0 ) ) {
+ cout << "The \".mongorc.js\" file located in your home folder could not be executed" << endl;
+ return -5;
+ }
+ }
+ }
+
+ shellHistoryInit();
+
+ string prompt;
+ int promptType;
+
+ //v8::Handle<v8::Object> shellHelper = baseContext_->Global()->Get( v8::String::New( "shellHelper" ) )->ToObject();
+
+ while ( 1 ) {
+ inMultiLine = false;
+ gotInterrupted = false;
+// shellMainScope->localConnect;
+ //DBClientWithCommands *c = getConnection( JSContext *cx, JSObject *obj );
+
+ bool haveStringPrompt = false;
+ promptType = scope->type( "prompt" );
+ if( promptType == String ) {
+ prompt = scope->getString( "prompt" );
+ haveStringPrompt = true;
+ }
+ else if( promptType == Code ) {
+ scope->exec( "delete __prompt__;", "", false, false, false, 0 );
+ scope->exec( "__prompt__ = prompt();", "", false, false, false, 0 );
+ if( scope->type( "__prompt__" ) == String ) {
+ prompt = scope->getString( "__prompt__" );
+ haveStringPrompt = true;
+ }
+ }
+ if( !haveStringPrompt )
+ prompt = sayReplSetMemberState() + "> ";
+
+ char * line = shellReadline( prompt.c_str() );
+
+ char * linePtr = line; // can't clobber 'line', we need to free() it later
+ if ( linePtr ) {
+ while ( linePtr[0] == ' ' )
+ ++linePtr;
+ int lineLen = strlen( linePtr );
+ while ( lineLen > 0 && linePtr[lineLen - 1] == ' ' )
+ linePtr[--lineLen] = 0;
+ }
+
+ if ( ! linePtr || ( strlen( linePtr ) == 4 && strstr( linePtr , "exit" ) ) ) {
+ if ( ! mongo::cmdLine.quiet )
+ cout << "bye" << endl;
+ if ( line )
+ free( line );
+ break;
+ }
+
+ string code = linePtr;
+ if ( code == "exit" || code == "exit;" ) {
+ free( line );
+ break;
+ }
+ if ( code == "cls" ) {
+ free( line );
+ linenoiseClearScreen();
+ continue;
+ }
+
+ if ( code.size() == 0 ) {
+ free( line );
+ continue;
+ }
+
+ if ( startsWith( linePtr, "edit " ) ) {
+ shellHistoryAdd( linePtr );
+
+ const char* s = linePtr + 5; // skip "edit "
+ while( *s && isspace( *s ) )
+ s++;
+
+ edit( s );
+ free( line );
+ continue;
+ }
+
+ gotInterrupted = false;
+ code = finishCode( code );
+ if ( gotInterrupted ) {
+ cout << endl;
+ free( line );
+ continue;
+ }
+
+ if ( code.size() == 0 ) {
+ free( line );
+ break;
+ }
+
+ bool wascmd = false;
+ {
+ string cmd = linePtr;
+ if ( cmd.find( " " ) > 0 )
+ cmd = cmd.substr( 0 , cmd.find( " " ) );
+
+ if ( cmd.find( "\"" ) == string::npos ) {
+ try {
+ scope->exec( (string)"__iscmd__ = shellHelper[\"" + cmd + "\"];" , "(shellhelp1)" , false , true , true );
+ if ( scope->getBoolean( "__iscmd__" ) ) {
+ scope->exec( (string)"shellHelper( \"" + cmd + "\" , \"" + code.substr( cmd.size() ) + "\");" , "(shellhelp2)" , false , true , false );
+ wascmd = true;
+ }
+ }
+ catch ( std::exception& e ) {
+ cout << "error2:" << e.what() << endl;
+ wascmd = true;
+ }
+ }
+ }
+
+ if ( ! wascmd ) {
+ try {
+ if ( scope->exec( code.c_str() , "(shell)" , false , true , false ) )
+ scope->exec( "shellPrintHelper( __lastres__ );" , "(shell2)" , true , true , false );
+ }
+ catch ( std::exception& e ) {
+ cout << "error:" << e.what() << endl;
+ }
+ }
+
+ shellHistoryAdd( code.c_str() );
+ free( line );
+ }
+
+ shellHistoryDone();
+ }
+
+ mongo::dbexitCalled = true;
+ return 0;
+}
+
+int main( int argc, char* argv[] ) {
+ static mongo::StaticObserver staticObserver;
+ try {
+ return _main( argc , argv );
+ }
+ catch ( mongo::DBException& e ) {
+ cerr << "exception: " << e.what() << endl;
+ return -1;
+ }
+}
diff --git a/src/mongo/shell/mongo.js b/src/mongo/shell/mongo.js
new file mode 100644
index 00000000000..5e18f38fb63
--- /dev/null
+++ b/src/mongo/shell/mongo.js
@@ -0,0 +1,102 @@
+// mongo.js
+
+// NOTE 'Mongo' may be defined here or in MongoJS.cpp. Add code to init, not to this constructor.
+if ( typeof Mongo == "undefined" ){
+ Mongo = function( host ){
+ this.init( host );
+ }
+}
+
+if ( ! Mongo.prototype ){
+ throw "Mongo.prototype not defined";
+}
+
+if ( ! Mongo.prototype.find )
+ Mongo.prototype.find = function( ns , query , fields , limit , skip , batchSize , options ){ throw "find not implemented"; }
+if ( ! Mongo.prototype.insert )
+ Mongo.prototype.insert = function( ns , obj ){ throw "insert not implemented"; }
+if ( ! Mongo.prototype.remove )
+ Mongo.prototype.remove = function( ns , pattern ){ throw "remove not implemented"; }
+if ( ! Mongo.prototype.update )
+ Mongo.prototype.update = function( ns , query , obj , upsert ){ throw "update not implemented"; }
+
+if ( typeof mongoInject == "function" ){
+ mongoInject( Mongo.prototype );
+}
+
+Mongo.prototype.setSlaveOk = function( value ) {
+ if( value == undefined ) value = true;
+ this.slaveOk = value;
+}
+
+Mongo.prototype.getSlaveOk = function() {
+ return this.slaveOk || false;
+}
+
+Mongo.prototype.getDB = function( name ){
+ if (jsTest.options().keyFile && ((typeof this.authenticated == 'undefined') || !this.authenticated)) {
+ jsTest.authenticate(this)
+ }
+ return new DB( this , name );
+}
+
+Mongo.prototype.getDBs = function(){
+ var res = this.getDB( "admin" ).runCommand( { "listDatabases" : 1 } );
+ if ( ! res.ok )
+ throw "listDatabases failed:" + tojson( res );
+ return res;
+}
+
+Mongo.prototype.adminCommand = function( cmd ){
+ return this.getDB( "admin" ).runCommand( cmd );
+}
+
+Mongo.prototype.setLogLevel = function( logLevel ){
+ return this.adminCommand({ setParameter : 1, logLevel : logLevel })
+}
+
+Mongo.prototype.getDBNames = function(){
+ return this.getDBs().databases.map(
+ function(z){
+ return z.name;
+ }
+ );
+}
+
+Mongo.prototype.getCollection = function(ns){
+ var idx = ns.indexOf( "." );
+ if ( idx < 0 )
+ throw "need . in ns";
+ var db = ns.substring( 0 , idx );
+ var c = ns.substring( idx + 1 );
+ return this.getDB( db ).getCollection( c );
+}
+
+Mongo.prototype.toString = function(){
+ return "connection to " + this.host;
+}
+Mongo.prototype.tojson = Mongo.prototype.toString;
+
+connect = function( url , user , pass ){
+ chatty( "connecting to: " + url )
+
+ if ( user && ! pass )
+ throw "you specified a user and not a password. either you need a password, or you're using the old connect api";
+
+ var idx = url.lastIndexOf( "/" );
+
+ var db;
+
+ if ( idx < 0 )
+ db = new Mongo().getDB( url );
+ else
+ db = new Mongo( url.substring( 0 , idx ) ).getDB( url.substring( idx + 1 ) );
+
+ if ( user && pass ){
+ if ( ! db.auth( user , pass ) ){
+ throw "couldn't login";
+ }
+ }
+
+ return db;
+}
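+
+// Illustrative usage (host/port/db names are arbitrary examples):
+//   var testDb = connect( "localhost:27017/test" );                    // host:port/dbname
+//   var authDb = connect( "localhost:27017/admin" , "user" , "pwd" );  // authenticates after connecting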
diff --git a/src/mongo/shell/mr.js b/src/mongo/shell/mr.js
new file mode 100644
index 00000000000..7b0814dd557
--- /dev/null
+++ b/src/mongo/shell/mr.js
@@ -0,0 +1,95 @@
+// mr.js
+
+MR = {};
+
+MR.init = function(){
+ $max = 0;
+ $arr = [];
+ emit = MR.emit;
+ $numEmits = 0;
+ $numReduces = 0;
+ $numReducesToDB = 0;
+ gc(); // this is just to keep the memory size sane
+}
+
+MR.cleanup = function(){
+ MR.init();
+ gc();
+}
+
+MR.emit = function(k,v){
+ $numEmits++;
+ var num = nativeHelper.apply( get_num_ , [ k ] );
+ var data = $arr[num];
+ if ( ! data ){
+ data = { key : k , values : new Array(1000) , count : 0 };
+ $arr[num] = data;
+ }
+ data.values[data.count++] = v;
+ $max = Math.max( $max , data.count );
+}
+
+MR.doReduce = function( useDB ){
+ $numReduces++;
+ if ( useDB )
+ $numReducesToDB++;
+ $max = 0;
+ for ( var i=0; i<$arr.length; i++){
+ var data = $arr[i];
+ if ( ! data )
+ continue;
+
+ if ( useDB ){
+ var x = tempcoll.findOne( { _id : data.key } );
+ if ( x ){
+ data.values[data.count++] = x.value;
+ }
+ }
+
+ var r = $reduce( data.key , data.values.slice( 0 , data.count ) );
+ if ( r && r.length && r[0] ){
+ data.values = r;
+ data.count = r.length;
+ }
+ else{
+ data.values[0] = r;
+ data.count = 1;
+ }
+
+ $max = Math.max( $max , data.count );
+
+ if ( useDB ){
+ if ( data.count == 1 ){
+ tempcoll.save( { _id : data.key , value : data.values[0] } );
+ }
+ else {
+ tempcoll.save( { _id : data.key , value : data.values.slice( 0 , data.count ) } );
+ }
+ }
+ }
+}
+
+MR.check = function(){
+ if ( $max < 2000 && $arr.length < 1000 ){
+ return 0;
+ }
+ MR.doReduce();
+ if ( $max < 2000 && $arr.length < 1000 ){
+ return 1;
+ }
+ MR.doReduce( true );
+ $arr = [];
+ $max = 0;
+ reset_num();
+ gc();
+ return 2;
+}
+
+MR.finalize = function(){
+ tempcoll.find().forEach(
+ function(z){
+ z.value = $finalize( z._id , z.value );
+ tempcoll.save( z );
+ }
+ );
+}
diff --git a/src/mongo/shell/msvc/createCPPfromJavaScriptFiles.js b/src/mongo/shell/msvc/createCPPfromJavaScriptFiles.js
new file mode 100644
index 00000000000..ff6f2a54b12
--- /dev/null
+++ b/src/mongo/shell/msvc/createCPPfromJavaScriptFiles.js
@@ -0,0 +1,105 @@
+// createCPPfromJavaScriptFiles.js
+
+/* Copyright 2011 10gen Inc.
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+// This JavaScript file is run under Windows Script Host from the Visual Studio build.
+// It creates .CPP files from JavaScript files and is intended to duplicate the functionality
+// of the jsToH Python function in SConstruct. By using only standard Windows components
+// (Windows Script Host, JScript) we avoid the need for Visual Studio builders to install
+// Python, and we don't need to include the generated files in Git because they can be
+// recreated as required.
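+//
+// It is invoked from the Visual Studio pre-build step roughly as:
+//   cscript //Nologo createCPPfromJavaScriptFiles.js "<path to src/mongo>"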
+
+var whitespace = " \t";
+function cppEscape( s ) {
+ for ( var i = 0, len = s.length; i < len; ++i ) {
+ if ( whitespace.indexOf( s.charAt( i ) ) === -1 ) {
+ s = s.substring( i );
+ break;
+ }
+ }
+ if ( i == len )
+ return "";
+ for ( i = s.length - 1; i >= 0; --i ) {
+ if ( whitespace.indexOf( s.charAt( i ) ) === -1 ) {
+ s = s.substr( 0, i + 1 );
+ break;
+ }
+ }
+ s = s.replace( /\\/g, "\\\\" );
+ s = s.replace( /"/g, '\\"' );
+ return s;
+};
+
+function jsToH( fso, outputFileNameString, inputFileNameStringArray ) {
+ var displayString = 'jsToH( "' + outputFileNameString + '", [';
+ var i, len = inputFileNameStringArray.length;
+ for ( i = 0; i < len; ++i ) {
+ displayString += '"' + inputFileNameStringArray[i] + '"';
+ if ( i < len - 1 )
+ displayString += ', ';
+ }
+ displayString += '] );'
+ WScript.Echo( displayString );
+ var h = ['#include "bson/stringdata.h"'
+ , 'namespace mongo {'
+ , 'struct JSFile{ const char* name; const StringData& source; };'
+ , 'namespace JSFiles{'
+ ];
+ for ( i = 0; i < len; ++i ) {
+ var filename = inputFileNameStringArray[i];
+ var objname = filename.substring( 0, filename.lastIndexOf( '.' ) ).substr( 1 + filename.lastIndexOf('/') );
+ var stringname = '_jscode_raw_' + objname;
+ h.push( 'const StringData ' + stringname + ' = ' );
+ var inputFile = fso.GetFile( filename );
+ var inputStream = inputFile.OpenAsTextStream( 1 /* ForReading */, 0 /* TristateFalse == ASCII */ );
+ while ( !inputStream.AtEndOfStream )
+ h.push( '"' + cppEscape(inputStream.ReadLine()) + '\\n" ' );
+ inputStream.Close();
+ h.push( ';' );
+ h.push( 'extern const JSFile ' + objname + ';' ); //symbols aren't exported w/o this
+ h.push( 'const JSFile ' + objname + ' = { "' + filename + '" , ' + stringname + ' };' );
+ }
+ h.push( "} // namespace JSFiles" );
+ h.push( "} // namespace mongo" );
+ h.push( "" );
+ var out = fso.CreateTextFile( outputFileNameString, true /* overwrite */ );
+ out.Write( h.join( '\n' ) );
+ out.Close();
+};
+
+function rebuildIfNeeded( fso, outputFileNameString, inputFileNameStringArray ) {
+ var rebuildNeeded = false;
+ if ( !fso.FileExists( outputFileNameString ) ) {
+ rebuildNeeded = true;
+ } else {
+ var outputFileDate = fso.GetFile( outputFileNameString ).DateLastModified;
+ for ( var i = 0, len = inputFileNameStringArray.length; i < len; ++i ) {
+ if ( fso.GetFile( inputFileNameStringArray[i] ).DateLastModified > outputFileDate ) {
+ rebuildNeeded = true;
+ break;
+ }
+ }
+ }
+ if ( rebuildNeeded )
+ jsToH( fso, outputFileNameString, inputFileNameStringArray );
+};
+
+var shell = new ActiveXObject( "WScript.Shell" );
+shell.CurrentDirectory = WScript.Arguments.Unnamed.Item( 0 );
+
+var fso = new ActiveXObject( "Scripting.FileSystemObject" );
+rebuildIfNeeded( fso, "shell/mongo.cpp", ["shell/utils.js", "shell/utils_sh.js", "shell/db.js", "shell/mongo.js", "shell/mr.js", "shell/query.js", "shell/collection.js"] );
+rebuildIfNeeded( fso, "shell/mongo-server.cpp", ["shell/servers.js"] );
diff --git a/src/mongo/shell/msvc/mongo.ico b/src/mongo/shell/msvc/mongo.ico
new file mode 100755
index 00000000000..1eba9ed5131
--- /dev/null
+++ b/src/mongo/shell/msvc/mongo.ico
Binary files differ
diff --git a/src/mongo/shell/msvc/mongo.sln b/src/mongo/shell/msvc/mongo.sln
new file mode 100644
index 00000000000..01c9e1e6e40
--- /dev/null
+++ b/src/mongo/shell/msvc/mongo.sln
@@ -0,0 +1,20 @@
+
+Microsoft Visual Studio Solution File, Format Version 11.00
+# Visual Studio 2010
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "mongo", "mongo.vcxproj", "{FE959BD8-8EE2-4555-AE59-9FA14FFD410E}"
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|Win32 = Debug|Win32
+ Release|Win32 = Release|Win32
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {FE959BD8-8EE2-4555-AE59-9FA14FFD410E}.Debug|Win32.ActiveCfg = Debug|Win32
+ {FE959BD8-8EE2-4555-AE59-9FA14FFD410E}.Debug|Win32.Build.0 = Debug|Win32
+ {FE959BD8-8EE2-4555-AE59-9FA14FFD410E}.Release|Win32.ActiveCfg = Release|Win32
+ {FE959BD8-8EE2-4555-AE59-9FA14FFD410E}.Release|Win32.Build.0 = Release|Win32
+ EndGlobalSection
+ GlobalSection(SolutionProperties) = preSolution
+ HideSolutionNode = FALSE
+ EndGlobalSection
+EndGlobal
diff --git a/src/mongo/shell/msvc/mongo.vcxproj b/src/mongo/shell/msvc/mongo.vcxproj
new file mode 100644
index 00000000000..968215d5b8d
--- /dev/null
+++ b/src/mongo/shell/msvc/mongo.vcxproj
@@ -0,0 +1,272 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup Label="ProjectConfigurations">
+ <ProjectConfiguration Include="Debug|Win32">
+ <Configuration>Debug</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|Win32">
+ <Configuration>Release</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ </ItemGroup>
+ <PropertyGroup Label="Globals">
+ <ProjectGuid>{FE959BD8-8EE2-4555-AE59-9FA14FFD410E}</ProjectGuid>
+ <Keyword>Win32Proj</Keyword>
+ <RootNamespace>mongo</RootNamespace>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseDebugLibraries>true</UseDebugLibraries>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseDebugLibraries>false</UseDebugLibraries>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+ <ImportGroup Label="ExtensionSettings">
+ </ImportGroup>
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <LinkIncremental>true</LinkIncremental>
+ <LibraryPath>\boost\lib\vs2010_32\;$(VCInstallDir)lib;$(VCInstallDir)atlmfc\lib;$(WindowsSdkDir)lib;$(FrameworkSDKDir)\lib</LibraryPath>
+ <ExecutablePath>$(VCInstallDir)bin;$(WindowsSdkDir)bin\NETFX 4.0 Tools;$(WindowsSdkDir)bin;$(VSInstallDir)Common7\Tools\bin;$(VSInstallDir)Common7\tools;$(VSInstallDir)Common7\ide;$(ProgramFiles)\HTML Help Workshop;$(FrameworkSDKDir)\bin;$(MSBuildToolsPath32);$(VSInstallDir);$(SystemRoot)\SysWow64;$(FxCopDir);$(PATH);</ExecutablePath>
+ <IncludePath>..\..\..\readline\include;..\..\..\js\src\;..\..\third_party/pcre-7.4;..\..\;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSdkDir)include;$(FrameworkSDKDir)\include</IncludePath>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <IncludePath>..\..\..\readline\include;..\..\..\js\src\;..\..\third_party/pcre-7.4;..\..\;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(WindowsSdkDir)include;$(FrameworkSDKDir)\include</IncludePath>
+ <LinkIncremental>false</LinkIncremental>
+ <LibraryPath>\boost\lib\vs2010_32\;$(VCInstallDir)lib;$(VCInstallDir)atlmfc\lib;$(WindowsSdkDir)lib;$(FrameworkSDKDir)\lib</LibraryPath>
+ </PropertyGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <ClCompile>
+ <PrecompiledHeader>Use</PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <Optimization>Disabled</Optimization>
+ <PreprocessorDefinitions>XP_WIN;HAVE_CONFIG_H;OLDJS;MONGO_EXPOSE_MACROS;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalIncludeDirectories>\boost\</AdditionalIncludeDirectories>
+ <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
+ <DisableSpecificWarnings>4355;4800;4267;4244;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <AdditionalDependencies>ws2_32.lib;psapi.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ </Link>
+ <PreBuildEvent>
+ <Command>cscript //Nologo createCPPfromJavaScriptFiles.js "$(ProjectDir)..\.."</Command>
+ <Message>Create mongo.cpp and mongo-server.cpp from JavaScript source files</Message>
+ </PreBuildEvent>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <PrecompiledHeader>Use</PrecompiledHeader>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions>USE_READLINE;XP_WIN;HAVE_CONFIG_H;OLDJS;MONGO_EXPOSE_MACROS;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <AdditionalIncludeDirectories>\boost\</AdditionalIncludeDirectories>
+ <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+ <DisableSpecificWarnings>4355;4800;4267;4244;%(DisableSpecificWarnings)</DisableSpecificWarnings>
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <OptimizeReferences>true</OptimizeReferences>
+ <AdditionalDependencies>ws2_32.lib;psapi.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ </Link>
+ <PreBuildEvent>
+ <Command>cscript //Nologo createCPPfromJavaScriptFiles.js "$(ProjectDir)..\.."</Command>
+ <Message>Create mongo.cpp and mongo-server.cpp from JavaScript source files</Message>
+ </PreBuildEvent>
+ </ItemDefinitionGroup>
+ <ItemGroup>
+ <ClCompile Include="..\..\bson\oid.cpp" />
+ <ClCompile Include="..\..\client\clientOnly.cpp" />
+ <ClCompile Include="..\..\client\connpool.cpp" />
+ <ClCompile Include="..\..\client\dbclient_rs.cpp" />
+ <ClCompile Include="..\..\client\syncclusterconnection.cpp" />
+ <ClCompile Include="..\..\db\commands.cpp" />
+ <ClCompile Include="..\..\db\lasterror.cpp" />
+ <ClCompile Include="..\..\db\nonce.cpp" />
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcrecpp.cc">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_compile.c">
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_config.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_chartables.c">
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_stringpiece.cc">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\..\scripting\bench.cpp" />
+ <ClCompile Include="..\..\scripting\engine_spidermonkey.cpp" />
+ <ClCompile Include="..\..\scripting\utils.cpp" />
+ <ClCompile Include="..\..\third_party\linenoise\linenoise.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PreprocessorDefinitions Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">_CRT_SECURE_NO_WARNINGS;XP_WIN;HAVE_CONFIG_H;OLDJS;MONGO_EXPOSE_MACROS;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <PreprocessorDefinitions Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">_CRT_SECURE_NO_WARNINGS;USE_READLINE;XP_WIN;HAVE_CONFIG_H;OLDJS;MONGO_EXPOSE_MACROS;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\..\util\background.cpp" />
+ <ClCompile Include="..\..\util\concurrency\spin_lock.cpp" />
+ <ClCompile Include="..\..\util\log.cpp" />
+ <ClCompile Include="..\..\util\mmap.cpp" />
+ <ClCompile Include="..\..\util\net\listen.cpp" />
+ <ClCompile Include="..\..\util\net\message.cpp" />
+ <ClCompile Include="..\..\util\net\message_port.cpp" />
+ <ClCompile Include="..\..\util\net\sock.cpp" />
+ <ClCompile Include="..\..\util\password.cpp" />
+ <ClCompile Include="..\..\util\ramlog.cpp" />
+ <ClCompile Include="..\..\util\text.cpp" />
+ <ClCompile Include="..\..\util\mmap_win.cpp" />
+ <ClCompile Include="..\..\util\processinfo_win32.cpp" />
+ <ClCompile Include="..\..\util\assert_util.cpp" />
+ <ClCompile Include="..\..\util\md5main.cpp" />
+ <ClCompile Include="..\..\util\md5.c">
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\..\util\base64.cpp" />
+ <ClCompile Include="..\..\util\debug_util.cpp" />
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_dfa_exec.c">
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_exec.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_fullinfo.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_get.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_globals.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_info.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_maketables.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_newline.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_ord2utf8.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_refcount.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_study.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_tables.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_try_flipped.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_ucp_searchfuncs.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_valid_utf8.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_version.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_xclass.c">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\..\client\dbclient.cpp" />
+ <ClCompile Include="..\..\client\dbclientcursor.cpp" />
+ <ClCompile Include="..\..\db\jsobj.cpp" />
+ <ClCompile Include="..\..\db\json.cpp" />
+ <ClCompile Include="..\..\pch.cpp">
+ <PrecompiledHeader>Create</PrecompiledHeader>
+ <PrecompiledHeaderFile>pch.h</PrecompiledHeaderFile>
+ </ClCompile>
+ <ClCompile Include="..\..\scripting\engine.cpp" />
+ <ClCompile Include="..\..\util\concurrency\vars.cpp" />
+ <ClCompile Include="..\..\util\util.cpp" />
+ <ClCompile Include="..\..\util\version.cpp" />
+ <ClCompile Include="..\dbshell.cpp" />
+ <ClCompile Include="..\mongo-server.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\mongo.cpp">
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">NotUsing</PrecompiledHeader>
+ <PrecompiledHeader Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">NotUsing</PrecompiledHeader>
+ </ClCompile>
+ <ClCompile Include="..\shell_utils.cpp" />
+ </ItemGroup>
+ <ItemGroup>
+ <None Include="..\..\SConstruct" />
+ <None Include="..\collection.js" />
+ <None Include="..\db.js" />
+ <None Include="..\mongo.js" />
+ <None Include="..\mr.js" />
+ <None Include="..\query.js" />
+ <None Include="..\servers.js" />
+ <None Include="..\utils.js" />
+ <None Include="..\utils_sh.js" />
+ </ItemGroup>
+ <ItemGroup>
+ <Library Include="..\..\..\js\js32d.lib">
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">true</ExcludedFromBuild>
+ </Library>
+ <Library Include="..\..\..\js\js32r.lib">
+ <ExcludedFromBuild Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">true</ExcludedFromBuild>
+ </Library>
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="..\..\db\lasterror.h" />
+ <ClInclude Include="..\..\third_party\linenoise\linenoise.h" />
+ </ItemGroup>
+ <ItemGroup>
+ <ResourceCompile Include="..\..\db\db.rc" />
+ </ItemGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+ <ImportGroup Label="ExtensionTargets">
+ </ImportGroup>
+</Project>
\ No newline at end of file
diff --git a/src/mongo/shell/msvc/mongo.vcxproj.filters b/src/mongo/shell/msvc/mongo.vcxproj.filters
new file mode 100644
index 00000000000..dcf9bcd7550
--- /dev/null
+++ b/src/mongo/shell/msvc/mongo.vcxproj.filters
@@ -0,0 +1,285 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup>
+ <Filter Include="Resource Files">
+ <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>
+ <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
+ </Filter>
+ <Filter Include="util">
+ <UniqueIdentifier>{2a0d6120-434d-4732-ac31-2a7bf077f6ee}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="util\concurrency">
+ <UniqueIdentifier>{a1e59094-b70c-463a-8dc1-691efe337f14}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="scripting">
+ <UniqueIdentifier>{2d0fd975-0cc9-43dc-ac8e-53cb8c3a0040}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="bson">
+ <UniqueIdentifier>{a33442e2-39da-4c70-8310-6de9fa70cd71}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="db">
+ <UniqueIdentifier>{1044ce7b-72c4-4892-82c0-f46d8708a6ff}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="client">
+ <UniqueIdentifier>{fc0f6c1a-9627-4254-9b5e-0bcb8b3257f3}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="shared source files">
+ <UniqueIdentifier>{30b62472-d7a7-4b8a-8a07-d7d341bc6252}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="pcre">
+ <UniqueIdentifier>{291e0d72-13ca-42d7-b0fd-2e7b5f89639f}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="shell">
+ <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>
+ <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>
+ </Filter>
+ <Filter Include="_js files">
+ <UniqueIdentifier>{473e7192-9f2a-47c5-ad95-e5b75d4f48f9}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="shell\generated_from_js">
+ <UniqueIdentifier>{96e4c411-7ab4-4bcd-b7c6-a33059f5d492}</UniqueIdentifier>
+ </Filter>
+ <Filter Include="thirdparty">
+ <UniqueIdentifier>{5eca87ab-5987-4fb0-97be-e80cc721e328}</UniqueIdentifier>
+ </Filter>
+ </ItemGroup>
+ <ItemGroup>
+ <ClCompile Include="..\dbshell.cpp">
+ <Filter>shell</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\util\version.cpp">
+ <Filter>util</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\util\concurrency\vars.cpp">
+ <Filter>util\concurrency</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\scripting\engine.cpp">
+ <Filter>scripting</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\db\jsobj.cpp">
+ <Filter>db</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\client\dbclient.cpp">
+ <Filter>client</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\client\dbclientcursor.cpp">
+ <Filter>client</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\pch.cpp" />
+ <ClCompile Include="..\..\db\json.cpp">
+ <Filter>shared source files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\util\debug_util.cpp">
+ <Filter>shell</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\db\lasterror.cpp">
+ <Filter>shared source files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\db\nonce.cpp">
+ <Filter>shared source files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\client\connpool.cpp">
+ <Filter>client</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\util\processinfo_win32.cpp">
+ <Filter>util</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\db\commands.cpp">
+ <Filter>db</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\scripting\utils.cpp">
+ <Filter>scripting</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\util\assert_util.cpp">
+ <Filter>util</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\util\background.cpp">
+ <Filter>util</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\util\base64.cpp">
+ <Filter>util</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\client\clientOnly.cpp">
+ <Filter>client</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\util\mmap.cpp">
+ <Filter>util</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\util\md5main.cpp">
+ <Filter>util</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\util\util.cpp">
+ <Filter>util</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\client\syncclusterconnection.cpp">
+ <Filter>client</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\util\md5.c">
+ <Filter>shell</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\util\mmap_win.cpp">
+ <Filter>util</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\util\text.cpp">
+ <Filter>shell</Filter>
+ </ClCompile>
+ <ClCompile Include="..\shell_utils.cpp">
+ <Filter>shell</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\scripting\engine_spidermonkey.cpp">
+ <Filter>scripting</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\util\password.cpp">
+ <Filter>util</Filter>
+ </ClCompile>
+ <ClCompile Include="..\mongo.cpp">
+ <Filter>shell\generated_from_js</Filter>
+ </ClCompile>
+ <ClCompile Include="..\mongo-server.cpp">
+ <Filter>shell\generated_from_js</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\util\log.cpp">
+ <Filter>shared source files</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\scripting\bench.cpp">
+ <Filter>scripting</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\bson\oid.cpp">
+ <Filter>bson</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\client\dbclient_rs.cpp">
+ <Filter>client</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party\linenoise\linenoise.cpp">
+ <Filter>thirdparty</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\util\concurrency\spin_lock.cpp">
+ <Filter>util\concurrency</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\util\net\listen.cpp">
+ <Filter>util</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\util\net\message.cpp">
+ <Filter>util</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\util\net\sock.cpp">
+ <Filter>util</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\util\net\message_port.cpp">
+ <Filter>util</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcrecpp.cc">
+ <Filter>shell</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_compile.c">
+ <Filter>shell</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_config.c">
+ <Filter>shell</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_chartables.c">
+ <Filter>shell</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_stringpiece.cc">
+ <Filter>shell</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_dfa_exec.c">
+ <Filter>shell</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_exec.c">
+ <Filter>shell</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_fullinfo.c">
+ <Filter>shell</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_get.c">
+ <Filter>shell</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_globals.c">
+ <Filter>shell</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_info.c">
+ <Filter>shell</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_maketables.c">
+ <Filter>shell</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_newline.c">
+ <Filter>shell</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_ord2utf8.c">
+ <Filter>shell</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_refcount.c">
+ <Filter>shell</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_study.c">
+ <Filter>shell</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_tables.c">
+ <Filter>shell</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_try_flipped.c">
+ <Filter>shell</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_ucp_searchfuncs.c">
+ <Filter>shell</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_valid_utf8.c">
+ <Filter>shell</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_version.c">
+ <Filter>shell</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\third_party/pcre-7.4\pcre_xclass.c">
+ <Filter>shell</Filter>
+ </ClCompile>
+ <ClCompile Include="..\..\util\ramlog.cpp">
+ <Filter>util</Filter>
+ </ClCompile>
+ </ItemGroup>
+ <ItemGroup>
+ <None Include="..\..\SConstruct" />
+ <None Include="..\collection.js">
+ <Filter>_js files</Filter>
+ </None>
+ <None Include="..\db.js">
+ <Filter>_js files</Filter>
+ </None>
+ <None Include="..\mongo.js">
+ <Filter>_js files</Filter>
+ </None>
+ <None Include="..\mr.js">
+ <Filter>_js files</Filter>
+ </None>
+ <None Include="..\query.js">
+ <Filter>_js files</Filter>
+ </None>
+ <None Include="..\servers.js">
+ <Filter>_js files</Filter>
+ </None>
+ <None Include="..\utils.js">
+ <Filter>_js files</Filter>
+ </None>
+ <None Include="..\utils_sh.js">
+ <Filter>_js files</Filter>
+ </None>
+ </ItemGroup>
+ <ItemGroup>
+ <Library Include="..\..\..\js\js32d.lib" />
+ <Library Include="..\..\..\js\js32r.lib" />
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="..\..\db\lasterror.h">
+ <Filter>db</Filter>
+ </ClInclude>
+ <ClInclude Include="..\..\third_party\linenoise\linenoise.h">
+ <Filter>thirdparty</Filter>
+ </ClInclude>
+ </ItemGroup>
+ <ItemGroup>
+ <ResourceCompile Include="..\..\db\db.rc">
+ <Filter>Resource Files</Filter>
+ </ResourceCompile>
+ </ItemGroup>
+</Project>
\ No newline at end of file
diff --git a/src/mongo/shell/query.js b/src/mongo/shell/query.js
new file mode 100644
index 00000000000..51b9fd8e6a1
--- /dev/null
+++ b/src/mongo/shell/query.js
@@ -0,0 +1,344 @@
+// query.js
+
+if ( typeof DBQuery == "undefined" ){
+ DBQuery = function( mongo , db , collection , ns , query , fields , limit , skip , batchSize , options ){
+
+ this._mongo = mongo; // 0
+ this._db = db; // 1
+ this._collection = collection; // 2
+ this._ns = ns; // 3
+
+ this._query = query || {}; // 4
+ this._fields = fields; // 5
+ this._limit = limit || 0; // 6
+ this._skip = skip || 0; // 7
+ this._batchSize = batchSize || 0;
+ this._options = options || 0;
+
+ this._cursor = null;
+ this._numReturned = 0;
+ this._special = false;
+ this._prettyShell = false;
+ }
+ print( "DBQuery probably won't have array access " );
+}
+
+DBQuery.prototype.help = function () {
+ print("find() modifiers")
+ print("\t.sort( {...} )")
+ print("\t.limit( n )")
+ print("\t.skip( n )")
+ print("\t.count() - total # of objects matching query, ignores skip,limit")
+ print("\t.size() - total # of objects cursor would return, honors skip,limit")
+ print("\t.explain([verbose])")
+ print("\t.hint(...)")
+ print("\t.addOption(n) - adds op_query options -- see wire protocol")
+ print("\t._addSpecial(name, value) - http://www.mongodb.org/display/DOCS/Advanced%20Queries#AdvancedQueries-Metaqueryoperators")
+ print("\t.batchSize(n) - sets the number of docs to return per getMore")
+ print("\t.showDiskLoc() - adds a $diskLoc field to each returned object")
+ print("\t.min(idxDoc)")
+ print("\t.max(idxDoc)")
+
+ print("\nCursor methods");
+ print("\t.toArray() - iterates through docs and returns an array of the results")
+ print("\t.forEach( func )")
+ print("\t.map( func )")
+ print("\t.hasNext()")
+ print("\t.next()")
+ print("\t.objsLeftInBatch() - returns count of docs left in current batch (when exhausted, a new getMore will be issued)")
+ print("\t.count(applySkipLimit) - runs command at server")
+ print("\t.itcount() - iterates through documents and counts them")
+}
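+
+// A minimal usage sketch of the modifiers listed above; the collection name
+// "foo" and the query values are illustrative:
+//
+//     db.foo.find( { x : { $gt : 5 } } , { x : 1 } ).sort( { x : -1 } ).skip( 10 ).limit( 5 ).batchSize( 2 )
+//     db.foo.find().hint( { x : 1 } ).explain( true )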
+
+DBQuery.prototype.clone = function(){
+ var q = new DBQuery( this._mongo , this._db , this._collection , this._ns ,
+ this._query , this._fields ,
+ this._limit , this._skip , this._batchSize , this._options );
+ q._special = this._special;
+ return q;
+}
+
+DBQuery.prototype._ensureSpecial = function(){
+ if ( this._special )
+ return;
+
+ var n = { query : this._query };
+ this._query = n;
+ this._special = true;
+}
+
+DBQuery.prototype._checkModify = function(){
+ if ( this._cursor )
+ throw "query already executed";
+}
+
+DBQuery.prototype._exec = function(){
+ if ( ! this._cursor ){
+ assert.eq( 0 , this._numReturned );
+ this._cursor = this._mongo.find( this._ns , this._query , this._fields , this._limit , this._skip , this._batchSize , this._options );
+ this._cursorSeen = 0;
+ }
+ return this._cursor;
+}
+
+DBQuery.prototype.limit = function( limit ){
+ this._checkModify();
+ this._limit = limit;
+ return this;
+}
+
+DBQuery.prototype.batchSize = function( batchSize ){
+ this._checkModify();
+ this._batchSize = batchSize;
+ return this;
+}
+
+
+DBQuery.prototype.addOption = function( option ){
+ this._options |= option;
+ return this;
+}
+
+DBQuery.prototype.skip = function( skip ){
+ this._checkModify();
+ this._skip = skip;
+ return this;
+}
+
+DBQuery.prototype.hasNext = function(){
+ this._exec();
+
+ if ( this._limit > 0 && this._cursorSeen >= this._limit )
+ return false;
+ var o = this._cursor.hasNext();
+ return o;
+}
+
+DBQuery.prototype.next = function(){
+ this._exec();
+
+ var o = this._cursor.hasNext();
+ if ( o )
+ this._cursorSeen++;
+ else
+ throw "error hasNext: " + o;
+
+ var ret = this._cursor.next();
+ if ( ret.$err && this._numReturned == 0 && ! this.hasNext() )
+ throw "error: " + tojson( ret );
+
+ this._numReturned++;
+ return ret;
+}
+
+DBQuery.prototype.objsLeftInBatch = function(){
+ this._exec();
+
+ var ret = this._cursor.objsLeftInBatch();
+ if ( ret.$err )
+ throw "error: " + tojson( ret );
+
+ return ret;
+}
+
+DBQuery.prototype.readOnly = function(){
+ this._exec();
+ this._cursor.readOnly();
+ return this;
+}
+
+DBQuery.prototype.toArray = function(){
+ if ( this._arr )
+ return this._arr;
+
+ var a = [];
+ while ( this.hasNext() )
+ a.push( this.next() );
+ this._arr = a;
+ return a;
+}
+
+DBQuery.prototype.count = function( applySkipLimit ){
+ var cmd = { count: this._collection.getName() };
+ if ( this._query ){
+ if ( this._special )
+ cmd.query = this._query.query;
+ else
+ cmd.query = this._query;
+ }
+ cmd.fields = this._fields || {};
+
+ if ( applySkipLimit ){
+ if ( this._limit )
+ cmd.limit = this._limit;
+ if ( this._skip )
+ cmd.skip = this._skip;
+ }
+
+ var res = this._db.runCommand( cmd );
+ if( res && res.n != null ) return res.n;
+ throw "count failed: " + tojson( res );
+}
+
+DBQuery.prototype.size = function(){
+ return this.count( true );
+}
+
+DBQuery.prototype.countReturn = function(){
+ var c = this.count();
+
+ if ( this._skip )
+ c = c - this._skip;
+
+ if ( this._limit > 0 && this._limit < c )
+ return this._limit;
+
+ return c;
+}
+
+/**
+* iterative count - only for testing
+*/
+DBQuery.prototype.itcount = function(){
+ var num = 0;
+ while ( this.hasNext() ){
+ num++;
+ this.next();
+ }
+ return num;
+}
+
+DBQuery.prototype.length = function(){
+ return this.toArray().length;
+}
+
+DBQuery.prototype._addSpecial = function( name , value ){
+ this._ensureSpecial();
+ this._query[name] = value;
+ return this;
+}
+
+DBQuery.prototype.sort = function( sortBy ){
+ return this._addSpecial( "orderby" , sortBy );
+}
+
+DBQuery.prototype.hint = function( hint ){
+ return this._addSpecial( "$hint" , hint );
+}
+
+DBQuery.prototype.min = function( min ) {
+ return this._addSpecial( "$min" , min );
+}
+
+DBQuery.prototype.max = function( max ) {
+ return this._addSpecial( "$max" , max );
+}
+
+DBQuery.prototype.showDiskLoc = function() {
+ return this._addSpecial( "$showDiskLoc" , true);
+}
+
+DBQuery.prototype.forEach = function( func ){
+ while ( this.hasNext() )
+ func( this.next() );
+}
+
+DBQuery.prototype.map = function( func ){
+ var a = [];
+ while ( this.hasNext() )
+ a.push( func( this.next() ) );
+ return a;
+}
+
+DBQuery.prototype.arrayAccess = function( idx ){
+ return this.toArray()[idx];
+}
+DBQuery.prototype.comment = function (comment) {
+ var n = this.clone();
+ n._ensureSpecial();
+ n._addSpecial("$comment", comment);
+ return n.next();
+}
+
+DBQuery.prototype.explain = function (verbose) {
+ /* verbose=true --> include allPlans, oldPlan fields */
+ var n = this.clone();
+ n._ensureSpecial();
+ n._query.$explain = true;
+ n._limit = Math.abs(n._limit) * -1;
+ var e = n.next();
+
+ function cleanup(obj){
+ if (typeof(obj) != 'object'){
+ return;
+ }
+
+ delete obj.allPlans;
+ delete obj.oldPlan;
+
+ if (typeof(obj.length) == 'number'){
+ for (var i=0; i < obj.length; i++){
+ cleanup(obj[i]);
+ }
+ }
+
+ if (obj.shards){
+ for (var key in obj.shards){
+ cleanup(obj.shards[key]);
+ }
+ }
+
+ if (obj.clauses){
+ cleanup(obj.clauses);
+ }
+ }
+
+ if (!verbose)
+ cleanup(e);
+
+ return e;
+}
+
+DBQuery.prototype.snapshot = function(){
+ this._ensureSpecial();
+ this._query.$snapshot = true;
+ return this;
+}
+
+DBQuery.prototype.pretty = function(){
+ this._prettyShell = true;
+ return this;
+}
+
+DBQuery.prototype.shellPrint = function(){
+ try {
+ var start = new Date().getTime();
+ var n = 0;
+ while ( this.hasNext() && n < DBQuery.shellBatchSize ){
+ var s = this._prettyShell ? tojson( this.next() ) : tojson( this.next() , "" , true );
+ print( s );
+ n++;
+ }
+ if (typeof _verboseShell !== 'undefined' && _verboseShell) {
+ var time = new Date().getTime() - start;
+ print("Fetched " + n + " record(s) in " + time + "ms");
+ }
+ if ( this.hasNext() ){
+ print( "Type \"it\" for more" );
+ ___it___ = this;
+ }
+ else {
+ ___it___ = null;
+ }
+ }
+ catch ( e ){
+ print( e );
+ }
+
+}
+
+DBQuery.prototype.toString = function(){
+ return "DBQuery: " + this._ns + " -> " + tojson( this.query );
+}
+
+DBQuery.shellBatchSize = 20;
diff --git a/src/mongo/shell/servers.js b/src/mongo/shell/servers.js
new file mode 100755
index 00000000000..6b12918db2e
--- /dev/null
+++ b/src/mongo/shell/servers.js
@@ -0,0 +1,2618 @@
+_parsePath = function() {
+ var dbpath = "";
+ for( var i = 0; i < arguments.length; ++i )
+ if ( arguments[ i ] == "--dbpath" )
+ dbpath = arguments[ i + 1 ];
+
+ if ( dbpath == "" )
+ throw "No dbpath specified";
+
+ return dbpath;
+}
+
+_parsePort = function() {
+ var port = "";
+ for( var i = 0; i < arguments.length; ++i )
+ if ( arguments[ i ] == "--port" )
+ port = arguments[ i + 1 ];
+
+ if ( port == "" )
+ throw "No port specified";
+ return port;
+}
+
+connectionURLTheSame = function( a , b ){
+
+ if ( a == b )
+ return true;
+
+ if ( ! a || ! b )
+ return false;
+
+ if( a.host ) return connectionURLTheSame( a.host, b )
+ if( b.host ) return connectionURLTheSame( a, b.host )
+
+ if( a.name ) return connectionURLTheSame( a.name, b )
+ if( b.name ) return connectionURLTheSame( a, b.name )
+
+ if( a.indexOf( "/" ) < 0 && b.indexOf( "/" ) < 0 ){
+ a = a.split( ":" )
+ b = b.split( ":" )
+
+ if( a.length != b.length ) return false
+
+ if( a.length == 2 && a[1] != b[1] ) return false
+
+ if( a[0] == "localhost" || a[0] == "127.0.0.1" ) a[0] = getHostName()
+ if( b[0] == "localhost" || b[0] == "127.0.0.1" ) b[0] = getHostName()
+
+ return a[0] == b[0]
+ }
+ else {
+ var a0 = a.split( "/" )[0]
+ var b0 = b.split( "/" )[0]
+ return a0 == b0
+ }
+}
+
+assert( connectionURLTheSame( "foo" , "foo" ) )
+assert( ! connectionURLTheSame( "foo" , "bar" ) )
+
+assert( connectionURLTheSame( "foo/a,b" , "foo/b,a" ) )
+assert( ! connectionURLTheSame( "foo/a,b" , "bar/a,b" ) )
+
+createMongoArgs = function( binaryName , args ){
+ var fullArgs = [ binaryName ];
+
+ if ( args.length == 1 && isObject( args[0] ) ){
+ var o = args[0];
+ for ( var k in o ){
+ if ( o.hasOwnProperty(k) ){
+ if ( k == "v" && isNumber( o[k] ) ){
+ var n = o[k];
+ if ( n > 0 ){
+ if ( n > 10 ) n = 10;
+ var temp = "-";
+ while ( n-- > 0 ) temp += "v";
+ fullArgs.push( temp );
+ }
+ }
+ else {
+ fullArgs.push( "--" + k );
+ if ( o[k] != "" )
+ fullArgs.push( "" + o[k] );
+ }
+ }
+ }
+ }
+ else {
+ for ( var i=0; i<args.length; i++ )
+ fullArgs.push( args[i] )
+ }
+
+ return fullArgs;
+}
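+
+// Illustrative sketch of the array createMongoArgs builds from a single
+// options object (key order follows object iteration order; values made up):
+//
+//     createMongoArgs( "mongod" , [ { port : 30000 , dbpath : "/data/db/test" , v : 2 } ] )
+//     // -> [ "mongod" , "--port" , "30000" , "--dbpath" , "/data/db/test" , "-vv" ]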
+
+
+MongoRunner = function(){}
+
+MongoRunner.dataDir = "/data/db"
+MongoRunner.dataPath = "/data/db/"
+MongoRunner.usedPortMap = {}
+MongoRunner.logicalOptions = { runId : true,
+ pathOpts : true,
+ remember : true,
+ noRemember : true,
+ appendOptions : true,
+ restart : true,
+ noCleanData : true,
+ cleanData : true,
+ startClean : true,
+ forceLock : true,
+ useLogFiles : true,
+ useHostName : true,
+ useHostname : true,
+ noReplSet : true,
+ forgetPort : true,
+ arbiter : true,
+ noJournalPrealloc : true,
+ noJournal : true }
+
+MongoRunner.toRealPath = function( path, pathOpts ){
+
+ // Replace all $pathOptions with actual values
+ pathOpts = pathOpts || {}
+ path = path.replace( /\$dataPath/g, MongoRunner.dataPath )
+ path = path.replace( /\$dataDir/g, MongoRunner.dataDir )
+ for( key in pathOpts ){
+ path = path.replace( RegExp( "\\$" + key, "g" ), pathOpts[ key ] )
+ }
+
+ // Relative path
+ if( ! path.startsWith( "/" ) ){
+ if( path != "" && ! path.endsWith( "/" ) )
+ path += "/"
+
+ path = MongoRunner.dataPath + path
+ }
+
+ return path
+
+}
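+
+// For example, with the defaults above (values illustrative):
+//
+//     MongoRunner.toRealPath( "$dataDir/mongod-$port" , { port : 27017 } )
+//     // -> "/data/db/mongod-27017"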
+
+MongoRunner.toRealDir = function( path, pathOpts ){
+
+ path = MongoRunner.toRealPath( path, pathOpts )
+
+ if( path.endsWith( "/" ) )
+ path = path.substring( 0, path.length - 1 )
+
+ return path
+}
+
+MongoRunner.toRealFile = MongoRunner.toRealDir
+
+MongoRunner.nextOpenPort = function(){
+
+ var i = 0;
+ while( MongoRunner.usedPortMap[ "" + ( 27000 + i ) ] ) i++;
+ MongoRunner.usedPortMap[ "" + ( 27000 + i ) ] = true
+
+ return 27000 + i
+
+}
+
+MongoRunner.arrOptions = function( binaryName , args ){
+
+ var fullArgs = [ binaryName ]
+
+ if ( isObject( args ) || ( args.length == 1 && isObject( args[0] ) ) ){
+
+ var o = isObject( args ) ? args : args[0]
+ for ( var k in o ){
+
+ if( ! o.hasOwnProperty(k) || k in MongoRunner.logicalOptions ) continue
+
+ if ( ( k == "v" || k == "verbose" ) && isNumber( o[k] ) ){
+ var n = o[k]
+ if ( n > 0 ){
+ if ( n > 10 ) n = 10
+ var temp = "-"
+ while ( n-- > 0 ) temp += "v"
+ fullArgs.push( temp )
+ }
+ }
+ else {
+ if( o[k] == undefined || o[k] == null ) continue
+ fullArgs.push( "--" + k )
+ if ( o[k] != "" )
+ fullArgs.push( "" + o[k] )
+ }
+ }
+ }
+ else {
+ for ( var i=0; i<args.length; i++ )
+ fullArgs.push( args[i] )
+ }
+
+ return fullArgs
+}
+
+MongoRunner.arrToOpts = function( arr ){
+
+ var opts = {}
+ for( var i = 1; i < arr.length; i++ ){
+ if( arr[i].startsWith( "-" ) ){
+ var opt = arr[i].replace( /^-/, "" ).replace( /^-/, "" )
+
+ if( arr.length > i + 1 && ! arr[ i + 1 ].startsWith( "-" ) ){
+ opts[ opt ] = arr[ i + 1 ]
+ i++
+ }
+ else{
+ opts[ opt ] = ""
+ }
+
+ if( opt.replace( /v/g, "" ) == "" ){
+ opts[ "verbose" ] = opt.length
+ }
+ }
+ }
+
+ return opts
+}
+
+MongoRunner.savedOptions = {}
+
+MongoRunner.mongoOptions = function( opts ){
+
+ // Initialize and create a copy of the opts
+ opts = Object.merge( opts || {}, {} )
+
+ if( ! opts.restart ) opts.restart = false
+
+ // RunId can come from a number of places
+ if( isObject( opts.restart ) ){
+ opts.runId = opts.restart
+ opts.restart = true
+ }
+
+ if( isObject( opts.remember ) ){
+ opts.runId = opts.remember
+ opts.remember = true
+ }
+ else if( opts.remember == undefined ){
+ // Remember by default if we're restarting
+ opts.remember = opts.restart
+ }
+
+ // If we passed in restart : <conn> or runId : <conn>
+ if( isObject( opts.runId ) && opts.runId.runId ) opts.runId = opts.runId.runId
+
+ if( opts.restart && opts.remember ) opts = Object.merge( MongoRunner.savedOptions[ opts.runId ], opts )
+
+ // Create a new runId
+ opts.runId = opts.runId || ObjectId()
+
+ // Save the port if required
+ if( ! opts.forgetPort ) opts.port = opts.port || MongoRunner.nextOpenPort()
+
+ var shouldRemember = ( ! opts.restart && ! opts.noRemember ) || ( opts.restart && opts.appendOptions )
+
+ if ( shouldRemember ){
+ MongoRunner.savedOptions[ opts.runId ] = Object.merge( opts, {} )
+ }
+
+ opts.port = opts.port || MongoRunner.nextOpenPort()
+ MongoRunner.usedPortMap[ "" + parseInt( opts.port ) ] = true
+
+ opts.pathOpts = Object.merge( opts.pathOpts || {}, { port : "" + opts.port, runId : "" + opts.runId } )
+
+ return opts
+}
+
+MongoRunner.mongodOptions = function( opts ){
+
+ opts = MongoRunner.mongoOptions( opts )
+
+ opts.dbpath = MongoRunner.toRealDir( opts.dbpath || "$dataDir/mongod-$port",
+ opts.pathOpts )
+
+ opts.pathOpts = Object.merge( opts.pathOpts, { dbpath : opts.dbpath } )
+
+ if( ! opts.logFile && opts.useLogFiles ){
+ opts.logFile = opts.dbpath + "/mongod.log"
+ }
+ else if( opts.logFile ){
+ opts.logFile = MongoRunner.toRealFile( opts.logFile, opts.pathOpts )
+ }
+
+ if( jsTestOptions().noJournalPrealloc || opts.noJournalPrealloc )
+ opts.nopreallocj = ""
+
+ if( jsTestOptions().noJournal || opts.noJournal )
+ opts.nojournal = ""
+
+ if( jsTestOptions().keyFile && !opts.keyFile) {
+ opts.keyFile = jsTestOptions().keyFile
+ }
+
+ if( opts.noReplSet ) opts.replSet = null
+ if( opts.arbiter ) opts.oplogSize = 1
+
+ return opts
+}
+
+MongoRunner.mongosOptions = function( opts ){
+
+ opts = MongoRunner.mongoOptions( opts )
+
+ opts.pathOpts = Object.merge( opts.pathOpts,
+ { configdb : opts.configdb.replace( /:|,/g, "-" ) } )
+
+ if( ! opts.logFile && opts.useLogFiles ){
+ opts.logFile = MongoRunner.toRealFile( "$dataDir/mongos-$configdb-$port.log",
+ opts.pathOpts )
+ }
+ else if( opts.logFile ){
+ opts.logFile = MongoRunner.toRealFile( opts.logFile, opts.pathOpts )
+ }
+
+ if( jsTestOptions().keyFile && !opts.keyFile) {
+ opts.keyFile = jsTestOptions().keyFile
+ }
+
+ return opts
+}
+
+MongoRunner.runMongod = function( opts ){
+
+ var useHostName = false
+ var runId = null
+ if( isObject( opts ) ) {
+
+ opts = MongoRunner.mongodOptions( opts )
+
+ useHostName = opts.useHostName || opts.useHostname
+ runId = opts.runId
+
+ if( opts.forceLock ) removeFile( opts.dbpath + "/mongod.lock" )
+ if( ( opts.cleanData || opts.startClean ) || ( ! opts.restart && ! opts.noCleanData ) ){
+ print( "Resetting db path '" + opts.dbpath + "'" )
+ resetDbpath( opts.dbpath )
+ }
+
+ opts = MongoRunner.arrOptions( "mongod", opts )
+ }
+
+ var mongod = startMongoProgram.apply( null, opts )
+ mongod.commandLine = MongoRunner.arrToOpts( opts )
+ mongod.name = (useHostName ? getHostName() : "localhost") + ":" + mongod.commandLine.port
+ mongod.host = mongod.name
+ mongod.port = parseInt( mongod.commandLine.port )
+ mongod.runId = runId || ObjectId()
+ mongod.savedOptions = MongoRunner.savedOptions[ mongod.runId ]
+
+ return mongod
+}
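+
+// A minimal usage sketch (port and dbpath values are illustrative):
+//
+//     var conn = MongoRunner.runMongod( { port : 27000 , dbpath : "$dataDir/mytest" } )
+//     // ... run commands against conn ...
+//     MongoRunner.stopMongod( conn.port )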
+
+MongoRunner.runMongos = function( opts ){
+
+ var useHostName = false
+ var runId = null
+ if( isObject( opts ) ) {
+
+ opts = MongoRunner.mongosOptions( opts )
+
+ useHostName = opts.useHostName || opts.useHostname
+ runId = opts.runId
+
+ opts = MongoRunner.arrOptions( "mongos", opts )
+ }
+
+ var mongos = startMongoProgram.apply( null, opts )
+ mongos.commandLine = MongoRunner.arrToOpts( opts )
+ mongos.name = (useHostName ? getHostName() : "localhost") + ":" + mongos.commandLine.port
+ mongos.host = mongos.name
+ mongos.port = parseInt( mongos.commandLine.port )
+ mongos.runId = runId || ObjectId()
+ mongos.savedOptions = MongoRunner.savedOptions[ mongos.runId ]
+
+ return mongos
+}
+
+MongoRunner.stopMongod = function( port, signal ){
+
+ if( ! port ) {
+ print( "Cannot stop mongo process " + port )
+ return
+ }
+
+ signal = signal || 15
+
+ if( port.port )
+ port = parseInt( port.port )
+
+ if( port instanceof ObjectId ){
+ var opts = MongoRunner.savedOptions[ port ]
+ if( opts ) port = parseInt( opts.port )
+ }
+
+ var exitCode = stopMongod( parseInt( port ), parseInt( signal ) )
+
+ delete MongoRunner.usedPortMap[ "" + parseInt( port ) ]
+
+ return exitCode
+}
+
+MongoRunner.stopMongos = MongoRunner.stopMongod
+
+MongoRunner.isStopped = function( port ){
+
+ if( ! port ) {
+ print( "Cannot detect if process " + port + " is stopped." )
+ return
+ }
+
+ if( port.port )
+ port = parseInt( port.port )
+
+ if( port instanceof ObjectId ){
+ var opts = MongoRunner.savedOptions[ port ]
+ if( opts ) port = parseInt( opts.port )
+ }
+
+ return MongoRunner.usedPortMap[ "" + parseInt( port ) ] ? false : true
+}
+
+__nextPort = 27000;
+startMongodTest = function (port, dirname, restart, extraOptions ) {
+ if (!port)
+ port = __nextPort++;
+ var f = startMongodEmpty;
+ if (restart)
+ f = startMongodNoReset;
+ if (!dirname)
+ dirname = "" + port; // e.g., data/db/27000
+
+ var useHostname = false;
+ if (extraOptions) {
+ useHostname = extraOptions.useHostname;
+ delete extraOptions.useHostname;
+ }
+
+
+ var options =
+ {
+ port: port,
+ dbpath: "/data/db/" + dirname,
+ noprealloc: "",
+ smallfiles: "",
+ oplogSize: "40",
+ nohttpinterface: ""
+ };
+
+ if( jsTestOptions().noJournal ) options["nojournal"] = ""
+ if( jsTestOptions().noJournalPrealloc ) options["nopreallocj"] = ""
+ if( jsTestOptions().auth ) options["auth"] = ""
+ if( jsTestOptions().keyFile && (!extraOptions || !extraOptions['keyFile']) ) options['keyFile'] = jsTestOptions().keyFile
+
+ if ( extraOptions )
+ Object.extend( options , extraOptions );
+
+ var conn = f.apply(null, [ options ] );
+
+ conn.name = (useHostname ? getHostName() : "localhost") + ":" + port;
+
+ if (options['auth'] || options['keyFile']) {
+ if (!this.shardsvr && !options.replSet) {
+ jsTest.addAuth(conn);
+ }
+ jsTest.authenticate(conn);
+ }
+ return conn;
+}
+
+// Start a mongod instance and return a 'Mongo' object connected to it.
+// This function's arguments are passed as command line arguments to mongod.
+// The specified 'dbpath' is cleared if it exists, created if not.
+// var conn = startMongodEmpty("--port", 30000, "--dbpath", "asdf");
+startMongodEmpty = function () {
+ var args = createMongoArgs("mongod", arguments);
+
+ var dbpath = _parsePath.apply(null, args);
+ resetDbpath(dbpath);
+
+ return startMongoProgram.apply(null, args);
+}
+startMongod = function () {
+ print("startMongod WARNING DELETES DATA DIRECTORY THIS IS FOR TESTING ONLY");
+ return startMongodEmpty.apply(null, arguments);
+}
+startMongodNoReset = function(){
+ var args = createMongoArgs( "mongod" , arguments );
+ return startMongoProgram.apply( null, args );
+}
+
+startMongos = function(args){
+ return MongoRunner.runMongos(args);
+}
+
+/* Start mongod or mongos and return a Mongo() object connected to it.
+ This function's first argument is the "mongod" or "mongos" program name,
+ and subsequent arguments to this function are passed as
+ command line arguments to the program.
+*/
+startMongoProgram = function(){
+ var port = _parsePort.apply( null, arguments );
+
+ _startMongoProgram.apply( null, arguments );
+
+ var m;
+ assert.soon
+ ( function() {
+ try {
+ m = new Mongo( "127.0.0.1:" + port );
+ return true;
+ } catch( e ) {
+ }
+ return false;
+ }, "unable to connect to mongo program on port " + port, 600 * 1000 );
+
+ return m;
+}
+
+// Start a mongo program instance. This function's first argument is the
+// program name, and subsequent arguments to this function are passed as
+// command line arguments to the program. Returns pid of the spawned program.
+startMongoProgramNoConnect = function() {
+ return _startMongoProgram.apply( null, arguments );
+}
+
+myPort = function() {
+ var m = db.getMongo();
+ if ( m.host.match( /:/ ) )
+ return m.host.match( /:(.*)/ )[ 1 ];
+ else
+ return 27017;
+}
+
+/**
+ * otherParams can be:
+ * * useHostname to use the hostname (instead of localhost)
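+ *
+ * The constructor also accepts a single object argument; a sketch of that
+ * form (all values illustrative):
+ *   new ShardingTest( { name : "test" , shards : 2 , mongos : 1 , other : { rs : true } } )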
+ */
+ShardingTest = function( testName , numShards , verboseLevel , numMongos , otherParams ){
+
+ // Check if testName is an object, if so, pull params from there
+ var keyFile = undefined
+ otherParams = Object.merge( otherParams || {}, {} )
+ otherParams.extraOptions = otherParams.extraOptions || {}
+
+ if( isObject( testName ) ){
+
+ var params = Object.merge( testName, {} )
+
+ testName = params.name || "test"
+
+ otherParams = Object.merge( params.other || {}, {} )
+ otherParams.extraOptions = otherParams.extraOptions || {}
+
+ numShards = params.shards || 2
+ verboseLevel = params.verbose || 0
+ numMongos = params.mongos || 1
+
+ keyFile = params.keyFile || otherParams.keyFile || otherParams.extraOptions.keyFile
+ otherParams.nopreallocj = params.nopreallocj || otherParams.nopreallocj
+ otherParams.rs = params.rs || ( params.other ? params.other.rs : undefined )
+ otherParams.chunksize = params.chunksize || ( params.other ? params.other.chunksize : undefined )
+
+ // Allow specifying options like :
+ // { mongos : [ { noprealloc : "" } ], config : [ { smallfiles : "" } ], shards : { rs : true, d : true } }
+ if( isObject( numShards ) ){
+ var len = 0
+ for( var i in numShards ){
+ otherParams[ "" + i ] = numShards[i]
+ len++
+ }
+ numShards = len
+ }
+
+ if( isObject( numMongos ) ){
+ var len = 0
+ for( var i in numMongos ){
+ otherParams[ "" + i ] = numMongos[i]
+ len++
+ }
+ numMongos = len
+ }
+ else if( Array.isArray( numMongos ) ){
+ for( var i = 0; i < numMongos.length; i++ )
+ otherParams[ "s" + i ] = numMongos[i]
+ numMongos = numMongos.length
+ }
+
+ if( isObject( params.config ) ){
+ var len = 0
+ for( var i in params.config ){
+ otherParams[ "" + i ] = params.config[i]
+ len++
+ }
+
+ // If we're specifying explicit config options, we need separate config servers
+ otherParams.separateConfig = true
+ if( len == 3 ) otherParams.sync = true
+ else otherParams.sync = false
+ }
+ else if( Array.isArray( params.config ) ){
+ for( var i = 0; i < params.config.length; i++ )
+ otherParams[ "c" + i ] = params.config[i]
+
+ // If we're specifying explicit config options, we need separate config servers
+ otherParams.separateConfig = true
+ if( params.config.length == 3 ) otherParams.sync = true
+ else otherParams.sync = false
+ }
+ else if( params.config ) {
+
+ if( params.config == 3 ){
+ otherParams.separateConfig = otherParams.separateConfig || true
+ otherParams.sync = true
+ }
+
+ }
+ }
+ else {
+ // Handle legacy stuff
+ keyFile = otherParams.extraOptions.keyFile
+ }
+
+ this._testName = testName
+ this._otherParams = otherParams
+
+ var pathOpts = this.pathOpts = { testName : testName }
+
+ var hasRS = false
+ for( var k in otherParams ){
+ if( k.startsWith( "rs" ) ){
+ hasRS = true
+ break
+ }
+ }
+
+ if( hasRS ){
+ otherParams.separateConfig = true
+ otherParams.useHostname = otherParams.useHostname == undefined ? true : otherParams.useHostname
+ }
+
+ var localhost = otherParams.useHostname ? getHostName() : "localhost";
+
+ this._alldbpaths = []
+ this._connections = []
+ this._shardServers = this._connections
+ this._rs = []
+ this._rsObjects = []
+
+ for ( var i = 0; i < numShards; i++ ) {
+ if( otherParams.rs || otherParams["rs" + i] ){
+
+ otherParams.separateConfig = true
+
+ var setName = testName + "-rs" + i;
+
+ var rsDefaults = { useHostname : otherParams.useHostname,
+ noJournalPrealloc : otherParams.nopreallocj,
+ oplogSize : 40,
+ nodes : 3,
+ pathOpts : Object.merge( pathOpts, { shard : i } )}
+
+ rsDefaults = Object.merge( rsDefaults, otherParams.rs )
+ rsDefaults = Object.merge( rsDefaults, otherParams.rsOptions )
+ rsDefaults = Object.merge( rsDefaults, otherParams["rs" + i] )
+
+ var numReplicas = rsDefaults.nodes || otherParams.numReplicas || 3
+ delete rsDefaults.nodes
+
+ print( "Replica set test!" )
+
+ var rs = new ReplSetTest( { name : setName , nodes : numReplicas , startPort : 31100 + ( i * 100 ), useHostName : otherParams.useHostname, keyFile : keyFile, shardSvr : true } );
+ this._rs[i] = { setName : setName , test : rs , nodes : rs.startSet( rsDefaults ) , url : rs.getURL() };
+ rs.initiate();
+ this["rs" + i] = rs
+
+ this._rsObjects[i] = rs
+
+ this._alldbpaths.push( null )
+ this._connections.push( null )
+ }
+ else {
+ var options = { useHostname : otherParams.useHostname,
+ noJournalPrealloc : otherParams.nopreallocj,
+ port : 30000 + i,
+ pathOpts : Object.merge( pathOpts, { shard : i } ),
+ dbpath : "$testName$shard",
+ keyFile : keyFile
+ }
+
+ options = Object.merge( options, otherParams.shardOptions )
+ options = Object.merge( options, otherParams["d" + i] )
+
+ var conn = MongoRunner.runMongod( options );
+
+ this._alldbpaths.push( testName +i )
+ this._connections.push( conn );
+ this["shard" + i] = conn
+ this["d" + i] = conn
+
+ this._rs[i] = null
+ this._rsObjects[i] = null
+ }
+ }
+
+ // Do replication on replica sets if required
+ for ( var i = 0; i < numShards; i++ ){
+ if( ! otherParams.rs && ! otherParams["rs" + i] ) continue
+
+ var rs = this._rs[i].test;
+
+ rs.getMaster().getDB( "admin" ).foo.save( { x : 1 } )
+ rs.awaitReplication();
+
+ var rsConn = new Mongo( rs.getURL() );
+ rsConn.name = rs.getURL();
+ this._connections[i] = rsConn
+ this["shard" + i] = rsConn
+ rsConn.rs = rs
+ }
+
+ this._configServers = []
+ this._configNames = []
+
+ if ( otherParams.sync && ! otherParams.separateConfig && numShards < 3 )
+ throw "if you want sync, you need at least 3 servers";
+
+ for ( var i = 0; i < ( otherParams.sync ? 3 : 1 ) ; i++ ) {
+
+ var conn = null
+
+ if( otherParams.separateConfig ){
+
+ var options = { useHostname : otherParams.useHostname,
+ noJournalPrealloc : otherParams.nopreallocj,
+ port : 40000 + i,
+ pathOpts : Object.merge( pathOpts, { config : i } ),
+ dbpath : "$testName-config$config",
+ keyFile : keyFile
+ }
+
+ options = Object.merge( options, otherParams.configOptions )
+ options = Object.merge( options, otherParams["c" + i] )
+
+ var conn = MongoRunner.runMongod( options )
+
+ // TODO: Needed?
+ this._alldbpaths.push( testName + "-config" + i )
+ }
+ else{
+ conn = this["shard" + i]
+ }
+
+ this._configServers.push( conn );
+ this._configNames.push( conn.name )
+ this["config" + i] = conn
+ this["c" + i] = conn
+ }
+
+ printjson( this._configDB = this._configNames.join( "," ) )
+ this._configConnection = new Mongo( this._configDB )
+ if ( ! otherParams.noChunkSize ) {
+ this._configConnection.getDB( "config" ).settings.insert( { _id : "chunksize" , value : otherParams.chunksize || otherParams.chunkSize || 50 } )
+ }
+
+ print( "ShardingTest " + this._testName + " :\n" + tojson( { config : this._configDB, shards : this._connections } ) );
+
+ this._mongos = []
+ this._mongoses = this._mongos
+ for ( var i = 0; i < ( ( numMongos == 0 ? -1 : numMongos ) || 1 ); i++ ){
+
+ var options = { useHostname : otherParams.useHostname,
+ port : 31000 - i - 1,
+ pathOpts : Object.merge( pathOpts, { mongos : i } ),
+ configdb : this._configDB,
+ verbose : verboseLevel || 0,
+ keyFile : keyFile
+ }
+
+ options = Object.merge( options, otherParams.mongosOptions )
+ options = Object.merge( options, otherParams.extraOptions )
+ options = Object.merge( options, otherParams["s" + i] )
+
+ var conn = MongoRunner.runMongos( options )
+
+ this._mongos.push( conn );
+ if ( i == 0 ) this.s = conn
+ this["s" + i] = conn
+ }
+
+ var admin = this.admin = this.s.getDB( "admin" );
+ this.config = this.s.getDB( "config" );
+
+ if ( ! otherParams.manualAddShard ){
+ this._shardNames = []
+ var shardNames = this._shardNames
+ this._connections.forEach(
+ function(z){
+ var n = z.name;
+ if ( ! n ){
+ n = z.host;
+ if ( ! n )
+ n = z;
+ }
+ print( "ShardingTest " + this._testName + " going to add shard : " + n )
+ x = admin.runCommand( { addshard : n } );
+ printjson( x )
+ shardNames.push( x.shardAdded )
+ z.shardName = x.shardAdded
+ }
+ );
+ }
+
+ if (jsTestOptions().keyFile && !keyFile) {
+ jsTest.addAuth(this._mongos[0]);
+ jsTest.authenticateNodes(this._connections);
+ jsTest.authenticateNodes(this._configServers);
+ jsTest.authenticateNodes(this._mongos);
+ }
+}
+
+ShardingTest.prototype.getRSEntry = function( setName ){
+ for ( var i=0; i<this._rs.length; i++ )
+ if ( this._rs[i].setName == setName )
+ return this._rs[i];
+ throw "can't find rs: " + setName;
+}
+
+ShardingTest.prototype.getConfigIndex = function( config ){
+
+ // Assume config is a # if not a conn object
+ if( ! isObject( config ) ) config = getHostName() + ":" + config
+
+ for( var i = 0; i < this._configServers.length; i++ ){
+ if( connectionURLTheSame( this._configServers[i], config ) ) return i
+ }
+
+ return -1
+}
+
+ShardingTest.prototype.getDB = function( name ){
+ return this.s.getDB( name );
+}
+
+ShardingTest.prototype.getServerName = function( dbname ){
+ var x = this.config.databases.findOne( { _id : "" + dbname } );
+ if ( x )
+ return x.primary;
+ this.config.databases.find().forEach( printjson );
+ throw "couldn't find dbname: " + dbname + " total: " + this.config.databases.count();
+}
+
+
+ShardingTest.prototype.getNonPrimaries = function( dbname ){
+ var x = this.config.databases.findOne( { _id : dbname } );
+ if ( ! x ){
+ this.config.databases.find().forEach( printjson );
+ throw "couldn't find dbname: " + dbname + " total: " + this.config.databases.count();
+ }
+
+ return this.config.shards.find( { _id : { $ne : x.primary } } ).map( function(z){ return z._id; } )
+}
+
+
+ShardingTest.prototype.getConnNames = function(){
+ var names = [];
+ for ( var i=0; i<this._connections.length; i++ ){
+ names.push( this._connections[i].name );
+ }
+ return names;
+}
+
+ShardingTest.prototype.getServer = function( dbname ){
+ var name = this.getServerName( dbname );
+
+ var x = this.config.shards.findOne( { _id : name } );
+ if ( x )
+ name = x.host;
+
+ var rsName = null;
+ if ( name.indexOf( "/" ) > 0 )
+ rsName = name.substring( 0 , name.indexOf( "/" ) );
+
+ for ( var i=0; i<this._connections.length; i++ ){
+ var c = this._connections[i];
+ if ( connectionURLTheSame( name , c.name ) ||
+ connectionURLTheSame( rsName , c.name ) )
+ return c;
+ }
+
+ throw "can't find server for: " + dbname + " name:" + name;
+
+}
+
+ShardingTest.prototype.normalize = function( x ){
+ var z = this.config.shards.findOne( { host : x } );
+ if ( z )
+ return z._id;
+ return x;
+}
+
+ShardingTest.prototype.getOther = function( one ){
+ if ( this._connections.length < 2 )
+ throw "getOther only works with 2 servers";
+
+ if ( one._mongo )
+ one = one._mongo
+
+ for( var i = 0; i < this._connections.length; i++ ){
+ if( this._connections[i] != one ) return this._connections[i]
+ }
+
+ return null
+}
+
+ShardingTest.prototype.getAnother = function( one ){
+ if(this._connections.length < 2)
+ throw "getAnother() only works with multiple servers";
+
+ if ( one._mongo )
+ one = one._mongo
+
+ for(var i = 0; i < this._connections.length; i++){
+ if(this._connections[i] == one)
+ return this._connections[(i + 1) % this._connections.length];
+ }
+}
+
+ShardingTest.prototype.getFirstOther = function( one ){
+ for ( var i=0; i<this._connections.length; i++ ){
+ if ( this._connections[i] != one )
+ return this._connections[i];
+ }
+ throw "impossible";
+}
+
+ShardingTest.prototype.stop = function(){
+ for ( var i=0; i<this._mongos.length; i++ ){
+ stopMongoProgram( 31000 - i - 1 );
+ }
+ for ( var i=0; i<this._connections.length; i++){
+ stopMongod( 30000 + i );
+ }
+ if ( this._rs ){
+ for ( var i=0; i<this._rs.length; i++ ){
+ if( this._rs[i] ) this._rs[i].test.stopSet( 15 );
+ }
+ }
+ if( this._otherParams.separateConfig ){
+ for ( var i=0; i<this._configServers.length; i++ ){
+ MongoRunner.stopMongod( this._configServers[i] )
+ }
+ }
+ if ( this._alldbpaths ){
+ for( i=0; i<this._alldbpaths.length; i++ ){
+ resetDbpath( "/data/db/" + this._alldbpaths[i] );
+ }
+ }
+
+ print('*** ShardingTest ' + this._testName + " completed successfully ***");
+}
+
+ShardingTest.prototype.adminCommand = function(cmd){
+ var res = this.admin.runCommand( cmd );
+ if ( res && res.ok == 1 )
+ return true;
+
+ throw "command " + tojson( cmd ) + " failed: " + tojson( res );
+}
+
+ShardingTest.prototype._rangeToString = function(r){
+ return tojsononeline( r.min ) + " -> " + tojsononeline( r.max );
+}
+
+ShardingTest.prototype.printChangeLog = function(){
+ var s = this;
+ this.config.changelog.find().forEach(
+ function(z){
+ var msg = z.server + "\t" + z.time + "\t" + z.what;
+ for ( i=z.what.length; i<15; i++ )
+ msg += " ";
+ msg += " " + z.ns + "\t";
+ if ( z.what == "split" ){
+ msg += s._rangeToString( z.details.before ) + " -->> (" + s._rangeToString( z.details.left ) + "),(" + s._rangeToString( z.details.right ) + ")";
+ }
+ else if (z.what == "multi-split" ){
+ msg += s._rangeToString( z.details.before ) + " -->> (" + z.details.number + "/" + z.details.of + " " + s._rangeToString( z.details.chunk ) + ")";
+ }
+ else {
+ msg += tojsononeline( z.details );
+ }
+
+ print( "ShardingTest " + msg )
+ }
+ );
+
+}
+
+ShardingTest.prototype.getChunksString = function( ns ){
+ var q = {}
+ if ( ns )
+ q.ns = ns;
+
+ var s = "";
+ this.config.chunks.find( q ).sort( { ns : 1 , min : 1 } ).forEach(
+ function(z){
+ s += " " + z._id + "\t" + z.lastmod.t + "|" + z.lastmod.i + "\t" + tojson(z.min) + " -> " + tojson(z.max) + " " + z.shard + " " + z.ns + "\n";
+ }
+ );
+
+ return s;
+}
+
+ShardingTest.prototype.printChunks = function( ns ){
+ print( "ShardingTest " + this.getChunksString( ns ) );
+}
+
+ShardingTest.prototype.printShardingStatus = function(){
+ printShardingStatus( this.config );
+}
+
+ShardingTest.prototype.printCollectionInfo = function( ns , msg ){
+ var out = "";
+ if ( msg )
+ out += msg + "\n";
+ out += "sharding collection info: " + ns + "\n";
+ for ( var i=0; i<this._connections.length; i++ ){
+ var c = this._connections[i];
+ out += " mongod " + c + " " + tojson( c.getCollection( ns ).getShardVersion() , " " , true ) + "\n";
+ }
+ for ( var i=0; i<this._mongos.length; i++ ){
+ var c = this._mongos[i];
+ out += " mongos " + c + " " + tojson( c.getCollection( ns ).getShardVersion() , " " , true ) + "\n";
+ }
+
+ out += this.getChunksString( ns );
+
+ print( "ShardingTest " + out );
+}
+
+printShardingStatus = function( configDB , verbose ){
+ if (configDB === undefined)
+ configDB = db.getSisterDB('config')
+
+ var version = configDB.getCollection( "version" ).findOne();
+ if ( version == null ){
+ print( "printShardingStatus: this db does not have sharding enabled. be sure you are connecting to a mongos from the shell and not to a mongod." );
+ return;
+ }
+
+ var raw = "";
+ var output = function(s){
+ raw += s + "\n";
+ }
+ output( "--- Sharding Status --- " );
+ output( " sharding version: " + tojson( configDB.getCollection( "version" ).findOne() ) );
+
+ output( " shards:" );
+ configDB.shards.find().sort( { _id : 1 } ).forEach(
+ function(z){
+ output( "\t" + tojsononeline( z ) );
+ }
+ );
+
+ output( " databases:" );
+ configDB.databases.find().sort( { name : 1 } ).forEach(
+ function(db){
+ output( "\t" + tojsononeline(db,"",true) );
+
+ if (db.partitioned){
+ configDB.collections.find( { _id : new RegExp( "^" + db._id + "\\." ) } ).sort( { _id : 1 } ).forEach(
+ function( coll ){
+ if ( coll.dropped == false ){
+ output("\t\t" + coll._id + " chunks:");
+
+ res = configDB.chunks.group( { cond : { ns : coll._id } , key : { shard : 1 } , reduce : function( doc , out ){ out.nChunks++; } , initial : { nChunks : 0 } } );
+ var totalChunks = 0;
+ res.forEach( function(z){
+ totalChunks += z.nChunks;
+ output( "\t\t\t\t" + z.shard + "\t" + z.nChunks );
+ } )
+
+ if ( totalChunks < 20 || verbose ){
+ configDB.chunks.find( { "ns" : coll._id } ).sort( { min : 1 } ).forEach(
+ function(chunk){
+ output( "\t\t\t" + tojson( chunk.min ) + " -->> " + tojson( chunk.max ) +
+ " on : " + chunk.shard + " " + tojson( chunk.lastmod ) + " " +
+ ( chunk.jumbo ? "jumbo " : "" ) );
+ }
+ );
+ }
+ else {
+ output( "\t\t\ttoo many chunks to print, use verbose if you want to force print" );
+ }
+ }
+ }
+ )
+ }
+ }
+ );
+
+ print( raw );
+}
+
+printShardingSizes = function(){
+ configDB = db.getSisterDB('config')
+
+ var version = configDB.getCollection( "version" ).findOne();
+ if ( version == null ){
+ print( "printShardingSizes : not a shard db!" );
+ return;
+ }
+
+ var raw = "";
+ var output = function(s){
+ raw += s + "\n";
+ }
+ output( "--- Sharding Status --- " );
+ output( " sharding version: " + tojson( configDB.getCollection( "version" ).findOne() ) );
+
+ output( " shards:" );
+ var shards = {};
+ configDB.shards.find().forEach(
+ function(z){
+ shards[z._id] = new Mongo(z.host);
+ output( " " + tojson(z) );
+ }
+ );
+
+ var saveDB = db;
+ output( " databases:" );
+ configDB.databases.find().sort( { name : 1 } ).forEach(
+ function(db){
+ output( "\t" + tojson(db,"",true) );
+
+ if (db.partitioned){
+            configDB.collections.find( { _id : new RegExp( "^" + db._id + "\\." ) } ).sort( { _id : 1 } ).forEach(
+ function( coll ){
+ output("\t\t" + coll._id + " chunks:");
+ configDB.chunks.find( { "ns" : coll._id } ).sort( { min : 1 } ).forEach(
+ function(chunk){
+ var mydb = shards[chunk.shard].getDB(db._id)
+ var out = mydb.runCommand({dataSize: coll._id,
+ keyPattern: coll.key,
+ min: chunk.min,
+ max: chunk.max });
+ delete out.millis;
+ delete out.ok;
+
+ output( "\t\t\t" + tojson( chunk.min ) + " -->> " + tojson( chunk.max ) +
+ " on : " + chunk.shard + " " + tojson( out ) );
+
+ }
+ );
+ }
+ )
+ }
+ }
+ );
+
+ print( raw );
+}
+
+ShardingTest.prototype.sync = function(){
+ this.adminCommand( "connpoolsync" );
+}
+
+ShardingTest.prototype.onNumShards = function( collName , dbName ){
+ this.sync(); // we should sync since we're going directly to mongod here
+ dbName = dbName || "test";
+ var num=0;
+ for ( var i=0; i<this._connections.length; i++ )
+ if ( this._connections[i].getDB( dbName ).getCollection( collName ).count() > 0 )
+ num++;
+ return num;
+}
+
+
+ShardingTest.prototype.shardCounts = function( collName , dbName ){
+ this.sync(); // we should sync since we're going directly to mongod here
+ dbName = dbName || "test";
+ var counts = {}
+ for ( var i=0; i<this._connections.length; i++ )
+ counts[i] = this._connections[i].getDB( dbName ).getCollection( collName ).count();
+ return counts;
+}
+
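+// Returns a map from shard _id to the number of chunks of dbName.collName on that shard.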
+ShardingTest.prototype.chunkCounts = function( collName , dbName ){
+ dbName = dbName || "test";
+ var x = {}
+
+    this.config.shards.find().forEach(
+ function(z){
+ x[z._id] = 0;
+ }
+ );
+
+    this.config.chunks.find( { ns : dbName + "." + collName } ).forEach(
+ function(z){
+ if ( x[z.shard] )
+ x[z.shard]++
+ else
+ x[z.shard] = 1;
+ }
+ );
+ return x;
+
+}
+
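+// Returns the difference between the largest and smallest per-shard chunk counts for a
+// collection; useful for asserting that the balancer has evened out chunk distribution.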
+ShardingTest.prototype.chunkDiff = function( collName , dbName ){
+ var c = this.chunkCounts( collName , dbName );
+ var min = 100000000;
+ var max = 0;
+ for ( var s in c ){
+ if ( c[s] < min )
+ min = c[s];
+ if ( c[s] > max )
+ max = c[s];
+ }
+ print( "ShardingTest input: " + tojson( c ) + " min: " + min + " max: " + max );
+ return max - min;
+}
+
+ShardingTest.prototype.getShard = function( coll, query, includeEmpty ){
+ var shards = this.getShards( coll, query, includeEmpty )
+ assert.eq( shards.length, 1 )
+ return shards[0]
+}
+
+// Returns the shards on which documents matching a particular query reside
+ShardingTest.prototype.getShards = function( coll, query, includeEmpty ){
+ if( ! coll.getDB )
+ coll = this.s.getCollection( coll )
+
+ var explain = coll.find( query ).explain()
+ var shards = []
+
+ if( explain.shards ){
+
+ for( var shardName in explain.shards ){
+ for( var i = 0; i < explain.shards[shardName].length; i++ ){
+ if( includeEmpty || ( explain.shards[shardName][i].n && explain.shards[shardName][i].n > 0 ) )
+ shards.push( shardName )
+ }
+ }
+
+ }
+
+ for( var i = 0; i < shards.length; i++ ){
+ for( var j = 0; j < this._connections.length; j++ ){
+ if ( connectionURLTheSame( this._connections[j] , shards[i] ) ){
+ shards[i] = this._connections[j]
+ break;
+ }
+ }
+ }
+
+ return shards
+}
+
+ShardingTest.prototype.isSharded = function( collName ){
+
+    var dbName = undefined
+
+    // A DB object (anything exposing getCollectionNames) means we were asked about a database
+    if( collName && typeof collName.getCollectionNames == 'function' ){
+        dbName = "" + collName
+        collName = undefined
+    }
+    else {
+        collName = "" + collName
+    }
+
+    if( dbName ){
+        var x = this.config.databases.findOne( { _id : dbName } )
+        if( x ) return x.partitioned
+        else return false
+    }
+
+ if( collName ){
+ var x = this.config.collections.findOne( { _id : collName } )
+ if( x ) return true
+ else return false
+ }
+
+}
+
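+// Enables sharding on the database (if needed), shards the collection on the given key,
+// then optionally splits at 'split' and moves the chunk containing 'move' to another shard,
+// retrying the move up to 5 times. Pass split/move as false to skip those steps.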
+ShardingTest.prototype.shardGo = function( collName , key , split , move , dbName ){
+
+ split = ( split != false ? ( split || key ) : split )
+ move = ( split != false && move != false ? ( move || split ) : false )
+
+ if( collName.getDB )
+ dbName = "" + collName.getDB()
+ else dbName = dbName || "test";
+
+ var c = dbName + "." + collName;
+ if( collName.getDB )
+ c = "" + collName
+
+ var isEmpty = this.s.getCollection( c ).count() == 0
+
+ if( ! this.isSharded( dbName ) )
+ this.s.adminCommand( { enableSharding : dbName } )
+
+ var result = this.s.adminCommand( { shardcollection : c , key : key } )
+ if( ! result.ok ){
+ printjson( result )
+ assert( false )
+ }
+
+ if( split == false ) return
+
+ result = this.s.adminCommand( { split : c , middle : split } );
+ if( ! result.ok ){
+ printjson( result )
+ assert( false )
+ }
+
+ if( move == false ) return
+
+ var result = null
+ for( var i = 0; i < 5; i++ ){
+ result = this.s.adminCommand( { movechunk : c , find : move , to : this.getOther( this.getServer( dbName ) ).name } );
+ if( result.ok ) break;
+ sleep( 5 * 1000 );
+ }
+ printjson( result )
+ assert( result.ok )
+
+};
+
+ShardingTest.prototype.shardColl = ShardingTest.prototype.shardGo
+
+ShardingTest.prototype.setBalancer = function( balancer ){
+ if( balancer || balancer == undefined ){
+ this.config.settings.update( { _id: "balancer" }, { $set : { stopped: false } } , true )
+ }
+ else if( balancer == false ){
+ this.config.settings.update( { _id: "balancer" }, { $set : { stopped: true } } , true )
+ }
+}
+
+ShardingTest.prototype.stopBalancer = function( timeout, interval ) {
+ this.setBalancer( false )
+
+ if( typeof db == "undefined" ) db = undefined
+ var oldDB = db
+
+ db = this.config
+ sh.waitForBalancer( false, timeout, interval )
+ db = oldDB
+}
+
+ShardingTest.prototype.startBalancer = function( timeout, interval ) {
+ this.setBalancer( true )
+
+ if( typeof db == "undefined" ) db = undefined
+ var oldDB = db
+
+ db = this.config
+ sh.waitForBalancer( true, timeout, interval )
+ db = oldDB
+}
+
+/**
+ * Run a mongod process.
+ *
+ * After initializing a MongodRunner, you must call start() on it.
+ * @param {int} port port to run db on, use allocatePorts(num) to requisition
+ * @param {string} dbpath path to use
+ * @param {boolean} peer pass in false (DEPRECATED, was used for replica pair host)
+ * @param {boolean} arbiter pass in false (DEPRECATED, was used for replica pair host)
+ * @param {array} extraArgs other arguments for the command line
+ * @param {object} options other options include no_bind to not bind_ip to 127.0.0.1
+ * (necessary for replica set testing)
+ */
+MongodRunner = function( port, dbpath, peer, arbiter, extraArgs, options ) {
+ this.port_ = port;
+ this.dbpath_ = dbpath;
+ this.peer_ = peer;
+ this.arbiter_ = arbiter;
+ this.extraArgs_ = extraArgs;
+ this.options_ = options ? options : {};
+};
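+
+// Example usage (port and dbpath values are illustrative):
+//   var runner = new MongodRunner( allocatePorts( 1 )[ 0 ], "/data/db/mongod_runner_demo" );
+//   var conn = runner.start();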
+
+/**
+ * Start this mongod process.
+ *
+ * @param {boolean} reuseData If the data directory should be left intact (default is to wipe it)
+ */
+MongodRunner.prototype.start = function( reuseData ) {
+ var args = [];
+ if ( reuseData ) {
+ args.push( "mongod" );
+ }
+ args.push( "--port" );
+ args.push( this.port_ );
+ args.push( "--dbpath" );
+ args.push( this.dbpath_ );
+ args.push( "--nohttpinterface" );
+ args.push( "--noprealloc" );
+ args.push( "--smallfiles" );
+ if (!this.options_.no_bind) {
+ args.push( "--bind_ip" );
+ args.push( "127.0.0.1" );
+ }
+ if ( this.extraArgs_ ) {
+ args = args.concat( this.extraArgs_ );
+ }
+ removeFile( this.dbpath_ + "/mongod.lock" );
+ if ( reuseData ) {
+ return startMongoProgram.apply( null, args );
+ } else {
+ return startMongod.apply( null, args );
+ }
+}
+
+MongodRunner.prototype.port = function() { return this.port_; }
+
+MongodRunner.prototype.toString = function() { return [ this.port_, this.dbpath_, this.peer_, this.arbiter_ ].toString(); }
+
+ToolTest = function( name ){
+ this.name = name;
+ this.port = allocatePorts(1)[0];
+ this.baseName = "jstests_tool_" + name;
+ this.root = "/data/db/" + this.baseName;
+ this.dbpath = this.root + "/";
+ this.ext = this.root + "_external/";
+ this.extFile = this.root + "_external/a";
+ resetDbpath( this.dbpath );
+ resetDbpath( this.ext );
+}
+
+ToolTest.prototype.startDB = function( coll ){
+ assert( ! this.m , "db already running" );
+
+ this.m = startMongoProgram( "mongod" , "--port", this.port , "--dbpath" , this.dbpath , "--nohttpinterface", "--noprealloc" , "--smallfiles" , "--bind_ip", "127.0.0.1" );
+ this.db = this.m.getDB( this.baseName );
+ if ( coll )
+ return this.db.getCollection( coll );
+ return this.db;
+}
+
+ToolTest.prototype.stop = function(){
+ if ( ! this.m )
+ return;
+ stopMongod( this.port );
+ this.m = null;
+ this.db = null;
+
+ print('*** ' + this.name + " completed successfully ***");
+}
+
+ToolTest.prototype.runTool = function(){
+ var a = [ "mongo" + arguments[0] ];
+
+ var hasdbpath = false;
+
+ for ( var i=1; i<arguments.length; i++ ){
+ a.push( arguments[i] );
+ if ( arguments[i] == "--dbpath" )
+ hasdbpath = true;
+ }
+
+ if ( ! hasdbpath ){
+ a.push( "--host" );
+ a.push( "127.0.0.1:" + this.port );
+ }
+
+ return runMongoProgram.apply( null , a );
+}
+
+
+ReplTest = function( name, ports ){
+ this.name = name;
+ this.ports = ports || allocatePorts( 2 );
+}
+
+ReplTest.prototype.getPort = function( master ){
+ if ( master )
+ return this.ports[ 0 ];
+ return this.ports[ 1 ]
+}
+
+ReplTest.prototype.getPath = function( master ){
+ var p = "/data/db/" + this.name + "-";
+ if ( master )
+ p += "master";
+ else
+ p += "slave"
+ return p;
+}
+
+ReplTest.prototype.getOptions = function( master , extra , putBinaryFirst, norepl ){
+
+ if ( ! extra )
+ extra = {};
+
+ if ( ! extra.oplogSize )
+ extra.oplogSize = "40";
+
+ var a = []
+ if ( putBinaryFirst )
+ a.push( "mongod" )
+ a.push( "--nohttpinterface", "--noprealloc", "--bind_ip" , "127.0.0.1" , "--smallfiles" );
+
+ a.push( "--port" );
+ a.push( this.getPort( master ) );
+
+ a.push( "--dbpath" );
+ a.push( this.getPath( master ) );
+
+ if( jsTestOptions().noJournal ) a.push( "--nojournal" )
+ if( jsTestOptions().noJournalPrealloc ) a.push( "--nopreallocj" )
+ if( jsTestOptions().keyFile ) {
+ a.push( "--keyFile" )
+ a.push( jsTestOptions().keyFile )
+ }
+
+ if ( !norepl ) {
+ if ( master ){
+ a.push( "--master" );
+ }
+ else {
+ a.push( "--slave" );
+ a.push( "--source" );
+ a.push( "127.0.0.1:" + this.ports[0] );
+ }
+ }
+
+ for ( var k in extra ){
+ var v = extra[k];
+ if( k in MongoRunner.logicalOptions ) continue
+ a.push( "--" + k );
+ if ( v != null )
+ a.push( v );
+ }
+
+ return a;
+}
+
+ReplTest.prototype.start = function( master , options , restart, norepl ){
+ var lockFile = this.getPath( master ) + "/mongod.lock";
+ removeFile( lockFile );
+ var o = this.getOptions( master , options , restart, norepl );
+
+
+ if ( restart )
+ return startMongoProgram.apply( null , o );
+ else
+ return startMongod.apply( null , o );
+}
+
+ReplTest.prototype.stop = function( master , signal ){
+ if ( arguments.length == 0 ){
+ this.stop( true );
+ this.stop( false );
+ return;
+ }
+
+ print('*** ' + this.name + " completed successfully ***");
+ return stopMongod( this.getPort( master ) , signal || 15 );
+}
+
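+// Returns n consecutive port numbers beginning at startPort (default 31000). Ports are
+// handed out sequentially and are not checked for availability.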
+allocatePorts = function( n , startPort ) {
+ var ret = [];
+ var start = startPort || 31000;
+ for( var i = start; i < start + n; ++i )
+ ret.push( i );
+ return ret;
+}
+
+
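+// Starts three mongods on ports 30000-30002 and opens a single connection to all of them
+// via a comma-separated host list (SyncClusterConnection-style), as used for config servers.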
+SyncCCTest = function( testName , extraMongodOptions ){
+ this._testName = testName;
+ this._connections = [];
+
+ for ( var i=0; i<3; i++ ){
+ this._connections.push( startMongodTest( 30000 + i , testName + i , false, extraMongodOptions ) );
+ }
+
+ this.url = this._connections.map( function(z){ return z.name; } ).join( "," );
+ this.conn = new Mongo( this.url );
+}
+
+SyncCCTest.prototype.stop = function(){
+ for ( var i=0; i<this._connections.length; i++){
+ stopMongod( 30000 + i );
+ }
+
+ print('*** ' + this._testName + " completed successfully ***");
+}
+
+SyncCCTest.prototype.checkHashes = function( dbname , msg ){
+ var hashes = this._connections.map(
+ function(z){
+ return z.getDB( dbname ).runCommand( "dbhash" );
+ }
+ );
+
+ for ( var i=1; i<hashes.length; i++ ){
+ assert.eq( hashes[0].md5 , hashes[i].md5 , "checkHash on " + dbname + " " + msg + "\n" + tojson( hashes ) )
+ }
+}
+
+SyncCCTest.prototype.tempKill = function( num ){
+ num = num || 0;
+ stopMongod( 30000 + num );
+}
+
+SyncCCTest.prototype.tempStart = function( num ){
+ num = num || 0;
+ this._connections[num] = startMongodTest( 30000 + num , this._testName + num , true );
+}
+
+
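+// Launches a separate mongo shell running jsCode against the given port (or the current db's
+// host) and returns a function that blocks until that shell exits. Illustrative use:
+//   var join = startParallelShell( "db.test.insert( { x : 1 } )" );
+//   join();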
+function startParallelShell( jsCode, port ){
+ var x;
+
+ var args = ["mongo"];
+ if (port) {
+ args.push("--port", port);
+ }
+
+    if (typeof TestData == "object") {
+ jsCode = "TestData = " + tojson(TestData) + ";jsTest.authenticate(db.getMongo());" + jsCode;
+ }
+
+ args.push("--eval", jsCode);
+
+ if (typeof db == "object") {
+ args.push(db.getMongo().host);
+ }
+
+ x = startMongoProgramNoConnect.apply(null, args);
+ return function(){
+ waitProgram( x );
+ };
+}
+
+var testingReplication = false;
+
+function skipIfTestingReplication(){
+ if (testingReplication) {
+ print("skipIfTestingReplication skipping");
+ quit(0);
+ }
+}
+
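+// Test harness for a replica set. opts include: name, host, useHostName, nodes (a count, an
+// array of per-node options, or an object whose keys starting with "a" mark arbiters),
+// oplogSize, useSeedList, bridged, keyFile, shardSvr, and startPort.
+// Typical use (illustrative):
+//   var rt = new ReplSetTest( { name : "demo" , nodes : 3 } );
+//   rt.startSet(); rt.initiate(); var master = rt.getMaster();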
+ReplSetTest = function( opts ){
+ this.name = opts.name || "testReplSet";
+ this.host = opts.host || getHostName();
+ this.useHostName = opts.useHostName
+ this.numNodes = opts.nodes || 0;
+ this.oplogSize = opts.oplogSize || 40;
+ this.useSeedList = opts.useSeedList || false;
+ this.bridged = opts.bridged || false;
+ this.ports = [];
+ this.keyFile = opts.keyFile
+ this.shardSvr = opts.shardSvr || false;
+
+ this.startPort = opts.startPort || 31000;
+
+ this.nodeOptions = {}
+ if( isObject( this.numNodes ) ){
+ var len = 0
+ for( var i in this.numNodes ){
+ var options = this.nodeOptions[ "n" + len ] = this.numNodes[i]
+ if( i.startsWith( "a" ) ) options.arbiter = true
+ len++
+ }
+ this.numNodes = len
+ }
+ else if( Array.isArray( this.numNodes ) ){
+ for( var i = 0; i < this.numNodes.length; i++ )
+ this.nodeOptions[ "n" + i ] = this.numNodes[i]
+ this.numNodes = this.numNodes.length
+ }
+
+ if(this.bridged) {
+ this.bridgePorts = [];
+
+ var allPorts = allocatePorts( this.numNodes * 2 , this.startPort );
+ for(var i=0; i < this.numNodes; i++) {
+ this.ports[i] = allPorts[i*2];
+ this.bridgePorts[i] = allPorts[i*2 + 1];
+ }
+
+ this.initBridges();
+ }
+ else {
+ this.ports = allocatePorts( this.numNodes , this.startPort );
+ }
+
+ this.nodes = []
+ this.initLiveNodes()
+
+ Object.extend( this, ReplSetTest.Health )
+ Object.extend( this, ReplSetTest.State )
+
+}
+
+ReplSetTest.prototype.initBridges = function() {
+ for(var i=0; i<this.ports.length; i++) {
+ startMongoProgram( "mongobridge", "--port", this.bridgePorts[i], "--dest", this.host + ":" + this.ports[i] );
+ }
+}
+
+// List of nodes as host:port strings.
+ReplSetTest.prototype.nodeList = function() {
+ var list = [];
+ for(var i=0; i<this.ports.length; i++) {
+ list.push( this.host + ":" + this.ports[i]);
+ }
+
+ return list;
+}
+
+// Here we store a reference to all reachable nodes.
+ReplSetTest.prototype.initLiveNodes = function() {
+ this.liveNodes = { master: null, slaves: [] }
+}
+
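+// Resolves a node reference (numeric id, a connection in this.nodes, an ObjectId matched
+// against runId, or anything with a nodeId property) to its index; returns undefined otherwise.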
+ReplSetTest.prototype.getNodeId = function(node) {
+
+ if( node.toFixed ) return parseInt( node )
+
+ for( var i = 0; i < this.nodes.length; i++ ){
+ if( this.nodes[i] == node ) return i
+ }
+
+ if( node instanceof ObjectId ){
+ for( var i = 0; i < this.nodes.length; i++ ){
+ if( this.nodes[i].runId == node ) return i
+ }
+ }
+
+ if( node.nodeId ) return parseInt( node.nodeId )
+
+ return undefined
+
+}
+
+ReplSetTest.prototype.getPort = function( n ){
+
+ n = this.getNodeId( n )
+
+ print( "ReplSetTest n: " + n + " ports: " + tojson( this.ports ) + "\t" + this.ports[n] + " " + typeof(n) );
+ return this.ports[ n ];
+}
+
+ReplSetTest.prototype.getPath = function( n ){
+
+ if( n.host )
+ n = this.getNodeId( n )
+
+ var p = "/data/db/" + this.name + "-"+n;
+ if ( ! this._alldbpaths )
+ this._alldbpaths = [ p ];
+ else
+ this._alldbpaths.push( p );
+ return p;
+}
+
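+// Builds the replSetInitiate config document from the allocated ports (bridge ports when
+// bridged), marking any node whose options request it as an arbiter.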
+ReplSetTest.prototype.getReplSetConfig = function() {
+ var cfg = {};
+
+ cfg['_id'] = this.name;
+ cfg.members = [];
+
+    for(var i=0; i<this.ports.length; i++) {
+        var member = {};
+ member['_id'] = i;
+
+ if(this.bridged)
+ var port = this.bridgePorts[i];
+ else
+ var port = this.ports[i];
+
+ member['host'] = this.host + ":" + port;
+ if( this.nodeOptions[ "n" + i ] && this.nodeOptions[ "n" + i ].arbiter )
+ member['arbiterOnly'] = true
+
+ cfg.members.push(member);
+ }
+
+ return cfg;
+}
+
+ReplSetTest.prototype.getURL = function(){
+ var hosts = [];
+
+ for(i=0; i<this.ports.length; i++) {
+
+ // Don't include this node in the replica set list
+ if(this.bridged && this.ports[i] == this.ports[n]) {
+ continue;
+ }
+
+ var port;
+ // Connect on the right port
+ if(this.bridged) {
+ port = this.bridgePorts[i];
+ }
+ else {
+ port = this.ports[i];
+ }
+
+ var str = this.host + ":" + port;
+ hosts.push(str);
+ }
+
+ return this.name + "/" + hosts.join(",");
+}
+
+ReplSetTest.prototype.getOptions = function( n , extra , putBinaryFirst ){
+
+ if ( ! extra )
+ extra = {};
+
+ if ( ! extra.oplogSize )
+ extra.oplogSize = this.oplogSize;
+
+ var a = []
+
+
+ if ( putBinaryFirst )
+ a.push( "mongod" );
+
+ if ( extra.noReplSet ) {
+ delete extra.noReplSet;
+ }
+ else {
+ a.push( "--replSet" );
+
+ if( this.useSeedList ) {
+ a.push( this.getURL() );
+ }
+ else {
+ a.push( this.name );
+ }
+ }
+
+ a.push( "--noprealloc", "--smallfiles" );
+
+ a.push( "--rest" );
+
+ a.push( "--port" );
+ a.push( this.getPort( n ) );
+
+ a.push( "--dbpath" );
+ a.push( this.getPath( ( n.host ? this.getNodeId( n ) : n ) ) );
+
+ if( this.keyFile ){
+ a.push( "--keyFile" )
+        a.push( this.keyFile )
+ }
+
+ if( jsTestOptions().noJournal ) a.push( "--nojournal" )
+ if( jsTestOptions().noJournalPrealloc ) a.push( "--nopreallocj" )
+ if( jsTestOptions().keyFile && !this.keyFile) {
+ a.push( "--keyFile" )
+ a.push( jsTestOptions().keyFile )
+ }
+
+ for ( var k in extra ){
+ var v = extra[k];
+ if( k in MongoRunner.logicalOptions ) continue
+ a.push( "--" + k );
+ if ( v != null ){
+ if( v.replace ){
+ v = v.replace(/\$node/g, "" + ( n.host ? this.getNodeId( n ) : n ) )
+ v = v.replace(/\$set/g, this.name )
+ v = v.replace(/\$path/g, this.getPath( n ) )
+ }
+ a.push( v );
+ }
+ }
+
+ return a;
+}
+
+ReplSetTest.prototype.startSet = function( options ) {
+
+ var nodes = [];
+ print( "ReplSetTest Starting Set" );
+
+ for( n = 0 ; n < this.ports.length; n++ ) {
+ node = this.start(n, options)
+ nodes.push(node);
+ }
+
+ this.nodes = nodes;
+ return this.nodes;
+}
+
+ReplSetTest.prototype.callIsMaster = function() {
+
+ var master = null;
+ this.initLiveNodes();
+
+ for(var i=0; i<this.nodes.length; i++) {
+
+ try {
+ var n = this.nodes[i].getDB('admin').runCommand({ismaster:1});
+
+ if(n['ismaster'] == true) {
+ master = this.nodes[i]
+ this.liveNodes.master = master
+ }
+ else {
+ this.nodes[i].setSlaveOk();
+ this.liveNodes.slaves.push(this.nodes[i]);
+ }
+
+ }
+ catch(err) {
+ print("ReplSetTest Could not call ismaster on node " + i);
+ }
+ }
+
+ return master || false;
+}
+
+ReplSetTest.awaitRSClientHosts = function( conn, host, hostOk, rs ) {
+
+ if( host.length ){
+        for( var i = 0; i < host.length; i++ ) this.awaitRSClientHosts( conn, host[i], hostOk, rs )
+ return
+ }
+
+ if( hostOk == undefined ) hostOk = { ok : true }
+ if( host.host ) host = host.host
+ if( rs && rs.getMaster ) rs = rs.name
+
+ print( "Awaiting " + host + " to be " + tojson( hostOk ) + " for " + conn + " (rs: " + rs + ")" )
+
+ var tests = 0
+ assert.soon( function() {
+ var rsClientHosts = conn.getDB( "admin" ).runCommand( "connPoolStats" )[ "replicaSets" ]
+ if( tests++ % 10 == 0 )
+ printjson( rsClientHosts )
+
+ for ( rsName in rsClientHosts ){
+ if( rs && rs != rsName ) continue
+ for ( var i = 0; i < rsClientHosts[rsName].hosts.length; i++ ){
+ var clientHost = rsClientHosts[rsName].hosts[ i ];
+ if( clientHost.addr != host ) continue
+
+ // Check that *all* host properties are set correctly
+ var propOk = true
+ for( var prop in hostOk ){
+ if( clientHost[prop] != hostOk[prop] ){
+ propOk = false
+ break
+ }
+ }
+
+ if( propOk ) return true;
+
+ }
+ }
+ return false;
+ }, "timed out waiting for replica set client to recognize hosts",
+ 3 * 20 * 1000 /* ReplicaSetMonitorWatcher updates every 20s */ )
+
+}
+
+ReplSetTest.prototype.awaitSecondaryNodes = function( timeout ) {
+ var master = this.getMaster();
+ var slaves = this.liveNodes.slaves;
+ var len = slaves.length;
+
+ jsTest.attempt({context: this, timeout: 60000, desc: "Awaiting secondaries"}, function() {
+ var ready = true;
+ for(var i=0; i<len; i++) {
+ ready = ready && slaves[i].getDB("admin").runCommand({ismaster: 1})['secondary'];
+ }
+
+ return ready;
+ });
+}
+
+ReplSetTest.prototype.getMaster = function( timeout ) {
+ var master = null;
+
+ master = jsTest.attempt({context: this, timeout: 60000, desc: "Finding master"}, this.callIsMaster);
+ return master;
+}
+
+ReplSetTest.prototype.getPrimary = ReplSetTest.prototype.getMaster
+
+ReplSetTest.prototype.getSecondaries = function( timeout ){
+ var master = this.getMaster( timeout )
+ var secs = []
+ for( var i = 0; i < this.nodes.length; i++ ){
+ if( this.nodes[i] != master ){
+ secs.push( this.nodes[i] )
+ }
+ }
+ return secs
+}
+
+ReplSetTest.prototype.getSecondary = function( timeout ){
+ return this.getSecondaries( timeout )[0];
+}
+
+ReplSetTest.prototype.status = function( timeout ){
+ var master = this.callIsMaster()
+ if( ! master ) master = this.liveNodes.slaves[0]
+ return master.getDB("admin").runCommand({replSetGetStatus: 1})
+}
+
+// Add a node to the test set
+ReplSetTest.prototype.add = function( config ) {
+ if(this.ports.length == 0) {
+ var nextPort = allocatePorts( 1, this.startPort )[0];
+ }
+ else {
+ var nextPort = this.ports[this.ports.length-1] + 1;
+ }
+ print("ReplSetTest Next port: " + nextPort);
+ this.ports.push(nextPort);
+ printjson(this.ports);
+
+ var nextId = this.nodes.length;
+ printjson(this.nodes);
+ print("ReplSetTest nextId:" + nextId);
+ var newNode = this.start( nextId );
+
+ return newNode;
+}
+
+ReplSetTest.prototype.remove = function( nodeId ) {
+ nodeId = this.getNodeId( nodeId )
+ this.nodes.splice( nodeId, 1 );
+ this.ports.splice( nodeId, 1 );
+}
+
+ReplSetTest.prototype.initiate = function( cfg , initCmd , timeout ) {
+ var master = this.nodes[0].getDB("admin");
+ var config = cfg || this.getReplSetConfig();
+ var cmd = {};
+ var cmdKey = initCmd || 'replSetInitiate';
+ var timeout = timeout || 30000;
+ cmd[cmdKey] = config;
+ printjson(cmd);
+
+ jsTest.attempt({context:this, timeout: timeout, desc: "Initiate replica set"}, function() {
+ var result = master.runCommand(cmd);
+ printjson(result);
+ return result['ok'] == 1;
+ });
+
+ // Setup authentication if running test with authentication
+ if (jsTestOptions().keyFile && !this.keyFile) {
+ if (!this.shardSvr) {
+ master = this.getMaster();
+ jsTest.addAuth(master);
+ }
+ jsTest.authenticateNodes(this.nodes);
+ }
+}
+
+ReplSetTest.prototype.reInitiate = function() {
+ var master = this.nodes[0];
+ var c = master.getDB("local")['system.replset'].findOne();
+ var config = this.getReplSetConfig();
+ config.version = c.version + 1;
+ this.initiate( config , 'replSetReconfig' );
+}
+
+ReplSetTest.prototype.getLastOpTimeWritten = function() {
+ this.getMaster();
+ jsTest.attempt({context : this, desc : "awaiting oplog query"},
+ function() {
+ try {
+ this.latest = this.liveNodes.master.getDB("local")['oplog.rs'].find({}).sort({'$natural': -1}).limit(1).next()['ts'];
+ }
+ catch(e) {
+ print("ReplSetTest caught exception " + e);
+ return false;
+ }
+ return true;
+ });
+};
+
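+// Blocks until every live slave's newest oplog entry matches the master's latest optime
+// (refreshing the target optime if the master advances), or until the timeout expires.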
+ReplSetTest.prototype.awaitReplication = function(timeout) {
+ timeout = timeout || 30000;
+
+ this.getLastOpTimeWritten();
+
+ print("ReplSetTest " + this.latest);
+
+ jsTest.attempt({context: this, timeout: timeout, desc: "awaiting replication"},
+ function() {
+ try {
+ var synced = true;
+ for(var i=0; i<this.liveNodes.slaves.length; i++) {
+ var slave = this.liveNodes.slaves[i];
+
+                    // Skip this node if it's an arbiter (myState 7)
+                    var res = slave.getDB("admin").runCommand({replSetGetStatus: 1});
+                    if(res && res.myState == 7) {
+                        continue;
+                    }
+
+ slave.getDB("admin").getMongo().setSlaveOk();
+ var log = slave.getDB("local")['oplog.rs'];
+ if(log.find({}).sort({'$natural': -1}).limit(1).hasNext()) {
+ var entry = log.find({}).sort({'$natural': -1}).limit(1).next();
+ printjson( entry );
+ var ts = entry['ts'];
+ print("ReplSetTest await TS for " + slave + " is " + ts.t+":"+ts.i + " and latest is " + this.latest.t+":"+this.latest.i);
+
+ if (this.latest.t < ts.t || (this.latest.t == ts.t && this.latest.i < ts.i)) {
+ this.latest = this.liveNodes.master.getDB("local")['oplog.rs'].find({}).sort({'$natural': -1}).limit(1).next()['ts'];
+ }
+
+ print("ReplSetTest await oplog size for " + slave + " is " + log.count());
+ synced = (synced && friendlyEqual(this.latest,ts))
+ }
+ else {
+ synced = false;
+ }
+ }
+
+ if(synced) {
+ print("ReplSetTest await synced=" + synced);
+ }
+ return synced;
+ }
+ catch (e) {
+ print("ReplSetTest.awaitReplication: caught exception "+e);
+
+ // we might have a new master now
+ this.getLastOpTimeWritten();
+
+ return false;
+ }
+ });
+}
+
+ReplSetTest.prototype.getHashes = function( db ){
+ this.getMaster();
+ var res = {};
+ res.master = this.liveNodes.master.getDB( db ).runCommand( "dbhash" )
+ res.slaves = this.liveNodes.slaves.map( function(z){ return z.getDB( db ).runCommand( "dbhash" ); } )
+ return res;
+}
+
+/**
+ * Starts up a server. Options are saved by default for subsequent starts.
+ *
+ *
+ * Options { remember : true } re-applies the saved options from a prior start.
+ * Options { noRemember : true } ignores the current properties.
+ * Options { appendOptions : true } appends the current options to those remembered.
+ * Options { startClean : true } clears the data directory before starting.
+ *
+ * @param {int|conn|[int|conn]} n array or single server number (0, 1, 2, ...) or conn
+ * @param {object} [options]
+ * @param {boolean} [restart] If false, the data directory will be cleared
+ * before the server starts. Defaults to false.
+ *
+ */
+ReplSetTest.prototype.start = function( n , options , restart , wait ){
+
+ if( n.length ){
+
+ var nodes = n
+ var started = []
+
+ for( var i = 0; i < nodes.length; i++ ){
+ if( this.start( nodes[i], Object.merge({}, options), restart, wait ) ){
+ started.push( nodes[i] )
+ }
+ }
+
+ return started
+
+ }
+
+ print( "ReplSetTest n is : " + n )
+
+ defaults = { useHostName : this.useHostName,
+ oplogSize : this.oplogSize,
+ keyFile : this.keyFile,
+ port : this.getPort( n ),
+ noprealloc : "",
+ smallfiles : "",
+ rest : "",
+ replSet : this.useSeedList ? this.getURL() : this.name,
+ dbpath : "$set-$node" }
+
+ // TODO : should we do something special if we don't currently know about this node?
+ n = this.getNodeId( n )
+
+ options = Object.merge( defaults, options )
+ options = Object.merge( options, this.nodeOptions[ "n" + n ] )
+
+ options.restart = restart
+
+ var pathOpts = { node : n, set : this.name }
+ options.pathOpts = Object.merge( options.pathOpts || {}, pathOpts )
+
+ if( tojson(options) != tojson({}) )
+ printjson(options)
+
+    // make sure to call getPath, otherwise folders won't be cleaned
+ this.getPath(n);
+
+ print("ReplSetTest " + (restart ? "(Re)" : "") + "Starting....");
+
+ var rval = this.nodes[n] = MongoRunner.runMongod( options )
+
+ if( ! rval ) return rval
+
+ // Add replica set specific attributes
+ this.nodes[n].nodeId = n
+
+ printjson( this.nodes )
+
+ wait = wait || false
+ if( ! wait.toFixed ){
+ if( wait ) wait = 0
+ else wait = -1
+ }
+
+ if( wait < 0 ) return rval
+
+ // Wait for startup
+ this.waitForHealth( rval, this.UP, wait )
+
+ return rval
+
+}
+
+
+/**
+ * Restarts a db without clearing the data directory by default. If the server is not
+ * stopped first, this function will not work.
+ *
+ * Option { startClean : true } forces clearing the data directory.
+ *
+ * @param {int|conn|[int|conn]} n array or single server number (0, 1, 2, ...) or conn
+ */
+ReplSetTest.prototype.restart = function( n , options, signal, wait ){
+ // Can specify wait as third parameter, if using default signal
+ if( signal == true || signal == false ){
+ wait = signal
+ signal = undefined
+ }
+
+ this.stop( n, signal, wait && wait.toFixed ? wait : true )
+ started = this.start( n , options , true, wait );
+
+ if (jsTestOptions().keyFile && !this.keyFile) {
+ if (started.length) {
+ // if n was an array of conns, start will return an array of connections
+ for (var i = 0; i < started.length; i++) {
+ jsTest.authenticate(started[i]);
+ }
+ } else {
+ jsTest.authenticate(started);
+ }
+ }
+ return started;
+}
+
+ReplSetTest.prototype.stopMaster = function( signal , wait ) {
+ var master = this.getMaster();
+ var master_id = this.getNodeId( master );
+ return this.stop( master_id , signal , wait );
+}
+
+// Stops a particular node or nodes, specified by conn or id
+ReplSetTest.prototype.stop = function( n , signal, wait /* wait for stop */ ){
+
+ // Flatten array of nodes to stop
+ if( n.length ){
+ nodes = n
+
+ var stopped = []
+ for( var i = 0; i < nodes.length; i++ ){
+ if( this.stop( nodes[i], signal, wait ) )
+ stopped.push( nodes[i] )
+ }
+
+ return stopped
+ }
+
+ // Can specify wait as second parameter, if using default signal
+ if( signal == true || signal == false ){
+ wait = signal
+ signal = undefined
+ }
+
+ wait = wait || false
+ if( ! wait.toFixed ){
+ if( wait ) wait = 0
+ else wait = -1
+ }
+
+ var port = this.getPort( n );
+ print('ReplSetTest stop *** Shutting down mongod in port ' + port + ' ***');
+ var ret = MongoRunner.stopMongod( port , signal );
+
+ if( ! ret || wait < 0 ) return ret
+
+ // Wait for shutdown
+ this.waitForHealth( n, this.DOWN, wait )
+
+ return true
+}
+
+
+ReplSetTest.prototype.stopSet = function( signal , forRestart ) {
+ for(i=0; i < this.ports.length; i++) {
+ this.stop( i, signal );
+ }
+ if ( ! forRestart && this._alldbpaths ){
+ print("ReplSetTest stopSet deleting all dbpaths");
+ for( i=0; i<this._alldbpaths.length; i++ ){
+ resetDbpath( this._alldbpaths[i] );
+ }
+ }
+
+ print('ReplSetTest stopSet *** Shut down repl set - test worked ****' )
+};
+
+
+/**
+ * Waits until there is a master node
+ */
+ReplSetTest.prototype.waitForMaster = function( timeout ){
+
+ var master = undefined
+
+ jsTest.attempt({context: this, timeout: timeout, desc: "waiting for master"}, function() {
+ return ( master = this.getMaster() )
+ });
+
+ return master
+}
+
+
+/**
+ * Wait for a health indicator to go to a particular state or states.
+ *
+ * @param node is a single node or list of nodes, by id or conn
+ * @param state is a single state or list of states
+ *
+ */
+ReplSetTest.prototype.waitForHealth = function( node, state, timeout ){
+ this.waitForIndicator( node, state, "health", timeout )
+}
+
+/**
+ * Wait for a state indicator to go to a particular state or states.
+ *
+ * @param node is a single node or list of nodes, by id or conn
+ * @param state is a single state or list of states
+ *
+ */
+ReplSetTest.prototype.waitForState = function( node, state, timeout ){
+ this.waitForIndicator( node, state, "state", timeout )
+}
+
+/**
+ * Wait for a rs indicator to go to a particular state or states.
+ *
+ * @param node is a single node or list of nodes, by id or conn
+ * @param states is a single state or list of states
+ * @param ind is the indicator specified
+ *
+ */
+ReplSetTest.prototype.waitForIndicator = function( node, states, ind, timeout ){
+
+ if( node.length ){
+
+ var nodes = node
+ for( var i = 0; i < nodes.length; i++ ){
+ if( states.length )
+ this.waitForIndicator( nodes[i], states[i], ind, timeout )
+ else
+ this.waitForIndicator( nodes[i], states, ind, timeout )
+ }
+
+ return;
+ }
+
+ timeout = timeout || 30000;
+
+ if( ! node.getDB ){
+ node = this.nodes[node]
+ }
+
+ if( ! states.length ) states = [ states ]
+
+ print( "ReplSetTest waitForIndicator " + ind + " on " + node )
+ printjson( states )
+ print( "ReplSetTest waitForIndicator from node " + node )
+
+ var lastTime = null
+ var currTime = new Date().getTime()
+ var status = undefined
+
+ jsTest.attempt({context: this, timeout: timeout, desc: "waiting for state indicator " + ind + " for " + timeout + "ms" }, function() {
+
+ status = this.status()
+
+ var printStatus = false
+ if( lastTime == null || ( currTime = new Date().getTime() ) - (1000 * 5) > lastTime ){
+ if( lastTime == null ) print( "ReplSetTest waitForIndicator Initial status ( timeout : " + timeout + " ) :" )
+ printjson( status )
+ lastTime = new Date().getTime()
+ printStatus = true
+ }
+
+ if (typeof status.members == 'undefined') {
+ return false;
+ }
+
+ for( var i = 0; i < status.members.length; i++ ){
+ if( printStatus ) print( "Status for : " + status.members[i].name + ", checking " + node.host + "/" + node.name )
+ if( status.members[i].name == node.host || status.members[i].name == node.name ){
+ for( var j = 0; j < states.length; j++ ){
+ if( printStatus ) print( "Status " + " : " + status.members[i][ind] + " target state : " + states[j] )
+ if( status.members[i][ind] == states[j] ) return true;
+ }
+ }
+ }
+
+ return false
+
+ });
+
+ print( "ReplSetTest waitForIndicator final status:" )
+ printjson( status )
+
+}
+
+ReplSetTest.Health = {}
+ReplSetTest.Health.UP = 1
+ReplSetTest.Health.DOWN = 0
+
+ReplSetTest.State = {}
+ReplSetTest.State.PRIMARY = 1
+ReplSetTest.State.SECONDARY = 2
+ReplSetTest.State.RECOVERING = 3
+
+/**
+ * Overflows a replica set secondary or secondaries, specified by id or conn.
+ */
+ReplSetTest.prototype.overflow = function( secondaries ){
+
+ // Create a new collection to overflow, allow secondaries to replicate
+ var master = this.getMaster()
+ var overflowColl = master.getCollection( "_overflow.coll" )
+ overflowColl.insert({ replicated : "value" })
+ this.awaitReplication()
+
+ this.stop( secondaries, undefined, 5 * 60 * 1000 )
+
+ var count = master.getDB("local").oplog.rs.count();
+ var prevCount = -1;
+
+ // Keep inserting till we hit our capped coll limits
+ while (count != prevCount) {
+
+ print("ReplSetTest overflow inserting 10000");
+
+ for (var i = 0; i < 10000; i++) {
+ overflowColl.insert({ overflow : "value" });
+ }
+ prevCount = count;
+ this.awaitReplication();
+
+ count = master.getDB("local").oplog.rs.count();
+
+ print( "ReplSetTest overflow count : " + count + " prev : " + prevCount );
+
+ }
+
+ // Restart all our secondaries and wait for recovery state
+ this.start( secondaries, { remember : true }, true, true )
+ this.waitForState( secondaries, this.RECOVERING, 5 * 60 * 1000 )
+
+}
+
+
+
+
+/**
+ * Bridging allows you to test network partitioning. For example, you can set
+ * up a replica set, run bridge(), then kill the connection between any two
+ * nodes x and y with partition(x, y).
+ *
+ * Once you have called bridging, you cannot reconfigure the replica set.
+ */
+ReplSetTest.prototype.bridge = function() {
+ if (this.bridges) {
+ print("ReplSetTest bridge bridges have already been created!");
+ return;
+ }
+
+ var n = this.nodes.length;
+
+ // create bridges
+ this.bridges = [];
+ for (var i=0; i<n; i++) {
+ var nodeBridges = [];
+ for (var j=0; j<n; j++) {
+ if (i == j) {
+ continue;
+ }
+ nodeBridges[j] = new ReplSetBridge(this, i, j);
+ }
+ this.bridges.push(nodeBridges);
+ }
+ print("ReplSetTest bridge bridges: " + this.bridges);
+
+ // restart everyone independently
+ this.stopSet(null, true);
+ for (var i=0; i<n; i++) {
+ this.restart(i, {noReplSet : true});
+ }
+
+ // create new configs
+ for (var i=0; i<n; i++) {
+        var config = this.nodes[i].getDB("local").system.replset.findOne();
+
+ if (!config) {
+ print("ReplSetTest bridge couldn't find config for "+this.nodes[i]);
+ printjson(this.nodes[i].getDB("local").system.namespaces.find().toArray());
+ assert(false);
+ }
+
+ var updateMod = {"$set" : {}};
+ for (var j = 0; j<config.members.length; j++) {
+ if (config.members[j].host == this.host+":"+this.ports[i]) {
+ continue;
+ }
+
+ updateMod['$set']["members."+j+".host"] = this.bridges[i][j].host;
+ }
+ print("ReplSetTest bridge for node " + i + ":");
+ printjson(updateMod);
+ this.nodes[i].getDB("local").system.replset.update({},updateMod);
+ }
+
+ this.stopSet(null, true);
+
+ // start set
+ for (var i=0; i<n; i++) {
+ this.restart(i);
+ }
+
+ return this.getMaster();
+};
+
+/**
+ * This kills the bridge between two nodes. As parameters, specify the from and
+ * to node numbers.
+ *
+ * For example, with a three-member replica set, we'd have nodes 0, 1, and 2,
+ * with the following bridges: 0->1, 0->2, 1->0, 1->2, 2->0, 2->1. We can kill
+ * the connection between nodes 0 and 2 by calling replTest.partition(0,2) or
+ * replTest.partition(2,0) (either way is identical). Then the replica set would
+ * have the following bridges: 0->1, 1->0, 1->2, 2->1.
+ */
+ReplSetTest.prototype.partition = function(from, to) {
+ this.bridges[from][to].stop();
+ this.bridges[to][from].stop();
+};
+
+/**
+ * This reverses a partition created by partition() above.
+ */
+ReplSetTest.prototype.unPartition = function(from, to) {
+ this.bridges[from][to].start();
+ this.bridges[to][from].start();
+};
+
+ReplSetBridge = function(rst, from, to) {
+ var n = rst.nodes.length;
+
+ var startPort = rst.startPort+n;
+ this.port = (startPort+(from*n+to));
+ this.host = rst.host+":"+this.port;
+
+ this.dest = rst.host+":"+rst.ports[to];
+ this.start();
+};
+
+ReplSetBridge.prototype.start = function() {
+ var args = ["mongobridge", "--port", this.port, "--dest", this.dest];
+ print("ReplSetBridge starting: "+tojson(args));
+ this.bridge = startMongoProgram.apply( null , args );
+ print("ReplSetBridge started " + this.bridge);
+};
+
+ReplSetBridge.prototype.stop = function() {
+ print("ReplSetBridge stopping: " + this.port);
+ stopMongod(this.port);
+};
+
+ReplSetBridge.prototype.toString = function() {
+ return this.host+" -> "+this.dest;
+};
diff --git a/src/mongo/shell/shell_utils.cpp b/src/mongo/shell/shell_utils.cpp
new file mode 100644
index 00000000000..f3283ab0ca1
--- /dev/null
+++ b/src/mongo/shell/shell_utils.cpp
@@ -0,0 +1,985 @@
+// utils.cpp
+/*
+ * Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+
+#include <boost/thread/xtime.hpp>
+
+#include <cstring>
+#include <cstdio>
+#include <cstdlib>
+#include <assert.h>
+#include <iostream>
+#include <map>
+#include <sstream>
+#include <vector>
+#include <fcntl.h>
+
+#ifdef _WIN32
+# include <io.h>
+# define SIGKILL 9
+#else
+# include <sys/socket.h>
+# include <netinet/in.h>
+# include <signal.h>
+# include <sys/stat.h>
+# include <sys/wait.h>
+#endif
+
+#include "utils.h"
+#include "../client/dbclient.h"
+#include "../util/md5.hpp"
+#include "../util/processinfo.h"
+#include "../util/text.h"
+#include "../util/heapcheck.h"
+#include "../util/time_support.h"
+#include "../util/file.h"
+
+namespace mongo {
+
+ DBClientWithCommands *latestConn = 0;
+ extern bool dbexitCalled;
+
+#ifdef _WIN32
+ inline int close(int fd) { return _close(fd); }
+ inline int read(int fd, void* buf, size_t size) { return _read(fd, buf, size); }
+ inline int pipe(int fds[2]) { return _pipe(fds, 4096, _O_TEXT | _O_NOINHERIT); }
+#endif
+
+ namespace JSFiles {
+ extern const JSFile servers;
+ }
+
+ // these functions have not been audited for thread safety - currently they are called with an exclusive js mutex
+ namespace shellUtils {
+
+ Scope* theScope = 0;
+
+ std::string _dbConnect;
+ std::string _dbAuth;
+
+ const char *argv0 = 0;
+ void RecordMyLocation( const char *_argv0 ) { argv0 = _argv0; }
+
+ // helpers
+
+ BSONObj makeUndefined() {
+ BSONObjBuilder b;
+ b.appendUndefined( "" );
+ return b.obj();
+ }
+ const BSONObj undefined_ = makeUndefined();
+
+ BSONObj encapsulate( const BSONObj &obj ) {
+ return BSON( "" << obj );
+ }
+
+ // real methods
+
+ void goingAwaySoon();
+ BSONObj Quit(const BSONObj& args, void* data) {
+            // If no arguments are given, the first element will be EOO, which
+ // converts to the integer value 0.
+ goingAwaySoon();
+ int exit_code = int( args.firstElement().number() );
+ ::exit(exit_code);
+ return undefined_;
+ }
+
+ BSONObj JSGetMemInfo( const BSONObj& args, void* data ) {
+ ProcessInfo pi;
+ uassert( 10258 , "processinfo not supported" , pi.supported() );
+
+ BSONObjBuilder e;
+ e.append( "virtual" , pi.getVirtualMemorySize() );
+ e.append( "resident" , pi.getResidentSize() );
+
+ BSONObjBuilder b;
+ b.append( "ret" , e.obj() );
+
+ return b.obj();
+ }
+
+
+#ifndef MONGO_SAFE_SHELL
+
+ BSONObj listFiles(const BSONObj& _args, void* data) {
+ static BSONObj cd = BSON( "0" << "." );
+ BSONObj args = _args.isEmpty() ? cd : _args;
+
+ uassert( 10257 , "need to specify 1 argument to listFiles" , args.nFields() == 1 );
+
+ BSONArrayBuilder lst;
+
+ string rootname = args.firstElement().valuestrsafe();
+ path root( rootname );
+ stringstream ss;
+ ss << "listFiles: no such directory: " << rootname;
+ string msg = ss.str();
+ uassert( 12581, msg.c_str(), boost::filesystem::exists( root ) );
+
+ directory_iterator end;
+ directory_iterator i( root);
+
+ while ( i != end ) {
+ path p = *i;
+ BSONObjBuilder b;
+ b << "name" << p.string();
+ b.appendBool( "isDirectory", is_directory( p ) );
+ if ( ! is_directory( p ) ) {
+ try {
+ b.append( "size" , (double)file_size( p ) );
+ }
+ catch ( ... ) {
+ i++;
+ continue;
+ }
+ }
+
+ lst.append( b.obj() );
+ i++;
+ }
+
+ BSONObjBuilder ret;
+ ret.appendArray( "", lst.done() );
+ return ret.obj();
+ }
+
+ BSONObj ls(const BSONObj& args, void* data) {
+ BSONObj o = listFiles(args, data);
+ if( !o.isEmpty() ) {
+ for( BSONObj::iterator i = o.firstElement().Obj().begin(); i.more(); ) {
+ BSONObj f = i.next().Obj();
+ cout << f["name"].String();
+ if( f["isDirectory"].trueValue() ) cout << '/';
+ cout << '\n';
+ }
+ cout.flush();
+ }
+ return BSONObj();
+ }
+
+ BSONObj cd(const BSONObj& args, void* data) {
+#if defined(_WIN32)
+ std::wstring dir = toWideString( args.firstElement().String().c_str() );
+ if( SetCurrentDirectory(dir.c_str()) )
+ return BSONObj();
+#else
+ string dir = args.firstElement().String();
+            /* if( chdir(dir.c_str()) == 0 )
+                   return BSONObj();
+ */
+ if( 1 ) return BSON(""<<"implementation not done for posix");
+#endif
+ return BSON( "" << "change directory failed" );
+ }
+
+ BSONObj pwd(const BSONObj&, void* data) {
+ boost::filesystem::path p = boost::filesystem::current_path();
+ return BSON( "" << p.string() );
+ }
+
+ BSONObj hostname(const BSONObj&, void* data) {
+ return BSON( "" << getHostName() );
+ }
+
+ static BSONElement oneArg(const BSONObj& args) {
+ uassert( 12597 , "need to specify 1 argument" , args.nFields() == 1 );
+ return args.firstElement();
+ }
+
+ const int CANT_OPEN_FILE = 13300;
+
+ BSONObj cat(const BSONObj& args, void* data) {
+ BSONElement e = oneArg(args);
+ stringstream ss;
+ ifstream f(e.valuestrsafe());
+ uassert(CANT_OPEN_FILE, "couldn't open file", f.is_open() );
+
+ streamsize sz = 0;
+ while( 1 ) {
+ char ch = 0;
+ // slow...maybe change one day
+ f.get(ch);
+ if( ch == 0 ) break;
+ ss << ch;
+ sz += 1;
+                uassert(13301, "cat() : file too big to load as a variable", sz < 1024 * 1024 * 16);
+ }
+ return BSON( "" << ss.str() );
+ }
+
+ BSONObj md5sumFile(const BSONObj& args, void* data) {
+ BSONElement e = oneArg(args);
+ stringstream ss;
+ FILE* f = fopen(e.valuestrsafe(), "rb");
+ uassert(CANT_OPEN_FILE, "couldn't open file", f );
+
+ md5digest d;
+ md5_state_t st;
+ md5_init(&st);
+
+ enum {BUFLEN = 4*1024};
+ char buffer[BUFLEN];
+ int bytes_read;
+ while( (bytes_read = fread(buffer, 1, BUFLEN, f)) ) {
+ md5_append( &st , (const md5_byte_t*)(buffer) , bytes_read );
+ }
+
+ md5_finish(&st, d);
+ return BSON( "" << digestToString( d ) );
+ }
+
+ BSONObj mkdir(const BSONObj& args, void* data) {
+ boost::filesystem::create_directories(args.firstElement().String());
+ return BSON( "" << true );
+ }
+
+ BSONObj removeFile(const BSONObj& args, void* data) {
+ BSONElement e = oneArg(args);
+ bool found = false;
+
+ path root( e.valuestrsafe() );
+ if ( boost::filesystem::exists( root ) ) {
+ found = true;
+ boost::filesystem::remove_all( root );
+ }
+
+ BSONObjBuilder b;
+ b.appendBool( "removed" , found );
+ return b.obj();
+ }
+
+ /**
+ * @param args - [ name, byte index ]
+ * In this initial implementation, all bits in the specified byte are flipped.
+ */
+ BSONObj fuzzFile(const BSONObj& args, void* data) {
+ uassert( 13619, "fuzzFile takes 2 arguments", args.nFields() == 2 );
+ shared_ptr< File > f( new File() );
+ f->open( args.getStringField( "0" ) );
+ uassert( 13620, "couldn't open file to fuzz", !f->bad() && f->is_open() );
+
+ char c;
+ f->read( args.getIntField( "1" ), &c, 1 );
+ c = ~c;
+ f->write( args.getIntField( "1" ), &c, 1 );
+
+ return undefined_;
+ // f close is implicit
+ }
+
+ map< int, pair< pid_t, int > > dbs;
+ map< pid_t, int > shells;
+#ifdef _WIN32
+ map< pid_t, HANDLE > handles;
+#endif
+
+ mongo::mutex mongoProgramOutputMutex("mongoProgramOutputMutex");
+ stringstream mongoProgramOutput_;
+
+ void goingAwaySoon() {
+ mongo::mutex::scoped_lock lk( mongoProgramOutputMutex );
+ mongo::dbexitCalled = true;
+ }
+
+ void writeMongoProgramOutputLine( int port, int pid, const char *line ) {
+ mongo::mutex::scoped_lock lk( mongoProgramOutputMutex );
+ if( mongo::dbexitCalled ) throw "program is terminating";
+ stringstream buf;
+ if ( port > 0 )
+ buf << " m" << port << "| " << line;
+ else
+ buf << "sh" << pid << "| " << line;
+ cout << buf.str() << endl;
+ mongoProgramOutput_ << buf.str() << endl;
+ }
+
+ // only returns last 100000 characters
+ BSONObj RawMongoProgramOutput( const BSONObj &args, void* data ) {
+ mongo::mutex::scoped_lock lk( mongoProgramOutputMutex );
+ string out = mongoProgramOutput_.str();
+ size_t len = out.length();
+ if ( len > 100000 )
+ out = out.substr( len - 100000, 100000 );
+ return BSON( "" << out );
+ }
+
+ BSONObj ClearRawMongoProgramOutput( const BSONObj &args, void* data ) {
+ mongo::mutex::scoped_lock lk( mongoProgramOutputMutex );
+ mongoProgramOutput_.str( "" );
+ return undefined_;
+ }
+
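+        // Resolves a program's path, builds its argv from a BSON argument array, launches it
+        // with stdout/stderr redirected into a pipe, and (when run as a thread via operator())
+        // streams that output line-by-line into the shared mongo program output buffer.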
+ class ProgramRunner {
+ vector<string> argv_;
+ int port_;
+ int pipe_;
+ pid_t pid_;
+ public:
+ pid_t pid() const { return pid_; }
+ int port() const { return port_; }
+
+ boost::filesystem::path find(string prog) {
+ boost::filesystem::path p = prog;
+#ifdef _WIN32
+ p = change_extension(p, ".exe");
+#endif
+
+ if( boost::filesystem::exists(p) ) {
+#ifndef _WIN32
+ p = boost::filesystem::initial_path() / p;
+#endif
+ return p;
+ }
+
+ {
+ boost::filesystem::path t = boost::filesystem::current_path() / p;
+ if( boost::filesystem::exists(t) ) return t;
+ }
+ try {
+ if( theScope->type("_path") == String ) {
+ string path = theScope->getString("_path");
+ if( !path.empty() ) {
+ boost::filesystem::path t = boost::filesystem::path(path) / p;
+ if( boost::filesystem::exists(t) ) return t;
+ }
+ }
+ }
+ catch(...) { }
+ {
+ boost::filesystem::path t = boost::filesystem::initial_path() / p;
+ if( boost::filesystem::exists(t) ) return t;
+ }
+ return p; // not found; might find via system path
+ }
+
+ ProgramRunner( const BSONObj &args , bool isMongoProgram=true) {
+ assert( !args.isEmpty() );
+
+ string program( args.firstElement().valuestrsafe() );
+ assert( !program.empty() );
+ boost::filesystem::path programPath = find(program);
+
+ if (isMongoProgram) {
+#if 0
+ if (program == "mongos") {
+ argv_.push_back("valgrind");
+ argv_.push_back("--log-file=/tmp/mongos-%p.valgrind");
+ argv_.push_back("--leak-check=yes");
+ argv_.push_back("--suppressions=valgrind.suppressions");
+ //argv_.push_back("--error-exitcode=1");
+ argv_.push_back("--");
+ }
+#endif
+ }
+
+ argv_.push_back( programPath.native_file_string() );
+
+ port_ = -1;
+
+ BSONObjIterator j( args );
+ j.next(); // skip program name (handled above)
+ while(j.more()) {
+ BSONElement e = j.next();
+ string str;
+ if ( e.isNumber() ) {
+ stringstream ss;
+ ss << e.number();
+ str = ss.str();
+ }
+ else {
+ assert( e.type() == mongo::String );
+ str = e.valuestr();
+ }
+ if ( str == "--port" )
+ port_ = -2;
+ else if ( port_ == -2 )
+ port_ = strtol( str.c_str(), 0, 10 );
+ argv_.push_back(str);
+ }
+
+ if ( program != "mongod" && program != "mongos" && program != "mongobridge" )
+ port_ = 0;
+ else {
+ if ( port_ <= 0 )
+ cout << "error: a port number is expected when running mongod (etc.) from the shell" << endl;
+ assert( port_ > 0 );
+ }
+ if ( port_ > 0 && dbs.count( port_ ) != 0 ) {
+ cerr << "count for port: " << port_ << " is not 0 is: " << dbs.count( port_ ) << endl;
+ assert( dbs.count( port_ ) == 0 );
+ }
+ }
+
+ void start() {
+ int pipeEnds[ 2 ];
+ assert( pipe( pipeEnds ) != -1 );
+
+ fflush( 0 );
+ launch_process(pipeEnds[1]); //sets pid_
+
+ {
+ stringstream ss;
+ ss << "shell: started program";
+ for (unsigned i=0; i < argv_.size(); i++)
+ ss << " " << argv_[i];
+ ss << '\n';
+ cout << ss.str(); cout.flush();
+ }
+
+ if ( port_ > 0 )
+ dbs.insert( make_pair( port_, make_pair( pid_, pipeEnds[ 1 ] ) ) );
+ else
+ shells.insert( make_pair( pid_, pipeEnds[ 1 ] ) );
+ pipe_ = pipeEnds[ 0 ];
+ }
+
+ // Continue reading output
+ void operator()() {
+ try {
+ // This assumes there aren't any 0's in the mongo program output.
+ // Hope that's ok.
+ const unsigned bufSize = 128 * 1024;
+ char buf[ bufSize ];
+ char temp[ bufSize ];
+ char *start = buf;
+ while( 1 ) {
+ int lenToRead = ( bufSize - 1 ) - ( start - buf );
+ if ( lenToRead <= 0 ) {
+ cout << "error: lenToRead: " << lenToRead << endl;
+ cout << "first 300: " << string(buf,0,300) << endl;
+ }
+ assert( lenToRead > 0 );
+ int ret = read( pipe_, (void *)start, lenToRead );
+ if( mongo::dbexitCalled )
+ break;
+ assert( ret != -1 );
+ start[ ret ] = '\0';
+ if ( strlen( start ) != unsigned( ret ) )
+ writeMongoProgramOutputLine( port_, pid_, "WARNING: mongod wrote null bytes to output" );
+ char *last = buf;
+ for( char *i = strchr( buf, '\n' ); i; last = i + 1, i = strchr( last, '\n' ) ) {
+ *i = '\0';
+ writeMongoProgramOutputLine( port_, pid_, last );
+ }
+ if ( ret == 0 ) {
+ if ( *last )
+ writeMongoProgramOutputLine( port_, pid_, last );
+ close( pipe_ );
+ break;
+ }
+ if ( last != buf ) {
+ strcpy( temp, last );
+ strcpy( buf, temp );
+ }
+ else {
+ assert( strlen( buf ) < bufSize );
+ }
+ start = buf + strlen( buf );
+ }
+ }
+ catch(...) {
+ }
+ }
+ void launch_process(int child_stdout) {
+#ifdef _WIN32
+ stringstream ss;
+ for( unsigned i=0; i < argv_.size(); i++ ) {
+ if (i) ss << ' ';
+ if (argv_[i].find(' ') == string::npos)
+ ss << argv_[i];
+ else {
+ ss << '"';
+ // escape all embedded quotes
+ for (size_t j=0; j<argv_[i].size(); ++j) {
+ if (argv_[i][j]=='"') ss << '"';
+ ss << argv_[i][j];
+ }
+ ss << '"';
+ }
+ }
+
+ string args = ss.str();
+
+ boost::scoped_array<TCHAR> args_tchar (new TCHAR[args.size() + 1]);
+ size_t i;
+ for(i=0; i < args.size(); i++)
+ args_tchar[i] = args[i];
+ args_tchar[i] = 0;
+
+ HANDLE h = (HANDLE)_get_osfhandle(child_stdout);
+ assert(h != INVALID_HANDLE_VALUE);
+ assert(SetHandleInformation(h, HANDLE_FLAG_INHERIT, 1));
+
+ STARTUPINFO si;
+ ZeroMemory(&si, sizeof(si));
+ si.cb = sizeof(si);
+ si.hStdError = h;
+ si.hStdOutput = h;
+ si.dwFlags |= STARTF_USESTDHANDLES;
+
+ PROCESS_INFORMATION pi;
+ ZeroMemory(&pi, sizeof(pi));
+
+ bool success = CreateProcess( NULL, args_tchar.get(), NULL, NULL, true, 0, NULL, NULL, &si, &pi) != 0;
+ if (!success) {
+ LPSTR lpMsgBuf=0;
+ DWORD dw = GetLastError();
+ FormatMessageA(
+ FORMAT_MESSAGE_ALLOCATE_BUFFER |
+ FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS,
+ NULL,
+ dw,
+ MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ (LPSTR)&lpMsgBuf,
+ 0, NULL );
+ stringstream ss;
+ ss << "couldn't start process " << argv_[0] << "; " << lpMsgBuf;
+ uassert(14042, ss.str(), success);
+ LocalFree(lpMsgBuf);
+ }
+
+ CloseHandle(pi.hThread);
+
+ pid_ = pi.dwProcessId;
+ handles.insert( make_pair( pid_, pi.hProcess ) );
+
+#else
+
+ pid_ = fork();
+ assert( pid_ != -1 );
+
+ if ( pid_ == 0 ) {
+ // DON'T ASSERT IN THIS BLOCK - very bad things will happen
+
+ const char** argv = new const char* [argv_.size()+1]; // don't need to free - in child
+ for (unsigned i=0; i < argv_.size(); i++) {
+ argv[i] = argv_[i].c_str();
+ }
+ argv[argv_.size()] = 0;
+
+ if ( dup2( child_stdout, STDOUT_FILENO ) == -1 ||
+ dup2( child_stdout, STDERR_FILENO ) == -1 ) {
+ cout << "Unable to dup2 child output: " << errnoWithDescription() << endl;
+ ::_Exit(-1); //do not pass go, do not call atexit handlers
+ }
+
+ const char** env = new const char* [2]; // don't need to free - in child
+ env[0] = NULL;
+#if defined(HEAP_CHECKING)
+ env[0] = "HEAPCHECK=normal";
+ env[1] = NULL;
+
+ // Heap-check for mongos only. 'argv[0]' must be in the path format.
+ if ( argv_[0].find("mongos") != string::npos) {
+ execvpe( argv[ 0 ], const_cast<char**>(argv) , const_cast<char**>(env) );
+ }
+#endif // HEAP_CHECKING
+
+ execvp( argv[ 0 ], const_cast<char**>(argv) );
+
+ cout << "Unable to start program " << argv[0] << ' ' << errnoWithDescription() << endl;
+ ::_Exit(-1);
+ }
+
+#endif
+ }
+ };
+
+ //returns true if process exited
+ bool wait_for_pid(pid_t pid, bool block=true, int* exit_code=NULL) {
+#ifdef _WIN32
+ assert(handles.count(pid));
+ HANDLE h = handles[pid];
+
+ if (block)
+ WaitForSingleObject(h, INFINITE);
+
+ DWORD tmp;
+ if(GetExitCodeProcess(h, &tmp)) {
+ if ( tmp == STILL_ACTIVE ) {
+ return false;
+ }
+ CloseHandle(h);
+ handles.erase(pid);
+ if (exit_code)
+ *exit_code = tmp;
+ return true;
+ }
+ else {
+ return false;
+ }
+#else
+ int tmp;
+ bool ret = (pid == waitpid(pid, &tmp, (block ? 0 : WNOHANG)));
+ if (exit_code)
+ *exit_code = WEXITSTATUS(tmp);
+ return ret;
+
+#endif
+ }
+
+ BSONObj WaitProgram( const BSONObj& a, void* data ) {
+ int pid = oneArg( a ).numberInt();
+ BSONObj x = BSON( "" << wait_for_pid( pid ) );
+ shells.erase( pid );
+ return x;
+ }
+
+ BSONObj WaitMongoProgramOnPort( const BSONObj &a, void* data ) {
+ int port = oneArg( a ).numberInt();
+ uassert( 13621, "no known mongo program on port", dbs.count( port ) != 0 );
+ log() << "waiting port: " << port << ", pid: " << dbs[ port ].first << endl;
+ bool ret = wait_for_pid( dbs[ port ].first );
+ if ( ret ) {
+ dbs.erase( port );
+ }
+ return BSON( "" << ret );
+ }
+
+ BSONObj StartMongoProgram( const BSONObj &a, void* data ) {
+ _nokillop = true;
+ ProgramRunner r( a );
+ r.start();
+ boost::thread t( r );
+ return BSON( string( "" ) << int( r.pid() ) );
+ }
+
+ BSONObj RunMongoProgram( const BSONObj &a, void* data ) {
+ ProgramRunner r( a );
+ r.start();
+ boost::thread t( r );
+ int exit_code;
+ wait_for_pid( r.pid(), true, &exit_code );
+ if ( r.port() > 0 ) {
+ dbs.erase( r.port() );
+ }
+ else {
+ shells.erase( r.pid() );
+ }
+ return BSON( string( "" ) << exit_code );
+ }
+
+ BSONObj RunProgram(const BSONObj &a, void* data) {
+ ProgramRunner r( a, false );
+ r.start();
+ boost::thread t( r );
+ int exit_code;
+ wait_for_pid(r.pid(), true, &exit_code);
+ shells.erase( r.pid() );
+ return BSON( string( "" ) << exit_code );
+ }
+
+ BSONObj ResetDbpath( const BSONObj &a, void* data ) {
+ assert( a.nFields() == 1 );
+ string path = a.firstElement().valuestrsafe();
+ assert( !path.empty() );
+ if ( boost::filesystem::exists( path ) )
+ boost::filesystem::remove_all( path );
+ boost::filesystem::create_directory( path );
+ return undefined_;
+ }
+
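+        // Recursively copies a dbpath directory tree, skipping any mongod.lock files.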
+ void copyDir( const path &from, const path &to ) {
+ directory_iterator end;
+ directory_iterator i( from );
+ while( i != end ) {
+ path p = *i;
+ if ( p.leaf() != "mongod.lock" ) {
+ if ( is_directory( p ) ) {
+ path newDir = to / p.leaf();
+ boost::filesystem::create_directory( newDir );
+ copyDir( p, newDir );
+ }
+ else {
+ boost::filesystem::copy_file( p, to / p.leaf() );
+ }
+ }
+ ++i;
+ }
+ }
+
+ // NOTE target dbpath will be cleared first
+ BSONObj CopyDbpath( const BSONObj &a, void* data ) {
+ assert( a.nFields() == 2 );
+ BSONObjIterator i( a );
+ string from = i.next().str();
+ string to = i.next().str();
+ assert( !from.empty() );
+ assert( !to.empty() );
+ if ( boost::filesystem::exists( to ) )
+ boost::filesystem::remove_all( to );
+ boost::filesystem::create_directory( to );
+ copyDir( from, to );
+ return undefined_;
+ }
+
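+        // On Windows a graceful stop is requested via the shutdown command (TerminateProcess
+        // for SIGKILL or portless processes); on POSIX the signal is delivered with kill(2),
+        // silently ignoring ESRCH for processes that have already exited.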
+ inline void kill_wrapper(pid_t pid, int sig, int port) {
+#ifdef _WIN32
+ if (sig == SIGKILL || port == 0) {
+ assert( handles.count(pid) );
+ TerminateProcess(handles[pid], 1); // returns failure for "zombie" processes.
+ }
+ else {
+ DBClientConnection conn;
+ conn.connect("127.0.0.1:" + BSONObjBuilder::numStr(port));
+ try {
+ conn.simpleCommand("admin", NULL, "shutdown");
+ }
+ catch (...) {
+ //Do nothing. This command never returns data to the client and the driver doesn't like that.
+ }
+ }
+#else
+ int x = kill( pid, sig );
+ if ( x ) {
+ if ( errno == ESRCH ) {
+ }
+ else {
+ cout << "killFailed: " << errnoWithDescription() << endl;
+ assert( x == 0 );
+ }
+ }
+
+#endif
+ }
+
+ int killDb( int port, pid_t _pid, int signal ) {
+ pid_t pid;
+ int exitCode = 0;
+ if ( port > 0 ) {
+ if( dbs.count( port ) != 1 ) {
+ cout << "No db started on port: " << port << endl;
+ return 0;
+ }
+ pid = dbs[ port ].first;
+ }
+ else {
+ pid = _pid;
+ }
+
+ kill_wrapper( pid, signal, port );
+
+ int i = 0;
+ for( ; i < 130; ++i ) {
+ if ( i == 30 ) {
+ char now[64];
+ time_t_to_String(time(0), now);
+ now[ 20 ] = 0;
+ cout << now << " process on port " << port << ", with pid " << pid << " not terminated, sending sigkill" << endl;
+ kill_wrapper( pid, SIGKILL, port );
+ }
+ if(wait_for_pid(pid, false, &exitCode))
+ break;
+ sleepmillis( 1000 );
+ }
+ if ( i == 130 ) {
+ char now[64];
+ time_t_to_String(time(0), now);
+ now[ 20 ] = 0;
+ cout << now << " failed to terminate process on port " << port << ", with pid " << pid << endl;
+ assert( "Failed to terminate process" == 0 );
+ }
+
+ if ( port > 0 ) {
+ close( dbs[ port ].second );
+ dbs.erase( port );
+ }
+ else {
+ close( shells[ pid ] );
+ shells.erase( pid );
+ }
+ // FIXME I think the intention here is to do an extra sleep only when SIGKILL is sent to the child process.
+ // We may want to change the 4 below to 29, since values of i greater than that indicate we sent a SIGKILL.
+ if ( i > 4 || signal == SIGKILL ) {
+ sleepmillis( 4000 ); // allow operating system to reclaim resources
+ }
+
+ return exitCode;
+ }
+
+ int getSignal( const BSONObj &a ) {
+ int ret = SIGTERM;
+ if ( a.nFields() == 2 ) {
+ BSONObjIterator i( a );
+ i.next();
+ BSONElement e = i.next();
+ assert( e.isNumber() );
+ ret = int( e.number() );
+ }
+ return ret;
+ }
+
+ /** stopMongoProgram(port[, signal]) */
+ BSONObj StopMongoProgram( const BSONObj &a, void* data ) {
+ assert( a.nFields() == 1 || a.nFields() == 2 );
+ uassert( 15853 , "stopMongo needs a number" , a.firstElement().isNumber() );
+ int port = int( a.firstElement().number() );
+ int code = killDb( port, 0, getSignal( a ) );
+ cout << "shell: stopped mongo program on port " << port << endl;
+ return BSON( "" << (double)code );
+ }
+
+ BSONObj StopMongoProgramByPid( const BSONObj &a, void* data ) {
+ assert( a.nFields() == 1 || a.nFields() == 2 );
+ uassert( 15852 , "stopMongoByPid needs a number" , a.firstElement().isNumber() );
+ int pid = int( a.firstElement().number() );
+ int code = killDb( 0, pid, getSignal( a ) );
+ cout << "shell: stopped mongo program on pid " << pid << endl;
+ return BSON( "" << (double)code );
+ }
+
+ void KillMongoProgramInstances() {
+ vector< int > ports;
+ for( map< int, pair< pid_t, int > >::iterator i = dbs.begin(); i != dbs.end(); ++i )
+ ports.push_back( i->first );
+ for( vector< int >::iterator i = ports.begin(); i != ports.end(); ++i )
+ killDb( *i, 0, SIGTERM );
+ vector< pid_t > pids;
+ for( map< pid_t, int >::iterator i = shells.begin(); i != shells.end(); ++i )
+ pids.push_back( i->first );
+ for( vector< pid_t >::iterator i = pids.begin(); i != pids.end(); ++i )
+ killDb( 0, *i, SIGTERM );
+ }
+#else // ndef MONGO_SAFE_SHELL
+ void KillMongoProgramInstances() {}
+#endif
+
+ MongoProgramScope::~MongoProgramScope() {
+ DESTRUCTOR_GUARD(
+ KillMongoProgramInstances();
+ ClearRawMongoProgramOutput( BSONObj(), 0 );
+ )
+ }
+
+ unsigned _randomSeed;
+
+ BSONObj JSSrand( const BSONObj &a, void* data ) {
+ uassert( 12518, "srand requires a single numeric argument",
+ a.nFields() == 1 && a.firstElement().isNumber() );
+ _randomSeed = (unsigned)a.firstElement().numberLong(); // grab least significant bits
+ return undefined_;
+ }
+
+ BSONObj JSRand( const BSONObj &a, void* data ) {
+ uassert( 12519, "rand accepts no arguments", a.nFields() == 0 );
+ unsigned r;
+#if !defined(_WIN32)
+ r = rand_r( &_randomSeed );
+#else
+ r = rand(); // seed not used in this case
+#endif
+ return BSON( "" << double( r ) / ( double( RAND_MAX ) + 1 ) );
+ }
+
+ BSONObj isWindows(const BSONObj& a, void* data) {
+ uassert( 13006, "isWindows accepts no arguments", a.nFields() == 0 );
+#ifdef _WIN32
+ return BSON( "" << true );
+#else
+ return BSON( "" << false );
+#endif
+ }
+
+ const char* getUserDir() {
+#ifdef _WIN32
+ return getenv( "USERPROFILE" );
+#else
+ return getenv( "HOME" );
+#endif
+ }
+ BSONObj getHostName(const BSONObj& a, void* data) {
+ uassert( 13411, "getHostName accepts no arguments", a.nFields() == 0 );
+ char buf[260]; // HOST_NAME_MAX is usually 255
+ assert(gethostname(buf, 260) == 0);
+ buf[259] = '\0';
+ return BSON("" << buf);
+
+ }
+
+ void installShellUtils( Scope& scope ) {
+ theScope = &scope;
+ scope.injectNative( "quit", Quit );
+ scope.injectNative( "getMemInfo" , JSGetMemInfo );
+ scope.injectNative( "_srand" , JSSrand );
+ scope.injectNative( "_rand" , JSRand );
+ scope.injectNative( "_isWindows" , isWindows );
+
+#ifndef MONGO_SAFE_SHELL
+ //can't launch programs
+ scope.injectNative( "_startMongoProgram", StartMongoProgram );
+ scope.injectNative( "runProgram", RunProgram );
+ scope.injectNative( "run", RunProgram );
+ scope.injectNative( "runMongoProgram", RunMongoProgram );
+ scope.injectNative( "stopMongod", StopMongoProgram );
+ scope.injectNative( "stopMongoProgram", StopMongoProgram );
+ scope.injectNative( "stopMongoProgramByPid", StopMongoProgramByPid );
+ scope.injectNative( "rawMongoProgramOutput", RawMongoProgramOutput );
+ scope.injectNative( "clearRawMongoProgramOutput", ClearRawMongoProgramOutput );
+ scope.injectNative( "waitProgram" , WaitProgram );
+ scope.injectNative( "waitMongoProgramOnPort" , WaitMongoProgramOnPort );
+
+ scope.injectNative( "getHostName" , getHostName );
+ scope.injectNative( "removeFile" , removeFile );
+ scope.injectNative( "fuzzFile" , fuzzFile );
+ scope.injectNative( "listFiles" , listFiles );
+ scope.injectNative( "ls" , ls );
+ scope.injectNative( "pwd", pwd );
+ scope.injectNative( "cd", cd );
+ scope.injectNative( "cat", cat );
+ scope.injectNative( "hostname", hostname);
+ scope.injectNative( "resetDbpath", ResetDbpath );
+ scope.injectNative( "copyDbpath", CopyDbpath );
+ scope.injectNative( "md5sumFile", md5sumFile );
+ scope.injectNative( "mkdir" , mkdir );
+#endif
+ }
+
+ void initScope( Scope &scope ) {
+ scope.externalSetup();
+ mongo::shellUtils::installShellUtils( scope );
+ scope.execSetup(JSFiles::servers);
+
+ if ( !_dbConnect.empty() ) {
+ uassert( 12513, "connect failed", scope.exec( _dbConnect , "(connect)" , false , true , false ) );
+ if ( !_dbAuth.empty() ) {
+ installGlobalUtils( scope );
+ uassert( 12514, "login failed", scope.exec( _dbAuth , "(auth)" , true , true , false ) );
+ }
+ }
+ }
+
+ // connstr, myuris
+ map< string, set<string> > _allMyUris;
+ mongo::mutex _allMyUrisMutex("_allMyUrisMutex");
+ bool _nokillop = false;
+ void onConnect( DBClientWithCommands &c ) {
+ latestConn = &c;
+ if ( _nokillop ) {
+ return;
+ }
+ BSONObj info;
+ if ( c.runCommand( "admin", BSON( "whatsmyuri" << 1 ), info ) ) {
+ string connstr = dynamic_cast<DBClientBase&>(c).getServerAddress();
+ mongo::mutex::scoped_lock lk( _allMyUrisMutex );
+ _allMyUris[connstr].insert(info[ "you" ].str());
+ }
+ }
+ }
+}
diff --git a/src/mongo/shell/utils.h b/src/mongo/shell/utils.h
new file mode 100644
index 00000000000..433fe7b7d25
--- /dev/null
+++ b/src/mongo/shell/utils.h
@@ -0,0 +1,48 @@
+// utils.h
+/*
+ * Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#pragma once
+
+#include "../scripting/engine.h"
+
+namespace mongo {
+
+ namespace shellUtils {
+
+ extern std::string _dbConnect;
+ extern std::string _dbAuth;
+ extern map< string, set<string> > _allMyUris;
+ extern bool _nokillop;
+
+ void RecordMyLocation( const char *_argv0 );
+ void installShellUtils( Scope& scope );
+
+ // Scoped management of mongo program instances. Simple implementation:
+ // destructor kills all mongod instances created by the shell.
+ struct MongoProgramScope {
+ MongoProgramScope() {} // Avoid 'unused variable' warning.
+ ~MongoProgramScope();
+ };
+ void KillMongoProgramInstances();
+
+ void initScope( Scope &scope );
+ void onConnect( DBClientWithCommands &c );
+
+ const char* getUserDir();
+ }
+}
diff --git a/src/mongo/shell/utils.js b/src/mongo/shell/utils.js
new file mode 100644
index 00000000000..adc763e3893
--- /dev/null
+++ b/src/mongo/shell/utils.js
@@ -0,0 +1,1896 @@
+__quiet = false;
+__magicNoPrint = { __magicNoPrint : 1111 }
+__callLastError = false;
+_verboseShell = false;
+
+chatty = function(s){
+ if ( ! __quiet )
+ print( s );
+}
+
+friendlyEqual = function( a , b ){
+ if ( a == b )
+ return true;
+
+ a = tojson(a,false,true);
+ b = tojson(b,false,true);
+
+ if ( a == b )
+ return true;
+
+ var clean = function( s ){
+ s = s.replace( /NumberInt\((\-?\d+)\)/g , "$1" );
+ return s;
+ }
+
+ a = clean(a);
+ b = clean(b);
+
+ if ( a == b )
+ return true;
+
+ return false;
+}
+
+printStackTrace = function(){
+ try{
+ throw new Error("Printing Stack Trace");
+ } catch (e) {
+ print(e.stack);
+ }
+}
+
+/**
+ * <p> Set the shell verbosity. If verbose, the shell will display more information about command results. </p>
+ * <p> Default is off. </p>
+ * @param {Bool} value verbosity on / off
+ */
+setVerboseShell = function( value ) {
+ if( value == undefined ) value = true;
+ _verboseShell = value;
+}
+
+doassert = function (msg) {
+ if (msg.indexOf("assert") == 0)
+ print(msg);
+ else
+ print("assert: " + msg);
+ printStackTrace();
+ throw msg;
+}
+
+assert = function( b , msg ){
+ if ( assert._debug && msg ) print( "in assert for: " + msg );
+ if ( b )
+ return;
+ doassert( msg == undefined ? "assert failed" : "assert failed : " + msg );
+}
+
+assert.automsg = function( b ) {
+ assert( eval( b ), b );
+}
+
+assert._debug = false;
+
+assert.eq = function( a , b , msg ){
+ if ( assert._debug && msg ) print( "in assert for: " + msg );
+
+ if ( a == b )
+ return;
+
+ if ( ( a != null && b != null ) && friendlyEqual( a , b ) )
+ return;
+
+ doassert( "[" + tojson( a ) + "] != [" + tojson( b ) + "] are not equal : " + msg );
+}
+
+assert.eq.automsg = function( a, b ) {
+ assert.eq( eval( a ), eval( b ), "[" + a + "] != [" + b + "]" );
+}
+
+assert.neq = function( a , b , msg ){
+ if ( assert._debug && msg ) print( "in assert for: " + msg );
+ if ( a != b )
+ return;
+
+ doassert( "[" + a + "] != [" + b + "] are equal : " + msg );
+}
+
+assert.contains = function( o, arr, msg ){
+ var wasIn = false
+
+ if( ! arr.length ){
+ for( i in arr ){
+ wasIn = arr[i] == o || ( ( arr[i] != null && o != null ) && friendlyEqual( arr[i] , o ) )
+ if( wasIn ) break
+ }
+ }
+ else {
+ for( var i = 0; i < arr.length; i++ ){
+ wasIn = arr[i] == o || ( ( arr[i] != null && o != null ) && friendlyEqual( arr[i] , o ) )
+ if( wasIn ) break
+ }
+ }
+
+ if( ! wasIn ) doassert( tojson( o ) + " was not in " + tojson( arr ) + " : " + msg )
+}
+
+assert.repeat = function( f, msg, timeout, interval ) {
+ if ( assert._debug && msg ) print( "in assert for: " + msg );
+
+ var start = new Date();
+ timeout = timeout || 30000;
+ interval = interval || 200;
+ var last;
+ while( 1 ) {
+
+ if ( typeof( f ) == "string" ){
+ if ( eval( f ) )
+ return;
+ }
+ else {
+ if ( f() )
+ return;
+ }
+
+ if ( ( new Date() ).getTime() - start.getTime() > timeout )
+ break;
+ sleep( interval );
+ }
+}
+
+assert.soon = function( f, msg, timeout /*ms*/, interval ) {
+ if ( assert._debug && msg ) print( "in assert for: " + msg );
+
+ var start = new Date();
+ timeout = timeout || 30000;
+ interval = interval || 200;
+ var last;
+ while( 1 ) {
+
+ if ( typeof( f ) == "string" ){
+ if ( eval( f ) )
+ return;
+ }
+ else {
+ if ( f() )
+ return;
+ }
+
+ diff = ( new Date() ).getTime() - start.getTime();
+ if ( diff > timeout )
+ doassert( "assert.soon failed: " + f + ", msg:" + msg );
+ sleep( interval );
+ }
+}
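+
+// Usage sketch (illustrative only; the collection name is hypothetical): assert.soon
+// polls the predicate every `interval` ms until it is truthy or `timeout` ms elapse.
+//   assert.soon( function(){ return db.foo.count() == 10; },
+//                "expected 10 documents in db.foo", 30000 /*ms*/, 500 /*poll interval*/ );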
+
+assert.time = function( f, msg, timeout /*ms*/ ) {
+ if ( assert._debug && msg ) print( "in assert for: " + msg );
+
+ var start = new Date();
+ timeout = timeout || 30000;
+
+ if ( typeof( f ) == "string" ){
+ res = eval( f );
+ }
+ else {
+ res = f();
+ }
+
+ diff = ( new Date() ).getTime() - start.getTime();
+ if ( diff > timeout )
+ doassert( "assert.time failed timeout " + timeout + "ms took " + diff + "ms : " + f + ", msg:" + msg );
+ return res;
+}
+
+assert.throws = function( func , params , msg ){
+ if ( assert._debug && msg ) print( "in assert for: " + msg );
+
+ if ( params && typeof( params ) == "string" )
+ throw "2nd argument to assert.throws has to be an array"
+
+ try {
+ func.apply( null , params );
+ }
+ catch ( e ){
+ return e;
+ }
+
+ doassert( "did not throw exception: " + msg );
+}
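+
+// Usage sketch (illustrative): params must be an array; it is applied to func, and the
+// thrown value is returned to the caller on success.
+//   var e = assert.throws( function( x ){ throw "bad value: " + x; }, [ 42 ], "should have thrown" );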
+
+assert.throws.automsg = function( func, params ) {
+ assert.throws( func, params, func.toString() );
+}
+
+assert.commandWorked = function( res , msg ){
+ if ( assert._debug && msg ) print( "in assert for: " + msg );
+
+ if ( res.ok == 1 )
+ return;
+
+ doassert( "command failed: " + tojson( res ) + " : " + msg );
+}
+
+assert.commandFailed = function( res , msg ){
+ if ( assert._debug && msg ) print( "in assert for: " + msg );
+
+ if ( res.ok == 0 )
+ return;
+
+ doassert( "command worked when it should have failed: " + tojson( res ) + " : " + msg );
+}
+
+assert.isnull = function( what , msg ){
+ if ( assert._debug && msg ) print( "in assert for: " + msg );
+
+ if ( what == null )
+ return;
+
+ doassert( "supposed to null (" + ( msg || "" ) + ") was: " + tojson( what ) );
+}
+
+assert.lt = function( a , b , msg ){
+ if ( assert._debug && msg ) print( "in assert for: " + msg );
+
+ if ( a < b )
+ return;
+ doassert( a + " is not less than " + b + " : " + msg );
+}
+
+assert.gt = function( a , b , msg ){
+ if ( assert._debug && msg ) print( "in assert for: " + msg );
+
+ if ( a > b )
+ return;
+ doassert( a + " is not greater than " + b + " : " + msg );
+}
+
+assert.lte = function( a , b , msg ){
+ if ( assert._debug && msg ) print( "in assert for: " + msg );
+
+ if ( a <= b )
+ return;
+ doassert( a + " is not less than or eq " + b + " : " + msg );
+}
+
+assert.gte = function( a , b , msg ){
+ if ( assert._debug && msg ) print( "in assert for: " + msg );
+
+ if ( a >= b )
+ return;
+ doassert( a + " is not greater than or eq " + b + " : " + msg );
+}
+
+assert.between = function( a, b, c, msg, inclusive ){
+ if ( assert._debug && msg ) print( "in assert for: " + msg );
+
+ if( ( inclusive == undefined || inclusive == true ) &&
+ a <= b && b <= c ) return;
+ else if( a < b && b < c ) return;
+
+ doassert( b + " is not between " + a + " and " + c + " : " + msg );
+}
+
+assert.betweenIn = function( a, b, c, msg ){ assert.between( a, b, c, msg, true ) }
+assert.betweenEx = function( a, b, c, msg ){ assert.between( a, b, c, msg, false ) }
+
+assert.close = function( a , b , msg , places ){
+ if (places === undefined) {
+ places = 4;
+ }
+ if (Math.round((a - b) * Math.pow(10, places)) === 0) {
+ return;
+ }
+ doassert( a + " is not equal to " + b + " within " + places +
+ " places, diff: " + (a-b) + " : " + msg );
+};
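+
+// Usage sketch: assert.close compares numbers to a given number of decimal places
+// (default 4), which tolerates float rounding.
+//   assert.close( 0.1 + 0.2, 0.3 );              // passes despite binary rounding error
+//   assert.close( 3.14159, Math.PI, "pi", 5 );   // compare to 5 decimal places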
+
+Object.extend = function( dst , src , deep ){
+ for ( var k in src ){
+ var v = src[k];
+ if ( deep && typeof(v) == "object" ){
+ if ( "floatApprox" in v ) { // convert NumberLong properly
+ eval( "v = " + tojson( v ) );
+ } else {
+ v = Object.extend( typeof ( v.length ) == "number" ? [] : {} , v , true );
+ }
+ }
+ dst[k] = v;
+ }
+ return dst;
+}
+
+Object.merge = function( dst, src, deep ){
+ var clone = Object.extend( {}, dst, deep )
+ return Object.extend( clone, src, deep )
+}
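+
+// Usage sketch (example values are arbitrary): Object.extend copies src's fields onto
+// dst in place; Object.merge leaves both inputs untouched and returns a new object.
+//   var defaults = { w : 1, journal : false };
+//   var opts = Object.merge( defaults, { journal : true } );   // { w : 1, journal : true }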
+
+argumentsToArray = function( a ){
+ var arr = [];
+ for ( var i=0; i<a.length; i++ )
+ arr[i] = a[i];
+ return arr;
+}
+
+isString = function( x ){
+ return typeof( x ) == "string";
+}
+
+isNumber = function(x){
+ return typeof( x ) == "number";
+}
+
+isObject = function( x ){
+ return typeof( x ) == "object";
+}
+
+String.prototype.trim = function() {
+ return this.replace(/^\s+|\s+$/g,"");
+}
+String.prototype.ltrim = function() {
+ return this.replace(/^\s+/,"");
+}
+String.prototype.rtrim = function() {
+ return this.replace(/\s+$/,"");
+}
+
+String.prototype.startsWith = function (str){
+ return this.indexOf(str) == 0
+}
+
+String.prototype.endsWith = function (str){
+ return new RegExp( str + "$" ).test( this )
+}
+
+Number.prototype.zeroPad = function(width) {
+ var str = this + '';
+ while (str.length < width)
+ str = '0' + str;
+ return str;
+}
+
+Date.timeFunc = function( theFunc , numTimes ){
+
+ var start = new Date();
+
+ numTimes = numTimes || 1;
+ for ( var i=0; i<numTimes; i++ ){
+ theFunc.apply( null , argumentsToArray( arguments ).slice( 2 ) );
+ }
+
+ return (new Date()).getTime() - start.getTime();
+}
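+
+// Usage sketch: Date.timeFunc returns the elapsed milliseconds; arguments after
+// numTimes are forwarded to theFunc on every iteration (the workload here is made up).
+//   Date.timeFunc( function( n ){ for ( var i = 0; i < n; i++ ) Math.sqrt( i ); }, 10, 100000 );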
+
+Date.prototype.tojson = function(){
+
+ var UTC = Date.printAsUTC ? 'UTC' : '';
+
+ var year = this['get'+UTC+'FullYear']().zeroPad(4);
+ var month = (this['get'+UTC+'Month']() + 1).zeroPad(2);
+ var date = this['get'+UTC+'Date']().zeroPad(2);
+ var hour = this['get'+UTC+'Hours']().zeroPad(2);
+ var minute = this['get'+UTC+'Minutes']().zeroPad(2);
+ var sec = this['get'+UTC+'Seconds']().zeroPad(2)
+
+ if (this['get'+UTC+'Milliseconds']())
+ sec += '.' + this['get'+UTC+'Milliseconds']().zeroPad(3)
+
+ var ofs = 'Z';
+ if (!Date.printAsUTC){
+ var ofsmin = this.getTimezoneOffset();
+ if (ofsmin != 0){
+ ofs = ofsmin > 0 ? '-' : '+'; // This is correct
+ ofs += (ofsmin/60).zeroPad(2)
+ ofs += (ofsmin%60).zeroPad(2)
+ }
+ }
+
+ return 'ISODate("'+year+'-'+month+'-'+date+'T'+hour+':'+minute+':'+sec+ofs+'")';
+}
+
+Date.printAsUTC = true;
+
+
+ISODate = function(isoDateStr){
+ if (!isoDateStr)
+ return new Date();
+
+ var isoDateRegex = /(\d{4})-?(\d{2})-?(\d{2})([T ](\d{2})(:?(\d{2})(:?(\d{2}(\.\d+)?))?)?(Z|([+-])(\d{2}):?(\d{2})?)?)?/;
+ var res = isoDateRegex.exec(isoDateStr);
+
+ if (!res)
+ throw "invalid ISO date";
+
+ var year = parseInt(res[1],10) || 1970; // this should always be present
+ var month = (parseInt(res[2],10) || 1) - 1;
+ var date = parseInt(res[3],10) || 0;
+ var hour = parseInt(res[5],10) || 0;
+ var min = parseInt(res[7],10) || 0;
+ var sec = parseFloat(res[9]) || 0;
+ var ms = Math.round((sec%1) * 1000)
+ sec -= ms/1000
+
+ var time = Date.UTC(year, month, date, hour, min, sec, ms);
+
+ if (res[11] && res[11] != 'Z'){
+ var ofs = 0;
+ ofs += (parseInt(res[13],10) || 0) * 60*60*1000; // hours
+ ofs += (parseInt(res[14],10) || 0) * 60*1000; // mins
+ if (res[12] == '+') // if ahead subtract
+ ofs *= -1;
+
+ time += ofs
+ }
+
+ return new Date(time);
+}
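+
+// Usage sketch: ISODate accepts several ISO-8601 spellings (the timestamps below are
+// arbitrary); all three parse to the same instant.
+//   ISODate( "2011-12-24T15:33:26Z" )
+//   ISODate( "20111224T153326+0000" )
+//   ISODate( "2011-12-24 10:33:26-05:00" )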
+
+RegExp.prototype.tojson = RegExp.prototype.toString;
+
+Array.contains = function( a , x ){
+ for ( var i=0; i<a.length; i++ ){
+ if ( a[i] == x )
+ return true;
+ }
+ return false;
+}
+
+Array.unique = function( a ){
+ var u = [];
+ for ( var i=0; i<a.length; i++){
+ var o = a[i];
+ if ( ! Array.contains( u , o ) ){
+ u.push( o );
+ }
+ }
+ return u;
+}
+
+Array.shuffle = function( arr ){
+ for ( var i=0; i<arr.length-1; i++ ){
+ var pos = i+Random.randInt(arr.length-i);
+ var save = arr[i];
+ arr[i] = arr[pos];
+ arr[pos] = save;
+ }
+ return arr;
+}
+
+
+Array.tojson = function( a , indent , nolint ){
+ var lineEnding = nolint ? " " : "\n";
+
+ if (!indent)
+ indent = "";
+
+ if ( nolint )
+ indent = "";
+
+ if (a.length == 0) {
+ return "[ ]";
+ }
+
+ var s = "[" + lineEnding;
+ indent += "\t";
+ for ( var i=0; i<a.length; i++){
+ s += indent + tojson( a[i], indent , nolint );
+ if ( i < a.length - 1 ){
+ s += "," + lineEnding;
+ }
+ }
+ if ( a.length == 0 ) {
+ s += indent;
+ }
+
+ indent = indent.substring(1);
+ s += lineEnding+indent+"]";
+ return s;
+}
+
+Array.fetchRefs = function( arr , coll ){
+ var n = [];
+ for ( var i=0; i<arr.length; i ++){
+ var z = arr[i];
+ if ( coll && coll != z.getCollection() )
+ continue;
+ n.push( z.fetch() );
+ }
+
+ return n;
+}
+
+Array.sum = function( arr ){
+ if ( arr.length == 0 )
+ return null;
+ var s = arr[0];
+ for ( var i=1; i<arr.length; i++ )
+ s += arr[i];
+ return s;
+}
+
+Array.avg = function( arr ){
+ if ( arr.length == 0 )
+ return null;
+ return Array.sum( arr ) / arr.length;
+}
+
+Array.stdDev = function( arr ){
+ var avg = Array.avg( arr );
+ var sum = 0;
+
+ for ( var i=0; i<arr.length; i++ ){
+ sum += Math.pow( arr[i] - avg , 2 );
+ }
+
+ return Math.sqrt( sum / arr.length );
+}
+
+if( typeof Array.isArray != "function" ){
+ Array.isArray = function( arr ){
+ return arr != undefined && arr.constructor == Array
+ }
+}
+
+//these two are helpers for Array.sort(func)
+compare = function(l, r){ return (l == r ? 0 : (l < r ? -1 : 1)); }
+
+// arr.sort(compareOn('name'))
+compareOn = function(field){
+ return function(l, r) { return compare(l[field], r[field]); }
+}
+
+Object.keySet = function( o ) {
+ var ret = new Array();
+ for( i in o ) {
+ if ( !( i in o.__proto__ && o[ i ] === o.__proto__[ i ] ) ) {
+ ret.push( i );
+ }
+ }
+ return ret;
+}
+
+if ( ! NumberLong.prototype ) {
+ NumberLong.prototype = {}
+}
+
+NumberLong.prototype.tojson = function() {
+ return this.toString();
+}
+
+if ( ! NumberInt.prototype ) {
+ NumberInt.prototype = {}
+}
+
+NumberInt.prototype.tojson = function() {
+ return this.toString();
+}
+
+if ( ! ObjectId.prototype )
+ ObjectId.prototype = {}
+
+ObjectId.prototype.toString = function(){
+ return "ObjectId(" + tojson(this.str) + ")";
+}
+
+ObjectId.prototype.tojson = function(){
+ return this.toString();
+}
+
+ObjectId.prototype.valueOf = function(){
+ return this.str;
+}
+
+ObjectId.prototype.isObjectId = true;
+
+ObjectId.prototype.getTimestamp = function(){
+ return new Date(parseInt(this.valueOf().slice(0,8), 16)*1000);
+}
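+
+// Usage sketch: the first 8 hex characters of an ObjectId are a unix timestamp in
+// seconds, so getTimestamp() recovers the creation time (the id below is made up).
+//   ObjectId( "4ef61bb60000000000000000" ).getTimestamp()   // roughly late December 2011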
+
+ObjectId.prototype.equals = function( other){
+ return this.str == other.str;
+}
+
+if ( typeof( DBPointer ) != "undefined" ){
+ DBPointer.prototype.fetch = function(){
+ assert( this.ns , "need a ns" );
+ assert( this.id , "need an id" );
+
+ return db[ this.ns ].findOne( { _id : this.id } );
+ }
+
+ DBPointer.prototype.tojson = function(indent){
+ return this.toString();
+ }
+
+ DBPointer.prototype.getCollection = function(){
+ return this.ns;
+ }
+
+ DBPointer.prototype.getId = function(){
+ return this.id;
+ }
+
+ DBPointer.prototype.toString = function(){
+ return "DBPointer(" + tojson(this.ns) + ", " + tojson(this.id) + ")";
+ }
+}
+else {
+ print( "warning: no DBPointer" );
+}
+
+if ( typeof( DBRef ) != "undefined" ){
+ DBRef.prototype.fetch = function(){
+ assert( this.$ref , "need a ns" );
+ assert( this.$id , "need an id" );
+
+ return db[ this.$ref ].findOne( { _id : this.$id } );
+ }
+
+ DBRef.prototype.tojson = function(indent){
+ return this.toString();
+ }
+
+ DBRef.prototype.getCollection = function(){
+ return this.$ref;
+ }
+
+ DBRef.prototype.getRef = function(){
+ return this.$ref;
+ }
+
+ DBRef.prototype.getId = function(){
+ return this.$id;
+ }
+
+ DBRef.prototype.toString = function(){
+ return "DBRef(" + tojson(this.$ref) + ", " + tojson(this.$id) + ")";
+ }
+}
+else {
+ print( "warning: no DBRef" );
+}
+
+if ( typeof( Timestamp ) != "undefined" ){
+ Timestamp.prototype.tojson = function () {
+ return this.toString();
+ }
+
+ Timestamp.prototype.getTime = function () {
+ return this.t;
+ }
+
+ Timestamp.prototype.getInc = function () {
+ return this.i;
+ }
+
+ Timestamp.prototype.toString = function () {
+ return "Timestamp(" + this.t + ", " + this.i + ")";
+ }
+}
+else {
+ print( "warning: no Timestamp class" );
+}
+
+if ( typeof( BinData ) != "undefined" ){
+ BinData.prototype.tojson = function () {
+ return this.toString();
+ }
+
+ BinData.prototype.subtype = function () {
+ return this.type;
+ }
+
+ BinData.prototype.length = function () {
+ return this.len;
+ }
+}
+else {
+ print( "warning: no BinData class" );
+}
+
+if ( typeof _threadInject != "undefined" ){
+ print( "fork() available!" );
+
+ Thread = function(){
+ this.init.apply( this, arguments );
+ }
+ _threadInject( Thread.prototype );
+
+ ScopedThread = function() {
+ this.init.apply( this, arguments );
+ }
+ ScopedThread.prototype = new Thread( function() {} );
+ _scopedThreadInject( ScopedThread.prototype );
+
+ fork = function() {
+ var t = new Thread( function() {} );
+ Thread.apply( t, arguments );
+ return t;
+ }
+
+ // Helper class to generate a list of events which may be executed by a ParallelTester
+ EventGenerator = function( me, collectionName, mean ) {
+ this.mean = mean;
+ this.events = new Array( me, collectionName );
+ }
+
+ EventGenerator.prototype._add = function( action ) {
+ this.events.push( [ Random.genExp( this.mean ), action ] );
+ }
+
+ EventGenerator.prototype.addInsert = function( obj ) {
+ this._add( "t.insert( " + tojson( obj ) + " )" );
+ }
+
+ EventGenerator.prototype.addRemove = function( obj ) {
+ this._add( "t.remove( " + tojson( obj ) + " )" );
+ }
+
+ EventGenerator.prototype.addUpdate = function( objOld, objNew ) {
+ this._add( "t.update( " + tojson( objOld ) + ", " + tojson( objNew ) + " )" );
+ }
+
+ EventGenerator.prototype.addCheckCount = function( count, query, shouldPrint, checkQuery ) {
+ query = query || {};
+ shouldPrint = shouldPrint || false;
+ checkQuery = checkQuery || false;
+ var action = "assert.eq( " + count + ", t.count( " + tojson( query ) + " ) );"
+ if ( checkQuery ) {
+ action += " assert.eq( " + count + ", t.find( " + tojson( query ) + " ).toArray().length );"
+ }
+ if ( shouldPrint ) {
+ action += " print( me + ' ' + " + count + " );";
+ }
+ this._add( action );
+ }
+
+ EventGenerator.prototype.getEvents = function() {
+ return this.events;
+ }
+
+ EventGenerator.dispatch = function() {
+ var args = argumentsToArray( arguments );
+ var me = args.shift();
+ var collectionName = args.shift();
+ var m = new Mongo( db.getMongo().host );
+ var t = m.getDB( "test" )[ collectionName ];
+ for( var i in args ) {
+ sleep( args[ i ][ 0 ] );
+ eval( args[ i ][ 1 ] );
+ }
+ }
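+
+ // Usage sketch (the collection name is hypothetical): an EventGenerator assembles
+ // [ me, collectionName, [ delayMs, actionString ], ... ], which dispatch() replays
+ // against a fresh connection to the "test" db, sleeping between events.
+ //   var gen = new EventGenerator( "worker-1", "evtest", 50 /* mean ms between events */ );
+ //   gen.addInsert( { _id : 1 } );
+ //   gen.addCheckCount( 1, { _id : 1 } );
+ //   // gen.getEvents() can then be handed to a thread that applies EventGenerator.dispatch.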
+
+ // Helper class for running tests in parallel. It assembles a set of tests
+ // and then calls assert.parallelTests to run them.
+ ParallelTester = function() {
+ this.params = new Array();
+ }
+
+ ParallelTester.prototype.add = function( fun, args ) {
+ args = args || [];
+ args.unshift( fun );
+ this.params.push( args );
+ }
+
+ ParallelTester.prototype.run = function( msg, newScopes ) {
+ newScopes = newScopes || false;
+ assert.parallelTests( this.params, msg, newScopes );
+ }
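+
+ // Usage sketch (test bodies are hypothetical): each added entry is a function plus its
+ // arguments, run in its own thread; run() asserts that no thread reported a failure.
+ //   var pt = new ParallelTester();
+ //   pt.add( function( n ){ assert.eq( n, 1 ); }, [ 1 ] );
+ //   pt.add( function( a, b ){ assert.lt( a, b ); }, [ 1, 2 ] );
+ //   pt.run( "demo parallel tests", false /* reuse the current scope in each thread */ );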
+
+ // creates lists of tests from jstests dir in a format suitable for use by
+ // ParallelTester.fileTester. The lists will be in random order.
+ // n: number of lists to split these tests into
+ ParallelTester.createJstestsLists = function( n ) {
+ var params = new Array();
+ for( var i = 0; i < n; ++i ) {
+ params.push( [] );
+ }
+
+ var makeKeys = function( a ) {
+ var ret = {};
+ for( var i in a ) {
+ ret[ a[ i ] ] = 1;
+ }
+ return ret;
+ }
+
+ // some tests can't run in parallel with most others
+ var skipTests = makeKeys( [ "jstests/dbadmin.js",
+ "jstests/repair.js",
+ "jstests/cursor8.js",
+ "jstests/recstore.js",
+ "jstests/extent.js",
+ "jstests/indexb.js",
+ "jstests/profile1.js",
+ "jstests/mr3.js",
+ "jstests/indexh.js",
+ "jstests/apitest_db.js",
+ "jstests/evalb.js",
+ "jstests/evald.js",
+ "jstests/evalf.js",
+ "jstests/killop.js",
+ "jstests/run_program1.js",
+ "jstests/notablescan.js",
+ "jstests/drop2.js",
+ "jstests/dropdb_race.js",
+ "jstests/fsync2.js", // May be placed in serialTestsArr once SERVER-4243 is fixed.
+ "jstests/bench_test1.js"] );
+
+ // some tests can't be run in parallel with each other
+ var serialTestsArr = [ "jstests/fsync.js"
+// ,"jstests/fsync2.js" // SERVER-4243
+ ];
+ var serialTests = makeKeys( serialTestsArr );
+
+ params[ 0 ] = serialTestsArr;
+
+ var files = listFiles("jstests");
+ files = Array.shuffle( files );
+
+ var i = 0;
+ files.forEach(
+ function(x) {
+
+ if ( ( /[\/\\]_/.test(x.name) ) ||
+ ( ! /\.js$/.test(x.name ) ) ||
+ ( x.name in skipTests ) ||
+ ( x.name in serialTests ) ||
+ ! /\.js$/.test(x.name ) ){
+ print(" >>>>>>>>>>>>>>> skipping " + x.name);
+ return;
+ }
+
+ params[ i % n ].push( x.name );
+ ++i;
+ }
+ );
+
+ // randomize ordering of the serialTests
+ params[ 0 ] = Array.shuffle( params[ 0 ] );
+
+ for( var i in params ) {
+ params[ i ].unshift( i );
+ }
+
+ return params;
+ }
+
+ // runs a set of test files
+ // first argument is an identifier for this tester, remaining arguments are file names
+ ParallelTester.fileTester = function() {
+ var args = argumentsToArray( arguments );
+ var suite = args.shift();
+ args.forEach(
+ function( x ) {
+ print(" S" + suite + " Test : " + x + " ...");
+ var time = Date.timeFunc( function() { load(x); }, 1);
+ print(" S" + suite + " Test : " + x + " " + time + "ms" );
+ }
+ );
+ }
+
+ // params: array of arrays, each element of which consists of a function followed
+ // by zero or more arguments to that function. Each function and its arguments will
+ // be called in a separate thread.
+ // msg: failure message
+ // newScopes: if true, each thread starts in a fresh scope
+ assert.parallelTests = function( params, msg, newScopes ) {
+ newScopes = newScopes || false;
+ var wrapper = function( fun, argv ) {
+ eval (
+ "var z = function() {" +
+ "var __parallelTests__fun = " + fun.toString() + ";" +
+ "var __parallelTests__argv = " + tojson( argv ) + ";" +
+ "var __parallelTests__passed = false;" +
+ "try {" +
+ "__parallelTests__fun.apply( 0, __parallelTests__argv );" +
+ "__parallelTests__passed = true;" +
+ "} catch ( e ) {" +
+ "print( '********** Parallel Test FAILED: ' + tojson(e) );" +
+ "}" +
+ "return __parallelTests__passed;" +
+ "}"
+ );
+ return z;
+ }
+ var runners = new Array();
+ for( var i in params ) {
+ var param = params[ i ];
+ var test = param.shift();
+ var t;
+ if ( newScopes )
+ t = new ScopedThread( wrapper( test, param ) );
+ else
+ t = new Thread( wrapper( test, param ) );
+ runners.push( t );
+ }
+
+ runners.forEach( function( x ) { x.start(); } );
+ var nFailed = 0;
+ // v8 doesn't like it if we exit before all threads are joined (SERVER-529)
+ runners.forEach( function( x ) { if( !x.returnData() ) { ++nFailed; } } );
+ assert.eq( 0, nFailed, msg );
+ }
+}
+
+tojsononeline = function( x ){
+ return tojson( x , " " , true );
+}
+
+tojson = function( x, indent , nolint ){
+ if ( x === null )
+ return "null";
+
+ if ( x === undefined )
+ return "undefined";
+
+ if (!indent)
+ indent = "";
+
+ switch ( typeof x ) {
+ case "string": {
+ var s = "\"";
+ for ( var i=0; i<x.length; i++ ){
+ switch (x[i]){
+ case '"': s += '\\"'; break;
+ case '\\': s += '\\\\'; break;
+ case '\b': s += '\\b'; break;
+ case '\f': s += '\\f'; break;
+ case '\n': s += '\\n'; break;
+ case '\r': s += '\\r'; break;
+ case '\t': s += '\\t'; break;
+
+ default: {
+ var code = x.charCodeAt(i);
+ if (code < 0x20){
+ s += (code < 0x10 ? '\\u000' : '\\u00') + code.toString(16);
+ } else {
+ s += x[i];
+ }
+ }
+ }
+ }
+ return s + "\"";
+ }
+ case "number":
+ case "boolean":
+ return "" + x;
+ case "object":{
+ var s = tojsonObject( x, indent , nolint );
+ if ( ( nolint == null || nolint == true ) && s.length < 80 && ( indent == null || indent.length == 0 ) ){
+ s = s.replace( /[\s\r\n ]+/gm , " " );
+ }
+ return s;
+ }
+ case "function":
+ return x.toString();
+ default:
+ throw "tojson can't handle type " + ( typeof x );
+ }
+
+}
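+
+// Behaviour sketch (outputs shown approximately): tojson quotes and escapes strings,
+// defers objects to tojsonObject, and collapses short objects onto a single line.
+//   tojson( { a : 1, b : [ 2, 3 ] } )   // roughly { "a" : 1, "b" : [ 2, 3 ] }
+//   tojson( { a : 1 }, "", true )       // nolint form, always on one line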
+
+tojsonObject = function( x, indent , nolint ){
+ var lineEnding = nolint ? " " : "\n";
+ var tabSpace = nolint ? "" : "\t";
+
+ assert.eq( ( typeof x ) , "object" , "tojsonObject needs object, not [" + ( typeof x ) + "]" );
+
+ if (!indent)
+ indent = "";
+
+ if ( typeof( x.tojson ) == "function" && x.tojson != tojson ) {
+ return x.tojson(indent,nolint);
+ }
+
+ if ( x.constructor && typeof( x.constructor.tojson ) == "function" && x.constructor.tojson != tojson ) {
+ return x.constructor.tojson( x, indent , nolint );
+ }
+
+ if ( x.toString() == "[object MaxKey]" )
+ return "{ $maxKey : 1 }";
+ if ( x.toString() == "[object MinKey]" )
+ return "{ $minKey : 1 }";
+
+ var s = "{" + lineEnding;
+
+ // push one level of indent
+ indent += tabSpace;
+
+ var total = 0;
+ for ( var k in x ) total++;
+ if ( total == 0 ) {
+ s += indent + lineEnding;
+ }
+
+ var keys = x;
+ if ( typeof( x._simpleKeys ) == "function" )
+ keys = x._simpleKeys();
+ var num = 1;
+ for ( var k in keys ){
+
+ var val = x[k];
+ if ( val == DB.prototype || val == DBCollection.prototype )
+ continue;
+
+ s += indent + "\"" + k + "\" : " + tojson( val, indent , nolint );
+ if (num != total) {
+ s += ",";
+ num++;
+ }
+ s += lineEnding;
+ }
+
+ // pop one level of indent
+ indent = indent.substring(1);
+ return s + indent + "}";
+}
+
+shellPrint = function( x ){
+ it = x;
+ if ( x != undefined )
+ shellPrintHelper( x );
+
+ if ( db ){
+ var e = db.getPrevError();
+ if ( e.err ) {
+ if ( e.nPrev <= 1 )
+ print( "error on last call: " + tojson( e.err ) );
+ else
+ print( "an error " + tojson( e.err ) + " occurred " + e.nPrev + " operations back in the command invocation" );
+ }
+ db.resetError();
+ }
+}
+
+printjson = function(x){
+ print( tojson( x ) );
+}
+
+printjsononeline = function(x){
+ print( tojsononeline( x ) );
+}
+
+if ( typeof TestData == "undefined" ){
+ TestData = undefined
+}
+
+jsTestName = function(){
+ if( TestData ) return TestData.testName
+ return "__unknown_name__"
+}
+
+jsTestFile = function(){
+ if( TestData ) return TestData.testFile
+ return "__unknown_file__"
+}
+
+jsTestPath = function(){
+ if( TestData ) return TestData.testPath
+ return "__unknown_path__"
+}
+
+jsTestOptions = function(){
+ if( TestData ) return { noJournal : TestData.noJournal,
+ noJournalPrealloc : TestData.noJournalPrealloc,
+ auth : TestData.auth,
+ keyFile : TestData.keyFile,
+ authUser : "__system",
+ authPassword : TestData.keyFileData,
+ adminUser : "admin",
+ adminPassword : "password" }
+ return {}
+}
+
+jsTestLog = function(msg){
+ print( "\n\n----\n" + msg + "\n----\n\n" )
+}
+
+jsTest = {}
+
+jsTest.name = jsTestName
+jsTest.file = jsTestFile
+jsTest.path = jsTestPath
+jsTest.options = jsTestOptions
+jsTest.log = jsTestLog
+
+jsTest.dir = function(){
+ return jsTest.path().replace( /\/[^\/]+$/, "/" )
+}
+
+jsTest.randomize = function( seed ) {
+ if( seed == undefined ) seed = new Date().getTime()
+ Random.srand( seed )
+ print( "Random seed for test : " + seed )
+}
+
+/**
+* Adds a user to the admin DB on the given connection. This is only used for running the test suite
+* with authentication enabled.
+*/
+jsTest.addAuth = function(conn) {
+ print ("Adding admin user on connection: " + conn);
+ return conn.getDB('admin').addUser(jsTestOptions().adminUser, jsTestOptions().adminPassword);
+}
+
+jsTest.authenticate = function(conn) {
+ conn.authenticated = true;
+ result1 = null;
+ result2 = null;
+ if (jsTest.options().auth) {
+ print ("Authenticating to admin user on connection: " + conn);
+ result1 = conn.getDB('admin').auth(jsTestOptions().adminUser, jsTestOptions().adminPassword);
+ }
+ if (jsTest.options().keyFile && !jsTest.isMongos(conn)) {
+ print ("Authenticating to system user on connection: " + conn);
+ result2 = conn.getDB('local').auth(jsTestOptions().authUser, jsTestOptions().authPassword);
+ }
+
+ if (result1 == 1 || result2 == 1) {
+ return 1;
+ }
+
+ return result2 != null ? result2 : result1;
+}
+
+jsTest.authenticateNodes = function(nodes) {
+ jsTest.attempt({timeout:30000, desc: "Authenticate to nodes: " + nodes}, function() {
+ for (var i = 0; i < nodes.length; i++) {
+ // Don't try to authenticate to arbiters
+ res = nodes[i].getDB("admin").runCommand({replSetGetStatus: 1});
+ if(res.myState == 7) {
+ continue;
+ }
+ if(jsTest.authenticate(nodes[i]) != 1) {
+ return false;
+ }
+ }
+ return true;
+ });
+}
+
+jsTest.isMongos = function(conn) {
+ return conn.getDB('admin').isMaster().msg=='isdbgrid';
+}
+
+// Pass this method a function to call repeatedly until
+// that function returns true. Example:
+// attempt({timeout: 20000, desc: "get master"}, function() { // return false until success })
+jsTest.attempt = function( opts, func ) {
+ var timeout = opts.timeout || 1000;
+ var tries = 0;
+ var sleepTime = 500;
+ var result = null;
+ var context = opts.context || this;
+
+ while((result = func.apply(context)) == false) {
+ tries += 1;
+ sleep(sleepTime);
+ if( tries * sleepTime > timeout) {
+ throw('[' + opts['desc'] + ']' + " timed out after " + timeout + "ms ( " + tries + " tries )");
+ }
+ }
+
+ return result;
+}
+
+
+shellPrintHelper = function (x) {
+ if (typeof (x) == "undefined") {
+ // Make sure that we have a db var before we use it
+ // TODO: This implicit calling of GLE can cause subtle, hard to track issues - remove?
+ if (__callLastError && typeof( db ) != "undefined" && db.getMongo ) {
+ __callLastError = false;
+ // explicit w:1 so that replset getLastErrorDefaults aren't used here which would be bad.
+ var err = db.getLastError(1);
+ if (err != null) {
+ print(err);
+ }
+ }
+ return;
+ }
+
+ if (x == __magicNoPrint)
+ return;
+
+ if (x == null) {
+ print("null");
+ return;
+ }
+
+ if (typeof x != "object")
+ return print(x);
+
+ var p = x.shellPrint;
+ if (typeof p == "function")
+ return x.shellPrint();
+
+ var p = x.tojson;
+ if (typeof p == "function")
+ print(x.tojson());
+ else
+ print(tojson(x));
+}
+
+shellAutocomplete = function ( /*prefix*/ ) { // outer scope function called on init. Actual function at end
+
+ var universalMethods = "constructor prototype toString valueOf toLocaleString hasOwnProperty propertyIsEnumerable".split( ' ' );
+
+ var builtinMethods = {}; // uses constructor objects as keys
+ builtinMethods[Array] = "length concat join pop push reverse shift slice sort splice unshift indexOf lastIndexOf every filter forEach map some".split( ' ' );
+ builtinMethods[Boolean] = "".split( ' ' ); // nothing more than universal methods
+ builtinMethods[Date] = "getDate getDay getFullYear getHours getMilliseconds getMinutes getMonth getSeconds getTime getTimezoneOffset getUTCDate getUTCDay getUTCFullYear getUTCHours getUTCMilliseconds getUTCMinutes getUTCMonth getUTCSeconds getYear parse setDate setFullYear setHours setMilliseconds setMinutes setMonth setSeconds setTime setUTCDate setUTCFullYear setUTCHours setUTCMilliseconds setUTCMinutes setUTCMonth setUTCSeconds setYear toDateString toGMTString toLocaleDateString toLocaleTimeString toTimeString toUTCString UTC".split( ' ' );
+ builtinMethods[Math] = "E LN2 LN10 LOG2E LOG10E PI SQRT1_2 SQRT2 abs acos asin atan atan2 ceil cos exp floor log max min pow random round sin sqrt tan".split( ' ' );
+ builtinMethods[Number] = "MAX_VALUE MIN_VALUE NEGATIVE_INFINITY POSITIVE_INFINITY toExponential toFixed toPrecision".split( ' ' );
+ builtinMethods[RegExp] = "global ignoreCase lastIndex multiline source compile exec test".split( ' ' );
+ builtinMethods[String] = "length charAt charCodeAt concat fromCharCode indexOf lastIndexOf match replace search slice split substr substring toLowerCase toUpperCase".split( ' ' );
+ builtinMethods[Function] = "call apply".split( ' ' );
+ builtinMethods[Object] = "bsonsize".split( ' ' );
+
+ builtinMethods[Mongo] = "find update insert remove".split( ' ' );
+ builtinMethods[BinData] = "hex base64 length subtype".split( ' ' );
+
+ var extraGlobals = "Infinity NaN undefined null true false decodeURI decodeURIComponent encodeURI encodeURIComponent escape eval isFinite isNaN parseFloat parseInt unescape Array Boolean Date Math Number RegExp String print load gc MinKey MaxKey Mongo NumberLong ObjectId DBPointer UUID BinData Map".split( ' ' );
+
+ var isPrivate = function( name ) {
+ if ( shellAutocomplete.showPrivate ) return false;
+ if ( name == '_id' ) return false;
+ if ( name[0] == '_' ) return true;
+ if ( name[name.length - 1] == '_' ) return true; // some native functions have an extra name_ method
+ return false;
+ }
+
+ var customComplete = function( obj ) {
+ try {
+ if ( obj.__proto__.constructor.autocomplete ) {
+ var ret = obj.constructor.autocomplete( obj );
+ if ( ret.constructor != Array ) {
+ print( "\nautocompleters must return real Arrays" );
+ return [];
+ }
+ return ret;
+ } else {
+ return [];
+ }
+ } catch ( e ) {
+ // print( e ); // uncomment if debugging custom completers
+ return [];
+ }
+ }
+
+ var worker = function( prefix ) {
+ var global = ( function() { return this; } ).call(); // trick to get global object
+
+ var curObj = global;
+ var parts = prefix.split( '.' );
+ for ( var p = 0; p < parts.length - 1; p++ ) { // doesn't include last part
+ curObj = curObj[parts[p]];
+ if ( curObj == null )
+ return [];
+ }
+
+ var lastPrefix = parts[parts.length - 1] || '';
+ var lastPrefixLowercase = lastPrefix.toLowerCase()
+ var beginning = parts.slice( 0, parts.length - 1 ).join( '.' );
+ if ( beginning.length )
+ beginning += '.';
+
+ var possibilities = new Array().concat(
+ universalMethods,
+ Object.keySet( curObj ),
+ Object.keySet( curObj.__proto__ ),
+ builtinMethods[curObj] || [], // curObj is a builtin constructor
+ builtinMethods[curObj.__proto__.constructor] || [], // curObj is made from a builtin constructor
+ curObj == global ? extraGlobals : [],
+ customComplete( curObj )
+ );
+
+ var noDuplicates = {}; // see http://dreaminginjavascript.wordpress.com/2008/08/22/eliminating-duplicates/
+ for ( var i = 0; i < possibilities.length; i++ ) {
+ var p = possibilities[i];
+ if ( typeof ( curObj[p] ) == "undefined" && curObj != global ) continue; // extraGlobals aren't in the global object
+ if ( p.length == 0 || p.length < lastPrefix.length ) continue;
+ if ( lastPrefix[0] != '_' && isPrivate( p ) ) continue;
+ if ( p.match( /^[0-9]+$/ ) ) continue; // don't array number indexes
+ if ( p.substr( 0, lastPrefix.length ).toLowerCase() != lastPrefixLowercase ) continue;
+
+ var completion = beginning + p;
+ if ( curObj[p] && curObj[p].constructor == Function && p != 'constructor' )
+ completion += '(';
+
+ noDuplicates[completion] = 0;
+ }
+
+ var ret = [];
+ for ( i in noDuplicates )
+ ret.push( i );
+
+ return ret;
+ }
+
+ // this is the actual function that gets assigned to shellAutocomplete
+ return function( prefix ) {
+ try {
+ __autocomplete__ = worker( prefix ).sort();
+ } catch ( e ) {
+ print( "exception during autocomplete: " + tojson( e.message ) );
+ __autocomplete__ = [];
+ }
+ }
+} ();
+
+shellAutocomplete.showPrivate = false; // toggle to show (useful when working on internals)
+
+shellHelper = function( command , rest , shouldPrint ){
+ command = command.trim();
+ var args = rest.trim().replace(/\s*;$/,"").split( "\s+" );
+
+ if ( ! shellHelper[command] )
+ throw "no command [" + command + "]";
+
+ var res = shellHelper[command].apply( null , args );
+ if ( shouldPrint ){
+ shellPrintHelper( res );
+ }
+ return res;
+}
+
+shellHelper.use = function (dbname) {
+ var s = "" + dbname;
+ if (s == "") {
+ print("bad use parameter");
+ return;
+ }
+ db = db.getMongo().getDB(dbname);
+ print("switched to db " + db.getName());
+}
+
+shellHelper.set = function (str) {
+ if (str == "") {
+ print("bad use parameter");
+ return;
+ }
+ tokens = str.split(" ");
+ param = tokens[0];
+ value = tokens[1];
+
+ if ( value == undefined ) value = true;
+ // value comes in as a string..
+ if ( value == "true" ) value = true;
+ if ( value == "false" ) value = false;
+
+ if (param == "verbose") {
+ _verboseShell = value;
+ }
+ print("set " + param + " to " + value);
+}
+
+shellHelper.it = function(){
+ if ( typeof( ___it___ ) == "undefined" || ___it___ == null ){
+ print( "no cursor" );
+ return;
+ }
+ shellPrintHelper( ___it___ );
+}
+
+shellHelper.show = function (what) {
+ assert(typeof what == "string");
+
+ var args = what.split( /\s+/ );
+ what = args[0]
+ args = args.splice(1)
+
+ if (what == "profile") {
+ if (db.system.profile.count() == 0) {
+ print("db.system.profile is empty");
+ print("Use db.setProfilingLevel(2) will enable profiling");
+ print("Use db.system.profile.find() to show raw profile entries");
+ }
+ else {
+ print();
+ db.system.profile.find({ millis: { $gt: 0} }).sort({ $natural: -1 }).limit(5).forEach(
+ function (x) {
+ print("" + x.op + "\t" + x.ns + " " + x.millis + "ms " + String(x.ts).substring(0, 24));
+ var l = "";
+ for ( var z in x ){
+ if ( z == "op" || z == "ns" || z == "millis" || z == "ts" )
+ continue;
+
+ var val = x[z];
+ var mytype = typeof(val);
+
+ if ( mytype == "string" ||
+ mytype == "number" )
+ l += z + ":" + val + " ";
+ else if ( mytype == "object" )
+ l += z + ":" + tojson(val ) + " ";
+ else if ( mytype == "boolean" )
+ l += z + " ";
+ else
+ l += z + ":" + val + " ";
+
+ }
+ print( l );
+ print("\n");
+ }
+ )
+ }
+ return "";
+ }
+
+ if (what == "users") {
+ db.system.users.find().forEach(printjson);
+ return "";
+ }
+
+ if (what == "collections" || what == "tables") {
+ db.getCollectionNames().forEach(function (x) { print(x) });
+ return "";
+ }
+
+ if (what == "dbs") {
+ var dbs = db.getMongo().getDBs();
+ var size = {};
+ dbs.databases.forEach(function (x) { size[x.name] = x.sizeOnDisk; });
+ var names = dbs.databases.map(function (z) { return z.name; }).sort();
+ names.forEach(function (n) {
+ if (size[n] > 1) {
+ print(n + "\t" + size[n] / 1024 / 1024 / 1024 + "GB");
+ } else {
+ print(n + "\t(empty)");
+ }
+ });
+ //db.getMongo().getDBNames().sort().forEach(function (x) { print(x) });
+ return "";
+ }
+
+ if (what == "log" ) {
+ var n = "global";
+ if ( args.length > 0 )
+ n = args[0]
+
+ var res = db.adminCommand( { getLog : n } )
+ for ( var i=0; i<res.log.length; i++){
+ print( res.log[i] )
+ }
+ return ""
+ }
+
+ if (what == "logs" ) {
+ var res = db.adminCommand( { getLog : "*" } )
+ for ( var i=0; i<res.names.length; i++){
+ print( res.names[i] )
+ }
+ return ""
+ }
+
+
+ throw "don't know how to show [" + what + "]";
+
+}
+
+if ( typeof( Map ) == "undefined" ){
+ Map = function(){
+ this._data = {};
+ }
+}
+
+Map.hash = function( val ){
+ if ( ! val )
+ return val;
+
+ switch ( typeof( val ) ){
+ case 'string':
+ case 'number':
+ case 'date':
+ return val.toString();
+ case 'object':
+ case 'array':
+ var s = "";
+ for ( var k in val ){
+ s += k + val[k];
+ }
+ return s;
+ }
+
+ throw "can't hash : " + typeof( val );
+}
+
+Map.prototype.put = function( key , value ){
+ var o = this._get( key );
+ var old = o.value;
+ o.value = value;
+ return old;
+}
+
+Map.prototype.get = function( key ){
+ return this._get( key ).value;
+}
+
+Map.prototype._get = function( key ){
+ var h = Map.hash( key );
+ var a = this._data[h];
+ if ( ! a ){
+ a = [];
+ this._data[h] = a;
+ }
+
+ for ( var i=0; i<a.length; i++ ){
+ if ( friendlyEqual( key , a[i].key ) ){
+ return a[i];
+ }
+ }
+ var o = { key : key , value : null };
+ a.push( o );
+ return o;
+}
+
+Map.prototype.values = function(){
+ var all = [];
+ for ( var k in this._data ){
+ this._data[k].forEach( function(z){ all.push( z.value ); } );
+ }
+ return all;
+}
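+
+// Usage sketch (example values are arbitrary): Map hashes keys with Map.hash and falls
+// back to friendlyEqual on collisions, so documents can be used as keys.
+//   var m = new Map();
+//   m.put( { a : 1 }, "first" );
+//   m.get( { a : 1 } )   // "first", even though the key is a distinct object instance
+//   m.values()           // [ "first" ]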
+
+if ( typeof( gc ) == "undefined" ){
+ gc = function(){
+ print( "warning: using noop gc()" );
+ }
+}
+
+
+Math.sigFig = function( x , N ){
+ if ( ! N ){
+ N = 3;
+ }
+ var p = Math.pow( 10, N - Math.ceil( Math.log( Math.abs(x) ) / Math.log( 10 )) );
+ return Math.round(x*p)/p;
+}
+
+Random = function() {}
+
+// set random seed
+Random.srand = function( s ) { _srand( s ); }
+
+// random number 0 <= r < 1
+Random.rand = function() { return _rand(); }
+
+// random integer 0 <= r < n
+Random.randInt = function( n ) { return Math.floor( Random.rand() * n ); }
+
+Random.setRandomSeed = function( s ) {
+ s = s || new Date().getTime();
+ print( "setting random seed: " + s );
+ Random.srand( s );
+}
+
+// generate a random value from the exponential distribution with the specified mean
+Random.genExp = function( mean ) {
+ return -Math.log( Random.rand() ) * mean;
+}
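+
+// Usage sketch: seed once, then draw variates (the numbers produced depend on the seed;
+// those shown in comments are only indicative).
+//   Random.setRandomSeed( 1234 );
+//   Random.randInt( 10 );    // an integer in [0, 10)
+//   Random.genExp( 50 );     // exponentially distributed with mean ~50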
+
+Geo = {};
+Geo.distance = function( a , b ){
+ var ax = null;
+ var ay = null;
+ var bx = null;
+ var by = null;
+
+ for ( var key in a ){
+ if ( ax == null )
+ ax = a[key];
+ else if ( ay == null )
+ ay = a[key];
+ }
+
+ for ( var key in b ){
+ if ( bx == null )
+ bx = b[key];
+ else if ( by == null )
+ by = b[key];
+ }
+
+ return Math.sqrt( Math.pow( by - ay , 2 ) +
+ Math.pow( bx - ax , 2 ) );
+}
+
+Geo.sphereDistance = function( a , b ){
+ var ax = null;
+ var ay = null;
+ var bx = null;
+ var by = null;
+
+ // TODO swap order of x and y when done on server
+ for ( var key in a ){
+ if ( ax == null )
+ ax = a[key] * (Math.PI/180);
+ else if ( ay == null )
+ ay = a[key] * (Math.PI/180);
+ }
+
+ for ( var key in b ){
+ if ( bx == null )
+ bx = b[key] * (Math.PI/180);
+ else if ( by == null )
+ by = b[key] * (Math.PI/180);
+ }
+
+ var sin_x1=Math.sin(ax), cos_x1=Math.cos(ax);
+ var sin_y1=Math.sin(ay), cos_y1=Math.cos(ay);
+ var sin_x2=Math.sin(bx), cos_x2=Math.cos(bx);
+ var sin_y2=Math.sin(by), cos_y2=Math.cos(by);
+
+ var cross_prod =
+ (cos_y1*cos_x1 * cos_y2*cos_x2) +
+ (cos_y1*sin_x1 * cos_y2*sin_x2) +
+ (sin_y1 * sin_y2);
+
+ if (cross_prod >= 1 || cross_prod <= -1){
+ // fun with floats
+ assert( Math.abs(cross_prod)-1 < 1e-6 );
+ return cross_prod > 0 ? 0 : Math.PI;
+ }
+
+ return Math.acos(cross_prod);
+}
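+
+// Usage sketch: field order (not field names) supplies the coordinates in degrees; the
+// result is the central angle in radians, so multiply by the Earth's radius (~6371 km)
+// to approximate kilometres. Example points are arbitrary.
+//   Geo.sphereDistance( { lng : 0, lat : 0 }, { lng : 90, lat : 0 } )   // ~Math.PI / 2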
+
+rs = function () { return "try rs.help()"; }
+
+rs.help = function () {
+ print("\trs.status() { replSetGetStatus : 1 } checks repl set status");
+ print("\trs.initiate() { replSetInitiate : null } initiates set with default settings");
+ print("\trs.initiate(cfg) { replSetInitiate : cfg } initiates set with configuration cfg");
+ print("\trs.conf() get the current configuration object from local.system.replset");
+ print("\trs.reconfig(cfg) updates the configuration of a running replica set with cfg (disconnects)");
+ print("\trs.add(hostportstr) add a new member to the set with default attributes (disconnects)");
+ print("\trs.add(membercfgobj) add a new member to the set with extra attributes (disconnects)");
+ print("\trs.addArb(hostportstr) add a new member which is arbiterOnly:true (disconnects)");
+ print("\trs.stepDown([secs]) step down as primary (momentarily) (disconnects)");
+ print("\trs.freeze(secs) make a node ineligible to become primary for the time specified");
+ print("\trs.remove(hostportstr) remove a host from the replica set (disconnects)");
+ print("\trs.slaveOk() shorthand for db.getMongo().setSlaveOk()");
+ print();
+ print("\tdb.isMaster() check who is primary");
+ print();
+ print("\treconfiguration helpers disconnect from the database so the shell will display");
+ print("\tan error, even if the command succeeds.");
+ print("\tsee also http://<mongod_host>:28017/_replSet for additional diagnostic info");
+}
+rs.slaveOk = function (value) { return db.getMongo().setSlaveOk(value); }
+rs.status = function () { return db._adminCommand("replSetGetStatus"); }
+rs.isMaster = function () { return db.isMaster(); }
+rs.initiate = function (c) { return db._adminCommand({ replSetInitiate: c }); }
+rs._runCmd = function (c) {
+ // after the command, catch the disconnect and reconnect if necessary
+ var res = null;
+ try {
+ res = db.adminCommand(c);
+ }
+ catch (e) {
+ if (("" + e).indexOf("error doing query") >= 0) {
+ // closed connection. reconnect.
+ db.getLastErrorObj();
+ var o = db.getLastErrorObj();
+ if (o.ok) {
+ print("reconnected to server after rs command (which is normal)");
+ }
+ else {
+ printjson(o);
+ }
+ }
+ else {
+ print("shell got exception during repl set operation: " + e);
+ print("in some circumstances, the primary steps down and closes connections on a reconfig");
+ }
+ return "";
+ }
+ return res;
+}
+rs.reconfig = function (cfg, options) {
+ cfg.version = rs.conf().version + 1;
+ cmd = { replSetReconfig: cfg };
+ for (var i in options) {
+ cmd[i] = options[i];
+ }
+ return this._runCmd(cmd);
+}
+rs.add = function (hostport, arb) {
+ var cfg = hostport;
+
+ var local = db.getSisterDB("local");
+ assert(local.system.replset.count() <= 1, "error: local.system.replset has unexpected contents");
+ var c = local.system.replset.findOne();
+ assert(c, "no config object retrievable from local.system.replset");
+
+ c.version++;
+
+ var max = 0;
+ for (var i in c.members)
+ if (c.members[i]._id > max) max = c.members[i]._id;
+ if (isString(hostport)) {
+ cfg = { _id: max + 1, host: hostport };
+ if (arb)
+ cfg.arbiterOnly = true;
+ }
+ c.members.push(cfg);
+ return this._runCmd({ replSetReconfig: c });
+}
+rs.stepDown = function (secs) { return db._adminCommand({ replSetStepDown:(secs === undefined) ? 60:secs}); }
+rs.freeze = function (secs) { return db._adminCommand({replSetFreeze:secs}); }
+rs.addArb = function (hn) { return this.add(hn, true); }
+rs.conf = function () { return db.getSisterDB("local").system.replset.findOne(); }
+rs.config = function () { return rs.conf(); }
+
+rs.remove = function (hn) {
+ var local = db.getSisterDB("local");
+ assert(local.system.replset.count() <= 1, "error: local.system.replset has unexpected contents");
+ var c = local.system.replset.findOne();
+ assert(c, "no config object retrievable from local.system.replset");
+ c.version++;
+
+ for (var i in c.members) {
+ if (c.members[i].host == hn) {
+ c.members.splice(i, 1);
+ return db._adminCommand({ replSetReconfig : c});
+ }
+ }
+
+ return "error: couldn't find "+hn+" in "+tojson(c.members);
+};
+
+rs.debug = {};
+
+rs.debug.nullLastOpWritten = function(primary, secondary) {
+ var p = connect(primary+"/local");
+ var s = connect(secondary+"/local");
+ s.getMongo().setSlaveOk();
+
+ var secondToLast = s.oplog.rs.find().sort({$natural : -1}).limit(1).next();
+ var last = p.runCommand({findAndModify : "oplog.rs",
+ query : {ts : {$gt : secondToLast.ts}},
+ sort : {$natural : 1},
+ update : {$set : {op : "n"}}});
+
+ if (!last.value.o || !last.value.o._id) {
+ print("couldn't find an _id?");
+ }
+ else {
+ last.value.o = {_id : last.value.o._id};
+ }
+
+ print("nulling out this op:");
+ printjson(last);
+};
+
+rs.debug.getLastOpWritten = function(server) {
+ var s = db.getSisterDB("local");
+ if (server) {
+ s = connect(server+"/local");
+ }
+ s.getMongo().setSlaveOk();
+
+ return s.oplog.rs.find().sort({$natural : -1}).limit(1).next();
+};
+
+
+help = shellHelper.help = function (x) {
+ if (x == "mr") {
+ print("\nSee also http://www.mongodb.org/display/DOCS/MapReduce");
+ print("\nfunction mapf() {");
+ print(" // 'this' holds current document to inspect");
+ print(" emit(key, value);");
+ print("}");
+ print("\nfunction reducef(key,value_array) {");
+ print(" return reduced_value;");
+ print("}");
+ print("\ndb.mycollection.mapReduce(mapf, reducef[, options])");
+ print("\noptions");
+ print("{[query : <query filter object>]");
+ print(" [, sort : <sort the query. useful for optimization>]");
+ print(" [, limit : <number of objects to return from collection>]");
+ print(" [, out : <output-collection name>]");
+ print(" [, keeptemp: <true|false>]");
+ print(" [, finalize : <finalizefunction>]");
+ print(" [, scope : <object where fields go into javascript global scope >]");
+ print(" [, verbose : true]}\n");
+ return;
+ } else if (x == "connect") {
+ print("\nNormally one specifies the server on the mongo shell command line. Run mongo --help to see those options.");
+ print("Additional connections may be opened:\n");
+ print(" var x = new Mongo('host[:port]');");
+ print(" var mydb = x.getDB('mydb');");
+ print(" or");
+ print(" var mydb = connect('host[:port]/mydb');");
+ print("\nNote: the REPL prompt only auto-reports getLastError() for the shell command line connection.\n");
+ return;
+ }
+ else if (x == "keys") {
+ print("Tab completion and command history is available at the command prompt.\n");
+ print("Some emacs keystrokes are available too:");
+ print(" Ctrl-A start of line");
+ print(" Ctrl-E end of line");
+ print(" Ctrl-K del to end of line");
+ print("\nMulti-line commands");
+ print("You can enter a multi line javascript expression. If parens, braces, etc. are not closed, you will see a new line ");
+ print("beginning with '...' characters. Type the rest of your expression. Press Ctrl-C to abort the data entry if you");
+ print("get stuck.\n");
+ }
+ else if (x == "misc") {
+ print("\tb = new BinData(subtype,base64str) create a BSON BinData value");
+ print("\tb.subtype() the BinData subtype (0..255)");
+ print("\tb.length() length of the BinData data in bytes");
+ print("\tb.hex() the data as a hex encoded string");
+ print("\tb.base64() the data as a base 64 encoded string");
+ print("\tb.toString()");
+ print();
+ print("\tb = HexData(subtype,hexstr) create a BSON BinData value from a hex string");
+ print("\tb = UUID(hexstr) create a BSON BinData value of UUID subtype");
+ print("\tb = MD5(hexstr) create a BSON BinData value of MD5 subtype");
+ print();
+ print("\to = new ObjectId() create a new ObjectId");
+ print("\to.getTimestamp() return timestamp derived from first 32 bits of the OID");
+ print("\to.isObjectId()");
+ print("\to.toString()");
+ print("\to.equals(otherid)");
+ print();
+ print("\td = ISODate() like Date() but behaves more intuitively when used");
+ print("\td = ISODate('YYYY-MM-DD hh:mm:ss') without an explicit \"new \" prefix on construction");
+ return;
+ }
+ else if (x == "admin") {
+ print("\tls([path]) list files");
+ print("\tpwd() returns current directory");
+ print("\tlistFiles([path]) returns file list");
+ print("\thostname() returns name of this host");
+ print("\tcat(fname) returns contents of text file as a string");
+ print("\tremoveFile(f) delete a file or directory");
+ print("\tload(jsfilename) load and execute a .js file");
+ print("\trun(program[, args...]) spawn a program and wait for its completion");
+ print("\trunProgram(program[, args...]) same as run(), above");
+ print("\tsleep(m) sleep m milliseconds");
+ print("\tgetMemInfo() diagnostic");
+ return;
+ }
+ else if (x == "test") {
+ print("\tstartMongodEmpty(args) DELETES DATA DIR and then starts mongod");
+ print("\t returns a connection to the new server");
+ print("\tstartMongodTest(port,dir,options)");
+ print("\t DELETES DATA DIR");
+ print("\t automatically picks port #s starting at 27000 and increasing");
+ print("\t or you can specify the port as the first arg");
+ print("\t dir is /data/db/<port>/ if not specified as the 2nd arg");
+ print("\t returns a connection to the new server");
+ print("\tresetDbpath(dirpathstr) deletes everything under the dir specified including subdirs");
+ print("\tstopMongoProgram(port[, signal])");
+ return;
+ }
+ else if (x == "") {
+ print("\t" + "db.help() help on db methods");
+ print("\t" + "db.mycoll.help() help on collection methods");
+ print("\t" + "sh.help() sharding helpers");
+ print("\t" + "rs.help() replica set helpers");
+ print("\t" + "help admin administrative help");
+ print("\t" + "help connect connecting to a db help");
+ print("\t" + "help keys key shortcuts");
+ print("\t" + "help misc misc things to know");
+ print("\t" + "help mr mapreduce");
+ print();
+ print("\t" + "show dbs show database names");
+ print("\t" + "show collections show collections in current database");
+ print("\t" + "show users show users in current database");
+ print("\t" + "show profile show most recent system.profile entries with time >= 1ms");
+ print("\t" + "show logs show the accessible logger names");
+ print("\t" + "show log [name] prints out the last segment of log in memory, 'global' is default");
+ print("\t" + "use <db_name> set current database");
+ print("\t" + "db.foo.find() list objects in collection foo");
+ print("\t" + "db.foo.find( { a : 1 } ) list objects in foo where a == 1");
+ print("\t" + "it result of the last line evaluated; use to further iterate");
+ print("\t" + "DBQuery.shellBatchSize = x set default number of items to display on shell");
+ print("\t" + "exit quit the mongo shell");
+ }
+ else
+ print("unknown help option");
+}
diff --git a/src/mongo/shell/utils_sh.js b/src/mongo/shell/utils_sh.js
new file mode 100644
index 00000000000..5c7fbafa75d
--- /dev/null
+++ b/src/mongo/shell/utils_sh.js
@@ -0,0 +1,164 @@
+sh = function() { return "try sh.help();" }
+
+sh._checkMongos = function() {
+ var x = db.runCommand( "ismaster" );
+ if ( x.msg != "isdbgrid" )
+ throw "not connected to a mongos"
+}
+
+sh._checkFullName = function( fullName ) {
+ assert( fullName , "neeed a full name" )
+ assert( fullName.indexOf( "." ) > 0 , "name needs to be fully qualified <db>.<collection>'" )
+}
+
+sh._adminCommand = function( cmd , skipCheck ) {
+ if ( ! skipCheck ) sh._checkMongos();
+ var res = db.getSisterDB( "admin" ).runCommand( cmd );
+
+ if ( res == null || ! res.ok ) {
+ print( "command failed: " + tojson( res ) )
+ }
+
+ return res;
+}
+
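+// human-readable size formatting, e.g. sh._dataFormat( 2097152 ) returns "2Mb"; values under 1024 are shown in bytes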
+sh._dataFormat = function( bytes ){
+ if( bytes < 1024 ) return Math.floor( bytes ) + "b"
+ if( bytes < 1024 * 1024 ) return Math.floor( bytes / 1024 ) + "kb"
+ if( bytes < 1024 * 1024 * 1024 ) return Math.floor( ( Math.floor( bytes / 1024 ) / 1024 ) * 100 ) / 100 + "Mb"
+ return Math.floor( ( Math.floor( bytes / ( 1024 * 1024 ) ) / 1024 ) * 100 ) / 100 + "Gb"
+}
+
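+// builds a regex matching chunk names for a collection, e.g. sh._collRE( "test.foo" ) gives /^test\.foo-.*/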
+sh._collRE = function( coll ){
+ return RegExp( "^" + (coll + "").replace(/\./g, "\\.") + "-.*" )
+}
+
+sh._pchunk = function( chunk ){
+ return "[" + tojson( chunk.min ) + " -> " + tojson( chunk.max ) + "]"
+}
+
+sh.help = function() {
+ print( "\tsh.addShard( host ) server:port OR setname/server:port" )
+ print( "\tsh.enableSharding(dbname) enables sharding on the database dbname" )
+ print( "\tsh.shardCollection(fullName,key,unique) shards the collection" );
+
+ print( "\tsh.splitFind(fullName,find) splits the chunk that find is in at the median" );
+ print( "\tsh.splitAt(fullName,middle) splits the chunk that middle is in at middle" );
+ print( "\tsh.moveChunk(fullName,find,to) move the chunk where 'find' is to 'to' (name of shard)");
+
+ print( "\tsh.setBalancerState( <bool on or not> ) turns the balancer on or off true=on, false=off" );
+ print( "\tsh.getBalancerState() return true if on, off if not" );
+ print( "\tsh.isBalancerRunning() return true if the balancer is running on any mongos" );
+
+ print( "\tsh.status() prints a general overview of the cluster" )
+}
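+
+// Typical usage when connected to a mongos (db/collection/key names are illustrative):
+//   sh.enableSharding( "mydb" )
+//   sh.shardCollection( "mydb.mycoll" , { _id : 1 } )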
+
+sh.status = function( verbose , configDB ) {
+    // TODO: move the actual command here
+ printShardingStatus( configDB , verbose );
+}
+
+sh.addShard = function( url ){
+ sh._adminCommand( { addShard : url } , true )
+}
+
+sh.enableSharding = function( dbname ) {
+ assert( dbname , "need a valid dbname" )
+ sh._adminCommand( { enableSharding : dbname } )
+}
+
+sh.shardCollection = function( fullName , key , unique ) {
+ sh._checkFullName( fullName )
+ assert( key , "need a key" )
+ assert( typeof( key ) == "object" , "key needs to be an object" )
+
+ var cmd = { shardCollection : fullName , key : key }
+ if ( unique )
+ cmd.unique = true;
+
+ sh._adminCommand( cmd )
+}
+
+sh.splitFind = function( fullName , find ) {
+ sh._checkFullName( fullName )
+ sh._adminCommand( { split : fullName , find : find } )
+}
+
+sh.splitAt = function( fullName , middle ) {
+ sh._checkFullName( fullName )
+ sh._adminCommand( { split : fullName , middle : middle } )
+}
+
+sh.moveChunk = function( fullName , find , to ) {
+ sh._checkFullName( fullName );
+ return sh._adminCommand( { moveChunk : fullName , find : find , to : to } )
+}
+
+sh.setBalancerState = function( onOrNot ) {
+ db.getSisterDB( "config" ).settings.update({ _id: "balancer" }, { $set : { stopped: onOrNot ? false : true } }, true );
+}
+
+sh.getBalancerState = function() {
+ var x = db.getSisterDB( "config" ).settings.findOne({ _id: "balancer" } )
+ if ( x == null )
+ return true;
+ return ! x.stopped;
+}
+
+sh.isBalancerRunning = function () {
+ var x = db.getSisterDB("config").locks.findOne({ _id: "balancer" });
+ if (x == null) {
+ print("config.locks collection empty or missing. be sure you are connected to a mongos");
+ return false;
+ }
+ return x.state > 0;
+}
+
+sh.stopBalancer = function( timeout, interval ) {
+ sh.setBalancerState( false )
+ sh.waitForBalancer( false, timeout, interval )
+}
+
+sh.startBalancer = function( timeout, interval ) {
+ sh.setBalancerState( true )
+ sh.waitForBalancer( true, timeout, interval )
+}
+
+sh.waitForBalancer = function( onOrNot, timeout, interval ){
+
+ if( onOrNot != undefined ){
+
+ // Wait for balancer to be on or off
+ // Can also wait for particular balancer state
+ var state = null
+ if( ! onOrNot ) state = 0
+ else if( onOrNot == true ) state = 2
+ else state = onOrNot
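+        // lock.state: 0 = unlocked (balancer stopped), 2 = locked (a balancing round in progress)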
+
+ assert.soon( function(){ var lock = db.getSisterDB( "config" ).locks.findOne( { _id : "balancer" } );
+ return ( lock == null && state == 0 ) || ( lock != null && lock.state == state )
+ },
+ "waited too long for balancer to " + ( state > 0 ? "start" : "stop" ) + " [ state : " + state + "]",
+ timeout,
+ interval
+ )
+
+ }
+ else{
+
+ // Wait for balancer to run at least once
+
+ var lock = db.getSisterDB( "config" ).locks.findOne({ _id : "balancer" })
+ var ts = lock ? lock.ts : ""
+
+ assert.soon( function(){ var lock = db.getSisterDB( "config" ).locks.findOne({ _id : "balancer" });
+ if( ! lock ) return false;
+ return lock.ts != ts
+ },
+ "waited too long for balancer to activate",
+ timeout,
+ interval
+ )
+ }
+}
+
diff --git a/src/mongo/targetver.h b/src/mongo/targetver.h
new file mode 100644
index 00000000000..eb1b69bceba
--- /dev/null
+++ b/src/mongo/targetver.h
@@ -0,0 +1,20 @@
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+#ifndef _WIN32_WINNT // Allow use of features specific to Windows Vista or later.
+#define _WIN32_WINNT 0x0600 // Change this to the appropriate value to target other versions of Windows.
+#endif
diff --git a/src/mongo/tools/bridge.cpp b/src/mongo/tools/bridge.cpp
new file mode 100644
index 00000000000..341a1dae687
--- /dev/null
+++ b/src/mongo/tools/bridge.cpp
@@ -0,0 +1,166 @@
+// bridge.cpp
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "../util/net/message.h"
+#include "../util/net/listen.h"
+#include "../client/dbclient.h"
+#include "../db/dbmessage.h"
+
+using namespace mongo;
+using namespace std;
+
+int port = 0;
+string destUri;
+
+class Forwarder {
+public:
+ Forwarder( MessagingPort &mp ) : mp_( mp ) {
+ }
+ void operator()() const {
+ DBClientConnection dest;
+ string errmsg;
+ while( !dest.connect( destUri, errmsg ) )
+ sleepmillis( 500 );
+ Message m;
+ while( 1 ) {
+ try {
+ m.reset();
+ if ( !mp_.recv( m ) ) {
+ cout << "end connection " << mp_.remoteString() << endl;
+ mp_.shutdown();
+ break;
+ }
+
+ int oldId = m.header()->id;
+ if ( m.operation() == dbQuery || m.operation() == dbMsg || m.operation() == dbGetMore ) {
+ bool exhaust = false;
+ if ( m.operation() == dbQuery ) {
+ DbMessage d( m );
+ QueryMessage q( d );
+ exhaust = q.queryOptions & QueryOption_Exhaust;
+ }
+ Message response;
+ dest.port().call( m, response );
+ mp_.reply( m, response, oldId );
+ while ( exhaust ) {
+ MsgData *header = response.header();
+ QueryResult *qr = (QueryResult *) header;
+ if ( qr->cursorId ) {
+ response.reset();
+ dest.port().recv( response );
+ mp_.reply( m, response ); // m argument is ignored anyway
+ }
+ else {
+ exhaust = false;
+ }
+ }
+ }
+ else {
+ dest.port().say( m, oldId );
+ }
+ }
+ catch ( ... ) {
+ log() << "caught exception in Forwarder, continuing" << endl;
+ }
+ }
+ }
+private:
+ MessagingPort &mp_;
+};
+
+set<MessagingPort*> ports;
+
+class MyListener : public Listener {
+public:
+ MyListener( int port ) : Listener( "bridge" , "", port ) {}
+ virtual void accepted(MessagingPort *mp) {
+ ports.insert( mp );
+ Forwarder f( *mp );
+ boost::thread t( f );
+ }
+};
+
+auto_ptr< MyListener > listener;
+
+#if !defined(_WIN32)
+void cleanup( int sig ) {
+ ListeningSockets::get()->closeAll();
+ for ( set<MessagingPort*>::iterator i = ports.begin(); i != ports.end(); i++ )
+ (*i)->shutdown();
+ ::exit( 0 );
+}
+
+void myterminate() {
+ rawOut( "bridge terminate() called, printing stack:" );
+ printStackTrace();
+ ::abort();
+}
+
+void setupSignals() {
+ signal( SIGINT , cleanup );
+ signal( SIGTERM , cleanup );
+ signal( SIGPIPE , cleanup );
+ signal( SIGABRT , cleanup );
+ signal( SIGSEGV , cleanup );
+ signal( SIGBUS , cleanup );
+ signal( SIGFPE , cleanup );
+ set_terminate( myterminate );
+}
+#else
+inline void setupSignals() {}
+#endif
+
+void helpExit() {
+ cout << "usage mongobridge --port <port> --dest <destUri>" << endl;
+ cout << " port: port to listen for mongo messages" << endl;
+ cout << " destUri: uri of remote mongod instance" << endl;
+ ::exit( -1 );
+}
+
+void check( bool b ) {
+ if ( !b )
+ helpExit();
+}
+
+int main( int argc, char **argv ) {
+ static StaticObserver staticObserver;
+
+ setupSignals();
+
+ check( argc == 5 );
+
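+    // arguments come in "--flag value" pairs, so flag names must land on odd indices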
+ for( int i = 1; i < 5; ++i ) {
+ check( i % 2 != 0 );
+ if ( strcmp( argv[ i ], "--port" ) == 0 ) {
+ port = strtol( argv[ ++i ], 0, 10 );
+ }
+ else if ( strcmp( argv[ i ], "--dest" ) == 0 ) {
+ destUri = argv[ ++i ];
+ }
+ else {
+ check( false );
+ }
+ }
+ check( port != 0 && !destUri.empty() );
+
+ listener.reset( new MyListener( port ) );
+ listener->initAndListen();
+
+ return 0;
+}
diff --git a/src/mongo/tools/bsondump.cpp b/src/mongo/tools/bsondump.cpp
new file mode 100644
index 00000000000..3825c071cd6
--- /dev/null
+++ b/src/mongo/tools/bsondump.cpp
@@ -0,0 +1,140 @@
+// bsondump.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "../pch.h"
+#include "../client/dbclient.h"
+#include "../util/mmap.h"
+#include "../util/text.h"
+#include "tool.h"
+
+#include <boost/program_options.hpp>
+
+#include <fcntl.h>
+
+using namespace mongo;
+
+namespace po = boost::program_options;
+
+class BSONDump : public BSONTool {
+
+ enum OutputType { JSON , DEBUG } _type;
+
+public:
+
+ BSONDump() : BSONTool( "bsondump", NONE ) {
+ add_options()
+ ("type" , po::value<string>()->default_value("json") , "type of output: json,debug" )
+ ;
+ add_hidden_options()
+ ("file" , po::value<string>() , ".bson file" )
+ ;
+ addPositionArg( "file" , 1 );
+ _noconnection = true;
+ }
+
+ virtual void printExtraHelp(ostream& out) {
+ out << "Display BSON objects in a data file.\n" << endl;
+ out << "usage: " << _name << " [options] <bson filename>" << endl;
+ }
+
+ virtual int doRun() {
+ {
+ string t = getParam( "type" );
+ if ( t == "json" )
+ _type = JSON;
+ else if ( t == "debug" )
+ _type = DEBUG;
+ else {
+ cerr << "bad type: " << t << endl;
+ return 1;
+ }
+ }
+
+ path root = getParam( "file" );
+ if ( root == "" ) {
+ printExtraHelp(cout);
+ return 1;
+ }
+
+ processFile( root );
+ return 0;
+ }
+
+ bool debug( const BSONObj& o , int depth=0) {
+ string prefix = "";
+ for ( int i=0; i<depth; i++ ) {
+ prefix += "\t\t\t";
+ }
+
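+        // a BSON document starts with a 4-byte int32 length field, which is counted up front here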
+ int read = 4;
+
+ try {
+ cout << prefix << "--- new object ---\n";
+ cout << prefix << "\t size : " << o.objsize() << "\n";
+ BSONObjIterator i(o);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ cout << prefix << "\t\t " << e.fieldName() << "\n" << prefix << "\t\t\t type:" << setw(3) << e.type() << " size: " << e.size() << endl;
+ if ( ( read + e.size() ) > o.objsize() ) {
+ cout << prefix << " SIZE DOES NOT WORK" << endl;
+ return false;
+ }
+ read += e.size();
+ try {
+ e.validate();
+ if ( e.isABSONObj() ) {
+ if ( ! debug( e.Obj() , depth + 1 ) )
+ return false;
+ }
+ else if ( e.type() == String && ! isValidUTF8( e.valuestr() ) ) {
+ cout << prefix << "\t\t\t" << "bad utf8 String!" << endl;
+ }
+ else if ( logLevel > 0 ) {
+ cout << prefix << "\t\t\t" << e << endl;
+ }
+
+ }
+ catch ( std::exception& e ) {
+ cout << prefix << "\t\t\t bad value: " << e.what() << endl;
+ }
+ }
+ }
+ catch ( std::exception& e ) {
+ cout << prefix << "\t" << e.what() << endl;
+ }
+ return true;
+ }
+
+ virtual void gotObject( const BSONObj& o ) {
+ switch ( _type ) {
+ case JSON:
+ cout << o.jsonString( TenGen ) << endl;
+ break;
+ case DEBUG:
+ debug(o);
+ break;
+ default:
+ cerr << "bad type? : " << _type << endl;
+ }
+ }
+};
+
+int main( int argc , char ** argv ) {
+ BSONDump dump;
+ return dump.main( argc , argv );
+}
diff --git a/src/mongo/tools/dump.cpp b/src/mongo/tools/dump.cpp
new file mode 100644
index 00000000000..b6e0d2912e5
--- /dev/null
+++ b/src/mongo/tools/dump.cpp
@@ -0,0 +1,527 @@
+// dump.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "../pch.h"
+#include "../client/dbclient.h"
+#include "../db/db.h"
+#include "tool.h"
+
+#include <fcntl.h>
+#include <map>
+
+using namespace mongo;
+
+namespace po = boost::program_options;
+
+class Dump : public Tool {
+ class FilePtr : boost::noncopyable {
+ public:
+ /*implicit*/ FilePtr(FILE* f) : _f(f) {}
+ ~FilePtr() { fclose(_f); }
+ operator FILE*() { return _f; }
+ private:
+ FILE* _f;
+ };
+public:
+ Dump() : Tool( "dump" , ALL , "" , "" , true ) {
+ add_options()
+ ("out,o", po::value<string>()->default_value("dump"), "output directory or \"-\" for stdout")
+ ("query,q", po::value<string>() , "json query" )
+ ("oplog", "Use oplog for point-in-time snapshotting" )
+ ("repair", "try to recover a crashed database" )
+ ("forceTableScan", "force a table scan (do not use $snapshot)" )
+ ;
+ }
+
+ virtual void preSetup() {
+ string out = getParam("out");
+ if ( out == "-" ) {
+            // when dumping to stdout, send log output to standard error so it doesn't mangle the data;
+            // must happen early to avoid sending junk to stdout
+ useStandardOutput(false);
+ }
+ }
+
+ virtual void printExtraHelp(ostream& out) {
+ out << "Export MongoDB data to BSON files.\n" << endl;
+ }
+
+ // This is a functor that writes a BSONObj to a file
+ struct Writer {
+ Writer(FILE* out, ProgressMeter* m) :_out(out), _m(m) {}
+
+ void operator () (const BSONObj& obj) {
+ size_t toWrite = obj.objsize();
+ size_t written = 0;
+
+ while (toWrite) {
+ size_t ret = fwrite( obj.objdata()+written, 1, toWrite, _out );
+ uassert(14035, errnoWithPrefix("couldn't write to file"), ret);
+ toWrite -= ret;
+ written += ret;
+ }
+
+ // if there's a progress bar, hit it
+ if (_m) {
+ _m->hit();
+ }
+ }
+
+ FILE* _out;
+ ProgressMeter* _m;
+ };
+
+ void doCollection( const string coll , FILE* out , ProgressMeter *m ) {
+ Query q = _query;
+
+ int queryOptions = QueryOption_SlaveOk | QueryOption_NoCursorTimeout;
+ if (startsWith(coll.c_str(), "local.oplog."))
+ queryOptions |= QueryOption_OplogReplay;
+ else if ( _query.isEmpty() && !hasParam("dbpath") && !hasParam("forceTableScan") )
+ q.snapshot();
+
+ DBClientBase& connBase = conn(true);
+ Writer writer(out, m);
+
+ // use low-latency "exhaust" mode if going over the network
+ if (!_usingMongos && typeid(connBase) == typeid(DBClientConnection&)) {
+ DBClientConnection& conn = static_cast<DBClientConnection&>(connBase);
+ boost::function<void(const BSONObj&)> castedWriter(writer); // needed for overload resolution
+ conn.query( castedWriter, coll.c_str() , q , NULL, queryOptions | QueryOption_Exhaust);
+ }
+ else {
+            // This branch should only be taken with DBDirectClient or mongos, which don't support exhaust mode
+ scoped_ptr<DBClientCursor> cursor(connBase.query( coll.c_str() , q , 0 , 0 , 0 , queryOptions ));
+ while ( cursor->more() ) {
+ writer(cursor->next());
+ }
+ }
+ }
+
+ void writeCollectionFile( const string coll , path outputFile ) {
+ log() << "\t" << coll << " to " << outputFile.string() << endl;
+
+ FilePtr f (fopen(outputFile.string().c_str(), "wb"));
+ uassert(10262, errnoWithPrefix("couldn't open file"), f);
+
+ ProgressMeter m( conn( true ).count( coll.c_str() , BSONObj() , QueryOption_SlaveOk ) );
+ m.setUnits("objects");
+
+ doCollection(coll, f, &m);
+
+ log() << "\t\t " << m.done() << " objects" << endl;
+ }
+
+ void writeMetadataFile( const string coll, path outputFile, map<string, BSONObj> options, multimap<string, BSONObj> indexes ) {
+ log() << "\tMetadata for " << coll << " to " << outputFile.string() << endl;
+
+ ofstream file (outputFile.string().c_str());
+ uassert(15933, "Couldn't open file: " + outputFile.string(), file.is_open());
+
+ bool hasOptions = options.count(coll) > 0;
+ bool hasIndexes = indexes.count(coll) > 0;
+
+ if (hasOptions) {
+ file << "{options : " << options.find(coll)->second.jsonString();
+
+ if (hasIndexes) {
+ file << ", ";
+ }
+ } else {
+ file << "{";
+ }
+
+ if (hasIndexes) {
+ file << "indexes:[";
+ for (multimap<string, BSONObj>::iterator it=indexes.equal_range(coll).first; it!=indexes.equal_range(coll).second; ++it) {
+ if (it != indexes.equal_range(coll).first) {
+ file << ", ";
+ }
+ file << (*it).second.jsonString();
+ }
+ file << "]";
+ }
+ file << "}";
+ }
+
+
+
+ void writeCollectionStdout( const string coll ) {
+ doCollection(coll, stdout, NULL);
+ }
+
+ void go( const string db , const path outdir ) {
+ log() << "DATABASE: " << db << "\t to \t" << outdir.string() << endl;
+
+ create_directories( outdir );
+
+ map <string, BSONObj> collectionOptions;
+ multimap <string, BSONObj> indexes;
+ vector <string> collections;
+
+ // Save indexes for database
+ string ins = db + ".system.indexes";
+ auto_ptr<DBClientCursor> cursor = conn( true ).query( ins.c_str() , Query() , 0 , 0 , 0 , QueryOption_SlaveOk | QueryOption_NoCursorTimeout );
+ while ( cursor->more() ) {
+ BSONObj obj = cursor->nextSafe();
+ const string name = obj.getField( "ns" ).valuestr();
+ indexes.insert( pair<string, BSONObj> (name, obj.getOwned()) );
+ }
+
+ string sns = db + ".system.namespaces";
+ cursor = conn( true ).query( sns.c_str() , Query() , 0 , 0 , 0 , QueryOption_SlaveOk | QueryOption_NoCursorTimeout );
+ while ( cursor->more() ) {
+ BSONObj obj = cursor->nextSafe();
+ const string name = obj.getField( "name" ).valuestr();
+ if (obj.hasField("options")) {
+ collectionOptions.insert( pair<string,BSONObj> (name, obj.getField("options").embeddedObject()) );
+ }
+
+ // skip namespaces with $ in them only if we don't specify a collection to dump
+ if ( _coll == "" && name.find( ".$" ) != string::npos ) {
+ log(1) << "\tskipping collection: " << name << endl;
+ continue;
+ }
+
+ const string filename = name.substr( db.size() + 1 );
+
+            // if a particular collection is specified, and it's not this one, skip it
+ if ( _coll != "" && db + "." + _coll != name && _coll != name )
+ continue;
+
+ // raise error before writing collection with non-permitted filename chars in the name
+ size_t hasBadChars = name.find_first_of("/\0");
+ if (hasBadChars != string::npos){
+ error() << "Cannot dump " << name << ". Collection has '/' or null in the collection name." << endl;
+ continue;
+ }
+
+ // Don't dump indexes
+ if ( endsWith(name.c_str(), ".system.indexes") ) {
+ continue;
+ }
+
+ if ( _coll != "" && db + "." + _coll != name && _coll != name )
+ continue;
+
+ collections.push_back(name);
+ }
+
+ for (vector<string>::iterator it = collections.begin(); it != collections.end(); ++it) {
+ string name = *it;
+ const string filename = name.substr( db.size() + 1 );
+ writeCollectionFile( name , outdir / ( filename + ".bson" ) );
+ writeMetadataFile( name, outdir / (filename + ".metadata.json"), collectionOptions, indexes);
+ }
+
+ }
+
+ int repair() {
+ if ( ! hasParam( "dbpath" ) ){
+ log() << "repair mode only works with --dbpath" << endl;
+ return -1;
+ }
+
+ if ( ! hasParam( "db" ) ){
+ log() << "repair mode only works on 1 db right at a time right now" << endl;
+ return -1;
+ }
+
+ string dbname = getParam( "db" );
+ log() << "going to try and recover data from: " << dbname << endl;
+
+ return _repair( dbname );
+ }
+
+ DiskLoc _repairExtent( Database* db , string ns, bool forward , DiskLoc eLoc , Writer& w ){
+ LogIndentLevel lil;
+
+ if ( eLoc.getOfs() <= 0 ){
+ error() << "invalid extent ofs: " << eLoc.getOfs() << endl;
+ return DiskLoc();
+ }
+
+
+ MongoDataFile * mdf = db->getFile( eLoc.a() );
+
+ Extent * e = mdf->debug_getExtent( eLoc );
+ if ( ! e->isOk() ){
+ warning() << "Extent not ok magic: " << e->magic << " going to try to continue" << endl;
+ }
+
+ log() << "length:" << e->length << endl;
+
+ LogIndentLevel lil2;
+
+ set<DiskLoc> seen;
+
+ DiskLoc loc = forward ? e->firstRecord : e->lastRecord;
+ while ( ! loc.isNull() ){
+
+ if ( ! seen.insert( loc ).second ) {
+ error() << "infinite loop in extend, seen: " << loc << " before" << endl;
+ break;
+ }
+
+ if ( loc.getOfs() <= 0 ){
+ error() << "offset is 0 for record which should be impossible" << endl;
+ break;
+ }
+ log(1) << loc << endl;
+ Record* rec = loc.rec();
+ BSONObj obj;
+ try {
+ obj = loc.obj();
+ assert( obj.valid() );
+ LOG(1) << obj << endl;
+ w( obj );
+ }
+ catch ( std::exception& e ) {
+ log() << "found invalid document @ " << loc << " " << e.what() << endl;
+ if ( ! obj.isEmpty() ) {
+ try {
+ BSONElement e = obj.firstElement();
+ stringstream ss;
+ ss << "first element: " << e;
+ log() << ss.str();
+ }
+ catch ( std::exception& ) {
+ }
+ }
+ }
+ loc = forward ? rec->getNext( loc ) : rec->getPrev( loc );
+ }
+ return forward ? e->xnext : e->xprev;
+
+ }
+
+ void _repair( Database* db , string ns , path outfile ){
+ NamespaceDetails * nsd = nsdetails( ns.c_str() );
+ log() << "nrecords: " << nsd->stats.nrecords
+ << " datasize: " << nsd->stats.datasize
+ << " firstExtent: " << nsd->firstExtent
+ << endl;
+
+ if ( nsd->firstExtent.isNull() ){
+ log() << " ERROR fisrtExtent is null" << endl;
+ return;
+ }
+
+ if ( ! nsd->firstExtent.isValid() ){
+ log() << " ERROR fisrtExtent is not valid" << endl;
+ return;
+ }
+
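+// debugging helper: rewrites the first op the secondary has not yet replicated as a no-op (op : "n") on the primary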
+ outfile /= ( ns.substr( ns.find( "." ) + 1 ) + ".bson" );
+ log() << "writing to: " << outfile.string() << endl;
+
+ FilePtr f (fopen(outfile.string().c_str(), "wb"));
+
+ ProgressMeter m( nsd->stats.nrecords * 2 );
+ m.setUnits("objects");
+
+ Writer w( f , &m );
+
+ try {
+ log() << "forward extent pass" << endl;
+ LogIndentLevel lil;
+ DiskLoc eLoc = nsd->firstExtent;
+ while ( ! eLoc.isNull() ){
+ log() << "extent loc: " << eLoc << endl;
+ eLoc = _repairExtent( db , ns , true , eLoc , w );
+ }
+ }
+ catch ( DBException& e ){
+ error() << "forward extent pass failed:" << e.toString() << endl;
+ }
+
+ try {
+ log() << "backwards extent pass" << endl;
+ LogIndentLevel lil;
+ DiskLoc eLoc = nsd->lastExtent;
+ while ( ! eLoc.isNull() ){
+ log() << "extent loc: " << eLoc << endl;
+ eLoc = _repairExtent( db , ns , false , eLoc , w );
+ }
+ }
+ catch ( DBException& e ){
+ error() << "ERROR: backwards extent pass failed:" << e.toString() << endl;
+ }
+
+ log() << "\t\t " << m.done() << " objects" << endl;
+ }
+
+ int _repair( string dbname ) {
+ dblock lk;
+ Client::Context cx( dbname );
+ Database * db = cx.db();
+
+ list<string> namespaces;
+ db->namespaceIndex.getNamespaces( namespaces );
+
+ path root = getParam( "out" );
+ root /= dbname;
+ create_directories( root );
+
+ for ( list<string>::iterator i=namespaces.begin(); i!=namespaces.end(); ++i ){
+ LogIndentLevel lil;
+ string ns = *i;
+
+ if ( str::endsWith( ns , ".system.namespaces" ) )
+ continue;
+
+ if ( str::contains( ns , ".tmp.mr." ) )
+ continue;
+
+ if ( _coll != "" && ! str::endsWith( ns , _coll ) )
+ continue;
+
+ log() << "trying to recover: " << ns << endl;
+
+ LogIndentLevel lil2;
+ try {
+ _repair( db , ns , root );
+ }
+ catch ( DBException& e ){
+ log() << "ERROR recovering: " << ns << " " << e.toString() << endl;
+ }
+ }
+
+ return 0;
+ }
+
+ int run() {
+
+ if ( hasParam( "repair" ) ){
+ warning() << "repair is a work in progress" << endl;
+ return repair();
+ }
+
+ {
+ string q = getParam("query");
+ if ( q.size() )
+ _query = fromjson( q );
+ }
+
+ string opLogName = "";
+ unsigned long long opLogStart = 0;
+ if (hasParam("oplog")) {
+ if (hasParam("query") || hasParam("db") || hasParam("collection")) {
+ log() << "oplog mode is only supported on full dumps" << endl;
+ return -1;
+ }
+
+
+ BSONObj isMaster;
+ conn("true").simpleCommand("admin", &isMaster, "isMaster");
+
+ if (isMaster.hasField("hosts")) { // if connected to replica set member
+ opLogName = "local.oplog.rs";
+ }
+ else {
+ opLogName = "local.oplog.$main";
+ if ( ! isMaster["ismaster"].trueValue() ) {
+ log() << "oplog mode is only supported on master or replica set member" << endl;
+ return -1;
+ }
+ }
+
+ auth("local");
+
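+            // record the newest oplog timestamp now; after the data dump, entries newer than
+            // this are written to oplog.bson so the restore can replay to a consistent point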
+ BSONObj op = conn(true).findOne(opLogName, Query().sort("$natural", -1), 0, QueryOption_SlaveOk);
+ if (op.isEmpty()) {
+ log() << "No operations in oplog. Please ensure you are connecting to a master." << endl;
+ return -1;
+ }
+
+ assert(op["ts"].type() == Timestamp);
+ opLogStart = op["ts"]._numberLong();
+ }
+
+ // check if we're outputting to stdout
+ string out = getParam("out");
+ if ( out == "-" ) {
+ if ( _db != "" && _coll != "" ) {
+ writeCollectionStdout( _db+"."+_coll );
+ return 0;
+ }
+ else {
+ log() << "You must specify database and collection to print to stdout" << endl;
+ return -1;
+ }
+ }
+
+ _usingMongos = isMongos();
+
+ path root( out );
+ string db = _db;
+
+ if ( db == "" ) {
+ log() << "all dbs" << endl;
+ auth( "admin" );
+
+ BSONObj res = conn( true ).findOne( "admin.$cmd" , BSON( "listDatabases" << 1 ) );
+ if ( ! res["databases"].isABSONObj() ) {
+ error() << "output of listDatabases isn't what we expected, no 'databases' field:\n" << res << endl;
+ return -2;
+ }
+ BSONObj dbs = res["databases"].embeddedObjectUserCheck();
+ set<string> keys;
+ dbs.getFieldNames( keys );
+ for ( set<string>::iterator i = keys.begin() ; i != keys.end() ; i++ ) {
+ string key = *i;
+
+ if ( ! dbs[key].isABSONObj() ) {
+ error() << "database field not an object key: " << key << " value: " << dbs[key] << endl;
+ return -3;
+ }
+
+ BSONObj dbobj = dbs[key].embeddedObjectUserCheck();
+
+ const char * dbName = dbobj.getField( "name" ).valuestr();
+ if ( (string)dbName == "local" )
+ continue;
+
+ go ( dbName , root / dbName );
+ }
+ }
+ else {
+ auth( db );
+ go( db , root / db );
+ }
+
+ if (!opLogName.empty()) {
+ BSONObjBuilder b;
+ b.appendTimestamp("$gt", opLogStart);
+
+ _query = BSON("ts" << b.obj());
+
+ writeCollectionFile( opLogName , root / "oplog.bson" );
+ }
+
+ return 0;
+ }
+
+ bool _usingMongos;
+ BSONObj _query;
+};
+
+int main( int argc , char ** argv ) {
+ Dump d;
+ return d.main( argc , argv );
+}
diff --git a/src/mongo/tools/export.cpp b/src/mongo/tools/export.cpp
new file mode 100644
index 00000000000..0d9f0225da0
--- /dev/null
+++ b/src/mongo/tools/export.cpp
@@ -0,0 +1,248 @@
+// export.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "client/dbclient.h"
+#include "db/json.h"
+
+#include "tool.h"
+
+#include <fstream>
+#include <iostream>
+
+#include <boost/program_options.hpp>
+
+using namespace mongo;
+
+namespace po = boost::program_options;
+
+class Export : public Tool {
+public:
+ Export() : Tool( "export" ) {
+ addFieldOptions();
+ add_options()
+ ("query,q" , po::value<string>() , "query filter, as a JSON string" )
+ ("csv","export to csv instead of json")
+ ("out,o", po::value<string>(), "output file; if not specified, stdout is used")
+ ("jsonArray", "output to a json array rather than one object per line")
+ ("slaveOk,k", po::value<bool>()->default_value(true) , "use secondaries for export if available, default true")
+ ;
+ _usesstdout = false;
+ }
+
+ virtual void preSetup() {
+ string out = getParam("out");
+ if ( out == "-" ) {
+            // when exporting to stdout, send log output to standard error so it doesn't mangle the data;
+            // must happen early to avoid sending junk to stdout
+ useStandardOutput(false);
+ }
+ }
+
+ virtual void printExtraHelp( ostream & out ) {
+ out << "Export MongoDB data to CSV, TSV or JSON files.\n" << endl;
+ }
+
+ // Turn every double quote character into two double quote characters
+    // If hasSurroundingQuotes is true, the first and last characters are assumed to
+    // already be quotes and are left alone; if it's false, a double quote character
+    // is added around the whole string.
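+    // e.g. the value  a "b" c  is emitted as  "a ""b"" c"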
+ string csvEscape(string str, bool hasSurroundingQuotes = false) {
+ size_t index = hasSurroundingQuotes ? 1 : 0;
+ while (((index = str.find('"', index)) != string::npos)
+ && (index < (hasSurroundingQuotes ? str.size() - 1 : str.size()))) {
+ str.replace(index, 1, "\"\"");
+ index += 2;
+ }
+ return hasSurroundingQuotes ? str : "\"" + str + "\"";
+ }
+
+ // Gets the string representation of a BSON object that can be correctly written to a CSV file
+ string csvString (const BSONElement& object) {
+ const char* binData; // Only used with BinData type
+
+ switch (object.type()) {
+ case MinKey:
+ return "$MinKey";
+ case MaxKey:
+ return "$MaxKey";
+ case NumberInt:
+ case NumberDouble:
+ case NumberLong:
+ case Bool:
+ return object.toString(false);
+ case String:
+ case Symbol:
+ return csvEscape(object.toString(false), true);
+ case Object:
+ return csvEscape(object.jsonString(Strict, false));
+ case Array:
+ return csvEscape(object.jsonString(Strict, false));
+ case BinData:
+ int len;
+ binData = object.binDataClean(len);
+ return toHex(binData, len);
+ case jstOID:
+ return "ObjectID(" + object.OID().toString() + ")"; // OIDs are always 24 bytes
+ case Date:
+ return timeToISOString(object.Date() / 1000);
+ case Timestamp:
+ return csvEscape(object.jsonString(Strict, false));
+ case RegEx:
+ return csvEscape("/" + string(object.regex()) + "/" + string(object.regexFlags()));
+ case Code:
+ return csvEscape(object.toString(false));
+ case CodeWScope:
+ if (string(object.codeWScopeScopeData()) == "") {
+ return csvEscape(object.toString(false));
+ } else {
+ return csvEscape(object.jsonString(Strict, false));
+ }
+ case EOO:
+ case Undefined:
+ case DBRef:
+ case jstNULL:
+ cerr << "Invalid BSON object type for CSV output: " << object.type() << endl;
+ return "";
+ }
+ // Can never get here
+ assert(false);
+ return "";
+ }
+
+ int run() {
+ string ns;
+ const bool csv = hasParam( "csv" );
+ const bool jsonArray = hasParam( "jsonArray" );
+ ostream *outPtr = &cout;
+ string outfile = getParam( "out" );
+ auto_ptr<ofstream> fileStream;
+ if ( hasParam( "out" ) ) {
+ size_t idx = outfile.rfind( "/" );
+ if ( idx != string::npos ) {
+ string dir = outfile.substr( 0 , idx + 1 );
+ create_directories( dir );
+ }
+ ofstream * s = new ofstream( outfile.c_str() , ios_base::out );
+ fileStream.reset( s );
+ outPtr = s;
+ if ( ! s->good() ) {
+ cerr << "couldn't open [" << outfile << "]" << endl;
+ return -1;
+ }
+ }
+ ostream &out = *outPtr;
+
+ BSONObj * fieldsToReturn = 0;
+ BSONObj realFieldsToReturn;
+
+ try {
+ ns = getNS();
+ }
+ catch (...) {
+ printHelp(cerr);
+ return 1;
+ }
+
+ auth();
+
+ if ( hasParam( "fields" ) || csv ) {
+ needFields();
+
+ // we can't use just _fieldsObj since we support everything getFieldDotted does
+
+ set<string> seen;
+ BSONObjBuilder b;
+
+ BSONObjIterator i( _fieldsObj );
+ while ( i.more() ){
+ BSONElement e = i.next();
+ string f = str::before( e.fieldName() , '.' );
+ if ( seen.insert( f ).second )
+ b.append( f , 1 );
+ }
+
+ realFieldsToReturn = b.obj();
+ fieldsToReturn = &realFieldsToReturn;
+ }
+
+
+ if ( csv && _fields.size() == 0 ) {
+ cerr << "csv mode requires a field list" << endl;
+ return -1;
+ }
+
+ Query q( getParam( "query" , "" ) );
+ if ( q.getFilter().isEmpty() && !hasParam("dbpath"))
+ q.snapshot();
+
+ bool slaveOk = _params["slaveOk"].as<bool>();
+
+ auto_ptr<DBClientCursor> cursor = conn().query( ns.c_str() , q , 0 , 0 , fieldsToReturn , ( slaveOk ? QueryOption_SlaveOk : 0 ) | QueryOption_NoCursorTimeout );
+
+ if ( csv ) {
+ for ( vector<string>::iterator i=_fields.begin(); i != _fields.end(); i++ ) {
+ if ( i != _fields.begin() )
+ out << ",";
+ out << *i;
+ }
+ out << endl;
+ }
+
+ if (jsonArray)
+ out << '[';
+
+ long long num = 0;
+ while ( cursor->more() ) {
+ num++;
+ BSONObj obj = cursor->next();
+ if ( csv ) {
+ for ( vector<string>::iterator i=_fields.begin(); i != _fields.end(); i++ ) {
+ if ( i != _fields.begin() )
+ out << ",";
+ const BSONElement & e = obj.getFieldDotted(i->c_str());
+ if ( ! e.eoo() ) {
+ out << csvString(e);
+ }
+ }
+ out << endl;
+ }
+ else {
+ if (jsonArray && num != 1)
+ out << ',';
+
+ out << obj.jsonString();
+
+ if (!jsonArray)
+ out << endl;
+ }
+ }
+
+ if (jsonArray)
+ out << ']' << endl;
+
+ cerr << "exported " << num << " records" << endl;
+
+ return 0;
+ }
+};
+
+int main( int argc , char ** argv ) {
+ Export e;
+ return e.main( argc , argv );
+}
diff --git a/src/mongo/tools/files.cpp b/src/mongo/tools/files.cpp
new file mode 100644
index 00000000000..06660361485
--- /dev/null
+++ b/src/mongo/tools/files.cpp
@@ -0,0 +1,164 @@
+// files.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "client/gridfs.h"
+#include "client/dbclient.h"
+
+#include "tool.h"
+
+#include <fstream>
+#include <iostream>
+
+#include <boost/program_options.hpp>
+
+using namespace mongo;
+
+namespace po = boost::program_options;
+
+class Files : public Tool {
+public:
+ Files() : Tool( "files" ) {
+ add_options()
+ ( "local,l", po::value<string>(), "local filename for put|get (default is to use the same name as 'gridfs filename')")
+ ( "type,t", po::value<string>(), "MIME type for put (default is to omit)")
+ ( "replace,r", "Remove other files with same name after PUT")
+ ;
+ add_hidden_options()
+ ( "command" , po::value<string>() , "command (list|search|put|get)" )
+ ( "file" , po::value<string>() , "filename for get|put" )
+ ;
+ addPositionArg( "command" , 1 );
+ addPositionArg( "file" , 2 );
+ }
+
+ virtual void printExtraHelp( ostream & out ) {
+ out << "Browse and modify a GridFS filesystem.\n" << endl;
+ out << "usage: " << _name << " [options] command [gridfs filename]" << endl;
+ out << "command:" << endl;
+ out << " one of (list|search|put|get)" << endl;
+ out << " list - list all files. 'gridfs filename' is an optional prefix " << endl;
+ out << " which listed filenames must begin with." << endl;
+ out << " search - search all files. 'gridfs filename' is a substring " << endl;
+ out << " which listed filenames must contain." << endl;
+ out << " put - add a file with filename 'gridfs filename'" << endl;
+ out << " get - get a file with filename 'gridfs filename'" << endl;
+ out << " delete - delete all files with filename 'gridfs filename'" << endl;
+ }
+
+ void display( GridFS * grid , BSONObj obj ) {
+ auto_ptr<DBClientCursor> c = grid->list( obj );
+ while ( c->more() ) {
+ BSONObj obj = c->next();
+ cout
+ << obj["filename"].str() << "\t"
+ << (long)obj["length"].number()
+ << endl;
+ }
+ }
+
+ int run() {
+ string cmd = getParam( "command" );
+ if ( cmd.size() == 0 ) {
+ cerr << "ERROR: need command" << endl << endl;
+ printHelp(cout);
+ return -1;
+ }
+
+ GridFS g( conn() , _db );
+ auth();
+
+ string filename = getParam( "file" );
+
+ if ( cmd == "list" ) {
+ BSONObjBuilder b;
+ if ( filename.size() )
+ b.appendRegex( "filename" , ( (string)"^" + filename ) );
+ display( &g , b.obj() );
+ return 0;
+ }
+
+ if ( filename.size() == 0 ) {
+ cerr << "ERROR: need a filename" << endl << endl;
+ printHelp(cout);
+ return -1;
+ }
+
+ if ( cmd == "search" ) {
+ BSONObjBuilder b;
+ b.appendRegex( "filename" , filename );
+ display( &g , b.obj() );
+ return 0;
+ }
+
+ if ( cmd == "get" ) {
+ GridFile f = g.findFile( filename );
+ if ( ! f.exists() ) {
+ cerr << "ERROR: file not found" << endl;
+ return -2;
+ }
+
+ string out = getParam("local", f.getFilename());
+ f.write( out );
+
+ if (out != "-")
+ cout << "done write to: " << out << endl;
+
+ return 0;
+ }
+
+ if ( cmd == "put" ) {
+ const string& infile = getParam("local", filename);
+ const string& type = getParam("type", "");
+
+ BSONObj file = g.storeFile(infile, filename, type);
+ cout << "added file: " << file << endl;
+
+ if (hasParam("replace")) {
+ auto_ptr<DBClientCursor> cursor = conn().query(_db+".fs.files", BSON("filename" << filename << "_id" << NE << file["_id"] ));
+ while (cursor->more()) {
+ BSONObj o = cursor->nextSafe();
+ conn().remove(_db+".fs.files", BSON("_id" << o["_id"]));
+ conn().remove(_db+".fs.chunks", BSON("_id" << o["_id"]));
+ cout << "removed file: " << o << endl;
+ }
+
+ }
+
+ conn().getLastError();
+ cout << "done!" << endl;
+ return 0;
+ }
+
+ if ( cmd == "delete" ) {
+ g.removeFile(filename);
+ conn().getLastError();
+ cout << "done!" << endl;
+ return 0;
+ }
+
+ cerr << "ERROR: unknown command '" << cmd << "'" << endl << endl;
+ printHelp(cout);
+ return -1;
+ }
+};
+
+int main( int argc , char ** argv ) {
+ Files f;
+ return f.main( argc , argv );
+}
diff --git a/src/mongo/tools/import.cpp b/src/mongo/tools/import.cpp
new file mode 100644
index 00000000000..24741ed46ad
--- /dev/null
+++ b/src/mongo/tools/import.cpp
@@ -0,0 +1,463 @@
+// import.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "client/dbclient.h"
+#include "db/json.h"
+
+#include "tool.h"
+#include "../util/text.h"
+
+#include <fstream>
+#include <iostream>
+
+#include <boost/program_options.hpp>
+#include <boost/algorithm/string.hpp>
+
+using namespace mongo;
+
+namespace po = boost::program_options;
+
+class Import : public Tool {
+
+ enum Type { JSON , CSV , TSV };
+ Type _type;
+
+ const char * _sep;
+ bool _ignoreBlanks;
+ bool _headerLine;
+ bool _upsert;
+ bool _doimport;
+ bool _jsonArray;
+ vector<string> _upsertFields;
+ static const int BUF_SIZE = 1024 * 1024 * 4;
+
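+    // splits one CSV row into fields, honoring quoting: the row  a,"b,c",d  tokenizes to  a | b,c | d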
+ void csvTokenizeRow(const string& row, vector<string>& tokens) {
+ bool inQuotes = false;
+ bool prevWasQuote = false;
+ bool tokenQuoted = false;
+ string curtoken = "";
+ for (string::const_iterator it = row.begin(); it != row.end(); ++it) {
+ char element = *it;
+ if (element == '"') {
+ if (!inQuotes) {
+ inQuotes = true;
+ tokenQuoted = true;
+ curtoken = "";
+ } else {
+ if (prevWasQuote) {
+ curtoken += "\"";
+ prevWasQuote = false;
+ } else {
+ prevWasQuote = true;
+ }
+ }
+ } else {
+ if (inQuotes && prevWasQuote) {
+ inQuotes = false;
+ prevWasQuote = false;
+ tokens.push_back(curtoken);
+ }
+
+ if (element == ',' && !inQuotes) {
+ if (!tokenQuoted) { // If token was quoted, it's already been added
+ boost::trim(curtoken);
+ tokens.push_back(curtoken);
+ }
+ curtoken = "";
+ tokenQuoted = false;
+ } else {
+ curtoken += element;
+ }
+ }
+ }
+ if (!tokenQuoted || (inQuotes && prevWasQuote)) {
+ boost::trim(curtoken);
+ tokens.push_back(curtoken);
+ }
+ }
+
+ void _append( BSONObjBuilder& b , const string& fieldName , const string& data ) {
+ if ( _ignoreBlanks && data.size() == 0 )
+ return;
+
+ if ( b.appendAsNumber( fieldName , data ) )
+ return;
+
+ // TODO: other types?
+ b.append ( fieldName , data );
+ }
+
+ /*
+ * Reads one line from in into buf.
+ * Returns the number of bytes that should be skipped - the caller should
+ * increment buf by this amount.
+ */
+ int getLine(istream* in, char* buf) {
+ if (_jsonArray) {
+ in->read(buf, BUF_SIZE);
+ uassert(13295, "JSONArray file too large", (in->rdstate() & ios_base::eofbit));
+ buf[ in->gcount() ] = '\0';
+ }
+ else {
+ in->getline( buf , BUF_SIZE );
+ log(1) << "got line:" << buf << endl;
+ }
+ uassert( 10263 , "unknown error reading file" ,
+ (!(in->rdstate() & ios_base::badbit)) &&
+ (!(in->rdstate() & ios_base::failbit) || (in->rdstate() & ios_base::eofbit)) );
+
+ int numBytesSkipped = 0;
+ if (strncmp("\xEF\xBB\xBF", buf, 3) == 0) { // UTF-8 BOM (notepad is stupid)
+ buf += 3;
+ numBytesSkipped += 3;
+ }
+
+ uassert(13289, "Invalid UTF8 character detected", isValidUTF8(buf));
+ return numBytesSkipped;
+ }
+
+ /*
+ * Parses a BSON object out of a JSON array.
+ * Returns number of bytes processed on success and -1 on failure.
+ */
+ int parseJSONArray(char* buf, BSONObj& o) {
+ int len = 0;
+ while (buf[0] != '{' && buf[0] != '\0') {
+ len++;
+ buf++;
+ }
+ if (buf[0] == '\0')
+ return -1;
+
+ int jslen;
+ o = fromjson(buf, &jslen);
+ len += jslen;
+
+ return len;
+ }
+
+ /*
+ * Parses one object from the input file. This usually corresponds to one line in the input
+ * file, unless the file is a CSV and contains a newline within a quoted string entry.
+ * Returns a true if a BSONObj was successfully created and false if not.
+ */
+ bool parseRow(istream* in, BSONObj& o, int& numBytesRead) {
+ boost::scoped_array<char> buffer(new char[BUF_SIZE+2]);
+ char* line = buffer.get();
+
+ numBytesRead = getLine(in, line);
+ line += numBytesRead;
+
+ if (line[0] == '\0') {
+ return false;
+ }
+ numBytesRead += strlen( line );
+
+ if (_type == JSON) {
+ // Strip out trailing whitespace
+ char * end = ( line + strlen( line ) ) - 1;
+ while ( end >= line && isspace(*end) ) {
+ *end = 0;
+ end--;
+ }
+ o = fromjson( line );
+ return true;
+ }
+
+ vector<string> tokens;
+ if (_type == CSV) {
+ string row;
+ bool inside_quotes = false;
+ size_t last_quote = 0;
+ while (true) {
+ string lineStr(line);
+ // Deal with line breaks in quoted strings
+ last_quote = lineStr.find_first_of('"');
+ while (last_quote != string::npos) {
+ inside_quotes = !inside_quotes;
+ last_quote = lineStr.find_first_of('"', last_quote+1);
+ }
+
+ row.append(lineStr);
+
+ if (inside_quotes) {
+ row.append("\n");
+ int num = getLine(in, line);
+ line += num;
+ numBytesRead += num;
+
+ uassert (15854, "CSV file ends while inside quoted field", line[0] != '\0');
+ numBytesRead += strlen( line );
+ } else {
+ break;
+ }
+ }
+ // now 'row' is string corresponding to one row of the CSV file
+ // (which may span multiple lines) and represents one BSONObj
+ csvTokenizeRow(row, tokens);
+ }
+ else { // _type == TSV
+ while (line[0] != '\t' && isspace(line[0])) { // Strip leading whitespace, but not tabs
+ line++;
+ }
+
+ boost::split(tokens, line, boost::is_any_of(_sep));
+ }
+
+ // Now that the row is tokenized, create a BSONObj out of it.
+ BSONObjBuilder b;
+ unsigned int pos=0;
+ for (vector<string>::iterator it = tokens.begin(); it != tokens.end(); ++it) {
+ string token = *it;
+ if ( _headerLine ) {
+ _fields.push_back(token);
+ }
+ else {
+ string name;
+ if ( pos < _fields.size() ) {
+ name = _fields[pos];
+ }
+ else {
+ stringstream ss;
+ ss << "field" << pos;
+ name = ss.str();
+ }
+ pos++;
+
+ _append( b , name , token );
+ }
+ }
+ o = b.obj();
+ return true;
+ }
+
+public:
+ Import() : Tool( "import" ) {
+ addFieldOptions();
+ add_options()
+ ("ignoreBlanks","if given, empty fields in csv and tsv will be ignored")
+ ("type",po::value<string>() , "type of file to import. default: json (json,csv,tsv)")
+ ("file",po::value<string>() , "file to import from; if not specified stdin is used" )
+ ("drop", "drop collection first " )
+ ("headerline","CSV,TSV only - use first line as headers")
+ ("upsert", "insert or update objects that already exist" )
+ ("upsertFields", po::value<string>(), "comma-separated fields for the query part of the upsert. You should make sure this is indexed" )
+ ("stopOnError", "stop importing at first error rather than continuing" )
+ ("jsonArray", "load a json array, not one item per line. Currently limited to 4MB." )
+ ;
+ add_hidden_options()
+ ("noimport", "don't actually import. useful for benchmarking parser" )
+ ;
+ addPositionArg( "file" , 1 );
+ _type = JSON;
+ _ignoreBlanks = false;
+ _headerLine = false;
+ _upsert = false;
+ _doimport = true;
+ _jsonArray = false;
+ }
+
+ virtual void printExtraHelp( ostream & out ) {
+ out << "Import CSV, TSV or JSON data into MongoDB.\n" << endl;
+ }
+
+ int run() {
+ string filename = getParam( "file" );
+ long long fileSize = 0;
+ int headerRows = 0;
+
+ istream * in = &cin;
+
+ ifstream file( filename.c_str() , ios_base::in);
+
+ if ( filename.size() > 0 && filename != "-" ) {
+ if ( ! exists( filename ) ) {
+ error() << "file doesn't exist: " << filename << endl;
+ return -1;
+ }
+ in = &file;
+ fileSize = file_size( filename );
+ }
+
+ // check if we're actually talking to a machine that can write
+ if (!isMaster()) {
+ return -1;
+ }
+
+ string ns;
+
+ try {
+ ns = getNS();
+ }
+ catch (...) {
+ printHelp(cerr);
+ return -1;
+ }
+
+ log(1) << "ns: " << ns << endl;
+
+ auth();
+
+ if ( hasParam( "drop" ) ) {
+ log() << "dropping: " << ns << endl;
+ conn().dropCollection( ns.c_str() );
+ }
+
+ if ( hasParam( "ignoreBlanks" ) ) {
+ _ignoreBlanks = true;
+ }
+
+ if ( hasParam( "upsert" ) || hasParam( "upsertFields" )) {
+ _upsert = true;
+
+ string uf = getParam("upsertFields");
+ if (uf.empty()) {
+ _upsertFields.push_back("_id");
+ }
+ else {
+ StringSplitter(uf.c_str(), ",").split(_upsertFields);
+ }
+ }
+
+ if ( hasParam( "noimport" ) ) {
+ _doimport = false;
+ }
+
+ if ( hasParam( "type" ) ) {
+ string type = getParam( "type" );
+ if ( type == "json" )
+ _type = JSON;
+ else if ( type == "csv" ) {
+ _type = CSV;
+ _sep = ",";
+ }
+ else if ( type == "tsv" ) {
+ _type = TSV;
+ _sep = "\t";
+ }
+ else {
+ error() << "don't know what type [" << type << "] is" << endl;
+ return -1;
+ }
+ }
+
+ if ( _type == CSV || _type == TSV ) {
+ _headerLine = hasParam( "headerline" );
+ if ( _headerLine ) {
+ headerRows = 1;
+ }
+ else {
+ needFields();
+ }
+ }
+
+ if (_type == JSON && hasParam("jsonArray")) {
+ _jsonArray = true;
+ }
+
+ time_t start = time(0);
+ log(1) << "filesize: " << fileSize << endl;
+ ProgressMeter pm( fileSize );
+ int num = 0;
+ int errors = 0;
+ int len = 0;
+ // buffer and line are only used when parsing a jsonArray
+ boost::scoped_array<char> buffer(new char[BUF_SIZE+2]);
+ char* line = buffer.get();
+
+ while ( _jsonArray || in->rdstate() == 0 ) {
+ try {
+ BSONObj o;
+ if (_jsonArray) {
+ int bytesProcessed = 0;
+ if (line == buffer.get()) { // Only read on first pass - the whole array must be on one line.
+ bytesProcessed = getLine(in, line);
+ line += bytesProcessed;
+ len += bytesProcessed;
+ }
+ if ((bytesProcessed = parseJSONArray(line, o)) < 0) {
+ len += bytesProcessed;
+ break;
+ }
+ len += bytesProcessed;
+ line += bytesProcessed;
+ }
+ else {
+ if (!parseRow(in, o, len)) {
+ continue;
+ }
+ }
+
+ if ( _headerLine ) {
+ _headerLine = false;
+ }
+ else if (_doimport) {
+ bool doUpsert = _upsert;
+ BSONObjBuilder b;
+ if (_upsert) {
+ for (vector<string>::const_iterator it=_upsertFields.begin(), end=_upsertFields.end(); it!=end; ++it) {
+ BSONElement e = o.getFieldDotted(it->c_str());
+ if (e.eoo()) {
+ doUpsert = false;
+ break;
+ }
+ b.appendAs(e, *it);
+ }
+ }
+
+ if (doUpsert) {
+ conn().update(ns, Query(b.obj()), o, true);
+ }
+ else {
+ conn().insert( ns.c_str() , o );
+ }
+ }
+
+ num++;
+ }
+ catch ( std::exception& e ) {
+ log() << "exception:" << e.what() << endl;
+ log() << line << endl;
+ errors++;
+
+ if (hasParam("stopOnError") || _jsonArray)
+ break;
+ }
+
+ if ( pm.hit( len + 1 ) ) {
+ log() << "\t\t\t" << num << "\t" << ( num / ( time(0) - start ) ) << "/second" << endl;
+ }
+ }
+
+ log() << "imported " << ( num - headerRows ) << " objects" << endl;
+
+ conn().getLastError();
+
+ if ( errors == 0 )
+ return 0;
+
+ error() << "encountered " << errors << " error" << ( errors == 1 ? "" : "s" ) << endl;
+ return -1;
+ }
+};
+
+int main( int argc , char ** argv ) {
+ Import import;
+ return import.main( argc , argv );
+}
diff --git a/src/mongo/tools/oplog.cpp b/src/mongo/tools/oplog.cpp
new file mode 100644
index 00000000000..1c09f37064f
--- /dev/null
+++ b/src/mongo/tools/oplog.cpp
@@ -0,0 +1,108 @@
+// oplog.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "client/dbclient.h"
+#include "db/json.h"
+#include "db/oplogreader.h"
+
+#include "tool.h"
+
+#include <fstream>
+#include <iostream>
+
+#include <boost/program_options.hpp>
+
+using namespace mongo;
+
+namespace po = boost::program_options;
+
+class OplogTool : public Tool {
+public:
+ OplogTool() : Tool( "oplog" ) {
+ addFieldOptions();
+ add_options()
+ ("seconds,s" , po::value<int>() , "seconds to go back default:86400" )
+ ("from", po::value<string>() , "host to pull from" )
+ ("oplogns", po::value<string>()->default_value( "local.oplog.rs" ) , "ns to pull from" )
+ ;
+ }
+
+ virtual void printExtraHelp(ostream& out) {
+ out << "Pull and replay a remote MongoDB oplog.\n" << endl;
+ }
+
+ int run() {
+
+ if ( ! hasParam( "from" ) ) {
+ log() << "need to specify --from" << endl;
+ return -1;
+ }
+
+ Client::initThread( "oplogreplay" );
+
+ log() << "going to connect" << endl;
+
+ OplogReader r;
+ r.connect( getParam( "from" ) );
+
+ log() << "connected" << endl;
+
+ OpTime start( time(0) - getParam( "seconds" , 86400 ) , 0 );
+ log() << "starting from " << start.toStringPretty() << endl;
+
+ string ns = getParam( "oplogns" );
+ r.tailingQueryGTE( ns.c_str() , start );
+
+ int num = 0;
+ while ( r.more() ) {
+ BSONObj o = r.next();
+
+ bool print = ++num % 100000 == 0;
+ if ( print )
+ cout << num << "\t" << o << endl;
+
+ if ( o["op"].String() == "n" )
+ continue;
+
+ if ( o["op"].String() == "c" ) {
+ cout << "skipping: " << o << endl;
+ continue;
+ }
+
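+            // wrap the oplog entry in an applyOps command and run it against the destination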
+ BSONObjBuilder b( o.objsize() + 32 );
+ BSONArrayBuilder updates( b.subarrayStart( "applyOps" ) );
+ updates.append( o );
+ updates.done();
+
+ BSONObj c = b.obj();
+
+ BSONObj res;
+ conn().runCommand( "admin" , c , res );
+ if ( print )
+ cout << res << endl;
+ }
+
+ return 0;
+ }
+};
+
+int main( int argc , char** argv ) {
+ OplogTool t;
+ return t.main( argc , argv );
+}
diff --git a/src/mongo/tools/restore.cpp b/src/mongo/tools/restore.cpp
new file mode 100644
index 00000000000..85e91ce6485
--- /dev/null
+++ b/src/mongo/tools/restore.cpp
@@ -0,0 +1,583 @@
+// @file restore.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "../pch.h"
+#include "../client/dbclient.h"
+#include "../util/mmap.h"
+#include "../util/version.h"
+#include "tool.h"
+
+#include <boost/program_options.hpp>
+
+#include <fcntl.h>
+#include <set>
+
+using namespace mongo;
+
+namespace po = boost::program_options;
+
+namespace {
+ const char* OPLOG_SENTINEL = "$oplog"; // compare by ptr not strcmp
+}
+
+class Restore : public BSONTool {
+public:
+
+ bool _drop;
+ bool _keepIndexVersion;
+ bool _restoreOptions;
+ bool _restoreIndexes;
+ bool _restoreShardingConfig;
+ int _w;
+ string _curns;
+ string _curdb;
+ string _curcoll;
+ set<string> _users; // For restoring users with --drop
+ auto_ptr<Matcher> _opmatcher; // For oplog replay
+ Restore() : BSONTool( "restore" ) , _drop(false) {
+ add_options()
+ ("drop" , "drop each collection before import" )
+ ("oplogReplay", "replay oplog for point-in-time restore")
+ ("oplogLimit", po::value<string>(), "exclude oplog entries newer than provided timestamp (epoch[:ordinal])")
+ ("keepIndexVersion" , "don't upgrade indexes to newest version")
+ ("noOptionsRestore" , "don't restore collection options")
+ ("noIndexRestore" , "don't restore indexes")
+ ("restoreShardingConfig", "restore sharding configuration before doing the full import")
+ ("w" , po::value<int>()->default_value(1) , "minimum number of replicas per write" )
+ ;
+ add_hidden_options()
+ ("dir", po::value<string>()->default_value("dump"), "directory to restore from")
+ ("indexesLast" , "wait to add indexes (now default)") // left in for backwards compatibility
+ ("forceConfigRestore", "don't use confirmation prompt when doing --restoreShardingConfig") // For testing
+ ;
+ addPositionArg("dir", 1);
+ }
+
+ virtual void printExtraHelp(ostream& out) {
+ out << "Import BSON files into MongoDB.\n" << endl;
+ out << "usage: " << _name << " [options] [directory or filename to restore from]" << endl;
+ }
+
+ virtual int doRun() {
+ auth();
+ path root = getParam("dir");
+
+ // check if we're actually talking to a machine that can write
+ if (!isMaster()) {
+ return -1;
+ }
+
+ _drop = hasParam( "drop" );
+ _keepIndexVersion = hasParam("keepIndexVersion");
+        _restoreOptions = !hasParam("noOptionsRestore");
+ _restoreIndexes = !hasParam("noIndexRestore");
+ _w = getParam( "w" , 1 );
+ _restoreShardingConfig = hasParam("restoreShardingConfig");
+ bool forceConfigRestore = hasParam("forceConfigRestore");
+
+ bool doOplog = hasParam( "oplogReplay" );
+
+ if (doOplog) {
+ // fail early if errors
+
+ if (_db != "") {
+ log() << "Can only replay oplog on full restore" << endl;
+ return -1;
+ }
+
+ if ( ! exists(root / "oplog.bson") ) {
+ log() << "No oplog file to replay. Make sure you run mongodump with --oplog." << endl;
+ return -1;
+ }
+
+
+ BSONObj out;
+ if (! conn().simpleCommand("admin", &out, "buildinfo")) {
+ log() << "buildinfo command failed: " << out["errmsg"].String() << endl;
+ return -1;
+ }
+
+ StringData version = out["version"].valuestr();
+ if (versionCmp(version, "1.7.4-pre-") < 0) {
+ log() << "Can only replay oplog to server version >= 1.7.4" << endl;
+ return -1;
+ }
+
+ string oplogLimit = getParam( "oplogLimit", "" );
+ string oplogInc = "0";
+
+ if(!oplogLimit.empty()) {
+ size_t i = oplogLimit.find_first_of(':');
+ if ( i != string::npos ) {
+ if ( i + 1 < oplogLimit.length() ) {
+ oplogInc = oplogLimit.substr(i + 1);
+ }
+
+ oplogLimit = oplogLimit.substr(0, i);
+ }
+
+ if ( ! oplogLimit.empty() ) {
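+                    // The resulting filter is roughly
+                    //   { ts: { $lt: { $timestamp: { t: <epoch>, i: <ordinal> } } } }
+                    // so oplog entries at or past the --oplogLimit cutoff are not replayed.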
+ _opmatcher.reset( new Matcher( fromjson( string("{ \"ts\": { \"$lt\": { \"$timestamp\": { \"t\": ") + oplogLimit + string(", \"i\": ") + oplogInc + string(" } } } }") ) ) );
+ }
+ }
+ }
+
+ if (_restoreShardingConfig) {
+ if (_db != "" && _db != "config") {
+ log() << "Can only setup sharding configuration on full restore or on restoring config database" << endl;
+ return -1;
+ }
+
+ // make sure we're talking to a mongos
+ if (!isMongos()) {
+ log() << "Can only use --restoreShardingConfig on a mongos" << endl;
+ return -1;
+ }
+
+ if ( ! exists(root / "config") ) {
+ log() << "No config directory to restore sharding setup from." << endl;
+ return -1;
+ }
+
+ // Make sure this isn't an active cluster.
+ log() << "WARNING: this will drop the config database, overriding any sharding configuration you currently have." << endl
+ << "DO NOT RUN THIS COMMAND WHILE YOUR CLUSTER IS ACTIVE" << endl;
+ if (forceConfigRestore) {
+ log() << "Running with --forceConfigRestore. Continuing." << endl;
+ } else {
+ if (!confirmAction()) {
+ log() << "aborting" << endl;
+ return -1;
+ }
+ }
+
+ // Restore config database
+ BSONObj info;
+ bool ok = conn().runCommand( "config" , BSON( "dropDatabase" << 1 ) , info );
+ if (!ok) {
+ log() << "Error dropping config database. Aborting" << endl;
+ return -1;
+ }
+ drillDown(root / "config", false, false);
+
+ log() << "Finished restoring config database." << endl
+ << "Calling flushRouterConfig on this connection" << endl;
+ conn().runCommand( "config" , BSON( "flushRouterConfig" << 1 ) , info );
+ }
+
+ /* If _db is not "" then the user specified a db name to restore as.
+ *
+         * In that case we expect either a root directory that contains
+         * only .bson files, or a single .bson file (a db).
+         *
+         * When a collection name is also specified, we expect either a
+         * root directory containing exactly one .bson file, or a single
+         * .bson file itself (a collection).
+ */
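+        /* For reference, the directory layout assumed here matches mongodump's output,
+         * e.g. (sketch):
+         *   dump/                        <- root; one subdirectory per database
+         *   dump/mydb/foo.bson           <- one .bson file per collection
+         *   dump/mydb/foo.metadata.json  <- options + indexes (2.1+ dumps)
+         *   dump/oplog.bson              <- only present with mongodump --oplog
+         */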
+ drillDown(root, _db != "", _coll != "", true);
+
+ if (_restoreShardingConfig) {
+ log() << "Flushing routing configuration from all mongos that we're aware of" << endl;
+ flushAllMongos();
+            log(1) << "Finished flushing the sharding configuration on all mongos processes that the dumped config data knew of." << endl
+                << "If there are mongos processes that weren't just flushed, make sure to call flushRouterConfig on them." << endl;
+ }
+
+ // should this happen for oplog replay as well?
+ conn().getLastError();
+
+ if (doOplog) {
+ log() << "\t Replaying oplog" << endl;
+ _curns = OPLOG_SENTINEL;
+ processFile( root / "oplog.bson" );
+ }
+
+ return EXIT_CLEAN;
+ }
+
+ void drillDown( path root, bool use_db, bool use_coll, bool top_level=false ) {
+ log(2) << "drillDown: " << root.string() << endl;
+
+ // skip hidden files and directories
+ if (root.leaf()[0] == '.' && root.leaf() != ".")
+ return;
+
+ if ( is_directory( root ) ) {
+ directory_iterator end;
+ directory_iterator i(root);
+ path indexes;
+ while ( i != end ) {
+ path p = *i;
+ i++;
+
+ if (use_db) {
+ if (is_directory(p)) {
+ error() << "ERROR: root directory must be a dump of a single database" << endl;
+ error() << " when specifying a db name with --db" << endl;
+ printHelp(cout);
+ return;
+ }
+ }
+
+ if (use_coll) {
+ if (is_directory(p) || i != end) {
+ error() << "ERROR: root directory must be a dump of a single collection" << endl;
+ error() << " when specifying a collection name with --collection" << endl;
+ printHelp(cout);
+ return;
+ }
+ }
+
+ // don't insert oplog
+ if (top_level && !use_db && p.leaf() == "oplog.bson")
+ continue;
+
+ if ( p.leaf() == "system.indexes.bson" ) {
+ indexes = p;
+ }
+ else if (_restoreShardingConfig && is_directory(p) && p.leaf() == "config") {
+ // Config directory should have already been restored. Skip it here.
+ continue;
+ }
+ else {
+ drillDown(p, use_db, use_coll);
+ }
+ }
+
+ if (!indexes.empty())
+ drillDown(indexes, use_db, use_coll);
+
+ return;
+ }
+
+ if ( endsWith( root.string().c_str() , ".metadata.json" ) ) {
+ // Metadata files are handled when the corresponding .bson file is handled
+ return;
+ }
+
+ if ( ! ( endsWith( root.string().c_str() , ".bson" ) ||
+ endsWith( root.string().c_str() , ".bin" ) ) ) {
+ error() << "don't know what to do with file [" << root.string() << "]" << endl;
+ return;
+ }
+
+ log() << root.string() << endl;
+
+ if ( root.leaf() == "system.profile.bson" ) {
+ log() << "\t skipping" << endl;
+ return;
+ }
+
+ string ns;
+ if (use_db) {
+ ns += _db;
+ }
+ else {
+ string dir = root.branch_path().string();
+ if ( dir.find( "/" ) == string::npos )
+ ns += dir;
+ else
+ ns += dir.substr( dir.find_last_of( "/" ) + 1 );
+
+ if ( ns.size() == 0 )
+ ns = "test";
+ }
+
+ assert( ns.size() );
+
+ string oldCollName = root.leaf(); // Name of the collection that was dumped from
+ oldCollName = oldCollName.substr( 0 , oldCollName.find_last_of( "." ) );
+ if (use_coll) {
+ ns += "." + _coll;
+ }
+ else {
+ ns += "." + oldCollName;
+ }
+
+ log() << "\tgoing into namespace [" << ns << "]" << endl;
+
+ if ( _drop ) {
+ if (root.leaf() != "system.users.bson" ) {
+ log() << "\t dropping" << endl;
+ conn().dropCollection( ns );
+ } else {
+ // Create map of the users currently in the DB
+ BSONObj fields = BSON("user" << 1);
+ scoped_ptr<DBClientCursor> cursor(conn().query(ns, Query(), 0, 0, &fields));
+ while (cursor->more()) {
+ BSONObj user = cursor->next();
+ _users.insert(user["user"].String());
+ }
+ }
+ }
+
+ BSONObj metadataObject;
+ if (_restoreOptions || _restoreIndexes) {
+ path metadataFile = (root.branch_path() / (oldCollName + ".metadata.json"));
+ if (!exists(metadataFile.string())) {
+                // Dumps from before 2.1 won't have a metadata file, so a missing one is fine; just print a warning.
+                // System collections shouldn't have metadata, so don't warn when the file is missing for them.
+ if (!startsWith(metadataFile.leaf(), "system.")) {
+ log() << metadataFile.string() << " not found. Skipping." << endl;
+ }
+ } else {
+ metadataObject = parseMetadataFile(metadataFile.string());
+ }
+ }
+
+ _curns = ns.c_str();
+ _curdb = NamespaceString(_curns).db;
+ _curcoll = NamespaceString(_curns).coll;
+
+ if (_restoreOptions && metadataObject.hasField("options")) {
+ // Try to create collection with given options
+ createCollectionWithOptions(metadataObject["options"].Obj());
+ }
+
+ processFile( root );
+ if (_drop && root.leaf() == "system.users.bson") {
+ // Delete any users that used to exist but weren't in the dump file
+ for (set<string>::iterator it = _users.begin(); it != _users.end(); ++it) {
+ BSONObj userMatch = BSON("user" << *it);
+ conn().remove(ns, Query(userMatch));
+ }
+ _users.clear();
+ }
+
+ if (_restoreIndexes && metadataObject.hasField("indexes")) {
+ vector<BSONElement> indexes = metadataObject["indexes"].Array();
+ for (vector<BSONElement>::iterator it = indexes.begin(); it != indexes.end(); ++it) {
+ createIndex((*it).Obj(), false);
+ }
+ }
+ }
+
+ virtual void gotObject( const BSONObj& obj ) {
+ if (_curns == OPLOG_SENTINEL) { // intentional ptr compare
+ if (obj["op"].valuestr()[0] == 'n') // skip no-ops
+ return;
+
+ // exclude operations that don't meet (timestamp) criteria
+ if ( _opmatcher.get() && ! _opmatcher->matches ( obj ) ) {
+ return;
+ }
+
+ string db = obj["ns"].valuestr();
+ db = db.substr(0, db.find('.'));
+
+ BSONObj cmd = BSON( "applyOps" << BSON_ARRAY( obj ) );
+ BSONObj out;
+ conn().runCommand(db, cmd, out);
+
+ // wait for ops to propagate to "w" nodes (doesn't warn if w used without replset)
+ if ( _w > 1 ) {
+ conn().getLastError(false, false, _w);
+ }
+ }
+ else if ( endsWith( _curns.c_str() , ".system.indexes" )) {
+ createIndex(obj, true);
+ }
+ else if (_drop && endsWith(_curns.c_str(), ".system.users") && _users.count(obj["user"].String())) {
+ // Since system collections can't be dropped, we have to manually
+ // replace the contents of the system.users collection
+ BSONObj userMatch = BSON("user" << obj["user"].String());
+ conn().update(_curns, Query(userMatch), obj);
+ _users.erase(obj["user"].String());
+ } else {
+ conn().insert( _curns , obj );
+
+ // wait for insert to propagate to "w" nodes (doesn't warn if w used without replset)
+ if ( _w > 1 ) {
+ conn().getLastErrorDetailed(false, false, _w);
+ }
+ }
+ }
+
+private:
+
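+    // A <collection>.metadata.json file is assumed to look roughly like:
+    //   { "options" : { "capped" : true, "size" : 1048576 },
+    //     "indexes" : [ { "v" : 1, "key" : { "a" : 1 }, "ns" : "db.coll", "name" : "a_1" } ] }
+    // "options" feeds createCollectionWithOptions(), "indexes" feeds createIndex().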
+ BSONObj parseMetadataFile(string filePath) {
+ long long fileSize = file_size(filePath);
+ ifstream file(filePath.c_str(), ios_base::in);
+
+        boost::scoped_array<char> buf(new char[fileSize]); // new[] needs delete[], so use scoped_array rather than scoped_ptr
+ file.read(buf.get(), fileSize);
+ int objSize;
+ BSONObj obj;
+ obj = fromjson (buf.get(), &objSize);
+ uassert(15934, "JSON object size didn't match file size", objSize == fileSize);
+ return obj;
+ }
+
+    // Compares two BSONObjs representing collection options. Returns true if the objects
+    // represent the same options. Ignores the "create" field.
+ bool optionsSame(BSONObj obj1, BSONObj obj2) {
+ int nfields = 0;
+ BSONObjIterator i(obj1);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if (!obj2.hasField(e.fieldName())) {
+ if (strcmp(e.fieldName(), "create") == 0) {
+ continue;
+ } else {
+ return false;
+ }
+ }
+ nfields++;
+ if (e != obj2[e.fieldName()]) {
+ return false;
+ }
+ }
+ return nfields == obj2.nFields();
+ }
+
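+    // Rewrites the saved options into a create command whose "create" field names the
+    // collection being restored, e.g. roughly { create: "foo", capped: true, size: 1048576 },
+    // then runs it only if the collection doesn't already appear in system.namespaces.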
+ void createCollectionWithOptions(BSONObj cmdObj) {
+ if (!cmdObj.hasField("create") || cmdObj["create"].String() != _curcoll) {
+ BSONObjBuilder bo;
+ if (!cmdObj.hasField("create")) {
+ bo.append("create", _curcoll);
+ }
+
+ BSONObjIterator i(cmdObj);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if (strcmp(e.fieldName(), "create") == 0) {
+ bo.append("create", _curcoll);
+ }
+ else {
+ bo.append(e);
+ }
+ }
+ cmdObj = bo.obj();
+ }
+
+ BSONObj fields = BSON("options" << 1);
+ scoped_ptr<DBClientCursor> cursor(conn().query(_curdb + ".system.namespaces", Query(BSON("name" << _curns)), 0, 0, &fields));
+
+ bool createColl = true;
+ if (cursor->more()) {
+ createColl = false;
+ BSONObj obj = cursor->next();
+ if (!obj.hasField("options") || !optionsSame(cmdObj, obj["options"].Obj())) {
+ log() << "WARNING: collection " << _curns << " exists with different options than are in the metadata.json file and not using --drop. Options in the metadata file will be ignored." << endl;
+ }
+ }
+
+ if (!createColl) {
+ return;
+ }
+
+ BSONObj info;
+ if (!conn().runCommand(_curdb, cmdObj, info)) {
+ uasserted(15936, "Creating collection " + _curns + " failed. Errmsg: " + info["errmsg"].String());
+ } else {
+ log() << "\tCreated collection " << _curns << " with options: " << cmdObj.jsonString() << endl;
+ }
+ }
+
+ /* We must handle if the dbname or collection name is different at restore time than what was dumped.
+ If keepCollName is true, however, we keep the same collection name that's in the index object.
+ */
+ void createIndex(BSONObj indexObj, bool keepCollName) {
+ BSONObjBuilder bo;
+ BSONObjIterator i(indexObj);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if (strcmp(e.fieldName(), "ns") == 0) {
+ NamespaceString n(e.String());
+ string s = _curdb + "." + (keepCollName ? n.coll : _curcoll);
+ bo.append("ns", s);
+ }
+ else if (strcmp(e.fieldName(), "v") != 0 || _keepIndexVersion) { // Remove index version number
+ bo.append(e);
+ }
+ }
+ BSONObj o = bo.obj();
+ log(0) << "\tCreating index: " << o << endl;
+ conn().insert( _curdb + ".system.indexes" , o );
+
+ // We're stricter about errors for indexes than for regular data
+ BSONObj err = conn().getLastErrorDetailed(false, false, _w);
+
+ if ( ! ( err["err"].isNull() ) ) {
+ if (err["err"].String() == "norepl" && _w > 1) {
+ error() << "Cannot specify write concern for non-replicas" << endl;
+ }
+ else {
+ error() << "Error creating index " << o["ns"].String();
+ error() << ": " << err["code"].Int() << " " << err["err"].String() << endl;
+                error() << "To resume index restoration, run " << _name << " on file " << _fileName << " manually." << endl;
+ }
+
+ ::abort();
+ }
+ }
+
+ // Calls flushRouterConfig on all mongos' in the config db's mongos collection, as well as on the one
+ // we're currently connected to.
+ void flushAllMongos() {
+ BSONObj info;
+
+ auto_ptr<DBClientCursor> cursor = conn().query ("config.mongos", Query());
+ while (cursor->more()) {
+ BSONObj obj = cursor->nextSafe();
+ string mongos = obj.getField("_id").valuestr();
+ string errmsg;
+ ConnectionString cs = ConnectionString::parse( mongos , errmsg );
+
+ if ( ! cs.isValid() ) {
+ error() << "invalid mongos hostname [" << mongos << "] " << errmsg << endl;
+ continue;
+ }
+
+ DBClientBase* mongosConn = cs.connect( errmsg );
+ if ( ! mongosConn ) {
+ error() << "Error connecting to mongos [" << mongos << "]: " << errmsg << endl;
+ continue;
+ }
+
+ log(1) << "Calling flushRouterConfig on mongos: " << mongos << endl;
+ mongosConn->runCommand( "config" , BSON( "flushRouterConfig" << 1 ) , info );
+ }
+ }
+
+ bool confirmAction() {
+ string userInput;
+ int attemptCount = 0;
+ while (attemptCount < 3) {
+ log() << "Are you sure you want to continue? [y/n]: ";
+ cin >> userInput;
+ if (userInput == "Y" || userInput == "y" || userInput == "yes" || userInput == "Yes" || userInput == "YES") {
+ return true;
+ }
+ else if (userInput == "N" || userInput == "n" || userInput == "no" || userInput == "No" || userInput == "NO") {
+ return false;
+ }
+ else {
+ log() << "Invalid input." << endl;
+ attemptCount++;
+ }
+ }
+ log() << "Entered invalid input 3 times in a row." << endl;
+ return false;
+ }
+};
+
+int main( int argc , char ** argv ) {
+ Restore restore;
+ return restore.main( argc , argv );
+}
diff --git a/src/mongo/tools/sniffer.cpp b/src/mongo/tools/sniffer.cpp
new file mode 100644
index 00000000000..aeab808cfed
--- /dev/null
+++ b/src/mongo/tools/sniffer.cpp
@@ -0,0 +1,566 @@
+// sniffer.cpp
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+/*
+ TODO:
+    large messages - need to track what's left and ignore
+    single object over packet size - can only display beginning of object
+
+ getmore
+ delete
+ killcursors
+
+ */
+#include "../pch.h"
+#include <pcap.h>
+
+#ifdef _WIN32
+#undef min
+#undef max
+#endif
+
+#include "../bson/util/builder.h"
+#include "../util/net/message.h"
+#include "../util/mmap.h"
+#include "../db/dbmessage.h"
+#include "../client/dbclient.h"
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <errno.h>
+#include <sys/types.h>
+#ifndef _WIN32
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#endif
+
+#include <iostream>
+#include <map>
+#include <string>
+
+#include <boost/shared_ptr.hpp>
+
+using namespace std;
+using mongo::asserted;
+using mongo::Message;
+using mongo::MsgData;
+using mongo::DbMessage;
+using mongo::BSONObj;
+using mongo::BufBuilder;
+using mongo::DBClientConnection;
+using mongo::QueryResult;
+using mongo::MemoryMappedFile;
+
+mongo::CmdLine mongo::cmdLine;
+namespace mongo {
+ void setupSignals( bool inFork ){}
+}
+
+#define SNAP_LEN 65535
+
+int captureHeaderSize;
+set<int> serverPorts;
+string forwardAddress;
+bool objcheck = false;
+
+ostream *outPtr = &cout;
+ostream &out() { return *outPtr; }
+
+/* IP header */
+struct sniff_ip {
+ u_char ip_vhl; /* version << 4 | header length >> 2 */
+ u_char ip_tos; /* type of service */
+ u_short ip_len; /* total length */
+ u_short ip_id; /* identification */
+ u_short ip_off; /* fragment offset field */
+#define IP_RF 0x8000 /* reserved fragment flag */
+#define IP_DF 0x4000 /* dont fragment flag */
+#define IP_MF 0x2000 /* more fragments flag */
+#define IP_OFFMASK 0x1fff /* mask for fragmenting bits */
+ u_char ip_ttl; /* time to live */
+ u_char ip_p; /* protocol */
+ u_short ip_sum; /* checksum */
+ struct in_addr ip_src,ip_dst; /* source and dest address */
+};
+#define IP_HL(ip) (((ip)->ip_vhl) & 0x0f)
+#define IP_V(ip) (((ip)->ip_vhl) >> 4)
+
+/* TCP header */
+#ifdef _WIN32
+typedef unsigned __int32 uint32_t;
+#endif
+typedef uint32_t tcp_seq;
+
+struct sniff_tcp {
+ u_short th_sport; /* source port */
+ u_short th_dport; /* destination port */
+ tcp_seq th_seq; /* sequence number */
+ tcp_seq th_ack; /* acknowledgement number */
+ u_char th_offx2; /* data offset, rsvd */
+#define TH_OFF(th) (((th)->th_offx2 & 0xf0) >> 4)
+ u_char th_flags;
+#define TH_FIN 0x01
+#define TH_SYN 0x02
+#define TH_RST 0x04
+#define TH_PUSH 0x08
+#define TH_ACK 0x10
+#define TH_URG 0x20
+#define TH_ECE 0x40
+#define TH_CWR 0x80
+
+#ifndef TH_FLAGS
+#define TH_FLAGS (TH_FIN|TH_SYN|TH_RST|TH_ACK|TH_URG|TH_ECE|TH_CWR)
+#endif
+
+ u_short th_win; /* window */
+ u_short th_sum; /* checksum */
+ u_short th_urp; /* urgent pointer */
+};
+
+#pragma pack( 1 )
+struct Connection {
+ struct in_addr srcAddr;
+ u_short srcPort;
+ struct in_addr dstAddr;
+ u_short dstPort;
+ bool operator<( const Connection &other ) const {
+ return memcmp( this, &other, sizeof( Connection ) ) < 0;
+ }
+ Connection reverse() const {
+ Connection c;
+ c.srcAddr = dstAddr;
+ c.srcPort = dstPort;
+ c.dstAddr = srcAddr;
+ c.dstPort = srcPort;
+ return c;
+ }
+};
+#pragma pack()
+
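+// Per-connection sniffing state. A MongoDB wire message that spans several TCP
+// segments is accumulated in messageBuilder until bytesRemainingInMessage hits zero;
+// expectedSeq is used only to warn about dropped packets.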
+map< Connection, bool > seen;
+map< Connection, int > bytesRemainingInMessage;
+map< Connection, boost::shared_ptr< BufBuilder > > messageBuilder;
+map< Connection, unsigned > expectedSeq;
+map< Connection, boost::shared_ptr<DBClientConnection> > forwarder;
+map< Connection, long long > lastCursor;
+map< Connection, map< long long, long long > > mapCursor;
+
+void processMessage( Connection& c , Message& d );
+
+void got_packet(u_char *args, const struct pcap_pkthdr *header, const u_char *packet) {
+
+ const struct sniff_ip* ip = (struct sniff_ip*)(packet + captureHeaderSize);
+ int size_ip = IP_HL(ip)*4;
+ if ( size_ip < 20 ) {
+ cerr << "*** Invalid IP header length: " << size_ip << " bytes" << endl;
+ return;
+ }
+
+ assert( ip->ip_p == IPPROTO_TCP );
+
+ const struct sniff_tcp* tcp = (struct sniff_tcp*)(packet + captureHeaderSize + size_ip);
+ int size_tcp = TH_OFF(tcp)*4;
+ if (size_tcp < 20) {
+ cerr << "*** Invalid TCP header length: " << size_tcp << " bytes" << endl;
+ return;
+ }
+
+ if ( ! ( serverPorts.count( ntohs( tcp->th_sport ) ) ||
+ serverPorts.count( ntohs( tcp->th_dport ) ) ) ) {
+ return;
+ }
+
+ const u_char * payload = (const u_char*)(packet + captureHeaderSize + size_ip + size_tcp);
+
+ unsigned totalSize = ntohs(ip->ip_len);
+ assert( totalSize <= header->caplen );
+
+ int size_payload = totalSize - (size_ip + size_tcp);
+ if (size_payload <= 0 )
+ return;
+
+ Connection c;
+ c.srcAddr = ip->ip_src;
+ c.srcPort = tcp->th_sport;
+ c.dstAddr = ip->ip_dst;
+ c.dstPort = tcp->th_dport;
+
+ if ( seen[ c ] ) {
+ if ( expectedSeq[ c ] != ntohl( tcp->th_seq ) ) {
+ cerr << "Warning: sequence # mismatch, there may be dropped packets" << endl;
+ }
+ }
+ else {
+ seen[ c ] = true;
+ }
+
+ expectedSeq[ c ] = ntohl( tcp->th_seq ) + size_payload;
+
+ Message m;
+
+ if ( bytesRemainingInMessage[ c ] == 0 ) {
+ m.setData( (MsgData*)payload , false );
+ if ( !m.header()->valid() ) {
+ cerr << "Invalid message start, skipping packet." << endl;
+ return;
+ }
+ if ( size_payload > m.header()->len ) {
+ cerr << "Multiple messages in packet, skipping packet." << endl;
+ return;
+ }
+ if ( size_payload < m.header()->len ) {
+ bytesRemainingInMessage[ c ] = m.header()->len - size_payload;
+ messageBuilder[ c ].reset( new BufBuilder() );
+ messageBuilder[ c ]->appendBuf( (void*)payload, size_payload );
+ return;
+ }
+ }
+ else {
+ bytesRemainingInMessage[ c ] -= size_payload;
+ messageBuilder[ c ]->appendBuf( (void*)payload, size_payload );
+ if ( bytesRemainingInMessage[ c ] < 0 ) {
+ cerr << "Received too many bytes to complete message, resetting buffer" << endl;
+ bytesRemainingInMessage[ c ] = 0;
+ messageBuilder[ c ].reset();
+ return;
+ }
+ if ( bytesRemainingInMessage[ c ] > 0 )
+ return;
+ m.setData( (MsgData*)messageBuilder[ c ]->buf(), true );
+ messageBuilder[ c ]->decouple();
+ messageBuilder[ c ].reset();
+ }
+
+ DbMessage d( m );
+
+ out() << inet_ntoa(ip->ip_src) << ":" << ntohs( tcp->th_sport )
+ << ( serverPorts.count( ntohs( tcp->th_dport ) ) ? " -->> " : " <<-- " )
+ << inet_ntoa(ip->ip_dst) << ":" << ntohs( tcp->th_dport )
+ << " " << d.getns()
+ << " " << m.header()->len << " bytes "
+ << " id:" << hex << m.header()->id << dec << "\t" << m.header()->id;
+
+ processMessage( c , m );
+}
+
+class AuditingDbMessage : public DbMessage {
+public:
+ AuditingDbMessage( const Message &m ) : DbMessage( m ) {}
+ BSONObj nextJsObj( const char *context ) {
+ BSONObj ret = DbMessage::nextJsObj();
+ if ( objcheck && !ret.valid() ) {
+ // TODO provide more debugging info
+ cout << "invalid object in " << context << ": " << ret.hexDump() << endl;
+ }
+ return ret;
+ }
+};
+
+void processMessage( Connection& c , Message& m ) {
+ AuditingDbMessage d(m);
+
+ if ( m.operation() == mongo::opReply )
+ out() << " - " << (unsigned)m.header()->responseTo;
+ out() << '\n';
+
+ try {
+ switch( m.operation() ) {
+ case mongo::opReply: {
+ mongo::QueryResult* r = (mongo::QueryResult*)m.singleData();
+ out() << "\treply" << " n:" << r->nReturned << " cursorId: " << r->cursorId << endl;
+ if ( r->nReturned ) {
+ mongo::BSONObj o( r->data() );
+ out() << "\t" << o << endl;
+ }
+ break;
+ }
+ case mongo::dbQuery: {
+ mongo::QueryMessage q(d);
+ out() << "\tquery: " << q.query << " ntoreturn: " << q.ntoreturn << " ntoskip: " << q.ntoskip;
+ if( !q.fields.isEmpty() )
+ out() << " hasfields";
+ if( q.queryOptions & mongo::QueryOption_SlaveOk )
+ out() << " SlaveOk";
+ if( q.queryOptions & mongo::QueryOption_NoCursorTimeout )
+ out() << " NoCursorTimeout";
+ if( q.queryOptions & ~(mongo::QueryOption_SlaveOk | mongo::QueryOption_NoCursorTimeout) )
+ out() << " queryOptions:" << hex << q.queryOptions;
+ out() << endl;
+ break;
+ }
+ case mongo::dbUpdate: {
+ int flags = d.pullInt();
+ BSONObj q = d.nextJsObj( "update" );
+ BSONObj o = d.nextJsObj( "update" );
+ out() << "\tupdate flags:" << flags << " q:" << q << " o:" << o << endl;
+ break;
+ }
+ case mongo::dbInsert: {
+ out() << "\tinsert: " << d.nextJsObj( "insert" ) << endl;
+ while ( d.moreJSObjs() ) {
+ out() << "\t\t" << d.nextJsObj( "insert" ) << endl;
+ }
+ break;
+ }
+ case mongo::dbGetMore: {
+ int nToReturn = d.pullInt();
+ long long cursorId = d.pullInt64();
+ out() << "\tgetMore nToReturn: " << nToReturn << " cursorId: " << cursorId << endl;
+ break;
+ }
+ case mongo::dbDelete: {
+ int flags = d.pullInt();
+ BSONObj q = d.nextJsObj( "delete" );
+ out() << "\tdelete flags: " << flags << " q: " << q << endl;
+ break;
+ }
+ case mongo::dbKillCursors: {
+ int *x = (int *) m.singleData()->_data;
+ x++; // reserved
+ int n = *x;
+ out() << "\tkillCursors n: " << n << endl;
+ break;
+ }
+ default:
+ out() << "\tunknown opcode " << m.operation() << endl;
+ cerr << "*** CANNOT HANDLE TYPE: " << m.operation() << endl;
+ }
+ }
+ catch ( ... ) {
+ cerr << "Error parsing message for operation: " << m.operation() << endl;
+ }
+
+
+ if ( !forwardAddress.empty() ) {
+ if ( m.operation() != mongo::opReply ) {
+ boost::shared_ptr<DBClientConnection> conn = forwarder[ c ];
+ if ( !conn ) {
+ conn.reset(new DBClientConnection( true ));
+ conn->connect( forwardAddress );
+ forwarder[ c ] = conn;
+ }
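+            // For forwarded queries/getMores, remember the cursor id our own connection
+            // returned (lastCursor); the opReply branch below pairs it with the cursor id
+            // seen in the sniffed reply, so later getMores can be rewritten via mapCursor
+            // to use our cursor instead of the sniffed one.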
+ if ( m.operation() == mongo::dbQuery || m.operation() == mongo::dbGetMore ) {
+ if ( m.operation() == mongo::dbGetMore ) {
+ DbMessage d( m );
+ d.pullInt();
+ long long &cId = d.pullInt64();
+ cId = mapCursor[ c ][ cId ];
+ }
+ Message response;
+ conn->port().call( m, response );
+ QueryResult *qr = (QueryResult *) response.singleData();
+ if ( !( qr->resultFlags() & mongo::ResultFlag_CursorNotFound ) ) {
+ if ( qr->cursorId != 0 ) {
+ lastCursor[ c ] = qr->cursorId;
+ return;
+ }
+ }
+ lastCursor[ c ] = 0;
+ }
+ else {
+ conn->port().say( m );
+ }
+ }
+ else {
+ Connection r = c.reverse();
+ long long myCursor = lastCursor[ r ];
+ QueryResult *qr = (QueryResult *) m.singleData();
+ long long yourCursor = qr->cursorId;
+ if ( ( qr->resultFlags() & mongo::ResultFlag_CursorNotFound ) )
+ yourCursor = 0;
+ if ( myCursor && !yourCursor )
+ cerr << "Expected valid cursor in sniffed response, found none" << endl;
+ if ( !myCursor && yourCursor )
+ cerr << "Sniffed valid cursor when none expected" << endl;
+ if ( myCursor && yourCursor ) {
+ mapCursor[ r ][ qr->cursorId ] = lastCursor[ r ];
+ lastCursor[ r ] = 0;
+ }
+ }
+ }
+}
+
+void processDiagLog( const char * file ) {
+ Connection c;
+ MemoryMappedFile f;
+ long length;
+ unsigned long long L = 0;
+ char * root = (char*)f.map( file , L, MemoryMappedFile::SEQUENTIAL );
+ assert( L < 0x80000000 );
+ length = (long) L;
+ assert( root );
+ assert( length > 0 );
+
+ char * pos = root;
+
+ long read = 0;
+ while ( read < length ) {
+ Message m(pos,false);
+ int len = m.header()->len;
+ DbMessage d(m);
+ cout << len << " " << d.getns() << endl;
+
+ processMessage( c , m );
+
+ read += len;
+ pos += len;
+ }
+
+ f.close();
+}
+
+void usage() {
+ cout <<
+ "Usage: mongosniff [--help] [--forward host:port] [--source (NET <interface> | (FILE | DIAGLOG) <filename>)] [<port0> <port1> ... ]\n"
+ "--forward Forward all parsed request messages to mongod instance at \n"
+ " specified host:port\n"
+ "--source Source of traffic to sniff, either a network interface or a\n"
+ " file containing previously captured packets in pcap format,\n"
+ " or a file containing output from mongod's --diaglog option.\n"
+ " If no source is specified, mongosniff will attempt to sniff\n"
+ " from one of the machine's network interfaces.\n"
+ "--objcheck Log hex representation of invalid BSON objects and nothing\n"
+ " else. Spurious messages about invalid objects may result\n"
+ " when there are dropped tcp packets.\n"
+ "<port0>... These parameters are used to filter sniffing. By default, \n"
+ " only port 27017 is sniffed.\n"
+ "--help Print this help message.\n"
+ << endl;
+}
+
+int main(int argc, char **argv) {
+
+ stringstream nullStream;
+ nullStream.clear(ios::failbit);
+
+ const char *dev = NULL;
+ char errbuf[PCAP_ERRBUF_SIZE];
+ pcap_t *handle;
+
+ struct bpf_program fp;
+ bpf_u_int32 mask;
+ bpf_u_int32 net;
+
+ bool source = false;
+ bool replay = false;
+ bool diaglog = false;
+ const char *file = 0;
+
+ vector< const char * > args;
+ for( int i = 1; i < argc; ++i )
+ args.push_back( argv[ i ] );
+
+ try {
+ for( unsigned i = 0; i < args.size(); ++i ) {
+ const char *arg = args[ i ];
+ if ( arg == string( "--help" ) ) {
+ usage();
+ return 0;
+ }
+ else if ( arg == string( "--forward" ) ) {
+ forwardAddress = args[ ++i ];
+ }
+ else if ( arg == string( "--source" ) ) {
+ uassert( 10266 , "can't use --source twice" , source == false );
+ uassert( 10267 , "source needs more args" , args.size() > i + 2);
+ source = true;
+ replay = ( args[ ++i ] == string( "FILE" ) );
+ diaglog = ( args[ i ] == string( "DIAGLOG" ) );
+ if ( replay || diaglog )
+ file = args[ ++i ];
+ else
+ dev = args[ ++i ];
+ }
+ else if ( arg == string( "--objcheck" ) ) {
+ objcheck = true;
+ outPtr = &nullStream;
+ }
+ else {
+ serverPorts.insert( atoi( args[ i ] ) );
+ }
+ }
+ }
+ catch ( ... ) {
+ usage();
+ return -1;
+ }
+
+ if ( !serverPorts.size() )
+ serverPorts.insert( 27017 );
+
+ if ( diaglog ) {
+ processDiagLog( file );
+ return 0;
+ }
+ else if ( replay ) {
+ handle = pcap_open_offline(file, errbuf);
+ if ( ! handle ) {
+ cerr << "error opening capture file!" << endl;
+ return -1;
+ }
+ }
+ else {
+ if ( !dev ) {
+ dev = pcap_lookupdev(errbuf);
+ if ( ! dev ) {
+ cerr << "error finding device: " << errbuf << endl;
+ return -1;
+ }
+ cout << "found device: " << dev << endl;
+ }
+ if (pcap_lookupnet(dev, &net, &mask, errbuf) == -1) {
+ cerr << "can't get netmask: " << errbuf << endl;
+ return -1;
+ }
+ handle = pcap_open_live(dev, SNAP_LEN, 1, 1000, errbuf);
+ if ( ! handle ) {
+ cerr << "error opening device: " << errbuf << endl;
+ return -1;
+ }
+ }
+
+ switch ( pcap_datalink( handle ) ) {
+ case DLT_EN10MB:
+ captureHeaderSize = 14;
+ break;
+ case DLT_NULL:
+ captureHeaderSize = 4;
+ break;
+ default:
+ cerr << "don't know how to handle datalink type: " << pcap_datalink( handle ) << endl;
+ }
+
+ assert( pcap_compile(handle, &fp, const_cast< char * >( "tcp" ) , 0, net) != -1 );
+ assert( pcap_setfilter(handle, &fp) != -1 );
+
+ cout << "sniffing... ";
+ for ( set<int>::iterator i = serverPorts.begin(); i != serverPorts.end(); i++ )
+ cout << *i << " ";
+ cout << endl;
+
+ pcap_loop(handle, 0 , got_packet, NULL);
+
+ pcap_freecode(&fp);
+ pcap_close(handle);
+
+ return 0;
+}
+
diff --git a/src/mongo/tools/stat.cpp b/src/mongo/tools/stat.cpp
new file mode 100644
index 00000000000..f5c506308e2
--- /dev/null
+++ b/src/mongo/tools/stat.cpp
@@ -0,0 +1,544 @@
+// stat.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "client/dbclient.h"
+#include "db/json.h"
+#include "../util/net/httpclient.h"
+#include "../util/text.h"
+#include "tool.h"
+#include "stat_util.h"
+#include <fstream>
+#include <iostream>
+#include <boost/program_options.hpp>
+
+namespace po = boost::program_options;
+
+namespace mongo {
+
+ class Stat : public Tool {
+ public:
+
+ Stat() : Tool( "stat" , REMOTE_SERVER , "admin" ) {
+ _http = false;
+ _many = false;
+
+ add_hidden_options()
+ ( "sleep" , po::value<int>() , "time to sleep between calls" )
+ ;
+ add_options()
+ ("noheaders", "don't output column names")
+ ("rowcount,n", po::value<int>()->default_value(0), "number of stats lines to print (0 for indefinite)")
+ ("http", "use http instead of raw db connection")
+ ("discover" , "discover nodes and display stats for all" )
+ ("all" , "all optional fields" )
+ ;
+
+ addPositionArg( "sleep" , 1 );
+
+ _autoreconnect = true;
+ }
+
+ virtual void printExtraHelp( ostream & out ) {
+ out << "View live MongoDB performance statistics.\n" << endl;
+ out << "usage: " << _name << " [options] [sleep time]" << endl;
+ out << "sleep time: time to wait (in seconds) between calls" << endl;
+ }
+
+ virtual void printExtraHelpAfter( ostream & out ) {
+ out << "\n";
+ out << " Fields\n";
+ out << " inserts \t- # of inserts per second (* means replicated op)\n";
+ out << " query \t- # of queries per second\n";
+ out << " update \t- # of updates per second\n";
+ out << " delete \t- # of deletes per second\n";
+ out << " getmore \t- # of get mores (cursor batch) per second\n";
+            out << "  command  \t- # of commands per second, on a slave it's local|replicated\n";
+ out << " flushes \t- # of fsync flushes per second\n";
+ out << " mapped \t- amount of data mmaped (total data size) megabytes\n";
+ out << " vsize \t- virtual size of process in megabytes\n";
+ out << " res \t- resident size of process in megabytes\n";
+ out << " faults \t- # of pages faults per sec (linux only)\n";
+ out << " locked \t- percent of time in global write lock\n";
+ out << " idx miss \t- percent of btree page misses (sampled)\n";
+ out << " qr|qw \t- queue lengths for clients waiting (read|write)\n";
+ out << " ar|aw \t- active clients (read|write)\n";
+ out << " netIn \t- network traffic in - bits\n";
+ out << " netOut \t- network traffic out - bits\n";
+ out << " conn \t- number of open connections\n";
+ out << " set \t- replica set name\n";
+ out << " repl \t- replication type \n";
+ out << " \t PRI - primary (master)\n";
+ out << " \t SEC - secondary\n";
+ out << " \t REC - recovering\n";
+ out << " \t UNK - unknown\n";
+ out << " \t SLV - slave\n";
+ out << " \t RTR - mongos process (\"router\")\n";
+ }
+
+ BSONObj stats() {
+ if ( _http ) {
+ HttpClient c;
+ HttpClient::Result r;
+
+ string url;
+ {
+ stringstream ss;
+ ss << "http://" << _host;
+ if ( _host.find( ":" ) == string::npos )
+ ss << ":28017";
+ ss << "/_status";
+ url = ss.str();
+ }
+
+ if ( c.get( url , &r ) != 200 ) {
+ cout << "error (http): " << r.getEntireResponse() << endl;
+ return BSONObj();
+ }
+
+ BSONObj x = fromjson( r.getBody() );
+ BSONElement e = x["serverStatus"];
+ if ( e.type() != Object ) {
+ cout << "BROKEN: " << x << endl;
+ return BSONObj();
+ }
+ return e.embeddedObjectUserCheck();
+ }
+ BSONObj out;
+ if ( ! conn().simpleCommand( _db , &out , "serverStatus" ) ) {
+ cout << "error: " << out << endl;
+ return BSONObj();
+ }
+ return out.getOwned();
+ }
+
+
+ virtual void preSetup() {
+ if ( hasParam( "http" ) ) {
+ _http = true;
+ _noconnection = true;
+ }
+
+ if ( hasParam( "host" ) &&
+ getParam( "host" ).find( ',' ) != string::npos ) {
+ _noconnection = true;
+ _many = true;
+ }
+
+ if ( hasParam( "discover" ) ) {
+ _many = true;
+ }
+ }
+
+ int run() {
+ _statUtil.setSeconds( getParam( "sleep" , 1 ) );
+ _statUtil.setAll( hasParam( "all" ) );
+ if ( _many )
+ return runMany();
+ return runNormal();
+ }
+
+ static void printHeaders( const BSONObj& o ) {
+ BSONObjIterator i(o);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ BSONObj x = e.Obj();
+ cout << setw( x["width"].numberInt() ) << e.fieldName() << ' ';
+ }
+ cout << endl;
+ }
+
+ static void printData( const BSONObj& o , const BSONObj& headers ) {
+
+ BSONObjIterator i(headers);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ BSONObj h = e.Obj();
+ int w = h["width"].numberInt();
+
+ BSONElement data;
+ {
+ BSONElement temp = o[e.fieldName()];
+ if ( temp.isABSONObj() )
+ data = temp.Obj()["data"];
+ }
+
+ if ( data.type() == String )
+ cout << setw(w) << data.String();
+ else if ( data.type() == NumberDouble )
+ cout << setw(w) << setprecision(3) << data.number();
+ else if ( data.type() == NumberInt )
+ cout << setw(w) << data.numberInt();
+ else if ( data.eoo() )
+ cout << setw(w) << "";
+ else
+ cout << setw(w) << "???";
+
+ cout << ' ';
+ }
+ cout << endl;
+ }
+
+ int runNormal() {
+ bool showHeaders = ! hasParam( "noheaders" );
+ int rowCount = getParam( "rowcount" , 0 );
+ int rowNum = 0;
+
+ auth();
+
+ BSONObj prev = stats();
+ if ( prev.isEmpty() )
+ return -1;
+
+ while ( rowCount == 0 || rowNum < rowCount ) {
+ sleepsecs((int)ceil(_statUtil.getSeconds()));
+ BSONObj now;
+ try {
+ now = stats();
+ }
+ catch ( std::exception& e ) {
+ cout << "can't get data: " << e.what() << endl;
+ continue;
+ }
+
+ if ( now.isEmpty() )
+ return -2;
+
+ try {
+
+ BSONObj out = _statUtil.doRow( prev , now );
+
+ if ( showHeaders && rowNum % 10 == 0 ) {
+ printHeaders( out );
+ }
+
+ printData( out , out );
+
+ }
+ catch ( AssertionException& e ) {
+ cout << "\nerror: " << e.what() << "\n"
+ << now
+ << endl;
+ }
+
+ prev = now;
+ rowNum++;
+ }
+ return 0;
+ }
+
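+        // Shared between the display loop (runMany) and one background serverThread per
+        // host; 'lock' guards the fields both sides touch (prev/now/error/shards).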
+ struct ServerState {
+ ServerState() : lock( "Stat::ServerState" ) {}
+ string host;
+ scoped_ptr<boost::thread> thr;
+
+ mongo::mutex lock;
+
+ BSONObj prev;
+ BSONObj now;
+ time_t lastUpdate;
+ vector<BSONObj> shards;
+
+ string error;
+ bool mongos;
+
+ string username;
+ string password;
+ };
+
+ static void serverThread( shared_ptr<ServerState> state ) {
+ try {
+ DBClientConnection conn( true );
+ conn._logLevel = 1;
+ string errmsg;
+ if ( ! conn.connect( state->host , errmsg ) )
+ state->error = errmsg;
+ long long cycleNumber = 0;
+
+ conn.auth("admin", state->username, state->password, errmsg);
+
+ while ( ++cycleNumber ) {
+ try {
+ BSONObj out;
+ if ( conn.simpleCommand( "admin" , &out , "serverStatus" ) ) {
+ scoped_lock lk( state->lock );
+ state->error = "";
+ state->lastUpdate = time(0);
+ state->prev = state->now;
+ state->now = out.getOwned();
+ }
+ else {
+ scoped_lock lk( state->lock );
+ state->error = "serverStatus failed";
+ state->lastUpdate = time(0);
+ }
+
+ if ( out["shardCursorType"].type() == Object ) {
+ state->mongos = true;
+ if ( cycleNumber % 10 == 1 ) {
+ auto_ptr<DBClientCursor> c = conn.query( "config.shards" , BSONObj() );
+ vector<BSONObj> shards;
+ while ( c->more() ) {
+ shards.push_back( c->next().getOwned() );
+ }
+ scoped_lock lk( state->lock );
+ state->shards = shards;
+ }
+ }
+ }
+ catch ( std::exception& e ) {
+ scoped_lock lk( state->lock );
+ state->error = e.what();
+ }
+
+ sleepsecs( 1 );
+ }
+
+
+ }
+ catch ( std::exception& e ) {
+ cout << "serverThread (" << state->host << ") fatal error : " << e.what() << endl;
+ }
+ catch ( ... ) {
+ cout << "serverThread (" << state->host << ") fatal error" << endl;
+ }
+ }
+
+ typedef map<string,shared_ptr<ServerState> > StateMap;
+
+ bool _add( StateMap& threads , string host ) {
+ shared_ptr<ServerState>& state = threads[host];
+ if ( state )
+ return false;
+
+ state.reset( new ServerState() );
+ state->host = host;
+ state->thr.reset( new boost::thread( boost::bind( serverThread , state ) ) );
+ state->username = _username;
+ state->password = _password;
+
+ return true;
+ }
+
+ /**
+ * @param hosts [ "a.foo.com" , "b.foo.com" ]
+ */
+ bool _addAll( StateMap& threads , const BSONObj& hosts ) {
+ BSONObjIterator i( hosts );
+ bool added = false;
+ while ( i.more() ) {
+ bool me = _add( threads , i.next().String() );
+ added = added || me;
+ }
+ return added;
+ }
+
+ bool _discover( StateMap& threads , const string& host , const shared_ptr<ServerState>& ss ) {
+
+ BSONObj info = ss->now;
+
+ bool found = false;
+
+ if ( info["repl"].isABSONObj() ) {
+ BSONObj x = info["repl"].Obj();
+ if ( x["hosts"].isABSONObj() )
+ if ( _addAll( threads , x["hosts"].Obj() ) )
+ found = true;
+ if ( x["passives"].isABSONObj() )
+ if ( _addAll( threads , x["passives"].Obj() ) )
+ found = true;
+ }
+
+ if ( ss->mongos ) {
+ for ( unsigned i=0; i<ss->shards.size(); i++ ) {
+ BSONObj x = ss->shards[i];
+
+ string errmsg;
+ ConnectionString cs = ConnectionString::parse( x["host"].String() , errmsg );
+ if ( errmsg.size() ) {
+ cerr << errmsg << endl;
+ continue;
+ }
+
+ vector<HostAndPort> v = cs.getServers();
+ for ( unsigned i=0; i<v.size(); i++ ) {
+ if ( _add( threads , v[i].toString() ) )
+ found = true;
+ }
+ }
+ }
+
+ return found;
+ }
+
+ int runMany() {
+ StateMap threads;
+
+ {
+ string orig = getParam( "host" );
+ if ( orig == "" )
+ orig = "localhost";
+
+ if ( orig.find( ":" ) == string::npos ) {
+ if ( hasParam( "port" ) )
+ orig += ":" + _params["port"].as<string>();
+ else
+ orig += ":27017";
+ }
+
+ StringSplitter ss( orig.c_str() , "," );
+ while ( ss.more() ) {
+ string host = ss.next();
+ _add( threads , host );
+ }
+ }
+
+ sleepsecs(1);
+
+ int row = 0;
+ bool discover = hasParam( "discover" );
+
+ while ( 1 ) {
+ sleepsecs( (int)ceil(_statUtil.getSeconds()) );
+
+ // collect data
+ vector<Row> rows;
+ for ( map<string,shared_ptr<ServerState> >::iterator i=threads.begin(); i!=threads.end(); ++i ) {
+ scoped_lock lk( i->second->lock );
+
+ if ( i->second->error.size() ) {
+ rows.push_back( Row( i->first , i->second->error ) );
+ }
+ else if ( i->second->prev.isEmpty() || i->second->now.isEmpty() ) {
+ rows.push_back( Row( i->first ) );
+ }
+ else {
+ BSONObj out = _statUtil.doRow( i->second->prev , i->second->now );
+ rows.push_back( Row( i->first , out ) );
+ }
+
+ if ( discover && ! i->second->now.isEmpty() ) {
+ if ( _discover( threads , i->first , i->second ) )
+ break;
+ }
+ }
+
+ // compute some stats
+ unsigned longestHost = 0;
+ BSONObj biggest;
+ for ( unsigned i=0; i<rows.size(); i++ ) {
+ if ( rows[i].host.size() > longestHost )
+ longestHost = rows[i].host.size();
+ if ( rows[i].data.nFields() > biggest.nFields() )
+ biggest = rows[i].data;
+ }
+
+ {
+ // check for any headers not in biggest
+
+ // TODO: we put any new headers at end,
+ // ideally we would interleave
+
+ set<string> seen;
+
+ BSONObjBuilder b;
+
+ {
+ // iterate biggest
+ BSONObjIterator i( biggest );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ seen.insert( e.fieldName() );
+ b.append( e );
+ }
+ }
+
+ // now do the rest
+ for ( unsigned j=0; j<rows.size(); j++ ) {
+ BSONObjIterator i( rows[j].data );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( seen.count( e.fieldName() ) )
+ continue;
+ seen.insert( e.fieldName() );
+ b.append( e );
+ }
+
+ }
+
+ biggest = b.obj();
+
+ }
+
+ // display data
+
+ cout << endl;
+
+ // header
+ if ( row++ % 5 == 0 && ! biggest.isEmpty() ) {
+ cout << setw( longestHost ) << "" << "\t";
+ printHeaders( biggest );
+ }
+
+ // rows
+ for ( unsigned i=0; i<rows.size(); i++ ) {
+ cout << setw( longestHost ) << rows[i].host << "\t";
+ if ( rows[i].err.size() )
+ cout << rows[i].err << endl;
+ else if ( rows[i].data.isEmpty() )
+ cout << "no data" << endl;
+ else
+ printData( rows[i].data , biggest );
+ }
+
+ }
+
+ return 0;
+ }
+
+ StatUtil _statUtil;
+ bool _http;
+ bool _many;
+
+ struct Row {
+ Row( string h , string e ) {
+ host = h;
+ err = e;
+ }
+
+ Row( string h ) {
+ host = h;
+ }
+
+ Row( string h , BSONObj d ) {
+ host = h;
+ data = d;
+ }
+ string host;
+ string err;
+ BSONObj data;
+ };
+ };
+
+}
+
+int main( int argc , char ** argv ) {
+ mongo::Stat stat;
+ return stat.main( argc , argv );
+}
+
diff --git a/src/mongo/tools/stat_util.cpp b/src/mongo/tools/stat_util.cpp
new file mode 100644
index 00000000000..38f780e8734
--- /dev/null
+++ b/src/mongo/tools/stat_util.cpp
@@ -0,0 +1,269 @@
+// stat_util.cpp
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "stat_util.h"
+#include "../util/mongoutils/str.h"
+
+using namespace mongoutils;
+
+namespace mongo {
+
+    StatUtil::StatUtil( double seconds , bool all ) :
+        _seconds( seconds ) ,
+        _all( all ) {
+    }
+
+ bool StatUtil::_in( const BSONElement& me , const BSONElement& arr ) {
+ if ( me.type() != String || arr.type() != Array )
+ return false;
+
+ string s = me.String();
+ BSONForEach(e, arr.Obj()) {
+ if ( e.type() == String && s == e.String() )
+ return true;
+ }
+ return false;
+ }
+
+ BSONObj StatUtil::doRow( const BSONObj& a , const BSONObj& b ) {
+ BSONObjBuilder result;
+
+ bool isMongos = b["shardCursorType"].type() == Object; // TODO: should have a better check
+
+ if ( a["opcounters"].isABSONObj() && b["opcounters"].isABSONObj() ) {
+ BSONObj ax = a["opcounters"].embeddedObject();
+ BSONObj bx = b["opcounters"].embeddedObject();
+
+ BSONObj ar = a["opcountersRepl"].isABSONObj() ? a["opcountersRepl"].embeddedObject() : BSONObj();
+ BSONObj br = b["opcountersRepl"].isABSONObj() ? b["opcountersRepl"].embeddedObject() : BSONObj();
+
+ BSONObjIterator i( bx );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ if ( ar.isEmpty() || br.isEmpty() ) {
+ _append( result , e.fieldName() , 6 , (int)diff( e.fieldName() , ax , bx ) );
+ }
+ else {
+ string f = e.fieldName();
+
+ int m = (int)diff( f , ax , bx );
+ int r = (int)diff( f , ar , br );
+
+ string myout;
+
+ if ( f == "command" ) {
+ myout = str::stream() << m << "|" << r;
+ }
+ else if ( f == "getmore" ) {
+ myout = str::stream() << m;
+ }
+ else if ( m && r ) {
+                    // both direct and replicated ops in the same interval (unusual); show both
+ myout = str::stream() << m << "|" << r;
+ }
+ else if ( m ) {
+ myout = str::stream() << m;
+ }
+ else if ( r ) {
+ myout = str::stream() << "*" << r;
+ }
+ else {
+ myout = "*0";
+ }
+
+ _append( result , f , 6 , myout );
+ }
+ }
+ }
+
+ if ( b["backgroundFlushing"].type() == Object ) {
+ BSONObj ax = a["backgroundFlushing"].embeddedObject();
+ BSONObj bx = b["backgroundFlushing"].embeddedObject();
+ _append( result , "flushes" , 6 , (int)diff( "flushes" , ax , bx ) );
+ }
+
+ if ( b.getFieldDotted("mem.supported").trueValue() ) {
+ BSONObj bx = b["mem"].embeddedObject();
+ BSONObjIterator i( bx );
+ if (!isMongos)
+ _appendMem( result , "mapped" , 6 , bx["mapped"].numberInt() );
+ _appendMem( result , "vsize" , 6 , bx["virtual"].numberInt() );
+ _appendMem( result , "res" , 6 , bx["resident"].numberInt() );
+
+ if ( !isMongos && _all )
+ _appendMem( result , "non-mapped" , 6 , bx["virtual"].numberInt() - bx["mapped"].numberInt() );
+ }
+
+ if ( b["extra_info"].type() == Object ) {
+ BSONObj ax = a["extra_info"].embeddedObject();
+ BSONObj bx = b["extra_info"].embeddedObject();
+            if ( ax["page_faults"].type() || bx["page_faults"].type() )
+ _append( result , "faults" , 6 , (int)diff( "page_faults" , ax , bx ) );
+ }
+
+ if (!isMongos) {
+ _append( result , "locked %" , 8 , percent( "globalLock.totalTime" , "globalLock.lockTime" , a , b ) );
+ _append( result , "idx miss %" , 8 , percent( "indexCounters.btree.accesses" , "indexCounters.btree.misses" , a , b ) );
+ }
+
+ if ( b.getFieldDotted( "globalLock.currentQueue" ).type() == Object ) {
+ int r = b.getFieldDotted( "globalLock.currentQueue.readers" ).numberInt();
+ int w = b.getFieldDotted( "globalLock.currentQueue.writers" ).numberInt();
+ stringstream temp;
+ temp << r << "|" << w;
+ _append( result , "qr|qw" , 9 , temp.str() );
+ }
+
+ if ( b.getFieldDotted( "globalLock.activeClients" ).type() == Object ) {
+ int r = b.getFieldDotted( "globalLock.activeClients.readers" ).numberInt();
+ int w = b.getFieldDotted( "globalLock.activeClients.writers" ).numberInt();
+ stringstream temp;
+ temp << r << "|" << w;
+ _append( result , "ar|aw" , 7 , temp.str() );
+ }
+
+ if ( a["network"].isABSONObj() && b["network"].isABSONObj() ) {
+ BSONObj ax = a["network"].embeddedObject();
+ BSONObj bx = b["network"].embeddedObject();
+ _appendNet( result , "netIn" , diff( "bytesIn" , ax , bx ) );
+ _appendNet( result , "netOut" , diff( "bytesOut" , ax , bx ) );
+ }
+
+ _append( result , "conn" , 5 , b.getFieldDotted( "connections.current" ).numberInt() );
+
+ if ( b["repl"].type() == Object ) {
+
+ BSONObj x = b["repl"].embeddedObject();
+ bool isReplSet = x["setName"].type() == String;
+
+ stringstream ss;
+
+ if ( isReplSet ) {
+ string setName = x["setName"].String();
+ _append( result , "set" , setName.size() , setName );
+ }
+
+ if ( x["ismaster"].trueValue() )
+ ss << "PRI";
+ else if ( x["secondary"].trueValue() )
+ ss << "SEC";
+ else if ( x["isreplicaset"].trueValue() )
+ ss << "REC";
+ else if ( x["arbiterOnly"].trueValue() )
+ ss << "ARB";
+ else if ( _in( x["me"] , x["passives"] ) )
+ ss << "PSV";
+ else if ( isReplSet )
+ ss << "UNK";
+ else
+ ss << "SLV";
+
+ _append( result , "repl" , 4 , ss.str() );
+
+ }
+ else if ( isMongos ) {
+ _append( result , "repl" , 4 , "RTR" );
+ }
+
+ {
+ struct tm t;
+ time_t_to_Struct( time(0), &t , true );
+ stringstream temp;
+ temp << setfill('0') << setw(2) << t.tm_hour
+ << ":"
+ << setfill('0') << setw(2) << t.tm_min
+ << ":"
+ << setfill('0') << setw(2) << t.tm_sec;
+ _append( result , "time" , 10 , temp.str() );
+ }
+ return result.obj();
+ }
+
+
+
+ double StatUtil::percent( const char * outof , const char * val , const BSONObj& a , const BSONObj& b ) {
+ double x = ( b.getFieldDotted( val ).number() - a.getFieldDotted( val ).number() );
+ double y = ( b.getFieldDotted( outof ).number() - a.getFieldDotted( outof ).number() );
+ if ( y == 0 )
+ return 0;
+ double p = x / y;
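+        // truncate to one decimal place and express as a percentage, e.g. 0.1234 -> 12.3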
+ p = (double)((int)(p * 1000)) / 10;
+ return p;
+ }
+
+
+
+ double StatUtil::diff( const string& name , const BSONObj& a , const BSONObj& b ) {
+ BSONElement x = a.getFieldDotted( name.c_str() );
+ BSONElement y = b.getFieldDotted( name.c_str() );
+ if ( ! x.isNumber() || ! y.isNumber() )
+ return -1;
+ return ( y.number() - x.number() ) / _seconds;
+ }
+
+
+ void StatUtil::_appendNet( BSONObjBuilder& result , const string& name , double diff ) {
+ // I think 1000 is correct for megabit, but I've seen conflicting things (ERH 11/2010)
+ const double div = 1000;
+
+ string unit = "b";
+
+ if ( diff >= div ) {
+ unit = "k";
+ diff /= div;
+ }
+
+ if ( diff >= div ) {
+ unit = "m";
+ diff /= div;
+ }
+
+ if ( diff >= div ) {
+ unit = "g";
+ diff /= div;
+ }
+
+ string out = str::stream() << (int)diff << unit;
+ _append( result , name , 6 , out );
+ }
+
+
+
+ void StatUtil::_appendMem( BSONObjBuilder& result , const string& name , unsigned width , double sz ) {
+ string unit = "m";
+ if ( sz > 1024 ) {
+ unit = "g";
+ sz /= 1024;
+ }
+
+ if ( sz >= 1000 ) {
+ string s = str::stream() << (int)sz << unit;
+ _append( result , name , width , s );
+ return;
+ }
+
+ stringstream ss;
+ ss << setprecision(3) << sz << unit;
+ _append( result , name , width , ss.str() );
+ }
+
+}
+
diff --git a/src/mongo/tools/stat_util.h b/src/mongo/tools/stat_util.h
new file mode 100644
index 00000000000..4990e91624c
--- /dev/null
+++ b/src/mongo/tools/stat_util.h
@@ -0,0 +1,78 @@
+// stat_util.h
+
+/**
+ * Copyright (C) 2008 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "../pch.h"
+#include "../db/jsobj.h"
+
+namespace mongo {
+
+ /**
+ * static methods useful for computing status from serverStatus type things
+ */
+ class StatUtil {
+ public:
+ /**
+ * @param seconds - seconds between calls to serverStatus
+ * @param all - show all fields
+ */
+ StatUtil( double seconds = 1 , bool all = false );
+
+ /**
+ * @param a older serverStatus
+ * @param b newer serverStatus
+ */
+ BSONObj doRow( const BSONObj& a , const BSONObj& b );
+
+ double getSeconds() const { return _seconds; }
+ bool getAll() const { return _all; }
+
+ void setSeconds( double seconds ) { _seconds = seconds; }
+ void setAll( bool all ) { _all = all; }
+
+ private:
+
+
+ double percent( const char * outof , const char * val , const BSONObj& a , const BSONObj& b );
+
+ double diff( const string& name , const BSONObj& a , const BSONObj& b );
+
+ void _appendMem( BSONObjBuilder& result , const string& name , unsigned width , double sz );
+
+ void _appendNet( BSONObjBuilder& result , const string& name , double diff );
+
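+        /** each column is stored as  name -> { width: <n>, data: <value> }; Stat's
+            printHeaders()/printData() later read "width" and "data" back out */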
+ template<typename T>
+ void _append( BSONObjBuilder& result , const string& name , unsigned width , const T& t ) {
+ if ( name.size() > width )
+ width = name.size();
+ result.append( name , BSON( "width" << (int)width << "data" << t ) );
+ }
+
+ bool _in( const BSONElement& me , const BSONElement& arr );
+
+
+ // -------
+
+ double _seconds;
+ bool _all;
+
+ };
+
+}
+
diff --git a/src/mongo/tools/tool.cpp b/src/mongo/tools/tool.cpp
new file mode 100644
index 00000000000..dc08625a545
--- /dev/null
+++ b/src/mongo/tools/tool.cpp
@@ -0,0 +1,526 @@
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+// Tool.cpp
+
+#include "tool.h"
+
+#include <iostream>
+
+#include <boost/filesystem/operations.hpp>
+#include "pcrecpp.h"
+
+#include "util/file_allocator.h"
+#include "util/password.h"
+#include "util/version.h"
+
+using namespace std;
+using namespace mongo;
+
+namespace po = boost::program_options;
+
+namespace mongo {
+
+ CmdLine cmdLine;
+
+ Tool::Tool( string name , DBAccess access , string defaultDB ,
+ string defaultCollection , bool usesstdout ) :
+ _name( name ) , _db( defaultDB ) , _coll( defaultCollection ) ,
+ _usesstdout(usesstdout), _noconnection(false), _autoreconnect(false), _conn(0), _slaveConn(0), _paired(false) {
+
+ _options = new po::options_description( "options" );
+ _options->add_options()
+ ("help","produce help message")
+ ("verbose,v", "be more verbose (include multiple times for more verbosity e.g. -vvvvv)")
+ ("version", "print the program's version and exit" )
+ ;
+
+ if ( access & REMOTE_SERVER )
+ _options->add_options()
+ ("host,h",po::value<string>(), "mongo host to connect to ( <set name>/s1,s2 for sets)" )
+ ("port",po::value<string>(), "server port. Can also use --host hostname:port" )
+ ("ipv6", "enable IPv6 support (disabled by default)")
+#ifdef MONGO_SSL
+            ("ssl", "use SSL for all connections")
+#endif
+
+ ("username,u",po::value<string>(), "username" )
+ ("password,p", new PasswordValue( &_password ), "password" )
+ ;
+
+ if ( access & LOCAL_SERVER )
+ _options->add_options()
+ ("dbpath",po::value<string>(), "directly access mongod database "
+ "files in the given path, instead of connecting to a mongod "
+ "server - needs to lock the data directory, so cannot be "
+ "used if a mongod is currently accessing the same path" )
+ ("directoryperdb", "if dbpath specified, each db is in a separate directory" )
+ ("journal", "enable journaling" )
+ ;
+
+ if ( access & SPECIFY_DBCOL )
+ _options->add_options()
+ ("db,d",po::value<string>(), "database to use" )
+ ("collection,c",po::value<string>(), "collection to use (some commands)" )
+ ;
+
+ _hidden_options = new po::options_description( name + " hidden options" );
+
+ /* support for -vv -vvvv etc. */
+ for (string s = "vv"; s.length() <= 10; s.append("v")) {
+ _hidden_options->add_options()(s.c_str(), "verbose");
+ }
+ }
+
+ Tool::~Tool() {
+ delete( _options );
+ delete( _hidden_options );
+ if ( _conn )
+ delete _conn;
+ }
+
+ void Tool::printHelp(ostream &out) {
+ printExtraHelp(out);
+ _options->print(out);
+ printExtraHelpAfter(out);
+ }
+
+ void Tool::printVersion(ostream &out) {
+ out << _name << " version " << mongo::versionString;
+ if (mongo::versionString[strlen(mongo::versionString)-1] == '-')
+ out << " (commit " << mongo::gitVersion() << ")";
+ out << endl;
+ }
+ int Tool::main( int argc , char ** argv ) {
+ static StaticObserver staticObserver;
+
+ cmdLine.prealloc = false;
+
+ // The default value may vary depending on compile options, but for tools
+ // we want durability to be disabled.
+ cmdLine.dur = false;
+
+#if( BOOST_VERSION >= 104500 )
+ boost::filesystem::path::default_name_check( boost::filesystem2::no_check );
+#else
+ boost::filesystem::path::default_name_check( boost::filesystem::no_check );
+#endif
+
+ _name = argv[0];
+
+ /* using the same style as db.cpp */
+ int command_line_style = (((po::command_line_style::unix_style ^
+ po::command_line_style::allow_guessing) |
+ po::command_line_style::allow_long_disguise) ^
+ po::command_line_style::allow_sticky);
+ try {
+ po::options_description all_options("all options");
+ all_options.add(*_options).add(*_hidden_options);
+
+ po::store( po::command_line_parser( argc , argv ).
+ options(all_options).
+ positional( _positonalOptions ).
+ style(command_line_style).run() , _params );
+
+ po::notify( _params );
+ }
+ catch (po::error &e) {
+ cerr << "ERROR: " << e.what() << endl << endl;
+ printHelp(cerr);
+ return EXIT_BADOPTIONS;
+ }
+
+ // hide password from ps output
+ for (int i=0; i < (argc-1); ++i) {
+ if (!strcmp(argv[i], "-p") || !strcmp(argv[i], "--password")) {
+ char* arg = argv[i+1];
+ while (*arg) {
+ *arg++ = 'x';
+ }
+ }
+ }
+
+ if ( _params.count( "help" ) ) {
+ printHelp(cout);
+ return 0;
+ }
+
+ if ( _params.count( "version" ) ) {
+ printVersion(cout);
+ return 0;
+ }
+
+ if ( _params.count( "verbose" ) ) {
+ logLevel = 1;
+ }
+
+ for (string s = "vv"; s.length() <= 10; s.append("v")) {
+ if (_params.count(s)) {
+ logLevel = s.length();
+ }
+ }
+
+
+#ifdef MONGO_SSL
+ if (_params.count("ssl")) {
+ mongo::cmdLine.sslOnNormalPorts = true;
+ }
+#endif
+
+ preSetup();
+
+ bool useDirectClient = hasParam( "dbpath" );
+
+ if ( ! useDirectClient ) {
+ _host = "127.0.0.1";
+ if ( _params.count( "host" ) )
+ _host = _params["host"].as<string>();
+
+ if ( _params.count( "port" ) )
+ _host += ':' + _params["port"].as<string>();
+
+ if ( _noconnection ) {
+ // do nothing
+ }
+ else {
+ string errmsg;
+
+ ConnectionString cs = ConnectionString::parse( _host , errmsg );
+ if ( ! cs.isValid() ) {
+ cerr << "invalid hostname [" << _host << "] " << errmsg << endl;
+ return -1;
+ }
+
+ _conn = cs.connect( errmsg );
+ if ( ! _conn ) {
+ cerr << "couldn't connect to [" << _host << "] " << errmsg << endl;
+ return -1;
+ }
+
+ (_usesstdout ? cout : cerr ) << "connected to: " << _host << endl;
+ }
+
+ }
+ else {
+ if ( _params.count( "directoryperdb" ) ) {
+ directoryperdb = true;
+ }
+ assert( lastError.get( true ) );
+
+ if (_params.count("journal")){
+ cmdLine.dur = true;
+ }
+
+ Client::initThread("tools");
+ _conn = new DBDirectClient();
+ _host = "DIRECT";
+ static string myDbpath = getParam( "dbpath" );
+ dbpath = myDbpath.c_str();
+ try {
+ acquirePathLock();
+ }
+ catch ( DBException& ) {
+ cerr << endl << "If you are running a mongod on the same "
+ "path you should connect to that instead of direct data "
+ "file access" << endl << endl;
+ dbexit( EXIT_CLEAN );
+ return -1;
+ }
+
+ FileAllocator::get()->start();
+
+ dur::startup();
+ }
+
+ if ( _params.count( "db" ) )
+ _db = _params["db"].as<string>();
+
+ if ( _params.count( "collection" ) )
+ _coll = _params["collection"].as<string>();
+
+ if ( _params.count( "username" ) )
+ _username = _params["username"].as<string>();
+
+ if ( _params.count( "password" )
+ && ( _password.empty() ) ) {
+ _password = askPassword();
+ }
+
+ if (_params.count("ipv6"))
+ enableIPv6();
+
+ int ret = -1;
+ try {
+ ret = run();
+ }
+ catch ( DBException& e ) {
+ cerr << "assertion: " << e.toString() << endl;
+ ret = -1;
+ }
+ catch(const boost::filesystem::filesystem_error &fse) {
+ /*
+ https://jira.mongodb.org/browse/SERVER-2904
+
+ Simple tools that don't access the database, such as
+ bsondump, aren't throwing DBExceptions, but are throwing
+ boost exceptions.
+
+ The currently available set of error codes don't seem to match
+ boost documentation. boost::filesystem::not_found_error
+ (from http://www.boost.org/doc/libs/1_31_0/libs/filesystem/doc/exception.htm)
+ doesn't seem to exist in our headers. Also, fse.code() isn't
+ boost::system::errc::no_such_file_or_directory when this
+            happens, as you would expect. Also, we determined by
+            experimentation that the command-line argument gets turned into
+            "\\?" instead of "/?" !!!
+ */
+#if defined(_WIN32)
+ if (/*(fse.code() == boost::system::errc::no_such_file_or_directory) &&*/
+ (fse.path1() == "\\?"))
+ printHelp(cerr);
+ else
+#endif // _WIN32
+ cerr << "error: " << fse.what() << endl;
+
+ ret = -1;
+ }
+
+ if ( currentClient.get() )
+ currentClient.get()->shutdown();
+
+ if ( useDirectClient )
+ dbexit( EXIT_CLEAN );
+ return ret;
+ }
+
+ DBClientBase& Tool::conn( bool slaveIfPaired ) {
+ if ( slaveIfPaired && _conn->type() == ConnectionString::SET ) {
+ if (!_slaveConn)
+ _slaveConn = &((DBClientReplicaSet*)_conn)->slaveConn();
+ return *_slaveConn;
+ }
+ return *_conn;
+ }
+
+ bool Tool::isMaster() {
+ if ( hasParam("dbpath") ) {
+ return true;
+ }
+
+ BSONObj info;
+ bool isMaster;
+ bool ok = conn().isMaster(isMaster, &info);
+
+ if (ok && !isMaster) {
+ cerr << "ERROR: trying to write to non-master " << conn().toString() << endl;
+ cerr << "isMaster info: " << info << endl;
+ return false;
+ }
+
+ return true;
+ }
+
+ bool Tool::isMongos() {
+        // TODO: when mongos supports QueryOption_Exhaust add a version check (SERVER-2628)
+ BSONObj isdbgrid;
+        conn(true).simpleCommand("admin", &isdbgrid, "isdbgrid");
+ return isdbgrid["isdbgrid"].trueValue();
+ }
+
+ void Tool::addFieldOptions() {
+ add_options()
+ ("fields,f" , po::value<string>() , "comma separated list of field names e.g. -f name,age" )
+ ("fieldFile" , po::value<string>() , "file with fields names - 1 per line" )
+ ;
+ }
+
+ void Tool::needFields() {
+
+ if ( hasParam( "fields" ) ) {
+ BSONObjBuilder b;
+
+ string fields_arg = getParam("fields");
+ pcrecpp::StringPiece input(fields_arg);
+
+ string f;
+ pcrecpp::RE re("([#\\w\\.\\s\\-]+),?" );
+ while ( re.Consume( &input, &f ) ) {
+ _fields.push_back( f );
+ b.append( f , 1 );
+ }
+
+ _fieldsObj = b.obj();
+ return;
+ }
+
+ if ( hasParam( "fieldFile" ) ) {
+ string fn = getParam( "fieldFile" );
+ if ( ! exists( fn ) )
+ throw UserException( 9999 , ((string)"file: " + fn ) + " doesn't exist" );
+
+ const int BUF_SIZE = 1024;
+            char line[ BUF_SIZE + 128 ];
+ ifstream file( fn.c_str() );
+
+ BSONObjBuilder b;
+ while ( file.rdstate() == ios_base::goodbit ) {
+ file.getline( line , BUF_SIZE );
+ const char * cur = line;
+ while ( isspace( cur[0] ) ) cur++;
+ if ( cur[0] == '\0' )
+ continue;
+
+ _fields.push_back( cur );
+ b.append( cur , 1 );
+ }
+ _fieldsObj = b.obj();
+ return;
+ }
+
+ throw UserException( 9998 , "you need to specify fields" );
+ }
+
+ void Tool::auth( string dbname ) {
+ if ( ! dbname.size() )
+ dbname = _db;
+
+ if ( ! ( _username.size() || _password.size() ) ) {
+ // Make sure that we don't need authentication to connect to this db
+ // findOne throws an AssertionException if it's not authenticated.
+ if (_coll.size() > 0) {
+ // BSONTools don't have a collection
+ conn().findOne(getNS(), Query("{}"), 0, QueryOption_SlaveOk);
+ }
+ return;
+ }
+
+ string errmsg;
+ if ( _conn->auth( dbname , _username , _password , errmsg ) )
+ return;
+
+ // try against the admin db
+ if ( _conn->auth( "admin" , _username , _password , errmsg ) )
+ return;
+
+ throw UserException( 9997 , (string)"authentication failed: " + errmsg );
+ }
+
+ BSONTool::BSONTool( const char * name, DBAccess access , bool objcheck )
+ : Tool( name , access , "" , "" , false ) , _objcheck( objcheck ) {
+
+ add_options()
+ ("objcheck" , "validate object before inserting" )
+ ("filter" , po::value<string>() , "filter to apply before inserting" )
+ ;
+ }
+
+
+ int BSONTool::run() {
+ _objcheck = hasParam( "objcheck" );
+
+ if ( hasParam( "filter" ) )
+ _matcher.reset( new Matcher( fromjson( getParam( "filter" ) ) ) );
+
+ return doRun();
+ }
+
+ long long BSONTool::processFile( const path& root ) {
+ _fileName = root.string();
+
+ unsigned long long fileLength = file_size( root );
+
+ if ( fileLength == 0 ) {
+ out() << "file " << _fileName << " empty, skipping" << endl;
+ return 0;
+ }
+
+
+ FILE* file = fopen( _fileName.c_str() , "rb" );
+ if ( ! file ) {
+ log() << "error opening file: " << _fileName << " " << errnoWithDescription() << endl;
+ return 0;
+ }
+
+#if !defined(__sunos__) && defined(POSIX_FADV_SEQUENTIAL)
+ posix_fadvise(fileno(file), 0, fileLength, POSIX_FADV_SEQUENTIAL);
+#endif
+
+ log(1) << "\t file size: " << fileLength << endl;
+
+ unsigned long long read = 0;
+ unsigned long long num = 0;
+ unsigned long long processed = 0;
+
+ const int BUF_SIZE = BSONObjMaxUserSize + ( 1024 * 1024 );
+ boost::scoped_array<char> buf_holder(new char[BUF_SIZE]);
+ char * buf = buf_holder.get();
+
+ ProgressMeter m( fileLength );
+ m.setUnits( "bytes" );
+
+ while ( read < fileLength ) {
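+            // each BSON document starts with a 4 byte little-endian int32 holding its
+            // total size (those 4 bytes included), so read the length first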
+ size_t amt = fread(buf, 1, 4, file);
+ assert( amt == 4 );
+
+ int size = ((int*)buf)[0];
+ uassert( 10264 , str::stream() << "invalid object size: " << size , size < BUF_SIZE );
+
+ amt = fread(buf+4, 1, size-4, file);
+ assert( amt == (size_t)( size - 4 ) );
+
+ BSONObj o( buf );
+ if ( _objcheck && ! o.valid() ) {
+                cerr << "INVALID OBJECT - going to try and print it out" << endl;
+ cerr << "size: " << size << endl;
+ BSONObjIterator i(o);
+ while ( i.more() ) {
+ BSONElement e = i.next();
+ try {
+ e.validate();
+ }
+ catch ( ... ) {
+ cerr << "\t\t NEXT ONE IS INVALID" << endl;
+ }
+ cerr << "\t name : " << e.fieldName() << " " << e.type() << endl;
+ cerr << "\t " << e << endl;
+ }
+ }
+
+ if ( _matcher.get() == 0 || _matcher->matches( o ) ) {
+ gotObject( o );
+ processed++;
+ }
+
+ read += o.objsize();
+ num++;
+
+ m.hit( o.objsize() );
+ }
+
+ fclose( file );
+
+ uassert( 10265 , "counts don't match" , m.done() == fileLength );
+ (_usesstdout ? cout : cerr ) << m.hits() << " objects found" << endl;
+ if ( _matcher.get() )
+ (_usesstdout ? cout : cerr ) << processed << " objects processed" << endl;
+ return processed;
+ }
+
+
+
+ void setupSignals( bool inFork ) {}
+}
diff --git a/src/mongo/tools/tool.h b/src/mongo/tools/tool.h
new file mode 100644
index 00000000000..e40109362c5
--- /dev/null
+++ b/src/mongo/tools/tool.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+// Tool.h
+
+#pragma once
+
+#include <string>
+
+#include <boost/program_options.hpp>
+
+#if defined(_WIN32)
+#include <io.h>
+#endif
+
+#include "client/dbclient.h"
+#include "db/instance.h"
+#include "db/matcher.h"
+
+using std::string;
+
+namespace mongo {
+
+ class Tool {
+ public:
+ enum DBAccess {
+ NONE = 0 ,
+ REMOTE_SERVER = 1 << 1 ,
+ LOCAL_SERVER = 1 << 2 ,
+ SPECIFY_DBCOL = 1 << 3 ,
+ ALL = REMOTE_SERVER | LOCAL_SERVER | SPECIFY_DBCOL
+ };
+
+ Tool( string name , DBAccess access=ALL, string defaultDB="test" ,
+ string defaultCollection="", bool usesstdout=true);
+ virtual ~Tool();
+
+ int main( int argc , char ** argv );
+
+ boost::program_options::options_description_easy_init add_options() {
+ return _options->add_options();
+ }
+ boost::program_options::options_description_easy_init add_hidden_options() {
+ return _hidden_options->add_options();
+ }
+ void addPositionArg( const char * name , int pos ) {
+ _positonalOptions.add( name , pos );
+ }
+
+ string getParam( string name , string def="" ) {
+ if ( _params.count( name ) )
+ return _params[name.c_str()].as<string>();
+ return def;
+ }
+ int getParam( string name , int def ) {
+ if ( _params.count( name ) )
+ return _params[name.c_str()].as<int>();
+ return def;
+ }
+ bool hasParam( string name ) {
+ return _params.count( name );
+ }
+
+ string getNS() {
+ if ( _coll.size() == 0 ) {
+ cerr << "no collection specified!" << endl;
+ throw -1;
+ }
+ return _db + "." + _coll;
+ }
+
+ void useStandardOutput( bool mode ) {
+ _usesstdout = mode;
+ }
+
+ bool isMaster();
+ bool isMongos();
+
+ virtual void preSetup() {}
+
+ virtual int run() = 0;
+
+ virtual void printHelp(ostream &out);
+
+ virtual void printExtraHelp( ostream & out ) {}
+ virtual void printExtraHelpAfter( ostream & out ) {}
+
+ virtual void printVersion(ostream &out);
+
+ protected:
+
+ mongo::DBClientBase &conn( bool slaveIfPaired = false );
+ void auth( string db = "" );
+
+ string _name;
+
+ string _db;
+ string _coll;
+ string _fileName;
+
+ string _username;
+ string _password;
+
+ bool _usesstdout;
+ bool _noconnection;
+ bool _autoreconnect;
+
+ void addFieldOptions();
+ void needFields();
+
+ vector<string> _fields;
+ BSONObj _fieldsObj;
+
+
+ string _host;
+
+ protected:
+
+ mongo::DBClientBase * _conn;
+ mongo::DBClientBase * _slaveConn;
+ bool _paired;
+
+ boost::program_options::options_description * _options;
+ boost::program_options::options_description * _hidden_options;
+ boost::program_options::positional_options_description _positonalOptions;
+
+ boost::program_options::variables_map _params;
+
+ };
+
+ class BSONTool : public Tool {
+ bool _objcheck;
+ auto_ptr<Matcher> _matcher;
+
+ public:
+ BSONTool( const char * name , DBAccess access=ALL, bool objcheck = false );
+
+ virtual int doRun() = 0;
+ virtual void gotObject( const BSONObj& obj ) = 0;
+
+ virtual int run();
+
+ long long processFile( const path& file );
+
+ };
+
+}
diff --git a/src/mongo/tools/top.cpp b/src/mongo/tools/top.cpp
new file mode 100644
index 00000000000..6479bb2bd7a
--- /dev/null
+++ b/src/mongo/tools/top.cpp
@@ -0,0 +1,200 @@
+// top.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "client/dbclient.h"
+#include "db/json.h"
+#include "../util/text.h"
+#include "tool.h"
+#include <fstream>
+#include <iostream>
+#include <boost/program_options.hpp>
+
+namespace po = boost::program_options;
+
+namespace mongo {
+
+ class TopTool : public Tool {
+ public:
+
+ TopTool() : Tool( "top" , REMOTE_SERVER , "admin" ) {
+ _sleep = 1;
+
+ add_hidden_options()
+ ( "sleep" , po::value<int>() , "time to sleep between calls" )
+ ;
+ addPositionArg( "sleep" , 1 );
+
+ _autoreconnect = true;
+ }
+
+ virtual void printExtraHelp( ostream & out ) {
+ out << "View live MongoDB collection statistics.\n" << endl;
+ }
+
+ BSONObj getData() {
+ BSONObj out;
+ if ( ! conn().simpleCommand( _db , &out , "top" ) ) {
+ cout << "error: " << out << endl;
+ return BSONObj();
+ }
+ return out.getOwned();
+ }
+
+ void printDiff( BSONObj prev , BSONObj now ) {
+ if ( ! prev["totals"].isABSONObj() ||
+ ! now["totals"].isABSONObj() ) {
+ cout << "." << endl;
+ return;
+ }
+
+ prev = prev["totals"].Obj();
+ now = now["totals"].Obj();
+
+ vector<NSInfo> data;
+
+ unsigned longest = 30;
+
+ BSONObjIterator i( now );
+ while ( i.more() ) {
+ BSONElement e = i.next();
+
+ // invalid, data fixed in 1.8.0
+ if ( e.fieldName()[0] == '?' )
+ continue;
+
+ if ( ! str::contains( e.fieldName() , '.' ) )
+ continue;
+
+ BSONElement old = prev[e.fieldName()];
+ if ( old.eoo() )
+ continue;
+
+ if ( strlen( e.fieldName() ) > longest )
+ longest = strlen(e.fieldName());
+
+ data.push_back( NSInfo( e.fieldName() , old.Obj() , e.Obj() ) );
+ }
+
+ std::sort( data.begin() , data.end() );
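+            // data is now sorted ascending by total time delta, so walk from the back
+            // to print the busiest namespaces (fewer than 10 of them) first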
+
+ cout << "\n"
+ << setw(longest) << "ns"
+ << "\ttotal "
+ << "\tread "
+ << "\twrite "
+ << "\t\t" << terseCurrentTime()
+ << endl;
+ for ( int i=data.size()-1; i>=0 && data.size() - i < 10 ; i-- ) {
+ cout << setw(longest) << data[i].ns
+ << "\t" << setprecision(3) << data[i].diffTimeMS( "total" ) << "ms"
+ << "\t" << setprecision(3) << data[i].diffTimeMS( "readLock" ) << "ms"
+ << "\t" << setprecision(3) << data[i].diffTimeMS( "writeLock" ) << "ms"
+ << endl;
+ }
+ }
+
+ int run() {
+ _sleep = getParam( "sleep" , _sleep );
+
+ BSONObj prev = getData();
+
+ while ( true ) {
+ sleepsecs( _sleep );
+
+ BSONObj now;
+ try {
+ now = getData();
+ }
+ catch ( std::exception& e ) {
+ cout << "can't get data: " << e.what() << endl;
+ continue;
+ }
+
+ if ( now.isEmpty() )
+ return -2;
+
+ try {
+ printDiff( prev , now );
+ }
+ catch ( AssertionException& e ) {
+ cout << "\nerror: " << e.what() << "\n"
+ << now
+ << endl;
+ }
+
+
+ prev = now;
+ }
+
+ return 0;
+ }
+
+ struct NSInfo {
+ NSInfo( string thens , BSONObj a , BSONObj b ) {
+ ns = thens;
+ prev = a;
+ cur = b;
+
+ timeDiff = diffTime( "total" );
+ }
+
+
+ int diffTimeMS( const char * field ) const {
+ return (int)(diffTime( field ) / 1000);
+ }
+
+ double diffTime( const char * field ) const {
+ return diff( field , "time" );
+ }
+
+ double diffCount( const char * field ) const {
+ return diff( field , "count" );
+ }
+
+ /**
+ * @param field total,readLock, etc...
+ * @param type time or count
+ */
+ double diff( const char * field , const char * type ) const {
+ return cur[field].Obj()[type].number() - prev[field].Obj()[type].number();
+ }
+
+ bool operator<(const NSInfo& r) const {
+ return timeDiff < r.timeDiff;
+ }
+
+ string ns;
+
+ BSONObj prev;
+ BSONObj cur;
+
+ double timeDiff; // time diff between prev and cur
+ };
+
+ private:
+ int _sleep;
+ };
+
+}
+
+int main( int argc , char ** argv ) {
+ mongo::TopTool top;
+ return top.main( argc , argv );
+}
+
diff --git a/src/mongo/util/admin_access.h b/src/mongo/util/admin_access.h
new file mode 100644
index 00000000000..bb882b2b4c5
--- /dev/null
+++ b/src/mongo/util/admin_access.h
@@ -0,0 +1,52 @@
+/** @file admin_access.h
+ */
+
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+namespace mongo {
+
+ /*
+ * An AdminAccess is an interface class used to determine if certain users have
+     * privileges to a given resource.
+ *
+ */
+ class AdminAccess {
+ public:
+ virtual ~AdminAccess() { }
+
+        /** @return whether there are any privileged users. This should not
+         *  block for long, and may throw if it cannot acquire a needed lock.
+ */
+ virtual bool haveAdminUsers() const = 0;
+
+        /** @return the privileged user with this name. This should not block
+         *  for long, and may throw if it cannot acquire a needed lock.
+ */
+ virtual BSONObj getAdminUser( const string& username ) const = 0;
+ };
+
+ class NoAdminAccess : public AdminAccess {
+ public:
+ virtual ~NoAdminAccess() { }
+
+ virtual bool haveAdminUsers() const { return false; }
+ virtual BSONObj getAdminUser( const string& username ) const { return BSONObj(); }
+ };
+
+} // namespace mongo
diff --git a/src/mongo/util/alignedbuilder.cpp b/src/mongo/util/alignedbuilder.cpp
new file mode 100644
index 00000000000..b2e0461b733
--- /dev/null
+++ b/src/mongo/util/alignedbuilder.cpp
@@ -0,0 +1,141 @@
+// @file alignedbuilder.cpp
+
+/**
+* Copyright (C) 2009 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "alignedbuilder.h"
+
+namespace mongo {
+
+ AlignedBuilder::AlignedBuilder(unsigned initSize) {
+ _len = 0;
+ _malloc(initSize);
+ uassert(13584, "out of memory AlignedBuilder", _p._allocationAddress);
+ }
+
+ BOOST_STATIC_ASSERT(sizeof(void*) == sizeof(size_t));
+
+ /** reset for a re-use. shrinks if > 128MB */
+ void AlignedBuilder::reset() {
+ _len = 0;
+ RARELY {
+ const unsigned sizeCap = 128*1024*1024;
+ if (_p._size > sizeCap)
+ _realloc(sizeCap, _len);
+ }
+ }
+
+ /** reset with a hint as to the upcoming needed size specified */
+ void AlignedBuilder::reset(unsigned sz) {
+ _len = 0;
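+        // round the requested size up to the next 32MB boundary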
+ unsigned Q = 32 * 1024 * 1024 - 1;
+ unsigned want = (sz+Q) & (~Q);
+ if( _p._size == want ) {
+ return;
+ }
+ if( _p._size > want ) {
+ if( _p._size <= 64 * 1024 * 1024 )
+ return;
+ bool downsize = false;
+ RARELY { downsize = true; }
+ if( !downsize )
+ return;
+ }
+ _realloc(want, _len);
+ }
+
+ void AlignedBuilder::mallocSelfAligned(unsigned sz) {
+ assert( sz == _p._size );
+ void *p = malloc(sz + Alignment - 1);
+ _p._allocationAddress = p;
+ size_t s = (size_t) p;
+ size_t sold = s;
+ s += Alignment - 1;
+ s = (s/Alignment)*Alignment;
+        assert( s >= sold ); // beginning
+ assert( (s + sz) <= (sold + sz + Alignment - 1) ); //end
+ _p._data = (char *) s;
+ }
+
+ /* "slow"/infrequent portion of 'grow()' */
+ void NOINLINE_DECL AlignedBuilder::growReallocate(unsigned oldLen) {
+ dassert( _len > _p._size );
+ unsigned a = _p._size;
+ assert( a );
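+        // double the allocation up to 128MB, then grow by fixed 32MB (32-bit) or 64MB (64-bit) steps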
+ while( 1 ) {
+ if( a < 128 * 1024 * 1024 )
+ a *= 2;
+ else if( sizeof(int*) == 4 )
+ a += 32 * 1024 * 1024;
+ else
+ a += 64 * 1024 * 1024;
+ DEV if( a > 256*1024*1024 ) {
+ log() << "dur AlignedBuilder too big, aborting in _DEBUG build" << endl;
+ abort();
+ }
+ wassert( a <= 256*1024*1024 );
+ assert( a <= 512*1024*1024 );
+ if( _len < a )
+ break;
+ }
+ _realloc(a, oldLen);
+ }
+
+ void AlignedBuilder::_malloc(unsigned sz) {
+ _p._size = sz;
+#if defined(_WIN32)
+ void *p = VirtualAlloc(0, sz, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
+ _p._allocationAddress = p;
+ _p._data = (char *) p;
+#elif defined(__linux__)
+        // in theory #ifdef _POSIX_VERSION should work, but it doesn't on OS X 10.4, and needs to be tested on Solaris.
+ // so for now, linux only for this.
+ void *p = 0;
+ int res = posix_memalign(&p, Alignment, sz);
+ massert(13524, "out of memory AlignedBuilder", res == 0);
+ _p._allocationAddress = p;
+ _p._data = (char *) p;
+#else
+ mallocSelfAligned(sz);
+ assert( ((size_t) _p._data) % Alignment == 0 );
+#endif
+ }
+
+ void AlignedBuilder::_realloc(unsigned newSize, unsigned oldLen) {
+ // posix_memalign alignment is not maintained on reallocs, so we can't use realloc().
+ AllocationInfo old = _p;
+ _malloc(newSize);
+ assert( oldLen <= _len );
+ memcpy(_p._data, old._data, oldLen);
+ _free(old._allocationAddress);
+ }
+
+ void AlignedBuilder::_free(void *p) {
+#if defined(_WIN32)
+ VirtualFree(p, 0, MEM_RELEASE);
+#else
+ free(p);
+#endif
+ }
+
+ void AlignedBuilder::kill() {
+ _free(_p._allocationAddress);
+ _p._allocationAddress = 0;
+ _p._data = 0;
+ }
+
+}
diff --git a/src/mongo/util/alignedbuilder.h b/src/mongo/util/alignedbuilder.h
new file mode 100644
index 00000000000..1d246a9d78e
--- /dev/null
+++ b/src/mongo/util/alignedbuilder.h
@@ -0,0 +1,125 @@
+// @file alignedbuilder.h
+
+/**
+* Copyright (C) 2009 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../bson/stringdata.h"
+
+namespace mongo {
+
+ /** a page-aligned BufBuilder. */
+ class AlignedBuilder {
+ public:
+ AlignedBuilder(unsigned init_size);
+ ~AlignedBuilder() { kill(); }
+
+ /** reset with a hint as to the upcoming needed size specified */
+ void reset(unsigned sz);
+
+ /** reset for a re-use. shrinks if > 128MB */
+ void reset();
+
+ /** note this may be deallocated (realloced) if you keep writing or reset(). */
+ const char* buf() const { return _p._data; }
+
+ /** leave room for some stuff later
+ @return offset in the buffer that was our current position
+ */
+ size_t skip(unsigned n) {
+ unsigned l = len();
+ grow(n);
+ return l;
+ }
+
+ /** if buffer grows pointer no longer valid */
+ char* atOfs(unsigned ofs) { return _p._data + ofs; }
+
+ /** if buffer grows pointer no longer valid */
+ char* cur() { return _p._data + _len; }
+
+ void appendChar(char j) {
+ *((char*)grow(sizeof(char))) = j;
+ }
+ void appendNum(char j) {
+ *((char*)grow(sizeof(char))) = j;
+ }
+ void appendNum(short j) {
+ *((short*)grow(sizeof(short))) = j;
+ }
+ void appendNum(int j) {
+ *((int*)grow(sizeof(int))) = j;
+ }
+ void appendNum(unsigned j) {
+ *((unsigned*)grow(sizeof(unsigned))) = j;
+ }
+ void appendNum(bool j) {
+ *((bool*)grow(sizeof(bool))) = j;
+ }
+ void appendNum(double j) {
+ *((double*)grow(sizeof(double))) = j;
+ }
+ void appendNum(long long j) {
+ *((long long*)grow(sizeof(long long))) = j;
+ }
+ void appendNum(unsigned long long j) {
+ *((unsigned long long*)grow(sizeof(unsigned long long))) = j;
+ }
+
+ void appendBuf(const void *src, size_t len) { memcpy(grow((unsigned) len), src, len); }
+
+ template<class T>
+ void appendStruct(const T& s) { appendBuf(&s, sizeof(T)); }
+
+ void appendStr(const StringData &str , bool includeEOO = true ) {
+ const unsigned len = str.size() + ( includeEOO ? 1 : 0 );
+ assert( len < (unsigned) BSONObjMaxUserSize );
+ memcpy(grow(len), str.data(), len);
+ }
+
+ /** @return the in-use length */
+ unsigned len() const { return _len; }
+
+ private:
+ static const unsigned Alignment = 8192;
+
+ /** returns the pre-grow write position */
+ inline char* grow(unsigned by) {
+ unsigned oldlen = _len;
+ _len += by;
+ if (MONGO_unlikely( _len > _p._size )) {
+ growReallocate(oldlen);
+ }
+ return _p._data + oldlen;
+ }
+
+ void growReallocate(unsigned oldLenInUse);
+ void kill();
+ void mallocSelfAligned(unsigned sz);
+ void _malloc(unsigned sz);
+ void _realloc(unsigned newSize, unsigned oldLenInUse);
+ void _free(void*);
+
+ struct AllocationInfo {
+ char *_data;
+ void *_allocationAddress;
+ unsigned _size;
+ } _p;
+ unsigned _len; // bytes in use
+ };
+
+}
diff --git a/src/mongo/util/allocator.h b/src/mongo/util/allocator.h
new file mode 100644
index 00000000000..a642e7cab56
--- /dev/null
+++ b/src/mongo/util/allocator.h
@@ -0,0 +1,39 @@
+// allocator.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+namespace mongo {
+
+ inline void * ourmalloc(size_t size) {
+ void *x = malloc(size);
+ if ( x == 0 ) dbexit( EXIT_OOM_MALLOC , "malloc fails");
+ return x;
+ }
+
+ inline void * ourrealloc(void *ptr, size_t size) {
+ void *x = realloc(ptr, size);
+ if ( x == 0 ) dbexit( EXIT_OOM_REALLOC , "realloc fails");
+ return x;
+ }
+
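+// route subsequent malloc/realloc calls (in files that include this header) through the
+// OOM-checking wrappers above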
+#define MONGO_malloc mongo::ourmalloc
+#define malloc MONGO_malloc
+#define MONGO_realloc mongo::ourrealloc
+#define realloc MONGO_realloc
+
+} // namespace mongo
diff --git a/src/mongo/util/array.h b/src/mongo/util/array.h
new file mode 100644
index 00000000000..12822252fd7
--- /dev/null
+++ b/src/mongo/util/array.h
@@ -0,0 +1,127 @@
+// array.h
+
+/*
+ * Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace mongo {
+
+ /*
+ * simple array class that does no allocations
+ * same api as vector
+ * fixed buffer, so once capacity is exceeded, will assert
+     * meant to be re-used with clear()
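+     *
+     * usage sketch (illustrative only):
+     *     FastArray<int> a( 16 );
+     *     a.push_back( 5 );
+     *     for ( FastArray<int>::iterator i = a.begin(); i != a.end(); ++i )
+     *         cout << *i << endl;
+     *     a.clear();   // reuse the same buffer, no reallocation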
+ */
+ template<typename T>
+ class FastArray {
+ public:
+ FastArray( int capacity=10000 )
+ : _capacity( capacity ) , _size(0) , _end(this,capacity) {
+ _data = new T[capacity];
+ }
+
+ ~FastArray() {
+ delete[] _data;
+ }
+
+ void clear() {
+ _size = 0;
+ }
+
+ T& operator[]( int x ) {
+ assert( x >= 0 && x < _capacity );
+ return _data[x];
+ }
+
+ T& getNext() {
+ return _data[_size++];
+ }
+
+ void push_back( const T& t ) {
+ assert( _size < _capacity );
+ _data[_size++] = t;
+ }
+
+ void sort( int (*comp)(const void *, const void *) ) {
+ qsort( _data , _size , sizeof(T) , comp );
+ }
+
+ int size() {
+ return _size;
+ }
+
+ bool hasSpace() {
+ return _size < _capacity;
+ }
+ class iterator {
+ public:
+ iterator() {
+ _it = 0;
+ _pos = 0;
+ }
+
+ iterator( FastArray * it , int pos=0 ) {
+ _it = it;
+ _pos = pos;
+ }
+
+ bool operator==(const iterator& other ) const {
+ return _pos == other._pos;
+ }
+
+ bool operator!=(const iterator& other ) const {
+ return _pos != other._pos;
+ }
+
+ void operator++() {
+ _pos++;
+ }
+
+ T& operator*() {
+ return _it->_data[_pos];
+ }
+
+ string toString() const {
+ stringstream ss;
+ ss << _pos;
+ return ss.str();
+ }
+ private:
+ FastArray * _it;
+ int _pos;
+
+ friend class FastArray;
+ };
+
+
+ iterator begin() {
+ return iterator(this);
+ }
+
+ iterator end() {
+ _end._pos = _size;
+ return _end;
+ }
+
+
+ private:
+ int _capacity;
+ int _size;
+
+ iterator _end;
+
+ T * _data;
+ };
+}
diff --git a/src/mongo/util/assert_util.cpp b/src/mongo/util/assert_util.cpp
new file mode 100644
index 00000000000..2199cb1ce11
--- /dev/null
+++ b/src/mongo/util/assert_util.cpp
@@ -0,0 +1,213 @@
+// assert_util.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "assert_util.h"
+#include "assert.h"
+//#include "file.h"
+#include <cmath>
+using namespace std;
+
+#ifndef _WIN32
+#include <cxxabi.h>
+#include <sys/file.h>
+#endif
+
+//#include "../bson/bson.h"
+#include "../db/jsobj.h"
+
+namespace mongo {
+
+ AssertionCount assertionCount;
+
+ AssertionCount::AssertionCount()
+ : regular(0),warning(0),msg(0),user(0),rollovers(0) {
+ }
+
+ void AssertionCount::rollover() {
+ rollovers++;
+ regular = 0;
+ warning = 0;
+ msg = 0;
+ user = 0;
+ }
+
+ void AssertionCount::condrollover( int newvalue ) {
+ static int max = (int)pow( 2.0 , 30 );
+ if ( newvalue >= max )
+ rollover();
+ }
+
+ bool DBException::traceExceptions = false;
+
+ void ExceptionInfo::append( BSONObjBuilder& b , const char * m , const char * c ) const {
+ if ( msg.empty() )
+ b.append( m , "unknown assertion" );
+ else
+ b.append( m , msg );
+
+ if ( code )
+ b.append( c , code );
+ }
+
+ string getDbContext();
+
+ /* "warning" assert -- safe to continue, so we don't throw exception. */
+ NOINLINE_DECL void wasserted(const char *msg, const char *file, unsigned line) {
+        static int rateLimited;
+ static time_t lastWhen;
+ static unsigned lastLine;
+ if( lastLine == line && time(0)-lastWhen < 5 ) {
+ if( rateLimited++ == 0 ) {
+ log() << "rate limiting wassert" << endl;
+ }
+ return;
+ }
+ lastWhen = time(0);
+ lastLine = line;
+
+ problem() << "warning assertion failure " << msg << ' ' << file << ' ' << dec << line << endl;
+ sayDbContext();
+ raiseError(0,msg && *msg ? msg : "wassertion failure");
+ assertionCount.condrollover( ++assertionCount.warning );
+#if defined(_DEBUG) || defined(_DURABLEDEFAULTON) || defined(_DURABLEDEFAULTOFF)
+ // this is so we notice in buildbot
+ log() << "\n\n***aborting after wassert() failure in a debug/test build\n\n" << endl;
+ abort();
+#endif
+ }
+
+ NOINLINE_DECL void asserted(const char *msg, const char *file, unsigned line) {
+ assertionCount.condrollover( ++assertionCount.regular );
+ problem() << "Assertion failure " << msg << ' ' << file << ' ' << dec << line << endl;
+ sayDbContext();
+ raiseError(0,msg && *msg ? msg : "assertion failure");
+ stringstream temp;
+ temp << "assertion " << file << ":" << line;
+ AssertionException e(temp.str(),0);
+ breakpoint();
+#if defined(_DEBUG) || defined(_DURABLEDEFAULTON) || defined(_DURABLEDEFAULTOFF)
+ // this is so we notice in buildbot
+ log() << "\n\n***aborting after assert() failure as this is a debug/test build\n\n" << endl;
+ abort();
+#endif
+ throw e;
+ }
+
+ NOINLINE_DECL void verifyFailed( int msgid ) {
+ assertionCount.condrollover( ++assertionCount.regular );
+ problem() << "Assertion failure " << msgid << endl;
+ sayDbContext();
+ raiseError(0,"assertion failure");
+ stringstream temp;
+ temp << msgid;
+ AssertionException e(temp.str(),0);
+ breakpoint();
+#if defined(_DEBUG) || defined(_DURABLEDEFAULTON) || defined(_DURABLEDEFAULTOFF)
+ // this is so we notice in buildbot
+ log() << "\n\n***aborting after verify() failure in a debug/test build\n\n" << endl;
+ abort();
+#endif
+ throw e;
+ }
+
+ void uassert_nothrow(const char *msg) {
+ raiseError(0,msg);
+ }
+
+ void uasserted(int msgid , const string &msg) {
+ uasserted(msgid, msg.c_str());
+ }
+
+ NOINLINE_DECL void uasserted(int msgid, const char *msg) {
+ assertionCount.condrollover( ++assertionCount.user );
+ LOG(1) << "User Assertion: " << msgid << ":" << msg << endl;
+ raiseError(msgid,msg);
+ throw UserException(msgid, msg);
+ }
+
+ void msgasserted(int msgid, const string &msg) {
+ msgasserted(msgid, msg.c_str());
+ }
+
+ NOINLINE_DECL void msgasserted(int msgid, const char *msg) {
+ assertionCount.condrollover( ++assertionCount.warning );
+ tlog() << "Assertion: " << msgid << ":" << msg << endl;
+ raiseError(msgid,msg && *msg ? msg : "massert failure");
+ breakpoint();
+ printStackTrace();
+ throw MsgAssertionException(msgid, msg);
+ }
+
+ NOINLINE_DECL void msgassertedNoTrace(int msgid, const char *msg) {
+ assertionCount.condrollover( ++assertionCount.warning );
+ log() << "Assertion: " << msgid << ":" << msg << endl;
+ raiseError(msgid,msg && *msg ? msg : "massert failure");
+ throw MsgAssertionException(msgid, msg);
+ }
+
+ NOINLINE_DECL void streamNotGood( int code , string msg , std::ios& myios ) {
+ stringstream ss;
+        // errno might not work with streams on all systems;
+        // if it doesn't on some system, that case should be handled here
+ ss << msg << " stream invalid: " << errnoWithDescription();
+ throw UserException( code , ss.str() );
+ }
+
+ string errnoWithPrefix( const char * prefix ) {
+ stringstream ss;
+ if ( prefix )
+ ss << prefix << ": ";
+ ss << errnoWithDescription();
+ return ss.str();
+ }
+
+ string demangleName( const type_info& typeinfo ) {
+#ifdef _WIN32
+ return typeinfo.name();
+#else
+ int status;
+
+ char * niceName = abi::__cxa_demangle(typeinfo.name(), 0, 0, &status);
+ if ( ! niceName )
+ return typeinfo.name();
+
+ string s = niceName;
+ free(niceName);
+ return s;
+#endif
+ }
+
+ NOINLINE_DECL ErrorMsg::ErrorMsg(const char *msg, char ch) {
+ int l = strlen(msg);
+ assert( l < 128);
+ memcpy(buf, msg, l);
+ char *p = buf + l;
+ p[0] = ch;
+ p[1] = 0;
+ }
+
+ NOINLINE_DECL ErrorMsg::ErrorMsg(const char *msg, unsigned val) {
+ int l = strlen(msg);
+ assert( l < 128);
+ memcpy(buf, msg, l);
+ char *p = buf + l;
+ sprintf(p, "%u", val);
+ }
+
+}
+
diff --git a/src/mongo/util/assert_util.h b/src/mongo/util/assert_util.h
new file mode 100644
index 00000000000..2e6b2a9732a
--- /dev/null
+++ b/src/mongo/util/assert_util.h
@@ -0,0 +1,275 @@
+// assert_util.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#pragma once
+
+#include "../db/lasterror.h"
+
+// MONGO_NORETURN undefed at end of file
+#ifdef __GNUC__
+# define MONGO_NORETURN __attribute__((__noreturn__))
+#else
+# define MONGO_NORETURN
+#endif
+
+namespace mongo {
+
+ enum CommonErrorCodes {
+ DatabaseDifferCaseCode = 13297 ,
+ SendStaleConfigCode = 13388 ,
+ RecvStaleConfigCode = 9996
+ };
+
+ class AssertionCount {
+ public:
+ AssertionCount();
+ void rollover();
+ void condrollover( int newValue );
+
+ int regular;
+ int warning;
+ int msg;
+ int user;
+ int rollovers;
+ };
+
+ extern AssertionCount assertionCount;
+
+ struct ExceptionInfo {
+ ExceptionInfo() : msg(""),code(-1) {}
+ ExceptionInfo( const char * m , int c )
+ : msg( m ) , code( c ) {
+ }
+ ExceptionInfo( const string& m , int c )
+ : msg( m ) , code( c ) {
+ }
+ void append( BSONObjBuilder& b , const char * m = "$err" , const char * c = "code" ) const ;
+ string toString() const { stringstream ss; ss << "exception: " << code << " " << msg; return ss.str(); }
+ bool empty() const { return msg.empty(); }
+
+ void reset(){ msg = ""; code=-1; }
+
+ string msg;
+ int code;
+ };
+
+ /** helper class that builds error strings. lighter weight than a StringBuilder, albeit less flexible.
+ NOINLINE_DECL used in the constructor implementations as we are assuming this is a cold code path when used.
+
+ example:
+ throw UserException(123, ErrorMsg("blah", num_val));
+ */
+ class ErrorMsg {
+ public:
+ ErrorMsg(const char *msg, char ch);
+ ErrorMsg(const char *msg, unsigned val);
+ operator string() const { return buf; }
+ private:
+ char buf[256];
+ };
+
+ class DBException;
+ string causedBy( const DBException& e );
+ string causedBy( const string& e );
+
+ class DBException : public std::exception {
+ public:
+ DBException( const ExceptionInfo& ei ) : _ei(ei) { traceIfNeeded(*this); }
+ DBException( const char * msg , int code ) : _ei(msg,code) { traceIfNeeded(*this); }
+ DBException( const string& msg , int code ) : _ei(msg,code) { traceIfNeeded(*this); }
+ virtual ~DBException() throw() { }
+
+ virtual const char* what() const throw() { return _ei.msg.c_str(); }
+ virtual int getCode() const { return _ei.code; }
+
+ virtual void appendPrefix( stringstream& ss ) const { }
+ virtual void addContext( const string& str ) {
+ _ei.msg = str + causedBy( _ei.msg );
+ }
+
+        virtual string toString() const {
+            stringstream ss; ss << getCode() << " " << what();
+            return ss.str();
+        }
+
+ const ExceptionInfo& getInfo() const { return _ei; }
+
+ static void traceIfNeeded( const DBException& e ) {
+ if( traceExceptions && ! inShutdown() ){
+ warning() << "DBException thrown" << causedBy( e ) << endl;
+ printStackTrace();
+ }
+ }
+
+ static bool traceExceptions;
+
+ protected:
+ ExceptionInfo _ei;
+ };
+
+ class AssertionException : public DBException {
+ public:
+
+ AssertionException( const ExceptionInfo& ei ) : DBException(ei) {}
+ AssertionException( const char * msg , int code ) : DBException(msg,code) {}
+ AssertionException( const string& msg , int code ) : DBException(msg,code) {}
+
+ virtual ~AssertionException() throw() { }
+
+ virtual bool severe() { return true; }
+ virtual bool isUserAssertion() { return false; }
+
+ /* true if an interrupted exception - see KillCurrentOp */
+ bool interrupted() {
+ return _ei.code == 11600 || _ei.code == 11601;
+ }
+ };
+
+ /* UserExceptions are valid errors that a user can cause, like out of disk space or duplicate key */
+ class UserException : public AssertionException {
+ public:
+ UserException(int c , const string& m) : AssertionException( m , c ) {}
+
+ virtual bool severe() { return false; }
+ virtual bool isUserAssertion() { return true; }
+ virtual void appendPrefix( stringstream& ss ) const { ss << "userassert:"; }
+ };
+
+ class MsgAssertionException : public AssertionException {
+ public:
+ MsgAssertionException( const ExceptionInfo& ei ) : AssertionException( ei ) {}
+ MsgAssertionException(int c, const string& m) : AssertionException( m , c ) {}
+ virtual bool severe() { return false; }
+ virtual void appendPrefix( stringstream& ss ) const { ss << "massert:"; }
+ };
+
+ void asserted(const char *msg, const char *file, unsigned line) MONGO_NORETURN;
+ void wasserted(const char *msg, const char *file, unsigned line);
+ void verifyFailed( int msgid );
+
+    /** a "user assertion". throws UserException. logs. typically used for errors that a user
+ could cause, such as duplicate key, disk full, etc.
+ */
+ void uasserted(int msgid, const char *msg) MONGO_NORETURN;
+ void uasserted(int msgid , const string &msg);
+
+ /** reported via lasterror, but don't throw exception */
+ void uassert_nothrow(const char *msg);
+
+    /** msgasserted and massert are for errors that are internal but have a well defined error text string.
+ a stack trace is logged.
+ */
+ void msgassertedNoTrace(int msgid, const char *msg) MONGO_NORETURN;
+ inline void msgassertedNoTrace(int msgid, const string& msg) { msgassertedNoTrace( msgid , msg.c_str() ); }
+ void msgasserted(int msgid, const char *msg) MONGO_NORETURN;
+ void msgasserted(int msgid, const string &msg);
+
+ /* convert various types of exceptions to strings */
+ inline string causedBy( const char* e ){ return (string)" :: caused by :: " + e; }
+ inline string causedBy( const DBException& e ){ return causedBy( e.toString().c_str() ); }
+ inline string causedBy( const std::exception& e ){ return causedBy( e.what() ); }
+ inline string causedBy( const string& e ){ return causedBy( e.c_str() ); }
+
+ /** in the mongodb source, use verify() instead of assert(). verify is always evaluated even in release builds. */
+ inline void verify( int msgid , bool testOK ) { if ( ! testOK ) verifyFailed( msgid ); }
+
+#ifdef assert
+#undef assert
+#endif
+
+#define MONGO_assert(_Expression) (void)( MONGO_likely(!!(_Expression)) || (mongo::asserted(#_Expression, __FILE__, __LINE__), 0) )
+#define assert MONGO_assert
+
+ /* "user assert". if asserts, user did something wrong, not our code */
+#define MONGO_uassert(msgid, msg, expr) (void)( MONGO_likely(!!(expr)) || (mongo::uasserted(msgid, msg), 0) )
+#define uassert MONGO_uassert
+
+ /* warning only - keeps going */
+#define MONGO_wassert(_Expression) (void)( MONGO_likely(!!(_Expression)) || (mongo::wasserted(#_Expression, __FILE__, __LINE__), 0) )
+#define wassert MONGO_wassert
+
+ /* display a message, no context, and throw assertionexception
+
+ easy way to throw an exception and log something without our stack trace
+ display happening.
+ */
+#define MONGO_massert(msgid, msg, expr) (void)( MONGO_likely(!!(expr)) || (mongo::msgasserted(msgid, msg), 0) )
+#define massert MONGO_massert
+
+ /* dassert is 'debug assert' -- might want to turn off for production as these
+ could be slow.
+ */
+#if defined(_DEBUG)
+# define MONGO_dassert assert
+#else
+# define MONGO_dassert(x)
+#endif
+#define dassert MONGO_dassert
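+    /* usage sketch -- the numeric ids below are placeholders, not reserved codes:
+           uassert( 12345 , "parameter must be positive" , n > 0 );   // user error -> UserException
+           massert( 12346 , "internal invariant violated" , ok );     // internal error -> MsgAssertionException
+           wassert( a == b );                                         // warning only, execution continues
+    */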
+
+ // some special ids that we want to duplicate
+
+ // > 10000 asserts
+ // < 10000 UserException
+
+ enum { ASSERT_ID_DUPKEY = 11000 };
+
+ /* throws a uassertion with an appropriate msg */
+ void streamNotGood( int code , string msg , std::ios& myios ) MONGO_NORETURN;
+
+ inline void assertStreamGood(unsigned msgid, string msg, std::ios& myios) {
+ if( !myios.good() ) streamNotGood(msgid, msg, myios);
+ }
+
+ string demangleName( const type_info& typeinfo );
+
+} // namespace mongo
+
+#define BOOST_CHECK_EXCEPTION MONGO_BOOST_CHECK_EXCEPTION
+#define MONGO_BOOST_CHECK_EXCEPTION( expression ) \
+ try { \
+ expression; \
+ } catch ( const std::exception &e ) { \
+ stringstream ss; \
+ ss << "caught boost exception: " << e.what() << ' ' << __FILE__ << ' ' << __LINE__; \
+ msgasserted( 13294 , ss.str() ); \
+ } catch ( ... ) { \
+ massert( 10437 , "unknown boost failed" , false ); \
+ }
+
+#define MONGO_BOOST_CHECK_EXCEPTION_WITH_MSG( expression, msg ) \
+ try { \
+ expression; \
+ } catch ( const std::exception &e ) { \
+ stringstream ss; \
+ ss << msg << " caught boost exception: " << e.what(); \
+ msgasserted( 14043 , ss.str() ); \
+ } catch ( ... ) { \
+ msgasserted( 14044 , string("unknown boost failed ") + msg ); \
+ }
+
+#define DESTRUCTOR_GUARD MONGO_DESTRUCTOR_GUARD
+#define MONGO_DESTRUCTOR_GUARD( expression ) \
+ try { \
+ expression; \
+ } catch ( const std::exception &e ) { \
+ problem() << "caught exception (" << e.what() << ") in destructor (" << __FUNCTION__ << ")" << endl; \
+ } catch ( ... ) { \
+ problem() << "caught unknown exception in destructor (" << __FUNCTION__ << ")" << endl; \
+ }
+
+#undef MONGO_NORETURN
diff --git a/src/mongo/util/background.cpp b/src/mongo/util/background.cpp
new file mode 100644
index 00000000000..ef3ee9426b9
--- /dev/null
+++ b/src/mongo/util/background.cpp
@@ -0,0 +1,190 @@
+// @file background.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+
+#include "concurrency/mutex.h"
+#include "concurrency/spin_lock.h"
+
+#include "background.h"
+#include "time_support.h"
+#include "timer.h"
+
+#include "mongoutils/str.h"
+
+namespace mongo {
+
+ // both the BackgroundJob and the internal thread point to JobStatus
+ struct BackgroundJob::JobStatus {
+ JobStatus( bool delFlag )
+ : deleteSelf(delFlag), m("backgroundJob"), state(NotStarted) { }
+
+ const bool deleteSelf;
+
+ mongo::mutex m; // protects state below
+        boost::condition finished; // means state == Done
+ State state;
+ };
+
+ BackgroundJob::BackgroundJob( bool selfDelete ) {
+ _status.reset( new JobStatus( selfDelete ) );
+ }
+
+    // BackgroundJob object can only be destroyed after jobBody() has run
+ void BackgroundJob::jobBody( boost::shared_ptr<JobStatus> status ) {
+ LOG(1) << "BackgroundJob starting: " << name() << endl;
+ {
+ scoped_lock l( status->m );
+ massert( 13643 , mongoutils::str::stream() << "backgroundjob already started: " << name() , status->state == NotStarted );
+ status->state = Running;
+ }
+
+ const string threadName = name();
+ if( ! threadName.empty() )
+ setThreadName( threadName.c_str() );
+
+ try {
+ run();
+ }
+ catch ( std::exception& e ) {
+            log( LL_ERROR ) << "backgroundjob " << name() << " error: " << e.what() << endl;
+ }
+ catch(...) {
+ log( LL_ERROR ) << "uncaught exception in BackgroundJob " << name() << endl;
+ }
+
+ {
+ scoped_lock l( status->m );
+ status->state = Done;
+ status->finished.notify_all();
+ }
+
+ if( status->deleteSelf )
+ delete this;
+ }
+
+ BackgroundJob& BackgroundJob::go() {
+ boost::thread t( boost::bind( &BackgroundJob::jobBody , this, _status ) );
+ return *this;
+ }
+
+ bool BackgroundJob::wait( unsigned msTimeOut ) {
+ assert( !_status->deleteSelf ); // you cannot call wait on a self-deleting job
+ scoped_lock l( _status->m );
+ while ( _status->state != Done ) {
+ if ( msTimeOut ) {
+                // add msTimeOut milliseconds to the current time
+ boost::xtime xt;
+ boost::xtime_get( &xt, boost::TIME_UTC );
+
+ unsigned long long ns = msTimeOut * 1000000ULL; // milli to nano
+ if ( xt.nsec + ns < 1000000000 ) {
+ xt.nsec = (boost::xtime::xtime_nsec_t) (xt.nsec + ns);
+ }
+ else {
+ xt.sec += 1 + ns / 1000000000;
+ xt.nsec = ( ns + xt.nsec ) % 1000000000;
+ }
+
+ if ( ! _status->finished.timed_wait( l.boost() , xt ) )
+ return false;
+
+ }
+ else {
+ _status->finished.wait( l.boost() );
+ }
+ }
+ return true;
+ }
+
+ BackgroundJob::State BackgroundJob::getState() const {
+ scoped_lock l( _status->m);
+ return _status->state;
+ }
+
+ bool BackgroundJob::running() const {
+ scoped_lock l( _status->m);
+ return _status->state == Running;
+ }
+
+ // -------------------------
+
+ PeriodicTask::PeriodicTask() {
+ if ( ! theRunner )
+ theRunner = new Runner();
+ theRunner->add( this );
+ }
+
+ PeriodicTask::~PeriodicTask() {
+ theRunner->remove( this );
+ }
+
+ void PeriodicTask::Runner::add( PeriodicTask* task ) {
+ scoped_spinlock lk( _lock );
+ _tasks.push_back( task );
+ }
+
+ void PeriodicTask::Runner::remove( PeriodicTask* task ) {
+ scoped_spinlock lk( _lock );
+ for ( size_t i=0; i<_tasks.size(); i++ ) {
+ if ( _tasks[i] == task ) {
+ _tasks[i] = 0;
+ break;
+ }
+ }
+ }
+
+ void PeriodicTask::Runner::run() {
+ int sleeptime = 60;
+ DEV sleeptime = 5; // to catch race conditions
+
+ while ( ! inShutdown() ) {
+
+ sleepsecs( sleeptime );
+
+ scoped_spinlock lk( _lock );
+
+ size_t size = _tasks.size();
+
+ for ( size_t i=0; i<size; i++ ) {
+ PeriodicTask * t = _tasks[i];
+ if ( ! t )
+ continue;
+
+ if ( inShutdown() )
+ break;
+
+ Timer timer;
+ try {
+ t->taskDoWork();
+ }
+ catch ( std::exception& e ) {
+ error() << "task: " << t->taskName() << " failed: " << e.what() << endl;
+ }
+ catch ( ... ) {
+ error() << "task: " << t->taskName() << " failed with unknown error" << endl;
+ }
+
+ int ms = timer.millis();
+ LOG( ms <= 3 ? 1 : 0 ) << "task: " << t->taskName() << " took: " << ms << "ms" << endl;
+ }
+ }
+ }
+
+ PeriodicTask::Runner* PeriodicTask::theRunner = 0;
+
+} // namespace mongo
diff --git a/src/mongo/util/background.h b/src/mongo/util/background.h
new file mode 100644
index 00000000000..496a1f44f88
--- /dev/null
+++ b/src/mongo/util/background.h
@@ -0,0 +1,155 @@
+// @file background.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "concurrency/spin_lock.h"
+
+namespace mongo {
+
+ /**
+ * Background thread dispatching.
+ * subclass and define run()
+ *
+ * It is ok to call go(), that is, run the job, more than once -- if the
+ * previous invocation has finished. Thus one pattern of use is to embed
+ * a backgroundjob in your object and reuse it (or same thing with
+ * inheritance). Each go() call spawns a new thread.
+ *
+ * Thread safety:
+ * note when job destructs, the thread is not terminated if still running.
+ * generally if the thread could still be running, allocate the job dynamically
+ * and set deleteSelf to true.
+ *
+ * go() and wait() are not thread safe
+ * run() will be executed on the background thread
+     * the BackgroundJob object must exist for as long as the background thread is running
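+     *
+     * usage sketch (illustrative only -- the Flusher class here is made up):
+     *
+     *     class Flusher : public BackgroundJob {
+     *         virtual string name() const { return "Flusher"; }
+     *         virtual void run() { ... do the actual work ... }
+     *     };
+     *
+     *     Flusher f;
+     *     f.go();            // spawns a thread that calls run()
+     *     f.wait( 1000 );    // wait up to 1000ms for run() to complete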
+ */
+
+ class BackgroundJob : boost::noncopyable {
+ protected:
+ /**
+         * sub-classes must instantiate the BackgroundJob
+         *
+         * @param selfDelete if set to true, the object will delete itself after run() finishes
+         * @note selfDelete instances cannot be wait()-ed upon
+ */
+ explicit BackgroundJob(bool selfDelete = false);
+
+ virtual string name() const = 0;
+
+ /**
+ * define this to do your work.
+ * after this returns, state is set to done.
+ * after this returns, deleted if deleteSelf true.
+ *
+ * NOTE:
+ * if run() throws, the exception will be caught within 'this' object and will ultimately lead to the
+ * BackgroundJob's thread being finished, as if run() returned.
+ *
+ */
+ virtual void run() = 0;
+
+ public:
+ enum State {
+ NotStarted,
+ Running,
+ Done
+ };
+
+ virtual ~BackgroundJob() { }
+
+ /**
+ * starts job.
+         * returns immediately after dispatching.
+ *
+ * @note the BackgroundJob object must live for as long the thread is still running, ie
+ * until getState() returns Done.
+ */
+ BackgroundJob& go();
+
+ /**
+ * wait for completion.
+ *
+         * @param msTimeOut maximum amount of time to wait in milliseconds
+         * @return true if it did not time out, false otherwise.
+ *
+ * @note you can call wait() more than once if the first call times out.
+ * but you cannot call wait on a self-deleting job.
+ */
+ bool wait( unsigned msTimeOut = 0 );
+
+ // accessors
+ State getState() const;
+ bool running() const;
+
+ private:
+ struct JobStatus;
+ boost::shared_ptr<JobStatus> _status; // shared between 'this' and body() thread
+
+ void jobBody( boost::shared_ptr<JobStatus> status );
+
+ };
+
+ /**
+ * these run "roughly" every minute
+ * instantiate statically
+ * class MyTask : public PeriodicTask {
+ * public:
+     *        virtual string taskName() const { return "MyTask"; }
+     *        virtual void taskDoWork() { log() << "hi" << endl; }
+ * } myTask;
+ */
+ class PeriodicTask {
+ public:
+ PeriodicTask();
+ virtual ~PeriodicTask();
+
+ virtual void taskDoWork() = 0;
+ virtual string taskName() const = 0;
+
+ class Runner : public BackgroundJob {
+ public:
+ virtual ~Runner(){}
+
+ virtual string name() const { return "PeriodicTask::Runner"; }
+
+ virtual void run();
+
+ void add( PeriodicTask* task );
+ void remove( PeriodicTask* task );
+
+ private:
+
+ SpinLock _lock;
+
+ // these are NOT owned by Runner
+ // Runner will not delete these
+            // the vector never shrinks;
+            // removed entries are only replaced with nulls
+ vector<PeriodicTask*> _tasks;
+
+ };
+
+ static Runner* theRunner;
+
+ };
+
+
+
+
+} // namespace mongo
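
For reference, a minimal usage sketch of the BackgroundJob interface added above (the Flusher class, its log message, and the include path are illustrative assumptions, not part of the commit):

    #include "mongo/util/background.h"

    namespace mongo {
        // toy job: does its work once on its own thread, then reports Done
        class Flusher : public BackgroundJob {
        public:
            Flusher() : BackgroundJob( /*selfDelete=*/false ) { }
            virtual string name() const { return "Flusher"; }
            virtual void run() {
                log() << "flushing..." << endl;   // runs on the background thread
            }
        };
    }

    // caller side:
    //     mongo::Flusher f;
    //     f.go();                // spawns the thread, returns immediately
    //     f.wait( 10 * 1000 );   // block up to 10s; ok because f is not self-deleting
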
diff --git a/src/mongo/util/base64.cpp b/src/mongo/util/base64.cpp
new file mode 100644
index 00000000000..aff06e26126
--- /dev/null
+++ b/src/mongo/util/base64.cpp
@@ -0,0 +1,109 @@
+// util/base64.cpp
+
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "base64.h"
+
+namespace mongo {
+ namespace base64 {
+
+ Alphabet alphabet;
+
+ void encode( stringstream& ss , const char * data , int size ) {
+ for ( int i=0; i<size; i+=3 ) {
+ int left = size - i;
+ const unsigned char * start = (const unsigned char*)data + i;
+
+ // byte 0
+ ss << alphabet.e(start[0]>>2);
+
+ // byte 1
+ unsigned char temp = ( start[0] << 4 );
+ if ( left == 1 ) {
+ ss << alphabet.e(temp);
+ break;
+ }
+ temp |= ( ( start[1] >> 4 ) & 0xF );
+ ss << alphabet.e(temp);
+
+ // byte 2
+ temp = ( start[1] & 0xF ) << 2;
+ if ( left == 2 ) {
+ ss << alphabet.e(temp);
+ break;
+ }
+ temp |= ( ( start[2] >> 6 ) & 0x3 );
+ ss << alphabet.e(temp);
+
+ // byte 3
+ ss << alphabet.e(start[2] & 0x3f);
+ }
+
+ int mod = size % 3;
+ if ( mod == 1 ) {
+ ss << "==";
+ }
+ else if ( mod == 2 ) {
+ ss << "=";
+ }
+ }
+
+
+ string encode( const char * data , int size ) {
+ stringstream ss;
+ encode( ss , data ,size );
+ return ss.str();
+ }
+
+ string encode( const string& s ) {
+ return encode( s.c_str() , s.size() );
+ }
+
+
+ void decode( stringstream& ss , const string& s ) {
+ uassert( 10270 , "invalid base64" , s.size() % 4 == 0 );
+ const unsigned char * data = (const unsigned char*)s.c_str();
+ int size = s.size();
+
+ unsigned char buf[3];
+ for ( int i=0; i<size; i+=4) {
+ const unsigned char * start = data + i;
+ buf[0] = ( ( alphabet.decode[start[0]] << 2 ) & 0xFC ) | ( ( alphabet.decode[start[1]] >> 4 ) & 0x3 );
+ buf[1] = ( ( alphabet.decode[start[1]] << 4 ) & 0xF0 ) | ( ( alphabet.decode[start[2]] >> 2 ) & 0xF );
+ buf[2] = ( ( alphabet.decode[start[2]] << 6 ) & 0xC0 ) | ( ( alphabet.decode[start[3]] & 0x3F ) );
+
+ int len = 3;
+ if ( start[3] == '=' ) {
+ len = 2;
+ if ( start[2] == '=' ) {
+ len = 1;
+ }
+ }
+ ss.write( (const char*)buf , len );
+ }
+ }
+
+ string decode( const string& s ) {
+ stringstream ss;
+ decode( ss , s );
+ return ss.str();
+ }
+
+ }
+}
+
diff --git a/src/mongo/util/base64.h b/src/mongo/util/base64.h
new file mode 100644
index 00000000000..505b5d78cca
--- /dev/null
+++ b/src/mongo/util/base64.h
@@ -0,0 +1,68 @@
+// util/base64.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+namespace mongo {
+ namespace base64 {
+
+ class Alphabet {
+ public:
+ Alphabet()
+ : encode((unsigned char*)
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "abcdefghijklmnopqrstuvwxyz"
+ "0123456789"
+ "+/")
+ , decode(new unsigned char[257]) {
+ memset( decode.get() , 0 , 256 );
+ for ( int i=0; i<64; i++ ) {
+ decode[ encode[i] ] = i;
+ }
+
+ test();
+ }
+ void test() {
+ assert( strlen( (char*)encode ) == 64 );
+ for ( int i=0; i<26; i++ )
+ assert( encode[i] == toupper( encode[i+26] ) );
+ }
+
+ char e( int x ) {
+ return encode[x&0x3f];
+ }
+
+ private:
+ const unsigned char * encode;
+ public:
+ boost::scoped_array<unsigned char> decode;
+ };
+
+ extern Alphabet alphabet;
+
+
+ void encode( stringstream& ss , const char * data , int size );
+ string encode( const char * data , int size );
+ string encode( const string& s );
+
+ void decode( stringstream& ss , const string& s );
+ string decode( const string& s );
+
+
+ void testAlphabet();
+ }
+}
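
A round-trip sketch for the base64 helpers declared above (assumes the usual mongo pch/assert setup; the sample string is arbitrary):

    #include "mongo/util/base64.h"

    void base64Demo() {
        std::string raw( "any bytes, including NULs", 25 );   // length-constructed, so embedded NULs survive
        std::string enc  = mongo::base64::encode( raw );      // '=' padded; length is a multiple of 4
        std::string back = mongo::base64::decode( enc );      // uasserts if the input length is not a multiple of 4
        assert( back == raw );
    }
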
diff --git a/src/mongo/util/bson_util.h b/src/mongo/util/bson_util.h
new file mode 100644
index 00000000000..973e31f1af1
--- /dev/null
+++ b/src/mongo/util/bson_util.h
@@ -0,0 +1,42 @@
+// bson_util.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "../pch.h"
+
+namespace mongo {
+
+template <typename T>
+void bsonArrToNumVector(BSONElement el, vector<T>& results){
+
+ if(el.type() == Array){
+
+ vector<BSONElement> elements = el.Array();
+
+ for(vector<BSONElement>::iterator i = elements.begin(); i != elements.end(); ++i){
+ results.push_back( (T) (*i).Number() );
+ }
+ }
+ else if(el.isNumber()){
+ results.push_back( (T) el.Number() );
+ }
+
+}
+
+
+}
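
Sketch of how the helper above might be used (the "scores" field and the surrounding BSONObj are made up for illustration):

    #include "mongo/util/bson_util.h"

    void readScores( const mongo::BSONObj& doc ) {
        using namespace mongo;
        std::vector<double> scores;
        // works whether doc["scores"] is an array of numbers or a single number
        bsonArrToNumVector<double>( doc["scores"], scores );
        for ( size_t i = 0; i < scores.size(); i++ )
            log() << "score: " << scores[i] << endl;
    }
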
diff --git a/src/mongo/util/bufreader.h b/src/mongo/util/bufreader.h
new file mode 100644
index 00000000000..53f0ba744e2
--- /dev/null
+++ b/src/mongo/util/bufreader.h
@@ -0,0 +1,100 @@
+// @file bufreader.h parse a memory region into usable pieces
+
+/**
+* Copyright (C) 2009 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+namespace mongo {
+
+ /** helper to read and parse a block of memory
+ methods throw the eof exception if the operation would pass the end of the
+ buffer with which we are working.
+ */
+ class BufReader : boost::noncopyable {
+ public:
+ class eof : public std::exception {
+ public:
+ eof() { }
+            virtual const char * what() const throw() { return "BufReader eof"; }
+ };
+
+ BufReader(const void *p, unsigned len) : _start(p), _pos(p), _end(((char *)_pos)+len) { }
+
+ bool atEof() const { return _pos == _end; }
+
+ /** read in the object specified, and advance buffer pointer */
+ template <typename T>
+ void read(T &t) {
+ T* cur = (T*) _pos;
+ T *next = cur + 1;
+ if( _end < next ) throw eof();
+ t = *cur;
+ _pos = next;
+ }
+
+ /** verify we can look at t, but do not advance */
+ template <typename T>
+ void peek(T &t) {
+ T* cur = (T*) _pos;
+ T *next = cur + 1;
+ if( _end < next ) throw eof();
+ t = *cur;
+ }
+
+ /** return current offset into buffer */
+ unsigned offset() const { return (char*)_pos - (char*)_start; }
+
+ /** return remaining bytes */
+ unsigned remaining() const { return (char*)_end -(char*)_pos; }
+
+ /** back up by nbytes */
+ void rewind(unsigned nbytes) {
+ _pos = ((char *) _pos) - nbytes;
+ assert( _pos >= _start );
+ }
+
+ /** return current position pointer, and advance by len */
+ const void* skip(unsigned len) {
+ const char *nxt = ((char *) _pos) + len;
+ if( _end < nxt ) throw eof();
+ const void *p = _pos;
+ _pos = nxt;
+ return p;
+ }
+
+ void readStr(string& s) {
+ StringBuilder b;
+ while( 1 ) {
+ char ch;
+ read(ch);
+ if( ch == 0 )
+ break;
+ b << ch;
+ }
+ s = b.str();
+ }
+
+ const void* pos() { return _pos; }
+ const void* start() { return _start; }
+
+ private:
+ const void *_start;
+ const void *_pos;
+ const void *_end;
+ };
+
+}
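
A small sketch of reading a packed record with BufReader (the record layout here is invented purely to exercise read/readStr/skip):

    #include "mongo/util/bufreader.h"

    void parseRecord( const void* data, unsigned len ) {
        using namespace mongo;
        BufReader r( data, len );
        try {
            unsigned version;
            r.read( version );          // copies sizeof(unsigned) bytes and advances
            std::string name;
            r.readStr( name );          // reads up to and including a NUL terminator
            r.skip( 4 );                // jump over a reserved field
            log() << "v" << version << " name:" << name
                  << " bytes left:" << r.remaining() << endl;
        }
        catch ( const BufReader::eof& ) {
            log() << "record truncated" << endl;
        }
    }
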
diff --git a/src/mongo/util/checksum.h b/src/mongo/util/checksum.h
new file mode 100644
index 00000000000..009ab56fbeb
--- /dev/null
+++ b/src/mongo/util/checksum.h
@@ -0,0 +1,37 @@
+#pragma once
+#include "../pch.h"
+namespace mongo {
+ /** a simple, rather dumb, but very fast checksum. see perftests.cpp for unit tests. */
+ struct Checksum {
+ union {
+ unsigned char bytes[16];
+ unsigned long long words[2];
+ };
+
+ // if you change this you must bump dur::CurrentVersion
+ void gen(const void *buf, unsigned len) {
+ wassert( ((size_t)buf) % 8 == 0 ); // performance warning
+ unsigned n = len / 8 / 2;
+ const unsigned long long *p = (const unsigned long long *) buf;
+ unsigned long long a = 0;
+ for( unsigned i = 0; i < n; i++ ) {
+ a += (*p ^ i);
+ p++;
+ }
+ unsigned long long b = 0;
+ for( unsigned i = 0; i < n; i++ ) {
+ b += (*p ^ i);
+ p++;
+ }
+ unsigned long long c = 0;
+ for( unsigned i = n * 2 * 8; i < len; i++ ) { // 0-7 bytes left
+ c = (c << 8) | ((const char *)buf)[i];
+ }
+ words[0] = a ^ len;
+ words[1] = b ^ c;
+ }
+
+ bool operator==(const Checksum& rhs) const { return words[0]==rhs.words[0] && words[1]==rhs.words[1]; }
+ bool operator!=(const Checksum& rhs) const { return words[0]!=rhs.words[0] || words[1]!=rhs.words[1]; }
+ };
+}
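
Illustrative use of Checksum for detecting changes to a buffer (the caller-supplied buffers are assumed to be 8-byte aligned, matching the wassert in gen()):

    #include "mongo/util/checksum.h"

    bool sameContents( const void* a, const void* b, unsigned len ) {
        using namespace mongo;
        Checksum ca, cb;
        ca.gen( a, len );    // fast, not cryptographic
        cb.gen( b, len );
        return ca == cb;     // compares the two 64-bit words
    }
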
diff --git a/src/mongo/util/compress.cpp b/src/mongo/util/compress.cpp
new file mode 100644
index 00000000000..bcde488b88b
--- /dev/null
+++ b/src/mongo/util/compress.cpp
@@ -0,0 +1,31 @@
+// @file compress.cpp
+
+#include "../third_party/snappy/snappy.h"
+#include "compress.h"
+#include <string>
+#include <string.h>
+#include <assert.h>
+
+namespace mongo {
+
+ void rawCompress(const char* input,
+ size_t input_length,
+ char* compressed,
+ size_t* compressed_length)
+ {
+ snappy::RawCompress(input, input_length, compressed, compressed_length);
+ }
+
+ size_t maxCompressedLength(size_t source_len) {
+ return snappy::MaxCompressedLength(source_len);
+ }
+
+ size_t compress(const char* input, size_t input_length, std::string* output) {
+ return snappy::Compress(input, input_length, output);
+ }
+
+ bool uncompress(const char* compressed, size_t compressed_length, std::string* uncompressed) {
+ return snappy::Uncompress(compressed, compressed_length, uncompressed);
+ }
+
+}
diff --git a/src/mongo/util/compress.h b/src/mongo/util/compress.h
new file mode 100644
index 00000000000..5bc5a3392bb
--- /dev/null
+++ b/src/mongo/util/compress.h
@@ -0,0 +1,21 @@
+// @file compress.h
+
+#pragma once
+
+#include <string>
+
+namespace mongo {
+
+ size_t compress(const char* input, size_t input_length, std::string* output);
+
+ bool uncompress(const char* compressed, size_t compressed_length, std::string* uncompressed);
+
+ size_t maxCompressedLength(size_t source_len);
+ void rawCompress(const char* input,
+ size_t input_length,
+ char* compressed,
+ size_t* compressed_length);
+
+}
+
+
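
A snappy round-trip sketch using the thin wrappers above (error handling kept minimal on purpose):

    #include <assert.h>
    #include <string>
    #include "mongo/util/compress.h"

    void compressDemo( const std::string& payload ) {
        using namespace mongo;
        std::string packed;
        size_t n = compress( payload.data(), payload.size(), &packed );  // returns compressed size
        std::string restored;
        bool ok = uncompress( packed.data(), n, &restored );
        assert( ok && restored == payload );
    }
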
diff --git a/src/mongo/util/concurrency/README b/src/mongo/util/concurrency/README
new file mode 100644
index 00000000000..1a19264f4b6
--- /dev/null
+++ b/src/mongo/util/concurrency/README
@@ -0,0 +1,39 @@
+util/concurrency/ files
+
+msg.h - message passing between threads
+
+mutex.h - small enhancements that wrap boost::mutex
+ also SimpleMutex
+
+mvar.h
+ This is based on haskell's MVar synchronization primitive:
+ http://www.haskell.org/ghc/docs/latest/html/libraries/base/Control-Concurrent-MVar.html
+ It is a thread-safe queue that can hold at most one object.
+ You can also think of it as a box that can be either full or empty.
+
+race.h
+ RACECHECK
+
+rwlock.h - read/write locks (RWLock)
+ RWLock
+ RWLockRecursive
+ RWLockRecursiveNongreedy
+
+spin_lock.h
+
+synchronization.h
+ Notification, NotifyAll
+
+threadlocal.h
+
+thread_pool.h
+
+value.h
+ Guarded
+ DiagStr
+ mapsf
+
+goofy things that need reworking:
+ list.h
+ task.h
+
diff --git a/src/mongo/util/concurrency/list.h b/src/mongo/util/concurrency/list.h
new file mode 100644
index 00000000000..61bdd55f46f
--- /dev/null
+++ b/src/mongo/util/concurrency/list.h
@@ -0,0 +1,99 @@
+// list.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+namespace mongo {
+
+ /* DONT USE THIS. it was a dumb idea.
+
+ this class uses a mutex for writes, but not for reads.
+ we can get fancier later...
+
+ struct Member : public List1<Member>::Base {
+ const char *host;
+ int port;
+ };
+ List1<Member> _members;
+ _members.head()->next();
+
+ */
+ template<typename T>
+ class List1 : boost::noncopyable {
+ public:
+ /* next() and head() return 0 at end of list */
+
+ List1() : _head(0), _m("List1"), _orphans(0) { }
+
+ class Base {
+ friend class List1;
+ T *_next;
+ public:
+ Base() : _next(0){}
+ ~Base() { wassert(false); } // we never want this to happen
+ T* next() const { return _next; }
+ };
+
+ /** note this is safe:
+
+ T* p = mylist.head();
+ if( p )
+ use(p);
+
+ and this is not:
+
+ if( mylist.head() )
+ use( mylist.head() ); // could become 0
+ */
+ T* head() const { return (T*) _head; }
+
+ void push(T* t) {
+ assert( t->_next == 0 );
+ scoped_lock lk(_m);
+ t->_next = (T*) _head;
+ _head = t;
+ }
+
+ // intentionally leaks.
+ void orphanAll() {
+ scoped_lock lk(_m);
+ _head = 0;
+ }
+
+ /* t is not deleted, but is removed from the list. (orphaned) */
+ void orphan(T* t) {
+ scoped_lock lk(_m);
+ T *&prev = (T*&) _head;
+ T *n = prev;
+ while( n != t ) {
+ uassert( 14050 , "List1: item to orphan not in list", n );
+ prev = n->_next;
+ n = prev;
+ }
+ prev = t->_next;
+ if( ++_orphans > 500 )
+ log() << "warning List1 orphans=" << _orphans << '\n';
+ }
+
+ private:
+ volatile T *_head;
+ mongo::mutex _m;
+ int _orphans;
+ };
+
+};
diff --git a/src/mongo/util/concurrency/msg.h b/src/mongo/util/concurrency/msg.h
new file mode 100644
index 00000000000..0b9a7c5048c
--- /dev/null
+++ b/src/mongo/util/concurrency/msg.h
@@ -0,0 +1,61 @@
+// @file msg.h - interthread message passing
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include <deque>
+#include "task.h"
+
+namespace mongo {
+
+ namespace task {
+
+ typedef boost::function<void()> lam;
+
+ /** typical usage is: task::fork( new Server("threadname") ); */
+ class Server : public Task {
+ public:
+ /** send a message to the port */
+ void send(lam);
+
+ Server(string name) : m("server"), _name(name), rq(false) { }
+ virtual ~Server() { }
+
+ /** send message but block until function completes */
+ void call(const lam&);
+
+ void requeue() { rq = true; }
+
+ protected:
+ /* REMINDER : for use in mongod, you will want to have this call Client::initThread(). */
+ virtual void starting() { }
+
+ private:
+ virtual bool initClient() { return true; }
+ virtual string name() const { return _name; }
+ void doWork();
+ deque<lam> d;
+ mongo::mutex m;
+ boost::condition c;
+ string _name;
+ bool rq;
+ };
+
+ }
+
+}
diff --git a/src/mongo/util/concurrency/mutex.h b/src/mongo/util/concurrency/mutex.h
new file mode 100644
index 00000000000..429f280b1cb
--- /dev/null
+++ b/src/mongo/util/concurrency/mutex.h
@@ -0,0 +1,228 @@
+// @file mutex.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "../heapcheck.h"
+#include "threadlocal.h"
+#if defined(_DEBUG)
+#include "mutexdebugger.h"
+#endif
+
+namespace mongo {
+
+ void printStackTrace( ostream &o );
+
+ inline boost::xtime incxtimemillis( long long s ) {
+ boost::xtime xt;
+ boost::xtime_get(&xt, boost::TIME_UTC);
+ xt.sec += (int)( s / 1000 );
+ xt.nsec += (int)(( s % 1000 ) * 1000000);
+ if ( xt.nsec >= 1000000000 ) {
+ xt.nsec -= 1000000000;
+ xt.sec++;
+ }
+ return xt;
+ }
+
+ // If you create a local static instance of this class, that instance will be destroyed
+ // before all global static objects are destroyed, so _destroyingStatics will be set
+ // to true before the global static variables are destroyed.
+ class StaticObserver : boost::noncopyable {
+ public:
+ static bool _destroyingStatics;
+ ~StaticObserver() { _destroyingStatics = true; }
+ };
+
+ /** On pthread systems, it is an error to destroy a mutex while held (boost mutex
+ * may use pthread). Static global mutexes may be held upon shutdown in our
+ * implementation, and this way we avoid destroying them.
+ * NOT recursive.
+ */
+ class mutex : boost::noncopyable {
+ public:
+ const char * const _name;
+ mutex(const char *name) : _name(name)
+ {
+ _m = new boost::timed_mutex();
+ IGNORE_OBJECT( _m ); // Turn-off heap checking on _m
+ }
+ ~mutex() {
+ if( !StaticObserver::_destroyingStatics ) {
+ UNIGNORE_OBJECT( _m );
+ delete _m;
+ }
+ }
+
+ class try_lock : boost::noncopyable {
+ public:
+ try_lock( mongo::mutex &m , int millis = 0 )
+ : _l( m.boost() , incxtimemillis( millis ) ) ,
+#if BOOST_VERSION >= 103500
+ ok( _l.owns_lock() )
+#else
+ ok( _l.locked() )
+#endif
+ { }
+ private:
+ boost::timed_mutex::scoped_timed_lock _l;
+ public:
+ const bool ok;
+ };
+
+ class scoped_lock : boost::noncopyable {
+ public:
+#if defined(_DEBUG)
+ struct PostStaticCheck {
+ PostStaticCheck() {
+ if ( StaticObserver::_destroyingStatics ) {
+ cout << "_DEBUG warning trying to lock a mongo::mutex during static shutdown" << endl;
+ printStackTrace( cout );
+ }
+ }
+ } _check;
+ mongo::mutex * const _mut;
+#endif
+ scoped_lock( mongo::mutex &m ) :
+#if defined(_DEBUG)
+ _mut(&m),
+#endif
+ _l( m.boost() ) {
+#if defined(_DEBUG)
+ mutexDebugger.entering(_mut->_name);
+#endif
+ }
+ ~scoped_lock() {
+#if defined(_DEBUG)
+ mutexDebugger.leaving(_mut->_name);
+#endif
+ }
+ boost::timed_mutex::scoped_lock &boost() { return _l; }
+ private:
+ boost::timed_mutex::scoped_lock _l;
+ };
+ private:
+ boost::timed_mutex &boost() { return *_m; }
+ boost::timed_mutex *_m;
+ };
+
+ typedef mutex::scoped_lock scoped_lock;
+ typedef boost::recursive_mutex::scoped_lock recursive_scoped_lock;
+
+ /** The concept with SimpleMutex is that it is a basic lock/unlock with no
+ special functionality (such as try and try timeout). Thus it can be
+ implemented using OS-specific facilities in all environments (if desired).
+ On Windows, the implementation below is faster than boost mutex.
+ */
+#if defined(_WIN32)
+ class SimpleMutex : boost::noncopyable {
+ CRITICAL_SECTION _cs;
+ public:
+ SimpleMutex(const char *name) { InitializeCriticalSection(&_cs); }
+ ~SimpleMutex() { DeleteCriticalSection(&_cs); }
+
+#if defined(_DEBUG)
+ ThreadLocalValue<int> _nlocksByMe;
+ void lock() {
+            assert( _nlocksByMe.get() == 0 ); // indicates you are trying to lock recursively
+ _nlocksByMe.set(1);
+ EnterCriticalSection(&_cs);
+ }
+ void dassertLocked() const {
+ assert( _nlocksByMe.get() == 1 );
+ }
+ void unlock() {
+ dassertLocked();
+ _nlocksByMe.set(0);
+ LeaveCriticalSection(&_cs);
+ }
+#else
+ void dassertLocked() const { }
+ void lock() {
+ EnterCriticalSection(&_cs);
+ }
+ void unlock() {
+ LeaveCriticalSection(&_cs);
+ }
+#endif
+
+ class scoped_lock : boost::noncopyable {
+ SimpleMutex& _m;
+ public:
+ scoped_lock( SimpleMutex &m ) : _m(m) { _m.lock(); }
+ ~scoped_lock() { _m.unlock(); }
+# if defined(_DEBUG)
+ const SimpleMutex& m() const { return _m; }
+# endif
+ };
+ };
+#else
+ class SimpleMutex : boost::noncopyable {
+ public:
+ void dassertLocked() const { }
+ SimpleMutex(const char* name) { assert( pthread_mutex_init(&_lock,0) == 0 ); }
+ ~SimpleMutex(){
+ if ( ! StaticObserver::_destroyingStatics ) {
+ assert( pthread_mutex_destroy(&_lock) == 0 );
+ }
+ }
+
+ void lock() { assert( pthread_mutex_lock(&_lock) == 0 ); }
+ void unlock() { assert( pthread_mutex_unlock(&_lock) == 0 ); }
+ public:
+ class scoped_lock : boost::noncopyable {
+ SimpleMutex& _m;
+ public:
+ scoped_lock( SimpleMutex &m ) : _m(m) { _m.lock(); }
+ ~scoped_lock() { _m.unlock(); }
+ const SimpleMutex& m() const { return _m; }
+ };
+
+ private:
+ pthread_mutex_t _lock;
+ };
+
+#endif
+
+ /** This can be used instead of boost recursive mutex. The advantage is the _DEBUG checks
+ * and ability to assertLocked(). This has not yet been tested for speed vs. the boost one.
+ */
+ class RecursiveMutex : boost::noncopyable {
+ public:
+ RecursiveMutex(const char* name) : m(name) { }
+ bool isLocked() const { return n.get() > 0; }
+ class scoped_lock : boost::noncopyable {
+ RecursiveMutex& rm;
+ int& nLocksByMe;
+ public:
+ scoped_lock( RecursiveMutex &m ) : rm(m), nLocksByMe(rm.n.getRef()) {
+ if( nLocksByMe++ == 0 )
+ rm.m.lock();
+ }
+ ~scoped_lock() {
+ assert( nLocksByMe > 0 );
+ if( --nLocksByMe == 0 ) {
+ rm.m.unlock();
+ }
+ }
+ };
+ private:
+ SimpleMutex m;
+ ThreadLocalValue<int> n;
+ };
+
+}
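
Typical use of mongo::mutex with its scoped_lock, shown as a sketch (the counter protected here is an arbitrary example):

    #include "mongo/util/concurrency/mutex.h"

    namespace {
        mongo::mutex _countMutex( "countMutex" );   // named so MutexDebugger can report on it in _DEBUG builds
        long long _count = 0;
    }

    void bumpCount() {
        mongo::scoped_lock lk( _countMutex );       // locks here, unlocks when lk goes out of scope
        _count++;
    }
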
diff --git a/src/mongo/util/concurrency/mutexdebugger.h b/src/mongo/util/concurrency/mutexdebugger.h
new file mode 100644
index 00000000000..7dc57f29e98
--- /dev/null
+++ b/src/mongo/util/concurrency/mutexdebugger.h
@@ -0,0 +1,117 @@
+#pragma once
+
+namespace mongo {
+
+ /** only used on _DEBUG builds.
+        MutexDebugger checks that we always acquire locks for multiple mutexes in a consistent (acyclic) order.
+ If we were inconsistent we could deadlock.
+ */
+ class MutexDebugger {
+ typedef const char * mid; // mid = mutex ID
+ typedef map<mid,int> Preceeding;
+ map< mid, int > maxNest;
+ boost::thread_specific_ptr< Preceeding > us;
+ map< mid, set<mid> > followers;
+ boost::mutex &x;
+ unsigned magic;
+ void aBreakPoint() { } // for debugging
+ public:
+ // set these to create an assert that
+ // b must never be locked before a
+ // so
+ // a.lock(); b.lock(); is fine
+ // b.lock(); alone is fine too
+ // only checked on _DEBUG builds.
+ string a,b;
+
+ /** outputs some diagnostic info on mutexes (on _DEBUG builds) */
+ void programEnding();
+
+ MutexDebugger();
+
+ string currentlyLocked() const {
+ Preceeding *_preceeding = us.get();
+ if( _preceeding == 0 )
+ return "";
+ Preceeding &preceeding = *_preceeding;
+ stringstream q;
+ for( Preceeding::const_iterator i = preceeding.begin(); i != preceeding.end(); i++ ) {
+ if( i->second > 0 )
+ q << " " << i->first << ' ' << i->second << '\n';
+ }
+ return q.str();
+ }
+
+ void entering(mid m) {
+ if( this == 0 || m == 0 ) return;
+ assert( magic == 0x12345678 );
+
+ Preceeding *_preceeding = us.get();
+ if( _preceeding == 0 )
+ us.reset( _preceeding = new Preceeding() );
+ Preceeding &preceeding = *_preceeding;
+
+ if( a == m ) {
+ aBreakPoint();
+ if( preceeding[b.c_str()] ) {
+ cout << "****** MutexDebugger error! warning " << b << " was locked before " << a << endl;
+ assert(false);
+ }
+ }
+
+ preceeding[m]++;
+ if( preceeding[m] > 1 ) {
+ // recursive re-locking.
+ if( preceeding[m] > maxNest[m] )
+ maxNest[m] = preceeding[m];
+ return;
+ }
+
+ bool failed = false;
+ string err;
+ {
+ boost::mutex::scoped_lock lk(x);
+ followers[m];
+ for( Preceeding::iterator i = preceeding.begin(); i != preceeding.end(); i++ ) {
+ if( m != i->first && i->second > 0 ) {
+ followers[i->first].insert(m);
+ if( followers[m].count(i->first) != 0 ) {
+ failed = true;
+ stringstream ss;
+ mid bad = i->first;
+ ss << "mutex problem" <<
+ "\n when locking " << m <<
+ "\n " << bad << " was already locked and should not be."
+ "\n set a and b above to debug.\n";
+ stringstream q;
+ for( Preceeding::iterator i = preceeding.begin(); i != preceeding.end(); i++ ) {
+ if( i->first != m && i->first != bad && i->second > 0 )
+ q << " " << i->first << '\n';
+ }
+ string also = q.str();
+ if( !also.empty() )
+ ss << "also locked before " << m << " in this thread (no particular order):\n" << also;
+ err = ss.str();
+ break;
+ }
+ }
+ }
+ }
+ if( failed ) {
+ cout << err << endl;
+ assert( 0 );
+ }
+ }
+ void leaving(mid m) {
+ if( this == 0 || m == 0 ) return; // still in startup pre-main()
+ Preceeding& preceeding = *us.get();
+ preceeding[m]--;
+ if( preceeding[m] < 0 ) {
+ cout << "ERROR: lock count for " << m << " is " << preceeding[m] << endl;
+ assert( preceeding[m] >= 0 );
+ }
+ }
+ };
+ extern MutexDebugger &mutexDebugger;
+
+}
diff --git a/src/mongo/util/concurrency/mvar.h b/src/mongo/util/concurrency/mvar.h
new file mode 100644
index 00000000000..bc1855a85cc
--- /dev/null
+++ b/src/mongo/util/concurrency/mvar.h
@@ -0,0 +1,118 @@
+// mvar.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+namespace mongo {
+
+ /* This is based on haskell's MVar synchronization primitive:
+ * http://www.haskell.org/ghc/docs/latest/html/libraries/base/Control-Concurrent-MVar.html
+ *
+ * It is a thread-safe queue that can hold at most one object.
+ * You can also think of it as a box that can be either full or empty.
+ */
+
+ template <typename T>
+ class MVar {
+ public:
+ enum State {EMPTY=0, FULL};
+
+ // create an empty MVar
+ MVar()
+ : _state(EMPTY)
+ {}
+
+ // creates a full MVar
+ MVar(const T& val)
+ : _state(FULL)
+ , _value(val)
+ {}
+
+ // puts val into the MVar and returns true or returns false if full
+ // never blocks
+ bool tryPut(const T& val) {
+ // intentionally repeat test before and after lock
+ if (_state == FULL) return false;
+ Mutex::scoped_lock lock(_mutex);
+ if (_state == FULL) return false;
+
+ _state = FULL;
+ _value = val;
+
+ // unblock threads waiting to 'take'
+ _condition.notify_all();
+
+ return true;
+ }
+
+ // puts val into the MVar
+ // will block if the MVar is already full
+ void put(const T& val) {
+ Mutex::scoped_lock lock(_mutex);
+ while (!tryPut(val)) {
+ // unlocks lock while waiting and relocks before returning
+ _condition.wait(lock);
+ }
+ }
+
+ // takes val out of the MVar and returns true or returns false if empty
+ // never blocks
+ bool tryTake(T& out) {
+ // intentionally repeat test before and after lock
+ if (_state == EMPTY) return false;
+ Mutex::scoped_lock lock(_mutex);
+ if (_state == EMPTY) return false;
+
+ _state = EMPTY;
+ out = _value;
+
+ // unblock threads waiting to 'put'
+ _condition.notify_all();
+
+ return true;
+ }
+
+ // takes val out of the MVar
+ // will block if the MVar is empty
+ T take() {
+ T ret = T();
+
+ Mutex::scoped_lock lock(_mutex);
+ while (!tryTake(ret)) {
+ // unlocks lock while waiting and relocks before returning
+ _condition.wait(lock);
+ }
+
+ return ret;
+ }
+
+
+ // Note: this is fast because there is no locking, but state could
+ // change before you get a chance to act on it.
+ // Mainly useful for sanity checks / asserts.
+ State getState() { return _state; }
+
+
+ private:
+ State _state;
+ T _value;
+ typedef boost::recursive_mutex Mutex;
+ Mutex _mutex;
+ boost::condition _condition;
+ };
+
+}
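
A producer/consumer sketch with MVar<T> (the int payload and the thread setup are assumptions; boost::thread is already a dependency of this code):

    #include <boost/thread/thread.hpp>
    #include "mongo/util/concurrency/mvar.h"

    mongo::MVar<int> box;                 // default-constructed, starts EMPTY

    void producer() {
        for ( int i = 0; i < 10; i++ )
            box.put( i );                 // blocks while the box is FULL
    }

    void consumer() {
        for ( int i = 0; i < 10; i++ ) {
            int v = box.take();           // blocks while the box is EMPTY
            // ... process v ...
        }
    }

    // boost::thread t1( producer ), t2( consumer ); t1.join(); t2.join();
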
diff --git a/src/mongo/util/concurrency/race.h b/src/mongo/util/concurrency/race.h
new file mode 100644
index 00000000000..837ae23ac13
--- /dev/null
+++ b/src/mongo/util/concurrency/race.h
@@ -0,0 +1,77 @@
+#pragma once
+
+#include "../goodies.h" // printStackTrace
+#include "mutexdebugger.h"
+
+namespace mongo {
+
+ namespace race {
+
+#ifdef _WIN32
+ typedef unsigned threadId_t;
+#else
+ typedef pthread_t threadId_t;
+#endif
+
+#if defined(_DEBUG)
+
+ class Block {
+ volatile int n;
+ unsigned ncalls;
+ const string file;
+ const unsigned line;
+ void fail() {
+ log() << "\n\n\nrace: synchronization (race condition) failure\ncurrent locks this thread (" << getThreadName() << "):\n"
+ << mutexDebugger.currentlyLocked() << endl;
+ printStackTrace();
+ ::abort();
+ }
+ void enter() {
+ if( ++n != 1 ) fail();
+ ncalls++;
+ if( ncalls < 100 ) {
+ sleepmillis(0);
+ }
+ else {
+ RARELY {
+ sleepmillis(0);
+ if( ncalls < 128 * 20 ) {
+ OCCASIONALLY {
+ sleepmillis(3);
+ }
+ }
+ }
+ }
+ }
+ void leave() {
+ if( --n != 0 ) fail();
+ }
+ public:
+ Block(string f, unsigned l) : n(0), ncalls(0), file(f), line(l) { }
+ ~Block() {
+ if( ncalls > 1000000 ) {
+ // just so we know if we are slowing things down
+ log() << "race::Block lots of calls " << file << ' ' << line << " n:" << ncalls << endl;
+ }
+ }
+ class Within {
+ Block& _s;
+ public:
+ Within(Block& s) : _s(s) { _s.enter(); }
+ ~Within() { _s.leave(); }
+ };
+ };
+
+ /* in a rwlock situation this will fail, so not appropriate for things like that. */
+# define RACECHECK \
+ static race::Block __cp(__FILE__, __LINE__); \
+ race::Block::Within __ck(__cp);
+
+#else
+ /* !_DEBUG */
+# define RACECHECK
+
+#endif
+
+ }
+}
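
Sketch of RACECHECK in a function that is supposed to be externally serialized (the function itself is hypothetical; in non-_DEBUG builds the macro compiles away to nothing):

    #include "mongo/util/concurrency/race.h"

    void rotateLogFile() {
        RACECHECK   // aborts with a stack trace if two threads are ever in here at once (_DEBUG only)
        // ... code that assumes single-threaded access ...
    }
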
diff --git a/src/mongo/util/concurrency/rwlock.h b/src/mongo/util/concurrency/rwlock.h
new file mode 100644
index 00000000000..3dbfc35ed6e
--- /dev/null
+++ b/src/mongo/util/concurrency/rwlock.h
@@ -0,0 +1,271 @@
+// @file rwlock.h generic reader-writer lock (cross platform support)
+
+/*
+ * Copyright (C) 2010 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "mutex.h"
+#include "../time_support.h"
+#include "rwlockimpl.h"
+
+#if defined(_DEBUG)
+#include "mutexdebugger.h"
+#endif
+
+namespace mongo {
+
+ /** separated out as later the implementation of this may be different than RWLock,
+ depending on OS, as there is no upgrade etc. facility herein.
+ */
+ class SimpleRWLock : public RWLockBase {
+ public:
+ void lock() { RWLockBase::lock(); }
+ void unlock() { RWLockBase::unlock(); }
+ void lock_shared() { RWLockBase::lock_shared(); }
+ void unlock_shared() { RWLockBase::unlock_shared(); }
+ class Shared : boost::noncopyable {
+ SimpleRWLock& _r;
+ public:
+ Shared(SimpleRWLock& rwlock) : _r(rwlock) {_r.lock_shared(); }
+ ~Shared() { _r.unlock_shared(); }
+ };
+ class Exclusive : boost::noncopyable {
+ SimpleRWLock& _r;
+ public:
+ Exclusive(SimpleRWLock& rwlock) : _r(rwlock) {_r.lock(); }
+ ~Exclusive() { _r.unlock(); }
+ };
+ };
+
+ class RWLock : public RWLockBase {
+ enum { NilState, UpgradableState, Exclusive } x; // only bother to set when doing upgradable related things
+ public:
+ const char * const _name;
+ RWLock(const char *name) : _name(name) {
+ x = NilState;
+ }
+ void lock() {
+ RWLockBase::lock();
+#if defined(_DEBUG)
+ mutexDebugger.entering(_name);
+#endif
+ }
+ void unlock() {
+#if defined(_DEBUG)
+ mutexDebugger.leaving(_name);
+#endif
+ RWLockBase::unlock();
+ }
+
+ void lock_shared() { RWLockBase::lock_shared(); }
+ void unlock_shared() { RWLockBase::unlock_shared(); }
+ private:
+ void lockAsUpgradable() { RWLockBase::lockAsUpgradable(); }
+ void unlockFromUpgradable() { // upgradable -> unlocked
+ RWLockBase::unlockFromUpgradable();
+ }
+ public:
+ void upgrade() { // upgradable -> exclusive lock
+ assert( x == UpgradableState );
+ RWLockBase::upgrade();
+ x = Exclusive;
+ }
+
+ bool lock_shared_try( int millis ) { return RWLockBase::lock_shared_try(millis); }
+
+ bool lock_try( int millis = 0 ) {
+ if( RWLockBase::lock_try(millis) ) {
+#if defined(_DEBUG)
+ mutexDebugger.entering(_name);
+#endif
+ return true;
+ }
+ return false;
+ }
+
+ /** acquire upgradable state. You must be unlocked before creating.
+ unlocks on destruction, whether in upgradable state or upgraded to exclusive
+ in the interim.
+ */
+ class Upgradable : boost::noncopyable {
+ RWLock& _r;
+ public:
+ Upgradable(RWLock& r) : _r(r) {
+ r.lockAsUpgradable();
+ assert( _r.x == NilState );
+ _r.x = RWLock::UpgradableState;
+ }
+ ~Upgradable() {
+ if( _r.x == RWLock::UpgradableState ) {
+ _r.x = NilState;
+ _r.unlockFromUpgradable();
+ }
+ else {
+ //TEMP assert( _r.x == Exclusive ); // has been upgraded
+ _r.x = NilState;
+ _r.unlock();
+ }
+ }
+ };
+ };
+
+ /** throws on failure to acquire in the specified time period. */
+ class rwlock_try_write : boost::noncopyable {
+ public:
+ struct exception { };
+ rwlock_try_write(RWLock& l, int millis = 0) : _l(l) {
+ if( !l.lock_try(millis) )
+ throw exception();
+ }
+ ~rwlock_try_write() { _l.unlock(); }
+ private:
+ RWLock& _l;
+ };
+
+ class rwlock_shared : boost::noncopyable {
+ public:
+ rwlock_shared(RWLock& rwlock) : _r(rwlock) {_r.lock_shared(); }
+ ~rwlock_shared() { _r.unlock_shared(); }
+ private:
+ RWLock& _r;
+ };
+
+ /* scoped lock for RWLock */
+ class rwlock : boost::noncopyable {
+ public:
+ /**
+         * @param write acquire the write (exclusive) lock if true, the shared lock if false
+         * @param lowPriorityWaitMS if > 0, will try to get the lock non-greedily for that many ms
+ */
+ rwlock( const RWLock& lock , bool write, /* bool alreadyHaveLock = false , */int lowPriorityWaitMS = 0 )
+ : _lock( (RWLock&)lock ) , _write( write ) {
+ {
+ if ( _write ) {
+ _lock.lock();
+ }
+ else {
+ _lock.lock_shared();
+ }
+ }
+ }
+ ~rwlock() {
+ if ( _write )
+ _lock.unlock();
+ else
+ _lock.unlock_shared();
+ }
+ private:
+ RWLock& _lock;
+ const bool _write;
+ };
+
+ // ----------------------------------------------------------------------------------------
+
+ /** recursive on shared locks is ok for this implementation */
+ class RWLockRecursive : protected RWLockBase {
+ protected:
+ ThreadLocalValue<int> _state;
+ void lock(); // not implemented - Lock() should be used; didn't overload this name to avoid mistakes
+ virtual void Lock() { RWLockBase::lock(); }
+ public:
+ virtual ~RWLockRecursive() { }
+ const char * const _name;
+ RWLockRecursive(const char *name) : _name(name) { }
+
+ void assertExclusivelyLocked() {
+ assert( _state.get() < 0 );
+ }
+
+ class Exclusive : boost::noncopyable {
+ RWLockRecursive& _r;
+ public:
+ Exclusive(RWLockRecursive& r) : _r(r) {
+ int s = _r._state.get();
+ dassert( s <= 0 );
+ if( s == 0 )
+ _r.Lock();
+ _r._state.set(s-1);
+ }
+ ~Exclusive() {
+ int s = _r._state.get();
+ DEV wassert( s < 0 ); // wassert: don't throw from destructors
+ ++s;
+ _r._state.set(s);
+ if ( s == 0 )
+ _r.unlock();
+ }
+ };
+
+ class Shared : boost::noncopyable {
+ RWLockRecursive& _r;
+ bool _alreadyLockedExclusiveByUs;
+ public:
+ Shared(RWLockRecursive& r) : _r(r) {
+ int s = _r._state.get();
+ _alreadyLockedExclusiveByUs = s < 0;
+ if( !_alreadyLockedExclusiveByUs ) {
+ dassert( s >= 0 ); // -1 would mean exclusive
+ if( s == 0 )
+ _r.lock_shared();
+ _r._state.set(s+1);
+ }
+ }
+ ~Shared() {
+ if( _alreadyLockedExclusiveByUs ) {
+ DEV wassert( _r._state.get() < 0 );
+ }
+ else {
+ int s = _r._state.get() - 1;
+ DEV wassert( s >= 0 );
+ _r._state.set(s);
+ if( s == 0 )
+ _r.unlock_shared();
+ }
+ }
+ };
+ };
+
+ class RWLockRecursiveNongreedy : public RWLockRecursive {
+ virtual void Lock() {
+ bool got = false;
+ for ( int i=0; i<lowPriorityWaitMS; i++ ) {
+ if ( lock_try(0) ) {
+ got = true;
+ break;
+ }
+ int sleep = 1;
+ if ( i > ( lowPriorityWaitMS / 20 ) )
+ sleep = 10;
+ sleepmillis(sleep);
+ i += ( sleep - 1 );
+ }
+ if ( ! got ) {
+ log() << "couldn't lazily get rwlock" << endl;
+ RWLockBase::lock();
+ }
+ }
+
+ public:
+ const int lowPriorityWaitMS;
+ RWLockRecursiveNongreedy(const char *nm, int lpwaitms) : RWLockRecursive(nm), lowPriorityWaitMS(lpwaitms) { }
+ const char * implType() const { return RWLockRecursive::implType(); }
+
+ //just for testing:
+ bool __lock_try( int millis ) { return RWLockRecursive::lock_try(millis); }
+ };
+
+}
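
Sketch of RWLock with the scoped helpers above (the protected map is an arbitrary stand-in for shared state):

    #include <map>
    #include <string>
    #include "mongo/util/concurrency/rwlock.h"

    namespace {
        mongo::RWLock _settingsLock( "settingsLock" );
        std::map<std::string, std::string> _settings;
    }

    std::string getSetting( const std::string& k ) {
        mongo::rwlock_shared lk( _settingsLock );          // many readers may hold this at once
        std::map<std::string, std::string>::const_iterator i = _settings.find( k );
        return i == _settings.end() ? "" : i->second;
    }

    void setSetting( const std::string& k, const std::string& v ) {
        mongo::rwlock lk( _settingsLock, /*write=*/true ); // exclusive; readers and writers wait
        _settings[k] = v;
    }
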
diff --git a/src/mongo/util/concurrency/rwlockimpl.h b/src/mongo/util/concurrency/rwlockimpl.h
new file mode 100644
index 00000000000..4e07231447b
--- /dev/null
+++ b/src/mongo/util/concurrency/rwlockimpl.h
@@ -0,0 +1,170 @@
+// @file rwlockimpl.h
+
+#pragma once
+
+#if defined(MONGO_USE_SRW_ON_WINDOWS) && defined(_WIN32)
+
+// windows slimreaderwriter version. newer windows versions only
+
+namespace mongo {
+ class RWLockBase : boost::noncopyable {
+ SRWLOCK _lock;
+ protected:
+ RWLockBase() { InitializeSRWLock(&_lock); }
+ ~RWLockBase() {
+ // no special action needed to destroy a SRWLOCK
+ }
+ void lock() { AcquireSRWLockExclusive(&_lock); }
+ void unlock() { ReleaseSRWLockExclusive(&_lock); }
+ void lock_shared() { AcquireSRWLockShared(&_lock); }
+ void unlock_shared() { ReleaseSRWLockShared(&_lock); }
+ bool lock_shared_try( int millis ) {
+ if( TryAcquireSRWLockShared(&_lock) )
+ return true;
+ if( millis == 0 )
+ return false;
+ unsigned long long end = curTimeMicros64() + millis*1000;
+ while( 1 ) {
+ Sleep(1);
+ if( TryAcquireSRWLockShared(&_lock) )
+ return true;
+ if( curTimeMicros64() >= end )
+ break;
+ }
+ return false;
+ }
+ bool lock_try( int millis = 0 ) {
+ if( TryAcquireSRWLockExclusive(&_lock) ) // quick check to optimistically avoid calling curTimeMicros64
+ return true;
+ if( millis == 0 )
+ return false;
+ unsigned long long end = curTimeMicros64() + millis*1000;
+ do {
+ Sleep(1);
+ if( TryAcquireSRWLockExclusive(&_lock) )
+ return true;
+ } while( curTimeMicros64() < end );
+ return false;
+ }
+ // no upgradable for this impl
+ void lockAsUpgradable() { lock(); }
+ void unlockFromUpgradable() { unlock(); }
+ void upgrade() { }
+ public:
+ const char * implType() const { return "WINSRW"; }
+ };
+}
+
+#elif( BOOST_VERSION < 103500 )
+
+# if defined(_WIN32)
+# error need boost >= 1.35 for windows
+# endif
+
+// pthreads version
+
+# include <pthread.h>
+
+namespace mongo {
+ class RWLockBase : boost::noncopyable {
+ pthread_rwlock_t _lock;
+ static void check( int x ) {
+ if( x == 0 ) return;
+ log() << "pthread rwlock failed: " << x << endl;
+ assert( x == 0 );
+ }
+
+ ~RWLockBase() {
+ if ( ! StaticObserver::_destroyingStatics ) {
+ wassert( pthread_rwlock_destroy( &_lock ) == 0 ); // wassert as don't want to throw from a destructor
+ }
+ }
+
+ protected:
+ RWLockBase() {
+ check( pthread_rwlock_init( &_lock , 0 ) );
+ }
+
+ void lock() { check( pthread_rwlock_wrlock( &_lock ) ); }
+ void unlock() { check( pthread_rwlock_unlock( &_lock ) ); }
+ void lock_shared() { check( pthread_rwlock_rdlock( &_lock ) ); }
+ void unlock_shared() { check( pthread_rwlock_unlock( &_lock ) ); }
+ bool lock_shared_try( int millis ) { return _try( millis , false ); }
+ bool lock_try( int millis = 0 ) { return _try( millis , true ); }
+ bool _try( int millis , bool write ) {
+ while ( true ) {
+ int x = write ?
+ pthread_rwlock_trywrlock( &_lock ) :
+ pthread_rwlock_tryrdlock( &_lock );
+ if ( x <= 0 )
+ return true;
+ if ( millis-- <= 0 )
+ return false;
+ if ( x == EBUSY ) {
+ sleepmillis(1);
+ continue;
+ }
+ check(x);
+ }
+ return false;
+ }
+ // no upgradable for this impl
+ void lockAsUpgradable() { lock(); }
+ void unlockFromUpgradable() { unlock(); }
+ void upgrade() { }
+ public:
+ const char * implType() const { return "posix"; }
+ };
+}
+
+#else
+
+// Boost version
+
+# if defined(_WIN32)
+# include "shared_mutex_win.hpp"
+namespace mongo { typedef boost::modified_shared_mutex shared_mutex; }
+# else
+# include <boost/thread/shared_mutex.hpp>
+namespace mongo { using boost::shared_mutex; }
+# endif
+# undef assert
+# define assert MONGO_assert
+
+namespace mongo {
+ class RWLockBase : boost::noncopyable {
+ shared_mutex _m;
+ protected:
+ void lock() {
+ _m.lock();
+ }
+ void unlock() {
+ _m.unlock();
+ }
+ void lockAsUpgradable() {
+ _m.lock_upgrade();
+ }
+ void unlockFromUpgradable() { // upgradable -> unlocked
+ _m.unlock_upgrade();
+ }
+ void upgrade() { // upgradable -> exclusive lock
+ _m.unlock_upgrade_and_lock();
+ }
+ void lock_shared() {
+ _m.lock_shared();
+ }
+ void unlock_shared() {
+ _m.unlock_shared();
+ }
+ bool lock_shared_try( int millis ) {
+ return _m.timed_lock_shared( boost::posix_time::milliseconds(millis) );
+ }
+ bool lock_try( int millis = 0 ) {
+ return _m.timed_lock( boost::posix_time::milliseconds(millis) );
+ }
+ public:
+ const char * implType() const { return "boost"; }
+ };
+}
+
+#endif
diff --git a/src/mongo/util/concurrency/shared_mutex_win.hpp b/src/mongo/util/concurrency/shared_mutex_win.hpp
new file mode 100644
index 00000000000..e850fc6bab4
--- /dev/null
+++ b/src/mongo/util/concurrency/shared_mutex_win.hpp
@@ -0,0 +1,594 @@
+#ifndef BOOST_THREAD_WIN32_SHARED_MUTEX_HPP_MODIFIED
+#define BOOST_THREAD_WIN32_SHARED_MUTEX_HPP_MODIFIED
+
+// (C) Copyright 2006-8 Anthony Williams
+//
+// Distributed under the Boost Software License, Version 1.0. (See
+// accompanying file LICENSE_1_0.txt or copy at
+// http://www.boost.org/LICENSE_1_0.txt)
+
+/* MongoDB :
+ Slightly modified boost file to not die above 127 pending writes
+ Here is what changed (from boost 1.42.0 shared_mutex.hpp):
+ 1,2c1,2
+ < #ifndef BOOST_THREAD_WIN32_SHARED_MUTEX_HPP
+ < #define BOOST_THREAD_WIN32_SHARED_MUTEX_HPP
+ ---
+ > #ifndef BOOST_THREAD_WIN32_SHARED_MUTEX_HPP_MODIFIED
+ > #define BOOST_THREAD_WIN32_SHARED_MUTEX_HPP_MODIFIED
+ 22c27
+ < class shared_mutex:
+ ---
+ > class modified_shared_mutex:
+ 73c78
+ < shared_mutex():
+ ---
+ > modified_shared_mutex():
+ 84c89
+ < ~shared_mutex()
+ ---
+ > ~modified_shared_mutex()
+ 283a289,290
+ > if( new_state.exclusive_waiting == 127 ) // the maximum already!
+ > break;
+*/
+
+#include <boost/assert.hpp>
+#include <boost/detail/interlocked.hpp>
+#include <boost/thread/win32/thread_primitives.hpp>
+#include <boost/static_assert.hpp>
+#include <limits.h>
+#include <boost/utility.hpp>
+#include <boost/thread/thread_time.hpp>
+
+#include <boost/config/abi_prefix.hpp>
+
+namespace boost
+{
+ class modified_shared_mutex:
+ private boost::noncopyable
+ {
+ private:
+ struct state_data
+ {
+ unsigned shared_count:11,
+ shared_waiting:11,
+ exclusive:1,
+ upgrade:1,
+ exclusive_waiting:7,
+ exclusive_waiting_blocked:1;
+
+ friend bool operator==(state_data const& lhs,state_data const& rhs)
+ {
+ return *reinterpret_cast<unsigned const*>(&lhs)==*reinterpret_cast<unsigned const*>(&rhs);
+ }
+ };
+
+
+ template<typename T>
+ T interlocked_compare_exchange(T* target,T new_value,T comparand)
+ {
+ BOOST_STATIC_ASSERT(sizeof(T)==sizeof(long));
+ long const res=BOOST_INTERLOCKED_COMPARE_EXCHANGE(reinterpret_cast<long*>(target),
+ *reinterpret_cast<long*>(&new_value),
+ *reinterpret_cast<long*>(&comparand));
+ return *reinterpret_cast<T const*>(&res);
+ }
+
+ state_data state;
+ detail::win32::handle semaphores[2];
+ detail::win32::handle &unlock_sem;
+ detail::win32::handle &exclusive_sem;
+ detail::win32::handle upgrade_sem;
+
+ void release_waiters(state_data old_state)
+ {
+ if(old_state.exclusive_waiting)
+ {
+ BOOST_VERIFY(detail::win32::ReleaseSemaphore(exclusive_sem,1,0)!=0);
+ }
+
+ if(old_state.shared_waiting || old_state.exclusive_waiting)
+ {
+ BOOST_VERIFY(detail::win32::ReleaseSemaphore(unlock_sem,old_state.shared_waiting + (old_state.exclusive_waiting?1:0),0)!=0);
+ }
+ }
+
+
+ public:
+ modified_shared_mutex():
+ unlock_sem(semaphores[0]),
+ exclusive_sem(semaphores[1])
+ {
+ unlock_sem=detail::win32::create_anonymous_semaphore(0,LONG_MAX);
+ exclusive_sem=detail::win32::create_anonymous_semaphore(0,LONG_MAX);
+ upgrade_sem=detail::win32::create_anonymous_semaphore(0,LONG_MAX);
+ state_data state_={0};
+ state=state_;
+ }
+
+ ~modified_shared_mutex()
+ {
+ detail::win32::CloseHandle(upgrade_sem);
+ detail::win32::CloseHandle(unlock_sem);
+ detail::win32::CloseHandle(exclusive_sem);
+ }
+
+ bool try_lock_shared()
+ {
+ state_data old_state=state;
+ for(;;)
+ {
+ state_data new_state=old_state;
+ if(!new_state.exclusive && !new_state.exclusive_waiting_blocked)
+ {
+ ++new_state.shared_count;
+ }
+
+ state_data const current_state=interlocked_compare_exchange(&state,new_state,old_state);
+ if(current_state==old_state)
+ {
+ break;
+ }
+ old_state=current_state;
+ }
+ return !(old_state.exclusive| old_state.exclusive_waiting_blocked);
+ }
+
+ void lock_shared()
+ {
+ BOOST_VERIFY(timed_lock_shared(::boost::detail::get_system_time_sentinel()));
+ }
+
+ template<typename TimeDuration>
+ bool timed_lock_shared(TimeDuration const & relative_time)
+ {
+ return timed_lock_shared(get_system_time()+relative_time);
+ }
+
+ bool timed_lock_shared(boost::system_time const& wait_until)
+ {
+ for(;;)
+ {
+ state_data old_state=state;
+ for(;;)
+ {
+ state_data new_state=old_state;
+ if(new_state.exclusive || new_state.exclusive_waiting_blocked)
+ {
+ ++new_state.shared_waiting;
+ }
+ else
+ {
+ ++new_state.shared_count;
+ }
+
+ state_data const current_state=interlocked_compare_exchange(&state,new_state,old_state);
+ if(current_state==old_state)
+ {
+ break;
+ }
+ old_state=current_state;
+ }
+
+ if(!(old_state.exclusive| old_state.exclusive_waiting_blocked))
+ {
+ return true;
+ }
+
+ unsigned long const res=detail::win32::WaitForSingleObject(unlock_sem,::boost::detail::get_milliseconds_until(wait_until));
+ if(res==detail::win32::timeout)
+ {
+ for(;;)
+ {
+ state_data new_state=old_state;
+ if(new_state.exclusive || new_state.exclusive_waiting_blocked)
+ {
+ if(new_state.shared_waiting)
+ {
+ --new_state.shared_waiting;
+ }
+ }
+ else
+ {
+ ++new_state.shared_count;
+ }
+
+ state_data const current_state=interlocked_compare_exchange(&state,new_state,old_state);
+ if(current_state==old_state)
+ {
+ break;
+ }
+ old_state=current_state;
+ }
+
+ if(!(old_state.exclusive| old_state.exclusive_waiting_blocked))
+ {
+ return true;
+ }
+ return false;
+ }
+
+ BOOST_ASSERT(res==0);
+ }
+ }
+
+ void unlock_shared()
+ {
+ state_data old_state=state;
+ for(;;)
+ {
+ state_data new_state=old_state;
+ bool const last_reader=!--new_state.shared_count;
+
+ if(last_reader)
+ {
+ if(new_state.upgrade)
+ {
+ new_state.upgrade=false;
+ new_state.exclusive=true;
+ }
+ else
+ {
+ if(new_state.exclusive_waiting)
+ {
+ --new_state.exclusive_waiting;
+ new_state.exclusive_waiting_blocked=false;
+ }
+ new_state.shared_waiting=0;
+ }
+ }
+
+ state_data const current_state=interlocked_compare_exchange(&state,new_state,old_state);
+ if(current_state==old_state)
+ {
+ if(last_reader)
+ {
+ if(old_state.upgrade)
+ {
+ BOOST_VERIFY(detail::win32::ReleaseSemaphore(upgrade_sem,1,0)!=0);
+ }
+ else
+ {
+ release_waiters(old_state);
+ }
+ }
+ break;
+ }
+ old_state=current_state;
+ }
+ }
+
+ void lock()
+ {
+ BOOST_VERIFY(timed_lock(::boost::detail::get_system_time_sentinel()));
+ }
+
+ template<typename TimeDuration>
+ bool timed_lock(TimeDuration const & relative_time)
+ {
+ return timed_lock(get_system_time()+relative_time);
+ }
+
+ bool try_lock()
+ {
+ state_data old_state=state;
+ for(;;)
+ {
+ state_data new_state=old_state;
+ if(new_state.shared_count || new_state.exclusive)
+ {
+ return false;
+ }
+ else
+ {
+ new_state.exclusive=true;
+ }
+
+ state_data const current_state=interlocked_compare_exchange(&state,new_state,old_state);
+ if(current_state==old_state)
+ {
+ break;
+ }
+ old_state=current_state;
+ }
+ return true;
+ }
+
+
+ bool timed_lock(boost::system_time const& wait_until)
+ {
+ for(;;)
+ {
+ state_data old_state=state;
+
+ for(;;)
+ {
+ state_data new_state=old_state;
+ if(new_state.shared_count || new_state.exclusive)
+ {
+ if( new_state.exclusive_waiting == 127 ) // the maximum already!
+ break;
+ ++new_state.exclusive_waiting;
+ new_state.exclusive_waiting_blocked=true;
+ }
+ else
+ {
+ new_state.exclusive=true;
+ }
+
+ state_data const current_state=interlocked_compare_exchange(&state,new_state,old_state);
+ if(current_state==old_state)
+ {
+ break;
+ }
+ old_state=current_state;
+ }
+
+ if(!old_state.shared_count && !old_state.exclusive)
+ {
+ return true;
+ }
+ unsigned long const wait_res=detail::win32::WaitForMultipleObjects(2,semaphores,true,::boost::detail::get_milliseconds_until(wait_until));
+ if(wait_res==detail::win32::timeout)
+ {
+ for(;;)
+ {
+ state_data new_state=old_state;
+ if(new_state.shared_count || new_state.exclusive)
+ {
+ if(new_state.exclusive_waiting)
+ {
+ if(!--new_state.exclusive_waiting)
+ {
+ new_state.exclusive_waiting_blocked=false;
+ }
+ }
+ }
+ else
+ {
+ new_state.exclusive=true;
+ }
+
+ state_data const current_state=interlocked_compare_exchange(&state,new_state,old_state);
+ if(current_state==old_state)
+ {
+ break;
+ }
+ old_state=current_state;
+ }
+ if(!old_state.shared_count && !old_state.exclusive)
+ {
+ return true;
+ }
+ return false;
+ }
+ BOOST_ASSERT(wait_res<2);
+ }
+ }
+
+ void unlock()
+ {
+ state_data old_state=state;
+ for(;;)
+ {
+ state_data new_state=old_state;
+ new_state.exclusive=false;
+ if(new_state.exclusive_waiting)
+ {
+ --new_state.exclusive_waiting;
+ new_state.exclusive_waiting_blocked=false;
+ }
+ new_state.shared_waiting=0;
+
+ state_data const current_state=interlocked_compare_exchange(&state,new_state,old_state);
+ if(current_state==old_state)
+ {
+ break;
+ }
+ old_state=current_state;
+ }
+ release_waiters(old_state);
+ }
+
+ void lock_upgrade()
+ {
+ for(;;)
+ {
+ state_data old_state=state;
+ for(;;)
+ {
+ state_data new_state=old_state;
+ if(new_state.exclusive || new_state.exclusive_waiting_blocked || new_state.upgrade)
+ {
+ ++new_state.shared_waiting;
+ }
+ else
+ {
+ ++new_state.shared_count;
+ new_state.upgrade=true;
+ }
+
+ state_data const current_state=interlocked_compare_exchange(&state,new_state,old_state);
+ if(current_state==old_state)
+ {
+ break;
+ }
+ old_state=current_state;
+ }
+
+ if(!(old_state.exclusive|| old_state.exclusive_waiting_blocked|| old_state.upgrade))
+ {
+ return;
+ }
+
+ BOOST_VERIFY(!detail::win32::WaitForSingleObject(unlock_sem,detail::win32::infinite));
+ }
+ }
+
+ bool try_lock_upgrade()
+ {
+ state_data old_state=state;
+ for(;;)
+ {
+ state_data new_state=old_state;
+ if(new_state.exclusive || new_state.exclusive_waiting_blocked || new_state.upgrade)
+ {
+ return false;
+ }
+ else
+ {
+ ++new_state.shared_count;
+ new_state.upgrade=true;
+ }
+
+ state_data const current_state=interlocked_compare_exchange(&state,new_state,old_state);
+ if(current_state==old_state)
+ {
+ break;
+ }
+ old_state=current_state;
+ }
+ return true;
+ }
+
+ void unlock_upgrade()
+ {
+ state_data old_state=state;
+ for(;;)
+ {
+ state_data new_state=old_state;
+ new_state.upgrade=false;
+ bool const last_reader=!--new_state.shared_count;
+
+ if(last_reader)
+ {
+ if(new_state.exclusive_waiting)
+ {
+ --new_state.exclusive_waiting;
+ new_state.exclusive_waiting_blocked=false;
+ }
+ new_state.shared_waiting=0;
+ }
+
+ state_data const current_state=interlocked_compare_exchange(&state,new_state,old_state);
+ if(current_state==old_state)
+ {
+ if(last_reader)
+ {
+ release_waiters(old_state);
+ }
+ break;
+ }
+ old_state=current_state;
+ }
+ }
+
+ void unlock_upgrade_and_lock()
+ {
+ state_data old_state=state;
+ for(;;)
+ {
+ state_data new_state=old_state;
+ bool const last_reader=!--new_state.shared_count;
+
+ if(last_reader)
+ {
+ new_state.upgrade=false;
+ new_state.exclusive=true;
+ }
+
+ state_data const current_state=interlocked_compare_exchange(&state,new_state,old_state);
+ if(current_state==old_state)
+ {
+ if(!last_reader)
+ {
+ BOOST_VERIFY(!detail::win32::WaitForSingleObject(upgrade_sem,detail::win32::infinite));
+ }
+ break;
+ }
+ old_state=current_state;
+ }
+ }
+
+ void unlock_and_lock_upgrade()
+ {
+ state_data old_state=state;
+ for(;;)
+ {
+ state_data new_state=old_state;
+ new_state.exclusive=false;
+ new_state.upgrade=true;
+ ++new_state.shared_count;
+ if(new_state.exclusive_waiting)
+ {
+ --new_state.exclusive_waiting;
+ new_state.exclusive_waiting_blocked=false;
+ }
+ new_state.shared_waiting=0;
+
+ state_data const current_state=interlocked_compare_exchange(&state,new_state,old_state);
+ if(current_state==old_state)
+ {
+ break;
+ }
+ old_state=current_state;
+ }
+ release_waiters(old_state);
+ }
+
+ void unlock_and_lock_shared()
+ {
+ state_data old_state=state;
+ for(;;)
+ {
+ state_data new_state=old_state;
+ new_state.exclusive=false;
+ ++new_state.shared_count;
+ if(new_state.exclusive_waiting)
+ {
+ --new_state.exclusive_waiting;
+ new_state.exclusive_waiting_blocked=false;
+ }
+ new_state.shared_waiting=0;
+
+ state_data const current_state=interlocked_compare_exchange(&state,new_state,old_state);
+ if(current_state==old_state)
+ {
+ break;
+ }
+ old_state=current_state;
+ }
+ release_waiters(old_state);
+ }
+
+ void unlock_upgrade_and_lock_shared()
+ {
+ state_data old_state=state;
+ for(;;)
+ {
+ state_data new_state=old_state;
+ new_state.upgrade=false;
+ if(new_state.exclusive_waiting)
+ {
+ --new_state.exclusive_waiting;
+ new_state.exclusive_waiting_blocked=false;
+ }
+ new_state.shared_waiting=0;
+
+ state_data const current_state=interlocked_compare_exchange(&state,new_state,old_state);
+ if(current_state==old_state)
+ {
+ break;
+ }
+ old_state=current_state;
+ }
+ release_waiters(old_state);
+ }
+
+ };
+}
+
+#include <boost/config/abi_suffix.hpp>
+
+#endif
diff --git a/src/mongo/util/concurrency/spin_lock.cpp b/src/mongo/util/concurrency/spin_lock.cpp
new file mode 100644
index 00000000000..cbf517b2746
--- /dev/null
+++ b/src/mongo/util/concurrency/spin_lock.cpp
@@ -0,0 +1,107 @@
+// spin_lock.cpp
+
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h" // todo eliminate this include
+#include <time.h>
+#include "spin_lock.h"
+
+namespace mongo {
+
+ SpinLock::~SpinLock() {
+#if defined(_WIN32)
+ DeleteCriticalSection(&_cs);
+#elif defined(__USE_XOPEN2K)
+ pthread_spin_destroy(&_lock);
+#endif
+ }
+
+ SpinLock::SpinLock()
+#if defined(_WIN32)
+ { InitializeCriticalSectionAndSpinCount(&_cs, 4000); }
+#elif defined(__USE_XOPEN2K)
+ { pthread_spin_init( &_lock , 0 ); }
+#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
+ : _locked( false ) { }
+#else
+ : _mutex( "SpinLock" ) { }
+#endif
+
+#if defined(__USE_XOPEN2K)
+ NOINLINE_DECL void SpinLock::_lk() {
+ /**
+         * this is designed to perform close to the default spin lock.
+         * the reason for the mild insanity is to prevent horrible performance
+         * when contention spikes.
+         * it allows spinlocks to be used in many more places,
+         * which is good because even with this change they are about 8x faster on Linux.
+ */
+
+ for ( int i=0; i<1000; i++ ) {
+ if ( pthread_spin_trylock( &_lock ) == 0 )
+ return;
+ asm volatile ( "pause" ) ; // maybe trylock does this; just in case.
+ }
+
+ for ( int i=0; i<1000; i++ ) {
+ if ( pthread_spin_trylock( &_lock ) == 0 )
+ return;
+ pthread_yield();
+ }
+
+ struct timespec t;
+ t.tv_sec = 0;
+ t.tv_nsec = 5000000;
+
+ while ( pthread_spin_trylock( &_lock ) != 0 ) {
+ nanosleep(&t, NULL);
+ }
+ }
+#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
+ void SpinLock::lock() {
+
+ // fast path
+ if (!_locked && !__sync_lock_test_and_set(&_locked, true)) {
+ return;
+ }
+
+ // wait for lock
+ int wait = 1000;
+ while ((wait-- > 0) && (_locked)) {
+ asm volatile ( "pause" ) ;
+ }
+
+ // if failed to grab lock, sleep
+ struct timespec t;
+ t.tv_sec = 0;
+ t.tv_nsec = 5000000;
+ while (__sync_lock_test_and_set(&_locked, true)) {
+ nanosleep(&t, NULL);
+ }
+ }
+#endif
+
+ bool SpinLock::isfast() {
+#if defined(_WIN32) || defined(__USE_XOPEN2K) || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
+ return true;
+#else
+ return false;
+#endif
+ }
+
+
+} // namespace mongo
diff --git a/src/mongo/util/concurrency/spin_lock.h b/src/mongo/util/concurrency/spin_lock.h
new file mode 100644
index 00000000000..d90de51afac
--- /dev/null
+++ b/src/mongo/util/concurrency/spin_lock.h
@@ -0,0 +1,77 @@
+// spin_lock.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "mutex.h"
+
+namespace mongo {
+
+ /**
+ * The spinlock currently requires late GCC support routines to be efficient.
+     * Other platforms default to a mutex implementation.
+ */
+ class SpinLock : boost::noncopyable {
+ public:
+ SpinLock();
+ ~SpinLock();
+
+ static bool isfast(); // true if a real spinlock on this platform
+
+ private:
+#if defined(_WIN32)
+ CRITICAL_SECTION _cs;
+ public:
+ void lock() {EnterCriticalSection(&_cs); }
+ void unlock() { LeaveCriticalSection(&_cs); }
+#elif defined(__USE_XOPEN2K)
+ pthread_spinlock_t _lock;
+ void _lk();
+ public:
+ void unlock() { pthread_spin_unlock(&_lock); }
+ void lock() {
+ if ( pthread_spin_trylock( &_lock ) == 0 )
+ return;
+ _lk();
+ }
+#elif defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
+ volatile bool _locked;
+ public:
+ void unlock() {__sync_lock_release(&_locked); }
+ void lock();
+#else
+ // default to a mutex if not implemented
+ SimpleMutex _mutex;
+ public:
+ void unlock() { _mutex.unlock(); }
+ void lock() { _mutex.lock(); }
+#endif
+ };
+
+ class scoped_spinlock : boost::noncopyable {
+ public:
+ scoped_spinlock( SpinLock& l ) : _l(l) {
+ _l.lock();
+ }
+ ~scoped_spinlock() {
+ _l.unlock();}
+ private:
+ SpinLock& _l;
+ };
+
+} // namespace mongo
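
A minimal usage sketch of the SpinLock/scoped_spinlock pair declared above; the counter and function names are illustrative:

    #include "mongo/util/concurrency/spin_lock.h"

    namespace {
        mongo::SpinLock counterLock;
        long long counter = 0;                        // illustrative shared state

        void bumpCounter() {
            mongo::scoped_spinlock lk(counterLock);   // lock() now, unlock() when lk leaves scope
            ++counter;
        }
    }

SpinLock::isfast() can be checked at startup to see whether the build got a real spinlock or the SimpleMutex fallback.
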
diff --git a/src/mongo/util/concurrency/synchronization.cpp b/src/mongo/util/concurrency/synchronization.cpp
new file mode 100644
index 00000000000..4186745dc16
--- /dev/null
+++ b/src/mongo/util/concurrency/synchronization.cpp
@@ -0,0 +1,81 @@
+// synchronization.cpp
+
+/* Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "synchronization.h"
+
+namespace mongo {
+
+ Notification::Notification() : _mutex ( "Notification" ){
+ lookFor = 1;
+ cur = 0;
+ }
+
+ Notification::~Notification() { }
+
+ void Notification::waitToBeNotified() {
+ scoped_lock lock( _mutex );
+ while ( lookFor != cur )
+ _condition.wait( lock.boost() );
+ lookFor++;
+ }
+
+ void Notification::notifyOne() {
+ scoped_lock lock( _mutex );
+ assert( cur != lookFor );
+ cur++;
+ _condition.notify_one();
+ }
+
+ /* --- NotifyAll --- */
+
+ NotifyAll::NotifyAll() : _mutex("NotifyAll") {
+ _lastDone = 0;
+ _lastReturned = 0;
+ _nWaiting = 0;
+ }
+
+ NotifyAll::When NotifyAll::now() {
+ scoped_lock lock( _mutex );
+ return ++_lastReturned;
+ }
+
+ void NotifyAll::waitFor(When e) {
+ scoped_lock lock( _mutex );
+ ++_nWaiting;
+ while( _lastDone < e ) {
+ _condition.wait( lock.boost() );
+ }
+ }
+
+ void NotifyAll::awaitBeyondNow() {
+ scoped_lock lock( _mutex );
+ ++_nWaiting;
+ When e = ++_lastReturned;
+ while( _lastDone <= e ) {
+ _condition.wait( lock.boost() );
+ }
+ }
+
+ void NotifyAll::notifyAll(When e) {
+ scoped_lock lock( _mutex );
+ _lastDone = e;
+ _nWaiting = 0;
+ _condition.notify_all();
+ }
+
+} // namespace mongo
diff --git a/src/mongo/util/concurrency/synchronization.h b/src/mongo/util/concurrency/synchronization.h
new file mode 100644
index 00000000000..f9a40cc3ab9
--- /dev/null
+++ b/src/mongo/util/concurrency/synchronization.h
@@ -0,0 +1,86 @@
+// synchronization.h
+
+/* Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <boost/thread/condition.hpp>
+#include "mutex.h"
+
+namespace mongo {
+
+ /*
+ * A class to establish a synchronization point between two threads. One thread is the waiter and one is
+ * the notifier. After the notification event, both proceed normally.
+ *
+ * This class is thread-safe.
+ */
+ class Notification {
+ public:
+ Notification();
+ ~Notification();
+
+ /*
+ * Blocks until the method 'notifyOne()' is called.
+ */
+ void waitToBeNotified();
+
+ /*
+ * Notifies the waiter of '*this' that it can proceed. Can only be called once.
+ */
+ void notifyOne();
+
+ private:
+ mongo::mutex _mutex; // protects state below
+ unsigned long long lookFor;
+ unsigned long long cur;
+        boost::condition _condition; // cond over 'cur' catching up to 'lookFor'
+ };
+
+    /** establishes a synchronization point between threads. N threads are waiters and one is the notifier.
+        thread-safe.
+ */
+ class NotifyAll : boost::noncopyable {
+ public:
+ NotifyAll();
+
+ typedef unsigned long long When;
+
+ When now();
+
+ /** awaits the next notifyAll() call by another thread. notifications that precede this
+ call are ignored -- we are looking for a fresh event.
+ */
+ void waitFor(When);
+
+ /** a bit faster than waitFor( now() ) */
+ void awaitBeyondNow();
+
+ /** may be called multiple times. notifies all waiters */
+ void notifyAll(When);
+
+ /** indicates how many threads are waiting for a notify. */
+ unsigned nWaiting() const { return _nWaiting; }
+
+ private:
+ mongo::mutex _mutex;
+ boost::condition _condition;
+ When _lastDone;
+ When _lastReturned;
+ unsigned _nWaiting;
+ };
+
+} // namespace mongo
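
A sketch of how the two primitives above are intended to be paired between threads; the thread bodies and the hand-off of the When token are illustrative:

    #include "mongo/util/concurrency/synchronization.h"

    mongo::Notification ready;      // one waiter, one notifier, single shot
    mongo::NotifyAll barrier;       // many waiters, repeated notifies

    void waiterThread() {
        ready.waitToBeNotified();                       // blocks until notifyOne() below
    }

    void coordinatorThread() {
        ready.notifyOne();                              // may only be called once per Notification

        mongo::NotifyAll::When e = barrier.now();
        // ... hand e to worker threads, which call barrier.waitFor(e) ...
        barrier.notifyAll(e);                           // wakes everyone waiting for e or earlier
    }
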
diff --git a/src/mongo/util/concurrency/task.cpp b/src/mongo/util/concurrency/task.cpp
new file mode 100644
index 00000000000..0b6ab166f19
--- /dev/null
+++ b/src/mongo/util/concurrency/task.cpp
@@ -0,0 +1,181 @@
+// @file task.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+
+#include <boost/thread/condition.hpp>
+
+#include "task.h"
+#include "../goodies.h"
+#include "../unittest.h"
+#include "../time_support.h"
+
+namespace mongo {
+
+ namespace task {
+
+ /*void foo() {
+ boost::mutex m;
+ boost::mutex::scoped_lock lk(m);
+ boost::condition cond;
+ cond.wait(lk);
+ cond.notify_one();
+ }*/
+
+ Task::Task()
+ : BackgroundJob( true /* deleteSelf */ ) {
+ n = 0;
+ repeat = 0;
+ }
+
+ void Task::halt() { repeat = 0; }
+
+ void Task::run() {
+ assert( n == 0 );
+ while( 1 ) {
+ n++;
+ try {
+ doWork();
+ }
+ catch(...) { }
+ if( repeat == 0 )
+ break;
+ sleepmillis(repeat);
+ if( inShutdown() )
+ break;
+ }
+ }
+
+ void Task::begin() {
+ go();
+ }
+
+ void fork(Task *t) {
+ t->begin();
+ }
+
+ void repeat(Task *t, unsigned millis) {
+ t->repeat = millis;
+ t->begin();
+ }
+
+ }
+}
+
+#include "msg.h"
+
+/* task::Server */
+
+namespace mongo {
+ namespace task {
+
+ /* to get back a return value */
+ struct Ret {
+ Ret() : done(false),m("Ret") { }
+ bool done;
+ mongo::mutex m;
+ boost::condition c;
+ const lam *msg;
+ void f() {
+ (*msg)();
+ done = true;
+ c.notify_one();
+ }
+ };
+
+ void Server::call( const lam& msg ) {
+ Ret r;
+ r.msg = &msg;
+ lam f = boost::bind(&Ret::f, &r);
+ send(f);
+ {
+ scoped_lock lk(r.m);
+ while( !r.done )
+ r.c.wait(lk.boost());
+ }
+ }
+
+ void Server::send( lam msg ) {
+ {
+ scoped_lock lk(m);
+ d.push_back(msg);
+ wassert( d.size() < 1024 );
+ }
+ c.notify_one();
+ }
+
+ void Server::doWork() {
+ starting();
+ while( 1 ) {
+ lam f;
+ try {
+ scoped_lock lk(m);
+ while( d.empty() )
+ c.wait(lk.boost());
+ f = d.front();
+ d.pop_front();
+ }
+ catch(...) {
+                    log() << "ERROR exception in Server::doWork?" << endl;
+ }
+ try {
+ f();
+ if( rq ) {
+ rq = false;
+ {
+ scoped_lock lk(m);
+ d.push_back(f);
+ }
+ }
+ }
+ catch(std::exception& e) {
+ log() << "Server::doWork task:" << name() << " exception:" << e.what() << endl;
+ }
+ catch(const char *p) {
+ log() << "Server::doWork task:" << name() << " unknown c exception:" <<
+ ((p&&strlen(p)<800)?p:"?") << endl;
+ }
+ catch(...) {
+ log() << "Server::doWork unknown exception task:" << name() << endl;
+ }
+ }
+ }
+
+ static Server *s;
+ static void abc(int i) {
+ cout << "Hello " << i << endl;
+ s->requeue();
+ }
+ class TaskUnitTest : public mongo::UnitTest {
+ public:
+ virtual void run() {
+ lam f = boost::bind(abc, 3);
+ //f();
+
+ s = new Server("unittest");
+ fork(s);
+ s->send(f);
+
+ sleepsecs(30);
+ cout <<" done" << endl;
+
+ }
+ }; // not running. taskunittest;
+
+ }
+}
diff --git a/src/mongo/util/concurrency/task.h b/src/mongo/util/concurrency/task.h
new file mode 100644
index 00000000000..d7b45eeef24
--- /dev/null
+++ b/src/mongo/util/concurrency/task.h
@@ -0,0 +1,72 @@
+// @file task.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../background.h"
+
+namespace mongo {
+
+ namespace task {
+
+ /** abstraction around threads. simpler than BackgroundJob which is used behind the scenes.
+ allocate the Task dynamically. when the thread terminates, the Task object will delete itself.
+ */
+ class Task : private BackgroundJob {
+ protected:
+ virtual void doWork() = 0; // implement the task here.
+            virtual string name() const = 0; // name the thread
+ public:
+ Task();
+
+ /** for a repeating task, stop after current invocation ends. can be called by other threads
+ as long as the Task is still in scope.
+ */
+ void halt();
+ private:
+ unsigned n, repeat;
+ friend void fork(Task* t);
+ friend void repeat(Task* t, unsigned millis);
+ virtual void run();
+ //virtual void ending() { }
+ void begin();
+ };
+
+ /** run once */
+ void fork(Task *t);
+
+ /** run doWork() over and over, with a pause between runs of millis */
+ void repeat(Task *t, unsigned millis);
+
+ /*** Example ***
+ inline void sample() {
+ class Sample : public Task {
+ public:
+ int result;
+ virtual void doWork() { result = 1234; }
+ Sample() : result(0) { }
+ };
+ shared_ptr<Sample> q( new Sample() );
+ fork(q);
+ cout << q->result << endl; // could print 1234 or 0.
+ }
+ */
+
+ }
+
+}
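
The comment above sketches fork(); a companion sketch for the repeating form, with an illustrative task body and name:

    class FlushTask : public mongo::task::Task {
    public:
        virtual std::string name() const { return "flush"; }
    protected:
        virtual void doWork() { /* periodic work goes here */ }
    };

    void startBackgroundFlush() {
        FlushTask *t = new FlushTask();     // Task deletes itself when its thread terminates
        mongo::task::repeat(t, 1000);       // run doWork() roughly every 1000 ms
        // while the task is still alive, t->halt() stops it after the current pass
    }
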
diff --git a/src/mongo/util/concurrency/thread_pool.cpp b/src/mongo/util/concurrency/thread_pool.cpp
new file mode 100644
index 00000000000..1c258847cb5
--- /dev/null
+++ b/src/mongo/util/concurrency/thread_pool.cpp
@@ -0,0 +1,141 @@
+/* threadpool.cpp
+*/
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "thread_pool.h"
+#include "mvar.h"
+
+namespace mongo {
+ namespace threadpool {
+
+ // Worker thread
+ class Worker : boost::noncopyable {
+ public:
+ explicit Worker(ThreadPool& owner)
+ : _owner(owner)
+ , _is_done(true)
+ , _thread(boost::bind(&Worker::loop, this))
+ {}
+
+ // destructor will block until current operation is completed
+ // Acts as a "join" on this thread
+ ~Worker() {
+ _task.put(Task());
+ _thread.join();
+ }
+
+ void set_task(Task& func) {
+ assert(!func.empty());
+ assert(_is_done);
+ _is_done = false;
+
+ _task.put(func);
+ }
+
+ private:
+ ThreadPool& _owner;
+ MVar<Task> _task;
+ bool _is_done; // only used for error detection
+ boost::thread _thread;
+
+ void loop() {
+ while (true) {
+ Task task = _task.take();
+ if (task.empty())
+ break; // ends the thread
+
+ try {
+ task();
+ }
+                    catch (std::exception& e) {
+                        log() << "Unhandled exception in worker thread: " << e.what() << endl;
+ }
+ catch (...) {
+ log() << "Unhandled non-exception in worker thread" << endl;
+ }
+ _is_done = true;
+ _owner.task_done(this);
+ }
+ }
+ };
+
+ ThreadPool::ThreadPool(int nThreads)
+ : _mutex("ThreadPool"), _tasksRemaining(0)
+ , _nThreads(nThreads) {
+ scoped_lock lock(_mutex);
+ while (nThreads-- > 0) {
+ Worker* worker = new Worker(*this);
+ _freeWorkers.push_front(worker);
+ }
+ }
+
+ ThreadPool::~ThreadPool() {
+ join();
+
+ assert(_tasks.empty());
+
+ // O(n) but n should be small
+ assert(_freeWorkers.size() == (unsigned)_nThreads);
+
+ while(!_freeWorkers.empty()) {
+ delete _freeWorkers.front();
+ _freeWorkers.pop_front();
+ }
+ }
+
+ void ThreadPool::join() {
+ scoped_lock lock(_mutex);
+ while(_tasksRemaining) {
+ _condition.wait(lock.boost());
+ }
+ }
+
+ void ThreadPool::schedule(Task task) {
+ scoped_lock lock(_mutex);
+
+ _tasksRemaining++;
+
+ if (!_freeWorkers.empty()) {
+ _freeWorkers.front()->set_task(task);
+ _freeWorkers.pop_front();
+ }
+ else {
+ _tasks.push_back(task);
+ }
+ }
+
+ // should only be called by a worker from the worker thread
+ void ThreadPool::task_done(Worker* worker) {
+ scoped_lock lock(_mutex);
+
+ if (!_tasks.empty()) {
+ worker->set_task(_tasks.front());
+ _tasks.pop_front();
+ }
+ else {
+ _freeWorkers.push_front(worker);
+ }
+
+ _tasksRemaining--;
+
+ if(_tasksRemaining == 0)
+ _condition.notify_all();
+ }
+
+ } //namespace threadpool
+} //namespace mongo
diff --git a/src/mongo/util/concurrency/thread_pool.h b/src/mongo/util/concurrency/thread_pool.h
new file mode 100644
index 00000000000..b348ed1d01b
--- /dev/null
+++ b/src/mongo/util/concurrency/thread_pool.h
@@ -0,0 +1,82 @@
+// thread_pool.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <boost/function.hpp>
+#include <boost/bind.hpp>
+#undef assert
+#define assert MONGO_assert
+
+namespace mongo {
+
+ namespace threadpool {
+ class Worker;
+
+ typedef boost::function<void(void)> Task; //nullary function or functor
+
+ // exported to the mongo namespace
+ class ThreadPool : boost::noncopyable {
+ public:
+ explicit ThreadPool(int nThreads=8);
+
+ // blocks until all tasks are complete (tasks_remaining() == 0)
+ // You should not call schedule while in the destructor
+ ~ThreadPool();
+
+ // blocks until all tasks are complete (tasks_remaining() == 0)
+ // does not prevent new tasks from being scheduled so could wait forever.
+ // Also, new tasks could be scheduled after this returns.
+ void join();
+
+ // task will be copied a few times so make sure it's relatively cheap
+ void schedule(Task task);
+
+ // Helpers that wrap schedule and boost::bind.
+ // Functor and args will be copied a few times so make sure it's relatively cheap
+ template<typename F, typename A>
+ void schedule(F f, A a) { schedule(boost::bind(f,a)); }
+ template<typename F, typename A, typename B>
+ void schedule(F f, A a, B b) { schedule(boost::bind(f,a,b)); }
+ template<typename F, typename A, typename B, typename C>
+ void schedule(F f, A a, B b, C c) { schedule(boost::bind(f,a,b,c)); }
+ template<typename F, typename A, typename B, typename C, typename D>
+ void schedule(F f, A a, B b, C c, D d) { schedule(boost::bind(f,a,b,c,d)); }
+ template<typename F, typename A, typename B, typename C, typename D, typename E>
+ void schedule(F f, A a, B b, C c, D d, E e) { schedule(boost::bind(f,a,b,c,d,e)); }
+
+ int tasks_remaining() { return _tasksRemaining; }
+
+ private:
+ mongo::mutex _mutex;
+ boost::condition _condition;
+
+ list<Worker*> _freeWorkers; //used as LIFO stack (always front)
+ list<Task> _tasks; //used as FIFO queue (push_back, pop_front)
+ int _tasksRemaining; // in queue + currently processing
+ int _nThreads; // only used for sanity checking. could be removed in the future.
+
+ // should only be called by a worker from the worker's thread
+ void task_done(Worker* worker);
+ friend class Worker;
+ };
+
+ } //namespace threadpool
+
+ using threadpool::ThreadPool;
+
+} //namespace mongo
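
A short usage sketch for the pool; the per-chunk work function is illustrative:

    #include "mongo/util/concurrency/thread_pool.h"

    static void processChunk(int chunkId) { /* independent per-chunk work */ }

    void runBatch() {
        mongo::ThreadPool pool(4);              // four worker threads
        for (int i = 0; i < 100; i++)
            pool.schedule(processChunk, i);     // uses the boost::bind helper overload
        pool.join();                            // blocks until tasks_remaining() == 0
    }
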
diff --git a/src/mongo/util/concurrency/threadlocal.h b/src/mongo/util/concurrency/threadlocal.h
new file mode 100644
index 00000000000..57a4f799dfa
--- /dev/null
+++ b/src/mongo/util/concurrency/threadlocal.h
@@ -0,0 +1,126 @@
+#pragma once
+
+/**
+* Copyright (C) 2011 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include <boost/thread/tss.hpp>
+
+namespace mongo {
+
+ using boost::thread_specific_ptr;
+
+ /* thread local "value" rather than a pointer
+ good for things which have copy constructors (and the copy constructor is fast enough)
+ e.g.
+ ThreadLocalValue<int> myint;
+ */
+ template<class T>
+ class ThreadLocalValue {
+ public:
+ ThreadLocalValue( T def = 0 ) : _default( def ) { }
+
+ T get() const {
+ T * val = _val.get();
+ if ( val )
+ return *val;
+ return _default;
+ }
+
+ void set( const T& i ) {
+ T *v = _val.get();
+ if( v ) {
+ *v = i;
+ return;
+ }
+ v = new T(i);
+ _val.reset( v );
+ }
+
+ T& getRef() {
+ T *v = _val.get();
+ if( v ) {
+ return *v;
+ }
+ v = new T(_default);
+ _val.reset( v );
+ return *v;
+ }
+
+ private:
+ boost::thread_specific_ptr<T> _val;
+ const T _default;
+ };
+
+ /* TSP
+ These macros use intrinsics which are faster than boost::thread_specific_ptr.
+       However, the intrinsics don't free up objects on thread closure. Thus we use
+       a combination here, with the assumption that resets are infrequent, so that
+       gets are fast.
+ */
+#if defined(_WIN32) || (defined(__GNUC__) && defined(__linux__))
+
+ template< class T >
+ struct TSP {
+ boost::thread_specific_ptr<T> tsp;
+ public:
+ T* get() const;
+ void reset(T* v);
+ };
+
+# if defined(_WIN32)
+
+# define TSP_DECLARE(T,p) extern TSP<T> p;
+
+# define TSP_DEFINE(T,p) __declspec( thread ) T* _ ## p; \
+ TSP<T> p; \
+ template<> T* TSP<T>::get() const { return _ ## p; } \
+ void TSP<T>::reset(T* v) { \
+ tsp.reset(v); \
+ _ ## p = v; \
+ }
+# else
+
+# define TSP_DECLARE(T,p) \
+ extern __thread T* _ ## p; \
+ template<> inline T* TSP<T>::get() const { return _ ## p; } \
+ extern TSP<T> p;
+
+# define TSP_DEFINE(T,p) \
+ __thread T* _ ## p; \
+ template<> void TSP<T>::reset(T* v) { \
+ tsp.reset(v); \
+ _ ## p = v; \
+ } \
+ TSP<T> p;
+# endif
+
+#else
+
+ template< class T >
+ struct TSP {
+ thread_specific_ptr<T> tsp;
+ public:
+ T* get() const { return tsp.get(); }
+ void reset(T* v) { tsp.reset(v); }
+ };
+
+# define TSP_DECLARE(T,p) extern TSP<T> p;
+
+# define TSP_DEFINE(T,p) TSP<T> p;
+
+#endif
+
+}
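
A sketch of the two facilities above: ThreadLocalValue for small copyable values, and the TSP macros for a per-thread pointer on the intrinsic-backed fast path. The OpCtx type and names are illustrative, and everything is placed in namespace mongo because the macros expand to TSP<T> unqualified:

    namespace mongo {
        static ThreadLocalValue<int> opCount(0);            // per-thread counter with a default
        void noteOp() { opCount.set(opCount.get() + 1); }

        struct OpCtx { int n; };                            // illustrative per-thread state
        TSP_DECLARE(OpCtx, currentCtx)                      // normally in a header
        TSP_DEFINE(OpCtx, currentCtx)                       // in exactly one .cpp

        void bindCtx(OpCtx* c) { currentCtx.reset(c); }     // freed on thread exit via the tsp member
        OpCtx* thisCtx()       { return currentCtx.get(); }
    }
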
diff --git a/src/mongo/util/concurrency/value.h b/src/mongo/util/concurrency/value.h
new file mode 100644
index 00000000000..fdd0d9bbb42
--- /dev/null
+++ b/src/mongo/util/concurrency/value.h
@@ -0,0 +1,139 @@
+/* @file value.h
+ concurrency helpers DiagStr, Guarded
+*/
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "spin_lock.h"
+
+namespace mongo {
+
+    /** declares a variable that is "guarded" by a mutex.
+
+ The decl documents the rule. For example "counta and countb are guarded by xyzMutex":
+
+ Guarded<int, xyzMutex> counta;
+ Guarded<int, xyzMutex> countb;
+
+ Upon use, specify the scoped_lock object. This makes it hard for someone
+       later to forget to be in the lock. A check that it is the right lock is made at
+       runtime in _DEBUG builds.
+ */
+ template <typename T, SimpleMutex& BY>
+ class Guarded {
+ T _val;
+ public:
+ T& ref(const SimpleMutex::scoped_lock& lk) {
+ dassert( &lk.m() == &BY );
+ return _val;
+ }
+ };
+
+ // todo: rename this to ThreadSafeString or something
+ /** there is now one mutex per DiagStr. If you have hundreds or millions of
+ DiagStrs you'll need to do something different.
+ */
+ class DiagStr {
+ mutable SpinLock m;
+ string _s;
+ public:
+ DiagStr(const DiagStr& r) : _s(r.get()) { }
+ DiagStr(const string& r) : _s(r) { }
+ DiagStr() { }
+ bool empty() const {
+ scoped_spinlock lk(m);
+ return _s.empty();
+ }
+ string get() const {
+ scoped_spinlock lk(m);
+ return _s;
+ }
+ void set(const char *s) {
+ scoped_spinlock lk(m);
+ _s = s;
+ }
+ void set(const string& s) {
+ scoped_spinlock lk(m);
+ _s = s;
+ }
+ operator string() const { return get(); }
+ void operator=(const string& s) { set(s); }
+ void operator=(const DiagStr& rhs) {
+ set( rhs.get() );
+ }
+
+        // == is not defined. use get() == ... instead. done this way so one thinks about it when composing multiple operations
+ bool operator==(const string& s) const;
+ };
+
+ /** Thread safe map.
+ Be careful not to use this too much or it could make things slow;
+ if not a hot code path no problem.
+
+ Examples:
+
+ mapsf<int,int> mp;
+
+       int x = mp.get(9);
+
+ map<int,int> two;
+ mp.swap(two);
+
+ {
+ mapsf<int,int>::ref r(mp);
+ r[9] = 1;
+ map<int,int>::iterator i = r.r.begin();
+ }
+
+ */
+ template< class K, class V >
+ struct mapsf : boost::noncopyable {
+ SimpleMutex m;
+ map<K,V> val;
+ friend struct ref;
+ public:
+ mapsf() : m("mapsf") { }
+ void swap(map<K,V>& rhs) {
+ SimpleMutex::scoped_lock lk(m);
+ val.swap(rhs);
+ }
+ bool empty() {
+ SimpleMutex::scoped_lock lk(m);
+ return val.empty();
+ }
+ // safe as we pass by value:
+ V get(K k) {
+ SimpleMutex::scoped_lock lk(m);
+ typename map<K,V>::iterator i = val.find(k);
+ if( i == val.end() )
+ return V();
+ return i->second;
+ }
+ // think about deadlocks when using ref. the other methods
+ // above will always be safe as they are "leaf" operations.
+ struct ref {
+ SimpleMutex::scoped_lock lk;
+ public:
+ map<K,V> &r;
+ ref(mapsf<K,V> &m) : lk(m.m), r(m.val) { }
+ V& operator[](const K& k) { return r[k]; }
+ };
+ };
+
+}
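
A sketch of Guarded and DiagStr in use; the mutex and field names are illustrative, and SimpleMutex is assumed to take a name string, as mapsf does above:

    mongo::SimpleMutex configMutex("configMutex");
    mongo::Guarded<int, configMutex> maxConns;          // "maxConns is guarded by configMutex"

    void setMaxConns(int n) {
        mongo::SimpleMutex::scoped_lock lk(configMutex);
        maxConns.ref(lk) = n;                            // dassert()s it is the right lock in _DEBUG
    }

    mongo::DiagStr lastError;                            // a string with its own internal SpinLock
    void noteError(const std::string& msg) { lastError = msg; }
    std::string readError()                { return lastError.get(); }
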
diff --git a/src/mongo/util/concurrency/vars.cpp b/src/mongo/util/concurrency/vars.cpp
new file mode 100644
index 00000000000..0b2fc960c04
--- /dev/null
+++ b/src/mongo/util/concurrency/vars.cpp
@@ -0,0 +1,56 @@
+// vars.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "mutex.h"
+#include "value.h"
+
+namespace mongo {
+
+#if defined(_DEBUG)
+
+ // intentional leak. otherwise destructor orders can be problematic at termination.
+ MutexDebugger &mutexDebugger = *(new MutexDebugger());
+
+ MutexDebugger::MutexDebugger() :
+ x( *(new boost::mutex()) ), magic(0x12345678) {
+ // optional way to debug lock order
+ /*
+ a = "a_lock";
+ b = "b_lock";
+ */
+ }
+
+ void MutexDebugger::programEnding() {
+ if( logLevel>=1 && followers.size() ) {
+ std::cout << followers.size() << " mutexes in program" << endl;
+ for( map< mid, set<mid> >::iterator i = followers.begin(); i != followers.end(); i++ ) {
+ cout << i->first;
+ if( maxNest[i->first] > 1 )
+ cout << " maxNest:" << maxNest[i->first];
+ cout << '\n';
+ for( set<mid>::iterator j = i->second.begin(); j != i->second.end(); j++ )
+ cout << " " << *j << '\n';
+ }
+ cout.flush();
+ }
+ }
+
+#endif
+
+}
diff --git a/src/mongo/util/debug_util.cpp b/src/mongo/util/debug_util.cpp
new file mode 100644
index 00000000000..8ba6534ef7c
--- /dev/null
+++ b/src/mongo/util/debug_util.cpp
@@ -0,0 +1,60 @@
+// debug_util.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "../db/cmdline.h"
+#include "../db/jsobj.h"
+
+namespace mongo {
+
+#if defined(USE_GDBSERVER)
+ /* Magic gdb trampoline
+ * Do not call directly! call setupSIGTRAPforGDB()
+ * Assumptions:
+ * 1) gdbserver is on your path
+ * 2) You have run "handle SIGSTOP noprint" in gdb
+ * 3) cmdLine.port + 2000 is free
+ */
+ void launchGDB(int) {
+ // Don't come back here
+ signal(SIGTRAP, SIG_IGN);
+
+ int newPort = cmdLine.port + 2000;
+ string newPortStr = "localhost:" + BSONObjBuilder::numStr(newPort);
+ string pidToDebug = BSONObjBuilder::numStr(getpid());
+
+ cout << "\n\n\t**** Launching gdbserver on " << newPortStr << " ****" << endl << endl;
+ if (fork() == 0) {
+ //child
+ execlp("gdbserver", "gdbserver", "--attach", newPortStr.c_str(), pidToDebug.c_str(), NULL);
+ perror(NULL);
+ }
+ else {
+ //parent
+ raise(SIGSTOP); // pause all threads until gdb connects and continues
+ raise(SIGTRAP); // break inside gdbserver
+ }
+ }
+
+ void setupSIGTRAPforGDB() {
+ assert( signal(SIGTRAP , launchGDB ) != SIG_ERR );
+ }
+#else
+ void setupSIGTRAPforGDB() {
+ }
+#endif
+}
diff --git a/src/mongo/util/debug_util.h b/src/mongo/util/debug_util.h
new file mode 100644
index 00000000000..abed8d94924
--- /dev/null
+++ b/src/mongo/util/debug_util.h
@@ -0,0 +1,106 @@
+// debug_util.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#ifndef _WIN32
+#include <signal.h>
+#endif
+
+namespace mongo {
+
+// for debugging
+ typedef struct _Ints {
+ int i[100];
+ } *Ints;
+ typedef struct _Chars {
+ char c[200];
+ } *Chars;
+
+ typedef char CHARS[400];
+
+ typedef struct _OWS {
+ int size;
+ char type;
+ char string[400];
+ } *OWS;
+
+#if defined(_DEBUG)
+ enum {DEBUG_BUILD = 1};
+#else
+ enum {DEBUG_BUILD = 0};
+#endif
+
+#define MONGO_DEV if( DEBUG_BUILD )
+#define DEV MONGO_DEV
+
+#define MONGO_DEBUGGING if( 0 )
+#define DEBUGGING MONGO_DEBUGGING
+
+// The following declare one unique counter per enclosing function.
+// NOTE The implementation double-increments on a match, but we don't really care.
+#define MONGO_SOMETIMES( occasion, howOften ) for( static unsigned occasion = 0; ++occasion % howOften == 0; )
+#define SOMETIMES MONGO_SOMETIMES
+
+#define MONGO_OCCASIONALLY SOMETIMES( occasionally, 16 )
+#define OCCASIONALLY MONGO_OCCASIONALLY
+
+#define MONGO_RARELY SOMETIMES( rarely, 128 )
+#define RARELY MONGO_RARELY
+
+#define MONGO_ONCE for( static bool undone = true; undone; undone = false )
+#define ONCE MONGO_ONCE
+
+#if defined(_WIN32)
+ inline int strcasecmp(const char* s1, const char* s2) {return _stricmp(s1, s2);}
+#endif
+
+ // Sets SIGTRAP handler to launch GDB
+ // Noop unless on *NIX and compiled with _DEBUG
+ void setupSIGTRAPforGDB();
+
+ extern int tlogLevel;
+
+ inline void breakpoint() {
+ if ( tlogLevel < 0 )
+ return;
+#ifdef _WIN32
+ //DEV DebugBreak();
+#endif
+#ifndef _WIN32
+ // code to raise a breakpoint in GDB
+ ONCE {
+ //prevent SIGTRAP from crashing the program if default action is specified and we are not in gdb
+ struct sigaction current;
+ sigaction(SIGTRAP, NULL, &current);
+ if (current.sa_handler == SIG_DFL) {
+ signal(SIGTRAP, SIG_IGN);
+ }
+ }
+
+ raise(SIGTRAP);
+#endif
+ }
+
+
+ // conditional breakpoint
+ inline void breakif(bool test) {
+ if (test)
+ breakpoint();
+ }
+
+} // namespace mongo
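
A sketch of the throttled-logging macros above in a hot loop, assuming the usual log() stream helper is in scope; the loop body is illustrative:

    void pollLoop() {
        for (;;) {
            // ... hot-path work ...
            ONCE         log() << "poll loop started" << endl;   // first pass through this line only
            OCCASIONALLY log() << "still polling"     << endl;   // roughly every 16th pass
            RARELY       log() << "heartbeat"         << endl;   // roughly every 128th pass
        }
    }
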
diff --git a/src/mongo/util/embedded_builder.h b/src/mongo/util/embedded_builder.h
new file mode 100644
index 00000000000..abf518e2583
--- /dev/null
+++ b/src/mongo/util/embedded_builder.h
@@ -0,0 +1,92 @@
+// embedded_builder.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+namespace mongo {
+
+ // utility class for assembling hierarchical objects
+ class EmbeddedBuilder {
+ public:
+ EmbeddedBuilder( BSONObjBuilder *b ) {
+ _builders.push_back( make_pair( "", b ) );
+ }
+ // It is assumed that the calls to prepareContext will be made with the 'name'
+ // parameter in lex ascending order.
+ void prepareContext( string &name ) {
+ int i = 1, n = _builders.size();
+ while( i < n &&
+ name.substr( 0, _builders[ i ].first.length() ) == _builders[ i ].first &&
+ ( name[ _builders[i].first.length() ] == '.' || name[ _builders[i].first.length() ] == 0 )
+ ) {
+ name = name.substr( _builders[ i ].first.length() + 1 );
+ ++i;
+ }
+ for( int j = n - 1; j >= i; --j ) {
+ popBuilder();
+ }
+ for( string next = splitDot( name ); !next.empty(); next = splitDot( name ) ) {
+ addBuilder( next );
+ }
+ }
+ void appendAs( const BSONElement &e, string name ) {
+ if ( e.type() == Object && e.valuesize() == 5 ) { // empty object -- this way we can add to it later
+ string dummyName = name + ".foo";
+ prepareContext( dummyName );
+ return;
+ }
+ prepareContext( name );
+ back()->appendAs( e, name );
+ }
+ BufBuilder &subarrayStartAs( string name ) {
+ prepareContext( name );
+ return back()->subarrayStart( name );
+ }
+ void done() {
+ while( ! _builderStorage.empty() )
+ popBuilder();
+ }
+
+ static string splitDot( string & str ) {
+ size_t pos = str.find( '.' );
+ if ( pos == string::npos )
+ return "";
+ string ret = str.substr( 0, pos );
+ str = str.substr( pos + 1 );
+ return ret;
+ }
+
+ private:
+ void addBuilder( const string &name ) {
+ shared_ptr< BSONObjBuilder > newBuilder( new BSONObjBuilder( back()->subobjStart( name ) ) );
+ _builders.push_back( make_pair( name, newBuilder.get() ) );
+ _builderStorage.push_back( newBuilder );
+ }
+ void popBuilder() {
+ back()->done();
+ _builders.pop_back();
+ _builderStorage.pop_back();
+ }
+
+ BSONObjBuilder *back() { return _builders.back().second; }
+
+ vector< pair< string, BSONObjBuilder * > > _builders;
+ vector< shared_ptr< BSONObjBuilder > > _builderStorage;
+
+ };
+
+} //namespace mongo
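
A sketch of EmbeddedBuilder expanding dotted field names into a nested object; the input shape is illustrative, and per the comment above the dotted names must arrive in ascending lexicographic order:

    // e.g. { "a.b": 1, "a.c": 2 }  ->  { a: { b: 1, c: 2 } }
    mongo::BSONObj expandDots(const mongo::BSONObj& flat) {
        mongo::BSONObjBuilder b;
        mongo::EmbeddedBuilder eb(&b);
        mongo::BSONObjIterator it(flat);
        while (it.more()) {
            mongo::BSONElement e = it.next();
            eb.appendAs(e, e.fieldName());
        }
        eb.done();
        return b.obj();
    }
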
diff --git a/src/mongo/util/file.h b/src/mongo/util/file.h
new file mode 100644
index 00000000000..368e6927b43
--- /dev/null
+++ b/src/mongo/util/file.h
@@ -0,0 +1,230 @@
+// file.h cross platform basic file class. supports 64 bit offsets and such.
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#if !defined(_WIN32)
+#include "errno.h"
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/statvfs.h>
+#endif
+#include "text.h"
+
+namespace mongo {
+
+#ifndef __sunos__
+ typedef uint64_t fileofs;
+#else
+ typedef boost::uint64_t fileofs;
+#endif
+
+    /* NOTE: not thread-safe. (at least the windows implementation isn't.) */
+
+ class FileInterface {
+ public:
+ void open(const char *fn) {}
+ void write(fileofs o, const char *data, unsigned len) {}
+ void read(fileofs o, char *data, unsigned len) {}
+ bool bad() {return false;}
+ bool is_open() {return false;}
+ fileofs len() { return 0; }
+ void fsync() { assert(false); }
+
+ // shrink file to size bytes. No-op if file already smaller.
+ void truncate(fileofs size);
+
+ /** @return -1 if error or unavailable */
+ static boost::intmax_t freeSpace(const string &path) { assert(false); return -1; }
+ };
+
+#if defined(_WIN32)
+#include <io.h>
+
+ class File : public FileInterface {
+ HANDLE fd;
+ bool _bad;
+ string _name;
+ void err(BOOL b=false) { /* false = error happened */
+ if( !b && !_bad ) {
+ _bad = true;
+                log() << "File " << _name << " I/O error " << GetLastError() << '\n';
+ }
+ }
+ public:
+ File() {
+ fd = INVALID_HANDLE_VALUE;
+ _bad = true;
+ }
+ ~File() {
+ if( is_open() ) CloseHandle(fd);
+ fd = INVALID_HANDLE_VALUE;
+ }
+ void open(const char *filename, bool readOnly=false , bool direct=false) {
+ _name = filename;
+ fd = CreateFile(
+ toNativeString(filename).c_str(),
+ ( readOnly ? 0 : GENERIC_WRITE ) | GENERIC_READ, FILE_SHARE_WRITE|FILE_SHARE_READ,
+ NULL, OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
+ if( !is_open() ) {
+ DWORD e = GetLastError();
+ log() << "Create/Open File failed " << filename << ' ' << errnoWithDescription(e) << endl;
+ }
+ else
+ _bad = false;
+ }
+ static boost::intmax_t freeSpace(const string &path) {
+ ULARGE_INTEGER avail;
+ if( GetDiskFreeSpaceEx(toNativeString(path.c_str()).c_str(), &avail, NULL, NULL) ) {
+ return avail.QuadPart;
+ }
+ DWORD e = GetLastError();
+ log() << "GetDiskFreeSpaceEx fails errno: " << e << endl;
+ return -1;
+ }
+ void write(fileofs o, const char *data, unsigned len) {
+ LARGE_INTEGER li;
+ li.QuadPart = o;
+ SetFilePointerEx(fd, li, NULL, FILE_BEGIN);
+ DWORD written;
+ err( WriteFile(fd, data, len, &written, NULL) );
+ }
+ void read(fileofs o, char *data, unsigned len) {
+ DWORD read;
+ LARGE_INTEGER li;
+ li.QuadPart = o;
+ SetFilePointerEx(fd, li, NULL, FILE_BEGIN);
+ int ok = ReadFile(fd, data, len, &read, 0);
+ if( !ok )
+ err(ok);
+ else
+ massert( 10438 , "ReadFile error - truncated file?", read == len);
+ }
+ bool bad() { return _bad; }
+ bool is_open() { return fd != INVALID_HANDLE_VALUE; }
+ fileofs len() {
+ LARGE_INTEGER li;
+ li.LowPart = GetFileSize(fd, (DWORD *) &li.HighPart);
+ if( li.HighPart == 0 && li.LowPart == INVALID_FILE_SIZE ) {
+ err( false );
+ return 0;
+ }
+ return li.QuadPart;
+ }
+ void fsync() { FlushFileBuffers(fd); }
+
+ void truncate(fileofs size) {
+ if (len() <= size)
+ return;
+
+ LARGE_INTEGER li;
+ li.QuadPart = size;
+ if (SetFilePointerEx(fd, li, NULL, FILE_BEGIN) == 0){
+ err(false);
+ return; //couldn't seek
+ }
+
+ err(SetEndOfFile(fd));
+ }
+ };
+
+#else
+
+ class File : public FileInterface {
+ public:
+ int fd;
+ private:
+ bool _bad;
+ void err(bool ok) {
+ if( !ok && !_bad ) {
+ _bad = true;
+ log() << "File I/O " << errnoWithDescription() << '\n';
+ }
+ }
+ public:
+ File() {
+ fd = -1;
+ _bad = true;
+ }
+ ~File() {
+ if( is_open() ) ::close(fd);
+ fd = -1;
+ }
+
+#ifndef O_NOATIME
+#define O_NOATIME 0
+#endif
+
+ void open(const char *filename, bool readOnly=false , bool direct=false) {
+ fd = ::open(filename,
+ O_CREAT | ( readOnly ? 0 : ( O_RDWR | O_NOATIME ) )
+#if defined(O_DIRECT)
+ | ( direct ? O_DIRECT : 0 )
+#endif
+ ,
+ S_IRUSR | S_IWUSR);
+ if ( fd <= 0 ) {
+ out() << "couldn't open " << filename << ' ' << errnoWithDescription() << endl;
+ return;
+ }
+ _bad = false;
+ }
+ void write(fileofs o, const char *data, unsigned len) {
+ err( ::pwrite(fd, data, len, o) == (int) len );
+ }
+ void read(fileofs o, char *data, unsigned len) {
+ ssize_t s = ::pread(fd, data, len, o);
+ if( s == -1 ) {
+ err(false);
+ }
+ else if( s != (int) len ) {
+ _bad = true;
+ log() << "File error read:" << s << " bytes, wanted:" << len << " ofs:" << o << endl;
+ }
+ }
+ bool bad() { return _bad; }
+ bool is_open() { return fd > 0; }
+ fileofs len() {
+ off_t o = lseek(fd, 0, SEEK_END);
+ if( o != (off_t) -1 )
+ return o;
+ err(false);
+ return 0;
+ }
+ void fsync() { ::fsync(fd); }
+ static boost::intmax_t freeSpace ( const string &path ) {
+ struct statvfs info;
+ assert( !statvfs( path.c_str() , &info ) );
+ return boost::intmax_t( info.f_bavail ) * info.f_frsize;
+ }
+
+ void truncate(fileofs size) {
+ if (len() <= size)
+ return;
+
+ err(ftruncate(fd, size) == 0);
+ }
+ };
+
+
+#endif
+
+
+}
+
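A sketch of the File wrapper round-tripping a small buffer; the path is illustrative:

    void touchFile() {
        mongo::File f;
        f.open("/tmp/example.dat");             // created if missing, read/write
        if (f.bad() || !f.is_open())
            return;
        const char data[] = "hello";
        f.write(0, data, sizeof(data));         // write at offset 0
        f.fsync();
        char buf[sizeof(data)];
        f.read(0, buf, sizeof(buf));            // read it back
    }
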
diff --git a/src/mongo/util/file_allocator.cpp b/src/mongo/util/file_allocator.cpp
new file mode 100644
index 00000000000..b0572f971bd
--- /dev/null
+++ b/src/mongo/util/file_allocator.cpp
@@ -0,0 +1,329 @@
+// @file file_allocator.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include <fcntl.h>
+#include <errno.h>
+
+#if defined(__freebsd__) || defined(__openbsd__)
+#include <sys/stat.h>
+#endif
+
+#include "timer.h"
+#include "mongoutils/str.h"
+using namespace mongoutils;
+
+#ifndef O_NOATIME
+#define O_NOATIME (0)
+#endif
+
+#include "file_allocator.h"
+#include "paths.h"
+
+namespace mongo {
+
+ boost::filesystem::path ensureParentDirCreated(const boost::filesystem::path& p){
+ const boost::filesystem::path parent = p.branch_path();
+
+ if (! boost::filesystem::exists(parent)){
+ ensureParentDirCreated(parent);
+ log() << "creating directory " << parent.string() << endl;
+ boost::filesystem::create_directory(parent);
+ flushMyDirectory(parent); // flushes grandparent to ensure parent exists after crash
+ }
+
+ assert(boost::filesystem::is_directory(parent));
+ return parent;
+ }
+
+#if defined(_WIN32)
+ FileAllocator::FileAllocator() {
+ }
+
+ void FileAllocator::start() {
+ }
+
+ void FileAllocator::requestAllocation( const string &name, long &size ) {
+ /* Some of the system calls in the file allocator don't work in win,
+ so no win support - 32 or 64 bit. Plus we don't seem to need preallocation
+ on windows anyway as we don't have to pre-zero the file there.
+ */
+ }
+
+ void FileAllocator::allocateAsap( const string &name, unsigned long long &size ) {
+ // no-op
+ }
+
+ void FileAllocator::waitUntilFinished() const {
+ // no-op
+ }
+
+ void FileAllocator::ensureLength(int fd , long size) {
+ // we don't zero on windows
+        // TODO : we should, to avoid fragmentation
+ }
+
+ bool FileAllocator::hasFailed() const {
+ return false;
+ }
+
+#else
+
+ FileAllocator::FileAllocator()
+ : _pendingMutex("FileAllocator"), _failed() {
+ }
+
+
+ void FileAllocator::start() {
+ boost::thread t( boost::bind( &FileAllocator::run , this ) );
+ }
+
+ void FileAllocator::requestAllocation( const string &name, long &size ) {
+ scoped_lock lk( _pendingMutex );
+ if ( _failed )
+ return;
+ long oldSize = prevSize( name );
+ if ( oldSize != -1 ) {
+ size = oldSize;
+ return;
+ }
+ _pending.push_back( name );
+ _pendingSize[ name ] = size;
+ _pendingUpdated.notify_all();
+ }
+
+ void FileAllocator::allocateAsap( const string &name, unsigned long long &size ) {
+ scoped_lock lk( _pendingMutex );
+ long oldSize = prevSize( name );
+ if ( oldSize != -1 ) {
+ size = oldSize;
+ if ( !inProgress( name ) )
+ return;
+ }
+ checkFailure();
+ _pendingSize[ name ] = size;
+ if ( _pending.size() == 0 )
+ _pending.push_back( name );
+ else if ( _pending.front() != name ) {
+ _pending.remove( name );
+ list< string >::iterator i = _pending.begin();
+ ++i;
+ _pending.insert( i, name );
+ }
+ _pendingUpdated.notify_all();
+ while( inProgress( name ) ) {
+ checkFailure();
+ _pendingUpdated.wait( lk.boost() );
+ }
+
+ }
+
+ void FileAllocator::waitUntilFinished() const {
+ if ( _failed )
+ return;
+ scoped_lock lk( _pendingMutex );
+ while( _pending.size() != 0 )
+ _pendingUpdated.wait( lk.boost() );
+ }
+
+ void FileAllocator::ensureLength(int fd , long size) {
+#if defined(__linux__)
+ int ret = posix_fallocate(fd,0,size);
+ if ( ret == 0 )
+ return;
+
+ log() << "FileAllocator: posix_fallocate failed: " << errnoWithDescription( ret ) << " falling back" << endl;
+#endif
+
+ off_t filelen = lseek(fd, 0, SEEK_END);
+ if ( filelen < size ) {
+ if (filelen != 0) {
+ stringstream ss;
+ ss << "failure creating new datafile; lseek failed for fd " << fd << " with errno: " << errnoWithDescription();
+ uassert( 10440 , ss.str(), filelen == 0 );
+ }
+ // Check for end of disk.
+
+ uassert( 10441 , str::stream() << "Unable to allocate new file of size " << size << ' ' << errnoWithDescription(),
+ size - 1 == lseek(fd, size - 1, SEEK_SET) );
+ uassert( 10442 , str::stream() << "Unable to allocate new file of size " << size << ' ' << errnoWithDescription(),
+ 1 == write(fd, "", 1) );
+ lseek(fd, 0, SEEK_SET);
+
+ const long z = 256 * 1024;
+ const boost::scoped_array<char> buf_holder (new char[z]);
+ char* buf = buf_holder.get();
+ memset(buf, 0, z);
+ long left = size;
+ while ( left > 0 ) {
+ long towrite = left;
+ if ( towrite > z )
+ towrite = z;
+
+ int written = write( fd , buf , towrite );
+ uassert( 10443 , errnoWithPrefix("FileAllocator: file write failed" ), written > 0 );
+ left -= written;
+ }
+ }
+ }
+
+ bool FileAllocator::hasFailed() const {
+ return _failed;
+ }
+
+ void FileAllocator::checkFailure() {
+ if (_failed) {
+            // we want to log the problem (diskfull.js expects it) but we do not want to dump a stack trace
+ msgassertedNoTrace( 12520, "new file allocation failure" );
+ }
+ }
+
+ long FileAllocator::prevSize( const string &name ) const {
+ if ( _pendingSize.count( name ) > 0 )
+ return _pendingSize[ name ];
+ if ( boost::filesystem::exists( name ) )
+ return boost::filesystem::file_size( name );
+ return -1;
+ }
+
+ // caller must hold _pendingMutex lock.
+ bool FileAllocator::inProgress( const string &name ) const {
+ for( list< string >::const_iterator i = _pending.begin(); i != _pending.end(); ++i )
+ if ( *i == name )
+ return true;
+ return false;
+ }
+
+ string makeTempFileName( path root ) {
+ while( 1 ) {
+ path p = root / "_tmp";
+ stringstream ss;
+ ss << (unsigned) rand();
+ p /= ss.str();
+ string fn = p.string();
+ if( !boost::filesystem::exists(p) )
+ return fn;
+ }
+ return "";
+ }
+
+ void FileAllocator::run( FileAllocator * fa ) {
+ setThreadName( "FileAllocator" );
+ while( 1 ) {
+ {
+ scoped_lock lk( fa->_pendingMutex );
+ if ( fa->_pending.size() == 0 )
+ fa->_pendingUpdated.wait( lk.boost() );
+ }
+ while( 1 ) {
+ string name;
+ long size;
+ {
+ scoped_lock lk( fa->_pendingMutex );
+ if ( fa->_pending.size() == 0 )
+ break;
+ name = fa->_pending.front();
+ size = fa->_pendingSize[ name ];
+ }
+
+ string tmp;
+ long fd = 0;
+ try {
+ log() << "allocating new datafile " << name << ", filling with zeroes..." << endl;
+
+ boost::filesystem::path parent = ensureParentDirCreated(name);
+ tmp = makeTempFileName( parent );
+ ensureParentDirCreated(tmp);
+
+ fd = open(tmp.c_str(), O_CREAT | O_RDWR | O_NOATIME, S_IRUSR | S_IWUSR);
+ if ( fd <= 0 ) {
+ log() << "FileAllocator: couldn't create " << name << " (" << tmp << ") " << errnoWithDescription() << endl;
+ uasserted(10439, "");
+ }
+
+#if defined(POSIX_FADV_DONTNEED)
+ if( posix_fadvise(fd, 0, size, POSIX_FADV_DONTNEED) ) {
+ log() << "warning: posix_fadvise fails " << name << " (" << tmp << ") " << errnoWithDescription() << endl;
+ }
+#endif
+
+ Timer t;
+
+ /* make sure the file is the full desired length */
+ ensureLength( fd , size );
+
+ close( fd );
+ fd = 0;
+
+ if( rename(tmp.c_str(), name.c_str()) ) {
+ log() << "error: couldn't rename " << tmp << " to " << name << ' ' << errnoWithDescription() << endl;
+ uasserted(13653, "");
+ }
+ flushMyDirectory(name);
+
+ log() << "done allocating datafile " << name << ", "
+ << "size: " << size/1024/1024 << "MB, "
+ << " took " << ((double)t.millis())/1000.0 << " secs"
+ << endl;
+
+ // no longer in a failed state. allow new writers.
+ fa->_failed = false;
+ }
+ catch ( ... ) {
+ if ( fd > 0 )
+ close( fd );
+ log() << "error failed to allocate new file: " << name
+ << " size: " << size << ' ' << errnoWithDescription() << warnings;
+ log() << " will try again in 10 seconds" << endl; // not going to warning logs
+ try {
+ if ( tmp.size() )
+ BOOST_CHECK_EXCEPTION( boost::filesystem::remove( tmp ) );
+ BOOST_CHECK_EXCEPTION( boost::filesystem::remove( name ) );
+ }
+ catch ( ... ) {
+ }
+ scoped_lock lk( fa->_pendingMutex );
+ fa->_failed = true;
+ // not erasing from pending
+ fa->_pendingUpdated.notify_all();
+
+
+ sleepsecs(10);
+ continue;
+ }
+
+ {
+ scoped_lock lk( fa->_pendingMutex );
+ fa->_pendingSize.erase( name );
+ fa->_pending.pop_front();
+ fa->_pendingUpdated.notify_all();
+ }
+ }
+ }
+ }
+
+#endif
+
+ FileAllocator* FileAllocator::_instance = 0;
+
+ FileAllocator* FileAllocator::get(){
+ if ( ! _instance )
+ _instance = new FileAllocator();
+ return _instance;
+ }
+
+} // namespace mongo
diff --git a/src/mongo/util/file_allocator.h b/src/mongo/util/file_allocator.h
new file mode 100644
index 00000000000..7c3cacb2888
--- /dev/null
+++ b/src/mongo/util/file_allocator.h
@@ -0,0 +1,91 @@
+// @file file_allocator.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "../pch.h"
+
+namespace mongo {
+
+ /*
+ * Handles allocation of contiguous files on disk. Allocation may be
+ * requested asynchronously or synchronously.
+ * singleton
+ */
+ class FileAllocator : boost::noncopyable {
+ /*
+ * The public functions may not be called concurrently. The allocation
+ * functions may be called multiple times per file, but only the first
+ * size specified per file will be used.
+ */
+ public:
+ void start();
+
+ /**
+ * May be called if file exists. If file exists, or its allocation has
+ * been requested, size is updated to match existing file size.
+ */
+ void requestAllocation( const string &name, long &size );
+
+
+ /**
+ * Returns when file has been allocated. If file exists, size is
+ * updated to match existing file size.
+ */
+ void allocateAsap( const string &name, unsigned long long &size );
+
+ void waitUntilFinished() const;
+
+ bool hasFailed() const;
+
+ static void ensureLength(int fd , long size);
+
+        /** @return the singleton */
+ static FileAllocator * get();
+
+ private:
+
+ FileAllocator();
+
+#if !defined(_WIN32)
+ void checkFailure();
+
+        // caller must hold _pendingMutex lock. Returns size if allocated or
+ // allocation requested, -1 otherwise.
+ long prevSize( const string &name ) const;
+
+        // caller must hold _pendingMutex lock.
+ bool inProgress( const string &name ) const;
+
+        /** called from the worker thread */
+ static void run( FileAllocator * fa );
+
+ mutable mongo::mutex _pendingMutex;
+ mutable boost::condition _pendingUpdated;
+
+ list< string > _pending;
+ mutable map< string, long > _pendingSize;
+
+ bool _failed;
+#endif
+
+ static FileAllocator* _instance;
+
+ };
+
+ /** like "mkdir -p" but on parent dir of p rather than p itself */
+ boost::filesystem::path ensureParentDirCreated(const boost::filesystem::path& p);
+
+} // namespace mongo
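
A sketch of the singleton allocator API above; paths and sizes are illustrative, and on Windows these calls are no-ops as noted in the .cpp:

    void preallocateDataFiles() {
        mongo::FileAllocator* fa = mongo::FileAllocator::get();
        fa->start();                                          // spawn the background thread once

        long size = 64 * 1024 * 1024;
        fa->requestAllocation("/data/db/example.0", size);    // queued; size is updated if the file exists

        unsigned long long urgent = 16 * 1024 * 1024;
        fa->allocateAsap("/data/db/example.ns", urgent);      // moved to the front of the queue, blocks until done

        fa->waitUntilFinished();                              // drain any remaining requests
    }
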
diff --git a/src/mongo/util/goodies.h b/src/mongo/util/goodies.h
new file mode 100644
index 00000000000..9398b5c3f1d
--- /dev/null
+++ b/src/mongo/util/goodies.h
@@ -0,0 +1,475 @@
+// @file goodies.h
+// miscellaneous
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "../bson/util/misc.h"
+#include "concurrency/mutex.h"
+
+namespace mongo {
+
+ /* @return a dump of the buffer as hex byte ascii output */
+ string hexdump(const char *data, unsigned len);
+
+ /**
+ * @return if this name has an increasing counter associated, return the value
+     * @return the value of the increasing counter associated with this name, if any;
+     * otherwise 0
+ unsigned setThreadName(const char * name);
+ string getThreadName();
+
+ template<class T>
+ inline string ToString(const T& t) {
+ stringstream s;
+ s << t;
+ return s.str();
+ }
+
+#if !defined(_WIN32) && !defined(NOEXECINFO) && !defined(__freebsd__) && !defined(__openbsd__) && !defined(__sun__)
+
+} // namespace mongo
+
+#include <pthread.h>
+#include <execinfo.h>
+
+namespace mongo {
+
+ inline pthread_t GetCurrentThreadId() {
+ return pthread_self();
+ }
+
+ /* use "addr2line -CFe <exe>" to parse. */
+ inline void printStackTrace( ostream &o = cout ) {
+ void *b[20];
+
+ int size = backtrace(b, 20);
+ for (int i = 0; i < size; i++)
+ o << hex << b[i] << dec << ' ';
+ o << endl;
+
+ char **strings;
+
+ strings = backtrace_symbols(b, size);
+ for (int i = 0; i < size; i++)
+ o << ' ' << strings[i] << '\n';
+ o.flush();
+ free (strings);
+ }
+#else
+ inline void printStackTrace( ostream &o = cout ) { }
+#endif
+
+ bool isPrime(int n);
+ int nextPrime(int n);
+
+ inline void dumpmemory(const char *data, int len) {
+ if ( len > 1024 )
+ len = 1024;
+ try {
+ const char *q = data;
+ const char *p = q;
+ while ( len > 0 ) {
+ for ( int i = 0; i < 16; i++ ) {
+ if ( *p >= 32 && *p <= 126 )
+ cout << *p;
+ else
+ cout << '.';
+ p++;
+ }
+ cout << " ";
+ p -= 16;
+ for ( int i = 0; i < 16; i++ )
+ cout << (unsigned) ((unsigned char)*p++) << ' ';
+ cout << endl;
+ len -= 16;
+ }
+ }
+ catch (...) {
+ }
+ }
+
+// PRINT(2+2); prints "2+2: 4"
+#define MONGO_PRINT(x) cout << #x ": " << (x) << endl
+#define PRINT MONGO_PRINT
+// PRINTFL; prints file:line
+#define MONGO_PRINTFL cout << __FILE__ ":" << __LINE__ << endl
+#define PRINTFL MONGO_PRINTFL
+#define MONGO_FLOG log() << __FILE__ ":" << __LINE__ << endl
+#define FLOG MONGO_FLOG
+
+#undef assert
+#define assert MONGO_assert
+
+ inline bool startsWith(const char *str, const char *prefix) {
+ size_t l = strlen(prefix);
+ if ( strlen(str) < l ) return false;
+ return strncmp(str, prefix, l) == 0;
+ }
+ inline bool startsWith(string s, string p) { return startsWith(s.c_str(), p.c_str()); }
+
+ inline bool endsWith(const char *p, const char *suffix) {
+ size_t a = strlen(p);
+ size_t b = strlen(suffix);
+ if ( b > a ) return false;
+ return strcmp(p + a - b, suffix) == 0;
+ }
+
+ inline unsigned long swapEndian(unsigned long x) {
+ return
+ ((x & 0xff) << 24) |
+ ((x & 0xff00) << 8) |
+ ((x & 0xff0000) >> 8) |
+ ((x & 0xff000000) >> 24);
+ }
+
+#if defined(BOOST_LITTLE_ENDIAN)
+ inline unsigned long fixEndian(unsigned long x) {
+ return x;
+ }
+#else
+ inline unsigned long fixEndian(unsigned long x) {
+ return swapEndian(x);
+ }
+#endif
+
+#if !defined(_WIN32)
+ typedef int HANDLE;
+ inline void strcpy_s(char *dst, unsigned len, const char *src) {
+ assert( strlen(src) < len );
+ strcpy(dst, src);
+ }
+#else
+ typedef void *HANDLE;
+#endif
+
+ class ProgressMeter : boost::noncopyable {
+ public:
+ ProgressMeter( unsigned long long total , int secondsBetween = 3 , int checkInterval = 100 , string units = "" ) : _units(units) {
+ reset( total , secondsBetween , checkInterval );
+ }
+
+ ProgressMeter() {
+ _active = 0;
+ _units = "";
+ }
+
+        // typically you use a ProgressMeterHolder to manage the meter's lifecycle
+ void reset( unsigned long long total , int secondsBetween = 3 , int checkInterval = 100 ) {
+ _total = total;
+ _secondsBetween = secondsBetween;
+ _checkInterval = checkInterval;
+
+ _done = 0;
+ _hits = 0;
+ _lastTime = (int)time(0);
+
+ _active = 1;
+ }
+
+ void finished() {
+ _active = 0;
+ }
+
+ bool isActive() {
+ return _active;
+ }
+
+ /**
+ * @param n how far along we are relative to the total # we set in CurOp::setMessage
+         * @return true if a progress row was printed
+ */
+ bool hit( int n = 1 ) {
+ if ( ! _active ) {
+ cout << "warning: hit an inactive ProgressMeter" << endl;
+ return false;
+ }
+
+ _done += n;
+ _hits++;
+ if ( _hits % _checkInterval )
+ return false;
+
+ int t = (int) time(0);
+ if ( t - _lastTime < _secondsBetween )
+ return false;
+
+ if ( _total > 0 ) {
+ int per = (int)( ( (double)_done * 100.0 ) / (double)_total );
+ cout << "\t\t" << _done << "/" << _total << "\t" << per << "%";
+
+ if ( ! _units.empty() ) {
+ cout << "\t(" << _units << ")" << endl;
+ }
+ else {
+ cout << endl;
+ }
+ }
+ _lastTime = t;
+ return true;
+ }
+
+ void setUnits( string units ) {
+ _units = units;
+ }
+
+ void setTotalWhileRunning( unsigned long long total ) {
+ _total = total;
+ }
+
+ unsigned long long done() const { return _done; }
+
+ unsigned long long hits() const { return _hits; }
+
+ unsigned long long total() const { return _total; }
+
+ string toString() const {
+ if ( ! _active )
+ return "";
+ stringstream buf;
+ buf << _done << "/" << _total << " " << (_done*100)/_total << "%";
+
+ if ( ! _units.empty() ) {
+ buf << "\t(" << _units << ")" << endl;
+ }
+
+ return buf.str();
+ }
+
+ bool operator==( const ProgressMeter& other ) const {
+ return this == &other;
+ }
+ private:
+
+ bool _active;
+
+ unsigned long long _total;
+ int _secondsBetween;
+ int _checkInterval;
+
+ unsigned long long _done;
+ unsigned long long _hits;
+ int _lastTime;
+
+ string _units;
+ };
+
+ // e.g.:
+ // CurOp * op = cc().curop();
+ // ProgressMeterHolder pm( op->setMessage( "index: (1/3) external sort" , d->stats.nrecords , 10 ) );
+ // loop { pm.hit(); }
+ class ProgressMeterHolder : boost::noncopyable {
+ public:
+ ProgressMeterHolder( ProgressMeter& pm )
+ : _pm( pm ) {
+ }
+
+ ~ProgressMeterHolder() {
+ _pm.finished();
+ }
+
+ ProgressMeter* operator->() {
+ return &_pm;
+ }
+
+ bool hit( int n = 1 ) {
+ return _pm.hit( n );
+ }
+
+ void finished() {
+ _pm.finished();
+ }
+
+ bool operator==( const ProgressMeter& other ) {
+ return _pm == other;
+ }
+
+ private:
+ ProgressMeter& _pm;
+ };
+
+ class TicketHolder {
+ public:
+ TicketHolder( int num ) : _mutex("TicketHolder") {
+ _outof = num;
+ _num = num;
+ }
+
+ bool tryAcquire() {
+ scoped_lock lk( _mutex );
+ if ( _num <= 0 ) {
+ if ( _num < 0 ) {
+ cerr << "DISASTER! in TicketHolder" << endl;
+ }
+ return false;
+ }
+ _num--;
+ return true;
+ }
+
+ void release() {
+ scoped_lock lk( _mutex );
+ _num++;
+ }
+
+ void resize( int newSize ) {
+ scoped_lock lk( _mutex );
+ int used = _outof - _num;
+ if ( used > newSize ) {
+ cout << "ERROR: can't resize since we're using (" << used << ") more than newSize(" << newSize << ")" << endl;
+ return;
+ }
+
+ _outof = newSize;
+ _num = _outof - used;
+ }
+
+ int available() const {
+ return _num;
+ }
+
+ int used() const {
+ return _outof - _num;
+ }
+
+ int outof() const { return _outof; }
+
+ private:
+ int _outof;
+ int _num;
+ mongo::mutex _mutex;
+ };
+
+ class TicketHolderReleaser {
+ public:
+ TicketHolderReleaser( TicketHolder * holder ) {
+ _holder = holder;
+ }
+
+ ~TicketHolderReleaser() {
+ _holder->release();
+ }
+ private:
+ TicketHolder * _holder;
+ };
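+
+    /* Illustrative usage sketch for TicketHolder/TicketHolderReleaser (not part
+       of the original header):
+
+           TicketHolder tickets( 10 );                       // allow 10 concurrent users
+           if ( tickets.tryAcquire() ) {
+               TicketHolderReleaser releaser( &tickets );    // gives the ticket back on scope exit
+               // ... do the rate-limited work ...
+           }
+    */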
+
+
+ /**
+     * a thread-safe string: you will never get a bad pointer, though the data
+     * may be munged
+ */
+ class ThreadSafeString : boost::noncopyable {
+ public:
+ ThreadSafeString( size_t size=256 )
+ : _size( size ) , _buf( new char[size] ) {
+ memset( _buf , 0 , _size );
+ }
+
+ ThreadSafeString( const ThreadSafeString& other )
+ : _size( other._size ) , _buf( new char[_size] ) {
+ strncpy( _buf , other._buf , _size );
+ }
+
+ ~ThreadSafeString() {
+ delete[] _buf;
+ _buf = 0;
+ }
+
+ string toString() const {
+ string s = _buf;
+ return s;
+ }
+
+ ThreadSafeString& operator=( const char * str ) {
+ size_t s = strlen(str);
+ if ( s >= _size - 2 )
+ s = _size - 2;
+ strncpy( _buf , str , s );
+ _buf[s] = 0;
+ return *this;
+ }
+
+ bool operator==( const ThreadSafeString& other ) const {
+ return strcmp( _buf , other._buf ) == 0;
+ }
+
+ bool operator==( const char * str ) const {
+ return strcmp( _buf , str ) == 0;
+ }
+
+ bool operator!=( const char * str ) const {
+ return strcmp( _buf , str ) != 0;
+ }
+
+ bool empty() const {
+ return _buf == 0 || _buf[0] == 0;
+ }
+
+ private:
+ size_t _size;
+ char * _buf;
+ };
+
+ ostream& operator<<( ostream &s, const ThreadSafeString &o );
+
+ /** A generic pointer type for function arguments.
+ * It will convert from any pointer type except auto_ptr.
+ * Semantics are the same as passing the pointer returned from get()
+ * const ptr<T> => T * const
+ * ptr<const T> => T const * or const T*
+ */
+ template <typename T>
+ struct ptr {
+
+ ptr() : _p(NULL) {}
+
+ // convert to ptr<T>
+ ptr(T* p) : _p(p) {} // needed for NULL
+ template<typename U> ptr(U* p) : _p(p) {}
+ template<typename U> ptr(const ptr<U>& p) : _p(p) {}
+ template<typename U> ptr(const boost::shared_ptr<U>& p) : _p(p.get()) {}
+ template<typename U> ptr(const boost::scoped_ptr<U>& p) : _p(p.get()) {}
+ //template<typename U> ptr(const auto_ptr<U>& p) : _p(p.get()) {}
+
+ // assign to ptr<T>
+ ptr& operator= (T* p) { _p = p; return *this; } // needed for NULL
+ template<typename U> ptr& operator= (U* p) { _p = p; return *this; }
+ template<typename U> ptr& operator= (const ptr<U>& p) { _p = p; return *this; }
+ template<typename U> ptr& operator= (const boost::shared_ptr<U>& p) { _p = p.get(); return *this; }
+ template<typename U> ptr& operator= (const boost::scoped_ptr<U>& p) { _p = p.get(); return *this; }
+ //template<typename U> ptr& operator= (const auto_ptr<U>& p) { _p = p.get(); return *this; }
+
+ // use
+ T* operator->() const { return _p; }
+ T& operator*() const { return *_p; }
+
+ // convert from ptr<T>
+ operator T* () const { return _p; }
+
+ private:
+ T* _p;
+ };
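+
+    /* Illustrative sketch of ptr<T> as a function parameter (Widget and work()
+       are hypothetical, not part of this header):
+
+           void useWidget( ptr<const Widget> w ) {
+               if ( w )          // implicit conversion to Widget const *
+                   w->work();
+           }
+
+       Callers can then pass a raw Widget*, a shared_ptr<Widget>, or a
+       scoped_ptr<Widget> without any explicit conversion.
+    */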
+
+
+
+ using boost::shared_ptr;
+ using boost::scoped_ptr;
+ using boost::scoped_array;
+ using boost::intrusive_ptr;
+ using boost::bad_lexical_cast;
+ using boost::dynamic_pointer_cast;
+} // namespace mongo
diff --git a/src/mongo/util/hashtab.h b/src/mongo/util/hashtab.h
new file mode 100644
index 00000000000..f1a33068e07
--- /dev/null
+++ b/src/mongo/util/hashtab.h
@@ -0,0 +1,179 @@
+/* hashtab.h
+
+ Simple, fixed size hash table. Darn simple.
+
+ Uses a contiguous block of memory, so you can put it in a memory mapped file very easily.
+*/
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "../pch.h"
+#include <map>
+#include "../db/dur.h"
+
+namespace mongo {
+
+#pragma pack(1)
+
+ /* you should define:
+
+ int Key::hash() return > 0 always.
+ */
+
+ template <
+ class Key,
+ class Type
+ >
+ class HashTable : boost::noncopyable {
+ public:
+ const char *name;
+ struct Node {
+ int hash;
+ Key k;
+ Type value;
+ bool inUse() {
+ return hash != 0;
+ }
+ void setUnused() {
+ hash = 0;
+ }
+ };
+ void* _buf;
+ int n; // number of hashtable buckets
+ int maxChain;
+
+ Node& nodes(int i) {
+ Node *nodes = (Node *) _buf;
+ return nodes[i];
+ }
+
+ int _find(const Key& k, bool& found) {
+ found = false;
+ int h = k.hash();
+ int i = h % n;
+ int start = i;
+ int chain = 0;
+ int firstNonUsed = -1;
+ while ( 1 ) {
+ if ( !nodes(i).inUse() ) {
+ if ( firstNonUsed < 0 )
+ firstNonUsed = i;
+ }
+
+ if ( nodes(i).hash == h && nodes(i).k == k ) {
+ if ( chain >= 200 )
+ out() << "warning: hashtable " << name << " long chain " << endl;
+ found = true;
+ return i;
+ }
+ chain++;
+ i = (i+1) % n;
+ if ( i == start ) {
+ // shouldn't get here / defensive for infinite loops
+ out() << "error: hashtable " << name << " is full n:" << n << endl;
+ return -1;
+ }
+ if( chain >= maxChain ) {
+ if ( firstNonUsed >= 0 )
+ return firstNonUsed;
+ out() << "error: hashtable " << name << " max chain reached:" << maxChain << endl;
+ return -1;
+ }
+ }
+ }
+
+ public:
+ /* buf must be all zeroes on initialization. */
+ HashTable(void* buf, int buflen, const char *_name) : name(_name) {
+ int m = sizeof(Node);
+ // out() << "hashtab init, buflen:" << buflen << " m:" << m << endl;
+ n = buflen / m;
+ if ( (n & 1) == 0 )
+ n--;
+ maxChain = (int) (n * 0.05);
+ _buf = buf;
+ //nodes = (Node *) buf;
+
+ if ( sizeof(Node) != 628 ) {
+ out() << "HashTable() " << _name << " sizeof(node):" << sizeof(Node) << " n:" << n << " sizeof(Key): " << sizeof(Key) << " sizeof(Type):" << sizeof(Type) << endl;
+ assert( sizeof(Node) == 628 );
+ }
+
+ }
+
+ Type* get(const Key& k) {
+ bool found;
+ int i = _find(k, found);
+ if ( found )
+ return &nodes(i).value;
+ return 0;
+ }
+
+ void kill(const Key& k) {
+ bool found;
+ int i = _find(k, found);
+ if ( i >= 0 && found ) {
+ Node* n = &nodes(i);
+ n = getDur().writing(n);
+ n->k.kill();
+ n->setUnused();
+ }
+ }
+
+ /** returns false if too full */
+ bool put(const Key& k, const Type& value) {
+ bool found;
+ int i = _find(k, found);
+ if ( i < 0 )
+ return false;
+ Node* n = getDur().writing( &nodes(i) );
+ if ( !found ) {
+ n->k = k;
+ n->hash = k.hash();
+ }
+ else {
+ assert( n->hash == k.hash() );
+ }
+ n->value = value;
+ return true;
+ }
+
+ typedef void (*IteratorCallback)( const Key& k , Type& v );
+ void iterAll( IteratorCallback callback ) {
+ for ( int i=0; i<n; i++ ) {
+ if ( nodes(i).inUse() ) {
+ callback( nodes(i).k , nodes(i).value );
+ }
+ }
+ }
+
+ // TODO: should probably use boost::bind for this, but didn't want to look at it
+ typedef void (*IteratorCallback2)( const Key& k , Type& v , void * extra );
+ void iterAll( IteratorCallback2 callback , void * extra ) {
+ for ( int i=0; i<n; i++ ) {
+ if ( nodes(i).inUse() ) {
+ callback( nodes(i).k , nodes(i).value , extra );
+ }
+ }
+ }
+
+ };
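+
+    /* Usage sketch (illustrative): the Key type must provide hash(), operator==,
+       and kill(), e.g.
+
+           struct MyKey {                             // hypothetical key type
+               char buf[20];
+               int hash() const;                      // must always return > 0
+               bool operator==( const MyKey& r ) const;
+               void kill();                           // called by HashTable::kill()
+           };
+
+       Note the sizeof(Node) == 628 assertion in the constructor: it appears to
+       correspond to the namespace index use of this template (Namespace key plus
+       NamespaceDetails value in a memory mapped .ns file), so other Key/Type
+       pairs would need that assert adjusted.
+    */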
+
+#pragma pack()
+
+} // namespace mongo
diff --git a/src/mongo/util/heapcheck.h b/src/mongo/util/heapcheck.h
new file mode 100644
index 00000000000..95da9538db5
--- /dev/null
+++ b/src/mongo/util/heapcheck.h
@@ -0,0 +1,33 @@
+// @file heapcheck.h
+
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#if defined(HEAP_CHECKING)
+
+#include <google/heap-checker.h>
+
+#define IGNORE_OBJECT( a ) HeapLeakChecker::IgnoreObject( a )
+#define UNIGNORE_OBJECT( a ) HeapLeakChecker::UnIgnoreObject( a )
+
+#else
+
+#define IGNORE_OBJECT( a )
+#define UNIGNORE_OBJECT( a )
+
+#endif
diff --git a/src/mongo/util/hex.h b/src/mongo/util/hex.h
new file mode 100644
index 00000000000..8cf30f2d9d3
--- /dev/null
+++ b/src/mongo/util/hex.h
@@ -0,0 +1,67 @@
+// util/hex.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+namespace mongo {
+ //can't use hex namespace because it conflicts with hex iostream function
+ inline int fromHex( char c ) {
+ if ( '0' <= c && c <= '9' )
+ return c - '0';
+ if ( 'a' <= c && c <= 'f' )
+ return c - 'a' + 10;
+ if ( 'A' <= c && c <= 'F' )
+ return c - 'A' + 10;
+ assert( false );
+ return 0xff;
+ }
+ inline char fromHex( const char *c ) {
+ return (char)(( fromHex( c[ 0 ] ) << 4 ) | fromHex( c[ 1 ] ));
+ }
+
+ inline string toHex(const void* inRaw, int len) {
+ static const char hexchars[] = "0123456789ABCDEF";
+
+ StringBuilder out;
+ const char* in = reinterpret_cast<const char*>(inRaw);
+ for (int i=0; i<len; ++i) {
+ char c = in[i];
+ char hi = hexchars[(c & 0xF0) >> 4];
+ char lo = hexchars[(c & 0x0F)];
+
+ out << hi << lo;
+ }
+
+ return out.str();
+ }
+
+ inline string toHexLower(const void* inRaw, int len) {
+ static const char hexchars[] = "0123456789abcdef";
+
+ StringBuilder out;
+ const char* in = reinterpret_cast<const char*>(inRaw);
+ for (int i=0; i<len; ++i) {
+ char c = in[i];
+ char hi = hexchars[(c & 0xF0) >> 4];
+ char lo = hexchars[(c & 0x0F)];
+
+ out << hi << lo;
+ }
+
+ return out.str();
+ }
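+
+    /* For example (illustrative):
+           toHex( "\x4a\x0b" , 2 )       // => "4A0B"
+           toHexLower( "\x4a\x0b" , 2 )  // => "4a0b"
+           fromHex( "4a" )               // => 0x4a
+    */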
+}
diff --git a/src/mongo/util/histogram.cpp b/src/mongo/util/histogram.cpp
new file mode 100644
index 00000000000..17a85059d58
--- /dev/null
+++ b/src/mongo/util/histogram.cpp
@@ -0,0 +1,131 @@
+// histogram.cpp
+
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include <iomanip>
+#include <limits>
+#include <sstream>
+
+#include "histogram.h"
+
+namespace mongo {
+
+ using std::ostringstream;
+ using std::setfill;
+ using std::setw;
+
+ Histogram::Histogram( const Options& opts )
+ : _initialValue( opts.initialValue )
+ , _numBuckets( opts.numBuckets )
+ , _boundaries( new uint32_t[_numBuckets] )
+ , _buckets( new uint64_t[_numBuckets] ) {
+
+ // TODO more sanity checks
+ // + not too few buckets
+ // + initialBucket and bucketSize fit within 32 bit ints
+
+ // _boundaries store the maximum value falling in that bucket.
+ if ( opts.exponential ) {
+ uint32_t twoPow = 1; // 2^0
+ for ( uint32_t i = 0; i < _numBuckets - 1; i++) {
+ _boundaries[i] = _initialValue + opts.bucketSize * twoPow;
+                twoPow *= 2; // 2^(i+1)
+ }
+ }
+ else {
+ _boundaries[0] = _initialValue + opts.bucketSize;
+ for ( uint32_t i = 1; i < _numBuckets - 1; i++ ) {
+ _boundaries[i] = _boundaries[ i-1 ] + opts.bucketSize;
+ }
+ }
+ _boundaries[ _numBuckets-1 ] = std::numeric_limits<uint32_t>::max();
+
+ for ( uint32_t i = 0; i < _numBuckets; i++ ) {
+ _buckets[i] = 0;
+ }
+ }
+
+ Histogram::~Histogram() {
+ delete [] _boundaries;
+ delete [] _buckets;
+ }
+
+ void Histogram::insert( uint32_t element ) {
+ if ( element < _initialValue) return;
+
+ _buckets[ _findBucket(element) ] += 1;
+ }
+
+ string Histogram::toHTML() const {
+ uint64_t max = 0;
+ for ( uint32_t i = 0; i < _numBuckets; i++ ) {
+ if ( _buckets[i] > max ) {
+ max = _buckets[i];
+ }
+ }
+ if ( max == 0 ) {
+ return "histogram is empty\n";
+ }
+
+ // normalize buckets to max
+ const int maxBar = 20;
+ ostringstream ss;
+ for ( uint32_t i = 0; i < _numBuckets; i++ ) {
+ int barSize = _buckets[i] * maxBar / max;
+ ss << string( barSize,'*' )
+ << setfill(' ') << setw( maxBar-barSize + 12 )
+ << _boundaries[i] << '\n';
+ }
+
+ return ss.str();
+ }
+
+ uint64_t Histogram::getCount( uint32_t bucket ) const {
+ if ( bucket >= _numBuckets ) return 0;
+
+ return _buckets[ bucket ];
+ }
+
+ uint32_t Histogram::getBoundary( uint32_t bucket ) const {
+ if ( bucket >= _numBuckets ) return 0;
+
+ return _boundaries[ bucket ];
+ }
+
+ uint32_t Histogram::getBucketsNum() const {
+ return _numBuckets;
+ }
+
+ uint32_t Histogram::_findBucket( uint32_t element ) const {
+ // TODO assert not too small a value?
+
+ uint32_t low = 0;
+ uint32_t high = _numBuckets - 1;
+ while ( low < high ) {
+ // low + ( (high - low) / 2 );
+ uint32_t mid = ( low + high ) >> 1;
+ if ( element > _boundaries[ mid ] ) {
+ low = mid + 1;
+ }
+ else {
+ high = mid;
+ }
+ }
+ return low;
+ }
+
+} // namespace mongo
diff --git a/src/mongo/util/histogram.h b/src/mongo/util/histogram.h
new file mode 100644
index 00000000000..40ec5628dda
--- /dev/null
+++ b/src/mongo/util/histogram.h
@@ -0,0 +1,128 @@
+// histogram.h
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef UTIL_HISTOGRAM_HEADER
+#define UTIL_HISTOGRAM_HEADER
+
+#include "../pch.h"
+
+#include <string>
+
+namespace mongo {
+
+ using std::string;
+
+ /**
+ * A histogram for a 32-bit integer range.
+ */
+ class Histogram {
+ public:
+ /**
+ * Construct a histogram with 'numBuckets' buckets, optionally
+ * having the first bucket start at 'initialValue' rather than
+ * 0. By default, the histogram buckets will be 'bucketSize' wide.
+ *
+ * Usage example:
+ * Histogram::Options opts;
+ * opts.numBuckets = 3;
+ * opts.bucketSize = 10;
+ * Histogram h( opts );
+ *
+ * Generates the bucket ranges [0..10],[11..20],[21..max_int]
+ *
+ * Alternatively, the flag 'exponential' could be turned on, in
+ * which case a bucket's maximum value will be
+ * initialValue + bucketSize * 2 ^ [0..numBuckets-1]
+ *
+ * Usage example:
+ * Histogram::Options opts;
+ * opts.numBuckets = 4;
+ * opts.bucketSize = 125;
+ * opts.exponential = true;
+ * Histogram h( opts );
+ *
+ * Generates the bucket ranges [0..125],[126..250],[251..500],[501..max_int]
+ */
+ struct Options {
+ boost::uint32_t numBuckets;
+ boost::uint32_t bucketSize;
+ boost::uint32_t initialValue;
+
+ // use exponential buckets?
+ bool exponential;
+
+ Options()
+ : numBuckets(0)
+ , bucketSize(0)
+ , initialValue(0)
+ , exponential(false) {}
+ };
+ explicit Histogram( const Options& opts );
+ ~Histogram();
+
+ /**
+ * Find the bucket that 'element' falls into and increment its count.
+ */
+ void insert( boost::uint32_t element );
+
+ /**
+ * Render the histogram as string that can be used inside an
+ * HTML doc.
+ */
+ string toHTML() const;
+
+ // testing interface below -- consider it private
+
+ /**
+ * Return the count for the 'bucket'-th bucket.
+ */
+ boost::uint64_t getCount( boost::uint32_t bucket ) const;
+
+ /**
+ * Return the maximum element that would fall in the
+ * 'bucket'-th bucket.
+ */
+ boost::uint32_t getBoundary( boost::uint32_t bucket ) const;
+
+ /**
+ * Return the number of buckets in this histogram.
+ */
+ boost::uint32_t getBucketsNum() const;
+
+ private:
+ /**
+ * Returns the bucket where 'element' should fall
+ * into. Currently assumes that 'element' is greater than the
+         * minimum 'initialValue'.
+ */
+ boost::uint32_t _findBucket( boost::uint32_t element ) const;
+
+ boost::uint32_t _initialValue; // no value lower than it is recorded
+ boost::uint32_t _numBuckets; // total buckets in the histogram
+
+ // all below owned here
+ boost::uint32_t* _boundaries; // maximum element of each bucket
+ boost::uint64_t* _buckets; // current count of each bucket
+
+ Histogram( const Histogram& );
+ Histogram& operator=( const Histogram& );
+ };
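+
+    /* Continuing the first example from the class comment (numBuckets = 3,
+       bucketSize = 10; illustrative only):
+
+           h.insert( 17 );                  // lands in the [11..20] bucket
+           h.getCount( 1 );                 // == 1
+           string html = h.toHTML();        // render for a status page
+    */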
+
+} // namespace mongo
+
+#endif // UTIL_HISTOGRAM_HEADER
diff --git a/src/mongo/util/intrusive_counter.cpp b/src/mongo/util/intrusive_counter.cpp
new file mode 100755
index 00000000000..fc01f40b41a
--- /dev/null
+++ b/src/mongo/util/intrusive_counter.cpp
@@ -0,0 +1,30 @@
+/**
+ * Copyright (c) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "util/intrusive_counter.h"
+
+namespace mongo {
+
+ void IntrusiveCounterUnsigned::addRef() const {
+ ++counter;
+ }
+
+ void IntrusiveCounterUnsigned::release() const {
+ if (!--counter)
+ delete this;
+ }
+
+}
diff --git a/src/mongo/util/intrusive_counter.h b/src/mongo/util/intrusive_counter.h
new file mode 100755
index 00000000000..bcebb6288cf
--- /dev/null
+++ b/src/mongo/util/intrusive_counter.h
@@ -0,0 +1,79 @@
+/**
+ * Copyright (c) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include <boost/intrusive_ptr.hpp>
+#include <boost/noncopyable.hpp>
+
+namespace mongo {
+
+/*
+ IntrusiveCounter is a sharable implementation of a reference counter that
+ objects can use to be compatible with boost::intrusive_ptr<>.
+
+ Some objects that use IntrusiveCounter are immutable, and only have
+ const methods. This may require their pointers to be declared as
+ intrusive_ptr<const ClassName> . In order to be able to share pointers to
+ these immutables, the methods associated with IntrusiveCounter are declared
+ as const, and the counter itself is marked as mutable.
+
+ IntrusiveCounter itself is abstract, allowing for multiple implementations.
+ For example, IntrusiveCounterUnsigned uses ordinary unsigned integers for
+ the reference count, and is good for situations where thread safety is not
+  required.  When thread safety is needed, implementations based on atomic
+  integers should be used.  For static objects, the implementations of addRef() and release()
+ can be overridden to do nothing.
+ */
+ class IntrusiveCounter :
+ boost::noncopyable {
+ public:
+ virtual ~IntrusiveCounter() {};
+
+ // these are here for the boost intrusive_ptr<> class
+ friend inline void intrusive_ptr_add_ref(const IntrusiveCounter *pIC) {
+ pIC->addRef(); };
+ friend inline void intrusive_ptr_release(const IntrusiveCounter *pIC) {
+ pIC->release(); };
+
+ virtual void addRef() const = 0;
+ virtual void release() const = 0;
+ };
+
+ class IntrusiveCounterUnsigned :
+ public IntrusiveCounter {
+ public:
+ // virtuals from IntrusiveCounter
+ virtual void addRef() const;
+ virtual void release() const;
+
+ IntrusiveCounterUnsigned();
+
+ private:
+ mutable unsigned counter;
+ };
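+
+    /* Illustrative sketch (the Expression class here is hypothetical):
+
+           class Expression : public IntrusiveCounterUnsigned {
+               // const-only interface; the counter is mutable, so reference
+               // counting still works through intrusive_ptr<const Expression>
+           };
+
+           boost::intrusive_ptr<const Expression> pExpr( new Expression() );
+           // intrusive_ptr_add_ref()/intrusive_ptr_release() defined above keep the count
+    */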
+
+};
+
+/* ======================= INLINED IMPLEMENTATIONS ========================== */
+
+namespace mongo {
+
+ inline IntrusiveCounterUnsigned::IntrusiveCounterUnsigned():
+ counter(0) {
+ }
+
+};
diff --git a/src/mongo/util/log.cpp b/src/mongo/util/log.cpp
new file mode 100644
index 00000000000..aa249597b57
--- /dev/null
+++ b/src/mongo/util/log.cpp
@@ -0,0 +1,197 @@
+/** @file log.cpp
+ */
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "assert_util.h"
+#include "assert.h"
+#include <cmath>
+#include "time_support.h"
+using namespace std;
+
+#ifdef _WIN32
+# include <io.h>
+# include <fcntl.h>
+#else
+# include <cxxabi.h>
+# include <sys/file.h>
+#endif
+
+#ifdef _WIN32
+# define dup2 _dup2 // Microsoft headers use ISO C names
+# define fileno _fileno
+#endif
+
+namespace mongo {
+
+ Nullstream nullstream;
+ vector<Tee*>* Logstream::globalTees = 0;
+
+ thread_specific_ptr<Logstream> Logstream::tsp;
+
+ class LoggingManager {
+ public:
+ LoggingManager()
+ : _enabled(0) , _file(0) {
+ }
+
+ void start( const string& lp , bool append ) {
+ uassert( 10268 , "LoggingManager already started" , ! _enabled );
+ _append = append;
+
+ bool exists = boost::filesystem::exists(lp);
+ bool isdir = boost::filesystem::is_directory(lp);
+ bool isreg = boost::filesystem::is_regular_file(lp);
+
+ if ( exists ) {
+ if ( isdir ) {
+ cout << "logpath [" << lp << "] should be a filename, not a directory" << endl;
+
+ dbexit( EXIT_BADOPTIONS );
+ assert( 0 );
+ }
+
+ if ( ! append ) {
+ // only attempt rename if log is regular file
+ if ( isreg ) {
+ stringstream ss;
+ ss << lp << "." << terseCurrentTime( false );
+ string s = ss.str();
+
+ if ( ! rename( lp.c_str() , s.c_str() ) ) {
+ cout << "log file [" << lp << "] exists; copied to temporary file [" << s << "]" << endl;
+ } else {
+ cout << "log file [" << lp << "] exists and couldn't make backup; run with --logappend or manually remove file (" << strerror(errno) << ")" << endl;
+
+ dbexit( EXIT_BADOPTIONS );
+ assert( 0 );
+ }
+ }
+ }
+ }
+ // test path
+ FILE * test = fopen( lp.c_str() , _append ? "a" : "w" );
+ if ( ! test ) {
+ cout << "can't open [" << lp << "] for log file: " << errnoWithDescription() << endl;
+ dbexit( EXIT_BADOPTIONS );
+ assert( 0 );
+ }
+
+ if (append && exists){
+ // two blank lines before and after
+ const string msg = "\n\n***** SERVER RESTARTED *****\n\n\n";
+ massert(14036, errnoWithPrefix("couldn't write to log file"),
+ fwrite(msg.data(), 1, msg.size(), test) == msg.size());
+ }
+
+ fclose( test );
+
+ _path = lp;
+ _enabled = 1;
+ rotate();
+ }
+
+ void rotate() {
+ if ( ! _enabled ) {
+ cout << "LoggingManager not enabled" << endl;
+ return;
+ }
+
+ if ( _file ) {
+
+#ifdef POSIX_FADV_DONTNEED
+ posix_fadvise(fileno(_file), 0, 0, POSIX_FADV_DONTNEED);
+#endif
+
+ // Rename the (open) existing log file to a timestamped name
+ stringstream ss;
+ ss << _path << "." << terseCurrentTime( false );
+ string s = ss.str();
+ rename( _path.c_str() , s.c_str() );
+ }
+
+ FILE* tmp = 0; // The new file using the original logpath name
+
+#if _WIN32
+ // We rename an open log file (above, on next rotation) and the trick to getting Windows to do that is
+ // to open the file with FILE_SHARE_DELETE. So, we can't use the freopen() call that non-Windows
+ // versions use because it would open the file without the FILE_SHARE_DELETE flag we need.
+ //
+ HANDLE newFileHandle = CreateFileA(
+ _path.c_str(),
+ GENERIC_WRITE,
+ FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
+ NULL,
+ OPEN_ALWAYS,
+ FILE_ATTRIBUTE_NORMAL,
+ NULL
+ );
+ if ( INVALID_HANDLE_VALUE != newFileHandle ) {
+ int newFileDescriptor = _open_osfhandle( reinterpret_cast<intptr_t>(newFileHandle), _O_APPEND );
+ tmp = _fdopen( newFileDescriptor, _append ? "a" : "w" );
+ }
+#else
+ tmp = freopen(_path.c_str(), _append ? "a" : "w", stdout);
+#endif
+ if ( !tmp ) {
+ cerr << "can't open: " << _path.c_str() << " for log file" << endl;
+ dbexit( EXIT_BADOPTIONS );
+ assert( 0 );
+ }
+
+ // redirect stdout and stderr to log file
+ dup2( fileno( tmp ), 1 ); // stdout
+ dup2( fileno( tmp ), 2 ); // stderr
+
+ Logstream::setLogFile(tmp); // after this point no thread will be using old file
+
+#if _WIN32
+ if ( _file )
+ fclose( _file ); // In Windows, we still have the old file open, close it now
+#endif
+
+#if 0 // enable to test redirection
+ cout << "written to cout" << endl;
+ cerr << "written to cerr" << endl;
+ log() << "written to log()" << endl;
+#endif
+
+ _file = tmp; // Save new file for next rotation
+ }
+
+ private:
+ bool _enabled;
+ string _path;
+ bool _append;
+ FILE * _file;
+
+ } loggingManager;
+
+ void initLogging( const string& lp , bool append ) {
+ cout << "all output going to: " << lp << endl;
+ loggingManager.start( lp , append );
+ }
+
+ void rotateLogs( int signal ) {
+ loggingManager.rotate();
+ }
+
+ // done *before* static initialization
+ FILE* Logstream::logfile = stdout;
+ bool Logstream::isSyslog = false;
+
+}
diff --git a/src/mongo/util/log.h b/src/mongo/util/log.h
new file mode 100644
index 00000000000..a393d4d29a5
--- /dev/null
+++ b/src/mongo/util/log.h
@@ -0,0 +1,581 @@
+// @file log.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <string.h>
+#include <errno.h>
+#include "../bson/util/builder.h"
+
+#ifndef _WIN32
+#include <syslog.h>
+#endif
+
+namespace mongo {
+
+ enum LogLevel { LL_DEBUG , LL_INFO , LL_NOTICE , LL_WARNING , LL_ERROR , LL_SEVERE };
+
+ inline const char * logLevelToString( LogLevel l ) {
+ switch ( l ) {
+ case LL_DEBUG:
+ case LL_INFO:
+ case LL_NOTICE:
+ return "";
+ case LL_WARNING:
+ return "warning" ;
+ case LL_ERROR:
+ return "ERROR";
+ case LL_SEVERE:
+ return "SEVERE";
+ default:
+ return "UNKNOWN";
+ }
+ }
+
+#ifndef _WIN32
+ inline const int logLevelToSysLogLevel( LogLevel l) {
+ switch ( l ) {
+ case LL_DEBUG:
+ return LOG_DEBUG;
+ case LL_INFO:
+ return LOG_INFO;
+ case LL_NOTICE:
+ return LOG_NOTICE;
+ case LL_WARNING:
+ return LOG_WARNING;
+ case LL_ERROR:
+ return LOG_ERR;
+ case LL_SEVERE:
+ return LOG_CRIT;
+ default:
+ return LL_INFO;
+ }
+ }
+#endif
+
+ class LabeledLevel {
+ public:
+
+ LabeledLevel( int level ) : _level( level ) {}
+ LabeledLevel( const char* label, int level ) : _label( label ), _level( level ) {}
+ LabeledLevel( const string& label, int level ) : _label( label ), _level( level ) {}
+
+ LabeledLevel operator+( int i ) const {
+ return LabeledLevel( _label, _level + i );
+ }
+
+ LabeledLevel operator+( const char* label ) const {
+ if( _label == "" )
+ return LabeledLevel( label, _level );
+ return LabeledLevel( _label + string("::") + label, _level );
+ }
+
+ LabeledLevel operator+( string& label ) const {
+ return LabeledLevel( _label + string("::") + label, _level );
+ }
+
+ LabeledLevel operator-( int i ) const {
+ return LabeledLevel( _label, _level - i );
+ }
+
+ const string& getLabel() const { return _label; }
+ int getLevel() const { return _level; }
+
+ private:
+ string _label;
+ int _level;
+ };
+
+ class LazyString {
+ public:
+ virtual ~LazyString() {}
+ virtual string val() const = 0;
+ };
+
+    // Utility class for stringifying an object only when val() is called.
+ template< class T >
+ class LazyStringImpl : public LazyString {
+ public:
+ LazyStringImpl( const T &t ) : t_( t ) {}
+ virtual string val() const { return t_.toString(); }
+ private:
+ const T& t_;
+ };
+
+ class Tee {
+ public:
+ virtual ~Tee() {}
+ virtual void write(LogLevel level , const string& str) = 0;
+ };
+
+ class Nullstream {
+ public:
+ virtual Nullstream& operator<< (Tee* tee) {
+ return *this;
+ }
+ virtual ~Nullstream() {}
+ virtual Nullstream& operator<<(const char *) {
+ return *this;
+ }
+ virtual Nullstream& operator<<(const string& ) {
+ return *this;
+ }
+ virtual Nullstream& operator<<(const StringData& ) {
+ return *this;
+ }
+ virtual Nullstream& operator<<(char *) {
+ return *this;
+ }
+ virtual Nullstream& operator<<(char) {
+ return *this;
+ }
+ virtual Nullstream& operator<<(int) {
+ return *this;
+ }
+ virtual Nullstream& operator<<(ExitCode) {
+ return *this;
+ }
+ virtual Nullstream& operator<<(unsigned long) {
+ return *this;
+ }
+ virtual Nullstream& operator<<(long) {
+ return *this;
+ }
+ virtual Nullstream& operator<<(unsigned) {
+ return *this;
+ }
+ virtual Nullstream& operator<<(unsigned short) {
+ return *this;
+ }
+ virtual Nullstream& operator<<(double) {
+ return *this;
+ }
+ virtual Nullstream& operator<<(void *) {
+ return *this;
+ }
+ virtual Nullstream& operator<<(const void *) {
+ return *this;
+ }
+ virtual Nullstream& operator<<(long long) {
+ return *this;
+ }
+ virtual Nullstream& operator<<(unsigned long long) {
+ return *this;
+ }
+ virtual Nullstream& operator<<(bool) {
+ return *this;
+ }
+ virtual Nullstream& operator<<(const LazyString&) {
+ return *this;
+ }
+ template< class T >
+ Nullstream& operator<<(T *t) {
+ return operator<<( static_cast<void*>( t ) );
+ }
+ template< class T >
+ Nullstream& operator<<(const T *t) {
+ return operator<<( static_cast<const void*>( t ) );
+ }
+ template< class T >
+ Nullstream& operator<<(const shared_ptr<T> p ) {
+ T * t = p.get();
+ if ( ! t )
+ *this << "null";
+ else
+ *this << *t;
+ return *this;
+ }
+ template< class T >
+ Nullstream& operator<<(const T &t) {
+ return operator<<( static_cast<const LazyString&>( LazyStringImpl< T >( t ) ) );
+ }
+
+ virtual Nullstream& operator<< (ostream& ( *endl )(ostream&)) {
+ return *this;
+ }
+ virtual Nullstream& operator<< (ios_base& (*hex)(ios_base&)) {
+ return *this;
+ }
+
+ virtual void flush(Tee *t = 0) {}
+ };
+ extern Nullstream nullstream;
+
+ class Logstream : public Nullstream {
+ static mongo::mutex mutex;
+ static int doneSetup;
+ stringstream ss;
+ int indent;
+ LogLevel logLevel;
+ static FILE* logfile;
+ static boost::scoped_ptr<ostream> stream;
+ static vector<Tee*> * globalTees;
+ static bool isSyslog;
+ public:
+ inline static void logLockless( const StringData& s );
+
+ static void setLogFile(FILE* f) {
+ scoped_lock lk(mutex);
+ logfile = f;
+ }
+#ifndef _WIN32
+ static void useSyslog(const char * name) {
+ cout << "using syslog ident: " << name << endl;
+
+            // openlog requires a heap-allocated, non-changing pointer
+            // this should only be called once per program execution
+
+ char * newName = (char *) malloc( strlen(name) + 1 );
+ strcpy( newName , name);
+ openlog( newName , LOG_ODELAY , LOG_USER );
+ isSyslog = true;
+ }
+#endif
+
+ static int magicNumber() {
+ return 1717;
+ }
+
+ static int getLogDesc() {
+ int fd = -1;
+ if (logfile != NULL)
+#if defined(_WIN32)
+ // the ISO C++ conformant name is _fileno
+ fd = _fileno( logfile );
+#else
+ fd = fileno( logfile );
+#endif
+ return fd;
+ }
+
+ inline void flush(Tee *t = 0);
+
+ inline Nullstream& setLogLevel(LogLevel l) {
+ logLevel = l;
+ return *this;
+ }
+
+ /** note these are virtual */
+ Logstream& operator<<(const char *x) { ss << x; return *this; }
+ Logstream& operator<<(const string& x) { ss << x; return *this; }
+ Logstream& operator<<(const StringData& x) { ss << x.data(); return *this; }
+ Logstream& operator<<(char *x) { ss << x; return *this; }
+ Logstream& operator<<(char x) { ss << x; return *this; }
+ Logstream& operator<<(int x) { ss << x; return *this; }
+ Logstream& operator<<(ExitCode x) { ss << x; return *this; }
+ Logstream& operator<<(long x) { ss << x; return *this; }
+ Logstream& operator<<(unsigned long x) { ss << x; return *this; }
+ Logstream& operator<<(unsigned x) { ss << x; return *this; }
+ Logstream& operator<<(unsigned short x){ ss << x; return *this; }
+ Logstream& operator<<(double x) { ss << x; return *this; }
+ Logstream& operator<<(void *x) { ss << x; return *this; }
+ Logstream& operator<<(const void *x) { ss << x; return *this; }
+ Logstream& operator<<(long long x) { ss << x; return *this; }
+ Logstream& operator<<(unsigned long long x) { ss << x; return *this; }
+ Logstream& operator<<(bool x) { ss << x; return *this; }
+
+ Logstream& operator<<(const LazyString& x) {
+ ss << x.val();
+ return *this;
+ }
+ Nullstream& operator<< (Tee* tee) {
+ ss << '\n';
+ flush(tee);
+ return *this;
+ }
+ Logstream& operator<< (ostream& ( *_endl )(ostream&)) {
+ ss << '\n';
+ flush(0);
+ return *this;
+ }
+ Logstream& operator<< (ios_base& (*_hex)(ios_base&)) {
+ ss << _hex;
+ return *this;
+ }
+
+ Logstream& prolog() {
+ return *this;
+ }
+
+ void addGlobalTee( Tee * t ) {
+ if ( ! globalTees )
+ globalTees = new vector<Tee*>();
+ globalTees->push_back( t );
+ }
+
+ void indentInc(){ indent++; }
+ void indentDec(){ indent--; }
+ int getIndent() const { return indent; }
+
+ private:
+ static thread_specific_ptr<Logstream> tsp;
+ Logstream() {
+ indent = 0;
+ _init();
+ }
+ void _init() {
+ ss.str("");
+ logLevel = LL_INFO;
+ }
+ public:
+ static Logstream& get() {
+ if ( StaticObserver::_destroyingStatics ) {
+ cout << "Logstream::get called in uninitialized state" << endl;
+ }
+ Logstream *p = tsp.get();
+ if( p == 0 )
+ tsp.reset( p = new Logstream() );
+ return *p;
+ }
+ };
+
+ extern int logLevel;
+ extern int tlogLevel;
+
+ inline Nullstream& out( int level = 0 ) {
+ if ( level > logLevel )
+ return nullstream;
+ return Logstream::get();
+ }
+
+ /* flush the log stream if the log level is
+ at the specified level or higher. */
+ inline void logflush(int level = 0) {
+ if( level > logLevel )
+ Logstream::get().flush(0);
+ }
+
+ /* without prolog */
+ inline Nullstream& _log( int level = 0 ) {
+ if ( level > logLevel )
+ return nullstream;
+ return Logstream::get();
+ }
+
+ /** logging which we may not want during unit tests (dbtests) runs.
+ set tlogLevel to -1 to suppress tlog() output in a test program. */
+ inline Nullstream& tlog( int level = 0 ) {
+ if ( level > tlogLevel || level > logLevel )
+ return nullstream;
+ return Logstream::get().prolog();
+ }
+
+ // log if debug build or if at a certain level
+ inline Nullstream& dlog( int level ) {
+ if ( level <= logLevel || DEBUG_BUILD )
+ return Logstream::get().prolog();
+ return nullstream;
+ }
+
+ inline Nullstream& log( int level ) {
+ if ( level > logLevel )
+ return nullstream;
+ return Logstream::get().prolog();
+ }
+
+#define MONGO_LOG(level) if ( MONGO_likely(logLevel < (level)) ) { } else log( level )
+#define LOG MONGO_LOG
+
+ inline Nullstream& log( LogLevel l ) {
+ return Logstream::get().prolog().setLogLevel( l );
+ }
+
+ inline Nullstream& log( const LabeledLevel& ll ) {
+ Nullstream& stream = log( ll.getLevel() );
+ if( ll.getLabel() != "" )
+ stream << "[" << ll.getLabel() << "] ";
+ return stream;
+ }
+
+ inline Nullstream& log() {
+ return Logstream::get().prolog();
+ }
+
+ inline Nullstream& error() {
+ return log( LL_ERROR );
+ }
+
+ inline Nullstream& warning() {
+ return log( LL_WARNING );
+ }
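+
+    /* Typical call sites (illustrative):
+
+           log() << "starting worker thread" << endl;       // default informational level
+           LOG(2) << "verbose detail, shown at -vv and up" << endl;
+           warning() << "could not connect" << endl;        // prefixed with "warning"
+           error() << "unexpected state" << endl;           // prefixed with "ERROR"
+    */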
+
+ /* default impl returns "" -- mongod overrides */
+ extern const char * (*getcurns)();
+
+ inline Nullstream& problem( int level = 0 ) {
+ if ( level > logLevel )
+ return nullstream;
+ Logstream& l = Logstream::get().prolog();
+ l << ' ' << getcurns() << ' ';
+ return l;
+ }
+
+ /**
+ log to a file rather than stdout
+      defined in log.cpp
+ */
+ void initLogging( const string& logpath , bool append );
+ void rotateLogs( int signal = 0 );
+
+ std::string toUtf8String(const std::wstring& wide);
+
+#if defined(_WIN32)
+ inline string errnoWithDescription(DWORD x = GetLastError()) {
+#else
+ inline string errnoWithDescription(int x = errno) {
+#endif
+ stringstream s;
+ s << "errno:" << x << ' ';
+
+#if defined(_WIN32)
+ LPTSTR errorText = NULL;
+ FormatMessage(
+ FORMAT_MESSAGE_FROM_SYSTEM
+ |FORMAT_MESSAGE_ALLOCATE_BUFFER
+ |FORMAT_MESSAGE_IGNORE_INSERTS,
+ NULL,
+ x, 0,
+ (LPTSTR) &errorText, // output
+ 0, // minimum size for output buffer
+ NULL);
+ if( errorText ) {
+ string x = toUtf8String(errorText);
+ for( string::iterator i = x.begin(); i != x.end(); i++ ) {
+ if( *i == '\n' || *i == '\r' )
+ break;
+ s << *i;
+ }
+ LocalFree(errorText);
+ }
+ else
+ s << strerror(x);
+ /*
+ DWORD n = FormatMessage(
+ FORMAT_MESSAGE_ALLOCATE_BUFFER |
+ FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS,
+ NULL, x,
+ MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ (LPTSTR) &lpMsgBuf, 0, NULL);
+ */
+#else
+ s << strerror(x);
+#endif
+ return s.str();
+ }
+
+ /** output the error # and error message with prefix.
+ handy for use as parm in uassert/massert.
+ */
+ string errnoWithPrefix( const char * prefix );
+
+ void Logstream::logLockless( const StringData& s ) {
+ if ( s.size() == 0 )
+ return;
+
+ if ( doneSetup == 1717 ) {
+#ifndef _WIN32
+ if ( isSyslog ) {
+ syslog( LOG_INFO , "%s" , s.data() );
+ } else
+#endif
+ if (fwrite(s.data(), s.size(), 1, logfile)) {
+ fflush(logfile);
+ }
+ else {
+ int x = errno;
+ cout << "Failed to write to logfile: " << errnoWithDescription(x) << endl;
+ }
+ }
+ else {
+ cout << s.data();
+ cout.flush();
+ }
+ }
+
+ void Logstream::flush(Tee *t) {
+ // this ensures things are sane
+ if ( doneSetup == 1717 ) {
+ string msg = ss.str();
+ string threadName = getThreadName();
+ const char * type = logLevelToString(logLevel);
+
+ int spaceNeeded = (int)(msg.size() + 64 + threadName.size());
+ int bufSize = 128;
+ while ( bufSize < spaceNeeded )
+ bufSize += 128;
+
+ BufBuilder b(bufSize);
+ time_t_to_String( time(0) , b.grow(20) );
+ if (!threadName.empty()) {
+ b.appendChar( '[' );
+ b.appendStr( threadName , false );
+ b.appendChar( ']' );
+ b.appendChar( ' ' );
+ }
+
+ for ( int i=0; i<indent; i++ )
+ b.appendChar( '\t' );
+
+ if ( type[0] ) {
+ b.appendStr( type , false );
+ b.appendStr( ": " , false );
+ }
+
+ b.appendStr( msg );
+
+ string out( b.buf() , b.len() - 1);
+
+ scoped_lock lk(mutex);
+
+ if( t ) t->write(logLevel,out);
+ if ( globalTees ) {
+ for ( unsigned i=0; i<globalTees->size(); i++ )
+ (*globalTees)[i]->write(logLevel,out);
+ }
+#ifndef _WIN32
+ if ( isSyslog ) {
+ syslog( logLevelToSysLogLevel(logLevel) , "%s" , out.data() );
+ } else
+#endif
+ if(fwrite(out.data(), out.size(), 1, logfile)) {
+ fflush(logfile);
+ }
+ else {
+ int x = errno;
+ cout << "Failed to write to logfile: " << errnoWithDescription(x) << ": " << out << endl;
+ }
+#ifdef POSIX_FADV_DONTNEED
+ // This only applies to pages that have already been flushed
+ RARELY posix_fadvise(fileno(logfile), 0, 0, POSIX_FADV_DONTNEED);
+#endif
+ }
+ _init();
+ }
+
+ struct LogIndentLevel {
+ LogIndentLevel(){
+ Logstream::get().indentInc();
+ }
+ ~LogIndentLevel(){
+ Logstream::get().indentDec();
+ }
+ };
+
+ extern Tee* const warnings; // Things put here go in serverStatus
+
+} // namespace mongo
diff --git a/src/mongo/util/logfile.cpp b/src/mongo/util/logfile.cpp
new file mode 100644
index 00000000000..7c362be08d1
--- /dev/null
+++ b/src/mongo/util/logfile.cpp
@@ -0,0 +1,253 @@
+// @file logfile.cpp simple file log writing / journaling
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+#include "logfile.h"
+#include "text.h"
+#include "mongoutils/str.h"
+#include "unittest.h"
+
+using namespace mongoutils;
+
+namespace mongo {
+ struct LogfileTest : public UnitTest {
+ LogfileTest() { }
+ void run() {
+ if( 0 && debug ) {
+ try {
+ LogFile f("logfile_test");
+ void *p = malloc(16384);
+ char *buf = (char*) p;
+ buf += 4095;
+ buf = (char*) (((size_t)buf)&(~0xfff));
+ memset(buf, 'z', 8192);
+ buf[8190] = '\n';
+ buf[8191] = 'B';
+ buf[0] = 'A';
+ f.synchronousAppend(buf, 8192);
+ f.synchronousAppend(buf, 8192);
+ free(p);
+ }
+ catch(DBException& e ) {
+ log() << "logfile.cpp test failed : " << e.what() << endl;
+ throw;
+ }
+ }
+ }
+ } __test;
+}
+
+#if defined(_WIN32)
+
+namespace mongo {
+
+ LogFile::LogFile(string name, bool readwrite) : _name(name) {
+ _fd = CreateFile(
+ toNativeString(name.c_str()).c_str(),
+ (readwrite?GENERIC_READ:0)|GENERIC_WRITE,
+ FILE_SHARE_READ,
+ NULL,
+ OPEN_ALWAYS,
+ FILE_FLAG_NO_BUFFERING | FILE_FLAG_WRITE_THROUGH,
+ NULL);
+ if( _fd == INVALID_HANDLE_VALUE ) {
+ DWORD e = GetLastError();
+ uasserted(13518, str::stream() << "couldn't open file " << name << " for writing " << errnoWithDescription(e));
+ }
+ SetFilePointer(_fd, 0, 0, FILE_BEGIN);
+ }
+
+ LogFile::~LogFile() {
+ if( _fd != INVALID_HANDLE_VALUE )
+ CloseHandle(_fd);
+ }
+
+ void LogFile::truncate() {
+ verify(15870, _fd != INVALID_HANDLE_VALUE);
+
+ if (!SetEndOfFile(_fd)){
+ msgasserted(15871, "Couldn't truncate file: " + errnoWithDescription());
+ }
+ }
+
+ void LogFile::writeAt(unsigned long long offset, const void *_buf, size_t _len) {
+// TODO 64 bit offsets
+ OVERLAPPED o;
+ memset(&o,0,sizeof(o));
+ (unsigned long long&) o.Offset = offset;
+ BOOL ok= WriteFile(_fd, _buf, _len, 0, &o);
+ assert(ok);
+ }
+
+ void LogFile::readAt(unsigned long long offset, void *_buf, size_t _len) {
+// TODO 64 bit offsets
+ OVERLAPPED o;
+ memset(&o,0,sizeof(o));
+ (unsigned long long&) o.Offset = offset;
+ DWORD nr;
+ BOOL ok = ReadFile(_fd, _buf, _len, &nr, &o);
+ if( !ok ) {
+ string e = errnoWithDescription();
+ //DWORD e = GetLastError();
+ log() << "LogFile readAt(" << offset << ") len:" << _len << "errno:" << e << endl;
+ assert(false);
+ }
+ }
+
+ void LogFile::synchronousAppend(const void *_buf, size_t _len) {
+ const size_t BlockSize = 8 * 1024 * 1024;
+ assert(_fd);
+ assert(_len % 4096 == 0);
+ const char *buf = (const char *) _buf;
+ size_t left = _len;
+ while( left ) {
+ size_t toWrite = min(left, BlockSize);
+ DWORD written;
+ if( !WriteFile(_fd, buf, toWrite, &written, NULL) ) {
+ DWORD e = GetLastError();
+ if( e == 87 )
+ msgasserted(13519, "error 87 appending to file - invalid parameter");
+ else
+ uasserted(13517, str::stream() << "error appending to file " << _name << ' ' << _len << ' ' << toWrite << ' ' << errnoWithDescription(e));
+ }
+ else {
+ dassert( written == toWrite );
+ }
+ left -= written;
+ buf += written;
+ }
+ }
+
+}
+
+#else
+
+/// posix
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include "paths.h"
+
+namespace mongo {
+
+ LogFile::LogFile(string name, bool readwrite) : _name(name) {
+ int options = O_CREAT
+ | (readwrite?O_RDWR:O_WRONLY)
+#if defined(O_DIRECT)
+ | O_DIRECT
+#endif
+#if defined(O_NOATIME)
+ | O_NOATIME
+#endif
+ ;
+
+ _fd = open(name.c_str(), options, S_IRUSR | S_IWUSR);
+
+#if defined(O_DIRECT)
+ _direct = true;
+ if( _fd < 0 ) {
+ _direct = false;
+ options &= ~O_DIRECT;
+ _fd = open(name.c_str(), options, S_IRUSR | S_IWUSR);
+ }
+#else
+ _direct = false;
+#endif
+
+ if( _fd < 0 ) {
+ uasserted(13516, str::stream() << "couldn't open file " << name << " for writing " << errnoWithDescription());
+ }
+
+ flushMyDirectory(name);
+ }
+
+ LogFile::~LogFile() {
+ if( _fd >= 0 )
+ close(_fd);
+ _fd = -1;
+ }
+
+ void LogFile::truncate() {
+ verify(15872, _fd >= 0);
+
+ BOOST_STATIC_ASSERT(sizeof(off_t) == 8); // we don't want overflow here
+ const off_t pos = lseek(_fd, 0, SEEK_CUR); // doesn't actually seek
+ if (ftruncate(_fd, pos) != 0){
+ msgasserted(15873, "Couldn't truncate file: " + errnoWithDescription());
+ }
+
+ fsync(_fd);
+ }
+
+ void LogFile::writeAt(unsigned long long offset, const void *buf, size_t len) {
+ assert(((size_t)buf)%4096==0); // aligned
+ ssize_t written = pwrite(_fd, buf, len, offset);
+ if( written != (ssize_t) len ) {
+ log() << "writeAt fails " << errnoWithDescription() << endl;
+ }
+#if defined(__linux__)
+ fdatasync(_fd);
+#else
+ fsync(_fd);
+#endif
+ }
+
+ void LogFile::readAt(unsigned long long offset, void *_buf, size_t _len) {
+ assert(((size_t)_buf)%4096==0); // aligned
+ ssize_t rd = pread(_fd, _buf, _len, offset);
+ assert( rd != -1 );
+ }
+
+ void LogFile::synchronousAppend(const void *b, size_t len) {
+#ifdef POSIX_FADV_DONTNEED
+        const off_t pos = lseek(_fd, 0, SEEK_CUR); // doesn't actually seek, just gets the current position
+#endif
+
+ const char *buf = (char *) b;
+ assert(_fd);
+ assert(((size_t)buf)%4096==0); // aligned
+ if( len % 4096 != 0 ) {
+ log() << len << ' ' << len % 4096 << endl;
+ assert(false);
+ }
+ ssize_t written = write(_fd, buf, len);
+ if( written != (ssize_t) len ) {
+ log() << "write fails written:" << written << " len:" << len << " buf:" << buf << ' ' << errnoWithDescription() << endl;
+ uasserted(13515, str::stream() << "error appending to file " << _fd << ' ' << errnoWithDescription());
+ }
+
+ if(
+#if defined(__linux__)
+ fdatasync(_fd) < 0
+#else
+ fsync(_fd)
+#endif
+ ) {
+ uasserted(13514, str::stream() << "error appending to file on fsync " << ' ' << errnoWithDescription());
+ }
+
+#ifdef POSIX_FADV_DONTNEED
+ if (!_direct)
+ posix_fadvise(_fd, pos, len, POSIX_FADV_DONTNEED);
+#endif
+ }
+
+}
+
+#endif
diff --git a/src/mongo/util/logfile.h b/src/mongo/util/logfile.h
new file mode 100644
index 00000000000..e41ecc2f6ec
--- /dev/null
+++ b/src/mongo/util/logfile.h
@@ -0,0 +1,58 @@
+// @file logfile.h simple file log writing / journaling
+
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+namespace mongo {
+
+ class LogFile {
+ public:
+ /** create the file and open. must not already exist.
+ throws UserAssertion on i/o error
+ */
+ LogFile(string name, bool readwrite = false);
+
+ /** closes */
+ ~LogFile();
+
+ /** append to file. does not return until sync'd. uses direct i/o when possible.
+ throws UserAssertion on an i/o error
+ note direct i/o may have alignment requirements
+ */
+ void synchronousAppend(const void *buf, size_t len);
+
+ /** write at specified offset. must be aligned. noreturn until physically written. thread safe */
+        void writeAt(unsigned long long offset, const void *_buf, size_t _len);
+
+ void readAt(unsigned long long offset, void *_buf, size_t _len);
+
+ const string _name;
+
+ void truncate(); // Removes extra data after current position
+
+ private:
+#if defined(_WIN32)
+ typedef HANDLE fd_type;
+#else
+ typedef int fd_type;
+#endif
+ fd_type _fd;
+ bool _direct; // are we using direct I/O
+ };
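+
+    /* Usage sketch (illustrative; see also the self-test in logfile.cpp).
+       Because the file may be opened with O_DIRECT / FILE_FLAG_NO_BUFFERING,
+       buffers passed to synchronousAppend() should be 4096-byte aligned and a
+       multiple of 4096 bytes long:
+
+           LogFile lf( "journal.tmp" );       // hypothetical file name
+           // allocate an aligned 8k buffer (posix_memalign / _aligned_malloc),
+           // fill it, then:
+           lf.synchronousAppend( alignedBuf , 8192 );
+    */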
+
+}
diff --git a/src/mongo/util/lruishmap.h b/src/mongo/util/lruishmap.h
new file mode 100644
index 00000000000..ba91bf6f0f6
--- /dev/null
+++ b/src/mongo/util/lruishmap.h
@@ -0,0 +1,78 @@
+// lru-ish map.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "../pch.h"
+#include "../util/goodies.h"
+
+namespace mongo {
+
+ /* Your K object must define:
+ int hash() - must always return > 0.
+ operator==
+ */
+
+ template <class K, class V, int MaxChain>
+ class LRUishMap {
+ public:
+ LRUishMap(int _n) {
+ n = nextPrime(_n);
+ keys = new K[n];
+ hashes = new int[n];
+ for ( int i = 0; i < n; i++ ) hashes[i] = 0;
+ }
+ ~LRUishMap() {
+ delete[] keys;
+ delete[] hashes;
+ }
+
+ int _find(const K& k, bool& found) {
+ int h = k.hash();
+ assert( h > 0 );
+ int j = h % n;
+ int first = j;
+ for ( int i = 0; i < MaxChain; i++ ) {
+ if ( hashes[j] == h ) {
+ if ( keys[j] == k ) {
+ found = true;
+ return j;
+ }
+ }
+ else if ( hashes[j] == 0 ) {
+ found = false;
+ return j;
+ }
+ }
+ found = false;
+ return first;
+ }
+
+ V* find(const K& k) {
+ bool found;
+ int j = _find(k, found);
+ return found ? &values[j] : 0;
+ }
+
+ private:
+ int n;
+ K *keys;
+ int *hashes;
+ V *values;
+ };
+
+} // namespace mongo
diff --git a/src/mongo/util/md5.c b/src/mongo/util/md5.c
new file mode 100644
index 00000000000..c35d96c5ef5
--- /dev/null
+++ b/src/mongo/util/md5.c
@@ -0,0 +1,381 @@
+/*
+ Copyright (C) 1999, 2000, 2002 Aladdin Enterprises. All rights reserved.
+
+ This software is provided 'as-is', without any express or implied
+ warranty. In no event will the authors be held liable for any damages
+ arising from the use of this software.
+
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+ 3. This notice may not be removed or altered from any source distribution.
+
+ L. Peter Deutsch
+ ghost@aladdin.com
+
+ */
+/* $Id: md5.c,v 1.6 2002/04/13 19:20:28 lpd Exp $ */
+/*
+ Independent implementation of MD5 (RFC 1321).
+
+ This code implements the MD5 Algorithm defined in RFC 1321, whose
+ text is available at
+ http://www.ietf.org/rfc/rfc1321.txt
+ The code is derived from the text of the RFC, including the test suite
+ (section A.5) but excluding the rest of Appendix A. It does not include
+ any code or documentation that is identified in the RFC as being
+ copyrighted.
+
+ The original and principal author of md5.c is L. Peter Deutsch
+ <ghost@aladdin.com>. Other authors are noted in the change history
+ that follows (in reverse chronological order):
+
+ 2002-04-13 lpd Clarified derivation from RFC 1321; now handles byte order
+ either statically or dynamically; added missing #include <string.h>
+ in library.
+ 2002-03-11 lpd Corrected argument list for main(), and added int return
+ type, in test program and T value program.
+ 2002-02-21 lpd Added missing #include <stdio.h> in test program.
+ 2000-07-03 lpd Patched to eliminate warnings about "constant is
+ unsigned in ANSI C, signed in traditional"; made test program
+ self-checking.
+ 1999-11-04 lpd Edited comments slightly for automatic TOC extraction.
+ 1999-10-18 lpd Fixed typo in header comment (ansi2knr rather than md5).
+ 1999-05-03 lpd Original version.
+ */
+
+#include "md5.h"
+#include <string.h>
+
+#undef BYTE_ORDER /* 1 = big-endian, -1 = little-endian, 0 = unknown */
+#ifdef ARCH_IS_BIG_ENDIAN
+# define BYTE_ORDER (ARCH_IS_BIG_ENDIAN ? 1 : -1)
+#else
+# define BYTE_ORDER 0
+#endif
+
+#define T_MASK ((md5_word_t)~0)
+#define T1 /* 0xd76aa478 */ (T_MASK ^ 0x28955b87)
+#define T2 /* 0xe8c7b756 */ (T_MASK ^ 0x173848a9)
+#define T3 0x242070db
+#define T4 /* 0xc1bdceee */ (T_MASK ^ 0x3e423111)
+#define T5 /* 0xf57c0faf */ (T_MASK ^ 0x0a83f050)
+#define T6 0x4787c62a
+#define T7 /* 0xa8304613 */ (T_MASK ^ 0x57cfb9ec)
+#define T8 /* 0xfd469501 */ (T_MASK ^ 0x02b96afe)
+#define T9 0x698098d8
+#define T10 /* 0x8b44f7af */ (T_MASK ^ 0x74bb0850)
+#define T11 /* 0xffff5bb1 */ (T_MASK ^ 0x0000a44e)
+#define T12 /* 0x895cd7be */ (T_MASK ^ 0x76a32841)
+#define T13 0x6b901122
+#define T14 /* 0xfd987193 */ (T_MASK ^ 0x02678e6c)
+#define T15 /* 0xa679438e */ (T_MASK ^ 0x5986bc71)
+#define T16 0x49b40821
+#define T17 /* 0xf61e2562 */ (T_MASK ^ 0x09e1da9d)
+#define T18 /* 0xc040b340 */ (T_MASK ^ 0x3fbf4cbf)
+#define T19 0x265e5a51
+#define T20 /* 0xe9b6c7aa */ (T_MASK ^ 0x16493855)
+#define T21 /* 0xd62f105d */ (T_MASK ^ 0x29d0efa2)
+#define T22 0x02441453
+#define T23 /* 0xd8a1e681 */ (T_MASK ^ 0x275e197e)
+#define T24 /* 0xe7d3fbc8 */ (T_MASK ^ 0x182c0437)
+#define T25 0x21e1cde6
+#define T26 /* 0xc33707d6 */ (T_MASK ^ 0x3cc8f829)
+#define T27 /* 0xf4d50d87 */ (T_MASK ^ 0x0b2af278)
+#define T28 0x455a14ed
+#define T29 /* 0xa9e3e905 */ (T_MASK ^ 0x561c16fa)
+#define T30 /* 0xfcefa3f8 */ (T_MASK ^ 0x03105c07)
+#define T31 0x676f02d9
+#define T32 /* 0x8d2a4c8a */ (T_MASK ^ 0x72d5b375)
+#define T33 /* 0xfffa3942 */ (T_MASK ^ 0x0005c6bd)
+#define T34 /* 0x8771f681 */ (T_MASK ^ 0x788e097e)
+#define T35 0x6d9d6122
+#define T36 /* 0xfde5380c */ (T_MASK ^ 0x021ac7f3)
+#define T37 /* 0xa4beea44 */ (T_MASK ^ 0x5b4115bb)
+#define T38 0x4bdecfa9
+#define T39 /* 0xf6bb4b60 */ (T_MASK ^ 0x0944b49f)
+#define T40 /* 0xbebfbc70 */ (T_MASK ^ 0x4140438f)
+#define T41 0x289b7ec6
+#define T42 /* 0xeaa127fa */ (T_MASK ^ 0x155ed805)
+#define T43 /* 0xd4ef3085 */ (T_MASK ^ 0x2b10cf7a)
+#define T44 0x04881d05
+#define T45 /* 0xd9d4d039 */ (T_MASK ^ 0x262b2fc6)
+#define T46 /* 0xe6db99e5 */ (T_MASK ^ 0x1924661a)
+#define T47 0x1fa27cf8
+#define T48 /* 0xc4ac5665 */ (T_MASK ^ 0x3b53a99a)
+#define T49 /* 0xf4292244 */ (T_MASK ^ 0x0bd6ddbb)
+#define T50 0x432aff97
+#define T51 /* 0xab9423a7 */ (T_MASK ^ 0x546bdc58)
+#define T52 /* 0xfc93a039 */ (T_MASK ^ 0x036c5fc6)
+#define T53 0x655b59c3
+#define T54 /* 0x8f0ccc92 */ (T_MASK ^ 0x70f3336d)
+#define T55 /* 0xffeff47d */ (T_MASK ^ 0x00100b82)
+#define T56 /* 0x85845dd1 */ (T_MASK ^ 0x7a7ba22e)
+#define T57 0x6fa87e4f
+#define T58 /* 0xfe2ce6e0 */ (T_MASK ^ 0x01d3191f)
+#define T59 /* 0xa3014314 */ (T_MASK ^ 0x5cfebceb)
+#define T60 0x4e0811a1
+#define T61 /* 0xf7537e82 */ (T_MASK ^ 0x08ac817d)
+#define T62 /* 0xbd3af235 */ (T_MASK ^ 0x42c50dca)
+#define T63 0x2ad7d2bb
+#define T64 /* 0xeb86d391 */ (T_MASK ^ 0x14792c6e)
+
+
+static void
+md5_process(md5_state_t *pms, const md5_byte_t *data /*[64]*/)
+{
+ md5_word_t
+ a = pms->abcd[0], b = pms->abcd[1],
+ c = pms->abcd[2], d = pms->abcd[3];
+ md5_word_t t;
+#if BYTE_ORDER > 0
+ /* Define storage only for big-endian CPUs. */
+ md5_word_t X[16];
+#else
+ /* Define storage for little-endian or both types of CPUs. */
+ md5_word_t xbuf[16];
+ const md5_word_t *X;
+#endif
+
+ {
+#if BYTE_ORDER == 0
+ /*
+ * Determine dynamically whether this is a big-endian or
+ * little-endian machine, since we can use a more efficient
+ * algorithm on the latter.
+ */
+ static const int w = 1;
+
+ if (*((const md5_byte_t *)&w)) /* dynamic little-endian */
+#endif
+#if BYTE_ORDER <= 0 /* little-endian */
+ {
+ /*
+ * On little-endian machines, we can process properly aligned
+ * data without copying it.
+ */
+ if (!((data - (const md5_byte_t *)0) & 3)) {
+ /* data are properly aligned */
+ X = (const md5_word_t *)data;
+ } else {
+ /* not aligned */
+ memcpy(xbuf, data, 64);
+ X = xbuf;
+ }
+ }
+#endif
+#if BYTE_ORDER == 0
+ else /* dynamic big-endian */
+#endif
+#if BYTE_ORDER >= 0 /* big-endian */
+ {
+ /*
+ * On big-endian machines, we must arrange the bytes in the
+ * right order.
+ */
+ const md5_byte_t *xp = data;
+ int i;
+
+# if BYTE_ORDER == 0
+ X = xbuf; /* (dynamic only) */
+# else
+# define xbuf X /* (static only) */
+# endif
+ for (i = 0; i < 16; ++i, xp += 4)
+ xbuf[i] = xp[0] + (xp[1] << 8) + (xp[2] << 16) + (xp[3] << 24);
+ }
+#endif
+ }
+
+#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32 - (n))))
+
+ /* Round 1. */
+ /* Let [abcd k s i] denote the operation
+ a = b + ((a + F(b,c,d) + X[k] + T[i]) <<< s). */
+#define F(x, y, z) (((x) & (y)) | (~(x) & (z)))
+#define SET(a, b, c, d, k, s, Ti)\
+ t = a + F(b,c,d) + X[k] + Ti;\
+ a = ROTATE_LEFT(t, s) + b
+ /* Do the following 16 operations. */
+ SET(a, b, c, d, 0, 7, T1);
+ SET(d, a, b, c, 1, 12, T2);
+ SET(c, d, a, b, 2, 17, T3);
+ SET(b, c, d, a, 3, 22, T4);
+ SET(a, b, c, d, 4, 7, T5);
+ SET(d, a, b, c, 5, 12, T6);
+ SET(c, d, a, b, 6, 17, T7);
+ SET(b, c, d, a, 7, 22, T8);
+ SET(a, b, c, d, 8, 7, T9);
+ SET(d, a, b, c, 9, 12, T10);
+ SET(c, d, a, b, 10, 17, T11);
+ SET(b, c, d, a, 11, 22, T12);
+ SET(a, b, c, d, 12, 7, T13);
+ SET(d, a, b, c, 13, 12, T14);
+ SET(c, d, a, b, 14, 17, T15);
+ SET(b, c, d, a, 15, 22, T16);
+#undef SET
+
+ /* Round 2. */
+ /* Let [abcd k s i] denote the operation
+ a = b + ((a + G(b,c,d) + X[k] + T[i]) <<< s). */
+#define G(x, y, z) (((x) & (z)) | ((y) & ~(z)))
+#define SET(a, b, c, d, k, s, Ti)\
+ t = a + G(b,c,d) + X[k] + Ti;\
+ a = ROTATE_LEFT(t, s) + b
+ /* Do the following 16 operations. */
+ SET(a, b, c, d, 1, 5, T17);
+ SET(d, a, b, c, 6, 9, T18);
+ SET(c, d, a, b, 11, 14, T19);
+ SET(b, c, d, a, 0, 20, T20);
+ SET(a, b, c, d, 5, 5, T21);
+ SET(d, a, b, c, 10, 9, T22);
+ SET(c, d, a, b, 15, 14, T23);
+ SET(b, c, d, a, 4, 20, T24);
+ SET(a, b, c, d, 9, 5, T25);
+ SET(d, a, b, c, 14, 9, T26);
+ SET(c, d, a, b, 3, 14, T27);
+ SET(b, c, d, a, 8, 20, T28);
+ SET(a, b, c, d, 13, 5, T29);
+ SET(d, a, b, c, 2, 9, T30);
+ SET(c, d, a, b, 7, 14, T31);
+ SET(b, c, d, a, 12, 20, T32);
+#undef SET
+
+ /* Round 3. */
+ /* Let [abcd k s t] denote the operation
+ a = b + ((a + H(b,c,d) + X[k] + T[i]) <<< s). */
+#define H(x, y, z) ((x) ^ (y) ^ (z))
+#define SET(a, b, c, d, k, s, Ti)\
+ t = a + H(b,c,d) + X[k] + Ti;\
+ a = ROTATE_LEFT(t, s) + b
+ /* Do the following 16 operations. */
+ SET(a, b, c, d, 5, 4, T33);
+ SET(d, a, b, c, 8, 11, T34);
+ SET(c, d, a, b, 11, 16, T35);
+ SET(b, c, d, a, 14, 23, T36);
+ SET(a, b, c, d, 1, 4, T37);
+ SET(d, a, b, c, 4, 11, T38);
+ SET(c, d, a, b, 7, 16, T39);
+ SET(b, c, d, a, 10, 23, T40);
+ SET(a, b, c, d, 13, 4, T41);
+ SET(d, a, b, c, 0, 11, T42);
+ SET(c, d, a, b, 3, 16, T43);
+ SET(b, c, d, a, 6, 23, T44);
+ SET(a, b, c, d, 9, 4, T45);
+ SET(d, a, b, c, 12, 11, T46);
+ SET(c, d, a, b, 15, 16, T47);
+ SET(b, c, d, a, 2, 23, T48);
+#undef SET
+
+ /* Round 4. */
+ /* Let [abcd k s t] denote the operation
+ a = b + ((a + I(b,c,d) + X[k] + T[i]) <<< s). */
+#define I(x, y, z) ((y) ^ ((x) | ~(z)))
+#define SET(a, b, c, d, k, s, Ti)\
+ t = a + I(b,c,d) + X[k] + Ti;\
+ a = ROTATE_LEFT(t, s) + b
+ /* Do the following 16 operations. */
+ SET(a, b, c, d, 0, 6, T49);
+ SET(d, a, b, c, 7, 10, T50);
+ SET(c, d, a, b, 14, 15, T51);
+ SET(b, c, d, a, 5, 21, T52);
+ SET(a, b, c, d, 12, 6, T53);
+ SET(d, a, b, c, 3, 10, T54);
+ SET(c, d, a, b, 10, 15, T55);
+ SET(b, c, d, a, 1, 21, T56);
+ SET(a, b, c, d, 8, 6, T57);
+ SET(d, a, b, c, 15, 10, T58);
+ SET(c, d, a, b, 6, 15, T59);
+ SET(b, c, d, a, 13, 21, T60);
+ SET(a, b, c, d, 4, 6, T61);
+ SET(d, a, b, c, 11, 10, T62);
+ SET(c, d, a, b, 2, 15, T63);
+ SET(b, c, d, a, 9, 21, T64);
+#undef SET
+
+ /* Then perform the following additions. (That is increment each
+ of the four registers by the value it had before this block
+ was started.) */
+ pms->abcd[0] += a;
+ pms->abcd[1] += b;
+ pms->abcd[2] += c;
+ pms->abcd[3] += d;
+}
+
+void
+md5_init(md5_state_t *pms)
+{
+ pms->count[0] = pms->count[1] = 0;
+ pms->abcd[0] = 0x67452301;
+ pms->abcd[1] = /*0xefcdab89*/ T_MASK ^ 0x10325476;
+ pms->abcd[2] = /*0x98badcfe*/ T_MASK ^ 0x67452301;
+ pms->abcd[3] = 0x10325476;
+}
+
+void
+md5_append(md5_state_t *pms, const md5_byte_t *data, int nbytes)
+{
+ const md5_byte_t *p = data;
+ int left = nbytes;
+ int offset = (pms->count[0] >> 3) & 63;
+ md5_word_t nbits = (md5_word_t)(nbytes << 3);
+
+ if (nbytes <= 0)
+ return;
+
+ /* Update the message length. */
+ pms->count[1] += nbytes >> 29;
+ pms->count[0] += nbits;
+ if (pms->count[0] < nbits)
+ pms->count[1]++;
+
+ /* Process an initial partial block. */
+ if (offset) {
+ int copy = (offset + nbytes > 64 ? 64 - offset : nbytes);
+
+ memcpy(pms->buf + offset, p, copy);
+ if (offset + copy < 64)
+ return;
+ p += copy;
+ left -= copy;
+ md5_process(pms, pms->buf);
+ }
+
+ /* Process full blocks. */
+ for (; left >= 64; p += 64, left -= 64)
+ md5_process(pms, p);
+
+ /* Process a final partial block. */
+ if (left)
+ memcpy(pms->buf, p, left);
+}
+
+void
+md5_finish(md5_state_t *pms, md5_byte_t digest[16])
+{
+ static const md5_byte_t pad[64] = {
+ 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ };
+ md5_byte_t data[8];
+ int i;
+
+ /* Save the length before padding. */
+ for (i = 0; i < 8; ++i)
+ data[i] = (md5_byte_t)(pms->count[i >> 2] >> ((i & 3) << 3));
+ /* Pad to 56 bytes mod 64. */
+ md5_append(pms, pad, ((55 - (pms->count[0] >> 3)) & 63) + 1);
+ /* Append the length. */
+ md5_append(pms, data, 8);
+ for (i = 0; i < 16; ++i)
+ digest[i] = (md5_byte_t)(pms->abcd[i >> 2] >> ((i & 3) << 3));
+}
diff --git a/src/mongo/util/md5.h b/src/mongo/util/md5.h
new file mode 100644
index 00000000000..a3f3b6db0e2
--- /dev/null
+++ b/src/mongo/util/md5.h
@@ -0,0 +1,91 @@
+/*
+ Copyright (C) 1999, 2002 Aladdin Enterprises. All rights reserved.
+
+ This software is provided 'as-is', without any express or implied
+ warranty. In no event will the authors be held liable for any damages
+ arising from the use of this software.
+
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+ 3. This notice may not be removed or altered from any source distribution.
+
+ L. Peter Deutsch
+ ghost@aladdin.com
+
+ */
+/* $Id: md5.h,v 1.4 2002/04/13 19:20:28 lpd Exp $ */
+/*
+ Independent implementation of MD5 (RFC 1321).
+
+ This code implements the MD5 Algorithm defined in RFC 1321, whose
+ text is available at
+ http://www.ietf.org/rfc/rfc1321.txt
+ The code is derived from the text of the RFC, including the test suite
+ (section A.5) but excluding the rest of Appendix A. It does not include
+ any code or documentation that is identified in the RFC as being
+ copyrighted.
+
+ The original and principal author of md5.h is L. Peter Deutsch
+ <ghost@aladdin.com>. Other authors are noted in the change history
+ that follows (in reverse chronological order):
+
+ 2002-04-13 lpd Removed support for non-ANSI compilers; removed
+ references to Ghostscript; clarified derivation from RFC 1321;
+ now handles byte order either statically or dynamically.
+ 1999-11-04 lpd Edited comments slightly for automatic TOC extraction.
+ 1999-10-18 lpd Fixed typo in header comment (ansi2knr rather than md5);
+ added conditionalization for C++ compilation from Martin
+ Purschke <purschke@bnl.gov>.
+ 1999-05-03 lpd Original version.
+ */
+
+#ifndef md5_INCLUDED
+# define md5_INCLUDED
+
+/*
+ * This package supports both compile-time and run-time determination of CPU
+ * byte order. If ARCH_IS_BIG_ENDIAN is defined as 0, the code will be
+ * compiled to run only on little-endian CPUs; if ARCH_IS_BIG_ENDIAN is
+ * defined as non-zero, the code will be compiled to run only on big-endian
+ * CPUs; if ARCH_IS_BIG_ENDIAN is not defined, the code will be compiled to
+ * run on either big- or little-endian CPUs, but will run slightly less
+ * efficiently on either one than if ARCH_IS_BIG_ENDIAN is defined.
+ */
+
+typedef unsigned char md5_byte_t; /* 8-bit byte */
+typedef unsigned int md5_word_t; /* 32-bit word */
+
+/* Define the state of the MD5 Algorithm. */
+typedef struct md5_state_s {
+ md5_word_t count[2]; /* message length in bits, lsw first */
+ md5_word_t abcd[4]; /* digest buffer */
+ md5_byte_t buf[64]; /* accumulate block */
+} md5_state_t;
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+ /* Initialize the algorithm. */
+ void md5_init(md5_state_t *pms);
+
+ /* Append a string to the message. */
+ void md5_append(md5_state_t *pms, const md5_byte_t *data, int nbytes);
+
+ /* Finish the message and return the digest. */
+ void md5_finish(md5_state_t *pms, md5_byte_t digest[16]);
+
+#ifdef __cplusplus
+} /* end extern "C" */
+#endif
+
+#endif /* md5_INCLUDED */
diff --git a/src/mongo/util/md5.hpp b/src/mongo/util/md5.hpp
new file mode 100644
index 00000000000..dc061719747
--- /dev/null
+++ b/src/mongo/util/md5.hpp
@@ -0,0 +1,58 @@
+// md5.hpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "md5.h"
+
+namespace mongo {
+
+ typedef unsigned char md5digest[16];
+
+ inline void md5(const void *buf, int nbytes, md5digest digest) {
+ md5_state_t st;
+ md5_init(&st);
+ md5_append(&st, (const md5_byte_t *) buf, nbytes);
+ md5_finish(&st, digest);
+ }
+
+ inline void md5(const char *str, md5digest digest) {
+ md5(str, strlen(str), digest);
+ }
+
+ inline std::string digestToString( md5digest digest ){
+ static const char * letters = "0123456789abcdef";
+ stringstream ss;
+ for ( int i=0; i<16; i++){
+ unsigned char c = digest[i];
+ ss << letters[ ( c >> 4 ) & 0xf ] << letters[ c & 0xf ];
+ }
+ return ss.str();
+ }
+
+ inline std::string md5simpledigest( const void* buf, int nbytes){
+ md5digest d;
+ md5( buf, nbytes , d );
+ return digestToString( d );
+ }
+
+ inline std::string md5simpledigest( string s ){
+ return md5simpledigest(s.data(), s.size());
+ }
+
+
+} // namespace mongo
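
A short usage sketch of the wrappers above; the input string is arbitrary and the example
function name is an assumption:

    #include <iostream>
    // assumes md5.hpp is included and we are inside namespace mongo
    void md5Example() {
        std::string hex = md5simpledigest(std::string("hello world"));
        std::cout << hex << std::endl;   // 32 lowercase hex characters
    }
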
diff --git a/src/mongo/util/md5main.cpp b/src/mongo/util/md5main.cpp
new file mode 100644
index 00000000000..9995fee8fa7
--- /dev/null
+++ b/src/mongo/util/md5main.cpp
@@ -0,0 +1,142 @@
+/*
+ Copyright (C) 2002 Aladdin Enterprises. All rights reserved.
+
+ This software is provided 'as-is', without any express or implied
+ warranty. In no event will the authors be held liable for any damages
+ arising from the use of this software.
+
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+ 3. This notice may not be removed or altered from any source distribution.
+
+ L. Peter Deutsch
+ ghost@aladdin.com
+
+ */
+/* $Id: md5main.c,v 1.1 2002/04/13 19:20:28 lpd Exp $ */
+/*
+ Independent implementation of MD5 (RFC 1321).
+
+ This code implements the MD5 Algorithm defined in RFC 1321, whose
+ text is available at
+ http://www.ietf.org/rfc/rfc1321.txt
+ The code is derived from the text of the RFC, including the test suite
+ (section A.5) but excluding the rest of Appendix A. It does not include
+ any code or documentation that is identified in the RFC as being
+ copyrighted.
+
+ The original and principal author of md5.c is L. Peter Deutsch
+ <ghost@aladdin.com>. Other authors are noted in the change history
+ that follows (in reverse chronological order):
+
+ 2002-04-13 lpd Splits off main program into a separate file, md5main.c.
+ */
+
+#include "pch.h"
+#include "md5.h"
+#include <math.h>
+#include <stdio.h>
+#include <string.h>
+
+/*
+ * This file builds an executable that performs various functions related
+ * to the MD5 library. Typical compilation:
+ * gcc -o md5main -lm md5main.c md5.c
+ */
+static const char *const usage = "\
+Usage:\n\
+ md5main --test # run the self-test (A.5 of RFC 1321)\n\
+ md5main --t-values # print the T values for the library\n\
+ md5main --version # print the version of the package\n\
+";
+static const char *const version = "2002-04-13";
+
+/* modified: not static, renamed */
+/* Run the self-test. */
+/*static*/ int
+//do_test(void)
+do_md5_test(void) {
+ static const char *const test[7*2] = {
+ "", "d41d8cd98f00b204e9800998ecf8427e",
+ "a", "0cc175b9c0f1b6a831c399e269772661",
+ "abc", "900150983cd24fb0d6963f7d28e17f72",
+ "message digest", "f96b697d7cb7938d525a2f31aaf161d0",
+ "abcdefghijklmnopqrstuvwxyz", "c3fcd3d76192e4007dfb496cca67e13b",
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789",
+ "d174ab98d277d9f5a5611c2c9f419d9f",
+ "12345678901234567890123456789012345678901234567890123456789012345678901234567890", "57edf4a22be3c955ac49da2e2107b67a"
+ };
+ int i;
+ int status = 0;
+
+ for (i = 0; i < 7*2; i += 2) {
+ md5_state_t state;
+ md5_byte_t digest[16];
+ char hex_output[16*2 + 1];
+ int di;
+
+ md5_init(&state);
+ md5_append(&state, (const md5_byte_t *)test[i], strlen(test[i]));
+ md5_finish(&state, digest);
+ for (di = 0; di < 16; ++di)
+ sprintf(hex_output + di * 2, "%02x", digest[di]);
+ if (strcmp(hex_output, test[i + 1])) {
+ printf("MD5 (\"%s\") = ", test[i]);
+ puts(hex_output);
+ printf("**** ERROR, should be: %s\n", test[i + 1]);
+ status = 1;
+ }
+ }
+// if (status == 0)
+ /*modified commented out: puts("md5 self-test completed successfully."); */
+ return status;
+}
+
+/* Print the T values. */
+static int
+do_t_values(void) {
+ int i;
+ for (i = 1; i <= 64; ++i) {
+ unsigned long v = (unsigned long)(4294967296.0 * fabs(sin((double)i)));
+
+ /*
+ * The following nonsense is only to avoid compiler warnings about
+ * "integer constant is unsigned in ANSI C, signed with -traditional".
+ */
+ if (v >> 31) {
+ printf("#define T%d /* 0x%08lx */ (T_MASK ^ 0x%08lx)\n", i,
+ v, (unsigned long)(unsigned int)(~v));
+ }
+ else {
+ printf("#define T%d 0x%08lx\n", i, v);
+ }
+ }
+ return 0;
+}
+
+/* modified from original code changed function name main->md5main */
+/* Main program */
+int
+md5main(int argc, char *argv[]) {
+ if (argc == 2) {
+ if (!strcmp(argv[1], "--test"))
+ return do_md5_test();
+ if (!strcmp(argv[1], "--t-values"))
+ return do_t_values();
+ if (!strcmp(argv[1], "--version")) {
+ puts(version);
+ return 0;
+ }
+ }
+ puts(usage);
+ return 0;
+}
+
diff --git a/src/mongo/util/mmap.cpp b/src/mongo/util/mmap.cpp
new file mode 100755
index 00000000000..1eb0242e657
--- /dev/null
+++ b/src/mongo/util/mmap.cpp
@@ -0,0 +1,211 @@
+// mmap.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "mmap.h"
+#include "processinfo.h"
+#include "concurrency/rwlock.h"
+#include "../db/namespace.h"
+#include "../db/cmdline.h"
+
+namespace mongo {
+
+ set<MongoFile*> MongoFile::mmfiles;
+ map<string,MongoFile*> MongoFile::pathToFile;
+
+ /* Create. Must not exist.
+ @param zero fill file with zeros when true
+ */
+ void* MemoryMappedFile::create(string filename, unsigned long long len, bool zero) {
+ uassert( 13468, string("can't create file already exists ") + filename, !exists(filename) );
+ void *p = map(filename.c_str(), len);
+ if( p && zero ) {
+ size_t sz = (size_t) len;
+ assert( len == sz );
+ memset(p, 0, sz);
+ }
+ return p;
+ }
+
+ /*static*/ void MemoryMappedFile::updateLength( const char *filename, unsigned long long &length ) {
+ if ( !boost::filesystem::exists( filename ) )
+ return;
+ // make sure we map full length if preexisting file.
+ boost::uintmax_t l = boost::filesystem::file_size( filename );
+ length = l;
+ }
+
+ void* MemoryMappedFile::map(const char *filename) {
+ unsigned long long l;
+ try {
+ l = boost::filesystem::file_size( filename );
+ }
+ catch(boost::filesystem::filesystem_error& e) {
+ uasserted(15922, str::stream() << "couldn't get file length when opening mapping " << filename << ' ' << e.what() );
+ }
+ return map( filename , l );
+ }
+ void* MemoryMappedFile::mapWithOptions(const char *filename, int options) {
+ unsigned long long l;
+ try {
+ l = boost::filesystem::file_size( filename );
+ }
+ catch(boost::filesystem::filesystem_error& e) {
+ uasserted(15923, str::stream() << "couldn't get file length when opening mapping " << filename << ' ' << e.what() );
+ }
+ return map( filename , l, options );
+ }
+
+ /* --- MongoFile -------------------------------------------------
+ this is the administrative stuff
+ */
+
+ RWLockRecursiveNongreedy LockMongoFilesShared::mmmutex("mmmutex",10*60*1000 /* 10 minutes */);
+ unsigned LockMongoFilesShared::era = 99; // note this rolls over
+
+ /* subclass must call in destructor (or at close).
+ removes this from pathToFile and other maps
+ safe to call more than once, albeit might be wasted work
+       ideally called near the close(), if the close happens well before object destruction
+ */
+ void MongoFile::destroyed() {
+ LockMongoFilesShared::assertExclusivelyLocked();
+ mmfiles.erase(this);
+ pathToFile.erase( filename() );
+ }
+
+ /*static*/
+ void MongoFile::closeAllFiles( stringstream &message ) {
+ static int closingAllFiles = 0;
+ if ( closingAllFiles ) {
+ message << "warning closingAllFiles=" << closingAllFiles << endl;
+ return;
+ }
+ ++closingAllFiles;
+
+ LockMongoFilesExclusive lk;
+
+ ProgressMeter pm( mmfiles.size() , 2 , 1 );
+ set<MongoFile*> temp = mmfiles;
+ for ( set<MongoFile*>::iterator i = temp.begin(); i != temp.end(); i++ ) {
+ (*i)->close(); // close() now removes from mmfiles
+ pm.hit();
+ }
+ message << "closeAllFiles() finished";
+ --closingAllFiles;
+ }
+
+ /*static*/ long long MongoFile::totalMappedLength() {
+ unsigned long long total = 0;
+
+ LockMongoFilesShared lk;
+
+ for ( set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++ )
+ total += (*i)->length();
+
+ return total;
+ }
+
+ void nullFunc() { }
+
+ // callback notifications
+ void (*MongoFile::notifyPreFlush)() = nullFunc;
+ void (*MongoFile::notifyPostFlush)() = nullFunc;
+
+ /*static*/ int MongoFile::flushAll( bool sync ) {
+ notifyPreFlush();
+ int x = _flushAll(sync);
+ notifyPostFlush();
+ return x;
+ }
+
+ /*static*/ int MongoFile::_flushAll( bool sync ) {
+ if ( ! sync ) {
+ int num = 0;
+ LockMongoFilesShared lk;
+ for ( set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++ ) {
+ num++;
+ MongoFile * mmf = *i;
+ if ( ! mmf )
+ continue;
+
+ mmf->flush( sync );
+ }
+ return num;
+ }
+
+ // want to do it sync
+ set<MongoFile*> seen;
+ while ( true ) {
+ auto_ptr<Flushable> f;
+ {
+ LockMongoFilesShared lk;
+ for ( set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++ ) {
+ MongoFile * mmf = *i;
+ if ( ! mmf )
+ continue;
+ if ( seen.count( mmf ) )
+ continue;
+ f.reset( mmf->prepareFlush() );
+ seen.insert( mmf );
+ break;
+ }
+ }
+ if ( ! f.get() )
+ break;
+
+ f->flush();
+ }
+ return seen.size();
+ }
+
+ void MongoFile::created() {
+ LockMongoFilesExclusive lk;
+ mmfiles.insert(this);
+ }
+
+ void MongoFile::setFilename(string fn) {
+ LockMongoFilesExclusive lk;
+ assert( _filename.empty() );
+ _filename = fn;
+ MongoFile *&ptf = pathToFile[fn];
+ massert(13617, "MongoFile : multiple opens of same filename", ptf == 0);
+ ptf = this;
+ }
+
+#if defined(_DEBUG)
+ void MongoFile::markAllWritable() {
+ if( cmdLine.dur )
+ return;
+ LockMongoFilesShared lk;
+ for ( set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++ ) {
+ MongoFile * mmf = *i;
+ if (mmf) mmf->_lock();
+ }
+ }
+
+ void MongoFile::unmarkAllWritable() {
+ if( cmdLine.dur )
+ return;
+ LockMongoFilesShared lk;
+ for ( set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++ ) {
+ MongoFile * mmf = *i;
+ if (mmf) mmf->_unlock();
+ }
+ }
+#endif
+} // namespace mongo
diff --git a/src/mongo/util/mmap.h b/src/mongo/util/mmap.h
new file mode 100644
index 00000000000..2d4454bbc7f
--- /dev/null
+++ b/src/mongo/util/mmap.h
@@ -0,0 +1,305 @@
+// mmap.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+#include <boost/thread/xtime.hpp>
+#include "concurrency/rwlock.h"
+
+namespace mongo {
+
+ class MAdvise {
+ void *_p;
+ unsigned _len;
+ public:
+ enum Advice { Sequential=1 };
+ MAdvise(void *p, unsigned len, Advice a);
+ ~MAdvise(); // destructor resets the range to MADV_NORMAL
+ };
+
+ // lock order: lock dbMutex before this if you lock both
+ class LockMongoFilesShared {
+ friend class LockMongoFilesExclusive;
+ static RWLockRecursiveNongreedy mmmutex;
+ static unsigned era;
+ RWLockRecursive::Shared lk;
+ public:
+ LockMongoFilesShared() : lk(mmmutex) { }
+
+        /** era changes anytime memory maps come and go.  thus you can use this as a cheap way to verify
+            that things are still in the condition you expected.  of course you must be shared locked,
+            otherwise a change could be in progress.  if you had unlocked earlier, comparing eras is a
+            reasonable way to check that your memory mapped pointer is still good.
+ */
+ static unsigned getEra() { return era; }
+
+ static void assertExclusivelyLocked() { mmmutex.assertExclusivelyLocked(); }
+ };
+
+ class LockMongoFilesExclusive {
+ RWLockRecursive::Exclusive lk;
+ public:
+ LockMongoFilesExclusive() : lk(LockMongoFilesShared::mmmutex) {
+ LockMongoFilesShared::era++;
+ }
+ };
+
+ /* the administrative-ish stuff here */
+ class MongoFile : boost::noncopyable {
+ public:
+ /** Flushable has to fail nicely if the underlying object gets killed */
+ class Flushable {
+ public:
+ virtual ~Flushable() {}
+ virtual void flush() = 0;
+ };
+
+ virtual ~MongoFile() {}
+
+ enum Options {
+ SEQUENTIAL = 1, // hint - e.g. FILE_FLAG_SEQUENTIAL_SCAN on windows
+ READONLY = 2 // not contractually guaranteed, but if specified the impl has option to fault writes
+ };
+
+ /** @param fun is called for each MongoFile.
+            called from within a mutex that MongoFile uses. so be careful not to deadlock.
+ */
+ template < class F >
+ static void forEach( F fun );
+
+        /** note: you need to hold mmmutex when using this. forEach (above) handles that for you automatically. */
+ static set<MongoFile*>& getAllFiles() { return mmfiles; }
+
+ // callbacks if you need them
+ static void (*notifyPreFlush)();
+ static void (*notifyPostFlush)();
+
+ static int flushAll( bool sync ); // returns n flushed
+ static long long totalMappedLength();
+ static void closeAllFiles( stringstream &message );
+
+#if defined(_DEBUG)
+ static void markAllWritable();
+ static void unmarkAllWritable();
+#else
+ static void markAllWritable() { }
+ static void unmarkAllWritable() { }
+#endif
+
+ static bool exists(boost::filesystem::path p) { return boost::filesystem::exists(p); }
+
+ virtual bool isMongoMMF() { return false; }
+
+ string filename() const { return _filename; }
+ void setFilename(string fn);
+
+ private:
+ string _filename;
+ static int _flushAll( bool sync ); // returns n flushed
+ protected:
+ virtual void close() = 0;
+ virtual void flush(bool sync) = 0;
+ /**
+ * returns a thread safe object that you can call flush on
+ * Flushable has to fail nicely if the underlying object gets killed
+ */
+ virtual Flushable * prepareFlush() = 0;
+
+ void created(); /* subclass must call after create */
+
+ /* subclass must call in destructor (or at close).
+ removes this from pathToFile and other maps
+ safe to call more than once, albeit might be wasted work
+           ideally called near the close(), if the close happens well before object destruction
+ */
+ void destroyed();
+
+ virtual unsigned long long length() const = 0;
+
+ // only supporting on posix mmap
+ virtual void _lock() {}
+ virtual void _unlock() {}
+
+ static set<MongoFile*> mmfiles;
+ public:
+ static map<string,MongoFile*> pathToFile;
+ };
+
+    /** look up a MongoFile by filename. scoped mutex locking convention.
+        example:
+          MongoFileFinder finder;
+          MongoFile *a = finder.findByPath("file_name_a");
+          MongoFile *b = finder.findByPath("file_name_b");
+    */
+ class MongoFileFinder : boost::noncopyable {
+ public:
+ MongoFileFinder() { }
+
+ /** @return The MongoFile object associated with the specified file name. If no file is open
+ with the specified name, returns null.
+ */
+ MongoFile* findByPath(string path) {
+ map<string,MongoFile*>::iterator i = MongoFile::pathToFile.find(path);
+ return i == MongoFile::pathToFile.end() ? NULL : i->second;
+ }
+
+ private:
+ LockMongoFilesShared _lk;
+ };
+
+ struct MongoFileAllowWrites {
+ MongoFileAllowWrites() {
+ MongoFile::markAllWritable();
+ }
+ ~MongoFileAllowWrites() {
+ MongoFile::unmarkAllWritable();
+ }
+ };
+
+ class MemoryMappedFile : public MongoFile {
+ protected:
+ virtual void* viewForFlushing() {
+ if( views.size() == 0 )
+ return 0;
+ assert( views.size() == 1 );
+ return views[0];
+ }
+ public:
+ MemoryMappedFile();
+
+ virtual ~MemoryMappedFile() {
+ LockMongoFilesExclusive lk;
+ close();
+ }
+
+ virtual void close();
+
+ // Throws exception if file doesn't exist. (dm may2010: not sure if this is always true?)
+ void* map(const char *filename);
+
+ /** @param options see MongoFile::Options
+ */
+ void* mapWithOptions(const char *filename, int options);
+
+        /* Creates the file with the given length if it does not exist; otherwise maps the
+           existing file and writes its actual length back into the passed length reference.
+           @param options MongoFile::Options bits
+        */
+ void* map(const char *filename, unsigned long long &length, int options = 0 );
+
+ /* Create. Must not exist.
+ @param zero fill file with zeros when true
+ */
+ void* create(string filename, unsigned long long len, bool zero);
+
+ void flush(bool sync);
+ virtual Flushable * prepareFlush();
+
+ long shortLength() const { return (long) len; }
+ unsigned long long length() const { return len; }
+
+ /** create a new view with the specified properties.
+ automatically cleaned up upon close/destruction of the MemoryMappedFile object.
+ */
+ void* createReadOnlyMap();
+ void* createPrivateMap();
+
+ /** make the private map range writable (necessary for our windows implementation) */
+ static void makeWritable(void *, unsigned len)
+#if defined(_WIN32)
+ ;
+#else
+ { }
+#endif
+
+ private:
+ static void updateLength( const char *filename, unsigned long long &length );
+
+ HANDLE fd;
+ HANDLE maphandle;
+ vector<void *> views;
+ unsigned long long len;
+
+#ifdef _WIN32
+ boost::shared_ptr<mutex> _flushMutex;
+ void clearWritableBits(void *privateView);
+ public:
+ static const unsigned ChunkSize = 64 * 1024 * 1024;
+ static const unsigned NChunks = 1024 * 1024;
+#else
+ void clearWritableBits(void *privateView) { }
+#endif
+
+ protected:
+ // only posix mmap implementations will support this
+ virtual void _lock();
+ virtual void _unlock();
+
+ /** close the current private view and open a new replacement */
+ void* remapPrivateView(void *oldPrivateAddr);
+ };
+
+ typedef MemoryMappedFile MMF;
+
+ /** p is called from within a mutex that MongoFile uses. so be careful not to deadlock. */
+ template < class F >
+ inline void MongoFile::forEach( F p ) {
+ LockMongoFilesShared lklk;
+ for ( set<MongoFile*>::iterator i = mmfiles.begin(); i != mmfiles.end(); i++ )
+ p(*i);
+ }
+
+#if defined(_WIN32)
+ class ourbitset {
+ volatile unsigned bits[MemoryMappedFile::NChunks]; // volatile as we are doing double check locking
+ public:
+ ourbitset() {
+ memset((void*) bits, 0, sizeof(bits));
+ }
+ bool get(unsigned i) const {
+ unsigned x = i / 32;
+ assert( x < MemoryMappedFile::NChunks );
+ return (bits[x] & (1 << (i%32))) != 0;
+ }
+ void set(unsigned i) {
+ unsigned x = i / 32;
+ wassert( x < (MemoryMappedFile::NChunks*2/3) ); // warn if getting close to limit
+ assert( x < MemoryMappedFile::NChunks );
+ bits[x] |= (1 << (i%32));
+ }
+ void clear(unsigned i) {
+ unsigned x = i / 32;
+ assert( x < MemoryMappedFile::NChunks );
+ bits[x] &= ~(1 << (i%32));
+ }
+ };
+ extern ourbitset writable;
+ void makeChunkWritable(size_t chunkno);
+ inline void MemoryMappedFile::makeWritable(void *_p, unsigned len) {
+ size_t p = (size_t) _p;
+ unsigned a = p/ChunkSize;
+ unsigned b = (p+len)/ChunkSize;
+ for( unsigned i = a; i <= b; i++ ) {
+ if( !writable.get(i) ) {
+ makeChunkWritable(i);
+ }
+ }
+ }
+
+#endif
+
+} // namespace mongo
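
A sketch of the locking conventions above: forEach takes the shared lock itself, while
MongoFileFinder holds it for the lifetime of the finder object. The functor, function name,
and path below are illustrative assumptions:

    #include <iostream>
    // assumes mmap.h is included and we are inside namespace mongo
    struct PrintName {
        void operator()(MongoFile *mf) { std::cout << mf->filename() << std::endl; }
    };
    void listMappedFiles() {
        MongoFile::forEach( PrintName() );        // locks mmmutex shared; do not block or re-lock inside
        MongoFileFinder finder;                   // holds the shared lock while in scope
        MongoFile *f = finder.findByPath("/data/db/example.0");
        if ( f )
            std::cout << "open: " << f->filename() << std::endl;
    }
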
diff --git a/src/mongo/util/mmap_mm.cpp b/src/mongo/util/mmap_mm.cpp
new file mode 100644
index 00000000000..ec2400e02d3
--- /dev/null
+++ b/src/mongo/util/mmap_mm.cpp
@@ -0,0 +1,52 @@
+// mmap_mm.cpp - in memory (no file) version
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "mmap.h"
+
+/* in memory (no file) version */
+
+namespace mongo {
+
+    MemoryMappedFile::MemoryMappedFile() {
+        fd = 0;
+        maphandle = 0;
+        len = 0;
+    }
+
+    void MemoryMappedFile::close() {
+        // in-memory variant: the single allocation is tracked in the views vector declared in mmap.h
+        for( vector<void*>::iterator i = views.begin(); i != views.end(); i++ )
+            free( *i );
+        views.clear();
+        len = 0;
+    }
+
+    void* MemoryMappedFile::map(const char *filename, unsigned long long &length, int options ) {
+        assert( length );
+        void *view = malloc( (size_t) length );
+        if ( view )
+            views.push_back( view );
+        len = length;
+        return view;
+    }
+
+ void MemoryMappedFile::flush(bool sync) {
+ }
+
+ void MemoryMappedFile::_lock() {}
+ void MemoryMappedFile::_unlock() {}
+
+}
+
diff --git a/src/mongo/util/mmap_posix.cpp b/src/mongo/util/mmap_posix.cpp
new file mode 100644
index 00000000000..8097ef1b370
--- /dev/null
+++ b/src/mongo/util/mmap_posix.cpp
@@ -0,0 +1,214 @@
+// mmap_posix.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "mmap.h"
+#include "file_allocator.h"
+#include "../db/concurrency.h"
+#include <errno.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include "../util/processinfo.h"
+#include "mongoutils/str.h"
+using namespace mongoutils;
+
+namespace mongo {
+
+ MemoryMappedFile::MemoryMappedFile() {
+ fd = 0;
+ maphandle = 0;
+ len = 0;
+ created();
+ }
+
+ void MemoryMappedFile::close() {
+ LockMongoFilesShared::assertExclusivelyLocked();
+ for( vector<void*>::iterator i = views.begin(); i != views.end(); i++ ) {
+ munmap(*i,len);
+ }
+ views.clear();
+
+ if ( fd )
+ ::close(fd);
+ fd = 0;
+ destroyed(); // cleans up from the master list of mmaps
+ }
+
+#ifndef O_NOATIME
+#define O_NOATIME (0)
+#endif
+
+#ifndef MAP_NORESERVE
+#define MAP_NORESERVE (0)
+#endif
+
+#if defined(__sunos__)
+ MAdvise::MAdvise(void *,unsigned, Advice) { }
+ MAdvise::~MAdvise() { }
+#else
+ MAdvise::MAdvise(void *p, unsigned len, Advice a) : _p(p), _len(len) {
+ assert( a == Sequential ); // more later
+ madvise(_p,_len,MADV_SEQUENTIAL);
+ }
+ MAdvise::~MAdvise() {
+ madvise(_p,_len,MADV_NORMAL);
+ }
+#endif
+
+ void* MemoryMappedFile::map(const char *filename, unsigned long long &length, int options) {
+ // length may be updated by callee.
+ setFilename(filename);
+ FileAllocator::get()->allocateAsap( filename, length );
+ len = length;
+
+ massert( 10446 , str::stream() << "mmap: can't map area of size 0 file: " << filename, length > 0 );
+
+ fd = open(filename, O_RDWR | O_NOATIME);
+ if ( fd <= 0 ) {
+ log() << "couldn't open " << filename << ' ' << errnoWithDescription() << endl;
+ fd = 0; // our sentinel for not opened
+ return 0;
+ }
+
+ unsigned long long filelen = lseek(fd, 0, SEEK_END);
+ uassert(10447, str::stream() << "map file alloc failed, wanted: " << length << " filelen: " << filelen << ' ' << sizeof(size_t), filelen == length );
+ lseek( fd, 0, SEEK_SET );
+
+ void * view = mmap(NULL, length, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+ if ( view == MAP_FAILED ) {
+ error() << " mmap() failed for " << filename << " len:" << length << " " << errnoWithDescription() << endl;
+ if ( errno == ENOMEM ) {
+ if( sizeof(void*) == 4 )
+ error() << "mmap failed with out of memory. You are using a 32-bit build and probably need to upgrade to 64" << endl;
+ else
+ error() << "mmap failed with out of memory. (64 bit build)" << endl;
+ }
+ return 0;
+ }
+
+
+#if defined(__sunos__)
+#warning madvise not supported on solaris yet
+#else
+ if ( options & SEQUENTIAL ) {
+ if ( madvise( view , length , MADV_SEQUENTIAL ) ) {
+ warning() << "map: madvise failed for " << filename << ' ' << errnoWithDescription() << endl;
+ }
+ }
+#endif
+
+ views.push_back( view );
+
+ DEV if (! d.dbMutex.info().isLocked()) {
+ _unlock();
+ }
+
+ return view;
+ }
+
+ void* MemoryMappedFile::createReadOnlyMap() {
+ void * x = mmap( /*start*/0 , len , PROT_READ , MAP_SHARED , fd , 0 );
+ if( x == MAP_FAILED ) {
+ if ( errno == ENOMEM ) {
+ if( sizeof(void*) == 4 )
+ error() << "mmap ro failed with out of memory. You are using a 32-bit build and probably need to upgrade to 64" << endl;
+ else
+ error() << "mmap ro failed with out of memory. (64 bit build)" << endl;
+ }
+ return 0;
+ }
+ return x;
+ }
+
+ void* MemoryMappedFile::createPrivateMap() {
+ void * x = mmap( /*start*/0 , len , PROT_READ|PROT_WRITE , MAP_PRIVATE|MAP_NORESERVE , fd , 0 );
+ if( x == MAP_FAILED ) {
+ if ( errno == ENOMEM ) {
+ if( sizeof(void*) == 4 ) {
+ error() << "mmap private failed with out of memory. You are using a 32-bit build and probably need to upgrade to 64" << endl;
+ }
+ else {
+ error() << "mmap private failed with out of memory. (64 bit build)" << endl;
+ }
+ }
+ else {
+ error() << "mmap private failed " << errnoWithDescription() << endl;
+ }
+ return 0;
+ }
+
+ views.push_back(x);
+ return x;
+ }
+
+ void* MemoryMappedFile::remapPrivateView(void *oldPrivateAddr) {
+ // don't unmap, just mmap over the old region
+ void * x = mmap( oldPrivateAddr, len , PROT_READ|PROT_WRITE , MAP_PRIVATE|MAP_NORESERVE|MAP_FIXED , fd , 0 );
+ if( x == MAP_FAILED ) {
+ int err = errno;
+ error() << "13601 Couldn't remap private view: " << errnoWithDescription(err) << endl;
+ log() << "aborting" << endl;
+ printMemInfo();
+ abort();
+ }
+ assert( x == oldPrivateAddr );
+ return x;
+ }
+
+ void MemoryMappedFile::flush(bool sync) {
+ if ( views.empty() || fd == 0 )
+ return;
+ if ( msync(viewForFlushing(), len, sync ? MS_SYNC : MS_ASYNC) )
+ problem() << "msync " << errnoWithDescription() << endl;
+ }
+
+ class PosixFlushable : public MemoryMappedFile::Flushable {
+ public:
+ PosixFlushable( void * view , HANDLE fd , long len )
+ : _view( view ) , _fd( fd ) , _len(len) {
+ }
+
+ void flush() {
+ if ( _view && _fd )
+ if ( msync(_view, _len, MS_SYNC ) )
+ problem() << "msync " << errnoWithDescription() << endl;
+
+ }
+
+ void * _view;
+ HANDLE _fd;
+ long _len;
+ };
+
+ MemoryMappedFile::Flushable * MemoryMappedFile::prepareFlush() {
+ return new PosixFlushable( viewForFlushing() , fd , len );
+ }
+
+ void MemoryMappedFile::_lock() {
+ if (! views.empty() && isMongoMMF() )
+ assert(mprotect(views[0], len, PROT_READ | PROT_WRITE) == 0);
+ }
+
+ void MemoryMappedFile::_unlock() {
+ if (! views.empty() && isMongoMMF() )
+ assert(mprotect(views[0], len, PROT_READ) == 0);
+ }
+
+} // namespace mongo
+
diff --git a/src/mongo/util/mmap_win.cpp b/src/mongo/util/mmap_win.cpp
new file mode 100644
index 00000000000..26115d096c1
--- /dev/null
+++ b/src/mongo/util/mmap_win.cpp
@@ -0,0 +1,202 @@
+// mmap_win.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "mmap.h"
+#include "text.h"
+#include "../db/mongommf.h"
+#include "../db/concurrency.h"
+
+namespace mongo {
+
+ mutex mapViewMutex("mapView");
+ ourbitset writable;
+
+ MAdvise::MAdvise(void *,unsigned, Advice) { }
+ MAdvise::~MAdvise() { }
+
+ /** notification on unmapping so we can clear writable bits */
+ void MemoryMappedFile::clearWritableBits(void *p) {
+ for( unsigned i = ((size_t)p)/ChunkSize; i <= (((size_t)p)+len)/ChunkSize; i++ ) {
+ writable.clear(i);
+ assert( !writable.get(i) );
+ }
+ }
+
+ MemoryMappedFile::MemoryMappedFile()
+ : _flushMutex(new mutex("flushMutex")) {
+ fd = 0;
+ maphandle = 0;
+ len = 0;
+ created();
+ }
+
+ void MemoryMappedFile::close() {
+ LockMongoFilesShared::assertExclusivelyLocked();
+ for( vector<void*>::iterator i = views.begin(); i != views.end(); i++ ) {
+ clearWritableBits(*i);
+ UnmapViewOfFile(*i);
+ }
+ views.clear();
+ if ( maphandle )
+ CloseHandle(maphandle);
+ maphandle = 0;
+ if ( fd )
+ CloseHandle(fd);
+ fd = 0;
+ destroyed(); // cleans up from the master list of mmaps
+ }
+
+ unsigned long long mapped = 0;
+
+ void* MemoryMappedFile::createReadOnlyMap() {
+ assert( maphandle );
+ scoped_lock lk(mapViewMutex);
+ void *p = MapViewOfFile(maphandle, FILE_MAP_READ, /*f ofs hi*/0, /*f ofs lo*/ 0, /*dwNumberOfBytesToMap 0 means to eof*/0);
+ if ( p == 0 ) {
+ DWORD e = GetLastError();
+ log() << "FILE_MAP_READ MapViewOfFile failed " << filename() << " " << errnoWithDescription(e) << endl;
+ }
+ else {
+ views.push_back(p);
+ }
+ return p;
+ }
+
+ void* MemoryMappedFile::map(const char *filenameIn, unsigned long long &length, int options) {
+ assert( fd == 0 && len == 0 ); // can't open more than once
+ setFilename(filenameIn);
+ /* big hack here: Babble uses db names with colons. doesn't seem to work on windows. temporary perhaps. */
+ char filename[256];
+ strncpy(filename, filenameIn, 255);
+ filename[255] = 0;
+ {
+ size_t len = strlen( filename );
+            for ( int i = (int)len - 1; i >= 0; i-- ) {
+ if ( filename[i] == '/' ||
+ filename[i] == '\\' )
+ break;
+
+ if ( filename[i] == ':' )
+ filename[i] = '_';
+ }
+ }
+
+ updateLength( filename, length );
+
+ {
+ DWORD createOptions = FILE_ATTRIBUTE_NORMAL;
+ if ( options & SEQUENTIAL )
+ createOptions |= FILE_FLAG_SEQUENTIAL_SCAN;
+ DWORD rw = GENERIC_READ | GENERIC_WRITE;
+ fd = CreateFile(
+ toNativeString(filename).c_str(),
+ rw, // desired access
+ FILE_SHARE_WRITE | FILE_SHARE_READ, // share mode
+ NULL, // security
+ OPEN_ALWAYS, // create disposition
+ createOptions , // flags
+ NULL); // hTempl
+ if ( fd == INVALID_HANDLE_VALUE ) {
+ DWORD e = GetLastError();
+ log() << "Create/OpenFile failed " << filename << " errno:" << e << endl;
+ return 0;
+ }
+ }
+
+ mapped += length;
+
+ {
+ DWORD flProtect = PAGE_READWRITE; //(options & READONLY)?PAGE_READONLY:PAGE_READWRITE;
+ maphandle = CreateFileMapping(fd, NULL, flProtect,
+ length >> 32 /*maxsizehigh*/,
+ (unsigned) length /*maxsizelow*/,
+ NULL/*lpName*/);
+ if ( maphandle == NULL ) {
+ DWORD e = GetLastError(); // log() call was killing lasterror before we get to that point in the stream
+ log() << "CreateFileMapping failed " << filename << ' ' << errnoWithDescription(e) << endl;
+ close();
+ return 0;
+ }
+ }
+
+ void *view = 0;
+ {
+ scoped_lock lk(mapViewMutex);
+ DWORD access = (options&READONLY)? FILE_MAP_READ : FILE_MAP_ALL_ACCESS;
+ view = MapViewOfFile(maphandle, access, /*f ofs hi*/0, /*f ofs lo*/ 0, /*dwNumberOfBytesToMap 0 means to eof*/0);
+ }
+ if ( view == 0 ) {
+ DWORD e = GetLastError();
+ log() << "MapViewOfFile failed " << filename << " " << errnoWithDescription(e) <<
+ ((sizeof(void*)==4)?" (32 bit build)":"") << endl;
+ close();
+ }
+ else {
+ views.push_back(view);
+ }
+ len = length;
+
+ return view;
+ }
+
+ class WindowsFlushable : public MemoryMappedFile::Flushable {
+ public:
+ WindowsFlushable( void * view , HANDLE fd , string filename , boost::shared_ptr<mutex> flushMutex )
+ : _view(view) , _fd(fd) , _filename(filename) , _flushMutex(flushMutex)
+ {}
+
+ void flush() {
+ if (!_view || !_fd)
+ return;
+
+ scoped_lock lk(*_flushMutex);
+
+ BOOL success = FlushViewOfFile(_view, 0); // 0 means whole mapping
+ if (!success) {
+ int err = GetLastError();
+ out() << "FlushViewOfFile failed " << err << " file: " << _filename << endl;
+ }
+
+ success = FlushFileBuffers(_fd);
+ if (!success) {
+ int err = GetLastError();
+ out() << "FlushFileBuffers failed " << err << " file: " << _filename << endl;
+ }
+ }
+
+ void * _view;
+ HANDLE _fd;
+ string _filename;
+ boost::shared_ptr<mutex> _flushMutex;
+ };
+
+ void MemoryMappedFile::flush(bool sync) {
+ uassert(13056, "Async flushing not supported on windows", sync);
+ if( !views.empty() ) {
+ WindowsFlushable f( viewForFlushing() , fd , filename() , _flushMutex);
+ f.flush();
+ }
+ }
+
+ MemoryMappedFile::Flushable * MemoryMappedFile::prepareFlush() {
+ return new WindowsFlushable( viewForFlushing() , fd , filename() , _flushMutex );
+ }
+ void MemoryMappedFile::_lock() {}
+ void MemoryMappedFile::_unlock() {}
+
+}
diff --git a/src/mongo/util/mongoutils/README b/src/mongo/util/mongoutils/README
new file mode 100755
index 00000000000..f61277c7409
--- /dev/null
+++ b/src/mongo/util/mongoutils/README
@@ -0,0 +1,15 @@
+ mongoutils namespace requirements:
+
+ (1) code is not database specific, rather, true utilities
+ (2) are cross platform
+ (3) may require boost headers, but not libs
+ (4) are clean and easy to use in any c++ project without pulling in lots of other stuff.
+ specifically, does not use anything in the mongo namespace!
+ (5) apache license
+ (6) must be documented! if you aren't going to bother (but don't do that), stick it in util.
+ (7) ideally header only (in the spirit of #3)
+
+ So basically, easy to use, general purpose stuff, with no arduous dependencies to drop into
+ any new project.
+
+ *** PLACE UNIT TESTS IN mongoutils/test.cpp ***
diff --git a/src/mongo/util/mongoutils/checksum.h b/src/mongo/util/mongoutils/checksum.h
new file mode 100644
index 00000000000..ea3d05131ce
--- /dev/null
+++ b/src/mongo/util/mongoutils/checksum.h
@@ -0,0 +1,32 @@
+/** @file checksum.h */
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+namespace mongoutils {
+
+ /**
+ * this is a silly temporary implementation
+ */
+ inline int checksum( const char* x , int size ) {
+ int ck = 0;
+ for ( int i=0; i<size; i++ )
+ ck += ( (int)x[i] * ( i + 1 ) );
+ return ck;
+ }
+
+}
diff --git a/src/mongo/util/mongoutils/hash.h b/src/mongo/util/mongoutils/hash.h
new file mode 100644
index 00000000000..49f30b3242a
--- /dev/null
+++ b/src/mongo/util/mongoutils/hash.h
@@ -0,0 +1,41 @@
+/** @file hash.h */
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+namespace mongoutils {
+
+ /** @return hash of a pointer to an unsigned. so you get a 32 bit hash out, regardless of whether
+ pointers are 32 or 64 bit on the particular platform.
+
+ is there a faster way to impl this that hashes just as well?
+ */
+ inline unsigned hashPointer(void *v) {
+ unsigned x = 0;
+ unsigned char *p = (unsigned char *) &v;
+ for( unsigned i = 0; i < sizeof(void*); i++ ) {
+ x = x * 131 + p[i];
+ }
+ return x;
+ }
+
+ inline unsigned hash(unsigned u) {
+ unsigned char *p = (unsigned char *) &u;
+ return (((((p[3] * 131) + p[2]) * 131) + p[1]) * 131) + p[0];
+ }
+
+}
diff --git a/src/mongo/util/mongoutils/html.h b/src/mongo/util/mongoutils/html.h
new file mode 100644
index 00000000000..f79e6ca514f
--- /dev/null
+++ b/src/mongo/util/mongoutils/html.h
@@ -0,0 +1,158 @@
+// @file html.h
+
+#pragma once
+
+/* Things in the mongoutils namespace
+ (1) are not database specific, rather, true utilities
+ (2) are cross platform
+ (3) may require boost headers, but not libs
+ (4) are clean and easy to use in any c++ project without pulling in lots of other stuff
+*/
+
+/* Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <sstream>
+
+namespace mongoutils {
+
+ namespace html {
+
+ using namespace std;
+
+ inline string _end() { return "</body></html>"; }
+ inline string _table() { return "</table>\n\n"; }
+ inline string _tr() { return "</tr>\n"; }
+
+ inline string tr() { return "<tr>"; }
+ inline string tr(string a, string b) {
+ stringstream ss;
+ ss << "<tr><td>" << a << "</td><td>" << b << "</td></tr>\n";
+ return ss.str();
+ }
+ template <class T>
+ inline string td(T x) {
+ stringstream ss;
+ ss << "<td>" << x << "</td>";
+ return ss.str();
+ }
+ inline string td(string x) {
+ return "<td>" + x + "</td>";
+ }
+ inline string th(string x) {
+ return "<th>" + x + "</th>";
+ }
+
+ inline void tablecell( stringstream& ss , bool b ) {
+ ss << "<td>" << (b ? "<b>X</b>" : "") << "</td>";
+ }
+
+ template< typename T>
+ inline void tablecell( stringstream& ss , const T& t ) {
+ ss << "<td>" << t << "</td>";
+ }
+
+ inline string table(const char *headers[] = 0, bool border = true) {
+ stringstream ss;
+ ss << "\n<table "
+ << (border?"border=1 ":"")
+ << "cellpadding=2 cellspacing=0>\n";
+ if( headers ) {
+ ss << "<tr>";
+ while( *headers ) {
+ ss << "<th>" << *headers << "</th>";
+ headers++;
+ }
+ ss << "</tr>\n";
+ }
+ return ss.str();
+ }
+
+ inline string start(string title) {
+ stringstream ss;
+ ss << "<html><head>\n<title>";
+ ss << title;
+ ss << "</title>\n";
+
+            ss << "<style type=\"text/css\" media=\"screen\">"
+               "body { font-family: helvetica, arial, sans-serif }\n"
+ "table { border-collapse:collapse; border-color:#999; margin-top:.5em }\n"
+ "th { background-color:#bbb; color:#000 }\n"
+ "td,th { padding:.25em }\n"
+ "</style>\n";
+
+ ss << "</head>\n<body>\n";
+ return ss.str();
+ }
+
+ inline string red(string contentHtml, bool color=true) {
+ if( !color ) return contentHtml;
+ stringstream ss;
+ ss << "<span style=\"color:#A00;\">" << contentHtml << "</span>";
+ return ss.str();
+ }
+ inline string grey(string contentHtml, bool color=true) {
+ if( !color ) return contentHtml;
+ stringstream ss;
+ ss << "<span style=\"color:#888;\">" << contentHtml << "</span>";
+ return ss.str();
+ }
+ inline string blue(string contentHtml, bool color=true) {
+ if( !color ) return contentHtml;
+ stringstream ss;
+ ss << "<span style=\"color:#00A;\">" << contentHtml << "</span>";
+ return ss.str();
+ }
+ inline string yellow(string contentHtml, bool color=true) {
+ if( !color ) return contentHtml;
+ stringstream ss;
+ ss << "<span style=\"color:#A80;\">" << contentHtml << "</span>";
+ return ss.str();
+ }
+ inline string green(string contentHtml, bool color=true) {
+ if( !color ) return contentHtml;
+ stringstream ss;
+ ss << "<span style=\"color:#0A0;\">" << contentHtml << "</span>";
+ return ss.str();
+ }
+
+ inline string p(string contentHtml) {
+ stringstream ss;
+ ss << "<p>" << contentHtml << "</p>\n";
+ return ss.str();
+ }
+
+ inline string h2(string contentHtml) {
+ stringstream ss;
+ ss << "<h2>" << contentHtml << "</h2>\n";
+ return ss.str();
+ }
+
+ /* does NOT escape the strings. */
+ inline string a(string href, string title="", string contentHtml = "") {
+ stringstream ss;
+ ss << "<a";
+ if( !href.empty() ) ss << " href=\"" << href << '"';
+ if( !title.empty() ) ss << " title=\"" << title << '"';
+ ss << '>';
+ if( !contentHtml.empty() ) {
+ ss << contentHtml << "</a>";
+ }
+ return ss.str();
+ }
+
+ }
+
+}
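+
+/* Usage sketch (illustrative only, not part of this header): the helpers above
+   just return HTML fragments that the caller concatenates into a page string.
+   Assuming the declarations in this file:
+
+       using namespace mongoutils::html;
+       const char *hdrs[] = { "name", "value", 0 };
+       string page = start("status") + table(hdrs)
+                     + tr("uptime", "42s") + _table() + _end();
+*/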
diff --git a/src/mongo/util/mongoutils/mongoutils.vcxproj b/src/mongo/util/mongoutils/mongoutils.vcxproj
new file mode 100755
index 00000000000..f6ec0935ca9
--- /dev/null
+++ b/src/mongo/util/mongoutils/mongoutils.vcxproj
@@ -0,0 +1,75 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup Label="ProjectConfigurations">
+ <ProjectConfiguration Include="Debug|Win32">
+ <Configuration>Debug</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|Win32">
+ <Configuration>Release</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ </ItemGroup>
+ <PropertyGroup Label="Globals">
+ <ProjectGuid>{7B84584E-92BC-4DB9-971B-A1A8F93E5053}</ProjectGuid>
+ <RootNamespace>mongoutils</RootNamespace>
+ <ProjectName>mongoutils test program</ProjectName>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseDebugLibraries>true</UseDebugLibraries>
+ <CharacterSet>MultiByte</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseDebugLibraries>false</UseDebugLibraries>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ <CharacterSet>MultiByte</CharacterSet>
+ </PropertyGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+ <ImportGroup Label="ExtensionSettings">
+ </ImportGroup>
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup />
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <Optimization>Disabled</Optimization>
+ <AdditionalIncludeDirectories>c:\boost;\boost</AdditionalIncludeDirectories>
+ </ClCompile>
+ <Link>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <AdditionalIncludeDirectories>c:\boost;\boost</AdditionalIncludeDirectories>
+ </ClCompile>
+ <Link>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <OptimizeReferences>true</OptimizeReferences>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemGroup>
+ <ClCompile Include="test.cpp" />
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="html.h" />
+ <ClInclude Include="str.h" />
+ </ItemGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+ <ImportGroup Label="ExtensionTargets">
+ </ImportGroup>
+</Project> \ No newline at end of file
diff --git a/src/mongo/util/mongoutils/mongoutils.vcxproj.filters b/src/mongo/util/mongoutils/mongoutils.vcxproj.filters
new file mode 100755
index 00000000000..84ecbffede0
--- /dev/null
+++ b/src/mongo/util/mongoutils/mongoutils.vcxproj.filters
@@ -0,0 +1,10 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup>
+ <ClCompile Include="test.cpp" />
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="html.h" />
+ <ClInclude Include="str.h" />
+ </ItemGroup>
+</Project> \ No newline at end of file
diff --git a/src/mongo/util/mongoutils/str.h b/src/mongo/util/mongoutils/str.h
new file mode 100644
index 00000000000..97b121b0068
--- /dev/null
+++ b/src/mongo/util/mongoutils/str.h
@@ -0,0 +1,216 @@
+// @file str.h
+
+/* Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+/* Things in the mongoutils namespace
+ (1) are not database specific, rather, true utilities
+ (2) are cross platform
+ (3) may require boost headers, but not libs
+ (4) are clean and easy to use in any c++ project without pulling in lots of other stuff
+
+ Note: within this module, we use int for all offsets -- there are no unsigned offsets
+ and no size_t's. If you need 3 gigabyte long strings, don't use this module.
+*/
+
+#include <string>
+#include <sstream>
+
+// this violates the README rules for mongoutils:
+#include "../../bson/util/builder.h"
+
+namespace mongoutils {
+
+ namespace str {
+
+ typedef std::string string;
+
+ /** the idea here is to make one liners easy. e.g.:
+
+ return str::stream() << 1 << ' ' << 2;
+
+ since the following doesn't work:
+
+ (stringstream() << 1).str();
+ */
+ class stream {
+ public:
+ mongo::StringBuilder ss;
+ template<class T>
+ stream& operator<<(const T& v) {
+ ss << v;
+ return *this;
+ }
+ operator std::string () const { return ss.str(); }
+ };
+
+ inline bool startsWith(const char *str, const char *prefix) {
+ const char *s = str;
+ const char *p = prefix;
+ while( *p ) {
+ if( *p != *s ) return false;
+ p++; s++;
+ }
+ return true;
+ }
+ inline bool startsWith(string s, string p) { return startsWith(s.c_str(), p.c_str()); }
+
+ // while these are trivial today, keep them in case we do different wide char things later
+ inline bool startsWith(const char *p, char ch) { return *p == ch; }
+ inline bool startsWith(string s, char ch) { return startsWith(s.c_str(), ch); }
+
+ inline bool endsWith(string s, string p) {
+ int l = p.size();
+ int x = s.size();
+ if( x < l ) return false;
+ return strncmp(s.c_str()+x-l, p.c_str(), l) == 0;
+ }
+ inline bool endsWith(const char *s, char p) {
+ size_t len = strlen(s);
+ return len && s[len-1] == p;
+ }
+
+ inline bool equals( const char * a , const char * b ) { return strcmp( a , b ) == 0; }
+
+ /** find char x, and return rest of string thereafter, or "" if not found */
+ inline const char * after(const char *s, char x) {
+ const char *p = strchr(s, x);
+ return (p != 0) ? p+1 : "";
+ }
+ inline string after(const string& s, char x) {
+ const char *p = strchr(s.c_str(), x);
+ return (p != 0) ? string(p+1) : "";
+ }
+
+ /** find string x, and return rest of string thereafter, or "" if not found */
+ inline const char * after(const char *s, const char *x) {
+ const char *p = strstr(s, x);
+ return (p != 0) ? p+strlen(x) : "";
+ }
+ inline string after(string s, string x) {
+ const char *p = strstr(s.c_str(), x.c_str());
+ return (p != 0) ? string(p+x.size()) : "";
+ }
+
+ /** @return true if s contains x */
+ inline bool contains(string s, string x) {
+ return strstr(s.c_str(), x.c_str()) != 0;
+ }
+ inline bool contains(string s, char x) {
+ return strchr(s.c_str(), x) != 0;
+ }
+
+ /** @return everything before the character x, else entire string */
+ inline string before(const string& s, char x) {
+ const char *p = strchr(s.c_str(), x);
+ return (p != 0) ? s.substr(0, p-s.c_str()) : s;
+ }
+
+ /** @return everything before the string x, else entire string */
+ inline string before(const string& s, const string& x) {
+ const char *p = strstr(s.c_str(), x.c_str());
+ return (p != 0) ? s.substr(0, p-s.c_str()) : s;
+ }
+
+ /** check if strings share a common starting prefix
+ @return offset of divergence (or length if equal). 0=nothing in common. */
+ inline int shareCommonPrefix(const char *p, const char *q) {
+ int ofs = 0;
+ while( 1 ) {
+ if( *p == 0 || *q == 0 )
+ break;
+ if( *p != *q )
+ break;
+ p++; q++; ofs++;
+ }
+ return ofs;
+ }
+ inline int shareCommonPrefix(const string &a, const string &b)
+ { return shareCommonPrefix(a.c_str(), b.c_str()); }
+
+ /** string to unsigned. zero if not a number. can end with non-num chars */
+ inline unsigned toUnsigned(const string& a) {
+ unsigned x = 0;
+ const char *p = a.c_str();
+ while( 1 ) {
+ if( !isdigit(*p) )
+ break;
+ x = x * 10 + (*p - '0');
+ p++;
+ }
+ return x;
+ }
+
+ /** split a string on a specific char. We don't split N times, just once
+ on the first occurrence. If char not present entire string is in L
+ and R is empty.
+ @return true if char found
+ */
+ inline bool splitOn(const string &s, char c, string& L, string& R) {
+ const char *start = s.c_str();
+ const char *p = strchr(start, c);
+ if( p == 0 ) {
+ L = s; R.clear();
+ return false;
+ }
+ L = string(start, p-start);
+ R = string(p+1);
+ return true;
+ }
+ /** split scanning reverse direction. Splits ONCE ONLY. */
+ inline bool rSplitOn(const string &s, char c, string& L, string& R) {
+ const char *start = s.c_str();
+ const char *p = strrchr(start, c);
+ if( p == 0 ) {
+ L = s; R.clear();
+ return false;
+ }
+ L = string(start, p-start);
+ R = string(p+1);
+ return true;
+ }
+
+ /** @return number of occurrences of c in s */
+ inline unsigned count( const string& s , char c ) {
+ unsigned n=0;
+ for ( unsigned i=0; i<s.size(); i++ )
+ if ( s[i] == c )
+ n++;
+ return n;
+ }
+
+ /** trim leading spaces. spaces only, not tabs etc. */
+ inline string ltrim(const string& s) {
+ const char *p = s.c_str();
+ while( *p == ' ' ) p++;
+ return p;
+ }
+
+ /** remove trailing chars in place */
+ inline void stripTrailing(string& s, const char *chars) {
+ string::size_type i = s.size();
+ while( i > 0 ) {
+ i--;
+ if( contains(chars, s[i]) )
+ s.erase(i, 1); // index-based erase; avoids reusing an invalidated iterator
+ }
+ }
+
+ }
+
+}
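+
+/* Usage sketch (illustrative only, not part of this header), assuming the
+   declarations above:
+
+       using namespace mongoutils;
+       std::string db, coll;
+       str::splitOn( "test.foo", '.', db, coll );   // db == "test", coll == "foo"
+       std::string ns = str::stream() << "ns: " << db << '.' << coll;
+*/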
diff --git a/src/mongo/util/mongoutils/test.cpp b/src/mongo/util/mongoutils/test.cpp
new file mode 100644
index 00000000000..45268c5ca49
--- /dev/null
+++ b/src/mongo/util/mongoutils/test.cpp
@@ -0,0 +1,45 @@
+/* @file test.cpp
+ utils/mongoutils/test.cpp
+ unit tests for mongoutils
+*/
+
+/*
+ * Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include "str.h"
+#include "html.h"
+
+using namespace std;
+using namespace mongoutils;
+
+int main() {
+ {
+ string s = "abcde";
+ str::stripTrailing(s, "ef");
+ assert( s == "abcd" );
+ str::stripTrailing(s, "abcd");
+ assert( s.empty() );
+ s = "abcddd";
+ str::stripTrailing(s, "d");
+ assert( s == "abc" );
+ }
+
+ string x = str::after("abcde", 'c');
+ assert( x == "de" );
+ assert( str::after("abcde", 'x') == "" );
+ return 0;
+}
diff --git a/src/mongo/util/moveablebuffer.h b/src/mongo/util/moveablebuffer.h
new file mode 100644
index 00000000000..e01f2d8d9a4
--- /dev/null
+++ b/src/mongo/util/moveablebuffer.h
@@ -0,0 +1,51 @@
+/* moveablebuffer.h
+*/
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+namespace mongo {
+
+ /** a sort of smart pointer class where the thing pointed to can be moved and all the pointers will adjust.
+ not threadsafe.
+ */
+ struct MoveableBuffer {
+ MoveableBuffer();
+ MoveableBuffer(void *);
+ MoveableBuffer& operator=(const MoveableBuffer&);
+ ~MoveableBuffer();
+
+ void *p;
+ };
+
+ /* implementation (inlines) below */
+
+ // this is a temp stub implementation...not really done yet - just having everything compile & such for checkpointing into git
+
+ inline MoveableBuffer::MoveableBuffer() : p(0) { }
+
+ inline MoveableBuffer::MoveableBuffer(void *_p) : p(_p) { }
+
+ inline MoveableBuffer& MoveableBuffer::operator=(const MoveableBuffer& r) {
+ p = r.p;
+ return *this;
+ }
+
+ inline MoveableBuffer::~MoveableBuffer() {
+ }
+
+}
diff --git a/src/mongo/util/net/hostandport.h b/src/mongo/util/net/hostandport.h
new file mode 100644
index 00000000000..3f4a64b79a9
--- /dev/null
+++ b/src/mongo/util/net/hostandport.h
@@ -0,0 +1,239 @@
+// hostandport.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "sock.h"
+#include "../../db/cmdline.h"
+#include "../mongoutils/str.h"
+
+namespace mongo {
+
+ using namespace mongoutils;
+
+ void dynHostResolve(string& name, int& port);
+ string dynHostMyName();
+
+ /** helper for manipulating host:port connection endpoints.
+ */
+ struct HostAndPort {
+ HostAndPort() : _port(-1) { }
+
+ /** From a string hostname[:portnumber] or a #dynname
+ Throws user assertion if bad config string or bad port #.
+ */
+ HostAndPort(string s);
+
+ /** @param p port number. -1 is ok to use default. */
+ HostAndPort(string h, int p /*= -1*/) : _host(h), _port(p) {
+ assert( !str::startsWith(h, '#') );
+ }
+
+ HostAndPort(const SockAddr& sock ) : _host( sock.getAddr() ) , _port( sock.getPort() ) { }
+
+ static HostAndPort me() { return HostAndPort("localhost", cmdLine.port); }
+
+ /* uses real hostname instead of localhost */
+ static HostAndPort Me();
+
+ bool operator<(const HostAndPort& r) const {
+ string h = host();
+ string rh = r.host();
+ if( h < rh )
+ return true;
+ if( h == rh )
+ return port() < r.port();
+ return false;
+ }
+
+ bool operator==(const HostAndPort& r) const {
+ return host() == r.host() && port() == r.port();
+ }
+
+ bool operator!=(const HostAndPort& r) const { return !(*this == r); }
+
+ /* returns true if the host/port combo identifies this process instance. */
+ bool isSelf() const; // defined in isself.cpp
+
+ bool isLocalHost() const;
+
+ /**
+ * @param includePort host:port if true, host otherwise
+ */
+ string toString( bool includePort=true ) const;
+
+ // get the logical name if using a #dynhostname instead of resolving to current actual name
+ string dynString() const;
+ string toStringLong() const;
+
+ operator string() const { return toString(); }
+
+ bool empty() const {
+ return _dynName.empty() && _host.empty() && _port < 0;
+ }
+ string host() const {
+ if( !dyn() )
+ return _host;
+ string h = _dynName;
+ int p;
+ dynHostResolve(h, p);
+ return h;
+ }
+ int port() const {
+ int p = -2;
+ if( dyn() ) {
+ string h = _dynName;
+ dynHostResolve(h,p);
+ }
+ else {
+ p = _port;
+ }
+ return p >= 0 ? p : CmdLine::DefaultDBPort;
+ }
+ bool hasPort() const {
+ int p = -2;
+ if( dyn() ) {
+ string h = _dynName;
+ dynHostResolve(h,p);
+ }
+ else {
+ p = _port;
+ }
+ return p >= 0;
+ }
+ void setPort( int port ) {
+ if( dyn() ) {
+ log() << "INFO skipping setPort() HostAndPort dyn()=true" << endl;
+ return;
+ }
+ _port = port;
+ }
+
+ private:
+ bool dyn() const { return !_dynName.empty(); }
+ void init(const char *);
+ // invariant (except full obj assignment):
+ string _dynName; // when this is set, _host and _port aren't used, rather, we look up the dyn info every time.
+ string _host;
+ int _port; // -1 indicates unspecified
+ };
+
+ inline HostAndPort HostAndPort::Me() {
+ {
+ string s = dynHostMyName();
+ if( !s.empty() )
+ return HostAndPort(s);
+ }
+
+ const char* ips = cmdLine.bind_ip.c_str();
+ while(*ips) {
+ string ip;
+ const char * comma = strchr(ips, ',');
+ if (comma) {
+ ip = string(ips, comma - ips);
+ ips = comma + 1;
+ }
+ else {
+ ip = string(ips);
+ ips = "";
+ }
+ HostAndPort h = HostAndPort(ip, cmdLine.port);
+ if (!h.isLocalHost()) {
+ return h;
+ }
+ }
+
+ string h = getHostName();
+ assert( !h.empty() );
+ assert( h != "localhost" );
+ return HostAndPort(h, cmdLine.port);
+ }
+
+ inline string HostAndPort::dynString() const {
+ return dyn() ? _dynName : toString();
+ }
+
+ inline string HostAndPort::toStringLong() const {
+ return _dynName + ':' + toString();
+ }
+
+ inline string HostAndPort::toString( bool includePort ) const {
+ string h = host();
+ int p = port();
+
+ if ( ! includePort )
+ return h;
+
+ stringstream ss;
+ ss << h;
+ if ( p != -1 ) {
+ ss << ':';
+#if defined(_DEBUG)
+ if( p >= 44000 && p < 44100 ) {
+ log() << "warning: special debug port 44xxx used" << endl;
+ ss << p+1;
+ }
+ else
+ ss << p;
+#else
+ ss << p;
+#endif
+ }
+ return ss.str();
+ }
+
+ inline bool HostAndPort::isLocalHost() const {
+ string _host = host();
+ return ( _host == "localhost"
+ || startsWith(_host.c_str(), "127.")
+ || _host == "::1"
+ || _host == "anonymous unix socket"
+ || _host.c_str()[0] == '/' // unix socket
+ );
+ }
+
+ inline void HostAndPort::init(const char *p) {
+ uassert(13110, "HostAndPort: bad host:port config string", *p);
+ assert( *p != '#' );
+ assert( _dynName.empty() );
+ const char *colon = strrchr(p, ':');
+ if( colon ) {
+ int port = atoi(colon+1);
+ uassert(13095, "HostAndPort: bad port #", port > 0);
+ _host = string(p,colon-p);
+ _port = port;
+ }
+ else {
+ // no port specified.
+ _host = p;
+ _port = -1;
+ }
+ }
+
+ inline HostAndPort::HostAndPort(string s) {
+ const char *p = s.c_str();
+ if( *p == '#' ) {
+ _dynName = s;
+ _port = -2;
+ _host = "invalid_hostname_dyn_in_use";
+ }
+ else {
+ init(p);
+ }
+ }
+
+}
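+
+/* Usage sketch (illustrative only, not part of this header), assuming the
+   declarations above; the host name is hypothetical:
+
+       HostAndPort hp( "db1.example.com:27018" );
+       hp.host();      // "db1.example.com"
+       hp.port();      // 27018
+       HostAndPort np( "db1.example.com" );
+       np.port();      // no port given, so this falls back to CmdLine::DefaultDBPort
+*/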
diff --git a/src/mongo/util/net/httpclient.cpp b/src/mongo/util/net/httpclient.cpp
new file mode 100644
index 00000000000..16eaa0ae80a
--- /dev/null
+++ b/src/mongo/util/net/httpclient.cpp
@@ -0,0 +1,177 @@
+// httpclient.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "httpclient.h"
+#include "sock.h"
+#include "message.h"
+#include "message_port.h"
+#include "../mongoutils/str.h"
+#include "../../bson/util/builder.h"
+
+namespace mongo {
+
+ //#define HD(x) cout << x << endl;
+#define HD(x)
+
+
+ int HttpClient::get( string url , Result * result ) {
+ return _go( "GET" , url , 0 , result );
+ }
+
+ int HttpClient::post( string url , string data , Result * result ) {
+ return _go( "POST" , url , data.c_str() , result );
+ }
+
+ int HttpClient::_go( const char * command , string url , const char * body , Result * result ) {
+ bool ssl = false;
+ if ( url.find( "https://" ) == 0 ) {
+ ssl = true;
+ url = url.substr( 8 );
+ }
+ else {
+ uassert( 10271 , "invalid url" , url.find( "http://" ) == 0 );
+ url = url.substr( 7 );
+ }
+
+ string host , path;
+ if ( url.find( "/" ) == string::npos ) {
+ host = url;
+ path = "/";
+ }
+ else {
+ host = url.substr( 0 , url.find( "/" ) );
+ path = url.substr( url.find( "/" ) );
+ }
+
+
+ HD( "host [" << host << "]" );
+ HD( "path [" << path << "]" );
+
+ string server = host;
+ int port = ssl ? 443 : 80;
+
+ string::size_type idx = host.find( ":" );
+ if ( idx != string::npos ) {
+ server = host.substr( 0 , idx );
+ string t = host.substr( idx + 1 );
+ port = atoi( t.c_str() );
+ }
+
+ HD( "server [" << server << "]" );
+ HD( "port [" << port << "]" );
+
+ string req;
+ {
+ stringstream ss;
+ ss << command << " " << path << " HTTP/1.1\r\n";
+ ss << "Host: " << host << "\r\n";
+ ss << "Connection: Close\r\n";
+ ss << "User-Agent: mongodb http client\r\n";
+ if ( body ) {
+ ss << "Content-Length: " << strlen( body ) << "\r\n";
+ }
+ ss << "\r\n";
+ if ( body ) {
+ ss << body;
+ }
+
+ req = ss.str();
+ }
+
+ SockAddr addr( server.c_str() , port );
+ HD( "addr: " << addr.toString() );
+
+ Socket sock;
+ if ( ! sock.connect( addr ) )
+ return -1;
+
+ if ( ssl ) {
+#ifdef MONGO_SSL
+ _checkSSLManager();
+ sock.secure( _sslManager.get() );
+#else
+ uasserted( 15862 , "no ssl support" );
+#endif
+ }
+
+ {
+ const char * out = req.c_str();
+ int toSend = req.size();
+ sock.send( out , toSend, "_go" );
+ }
+
+ char buf[4097]; // one extra byte so the terminating 0 below fits even on a full 4096-byte read
+ int got = sock.unsafe_recv( buf , 4096 );
+ buf[got] = 0;
+
+ int rc;
+ char version[32];
+ assert( sscanf( buf , "%31s %d" , version , &rc ) == 2 ); // %31s: version[] holds 32 bytes
+ HD( "rc: " << rc );
+
+ StringBuilder sb;
+ if ( result )
+ sb << buf;
+
+ while ( ( got = sock.unsafe_recv( buf , 4096 ) ) > 0) {
+ buf[got] = 0; // null-terminate each chunk before streaming it
+ if ( result )
+ sb << buf;
+ }
+
+ if ( result ) {
+ result->_init( rc , sb.str() );
+ }
+
+ return rc;
+ }
+
+ void HttpClient::Result::_init( int code , string entire ) {
+ _code = code;
+ _entireResponse = entire;
+
+ while ( true ) {
+ size_t i = entire.find( '\n' );
+ if ( i == string::npos ) {
+ // invalid
+ break;
+ }
+
+ string h = entire.substr( 0 , i );
+ entire = entire.substr( i + 1 );
+
+ if ( h.size() && h[h.size()-1] == '\r' )
+ h = h.substr( 0 , h.size() - 1 );
+
+ if ( h.size() == 0 )
+ break;
+
+ i = h.find( ':' );
+ if ( i != string::npos )
+ _headers[h.substr(0,i)] = str::ltrim(h.substr(i+1));
+ }
+
+ _body = entire;
+ }
+
+#ifdef MONGO_SSL
+ void HttpClient::_checkSSLManager() {
+ _sslManager.reset( new SSLManager( true ) );
+ }
+#endif
+
+}
diff --git a/src/mongo/util/net/httpclient.h b/src/mongo/util/net/httpclient.h
new file mode 100644
index 00000000000..126969f4f70
--- /dev/null
+++ b/src/mongo/util/net/httpclient.h
@@ -0,0 +1,78 @@
+// httpclient.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "../../pch.h"
+#include "sock.h"
+
+namespace mongo {
+
+ class HttpClient : boost::noncopyable {
+ public:
+
+ typedef map<string,string> Headers;
+
+ class Result {
+ public:
+ Result() {}
+
+ const string& getEntireResponse() const {
+ return _entireResponse;
+ }
+
+ Headers getHeaders() const {
+ return _headers;
+ }
+
+ const string& getBody() const {
+ return _body;
+ }
+
+ private:
+
+ void _init( int code , string entire );
+
+ int _code;
+ string _entireResponse;
+
+ Headers _headers;
+ string _body;
+
+ friend class HttpClient;
+ };
+
+ /**
+ * @return response code
+ */
+ int get( string url , Result * result = 0 );
+
+ /**
+ * @return response code
+ */
+ int post( string url , string body , Result * result = 0 );
+
+ private:
+ int _go( const char * command , string url , const char * body , Result * result );
+
+#ifdef MONGO_SSL
+ void _checkSSLManager();
+
+ scoped_ptr<SSLManager> _sslManager;
+#endif
+ };
+}
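+
+/* Usage sketch (illustrative only, not part of this header), assuming the
+   declarations above; the URL is hypothetical:
+
+       HttpClient c;
+       HttpClient::Result r;
+       int code = c.get( "http://localhost:28017/", &r );
+       if ( code == 200 )
+           cout << r.getBody() << endl;
+*/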
diff --git a/src/mongo/util/net/listen.cpp b/src/mongo/util/net/listen.cpp
new file mode 100644
index 00000000000..ec3e4fa0ee8
--- /dev/null
+++ b/src/mongo/util/net/listen.cpp
@@ -0,0 +1,394 @@
+// listen.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "pch.h"
+#include "listen.h"
+#include "message_port.h"
+
+#ifndef _WIN32
+
+# ifndef __sunos__
+# include <ifaddrs.h>
+# endif
+# include <sys/resource.h>
+# include <sys/stat.h>
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <arpa/inet.h>
+#include <errno.h>
+#include <netdb.h>
+#ifdef __openbsd__
+# include <sys/uio.h>
+#endif
+
+#else
+
+// errno doesn't work for winsock.
+#undef errno
+#define errno WSAGetLastError()
+
+#endif
+
+namespace mongo {
+
+
+ void checkTicketNumbers();
+
+
+ // ----- Listener -------
+
+ const Listener* Listener::_timeTracker;
+
+ vector<SockAddr> ipToAddrs(const char* ips, int port, bool useUnixSockets) {
+ vector<SockAddr> out;
+ if (*ips == '\0') {
+ out.push_back(SockAddr("0.0.0.0", port)); // IPv4 all
+
+ if (IPv6Enabled())
+ out.push_back(SockAddr("::", port)); // IPv6 all
+#ifndef _WIN32
+ if (useUnixSockets)
+ out.push_back(SockAddr(makeUnixSockPath(port).c_str(), port)); // Unix socket
+#endif
+ return out;
+ }
+
+ while(*ips) {
+ string ip;
+ const char * comma = strchr(ips, ',');
+ if (comma) {
+ ip = string(ips, comma - ips);
+ ips = comma + 1;
+ }
+ else {
+ ip = string(ips);
+ ips = "";
+ }
+
+ SockAddr sa(ip.c_str(), port);
+ out.push_back(sa);
+
+#ifndef _WIN32
+ if (useUnixSockets && (sa.getAddr() == "127.0.0.1" || sa.getAddr() == "0.0.0.0")) // only IPv4
+ out.push_back(SockAddr(makeUnixSockPath(port).c_str(), port));
+#endif
+ }
+ return out;
+
+ }
+
+ Listener::Listener(const string& name, const string &ip, int port, bool logConnect )
+ : _port(port), _name(name), _ip(ip), _logConnect(logConnect), _elapsedTime(0) {
+#ifdef MONGO_SSL
+ _ssl = 0;
+ _sslPort = 0;
+
+ if ( cmdLine.sslOnNormalPorts && cmdLine.sslServerManager ) {
+ secure( cmdLine.sslServerManager );
+ }
+#endif
+ }
+
+ Listener::~Listener() {
+ if ( _timeTracker == this )
+ _timeTracker = 0;
+ }
+
+#ifdef MONGO_SSL
+ void Listener::secure( SSLManager* manager ) {
+ _ssl = manager;
+ }
+
+ void Listener::addSecurePort( SSLManager* manager , int additionalPort ) {
+ _ssl = manager;
+ _sslPort = additionalPort;
+ }
+
+#endif
+
+ bool Listener::_setupSockets( const vector<SockAddr>& mine , vector<SOCKET>& socks ) {
+ for (vector<SockAddr>::const_iterator it=mine.begin(), end=mine.end(); it != end; ++it) {
+ const SockAddr& me = *it;
+
+ SOCKET sock = ::socket(me.getType(), SOCK_STREAM, 0);
+ massert( 15863 , str::stream() << "listen(): invalid socket? " << errnoWithDescription() , sock >= 0 );
+
+ if (me.getType() == AF_UNIX) {
+#if !defined(_WIN32)
+ if (unlink(me.getAddr().c_str()) == -1) {
+ int x = errno;
+ if (x != ENOENT) {
+ log() << "couldn't unlink socket file " << me << errnoWithDescription(x) << " skipping" << endl;
+ continue;
+ }
+ }
+#endif
+ }
+ else if (me.getType() == AF_INET6) {
+ // IPv6 can also accept IPv4 connections as mapped addresses (::ffff:127.0.0.1)
+ // That causes a conflict if we don't set it to IPV6_V6ONLY
+ const int one = 1;
+ setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY, (const char*) &one, sizeof(one));
+ }
+
+#if !defined(_WIN32)
+ {
+ const int one = 1;
+ if ( setsockopt( sock , SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) < 0 )
+ out() << "Failed to set socket opt, SO_REUSEADDR" << endl;
+ }
+#endif
+
+ if ( ::bind(sock, me.raw(), me.addressSize) != 0 ) {
+ int x = errno;
+ error() << "listen(): bind() failed " << errnoWithDescription(x) << " for socket: " << me.toString() << endl;
+ if ( x == EADDRINUSE )
+ error() << " addr already in use" << endl;
+ closesocket(sock);
+ return false;
+ }
+
+#if !defined(_WIN32)
+ if (me.getType() == AF_UNIX) {
+ if (chmod(me.getAddr().c_str(), 0777) == -1) {
+ error() << "couldn't chmod socket file " << me << errnoWithDescription() << endl;
+ }
+ ListeningSockets::get()->addPath( me.getAddr() );
+ }
+#endif
+
+ if ( ::listen(sock, 128) != 0 ) {
+ error() << "listen(): listen() failed " << errnoWithDescription() << endl;
+ closesocket(sock);
+ return false;
+ }
+
+ ListeningSockets::get()->add( sock );
+
+ socks.push_back(sock);
+ }
+
+ return true;
+ }
+
+ void Listener::initAndListen() {
+ checkTicketNumbers();
+ vector<SOCKET> socks;
+ set<int> sslSocks;
+
+ { // normal sockets
+ vector<SockAddr> mine = ipToAddrs(_ip.c_str(), _port, (!cmdLine.noUnixSocket && useUnixSockets()));
+ if ( ! _setupSockets( mine , socks ) )
+ return;
+ }
+
+#ifdef MONGO_SSL
+ if ( _ssl && _sslPort > 0 ) {
+ unsigned prev = socks.size();
+
+ vector<SockAddr> mine = ipToAddrs(_ip.c_str(), _sslPort, false );
+ if ( ! _setupSockets( mine , socks ) )
+ return;
+
+ for ( unsigned i=prev; i<socks.size(); i++ ) {
+ sslSocks.insert( socks[i] );
+ }
+
+ }
+#endif
+
+ SOCKET maxfd = 0; // needed for select()
+ for ( unsigned i=0; i<socks.size(); i++ ) {
+ if ( socks[i] > maxfd )
+ maxfd = socks[i];
+ }
+
+#ifdef MONGO_SSL
+ if ( _ssl == 0 ) {
+ _logListen( _port , false );
+ }
+ else if ( _sslPort == 0 ) {
+ _logListen( _port , true );
+ }
+ else {
+ // both
+ _logListen( _port , false );
+ _logListen( _sslPort , true );
+ }
+#else
+ _logListen( _port , false );
+#endif
+
+ static long connNumber = 0;
+ struct timeval maxSelectTime;
+ while ( ! inShutdown() ) {
+ fd_set fds[1];
+ FD_ZERO(fds);
+
+ for (vector<SOCKET>::iterator it=socks.begin(), end=socks.end(); it != end; ++it) {
+ FD_SET(*it, fds);
+ }
+
+ maxSelectTime.tv_sec = 0;
+ maxSelectTime.tv_usec = 10000;
+ const int ret = select(maxfd+1, fds, NULL, NULL, &maxSelectTime);
+
+ if (ret == 0) {
+#if defined(__linux__)
+ _elapsedTime += ( 10000 - maxSelectTime.tv_usec ) / 1000;
+#else
+ _elapsedTime += 10;
+#endif
+ continue;
+ }
+
+ if (ret < 0) {
+ int x = errno;
+#ifdef EINTR
+ if ( x == EINTR ) {
+ log() << "select() signal caught, continuing" << endl;
+ continue;
+ }
+#endif
+ if ( ! inShutdown() )
+ log() << "select() failure: ret=" << ret << " " << errnoWithDescription(x) << endl;
+ return;
+ }
+
+#if defined(__linux__)
+ _elapsedTime += max(ret, (int)(( 10000 - maxSelectTime.tv_usec ) / 1000));
+#else
+ _elapsedTime += ret; // assume 1ms to grab connection. very rough
+#endif
+
+ for (vector<SOCKET>::iterator it=socks.begin(), end=socks.end(); it != end; ++it) {
+ if (! (FD_ISSET(*it, fds)))
+ continue;
+
+ SockAddr from;
+ int s = accept(*it, from.raw(), &from.addressSize);
+ if ( s < 0 ) {
+ int x = errno; // so no global issues
+ if ( x == ECONNABORTED || x == EBADF ) {
+ log() << "Listener on port " << _port << " aborted" << endl;
+ return;
+ }
+ if ( x == 0 && inShutdown() ) {
+ return; // socket closed
+ }
+ if( !inShutdown() ) {
+ log() << "Listener: accept() returns " << s << " " << errnoWithDescription(x) << endl;
+ if (x == EMFILE || x == ENFILE) {
+ // Connection still in listen queue but we can't accept it yet
+ error() << "Out of file descriptors. Waiting one second before trying to accept more connections." << warnings;
+ sleepsecs(1);
+ }
+ }
+ continue;
+ }
+ if (from.getType() != AF_UNIX)
+ disableNagle(s);
+ if ( _logConnect && ! cmdLine.quiet ){
+ int conns = connTicketHolder.used()+1;
+ const char* word = (conns == 1 ? " connection" : " connections");
+ log() << "connection accepted from " << from.toString() << " #" << ++connNumber << " (" << conns << word << " now open)" << endl;
+ }
+
+ Socket newSock = Socket(s, from);
+#ifdef MONGO_SSL
+ if ( _ssl && ( _sslPort == 0 || sslSocks.count(*it) ) ) {
+ newSock.secureAccepted( _ssl );
+ }
+#endif
+ accepted( newSock );
+ }
+ }
+ }
+
+ void Listener::_logListen( int port , bool ssl ) {
+ log() << _name << ( _name.size() ? " " : "" ) << "waiting for connections on port " << port << ( ssl ? " ssl" : "" ) << endl;
+ }
+
+
+ void Listener::accepted(Socket socket) {
+ accepted( new MessagingPort(socket) );
+ }
+
+ void Listener::accepted(MessagingPort *mp) {
+ assert(!"You must override one of the accepted methods");
+ }
+
+ // ----- ListeningSockets -------
+
+ ListeningSockets* ListeningSockets::_instance = new ListeningSockets();
+
+ ListeningSockets* ListeningSockets::get() {
+ return _instance;
+ }
+
+ // ------ connection ticket and control ------
+
+ const int DEFAULT_MAX_CONN = 20000;
+ const int MAX_MAX_CONN = 20000;
+
+ int getMaxConnections() {
+#ifdef _WIN32
+ return DEFAULT_MAX_CONN;
+#else
+ struct rlimit limit;
+ assert( getrlimit(RLIMIT_NOFILE,&limit) == 0 );
+
+ int max = (int)(limit.rlim_cur * .8);
+
+ log(1) << "fd limit"
+ << " hard:" << limit.rlim_max
+ << " soft:" << limit.rlim_cur
+ << " max conn: " << max
+ << endl;
+
+ if ( max > MAX_MAX_CONN )
+ max = MAX_MAX_CONN;
+
+ return max;
+#endif
+ }
+
+ void checkTicketNumbers() {
+ int want = getMaxConnections();
+ int current = connTicketHolder.outof();
+ if ( current != DEFAULT_MAX_CONN ) {
+ if ( current < want ) {
+ // they want fewer than they can handle
+ // which is fine
+ log(1) << " only allowing " << current << " connections" << endl;
+ return;
+ }
+ if ( current > want ) {
+ log() << " --maxConns too high, can only handle " << want << endl;
+ }
+ }
+ connTicketHolder.resize( want );
+ }
+
+ TicketHolder connTicketHolder(DEFAULT_MAX_CONN);
+
+}
diff --git a/src/mongo/util/net/listen.h b/src/mongo/util/net/listen.h
new file mode 100644
index 00000000000..ca90e835b97
--- /dev/null
+++ b/src/mongo/util/net/listen.h
@@ -0,0 +1,190 @@
+// listen.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "sock.h"
+
+namespace mongo {
+
+ class MessagingPort;
+
+ class Listener : boost::noncopyable {
+ public:
+
+ Listener(const string& name, const string &ip, int port, bool logConnect=true );
+
+ virtual ~Listener();
+
+#ifdef MONGO_SSL
+ /**
+ * make this an ssl socket
+ * ownership of SSLManager remains with the caller
+ */
+ void secure( SSLManager* manager );
+
+ void addSecurePort( SSLManager* manager , int additionalPort );
+#endif
+
+ void initAndListen(); // never returns unless error (start a thread)
+
+ /* spawn a thread, etc., then return */
+ virtual void accepted(Socket socket);
+ virtual void accepted(MessagingPort *mp);
+
+ const int _port;
+
+ /**
+ * @return a rough estimate of elapsed time since the server started
+ */
+ long long getMyElapsedTimeMillis() const { return _elapsedTime; }
+
+ void setAsTimeTracker() {
+ _timeTracker = this;
+ }
+
+ static const Listener* getTimeTracker() {
+ return _timeTracker;
+ }
+
+ static long long getElapsedTimeMillis() {
+ if ( _timeTracker )
+ return _timeTracker->getMyElapsedTimeMillis();
+
+ // should this assert or throw? seems like callers may not expect to get zero back, certainly not forever.
+ return 0;
+ }
+
+ private:
+ string _name;
+ string _ip;
+ bool _logConnect;
+ long long _elapsedTime;
+
+#ifdef MONGO_SSL
+ SSLManager* _ssl;
+ int _sslPort;
+#endif
+
+ /**
+ * @return true iff everything went ok
+ */
+ bool _setupSockets( const vector<SockAddr>& mine , vector<SOCKET>& socks );
+
+ void _logListen( int port , bool ssl );
+
+ static const Listener* _timeTracker;
+
+ virtual bool useUnixSockets() const { return false; }
+ };
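+
+ /* Usage sketch (illustrative only): concrete listeners subclass Listener and
+ override one of the accepted() overloads; MyListener below is hypothetical.
+ Run initAndListen() from a dedicated thread, since it only returns on error
+ or shutdown:
+
+ class MyListener : public Listener {
+ public:
+ MyListener( const string& ip, int port ) : Listener( "demo", ip, port ) {}
+ virtual void accepted(MessagingPort *mp) { ... }   // spawn a thread, handle mp
+ };
+ */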
+
+ /**
+ * keep track of elapsed time
+ * after a set amount of time, tells you to do something
+ * only in this file because depends on Listener
+ */
+ class ElapsedTracker {
+ public:
+ ElapsedTracker( int hitsBetweenMarks , int msBetweenMarks )
+ : _h( hitsBetweenMarks ) , _ms( msBetweenMarks ) , _pings(0) {
+ _last = Listener::getElapsedTimeMillis();
+ }
+
+ /**
+ * call this for every iteration
+ * returns true if one of the triggers has gone off
+ */
+ bool intervalHasElapsed() {
+ if ( ( ++_pings % _h ) == 0 ) {
+ _last = Listener::getElapsedTimeMillis();
+ return true;
+ }
+
+ long long now = Listener::getElapsedTimeMillis();
+ if ( now - _last > _ms ) {
+ _last = now;
+ return true;
+ }
+
+ return false;
+ }
+
+ private:
+ const int _h;
+ const int _ms;
+
+ unsigned long long _pings;
+
+ long long _last;
+
+ };
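+
+ /* Usage sketch (illustrative only): intervalHasElapsed() returns true roughly
+ every hitsBetweenMarks calls or whenever msBetweenMarks have passed, whichever
+ comes first, so it can rate-limit periodic work in a tight loop; doWork() and
+ reportProgress() below are hypothetical.
+
+ ElapsedTracker t( 128, 10 );
+ while( doWork() ) {
+ if( t.intervalHasElapsed() )
+ reportProgress();
+ }
+ */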
+
+ class ListeningSockets {
+ public:
+ ListeningSockets()
+ : _mutex("ListeningSockets")
+ , _sockets( new set<int>() )
+ , _socketPaths( new set<string>() )
+ { }
+ void add( int sock ) {
+ scoped_lock lk( _mutex );
+ _sockets->insert( sock );
+ }
+ void addPath( string path ) {
+ scoped_lock lk( _mutex );
+ _socketPaths->insert( path );
+ }
+ void remove( int sock ) {
+ scoped_lock lk( _mutex );
+ _sockets->erase( sock );
+ }
+ void closeAll() {
+ set<int>* sockets;
+ set<string>* paths;
+
+ {
+ scoped_lock lk( _mutex );
+ sockets = _sockets;
+ _sockets = new set<int>();
+ paths = _socketPaths;
+ _socketPaths = new set<string>();
+ }
+
+ for ( set<int>::iterator i=sockets->begin(); i!=sockets->end(); i++ ) {
+ int sock = *i;
+ log() << "closing listening socket: " << sock << endl;
+ closesocket( sock );
+ }
+
+ for ( set<string>::iterator i=paths->begin(); i!=paths->end(); i++ ) {
+ string path = *i;
+ log() << "removing socket file: " << path << endl;
+ ::remove( path.c_str() );
+ }
+ }
+ static ListeningSockets* get();
+ private:
+ mongo::mutex _mutex;
+ set<int>* _sockets;
+ set<string>* _socketPaths; // for unix domain sockets
+ static ListeningSockets* _instance;
+ };
+
+
+ extern TicketHolder connTicketHolder;
+
+}
diff --git a/src/mongo/util/net/message.cpp b/src/mongo/util/net/message.cpp
new file mode 100644
index 00000000000..a84e5c48c5c
--- /dev/null
+++ b/src/mongo/util/net/message.cpp
@@ -0,0 +1,64 @@
+// message.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+
+#include <fcntl.h>
+#include <errno.h>
+#include <time.h>
+
+#include "message.h"
+#include "message_port.h"
+#include "listen.h"
+
+#include "../goodies.h"
+#include "../../client/dbclient.h"
+
+namespace mongo {
+
+ void Message::send( MessagingPort &p, const char *context ) {
+ if ( empty() ) {
+ return;
+ }
+ if ( _buf != 0 ) {
+ p.send( (char*)_buf, _buf->len, context );
+ }
+ else {
+ p.send( _data, context );
+ }
+ }
+
+ MSGID NextMsgId;
+
+ /*struct MsgStart {
+ MsgStart() {
+ NextMsgId = (((unsigned) time(0)) << 16) ^ curTimeMillis();
+ assert(MsgDataHeaderSize == 16);
+ }
+ } msgstart;*/
+
+ MSGID nextMessageId() {
+ MSGID msgid = NextMsgId++;
+ return msgid;
+ }
+
+ bool doesOpGetAResponse( int op ) {
+ return op == dbQuery || op == dbGetMore;
+ }
+
+
+} // namespace mongo
diff --git a/src/mongo/util/net/message.h b/src/mongo/util/net/message.h
new file mode 100644
index 00000000000..14b0241f21d
--- /dev/null
+++ b/src/mongo/util/net/message.h
@@ -0,0 +1,312 @@
+// message.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "sock.h"
+#include "../../bson/util/atomic_int.h"
+#include "hostandport.h"
+
+namespace mongo {
+
+ class Message;
+ class MessagingPort;
+ class PiggyBackData;
+
+ typedef AtomicUInt MSGID;
+
+ enum Operations {
+ opReply = 1, /* reply. responseTo is set. */
+ dbMsg = 1000, /* generic msg command followed by a string */
+ dbUpdate = 2001, /* update object */
+ dbInsert = 2002,
+ //dbGetByOID = 2003,
+ dbQuery = 2004,
+ dbGetMore = 2005,
+ dbDelete = 2006,
+ dbKillCursors = 2007
+ };
+
+ bool doesOpGetAResponse( int op );
+
+ inline const char * opToString( int op ) {
+ switch ( op ) {
+ case 0: return "none";
+ case opReply: return "reply";
+ case dbMsg: return "msg";
+ case dbUpdate: return "update";
+ case dbInsert: return "insert";
+ case dbQuery: return "query";
+ case dbGetMore: return "getmore";
+ case dbDelete: return "remove";
+ case dbKillCursors: return "killcursors";
+ default:
+ PRINT(op);
+ assert(0);
+ return "";
+ }
+ }
+
+ inline bool opIsWrite( int op ) {
+ switch ( op ) {
+
+ case 0:
+ case opReply:
+ case dbMsg:
+ case dbQuery:
+ case dbGetMore:
+ case dbKillCursors:
+ return false;
+
+ case dbUpdate:
+ case dbInsert:
+ case dbDelete:
+ return true;
+
+ default:
+ PRINT(op);
+ assert(0);
+ return false;
+ }
+
+ }
+
+#pragma pack(1)
+ /* see http://www.mongodb.org/display/DOCS/Mongo+Wire+Protocol
+ */
+ struct MSGHEADER {
+ int messageLength; // total message size, including this
+ int requestID; // identifier for this message
+ int responseTo; // requestID from the original request
+ // (used in responses from db)
+ int opCode;
+ };
+ struct OP_GETMORE : public MSGHEADER {
+ MSGHEADER header; // standard message header
+ int ZERO_or_flags; // 0 - reserved for future use
+ //cstring fullCollectionName; // "dbname.collectionname"
+ //int32 numberToReturn; // number of documents to return
+ //int64 cursorID; // cursorID from the OP_REPLY
+ };
+#pragma pack()
+
+#pragma pack(1)
+ /* todo merge this with MSGHEADER (or inherit from it). */
+ struct MsgData {
+ int len; /* len of the msg, including this field */
+ MSGID id; /* request/reply id's match... */
+ MSGID responseTo; /* id of the message we are responding to */
+ short _operation;
+ char _flags;
+ char _version;
+ int operation() const {
+ return _operation;
+ }
+ void setOperation(int o) {
+ _flags = 0;
+ _version = 0;
+ _operation = o;
+ }
+ char _data[4];
+
+ int& dataAsInt() {
+ return *((int *) _data);
+ }
+
+ bool valid() {
+ if ( len <= 0 || len > ( 4 * BSONObjMaxInternalSize ) )
+ return false;
+ if ( _operation < 0 || _operation > 30000 )
+ return false;
+ return true;
+ }
+
+ long long getCursor() {
+ assert( responseTo > 0 );
+ assert( _operation == opReply );
+ long long * l = (long long *)(_data + 4);
+ return l[0];
+ }
+
+ int dataLen(); // len without header
+ };
+ const int MsgDataHeaderSize = sizeof(MsgData) - 4;
+ inline int MsgData::dataLen() {
+ return len - MsgDataHeaderSize;
+ }
+#pragma pack()
+
+ class Message {
+ public:
+ // we assume here that a vector with initial size 0 does no allocation (0 is the default, but we wanted to make it explicit).
+ Message() : _buf( 0 ), _data( 0 ), _freeIt( false ) {}
+ Message( void * data , bool freeIt ) :
+ _buf( 0 ), _data( 0 ), _freeIt( false ) {
+ _setData( reinterpret_cast< MsgData* >( data ), freeIt );
+ };
+ Message(Message& r) : _buf( 0 ), _data( 0 ), _freeIt( false ) {
+ *this = r;
+ }
+ ~Message() {
+ reset();
+ }
+
+ SockAddr _from;
+
+ MsgData *header() const {
+ assert( !empty() );
+ return _buf ? _buf : reinterpret_cast< MsgData* > ( _data[ 0 ].first );
+ }
+ int operation() const { return header()->operation(); }
+
+ MsgData *singleData() const {
+ massert( 13273, "single data buffer expected", _buf );
+ return header();
+ }
+
+ bool empty() const { return !_buf && _data.empty(); }
+
+ int size() const {
+ int res = 0;
+ if ( _buf ) {
+ res = _buf->len;
+ }
+ else {
+ for (MsgVec::const_iterator it = _data.begin(); it != _data.end(); ++it) {
+ res += it->second;
+ }
+ }
+ return res;
+ }
+
+ int dataSize() const { return size() - sizeof(MSGHEADER); }
+
+ // concat multiple buffers - noop if <2 buffers already, otherwise can be expensive copy
+ // can get rid of this if we make response handling smarter
+ void concat() {
+ if ( _buf || empty() ) {
+ return;
+ }
+
+ assert( _freeIt );
+ int totalSize = 0;
+ for( vector< pair< char *, int > >::const_iterator i = _data.begin(); i != _data.end(); ++i ) {
+ totalSize += i->second;
+ }
+ char *buf = (char*)malloc( totalSize );
+ char *p = buf;
+ for( vector< pair< char *, int > >::const_iterator i = _data.begin(); i != _data.end(); ++i ) {
+ memcpy( p, i->first, i->second );
+ p += i->second;
+ }
+ reset();
+ _setData( (MsgData*)buf, true );
+ }
+
+ // vector swap() so this is fast
+ Message& operator=(Message& r) {
+ assert( empty() );
+ assert( r._freeIt );
+ _buf = r._buf;
+ r._buf = 0;
+ if ( r._data.size() > 0 ) {
+ _data.swap( r._data );
+ }
+ r._freeIt = false;
+ _freeIt = true;
+ return *this;
+ }
+
+ void reset() {
+ if ( _freeIt ) {
+ if ( _buf ) {
+ free( _buf );
+ }
+ for( vector< pair< char *, int > >::const_iterator i = _data.begin(); i != _data.end(); ++i ) {
+ free(i->first);
+ }
+ }
+ _buf = 0;
+ _data.clear();
+ _freeIt = false;
+ }
+
+ // use to add a buffer
+ // assumes message will free everything
+ void appendData(char *d, int size) {
+ if ( size <= 0 ) {
+ return;
+ }
+ if ( empty() ) {
+ MsgData *md = (MsgData*)d;
+ md->len = size; // can be updated later if more buffers added
+ _setData( md, true );
+ return;
+ }
+ assert( _freeIt );
+ if ( _buf ) {
+ _data.push_back( make_pair( (char*)_buf, _buf->len ) );
+ _buf = 0;
+ }
+ _data.push_back( make_pair( d, size ) );
+ header()->len += size;
+ }
+
+ // use to set first buffer if empty
+ void setData(MsgData *d, bool freeIt) {
+ assert( empty() );
+ _setData( d, freeIt );
+ }
+ void setData(int operation, const char *msgtxt) {
+ setData(operation, msgtxt, strlen(msgtxt)+1);
+ }
+ void setData(int operation, const char *msgdata, size_t len) {
+ assert( empty() );
+ size_t dataLen = len + sizeof(MsgData) - 4;
+ MsgData *d = (MsgData *) malloc(dataLen);
+ memcpy(d->_data, msgdata, len);
+ d->len = fixEndian(dataLen);
+ d->setOperation(operation);
+ _setData( d, true );
+ }
+
+ bool doIFreeIt() {
+ return _freeIt;
+ }
+
+ void send( MessagingPort &p, const char *context );
+
+ string toString() const;
+
+ private:
+ void _setData( MsgData *d, bool freeIt ) {
+ _freeIt = freeIt;
+ _buf = d;
+ }
+ // if just one buffer, keep it in _buf, otherwise keep a sequence of buffers in _data
+ MsgData * _buf;
+ // byte buffer(s) - the first must contain at least a full MsgData unless using _buf for storage instead
+ typedef vector< pair< char*, int > > MsgVec;
+ MsgVec _data;
+ bool _freeIt;
+ };
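+
+ /* Usage sketch (illustrative only), assuming the declarations above; 'port' is
+ a hypothetical, already-connected MessagingPort:
+
+ Message m;
+ m.setData( dbMsg , "ping" );    // Message allocates and owns the buffer
+ m.send( port , "example" );
+ */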
+
+
+ MSGID nextMessageId();
+
+
+} // namespace mongo
diff --git a/src/mongo/util/net/message_port.cpp b/src/mongo/util/net/message_port.cpp
new file mode 100644
index 00000000000..c342ed3c8b7
--- /dev/null
+++ b/src/mongo/util/net/message_port.cpp
@@ -0,0 +1,303 @@
+// message_port.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+
+#include <fcntl.h>
+#include <errno.h>
+#include <time.h>
+
+#include "message.h"
+#include "message_port.h"
+#include "listen.h"
+
+#include "../goodies.h"
+#include "../background.h"
+#include "../time_support.h"
+#include "../../db/cmdline.h"
+#include "../../client/dbclient.h"
+
+
+#ifndef _WIN32
+# ifndef __sunos__
+# include <ifaddrs.h>
+# endif
+# include <sys/resource.h>
+# include <sys/stat.h>
+#else
+
+// errno doesn't work for winsock.
+#undef errno
+#define errno WSAGetLastError()
+
+#endif
+
+namespace mongo {
+
+
+// if you want trace output:
+#define mmm(x)
+
+ /* messagingport -------------------------------------------------------------- */
+
+ class PiggyBackData {
+ public:
+ PiggyBackData( MessagingPort * port ) {
+ _port = port;
+ _buf = new char[1300];
+ _cur = _buf;
+ }
+
+ ~PiggyBackData() {
+ DESTRUCTOR_GUARD (
+ flush();
+ delete[]( _buf );
+ );
+ }
+
+ void append( Message& m ) {
+ assert( m.header()->len <= 1300 );
+
+ if ( len() + m.header()->len > 1300 )
+ flush();
+
+ memcpy( _cur , m.singleData() , m.header()->len );
+ _cur += m.header()->len;
+ }
+
+ void flush() {
+ if ( _buf == _cur )
+ return;
+
+ _port->send( _buf , len(), "flush" );
+ _cur = _buf;
+ }
+
+ int len() const { return _cur - _buf; }
+
+ private:
+ MessagingPort* _port;
+ char * _buf;
+ char * _cur;
+ };
+
+ class Ports {
+ set<MessagingPort*> ports;
+ mongo::mutex m;
+ public:
+ Ports() : ports(), m("Ports") {}
+ void closeAll(unsigned skip_mask) {
+ scoped_lock bl(m);
+ for ( set<MessagingPort*>::iterator i = ports.begin(); i != ports.end(); i++ ) {
+ if( (*i)->tag & skip_mask )
+ continue;
+ (*i)->shutdown();
+ }
+ }
+ void insert(MessagingPort* p) {
+ scoped_lock bl(m);
+ ports.insert(p);
+ }
+ void erase(MessagingPort* p) {
+ scoped_lock bl(m);
+ ports.erase(p);
+ }
+ };
+
+ // we "new" this so it is still around when other automatic global vars
+ // are being destructed during termination.
+ Ports& ports = *(new Ports());
+
+ void MessagingPort::closeAllSockets(unsigned mask) {
+ ports.closeAll(mask);
+ }
+
+ MessagingPort::MessagingPort(int fd, const SockAddr& remote)
+ : Socket( fd , remote ) , piggyBackData(0) {
+ ports.insert(this);
+ }
+
+ MessagingPort::MessagingPort( double timeout, int ll )
+ : Socket( timeout, ll ) {
+ ports.insert(this);
+ piggyBackData = 0;
+ }
+
+ MessagingPort::MessagingPort( Socket& sock )
+ : Socket( sock ) , piggyBackData( 0 ) {
+ ports.insert(this);
+ }
+
+ void MessagingPort::shutdown() {
+ close();
+ }
+
+ MessagingPort::~MessagingPort() {
+ if ( piggyBackData )
+ delete( piggyBackData );
+ shutdown();
+ ports.erase(this);
+ }
+
+ bool MessagingPort::recv(Message& m) {
+ try {
+again:
+ mmm( log() << "* recv() sock:" << this->sock << endl; )
+ int len = -1;
+
+ char *lenbuf = (char *) &len;
+ int lft = 4;
+ Socket::recv( lenbuf, lft );
+
+ if ( len < 16 || len > 48000000 ) { // messages must be large enough for headers
+ if ( len == -1 ) {
+ // Endian check from the client, after connecting, to see what mode server is running in.
+ unsigned foo = 0x10203040;
+ send( (char *) &foo, 4, "endian" );
+ goto again;
+ }
+
+ if ( len == 542393671 ) {
+ // an http GET
+ log(_logLevel) << "looks like you're trying to access db over http on native driver port. please add 1000 for webserver" << endl;
+ string msg = "You are trying to access MongoDB on the native driver port. For http diagnostic access, add 1000 to the port number\n";
+ stringstream ss;
+ ss << "HTTP/1.0 200 OK\r\nConnection: close\r\nContent-Type: text/plain\r\nContent-Length: " << msg.size() << "\r\n\r\n" << msg;
+ string s = ss.str();
+ send( s.c_str(), s.size(), "http" );
+ return false;
+ }
+ log(0) << "recv(): message len " << len << " is too large" << endl;
+ return false;
+ }
+
+ int z = (len+1023)&0xfffffc00;
+ assert(z>=len);
+ MsgData *md = (MsgData *) malloc(z);
+ assert(md);
+ md->len = len;
+
+ char *p = (char *) &md->id;
+ int left = len -4;
+
+ try {
+ Socket::recv( p, left );
+ }
+ catch (...) {
+ free(md);
+ throw;
+ }
+
+ m.setData(md, true);
+ return true;
+
+ }
+ catch ( const SocketException & e ) {
+ log(_logLevel + (e.shouldPrint() ? 0 : 1) ) << "SocketException: remote: " << remote() << " error: " << e << endl;
+ m.reset();
+ return false;
+ }
+ }
+
+ void MessagingPort::reply(Message& received, Message& response) {
+ say(/*received.from, */response, received.header()->id);
+ }
+
+ void MessagingPort::reply(Message& received, Message& response, MSGID responseTo) {
+ say(/*received.from, */response, responseTo);
+ }
+
+ bool MessagingPort::call(Message& toSend, Message& response) {
+ mmm( log() << "*call()" << endl; )
+ say(toSend);
+ return recv( toSend , response );
+ }
+
+ bool MessagingPort::recv( const Message& toSend , Message& response ) {
+ while ( 1 ) {
+ bool ok = recv(response);
+ if ( !ok )
+ return false;
+ //log() << "got response: " << response.data->responseTo << endl;
+ if ( response.header()->responseTo == toSend.header()->id )
+ break;
+ error() << "MessagingPort::call() wrong id got:" << hex << (unsigned)response.header()->responseTo << " expect:" << (unsigned)toSend.header()->id << '\n'
+ << dec
+ << " toSend op: " << (unsigned)toSend.operation() << '\n'
+ << " response msgid:" << (unsigned)response.header()->id << '\n'
+ << " response len: " << (unsigned)response.header()->len << '\n'
+ << " response op: " << response.operation() << '\n'
+ << " remote: " << remoteString() << endl;
+ assert(false);
+ response.reset();
+ }
+ mmm( log() << "*call() end" << endl; )
+ return true;
+ }
+
+ void MessagingPort::assertStillConnected() {
+ uassert(15901, "client disconnected during operation", Socket::stillConnected());
+ }
+
+ void MessagingPort::say(Message& toSend, int responseTo) {
+ assert( !toSend.empty() );
+ mmm( log() << "* say() sock:" << this->sock << " thr:" << GetCurrentThreadId() << endl; )
+ toSend.header()->id = nextMessageId();
+ toSend.header()->responseTo = responseTo;
+
+ if ( piggyBackData && piggyBackData->len() ) {
+ mmm( log() << "* have piggy back" << endl; )
+ if ( ( piggyBackData->len() + toSend.header()->len ) > 1300 ) {
+ // won't fit in a packet - so just send it off
+ piggyBackData->flush();
+ }
+ else {
+ piggyBackData->append( toSend );
+ piggyBackData->flush();
+ return;
+ }
+ }
+
+ toSend.send( *this, "say" );
+ }
+
+ void MessagingPort::piggyBack( Message& toSend , int responseTo ) {
+
+ if ( toSend.header()->len > 1300 ) {
+ // not worth saving because it's almost an entire packet
+ say( toSend );
+ return;
+ }
+
+ // we're going to be storing this, so need to set it up
+ toSend.header()->id = nextMessageId();
+ toSend.header()->responseTo = responseTo;
+
+ if ( ! piggyBackData )
+ piggyBackData = new PiggyBackData( this );
+
+ piggyBackData->append( toSend );
+ }
+
+ HostAndPort MessagingPort::remote() const {
+ if ( ! _remoteParsed.hasPort() )
+ _remoteParsed = HostAndPort( remoteAddr() );
+ return _remoteParsed;
+ }
+
+
+} // namespace mongo
diff --git a/src/mongo/util/net/message_port.h b/src/mongo/util/net/message_port.h
new file mode 100644
index 00000000000..5d404d84f8a
--- /dev/null
+++ b/src/mongo/util/net/message_port.h
@@ -0,0 +1,108 @@
+// message_port.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "sock.h"
+#include "message.h"
+
+namespace mongo {
+
+ class MessagingPort;
+ class PiggyBackData;
+
+ typedef AtomicUInt MSGID;
+
+ class AbstractMessagingPort : boost::noncopyable {
+ public:
+ AbstractMessagingPort() : tag(0) {}
+ virtual ~AbstractMessagingPort() { }
+ virtual void reply(Message& received, Message& response, MSGID responseTo) = 0; // like the reply below, but doesn't rely on received.data still being available
+ virtual void reply(Message& received, Message& response) = 0;
+
+ virtual HostAndPort remote() const = 0;
+ virtual unsigned remotePort() const = 0;
+
+ virtual void assertStillConnected() = 0;
+
+ public:
+ // TODO make this private with some helpers
+
+ /* ports can be tagged with various classes. see closeAllSockets(tag). defaults to 0. */
+ unsigned tag;
+
+ };
+
+ class MessagingPort : public AbstractMessagingPort , public Socket {
+ public:
+ MessagingPort(int fd, const SockAddr& remote);
+
+ // in some cases the timeout will actually be 2x this value - eg we do a partial send,
+ // then the timeout fires, then we try to send again, then the timeout fires again with
+ // no data sent, then we detect that the other side is down
+ MessagingPort(double so_timeout = 0, int logLevel = 0 );
+
+ MessagingPort(Socket& socket);
+
+ virtual ~MessagingPort();
+
+ void shutdown();
+
+ /* it's assumed that if you reuse a message object, it doesn't cross MessagingPorts.
+ also, the Message data will go out of scope on the subsequent recv call.
+ */
+ bool recv(Message& m);
+ void reply(Message& received, Message& response, MSGID responseTo);
+ void reply(Message& received, Message& response);
+ bool call(Message& toSend, Message& response);
+
+ void say(Message& toSend, int responseTo = -1);
+
+ /**
+ * this is used for doing 'async' queries
+ * instead of doing call( to , from )
+ * you would do
+ * say( to )
+ * recv( from )
+ * Note: if you fail to call recv and someone else uses this port,
+ * horrible things will happen
+ */
+ bool recv( const Message& sent , Message& response );
+
+ void piggyBack( Message& toSend , int responseTo = -1 );
+
+ unsigned remotePort() const { return Socket::remotePort(); }
+ virtual HostAndPort remote() const;
+
+ void assertStillConnected();
+
+ private:
+
+ PiggyBackData * piggyBackData;
+
+ // this is the parsed version of remote
+ // mutable because it's initialized lazily, on the first call to remote()
+ mutable HostAndPort _remoteParsed;
+
+ public:
+ static void closeAllSockets(unsigned tagMask = 0xffffffff);
+
+ friend class PiggyBackData;
+ };
+
+
+} // namespace mongo
diff --git a/src/mongo/util/net/message_server.h b/src/mongo/util/net/message_server.h
new file mode 100644
index 00000000000..ae77b97bb0f
--- /dev/null
+++ b/src/mongo/util/net/message_server.h
@@ -0,0 +1,66 @@
+// message_server.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ abstract database server
+ async io core, worker thread system
+ */
+
+#pragma once
+
+#include "../../pch.h"
+
+namespace mongo {
+
+ class MessageHandler {
+ public:
+ virtual ~MessageHandler() {}
+
+ /**
+ * called once when a socket is connected
+ */
+ virtual void connected( AbstractMessagingPort* p ) = 0;
+
+ /**
+ * called every time a message comes in
+ * handler is responsible for responding to client
+ */
+ virtual void process( Message& m , AbstractMessagingPort* p , LastError * err ) = 0;
+
+ /**
+ * called once when a socket is disconnected
+ */
+ virtual void disconnected( AbstractMessagingPort* p ) = 0;
+ };
+
+ class MessageServer {
+ public:
+ struct Options {
+ int port; // port to bind to
+ string ipList; // addresses to bind to
+
+ Options() : port(0), ipList("") {}
+ };
+
+ virtual ~MessageServer() {}
+ virtual void run() = 0;
+ virtual void setAsTimeTracker() = 0;
+ };
+
+ // TODO use a factory here to decide between port and asio variations
+ MessageServer * createServer( const MessageServer::Options& opts , MessageHandler * handler );
+}
diff --git a/src/mongo/util/net/message_server_asio.cpp b/src/mongo/util/net/message_server_asio.cpp
new file mode 100644
index 00000000000..0c6a7d925da
--- /dev/null
+++ b/src/mongo/util/net/message_server_asio.cpp
@@ -0,0 +1,261 @@
+// message_server_asio.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifdef USE_ASIO
+
+#include <boost/asio.hpp>
+#include <boost/bind.hpp>
+#include <boost/enable_shared_from_this.hpp>
+#include <boost/shared_ptr.hpp>
+
+#include <iostream>
+#include <vector>
+
+#include "message.h"
+#include "message_server.h"
+#include "../util/concurrency/mvar.h"
+
+using namespace boost;
+using namespace boost::asio;
+using namespace boost::asio::ip;
+
+namespace mongo {
+ class MessageServerSession;
+
+ namespace {
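+ // a StickyThread owns one worker thread; sessions are handed to it through the MVar and processed there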
+ class StickyThread {
+ public:
+ StickyThread()
+ : _thread(boost::ref(*this))
+ {}
+
+ ~StickyThread() {
+ _mss.put(boost::shared_ptr<MessageServerSession>());
+ _thread.join();
+ }
+
+ void ready(boost::shared_ptr<MessageServerSession> mss) {
+ _mss.put(mss);
+ }
+
+ void operator() () {
+ boost::shared_ptr<MessageServerSession> mss;
+ while((mss = _mss.take())) { // intentionally not using ==
+ task(mss.get());
+ mss.reset();
+ }
+ }
+
+ private:
+ boost::thread _thread;
+ inline void task(MessageServerSession* mss); // must be defined after MessageServerSession
+
+ MVar<boost::shared_ptr<MessageServerSession> > _mss; // populated when given a task
+ };
+
+ vector<boost::shared_ptr<StickyThread> > thread_pool;
+ mongo::mutex tp_mutex; // this is only needed if io_service::run() is called from multiple threads
+ }
+
+ class MessageServerSession : public boost::enable_shared_from_this<MessageServerSession> , public AbstractMessagingPort {
+ public:
+ MessageServerSession( MessageHandler * handler , io_service& ioservice )
+ : _handler( handler )
+ , _socket( ioservice )
+ , _portCache(0)
+ { }
+
+ ~MessageServerSession() {
+ cout << "disconnect from: " << _socket.remote_endpoint() << endl;
+ }
+
+ tcp::socket& socket() {
+ return _socket;
+ }
+
+ void start() {
+ cout << "MessageServerSession start from:" << _socket.remote_endpoint() << endl;
+ _startHeaderRead();
+ }
+
+ void handleReadHeader( const boost::system::error_code& error ) {
+ if ( _inHeader.len == 0 )
+ return;
+
+ if ( ! _inHeader.valid() ) {
+ cout << " got invalid header from: " << _socket.remote_endpoint() << " closing connected" << endl;
+ return;
+ }
+
+ char * raw = (char*)malloc( _inHeader.len );
+
+ MsgData * data = (MsgData*)raw;
+ memcpy( data , &_inHeader , sizeof( _inHeader ) );
+ assert( data->len == _inHeader.len );
+
+ uassert( 10273 , "_cur not empty! pipelining requests not supported" , ! _cur.data );
+
+ _cur.setData( data , true );
+ async_read( _socket ,
+ buffer( raw + sizeof( _inHeader ) , _inHeader.len - sizeof( _inHeader ) ) ,
+ boost::bind( &MessageServerSession::handleReadBody , shared_from_this() , boost::asio::placeholders::error ) );
+ }
+
+ void handleReadBody( const boost::system::error_code& error ) {
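+ // pick up an idle worker thread from the pool, or create a new one, and hand it this session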
+ if (!_myThread) {
+ mongo::mutex::scoped_lock(tp_mutex);
+ if (!thread_pool.empty()) {
+ _myThread = thread_pool.back();
+ thread_pool.pop_back();
+ }
+ }
+
+ if (!_myThread) // pool is empty
+ _myThread.reset(new StickyThread());
+
+ assert(_myThread);
+
+ _myThread->ready(shared_from_this());
+ }
+
+ void process() {
+ _handler->process( _cur , this );
+
+ if (_reply.data) {
+ async_write( _socket ,
+ buffer( (char*)_reply.data , _reply.data->len ) ,
+ boost::bind( &MessageServerSession::handleWriteDone , shared_from_this() , boost::asio::placeholders::error ) );
+ }
+ else {
+ _cur.reset();
+ _startHeaderRead();
+ }
+ }
+
+ void handleWriteDone( const boost::system::error_code& error ) {
+ {
+ // return thread to pool after we have sent data to the client
+ mongo::mutex::scoped_lock(tp_mutex);
+ assert(_myThread);
+ thread_pool.push_back(_myThread);
+ _myThread.reset();
+ }
+ _cur.reset();
+ _reply.reset();
+ _startHeaderRead();
+ }
+
+ virtual void reply( Message& received, Message& response ) {
+ reply( received , response , received.data->id );
+ }
+
+ virtual void reply( Message& query , Message& toSend, MSGID responseTo ) {
+ _reply = toSend;
+
+ _reply.data->id = nextMessageId();
+ _reply.data->responseTo = responseTo;
+ uassert( 10274 , "pipelining requests doesn't work yet" , query.data->id == _cur.data->id );
+ }
+
+
+ virtual unsigned remotePort() {
+ if (!_portCache)
+ _portCache = _socket.remote_endpoint().port(); //this is expensive
+ return _portCache;
+ }
+
+ private:
+
+ void _startHeaderRead() {
+ _inHeader.len = 0;
+ async_read( _socket ,
+ buffer( &_inHeader , sizeof( _inHeader ) ) ,
+ boost::bind( &MessageServerSession::handleReadHeader , shared_from_this() , boost::asio::placeholders::error ) );
+ }
+
+ MessageHandler * _handler;
+ tcp::socket _socket;
+ MsgData _inHeader;
+ Message _cur;
+ Message _reply;
+
+ unsigned _portCache;
+
+ boost::shared_ptr<StickyThread> _myThread;
+ };
+
+ void StickyThread::task(MessageServerSession* mss) {
+ mss->process();
+ }
+
+
+ class AsyncMessageServer : public MessageServer {
+ public:
+ // TODO accept an IP address to bind to
+ AsyncMessageServer( const MessageServer::Options& opts , MessageHandler * handler )
+ : _port( opts.port )
+ , _handler(handler)
+ , _endpoint( tcp::v4() , opts.port )
+ , _acceptor( _ioservice , _endpoint ) {
+ _accept();
+ }
+ virtual ~AsyncMessageServer() {
+
+ }
+
+ void run() {
+ cout << "AsyncMessageServer starting to listen on: " << _port << endl;
+ boost::thread other(boost::bind(&io_service::run, &_ioservice));
+ _ioservice.run();
+ cout << "AsyncMessageServer done listening on: " << _port << endl;
+ }
+
+ void handleAccept( shared_ptr<MessageServerSession> session ,
+ const boost::system::error_code& error ) {
+ if ( error ) {
+ cout << "handleAccept error!" << endl;
+ return;
+ }
+ session->start();
+ _accept();
+ }
+
+ void _accept( ) {
+ shared_ptr<MessageServerSession> session( new MessageServerSession( _handler , _ioservice ) );
+ _acceptor.async_accept( session->socket() ,
+ boost::bind( &AsyncMessageServer::handleAccept,
+ this,
+ session,
+ boost::asio::placeholders::error )
+ );
+ }
+
+ private:
+ int _port;
+ MessageHandler * _handler;
+ io_service _ioservice;
+ tcp::endpoint _endpoint;
+ tcp::acceptor _acceptor;
+ };
+
+ MessageServer * createServer( const MessageServer::Options& opts , MessageHandler * handler ) {
+ return new AsyncMessageServer( opts , handler );
+ }
+
+}
+
+#endif
diff --git a/src/mongo/util/net/message_server_port.cpp b/src/mongo/util/net/message_server_port.cpp
new file mode 100644
index 00000000000..7e6a731529b
--- /dev/null
+++ b/src/mongo/util/net/message_server_port.cpp
@@ -0,0 +1,204 @@
+// message_server_port.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+
+#ifndef USE_ASIO
+
+#include "message.h"
+#include "message_port.h"
+#include "message_server.h"
+#include "listen.h"
+
+#include "../../db/cmdline.h"
+#include "../../db/lasterror.h"
+#include "../../db/stats/counters.h"
+
+#ifdef __linux__ // TODO: consider making this ifndef _WIN32
+# include <sys/resource.h>
+#endif
+
+namespace mongo {
+
+ namespace pms {
+
+ MessageHandler * handler;
+
+ void threadRun( MessagingPort * inPort) {
+ TicketHolderReleaser connTicketReleaser( &connTicketHolder );
+
+ setThreadName( "conn" );
+
+ assert( inPort );
+ inPort->setLogLevel(1);
+ scoped_ptr<MessagingPort> p( inPort );
+
+ p->postFork();
+
+ string otherSide;
+
+ Message m;
+ try {
+ LastError * le = new LastError();
+ lastError.reset( le ); // lastError now has ownership
+
+ otherSide = p->remoteString();
+
+ handler->connected( p.get() );
+
+ while ( ! inShutdown() ) {
+ m.reset();
+ p->clearCounters();
+
+ if ( ! p->recv(m) ) {
+ if( !cmdLine.quiet ){
+ int conns = connTicketHolder.used()-1;
+ const char* word = (conns == 1 ? " connection" : " connections");
+ log() << "end connection " << otherSide << " (" << conns << word << " now open)" << endl;
+ }
+ p->shutdown();
+ break;
+ }
+
+ handler->process( m , p.get() , le );
+ networkCounter.hit( p->getBytesIn() , p->getBytesOut() );
+ }
+ }
+ catch ( AssertionException& e ) {
+ log() << "AssertionException handling request, closing client connection: " << e << endl;
+ p->shutdown();
+ }
+ catch ( SocketException& e ) {
+ log() << "SocketException handling request, closing client connection: " << e << endl;
+ p->shutdown();
+ }
+ catch ( const ClockSkewException & ) {
+ log() << "ClockSkewException - shutting down" << endl;
+ exitCleanly( EXIT_CLOCK_SKEW );
+ }
+ catch ( const DBException& e ) { // must be right above std::exception to avoid catching subclasses
+ log() << "DBException handling request, closing client connection: " << e << endl;
+ p->shutdown();
+ }
+ catch ( std::exception &e ) {
+ error() << "Uncaught std::exception: " << e.what() << ", terminating" << endl;
+ dbexit( EXIT_UNCAUGHT );
+ }
+ catch ( ... ) {
+ error() << "Uncaught exception, terminating" << endl;
+ dbexit( EXIT_UNCAUGHT );
+ }
+
+ handler->disconnected( p.get() );
+ }
+
+ }
+
+ class PortMessageServer : public MessageServer , public Listener {
+ public:
+ PortMessageServer( const MessageServer::Options& opts, MessageHandler * handler ) :
+ Listener( "" , opts.ipList, opts.port ) {
+
+ uassert( 10275 , "multiple PortMessageServer not supported" , ! pms::handler );
+ pms::handler = handler;
+ }
+
+ virtual void accepted(MessagingPort * p) {
+
+ if ( ! connTicketHolder.tryAcquire() ) {
+ log() << "connection refused because too many open connections: " << connTicketHolder.used() << endl;
+
+ // TODO: would be nice if we notified them...
+ p->shutdown();
+ delete p;
+
+ sleepmillis(2); // otherwise we'll hard loop
+ return;
+ }
+
+ try {
+#ifndef __linux__ // TODO: consider making this ifdef _WIN32
+ boost::thread thr( boost::bind( &pms::threadRun , p ) );
+#else
+ pthread_attr_t attrs;
+ pthread_attr_init(&attrs);
+ pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED);
+
+ static const size_t STACK_SIZE = 1024*1024; // if we change this we need to update the warning
+
+ struct rlimit limits;
+ verify(15887, getrlimit(RLIMIT_STACK, &limits) == 0);
+ if (limits.rlim_cur > STACK_SIZE) {
+ pthread_attr_setstacksize(&attrs, (DEBUG_BUILD
+ ? (STACK_SIZE / 2)
+ : STACK_SIZE));
+ } else if (limits.rlim_cur < 1024*1024) {
+ warning() << "Stack size set to " << (limits.rlim_cur/1024) << "KB. We suggest 1MB" << endl;
+ }
+
+
+ pthread_t thread;
+ int failed = pthread_create(&thread, &attrs, (void*(*)(void*)) &pms::threadRun, p);
+
+ pthread_attr_destroy(&attrs);
+
+ if (failed) {
+ log() << "pthread_create failed: " << errnoWithDescription(failed) << endl;
+ throw boost::thread_resource_error(); // for consistency with boost::thread
+ }
+#endif
+ }
+ catch ( boost::thread_resource_error& ) {
+ connTicketHolder.release();
+ log() << "can't create new thread, closing connection" << endl;
+
+ p->shutdown();
+ delete p;
+
+ sleepmillis(2);
+ }
+ catch ( ... ) {
+ connTicketHolder.release();
+ log() << "unknown error accepting new socket" << endl;
+
+ p->shutdown();
+ delete p;
+
+ sleepmillis(2);
+ }
+
+ }
+
+ virtual void setAsTimeTracker() {
+ Listener::setAsTimeTracker();
+ }
+
+ void run() {
+ initAndListen();
+ }
+
+ virtual bool useUnixSockets() const { return true; }
+ };
+
+
+ MessageServer * createServer( const MessageServer::Options& opts , MessageHandler * handler ) {
+ return new PortMessageServer( opts , handler );
+ }
+
+}
+
+#endif
diff --git a/src/mongo/util/net/miniwebserver.cpp b/src/mongo/util/net/miniwebserver.cpp
new file mode 100644
index 00000000000..f0b58569d22
--- /dev/null
+++ b/src/mongo/util/net/miniwebserver.cpp
@@ -0,0 +1,212 @@
+// miniwebserver.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "miniwebserver.h"
+#include "../hex.h"
+
+#include "pcrecpp.h"
+
+namespace mongo {
+
+ MiniWebServer::MiniWebServer(const string& name, const string &ip, int port)
+ : Listener(name, ip, port, false)
+ {}
+
+ string MiniWebServer::parseURL( const char * buf ) {
+ const char * urlStart = strchr( buf , ' ' );
+ if ( ! urlStart )
+ return "/";
+
+ urlStart++;
+
+ const char * end = strchr( urlStart , ' ' );
+ if ( ! end ) {
+ end = strchr( urlStart , '\r' );
+ if ( ! end ) {
+ end = strchr( urlStart , '\n' );
+ }
+ }
+
+ if ( ! end )
+ return "/";
+
+ int diff = (int)(end-urlStart);
+ if ( diff < 0 || diff > 255 )
+ return "/";
+
+ return string( urlStart , (int)(end-urlStart) );
+ }
+
+ void MiniWebServer::parseParams( BSONObj & params , string query ) {
+ if ( query.size() == 0 )
+ return;
+
+ BSONObjBuilder b;
+ while ( query.size() ) {
+
+ string::size_type amp = query.find( "&" );
+
+ string cur;
+ if ( amp == string::npos ) {
+ cur = query;
+ query = "";
+ }
+ else {
+ cur = query.substr( 0 , amp );
+ query = query.substr( amp + 1 );
+ }
+
+ string::size_type eq = cur.find( "=" );
+ if ( eq == string::npos )
+ continue;
+
+ b.append( urlDecode(cur.substr(0,eq)) , urlDecode(cur.substr(eq+1) ) );
+ }
+
+ params = b.obj();
+ }
+
+ string MiniWebServer::parseMethod( const char * headers ) {
+ const char * end = strchr( headers , ' ' );
+ if ( ! end )
+ return "GET";
+ return string( headers , (int)(end-headers) );
+ }
+
+ const char *MiniWebServer::body( const char *buf ) {
+ const char *ret = strstr( buf, "\r\n\r\n" );
+ return ret ? ret + 4 : ret;
+ }
+
+ bool MiniWebServer::fullReceive( const char *buf ) {
+ const char *bod = body( buf );
+ if ( !bod )
+ return false;
+ const char *lenString = "Content-Length:";
+ const char *lengthLoc = strstr( buf, lenString );
+ if ( !lengthLoc )
+ return true;
+ lengthLoc += strlen( lenString );
+ long len = strtol( lengthLoc, 0, 10 );
+ if ( long( strlen( bod ) ) == len )
+ return true;
+ return false;
+ }
+
+ void MiniWebServer::accepted(Socket sock) {
+ sock.postFork();
+ sock.setTimeout(8);
+ char buf[4096];
+ int len = 0;
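+ // accumulate the request into buf until the headers (and any Content-Length body) have fully arrived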
+ while ( 1 ) {
+ int left = sizeof(buf) - 1 - len;
+ if( left == 0 )
+ break;
+ int x = sock.unsafe_recv( buf + len , left );
+ if ( x <= 0 ) {
+ sock.close();
+ return;
+ }
+ len += x;
+ buf[ len ] = 0;
+ if ( fullReceive( buf ) ) {
+ break;
+ }
+ }
+ buf[len] = 0;
+
+ string responseMsg;
+ int responseCode = 599;
+ vector<string> headers;
+
+ try {
+ doRequest(buf, parseURL( buf ), responseMsg, responseCode, headers, sock.remoteAddr() );
+ }
+ catch ( std::exception& e ) {
+ responseCode = 500;
+ responseMsg = "error loading page: ";
+ responseMsg += e.what();
+ }
+ catch ( ... ) {
+ responseCode = 500;
+ responseMsg = "unknown error loading page";
+ }
+
+ stringstream ss;
+ ss << "HTTP/1.0 " << responseCode;
+ if ( responseCode == 200 ) ss << " OK";
+ ss << "\r\n";
+ if ( headers.empty() ) {
+ ss << "Content-Type: text/html\r\n";
+ }
+ else {
+ for ( vector<string>::iterator i = headers.begin(); i != headers.end(); i++ ) {
+ assert( strncmp("Content-Length", i->c_str(), 14) );
+ ss << *i << "\r\n";
+ }
+ }
+ ss << "Connection: close\r\n";
+ ss << "Content-Length: " << responseMsg.size() << "\r\n";
+ ss << "\r\n";
+ ss << responseMsg;
+ string response = ss.str();
+
+ try {
+ sock.send( response.c_str(), response.size() , "http response" );
+ sock.close();
+ }
+ catch ( SocketException& e ) {
+ log(1) << "couldn't send data to http client: " << e << endl;
+ }
+ }
+
+ string MiniWebServer::getHeader( const char * req , string wanted ) {
+ const char * headers = strchr( req , '\n' );
+ if ( ! headers )
+ return "";
+ pcrecpp::StringPiece input( headers + 1 );
+
+ string name;
+ string val;
+ pcrecpp::RE re("([\\w\\-]+): (.*?)\r?\n");
+ while ( re.Consume( &input, &name, &val) ) {
+ if ( name == wanted )
+ return val;
+ }
+ return "";
+ }
+
+ string MiniWebServer::urlDecode(const char* s) {
+ stringstream out;
+ while(*s) {
+ if (*s == '+') {
+ out << ' ';
+ }
+ else if (*s == '%') {
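+ // "%XX" escape: fromHex decodes the two hex digits that follow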
+ out << fromHex(s+1);
+ s+=2;
+ }
+ else {
+ out << *s;
+ }
+ s++;
+ }
+ return out.str();
+ }
+
+} // namespace mongo
diff --git a/src/mongo/util/net/miniwebserver.h b/src/mongo/util/net/miniwebserver.h
new file mode 100644
index 00000000000..1fb6b3f2e65
--- /dev/null
+++ b/src/mongo/util/net/miniwebserver.h
@@ -0,0 +1,60 @@
+// miniwebserver.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "../../pch.h"
+#include "message.h"
+#include "message_port.h"
+#include "listen.h"
+#include "../../db/jsobj.h"
+
+namespace mongo {
+
+ class MiniWebServer : public Listener {
+ public:
+ MiniWebServer(const string& name, const string &ip, int _port);
+ virtual ~MiniWebServer() {}
+
+ virtual void doRequest(
+ const char *rq, // the full request
+ string url,
+ // set these and return them:
+ string& responseMsg,
+ int& responseCode,
+ vector<string>& headers, // if completely empty, content-type: text/html will be added
+ const SockAddr &from
+ ) = 0;
+
+ // --- static helpers ----
+
+ static void parseParams( BSONObj & params , string query );
+
+ static string parseURL( const char * buf );
+ static string parseMethod( const char * headers );
+ static string getHeader( const char * headers , string name );
+ static const char *body( const char *buf );
+
+ static string urlDecode(const char* s);
+ static string urlDecode(string s) {return urlDecode(s.c_str());}
+
+ private:
+ void accepted(Socket socket);
+ static bool fullReceive( const char *buf );
+ };
+
+} // namespace mongo
diff --git a/src/mongo/util/net/sock.cpp b/src/mongo/util/net/sock.cpp
new file mode 100644
index 00000000000..bd08e6c64b9
--- /dev/null
+++ b/src/mongo/util/net/sock.cpp
@@ -0,0 +1,763 @@
+// @file sock.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "sock.h"
+#include "../background.h"
+#include "../concurrency/value.h"
+#include "../mongoutils/str.h"
+
+#if !defined(_WIN32)
+# include <sys/socket.h>
+# include <sys/types.h>
+# include <sys/socket.h>
+# include <sys/un.h>
+# include <netinet/in.h>
+# include <netinet/tcp.h>
+# include <arpa/inet.h>
+# include <errno.h>
+# include <netdb.h>
+# if defined(__openbsd__)
+# include <sys/uio.h>
+# endif
+#endif
+
+#ifdef MONGO_SSL
+#include <openssl/err.h>
+#include <openssl/ssl.h>
+#endif
+
+using namespace mongoutils;
+
+namespace mongo {
+
+ void dynHostResolve(string& name, int& port);
+ string dynHostMyName();
+
+ static bool ipv6 = false;
+ void enableIPv6(bool state) { ipv6 = state; }
+ bool IPv6Enabled() { return ipv6; }
+
+ void setSockTimeouts(int sock, double secs) {
+ struct timeval tv;
+ tv.tv_sec = (int)secs;
+ tv.tv_usec = (int)((long long)(secs*1000*1000) % (1000*1000));
+ bool report = logLevel > 3; // solaris doesn't provide these
+ DEV report = true;
+#if defined(_WIN32)
+ tv.tv_sec *= 1000; // Windows timeout is a DWORD, in milliseconds.
+ int status = setsockopt( sock, SOL_SOCKET, SO_RCVTIMEO, (char *) &tv.tv_sec, sizeof(DWORD) ) == 0;
+ if( report && (status == SOCKET_ERROR) ) log() << "unable to set SO_RCVTIMEO" << endl;
+ status = setsockopt( sock, SOL_SOCKET, SO_SNDTIMEO, (char *) &tv.tv_sec, sizeof(DWORD) ) == 0;
+ DEV if( report && (status == SOCKET_ERROR) ) log() << "unable to set SO_SNDTIMEO" << endl;
+#else
+ bool ok = setsockopt( sock, SOL_SOCKET, SO_RCVTIMEO, (char *) &tv, sizeof(tv) ) == 0;
+ if( report && !ok ) log() << "unable to set SO_RCVTIMEO" << endl;
+ ok = setsockopt( sock, SOL_SOCKET, SO_SNDTIMEO, (char *) &tv, sizeof(tv) ) == 0;
+ DEV if( report && !ok ) log() << "unable to set SO_SNDTIMEO" << endl;
+#endif
+ }
+
+#if defined(_WIN32)
+ void disableNagle(int sock) {
+ int x = 1;
+ if ( setsockopt(sock, IPPROTO_TCP, TCP_NODELAY, (char *) &x, sizeof(x)) )
+ error() << "disableNagle failed" << endl;
+ if ( setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, (char *) &x, sizeof(x)) )
+ error() << "SO_KEEPALIVE failed" << endl;
+ }
+#else
+
+ void disableNagle(int sock) {
+ int x = 1;
+
+#ifdef SOL_TCP
+ int level = SOL_TCP;
+#else
+ int level = SOL_SOCKET;
+#endif
+
+ if ( setsockopt(sock, level, TCP_NODELAY, (char *) &x, sizeof(x)) )
+ error() << "disableNagle failed: " << errnoWithDescription() << endl;
+
+#ifdef SO_KEEPALIVE
+ if ( setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, (char *) &x, sizeof(x)) )
+ error() << "SO_KEEPALIVE failed: " << errnoWithDescription() << endl;
+
+# ifdef __linux__
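+ // clamp TCP_KEEPIDLE and TCP_KEEPINTVL to at most 300 seconds so dead peers are detected sooner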
+ socklen_t len = sizeof(x);
+ if ( getsockopt(sock, level, TCP_KEEPIDLE, (char *) &x, &len) )
+ error() << "can't get TCP_KEEPIDLE: " << errnoWithDescription() << endl;
+
+ if (x > 300) {
+ x = 300;
+ if ( setsockopt(sock, level, TCP_KEEPIDLE, (char *) &x, sizeof(x)) ) {
+ error() << "can't set TCP_KEEPIDLE: " << errnoWithDescription() << endl;
+ }
+ }
+
+ len = sizeof(x); // just in case it changed
+ if ( getsockopt(sock, level, TCP_KEEPINTVL, (char *) &x, &len) )
+ error() << "can't get TCP_KEEPINTVL: " << errnoWithDescription() << endl;
+
+ if (x > 300) {
+ x = 300;
+ if ( setsockopt(sock, level, TCP_KEEPINTVL, (char *) &x, sizeof(x)) ) {
+ error() << "can't set TCP_KEEPINTVL: " << errnoWithDescription() << endl;
+ }
+ }
+# endif
+#endif
+
+ }
+
+#endif
+
+ string getAddrInfoStrError(int code) {
+#if !defined(_WIN32)
+ return gai_strerror(code);
+#else
+ /* gai_strerrorA is not threadsafe on windows. don't use it. */
+ return errnoWithDescription(code);
+#endif
+ }
+
+
+ // --- SockAddr
+
+ SockAddr::SockAddr(int sourcePort) {
+ memset(as<sockaddr_in>().sin_zero, 0, sizeof(as<sockaddr_in>().sin_zero));
+ as<sockaddr_in>().sin_family = AF_INET;
+ as<sockaddr_in>().sin_port = htons(sourcePort);
+ as<sockaddr_in>().sin_addr.s_addr = htonl(INADDR_ANY);
+ addressSize = sizeof(sockaddr_in);
+ }
+
+ SockAddr::SockAddr(const char * _iporhost , int port) {
+ string target = _iporhost;
+ bool cloudName = *_iporhost == '#';
+ if( target == "localhost" ) {
+ target = "127.0.0.1";
+ }
+ else if( cloudName ) {
+ dynHostResolve(target, port);
+ }
+
+ if( str::contains(target, '/') ) {
+#ifdef _WIN32
+ uassert(13080, "no unix socket support on windows", false);
+#endif
+ uassert(13079, "path to unix socket too long", target.size() < sizeof(as<sockaddr_un>().sun_path));
+ as<sockaddr_un>().sun_family = AF_UNIX;
+ strcpy(as<sockaddr_un>().sun_path, target.c_str());
+ addressSize = sizeof(sockaddr_un);
+ }
+ else {
+ addrinfo* addrs = NULL;
+ addrinfo hints;
+ memset(&hints, 0, sizeof(addrinfo));
+ hints.ai_socktype = SOCK_STREAM;
+ //hints.ai_flags = AI_ADDRCONFIG; // This is often recommended but don't do it. SERVER-1579
+ hints.ai_flags |= AI_NUMERICHOST; // first pass tries w/o DNS lookup
+ hints.ai_family = (IPv6Enabled() ? AF_UNSPEC : AF_INET);
+
+ StringBuilder ss;
+ ss << port;
+ int ret = getaddrinfo(target.c_str(), ss.str().c_str(), &hints, &addrs);
+
+ // old C compilers on IPv6-capable hosts return EAI_NODATA error
+#ifdef EAI_NODATA
+ int nodata = (ret == EAI_NODATA);
+#else
+ int nodata = false;
+#endif
+ if ( (ret == EAI_NONAME || nodata) && !cloudName ) {
+ // iporhost isn't an IP address, allow DNS lookup
+ hints.ai_flags &= ~AI_NUMERICHOST;
+ ret = getaddrinfo(target.c_str(), ss.str().c_str(), &hints, &addrs);
+ }
+
+ if (ret) {
+ // we were unsuccessful
+ if( target != "0.0.0.0" ) { // don't log if this as it is a CRT construction and log() may not work yet.
+ log() << "getaddrinfo(\"" << target << "\") failed: " << gai_strerror(ret) << endl;
+ }
+ *this = SockAddr(port);
+ }
+ else {
+ //TODO: handle other addresses in linked list;
+ assert(addrs->ai_addrlen <= sizeof(sa));
+ memcpy(&sa, addrs->ai_addr, addrs->ai_addrlen);
+ addressSize = addrs->ai_addrlen;
+ freeaddrinfo(addrs);
+ }
+ }
+ }
+
+ bool SockAddr::isLocalHost() const {
+ switch (getType()) {
+ case AF_INET: return getAddr() == "127.0.0.1";
+ case AF_INET6: return getAddr() == "::1";
+ case AF_UNIX: return true;
+ default: return false;
+ }
+ assert(false);
+ return false;
+ }
+
+ string SockAddr::toString(bool includePort) const {
+ string out = getAddr();
+ if (includePort && getType() != AF_UNIX && getType() != AF_UNSPEC)
+ out += mongoutils::str::stream() << ':' << getPort();
+ return out;
+ }
+
+ sa_family_t SockAddr::getType() const {
+ return sa.ss_family;
+ }
+
+ unsigned SockAddr::getPort() const {
+ switch (getType()) {
+ case AF_INET: return ntohs(as<sockaddr_in>().sin_port);
+ case AF_INET6: return ntohs(as<sockaddr_in6>().sin6_port);
+ case AF_UNIX: return 0;
+ case AF_UNSPEC: return 0;
+ default: massert(SOCK_FAMILY_UNKNOWN_ERROR, "unsupported address family", false); return 0;
+ }
+ }
+
+ string SockAddr::getAddr() const {
+ switch (getType()) {
+ case AF_INET:
+ case AF_INET6: {
+ const int buflen=128;
+ char buffer[buflen];
+ int ret = getnameinfo(raw(), addressSize, buffer, buflen, NULL, 0, NI_NUMERICHOST);
+ massert(13082, str::stream() << "getnameinfo error " << getAddrInfoStrError(ret), ret == 0);
+ return buffer;
+ }
+
+ case AF_UNIX: return (addressSize > 2 ? as<sockaddr_un>().sun_path : "anonymous unix socket");
+ case AF_UNSPEC: return "(NONE)";
+ default: massert(SOCK_FAMILY_UNKNOWN_ERROR, "unsupported address family", false); return "";
+ }
+ }
+
+ bool SockAddr::operator==(const SockAddr& r) const {
+ if (getType() != r.getType())
+ return false;
+
+ if (getPort() != r.getPort())
+ return false;
+
+ switch (getType()) {
+ case AF_INET: return as<sockaddr_in>().sin_addr.s_addr == r.as<sockaddr_in>().sin_addr.s_addr;
+ case AF_INET6: return memcmp(as<sockaddr_in6>().sin6_addr.s6_addr, r.as<sockaddr_in6>().sin6_addr.s6_addr, sizeof(in6_addr)) == 0;
+ case AF_UNIX: return strcmp(as<sockaddr_un>().sun_path, r.as<sockaddr_un>().sun_path) == 0;
+ case AF_UNSPEC: return true; // assume all unspecified addresses are the same
+ default: massert(SOCK_FAMILY_UNKNOWN_ERROR, "unsupported address family", false);
+ }
+ return false;
+ }
+
+ bool SockAddr::operator!=(const SockAddr& r) const {
+ return !(*this == r);
+ }
+
+ bool SockAddr::operator<(const SockAddr& r) const {
+ if (getType() < r.getType())
+ return true;
+ else if (getType() > r.getType())
+ return false;
+
+ if (getPort() < r.getPort())
+ return true;
+ else if (getPort() > r.getPort())
+ return false;
+
+ switch (getType()) {
+ case AF_INET: return as<sockaddr_in>().sin_addr.s_addr < r.as<sockaddr_in>().sin_addr.s_addr;
+ case AF_INET6: return memcmp(as<sockaddr_in6>().sin6_addr.s6_addr, r.as<sockaddr_in6>().sin6_addr.s6_addr, sizeof(in6_addr)) < 0;
+ case AF_UNIX: return strcmp(as<sockaddr_un>().sun_path, r.as<sockaddr_un>().sun_path) < 0;
+ case AF_UNSPEC: return false;
+ default: massert(SOCK_FAMILY_UNKNOWN_ERROR, "unsupported address family", false);
+ }
+ return false;
+ }
+
+ SockAddr unknownAddress( "0.0.0.0", 0 );
+
+ // If an ip address is passed in, just return that. If a hostname is passed
+ // in, look up its ip and return that. Returns "" on failure.
+ string hostbyname(const char *hostname) {
+ if( *hostname == '#' ) {
+ string s = hostname;
+ int port;
+ dynHostResolve(s, port);
+ return s;
+ }
+
+ string addr = SockAddr(hostname, 0).getAddr();
+ if (addr == "0.0.0.0")
+ return "";
+ else
+ return addr;
+ }
+
+ // --- my --
+
+ DiagStr _hostNameCached;
+
+ string getHostName() {
+ {
+ string s = dynHostMyName();
+ if( !s.empty() )
+ return s;
+ }
+
+ char buf[256];
+ int ec = gethostname(buf, 127);
+ if ( ec || *buf == 0 ) {
+ log() << "can't get this server's hostname " << errnoWithDescription() << endl;
+ return "";
+ }
+ return buf;
+ }
+
+ static void _hostNameCachedInit() {
+ _hostNameCached = getHostName();
+ }
+ boost::once_flag _hostNameCachedInitFlags = BOOST_ONCE_INIT;
+
+ /** we store our host name once */
+ // ok w dynhosts map?
+ string getHostNameCached() {
+ boost::call_once( _hostNameCachedInit , _hostNameCachedInitFlags );
+ return _hostNameCached;
+ }
+
+ // --------- SocketException ----------
+
+#ifdef MSG_NOSIGNAL
+ const int portSendFlags = MSG_NOSIGNAL;
+ const int portRecvFlags = MSG_NOSIGNAL;
+#else
+ const int portSendFlags = 0;
+ const int portRecvFlags = 0;
+#endif
+
+ string SocketException::toString() const {
+ stringstream ss;
+ ss << _ei.code << " socket exception [" << _type << "] ";
+
+ if ( _server.size() )
+ ss << "server [" << _server << "] ";
+
+ if ( _extra.size() )
+ ss << _extra;
+
+ return ss.str();
+ }
+
+
+ // ------------ SSLManager -----------------
+
+#ifdef MONGO_SSL
+ SSLManager::SSLManager( bool client ) {
+ _client = client;
+ SSL_library_init();
+ SSL_load_error_strings();
+ ERR_load_crypto_strings();
+
+ _context = SSL_CTX_new( client ? SSLv23_client_method() : SSLv23_server_method() );
+ massert( 15864 , mongoutils::str::stream() << "can't create SSL Context: " << ERR_error_string(ERR_get_error(), NULL) , _context );
+
+ SSL_CTX_set_options( _context, SSL_OP_ALL);
+ }
+
+ void SSLManager::setupPubPriv( const string& privateKeyFile , const string& publicKeyFile ) {
+ massert( 15865 ,
+ mongoutils::str::stream() << "Can't read SSL certificate from file "
+ << publicKeyFile << ":" << ERR_error_string(ERR_get_error(), NULL) ,
+ SSL_CTX_use_certificate_file(_context, publicKeyFile.c_str(), SSL_FILETYPE_PEM) );
+
+
+ massert( 15866 ,
+ mongoutils::str::stream() << "Can't read SSL private key from file "
+ << privateKeyFile << " : " << ERR_error_string(ERR_get_error(), NULL) ,
+ SSL_CTX_use_PrivateKey_file(_context, privateKeyFile.c_str(), SSL_FILETYPE_PEM) );
+ }
+
+
+ int SSLManager::password_cb(char *buf,int num, int rwflag,void *userdata){
+ SSLManager* sm = (SSLManager*)userdata;
+ string pass = sm->_password;
+ strcpy(buf,pass.c_str());
+ return(pass.size());
+ }
+
+ void SSLManager::setupPEM( const string& keyFile , const string& password ) {
+ _password = password;
+
+ massert( 15867 , "Can't read certificate file" , SSL_CTX_use_certificate_chain_file( _context , keyFile.c_str() ) );
+
+ SSL_CTX_set_default_passwd_cb_userdata( _context , this );
+ SSL_CTX_set_default_passwd_cb( _context, &SSLManager::password_cb );
+
+ massert( 15868 , "Can't read key file" , SSL_CTX_use_PrivateKey_file( _context , keyFile.c_str() , SSL_FILETYPE_PEM ) );
+ }
+
+ SSL * SSLManager::secure( int fd ) {
+ SSL * ssl = SSL_new( _context );
+ massert( 15861 , "can't create SSL" , ssl );
+ SSL_set_fd( ssl , fd );
+ return ssl;
+ }
+
+
+#endif
+
+ // ------------ Socket -----------------
+
+ Socket::Socket(int fd , const SockAddr& remote) :
+ _fd(fd), _remote(remote), _timeout(0) {
+ _logLevel = 0;
+ _init();
+ }
+
+ Socket::Socket( double timeout, int ll ) {
+ _logLevel = ll;
+ _fd = -1;
+ _timeout = timeout;
+ _init();
+ }
+
+ void Socket::_init() {
+ _bytesOut = 0;
+ _bytesIn = 0;
+#ifdef MONGO_SSL
+ _sslAccepted = 0;
+#endif
+ }
+
+ void Socket::close() {
+#ifdef MONGO_SSL
+ _ssl.reset();
+#endif
+ if ( _fd >= 0 ) {
+ closesocket( _fd );
+ _fd = -1;
+ }
+ }
+
+#ifdef MONGO_SSL
+ void Socket::secure( SSLManager * ssl ) {
+ assert( ssl );
+ assert( _fd >= 0 );
+ _ssl.reset( ssl->secure( _fd ) );
+ SSL_connect( _ssl.get() );
+ }
+
+ void Socket::secureAccepted( SSLManager * ssl ) {
+ _sslAccepted = ssl;
+ }
+#endif
+
+ void Socket::postFork() {
+#ifdef MONGO_SSL
+ if ( _sslAccepted ) {
+ assert( _fd );
+ _ssl.reset( _sslAccepted->secure( _fd ) );
+ SSL_accept( _ssl.get() );
+ _sslAccepted = 0;
+ }
+#endif
+ }
+
+ class ConnectBG : public BackgroundJob {
+ public:
+ ConnectBG(int sock, SockAddr remote) : _sock(sock), _remote(remote) { }
+
+ void run() { _res = ::connect(_sock, _remote.raw(), _remote.addressSize); }
+ string name() const { return "ConnectBG"; }
+ int inError() const { return _res; }
+
+ private:
+ int _sock;
+ int _res;
+ SockAddr _remote;
+ };
+
+ bool Socket::connect(SockAddr& remote) {
+ _remote = remote;
+
+ _fd = socket(remote.getType(), SOCK_STREAM, 0);
+ if ( _fd == INVALID_SOCKET ) {
+ log(_logLevel) << "ERROR: connect invalid socket " << errnoWithDescription() << endl;
+ return false;
+ }
+
+ if ( _timeout > 0 ) {
+ setTimeout( _timeout );
+ }
+
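+ // run ::connect() on a background thread so we can give up after 5 seconds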
+ ConnectBG bg(_fd, remote);
+ bg.go();
+ if ( bg.wait(5000) ) {
+ if ( bg.inError() ) {
+ close();
+ return false;
+ }
+ }
+ else {
+ // time out the connect
+ close();
+ bg.wait(); // so bg stays in scope until bg thread terminates
+ return false;
+ }
+
+ if (remote.getType() != AF_UNIX)
+ disableNagle(_fd);
+
+#ifdef SO_NOSIGPIPE
+ // osx
+ const int one = 1;
+ setsockopt( _fd , SOL_SOCKET, SO_NOSIGPIPE, &one, sizeof(int));
+#endif
+
+ return true;
+ }
+
+ int Socket::_send( const char * data , int len ) {
+#ifdef MONGO_SSL
+ if ( _ssl ) {
+ return SSL_write( _ssl.get() , data , len );
+ }
+#endif
+ return ::send( _fd , data , len , portSendFlags );
+ }
+
+ bool Socket::stillConnected() {
+#ifdef MONGO_SSL
+ DEV log() << "TODO stillConnected() w/SSL" << endl;
+#else
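+ // probe with a zero-length send; EAGAIN/EWOULDBLOCK just means the socket is busy, not disconnected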
+ int r = _send("", 0);
+ if( r < 0 ) {
+#if defined(_WIN32)
+ if ( WSAGetLastError() == WSAETIMEDOUT ) {
+#else
+ if ( ( errno == EAGAIN || errno == EWOULDBLOCK ) ) {
+#endif
+ ;
+ }
+ else {
+ return false;
+ }
+ }
+#endif
+ return true;
+ }
+
+ // sends all data or throws an exception
+ void Socket::send( const char * data , int len, const char *context ) {
+ while( len > 0 ) {
+ int ret = _send( data , len );
+ if ( ret == -1 ) {
+
+#ifdef MONGO_SSL
+ if ( _ssl ) {
+ log() << "SSL Error ret: " << ret << " err: " << SSL_get_error( _ssl.get() , ret )
+ << " " << ERR_error_string(ERR_get_error(), NULL)
+ << endl;
+ }
+#endif
+
+#if defined(_WIN32)
+ if ( WSAGetLastError() == WSAETIMEDOUT && _timeout != 0 ) {
+#else
+ if ( ( errno == EAGAIN || errno == EWOULDBLOCK ) && _timeout != 0 ) {
+#endif
+ log(_logLevel) << "Socket " << context << " send() timed out " << _remote.toString() << endl;
+ throw SocketException( SocketException::SEND_TIMEOUT , remoteString() );
+ }
+ else {
+ SocketException::Type t = SocketException::SEND_ERROR;
+ log(_logLevel) << "Socket " << context << " send() "
+ << errnoWithDescription() << ' ' << remoteString() << endl;
+ throw SocketException( t , remoteString() );
+ }
+ }
+ else {
+ _bytesOut += ret;
+
+ assert( ret <= len );
+ len -= ret;
+ data += ret;
+ }
+ }
+ }
+
+ void Socket::_send( const vector< pair< char *, int > > &data, const char *context ) {
+ for( vector< pair< char *, int > >::const_iterator i = data.begin(); i != data.end(); ++i ) {
+ char * data = i->first;
+ int len = i->second;
+ send( data, len, context );
+ }
+ }
+
+ /** sends all data or throws an exception
+ * @param context descriptive for logging
+ */
+ void Socket::send( const vector< pair< char *, int > > &data, const char *context ) {
+
+#ifdef MONGO_SSL
+ if ( _ssl ) {
+ _send( data , context );
+ return;
+ }
+#endif
+
+#if defined(_WIN32)
+ // TODO use scatter/gather api
+ _send( data , context );
+#else
+ vector< struct iovec > d( data.size() );
+ int i = 0;
+ for( vector< pair< char *, int > >::const_iterator j = data.begin(); j != data.end(); ++j ) {
+ if ( j->second > 0 ) {
+ d[ i ].iov_base = j->first;
+ d[ i ].iov_len = j->second;
+ ++i;
+ _bytesOut += j->second;
+ }
+ }
+ struct msghdr meta;
+ memset( &meta, 0, sizeof( meta ) );
+ meta.msg_iov = &d[ 0 ];
+ meta.msg_iovlen = d.size();
+
+ while( meta.msg_iovlen > 0 ) {
+ int ret = ::sendmsg( _fd , &meta , portSendFlags );
+ if ( ret == -1 ) {
+ if ( errno != EAGAIN || _timeout == 0 ) {
+ log(_logLevel) << "Socket " << context << " send() " << errnoWithDescription() << ' ' << remoteString() << endl;
+ throw SocketException( SocketException::SEND_ERROR , remoteString() );
+ }
+ else {
+ log(_logLevel) << "Socket " << context << " send() remote timeout " << remoteString() << endl;
+ throw SocketException( SocketException::SEND_TIMEOUT , remoteString() );
+ }
+ }
+ else {
+ struct iovec *& i = meta.msg_iov;
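+ // skip iovecs that were fully sent and trim the partially-sent one before retrying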
+ while( ret > 0 ) {
+ if ( i->iov_len > unsigned( ret ) ) {
+ i->iov_len -= ret;
+ i->iov_base = (char*)(i->iov_base) + ret;
+ ret = 0;
+ }
+ else {
+ ret -= i->iov_len;
+ ++i;
+ --(meta.msg_iovlen);
+ }
+ }
+ }
+ }
+#endif
+ }
+
+ void Socket::recv( char * buf , int len ) {
+ unsigned retries = 0;
+ while( len > 0 ) {
+ int ret = unsafe_recv( buf , len );
+ if ( ret > 0 ) {
+ if ( len <= 4 && ret != len )
+ log(_logLevel) << "Socket recv() got " << ret << " bytes wanted len=" << len << endl;
+ assert( ret <= len );
+ len -= ret;
+ buf += ret;
+ }
+ else if ( ret == 0 ) {
+ log(3) << "Socket recv() conn closed? " << remoteString() << endl;
+ throw SocketException( SocketException::CLOSED , remoteString() );
+ }
+ else { /* ret < 0 */
+#if defined(_WIN32)
+ int e = WSAGetLastError();
+#else
+ int e = errno;
+# if defined(EINTR)
+ if( e == EINTR ) {
+ if( ++retries == 1 ) {
+ log() << "EINTR retry" << endl;
+ continue;
+ }
+ }
+# endif
+#endif
+ if ( ( e == EAGAIN
+#if defined(_WIN32)
+ || e == WSAETIMEDOUT
+#endif
+ ) && _timeout > 0 )
+ {
+ // this is a timeout
+ log(_logLevel) << "Socket recv() timeout " << remoteString() <<endl;
+ throw SocketException( SocketException::RECV_TIMEOUT, remoteString() );
+ }
+
+ log(_logLevel) << "Socket recv() " << errnoWithDescription(e) << " " << remoteString() <<endl;
+ throw SocketException( SocketException::RECV_ERROR , remoteString() );
+ }
+ }
+ }
+
+ int Socket::unsafe_recv( char *buf, int max ) {
+ int x = _recv( buf , max );
+ _bytesIn += x;
+ return x;
+ }
+
+
+ int Socket::_recv( char *buf, int max ) {
+#ifdef MONGO_SSL
+ if ( _ssl ){
+ return SSL_read( _ssl.get() , buf , max );
+ }
+#endif
+ return ::recv( _fd , buf , max , portRecvFlags );
+ }
+
+ void Socket::setTimeout( double secs ) {
+ setSockTimeouts( _fd, secs );
+ }
+
+#if defined(_WIN32)
+ struct WinsockInit {
+ WinsockInit() {
+ WSADATA d;
+ if ( WSAStartup(MAKEWORD(2,2), &d) != 0 ) {
+ out() << "ERROR: wsastartup failed " << errnoWithDescription() << endl;
+ problem() << "ERROR: wsastartup failed " << errnoWithDescription() << endl;
+ dbexit( EXIT_NTSERVICE_ERROR );
+ }
+ }
+ } winsock_init;
+#endif
+
+} // namespace mongo
diff --git a/src/mongo/util/net/sock.h b/src/mongo/util/net/sock.h
new file mode 100644
index 00000000000..2053768cbd5
--- /dev/null
+++ b/src/mongo/util/net/sock.h
@@ -0,0 +1,261 @@
+// @file sock.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "../../pch.h"
+
+#include <stdio.h>
+#include <sstream>
+#include "../goodies.h"
+#include "../../db/cmdline.h"
+#include "../mongoutils/str.h"
+
+#ifndef _WIN32
+
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <errno.h>
+
+#ifdef __openbsd__
+# include <sys/uio.h>
+#endif
+
+#endif // _WIN32
+
+#ifdef MONGO_SSL
+#include <openssl/ssl.h>
+#endif
+
+namespace mongo {
+
+ const int SOCK_FAMILY_UNKNOWN_ERROR=13078;
+
+ void disableNagle(int sock);
+
+#if defined(_WIN32)
+
+ typedef short sa_family_t;
+ typedef int socklen_t;
+
+ // This won't actually be used on windows
+ struct sockaddr_un {
+ short sun_family;
+ char sun_path[108]; // length from unix header
+ };
+
+#else // _WIN32
+
+ inline void closesocket(int s) { close(s); }
+ const int INVALID_SOCKET = -1;
+ typedef int SOCKET;
+
+#endif // _WIN32
+
+ inline string makeUnixSockPath(int port) {
+ return mongoutils::str::stream() << cmdLine.socket << "/mongodb-" << port << ".sock";
+ }
+
+ // If an ip address is passed in, just return that. If a hostname is passed
+ // in, look up its ip and return that. Returns "" on failure.
+ string hostbyname(const char *hostname);
+
+ void enableIPv6(bool state=true);
+ bool IPv6Enabled();
+ void setSockTimeouts(int sock, double secs);
+
+ /**
+ * wrapper around the OS representation of a network address
+ */
+ struct SockAddr {
+ SockAddr() {
+ addressSize = sizeof(sa);
+ memset(&sa, 0, sizeof(sa));
+ sa.ss_family = AF_UNSPEC;
+ }
+ SockAddr(int sourcePort); /* listener side */
+ SockAddr(const char *ip, int port); /* EndPoint (remote) side, or if you want to specify which interface locally */
+
+ template <typename T> T& as() { return *(T*)(&sa); }
+ template <typename T> const T& as() const { return *(const T*)(&sa); }
+
+ string toString(bool includePort=true) const;
+
+ /**
+ * @return one of AF_INET, AF_INET6, or AF_UNIX
+ */
+ sa_family_t getType() const;
+
+ unsigned getPort() const;
+
+ string getAddr() const;
+
+ bool isLocalHost() const;
+
+ bool operator==(const SockAddr& r) const;
+
+ bool operator!=(const SockAddr& r) const;
+
+ bool operator<(const SockAddr& r) const;
+
+ const sockaddr* raw() const {return (sockaddr*)&sa;}
+ sockaddr* raw() {return (sockaddr*)&sa;}
+
+ socklen_t addressSize;
+ private:
+ struct sockaddr_storage sa;
+ };
+
+ extern SockAddr unknownAddress; // ( "0.0.0.0", 0 )
+
+ /** this is not cached and does a syscall */
+ string getHostName();
+
+ /** this is cached, so if the host name changes during the process
+ * lifetime it will be stale */
+ string getHostNameCached();
+
+ /**
+ * thrown by Socket and SockAddr
+ */
+ class SocketException : public DBException {
+ public:
+ const enum Type { CLOSED , RECV_ERROR , SEND_ERROR, RECV_TIMEOUT, SEND_TIMEOUT, FAILED_STATE, CONNECT_ERROR } _type;
+
+ SocketException( Type t , string server , int code = 9001 , string extra="" )
+ : DBException( "socket exception" , code ) , _type(t) , _server(server), _extra(extra){ }
+ virtual ~SocketException() throw() {}
+
+ bool shouldPrint() const { return _type != CLOSED; }
+ virtual string toString() const;
+
+ private:
+ string _server;
+ string _extra;
+ };
+
+#ifdef MONGO_SSL
+ class SSLManager : boost::noncopyable {
+ public:
+ SSLManager( bool client );
+
+ void setupPEM( const string& keyFile , const string& password );
+ void setupPubPriv( const string& privateKeyFile , const string& publicKeyFile );
+
+ /**
+ * creates an SSL context to be used for this file descriptor
+ * caller should delete
+ */
+ SSL * secure( int fd );
+
+ static int password_cb( char *buf,int num, int rwflag,void *userdata );
+
+ private:
+ bool _client;
+ SSL_CTX* _context;
+ string _password;
+ };
+#endif
+
+ /**
+ * thin wrapped around file descriptor and system calls
+ * todo: ssl
+ */
+ class Socket {
+ public:
+ Socket(int sock, const SockAddr& farEnd);
+
+ /** In some cases the timeout will actually be 2x this value - eg we do a partial send,
+ then the timeout fires, then we try to send again, then the timeout fires again with
+ no data sent, then we detect that the other side is down.
+
+ Generally you don't want a timeout; be well prepared for errors if you set one.
+ */
+ Socket(double so_timeout = 0, int logLevel = 0 );
+
+ bool connect(SockAddr& farEnd);
+ void close();
+
+ void send( const char * data , int len, const char *context );
+ void send( const vector< pair< char *, int > > &data, const char *context );
+
+ // recv len or throw SocketException
+ void recv( char * data , int len );
+ int unsafe_recv( char *buf, int max );
+
+ int getLogLevel() const { return _logLevel; }
+ void setLogLevel( int ll ) { _logLevel = ll; }
+
+ SockAddr remoteAddr() const { return _remote; }
+ string remoteString() const { return _remote.toString(); }
+ unsigned remotePort() const { return _remote.getPort(); }
+
+ void clearCounters() { _bytesIn = 0; _bytesOut = 0; }
+ long long getBytesIn() const { return _bytesIn; }
+ long long getBytesOut() const { return _bytesOut; }
+
+ void setTimeout( double secs );
+
+ bool stillConnected();
+
+#ifdef MONGO_SSL
+ /** secures inline */
+ void secure( SSLManager * ssl );
+
+ void secureAccepted( SSLManager * ssl );
+#endif
+
+ /**
+ * call this after a fork for server sockets
+ */
+ void postFork();
+
+ private:
+ void _init();
+
+ /** raw send, same semantics as ::send */
+ public:
+ int _send( const char * data , int len );
+ private:
+
+ /** sends dumbly, just each buffer at a time */
+ void _send( const vector< pair< char *, int > > &data, const char *context );
+
+ /** raw recv, same semantics as ::recv */
+ int _recv( char * buf , int max );
+
+ int _fd;
+ SockAddr _remote;
+ double _timeout;
+
+ long long _bytesIn;
+ long long _bytesOut;
+
+#ifdef MONGO_SSL
+ shared_ptr<SSL> _ssl;
+ SSLManager * _sslAccepted;
+#endif
+
+ protected:
+ int _logLevel; // passed to log() when logging errors
+
+ };
+
+
+} // namespace mongo
diff --git a/src/mongo/util/ntservice.cpp b/src/mongo/util/ntservice.cpp
new file mode 100644
index 00000000000..93cfd4a2de0
--- /dev/null
+++ b/src/mongo/util/ntservice.cpp
@@ -0,0 +1,408 @@
+// ntservice.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "ntservice.h"
+#include "../db/client.h"
+#include "winutil.h"
+#include "text.h"
+#include <direct.h>
+
+#if defined(_WIN32)
+
+namespace mongo {
+
+ void shutdownServer();
+
+ SERVICE_STATUS_HANDLE ServiceController::_statusHandle = NULL;
+ std::wstring ServiceController::_serviceName;
+ ServiceCallback ServiceController::_serviceCallback = NULL;
+
+ ServiceController::ServiceController() {}
+
+ bool initService();
+
+ // returns true if the service is started.
+ bool serviceParamsCheck( boost::program_options::variables_map& params, const std::string dbpath, int argc, char* argv[] ) {
+ bool installService = false;
+ bool removeService = false;
+ bool reinstallService = false;
+ bool startService = false;
+
+ std::wstring windowsServiceName = L"MongoDB";
+ std::wstring windowsServiceDisplayName = L"Mongo DB";
+ std::wstring windowsServiceDescription = L"Mongo DB Server";
+ std::wstring windowsServiceUser = L"";
+ std::wstring windowsServicePassword = L"";
+
+ if (params.count("install")) {
+ if ( ! params.count( "logpath" ) ) {
+ cerr << "--install has to be used with --logpath" << endl;
+ ::exit(-1);
+ }
+ installService = true;
+ }
+ if (params.count("reinstall")) {
+ if ( ! params.count( "logpath" ) ) {
+ cerr << "--reinstall has to be used with --logpath" << endl;
+ ::exit(-1);
+ }
+ reinstallService = true;
+ }
+ if (params.count("remove")) {
+ removeService = true;
+ }
+ if (params.count("service")) {
+ startService = true;
+ }
+
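+ // the service name/description/user/password options below are widened char-by-char into wstrings (no UTF-8 decoding)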
+ if (params.count("serviceName")) {
+ string x = params["serviceName"].as<string>();
+ windowsServiceName = wstring(x.size(),L' ');
+ for ( size_t i=0; i<x.size(); i++) {
+ windowsServiceName[i] = x[i];
+ }
+ }
+ if (params.count("serviceDisplayName")) {
+ string x = params["serviceDisplayName"].as<string>();
+ windowsServiceDisplayName = wstring(x.size(),L' ');
+ for ( size_t i=0; i<x.size(); i++) {
+ windowsServiceDisplayName[i] = x[i];
+ }
+ }
+ if (params.count("serviceDescription")) {
+ string x = params["serviceDescription"].as<string>();
+ windowsServiceDescription = wstring(x.size(),L' ');
+ for ( size_t i=0; i<x.size(); i++) {
+ windowsServiceDescription[i] = x[i];
+ }
+ }
+ if (params.count("serviceUser")) {
+ string x = params["serviceUser"].as<string>();
+ windowsServiceUser = wstring(x.size(),L' ');
+ for ( size_t i=0; i<x.size(); i++) {
+ windowsServiceUser[i] = x[i];
+ }
+ }
+ if (params.count("servicePassword")) {
+ string x = params["servicePassword"].as<string>();
+ windowsServicePassword = wstring(x.size(),L' ');
+ for ( size_t i=0; i<x.size(); i++) {
+ windowsServicePassword[i] = x[i];
+ }
+ }
+
+ if ( reinstallService ) {
+ ServiceController::removeService( windowsServiceName );
+ }
+ if ( installService || reinstallService ) {
+ if ( !ServiceController::installService( windowsServiceName , windowsServiceDisplayName, windowsServiceDescription, windowsServiceUser, windowsServicePassword, dbpath, argc, argv ) )
+ dbexit( EXIT_NTSERVICE_ERROR );
+ dbexit( EXIT_CLEAN );
+ }
+ else if ( removeService ) {
+ if ( !ServiceController::removeService( windowsServiceName ) )
+ dbexit( EXIT_NTSERVICE_ERROR );
+ dbexit( EXIT_CLEAN );
+ }
+ else if ( startService ) {
+ if ( !ServiceController::startService( windowsServiceName , mongo::initService ) )
+ dbexit( EXIT_NTSERVICE_ERROR );
+ return true;
+ }
+ return false;
+ }
+
+ bool ServiceController::installService( const std::wstring& serviceName, const std::wstring& displayName, const std::wstring& serviceDesc, const std::wstring& serviceUser, const std::wstring& servicePassword, const std::string dbpath, int argc, char* argv[] ) {
+ assert(argc >= 1);
+
+ stringstream commandLine;
+
+ char exePath[1024];
+ GetModuleFileNameA( NULL, exePath, sizeof exePath );
+ commandLine << '"' << exePath << "\" ";
+
+ for ( int i = 1; i < argc; i++ ) {
+ std::string arg( argv[ i ] );
+ // replace install command to indicate process is being started as a service
+ if ( arg == "--install" || arg == "--reinstall" ) {
+ arg = "--service";
+ }
+ else if ( arg == "--dbpath" && i + 1 < argc ) {
+ commandLine << arg << " \"" << dbpath << "\" ";
+ i++;
+ continue;
+ }
+ else if ( arg == "--logpath" && i + 1 < argc ) {
+ commandLine << arg << " \"" << argv[i+1] << "\" ";
+ i++;
+ continue;
+ }
+ else if ( arg == "-f" && i + 1 < argc ) {
+ commandLine << arg << " \"" << argv[i+1] << "\" ";
+ i++;
+ continue;
+ }
+ else if ( arg == "--config" && i + 1 < argc ) {
+ commandLine << arg << " \"" << argv[i+1] << "\" ";
+ i++;
+ continue;
+ }
+ else if ( arg == "--pidfilepath" && i + 1 < argc ) {
+ commandLine << arg << " \"" << argv[i+1] << "\" ";
+ i++;
+ continue;
+ }
+ else if ( arg == "--repairpath" && i + 1 < argc ) {
+ commandLine << arg << " \"" << argv[i+1] << "\" ";
+ i++;
+ continue;
+ }
+ else if ( arg == "--keyfile" && i + 1 < argc ) {
+ commandLine << arg << " \"" << argv[i+1] << "\" ";
+ i++;
+ continue;
+ }
+ else if ( arg.length() > 9 && arg.substr(0, 9) == "--service" ) {
+ // Strip off --service(Name|User|Password) arguments
+ i++;
+ continue;
+ }
+ commandLine << arg << " ";
+ }
+
+ SC_HANDLE schSCManager = ::OpenSCManager( NULL, NULL, SC_MANAGER_ALL_ACCESS );
+ if ( schSCManager == NULL ) {
+ DWORD err = ::GetLastError();
+ cerr << "Error connecting to the Service Control Manager: " << GetWinErrMsg(err) << endl;
+ return false;
+ }
+
+        // Make sure the service doesn't already exist.
+ // TODO: Check to see if service is in "Deleting" status, suggest the user close down Services MMC snap-ins.
+ SC_HANDLE schService = ::OpenService( schSCManager, serviceName.c_str(), SERVICE_ALL_ACCESS );
+ if ( schService != NULL ) {
+ cerr << "There is already a service named " << toUtf8String(serviceName) << ". Aborting" << endl;
+ ::CloseServiceHandle( schService );
+ ::CloseServiceHandle( schSCManager );
+ return false;
+ }
+ std::basic_ostringstream< TCHAR > commandLineWide;
+ commandLineWide << commandLine.str().c_str();
+
+ cerr << "Creating service " << toUtf8String(serviceName) << "." << endl;
+
+ // create new service
+ schService = ::CreateService( schSCManager, serviceName.c_str(), displayName.c_str(),
+ SERVICE_ALL_ACCESS, SERVICE_WIN32_OWN_PROCESS,
+ SERVICE_AUTO_START, SERVICE_ERROR_NORMAL,
+ commandLineWide.str().c_str(), NULL, NULL, L"\0\0", NULL, NULL );
+ if ( schService == NULL ) {
+ DWORD err = ::GetLastError();
+ cerr << "Error creating service: " << GetWinErrMsg(err) << endl;
+ ::CloseServiceHandle( schSCManager );
+ return false;
+ }
+
+ cerr << "Service creation successful." << endl;
+ cerr << "Service can be started from the command line via 'net start \"" << toUtf8String(serviceName) << "\"'." << endl;
+
+ bool serviceInstalled;
+
+        // TODO: If necessary, grant the user the "Log on as a Service" right.
+ if ( !serviceUser.empty() ) {
+ std::wstring actualServiceUser;
+ if ( serviceUser.find(L"\\") == string::npos ) {
+ actualServiceUser = L".\\" + serviceUser;
+ }
+ else {
+ actualServiceUser = serviceUser;
+ }
+
+ cerr << "Setting service login credentials. User: " << toUtf8String(actualServiceUser) << endl;
+ serviceInstalled = ::ChangeServiceConfig( schService, SERVICE_NO_CHANGE, SERVICE_NO_CHANGE, SERVICE_NO_CHANGE, NULL, NULL, NULL, NULL, actualServiceUser.c_str(), servicePassword.c_str(), NULL );
+ if ( !serviceInstalled ) {
+ cerr << "Setting service login failed. Service has 'LocalService' permissions." << endl;
+ }
+ }
+
+ // set the service description
+ SERVICE_DESCRIPTION serviceDescription;
+ serviceDescription.lpDescription = (LPTSTR)serviceDesc.c_str();
+ serviceInstalled = ::ChangeServiceConfig2( schService, SERVICE_CONFIG_DESCRIPTION, &serviceDescription );
+
+#if 1
+ if ( ! serviceInstalled ) {
+#else
+ // This code sets the mongod service to auto-restart, forever.
+        // This might be a fine thing to do except that when mongod or Windows has a crash, the mongod.lock
+ // file is still around, so any attempt at a restart will immediately fail. With auto-restart, we
+ // go into a loop, crashing and restarting, crashing and restarting, until someone comes in and
+ // disables the service or deletes the mongod.lock file.
+ //
+ // I'm leaving the old code here for now in case we solve this and are able to turn SC_ACTION_RESTART
+ // back on.
+ //
+ if ( serviceInstalled ) {
+ SC_ACTION aActions[ 3 ] = { { SC_ACTION_RESTART, 0 }, { SC_ACTION_RESTART, 0 }, { SC_ACTION_RESTART, 0 } };
+
+ SERVICE_FAILURE_ACTIONS serviceFailure;
+ ZeroMemory( &serviceFailure, sizeof( SERVICE_FAILURE_ACTIONS ) );
+ serviceFailure.cActions = 3;
+ serviceFailure.lpsaActions = aActions;
+
+ // set service recovery options
+ serviceInstalled = ::ChangeServiceConfig2( schService, SERVICE_CONFIG_FAILURE_ACTIONS, &serviceFailure );
+
+ }
+ else {
+#endif
+ cerr << "Could not set service description. Check the event log for more details." << endl;
+ }
+
+ ::CloseServiceHandle( schService );
+ ::CloseServiceHandle( schSCManager );
+
+ return serviceInstalled;
+ }
+
+ bool ServiceController::removeService( const std::wstring& serviceName ) {
+ SC_HANDLE schSCManager = ::OpenSCManager( NULL, NULL, SC_MANAGER_ALL_ACCESS );
+ if ( schSCManager == NULL ) {
+ DWORD err = ::GetLastError();
+ cerr << "Error connecting to the Service Control Manager: " << GetWinErrMsg(err) << endl;
+ return false;
+ }
+
+ SC_HANDLE schService = ::OpenService( schSCManager, serviceName.c_str(), SERVICE_ALL_ACCESS );
+ if ( schService == NULL ) {
+ cerr << "Could not find a service named " << toUtf8String(serviceName) << " to uninstall." << endl;
+ ::CloseServiceHandle( schSCManager );
+ return false;
+ }
+
+ SERVICE_STATUS serviceStatus;
+
+        // stop the service if it's running
+ if ( ::ControlService( schService, SERVICE_CONTROL_STOP, &serviceStatus ) ) {
+ cerr << "Service " << toUtf8String(serviceName) << " is currently running. Stopping service." << endl;
+ while ( ::QueryServiceStatus( schService, &serviceStatus ) ) {
+ if ( serviceStatus.dwCurrentState == SERVICE_STOP_PENDING ) {
+ Sleep( 1000 );
+ }
+ else { break; }
+ }
+ cerr << "Service stopped." << endl;
+ }
+
+ cerr << "Deleting service " << toUtf8String(serviceName) << "." << endl;
+ bool serviceRemoved = ::DeleteService( schService );
+
+ ::CloseServiceHandle( schService );
+ ::CloseServiceHandle( schSCManager );
+
+ if (serviceRemoved) {
+ cerr << "Service deleted successfully." << endl;
+ }
+ else {
+ cerr << "Failed to delete service." << endl;
+ }
+
+ return serviceRemoved;
+ }
+
+ bool ServiceController::startService( const std::wstring& serviceName, ServiceCallback startService ) {
+ _serviceName = serviceName;
+ _serviceCallback = startService;
+
+ SERVICE_TABLE_ENTRY dispTable[] = {
+ { (LPTSTR)serviceName.c_str(), (LPSERVICE_MAIN_FUNCTION)ServiceController::initService },
+ { NULL, NULL }
+ };
+
+ return StartServiceCtrlDispatcher( dispTable );
+ }
+
+ bool ServiceController::reportStatus( DWORD reportState, DWORD waitHint ) {
+ if ( _statusHandle == NULL )
+ return false;
+
+ static DWORD checkPoint = 1;
+
+ SERVICE_STATUS ssStatus;
+
+ DWORD dwControlsAccepted;
+ switch ( reportState ) {
+ case SERVICE_START_PENDING:
+ case SERVICE_STOP_PENDING:
+ case SERVICE_STOPPED:
+ dwControlsAccepted = 0;
+ break;
+ default:
+ dwControlsAccepted = SERVICE_ACCEPT_STOP | SERVICE_ACCEPT_SHUTDOWN;
+ break;
+ }
+
+ ssStatus.dwServiceType = SERVICE_WIN32_OWN_PROCESS;
+ ssStatus.dwServiceSpecificExitCode = 0;
+ ssStatus.dwControlsAccepted = dwControlsAccepted;
+ ssStatus.dwCurrentState = reportState;
+ ssStatus.dwWin32ExitCode = NO_ERROR;
+ ssStatus.dwWaitHint = waitHint;
+ ssStatus.dwCheckPoint = ( reportState == SERVICE_RUNNING || reportState == SERVICE_STOPPED ) ? 0 : checkPoint++;
+
+ return SetServiceStatus( _statusHandle, &ssStatus );
+ }
+
+ void WINAPI ServiceController::initService( DWORD argc, LPTSTR *argv ) {
+ _statusHandle = RegisterServiceCtrlHandler( _serviceName.c_str(), serviceCtrl );
+ if ( !_statusHandle )
+ return;
+
+ reportStatus( SERVICE_START_PENDING, 1000 );
+
+ _serviceCallback();
+ dbexit( EXIT_CLEAN );
+
+ reportStatus( SERVICE_STOPPED );
+ }
+
+ static void serviceShutdown( const char* controlCodeName ) {
+ Client::initThread( "serviceShutdown" );
+ log() << "got " << controlCodeName << " request from Windows Service Controller, " <<
+ ( inShutdown() ? "already in shutdown" : "will terminate after current cmd ends" ) << endl;
+ ServiceController::reportStatus( SERVICE_STOP_PENDING );
+ if ( ! inShutdown() ) {
+ exitCleanly( EXIT_WINDOWS_SERVICE_STOP );
+ ServiceController::reportStatus( SERVICE_STOPPED );
+ }
+ }
+
+ void WINAPI ServiceController::serviceCtrl( DWORD ctrlCode ) {
+ switch ( ctrlCode ) {
+ case SERVICE_CONTROL_STOP:
+ serviceShutdown( "SERVICE_CONTROL_STOP" );
+ break;
+ case SERVICE_CONTROL_SHUTDOWN:
+ serviceShutdown( "SERVICE_CONTROL_SHUTDOWN" );
+ break;
+ }
+ }
+
+} // namespace mongo
+
+#endif
diff --git a/src/mongo/util/ntservice.h b/src/mongo/util/ntservice.h
new file mode 100644
index 00000000000..2570dfa9bef
--- /dev/null
+++ b/src/mongo/util/ntservice.h
@@ -0,0 +1,49 @@
+// ntservice.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#if defined(_WIN32)
+#include <windows.h>
+
+namespace mongo {
+
+ typedef bool ( *ServiceCallback )( void );
+ bool serviceParamsCheck( boost::program_options::variables_map& params, const std::string dbpath, int argc, char* argv[] );
+
+ class ServiceController {
+ public:
+ ServiceController();
+ virtual ~ServiceController() {}
+
+ static bool installService( const std::wstring& serviceName, const std::wstring& displayName, const std::wstring& serviceDesc, const std::wstring& serviceUser, const std::wstring& servicePassword, const std::string dbpath, int argc, char* argv[] );
+ static bool removeService( const std::wstring& serviceName );
+ static bool startService( const std::wstring& serviceName, ServiceCallback startService );
+ static bool reportStatus( DWORD reportState, DWORD waitHint = 0 );
+
+ static void WINAPI initService( DWORD argc, LPTSTR *argv );
+ static void WINAPI serviceCtrl( DWORD ctrlCode );
+
+ protected:
+ static std::wstring _serviceName;
+ static SERVICE_STATUS_HANDLE _statusHandle;
+ static ServiceCallback _serviceCallback;
+ };
+
+} // namespace mongo
+
+#endif
diff --git a/src/mongo/util/optime.h b/src/mongo/util/optime.h
new file mode 100644
index 00000000000..031ad960d20
--- /dev/null
+++ b/src/mongo/util/optime.h
@@ -0,0 +1,170 @@
+// optime.h - OpTime class
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+//#include "../db/concurrency.h"
+
+namespace mongo {
+ void exitCleanly( ExitCode code );
+
+ struct ClockSkewException : public DBException {
+ ClockSkewException() : DBException( "clock skew exception" , 20001 ) {}
+ };
+
+ /* replsets used to use RSOpTime.
+ M/S uses OpTime.
+       But this is usable from both.
+ */
+ typedef unsigned long long ReplTime;
+
+ /* Operation sequence #. A combination of current second plus an ordinal value.
+ */
+#pragma pack(4)
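+    // Layout sketch (assumes a little-endian build, matching the note on member order below):
+    // the packed 64-bit value has the ordinal 'i' in its low 32 bits and 'secs' in its high 32 bits,
+    // so, for example, OpTime(5, 2).asDate() == (5ULL << 32) | 2.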
+ class OpTime {
+ unsigned i; // ordinal comes first so we can do a single 64 bit compare on little endian
+ unsigned secs;
+ static OpTime last;
+ static OpTime skewed();
+ public:
+ static void setLast(const Date_t &date) {
+ notifier().notify_all(); // won't really do anything until write-lock released
+
+ last = OpTime(date);
+ }
+ unsigned getSecs() const {
+ return secs;
+ }
+ unsigned getInc() const {
+ return i;
+ }
+ OpTime(Date_t date) {
+ reinterpret_cast<unsigned long long&>(*this) = date.millis;
+ dassert( (int)secs >= 0 );
+ }
+ OpTime(ReplTime x) {
+ reinterpret_cast<unsigned long long&>(*this) = x;
+ dassert( (int)secs >= 0 );
+ }
+ OpTime(unsigned a, unsigned b) {
+ secs = a;
+ i = b;
+ dassert( (int)secs >= 0 );
+ }
+ OpTime( const OpTime& other ) {
+ secs = other.secs;
+ i = other.i;
+ dassert( (int)secs >= 0 );
+ }
+ OpTime() {
+ secs = 0;
+ i = 0;
+ }
+        // it generally isn't safe to call this without being locked, so use now() instead; some tests call this directly.
+ static OpTime now_inlock() {
+ notifier().notify_all(); // won't really do anything until write-lock released
+
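+            // within the same wall-clock second, bump the ordinal; if the clock moved backwards, take the skewed() path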
+ unsigned t = (unsigned) time(0);
+ if ( last.secs == t ) {
+ last.i++;
+ return last;
+ }
+ if ( t < last.secs ) {
+ return skewed(); // separate function to keep out of the hot code path
+ }
+ last = OpTime(t, 1);
+ return last;
+ }
+ static OpTime now();
+ static OpTime last_inlock();
+
+ // Waits for global OpTime to be different from *this
+ // Must be atLeastReadLocked
+ // Defined in instance.cpp (only current user) as it needs dbtemprelease
+ void waitForDifferent(unsigned millis);
+
+ /* We store OpTime's in the database as BSON Date datatype -- we needed some sort of
+ 64 bit "container" for these values. While these are not really "Dates", that seems a
+ better choice for now than say, Number, which is floating point. Note the BinData type
+ is perhaps the cleanest choice, lacking a true unsigned64 datatype, but BinData has 5
+ bytes of overhead.
+ */
+ unsigned long long asDate() const {
+ return reinterpret_cast<const unsigned long long*>(&i)[0];
+ }
+ long long asLL() const {
+ return reinterpret_cast<const long long*>(&i)[0];
+ }
+
+ bool isNull() const { return secs == 0; }
+
+ string toStringLong() const {
+ char buf[64];
+ time_t_to_String(secs, buf);
+ stringstream ss;
+ ss << time_t_to_String_short(secs) << ' ';
+ ss << hex << secs << ':' << i;
+ return ss.str();
+ }
+
+ string toStringPretty() const {
+ stringstream ss;
+ ss << time_t_to_String_short(secs) << ':' << hex << i;
+ return ss.str();
+ }
+
+ string toString() const {
+ stringstream ss;
+ ss << hex << secs << ':' << i;
+ return ss.str();
+ }
+
+ bool operator==(const OpTime& r) const {
+ return i == r.i && secs == r.secs;
+ }
+ bool operator!=(const OpTime& r) const {
+ return !(*this == r);
+ }
+ bool operator<(const OpTime& r) const {
+ if ( secs != r.secs )
+ return secs < r.secs;
+ return i < r.i;
+ }
+ bool operator<=(const OpTime& r) const {
+ return *this < r || *this == r;
+ }
+ bool operator>(const OpTime& r) const {
+ return !(*this <= r);
+ }
+ bool operator>=(const OpTime& r) const {
+ return !(*this < r);
+ }
+ private:
+
+ // The following functions are to get around the need to define class-level statics in a cpp
+ static boost::condition& notifier() {
+ static boost::condition* holder = new boost::condition();
+ return *holder;
+ };
+ static boost::mutex& notifyMutex() {
+ static boost::mutex* holder = new boost::mutex();
+ return *holder;
+ };
+ };
+#pragma pack()
+
+} // namespace mongo
diff --git a/src/mongo/util/password.cpp b/src/mongo/util/password.cpp
new file mode 100644
index 00000000000..18164c3aa0a
--- /dev/null
+++ b/src/mongo/util/password.cpp
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "password.h"
+#include <iostream>
+
+#ifndef _WIN32
+#include <termios.h>
+#endif
+
+using namespace std;
+
+namespace mongo {
+
+ string askPassword() {
+
+ std::string password;
+ cout << "Enter password: ";
+#ifndef _WIN32
+ const int stdinfd = 0;
+ termios termio;
+ tcflag_t old = 0;
+ if ( isatty( stdinfd ) ) {
+ int i = tcgetattr( stdinfd, &termio );
+ if( i == -1 ) {
+ cerr << "Cannot get terminal attributes " << errnoWithDescription() << endl;
+ return string();
+ }
+ old = termio.c_lflag;
+ termio.c_lflag &= ~ECHO;
+ i = tcsetattr( stdinfd, TCSANOW, &termio );
+ if( i == -1 ) {
+ cerr << "Cannot set terminal attributes " << errnoWithDescription() << endl;
+ return string();
+ }
+ }
+
+ getline( cin, password );
+
+ if ( isatty( stdinfd ) ) {
+ termio.c_lflag = old;
+ int i = tcsetattr( stdinfd, TCSANOW, &termio );
+ if( i == -1 ) {
+ cerr << "Cannot set terminal attributes " << errnoWithDescription() << endl;
+ return string();
+ }
+ }
+#else
+ HANDLE stdinh = GetStdHandle( STD_INPUT_HANDLE );
+ if ( stdinh == INVALID_HANDLE_VALUE) {
+ cerr << "Cannot get stdin handle " << GetLastError() << "\n";
+ return string();
+ }
+
+ DWORD old;
+ if ( !GetConsoleMode( stdinh, &old ) ) {
+ cerr << "Cannot get console mode " << GetLastError() << "\n";
+ return string();
+ }
+
+ DWORD noecho = ENABLE_LINE_INPUT | ENABLE_PROCESSED_INPUT;
+ if ( !SetConsoleMode( stdinh, noecho ) ) {
+ cerr << "Cannot set console mode " << GetLastError() << "\n";
+ return string();
+ }
+
+ getline( cin, password );
+
+ if ( !SetConsoleMode( stdinh, old ) ) {
+ cerr << "Cannot set console mode " << GetLastError() << "\n";
+ return string();
+ }
+#endif
+ cout << "\n";
+ return password;
+ }
+}
diff --git a/src/mongo/util/password.h b/src/mongo/util/password.h
new file mode 100644
index 00000000000..519f712ee7e
--- /dev/null
+++ b/src/mongo/util/password.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#pragma once
+
+#include <boost/program_options.hpp>
+#include <string>
+
+namespace mongo {
+
+ struct PasswordValue : public boost::program_options::typed_value<std::string> {
+
+ PasswordValue( std::string* val )
+ : boost::program_options::typed_value<std::string>( val ) { }
+
+ unsigned min_tokens() const {
+ return 0;
+ }
+
+ unsigned max_tokens() const {
+ return 1;
+ }
+
+ bool is_required() const {
+ return false;
+ }
+
+ void xparse( boost::any& value_store,
+ const std::vector<std::string>& new_tokens ) const {
+ if ( !value_store.empty() )
+#if BOOST_VERSION >= 104200
+ boost::throw_exception( boost::program_options::validation_error( boost::program_options::validation_error::multiple_values_not_allowed ) );
+#else
+ boost::throw_exception( boost::program_options::validation_error( "multiple values not allowed" ) );
+#endif
+ else if ( !new_tokens.empty() )
+ boost::program_options::typed_value<std::string>::xparse
+ (value_store, new_tokens);
+ else
+ value_store = std::string();
+ }
+
+ };
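+    // A minimal usage sketch (hypothetical wiring; the option name and target string are illustrative):
+    //   std::string pwd;
+    //   boost::program_options::options_description opts;
+    //   opts.add_options()( "password,p", new PasswordValue( &pwd ), "password for authentication" );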
+
+ std::string askPassword();
+
+}
diff --git a/src/mongo/util/paths.h b/src/mongo/util/paths.h
new file mode 100644
index 00000000000..bb82df0c730
--- /dev/null
+++ b/src/mongo/util/paths.h
@@ -0,0 +1,124 @@
+// @file paths.h
+// file paths and directory handling
+
+/* Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "mongoutils/str.h"
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+namespace mongo {
+
+ using namespace mongoutils;
+
+ extern string dbpath;
+
+ /** this is very much like a boost::path. however, we define a new type to get some type
+       checking. if you want to say "my param MUST be a relative path", use this.
+ */
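+    // Minimal usage sketch (illustrative; assumes dbpath is set, e.g. to "/data/db"):
+    //   RelativePath rp = RelativePath::fromFullPath( "/data/db/local.0" );
+    //   rp.toString();    // "local.0"
+    //   rp.asFullPath();  // "/data/db/local.0", rebuilt from dbpath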
+ struct RelativePath {
+ string _p;
+
+ bool empty() const { return _p.empty(); }
+
+ static RelativePath fromRelativePath(string f) {
+ RelativePath rp;
+ rp._p = f;
+ return rp;
+ }
+
+ /** from a full path */
+ static RelativePath fromFullPath(path f) {
+ path dbp(dbpath); // normalizes / and backslash
+ string fullpath = f.string();
+ string relative = str::after(fullpath, dbp.string());
+ if( relative.empty() ) {
+ log() << "warning file is not under db path? " << fullpath << ' ' << dbp.string() << endl;
+ RelativePath rp;
+ rp._p = fullpath;
+ return rp;
+ }
+ /*uassert(13600,
+ str::stream() << "file path is not under the db path? " << fullpath << ' ' << dbpath,
+ relative != fullpath);*/
+ if( str::startsWith(relative, "/") || str::startsWith(relative, "\\") ) {
+ relative.erase(0, 1);
+ }
+ RelativePath rp;
+ rp._p = relative;
+ return rp;
+ }
+
+ string toString() const { return _p; }
+
+ bool operator!=(const RelativePath& r) const { return _p != r._p; }
+ bool operator==(const RelativePath& r) const { return _p == r._p; }
+ bool operator<(const RelativePath& r) const { return _p < r._p; }
+
+ string asFullPath() const {
+ path x(dbpath);
+ x /= _p;
+ return x.string();
+ }
+
+ };
+
+ inline dev_t getPartition(const string& path){
+ struct stat stats;
+
+ if (stat(path.c_str(), &stats) != 0){
+ uasserted(13646, str::stream() << "stat() failed for file: " << path << " " << errnoWithDescription());
+ }
+
+ return stats.st_dev;
+ }
+
+ inline bool onSamePartition(const string& path1, const string& path2){
+ dev_t dev1 = getPartition(path1);
+ dev_t dev2 = getPartition(path2);
+
+ return dev1 == dev2;
+ }
+
+ inline void flushMyDirectory(const boost::filesystem::path& file){
+#ifdef __linux__ // this isn't needed elsewhere
+        // if called without a fully qualified path this asserts, which makes mongoperf fail, so for now just log a warning; a better solution is needed longer term.
+ // massert(13652, str::stream() << "Couldn't find parent dir for file: " << file.string(), );
+ if( !file.has_branch_path() ) {
+ log() << "warning flushMYDirectory couldn't find parent dir for file: " << file.string() << endl;
+ return;
+ }
+
+
+ boost::filesystem::path dir = file.branch_path(); // parent_path in new boosts
+
+ log(1) << "flushing directory " << dir.string() << endl;
+
+ int fd = ::open(dir.string().c_str(), O_RDONLY); // DO NOT THROW OR ASSERT BEFORE CLOSING
+ massert(13650, str::stream() << "Couldn't open directory '" << dir.string() << "' for flushing: " << errnoWithDescription(), fd >= 0);
+ if (fsync(fd) != 0){
+ int e = errno;
+ close(fd);
+ massert(13651, str::stream() << "Couldn't fsync directory '" << dir.string() << "': " << errnoWithDescription(e), false);
+ }
+ close(fd);
+#endif
+ }
+
+}
diff --git a/src/mongo/util/processinfo.cpp b/src/mongo/util/processinfo.cpp
new file mode 100644
index 00000000000..082d42b3bc0
--- /dev/null
+++ b/src/mongo/util/processinfo.cpp
@@ -0,0 +1,48 @@
+// processinfo.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "processinfo.h"
+#include "mmap.h"
+
+#include <iostream>
+using namespace std;
+
+namespace mongo {
+
+ class PidFileWiper {
+ public:
+ ~PidFileWiper() {
+ ofstream out( path.c_str() , ios_base::out );
+ out.close();
+ }
+
+ void write( const string& p ) {
+ path = p;
+ ofstream out( path.c_str() , ios_base::out );
+ out << getpid() << endl;
+ out.close();
+ }
+
+ string path;
+ } pidFileWiper;
+
+ void writePidFile( const string& path ) {
+ pidFileWiper.write( path );
+ }
+
+}
diff --git a/src/mongo/util/processinfo.h b/src/mongo/util/processinfo.h
new file mode 100644
index 00000000000..5272831eb74
--- /dev/null
+++ b/src/mongo/util/processinfo.h
@@ -0,0 +1,67 @@
+// processinfo.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <sys/types.h>
+#include <string>
+
+#ifndef _WIN32
+#include <unistd.h>
+#else
+typedef int pid_t;
+int getpid();
+#endif
+
+namespace mongo {
+
+ class BSONObjBuilder;
+
+ class ProcessInfo {
+ public:
+ ProcessInfo( pid_t pid = getpid() );
+ ~ProcessInfo();
+
+ /**
+ * @return mbytes
+ */
+ int getVirtualMemorySize();
+
+ /**
+ * @return mbytes
+ */
+ int getResidentSize();
+
+ /**
+ * Append platform-specific data to obj
+ */
+ void getExtraInfo(BSONObjBuilder& info);
+
+ bool supported();
+
+ static bool blockCheckSupported();
+ static bool blockInMemory( char * start );
+
+ private:
+ pid_t _pid;
+ };
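+    // Minimal usage sketch (illustrative):
+    //   ProcessInfo pi;                          // defaults to the current pid
+    //   if ( pi.supported() )
+    //       cout << "virtual MB: " << pi.getVirtualMemorySize()
+    //            << " resident MB: " << pi.getResidentSize() << endl;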
+
+ void writePidFile( const std::string& path );
+
+ void printMemInfo( const char * whereContextStr = 0 );
+
+}
diff --git a/src/mongo/util/processinfo_darwin.cpp b/src/mongo/util/processinfo_darwin.cpp
new file mode 100644
index 00000000000..9f73cbffd4f
--- /dev/null
+++ b/src/mongo/util/processinfo_darwin.cpp
@@ -0,0 +1,116 @@
+// processinfo_darwin.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "../pch.h"
+#include "processinfo.h"
+#include "log.h"
+
+#include <mach/vm_statistics.h>
+#include <mach/task_info.h>
+#include <mach/mach_init.h>
+#include <mach/mach_host.h>
+#include <mach/mach_traps.h>
+#include <mach/task.h>
+#include <mach/vm_map.h>
+#include <mach/shared_region.h>
+#include <iostream>
+
+#include <sys/types.h>
+#include <sys/mman.h>
+
+using namespace std;
+
+namespace mongo {
+
+ ProcessInfo::ProcessInfo( pid_t pid ) : _pid( pid ) {
+ }
+
+ ProcessInfo::~ProcessInfo() {
+ }
+
+ bool ProcessInfo::supported() {
+ return true;
+ }
+
+ int ProcessInfo::getVirtualMemorySize() {
+ task_t result;
+
+ mach_port_t task;
+
+ if ( ( result = task_for_pid( mach_task_self() , _pid , &task) ) != KERN_SUCCESS ) {
+ cout << "error getting task\n";
+ return 0;
+ }
+
+#if !defined(__LP64__)
+ task_basic_info_32 ti;
+#else
+ task_basic_info_64 ti;
+#endif
+ mach_msg_type_number_t count = TASK_BASIC_INFO_COUNT;
+ if ( ( result = task_info( task , TASK_BASIC_INFO , (task_info_t)&ti, &count ) ) != KERN_SUCCESS ) {
+ cout << "error getting task_info: " << result << endl;
+ return 0;
+ }
+ return (int)((double)ti.virtual_size / (1024.0 * 1024 ) );
+ }
+
+ int ProcessInfo::getResidentSize() {
+ task_t result;
+
+ mach_port_t task;
+
+ if ( ( result = task_for_pid( mach_task_self() , _pid , &task) ) != KERN_SUCCESS ) {
+ cout << "error getting task\n";
+ return 0;
+ }
+
+
+#if !defined(__LP64__)
+ task_basic_info_32 ti;
+#else
+ task_basic_info_64 ti;
+#endif
+ mach_msg_type_number_t count = TASK_BASIC_INFO_COUNT;
+ if ( ( result = task_info( task , TASK_BASIC_INFO , (task_info_t)&ti, &count ) ) != KERN_SUCCESS ) {
+ cout << "error getting task_info: " << result << endl;
+ return 0;
+ }
+ return (int)( ti.resident_size / (1024 * 1024 ) );
+ }
+
+ void ProcessInfo::getExtraInfo(BSONObjBuilder& info) {}
+
+ bool ProcessInfo::blockCheckSupported() {
+ return true;
+ }
+
+ bool ProcessInfo::blockInMemory( char * start ) {
+ static long pageSize = 0;
+ if ( pageSize == 0 ) {
+ pageSize = sysconf( _SC_PAGESIZE );
+ }
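+        // round the address down to its page boundary and ask mincore whether that page is resident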
+ start = start - ( (unsigned long long)start % pageSize );
+ char x = 0;
+ if ( mincore( start , 128 , &x ) ) {
+ log() << "mincore failed: " << errnoWithDescription() << endl;
+ return 1;
+ }
+ return x & 0x1;
+ }
+
+}
diff --git a/src/mongo/util/processinfo_linux2.cpp b/src/mongo/util/processinfo_linux2.cpp
new file mode 100644
index 00000000000..3eaccafd030
--- /dev/null
+++ b/src/mongo/util/processinfo_linux2.cpp
@@ -0,0 +1,244 @@
+// processinfo_linux2.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "processinfo.h"
+
+#include <iostream>
+#include <stdio.h>
+#include <malloc.h>
+#include <db/jsobj.h>
+#include <unistd.h>
+#include <sys/mman.h>
+
+using namespace std;
+
+#define KLONG long
+#define KLF "l"
+
+namespace mongo {
+
+ class LinuxProc {
+ public:
+ LinuxProc( pid_t pid = getpid() ) {
+ char name[128];
+ sprintf( name , "/proc/%d/stat" , pid );
+
+ FILE * f = fopen( name , "r");
+ if ( ! f ) {
+ stringstream ss;
+ ss << "couldn't open [" << name << "] " << errnoWithDescription();
+ string s = ss.str();
+                // keep the assert number visible for the assert# tooling: uasserted( 13538 , s.c_str() );
+ msgassertedNoTrace( 13538 , s.c_str() );
+ }
+ int found = fscanf(f,
+ "%d %s %c "
+ "%d %d %d %d %d "
+ "%lu %lu %lu %lu %lu "
+ "%lu %lu %ld %ld " /* utime stime cutime cstime */
+ "%ld %ld "
+ "%ld "
+ "%ld "
+ "%lu " /* start_time */
+ "%lu "
+ "%ld " // rss
+ "%lu %"KLF"u %"KLF"u %"KLF"u %"KLF"u %"KLF"u "
+ /*
+ "%*s %*s %*s %*s "
+ "%"KLF"u %*lu %*lu "
+ "%d %d "
+ "%lu %lu"
+ */
+
+ ,
+
+ &_pid,
+ _comm,
+ &_state,
+ &_ppid, &_pgrp, &_session, &_tty, &_tpgid,
+ &_flags, &_min_flt, &_cmin_flt, &_maj_flt, &_cmaj_flt,
+ &_utime, &_stime, &_cutime, &_cstime,
+ &_priority, &_nice,
+ &_alarm,
+ &_nlwp,
+ &_start_time,
+ &_vsize,
+ &_rss,
+ &_rss_rlim, &_start_code, &_end_code, &_start_stack, &_kstk_esp, &_kstk_eip
+
+ /*
+ &_wchan,
+ &_exit_signal, &_processor,
+ &_rtprio, &_sched
+ */
+ );
+ if ( found == 0 ) {
+ cout << "system error: reading proc info" << endl;
+ }
+ fclose( f );
+ }
+
+ unsigned long getVirtualMemorySize() {
+ return _vsize;
+ }
+
+ unsigned long getResidentSize() {
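+            // _rss is a page count; this assumes 4 KB pages rather than querying sysconf(_SC_PAGESIZE)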
+ return (unsigned long)_rss * 4 * 1024;
+ }
+
+ int _pid;
+ // The process ID.
+
+ char _comm[128];
+ // The filename of the executable, in parentheses. This is visible whether or not the executable is swapped out.
+
+ char _state;
+ //One character from the string "RSDZTW" where R is running, S is sleeping in an interruptible wait, D is waiting in uninterruptible
+ // disk sleep, Z is zombie, T is traced or stopped (on a signal), and W is paging.
+
+ int _ppid;
+ // The PID of the parent.
+
+ int _pgrp;
+ // The process group ID of the process.
+
+ int _session;
+ // The session ID of the process.
+
+ int _tty;
+ // The tty the process uses.
+
+ int _tpgid;
+ // The process group ID of the process which currently owns the tty that the process is connected to.
+
+ unsigned long _flags; // %lu
+ // The kernel flags word of the process. For bit meanings, see the PF_* defines in <linux/sched.h>. Details depend on the kernel version.
+
+ unsigned long _min_flt; // %lu
+ // The number of minor faults the process has made which have not required loading a memory page from disk.
+
+ unsigned long _cmin_flt; // %lu
+        // The number of minor faults that the process's waited-for children have made.
+
+ unsigned long _maj_flt; // %lu
+ // The number of major faults the process has made which have required loading a memory page from disk.
+
+ unsigned long _cmaj_flt; // %lu
+        // The number of major faults that the process's waited-for children have made.
+
+ unsigned long _utime; // %lu
+ // The number of jiffies that this process has been scheduled in user mode.
+
+ unsigned long _stime; // %lu
+ // The number of jiffies that this process has been scheduled in kernel mode.
+
+ long _cutime; // %ld
+        // The number of jiffies that this process's waited-for children have been scheduled in user mode.
+
+ long _cstime; // %ld
+
+ long _priority;
+ long _nice;
+
+ long _nlwp; // %ld
+ // The time in jiffies before the next SIGALRM is sent to the process due to an interval timer.
+
+ unsigned long _alarm;
+
+ unsigned long _start_time; // %lu
+ // The time in jiffies the process started after system boot.
+
+ unsigned long _vsize; // %lu
+ // Virtual memory size in bytes.
+
+ long _rss; // %ld
+ // Resident Set Size: number of pages the process has in real memory, minus 3 for administrative purposes. This is just the pages which
+ // count towards text, data, or stack space. This does not include pages which have not been demand-loaded in, or which are swapped out
+
+ unsigned long _rss_rlim; // %lu
+ // Current limit in bytes on the rss of the process (usually 4294967295 on i386).
+
+ unsigned long _start_code; // %lu
+ // The address above which program text can run.
+
+ unsigned long _end_code; // %lu
+ // The address below which program text can run.
+
+ unsigned long _start_stack; // %lu
+ // The address of the start of the stack.
+
+ unsigned long _kstk_esp; // %lu
+ // The current value of esp (stack pointer), as found in the kernel stack page for the process.
+
+ unsigned long _kstk_eip; // %lu
+ // The current EIP (instruction pointer).
+
+
+
+ };
+
+
+ ProcessInfo::ProcessInfo( pid_t pid ) : _pid( pid ) {
+ }
+
+ ProcessInfo::~ProcessInfo() {
+ }
+
+ bool ProcessInfo::supported() {
+ return true;
+ }
+
+ int ProcessInfo::getVirtualMemorySize() {
+ LinuxProc p(_pid);
+ return (int)( p.getVirtualMemorySize() / ( 1024.0 * 1024 ) );
+ }
+
+ int ProcessInfo::getResidentSize() {
+ LinuxProc p(_pid);
+ return (int)( p.getResidentSize() / ( 1024.0 * 1024 ) );
+ }
+
+ void ProcessInfo::getExtraInfo(BSONObjBuilder& info) {
+ // [dm] i don't think mallinfo works. (64 bit.) ??
+ struct mallinfo malloc_info = mallinfo(); // structure has same name as function that returns it. (see malloc.h)
+ info.append("heap_usage_bytes", malloc_info.uordblks/*main arena*/ + malloc_info.hblkhd/*mmap blocks*/);
+ //docs claim hblkhd is included in uordblks but it isn't
+
+ LinuxProc p(_pid);
+ info.append("page_faults", (int)p._maj_flt);
+ }
+
+ bool ProcessInfo::blockCheckSupported() {
+ return true;
+ }
+
+ bool ProcessInfo::blockInMemory( char * start ) {
+ static long pageSize = 0;
+ if ( pageSize == 0 ) {
+ pageSize = sysconf( _SC_PAGESIZE );
+ }
+ start = start - ( (unsigned long long)start % pageSize );
+ unsigned char x = 0;
+ if ( mincore( start , 128 , &x ) ) {
+ log() << "mincore failed: " << errnoWithDescription() << endl;
+ return 1;
+ }
+ return x & 0x1;
+ }
+
+
+}
diff --git a/src/mongo/util/processinfo_none.cpp b/src/mongo/util/processinfo_none.cpp
new file mode 100644
index 00000000000..7d1e84d377c
--- /dev/null
+++ b/src/mongo/util/processinfo_none.cpp
@@ -0,0 +1,55 @@
+// processinfo_none.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "processinfo.h"
+
+#include <iostream>
+using namespace std;
+
+namespace mongo {
+
+ ProcessInfo::ProcessInfo( pid_t pid ) {
+ }
+
+ ProcessInfo::~ProcessInfo() {
+ }
+
+ bool ProcessInfo::supported() {
+ return false;
+ }
+
+ int ProcessInfo::getVirtualMemorySize() {
+ return -1;
+ }
+
+ int ProcessInfo::getResidentSize() {
+ return -1;
+ }
+
+ void ProcessInfo::getExtraInfo(BSONObjBuilder& info) {}
+
+ bool ProcessInfo::blockCheckSupported() {
+ return false;
+ }
+
+ bool ProcessInfo::blockInMemory( char * start ) {
+ assert(0);
+ return true;
+ }
+
+}
diff --git a/src/mongo/util/processinfo_win32.cpp b/src/mongo/util/processinfo_win32.cpp
new file mode 100644
index 00000000000..87d92db7e18
--- /dev/null
+++ b/src/mongo/util/processinfo_win32.cpp
@@ -0,0 +1,102 @@
+// processinfo_win32.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "processinfo.h"
+#include <iostream>
+#include <psapi.h>
+#include "../bson/bsonobjbuilder.h"
+using namespace std;
+
+int getpid() {
+ return GetCurrentProcessId();
+}
+
+namespace mongo {
+
+ int _wconvertmtos( SIZE_T s ) {
+ return (int)( s / ( 1024 * 1024 ) );
+ }
+
+ ProcessInfo::ProcessInfo( pid_t pid ) {
+ }
+
+ ProcessInfo::~ProcessInfo() {
+ }
+
+ bool ProcessInfo::supported() {
+ return true;
+ }
+
+ int ProcessInfo::getVirtualMemorySize() {
+ MEMORYSTATUSEX mse;
+ mse.dwLength = sizeof(mse);
+ assert( GlobalMemoryStatusEx( &mse ) );
+ DWORDLONG x = (mse.ullTotalVirtual - mse.ullAvailVirtual) / (1024 * 1024) ;
+ assert( x <= 0x7fffffff );
+ return (int) x;
+ }
+
+ int ProcessInfo::getResidentSize() {
+ PROCESS_MEMORY_COUNTERS pmc;
+ assert( GetProcessMemoryInfo( GetCurrentProcess() , &pmc, sizeof(pmc) ) );
+ return _wconvertmtos( pmc.WorkingSetSize );
+ }
+
+ void ProcessInfo::getExtraInfo(BSONObjBuilder& info) {
+ MEMORYSTATUSEX mse;
+ mse.dwLength = sizeof(mse);
+ PROCESS_MEMORY_COUNTERS pmc;
+ if( GetProcessMemoryInfo( GetCurrentProcess() , &pmc, sizeof(pmc) ) ) {
+ info.append("page_faults", static_cast<int>(pmc.PageFaultCount));
+ info.append("usagePageFileMB", static_cast<int>(pmc.PagefileUsage / 1024 / 1024));
+ }
+ if( GlobalMemoryStatusEx( &mse ) ) {
+ info.append("totalPageFileMB", static_cast<int>(mse.ullTotalPageFile / 1024 / 1024));
+ info.append("availPageFileMB", static_cast<int>(mse.ullAvailPageFile / 1024 / 1024));
+ info.append("ramMB", static_cast<int>(mse.ullTotalPhys / 1024 / 1024));
+ }
+ }
+
+ bool ProcessInfo::blockCheckSupported() {
+ return true;
+ }
+
+ bool ProcessInfo::blockInMemory( char * start ) {
+#if 0
+ // code for printing out page fault addresses and pc's --
+        // this could be useful for targeting heavy pagefault locations in the code
+ static BOOL bstat = InitializeProcessForWsWatch( GetCurrentProcess() );
+ PSAPI_WS_WATCH_INFORMATION_EX wiex[30];
+ DWORD bufsize = sizeof(wiex);
+ bstat = GetWsChangesEx( GetCurrentProcess(), &wiex[0], &bufsize );
+ if (bstat) {
+ for (int i=0; i<30; i++) {
+ if (wiex[i].BasicInfo.FaultingPc == 0) break;
+ cout << "faulting pc = " << wiex[i].BasicInfo.FaultingPc << " address = " << wiex[i].BasicInfo.FaultingVa << " thread id = " << wiex[i].FaultingThreadId << endl;
+ }
+ }
+#endif
+ PSAPI_WORKING_SET_EX_INFORMATION wsinfo;
+ wsinfo.VirtualAddress = start;
+ BOOL result = QueryWorkingSetEx( GetCurrentProcess(), &wsinfo, sizeof(wsinfo) );
+ if ( result )
+ if ( wsinfo.VirtualAttributes.Valid )
+ return true;
+ return false;
+ }
+}
diff --git a/src/mongo/util/queue.h b/src/mongo/util/queue.h
new file mode 100644
index 00000000000..4223bd6c256
--- /dev/null
+++ b/src/mongo/util/queue.h
@@ -0,0 +1,106 @@
+// @file queue.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "../pch.h"
+
+#include <queue>
+
+#include "../util/timer.h"
+
+namespace mongo {
+
+ /**
+ * simple blocking queue
+ */
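+    // A minimal usage sketch (hypothetical producer/consumer):
+    //   BlockingQueue<int> q;
+    //   q.push( 42 );                     // producer thread
+    //   int v = q.blockingPop();          // consumer blocks until an item is available
+    //   int w;
+    //   bool got = q.blockingPop( w, 5 ); // or wait at most 5 seconds for one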
+ template<typename T> class BlockingQueue : boost::noncopyable {
+ public:
+ BlockingQueue() : _lock("BlockingQueue") { }
+
+ void push(T const& t) {
+ scoped_lock l( _lock );
+ _queue.push( t );
+ _condition.notify_one();
+ }
+
+ bool empty() const {
+ scoped_lock l( _lock );
+ return _queue.empty();
+ }
+
+ size_t size() const {
+ scoped_lock l( _lock );
+ return _queue.size();
+ }
+
+
+ bool tryPop( T & t ) {
+ scoped_lock l( _lock );
+ if ( _queue.empty() )
+ return false;
+
+ t = _queue.front();
+ _queue.pop();
+
+ return true;
+ }
+
+ T blockingPop() {
+
+ scoped_lock l( _lock );
+ while( _queue.empty() )
+ _condition.wait( l.boost() );
+
+ T t = _queue.front();
+ _queue.pop();
+ return t;
+ }
+
+
+ /**
+ * blocks waiting for an object until maxSecondsToWait passes
+ * if got one, return true and set in t
+ * otherwise return false and t won't be changed
+ */
+ bool blockingPop( T& t , int maxSecondsToWait ) {
+
+ Timer timer;
+
+ boost::xtime xt;
+ boost::xtime_get(&xt, boost::TIME_UTC);
+ xt.sec += maxSecondsToWait;
+
+ scoped_lock l( _lock );
+ while( _queue.empty() ) {
+ if ( ! _condition.timed_wait( l.boost() , xt ) )
+ return false;
+ }
+
+ t = _queue.front();
+ _queue.pop();
+ return true;
+ }
+
+ private:
+ std::queue<T> _queue;
+
+ mutable mongo::mutex _lock;
+ boost::condition _condition;
+ };
+
+}
diff --git a/src/mongo/util/ramlog.cpp b/src/mongo/util/ramlog.cpp
new file mode 100644
index 00000000000..d7a839a3fff
--- /dev/null
+++ b/src/mongo/util/ramlog.cpp
@@ -0,0 +1,190 @@
+// ramlog.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "log.h"
+#include "ramlog.h"
+#include "mongoutils/html.h"
+#include "mongoutils/str.h"
+
+namespace mongo {
+
+ using namespace mongoutils;
+
+ RamLog::RamLog( string name ) : _name(name), _lastWrite(0) {
+ h = 0; n = 0;
+ for( int i = 0; i < N; i++ )
+ lines[i][C-1] = 0;
+
+ if ( name.size() ) {
+
+ if ( ! _namedLock )
+ _namedLock = new mongo::mutex("RamLog::_namedLock");
+
+ scoped_lock lk( *_namedLock );
+ if ( ! _named )
+ _named = new RM();
+ (*_named)[name] = this;
+ }
+
+ }
+
+ RamLog::~RamLog() {
+
+ }
+
+ void RamLog::write(LogLevel ll, const string& str) {
+ _lastWrite = time(0);
+
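+        // ring buffer: h indexes the oldest line and n is the line count; the next write goes to
+        // slot (h+n)%N, and once the buffer is full h advances instead of n (see the bookkeeping below)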
+ char *p = lines[(h+n)%N];
+
+ unsigned sz = str.size();
+ if( sz < C ) {
+            if ( sz > 0 && str.c_str()[sz-1] == '\n' ) {
+ memcpy(p, str.c_str(), sz-1);
+ p[sz-1] = 0;
+ }
+ else
+ strcpy(p, str.c_str());
+ }
+ else {
+ memcpy(p, str.c_str(), C-1);
+ }
+
+ if( n < N ) n++;
+ else h = (h+1) % N;
+ }
+
+ void RamLog::get( vector<const char*>& v) const {
+ for( unsigned x=0, i=h; x++ < n; i=(i+1)%N )
+ v.push_back(lines[i]);
+ }
+
+ int RamLog::repeats(const vector<const char *>& v, int i) {
+ for( int j = i-1; j >= 0 && j+8 > i; j-- ) {
+ if( strcmp(v[i]+20,v[j]+20) == 0 ) {
+ for( int x = 1; ; x++ ) {
+ if( j+x == i ) return j;
+ if( i+x>=(int) v.size() ) return -1;
+ if( strcmp(v[i+x]+20,v[j+x]+20) ) return -1;
+ }
+ return -1;
+ }
+ }
+ return -1;
+ }
+
+
+ string RamLog::clean(const vector<const char *>& v, int i, string line ) {
+ if( line.empty() ) line = v[i];
+ if( i > 0 && strncmp(v[i], v[i-1], 11) == 0 )
+ return string(" ") + line.substr(11);
+ return v[i];
+ }
+
+ string RamLog::color(string line) {
+ string s = str::after(line, "replSet ");
+ if( str::startsWith(s, "warning") || startsWith(s, "error") )
+ return html::red(line);
+ if( str::startsWith(s, "info") ) {
+ if( str::endsWith(s, " up\n") )
+ return html::green(line);
+ else if( str::contains(s, " down ") || str::endsWith(s, " down\n") )
+ return html::yellow(line);
+ return line; //html::blue(line);
+ }
+
+ return line;
+ }
+
+ /* turn http:... into an anchor */
+ string RamLog::linkify(const char *s) {
+ const char *p = s;
+ const char *h = strstr(p, "http://");
+ if( h == 0 ) return s;
+
+ const char *sp = h + 7;
+ while( *sp && *sp != ' ' ) sp++;
+
+ string url(h, sp-h);
+ stringstream ss;
+ ss << string(s, h-s) << "<a href=\"" << url << "\">" << url << "</a>" << sp;
+ return ss.str();
+ }
+
+ void RamLog::toHTML(stringstream& s) {
+ vector<const char*> v;
+ get( v );
+
+ s << "<pre>\n";
+ for( int i = 0; i < (int)v.size(); i++ ) {
+ assert( strlen(v[i]) > 20 );
+ int r = repeats(v, i);
+ if( r < 0 ) {
+ s << color( linkify( clean(v,i).c_str() ) ) << '\n';
+ }
+ else {
+ stringstream x;
+ x << string(v[i], 0, 20);
+ int nr = (i-r);
+ int last = i+nr-1;
+ for( ; r < i ; r++ ) x << '.';
+ if( 1 ) {
+ stringstream r;
+ if( nr == 1 ) r << "repeat last line";
+ else r << "repeats last " << nr << " lines; ends " << string(v[last]+4,0,15);
+ s << html::a("", r.str(), clean(v,i,x.str()));
+ }
+ else s << x.str();
+ s << '\n';
+ i = last;
+ }
+ }
+ s << "</pre>\n";
+ }
+
+ // ---------------
+ // static things
+ // ---------------
+
+ RamLog* RamLog::get( string name ) {
+ if ( ! _named )
+ return 0;
+
+ scoped_lock lk( *_namedLock );
+ RM::iterator i = _named->find( name );
+ if ( i == _named->end() )
+ return 0;
+ return i->second;
+ }
+
+ void RamLog::getNames( vector<string>& names ) {
+ if ( ! _named )
+ return;
+
+ scoped_lock lk( *_namedLock );
+ for ( RM::iterator i=_named->begin(); i!=_named->end(); ++i ) {
+ if ( i->second->n )
+ names.push_back( i->first );
+ }
+ }
+
+ mongo::mutex* RamLog::_namedLock;
+ RamLog::RM* RamLog::_named = 0;
+
+ Tee* const warnings = new RamLog("warnings"); // Things put here go in serverStatus
+}
diff --git a/src/mongo/util/ramlog.h b/src/mongo/util/ramlog.h
new file mode 100644
index 00000000000..d3d5c8fbb4e
--- /dev/null
+++ b/src/mongo/util/ramlog.h
@@ -0,0 +1,65 @@
+// ramlog.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "log.h"
+
+namespace mongo {
+
+ class RamLog : public Tee {
+ public:
+ RamLog( string name );
+
+ virtual void write(LogLevel ll, const string& str);
+
+ void get( vector<const char*>& v) const;
+
+ void toHTML(stringstream& s);
+
+ static RamLog* get( string name );
+ static void getNames( vector<string>& names );
+
+ time_t lastWrite() { return _lastWrite; } // 0 if no writes
+
+ protected:
+ static int repeats(const vector<const char *>& v, int i);
+ static string clean(const vector<const char *>& v, int i, string line="");
+ static string color(string line);
+
+ /* turn http:... into an anchor */
+ static string linkify(const char *s);
+
+ private:
+        ~RamLog(); // private on purpose: RamLogs are intentionally leaked so they stay usable until the very end of the process
+
+ enum {
+            N = 128, // number of lines
+ C = 256 // max size of line
+ };
+ char lines[N][C];
+ unsigned h; // current position
+        unsigned n; // number of lines stored, 0 to N
+ string _name;
+
+ typedef map<string,RamLog*> RM;
+ static mongo::mutex* _namedLock;
+ static RM* _named;
+ time_t _lastWrite;
+ };
+
+}
diff --git a/src/mongo/util/scopeguard.h b/src/mongo/util/scopeguard.h
new file mode 100644
index 00000000000..b87a4b51871
--- /dev/null
+++ b/src/mongo/util/scopeguard.h
@@ -0,0 +1,427 @@
+////////////////////////////////////////////////////////////////////////////////
+// The Loki Library
+// Copyright (c) 2000 Andrei Alexandrescu
+// Copyright (c) 2000 Petru Marginean
+// Copyright (c) 2005 Joshua Lehrer
+//
+// Permission to use, copy, modify, distribute and sell this software for any
+// purpose is hereby granted without fee, provided that the above copyright
+// notice appear in all copies and that both that copyright notice and this
+// permission notice appear in supporting documentation.
+// The author makes no representations about the
+// suitability of this software for any purpose. It is provided "as is"
+// without express or implied warranty.
+////////////////////////////////////////////////////////////////////////////////
+#ifndef LOKI_SCOPEGUARD_H_
+#define LOKI_SCOPEGUARD_H_
+
+namespace mongo
+{
+
+ ////////////////////////////////////////////////////////////////////////////////
+ /// \class RefToValue
+ ///
+ /// Transports a reference as a value
+ /// Serves to implement the Colvin/Gibbons trick for SmartPtr/ScopeGuard
+ ////////////////////////////////////////////////////////////////////////////////
+
+ template <class T>
+ class RefToValue
+ {
+ public:
+
+ RefToValue(T& ref) : ref_(ref)
+ {}
+
+ RefToValue(const RefToValue& rhs) : ref_(rhs.ref_)
+ {}
+
+ operator T& () const
+ {
+ return ref_;
+ }
+
+ private:
+ // Disable - not implemented
+ RefToValue();
+ RefToValue& operator=(const RefToValue&);
+
+ T& ref_;
+ };
+
+
+ ////////////////////////////////////////////////////////////////////////////////
+ /// RefToValue creator.
+ ////////////////////////////////////////////////////////////////////////////////
+
+ template <class T>
+ inline RefToValue<T> ByRef(T& t)
+ {
+ return RefToValue<T>(t);
+ }
+
+
+
+
+ ////////////////////////////////////////////
+ /// ScopeGuard
+ /*
+ Trivial example for use:
+
+ FILE* f = fopen("myfile.txt", "w+");
+ if (!f)
+ return error;
+ ON_BLOCK_EXIT(fclose, f);
+
+
+ More complicated example:
+
+ ScopeGuard guard = MakeGuard(my_rollback_func, myparam);
+ ...
+ if (successful) {
+ guard.Dismiss();
+ return;
+ }
+ // guard is still active here and will fire at scope exit
+ ...
+
+
+ */
+
+
+ class ScopeGuardImplBase
+ {
+ ScopeGuardImplBase& operator =(const ScopeGuardImplBase&);
+
+ protected:
+
+ ~ScopeGuardImplBase()
+ {}
+
+ ScopeGuardImplBase(const ScopeGuardImplBase& other) throw()
+ : dismissed_(other.dismissed_)
+ {
+ other.Dismiss();
+ }
+
+ template <typename J>
+ static void SafeExecute(J& j) throw()
+ {
+ if (!j.dismissed_)
+ try
+ {
+ j.Execute();
+ }
+ catch(...)
+ {}
+ }
+
+ mutable bool dismissed_;
+
+ public:
+ ScopeGuardImplBase() throw() : dismissed_(false)
+ {}
+
+ void Dismiss() const throw()
+ {
+ dismissed_ = true;
+ }
+ };
+
+ ////////////////////////////////////////////////////////////////
+ ///
+ /// \typedef typedef const ScopeGuardImplBase& ScopeGuard
+ ///
+ /// See Andrei's and Petru Marginean's CUJ article
+ /// http://www.cuj.com/documents/s=8000/cujcexp1812alexandr/alexandr.htm
+ ///
+ /// Changes to the original code by Joshua Lehrer:
+ /// http://www.lehrerfamily.com/scopeguard.html
+ ////////////////////////////////////////////////////////////////
+
+ typedef const ScopeGuardImplBase& ScopeGuard;
+
+ template <typename F>
+ class ScopeGuardImpl0 : public ScopeGuardImplBase
+ {
+ public:
+ static ScopeGuardImpl0<F> MakeGuard(F fun)
+ {
+ return ScopeGuardImpl0<F>(fun);
+ }
+
+ ~ScopeGuardImpl0() throw()
+ {
+ SafeExecute(*this);
+ }
+
+ void Execute()
+ {
+ fun_();
+ }
+
+ protected:
+ ScopeGuardImpl0(F fun) : fun_(fun)
+ {}
+
+ F fun_;
+ };
+
+ template <typename F>
+ inline ScopeGuardImpl0<F> MakeGuard(F fun)
+ {
+ return ScopeGuardImpl0<F>::MakeGuard(fun);
+ }
+
+ template <typename F, typename P1>
+ class ScopeGuardImpl1 : public ScopeGuardImplBase
+ {
+ public:
+ static ScopeGuardImpl1<F, P1> MakeGuard(F fun, P1 p1)
+ {
+ return ScopeGuardImpl1<F, P1>(fun, p1);
+ }
+
+ ~ScopeGuardImpl1() throw()
+ {
+ SafeExecute(*this);
+ }
+
+ void Execute()
+ {
+ fun_(p1_);
+ }
+
+ protected:
+ ScopeGuardImpl1(F fun, P1 p1) : fun_(fun), p1_(p1)
+ {}
+
+ F fun_;
+ const P1 p1_;
+ };
+
+ template <typename F, typename P1>
+ inline ScopeGuardImpl1<F, P1> MakeGuard(F fun, P1 p1)
+ {
+ return ScopeGuardImpl1<F, P1>::MakeGuard(fun, p1);
+ }
+
+ template <typename F, typename P1, typename P2>
+ class ScopeGuardImpl2: public ScopeGuardImplBase
+ {
+ public:
+ static ScopeGuardImpl2<F, P1, P2> MakeGuard(F fun, P1 p1, P2 p2)
+ {
+ return ScopeGuardImpl2<F, P1, P2>(fun, p1, p2);
+ }
+
+ ~ScopeGuardImpl2() throw()
+ {
+ SafeExecute(*this);
+ }
+
+ void Execute()
+ {
+ fun_(p1_, p2_);
+ }
+
+ protected:
+ ScopeGuardImpl2(F fun, P1 p1, P2 p2) : fun_(fun), p1_(p1), p2_(p2)
+ {}
+
+ F fun_;
+ const P1 p1_;
+ const P2 p2_;
+ };
+
+ template <typename F, typename P1, typename P2>
+ inline ScopeGuardImpl2<F, P1, P2> MakeGuard(F fun, P1 p1, P2 p2)
+ {
+ return ScopeGuardImpl2<F, P1, P2>::MakeGuard(fun, p1, p2);
+ }
+
+ template <typename F, typename P1, typename P2, typename P3>
+ class ScopeGuardImpl3 : public ScopeGuardImplBase
+ {
+ public:
+ static ScopeGuardImpl3<F, P1, P2, P3> MakeGuard(F fun, P1 p1, P2 p2, P3 p3)
+ {
+ return ScopeGuardImpl3<F, P1, P2, P3>(fun, p1, p2, p3);
+ }
+
+ ~ScopeGuardImpl3() throw()
+ {
+ SafeExecute(*this);
+ }
+
+ void Execute()
+ {
+ fun_(p1_, p2_, p3_);
+ }
+
+ protected:
+ ScopeGuardImpl3(F fun, P1 p1, P2 p2, P3 p3) : fun_(fun), p1_(p1), p2_(p2), p3_(p3)
+ {}
+
+ F fun_;
+ const P1 p1_;
+ const P2 p2_;
+ const P3 p3_;
+ };
+
+ template <typename F, typename P1, typename P2, typename P3>
+ inline ScopeGuardImpl3<F, P1, P2, P3> MakeGuard(F fun, P1 p1, P2 p2, P3 p3)
+ {
+ return ScopeGuardImpl3<F, P1, P2, P3>::MakeGuard(fun, p1, p2, p3);
+ }
+
+ //************************************************************
+
+ template <class Obj, typename MemFun>
+ class ObjScopeGuardImpl0 : public ScopeGuardImplBase
+ {
+ public:
+ static ObjScopeGuardImpl0<Obj, MemFun> MakeObjGuard(Obj& obj, MemFun memFun)
+ {
+ return ObjScopeGuardImpl0<Obj, MemFun>(obj, memFun);
+ }
+
+ ~ObjScopeGuardImpl0() throw()
+ {
+ SafeExecute(*this);
+ }
+
+ void Execute()
+ {
+ (obj_.*memFun_)();
+ }
+
+ protected:
+ ObjScopeGuardImpl0(Obj& obj, MemFun memFun) : obj_(obj), memFun_(memFun)
+ {}
+
+ Obj& obj_;
+ MemFun memFun_;
+ };
+
+ template <class Obj, typename MemFun>
+ inline ObjScopeGuardImpl0<Obj, MemFun> MakeObjGuard(Obj& obj, MemFun memFun)
+ {
+ return ObjScopeGuardImpl0<Obj, MemFun>::MakeObjGuard(obj, memFun);
+ }
+
+ template <typename Ret, class Obj1, class Obj2>
+ inline ObjScopeGuardImpl0<Obj1,Ret(Obj2::*)()> MakeGuard(Ret(Obj2::*memFun)(), Obj1 &obj)
+ {
+ return ObjScopeGuardImpl0<Obj1,Ret(Obj2::*)()>::MakeObjGuard(obj,memFun);
+ }
+
+ template <typename Ret, class Obj1, class Obj2>
+ inline ObjScopeGuardImpl0<Obj1,Ret(Obj2::*)()> MakeGuard(Ret(Obj2::*memFun)(), Obj1 *obj)
+ {
+ return ObjScopeGuardImpl0<Obj1,Ret(Obj2::*)()>::MakeObjGuard(*obj,memFun);
+ }
+
+ template <class Obj, typename MemFun, typename P1>
+ class ObjScopeGuardImpl1 : public ScopeGuardImplBase
+ {
+ public:
+ static ObjScopeGuardImpl1<Obj, MemFun, P1> MakeObjGuard(Obj& obj, MemFun memFun, P1 p1)
+ {
+ return ObjScopeGuardImpl1<Obj, MemFun, P1>(obj, memFun, p1);
+ }
+
+ ~ObjScopeGuardImpl1() throw()
+ {
+ SafeExecute(*this);
+ }
+
+ void Execute()
+ {
+ (obj_.*memFun_)(p1_);
+ }
+
+ protected:
+ ObjScopeGuardImpl1(Obj& obj, MemFun memFun, P1 p1) : obj_(obj), memFun_(memFun), p1_(p1)
+ {}
+
+ Obj& obj_;
+ MemFun memFun_;
+ const P1 p1_;
+ };
+
+ template <class Obj, typename MemFun, typename P1>
+ inline ObjScopeGuardImpl1<Obj, MemFun, P1> MakeObjGuard(Obj& obj, MemFun memFun, P1 p1)
+ {
+ return ObjScopeGuardImpl1<Obj, MemFun, P1>::MakeObjGuard(obj, memFun, p1);
+ }
+
+ template <typename Ret, class Obj1, class Obj2, typename P1a, typename P1b>
+ inline ObjScopeGuardImpl1<Obj1,Ret(Obj2::*)(P1a),P1b> MakeGuard(Ret(Obj2::*memFun)(P1a), Obj1 &obj, P1b p1)
+ {
+ return ObjScopeGuardImpl1<Obj1,Ret(Obj2::*)(P1a),P1b>::MakeObjGuard(obj,memFun,p1);
+ }
+
+ template <typename Ret, class Obj1, class Obj2, typename P1a, typename P1b>
+ inline ObjScopeGuardImpl1<Obj1,Ret(Obj2::*)(P1a),P1b> MakeGuard(Ret(Obj2::*memFun)(P1a), Obj1 *obj, P1b p1)
+ {
+ return ObjScopeGuardImpl1<Obj1,Ret(Obj2::*)(P1a),P1b>::MakeObjGuard(*obj,memFun,p1);
+ }
+
+ template <class Obj, typename MemFun, typename P1, typename P2>
+ class ObjScopeGuardImpl2 : public ScopeGuardImplBase
+ {
+ public:
+ static ObjScopeGuardImpl2<Obj, MemFun, P1, P2> MakeObjGuard(Obj& obj, MemFun memFun, P1 p1, P2 p2)
+ {
+ return ObjScopeGuardImpl2<Obj, MemFun, P1, P2>(obj, memFun, p1, p2);
+ }
+
+ ~ObjScopeGuardImpl2() throw()
+ {
+ SafeExecute(*this);
+ }
+
+ void Execute()
+ {
+ (obj_.*memFun_)(p1_, p2_);
+ }
+
+ protected:
+ ObjScopeGuardImpl2(Obj& obj, MemFun memFun, P1 p1, P2 p2) : obj_(obj), memFun_(memFun), p1_(p1), p2_(p2)
+ {}
+
+ Obj& obj_;
+ MemFun memFun_;
+ const P1 p1_;
+ const P2 p2_;
+ };
+
+ template <class Obj, typename MemFun, typename P1, typename P2>
+ inline ObjScopeGuardImpl2<Obj, MemFun, P1, P2> MakeObjGuard(Obj& obj, MemFun memFun, P1 p1, P2 p2)
+ {
+ return ObjScopeGuardImpl2<Obj, MemFun, P1, P2>::MakeObjGuard(obj, memFun, p1, p2);
+ }
+
+ template <typename Ret, class Obj1, class Obj2, typename P1a, typename P1b, typename P2a, typename P2b>
+ inline ObjScopeGuardImpl2<Obj1,Ret(Obj2::*)(P1a,P2a),P1b,P2b> MakeGuard(Ret(Obj2::*memFun)(P1a,P2a), Obj1 &obj, P1b p1, P2b p2)
+ {
+ return ObjScopeGuardImpl2<Obj1,Ret(Obj2::*)(P1a,P2a),P1b,P2b>::MakeObjGuard(obj,memFun,p1,p2);
+ }
+
+ template <typename Ret, class Obj1, class Obj2, typename P1a, typename P1b, typename P2a, typename P2b>
+ inline ObjScopeGuardImpl2<Obj1,Ret(Obj2::*)(P1a,P2a),P1b,P2b> MakeGuard(Ret(Obj2::*memFun)(P1a,P2a), Obj1 *obj, P1b p1, P2b p2)
+ {
+ return ObjScopeGuardImpl2<Obj1,Ret(Obj2::*)(P1a,P2a),P1b,P2b>::MakeObjGuard(*obj,memFun,p1,p2);
+ }
+
+} // namespace Loki
+
+#define LOKI_CONCATENATE_DIRECT(s1, s2) s1##s2
+#define LOKI_CONCATENATE(s1, s2) LOKI_CONCATENATE_DIRECT(s1, s2)
+#define LOKI_ANONYMOUS_VARIABLE(str) LOKI_CONCATENATE(str, __LINE__)
+
+#define ON_BLOCK_EXIT ScopeGuard LOKI_ANONYMOUS_VARIABLE(scopeGuard) = MakeGuard
+#define ON_BLOCK_EXIT_OBJ ScopeGuard LOKI_ANONYMOUS_VARIABLE(scopeGuard) = MakeObjGuard
+
+#endif //LOKI_SCOPEGUARD_H_
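For reference, a minimal usage sketch of the guard machinery above; it is not part of this commit, assumes the header is reachable as util/scopeguard.h, and uses illustrative file-handling code:

#include <cstdio>
#include "util/scopeguard.h"

using namespace Loki;   // ScopeGuard and MakeGuard are declared in namespace Loki above

bool appendLine( const char* path , const char* line ) {
    FILE* f = fopen( path , "a" );
    if ( ! f )
        return false;

    // fclose(f) runs when this scope exits, on every return path
    ON_BLOCK_EXIT( fclose , f );

    if ( fputs( line , f ) < 0 )
        return false;            // the guard still fires: the file is closed
    return fputs( "\n" , f ) >= 0;
}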
diff --git a/src/mongo/util/signal_handlers.cpp b/src/mongo/util/signal_handlers.cpp
new file mode 100644
index 00000000000..0e9ec7a9b15
--- /dev/null
+++ b/src/mongo/util/signal_handlers.cpp
@@ -0,0 +1,122 @@
+// signal_handlers.cpp
+
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+
+#include <cstdarg>
+#include <cstdio>
+#include <cstdlib>
+
+#if !defined(_WIN32) // TODO: windows support
+#include <unistd.h>
+#endif
+
+#if !defined(_WIN32) && !defined(NOEXECINFO)
+#include <execinfo.h>
+#endif
+
+#include "log.h"
+#include "signal_handlers.h"
+
+namespace mongo {
+
+ /*
+ * WARNING: PLEASE READ BEFORE CHANGING THIS MODULE
+ *
+ * All code in this module should be signal-friendly. Before adding any system
+ * call or other dependency, please make sure that property still holds.
+ *
+ */
+
+ static int rawWrite( int fd , char* c , int size ) {
+#if !defined(_WIN32)
+
+ int toWrite = size;
+ int writePos = 0;
+ int wrote;
+ while ( toWrite > 0 ) {
+ wrote = write( fd , &c[writePos] , toWrite );
+ if ( wrote < 1 ) break;
+ toWrite -= wrote;
+ writePos += wrote;
+ }
+ return writePos;
+
+#else
+
+ return -1;
+
+#endif
+ }
+
+ static int formattedWrite( int fd , const char* format, ... ) {
+ const int MAX_ENTRY = 256;
+ static char entryBuf[MAX_ENTRY];
+
+ va_list ap;
+ va_start( ap , format );
+ int entrySize = vsnprintf( entryBuf , MAX_ENTRY-1 , format , ap );
+ if ( entrySize < 0 ) {
+ return -1;
+ }
+
+ if ( rawWrite( fd , entryBuf , entrySize ) < 0 ) {
+ return -1;
+ }
+
+ return 0;
+ }
+
+ static void formattedBacktrace( int fd ) {
+
+#if !defined(_WIN32) && !defined(NOEXECINFO)
+
+ int numFrames;
+ const int MAX_DEPTH = 20;
+ void* stackFrames[MAX_DEPTH];
+
+ numFrames = backtrace( stackFrames , MAX_DEPTH );
+ for ( int i = 0; i < numFrames; i++ ) {
+ formattedWrite( fd , "%p " , stackFrames[i] );
+ }
+ formattedWrite( fd , "\n" );
+
+ backtrace_symbols_fd( stackFrames , numFrames , fd );
+
+#else
+
+ formattedWrite( fd, "backtracing not implemented for this platform yet\n" );
+
+#endif
+
+ }
+
+ void printStackAndExit( int signalNum ) {
+ int fd = Logstream::getLogDesc();
+
+ if ( fd >= 0 ) {
+ formattedWrite( fd , "Received signal %d\n" , signalNum );
+ formattedWrite( fd , "Backtrace: " );
+ formattedBacktrace( fd );
+ formattedWrite( fd , "===\n" );
+ }
+
+ ::exit( EXIT_ABRUPT );
+ }
+
+} // namespace mongo
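A POSIX-only sketch of how a server entry point might wire printStackAndExit() to fatal signals; the function name and the set of signals chosen here are illustrative, not taken from this commit:

#include "pch.h"
#include "util/signal_handlers.h"

#include <csignal>

namespace mongo {

    // forwards the raw signal number to the async-signal-safe printer above
    static void fatalSignalHandler( int signalNum ) {
        printStackAndExit( signalNum );
    }

    void setupFatalSignalHandlers() {
        // an illustrative selection of fatal signals; a real server registers more
        signal( SIGSEGV , fatalSignalHandler );
        signal( SIGBUS  , fatalSignalHandler );
        signal( SIGABRT , fatalSignalHandler );
        signal( SIGFPE  , fatalSignalHandler );
    }

} // namespace mongo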
diff --git a/src/mongo/util/signal_handlers.h b/src/mongo/util/signal_handlers.h
new file mode 100644
index 00000000000..9d3a735a723
--- /dev/null
+++ b/src/mongo/util/signal_handlers.h
@@ -0,0 +1,34 @@
+// signal_handlers.h
+
+/**
+* Copyright (C) 2010 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+#include "../pch.h"
+
+namespace mongo {
+
+ /**
+ * Obtains the log file descriptor and writes the current thread's stack trace to
+ * it, then calls exit(). The function can safely be called from within a
+ * signal handler.
+ *
+ * @param signalNum the signal that this handler is called for
+ */
+ void printStackAndExit( int signalNum );
+
+} // namespace mongo
diff --git a/src/mongo/util/string_writer.h b/src/mongo/util/string_writer.h
new file mode 100755
index 00000000000..e83881bf6f6
--- /dev/null
+++ b/src/mongo/util/string_writer.h
@@ -0,0 +1,28 @@
+/**
+ * Copyright 2011 (c) 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "pch.h"
+
+namespace mongo {
+
+ class StringWriter {
+ public:
+ virtual ~StringWriter() {};
+ virtual void writeString(stringstream &ss) const = 0;
+ };
+}
diff --git a/src/mongo/util/stringutils.cpp b/src/mongo/util/stringutils.cpp
new file mode 100644
index 00000000000..229f57bb3cb
--- /dev/null
+++ b/src/mongo/util/stringutils.cpp
@@ -0,0 +1,44 @@
+// stringutils.cpp
+
+/**
+* Copyright (C) 2008 10gen Inc.
+*
+* This program is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Affero General Public License, version 3,
+* as published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU Affero General Public License for more details.
+*
+* You should have received a copy of the GNU Affero General Public License
+* along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "pch.h"
+
+namespace mongo {
+
+ void splitStringDelim( const string& str , vector<string>* res , char delim ) {
+ if ( str.empty() )
+ return;
+
+ size_t beg = 0;
+ size_t pos = str.find( delim );
+ while ( pos != string::npos ) {
+ res->push_back( str.substr( beg, pos - beg) );
+ beg = ++pos;
+ pos = str.find( delim, beg );
+ }
+ res->push_back( str.substr( beg ) );
+ }
+
+ void joinStringDelim( const vector<string>& strs , string* res , char delim ) {
+ for ( vector<string>::const_iterator it = strs.begin(); it != strs.end(); ++it ) {
+ if ( it !=strs.begin() ) res->push_back( delim );
+ res->append( *it );
+ }
+ }
+
+} // namespace mongo
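A small round-trip sketch of the two helpers above; it is not part of the commit, follows the include pattern of the other .cpp files (pch.h first), and the sample strings are illustrative:

#include "pch.h"
#include "util/stringutils.h"

namespace mongo {

    void splitJoinExample() {
        vector<string> parts;
        splitStringDelim( "db.collection.index" , &parts , '.' );
        // parts == { "db", "collection", "index" }

        string joined;
        joinStringDelim( parts , &joined , '.' );
        // joined == "db.collection.index"
    }

} // namespace mongo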
diff --git a/src/mongo/util/stringutils.h b/src/mongo/util/stringutils.h
new file mode 100644
index 00000000000..93598aa520b
--- /dev/null
+++ b/src/mongo/util/stringutils.h
@@ -0,0 +1,139 @@
+// stringutils.h
+
+/* Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+namespace mongo {
+
+ // see also mongoutils/str.h - perhaps move these there?
+ // see also text.h
+
+ void splitStringDelim( const string& str , vector<string>* res , char delim );
+
+ void joinStringDelim( const vector<string>& strs , string* res , char delim );
+
+ inline string tolowerString( const string& input ) {
+ string::size_type sz = input.size();
+
+ boost::scoped_array<char> line(new char[sz+1]);
+ char * copy = line.get();
+
+ for ( string::size_type i=0; i<sz; i++ ) {
+ char c = input[i];
+ copy[i] = (char)tolower( (int)c );
+ }
+ copy[sz] = 0;
+ return string(copy);
+ }
+
+ /**
+ * Non numeric characters are compared lexicographically; numeric substrings
+ * are compared numerically; dots separate ordered comparable subunits.
+ * For convenience, character 255 is greater than anything else.
+ */
+ inline int lexNumCmp( const char *s1, const char *s2 ) {
+ //cout << "START : " << s1 << "\t" << s2 << endl;
+
+ bool startWord = true;
+
+ while( *s1 && *s2 ) {
+
+ bool d1 = ( *s1 == '.' );
+ bool d2 = ( *s2 == '.' );
+ if ( d1 && !d2 )
+ return -1;
+ if ( d2 && !d1 )
+ return 1;
+ if ( d1 && d2 ) {
+ ++s1; ++s2;
+ startWord = true;
+ continue;
+ }
+
+ bool p1 = ( *s1 == (char)255 );
+ bool p2 = ( *s2 == (char)255 );
+ //cout << "\t\t " << p1 << "\t" << p2 << endl;
+ if ( p1 && !p2 )
+ return 1;
+ if ( p2 && !p1 )
+ return -1;
+
+ bool n1 = isNumber( *s1 );
+ bool n2 = isNumber( *s2 );
+
+ if ( n1 && n2 ) {
+ // get rid of leading 0s
+ if ( startWord ) {
+ while ( *s1 == '0' ) s1++;
+ while ( *s2 == '0' ) s2++;
+ }
+
+ char * e1 = (char*)s1;
+ char * e2 = (char*)s2;
+
+ // find length
+ // if end of string, will break immediately ('\0')
+ while ( isNumber (*e1) ) e1++;
+ while ( isNumber (*e2) ) e2++;
+
+ int len1 = (int)(e1-s1);
+ int len2 = (int)(e2-s2);
+
+ int result;
+ // if one is longer than the other, return
+ if ( len1 > len2 ) {
+ return 1;
+ }
+ else if ( len2 > len1 ) {
+ return -1;
+ }
+ // if the lengths are equal, just strcmp
+ else if ( (result = strncmp(s1, s2, len1)) != 0 ) {
+ return result;
+ }
+
+ // otherwise, the numbers are equal
+ s1 = e1;
+ s2 = e2;
+ startWord = false;
+ continue;
+ }
+
+ if ( n1 )
+ return 1;
+
+ if ( n2 )
+ return -1;
+
+ if ( *s1 > *s2 )
+ return 1;
+
+ if ( *s2 > *s1 )
+ return -1;
+
+ s1++; s2++;
+ startWord = false;
+ }
+
+ if ( *s1 )
+ return 1;
+ if ( *s2 )
+ return -1;
+ return 0;
+ }
+
+} // namespace mongo
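A quick sketch of the "natural" ordering lexNumCmp() produces; the inputs are illustrative and the block assumes the usual pch is included first, as in the files above:

#include "pch.h"
#include "util/stringutils.h"

namespace mongo {

    void lexNumCmpExamples() {
        // numeric substrings compare numerically, so "f2" sorts before "f10"
        assert( lexNumCmp( "f2" , "f10" ) < 0 );

        // dots separate ordered subunits, much like version components
        assert( lexNumCmp( "a.b.c" , "a.b" ) > 0 );

        // ordinary lexicographic behaviour for non-numeric text
        assert( lexNumCmp( "abc" , "abd" ) < 0 );
    }

} // namespace mongo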
diff --git a/src/mongo/util/systeminfo.h b/src/mongo/util/systeminfo.h
new file mode 100755
index 00000000000..be4404ff785
--- /dev/null
+++ b/src/mongo/util/systeminfo.h
@@ -0,0 +1,41 @@
+/**
+ * Copyright 2011 (c) 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include <cstddef>
+
+namespace mongo {
+
+ class SystemInfo {
+ public:
+ /*
+ Get the amount of physical memory available on the host.
+
+ This should only be used for "advisory" purposes, and not as a hard
+ value, because this could be deceptive on virtual hosts, and because
+ this will return zero on platforms that do not support it.
+
+ @returns amount of physical memory, or zero
+ */
+ static size_t getPhysicalRam();
+
+ private:
+ // don't instantiate this class
+ SystemInfo(); // no implementation
+ };
+
+}
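A sketch of the kind of advisory use the comment above has in mind; the 1 GB threshold is illustrative, and log() is assumed to be available via the pch as in the other files:

#include "pch.h"
#include "util/systeminfo.h"

namespace mongo {

    void warnIfLowMemory() {
        size_t ram = SystemInfo::getPhysicalRam();
        if ( ram == 0 )
            return;   // unsupported platform: the value is unknown, not literally zero RAM
        if ( ram < 1024ULL * 1024 * 1024 )
            log() << "warning: less than 1GB of physical RAM detected" << endl;
    }

} // namespace mongo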
diff --git a/src/mongo/util/systeminfo_linux2.cpp b/src/mongo/util/systeminfo_linux2.cpp
new file mode 100755
index 00000000000..c1b7c861768
--- /dev/null
+++ b/src/mongo/util/systeminfo_linux2.cpp
@@ -0,0 +1,47 @@
+/**
+ * Copyright (c) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "util/systeminfo.h"
+
+#include <unistd.h>
+
+namespace mongo {
+
+ size_t SystemInfo::getPhysicalRam() {
+ /*
+ The value of this should not be changing while the system is running,
+ so it should be safe to do this once for the lifetime of the
+ application.
+
+ This could present a race condition if multiple threads do this at
+ the same time, but all paths through here will produce the same
+ result, so it's not worth locking or worrying about it.
+ */
+ static bool unknown = true;
+ static size_t ramSize = 0;
+
+ if (unknown) {
+ long pages = sysconf(_SC_PHYS_PAGES);
+ long page_size = sysconf(_SC_PAGE_SIZE);
+ ramSize = pages * page_size;
+ unknown = false;
+ }
+
+ return ramSize;
+ }
+
+}
diff --git a/src/mongo/util/systeminfo_none.cpp b/src/mongo/util/systeminfo_none.cpp
new file mode 100755
index 00000000000..d22ce17f6b9
--- /dev/null
+++ b/src/mongo/util/systeminfo_none.cpp
@@ -0,0 +1,26 @@
+/**
+ * Copyright (c) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "util/systeminfo.h"
+
+namespace mongo {
+
+ size_t SystemInfo::getPhysicalRam() {
+ return 0;
+ }
+
+}
diff --git a/src/mongo/util/systeminfo_win32.cpp b/src/mongo/util/systeminfo_win32.cpp
new file mode 100755
index 00000000000..19c182878ee
--- /dev/null
+++ b/src/mongo/util/systeminfo_win32.cpp
@@ -0,0 +1,48 @@
+/**
+ * Copyright (c) 2011 10gen Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "pch.h"
+#include "util/systeminfo.h"
+
+#include <windows.h>
+
+namespace mongo {
+
+ size_t SystemInfo::getPhysicalRam() {
+ /*
+ The value of this should not be changing while the system is running,
+ so it should be safe to do this once for the lifetime of the
+ application.
+
+ This could present a race condition if multiple threads do this at
+ the same time, but all paths through here will produce the same
+ result, so it's not worth locking or worrying about it.
+ */
+ static bool unknown = true;
+ static size_t ramSize = 0;
+
+ if (unknown) {
+ MEMORYSTATUSEX status;
+ status.dwLength = sizeof(status);
+ GlobalMemoryStatusEx(&status);
+ ramSize = static_cast<size_t>(status.ullTotalPhys);
+ unknown = false;
+ }
+
+ return ramSize;
+ }
+
+}
diff --git a/src/mongo/util/text.cpp b/src/mongo/util/text.cpp
new file mode 100644
index 00000000000..51a2556afdc
--- /dev/null
+++ b/src/mongo/util/text.cpp
@@ -0,0 +1,115 @@
+// text.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "text.h"
+#include "unittest.h"
+
+namespace mongo {
+
+ inline int leadingOnes(unsigned char c) {
+ if (c < 0x80) return 0;
+ static const char _leadingOnes[128] = {
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0x80 - 0x8F
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0x90 - 0x9F
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0xA0 - 0xAF
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 0xB0 - 0xBF
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // 0xC0 - 0xCF
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, // 0xD0 - 0xDF
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, // 0xE0 - 0xEF
+ 4, 4, 4, 4, 4, 4, 4, 4, // 0xF0 - 0xF7
+ 5, 5, 5, 5, // 0xF8 - 0xFB
+ 6, 6, // 0xFC - 0xFD
+ 7, // 0xFE
+ 8, // 0xFF
+ };
+ return _leadingOnes[c & 0x7f];
+
+ }
+
+ bool isValidUTF8(const char *s) {
+ int left = 0; // how many bytes are left in the current codepoint
+ while (*s) {
+ const unsigned char c = (unsigned char) *(s++);
+ const int ones = leadingOnes(c);
+ if (left) {
+ if (ones != 1) return false; // should be a continuation byte
+ left--;
+ }
+ else {
+ if (ones == 0) continue; // ASCII byte
+ if (ones == 1) return false; // unexpected continuation byte
+ if (c > 0xF4) return false; // codepoint too large (above U+10FFFF)
+ if (c == 0xC0 || c == 0xC1) return false; // codepoints <= 0x7F shouldn't be 2 bytes
+
+ // still valid
+ left = ones-1;
+ }
+ }
+ if (left!=0) return false; // string ended mid-codepoint
+ return true;
+ }
+
+#if defined(_WIN32)
+
+ std::string toUtf8String(const std::wstring& wide) {
+ if (wide.size() > boost::integer_traits<int>::const_max)
+ throw std::length_error(
+ "Wide string cannot be more than INT_MAX characters long.");
+ if (wide.size() == 0)
+ return "";
+
+ // Calculate necessary buffer size
+ int len = ::WideCharToMultiByte(
+ CP_UTF8, 0, wide.c_str(), static_cast<int>(wide.size()),
+ NULL, 0, NULL, NULL);
+
+ // Perform actual conversion
+ if (len > 0) {
+ std::vector<char> buffer(len);
+ len = ::WideCharToMultiByte(
+ CP_UTF8, 0, wide.c_str(), static_cast<int>(wide.size()),
+ &buffer[0], static_cast<int>(buffer.size()), NULL, NULL);
+ if (len > 0) {
+ assert(len == static_cast<int>(buffer.size()));
+ return std::string(&buffer[0], buffer.size());
+ }
+ }
+
+ throw boost::system::system_error(
+ ::GetLastError(), boost::system::system_category);
+ }
+
+#if defined(_UNICODE)
+ std::wstring toWideString(const char *s) {
+ std::basic_ostringstream<TCHAR> buf;
+ buf << s;
+ return buf.str();
+ }
+#endif
+
+#endif
+
+ struct TextUnitTest : public UnitTest {
+ void run() {
+ assert( parseLL("123") == 123 );
+ assert( parseLL("-123000000000") == -123000000000LL );
+ }
+ } textUnitTest;
+
+}
+
diff --git a/src/mongo/util/text.h b/src/mongo/util/text.h
new file mode 100644
index 00000000000..bf25c86fd39
--- /dev/null
+++ b/src/mongo/util/text.h
@@ -0,0 +1,148 @@
+// text.h
+/*
+ * Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+namespace mongo {
+
+ class StringSplitter {
+ public:
+ /** @param big the string to be split
+ @param splitter the delimiter
+ */
+ StringSplitter( const char * big , const char * splitter )
+ : _big( big ) , _splitter( splitter ) {
+ }
+
+ /** @return true if more to be taken via next() */
+ bool more() {
+ return _big[0] != 0;
+ }
+
+ /** get next split string fragment */
+ string next() {
+ const char * foo = strstr( _big , _splitter );
+ if ( foo ) {
+ string s( _big , foo - _big );
+ _big = foo + 1;
+ while ( *_big && strstr( _big , _splitter ) == _big )
+ _big++;
+ return s;
+ }
+
+ string s = _big;
+ _big += strlen( _big );
+ return s;
+ }
+
+ void split( vector<string>& l ) {
+ while ( more() ) {
+ l.push_back( next() );
+ }
+ }
+
+ vector<string> split() {
+ vector<string> l;
+ split( l );
+ return l;
+ }
+
+ static vector<string> split( const string& big , const string& splitter ) {
+ StringSplitter ss( big.c_str() , splitter.c_str() );
+ return ss.split();
+ }
+
+ static string join( vector<string>& l , const string& split ) {
+ stringstream ss;
+ for ( unsigned i=0; i<l.size(); i++ ) {
+ if ( i > 0 )
+ ss << split;
+ ss << l[i];
+ }
+ return ss.str();
+ }
+
+ private:
+ const char * _big;
+ const char * _splitter;
+ };
+
+ /* This doesn't defend against ALL bad UTF8, but it will guarantee that the
+ * string can be converted to a sequence of codepoints. However, it doesn't
+ * guarantee that the codepoints are valid.
+ */
+ bool isValidUTF8(const char *s);
+ inline bool isValidUTF8(string s) { return isValidUTF8(s.c_str()); }
+
+#if defined(_WIN32)
+
+ std::string toUtf8String(const std::wstring& wide);
+
+ std::wstring toWideString(const char *s);
+
+ /* like toWideString but UNICODE macro sensitive */
+# if !defined(_UNICODE)
+#error temp error
+ inline std::string toNativeString(const char *s) { return s; }
+# else
+ inline std::wstring toNativeString(const char *s) { return toWideString(s); }
+# endif
+
+#endif
+
+ // expect that n contains a base ten number and nothing else after it
+ // NOTE win version hasn't been tested directly
+ inline long long parseLL( const char *n ) {
+ long long ret;
+ uassert( 13307, "cannot convert empty string to long long", *n != 0 );
+#if !defined(_WIN32)
+ char *endPtr = 0;
+ errno = 0;
+ ret = strtoll( n, &endPtr, 10 );
+ uassert( 13305, "could not convert string to long long", *endPtr == 0 && errno == 0 );
+#elif _MSC_VER>=1600 // 1600 is VS2k10 1500 is VS2k8
+ size_t endLen = 0;
+ try {
+ ret = stoll( n, &endLen, 10 );
+ }
+ catch ( ... ) {
+ endLen = 0;
+ }
+ uassert( 13306, "could not convert string to long long", endLen != 0 && n[ endLen ] == 0 );
+#else // stoll() wasn't introduced until VS 2010.
+ char* endPtr = 0;
+ ret = _strtoi64( n, &endPtr, 10 );
+ uassert( 13310, "could not convert string to long long", (*endPtr == 0) && (ret != _I64_MAX) && (ret != _I64_MIN) );
+#endif // !defined(_WIN32)
+ return ret;
+ }
+}
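A sketch exercising StringSplitter, parseLL(), and isValidUTF8() from the header above; it is not part of the commit, the inputs are illustrative, and the usual pch is assumed to be included first:

#include "pch.h"
#include "util/text.h"

namespace mongo {

    void textExamples() {
        // split on a delimiter string
        vector<string> parts = StringSplitter::split( "a,b,c" , "," );
        // parts == { "a", "b", "c" }

        // parseLL converts a base-ten string, uasserting on malformed or empty input
        long long n = parseLL( "-123000000000" );   // == -123000000000LL

        // isValidUTF8 checks that a string decodes to a sequence of codepoints
        bool ok = isValidUTF8( "plain ascii is fine" );   // == true

        (void)n; (void)ok;
    }

} // namespace mongo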
diff --git a/src/mongo/util/time_support.h b/src/mongo/util/time_support.h
new file mode 100644
index 00000000000..18181eb805a
--- /dev/null
+++ b/src/mongo/util/time_support.h
@@ -0,0 +1,255 @@
+// @file time_support.h
+
+/* Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <cstdio> // sscanf
+#include <ctime>
+#include <boost/date_time/posix_time/posix_time.hpp>
+#include <boost/thread/xtime.hpp>
+#undef assert
+#define assert MONGO_assert
+
+namespace mongo {
+
+ inline void time_t_to_Struct(time_t t, struct tm * buf , bool local = false ) {
+#if defined(_WIN32)
+ if ( local )
+ localtime_s( buf , &t );
+ else
+ gmtime_s(buf, &t);
+#else
+ if ( local )
+ localtime_r(&t, buf);
+ else
+ gmtime_r(&t, buf);
+#endif
+ }
+
+ // uses ISO 8601 dates without trailing Z
+ // colonsOk should be false when creating filenames
+ inline string terseCurrentTime(bool colonsOk=true) {
+ struct tm t;
+ time_t_to_Struct( time(0) , &t );
+
+ const char* fmt = (colonsOk ? "%Y-%m-%dT%H:%M:%S" : "%Y-%m-%dT%H-%M-%S");
+ char buf[32];
+ assert(strftime(buf, sizeof(buf), fmt, &t) == 19);
+ return buf;
+ }
+
+ inline string timeToISOString(time_t time) {
+ struct tm t;
+ time_t_to_Struct( time, &t );
+
+ const char* fmt = "%Y-%m-%dT%H:%M:%SZ";
+ char buf[32];
+ assert(strftime(buf, sizeof(buf), fmt, &t) == 20);
+ return buf;
+ }
+
+ inline boost::gregorian::date currentDate() {
+ boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();
+ return now.date();
+ }
+
+ // parses time of day in "hh:mm" format assuming 'hh' is 00-23
+ inline bool toPointInTime( const string& str , boost::posix_time::ptime* timeOfDay ) {
+ int hh = 0;
+ int mm = 0;
+ if ( 2 != sscanf( str.c_str() , "%d:%d" , &hh , &mm ) ) {
+ return false;
+ }
+
+ // verify that time is well formed
+ if ( ( hh / 24 ) || ( mm / 60 ) ) {
+ return false;
+ }
+
+ boost::posix_time::ptime res( currentDate() , boost::posix_time::hours( hh ) + boost::posix_time::minutes( mm ) );
+ *timeOfDay = res;
+ return true;
+ }
+
+#define MONGO_asctime _asctime_not_threadsafe_
+#define asctime MONGO_asctime
+#define MONGO_gmtime _gmtime_not_threadsafe_
+#define gmtime MONGO_gmtime
+#define MONGO_localtime _localtime_not_threadsafe_
+#define localtime MONGO_localtime
+#define MONGO_ctime _ctime_is_not_threadsafe_
+#define ctime MONGO_ctime
+
+#if defined(_WIN32)
+ inline void sleepsecs(int s) {
+ // todo : add an assert here that we are not locked in d.dbMutex. there may be debugging things where we
+ // are but otherwise it's quite likely that would be a mistake.
+ Sleep(s*1000);
+ }
+ inline void sleepmillis(long long s) {
+ assert( s <= 0xffffffff );
+ Sleep((DWORD) s);
+ }
+ inline void sleepmicros(long long s) {
+ if ( s <= 0 )
+ return;
+ boost::xtime xt;
+ boost::xtime_get(&xt, boost::TIME_UTC);
+ xt.sec += (int)( s / 1000000 );
+ xt.nsec += (int)(( s % 1000000 ) * 1000);
+ if ( xt.nsec >= 1000000000 ) {
+ xt.nsec -= 1000000000;
+ xt.sec++;
+ }
+ boost::thread::sleep(xt);
+ }
+#elif defined(__sunos__)
+ inline void sleepsecs(int s) {
+ boost::xtime xt;
+ boost::xtime_get(&xt, boost::TIME_UTC);
+ xt.sec += s;
+ boost::thread::sleep(xt);
+ }
+ inline void sleepmillis(long long s) {
+ boost::xtime xt;
+ boost::xtime_get(&xt, boost::TIME_UTC);
+ xt.sec += (int)( s / 1000 );
+ xt.nsec += (int)(( s % 1000 ) * 1000000);
+ if ( xt.nsec >= 1000000000 ) {
+ xt.nsec -= 1000000000;
+ xt.sec++;
+ }
+ boost::thread::sleep(xt);
+ }
+ inline void sleepmicros(long long s) {
+ if ( s <= 0 )
+ return;
+ boost::xtime xt;
+ boost::xtime_get(&xt, boost::TIME_UTC);
+ xt.sec += (int)( s / 1000000 );
+ xt.nsec += (int)(( s % 1000000 ) * 1000);
+ if ( xt.nsec >= 1000000000 ) {
+ xt.nsec -= 1000000000;
+ xt.sec++;
+ }
+ boost::thread::sleep(xt);
+ }
+#else
+ inline void sleepsecs(int s) {
+ struct timespec t;
+ t.tv_sec = s;
+ t.tv_nsec = 0;
+ if ( nanosleep( &t , 0 ) ) {
+ cout << "nanosleep failed" << endl;
+ }
+ }
+ inline void sleepmicros(long long s) {
+ if ( s <= 0 )
+ return;
+ struct timespec t;
+ t.tv_sec = (int)(s / 1000000);
+ t.tv_nsec = 1000 * ( s % 1000000 );
+ struct timespec out;
+ if ( nanosleep( &t , &out ) ) {
+ cout << "nanosleep failed" << endl;
+ }
+ }
+ inline void sleepmillis(long long s) {
+ sleepmicros( s * 1000 );
+ }
+#endif
+
+ extern long long jsTime_virtual_skew;
+ extern boost::thread_specific_ptr<long long> jsTime_virtual_thread_skew;
+
+ // DO NOT TOUCH except for testing
+ inline void jsTimeVirtualSkew( long long skew ){
+ jsTime_virtual_skew = skew;
+ }
+ inline long long getJSTimeVirtualSkew(){
+ return jsTime_virtual_skew;
+ }
+
+ inline void jsTimeVirtualThreadSkew( long long skew ){
+ jsTime_virtual_thread_skew.reset(new long long(skew));
+ }
+ inline long long getJSTimeVirtualThreadSkew(){
+ if(jsTime_virtual_thread_skew.get()){
+ return *(jsTime_virtual_thread_skew.get());
+ }
+ else return 0;
+ }
+
+ /** Date_t is milliseconds since epoch */
+ inline Date_t jsTime();
+
+ /** warning this will wrap */
+ inline unsigned curTimeMicros();
+
+ inline unsigned long long curTimeMicros64();
+#ifdef _WIN32 // no gettimeofday on windows
+ inline unsigned long long curTimeMillis64() {
+ boost::xtime xt;
+ boost::xtime_get(&xt, boost::TIME_UTC);
+ return ((unsigned long long)xt.sec) * 1000 + xt.nsec / 1000000;
+ }
+ inline Date_t jsTime() {
+ boost::xtime xt;
+ boost::xtime_get(&xt, boost::TIME_UTC);
+ unsigned long long t = xt.nsec / 1000000;
+ return ((unsigned long long) xt.sec * 1000) + t + getJSTimeVirtualSkew() + getJSTimeVirtualThreadSkew();
+ }
+ inline unsigned long long curTimeMicros64() {
+ boost::xtime xt;
+ boost::xtime_get(&xt, boost::TIME_UTC);
+ unsigned long long t = xt.nsec / 1000;
+ return (((unsigned long long) xt.sec) * 1000000) + t;
+ }
+ inline unsigned curTimeMicros() {
+ boost::xtime xt;
+ boost::xtime_get(&xt, boost::TIME_UTC);
+ unsigned t = xt.nsec / 1000;
+ unsigned secs = xt.sec % 1024;
+ return secs*1000000 + t;
+ }
+#else
+# include <sys/time.h>
+ inline unsigned long long curTimeMillis64() {
+ timeval tv;
+ gettimeofday(&tv, NULL);
+ return ((unsigned long long)tv.tv_sec) * 1000 + tv.tv_usec / 1000;
+ }
+ inline Date_t jsTime() {
+ timeval tv;
+ gettimeofday(&tv, NULL);
+ unsigned long long t = tv.tv_usec / 1000;
+ return ((unsigned long long) tv.tv_sec * 1000) + t + getJSTimeVirtualSkew() + getJSTimeVirtualThreadSkew();
+ }
+ inline unsigned long long curTimeMicros64() {
+ timeval tv;
+ gettimeofday(&tv, NULL);
+ return (((unsigned long long) tv.tv_sec) * 1000*1000) + tv.tv_usec;
+ }
+ inline unsigned curTimeMicros() {
+ timeval tv;
+ gettimeofday(&tv, NULL);
+ unsigned secs = tv.tv_sec % 1024;
+ return secs*1000*1000 + tv.tv_usec;
+ }
+#endif
+
+} // namespace mongo
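A small sketch using a few of the helpers above; the backup-file naming is illustrative and log() is assumed to come in via the pch:

#include "pch.h"
#include "util/time_support.h"

namespace mongo {

    void timeExamples() {
        // colonsOk=false yields a timestamp that is safe to embed in a filename
        string backupName = "backup-" + terseCurrentTime(false) + ".log";

        unsigned long long start = curTimeMillis64();
        sleepmillis( 50 );                                   // sleep roughly 50 ms
        unsigned long long elapsedMs = curTimeMillis64() - start;

        log() << backupName << " simulated in " << elapsedMs << "ms" << endl;
    }

} // namespace mongo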
diff --git a/src/mongo/util/timer.h b/src/mongo/util/timer.h
new file mode 100644
index 00000000000..224651ac224
--- /dev/null
+++ b/src/mongo/util/timer.h
@@ -0,0 +1,115 @@
+// @file timer.h
+
+/* Copyright 2010 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "time_support.h"
+
+namespace mongo {
+
+#if !defined(_WIN32)
+
+ /**
+ * simple scoped timer
+ */
+ class Timer /*copyable*/ {
+ public:
+ Timer() { reset(); }
+ int seconds() const { return (int)(micros() / 1000000); }
+ int millis() const { return (int)(micros() / 1000); }
+ int minutes() const { return seconds() / 60; }
+
+
+ /** gets time interval and resets at the same time. this way we can call curTimeMicros
+ once instead of twice if one wanted millis() and then reset().
+ @return time in millis
+ */
+ int millisReset() {
+ unsigned long long now = curTimeMicros64();
+ int m = (int)((now-old)/1000);
+ old = now;
+ return m;
+ }
+
+ // note: it is dubious that the resolution is anywhere near as high as the method name implies!
+ unsigned long long micros() const {
+ unsigned long long n = curTimeMicros64();
+ return n - old;
+ }
+ unsigned long long micros(unsigned long long & n) const { // returns cur time in addition to timer result
+ n = curTimeMicros64();
+ return n - old;
+ }
+
+ void reset() { old = curTimeMicros64(); }
+ private:
+ unsigned long long old;
+ };
+
+#else
+
+ class Timer /*copyable*/ {
+ public:
+ Timer() { reset(); }
+
+ int seconds() const {
+ int s = static_cast<int>((now() - old) / countsPerSecond);
+ return s;
+ }
+
+ int millis() const {
+ return (int)
+ ((now() - old) * 1000.0 / countsPerSecond);
+ }
+
+ int minutes() const { return seconds() / 60; }
+
+ /** gets time interval and resets at the same time. this way we can call curTimeMicros
+ once instead of twice if one wanted millis() and then reset().
+ @return time in millis
+ */
+ int millisReset() {
+ unsigned long long nw = now();
+ int m = static_cast<int>((nw - old) * 1000.0 / countsPerSecond);
+ old = nw;
+ return m;
+ }
+
+ void reset() {
+ old = now();
+ }
+
+ unsigned long long micros() const {
+ return (unsigned long long)
+ ((now() - old) * 1000000.0 / countsPerSecond);
+ }
+
+ static unsigned long long countsPerSecond;
+
+ private:
+ unsigned long long now() const {
+ LARGE_INTEGER i;
+ QueryPerformanceCounter(&i);
+ return i.QuadPart;
+ }
+
+ unsigned long long old;
+ };
+
+#endif
+
+} // namespace mongo
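A minimal sketch of the Timer class above measuring a block of work; the workload is a stand-in and log() is assumed to be available via the pch:

#include "pch.h"
#include "util/timer.h"

namespace mongo {

    void timedWork() {
        Timer t;                              // starts timing at construction

        long long sum = 0;
        for ( int i = 0; i < 1000000; i++ )   // stand-in for real work
            sum += i;

        log() << "work took " << t.millis() << "ms (" << t.micros() << "us), sum=" << sum << endl;

        t.reset();                            // begin a new interval
    }

} // namespace mongo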
diff --git a/src/mongo/util/unittest.h b/src/mongo/util/unittest.h
new file mode 100644
index 00000000000..94be444363f
--- /dev/null
+++ b/src/mongo/util/unittest.h
@@ -0,0 +1,62 @@
+// unittest.h
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+namespace mongo {
+
+ /* The idea here is to let all initialization of global variables (classes inheriting from UnitTest)
+ complete before we run the tests -- otherwise the arbitrary order of initialization may mess
+ us up. The app's main() function should call runTests().
+
+ To define a unit test, inherit from this and implement run(). Instantiate one object of the new class
+ as a global.
+
+ These tests are run on *every* startup of mongod, so they have to be very lightweight. But they are a
+ good quick check for a bad build.
+ */
+ struct UnitTest {
+ UnitTest() {
+ registerTest(this);
+ }
+ virtual ~UnitTest() {}
+
+ // assert if fails
+ virtual void run() = 0;
+
+ static bool testsInProgress() { return running; }
+ private:
+ static vector<UnitTest*> *tests;
+ static bool running;
+ public:
+ static void registerTest(UnitTest *t) {
+ if ( tests == 0 )
+ tests = new vector<UnitTest*>();
+ tests->push_back(t);
+ }
+
+ static void runTests() {
+ running = true;
+ for ( vector<UnitTest*>::iterator i = tests->begin(); i != tests->end(); i++ ) {
+ (*i)->run();
+ }
+ running = false;
+ }
+ };
+
+
+} // namespace mongo
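A sketch of how a new startup test is added with the class above; the test body is illustrative and the pch is assumed to be included first, as in the .cpp files of this commit:

#include "pch.h"
#include "util/unittest.h"

namespace mongo {

    // the UnitTest constructor registers the instance; runTests() later calls run()
    struct ExampleUnitTest : public UnitTest {
        void run() {
            assert( 2 + 2 == 4 );   // keep startup tests very lightweight
        }
    } exampleUnitTest;   // one global instance is all that is needed

} // namespace mongo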
diff --git a/src/mongo/util/util.cpp b/src/mongo/util/util.cpp
new file mode 100644
index 00000000000..356c640f449
--- /dev/null
+++ b/src/mongo/util/util.cpp
@@ -0,0 +1,220 @@
+// @file util.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include "goodies.h"
+#include "unittest.h"
+#include "file_allocator.h"
+#include "optime.h"
+#include "time_support.h"
+#include "mongoutils/str.h"
+#include "timer.h"
+
+namespace mongo {
+
+#if defined(_WIN32)
+ unsigned long long Timer::countsPerSecond;
+ struct AtStartup {
+ AtStartup() {
+ LARGE_INTEGER x;
+ bool ok = QueryPerformanceFrequency(&x);
+ assert(ok);
+ Timer::countsPerSecond = x.QuadPart;
+ }
+ } atstartuputil;
+#endif
+
+ string hexdump(const char *data, unsigned len) {
+ assert( len < 1000000 );
+ const unsigned char *p = (const unsigned char *) data;
+ stringstream ss;
+ for( unsigned i = 0; i < 4 && i < len; i++ ) {
+ ss << std::hex << setw(2) << setfill('0');
+ unsigned n = p[i];
+ ss << n;
+ ss << ' ';
+ }
+ string s = ss.str();
+ return s;
+ }
+
+ boost::thread_specific_ptr<string> _threadName;
+
+ unsigned _setThreadName( const char * name ) {
+ if ( ! name ) name = "NONE";
+
+ static unsigned N = 0;
+
+ if ( strcmp( name , "conn" ) == 0 ) {
+ string* x = _threadName.get();
+ if ( x && mongoutils::str::startsWith( *x , "conn" ) ) {
+ int n = atoi( x->c_str() + 4 );
+ if ( n > 0 )
+ return n;
+ warning() << "unexpected thread name [" << *x << "] parsed to " << n << endl;
+ }
+ unsigned n = ++N;
+ stringstream ss;
+ ss << name << n;
+ _threadName.reset( new string( ss.str() ) );
+ return n;
+ }
+
+ _threadName.reset( new string(name) );
+ return 0;
+ }
+
+#if defined(_WIN32)
+#define MS_VC_EXCEPTION 0x406D1388
+#pragma pack(push,8)
+ typedef struct tagTHREADNAME_INFO {
+ DWORD dwType; // Must be 0x1000.
+ LPCSTR szName; // Pointer to name (in user addr space).
+ DWORD dwThreadID; // Thread ID (-1=caller thread).
+ DWORD dwFlags; // Reserved for future use, must be zero.
+ } THREADNAME_INFO;
+#pragma pack(pop)
+
+ void setWinThreadName(const char *name) {
+ /* is the sleep here necessary???
+ Sleep(10);
+ */
+ THREADNAME_INFO info;
+ info.dwType = 0x1000;
+ info.szName = name;
+ info.dwThreadID = -1;
+ info.dwFlags = 0;
+ __try {
+ RaiseException( MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(ULONG_PTR), (ULONG_PTR*)&info );
+ }
+ __except(EXCEPTION_EXECUTE_HANDLER) {
+ }
+ }
+
+ unsigned setThreadName(const char *name) {
+ unsigned n = _setThreadName( name );
+#if !defined(_DEBUG)
+ // naming might be expensive so don't do "conn*" over and over
+ if( string("conn") == name )
+ return n;
+#endif
+ setWinThreadName(name);
+ return n;
+ }
+
+#else
+
+ unsigned setThreadName(const char * name ) {
+ return _setThreadName( name );
+ }
+
+#endif
+
+ string getThreadName() {
+ string * s = _threadName.get();
+ if ( s )
+ return *s;
+ return "";
+ }
+
+ vector<UnitTest*> *UnitTest::tests = 0;
+ bool UnitTest::running = false;
+
+ const char *default_getcurns() { return ""; }
+ const char * (*getcurns)() = default_getcurns;
+
+ int logLevel = 0;
+ int tlogLevel = 0;
+ mongo::mutex Logstream::mutex("Logstream");
+ int Logstream::doneSetup = Logstream::magicNumber();
+
+ bool isPrime(int n) {
+ int z = 2;
+ while ( 1 ) {
+ if ( z*z > n )
+ break;
+ if ( n % z == 0 )
+ return false;
+ z++;
+ }
+ return true;
+ }
+
+ int nextPrime(int n) {
+ n |= 1; // 2 goes to 3...don't care...
+ while ( !isPrime(n) )
+ n += 2;
+ return n;
+ }
+
+ struct UtilTest : public UnitTest {
+ void run() {
+ assert( isPrime(3) );
+ assert( isPrime(2) );
+ assert( isPrime(13) );
+ assert( isPrime(17) );
+ assert( !isPrime(9) );
+ assert( !isPrime(6) );
+ assert( nextPrime(4) == 5 );
+ assert( nextPrime(8) == 11 );
+
+ assert( endsWith("abcde", "de") );
+ assert( !endsWith("abcde", "dasdfasdfashkfde") );
+
+ assert( swapEndian(0x01020304) == 0x04030201 );
+
+ }
+ } utilTest;
+
+ OpTime OpTime::last(0, 0);
+
+ /* this is a good place to set a breakpoint when debugging, as lots of warning things
+ (assert, wassert) call it.
+ */
+ void sayDbContext(const char *errmsg) {
+ if ( errmsg ) {
+ problem() << errmsg << endl;
+ }
+ printStackTrace();
+ }
+
+ /* note: can't use malloc here - we may be in a signal handler.
+ logLockless() likely does not comply and still needs to be fixed (todo);
+ likewise class string?
+ */
+ void rawOut( const string &s ) {
+ if( s.empty() ) return;
+
+ char buf[64];
+ time_t_to_String( time(0) , buf );
+ /* truncate / don't show the year: */
+ buf[19] = ' ';
+ buf[20] = 0;
+
+ Logstream::logLockless(buf);
+ Logstream::logLockless(s);
+ Logstream::logLockless("\n");
+ }
+
+ ostream& operator<<( ostream &s, const ThreadSafeString &o ) {
+ s << o.toString();
+ return s;
+ }
+
+ bool StaticObserver::_destroyingStatics = false;
+
+} // namespace mongo
diff --git a/src/mongo/util/version.cpp b/src/mongo/util/version.cpp
new file mode 100644
index 00000000000..1e4bc457f91
--- /dev/null
+++ b/src/mongo/util/version.cpp
@@ -0,0 +1,288 @@
+// @file version.cpp
+
+/* Copyright 2009 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "pch.h"
+#include <cstdlib>
+#include <iostream>
+#include <iomanip>
+#include <sstream>
+#include <string>
+#include "unittest.h"
+#include "version.h"
+#include "stringutils.h"
+#include "../db/jsobj.h"
+#include "file.h"
+#include "ramlog.h"
+#include "../db/cmdline.h"
+
+namespace mongo {
+
+ /* Approved formats for versionString:
+ * 1.2.3
+ * 1.2.3-pre-
+ * 1.2.3-rc4 (up to rc9)
+ * 1.2.3-rc4-pre-
+ * If you really need to do something else you'll need to fix _versionArray()
+ */
+ const char versionString[] = "2.1.0-pre-";
+
+ // See unit test for example outputs
+ static BSONArray _versionArray(const char* version){
+ // this is inefficient, but cached so it doesn't matter
+ BSONArrayBuilder b;
+ string curPart;
+ const char* c = version;
+ int finalPart = 0; // 0 = final release, -100 = pre, -10 to -1 = -10 + X for rcX
+ do { //walks versionString including NUL byte
+ if (!(*c == '.' || *c == '-' || *c == '\0')){
+ curPart += *c;
+ continue;
+ }
+
+ try {
+ unsigned num = stringToNum(curPart.c_str());
+ b.append((int) num);
+ }
+ catch (...){ // not a number
+ if (curPart.empty()){
+ assert(*c == '\0');
+ break;
+ }
+ else if (startsWith(curPart, "rc")){
+ finalPart = -10 + stringToNum(curPart.c_str()+2);
+ break;
+ }
+ else if (curPart == "pre"){
+ finalPart = -100;
+ break;
+ }
+ }
+
+ curPart = "";
+ } while (*c++);
+
+ b.append(finalPart);
+ return b.arr();
+ }
+
+ const BSONArray versionArray = _versionArray(versionString);
+
+ string mongodVersion() {
+ stringstream ss;
+ ss << "db version v" << versionString << ", pdfile version " << PDFILE_VERSION << "." << PDFILE_VERSION_MINOR;
+ return ss.str();
+ }
+
+#ifndef _SCONS
+ // only works in scons
+ const char * gitVersion() { return "not-scons"; }
+#endif
+
+ void printGitVersion() { log() << "git version: " << gitVersion() << endl; }
+
+#ifndef _SCONS
+#if defined(_WIN32)
+ string sysInfo() {
+ stringstream ss;
+ ss << "not-scons win";
+ ss << " mscver:" << _MSC_FULL_VER << " built:" << __DATE__;
+ ss << " boostver:" << BOOST_VERSION;
+#if( !defined(_MT) )
+#error _MT is not defined
+#endif
+ ss << ( sizeof(char *) == 8 ? " 64bit" : " 32bit" );
+ return ss.str();
+ }
+#else
+ string sysInfo() { return ""; }
+#endif
+#endif
+
+ void printSysInfo() {
+ log() << "build info: " << sysInfo() << endl;
+ }
+
+
+ static Tee * startupWarningsLog = new RamLog("startupWarnings"); //intentionally leaked
+
+ //
+ // system warnings
+ //
+ void show_warnings() {
+ // each message adds a leading and a trailing newline
+
+ bool warned = false;
+ {
+ const char * foo = strchr( versionString , '.' ) + 1;
+ int bar = atoi( foo );
+ if ( ( 2 * ( bar / 2 ) ) != bar ) {
+ log() << startupWarningsLog;
+ log() << "** NOTE: This is a development version (" << versionString << ") of MongoDB." << startupWarningsLog;
+ log() << "** Not recommended for production." << startupWarningsLog;
+ warned = true;
+ }
+ }
+
+ if ( sizeof(int*) == 4 ) {
+ log() << startupWarningsLog;
+ log() << "** NOTE: when using MongoDB 32 bit, you are limited to about 2 gigabytes of data" << startupWarningsLog;
+ log() << "** see http://blog.mongodb.org/post/137788967/32-bit-limitations" << startupWarningsLog;
+ log() << "** with --journal, the limit is lower" << startupWarningsLog;
+ warned = true;
+ }
+
+#ifdef __linux__
+ if (boost::filesystem::exists("/proc/vz") && !boost::filesystem::exists("/proc/bc")) {
+ log() << startupWarningsLog;
+ log() << "** WARNING: You are running in OpenVZ. This is known to be broken!!!" << startupWarningsLog;
+ warned = true;
+ }
+
+ if (boost::filesystem::exists("/sys/devices/system/node/node1")){
+ // We are on a box with a NUMA enabled kernel and more than 1 numa node (they start at node0)
+ // Now we look at the first line of /proc/self/numa_maps
+ //
+ // Bad example:
+ // $ cat /proc/self/numa_maps
+ // 00400000 default file=/bin/cat mapped=6 N4=6
+ //
+ // Good example:
+ // $ numactl --interleave=all cat /proc/self/numa_maps
+ // 00400000 interleave:0-7 file=/bin/cat mapped=6 N4=6
+
+ File f;
+ f.open("/proc/self/numa_maps", /*read_only*/true);
+ if ( f.is_open() && ! f.bad() ) {
+ char line[100]; //we only need the first line
+ if (read(f.fd, line, sizeof(line)) < 0){
+ warning() << "failed to read from /proc/self/numa_maps: " << errnoWithDescription() << startupWarningsLog;
+ warned = true;
+ }
+ else {
+ // just in case...
+ line[98] = ' ';
+ line[99] = '\0';
+
+ // skip over pointer
+ const char* space = strchr(line, ' ');
+
+ if ( ! space ) {
+ log() << startupWarningsLog;
+ log() << "** WARNING: cannot parse numa_maps" << startupWarningsLog;
+ warned = true;
+ }
+ else if ( ! startsWith(space+1, "interleave") ) {
+ log() << startupWarningsLog;
+ log() << "** WARNING: You are running on a NUMA machine." << startupWarningsLog;
+ log() << "** We suggest launching mongod like this to avoid performance problems:" << startupWarningsLog;
+ log() << "** numactl --interleave=all mongod [other options]" << startupWarningsLog;
+ warned = true;
+ }
+ }
+ }
+ }
+
+ if (cmdLine.dur){
+ fstream f ("/proc/sys/vm/overcommit_memory", ios_base::in);
+ unsigned val;
+ f >> val;
+
+ if (val == 2) {
+ log() << startupWarningsLog;
+ log() << "** WARNING: /proc/sys/vm/overcommit_memory is " << val << startupWarningsLog;
+ log() << "** Journaling works best with it set to 0 or 1" << startupWarningsLog;
+ }
+ }
+
+ if (boost::filesystem::exists("/proc/sys/vm/zone_reclaim_mode")){
+ fstream f ("/proc/sys/vm/zone_reclaim_mode", ios_base::in);
+ unsigned val;
+ f >> val;
+
+ if (val != 0) {
+ log() << startupWarningsLog;
+ log() << "** WARNING: /proc/sys/vm/zone_reclaim_mode is " << val << startupWarningsLog;
+ log() << "** We suggest setting it to 0" << startupWarningsLog;
+ log() << "** http://www.kernel.org/doc/Documentation/sysctl/vm.txt" << startupWarningsLog;
+ }
+ }
+#endif
+
+ if (warned) {
+ log() << startupWarningsLog;
+ }
+ }
+
+ int versionCmp(StringData rhs, StringData lhs) {
+ if (strcmp(rhs.data(),lhs.data()) == 0)
+ return 0;
+
+ // handle "1.2.3-" and "1.2.3-pre"
+ if (rhs.size() < lhs.size()) {
+ if (strncmp(rhs.data(), lhs.data(), rhs.size()) == 0 && lhs.data()[rhs.size()] == '-')
+ return +1;
+ }
+ else if (rhs.size() > lhs.size()) {
+ if (strncmp(rhs.data(), lhs.data(), lhs.size()) == 0 && rhs.data()[lhs.size()] == '-')
+ return -1;
+ }
+
+ return lexNumCmp(rhs.data(), lhs.data());
+ }
+
+ class VersionCmpTest : public UnitTest {
+ public:
+ void run() {
+ assert( versionCmp("1.2.3", "1.2.3") == 0 );
+ assert( versionCmp("1.2.3", "1.2.4") < 0 );
+ assert( versionCmp("1.2.3", "1.2.20") < 0 );
+ assert( versionCmp("1.2.3", "1.20.3") < 0 );
+ assert( versionCmp("2.2.3", "10.2.3") < 0 );
+ assert( versionCmp("1.2.3", "1.2.3-") > 0 );
+ assert( versionCmp("1.2.3", "1.2.3-pre") > 0 );
+ assert( versionCmp("1.2.3", "1.2.4-") < 0 );
+ assert( versionCmp("1.2.3-", "1.2.3") < 0 );
+ assert( versionCmp("1.2.3-pre", "1.2.3") < 0 );
+
+ log(1) << "versionCmpTest passed" << endl;
+ }
+ } versionCmpTest;
+
+ class VersionArrayTest : public UnitTest {
+ public:
+ void run() {
+ assert( _versionArray("1.2.3") == BSON_ARRAY(1 << 2 << 3 << 0) );
+ assert( _versionArray("1.2.0") == BSON_ARRAY(1 << 2 << 0 << 0) );
+ assert( _versionArray("2.0.0") == BSON_ARRAY(2 << 0 << 0 << 0) );
+
+ assert( _versionArray("1.2.3-pre-") == BSON_ARRAY(1 << 2 << 3 << -100) );
+ assert( _versionArray("1.2.0-pre-") == BSON_ARRAY(1 << 2 << 0 << -100) );
+ assert( _versionArray("2.0.0-pre-") == BSON_ARRAY(2 << 0 << 0 << -100) );
+
+ assert( _versionArray("1.2.3-rc0") == BSON_ARRAY(1 << 2 << 3 << -10) );
+ assert( _versionArray("1.2.0-rc1") == BSON_ARRAY(1 << 2 << 0 << -9) );
+ assert( _versionArray("2.0.0-rc2") == BSON_ARRAY(2 << 0 << 0 << -8) );
+
+ // Note that the pre of an rc is the same as the rc itself
+ assert( _versionArray("1.2.3-rc3-pre-") == BSON_ARRAY(1 << 2 << 3 << -7) );
+ assert( _versionArray("1.2.0-rc4-pre-") == BSON_ARRAY(1 << 2 << 0 << -6) );
+ assert( _versionArray("2.0.0-rc5-pre-") == BSON_ARRAY(2 << 0 << 0 << -5) );
+
+ log(1) << "versionArrayTest passed" << endl;
+ }
+ } versionArrayTest;
+}
diff --git a/src/mongo/util/version.h b/src/mongo/util/version.h
new file mode 100644
index 00000000000..64f8b140fd5
--- /dev/null
+++ b/src/mongo/util/version.h
@@ -0,0 +1,27 @@
+#ifndef UTIL_VERSION_HEADER
+#define UTIL_VERSION_HEADER
+
+#include <string>
+
+namespace mongo {
+ struct BSONArray;
+
+ using std::string;
+
+ // mongo version
+ extern const char versionString[];
+ extern const BSONArray versionArray;
+ string mongodVersion();
+ int versionCmp(StringData rhs, StringData lhs); // like strcmp
+
+ const char * gitVersion();
+ void printGitVersion();
+
+ string sysInfo();
+ void printSysInfo();
+
+ void show_warnings();
+
+} // namespace mongo
+
+#endif // UTIL_VERSION_HEADER
diff --git a/src/mongo/util/winutil.h b/src/mongo/util/winutil.h
new file mode 100644
index 00000000000..b69b69a630d
--- /dev/null
+++ b/src/mongo/util/winutil.h
@@ -0,0 +1,44 @@
+// @file winutil.cpp : Windows related utility functions
+//
+// /**
+// * Copyright (C) 2008 10gen Inc.
+// *
+// * This program is free software: you can redistribute it and/or modify
+// * it under the terms of the GNU Affero General Public License, version 3,
+// * as published by the Free Software Foundation.
+// *
+// * This program is distributed in the hope that it will be useful,
+// * but WITHOUT ANY WARRANTY; without even the implied warranty of
+// * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// * GNU Affero General Public License for more details.
+// *
+// * You should have received a copy of the GNU Affero General Public License
+// * along with this program. If not, see <http://www.gnu.org/licenses/>.
+// */
+//
+// #include "pch.h"
+
+#pragma once
+
+#if defined(_WIN32)
+#include <windows.h>
+#include "text.h"
+
+namespace mongo {
+
+ inline string GetWinErrMsg(DWORD err) {
+ LPTSTR errMsg;
+ ::FormatMessage( FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM, NULL, err, 0, (LPTSTR)&errMsg, 0, NULL );
+ std::string errMsgStr = toUtf8String( errMsg );
+ ::LocalFree( errMsg );
+ // FormatMessage() appends a trailing CR/LF to the message; trim it off so callers control line endings.
+ errMsgStr = errMsgStr.erase( errMsgStr.length() - 2 );
+ std::ostringstream output;
+ output << errMsgStr << " (" << err << ")";
+
+ return output.str();
+ }
+}
+
+#endif
+
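
A hedged, Windows-only usage sketch of GetWinErrMsg above (ERROR_ACCESS_DENIED is just a sample code; any DWORD from ::GetLastError() works the same way, and the include assumes winutil.h's own dependencies such as text.h resolve):

    // Illustrative only; not part of the commit.
    #if defined(_WIN32)
    #include <iostream>
    #include <windows.h>
    #include "mongo/util/winutil.h"

    int main() {
        DWORD err = ERROR_ACCESS_DENIED;   // e.g. the value returned by ::GetLastError()
        std::cout << "win32 error: " << mongo::GetWinErrMsg(err) << std::endl;
        return 0;
    }
    #endif
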
diff --git a/src/third_party/README b/src/third_party/README
new file mode 100644
index 00000000000..57f702d0b97
--- /dev/null
+++ b/src/third_party/README
@@ -0,0 +1,6 @@
+
+linenoise
+ when making changes here, also publish to
+ http://github.com/erh/linenoise
+ 6cdc775807e57b2c3fd64bd207814f8ee1fe35f3
+
diff --git a/src/third_party/js-1.7/Makefile.in b/src/third_party/js-1.7/Makefile.in
new file mode 100644
index 00000000000..08bb674fc7f
--- /dev/null
+++ b/src/third_party/js-1.7/Makefile.in
@@ -0,0 +1,388 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either of the GNU General Public License Version 2 or later (the "GPL"),
+# or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+DEPTH = ../..
+topsrcdir = @top_srcdir@
+srcdir = @srcdir@
+VPATH = @srcdir@
+
+include $(DEPTH)/config/autoconf.mk
+
+MODULE = js
+LIBRARY_NAME = mozjs
+LIB_IS_C_ONLY = 1
+GRE_MODULE = 1
+
+ifeq (,$(filter-out WINNT WINCE,$(OS_ARCH)))
+LIBRARY_NAME = js$(MOZ_BITS)$(VERSION_NUMBER)
+RESFILE = js$(MOZ_BITS)40.res
+endif
+
+PACKAGE_FILE = js.pkg
+
+# JavaScript must be built shared, even for static builds, as it is used by
+# other modules which are always built shared. Failure to do so results in
+# the js code getting copied into xpinstall and jsd as well as mozilla-bin,
+# and then the static data cells used for locking no longer work.
+
+ifndef JS_STATIC_BUILD
+FORCE_SHARED_LIB = 1
+endif
+
+CSRCS = \
+ jsapi.c \
+ jsarena.c \
+ jsarray.c \
+ jsatom.c \
+ jsbool.c \
+ jscntxt.c \
+ jsdate.c \
+ jsdbgapi.c \
+ jsdhash.c \
+ jsdtoa.c \
+ jsemit.c \
+ jsexn.c \
+ jsfun.c \
+ jsgc.c \
+ jshash.c \
+ jsinterp.c \
+ jsiter.c \
+ jslock.c \
+ jslog2.c \
+ jslong.c \
+ jsmath.c \
+ jsnum.c \
+ jsobj.c \
+ jsopcode.c \
+ jsparse.c \
+ jsprf.c \
+ jsregexp.c \
+ jsscan.c \
+ jsscope.c \
+ jsscript.c \
+ jsstr.c \
+ jsutil.c \
+ jsxdrapi.c \
+ jsxml.c \
+ prmjtime.c \
+ $(NULL)
+
+EXPORTS = \
+ jsautocfg.h \
+ jsautokw.h \
+ js.msg \
+ jsapi.h \
+ jsarray.h \
+ jsarena.h \
+ jsatom.h \
+ jsbit.h \
+ jsbool.h \
+ jsclist.h \
+ jscntxt.h \
+ jscompat.h \
+ jsconfig.h \
+ jsdate.h \
+ jsdbgapi.h \
+ jsdhash.h \
+ jsemit.h \
+ jsfun.h \
+ jsgc.h \
+ jshash.h \
+ jsinterp.h \
+ jsiter.h \
+ jslock.h \
+ jslong.h \
+ jsmath.h \
+ jsnum.h \
+ jsobj.h \
+ jsopcode.tbl \
+ jsopcode.h \
+ jsosdep.h \
+ jsotypes.h \
+ jsparse.h \
+ jsprf.h \
+ jsproto.tbl \
+ jsprvtd.h \
+ jspubtd.h \
+ jsregexp.h \
+ jsscan.h \
+ jsscope.h \
+ jsscript.h \
+ jsstddef.h \
+ jsstr.h \
+ jstypes.h \
+ jsutil.h \
+ jsxdrapi.h \
+ jsxml.h \
+ $(NULL)
+
+ifeq (,$(filter-out WINNT WINCE,$(OS_ARCH)))
+EXPORTS += jscpucfg.h
+endif
+
+JS_SAFE_ARENA = 1
+
+DASH_R = -r
+
+include $(topsrcdir)/config/config.mk
+
+EXTRA_DSO_LDOPTS += $(NSPR_LIBS)
+
+# When using gcc the assembly is inlined in the C-file (see jslock.c)
+ifeq ($(OS_ARCH),SunOS)
+ifneq ($(OS_TEST),i86pc)
+ifndef GNU_CC
+ASFILES = lock_$(OS_ARCH).s
+endif
+endif
+endif
+
+ifndef BUILD_OPT
+MOCHAFILE = 1
+endif
+
+ifndef NSBUILDROOT
+JSJAVA_STUBHEADERS = \
+ -I$(topsrcdir)/sun-java/include/_gen \
+ -I$(topsrcdir)/sun-java/netscape/javascript/_jri \
+ -I$(topsrcdir)/sun-java/netscape/security/_jri
+else
+JSJAVA_STUBHEADERS = -I$(JRI_GEN_DIR) -I$(JDK_GEN_DIR)
+endif
+
+JSJAVA_CFLAGS = \
+ -I$(topsrcdir)/sun-java/md-include \
+ -I$(topsrcdir)/sun-java/include \
+ $(JSJAVA_STUBHEADERS)
+
+# Define keyword generator before rules.mk, see bug 323979 comment 50
+
+HOST_SIMPLE_PROGRAMS += host_jskwgen$(HOST_BIN_SUFFIX)
+GARBAGE += jsautokw.h host_jskwgen$(HOST_BIN_SUFFIX)
+
+include $(topsrcdir)/config/rules.mk
+
+DEFINES += -DEXPORT_JS_API
+
+INCLUDES += -I$(srcdir)
+
+# MSVC '-Gy' cc flag and '/OPT:REF' linker flag cause JS_GetArgument and
+# JS_GetLocalVariable to be folded to the same address by the linker,
+# leading to a crash on startup. See bug 151066. So, in optimized builds,
+# add the /OPT:NOICF flag, which turns off 'identical COMDAT folding'.
+#
+# N.B.: 'identical COMDAT folding' that folds functions whose addresses
+# are taken violates the ISO C and C++ standards.
+ifndef MOZ_DEBUG
+ifeq (_WINNT,$(GNU_CC)_$(OS_ARCH))
+LDFLAGS += -OPT:NOICF
+endif
+endif
+
+GARBAGE += jscpucfg.o jsautocfg.h jsautocfg.tmp jscpucfg
+
+ifneq (,$(CROSS_COMPILE)$(filter-out WINNT,$(OS_ARCH)))
+TARGETS += jscpucfg$(HOST_BIN_SUFFIX)
+endif
+
+ifdef JS_SAFE_ARENA
+DEFINES += -DJS_USE_SAFE_ARENA
+endif
+
+ifdef JS_THREADSAFE
+DEFINES += -DJS_THREADSAFE
+endif
+
+ifdef JS_NO_THIN_LOCKS
+DEFINES += -DJS_USE_ONLY_NSPR_LOCKS
+endif
+
+ifdef JS_VERSION
+DEFINES += -DJS_VERSION=$(JS_VERSION)
+endif
+
+ifneq ($(findstring -L,$(NSPR_LIBS)),)
+NSPR_STATIC_PATH = $(subst -L,,$(findstring -L,$(NSPR_LIBS)))
+else
+NSPR_STATIC_PATH = $(DIST)/lib
+endif
+
+LDFLAGS += $(patsubst -l%,$(NSPR_STATIC_PATH)/%.a,$(NSPR_LIBS))
+
+# BeOS, HP-UX, Windows (WINNT/WINCE) and OpenVMS do not require the extra linking of "-lm"
+ifeq (,$(filter BeOS HP-UX WINNT WINCE OpenVMS,$(OS_ARCH)))
+LDFLAGS += -lm
+endif
+
+# Prevent floating point errors caused by VC++ optimizations
+ifeq ($(OS_ARCH)_$(GNU_CC),WINNT_)
+ifeq (,$(filter-out 1200 1300 1310,$(_MSC_VER)))
+CFLAGS += -Op
+else
+CFLAGS += -fp:precise
+endif
+endif # WINNT
+
+ifeq ($(OS_ARCH),FreeBSD)
+LDFLAGS += -pthread
+endif
+ifeq ($(OS_ARCH),IRIX)
+ifdef USE_N32
+DASH_R += -n32
+endif
+endif
+ifeq ($(OS_ARCH),Linux)
+LDFLAGS += -ldl
+endif
+ifeq ($(OS_ARCH),OSF1)
+LDFLAGS += -lc_r
+endif
+ifeq ($(OS_ARCH),SunOS)
+ifeq ($(TARGET_CPU),sparc)
+
+ifdef JS_ULTRASPARC_OPTS
+DEFINES += -DULTRA_SPARC
+ifdef GNU_CC
+CFLAGS += -Wa,-xarch=v8plus,-DULTRA_SPARC,-P,-L,-D_ASM,-D__STDC__=0
+CXXFLAGS += -Wa,-xarch=v8plus,-DULTRA_SPARC,-P,-L,-D_ASM,-D__STDC__=0,-K,PIC
+else
+ASFLAGS += -xarch=v8plus -DULTRA_SPARC -P -L -D_ASM -D__STDC__=0 -K PIC
+endif # GNU_CC
+endif # JS_ULTRASPARC_OPTS
+
+endif
+ifeq ($(OS_RELEASE),4.1)
+LDFLAGS += -ldl -lnsl
+else
+LDFLAGS += -lposix4 -ldl -lnsl -lsocket
+endif
+endif
+
+ifeq ($(OS_ARCH),IRIX)
+ifndef GNU_CC
+_COMPILE_CFLAGS = $(patsubst -O%,-O1,$(COMPILE_CFLAGS))
+jsapi.o jsarena.o jsarray.o jsatom.o jsemit.o jsfun.o jsinterp.o jsregexp.o jsparse.o jsopcode.o jsscript.o: %.o: %.c Makefile.in
+ $(REPORT_BUILD)
+ @$(MAKE_DEPS_AUTO)
+ $(CC) -o $@ -c $(_COMPILE_CFLAGS) $<
+endif
+endif
+
+# An AIX Optimization bug causes PR_dtoa() & JS_dtoa to produce wrong result.
+# This suppresses optimization for this single compilation unit.
+ifeq ($(OS_ARCH),AIX)
+jsatom.o: jsatom.c Makefile.in
+ $(REPORT_BUILD)
+ @$(MAKE_DEPS_AUTO)
+ $(CC) -o $@ -c $(filter-out $(MOZ_OPTIMIZE_FLAGS), $(COMPILE_CFLAGS)) $<
+jsdtoa.o: jsdtoa.c Makefile.in
+ $(REPORT_BUILD)
+ @$(MAKE_DEPS_AUTO)
+ $(CC) -o $@ -c $(filter-out $(MOZ_OPTIMIZE_FLAGS), $(COMPILE_CFLAGS)) $<
+endif
+
+jsopcode.h jsopcode.c: jsopcode.tbl
+
+ifeq (,$(CROSS_COMPILE)$(filter-out WINNT,$(OS_ARCH)))
+jsautocfg.h:
+ touch $@
+else
+ifeq ($(OS_ARCH),WINCE)
+jsautocfg.h:
+ touch $@
+else
+jsautocfg.h: jscpucfg$(HOST_BIN_SUFFIX)
+ @rm -f $@ jsautocfg.tmp
+ ./jscpucfg > jsautocfg.tmp
+ mv jsautocfg.tmp $@
+endif
+endif
+
+# jscpucfg is a strange target
+# Needs to be built with the host compiler but needs to include
+# the mdcpucfg for the target so it needs the appropriate target defines
+ifdef HOST_NSPR_MDCPUCFG
+HOST_CC := $(HOST_CC) -DMDCPUCFG=$(TARGET_NSPR_MDCPUCFG)
+HOST_CFLAGS := $(patsubst -DXP_%,,$(HOST_CFLAGS))
+endif
+
+ifdef CROSS_COMPILE
+# jscpucfg needs to know when it's supposed to produce a config for the target
+JSCPUCFG_DEFINES = $(ACDEFINES)
+
+# This is incredibly hacky. Darwin NSPR uses the same MDCPUCFG for multiple
+# processors, and determines which processor to configure for based on
+# #ifdef i386. This macro is among the NSPR defines, but is also automatically
+# defined by the compiler when building for i386. It therefore needs to be
+# defined here if targeting i386, and explicitly undefined otherwise.
+ifeq ($(OS_ARCH),Darwin)
+ifeq ($(TARGET_CPU),powerpc)
+JSCPUCFG_DEFINES += -Ui386
+else
+JSCPUCFG_DEFINES += -Di386=1
+endif
+endif
+endif
+
+ifeq ($(OS_ARCH),QNX)
+ifneq ($(OS_TARGET),NTO)
+# QNX's compiler apparently can't build a binary directly from a source file.
+jscpucfg.o: jscpucfg.c Makefile.in
+ $(HOST_CC) $(HOST_CFLAGS) -c $(JSCPUCFG_DEFINES) $(DEFINES) $(NSPR_CFLAGS) -o $@ $<
+
+jscpucfg: jscpucfg.o
+ $(HOST_CC) $(HOST_CFLAGS) $(JSCPUCFG_DEFINES) $(DEFINES) -o $@ $<
+endif
+else
+ifeq ($(OS_ARCH),WINCE)
+jscpucfg$(HOST_BIN_SUFFIX):
+ echo no need to build jscpucfg $<
+else
+jscpucfg$(HOST_BIN_SUFFIX): jscpucfg.c Makefile.in
+ $(HOST_CC) $(HOST_CFLAGS) $(JSCPUCFG_DEFINES) $(DEFINES) $(NSPR_CFLAGS) $(OUTOPTION)$@ $<
+endif
+endif
+
+# Extra dependencies and rules for keyword switch code
+jsscan.$(OBJ_SUFFIX): jsautokw.h jskeyword.tbl
+
+host_jskwgen.$(OBJ_SUFFIX): jsconfig.h jskeyword.tbl
+
+jsautokw.h: host_jskwgen$(HOST_BIN_SUFFIX)
+ ./host_jskwgen$(HOST_BIN_SUFFIX) $@
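
The jsautocfg.h rule above runs a freshly built jscpucfg on the build host and redirects its output into a header of CPU- and compiler-dependent constants. A much-simplified, hypothetical C++ sketch of that generate-a-header-at-build-time step (the real jscpucfg.c probes considerably more):

    // Illustrative only: print a tiny configuration header to stdout.
    #include <cstdio>

    int main() {
        unsigned probe = 1;
        bool littleEndian = (*reinterpret_cast<unsigned char*>(&probe) == 1);
        std::printf("/* generated by the build; do not edit */\n");
        std::printf("#define IS_LITTLE_ENDIAN %d\n", littleEndian ? 1 : 0);
        std::printf("#define IS_BIG_ENDIAN    %d\n", littleEndian ? 0 : 1);
        std::printf("#define BYTES_PER_WORD   %d\n", (int) sizeof(void*));
        return 0;
    }

Usage mirrors the rule above: run the tool, capture its stdout into a temporary file, then move it into place as the generated header.
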
diff --git a/src/third_party/js-1.7/Makefile.ref b/src/third_party/js-1.7/Makefile.ref
new file mode 100644
index 00000000000..587ab86ce28
--- /dev/null
+++ b/src/third_party/js-1.7/Makefile.ref
@@ -0,0 +1,375 @@
+# -*- Mode: makefile -*-
+# vim: ft=make
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Michael Ang <mang@subcarrier.org>
+# Kevin Buhr <buhr@stat.wisc.edu>
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either of the GNU General Public License Version 2 or later (the "GPL"),
+# or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# JSRef GNUmake makefile.
+#
+# Note: dependency rules are missing for some files (some
+# .h, all .msg, etc.) Re-make clean if in doubt.
+#
+
+
+DEPTH = .
+
+include config.mk
+
+#NS_USE_NATIVE = 1
+
+ifdef NARCISSUS
+DEFINES += -DNARCISSUS
+endif
+
+# Look in OBJDIR to find jsautocfg.h and jsautokw.h
+INCLUDES += -I$(OBJDIR)
+
+ifdef JS_THREADSAFE
+DEFINES += -DJS_THREADSAFE
+INCLUDES += -I$(DIST)/include/nspr
+ifdef USE_MSVC
+OTHER_LIBS += $(DIST)/lib/libnspr$(NSPR_LIBSUFFIX).lib
+else
+OTHER_LIBS += -L$(DIST)/lib -lnspr$(NSPR_LIBSUFFIX)
+endif
+endif
+
+ifdef JS_NO_THIN_LOCKS
+DEFINES += -DJS_USE_ONLY_NSPR_LOCKS
+endif
+
+ifdef JS_HAS_FILE_OBJECT
+DEFINES += -DJS_HAS_FILE_OBJECT
+endif
+
+#
+# XCFLAGS may be set in the environment or on the gmake command line
+#
+CFLAGS += $(OPTIMIZER) $(OS_CFLAGS) $(DEFINES) $(INCLUDES) $(XCFLAGS)
+
+LDFLAGS = $(XLDFLAGS)
+
+ifndef NO_LIBM
+LDFLAGS += -lm
+endif
+
+# Prevent floating point errors caused by VC++ optimizations
+ifeq ($(OS_ARCH),WINNT)
+_MSC_VER = $(shell $(CC) 2>&1 | sed -n 's/.*Compiler Version \([0-9]*\)\.\([0-9]*\).*/\1\2/p')
+ifeq (,$(filter-out 1200 1300 1310,$(_MSC_VER)))
+CFLAGS += -Op
+else
+CFLAGS += -fp:precise
+endif
+endif # WINNT
+
+#
+# Ask perl what flags it was built with, so we can build js with similar flags
+# and link properly. Viva gmake.
+#
+ifdef JS_PERLCONNECT
+DEFINES += -DPERLCONNECT -D_GNU_SOURCE
+
+PERLCFLAGS := $(shell perl -MExtUtils::Embed -e ccopts)
+PERLLDFLAGS := $(shell perl -MExtUtils::Embed -e ldopts)
+
+# perl erroneously reports compiler flag -rdynamic (interpreted by ld
+# as -r) when it really meant -export-dynamic.
+PERLLDFLAGS := $(subst -rdynamic,-export-dynamic,$(PERLLDFLAGS))
+
+CFLAGS += $(PERLCFLAGS)
+#LDFLAGS += $(PERLLDFLAGS) #PH removed this assignment
+INCLUDES += -I. #needed for perlconnect/jsperl.c
+endif
+
+#
+# Server-related changes :
+#
+ifdef NES40
+DEFINES += -DNES40
+endif
+
+#
+# Line editing support.
+# Define JS_READLINE or JS_EDITLINE to enable line editing in the
+# js command-line interpreter.
+#
+ifdef JS_READLINE
+# For those platforms with the readline library installed.
+DEFINES += -DEDITLINE
+PROG_LIBS += -lreadline -ltermcap
+else
+ifdef JS_EDITLINE
+# Use the editline library, built locally.
+PREDIRS += editline
+DEFINES += -DEDITLINE
+PROG_LIBS += editline/$(OBJDIR)/libedit.a
+endif
+endif
+
+# For purify
+PURE_CFLAGS = -DXP_UNIX $(OPTIMIZER) $(PURE_OS_CFLAGS) $(DEFINES) \
+ $(INCLUDES) $(XCFLAGS)
+
+#
+# JS file lists
+#
+JS_HFILES = \
+ jsarray.h \
+ jsatom.h \
+ jsbool.h \
+ jsconfig.h \
+ jscntxt.h \
+ jsdate.h \
+ jsemit.h \
+ jsexn.h \
+ jsfun.h \
+ jsgc.h \
+ jsinterp.h \
+ jsiter.h \
+ jslibmath.h \
+ jslock.h \
+ jsmath.h \
+ jsnum.h \
+ jsobj.h \
+ jsopcode.h \
+ jsparse.h \
+ jsarena.h \
+ jsclist.h \
+ jsdhash.h \
+ jsdtoa.h \
+ jshash.h \
+ jslong.h \
+ jsosdep.h \
+ jstypes.h \
+ jsprvtd.h \
+ jspubtd.h \
+ jsregexp.h \
+ jsscan.h \
+ jsscope.h \
+ jsscript.h \
+ jsstr.h \
+ jsxdrapi.h \
+ jsxml.h \
+ $(NULL)
+
+API_HFILES = \
+ jsapi.h \
+ jsdbgapi.h \
+ $(NULL)
+
+OTHER_HFILES = \
+ jsbit.h \
+ jscompat.h \
+ jscpucfg.h \
+ jsotypes.h \
+ jsstddef.h \
+ prmjtime.h \
+ resource.h \
+ jsopcode.tbl \
+ jsproto.tbl \
+ js.msg \
+ jsshell.msg \
+ jskeyword.tbl \
+ $(NULL)
+
+ifndef PREBUILT_CPUCFG
+OTHER_HFILES += $(OBJDIR)/jsautocfg.h
+endif
+OTHER_HFILES += $(OBJDIR)/jsautokw.h
+
+HFILES = $(JS_HFILES) $(API_HFILES) $(OTHER_HFILES)
+
+JS_CFILES = \
+ jsapi.c \
+ jsarena.c \
+ jsarray.c \
+ jsatom.c \
+ jsbool.c \
+ jscntxt.c \
+ jsdate.c \
+ jsdbgapi.c \
+ jsdhash.c \
+ jsdtoa.c \
+ jsemit.c \
+ jsexn.c \
+ jsfun.c \
+ jsgc.c \
+ jshash.c \
+ jsinterp.c \
+ jsiter.c \
+ jslock.c \
+ jslog2.c \
+ jslong.c \
+ jsmath.c \
+ jsnum.c \
+ jsobj.c \
+ jsopcode.c \
+ jsparse.c \
+ jsprf.c \
+ jsregexp.c \
+ jsscan.c \
+ jsscope.c \
+ jsscript.c \
+ jsstr.c \
+ jsutil.c \
+ jsxdrapi.c \
+ jsxml.c \
+ prmjtime.c \
+ $(NULL)
+
+ifdef JS_LIVECONNECT
+DIRS += liveconnect
+endif
+
+ifdef JS_PERLCONNECT
+JS_CFILES += perlconnect/jsperl.c
+endif
+
+ifdef JS_HAS_FILE_OBJECT
+JS_CFILES += jsfile.c
+JS_HFILES += jsfile.h
+endif
+
+LIB_CFILES = $(JS_CFILES)
+LIB_ASFILES := $(wildcard *_$(OS_ARCH).s)
+PROG_CFILES = js.c
+
+ifdef USE_MSVC
+LIBRARY = $(OBJDIR)/js32.lib
+SHARED_LIBRARY = $(OBJDIR)/js32.dll
+PROGRAM = $(OBJDIR)/js.exe
+else
+LIBRARY = $(OBJDIR)/libjs.a
+SHARED_LIBRARY = $(OBJDIR)/libjs.$(SO_SUFFIX)
+PROGRAM = $(OBJDIR)/js
+ifdef JS_PERLCONNECT
+PROG_LIBS += $(PERLLDFLAGS)
+endif
+endif
+
+include rules.mk
+
+MOZ_DEPTH = ../..
+include jsconfig.mk
+
+nsinstall-target:
+ cd ../../config; $(MAKE) OBJDIR=$(OBJDIR) OBJDIR_NAME=$(OBJDIR)
+
+#
+# Rules for keyword switch generation
+#
+
+GARBAGE += $(OBJDIR)/jsautokw.h $(OBJDIR)/jskwgen$(HOST_BIN_SUFFIX)
+GARBAGE += $(OBJDIR)/jskwgen.$(OBJ_SUFFIX)
+
+$(OBJDIR)/jsscan.$(OBJ_SUFFIX): $(OBJDIR)/jsautokw.h jskeyword.tbl
+
+$(OBJDIR)/jskwgen.$(OBJ_SUFFIX): jskwgen.c jskeyword.tbl
+
+$(OBJDIR)/jsautokw.h: $(OBJDIR)/jskwgen$(HOST_BIN_SUFFIX) jskeyword.tbl
+ $(OBJDIR)/jskwgen$(HOST_BIN_SUFFIX) $@
+
+ifdef USE_MSVC
+
+$(OBJDIR)/jskwgen.obj: jskwgen.c jskeyword.tbl
+ @$(MAKE_OBJDIR)
+ $(CC) -Fo$(OBJDIR)/ -c $(CFLAGS) $<
+
+$(OBJDIR)/jskwgen$(HOST_BIN_SUFFIX): $(OBJDIR)/jskwgen.$(OBJ_SUFFIX)
+ link.exe -out:"$@" $(EXE_LINK_FLAGS) $^
+
+else
+
+$(OBJDIR)/jskwgen.o: jskwgen.c jskeyword.tbl
+ @$(MAKE_OBJDIR)
+ $(CC) -o $@ -c $(CFLAGS) $<
+
+$(OBJDIR)/jskwgen$(HOST_BIN_SUFFIX): $(OBJDIR)/jskwgen.$(OBJ_SUFFIX)
+ $(CC) -o $@ $(CFLAGS) $(LDFLAGS) $^
+
+endif
+
+#
+# JS shell executable
+#
+
+ifdef USE_MSVC
+$(PROGRAM): $(PROG_OBJS) $(LIBRARY)
+ link.exe -out:"$@" $(EXE_LINK_FLAGS) $^
+else
+$(PROGRAM): $(PROG_OBJS) $(LIBRARY)
+ $(CC) -o $@ $(CFLAGS) $(PROG_OBJS) $(LIBRARY) $(LDFLAGS) $(OTHER_LIBS) \
+ $(PROG_LIBS)
+endif
+
+$(PROGRAM).pure: $(PROG_OBJS) $(LIBRARY)
+ purify $(PUREFLAGS) \
+ $(CC) -o $@ $(PURE_OS_CFLAGS) $(PROG_OBJS) $(LIBRARY) $(LDFLAGS) \
+ $(OTHER_LIBS) $(PROG_LIBS)
+
+ifndef PREBUILT_CPUCFG
+$(HFILES) $(CFILES): $(OBJDIR)/jsautocfg.h
+
+$(OBJDIR)/jsautocfg.h: $(OBJDIR)/jscpucfg
+ rm -f $@
+ $(OBJDIR)/jscpucfg > $@
+
+$(OBJDIR)/jscpucfg: $(OBJDIR)/jscpucfg.o
+ $(CC) -o $@ $(OBJDIR)/jscpucfg.o
+
+# Add to TARGETS for clobber rule
+TARGETS += $(OBJDIR)/jsautocfg.h $(OBJDIR)/jscpucfg \
+ $(OBJDIR)/jscpucfg.o
+endif
+
+#
+# Hardwire dependencies on jsopcode.tbl
+#
+jsopcode.h jsopcode.c: jsopcode.tbl
+
+-include $(DEPENDENCIES)
+
+TARNAME = jsref.tar
+TARFILES = files `cat files`
+
+SUFFIXES: .i
+%.i: %.c
+ $(CC) -C -E $(CFLAGS) $< > $*.i
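
Both makefiles wire up a "keyword switch generation" step: a small host tool (jskwgen / host_jskwgen) is compiled first, then run to emit jsautokw.h before the scanner is compiled against it. A heavily simplified, hypothetical C++ sketch of that shape, purely for illustration (the real jskwgen derives a lookup switch from jskeyword.tbl):

    // Illustrative only: emit a generated header from a fixed keyword list.
    #include <cstdio>

    int main(int argc, char** argv) {
        const char* keywords[] = { "break", "case", "catch", "continue", "default" };
        std::FILE* out = (argc > 1) ? std::fopen(argv[1], "w") : stdout;
        if (!out)
            return 1;
        std::fprintf(out, "/* generated -- do not edit */\n");
        for (unsigned i = 0; i != sizeof(keywords) / sizeof(keywords[0]); ++i)
            std::fprintf(out, "#define JS_KEYWORD_%u \"%s\"\n", i, keywords[i]);
        if (out != stdout)
            std::fclose(out);
        return 0;
    }
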
diff --git a/src/third_party/js-1.7/README.html b/src/third_party/js-1.7/README.html
new file mode 100644
index 00000000000..b2942e3858b
--- /dev/null
+++ b/src/third_party/js-1.7/README.html
@@ -0,0 +1,826 @@
+<!-- ***** BEGIN LICENSE BLOCK *****
+ - Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ -
+ - The contents of this file are subject to the Mozilla Public License Version
+ - 1.1 (the "License"); you may not use this file except in compliance with
+ - the License. You may obtain a copy of the License at
+ - http://www.mozilla.org/MPL/
+ -
+ - Software distributed under the License is distributed on an "AS IS" basis,
+ - WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ - for the specific language governing rights and limitations under the
+ - License.
+ -
+ - The Original Code is Mozilla Communicator client code, released
+ - March 31, 1998.
+ -
+ - The Initial Developer of the Original Code is
+ - Netscape Communications Corporation.
+ - Portions created by the Initial Developer are Copyright (C) 1998-1999
+ - the Initial Developer. All Rights Reserved.
+ -
+ - Contributor(s):
+ -
+ - Alternatively, the contents of this file may be used under the terms of
+ - either of the GNU General Public License Version 2 or later (the "GPL"),
+ - or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ - in which case the provisions of the GPL or the LGPL are applicable instead
+ - of those above. If you wish to allow use of your version of this file only
+ - under the terms of either the GPL or the LGPL, and not to allow others to
+ - use your version of this file under the terms of the MPL, indicate your
+ - decision by deleting the provisions above and replace them with the notice
+ - and other provisions required by the GPL or the LGPL. If you do not delete
+ - the provisions above, a recipient may use your version of this file under
+ - the terms of any one of the MPL, the GPL or the LGPL.
+ -
+ - ***** END LICENSE BLOCK ***** -->
+<!doctype html public "-//w3c//dtd html 4.0 transitional//en">
+<html>
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
+ <meta name="GENERATOR" content="Mozilla/4.5 [en] (WinNT; I) [Netscape]">
+ <title>JavaScript Reference Implementation (JSRef) README</title>
+</head>
+<body>
+
+<h2>
+Table of Contents</h2>
+
+<ul>
+<li>
+<a href="#Introduction">Introduction</a></li>
+
+<li>
+<a href="#Build">Build conventions (standalone JS engine and shell)</a></li>
+
+<li>
+<a href="#Debugging">Debugging notes</a></li>
+
+<li>
+<a href="#Conventions">Naming and coding conventions</a></li>
+
+<li>
+<a href="#JSAPI">Using the JS API</a></li>
+
+<li>
+<a href="#Design">Design walk-through</a></li>
+
+<li>
+<a href="#Resources">Additional Resources (links, API docs, and newsgroups)</a></li>
+
+</ul>
+
+<h2>
+<a NAME="Introduction"></a>Introduction</h2>
+This is the README file for the&nbsp;<span CLASS=LXRSHORTDESC>JavaScript
+Reference (JSRef, now better known as SpiderMonkey) implementation.</span>
+It consists of build conventions
+and instructions, source code conventions, a design walk-through, and a
+brief file-by-file description of the source.
+<p><span CLASS=LXRLONGDESC>JSRef builds a library or DLL containing the
+JavaScript runtime (compiler, interpreter, decompiler, garbage collector,
+atom manager, standard classes). It then compiles a small "shell" program
+and links that with the library to make an interpreter that can be used
+interactively and with test .js files to run scripts.&nbsp; The code has
+no dependencies on the rest of the Mozilla codebase.</span>
+<p><i>Quick start tip</i>: skip to "Using the JS API" below, build the
+js shell, and play with the object named "it" (start by setting 'it.noisy
+= true').
+<h2>
+<a NAME="Build"></a>Build conventions (standalone JS engine and shell)
+(OUT OF DATE!)</h2>
+These build directions refer only to building the standalone JavaScript
+engine and shell.&nbsp; To build within the browser, refer to the <a
+href="http://www.mozilla.org/build/">build
+directions</a> on the mozilla.org website.
+<p>By default, all platforms build a version of the JS engine that is <i>not</i>
+threadsafe.&nbsp; If you require thread-safety, you must also populate
+the <tt>mozilla/dist</tt> directory with <a href="http://www.mozilla.org/projects/nspr/reference/html/"
+>NSPR</a>
+headers and libraries.&nbsp; (NSPR implements a portable threading library,
+among other things.&nbsp; The source is downloadable via <a href="http://www.mozilla.org/cvs.html">CVS</a>
+from <tt><a href="http://lxr.mozilla.org/mozilla/source/nsprpub">mozilla/nsprpub</a></tt>.)&nbsp;
+Next, you must define <tt>JS_THREADSAFE</tt> when building the JS engine,
+either on the command-line (gmake/nmake) or in a universal header file.
+<h3>
+Windows</h3>
+
+<ul>
+<li>
+Use MSVC 4.2 or 5.0.</li>
+
+<li>
+For building from the IDE use <tt>js/src/js.mdp</tt>.&nbsp; (<tt>js.mdp</tt>
+is an MSVC4.2 project file, but if you load it into MSVC5, it will be converted
+to the newer project file format.)&nbsp; <font color="#CC0000">NOTE: makefile.win
+is an nmake file used only for building the JS-engine in the Mozilla browser.&nbsp;
+Don't attempt to use it to build the standalone JS-engine.</font></li>
+
+<li>
+If you prefer to build from the command-line, use '<tt>nmake -f js.mak</tt>'</li>
+
+<li>
+Executable shell <tt>js.exe</tt> and runtime library <tt>js32.dll</tt>
+are created in either <tt>js/src/Debug</tt> or <tt>js/src/Release</tt>.</li>
+</ul>
+
+<h3>
+Macintosh</h3>
+
+<ul>
+<li>
+Use CodeWarrior 3.x</li>
+
+<li>
+Load the project file <tt>js:src:macbuild:JSRef.mcp </tt>and select "Make"
+from the menu.</li>
+</ul>
+
+<h3>
+Unix</h3>
+
+<ul>
+<li>
+Use '<tt>gmake -f Makefile.ref</tt>' to build. To compile optimized code,
+pass <tt>BUILD_OPT=1</tt> on the gmake command line or preset it in the
+environment or <tt>Makefile.ref</tt>.&nbsp; <font color="#CC0000">NOTE:
+Do not attempt to use Makefile to build the standalone JavaScript engine.&nbsp;
+This file is used only for building the JS-engine in the Mozilla browser.</font></li>
+
+<li>
+<font color="#000000">Each platform on which JS is built must have a <tt>*.mk</tt>
+configuration file in the <tt>js/src/config</tt> directory.&nbsp; The configuration
+file specifies the compiler/linker to be used and allows for customization
+of command-line options.&nbsp; To date, the build system has been tested
+on Solaris, AIX, HP/UX, OSF, IRIX, x86 Linux and Windows NT.</font></li>
+
+<li>
+<font color="#000000">Most platforms will work with either the vendor compiler
+</font>or
+<a href="ftp://prep.ai.mit.edu/pub/gnu">gcc</a>.&nbsp;
+(Except that HP builds only work using the native compiler.&nbsp; gcc won't
+link correctly with shared libraries on that platform.&nbsp; If someone
+knows a way to fix this, <a href="mailto:wynholds@netscape.com">let us
+know</a>.)</li>
+
+<li>
+<font color="#000000">If you define <tt>JS_LIVECONNECT</tt>, gmake will
+descend into the liveconnect directory and build
+<a href="http://lxr.mozilla.org/mozilla/source/js/src/liveconnect/README.html">LiveConnect</a>
+after building the JS engine.</font></li>
+
+<li>
+To build a binary drop (a zip'ed up file of headers, libraries, binaries),
+check out <tt>mozilla/config</tt> and <tt>mozilla/nsprpub/config</tt>.&nbsp;
+Use '<tt>gmake -f Makefile.ref nsinstall-target all export ship</tt>'</li>
+</ul>
+
+<h2>
+<a NAME="Debugging"></a>Debugging notes</h2>
+
+<ul>
+<li>
+To turn on GC instrumentation, define <tt>JS_GCMETER</tt>.</li>
+
+<ul>
+<li>
+To turn on GC mark-phase debugging, useful to find leaked objects by their
+address, and to dump the GC heap, define <tt>GC_MARK_DEBUG</tt>.
+See the code in jsgc.c around the declaration and use of
+<tt>js_LiveThingToFind</tt>.</li>
+
+<li>
+To turn on the arena package's instrumentation, define <tt>JS_ARENAMETER</tt>.</li>
+
+<li>
+To turn on the hash table package's metering, define <tt>JS_HASHMETER</tt>.</li>
+</ul>
+
+<h2>
+<a NAME="Conventions"></a>Naming and coding conventions</h2>
+
+<ul>
+<li>
+Public function names begin with <tt>JS_</tt> followed by capitalized "intercaps",
+e.g. <tt>JS_NewObject</tt>.</li>
+
+<li>
+Extern but library-private function names use a <tt>js_</tt> prefix and
+mixed case, e.g. <tt>js_SearchScope</tt>.</li>
+
+<li>
+Most static function names have unprefixed, mixed-case names: <tt>GetChar</tt>.</li>
+
+<li>
+But static native methods of JS objects have lowercase, underscore-separated
+or intercaps names, e.g., <tt>str_indexOf</tt>.</li>
+
+<li>
+And library-private and static data use underscores, not intercaps (but
+library-private data do use a <tt>js_</tt> prefix).</li>
+
+<li>
+Scalar type names are lowercase and js-prefixed: <tt>jsdouble</tt>.</li>
+
+<li>
+Aggregate type names are JS-prefixed and mixed-case: <tt>JSObject.</tt></li>
+
+<li>
+Macros are generally <tt>ALL_CAPS </tt>and underscored, to call out potential
+side effects, multiple uses of a formal argument, etc.</li>
+
+<li>
+Four spaces of indentation per statement nesting level.</li>
+
+<li>
+Tabs are taken to be eight spaces, and an Emacs magic comment at the top
+of each file tries to help. If you're using MSVC or similar, you'll want
+to set tab width to 8, and help convert these files to be space-filled.
+<font color="#CC0000">Do not add hard tabs to source files; do remove them
+whenever possible.</font></li>
+
+<li>
+DLL entry points have their return type expanded within a <tt>JS_PUBLIC_API()</tt>
+macro call, to get the right Windows secret type qualifiers in the right
+places for all build variants.</li>
+
+<li>
+Callback functions that might be called from a DLL are similarly macroized
+with <tt>JS_STATIC_DLL_CALLBACK</tt> (if the function otherwise would be
+static to hide its name) or <tt>JS_DLL_CALLBACK</tt> (this macro takes
+no type argument; it should be used after the return type and before the
+function name).</li>
+</ul>
+
+<h2>
+<a NAME="JSAPI"></a>Using the JS API</h2>
+
+<h4>
+Starting up</h4>
+
+<pre><tt>&nbsp;&nbsp;&nbsp; /*
+&nbsp;&nbsp;&nbsp;&nbsp; * Tune this to avoid wasting space for shallow stacks, while saving on
+&nbsp;&nbsp;&nbsp;&nbsp; * malloc overhead/fragmentation for deep or highly-variable stacks.
+&nbsp;&nbsp;&nbsp;&nbsp; */
+&nbsp;&nbsp;&nbsp; #define STACK_CHUNK_SIZE&nbsp;&nbsp;&nbsp; 8192
+
+&nbsp;&nbsp;&nbsp; JSRuntime *rt;
+&nbsp;&nbsp;&nbsp; JSContext *cx;
+
+&nbsp;&nbsp;&nbsp; /* You need a runtime and one or more contexts to do anything with JS. */
+&nbsp;&nbsp;&nbsp; rt = JS_NewRuntime(0x400000L);
+&nbsp;&nbsp;&nbsp; if (!rt)
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; fail("can't create JavaScript runtime");
+&nbsp;&nbsp;&nbsp; cx = JS_NewContext(rt, STACK_CHUNK_SIZE);
+&nbsp;&nbsp;&nbsp; if (!cx)
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; fail("can't create JavaScript context");
+
+&nbsp;&nbsp;&nbsp; /*
+&nbsp;&nbsp;&nbsp;&nbsp; * The context definitely wants a global object, in order to have standard
+&nbsp;&nbsp;&nbsp;&nbsp; * classes and functions like Date and parseInt.&nbsp; See below for details on
+&nbsp;&nbsp;&nbsp;&nbsp; * JS_NewObject.
+&nbsp;&nbsp;&nbsp;&nbsp; */
+&nbsp;&nbsp;&nbsp; JSObject *globalObj;
+
+&nbsp;&nbsp;&nbsp; globalObj = JS_NewObject(cx, &amp;my_global_class, 0, 0);
+&nbsp;&nbsp;&nbsp; JS_InitStandardClasses(cx, globalObj);</tt></pre>
+
+<h4>
+Defining objects and properties</h4>
+
+<pre><tt>&nbsp;&nbsp;&nbsp; /* Statically initialize a class to make "one-off" objects. */
+&nbsp;&nbsp;&nbsp; JSClass my_class = {
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; "MyClass",
+
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; /* All of these can be replaced with the corresponding JS_*Stub
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; function pointers. */
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; my_addProperty, my_delProperty, my_getProperty, my_setProperty,
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; my_enumerate,&nbsp;&nbsp; my_resolve,&nbsp;&nbsp;&nbsp;&nbsp; my_convert,&nbsp;&nbsp;&nbsp;&nbsp; my_finalize
+&nbsp;&nbsp;&nbsp; };
+
+&nbsp;&nbsp;&nbsp; JSObject *obj;
+
+&nbsp;&nbsp;&nbsp; /*
+&nbsp;&nbsp;&nbsp;&nbsp; * Define an object named in the global scope that can be enumerated by
+&nbsp;&nbsp;&nbsp;&nbsp; * for/in loops.&nbsp; The parent object is passed as the second argument, as
+&nbsp;&nbsp;&nbsp;&nbsp; * with all other API calls that take an object/name pair.&nbsp; The prototype
+&nbsp;&nbsp;&nbsp;&nbsp; * passed in is null, so the default object prototype will be used.
+&nbsp;&nbsp;&nbsp;&nbsp; */
+&nbsp;&nbsp;&nbsp; obj = JS_DefineObject(cx, globalObj, "myObject", &amp;my_class, NULL,
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; JSPROP_ENUMERATE);
+
+&nbsp;&nbsp;&nbsp; /*
+&nbsp;&nbsp;&nbsp;&nbsp; * Define a bunch of properties with a JSPropertySpec array statically
+&nbsp;&nbsp;&nbsp;&nbsp; * initialized and terminated with a null-name entry.&nbsp; Besides its name,
+&nbsp;&nbsp;&nbsp;&nbsp; * each property has a "tiny" identifier (MY_COLOR, e.g.) that can be used
+&nbsp;&nbsp;&nbsp;&nbsp; * in switch statements (in a common my_getProperty function, for example).
+&nbsp;&nbsp;&nbsp;&nbsp; */
+&nbsp;&nbsp;&nbsp; enum my_tinyid {
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; MY_COLOR, MY_HEIGHT, MY_WIDTH, MY_FUNNY, MY_ARRAY, MY_RDONLY
+&nbsp;&nbsp;&nbsp; };
+
+&nbsp;&nbsp;&nbsp; static JSPropertySpec my_props[] = {
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; {"color",&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; MY_COLOR,&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; JSPROP_ENUMERATE},
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; {"height",&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; MY_HEIGHT,&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; JSPROP_ENUMERATE},
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; {"width",&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; MY_WIDTH,&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; JSPROP_ENUMERATE},
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; {"funny",&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; MY_FUNNY,&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; JSPROP_ENUMERATE},
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; {"array",&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; MY_ARRAY,&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; JSPROP_ENUMERATE},
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; {"rdonly",&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; MY_RDONLY,&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; JSPROP_READONLY},
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; {0}
+&nbsp;&nbsp;&nbsp; };
+
+&nbsp;&nbsp;&nbsp; JS_DefineProperties(cx, obj, my_props);
+
+&nbsp;&nbsp;&nbsp; /*
+&nbsp;&nbsp;&nbsp;&nbsp; * Given the above definitions and call to JS_DefineProperties, obj will
+&nbsp;&nbsp;&nbsp;&nbsp; * need this sort of "getter" method in its class (my_class, above).&nbsp; See
+&nbsp;&nbsp;&nbsp;&nbsp; * the example for the "It" class in js.c.
+&nbsp;&nbsp;&nbsp;&nbsp; */
+&nbsp;&nbsp;&nbsp; static JSBool
+&nbsp;&nbsp;&nbsp; my_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+&nbsp;&nbsp;&nbsp; {
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; if (JSVAL_IS_INT(id)) {
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; switch (JSVAL_TO_INT(id)) {
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; case MY_COLOR:&nbsp; *vp = . . .; break;
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; case MY_HEIGHT: *vp = . . .; break;
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; case MY_WIDTH:&nbsp; *vp = . . .; break;
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; case MY_FUNNY:&nbsp; *vp = . . .; break;
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; case MY_ARRAY:&nbsp; *vp = . . .; break;
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; case MY_RDONLY: *vp = . . .; break;
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; }
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; }
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; return JS_TRUE;
+&nbsp;&nbsp;&nbsp; }</tt></pre>
+
+<h4>
+Defining functions</h4>
+
+<pre><tt>&nbsp;&nbsp;&nbsp; /* Define a bunch of native functions first: */
+&nbsp;&nbsp;&nbsp; static JSBool
+&nbsp;&nbsp;&nbsp; my_abs(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+&nbsp;&nbsp;&nbsp; {
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; jsdouble x, z;
+
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; if (!JS_ValueToNumber(cx, argv[0], &amp;x))
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; return JS_FALSE;
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; z = (x &lt; 0) ? -x : x;
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; return JS_NewDoubleValue(cx, z, rval);
+&nbsp;&nbsp;&nbsp; }
+
+&nbsp;&nbsp;&nbsp; . . .
+
+&nbsp;&nbsp;&nbsp; /*
+&nbsp;&nbsp;&nbsp;&nbsp; * Use a JSFunctionSpec array terminated with a null name to define a
+&nbsp;&nbsp;&nbsp;&nbsp; * bunch of native functions.
+&nbsp;&nbsp;&nbsp;&nbsp; */
+&nbsp;&nbsp;&nbsp; static JSFunctionSpec my_functions[] = {
+&nbsp;&nbsp;&nbsp; /*&nbsp;&nbsp;&nbsp; name&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; native&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; nargs&nbsp;&nbsp;&nbsp; */
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; {"abs",&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; my_abs,&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; 1},
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; {"acos",&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; my_acos,&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; 1},
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; {"asin",&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; my_asin,&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; 1},
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; . . .
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; {0}
+&nbsp;&nbsp;&nbsp; };
+
+&nbsp;&nbsp;&nbsp; /*
+&nbsp;&nbsp;&nbsp;&nbsp; * Pass a particular object to define methods for it alone.&nbsp; If you pass
+&nbsp;&nbsp;&nbsp;&nbsp; * a prototype object, the methods will apply to all instances past and
+&nbsp;&nbsp;&nbsp;&nbsp; * future of the prototype's class (see below for classes).
+&nbsp;&nbsp;&nbsp;&nbsp; */
+&nbsp;&nbsp;&nbsp; JS_DefineFunctions(cx, globalObj, my_functions);</tt></pre>
+
+<h4>
+Defining classes</h4>
+
+<pre><tt>&nbsp;&nbsp;&nbsp; /*
+&nbsp;&nbsp;&nbsp;&nbsp; * This pulls together the above API elements by defining a constructor
+&nbsp;&nbsp;&nbsp;&nbsp; * function, a prototype object, and properties of the prototype and of
+&nbsp;&nbsp;&nbsp;&nbsp; * the constructor, all with one API call.
+&nbsp;&nbsp;&nbsp;&nbsp; *
+&nbsp;&nbsp;&nbsp;&nbsp; * Initialize a class by defining its constructor function, prototype, and
+&nbsp;&nbsp;&nbsp;&nbsp; * per-instance and per-class properties.&nbsp; The latter are called "static"
+&nbsp;&nbsp;&nbsp;&nbsp; * below by analogy to Java.&nbsp; They are defined in the constructor object's
+&nbsp;&nbsp;&nbsp;&nbsp; * scope, so that 'MyClass.myStaticProp' works along with 'new MyClass()'.
+&nbsp;&nbsp;&nbsp;&nbsp; *
+&nbsp;&nbsp;&nbsp;&nbsp; * JS_InitClass takes a lot of arguments, but you can pass null for any of
+&nbsp;&nbsp;&nbsp;&nbsp; * the last four if there are no such properties or methods.
+&nbsp;&nbsp;&nbsp;&nbsp; *
+&nbsp;&nbsp;&nbsp;&nbsp; * Note that you do not need to call JS_InitClass to make a new instance of
+&nbsp;&nbsp;&nbsp;&nbsp; * that class -- otherwise there would be a chicken-and-egg problem making
+&nbsp;&nbsp;&nbsp;&nbsp; * the global object -- but you should call JS_InitClass if you require a
+&nbsp;&nbsp;&nbsp;&nbsp; * constructor function for script authors to call via new, and/or a class
+&nbsp;&nbsp;&nbsp;&nbsp; * prototype object ('MyClass.prototype') for authors to extend with new
+&nbsp;&nbsp;&nbsp;&nbsp; * properties at run-time. In general, if you want to support multiple
+&nbsp;&nbsp;&nbsp;&nbsp; * instances that share behavior, use JS_InitClass.
+&nbsp;&nbsp;&nbsp;&nbsp; */
+&nbsp;&nbsp;&nbsp; protoObj = JS_InitClass(cx, globalObj, NULL, &amp;my_class,
+
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; /* native constructor function and min arg count */
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; MyClass, 0,
+
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; /* prototype object properties and methods -- these
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; will be "inherited" by all instances through
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; delegation up the instance's prototype link. */
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; my_props, my_methods,
+
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; /* class constructor properties and methods */
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; my_static_props, my_static_methods);</tt></pre>
+
+<h4>
+Running scripts</h4>
+
+<pre><tt>&nbsp;&nbsp;&nbsp; /* These should indicate source location for diagnostics. */
+&nbsp;&nbsp;&nbsp; char *filename;
+&nbsp;&nbsp;&nbsp; uintN lineno;
+
+&nbsp;&nbsp;&nbsp; /*
+&nbsp;&nbsp;&nbsp;&nbsp; * The return value comes back here -- if it could be a GC thing, you must
+&nbsp;&nbsp;&nbsp;&nbsp; * add it to the GC's "root set" with JS_AddRoot(cx, &amp;thing) where thing
+&nbsp;&nbsp;&nbsp;&nbsp; * is a JSString *, JSObject *, or jsdouble *, and remove the root before
+&nbsp;&nbsp;&nbsp;&nbsp; * rval goes out of scope, or when rval is no longer needed.
+&nbsp;&nbsp;&nbsp;&nbsp; */
+&nbsp;&nbsp;&nbsp; jsval rval;
+&nbsp;&nbsp;&nbsp; JSBool ok;
+
+&nbsp;&nbsp;&nbsp; /*
+&nbsp;&nbsp;&nbsp;&nbsp; * Some example source in a C string.&nbsp; Larger, non-null-terminated buffers
+&nbsp;&nbsp;&nbsp;&nbsp; * can be used, if you pass the buffer length to JS_EvaluateScript.
+&nbsp;&nbsp;&nbsp;&nbsp; */
+&nbsp;&nbsp;&nbsp; char *source = "x * f(y)";
+
+&nbsp;&nbsp;&nbsp; ok = JS_EvaluateScript(cx, globalObj, source, strlen(source),
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; filename, lineno, &amp;rval);
+
+&nbsp;&nbsp;&nbsp; if (ok) {
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; /* Should get a number back from the example source. */
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; jsdouble d;
+
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; ok = JS_ValueToNumber(cx, rval, &amp;d);
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; . . .
+&nbsp;&nbsp;&nbsp; }</tt></pre>
+
+<h4>
+Calling functions</h4>
+
+<pre><tt>&nbsp;&nbsp;&nbsp; /* Call a global function named "foo" that takes no arguments. */
+&nbsp;&nbsp;&nbsp; ok = JS_CallFunctionName(cx, globalObj, "foo", 0, 0, &amp;rval);
+
+&nbsp;&nbsp;&nbsp; jsval argv[2];
+
+&nbsp;&nbsp;&nbsp; /* Call a function in obj's scope named "method", passing two arguments. */
+&nbsp;&nbsp;&nbsp; argv[0] = . . .;
+&nbsp;&nbsp;&nbsp; argv[1] = . . .;
+&nbsp;&nbsp;&nbsp; ok = JS_CallFunctionName(cx, obj, "method", 2, argv, &amp;rval);</tt></pre>
+
+<h4>
+Shutting down</h4>
+
+<pre><tt>&nbsp;&nbsp;&nbsp; /* For each context you've created: */
+&nbsp;&nbsp;&nbsp; JS_DestroyContext(cx);
+
+&nbsp;&nbsp;&nbsp; /* For each runtime: */
+&nbsp;&nbsp;&nbsp; JS_DestroyRuntime(rt);
+
+&nbsp;&nbsp;&nbsp; /* And finally: */
+&nbsp;&nbsp;&nbsp; JS_ShutDown();</tt></pre>
+
+<h4>
+Debugging API</h4>
+See the<tt> trap, untrap, watch, unwatch, line2pc</tt>, and <tt>pc2line</tt>
+commands in <tt>js.c</tt>. Also the (scant) comments in <i>jsdbgapi.h</i>.
+<h2>
+<a NAME="Design"></a>Design walk-through</h2>
+This section must be brief for now -- it could easily turn into a book.
+<h4>
+JS "JavaScript Proper"</h4>
+JS modules declare and implement the JavaScript compiler, interpreter,
+decompiler, GC and atom manager, and standard classes.
+<p>JavaScript uses untyped bytecode and runtime type tagging of data values.
+The <tt>jsval</tt> type is a signed machine word that contains either a
+signed integer value (if the low bit is set), or a type-tagged pointer
+or boolean value (if the low bit is clear). Tagged pointers all refer to
+8-byte-aligned things in the GC heap.
+<p>Objects consist of a possibly shared structural description, called
+the map or scope; and unshared property values in a vector, called the
+slots. Object properties are associated with nonnegative integers stored
+in <tt>jsval</tt>'s, or with atoms (unique string descriptors) if named
+by an identifier or a non-integral index expression.
+<p>Scripts contain bytecode, source annotations, and a pool of string,
+number, and identifier literals. Functions are objects that extend scripts
+or native functions with formal parameters, a literal syntax, and a distinct
+primitive type ("function").
+<p>The compiler consists of a recursive-descent parser and a random-logic
+rather than table-driven lexical scanner. Semantic and lexical feedback
+are used to disambiguate hard cases such as missing semicolons, assignable
+expressions ("lvalues" in C parlance), etc. The parser generates bytecode
+as it parses, using fixup lists for downward branches and code buffering
+and rewriting for exceptional cases such as for loops. It attempts no error
+recovery. The interpreter executes the bytecode of top-level scripts, and
+calls itself indirectly to interpret function bodies (which are also scripts).
+All state associated with an interpreter instance is passed through formal
+parameters to the interpreter entry point; most implicit state is collected
+in a type named JSContext. Therefore, all API and almost all other functions
+in JSRef take a JSContext pointer as their first argument.
+<p>The decompiler translates postfix bytecode into infix source by consulting
+a separate byte-sized code, called source notes, to disambiguate bytecodes
+that result from more than one grammatical production.
+<p>The GC is a mark-and-sweep, non-conservative (exact) collector. It
+can allocate only fixed-sized things -- the current size is two machine
+words. It is used to hold JS object and string descriptors (but not property
+lists or string bytes), and double-precision floating point numbers. It
+runs automatically only when maxbytes (as passed to <tt>JS_NewRuntime()</tt>)
+bytes of GC things have been allocated and another thing-allocation request
+is made. JS API users should call <tt>JS_GC()</tt> or <tt>JS_MaybeGC()</tt>
+between script executions or from the branch callback, as often as necessary.
+<p>An important point about the GC's "exactness": you must add roots for
+new objects created by your native methods if you store references to them
+into a non-JS structure in the malloc heap or in static data. Also, if
+you make a new object in a native method, but do not store it through the
+<tt>rval</tt>
+result parameter (see my_abs in the "Using the JS API" section above)
+so that it is in a known root, the object is guaranteed to survive only
+until another new object is created. Either lock the first new object when
+making two in a row, or store it in a root you've added, or store it via
+rval.
+See the <a href="http://www.mozilla.org/js/spidermonkey/gctips.html">GC tips</a>
+document for more.
+<p>The atom manager consists of a hash table associating strings uniquely
+with scanner/parser information such as keyword type, index in script or
+function literal pool, etc. Atoms play three roles in JSRef: as literals
+referred to by unaligned 16-bit immediate bytecode operands, as unique
+string descriptors for efficient property name hashing, and as members
+of the root GC set for exact GC.
+<p>Native objects and methods for arrays, booleans, dates, functions, numbers,
+and strings are implemented using the JS API and certain internal interfaces
+used as "fast paths".
+<p>In general, errors are signaled by false or unoverloaded-null return
+values, and are reported using <tt>JS_ReportError()</tt> or one of its
+variants by the lowest level in order to provide the most detail. Client
+code can substitute its own error reporting function and suppress errors,
+or reflect them into Java or some other runtime system as exceptions, GUI
+dialogs, etc.
+<h2>
+File walk-through (OUT OF DATE!)</h2>
+
+<h4>
+jsapi.c, jsapi.h</h4>
+The public API to be used by almost all client code.&nbsp; If your client
+code can't make do with <tt>jsapi.h</tt>, and must reach into a friend
+or private js* file, please let us know so we can extend <tt>jsapi.h</tt>
+to include what you need in a fashion that we can support over the long
+run.
+<h4>
+jspubtd.h, jsprvtd.h</h4>
+These files exist to group struct and scalar typedefs so they can be used
+everywhere without dragging in struct definitions from N different files.
+The <tt>jspubtd.h</tt> file contains public typedefs, and is included by
+<tt>jsapi.h</tt>.
+The <tt>jsprvtd.h</tt> file contains private typedefs and is included by
+various .h files that need type names, but not type sizes or declarations.
+<h4>
+jsdbgapi.c, jsdbgapi.h</h4>
+The Debugging API, still very much under development. Provided so far:
+<ul>
+<li>
+Traps, with which breakpoints, single-stepping, step over, step out, and
+so on can be implemented. The debugger will have to consult jsopcode.def
+on its own to figure out where to plant trap instructions to implement
+functions like step out, but a future jsdbgapi.h will provide convenience
+interfaces to do these things. At most one trap per bytecode can be set.
+When a script (<tt>JSScript</tt>) is destroyed, all traps set in its bytecode
+are cleared.</li>
+
+<li>
+Watchpoints, for intercepting set operations on properties and running
+a debugger-supplied function that receives the old value and a pointer
+to the new one, which it can use to modify the new value being set.</li>
+
+<li>
+Line number to PC and back mapping functions. The line-to-PC direction
+"rounds" toward the next bytecode generated from a line greater than or
+equal to the input line, and may return the PC of a for-loop update part,
+if given the line number of the loop body's closing brace. Any line after
+the last one in a script or function maps to a PC one byte beyond the last
+bytecode in the script. An example, from perfect.js:</li>
+
+<pre><tt>14&nbsp;&nbsp; function perfect(n)
+15&nbsp;&nbsp; {
+16&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; print("The perfect numbers up to " +&nbsp; n + " are:");
+17
+18&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; // We build sumOfDivisors[i] to hold a string expression for
+19&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; // the sum of the divisors of i, excluding i itself.
+20&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; var sumOfDivisors = new ExprArray(n+1,1);
+21&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; for (var divisor = 2; divisor &lt;= n; divisor++) {
+22&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; for (var j = divisor + divisor; j &lt;= n; j += divisor) {
+23&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; sumOfDivisors[j] += " + " + divisor;
+24&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; }
+25&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; // At this point everything up to 'divisor' has its sumOfDivisors
+26&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; // expression calculated, so we can determine whether it's perfect
+27&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; // already by evaluating.
+28&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; if (eval(sumOfDivisors[divisor]) == divisor) {
+29&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; print("" + divisor + " = " + sumOfDivisors[divisor]);
+30&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; }
+31&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; }
+32&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; delete sumOfDivisors;
+33&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; print("That's all.");
+34&nbsp;&nbsp; }</tt></pre>
+The line number to PC and back mappings can be tested using the js program
+with the following script:
+<pre><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; load("perfect.js")
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; print(perfect)
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; dis(perfect)
+
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; print()
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; for (var ln = 0; ln &lt;= 40; ln++) {
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; var pc = line2pc(perfect,ln)
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; var ln2 = pc2line(perfect,pc)
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; print("\tline " + ln + " => pc " + pc + " => line " + ln2)
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; }</tt></pre>
+The result of the for loop over lines 0 to 40 inclusive is:
+<pre><tt>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 0 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 1 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 2 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 3 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 4 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 5 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 6 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 7 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 8 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 9 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 10 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 11 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 12 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 13 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 14 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 15 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 16 => pc 0 => line 16
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 17 => pc 19 => line 20
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 18 => pc 19 => line 20
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 19 => pc 19 => line 20
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 20 => pc 19 => line 20
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 21 => pc 36 => line 21
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 22 => pc 53 => line 22
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 23 => pc 74 => line 23
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 24 => pc 92 => line 22
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 25 => pc 106 => line 28
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 26 => pc 106 => line 28
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 27 => pc 106 => line 28
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 28 => pc 106 => line 28
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 29 => pc 127 => line 29
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 30 => pc 154 => line 21
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 31 => pc 154 => line 21
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 32 => pc 161 => line 32
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 33 => pc 172 => line 33
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 34 => pc 172 => line 33
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 35 => pc 172 => line 33
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 36 => pc 172 => line 33
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 37 => pc 172 => line 33
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 38 => pc 172 => line 33
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 39 => pc 172 => line 33
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; line 40 => pc 172 => line 33</tt></pre>
+</ul>
+
+<h4>
+jsconfig.h</h4>
+Various configuration macros defined as 0 or 1 depending on how <tt>JS_VERSION</tt>
+is defined (as 10 for JavaScript 1.0, 11 for JavaScript 1.1, etc.). Not
+all macros are tested around related code yet. In particular, JS 1.0 support
+is missing from JSRef. JS 1.2 support will appear in a future JSRef release.
+<br>&nbsp;
+<h4>
+js.c</h4>
+The "JS shell", a simple interpreter program that uses the JS API and more
+than a few internal interfaces (some of these internal interfaces could
+be replaced by <tt>jsapi.h</tt> calls). The js program built from this
+source provides a test vehicle for evaluating scripts and calling functions,
+trying out new debugger primitives, etc.
+<h4>
+jsarray.*, jsbool.*, jsdate.*, jsfun.*, jsmath.*, jsnum.*, jsstr.*</h4>
+These file pairs implement the standard classes and (where they exist)
+their underlying primitive types. They have similar structure, generally
+starting with class definitions and continuing with internal constructors,
+finalizers, and helper functions.
+<h4>
+jsobj.*, jsscope.*</h4>
+These two pairs declare and implement the JS object system. All of the
+following happen here:
+<ul>
+<li>
+creating objects by class and prototype, and finalizing objects;</li>
+
+<li>
+defining, looking up, getting, setting, and deleting properties;</li>
+
+<li>
+creating and destroying properties and binding names to them.</li>
+</ul>
+The details of a native object's map (scope) are mostly hidden in
+<tt>jsscope.[ch]</tt>.
+<h4>
+jsatom.c, jsatom.h</h4>
+The atom manager. Contains well-known string constants, their atoms, the
+global atom hash table and related state, the js_Atomize() function that
+turns a counted string of bytes into an atom, and literal pool (<tt>JSAtomMap</tt>)
+methods.
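+<p>
+The central idea is that the same bytes always map to one canonical pointer,
+so later comparisons are pointer comparisons. A toy interning routine shows
+the idea; the names and the linear scan are hypothetical, not the
+js_Atomize() internals.
+<pre><tt>
+/* Toy string interning: equal byte strings share one canonical copy. */
+#include <stdlib.h>
+#include <string.h>
+
+#define MAX_ATOMS 1024
+
+static char  *atoms[MAX_ATOMS];
+static size_t natoms;
+
+const char *intern(const char *bytes, size_t len)
+{
+    size_t i;
+    char *copy;
+
+    for (i = 0; i != natoms; i++) {
+        if (strlen(atoms[i]) == len && memcmp(atoms[i], bytes, len) == 0)
+            return atoms[i];            /* already interned: reuse the pointer */
+    }
+    if (natoms == MAX_ATOMS || (copy = malloc(len + 1)) == NULL)
+        return NULL;                    /* table full or out of memory */
+    memcpy(copy, bytes, len);
+    copy[len] = '\0';
+    return atoms[natoms++] = copy;      /* first occurrence becomes the atom */
+}
+</tt></pre>
+Equal strings then compare by pointer identity, which is what makes literal
+pools and property-name lookups cheap.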
+<h4>
+jsgc.c, jsgc.h</h4>
+The garbage collector: mark-and-sweep collection of GC-things (objects,
+strings, and doubles) allocated from the runtime's GC heap.
+<h4>
+jsinterp.*, jscntxt.*</h4>
+The bytecode interpreter, and related functions such as Call and AllocStack,
+live in <i>jsinterp.c</i>. The JSContext constructor and destructor are
+factored out into <i>jscntxt.c</i> for minimal linking when the compiler
+part of JS is split from the interpreter part into a separate program.
+<h4>
+jsemit.*, jsopcode.tbl, jsopcode.*, jsparse.*, jsscan.*, jsscript.*</h4>
+Compiler and decompiler modules. The <i>jsopcode.tbl</i> file is a C preprocessor
+source that defines almost everything there is to know about JS bytecodes.
+See its major comment for how to use it. For now, a debugger will use it
+and its dependents such as <i>jsopcode.h</i> directly, but over time we
+intend to extend <i>jsdbgapi.h</i> to hide uninteresting details and provide
+conveniences. The code generator is split across paragraphs of code in
+<i>jsparse.c</i>,
+and the utility methods called on <tt>JSCodeGenerator</tt> appear in <i>jsemit.c</i>.
+Source notes generated by <i>jsparse.c</i> and
+<i>jsemit.c</i> are used
+in <i>jsscript.c</i> to map line number to program counter and back.
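+<p>
+The mapping tabulated earlier in this document can be pictured as a table of
+(pc, line) pairs sorted by pc. The sketch below shows the two directions of
+lookup in that generic form; it is not the source-note encoding
+<i>jsscript.c</i> actually stores.
+<pre><tt>
+/* Generic pc/line lookups over a (pc, line) table sorted by pc. */
+#include <stddef.h>
+
+typedef struct { unsigned pc; unsigned line; } PCLine;
+
+/* Line owning a pc: the last entry whose pc does not exceed it. */
+unsigned line_for_pc(const PCLine *map, size_t n, unsigned pc)
+{
+    size_t i;
+    unsigned line = 0;
+    for (i = 0; i != n && map[i].pc <= pc; i++)
+        line = map[i].line;
+    return line;
+}
+
+/* First pc whose recorded line is at or past the requested line. */
+unsigned pc_for_line(const PCLine *map, size_t n, unsigned line)
+{
+    size_t i;
+    for (i = 0; i != n; i++) {
+        if (map[i].line >= line)
+            return map[i].pc;
+    }
+    return 0;
+}
+</tt></pre>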
+<h4>
+jstypes.h, jslog2.c</h4>
+Fundamental representation types and utility macros. This file alone among
+all .h files in JSRef must be included first by .c files. It is not nested
+in .h files, as other prerequisite .h files generally are, since it is
+also a direct dependency of most .c files and would be over-included if
+nested in addition to being directly included. The one "not-quite-a-macro
+macro" is the <tt>JS_CeilingLog2()</tt> function in <i>jslog2.c</i>.
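+<p>
+For reference, a ceiling-log2 helper computes the smallest n with 2^n >= value.
+A portable sketch of that calculation follows; it illustrates the contract,
+not the tuned code in <i>jslog2.c</i>.
+<pre><tt>
+/* Smallest n such that (1UL << n) >= value; 0 and 1 both yield 0. */
+unsigned ceiling_log2(unsigned long value)
+{
+    unsigned n = 0;
+    unsigned long pow2 = 1;
+
+    while (pow2 != 0 && pow2 < value) {  /* pow2 == 0 catches overflow wrap */
+        pow2 <<= 1;
+        n++;
+    }
+    return n;
+}
+</tt></pre>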
+<h4>
+jsarena.c, jsarena.h</h4>
+Last-In-First-Out allocation macros that amortize malloc costs and allow
+for en-masse freeing. See the paper mentioned in jsarena.h's major comment.
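+<p>
+The LIFO discipline is what makes the en-masse free cheap: allocations are
+bump-pointer carves from large blocks, and releasing the pool frees whole
+blocks at once. A minimal illustrative sketch, not the JSArena API itself:
+<pre><tt>
+/* Bump-pointer arena: many cheap allocations, one cheap release. */
+#include <stdlib.h>
+
+typedef struct Block { struct Block *next; } Block;
+
+typedef struct {
+    Block *blocks;       /* chain of malloc'ed blocks */
+    char  *avail;        /* next free byte in the current block */
+    char  *limit;        /* one past the end of the current block */
+    size_t blocksize;    /* preferred block payload size, e.g. 4096 */
+} Arena;
+
+void *arena_alloc(Arena *a, size_t size)
+{
+    void *p;
+    size = (size + 7) & ~(size_t)7;                      /* 8-byte alignment */
+    if (a->avail == NULL || (size_t)(a->limit - a->avail) < size) {
+        size_t payload = size > a->blocksize ? size : a->blocksize;
+        Block *b = malloc(sizeof(Block) + payload);
+        if (b == NULL)
+            return NULL;
+        b->next = a->blocks;
+        a->blocks = b;
+        a->avail = (char *)(b + 1);
+        a->limit = a->avail + payload;
+    }
+    p = a->avail;
+    a->avail += size;
+    return p;
+}
+
+void arena_free_all(Arena *a)                            /* the en-masse free */
+{
+    while (a->blocks != NULL) {
+        Block *next = a->blocks->next;
+        free(a->blocks);
+        a->blocks = next;
+    }
+    a->avail = a->limit = NULL;
+}
+</tt></pre>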
+<h4>
+jsutil.c, jsutil.h</h4>
+The <tt>JS_ASSERT</tt> macro is used throughout JSRef source as a proof
+device to make invariants and preconditions clear to the reader, and to
+hold the line during maintenance and evolution against regressions or violations
+of assumptions that it would be too expensive to test unconditionally at
+run-time. Certain assertions are followed by run-time tests that cope with
+assertion failure, but only where I'm too smart or paranoid to believe
+the assertion will never fail...
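+<p>
+The pattern described, an assertion for the reader and the debug build backed
+by a run-time recovery path where the invariant is not fully trusted, looks
+roughly like the generic macro below (not the jsutil.h definition itself).
+<pre><tt>
+/* Debug-only assertion plus a belt-and-braces check for release builds. */
+#include <stdio.h>
+#include <stdlib.h>
+
+#ifdef DEBUG
+#define ASSERT(expr) \
+    ((expr) ? (void)0 : (fprintf(stderr, "Assertion failure: %s, %s:%d\n", \
+                                 #expr, __FILE__, __LINE__), abort()))
+#else
+#define ASSERT(expr) ((void)0)
+#endif
+
+int safe_get(const int *table, size_t len, size_t i)
+{
+    ASSERT(i < len);      /* proof device: callers must pass a valid index */
+    if (i >= len)         /* run-time test kept because the caller is untrusted */
+        return 0;
+    return table[i];
+}
+</tt></pre>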
+<h4>
+jsclist.h</h4>
+Doubly-linked circular list struct and macros.
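+<p>
+The shape such a header describes is a list whose head is a sentinel, so an
+empty list points at itself and insertion and removal need no branches. The
+names below are illustrative, not the JSCList macros.
+<pre><tt>
+/* Sentinel-based circular doubly-linked list. */
+typedef struct CList {
+    struct CList *next;
+    struct CList *prev;
+} CList;
+
+void clist_init(CList *head)           { head->next = head->prev = head; }
+int  clist_is_empty(const CList *head) { return head->next == head; }
+
+void clist_insert_after(CList *pos, CList *node)
+{
+    node->next = pos->next;
+    node->prev = pos;
+    pos->next->prev = node;
+    pos->next = node;
+}
+
+void clist_remove(CList *node)
+{
+    node->prev->next = node->next;
+    node->next->prev = node->prev;
+    node->next = node->prev = node;    /* leave the removed node self-linked */
+}
+</tt></pre>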
+<h4>
+jscpucfg.c</h4>
+This standalone program generates <i>jscpucfg.h</i>, a header file containing
+bytes per word and other constants that depend on CPU architecture and
+C compiler type model. It tries to discover most of these constants by
+running its own experiments on the build host, so if you are cross-compiling,
+beware.
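+<p>
+The experiments are of the simple "inspect a value's representation and print
+a matching #define" kind, which is exactly why they must run on the build host
+rather than the target. A reduced sketch with hypothetical macro names:
+<pre><tt>
+/* Build-host probing sketch: emit word sizes and byte order as #defines. */
+#include <stdio.h>
+
+int main(void)
+{
+    unsigned long probe = 1;
+    int little_endian = *(unsigned char *)&probe == 1;
+
+    printf("#define BYTES_PER_INT     %d\n", (int)sizeof(int));
+    printf("#define BYTES_PER_LONG    %d\n", (int)sizeof(long));
+    printf("#define BYTES_PER_POINTER %d\n", (int)sizeof(void *));
+    printf("#define IS_LITTLE_ENDIAN  %d\n", little_endian);
+    printf("#define IS_BIG_ENDIAN     %d\n", !little_endian);
+    return 0;
+}
+</tt></pre>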
+<h4>
+prdtoa.c, prdtoa.h</h4>
+David Gay's portable double-precision floating point to string conversion
+code, with Permission To Use notice included.
+<h4>
+prhash.c, prhash.h</h4>
+Portable, extensible hash tables. These use multiplicative hash for strength
+reduction over division hash, yet with very good key distribution over
+power of two table sizes. Collisions resolve via chaining, so each entry
+burns a malloc and can fragment the heap.
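+<p>
+Multiplicative hashing multiplies the key's hash by a constant derived from
+the golden ratio and keeps the top bits, which spreads keys well over
+power-of-two table sizes without a division. A sketch, with the constant and
+names chosen for illustration:
+<pre><tt>
+/* Multiplicative hash: bucket = top log2(size) bits of hash * constant. */
+#include <stdint.h>
+
+#define GOLDEN_RATIO_32 0x9E3779B9u    /* 2^32 divided by the golden ratio */
+
+/* table size must be a power of two; shift = 32 - log2(table size) */
+static uint32_t bucket_index(uint32_t hash, unsigned shift)
+{
+    return (hash * GOLDEN_RATIO_32) >> shift;
+}
+</tt></pre>
+For a 1024-bucket table the shift is 22; collisions then chain off the chosen
+bucket exactly as described above.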
+<h4>
+prlong.c, prlong.h</h4>
+64-bit integer emulation, and compatible macros that use C's long long
+type where it exists (my last company mapped long long to a 128-bit type,
+but no real architecture does 128-bit ints yet).
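+<p>
+Where no native 64-bit type exists, a value is carried as {hi, lo} 32-bit
+halves and each operation propagates carries by hand. One operation as an
+illustrative sketch (struct and names hypothetical, not the prlong macros):
+<pre><tt>
+/* 64-bit addition emulated with two 32-bit halves and an explicit carry. */
+#include <stdint.h>
+
+typedef struct { uint32_t hi, lo; } I64;
+
+static I64 i64_add(I64 a, I64 b)
+{
+    I64 r;
+    r.lo = a.lo + b.lo;
+    r.hi = a.hi + b.hi + (r.lo < a.lo);    /* carry out of the low word */
+    return r;
+}
+</tt></pre>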
+<h4>
+jsosdep.h</h4>
+Annoying OS dependencies rationalized into a few "feature-test" macros
+such as <tt>JS_HAVE_LONG_LONG</tt>.
+<h4>
+jsprf.*</h4>
+Portable, buffer-overrun-resistant sprintf and friends. For no good reason
+save lack of time, the %e, %f, and %g formats cause your system's native
+sprintf, rather than <tt>JS_dtoa()</tt>, to be used. This bug doesn't affect
+JSRef, because it uses its own <tt>JS_dtoa()</tt> call in <i>jsnum.c</i>
+to convert from double to string, but it's a bug that we'll fix later,
+and one you should be aware of if you intend to use a <tt>JS_*printf()</tt>&nbsp;
+function with your own floating type arguments - various vendor sprintf's
+mishandle NaN, +/-Inf, and some even print normal floating values inaccurately.
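+<p>
+A caller who does route doubles through those formats can at least screen out
+the values vendor sprintfs most often get wrong. A small illustrative guard,
+not part of jsprf:
+<pre><tt>
+/* Handle non-finite doubles before using a native %g conversion. */
+#include <math.h>
+#include <stdio.h>
+
+static void print_double(double d)
+{
+    if (isnan(d))
+        fputs("NaN", stdout);
+    else if (isinf(d))
+        fputs(d > 0 ? "Infinity" : "-Infinity", stdout);
+    else
+        printf("%g", d);
+}
+</tt></pre>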
+<h4>
+prmjtime.c, prmjtime.h</h4>
+Time functions. These interfaces are named in a way that makes local vs.
+universal time confusion likely. Caveat emptor, and we're working on it.
+To make matters worse, Java (and therefore JavaScript) uses "local" time
+numbers (offsets from the epoch) in its Date class.
+
+
+<h2>
+<a NAME="Resources"></a>Additional Resources (links, API docs, and newsgroups)</h2>
+<ul>
+<li><a href ="http://www.mozilla.org/js/">http://www.mozilla.org/js/</a>
+<li><a href ="http://www.mozilla.org/js/spidermonkey/">http://www.mozilla.org/js/spidermonkey/</a>
+<li><a href ="news://news.mozilla.org/netscape.public.mozilla.jseng">news://news.mozilla.org/netscape.public.mozilla.jseng</a>
+</ul>
+
+
+
+</body>
+</html>
diff --git a/src/third_party/js-1.7/SpiderMonkey.rsp b/src/third_party/js-1.7/SpiderMonkey.rsp
new file mode 100644
index 00000000000..8025c6cd627
--- /dev/null
+++ b/src/third_party/js-1.7/SpiderMonkey.rsp
@@ -0,0 +1,12 @@
+mozilla/js/src/*
+mozilla/js/src/config/*
+mozilla/js/src/fdlibm/*
+mozilla/js/src/liveconnect/*
+mozilla/js/src/liveconnect/_jni/*
+mozilla/js/src/liveconnect/classes/*
+mozilla/js/src/liveconnect/classes/netscape/*
+mozilla/js/src/liveconnect/classes/netscape/javascript/*
+mozilla/js/src/liveconnect/config/*
+mozilla/js/src/liveconnect/macbuild/*
+mozilla/js/src/liveconnect/macbuild/JavaSession/*
+mozilla/js/src/macbuild/*
diff --git a/src/third_party/js-1.7/Y.js b/src/third_party/js-1.7/Y.js
new file mode 100644
index 00000000000..e92a65a5df9
--- /dev/null
+++ b/src/third_party/js-1.7/Y.js
@@ -0,0 +1,19 @@
+// The Y combinator, applied to the factorial function
+
+function factorial(proc) {
+ return function (n) {
+ return (n <= 1) ? 1 : n * proc(n-1);
+ }
+}
+
+function Y(outer) {
+ function inner(proc) {
+ function apply(arg) {
+ return proc(proc)(arg);
+ }
+ return outer(apply);
+ }
+ return inner(inner);
+}
+
+print("5! is " + Y(factorial)(5));
diff --git a/src/third_party/js-1.7/config.mk b/src/third_party/js-1.7/config.mk
new file mode 100644
index 00000000000..f622d30a81b
--- /dev/null
+++ b/src/third_party/js-1.7/config.mk
@@ -0,0 +1,186 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998-1999
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either of the GNU General Public License Version 2 or later (the "GPL"),
+# or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+ifdef JS_DIST
+DIST = $(JS_DIST)
+else
+DIST = $(DEPTH)/../../dist
+endif
+
+# Set os+release dependent make variables
+OS_ARCH := $(subst /,_,$(shell uname -s | sed /\ /s//_/))
+
+# Attempt to differentiate between SunOS 5.4 and x86 5.4
+OS_CPUARCH := $(shell uname -m)
+ifeq ($(OS_CPUARCH),i86pc)
+OS_RELEASE := $(shell uname -r)_$(OS_CPUARCH)
+else
+ifeq ($(OS_ARCH),AIX)
+OS_RELEASE := $(shell uname -v).$(shell uname -r)
+else
+OS_RELEASE := $(shell uname -r)
+endif
+endif
+ifeq ($(OS_ARCH),IRIX64)
+OS_ARCH := IRIX
+endif
+
+# Handle output from win32 unames other than Netscape's version
+ifeq (,$(filter-out Windows_95 Windows_98 CYGWIN_95-4.0 CYGWIN_98-4.10, $(OS_ARCH)))
+ OS_ARCH := WIN95
+endif
+ifeq ($(OS_ARCH),WIN95)
+ OS_ARCH := WINNT
+ OS_RELEASE := 4.0
+endif
+ifeq ($(OS_ARCH), Windows_NT)
+ OS_ARCH := WINNT
+ OS_MINOR_RELEASE := $(shell uname -v)
+ ifeq ($(OS_MINOR_RELEASE),00)
+ OS_MINOR_RELEASE = 0
+ endif
+ OS_RELEASE := $(OS_RELEASE).$(OS_MINOR_RELEASE)
+endif
+ifeq (CYGWIN_NT,$(findstring CYGWIN_NT,$(OS_ARCH)))
+ OS_RELEASE := $(patsubst CYGWIN_NT-%,%,$(OS_ARCH))
+ OS_ARCH := WINNT
+endif
+ifeq ($(OS_ARCH), CYGWIN32_NT)
+ OS_ARCH := WINNT
+endif
+ifeq (MINGW32_NT,$(findstring MINGW32_NT,$(OS_ARCH)))
+ OS_RELEASE := $(patsubst MINGW32_NT-%,%,$(OS_ARCH))
+ OS_ARCH := WINNT
+endif
+
+# Virtually all Linux versions are identical.
+# Any distinctions are handled in linux.h
+ifeq ($(OS_ARCH),Linux)
+OS_CONFIG := Linux_All
+else
+ifeq ($(OS_ARCH),dgux)
+OS_CONFIG := dgux
+else
+ifeq ($(OS_ARCH),Darwin)
+OS_CONFIG := Darwin
+else
+OS_CONFIG := $(OS_ARCH)$(OS_OBJTYPE)$(OS_RELEASE)
+endif
+endif
+endif
+
+ASFLAGS =
+DEFINES =
+
+ifeq ($(OS_ARCH), WINNT)
+INSTALL = nsinstall
+CP = cp
+else
+INSTALL = $(DIST)/bin/nsinstall
+CP = cp
+endif
+
+ifdef BUILD_OPT
+OPTIMIZER = -O
+DEFINES += -UDEBUG -DNDEBUG -UDEBUG_$(USER)
+OBJDIR_TAG = _OPT
+else
+ifdef USE_MSVC
+OPTIMIZER = -Zi
+else
+OPTIMIZER = -g
+endif
+DEFINES += -DDEBUG -DDEBUG_$(USER)
+OBJDIR_TAG = _DBG
+endif
+
+SO_SUFFIX = so
+
+NS_USE_NATIVE = 1
+
+# Java stuff
+CLASSDIR = $(DEPTH)/liveconnect/classes
+JAVA_CLASSES = $(patsubst %.java,%.class,$(JAVA_SRCS))
+TARGETS += $(addprefix $(CLASSDIR)/$(OBJDIR)/$(JARPATH)/, $(JAVA_CLASSES))
+JAVAC = $(JDK)/bin/javac
+JAVAC_FLAGS = -classpath "$(CLASSPATH)" -d $(CLASSDIR)/$(OBJDIR)
+ifeq ($(OS_ARCH), WINNT)
+ SEP = ;
+else
+ SEP = :
+endif
+CLASSPATH = $(JDK)/lib/classes.zip$(SEP)$(CLASSDIR)/$(OBJDIR)
+
+include $(DEPTH)/config/$(OS_CONFIG).mk
+
+ifndef OBJ_SUFFIX
+ifdef USE_MSVC
+OBJ_SUFFIX = obj
+else
+OBJ_SUFFIX = o
+endif
+endif
+
+ifndef HOST_BIN_SUFFIX
+ifeq ($(OS_ARCH),WINNT)
+HOST_BIN_SUFFIX = .exe
+else
+HOST_BIN_SUFFIX =
+endif
+endif
+
+# Name of the binary code directories
+ifdef BUILD_IDG
+OBJDIR = $(OS_CONFIG)$(OBJDIR_TAG).OBJD
+else
+OBJDIR = $(OS_CONFIG)$(OBJDIR_TAG).OBJ
+endif
+VPATH = $(OBJDIR)
+
+# Automatic make dependencies file
+DEPENDENCIES = $(OBJDIR)/.md
+
+LCJAR = js15lc30.jar
+
+# Library name
+LIBDIR := lib
+ifeq ($(CPU_ARCH), x86_64)
+LIBDIR := lib64
+endif
+
diff --git a/src/third_party/js-1.7/config/AIX4.1.mk b/src/third_party/js-1.7/config/AIX4.1.mk
new file mode 100644
index 00000000000..09c7cb94cf3
--- /dev/null
+++ b/src/third_party/js-1.7/config/AIX4.1.mk
@@ -0,0 +1,65 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for AIX
+#
+
+CC = xlC_r
+CCC = xlC_r
+
+RANLIB = ranlib
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+ARCH := aix
+CPU_ARCH = rs6000
+GFX_ARCH = x
+INLINES = js_compare_and_swap:js_fast_lock1:js_fast_unlock1:js_lock_get_slot:js_lock_set_slot:js_lock_scope1
+
+OS_CFLAGS = -qarch=com -qinline+$(INLINES) -DXP_UNIX -DAIX -DAIXV3 -DSYSV -DHAVE_LOCALTIME_R
+OS_LIBS = -lbsd -lsvld -lm
+#-lpthreads -lc_r
+
+MKSHLIB = $(LD) -bM:SRE -bh:4 -bnoentry -berok
+XLDFLAGS += -lc
+
+ifdef JS_THREADSAFE
+XLDFLAGS += -lsvld
+endif
diff --git a/src/third_party/js-1.7/config/AIX4.2.mk b/src/third_party/js-1.7/config/AIX4.2.mk
new file mode 100644
index 00000000000..1e3f1f183d1
--- /dev/null
+++ b/src/third_party/js-1.7/config/AIX4.2.mk
@@ -0,0 +1,64 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for AIX
+#
+
+CC = xlC_r
+CCC = xlC_r
+CFLAGS += -qarch=com -qnoansialias -qinline+$(INLINES) -DXP_UNIX -DAIX -DAIXV3 -DSYSV -DHAVE_LOCALTIME_R
+
+RANLIB = ranlib
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+ARCH := aix
+CPU_ARCH = rs6000
+GFX_ARCH = x
+INLINES = js_compare_and_swap:js_fast_lock1:js_fast_unlock1:js_lock_get_slot:js_lock_set_slot:js_lock_scope1
+
+#-lpthreads -lc_r
+
+MKSHLIB = /usr/lpp/xlC/bin/makeC++SharedLib_r -p 0 -G -berok
+
+ifdef JS_THREADSAFE
+XLDFLAGS += -ldl
+endif
+
diff --git a/src/third_party/js-1.7/config/AIX4.3.mk b/src/third_party/js-1.7/config/AIX4.3.mk
new file mode 100644
index 00000000000..df05d8c9256
--- /dev/null
+++ b/src/third_party/js-1.7/config/AIX4.3.mk
@@ -0,0 +1,65 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for AIX
+#
+
+CC = xlC_r
+CCC = xlC_r
+CFLAGS += -qarch=com -qnoansialias -qinline+$(INLINES) -DXP_UNIX -DAIX -DAIXV3 -DSYSV -DAIX4_3 -DHAVE_LOCALTIME_R
+
+RANLIB = ranlib
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+ARCH := aix
+CPU_ARCH = rs6000
+GFX_ARCH = x
+INLINES = js_compare_and_swap:js_fast_lock1:js_fast_unlock1:js_lock_get_slot:js_lock_set_slot:js_lock_scope1
+
+#-lpthreads -lc_r
+
+MKSHLIB_BIN = /usr/ibmcxx/bin/makeC++SharedLib_r
+MKSHLIB = $(MKSHLIB_BIN) -p 0 -G -berok -bM:UR
+
+ifdef JS_THREADSAFE
+XLDFLAGS += -ldl
+endif
+
diff --git a/src/third_party/js-1.7/config/CVS/Entries b/src/third_party/js-1.7/config/CVS/Entries
new file mode 100644
index 00000000000..01df8fbee54
--- /dev/null
+++ b/src/third_party/js-1.7/config/CVS/Entries
@@ -0,0 +1,36 @@
+/AIX4.1.mk/1.7/Sat Feb 12 20:10:33 2005//TJS_170
+/AIX4.2.mk/1.9/Sat Feb 12 20:10:33 2005//TJS_170
+/AIX4.3.mk/1.9/Sat Feb 12 20:10:33 2005//TJS_170
+/Darwin.mk/1.6/Mon Feb 5 16:24:49 2007//TJS_170
+/Darwin1.3.mk/1.3/Sat Feb 12 20:10:33 2005//TJS_170
+/Darwin1.4.mk/1.3/Sat Feb 12 20:10:33 2005//TJS_170
+/Darwin5.2.mk/1.3/Sat Feb 12 20:10:33 2005//TJS_170
+/Darwin5.3.mk/1.3/Sat Feb 12 20:10:33 2005//TJS_170
+/HP-UXB.10.10.mk/1.9/Sat Feb 12 20:10:33 2005//TJS_170
+/HP-UXB.10.20.mk/1.8/Sat Feb 12 20:10:33 2005//TJS_170
+/HP-UXB.11.00.mk/1.9/Sat Feb 12 20:10:33 2005//TJS_170
+/IRIX.mk/1.9/Sat Feb 12 20:10:33 2005//TJS_170
+/IRIX5.3.mk/1.7/Sat Feb 12 20:10:33 2005//TJS_170
+/IRIX6.1.mk/1.7/Sat Feb 12 20:10:33 2005//TJS_170
+/IRIX6.2.mk/1.6/Sat Feb 12 20:10:33 2005//TJS_170
+/IRIX6.3.mk/1.6/Sat Feb 12 20:10:33 2005//TJS_170
+/IRIX6.5.mk/1.6/Sat Feb 12 20:10:33 2005//TJS_170
+/Linux_All.mk/1.14/Tue May 10 19:53:44 2005//TJS_170
+/Mac_OS10.0.mk/1.4/Sat Feb 12 20:10:33 2005//TJS_170
+/OSF1V4.0.mk/1.9/Sat Feb 12 20:10:33 2005//TJS_170
+/OSF1V5.0.mk/1.5/Sat Feb 12 20:10:33 2005//TJS_170
+/SunOS4.1.4.mk/1.6/Sat Feb 12 20:10:33 2005//TJS_170
+/SunOS5.3.mk/1.7/Sat Feb 12 20:10:33 2005//TJS_170
+/SunOS5.4.mk/1.7/Sat Feb 12 20:10:33 2005//TJS_170
+/SunOS5.5.1.mk/1.8/Sat Feb 12 20:10:33 2005//TJS_170
+/SunOS5.5.mk/1.10/Sat Feb 12 20:10:33 2005//TJS_170
+/SunOS5.6.mk/1.13/Sat Feb 12 20:10:33 2005//TJS_170
+/SunOS5.7.mk/1.6/Sat Feb 12 20:10:33 2005//TJS_170
+/SunOS5.8.mk/1.4/Sat Feb 12 20:10:33 2005//TJS_170
+/SunOS5.9.mk/1.2/Sat Feb 12 20:10:33 2005//TJS_170
+/WINNT4.0.mk/1.15/Wed Jul 18 19:55:15 2007//TJS_170
+/WINNT5.0.mk/1.10/Fri Aug 10 23:23:38 2007//TJS_170
+/WINNT5.1.mk/1.6/Fri Aug 10 23:23:38 2007//TJS_170
+/WINNT5.2.mk/1.5/Fri Aug 10 23:23:38 2007//TJS_170
+/dgux.mk/1.7/Sat Feb 12 20:10:33 2005//TJS_170
+D
diff --git a/src/third_party/js-1.7/config/CVS/Repository b/src/third_party/js-1.7/config/CVS/Repository
new file mode 100644
index 00000000000..d0ce95c588f
--- /dev/null
+++ b/src/third_party/js-1.7/config/CVS/Repository
@@ -0,0 +1 @@
+mozilla/js/src/config
diff --git a/src/third_party/js-1.7/config/CVS/Root b/src/third_party/js-1.7/config/CVS/Root
new file mode 100644
index 00000000000..cdb6f4a0739
--- /dev/null
+++ b/src/third_party/js-1.7/config/CVS/Root
@@ -0,0 +1 @@
+:pserver:anonymous@cvs-mirror.mozilla.org:/cvsroot
diff --git a/src/third_party/js-1.7/config/CVS/Tag b/src/third_party/js-1.7/config/CVS/Tag
new file mode 100644
index 00000000000..2a8b15898d7
--- /dev/null
+++ b/src/third_party/js-1.7/config/CVS/Tag
@@ -0,0 +1 @@
+NJS_170
diff --git a/src/third_party/js-1.7/config/Darwin.mk b/src/third_party/js-1.7/config/Darwin.mk
new file mode 100644
index 00000000000..23b503e376f
--- /dev/null
+++ b/src/third_party/js-1.7/config/Darwin.mk
@@ -0,0 +1,83 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Steve Zellers (zellers@apple.com)
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config for Mac OS X as of PR3
+# Just ripped from Linux config
+#
+
+CC = cc
+CCC = g++
+CFLAGS += -Wall -Wno-format
+OS_CFLAGS = -DXP_UNIX -DSVR4 -DSYSV -D_BSD_SOURCE -DPOSIX_SOURCE -DDARWIN
+
+RANLIB = ranlib
+MKSHLIB = $(CC) -dynamiclib $(XMKSHLIBOPTS) -framework System
+
+SO_SUFFIX = dylib
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = $(shell uname -m)
+ifeq (86,$(findstring 86,$(CPU_ARCH)))
+CPU_ARCH = x86
+OS_CFLAGS+= -DX86_LINUX
+endif
+GFX_ARCH = x
+
+OS_LIBS = -lc -framework System
+
+ASFLAGS += -x assembler-with-cpp
+
+ifeq ($(CPU_ARCH),alpha)
+
+# Ask the C compiler on alpha linux to let us work with denormalized
+# double values, which are required by the ECMA spec.
+
+OS_CFLAGS += -mieee
+endif
+
+# Use the editline library to provide line-editing support.
+JS_EDITLINE = 1
+
+# Don't allow Makefile.ref to use libmath
+NO_LIBM = 1
+
diff --git a/src/third_party/js-1.7/config/Darwin1.3.mk b/src/third_party/js-1.7/config/Darwin1.3.mk
new file mode 100755
index 00000000000..05d3767a0d2
--- /dev/null
+++ b/src/third_party/js-1.7/config/Darwin1.3.mk
@@ -0,0 +1,81 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Steve Zellers (zellers@apple.com)
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config for Mac OS X as of PR3
+# Just ripped from Linux config
+#
+
+CC = cc
+CCC = g++
+CFLAGS += -Wall -Wno-format
+OS_CFLAGS = -DXP_UNIX -DSVR4 -DSYSV -D_BSD_SOURCE -DPOSIX_SOURCE -DRHAPSODY
+
+RANLIB = ranlib
+MKSHLIB = libtool $(XMKSHLIBOPTS) -framework System
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = $(shell uname -m)
+ifeq (86,$(findstring 86,$(CPU_ARCH)))
+CPU_ARCH = x86
+OS_CFLAGS+= -DX86_LINUX
+endif
+GFX_ARCH = x
+
+OS_LIBS = -lc -framework System
+
+ASFLAGS += -x assembler-with-cpp
+
+ifeq ($(CPU_ARCH),alpha)
+
+# Ask the C compiler on alpha linux to let us work with denormalized
+# double values, which are required by the ECMA spec.
+
+OS_CFLAGS += -mieee
+endif
+
+# Use the editline library to provide line-editing support.
+JS_EDITLINE = 1
+
+# Don't allow Makefile.ref to use libmath
+NO_LIBM = 1
+
diff --git a/src/third_party/js-1.7/config/Darwin1.4.mk b/src/third_party/js-1.7/config/Darwin1.4.mk
new file mode 100755
index 00000000000..f7b6af8ec0a
--- /dev/null
+++ b/src/third_party/js-1.7/config/Darwin1.4.mk
@@ -0,0 +1,41 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mike McCabe <mike+mozilla@meer.net>
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+include $(DEPTH)/config/Darwin1.3.mk
diff --git a/src/third_party/js-1.7/config/Darwin5.2.mk b/src/third_party/js-1.7/config/Darwin5.2.mk
new file mode 100755
index 00000000000..9b9b6ff0977
--- /dev/null
+++ b/src/third_party/js-1.7/config/Darwin5.2.mk
@@ -0,0 +1,81 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Steve Zellers (zellers@apple.com)
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config for Mac OS X as of PR3
+# Just ripped from Linux config
+#
+
+CC = cc
+CCC = g++
+CFLAGS += -Wall -Wno-format
+OS_CFLAGS = -DXP_UNIX -DSVR4 -DSYSV -D_BSD_SOURCE -DPOSIX_SOURCE -DDARWIN
+
+RANLIB = ranlib
+MKSHLIB = libtool $(XMKSHLIBOPTS) -framework System
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = $(shell uname -m)
+ifeq (86,$(findstring 86,$(CPU_ARCH)))
+CPU_ARCH = x86
+OS_CFLAGS+= -DX86_LINUX
+endif
+GFX_ARCH = x
+
+OS_LIBS = -lc -framework System
+
+ASFLAGS += -x assembler-with-cpp
+
+ifeq ($(CPU_ARCH),alpha)
+
+# Ask the C compiler on alpha linux to let us work with denormalized
+# double values, which are required by the ECMA spec.
+
+OS_CFLAGS += -mieee
+endif
+
+# Use the editline library to provide line-editing support.
+JS_EDITLINE = 1
+
+# Don't allow Makefile.ref to use libmath
+NO_LIBM = 1
+
diff --git a/src/third_party/js-1.7/config/Darwin5.3.mk b/src/third_party/js-1.7/config/Darwin5.3.mk
new file mode 100644
index 00000000000..9b9b6ff0977
--- /dev/null
+++ b/src/third_party/js-1.7/config/Darwin5.3.mk
@@ -0,0 +1,81 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Steve Zellers (zellers@apple.com)
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config for Mac OS X as of PR3
+# Just ripped from Linux config
+#
+
+CC = cc
+CCC = g++
+CFLAGS += -Wall -Wno-format
+OS_CFLAGS = -DXP_UNIX -DSVR4 -DSYSV -D_BSD_SOURCE -DPOSIX_SOURCE -DDARWIN
+
+RANLIB = ranlib
+MKSHLIB = libtool $(XMKSHLIBOPTS) -framework System
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = $(shell uname -m)
+ifeq (86,$(findstring 86,$(CPU_ARCH)))
+CPU_ARCH = x86
+OS_CFLAGS+= -DX86_LINUX
+endif
+GFX_ARCH = x
+
+OS_LIBS = -lc -framework System
+
+ASFLAGS += -x assembler-with-cpp
+
+ifeq ($(CPU_ARCH),alpha)
+
+# Ask the C compiler on alpha linux to let us work with denormalized
+# double values, which are required by the ECMA spec.
+
+OS_CFLAGS += -mieee
+endif
+
+# Use the editline library to provide line-editing support.
+JS_EDITLINE = 1
+
+# Don't allow Makefile.ref to use libmath
+NO_LIBM = 1
+
diff --git a/src/third_party/js-1.7/config/HP-UXB.10.10.mk b/src/third_party/js-1.7/config/HP-UXB.10.10.mk
new file mode 100644
index 00000000000..8cd9d20671b
--- /dev/null
+++ b/src/third_party/js-1.7/config/HP-UXB.10.10.mk
@@ -0,0 +1,77 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for HPUX
+#
+
+# CC = gcc
+# CCC = g++
+# CFLAGS += -Wall -Wno-format -fPIC
+
+CC = cc -Ae +Z
+CCC = CC -Ae +a1 +eh +Z
+
+RANLIB = echo
+MKSHLIB = $(LD) -b
+
+SO_SUFFIX = sl
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = hppa
+GFX_ARCH = x
+
+OS_CFLAGS = -DXP_UNIX -DHPUX -DSYSV -DHAVE_LOCALTIME_R
+OS_LIBS = -ldld
+
+ifeq ($(OS_RELEASE),B.10)
+PLATFORM_FLAGS += -DHPUX10 -Dhpux10
+PORT_FLAGS += -DRW_NO_OVERLOAD_SCHAR -DHAVE_MODEL_H
+ifeq ($(OS_VERSION),.10)
+PLATFORM_FLAGS += -DHPUX10_10
+endif
+ifeq ($(OS_VERSION),.20)
+PLATFORM_FLAGS += -DHPUX10_20
+endif
+ifeq ($(OS_VERSION),.30)
+PLATFORM_FLAGS += -DHPUX10_30
+endif
+endif
diff --git a/src/third_party/js-1.7/config/HP-UXB.10.20.mk b/src/third_party/js-1.7/config/HP-UXB.10.20.mk
new file mode 100644
index 00000000000..8cd9d20671b
--- /dev/null
+++ b/src/third_party/js-1.7/config/HP-UXB.10.20.mk
@@ -0,0 +1,77 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for HPUX
+#
+
+# CC = gcc
+# CCC = g++
+# CFLAGS += -Wall -Wno-format -fPIC
+
+CC = cc -Ae +Z
+CCC = CC -Ae +a1 +eh +Z
+
+RANLIB = echo
+MKSHLIB = $(LD) -b
+
+SO_SUFFIX = sl
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = hppa
+GFX_ARCH = x
+
+OS_CFLAGS = -DXP_UNIX -DHPUX -DSYSV -DHAVE_LOCALTIME_R
+OS_LIBS = -ldld
+
+ifeq ($(OS_RELEASE),B.10)
+PLATFORM_FLAGS += -DHPUX10 -Dhpux10
+PORT_FLAGS += -DRW_NO_OVERLOAD_SCHAR -DHAVE_MODEL_H
+ifeq ($(OS_VERSION),.10)
+PLATFORM_FLAGS += -DHPUX10_10
+endif
+ifeq ($(OS_VERSION),.20)
+PLATFORM_FLAGS += -DHPUX10_20
+endif
+ifeq ($(OS_VERSION),.30)
+PLATFORM_FLAGS += -DHPUX10_30
+endif
+endif
diff --git a/src/third_party/js-1.7/config/HP-UXB.11.00.mk b/src/third_party/js-1.7/config/HP-UXB.11.00.mk
new file mode 100644
index 00000000000..239188d6011
--- /dev/null
+++ b/src/third_party/js-1.7/config/HP-UXB.11.00.mk
@@ -0,0 +1,80 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for HPUX
+#
+
+ifdef NS_USE_NATIVE
+ CC = cc +Z +DAportable +DS2.0 +u4
+# LD = aCC +Z -b -Wl,+s -Wl,-B,symbolic
+else
+ CC = gcc -Wall -Wno-format -fPIC
+ CCC = g++ -Wall -Wno-format -fPIC
+endif
+
+RANLIB = echo
+MKSHLIB = $(LD) -b
+
+SO_SUFFIX = sl
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = hppa
+GFX_ARCH = x
+
+OS_CFLAGS = -DXP_UNIX -DHPUX -DSYSV -D_HPUX -DNATIVE -D_POSIX_C_SOURCE=199506L -DHAVE_LOCALTIME_R
+OS_LIBS = -ldld
+
+XLDFLAGS = -lpthread
+
+ifeq ($(OS_RELEASE),B.10)
+PLATFORM_FLAGS += -DHPUX10 -Dhpux10
+PORT_FLAGS += -DRW_NO_OVERLOAD_SCHAR -DHAVE_MODEL_H
+ifeq ($(OS_VERSION),.10)
+PLATFORM_FLAGS += -DHPUX10_10
+endif
+ifeq ($(OS_VERSION),.20)
+PLATFORM_FLAGS += -DHPUX10_20
+endif
+ifeq ($(OS_VERSION),.30)
+PLATFORM_FLAGS += -DHPUX10_30
+endif
+endif
diff --git a/src/third_party/js-1.7/config/IRIX.mk b/src/third_party/js-1.7/config/IRIX.mk
new file mode 100644
index 00000000000..88b162f2271
--- /dev/null
+++ b/src/third_party/js-1.7/config/IRIX.mk
@@ -0,0 +1,87 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for IRIX
+#
+
+CPU_ARCH = mips
+GFX_ARCH = x
+
+RANLIB = /bin/true
+
+#NS_USE_GCC = 1
+
+ifndef NS_USE_NATIVE
+CC = gcc
+CCC = g++
+AS = $(CC) -x assembler-with-cpp
+ODD_CFLAGS = -Wall -Wno-format
+ifdef BUILD_OPT
+OPTIMIZER = -O6
+endif
+else
+ifeq ($(OS_RELEASE),6.2)
+CC = cc -n32 -DIRIX6_2
+endif
+ifeq ($(OS_RELEASE),6.3)
+CC = cc -n32 -DIRIX6_3
+endif
+ifeq ($(OS_RELEASE),6.5)
+CC = cc -n32 -DIRIX6_5
+endif
+CCC = CC
+# LD = CC
+ODD_CFLAGS = -fullwarn -xansi
+ifdef BUILD_OPT
+OPTIMIZER += -Olimit 4000
+endif
+endif
+
+# For purify
+HAVE_PURIFY = 1
+PURE_OS_CFLAGS = $(ODD_CFLAGS) -DXP_UNIX -DSVR4 -DSW_THREADS -DIRIX -DHAVE_LOCALTIME_R
+
+OS_CFLAGS = $(PURE_OS_CFLAGS) -MDupdate $(DEPENDENCIES)
+
+BSDECHO = echo
+MKSHLIB = $(LD) -n32 -shared
+
+# Use the editline library to provide line-editing support.
+JS_EDITLINE = 1
diff --git a/src/third_party/js-1.7/config/IRIX5.3.mk b/src/third_party/js-1.7/config/IRIX5.3.mk
new file mode 100644
index 00000000000..f38cc94874b
--- /dev/null
+++ b/src/third_party/js-1.7/config/IRIX5.3.mk
@@ -0,0 +1,44 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for IRIX5.3
+#
+
+include $(DEPTH)/config/IRIX.mk
diff --git a/src/third_party/js-1.7/config/IRIX6.1.mk b/src/third_party/js-1.7/config/IRIX6.1.mk
new file mode 100644
index 00000000000..354f1d119d9
--- /dev/null
+++ b/src/third_party/js-1.7/config/IRIX6.1.mk
@@ -0,0 +1,44 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for IRIX6.3
+#
+
+include $(DEPTH)/config/IRIX.mk
diff --git a/src/third_party/js-1.7/config/IRIX6.2.mk b/src/third_party/js-1.7/config/IRIX6.2.mk
new file mode 100644
index 00000000000..354f1d119d9
--- /dev/null
+++ b/src/third_party/js-1.7/config/IRIX6.2.mk
@@ -0,0 +1,44 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for IRIX6.3
+#
+
+include $(DEPTH)/config/IRIX.mk
diff --git a/src/third_party/js-1.7/config/IRIX6.3.mk b/src/third_party/js-1.7/config/IRIX6.3.mk
new file mode 100644
index 00000000000..354f1d119d9
--- /dev/null
+++ b/src/third_party/js-1.7/config/IRIX6.3.mk
@@ -0,0 +1,44 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for IRIX6.3
+#
+
+include $(DEPTH)/config/IRIX.mk
diff --git a/src/third_party/js-1.7/config/IRIX6.5.mk b/src/third_party/js-1.7/config/IRIX6.5.mk
new file mode 100644
index 00000000000..354f1d119d9
--- /dev/null
+++ b/src/third_party/js-1.7/config/IRIX6.5.mk
@@ -0,0 +1,44 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for IRIX6.5
+#
+
+include $(DEPTH)/config/IRIX.mk
diff --git a/src/third_party/js-1.7/config/Linux_All.mk b/src/third_party/js-1.7/config/Linux_All.mk
new file mode 100644
index 00000000000..0c43df4b922
--- /dev/null
+++ b/src/third_party/js-1.7/config/Linux_All.mk
@@ -0,0 +1,103 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config for all versions of Linux
+#
+
+CC = gcc
+CCC = g++
+CFLAGS += -Wall -Wno-format
+OS_CFLAGS = -DXP_UNIX -DSVR4 -DSYSV -D_BSD_SOURCE -DPOSIX_SOURCE -DHAVE_LOCALTIME_R
+
+RANLIB = echo
+MKSHLIB = $(LD) -shared $(XMKSHLIBOPTS)
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = $(shell uname -m)
+# don't filter in x86-64 architecture
+ifneq (x86_64,$(CPU_ARCH))
+ifeq (86,$(findstring 86,$(CPU_ARCH)))
+CPU_ARCH = x86
+OS_CFLAGS+= -DX86_LINUX
+
+ifeq (gcc, $(CC))
+# if using gcc on x86, check version for opt bug
+# (http://bugzilla.mozilla.org/show_bug.cgi?id=24892)
+GCC_VERSION := $(shell gcc -v 2>&1 | grep version | awk '{ print $$3 }')
+GCC_LIST:=$(sort 2.91.66 $(GCC_VERSION) )
+
+ifeq (2.91.66, $(firstword $(GCC_LIST)))
+CFLAGS+= -DGCC_OPT_BUG
+endif
+endif
+endif
+endif
+
+GFX_ARCH = x
+
+OS_LIBS = -lm -lc
+
+ASFLAGS += -x assembler-with-cpp
+
+
+ifeq ($(CPU_ARCH),alpha)
+
+# Ask the C compiler on alpha linux to let us work with denormalized
+# double values, which are required by the ECMA spec.
+
+OS_CFLAGS += -mieee
+endif
+
+# Use the editline library to provide line-editing support.
+JS_EDITLINE = 1
+
+ifeq ($(CPU_ARCH),x86_64)
+# Use VA_COPY() standard macro on x86-64
+# FIXME: better use it everywhere
+OS_CFLAGS += -DHAVE_VA_COPY -DVA_COPY=va_copy
+endif
+
+ifeq ($(CPU_ARCH),x86_64)
+# We need PIC code for shared libraries
+# FIXME: better patch rules.mk & fdlibm/Makefile*
+OS_CFLAGS += -DPIC -fPIC
+endif
diff --git a/src/third_party/js-1.7/config/Mac_OS10.0.mk b/src/third_party/js-1.7/config/Mac_OS10.0.mk
new file mode 100755
index 00000000000..74ba151e3b5
--- /dev/null
+++ b/src/third_party/js-1.7/config/Mac_OS10.0.mk
@@ -0,0 +1,82 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Steve Zellers (zellers@apple.com)
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config for Mac OS X as of PR3
+# Just ripped from Linux config
+#
+
+CC = cc
+CCC = g++
+CFLAGS += -Wall -Wno-format
+OS_CFLAGS = -DXP_UNIX -DSVR4 -DSYSV -D_BSD_SOURCE -DPOSIX_SOURCE \
+-DRHAPSODY
+
+RANLIB = ranlib
+MKSHLIB = libtool -dynamic $(XMKSHLIBOPTS) -framework System
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = $(shell uname -m)
+ifeq (86,$(findstring 86,$(CPU_ARCH)))
+CPU_ARCH = x86
+OS_CFLAGS+= -DX86_LINUX
+endif
+GFX_ARCH = x
+
+OS_LIBS = -lc -framework System
+
+ASFLAGS += -x assembler-with-cpp
+
+ifeq ($(CPU_ARCH),alpha)
+
+# Ask the C compiler on alpha linux to let us work with denormalized
+# double values, which are required by the ECMA spec.
+
+OS_CFLAGS += -mieee
+endif
+
+# Use the editline library to provide line-editing support.
+JS_EDITLINE = 1
+
+# Don't allow Makefile.ref to use libmath
+NO_LIBM = 1
+
diff --git a/src/third_party/js-1.7/config/OSF1V4.0.mk b/src/third_party/js-1.7/config/OSF1V4.0.mk
new file mode 100644
index 00000000000..337ca745926
--- /dev/null
+++ b/src/third_party/js-1.7/config/OSF1V4.0.mk
@@ -0,0 +1,72 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for OSF1 V4.0
+#
+
+#
+# Initial DG/UX port by Marc Fraioli (fraioli@dg-rtp.dg.com)
+#
+
+ifndef NS_USE_NATIVE
+CC = gcc
+CCC = g++
+CFLAGS += -mieee -Wall -Wno-format
+else
+CC = cc
+CCC = cxx
+CFLAGS += -ieee -std
+# LD = cxx
+endif
+
+RANLIB = echo
+MKSHLIB = $(LD) -shared -taso -all -expect_unresolved "*"
+
+#
+# _DGUX_SOURCE is needed to turn on a lot of stuff in the headers if
+# you're not using DG's compiler. It shouldn't hurt if you are.
+#
+# _POSIX4A_DRAFT10_SOURCE is needed to pick up localtime_r, used in
+# prtime.c
+#
+OS_CFLAGS = -DXP_UNIX -DSVR4 -DSYSV -DDGUX -D_DGUX_SOURCE -D_POSIX4A_DRAFT10_SOURCE -DOSF1 -DHAVE_LOCALTIME_R
+OS_LIBS = -lsocket -lnsl
+
+NOSUCHFILE = /no-such-file
diff --git a/src/third_party/js-1.7/config/OSF1V5.0.mk b/src/third_party/js-1.7/config/OSF1V5.0.mk
new file mode 100644
index 00000000000..b65738c4e43
--- /dev/null
+++ b/src/third_party/js-1.7/config/OSF1V5.0.mk
@@ -0,0 +1,69 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for Tru64 Unix 5.0
+#
+
+#
+# Initial DG/UX port by Marc Fraioli (fraioli@dg-rtp.dg.com)
+#
+
+ifndef NS_USE_NATIVE
+CC = gcc
+CCC = g++
+CFLAGS += -mieee -Wall -Wno-format
+else
+CC = cc
+CCC = cxx
+CFLAGS += -ieee -std -pthread
+# LD = cxx
+endif
+
+RANLIB = echo
+MKSHLIB = $(LD) -shared -all -expect_unresolved "*"
+
+#
+# _POSIX4A_DRAFT10_SOURCE is needed to pick up localtime_r, used in
+# prtime.c
+#
+OS_CFLAGS = -DXP_UNIX -DSVR4 -DSYSV -D_POSIX4A_DRAFT10_SOURCE -DOSF1 -DHAVE_LOCALTIME_R
+OS_LIBS = -lsocket -lnsl
+
+NOSUCHFILE = /no-such-file
diff --git a/src/third_party/js-1.7/config/SunOS4.1.4.mk b/src/third_party/js-1.7/config/SunOS4.1.4.mk
new file mode 100644
index 00000000000..62f4815b462
--- /dev/null
+++ b/src/third_party/js-1.7/config/SunOS4.1.4.mk
@@ -0,0 +1,101 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for SunOS4.1
+#
+
+CC = gcc
+CCC = g++
+RANLIB = ranlib
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = sparc
+GFX_ARCH = x
+
+# A pile of -D's to build xfe on sunos
+MOZ_CFLAGS = -DSTRINGS_ALIGNED -DNO_REGEX -DNO_ISDIR -DUSE_RE_COMP \
+ -DNO_REGCOMP -DUSE_GETWD -DNO_MEMMOVE -DNO_ALLOCA \
+ -DBOGUS_MB_MAX -DNO_CONST
+
+# Purify doesn't like -MDupdate
+NOMD_OS_CFLAGS = -DXP_UNIX -Wall -Wno-format -DSW_THREADS -DSUNOS4 -DNEED_SYSCALL \
+ $(MOZ_CFLAGS)
+
+OS_CFLAGS = $(NOMD_OS_CFLAGS) -MDupdate $(DEPENDENCIES)
+OS_LIBS = -ldl -lm
+
+MKSHLIB = $(LD) -L$(MOTIF)/lib
+
+HAVE_PURIFY = 1
+MOTIF = /home/motif/usr
+MOTIFLIB = -L$(MOTIF)/lib -lXm
+INCLUDES += -I/usr/X11R5/include -I$(MOTIF)/include
+
+NOSUCHFILE = /solaris-rm-f-sucks
+
+LOCALE_MAP = $(DEPTH)/cmd/xfe/intl/sunos.lm
+
+EN_LOCALE = en_US
+DE_LOCALE = de
+FR_LOCALE = fr
+JP_LOCALE = ja
+SJIS_LOCALE = ja_JP.SJIS
+KR_LOCALE = ko
+CN_LOCALE = zh
+TW_LOCALE = zh_TW
+I2_LOCALE = i2
+IT_LOCALE = it
+SV_LOCALE = sv
+ES_LOCALE = es
+NL_LOCALE = nl
+PT_LOCALE = pt
+
+LOC_LIB_DIR = /usr/openwin/lib/locale
+
+BSDECHO = echo
+
+#
+# These defines are for building unix plugins
+#
+BUILD_UNIX_PLUGINS = 1
+DSO_LDOPTS =
+DSO_LDFLAGS =
diff --git a/src/third_party/js-1.7/config/SunOS5.3.mk b/src/third_party/js-1.7/config/SunOS5.3.mk
new file mode 100644
index 00000000000..bd615dee404
--- /dev/null
+++ b/src/third_party/js-1.7/config/SunOS5.3.mk
@@ -0,0 +1,91 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for SunOS5.3
+#
+
+CC = gcc
+CCC = g++
+CFLAGS += -Wall -Wno-format
+
+#CC = /opt/SUNWspro/SC3.0.1/bin/cc
+RANLIB = echo
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = sparc
+GFX_ARCH = x
+
+OS_CFLAGS = -DXP_UNIX -DSVR4 -DSYSV -DSOLARIS -DHAVE_LOCALTIME_R
+OS_LIBS = -lsocket -lnsl -ldl
+
+ASFLAGS += -P -L -K PIC -D_ASM -D__STDC__=0
+
+HAVE_PURIFY = 1
+
+NOSUCHFILE = /solaris-rm-f-sucks
+
+ifndef JS_NO_ULTRA
+ULTRA_OPTIONS := -xarch=v8plus
+ULTRA_OPTIONSD := -DULTRA_SPARC
+else
+ULTRA_OPTIONS := -xarch=v8
+ULTRA_OPTIONSD :=
+endif
+
+ifeq ($(OS_CPUARCH),sun4u)
+DEFINES += $(ULTRA_OPTIONSD)
+ifeq ($(findstring gcc,$(CC)),gcc)
+DEFINES += -Wa,$(ULTRA_OPTIONS),$(ULTRA_OPTIONSD)
+else
+ASFLAGS += $(ULTRA_OPTIONS) $(ULTRA_OPTIONSD)
+endif
+endif
+
+ifeq ($(OS_CPUARCH),sun4m)
+ifeq ($(findstring gcc,$(CC)),gcc)
+DEFINES += -Wa,-xarch=v8
+else
+ASFLAGS += -xarch=v8
+endif
+endif
+
+MKSHLIB = $(LD) -G
diff --git a/src/third_party/js-1.7/config/SunOS5.4.mk b/src/third_party/js-1.7/config/SunOS5.4.mk
new file mode 100644
index 00000000000..de019247a2d
--- /dev/null
+++ b/src/third_party/js-1.7/config/SunOS5.4.mk
@@ -0,0 +1,92 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for SunOS5.4
+#
+
+ifdef NS_USE_NATIVE
+CC = cc
+CCC = CC
+else
+CC = gcc
+CCC = g++
+CFLAGS += -Wall -Wno-format
+endif
+
+RANLIB = echo
+
+CPU_ARCH = sparc
+GFX_ARCH = x
+
+OS_CFLAGS = -DXP_UNIX -DSVR4 -DSYSV -D__svr4 -DSOLARIS -DHAVE_LOCALTIME_R
+OS_LIBS = -lsocket -lnsl -ldl
+
+ASFLAGS += -P -L -K PIC -D_ASM -D__STDC__=0
+
+HAVE_PURIFY = 1
+
+NOSUCHFILE = /solaris-rm-f-sucks
+
+ifndef JS_NO_ULTRA
+ULTRA_OPTIONS := -xarch=v8plus
+ULTRA_OPTIONSD := -DULTRA_SPARC
+else
+ULTRA_OPTIONS := -xarch=v8
+ULTRA_OPTIONSD :=
+endif
+
+ifeq ($(OS_CPUARCH),sun4u)
+DEFINES += $(ULTRA_OPTIONSD)
+ifeq ($(findstring gcc,$(CC)),gcc)
+DEFINES += -Wa,$(ULTRA_OPTIONS),$(ULTRA_OPTIONSD)
+else
+ASFLAGS += $(ULTRA_OPTIONS) $(ULTRA_OPTIONSD)
+endif
+endif
+
+ifeq ($(OS_CPUARCH),sun4m)
+ifeq ($(findstring gcc,$(CC)),gcc)
+DEFINES += -Wa,-xarch=v8
+else
+ASFLAGS += -xarch=v8
+endif
+endif
+
+MKSHLIB = $(LD) -G
diff --git a/src/third_party/js-1.7/config/SunOS5.5.1.mk b/src/third_party/js-1.7/config/SunOS5.5.1.mk
new file mode 100644
index 00000000000..648f72ffa0d
--- /dev/null
+++ b/src/third_party/js-1.7/config/SunOS5.5.1.mk
@@ -0,0 +1,44 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for SunOS5.5.1
+#
+
+include $(DEPTH)/config/SunOS5.5.mk
diff --git a/src/third_party/js-1.7/config/SunOS5.5.mk b/src/third_party/js-1.7/config/SunOS5.5.mk
new file mode 100644
index 00000000000..e26b3a3e0ec
--- /dev/null
+++ b/src/third_party/js-1.7/config/SunOS5.5.mk
@@ -0,0 +1,87 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for SunOS5.5
+#
+
+AS = /usr/ccs/bin/as
+ifndef NS_USE_NATIVE
+CC = gcc
+CCC = g++
+CFLAGS += -Wall -Wno-format
+else
+CC = cc
+CCC = CC
+endif
+
+RANLIB = echo
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = sparc
+GFX_ARCH = x
+
+OS_CFLAGS = -DXP_UNIX -DSVR4 -DSYSV -DSOLARIS -DHAVE_LOCALTIME_R
+OS_LIBS = -lsocket -lnsl -ldl
+
+ASFLAGS += -P -L -K PIC -D_ASM -D__STDC__=0
+
+HAVE_PURIFY = 1
+
+NOSUCHFILE = /solaris-rm-f-sucks
+
+ifeq ($(OS_CPUARCH),sun4u) # ultra sparc?
+ifeq ($(CC),gcc) # using gcc?
+ifndef JS_NO_ULTRA # do we want ultra?
+ifdef JS_THREADSAFE # only in thread-safe mode
+DEFINES += -DULTRA_SPARC
+DEFINES += -Wa,-xarch=v8plus,-DULTRA_SPARC
+else
+ASFLAGS += -xarch=v8plus -DULTRA_SPARC
+endif
+endif
+endif
+endif
+
+MKSHLIB = $(LD) -G
+
+# Use the editline library to provide line-editing support.
+JS_EDITLINE = 1
diff --git a/src/third_party/js-1.7/config/SunOS5.6.mk b/src/third_party/js-1.7/config/SunOS5.6.mk
new file mode 100644
index 00000000000..efe11528435
--- /dev/null
+++ b/src/third_party/js-1.7/config/SunOS5.6.mk
@@ -0,0 +1,89 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for SunOS5.6
+#
+
+AS = /usr/ccs/bin/as
+ifndef NS_USE_NATIVE
+ CC = gcc
+ CCC = g++
+ CFLAGS += -Wall -Wno-format
+else
+ CC = cc
+ CCC = CC
+ CFLAGS += -mt -KPIC
+# LD = CC
+endif
+
+RANLIB = echo
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = sparc
+GFX_ARCH = x
+
+OS_CFLAGS = -DXP_UNIX -DSVR4 -DSYSV -DSOLARIS -DHAVE_LOCALTIME_R
+OS_LIBS = -lsocket -lnsl -ldl
+
+ASFLAGS += -P -L -K PIC -D_ASM -D__STDC__=0
+
+HAVE_PURIFY = 1
+
+NOSUCHFILE = /solaris-rm-f-sucks
+
+ifeq ($(OS_CPUARCH),sun4u) # ultra sparc?
+ifeq ($(CC),gcc) # using gcc?
+ifndef JS_NO_ULTRA # do we want ultra?
+ifdef JS_THREADSAFE # only in thread-safe mode
+DEFINES += -DULTRA_SPARC
+DEFINES += -Wa,-xarch=v8plus,-DULTRA_SPARC
+else
+ASFLAGS += -xarch=v8plus -DULTRA_SPARC
+endif
+endif
+endif
+endif
+
+MKSHLIB = $(LD) -G
+
+# Use the editline library to provide line-editing support.
+JS_EDITLINE = 1
diff --git a/src/third_party/js-1.7/config/SunOS5.7.mk b/src/third_party/js-1.7/config/SunOS5.7.mk
new file mode 100644
index 00000000000..2cb02f29590
--- /dev/null
+++ b/src/third_party/js-1.7/config/SunOS5.7.mk
@@ -0,0 +1,44 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1999
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for SunOS5.7
+#
+
+include $(DEPTH)/config/SunOS5.5.mk
diff --git a/src/third_party/js-1.7/config/SunOS5.8.mk b/src/third_party/js-1.7/config/SunOS5.8.mk
new file mode 100644
index 00000000000..dd8a32d47b1
--- /dev/null
+++ b/src/third_party/js-1.7/config/SunOS5.8.mk
@@ -0,0 +1,44 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1999
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for SunOS5.8
+#
+
+include $(DEPTH)/config/SunOS5.5.mk
diff --git a/src/third_party/js-1.7/config/SunOS5.9.mk b/src/third_party/js-1.7/config/SunOS5.9.mk
new file mode 100644
index 00000000000..b01ec9c26fc
--- /dev/null
+++ b/src/third_party/js-1.7/config/SunOS5.9.mk
@@ -0,0 +1,44 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1999
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for SunOS5.9
+#
+
+include $(DEPTH)/config/SunOS5.5.mk
diff --git a/src/third_party/js-1.7/config/WINNT4.0.mk b/src/third_party/js-1.7/config/WINNT4.0.mk
new file mode 100644
index 00000000000..15a5a6fd2d0
--- /dev/null
+++ b/src/third_party/js-1.7/config/WINNT4.0.mk
@@ -0,0 +1,117 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config for Windows NT using MS Visual C++ (version?)
+#
+
+CC = cl
+
+RANLIB = echo
+
+PDBFILE = $(basename $(@F)).pdb
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = x86 # XXX fixme
+GFX_ARCH = win32
+
+# MSVC compiler options for both debug/optimize
+# -nologo - suppress copyright message
+# -W3 - Warning level 3
+# -Gm - enable minimal rebuild
+# -Z7 - put debug info into the executable, not in .pdb file
+# -Zi - put debug info into .pdb file
+# -YX - automatic precompiled headers
+# -GX - enable C++ exception support
+WIN_CFLAGS = -nologo -W3
+
+# MSVC compiler options for debug builds linked to MSVCRTD.DLL
+# -MDd - link with MSVCRTD.LIB (Dynamically-linked, multi-threaded, debug C-runtime)
+# -Od - minimal optimization
+WIN_IDG_CFLAGS = -MDd -Od -Z7
+
+# MSVC compiler options for debug builds linked to MSVCRT.DLL
+# -MD - link with MSVCRT.LIB (Dynamically-linked, multi-threaded, non-debug C-runtime)
+# -Od - minimal optimization
+WIN_DEBUG_CFLAGS = -MD -Od -Zi -Fd$(OBJDIR)/$(PDBFILE)
+
+# MSVC compiler options for release (optimized) builds
+# -MD - link with MSVCRT.LIB (Dynamically-linked, multi-threaded, C-runtime)
+# -O2 - Optimize for speed
+# -G5 - Optimize for Pentium
+WIN_OPT_CFLAGS = -MD -O2
+
+ifdef BUILD_OPT
+OPTIMIZER = $(WIN_OPT_CFLAGS)
+else
+ifdef BUILD_IDG
+OPTIMIZER = $(WIN_IDG_CFLAGS)
+else
+OPTIMIZER = $(WIN_DEBUG_CFLAGS)
+endif
+endif
+
+OS_CFLAGS = -D_X86_=1 -DXP_WIN -DXP_WIN32 -DWIN32 -D_WINDOWS -D_WIN32 $(WIN_CFLAGS)
+JSDLL_CFLAGS = -DEXPORT_JS_API
+OS_LIBS = -lm -lc
+
+PREBUILT_CPUCFG = 1
+USE_MSVC = 1
+
+LIB_LINK_FLAGS=kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib\
+ advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib oldnames.lib \
+ winmm.lib \
+ -nologo\
+ -subsystem:windows -dll -debug -pdb:$(OBJDIR)/$(PDBFILE)\
+ -machine:I386\
+ -opt:ref -opt:noicf
+
+EXE_LINK_FLAGS=kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib\
+ advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib oldnames.lib -nologo\
+ -subsystem:console -debug -pdb:$(OBJDIR)/$(PDBFILE)\
+ -machine:I386\
+ -opt:ref -opt:noicf
+
+# CAFEDIR = t:/cafe
+# JCLASSPATH = $(CAFEDIR)/Java/Lib/classes.zip
+# JAVAC = $(CAFEDIR)/Bin/sj.exe
+# JAVAH = $(CAFEDIR)/Java/Bin/javah.exe
+# JCFLAGS = -I$(CAFEDIR)/Java/Include -I$(CAFEDIR)/Java/Include/win32
diff --git a/src/third_party/js-1.7/config/WINNT5.0.mk b/src/third_party/js-1.7/config/WINNT5.0.mk
new file mode 100644
index 00000000000..2b796a4f193
--- /dev/null
+++ b/src/third_party/js-1.7/config/WINNT5.0.mk
@@ -0,0 +1,117 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config for Windows NT using MS Visual C++ (version?)
+#
+
+CC = cl
+
+RANLIB = echo
+
+PDBFILE = $(basename $(@F)).pdb
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = x86 # XXX fixme
+GFX_ARCH = win32
+
+# MSVC compiler options for both debug/optimize
+# -nologo - suppress copyright message
+# -W3 - Warning level 3
+# -Gm - enable minimal rebuild
+# -Z7 - put debug info into the executable, not in .pdb file
+# -Zi - put debug info into .pdb file
+# -YX - automatic precompiled headers
+# -GX - enable C++ exception support
+WIN_CFLAGS = -nologo -W3
+
+# MSVC compiler options for debug builds linked to MSVCRTD.DLL
+# -MDd - link with MSVCRTD.LIB (Dynamically-linked, multi-threaded, debug C-runtime)
+# -Od - minimal optimization
+WIN_IDG_CFLAGS = -MDd -Od -Z7
+
+# MSVC compiler options for debug builds linked to MSVCRT.DLL
+# -MD - link with MSVCRT.LIB (Dynamically-linked, multi-threaded, non-debug C-runtime)
+# -Od - minimal optimization
+WIN_DEBUG_CFLAGS = -MD -Od -Zi -Fd$(OBJDIR)/$(PDBFILE)
+
+# MSVC compiler options for release (optimized) builds
+# -MD - link with MSVCRT.LIB (Dynamically-linked, multi-threaded, C-runtime)
+# -O2 - Optimize for speed
+# -G5 - Optimize for Pentium
+WIN_OPT_CFLAGS = -MD -O2
+
+ifdef BUILD_OPT
+OPTIMIZER = $(WIN_OPT_CFLAGS)
+else
+ifdef BUILD_IDG
+OPTIMIZER = $(WIN_IDG_CFLAGS)
+else
+OPTIMIZER = $(WIN_DEBUG_CFLAGS)
+endif
+endif
+
+OS_CFLAGS = -D_X86_=1 -DXP_WIN -DXP_WIN32 -DWIN32 -D_WINDOWS -D_WIN32 -DWINVER=0x500 -D_WIN32_WINNT=0x500 $(WIN_CFLAGS)
+JSDLL_CFLAGS = -DEXPORT_JS_API
+OS_LIBS = -lm -lc
+
+PREBUILT_CPUCFG = 1
+USE_MSVC = 1
+
+LIB_LINK_FLAGS=kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib\
+ advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib oldnames.lib \
+ winmm.lib \
+ -nologo\
+ -subsystem:windows -dll -debug -pdb:$(OBJDIR)/$(PDBFILE)\
+ -machine:I386\
+ -opt:ref -opt:noicf
+
+EXE_LINK_FLAGS=kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib\
+ advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib oldnames.lib -nologo\
+ -subsystem:console -debug -pdb:$(OBJDIR)/$(PDBFILE)\
+ -machine:I386\
+ -opt:ref -opt:noicf
+
+# CAFEDIR = t:/cafe
+# JCLASSPATH = $(CAFEDIR)/Java/Lib/classes.zip
+# JAVAC = $(CAFEDIR)/Bin/sj.exe
+# JAVAH = $(CAFEDIR)/Java/Bin/javah.exe
+# JCFLAGS = -I$(CAFEDIR)/Java/Include -I$(CAFEDIR)/Java/Include/win32
diff --git a/src/third_party/js-1.7/config/WINNT5.1.mk b/src/third_party/js-1.7/config/WINNT5.1.mk
new file mode 100644
index 00000000000..2b796a4f193
--- /dev/null
+++ b/src/third_party/js-1.7/config/WINNT5.1.mk
@@ -0,0 +1,117 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config for Windows NT using MS Visual C++ (version?)
+#
+
+CC = cl
+
+RANLIB = echo
+
+PDBFILE = $(basename $(@F)).pdb
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = x86 # XXX fixme
+GFX_ARCH = win32
+
+# MSVC compiler options for both debug/optimize
+# -nologo - suppress copyright message
+# -W3 - Warning level 3
+# -Gm - enable minimal rebuild
+# -Z7 - put debug info into the executable, not in .pdb file
+# -Zi - put debug info into .pdb file
+# -YX - automatic precompiled headers
+# -GX - enable C++ exception support
+WIN_CFLAGS = -nologo -W3
+
+# MSVC compiler options for debug builds linked to MSVCRTD.DLL
+# -MDd - link with MSVCRTD.LIB (Dynamically-linked, multi-threaded, debug C-runtime)
+# -Od - minimal optimization
+WIN_IDG_CFLAGS = -MDd -Od -Z7
+
+# MSVC compiler options for debug builds linked to MSVCRT.DLL
+# -MD - link with MSVCRT.LIB (Dynamically-linked, multi-threaded, non-debug C-runtime)
+# -Od - minimal optimization
+WIN_DEBUG_CFLAGS = -MD -Od -Zi -Fd$(OBJDIR)/$(PDBFILE)
+
+# MSVC compiler options for release (optimized) builds
+# -MD - link with MSVCRT.LIB (Dynamically-linked, multi-threaded, C-runtime)
+# -O2 - Optimize for speed
+# -G5 - Optimize for Pentium
+WIN_OPT_CFLAGS = -MD -O2
+
+ifdef BUILD_OPT
+OPTIMIZER = $(WIN_OPT_CFLAGS)
+else
+ifdef BUILD_IDG
+OPTIMIZER = $(WIN_IDG_CFLAGS)
+else
+OPTIMIZER = $(WIN_DEBUG_CFLAGS)
+endif
+endif
+
+OS_CFLAGS = -D_X86_=1 -DXP_WIN -DXP_WIN32 -DWIN32 -D_WINDOWS -D_WIN32 -DWINVER=0x500 -D_WIN32_WINNT=0x500 $(WIN_CFLAGS)
+JSDLL_CFLAGS = -DEXPORT_JS_API
+OS_LIBS = -lm -lc
+
+PREBUILT_CPUCFG = 1
+USE_MSVC = 1
+
+LIB_LINK_FLAGS=kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib\
+ advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib oldnames.lib \
+ winmm.lib \
+ -nologo\
+ -subsystem:windows -dll -debug -pdb:$(OBJDIR)/$(PDBFILE)\
+ -machine:I386\
+ -opt:ref -opt:noicf
+
+EXE_LINK_FLAGS=kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib\
+ advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib oldnames.lib -nologo\
+ -subsystem:console -debug -pdb:$(OBJDIR)/$(PDBFILE)\
+ -machine:I386\
+ -opt:ref -opt:noicf
+
+# CAFEDIR = t:/cafe
+# JCLASSPATH = $(CAFEDIR)/Java/Lib/classes.zip
+# JAVAC = $(CAFEDIR)/Bin/sj.exe
+# JAVAH = $(CAFEDIR)/Java/Bin/javah.exe
+# JCFLAGS = -I$(CAFEDIR)/Java/Include -I$(CAFEDIR)/Java/Include/win32
diff --git a/src/third_party/js-1.7/config/WINNT5.2.mk b/src/third_party/js-1.7/config/WINNT5.2.mk
new file mode 100644
index 00000000000..2b796a4f193
--- /dev/null
+++ b/src/third_party/js-1.7/config/WINNT5.2.mk
@@ -0,0 +1,117 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config for Windows NT using MS Visual C++ (version?)
+#
+
+CC = cl
+
+RANLIB = echo
+
+PDBFILE = $(basename $(@F)).pdb
+
+#.c.o:
+# $(CC) -c -MD $*.d $(CFLAGS) $<
+
+CPU_ARCH = x86 # XXX fixme
+GFX_ARCH = win32
+
+# MSVC compiler options for both debug/optimize
+# -nologo - suppress copyright message
+# -W3 - Warning level 3
+# -Gm - enable minimal rebuild
+# -Z7 - put debug info into the executable, not in .pdb file
+# -Zi - put debug info into .pdb file
+# -YX - automatic precompiled headers
+# -GX - enable C++ exception support
+WIN_CFLAGS = -nologo -W3
+
+# MSVC compiler options for debug builds linked to MSVCRTD.DLL
+# -MDd - link with MSVCRTD.LIB (Dynamically-linked, multi-threaded, debug C-runtime)
+# -Od - minimal optimization
+WIN_IDG_CFLAGS = -MDd -Od -Z7
+
+# MSVC compiler options for debug builds linked to MSVCRT.DLL
+# -MD - link with MSVCRT.LIB (Dynamically-linked, multi-threaded, non-debug C-runtime)
+# -Od - minimal optimization
+WIN_DEBUG_CFLAGS = -MD -Od -Zi -Fd$(OBJDIR)/$(PDBFILE)
+
+# MSVC compiler options for release (optimized) builds
+# -MD - link with MSVCRT.LIB (Dynamically-linked, multi-threaded, C-runtime)
+# -O2 - Optimize for speed
+# -G5 - Optimize for Pentium
+WIN_OPT_CFLAGS = -MD -O2
+
+ifdef BUILD_OPT
+OPTIMIZER = $(WIN_OPT_CFLAGS)
+else
+ifdef BUILD_IDG
+OPTIMIZER = $(WIN_IDG_CFLAGS)
+else
+OPTIMIZER = $(WIN_DEBUG_CFLAGS)
+endif
+endif
+
+OS_CFLAGS = -D_X86_=1 -DXP_WIN -DXP_WIN32 -DWIN32 -D_WINDOWS -D_WIN32 -DWINVER=0x500 -D_WIN32_WINNT=0x500 $(WIN_CFLAGS)
+JSDLL_CFLAGS = -DEXPORT_JS_API
+OS_LIBS = -lm -lc
+
+PREBUILT_CPUCFG = 1
+USE_MSVC = 1
+
+LIB_LINK_FLAGS=kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib\
+ advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib oldnames.lib \
+ winmm.lib \
+ -nologo\
+ -subsystem:windows -dll -debug -pdb:$(OBJDIR)/$(PDBFILE)\
+ -machine:I386\
+ -opt:ref -opt:noicf
+
+EXE_LINK_FLAGS=kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib\
+ advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib oldnames.lib -nologo\
+ -subsystem:console -debug -pdb:$(OBJDIR)/$(PDBFILE)\
+ -machine:I386\
+ -opt:ref -opt:noicf
+
+# CAFEDIR = t:/cafe
+# JCLASSPATH = $(CAFEDIR)/Java/Lib/classes.zip
+# JAVAC = $(CAFEDIR)/Bin/sj.exe
+# JAVAH = $(CAFEDIR)/Java/Bin/javah.exe
+# JCFLAGS = -I$(CAFEDIR)/Java/Include -I$(CAFEDIR)/Java/Include/win32
diff --git a/src/third_party/js-1.7/config/dgux.mk b/src/third_party/js-1.7/config/dgux.mk
new file mode 100644
index 00000000000..3b5967e3d1f
--- /dev/null
+++ b/src/third_party/js-1.7/config/dgux.mk
@@ -0,0 +1,64 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either the GNU General Public License Version 2 or later (the "GPL"), or
+# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# Config stuff for Data General DG/UX
+#
+
+#
+# Initial DG/UX port by Marc Fraioli (fraioli@dg-rtp.dg.com)
+#
+
+AS = as
+CC = gcc
+CCC = g++
+
+RANLIB = echo
+
+#
+# _DGUX_SOURCE is needed to turn on a lot of stuff in the headers if
+# you're not using DG's compiler. It shouldn't hurt if you are.
+#
+# _POSIX4A_DRAFT10_SOURCE is needed to pick up localtime_r, used in
+# prtime.c
+#
+OS_CFLAGS = -DXP_UNIX -DSVR4 -DSYSV -DDGUX -D_DGUX_SOURCE -D_POSIX4A_DRAFT10_SOURCE -DHAVE_LOCALTIME_R
+OS_LIBS = -lsocket -lnsl
+
+NOSUCHFILE = /no-such-file
diff --git a/src/third_party/js-1.7/fdlibm/.cvsignore b/src/third_party/js-1.7/fdlibm/.cvsignore
new file mode 100644
index 00000000000..bb5cc66ee00
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/.cvsignore
@@ -0,0 +1,7 @@
+*.pdb
+*.ncb
+*.opt
+*.plg
+Debug
+Release
+Makefile
diff --git a/src/third_party/js-1.7/fdlibm/CVS/Entries b/src/third_party/js-1.7/fdlibm/CVS/Entries
new file mode 100644
index 00000000000..4c586387298
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/CVS/Entries
@@ -0,0 +1,87 @@
+/.cvsignore/1.3/Sat Dec 5 09:02:32 1998//TJS_170
+/Makefile.in/1.13/Sat Nov 15 00:11:04 2003//TJS_170
+/Makefile.ref/1.7/Sat Nov 15 00:11:04 2003//TJS_170
+/e_acos.c/1.9/Sat Nov 15 00:11:04 2003//TJS_170
+/e_acosh.c/1.8/Sat Nov 15 00:11:04 2003//TJS_170
+/e_asin.c/1.10/Sat Nov 15 00:11:04 2003//TJS_170
+/e_atan2.c/1.9/Sat Nov 15 00:11:04 2003//TJS_170
+/e_atanh.c/1.9/Sat Nov 15 00:11:04 2003//TJS_170
+/e_cosh.c/1.9/Sat Nov 15 00:11:04 2003//TJS_170
+/e_exp.c/1.10/Sat Nov 15 00:11:04 2003//TJS_170
+/e_fmod.c/1.8/Sat Nov 15 00:11:04 2003//TJS_170
+/e_gamma.c/1.7/Sat Nov 15 00:11:04 2003//TJS_170
+/e_gamma_r.c/1.7/Sat Nov 15 00:11:04 2003//TJS_170
+/e_hypot.c/1.8/Sat Nov 15 00:11:04 2003//TJS_170
+/e_j0.c/1.9/Sat Nov 15 00:11:05 2003//TJS_170
+/e_j1.c/1.9/Sat Nov 15 00:11:05 2003//TJS_170
+/e_jn.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/e_lgamma.c/1.7/Sat Nov 15 00:11:05 2003//TJS_170
+/e_lgamma_r.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/e_log.c/1.10/Sat Nov 15 00:11:05 2003//TJS_170
+/e_log10.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/e_pow.c/1.12/Sat Nov 15 00:11:05 2003//TJS_170
+/e_rem_pio2.c/1.9/Thu Jul 7 18:26:28 2005//TJS_170
+/e_remainder.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/e_scalb.c/1.7/Sat Nov 15 00:11:05 2003//TJS_170
+/e_sinh.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/e_sqrt.c/1.10/Sat Nov 15 00:11:05 2003//TJS_170
+/fdlibm.h/1.16.2.1/Mon Mar 27 05:55:15 2006//TJS_170
+/fdlibm.mak/1.3/Sun Apr 4 19:46:38 2004//TJS_170
+/fdlibm.mdp/1.3/Wed May 26 01:34:31 1999/-kb/TJS_170
+/k_cos.c/1.9/Thu Jul 7 18:26:28 2005//TJS_170
+/k_rem_pio2.c/1.7/Sat Nov 15 00:11:05 2003//TJS_170
+/k_sin.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/k_standard.c/1.12/Sat Nov 15 00:11:05 2003//TJS_170
+/k_tan.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_asinh.c/1.9/Sat Nov 15 00:11:05 2003//TJS_170
+/s_atan.c/1.9/Sat Nov 15 00:11:05 2003//TJS_170
+/s_cbrt.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_ceil.c/1.9/Sat Nov 15 00:11:05 2003//TJS_170
+/s_copysign.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_cos.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_erf.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_expm1.c/1.9/Sat Nov 15 00:11:05 2003//TJS_170
+/s_fabs.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_finite.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_floor.c/1.9/Sat Nov 15 00:11:05 2003//TJS_170
+/s_frexp.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_ilogb.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_isnan.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_ldexp.c/1.7/Sat Nov 15 00:11:05 2003//TJS_170
+/s_lib_version.c/1.7/Sat Nov 15 00:11:05 2003//TJS_170
+/s_log1p.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_logb.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_matherr.c/1.7/Sat Nov 15 00:11:05 2003//TJS_170
+/s_modf.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_nextafter.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_rint.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_scalbn.c/1.10/Sat Nov 15 00:11:05 2003//TJS_170
+/s_signgam.c/1.7/Sat Nov 15 00:11:05 2003//TJS_170
+/s_significand.c/1.7/Sat Nov 15 00:11:05 2003//TJS_170
+/s_sin.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_tan.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/s_tanh.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_acos.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_acosh.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_asin.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_atan2.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_atanh.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_cosh.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_exp.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_fmod.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_gamma.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_gamma_r.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_hypot.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_j0.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_j1.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_jn.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_lgamma.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_lgamma_r.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_log.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_log10.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_pow.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_remainder.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_scalb.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_sinh.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+/w_sqrt.c/1.8/Sat Nov 15 00:11:05 2003//TJS_170
+D
diff --git a/src/third_party/js-1.7/fdlibm/CVS/Repository b/src/third_party/js-1.7/fdlibm/CVS/Repository
new file mode 100644
index 00000000000..88fbd658913
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/CVS/Repository
@@ -0,0 +1 @@
+mozilla/js/src/fdlibm
diff --git a/src/third_party/js-1.7/fdlibm/CVS/Root b/src/third_party/js-1.7/fdlibm/CVS/Root
new file mode 100644
index 00000000000..cdb6f4a0739
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/CVS/Root
@@ -0,0 +1 @@
+:pserver:anonymous@cvs-mirror.mozilla.org:/cvsroot
diff --git a/src/third_party/js-1.7/fdlibm/CVS/Tag b/src/third_party/js-1.7/fdlibm/CVS/Tag
new file mode 100644
index 00000000000..2a8b15898d7
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/CVS/Tag
@@ -0,0 +1 @@
+NJS_170
diff --git a/src/third_party/js-1.7/fdlibm/Makefile.in b/src/third_party/js-1.7/fdlibm/Makefile.in
new file mode 100644
index 00000000000..fdec7b7e8e6
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/Makefile.in
@@ -0,0 +1,127 @@
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either of the GNU General Public License Version 2 or later (the "GPL"),
+# or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+DEPTH = ../../..
+topsrcdir = @top_srcdir@
+srcdir = @srcdir@
+VPATH = @srcdir@
+
+include $(DEPTH)/config/autoconf.mk
+
+MODULE = js
+LIBRARY_NAME = fdm
+
+CSRCS = \
+ e_acos.c \
+ e_asin.c \
+ e_atan2.c \
+ e_exp.c \
+ e_fmod.c \
+ e_log.c \
+ e_pow.c \
+ e_rem_pio2.c \
+ s_scalbn.c \
+ e_sqrt.c \
+ k_cos.c \
+ k_sin.c \
+ k_rem_pio2.c \
+ k_tan.c \
+ s_atan.c \
+ s_ceil.c \
+ s_copysign.c \
+ s_cos.c \
+ s_fabs.c \
+ s_finite.c \
+ s_floor.c \
+ s_isnan.c \
+ s_lib_version.c \
+ s_sin.c \
+ s_tan.c \
+ w_acos.c \
+ w_asin.c \
+ w_atan2.c \
+ w_exp.c \
+ w_fmod.c \
+ w_log.c \
+ w_pow.c \
+ w_sqrt.c \
+ $(NULL)
+
+EXPORTS = fdlibm.h
+
+# we need to force a static lib for the linking that js/src/Makefile.in wants
+# to do, and we don't really need a shared library ever, so:
+FORCE_STATIC_LIB = 1
+FORCE_USE_PIC = 1
+
+include $(topsrcdir)/config/rules.mk
+
+#
+# Default IEEE libm
+#
+CFLAGS += -D_IEEE_LIBM
+
+ifeq ($(OS_ARCH),Linux)
+LDFLAGS += -ldl
+endif
+
+ifeq ($(OS_ARCH),OSF1)
+LDFLAGS += -lc_r
+endif
+
+ifeq ($(OS_ARCH),SunOS)
+LDFLAGS += -lposix4 -ldl -lnsl -lsocket
+ifeq ($(CPU_ARCH),sparc)
+
+ifndef JS_NO_ULTRA
+ULTRA_OPTIONS := -xarch=v8plus,-DULTRA_SPARC
+ULTRA_OPTIONSCC := -DULTRA_SPARC
+else
+ULTRA_OPTIONS := -xarch=v8
+ULTRA_OPTIONSCC :=
+endif
+
+ifeq ($(shell uname -m),sun4u)
+ASFLAGS += -Wa,$(ULTRA_OPTIONS),-P,-L,-D_ASM,-D__STDC__=0 $(ULTRA_OPTIONSCC)
+else
+ASFLAGS += -Wa,-xarch=v8,-P,-L,-D_ASM,-D__STDC__=0
+endif
+
+endif
+endif
+
diff --git a/src/third_party/js-1.7/fdlibm/Makefile.ref b/src/third_party/js-1.7/fdlibm/Makefile.ref
new file mode 100644
index 00000000000..de378025c70
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/Makefile.ref
@@ -0,0 +1,192 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Sun Microsystems, Inc.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either of the GNU General Public License Version 2 or later (the "GPL"),
+# or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# @(#)Makefile 1.4 95/01/18
+#
+# ====================================================
+# Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+#
+# Developed at SunSoft, a Sun Microsystems, Inc. business.
+# Permission to use, copy, modify, and distribute this
+# software is freely granted, provided that this notice
+# is preserved.
+# ====================================================
+#
+#
+
+#
+# There are two options when building libm from fdlibm at compile time:
+# _IEEE_LIBM --- IEEE libm; smaller, and somewhat faster
+# _MULTI_LIBM --- Support multi-standard at runtime by
+# imposing wrapper functions defined in
+# fdlibm.h:
+# _IEEE_MODE -- IEEE
+# _XOPEN_MODE -- X/OPEN
+# _POSIX_MODE -- POSIX/ANSI
+# _SVID3_MODE -- SVID
+#
+# Here is how to set up CFLAGS to create the desired libm at
+# compile time:
+#
+# CFLAGS = -D_IEEE_LIBM ... IEEE libm (recommended)
+# CFLAGS = -D_SVID3_MODE ... Multi-standard supported
+# libm with SVID as the
+# default standard
+# CFLAGS = -D_XOPEN_MODE ... Multi-standard supported
+# libm with XOPEN as the
+# default standard
+# CFLAGS = -D_POSIX_MODE ... Multi-standard supported
+# libm with POSIX as the
+# default standard
+# CFLAGS = ... Multi-standard supported
+# libm with IEEE as the
+# default standard
+#
+# NOTE: if scalb's second argument is an int, then one must
+# define _SCALB_INT in CFLAGS. The default prototype of scalb
+# is double scalb(double, double)
+#
+
+DEPTH = ..
+
+include $(DEPTH)/config.mk
+
+#
+# Default IEEE libm
+#
+CFLAGS += -DXP_UNIX $(OPTIMIZER) $(OS_CFLAGS) $(DEFINES) $(INCLUDES) \
+ -DJSFILE $(XCFLAGS) -D_IEEE_LIBM
+
+# Need for jstypes.h and friends
+INCLUDES += -I..
+INCLUDES += -I../$(OBJDIR)
+
+#CC = cc
+
+INCFILES = fdlibm.h
+.INIT: $(INCFILES)
+.KEEP_STATE:
+FDLIBM_CFILES = \
+ k_standard.c k_rem_pio2.c \
+ k_cos.c k_sin.c k_tan.c \
+ e_acos.c e_acosh.c e_asin.c e_atan2.c \
+ e_atanh.c e_cosh.c e_exp.c e_fmod.c \
+ e_gamma.c e_gamma_r.c e_hypot.c e_j0.c \
+ e_j1.c e_jn.c e_lgamma.c e_lgamma_r.c \
+ e_log.c e_log10.c e_pow.c e_rem_pio2.c e_remainder.c \
+ e_scalb.c e_sinh.c e_sqrt.c \
+ w_acos.c w_acosh.c w_asin.c w_atan2.c \
+ w_atanh.c w_cosh.c w_exp.c w_fmod.c \
+ w_gamma.c w_gamma_r.c w_hypot.c w_j0.c \
+ w_j1.c w_jn.c w_lgamma.c w_lgamma_r.c \
+ w_log.c w_log10.c w_pow.c w_remainder.c \
+ w_scalb.c w_sinh.c w_sqrt.c \
+ s_asinh.c s_atan.c s_cbrt.c s_ceil.c s_copysign.c \
+ s_cos.c s_erf.c s_expm1.c s_fabs.c s_finite.c s_floor.c \
+ s_frexp.c s_ilogb.c s_isnan.c s_ldexp.c s_lib_version.c \
+ s_log1p.c s_logb.c s_matherr.c s_modf.c s_nextafter.c \
+ s_rint.c s_scalbn.c s_signgam.c s_significand.c s_sin.c \
+ s_tan.c s_tanh.c
+
+ifdef USE_MSVC
+FDLIBM_OBJS = $(addprefix $(OBJDIR)/, $(FDLIBM_CFILES:.c=.obj))
+else
+FDLIBM_OBJS = $(addprefix $(OBJDIR)/, $(FDLIBM_CFILES:.c=.o))
+endif
+
+ifdef USE_MSVC
+LIBRARY = $(OBJDIR)/fdlibm.lib
+else
+LIBRARY = $(OBJDIR)/libfdm.a
+endif
+
+define MAKE_OBJDIR
+if test ! -d $(@D); then rm -rf $(@D); mkdir -p $(@D); fi
+endef
+
+all: $(LIBRARY)
+
+export:
+
+$(OBJDIR)/%: %.c
+ @$(MAKE_OBJDIR)
+ $(CC) -o $@ $(CFLAGS) $*.c $(LDFLAGS)
+
+$(OBJDIR)/%.o: %.c
+ @$(MAKE_OBJDIR)
+ $(CC) -o $@ -c $(CFLAGS) $*.c
+
+$(OBJDIR)/%.o: %.s
+ @$(MAKE_OBJDIR)
+ $(AS) -o $@ $(ASFLAGS) $*.s
+
+# windows only
+$(OBJDIR)/%.obj: %.c
+ @$(MAKE_OBJDIR)
+ $(CC) -Fo$(OBJDIR)/ -c $(CFLAGS) $*.c
+
+ifeq ($(OS_ARCH),OS2)
+$(LIBRARY): $(FDLIBM_OBJS)
+ $(AR) $@ $? $(AR_OS2_SUFFIX)
+ $(RANLIB) $@
+else
+ifdef USE_MSVC
+$(LIBRARY): $(FDLIBM_OBJS)
+ lib.exe /out:"$@" $?
+else
+$(LIBRARY): $(FDLIBM_OBJS)
+ $(AR) rv $@ $?
+ $(RANLIB) $@
+endif
+endif
+
+libfdm.a : $(FDLIBM_OBJS)
+ $(AR) cru $(OBJDIR)/libfdm.a $(FDLIBM_OBJS)
+ $(RANLIB) $(OBJDIR)/libfdm.a
+
+clean:
+ rm -rf $(FDLIBM_OBJS)
+
+clobber:
+ rm -rf $(FDLIBM_OBJS) $(LIBRARY) $(DEPENDENCIES)
+
+SUFFIXES: .i
+%.i: %.c
+ $(CC) -C -E $(CFLAGS) $< > $*.i
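
A side note on the _SCALB_INT switch mentioned in the comment block above: it only selects which prototype of scalb the build exposes. The following sketch is illustrative only and is not part of the imported sources; it simply restates the note above as code.

    /* Illustrative sketch: what -D_SCALB_INT selects (not from the imported sources) */
    #ifdef _SCALB_INT
    double scalb(double x, int fn);      /* second argument is an int */
    #else
    double scalb(double x, double fn);   /* default: double scalb(double, double) */
    #endif
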
diff --git a/src/third_party/js-1.7/fdlibm/e_acos.c b/src/third_party/js-1.7/fdlibm/e_acos.c
new file mode 100644
index 00000000000..a07c1eebc77
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/e_acos.c
@@ -0,0 +1,147 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_acos.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* __ieee754_acos(x)
+ * Method :
+ * acos(x) = pi/2 - asin(x)
+ * acos(-x) = pi/2 + asin(x)
+ * For |x|<=0.5
+ * acos(x) = pi/2 - (x + x*x^2*R(x^2)) (see asin.c)
+ * For x>0.5
+ * acos(x) = pi/2 - (pi/2 - 2asin(sqrt((1-x)/2)))
+ * = 2asin(sqrt((1-x)/2))
+ * = 2s + 2s*z*R(z) ...z=(1-x)/2, s=sqrt(z)
+ * = 2f + (2c + 2s*z*R(z))
+ * where f=hi part of s, and c = (z-f*f)/(s+f) is the correction term
+ * for f so that f+c ~ sqrt(z).
+ * For x<-0.5
+ * acos(x) = pi - 2asin(sqrt((1-|x|)/2))
+ * = pi - 2.0*(s+s*z*R(z)), where z=(1-|x|)/2,s=sqrt(z)
+ *
+ * Special cases:
+ * if x is NaN, return x itself;
+ * if |x|>1, return NaN with invalid signal.
+ *
+ * Function needed: sqrt
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+one= 1.00000000000000000000e+00, /* 0x3FF00000, 0x00000000 */
+pi = 3.14159265358979311600e+00, /* 0x400921FB, 0x54442D18 */
+pio2_hi = 1.57079632679489655800e+00, /* 0x3FF921FB, 0x54442D18 */
+pio2_lo = 6.12323399573676603587e-17, /* 0x3C91A626, 0x33145C07 */
+pS0 = 1.66666666666666657415e-01, /* 0x3FC55555, 0x55555555 */
+pS1 = -3.25565818622400915405e-01, /* 0xBFD4D612, 0x03EB6F7D */
+pS2 = 2.01212532134862925881e-01, /* 0x3FC9C155, 0x0E884455 */
+pS3 = -4.00555345006794114027e-02, /* 0xBFA48228, 0xB5688F3B */
+pS4 = 7.91534994289814532176e-04, /* 0x3F49EFE0, 0x7501B288 */
+pS5 = 3.47933107596021167570e-05, /* 0x3F023DE1, 0x0DFDF709 */
+qS1 = -2.40339491173441421878e+00, /* 0xC0033A27, 0x1C8A2D4B */
+qS2 = 2.02094576023350569471e+00, /* 0x40002AE5, 0x9C598AC8 */
+qS3 = -6.88283971605453293030e-01, /* 0xBFE6066C, 0x1B8D0159 */
+qS4 = 7.70381505559019352791e-02; /* 0x3FB3B8C5, 0xB12E9282 */
+
+#ifdef __STDC__
+ double __ieee754_acos(double x)
+#else
+ double __ieee754_acos(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ double df;
+ double z,p,q,r,w,s,c;
+ int hx,ix;
+ u.d = x;
+ hx = __HI(u);
+ ix = hx&0x7fffffff;
+ if(ix>=0x3ff00000) { /* |x| >= 1 */
+ if(((ix-0x3ff00000)|__LO(u))==0) { /* |x|==1 */
+ if(hx>0) return 0.0; /* acos(1) = 0 */
+ else return pi+2.0*pio2_lo; /* acos(-1)= pi */
+ }
+ return (x-x)/(x-x); /* acos(|x|>1) is NaN */
+ }
+ if(ix<0x3fe00000) { /* |x| < 0.5 */
+ if(ix<=0x3c600000) return pio2_hi+pio2_lo;/*if|x|<2**-57*/
+ z = x*x;
+ p = z*(pS0+z*(pS1+z*(pS2+z*(pS3+z*(pS4+z*pS5)))));
+ q = one+z*(qS1+z*(qS2+z*(qS3+z*qS4)));
+ r = p/q;
+ return pio2_hi - (x - (pio2_lo-x*r));
+ } else if (hx<0) { /* x < -0.5 */
+ z = (one+x)*0.5;
+ p = z*(pS0+z*(pS1+z*(pS2+z*(pS3+z*(pS4+z*pS5)))));
+ q = one+z*(qS1+z*(qS2+z*(qS3+z*qS4)));
+ s = fd_sqrt(z);
+ r = p/q;
+ w = r*s-pio2_lo;
+ return pi - 2.0*(s+w);
+ } else { /* x > 0.5 */
+ z = (one-x)*0.5;
+ s = fd_sqrt(z);
+ u.d = s;
+ __LO(u) = 0;
+ df = u.d;
+ c = (z-df*df)/(s+df);
+ p = z*(pS0+z*(pS1+z*(pS2+z*(pS3+z*(pS4+z*pS5)))));
+ q = one+z*(qS1+z*(qS2+z*(qS3+z*qS4)));
+ r = p/q;
+ w = r*s+c;
+ return 2.0*(df+w);
+ }
+}
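
The method comment in e_acos.c above rests on a three-way case split. As a quick cross-check, here is a small standalone C sketch; it is not part of the imported fdlibm sources and uses the host libm's asin/sqrt in place of the in-file rational approximation R(x^2), so it illustrates the identities only, not the error bounds.

    #include <math.h>
    #include <stdio.h>

    /* acos via the same case split as __ieee754_acos above, leaning on libm
     * asin/sqrt instead of the polynomial R(x^2). */
    static double acos_via_asin(double x)
    {
        const double pi = 3.14159265358979323846;
        if (fabs(x) <= 0.5)
            return pi / 2 - asin(x);                    /* acos(x) = pi/2 - asin(x) */
        if (x > 0.5)
            return 2.0 * asin(sqrt((1.0 - x) / 2.0));   /* 2asin(sqrt((1-x)/2)) */
        return pi - 2.0 * asin(sqrt((1.0 + x) / 2.0));  /* x < -0.5: (1-|x|)/2 = (1+x)/2 */
    }

    int main(void)
    {
        double xs[] = { -0.99, -0.7, -0.3, 0.0, 0.3, 0.7, 0.99 };
        for (int i = 0; i < 7; i++)
            printf("x=% .2f  acos=%.17g  via asin=%.17g\n",
                   xs[i], acos(xs[i]), acos_via_asin(xs[i]));
        return 0;
    }
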
diff --git a/src/third_party/js-1.7/fdlibm/e_acosh.c b/src/third_party/js-1.7/fdlibm/e_acosh.c
new file mode 100644
index 00000000000..725cceefb42
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/e_acosh.c
@@ -0,0 +1,105 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_acosh.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+
+/* __ieee754_acosh(x)
+ * Method :
+ * Based on
+ * acosh(x) = log [ x + sqrt(x*x-1) ]
+ * we have
+ * acosh(x) := log(x)+ln2, if x is large; else
+ * acosh(x) := log(2x-1/(sqrt(x*x-1)+x)) if x>2; else
+ * acosh(x) := log1p(t+sqrt(2.0*t+t*t)); where t=x-1.
+ *
+ * Special cases:
+ * acosh(x) is NaN with signal if x<1.
+ * acosh(NaN) is NaN without signal.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+one = 1.0,
+ln2 = 6.93147180559945286227e-01; /* 0x3FE62E42, 0xFEFA39EF */
+
+#ifdef __STDC__
+ double __ieee754_acosh(double x)
+#else
+ double __ieee754_acosh(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ double t;
+ int hx;
+ u.d = x;
+ hx = __HI(u);
+ if(hx<0x3ff00000) { /* x < 1 */
+ return (x-x)/(x-x);
+ } else if(hx >=0x41b00000) { /* x > 2**28 */
+ if(hx >=0x7ff00000) { /* x is inf or NaN */
+ return x+x;
+ } else
+ return __ieee754_log(x)+ln2; /* acosh(huge)=log(2x) */
+ } else if(((hx-0x3ff00000)|__LO(u))==0) {
+ return 0.0; /* acosh(1) = 0 */
+ } else if (hx > 0x40000000) { /* 2**28 > x > 2 */
+ t=x*x;
+ return __ieee754_log(2.0*x-one/(x+fd_sqrt(t-one)));
+ } else { /* 1<x<2 */
+ t = x-one;
+ return fd_log1p(t+fd_sqrt(2.0*t+t*t));
+ }
+}
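
The acosh method comment above picks one of three forms depending on the size of x. Below is a standalone sketch (not part of the imported sources) with libm log/log1p/sqrt and a plain 2**28 cutoff standing in for the high-word compares used by the real routine.

    #include <math.h>
    #include <stdio.h>

    static double acosh_split(double x)              /* assumes x >= 1, finite */
    {
        const double ln2 = 0.693147180559945286227;
        if (x > 268435456.0)                         /* x > 2**28: sqrt(x*x-1) ~ x */
            return log(x) + ln2;                     /* acosh(huge) ~ log(2x) */
        if (x > 2.0)
            return log(2.0 * x - 1.0 / (x + sqrt(x * x - 1.0)));
        /* 1 <= x <= 2: keep precision near 1 with log1p */
        {
            double t = x - 1.0;
            return log1p(t + sqrt(2.0 * t + t * t));
        }
    }

    int main(void)
    {
        double xs[] = { 1.0, 1.0000001, 1.5, 3.0, 1e3, 1e9 };
        for (int i = 0; i < 6; i++)
            printf("x=%g  acosh=%.17g  split=%.17g\n",
                   xs[i], acosh(xs[i]), acosh_split(xs[i]));
        return 0;
    }
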
diff --git a/src/third_party/js-1.7/fdlibm/e_asin.c b/src/third_party/js-1.7/fdlibm/e_asin.c
new file mode 100644
index 00000000000..624c4d2ce61
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/e_asin.c
@@ -0,0 +1,156 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_asin.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* __ieee754_asin(x)
+ * Method :
+ * Since asin(x) = x + x^3/6 + x^5*3/40 + x^7*15/336 + ...
+ * we approximate asin(x) on [0,0.5] by
+ * asin(x) = x + x*x^2*R(x^2)
+ * where
+ * R(x^2) is a rational approximation of (asin(x)-x)/x^3
+ * and its remez error is bounded by
+ * |(asin(x)-x)/x^3 - R(x^2)| < 2^(-58.75)
+ *
+ * For x in [0.5,1]
+ * asin(x) = pi/2-2*asin(sqrt((1-x)/2))
+ * Let y = (1-x), z = y/2, s := sqrt(z), and pio2_hi+pio2_lo=pi/2;
+ * then for x>0.98
+ * asin(x) = pi/2 - 2*(s+s*z*R(z))
+ * = pio2_hi - (2*(s+s*z*R(z)) - pio2_lo)
+ * For x<=0.98, let pio4_hi = pio2_hi/2, then
+ * f = hi part of s;
+ * c = sqrt(z) - f = (z-f*f)/(s+f) ...f+c=sqrt(z)
+ * and
+ * asin(x) = pi/2 - 2*(s+s*z*R(z))
+ * = pio4_hi+(pio4-2s)-(2s*z*R(z)-pio2_lo)
+ * = pio4_hi+(pio4-2f)-(2s*z*R(z)-(pio2_lo+2c))
+ *
+ * Special cases:
+ * if x is NaN, return x itself;
+ * if |x|>1, return NaN with invalid signal.
+ *
+ */
+
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+one = 1.00000000000000000000e+00, /* 0x3FF00000, 0x00000000 */
+really_big = 1.000e+300,
+pio2_hi = 1.57079632679489655800e+00, /* 0x3FF921FB, 0x54442D18 */
+pio2_lo = 6.12323399573676603587e-17, /* 0x3C91A626, 0x33145C07 */
+pio4_hi = 7.85398163397448278999e-01, /* 0x3FE921FB, 0x54442D18 */
+ /* coefficient for R(x^2) */
+pS0 = 1.66666666666666657415e-01, /* 0x3FC55555, 0x55555555 */
+pS1 = -3.25565818622400915405e-01, /* 0xBFD4D612, 0x03EB6F7D */
+pS2 = 2.01212532134862925881e-01, /* 0x3FC9C155, 0x0E884455 */
+pS3 = -4.00555345006794114027e-02, /* 0xBFA48228, 0xB5688F3B */
+pS4 = 7.91534994289814532176e-04, /* 0x3F49EFE0, 0x7501B288 */
+pS5 = 3.47933107596021167570e-05, /* 0x3F023DE1, 0x0DFDF709 */
+qS1 = -2.40339491173441421878e+00, /* 0xC0033A27, 0x1C8A2D4B */
+qS2 = 2.02094576023350569471e+00, /* 0x40002AE5, 0x9C598AC8 */
+qS3 = -6.88283971605453293030e-01, /* 0xBFE6066C, 0x1B8D0159 */
+qS4 = 7.70381505559019352791e-02; /* 0x3FB3B8C5, 0xB12E9282 */
+
+#ifdef __STDC__
+ double __ieee754_asin(double x)
+#else
+ double __ieee754_asin(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ double w,t,p,q,c,r,s;
+ int hx,ix;
+ u.d = x;
+ hx = __HI(u);
+ x = u.d;
+ ix = hx&0x7fffffff;
+ if(ix>= 0x3ff00000) { /* |x|>= 1 */
+ if(((ix-0x3ff00000)|__LO(u))==0)
+ /* asin(1)=+-pi/2 with inexact */
+ return x*pio2_hi+x*pio2_lo;
+ return (x-x)/(x-x); /* asin(|x|>1) is NaN */
+ } else if (ix<0x3fe00000) { /* |x|<0.5 */
+ if(ix<0x3e400000) { /* if |x| < 2**-27 */
+ if(really_big+x>one) return x;/* return x with inexact if x!=0*/
+ } else
+ t = x*x;
+ p = t*(pS0+t*(pS1+t*(pS2+t*(pS3+t*(pS4+t*pS5)))));
+ q = one+t*(qS1+t*(qS2+t*(qS3+t*qS4)));
+ w = p/q;
+ return x+x*w;
+ }
+ /* 1> |x|>= 0.5 */
+ w = one-fd_fabs(x);
+ t = w*0.5;
+ p = t*(pS0+t*(pS1+t*(pS2+t*(pS3+t*(pS4+t*pS5)))));
+ q = one+t*(qS1+t*(qS2+t*(qS3+t*qS4)));
+ s = fd_sqrt(t);
+ if(ix>=0x3FEF3333) { /* if |x| > 0.975 */
+ w = p/q;
+ t = pio2_hi-(2.0*(s+s*w)-pio2_lo);
+ } else {
+ u.d = s;
+ __LO(u) = 0;
+ w = u.d;
+ c = (t-w*w)/(s+w);
+ r = p/q;
+ p = 2.0*s*r-(pio2_lo-2.0*c);
+ q = pio4_hi-2.0*w;
+ t = pio4_hi-(p-q);
+ }
+ if(hx>0) return t; else return -t;
+}
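
For 0.5 <= |x| < 1 the asin method comment above reduces the problem to a smaller argument: asin(x) = pi/2 - 2*asin(sqrt((1-x)/2)). Below is a standalone sketch (not part of the imported sources) of just that reduction, with libm asin standing in for R(z) and the hi/lo correction-term refinement omitted.

    #include <math.h>
    #include <stdio.h>

    static double asin_reduced(double x)             /* assumes 0.5 <= x < 1 */
    {
        const double pi_2 = 1.57079632679489661923;
        double z = (1.0 - x) * 0.5;                  /* z = (1-x)/2 */
        return pi_2 - 2.0 * asin(sqrt(z));           /* asin(x) = pi/2 - 2asin(sqrt(z)) */
    }

    int main(void)
    {
        double xs[] = { 0.5, 0.7, 0.9, 0.975, 0.999999 };
        for (int i = 0; i < 5; i++)
            printf("x=%g  asin=%.17g  reduced=%.17g\n",
                   xs[i], asin(xs[i]), asin_reduced(xs[i]));
        return 0;
    }
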
diff --git a/src/third_party/js-1.7/fdlibm/e_atan2.c b/src/third_party/js-1.7/fdlibm/e_atan2.c
new file mode 100644
index 00000000000..9c9a2c01f33
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/e_atan2.c
@@ -0,0 +1,165 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_atan2.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+
+/* __ieee754_atan2(y,x)
+ * Method :
+ * 1. Reduce y to positive by atan2(y,x)=-atan2(-y,x).
+ * 2. Reduce x to positive by (if x and y are unexceptional):
+ * ARG (x+iy) = arctan(y/x) ... if x > 0,
+ * ARG (x+iy) = pi - arctan[y/(-x)] ... if x < 0,
+ *
+ * Special cases:
+ *
+ * ATAN2((anything), NaN ) is NaN;
+ * ATAN2(NAN , (anything) ) is NaN;
+ * ATAN2(+-0, +(anything but NaN)) is +-0 ;
+ * ATAN2(+-0, -(anything but NaN)) is +-pi ;
+ * ATAN2(+-(anything but 0 and NaN), 0) is +-pi/2;
+ * ATAN2(+-(anything but INF and NaN), +INF) is +-0 ;
+ * ATAN2(+-(anything but INF and NaN), -INF) is +-pi;
+ * ATAN2(+-INF,+INF ) is +-pi/4 ;
+ * ATAN2(+-INF,-INF ) is +-3pi/4;
+ * ATAN2(+-INF, (anything but 0, NaN, and INF)) is +-pi/2;
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+tiny = 1.0e-300,
+zero = 0.0,
+pi_o_4 = 7.8539816339744827900E-01, /* 0x3FE921FB, 0x54442D18 */
+pi_o_2 = 1.5707963267948965580E+00, /* 0x3FF921FB, 0x54442D18 */
+pi = 3.1415926535897931160E+00, /* 0x400921FB, 0x54442D18 */
+pi_lo = 1.2246467991473531772E-16; /* 0x3CA1A626, 0x33145C07 */
+
+#ifdef __STDC__
+ double __ieee754_atan2(double y, double x)
+#else
+ double __ieee754_atan2(y,x)
+ double y,x;
+#endif
+{
+ fd_twoints ux, uy, uz;
+ double z;
+ int k,m,hx,hy,ix,iy;
+ unsigned lx,ly;
+
+ ux.d = x; uy.d = y;
+ hx = __HI(ux); ix = hx&0x7fffffff;
+ lx = __LO(ux);
+ hy = __HI(uy); iy = hy&0x7fffffff;
+ ly = __LO(uy);
+ if(((ix|((lx|-(int)lx)>>31))>0x7ff00000)||
+ ((iy|((ly|-(int)ly)>>31))>0x7ff00000)) /* x or y is NaN */
+ return x+y;
+ if(((hx-0x3ff00000)|lx)==0) return fd_atan(y); /* x=1.0 */
+ m = ((hy>>31)&1)|((hx>>30)&2); /* 2*sign(x)+sign(y) */
+
+ /* when y = 0 */
+ if((iy|ly)==0) {
+ switch(m) {
+ case 0:
+ case 1: return y; /* atan(+-0,+anything)=+-0 */
+ case 2: return pi+tiny;/* atan(+0,-anything) = pi */
+ case 3: return -pi-tiny;/* atan(-0,-anything) =-pi */
+ }
+ }
+ /* when x = 0 */
+ if((ix|lx)==0) return (hy<0)? -pi_o_2-tiny: pi_o_2+tiny;
+
+ /* when x is INF */
+ if(ix==0x7ff00000) {
+ if(iy==0x7ff00000) {
+ switch(m) {
+ case 0: return pi_o_4+tiny;/* atan(+INF,+INF) */
+ case 1: return -pi_o_4-tiny;/* atan(-INF,+INF) */
+ case 2: return 3.0*pi_o_4+tiny;/*atan(+INF,-INF)*/
+ case 3: return -3.0*pi_o_4-tiny;/*atan(-INF,-INF)*/
+ }
+ } else {
+ switch(m) {
+ case 0: return zero ; /* atan(+...,+INF) */
+ case 1: return -zero ; /* atan(-...,+INF) */
+ case 2: return pi+tiny ; /* atan(+...,-INF) */
+ case 3: return -pi-tiny ; /* atan(-...,-INF) */
+ }
+ }
+ }
+ /* when y is INF */
+ if(iy==0x7ff00000) return (hy<0)? -pi_o_2-tiny: pi_o_2+tiny;
+
+ /* compute y/x */
+ k = (iy-ix)>>20;
+ if(k > 60) z=pi_o_2+0.5*pi_lo; /* |y/x| > 2**60 */
+ else if(hx<0&&k<-60) z=0.0; /* |y|/x < -2**60 */
+ else z=fd_atan(fd_fabs(y/x)); /* safe to do y/x */
+ switch (m) {
+ case 0: return z ; /* atan(+,+) */
+ case 1: uz.d = z;
+ __HI(uz) ^= 0x80000000;
+ z = uz.d;
+ return z ; /* atan(-,+) */
+ case 2: return pi-(z-pi_lo);/* atan(+,-) */
+ default: /* case 3 */
+ return (z-pi_lo)-pi;/* atan(-,-) */
+ }
+}
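
Steps 1 and 2 of the atan2 method comment above carry the whole idea; the rest of the routine is special-case handling. Below is a standalone sketch (not part of the imported sources) that applies only those two steps and ignores the zero/INF/NaN cases.

    #include <math.h>
    #include <stdio.h>

    static double atan2_reduced(double y, double x)  /* assumes x != 0, both finite */
    {
        const double pi = 3.14159265358979323846;
        if (y < 0.0)
            return -atan2_reduced(-y, x);            /* step 1: atan2(y,x) = -atan2(-y,x) */
        if (x > 0.0)
            return atan(y / x);                      /* ARG(x+iy) = arctan(y/x), x > 0 */
        return pi - atan(y / -x);                    /* x < 0: pi - arctan(y/(-x)) */
    }

    int main(void)
    {
        double pts[][2] = { {1, 2}, {1, -2}, {-1, 2}, {-1, -2} };   /* {y, x} pairs */
        for (int i = 0; i < 4; i++)
            printf("atan2(%g,%g)=%.17g  reduced=%.17g\n",
                   pts[i][0], pts[i][1],
                   atan2(pts[i][0], pts[i][1]),
                   atan2_reduced(pts[i][0], pts[i][1]));
        return 0;
    }
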
diff --git a/src/third_party/js-1.7/fdlibm/e_atanh.c b/src/third_party/js-1.7/fdlibm/e_atanh.c
new file mode 100644
index 00000000000..dc4a90c8e03
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/e_atanh.c
@@ -0,0 +1,110 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_atanh.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+
+/* __ieee754_atanh(x)
+ * Method :
+ * 1. Reduce x to positive by atanh(-x) = -atanh(x)
+ * 2. For x>=0.5
+ * 1 2x x
+ * atanh(x) = --- * log(1 + -------) = 0.5 * log1p(2 * --------)
+ * 2 1 - x 1 - x
+ *
+ * For x<0.5
+ * atanh(x) = 0.5*log1p(2x+2x*x/(1-x))
+ *
+ * Special cases:
+ * atanh(x) is NaN if |x| > 1 with signal;
+ * atanh(NaN) is that NaN with no signal;
+ * atanh(+-1) is +-INF with signal.
+ *
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double one = 1.0, really_big = 1e300;
+#else
+static double one = 1.0, really_big = 1e300;
+#endif
+
+static double zero = 0.0;
+
+#ifdef __STDC__
+ double __ieee754_atanh(double x)
+#else
+ double __ieee754_atanh(x)
+ double x;
+#endif
+{
+ double t;
+ int hx,ix;
+ unsigned lx;
+ fd_twoints u;
+ u.d = x;
+ hx = __HI(u); /* high word */
+ lx = __LO(u); /* low word */
+ ix = hx&0x7fffffff;
+ if ((ix|((lx|(-(int)lx))>>31))>0x3ff00000) /* |x|>1 */
+ return (x-x)/(x-x);
+ if(ix==0x3ff00000)
+ return x/zero;
+ if(ix<0x3e300000&&(really_big+x)>zero) return x; /* x<2**-28 */
+ u.d = x;
+ __HI(u) = ix; /* x <- |x| */
+ x = u.d;
+ if(ix<0x3fe00000) { /* x < 0.5 */
+ t = x+x;
+ t = 0.5*fd_log1p(t+t*x/(one-x));
+ } else
+ t = 0.5*fd_log1p((x+x)/(one-x));
+ if(hx>=0) return t; else return -t;
+}
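
Both log1p forms quoted in the atanh method comment above are algebraic rewrites of 0.5*log((1+x)/(1-x)); the x < 0.5 form simply avoids cancellation for tiny x. Below is a standalone sketch (not part of the imported sources) that evaluates them with the host libm.

    #include <math.h>
    #include <stdio.h>

    static double atanh_log1p(double x)              /* assumes |x| < 1 */
    {
        double ax = fabs(x), t;                      /* atanh(-x) = -atanh(x) */
        if (ax < 0.5)
            t = 0.5 * log1p(2.0 * ax + 2.0 * ax * ax / (1.0 - ax));
        else
            t = 0.5 * log1p(2.0 * ax / (1.0 - ax));
        return x < 0.0 ? -t : t;
    }

    int main(void)
    {
        double xs[] = { -0.9, -0.3, 1e-8, 0.3, 0.9, 0.999999 };
        for (int i = 0; i < 6; i++)
            printf("x=%g  atanh=%.17g  log1p form=%.17g\n",
                   xs[i], atanh(xs[i]), atanh_log1p(xs[i]));
        return 0;
    }
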
diff --git a/src/third_party/js-1.7/fdlibm/e_cosh.c b/src/third_party/js-1.7/fdlibm/e_cosh.c
new file mode 100644
index 00000000000..4f8d4f7697d
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/e_cosh.c
@@ -0,0 +1,133 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_cosh.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* __ieee754_cosh(x)
+ * Method :
+ * mathematically cosh(x) is defined to be (exp(x)+exp(-x))/2
+ * 1. Replace x by |x| (cosh(x) = cosh(-x)).
+ * 2.
+ * [ exp(x) - 1 ]^2
+ * 0 <= x <= ln2/2 : cosh(x) := 1 + -------------------
+ * 2*exp(x)
+ *
+ * exp(x) + 1/exp(x)
+ * ln2/2 <= x <= 22 : cosh(x) := -------------------
+ * 2
+ * 22 <= x <= lnovft : cosh(x) := exp(x)/2
+ * lnovft <= x <= ln2ovft: cosh(x) := exp(x/2)/2 * exp(x/2)
+ * ln2ovft < x : cosh(x) := huge*huge (overflow)
+ *
+ * Special cases:
+ * cosh(x) is |x| if x is +INF, -INF, or NaN.
+ * only cosh(0)=1 is exact for finite x.
+ */
+
+#include "fdlibm.h"
+
+#ifdef _WIN32
+#define huge myhuge
+#endif
+
+#ifdef __STDC__
+static const double one = 1.0, half=0.5, really_big = 1.0e300;
+#else
+static double one = 1.0, half=0.5, really_big = 1.0e300;
+#endif
+
+#ifdef __STDC__
+ double __ieee754_cosh(double x)
+#else
+ double __ieee754_cosh(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ double t,w;
+ int ix;
+ unsigned lx;
+
+ /* High word of |x|. */
+ u.d = x;
+ ix = __HI(u);
+ ix &= 0x7fffffff;
+
+ /* x is INF or NaN */
+ if(ix>=0x7ff00000) return x*x;
+
+ /* |x| in [0,0.5*ln2], return 1+expm1(|x|)^2/(2*exp(|x|)) */
+ if(ix<0x3fd62e43) {
+ t = fd_expm1(fd_fabs(x));
+ w = one+t;
+ if (ix<0x3c800000) return w; /* cosh(tiny) = 1 */
+ return one+(t*t)/(w+w);
+ }
+
+ /* |x| in [0.5*ln2,22], return (exp(|x|)+1/exp(|x|))/2; */
+ if (ix < 0x40360000) {
+ t = __ieee754_exp(fd_fabs(x));
+ return half*t+half/t;
+ }
+
+ /* |x| in [22, log(maxdouble)] return half*exp(|x|) */
+ if (ix < 0x40862E42) return half*__ieee754_exp(fd_fabs(x));
+
+ /* |x| in [log(maxdouble), overflow threshold] */
+ lx = *( (((*(unsigned*)&one)>>29)) + (unsigned*)&x);
+ if (ix<0x408633CE ||
+ (ix==0x408633ce)&&(lx<=(unsigned)0x8fb9f87d)) {
+ w = __ieee754_exp(half*fd_fabs(x));
+ t = half*w;
+ return t*w;
+ }
+
+ /* |x| > overflow threshold, cosh(x) overflow */
+ return really_big*really_big;
+}
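
The cosh method comment above is a piecewise evaluation keyed off thresholds that the code checks as hex high words. Below is a standalone sketch (not part of the imported sources) with rounded decimal stand-ins for those thresholds (ln2/2, 22, log(maxdouble) ~ 709.78, overflow threshold ~ 710.47).

    #include <math.h>
    #include <stdio.h>

    static double cosh_piecewise(double x)           /* assumes finite x */
    {
        double ax = fabs(x);                         /* cosh(x) = cosh(-x) */
        if (ax <= 0.5 * 0.693147180559945286227) {   /* [0, ln2/2] */
            double t = expm1(ax);
            return 1.0 + t * t / (2.0 * (1.0 + t));  /* 1 + expm1(|x|)^2/(2*exp(|x|)) */
        }
        if (ax <= 22.0)                              /* (ln2/2, 22] */
            return 0.5 * (exp(ax) + 1.0 / exp(ax));
        if (ax <= 709.78)                            /* (22, log(maxdouble)] */
            return 0.5 * exp(ax);
        if (ax <= 710.47) {                          /* up to the overflow threshold */
            double w = exp(0.5 * ax);
            return (0.5 * w) * w;                    /* exp(x/2)/2 * exp(x/2) */
        }
        return HUGE_VAL;                             /* overflow */
    }

    int main(void)
    {
        double xs[] = { 0.0, 0.25, 5.0, 30.0, 710.0 };
        for (int i = 0; i < 5; i++)
            printf("x=%g  cosh=%.17g  piecewise=%.17g\n",
                   xs[i], cosh(xs[i]), cosh_piecewise(xs[i]));
        return 0;
    }
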
diff --git a/src/third_party/js-1.7/fdlibm/e_exp.c b/src/third_party/js-1.7/fdlibm/e_exp.c
new file mode 100644
index 00000000000..ad9cec12419
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/e_exp.c
@@ -0,0 +1,202 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_exp.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* __ieee754_exp(x)
+ * Returns the exponential of x.
+ *
+ * Method
+ * 1. Argument reduction:
+ * Reduce x to an r so that |r| <= 0.5*ln2 ~ 0.34658.
+ * Given x, find r and integer k such that
+ *
+ * x = k*ln2 + r, |r| <= 0.5*ln2.
+ *
+ * Here r will be represented as r = hi-lo for better
+ * accuracy.
+ *
+ * 2. Approximation of exp(r) by a special rational function on
+ * the interval [0,0.34658]:
+ * Write
+ * R(r**2) = r*(exp(r)+1)/(exp(r)-1) = 2 + r*r/6 - r**4/360 + ...
+ * We use a special Remez algorithm on [0,0.34658] to generate
+ * a polynomial of degree 5 to approximate R. The maximum error
+ * of this polynomial approximation is bounded by 2**-59. In
+ * other words,
+ * R(z) ~ 2.0 + P1*z + P2*z**2 + P3*z**3 + P4*z**4 + P5*z**5
+ * (where z=r*r, and the values of P1 to P5 are listed below)
+ * and
+ * | 5 | -59
+ * | 2.0+P1*z+...+P5*z - R(z) | <= 2
+ * | |
+ * The computation of exp(r) thus becomes
+ * 2*r
+ * exp(r) = 1 + -------
+ * R - r
+ * r*R1(r)
+ * = 1 + r + ----------- (for better accuracy)
+ * 2 - R1(r)
+ * where
+ * 2 4 10
+ * R1(r) = r - (P1*r + P2*r + ... + P5*r ).
+ *
+ * 3. Scale back to obtain exp(x):
+ * From step 1, we have
+ * exp(x) = 2^k * exp(r)
+ *
+ * Special cases:
+ * exp(INF) is INF, exp(NaN) is NaN;
+ * exp(-INF) is 0, and
+ * for finite argument, only exp(0)=1 is exact.
+ *
+ * Accuracy:
+ * according to an error analysis, the error is always less than
+ * 1 ulp (unit in the last place).
+ *
+ * Misc. info.
+ * For IEEE double
+ * if x > 7.09782712893383973096e+02 then exp(x) overflow
+ * if x < -7.45133219101941108420e+02 then exp(x) underflow
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+one = 1.0,
+halF[2] = {0.5,-0.5,},
+really_big = 1.0e+300,
+twom1000= 9.33263618503218878990e-302, /* 2**-1000=0x01700000,0*/
+o_threshold= 7.09782712893383973096e+02, /* 0x40862E42, 0xFEFA39EF */
+u_threshold= -7.45133219101941108420e+02, /* 0xc0874910, 0xD52D3051 */
+ln2HI[2] ={ 6.93147180369123816490e-01, /* 0x3fe62e42, 0xfee00000 */
+ -6.93147180369123816490e-01,},/* 0xbfe62e42, 0xfee00000 */
+ln2LO[2] ={ 1.90821492927058770002e-10, /* 0x3dea39ef, 0x35793c76 */
+ -1.90821492927058770002e-10,},/* 0xbdea39ef, 0x35793c76 */
+invln2 = 1.44269504088896338700e+00, /* 0x3ff71547, 0x652b82fe */
+P1 = 1.66666666666666019037e-01, /* 0x3FC55555, 0x5555553E */
+P2 = -2.77777777770155933842e-03, /* 0xBF66C16C, 0x16BEBD93 */
+P3 = 6.61375632143793436117e-05, /* 0x3F11566A, 0xAF25DE2C */
+P4 = -1.65339022054652515390e-06, /* 0xBEBBBD41, 0xC5D26BF1 */
+P5 = 4.13813679705723846039e-08; /* 0x3E663769, 0x72BEA4D0 */
+
+
+#ifdef __STDC__
+ double __ieee754_exp(double x) /* default IEEE double exp */
+#else
+ double __ieee754_exp(x) /* default IEEE double exp */
+ double x;
+#endif
+{
+ fd_twoints u;
+ double y,hi,lo,c,t;
+ int k, xsb;
+ unsigned hx;
+
+ u.d = x;
+ hx = __HI(u); /* high word of x */
+ xsb = (hx>>31)&1; /* sign bit of x */
+ hx &= 0x7fffffff; /* high word of |x| */
+
+ /* filter out non-finite argument */
+ if(hx >= 0x40862E42) { /* if |x|>=709.78... */
+ if(hx>=0x7ff00000) {
+ u.d = x;
+ if(((hx&0xfffff)|__LO(u))!=0)
+ return x+x; /* NaN */
+ else return (xsb==0)? x:0.0; /* exp(+-inf)={inf,0} */
+ }
+ if(x > o_threshold) return really_big*really_big; /* overflow */
+ if(x < u_threshold) return twom1000*twom1000; /* underflow */
+ }
+
+ /* argument reduction */
+ if(hx > 0x3fd62e42) { /* if |x| > 0.5 ln2 */
+ if(hx < 0x3FF0A2B2) { /* and |x| < 1.5 ln2 */
+ hi = x-ln2HI[xsb]; lo=ln2LO[xsb]; k = 1-xsb-xsb;
+ } else {
+ k = (int)(invln2*x+halF[xsb]);
+ t = k;
+ hi = x - t*ln2HI[0]; /* t*ln2HI is exact here */
+ lo = t*ln2LO[0];
+ }
+ x = hi - lo;
+ }
+ else if(hx < 0x3e300000) { /* when |x|<2**-28 */
+ if(really_big+x>one) return one+x;/* trigger inexact */
+ }
+ else k = 0;
+
+ /* x is now in primary range */
+ t = x*x;
+ c = x - t*(P1+t*(P2+t*(P3+t*(P4+t*P5))));
+ if(k==0) return one-((x*c)/(c-2.0)-x);
+ else y = one-((lo-(x*c)/(2.0-c))-hi);
+ if(k >= -1021) {
+ u.d = y;
+ __HI(u) += (k<<20); /* add k to y's exponent */
+ y = u.d;
+ return y;
+ } else {
+ u.d = y;
+ __HI(u) += ((k+1000)<<20);/* add k to y's exponent */
+ y = u.d;
+ return y*twom1000;
+ }
+}
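
Step 1 of the exp method comment above (write x = k*ln2 + r, then exp(x) = 2^k * exp(r)) can be demonstrated on its own. Below is a standalone sketch (not part of the imported sources) with libm exp(r) standing in for the degree-5 rational step and ldexp doing the 2^k scaling.

    #include <math.h>
    #include <stdio.h>

    static double exp_reduced(double x)              /* assumes x well inside the finite range */
    {
        const double ln2    = 0.693147180559945286227;
        const double invln2 = 1.44269504088896338700;
        int    k = (int)floor(invln2 * x + 0.5);     /* nearest integer to x/ln2 */
        double r = x - (double)k * ln2;              /* |r| <= 0.5*ln2 ~ 0.34658 */
        return ldexp(exp(r), k);                     /* scale back: 2^k * exp(r) */
    }

    int main(void)
    {
        double xs[] = { -20.0, -1.0, 0.1, 1.0, 10.0, 100.0 };
        for (int i = 0; i < 6; i++)
            printf("x=%g  exp=%.17g  reduced=%.17g\n",
                   xs[i], exp(xs[i]), exp_reduced(xs[i]));
        return 0;
    }
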
diff --git a/src/third_party/js-1.7/fdlibm/e_fmod.c b/src/third_party/js-1.7/fdlibm/e_fmod.c
new file mode 100644
index 00000000000..7b5ce780f38
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/e_fmod.c
@@ -0,0 +1,184 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_fmod.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * __ieee754_fmod(x,y)
+ * Return x mod y in exact arithmetic
+ * Method: shift and subtract
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double one = 1.0, Zero[] = {0.0, -0.0,};
+#else
+static double one = 1.0, Zero[] = {0.0, -0.0,};
+#endif
+
+#ifdef __STDC__
+ double __ieee754_fmod(double x, double y)
+#else
+ double __ieee754_fmod(x,y)
+ double x,y ;
+#endif
+{
+ fd_twoints ux, uy;
+ int n,hx,hy,hz,ix,iy,sx,i;
+ unsigned lx,ly,lz;
+
+ ux.d = x; uy.d = y;
+ hx = __HI(ux); /* high word of x */
+ lx = __LO(ux); /* low word of x */
+ hy = __HI(uy); /* high word of y */
+ ly = __LO(uy); /* low word of y */
+ sx = hx&0x80000000; /* sign of x */
+ hx ^=sx; /* |x| */
+ hy &= 0x7fffffff; /* |y| */
+
+ /* purge off exception values */
+ if((hy|ly)==0||(hx>=0x7ff00000)|| /* y=0,or x not finite */
+ ((hy|((ly|-(int)ly)>>31))>0x7ff00000)) /* or y is NaN */
+ return (x*y)/(x*y);
+ if(hx<=hy) {
+ if((hx<hy)||(lx<ly)) return x; /* |x|<|y| return x */
+ if(lx==ly)
+ return Zero[(unsigned)sx>>31]; /* |x|=|y| return x*0*/
+ }
+
+ /* determine ix = ilogb(x) */
+ if(hx<0x00100000) { /* subnormal x */
+ if(hx==0) {
+ for (ix = -1043, i=lx; i>0; i<<=1) ix -=1;
+ } else {
+ for (ix = -1022,i=(hx<<11); i>0; i<<=1) ix -=1;
+ }
+ } else ix = (hx>>20)-1023;
+
+ /* determine iy = ilogb(y) */
+ if(hy<0x00100000) { /* subnormal y */
+ if(hy==0) {
+ for (iy = -1043, i=ly; i>0; i<<=1) iy -=1;
+ } else {
+ for (iy = -1022,i=(hy<<11); i>0; i<<=1) iy -=1;
+ }
+ } else iy = (hy>>20)-1023;
+
+ /* set up {hx,lx}, {hy,ly} and align y to x */
+ if(ix >= -1022)
+ hx = 0x00100000|(0x000fffff&hx);
+ else { /* subnormal x, shift x to normal */
+ n = -1022-ix;
+ if(n<=31) {
+ hx = (hx<<n)|(lx>>(32-n));
+ lx <<= n;
+ } else {
+ hx = lx<<(n-32);
+ lx = 0;
+ }
+ }
+ if(iy >= -1022)
+ hy = 0x00100000|(0x000fffff&hy);
+ else { /* subnormal y, shift y to normal */
+ n = -1022-iy;
+ if(n<=31) {
+ hy = (hy<<n)|(ly>>(32-n));
+ ly <<= n;
+ } else {
+ hy = ly<<(n-32);
+ ly = 0;
+ }
+ }
+
+ /* fix point fmod */
+ n = ix - iy;
+ while(n--) {
+ hz=hx-hy;lz=lx-ly; if(lx<ly) hz -= 1;
+ if(hz<0){hx = hx+hx+(lx>>31); lx = lx+lx;}
+ else {
+ if((hz|lz)==0) /* return sign(x)*0 */
+ return Zero[(unsigned)sx>>31];
+ hx = hz+hz+(lz>>31); lx = lz+lz;
+ }
+ }
+ hz=hx-hy;lz=lx-ly; if(lx<ly) hz -= 1;
+ if(hz>=0) {hx=hz;lx=lz;}
+
+ /* convert back to floating value and restore the sign */
+ if((hx|lx)==0) /* return sign(x)*0 */
+ return Zero[(unsigned)sx>>31];
+ while(hx<0x00100000) { /* normalize x */
+ hx = hx+hx+(lx>>31); lx = lx+lx;
+ iy -= 1;
+ }
+ if(iy>= -1022) { /* normalize output */
+ hx = ((hx-0x00100000)|((iy+1023)<<20));
+ ux.d = x;
+ __HI(ux) = hx|sx;
+ __LO(ux) = lx;
+ x = ux.d;
+ } else { /* subnormal output */
+ n = -1022 - iy;
+ if(n<=20) {
+ lx = (lx>>n)|((unsigned)hx<<(32-n));
+ hx >>= n;
+ } else if (n<=31) {
+ lx = (hx<<(32-n))|(lx>>n); hx = sx;
+ } else {
+ lx = hx>>(n-32); hx = sx;
+ }
+ ux.d = x;
+ __HI(ux) = hx|sx;
+ __LO(ux) = lx;
+ x = ux.d;
+ x *= one; /* create necessary signal */
+ }
+ return x; /* exact output */
+}
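
The "shift and subtract" method noted above can be mimicked without the hi/lo word manipulation: align a power-of-two multiple of |y| just under |x| and subtract until |x| < |y|. Each subtraction is exact (Sterbenz lemma, since the aligned multiple z satisfies z <= x < 2z), which is the same reason the fixed-point loop above loses nothing. Below is a standalone sketch, not part of the imported sources, assuming finite x and nonzero finite y.

    #include <math.h>
    #include <stdio.h>

    static double fmod_shift_subtract(double x, double y)
    {
        int negative = x < 0.0;
        x = fabs(x);
        y = fabs(y);
        while (x >= y) {
            /* largest power-of-two multiple of y that still fits under x */
            double z = ldexp(y, ilogb(x) - ilogb(y));
            if (z > x)
                z = 0.5 * z;
            x -= z;                                  /* exact: z <= x < 2z */
        }
        return negative ? -x : x;
    }

    int main(void)
    {
        double cases[][2] = { {5.5, 2.0}, {-7.25, 3.0}, {1e300, 3.1}, {0.7, 0.2} };
        for (int i = 0; i < 4; i++)
            printf("fmod(%g,%g)=%.17g  sketch=%.17g\n",
                   cases[i][0], cases[i][1],
                   fmod(cases[i][0], cases[i][1]),
                   fmod_shift_subtract(cases[i][0], cases[i][1]));
        return 0;
    }
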
diff --git a/src/third_party/js-1.7/fdlibm/e_gamma.c b/src/third_party/js-1.7/fdlibm/e_gamma.c
new file mode 100644
index 00000000000..a34faa32cf6
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/e_gamma.c
@@ -0,0 +1,71 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_gamma.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+
+/* __ieee754_gamma(x)
+ * Return the logarithm of the Gamma function of x.
+ *
+ * Method: call __ieee754_gamma_r
+ */
+
+#include "fdlibm.h"
+
+extern int signgam;
+
+#ifdef __STDC__
+ double __ieee754_gamma(double x)
+#else
+ double __ieee754_gamma(x)
+ double x;
+#endif
+{
+ return __ieee754_gamma_r(x,&signgam);
+}
diff --git a/src/third_party/js-1.7/fdlibm/e_gamma_r.c b/src/third_party/js-1.7/fdlibm/e_gamma_r.c
new file mode 100644
index 00000000000..f10e32e361e
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/e_gamma_r.c
@@ -0,0 +1,70 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_gamma_r.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+
+/* __ieee754_gamma_r(x, signgamp)
+ * Reentrant version of the logarithm of the Gamma function
+ * with a user-provided pointer for the sign of Gamma(x).
+ *
+ * Method: See __ieee754_lgamma_r
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double __ieee754_gamma_r(double x, int *signgamp)
+#else
+ double __ieee754_gamma_r(x,signgamp)
+ double x; int *signgamp;
+#endif
+{
+ return __ieee754_lgamma_r(x,signgamp);
+}
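
Both gamma entry points above follow the historical convention in which "gamma" returns log|Gamma(x)| and reports the sign separately through signgam. For orientation, a small sketch of recovering Gamma(x) itself from that pair using the standard C library's lgamma and signgam (POSIX names, not the fdlibm entry points; link with -lm):

    #include <math.h>
    #include <stdio.h>

    int main(void) {
        double x = -2.5;
        double lg = lgamma(x);              /* log|Gamma(x)|; also sets signgam */
        double g  = signgam * exp(lg);      /* Gamma(-2.5) is about -0.9453     */
        printf("Gamma(%g) = %.10g\n", x, g);
        return 0;
    }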
diff --git a/src/third_party/js-1.7/fdlibm/e_hypot.c b/src/third_party/js-1.7/fdlibm/e_hypot.c
new file mode 100644
index 00000000000..3900230878e
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/e_hypot.c
@@ -0,0 +1,173 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_hypot.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* __ieee754_hypot(x,y)
+ *
+ * Method :
+ * If (assume round-to-nearest) z=x*x+y*y
+ * has error less than sqrt(2)/2 ulp, then
+ * sqrt(z) has error less than 1 ulp (exercise).
+ *
+ * So, compute sqrt(x*x+y*y) with some care as
+ * follows to get the error below 1 ulp:
+ *
+ * Assume x>y>0;
+ * (if possible, set rounding to round-to-nearest)
+ * 1. if x > 2y use
+ * x1*x1+(y*y+(x2*(x+x1))) for x*x+y*y
+ * where x1 = x with lower 32 bits cleared, x2 = x-x1; else
+ * 2. if x <= 2y use
+ * t1*y1+((x-y)*(x-y)+(t1*y2+t2*y))
+ * where t1 = 2x with lower 32 bits cleared, t2 = 2x-t1,
+ * y1= y with lower 32 bits chopped, y2 = y-y1.
+ *
+ * NOTE: scaling may be necessary if some argument is too
+ * large or too tiny
+ *
+ * Special cases:
+ * hypot(x,y) is INF if x or y is +INF or -INF; else
+ * hypot(x,y) is NAN if x or y is NAN.
+ *
+ * Accuracy:
+ * hypot(x,y) returns sqrt(x^2+y^2) with error less
+ * than 1 ulps (units in the last place)
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double __ieee754_hypot(double x, double y)
+#else
+ double __ieee754_hypot(x,y)
+ double x, y;
+#endif
+{
+ fd_twoints ux, uy;
+ double a=x,b=y,t1,t2,y1,y2,w;
+ int j,k,ha,hb;
+
+ ux.d = x; uy.d = y;
+ ha = __HI(ux)&0x7fffffff; /* high word of x */
+ hb = __HI(uy)&0x7fffffff; /* high word of y */
+ if(hb > ha) {a=y;b=x;j=ha; ha=hb;hb=j;} else {a=x;b=y;}
+ ux.d = a; uy.d = b;
+ __HI(ux) = ha; /* a <- |a| */
+ __HI(uy) = hb; /* b <- |b| */
+ a = ux.d; b = uy.d;
+ if((ha-hb)>0x3c00000) {return a+b;} /* x/y > 2**60 */
+ k=0;
+ if(ha > 0x5f300000) { /* a>2**500 */
+ if(ha >= 0x7ff00000) { /* Inf or NaN */
+ w = a+b; /* for sNaN */
+ ux.d = a; uy.d = b;
+ if(((ha&0xfffff)|__LO(ux))==0) w = a;
+ if(((hb^0x7ff00000)|__LO(uy))==0) w = b;
+ return w;
+ }
+ /* scale a and b by 2**-600 */
+ ha -= 0x25800000; hb -= 0x25800000; k += 600;
+ ux.d = a; uy.d = b;
+ __HI(ux) = ha;
+ __HI(uy) = hb;
+ a = ux.d; b = uy.d;
+ }
+ if(hb < 0x20b00000) { /* b < 2**-500 */
+ if(hb <= 0x000fffff) { /* subnormal b or 0 */
+ uy.d = b;
+ if((hb|(__LO(uy)))==0) return a;
+ t1=0;
+ ux.d = t1;
+ __HI(ux) = 0x7fd00000; /* t1=2^1022 */
+ t1 = ux.d;
+ b *= t1;
+ a *= t1;
+ k -= 1022;
+ } else { /* scale a and b by 2^600 */
+ ha += 0x25800000; /* a *= 2^600 */
+ hb += 0x25800000; /* b *= 2^600 */
+ k -= 600;
+ ux.d = a; uy.d = b;
+ __HI(ux) = ha;
+ __HI(uy) = hb;
+ a = ux.d; b = uy.d;
+ }
+ }
+ /* medium size a and b */
+ w = a-b;
+ if (w>b) {
+ t1 = 0;
+ ux.d = t1;
+ __HI(ux) = ha;
+ t1 = ux.d;
+ t2 = a-t1;
+ w = fd_sqrt(t1*t1-(b*(-b)-t2*(a+t1)));
+ } else {
+ a = a+a;
+ y1 = 0;
+ ux.d = y1;
+ __HI(ux) = hb;
+ y1 = ux.d;
+ y2 = b - y1;
+ t1 = 0;
+ ux.d = t1;
+ __HI(ux) = ha+0x00100000;
+ t1 = ux.d;
+ t2 = a - t1;
+ w = fd_sqrt(t1*y1-(w*(-w)-(t1*y2+t2*b)));
+ }
+ if(k!=0) {
+ t1 = 1.0;
+ ux.d = t1;
+ __HI(ux) += (k<<20);
+ t1 = ux.d;
+ return t1*w;
+ } else return w;
+}
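
The hypot method comment above depends on splitting a double into a head whose low 32 significand bits are cleared plus an exact tail, so that products such as x1*x1 round exactly. A short sketch of that split, under the same illustrative little-endian union as before (not the fdlibm macros):

    #include <stdio.h>

    typedef union { double d; unsigned int w[2]; } two_words;

    /* Split x into x1 (low 32 bits of the significand cleared) and x2 = x - x1. */
    static void split(double x, double *x1, double *x2) {
        two_words u;
        u.d = x;
        u.w[0] = 0;            /* clear the low word (little-endian assumed) */
        *x1 = u.d;
        *x2 = x - *x1;         /* exact, since x and x1 share the high word  */
    }

    int main(void) {
        double x = 1234567.890123, x1, x2;
        split(x, &x1, &x2);
        printf("x1=%.17g x2=%.17g exact=%d\n", x1, x2, x1 + x2 == x);
        return 0;
    }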
diff --git a/src/third_party/js-1.7/fdlibm/e_j0.c b/src/third_party/js-1.7/fdlibm/e_j0.c
new file mode 100644
index 00000000000..078e096415c
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/e_j0.c
@@ -0,0 +1,524 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_j0.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* __ieee754_j0(x), __ieee754_y0(x)
+ * Bessel function of the first and second kinds of order zero.
+ * Method -- j0(x):
+ * 1. For tiny x, we use j0(x) = 1 - x^2/4 + x^4/64 - ...
+ * 2. Reduce x to |x| since j0(x)=j0(-x), and
+ * for x in (0,2)
+ * j0(x) = 1-z/4+ z^2*R0/S0, where z = x*x;
+ * (precision: |j0-1+z/4-z^2R0/S0 |<2**-63.67 )
+ * for x in (2,inf)
+ * j0(x) = sqrt(2/(pi*x))*(p0(x)*cos(x0)-q0(x)*sin(x0))
+ * where x0 = x-pi/4. It is better to compute sin(x0),cos(x0)
+ * as follows:
+ * cos(x0) = cos(x)cos(pi/4)+sin(x)sin(pi/4)
+ * = 1/sqrt(2) * (cos(x) + sin(x))
+ * sin(x0) = sin(x)cos(pi/4)-cos(x)sin(pi/4)
+ * = 1/sqrt(2) * (sin(x) - cos(x))
+ * (To avoid cancellation, use
+ * sin(x) +- cos(x) = -cos(2x)/(sin(x) -+ cos(x))
+ * to compute the worse one.)
+ *
+ * 3 Special cases
+ * j0(nan)= nan
+ * j0(0) = 1
+ * j0(inf) = 0
+ *
+ * Method -- y0(x):
+ * 1. For x<2.
+ * Since
+ * y0(x) = 2/pi*(j0(x)*(ln(x/2)+Euler) + x^2/4 - ...)
+ * therefore y0(x)-2/pi*j0(x)*ln(x) is an even function.
+ * We use the following function to approximate y0,
+ * y0(x) = U(z)/V(z) + (2/pi)*(j0(x)*ln(x)), z= x^2
+ * where
+ * U(z) = u00 + u01*z + ... + u06*z^6
+ * V(z) = 1 + v01*z + ... + v04*z^4
+ * with absolute approximation error bounded by 2**-72.
+ * Note: For tiny x, U/V = u0 and j0(x)~1, hence
+ * y0(tiny) = u0 + (2/pi)*ln(tiny), (choose tiny<2**-27)
+ * 2. For x>=2.
+ * y0(x) = sqrt(2/(pi*x))*(p0(x)*cos(x0)+q0(x)*sin(x0))
+ * where x0 = x-pi/4. It is better to compute sin(x0),cos(x0)
+ * by the method mentioned above.
+ * 3. Special cases: y0(0)=-inf, y0(x<0)=NaN, y0(inf)=0.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static double pzero(double), qzero(double);
+#else
+static double pzero(), qzero();
+#endif
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+really_big = 1e300,
+one = 1.0,
+invsqrtpi= 5.64189583547756279280e-01, /* 0x3FE20DD7, 0x50429B6D */
+tpi = 6.36619772367581382433e-01, /* 0x3FE45F30, 0x6DC9C883 */
+ /* R0/S0 on [0, 2.00] */
+R02 = 1.56249999999999947958e-02, /* 0x3F8FFFFF, 0xFFFFFFFD */
+R03 = -1.89979294238854721751e-04, /* 0xBF28E6A5, 0xB61AC6E9 */
+R04 = 1.82954049532700665670e-06, /* 0x3EBEB1D1, 0x0C503919 */
+R05 = -4.61832688532103189199e-09, /* 0xBE33D5E7, 0x73D63FCE */
+S01 = 1.56191029464890010492e-02, /* 0x3F8FFCE8, 0x82C8C2A4 */
+S02 = 1.16926784663337450260e-04, /* 0x3F1EA6D2, 0xDD57DBF4 */
+S03 = 5.13546550207318111446e-07, /* 0x3EA13B54, 0xCE84D5A9 */
+S04 = 1.16614003333790000205e-09; /* 0x3E1408BC, 0xF4745D8F */
+
+static double zero = 0.0;
+
+#ifdef __STDC__
+ double __ieee754_j0(double x)
+#else
+ double __ieee754_j0(x)
+ double x;
+#endif
+{
+ fd_twoints un;
+ double z, s,c,ss,cc,r,u,v;
+ int hx,ix;
+
+ un.d = x;
+ hx = __HI(un);
+ ix = hx&0x7fffffff;
+ if(ix>=0x7ff00000) return one/(x*x);
+ x = fd_fabs(x);
+ if(ix >= 0x40000000) { /* |x| >= 2.0 */
+ s = fd_sin(x);
+ c = fd_cos(x);
+ ss = s-c;
+ cc = s+c;
+ if(ix<0x7fe00000) { /* make sure x+x not overflow */
+ z = -fd_cos(x+x);
+ if ((s*c)<zero) cc = z/ss;
+ else ss = z/cc;
+ }
+ /*
+ * j0(x) = 1/sqrt(pi) * (P(0,x)*cc - Q(0,x)*ss) / sqrt(x)
+ * y0(x) = 1/sqrt(pi) * (P(0,x)*ss + Q(0,x)*cc) / sqrt(x)
+ */
+ if(ix>0x48000000) z = (invsqrtpi*cc)/fd_sqrt(x);
+ else {
+ u = pzero(x); v = qzero(x);
+ z = invsqrtpi*(u*cc-v*ss)/fd_sqrt(x);
+ }
+ return z;
+ }
+ if(ix<0x3f200000) { /* |x| < 2**-13 */
+ if(really_big+x>one) { /* raise inexact if x != 0 */
+ if(ix<0x3e400000) return one; /* |x|<2**-27 */
+ else return one - 0.25*x*x;
+ }
+ }
+ z = x*x;
+ r = z*(R02+z*(R03+z*(R04+z*R05)));
+ s = one+z*(S01+z*(S02+z*(S03+z*S04)));
+ if(ix < 0x3FF00000) { /* |x| < 1.00 */
+ return one + z*(-0.25+(r/s));
+ } else {
+ u = 0.5*x;
+ return((one+u)*(one-u)+z*(r/s));
+ }
+}
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+u00 = -7.38042951086872317523e-02, /* 0xBFB2E4D6, 0x99CBD01F */
+u01 = 1.76666452509181115538e-01, /* 0x3FC69D01, 0x9DE9E3FC */
+u02 = -1.38185671945596898896e-02, /* 0xBF8C4CE8, 0xB16CFA97 */
+u03 = 3.47453432093683650238e-04, /* 0x3F36C54D, 0x20B29B6B */
+u04 = -3.81407053724364161125e-06, /* 0xBECFFEA7, 0x73D25CAD */
+u05 = 1.95590137035022920206e-08, /* 0x3E550057, 0x3B4EABD4 */
+u06 = -3.98205194132103398453e-11, /* 0xBDC5E43D, 0x693FB3C8 */
+v01 = 1.27304834834123699328e-02, /* 0x3F8A1270, 0x91C9C71A */
+v02 = 7.60068627350353253702e-05, /* 0x3F13ECBB, 0xF578C6C1 */
+v03 = 2.59150851840457805467e-07, /* 0x3E91642D, 0x7FF202FD */
+v04 = 4.41110311332675467403e-10; /* 0x3DFE5018, 0x3BD6D9EF */
+
+#ifdef __STDC__
+ double __ieee754_y0(double x)
+#else
+ double __ieee754_y0(x)
+ double x;
+#endif
+{
+ fd_twoints un;
+ double z, s,c,ss,cc,u,v;
+ int hx,ix,lx;
+
+ un.d = x;
+ hx = __HI(un);
+ ix = 0x7fffffff&hx;
+ lx = __LO(un);
+ /* Y0(NaN) is NaN, y0(-inf) is NaN, y0(inf) is 0 */
+ if(ix>=0x7ff00000) return one/(x+x*x);
+ if((ix|lx)==0) return -one/zero;
+ if(hx<0) return zero/zero;
+ if(ix >= 0x40000000) { /* |x| >= 2.0 */
+ /* y0(x) = sqrt(2/(pi*x))*(p0(x)*sin(x0)+q0(x)*cos(x0))
+ * where x0 = x-pi/4
+ * Better formula:
+ * cos(x0) = cos(x)cos(pi/4)+sin(x)sin(pi/4)
+ * = 1/sqrt(2) * (sin(x) + cos(x))
+ * sin(x0) = sin(x)cos(pi/4)-cos(x)sin(pi/4)
+ * = 1/sqrt(2) * (sin(x) - cos(x))
+ * To avoid cancellation, use
+ * sin(x) +- cos(x) = -cos(2x)/(sin(x) -+ cos(x))
+ * to compute the worse one.
+ */
+ s = fd_sin(x);
+ c = fd_cos(x);
+ ss = s-c;
+ cc = s+c;
+ /*
+ * j0(x) = 1/sqrt(pi) * (P(0,x)*cc - Q(0,x)*ss) / sqrt(x)
+ * y0(x) = 1/sqrt(pi) * (P(0,x)*ss + Q(0,x)*cc) / sqrt(x)
+ */
+ if(ix<0x7fe00000) { /* make sure x+x not overflow */
+ z = -fd_cos(x+x);
+ if ((s*c)<zero) cc = z/ss;
+ else ss = z/cc;
+ }
+ if(ix>0x48000000) z = (invsqrtpi*ss)/fd_sqrt(x);
+ else {
+ u = pzero(x); v = qzero(x);
+ z = invsqrtpi*(u*ss+v*cc)/fd_sqrt(x);
+ }
+ return z;
+ }
+ if(ix<=0x3e400000) { /* x < 2**-27 */
+ return(u00 + tpi*__ieee754_log(x));
+ }
+ z = x*x;
+ u = u00+z*(u01+z*(u02+z*(u03+z*(u04+z*(u05+z*u06)))));
+ v = one+z*(v01+z*(v02+z*(v03+z*v04)));
+ return(u/v + tpi*(__ieee754_j0(x)*__ieee754_log(x)));
+}
+
+/* The asymptotic expansion of pzero is
+ * 1 - 9/128 s^2 + 11025/98304 s^4 - ..., where s = 1/x.
+ * For x >= 2, we approximate pzero by
+ * pzero(x) = 1 + (R/S)
+ * where R = pR0 + pR1*s^2 + pR2*s^4 + ... + pR5*s^10
+ * S = 1 + pS0*s^2 + ... + pS4*s^10
+ * and
+ * | pzero(x)-1-R/S | <= 2 ** ( -60.26)
+ */
+#ifdef __STDC__
+static const double pR8[6] = { /* for x in [inf, 8]=1/[0,0.125] */
+#else
+static double pR8[6] = { /* for x in [inf, 8]=1/[0,0.125] */
+#endif
+ 0.00000000000000000000e+00, /* 0x00000000, 0x00000000 */
+ -7.03124999999900357484e-02, /* 0xBFB1FFFF, 0xFFFFFD32 */
+ -8.08167041275349795626e+00, /* 0xC02029D0, 0xB44FA779 */
+ -2.57063105679704847262e+02, /* 0xC0701102, 0x7B19E863 */
+ -2.48521641009428822144e+03, /* 0xC0A36A6E, 0xCD4DCAFC */
+ -5.25304380490729545272e+03, /* 0xC0B4850B, 0x36CC643D */
+};
+#ifdef __STDC__
+static const double pS8[5] = {
+#else
+static double pS8[5] = {
+#endif
+ 1.16534364619668181717e+02, /* 0x405D2233, 0x07A96751 */
+ 3.83374475364121826715e+03, /* 0x40ADF37D, 0x50596938 */
+ 4.05978572648472545552e+04, /* 0x40E3D2BB, 0x6EB6B05F */
+ 1.16752972564375915681e+05, /* 0x40FC810F, 0x8F9FA9BD */
+ 4.76277284146730962675e+04, /* 0x40E74177, 0x4F2C49DC */
+};
+
+#ifdef __STDC__
+static const double pR5[6] = { /* for x in [8,4.5454]=1/[0.125,0.22001] */
+#else
+static double pR5[6] = { /* for x in [8,4.5454]=1/[0.125,0.22001] */
+#endif
+ -1.14125464691894502584e-11, /* 0xBDA918B1, 0x47E495CC */
+ -7.03124940873599280078e-02, /* 0xBFB1FFFF, 0xE69AFBC6 */
+ -4.15961064470587782438e+00, /* 0xC010A370, 0xF90C6BBF */
+ -6.76747652265167261021e+01, /* 0xC050EB2F, 0x5A7D1783 */
+ -3.31231299649172967747e+02, /* 0xC074B3B3, 0x6742CC63 */
+ -3.46433388365604912451e+02, /* 0xC075A6EF, 0x28A38BD7 */
+};
+#ifdef __STDC__
+static const double pS5[5] = {
+#else
+static double pS5[5] = {
+#endif
+ 6.07539382692300335975e+01, /* 0x404E6081, 0x0C98C5DE */
+ 1.05125230595704579173e+03, /* 0x40906D02, 0x5C7E2864 */
+ 5.97897094333855784498e+03, /* 0x40B75AF8, 0x8FBE1D60 */
+ 9.62544514357774460223e+03, /* 0x40C2CCB8, 0xFA76FA38 */
+ 2.40605815922939109441e+03, /* 0x40A2CC1D, 0xC70BE864 */
+};
+
+#ifdef __STDC__
+static const double pR3[6] = {/* for x in [4.547,2.8571]=1/[0.2199,0.35001] */
+#else
+static double pR3[6] = {/* for x in [4.547,2.8571]=1/[0.2199,0.35001] */
+#endif
+ -2.54704601771951915620e-09, /* 0xBE25E103, 0x6FE1AA86 */
+ -7.03119616381481654654e-02, /* 0xBFB1FFF6, 0xF7C0E24B */
+ -2.40903221549529611423e+00, /* 0xC00345B2, 0xAEA48074 */
+ -2.19659774734883086467e+01, /* 0xC035F74A, 0x4CB94E14 */
+ -5.80791704701737572236e+01, /* 0xC04D0A22, 0x420A1A45 */
+ -3.14479470594888503854e+01, /* 0xC03F72AC, 0xA892D80F */
+};
+#ifdef __STDC__
+static const double pS3[5] = {
+#else
+static double pS3[5] = {
+#endif
+ 3.58560338055209726349e+01, /* 0x4041ED92, 0x84077DD3 */
+ 3.61513983050303863820e+02, /* 0x40769839, 0x464A7C0E */
+ 1.19360783792111533330e+03, /* 0x4092A66E, 0x6D1061D6 */
+ 1.12799679856907414432e+03, /* 0x40919FFC, 0xB8C39B7E */
+ 1.73580930813335754692e+02, /* 0x4065B296, 0xFC379081 */
+};
+
+#ifdef __STDC__
+static const double pR2[6] = {/* for x in [2.8570,2]=1/[0.3499,0.5] */
+#else
+static double pR2[6] = {/* for x in [2.8570,2]=1/[0.3499,0.5] */
+#endif
+ -8.87534333032526411254e-08, /* 0xBE77D316, 0xE927026D */
+ -7.03030995483624743247e-02, /* 0xBFB1FF62, 0x495E1E42 */
+ -1.45073846780952986357e+00, /* 0xBFF73639, 0x8A24A843 */
+ -7.63569613823527770791e+00, /* 0xC01E8AF3, 0xEDAFA7F3 */
+ -1.11931668860356747786e+01, /* 0xC02662E6, 0xC5246303 */
+ -3.23364579351335335033e+00, /* 0xC009DE81, 0xAF8FE70F */
+};
+#ifdef __STDC__
+static const double pS2[5] = {
+#else
+static double pS2[5] = {
+#endif
+ 2.22202997532088808441e+01, /* 0x40363865, 0x908B5959 */
+ 1.36206794218215208048e+02, /* 0x4061069E, 0x0EE8878F */
+ 2.70470278658083486789e+02, /* 0x4070E786, 0x42EA079B */
+ 1.53875394208320329881e+02, /* 0x40633C03, 0x3AB6FAFF */
+ 1.46576176948256193810e+01, /* 0x402D50B3, 0x44391809 */
+};
+
+#ifdef __STDC__
+ static double pzero(double x)
+#else
+ static double pzero(x)
+ double x;
+#endif
+{
+#ifdef __STDC__
+ const double *p,*q;
+#else
+ double *p,*q;
+#endif
+ fd_twoints u;
+ double z,r,s;
+ int ix;
+ u.d = x;
+ ix = 0x7fffffff&__HI(u);
+ if(ix>=0x40200000) {p = pR8; q= pS8;}
+ else if(ix>=0x40122E8B){p = pR5; q= pS5;}
+ else if(ix>=0x4006DB6D){p = pR3; q= pS3;}
+ else if(ix>=0x40000000){p = pR2; q= pS2;}
+ z = one/(x*x);
+ r = p[0]+z*(p[1]+z*(p[2]+z*(p[3]+z*(p[4]+z*p[5]))));
+ s = one+z*(q[0]+z*(q[1]+z*(q[2]+z*(q[3]+z*q[4]))));
+ return one+ r/s;
+}
+
+
+/* For x >= 8, the asymptotic expansion of qzero is
+ * -1/8 s + 75/1024 s^3 - ..., where s = 1/x.
+ * We approximate qzero by
+ * qzero(x) = s*(-1.25 + (R/S))
+ * where R = qR0 + qR1*s^2 + qR2*s^4 + ... + qR5*s^10
+ * S = 1 + qS0*s^2 + ... + qS5*s^12
+ * and
+ * | qzero(x)/s +1.25-R/S | <= 2 ** ( -61.22)
+ */
+#ifdef __STDC__
+static const double qR8[6] = { /* for x in [inf, 8]=1/[0,0.125] */
+#else
+static double qR8[6] = { /* for x in [inf, 8]=1/[0,0.125] */
+#endif
+ 0.00000000000000000000e+00, /* 0x00000000, 0x00000000 */
+ 7.32421874999935051953e-02, /* 0x3FB2BFFF, 0xFFFFFE2C */
+ 1.17682064682252693899e+01, /* 0x40278952, 0x5BB334D6 */
+ 5.57673380256401856059e+02, /* 0x40816D63, 0x15301825 */
+ 8.85919720756468632317e+03, /* 0x40C14D99, 0x3E18F46D */
+ 3.70146267776887834771e+04, /* 0x40E212D4, 0x0E901566 */
+};
+#ifdef __STDC__
+static const double qS8[6] = {
+#else
+static double qS8[6] = {
+#endif
+ 1.63776026895689824414e+02, /* 0x406478D5, 0x365B39BC */
+ 8.09834494656449805916e+03, /* 0x40BFA258, 0x4E6B0563 */
+ 1.42538291419120476348e+05, /* 0x41016652, 0x54D38C3F */
+ 8.03309257119514397345e+05, /* 0x412883DA, 0x83A52B43 */
+ 8.40501579819060512818e+05, /* 0x4129A66B, 0x28DE0B3D */
+ -3.43899293537866615225e+05, /* 0xC114FD6D, 0x2C9530C5 */
+};
+
+#ifdef __STDC__
+static const double qR5[6] = { /* for x in [8,4.5454]=1/[0.125,0.22001] */
+#else
+static double qR5[6] = { /* for x in [8,4.5454]=1/[0.125,0.22001] */
+#endif
+ 1.84085963594515531381e-11, /* 0x3DB43D8F, 0x29CC8CD9 */
+ 7.32421766612684765896e-02, /* 0x3FB2BFFF, 0xD172B04C */
+ 5.83563508962056953777e+00, /* 0x401757B0, 0xB9953DD3 */
+ 1.35111577286449829671e+02, /* 0x4060E392, 0x0A8788E9 */
+ 1.02724376596164097464e+03, /* 0x40900CF9, 0x9DC8C481 */
+ 1.98997785864605384631e+03, /* 0x409F17E9, 0x53C6E3A6 */
+};
+#ifdef __STDC__
+static const double qS5[6] = {
+#else
+static double qS5[6] = {
+#endif
+ 8.27766102236537761883e+01, /* 0x4054B1B3, 0xFB5E1543 */
+ 2.07781416421392987104e+03, /* 0x40A03BA0, 0xDA21C0CE */
+ 1.88472887785718085070e+04, /* 0x40D267D2, 0x7B591E6D */
+ 5.67511122894947329769e+04, /* 0x40EBB5E3, 0x97E02372 */
+ 3.59767538425114471465e+04, /* 0x40E19118, 0x1F7A54A0 */
+ -5.35434275601944773371e+03, /* 0xC0B4EA57, 0xBEDBC609 */
+};
+
+#ifdef __STDC__
+static const double qR3[6] = {/* for x in [4.547,2.8571]=1/[0.2199,0.35001] */
+#else
+static double qR3[6] = {/* for x in [4.547,2.8571]=1/[0.2199,0.35001] */
+#endif
+ 4.37741014089738620906e-09, /* 0x3E32CD03, 0x6ADECB82 */
+ 7.32411180042911447163e-02, /* 0x3FB2BFEE, 0x0E8D0842 */
+ 3.34423137516170720929e+00, /* 0x400AC0FC, 0x61149CF5 */
+ 4.26218440745412650017e+01, /* 0x40454F98, 0x962DAEDD */
+ 1.70808091340565596283e+02, /* 0x406559DB, 0xE25EFD1F */
+ 1.66733948696651168575e+02, /* 0x4064D77C, 0x81FA21E0 */
+};
+#ifdef __STDC__
+static const double qS3[6] = {
+#else
+static double qS3[6] = {
+#endif
+ 4.87588729724587182091e+01, /* 0x40486122, 0xBFE343A6 */
+ 7.09689221056606015736e+02, /* 0x40862D83, 0x86544EB3 */
+ 3.70414822620111362994e+03, /* 0x40ACF04B, 0xE44DFC63 */
+ 6.46042516752568917582e+03, /* 0x40B93C6C, 0xD7C76A28 */
+ 2.51633368920368957333e+03, /* 0x40A3A8AA, 0xD94FB1C0 */
+ -1.49247451836156386662e+02, /* 0xC062A7EB, 0x201CF40F */
+};
+
+#ifdef __STDC__
+static const double qR2[6] = {/* for x in [2.8570,2]=1/[0.3499,0.5] */
+#else
+static double qR2[6] = {/* for x in [2.8570,2]=1/[0.3499,0.5] */
+#endif
+ 1.50444444886983272379e-07, /* 0x3E84313B, 0x54F76BDB */
+ 7.32234265963079278272e-02, /* 0x3FB2BEC5, 0x3E883E34 */
+ 1.99819174093815998816e+00, /* 0x3FFFF897, 0xE727779C */
+ 1.44956029347885735348e+01, /* 0x402CFDBF, 0xAAF96FE5 */
+ 3.16662317504781540833e+01, /* 0x403FAA8E, 0x29FBDC4A */
+ 1.62527075710929267416e+01, /* 0x403040B1, 0x71814BB4 */
+};
+#ifdef __STDC__
+static const double qS2[6] = {
+#else
+static double qS2[6] = {
+#endif
+ 3.03655848355219184498e+01, /* 0x403E5D96, 0xF7C07AED */
+ 2.69348118608049844624e+02, /* 0x4070D591, 0xE4D14B40 */
+ 8.44783757595320139444e+02, /* 0x408A6645, 0x22B3BF22 */
+ 8.82935845112488550512e+02, /* 0x408B977C, 0x9C5CC214 */
+ 2.12666388511798828631e+02, /* 0x406A9553, 0x0E001365 */
+ -5.31095493882666946917e+00, /* 0xC0153E6A, 0xF8B32931 */
+};
+
+#ifdef __STDC__
+ static double qzero(double x)
+#else
+ static double qzero(x)
+ double x;
+#endif
+{
+#ifdef __STDC__
+ const double *p,*q;
+#else
+ double *p,*q;
+#endif
+ fd_twoints u;
+ double s,r,z;
+ int ix;
+ u.d = x;
+ ix = 0x7fffffff&__HI(u);
+ if(ix>=0x40200000) {p = qR8; q= qS8;}
+ else if(ix>=0x40122E8B){p = qR5; q= qS5;}
+ else if(ix>=0x4006DB6D){p = qR3; q= qS3;}
+ else if(ix>=0x40000000){p = qR2; q= qS2;}
+ z = one/(x*x);
+ r = p[0]+z*(p[1]+z*(p[2]+z*(p[3]+z*(p[4]+z*p[5]))));
+ s = one+z*(q[0]+z*(q[1]+z*(q[2]+z*(q[3]+z*(q[4]+z*q[5])))));
+ return (-.125 + r/s)/x;
+}
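
Both __ieee754_j0 and __ieee754_y0 above rely on the identity sin(x) +- cos(x) = -cos(2x)/(sin(x) -+ cos(x)) to recompute whichever combination suffers cancellation. A quick numeric illustration of that identity using the standard math.h sin/cos instead of fdlibm's fd_sin/fd_cos (a sketch only):

    #include <math.h>
    #include <stdio.h>

    int main(void) {
        /* Near x = 3*pi/4, sin(x) + cos(x) nearly cancels while
         * sin(x) - cos(x) is about sqrt(2), so the rewritten form is safer. */
        double x = 3.0 * 3.14159265358979323846 / 4.0 + 1e-8;
        double s = sin(x), c = cos(x);
        double direct    = s + c;                     /* cancellation-prone    */
        double rewritten = -cos(2.0 * x) / (s - c);   /* form used in the code */
        printf("direct=%.17g rewritten=%.17g\n", direct, rewritten);
        return 0;
    }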
diff --git a/src/third_party/js-1.7/fdlibm/e_j1.c b/src/third_party/js-1.7/fdlibm/e_j1.c
new file mode 100644
index 00000000000..8982ac86a38
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/e_j1.c
@@ -0,0 +1,523 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_j1.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* __ieee754_j1(x), __ieee754_y1(x)
+ * Bessel function of the first and second kinds of order one.
+ * Method -- j1(x):
+ * 1. For tiny x, we use j1(x) = x/2 - x^3/16 + x^5/384 - ...
+ * 2. Reduce x to |x| since j1(x)=-j1(-x), and
+ * for x in (0,2)
+ * j1(x) = x/2 + x*z*R0/S0, where z = x*x;
+ * (precision: |j1/x - 1/2 - R0/S0 |<2**-61.51 )
+ * for x in (2,inf)
+ * j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1))
+ * y1(x) = sqrt(2/(pi*x))*(p1(x)*sin(x1)+q1(x)*cos(x1))
+ * where x1 = x-3*pi/4. It is better to compute sin(x1),cos(x1)
+ * as follows:
+ * cos(x1) = cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
+ * = 1/sqrt(2) * (sin(x) - cos(x))
+ * sin(x1) = sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
+ * = -1/sqrt(2) * (sin(x) + cos(x))
+ * (To avoid cancellation, use
+ * sin(x) +- cos(x) = -cos(2x)/(sin(x) -+ cos(x))
+ * to compute the worse one.)
+ *
+ * 3 Special cases
+ * j1(nan)= nan
+ * j1(0) = 0
+ * j1(inf) = 0
+ *
+ * Method -- y1(x):
+ * 1. screen out x<=0 cases: y1(0)=-inf, y1(x<0)=NaN
+ * 2. For x<2.
+ * Since
+ * y1(x) = 2/pi*(j1(x)*(ln(x/2)+Euler)-1/x-x/2+5/64*x^3-...)
+ * therefore y1(x)-2/pi*j1(x)*ln(x)-1/x is an odd function.
+ * We use the following function to approximate y1,
+ * y1(x) = x*U(z)/V(z) + (2/pi)*(j1(x)*ln(x)-1/x), z= x^2
+ * where for x in [0,2] (abs err less than 2**-65.89)
+ * U(z) = U0[0] + U0[1]*z + ... + U0[4]*z^4
+ * V(z) = 1 + v0[0]*z + ... + v0[4]*z^5
+ * Note: For tiny x, 1/x dominates y1 and hence
+ * y1(tiny) = -2/pi/tiny, (choose tiny<2**-54)
+ * 3. For x>=2.
+ * y1(x) = sqrt(2/(pi*x))*(p1(x)*sin(x1)+q1(x)*cos(x1))
+ * where x1 = x-3*pi/4. It is better to compute sin(x1),cos(x1)
+ * by method mentioned above.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static double pone(double), qone(double);
+#else
+static double pone(), qone();
+#endif
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+really_big = 1e300,
+one = 1.0,
+invsqrtpi= 5.64189583547756279280e-01, /* 0x3FE20DD7, 0x50429B6D */
+tpi = 6.36619772367581382433e-01, /* 0x3FE45F30, 0x6DC9C883 */
+ /* R0/S0 on [0,2] */
+r00 = -6.25000000000000000000e-02, /* 0xBFB00000, 0x00000000 */
+r01 = 1.40705666955189706048e-03, /* 0x3F570D9F, 0x98472C61 */
+r02 = -1.59955631084035597520e-05, /* 0xBEF0C5C6, 0xBA169668 */
+r03 = 4.96727999609584448412e-08, /* 0x3E6AAAFA, 0x46CA0BD9 */
+s01 = 1.91537599538363460805e-02, /* 0x3F939D0B, 0x12637E53 */
+s02 = 1.85946785588630915560e-04, /* 0x3F285F56, 0xB9CDF664 */
+s03 = 1.17718464042623683263e-06, /* 0x3EB3BFF8, 0x333F8498 */
+s04 = 5.04636257076217042715e-09, /* 0x3E35AC88, 0xC97DFF2C */
+s05 = 1.23542274426137913908e-11; /* 0x3DAB2ACF, 0xCFB97ED8 */
+
+static double zero = 0.0;
+
+#ifdef __STDC__
+ double __ieee754_j1(double x)
+#else
+ double __ieee754_j1(x)
+ double x;
+#endif
+{
+ fd_twoints un;
+ double z, s,c,ss,cc,r,u,v,y;
+ int hx,ix;
+
+ un.d = x;
+ hx = __HI(un);
+ ix = hx&0x7fffffff;
+ if(ix>=0x7ff00000) return one/x;
+ y = fd_fabs(x);
+ if(ix >= 0x40000000) { /* |x| >= 2.0 */
+ s = fd_sin(y);
+ c = fd_cos(y);
+ ss = -s-c;
+ cc = s-c;
+ if(ix<0x7fe00000) { /* make sure y+y not overflow */
+ z = fd_cos(y+y);
+ if ((s*c)>zero) cc = z/ss;
+ else ss = z/cc;
+ }
+ /*
+ * j1(x) = 1/sqrt(pi) * (P(1,x)*cc - Q(1,x)*ss) / sqrt(x)
+ * y1(x) = 1/sqrt(pi) * (P(1,x)*ss + Q(1,x)*cc) / sqrt(x)
+ */
+ if(ix>0x48000000) z = (invsqrtpi*cc)/fd_sqrt(y);
+ else {
+ u = pone(y); v = qone(y);
+ z = invsqrtpi*(u*cc-v*ss)/fd_sqrt(y);
+ }
+ if(hx<0) return -z;
+ else return z;
+ }
+ if(ix<0x3e400000) { /* |x|<2**-27 */
+ if(really_big+x>one) return 0.5*x;/* inexact if x!=0 necessary */
+ }
+ z = x*x;
+ r = z*(r00+z*(r01+z*(r02+z*r03)));
+ s = one+z*(s01+z*(s02+z*(s03+z*(s04+z*s05))));
+ r *= x;
+ return(x*0.5+r/s);
+}
+
+#ifdef __STDC__
+static const double U0[5] = {
+#else
+static double U0[5] = {
+#endif
+ -1.96057090646238940668e-01, /* 0xBFC91866, 0x143CBC8A */
+ 5.04438716639811282616e-02, /* 0x3FA9D3C7, 0x76292CD1 */
+ -1.91256895875763547298e-03, /* 0xBF5F55E5, 0x4844F50F */
+ 2.35252600561610495928e-05, /* 0x3EF8AB03, 0x8FA6B88E */
+ -9.19099158039878874504e-08, /* 0xBE78AC00, 0x569105B8 */
+};
+#ifdef __STDC__
+static const double V0[5] = {
+#else
+static double V0[5] = {
+#endif
+ 1.99167318236649903973e-02, /* 0x3F94650D, 0x3F4DA9F0 */
+ 2.02552581025135171496e-04, /* 0x3F2A8C89, 0x6C257764 */
+ 1.35608801097516229404e-06, /* 0x3EB6C05A, 0x894E8CA6 */
+ 6.22741452364621501295e-09, /* 0x3E3ABF1D, 0x5BA69A86 */
+ 1.66559246207992079114e-11, /* 0x3DB25039, 0xDACA772A */
+};
+
+#ifdef __STDC__
+ double __ieee754_y1(double x)
+#else
+ double __ieee754_y1(x)
+ double x;
+#endif
+{
+ fd_twoints un;
+ double z, s,c,ss,cc,u,v;
+ int hx,ix,lx;
+
+ un.d = x;
+ hx = __HI(un);
+ ix = 0x7fffffff&hx;
+ lx = __LO(un);
+ /* if Y1(NaN) is NaN, Y1(-inf) is NaN, Y1(inf) is 0 */
+ if(ix>=0x7ff00000) return one/(x+x*x);
+ if((ix|lx)==0) return -one/zero;
+ if(hx<0) return zero/zero;
+ if(ix >= 0x40000000) { /* |x| >= 2.0 */
+ s = fd_sin(x);
+ c = fd_cos(x);
+ ss = -s-c;
+ cc = s-c;
+ if(ix<0x7fe00000) { /* make sure x+x not overflow */
+ z = fd_cos(x+x);
+ if ((s*c)>zero) cc = z/ss;
+ else ss = z/cc;
+ }
+ /* y1(x) = sqrt(2/(pi*x))*(p1(x)*sin(x0)+q1(x)*cos(x0))
+ * where x0 = x-3pi/4
+ * Better formula:
+ * cos(x0) = cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
+ * = 1/sqrt(2) * (sin(x) - cos(x))
+ * sin(x0) = sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
+ * = -1/sqrt(2) * (cos(x) + sin(x))
+ * To avoid cancellation, use
+ * sin(x) +- cos(x) = -cos(2x)/(sin(x) -+ cos(x))
+ * to compute the worse one.
+ */
+ if(ix>0x48000000) z = (invsqrtpi*ss)/fd_sqrt(x);
+ else {
+ u = pone(x); v = qone(x);
+ z = invsqrtpi*(u*ss+v*cc)/fd_sqrt(x);
+ }
+ return z;
+ }
+ if(ix<=0x3c900000) { /* x < 2**-54 */
+ return(-tpi/x);
+ }
+ z = x*x;
+ u = U0[0]+z*(U0[1]+z*(U0[2]+z*(U0[3]+z*U0[4])));
+ v = one+z*(V0[0]+z*(V0[1]+z*(V0[2]+z*(V0[3]+z*V0[4]))));
+ return(x*(u/v) + tpi*(__ieee754_j1(x)*__ieee754_log(x)-one/x));
+}
+
+/* For x >= 8, the asymptotic expansion of pone is
+ * 1 + 15/128 s^2 - 4725/2^15 s^4 - ..., where s = 1/x.
+ * We approximate pone by
+ * pone(x) = 1 + (R/S)
+ * where R = pr0 + pr1*s^2 + pr2*s^4 + ... + pr5*s^10
+ * S = 1 + ps0*s^2 + ... + ps4*s^10
+ * and
+ * | pone(x)-1-R/S | <= 2 ** ( -60.06)
+ */
+
+#ifdef __STDC__
+static const double pr8[6] = { /* for x in [inf, 8]=1/[0,0.125] */
+#else
+static double pr8[6] = { /* for x in [inf, 8]=1/[0,0.125] */
+#endif
+ 0.00000000000000000000e+00, /* 0x00000000, 0x00000000 */
+ 1.17187499999988647970e-01, /* 0x3FBDFFFF, 0xFFFFFCCE */
+ 1.32394806593073575129e+01, /* 0x402A7A9D, 0x357F7FCE */
+ 4.12051854307378562225e+02, /* 0x4079C0D4, 0x652EA590 */
+ 3.87474538913960532227e+03, /* 0x40AE457D, 0xA3A532CC */
+ 7.91447954031891731574e+03, /* 0x40BEEA7A, 0xC32782DD */
+};
+#ifdef __STDC__
+static const double ps8[5] = {
+#else
+static double ps8[5] = {
+#endif
+ 1.14207370375678408436e+02, /* 0x405C8D45, 0x8E656CAC */
+ 3.65093083420853463394e+03, /* 0x40AC85DC, 0x964D274F */
+ 3.69562060269033463555e+04, /* 0x40E20B86, 0x97C5BB7F */
+ 9.76027935934950801311e+04, /* 0x40F7D42C, 0xB28F17BB */
+ 3.08042720627888811578e+04, /* 0x40DE1511, 0x697A0B2D */
+};
+
+#ifdef __STDC__
+static const double pr5[6] = { /* for x in [8,4.5454]=1/[0.125,0.22001] */
+#else
+static double pr5[6] = { /* for x in [8,4.5454]=1/[0.125,0.22001] */
+#endif
+ 1.31990519556243522749e-11, /* 0x3DAD0667, 0xDAE1CA7D */
+ 1.17187493190614097638e-01, /* 0x3FBDFFFF, 0xE2C10043 */
+ 6.80275127868432871736e+00, /* 0x401B3604, 0x6E6315E3 */
+ 1.08308182990189109773e+02, /* 0x405B13B9, 0x452602ED */
+ 5.17636139533199752805e+02, /* 0x40802D16, 0xD052D649 */
+ 5.28715201363337541807e+02, /* 0x408085B8, 0xBB7E0CB7 */
+};
+#ifdef __STDC__
+static const double ps5[5] = {
+#else
+static double ps5[5] = {
+#endif
+ 5.92805987221131331921e+01, /* 0x404DA3EA, 0xA8AF633D */
+ 9.91401418733614377743e+02, /* 0x408EFB36, 0x1B066701 */
+ 5.35326695291487976647e+03, /* 0x40B4E944, 0x5706B6FB */
+ 7.84469031749551231769e+03, /* 0x40BEA4B0, 0xB8A5BB15 */
+ 1.50404688810361062679e+03, /* 0x40978030, 0x036F5E51 */
+};
+
+#ifdef __STDC__
+static const double pr3[6] = {
+#else
+static double pr3[6] = {/* for x in [4.547,2.8571]=1/[0.2199,0.35001] */
+#endif
+ 3.02503916137373618024e-09, /* 0x3E29FC21, 0xA7AD9EDD */
+ 1.17186865567253592491e-01, /* 0x3FBDFFF5, 0x5B21D17B */
+ 3.93297750033315640650e+00, /* 0x400F76BC, 0xE85EAD8A */
+ 3.51194035591636932736e+01, /* 0x40418F48, 0x9DA6D129 */
+ 9.10550110750781271918e+01, /* 0x4056C385, 0x4D2C1837 */
+ 4.85590685197364919645e+01, /* 0x4048478F, 0x8EA83EE5 */
+};
+#ifdef __STDC__
+static const double ps3[5] = {
+#else
+static double ps3[5] = {
+#endif
+ 3.47913095001251519989e+01, /* 0x40416549, 0xA134069C */
+ 3.36762458747825746741e+02, /* 0x40750C33, 0x07F1A75F */
+ 1.04687139975775130551e+03, /* 0x40905B7C, 0x5037D523 */
+ 8.90811346398256432622e+02, /* 0x408BD67D, 0xA32E31E9 */
+ 1.03787932439639277504e+02, /* 0x4059F26D, 0x7C2EED53 */
+};
+
+#ifdef __STDC__
+static const double pr2[6] = {/* for x in [2.8570,2]=1/[0.3499,0.5] */
+#else
+static double pr2[6] = {/* for x in [2.8570,2]=1/[0.3499,0.5] */
+#endif
+ 1.07710830106873743082e-07, /* 0x3E7CE9D4, 0xF65544F4 */
+ 1.17176219462683348094e-01, /* 0x3FBDFF42, 0xBE760D83 */
+ 2.36851496667608785174e+00, /* 0x4002F2B7, 0xF98FAEC0 */
+ 1.22426109148261232917e+01, /* 0x40287C37, 0x7F71A964 */
+ 1.76939711271687727390e+01, /* 0x4031B1A8, 0x177F8EE2 */
+ 5.07352312588818499250e+00, /* 0x40144B49, 0xA574C1FE */
+};
+#ifdef __STDC__
+static const double ps2[5] = {
+#else
+static double ps2[5] = {
+#endif
+ 2.14364859363821409488e+01, /* 0x40356FBD, 0x8AD5ECDC */
+ 1.25290227168402751090e+02, /* 0x405F5293, 0x14F92CD5 */
+ 2.32276469057162813669e+02, /* 0x406D08D8, 0xD5A2DBD9 */
+ 1.17679373287147100768e+02, /* 0x405D6B7A, 0xDA1884A9 */
+ 8.36463893371618283368e+00, /* 0x4020BAB1, 0xF44E5192 */
+};
+
+#ifdef __STDC__
+ static double pone(double x)
+#else
+ static double pone(x)
+ double x;
+#endif
+{
+#ifdef __STDC__
+ const double *p,*q;
+#else
+ double *p,*q;
+#endif
+ fd_twoints un;
+ double z,r,s;
+ int ix;
+ un.d = x;
+ ix = 0x7fffffff&__HI(un);
+ if(ix>=0x40200000) {p = pr8; q= ps8;}
+ else if(ix>=0x40122E8B){p = pr5; q= ps5;}
+ else if(ix>=0x4006DB6D){p = pr3; q= ps3;}
+ else if(ix>=0x40000000){p = pr2; q= ps2;}
+ z = one/(x*x);
+ r = p[0]+z*(p[1]+z*(p[2]+z*(p[3]+z*(p[4]+z*p[5]))));
+ s = one+z*(q[0]+z*(q[1]+z*(q[2]+z*(q[3]+z*q[4]))));
+ return one+ r/s;
+}
+
+
+/* For x >= 8, the asymptotic expansion of qone is
+ * 3/8 s - 105/1024 s^3 - ..., where s = 1/x.
+ * We approximate qone by
+ * qone(x) = s*(0.375 + (R/S))
+ * where R = qr1*s^2 + qr2*s^4 + ... + qr5*s^10
+ * S = 1 + qs1*s^2 + ... + qs6*s^12
+ * and
+ * | qone(x)/s -0.375-R/S | <= 2 ** ( -61.13)
+ */
+
+#ifdef __STDC__
+static const double qr8[6] = { /* for x in [inf, 8]=1/[0,0.125] */
+#else
+static double qr8[6] = { /* for x in [inf, 8]=1/[0,0.125] */
+#endif
+ 0.00000000000000000000e+00, /* 0x00000000, 0x00000000 */
+ -1.02539062499992714161e-01, /* 0xBFBA3FFF, 0xFFFFFDF3 */
+ -1.62717534544589987888e+01, /* 0xC0304591, 0xA26779F7 */
+ -7.59601722513950107896e+02, /* 0xC087BCD0, 0x53E4B576 */
+ -1.18498066702429587167e+04, /* 0xC0C724E7, 0x40F87415 */
+ -4.84385124285750353010e+04, /* 0xC0E7A6D0, 0x65D09C6A */
+};
+#ifdef __STDC__
+static const double qs8[6] = {
+#else
+static double qs8[6] = {
+#endif
+ 1.61395369700722909556e+02, /* 0x40642CA6, 0xDE5BCDE5 */
+ 7.82538599923348465381e+03, /* 0x40BE9162, 0xD0D88419 */
+ 1.33875336287249578163e+05, /* 0x4100579A, 0xB0B75E98 */
+ 7.19657723683240939863e+05, /* 0x4125F653, 0x72869C19 */
+ 6.66601232617776375264e+05, /* 0x412457D2, 0x7719AD5C */
+ -2.94490264303834643215e+05, /* 0xC111F969, 0x0EA5AA18 */
+};
+
+#ifdef __STDC__
+static const double qr5[6] = { /* for x in [8,4.5454]=1/[0.125,0.22001] */
+#else
+static double qr5[6] = { /* for x in [8,4.5454]=1/[0.125,0.22001] */
+#endif
+ -2.08979931141764104297e-11, /* 0xBDB6FA43, 0x1AA1A098 */
+ -1.02539050241375426231e-01, /* 0xBFBA3FFF, 0xCB597FEF */
+ -8.05644828123936029840e+00, /* 0xC0201CE6, 0xCA03AD4B */
+ -1.83669607474888380239e+02, /* 0xC066F56D, 0x6CA7B9B0 */
+ -1.37319376065508163265e+03, /* 0xC09574C6, 0x6931734F */
+ -2.61244440453215656817e+03, /* 0xC0A468E3, 0x88FDA79D */
+};
+#ifdef __STDC__
+static const double qs5[6] = {
+#else
+static double qs5[6] = {
+#endif
+ 8.12765501384335777857e+01, /* 0x405451B2, 0xFF5A11B2 */
+ 1.99179873460485964642e+03, /* 0x409F1F31, 0xE77BF839 */
+ 1.74684851924908907677e+04, /* 0x40D10F1F, 0x0D64CE29 */
+ 4.98514270910352279316e+04, /* 0x40E8576D, 0xAABAD197 */
+ 2.79480751638918118260e+04, /* 0x40DB4B04, 0xCF7C364B */
+ -4.71918354795128470869e+03, /* 0xC0B26F2E, 0xFCFFA004 */
+};
+
+#ifdef __STDC__
+static const double qr3[6] = {
+#else
+static double qr3[6] = {/* for x in [4.547,2.8571]=1/[0.2199,0.35001] */
+#endif
+ -5.07831226461766561369e-09, /* 0xBE35CFA9, 0xD38FC84F */
+ -1.02537829820837089745e-01, /* 0xBFBA3FEB, 0x51AEED54 */
+ -4.61011581139473403113e+00, /* 0xC01270C2, 0x3302D9FF */
+ -5.78472216562783643212e+01, /* 0xC04CEC71, 0xC25D16DA */
+ -2.28244540737631695038e+02, /* 0xC06C87D3, 0x4718D55F */
+ -2.19210128478909325622e+02, /* 0xC06B66B9, 0x5F5C1BF6 */
+};
+#ifdef __STDC__
+static const double qs3[6] = {
+#else
+static double qs3[6] = {
+#endif
+ 4.76651550323729509273e+01, /* 0x4047D523, 0xCCD367E4 */
+ 6.73865112676699709482e+02, /* 0x40850EEB, 0xC031EE3E */
+ 3.38015286679526343505e+03, /* 0x40AA684E, 0x448E7C9A */
+ 5.54772909720722782367e+03, /* 0x40B5ABBA, 0xA61D54A6 */
+ 1.90311919338810798763e+03, /* 0x409DBC7A, 0x0DD4DF4B */
+ -1.35201191444307340817e+02, /* 0xC060E670, 0x290A311F */
+};
+
+#ifdef __STDC__
+static const double qr2[6] = {/* for x in [2.8570,2]=1/[0.3499,0.5] */
+#else
+static double qr2[6] = {/* for x in [2.8570,2]=1/[0.3499,0.5] */
+#endif
+ -1.78381727510958865572e-07, /* 0xBE87F126, 0x44C626D2 */
+ -1.02517042607985553460e-01, /* 0xBFBA3E8E, 0x9148B010 */
+ -2.75220568278187460720e+00, /* 0xC0060484, 0x69BB4EDA */
+ -1.96636162643703720221e+01, /* 0xC033A9E2, 0xC168907F */
+ -4.23253133372830490089e+01, /* 0xC04529A3, 0xDE104AAA */
+ -2.13719211703704061733e+01, /* 0xC0355F36, 0x39CF6E52 */
+};
+#ifdef __STDC__
+static const double qs2[6] = {
+#else
+static double qs2[6] = {
+#endif
+ 2.95333629060523854548e+01, /* 0x403D888A, 0x78AE64FF */
+ 2.52981549982190529136e+02, /* 0x406F9F68, 0xDB821CBA */
+ 7.57502834868645436472e+02, /* 0x4087AC05, 0xCE49A0F7 */
+ 7.39393205320467245656e+02, /* 0x40871B25, 0x48D4C029 */
+ 1.55949003336666123687e+02, /* 0x40637E5E, 0x3C3ED8D4 */
+ -4.95949898822628210127e+00, /* 0xC013D686, 0xE71BE86B */
+};
+
+#ifdef __STDC__
+ static double qone(double x)
+#else
+ static double qone(x)
+ double x;
+#endif
+{
+#ifdef __STDC__
+ const double *p,*q;
+#else
+ double *p,*q;
+#endif
+ fd_twoints un;
+ double s,r,z;
+ int ix;
+ un.d = x;
+ ix = 0x7fffffff&__HI(un);
+ if(ix>=0x40200000) {p = qr8; q= qs8;}
+ else if(ix>=0x40122E8B){p = qr5; q= qs5;}
+ else if(ix>=0x4006DB6D){p = qr3; q= qs3;}
+ else if(ix>=0x40000000){p = qr2; q= qs2;}
+ z = one/(x*x);
+ r = p[0]+z*(p[1]+z*(p[2]+z*(p[3]+z*(p[4]+z*p[5]))));
+ s = one+z*(q[0]+z*(q[1]+z*(q[2]+z*(q[3]+z*(q[4]+z*q[5])))));
+ return (.375 + r/s)/x;
+}
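
As with pzero/qzero above, pone and qone evaluate their rational approximations in z = 1/x^2 by nested multiplication, i.e. Horner's rule. A standalone sketch of that evaluation pattern with made-up coefficients (illustrative values, not the tables from this file):

    #include <stdio.h>

    /* Horner evaluation of p[0] + p[1]*z + ... + p[n-1]*z^(n-1), the same
     * nesting used for the pr/ps and qr/qs coefficient tables above. */
    static double horner(const double *p, int n, double z) {
        double r = p[n - 1];
        for (int i = n - 2; i >= 0; i--)
            r = p[i] + z * r;
        return r;
    }

    int main(void) {
        const double p[4] = { 1.0, -0.5, 0.25, -0.125 };   /* illustrative only */
        double x = 3.0, z = 1.0 / (x * x);
        printf("R(z) = %.17g\n", horner(p, 4, z));
        return 0;
    }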
diff --git a/src/third_party/js-1.7/fdlibm/e_jn.c b/src/third_party/js-1.7/fdlibm/e_jn.c
new file mode 100644
index 00000000000..2b61b44399d
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/e_jn.c
@@ -0,0 +1,315 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_jn.c 1.4 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * __ieee754_jn(n, x), __ieee754_yn(n, x)
+ * floating point Bessel's function of the 1st and 2nd kind
+ * of order n
+ *
+ * Special cases:
+ * y0(0)=y1(0)=yn(n,0) = -inf with division by zero signal;
+ * y0(-ve)=y1(-ve)=yn(n,-ve) are NaN with invalid signal.
+ * Note 2. About jn(n,x), yn(n,x)
+ * For n=0, j0(x) is called,
+ * for n=1, j1(x) is called,
+ * for n<x, forward recursion is used starting
+ * from values of j0(x) and j1(x).
+ * for n>x, a continued fraction approximation to
+ * j(n,x)/j(n-1,x) is evaluated and then backward
+ * recursion is used starting from a supposed value
+ * for j(n,x). The resulting value of j(0,x) is
+ * compared with the actual value to correct the
+ * supposed value of j(n,x).
+ *
+ * yn(n,x) is similar in all respects, except
+ * that forward recursion is used for all
+ * values of n>1.
+ *
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+invsqrtpi= 5.64189583547756279280e-01, /* 0x3FE20DD7, 0x50429B6D */
+two = 2.00000000000000000000e+00, /* 0x40000000, 0x00000000 */
+one = 1.00000000000000000000e+00; /* 0x3FF00000, 0x00000000 */
+
+static double zero = 0.00000000000000000000e+00;
+
+#ifdef __STDC__
+ double __ieee754_jn(int n, double x)
+#else
+ double __ieee754_jn(n,x)
+ int n; double x;
+#endif
+{
+ fd_twoints u;
+ int i,hx,ix,lx, sgn;
+ double a, b, temp, di;
+ double z, w;
+
+ /* J(-n,x) = (-1)^n * J(n, x), J(n, -x) = (-1)^n * J(n, x)
+ * Thus, J(-n,x) = J(n,-x)
+ */
+ u.d = x;
+ hx = __HI(u);
+ ix = 0x7fffffff&hx;
+ lx = __LO(u);
+ /* if J(n,NaN) is NaN */
+ if((ix|((unsigned)(lx|-lx))>>31)>0x7ff00000) return x+x;
+ if(n<0){
+ n = -n;
+ x = -x;
+ hx ^= 0x80000000;
+ }
+ if(n==0) return(__ieee754_j0(x));
+ if(n==1) return(__ieee754_j1(x));
+ sgn = (n&1)&(hx>>31); /* even n -- 0, odd n -- sign(x) */
+ x = fd_fabs(x);
+ if((ix|lx)==0||ix>=0x7ff00000) /* if x is 0 or inf */
+ b = zero;
+ else if((double)n<=x) {
+ /* Safe to use J(n+1,x)=2n/x *J(n,x)-J(n-1,x) */
+ if(ix>=0x52D00000) { /* x > 2**302 */
+ /* (x >> n**2)
+ * Jn(x) = cos(x-(2n+1)*pi/4)*sqrt(2/x*pi)
+ * Yn(x) = sin(x-(2n+1)*pi/4)*sqrt(2/x*pi)
+ * Let s=sin(x), c=cos(x),
+ * xn=x-(2n+1)*pi/4, sqt2 = sqrt(2),then
+ *
+ * n sin(xn)*sqt2 cos(xn)*sqt2
+ * ----------------------------------
+ * 0 s-c c+s
+ * 1 -s-c -c+s
+ * 2 -s+c -c-s
+ * 3 s+c c-s
+ */
+ switch(n&3) {
+ case 0: temp = fd_cos(x)+fd_sin(x); break;
+ case 1: temp = -fd_cos(x)+fd_sin(x); break;
+ case 2: temp = -fd_cos(x)-fd_sin(x); break;
+ case 3: temp = fd_cos(x)-fd_sin(x); break;
+ }
+ b = invsqrtpi*temp/fd_sqrt(x);
+ } else {
+ a = __ieee754_j0(x);
+ b = __ieee754_j1(x);
+ for(i=1;i<n;i++){
+ temp = b;
+ b = b*((double)(i+i)/x) - a; /* avoid underflow */
+ a = temp;
+ }
+ }
+ } else {
+ if(ix<0x3e100000) { /* x < 2**-29 */
+ /* x is tiny, return the first Taylor expansion of J(n,x)
+ * J(n,x) = 1/n!*(x/2)^n - ...
+ */
+ if(n>33) /* underflow */
+ b = zero;
+ else {
+ temp = x*0.5; b = temp;
+ for (a=one,i=2;i<=n;i++) {
+ a *= (double)i; /* a = n! */
+ b *= temp; /* b = (x/2)^n */
+ }
+ b = b/a;
+ }
+ } else {
+ /* use backward recurrence */
+ /* x x^2 x^2
+ * J(n,x)/J(n-1,x) = ---- ------ ------ .....
+ * 2n - 2(n+1) - 2(n+2)
+ *
+ * 1 1 1
+ * (for large x) = ---- ------ ------ .....
+ * 2n 2(n+1) 2(n+2)
+ * -- - ------ - ------ -
+ * x x x
+ *
+ * Let w = 2n/x and h=2/x, then the above quotient
+ * is equal to the continued fraction:
+ * 1
+ * = -----------------------
+ * 1
+ * w - -----------------
+ * 1
+ * w+h - ---------
+ * w+2h - ...
+ *
+ * To determine how many terms needed, let
+ * Q(0) = w, Q(1) = w(w+h) - 1,
+ * Q(k) = (w+k*h)*Q(k-1) - Q(k-2),
+ * When Q(k) > 1e4 good for single
+ * When Q(k) > 1e9 good for double
+ * When Q(k) > 1e17 good for quadruple
+ */
+ /* determine k */
+ double t,v;
+ double q0,q1,h,tmp; int k,m;
+ w = (n+n)/(double)x; h = 2.0/(double)x;
+ q0 = w; z = w+h; q1 = w*z - 1.0; k=1;
+ while(q1<1.0e9) {
+ k += 1; z += h;
+ tmp = z*q1 - q0;
+ q0 = q1;
+ q1 = tmp;
+ }
+ m = n+n;
+ for(t=zero, i = 2*(n+k); i>=m; i -= 2) t = one/(i/x-t);
+ a = t;
+ b = one;
+ /* estimate log((2/x)^n*n!) = n*log(2/x)+n*ln(n)
+ * Hence, if n*(log(2n/x)) > ...
+ * single 8.8722839355e+01
+ * double 7.09782712893383973096e+02
+ * long double 1.1356523406294143949491931077970765006170e+04
+ * then recurrent value may overflow and the result is
+ * likely underflow to zero
+ */
+ tmp = n;
+ v = two/x;
+ tmp = tmp*__ieee754_log(fd_fabs(v*tmp));
+ if(tmp<7.09782712893383973096e+02) {
+ for(i=n-1,di=(double)(i+i);i>0;i--){
+ temp = b;
+ b *= di;
+ b = b/x - a;
+ a = temp;
+ di -= two;
+ }
+ } else {
+ for(i=n-1,di=(double)(i+i);i>0;i--){
+ temp = b;
+ b *= di;
+ b = b/x - a;
+ a = temp;
+ di -= two;
+ /* scale b to avoid spurious overflow */
+ if(b>1e100) {
+ a /= b;
+ t /= b;
+ b = one;
+ }
+ }
+ }
+ b = (t*__ieee754_j0(x)/b);
+ }
+ }
+ if(sgn==1) return -b; else return b;
+}
+
+#ifdef __STDC__
+ double __ieee754_yn(int n, double x)
+#else
+ double __ieee754_yn(n,x)
+ int n; double x;
+#endif
+{
+ fd_twoints u;
+ int i,hx,ix,lx;
+ int sign;
+ double a, b, temp;
+
+ u.d = x;
+ hx = __HI(u);
+ ix = 0x7fffffff&hx;
+ lx = __LO(u);
+ /* if Y(n,NaN) is NaN */
+ if((ix|((unsigned)(lx|-lx))>>31)>0x7ff00000) return x+x;
+ if((ix|lx)==0) return -one/zero;
+ if(hx<0) return zero/zero;
+ sign = 1;
+ if(n<0){
+ n = -n;
+ sign = 1 - ((n&1)<<1);
+ }
+ if(n==0) return(__ieee754_y0(x));
+ if(n==1) return(sign*__ieee754_y1(x));
+ if(ix==0x7ff00000) return zero;
+ if(ix>=0x52D00000) { /* x > 2**302 */
+ /* (x >> n**2)
+ * Jn(x) = cos(x-(2n+1)*pi/4)*sqrt(2/x*pi)
+ * Yn(x) = sin(x-(2n+1)*pi/4)*sqrt(2/x*pi)
+ * Let s=sin(x), c=cos(x),
+ * xn=x-(2n+1)*pi/4, sqt2 = sqrt(2),then
+ *
+ * n sin(xn)*sqt2 cos(xn)*sqt2
+ * ----------------------------------
+ * 0 s-c c+s
+ * 1 -s-c -c+s
+ * 2 -s+c -c-s
+ * 3 s+c c-s
+ */
+ switch(n&3) {
+ case 0: temp = fd_sin(x)-fd_cos(x); break;
+ case 1: temp = -fd_sin(x)-fd_cos(x); break;
+ case 2: temp = -fd_sin(x)+fd_cos(x); break;
+ case 3: temp = fd_sin(x)+fd_cos(x); break;
+ }
+ b = invsqrtpi*temp/fd_sqrt(x);
+ } else {
+ a = __ieee754_y0(x);
+ b = __ieee754_y1(x);
+ /* quit if b is -inf */
+ u.d = b;
+ for(i=1;i<n&&(__HI(u) != 0xfff00000);i++){
+ temp = b;
+ b = ((double)(i+i)/x)*b - a;
+ a = temp;
+ }
+ }
+ if(sign>0) return b; else return -b;
+}
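
For n <= x, __ieee754_jn above advances the forward recurrence J(n+1,x) = (2n/x)*J(n,x) - J(n-1,x) from j0 and j1; for n > x it runs the continued-fraction and backward-recursion scheme described in the comment. A minimal sketch of just the forward loop, using the POSIX j0/j1/jn from math.h in place of the fdlibm internals (assumed available; link with -lm):

    #include <math.h>
    #include <stdio.h>

    /* Forward recurrence for J(n,x); stable when n <= x, as in __ieee754_jn. */
    static double jn_forward(int n, double x) {
        double a = j0(x), b = j1(x), t;
        if (n == 0) return a;
        for (int i = 1; i < n; i++) {
            t = b;
            b = b * ((double)(i + i) / x) - a;
            a = t;
        }
        return b;
    }

    int main(void) {
        double x = 10.0;
        printf("J3(%g): recurrence=%.12g  libm=%.12g\n",
               x, jn_forward(3, x), jn(3, x));
        return 0;
    }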
diff --git a/src/third_party/js-1.7/fdlibm/e_lgamma.c b/src/third_party/js-1.7/fdlibm/e_lgamma.c
new file mode 100644
index 00000000000..beb3bd93219
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/e_lgamma.c
@@ -0,0 +1,71 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_lgamma.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+
+/* __ieee754_lgamma(x)
+ * Return the logarithm of the Gamma function of x.
+ *
+ * Method: call __ieee754_lgamma_r
+ */
+
+#include "fdlibm.h"
+
+extern int signgam;
+
+#ifdef __STDC__
+ double __ieee754_lgamma(double x)
+#else
+ double __ieee754_lgamma(x)
+ double x;
+#endif
+{
+ return __ieee754_lgamma_r(x,&signgam);
+}
diff --git a/src/third_party/js-1.7/fdlibm/e_lgamma_r.c b/src/third_party/js-1.7/fdlibm/e_lgamma_r.c
new file mode 100644
index 00000000000..df92e7a26c9
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/e_lgamma_r.c
@@ -0,0 +1,347 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_lgamma_r.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+
+/* __ieee754_lgamma_r(x, signgamp)
+ * Reentrant version of the logarithm of the Gamma function
+ * with a user-provided pointer for the sign of Gamma(x).
+ *
+ * Method:
+ * 1. Argument Reduction for 0 < x <= 8
+ * Since gamma(1+s)=s*gamma(s), for x in [0,8], we may
+ * reduce x to a number in [1.5,2.5] by
+ * lgamma(1+s) = log(s) + lgamma(s)
+ * for example,
+ * lgamma(7.3) = log(6.3) + lgamma(6.3)
+ * = log(6.3*5.3) + lgamma(5.3)
+ * = log(6.3*5.3*4.3*3.3*2.3) + lgamma(2.3)
+ * 2. Polynomial approximation of lgamma around its
+ * minimum ymin=1.461632144968362245 to maintain monotonicity.
+ * On [ymin-0.23, ymin+0.27] (i.e., [1.23164,1.73163]), use
+ * Let z = x-ymin;
+ * lgamma(x) = -1.214862905358496078218 + z^2*poly(z)
+ * where
+ * poly(z) is a 14 degree polynomial.
+ * 3. Rational approximation in the primary interval [2,3]
+ * We use the following approximation:
+ * s = x-2.0;
+ * lgamma(x) = 0.5*s + s*P(s)/Q(s)
+ * with accuracy
+ * |P/Q - (lgamma(x)-0.5s)| < 2**-61.71
+ * Our algorithms are based on the following observation
+ *
+ * lgamma(2+s) = s*(1-Euler) + (zeta(2)-1)/2 * s^2 - (zeta(3)-1)/3 * s^3 + ...
+ *
+ * where Euler = 0.5772... is the Euler constant, which is very
+ * close to 0.5.
+ *
+ * 4. For x>=8, we have
+ * lgamma(x)~(x-0.5)log(x)-x+0.5*log(2pi)+1/(12x)-1/(360x**3)+....
+ * (better formula:
+ * lgamma(x)~(x-0.5)*(log(x)-1)-.5*(log(2pi)-1) + ...)
+ * Let z = 1/x, then we approximate
+ * f(z) = lgamma(x) - (x-0.5)(log(x)-1)
+ * by
+ * w = w0 + w1*z + w2*z^3 + w3*z^5 + ... + w6*z^11
+ * where
+ * |w - f(z)| < 2**-58.74
+ *
+ * 5. For negative x, since (G is gamma function)
+ * -x*G(-x)*G(x) = pi/sin(pi*x),
+ * we have
+ * G(x) = pi/(sin(pi*x)*(-x)*G(-x))
+ * since G(-x) is positive, sign(G(x)) = sign(sin(pi*x)) for x<0
+ * Hence, for x<0, signgam = sign(sin(pi*x)) and
+ * lgamma(x) = log(|Gamma(x)|)
+ * = log(pi/(|x*sin(pi*x)|)) - lgamma(-x);
+ * Note: one should avoid computing pi*(-x) directly in the
+ * computation of sin(pi*(-x)).
+ *
+ * 6. Special Cases
+ * lgamma(2+s) ~ s*(1-Euler) for tiny s
+ * lgamma(1)=lgamma(2)=0
+ * lgamma(x) ~ -log(x) for tiny x
+ * lgamma(0) = lgamma(inf) = inf
+ * lgamma(-integer) = +-inf
+ *
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+two52= 4.50359962737049600000e+15, /* 0x43300000, 0x00000000 */
+half= 5.00000000000000000000e-01, /* 0x3FE00000, 0x00000000 */
+one = 1.00000000000000000000e+00, /* 0x3FF00000, 0x00000000 */
+pi = 3.14159265358979311600e+00, /* 0x400921FB, 0x54442D18 */
+a0 = 7.72156649015328655494e-02, /* 0x3FB3C467, 0xE37DB0C8 */
+a1 = 3.22467033424113591611e-01, /* 0x3FD4A34C, 0xC4A60FAD */
+a2 = 6.73523010531292681824e-02, /* 0x3FB13E00, 0x1A5562A7 */
+a3 = 2.05808084325167332806e-02, /* 0x3F951322, 0xAC92547B */
+a4 = 7.38555086081402883957e-03, /* 0x3F7E404F, 0xB68FEFE8 */
+a5 = 2.89051383673415629091e-03, /* 0x3F67ADD8, 0xCCB7926B */
+a6 = 1.19270763183362067845e-03, /* 0x3F538A94, 0x116F3F5D */
+a7 = 5.10069792153511336608e-04, /* 0x3F40B6C6, 0x89B99C00 */
+a8 = 2.20862790713908385557e-04, /* 0x3F2CF2EC, 0xED10E54D */
+a9 = 1.08011567247583939954e-04, /* 0x3F1C5088, 0x987DFB07 */
+a10 = 2.52144565451257326939e-05, /* 0x3EFA7074, 0x428CFA52 */
+a11 = 4.48640949618915160150e-05, /* 0x3F07858E, 0x90A45837 */
+tc = 1.46163214496836224576e+00, /* 0x3FF762D8, 0x6356BE3F */
+tf = -1.21486290535849611461e-01, /* 0xBFBF19B9, 0xBCC38A42 */
+/* tt = -(tail of tf) */
+tt = -3.63867699703950536541e-18, /* 0xBC50C7CA, 0xA48A971F */
+t0 = 4.83836122723810047042e-01, /* 0x3FDEF72B, 0xC8EE38A2 */
+t1 = -1.47587722994593911752e-01, /* 0xBFC2E427, 0x8DC6C509 */
+t2 = 6.46249402391333854778e-02, /* 0x3FB08B42, 0x94D5419B */
+t3 = -3.27885410759859649565e-02, /* 0xBFA0C9A8, 0xDF35B713 */
+t4 = 1.79706750811820387126e-02, /* 0x3F9266E7, 0x970AF9EC */
+t5 = -1.03142241298341437450e-02, /* 0xBF851F9F, 0xBA91EC6A */
+t6 = 6.10053870246291332635e-03, /* 0x3F78FCE0, 0xE370E344 */
+t7 = -3.68452016781138256760e-03, /* 0xBF6E2EFF, 0xB3E914D7 */
+t8 = 2.25964780900612472250e-03, /* 0x3F6282D3, 0x2E15C915 */
+t9 = -1.40346469989232843813e-03, /* 0xBF56FE8E, 0xBF2D1AF1 */
+t10 = 8.81081882437654011382e-04, /* 0x3F4CDF0C, 0xEF61A8E9 */
+t11 = -5.38595305356740546715e-04, /* 0xBF41A610, 0x9C73E0EC */
+t12 = 3.15632070903625950361e-04, /* 0x3F34AF6D, 0x6C0EBBF7 */
+t13 = -3.12754168375120860518e-04, /* 0xBF347F24, 0xECC38C38 */
+t14 = 3.35529192635519073543e-04, /* 0x3F35FD3E, 0xE8C2D3F4 */
+u0 = -7.72156649015328655494e-02, /* 0xBFB3C467, 0xE37DB0C8 */
+u1 = 6.32827064025093366517e-01, /* 0x3FE4401E, 0x8B005DFF */
+u2 = 1.45492250137234768737e+00, /* 0x3FF7475C, 0xD119BD6F */
+u3 = 9.77717527963372745603e-01, /* 0x3FEF4976, 0x44EA8450 */
+u4 = 2.28963728064692451092e-01, /* 0x3FCD4EAE, 0xF6010924 */
+u5 = 1.33810918536787660377e-02, /* 0x3F8B678B, 0xBF2BAB09 */
+v1 = 2.45597793713041134822e+00, /* 0x4003A5D7, 0xC2BD619C */
+v2 = 2.12848976379893395361e+00, /* 0x40010725, 0xA42B18F5 */
+v3 = 7.69285150456672783825e-01, /* 0x3FE89DFB, 0xE45050AF */
+v4 = 1.04222645593369134254e-01, /* 0x3FBAAE55, 0xD6537C88 */
+v5 = 3.21709242282423911810e-03, /* 0x3F6A5ABB, 0x57D0CF61 */
+s0 = -7.72156649015328655494e-02, /* 0xBFB3C467, 0xE37DB0C8 */
+s1 = 2.14982415960608852501e-01, /* 0x3FCB848B, 0x36E20878 */
+s2 = 3.25778796408930981787e-01, /* 0x3FD4D98F, 0x4F139F59 */
+s3 = 1.46350472652464452805e-01, /* 0x3FC2BB9C, 0xBEE5F2F7 */
+s4 = 2.66422703033638609560e-02, /* 0x3F9B481C, 0x7E939961 */
+s5 = 1.84028451407337715652e-03, /* 0x3F5E26B6, 0x7368F239 */
+s6 = 3.19475326584100867617e-05, /* 0x3F00BFEC, 0xDD17E945 */
+r1 = 1.39200533467621045958e+00, /* 0x3FF645A7, 0x62C4AB74 */
+r2 = 7.21935547567138069525e-01, /* 0x3FE71A18, 0x93D3DCDC */
+r3 = 1.71933865632803078993e-01, /* 0x3FC601ED, 0xCCFBDF27 */
+r4 = 1.86459191715652901344e-02, /* 0x3F9317EA, 0x742ED475 */
+r5 = 7.77942496381893596434e-04, /* 0x3F497DDA, 0xCA41A95B */
+r6 = 7.32668430744625636189e-06, /* 0x3EDEBAF7, 0xA5B38140 */
+w0 = 4.18938533204672725052e-01, /* 0x3FDACFE3, 0x90C97D69 */
+w1 = 8.33333333333329678849e-02, /* 0x3FB55555, 0x5555553B */
+w2 = -2.77777777728775536470e-03, /* 0xBF66C16C, 0x16B02E5C */
+w3 = 7.93650558643019558500e-04, /* 0x3F4A019F, 0x98CF38B6 */
+w4 = -5.95187557450339963135e-04, /* 0xBF4380CB, 0x8C0FE741 */
+w5 = 8.36339918996282139126e-04, /* 0x3F4B67BA, 0x4CDAD5D1 */
+w6 = -1.63092934096575273989e-03; /* 0xBF5AB89D, 0x0B9E43E4 */
+
+static double zero= 0.00000000000000000000e+00;
+
+#ifdef __STDC__
+ static double sin_pi(double x)
+#else
+ static double sin_pi(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ double y,z;
+ int n,ix;
+
+ u.d = x;
+ ix = 0x7fffffff&__HI(u);
+
+ if(ix<0x3fd00000) return __kernel_sin(pi*x,zero,0);
+ y = -x; /* x is assumed negative */
+
+ /*
+ * argument reduction, make sure inexact flag not raised if input
+ * is an integer
+ */
+ z = fd_floor(y);
+ if(z!=y) { /* inexact anyway */
+ y *= 0.5;
+ y = 2.0*(y - fd_floor(y)); /* y = |x| mod 2.0 */
+ n = (int) (y*4.0);
+ } else {
+ if(ix>=0x43400000) {
+ y = zero; n = 0; /* y must be even */
+ } else {
+ if(ix<0x43300000) z = y+two52; /* exact */
+ u.d = z;
+ n = __LO(u)&1; /* lower word of z */
+ y = n;
+ n<<= 2;
+ }
+ }
+ switch (n) {
+ case 0: y = __kernel_sin(pi*y,zero,0); break;
+ case 1:
+ case 2: y = __kernel_cos(pi*(0.5-y),zero); break;
+ case 3:
+ case 4: y = __kernel_sin(pi*(one-y),zero,0); break;
+ case 5:
+ case 6: y = -__kernel_cos(pi*(y-1.5),zero); break;
+ default: y = __kernel_sin(pi*(y-2.0),zero,0); break;
+ }
+ return -y;
+}
+
+
+#ifdef __STDC__
+ double __ieee754_lgamma_r(double x, int *signgamp)
+#else
+ double __ieee754_lgamma_r(x,signgamp)
+ double x; int *signgamp;
+#endif
+{
+ fd_twoints u;
+ double t,y,z,nadj,p,p1,p2,p3,q,r,w;
+ int i,hx,lx,ix;
+
+ u.d = x;
+ hx = __HI(u);
+ lx = __LO(u);
+
+ /* purge off +-inf, NaN, +-0, and negative arguments */
+ *signgamp = 1;
+ ix = hx&0x7fffffff;
+ if(ix>=0x7ff00000) return x*x;
+ if((ix|lx)==0) return one/zero;
+ if(ix<0x3b900000) { /* |x|<2**-70, return -log(|x|) */
+ if(hx<0) {
+ *signgamp = -1;
+ return -__ieee754_log(-x);
+ } else return -__ieee754_log(x);
+ }
+ if(hx<0) {
+ if(ix>=0x43300000) /* |x|>=2**52, must be -integer */
+ return one/zero;
+ t = sin_pi(x);
+ if(t==zero) return one/zero; /* -integer */
+ nadj = __ieee754_log(pi/fd_fabs(t*x));
+ if(t<zero) *signgamp = -1;
+ x = -x;
+ }
+
+ /* purge off 1 and 2 */
+ if((((ix-0x3ff00000)|lx)==0)||(((ix-0x40000000)|lx)==0)) r = 0;
+ /* for x < 2.0 */
+ else if(ix<0x40000000) {
+ if(ix<=0x3feccccc) { /* lgamma(x) = lgamma(x+1)-log(x) */
+ r = -__ieee754_log(x);
+ if(ix>=0x3FE76944) {y = one-x; i= 0;}
+ else if(ix>=0x3FCDA661) {y= x-(tc-one); i=1;}
+ else {y = x; i=2;}
+ } else {
+ r = zero;
+ if(ix>=0x3FFBB4C3) {y=2.0-x;i=0;} /* [1.7316,2] */
+ else if(ix>=0x3FF3B4C4) {y=x-tc;i=1;} /* [1.23,1.73] */
+ else {y=x-one;i=2;}
+ }
+ switch(i) {
+ case 0:
+ z = y*y;
+ p1 = a0+z*(a2+z*(a4+z*(a6+z*(a8+z*a10))));
+ p2 = z*(a1+z*(a3+z*(a5+z*(a7+z*(a9+z*a11)))));
+ p = y*p1+p2;
+ r += (p-0.5*y); break;
+ case 1:
+ z = y*y;
+ w = z*y;
+ p1 = t0+w*(t3+w*(t6+w*(t9 +w*t12))); /* parallel comp */
+ p2 = t1+w*(t4+w*(t7+w*(t10+w*t13)));
+ p3 = t2+w*(t5+w*(t8+w*(t11+w*t14)));
+ p = z*p1-(tt-w*(p2+y*p3));
+ r += (tf + p); break;
+ case 2:
+ p1 = y*(u0+y*(u1+y*(u2+y*(u3+y*(u4+y*u5)))));
+ p2 = one+y*(v1+y*(v2+y*(v3+y*(v4+y*v5))));
+ r += (-0.5*y + p1/p2);
+ }
+ }
+ else if(ix<0x40200000) { /* x < 8.0 */
+ i = (int)x;
+ t = zero;
+ y = x-(double)i;
+ p = y*(s0+y*(s1+y*(s2+y*(s3+y*(s4+y*(s5+y*s6))))));
+ q = one+y*(r1+y*(r2+y*(r3+y*(r4+y*(r5+y*r6)))));
+ r = half*y+p/q;
+ z = one; /* lgamma(1+s) = log(s) + lgamma(s) */
+ switch(i) {
+ case 7: z *= (y+6.0); /* FALLTHRU */
+ case 6: z *= (y+5.0); /* FALLTHRU */
+ case 5: z *= (y+4.0); /* FALLTHRU */
+ case 4: z *= (y+3.0); /* FALLTHRU */
+ case 3: z *= (y+2.0); /* FALLTHRU */
+ r += __ieee754_log(z); break;
+ }
+ /* 8.0 <= x < 2**58 */
+ } else if (ix < 0x43900000) {
+ t = __ieee754_log(x);
+ z = one/x;
+ y = z*z;
+ w = w0+z*(w1+y*(w2+y*(w3+y*(w4+y*(w5+y*w6)))));
+ r = (x-half)*(t-one)+w;
+ } else
+ /* 2**58 <= x <= inf */
+ r = x*(__ieee754_log(x)-one);
+ if(hx<0) r = nadj - r;
+ return r;
+}
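Step 1 of the method comment above relies on the recurrence lgamma(1+s) = log(s) + lgamma(s), illustrated there with lgamma(7.3). A small self-contained check of that identity against the C99 lgamma(); this is a sketch for illustration only and does not exercise the fdlibm routine itself:

#include <math.h>
#include <stdio.h>

int main(void)
{
    /* reduce lgamma(7.3) to lgamma(2.3), exactly as in the comment:
     * lgamma(7.3) = log(6.3*5.3*4.3*3.3*2.3) + lgamma(2.3) */
    double reduced = log(6.3 * 5.3 * 4.3 * 3.3 * 2.3) + lgamma(2.3);
    double direct  = lgamma(7.3);

    printf("direct  = %.17g\n", direct);
    printf("reduced = %.17g\n", reduced);
    printf("diff    = %g\n", direct - reduced);   /* expect roughly 1e-15 or less */
    return 0;
}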
diff --git a/src/third_party/js-1.7/fdlibm/e_log.c b/src/third_party/js-1.7/fdlibm/e_log.c
new file mode 100644
index 00000000000..8645d6efdcc
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/e_log.c
@@ -0,0 +1,184 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_log.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* __ieee754_log(x)
+ * Return the logarithm of x
+ *
+ * Method :
+ * 1. Argument Reduction: find k and f such that
+ * x = 2^k * (1+f),
+ * where sqrt(2)/2 < 1+f < sqrt(2) .
+ *
+ * 2. Approximation of log(1+f).
+ * Let s = f/(2+f) ; based on log(1+f) = log(1+s) - log(1-s)
+ * = 2s + 2/3 s**3 + 2/5 s**5 + .....,
+ * = 2s + s*R
+ * We use a special Remez algorithm on [0,0.1716] to generate
+ * a polynomial of degree 14 to approximate R. The maximum error
+ * of this polynomial approximation is bounded by 2**-58.45. In
+ * other words,
+ * R(z) ~ Lg1*s^2 + Lg2*s^4 + Lg3*s^6 + Lg4*s^8 + Lg5*s^10 + Lg6*s^12 + Lg7*s^14
+ * (the values of Lg1 to Lg7 are listed in the program)
+ * and
+ * | Lg1*s^2 + ... + Lg7*s^14 - R(z) | <= 2**-58.45
+ * Note that 2s = f - s*f = f - hfsq + s*hfsq, where hfsq = f*f/2.
+ * In order to guarantee error in log below 1ulp, we compute log
+ * by
+ * log(1+f) = f - s*(f - R) (if f is not too large)
+ * log(1+f) = f - (hfsq - s*(hfsq+R)). (better accuracy)
+ *
+ * 3. Finally, log(x) = k*ln2 + log(1+f).
+ * = k*ln2_hi+(f-(hfsq-(s*(hfsq+R)+k*ln2_lo)))
+ * Here ln2 is split into two floating point numbers:
+ * ln2_hi + ln2_lo,
+ * where n*ln2_hi is always exact for |n| < 2000.
+ *
+ * Special cases:
+ * log(x) is NaN with signal if x < 0 (including -INF) ;
+ * log(+INF) is +INF; log(0) is -INF with signal;
+ * log(NaN) is that NaN with no signal.
+ *
+ * Accuracy:
+ * according to an error analysis, the error is always less than
+ * 1 ulp (unit in the last place).
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+ln2_hi = 6.93147180369123816490e-01, /* 3fe62e42 fee00000 */
+ln2_lo = 1.90821492927058770002e-10, /* 3dea39ef 35793c76 */
+two54 = 1.80143985094819840000e+16, /* 43500000 00000000 */
+Lg1 = 6.666666666666735130e-01, /* 3FE55555 55555593 */
+Lg2 = 3.999999999940941908e-01, /* 3FD99999 9997FA04 */
+Lg3 = 2.857142874366239149e-01, /* 3FD24924 94229359 */
+Lg4 = 2.222219843214978396e-01, /* 3FCC71C5 1D8E78AF */
+Lg5 = 1.818357216161805012e-01, /* 3FC74664 96CB03DE */
+Lg6 = 1.531383769920937332e-01, /* 3FC39A09 D078C69F */
+Lg7 = 1.479819860511658591e-01; /* 3FC2F112 DF3E5244 */
+
+static double zero = 0.0;
+
+#ifdef __STDC__
+ double __ieee754_log(double x)
+#else
+ double __ieee754_log(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ double hfsq,f,s,z,R,w,t1,t2,dk;
+ int k,hx,i,j;
+ unsigned lx;
+
+ u.d = x;
+ hx = __HI(u); /* high word of x */
+ lx = __LO(u); /* low word of x */
+
+ k=0;
+ if (hx < 0x00100000) { /* x < 2**-1022 */
+ if (((hx&0x7fffffff)|lx)==0)
+ return -two54/zero; /* log(+-0)=-inf */
+ if (hx<0) return (x-x)/zero; /* log(-#) = NaN */
+ k -= 54; x *= two54; /* subnormal number, scale up x */
+ u.d = x;
+ hx = __HI(u); /* high word of x */
+ }
+ if (hx >= 0x7ff00000) return x+x;
+ k += (hx>>20)-1023;
+ hx &= 0x000fffff;
+ i = (hx+0x95f64)&0x100000;
+ u.d = x;
+ __HI(u) = hx|(i^0x3ff00000); /* normalize x or x/2 */
+ x = u.d;
+ k += (i>>20);
+ f = x-1.0;
+ if((0x000fffff&(2+hx))<3) { /* |f| < 2**-20 */
+ if(f==zero) {
+ if(k==0) return zero; else {dk=(double)k;
+ return dk*ln2_hi+dk*ln2_lo;}
+ }
+ R = f*f*(0.5-0.33333333333333333*f);
+ if(k==0) return f-R; else {dk=(double)k;
+ return dk*ln2_hi-((R-dk*ln2_lo)-f);}
+ }
+ s = f/(2.0+f);
+ dk = (double)k;
+ z = s*s;
+ i = hx-0x6147a;
+ w = z*z;
+ j = 0x6b851-hx;
+ t1= w*(Lg2+w*(Lg4+w*Lg6));
+ t2= z*(Lg1+w*(Lg3+w*(Lg5+w*Lg7)));
+ i |= j;
+ R = t2+t1;
+ if(i>0) {
+ hfsq=0.5*f*f;
+ if(k==0) return f-(hfsq-s*(hfsq+R)); else
+ return dk*ln2_hi-((hfsq-(s*(hfsq+R)+dk*ln2_lo))-f);
+ } else {
+ if(k==0) return f-s*(f-R); else
+ return dk*ln2_hi-((s*(f-R)-dk*ln2_lo)-f);
+ }
+}
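The argument reduction in step 1 above writes x = 2^k * (1+f) with 1+f close to 1 and then uses log(x) = k*ln2 + log(1+f). The same decomposition can be reproduced with portable C99 calls; this sketch uses frexp()/log1p() instead of the word-level manipulation in the routine, so it only illustrates the idea:

#include <math.h>
#include <stdio.h>

int main(void)
{
    double x = 123.456;
    int k;
    double m = frexp(x, &k);                    /* x = m * 2^k, m in [0.5, 1) */

    /* shift m into [sqrt(1/2), sqrt(2)) so that f = m - 1 stays small */
    if (m < sqrt(0.5)) { m *= 2.0; k -= 1; }
    double f = m - 1.0;

    double rebuilt = k * log(2.0) + log1p(f);   /* log(x) = k*ln2 + log(1+f) */
    printf("log(x)  = %.17g\n", log(x));
    printf("rebuilt = %.17g\n", rebuilt);
    return 0;
}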
diff --git a/src/third_party/js-1.7/fdlibm/e_log10.c b/src/third_party/js-1.7/fdlibm/e_log10.c
new file mode 100644
index 00000000000..5f88f4b4cfe
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/e_log10.c
@@ -0,0 +1,134 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_log10.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* __ieee754_log10(x)
+ * Return the base 10 logarithm of x
+ *
+ * Method :
+ * Let log10_2hi = leading 40 bits of log10(2) and
+ * log10_2lo = log10(2) - log10_2hi,
+ * ivln10 = 1/log(10) rounded.
+ * Then
+ * n = ilogb(x),
+ * if(n<0) n = n+1;
+ * x = scalbn(x,-n);
+ * log10(x) := n*log10_2hi + (n*log10_2lo + ivln10*log(x))
+ *
+ * Note 1:
+ * To guarantee log10(10**n)=n, where 10**n is normal, the rounding
+ * mode must be set to Round-to-Nearest.
+ * Note 2:
+ * [1/log(10)] rounded to 53 bits has error .198 ulps;
+ * log10 is monotonic at all binary break points.
+ *
+ * Special cases:
+ * log10(x) is NaN with signal if x < 0;
+ * log10(+INF) is +INF with no signal; log10(0) is -INF with signal;
+ * log10(NaN) is that NaN with no signal;
+ * log10(10**N) = N for N=0,1,...,22.
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following constants.
+ * The decimal values may be used, provided that the compiler will convert
+ * from decimal to binary accurately enough to produce the hexadecimal values
+ * shown.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+two54 = 1.80143985094819840000e+16, /* 0x43500000, 0x00000000 */
+ivln10 = 4.34294481903251816668e-01, /* 0x3FDBCB7B, 0x1526E50E */
+log10_2hi = 3.01029995663611771306e-01, /* 0x3FD34413, 0x509F6000 */
+log10_2lo = 3.69423907715893078616e-13; /* 0x3D59FEF3, 0x11F12B36 */
+
+static double zero = 0.0;
+
+#ifdef __STDC__
+ double __ieee754_log10(double x)
+#else
+ double __ieee754_log10(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ double y,z;
+ int i,k,hx;
+ unsigned lx;
+
+ u.d = x;
+ hx = __HI(u); /* high word of x */
+ lx = __LO(u); /* low word of x */
+
+ k=0;
+ if (hx < 0x00100000) { /* x < 2**-1022 */
+ if (((hx&0x7fffffff)|lx)==0)
+ return -two54/zero; /* log(+-0)=-inf */
+ if (hx<0) return (x-x)/zero; /* log(-#) = NaN */
+ k -= 54; x *= two54; /* subnormal number, scale up x */
+ u.d = x;
+ hx = __HI(u); /* high word of x */
+ }
+ if (hx >= 0x7ff00000) return x+x;
+ k += (hx>>20)-1023;
+ i = ((unsigned)k&0x80000000)>>31;
+ hx = (hx&0x000fffff)|((0x3ff-i)<<20);
+ y = (double)(k+i);
+ u.d = x;
+ __HI(u) = hx;
+ x = u.d;
+ z = y*log10_2lo + ivln10*__ieee754_log(x);
+ return z+y*log10_2hi;
+}
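The method comment above computes log10 from the natural log plus a scaled exponent, splitting log10(2) into high and low parts so that n*log10_2hi stays exact. A portable sketch of the same identity, log10(x) = n*log10(2) + log(scalbn(x,-n))/log(10), using only C99 library calls; it ignores the careful hi/lo splitting, so it is slightly less accurate than the routine above:

#include <math.h>
#include <stdio.h>

int main(void)
{
    double x = 1234.5678;

    /* follow the comment: n = ilogb(x); if (n<0) n = n+1; x = scalbn(x,-n) */
    int n = ilogb(x);
    if (n < 0) n = n + 1;
    double xs = scalbn(x, -n);

    double rebuilt = n * log10(2.0) + log(xs) / log(10.0);
    printf("log10(x) = %.17g\n", log10(x));
    printf("rebuilt  = %.17g\n", rebuilt);
    return 0;
}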
diff --git a/src/third_party/js-1.7/fdlibm/e_pow.c b/src/third_party/js-1.7/fdlibm/e_pow.c
new file mode 100644
index 00000000000..18c8d069507
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/e_pow.c
@@ -0,0 +1,386 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_pow.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* __ieee754_pow(x,y) return x**y
+ *
+ * Method: Let x = 2^n * (1+f)
+ * 1. Compute and return log2(x) in two pieces:
+ * log2(x) = w1 + w2,
+ * where w1 has 53-24 = 29 bit trailing zeros.
+ * 2. Perform y*log2(x) = n+y' by simulating multi-precision
+ * arithmetic, where |y'|<=0.5.
+ * 3. Return x**y = 2**n*exp(y'*log2)
+ *
+ * Special cases:
+ * 1. (anything) ** 0 is 1
+ * 2. (anything) ** 1 is itself
+ * 3. (anything) ** NAN is NAN
+ * 4. NAN ** (anything except 0) is NAN
+ * 5. +-(|x| > 1) ** +INF is +INF
+ * 6. +-(|x| > 1) ** -INF is +0
+ * 7. +-(|x| < 1) ** +INF is +0
+ * 8. +-(|x| < 1) ** -INF is +INF
+ * 9. +-1 ** +-INF is NAN
+ * 10. +0 ** (+anything except 0, NAN) is +0
+ * 11. -0 ** (+anything except 0, NAN, odd integer) is +0
+ * 12. +0 ** (-anything except 0, NAN) is +INF
+ * 13. -0 ** (-anything except 0, NAN, odd integer) is +INF
+ * 14. -0 ** (odd integer) = -( +0 ** (odd integer) )
+ * 15. +INF ** (+anything except 0,NAN) is +INF
+ * 16. +INF ** (-anything except 0,NAN) is +0
+ * 17. -INF ** (anything) = -0 ** (-anything)
+ * 18. (-anything) ** (integer) is (-1)**(integer)*(+anything**integer)
+ * 19. (-anything except 0 and inf) ** (non-integer) is NAN
+ *
+ * Accuracy:
+ * pow(x,y) returns x**y nearly rounded. In particular
+ * pow(integer,integer)
+ * always returns the correct integer provided it is
+ * representable.
+ *
+ * Constants :
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+
+#include "fdlibm.h"
+
+#if defined(_MSC_VER)
+/* Microsoft Compiler */
+#pragma warning( disable : 4723 ) /* disables potential divide by 0 warning */
+#endif
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+bp[] = {1.0, 1.5,},
+dp_h[] = { 0.0, 5.84962487220764160156e-01,}, /* 0x3FE2B803, 0x40000000 */
+dp_l[] = { 0.0, 1.35003920212974897128e-08,}, /* 0x3E4CFDEB, 0x43CFD006 */
+zero = 0.0,
+one = 1.0,
+two = 2.0,
+two53 = 9007199254740992.0, /* 0x43400000, 0x00000000 */
+really_big = 1.0e300,
+tiny = 1.0e-300,
+ /* poly coefs for (3/2)*(log(x)-2s-2/3*s**3) */
+L1 = 5.99999999999994648725e-01, /* 0x3FE33333, 0x33333303 */
+L2 = 4.28571428578550184252e-01, /* 0x3FDB6DB6, 0xDB6FABFF */
+L3 = 3.33333329818377432918e-01, /* 0x3FD55555, 0x518F264D */
+L4 = 2.72728123808534006489e-01, /* 0x3FD17460, 0xA91D4101 */
+L5 = 2.30660745775561754067e-01, /* 0x3FCD864A, 0x93C9DB65 */
+L6 = 2.06975017800338417784e-01, /* 0x3FCA7E28, 0x4A454EEF */
+P1 = 1.66666666666666019037e-01, /* 0x3FC55555, 0x5555553E */
+P2 = -2.77777777770155933842e-03, /* 0xBF66C16C, 0x16BEBD93 */
+P3 = 6.61375632143793436117e-05, /* 0x3F11566A, 0xAF25DE2C */
+P4 = -1.65339022054652515390e-06, /* 0xBEBBBD41, 0xC5D26BF1 */
+P5 = 4.13813679705723846039e-08, /* 0x3E663769, 0x72BEA4D0 */
+lg2 = 6.93147180559945286227e-01, /* 0x3FE62E42, 0xFEFA39EF */
+lg2_h = 6.93147182464599609375e-01, /* 0x3FE62E43, 0x00000000 */
+lg2_l = -1.90465429995776804525e-09, /* 0xBE205C61, 0x0CA86C39 */
+ovt = 8.0085662595372944372e-0017, /* -(1024-log2(ovfl+.5ulp)) */
+cp = 9.61796693925975554329e-01, /* 0x3FEEC709, 0xDC3A03FD =2/(3ln2) */
+cp_h = 9.61796700954437255859e-01, /* 0x3FEEC709, 0xE0000000 =(float)cp */
+cp_l = -7.02846165095275826516e-09, /* 0xBE3E2FE0, 0x145B01F5 =tail of cp_h*/
+ivln2 = 1.44269504088896338700e+00, /* 0x3FF71547, 0x652B82FE =1/ln2 */
+ivln2_h = 1.44269502162933349609e+00, /* 0x3FF71547, 0x60000000 =24b 1/ln2*/
+ivln2_l = 1.92596299112661746887e-08; /* 0x3E54AE0B, 0xF85DDF44 =1/ln2 tail*/
+
+#ifdef __STDC__
+ double __ieee754_pow(double x, double y)
+#else
+ double __ieee754_pow(x,y)
+ double x, y;
+#endif
+{
+ fd_twoints ux, uy, uz;
+ double y1,t1,p_h,t,z,ax;
+ double z_h,z_l,p_l;
+ double t2,r,s,u,v,w;
+ int i,j,k,yisint,n;
+ int hx,hy,ix,iy;
+ unsigned lx,ly;
+
+ ux.d = x; uy.d = y;
+ hx = __HI(ux); lx = __LO(ux);
+ hy = __HI(uy); ly = __LO(uy);
+ ix = hx&0x7fffffff; iy = hy&0x7fffffff;
+
+ /* y==zero: x**0 = 1 */
+ if((iy|ly)==0) return one;
+
+ /* +-NaN return x+y */
+ if(ix > 0x7ff00000 || ((ix==0x7ff00000)&&(lx!=0)) ||
+ iy > 0x7ff00000 || ((iy==0x7ff00000)&&(ly!=0)))
+ return x+y;
+
+ /* determine if y is an odd int when x < 0
+ * yisint = 0 ... y is not an integer
+ * yisint = 1 ... y is an odd int
+ * yisint = 2 ... y is an even int
+ */
+ yisint = 0;
+ if(hx<0) {
+ if(iy>=0x43400000) yisint = 2; /* even integer y */
+ else if(iy>=0x3ff00000) {
+ k = (iy>>20)-0x3ff; /* exponent */
+ if(k>20) {
+ j = ly>>(52-k);
+ if((j<<(52-k))==(int)ly) yisint = 2-(j&1);
+ } else if(ly==0) {
+ j = iy>>(20-k);
+ if((j<<(20-k))==iy) yisint = 2-(j&1);
+ }
+ }
+ }
+
+ /* special value of y */
+ if(ly==0) {
+ if (iy==0x7ff00000) { /* y is +-inf */
+ if(((ix-0x3ff00000)|lx)==0)
+#ifdef _WIN32
+/* VC++ optimizer reduces y - y to 0 */
+ return y / y;
+#else
+ return y - y; /* inf**+-1 is NaN */
+#endif
+ else if (ix >= 0x3ff00000)/* (|x|>1)**+-inf = inf,0 */
+ return (hy>=0)? y: zero;
+ else /* (|x|<1)**-,+inf = inf,0 */
+ return (hy<0)?-y: zero;
+ }
+ if(iy==0x3ff00000) { /* y is +-1 */
+ if(hy<0) return one/x; else return x;
+ }
+ if(hy==0x40000000) return x*x; /* y is 2 */
+ if(hy==0x3fe00000) { /* y is 0.5 */
+ if(hx>=0) /* x >= +0 */
+ return fd_sqrt(x);
+ }
+ }
+
+ ax = fd_fabs(x);
+ /* special value of x */
+ if(lx==0) {
+ if(ix==0x7ff00000||ix==0||ix==0x3ff00000){
+ z = ax; /*x is +-0,+-inf,+-1*/
+ if(hy<0) z = one/z; /* z = (1/|x|) */
+ if(hx<0) {
+ if(((ix-0x3ff00000)|yisint)==0) {
+ z = (z-z)/(z-z); /* (-1)**non-int is NaN */
+ } else if(yisint==1) {
+#ifdef HPUX
+ uz.d = z;
+ __HI(uz) ^= 1<<31; /* some HPUXes cannot negate 0.. */
+ z = uz.d;
+#else
+ z = -z; /* (x<0)**odd = -(|x|**odd) */
+#endif
+ }
+ }
+ return z;
+ }
+ }
+
+ /* (x<0)**(non-int) is NaN */
+ if((((hx>>31)+1)|yisint)==0) return (x-x)/(x-x);
+
+ /* |y| is really_big */
+ if(iy>0x41e00000) { /* if |y| > 2**31 */
+ if(iy>0x43f00000){ /* if |y| > 2**64, must o/uflow */
+ if(ix<=0x3fefffff) return (hy<0)? really_big*really_big:tiny*tiny;
+ if(ix>=0x3ff00000) return (hy>0)? really_big*really_big:tiny*tiny;
+ }
+ /* over/underflow if x is not close to one */
+ if(ix<0x3fefffff) return (hy<0)? really_big*really_big:tiny*tiny;
+ if(ix>0x3ff00000) return (hy>0)? really_big*really_big:tiny*tiny;
+ /* now |1-x| is tiny <= 2**-20, so it suffices to compute
+ log(x) by x-x^2/2+x^3/3-x^4/4 */
+ t = x-1; /* t has 20 trailing zeros */
+ w = (t*t)*(0.5-t*(0.3333333333333333333333-t*0.25));
+ u = ivln2_h*t; /* ivln2_h has 21 sig. bits */
+ v = t*ivln2_l-w*ivln2;
+ t1 = u+v;
+ uz.d = t1;
+ __LO(uz) = 0;
+ t1 = uz.d;
+ t2 = v-(t1-u);
+ } else {
+ double s_h,t_h;
+ double s2,s_l,t_l;
+ n = 0;
+ /* take care of subnormal numbers */
+ if(ix<0x00100000)
+ {ax *= two53; n -= 53; uz.d = ax; ix = __HI(uz); }
+ n += ((ix)>>20)-0x3ff;
+ j = ix&0x000fffff;
+ /* determine interval */
+ ix = j|0x3ff00000; /* normalize ix */
+ if(j<=0x3988E) k=0; /* |x|<sqrt(3/2) */
+ else if(j<0xBB67A) k=1; /* |x|<sqrt(3) */
+ else {k=0;n+=1;ix -= 0x00100000;}
+ uz.d = ax;
+ __HI(uz) = ix;
+ ax = uz.d;
+
+ /* compute s = s_h+s_l = (x-1)/(x+1) or (x-1.5)/(x+1.5) */
+ u = ax-bp[k]; /* bp[0]=1.0, bp[1]=1.5 */
+ v = one/(ax+bp[k]);
+ s = u*v;
+ s_h = s;
+ uz.d = s_h;
+ __LO(uz) = 0;
+ s_h = uz.d;
+ /* t_h=ax+bp[k] High */
+ t_h = zero;
+ uz.d = t_h;
+ __HI(uz)=((ix>>1)|0x20000000)+0x00080000+(k<<18);
+ t_h = uz.d;
+ t_l = ax - (t_h-bp[k]);
+ s_l = v*((u-s_h*t_h)-s_h*t_l);
+ /* compute log(ax) */
+ s2 = s*s;
+ r = s2*s2*(L1+s2*(L2+s2*(L3+s2*(L4+s2*(L5+s2*L6)))));
+ r += s_l*(s_h+s);
+ s2 = s_h*s_h;
+ t_h = 3.0+s2+r;
+ uz.d = t_h;
+ __LO(uz) = 0;
+ t_h = uz.d;
+ t_l = r-((t_h-3.0)-s2);
+ /* u+v = s*(1+...) */
+ u = s_h*t_h;
+ v = s_l*t_h+t_l*s;
+ /* 2/(3log2)*(s+...) */
+ p_h = u+v;
+ uz.d = p_h;
+ __LO(uz) = 0;
+ p_h = uz.d;
+ p_l = v-(p_h-u);
+ z_h = cp_h*p_h; /* cp_h+cp_l = 2/(3*log2) */
+ z_l = cp_l*p_h+p_l*cp+dp_l[k];
+ /* log2(ax) = (s+..)*2/(3*log2) = n + dp_h + z_h + z_l */
+ t = (double)n;
+ t1 = (((z_h+z_l)+dp_h[k])+t);
+ uz.d = t1;
+ __LO(uz) = 0;
+ t1 = uz.d;
+ t2 = z_l-(((t1-t)-dp_h[k])-z_h);
+ }
+
+ s = one; /* s (sign of result -ve**odd) = -1 else = 1 */
+ if((((hx>>31)+1)|(yisint-1))==0) s = -one;/* (-ve)**(odd int) */
+
+ /* split up y into y1+y2 and compute (y1+y2)*(t1+t2) */
+ y1 = y;
+ uy.d = y1;
+ __LO(uy) = 0;
+ y1 = uy.d;
+ p_l = (y-y1)*t1+y*t2;
+ p_h = y1*t1;
+ z = p_l+p_h;
+ uz.d = z;
+ j = __HI(uz);
+ i = __LO(uz);
+
+ if (j>=0x40900000) { /* z >= 1024 */
+ if(((j-0x40900000)|i)!=0) /* if z > 1024 */
+ return s*really_big*really_big; /* overflow */
+ else {
+ if(p_l+ovt>z-p_h) return s*really_big*really_big; /* overflow */
+ }
+ } else if((j&0x7fffffff)>=0x4090cc00 ) { /* z <= -1075 */
+ if(((j-0xc090cc00)|i)!=0) /* z < -1075 */
+ return s*tiny*tiny; /* underflow */
+ else {
+ if(p_l<=z-p_h) return s*tiny*tiny; /* underflow */
+ }
+ }
+ /*
+ * compute 2**(p_h+p_l)
+ */
+ i = j&0x7fffffff;
+ k = (i>>20)-0x3ff;
+ n = 0;
+ if(i>0x3fe00000) { /* if |z| > 0.5, set n = [z+0.5] */
+ n = j+(0x00100000>>(k+1));
+ k = ((n&0x7fffffff)>>20)-0x3ff; /* new k for n */
+ t = zero;
+ uz.d = t;
+ __HI(uz) = (n&~(0x000fffff>>k));
+ t = uz.d;
+ n = ((n&0x000fffff)|0x00100000)>>(20-k);
+ if(j<0) n = -n;
+ p_h -= t;
+ }
+ t = p_l+p_h;
+ uz.d = t;
+ __LO(uz) = 0;
+ t = uz.d;
+ u = t*lg2_h;
+ v = (p_l-(t-p_h))*lg2+t*lg2_l;
+ z = u+v;
+ w = v-(z-u);
+ t = z*z;
+ t1 = z - t*(P1+t*(P2+t*(P3+t*(P4+t*P5))));
+ r = (z*t1)/(t1-two)-(w+z*w);
+ z = one-(r-z);
+ uz.d = z;
+ j = __HI(uz);
+ j += (n<<20);
+ if((j>>20)<=0) z = fd_scalbn(z,n); /* subnormal output */
+ else { uz.d = z; __HI(uz) += (n<<20); z = uz.d; }
+ return s*z;
+}
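When x < 0 the routine above first classifies y as a non-integer, an odd integer, or an even integer (yisint), since that decides both the sign of the result and the (-x)**(non-integer) NaN case. A plain floating-point sketch of the same classification for finite y, without the bit-level tests used above (classify_y is an illustrative name, not part of this tree):

#include <math.h>
#include <stdio.h>

/* 0 = not an integer, 1 = odd integer, 2 = even integer; finite y only */
static int classify_y(double y)
{
    if (!isfinite(y) || y != floor(y)) return 0;
    return (fmod(fabs(y), 2.0) == 1.0) ? 1 : 2;   /* exact for integer-valued doubles */
}

int main(void)
{
    printf("y=3:   yisint=%d  pow(-2,3)   = %g\n", classify_y(3.0), pow(-2.0, 3.0));
    printf("y=4:   yisint=%d  pow(-2,4)   = %g\n", classify_y(4.0), pow(-2.0, 4.0));
    printf("y=0.5: yisint=%d  pow(-2,0.5) = %g\n", classify_y(0.5), pow(-2.0, 0.5));
    return 0;
}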
diff --git a/src/third_party/js-1.7/fdlibm/e_rem_pio2.c b/src/third_party/js-1.7/fdlibm/e_rem_pio2.c
new file mode 100644
index 00000000000..c9d26187510
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/e_rem_pio2.c
@@ -0,0 +1,222 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_rem_pio2.c 1.4 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+
+/* __ieee754_rem_pio2(x,y)
+ *
+ * return the remainder of x rem pi/2 in y[0]+y[1]
+ * use __kernel_rem_pio2()
+ */
+
+#include "fdlibm.h"
+
+/*
+ * Table of constants for 2/pi, 396 Hex digits (476 decimal) of 2/pi
+ */
+#ifdef __STDC__
+static const int two_over_pi[] = {
+#else
+static int two_over_pi[] = {
+#endif
+0xA2F983, 0x6E4E44, 0x1529FC, 0x2757D1, 0xF534DD, 0xC0DB62,
+0x95993C, 0x439041, 0xFE5163, 0xABDEBB, 0xC561B7, 0x246E3A,
+0x424DD2, 0xE00649, 0x2EEA09, 0xD1921C, 0xFE1DEB, 0x1CB129,
+0xA73EE8, 0x8235F5, 0x2EBB44, 0x84E99C, 0x7026B4, 0x5F7E41,
+0x3991D6, 0x398353, 0x39F49C, 0x845F8B, 0xBDF928, 0x3B1FF8,
+0x97FFDE, 0x05980F, 0xEF2F11, 0x8B5A0A, 0x6D1F6D, 0x367ECF,
+0x27CB09, 0xB74F46, 0x3F669E, 0x5FEA2D, 0x7527BA, 0xC7EBE5,
+0xF17B3D, 0x0739F7, 0x8A5292, 0xEA6BFB, 0x5FB11F, 0x8D5D08,
+0x560330, 0x46FC7B, 0x6BABF0, 0xCFBC20, 0x9AF436, 0x1DA9E3,
+0x91615E, 0xE61B08, 0x659985, 0x5F14A0, 0x68408D, 0xFFD880,
+0x4D7327, 0x310606, 0x1556CA, 0x73A8C9, 0x60E27B, 0xC08C6B,
+};
+
+#ifdef __STDC__
+static const int npio2_hw[] = {
+#else
+static int npio2_hw[] = {
+#endif
+0x3FF921FB, 0x400921FB, 0x4012D97C, 0x401921FB, 0x401F6A7A, 0x4022D97C,
+0x4025FDBB, 0x402921FB, 0x402C463A, 0x402F6A7A, 0x4031475C, 0x4032D97C,
+0x40346B9C, 0x4035FDBB, 0x40378FDB, 0x403921FB, 0x403AB41B, 0x403C463A,
+0x403DD85A, 0x403F6A7A, 0x40407E4C, 0x4041475C, 0x4042106C, 0x4042D97C,
+0x4043A28C, 0x40446B9C, 0x404534AC, 0x4045FDBB, 0x4046C6CB, 0x40478FDB,
+0x404858EB, 0x404921FB,
+};
+
+/*
+ * invpio2: 53 bits of 2/pi
+ * pio2_1: first 33 bits of pi/2
+ * pio2_1t: pi/2 - pio2_1
+ * pio2_2: second 33 bits of pi/2
+ * pio2_2t: pi/2 - (pio2_1+pio2_2)
+ * pio2_3: third 33 bits of pi/2
+ * pio2_3t: pi/2 - (pio2_1+pio2_2+pio2_3)
+ */
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+zero = 0.00000000000000000000e+00, /* 0x00000000, 0x00000000 */
+half = 5.00000000000000000000e-01, /* 0x3FE00000, 0x00000000 */
+two24 = 1.67772160000000000000e+07, /* 0x41700000, 0x00000000 */
+invpio2 = 6.36619772367581382433e-01, /* 0x3FE45F30, 0x6DC9C883 */
+pio2_1 = 1.57079632673412561417e+00, /* 0x3FF921FB, 0x54400000 */
+pio2_1t = 6.07710050650619224932e-11, /* 0x3DD0B461, 0x1A626331 */
+pio2_2 = 6.07710050630396597660e-11, /* 0x3DD0B461, 0x1A600000 */
+pio2_2t = 2.02226624879595063154e-21, /* 0x3BA3198A, 0x2E037073 */
+pio2_3 = 2.02226624871116645580e-21, /* 0x3BA3198A, 0x2E000000 */
+pio2_3t = 8.47842766036889956997e-32; /* 0x397B839A, 0x252049C1 */
+
+#ifdef __STDC__
+ int __ieee754_rem_pio2(double x, double *y)
+#else
+ int __ieee754_rem_pio2(x,y)
+ double x,y[];
+#endif
+{
+ fd_twoints u, ux, uz;
+ double z = 0;
+ double w,t,r,fn;
+ double tx[3];
+ int e0,i,j,nx,n,ix,hx;
+
+ u.d = x;
+ hx = __HI(u); /* high word of x */
+ ix = hx&0x7fffffff;
+ if(ix<=0x3fe921fb) /* |x| ~<= pi/4 , no need for reduction */
+ {y[0] = x; y[1] = 0; return 0;}
+ if(ix<0x4002d97c) { /* |x| < 3pi/4, special case with n=+-1 */
+ if(hx>0) {
+ z = x - pio2_1;
+ if(ix!=0x3ff921fb) { /* 33+53 bit pi is good enough */
+ y[0] = z - pio2_1t;
+ y[1] = (z-y[0])-pio2_1t;
+ } else { /* near pi/2, use 33+33+53 bit pi */
+ z -= pio2_2;
+ y[0] = z - pio2_2t;
+ y[1] = (z-y[0])-pio2_2t;
+ }
+ return 1;
+ } else { /* negative x */
+ z = x + pio2_1;
+ if(ix!=0x3ff921fb) { /* 33+53 bit pi is good enough */
+ y[0] = z + pio2_1t;
+ y[1] = (z-y[0])+pio2_1t;
+ } else { /* near pi/2, use 33+33+53 bit pi */
+ z += pio2_2;
+ y[0] = z + pio2_2t;
+ y[1] = (z-y[0])+pio2_2t;
+ }
+ return -1;
+ }
+ }
+ if(ix<=0x413921fb) { /* |x| ~<= 2^19*(pi/2), medium size */
+ t = fd_fabs(x);
+ n = (int) (t*invpio2+half);
+ fn = (double)n;
+ r = t-fn*pio2_1;
+ w = fn*pio2_1t; /* 1st round good to 85 bit */
+ if(n<32&&ix!=npio2_hw[n-1]) {
+ y[0] = r-w; /* quick check no cancellation */
+ } else {
+ j = ix>>20;
+ y[0] = r-w;
+ u.d = y[0];
+ i = j-(((__HI(u))>>20)&0x7ff);
+ if(i>16) { /* 2nd iteration needed, good to 118 */
+ t = r;
+ w = fn*pio2_2;
+ r = t-w;
+ w = fn*pio2_2t-((t-r)-w);
+ y[0] = r-w;
+ u.d = y[0];
+ i = j-(((__HI(u))>>20)&0x7ff);
+ if(i>49) { /* 3rd iteration needed, 151 bits acc */
+ t = r; /* will cover all possible cases */
+ w = fn*pio2_3;
+ r = t-w;
+ w = fn*pio2_3t-((t-r)-w);
+ y[0] = r-w;
+ }
+ }
+ }
+ y[1] = (r-y[0])-w;
+ if(hx<0) {y[0] = -y[0]; y[1] = -y[1]; return -n;}
+ else return n;
+ }
+ /*
+ * all other (large) arguments
+ */
+ if(ix>=0x7ff00000) { /* x is inf or NaN */
+ y[0]=y[1]=x-x; return 0;
+ }
+ /* set z = scalbn(|x|,ilogb(x)-23) */
+ ux.d = x; uz.d = z;
+ __LO(uz) = __LO(ux);
+ z = uz.d;
+ e0 = (ix>>20)-1046; /* e0 = ilogb(z)-23; */
+ uz.d = z;
+ __HI(uz) = ix - (e0<<20);
+ z = uz.d;
+ for(i=0;i<2;i++) {
+ tx[i] = (double)((int)(z));
+ z = (z-tx[i])*two24;
+ }
+ tx[2] = z;
+ nx = 3;
+ while(tx[nx-1]==zero) nx--; /* skip zero term */
+ n = __kernel_rem_pio2(tx,y,e0,nx,2,two_over_pi);
+ if(hx<0) {y[0] = -y[0]; y[1] = -y[1]; return -n;}
+ return n;
+}
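In effect the routine above returns the octant count n and the remainder of x against pi/2 split across y[0]+y[1]. For moderate arguments the same idea can be sketched with C99 remquo() and a plain double pi/2; this drops the extra-precision tail that __ieee754_rem_pio2 carries, so it is an illustration only and is not adequate for huge x:

#include <math.h>
#include <stdio.h>

int main(void)
{
    const double pio2 = 1.57079632679489661923;   /* pi/2 rounded to double */
    double x = 10.0;
    int q;

    /* remainder of x against pi/2; low bits of the rounded quotient land in q */
    double r = remquo(x, pio2, &q);
    printf("x = %g: quotient bits = %d, remainder = %.17g\n", x, q, r);
    return 0;
}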
diff --git a/src/third_party/js-1.7/fdlibm/e_remainder.c b/src/third_party/js-1.7/fdlibm/e_remainder.c
new file mode 100644
index 00000000000..de40f0c2a5e
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/e_remainder.c
@@ -0,0 +1,120 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_remainder.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* __ieee754_remainder(x,p)
+ * Return :
+ * returns x REM p = x - [x/p]*p as if in infinitely
+ * precise arithmetic, where [x/p] is the (infinite-bit)
+ * integer nearest x/p (in the halfway case, choose the even one).
+ * Method :
+ * Based on fmod(), return x-[x/p]chopped*p exactly.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double zero = 0.0;
+#else
+static double zero = 0.0;
+#endif
+
+
+#ifdef __STDC__
+ double __ieee754_remainder(double x, double p)
+#else
+ double __ieee754_remainder(x,p)
+ double x,p;
+#endif
+{
+ fd_twoints u;
+ int hx,hp;
+ unsigned sx,lx,lp;
+ double p_half;
+
+ u.d = x;
+ hx = __HI(u); /* high word of x */
+ lx = __LO(u); /* low word of x */
+ u.d = p;
+ hp = __HI(u); /* high word of p */
+ lp = __LO(u); /* low word of p */
+ sx = hx&0x80000000;
+ hp &= 0x7fffffff;
+ hx &= 0x7fffffff;
+
+ /* purge off exception values */
+ if((hp|lp)==0) return (x*p)/(x*p); /* p = 0 */
+ if((hx>=0x7ff00000)|| /* x not finite */
+ ((hp>=0x7ff00000)&& /* p is NaN */
+ (((hp-0x7ff00000)|lp)!=0)))
+ return (x*p)/(x*p);
+
+
+ if (hp<=0x7fdfffff) x = __ieee754_fmod(x,p+p); /* now x < 2p */
+ if (((hx-hp)|(lx-lp))==0) return zero*x;
+ x = fd_fabs(x);
+ p = fd_fabs(p);
+ if (hp<0x00200000) {
+ if(x+x>p) {
+ x-=p;
+ if(x+x>=p) x -= p;
+ }
+ } else {
+ p_half = 0.5*p;
+ if(x>p_half) {
+ x-=p;
+ if(x>=p_half) x -= p;
+ }
+ }
+ u.d = x;
+ __HI(u) ^= sx;
+ x = u.d;
+ return x;
+}
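The contract in the header comment, x REM p = x - n*p with n the integer nearest x/p and ties going to the even integer, can be checked against the C99 remainder() for well-scaled inputs. A sketch using nearbyint(), which rounds to nearest with ties to even under the default rounding mode:

#include <math.h>
#include <stdio.h>

int main(void)
{
    double x = 5.0, p = 2.0;

    double n = nearbyint(x / p);   /* 2.5 is a tie, so it rounds to the even integer 2 */
    double byhand = x - n * p;     /* 5 - 2*2 = 1 */

    printf("remainder(%g, %g) = %g\n", x, p, remainder(x, p));
    printf("x - n*p           = %g\n", byhand);
    return 0;
}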
diff --git a/src/third_party/js-1.7/fdlibm/e_scalb.c b/src/third_party/js-1.7/fdlibm/e_scalb.c
new file mode 100644
index 00000000000..621704ea048
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/e_scalb.c
@@ -0,0 +1,89 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_scalb.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * __ieee754_scalb(x, fn) is provided for
+ * passing various standard test suites. One
+ * should use scalbn() instead.
+ */
+
+#include "fdlibm.h"
+
+#ifdef _SCALB_INT
+#ifdef __STDC__
+ double __ieee754_scalb(double x, int fn)
+#else
+ double __ieee754_scalb(x,fn)
+ double x; int fn;
+#endif
+#else
+#ifdef __STDC__
+ double __ieee754_scalb(double x, double fn)
+#else
+ double __ieee754_scalb(x,fn)
+ double x, fn;
+#endif
+#endif
+{
+#ifdef _SCALB_INT
+ return fd_scalbn(x,fn);
+#else
+ if (fd_isnan(x)||fd_isnan(fn)) return x*fn;
+ if (!fd_finite(fn)) {
+ if(fn>0.0) return x*fn;
+ else return x/(-fn);
+ }
+ if (fd_rint(fn)!=fn) return (fn-fn)/(fn-fn);
+ if ( fn > 65000.0) return fd_scalbn(x, 65000);
+ if (-fn > 65000.0) return fd_scalbn(x,-65000);
+ return fd_scalbn(x,(int)fn);
+#endif
+}
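As the comment above says, this entry point exists only for test suites; ordinary callers should use scalbn(). A tiny usage sketch of the preferred interface:

#include <math.h>
#include <stdio.h>

int main(void)
{
    double x = 3.0;
    printf("scalbn(%g, 4)  = %g\n", x, scalbn(x, 4));    /* 3 * 2^4  = 48    */
    printf("scalbn(%g, -3) = %g\n", x, scalbn(x, -3));   /* 3 * 2^-3 = 0.375 */
    printf("ldexp(%g, 4)   = %g\n", x, ldexp(x, 4));     /* same thing for binary FP */
    return 0;
}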
diff --git a/src/third_party/js-1.7/fdlibm/e_sinh.c b/src/third_party/js-1.7/fdlibm/e_sinh.c
new file mode 100644
index 00000000000..98ab9b5a32f
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/e_sinh.c
@@ -0,0 +1,122 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)e_sinh.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* __ieee754_sinh(x)
+ * Method :
+ * mathematically sinh(x) is defined to be (exp(x)-exp(-x))/2
+ * 1. Replace x by |x| (sinh(-x) = -sinh(x)).
+ * 2.
+ * 0 <= x <= 22 : sinh(x) := (E + E/(E+1))/2, E=expm1(x)
+ *
+ * 22 <= x <= lnovft : sinh(x) := exp(x)/2
+ * lnovft <= x <= ln2ovft: sinh(x) := exp(x/2)/2 * exp(x/2)
+ * ln2ovft < x : sinh(x) := x*shuge (overflow)
+ *
+ * Special cases:
+ * sinh(x) is |x| if x is +INF, -INF, or NaN.
+ * only sinh(0)=0 is exact for finite x.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double one = 1.0, shuge = 1.0e307;
+#else
+static double one = 1.0, shuge = 1.0e307;
+#endif
+
+#ifdef __STDC__
+ double __ieee754_sinh(double x)
+#else
+ double __ieee754_sinh(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ double t,w,h;
+ int ix,jx;
+ unsigned lx;
+
+ /* High word of |x|. */
+ u.d = x;
+ jx = __HI(u);
+ ix = jx&0x7fffffff;
+
+ /* x is INF or NaN */
+ if(ix>=0x7ff00000) return x+x;
+
+ h = 0.5;
+ if (jx<0) h = -h;
+ /* |x| in [0,22], return sign(x)*0.5*(E+E/(E+1))) */
+ if (ix < 0x40360000) { /* |x|<22 */
+ if (ix<0x3e300000) /* |x|<2**-28 */
+ if(shuge+x>one) return x;/* sinh(tiny) = tiny with inexact */
+ t = fd_expm1(fd_fabs(x));
+ if(ix<0x3ff00000) return h*(2.0*t-t*t/(t+one));
+ return h*(t+t/(t+one));
+ }
+
+ /* |x| in [22, log(maxdouble)] return 0.5*exp(|x|) */
+ if (ix < 0x40862E42) return h*__ieee754_exp(fd_fabs(x));
+
+ /* |x| in [log(maxdouble), overflow threshold] */
+ lx = *( (((*(unsigned*)&one)>>29)) + (unsigned*)&x);
+ if (ix<0x408633CE || (ix==0x408633ce)&&(lx<=(unsigned)0x8fb9f87d)) {
+ w = __ieee754_exp(0.5*fd_fabs(x));
+ t = h*w;
+ return t*w;
+ }
+
+ /* |x| > overflow threshold, sinh(x) overflows */
+ return x*shuge;
+}
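For 0 <= |x| <= 22 the method above evaluates sinh through expm1 to avoid cancellation: sinh(x) = sign(x) * (E + E/(E+1))/2 with E = expm1(|x|). A standalone check of that formula against the library sinh(); a sketch only, which skips the |x| < 1 refinement and the overflow handling in the routine above:

#include <math.h>
#include <stdio.h>

static double sinh_via_expm1(double x)
{
    double h = (x < 0.0) ? -0.5 : 0.5;
    double E = expm1(fabs(x));            /* exp(|x|) - 1, accurate near zero */
    return h * (E + E / (E + 1.0));
}

int main(void)
{
    double xs[] = { 1e-9, 0.5, -3.0, 21.0 };
    for (int i = 0; i < 4; i++)
        printf("x=%g: formula=%.17g  sinh=%.17g\n",
               xs[i], sinh_via_expm1(xs[i]), sinh(xs[i]));
    return 0;
}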
diff --git a/src/third_party/js-1.7/fdlibm/e_sqrt.c b/src/third_party/js-1.7/fdlibm/e_sqrt.c
new file mode 100644
index 00000000000..91802839b08
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/e_sqrt.c
@@ -0,0 +1,497 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+/* @(#)e_sqrt.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* __ieee754_sqrt(x)
+ * Return correctly rounded sqrt.
+ * ------------------------------------------
+ * | Use the hardware sqrt if you have one |
+ * ------------------------------------------
+ * Method:
+ * Bit by bit method using integer arithmetic. (Slow, but portable)
+ * 1. Normalization
+ * Scale x to y in [1,4) with even powers of 2:
+ * find an integer k such that 1 <= (y=x*2^(2k)) < 4, then
+ * sqrt(y) = 2^k * sqrt(x)
+ * 2. Bit by bit computation
+ * Let q = sqrt(y) truncated to i bit after binary point (q = 1),
+ * i 0
+ * i+1 2
+ * s = 2*q , and y = 2 * ( y - q ). (1)
+ * i i i i
+ *
+ * To compute q from q , one checks whether
+ * i+1 i
+ *
+ * -(i+1) 2
+ * (q + 2 ) <= y. (2)
+ * i
+ * -(i+1)
+ * If (2) is false, then q = q ; otherwise q = q + 2 .
+ * i+1 i i+1 i
+ *
+ *	With some algebraic manipulation, it is not difficult to see
+ * that (2) is equivalent to
+ * -(i+1)
+ * s + 2 <= y (3)
+ * i i
+ *
+ * The advantage of (3) is that s and y can be computed by
+ * i i
+ * the following recurrence formula:
+ * if (3) is false
+ *
+ * s = s , y = y ; (4)
+ * i+1 i i+1 i
+ *
+ * otherwise,
+ * -i -(i+1)
+ * s = s + 2 , y = y - s - 2 (5)
+ * i+1 i i+1 i i
+ *
+ * One may easily use induction to prove (4) and (5).
+ *	Note. Since the left hand side of (3) contains only i+2 bits,
+ *	it is not necessary to do a full (53-bit) comparison
+ *	in (3).
+ * 3. Final rounding
+ * After generating the 53 bits result, we compute one more bit.
+ * Together with the remainder, we can decide whether the
+ * result is exact, bigger than 1/2ulp, or less than 1/2ulp
+ *	(it will never be equal to 1/2ulp).
+ * The rounding mode can be detected by checking whether
+ * huge + tiny is equal to huge, and whether huge - tiny is
+ * equal to huge for some floating point number "huge" and "tiny".
+ *
+ * Special cases:
+ * sqrt(+-0) = +-0 ... exact
+ * sqrt(inf) = inf
+ * sqrt(-ve) = NaN ... with invalid signal
+ * sqrt(NaN) = NaN ... with invalid signal for signaling NaN
+ *
+ * Other methods : see the appended file at the end of the program below.
+ *---------------
+ */
+
+#include "fdlibm.h"
+
+#if defined(_MSC_VER)
+/* Microsoft Compiler */
+#pragma warning( disable : 4723 ) /* disables potential divide by 0 warning */
+#endif
+
+#ifdef __STDC__
+static const double one = 1.0, tiny=1.0e-300;
+#else
+static double one = 1.0, tiny=1.0e-300;
+#endif
+
+#ifdef __STDC__
+ double __ieee754_sqrt(double x)
+#else
+ double __ieee754_sqrt(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ double z;
+ int sign = (int)0x80000000;
+ unsigned r,t1,s1,ix1,q1;
+ int ix0,s0,q,m,t,i;
+
+ u.d = x;
+ ix0 = __HI(u); /* high word of x */
+ ix1 = __LO(u); /* low word of x */
+
+ /* take care of Inf and NaN */
+ if((ix0&0x7ff00000)==0x7ff00000) {
+ return x*x+x; /* sqrt(NaN)=NaN, sqrt(+inf)=+inf
+ sqrt(-inf)=sNaN */
+ }
+ /* take care of zero */
+ if(ix0<=0) {
+ if(((ix0&(~sign))|ix1)==0) return x;/* sqrt(+-0) = +-0 */
+ else if(ix0<0)
+ return (x-x)/(x-x); /* sqrt(-ve) = sNaN */
+ }
+ /* normalize x */
+ m = (ix0>>20);
+ if(m==0) { /* subnormal x */
+ while(ix0==0) {
+ m -= 21;
+ ix0 |= (ix1>>11); ix1 <<= 21;
+ }
+ for(i=0;(ix0&0x00100000)==0;i++) ix0<<=1;
+ m -= i-1;
+ ix0 |= (ix1>>(32-i));
+ ix1 <<= i;
+ }
+ m -= 1023; /* unbias exponent */
+ ix0 = (ix0&0x000fffff)|0x00100000;
+ if(m&1){ /* odd m, double x to make it even */
+ ix0 += ix0 + ((ix1&sign)>>31);
+ ix1 += ix1;
+ }
+ m >>= 1; /* m = [m/2] */
+
+ /* generate sqrt(x) bit by bit */
+ ix0 += ix0 + ((ix1&sign)>>31);
+ ix1 += ix1;
+ q = q1 = s0 = s1 = 0; /* [q,q1] = sqrt(x) */
+ r = 0x00200000; /* r = moving bit from right to left */
+
+ while(r!=0) {
+ t = s0+r;
+ if(t<=ix0) {
+ s0 = t+r;
+ ix0 -= t;
+ q += r;
+ }
+ ix0 += ix0 + ((ix1&sign)>>31);
+ ix1 += ix1;
+ r>>=1;
+ }
+
+ r = sign;
+ while(r!=0) {
+ t1 = s1+r;
+ t = s0;
+ if((t<ix0)||((t==ix0)&&(t1<=ix1))) {
+ s1 = t1+r;
+ if(((int)(t1&sign)==sign)&&(s1&sign)==0) s0 += 1;
+ ix0 -= t;
+ if (ix1 < t1) ix0 -= 1;
+ ix1 -= t1;
+ q1 += r;
+ }
+ ix0 += ix0 + ((ix1&sign)>>31);
+ ix1 += ix1;
+ r>>=1;
+ }
+
+ /* use floating add to find out rounding direction */
+ if((ix0|ix1)!=0) {
+ z = one-tiny; /* trigger inexact flag */
+ if (z>=one) {
+ z = one+tiny;
+ if (q1==(unsigned)0xffffffff) { q1=0; q += 1;}
+ else if (z>one) {
+ if (q1==(unsigned)0xfffffffe) q+=1;
+ q1+=2;
+ } else
+ q1 += (q1&1);
+ }
+ }
+ ix0 = (q>>1)+0x3fe00000;
+ ix1 = q1>>1;
+ if ((q&1)==1) ix1 |= sign;
+ ix0 += (m <<20);
+ u.d = z;
+ __HI(u) = ix0;
+ __LO(u) = ix1;
+ z = u.d;
+ return z;
+}
+
+/*
+Other methods (use floating-point arithmetic)
+-------------
+(This is a copy of a drafted paper by Prof W. Kahan
+and K.C. Ng, written in May, 1986)
+
+ Two algorithms are given here to implement sqrt(x)
+ (IEEE double precision arithmetic) in software.
+ Both supply sqrt(x) correctly rounded. The first algorithm (in
+ Section A) uses newton iterations and involves four divisions.
+ The second one uses reciproot iterations to avoid division, but
+ requires more multiplications. Both algorithms need the ability
+ to chop results of arithmetic operations instead of round them,
+ and the INEXACT flag to indicate when an arithmetic operation
+ is executed exactly with no roundoff error, all part of the
+ standard (IEEE 754-1985). The ability to perform shift, add,
+ subtract and logical AND operations upon 32-bit words is needed
+ too, though not part of the standard.
+
+A. sqrt(x) by Newton Iteration
+
+ (1) Initial approximation
+
+ Let x0 and x1 be the leading and the trailing 32-bit words of
+ a floating point number x (in IEEE double format) respectively
+
+ 1 11 52 ...widths
+ ------------------------------------------------------
+ x: |s| e | f |
+ ------------------------------------------------------
+ msb lsb msb lsb ...order
+
+
+ ------------------------ ------------------------
+ x0: |s| e | f1 | x1: | f2 |
+ ------------------------ ------------------------
+
+ By performing shifts and subtracts on x0 and x1 (both regarded
+ as integers), we obtain an 8-bit approximation of sqrt(x) as
+ follows.
+
+ k := (x0>>1) + 0x1ff80000;
+ y0 := k - T1[31&(k>>15)]. ... y ~ sqrt(x) to 8 bits
+ Here k is a 32-bit integer and T1[] is an integer array containing
+ correction terms. Now magically the floating value of y (y's
+ leading 32-bit word is y0, the value of its trailing word is 0)
+ approximates sqrt(x) to almost 8-bit.
+
+ Value of T1:
+ static int T1[32]= {
+ 0, 1024, 3062, 5746, 9193, 13348, 18162, 23592,
+ 29598, 36145, 43202, 50740, 58733, 67158, 75992, 85215,
+ 83599, 71378, 60428, 50647, 41945, 34246, 27478, 21581,
+ 16499, 12183, 8588, 5674, 3403, 1742, 661, 130,};
+
+ (2) Iterative refinement
+
+	Applying Heron's rule three times to y, we have y approximating
+	sqrt(x) to within 1 ulp (Unit in the Last Place):
+
+ y := (y+x/y)/2 ... almost 17 sig. bits
+ y := (y+x/y)/2 ... almost 35 sig. bits
+ y := y-(y-x/y)/2 ... within 1 ulp
+
+
+ Remark 1.
+ Another way to improve y to within 1 ulp is:
+
+ y := (y+x/y) ... almost 17 sig. bits to 2*sqrt(x)
+ y := y - 0x00100006 ... almost 18 sig. bits to sqrt(x)
+
+ 2
+ (x-y )*y
+ y := y + 2* ---------- ...within 1 ulp
+ 2
+ 3y + x
+
+
+ This formula has one division fewer than the one above; however,
+ it requires more multiplications and additions. Also x must be
+ scaled in advance to avoid spurious overflow in evaluating the
+	expression 3y*y+x. Hence it is not recommended unless division
+ is slow. If division is very slow, then one should use the
+ reciproot algorithm given in section B.
+
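+	A rough sketch of steps (1) and (2), assuming the fd_twoints /
+	__HI / __LO word access from fdlibm.h, the T1 table given above,
+	and a positive normal argument (the correctly rounded adjustment
+	of step (3) is omitted):
+
+	    static double newton_sqrt_sketch(double x)
+	    {
+	        fd_twoints u;
+	        int k;
+	        double y;
+
+	        u.d = x;
+	        k = (__HI(u) >> 1) + 0x1ff80000;    // step (1): 8-bit guess
+	        __HI(u) = k - T1[31 & (k >> 15)];   // correct with T1[]
+	        __LO(u) = 0;                        // trailing word is 0
+	        y = u.d;
+
+	        y = (y + x/y) / 2;                  // almost 17 sig. bits
+	        y = (y + x/y) / 2;                  // almost 35 sig. bits
+	        y = y - (y - x/y) / 2;              // within 1 ulp
+	        return y;
+	    }
+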
+ (3) Final adjustment
+
+ By twiddling y's last bit it is possible to force y to be
+ correctly rounded according to the prevailing rounding mode
+ as follows. Let r and i be copies of the rounding mode and
+ inexact flag before entering the square root program. Also we
+ use the expression y+-ulp for the next representable floating
+ numbers (up and down) of y. Note that y+-ulp = either fixed
+ point y+-1, or multiply y by nextafter(1,+-inf) in chopped
+ mode.
+
+ I := FALSE; ... reset INEXACT flag I
+ R := RZ; ... set rounding mode to round-toward-zero
+ z := x/y; ... chopped quotient, possibly inexact
+ If(not I) then { ... if the quotient is exact
+ if(z=y) {
+ I := i; ... restore inexact flag
+ R := r; ... restore rounded mode
+ return sqrt(x):=y.
+ } else {
+ z := z - ulp; ... special rounding
+ }
+ }
+ i := TRUE; ... sqrt(x) is inexact
+ If (r=RN) then z=z+ulp ... rounded-to-nearest
+ If (r=RP) then { ... round-toward-+inf
+ y = y+ulp; z=z+ulp;
+ }
+ y := y+z; ... chopped sum
+ y0:=y0-0x00100000; ... y := y/2 is correctly rounded.
+ I := i; ... restore inexact flag
+ R := r; ... restore rounded mode
+ return sqrt(x):=y.
+
+ (4) Special cases
+
+ Square root of +inf, +-0, or NaN is itself;
+ Square root of a negative number is NaN with invalid signal.
+
+
+B. sqrt(x) by Reciproot Iteration
+
+ (1) Initial approximation
+
+ Let x0 and x1 be the leading and the trailing 32-bit words of
+ a floating point number x (in IEEE double format) respectively
+	(see section A). By performing shifts and subtracts on x0 and y0,
+ we obtain a 7.8-bit approximation of 1/sqrt(x) as follows.
+
+ k := 0x5fe80000 - (x0>>1);
+ y0:= k - T2[63&(k>>14)]. ... y ~ 1/sqrt(x) to 7.8 bits
+
+ Here k is a 32-bit integer and T2[] is an integer array
+ containing correction terms. Now magically the floating
+ value of y (y's leading 32-bit word is y0, the value of
+ its trailing word y1 is set to zero) approximates 1/sqrt(x)
+ to almost 7.8-bit.
+
+ Value of T2:
+ static int T2[64]= {
+ 0x1500, 0x2ef8, 0x4d67, 0x6b02, 0x87be, 0xa395, 0xbe7a, 0xd866,
+ 0xf14a, 0x1091b,0x11fcd,0x13552,0x14999,0x15c98,0x16e34,0x17e5f,
+ 0x18d03,0x19a01,0x1a545,0x1ae8a,0x1b5c4,0x1bb01,0x1bfde,0x1c28d,
+ 0x1c2de,0x1c0db,0x1ba73,0x1b11c,0x1a4b5,0x1953d,0x18266,0x16be0,
+ 0x1683e,0x179d8,0x18a4d,0x19992,0x1a789,0x1b445,0x1bf61,0x1c989,
+ 0x1d16d,0x1d77b,0x1dddf,0x1e2ad,0x1e5bf,0x1e6e8,0x1e654,0x1e3cd,
+ 0x1df2a,0x1d635,0x1cb16,0x1be2c,0x1ae4e,0x19bde,0x1868e,0x16e2e,
+ 0x1527f,0x1334a,0x11051,0xe951, 0xbe01, 0x8e0d, 0x5924, 0x1edd,};
+
+ (2) Iterative refinement
+
+ Apply Reciproot iteration three times to y and multiply the
+ result by x to get an approximation z that matches sqrt(x)
+ to about 1 ulp. To be exact, we will have
+ -1ulp < sqrt(x)-z<1.0625ulp.
+
+ ... set rounding mode to Round-to-nearest
+ y := y*(1.5-0.5*x*y*y) ... almost 15 sig. bits to 1/sqrt(x)
+ y := y*((1.5-2^-30)+0.5*x*y*y)... about 29 sig. bits to 1/sqrt(x)
+ ... special arrangement for better accuracy
+ z := x*y ... 29 bits to sqrt(x), with z*y<1
+ z := z + 0.5*z*(1-z*y) ... about 1 ulp to sqrt(x)
+
+ Remark 2. The constant 1.5-2^-30 is chosen to bias the error so that
+ (a) the term z*y in the final iteration is always less than 1;
+ (b) the error in the final result is biased upward so that
+ -1 ulp < sqrt(x) - z < 1.0625 ulp
+ instead of |sqrt(x)-z|<1.03125ulp.
+
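+	A similar rough sketch of (1) and (2) above, under the same
+	assumptions (fd_twoints / __HI / __LO from fdlibm.h, the T2 table
+	given above, a positive normal x), using the plain reciproot step
+	y*(1.5 - 0.5*x*y*y) for both refinements rather than the biased
+	constant 1.5-2^-30 described above, and omitting the final
+	adjustment of step (3):
+
+	    static double reciproot_sqrt_sketch(double x)
+	    {
+	        fd_twoints u;
+	        int k;
+	        double y, z;
+
+	        u.d = x;
+	        k = 0x5fe80000 - (__HI(u) >> 1);    // (1): 7.8-bit guess of 1/sqrt(x)
+	        __HI(u) = k - T2[63 & (k >> 14)];   // correct with T2[]
+	        __LO(u) = 0;
+	        y = u.d;
+
+	        y = y * (1.5 - 0.5*x*y*y);          // ~15 sig. bits of 1/sqrt(x)
+	        y = y * (1.5 - 0.5*x*y*y);          // ~29 sig. bits of 1/sqrt(x)
+	        z = x * y;                          // ~29 bits of sqrt(x), z*y < 1
+	        z = z + 0.5*z*(1.0 - z*y);          // about 1 ulp of sqrt(x)
+	        return z;
+	    }
+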
+ (3) Final adjustment
+
+ By twiddling y's last bit it is possible to force y to be
+ correctly rounded according to the prevailing rounding mode
+ as follows. Let r and i be copies of the rounding mode and
+ inexact flag before entering the square root program. Also we
+ use the expression y+-ulp for the next representable floating
+ numbers (up and down) of y. Note that y+-ulp = either fixed
+ point y+-1, or multiply y by nextafter(1,+-inf) in chopped
+ mode.
+
+ R := RZ; ... set rounding mode to round-toward-zero
+ switch(r) {
+ case RN: ... round-to-nearest
+ if(x<= z*(z-ulp)...chopped) z = z - ulp; else
+ if(x<= z*(z+ulp)...chopped) z = z; else z = z+ulp;
+ break;
+ case RZ:case RM: ... round-to-zero or round-to--inf
+	    R:=RP;	... reset rounding mode to round-to-+inf
+ if(x<z*z ... rounded up) z = z - ulp; else
+ if(x>=(z+ulp)*(z+ulp) ...rounded up) z = z+ulp;
+ break;
+ case RP: ... round-to-+inf
+ if(x>(z+ulp)*(z+ulp)...chopped) z = z+2*ulp; else
+ if(x>z*z ...chopped) z = z+ulp;
+ break;
+ }
+
+ Remark 3. The above comparisons can be done in fixed point. For
+ example, to compare x and w=z*z chopped, it suffices to compare
+ x1 and w1 (the trailing parts of x and w), regarding them as
+ two's complement integers.
+
+ ...Is z an exact square root?
+ To determine whether z is an exact square root of x, let z1 be the
+ trailing part of z, and also let x0 and x1 be the leading and
+ trailing parts of x.
+
+ If ((z1&0x03ffffff)!=0) ... not exact if trailing 26 bits of z!=0
+ I := 1; ... Raise Inexact flag: z is not exact
+ else {
+ j := 1 - [(x0>>20)&1] ... j = logb(x) mod 2
+ k := z1 >> 26; ... get z's 25-th and 26-th
+ fraction bits
+ I := i or (k&j) or ((k&(j+j+1))!=(x1&3));
+ }
+ R:= r ... restore rounded mode
+ return sqrt(x):=z.
+
+	If multiplication is cheaper than the foregoing red tape, the
+ Inexact flag can be evaluated by
+
+ I := i;
+ I := (z*z!=x) or I.
+
+ Note that z*z can overwrite I; this value must be sensed if it is
+ True.
+
+ Remark 4. If z*z = x exactly, then bit 25 to bit 0 of z1 must be
+ zero.
+
+ --------------------
+ z1: | f2 |
+ --------------------
+ bit 31 bit 0
+
+	Furthermore, bits 27 and 26 of z1, bits 0 and 1 of x1, and the
+	parity (odd or even) of logb(x) have the following relations:
+
+ -------------------------------------------------
+ bit 27,26 of z1 bit 1,0 of x1 logb(x)
+ -------------------------------------------------
+ 00 00 odd and even
+ 01 01 even
+ 10 10 odd
+ 10 00 even
+ 11 01 even
+ -------------------------------------------------
+
+ (4) Special cases (see (4) of Section A).
+
+ */
+
diff --git a/src/third_party/js-1.7/fdlibm/fdlibm.h b/src/third_party/js-1.7/fdlibm/fdlibm.h
new file mode 100644
index 00000000000..e623be56e45
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/fdlibm.h
@@ -0,0 +1,273 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)fdlibm.h 1.5 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* Modified defines start here.. */
+#undef __LITTLE_ENDIAN
+
+#ifdef _WIN32
+#define huge myhuge
+#define __LITTLE_ENDIAN
+#endif
+
+#ifdef XP_OS2
+#define __LITTLE_ENDIAN
+#endif
+
+#if defined(linux) && (defined(__i386__) || defined(__x86_64__) || defined(__ia64) || (defined(__mips) && defined(__MIPSEL__)))
+#define __LITTLE_ENDIAN
+#endif
+
+/* End here. The rest is the standard file. */
+
+#ifdef SOLARIS /* special setup for Sun test regime */
+#if defined(i386) || defined(i486) || \
+ defined(intel) || defined(x86) || defined(i86pc)
+#define __LITTLE_ENDIAN
+#endif
+#endif
+
+typedef union {
+#ifdef __LITTLE_ENDIAN
+ struct { int lo, hi; } ints;
+#else
+ struct { int hi, lo; } ints;
+#endif
+ double d;
+} fd_twoints;
+
+#define __HI(x) x.ints.hi
+#define __LO(x) x.ints.lo
+
+#undef __P
+#ifdef __STDC__
+#define __P(p) p
+#else
+#define __P(p) ()
+#endif
+
+/*
+ * ANSI/POSIX
+ */
+
+extern int signgam;
+
+#define MAXFLOAT ((float)3.40282346638528860e+38)
+
+enum fdversion {fdlibm_ieee = -1, fdlibm_svid, fdlibm_xopen, fdlibm_posix};
+
+#define _LIB_VERSION_TYPE enum fdversion
+#define _LIB_VERSION _fdlib_version
+
+/* if the global variable _LIB_VERSION is not desirable, one may
+ * change the following to be a constant by:
+ *	#define _LIB_VERSION_TYPE const enum version
+ * In that case, once the value of _LIB_VERSION is initialized (see
+ * s_lib_version.c) at compile time, it cannot be modified
+ * in the middle of a program.
+ */
+extern _LIB_VERSION_TYPE _LIB_VERSION;
+
+#define _IEEE_ fdlibm_ieee
+#define _SVID_ fdlibm_svid
+#define _XOPEN_ fdlibm_xopen
+#define _POSIX_ fdlibm_posix
+
+struct exception {
+ int type;
+ char *name;
+ double arg1;
+ double arg2;
+ double retval;
+};
+
+#define HUGE MAXFLOAT
+
+/*
+ * set X_TLOSS = pi*2**52, which is possibly defined in <values.h>
+ * (one may replace the following line by "#include <values.h>")
+ */
+
+#define X_TLOSS 1.41484755040568800000e+16
+
+#define DOMAIN 1
+#define SING 2
+#define OVERFLOW 3
+#define UNDERFLOW 4
+#define TLOSS 5
+#define PLOSS 6
+
+/*
+ * ANSI/POSIX
+ */
+
+extern double fd_acos __P((double));
+extern double fd_asin __P((double));
+extern double fd_atan __P((double));
+extern double fd_atan2 __P((double, double));
+extern double fd_cos __P((double));
+extern double fd_sin __P((double));
+extern double fd_tan __P((double));
+
+extern double fd_cosh __P((double));
+extern double fd_sinh __P((double));
+extern double fd_tanh __P((double));
+
+extern double fd_exp __P((double));
+extern double fd_frexp __P((double, int *));
+extern double fd_ldexp __P((double, int));
+extern double fd_log __P((double));
+extern double fd_log10 __P((double));
+extern double fd_modf __P((double, double *));
+
+extern double fd_pow __P((double, double));
+extern double fd_sqrt __P((double));
+
+extern double fd_ceil __P((double));
+extern double fd_fabs __P((double));
+extern double fd_floor __P((double));
+extern double fd_fmod __P((double, double));
+
+extern double fd_erf __P((double));
+extern double fd_erfc __P((double));
+extern double fd_gamma __P((double));
+extern double fd_hypot __P((double, double));
+extern int fd_isnan __P((double));
+extern int fd_finite __P((double));
+extern double fd_j0 __P((double));
+extern double fd_j1 __P((double));
+extern double fd_jn __P((int, double));
+extern double fd_lgamma __P((double));
+extern double fd_y0 __P((double));
+extern double fd_y1 __P((double));
+extern double fd_yn __P((int, double));
+
+extern double fd_acosh __P((double));
+extern double fd_asinh __P((double));
+extern double fd_atanh __P((double));
+extern double fd_cbrt __P((double));
+extern double fd_logb __P((double));
+extern double fd_nextafter __P((double, double));
+extern double fd_remainder __P((double, double));
+#ifdef _SCALB_INT
+extern double fd_scalb __P((double, int));
+#else
+extern double fd_scalb __P((double, double));
+#endif
+
+extern int fd_matherr __P((struct exception *));
+
+/*
+ * IEEE Test Vector
+ */
+extern double significand __P((double));
+
+/*
+ * Functions callable from C, intended to support IEEE arithmetic.
+ */
+extern double fd_copysign __P((double, double));
+extern int fd_ilogb __P((double));
+extern double fd_rint __P((double));
+extern double fd_scalbn __P((double, int));
+
+/*
+ * BSD math library entry points
+ */
+extern double fd_expm1 __P((double));
+extern double fd_log1p __P((double));
+
+/*
+ * Reentrant version of gamma & lgamma; passes signgam back by reference
+ * as the second argument; user must allocate space for signgam.
+ */
+#ifdef _REENTRANT
+extern double gamma_r __P((double, int *));
+extern double lgamma_r __P((double, int *));
+#endif /* _REENTRANT */
+
+/* ieee style elementary functions */
+extern double __ieee754_sqrt __P((double));
+extern double __ieee754_acos __P((double));
+extern double __ieee754_acosh __P((double));
+extern double __ieee754_log __P((double));
+extern double __ieee754_atanh __P((double));
+extern double __ieee754_asin __P((double));
+extern double __ieee754_atan2 __P((double,double));
+extern double __ieee754_exp __P((double));
+extern double __ieee754_cosh __P((double));
+extern double __ieee754_fmod __P((double,double));
+extern double __ieee754_pow __P((double,double));
+extern double __ieee754_lgamma_r __P((double,int *));
+extern double __ieee754_gamma_r __P((double,int *));
+extern double __ieee754_lgamma __P((double));
+extern double __ieee754_gamma __P((double));
+extern double __ieee754_log10 __P((double));
+extern double __ieee754_sinh __P((double));
+extern double __ieee754_hypot __P((double,double));
+extern double __ieee754_j0 __P((double));
+extern double __ieee754_j1 __P((double));
+extern double __ieee754_y0 __P((double));
+extern double __ieee754_y1 __P((double));
+extern double __ieee754_jn __P((int,double));
+extern double __ieee754_yn __P((int,double));
+extern double __ieee754_remainder __P((double,double));
+extern int __ieee754_rem_pio2 __P((double,double*));
+#ifdef _SCALB_INT
+extern double __ieee754_scalb __P((double,int));
+#else
+extern double __ieee754_scalb __P((double,double));
+#endif
+
+/* fdlibm kernel function */
+extern double __kernel_standard __P((double,double,int,int*));
+extern double __kernel_sin __P((double,double,int));
+extern double __kernel_cos __P((double,double));
+extern double __kernel_tan __P((double,double,int));
+extern int __kernel_rem_pio2 __P((double*,double*,int,int,int,const int*));
diff --git a/src/third_party/js-1.7/fdlibm/fdlibm.mak b/src/third_party/js-1.7/fdlibm/fdlibm.mak
new file mode 100644
index 00000000000..436c1c45058
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/fdlibm.mak
@@ -0,0 +1,1453 @@
+# Microsoft Developer Studio Generated NMAKE File, Format Version 4.20
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Static Library" 0x0104
+
+!IF "$(CFG)" == ""
+CFG=fdlibm - Win32 Debug
+!MESSAGE No configuration specified. Defaulting to fdlibm - Win32 Debug.
+!ENDIF
+
+!IF "$(CFG)" != "fdlibm - Win32 Release" && "$(CFG)" != "fdlibm - Win32 Debug"
+!MESSAGE Invalid configuration "$(CFG)" specified.
+!MESSAGE You can specify a configuration when running NMAKE on this makefile
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "fdlibm.mak" CFG="fdlibm - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "fdlibm - Win32 Release" (based on "Win32 (x86) Static Library")
+!MESSAGE "fdlibm - Win32 Debug" (based on "Win32 (x86) Static Library")
+!MESSAGE
+!ERROR An invalid configuration is specified.
+!ENDIF
+
+!IF "$(OS)" == "Windows_NT"
+NULL=
+!ELSE
+NULL=nul
+!ENDIF
+################################################################################
+# Begin Project
+CPP=cl.exe
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "fdlibm__"
+# PROP BASE Intermediate_Dir "fdlibm__"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "fdlibm__"
+# PROP Intermediate_Dir "fdlibm__"
+# PROP Target_Dir ""
+OUTDIR=.\fdlibm__
+INTDIR=.\fdlibm__
+
+ALL : "$(OUTDIR)\fdlibm.lib"
+
+CLEAN :
+ -@erase "$(INTDIR)\e_acos.obj"
+ -@erase "$(INTDIR)\e_acosh.obj"
+ -@erase "$(INTDIR)\e_asin.obj"
+ -@erase "$(INTDIR)\e_atan2.obj"
+ -@erase "$(INTDIR)\e_atanh.obj"
+ -@erase "$(INTDIR)\e_cosh.obj"
+ -@erase "$(INTDIR)\e_exp.obj"
+ -@erase "$(INTDIR)\e_fmod.obj"
+ -@erase "$(INTDIR)\e_gamma.obj"
+ -@erase "$(INTDIR)\e_gamma_r.obj"
+ -@erase "$(INTDIR)\e_hypot.obj"
+ -@erase "$(INTDIR)\e_j0.obj"
+ -@erase "$(INTDIR)\e_j1.obj"
+ -@erase "$(INTDIR)\e_jn.obj"
+ -@erase "$(INTDIR)\e_lgamma.obj"
+ -@erase "$(INTDIR)\e_lgamma_r.obj"
+ -@erase "$(INTDIR)\e_log.obj"
+ -@erase "$(INTDIR)\e_log10.obj"
+ -@erase "$(INTDIR)\e_pow.obj"
+ -@erase "$(INTDIR)\e_rem_pio2.obj"
+ -@erase "$(INTDIR)\e_remainder.obj"
+ -@erase "$(INTDIR)\e_scalb.obj"
+ -@erase "$(INTDIR)\e_sinh.obj"
+ -@erase "$(INTDIR)\e_sqrt.obj"
+ -@erase "$(INTDIR)\k_cos.obj"
+ -@erase "$(INTDIR)\k_rem_pio2.obj"
+ -@erase "$(INTDIR)\k_sin.obj"
+ -@erase "$(INTDIR)\k_standard.obj"
+ -@erase "$(INTDIR)\k_tan.obj"
+ -@erase "$(INTDIR)\s_asinh.obj"
+ -@erase "$(INTDIR)\s_atan.obj"
+ -@erase "$(INTDIR)\s_cbrt.obj"
+ -@erase "$(INTDIR)\s_ceil.obj"
+ -@erase "$(INTDIR)\s_copysign.obj"
+ -@erase "$(INTDIR)\s_cos.obj"
+ -@erase "$(INTDIR)\s_erf.obj"
+ -@erase "$(INTDIR)\s_expm1.obj"
+ -@erase "$(INTDIR)\s_fabs.obj"
+ -@erase "$(INTDIR)\s_finite.obj"
+ -@erase "$(INTDIR)\s_floor.obj"
+ -@erase "$(INTDIR)\s_frexp.obj"
+ -@erase "$(INTDIR)\s_ilogb.obj"
+ -@erase "$(INTDIR)\s_isnan.obj"
+ -@erase "$(INTDIR)\s_ldexp.obj"
+ -@erase "$(INTDIR)\s_lib_version.obj"
+ -@erase "$(INTDIR)\s_log1p.obj"
+ -@erase "$(INTDIR)\s_logb.obj"
+ -@erase "$(INTDIR)\s_matherr.obj"
+ -@erase "$(INTDIR)\s_modf.obj"
+ -@erase "$(INTDIR)\s_nextafter.obj"
+ -@erase "$(INTDIR)\s_rint.obj"
+ -@erase "$(INTDIR)\s_scalbn.obj"
+ -@erase "$(INTDIR)\s_signgam.obj"
+ -@erase "$(INTDIR)\s_significand.obj"
+ -@erase "$(INTDIR)\s_sin.obj"
+ -@erase "$(INTDIR)\s_tan.obj"
+ -@erase "$(INTDIR)\s_tanh.obj"
+ -@erase "$(INTDIR)\w_acos.obj"
+ -@erase "$(INTDIR)\w_acosh.obj"
+ -@erase "$(INTDIR)\w_asin.obj"
+ -@erase "$(INTDIR)\w_atan2.obj"
+ -@erase "$(INTDIR)\w_atanh.obj"
+ -@erase "$(INTDIR)\w_cosh.obj"
+ -@erase "$(INTDIR)\w_exp.obj"
+ -@erase "$(INTDIR)\w_fmod.obj"
+ -@erase "$(INTDIR)\w_gamma.obj"
+ -@erase "$(INTDIR)\w_gamma_r.obj"
+ -@erase "$(INTDIR)\w_hypot.obj"
+ -@erase "$(INTDIR)\w_j0.obj"
+ -@erase "$(INTDIR)\w_j1.obj"
+ -@erase "$(INTDIR)\w_jn.obj"
+ -@erase "$(INTDIR)\w_lgamma.obj"
+ -@erase "$(INTDIR)\w_lgamma_r.obj"
+ -@erase "$(INTDIR)\w_log.obj"
+ -@erase "$(INTDIR)\w_log10.obj"
+ -@erase "$(INTDIR)\w_pow.obj"
+ -@erase "$(INTDIR)\w_remainder.obj"
+ -@erase "$(INTDIR)\w_scalb.obj"
+ -@erase "$(INTDIR)\w_sinh.obj"
+ -@erase "$(INTDIR)\w_sqrt.obj"
+ -@erase "$(OUTDIR)\fdlibm.lib"
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /c
+# ADD CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS" /YX /c
+CPP_PROJ=/nologo /ML /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_WINDOWS"\
+ /Fp"$(INTDIR)/fdlibm.pch" /YX /Fo"$(INTDIR)/" /c
+CPP_OBJS=.\fdlibm__/
+CPP_SBRS=.\.
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+BSC32_FLAGS=/nologo /o"$(OUTDIR)/fdlibm.bsc"
+BSC32_SBRS= \
+
+LIB32=link.exe -lib
+# ADD BASE LIB32 /nologo
+# ADD LIB32 /nologo
+LIB32_FLAGS=/nologo /out:"$(OUTDIR)/fdlibm.lib"
+LIB32_OBJS= \
+ "$(INTDIR)\e_acos.obj" \
+ "$(INTDIR)\e_acosh.obj" \
+ "$(INTDIR)\e_asin.obj" \
+ "$(INTDIR)\e_atan2.obj" \
+ "$(INTDIR)\e_atanh.obj" \
+ "$(INTDIR)\e_cosh.obj" \
+ "$(INTDIR)\e_exp.obj" \
+ "$(INTDIR)\e_fmod.obj" \
+ "$(INTDIR)\e_gamma.obj" \
+ "$(INTDIR)\e_gamma_r.obj" \
+ "$(INTDIR)\e_hypot.obj" \
+ "$(INTDIR)\e_j0.obj" \
+ "$(INTDIR)\e_j1.obj" \
+ "$(INTDIR)\e_jn.obj" \
+ "$(INTDIR)\e_lgamma.obj" \
+ "$(INTDIR)\e_lgamma_r.obj" \
+ "$(INTDIR)\e_log.obj" \
+ "$(INTDIR)\e_log10.obj" \
+ "$(INTDIR)\e_pow.obj" \
+ "$(INTDIR)\e_rem_pio2.obj" \
+ "$(INTDIR)\e_remainder.obj" \
+ "$(INTDIR)\e_scalb.obj" \
+ "$(INTDIR)\e_sinh.obj" \
+ "$(INTDIR)\e_sqrt.obj" \
+ "$(INTDIR)\k_cos.obj" \
+ "$(INTDIR)\k_rem_pio2.obj" \
+ "$(INTDIR)\k_sin.obj" \
+ "$(INTDIR)\k_standard.obj" \
+ "$(INTDIR)\k_tan.obj" \
+ "$(INTDIR)\s_asinh.obj" \
+ "$(INTDIR)\s_atan.obj" \
+ "$(INTDIR)\s_cbrt.obj" \
+ "$(INTDIR)\s_ceil.obj" \
+ "$(INTDIR)\s_copysign.obj" \
+ "$(INTDIR)\s_cos.obj" \
+ "$(INTDIR)\s_erf.obj" \
+ "$(INTDIR)\s_expm1.obj" \
+ "$(INTDIR)\s_fabs.obj" \
+ "$(INTDIR)\s_finite.obj" \
+ "$(INTDIR)\s_floor.obj" \
+ "$(INTDIR)\s_frexp.obj" \
+ "$(INTDIR)\s_ilogb.obj" \
+ "$(INTDIR)\s_isnan.obj" \
+ "$(INTDIR)\s_ldexp.obj" \
+ "$(INTDIR)\s_lib_version.obj" \
+ "$(INTDIR)\s_log1p.obj" \
+ "$(INTDIR)\s_logb.obj" \
+ "$(INTDIR)\s_matherr.obj" \
+ "$(INTDIR)\s_modf.obj" \
+ "$(INTDIR)\s_nextafter.obj" \
+ "$(INTDIR)\s_rint.obj" \
+ "$(INTDIR)\s_scalbn.obj" \
+ "$(INTDIR)\s_signgam.obj" \
+ "$(INTDIR)\s_significand.obj" \
+ "$(INTDIR)\s_sin.obj" \
+ "$(INTDIR)\s_tan.obj" \
+ "$(INTDIR)\s_tanh.obj" \
+ "$(INTDIR)\w_acos.obj" \
+ "$(INTDIR)\w_acosh.obj" \
+ "$(INTDIR)\w_asin.obj" \
+ "$(INTDIR)\w_atan2.obj" \
+ "$(INTDIR)\w_atanh.obj" \
+ "$(INTDIR)\w_cosh.obj" \
+ "$(INTDIR)\w_exp.obj" \
+ "$(INTDIR)\w_fmod.obj" \
+ "$(INTDIR)\w_gamma.obj" \
+ "$(INTDIR)\w_gamma_r.obj" \
+ "$(INTDIR)\w_hypot.obj" \
+ "$(INTDIR)\w_j0.obj" \
+ "$(INTDIR)\w_j1.obj" \
+ "$(INTDIR)\w_jn.obj" \
+ "$(INTDIR)\w_lgamma.obj" \
+ "$(INTDIR)\w_lgamma_r.obj" \
+ "$(INTDIR)\w_log.obj" \
+ "$(INTDIR)\w_log10.obj" \
+ "$(INTDIR)\w_pow.obj" \
+ "$(INTDIR)\w_remainder.obj" \
+ "$(INTDIR)\w_scalb.obj" \
+ "$(INTDIR)\w_sinh.obj" \
+ "$(INTDIR)\w_sqrt.obj"
+
+"$(OUTDIR)\fdlibm.lib" : "$(OUTDIR)" $(DEF_FILE) $(LIB32_OBJS)
+ $(LIB32) @<<
+ $(LIB32_FLAGS) $(DEF_FLAGS) $(LIB32_OBJS)
+<<
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "fdlibm_0"
+# PROP BASE Intermediate_Dir "fdlibm_0"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "fdlibm_0"
+# PROP Intermediate_Dir "fdlibm_0"
+# PROP Target_Dir ""
+OUTDIR=.\fdlibm_0
+INTDIR=.\fdlibm_0
+
+ALL : "$(OUTDIR)\fdlibm.lib"
+
+CLEAN :
+ -@erase "$(INTDIR)\e_acos.obj"
+ -@erase "$(INTDIR)\e_acosh.obj"
+ -@erase "$(INTDIR)\e_asin.obj"
+ -@erase "$(INTDIR)\e_atan2.obj"
+ -@erase "$(INTDIR)\e_atanh.obj"
+ -@erase "$(INTDIR)\e_cosh.obj"
+ -@erase "$(INTDIR)\e_exp.obj"
+ -@erase "$(INTDIR)\e_fmod.obj"
+ -@erase "$(INTDIR)\e_gamma.obj"
+ -@erase "$(INTDIR)\e_gamma_r.obj"
+ -@erase "$(INTDIR)\e_hypot.obj"
+ -@erase "$(INTDIR)\e_j0.obj"
+ -@erase "$(INTDIR)\e_j1.obj"
+ -@erase "$(INTDIR)\e_jn.obj"
+ -@erase "$(INTDIR)\e_lgamma.obj"
+ -@erase "$(INTDIR)\e_lgamma_r.obj"
+ -@erase "$(INTDIR)\e_log.obj"
+ -@erase "$(INTDIR)\e_log10.obj"
+ -@erase "$(INTDIR)\e_pow.obj"
+ -@erase "$(INTDIR)\e_rem_pio2.obj"
+ -@erase "$(INTDIR)\e_remainder.obj"
+ -@erase "$(INTDIR)\e_scalb.obj"
+ -@erase "$(INTDIR)\e_sinh.obj"
+ -@erase "$(INTDIR)\e_sqrt.obj"
+ -@erase "$(INTDIR)\k_cos.obj"
+ -@erase "$(INTDIR)\k_rem_pio2.obj"
+ -@erase "$(INTDIR)\k_sin.obj"
+ -@erase "$(INTDIR)\k_standard.obj"
+ -@erase "$(INTDIR)\k_tan.obj"
+ -@erase "$(INTDIR)\s_asinh.obj"
+ -@erase "$(INTDIR)\s_atan.obj"
+ -@erase "$(INTDIR)\s_cbrt.obj"
+ -@erase "$(INTDIR)\s_ceil.obj"
+ -@erase "$(INTDIR)\s_copysign.obj"
+ -@erase "$(INTDIR)\s_cos.obj"
+ -@erase "$(INTDIR)\s_erf.obj"
+ -@erase "$(INTDIR)\s_expm1.obj"
+ -@erase "$(INTDIR)\s_fabs.obj"
+ -@erase "$(INTDIR)\s_finite.obj"
+ -@erase "$(INTDIR)\s_floor.obj"
+ -@erase "$(INTDIR)\s_frexp.obj"
+ -@erase "$(INTDIR)\s_ilogb.obj"
+ -@erase "$(INTDIR)\s_isnan.obj"
+ -@erase "$(INTDIR)\s_ldexp.obj"
+ -@erase "$(INTDIR)\s_lib_version.obj"
+ -@erase "$(INTDIR)\s_log1p.obj"
+ -@erase "$(INTDIR)\s_logb.obj"
+ -@erase "$(INTDIR)\s_matherr.obj"
+ -@erase "$(INTDIR)\s_modf.obj"
+ -@erase "$(INTDIR)\s_nextafter.obj"
+ -@erase "$(INTDIR)\s_rint.obj"
+ -@erase "$(INTDIR)\s_scalbn.obj"
+ -@erase "$(INTDIR)\s_signgam.obj"
+ -@erase "$(INTDIR)\s_significand.obj"
+ -@erase "$(INTDIR)\s_sin.obj"
+ -@erase "$(INTDIR)\s_tan.obj"
+ -@erase "$(INTDIR)\s_tanh.obj"
+ -@erase "$(INTDIR)\w_acos.obj"
+ -@erase "$(INTDIR)\w_acosh.obj"
+ -@erase "$(INTDIR)\w_asin.obj"
+ -@erase "$(INTDIR)\w_atan2.obj"
+ -@erase "$(INTDIR)\w_atanh.obj"
+ -@erase "$(INTDIR)\w_cosh.obj"
+ -@erase "$(INTDIR)\w_exp.obj"
+ -@erase "$(INTDIR)\w_fmod.obj"
+ -@erase "$(INTDIR)\w_gamma.obj"
+ -@erase "$(INTDIR)\w_gamma_r.obj"
+ -@erase "$(INTDIR)\w_hypot.obj"
+ -@erase "$(INTDIR)\w_j0.obj"
+ -@erase "$(INTDIR)\w_j1.obj"
+ -@erase "$(INTDIR)\w_jn.obj"
+ -@erase "$(INTDIR)\w_lgamma.obj"
+ -@erase "$(INTDIR)\w_lgamma_r.obj"
+ -@erase "$(INTDIR)\w_log.obj"
+ -@erase "$(INTDIR)\w_log10.obj"
+ -@erase "$(INTDIR)\w_pow.obj"
+ -@erase "$(INTDIR)\w_remainder.obj"
+ -@erase "$(INTDIR)\w_scalb.obj"
+ -@erase "$(INTDIR)\w_sinh.obj"
+ -@erase "$(INTDIR)\w_sqrt.obj"
+ -@erase "$(OUTDIR)\fdlibm.lib"
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+# ADD BASE CPP /nologo /W3 /GX /Z7 /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /c
+# ADD CPP /nologo /W3 /GX /Z7 /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS" /YX /c
+CPP_PROJ=/nologo /MLd /W3 /GX /Z7 /Od /D "WIN32" /D "_DEBUG" /D "_WINDOWS"\
+ /Fp"$(INTDIR)/fdlibm.pch" /YX /Fo"$(INTDIR)/" /c
+CPP_OBJS=.\fdlibm_0/
+CPP_SBRS=.\.
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+BSC32_FLAGS=/nologo /o"$(OUTDIR)/fdlibm.bsc"
+BSC32_SBRS= \
+
+LIB32=link.exe -lib
+# ADD BASE LIB32 /nologo
+# ADD LIB32 /nologo
+LIB32_FLAGS=/nologo /out:"$(OUTDIR)/fdlibm.lib"
+LIB32_OBJS= \
+ "$(INTDIR)\e_acos.obj" \
+ "$(INTDIR)\e_acosh.obj" \
+ "$(INTDIR)\e_asin.obj" \
+ "$(INTDIR)\e_atan2.obj" \
+ "$(INTDIR)\e_atanh.obj" \
+ "$(INTDIR)\e_cosh.obj" \
+ "$(INTDIR)\e_exp.obj" \
+ "$(INTDIR)\e_fmod.obj" \
+ "$(INTDIR)\e_gamma.obj" \
+ "$(INTDIR)\e_gamma_r.obj" \
+ "$(INTDIR)\e_hypot.obj" \
+ "$(INTDIR)\e_j0.obj" \
+ "$(INTDIR)\e_j1.obj" \
+ "$(INTDIR)\e_jn.obj" \
+ "$(INTDIR)\e_lgamma.obj" \
+ "$(INTDIR)\e_lgamma_r.obj" \
+ "$(INTDIR)\e_log.obj" \
+ "$(INTDIR)\e_log10.obj" \
+ "$(INTDIR)\e_pow.obj" \
+ "$(INTDIR)\e_rem_pio2.obj" \
+ "$(INTDIR)\e_remainder.obj" \
+ "$(INTDIR)\e_scalb.obj" \
+ "$(INTDIR)\e_sinh.obj" \
+ "$(INTDIR)\e_sqrt.obj" \
+ "$(INTDIR)\k_cos.obj" \
+ "$(INTDIR)\k_rem_pio2.obj" \
+ "$(INTDIR)\k_sin.obj" \
+ "$(INTDIR)\k_standard.obj" \
+ "$(INTDIR)\k_tan.obj" \
+ "$(INTDIR)\s_asinh.obj" \
+ "$(INTDIR)\s_atan.obj" \
+ "$(INTDIR)\s_cbrt.obj" \
+ "$(INTDIR)\s_ceil.obj" \
+ "$(INTDIR)\s_copysign.obj" \
+ "$(INTDIR)\s_cos.obj" \
+ "$(INTDIR)\s_erf.obj" \
+ "$(INTDIR)\s_expm1.obj" \
+ "$(INTDIR)\s_fabs.obj" \
+ "$(INTDIR)\s_finite.obj" \
+ "$(INTDIR)\s_floor.obj" \
+ "$(INTDIR)\s_frexp.obj" \
+ "$(INTDIR)\s_ilogb.obj" \
+ "$(INTDIR)\s_isnan.obj" \
+ "$(INTDIR)\s_ldexp.obj" \
+ "$(INTDIR)\s_lib_version.obj" \
+ "$(INTDIR)\s_log1p.obj" \
+ "$(INTDIR)\s_logb.obj" \
+ "$(INTDIR)\s_matherr.obj" \
+ "$(INTDIR)\s_modf.obj" \
+ "$(INTDIR)\s_nextafter.obj" \
+ "$(INTDIR)\s_rint.obj" \
+ "$(INTDIR)\s_scalbn.obj" \
+ "$(INTDIR)\s_signgam.obj" \
+ "$(INTDIR)\s_significand.obj" \
+ "$(INTDIR)\s_sin.obj" \
+ "$(INTDIR)\s_tan.obj" \
+ "$(INTDIR)\s_tanh.obj" \
+ "$(INTDIR)\w_acos.obj" \
+ "$(INTDIR)\w_acosh.obj" \
+ "$(INTDIR)\w_asin.obj" \
+ "$(INTDIR)\w_atan2.obj" \
+ "$(INTDIR)\w_atanh.obj" \
+ "$(INTDIR)\w_cosh.obj" \
+ "$(INTDIR)\w_exp.obj" \
+ "$(INTDIR)\w_fmod.obj" \
+ "$(INTDIR)\w_gamma.obj" \
+ "$(INTDIR)\w_gamma_r.obj" \
+ "$(INTDIR)\w_hypot.obj" \
+ "$(INTDIR)\w_j0.obj" \
+ "$(INTDIR)\w_j1.obj" \
+ "$(INTDIR)\w_jn.obj" \
+ "$(INTDIR)\w_lgamma.obj" \
+ "$(INTDIR)\w_lgamma_r.obj" \
+ "$(INTDIR)\w_log.obj" \
+ "$(INTDIR)\w_log10.obj" \
+ "$(INTDIR)\w_pow.obj" \
+ "$(INTDIR)\w_remainder.obj" \
+ "$(INTDIR)\w_scalb.obj" \
+ "$(INTDIR)\w_sinh.obj" \
+ "$(INTDIR)\w_sqrt.obj"
+
+"$(OUTDIR)\fdlibm.lib" : "$(OUTDIR)" $(DEF_FILE) $(LIB32_OBJS)
+ $(LIB32) @<<
+ $(LIB32_FLAGS) $(DEF_FLAGS) $(LIB32_OBJS)
+<<
+
+!ENDIF
+
+.c{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.c{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+################################################################################
+# Begin Target
+
+# Name "fdlibm - Win32 Release"
+# Name "fdlibm - Win32 Debug"
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+!ENDIF
+
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_sqrt.c
+DEP_CPP_W_SQR=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_sqrt.obj" : $(SOURCE) $(DEP_CPP_W_SQR) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_acosh.c
+DEP_CPP_E_ACO=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_acosh.obj" : $(SOURCE) $(DEP_CPP_E_ACO) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_asin.c
+DEP_CPP_E_ASI=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_asin.obj" : $(SOURCE) $(DEP_CPP_E_ASI) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_atan2.c
+DEP_CPP_E_ATA=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_atan2.obj" : $(SOURCE) $(DEP_CPP_E_ATA) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_atanh.c
+DEP_CPP_E_ATAN=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_atanh.obj" : $(SOURCE) $(DEP_CPP_E_ATAN) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_cosh.c
+DEP_CPP_E_COS=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_cosh.obj" : $(SOURCE) $(DEP_CPP_E_COS) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_exp.c
+DEP_CPP_E_EXP=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_exp.obj" : $(SOURCE) $(DEP_CPP_E_EXP) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_fmod.c
+DEP_CPP_E_FMO=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_fmod.obj" : $(SOURCE) $(DEP_CPP_E_FMO) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_gamma.c
+DEP_CPP_E_GAM=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_gamma.obj" : $(SOURCE) $(DEP_CPP_E_GAM) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_gamma_r.c
+DEP_CPP_E_GAMM=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_gamma_r.obj" : $(SOURCE) $(DEP_CPP_E_GAMM) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_hypot.c
+DEP_CPP_E_HYP=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_hypot.obj" : $(SOURCE) $(DEP_CPP_E_HYP) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_j0.c
+DEP_CPP_E_J0_=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_j0.obj" : $(SOURCE) $(DEP_CPP_E_J0_) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_j1.c
+DEP_CPP_E_J1_=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_j1.obj" : $(SOURCE) $(DEP_CPP_E_J1_) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_jn.c
+DEP_CPP_E_JN_=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_jn.obj" : $(SOURCE) $(DEP_CPP_E_JN_) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_lgamma.c
+DEP_CPP_E_LGA=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_lgamma.obj" : $(SOURCE) $(DEP_CPP_E_LGA) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_lgamma_r.c
+DEP_CPP_E_LGAM=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_lgamma_r.obj" : $(SOURCE) $(DEP_CPP_E_LGAM) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_log.c
+DEP_CPP_E_LOG=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_log.obj" : $(SOURCE) $(DEP_CPP_E_LOG) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_log10.c
+DEP_CPP_E_LOG1=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_log10.obj" : $(SOURCE) $(DEP_CPP_E_LOG1) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_pow.c
+DEP_CPP_E_POW=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_pow.obj" : $(SOURCE) $(DEP_CPP_E_POW) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_rem_pio2.c
+DEP_CPP_E_REM=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_rem_pio2.obj" : $(SOURCE) $(DEP_CPP_E_REM) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_remainder.c
+DEP_CPP_E_REMA=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_remainder.obj" : $(SOURCE) $(DEP_CPP_E_REMA) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_scalb.c
+DEP_CPP_E_SCA=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_scalb.obj" : $(SOURCE) $(DEP_CPP_E_SCA) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_sinh.c
+DEP_CPP_E_SIN=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_sinh.obj" : $(SOURCE) $(DEP_CPP_E_SIN) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_sqrt.c
+DEP_CPP_E_SQR=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_sqrt.obj" : $(SOURCE) $(DEP_CPP_E_SQR) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\fdlibm.h
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\k_cos.c
+DEP_CPP_K_COS=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\k_cos.obj" : $(SOURCE) $(DEP_CPP_K_COS) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\k_rem_pio2.c
+DEP_CPP_K_REM=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\k_rem_pio2.obj" : $(SOURCE) $(DEP_CPP_K_REM) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\k_sin.c
+DEP_CPP_K_SIN=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\k_sin.obj" : $(SOURCE) $(DEP_CPP_K_SIN) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\k_standard.c
+DEP_CPP_K_STA=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\k_standard.obj" : $(SOURCE) $(DEP_CPP_K_STA) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\k_tan.c
+DEP_CPP_K_TAN=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\k_tan.obj" : $(SOURCE) $(DEP_CPP_K_TAN) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_asinh.c
+DEP_CPP_S_ASI=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_asinh.obj" : $(SOURCE) $(DEP_CPP_S_ASI) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_atan.c
+DEP_CPP_S_ATA=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_atan.obj" : $(SOURCE) $(DEP_CPP_S_ATA) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_cbrt.c
+DEP_CPP_S_CBR=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_cbrt.obj" : $(SOURCE) $(DEP_CPP_S_CBR) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_ceil.c
+DEP_CPP_S_CEI=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_ceil.obj" : $(SOURCE) $(DEP_CPP_S_CEI) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_copysign.c
+DEP_CPP_S_COP=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_copysign.obj" : $(SOURCE) $(DEP_CPP_S_COP) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_cos.c
+DEP_CPP_S_COS=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_cos.obj" : $(SOURCE) $(DEP_CPP_S_COS) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_erf.c
+DEP_CPP_S_ERF=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_erf.obj" : $(SOURCE) $(DEP_CPP_S_ERF) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_expm1.c
+DEP_CPP_S_EXP=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_expm1.obj" : $(SOURCE) $(DEP_CPP_S_EXP) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_fabs.c
+DEP_CPP_S_FAB=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_fabs.obj" : $(SOURCE) $(DEP_CPP_S_FAB) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_finite.c
+DEP_CPP_S_FIN=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_finite.obj" : $(SOURCE) $(DEP_CPP_S_FIN) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_floor.c
+DEP_CPP_S_FLO=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_floor.obj" : $(SOURCE) $(DEP_CPP_S_FLO) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_frexp.c
+DEP_CPP_S_FRE=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_frexp.obj" : $(SOURCE) $(DEP_CPP_S_FRE) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_ilogb.c
+DEP_CPP_S_ILO=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_ilogb.obj" : $(SOURCE) $(DEP_CPP_S_ILO) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_isnan.c
+DEP_CPP_S_ISN=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_isnan.obj" : $(SOURCE) $(DEP_CPP_S_ISN) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_ldexp.c
+DEP_CPP_S_LDE=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_ldexp.obj" : $(SOURCE) $(DEP_CPP_S_LDE) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_lib_version.c
+DEP_CPP_S_LIB=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_lib_version.obj" : $(SOURCE) $(DEP_CPP_S_LIB) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_log1p.c
+DEP_CPP_S_LOG=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_log1p.obj" : $(SOURCE) $(DEP_CPP_S_LOG) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_logb.c
+DEP_CPP_S_LOGB=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_logb.obj" : $(SOURCE) $(DEP_CPP_S_LOGB) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_matherr.c
+DEP_CPP_S_MAT=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_matherr.obj" : $(SOURCE) $(DEP_CPP_S_MAT) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_modf.c
+DEP_CPP_S_MOD=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_modf.obj" : $(SOURCE) $(DEP_CPP_S_MOD) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_nextafter.c
+DEP_CPP_S_NEX=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_nextafter.obj" : $(SOURCE) $(DEP_CPP_S_NEX) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_rint.c
+DEP_CPP_S_RIN=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_rint.obj" : $(SOURCE) $(DEP_CPP_S_RIN) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_scalbn.c
+DEP_CPP_S_SCA=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_scalbn.obj" : $(SOURCE) $(DEP_CPP_S_SCA) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_signgam.c
+DEP_CPP_S_SIG=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_signgam.obj" : $(SOURCE) $(DEP_CPP_S_SIG) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_significand.c
+DEP_CPP_S_SIGN=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_significand.obj" : $(SOURCE) $(DEP_CPP_S_SIGN) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_sin.c
+DEP_CPP_S_SIN=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_sin.obj" : $(SOURCE) $(DEP_CPP_S_SIN) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_tan.c
+DEP_CPP_S_TAN=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_tan.obj" : $(SOURCE) $(DEP_CPP_S_TAN) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\s_tanh.c
+DEP_CPP_S_TANH=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\s_tanh.obj" : $(SOURCE) $(DEP_CPP_S_TANH) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_acos.c
+DEP_CPP_W_ACO=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_acos.obj" : $(SOURCE) $(DEP_CPP_W_ACO) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_acosh.c
+DEP_CPP_W_ACOS=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_acosh.obj" : $(SOURCE) $(DEP_CPP_W_ACOS) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_asin.c
+DEP_CPP_W_ASI=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_asin.obj" : $(SOURCE) $(DEP_CPP_W_ASI) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_atan2.c
+DEP_CPP_W_ATA=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_atan2.obj" : $(SOURCE) $(DEP_CPP_W_ATA) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_atanh.c
+DEP_CPP_W_ATAN=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_atanh.obj" : $(SOURCE) $(DEP_CPP_W_ATAN) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_cosh.c
+DEP_CPP_W_COS=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_cosh.obj" : $(SOURCE) $(DEP_CPP_W_COS) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_exp.c
+DEP_CPP_W_EXP=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_exp.obj" : $(SOURCE) $(DEP_CPP_W_EXP) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_fmod.c
+DEP_CPP_W_FMO=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_fmod.obj" : $(SOURCE) $(DEP_CPP_W_FMO) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_gamma.c
+DEP_CPP_W_GAM=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_gamma.obj" : $(SOURCE) $(DEP_CPP_W_GAM) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_gamma_r.c
+DEP_CPP_W_GAMM=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_gamma_r.obj" : $(SOURCE) $(DEP_CPP_W_GAMM) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_hypot.c
+DEP_CPP_W_HYP=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_hypot.obj" : $(SOURCE) $(DEP_CPP_W_HYP) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_j0.c
+DEP_CPP_W_J0_=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_j0.obj" : $(SOURCE) $(DEP_CPP_W_J0_) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_j1.c
+DEP_CPP_W_J1_=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_j1.obj" : $(SOURCE) $(DEP_CPP_W_J1_) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_jn.c
+DEP_CPP_W_JN_=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_jn.obj" : $(SOURCE) $(DEP_CPP_W_JN_) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_lgamma.c
+DEP_CPP_W_LGA=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_lgamma.obj" : $(SOURCE) $(DEP_CPP_W_LGA) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_lgamma_r.c
+DEP_CPP_W_LGAM=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_lgamma_r.obj" : $(SOURCE) $(DEP_CPP_W_LGAM) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_log.c
+DEP_CPP_W_LOG=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_log.obj" : $(SOURCE) $(DEP_CPP_W_LOG) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_log10.c
+DEP_CPP_W_LOG1=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_log10.obj" : $(SOURCE) $(DEP_CPP_W_LOG1) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_pow.c
+DEP_CPP_W_POW=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_pow.obj" : $(SOURCE) $(DEP_CPP_W_POW) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_remainder.c
+DEP_CPP_W_REM=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_remainder.obj" : $(SOURCE) $(DEP_CPP_W_REM) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_scalb.c
+DEP_CPP_W_SCA=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_scalb.obj" : $(SOURCE) $(DEP_CPP_W_SCA) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\w_sinh.c
+DEP_CPP_W_SIN=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\w_sinh.obj" : $(SOURCE) $(DEP_CPP_W_SIN) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\e_acos.c
+DEP_CPP_E_ACOS=\
+ ".\fdlibm.h"\
+
+
+"$(INTDIR)\e_acos.obj" : $(SOURCE) $(DEP_CPP_E_ACOS) "$(INTDIR)"
+
+
+# End Source File
+# End Target
+# End Project
+################################################################################
diff --git a/src/third_party/js-1.7/fdlibm/fdlibm.mdp b/src/third_party/js-1.7/fdlibm/fdlibm.mdp
new file mode 100644
index 00000000000..5904c494072
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/fdlibm.mdp
Binary files differ
diff --git a/src/third_party/js-1.7/fdlibm/k_cos.c b/src/third_party/js-1.7/fdlibm/k_cos.c
new file mode 100644
index 00000000000..1d18c803433
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/k_cos.c
@@ -0,0 +1,135 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)k_cos.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * __kernel_cos( x, y )
+ * kernel cos function on [-pi/4, pi/4], pi/4 ~ 0.785398164
+ * Input x is assumed to be bounded by ~pi/4 in magnitude.
+ * Input y is the tail of x.
+ *
+ * Algorithm
+ * 1. Since cos(-x) = cos(x), we need only to consider positive x.
+ * 2. if x < 2^-27 (hx<0x3e400000 0), return 1 with inexact if x!=0.
+ * 3. cos(x) is approximated by a polynomial of degree 14 on
+ * [0,pi/4]
+ * 4 14
+ * cos(x) ~ 1 - x*x/2 + C1*x + ... + C6*x
+ * where the remez error is
+ *
+ * | 2 4 6 8 10 12 14 | -58
+ * |cos(x)-(1-.5*x +C1*x +C2*x +C3*x +C4*x +C5*x +C6*x )| <= 2
+ * | |
+ *
+ * 4 6 8 10 12 14
+ * 4. let r = C1*x +C2*x +C3*x +C4*x +C5*x +C6*x , then
+ * cos(x) = 1 - x*x/2 + r
+ * since cos(x+y) ~ cos(x) - sin(x)*y
+ * ~ cos(x) - x*y,
+ * a correction term is necessary in cos(x) and hence
+ * cos(x+y) = 1 - (x*x/2 - (r - x*y))
+ * For better accuracy when x > 0.3, let qx = |x|/4 with
+ *	   the last 32 bits masked off, and if x > 0.78125, let qx = 0.28125.
+ * Then
+ * cos(x+y) = (1-qx) - ((x*x/2-qx) - (r-x*y)).
+ *	   Note that 1-qx and (x*x/2-qx) are EXACT here, and the
+ * magnitude of the latter is at least a quarter of x*x/2,
+ * thus, reducing the rounding error in the subtraction.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+one = 1.00000000000000000000e+00, /* 0x3FF00000, 0x00000000 */
+C1 = 4.16666666666666019037e-02, /* 0x3FA55555, 0x5555554C */
+C2 = -1.38888888888741095749e-03, /* 0xBF56C16C, 0x16C15177 */
+C3 = 2.48015872894767294178e-05, /* 0x3EFA01A0, 0x19CB1590 */
+C4 = -2.75573143513906633035e-07, /* 0xBE927E4F, 0x809C52AD */
+C5 = 2.08757232129817482790e-09, /* 0x3E21EE9E, 0xBDB4B1C4 */
+C6 = -1.13596475577881948265e-11; /* 0xBDA8FAE9, 0xBE8838D4 */
+
+#ifdef __STDC__
+ double __kernel_cos(double x, double y)
+#else
+ double __kernel_cos(x, y)
+ double x,y;
+#endif
+{
+ fd_twoints u;
+ double qx = 0;
+ double a,hz,z,r;
+ int ix;
+ u.d = x;
+ ix = __HI(u)&0x7fffffff; /* ix = |x|'s high word*/
+	    if(ix<0x3e400000) {			/* if |x| < 2**-27 */
+ if(((int)x)==0) return one; /* generate inexact */
+ }
+ z = x*x;
+ r = z*(C1+z*(C2+z*(C3+z*(C4+z*(C5+z*C6)))));
+ if(ix < 0x3FD33333) /* if |x| < 0.3 */
+ return one - (0.5*z - (z*r - x*y));
+ else {
+ if(ix > 0x3fe90000) { /* x > 0.78125 */
+ qx = 0.28125;
+ } else {
+ u.d = qx;
+ __HI(u) = ix-0x00200000; /* x/4 */
+ __LO(u) = 0;
+ qx = u.d;
+ }
+ hz = 0.5*z-qx;
+ a = one-qx;
+ return a - (hz - (z*r-x*y));
+ }
+}
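
As a quick cross-check of the approximation described in the k_cos.c header above, the following standalone sketch evaluates the same degree-14 even polynomial on the small-argument branch (|x| < 0.3, tail y taken as 0) and compares it with the C library's cos(). The coefficients are copied from the file; the driver itself (main and the test value) is illustrative only and not part of fdlibm or of this patch.

#include <math.h>
#include <stdio.h>

int main(void) {
    /* C1..C6 copied from k_cos.c; everything else is an illustrative driver */
    const double C1 =  4.16666666666666019037e-02;
    const double C2 = -1.38888888888741095749e-03;
    const double C3 =  2.48015872894767294178e-05;
    const double C4 = -2.75573143513906633035e-07;
    const double C5 =  2.08757232129817482790e-09;
    const double C6 = -1.13596475577881948265e-11;

    double x = 0.25;                     /* |x| < 0.3, so no qx correction needed */
    double z = x*x;
    double r = z*(C1+z*(C2+z*(C3+z*(C4+z*(C5+z*C6)))));
    double approx = 1.0 - (0.5*z - z*r); /* cos(x+y) with the tail y = 0 */

    printf("polynomial: %.17g\n", approx);
    printf("cos(x):     %.17g\n", cos(x));
    return 0;
}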
diff --git a/src/third_party/js-1.7/fdlibm/k_rem_pio2.c b/src/third_party/js-1.7/fdlibm/k_rem_pio2.c
new file mode 100644
index 00000000000..d261e190af4
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/k_rem_pio2.c
@@ -0,0 +1,354 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)k_rem_pio2.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * __kernel_rem_pio2(x,y,e0,nx,prec,ipio2)
+ * double x[],y[]; int e0,nx,prec; int ipio2[];
+ *
+ * __kernel_rem_pio2 returns the last three binary digits of N with
+ * y = x - N*pi/2
+ * so that |y| < pi/2.
+ *
+ * The method is to compute the integer (mod 8) and fraction parts of
+ * (2/pi)*x without doing the full multiplication. In general we
+ * skip the part of the product that is known to be a huge integer (
+ * more accurately, = 0 mod 8 ). Thus the number of operations is
+ * independent of the exponent of the input.
+ *
+ * (2/pi) is represented by an array of 24-bit integers in ipio2[].
+ *
+ * Input parameters:
+ * x[] The input value (must be positive) is broken into nx
+ * pieces of 24-bit integers in double precision format.
+ * x[i] will be the i-th 24 bit of x. The scaled exponent
+ * of x[0] is given in input parameter e0 (i.e., x[0]*2^e0
+ *			matches x's up to 24 bits).
+ *
+ * Example of breaking a double positive z into x[0]+x[1]+x[2]:
+ * e0 = ilogb(z)-23
+ * z = scalbn(z,-e0)
+ * for i = 0,1,2
+ * x[i] = floor(z)
+ * z = (z-x[i])*2**24
+ *
+ *
+ *	y[]	output result in an array of double precision numbers.
+ * The dimension of y[] is:
+ * 24-bit precision 1
+ * 53-bit precision 2
+ * 64-bit precision 2
+ * 113-bit precision 3
+ * The actual value is the sum of them. Thus for 113-bit
+ *		precision, one may have to do something like:
+ *
+ * long double t,w,r_head, r_tail;
+ * t = (long double)y[2] + (long double)y[1];
+ * w = (long double)y[0];
+ * r_head = t+w;
+ * r_tail = w - (r_head - t);
+ *
+ * e0 The exponent of x[0]
+ *
+ * nx dimension of x[]
+ *
+ * prec an integer indicating the precision:
+ * 0 24 bits (single)
+ * 1 53 bits (double)
+ * 2 64 bits (extended)
+ * 3 113 bits (quad)
+ *
+ * ipio2[]
+ * integer array, contains the (24*i)-th to (24*i+23)-th
+ * bit of 2/pi after binary point. The corresponding
+ * floating value is
+ *
+ * ipio2[i] * 2^(-24(i+1)).
+ *
+ * External function:
+ * double scalbn(), floor();
+ *
+ *
+ * Here is the description of some local variables:
+ *
+ * jk jk+1 is the initial number of terms of ipio2[] needed
+ * in the computation. The recommended value is 2,3,4,
+ *		6 for single, double, extended, and quad.
+ *
+ * jz local integer variable indicating the number of
+ * terms of ipio2[] used.
+ *
+ * jx nx - 1
+ *
+ * jv index for pointing to the suitable ipio2[] for the
+ * computation. In general, we want
+ * ( 2^e0*x[0] * ipio2[jv-1]*2^(-24jv) )/8
+ * is an integer. Thus
+ * e0-3-24*jv >= 0 or (e0-3)/24 >= jv
+ * Hence jv = max(0,(e0-3)/24).
+ *
+ * jp jp+1 is the number of terms in PIo2[] needed, jp = jk.
+ *
+ * q[] double array with integral value, representing the
+ * 24-bits chunk of the product of x and 2/pi.
+ *
+ * q0 the corresponding exponent of q[0]. Note that the
+ * exponent for q[i] would be q0-24*i.
+ *
+ * PIo2[] double precision array, obtained by cutting pi/2
+ * into 24 bits chunks.
+ *
+ * f[] ipio2[] in floating point
+ *
+ * iq[] integer array by breaking up q[] in 24-bits chunk.
+ *
+ * fq[] final product of x*(2/pi) in fq[0],..,fq[jk]
+ *
+ * ih integer. If >0 it indicates q[] is >= 0.5, hence
+ * it also indicates the *sign* of the result.
+ *
+ */
+
+
+/*
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const int init_jk[] = {2,3,4,6}; /* initial value for jk */
+#else
+static int init_jk[] = {2,3,4,6};
+#endif
+
+#ifdef __STDC__
+static const double PIo2[] = {
+#else
+static double PIo2[] = {
+#endif
+ 1.57079625129699707031e+00, /* 0x3FF921FB, 0x40000000 */
+ 7.54978941586159635335e-08, /* 0x3E74442D, 0x00000000 */
+ 5.39030252995776476554e-15, /* 0x3CF84698, 0x80000000 */
+ 3.28200341580791294123e-22, /* 0x3B78CC51, 0x60000000 */
+ 1.27065575308067607349e-29, /* 0x39F01B83, 0x80000000 */
+ 1.22933308981111328932e-36, /* 0x387A2520, 0x40000000 */
+ 2.73370053816464559624e-44, /* 0x36E38222, 0x80000000 */
+ 2.16741683877804819444e-51, /* 0x3569F31D, 0x00000000 */
+};
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+zero = 0.0,
+one = 1.0,
+two24 = 1.67772160000000000000e+07, /* 0x41700000, 0x00000000 */
+twon24 = 5.96046447753906250000e-08; /* 0x3E700000, 0x00000000 */
+
+#ifdef __STDC__
+ int __kernel_rem_pio2(double *x, double *y, int e0, int nx, int prec, const int *ipio2)
+#else
+ int __kernel_rem_pio2(x,y,e0,nx,prec,ipio2)
+ double x[], y[]; int e0,nx,prec; int ipio2[];
+#endif
+{
+ int jz,jx,jv,jp,jk,carry,n,iq[20],i,j,k,m,q0,ih;
+ double z,fw,f[20],fq[20],q[20];
+
+ /* initialize jk*/
+ jk = init_jk[prec];
+ jp = jk;
+
+ /* determine jx,jv,q0, note that 3>q0 */
+ jx = nx-1;
+ jv = (e0-3)/24; if(jv<0) jv=0;
+ q0 = e0-24*(jv+1);
+
+ /* set up f[0] to f[jx+jk] where f[jx+jk] = ipio2[jv+jk] */
+ j = jv-jx; m = jx+jk;
+ for(i=0;i<=m;i++,j++) f[i] = (j<0)? zero : (double) ipio2[j];
+
+ /* compute q[0],q[1],...q[jk] */
+ for (i=0;i<=jk;i++) {
+ for(j=0,fw=0.0;j<=jx;j++) fw += x[j]*f[jx+i-j]; q[i] = fw;
+ }
+
+ jz = jk;
+recompute:
+	/* distill q[] into iq[] in reverse order */
+ for(i=0,j=jz,z=q[jz];j>0;i++,j--) {
+ fw = (double)((int)(twon24* z));
+ iq[i] = (int)(z-two24*fw);
+ z = q[j-1]+fw;
+ }
+
+ /* compute n */
+ z = fd_scalbn(z,q0); /* actual value of z */
+ z -= 8.0*fd_floor(z*0.125); /* trim off integer >= 8 */
+ n = (int) z;
+ z -= (double)n;
+ ih = 0;
+ if(q0>0) { /* need iq[jz-1] to determine n */
+ i = (iq[jz-1]>>(24-q0)); n += i;
+ iq[jz-1] -= i<<(24-q0);
+ ih = iq[jz-1]>>(23-q0);
+ }
+ else if(q0==0) ih = iq[jz-1]>>23;
+ else if(z>=0.5) ih=2;
+
+ if(ih>0) { /* q > 0.5 */
+ n += 1; carry = 0;
+ for(i=0;i<jz ;i++) { /* compute 1-q */
+ j = iq[i];
+ if(carry==0) {
+ if(j!=0) {
+ carry = 1; iq[i] = 0x1000000- j;
+ }
+ } else iq[i] = 0xffffff - j;
+ }
+ if(q0>0) { /* rare case: chance is 1 in 12 */
+ switch(q0) {
+ case 1:
+ iq[jz-1] &= 0x7fffff; break;
+ case 2:
+ iq[jz-1] &= 0x3fffff; break;
+ }
+ }
+ if(ih==2) {
+ z = one - z;
+ if(carry!=0) z -= fd_scalbn(one,q0);
+ }
+ }
+
+ /* check if recomputation is needed */
+ if(z==zero) {
+ j = 0;
+ for (i=jz-1;i>=jk;i--) j |= iq[i];
+ if(j==0) { /* need recomputation */
+ for(k=1;iq[jk-k]==0;k++); /* k = no. of terms needed */
+
+ for(i=jz+1;i<=jz+k;i++) { /* add q[jz+1] to q[jz+k] */
+ f[jx+i] = (double) ipio2[jv+i];
+ for(j=0,fw=0.0;j<=jx;j++) fw += x[j]*f[jx+i-j];
+ q[i] = fw;
+ }
+ jz += k;
+ goto recompute;
+ }
+ }
+
+ /* chop off zero terms */
+ if(z==0.0) {
+ jz -= 1; q0 -= 24;
+ while(iq[jz]==0) { jz--; q0-=24;}
+	} else {		/* break z into 24-bit chunks if necessary */
+ z = fd_scalbn(z,-q0);
+ if(z>=two24) {
+ fw = (double)((int)(twon24*z));
+ iq[jz] = (int)(z-two24*fw);
+ jz += 1; q0 += 24;
+ iq[jz] = (int) fw;
+ } else iq[jz] = (int) z ;
+ }
+
+ /* convert integer "bit" chunk to floating-point value */
+ fw = fd_scalbn(one,q0);
+ for(i=jz;i>=0;i--) {
+ q[i] = fw*(double)iq[i]; fw*=twon24;
+ }
+
+ /* compute PIo2[0,...,jp]*q[jz,...,0] */
+ for(i=jz;i>=0;i--) {
+ for(fw=0.0,k=0;k<=jp&&k<=jz-i;k++) fw += PIo2[k]*q[i+k];
+ fq[jz-i] = fw;
+ }
+
+ /* compress fq[] into y[] */
+ switch(prec) {
+ case 0:
+ fw = 0.0;
+ for (i=jz;i>=0;i--) fw += fq[i];
+ y[0] = (ih==0)? fw: -fw;
+ break;
+ case 1:
+ case 2:
+ fw = 0.0;
+ for (i=jz;i>=0;i--) fw += fq[i];
+ y[0] = (ih==0)? fw: -fw;
+ fw = fq[0]-fw;
+ for (i=1;i<=jz;i++) fw += fq[i];
+ y[1] = (ih==0)? fw: -fw;
+ break;
+ case 3: /* painful */
+ for (i=jz;i>0;i--) {
+ fw = fq[i-1]+fq[i];
+ fq[i] += fq[i-1]-fw;
+ fq[i-1] = fw;
+ }
+ for (i=jz;i>1;i--) {
+ fw = fq[i-1]+fq[i];
+ fq[i] += fq[i-1]-fw;
+ fq[i-1] = fw;
+ }
+ for (fw=0.0,i=jz;i>=2;i--) fw += fq[i];
+ if(ih==0) {
+ y[0] = fq[0]; y[1] = fq[1]; y[2] = fw;
+ } else {
+ y[0] = -fq[0]; y[1] = -fq[1]; y[2] = -fw;
+ }
+ }
+ return n&7;
+}
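
The input-splitting recipe quoted in the header (e0 = ilogb(z)-23, then repeated floor/scale-by-2^24 steps) can be exercised on its own. The sketch below is a minimal illustration using only <math.h>; the test value and the reassembly check are arbitrary choices for demonstration, not part of the patch.

#include <math.h>
#include <stdio.h>

int main(void) {
    /* Split a positive double z into three 24-bit "digits" x[0..2] with a
     * scaled exponent e0, exactly as described in the k_rem_pio2.c header. */
    double z = 1.0e22;                /* arbitrary positive input */
    double x[3];
    int    e0 = ilogb(z) - 23;        /* scaled exponent of x[0] */
    double t  = scalbn(z, -e0);
    for (int i = 0; i < 3; i++) {
        x[i] = floor(t);
        t = (t - x[i]) * 16777216.0;  /* 2^24 */
    }
    /* z should reassemble exactly, since 3*24 bits cover a 53-bit mantissa */
    double back = scalbn(x[0] + scalbn(x[1], -24) + scalbn(x[2], -48), e0);
    printf("x = {%.0f, %.0f, %.0f}, e0 = %d, reassembled = %.17g\n",
           x[0], x[1], x[2], e0, back);
    return 0;
}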
diff --git a/src/third_party/js-1.7/fdlibm/k_sin.c b/src/third_party/js-1.7/fdlibm/k_sin.c
new file mode 100644
index 00000000000..d2bdabd6da3
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/k_sin.c
@@ -0,0 +1,114 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)k_sin.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* __kernel_sin( x, y, iy)
+ * kernel sin function on [-pi/4, pi/4], pi/4 ~ 0.7854
+ * Input x is assumed to be bounded by ~pi/4 in magnitude.
+ * Input y is the tail of x.
+ * Input iy indicates whether y is 0. (if iy=0, y is assumed to be 0).
+ *
+ * Algorithm
+ * 1. Since sin(-x) = -sin(x), we need only to consider positive x.
+ * 2. if x < 2^-27 (hx<0x3e400000 0), return x with inexact if x!=0.
+ * 3. sin(x) is approximated by a polynomial of degree 13 on
+ * [0,pi/4]
+ * 3 13
+ * sin(x) ~ x + S1*x + ... + S6*x
+ * where
+ *
+ * |sin(x) 2 4 6 8 10 12 | -58
+ * |----- - (1+S1*x +S2*x +S3*x +S4*x +S5*x +S6*x )| <= 2
+ * | x |
+ *
+ * 4. sin(x+y) = sin(x) + sin'(x')*y
+ * ~ sin(x) + (1-x*x/2)*y
+ * For better accuracy, let
+ * 3 2 2 2 2
+ * r = x *(S2+x *(S3+x *(S4+x *(S5+x *S6))))
+ * then 3 2
+ * sin(x) = x + (S1*x + (x *(r-y/2)+y))
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+half = 5.00000000000000000000e-01, /* 0x3FE00000, 0x00000000 */
+S1 = -1.66666666666666324348e-01, /* 0xBFC55555, 0x55555549 */
+S2 = 8.33333333332248946124e-03, /* 0x3F811111, 0x1110F8A6 */
+S3 = -1.98412698298579493134e-04, /* 0xBF2A01A0, 0x19C161D5 */
+S4 = 2.75573137070700676789e-06, /* 0x3EC71DE3, 0x57B1FE7D */
+S5 = -2.50507602534068634195e-08, /* 0xBE5AE5E6, 0x8A2B9CEB */
+S6 = 1.58969099521155010221e-10; /* 0x3DE5D93A, 0x5ACFD57C */
+
+#ifdef __STDC__
+ double __kernel_sin(double x, double y, int iy)
+#else
+ double __kernel_sin(x, y, iy)
+ double x,y; int iy; /* iy=0 if y is zero */
+#endif
+{
+ fd_twoints u;
+ double z,r,v;
+ int ix;
+ u.d = x;
+ ix = __HI(u)&0x7fffffff; /* high word of x */
+ if(ix<0x3e400000) /* |x| < 2**-27 */
+ {if((int)x==0) return x;} /* generate inexact */
+ z = x*x;
+ v = z*x;
+ r = S2+z*(S3+z*(S4+z*(S5+z*S6)));
+ if(iy==0) return x+v*(S1+z*r);
+ else return x-((z*(half*y-v*r)-y)-v*S1);
+}
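
The same kind of cross-check works for the sine kernel: the standalone sketch below evaluates the odd degree-13 polynomial from the header on the iy = 0 path (tail y = 0) and compares it with the C library's sin(). Coefficients are copied from the file; the driver is illustrative only.

#include <math.h>
#include <stdio.h>

int main(void) {
    /* S1..S6 copied from k_sin.c; main() is an illustrative driver */
    const double S1 = -1.66666666666666324348e-01;
    const double S2 =  8.33333333332248946124e-03;
    const double S3 = -1.98412698298579493134e-04;
    const double S4 =  2.75573137070700676789e-06;
    const double S5 = -2.50507602534068634195e-08;
    const double S6 =  1.58969099521155010221e-10;

    double x = 0.25;                  /* inside [-pi/4, pi/4] */
    double z = x*x;
    double v = z*x;
    double r = S2+z*(S3+z*(S4+z*(S5+z*S6)));
    double approx = x + v*(S1+z*r);   /* iy == 0 path: tail y is zero */

    printf("polynomial: %.17g\n", approx);
    printf("sin(x):     %.17g\n", sin(x));
    return 0;
}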
diff --git a/src/third_party/js-1.7/fdlibm/k_standard.c b/src/third_party/js-1.7/fdlibm/k_standard.c
new file mode 100644
index 00000000000..720109c9d6a
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/k_standard.c
@@ -0,0 +1,785 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)k_standard.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+
+#include "fdlibm.h"
+
+/* XXX ugly hack to get msvc to link without error. */
+#if _LIB_VERSION == _IEEE_ && !(defined(DARWIN) || defined(XP_MACOSX))
+ int errno;
+# define EDOM 0
+# define ERANGE 0
+#else
+# include <errno.h>
+#endif
+
+
+#ifndef _USE_WRITE
+#include <stdio.h> /* fputs(), stderr */
+#define WRITE2(u,v) fputs(u, stderr)
+#else /* !defined(_USE_WRITE) */
+#include <unistd.h> /* write */
+#define WRITE2(u,v) write(2, u, v)
+#undef fflush
+#endif /* !defined(_USE_WRITE) */
+
+static double zero = 0.0; /* used as const */
+
+/*
+ * Standard conformance (non-IEEE) on exception cases.
+ * Mapping:
+ * 1 -- acos(|x|>1)
+ * 2 -- asin(|x|>1)
+ * 3 -- atan2(+-0,+-0)
+ * 4 -- hypot overflow
+ * 5 -- cosh overflow
+ * 6 -- exp overflow
+ * 7 -- exp underflow
+ * 8 -- y0(0)
+ * 9 -- y0(-ve)
+ * 10-- y1(0)
+ * 11-- y1(-ve)
+ * 12-- yn(0)
+ * 13-- yn(-ve)
+ * 14-- lgamma(finite) overflow
+ * 15-- lgamma(-integer)
+ * 16-- log(0)
+ * 17-- log(x<0)
+ * 18-- log10(0)
+ * 19-- log10(x<0)
+ * 20-- pow(0.0,0.0)
+ * 21-- pow(x,y) overflow
+ * 22-- pow(x,y) underflow
+ * 23-- pow(0,negative)
+ * 24-- pow(neg,non-integral)
+ * 25-- sinh(finite) overflow
+ * 26-- sqrt(negative)
+ * 27-- fmod(x,0)
+ * 28-- remainder(x,0)
+ * 29-- acosh(x<1)
+ * 30-- atanh(|x|>1)
+ * 31-- atanh(|x|=1)
+ * 32-- scalb overflow
+ * 33-- scalb underflow
+ * 34-- j0(|x|>X_TLOSS)
+ * 35-- y0(x>X_TLOSS)
+ * 36-- j1(|x|>X_TLOSS)
+ * 37-- y1(x>X_TLOSS)
+ * 38-- jn(|x|>X_TLOSS, n)
+ * 39-- yn(x>X_TLOSS, n)
+ * 40-- gamma(finite) overflow
+ * 41-- gamma(-integer)
+ * 42-- pow(NaN,0.0)
+ */
+
+
+#ifdef __STDC__
+ double __kernel_standard(double x, double y, int type, int *err)
+#else
+ double __kernel_standard(x,y,type, err)
+ double x,y; int type;int *err;
+#endif
+{
+ struct exception exc;
+#ifndef HUGE_VAL /* this is the only routine that uses HUGE_VAL */
+#define HUGE_VAL inf
+ double inf = 0.0;
+ fd_twoints u;
+
+ u.d = inf;
+ __HI(u) = 0x7ff00000; /* set inf to infinite */
+ inf = u.d;
+#endif
+
+ *err = 0;
+
+#ifdef _USE_WRITE
+ (void) fflush(stdout);
+#endif
+ exc.arg1 = x;
+ exc.arg2 = y;
+ switch(type) {
+ case 1:
+ /* acos(|x|>1) */
+ exc.type = DOMAIN;
+ exc.name = "acos";
+ exc.retval = zero;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if(_LIB_VERSION == _SVID_) {
+ (void) WRITE2("acos: DOMAIN error\n", 19);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 2:
+ /* asin(|x|>1) */
+ exc.type = DOMAIN;
+ exc.name = "asin";
+ exc.retval = zero;
+ if(_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if(_LIB_VERSION == _SVID_) {
+ (void) WRITE2("asin: DOMAIN error\n", 19);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 3:
+ /* atan2(+-0,+-0) */
+ exc.arg1 = y;
+ exc.arg2 = x;
+ exc.type = DOMAIN;
+ exc.name = "atan2";
+ exc.retval = zero;
+ if(_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if(_LIB_VERSION == _SVID_) {
+ (void) WRITE2("atan2: DOMAIN error\n", 20);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 4:
+ /* hypot(finite,finite) overflow */
+ exc.type = OVERFLOW;
+ exc.name = "hypot";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = HUGE;
+ else
+ exc.retval = HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ *err = ERANGE;
+ }
+ break;
+ case 5:
+ /* cosh(finite) overflow */
+ exc.type = OVERFLOW;
+ exc.name = "cosh";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = HUGE;
+ else
+ exc.retval = HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ *err = ERANGE;
+ }
+ break;
+ case 6:
+ /* exp(finite) overflow */
+ exc.type = OVERFLOW;
+ exc.name = "exp";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = HUGE;
+ else
+ exc.retval = HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ *err = ERANGE;
+ }
+ break;
+ case 7:
+ /* exp(finite) underflow */
+ exc.type = UNDERFLOW;
+ exc.name = "exp";
+ exc.retval = zero;
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ *err = ERANGE;
+ }
+ break;
+ case 8:
+ /* y0(0) = -inf */
+ exc.type = DOMAIN; /* should be SING for IEEE */
+ exc.name = "y0";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = -HUGE;
+ else
+ exc.retval = -HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("y0: DOMAIN error\n", 17);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 9:
+ /* y0(x<0) = NaN */
+ exc.type = DOMAIN;
+ exc.name = "y0";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = -HUGE;
+ else
+ exc.retval = -HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("y0: DOMAIN error\n", 17);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 10:
+ /* y1(0) = -inf */
+ exc.type = DOMAIN; /* should be SING for IEEE */
+ exc.name = "y1";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = -HUGE;
+ else
+ exc.retval = -HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("y1: DOMAIN error\n", 17);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 11:
+ /* y1(x<0) = NaN */
+ exc.type = DOMAIN;
+ exc.name = "y1";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = -HUGE;
+ else
+ exc.retval = -HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("y1: DOMAIN error\n", 17);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 12:
+ /* yn(n,0) = -inf */
+ exc.type = DOMAIN; /* should be SING for IEEE */
+ exc.name = "yn";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = -HUGE;
+ else
+ exc.retval = -HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("yn: DOMAIN error\n", 17);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 13:
+ /* yn(x<0) = NaN */
+ exc.type = DOMAIN;
+ exc.name = "yn";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = -HUGE;
+ else
+ exc.retval = -HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("yn: DOMAIN error\n", 17);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 14:
+ /* lgamma(finite) overflow */
+ exc.type = OVERFLOW;
+ exc.name = "lgamma";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = HUGE;
+ else
+ exc.retval = HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ *err = ERANGE;
+ }
+ break;
+ case 15:
+ /* lgamma(-integer) or lgamma(0) */
+ exc.type = SING;
+ exc.name = "lgamma";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = HUGE;
+ else
+ exc.retval = HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("lgamma: SING error\n", 19);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 16:
+ /* log(0) */
+ exc.type = SING;
+ exc.name = "log";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = -HUGE;
+ else
+ exc.retval = -HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("log: SING error\n", 16);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 17:
+ /* log(x<0) */
+ exc.type = DOMAIN;
+ exc.name = "log";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = -HUGE;
+ else
+ exc.retval = -HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("log: DOMAIN error\n", 18);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 18:
+ /* log10(0) */
+ exc.type = SING;
+ exc.name = "log10";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = -HUGE;
+ else
+ exc.retval = -HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("log10: SING error\n", 18);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 19:
+ /* log10(x<0) */
+ exc.type = DOMAIN;
+ exc.name = "log10";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = -HUGE;
+ else
+ exc.retval = -HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("log10: DOMAIN error\n", 20);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 20:
+ /* pow(0.0,0.0) */
+ /* error only if _LIB_VERSION == _SVID_ */
+ exc.type = DOMAIN;
+ exc.name = "pow";
+ exc.retval = zero;
+ if (_LIB_VERSION != _SVID_) exc.retval = 1.0;
+ else if (!fd_matherr(&exc)) {
+ (void) WRITE2("pow(0,0): DOMAIN error\n", 23);
+ *err = EDOM;
+ }
+ break;
+ case 21:
+ /* pow(x,y) overflow */
+ exc.type = OVERFLOW;
+ exc.name = "pow";
+ if (_LIB_VERSION == _SVID_) {
+ exc.retval = HUGE;
+ y *= 0.5;
+ if(x<zero&&fd_rint(y)!=y) exc.retval = -HUGE;
+ } else {
+ exc.retval = HUGE_VAL;
+ y *= 0.5;
+ if(x<zero&&fd_rint(y)!=y) exc.retval = -HUGE_VAL;
+ }
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ *err = ERANGE;
+ }
+ break;
+ case 22:
+ /* pow(x,y) underflow */
+ exc.type = UNDERFLOW;
+ exc.name = "pow";
+ exc.retval = zero;
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ *err = ERANGE;
+ }
+ break;
+ case 23:
+ /* 0**neg */
+ exc.type = DOMAIN;
+ exc.name = "pow";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = zero;
+ else
+ exc.retval = -HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("pow(0,neg): DOMAIN error\n", 25);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 24:
+ /* neg**non-integral */
+ exc.type = DOMAIN;
+ exc.name = "pow";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = zero;
+ else
+ exc.retval = zero/zero; /* X/Open allow NaN */
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("neg**non-integral: DOMAIN error\n", 32);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 25:
+ /* sinh(finite) overflow */
+ exc.type = OVERFLOW;
+ exc.name = "sinh";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = ( (x>zero) ? HUGE : -HUGE);
+ else
+ exc.retval = ( (x>zero) ? HUGE_VAL : -HUGE_VAL);
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ *err = ERANGE;
+ }
+ break;
+ case 26:
+ /* sqrt(x<0) */
+ exc.type = DOMAIN;
+ exc.name = "sqrt";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = zero;
+ else
+ exc.retval = zero/zero;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("sqrt: DOMAIN error\n", 19);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 27:
+ /* fmod(x,0) */
+ exc.type = DOMAIN;
+ exc.name = "fmod";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = x;
+ else
+ exc.retval = zero/zero;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("fmod: DOMAIN error\n", 20);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 28:
+ /* remainder(x,0) */
+ exc.type = DOMAIN;
+ exc.name = "remainder";
+ exc.retval = zero/zero;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("remainder: DOMAIN error\n", 24);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 29:
+ /* acosh(x<1) */
+ exc.type = DOMAIN;
+ exc.name = "acosh";
+ exc.retval = zero/zero;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("acosh: DOMAIN error\n", 20);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 30:
+ /* atanh(|x|>1) */
+ exc.type = DOMAIN;
+ exc.name = "atanh";
+ exc.retval = zero/zero;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("atanh: DOMAIN error\n", 20);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 31:
+ /* atanh(|x|=1) */
+ exc.type = SING;
+ exc.name = "atanh";
+ exc.retval = x/zero; /* sign(x)*inf */
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("atanh: SING error\n", 18);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 32:
+ /* scalb overflow; SVID also returns +-HUGE_VAL */
+ exc.type = OVERFLOW;
+ exc.name = "scalb";
+ exc.retval = x > zero ? HUGE_VAL : -HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ *err = ERANGE;
+ }
+ break;
+ case 33:
+ /* scalb underflow */
+ exc.type = UNDERFLOW;
+ exc.name = "scalb";
+ exc.retval = fd_copysign(zero,x);
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ *err = ERANGE;
+ }
+ break;
+ case 34:
+ /* j0(|x|>X_TLOSS) */
+ exc.type = TLOSS;
+ exc.name = "j0";
+ exc.retval = zero;
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2(exc.name, 2);
+ (void) WRITE2(": TLOSS error\n", 14);
+ }
+ *err = ERANGE;
+ }
+ break;
+ case 35:
+ /* y0(x>X_TLOSS) */
+ exc.type = TLOSS;
+ exc.name = "y0";
+ exc.retval = zero;
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2(exc.name, 2);
+ (void) WRITE2(": TLOSS error\n", 14);
+ }
+ *err = ERANGE;
+ }
+ break;
+ case 36:
+ /* j1(|x|>X_TLOSS) */
+ exc.type = TLOSS;
+ exc.name = "j1";
+ exc.retval = zero;
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2(exc.name, 2);
+ (void) WRITE2(": TLOSS error\n", 14);
+ }
+ *err = ERANGE;
+ }
+ break;
+ case 37:
+ /* y1(x>X_TLOSS) */
+ exc.type = TLOSS;
+ exc.name = "y1";
+ exc.retval = zero;
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2(exc.name, 2);
+ (void) WRITE2(": TLOSS error\n", 14);
+ }
+ *err = ERANGE;
+ }
+ break;
+ case 38:
+ /* jn(|x|>X_TLOSS) */
+ exc.type = TLOSS;
+ exc.name = "jn";
+ exc.retval = zero;
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2(exc.name, 2);
+ (void) WRITE2(": TLOSS error\n", 14);
+ }
+ *err = ERANGE;
+ }
+ break;
+ case 39:
+ /* yn(x>X_TLOSS) */
+ exc.type = TLOSS;
+ exc.name = "yn";
+ exc.retval = zero;
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2(exc.name, 2);
+ (void) WRITE2(": TLOSS error\n", 14);
+ }
+ *err = ERANGE;
+ }
+ break;
+ case 40:
+ /* gamma(finite) overflow */
+ exc.type = OVERFLOW;
+ exc.name = "gamma";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = HUGE;
+ else
+ exc.retval = HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = ERANGE;
+ else if (!fd_matherr(&exc)) {
+ *err = ERANGE;
+ }
+ break;
+ case 41:
+ /* gamma(-integer) or gamma(0) */
+ exc.type = SING;
+ exc.name = "gamma";
+ if (_LIB_VERSION == _SVID_)
+ exc.retval = HUGE;
+ else
+ exc.retval = HUGE_VAL;
+ if (_LIB_VERSION == _POSIX_)
+ *err = EDOM;
+ else if (!fd_matherr(&exc)) {
+ if (_LIB_VERSION == _SVID_) {
+ (void) WRITE2("gamma: SING error\n", 18);
+ }
+ *err = EDOM;
+ }
+ break;
+ case 42:
+ /* pow(NaN,0.0) */
+ /* error only if _LIB_VERSION == _SVID_ & _XOPEN_ */
+ exc.type = DOMAIN;
+ exc.name = "pow";
+ exc.retval = x;
+ if (_LIB_VERSION == _IEEE_ ||
+ _LIB_VERSION == _POSIX_) exc.retval = 1.0;
+ else if (!fd_matherr(&exc)) {
+ *err = EDOM;
+ }
+ break;
+ }
+ return exc.retval;
+}
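
The wrapper files (w_*.c) funnel exceptional arguments into this routine with the type codes tabulated above, and the caller then publishes the returned errno value. The sketch below mimics that dispatch on a much smaller scale for case 26 (sqrt of a negative argument); demo_standard, demo_sqrt and demo_exception are hypothetical stand-ins written for illustration, not the fdlibm interfaces themselves.

#include <errno.h>
#include <math.h>
#include <stdio.h>

struct demo_exception { int type; const char *name; double retval; };

/* Decide the substitute return value and the errno for one error case. */
static double demo_standard(double x, int type, int *err) {
    struct demo_exception exc = { type, "unknown", x };
    switch (type) {
    case 26:                          /* sqrt(x < 0): domain error, return NaN */
        exc.name   = "sqrt";
        exc.retval = nan("");
        *err       = EDOM;
        break;
    default:
        *err = 0;
    }
    printf("handled %s (case %d)\n", exc.name, exc.type);
    return exc.retval;
}

/* A wrapper in the style of the w_*.c files, for this illustrative case only. */
static double demo_sqrt(double x) {
    if (x < 0.0) {
        int err = 0;
        double r = demo_standard(x, 26, &err);
        if (err) errno = err;
        return r;
    }
    return sqrt(x);
}

int main(void) {
    errno = 0;
    double r = demo_sqrt(-1.0);
    printf("demo_sqrt(-1) = %g, errno == EDOM: %d\n", r, errno == EDOM);
    return 0;
}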
diff --git a/src/third_party/js-1.7/fdlibm/k_tan.c b/src/third_party/js-1.7/fdlibm/k_tan.c
new file mode 100644
index 00000000000..1e7681b8883
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/k_tan.c
@@ -0,0 +1,170 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)k_tan.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* __kernel_tan( x, y, k )
+ * kernel tan function on [-pi/4, pi/4], pi/4 ~ 0.7854
+ * Input x is assumed to be bounded by ~pi/4 in magnitude.
+ * Input y is the tail of x.
+ * Input iy indicates whether tan (if iy = 1) or
+ * -1/tan (if iy = -1) is returned.
+ *
+ * Algorithm
+ * 1. Since tan(-x) = -tan(x), we need only to consider positive x.
+ * 2. if x < 2^-28 (hx<0x3e300000 0), return x with inexact if x!=0.
+ * 3. tan(x) is approximated by an odd polynomial of degree 27 on
+ * [0,0.67434]
+ * 3 27
+ * tan(x) ~ x + T1*x + ... + T13*x
+ * where
+ *
+ * |tan(x) 2 4 26 | -59.2
+ * |----- - (1+T1*x +T2*x +.... +T13*x )| <= 2
+ * | x |
+ *
+ * Note: tan(x+y) = tan(x) + tan'(x)*y
+ * ~ tan(x) + (1+x*x)*y
+ * Therefore, for better accuracy in computing tan(x+y), let
+ * 3 2 2 2 2
+ * r = x *(T2+x *(T3+x *(...+x *(T12+x *T13))))
+ * then
+ * 3 2
+ * tan(x+y) = x + (T1*x + (x *(r+y)+y))
+ *
+ * 4. For x in [0.67434,pi/4], let y = pi/4 - x, then
+ * tan(x) = tan(pi/4-y) = (1-tan(y))/(1+tan(y))
+ * = 1 - 2*(tan(y) - (tan(y)^2)/(1+tan(y)))
+ */
+
+#include "fdlibm.h"
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+one = 1.00000000000000000000e+00, /* 0x3FF00000, 0x00000000 */
+pio4 = 7.85398163397448278999e-01, /* 0x3FE921FB, 0x54442D18 */
+pio4lo= 3.06161699786838301793e-17, /* 0x3C81A626, 0x33145C07 */
+T[] = {
+ 3.33333333333334091986e-01, /* 0x3FD55555, 0x55555563 */
+ 1.33333333333201242699e-01, /* 0x3FC11111, 0x1110FE7A */
+ 5.39682539762260521377e-02, /* 0x3FABA1BA, 0x1BB341FE */
+ 2.18694882948595424599e-02, /* 0x3F9664F4, 0x8406D637 */
+ 8.86323982359930005737e-03, /* 0x3F8226E3, 0xE96E8493 */
+ 3.59207910759131235356e-03, /* 0x3F6D6D22, 0xC9560328 */
+ 1.45620945432529025516e-03, /* 0x3F57DBC8, 0xFEE08315 */
+ 5.88041240820264096874e-04, /* 0x3F4344D8, 0xF2F26501 */
+ 2.46463134818469906812e-04, /* 0x3F3026F7, 0x1A8D1068 */
+ 7.81794442939557092300e-05, /* 0x3F147E88, 0xA03792A6 */
+ 7.14072491382608190305e-05, /* 0x3F12B80F, 0x32F0A7E9 */
+ -1.85586374855275456654e-05, /* 0xBEF375CB, 0xDB605373 */
+ 2.59073051863633712884e-05, /* 0x3EFB2A70, 0x74BF7AD4 */
+};
+
+#ifdef __STDC__
+ double __kernel_tan(double x, double y, int iy)
+#else
+ double __kernel_tan(x, y, iy)
+ double x,y; int iy;
+#endif
+{
+ fd_twoints u;
+ double z,r,v,w,s;
+ int ix,hx;
+ u.d = x;
+ hx = __HI(u); /* high word of x */
+ ix = hx&0x7fffffff; /* high word of |x| */
+ if(ix<0x3e300000) /* x < 2**-28 */
+ {if((int)x==0) { /* generate inexact */
+ u.d =x;
+ if(((ix|__LO(u))|(iy+1))==0) return one/fd_fabs(x);
+ else return (iy==1)? x: -one/x;
+ }
+ }
+ if(ix>=0x3FE59428) { /* |x|>=0.6744 */
+ if(hx<0) {x = -x; y = -y;}
+ z = pio4-x;
+ w = pio4lo-y;
+ x = z+w; y = 0.0;
+ }
+ z = x*x;
+ w = z*z;
+ /* Break x^5*(T[1]+x^2*T[2]+...) into
+ * x^5(T[1]+x^4*T[3]+...+x^20*T[11]) +
+ * x^5(x^2*(T[2]+x^4*T[4]+...+x^22*[T12]))
+ */
+ r = T[1]+w*(T[3]+w*(T[5]+w*(T[7]+w*(T[9]+w*T[11]))));
+ v = z*(T[2]+w*(T[4]+w*(T[6]+w*(T[8]+w*(T[10]+w*T[12])))));
+ s = z*x;
+ r = y + z*(s*(r+v)+y);
+ r += T[0]*s;
+ w = x+r;
+ if(ix>=0x3FE59428) {
+ v = (double)iy;
+ return (double)(1-((hx>>30)&2))*(v-2.0*(x-(w*w/(w+v)-r)));
+ }
+ if(iy==1) return w;
+ else { /* if allow error up to 2 ulp,
+ simply return -1.0/(x+r) here */
+ /* compute -1.0/(x+r) accurately */
+ double a,t;
+ z = w;
+ u.d = z;
+ __LO(u) = 0;
+ z = u.d;
+ v = r-(z - x); /* z+v = r+x */
+ t = a = -1.0/w; /* a = -1.0/w */
+ u.d = t;
+ __LO(u) = 0;
+ t = u.d;
+ s = 1.0+t*z;
+ return t+a*(s+t*v);
+ }
+}
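
For completeness, the tangent kernel's polynomial can be checked the same way: the standalone sketch below evaluates the |x| < 0.6744 path with tail y = 0 and iy = 1, using the even/odd split of the series shown in the code, and compares the result with the C library's tan(). T[] is copied from the file; the driver is illustrative only.

#include <math.h>
#include <stdio.h>

/* T[0..12] copied from k_tan.c */
static const double T[] = {
     3.33333333333334091986e-01,  1.33333333333201242699e-01,
     5.39682539762260521377e-02,  2.18694882948595424599e-02,
     8.86323982359930005737e-03,  3.59207910759131235356e-03,
     1.45620945432529025516e-03,  5.88041240820264096874e-04,
     2.46463134818469906812e-04,  7.81794442939557092300e-05,
     7.14072491382608190305e-05, -1.85586374855275456654e-05,
     2.59073051863633712884e-05,
};

int main(void) {
    double x = 0.5, y = 0.0;          /* |x| < 0.6744, no tail */
    double z = x*x, w = z*z;
    /* odd- and even-indexed halves of the series, as in the file */
    double r = T[1]+w*(T[3]+w*(T[5]+w*(T[7]+w*(T[9]+w*T[11]))));
    double v = z*(T[2]+w*(T[4]+w*(T[6]+w*(T[8]+w*(T[10]+w*T[12])))));
    double s = z*x;
    r  = y + z*(s*(r+v)+y);
    r += T[0]*s;

    printf("polynomial: %.17g\n", x + r);
    printf("tan(x):     %.17g\n", tan(x));
    return 0;
}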
diff --git a/src/third_party/js-1.7/fdlibm/s_asinh.c b/src/third_party/js-1.7/fdlibm/s_asinh.c
new file mode 100644
index 00000000000..fdf70a91cf0
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/s_asinh.c
@@ -0,0 +1,101 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_asinh.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* asinh(x)
+ * Method :
+ * Based on
+ * asinh(x) = sign(x) * log [ |x| + sqrt(x*x+1) ]
+ * we have
+ * asinh(x) := x if 1+x*x=1,
+ *	 := sign(x)*(log(|x|)+ln2) for large |x|, else
+ *	 := sign(x)*log(2|x|+1/(|x|+sqrt(x*x+1))) if |x|>2, else
+ * := sign(x)*log1p(|x| + x^2/(1 + sqrt(1+x^2)))
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+one = 1.00000000000000000000e+00, /* 0x3FF00000, 0x00000000 */
+ln2 = 6.93147180559945286227e-01, /* 0x3FE62E42, 0xFEFA39EF */
+really_big= 1.00000000000000000000e+300;
+
+#ifdef __STDC__
+ double fd_asinh(double x)
+#else
+ double fd_asinh(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ double t,w;
+ int hx,ix;
+ u.d = x;
+ hx = __HI(u);
+ ix = hx&0x7fffffff;
+ if(ix>=0x7ff00000) return x+x; /* x is inf or NaN */
+ if(ix< 0x3e300000) { /* |x|<2**-28 */
+ if(really_big+x>one) return x; /* return x inexact except 0 */
+ }
+ if(ix>0x41b00000) { /* |x| > 2**28 */
+ w = __ieee754_log(fd_fabs(x))+ln2;
+ } else if (ix>0x40000000) { /* 2**28 > |x| > 2.0 */
+ t = fd_fabs(x);
+ w = __ieee754_log(2.0*t+one/(fd_sqrt(x*x+one)+t));
+ } else { /* 2.0 > |x| > 2**-28 */
+ t = x*x;
+ w =fd_log1p(fd_fabs(x)+t/(one+fd_sqrt(one+t)));
+ }
+ if(hx>0) return w; else return -w;
+}
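
The identity this method is based on, asinh(x) = sign(x) * log(|x| + sqrt(x*x + 1)), can be evaluated directly as a reference; the range splitting in the file exists only to preserve accuracy at the extremes. The sketch below is illustrative and uses the C library's asinh() (C99) for comparison.

#include <math.h>
#include <stdio.h>

/* Direct use of the defining identity; fine for moderate |x|, but it loses
 * accuracy for tiny or huge arguments, which is what the cases above avoid. */
static double naive_asinh(double x) {
    double ax = fabs(x);
    double r  = log(ax + sqrt(ax*ax + 1.0));
    return x < 0.0 ? -r : r;
}

int main(void) {
    double xs[] = { 0.5, 3.0, 1.0e6 };
    for (int i = 0; i < 3; i++)
        printf("x=%g  naive=%.17g  asinh=%.17g\n",
               xs[i], naive_asinh(xs[i]), asinh(xs[i]));
    return 0;
}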
diff --git a/src/third_party/js-1.7/fdlibm/s_atan.c b/src/third_party/js-1.7/fdlibm/s_atan.c
new file mode 100644
index 00000000000..99a00c68646
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/s_atan.c
@@ -0,0 +1,175 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_atan.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+
+/* atan(x)
+ * Method
+ * 1. Reduce x to positive by atan(x) = -atan(-x).
+ * 2. According to the integer k=4t+0.25 chopped, t=x, the argument
+ * is further reduced to one of the following intervals and the
+ * arctangent of t is evaluated by the corresponding formula:
+ *
+ * [0,7/16] atan(x) = t-t^3*(a1+t^2*(a2+...(a10+t^2*a11)...)
+ * [7/16,11/16] atan(x) = atan(1/2) + atan( (t-0.5)/(1+t/2) )
+ * [11/16,19/16]   atan(x) = atan( 1 ) + atan( (t-1)/(1+t) )
+ * [19/16,39/16] atan(x) = atan(3/2) + atan( (t-1.5)/(1+1.5t) )
+ * [39/16,INF] atan(x) = atan(INF) + atan( -1/t )
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double atanhi[] = {
+#else
+static double atanhi[] = {
+#endif
+ 4.63647609000806093515e-01, /* atan(0.5)hi 0x3FDDAC67, 0x0561BB4F */
+ 7.85398163397448278999e-01, /* atan(1.0)hi 0x3FE921FB, 0x54442D18 */
+ 9.82793723247329054082e-01, /* atan(1.5)hi 0x3FEF730B, 0xD281F69B */
+ 1.57079632679489655800e+00, /* atan(inf)hi 0x3FF921FB, 0x54442D18 */
+};
+
+#ifdef __STDC__
+static const double atanlo[] = {
+#else
+static double atanlo[] = {
+#endif
+ 2.26987774529616870924e-17, /* atan(0.5)lo 0x3C7A2B7F, 0x222F65E2 */
+ 3.06161699786838301793e-17, /* atan(1.0)lo 0x3C81A626, 0x33145C07 */
+ 1.39033110312309984516e-17, /* atan(1.5)lo 0x3C700788, 0x7AF0CBBD */
+ 6.12323399573676603587e-17, /* atan(inf)lo 0x3C91A626, 0x33145C07 */
+};
+
+#ifdef __STDC__
+static const double aT[] = {
+#else
+static double aT[] = {
+#endif
+ 3.33333333333329318027e-01, /* 0x3FD55555, 0x5555550D */
+ -1.99999999998764832476e-01, /* 0xBFC99999, 0x9998EBC4 */
+ 1.42857142725034663711e-01, /* 0x3FC24924, 0x920083FF */
+ -1.11111104054623557880e-01, /* 0xBFBC71C6, 0xFE231671 */
+ 9.09088713343650656196e-02, /* 0x3FB745CD, 0xC54C206E */
+ -7.69187620504482999495e-02, /* 0xBFB3B0F2, 0xAF749A6D */
+ 6.66107313738753120669e-02, /* 0x3FB10D66, 0xA0D03D51 */
+ -5.83357013379057348645e-02, /* 0xBFADDE2D, 0x52DEFD9A */
+ 4.97687799461593236017e-02, /* 0x3FA97B4B, 0x24760DEB */
+ -3.65315727442169155270e-02, /* 0xBFA2B444, 0x2C6A6C2F */
+ 1.62858201153657823623e-02, /* 0x3F90AD3A, 0xE322DA11 */
+};
+
+#ifdef __STDC__
+ static const double
+#else
+ static double
+#endif
+one = 1.0,
+really_big = 1.0e300;
+
+#ifdef __STDC__
+ double fd_atan(double x)
+#else
+ double fd_atan(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ double w,s1,s2,z;
+ int ix,hx,id;
+
+ u.d = x;
+ hx = __HI(u);
+ ix = hx&0x7fffffff;
+ if(ix>=0x44100000) { /* if |x| >= 2^66 */
+ u.d = x;
+ if(ix>0x7ff00000||
+ (ix==0x7ff00000&&(__LO(u)!=0)))
+ return x+x; /* NaN */
+ if(hx>0) return atanhi[3]+atanlo[3];
+ else return -atanhi[3]-atanlo[3];
+ } if (ix < 0x3fdc0000) { /* |x| < 0.4375 */
+ if (ix < 0x3e200000) { /* |x| < 2^-29 */
+ if(really_big+x>one) return x; /* raise inexact */
+ }
+ id = -1;
+ } else {
+ x = fd_fabs(x);
+ if (ix < 0x3ff30000) { /* |x| < 1.1875 */
+ if (ix < 0x3fe60000) { /* 7/16 <=|x|<11/16 */
+ id = 0; x = (2.0*x-one)/(2.0+x);
+ } else { /* 11/16<=|x|< 19/16 */
+ id = 1; x = (x-one)/(x+one);
+ }
+ } else {
+ if (ix < 0x40038000) { /* |x| < 2.4375 */
+ id = 2; x = (x-1.5)/(one+1.5*x);
+ } else { /* 2.4375 <= |x| < 2^66 */
+ id = 3; x = -1.0/x;
+ }
+ }}
+ /* end of argument reduction */
+ z = x*x;
+ w = z*z;
+ /* break sum from i=0 to 10 aT[i]z**(i+1) into odd and even poly */
+ s1 = z*(aT[0]+w*(aT[2]+w*(aT[4]+w*(aT[6]+w*(aT[8]+w*aT[10])))));
+ s2 = w*(aT[1]+w*(aT[3]+w*(aT[5]+w*(aT[7]+w*aT[9]))));
+ if (id<0) return x - x*(s1+s2);
+ else {
+ z = atanhi[id] - ((x*(s1+s2) - atanlo[id]) - x);
+ return (hx<0)? -z:z;
+ }
+}
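
One row of the reduction table above can be checked numerically: for t in [11/16, 19/16], atan(t) = atan(1) + atan((t-1)/(1+t)). The standalone sketch below verifies that identity against the C library's atan(); the chosen test value is arbitrary.

#include <math.h>
#include <stdio.h>

int main(void) {
    double t = 1.1;                               /* inside [11/16, 19/16] */
    double reduced = atan(1.0) + atan((t - 1.0) / (1.0 + t));
    printf("reduced: %.17g\n", reduced);
    printf("direct:  %.17g\n", atan(t));
    return 0;
}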
diff --git a/src/third_party/js-1.7/fdlibm/s_cbrt.c b/src/third_party/js-1.7/fdlibm/s_cbrt.c
new file mode 100644
index 00000000000..4aed19b1bb1
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/s_cbrt.c
@@ -0,0 +1,133 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_cbrt.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+
+#include "fdlibm.h"
+
+/* cbrt(x)
+ * Return cube root of x
+ */
+#ifdef __STDC__
+static const unsigned
+#else
+static unsigned
+#endif
+ B1 = 715094163, /* B1 = (682-0.03306235651)*2**20 */
+ B2 = 696219795; /* B2 = (664-0.03306235651)*2**20 */
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+C = 5.42857142857142815906e-01, /* 19/35 = 0x3FE15F15, 0xF15F15F1 */
+D = -7.05306122448979611050e-01, /* -864/1225 = 0xBFE691DE, 0x2532C834 */
+E = 1.41428571428571436819e+00, /* 99/70 = 0x3FF6A0EA, 0x0EA0EA0F */
+F = 1.60714285714285720630e+00, /* 45/28 = 0x3FF9B6DB, 0x6DB6DB6E */
+G = 3.57142857142857150787e-01; /* 5/14 = 0x3FD6DB6D, 0xB6DB6DB7 */
+
+#ifdef __STDC__
+ double fd_cbrt(double x)
+#else
+ double fd_cbrt(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ int hx;
+ double r,s,t=0.0,w;
+ unsigned sign;
+
+ u.d = x;
+ hx = __HI(u); /* high word of x */
+ sign=hx&0x80000000; /* sign= sign(x) */
+ hx ^=sign;
+ if(hx>=0x7ff00000) return(x+x); /* cbrt(NaN,INF) is itself */
+ if((hx|__LO(u))==0) {
+ x = u.d;
+ return(x); /* cbrt(0) is itself */
+ }
+ u.d = x;
+ __HI(u) = hx; /* x <- |x| */
+ x = u.d;
+ /* rough cbrt to 5 bits */
+ if(hx<0x00100000) /* subnormal number */
+ {u.d = t; __HI(u)=0x43500000; t=u.d; /* set t= 2**54 */
+	 t*=x; u.d = t; __HI(u)=__HI(u)/3+B2; t = u.d;
+ }
+ else {
+ u.d = t; __HI(u)=hx/3+B1; t = u.d;
+ }
+
+
+ /* new cbrt to 23 bits, may be implemented in single precision */
+ r=t*t/x;
+ s=C+r*t;
+ t*=G+F/(s+E+D/s);
+
+ /* chopped to 20 bits and make it larger than cbrt(x) */
+ u.d = t;
+ __LO(u)=0; __HI(u)+=0x00000001;
+ t = u.d;
+
+ /* one step newton iteration to 53 bits with error less than 0.667 ulps */
+ s=t*t; /* t*t is exact */
+ r=x/s;
+ w=t+t;
+ r=(r-t)/(w+r); /* r-s is exact */
+ t=t+t*r;
+
+ /* restore the sign bit */
+ u.d = t;
+ __HI(u) |= sign;
+ t = u.d;
+ return(t);
+}
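
The seed trick above divides the exponent field by three in integer arithmetic; B1 folds in the bias correction so the result is already a cube root good to about 5 bits. A minimal standalone sketch of that seed plus one explicit Newton step, illustrative only and not part of fdlibm (it assumes IEEE-754 doubles viewed through memcpy instead of the fd_twoints union, and only handles normal, positive inputs):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <math.h>

    /* Hypothetical helper, not part of fdlibm: reproduce the "rough cbrt to
     * 5 bits" seed on a normal, positive double, then refine it with one
     * Newton step t <- t - (t^3 - x) / (3*t^2). */
    static double rough_cbrt_seed(double x)
    {
        uint64_t bits;
        memcpy(&bits, &x, sizeof bits);            /* assumes IEEE-754 doubles */
        uint32_t hi = (uint32_t)(bits >> 32);
        hi = hi / 3 + 715094163u;                  /* B1 = (682-0.03306235651)*2**20 */
        bits = (uint64_t)hi << 32;                 /* low word zeroed */
        double t;
        memcpy(&t, &bits, sizeof t);
        return t;
    }

    int main(void)
    {
        double x = 27.0, t = rough_cbrt_seed(x);
        printf("seed      = %.17g\n", t);          /* ~3.08, within 2**-5 of 3 */
        t -= (t*t*t - x) / (3.0*t*t);              /* one Newton refinement */
        printf("refined   = %.17g\n", t);
        printf("libm cbrt = %.17g\n", cbrt(x));
        return 0;
    }
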
diff --git a/src/third_party/js-1.7/fdlibm/s_ceil.c b/src/third_party/js-1.7/fdlibm/s_ceil.c
new file mode 100644
index 00000000000..826bcac6c4a
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/s_ceil.c
@@ -0,0 +1,120 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_ceil.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * ceil(x)
+ * Return x rounded toward +inf to integral value
+ * Method:
+ * Bit twiddling.
+ * Exception:
+ * Inexact flag raised if x not equal to ceil(x).
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double really_big = 1.0e300;
+#else
+static double really_big = 1.0e300;
+#endif
+
+#ifdef __STDC__
+ double fd_ceil(double x)
+#else
+ double fd_ceil(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ int i0,i1,j0;
+ unsigned i,j;
+ u.d = x;
+ i0 = __HI(u);
+ i1 = __LO(u);
+ j0 = ((i0>>20)&0x7ff)-0x3ff;
+ if(j0<20) {
+ if(j0<0) { /* raise inexact if x != 0 */
+ if(really_big+x>0.0) {/* result is -0 or 1 when 0<|x|<1 */
+ if(i0<0) {i0=0x80000000;i1=0;}
+ else if((i0|i1)!=0) { i0=0x3ff00000;i1=0;}
+ }
+ } else {
+ i = (0x000fffff)>>j0;
+ if(((i0&i)|i1)==0) return x; /* x is integral */
+ if(really_big+x>0.0) { /* raise inexact flag */
+ if(i0>0) i0 += (0x00100000)>>j0;
+ i0 &= (~i); i1=0;
+ }
+ }
+ } else if (j0>51) {
+ if(j0==0x400) return x+x; /* inf or NaN */
+ else return x; /* x is integral */
+ } else {
+ i = ((unsigned)(0xffffffff))>>(j0-20);
+ if((i1&i)==0) return x; /* x is integral */
+ if(really_big+x>0.0) { /* raise inexact flag */
+ if(i0>0) {
+ if(j0==20) i0+=1;
+ else {
+ j = i1 + (1<<(52-j0));
+ if((int)j<i1) i0+=1; /* got a carry */
+ i1 = j;
+ }
+ }
+ i1 &= (~i);
+ }
+ }
+ u.d = x;
+ __HI(u) = i0;
+ __LO(u) = i1;
+ x = u.d;
+ return x;
+}
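
For exponents below 20 the integer part of x fits entirely in the high word, so ceil reduces to bumping the magnitude (for positive, fractional x) and then clearing the fraction bits. A cut-down sketch of just that case, illustrative only: tiny_ceil is a hypothetical helper valid only for positive x in [1, 2**20), it uses a memcpy bit view instead of fd_twoints, and it omits the really_big+x inexact-flag trick.

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* Hypothetical cut-down ceil, not part of fdlibm: only the "j0 < 20"
     * branch above, for positive x with 1 <= x < 2**20. */
    static double tiny_ceil(double x)
    {
        uint64_t bits;
        memcpy(&bits, &x, sizeof bits);
        int32_t  i0 = (int32_t)(bits >> 32);
        uint32_t i1 = (uint32_t)bits;
        int j0 = ((i0 >> 20) & 0x7ff) - 0x3ff;     /* unbiased exponent */
        uint32_t frac = 0x000fffffu >> j0;         /* fractional bits in high word */
        if (((i0 & frac) | i1) == 0) return x;     /* already integral */
        i0 += 0x00100000 >> j0;                    /* bump the magnitude up ... */
        i0 &= ~frac;                               /* ... then drop the fraction */
        i1 = 0;
        bits = ((uint64_t)(uint32_t)i0 << 32) | i1;
        memcpy(&x, &bits, sizeof x);
        return x;
    }

    int main(void)
    {
        printf("%g %g %g\n", tiny_ceil(2.25), tiny_ceil(7.0), tiny_ceil(1048575.5));
        /* expected: 3 7 1.04858e+06 */
        return 0;
    }
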
diff --git a/src/third_party/js-1.7/fdlibm/s_copysign.c b/src/third_party/js-1.7/fdlibm/s_copysign.c
new file mode 100644
index 00000000000..8869a1233b9
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/s_copysign.c
@@ -0,0 +1,72 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_copysign.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * copysign(double x, double y)
+ * copysign(x,y) returns a value with the magnitude of x and
+ * with the sign bit of y.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double fd_copysign(double x, double y)
+#else
+ double fd_copysign(x,y)
+ double x,y;
+#endif
+{
+ fd_twoints ux, uy;
+ ux.d = x; uy.d = y;
+ __HI(ux) = (__HI(ux)&0x7fffffff)|(__HI(uy)&0x80000000);
+ x = ux.d;
+ return x;
+}
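
A small usage note, assuming the standard C library's copysign rather than the fd_-prefixed build above: only the sign bit of y is consulted, so even a negative zero transfers its sign.

    #include <stdio.h>
    #include <math.h>

    int main(void)
    {
        /* copysign keeps |x| and takes y's sign bit, so -0.0 transfers its
         * sign; an ordinary y < 0 test could not distinguish it from +0.0. */
        printf("%g\n", copysign(3.5, -0.0));   /* -3.5 */
        printf("%g\n", copysign(-2.0, 1.0));   /*  2.0 */
        return 0;
    }
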
diff --git a/src/third_party/js-1.7/fdlibm/s_cos.c b/src/third_party/js-1.7/fdlibm/s_cos.c
new file mode 100644
index 00000000000..3fb0a6b15a1
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/s_cos.c
@@ -0,0 +1,118 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_cos.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* cos(x)
+ * Return cosine function of x.
+ *
+ * kernel function:
+ * __kernel_sin ... sine function on [-pi/4,pi/4]
+ * __kernel_cos ... cosine function on [-pi/4,pi/4]
+ * __ieee754_rem_pio2 ... argument reduction routine
+ *
+ * Method.
+ * Let S,C and T denote the sin, cos and tan respectively on
+ * [-PI/4, +PI/4]. Reduce the argument x to y1+y2 = x-k*pi/2
+ * in [-pi/4 , +pi/4], and let n = k mod 4.
+ * We have
+ *
+ * n sin(x) cos(x) tan(x)
+ * ----------------------------------------------------------
+ * 0 S C T
+ * 1 C -S -1/T
+ * 2 -S -C T
+ * 3 -C S -1/T
+ * ----------------------------------------------------------
+ *
+ * Special cases:
+ * Let trig be any of sin, cos, or tan.
+ * trig(+-INF) is NaN, with signals;
+ * trig(NaN) is that NaN;
+ *
+ * Accuracy:
+ * TRIG(x) returns trig(x) nearly rounded
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double fd_cos(double x)
+#else
+ double fd_cos(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ double y[2],z=0.0;
+ int n, ix;
+
+ /* High word of x. */
+ u.d = x;
+ ix = __HI(u);
+
+ /* |x| ~< pi/4 */
+ ix &= 0x7fffffff;
+ if(ix <= 0x3fe921fb) return __kernel_cos(x,z);
+
+ /* cos(Inf or NaN) is NaN */
+ else if (ix>=0x7ff00000) return x-x;
+
+ /* argument reduction needed */
+ else {
+ n = __ieee754_rem_pio2(x,y);
+ switch(n&3) {
+ case 0: return __kernel_cos(y[0],y[1]);
+ case 1: return -__kernel_sin(y[0],y[1],1);
+ case 2: return -__kernel_cos(y[0],y[1]);
+ default:
+ return __kernel_sin(y[0],y[1],1);
+ }
+ }
+}
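
The quadrant table above is the whole story once the argument has been reduced. A rough sketch of the same dispatch using the C library's sin/cos on a naively reduced argument (illustrative only; fdlibm uses __ieee754_rem_pio2 and the double-double kernels, so this version is accurate only for small |x|):

    #include <stdio.h>
    #include <math.h>

    #ifndef M_PI
    #define M_PI 3.14159265358979323846
    #endif

    /* Hypothetical sketch of the quadrant dispatch from the table above. */
    static double cos_by_table(double x)
    {
        double k = floor(x / (M_PI / 2.0) + 0.5);  /* nearest multiple of pi/2 */
        double y = x - k * (M_PI / 2.0);           /* reduced to roughly [-pi/4, pi/4] */
        switch ((long)k & 3) {                     /* n = k mod 4 */
        case 0:  return  cos(y);
        case 1:  return -sin(y);
        case 2:  return -cos(y);
        default: return  sin(y);
        }
    }

    int main(void)
    {
        for (double x = 0.0; x < 7.0; x += 1.0)
            printf("x=%g  table=%.15f  libm=%.15f\n", x, cos_by_table(x), cos(x));
        return 0;
    }
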
diff --git a/src/third_party/js-1.7/fdlibm/s_erf.c b/src/third_party/js-1.7/fdlibm/s_erf.c
new file mode 100644
index 00000000000..6eae8de3b10
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/s_erf.c
@@ -0,0 +1,356 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_erf.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* double erf(double x)
+ * double erfc(double x)
+ * x
+ * 2 |\
+ * erf(x) = --------- | exp(-t*t)dt
+ * sqrt(pi) \|
+ * 0
+ *
+ * erfc(x) = 1-erf(x)
+ * Note that
+ * erf(-x) = -erf(x)
+ * erfc(-x) = 2 - erfc(x)
+ *
+ * Method:
+ * 1. For |x| in [0, 0.84375]
+ * erf(x) = x + x*R(x^2)
+ * erfc(x) = 1 - erf(x) if x in [-.84375,0.25]
+ * = 0.5 + ((0.5-x)-x*R) if x in [0.25,0.84375]
+ * where R = P/Q where P is an odd poly of degree 8 and
+ * Q is an odd poly of degree 10.
+ * -57.90
+ * | R - (erf(x)-x)/x | <= 2
+ *
+ *
+ * Remark. The formula is derived by noting
+ * erf(x) = (2/sqrt(pi))*(x - x^3/3 + x^5/10 - x^7/42 + ....)
+ * and that
+ * 2/sqrt(pi) = 1.128379167095512573896158903121545171688
+ * is close to one. The interval is chosen because the fixed
+ * point of erf(x) is near 0.6174 (i.e., erf(x)=x when x is
+ * near 0.6174), and by some experiment, 0.84375 is chosen to
+ * guarantee the error is less than one ulp for erf.
+ *
+ * 2. For |x| in [0.84375,1.25], let s = |x| - 1, and
+ * c = 0.84506291151 rounded to single (24 bits)
+ * erf(x) = sign(x) * (c + P1(s)/Q1(s))
+ * erfc(x) = (1-c) - P1(s)/Q1(s) if x > 0
+ * 1+(c+P1(s)/Q1(s)) if x < 0
+ * |P1/Q1 - (erf(|x|)-c)| <= 2**-59.06
+ * Remark: here we use the taylor series expansion at x=1.
+ * erf(1+s) = erf(1) + s*Poly(s)
+ * = 0.845.. + P1(s)/Q1(s)
+ * That is, we use rational approximation to approximate
+ * erf(1+s) - (c = (single)0.84506291151)
+ * Note that |P1/Q1|< 0.078 for x in [0.84375,1.25]
+ * where
+ * P1(s) = degree 6 poly in s
+ * Q1(s) = degree 6 poly in s
+ *
+ * 3. For x in [1.25,1/0.35(~2.857143)],
+ * erfc(x) = (1/x)*exp(-x*x-0.5625+R1/S1)
+ * erf(x) = 1 - erfc(x)
+ * where
+ * R1(z) = degree 7 poly in z, (z=1/x^2)
+ * S1(z) = degree 8 poly in z
+ *
+ * 4. For x in [1/0.35,28]
+ * erfc(x) = (1/x)*exp(-x*x-0.5625+R2/S2) if x > 0
+ * = 2.0 - (1/x)*exp(-x*x-0.5625+R2/S2) if -6<x<0
+ * = 2.0 - tiny (if x <= -6)
+ * erf(x) = sign(x)*(1.0 - erfc(x)) if x < 6, else
+ * erf(x) = sign(x)*(1.0 - tiny)
+ * where
+ * R2(z) = degree 6 poly in z, (z=1/x^2)
+ * S2(z) = degree 7 poly in z
+ *
+ * Note1:
+ * To compute exp(-x*x-0.5625+R/S), let s be a single
+ * precision number and s := x; then
+ * -x*x = -s*s + (s-x)*(s+x)
+ * exp(-x*x-0.5625+R/S) =
+ * exp(-s*s-0.5625)*exp((s-x)*(s+x)+R/S);
+ * Note2:
+ * Here 4 and 5 make use of the asymptotic series
+ * exp(-x*x)
+ * erfc(x) ~ ---------- * ( 1 + Poly(1/x^2) )
+ * x*sqrt(pi)
+ * We use rational approximation to approximate
+ * g(s)=f(1/x^2) = log(erfc(x)*x) - x*x + 0.5625
+ * Here is the error bound for R1/S1 and R2/S2
+ * |R1/S1 - f(x)| < 2**(-62.57)
+ * |R2/S2 - f(x)| < 2**(-61.52)
+ *
+ * 5. For inf > x >= 28
+ * erf(x) = sign(x) *(1 - tiny) (raise inexact)
+ * erfc(x) = tiny*tiny (raise underflow) if x > 0
+ * = 2 - tiny if x<0
+ *
+ * 6. Special case:
+ * erf(0) = 0, erf(inf) = 1, erf(-inf) = -1,
+ * erfc(0) = 1, erfc(inf) = 0, erfc(-inf) = 2,
+ * erfc/erf(NaN) is NaN
+ */
+
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+tiny = 1e-300,
+half= 5.00000000000000000000e-01, /* 0x3FE00000, 0x00000000 */
+one = 1.00000000000000000000e+00, /* 0x3FF00000, 0x00000000 */
+two = 2.00000000000000000000e+00, /* 0x40000000, 0x00000000 */
+ /* c = (float)0.84506291151 */
+erx = 8.45062911510467529297e-01, /* 0x3FEB0AC1, 0x60000000 */
+/*
+ * Coefficients for approximation to erf on [0,0.84375]
+ */
+efx = 1.28379167095512586316e-01, /* 0x3FC06EBA, 0x8214DB69 */
+efx8= 1.02703333676410069053e+00, /* 0x3FF06EBA, 0x8214DB69 */
+pp0 = 1.28379167095512558561e-01, /* 0x3FC06EBA, 0x8214DB68 */
+pp1 = -3.25042107247001499370e-01, /* 0xBFD4CD7D, 0x691CB913 */
+pp2 = -2.84817495755985104766e-02, /* 0xBF9D2A51, 0xDBD7194F */
+pp3 = -5.77027029648944159157e-03, /* 0xBF77A291, 0x236668E4 */
+pp4 = -2.37630166566501626084e-05, /* 0xBEF8EAD6, 0x120016AC */
+qq1 = 3.97917223959155352819e-01, /* 0x3FD97779, 0xCDDADC09 */
+qq2 = 6.50222499887672944485e-02, /* 0x3FB0A54C, 0x5536CEBA */
+qq3 = 5.08130628187576562776e-03, /* 0x3F74D022, 0xC4D36B0F */
+qq4 = 1.32494738004321644526e-04, /* 0x3F215DC9, 0x221C1A10 */
+qq5 = -3.96022827877536812320e-06, /* 0xBED09C43, 0x42A26120 */
+/*
+ * Coefficients for approximation to erf in [0.84375,1.25]
+ */
+pa0 = -2.36211856075265944077e-03, /* 0xBF6359B8, 0xBEF77538 */
+pa1 = 4.14856118683748331666e-01, /* 0x3FDA8D00, 0xAD92B34D */
+pa2 = -3.72207876035701323847e-01, /* 0xBFD7D240, 0xFBB8C3F1 */
+pa3 = 3.18346619901161753674e-01, /* 0x3FD45FCA, 0x805120E4 */
+pa4 = -1.10894694282396677476e-01, /* 0xBFBC6398, 0x3D3E28EC */
+pa5 = 3.54783043256182359371e-02, /* 0x3FA22A36, 0x599795EB */
+pa6 = -2.16637559486879084300e-03, /* 0xBF61BF38, 0x0A96073F */
+qa1 = 1.06420880400844228286e-01, /* 0x3FBB3E66, 0x18EEE323 */
+qa2 = 5.40397917702171048937e-01, /* 0x3FE14AF0, 0x92EB6F33 */
+qa3 = 7.18286544141962662868e-02, /* 0x3FB2635C, 0xD99FE9A7 */
+qa4 = 1.26171219808761642112e-01, /* 0x3FC02660, 0xE763351F */
+qa5 = 1.36370839120290507362e-02, /* 0x3F8BEDC2, 0x6B51DD1C */
+qa6 = 1.19844998467991074170e-02, /* 0x3F888B54, 0x5735151D */
+/*
+ * Coefficients for approximation to erfc in [1.25,1/0.35]
+ */
+ra0 = -9.86494403484714822705e-03, /* 0xBF843412, 0x600D6435 */
+ra1 = -6.93858572707181764372e-01, /* 0xBFE63416, 0xE4BA7360 */
+ra2 = -1.05586262253232909814e+01, /* 0xC0251E04, 0x41B0E726 */
+ra3 = -6.23753324503260060396e+01, /* 0xC04F300A, 0xE4CBA38D */
+ra4 = -1.62396669462573470355e+02, /* 0xC0644CB1, 0x84282266 */
+ra5 = -1.84605092906711035994e+02, /* 0xC067135C, 0xEBCCABB2 */
+ra6 = -8.12874355063065934246e+01, /* 0xC0545265, 0x57E4D2F2 */
+ra7 = -9.81432934416914548592e+00, /* 0xC023A0EF, 0xC69AC25C */
+sa1 = 1.96512716674392571292e+01, /* 0x4033A6B9, 0xBD707687 */
+sa2 = 1.37657754143519042600e+02, /* 0x4061350C, 0x526AE721 */
+sa3 = 4.34565877475229228821e+02, /* 0x407B290D, 0xD58A1A71 */
+sa4 = 6.45387271733267880336e+02, /* 0x40842B19, 0x21EC2868 */
+sa5 = 4.29008140027567833386e+02, /* 0x407AD021, 0x57700314 */
+sa6 = 1.08635005541779435134e+02, /* 0x405B28A3, 0xEE48AE2C */
+sa7 = 6.57024977031928170135e+00, /* 0x401A47EF, 0x8E484A93 */
+sa8 = -6.04244152148580987438e-02, /* 0xBFAEEFF2, 0xEE749A62 */
+/*
+ * Coefficients for approximation to erfc in [1/.35,28]
+ */
+rb0 = -9.86494292470009928597e-03, /* 0xBF843412, 0x39E86F4A */
+rb1 = -7.99283237680523006574e-01, /* 0xBFE993BA, 0x70C285DE */
+rb2 = -1.77579549177547519889e+01, /* 0xC031C209, 0x555F995A */
+rb3 = -1.60636384855821916062e+02, /* 0xC064145D, 0x43C5ED98 */
+rb4 = -6.37566443368389627722e+02, /* 0xC083EC88, 0x1375F228 */
+rb5 = -1.02509513161107724954e+03, /* 0xC0900461, 0x6A2E5992 */
+rb6 = -4.83519191608651397019e+02, /* 0xC07E384E, 0x9BDC383F */
+sb1 = 3.03380607434824582924e+01, /* 0x403E568B, 0x261D5190 */
+sb2 = 3.25792512996573918826e+02, /* 0x40745CAE, 0x221B9F0A */
+sb3 = 1.53672958608443695994e+03, /* 0x409802EB, 0x189D5118 */
+sb4 = 3.19985821950859553908e+03, /* 0x40A8FFB7, 0x688C246A */
+sb5 = 2.55305040643316442583e+03, /* 0x40A3F219, 0xCEDF3BE6 */
+sb6 = 4.74528541206955367215e+02, /* 0x407DA874, 0xE79FE763 */
+sb7 = -2.24409524465858183362e+01; /* 0xC03670E2, 0x42712D62 */
+
+#ifdef __STDC__
+ double fd_erf(double x)
+#else
+ double fd_erf(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ int hx,ix,i;
+ double R,S,P,Q,s,y,z,r;
+ u.d = x;
+ hx = __HI(u);
+ ix = hx&0x7fffffff;
+ if(ix>=0x7ff00000) { /* erf(nan)=nan */
+ i = ((unsigned)hx>>31)<<1;
+ return (double)(1-i)+one/x; /* erf(+-inf)=+-1 */
+ }
+
+ if(ix < 0x3feb0000) { /* |x|<0.84375 */
+ if(ix < 0x3e300000) { /* |x|<2**-28 */
+ if (ix < 0x00800000)
+ return 0.125*(8.0*x+efx8*x); /*avoid underflow */
+ return x + efx*x;
+ }
+ z = x*x;
+ r = pp0+z*(pp1+z*(pp2+z*(pp3+z*pp4)));
+ s = one+z*(qq1+z*(qq2+z*(qq3+z*(qq4+z*qq5))));
+ y = r/s;
+ return x + x*y;
+ }
+ if(ix < 0x3ff40000) { /* 0.84375 <= |x| < 1.25 */
+ s = fd_fabs(x)-one;
+ P = pa0+s*(pa1+s*(pa2+s*(pa3+s*(pa4+s*(pa5+s*pa6)))));
+ Q = one+s*(qa1+s*(qa2+s*(qa3+s*(qa4+s*(qa5+s*qa6)))));
+ if(hx>=0) return erx + P/Q; else return -erx - P/Q;
+ }
+ if (ix >= 0x40180000) { /* inf>|x|>=6 */
+ if(hx>=0) return one-tiny; else return tiny-one;
+ }
+ x = fd_fabs(x);
+ s = one/(x*x);
+ if(ix< 0x4006DB6E) { /* |x| < 1/0.35 */
+ R=ra0+s*(ra1+s*(ra2+s*(ra3+s*(ra4+s*(
+ ra5+s*(ra6+s*ra7))))));
+ S=one+s*(sa1+s*(sa2+s*(sa3+s*(sa4+s*(
+ sa5+s*(sa6+s*(sa7+s*sa8)))))));
+ } else { /* |x| >= 1/0.35 */
+ R=rb0+s*(rb1+s*(rb2+s*(rb3+s*(rb4+s*(
+ rb5+s*rb6)))));
+ S=one+s*(sb1+s*(sb2+s*(sb3+s*(sb4+s*(
+ sb5+s*(sb6+s*sb7))))));
+ }
+ z = x;
+ u.d = z;
+ __LO(u) = 0;
+ z = u.d;
+ r = __ieee754_exp(-z*z-0.5625)*__ieee754_exp((z-x)*(z+x)+R/S);
+ if(hx>=0) return one-r/x; else return r/x-one;
+}
+
+#ifdef __STDC__
+ double erfc(double x)
+#else
+ double erfc(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ int hx,ix;
+ double R,S,P,Q,s,y,z,r;
+ u.d = x;
+ hx = __HI(u);
+ ix = hx&0x7fffffff;
+ if(ix>=0x7ff00000) { /* erfc(nan)=nan */
+ /* erfc(+-inf)=0,2 */
+ return (double)(((unsigned)hx>>31)<<1)+one/x;
+ }
+
+ if(ix < 0x3feb0000) { /* |x|<0.84375 */
+ if(ix < 0x3c700000) /* |x|<2**-56 */
+ return one-x;
+ z = x*x;
+ r = pp0+z*(pp1+z*(pp2+z*(pp3+z*pp4)));
+ s = one+z*(qq1+z*(qq2+z*(qq3+z*(qq4+z*qq5))));
+ y = r/s;
+ if(hx < 0x3fd00000) { /* x<1/4 */
+ return one-(x+x*y);
+ } else {
+ r = x*y;
+ r += (x-half);
+ return half - r ;
+ }
+ }
+ if(ix < 0x3ff40000) { /* 0.84375 <= |x| < 1.25 */
+ s = fd_fabs(x)-one;
+ P = pa0+s*(pa1+s*(pa2+s*(pa3+s*(pa4+s*(pa5+s*pa6)))));
+ Q = one+s*(qa1+s*(qa2+s*(qa3+s*(qa4+s*(qa5+s*qa6)))));
+ if(hx>=0) {
+ z = one-erx; return z - P/Q;
+ } else {
+ z = erx+P/Q; return one+z;
+ }
+ }
+ if (ix < 0x403c0000) { /* |x|<28 */
+ x = fd_fabs(x);
+ s = one/(x*x);
+ if(ix< 0x4006DB6D) { /* |x| < 1/.35 ~ 2.857143*/
+ R=ra0+s*(ra1+s*(ra2+s*(ra3+s*(ra4+s*(
+ ra5+s*(ra6+s*ra7))))));
+ S=one+s*(sa1+s*(sa2+s*(sa3+s*(sa4+s*(
+ sa5+s*(sa6+s*(sa7+s*sa8)))))));
+ } else { /* |x| >= 1/.35 ~ 2.857143 */
+ if(hx<0&&ix>=0x40180000) return two-tiny;/* x < -6 */
+ R=rb0+s*(rb1+s*(rb2+s*(rb3+s*(rb4+s*(
+ rb5+s*rb6)))));
+ S=one+s*(sb1+s*(sb2+s*(sb3+s*(sb4+s*(
+ sb5+s*(sb6+s*sb7))))));
+ }
+ z = x;
+ u.d = z;
+ __LO(u) = 0;
+ z = u.d;
+ r = __ieee754_exp(-z*z-0.5625)*
+ __ieee754_exp((z-x)*(z+x)+R/S);
+ if(hx>0) return r/x; else return two-r/x;
+ } else {
+ if(hx>0) return tiny*tiny; else return two-tiny;
+ }
+}
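
The rational approximations above are fitted against the Taylor series quoted in the header comment. A small check that sums that series directly and compares it with the C library's erf (illustrative only; this is the series the [0, 0.84375] fit is derived from, not the evaluation scheme fdlibm actually uses):

    #include <stdio.h>
    #include <math.h>

    /* Hypothetical check, not part of fdlibm: sum
     * erf(x) = (2/sqrt(pi)) * (x - x^3/3 + x^5/10 - x^7/42 + ...). */
    static double erf_series(double x)
    {
        double term = x, sum = x;                  /* n = 0 term: x/1 */
        for (int n = 1; n < 30; n++) {
            term *= -x * x / n;                    /* (-1)^n * x^(2n+1) / n! */
            sum  += term / (2 * n + 1);
        }
        return 2.0 / sqrt(acos(-1.0)) * sum;       /* 2/sqrt(pi) = 1.1283791670955126 */
    }

    int main(void)
    {
        for (double x = 0.1; x <= 0.9; x += 0.4)
            printf("x=%g  series=%.15f  erf=%.15f\n", x, erf_series(x), erf(x));
        return 0;
    }
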
diff --git a/src/third_party/js-1.7/fdlibm/s_expm1.c b/src/third_party/js-1.7/fdlibm/s_expm1.c
new file mode 100644
index 00000000000..578d2e144ae
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/s_expm1.c
@@ -0,0 +1,267 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_expm1.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* expm1(x)
+ * Returns exp(x)-1, the exponential of x minus 1.
+ *
+ * Method
+ * 1. Argument reduction:
+ * Given x, find r and integer k such that
+ *
+ * x = k*ln2 + r, |r| <= 0.5*ln2 ~ 0.34658
+ *
+ * Here a correction term c will be computed to compensate
+ * the error in r when rounded to a floating-point number.
+ *
+ * 2. Approximating expm1(r) by a special rational function on
+ * the interval [0,0.34658]:
+ * Since
+ * r*(exp(r)+1)/(exp(r)-1) = 2+ r^2/6 - r^4/360 + ...
+ * we define R1(r*r) by
+ * r*(exp(r)+1)/(exp(r)-1) = 2+ r^2/6 * R1(r*r)
+ * That is,
+ * R1(r**2) = 6/r *((exp(r)+1)/(exp(r)-1) - 2/r)
+ * = 6/r * ( 1 + 2.0*(1/(exp(r)-1) - 1/r))
+ * = 1 - r^2/60 + r^4/2520 - r^6/100800 + ...
+ * We use a special Reme algorithm on [0,0.347] to generate
+ * a polynomial of degree 5 in r*r to approximate R1. The
+ * maximum error of this polynomial approximation is bounded
+ * by 2**-61. In other words,
+ * R1(z) ~ 1.0 + Q1*z + Q2*z**2 + Q3*z**3 + Q4*z**4 + Q5*z**5
+ * where Q1 = -1.6666666666666567384E-2,
+ * Q2 = 3.9682539681370365873E-4,
+ * Q3 = -9.9206344733435987357E-6,
+ * Q4 = 2.5051361420808517002E-7,
+ * Q5 = -6.2843505682382617102E-9;
+ * (where z=r*r, and the values of Q1 to Q5 are listed below)
+ * with error bounded by
+ * | 5 | -61
+ * | 1.0+Q1*z+...+Q5*z - R1(z) | <= 2
+ * | |
+ *
+ * expm1(r) = exp(r)-1 is then computed by the following
+ * specific way which minimizes the accumulated rounding error:
+ * 2 3
+ * r r [ 3 - (R1 + R1*r/2) ]
+ * expm1(r) = r + --- + --- * [--------------------]
+ * 2 2 [ 6 - r*(3 - R1*r/2) ]
+ *
+ * To compensate the error in the argument reduction, we use
+ * expm1(r+c) = expm1(r) + c + expm1(r)*c
+ * ~ expm1(r) + c + r*c
+ * Thus c+r*c will be added in as the correction terms for
+ * expm1(r+c). Now rearrange the term to avoid optimization
+ * screw up:
+ * ( 2 2 )
+ * ({ ( r [ R1 - (3 - R1*r/2) ] ) } r )
+ * expm1(r+c)~r - ({r*(--- * [--------------------]-c)-c} - --- )
+ * ({ ( 2 [ 6 - r*(3 - R1*r/2) ] ) } 2 )
+ * ( )
+ *
+ * = r - E
+ * 3. Scale back to obtain expm1(x):
+ * From step 1, we have
+ * expm1(x) = either 2^k*[expm1(r)+1] - 1
+ * = or 2^k*[expm1(r) + (1-2^-k)]
+ * 4. Implementation notes:
+ * (A). To save one multiplication, we scale the coefficient Qi
+ * to Qi*2^i, and replace z by (x^2)/2.
+ * (B). To achieve maximum accuracy, we compute expm1(x) by
+ * (i) if x < -56*ln2, return -1.0, (raise inexact if x!=inf)
+ * (ii) if k=0, return r-E
+ * (iii) if k=-1, return 0.5*(r-E)-0.5
+ * (iv) if k=1 if r < -0.25, return 2*((r+0.5)- E)
+ * else return 1.0+2.0*(r-E);
+ * (v) if (k<-2||k>56) return 2^k(1-(E-r)) - 1 (or exp(x)-1)
+ * (vi) if k <= 20, return 2^k((1-2^-k)-(E-r)), else
+ * (vii) return 2^k(1-((E+2^-k)-r))
+ *
+ * Special cases:
+ * expm1(INF) is INF, expm1(NaN) is NaN;
+ * expm1(-INF) is -1, and
+ * for finite argument, only expm1(0)=0 is exact.
+ *
+ * Accuracy:
+ * according to an error analysis, the error is always less than
+ * 1 ulp (unit in the last place).
+ *
+ * Misc. info.
+ * For IEEE double
+ * if x > 7.09782712893383973096e+02 then expm1(x) overflow
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+one = 1.0,
+really_big = 1.0e+300,
+tiny = 1.0e-300,
+o_threshold = 7.09782712893383973096e+02,/* 0x40862E42, 0xFEFA39EF */
+ln2_hi = 6.93147180369123816490e-01,/* 0x3fe62e42, 0xfee00000 */
+ln2_lo = 1.90821492927058770002e-10,/* 0x3dea39ef, 0x35793c76 */
+invln2 = 1.44269504088896338700e+00,/* 0x3ff71547, 0x652b82fe */
+ /* scaled coefficients related to expm1 */
+Q1 = -3.33333333333331316428e-02, /* BFA11111 111110F4 */
+Q2 = 1.58730158725481460165e-03, /* 3F5A01A0 19FE5585 */
+Q3 = -7.93650757867487942473e-05, /* BF14CE19 9EAADBB7 */
+Q4 = 4.00821782732936239552e-06, /* 3ED0CFCA 86E65239 */
+Q5 = -2.01099218183624371326e-07; /* BE8AFDB7 6E09C32D */
+
+#ifdef __STDC__
+ double fd_expm1(double x)
+#else
+ double fd_expm1(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ double y,hi,lo,c,t,e,hxs,hfx,r1;
+ int k,xsb;
+ unsigned hx;
+
+ u.d = x;
+ hx = __HI(u); /* high word of x */
+ xsb = hx&0x80000000; /* sign bit of x */
+ if(xsb==0) y=x; else y= -x; /* y = |x| */
+ hx &= 0x7fffffff; /* high word of |x| */
+
+ /* filter out huge and non-finite argument */
+ if(hx >= 0x4043687A) { /* if |x|>=56*ln2 */
+ if(hx >= 0x40862E42) { /* if |x|>=709.78... */
+ if(hx>=0x7ff00000) {
+ u.d = x;
+ if(((hx&0xfffff)|__LO(u))!=0)
+ return x+x; /* NaN */
+ else return (xsb==0)? x:-1.0;/* exp(+-inf)={inf,-1} */
+ }
+ if(x > o_threshold) return really_big*really_big; /* overflow */
+ }
+ if(xsb!=0) { /* x < -56*ln2, return -1.0 with inexact */
+ if(x+tiny<0.0) /* raise inexact */
+ return tiny-one; /* return -1 */
+ }
+ }
+
+ /* argument reduction */
+ if(hx > 0x3fd62e42) { /* if |x| > 0.5 ln2 */
+ if(hx < 0x3FF0A2B2) { /* and |x| < 1.5 ln2 */
+ if(xsb==0)
+ {hi = x - ln2_hi; lo = ln2_lo; k = 1;}
+ else
+ {hi = x + ln2_hi; lo = -ln2_lo; k = -1;}
+ } else {
+ k = (int)(invln2*x+((xsb==0)?0.5:-0.5));
+ t = k;
+ hi = x - t*ln2_hi; /* t*ln2_hi is exact here */
+ lo = t*ln2_lo;
+ }
+ x = hi - lo;
+ c = (hi-x)-lo;
+ }
+ else if(hx < 0x3c900000) { /* when |x|<2**-54, return x */
+ t = really_big+x; /* return x with inexact flags when x!=0 */
+ return x - (t-(really_big+x));
+ }
+ else k = 0;
+
+ /* x is now in primary range */
+ hfx = 0.5*x;
+ hxs = x*hfx;
+ r1 = one+hxs*(Q1+hxs*(Q2+hxs*(Q3+hxs*(Q4+hxs*Q5))));
+ t = 3.0-r1*hfx;
+ e = hxs*((r1-t)/(6.0 - x*t));
+ if(k==0) return x - (x*e-hxs); /* c is 0 */
+ else {
+ e = (x*(e-c)-c);
+ e -= hxs;
+ if(k== -1) return 0.5*(x-e)-0.5;
+ if(k==1)
+ if(x < -0.25) return -2.0*(e-(x+0.5));
+ else return one+2.0*(x-e);
+ if (k <= -2 || k>56) { /* suffice to return exp(x)-1 */
+ y = one-(e-x);
+ u.d = y;
+ __HI(u) += (k<<20); /* add k to y's exponent */
+ y = u.d;
+ return y-one;
+ }
+ t = one;
+ if(k<20) {
+ u.d = t;
+ __HI(u) = 0x3ff00000 - (0x200000>>k); /* t=1-2^-k */
+ t = u.d;
+ y = t-(e-x);
+ u.d = y;
+ __HI(u) += (k<<20); /* add k to y's exponent */
+ y = u.d;
+ } else {
+ u.d = t;
+ __HI(u) = ((0x3ff-k)<<20); /* 2^-k */
+ t = u.d;
+ y = x-(e+t);
+ y += one;
+ u.d = y;
+ __HI(u) += (k<<20); /* add k to y's exponent */
+ y = u.d;
+ }
+ }
+ return y;
+}
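
The point of all this machinery is that computing exp(x)-1 naively cancels catastrophically for small x. A two-line demonstration, assuming the standard C library's exp/expm1 rather than fd_expm1:

    #include <stdio.h>
    #include <math.h>

    int main(void)
    {
        /* For tiny x, exp(x) rounds to a value within one ulp of 1.0, so the
         * subtraction exp(x) - 1.0 cancels away most significant digits,
         * while expm1 keeps full precision. */
        double x = 1e-12;
        printf("exp(x)-1 = %.17e\n", exp(x) - 1.0);   /* only ~4 digits correct */
        printf("expm1(x) = %.17e\n", expm1(x));       /* ~1.0000000000005e-12 */
        return 0;
    }
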
diff --git a/src/third_party/js-1.7/fdlibm/s_fabs.c b/src/third_party/js-1.7/fdlibm/s_fabs.c
new file mode 100644
index 00000000000..6b029da1013
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/s_fabs.c
@@ -0,0 +1,70 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_fabs.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * fabs(x) returns the absolute value of x.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double fd_fabs(double x)
+#else
+ double fd_fabs(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ u.d = x;
+ __HI(u) &= 0x7fffffff;
+ x = u.d;
+ return x;
+}
diff --git a/src/third_party/js-1.7/fdlibm/s_finite.c b/src/third_party/js-1.7/fdlibm/s_finite.c
new file mode 100644
index 00000000000..4a0a4d3c603
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/s_finite.c
@@ -0,0 +1,71 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_finite.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * finite(x) returns 1 if x is finite, else 0;
+ * no branching!
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ int fd_finite(double x)
+#else
+ int fd_finite(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ int hx;
+ u.d = x;
+ hx = __HI(u);
+ return (unsigned)((hx&0x7fffffff)-0x7ff00000)>>31;
+}
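
The same branch-free idea works on a single 64-bit view of the double: finite values have a biased exponent below 0x7ff, so an unsigned subtraction wraps around exactly for them. A sketch, illustrative only and assuming IEEE-754 doubles accessed via memcpy:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>
    #include <math.h>

    /* Hypothetical 64-bit variant of the branch-free test above: the
     * subtraction wraps (sets the top bit) exactly when the exponent field
     * is below 0x7ff, i.e. when x is finite. */
    static int finite_branchless(double x)
    {
        uint64_t bits;
        memcpy(&bits, &x, sizeof bits);
        uint32_t hx = (uint32_t)(bits >> 32) & 0x7fffffffu;
        return (int)((hx - 0x7ff00000u) >> 31);
    }

    int main(void)
    {
        printf("%d %d %d %d\n",
               finite_branchless(1.5), finite_branchless(-0.0),
               finite_branchless(INFINITY), finite_branchless(NAN));
        /* expected: 1 1 0 0 */
        return 0;
    }
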
diff --git a/src/third_party/js-1.7/fdlibm/s_floor.c b/src/third_party/js-1.7/fdlibm/s_floor.c
new file mode 100644
index 00000000000..6c23495f0e0
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/s_floor.c
@@ -0,0 +1,121 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_floor.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * floor(x)
+ * Return x rounded toward -inf to integral value
+ * Method:
+ * Bit twiddling.
+ * Exception:
+ * Inexact flag raised if x not equal to floor(x).
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double really_big = 1.0e300;
+#else
+static double really_big = 1.0e300;
+#endif
+
+#ifdef __STDC__
+ double fd_floor(double x)
+#else
+ double fd_floor(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ int i0,i1,j0;
+ unsigned i,j;
+ u.d = x;
+ i0 = __HI(u);
+ i1 = __LO(u);
+ j0 = ((i0>>20)&0x7ff)-0x3ff;
+ if(j0<20) {
+ if(j0<0) { /* raise inexact if x != 0 */
+ if(really_big+x>0.0) {/* return 0*sign(x) if |x|<1 */
+ if(i0>=0) {i0=i1=0;}
+ else if(((i0&0x7fffffff)|i1)!=0)
+ { i0=0xbff00000;i1=0;}
+ }
+ } else {
+ i = (0x000fffff)>>j0;
+ if(((i0&i)|i1)==0) return x; /* x is integral */
+ if(really_big+x>0.0) { /* raise inexact flag */
+ if(i0<0) i0 += (0x00100000)>>j0;
+ i0 &= (~i); i1=0;
+ }
+ }
+ } else if (j0>51) {
+ if(j0==0x400) return x+x; /* inf or NaN */
+ else return x; /* x is integral */
+ } else {
+ i = ((unsigned)(0xffffffff))>>(j0-20);
+ if((i1&i)==0) return x; /* x is integral */
+ if(really_big+x>0.0) { /* raise inexact flag */
+ if(i0<0) {
+ if(j0==20) i0+=1;
+ else {
+ j = i1+(1<<(52-j0));
+ if((int)j<i1) i0 +=1 ; /* got a carry */
+ i1=j;
+ }
+ }
+ i1 &= (~i);
+ }
+ }
+ u.d = x;
+ __HI(u) = i0;
+ __LO(u) = i1;
+ x = u.d;
+ return x;
+}
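
floor is the mirror image of the ceil routine earlier in this directory: the bit-level code differs essentially in which sign has its magnitude bumped before the fractional bits are cleared, which is why floor(x) == -ceil(-x). A quick numeric check of that identity, using the standard C library rather than the fd_ functions:

    #include <stdio.h>
    #include <math.h>

    int main(void)
    {
        /* floor(x) == -ceil(-x) for every x. */
        double xs[] = { 2.25, -2.25, 7.0, -0.5 };
        for (int i = 0; i < 4; i++)
            printf("x=%5.2f  floor=%4.1f  -ceil(-x)=%4.1f\n",
                   xs[i], floor(xs[i]), -ceil(-xs[i]));
        return 0;
    }
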
diff --git a/src/third_party/js-1.7/fdlibm/s_frexp.c b/src/third_party/js-1.7/fdlibm/s_frexp.c
new file mode 100644
index 00000000000..bec2ecebc6d
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/s_frexp.c
@@ -0,0 +1,99 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_frexp.c 1.4 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * for non-zero x
+ * x = frexp(arg,&exp);
+ * return a double fp quantity x such that 0.5 <= |x| <1.0
+ * and the corresponding binary exponent "exp". That is
+ * arg = x*2^exp.
+ * If arg is inf, 0.0, or NaN, then frexp(arg,&exp) returns arg
+ * with *exp=0.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+two54 = 1.80143985094819840000e+16; /* 0x43500000, 0x00000000 */
+
+#ifdef __STDC__
+ double fd_frexp(double x, int *eptr)
+#else
+ double fd_frexp(x, eptr)
+ double x; int *eptr;
+#endif
+{
+ int hx, ix, lx;
+ fd_twoints u;
+ u.d = x;
+ hx = __HI(u);
+ ix = 0x7fffffff&hx;
+ lx = __LO(u);
+ *eptr = 0;
+ if(ix>=0x7ff00000||((ix|lx)==0)) return x; /* 0,inf,nan */
+ if (ix<0x00100000) { /* subnormal */
+ x *= two54;
+ u.d = x;
+ hx = __HI(u);
+ ix = hx&0x7fffffff;
+ *eptr = -54;
+ }
+ *eptr += (ix>>20)-1022;
+ hx = (hx&0x800fffff)|0x3fe00000;
+ u.d = x;
+ __HI(u) = hx;
+ x = u.d;
+ return x;
+}
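
A short usage example, assuming the standard C frexp rather than fd_frexp: the returned mantissa lies in [0.5, 1) and the original value equals m * 2^e.

    #include <stdio.h>
    #include <math.h>

    int main(void)
    {
        int e;
        double m = frexp(48.0, &e);
        printf("48 = %g * 2^%d\n", m, e);        /* 0.75 * 2^6 */
        m = frexp(0.15625, &e);
        printf("0.15625 = %g * 2^%d\n", m, e);   /* 0.625 * 2^-2 */
        return 0;
    }
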
diff --git a/src/third_party/js-1.7/fdlibm/s_ilogb.c b/src/third_party/js-1.7/fdlibm/s_ilogb.c
new file mode 100644
index 00000000000..f769781a62f
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/s_ilogb.c
@@ -0,0 +1,85 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_ilogb.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* ilogb(double x)
+ * return the binary exponent of non-zero x
+ * ilogb(0) = 0x80000001
+ * ilogb(inf/NaN) = 0x7fffffff (no signal is raised)
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ int fd_ilogb(double x)
+#else
+ int fd_ilogb(x)
+ double x;
+#endif
+{
+ int hx,lx,ix;
+ fd_twoints u;
+ u.d = x;
+ hx = (__HI(u))&0x7fffffff; /* high word of x */
+ if(hx<0x00100000) {
+ lx = __LO(u);
+ if((hx|lx)==0)
+ return 0x80000001; /* ilogb(0) = 0x80000001 */
+ else /* subnormal x */
+ if(hx==0) {
+ for (ix = -1043; lx>0; lx<<=1) ix -=1;
+ } else {
+ for (ix = -1022,hx<<=11; hx>0; hx<<=1) ix -=1;
+ }
+ return ix;
+ }
+ else if (hx<0x7ff00000) return (hx>>20)-1023;
+ else return 0x7fffffff;
+}
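
A short usage example, assuming the standard C ilogb rather than fd_ilogb: it returns the unbiased binary exponent, i.e. floor(log2(|x|)), which is one less than the exponent frexp reports because frexp normalizes into [0.5, 1) instead of [1, 2).

    #include <stdio.h>
    #include <math.h>

    int main(void)
    {
        printf("%d %d %d\n", ilogb(1.0), ilogb(48.0), ilogb(0.15625));
        /* expected: 0 5 -3  (each one less than frexp's exponent) */
        return 0;
    }
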
diff --git a/src/third_party/js-1.7/fdlibm/s_isnan.c b/src/third_party/js-1.7/fdlibm/s_isnan.c
new file mode 100644
index 00000000000..52f8759c793
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/s_isnan.c
@@ -0,0 +1,74 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_isnan.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * isnan(x) returns 1 if x is NaN, else 0;
+ * no branching!
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ int fd_isnan(double x)
+#else
+ int fd_isnan(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ int hx,lx;
+ u.d = x;
+ hx = (__HI(u)&0x7fffffff);
+ lx = __LO(u);
+ hx |= (unsigned)(lx|(-lx))>>31;
+ hx = 0x7ff00000 - hx;
+ return ((unsigned)(hx))>>31;
+}
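
The hx |= (lx|(-lx))>>31 step above just folds a non-zero low word into the comparison. With a 64-bit view the same branch-free test becomes a single subtraction; a sketch, illustrative only and assuming IEEE-754 doubles accessed via memcpy:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>
    #include <math.h>

    /* Hypothetical 64-bit variant of the test above: after dropping the sign
     * bit, a NaN is the only value strictly greater than 0x7ff0000000000000,
     * so the unsigned subtraction wraps (sets bit 63) exactly for NaNs. */
    static int isnan_branchless(double x)
    {
        uint64_t bits;
        memcpy(&bits, &x, sizeof bits);
        bits &= 0x7fffffffffffffffULL;                     /* drop the sign bit */
        return (int)((0x7ff0000000000000ULL - bits) >> 63);
    }

    int main(void)
    {
        printf("%d %d %d\n", isnan_branchless(NAN),
               isnan_branchless(INFINITY), isnan_branchless(1.0));
        /* expected: 1 0 0 */
        return 0;
    }
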
diff --git a/src/third_party/js-1.7/fdlibm/s_ldexp.c b/src/third_party/js-1.7/fdlibm/s_ldexp.c
new file mode 100644
index 00000000000..9475520d480
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/s_ldexp.c
@@ -0,0 +1,66 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_ldexp.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+#include "fdlibm.h"
+#include <errno.h>
+
+#ifdef __STDC__
+ double fd_ldexp(double value, int exp)
+#else
+ double fd_ldexp(value, exp)
+ double value; int exp;
+#endif
+{
+ if(!fd_finite(value)||value==0.0) return value;
+ value = fd_scalbn(value,exp);
+ if(!fd_finite(value)||value==0.0) errno = ERANGE;
+ return value;
+}
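
A short usage example, assuming the standard C ldexp; whether errno is set on overflow depends on the implementation's math_errhandling, so the ERANGE check below is advisory rather than guaranteed, unlike the explicit wrapper above.

    #include <stdio.h>
    #include <math.h>
    #include <errno.h>

    int main(void)
    {
        printf("%g\n", ldexp(0.75, 6));          /* 48: scales by 2^6 */

        errno = 0;
        double big = ldexp(1.0, 5000);           /* overflows to +inf */
        printf("%g  errno==ERANGE: %d\n", big, errno == ERANGE);
        return 0;
    }
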
diff --git a/src/third_party/js-1.7/fdlibm/s_lib_version.c b/src/third_party/js-1.7/fdlibm/s_lib_version.c
new file mode 100644
index 00000000000..2ccf67d51c8
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/s_lib_version.c
@@ -0,0 +1,73 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_lib_version.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * MACRO for standards
+ */
+
+#include "fdlibm.h"
+
+/*
+ * define and initialize _LIB_VERSION
+ */
+#ifdef _POSIX_MODE
+_LIB_VERSION_TYPE _LIB_VERSION = _POSIX_;
+#else
+#ifdef _XOPEN_MODE
+_LIB_VERSION_TYPE _LIB_VERSION = _XOPEN_;
+#else
+#ifdef _SVID3_MODE
+_LIB_VERSION_TYPE _LIB_VERSION = _SVID_;
+#else /* default _IEEE_MODE */
+_LIB_VERSION_TYPE _LIB_VERSION = _IEEE_;
+#endif
+#endif
+#endif
diff --git a/src/third_party/js-1.7/fdlibm/s_log1p.c b/src/third_party/js-1.7/fdlibm/s_log1p.c
new file mode 100644
index 00000000000..1840156b1cd
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/s_log1p.c
@@ -0,0 +1,211 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_log1p.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* double log1p(double x)
+ *
+ * Method :
+ * 1. Argument Reduction: find k and f such that
+ * 1+x = 2^k * (1+f),
+ * where sqrt(2)/2 < 1+f < sqrt(2) .
+ *
+ * Note. If k=0, then f=x is exact. However, if k!=0, then f
+ * may not be representable exactly. In that case, a correction
+ * term is need. Let u=1+x rounded. Let c = (1+x)-u, then
+ * log(1+x) - log(u) ~ c/u. Thus, we proceed to compute log(u),
+ * and add back the correction term c/u.
+ * (Note: when x > 2**53, one can simply return log(x))
+ *
+ * 2. Approximation of log1p(f).
+ * Let s = f/(2+f) ; based on log(1+f) = log(1+s) - log(1-s)
+ * = 2s + 2/3 s**3 + 2/5 s**5 + .....,
+ * = 2s + s*R
+ * We use a special Remez algorithm on [0,0.1716] to generate
+ * a polynomial of degree 14 to approximate R. The maximum error
+ * of this polynomial approximation is bounded by 2**-58.45. In
+ * other words,
+ * 2 4 6 8 10 12 14
+ * R(z) ~ Lp1*s +Lp2*s +Lp3*s +Lp4*s +Lp5*s +Lp6*s +Lp7*s
+ * (the values of Lp1 to Lp7 are listed in the program)
+ * and
+ * | 2 14 | -58.45
+ * | Lp1*s +...+Lp7*s - R(z) | <= 2
+ * | |
+ * Note that 2s = f - s*f = f - hfsq + s*hfsq, where hfsq = f*f/2.
+ * In order to guarantee error in log below 1ulp, we compute log
+ * by
+ * log1p(f) = f - (hfsq - s*(hfsq+R)).
+ *
+ * 3. Finally, log1p(x) = k*ln2 + log1p(f).
+ * = k*ln2_hi+(f-(hfsq-(s*(hfsq+R)+k*ln2_lo)))
+ * Here ln2 is split into two floating point numbers:
+ * ln2_hi + ln2_lo,
+ * where n*ln2_hi is always exact for |n| < 2000.
+ *
+ * Special cases:
+ * log1p(x) is NaN with signal if x < -1 (including -INF) ;
+ * log1p(+INF) is +INF; log1p(-1) is -INF with signal;
+ * log1p(NaN) is that NaN with no signal.
+ *
+ * Accuracy:
+ * according to an error analysis, the error is always less than
+ * 1 ulp (unit in the last place).
+ *
+ * Constants:
+ * The hexadecimal values are the intended ones for the following
+ * constants. The decimal values may be used, provided that the
+ * compiler will convert from decimal to binary accurately enough
+ * to produce the hexadecimal values shown.
+ *
+ * Note: Assuming log() returns an accurate answer, the following
+ * algorithm can be used to compute log1p(x) to within a few ULP:
+ *
+ * u = 1+x;
+ * if(u==1.0) return x ; else
+ * return log(u)*(x/(u-1.0));
+ *
+ * See HP-15C Advanced Functions Handbook, p.193.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+ln2_hi = 6.93147180369123816490e-01, /* 3fe62e42 fee00000 */
+ln2_lo = 1.90821492927058770002e-10, /* 3dea39ef 35793c76 */
+two54 = 1.80143985094819840000e+16, /* 43500000 00000000 */
+Lp1 = 6.666666666666735130e-01, /* 3FE55555 55555593 */
+Lp2 = 3.999999999940941908e-01, /* 3FD99999 9997FA04 */
+Lp3 = 2.857142874366239149e-01, /* 3FD24924 94229359 */
+Lp4 = 2.222219843214978396e-01, /* 3FCC71C5 1D8E78AF */
+Lp5 = 1.818357216161805012e-01, /* 3FC74664 96CB03DE */
+Lp6 = 1.531383769920937332e-01, /* 3FC39A09 D078C69F */
+Lp7 = 1.479819860511658591e-01; /* 3FC2F112 DF3E5244 */
+
+static double zero = 0.0;
+
+#ifdef __STDC__
+ double fd_log1p(double x)
+#else
+ double fd_log1p(x)
+ double x;
+#endif
+{
+ double hfsq,f,c,s,z,R,u;
+ int k,hx,hu,ax;
+ fd_twoints un;
+
+ un.d = x;
+ hx = __HI(un); /* high word of x */
+ ax = hx&0x7fffffff;
+
+ k = 1;
+ if (hx < 0x3FDA827A) { /* x < 0.41422 */
+ if(ax>=0x3ff00000) { /* x <= -1.0 */
+ if(x==-1.0) return -two54/zero; /* log1p(-1)=+inf */
+ else return (x-x)/(x-x); /* log1p(x<-1)=NaN */
+ }
+ if(ax<0x3e200000) { /* |x| < 2**-29 */
+ if(two54+x>zero /* raise inexact */
+ &&ax<0x3c900000) /* |x| < 2**-54 */
+ return x;
+ else
+ return x - x*x*0.5;
+ }
+ if(hx>0||hx<=((int)0xbfd2bec3)) {
+ k=0;f=x;hu=1;} /* -0.2929<x<0.41422 */
+ }
+ if (hx >= 0x7ff00000) return x+x;
+ if(k!=0) {
+ if(hx<0x43400000) {
+ u = 1.0+x;
+ un.d = u;
+ hu = __HI(un); /* high word of u */
+ k = (hu>>20)-1023;
+ c = (k>0)? 1.0-(u-x):x-(u-1.0);/* correction term */
+ c /= u;
+ } else {
+ u = x;
+ un.d = u;
+ hu = __HI(un); /* high word of u */
+ k = (hu>>20)-1023;
+ c = 0;
+ }
+ hu &= 0x000fffff;
+ if(hu<0x6a09e) {
+ un.d = u;
+ __HI(un) = hu|0x3ff00000; /* normalize u */
+ u = un.d;
+ } else {
+ k += 1;
+ un.d = u;
+ __HI(un) = hu|0x3fe00000; /* normalize u/2 */
+ u = un.d;
+ hu = (0x00100000-hu)>>2;
+ }
+ f = u-1.0;
+ }
+ hfsq=0.5*f*f;
+ if(hu==0) { /* |f| < 2**-20 */
+ if(f==zero) if(k==0) return zero;
+ else {c += k*ln2_lo; return k*ln2_hi+c;}
+ R = hfsq*(1.0-0.66666666666666666*f);
+ if(k==0) return f-R; else
+ return k*ln2_hi-((R-(k*ln2_lo+c))-f);
+ }
+ s = f/(2.0+f);
+ z = s*s;
+ R = z*(Lp1+z*(Lp2+z*(Lp3+z*(Lp4+z*(Lp5+z*(Lp6+z*Lp7))))));
+ if(k==0) return f-(hfsq-s*(hfsq+R)); else
+ return k*ln2_hi-((hfsq-(s*(hfsq+R)+(k*ln2_lo+c)))-f);
+}
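The HP-15C fallback quoted in the header comment can be checked against fd_log1p directly. A small sketch (illustrative only, not part of this commit; it assumes fd_log1p as declared above and that fdlibm.h coexists with <math.h>, whose log() is used for the naive and HP-15C forms):

    #include <math.h>
    #include <stdio.h>
    #include "fdlibm.h"

    int main(void) {
        double x = 1e-16;
        double naive = log(1.0 + x);      /* 1+x rounds to 1.0, so this is 0: all precision lost */
        double accurate = fd_log1p(x);    /* ~1e-16 */
        double u = 1.0 + x;               /* HP-15C trick from the comment above */
        double hp15c = (u == 1.0) ? x : log(u) * (x / (u - 1.0));
        printf("naive=%.17g  log1p=%.17g  hp15c=%.17g\n", naive, accurate, hp15c);
        return 0;
    }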
diff --git a/src/third_party/js-1.7/fdlibm/s_logb.c b/src/third_party/js-1.7/fdlibm/s_logb.c
new file mode 100644
index 00000000000..f885c4dc389
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/s_logb.c
@@ -0,0 +1,79 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_logb.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * double logb(x)
+ * IEEE 754 logb. Included to pass IEEE test suite. Not recommended.
+ * Use ilogb instead.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double fd_logb(double x)
+#else
+ double fd_logb(x)
+ double x;
+#endif
+{
+ int lx,ix;
+ fd_twoints u;
+
+ u.d = x;
+ ix = (__HI(u))&0x7fffffff; /* high |x| */
+ lx = __LO(u); /* low x */
+ if((ix|lx)==0) return -1.0/fd_fabs(x);
+ if(ix>=0x7ff00000) return x*x;
+ if((ix>>=20)==0) /* IEEE 754 logb */
+ return -1022.0;
+ else
+ return (double) (ix-1023);
+}
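As the comment notes, logb exists mainly for the IEEE test suite. A sketch of its return values (illustrative only, not part of this commit; assumes fd_logb as declared above):

    #include <stdio.h>
    #include "fdlibm.h"

    int main(void) {
        printf("%g\n", fd_logb(8.0));     /*  3: 8 = 1.0 * 2^3 */
        printf("%g\n", fd_logb(0.375));   /* -2: 0.375 = 1.5 * 2^-2 */
        printf("%g\n", fd_logb(1e-310));  /* -1022: subnormals take the (ix>>20)==0 branch above */
        printf("%g\n", fd_logb(0.0));     /* -Inf, computed as -1.0/fd_fabs(x) */
        return 0;
    }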
diff --git a/src/third_party/js-1.7/fdlibm/s_matherr.c b/src/third_party/js-1.7/fdlibm/s_matherr.c
new file mode 100644
index 00000000000..cd99ca88f3c
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/s_matherr.c
@@ -0,0 +1,64 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_matherr.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ int fd_matherr(struct exception *x)
+#else
+ int fd_matherr(x)
+ struct exception *x;
+#endif
+{
+ int n=0;
+ if(x->arg1!=x->arg1) return 0;
+ return n;
+}
diff --git a/src/third_party/js-1.7/fdlibm/s_modf.c b/src/third_party/js-1.7/fdlibm/s_modf.c
new file mode 100644
index 00000000000..3b182bd3bbf
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/s_modf.c
@@ -0,0 +1,132 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_modf.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * modf(double x, double *iptr)
+ * return fraction part of x, and return x's integral part in *iptr.
+ * Method:
+ * Bit twiddling.
+ *
+ * Exception:
+ * No exception.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double one = 1.0;
+#else
+static double one = 1.0;
+#endif
+
+#ifdef __STDC__
+ double fd_modf(double x, double *iptr)
+#else
+ double fd_modf(x, iptr)
+ double x,*iptr;
+#endif
+{
+ int i0,i1,j0;
+ unsigned i;
+ fd_twoints u;
+ u.d = x;
+ i0 = __HI(u); /* high x */
+ i1 = __LO(u); /* low x */
+ j0 = ((i0>>20)&0x7ff)-0x3ff; /* exponent of x */
+ if(j0<20) { /* integer part in high x */
+ if(j0<0) { /* |x|<1 */
+ u.d = *iptr;
+ __HI(u) = i0&0x80000000;
+ __LO(u) = 0; /* *iptr = +-0 */
+ *iptr = u.d;
+ return x;
+ } else {
+ i = (0x000fffff)>>j0;
+ if(((i0&i)|i1)==0) { /* x is integral */
+ *iptr = x;
+ u.d = x;
+ __HI(u) &= 0x80000000;
+ __LO(u) = 0; /* return +-0 */
+ x = u.d;
+ return x;
+ } else {
+ u.d = *iptr;
+ __HI(u) = i0&(~i);
+ __LO(u) = 0;
+ *iptr = u.d;
+ return x - *iptr;
+ }
+ }
+ } else if (j0>51) { /* no fraction part */
+ *iptr = x*one;
+ u.d = x;
+ __HI(u) &= 0x80000000;
+ __LO(u) = 0; /* return +-0 */
+ x = u.d;
+ return x;
+ } else { /* fraction part in low x */
+ i = ((unsigned)(0xffffffff))>>(j0-20);
+ if((i1&i)==0) { /* x is integral */
+ *iptr = x;
+ u.d = x;
+ __HI(u) &= 0x80000000;
+ __LO(u) = 0; /* return +-0 */
+ x = u.d;
+ return x;
+ } else {
+ u.d = *iptr;
+ __HI(u) = i0;
+ __LO(u) = i1&(~i);
+ *iptr = u.d;
+ return x - *iptr;
+ }
+ }
+}
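A caller sketch of the sign-preserving split (illustrative only, not part of this commit; assumes fd_modf as declared above):

    #include <stdio.h>
    #include "fdlibm.h"

    int main(void) {
        double ipart;
        double frac = fd_modf(-3.25, &ipart);   /* frac = -0.25, ipart = -3: both keep the sign of x */
        printf("frac=%g ipart=%g\n", frac, ipart);
        frac = fd_modf(7.0, &ipart);            /* integral input: frac = +0.0, ipart = 7 */
        printf("frac=%g ipart=%g\n", frac, ipart);
        return 0;
    }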
diff --git a/src/third_party/js-1.7/fdlibm/s_nextafter.c b/src/third_party/js-1.7/fdlibm/s_nextafter.c
new file mode 100644
index 00000000000..f71c5c8359a
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/s_nextafter.c
@@ -0,0 +1,124 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_nextafter.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* IEEE functions
+ * nextafter(x,y)
+ * return the next machine floating-point number of x in the
+ * direction toward y.
+ * Special cases:
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double fd_nextafter(double x, double y)
+#else
+ double fd_nextafter(x,y)
+ double x,y;
+#endif
+{
+ int hx,hy,ix,iy;
+ unsigned lx,ly;
+ fd_twoints ux, uy;
+
+ ux.d = x; uy.d = y;
+ hx = __HI(ux); /* high word of x */
+ lx = __LO(ux); /* low word of x */
+ hy = __HI(uy); /* high word of y */
+ ly = __LO(uy); /* low word of y */
+ ix = hx&0x7fffffff; /* |x| */
+ iy = hy&0x7fffffff; /* |y| */
+
+ if(((ix>=0x7ff00000)&&((ix-0x7ff00000)|lx)!=0) || /* x is nan */
+ ((iy>=0x7ff00000)&&((iy-0x7ff00000)|ly)!=0)) /* y is nan */
+ return x+y;
+ if(x==y) return x; /* x=y, return x */
+ if((ix|lx)==0) { /* x == 0 */
+ ux.d = x;
+ __HI(ux) = hy&0x80000000; /* return +-minsubnormal */
+ __LO(ux) = 1;
+ x = ux.d;
+ y = x*x;
+ if(y==x) return y; else return x; /* raise underflow flag */
+ }
+ if(hx>=0) { /* x > 0 */
+ if(hx>hy||((hx==hy)&&(lx>ly))) { /* x > y, x -= ulp */
+ if(lx==0) hx -= 1;
+ lx -= 1;
+ } else { /* x < y, x += ulp */
+ lx += 1;
+ if(lx==0) hx += 1;
+ }
+ } else { /* x < 0 */
+ if(hy>=0||hx>hy||((hx==hy)&&(lx>ly))){/* x < y, x -= ulp */
+ if(lx==0) hx -= 1;
+ lx -= 1;
+ } else { /* x > y, x += ulp */
+ lx += 1;
+ if(lx==0) hx += 1;
+ }
+ }
+ hy = hx&0x7ff00000;
+ if(hy>=0x7ff00000) return x+x; /* overflow */
+ if(hy<0x00100000) { /* underflow */
+ y = x*x;
+ if(y!=x) { /* raise underflow flag */
+ uy.d = y;
+ __HI(uy) = hx; __LO(uy) = lx;
+ y = uy.d;
+ return y;
+ }
+ }
+ ux.d = x;
+ __HI(ux) = hx; __LO(ux) = lx;
+ x = ux.d;
+ return x;
+}
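A sketch of the one-ulp stepping behaviour (illustrative only, not part of this commit; assumes fd_nextafter as declared above plus <float.h>):

    #include <float.h>
    #include <stdio.h>
    #include "fdlibm.h"

    int main(void) {
        double up = fd_nextafter(1.0, 2.0);
        printf("%d\n", up - 1.0 == DBL_EPSILON);   /* 1: the gap above 1.0 is exactly DBL_EPSILON */
        printf("%.17g\n", fd_nextafter(0.0, 1.0)); /* smallest positive subnormal, ~4.94e-324 */
        printf("%g\n", fd_nextafter(3.0, 3.0));    /* 3: x == y returns x unchanged */
        return 0;
    }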
diff --git a/src/third_party/js-1.7/fdlibm/s_rint.c b/src/third_party/js-1.7/fdlibm/s_rint.c
new file mode 100644
index 00000000000..3c4fab6d9d9
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/s_rint.c
@@ -0,0 +1,131 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_rint.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * rint(x)
+ * Return x rounded to integral value according to the prevailing
+ * rounding mode.
+ * Method:
+ * Using floating addition.
+ * Exception:
+ * Inexact flag raised if x not equal to rint(x).
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+TWO52[2]={
+ 4.50359962737049600000e+15, /* 0x43300000, 0x00000000 */
+ -4.50359962737049600000e+15, /* 0xC3300000, 0x00000000 */
+};
+
+#ifdef __STDC__
+ double fd_rint(double x)
+#else
+ double fd_rint(x)
+ double x;
+#endif
+{
+ int i0,j0,sx;
+ unsigned i,i1;
+ double w,t;
+ fd_twoints u;
+
+ u.d = x;
+ i0 = __HI(u);
+ sx = (i0>>31)&1;
+ i1 = __LO(u);
+ j0 = ((i0>>20)&0x7ff)-0x3ff;
+ if(j0<20) {
+ if(j0<0) {
+ if(((i0&0x7fffffff)|i1)==0) return x;
+ i1 |= (i0&0x0fffff);
+ i0 &= 0xfffe0000;
+ i0 |= ((i1|-(int)i1)>>12)&0x80000;
+ u.d = x;
+ __HI(u)=i0;
+ x = u.d;
+ w = TWO52[sx]+x;
+ t = w-TWO52[sx];
+ u.d = t;
+ i0 = __HI(u);
+ __HI(u) = (i0&0x7fffffff)|(sx<<31);
+ t = u.d;
+ return t;
+ } else {
+ i = (0x000fffff)>>j0;
+ if(((i0&i)|i1)==0) return x; /* x is integral */
+ i>>=1;
+ if(((i0&i)|i1)!=0) {
+ if(j0==19) i1 = 0x40000000; else
+ i0 = (i0&(~i))|((0x20000)>>j0);
+ }
+ }
+ } else if (j0>51) {
+ if(j0==0x400) return x+x; /* inf or NaN */
+ else return x; /* x is integral */
+ } else {
+ i = ((unsigned)(0xffffffff))>>(j0-20);
+ if((i1&i)==0) return x; /* x is integral */
+ i>>=1;
+ if((i1&i)!=0) i1 = (i1&(~i))|((0x40000000)>>(j0-20));
+ }
+ u.d = x;
+ __HI(u) = i0;
+ __LO(u) = i1;
+ x = u.d;
+ w = TWO52[sx]+x;
+ return w-TWO52[sx];
+}
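The TWO52 add/subtract above lets the FPU's current rounding mode do the rounding. A sketch of the resulting behaviour under the default round-to-nearest-even mode (illustrative only, not part of this commit; assumes fd_rint as declared above):

    #include <stdio.h>
    #include "fdlibm.h"

    int main(void) {
        printf("%g\n", fd_rint(2.5));    /*  2: halfway cases go to the even neighbour */
        printf("%g\n", fd_rint(3.5));    /*  4 */
        printf("%g\n", fd_rint(-0.5));   /* -0 */
        printf("%g\n", fd_rint(7.0));    /*  7: integral values (and inf/NaN) pass through */
        return 0;
    }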
diff --git a/src/third_party/js-1.7/fdlibm/s_scalbn.c b/src/third_party/js-1.7/fdlibm/s_scalbn.c
new file mode 100644
index 00000000000..3deeaa3057e
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/s_scalbn.c
@@ -0,0 +1,107 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_scalbn.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * scalbn (double x, int n)
+ * scalbn(x,n) returns x* 2**n computed by exponent
+ * manipulation rather than by actually performing an
+ * exponentiation or a multiplication.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+two54 = 1.80143985094819840000e+16, /* 0x43500000, 0x00000000 */
+twom54 = 5.55111512312578270212e-17, /* 0x3C900000, 0x00000000 */
+really_big = 1.0e+300,
+tiny = 1.0e-300;
+
+#ifdef __STDC__
+ double fd_scalbn (double x, int n)
+#else
+ double fd_scalbn (x,n)
+ double x; int n;
+#endif
+{
+ fd_twoints u;
+ int k,hx,lx;
+ u.d = x;
+ hx = __HI(u);
+ lx = __LO(u);
+ k = (hx&0x7ff00000)>>20; /* extract exponent */
+ if (k==0) { /* 0 or subnormal x */
+ if ((lx|(hx&0x7fffffff))==0) return x; /* +-0 */
+ x *= two54;
+ u.d = x;
+ hx = __HI(u);
+ k = ((hx&0x7ff00000)>>20) - 54;
+ if (n< -50000) return tiny*x; /*underflow*/
+ }
+ if (k==0x7ff) return x+x; /* NaN or Inf */
+ k = k+n;
+ if (k > 0x7fe) return really_big*fd_copysign(really_big,x); /* overflow */
+ if (k > 0) /* normal result */
+ {u.d = x; __HI(u) = (hx&0x800fffff)|(k<<20); x = u.d; return x;}
+ if (k <= -54) {
+ if (n > 50000) /* in case integer overflow in n+k */
+ return really_big*fd_copysign(really_big,x); /*overflow*/
+ else return tiny*fd_copysign(tiny,x); /*underflow*/
+ }
+ k += 54; /* subnormal result */
+ u.d = x;
+ __HI(u) = (hx&0x800fffff)|(k<<20);
+ x = u.d;
+ return x*twom54;
+}
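Because fd_scalbn only rewrites the exponent field, the scaling is exact wherever the result is representable. A sketch (illustrative only, not part of this commit; assumes fd_scalbn as declared above):

    #include <stdio.h>
    #include "fdlibm.h"

    int main(void) {
        printf("%g\n", fd_scalbn(3.0, 4));      /* 48: 3 * 2^4 */
        printf("%g\n", fd_scalbn(1.0, -1074));  /* 2^-1074, the smallest positive subnormal */
        printf("%g\n", fd_scalbn(1.0, 2000));   /* +Inf: k > 0x7fe takes the overflow branch */
        return 0;
    }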
diff --git a/src/third_party/js-1.7/fdlibm/s_signgam.c b/src/third_party/js-1.7/fdlibm/s_signgam.c
new file mode 100644
index 00000000000..4eb8ce72f8c
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/s_signgam.c
@@ -0,0 +1,40 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+#include "fdlibm.h"
+int signgam = 0;
diff --git a/src/third_party/js-1.7/fdlibm/s_significand.c b/src/third_party/js-1.7/fdlibm/s_significand.c
new file mode 100644
index 00000000000..2e1c0b28fb1
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/s_significand.c
@@ -0,0 +1,68 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_significand.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * significand(x) computes just
+ * scalb(x, (double) -ilogb(x)),
+ * for exercising the fraction-part(F) IEEE 754-1985 test vector.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double fd_significand(double x)
+#else
+ double fd_significand(x)
+ double x;
+#endif
+{
+ return __ieee754_scalb(x,(double) -fd_ilogb(x));
+}
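The identity exercised here is x == significand(x) * 2^ilogb(x) for finite non-zero x, with the significand normalised into [1,2). A sketch (illustrative only, not part of this commit; assumes fd_significand, fd_ilogb and fd_scalbn as declared in fdlibm.h, with fd_ilogb returning int):

    #include <stdio.h>
    #include "fdlibm.h"

    int main(void) {
        double x = 80.0;
        double m = fd_significand(x);   /* 1.25, since 80 = 1.25 * 2^6 */
        int    e = fd_ilogb(x);         /* 6 */
        printf("m=%g e=%d recombined=%g\n", m, e, fd_scalbn(m, e));  /* 80 */
        return 0;
    }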
diff --git a/src/third_party/js-1.7/fdlibm/s_sin.c b/src/third_party/js-1.7/fdlibm/s_sin.c
new file mode 100644
index 00000000000..8bbc5c62d92
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/s_sin.c
@@ -0,0 +1,118 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_sin.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* sin(x)
+ * Return sine function of x.
+ *
+ * kernel function:
+ * __kernel_sin ... sine function on [-pi/4,pi/4]
+ * __kernel_cos ... cosine function on [-pi/4,pi/4]
+ * __ieee754_rem_pio2 ... argument reduction routine
+ *
+ * Method.
+ * Let S,C and T denote the sin, cos and tan respectively on
+ * [-PI/4, +PI/4]. Reduce the argument x to y1+y2 = x-k*pi/2
+ * in [-pi/4 , +pi/4], and let n = k mod 4.
+ * We have
+ *
+ * n sin(x) cos(x) tan(x)
+ * ----------------------------------------------------------
+ * 0 S C T
+ * 1 C -S -1/T
+ * 2 -S -C T
+ * 3 -C S -1/T
+ * ----------------------------------------------------------
+ *
+ * Special cases:
+ * Let trig be any of sin, cos, or tan.
+ * trig(+-INF) is NaN, with signals;
+ * trig(NaN) is that NaN;
+ *
+ * Accuracy:
+ * TRIG(x) returns trig(x) nearly rounded
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double fd_sin(double x)
+#else
+ double fd_sin(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ double y[2],z=0.0;
+ int n, ix;
+
+ /* High word of x. */
+ u.d = x;
+ ix = __HI(u);
+
+ /* |x| ~< pi/4 */
+ ix &= 0x7fffffff;
+ if(ix <= 0x3fe921fb) return __kernel_sin(x,z,0);
+
+ /* sin(Inf or NaN) is NaN */
+ else if (ix>=0x7ff00000) return x-x;
+
+ /* argument reduction needed */
+ else {
+ n = __ieee754_rem_pio2(x,y);
+ switch(n&3) {
+ case 0: return __kernel_sin(y[0],y[1],1);
+ case 1: return __kernel_cos(y[0],y[1]);
+ case 2: return -__kernel_sin(y[0],y[1],1);
+ default:
+ return -__kernel_cos(y[0],y[1]);
+ }
+ }
+}
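Shifting the argument by pi moves the reduced value two rows down the quadrant table above, which flips the sign of the result. A sketch (illustrative only, not part of this commit; assumes fd_sin as declared above; the sum is only approximately zero because pi itself is rounded):

    #include <stdio.h>
    #include "fdlibm.h"

    int main(void) {
        double x = 2.0;                            /* > pi/4, so argument reduction is taken */
        double a = fd_sin(x);
        double b = fd_sin(x + 3.141592653589793);
        printf("a=%.15g b=%.15g a+b=%.3g\n", a, b, a + b);   /* a+b ~ 0, on the order of 1e-16 */
        return 0;
    }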
diff --git a/src/third_party/js-1.7/fdlibm/s_tan.c b/src/third_party/js-1.7/fdlibm/s_tan.c
new file mode 100644
index 00000000000..ded36c1d7c1
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/s_tan.c
@@ -0,0 +1,112 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_tan.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* tan(x)
+ * Return tangent function of x.
+ *
+ * kernel function:
+ * __kernel_tan ... tangent function on [-pi/4,pi/4]
+ * __ieee754_rem_pio2 ... argument reduction routine
+ *
+ * Method.
+ * Let S,C and T denote the sin, cos and tan respectively on
+ * [-PI/4, +PI/4]. Reduce the argument x to y1+y2 = x-k*pi/2
+ * in [-pi/4 , +pi/4], and let n = k mod 4.
+ * We have
+ *
+ * n sin(x) cos(x) tan(x)
+ * ----------------------------------------------------------
+ * 0 S C T
+ * 1 C -S -1/T
+ * 2 -S -C T
+ * 3 -C S -1/T
+ * ----------------------------------------------------------
+ *
+ * Special cases:
+ * Let trig be any of sin, cos, or tan.
+ * trig(+-INF) is NaN, with signals;
+ * trig(NaN) is that NaN;
+ *
+ * Accuracy:
+ * TRIG(x) returns trig(x) nearly rounded
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double fd_tan(double x)
+#else
+ double fd_tan(x)
+ double x;
+#endif
+{
+ fd_twoints u;
+ double y[2],z=0.0;
+ int n, ix;
+
+ /* High word of x. */
+ u.d = x;
+ ix = __HI(u);
+
+ /* |x| ~< pi/4 */
+ ix &= 0x7fffffff;
+ if(ix <= 0x3fe921fb) return __kernel_tan(x,z,1);
+
+ /* tan(Inf or NaN) is NaN */
+ else if (ix>=0x7ff00000) return x-x; /* NaN */
+
+ /* argument reduction needed */
+ else {
+ n = __ieee754_rem_pio2(x,y);
+ return __kernel_tan(y[0],y[1],1-((n&1)<<1)); /* 1 -- n even
+ -1 -- n odd */
+ }
+}
diff --git a/src/third_party/js-1.7/fdlibm/s_tanh.c b/src/third_party/js-1.7/fdlibm/s_tanh.c
new file mode 100644
index 00000000000..aa6809f8518
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/s_tanh.c
@@ -0,0 +1,122 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)s_tanh.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/* Tanh(x)
+ * Return the Hyperbolic Tangent of x
+ *
+ * Method :
+ * x -x
+ * e - e
+ * 0. tanh(x) is defined to be -----------
+ * x -x
+ * e + e
+ * 1. reduce x to non-negative by tanh(-x) = -tanh(x).
+ * 2. 0 <= x <= 2**-55 : tanh(x) := x*(one+x)
+ * -t
+ * 2**-55 < x <= 1 : tanh(x) := -----; t = expm1(-2x)
+ * t + 2
+ * 2
+ * 1 <= x <= 22.0 : tanh(x) := 1- ----- ; t=expm1(2x)
+ * t + 2
+ * 22.0 < x <= INF : tanh(x) := 1.
+ *
+ * Special cases:
+ * tanh(NaN) is NaN;
+ * only tanh(0)=0 is exact for finite argument.
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double one=1.0, two=2.0, tiny = 1.0e-300;
+#else
+static double one=1.0, two=2.0, tiny = 1.0e-300;
+#endif
+
+#ifdef __STDC__
+ double fd_tanh(double x)
+#else
+ double fd_tanh(x)
+ double x;
+#endif
+{
+ double t,z;
+ int jx,ix;
+ fd_twoints u;
+
+ /* High word of |x|. */
+ u.d = x;
+ jx = __HI(u);
+ ix = jx&0x7fffffff;
+
+ /* x is INF or NaN */
+ if(ix>=0x7ff00000) {
+ if (jx>=0) return one/x+one; /* tanh(+-inf)=+-1 */
+ else return one/x-one; /* tanh(NaN) = NaN */
+ }
+
+ /* |x| < 22 */
+ if (ix < 0x40360000) { /* |x|<22 */
+ if (ix<0x3c800000) /* |x|<2**-55 */
+ return x*(one+x); /* tanh(small) = small */
+ if (ix>=0x3ff00000) { /* |x|>=1 */
+ t = fd_expm1(two*fd_fabs(x));
+ z = one - two/(t+two);
+ } else {
+ t = fd_expm1(-two*fd_fabs(x));
+ z= -t/(t+two);
+ }
+ /* |x| > 22, return +-1 */
+ } else {
+ z = one - tiny; /* raised inexact flag */
+ }
+ return (jx>=0)? z: -z;
+}
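The expm1-based formulas in the comment can be checked directly against the code path taken for x = 0.5 (illustrative only, not part of this commit; assumes fd_tanh and fd_expm1 as declared in fdlibm.h):

    #include <stdio.h>
    #include "fdlibm.h"

    int main(void) {
        double x = 0.5;                       /* 2**-55 < x <= 1 branch */
        double t = fd_expm1(-2.0 * x);
        printf("%d\n", fd_tanh(x) == -t / (t + 2.0));  /* 1 on typical targets: same arithmetic as the branch above */
        printf("%g\n", fd_tanh(30.0));                 /* 1: |x| > 22 returns one - tiny, which rounds to 1 */
        printf("%g\n", fd_tanh(-0.0));                 /* -0: tanh(small) = small keeps the sign */
        return 0;
    }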
diff --git a/src/third_party/js-1.7/fdlibm/w_acos.c b/src/third_party/js-1.7/fdlibm/w_acos.c
new file mode 100644
index 00000000000..872c81d2081
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/w_acos.c
@@ -0,0 +1,78 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_acos.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrap_acos(x)
+ */
+
+#include "fdlibm.h"
+
+
+#ifdef __STDC__
+ double fd_acos(double x) /* wrapper acos */
+#else
+ double fd_acos(x) /* wrapper acos */
+ double x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_acos(x);
+#else
+ double z;
+ z = __ieee754_acos(x);
+ if(_LIB_VERSION == _IEEE_ || fd_isnan(x)) return z;
+ if(fd_fabs(x)>1.0) {
+ int err;
+ return __kernel_standard(x,x,1,&err); /* acos(|x|>1) */
+ } else
+ return z;
+#endif
+}
diff --git a/src/third_party/js-1.7/fdlibm/w_acosh.c b/src/third_party/js-1.7/fdlibm/w_acosh.c
new file mode 100644
index 00000000000..745d402eabc
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/w_acosh.c
@@ -0,0 +1,78 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_acosh.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+
+/*
+ * wrapper acosh(x)
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double fd_acosh(double x) /* wrapper acosh */
+#else
+ double fd_acosh(x) /* wrapper acosh */
+ double x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_acosh(x);
+#else
+ double z;
+ z = __ieee754_acosh(x);
+ if(_LIB_VERSION == _IEEE_ || fd_isnan(x)) return z;
+ if(x<1.0) {
+ int err;
+ return __kernel_standard(x,x,29,&err); /* acosh(x<1) */
+ } else
+ return z;
+#endif
+}
diff --git a/src/third_party/js-1.7/fdlibm/w_asin.c b/src/third_party/js-1.7/fdlibm/w_asin.c
new file mode 100644
index 00000000000..18aaefde9ba
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/w_asin.c
@@ -0,0 +1,80 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_asin.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+
+/*
+ * wrapper asin(x)
+ */
+
+
+#include "fdlibm.h"
+
+
+#ifdef __STDC__
+ double fd_asin(double x) /* wrapper asin */
+#else
+ double fd_asin(x) /* wrapper asin */
+ double x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_asin(x);
+#else
+ double z;
+ z = __ieee754_asin(x);
+ if(_LIB_VERSION == _IEEE_ || fd_isnan(x)) return z;
+ if(fd_fabs(x)>1.0) {
+ int err;
+ return __kernel_standard(x,x,2,&err); /* asin(|x|>1) */
+ } else
+ return z;
+#endif
+}
diff --git a/src/third_party/js-1.7/fdlibm/w_atan2.c b/src/third_party/js-1.7/fdlibm/w_atan2.c
new file mode 100644
index 00000000000..8cfa4bbbde4
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/w_atan2.c
@@ -0,0 +1,79 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_atan2.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+
+/*
+ * wrapper atan2(y,x)
+ */
+
+#include "fdlibm.h"
+
+
+#ifdef __STDC__
+ double fd_atan2(double y, double x) /* wrapper atan2 */
+#else
+ double fd_atan2(y,x) /* wrapper atan2 */
+ double y,x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_atan2(y,x);
+#else
+ double z;
+ z = __ieee754_atan2(y,x);
+ if(_LIB_VERSION == _IEEE_||fd_isnan(x)||fd_isnan(y)) return z;
+ if(x==0.0&&y==0.0) {
+ int err;
+ return __kernel_standard(y,x,3,&err); /* atan2(+-0,+-0) */
+ } else
+ return z;
+#endif
+}
diff --git a/src/third_party/js-1.7/fdlibm/w_atanh.c b/src/third_party/js-1.7/fdlibm/w_atanh.c
new file mode 100644
index 00000000000..6ba52d1e26f
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/w_atanh.c
@@ -0,0 +1,81 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_atanh.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+/*
+ * wrapper atanh(x)
+ */
+
+#include "fdlibm.h"
+
+
+#ifdef __STDC__
+ double fd_atanh(double x) /* wrapper atanh */
+#else
+ double fd_atanh(x) /* wrapper atanh */
+ double x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_atanh(x);
+#else
+ double z,y;
+ z = __ieee754_atanh(x);
+ if(_LIB_VERSION == _IEEE_ || fd_isnan(x)) return z;
+ y = fd_fabs(x);
+ if(y>=1.0) {
+ int err;
+ if(y>1.0)
+ return __kernel_standard(x,x,30,&err); /* atanh(|x|>1) */
+ else
+ return __kernel_standard(x,x,31,&err); /* atanh(|x|==1) */
+ } else
+ return z;
+#endif
+}
diff --git a/src/third_party/js-1.7/fdlibm/w_cosh.c b/src/third_party/js-1.7/fdlibm/w_cosh.c
new file mode 100644
index 00000000000..146449e0206
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/w_cosh.c
@@ -0,0 +1,77 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_cosh.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrapper cosh(x)
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double fd_cosh(double x) /* wrapper cosh */
+#else
+ double fd_cosh(x) /* wrapper cosh */
+ double x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_cosh(x);
+#else
+ double z;
+ z = __ieee754_cosh(x);
+ if(_LIB_VERSION == _IEEE_ || fd_isnan(x)) return z;
+ if(fd_fabs(x)>7.10475860073943863426e+02) {
+ int err;
+ return __kernel_standard(x,x,5,&err); /* cosh overflow */
+ } else
+ return z;
+#endif
+}
diff --git a/src/third_party/js-1.7/fdlibm/w_exp.c b/src/third_party/js-1.7/fdlibm/w_exp.c
new file mode 100644
index 00000000000..f5dea0b0111
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/w_exp.c
@@ -0,0 +1,88 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_exp.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrapper exp(x)
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+static const double
+#else
+static double
+#endif
+o_threshold= 7.09782712893383973096e+02, /* 0x40862E42, 0xFEFA39EF */
+u_threshold= -7.45133219101941108420e+02; /* 0xc0874910, 0xD52D3051 */
+
+#ifdef __STDC__
+ double fd_exp(double x) /* wrapper exp */
+#else
+ double fd_exp(x) /* wrapper exp */
+ double x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_exp(x);
+#else
+ double z;
+ z = __ieee754_exp(x);
+ if(_LIB_VERSION == _IEEE_) return z;
+ if(fd_finite(x)) {
+ int err;
+ if(x>o_threshold)
+ return __kernel_standard(x,x,6,&err); /* exp overflow */
+ else if(x<u_threshold)
+ return __kernel_standard(x,x,7,&err); /* exp underflow */
+ }
+ return z;
+#endif
+}
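
The two thresholds above are the exact limits of exp() on IEEE-754 doubles: o_threshold is ln(DBL_MAX), past which the result overflows to infinity, and u_threshold is roughly -1075·ln 2, past which the result underflows to zero even as a subnormal. A quick standalone check using the standard C library rather than the fd_* wrappers:

#include <stdio.h>
#include <math.h>
#include <float.h>

int main(void)
{
    /* ln(DBL_MAX) matches o_threshold to full double precision. */
    printf("log(DBL_MAX) = %.17g\n", log(DBL_MAX));       /* ~709.78271289338397  */
    /* -1075*ln(2) matches u_threshold. */
    printf("-1075*log(2) = %.17g\n", -1075.0 * log(2.0)); /* ~-745.13321910194111 */
    printf("exp(709.9)   = %g\n", exp(709.9));            /* inf (overflow)       */
    printf("exp(-745.2)  = %g\n", exp(-745.2));           /* 0   (underflow)      */
    return 0;
}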
diff --git a/src/third_party/js-1.7/fdlibm/w_fmod.c b/src/third_party/js-1.7/fdlibm/w_fmod.c
new file mode 100644
index 00000000000..76862096d63
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/w_fmod.c
@@ -0,0 +1,78 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_fmod.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrapper fmod(x,y)
+ */
+
+#include "fdlibm.h"
+
+
+#ifdef __STDC__
+ double fd_fmod(double x, double y) /* wrapper fmod */
+#else
+ double fd_fmod(x,y) /* wrapper fmod */
+ double x,y;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_fmod(x,y);
+#else
+ double z;
+ z = __ieee754_fmod(x,y);
+ if(_LIB_VERSION == _IEEE_ ||fd_isnan(y)||fd_isnan(x)) return z;
+ if(y==0.0) {
+ int err;
+ return __kernel_standard(x,y,27,&err); /* fmod(x,0) */
+ } else
+ return z;
+#endif
+}
diff --git a/src/third_party/js-1.7/fdlibm/w_gamma.c b/src/third_party/js-1.7/fdlibm/w_gamma.c
new file mode 100644
index 00000000000..9eb8e429c45
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/w_gamma.c
@@ -0,0 +1,85 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_gamma.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+
+/* double gamma(double x)
+ * Return the logarithm of the Gamma function of x.
+ *
+ * Method: call gamma_r
+ */
+
+#include "fdlibm.h"
+
+extern int signgam;
+
+#ifdef __STDC__
+ double fd_gamma(double x)
+#else
+ double fd_gamma(x)
+ double x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_gamma_r(x,&signgam);
+#else
+ double y;
+ y = __ieee754_gamma_r(x,&signgam);
+ if(_LIB_VERSION == _IEEE_) return y;
+ if(!fd_finite(y)&&fd_finite(x)) {
+ int err;
+ if(fd_floor(x)==x&&x<=0.0)
+ return __kernel_standard(x,x,41,&err); /* gamma pole */
+ else
+ return __kernel_standard(x,x,40,&err); /* gamma overflow */
+ } else
+ return y;
+#endif
+}
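
Despite the name, fd_gamma above follows the old SVID convention noted in its comment: it returns log|Gamma(x)| (the lgamma value), with the sign of Gamma(x) reported through the global signgam. A small standalone illustration with the standard C99 functions rather than the fd_* wrappers:

#include <stdio.h>
#include <math.h>

int main(void)
{
    /* Gamma(5) = 4! = 24, so lgamma(5) is log(24). */
    printf("%g\n", lgamma(5.0));      /* ~3.17805 */
    printf("%g\n", exp(lgamma(5.0))); /* ~24      */
    return 0;
}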
diff --git a/src/third_party/js-1.7/fdlibm/w_gamma_r.c b/src/third_party/js-1.7/fdlibm/w_gamma_r.c
new file mode 100644
index 00000000000..2669b4f5964
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/w_gamma_r.c
@@ -0,0 +1,81 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_gamma_r.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrapper double gamma_r(double x, int *signgamp)
+ */
+
+#include "fdlibm.h"
+
+
+#ifdef __STDC__
+ double fd_gamma_r(double x, int *signgamp) /* wrapper lgamma_r */
+#else
+ double fd_gamma_r(x,signgamp) /* wrapper lgamma_r */
+ double x; int *signgamp;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_gamma_r(x,signgamp);
+#else
+ double y;
+ y = __ieee754_gamma_r(x,signgamp);
+ if(_LIB_VERSION == _IEEE_) return y;
+ if(!fd_finite(y)&&fd_finite(x)) {
+ int err;
+ if(fd_floor(x)==x&&x<=0.0)
+ return __kernel_standard(x,x,41,&err); /* gamma pole */
+ else
+ return __kernel_standard(x,x,40,&err); /* gamma overflow */
+ } else
+ return y;
+#endif
+}
diff --git a/src/third_party/js-1.7/fdlibm/w_hypot.c b/src/third_party/js-1.7/fdlibm/w_hypot.c
new file mode 100644
index 00000000000..bfaac66c028
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/w_hypot.c
@@ -0,0 +1,78 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_hypot.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrapper hypot(x,y)
+ */
+
+#include "fdlibm.h"
+
+
+#ifdef __STDC__
+ double fd_hypot(double x, double y)/* wrapper hypot */
+#else
+ double fd_hypot(x,y) /* wrapper hypot */
+ double x,y;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_hypot(x,y);
+#else
+ double z;
+ z = __ieee754_hypot(x,y);
+ if(_LIB_VERSION == _IEEE_) return z;
+ if((!fd_finite(z))&&fd_finite(x)&&fd_finite(y)) {
+ int err;
+ return __kernel_standard(x,y,4,&err); /* hypot overflow */
+ } else
+ return z;
+#endif
+}
diff --git a/src/third_party/js-1.7/fdlibm/w_j0.c b/src/third_party/js-1.7/fdlibm/w_j0.c
new file mode 100644
index 00000000000..5e676ff6028
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/w_j0.c
@@ -0,0 +1,105 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_j0.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrapper j0(double x), y0(double x)
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double fd_j0(double x) /* wrapper j0 */
+#else
+ double fd_j0(x) /* wrapper j0 */
+ double x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_j0(x);
+#else
+ double z = __ieee754_j0(x);
+ if(_LIB_VERSION == _IEEE_ || fd_isnan(x)) return z;
+ if(fd_fabs(x)>X_TLOSS) {
+ int err;
+ return __kernel_standard(x,x,34,&err); /* j0(|x|>X_TLOSS) */
+ } else
+ return z;
+#endif
+}
+
+#ifdef __STDC__
+ double y0(double x) /* wrapper y0 */
+#else
+ double y0(x) /* wrapper y0 */
+ double x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_y0(x);
+#else
+ double z;
+ int err;
+ z = __ieee754_y0(x);
+ if(_LIB_VERSION == _IEEE_ || fd_isnan(x) ) return z;
+ if(x <= 0.0){
+ if(x==0.0)
+ /* d= -one/(x-x); */
+ return __kernel_standard(x,x,8,&err);
+ else
+ /* d = zero/(x-x); */
+ return __kernel_standard(x,x,9,&err);
+ }
+ if(x>X_TLOSS) {
+ return __kernel_standard(x,x,35,&err); /* y0(x>X_TLOSS) */
+ } else
+ return z;
+#endif
+}
diff --git a/src/third_party/js-1.7/fdlibm/w_j1.c b/src/third_party/js-1.7/fdlibm/w_j1.c
new file mode 100644
index 00000000000..86a506bc289
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/w_j1.c
@@ -0,0 +1,106 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_j1.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrapper of j1,y1
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double fd_j1(double x) /* wrapper j1 */
+#else
+ double fd_j1(x) /* wrapper j1 */
+ double x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_j1(x);
+#else
+ double z;
+ z = __ieee754_j1(x);
+ if(_LIB_VERSION == _IEEE_ || fd_isnan(x) ) return z;
+ if(fd_fabs(x)>X_TLOSS) {
+ int err;
+ return __kernel_standard(x,x,36,&err); /* j1(|x|>X_TLOSS) */
+ } else
+ return z;
+#endif
+}
+
+#ifdef __STDC__
+ double y1(double x) /* wrapper y1 */
+#else
+ double y1(x) /* wrapper y1 */
+ double x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_y1(x);
+#else
+ double z;
+ int err;
+ z = __ieee754_y1(x);
+ if(_LIB_VERSION == _IEEE_ || fd_isnan(x) ) return z;
+ if(x <= 0.0){
+ if(x==0.0)
+ /* d= -one/(x-x); */
+ return __kernel_standard(x,x,10,&err);
+ else
+ /* d = zero/(x-x); */
+ return __kernel_standard(x,x,11,&err);
+ }
+ if(x>X_TLOSS) {
+ return __kernel_standard(x,x,37,&err); /* y1(x>X_TLOSS) */
+ } else
+ return z;
+#endif
+}
diff --git a/src/third_party/js-1.7/fdlibm/w_jn.c b/src/third_party/js-1.7/fdlibm/w_jn.c
new file mode 100644
index 00000000000..6926b0da87a
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/w_jn.c
@@ -0,0 +1,128 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_jn.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrapper jn(int n, double x), yn(int n, double x)
+ * floating point Bessel functions of the 1st and 2nd kind
+ * of order n
+ *
+ * Special cases:
+ * y0(0)=y1(0)=yn(n,0) = -inf with division by zero signal;
+ * y0(-ve)=y1(-ve)=yn(n,-ve) are NaN with invalid signal.
+ * Note 2. About jn(n,x), yn(n,x)
+ * For n=0, j0(x) is called,
+ * for n=1, j1(x) is called,
+ * for n<x, forward recursion is used starting
+ * from values of j0(x) and j1(x).
+ * for n>x, a continued fraction approximation to
+ * j(n,x)/j(n-1,x) is evaluated and then backward
+ * recursion is used starting from a supposed value
+ * for j(n,x). The resulting value of j(0,x) is
+ * compared with the actual value to correct the
+ * supposed value of j(n,x).
+ *
+ * yn(n,x) is similar in all respects, except
+ * that forward recursion is used for all
+ * values of n>1.
+ *
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double fd_jn(int n, double x) /* wrapper jn */
+#else
+ double fd_jn(n,x) /* wrapper jn */
+ double x; int n;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_jn(n,x);
+#else
+ double z;
+ z = __ieee754_jn(n,x);
+ if(_LIB_VERSION == _IEEE_ || fd_isnan(x) ) return z;
+ if(fd_fabs(x)>X_TLOSS) {
+ int err;
+ return __kernel_standard((double)n,x,38,&err); /* jn(|x|>X_TLOSS,n) */
+ } else
+ return z;
+#endif
+}
+
+#ifdef __STDC__
+ double yn(int n, double x) /* wrapper yn */
+#else
+ double yn(n,x) /* wrapper yn */
+ double x; int n;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_yn(n,x);
+#else
+ double z;
+ int err;
+ z = __ieee754_yn(n,x);
+ if(_LIB_VERSION == _IEEE_ || fd_isnan(x) ) return z;
+ if(x <= 0.0){
+ if(x==0.0)
+ /* d= -one/(x-x); */
+ return __kernel_standard((double)n,x,12,&err);
+ else
+ /* d = zero/(x-x); */
+ return __kernel_standard((double)n,x,13,&err);
+ }
+ if(x>X_TLOSS) {
+ return __kernel_standard((double)n,x,39,&err); /* yn(x>X_TLOSS,n) */
+ } else
+ return z;
+#endif
+}
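
The comment above describes the method used by __ieee754_jn: start from j0 and j1 and recurse upward when n < x, and switch to a continued fraction plus backward recursion when n > x. The forward part is the three-term recurrence J(k+1,x) = (2k/x)*J(k,x) - J(k-1,x); a short illustrative sketch using the fd_j0/fd_j1 wrappers defined earlier in this directory (jn_forward itself is not part of the tree, and the recurrence is only stable while k < x, which is exactly why the kernel switches strategies):

#include "fdlibm.h"

/* Forward recurrence for J_n(x); illustrative only. */
static double jn_forward(int n, double x)
{
    double jkm1 = fd_j0(x);   /* J_0(x) */
    double jk   = fd_j1(x);   /* J_1(x) */
    int k;

    if (n == 0) return jkm1;
    for (k = 1; k < n; k++) {
        double jkp1 = (2.0 * k / x) * jk - jkm1;
        jkm1 = jk;
        jk   = jkp1;
    }
    return jk;                /* J_n(x) */
}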
diff --git a/src/third_party/js-1.7/fdlibm/w_lgamma.c b/src/third_party/js-1.7/fdlibm/w_lgamma.c
new file mode 100644
index 00000000000..f7576e89213
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/w_lgamma.c
@@ -0,0 +1,85 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_lgamma.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ *
+ */
+
+/* double lgamma(double x)
+ * Return the logarithm of the Gamma function of x.
+ *
+ * Method: call __ieee754_lgamma_r
+ */
+
+#include "fdlibm.h"
+
+extern int signgam;
+
+#ifdef __STDC__
+ double fd_lgamma(double x)
+#else
+ double fd_lgamma(x)
+ double x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_lgamma_r(x,&signgam);
+#else
+ double y;
+ y = __ieee754_lgamma_r(x,&signgam);
+ if(_LIB_VERSION == _IEEE_) return y;
+ if(!fd_finite(y)&&fd_finite(x)) {
+ int err;
+ if(fd_floor(x)==x&&x<=0.0)
+ return __kernel_standard(x,x,15,&err); /* lgamma pole */
+ else
+ return __kernel_standard(x,x,14,&err); /* lgamma overflow */
+ } else
+ return y;
+#endif
+}
diff --git a/src/third_party/js-1.7/fdlibm/w_lgamma_r.c b/src/third_party/js-1.7/fdlibm/w_lgamma_r.c
new file mode 100644
index 00000000000..ba2ad5933d5
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/w_lgamma_r.c
@@ -0,0 +1,81 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_lgamma_r.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrapper double lgamma_r(double x, int *signgamp)
+ */
+
+#include "fdlibm.h"
+
+
+#ifdef __STDC__
+ double fd_lgamma_r(double x, int *signgamp) /* wrapper lgamma_r */
+#else
+ double fd_lgamma_r(x,signgamp) /* wrapper lgamma_r */
+ double x; int *signgamp;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_lgamma_r(x,signgamp);
+#else
+ double y;
+ y = __ieee754_lgamma_r(x,signgamp);
+ if(_LIB_VERSION == _IEEE_) return y;
+ if(!fd_finite(y)&&fd_finite(x)) {
+ int err;
+ if(fd_floor(x)==x&&x<=0.0)
+ return __kernel_standard(x,x,15,&err); /* lgamma pole */
+ else
+ return __kernel_standard(x,x,14,&err); /* lgamma overflow */
+ } else
+ return y;
+#endif
+}
diff --git a/src/third_party/js-1.7/fdlibm/w_log.c b/src/third_party/js-1.7/fdlibm/w_log.c
new file mode 100644
index 00000000000..7e358fcf117
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/w_log.c
@@ -0,0 +1,78 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_log.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrapper log(x)
+ */
+
+#include "fdlibm.h"
+
+
+#ifdef __STDC__
+ double fd_log(double x) /* wrapper log */
+#else
+ double fd_log(x) /* wrapper log */
+ double x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_log(x);
+#else
+ double z;
+ int err;
+ z = __ieee754_log(x);
+ if(_LIB_VERSION == _IEEE_ || fd_isnan(x) || x > 0.0) return z;
+ if(x==0.0)
+ return __kernel_standard(x,x,16,&err); /* log(0) */
+ else
+ return __kernel_standard(x,x,17,&err); /* log(x<0) */
+#endif
+}
diff --git a/src/third_party/js-1.7/fdlibm/w_log10.c b/src/third_party/js-1.7/fdlibm/w_log10.c
new file mode 100644
index 00000000000..6b298b236f6
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/w_log10.c
@@ -0,0 +1,81 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_log10.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrapper log10(X)
+ */
+
+#include "fdlibm.h"
+
+
+#ifdef __STDC__
+ double fd_log10(double x) /* wrapper log10 */
+#else
+ double fd_log10(x) /* wrapper log10 */
+ double x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_log10(x);
+#else
+ double z;
+ z = __ieee754_log10(x);
+ if(_LIB_VERSION == _IEEE_ || fd_isnan(x)) return z;
+ if(x<=0.0) {
+ int err;
+ if(x==0.0)
+ return __kernel_standard(x,x,18,&err); /* log10(0) */
+ else
+ return __kernel_standard(x,x,19,&err); /* log10(x<0) */
+ } else
+ return z;
+#endif
+}
diff --git a/src/third_party/js-1.7/fdlibm/w_pow.c b/src/third_party/js-1.7/fdlibm/w_pow.c
new file mode 100644
index 00000000000..3d2c15ad305
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/w_pow.c
@@ -0,0 +1,99 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+
+/* @(#)w_pow.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrapper pow(x,y) return x**y
+ */
+
+#include "fdlibm.h"
+
+
+#ifdef __STDC__
+ double fd_pow(double x, double y) /* wrapper pow */
+#else
+ double fd_pow(x,y) /* wrapper pow */
+ double x,y;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_pow(x,y);
+#else
+ double z;
+ int err;
+ z=__ieee754_pow(x,y);
+ if(_LIB_VERSION == _IEEE_|| fd_isnan(y)) return z;
+ if(fd_isnan(x)) {
+ if(y==0.0)
+ return __kernel_standard(x,y,42,&err); /* pow(NaN,0.0) */
+ else
+ return z;
+ }
+ if(x==0.0){
+ if(y==0.0)
+ return __kernel_standard(x,y,20,&err); /* pow(0.0,0.0) */
+ if(fd_finite(y)&&y<0.0)
+ return __kernel_standard(x,y,23,&err); /* pow(0.0,negative) */
+ return z;
+ }
+ if(!fd_finite(z)) {
+ if(fd_finite(x)&&fd_finite(y)) {
+ if(fd_isnan(z))
+ return __kernel_standard(x,y,24,&err); /* pow neg**non-int */
+ else
+ return __kernel_standard(x,y,21,&err); /* pow overflow */
+ }
+ }
+ if(z==0.0&&fd_finite(x)&&fd_finite(y))
+ return __kernel_standard(x,y,22,&err); /* pow underflow */
+ return z;
+#endif
+}
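
For reference, these inputs reach each __kernel_standard branch of fd_pow above (taken only when _LIB_VERSION is not _IEEE_; the numbers are the case codes passed to __kernel_standard):

/* fd_pow(NAN,  0.0)    -> case 42, pow(NaN,0.0)                              */
/* fd_pow(0.0,  0.0)    -> case 20, pow(0.0,0.0)                              */
/* fd_pow(0.0, -1.0)    -> case 23, pow(0.0,negative)                         */
/* fd_pow(-2.0, 0.5)    -> case 24, negative base, non-integer exponent (NaN) */
/* fd_pow(10.0, 400.0)  -> case 21, overflow to +inf                          */
/* fd_pow(10.0,-400.0)  -> case 22, underflow to 0                            */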
diff --git a/src/third_party/js-1.7/fdlibm/w_remainder.c b/src/third_party/js-1.7/fdlibm/w_remainder.c
new file mode 100644
index 00000000000..25d1ba171fa
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/w_remainder.c
@@ -0,0 +1,77 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_remainder.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrapper remainder(x,p)
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double fd_remainder(double x, double y) /* wrapper remainder */
+#else
+ double fd_remainder(x,y) /* wrapper remainder */
+ double x,y;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_remainder(x,y);
+#else
+ double z;
+ z = __ieee754_remainder(x,y);
+ if(_LIB_VERSION == _IEEE_ || fd_isnan(y)) return z;
+ if(y==0.0) {
+ int err;
+ return __kernel_standard(x,y,28,&err); /* remainder(x,0) */
+ } else
+ return z;
+#endif
+}
diff --git a/src/third_party/js-1.7/fdlibm/w_scalb.c b/src/third_party/js-1.7/fdlibm/w_scalb.c
new file mode 100644
index 00000000000..35c16a50014
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/w_scalb.c
@@ -0,0 +1,95 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_scalb.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrapper scalb(double x, double fn) is provided for
+ * passing various standard test suites. One
+ * should use scalbn() instead.
+ */
+
+#include "fdlibm.h"
+
+#include <errno.h>
+
+#ifdef __STDC__
+#ifdef _SCALB_INT
+ double fd_scalb(double x, int fn) /* wrapper scalb */
+#else
+ double fd_scalb(double x, double fn) /* wrapper scalb */
+#endif
+#else
+ double fd_scalb(x,fn) /* wrapper scalb */
+#ifdef _SCALB_INT
+ double x; int fn;
+#else
+ double x,fn;
+#endif
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_scalb(x,fn);
+#else
+ double z;
+ int err;
+ z = __ieee754_scalb(x,fn);
+ if(_LIB_VERSION == _IEEE_) return z;
+ if(!(fd_finite(z)||fd_isnan(z))&&fd_finite(x)) {
+ return __kernel_standard(x,(double)fn,32,&err); /* scalb overflow */
+ }
+ if(z==0.0&&z!=x) {
+ return __kernel_standard(x,(double)fn,33,&err); /* scalb underflow */
+ }
+#ifndef _SCALB_INT
+ if(!fd_finite(fn)) errno = ERANGE;
+#endif
+ return z;
+#endif
+}
diff --git a/src/third_party/js-1.7/fdlibm/w_sinh.c b/src/third_party/js-1.7/fdlibm/w_sinh.c
new file mode 100644
index 00000000000..8b04ecb7fe9
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/w_sinh.c
@@ -0,0 +1,77 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_sinh.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrapper sinh(x)
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double fd_sinh(double x) /* wrapper sinh */
+#else
+ double fd_sinh(x) /* wrapper sinh */
+ double x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_sinh(x);
+#else
+ double z;
+ z = __ieee754_sinh(x);
+ if(_LIB_VERSION == _IEEE_) return z;
+ if(!fd_finite(z)&&fd_finite(x)) {
+ int err;
+ return __kernel_standard(x,x,25,&err); /* sinh overflow */
+ } else
+ return z;
+#endif
+}
diff --git a/src/third_party/js-1.7/fdlibm/w_sqrt.c b/src/third_party/js-1.7/fdlibm/w_sqrt.c
new file mode 100644
index 00000000000..462d776f8ef
--- /dev/null
+++ b/src/third_party/js-1.7/fdlibm/w_sqrt.c
@@ -0,0 +1,77 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Sun Microsystems, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* @(#)w_sqrt.c 1.3 95/01/18 */
+/*
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunSoft, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * wrapper sqrt(x)
+ */
+
+#include "fdlibm.h"
+
+#ifdef __STDC__
+ double fd_sqrt(double x) /* wrapper sqrt */
+#else
+ double fd_sqrt(x) /* wrapper sqrt */
+ double x;
+#endif
+{
+#ifdef _IEEE_LIBM
+ return __ieee754_sqrt(x);
+#else
+ double z;
+ z = __ieee754_sqrt(x);
+ if(_LIB_VERSION == _IEEE_ || fd_isnan(x)) return z;
+ if(x<0.0) {
+ int err;
+ return __kernel_standard(x,x,26,&err); /* sqrt(negative) */
+ } else
+ return z;
+#endif
+}
diff --git a/src/third_party/js-1.7/js.c b/src/third_party/js-1.7/js.c
new file mode 100644
index 00000000000..fb4332f5c9a
--- /dev/null
+++ b/src/third_party/js-1.7/js.c
@@ -0,0 +1,3181 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS shell.
+ */
+#include "jsstddef.h"
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <locale.h>
+#include "jstypes.h"
+#include "jsarena.h"
+#include "jsutil.h"
+#include "jsprf.h"
+#include "jsapi.h"
+#include "jsatom.h"
+#include "jscntxt.h"
+#include "jsdbgapi.h"
+#include "jsemit.h"
+#include "jsfun.h"
+#include "jsgc.h"
+#include "jslock.h"
+#include "jsobj.h"
+#include "jsparse.h"
+#include "jsscope.h"
+#include "jsscript.h"
+
+#ifdef PERLCONNECT
+#include "perlconnect/jsperl.h"
+#endif
+
+#ifdef LIVECONNECT
+#include "jsjava.h"
+#endif
+
+#ifdef JSDEBUGGER
+#include "jsdebug.h"
+#ifdef JSDEBUGGER_JAVA_UI
+#include "jsdjava.h"
+#endif /* JSDEBUGGER_JAVA_UI */
+#ifdef JSDEBUGGER_C_UI
+#include "jsdb.h"
+#endif /* JSDEBUGGER_C_UI */
+#endif /* JSDEBUGGER */
+
+#ifdef XP_UNIX
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#endif
+
+#if defined(XP_WIN) || defined(XP_OS2)
+#include <io.h> /* for isatty() */
+#endif
+
+typedef enum JSShellExitCode {
+ EXITCODE_RUNTIME_ERROR = 3,
+ EXITCODE_FILE_NOT_FOUND = 4,
+ EXITCODE_OUT_OF_MEMORY = 5
+} JSShellExitCode;
+
+size_t gStackChunkSize = 8192;
+
+/* Assume that we can not use more than 5e5 bytes of C stack by default. */
+static size_t gMaxStackSize = 500000;
+
+static jsuword gStackBase;
+int gExitCode = 0;
+JSBool gQuitting = JS_FALSE;
+FILE *gErrFile = NULL;
+FILE *gOutFile = NULL;
+
+#ifdef JSDEBUGGER
+static JSDContext *_jsdc;
+#ifdef JSDEBUGGER_JAVA_UI
+static JSDJContext *_jsdjc;
+#endif /* JSDEBUGGER_JAVA_UI */
+#endif /* JSDEBUGGER */
+
+static JSBool reportWarnings = JS_TRUE;
+static JSBool compileOnly = JS_FALSE;
+
+typedef enum JSShellErrNum {
+#define MSG_DEF(name, number, count, exception, format) \
+ name = number,
+#include "jsshell.msg"
+#undef MSG_DEF
+ JSShellErr_Limit
+#undef MSGDEF
+} JSShellErrNum;
+
+static const JSErrorFormatString *
+my_GetErrorMessage(void *userRef, const char *locale, const uintN errorNumber);
+static JSObject *
+split_setup(JSContext *cx);
+
+#ifdef EDITLINE
+extern char *readline(const char *prompt);
+extern void add_history(char *line);
+#endif
+
+static JSBool
+GetLine(JSContext *cx, char *bufp, FILE *file, const char *prompt) {
+#ifdef EDITLINE
+ /*
+ * Use readline only if file is stdin, because there's no way to specify
+ * another handle. Are other filehandles interactive?
+ */
+ if (file == stdin) {
+ char *linep = readline(prompt);
+ if (!linep)
+ return JS_FALSE;
+ if (linep[0] != '\0')
+ add_history(linep);
+ strcpy(bufp, linep);
+ JS_free(cx, linep);
+ bufp += strlen(bufp);
+ *bufp++ = '\n';
+ *bufp = '\0';
+ } else
+#endif
+ {
+ char line[256];
+ fprintf(gOutFile, prompt);
+ fflush(gOutFile);
+ if (!fgets(line, sizeof line, file))
+ return JS_FALSE;
+ strcpy(bufp, line);
+ }
+ return JS_TRUE;
+}
+
+static void
+Process(JSContext *cx, JSObject *obj, char *filename, JSBool forceTTY)
+{
+ JSBool ok, hitEOF;
+ JSScript *script;
+ jsval result;
+ JSString *str;
+ char buffer[4096];
+ char *bufp;
+ int lineno;
+ int startline;
+ FILE *file;
+ jsuword stackLimit;
+
+ if (forceTTY || !filename || strcmp(filename, "-") == 0) {
+ file = stdin;
+ } else {
+ file = fopen(filename, "r");
+ if (!file) {
+ JS_ReportErrorNumber(cx, my_GetErrorMessage, NULL,
+ JSSMSG_CANT_OPEN, filename, strerror(errno));
+ gExitCode = EXITCODE_FILE_NOT_FOUND;
+ return;
+ }
+ }
+
+ if (gMaxStackSize == 0) {
+ /*
+ * Disable checking for stack overflow if limit is zero.
+ */
+ stackLimit = 0;
+ } else {
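+        /*
+         * The limit is an absolute stack address computed from the recorded
+         * base; which side of the base it lies on depends on the platform's
+         * stack growth direction.
+         */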
+#if JS_STACK_GROWTH_DIRECTION > 0
+ stackLimit = gStackBase + gMaxStackSize;
+#else
+ stackLimit = gStackBase - gMaxStackSize;
+#endif
+ }
+ JS_SetThreadStackLimit(cx, stackLimit);
+
+ if (!forceTTY && !isatty(fileno(file))) {
+ /*
+ * It's not interactive - just execute it.
+ *
+ * Support the UNIX #! shell hack; gobble the first line if it starts
+ * with '#'. TODO - this isn't quite compatible with sharp variables,
+ * as a legal js program (using sharp variables) might start with '#'.
+ * But that would require multi-character lookahead.
+ */
+ int ch = fgetc(file);
+ if (ch == '#') {
+ while((ch = fgetc(file)) != EOF) {
+ if (ch == '\n' || ch == '\r')
+ break;
+ }
+ }
+ ungetc(ch, file);
+ script = JS_CompileFileHandle(cx, obj, filename, file);
+ if (script) {
+ if (!compileOnly)
+ (void)JS_ExecuteScript(cx, obj, script, &result);
+ JS_DestroyScript(cx, script);
+ }
+
+ return;
+ }
+
+ /* It's an interactive filehandle; drop into read-eval-print loop. */
+ lineno = 1;
+ hitEOF = JS_FALSE;
+ do {
+ bufp = buffer;
+ *bufp = '\0';
+
+ /*
+ * Accumulate lines until we get a 'compilable unit' - one that either
+ * generates an error (before running out of source) or that compiles
+ * cleanly. This should be whenever we get a complete statement that
+ * coincides with the end of a line.
+ */
+ startline = lineno;
+ do {
+ if (!GetLine(cx, bufp, file, startline == lineno ? "js> " : "")) {
+ hitEOF = JS_TRUE;
+ break;
+ }
+ bufp += strlen(bufp);
+ lineno++;
+ } while (!JS_BufferIsCompilableUnit(cx, obj, buffer, strlen(buffer)));
+
+ /* Clear any pending exception from previous failed compiles. */
+ JS_ClearPendingException(cx);
+ script = JS_CompileScript(cx, obj, buffer, strlen(buffer), "typein",
+ startline);
+ if (script) {
+ if (!compileOnly) {
+ ok = JS_ExecuteScript(cx, obj, script, &result);
+ if (ok && result != JSVAL_VOID) {
+ str = JS_ValueToString(cx, result);
+ if (str)
+ fprintf(gOutFile, "%s\n", JS_GetStringBytes(str));
+ else
+ ok = JS_FALSE;
+ }
+ }
+ JS_DestroyScript(cx, script);
+ }
+ } while (!hitEOF && !gQuitting);
+ fprintf(gOutFile, "\n");
+ return;
+}
+
+static int
+usage(void)
+{
+ fprintf(gErrFile, "%s\n", JS_GetImplementationVersion());
+ fprintf(gErrFile, "usage: js [-PswWxCi] [-b branchlimit] [-c stackchunksize] [-v version] [-f scriptfile] [-e script] [-S maxstacksize] [scriptfile] [scriptarg...]\n");
+ return 2;
+}
+
+static uint32 gBranchCount;
+static uint32 gBranchLimit;
+
+static JSBool
+my_BranchCallback(JSContext *cx, JSScript *script)
+{
+ if (++gBranchCount == gBranchLimit) {
+ if (script) {
+ if (script->filename)
+ fprintf(gErrFile, "%s:", script->filename);
+ fprintf(gErrFile, "%u: script branch callback (%u callbacks)\n",
+ script->lineno, gBranchLimit);
+ } else {
+ fprintf(gErrFile, "native branch callback (%u callbacks)\n",
+ gBranchLimit);
+ }
+ gBranchCount = 0;
+ return JS_FALSE;
+ }
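+    /* Give the garbage collector a chance roughly every 16K callbacks. */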
+ if ((gBranchCount & 0x3fff) == 1)
+ JS_MaybeGC(cx);
+ return JS_TRUE;
+}
+
+extern JSClass global_class;
+
+static int
+ProcessArgs(JSContext *cx, JSObject *obj, char **argv, int argc)
+{
+ int i, j, length;
+ JSObject *argsObj;
+ char *filename = NULL;
+ JSBool isInteractive = JS_TRUE;
+ JSBool forceTTY = JS_FALSE;
+
+ /*
+ * Scan past all optional arguments so we can create the arguments object
+ * before processing any -f options, which must interleave properly with
+ * -v and -w options. This requires two passes, and without getopt, we'll
+ * have to keep the option logic here and in the second for loop in sync.
+ */
+ for (i = 0; i < argc; i++) {
+ if (argv[i][0] != '-' || argv[i][1] == '\0') {
+ ++i;
+ break;
+ }
+ switch (argv[i][1]) {
+ case 'b':
+ case 'c':
+ case 'f':
+ case 'e':
+ case 'v':
+ case 'S':
+ ++i;
+ break;
+ default:;
+ }
+ }
+
+ /*
+ * Create arguments early and define it to root it, so it's safe from any
+ * GC calls nested below, and so it is available to -f <file> arguments.
+ */
+ argsObj = JS_NewArrayObject(cx, 0, NULL);
+ if (!argsObj)
+ return 1;
+ if (!JS_DefineProperty(cx, obj, "arguments", OBJECT_TO_JSVAL(argsObj),
+ NULL, NULL, 0)) {
+ return 1;
+ }
+
+ length = argc - i;
+ for (j = 0; j < length; j++) {
+ JSString *str = JS_NewStringCopyZ(cx, argv[i++]);
+ if (!str)
+ return 1;
+ if (!JS_DefineElement(cx, argsObj, j, STRING_TO_JSVAL(str),
+ NULL, NULL, JSPROP_ENUMERATE)) {
+ return 1;
+ }
+ }
+
+ for (i = 0; i < argc; i++) {
+ if (argv[i][0] != '-' || argv[i][1] == '\0') {
+ filename = argv[i++];
+ isInteractive = JS_FALSE;
+ break;
+ }
+
+ switch (argv[i][1]) {
+ case 'v':
+ if (++i == argc)
+ return usage();
+
+ JS_SetVersion(cx, (JSVersion) atoi(argv[i]));
+ break;
+
+ case 'w':
+ reportWarnings = JS_TRUE;
+ break;
+
+ case 'W':
+ reportWarnings = JS_FALSE;
+ break;
+
+ case 's':
+ JS_ToggleOptions(cx, JSOPTION_STRICT);
+ break;
+
+ case 'x':
+ JS_ToggleOptions(cx, JSOPTION_XML);
+ break;
+
+ case 'P':
+ if (JS_GET_CLASS(cx, JS_GetPrototype(cx, obj)) != &global_class) {
+ JSObject *gobj;
+
+ if (!JS_SealObject(cx, obj, JS_TRUE))
+ return JS_FALSE;
+ gobj = JS_NewObject(cx, &global_class, NULL, NULL);
+ if (!gobj)
+ return JS_FALSE;
+ if (!JS_SetPrototype(cx, gobj, obj))
+ return JS_FALSE;
+ JS_SetParent(cx, gobj, NULL);
+ JS_SetGlobalObject(cx, gobj);
+ obj = gobj;
+ }
+ break;
+
+ case 'b':
+ gBranchLimit = atoi(argv[++i]);
+ JS_SetBranchCallback(cx, my_BranchCallback);
+ JS_ToggleOptions(cx, JSOPTION_NATIVE_BRANCH_CALLBACK);
+ break;
+
+ case 'c':
+ /* set stack chunk size */
+ gStackChunkSize = atoi(argv[++i]);
+ break;
+
+ case 'f':
+ if (++i == argc)
+ return usage();
+
+ Process(cx, obj, argv[i], JS_FALSE);
+
+ /*
+ * XXX: js -f foo.js should interpret foo.js and then
+ * drop into interactive mode, but that breaks the test
+ * harness. Just execute foo.js for now.
+ */
+ isInteractive = JS_FALSE;
+ break;
+
+ case 'e':
+ {
+ jsval rval;
+
+ if (++i == argc)
+ return usage();
+
+ /* Pass a filename of -e to imitate PERL */
+ JS_EvaluateScript(cx, obj, argv[i], strlen(argv[i]),
+ "-e", 1, &rval);
+
+ isInteractive = JS_FALSE;
+ break;
+
+ }
+ case 'C':
+ compileOnly = JS_TRUE;
+ isInteractive = JS_FALSE;
+ break;
+
+ case 'i':
+ isInteractive = forceTTY = JS_TRUE;
+ break;
+
+ case 'S':
+ if (++i == argc)
+ return usage();
+
+ /* Set maximum stack size. */
+ gMaxStackSize = atoi(argv[i]);
+ break;
+
+ case 'z':
+ obj = split_setup(cx);
+ break;
+
+ default:
+ return usage();
+ }
+ }
+
+ if (filename || isInteractive)
+ Process(cx, obj, filename, forceTTY);
+ return gExitCode;
+}
+
+
+static JSBool
+Version(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ if (argc > 0 && JSVAL_IS_INT(argv[0]))
+ *rval = INT_TO_JSVAL(JS_SetVersion(cx, (JSVersion) JSVAL_TO_INT(argv[0])));
+ else
+ *rval = INT_TO_JSVAL(JS_GetVersion(cx));
+ return JS_TRUE;
+}
+
+static struct {
+ const char *name;
+ uint32 flag;
+} js_options[] = {
+ {"strict", JSOPTION_STRICT},
+ {"werror", JSOPTION_WERROR},
+ {"atline", JSOPTION_ATLINE},
+ {"xml", JSOPTION_XML},
+ {0, 0}
+};
+
+static JSBool
+Options(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ uint32 optset, flag;
+ uintN i, j, found;
+ JSString *str;
+ const char *opt;
+ char *names;
+
+ optset = 0;
+ for (i = 0; i < argc; i++) {
+ str = JS_ValueToString(cx, argv[i]);
+ if (!str)
+ return JS_FALSE;
+ opt = JS_GetStringBytes(str);
+ for (j = 0; js_options[j].name; j++) {
+ if (strcmp(js_options[j].name, opt) == 0) {
+ optset |= js_options[j].flag;
+ break;
+ }
+ }
+ }
+ optset = JS_ToggleOptions(cx, optset);
+
+ names = NULL;
+ found = 0;
+ while (optset != 0) {
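+        /* Isolate the lowest set bit of optset into flag, then clear it. */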
+ flag = optset;
+ optset &= optset - 1;
+ flag &= ~optset;
+ for (j = 0; js_options[j].name; j++) {
+ if (js_options[j].flag == flag) {
+ names = JS_sprintf_append(names, "%s%s",
+ names ? "," : "", js_options[j].name);
+ found++;
+ break;
+ }
+ }
+ }
+ if (!found)
+ names = strdup("");
+ if (!names) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+
+ str = JS_NewString(cx, names, strlen(names));
+ if (!str) {
+ free(names);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+Load(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ uintN i;
+ JSString *str;
+ const char *filename;
+ JSScript *script;
+ JSBool ok;
+ jsval result;
+ uint32 oldopts;
+
+ for (i = 0; i < argc; i++) {
+ str = JS_ValueToString(cx, argv[i]);
+ if (!str)
+ return JS_FALSE;
+ argv[i] = STRING_TO_JSVAL(str);
+ filename = JS_GetStringBytes(str);
+ errno = 0;
+ oldopts = JS_GetOptions(cx);
+ JS_SetOptions(cx, oldopts | JSOPTION_COMPILE_N_GO);
+ script = JS_CompileFile(cx, obj, filename);
+ if (!script) {
+ ok = JS_FALSE;
+ } else {
+ ok = !compileOnly
+ ? JS_ExecuteScript(cx, obj, script, &result)
+ : JS_TRUE;
+ JS_DestroyScript(cx, script);
+ }
+ JS_SetOptions(cx, oldopts);
+ if (!ok)
+ return JS_FALSE;
+ }
+
+ return JS_TRUE;
+}
+
+/*
+ * function readline()
+ * Provides a hook for scripts to read a line from stdin.
+ */
+static JSBool
+ReadLine(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+#define BUFSIZE 256
+ FILE *from;
+ char *buf, *tmp;
+ size_t bufsize, buflength, gotlength;
+ JSString *str;
+
+ from = stdin;
+ buflength = 0;
+ bufsize = BUFSIZE;
+ buf = JS_malloc(cx, bufsize);
+ if (!buf)
+ return JS_FALSE;
+
+ while ((gotlength =
+ js_fgets(buf + buflength, bufsize - buflength, from)) > 0) {
+ buflength += gotlength;
+
+ /* Are we done? */
+ if (buf[buflength - 1] == '\n') {
+ buf[buflength - 1] = '\0';
+ break;
+ }
+
+ /* Else, grow our buffer for another pass. */
+ tmp = JS_realloc(cx, buf, bufsize * 2);
+ if (!tmp) {
+ JS_free(cx, buf);
+ return JS_FALSE;
+ }
+
+ bufsize *= 2;
+ buf = tmp;
+ }
+
+ /* Treat the empty string specially. */
+ if (buflength == 0) {
+ *rval = JS_GetEmptyStringValue(cx);
+ JS_free(cx, buf);
+ return JS_TRUE;
+ }
+
+ /* Shrink the buffer to the real size. */
+ tmp = JS_realloc(cx, buf, buflength);
+ if (!tmp) {
+ JS_free(cx, buf);
+ return JS_FALSE;
+ }
+
+ buf = tmp;
+
+ /*
+ * Turn buf into a JSString. Note that buflength includes the trailing null
+ * character.
+ */
+ str = JS_NewString(cx, buf, buflength - 1);
+ if (!str) {
+ JS_free(cx, buf);
+ return JS_FALSE;
+ }
+
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+Print(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ uintN i, n;
+ JSString *str;
+
+ for (i = n = 0; i < argc; i++) {
+ str = JS_ValueToString(cx, argv[i]);
+ if (!str)
+ return JS_FALSE;
+ fprintf(gOutFile, "%s%s", i ? " " : "", JS_GetStringBytes(str));
+ }
+ n++;
+ if (n)
+ fputc('\n', gOutFile);
+ return JS_TRUE;
+}
+
+static JSBool
+Help(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval);
+
+static JSBool
+Quit(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+#ifdef LIVECONNECT
+ JSJ_SimpleShutdown();
+#endif
+
+ JS_ConvertArguments(cx, argc, argv,"/ i", &gExitCode);
+
+ gQuitting = JS_TRUE;
+ return JS_FALSE;
+}
+
+static JSBool
+GC(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSRuntime *rt;
+ uint32 preBytes;
+
+ rt = cx->runtime;
+ preBytes = rt->gcBytes;
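+    /*
+     * In GC_MARK_DEBUG builds an optional string argument names a file to
+     * which the GC heap is dumped during collection (default: stdout).
+     */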
+#ifdef GC_MARK_DEBUG
+ if (argc && JSVAL_IS_STRING(argv[0])) {
+ char *name = JS_GetStringBytes(JSVAL_TO_STRING(argv[0]));
+ FILE *file = fopen(name, "w");
+ if (!file) {
+            fprintf(gErrFile, "gc: can't open %s: %s\n", name, strerror(errno));
+ return JS_FALSE;
+ }
+ js_DumpGCHeap = file;
+ } else {
+ js_DumpGCHeap = stdout;
+ }
+#endif
+ JS_GC(cx);
+#ifdef GC_MARK_DEBUG
+ if (js_DumpGCHeap != stdout)
+ fclose(js_DumpGCHeap);
+ js_DumpGCHeap = NULL;
+#endif
+ fprintf(gOutFile, "before %lu, after %lu, break %08lx\n",
+ (unsigned long)preBytes, (unsigned long)rt->gcBytes,
+#ifdef XP_UNIX
+ (unsigned long)sbrk(0)
+#else
+ 0
+#endif
+ );
+#ifdef JS_GCMETER
+ js_DumpGCStats(rt, stdout);
+#endif
+ return JS_TRUE;
+}
+
+static JSScript *
+ValueToScript(JSContext *cx, jsval v)
+{
+ JSScript *script;
+ JSFunction *fun;
+
+ if (!JSVAL_IS_PRIMITIVE(v) &&
+ JS_GET_CLASS(cx, JSVAL_TO_OBJECT(v)) == &js_ScriptClass) {
+ script = (JSScript *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(v));
+ } else {
+ fun = JS_ValueToFunction(cx, v);
+ if (!fun)
+ return NULL;
+ script = FUN_SCRIPT(fun);
+ }
+ return script;
+}
+
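+/*
+ * Shared argument parsing for trap, untrap, line2pc and pc2line: an optional
+ * function or script object selects the target script (default: the calling
+ * script), and an optional integer supplies the bytecode offset or line.
+ */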
+static JSBool
+GetTrapArgs(JSContext *cx, uintN argc, jsval *argv, JSScript **scriptp,
+ int32 *ip)
+{
+ jsval v;
+ uintN intarg;
+ JSScript *script;
+
+ *scriptp = cx->fp->down->script;
+ *ip = 0;
+ if (argc != 0) {
+ v = argv[0];
+ intarg = 0;
+ if (!JSVAL_IS_PRIMITIVE(v) &&
+ (JS_GET_CLASS(cx, JSVAL_TO_OBJECT(v)) == &js_FunctionClass ||
+ JS_GET_CLASS(cx, JSVAL_TO_OBJECT(v)) == &js_ScriptClass)) {
+ script = ValueToScript(cx, v);
+ if (!script)
+ return JS_FALSE;
+ *scriptp = script;
+ intarg++;
+ }
+ if (argc > intarg) {
+ if (!JS_ValueToInt32(cx, argv[intarg], ip))
+ return JS_FALSE;
+ }
+ }
+ return JS_TRUE;
+}
+
+static JSTrapStatus
+TrapHandler(JSContext *cx, JSScript *script, jsbytecode *pc, jsval *rval,
+ void *closure)
+{
+ JSString *str;
+ JSStackFrame *caller;
+
+ str = (JSString *) closure;
+ caller = JS_GetScriptedCaller(cx, NULL);
+ if (!JS_EvaluateScript(cx, caller->scopeChain,
+ JS_GetStringBytes(str), JS_GetStringLength(str),
+ caller->script->filename, caller->script->lineno,
+ rval)) {
+ return JSTRAP_ERROR;
+ }
+ if (*rval != JSVAL_VOID)
+ return JSTRAP_RETURN;
+ return JSTRAP_CONTINUE;
+}
+
+static JSBool
+Trap(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str;
+ JSScript *script;
+ int32 i;
+
+ if (argc == 0) {
+ JS_ReportErrorNumber(cx, my_GetErrorMessage, NULL, JSSMSG_TRAP_USAGE);
+ return JS_FALSE;
+ }
+ argc--;
+ str = JS_ValueToString(cx, argv[argc]);
+ if (!str)
+ return JS_FALSE;
+ argv[argc] = STRING_TO_JSVAL(str);
+ if (!GetTrapArgs(cx, argc, argv, &script, &i))
+ return JS_FALSE;
+ return JS_SetTrap(cx, script, script->code + i, TrapHandler, str);
+}
+
+static JSBool
+Untrap(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSScript *script;
+ int32 i;
+
+ if (!GetTrapArgs(cx, argc, argv, &script, &i))
+ return JS_FALSE;
+ JS_ClearTrap(cx, script, script->code + i, NULL, NULL);
+ return JS_TRUE;
+}
+
+static JSBool
+LineToPC(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSScript *script;
+ int32 i;
+ uintN lineno;
+ jsbytecode *pc;
+
+ if (argc == 0) {
+ JS_ReportErrorNumber(cx, my_GetErrorMessage, NULL, JSSMSG_LINE2PC_USAGE);
+ return JS_FALSE;
+ }
+ script = cx->fp->down->script;
+ if (!GetTrapArgs(cx, argc, argv, &script, &i))
+ return JS_FALSE;
+ lineno = (i == 0) ? script->lineno : (uintN)i;
+ pc = JS_LineNumberToPC(cx, script, lineno);
+ if (!pc)
+ return JS_FALSE;
+ *rval = INT_TO_JSVAL(PTRDIFF(pc, script->code, jsbytecode));
+ return JS_TRUE;
+}
+
+static JSBool
+PCToLine(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSScript *script;
+ int32 i;
+ uintN lineno;
+
+ if (!GetTrapArgs(cx, argc, argv, &script, &i))
+ return JS_FALSE;
+ lineno = JS_PCToLineNumber(cx, script, script->code + i);
+ if (!lineno)
+ return JS_FALSE;
+ *rval = INT_TO_JSVAL(lineno);
+ return JS_TRUE;
+}
+
+#ifdef DEBUG
+
+static void
+GetSwitchTableBounds(JSScript *script, uintN offset,
+ uintN *start, uintN *end)
+{
+ jsbytecode *pc;
+ JSOp op;
+ ptrdiff_t jmplen;
+ jsint low, high, n;
+
+ pc = script->code + offset;
+ op = *pc;
+ switch (op) {
+ case JSOP_TABLESWITCHX:
+ jmplen = JUMPX_OFFSET_LEN;
+ goto jump_table;
+ case JSOP_TABLESWITCH:
+ jmplen = JUMP_OFFSET_LEN;
+ jump_table:
+ pc += jmplen;
+ low = GET_JUMP_OFFSET(pc);
+ pc += JUMP_OFFSET_LEN;
+ high = GET_JUMP_OFFSET(pc);
+ pc += JUMP_OFFSET_LEN;
+ n = high - low + 1;
+ break;
+
+ case JSOP_LOOKUPSWITCHX:
+ jmplen = JUMPX_OFFSET_LEN;
+ goto lookup_table;
+ default:
+ JS_ASSERT(op == JSOP_LOOKUPSWITCH);
+ jmplen = JUMP_OFFSET_LEN;
+ lookup_table:
+ pc += jmplen;
+ n = GET_ATOM_INDEX(pc);
+ pc += ATOM_INDEX_LEN;
+ jmplen += ATOM_INDEX_LEN;
+ break;
+ }
+
+ *start = (uintN)(pc - script->code);
+ *end = *start + (uintN)(n * jmplen);
+}
+
+
+/*
+ * SrcNotes assumes that SRC_METHODBASE should be distinguished from SRC_LABEL
+ * using the bytecode the source note points to.
+ */
+JS_STATIC_ASSERT(SRC_LABEL == SRC_METHODBASE);
+
+static void
+SrcNotes(JSContext *cx, JSScript *script)
+{
+ uintN offset, delta, caseOff, switchTableStart, switchTableEnd;
+ jssrcnote *notes, *sn;
+ JSSrcNoteType type;
+ const char *name;
+ JSOp op;
+ jsatomid atomIndex;
+ JSAtom *atom;
+
+ fprintf(gOutFile, "\nSource notes:\n");
+ offset = 0;
+ notes = SCRIPT_NOTES(script);
+ switchTableEnd = switchTableStart = 0;
+ for (sn = notes; !SN_IS_TERMINATOR(sn); sn = SN_NEXT(sn)) {
+ delta = SN_DELTA(sn);
+ offset += delta;
+ type = (JSSrcNoteType) SN_TYPE(sn);
+ name = js_SrcNoteSpec[type].name;
+ if (type == SRC_LABEL) {
+ /* Heavily overloaded case. */
+ if (switchTableStart <= offset && offset < switchTableEnd) {
+ name = "case";
+ } else {
+ op = script->code[offset];
+ if (op == JSOP_GETMETHOD || op == JSOP_SETMETHOD) {
+ /* This is SRC_METHODBASE which we print as SRC_PCBASE. */
+ type = SRC_PCBASE;
+ name = "methodbase";
+ } else {
+ JS_ASSERT(op == JSOP_NOP);
+ }
+ }
+ }
+ fprintf(gOutFile, "%3u: %5u [%4u] %-8s",
+ PTRDIFF(sn, notes, jssrcnote), offset, delta, name);
+ switch (type) {
+ case SRC_SETLINE:
+ fprintf(gOutFile, " lineno %u", (uintN) js_GetSrcNoteOffset(sn, 0));
+ break;
+ case SRC_FOR:
+ fprintf(gOutFile, " cond %u update %u tail %u",
+ (uintN) js_GetSrcNoteOffset(sn, 0),
+ (uintN) js_GetSrcNoteOffset(sn, 1),
+ (uintN) js_GetSrcNoteOffset(sn, 2));
+ break;
+ case SRC_IF_ELSE:
+ fprintf(gOutFile, " else %u elseif %u",
+ (uintN) js_GetSrcNoteOffset(sn, 0),
+ (uintN) js_GetSrcNoteOffset(sn, 1));
+ break;
+ case SRC_COND:
+ case SRC_WHILE:
+ case SRC_PCBASE:
+ case SRC_PCDELTA:
+ case SRC_DECL:
+ case SRC_BRACE:
+ fprintf(gOutFile, " offset %u", (uintN) js_GetSrcNoteOffset(sn, 0));
+ break;
+ case SRC_LABEL:
+ case SRC_LABELBRACE:
+ case SRC_BREAK2LABEL:
+ case SRC_CONT2LABEL:
+ case SRC_FUNCDEF: {
+ const char *bytes;
+ JSFunction *fun;
+ JSString *str;
+
+ atomIndex = (jsatomid) js_GetSrcNoteOffset(sn, 0);
+ atom = js_GetAtom(cx, &script->atomMap, atomIndex);
+ if (type != SRC_FUNCDEF) {
+ bytes = js_AtomToPrintableString(cx, atom);
+ } else {
+ fun = (JSFunction *)
+ JS_GetPrivate(cx, ATOM_TO_OBJECT(atom));
+ str = JS_DecompileFunction(cx, fun, JS_DONT_PRETTY_PRINT);
+ bytes = str ? JS_GetStringBytes(str) : "N/A";
+ }
+ fprintf(gOutFile, " atom %u (%s)", (uintN)atomIndex, bytes);
+ break;
+ }
+ case SRC_SWITCH:
+ fprintf(gOutFile, " length %u", (uintN) js_GetSrcNoteOffset(sn, 0));
+ caseOff = (uintN) js_GetSrcNoteOffset(sn, 1);
+ if (caseOff)
+ fprintf(gOutFile, " first case offset %u", caseOff);
+ GetSwitchTableBounds(script, offset,
+ &switchTableStart, &switchTableEnd);
+ break;
+ case SRC_CATCH:
+ delta = (uintN) js_GetSrcNoteOffset(sn, 0);
+ if (delta) {
+ if (script->main[offset] == JSOP_LEAVEBLOCK)
+ fprintf(gOutFile, " stack depth %u", delta);
+ else
+ fprintf(gOutFile, " guard delta %u", delta);
+ }
+ break;
+ default:;
+ }
+ fputc('\n', gOutFile);
+ }
+}
+
+static JSBool
+Notes(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ uintN i;
+ JSScript *script;
+
+ for (i = 0; i < argc; i++) {
+ script = ValueToScript(cx, argv[i]);
+ if (!script)
+ continue;
+
+ SrcNotes(cx, script);
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+TryNotes(JSContext *cx, JSScript *script)
+{
+ JSTryNote *tn = script->trynotes;
+
+ if (!tn)
+ return JS_TRUE;
+ fprintf(gOutFile, "\nException table:\nstart\tend\tcatch\n");
+ while (tn->start && tn->catchStart) {
+ fprintf(gOutFile, " %d\t%d\t%d\n",
+ tn->start, tn->start + tn->length, tn->catchStart);
+ tn++;
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+Disassemble(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSBool lines;
+ uintN i;
+ JSScript *script;
+
+ if (argc > 0 &&
+ JSVAL_IS_STRING(argv[0]) &&
+ !strcmp(JS_GetStringBytes(JSVAL_TO_STRING(argv[0])), "-l")) {
+ lines = JS_TRUE;
+ argv++, argc--;
+ } else {
+ lines = JS_FALSE;
+ }
+ for (i = 0; i < argc; i++) {
+ script = ValueToScript(cx, argv[i]);
+ if (!script)
+ return JS_FALSE;
+
+ if (VALUE_IS_FUNCTION(cx, argv[i])) {
+ JSFunction *fun = JS_ValueToFunction(cx, argv[i]);
+ if (fun && (fun->flags & JSFUN_FLAGS_MASK)) {
+ uint16 flags = fun->flags;
+ fputs("flags:", stdout);
+
+#define SHOW_FLAG(flag) if (flags & JSFUN_##flag) fputs(" " #flag, stdout);
+
+ SHOW_FLAG(LAMBDA);
+ SHOW_FLAG(SETTER);
+ SHOW_FLAG(GETTER);
+ SHOW_FLAG(BOUND_METHOD);
+ SHOW_FLAG(HEAVYWEIGHT);
+ SHOW_FLAG(THISP_STRING);
+ SHOW_FLAG(THISP_NUMBER);
+ SHOW_FLAG(THISP_BOOLEAN);
+ SHOW_FLAG(INTERPRETED);
+
+#undef SHOW_FLAG
+ putchar('\n');
+ }
+ }
+
+ if (!js_Disassemble(cx, script, lines, stdout))
+ return JS_FALSE;
+ SrcNotes(cx, script);
+ TryNotes(cx, script);
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+DisassWithSrc(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+#define LINE_BUF_LEN 512
+ uintN i, len, line1, line2, bupline;
+ JSScript *script;
+ FILE *file;
+ char linebuf[LINE_BUF_LEN];
+ jsbytecode *pc, *end;
+ static char sep[] = ";-------------------------";
+
+ for (i = 0; i < argc; i++) {
+ script = ValueToScript(cx, argv[i]);
+ if (!script)
+ return JS_FALSE;
+
+ if (!script || !script->filename) {
+ JS_ReportErrorNumber(cx, my_GetErrorMessage, NULL,
+ JSSMSG_FILE_SCRIPTS_ONLY);
+ return JS_FALSE;
+ }
+
+ file = fopen(script->filename, "r");
+ if (!file) {
+ JS_ReportErrorNumber(cx, my_GetErrorMessage, NULL,
+ JSSMSG_CANT_OPEN,
+ script->filename, strerror(errno));
+ return JS_FALSE;
+ }
+
+ pc = script->code;
+ end = pc + script->length;
+
+ /* burn the leading lines */
+ line2 = JS_PCToLineNumber(cx, script, pc);
+ for (line1 = 0; line1 < line2 - 1; line1++)
+ fgets(linebuf, LINE_BUF_LEN, file);
+
+ bupline = 0;
+ while (pc < end) {
+ line2 = JS_PCToLineNumber(cx, script, pc);
+
+ if (line2 < line1) {
+ if (bupline != line2) {
+ bupline = line2;
+ fprintf(gOutFile, "%s %3u: BACKUP\n", sep, line2);
+ }
+ } else {
+ if (bupline && line1 == line2)
+ fprintf(gOutFile, "%s %3u: RESTORE\n", sep, line2);
+ bupline = 0;
+ while (line1 < line2) {
+ if (!fgets(linebuf, LINE_BUF_LEN, file)) {
+ JS_ReportErrorNumber(cx, my_GetErrorMessage, NULL,
+ JSSMSG_UNEXPECTED_EOF,
+ script->filename);
+ goto bail;
+ }
+ line1++;
+ fprintf(gOutFile, "%s %3u: %s", sep, line1, linebuf);
+ }
+ }
+
+ len = js_Disassemble1(cx, script, pc,
+ PTRDIFF(pc, script->code, jsbytecode),
+ JS_TRUE, stdout);
+ if (!len)
+ return JS_FALSE;
+ pc += len;
+ }
+
+ bail:
+ fclose(file);
+ }
+ return JS_TRUE;
+#undef LINE_BUF_LEN
+}
+
+static JSBool
+Tracing(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSBool bval;
+ JSString *str;
+
+ if (argc == 0) {
+ *rval = BOOLEAN_TO_JSVAL(cx->tracefp != 0);
+ return JS_TRUE;
+ }
+
+ switch (JS_TypeOfValue(cx, argv[0])) {
+ case JSTYPE_NUMBER:
+ bval = JSVAL_IS_INT(argv[0])
+ ? JSVAL_TO_INT(argv[0])
+ : (jsint) *JSVAL_TO_DOUBLE(argv[0]);
+ break;
+ case JSTYPE_BOOLEAN:
+ bval = JSVAL_TO_BOOLEAN(argv[0]);
+ break;
+ default:
+ str = JS_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ fprintf(gErrFile, "tracing: illegal argument %s\n",
+ JS_GetStringBytes(str));
+ return JS_TRUE;
+ }
+ cx->tracefp = bval ? stderr : NULL;
+ return JS_TRUE;
+}
+
+typedef struct DumpAtomArgs {
+ JSContext *cx;
+ FILE *fp;
+} DumpAtomArgs;
+
+static int
+DumpAtom(JSHashEntry *he, int i, void *arg)
+{
+ DumpAtomArgs *args = (DumpAtomArgs *)arg;
+ FILE *fp = args->fp;
+ JSAtom *atom = (JSAtom *)he;
+
+ fprintf(fp, "%3d %08x %5lu ",
+ i, (uintN)he->keyHash, (unsigned long)atom->number);
+ if (ATOM_IS_STRING(atom))
+ fprintf(fp, "\"%s\"\n", js_AtomToPrintableString(args->cx, atom));
+ else if (ATOM_IS_INT(atom))
+ fprintf(fp, "%ld\n", (long)ATOM_TO_INT(atom));
+ else
+ fprintf(fp, "%.16g\n", *ATOM_TO_DOUBLE(atom));
+ return HT_ENUMERATE_NEXT;
+}
+
+static void
+DumpScope(JSContext *cx, JSObject *obj, FILE *fp)
+{
+ uintN i;
+ JSScope *scope;
+ JSScopeProperty *sprop;
+
+ i = 0;
+ scope = OBJ_SCOPE(obj);
+ for (sprop = SCOPE_LAST_PROP(scope); sprop; sprop = sprop->parent) {
+ if (SCOPE_HAD_MIDDLE_DELETE(scope) && !SCOPE_HAS_PROPERTY(scope, sprop))
+ continue;
+ fprintf(fp, "%3u %p", i, (void *)sprop);
+ if (JSID_IS_INT(sprop->id)) {
+ fprintf(fp, " [%ld]", (long)JSVAL_TO_INT(sprop->id));
+ } else if (JSID_IS_ATOM(sprop->id)) {
+ JSAtom *atom = JSID_TO_ATOM(sprop->id);
+ fprintf(fp, " \"%s\"", js_AtomToPrintableString(cx, atom));
+ } else {
+ jsval v = OBJECT_TO_JSVAL(JSID_TO_OBJECT(sprop->id));
+ fprintf(fp, " \"%s\"", js_ValueToPrintableString(cx, v));
+ }
+
+#define DUMP_ATTR(name) if (sprop->attrs & JSPROP_##name) fputs(" " #name, fp)
+ DUMP_ATTR(ENUMERATE);
+ DUMP_ATTR(READONLY);
+ DUMP_ATTR(PERMANENT);
+ DUMP_ATTR(EXPORTED);
+ DUMP_ATTR(GETTER);
+ DUMP_ATTR(SETTER);
+#undef DUMP_ATTR
+
+ fprintf(fp, " slot %lu flags %x shortid %d\n",
+ (unsigned long)sprop->slot, sprop->flags, sprop->shortid);
+ }
+}
+
+static JSBool
+DumpStats(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ uintN i;
+ JSString *str;
+ const char *bytes;
+ JSAtom *atom;
+ JSObject *obj2;
+ JSProperty *prop;
+ jsval value;
+
+ for (i = 0; i < argc; i++) {
+ str = JS_ValueToString(cx, argv[i]);
+ if (!str)
+ return JS_FALSE;
+ bytes = JS_GetStringBytes(str);
+ if (strcmp(bytes, "arena") == 0) {
+#ifdef JS_ARENAMETER
+ JS_DumpArenaStats(stdout);
+#endif
+ } else if (strcmp(bytes, "atom") == 0) {
+ DumpAtomArgs args;
+
+ fprintf(gOutFile, "\natom table contents:\n");
+ args.cx = cx;
+ args.fp = stdout;
+ JS_HashTableEnumerateEntries(cx->runtime->atomState.table,
+ DumpAtom,
+ &args);
+#ifdef HASHMETER
+ JS_HashTableDumpMeter(cx->runtime->atomState.table,
+ DumpAtom,
+ stdout);
+#endif
+ } else if (strcmp(bytes, "global") == 0) {
+ DumpScope(cx, cx->globalObject, stdout);
+ } else {
+ atom = js_Atomize(cx, bytes, JS_GetStringLength(str), 0);
+ if (!atom)
+ return JS_FALSE;
+ if (!js_FindProperty(cx, ATOM_TO_JSID(atom), &obj, &obj2, &prop))
+ return JS_FALSE;
+ if (prop) {
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ if (!OBJ_GET_PROPERTY(cx, obj, ATOM_TO_JSID(atom), &value))
+ return JS_FALSE;
+ }
+ if (!prop || !JSVAL_IS_OBJECT(value)) {
+ fprintf(gErrFile, "js: invalid stats argument %s\n",
+ bytes);
+ continue;
+ }
+ obj = JSVAL_TO_OBJECT(value);
+ if (obj)
+ DumpScope(cx, obj, stdout);
+ }
+ }
+ return JS_TRUE;
+}
+
+#endif /* DEBUG */
+
+#ifdef TEST_EXPORT
+static JSBool
+DoExport(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSAtom *atom;
+ JSObject *obj2;
+ JSProperty *prop;
+ JSBool ok;
+ uintN attrs;
+
+ if (argc != 2) {
+ JS_ReportErrorNumber(cx, my_GetErrorMessage, NULL, JSSMSG_DOEXP_USAGE);
+ return JS_FALSE;
+ }
+ if (!JS_ValueToObject(cx, argv[0], &obj))
+ return JS_FALSE;
+ argv[0] = OBJECT_TO_JSVAL(obj);
+ atom = js_ValueToStringAtom(cx, argv[1]);
+ if (!atom)
+ return JS_FALSE;
+ if (!OBJ_LOOKUP_PROPERTY(cx, obj, ATOM_TO_JSID(atom), &obj2, &prop))
+ return JS_FALSE;
+ if (!prop) {
+        ok = OBJ_DEFINE_PROPERTY(cx, obj, ATOM_TO_JSID(atom), JSVAL_VOID,
+                                 NULL, NULL, JSPROP_EXPORTED, NULL);
+ } else {
+ ok = OBJ_GET_ATTRIBUTES(cx, obj, ATOM_TO_JSID(atom), prop, &attrs);
+ if (ok) {
+ attrs |= JSPROP_EXPORTED;
+ ok = OBJ_SET_ATTRIBUTES(cx, obj, ATOM_TO_JSID(atom), prop, &attrs);
+ }
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ }
+ return ok;
+}
+#endif
+
+#ifdef TEST_CVTARGS
+#include <ctype.h>
+
+static const char *
+EscapeWideString(jschar *w)
+{
+ static char enuf[80];
+ static char hex[] = "0123456789abcdef";
+ jschar u;
+ unsigned char b, c;
+ int i, j;
+
+ if (!w)
+ return "";
+ for (i = j = 0; i < sizeof enuf - 1; i++, j++) {
+ u = w[j];
+ if (u == 0)
+ break;
+ b = (unsigned char)(u >> 8);
+ c = (unsigned char)(u);
+ if (b) {
+ if (i >= sizeof enuf - 6)
+ break;
+ enuf[i++] = '\\';
+ enuf[i++] = 'u';
+ enuf[i++] = hex[b >> 4];
+ enuf[i++] = hex[b & 15];
+ enuf[i++] = hex[c >> 4];
+ enuf[i] = hex[c & 15];
+ } else if (!isprint(c)) {
+ if (i >= sizeof enuf - 4)
+ break;
+ enuf[i++] = '\\';
+ enuf[i++] = 'x';
+ enuf[i++] = hex[c >> 4];
+ enuf[i] = hex[c & 15];
+ } else {
+ enuf[i] = (char)c;
+ }
+ }
+ enuf[i] = 0;
+ return enuf;
+}
+
+#include <stdarg.h>
+
+static JSBool
+ZZ_formatter(JSContext *cx, const char *format, JSBool fromJS, jsval **vpp,
+ va_list *app)
+{
+ jsval *vp;
+ va_list ap;
+ jsdouble re, im;
+
+ printf("entering ZZ_formatter");
+ vp = *vpp;
+ ap = *app;
+ if (fromJS) {
+ if (!JS_ValueToNumber(cx, vp[0], &re))
+ return JS_FALSE;
+ if (!JS_ValueToNumber(cx, vp[1], &im))
+ return JS_FALSE;
+ *va_arg(ap, jsdouble *) = re;
+ *va_arg(ap, jsdouble *) = im;
+ } else {
+ re = va_arg(ap, jsdouble);
+ im = va_arg(ap, jsdouble);
+ if (!JS_NewNumberValue(cx, re, &vp[0]))
+ return JS_FALSE;
+ if (!JS_NewNumberValue(cx, im, &vp[1]))
+ return JS_FALSE;
+ }
+ *vpp = vp + 2;
+ *app = ap;
+ printf("leaving ZZ_formatter");
+ return JS_TRUE;
+}
+
+static JSBool
+ConvertArgs(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSBool b = JS_FALSE;
+ jschar c = 0;
+ int32 i = 0, j = 0;
+ uint32 u = 0;
+ jsdouble d = 0, I = 0, re = 0, im = 0;
+ char *s = NULL;
+ JSString *str = NULL;
+ jschar *w = NULL;
+ JSObject *obj2 = NULL;
+ JSFunction *fun = NULL;
+ jsval v = JSVAL_VOID;
+ JSBool ok;
+
+ if (!JS_AddArgumentFormatter(cx, "ZZ", ZZ_formatter))
+        return JS_FALSE;
+ ok = JS_ConvertArguments(cx, argc, argv, "b/ciujdIsSWofvZZ*",
+ &b, &c, &i, &u, &j, &d, &I, &s, &str, &w, &obj2,
+ &fun, &v, &re, &im);
+ JS_RemoveArgumentFormatter(cx, "ZZ");
+ if (!ok)
+ return JS_FALSE;
+ fprintf(gOutFile,
+ "b %u, c %x (%c), i %ld, u %lu, j %ld\n",
+ b, c, (char)c, i, u, j);
+ fprintf(gOutFile,
+ "d %g, I %g, s %s, S %s, W %s, obj %s, fun %s\n"
+ "v %s, re %g, im %g\n",
+ d, I, s, str ? JS_GetStringBytes(str) : "", EscapeWideString(w),
+ JS_GetStringBytes(JS_ValueToString(cx, OBJECT_TO_JSVAL(obj2))),
+ fun ? JS_GetStringBytes(JS_DecompileFunction(cx, fun, 4)) : "",
+ JS_GetStringBytes(JS_ValueToString(cx, v)), re, im);
+ return JS_TRUE;
+}
+#endif
+
+static JSBool
+BuildDate(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ char version[20] = "\n";
+#if JS_VERSION < 150
+ sprintf(version, " for version %d\n", JS_VERSION);
+#endif
+ fprintf(gOutFile, "built on %s at %s%s", __DATE__, __TIME__, version);
+ return JS_TRUE;
+}
+
+static JSBool
+Clear(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ if (argc != 0 && !JS_ValueToObject(cx, argv[0], &obj))
+ return JS_FALSE;
+ JS_ClearScope(cx, obj);
+ return JS_TRUE;
+}
+
+static JSBool
+Intern(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str;
+
+ str = JS_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ if (!JS_InternUCStringN(cx, JS_GetStringChars(str),
+ JS_GetStringLength(str))) {
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+Clone(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFunction *fun;
+ JSObject *funobj, *parent, *clone;
+
+ fun = JS_ValueToFunction(cx, argv[0]);
+ if (!fun)
+ return JS_FALSE;
+ funobj = JS_GetFunctionObject(fun);
+ if (argc > 1) {
+ if (!JS_ValueToObject(cx, argv[1], &parent))
+ return JS_FALSE;
+ } else {
+ parent = JS_GetParent(cx, funobj);
+ }
+ clone = JS_CloneFunctionObject(cx, funobj, parent);
+ if (!clone)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(clone);
+ return JS_TRUE;
+}
+
+static JSBool
+Seal(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSObject *target;
+ JSBool deep = JS_FALSE;
+
+ if (!JS_ConvertArguments(cx, argc, argv, "o/b", &target, &deep))
+ return JS_FALSE;
+ if (!target)
+ return JS_TRUE;
+ return JS_SealObject(cx, target, deep);
+}
+
+static JSBool
+GetPDA(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSObject *vobj, *aobj, *pdobj;
+ JSBool ok;
+ JSPropertyDescArray pda;
+ JSPropertyDesc *pd;
+ uint32 i;
+ jsval v;
+
+ if (!JS_ValueToObject(cx, argv[0], &vobj))
+ return JS_FALSE;
+ if (!vobj)
+ return JS_TRUE;
+
+ aobj = JS_NewArrayObject(cx, 0, NULL);
+ if (!aobj)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(aobj);
+
+ ok = JS_GetPropertyDescArray(cx, vobj, &pda);
+ if (!ok)
+ return JS_FALSE;
+ pd = pda.array;
+ for (i = 0; i < pda.length; i++) {
+ pdobj = JS_NewObject(cx, NULL, NULL, NULL);
+ if (!pdobj) {
+ ok = JS_FALSE;
+ break;
+ }
+
+ ok = JS_SetProperty(cx, pdobj, "id", &pd->id) &&
+ JS_SetProperty(cx, pdobj, "value", &pd->value) &&
+ (v = INT_TO_JSVAL(pd->flags),
+ JS_SetProperty(cx, pdobj, "flags", &v)) &&
+ (v = INT_TO_JSVAL(pd->slot),
+ JS_SetProperty(cx, pdobj, "slot", &v)) &&
+ JS_SetProperty(cx, pdobj, "alias", &pd->alias);
+ if (!ok)
+ break;
+
+ v = OBJECT_TO_JSVAL(pdobj);
+ ok = JS_SetElement(cx, aobj, i, &v);
+ if (!ok)
+ break;
+ }
+ JS_PutPropertyDescArray(cx, &pda);
+ return ok;
+}
+
+static JSBool
+GetSLX(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSScript *script;
+
+ script = ValueToScript(cx, argv[0]);
+ if (!script)
+ return JS_FALSE;
+ *rval = INT_TO_JSVAL(js_GetScriptLineExtent(script));
+ return JS_TRUE;
+}
+
+static JSBool
+ToInt32(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ int32 i;
+
+ if (!JS_ValueToInt32(cx, argv[0], &i))
+ return JS_FALSE;
+ return JS_NewNumberValue(cx, i, rval);
+}
+
+static JSBool
+StringsAreUtf8(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ *rval = JS_CStringsAreUTF8() ? JSVAL_TRUE : JSVAL_FALSE;
+ return JS_TRUE;
+}
+
+static const char* badUtf8 = "...\xC0...";
+static const char* bigUtf8 = "...\xFB\xBF\xBF\xBF\xBF...";
+static const jschar badSurrogate[] = { 'A', 'B', 'C', 0xDEEE, 'D', 'E', 0 };
+
+static JSBool
+TestUtf8(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ intN mode = 1;
+ jschar chars[20];
+ size_t charsLength = 5;
+ char bytes[20];
+ size_t bytesLength = 20;
+ if (argc && !JS_ValueToInt32(cx, *argv, &mode))
+ return JS_FALSE;
+
+ /* The following throw errors if compiled with UTF-8. */
+ switch (mode) {
+ /* mode 1: malformed UTF-8 string. */
+ case 1:
+ JS_NewStringCopyZ(cx, badUtf8);
+ break;
+ /* mode 2: big UTF-8 character. */
+ case 2:
+ JS_NewStringCopyZ(cx, bigUtf8);
+ break;
+ /* mode 3: bad surrogate character. */
+ case 3:
+ JS_EncodeCharacters(cx, badSurrogate, 6, bytes, &bytesLength);
+ break;
+ /* mode 4: use a too small buffer. */
+ case 4:
+ JS_DecodeBytes(cx, "1234567890", 10, chars, &charsLength);
+ break;
+ default:
+ JS_ReportError(cx, "invalid mode parameter");
+ return JS_FALSE;
+ }
+ return !JS_IsExceptionPending (cx);
+}
+
+static JSBool
+ThrowError(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JS_ReportError(cx, "This is an error");
+ return JS_FALSE;
+}
+
+#define LAZY_STANDARD_CLASSES
+
+/* A class for easily testing the inner/outer object callbacks. */
+typedef struct ComplexObject {
+ JSBool isInner;
+ JSObject *inner;
+ JSObject *outer;
+} ComplexObject;
+
+static JSObject *
+split_create_outer(JSContext *cx);
+
+static JSObject *
+split_create_inner(JSContext *cx, JSObject *outer);
+
+static ComplexObject *
+split_get_private(JSContext *cx, JSObject *obj);
+
+JS_STATIC_DLL_CALLBACK(JSBool)
+split_addProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ ComplexObject *cpx;
+ jsid asId;
+
+ cpx = split_get_private(cx, obj);
+ if (!cpx)
+ return JS_TRUE;
+ if (!cpx->isInner && cpx->inner) {
+ /* Make sure to define this property on the inner object. */
+ if (!JS_ValueToId(cx, *vp, &asId))
+ return JS_FALSE;
+ return OBJ_DEFINE_PROPERTY(cx, cpx->inner, asId, *vp, NULL, NULL,
+ JSPROP_ENUMERATE, NULL);
+ }
+ return JS_TRUE;
+}
+
+JS_STATIC_DLL_CALLBACK(JSBool)
+split_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ ComplexObject *cpx;
+
+ cpx = split_get_private(cx, obj);
+ if (!cpx)
+ return JS_TRUE;
+ if (!cpx->isInner && cpx->inner) {
+ if (JSVAL_IS_STRING(id)) {
+ JSString *str;
+
+ str = JSVAL_TO_STRING(id);
+ return JS_GetUCProperty(cx, cpx->inner, JS_GetStringChars(str),
+ JS_GetStringLength(str), vp);
+ }
+ if (JSVAL_IS_INT(id))
+ return JS_GetElement(cx, cpx->inner, JSVAL_TO_INT(id), vp);
+ return JS_TRUE;
+ }
+
+ return JS_TRUE;
+}
+
+JS_STATIC_DLL_CALLBACK(JSBool)
+split_setProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ ComplexObject *cpx;
+
+ cpx = split_get_private(cx, obj);
+ if (!cpx)
+ return JS_TRUE;
+ if (!cpx->isInner && cpx->inner) {
+ if (JSVAL_IS_STRING(id)) {
+ JSString *str;
+
+ str = JSVAL_TO_STRING(id);
+ return JS_SetUCProperty(cx, cpx->inner, JS_GetStringChars(str),
+ JS_GetStringLength(str), vp);
+ }
+ if (JSVAL_IS_INT(id))
+ return JS_SetElement(cx, cpx->inner, JSVAL_TO_INT(id), vp);
+ return JS_TRUE;
+ }
+
+ return JS_TRUE;
+}
+
+JS_STATIC_DLL_CALLBACK(JSBool)
+split_delProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ ComplexObject *cpx;
+ jsid asId;
+
+ cpx = split_get_private(cx, obj);
+ if (!cpx)
+ return JS_TRUE;
+ if (!cpx->isInner && cpx->inner) {
+        /* Make sure to delete this property from the inner object. */
+ if (!JS_ValueToId(cx, *vp, &asId))
+ return JS_FALSE;
+ return OBJ_DELETE_PROPERTY(cx, cpx->inner, asId, vp);
+ }
+ return JS_TRUE;
+}
+
+JS_STATIC_DLL_CALLBACK(JSBool)
+split_enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op,
+ jsval *statep, jsid *idp)
+{
+ ComplexObject *cpx;
+ JSObject *iterator;
+
+ switch (enum_op) {
+ case JSENUMERATE_INIT:
+ cpx = JS_GetPrivate(cx, obj);
+
+ if (!cpx->isInner && cpx->inner)
+ obj = cpx->inner;
+
+ iterator = JS_NewPropertyIterator(cx, obj);
+ if (!iterator)
+ return JS_FALSE;
+
+ *statep = OBJECT_TO_JSVAL(iterator);
+ if (idp)
+ *idp = JSVAL_ZERO;
+ break;
+
+ case JSENUMERATE_NEXT:
+ iterator = (JSObject*)JSVAL_TO_OBJECT(*statep);
+ if (!JS_NextProperty(cx, iterator, idp))
+ return JS_FALSE;
+
+ if (*idp != JSVAL_VOID)
+ break;
+ /* Fall through. */
+
+ case JSENUMERATE_DESTROY:
+ /* Let GC at our iterator object. */
+ *statep = JSVAL_NULL;
+ break;
+ }
+
+ return JS_TRUE;
+}
+
+JS_STATIC_DLL_CALLBACK(JSBool)
+split_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags,
+ JSObject **objp)
+{
+ ComplexObject *cpx;
+
+ cpx = split_get_private(cx, obj);
+ if (!cpx)
+ return JS_TRUE;
+ if (!cpx->isInner && cpx->inner) {
+ jsid asId;
+ JSProperty *prop;
+
+ if (!JS_ValueToId(cx, id, &asId))
+ return JS_FALSE;
+
+ if (!OBJ_LOOKUP_PROPERTY(cx, cpx->inner, asId, objp, &prop))
+ return JS_FALSE;
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, cpx->inner, prop);
+
+ return JS_TRUE;
+ }
+
+#ifdef LAZY_STANDARD_CLASSES
+ if (!(flags & JSRESOLVE_ASSIGNING)) {
+ JSBool resolved;
+
+ if (!JS_ResolveStandardClass(cx, obj, id, &resolved))
+ return JS_FALSE;
+
+ if (resolved) {
+ *objp = obj;
+ return JS_TRUE;
+ }
+ }
+#endif
+
+ /* XXX For additional realism, let's resolve some random property here. */
+ return JS_TRUE;
+}
+
+JS_STATIC_DLL_CALLBACK(void)
+split_finalize(JSContext *cx, JSObject *obj)
+{
+ JS_free(cx, JS_GetPrivate(cx, obj));
+}
+
+JS_STATIC_DLL_CALLBACK(uint32)
+split_mark(JSContext *cx, JSObject *obj, void *arg)
+{
+ ComplexObject *cpx;
+
+ cpx = JS_GetPrivate(cx, obj);
+
+ if (!cpx->isInner && cpx->inner) {
+ /* Mark the inner object. */
+ JS_MarkGCThing(cx, cpx->inner, "ComplexObject.inner", arg);
+ }
+
+ return 0;
+}
+
+JS_STATIC_DLL_CALLBACK(JSObject *)
+split_outerObject(JSContext *cx, JSObject *obj)
+{
+ ComplexObject *cpx;
+
+ cpx = JS_GetPrivate(cx, obj);
+ return cpx->isInner ? cpx->outer : obj;
+}
+
+JS_STATIC_DLL_CALLBACK(JSObject *)
+split_innerObject(JSContext *cx, JSObject *obj)
+{
+ ComplexObject *cpx;
+
+ cpx = JS_GetPrivate(cx, obj);
+ return !cpx->isInner ? cpx->inner : obj;
+}
+
+static JSExtendedClass split_global_class = {
+ {"split_global",
+ JSCLASS_NEW_RESOLVE | JSCLASS_HAS_PRIVATE | JSCLASS_IS_EXTENDED,
+ split_addProperty, split_delProperty,
+ split_getProperty, split_setProperty,
+ (JSEnumerateOp)split_enumerate,
+ (JSResolveOp)split_resolve,
+ JS_ConvertStub, split_finalize,
+ NULL, NULL, NULL, NULL, NULL, NULL,
+ split_mark, NULL},
+ NULL, split_outerObject, split_innerObject,
+ NULL, NULL, NULL, NULL, NULL
+};
+
+JSObject *
+split_create_outer(JSContext *cx)
+{
+ ComplexObject *cpx;
+ JSObject *obj;
+
+    cpx = JS_malloc(cx, sizeof *cpx);
+ if (!cpx)
+ return NULL;
+ cpx->outer = NULL;
+ cpx->inner = NULL;
+ cpx->isInner = JS_FALSE;
+
+ obj = JS_NewObject(cx, &split_global_class.base, NULL, NULL);
+ if (!obj) {
+ JS_free(cx, cpx);
+ return NULL;
+ }
+
+ JS_ASSERT(!JS_GetParent(cx, obj));
+ if (!JS_SetPrivate(cx, obj, cpx)) {
+ JS_free(cx, cpx);
+ return NULL;
+ }
+
+ return obj;
+}
+
+static JSObject *
+split_create_inner(JSContext *cx, JSObject *outer)
+{
+ ComplexObject *cpx, *outercpx;
+ JSObject *obj;
+
+ JS_ASSERT(JS_GET_CLASS(cx, outer) == &split_global_class.base);
+
+ cpx = JS_malloc(cx, sizeof *cpx);
+ if (!cpx)
+ return NULL;
+ cpx->outer = outer;
+ cpx->inner = NULL;
+ cpx->isInner = JS_TRUE;
+
+ obj = JS_NewObject(cx, &split_global_class.base, NULL, NULL);
+ if (!obj || !JS_SetParent(cx, obj, NULL) || !JS_SetPrivate(cx, obj, cpx)) {
+ JS_free(cx, cpx);
+ return NULL;
+ }
+
+ outercpx = JS_GetPrivate(cx, outer);
+ outercpx->inner = obj;
+
+ return obj;
+}
+
+static ComplexObject *
+split_get_private(JSContext *cx, JSObject *obj)
+{
+ do {
+ if (JS_GET_CLASS(cx, obj) == &split_global_class.base)
+ return JS_GetPrivate(cx, obj);
+ obj = JS_GetParent(cx, obj);
+ } while (obj);
+
+ return NULL;
+}
+
+static JSBool
+sandbox_enumerate(JSContext *cx, JSObject *obj)
+{
+ jsval v;
+ JSBool b;
+
+ if (!JS_GetProperty(cx, obj, "lazy", &v) || !JS_ValueToBoolean(cx, v, &b))
+ return JS_FALSE;
+ return !b || JS_EnumerateStandardClasses(cx, obj);
+}
+
+static JSBool
+sandbox_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags,
+ JSObject **objp)
+{
+ jsval v;
+ JSBool b, resolved;
+
+ if (!JS_GetProperty(cx, obj, "lazy", &v) || !JS_ValueToBoolean(cx, v, &b))
+ return JS_FALSE;
+ if (b && (flags & JSRESOLVE_ASSIGNING) == 0) {
+ if (!JS_ResolveStandardClass(cx, obj, id, &resolved))
+ return JS_FALSE;
+ if (resolved) {
+ *objp = obj;
+ return JS_TRUE;
+ }
+ }
+ *objp = NULL;
+ return JS_TRUE;
+}
+
+static JSClass sandbox_class = {
+ "sandbox",
+ JSCLASS_NEW_RESOLVE,
+ JS_PropertyStub, JS_PropertyStub,
+ JS_PropertyStub, JS_PropertyStub,
+ sandbox_enumerate, (JSResolveOp)sandbox_resolve,
+ JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+};
+
+static JSBool
+EvalInContext(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str;
+ JSObject *sobj;
+ JSContext *scx;
+ const jschar *src;
+ size_t srclen;
+ JSBool lazy, ok;
+ jsval v;
+ JSStackFrame *fp;
+
+ sobj = NULL;
+ if (!JS_ConvertArguments(cx, argc, argv, "S / o", &str, &sobj))
+ return JS_FALSE;
+
+ scx = JS_NewContext(JS_GetRuntime(cx), gStackChunkSize);
+ if (!scx) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+
+ src = JS_GetStringChars(str);
+ srclen = JS_GetStringLength(str);
+ lazy = JS_FALSE;
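+    /*
+     * The literal source string "lazy" asks for a fresh sandbox object with
+     * lazily resolved standard classes instead of evaluating any code.
+     */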
+ if (srclen == 4 &&
+ src[0] == 'l' && src[1] == 'a' && src[2] == 'z' && src[3] == 'y') {
+ lazy = JS_TRUE;
+ srclen = 0;
+ }
+
+ if (!sobj) {
+ sobj = JS_NewObject(scx, &sandbox_class, NULL, NULL);
+ if (!sobj || (!lazy && !JS_InitStandardClasses(scx, sobj))) {
+ ok = JS_FALSE;
+ goto out;
+ }
+        v = BOOLEAN_TO_JSVAL(lazy);
+ ok = JS_SetProperty(cx, sobj, "lazy", &v);
+ if (!ok)
+ goto out;
+ }
+
+ if (srclen == 0) {
+ *rval = OBJECT_TO_JSVAL(sobj);
+ ok = JS_TRUE;
+ } else {
+ fp = JS_GetScriptedCaller(cx, NULL);
+ ok = JS_EvaluateUCScript(scx, sobj, src, srclen,
+ fp->script->filename,
+ JS_PCToLineNumber(cx, fp->script, fp->pc),
+ rval);
+ }
+
+out:
+ JS_DestroyContext(scx);
+ return ok;
+}
+
+static JSFunctionSpec shell_functions[] = {
+ {"version", Version, 0,0,0},
+ {"options", Options, 0,0,0},
+ {"load", Load, 1,0,0},
+ {"readline", ReadLine, 0,0,0},
+ {"print", Print, 0,0,0},
+ {"help", Help, 0,0,0},
+ {"quit", Quit, 0,0,0},
+ {"gc", GC, 0,0,0},
+ {"trap", Trap, 3,0,0},
+ {"untrap", Untrap, 2,0,0},
+ {"line2pc", LineToPC, 0,0,0},
+ {"pc2line", PCToLine, 0,0,0},
+ {"stringsAreUtf8", StringsAreUtf8, 0,0,0},
+ {"testUtf8", TestUtf8, 1,0,0},
+ {"throwError", ThrowError, 0,0,0},
+#ifdef DEBUG
+ {"dis", Disassemble, 1,0,0},
+ {"dissrc", DisassWithSrc, 1,0,0},
+ {"notes", Notes, 1,0,0},
+ {"tracing", Tracing, 0,0,0},
+ {"stats", DumpStats, 1,0,0},
+#endif
+#ifdef TEST_EXPORT
+ {"xport", DoExport, 2,0,0},
+#endif
+#ifdef TEST_CVTARGS
+ {"cvtargs", ConvertArgs, 0,0,12},
+#endif
+ {"build", BuildDate, 0,0,0},
+ {"clear", Clear, 0,0,0},
+ {"intern", Intern, 1,0,0},
+ {"clone", Clone, 1,0,0},
+ {"seal", Seal, 1,0,1},
+ {"getpda", GetPDA, 1,0,0},
+ {"getslx", GetSLX, 1,0,0},
+ {"toint32", ToInt32, 1,0,0},
+ {"evalcx", EvalInContext, 1,0,0},
+ {NULL,NULL,0,0,0}
+};
+
+/* NOTE: These must be kept in sync with the above. */
+
+static char *shell_help_messages[] = {
+ "version([number]) Get or set JavaScript version number",
+ "options([option ...]) Get or toggle JavaScript options",
+ "load(['foo.js' ...]) Load files named by string arguments",
+ "readline() Read a single line from stdin",
+ "print([exp ...]) Evaluate and print expressions",
+ "help([name ...]) Display usage and help messages",
+ "quit() Quit the shell",
+ "gc() Run the garbage collector",
+ "trap([fun, [pc,]] exp) Trap bytecode execution",
+ "untrap(fun[, pc]) Remove a trap",
+ "line2pc([fun,] line) Map line number to PC",
+ "pc2line(fun[, pc]) Map PC to line number",
+ "stringsAreUTF8() Check if strings are UTF-8 encoded",
+ "testUTF8(mode) Perform UTF-8 tests (modes are 1 to 4)",
+ "throwError() Throw an error from JS_ReportError",
+#ifdef DEBUG
+ "dis([fun]) Disassemble functions into bytecodes",
+ "dissrc([fun]) Disassemble functions with source lines",
+ "notes([fun]) Show source notes for functions",
+ "tracing([toggle]) Turn tracing on or off",
+ "stats([string ...]) Dump 'arena', 'atom', 'global' stats",
+#endif
+#ifdef TEST_EXPORT
+ "xport(obj, id) Export identified property from object",
+#endif
+#ifdef TEST_CVTARGS
+ "cvtargs(b, c, ...) Test JS_ConvertArguments",
+#endif
+ "build() Show build date and time",
+ "clear([obj]) Clear properties of object",
+ "intern(str) Internalize str in the atom table",
+ "clone(fun[, scope]) Clone function object",
+ "seal(obj[, deep]) Seal object, or object graph if deep",
+ "getpda(obj) Get the property descriptors for obj",
+ "getslx(obj) Get script line extent",
+ "toint32(n) Testing hook for JS_ValueToInt32",
+ "evalcx(s[, o]) Evaluate s in optional sandbox object o\n"
+ " if (s == '' && !o) return new o with eager standard classes\n"
+ " if (s == 'lazy' && !o) return new o with lazy standard classes",
+ 0
+};
+
+static void
+ShowHelpHeader(void)
+{
+ fprintf(gOutFile, "%-14s %-22s %s\n", "Command", "Usage", "Description");
+ fprintf(gOutFile, "%-14s %-22s %s\n", "=======", "=====", "===========");
+}
+
+static void
+ShowHelpForCommand(uintN n)
+{
+ fprintf(gOutFile, "%-14.14s %s\n", shell_functions[n].name, shell_help_messages[n]);
+}
+
+static JSObject *
+split_setup(JSContext *cx)
+{
+ JSObject *outer, *inner, *arguments;
+
+ outer = split_create_outer(cx);
+ if (!outer)
+ return NULL;
+ JS_SetGlobalObject(cx, outer);
+
+ inner = split_create_inner(cx, outer);
+ if (!inner)
+ return NULL;
+
+ if (!JS_DefineFunctions(cx, inner, shell_functions))
+ return NULL;
+ JS_ClearScope(cx, outer);
+
+ /* Create a dummy arguments object. */
+ arguments = JS_NewArrayObject(cx, 0, NULL);
+ if (!arguments ||
+ !JS_DefineProperty(cx, inner, "arguments", OBJECT_TO_JSVAL(arguments),
+ NULL, NULL, 0)) {
+ return NULL;
+ }
+
+#ifndef LAZY_STANDARD_CLASSES
+ if (!JS_InitStandardClasses(cx, inner))
+ return NULL;
+#endif
+
+ return inner;
+}
+
+static JSBool
+Help(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ uintN i, j;
+ int did_header, did_something;
+ JSType type;
+ JSFunction *fun;
+ JSString *str;
+ const char *bytes;
+
+ fprintf(gOutFile, "%s\n", JS_GetImplementationVersion());
+ if (argc == 0) {
+ ShowHelpHeader();
+ for (i = 0; shell_functions[i].name; i++)
+ ShowHelpForCommand(i);
+ } else {
+ did_header = 0;
+ for (i = 0; i < argc; i++) {
+ did_something = 0;
+ type = JS_TypeOfValue(cx, argv[i]);
+ if (type == JSTYPE_FUNCTION) {
+ fun = JS_ValueToFunction(cx, argv[i]);
+ str = fun->atom ? ATOM_TO_STRING(fun->atom) : NULL;
+ } else if (type == JSTYPE_STRING) {
+ str = JSVAL_TO_STRING(argv[i]);
+ } else {
+ str = NULL;
+ }
+ if (str) {
+ bytes = JS_GetStringBytes(str);
+ for (j = 0; shell_functions[j].name; j++) {
+ if (!strcmp(bytes, shell_functions[j].name)) {
+ if (!did_header) {
+ did_header = 1;
+ ShowHelpHeader();
+ }
+ did_something = 1;
+ ShowHelpForCommand(j);
+ break;
+ }
+ }
+ }
+ if (!did_something) {
+ str = JS_ValueToString(cx, argv[i]);
+ if (!str)
+ return JS_FALSE;
+ fprintf(gErrFile, "Sorry, no help for %s\n",
+ JS_GetStringBytes(str));
+ }
+ }
+ }
+ return JS_TRUE;
+}
+
+/*
+ * Define a JS object called "it". Give it class operations that printf why
+ * they're being called for tutorial purposes.
+ */
+enum its_tinyid {
+ ITS_COLOR, ITS_HEIGHT, ITS_WIDTH, ITS_FUNNY, ITS_ARRAY, ITS_RDONLY
+};
+
+static JSPropertySpec its_props[] = {
+ {"color", ITS_COLOR, JSPROP_ENUMERATE, NULL, NULL},
+ {"height", ITS_HEIGHT, JSPROP_ENUMERATE, NULL, NULL},
+ {"width", ITS_WIDTH, JSPROP_ENUMERATE, NULL, NULL},
+ {"funny", ITS_FUNNY, JSPROP_ENUMERATE, NULL, NULL},
+ {"array", ITS_ARRAY, JSPROP_ENUMERATE, NULL, NULL},
+ {"rdonly", ITS_RDONLY, JSPROP_READONLY, NULL, NULL},
+ {NULL,0,0,NULL,NULL}
+};
+
+static JSBool
+its_item(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ *rval = OBJECT_TO_JSVAL(obj);
+ if (argc != 0)
+ JS_SetCallReturnValue2(cx, argv[0]);
+ return JS_TRUE;
+}
+
+static JSBool
+its_bindMethod(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ char *name;
+ JSObject *method;
+
+ if (!JS_ConvertArguments(cx, argc, argv, "so", &name, &method))
+ return JS_FALSE;
+
+ *rval = OBJECT_TO_JSVAL(method);
+
+ if (JS_TypeOfValue(cx, *rval) != JSTYPE_FUNCTION) {
+ JSString *valstr = JS_ValueToString(cx, *rval);
+ if (valstr) {
+ JS_ReportError(cx, "can't bind method %s to non-callable object %s",
+ name, JS_GetStringBytes(valstr));
+ }
+ return JS_FALSE;
+ }
+
+ if (!JS_DefineProperty(cx, obj, name, *rval, NULL, NULL, JSPROP_ENUMERATE))
+ return JS_FALSE;
+
+ return JS_SetParent(cx, method, obj);
+}
+
+static JSFunctionSpec its_methods[] = {
+ {"item", its_item, 0,0,0},
+ {"bindMethod", its_bindMethod, 2,0,0},
+ {NULL,NULL,0,0,0}
+};
+
+#ifdef JSD_LOWLEVEL_SOURCE
+/*
+ * This facilitates sending source to JSD (the debugger system) in the shell
+ * where the source is loaded using the JSFILE hack in jsscan. The function
+ * below is used as a callback for the jsdbgapi JS_SetSourceHandler hook.
+ * A more normal embedding (e.g. mozilla) loads source itself and can send
+ * source directly to JSD without using this hook scheme.
+ */
+static void
+SendSourceToJSDebugger(const char *filename, uintN lineno,
+ jschar *str, size_t length,
+ void **listenerTSData, JSDContext* jsdc)
+{
+ JSDSourceText *jsdsrc = (JSDSourceText *) *listenerTSData;
+
+ if (!jsdsrc) {
+ if (!filename)
+ filename = "typein";
+ if (1 == lineno) {
+ jsdsrc = JSD_NewSourceText(jsdc, filename);
+ } else {
+ jsdsrc = JSD_FindSourceForURL(jsdc, filename);
+ if (jsdsrc && JSD_SOURCE_PARTIAL !=
+ JSD_GetSourceStatus(jsdc, jsdsrc)) {
+ jsdsrc = NULL;
+ }
+ }
+ }
+ if (jsdsrc) {
+ jsdsrc = JSD_AppendUCSourceText(jsdc,jsdsrc, str, length,
+ JSD_SOURCE_PARTIAL);
+ }
+ *listenerTSData = jsdsrc;
+}
+#endif /* JSD_LOWLEVEL_SOURCE */
+
+static JSBool its_noisy; /* whether to be noisy when finalizing it */
+
+static JSBool
+its_addProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ if (its_noisy) {
+ fprintf(gOutFile, "adding its property %s,",
+ JS_GetStringBytes(JS_ValueToString(cx, id)));
+ fprintf(gOutFile, " initial value %s\n",
+ JS_GetStringBytes(JS_ValueToString(cx, *vp)));
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+its_delProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ if (its_noisy) {
+ fprintf(gOutFile, "deleting its property %s,",
+ JS_GetStringBytes(JS_ValueToString(cx, id)));
+ fprintf(gOutFile, " current value %s\n",
+ JS_GetStringBytes(JS_ValueToString(cx, *vp)));
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+its_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ if (its_noisy) {
+ fprintf(gOutFile, "getting its property %s,",
+ JS_GetStringBytes(JS_ValueToString(cx, id)));
+ fprintf(gOutFile, " current value %s\n",
+ JS_GetStringBytes(JS_ValueToString(cx, *vp)));
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+its_setProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ if (its_noisy) {
+ fprintf(gOutFile, "setting its property %s,",
+ JS_GetStringBytes(JS_ValueToString(cx, id)));
+ fprintf(gOutFile, " new value %s\n",
+ JS_GetStringBytes(JS_ValueToString(cx, *vp)));
+ }
+ if (JSVAL_IS_STRING(id) &&
+ !strcmp(JS_GetStringBytes(JSVAL_TO_STRING(id)), "noisy")) {
+ return JS_ValueToBoolean(cx, *vp, &its_noisy);
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+its_enumerate(JSContext *cx, JSObject *obj)
+{
+ if (its_noisy)
+ fprintf(gOutFile, "enumerate its properties\n");
+ return JS_TRUE;
+}
+
+static JSBool
+its_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags,
+ JSObject **objp)
+{
+ if (its_noisy) {
+ fprintf(gOutFile, "resolving its property %s, flags {%s,%s,%s}\n",
+ JS_GetStringBytes(JS_ValueToString(cx, id)),
+ (flags & JSRESOLVE_QUALIFIED) ? "qualified" : "",
+ (flags & JSRESOLVE_ASSIGNING) ? "assigning" : "",
+ (flags & JSRESOLVE_DETECTING) ? "detecting" : "");
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+its_convert(JSContext *cx, JSObject *obj, JSType type, jsval *vp)
+{
+ if (its_noisy)
+ fprintf(gOutFile, "converting it to %s type\n", JS_GetTypeName(cx, type));
+ return JS_TRUE;
+}
+
+static void
+its_finalize(JSContext *cx, JSObject *obj)
+{
+ if (its_noisy)
+ fprintf(gOutFile, "finalizing it\n");
+}
+
+static JSClass its_class = {
+ "It", JSCLASS_NEW_RESOLVE,
+ its_addProperty, its_delProperty, its_getProperty, its_setProperty,
+ its_enumerate, (JSResolveOp)its_resolve,
+ its_convert, its_finalize,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+};
+
+JSErrorFormatString jsShell_ErrorFormatString[JSErr_Limit] = {
+#define MSG_DEF(name, number, count, exception, format) \
+ { format, count, JSEXN_ERR } ,
+#include "jsshell.msg"
+#undef MSG_DEF
+};
+
+static const JSErrorFormatString *
+my_GetErrorMessage(void *userRef, const char *locale, const uintN errorNumber)
+{
+ if ((errorNumber > 0) && (errorNumber < JSShellErr_Limit))
+ return &jsShell_ErrorFormatString[errorNumber];
+ return NULL;
+}
+
+static void
+my_ErrorReporter(JSContext *cx, const char *message, JSErrorReport *report)
+{
+ int i, j, k, n;
+ char *prefix, *tmp;
+ const char *ctmp;
+
+ if (!report) {
+ fprintf(gErrFile, "%s\n", message);
+ return;
+ }
+
+ /* Conditionally ignore reported warnings. */
+ if (JSREPORT_IS_WARNING(report->flags) && !reportWarnings)
+ return;
+
+ prefix = NULL;
+ if (report->filename)
+ prefix = JS_smprintf("%s:", report->filename);
+ if (report->lineno) {
+ tmp = prefix;
+ prefix = JS_smprintf("%s%u: ", tmp ? tmp : "", report->lineno);
+ JS_free(cx, tmp);
+ }
+ if (JSREPORT_IS_WARNING(report->flags)) {
+ tmp = prefix;
+ prefix = JS_smprintf("%s%swarning: ",
+ tmp ? tmp : "",
+ JSREPORT_IS_STRICT(report->flags) ? "strict " : "");
+ JS_free(cx, tmp);
+ }
+
+ /* embedded newlines -- argh! */
+ while ((ctmp = strchr(message, '\n')) != 0) {
+ ctmp++;
+ if (prefix)
+ fputs(prefix, gErrFile);
+ fwrite(message, 1, ctmp - message, gErrFile);
+ message = ctmp;
+ }
+
+ /* If there were no filename or lineno, the prefix might be empty */
+ if (prefix)
+ fputs(prefix, gErrFile);
+ fputs(message, gErrFile);
+
+ if (!report->linebuf) {
+ fputc('\n', gErrFile);
+ goto out;
+ }
+
+ /* report->linebuf usually ends with a newline. */
+ n = strlen(report->linebuf);
+ fprintf(gErrFile, ":\n%s%s%s%s",
+ prefix,
+ report->linebuf,
+ (n > 0 && report->linebuf[n-1] == '\n') ? "" : "\n",
+ prefix);
+ n = PTRDIFF(report->tokenptr, report->linebuf, char);
+ for (i = j = 0; i < n; i++) {
+ if (report->linebuf[i] == '\t') {
+ for (k = (j + 8) & ~7; j < k; j++) {
+ fputc('.', gErrFile);
+ }
+ continue;
+ }
+ fputc('.', gErrFile);
+ j++;
+ }
+ fputs("^\n", gErrFile);
+ out:
+ if (!JSREPORT_IS_WARNING(report->flags)) {
+ if (report->errorNumber == JSMSG_OUT_OF_MEMORY) {
+ gExitCode = EXITCODE_OUT_OF_MEMORY;
+ } else {
+ gExitCode = EXITCODE_RUNTIME_ERROR;
+ }
+ }
+ JS_free(cx, prefix);
+}
+
+#if defined(SHELL_HACK) && defined(DEBUG) && defined(XP_UNIX)
+static JSBool
+Exec(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFunction *fun;
+ const char *name, **nargv;
+ uintN i, nargc;
+ JSString *str;
+ pid_t pid;
+ int status;
+
+ fun = JS_ValueToFunction(cx, argv[-2]);
+ if (!fun)
+ return JS_FALSE;
+ if (!fun->atom)
+ return JS_TRUE;
+ name = JS_GetStringBytes(ATOM_TO_STRING(fun->atom));
+ nargc = 1 + argc;
+ nargv = JS_malloc(cx, (nargc + 1) * sizeof(char *));
+ if (!nargv)
+ return JS_FALSE;
+ nargv[0] = name;
+ for (i = 1; i < nargc; i++) {
+ str = JS_ValueToString(cx, argv[i-1]);
+ if (!str) {
+ JS_free(cx, nargv);
+ return JS_FALSE;
+ }
+ nargv[i] = JS_GetStringBytes(str);
+ }
+ nargv[nargc] = 0;
+ pid = fork();
+ switch (pid) {
+ case -1:
+ perror("js");
+ break;
+ case 0:
+ (void) execvp(name, (char **)nargv);
+ perror("js");
+ exit(127);
+ default:
+ while (waitpid(pid, &status, 0) < 0 && errno == EINTR)
+ continue;
+ break;
+ }
+ JS_free(cx, nargv);
+ return JS_TRUE;
+}
+#endif
+
+static JSBool
+global_enumerate(JSContext *cx, JSObject *obj)
+{
+#ifdef LAZY_STANDARD_CLASSES
+ return JS_EnumerateStandardClasses(cx, obj);
+#else
+ return JS_TRUE;
+#endif
+}
+
+static JSBool
+global_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags,
+ JSObject **objp)
+{
+#ifdef LAZY_STANDARD_CLASSES
+ JSBool resolved;
+
+ if (!JS_ResolveStandardClass(cx, obj, id, &resolved))
+ return JS_FALSE;
+ if (resolved) {
+ *objp = obj;
+ return JS_TRUE;
+ }
+#endif
+
+#if defined(SHELL_HACK) && defined(DEBUG) && defined(XP_UNIX)
+ if ((flags & (JSRESOLVE_QUALIFIED | JSRESOLVE_ASSIGNING)) == 0) {
+ /*
+ * Do this expensive hack only for unoptimized Unix builds, which are
+ * not used for benchmarking.
+ */
+ char *path, *comp, *full;
+ const char *name;
+ JSBool ok, found;
+ JSFunction *fun;
+
+ if (!JSVAL_IS_STRING(id))
+ return JS_TRUE;
+ path = getenv("PATH");
+ if (!path)
+ return JS_TRUE;
+ path = JS_strdup(cx, path);
+ if (!path)
+ return JS_FALSE;
+ name = JS_GetStringBytes(JSVAL_TO_STRING(id));
+ ok = JS_TRUE;
+ for (comp = strtok(path, ":"); comp; comp = strtok(NULL, ":")) {
+ if (*comp != '\0') {
+ full = JS_smprintf("%s/%s", comp, name);
+ if (!full) {
+ JS_ReportOutOfMemory(cx);
+ ok = JS_FALSE;
+ break;
+ }
+ } else {
+ full = (char *)name;
+ }
+ found = (access(full, X_OK) == 0);
+ if (*comp != '\0')
+ free(full);
+ if (found) {
+ fun = JS_DefineFunction(cx, obj, name, Exec, 0,
+ JSPROP_ENUMERATE);
+ ok = (fun != NULL);
+ if (ok)
+ *objp = obj;
+ break;
+ }
+ }
+ JS_free(cx, path);
+ return ok;
+ }
+#else
+ return JS_TRUE;
+#endif
+}
+
+JSClass global_class = {
+ "global", JSCLASS_NEW_RESOLVE | JSCLASS_GLOBAL_FLAGS,
+ JS_PropertyStub, JS_PropertyStub,
+ JS_PropertyStub, JS_PropertyStub,
+ global_enumerate, (JSResolveOp) global_resolve,
+ JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+};
+
+static JSBool
+env_setProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+/* XXX porting may be easy, but these don't seem to supply setenv by default */
+#if !defined XP_BEOS && !defined XP_OS2 && !defined SOLARIS
+ JSString *idstr, *valstr;
+ const char *name, *value;
+ int rv;
+
+ idstr = JS_ValueToString(cx, id);
+ valstr = JS_ValueToString(cx, *vp);
+ if (!idstr || !valstr)
+ return JS_FALSE;
+ name = JS_GetStringBytes(idstr);
+ value = JS_GetStringBytes(valstr);
+#if defined XP_WIN || defined HPUX || defined OSF1 || defined IRIX
+ {
+ char *waste = JS_smprintf("%s=%s", name, value);
+ if (!waste) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ rv = putenv(waste);
+#ifdef XP_WIN
+ /*
+ * HPUX9 at least still has the bad old non-copying putenv.
+ *
+ * Per mail from <s.shanmuganathan@digital.com>, OSF1 also has a putenv
+ * that will crash if you pass it an auto char array (so it must place
+ * its argument directly in the char *environ[] array).
+ */
+ free(waste);
+#endif
+ }
+#else
+ rv = setenv(name, value, 1);
+#endif
+ if (rv < 0) {
+ JS_ReportError(cx, "can't set envariable %s to %s", name, value);
+ return JS_FALSE;
+ }
+ *vp = STRING_TO_JSVAL(valstr);
+#endif /* !defined XP_BEOS && !defined XP_OS2 && !defined SOLARIS */
+ return JS_TRUE;
+}
+
+static JSBool
+env_enumerate(JSContext *cx, JSObject *obj)
+{
+ static JSBool reflected;
+ char **evp, *name, *value;
+ JSString *valstr;
+ JSBool ok;
+
+ if (reflected)
+ return JS_TRUE;
+
+ for (evp = (char **)JS_GetPrivate(cx, obj); (name = *evp) != NULL; evp++) {
+ value = strchr(name, '=');
+ if (!value)
+ continue;
+ *value++ = '\0';
+ valstr = JS_NewStringCopyZ(cx, value);
+ if (!valstr) {
+ ok = JS_FALSE;
+ } else {
+ ok = JS_DefineProperty(cx, obj, name, STRING_TO_JSVAL(valstr),
+ NULL, NULL, JSPROP_ENUMERATE);
+ }
+ value[-1] = '=';
+ if (!ok)
+ return JS_FALSE;
+ }
+
+ reflected = JS_TRUE;
+ return JS_TRUE;
+}
+
+static JSBool
+env_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags,
+ JSObject **objp)
+{
+ JSString *idstr, *valstr;
+ const char *name, *value;
+
+ if (flags & JSRESOLVE_ASSIGNING)
+ return JS_TRUE;
+
+ idstr = JS_ValueToString(cx, id);
+ if (!idstr)
+ return JS_FALSE;
+ name = JS_GetStringBytes(idstr);
+ value = getenv(name);
+ if (value) {
+ valstr = JS_NewStringCopyZ(cx, value);
+ if (!valstr)
+ return JS_FALSE;
+ if (!JS_DefineProperty(cx, obj, name, STRING_TO_JSVAL(valstr),
+ NULL, NULL, JSPROP_ENUMERATE)) {
+ return JS_FALSE;
+ }
+ *objp = obj;
+ }
+ return JS_TRUE;
+}
+
+static JSClass env_class = {
+ "environment", JSCLASS_HAS_PRIVATE | JSCLASS_NEW_RESOLVE,
+ JS_PropertyStub, JS_PropertyStub,
+ JS_PropertyStub, env_setProperty,
+ env_enumerate, (JSResolveOp) env_resolve,
+ JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+};
+
+#ifdef NARCISSUS
+
+static JSBool
+defineProperty(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str;
+ jsval value;
+ JSBool dontDelete, readOnly, dontEnum;
+ const jschar *chars;
+ size_t length;
+ uintN attrs;
+
+ dontDelete = readOnly = dontEnum = JS_FALSE;
+ if (!JS_ConvertArguments(cx, argc, argv, "Sv/bbb",
+ &str, &value, &dontDelete, &readOnly, &dontEnum)) {
+ return JS_FALSE;
+ }
+ chars = JS_GetStringChars(str);
+ length = JS_GetStringLength(str);
+ attrs = dontEnum ? 0 : JSPROP_ENUMERATE;
+ if (dontDelete)
+ attrs |= JSPROP_PERMANENT;
+ if (readOnly)
+ attrs |= JSPROP_READONLY;
+ return JS_DefineUCProperty(cx, obj, chars, length, value, NULL, NULL,
+ attrs);
+}
+
+static JSBool
+Evaluate(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ /* function evaluate(source, filename, lineno) { ... } */
+ JSString *source;
+ const char *filename = "";
+ jsuint lineno = 0;
+ uint32 oldopts;
+ JSBool ok;
+
+ if (argc == 0) {
+ *rval = JSVAL_VOID;
+ return JS_TRUE;
+ }
+
+ if (!JS_ConvertArguments(cx, argc, argv, "S/su",
+ &source, &filename, &lineno)) {
+ return JS_FALSE;
+ }
+
+ oldopts = JS_GetOptions(cx);
+ JS_SetOptions(cx, oldopts | JSOPTION_COMPILE_N_GO);
+ ok = JS_EvaluateUCScript(cx, obj, JS_GetStringChars(source),
+ JS_GetStringLength(source), filename,
+ lineno, rval);
+ JS_SetOptions(cx, oldopts);
+
+ return ok;
+}
+
+#include <fcntl.h>
+#include <sys/stat.h>
+
+/*
+ * Returns a JS_malloc'd string (that the caller needs to JS_free)
+ * containing the directory (non-leaf) part of |from| prepended to |leaf|.
+ * If |from| is empty or a leaf, MakeAbsolutePathname returns a copy of leaf.
+ * Returns NULL to indicate an error.
+ */
+static char *
+MakeAbsolutePathname(JSContext *cx, const char *from, const char *leaf)
+{
+ size_t dirlen;
+ char *dir;
+ const char *slash = NULL, *cp;
+
+ cp = from;
+ while (*cp) {
+ if (*cp == '/'
+#ifdef XP_WIN
+ || *cp == '\\'
+#endif
+ ) {
+ slash = cp;
+ }
+
+ ++cp;
+ }
+
+ if (!slash) {
+ /* We were given a leaf or |from| was empty. */
+ return JS_strdup(cx, leaf);
+ }
+
+ /* Else, we were given a real pathname, return that + the leaf. */
+ dirlen = slash - from + 1;
+ dir = JS_malloc(cx, dirlen + strlen(leaf) + 1);
+ if (!dir)
+ return NULL;
+
+ strncpy(dir, from, dirlen);
+ strcpy(dir + dirlen, leaf); /* Note: we can't use strcat here. */
+
+ return dir;
+}
+
+static JSBool
+snarf(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str;
+ const char *filename;
+ char *pathname;
+ JSStackFrame *fp;
+ JSBool ok;
+ off_t cc, len;
+ char *buf;
+ FILE *file;
+
+ str = JS_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ filename = JS_GetStringBytes(str);
+
+ /* Get the currently executing script's name. */
+ fp = JS_GetScriptedCaller(cx, NULL);
+ JS_ASSERT(fp && fp->script->filename);
+ pathname = MakeAbsolutePathname(cx, fp->script->filename, filename);
+ if (!pathname)
+ return JS_FALSE;
+
+ ok = JS_FALSE;
+ len = 0;
+ buf = NULL;
+ file = fopen(pathname, "rb");
+ if (!file) {
+ JS_ReportError(cx, "can't open %s: %s", pathname, strerror(errno));
+ } else {
+ if (fseek(file, 0, SEEK_END) == EOF) {
+ JS_ReportError(cx, "can't seek end of %s", pathname);
+ } else {
+ len = ftell(file);
+ if (fseek(file, 0, SEEK_SET) == EOF) {
+ JS_ReportError(cx, "can't seek start of %s", pathname);
+ } else {
+ buf = JS_malloc(cx, len + 1);
+ if (buf) {
+ cc = fread(buf, 1, len, file);
+ if (cc != len) {
+ JS_free(cx, buf);
+ JS_ReportError(cx, "can't read %s: %s", pathname,
+ (cc < 0) ? strerror(errno)
+ : "short read");
+ } else {
+ len = (size_t)cc;
+ ok = JS_TRUE;
+ }
+ }
+ }
+ }
+ fclose(file);
+ }
+ JS_free(cx, pathname);
+ if (!ok) {
+ JS_free(cx, buf);
+ return ok;
+ }
+
+ buf[len] = '\0';
+ str = JS_NewString(cx, buf, len);
+ if (!str) {
+ JS_free(cx, buf);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+#endif /* NARCISSUS */
+
+int
+main(int argc, char **argv, char **envp)
+{
+ int stackDummy;
+ JSRuntime *rt;
+ JSContext *cx;
+ JSObject *glob, *it, *envobj;
+ int result;
+#ifdef LIVECONNECT
+ JavaVM *java_vm = NULL;
+#endif
+#ifdef JSDEBUGGER_JAVA_UI
+ JNIEnv *java_env;
+#endif
+
+ gStackBase = (jsuword)&stackDummy;
+
+ setlocale(LC_ALL, "");
+
+#ifdef XP_OS2
+ /* these streams are normally line buffered on OS/2 and need a \n, *
+ * so we need to unbuffer them to get a reasonable prompt */
+ setbuf(stdout,0);
+ setbuf(stderr,0);
+#endif
+
+ gErrFile = stderr;
+ gOutFile = stdout;
+
+ argc--;
+ argv++;
+
+ rt = JS_NewRuntime(64L * 1024L * 1024L);
+ if (!rt)
+ return 1;
+
+ cx = JS_NewContext(rt, gStackChunkSize);
+ if (!cx)
+ return 1;
+ JS_SetErrorReporter(cx, my_ErrorReporter);
+
+#ifdef JS_THREADSAFE
+ JS_BeginRequest(cx);
+#endif
+
+ glob = JS_NewObject(cx, &global_class, NULL, NULL);
+ if (!glob)
+ return 1;
+#ifdef LAZY_STANDARD_CLASSES
+ JS_SetGlobalObject(cx, glob);
+#else
+ if (!JS_InitStandardClasses(cx, glob))
+ return 1;
+#endif
+ if (!JS_DefineFunctions(cx, glob, shell_functions))
+ return 1;
+
+ it = JS_DefineObject(cx, glob, "it", &its_class, NULL, 0);
+ if (!it)
+ return 1;
+ if (!JS_DefineProperties(cx, it, its_props))
+ return 1;
+ if (!JS_DefineFunctions(cx, it, its_methods))
+ return 1;
+
+#ifdef PERLCONNECT
+ if (!JS_InitPerlClass(cx, glob))
+ return 1;
+#endif
+
+#ifdef JSDEBUGGER
+ /*
+ * XXX A command line option to enable debugging (or not) would be good
+ */
+ _jsdc = JSD_DebuggerOnForUser(rt, NULL, NULL);
+ if (!_jsdc)
+ return 1;
+ JSD_JSContextInUse(_jsdc, cx);
+#ifdef JSD_LOWLEVEL_SOURCE
+ JS_SetSourceHandler(rt, SendSourceToJSDebugger, _jsdc);
+#endif /* JSD_LOWLEVEL_SOURCE */
+#ifdef JSDEBUGGER_JAVA_UI
+ _jsdjc = JSDJ_CreateContext();
+ if (! _jsdjc)
+ return 1;
+ JSDJ_SetJSDContext(_jsdjc, _jsdc);
+ java_env = JSDJ_CreateJavaVMAndStartDebugger(_jsdjc);
+#ifdef LIVECONNECT
+ if (java_env)
+ (*java_env)->GetJavaVM(java_env, &java_vm);
+#endif
+ /*
+ * XXX This would be the place to wait for the debugger to start.
+ * Waiting would be nice in general, but especially when a js file
+ * is passed on the cmd line.
+ */
+#endif /* JSDEBUGGER_JAVA_UI */
+#ifdef JSDEBUGGER_C_UI
+ JSDB_InitDebugger(rt, _jsdc, 0);
+#endif /* JSDEBUGGER_C_UI */
+#endif /* JSDEBUGGER */
+
+#ifdef LIVECONNECT
+ if (!JSJ_SimpleInit(cx, glob, java_vm, getenv("CLASSPATH")))
+ return 1;
+#endif
+
+ envobj = JS_DefineObject(cx, glob, "environment", &env_class, NULL, 0);
+ if (!envobj || !JS_SetPrivate(cx, envobj, envp))
+ return 1;
+
+#ifdef NARCISSUS
+ {
+ jsval v;
+ static const char Object_prototype[] = "Object.prototype";
+
+ if (!JS_DefineFunction(cx, glob, "snarf", snarf, 1, 0))
+ return 1;
+ if (!JS_DefineFunction(cx, glob, "evaluate", Evaluate, 3, 0))
+ return 1;
+
+ if (!JS_EvaluateScript(cx, glob,
+ Object_prototype, sizeof Object_prototype - 1,
+ NULL, 0, &v)) {
+ return 1;
+ }
+ if (!JS_DefineFunction(cx, JSVAL_TO_OBJECT(v), "__defineProperty__",
+ defineProperty, 5, 0)) {
+ return 1;
+ }
+ }
+#endif
+
+ result = ProcessArgs(cx, glob, argv, argc);
+
+#ifdef JSDEBUGGER
+ if (_jsdc)
+ JSD_DebuggerOff(_jsdc);
+#endif /* JSDEBUGGER */
+
+#ifdef JS_THREADSAFE
+ JS_EndRequest(cx);
+#endif
+
+ JS_DestroyContext(cx);
+ JS_DestroyRuntime(rt);
+ JS_ShutDown();
+ return result;
+}
diff --git a/src/third_party/js-1.7/js.mak b/src/third_party/js-1.7/js.mak
new file mode 100644
index 00000000000..f0f32b88277
--- /dev/null
+++ b/src/third_party/js-1.7/js.mak
@@ -0,0 +1,4344 @@
+# Microsoft Developer Studio Generated NMAKE File, Format Version 4.20
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) Console Application" 0x0103
+# TARGTYPE "Win32 (x86) Dynamic-Link Library" 0x0102
+# TARGTYPE "Win32 (x86) Static Library" 0x0104
+
+!IF "$(CFG)" == ""
+CFG=jsshell - Win32 Debug
+!MESSAGE No configuration specified. Defaulting to jsshell - Win32 Debug.
+!ENDIF
+
+!IF "$(CFG)" != "js - Win32 Release" && "$(CFG)" != "js - Win32 Debug" &&\
+ "$(CFG)" != "jsshell - Win32 Release" && "$(CFG)" != "jsshell - Win32 Debug" &&\
+ "$(CFG)" != "jskwgen - Win32 Release" && "$(CFG)" != "jskwgen - Win32 Debug" &&\
+ "$(CFG)" != "fdlibm - Win32 Release" && "$(CFG)" != "fdlibm - Win32 Debug"
+!MESSAGE Invalid configuration "$(CFG)" specified.
+!MESSAGE You can specify a configuration when running NMAKE on this makefile
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "js.mak" CFG="jsshell - Win32 Debug"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "js - Win32 Release" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "js - Win32 Debug" (based on "Win32 (x86) Dynamic-Link Library")
+!MESSAGE "jsshell - Win32 Release" (based on "Win32 (x86) Console Application")
+!MESSAGE "jsshell - Win32 Debug" (based on "Win32 (x86) Console Application")
+!MESSAGE "jskwgen - Win32 Release" (based on "Win32 (x86) Static Library")
+!MESSAGE "jskwgen - Win32 Debug" (based on "Win32 (x86) Static Library")
+!MESSAGE "fdlibm - Win32 Release" (based on "Win32 (x86) Static Library")
+!MESSAGE "fdlibm - Win32 Debug" (based on "Win32 (x86) Static Library")
+!MESSAGE
+!ERROR An invalid configuration is specified.
+!ENDIF
+
+!IF "$(OS)" == "Windows_NT"
+NULL=
+!ELSE
+NULL=nul
+!ENDIF
+################################################################################
+# Begin Project
+# PROP Target_Last_Scanned "jsshell - Win32 Debug"
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "js___Wi1"
+# PROP BASE Intermediate_Dir "js___Wi1"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Target_Dir ""
+OUTDIR=.\Release
+INTDIR=.\Release
+
+ALL : "fdlibm - Win32 Release" "jskwgen - Win32 Release" "$(OUTDIR)\js32.dll"
+
+CLEAN :
+ -@erase "$(INTDIR)\jsapi.obj"
+ -@erase "$(INTDIR)\jsarena.obj"
+ -@erase "$(INTDIR)\jsarray.obj"
+ -@erase "$(INTDIR)\jsatom.obj"
+ -@erase "$(INTDIR)\jsbool.obj"
+ -@erase "$(INTDIR)\jscntxt.obj"
+ -@erase "$(INTDIR)\jsdate.obj"
+ -@erase "$(INTDIR)\jsdbgapi.obj"
+ -@erase "$(INTDIR)\jsdhash.obj"
+ -@erase "$(INTDIR)\jsdtoa.obj"
+ -@erase "$(INTDIR)\jsemit.obj"
+ -@erase "$(INTDIR)\jsexn.obj"
+ -@erase "$(INTDIR)\jsfun.obj"
+ -@erase "$(INTDIR)\jsgc.obj"
+ -@erase "$(INTDIR)\jshash.obj"
+ -@erase "$(INTDIR)\jsinterp.obj"
+ -@erase "$(INTDIR)\jslock.obj"
+ -@erase "$(INTDIR)\jslog2.obj"
+ -@erase "$(INTDIR)\jslong.obj"
+ -@erase "$(INTDIR)\jsmath.obj"
+ -@erase "$(INTDIR)\jsnum.obj"
+ -@erase "$(INTDIR)\jsobj.obj"
+ -@erase "$(INTDIR)\jsopcode.obj"
+ -@erase "$(INTDIR)\jsparse.obj"
+ -@erase "$(INTDIR)\jsprf.obj"
+ -@erase "$(INTDIR)\jsregexp.obj"
+ -@erase "$(INTDIR)\jsscan.obj"
+ -@erase "$(INTDIR)\jsscope.obj"
+ -@erase "$(INTDIR)\jsscript.obj"
+ -@erase "$(INTDIR)\jsstr.obj"
+ -@erase "$(INTDIR)\jsutil.obj"
+ -@erase "$(INTDIR)\jsxdrapi.obj"
+ -@erase "$(INTDIR)\jsxml.obj"
+ -@erase "$(INTDIR)\prmjtime.obj"
+ -@erase "$(INTDIR)\js.pch"
+ -@erase "$(INTDIR)\jsautokw.h"
+ -@erase "$(OUTDIR)\js32.dll"
+ -@erase "$(OUTDIR)\js32.exp"
+ -@erase "$(OUTDIR)\js32.lib"
+ -@$(MAKE) /nologo /$(MAKEFLAGS) /F ".\js.mak" CFG="fdlibm - Win32 Release" clean
+ -@$(MAKE) /nologo /$(MAKEFLAGS) /F ".\js.mak" CFG="jskwgen - Win32 Release" clean
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+CPP=cl.exe
+# ADD BASE CPP /nologo /MT /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D _X86_=1 /D "_WINDOWS" /YX /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /D "NDEBUG" /D _X86_=1 /D "_WINDOWS" /D "WIN32" /D "XP_WIN" /D "JSFILE" /D "EXPORT_JS_API" /I"$(INTDIR)" /YX /c
+CPP_PROJ=/nologo /MD /W3 /GX /O2 /D "NDEBUG" /D _X86_=1 /D "_WINDOWS" /D "WIN32" /D\
+ "XP_WIN" /D "JSFILE" /D "EXPORT_JS_API" /Fp"$(INTDIR)/js.pch" /I"$(INTDIR)" /YX\
+ /Fo"$(INTDIR)/" /c
+CPP_OBJS=.\Release/
+CPP_SBRS=.\.
+
+.c{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.c{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+MTL=mktyplib.exe
+# ADD BASE MTL /nologo /D "NDEBUG" /win32
+# ADD MTL /nologo /D "NDEBUG" /win32
+MTL_PROJ=/nologo /D "NDEBUG" /win32
+RSC=rc.exe
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+BSC32_FLAGS=/nologo /o"$(OUTDIR)/js.bsc"
+BSC32_SBRS= \
+
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /machine:I386
+# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /machine:I386 /out:"Release/js32.dll"
+# SUBTRACT LINK32 /nodefaultlib
+LINK32_FLAGS=kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib\
+ advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib\
+ odbccp32.lib /nologo /subsystem:windows /dll /incremental:no\
+ /pdb:"$(OUTDIR)/js32.pdb" /machine:I386 /out:"$(OUTDIR)/js32.dll"\
+ /implib:"$(OUTDIR)/js32.lib" /opt:ref /opt:noicf
+LINK32_OBJS= \
+ "$(INTDIR)\jsapi.obj" \
+ "$(INTDIR)\jsarena.obj" \
+ "$(INTDIR)\jsarray.obj" \
+ "$(INTDIR)\jsatom.obj" \
+ "$(INTDIR)\jsbool.obj" \
+ "$(INTDIR)\jscntxt.obj" \
+ "$(INTDIR)\jsdate.obj" \
+ "$(INTDIR)\jsdbgapi.obj" \
+ "$(INTDIR)\jsdhash.obj" \
+ "$(INTDIR)\jsdtoa.obj" \
+ "$(INTDIR)\jsemit.obj" \
+ "$(INTDIR)\jsexn.obj" \
+ "$(INTDIR)\jsfun.obj" \
+ "$(INTDIR)\jsgc.obj" \
+ "$(INTDIR)\jshash.obj" \
+ "$(INTDIR)\jsinterp.obj" \
+ "$(INTDIR)\jslock.obj" \
+ "$(INTDIR)\jslog2.obj" \
+ "$(INTDIR)\jslong.obj" \
+ "$(INTDIR)\jsmath.obj" \
+ "$(INTDIR)\jsnum.obj" \
+ "$(INTDIR)\jsobj.obj" \
+ "$(INTDIR)\jsopcode.obj" \
+ "$(INTDIR)\jsparse.obj" \
+ "$(INTDIR)\jsprf.obj" \
+ "$(INTDIR)\jsregexp.obj" \
+ "$(INTDIR)\jsscan.obj" \
+ "$(INTDIR)\jsscope.obj" \
+ "$(INTDIR)\jsscript.obj" \
+ "$(INTDIR)\jsstr.obj" \
+ "$(INTDIR)\jsutil.obj" \
+ "$(INTDIR)\jsxdrapi.obj" \
+ "$(INTDIR)\jsxml.obj" \
+ "$(INTDIR)\prmjtime.obj" \
+ "$(OUTDIR)\fdlibm.lib"
+
+"$(OUTDIR)\js32.dll" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS)
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "js___Wi2"
+# PROP BASE Intermediate_Dir "js___Wi2"
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Target_Dir ""
+OUTDIR=.\Debug
+INTDIR=.\Debug
+
+ALL : "fdlibm - Win32 Debug" "jskwgen - Win32 Debug" "$(OUTDIR)\js32.dll"
+
+CLEAN :
+ -@erase "$(INTDIR)\jsapi.obj"
+ -@erase "$(INTDIR)\jsarena.obj"
+ -@erase "$(INTDIR)\jsarray.obj"
+ -@erase "$(INTDIR)\jsatom.obj"
+ -@erase "$(INTDIR)\jsbool.obj"
+ -@erase "$(INTDIR)\jscntxt.obj"
+ -@erase "$(INTDIR)\jsdate.obj"
+ -@erase "$(INTDIR)\jsdbgapi.obj"
+ -@erase "$(INTDIR)\jsdhash.obj"
+ -@erase "$(INTDIR)\jsdtoa.obj"
+ -@erase "$(INTDIR)\jsemit.obj"
+ -@erase "$(INTDIR)\jsexn.obj"
+ -@erase "$(INTDIR)\jsfun.obj"
+ -@erase "$(INTDIR)\jsgc.obj"
+ -@erase "$(INTDIR)\jshash.obj"
+ -@erase "$(INTDIR)\jsinterp.obj"
+ -@erase "$(INTDIR)\jslock.obj"
+ -@erase "$(INTDIR)\jslog2.obj"
+ -@erase "$(INTDIR)\jslong.obj"
+ -@erase "$(INTDIR)\jsmath.obj"
+ -@erase "$(INTDIR)\jsnum.obj"
+ -@erase "$(INTDIR)\jsobj.obj"
+ -@erase "$(INTDIR)\jsopcode.obj"
+ -@erase "$(INTDIR)\jsparse.obj"
+ -@erase "$(INTDIR)\jsprf.obj"
+ -@erase "$(INTDIR)\jsregexp.obj"
+ -@erase "$(INTDIR)\jsscan.obj"
+ -@erase "$(INTDIR)\jsscope.obj"
+ -@erase "$(INTDIR)\jsscript.obj"
+ -@erase "$(INTDIR)\jsstr.obj"
+ -@erase "$(INTDIR)\jsutil.obj"
+ -@erase "$(INTDIR)\jsxdrapi.obj"
+ -@erase "$(INTDIR)\jsxml.obj"
+ -@erase "$(INTDIR)\prmjtime.obj"
+ -@erase "$(INTDIR)\js.pch"
+ -@erase "$(INTDIR)\jsautokw.h"
+ -@erase "$(OUTDIR)\js32.dll"
+ -@erase "$(OUTDIR)\js32.exp"
+ -@erase "$(OUTDIR)\js32.ilk"
+ -@erase "$(OUTDIR)\js32.lib"
+ -@erase "$(OUTDIR)\js32.pdb"
+ -@$(MAKE) /nologo /$(MAKEFLAGS) /F ".\js.mak" CFG="fdlibm - Win32 Debug" clean
+ -@$(MAKE) /nologo /$(MAKEFLAGS) /F ".\js.mak" CFG="jskwgen - Win32 Debug" clean
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+CPP=cl.exe
+# ADD BASE CPP /nologo /MTd /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D _X86_=1 /D "_WINDOWS" /YX /c
+# ADD CPP /nologo /MDd /W3 /Gm /GX /Zi /Od /D "_DEBUG" /D "DEBUG" /D _X86_=1 /D "_WINDOWS" /D "WIN32" /D "XP_WIN" /D "JSFILE" /D "EXPORT_JS_API" /I"$(INTDIR)" /YX /c
+CPP_PROJ=/nologo /MDd /W3 /Gm /GX /Zi /Od /D "_DEBUG" /D "DEBUG" /D _X86_=1 /D "_WINDOWS"\
+ /D "WIN32" /D "XP_WIN" /D "JSFILE" /D "EXPORT_JS_API" /Fp"$(INTDIR)/js.pch" /I"$(INTDIR)" /YX\
+ /Fo"$(INTDIR)/" /Fd"$(INTDIR)/" /c
+CPP_OBJS=.\Debug/
+CPP_SBRS=.\.
+
+.c{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.c{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+MTL=mktyplib.exe
+# ADD BASE MTL /nologo /D "_DEBUG" /win32
+# ADD MTL /nologo /D "_DEBUG" /win32
+MTL_PROJ=/nologo /D "_DEBUG" /win32
+RSC=rc.exe
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+BSC32_FLAGS=/nologo /o"$(OUTDIR)/js.bsc"
+BSC32_SBRS= \
+
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /debug /machine:I386
+# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:windows /dll /debug /machine:I386 /out:"Debug/js32.dll"
+# SUBTRACT LINK32 /nodefaultlib
+LINK32_FLAGS=kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib\
+ advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib\
+ odbccp32.lib /nologo /subsystem:windows /dll /incremental:yes\
+ /pdb:"$(OUTDIR)/js32.pdb" /debug /machine:I386 /out:"$(OUTDIR)/js32.dll"\
+ /implib:"$(OUTDIR)/js32.lib"
+LINK32_OBJS= \
+ "$(INTDIR)\jsapi.obj" \
+ "$(INTDIR)\jsarena.obj" \
+ "$(INTDIR)\jsarray.obj" \
+ "$(INTDIR)\jsatom.obj" \
+ "$(INTDIR)\jsbool.obj" \
+ "$(INTDIR)\jscntxt.obj" \
+ "$(INTDIR)\jsdate.obj" \
+ "$(INTDIR)\jsdbgapi.obj" \
+ "$(INTDIR)\jsdhash.obj" \
+ "$(INTDIR)\jsdtoa.obj" \
+ "$(INTDIR)\jsemit.obj" \
+ "$(INTDIR)\jsexn.obj" \
+ "$(INTDIR)\jsfun.obj" \
+ "$(INTDIR)\jsgc.obj" \
+ "$(INTDIR)\jshash.obj" \
+ "$(INTDIR)\jsinterp.obj" \
+ "$(INTDIR)\jslock.obj" \
+ "$(INTDIR)\jslog2.obj" \
+ "$(INTDIR)\jslong.obj" \
+ "$(INTDIR)\jsmath.obj" \
+ "$(INTDIR)\jsnum.obj" \
+ "$(INTDIR)\jsobj.obj" \
+ "$(INTDIR)\jsopcode.obj" \
+ "$(INTDIR)\jsparse.obj" \
+ "$(INTDIR)\jsprf.obj" \
+ "$(INTDIR)\jsregexp.obj" \
+ "$(INTDIR)\jsscan.obj" \
+ "$(INTDIR)\jsscope.obj" \
+ "$(INTDIR)\jsscript.obj" \
+ "$(INTDIR)\jsstr.obj" \
+ "$(INTDIR)\jsutil.obj" \
+ "$(INTDIR)\jsxdrapi.obj" \
+ "$(INTDIR)\jsxml.obj" \
+ "$(INTDIR)\prmjtime.obj" \
+ "$(OUTDIR)\fdlibm.lib"
+
+"$(OUTDIR)\js32.dll" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS)
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+!ELSEIF "$(CFG)" == "jskwgen - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "jsshell\Release"
+# PROP BASE Intermediate_Dir "jskwgen\Release"
+# PROP BASE Target_Dir "jskwgen"
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Target_Dir "jskwgen"
+OUTDIR=.\Release
+INTDIR=.\Release
+
+ALL : "$(INTDIR)" "$(INTDIR)\host_jskwgen.exe"
+
+CLEAN :
+ -@erase "$(INTDIR)\jskwgen.obj"
+ -@erase "$(INTDIR)\jskwgen.pch"
+ -@erase "$(INTDIR)\host_jskwgen.exe"
+
+"$(INTDIR)" :
+ if not exist "$(INTDIR)/$(NULL)" mkdir "$(INTDIR)"
+
+CPP=cl.exe
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /YX /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /D "NDEBUG" /D "_CONSOLE" /D "WIN32" /D "XP_WIN" /D "JSFILE" /YX /c
+CPP_PROJ=/nologo /MD /W3 /GX /O2 /D "NDEBUG" /D "_CONSOLE" /D "WIN32" /D\
+ "XP_WIN" /D "JSFILE" /Fp"$(INTDIR)/jskwgen.pch" /YX /Fo"$(INTDIR)/" /c
+CPP_OBJS=.\Release/
+CPP_SBRS=.\.
+
+.c{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.c{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+RSC=rc.exe
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+BSC32_FLAGS=/nologo /o"$(INTDIR)/jskwgen.bsc"
+BSC32_SBRS= \
+
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+LINK32_FLAGS=kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib\
+ advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib\
+ odbccp32.lib /nologo /subsystem:console /incremental:no\
+ /pdb:"$(INTDIR)/jskwgen.pdb" /machine:I386 /out:"$(INTDIR)/host_jskwgen.exe"
+LINK32_OBJS= \
+ "$(INTDIR)\jskwgen.obj" \
+
+"$(INTDIR)\host_jskwgen.exe" : "$(INTDIR)" $(DEF_FILE) $(LINK32_OBJS)
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+!ELSEIF "$(CFG)" == "jskwgen - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "jsshell\Debug"
+# PROP BASE Intermediate_Dir "jskwgen\Debug"
+# PROP BASE Target_Dir "jskwgen"
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Target_Dir "jskwgen"
+OUTDIR=.\Debug
+INTDIR=.\Debug
+
+ALL : "$(INTDIR)" "$(INTDIR)\host_jskwgen.exe"
+
+CLEAN :
+ -@erase "$(INTDIR)\jskwgen.obj"
+ -@erase "$(INTDIR)\jskwgen.pch"
+ -@erase "$(INTDIR)\host_jskwgen.exe"
+
+"$(INTDIR)" :
+ if not exist "$(INTDIR)/$(NULL)" mkdir "$(INTDIR)"
+
+CPP=cl.exe
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /YX /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /D "NDEBUG" /D "_CONSOLE" /D "WIN32" /D "XP_WIN" /D "JSFILE" /YX /c
+CPP_PROJ=/nologo /MD /W3 /GX /O2 /D "NDEBUG" /D "_CONSOLE" /D "WIN32" /D\
+ "XP_WIN" /D "JSFILE" /Fp"$(INTDIR)/jskwgen.pch" /YX /Fo"$(INTDIR)/" /c
+CPP_OBJS=.\Debug/
+CPP_SBRS=.\.
+
+.c{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.c{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+RSC=rc.exe
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+BSC32_FLAGS=/nologo /o"$(INTDIR)/jskwgen.bsc"
+BSC32_SBRS= \
+
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+LINK32_FLAGS=kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib\
+ advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib\
+ odbccp32.lib /nologo /subsystem:console /incremental:no\
+ /pdb:"$(INTDIR)/jskwgen.pdb" /machine:I386 /out:"$(INTDIR)/host_jskwgen.exe"
+LINK32_OBJS= \
+ "$(INTDIR)\jskwgen.obj" \
+
+"$(INTDIR)\host_jskwgen.exe" : "$(INTDIR)" $(DEF_FILE) $(LINK32_OBJS)
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+!ELSEIF "$(CFG)" == "jsshell - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "jsshell\Release"
+# PROP BASE Intermediate_Dir "jsshell\Release"
+# PROP BASE Target_Dir "jsshell"
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Target_Dir "jsshell"
+OUTDIR=.\Release
+INTDIR=.\Release
+
+ALL : "js - Win32 Release" "$(OUTDIR)\jsshell.exe"
+
+CLEAN :
+ -@erase "$(INTDIR)\js.obj"
+ -@erase "$(INTDIR)\jsshell.pch"
+ -@erase "$(OUTDIR)\jsshell.exe"
+ -@$(MAKE) /nologo /$(MAKEFLAGS) /F ".\js.mak" CFG="js - Win32 Release" clean
+
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+CPP=cl.exe
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /YX /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /D "NDEBUG" /D "_CONSOLE" /D "WIN32" /D "XP_WIN" /D "JSFILE" /I"$(INTDIR)" /YX /c
+CPP_PROJ=/nologo /MD /W3 /GX /O2 /D "NDEBUG" /D "_CONSOLE" /D "WIN32" /D\
+ "XP_WIN" /D "JSFILE" /Fp"$(INTDIR)/jsshell.pch" /I"$(INTDIR)" /YX /Fo"$(INTDIR)/" /c
+CPP_OBJS=.\Release/
+CPP_SBRS=.\.
+
+.c{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.c{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+RSC=rc.exe
+# ADD BASE RSC /l 0x409 /d "NDEBUG"
+# ADD RSC /l 0x409 /d "NDEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+BSC32_FLAGS=/nologo /o"$(OUTDIR)/jsshell.bsc"
+BSC32_SBRS= \
+
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /machine:I386
+LINK32_FLAGS=kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib\
+ advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib\
+ odbccp32.lib /nologo /subsystem:console /incremental:no\
+ /pdb:"$(OUTDIR)/jsshell.pdb" /machine:I386 /out:"$(OUTDIR)/jsshell.exe"
+LINK32_OBJS= \
+ "$(INTDIR)\js.obj" \
+ "$(OUTDIR)\js32.lib"
+
+"$(OUTDIR)\jsshell.exe" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS)
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+!ELSEIF "$(CFG)" == "jsshell - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "jsshell\jsshell_"
+# PROP BASE Intermediate_Dir "jsshell\jsshell_"
+# PROP BASE Target_Dir "jsshell"
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Target_Dir "jsshell"
+OUTDIR=.\Debug
+INTDIR=.\Debug
+
+ALL : "js - Win32 Debug" "$(OUTDIR)\jsshell.exe"
+
+CLEAN :
+ -@erase "$(INTDIR)\js.obj"
+ -@erase "$(INTDIR)\jsshell.pch"
+ -@erase "$(OUTDIR)\jsshell.exe"
+ -@erase "$(OUTDIR)\jsshell.ilk"
+ -@erase "$(OUTDIR)\jsshell.pdb"
+ -@$(MAKE) /nologo /$(MAKEFLAGS) /F ".\js.mak" CFG="js - Win32 Debug" clean
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+CPP=cl.exe
+# ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /YX /c
+# ADD CPP /nologo /MDd /W3 /Gm /GX /Zi /Od /D "_CONSOLE" /D "_DEBUG" /D "WIN32" /D "XP_WIN" /D "JSFILE" /D "DEBUG" /YX /c
+CPP_PROJ=/nologo /MDd /W3 /Gm /GX /Zi /Od /D "_CONSOLE" /D "_DEBUG" /D "WIN32"\
+ /D "XP_WIN" /D "JSFILE" /D "DEBUG" /Fp"$(INTDIR)/jsshell.pch" /YX\
+ /Fo"$(INTDIR)/" /Fd"$(INTDIR)/" /c
+CPP_OBJS=.\Debug/
+CPP_SBRS=.\.
+
+.c{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.c{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+RSC=rc.exe
+# ADD BASE RSC /l 0x409 /d "_DEBUG"
+# ADD RSC /l 0x409 /d "_DEBUG"
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+BSC32_FLAGS=/nologo /o"$(OUTDIR)/jsshell.bsc"
+BSC32_SBRS= \
+
+LINK32=link.exe
+# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386
+# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386
+LINK32_FLAGS=kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib\
+ advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib\
+ odbccp32.lib /nologo /subsystem:console /incremental:yes\
+ /pdb:"$(OUTDIR)/jsshell.pdb" /debug /machine:I386 /out:"$(OUTDIR)/jsshell.exe"
+LINK32_OBJS= \
+ "$(INTDIR)\js.obj" \
+ "$(OUTDIR)\js32.lib"
+
+"$(OUTDIR)\jsshell.exe" : "$(OUTDIR)" $(DEF_FILE) $(LINK32_OBJS)
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir "fdlibm\Release"
+# PROP BASE Intermediate_Dir "fdlibm\Release"
+# PROP BASE Target_Dir "fdlibm"
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir "Release"
+# PROP Intermediate_Dir "Release"
+# PROP Target_Dir "fdlibm"
+OUTDIR=.\Release
+INTDIR=.\Release
+
+ALL : "$(OUTDIR)\fdlibm.lib"
+
+CLEAN :
+ -@erase "$(INTDIR)\e_atan2.obj"
+ -@erase "$(INTDIR)\e_pow.obj"
+ -@erase "$(INTDIR)\e_sqrt.obj"
+ -@erase "$(INTDIR)\k_standard.obj"
+ -@erase "$(INTDIR)\s_atan.obj"
+ -@erase "$(INTDIR)\s_copysign.obj"
+ -@erase "$(INTDIR)\s_fabs.obj"
+ -@erase "$(INTDIR)\s_finite.obj"
+ -@erase "$(INTDIR)\s_isnan.obj"
+ -@erase "$(INTDIR)\s_matherr.obj"
+ -@erase "$(INTDIR)\s_rint.obj"
+ -@erase "$(INTDIR)\s_scalbn.obj"
+ -@erase "$(INTDIR)\w_atan2.obj"
+ -@erase "$(INTDIR)\w_pow.obj"
+ -@erase "$(INTDIR)\w_sqrt.obj"
+ -@erase "$(INTDIR)\fdlibm.pch"
+ -@erase "$(OUTDIR)\fdlibm.lib"
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+CPP=cl.exe
+# ADD BASE CPP /nologo /W3 /GX /O2 /D "WIN32" /D "NDEBUG" /D _X86_=1 /D "_WINDOWS" /YX /c
+# ADD CPP /nologo /MD /W3 /GX /O2 /D "NDEBUG" /D "WIN32" /D _X86_=1 /D "_WINDOWS" /D "_IEEE_LIBM" /YX /c
+CPP_PROJ=/nologo /MD /W3 /GX /O2 /D "NDEBUG" /D "WIN32" /D _X86_=1 /D "_WINDOWS" /D\
+ "_IEEE_LIBM" /D "XP_WIN" /I .\ /Fp"$(INTDIR)/fdlibm.pch" /YX /Fo"$(INTDIR)/" /c
+CPP_OBJS=.\Release/
+CPP_SBRS=.\.
+
+.c{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.c{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+BSC32_FLAGS=/nologo /o"$(OUTDIR)/fdlibm.bsc"
+BSC32_SBRS= \
+
+LIB32=link.exe -lib
+# ADD BASE LIB32 /nologo
+# ADD LIB32 /nologo
+LIB32_FLAGS=/nologo /out:"$(OUTDIR)/fdlibm.lib"
+LIB32_OBJS= \
+ "$(INTDIR)\e_atan2.obj" \
+ "$(INTDIR)\e_pow.obj" \
+ "$(INTDIR)\e_sqrt.obj" \
+ "$(INTDIR)\k_standard.obj" \
+ "$(INTDIR)\s_atan.obj" \
+ "$(INTDIR)\s_copysign.obj" \
+ "$(INTDIR)\s_fabs.obj" \
+ "$(INTDIR)\s_finite.obj" \
+ "$(INTDIR)\s_isnan.obj" \
+ "$(INTDIR)\s_matherr.obj" \
+ "$(INTDIR)\s_rint.obj" \
+ "$(INTDIR)\s_scalbn.obj" \
+ "$(INTDIR)\w_atan2.obj" \
+ "$(INTDIR)\w_pow.obj" \
+ "$(INTDIR)\w_sqrt.obj"
+
+"$(OUTDIR)\fdlibm.lib" : "$(OUTDIR)" $(DEF_FILE) $(LIB32_OBJS)
+ $(LIB32) @<<
+ $(LIB32_FLAGS) $(DEF_FLAGS) $(LIB32_OBJS)
+<<
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 1
+# PROP BASE Output_Dir "fdlibm\Debug"
+# PROP BASE Intermediate_Dir "fdlibm\Debug"
+# PROP BASE Target_Dir "fdlibm"
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 1
+# PROP Output_Dir "Debug"
+# PROP Intermediate_Dir "Debug"
+# PROP Target_Dir "fdlibm"
+OUTDIR=.\Debug
+INTDIR=.\Debug
+
+ALL : "$(OUTDIR)\fdlibm.lib"
+
+CLEAN :
+ -@erase "$(INTDIR)\e_atan2.obj"
+ -@erase "$(INTDIR)\e_pow.obj"
+ -@erase "$(INTDIR)\e_sqrt.obj"
+ -@erase "$(INTDIR)\k_standard.obj"
+ -@erase "$(INTDIR)\s_atan.obj"
+ -@erase "$(INTDIR)\s_copysign.obj"
+ -@erase "$(INTDIR)\s_fabs.obj"
+ -@erase "$(INTDIR)\s_finite.obj"
+ -@erase "$(INTDIR)\s_isnan.obj"
+ -@erase "$(INTDIR)\s_matherr.obj"
+ -@erase "$(INTDIR)\s_rint.obj"
+ -@erase "$(INTDIR)\s_scalbn.obj"
+ -@erase "$(INTDIR)\w_atan2.obj"
+ -@erase "$(INTDIR)\w_pow.obj"
+ -@erase "$(INTDIR)\w_sqrt.obj"
+ -@erase "$(INTDIR)\fdlibm.pch"
+ -@erase "$(OUTDIR)\fdlibm.lib"
+
+"$(OUTDIR)" :
+ if not exist "$(OUTDIR)/$(NULL)" mkdir "$(OUTDIR)"
+
+CPP=cl.exe
+# ADD BASE CPP /nologo /W3 /GX /Z7 /Od /D "WIN32" /D "_DEBUG" /D _X86_=1 /D "_WINDOWS" /YX /c
+# ADD CPP /nologo /MDd /W3 /GX /Z7 /Od /D "_DEBUG" /D "WIN32" /D _X86_=1 /D "_WINDOWS" /D "_IEEE_LIBM" /YX /c
+CPP_PROJ=/nologo /MDd /W3 /GX /Z7 /Od /D "_DEBUG" /D "WIN32" /D _X86_=1 /D "_WINDOWS" /D\
+ "_IEEE_LIBM" /D "XP_WIN" -I .\ /Fp"$(INTDIR)/fdlibm.pch" /YX /Fo"$(INTDIR)/" /c
+CPP_OBJS=.\Debug/
+CPP_SBRS=.\.
+
+.c{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_OBJS)}.obj:
+ $(CPP) $(CPP_PROJ) $<
+
+.c{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cpp{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+.cxx{$(CPP_SBRS)}.sbr:
+ $(CPP) $(CPP_PROJ) $<
+
+BSC32=bscmake.exe
+# ADD BASE BSC32 /nologo
+# ADD BSC32 /nologo
+BSC32_FLAGS=/nologo /o"$(OUTDIR)/fdlibm.bsc"
+BSC32_SBRS= \
+
+LIB32=link.exe -lib
+# ADD BASE LIB32 /nologo
+# ADD LIB32 /nologo
+LIB32_FLAGS=/nologo /out:"$(OUTDIR)/fdlibm.lib"
+LIB32_OBJS= \
+ "$(INTDIR)\e_atan2.obj" \
+ "$(INTDIR)\e_pow.obj" \
+ "$(INTDIR)\e_sqrt.obj" \
+ "$(INTDIR)\k_standard.obj" \
+ "$(INTDIR)\s_atan.obj" \
+ "$(INTDIR)\s_copysign.obj" \
+ "$(INTDIR)\s_fabs.obj" \
+ "$(INTDIR)\s_finite.obj" \
+ "$(INTDIR)\s_isnan.obj" \
+ "$(INTDIR)\s_matherr.obj" \
+ "$(INTDIR)\s_rint.obj" \
+ "$(INTDIR)\s_scalbn.obj" \
+ "$(INTDIR)\w_atan2.obj" \
+ "$(INTDIR)\w_pow.obj" \
+ "$(INTDIR)\w_sqrt.obj"
+
+"$(OUTDIR)\fdlibm.lib" : "$(OUTDIR)" $(DEF_FILE) $(LIB32_OBJS)
+ $(LIB32) @<<
+ $(LIB32_FLAGS) $(DEF_FLAGS) $(LIB32_OBJS)
+<<
+
+!ENDIF
+
+################################################################################
+# Begin Target
+
+# Name "js - Win32 Release"
+# Name "js - Win32 Debug"
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+!ENDIF
+
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsapi.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSAPI=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsarray.h"\
+ ".\jsatom.h"\
+ ".\jsbool.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdate.h"\
+ ".\jsemit.h"\
+ ".\jsexn.h"\
+ ".\jsfile.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsmath.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsparse.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscan.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxml.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSAPI=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsapi.obj" : $(SOURCE) $(DEP_CPP_JSAPI) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSAPI=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsarray.h"\
+ ".\jsatom.h"\
+ ".\jsbool.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdate.h"\
+ ".\jsemit.h"\
+ ".\jsexn.h"\
+ ".\jsfile.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsmath.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsparse.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscan.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxml.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSAPI=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsapi.obj" : $(SOURCE) $(DEP_CPP_JSAPI) "$(INTDIR)"
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsarena.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSARE=\
+ ".\jsarena.h"\
+ ".\jsbit.h"\
+ ".\jscompat.h"\
+ ".\jscpucfg.h"\
+ ".\jslong.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsstddef.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSARE=\
+ ".\jsautocfg.h"\
+
+
+"$(INTDIR)\jsarena.obj" : $(SOURCE) $(DEP_CPP_JSARE) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSARE=\
+ ".\jsarena.h"\
+ ".\jsbit.h"\
+ ".\jscompat.h"\
+ ".\jscpucfg.h"\
+ ".\jslong.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsstddef.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSARE=\
+ ".\jsautocfg.h"\
+
+
+"$(INTDIR)\jsarena.obj" : $(SOURCE) $(DEP_CPP_JSARE) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsarray.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSARR=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsarray.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSARR=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsarray.obj" : $(SOURCE) $(DEP_CPP_JSARR) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSARR=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsarray.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSARR=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsarray.obj" : $(SOURCE) $(DEP_CPP_JSARR) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsatom.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSATO=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSATO=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsatom.obj" : $(SOURCE) $(DEP_CPP_JSATO) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSATO=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSATO=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsatom.obj" : $(SOURCE) $(DEP_CPP_JSATO) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsbool.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSBOO=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsbool.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSBOO=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsbool.obj" : $(SOURCE) $(DEP_CPP_JSBOO) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSBOO=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsbool.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSBOO=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsbool.obj" : $(SOURCE) $(DEP_CPP_JSBOO) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jscntxt.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSCNT=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdbgapi.h"\
+ ".\jsexn.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscan.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSCNT=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jscntxt.obj" : $(SOURCE) $(DEP_CPP_JSCNT) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSCNT=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdbgapi.h"\
+ ".\jsexn.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscan.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSCNT=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jscntxt.obj" : $(SOURCE) $(DEP_CPP_JSCNT) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsdate.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSDAT=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdate.h"\
+ ".\jsdtoa.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\prmjtime.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSDAT=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsdate.obj" : $(SOURCE) $(DEP_CPP_JSDAT) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSDAT=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdate.h"\
+ ".\jsdtoa.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\prmjtime.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSDAT=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsdate.obj" : $(SOURCE) $(DEP_CPP_JSDAT) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsdbgapi.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSDBG=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdbgapi.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSDBG=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsdbgapi.obj" : $(SOURCE) $(DEP_CPP_JSDBG) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSDBG=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdbgapi.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSDBG=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsdbgapi.obj" : $(SOURCE) $(DEP_CPP_JSDBG) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsdhash.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSDHA=\
+ ".\jsbit.h"\
+ ".\jscompat.h"\
+ ".\jscpucfg.h"\
+ ".\jsdhash.h"\
+ ".\jslong.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSDHA=\
+ ".\jsautocfg.h"\
+
+
+"$(INTDIR)\jsdhash.obj" : $(SOURCE) $(DEP_CPP_JSDHA) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSDHA=\
+ ".\jsbit.h"\
+ ".\jscompat.h"\
+ ".\jscpucfg.h"\
+ ".\jsdhash.h"\
+ ".\jslong.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSDHA=\
+ ".\jsautocfg.h"\
+
+
+"$(INTDIR)\jsdhash.obj" : $(SOURCE) $(DEP_CPP_JSDHA) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsdtoa.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSDTO=\
+ ".\jscompat.h"\
+ ".\jscpucfg.h"\
+ ".\jsdtoa.h"\
+ ".\jslong.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsstddef.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSDTO=\
+ ".\jsautocfg.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsdtoa.obj" : $(SOURCE) $(DEP_CPP_JSDTO) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSDTO=\
+ ".\jscompat.h"\
+ ".\jscpucfg.h"\
+ ".\jsdtoa.h"\
+ ".\jslong.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsstddef.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSDTO=\
+ ".\jsautocfg.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsdtoa.obj" : $(SOURCE) $(DEP_CPP_JSDTO) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsemit.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSEMI=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsemit.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsparse.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscan.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSEMI=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsemit.obj" : $(SOURCE) $(DEP_CPP_JSEMI) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSEMI=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsemit.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsparse.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscan.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSEMI=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsemit.obj" : $(SOURCE) $(DEP_CPP_JSEMI) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsexn.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSEXN=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsexn.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSEXN=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsexn.obj" : $(SOURCE) $(DEP_CPP_JSEXN) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSEXN=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsexn.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSEXN=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsexn.obj" : $(SOURCE) $(DEP_CPP_JSEXN) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsfun.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSFUN=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsarray.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsparse.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscan.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxdrapi.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSFUN=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsfun.obj" : $(SOURCE) $(DEP_CPP_JSFUN) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSFUN=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsarray.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsparse.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscan.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxdrapi.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSFUN=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsfun.obj" : $(SOURCE) $(DEP_CPP_JSFUN) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsgc.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSGC_=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxml.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSGC_=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsgc.obj" : $(SOURCE) $(DEP_CPP_JSGC_) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSGC_=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxml.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSGC_=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsgc.obj" : $(SOURCE) $(DEP_CPP_JSGC_) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jshash.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSHAS=\
+ ".\jsbit.h"\
+ ".\jscompat.h"\
+ ".\jscpucfg.h"\
+ ".\jshash.h"\
+ ".\jslong.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSHAS=\
+ ".\jsautocfg.h"\
+
+
+"$(INTDIR)\jshash.obj" : $(SOURCE) $(DEP_CPP_JSHAS) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSHAS=\
+ ".\jsbit.h"\
+ ".\jscompat.h"\
+ ".\jscpucfg.h"\
+ ".\jshash.h"\
+ ".\jslong.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSHAS=\
+ ".\jsautocfg.h"\
+
+
+"$(INTDIR)\jshash.obj" : $(SOURCE) $(DEP_CPP_JSHAS) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsinterp.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSINT=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsarray.h"\
+ ".\jsatom.h"\
+ ".\jsbool.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdbgapi.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxml.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSINT=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsinterp.obj" : $(SOURCE) $(DEP_CPP_JSINT) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSINT=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsarray.h"\
+ ".\jsatom.h"\
+ ".\jsbool.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdbgapi.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxml.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSINT=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsinterp.obj" : $(SOURCE) $(DEP_CPP_JSINT) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jslock.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSLOC=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSLOC=\
+ ".\jsautocfg.h"\
+ ".\pratom.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+ ".\prthread.h"\
+
+
+"$(INTDIR)\jslock.obj" : $(SOURCE) $(DEP_CPP_JSLOC) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSLOC=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSLOC=\
+ ".\jsautocfg.h"\
+ ".\pratom.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+ ".\prthread.h"\
+
+
+"$(INTDIR)\jslock.obj" : $(SOURCE) $(DEP_CPP_JSLOC) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jslog2.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSLOG=\
+ ".\jsbit.h"\
+ ".\jscpucfg.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jstypes.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSLOG=\
+ ".\jsautocfg.h"\
+
+
+"$(INTDIR)\jslog2.obj" : $(SOURCE) $(DEP_CPP_JSLOG) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSLOG=\
+ ".\jsbit.h"\
+ ".\jscpucfg.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jstypes.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSLOG=\
+ ".\jsautocfg.h"\
+
+
+"$(INTDIR)\jslog2.obj" : $(SOURCE) $(DEP_CPP_JSLOG) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jslong.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSLON=\
+ ".\jscpucfg.h"\
+ ".\jslong.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jstypes.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSLON=\
+ ".\jsautocfg.h"\
+
+
+"$(INTDIR)\jslong.obj" : $(SOURCE) $(DEP_CPP_JSLON) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSLON=\
+ ".\jscpucfg.h"\
+ ".\jslong.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jstypes.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSLON=\
+ ".\jsautocfg.h"\
+
+
+"$(INTDIR)\jslong.obj" : $(SOURCE) $(DEP_CPP_JSLON) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsmath.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSMAT=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslibmath.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsmath.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\prmjtime.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSMAT=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsmath.obj" : $(SOURCE) $(DEP_CPP_JSMAT) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSMAT=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslibmath.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsmath.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\prmjtime.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSMAT=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsmath.obj" : $(SOURCE) $(DEP_CPP_JSMAT) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsnum.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSNUM=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdtoa.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSNUM=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsnum.obj" : $(SOURCE) $(DEP_CPP_JSNUM) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSNUM=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdtoa.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSNUM=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsnum.obj" : $(SOURCE) $(DEP_CPP_JSNUM) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsobj.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSOBJ=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsbool.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdbgapi.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxdrapi.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSOBJ=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsobj.obj" : $(SOURCE) $(DEP_CPP_JSOBJ) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSOBJ=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsbool.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdbgapi.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxdrapi.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSOBJ=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsobj.obj" : $(SOURCE) $(DEP_CPP_JSOBJ) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsopcode.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSOPC=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsarray.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdbgapi.h"\
+ ".\jsdtoa.h"\
+ ".\jsemit.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSOPC=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsopcode.obj" : $(SOURCE) $(DEP_CPP_JSOPC) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSOPC=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsarray.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdbgapi.h"\
+ ".\jsdtoa.h"\
+ ".\jsemit.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSOPC=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsopcode.obj" : $(SOURCE) $(DEP_CPP_JSOPC) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsparse.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSPAR=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsemit.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsparse.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscan.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSPAR=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsparse.obj" : $(SOURCE) $(DEP_CPP_JSPAR) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSPAR=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsemit.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsparse.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscan.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSPAR=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsparse.obj" : $(SOURCE) $(DEP_CPP_JSPAR) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsprf.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSPRF=\
+ ".\jscpucfg.h"\
+ ".\jslong.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSPRF=\
+ ".\jsautocfg.h"\
+
+
+"$(INTDIR)\jsprf.obj" : $(SOURCE) $(DEP_CPP_JSPRF) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSPRF=\
+ ".\jscpucfg.h"\
+ ".\jslong.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSPRF=\
+ ".\jsautocfg.h"\
+
+
+"$(INTDIR)\jsprf.obj" : $(SOURCE) $(DEP_CPP_JSPRF) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsregexp.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSREG=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsarray.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxdrapi.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSREG=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsregexp.obj" : $(SOURCE) $(DEP_CPP_JSREG) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSREG=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsarray.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxdrapi.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSREG=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsregexp.obj" : $(SOURCE) $(DEP_CPP_JSREG) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsscan.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSSCA=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdtoa.h"\
+ ".\jsexn.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscan.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxml.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSSCA=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsscan.obj" : $(SOURCE) $(DEP_CPP_JSSCA) "$(INTDIR)" "$(INTDIR)\jsautokw.h"
+
+"$(INTDIR)\jsautokw.h" : $(INTDIR)\host_jskwgen.exe jskeyword.tbl
+ $(INTDIR)\host_jskwgen.exe $(INTDIR)\jsautokw.h
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSSCA=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdtoa.h"\
+ ".\jsexn.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscan.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxml.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+ $(INTDIR)\jsautokw.h \
+
+NODEP_CPP_JSSCA=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsscan.obj" : $(SOURCE) $(DEP_CPP_JSSCA) "$(INTDIR)"
+
+"$(INTDIR)\jsautokw.h" : $(INTDIR)\host_jskwgen.exe jskeyword.tbl
+ $(INTDIR)\host_jskwgen.exe $(INTDIR)\jsautokw.h
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jskwgen.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSSCO=\
+ ".\jskwgen.c"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+
+"$(INTDIR)\jskwgen.obj" : $(SOURCE) $(DEP_CPP_JSSCO) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSSCO=\
+ ".\jskwgen.c"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+LINK32_FLAGS=kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib\
+ advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib\
+ odbccp32.lib /nologo /subsystem:console /incremental:no\
+ /pdb:"$(INTDIR)/host_jskwgen.pdb" /machine:I386 /out:"$(INTDIR)/host_jskwgen.exe"
+
+LINK32_OBJS= \
+ "$(INTDIR)\jskwgen.obj"
+
+"$(INTDIR)\host_jskwgen.exe" : "$(INTDIR)" $(SOURCE) $(DEP_CPP_JSSCO) "$(INTDIR)"
+ $(LINK32) @<<
+ $(LINK32_FLAGS) $(LINK32_OBJS)
+<<
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsscope.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSSCO=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSSCO=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsscope.obj" : $(SOURCE) $(DEP_CPP_JSSCO) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSSCO=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSSCO=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsscope.obj" : $(SOURCE) $(DEP_CPP_JSSCO) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsscript.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSSCR=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdbgapi.h"\
+ ".\jsemit.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxdrapi.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSSCR=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsscript.obj" : $(SOURCE) $(DEP_CPP_JSSCR) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSSCR=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdbgapi.h"\
+ ".\jsemit.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxdrapi.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSSCR=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsscript.obj" : $(SOURCE) $(DEP_CPP_JSSCR) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsstr.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSSTR=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsarray.h"\
+ ".\jsatom.h"\
+ ".\jsbool.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSSTR=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsstr.obj" : $(SOURCE) $(DEP_CPP_JSSTR) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSSTR=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsarray.h"\
+ ".\jsatom.h"\
+ ".\jsbool.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSSTR=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsstr.obj" : $(SOURCE) $(DEP_CPP_JSSTR) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsutil.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSUTI=\
+ ".\jscpucfg.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSUTI=\
+ ".\jsautocfg.h"\
+
+
+"$(INTDIR)\jsutil.obj" : $(SOURCE) $(DEP_CPP_JSUTI) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSUTI=\
+ ".\jscpucfg.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSUTI=\
+ ".\jsautocfg.h"\
+
+
+"$(INTDIR)\jsutil.obj" : $(SOURCE) $(DEP_CPP_JSUTI) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsxdrapi.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSXDR=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxdrapi.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSXDR=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsxdrapi.obj" : $(SOURCE) $(DEP_CPP_JSXDR) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSXDR=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsobj.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscope.h"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxdrapi.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSXDR=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsxdrapi.obj" : $(SOURCE) $(DEP_CPP_JSXDR) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\jsxml.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_JSXML=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarray.h"\
+ ".\jsatom.h"\
+ ".\jsbit.h"\
+ ".\jsbool.h"\
+ ".\jscntxt.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsparse.h"\
+ ".\jsprf.h"\
+ ".\jsscan.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxml.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSXML=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsxml.obj" : $(SOURCE) $(DEP_CPP_JSXML) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_JSXML=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarray.h"\
+ ".\jsatom.h"\
+ ".\jsbit.h"\
+ ".\jsbool.h"\
+ ".\jscntxt.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jsnum.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsparse.h"\
+ ".\jsscan.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ ".\jsxml.h"\
+ ".\jsprf.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JSXML=\
+ ".\jsautocfg.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\jsxml.obj" : $(SOURCE) $(DEP_CPP_JSXML) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\prmjtime.c
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+DEP_CPP_PRMJT=\
+ ".\jscompat.h"\
+ ".\jscpucfg.h"\
+ ".\jslong.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jstypes.h"\
+ ".\prmjtime.h"\
+ {$(INCLUDE)}"\sys\TIMEB.H"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_PRMJT=\
+ ".\jsautocfg.h"\
+
+
+"$(INTDIR)\prmjtime.obj" : $(SOURCE) $(DEP_CPP_PRMJT) "$(INTDIR)"
+
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+DEP_CPP_PRMJT=\
+ ".\jscompat.h"\
+ ".\jscpucfg.h"\
+ ".\jslong.h"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsprf.h"\
+ ".\jstypes.h"\
+ ".\prmjtime.h"\
+ {$(INCLUDE)}"\sys\TIMEB.H"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_PRMJT=\
+ ".\jsautocfg.h"\
+
+
+"$(INTDIR)\prmjtime.obj" : $(SOURCE) $(DEP_CPP_PRMJT) "$(INTDIR)"
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Project Dependency
+
+# Project_Dep_Name "fdlibm"
+
+!IF "$(CFG)" == "js - Win32 Debug"
+
+"fdlibm - Win32 Debug" :
+ @$(MAKE) /nologo /$(MAKEFLAGS) /F ".\js.mak" CFG="fdlibm - Win32 Debug"
+
+!ELSEIF "$(CFG)" == "js - Win32 Release"
+
+"fdlibm - Win32 Release" :
+ @$(MAKE) /nologo /$(MAKEFLAGS) /F ".\js.mak" CFG="fdlibm - Win32 Release"
+
+!ENDIF
+
+# End Project Dependency
+# End Target
+################################################################################
+# Begin Target
+
+# Name "jsshell - Win32 Release"
+# Name "jsshell - Win32 Debug"
+
+!IF "$(CFG)" == "jsshell - Win32 Release"
+
+!ELSEIF "$(CFG)" == "jsshell - Win32 Debug"
+
+!ENDIF
+
+################################################################################
+# Begin Source File
+
+SOURCE=.\js.c
+DEP_CPP_JS_C42=\
+ ".\js.msg"\
+ ".\jsapi.h"\
+ ".\jsarena.h"\
+ ".\jsatom.h"\
+ ".\jsclist.h"\
+ ".\jscntxt.h"\
+ ".\jscompat.h"\
+ ".\jsconfig.h"\
+ ".\jscpucfg.h"\
+ ".\jsdbgapi.h"\
+ ".\jsemit.h"\
+ ".\jsfun.h"\
+ ".\jsgc.h"\
+ ".\jshash.h"\
+ ".\jsinterp.h"\
+ ".\jslock.h"\
+ ".\jslong.h"\
+ ".\jsobj.h"\
+ ".\jsopcode.h"\
+ ".\jsopcode.tbl"\
+ ".\jsosdep.h"\
+ ".\jsotypes.h"\
+ ".\jsparse.h"\
+ ".\jsprf.h"\
+ ".\jsprvtd.h"\
+ ".\jspubtd.h"\
+ ".\jsregexp.h"\
+ ".\jsscan.h"\
+ ".\jsscope.h"\
+ ".\jsscript.h"\
+ ".\jsshell.msg"\
+ ".\jsstddef.h"\
+ ".\jsstr.h"\
+ ".\jstypes.h"\
+ ".\jsutil.h"\
+ {$(INCLUDE)}"\sys\types.h"\
+
+NODEP_CPP_JS_C42=\
+ ".\jsautocfg.h"\
+ ".\jsdb.h"\
+ ".\jsdebug.h"\
+ ".\jsdjava.h"\
+ ".\jsjava.h"\
+ ".\jsperl.h"\
+ ".\prcvar.h"\
+ ".\prlock.h"\
+
+
+"$(INTDIR)\js.obj" : $(SOURCE) $(DEP_CPP_JS_C42) "$(INTDIR)"
+
+
+# End Source File
+################################################################################
+# Begin Project Dependency
+
+# Project_Dep_Name "jskwgen"
+
+!IF "$(CFG)" == "js - Win32 Release"
+
+"jskwgen - Win32 Release" :
+ @$(MAKE) /nologo /$(MAKEFLAGS) /F ".\js.mak" CFG="jskwgen - Win32 Release"
+
+!ELSEIF "$(CFG)" == "js - Win32 Debug"
+
+"jskwgen - Win32 Debug" :
+ @$(MAKE) /nologo /$(MAKEFLAGS) /F ".\js.mak" CFG="jskwgen - Win32 Debug"
+
+!ENDIF
+
+# End Project Dependency
+# End Target
+################################################################################
+# Begin Project Dependency
+
+# Project_Dep_Name "js"
+
+!IF "$(CFG)" == "jsshell - Win32 Release"
+
+"js - Win32 Release" :
+ @$(MAKE) /nologo /$(MAKEFLAGS) /F ".\js.mak" CFG="js - Win32 Release"
+
+!ELSEIF "$(CFG)" == "jsshell - Win32 Debug"
+
+"js - Win32 Debug" :
+ @$(MAKE) /nologo /$(MAKEFLAGS) /F ".\js.mak" CFG="js - Win32 Debug"
+
+!ENDIF
+
+# End Project Dependency
+# End Target
+################################################################################
+# Begin Target
+
+# Name "fdlibm - Win32 Release"
+# Name "fdlibm - Win32 Debug"
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+!ENDIF
+
+################################################################################
+# Begin Source File
+
+SOURCE=.\fdlibm\w_atan2.c
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+DEP_CPP_W_ATA=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\w_atan2.obj" : $(SOURCE) $(DEP_CPP_W_ATA) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+DEP_CPP_W_ATA=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\w_atan2.obj" : $(SOURCE) $(DEP_CPP_W_ATA) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\fdlibm\s_copysign.c
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+DEP_CPP_S_COP=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\s_copysign.obj" : $(SOURCE) $(DEP_CPP_S_COP) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+DEP_CPP_S_COP=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\s_copysign.obj" : $(SOURCE) $(DEP_CPP_S_COP) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\fdlibm\w_pow.c
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+DEP_CPP_W_POW=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\w_pow.obj" : $(SOURCE) $(DEP_CPP_W_POW) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+DEP_CPP_W_POW=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\w_pow.obj" : $(SOURCE) $(DEP_CPP_W_POW) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\fdlibm\e_pow.c
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+DEP_CPP_E_POW=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\e_pow.obj" : $(SOURCE) $(DEP_CPP_E_POW) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+DEP_CPP_E_POW=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\e_pow.obj" : $(SOURCE) $(DEP_CPP_E_POW) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\fdlibm\k_standard.c
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+DEP_CPP_K_STA=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\k_standard.obj" : $(SOURCE) $(DEP_CPP_K_STA) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+DEP_CPP_K_STA=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\k_standard.obj" : $(SOURCE) $(DEP_CPP_K_STA) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\fdlibm\e_atan2.c
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+DEP_CPP_E_ATA=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\e_atan2.obj" : $(SOURCE) $(DEP_CPP_E_ATA) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+DEP_CPP_E_ATA=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\e_atan2.obj" : $(SOURCE) $(DEP_CPP_E_ATA) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\fdlibm\s_isnan.c
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+DEP_CPP_S_ISN=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\s_isnan.obj" : $(SOURCE) $(DEP_CPP_S_ISN) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+DEP_CPP_S_ISN=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\s_isnan.obj" : $(SOURCE) $(DEP_CPP_S_ISN) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\fdlibm\s_fabs.c
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+DEP_CPP_S_FAB=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\s_fabs.obj" : $(SOURCE) $(DEP_CPP_S_FAB) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+DEP_CPP_S_FAB=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\s_fabs.obj" : $(SOURCE) $(DEP_CPP_S_FAB) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\fdlibm\w_sqrt.c
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+DEP_CPP_W_SQR=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\w_sqrt.obj" : $(SOURCE) $(DEP_CPP_W_SQR) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+DEP_CPP_W_SQR=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\w_sqrt.obj" : $(SOURCE) $(DEP_CPP_W_SQR) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\fdlibm\s_scalbn.c
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+DEP_CPP_S_SCA=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\s_scalbn.obj" : $(SOURCE) $(DEP_CPP_S_SCA) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+DEP_CPP_S_SCA=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\s_scalbn.obj" : $(SOURCE) $(DEP_CPP_S_SCA) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\fdlibm\e_sqrt.c
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+DEP_CPP_E_SQR=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\e_sqrt.obj" : $(SOURCE) $(DEP_CPP_E_SQR) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+DEP_CPP_E_SQR=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\e_sqrt.obj" : $(SOURCE) $(DEP_CPP_E_SQR) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\fdlibm\s_rint.c
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+DEP_CPP_S_RIN=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\s_rint.obj" : $(SOURCE) $(DEP_CPP_S_RIN) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+DEP_CPP_S_RIN=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\s_rint.obj" : $(SOURCE) $(DEP_CPP_S_RIN) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\fdlibm\s_atan.c
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+DEP_CPP_S_ATA=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\s_atan.obj" : $(SOURCE) $(DEP_CPP_S_ATA) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+DEP_CPP_S_ATA=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\s_atan.obj" : $(SOURCE) $(DEP_CPP_S_ATA) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\fdlibm\s_finite.c
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+DEP_CPP_S_FIN=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\s_finite.obj" : $(SOURCE) $(DEP_CPP_S_FIN) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+DEP_CPP_S_FIN=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\s_finite.obj" : $(SOURCE) $(DEP_CPP_S_FIN) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ENDIF
+
+# End Source File
+################################################################################
+# Begin Source File
+
+SOURCE=.\fdlibm\s_matherr.c
+
+!IF "$(CFG)" == "fdlibm - Win32 Release"
+
+DEP_CPP_S_MAT=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\s_matherr.obj" : $(SOURCE) $(DEP_CPP_S_MAT) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ELSEIF "$(CFG)" == "fdlibm - Win32 Debug"
+
+DEP_CPP_S_MAT=\
+ ".\fdlibm\fdlibm.h"\
+
+
+"$(INTDIR)\s_matherr.obj" : $(SOURCE) $(DEP_CPP_S_MAT) "$(INTDIR)"
+ $(CPP) $(CPP_PROJ) $(SOURCE)
+
+
+!ENDIF
+
+# End Source File
+# End Target
+# End Project
+################################################################################
diff --git a/src/third_party/js-1.7/js.mdp b/src/third_party/js-1.7/js.mdp
new file mode 100644
index 00000000000..8da64fb6b61
--- /dev/null
+++ b/src/third_party/js-1.7/js.mdp
Binary files differ
diff --git a/src/third_party/js-1.7/js.msg b/src/third_party/js-1.7/js.msg
new file mode 100644
index 00000000000..2686af0356d
--- /dev/null
+++ b/src/third_party/js-1.7/js.msg
@@ -0,0 +1,301 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * This is the JavaScript error message file.
+ *
+ * The format for each JS error message is:
+ *
+ * MSG_DEF(<SYMBOLIC_NAME>, <ERROR_NUMBER>, <ARGUMENT_COUNT>, <EXCEPTION_NAME>,
+ * <FORMAT_STRING>)
+ *
+ * where:
+ * <SYMBOLIC_NAME> is a legal C identifier that will be used in the
+ * JS engine source.
+ *
+ * <ERROR_NUMBER> is a unique integral value identifying this error.
+ *
+ * <ARGUMENT_COUNT> is an integer literal specifying the total number of
+ * replaceable arguments in the following format string.
+ *
+ * <EXCEPTION_NAME> is an exception index from the enum in jsexn.c;
+ * JSEXN_NONE for none. The given exception index will be raised by the
+ * engine when the corresponding error occurs.
+ *
+ * <FORMAT_STRING> is a string literal, optionally containing sequences
+ * {X} where X is an integer representing the argument number that will
+ * be replaced with a string value when the error is reported.
+ *
+ * e.g.
+ *
+ * MSG_DEF(JSMSG_NOT_A_SUBSPECIES, 73, 2, JSEXN_NONE,
+ * "{0} is not a member of the {1} family")
+ *
+ * can be used:
+ *
+ * JS_ReportErrorNumber(JSMSG_NOT_A_SUBSPECIES, "Rhino", "Monkey");
+ *
+ * to report:
+ *
+ * "Rhino is not a member of the Monkey family"
+ *
+ * Before adding a new MSG_DEF at the end, look for JSMSG_UNUSED<n> free
+ * index placeholders in the middle of the list.
+ */
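+
+/*
+ * Illustrative sketch (not part of the original file): consumers of js.msg
+ * define MSG_DEF before including it, so each entry expands in place -- for
+ * example into an enum of error numbers. The include below is hypothetical;
+ * jsapi.c in this same patch uses the identical pattern for its debug-only
+ * consistency checks in JS_NewRuntime.
+ *
+ *   typedef enum JSErrNum {
+ *   #define MSG_DEF(name, number, count, exception, format) name = number,
+ *   #include "js.msg"
+ *   #undef MSG_DEF
+ *       JSErr_Limit
+ *   } JSErrNum;
+ */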
+
+MSG_DEF(JSMSG_NOT_AN_ERROR, 0, 0, JSEXN_NONE, "<Error #0 is reserved>")
+MSG_DEF(JSMSG_NOT_DEFINED, 1, 1, JSEXN_REFERENCEERR, "{0} is not defined")
+MSG_DEF(JSMSG_INACTIVE, 2, 0, JSEXN_INTERNALERR, "nothing active on context")
+MSG_DEF(JSMSG_MORE_ARGS_NEEDED, 3, 3, JSEXN_TYPEERR, "{0} requires more than {1} argument{2}")
+MSG_DEF(JSMSG_BAD_CHAR, 4, 1, JSEXN_INTERNALERR, "invalid format character {0}")
+MSG_DEF(JSMSG_BAD_TYPE, 5, 1, JSEXN_TYPEERR, "unknown type {0}")
+MSG_DEF(JSMSG_CANT_LOCK, 6, 0, JSEXN_INTERNALERR, "can't lock memory")
+MSG_DEF(JSMSG_CANT_UNLOCK, 7, 0, JSEXN_INTERNALERR, "can't unlock memory")
+MSG_DEF(JSMSG_INCOMPATIBLE_PROTO, 8, 3, JSEXN_TYPEERR, "{0}.prototype.{1} called on incompatible {2}")
+MSG_DEF(JSMSG_NO_CONSTRUCTOR, 9, 1, JSEXN_TYPEERR, "{0} has no constructor")
+MSG_DEF(JSMSG_CANT_ALIAS, 10, 3, JSEXN_TYPEERR, "can't alias {0} to {1} in class {2}")
+MSG_DEF(JSMSG_NOT_SCRIPTED_FUNCTION, 11, 1, JSEXN_TYPEERR, "{0} is not a scripted function")
+MSG_DEF(JSMSG_BAD_SORT_ARG, 12, 0, JSEXN_TYPEERR, "invalid Array.prototype.sort argument")
+MSG_DEF(JSMSG_BAD_ATOMIC_NUMBER, 13, 1, JSEXN_INTERNALERR, "internal error: no index for atom {0}")
+MSG_DEF(JSMSG_TOO_MANY_LITERALS, 14, 0, JSEXN_INTERNALERR, "too many literals")
+MSG_DEF(JSMSG_CANT_WATCH, 15, 1, JSEXN_TYPEERR, "can't watch non-native objects of class {0}")
+MSG_DEF(JSMSG_STACK_UNDERFLOW, 16, 2, JSEXN_INTERNALERR, "internal error compiling {0}: stack underflow at pc {1}")
+MSG_DEF(JSMSG_NEED_DIET, 17, 1, JSEXN_INTERNALERR, "{0} too large")
+MSG_DEF(JSMSG_TOO_MANY_LOCAL_ROOTS, 18, 0, JSEXN_ERR, "out of local root space")
+MSG_DEF(JSMSG_READ_ONLY, 19, 1, JSEXN_ERR, "{0} is read-only")
+MSG_DEF(JSMSG_BAD_FORMAL, 20, 0, JSEXN_SYNTAXERR, "malformed formal parameter")
+MSG_DEF(JSMSG_BAD_ITERATOR, 21, 3, JSEXN_TYPEERR, "{0} has invalid {1} value {2}")
+MSG_DEF(JSMSG_NOT_FUNCTION, 22, 1, JSEXN_TYPEERR, "{0} is not a function")
+MSG_DEF(JSMSG_NOT_CONSTRUCTOR, 23, 1, JSEXN_TYPEERR, "{0} is not a constructor")
+MSG_DEF(JSMSG_STACK_OVERFLOW, 24, 1, JSEXN_INTERNALERR, "stack overflow in {0}")
+MSG_DEF(JSMSG_NOT_EXPORTED, 25, 1, JSEXN_TYPEERR, "{0} is not exported")
+MSG_DEF(JSMSG_OVER_RECURSED, 26, 0, JSEXN_INTERNALERR, "too much recursion")
+MSG_DEF(JSMSG_IN_NOT_OBJECT, 27, 1, JSEXN_TYPEERR, "invalid 'in' operand {0}")
+MSG_DEF(JSMSG_BAD_NEW_RESULT, 28, 1, JSEXN_TYPEERR, "invalid new expression result {0}")
+MSG_DEF(JSMSG_BAD_SHARP_DEF, 29, 1, JSEXN_ERR, "invalid sharp variable definition #{0}=")
+MSG_DEF(JSMSG_BAD_SHARP_USE, 30, 1, JSEXN_ERR, "invalid sharp variable use #{0}#")
+MSG_DEF(JSMSG_BAD_INSTANCEOF_RHS, 31, 1, JSEXN_TYPEERR, "invalid 'instanceof' operand {0}")
+MSG_DEF(JSMSG_BAD_BYTECODE, 32, 1, JSEXN_INTERNALERR, "unimplemented JavaScript bytecode {0}")
+MSG_DEF(JSMSG_BAD_RADIX, 33, 1, JSEXN_ERR, "illegal radix {0}")
+MSG_DEF(JSMSG_PAREN_BEFORE_LET, 34, 0, JSEXN_SYNTAXERR, "missing ( before let head")
+MSG_DEF(JSMSG_CANT_CONVERT, 35, 1, JSEXN_ERR, "can't convert {0} to an integer")
+MSG_DEF(JSMSG_CYCLIC_VALUE, 36, 1, JSEXN_ERR, "cyclic {0} value")
+MSG_DEF(JSMSG_PERMANENT, 37, 1, JSEXN_ERR, "{0} is permanent")
+MSG_DEF(JSMSG_CANT_CONVERT_TO, 38, 2, JSEXN_TYPEERR, "can't convert {0} to {1}")
+MSG_DEF(JSMSG_NO_PROPERTIES, 39, 1, JSEXN_TYPEERR, "{0} has no properties")
+MSG_DEF(JSMSG_CANT_FIND_CLASS, 40, 1, JSEXN_TYPEERR, "can't find class id {0}")
+MSG_DEF(JSMSG_CANT_XDR_CLASS, 41, 1, JSEXN_TYPEERR, "can't XDR class {0}")
+MSG_DEF(JSMSG_BYTECODE_TOO_BIG, 42, 2, JSEXN_INTERNALERR, "bytecode {0} too large (limit {1})")
+MSG_DEF(JSMSG_UNKNOWN_FORMAT, 43, 1, JSEXN_INTERNALERR, "unknown bytecode format {0}")
+MSG_DEF(JSMSG_TOO_MANY_CON_ARGS, 44, 0, JSEXN_SYNTAXERR, "too many constructor arguments")
+MSG_DEF(JSMSG_TOO_MANY_FUN_ARGS, 45, 0, JSEXN_SYNTAXERR, "too many function arguments")
+MSG_DEF(JSMSG_BAD_QUANTIFIER, 46, 1, JSEXN_SYNTAXERR, "invalid quantifier {0}")
+MSG_DEF(JSMSG_MIN_TOO_BIG, 47, 1, JSEXN_SYNTAXERR, "overlarge minimum {0}")
+MSG_DEF(JSMSG_MAX_TOO_BIG, 48, 1, JSEXN_SYNTAXERR, "overlarge maximum {0}")
+MSG_DEF(JSMSG_OUT_OF_ORDER, 49, 1, JSEXN_SYNTAXERR, "maximum {0} less than minimum")
+MSG_DEF(JSMSG_BAD_DESTRUCT_DECL, 50, 0, JSEXN_SYNTAXERR, "missing = in destructuring declaration")
+MSG_DEF(JSMSG_BAD_DESTRUCT_ASS, 51, 0, JSEXN_SYNTAXERR, "invalid destructuring assignment operator")
+MSG_DEF(JSMSG_PAREN_AFTER_LET, 52, 0, JSEXN_SYNTAXERR, "missing ) after let head")
+MSG_DEF(JSMSG_CURLY_AFTER_LET, 53, 0, JSEXN_SYNTAXERR, "missing } after let block")
+MSG_DEF(JSMSG_MISSING_PAREN, 54, 0, JSEXN_SYNTAXERR, "unterminated parenthetical")
+MSG_DEF(JSMSG_UNTERM_CLASS, 55, 1, JSEXN_SYNTAXERR, "unterminated character class {0}")
+MSG_DEF(JSMSG_TRAILING_SLASH, 56, 0, JSEXN_SYNTAXERR, "trailing \\ in regular expression")
+MSG_DEF(JSMSG_BAD_CLASS_RANGE, 57, 0, JSEXN_SYNTAXERR, "invalid range in character class")
+MSG_DEF(JSMSG_BAD_FLAG, 58, 1, JSEXN_SYNTAXERR, "invalid regular expression flag {0}")
+MSG_DEF(JSMSG_NO_INPUT, 59, 3, JSEXN_SYNTAXERR, "no input for /{0}/{1}{2}")
+MSG_DEF(JSMSG_CANT_OPEN, 60, 2, JSEXN_ERR, "can't open {0}: {1}")
+MSG_DEF(JSMSG_BAD_STRING_MASK, 61, 1, JSEXN_ERR, "invalid string escape mask {0}")
+MSG_DEF(JSMSG_UNMATCHED_RIGHT_PAREN, 62, 0, JSEXN_SYNTAXERR, "unmatched ) in regular expression")
+MSG_DEF(JSMSG_END_OF_DATA, 63, 0, JSEXN_INTERNALERR, "unexpected end of data")
+MSG_DEF(JSMSG_SEEK_BEYOND_START, 64, 0, JSEXN_INTERNALERR, "illegal seek beyond start")
+MSG_DEF(JSMSG_SEEK_BEYOND_END, 65, 0, JSEXN_INTERNALERR, "illegal seek beyond end")
+MSG_DEF(JSMSG_END_SEEK, 66, 0, JSEXN_INTERNALERR, "illegal end-based seek")
+MSG_DEF(JSMSG_WHITHER_WHENCE, 67, 1, JSEXN_INTERNALERR, "unknown seek whence: {0}")
+MSG_DEF(JSMSG_BAD_SCRIPT_MAGIC, 68, 0, JSEXN_INTERNALERR, "bad script XDR magic number")
+MSG_DEF(JSMSG_PAREN_BEFORE_FORMAL, 69, 0, JSEXN_SYNTAXERR, "missing ( before formal parameters")
+MSG_DEF(JSMSG_MISSING_FORMAL, 70, 0, JSEXN_SYNTAXERR, "missing formal parameter")
+MSG_DEF(JSMSG_PAREN_AFTER_FORMAL, 71, 0, JSEXN_SYNTAXERR, "missing ) after formal parameters")
+MSG_DEF(JSMSG_CURLY_BEFORE_BODY, 72, 0, JSEXN_SYNTAXERR, "missing { before function body")
+MSG_DEF(JSMSG_CURLY_AFTER_BODY, 73, 0, JSEXN_SYNTAXERR, "missing } after function body")
+MSG_DEF(JSMSG_PAREN_BEFORE_COND, 74, 0, JSEXN_SYNTAXERR, "missing ( before condition")
+MSG_DEF(JSMSG_PAREN_AFTER_COND, 75, 0, JSEXN_SYNTAXERR, "missing ) after condition")
+MSG_DEF(JSMSG_NO_IMPORT_NAME, 76, 0, JSEXN_SYNTAXERR, "missing name in import statement")
+MSG_DEF(JSMSG_NAME_AFTER_DOT, 77, 0, JSEXN_SYNTAXERR, "missing name after . operator")
+MSG_DEF(JSMSG_BRACKET_IN_INDEX, 78, 0, JSEXN_SYNTAXERR, "missing ] in index expression")
+MSG_DEF(JSMSG_NO_EXPORT_NAME, 79, 0, JSEXN_SYNTAXERR, "missing name in export statement")
+MSG_DEF(JSMSG_PAREN_BEFORE_SWITCH, 80, 0, JSEXN_SYNTAXERR, "missing ( before switch expression")
+MSG_DEF(JSMSG_PAREN_AFTER_SWITCH, 81, 0, JSEXN_SYNTAXERR, "missing ) after switch expression")
+MSG_DEF(JSMSG_CURLY_BEFORE_SWITCH, 82, 0, JSEXN_SYNTAXERR, "missing { before switch body")
+MSG_DEF(JSMSG_COLON_AFTER_CASE, 83, 0, JSEXN_SYNTAXERR, "missing : after case label")
+MSG_DEF(JSMSG_WHILE_AFTER_DO, 84, 0, JSEXN_SYNTAXERR, "missing while after do-loop body")
+MSG_DEF(JSMSG_PAREN_AFTER_FOR, 85, 0, JSEXN_SYNTAXERR, "missing ( after for")
+MSG_DEF(JSMSG_SEMI_AFTER_FOR_INIT, 86, 0, JSEXN_SYNTAXERR, "missing ; after for-loop initializer")
+MSG_DEF(JSMSG_SEMI_AFTER_FOR_COND, 87, 0, JSEXN_SYNTAXERR, "missing ; after for-loop condition")
+MSG_DEF(JSMSG_PAREN_AFTER_FOR_CTRL, 88, 0, JSEXN_SYNTAXERR, "missing ) after for-loop control")
+MSG_DEF(JSMSG_CURLY_BEFORE_TRY, 89, 0, JSEXN_SYNTAXERR, "missing { before try block")
+MSG_DEF(JSMSG_CURLY_AFTER_TRY, 90, 0, JSEXN_SYNTAXERR, "missing } after try block")
+MSG_DEF(JSMSG_PAREN_BEFORE_CATCH, 91, 0, JSEXN_SYNTAXERR, "missing ( before catch")
+MSG_DEF(JSMSG_CATCH_IDENTIFIER, 92, 0, JSEXN_SYNTAXERR, "missing identifier in catch")
+MSG_DEF(JSMSG_PAREN_AFTER_CATCH, 93, 0, JSEXN_SYNTAXERR, "missing ) after catch")
+MSG_DEF(JSMSG_CURLY_BEFORE_CATCH, 94, 0, JSEXN_SYNTAXERR, "missing { before catch block")
+MSG_DEF(JSMSG_CURLY_AFTER_CATCH, 95, 0, JSEXN_SYNTAXERR, "missing } after catch block")
+MSG_DEF(JSMSG_CURLY_BEFORE_FINALLY, 96, 0, JSEXN_SYNTAXERR, "missing { before finally block")
+MSG_DEF(JSMSG_CURLY_AFTER_FINALLY, 97, 0, JSEXN_SYNTAXERR, "missing } after finally block")
+MSG_DEF(JSMSG_CATCH_OR_FINALLY, 98, 0, JSEXN_SYNTAXERR, "missing catch or finally after try")
+MSG_DEF(JSMSG_PAREN_BEFORE_WITH, 99, 0, JSEXN_SYNTAXERR, "missing ( before with-statement object")
+MSG_DEF(JSMSG_PAREN_AFTER_WITH, 100, 0, JSEXN_SYNTAXERR, "missing ) after with-statement object")
+MSG_DEF(JSMSG_CURLY_IN_COMPOUND, 101, 0, JSEXN_SYNTAXERR, "missing } in compound statement")
+MSG_DEF(JSMSG_NO_VARIABLE_NAME, 102, 0, JSEXN_SYNTAXERR, "missing variable name")
+MSG_DEF(JSMSG_COLON_IN_COND, 103, 0, JSEXN_SYNTAXERR, "missing : in conditional expression")
+MSG_DEF(JSMSG_PAREN_AFTER_ARGS, 104, 0, JSEXN_SYNTAXERR, "missing ) after argument list")
+MSG_DEF(JSMSG_BRACKET_AFTER_LIST, 105, 0, JSEXN_SYNTAXERR, "missing ] after element list")
+MSG_DEF(JSMSG_COLON_AFTER_ID, 106, 0, JSEXN_SYNTAXERR, "missing : after property id")
+MSG_DEF(JSMSG_CURLY_AFTER_LIST, 107, 0, JSEXN_SYNTAXERR, "missing } after property list")
+MSG_DEF(JSMSG_PAREN_IN_PAREN, 108, 0, JSEXN_SYNTAXERR, "missing ) in parenthetical")
+MSG_DEF(JSMSG_SEMI_BEFORE_STMNT, 109, 0, JSEXN_SYNTAXERR, "missing ; before statement")
+MSG_DEF(JSMSG_NO_RETURN_VALUE, 110, 1, JSEXN_TYPEERR, "function {0} does not always return a value")
+MSG_DEF(JSMSG_DUPLICATE_FORMAL, 111, 1, JSEXN_TYPEERR, "duplicate formal argument {0}")
+MSG_DEF(JSMSG_EQUAL_AS_ASSIGN, 112, 1, JSEXN_SYNTAXERR, "test for equality (==) mistyped as assignment (=)?{0}")
+MSG_DEF(JSMSG_BAD_IMPORT, 113, 0, JSEXN_SYNTAXERR, "invalid import expression")
+MSG_DEF(JSMSG_TOO_MANY_DEFAULTS, 114, 0, JSEXN_SYNTAXERR, "more than one switch default")
+MSG_DEF(JSMSG_TOO_MANY_CASES, 115, 0, JSEXN_INTERNALERR, "too many switch cases")
+MSG_DEF(JSMSG_BAD_SWITCH, 116, 0, JSEXN_SYNTAXERR, "invalid switch statement")
+MSG_DEF(JSMSG_BAD_FOR_LEFTSIDE, 117, 0, JSEXN_SYNTAXERR, "invalid for/in left-hand side")
+MSG_DEF(JSMSG_CATCH_AFTER_GENERAL, 118, 0, JSEXN_SYNTAXERR, "catch after unconditional catch")
+MSG_DEF(JSMSG_CATCH_WITHOUT_TRY, 119, 0, JSEXN_SYNTAXERR, "catch without try")
+MSG_DEF(JSMSG_FINALLY_WITHOUT_TRY, 120, 0, JSEXN_SYNTAXERR, "finally without try")
+MSG_DEF(JSMSG_LABEL_NOT_FOUND, 121, 0, JSEXN_SYNTAXERR, "label not found")
+MSG_DEF(JSMSG_TOUGH_BREAK, 122, 0, JSEXN_SYNTAXERR, "invalid break")
+MSG_DEF(JSMSG_BAD_CONTINUE, 123, 0, JSEXN_SYNTAXERR, "invalid continue")
+MSG_DEF(JSMSG_BAD_RETURN_OR_YIELD, 124, 1, JSEXN_SYNTAXERR, "{0} not in function")
+MSG_DEF(JSMSG_BAD_LABEL, 125, 0, JSEXN_SYNTAXERR, "invalid label")
+MSG_DEF(JSMSG_DUPLICATE_LABEL, 126, 0, JSEXN_SYNTAXERR, "duplicate label")
+MSG_DEF(JSMSG_VAR_HIDES_ARG, 127, 1, JSEXN_TYPEERR, "variable {0} hides argument")
+MSG_DEF(JSMSG_BAD_VAR_INIT, 128, 0, JSEXN_SYNTAXERR, "invalid variable initialization")
+MSG_DEF(JSMSG_BAD_LEFTSIDE_OF_ASS, 129, 0, JSEXN_SYNTAXERR, "invalid assignment left-hand side")
+MSG_DEF(JSMSG_BAD_OPERAND, 130, 1, JSEXN_SYNTAXERR, "invalid {0} operand")
+MSG_DEF(JSMSG_BAD_PROP_ID, 131, 0, JSEXN_SYNTAXERR, "invalid property id")
+MSG_DEF(JSMSG_RESERVED_ID, 132, 1, JSEXN_SYNTAXERR, "{0} is a reserved identifier")
+MSG_DEF(JSMSG_SYNTAX_ERROR, 133, 0, JSEXN_SYNTAXERR, "syntax error")
+MSG_DEF(JSMSG_BAD_SHARP_VAR_DEF, 134, 0, JSEXN_SYNTAXERR, "invalid sharp variable definition")
+MSG_DEF(JSMSG_BAD_PROTOTYPE, 135, 1, JSEXN_TYPEERR, "'prototype' property of {0} is not an object")
+MSG_DEF(JSMSG_MISSING_EXPONENT, 136, 0, JSEXN_SYNTAXERR, "missing exponent")
+MSG_DEF(JSMSG_OUT_OF_MEMORY, 137, 0, JSEXN_ERR, "out of memory")
+MSG_DEF(JSMSG_UNTERMINATED_STRING, 138, 0, JSEXN_SYNTAXERR, "unterminated string literal")
+MSG_DEF(JSMSG_TOO_MANY_PARENS, 139, 0, JSEXN_INTERNALERR, "too many parentheses in regular expression")
+MSG_DEF(JSMSG_UNTERMINATED_COMMENT, 140, 0, JSEXN_SYNTAXERR, "unterminated comment")
+MSG_DEF(JSMSG_UNTERMINATED_REGEXP, 141, 0, JSEXN_SYNTAXERR, "unterminated regular expression literal")
+MSG_DEF(JSMSG_BAD_REGEXP_FLAG, 142, 0, JSEXN_SYNTAXERR, "invalid flag after regular expression")
+MSG_DEF(JSMSG_SHARPVAR_TOO_BIG, 143, 0, JSEXN_SYNTAXERR, "overlarge sharp variable number")
+MSG_DEF(JSMSG_ILLEGAL_CHARACTER, 144, 0, JSEXN_SYNTAXERR, "illegal character")
+MSG_DEF(JSMSG_BAD_OCTAL, 145, 1, JSEXN_SYNTAXERR, "{0} is not a legal ECMA-262 octal constant")
+MSG_DEF(JSMSG_BAD_INDIRECT_CALL, 146, 1, JSEXN_EVALERR, "function {0} must be called directly, and not by way of a function of another name")
+MSG_DEF(JSMSG_UNCAUGHT_EXCEPTION, 147, 1, JSEXN_INTERNALERR, "uncaught exception: {0}")
+MSG_DEF(JSMSG_INVALID_BACKREF, 148, 0, JSEXN_SYNTAXERR, "non-octal digit in an escape sequence that doesn't match a back-reference")
+MSG_DEF(JSMSG_BAD_BACKREF, 149, 0, JSEXN_SYNTAXERR, "back-reference exceeds number of capturing parentheses")
+MSG_DEF(JSMSG_PRECISION_RANGE, 150, 1, JSEXN_RANGEERR, "precision {0} out of range")
+MSG_DEF(JSMSG_BAD_GETTER_OR_SETTER, 151, 1, JSEXN_SYNTAXERR, "invalid {0} usage")
+MSG_DEF(JSMSG_BAD_ARRAY_LENGTH, 152, 0, JSEXN_RANGEERR, "invalid array length")
+MSG_DEF(JSMSG_CANT_DESCRIBE_PROPS, 153, 1, JSEXN_TYPEERR, "can't describe non-native properties of class {0}")
+MSG_DEF(JSMSG_BAD_APPLY_ARGS, 154, 1, JSEXN_TYPEERR, "second argument to Function.prototype.{0} must be an array")
+MSG_DEF(JSMSG_REDECLARED_VAR, 155, 2, JSEXN_TYPEERR, "redeclaration of {0} {1}")
+MSG_DEF(JSMSG_UNDECLARED_VAR, 156, 1, JSEXN_TYPEERR, "assignment to undeclared variable {0}")
+MSG_DEF(JSMSG_ANON_NO_RETURN_VALUE, 157, 0, JSEXN_TYPEERR, "anonymous function does not always return a value")
+MSG_DEF(JSMSG_DEPRECATED_USAGE, 158, 1, JSEXN_REFERENCEERR, "deprecated {0} usage")
+MSG_DEF(JSMSG_BAD_URI, 159, 0, JSEXN_URIERR, "malformed URI sequence")
+MSG_DEF(JSMSG_GETTER_ONLY, 160, 0, JSEXN_TYPEERR, "setting a property that has only a getter")
+MSG_DEF(JSMSG_TRAILING_COMMA, 161, 0, JSEXN_SYNTAXERR, "trailing comma is not legal in ECMA-262 object initializers")
+MSG_DEF(JSMSG_UNDEFINED_PROP, 162, 1, JSEXN_REFERENCEERR, "reference to undefined property {0}")
+MSG_DEF(JSMSG_USELESS_EXPR, 163, 0, JSEXN_TYPEERR, "useless expression")
+MSG_DEF(JSMSG_REDECLARED_PARAM, 164, 1, JSEXN_TYPEERR, "redeclaration of formal parameter {0}")
+MSG_DEF(JSMSG_NEWREGEXP_FLAGGED, 165, 0, JSEXN_TYPEERR, "can't supply flags when constructing one RegExp from another")
+MSG_DEF(JSMSG_RESERVED_SLOT_RANGE, 166, 0, JSEXN_RANGEERR, "reserved slot index out of range")
+MSG_DEF(JSMSG_CANT_DECODE_PRINCIPALS, 167, 0, JSEXN_INTERNALERR, "can't decode JSPrincipals")
+MSG_DEF(JSMSG_CANT_SEAL_OBJECT, 168, 1, JSEXN_ERR, "can't seal {0} objects")
+MSG_DEF(JSMSG_TOO_MANY_CATCH_VARS, 169, 0, JSEXN_SYNTAXERR, "too many catch variables")
+MSG_DEF(JSMSG_BAD_XML_MARKUP, 170, 0, JSEXN_SYNTAXERR, "invalid XML markup")
+MSG_DEF(JSMSG_BAD_XML_CHARACTER, 171, 0, JSEXN_SYNTAXERR, "illegal XML character")
+MSG_DEF(JSMSG_BAD_DEFAULT_XML_NAMESPACE,172,0,JSEXN_SYNTAXERR, "invalid default XML namespace")
+MSG_DEF(JSMSG_BAD_XML_NAME_SYNTAX, 173, 0, JSEXN_SYNTAXERR, "invalid XML name")
+MSG_DEF(JSMSG_BRACKET_AFTER_ATTR_EXPR,174, 0, JSEXN_SYNTAXERR, "missing ] after attribute expression")
+MSG_DEF(JSMSG_NESTING_GENERATOR, 175, 1, JSEXN_TYPEERR, "already executing generator {0}")
+MSG_DEF(JSMSG_CURLY_IN_XML_EXPR, 176, 0, JSEXN_SYNTAXERR, "missing } in XML expression")
+MSG_DEF(JSMSG_BAD_XML_NAMESPACE, 177, 1, JSEXN_TYPEERR, "invalid XML namespace {0}")
+MSG_DEF(JSMSG_BAD_XML_ATTR_NAME, 178, 1, JSEXN_TYPEERR, "invalid XML attribute name {0}")
+MSG_DEF(JSMSG_BAD_XML_NAME, 179, 1, JSEXN_TYPEERR, "invalid XML name {0}")
+MSG_DEF(JSMSG_BAD_XML_CONVERSION, 180, 1, JSEXN_TYPEERR, "can't convert {0} to XML")
+MSG_DEF(JSMSG_BAD_XMLLIST_CONVERSION, 181, 1, JSEXN_TYPEERR, "can't convert {0} to XMLList")
+MSG_DEF(JSMSG_BAD_GENERATOR_SEND, 182, 1, JSEXN_TYPEERR, "attempt to send {0} to newborn generator")
+MSG_DEF(JSMSG_NO_ASSIGN_IN_XML_ATTR, 183, 0, JSEXN_SYNTAXERR, "missing = in XML attribute")
+MSG_DEF(JSMSG_BAD_XML_ATTR_VALUE, 184, 0, JSEXN_SYNTAXERR, "invalid XML attribute value")
+MSG_DEF(JSMSG_XML_TAG_NAME_MISMATCH, 185, 1, JSEXN_SYNTAXERR, "XML tag name mismatch (expected {0})")
+MSG_DEF(JSMSG_BAD_XML_TAG_SYNTAX, 186, 0, JSEXN_SYNTAXERR, "invalid XML tag syntax")
+MSG_DEF(JSMSG_BAD_XML_LIST_SYNTAX, 187, 0, JSEXN_SYNTAXERR, "invalid XML list syntax")
+MSG_DEF(JSMSG_INCOMPATIBLE_METHOD, 188, 3, JSEXN_TYPEERR, "{0} {1} called on incompatible {2}")
+MSG_DEF(JSMSG_CANT_SET_XML_ATTRS, 189, 0, JSEXN_INTERNALERR, "can't set XML property attributes")
+MSG_DEF(JSMSG_END_OF_XML_SOURCE, 190, 0, JSEXN_SYNTAXERR, "unexpected end of XML source")
+MSG_DEF(JSMSG_END_OF_XML_ENTITY, 191, 0, JSEXN_SYNTAXERR, "unexpected end of XML entity")
+MSG_DEF(JSMSG_BAD_XML_QNAME, 192, 0, JSEXN_SYNTAXERR, "invalid XML qualified name")
+MSG_DEF(JSMSG_BAD_FOR_EACH_LOOP, 193, 0, JSEXN_SYNTAXERR, "invalid for each loop")
+MSG_DEF(JSMSG_BAD_XMLLIST_PUT, 194, 1, JSEXN_TYPEERR, "can't set property {0} in XMLList")
+MSG_DEF(JSMSG_UNKNOWN_XML_ENTITY, 195, 1, JSEXN_TYPEERR, "unknown XML entity {0}")
+MSG_DEF(JSMSG_BAD_XML_NCR, 196, 1, JSEXN_TYPEERR, "malformed XML character {0}")
+MSG_DEF(JSMSG_UNDEFINED_XML_NAME, 197, 1, JSEXN_REFERENCEERR, "reference to undefined XML name {0}")
+MSG_DEF(JSMSG_DUPLICATE_XML_ATTR, 198, 1, JSEXN_TYPEERR, "duplicate XML attribute {0}")
+MSG_DEF(JSMSG_TOO_MANY_FUN_VARS, 199, 0, JSEXN_SYNTAXERR, "too many local variables")
+MSG_DEF(JSMSG_ARRAY_INIT_TOO_BIG, 200, 0, JSEXN_INTERNALERR, "array initialiser too large")
+MSG_DEF(JSMSG_REGEXP_TOO_COMPLEX, 201, 0, JSEXN_INTERNALERR, "regular expression too complex")
+MSG_DEF(JSMSG_BUFFER_TOO_SMALL, 202, 0, JSEXN_INTERNALERR, "buffer too small")
+MSG_DEF(JSMSG_BAD_SURROGATE_CHAR, 203, 1, JSEXN_TYPEERR, "bad surrogate character {0}")
+MSG_DEF(JSMSG_UTF8_CHAR_TOO_LARGE, 204, 1, JSEXN_TYPEERR, "UTF-8 character {0} too large")
+MSG_DEF(JSMSG_MALFORMED_UTF8_CHAR, 205, 1, JSEXN_TYPEERR, "malformed UTF-8 character sequence at offset {0}")
+MSG_DEF(JSMSG_USER_DEFINED_ERROR, 206, 0, JSEXN_ERR, "JS_ReportError was called")
+MSG_DEF(JSMSG_WRONG_CONSTRUCTOR, 207, 1, JSEXN_TYPEERR, "wrong constructor called for {0}")
+MSG_DEF(JSMSG_BAD_GENERATOR_RETURN, 208, 1, JSEXN_TYPEERR, "generator function {0} returns a value")
+MSG_DEF(JSMSG_BAD_ANON_GENERATOR_RETURN, 209, 0, JSEXN_TYPEERR, "anonymous generator function returns a value")
+MSG_DEF(JSMSG_NAME_AFTER_FOR_PAREN, 210, 0, JSEXN_SYNTAXERR, "missing name after for (")
+MSG_DEF(JSMSG_IN_AFTER_FOR_NAME, 211, 0, JSEXN_SYNTAXERR, "missing in after for")
+MSG_DEF(JSMSG_BAD_ITERATOR_RETURN, 212, 2, JSEXN_TYPEERR, "{0}.{1} returned a primitive value")
+MSG_DEF(JSMSG_KEYWORD_NOT_NS, 213, 0, JSEXN_SYNTAXERR, "keyword is used as namespace")
+MSG_DEF(JSMSG_BAD_GENERATOR_YIELD, 214, 1, JSEXN_TYPEERR, "yield from closing generator {0}")
+MSG_DEF(JSMSG_BAD_YIELD_SYNTAX, 215, 0, JSEXN_SYNTAXERR, "yield expression must be parenthesized")
+MSG_DEF(JSMSG_ARRAY_COMP_LEFTSIDE, 216, 0, JSEXN_SYNTAXERR, "invalid array comprehension left-hand side")
+MSG_DEF(JSMSG_YIELD_FROM_FILTER, 217, 0, JSEXN_INTERNALERR, "yield not yet supported from filtering predicate")
+MSG_DEF(JSMSG_COMPILE_EXECED_SCRIPT, 218, 0, JSEXN_TYPEERR, "cannot compile over a script that is currently executing")
+MSG_DEF(JSMSG_NON_LIST_XML_METHOD, 219, 2, JSEXN_TYPEERR, "cannot call {0} method on an XML list with {1} elements")
diff --git a/src/third_party/js-1.7/js.pkg b/src/third_party/js-1.7/js.pkg
new file mode 100644
index 00000000000..93185a92fe0
--- /dev/null
+++ b/src/third_party/js-1.7/js.pkg
@@ -0,0 +1,2 @@
+[gecko xpi-bootstrap]
+dist/bin/@SHARED_LIBRARY@
diff --git a/src/third_party/js-1.7/js3240.rc b/src/third_party/js-1.7/js3240.rc
new file mode 100644
index 00000000000..1a9f62c00a4
--- /dev/null
+++ b/src/third_party/js-1.7/js3240.rc
@@ -0,0 +1,79 @@
+//Microsoft Developer Studio generated resource script.
+//
+#include "resource.h"
+
+#define APSTUDIO_READONLY_SYMBOLS
+/////////////////////////////////////////////////////////////////////////////
+//
+// Generated from the TEXTINCLUDE 2 resource.
+//
+#include "winver.h"
+
+/////////////////////////////////////////////////////////////////////////////
+#undef APSTUDIO_READONLY_SYMBOLS
+
+/////////////////////////////////////////////////////////////////////////////
+//
+// Version
+//
+
+VS_VERSION_INFO VERSIONINFO
+ FILEVERSION 4,0,0,0
+ PRODUCTVERSION 4,0,0,0
+ FILEFLAGSMASK 0x3fL
+#ifdef _DEBUG
+ FILEFLAGS 0x1L
+#else
+ FILEFLAGS 0x0L
+#endif
+ FILEOS 0x10004L
+ FILETYPE 0x2L
+ FILESUBTYPE 0x0L
+BEGIN
+ BLOCK "StringFileInfo"
+ BEGIN
+ BLOCK "040904e4"
+ BEGIN
+ VALUE "CompanyName", "Netscape Communications Corporation\0"
+ VALUE "FileDescription", "Netscape 32-bit JavaScript Module\0"
+ VALUE "FileVersion", "4.0\0"
+ VALUE "InternalName", "JS3240\0"
+ VALUE "LegalCopyright", "Copyright Netscape Communications. 1994-96\0"
+ VALUE "LegalTrademarks", "Netscape, Mozilla\0"
+ VALUE "OriginalFilename", "js3240.dll\0"
+ VALUE "ProductName", "NETSCAPE\0"
+ VALUE "ProductVersion", "4.0\0"
+ END
+ END
+ BLOCK "VarFileInfo"
+ BEGIN
+ VALUE "Translation", 0x409, 1252
+ END
+END
+
+#ifdef APSTUDIO_INVOKED
+/////////////////////////////////////////////////////////////////////////////
+//
+// TEXTINCLUDE
+//
+
+1 TEXTINCLUDE DISCARDABLE
+BEGIN
+ "resource.h\0"
+END
+
+2 TEXTINCLUDE DISCARDABLE
+BEGIN
+ "#include ""winver.h""\r\n"
+ "\0"
+END
+
+3 TEXTINCLUDE DISCARDABLE
+BEGIN
+ "\r\n"
+ "\0"
+END
+
+#endif // APSTUDIO_INVOKED
+
+///////////////////////////////////////////////////////////////////////////// \ No newline at end of file
diff --git a/src/third_party/js-1.7/jsOS240.def b/src/third_party/js-1.7/jsOS240.def
new file mode 100644
index 00000000000..8f27d649791
--- /dev/null
+++ b/src/third_party/js-1.7/jsOS240.def
@@ -0,0 +1,654 @@
+; ***** BEGIN LICENSE BLOCK *****
+; Version: MPL 1.1/GPL 2.0/LGPL 2.1
+;
+; The contents of this file are subject to the Mozilla Public License Version
+; 1.1 (the "License"); you may not use this file except in compliance with
+; the License. You may obtain a copy of the License at
+; http://www.mozilla.org/MPL/
+;
+; Software distributed under the License is distributed on an "AS IS" basis,
+; WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+; for the specific language governing rights and limitations under the
+; License.
+;
+; The Original Code is Mozilla Communicator client code, released
+; March 31, 1998.
+;
+; The Initial Developer of the Original Code is
+; Netscape Communications Corporation.
+; Portions created by the Initial Developer are Copyright (C) 1998
+; the Initial Developer. All Rights Reserved.
+;
+; Contributor(s):
+;
+; Alternatively, the contents of this file may be used under the terms of
+; either of the GNU General Public License Version 2 or later (the "GPL"),
+; or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+; in which case the provisions of the GPL or the LGPL are applicable instead
+; of those above. If you wish to allow use of your version of this file only
+; under the terms of either the GPL or the LGPL, and not to allow others to
+; use your version of this file under the terms of the MPL, indicate your
+; decision by deleting the provisions above and replace them with the notice
+; and other provisions required by the GPL or the LGPL. If you do not delete
+; the provisions above, a recipient may use your version of this file under
+; the terms of any one of the MPL, the GPL or the LGPL.
+;
+; ***** END LICENSE BLOCK *****
+
+LIBRARY JS3240 INITINSTANCE TERMINSTANCE
+PROTMODE
+
+DESCRIPTION 'Netscape OS/2 JavaScript Library'
+
+
+CODE LOADONCALL MOVEABLE DISCARDABLE
+DATA PRELOAD MOVEABLE MULTIPLE NONSHARED
+
+
+EXPORTS
+;====================== win16 exports these at least... ===========
+; JS_Init = JS_Init @2
+; JS_Finish = JS_Finish @3
+; JS_GetNaNValue
+; JS_GetNegativeInfinityValue
+; JS_GetPositiveInfinityValue
+; JS_GetEmptyStringValue
+; JS_ConvertValue
+; JS_ValueToObject
+; JS_ValueToFunction
+; JS_ValueToString
+; JS_ValueToNumber
+; JS_ValueToBoolean
+; JS_TypeOfValue
+; JS_GetTypeName
+; JS_Lock
+; JS_Unlock
+; JS_NewContext
+; JS_DestroyContext
+; JS_ContextIterator
+; JS_GetGlobalObject
+; JS_SetGlobalObject
+; JS_InitStandardClasses
+;; JS_GetStaticLink
+; JS_malloc
+; JS_realloc
+; JS_free
+; JS_strdup
+; JS_NewDouble
+; JS_NewDoubleValue
+; JS_AddRoot
+; JS_RemoveRoot
+; JS_LockGCThing
+; JS_UnlockGCThing
+; JS_GC
+; JS_PropertyStub
+; JS_EnumerateStub
+; JS_ResolveStub
+; JS_ConvertStub
+; JS_FinalizeStub
+; JS_InitClass
+; JS_GetClass
+; JS_InstanceOf
+; JS_GetPrivate
+; JS_SetPrivate
+; JS_GetInstancePrivate
+; JS_GetPrototype
+; JS_GetParent
+; JS_SetParent
+; JS_GetConstructor
+; JS_NewObject
+; JS_DefineObject
+; JS_DefineConstDoubles
+; JS_DefineProperties
+; JS_DefineProperty
+; JS_DefinePropertyWithTinyId
+; JS_AliasProperty
+; JS_LookupProperty
+; JS_GetProperty
+; JS_SetProperty
+; JS_DeleteProperty
+; JS_NewArrayObject
+; JS_DefineElement
+; JS_AliasElement
+; JS_LookupElement
+; JS_GetElement
+; JS_SetElement
+; JS_DeleteElement
+; JS_ClearScope
+; JS_NewFunction
+; JS_GetFunctionObject
+; JS_GetFunctionName
+; JS_DefineFunctions
+; JS_DefineFunction
+; JS_CompileScript
+; JS_DestroyScript
+; JS_CompileFunction
+; JS_DecompileScript
+; JS_DecompileFunction
+; JS_DecompileFunctionBody
+; JS_ExecuteScript
+; JS_EvaluateScript
+; JS_CallFunction
+; JS_CallFunctionName
+; JS_CallFunctionValue
+; JS_SetBranchCallback
+; JS_IsRunning
+; JS_IsConstructing
+; JS_SetCallReturnValue2
+; JS_NewString
+; JS_NewStringCopyN
+; JS_NewStringCopyZ
+; JS_InternString
+; JS_GetStringBytes
+; JS_GetStringLength
+; JS_CompareStrings
+; JS_ReportError
+; JS_ReportOutOfMemory
+; JS_SetErrorReporter
+; JS_NewRegExpObject
+; JS_SetRegExpInput
+; JS_ClearRegExpStatics
+;=================================================
+
+
+;00001:jsstr (OFFSET:0x00002e17, SIZE:0x0000ae17):
+; - Public Definitions:
+; js_EmptySubString
+; js_CompareStrings
+; js_HashString
+; js_ValueToString
+; js_StringToObject
+; js_FinalizeString
+; js_NewStringCopyZ
+; js_NewString
+; js_InitStringClass
+; js_NewStringCopyN
+; js_BoyerMooreHorspool
+;
+;
+;00002:jsscript (OFFSET:0x0000dc2e, SIZE:0x00003abb):
+; - Public Definitions:
+; js_LineNumberToPC
+; js_PCToLineNumber
+; js_GetSrcNote
+; js_DestroyScript
+; js_NewScript
+;
+;
+;00003:jsscope (OFFSET:0x000116e9, SIZE:0x00004f82):
+; - Public Definitions:
+; js_hash_scope_ops
+; js_list_scope_ops
+; js_DestroyProperty
+; js_NewProperty
+; js_IdToValue
+; js_HashValue
+; js_DestroyScope
+; js_MutateScope
+; js_DropScope
+; js_HoldScope
+; js_NewScope
+; js_GetMutableScope
+; js_HoldProperty
+; js_DropProperty
+;
+;
+;00004:jsscan (OFFSET:0x0001666b, SIZE:0x00008890):
+; - Public Definitions:
+; js_MatchToken
+; js_FlushNewlines
+; js_PeekTokenSameLine
+; js_UngetToken
+; js_GetToken
+; js_PeekToken
+; js_ReportCompileError
+ js_CloseTokenStream
+ js_NewBufferTokenStream
+; js_NewTokenStream
+; js_InitScanner
+;
+;
+;00005:jsregexp (OFFSET:0x0001eefb, SIZE:0x0000eee4):
+; - Public Definitions:
+; js_RegExpClass
+; reopsize
+; js_NewRegExpObject
+; js_InitRegExpClass
+; js_FreeRegExpStatics
+; js_InitRegExpStatics
+; js_ExecuteRegExp
+; js_NewRegExpOpt
+; js_DestroyRegExp
+; js_NewRegExp
+;
+;
+;00006:jsparse (OFFSET:0x0002dddf, SIZE:0x00010b71):
+; - Public Definitions:
+; js_ParseFunctionBody
+ js_Parse
+;
+;
+;00007:jsopcode (OFFSET:0x0003e950, SIZE:0x0000d362):
+; - Public Definitions:
+; js_EscapeMap
+; js_NumCodeSpecs
+; js_CodeSpec
+; js_incop_str
+; js_true_str
+; js_false_str
+; js_this_str
+; js_null_str
+; js_void_str
+; js_typeof_str
+; js_delete_str
+; js_new_str
+; js_ValueToSource
+; js_DecompileScript
+; js_DecompileCode
+; js_DecompileFunction
+; js_puts
+; js_printf
+; js_GetPrinterOutput
+; js_DestroyPrinter
+; js_NewPrinter
+; js_EscapeString
+; js_Disassemble1
+; js_Disassemble
+;
+;00008:jsobj (OFFSET:0x0004bcb2, SIZE:0x000090a4):
+; - Public Definitions:
+; js_WithClass
+; js_ObjectClass
+; js_TryValueOf
+; js_ValueToNonNullObject
+; js_TryMethod
+; js_ObjectToString
+; js_SetClassPrototype
+; js_DeleteProperty2
+; js_DeleteProperty
+; js_SetProperty
+; js_GetProperty
+; js_FindVariableScope
+; js_FindVariable
+; js_FindProperty
+; js_LookupProperty
+; js_DefineProperty
+; js_FreeSlot
+; js_AllocSlot
+; js_FinalizeObject
+; js_GetClassPrototype
+; js_NewObject
+; js_InitObjectClass
+; js_ValueToObject
+; js_obj_toString
+; js_SetSlot
+; js_GetSlot
+;
+;
+;00009:jsnum (OFFSET:0x00054d56, SIZE:0x00004f29):
+; - Public Definitions:
+; js_ValueToInt32
+; js_NumberToObject
+; js_FinalizeDouble
+; js_InitNumberClass
+; js_NumberToString
+; js_NewDoubleValue
+; js_NewDouble
+; js_ValueToNumber
+;
+;
+;00010:jsmath (OFFSET:0x00059c7f, SIZE:0x000054b6):
+; - Public Definitions:
+; js_InitMathClass
+;
+;
+;00011:jsjava (OFFSET:0x0005f135, SIZE:0x00022aad):
+; - Public Definitions:
+; js_Hooks
+; MojaSrcLog
+; finalizeTask
+ JSJ_FindCurrentJSContext
+; JSJ_GetPrincipals
+ JSJ_IsSafeMethod
+ JSJ_InitContext
+ JSJ_Init
+ js_JSErrorToJException
+ js_JavaErrorReporter
+ js_RemoveReflection
+ js_ReflectJObjectToJSObject
+ js_convertJObjectToJSValue
+ js_convertJSValueToJObject
+ js_ReflectJSObjectToJObject
+; js_ReflectJClassToJSObject
+ JSJ_ExitJS
+ JSJ_EnterJS
+ JSJ_CurrentContext
+ JSJ_IsEnabled
+;added in GA code - DSR70297
+ JSJ_Finish
+ JSJ_IsCalledFromJava
+ js_GetJSPrincipalsFromJavaCaller
+
+;
+;
+;00012:jsinterp (OFFSET:0x00081be2, SIZE:0x00012274):
+; - Public Definitions:
+; js_Call
+; js_Interpret
+; js_SetLocalVariable
+; js_GetLocalVariable
+; js_SetArgument
+; js_GetArgument
+; js_FlushPropertyCacheByProp
+; js_FlushPropertyCache
+;
+;
+;00013:jsgc (OFFSET:0x00093e56, SIZE:0x00004f8d):
+; - Public Definitions:
+; js_ForceGC
+; js_UnlockGCThing
+; js_LockGCThing
+; js_GC
+; js_AllocGCThing
+; js_RemoveRoot
+; js_AddRoot
+; js_FinishGC
+; js_InitGC
+;
+;
+;00014:jsfun (OFFSET:0x00098de3, SIZE:0x0000977c):
+; - Public Definitions:
+; js_FunctionClass
+; js_ClosureClass
+; js_CallClass
+; js_DefineFunction
+; js_NewFunction
+; js_InitCallAndClosureClasses
+; js_InitFunctionClass
+; js_ValueToFunction
+; js_SetCallVariable
+; js_GetCallVariable
+; js_PutCallObject
+; js_GetCallObject
+;
+;
+;00015:jsemit (OFFSET:0x000a255f, SIZE:0x000077be):
+; - Public Definitions:
+; js_SrcNoteName
+; js_SrcNoteArity
+ js_FinishTakingSrcNotes
+; js_MoveSrcNotes
+; js_GetSrcNoteOffset
+; js_BumpSrcNoteDelta
+; js_NewSrcNote3
+; js_NewSrcNote2
+; js_PopStatement
+; js_EmitContinue
+; js_EmitBreak
+; js_SetSrcNoteOffset
+; js_NewSrcNote
+; js_PushStatement
+; js_MoveCode
+; js_SetJumpOffset
+; js_Emit3
+; js_Emit2
+; js_Emit1
+; js_UpdateDepth
+; js_SrcNoteLength
+; js_CancelLastOpcode
+ js_InitCodeGenerator
+;
+;
+;00016:jsdbgapi (OFFSET:0x000a9d1d, SIZE:0x000057db):
+; - Public Definitions:
+; js_watchpoint_list
+; js_trap_list
+; JS_SetAnnotationInFrame
+; JS_GetAnnotationFromFrame
+; JS_GetJSPrincipalArrayFromFrame
+; JS_NextJSFrame
+; JS_InitJSFrameIterator
+ JS_LineNumberToPC
+ JS_PCToLineNumber
+ JS_ClearAllWatchPoints
+ JS_ClearWatchPoint
+ JS_SetWatchPoint
+ JS_HandleTrap
+ JS_ClearAllTraps
+ JS_ClearScriptTraps
+ JS_ClearTrap
+ JS_GetTrapOpcode
+ JS_SetTrap
+;DSR070297 - added in GA code
+ JS_FrameIterator
+ JS_GetFrameAnnotation
+ JS_GetFramePrincipalArray
+ JS_GetFrameScript
+ JS_GetScriptFilename
+ JS_SetFrameAnnotation
+ JS_GetFramePC
+ JS_GetFunctionScript
+
+;
+;
+;00017:jsdate (OFFSET:0x000af4f8, SIZE:0x00009a8e):
+; - Public Definitions:
+ js_DateGetSeconds
+ js_DateGetMinutes
+ js_DateGetHours
+ js_DateGetDate
+ js_DateGetMonth
+ js_DateGetYear
+ js_NewDateObject
+; js_InitDateClass
+;
+;
+;00018:jscntxt (OFFSET:0x000b8f86, SIZE:0x00003732):
+; - Public Definitions:
+; js_InterpreterHooks
+; js_ReportIsNotDefined
+; js_ReportErrorAgain
+; js_ReportErrorVA
+; js_ContextIterator
+; js_DestroyContext
+; js_NewContext
+; js_SetInterpreterHooks
+;
+;
+;00019:jsbool (OFFSET:0x000bc6b8, SIZE:0x00003375):
+; - Public Definitions:
+; js_BooleanToString
+; js_BooleanToObject
+; js_InitBooleanClass
+; js_ValueToBoolean
+;
+;
+;00020:jsatom (OFFSET:0x000bfa2d, SIZE:0x000058d0):
+; - Public Definitions:
+; js_valueOf_str
+; js_toString_str
+; js_length_str
+; js_eval_str
+; js_constructor_str
+; js_class_prototype_str
+; js_assign_str
+; js_anonymous_str
+; js_Object_str
+; js_Array_str
+; js_type_str
+; js_DropUnmappedAtoms
+ js_FreeAtomMap
+ js_InitAtomMap
+; js_GetAtom
+; js_DropAtom
+; js_IndexAtom
+; js_ValueToStringAtom
+; js_AtomizeString
+; js_AtomizeDouble
+; js_AtomizeInt
+; js_AtomizeBoolean
+; js_AtomizeObject
+; js_HoldAtom
+; js_MarkAtomState
+; js_FreeAtomState
+; js_Atomize
+; js_InitAtomState
+;
+;
+;00021:jsarray (OFFSET:0x000c52fd, SIZE:0x00007c86):
+; - Public Definitions:
+; js_ArrayClass
+; js_SetArrayLength
+; js_GetArrayLength
+; js_InitArrayClass
+; js_NewArrayObject
+; PR_qsort
+;
+;
+;00022:jsapi (OFFSET:0x000ccf83, SIZE:0x0000de8c):
+; - Public Definitions:
+ JS_ClearRegExpStatics
+ JS_SetRegExpInput
+ JS_NewRegExpObject
+ JS_SetErrorReporter
+ JS_CompareStrings
+ JS_GetStringLength
+ JS_GetStringBytes
+ JS_InternString
+ JS_NewStringCopyZ
+ JS_NewStringCopyN
+ JS_NewString
+ JS_IsRunning
+ JS_SetBranchCallback
+ JS_CallFunctionValue
+ JS_CallFunctionName
+ JS_CallFunction
+ JS_EvaluateScriptForPrincipals
+ JS_EvaluateScript
+ JS_ExecuteScript
+ JS_DecompileFunctionBody
+ JS_DecompileFunction
+ JS_DecompileScript
+ JS_CompileFunctionForPrincipals
+ JS_CompileFunction
+ JS_DestroyScript
+ JS_CompileScriptForPrincipals
+ JS_CompileScript
+ JS_DefineFunction
+ JS_GetFunctionName
+ JS_GetFunctionObject
+ JS_NewFunction
+ JS_ClearScope
+ JS_DeleteElement
+ JS_SetElement
+ JS_GetElement
+ JS_LookupElement
+ JS_AliasElement
+ JS_DefineElement
+ JS_SetArrayLength
+ JS_GetArrayLength
+ JS_NewArrayObject
+ JS_DeleteProperty
+ JS_SetProperty
+ JS_GetProperty
+ JS_LookupProperty
+ JS_AliasProperty
+ JS_DefinePropertyWithTinyId
+ JS_DefineProperty
+ JS_DefineConstDoubles
+ JS_DefineObject
+ JS_NewObject
+ JS_GetConstructor
+ JS_SetParent
+ JS_GetParent
+ JS_SetPrototype
+ JS_GetPrototype
+ JS_GetInstancePrivate
+ JS_SetPrivate
+ JS_GetPrivate
+ JS_InstanceOf
+ JS_GetClass
+ JS_DefineFunctions
+ JS_DefineProperties
+ JS_InitClass
+ JS_FinalizeStub
+ JS_ConvertStub
+ JS_ResolveStub
+ JS_EnumerateStub
+ JS_PropertyStub
+ JS_GC
+ JS_UnlockGCThing
+ JS_LockGCThing
+ JS_RemoveRoot
+ JS_AddRoot
+ JS_NewDoubleValue
+ JS_NewDouble
+ JS_strdup
+ JS_free
+ JS_realloc
+ JS_ReportOutOfMemory
+ JS_malloc
+ JS_GetScopeChain
+ JS_InitStandardClasses
+ JS_SetGlobalObject
+ JS_GetGlobalObject
+ JS_SetVersion
+ JS_GetVersion
+ JS_ContextIterator
+ JS_GetTaskState
+ JS_DestroyContext
+ JS_NewContext
+ JS_Unlock
+ JS_Lock
+ JS_Finish
+ JS_Init
+ JS_GetTypeName
+ JS_TypeOfValue
+ JS_ValueToBoolean
+ JS_ValueToInt32
+ JS_ValueToNumber
+ JS_ValueToString
+ JS_ValueToFunction
+ JS_ValueToObject
+ JS_ReportError
+ JS_ConvertValue
+ JS_GetEmptyStringValue
+ JS_GetPositiveInfinityValue
+ JS_GetNegativeInfinityValue
+ JS_GetNaNValue
+;DSR062897 - added for GA code
+ JS_MaybeGC
+ JS_GetScriptPrincipals
+ JS_IsAssigning
+ JS_SetCharSetInfo
+;brendan@mozilla.org, 2-Sept-2000
+ JS_SetCallReturnValue2
+ JS_SetGCCallback
+ JS_SetGCCallbackRT
+ JS_AddExternalStringFinalizer
+ JS_RemoveExternalStringFinalizer
+ JS_NewExternalString
+;
+;
+;00023:prmjtime (OFFSET:0x000dae0f, SIZE:0x00008986):
+; - Public Definitions:
+ PRMJ_FormatTimeUSEnglish
+ PRMJ_gmtime
+ PRMJ_FormatTime
+ PRMJ_mktime
+ PRMJ_ComputeTime
+ PRMJ_localtime
+ PRMJ_ExplodeTime
+ PRMJ_ToLocal
+ PRMJ_ToGMT
+ PRMJ_NowLocal
+ PRMJ_DSTOffset
+ PRMJ_NowS
+ PRMJ_NowMS
+ PRMJ_Now
+ PRMJ_ToExtendedTime
+ PRMJ_ToBaseTime
+ PRMJ_setDST
+ PRMJ_LocalGMTDifference
+
+
diff --git a/src/third_party/js-1.7/jsapi.c b/src/third_party/js-1.7/jsapi.c
new file mode 100644
index 00000000000..f03fa364f0a
--- /dev/null
+++ b/src/third_party/js-1.7/jsapi.c
@@ -0,0 +1,5011 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JavaScript API.
+ */
+#include "jsstddef.h"
+#include <ctype.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+#include "jstypes.h"
+#include "jsarena.h" /* Added by JSIFY */
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsclist.h"
+#include "jsdhash.h"
+#include "jsprf.h"
+#include "jsapi.h"
+#include "jsarray.h"
+#include "jsatom.h"
+#include "jsbool.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsdate.h"
+#include "jsdtoa.h"
+#include "jsemit.h"
+#include "jsexn.h"
+#include "jsfun.h"
+#include "jsgc.h"
+#include "jsinterp.h"
+#include "jslock.h"
+#include "jsmath.h"
+#include "jsnum.h"
+#include "jsobj.h"
+#include "jsopcode.h"
+#include "jsparse.h"
+#include "jsregexp.h"
+#include "jsscan.h"
+#include "jsscope.h"
+#include "jsscript.h"
+#include "jsstr.h"
+#include "prmjtime.h"
+
+#if JS_HAS_FILE_OBJECT
+#include "jsfile.h"
+#endif
+
+#if JS_HAS_XML_SUPPORT
+#include "jsxml.h"
+#endif
+
+#if JS_HAS_GENERATORS
+#include "jsiter.h"
+#endif
+
+#ifdef HAVE_VA_LIST_AS_ARRAY
+#define JS_ADDRESSOF_VA_LIST(ap) ((va_list *)(ap))
+#else
+#define JS_ADDRESSOF_VA_LIST(ap) (&(ap))
+#endif
+
+#if defined(JS_PARANOID_REQUEST) && defined(JS_THREADSAFE)
+#define CHECK_REQUEST(cx) JS_ASSERT(cx->requestDepth)
+#else
+#define CHECK_REQUEST(cx) ((void)0)
+#endif
+
+JS_PUBLIC_API(int64)
+JS_Now()
+{
+ return PRMJ_Now();
+}
+
+JS_PUBLIC_API(jsval)
+JS_GetNaNValue(JSContext *cx)
+{
+ return DOUBLE_TO_JSVAL(cx->runtime->jsNaN);
+}
+
+JS_PUBLIC_API(jsval)
+JS_GetNegativeInfinityValue(JSContext *cx)
+{
+ return DOUBLE_TO_JSVAL(cx->runtime->jsNegativeInfinity);
+}
+
+JS_PUBLIC_API(jsval)
+JS_GetPositiveInfinityValue(JSContext *cx)
+{
+ return DOUBLE_TO_JSVAL(cx->runtime->jsPositiveInfinity);
+}
+
+JS_PUBLIC_API(jsval)
+JS_GetEmptyStringValue(JSContext *cx)
+{
+ return STRING_TO_JSVAL(cx->runtime->emptyString);
+}
+
+static JSBool
+TryArgumentFormatter(JSContext *cx, const char **formatp, JSBool fromJS,
+ jsval **vpp, va_list *app)
+{
+ const char *format;
+ JSArgumentFormatMap *map;
+
+ format = *formatp;
+ for (map = cx->argumentFormatMap; map; map = map->next) {
+ if (!strncmp(format, map->format, map->length)) {
+ *formatp = format + map->length;
+ return map->formatter(cx, format, fromJS, vpp, app);
+ }
+ }
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_BAD_CHAR, format);
+ return JS_FALSE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ConvertArguments(JSContext *cx, uintN argc, jsval *argv, const char *format,
+ ...)
+{
+ va_list ap;
+ JSBool ok;
+
+ va_start(ap, format);
+ ok = JS_ConvertArgumentsVA(cx, argc, argv, format, ap);
+ va_end(ap);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ConvertArgumentsVA(JSContext *cx, uintN argc, jsval *argv,
+ const char *format, va_list ap)
+{
+ jsval *sp;
+ JSBool required;
+ char c;
+ JSFunction *fun;
+ jsdouble d;
+ JSString *str;
+ JSObject *obj;
+
+ CHECK_REQUEST(cx);
+ sp = argv;
+ required = JS_TRUE;
+ while ((c = *format++) != '\0') {
+ if (isspace(c))
+ continue;
+ if (c == '/') {
+ required = JS_FALSE;
+ continue;
+ }
+ if (sp == argv + argc) {
+ if (required) {
+ fun = js_ValueToFunction(cx, &argv[-2], 0);
+ if (fun) {
+ char numBuf[12];
+ JS_snprintf(numBuf, sizeof numBuf, "%u", argc);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_MORE_ARGS_NEEDED,
+ JS_GetFunctionName(fun), numBuf,
+ (argc == 1) ? "" : "s");
+ }
+ return JS_FALSE;
+ }
+ break;
+ }
+ switch (c) {
+ case 'b':
+ if (!js_ValueToBoolean(cx, *sp, va_arg(ap, JSBool *)))
+ return JS_FALSE;
+ break;
+ case 'c':
+ if (!js_ValueToUint16(cx, *sp, va_arg(ap, uint16 *)))
+ return JS_FALSE;
+ break;
+ case 'i':
+ if (!js_ValueToECMAInt32(cx, *sp, va_arg(ap, int32 *)))
+ return JS_FALSE;
+ break;
+ case 'u':
+ if (!js_ValueToECMAUint32(cx, *sp, va_arg(ap, uint32 *)))
+ return JS_FALSE;
+ break;
+ case 'j':
+ if (!js_ValueToInt32(cx, *sp, va_arg(ap, int32 *)))
+ return JS_FALSE;
+ break;
+ case 'd':
+ if (!js_ValueToNumber(cx, *sp, va_arg(ap, jsdouble *)))
+ return JS_FALSE;
+ break;
+ case 'I':
+ if (!js_ValueToNumber(cx, *sp, &d))
+ return JS_FALSE;
+ *va_arg(ap, jsdouble *) = js_DoubleToInteger(d);
+ break;
+ case 's':
+ case 'S':
+ case 'W':
+ str = js_ValueToString(cx, *sp);
+ if (!str)
+ return JS_FALSE;
+ *sp = STRING_TO_JSVAL(str);
+ if (c == 's')
+ *va_arg(ap, char **) = JS_GetStringBytes(str);
+ else if (c == 'W')
+ *va_arg(ap, jschar **) = JS_GetStringChars(str);
+ else
+ *va_arg(ap, JSString **) = str;
+ break;
+ case 'o':
+ if (!js_ValueToObject(cx, *sp, &obj))
+ return JS_FALSE;
+ *sp = OBJECT_TO_JSVAL(obj);
+ *va_arg(ap, JSObject **) = obj;
+ break;
+ case 'f':
+ obj = js_ValueToFunctionObject(cx, sp, 0);
+ if (!obj)
+ return JS_FALSE;
+ *va_arg(ap, JSFunction **) = (JSFunction *) JS_GetPrivate(cx, obj);
+ break;
+ case 'v':
+ *va_arg(ap, jsval *) = *sp;
+ break;
+ case '*':
+ break;
+ default:
+ format--;
+ if (!TryArgumentFormatter(cx, &format, JS_TRUE, &sp,
+ JS_ADDRESSOF_VA_LIST(ap))) {
+ return JS_FALSE;
+ }
+ /* NB: the formatter already updated sp, so we continue here. */
+ continue;
+ }
+ sp++;
+ }
+ return JS_TRUE;
+}
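+
+/*
+ * Illustrative usage sketch (not part of the original source): a native
+ * method typically unpacks its actual arguments with JS_ConvertArguments,
+ * where '/' marks the start of optional arguments and 's' converts through
+ * JS_GetStringBytes. The function and variable names below are hypothetical.
+ *
+ *   static JSBool
+ *   my_native(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ *             jsval *rval)
+ *   {
+ *       char *path;
+ *       int32 flags = 0;
+ *
+ *       if (!JS_ConvertArguments(cx, argc, argv, "s/i", &path, &flags))
+ *           return JS_FALSE;    // conversion already reported the error
+ *       ...
+ *       return JS_TRUE;
+ *   }
+ */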
+
+JS_PUBLIC_API(jsval *)
+JS_PushArguments(JSContext *cx, void **markp, const char *format, ...)
+{
+ va_list ap;
+ jsval *argv;
+
+ va_start(ap, format);
+ argv = JS_PushArgumentsVA(cx, markp, format, ap);
+ va_end(ap);
+ return argv;
+}
+
+JS_PUBLIC_API(jsval *)
+JS_PushArgumentsVA(JSContext *cx, void **markp, const char *format, va_list ap)
+{
+ uintN argc;
+ jsval *argv, *sp;
+ char c;
+ const char *cp;
+ JSString *str;
+ JSFunction *fun;
+ JSStackHeader *sh;
+
+ CHECK_REQUEST(cx);
+ *markp = NULL;
+ argc = 0;
+ for (cp = format; (c = *cp) != '\0'; cp++) {
+ /*
+ * Count non-space non-star characters as individual jsval arguments.
+ * This may over-allocate stack, but we'll fix below.
+ */
+ if (isspace(c) || c == '*')
+ continue;
+ argc++;
+ }
+ sp = js_AllocStack(cx, argc, markp);
+ if (!sp)
+ return NULL;
+ argv = sp;
+ while ((c = *format++) != '\0') {
+ if (isspace(c) || c == '*')
+ continue;
+ switch (c) {
+ case 'b':
+ *sp = BOOLEAN_TO_JSVAL((JSBool) va_arg(ap, int));
+ break;
+ case 'c':
+ *sp = INT_TO_JSVAL((uint16) va_arg(ap, unsigned int));
+ break;
+ case 'i':
+ case 'j':
+ if (!js_NewNumberValue(cx, (jsdouble) va_arg(ap, int32), sp))
+ goto bad;
+ break;
+ case 'u':
+ if (!js_NewNumberValue(cx, (jsdouble) va_arg(ap, uint32), sp))
+ goto bad;
+ break;
+ case 'd':
+ case 'I':
+ if (!js_NewDoubleValue(cx, va_arg(ap, jsdouble), sp))
+ goto bad;
+ break;
+ case 's':
+ str = JS_NewStringCopyZ(cx, va_arg(ap, char *));
+ if (!str)
+ goto bad;
+ *sp = STRING_TO_JSVAL(str);
+ break;
+ case 'W':
+ str = JS_NewUCStringCopyZ(cx, va_arg(ap, jschar *));
+ if (!str)
+ goto bad;
+ *sp = STRING_TO_JSVAL(str);
+ break;
+ case 'S':
+ str = va_arg(ap, JSString *);
+ *sp = STRING_TO_JSVAL(str);
+ break;
+ case 'o':
+ *sp = OBJECT_TO_JSVAL(va_arg(ap, JSObject *));
+ break;
+ case 'f':
+ fun = va_arg(ap, JSFunction *);
+ *sp = fun ? OBJECT_TO_JSVAL(fun->object) : JSVAL_NULL;
+ break;
+ case 'v':
+ *sp = va_arg(ap, jsval);
+ break;
+ default:
+ format--;
+ if (!TryArgumentFormatter(cx, &format, JS_FALSE, &sp,
+ JS_ADDRESSOF_VA_LIST(ap))) {
+ goto bad;
+ }
+ /* NB: the formatter already updated sp, so we continue here. */
+ continue;
+ }
+ sp++;
+ }
+
+ /*
+ * We may have overallocated stack due to a multi-character format code
+ * handled by a JSArgumentFormatter. Give back that stack space!
+ */
+ JS_ASSERT(sp <= argv + argc);
+ if (sp < argv + argc) {
+ /* Return slots not pushed to the current stack arena. */
+ cx->stackPool.current->avail = (jsuword)sp;
+
+ /* Reduce the count of slots the GC will scan in this stack segment. */
+ sh = cx->stackHeaders;
+ JS_ASSERT(JS_STACK_SEGMENT(sh) + sh->nslots == argv + argc);
+ sh->nslots -= argc - (sp - argv);
+ }
+ return argv;
+
+bad:
+ js_FreeStack(cx, *markp);
+ return NULL;
+}
+
+JS_PUBLIC_API(void)
+JS_PopArguments(JSContext *cx, void *mark)
+{
+ CHECK_REQUEST(cx);
+ js_FreeStack(cx, mark);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_AddArgumentFormatter(JSContext *cx, const char *format,
+ JSArgumentFormatter formatter)
+{
+ size_t length;
+ JSArgumentFormatMap **mpp, *map;
+
+ length = strlen(format);
+ mpp = &cx->argumentFormatMap;
+ while ((map = *mpp) != NULL) {
+ /* Insert before any shorter string to match before prefixes. */
+ if (map->length < length)
+ break;
+ if (map->length == length && !strcmp(map->format, format))
+ goto out;
+ mpp = &map->next;
+ }
+ map = (JSArgumentFormatMap *) JS_malloc(cx, sizeof *map);
+ if (!map)
+ return JS_FALSE;
+ map->format = format;
+ map->length = length;
+ map->next = *mpp;
+ *mpp = map;
+out:
+ map->formatter = formatter;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(void)
+JS_RemoveArgumentFormatter(JSContext *cx, const char *format)
+{
+ size_t length;
+ JSArgumentFormatMap **mpp, *map;
+
+ length = strlen(format);
+ mpp = &cx->argumentFormatMap;
+ while ((map = *mpp) != NULL) {
+ if (map->length == length && !strcmp(map->format, format)) {
+ *mpp = map->next;
+ JS_free(cx, map);
+ return;
+ }
+ mpp = &map->next;
+ }
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ConvertValue(JSContext *cx, jsval v, JSType type, jsval *vp)
+{
+ JSBool ok, b;
+ JSObject *obj;
+ JSString *str;
+ jsdouble d, *dp;
+
+ CHECK_REQUEST(cx);
+ switch (type) {
+ case JSTYPE_VOID:
+ *vp = JSVAL_VOID;
+ ok = JS_TRUE;
+ break;
+ case JSTYPE_OBJECT:
+ ok = js_ValueToObject(cx, v, &obj);
+ if (ok)
+ *vp = OBJECT_TO_JSVAL(obj);
+ break;
+ case JSTYPE_FUNCTION:
+ *vp = v;
+ obj = js_ValueToFunctionObject(cx, vp, JSV2F_SEARCH_STACK);
+ ok = (obj != NULL);
+ break;
+ case JSTYPE_STRING:
+ str = js_ValueToString(cx, v);
+ ok = (str != NULL);
+ if (ok)
+ *vp = STRING_TO_JSVAL(str);
+ break;
+ case JSTYPE_NUMBER:
+ ok = js_ValueToNumber(cx, v, &d);
+ if (ok) {
+ dp = js_NewDouble(cx, d, 0);
+ ok = (dp != NULL);
+ if (ok)
+ *vp = DOUBLE_TO_JSVAL(dp);
+ }
+ break;
+ case JSTYPE_BOOLEAN:
+ ok = js_ValueToBoolean(cx, v, &b);
+ if (ok)
+ *vp = BOOLEAN_TO_JSVAL(b);
+ break;
+ default: {
+ char numBuf[12];
+ JS_snprintf(numBuf, sizeof numBuf, "%d", (int)type);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_BAD_TYPE,
+ numBuf);
+ ok = JS_FALSE;
+ break;
+ }
+ }
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ValueToObject(JSContext *cx, jsval v, JSObject **objp)
+{
+ CHECK_REQUEST(cx);
+ return js_ValueToObject(cx, v, objp);
+}
+
+JS_PUBLIC_API(JSFunction *)
+JS_ValueToFunction(JSContext *cx, jsval v)
+{
+ CHECK_REQUEST(cx);
+ return js_ValueToFunction(cx, &v, JSV2F_SEARCH_STACK);
+}
+
+JS_PUBLIC_API(JSFunction *)
+JS_ValueToConstructor(JSContext *cx, jsval v)
+{
+ CHECK_REQUEST(cx);
+ return js_ValueToFunction(cx, &v, JSV2F_SEARCH_STACK);
+}
+
+JS_PUBLIC_API(JSString *)
+JS_ValueToString(JSContext *cx, jsval v)
+{
+ CHECK_REQUEST(cx);
+ return js_ValueToString(cx, v);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ValueToNumber(JSContext *cx, jsval v, jsdouble *dp)
+{
+ CHECK_REQUEST(cx);
+ return js_ValueToNumber(cx, v, dp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ValueToECMAInt32(JSContext *cx, jsval v, int32 *ip)
+{
+ CHECK_REQUEST(cx);
+ return js_ValueToECMAInt32(cx, v, ip);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ValueToECMAUint32(JSContext *cx, jsval v, uint32 *ip)
+{
+ CHECK_REQUEST(cx);
+ return js_ValueToECMAUint32(cx, v, ip);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ValueToInt32(JSContext *cx, jsval v, int32 *ip)
+{
+ CHECK_REQUEST(cx);
+ return js_ValueToInt32(cx, v, ip);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ValueToUint16(JSContext *cx, jsval v, uint16 *ip)
+{
+ CHECK_REQUEST(cx);
+ return js_ValueToUint16(cx, v, ip);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ValueToBoolean(JSContext *cx, jsval v, JSBool *bp)
+{
+ CHECK_REQUEST(cx);
+ return js_ValueToBoolean(cx, v, bp);
+}
+
+JS_PUBLIC_API(JSType)
+JS_TypeOfValue(JSContext *cx, jsval v)
+{
+ JSType type;
+ JSObject *obj;
+ JSObjectOps *ops;
+ JSClass *clasp;
+
+ CHECK_REQUEST(cx);
+ if (JSVAL_IS_OBJECT(v)) {
+ type = JSTYPE_OBJECT; /* XXXbe JSTYPE_NULL for JS2 */
+ obj = JSVAL_TO_OBJECT(v);
+ if (obj) {
+ ops = obj->map->ops;
+#if JS_HAS_XML_SUPPORT
+ if (ops == &js_XMLObjectOps.base) {
+ type = JSTYPE_XML;
+ } else
+#endif
+ {
+ /*
+ * ECMA 262, 11.4.3 says that any native object that implements
+ * [[Call]] should be of type "function". Note that RegExp and
+ * Script are both of type "function" for compatibility with
+ * older SpiderMonkeys.
+ */
+ clasp = OBJ_GET_CLASS(cx, obj);
+ if ((ops == &js_ObjectOps)
+ ? (clasp->call
+ ? (clasp == &js_RegExpClass || clasp == &js_ScriptClass)
+ : clasp == &js_FunctionClass)
+ : ops->call != NULL) {
+ type = JSTYPE_FUNCTION;
+ } else {
+#ifdef NARCISSUS
+ if (!OBJ_GET_PROPERTY(cx, obj,
+ ATOM_TO_JSID(cx->runtime->atomState
+ .callAtom),
+ &v)) {
+ JS_ClearPendingException(cx);
+ } else if (VALUE_IS_FUNCTION(cx, v)) {
+ type = JSTYPE_FUNCTION;
+ }
+#endif
+ }
+ }
+ }
+ } else if (JSVAL_IS_NUMBER(v)) {
+ type = JSTYPE_NUMBER;
+ } else if (JSVAL_IS_STRING(v)) {
+ type = JSTYPE_STRING;
+ } else if (JSVAL_IS_BOOLEAN(v)) {
+ type = JSTYPE_BOOLEAN;
+ } else {
+ type = JSTYPE_VOID;
+ }
+ return type;
+}
+
+JS_PUBLIC_API(const char *)
+JS_GetTypeName(JSContext *cx, JSType type)
+{
+ if ((uintN)type >= (uintN)JSTYPE_LIMIT)
+ return NULL;
+ return js_type_strs[type];
+}
+
+/************************************************************************/
+
+JS_PUBLIC_API(JSRuntime *)
+JS_NewRuntime(uint32 maxbytes)
+{
+ JSRuntime *rt;
+
+#ifdef DEBUG
+ static JSBool didFirstChecks;
+
+ if (!didFirstChecks) {
+ /*
+ * This code asserts that the numbers associated with the error names
+ * in jsmsg.def are monotonically increasing. It uses values for the
+ * error names enumerated in jscntxt.c. It's not a compile-time check
+ * but it's better than nothing.
+ */
+ int errorNumber = 0;
+#define MSG_DEF(name, number, count, exception, format) \
+ JS_ASSERT(name == errorNumber++);
+#include "js.msg"
+#undef MSG_DEF
+
+#define MSG_DEF(name, number, count, exception, format) \
+ JS_BEGIN_MACRO \
+ uintN numfmtspecs = 0; \
+ const char *fmt; \
+ for (fmt = format; *fmt != '\0'; fmt++) { \
+ if (*fmt == '{' && isdigit(fmt[1])) \
+ ++numfmtspecs; \
+ } \
+ JS_ASSERT(count == numfmtspecs); \
+ JS_END_MACRO;
+#include "js.msg"
+#undef MSG_DEF
+
+ didFirstChecks = JS_TRUE;
+ }
+#endif /* DEBUG */
+
+ rt = (JSRuntime *) malloc(sizeof(JSRuntime));
+ if (!rt)
+ return NULL;
+
+ /* Initialize infallibly first, so we can goto bad and JS_DestroyRuntime. */
+ memset(rt, 0, sizeof(JSRuntime));
+ JS_INIT_CLIST(&rt->contextList);
+ JS_INIT_CLIST(&rt->trapList);
+ JS_INIT_CLIST(&rt->watchPointList);
+
+ if (!js_InitGC(rt, maxbytes))
+ goto bad;
+#ifdef JS_THREADSAFE
+ if (PR_FAILURE == PR_NewThreadPrivateIndex(&rt->threadTPIndex,
+ js_ThreadDestructorCB)) {
+ goto bad;
+ }
+ rt->gcLock = JS_NEW_LOCK();
+ if (!rt->gcLock)
+ goto bad;
+ rt->gcDone = JS_NEW_CONDVAR(rt->gcLock);
+ if (!rt->gcDone)
+ goto bad;
+ rt->requestDone = JS_NEW_CONDVAR(rt->gcLock);
+ if (!rt->requestDone)
+ goto bad;
+ /* this is asymmetric with JS_ShutDown: */
+ if (!js_SetupLocks(8, 16))
+ goto bad;
+ rt->rtLock = JS_NEW_LOCK();
+ if (!rt->rtLock)
+ goto bad;
+ rt->stateChange = JS_NEW_CONDVAR(rt->gcLock);
+ if (!rt->stateChange)
+ goto bad;
+ rt->setSlotLock = JS_NEW_LOCK();
+ if (!rt->setSlotLock)
+ goto bad;
+ rt->setSlotDone = JS_NEW_CONDVAR(rt->setSlotLock);
+ if (!rt->setSlotDone)
+ goto bad;
+ rt->scopeSharingDone = JS_NEW_CONDVAR(rt->gcLock);
+ if (!rt->scopeSharingDone)
+ goto bad;
+ rt->scopeSharingTodo = NO_SCOPE_SHARING_TODO;
+#endif
+ rt->propertyCache.empty = JS_TRUE;
+ if (!js_InitPropertyTree(rt))
+ goto bad;
+ return rt;
+
+bad:
+ JS_DestroyRuntime(rt);
+ return NULL;
+}
+
+JS_PUBLIC_API(void)
+JS_DestroyRuntime(JSRuntime *rt)
+{
+#ifdef DEBUG
+ /* Don't hurt everyone in leaky ol' Mozilla with a fatal JS_ASSERT! */
+ if (!JS_CLIST_IS_EMPTY(&rt->contextList)) {
+ JSContext *cx, *iter = NULL;
+ uintN cxcount = 0;
+ while ((cx = js_ContextIterator(rt, JS_TRUE, &iter)) != NULL)
+ cxcount++;
+ fprintf(stderr,
+"JS API usage error: %u contexts left in runtime upon JS_DestroyRuntime.\n",
+ cxcount);
+ }
+#endif
+
+ js_FreeRuntimeScriptState(rt);
+ js_FinishAtomState(&rt->atomState);
+ js_FinishGC(rt);
+#ifdef JS_THREADSAFE
+ if (rt->gcLock)
+ JS_DESTROY_LOCK(rt->gcLock);
+ if (rt->gcDone)
+ JS_DESTROY_CONDVAR(rt->gcDone);
+ if (rt->requestDone)
+ JS_DESTROY_CONDVAR(rt->requestDone);
+ if (rt->rtLock)
+ JS_DESTROY_LOCK(rt->rtLock);
+ if (rt->stateChange)
+ JS_DESTROY_CONDVAR(rt->stateChange);
+ if (rt->setSlotLock)
+ JS_DESTROY_LOCK(rt->setSlotLock);
+ if (rt->setSlotDone)
+ JS_DESTROY_CONDVAR(rt->setSlotDone);
+ if (rt->scopeSharingDone)
+ JS_DESTROY_CONDVAR(rt->scopeSharingDone);
+#else
+ GSN_CACHE_CLEAR(&rt->gsnCache);
+#endif
+ js_FinishPropertyTree(rt);
+ free(rt);
+}
+
+JS_PUBLIC_API(void)
+JS_ShutDown(void)
+{
+ js_FinishDtoa();
+#ifdef JS_THREADSAFE
+ js_CleanupLocks();
+#endif
+}
+
+JS_PUBLIC_API(void *)
+JS_GetRuntimePrivate(JSRuntime *rt)
+{
+ return rt->data;
+}
+
+JS_PUBLIC_API(void)
+JS_SetRuntimePrivate(JSRuntime *rt, void *data)
+{
+ rt->data = data;
+}
+
+#ifdef JS_THREADSAFE
+
+JS_PUBLIC_API(void)
+JS_BeginRequest(JSContext *cx)
+{
+ JSRuntime *rt;
+
+ JS_ASSERT(cx->thread->id == js_CurrentThreadId());
+ if (!cx->requestDepth) {
+ /* Wait until the GC is finished. */
+ rt = cx->runtime;
+ JS_LOCK_GC(rt);
+
+ /* NB: we use cx->thread here, not js_GetCurrentThread(). */
+ if (rt->gcThread != cx->thread) {
+ while (rt->gcLevel > 0)
+ JS_AWAIT_GC_DONE(rt);
+ }
+
+ /* Indicate that a request is running. */
+ rt->requestCount++;
+ cx->requestDepth = 1;
+ JS_UNLOCK_GC(rt);
+ return;
+ }
+ cx->requestDepth++;
+}
+
+JS_PUBLIC_API(void)
+JS_EndRequest(JSContext *cx)
+{
+ JSRuntime *rt;
+ JSScope *scope, **todop;
+ uintN nshares;
+
+ CHECK_REQUEST(cx);
+ JS_ASSERT(cx->requestDepth > 0);
+ if (cx->requestDepth == 1) {
+ /* Lock before clearing to interlock with ClaimScope, in jslock.c. */
+ rt = cx->runtime;
+ JS_LOCK_GC(rt);
+ cx->requestDepth = 0;
+
+ /* See whether cx has any single-threaded scopes to start sharing. */
+ todop = &rt->scopeSharingTodo;
+ nshares = 0;
+ while ((scope = *todop) != NO_SCOPE_SHARING_TODO) {
+ if (scope->ownercx != cx) {
+ todop = &scope->u.link;
+ continue;
+ }
+ *todop = scope->u.link;
+ scope->u.link = NULL; /* null u.link for sanity ASAP */
+
+ /*
+ * If js_DropObjectMap returns null, we held the last ref to scope.
+ * The waiting thread(s) must have been killed, after which the GC
+ * collected the object that held this scope. Unlikely, because it
+ * requires that the GC ran (e.g., from a branch callback) during
+ * this request, but possible.
+ */
+ if (js_DropObjectMap(cx, &scope->map, NULL)) {
+ js_InitLock(&scope->lock);
+ scope->u.count = 0; /* NULL may not pun as 0 */
+ js_FinishSharingScope(rt, scope); /* set ownercx = NULL */
+ nshares++;
+ }
+ }
+ if (nshares)
+ JS_NOTIFY_ALL_CONDVAR(rt->scopeSharingDone);
+
+ /* Give the GC a chance to run if this was the last request running. */
+ JS_ASSERT(rt->requestCount > 0);
+ rt->requestCount--;
+ if (rt->requestCount == 0)
+ JS_NOTIFY_REQUEST_DONE(rt);
+
+ JS_UNLOCK_GC(rt);
+ return;
+ }
+
+ cx->requestDepth--;
+}
+
+/* Yield to pending GC operations, regardless of request depth */
+JS_PUBLIC_API(void)
+JS_YieldRequest(JSContext *cx)
+{
+ JSRuntime *rt;
+
+ JS_ASSERT(cx->thread);
+ CHECK_REQUEST(cx);
+
+ rt = cx->runtime;
+ JS_LOCK_GC(rt);
+ JS_ASSERT(rt->requestCount > 0);
+ rt->requestCount--;
+ if (rt->requestCount == 0)
+ JS_NOTIFY_REQUEST_DONE(rt);
+ JS_UNLOCK_GC(rt);
+ /* XXXbe give the GC or another request calling it a chance to run here?
+ Assumes FIFO scheduling */
+ JS_LOCK_GC(rt);
+ if (rt->gcThread != cx->thread) {
+ while (rt->gcLevel > 0)
+ JS_AWAIT_GC_DONE(rt);
+ }
+ rt->requestCount++;
+ JS_UNLOCK_GC(rt);
+}
+
+JS_PUBLIC_API(jsrefcount)
+JS_SuspendRequest(JSContext *cx)
+{
+ jsrefcount saveDepth = cx->requestDepth;
+
+ while (cx->requestDepth)
+ JS_EndRequest(cx);
+ return saveDepth;
+}
+
+JS_PUBLIC_API(void)
+JS_ResumeRequest(JSContext *cx, jsrefcount saveDepth)
+{
+ JS_ASSERT(!cx->requestDepth);
+ while (--saveDepth >= 0)
+ JS_BeginRequest(cx);
+}
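+
+ /*
+ * Typical embedder usage (a sketch, not a complete example): suspend the
+ * request around a blocking operation so a pending GC is not stalled, then
+ * resume at the saved depth:
+ *
+ *     jsrefcount depth = JS_SuspendRequest(cx);
+ *     ... block on I/O or a lock without holding the request ...
+ *     JS_ResumeRequest(cx, depth);
+ */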
+
+#endif /* JS_THREADSAFE */
+
+JS_PUBLIC_API(void)
+JS_Lock(JSRuntime *rt)
+{
+ JS_LOCK_RUNTIME(rt);
+}
+
+JS_PUBLIC_API(void)
+JS_Unlock(JSRuntime *rt)
+{
+ JS_UNLOCK_RUNTIME(rt);
+}
+
+JS_PUBLIC_API(JSContextCallback)
+JS_SetContextCallback(JSRuntime *rt, JSContextCallback cxCallback)
+{
+ JSContextCallback old;
+
+ old = rt->cxCallback;
+ rt->cxCallback = cxCallback;
+ return old;
+}
+
+JS_PUBLIC_API(JSContext *)
+JS_NewContext(JSRuntime *rt, size_t stackChunkSize)
+{
+ return js_NewContext(rt, stackChunkSize);
+}
+
+JS_PUBLIC_API(void)
+JS_DestroyContext(JSContext *cx)
+{
+ js_DestroyContext(cx, JSDCM_FORCE_GC);
+}
+
+JS_PUBLIC_API(void)
+JS_DestroyContextNoGC(JSContext *cx)
+{
+ js_DestroyContext(cx, JSDCM_NO_GC);
+}
+
+JS_PUBLIC_API(void)
+JS_DestroyContextMaybeGC(JSContext *cx)
+{
+ js_DestroyContext(cx, JSDCM_MAYBE_GC);
+}
+
+JS_PUBLIC_API(void *)
+JS_GetContextPrivate(JSContext *cx)
+{
+ return cx->data;
+}
+
+JS_PUBLIC_API(void)
+JS_SetContextPrivate(JSContext *cx, void *data)
+{
+ cx->data = data;
+}
+
+JS_PUBLIC_API(JSRuntime *)
+JS_GetRuntime(JSContext *cx)
+{
+ return cx->runtime;
+}
+
+JS_PUBLIC_API(JSContext *)
+JS_ContextIterator(JSRuntime *rt, JSContext **iterp)
+{
+ return js_ContextIterator(rt, JS_TRUE, iterp);
+}
+
+JS_PUBLIC_API(JSVersion)
+JS_GetVersion(JSContext *cx)
+{
+ return cx->version & JSVERSION_MASK;
+}
+
+JS_PUBLIC_API(JSVersion)
+JS_SetVersion(JSContext *cx, JSVersion version)
+{
+ JSVersion oldVersion;
+
+ JS_ASSERT(version != JSVERSION_UNKNOWN);
+ JS_ASSERT((version & ~JSVERSION_MASK) == 0);
+
+ oldVersion = cx->version & JSVERSION_MASK;
+ if (version == oldVersion)
+ return oldVersion;
+
+ /* We no longer support 1.4 or below. */
+ if (version != JSVERSION_DEFAULT && version <= JSVERSION_1_4)
+ return oldVersion;
+
+ cx->version = (cx->version & ~JSVERSION_MASK) | version;
+ js_OnVersionChange(cx);
+ return oldVersion;
+}
+
+static struct v2smap {
+ JSVersion version;
+ const char *string;
+} v2smap[] = {
+ {JSVERSION_1_0, "1.0"},
+ {JSVERSION_1_1, "1.1"},
+ {JSVERSION_1_2, "1.2"},
+ {JSVERSION_1_3, "1.3"},
+ {JSVERSION_1_4, "1.4"},
+ {JSVERSION_ECMA_3, "ECMAv3"},
+ {JSVERSION_1_5, "1.5"},
+ {JSVERSION_1_6, "1.6"},
+ {JSVERSION_1_7, "1.7"},
+ {JSVERSION_DEFAULT, js_default_str},
+ {JSVERSION_UNKNOWN, NULL}, /* must be last, NULL is sentinel */
+};
+
+JS_PUBLIC_API(const char *)
+JS_VersionToString(JSVersion version)
+{
+ int i;
+
+ for (i = 0; v2smap[i].string; i++)
+ if (v2smap[i].version == version)
+ return v2smap[i].string;
+ return "unknown";
+}
+
+JS_PUBLIC_API(JSVersion)
+JS_StringToVersion(const char *string)
+{
+ int i;
+
+ for (i = 0; v2smap[i].string; i++)
+ if (strcmp(v2smap[i].string, string) == 0)
+ return v2smap[i].version;
+ return JSVERSION_UNKNOWN;
+}
+
+JS_PUBLIC_API(uint32)
+JS_GetOptions(JSContext *cx)
+{
+ return cx->options;
+}
+
+#define SYNC_OPTIONS_TO_VERSION(cx) \
+ JS_BEGIN_MACRO \
+ if ((cx)->options & JSOPTION_XML) \
+ (cx)->version |= JSVERSION_HAS_XML; \
+ else \
+ (cx)->version &= ~JSVERSION_HAS_XML; \
+ JS_END_MACRO
+
+JS_PUBLIC_API(uint32)
+JS_SetOptions(JSContext *cx, uint32 options)
+{
+ uint32 oldopts = cx->options;
+ cx->options = options;
+ SYNC_OPTIONS_TO_VERSION(cx);
+ return oldopts;
+}
+
+JS_PUBLIC_API(uint32)
+JS_ToggleOptions(JSContext *cx, uint32 options)
+{
+ uint32 oldopts = cx->options;
+ cx->options ^= options;
+ SYNC_OPTIONS_TO_VERSION(cx);
+ return oldopts;
+}
+
+JS_PUBLIC_API(const char *)
+JS_GetImplementationVersion(void)
+{
+ return "JavaScript-C 1.7.0 2007-10-03";
+}
+
+
+JS_PUBLIC_API(JSObject *)
+JS_GetGlobalObject(JSContext *cx)
+{
+ return cx->globalObject;
+}
+
+JS_PUBLIC_API(void)
+JS_SetGlobalObject(JSContext *cx, JSObject *obj)
+{
+ cx->globalObject = obj;
+
+#if JS_HAS_XML_SUPPORT
+ cx->xmlSettingFlags = 0;
+#endif
+}
+
+JSObject *
+js_InitFunctionAndObjectClasses(JSContext *cx, JSObject *obj)
+{
+ JSDHashTable *table;
+ JSBool resolving;
+ JSRuntime *rt;
+ JSResolvingKey key;
+ JSResolvingEntry *entry;
+ JSObject *fun_proto, *obj_proto;
+
+ /* If cx has no global object, use obj so prototypes can be found. */
+ if (!cx->globalObject)
+ JS_SetGlobalObject(cx, obj);
+
+ /* Record Function and Object in cx->resolvingTable, if we are resolving. */
+ table = cx->resolvingTable;
+ resolving = (table && table->entryCount);
+ rt = cx->runtime;
+ key.obj = obj;
+ if (resolving) {
+ key.id = ATOM_TO_JSID(rt->atomState.classAtoms[JSProto_Function]);
+ entry = (JSResolvingEntry *)
+ JS_DHashTableOperate(table, &key, JS_DHASH_ADD);
+ if (entry && entry->key.obj && (entry->flags & JSRESFLAG_LOOKUP)) {
+ /* Already resolving Function, record Object too. */
+ JS_ASSERT(entry->key.obj == obj);
+ key.id = ATOM_TO_JSID(rt->atomState.classAtoms[JSProto_Object]);
+ entry = (JSResolvingEntry *)
+ JS_DHashTableOperate(table, &key, JS_DHASH_ADD);
+ }
+ if (!entry) {
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+ }
+ JS_ASSERT(!entry->key.obj && entry->flags == 0);
+ entry->key = key;
+ entry->flags = JSRESFLAG_LOOKUP;
+ } else {
+ key.id = ATOM_TO_JSID(rt->atomState.classAtoms[JSProto_Object]);
+ if (!js_StartResolving(cx, &key, JSRESFLAG_LOOKUP, &entry))
+ return NULL;
+
+ key.id = ATOM_TO_JSID(rt->atomState.classAtoms[JSProto_Function]);
+ if (!js_StartResolving(cx, &key, JSRESFLAG_LOOKUP, &entry)) {
+ key.id = ATOM_TO_JSID(rt->atomState.classAtoms[JSProto_Object]);
+ JS_DHashTableOperate(table, &key, JS_DHASH_REMOVE);
+ return NULL;
+ }
+
+ table = cx->resolvingTable;
+ }
+
+ /* Initialize the function class first so constructors can be made. */
+ fun_proto = js_InitFunctionClass(cx, obj);
+ if (!fun_proto)
+ goto out;
+
+ /* Initialize the object class next so Object.prototype works. */
+ obj_proto = js_InitObjectClass(cx, obj);
+ if (!obj_proto) {
+ fun_proto = NULL;
+ goto out;
+ }
+
+ /* Function.prototype and the global object delegate to Object.prototype. */
+ OBJ_SET_PROTO(cx, fun_proto, obj_proto);
+ if (!OBJ_GET_PROTO(cx, obj))
+ OBJ_SET_PROTO(cx, obj, obj_proto);
+
+out:
+ /* If resolving, remove the other entry (Object or Function) from table. */
+ JS_DHashTableOperate(table, &key, JS_DHASH_REMOVE);
+ if (!resolving) {
+ /* If not resolving, remove the first entry added above, for Object. */
+ JS_ASSERT(key.id ==
+ ATOM_TO_JSID(rt->atomState.classAtoms[JSProto_Function]));
+ key.id = ATOM_TO_JSID(rt->atomState.classAtoms[JSProto_Object]);
+ JS_DHashTableOperate(table, &key, JS_DHASH_REMOVE);
+ }
+ return fun_proto;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_InitStandardClasses(JSContext *cx, JSObject *obj)
+{
+ JSAtom *atom;
+
+ CHECK_REQUEST(cx);
+
+ /* Define a top-level property 'undefined' with the undefined value. */
+ atom = cx->runtime->atomState.typeAtoms[JSTYPE_VOID];
+ if (!OBJ_DEFINE_PROPERTY(cx, obj, ATOM_TO_JSID(atom), JSVAL_VOID,
+ NULL, NULL, JSPROP_PERMANENT, NULL)) {
+ return JS_FALSE;
+ }
+
+ /* Function and Object require cooperative bootstrapping magic. */
+ if (!js_InitFunctionAndObjectClasses(cx, obj))
+ return JS_FALSE;
+
+ /* Initialize the rest of the standard objects and functions. */
+ return js_InitArrayClass(cx, obj) &&
+ js_InitBlockClass(cx, obj) &&
+ js_InitBooleanClass(cx, obj) &&
+ js_InitCallClass(cx, obj) &&
+ js_InitExceptionClasses(cx, obj) &&
+ js_InitMathClass(cx, obj) &&
+ js_InitNumberClass(cx, obj) &&
+ js_InitRegExpClass(cx, obj) &&
+ js_InitStringClass(cx, obj) &&
+#if JS_HAS_SCRIPT_OBJECT
+ js_InitScriptClass(cx, obj) &&
+#endif
+#if JS_HAS_XML_SUPPORT
+ js_InitXMLClasses(cx, obj) &&
+#endif
+#if JS_HAS_FILE_OBJECT
+ js_InitFileClass(cx, obj) &&
+#endif
+#if JS_HAS_GENERATORS
+ js_InitIteratorClasses(cx, obj) &&
+#endif
+ js_InitDateClass(cx, obj);
+}
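+
+ /*
+ * Bootstrapping sketch (illustrative only; my_global_class is a hypothetical
+ * host-defined JSClass for the global object, and error checks are omitted):
+ *
+ *     JSRuntime *rt = JS_NewRuntime(8L * 1024L * 1024L);
+ *     JSContext *cx = JS_NewContext(rt, 8192);
+ *     JSObject *global;
+ *
+ *     JS_BeginRequest(cx);            (required in JS_THREADSAFE builds)
+ *     global = JS_NewObject(cx, &my_global_class, NULL, NULL);
+ *     JS_InitStandardClasses(cx, global);
+ */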
+
+#define ATOM_OFFSET(name) offsetof(JSAtomState,name##Atom)
+#define CLASS_ATOM_OFFSET(name) offsetof(JSAtomState,classAtoms[JSProto_##name])
+#define OFFSET_TO_ATOM(rt,off) (*(JSAtom **)((char*)&(rt)->atomState + (off)))
+#define CLASP(name) (JSClass *)&js_##name##Class
+
+#define EAGER_ATOM(name) ATOM_OFFSET(name), NULL
+#define EAGER_CLASS_ATOM(name) CLASS_ATOM_OFFSET(name), NULL
+#define EAGER_ATOM_AND_CLASP(name) EAGER_CLASS_ATOM(name), CLASP(name)
+#define LAZY_ATOM(name) ATOM_OFFSET(lazy.name), js_##name##_str
+
+typedef struct JSStdName {
+ JSObjectOp init;
+ size_t atomOffset; /* offset of atom pointer in JSAtomState */
+ const char *name; /* null if atom is pre-pinned, else name */
+ JSClass *clasp;
+} JSStdName;
+
+static JSAtom *
+StdNameToAtom(JSContext *cx, JSStdName *stdn)
+{
+ size_t offset;
+ JSAtom *atom;
+ const char *name;
+
+ offset = stdn->atomOffset;
+ atom = OFFSET_TO_ATOM(cx->runtime, offset);
+ if (!atom) {
+ name = stdn->name;
+ if (name) {
+ atom = js_Atomize(cx, name, strlen(name), ATOM_PINNED);
+ OFFSET_TO_ATOM(cx->runtime, offset) = atom;
+ }
+ }
+ return atom;
+}
+
+/*
+ * Table of class initializers and their atom offsets in rt->atomState.
+ * If you add a "standard" class, remember to update this table.
+ */
+static JSStdName standard_class_atoms[] = {
+ {js_InitFunctionAndObjectClasses, EAGER_ATOM_AND_CLASP(Function)},
+ {js_InitFunctionAndObjectClasses, EAGER_ATOM_AND_CLASP(Object)},
+ {js_InitArrayClass, EAGER_ATOM_AND_CLASP(Array)},
+ {js_InitBlockClass, EAGER_ATOM_AND_CLASP(Block)},
+ {js_InitBooleanClass, EAGER_ATOM_AND_CLASP(Boolean)},
+ {js_InitDateClass, EAGER_ATOM_AND_CLASP(Date)},
+ {js_InitMathClass, EAGER_ATOM_AND_CLASP(Math)},
+ {js_InitNumberClass, EAGER_ATOM_AND_CLASP(Number)},
+ {js_InitStringClass, EAGER_ATOM_AND_CLASP(String)},
+ {js_InitCallClass, EAGER_ATOM_AND_CLASP(Call)},
+ {js_InitExceptionClasses, EAGER_ATOM_AND_CLASP(Error)},
+ {js_InitRegExpClass, EAGER_ATOM_AND_CLASP(RegExp)},
+#if JS_HAS_SCRIPT_OBJECT
+ {js_InitScriptClass, EAGER_ATOM_AND_CLASP(Script)},
+#endif
+#if JS_HAS_XML_SUPPORT
+ {js_InitXMLClass, EAGER_ATOM_AND_CLASP(XML)},
+ {js_InitNamespaceClass, EAGER_ATOM_AND_CLASP(Namespace)},
+ {js_InitQNameClass, EAGER_ATOM_AND_CLASP(QName)},
+#endif
+#if JS_HAS_FILE_OBJECT
+ {js_InitFileClass, EAGER_ATOM_AND_CLASP(File)},
+#endif
+#if JS_HAS_GENERATORS
+ {js_InitIteratorClasses, EAGER_ATOM_AND_CLASP(StopIteration)},
+#endif
+ {NULL, 0, NULL, NULL}
+};
+
+/*
+ * Table of top-level function and constant names and their init functions.
+ * If you add a "standard" global function or property, remember to update
+ * this table.
+ */
+static JSStdName standard_class_names[] = {
+ /* ECMA requires that eval be a direct property of the global object. */
+ {js_InitObjectClass, EAGER_ATOM(eval), NULL},
+
+ /* Global properties and functions defined by the Number class. */
+ {js_InitNumberClass, LAZY_ATOM(NaN), NULL},
+ {js_InitNumberClass, LAZY_ATOM(Infinity), NULL},
+ {js_InitNumberClass, LAZY_ATOM(isNaN), NULL},
+ {js_InitNumberClass, LAZY_ATOM(isFinite), NULL},
+ {js_InitNumberClass, LAZY_ATOM(parseFloat), NULL},
+ {js_InitNumberClass, LAZY_ATOM(parseInt), NULL},
+
+ /* String global functions. */
+ {js_InitStringClass, LAZY_ATOM(escape), NULL},
+ {js_InitStringClass, LAZY_ATOM(unescape), NULL},
+ {js_InitStringClass, LAZY_ATOM(decodeURI), NULL},
+ {js_InitStringClass, LAZY_ATOM(encodeURI), NULL},
+ {js_InitStringClass, LAZY_ATOM(decodeURIComponent), NULL},
+ {js_InitStringClass, LAZY_ATOM(encodeURIComponent), NULL},
+#if JS_HAS_UNEVAL
+ {js_InitStringClass, LAZY_ATOM(uneval), NULL},
+#endif
+
+ /* Exception constructors. */
+ {js_InitExceptionClasses, EAGER_CLASS_ATOM(Error), CLASP(Error)},
+ {js_InitExceptionClasses, EAGER_CLASS_ATOM(InternalError), CLASP(Error)},
+ {js_InitExceptionClasses, EAGER_CLASS_ATOM(EvalError), CLASP(Error)},
+ {js_InitExceptionClasses, EAGER_CLASS_ATOM(RangeError), CLASP(Error)},
+ {js_InitExceptionClasses, EAGER_CLASS_ATOM(ReferenceError), CLASP(Error)},
+ {js_InitExceptionClasses, EAGER_CLASS_ATOM(SyntaxError), CLASP(Error)},
+ {js_InitExceptionClasses, EAGER_CLASS_ATOM(TypeError), CLASP(Error)},
+ {js_InitExceptionClasses, EAGER_CLASS_ATOM(URIError), CLASP(Error)},
+
+#if JS_HAS_XML_SUPPORT
+ {js_InitAnyNameClass, EAGER_ATOM_AND_CLASP(AnyName)},
+ {js_InitAttributeNameClass, EAGER_ATOM_AND_CLASP(AttributeName)},
+ {js_InitXMLClass, LAZY_ATOM(XMLList), &js_XMLClass},
+ {js_InitXMLClass, LAZY_ATOM(isXMLName), NULL},
+#endif
+
+#if JS_HAS_GENERATORS
+ {js_InitIteratorClasses, EAGER_ATOM_AND_CLASP(Iterator)},
+ {js_InitIteratorClasses, EAGER_ATOM_AND_CLASP(Generator)},
+#endif
+
+ {NULL, 0, NULL, NULL}
+};
+
+static JSStdName object_prototype_names[] = {
+ /* Object.prototype properties (global delegates to Object.prototype). */
+ {js_InitObjectClass, EAGER_ATOM(proto), NULL},
+ {js_InitObjectClass, EAGER_ATOM(parent), NULL},
+ {js_InitObjectClass, EAGER_ATOM(count), NULL},
+#if JS_HAS_TOSOURCE
+ {js_InitObjectClass, EAGER_ATOM(toSource), NULL},
+#endif
+ {js_InitObjectClass, EAGER_ATOM(toString), NULL},
+ {js_InitObjectClass, EAGER_ATOM(toLocaleString), NULL},
+ {js_InitObjectClass, EAGER_ATOM(valueOf), NULL},
+#if JS_HAS_OBJ_WATCHPOINT
+ {js_InitObjectClass, LAZY_ATOM(watch), NULL},
+ {js_InitObjectClass, LAZY_ATOM(unwatch), NULL},
+#endif
+ {js_InitObjectClass, LAZY_ATOM(hasOwnProperty), NULL},
+ {js_InitObjectClass, LAZY_ATOM(isPrototypeOf), NULL},
+ {js_InitObjectClass, LAZY_ATOM(propertyIsEnumerable), NULL},
+#if JS_HAS_GETTER_SETTER
+ {js_InitObjectClass, LAZY_ATOM(defineGetter), NULL},
+ {js_InitObjectClass, LAZY_ATOM(defineSetter), NULL},
+ {js_InitObjectClass, LAZY_ATOM(lookupGetter), NULL},
+ {js_InitObjectClass, LAZY_ATOM(lookupSetter), NULL},
+#endif
+
+ {NULL, 0, NULL, NULL}
+};
+
+JS_PUBLIC_API(JSBool)
+JS_ResolveStandardClass(JSContext *cx, JSObject *obj, jsval id,
+ JSBool *resolved)
+{
+ JSString *idstr;
+ JSRuntime *rt;
+ JSAtom *atom;
+ JSStdName *stdnm;
+ uintN i;
+
+ CHECK_REQUEST(cx);
+ *resolved = JS_FALSE;
+
+ if (!JSVAL_IS_STRING(id))
+ return JS_TRUE;
+ idstr = JSVAL_TO_STRING(id);
+ rt = cx->runtime;
+
+ /* Check whether we're resolving 'undefined', and define it if so. */
+ atom = rt->atomState.typeAtoms[JSTYPE_VOID];
+ if (idstr == ATOM_TO_STRING(atom)) {
+ *resolved = JS_TRUE;
+ return OBJ_DEFINE_PROPERTY(cx, obj, ATOM_TO_JSID(atom), JSVAL_VOID,
+ NULL, NULL, JSPROP_PERMANENT, NULL);
+ }
+
+ /* Try for class constructors/prototypes named by well-known atoms. */
+ stdnm = NULL;
+ for (i = 0; standard_class_atoms[i].init; i++) {
+ atom = OFFSET_TO_ATOM(rt, standard_class_atoms[i].atomOffset);
+ if (idstr == ATOM_TO_STRING(atom)) {
+ stdnm = &standard_class_atoms[i];
+ break;
+ }
+ }
+
+ if (!stdnm) {
+ /* Try less frequently used top-level functions and constants. */
+ for (i = 0; standard_class_names[i].init; i++) {
+ atom = StdNameToAtom(cx, &standard_class_names[i]);
+ if (!atom)
+ return JS_FALSE;
+ if (idstr == ATOM_TO_STRING(atom)) {
+ stdnm = &standard_class_names[i];
+ break;
+ }
+ }
+
+ if (!stdnm && !OBJ_GET_PROTO(cx, obj)) {
+ /*
+ * Try even less frequently used names delegated from the global
+ * object to Object.prototype, but only if the Object class hasn't
+ * yet been initialized.
+ */
+ for (i = 0; object_prototype_names[i].init; i++) {
+ atom = StdNameToAtom(cx, &object_prototype_names[i]);
+ if (!atom)
+ return JS_FALSE;
+ if (idstr == ATOM_TO_STRING(atom)) {
+ stdnm = &object_prototype_names[i];
+ break;
+ }
+ }
+ }
+ }
+
+ if (stdnm) {
+ /*
+ * If this standard class is anonymous and obj advertises itself as a
+ * global object (in order to reserve slots for standard class object
+ * pointers), then we don't want to resolve by name.
+ *
+ * Conversely, if id does not name a class, or does not name an
+ * anonymous class, or the global does not reserve slots for class
+ * objects, then we must call the init hook here.
+ */
+ if (stdnm->clasp &&
+ (stdnm->clasp->flags & JSCLASS_IS_ANONYMOUS) &&
+ (OBJ_GET_CLASS(cx, obj)->flags & JSCLASS_IS_GLOBAL)) {
+ return JS_TRUE;
+ }
+
+ if (!stdnm->init(cx, obj))
+ return JS_FALSE;
+ *resolved = JS_TRUE;
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+AlreadyHasOwnProperty(JSContext *cx, JSObject *obj, JSAtom *atom)
+{
+ JSScopeProperty *sprop;
+ JSScope *scope;
+
+ JS_ASSERT(OBJ_IS_NATIVE(obj));
+ JS_LOCK_OBJ(cx, obj);
+ scope = OBJ_SCOPE(obj);
+ sprop = SCOPE_GET_PROPERTY(scope, ATOM_TO_JSID(atom));
+ JS_UNLOCK_SCOPE(cx, scope);
+ return sprop != NULL;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_EnumerateStandardClasses(JSContext *cx, JSObject *obj)
+{
+ JSRuntime *rt;
+ JSAtom *atom;
+ uintN i;
+
+ CHECK_REQUEST(cx);
+ rt = cx->runtime;
+
+ /* Check whether we need to bind 'undefined' and define it if so. */
+ atom = rt->atomState.typeAtoms[JSTYPE_VOID];
+ if (!AlreadyHasOwnProperty(cx, obj, atom) &&
+ !OBJ_DEFINE_PROPERTY(cx, obj, ATOM_TO_JSID(atom), JSVAL_VOID,
+ NULL, NULL, JSPROP_PERMANENT, NULL)) {
+ return JS_FALSE;
+ }
+
+ /* Initialize any classes that have not been resolved yet. */
+ for (i = 0; standard_class_atoms[i].init; i++) {
+ atom = OFFSET_TO_ATOM(rt, standard_class_atoms[i].atomOffset);
+ if (!AlreadyHasOwnProperty(cx, obj, atom) &&
+ !standard_class_atoms[i].init(cx, obj)) {
+ return JS_FALSE;
+ }
+ }
+
+ return JS_TRUE;
+}
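+
+ /*
+ * Lazy-resolution sketch: instead of calling JS_InitStandardClasses eagerly,
+ * a host can wire these helpers into its global class, e.g. with a resolve
+ * hook like the hypothetical one below and JS_EnumerateStandardClasses as
+ * the enumerate hook:
+ *
+ *     static JSBool
+ *     my_global_resolve(JSContext *cx, JSObject *obj, jsval id)
+ *     {
+ *         JSBool resolved;
+ *
+ *         return JS_ResolveStandardClass(cx, obj, id, &resolved);
+ *     }
+ */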
+
+static JSIdArray *
+AddAtomToArray(JSContext *cx, JSAtom *atom, JSIdArray *ida, jsint *ip)
+{
+ jsint i, length;
+
+ i = *ip;
+ length = ida->length;
+ if (i >= length) {
+ ida = js_SetIdArrayLength(cx, ida, JS_MAX(length * 2, 8));
+ if (!ida)
+ return NULL;
+ JS_ASSERT(i < ida->length);
+ }
+ ida->vector[i] = ATOM_TO_JSID(atom);
+ *ip = i + 1;
+ return ida;
+}
+
+static JSIdArray *
+EnumerateIfResolved(JSContext *cx, JSObject *obj, JSAtom *atom, JSIdArray *ida,
+ jsint *ip, JSBool *foundp)
+{
+ *foundp = AlreadyHasOwnProperty(cx, obj, atom);
+ if (*foundp)
+ ida = AddAtomToArray(cx, atom, ida, ip);
+ return ida;
+}
+
+JS_PUBLIC_API(JSIdArray *)
+JS_EnumerateResolvedStandardClasses(JSContext *cx, JSObject *obj,
+ JSIdArray *ida)
+{
+ JSRuntime *rt;
+ jsint i, j, k;
+ JSAtom *atom;
+ JSBool found;
+ JSObjectOp init;
+
+ CHECK_REQUEST(cx);
+ rt = cx->runtime;
+ if (ida) {
+ i = ida->length;
+ } else {
+ ida = js_NewIdArray(cx, 8);
+ if (!ida)
+ return NULL;
+ i = 0;
+ }
+
+ /* Check whether 'undefined' has been resolved and enumerate it if so. */
+ atom = rt->atomState.typeAtoms[JSTYPE_VOID];
+ ida = EnumerateIfResolved(cx, obj, atom, ida, &i, &found);
+ if (!ida)
+ return NULL;
+
+ /* Enumerate only classes that *have* been resolved. */
+ for (j = 0; standard_class_atoms[j].init; j++) {
+ atom = OFFSET_TO_ATOM(rt, standard_class_atoms[j].atomOffset);
+ ida = EnumerateIfResolved(cx, obj, atom, ida, &i, &found);
+ if (!ida)
+ return NULL;
+
+ if (found) {
+ init = standard_class_atoms[j].init;
+
+ for (k = 0; standard_class_names[k].init; k++) {
+ if (standard_class_names[k].init == init) {
+ atom = StdNameToAtom(cx, &standard_class_names[k]);
+ ida = AddAtomToArray(cx, atom, ida, &i);
+ if (!ida)
+ return NULL;
+ }
+ }
+
+ if (init == js_InitObjectClass) {
+ for (k = 0; object_prototype_names[k].init; k++) {
+ atom = StdNameToAtom(cx, &object_prototype_names[k]);
+ ida = AddAtomToArray(cx, atom, ida, &i);
+ if (!ida)
+ return NULL;
+ }
+ }
+ }
+ }
+
+ /* Trim to exact length via js_SetIdArrayLength. */
+ return js_SetIdArrayLength(cx, ida, i);
+}
+
+#undef ATOM_OFFSET
+#undef CLASS_ATOM_OFFSET
+#undef OFFSET_TO_ATOM
+#undef CLASP
+
+#undef EAGER_ATOM
+#undef EAGER_CLASS_ATOM
+#undef EAGER_ATOM_AND_CLASP
+#undef LAZY_ATOM
+
+JS_PUBLIC_API(JSBool)
+JS_GetClassObject(JSContext *cx, JSObject *obj, JSProtoKey key,
+ JSObject **objp)
+{
+ CHECK_REQUEST(cx);
+ return js_GetClassObject(cx, obj, key, objp);
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_GetScopeChain(JSContext *cx)
+{
+ JSStackFrame *fp;
+
+ fp = cx->fp;
+ if (!fp) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_INACTIVE);
+ return NULL;
+ }
+ return js_GetScopeChain(cx, fp);
+}
+
+JS_PUBLIC_API(void *)
+JS_malloc(JSContext *cx, size_t nbytes)
+{
+ void *p;
+
+ JS_ASSERT(nbytes != 0);
+ if (nbytes == 0)
+ nbytes = 1;
+
+ p = malloc(nbytes);
+ if (!p) {
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+ }
+ js_UpdateMallocCounter(cx, nbytes);
+
+ return p;
+}
+
+JS_PUBLIC_API(void *)
+JS_realloc(JSContext *cx, void *p, size_t nbytes)
+{
+ p = realloc(p, nbytes);
+ if (!p)
+ JS_ReportOutOfMemory(cx);
+ return p;
+}
+
+JS_PUBLIC_API(void)
+JS_free(JSContext *cx, void *p)
+{
+ if (p)
+ free(p);
+}
+
+JS_PUBLIC_API(char *)
+JS_strdup(JSContext *cx, const char *s)
+{
+ size_t n;
+ void *p;
+
+ n = strlen(s) + 1;
+ p = JS_malloc(cx, n);
+ if (!p)
+ return NULL;
+ return (char *)memcpy(p, s, n);
+}
+
+JS_PUBLIC_API(jsdouble *)
+JS_NewDouble(JSContext *cx, jsdouble d)
+{
+ CHECK_REQUEST(cx);
+ return js_NewDouble(cx, d, 0);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_NewDoubleValue(JSContext *cx, jsdouble d, jsval *rval)
+{
+ CHECK_REQUEST(cx);
+ return js_NewDoubleValue(cx, d, rval);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_NewNumberValue(JSContext *cx, jsdouble d, jsval *rval)
+{
+ CHECK_REQUEST(cx);
+ return js_NewNumberValue(cx, d, rval);
+}
+
+#undef JS_AddRoot
+JS_PUBLIC_API(JSBool)
+JS_AddRoot(JSContext *cx, void *rp)
+{
+ CHECK_REQUEST(cx);
+ return js_AddRoot(cx, rp, NULL);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_AddNamedRootRT(JSRuntime *rt, void *rp, const char *name)
+{
+ return js_AddRootRT(rt, rp, name);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_RemoveRoot(JSContext *cx, void *rp)
+{
+ CHECK_REQUEST(cx);
+ return js_RemoveRoot(cx->runtime, rp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_RemoveRootRT(JSRuntime *rt, void *rp)
+{
+ return js_RemoveRoot(rt, rp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_AddNamedRoot(JSContext *cx, void *rp, const char *name)
+{
+ CHECK_REQUEST(cx);
+ return js_AddRoot(cx, rp, name);
+}
+
+JS_PUBLIC_API(void)
+JS_ClearNewbornRoots(JSContext *cx)
+{
+ JS_CLEAR_WEAK_ROOTS(&cx->weakRoots);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_EnterLocalRootScope(JSContext *cx)
+{
+ CHECK_REQUEST(cx);
+ return js_EnterLocalRootScope(cx);
+}
+
+JS_PUBLIC_API(void)
+JS_LeaveLocalRootScope(JSContext *cx)
+{
+ CHECK_REQUEST(cx);
+ js_LeaveLocalRootScope(cx);
+}
+
+JS_PUBLIC_API(void)
+JS_LeaveLocalRootScopeWithResult(JSContext *cx, jsval rval)
+{
+ CHECK_REQUEST(cx);
+ js_LeaveLocalRootScopeWithResult(cx, rval);
+}
+
+JS_PUBLIC_API(void)
+JS_ForgetLocalRoot(JSContext *cx, void *thing)
+{
+ CHECK_REQUEST(cx);
+ js_ForgetLocalRoot(cx, (jsval) thing);
+}
+
+#ifdef DEBUG
+
+JS_PUBLIC_API(void)
+JS_DumpNamedRoots(JSRuntime *rt,
+ void (*dump)(const char *name, void *rp, void *data),
+ void *data)
+{
+ js_DumpNamedRoots(rt, dump, data);
+}
+
+#endif /* DEBUG */
+
+JS_PUBLIC_API(uint32)
+JS_MapGCRoots(JSRuntime *rt, JSGCRootMapFun map, void *data)
+{
+ return js_MapGCRoots(rt, map, data);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_LockGCThing(JSContext *cx, void *thing)
+{
+ JSBool ok;
+
+ CHECK_REQUEST(cx);
+ ok = js_LockGCThing(cx, thing);
+ if (!ok)
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_CANT_LOCK);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_LockGCThingRT(JSRuntime *rt, void *thing)
+{
+ return js_LockGCThingRT(rt, thing);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_UnlockGCThing(JSContext *cx, void *thing)
+{
+ JSBool ok;
+
+ CHECK_REQUEST(cx);
+ ok = js_UnlockGCThingRT(cx->runtime, thing);
+ if (!ok)
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_CANT_UNLOCK);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_UnlockGCThingRT(JSRuntime *rt, void *thing)
+{
+ return js_UnlockGCThingRT(rt, thing);
+}
+
+JS_PUBLIC_API(void)
+JS_MarkGCThing(JSContext *cx, void *thing, const char *name, void *arg)
+{
+ JS_ASSERT(cx->runtime->gcLevel > 0);
+#ifdef JS_THREADSAFE
+ JS_ASSERT(cx->runtime->gcThread->id == js_CurrentThreadId());
+#endif
+
+ GC_MARK(cx, thing, name);
+}
+
+JS_PUBLIC_API(void)
+JS_GC(JSContext *cx)
+{
+#if JS_HAS_GENERATORS
+ /* Run previously scheduled but delayed close hooks. */
+ js_RunCloseHooks(cx);
+#endif
+
+ /* Don't nuke active arenas if executing or compiling. */
+ if (cx->stackPool.current == &cx->stackPool.first)
+ JS_FinishArenaPool(&cx->stackPool);
+ if (cx->tempPool.current == &cx->tempPool.first)
+ JS_FinishArenaPool(&cx->tempPool);
+ js_GC(cx, GC_NORMAL);
+
+#if JS_HAS_GENERATORS
+ /*
+ * Run close hooks for objects that became unreachable after the last GC.
+ */
+ js_RunCloseHooks(cx);
+#endif
+}
+
+JS_PUBLIC_API(void)
+JS_MaybeGC(JSContext *cx)
+{
+#ifdef WAY_TOO_MUCH_GC
+ JS_GC(cx);
+#else
+ JSRuntime *rt;
+ uint32 bytes, lastBytes;
+
+ rt = cx->runtime;
+ bytes = rt->gcBytes;
+ lastBytes = rt->gcLastBytes;
+
+ /*
+ * We run the GC if we used all available free GC cells and had to
+ * allocate extra 1/5 of GC arenas since the last run of GC, or if
+ * we have malloc'd more bytes through JS_malloc than we were told
+ * to allocate by JS_NewRuntime.
+ *
+ * The reason for
+ * bytes > 6/5 lastBytes
+ * condition is the following. Bug 312238 changed bytes and lastBytes
+ * to mean the total amount of memory that the GC uses now and right
+ * after the last GC.
+ *
+ * Before the bug the variables meant the size of allocated GC things
+ * now and right after the last GC. That size did not include the
+ * memory taken by free GC cells and the condition was
+ * bytes > 3/2 lastBytes.
+ * That is, we run the GC if we have half again as many bytes of
+ * GC-things as the last time we GC'd. To be compatible we need to
+ * express that condition through the new meaning of bytes and
+ * lastBytes.
+ *
+ * We write the original condition as
+ * B*(1-F) > 3/2 Bl*(1-Fl)
+ * where B is the total memory size allocated by GC and F is the free
+ * cell density currently and Bl and Fl are the size and the density
+ * right after GC. The density by definition is memory taken by free
+ * cells divided by total amount of memory. In other words, B and Bl
+ * are bytes and lastBytes with the new meaning and B*(1-F) and
+ * Bl*(1-Fl) are bytes and lastBytes with the original meaning.
+ *
+ * Our task is to exclude F and Fl from the last statement. According
+ * to the stats from bug 331770, Fl is about 20-30% for GC allocations
+ * that contribute to B and Bl for a typical run of the browser. It
+ * means that the original condition implied that we did not run GC
+ * unless we exhausted the pool of free cells. Indeed, if we still
+ * have free cells, then B == Bl since we have not yet allocated any
+ * new arenas and the condition means
+ * 1 - F > 3/2 (1-Fl) or 3/2Fl > 1/2 + F
+ * That implies 3/2 Fl > 1/2 or Fl > 1/3. That can not be fulfilled
+ * for the state described by the stats. So we can write the original
+ * condition as:
+ * F == 0 && B > 3/2 Bl(1-Fl)
+ * Again using the stats we see that Fl is about 20% when the browser
+ * starts up and when we are far from hitting rt->gcMaxBytes. With
+ * this Fl we have
+ * F == 0 && B > 3/2 Bl(1-0.2) or just B > 6/5 Bl.
+ */
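+ /*
+ * Concretely: with lastBytes at 10 MB the collector runs once bytes exceeds
+ * 10 MB + 10 MB / 5 = 12 MB, or as soon as gcMallocBytes reaches
+ * gcMaxMallocBytes; the bytes > 8192 test only keeps tiny heaps from
+ * triggering collections.
+ */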
+ if ((bytes > 8192 && bytes > lastBytes + lastBytes / 5) ||
+ rt->gcMallocBytes >= rt->gcMaxMallocBytes) {
+ JS_GC(cx);
+ }
+#if JS_HAS_GENERATORS
+ else {
+ /* Run scheduled but not yet executed close hooks. */
+ js_RunCloseHooks(cx);
+ }
+#endif
+#endif
+}
+
+JS_PUBLIC_API(JSGCCallback)
+JS_SetGCCallback(JSContext *cx, JSGCCallback cb)
+{
+ return JS_SetGCCallbackRT(cx->runtime, cb);
+}
+
+JS_PUBLIC_API(JSGCCallback)
+JS_SetGCCallbackRT(JSRuntime *rt, JSGCCallback cb)
+{
+ JSGCCallback oldcb;
+
+ oldcb = rt->gcCallback;
+ rt->gcCallback = cb;
+ return oldcb;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_IsAboutToBeFinalized(JSContext *cx, void *thing)
+{
+ JS_ASSERT(thing);
+ return js_IsAboutToBeFinalized(cx, thing);
+}
+
+JS_PUBLIC_API(void)
+JS_SetGCParameter(JSRuntime *rt, JSGCParamKey key, uint32 value)
+{
+ switch (key) {
+ case JSGC_MAX_BYTES:
+ rt->gcMaxBytes = value;
+ break;
+ case JSGC_MAX_MALLOC_BYTES:
+ rt->gcMaxMallocBytes = value;
+ break;
+ }
+}
+
+JS_PUBLIC_API(intN)
+JS_AddExternalStringFinalizer(JSStringFinalizeOp finalizer)
+{
+ return js_ChangeExternalStringFinalizer(NULL, finalizer);
+}
+
+JS_PUBLIC_API(intN)
+JS_RemoveExternalStringFinalizer(JSStringFinalizeOp finalizer)
+{
+ return js_ChangeExternalStringFinalizer(finalizer, NULL);
+}
+
+JS_PUBLIC_API(JSString *)
+JS_NewExternalString(JSContext *cx, jschar *chars, size_t length, intN type)
+{
+ JSString *str;
+
+ CHECK_REQUEST(cx);
+ JS_ASSERT(GCX_EXTERNAL_STRING <= type && type < (intN) GCX_NTYPES);
+
+ str = (JSString *) js_NewGCThing(cx, (uintN) type, sizeof(JSString));
+ if (!str)
+ return NULL;
+ str->length = length;
+ str->chars = chars;
+ return str;
+}
+
+JS_PUBLIC_API(intN)
+JS_GetExternalStringGCType(JSRuntime *rt, JSString *str)
+{
+ uint8 type = (uint8) (*js_GetGCThingFlags(str) & GCF_TYPEMASK);
+
+ if (type >= GCX_EXTERNAL_STRING)
+ return (intN)type;
+ JS_ASSERT(type == GCX_STRING || type == GCX_MUTABLE_STRING);
+ return -1;
+}
+
+JS_PUBLIC_API(void)
+JS_SetThreadStackLimit(JSContext *cx, jsuword limitAddr)
+{
+#if JS_STACK_GROWTH_DIRECTION > 0
+ if (limitAddr == 0)
+ limitAddr = (jsuword)-1;
+#endif
+ cx->stackLimit = limitAddr;
+}
+
+/************************************************************************/
+
+JS_PUBLIC_API(void)
+JS_DestroyIdArray(JSContext *cx, JSIdArray *ida)
+{
+ JS_free(cx, ida);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ValueToId(JSContext *cx, jsval v, jsid *idp)
+{
+ JSAtom *atom;
+
+ CHECK_REQUEST(cx);
+ if (JSVAL_IS_INT(v)) {
+ *idp = INT_JSVAL_TO_JSID(v);
+ } else {
+#if JS_HAS_XML_SUPPORT
+ if (JSVAL_IS_OBJECT(v)) {
+ *idp = OBJECT_JSVAL_TO_JSID(v);
+ return JS_TRUE;
+ }
+#endif
+ atom = js_ValueToStringAtom(cx, v);
+ if (!atom)
+ return JS_FALSE;
+ *idp = ATOM_TO_JSID(atom);
+ }
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_IdToValue(JSContext *cx, jsid id, jsval *vp)
+{
+ CHECK_REQUEST(cx);
+ *vp = ID_TO_VALUE(id);
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_PropertyStub(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_EnumerateStub(JSContext *cx, JSObject *obj)
+{
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ResolveStub(JSContext *cx, JSObject *obj, jsval id)
+{
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ConvertStub(JSContext *cx, JSObject *obj, JSType type, jsval *vp)
+{
+ return js_TryValueOf(cx, obj, type, vp);
+}
+
+JS_PUBLIC_API(void)
+JS_FinalizeStub(JSContext *cx, JSObject *obj)
+{
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_InitClass(JSContext *cx, JSObject *obj, JSObject *parent_proto,
+ JSClass *clasp, JSNative constructor, uintN nargs,
+ JSPropertySpec *ps, JSFunctionSpec *fs,
+ JSPropertySpec *static_ps, JSFunctionSpec *static_fs)
+{
+ JSAtom *atom;
+ JSProtoKey key;
+ JSObject *proto, *ctor;
+ JSTempValueRooter tvr;
+ jsval cval, rval;
+ JSBool named;
+ JSFunction *fun;
+
+ CHECK_REQUEST(cx);
+ atom = js_Atomize(cx, clasp->name, strlen(clasp->name), 0);
+ if (!atom)
+ return NULL;
+
+ /*
+ * When initializing a standard class, if no parent_proto (grand-proto of
+ * instances of the class, parent-proto of the class's prototype object)
+ * is given, we must use Object.prototype if it is available. Otherwise,
+ * we could look up the wrong binding for a class name in obj. Example:
+ *
+ * String = Array;
+ * print("hi there".join);
+ *
+ * should print undefined, not Array.prototype.join. This is required by
+ * ECMA-262, alas. It might have been better to make String readonly and
+ * permanent in the global object, instead -- but that's too big a change
+ * to swallow at this point.
+ */
+ key = JSCLASS_CACHED_PROTO_KEY(clasp);
+ if (key != JSProto_Null &&
+ !parent_proto &&
+ !js_GetClassPrototype(cx, obj, INT_TO_JSID(JSProto_Object),
+ &parent_proto)) {
+ return NULL;
+ }
+
+ /* Create a prototype object for this class. */
+ proto = js_NewObject(cx, clasp, parent_proto, obj);
+ if (!proto)
+ return NULL;
+
+ /* After this point, control must exit via label bad or out. */
+ JS_PUSH_TEMP_ROOT_OBJECT(cx, proto, &tvr);
+
+ if (!constructor) {
+ /*
+ * Lacking a constructor, name the prototype (e.g., Math) unless this
+ * class (a) is anonymous, i.e. for internal use only; (b) the class
+ * of obj (the global object) has a reserved slot indexed by key;
+ * and (c) key is not the null key.
+ */
+ if ((clasp->flags & JSCLASS_IS_ANONYMOUS) &&
+ (OBJ_GET_CLASS(cx, obj)->flags & JSCLASS_IS_GLOBAL) &&
+ key != JSProto_Null) {
+ named = JS_FALSE;
+ } else {
+ named = OBJ_DEFINE_PROPERTY(cx, obj, ATOM_TO_JSID(atom),
+ OBJECT_TO_JSVAL(proto),
+ NULL, NULL,
+ (clasp->flags & JSCLASS_IS_ANONYMOUS)
+ ? JSPROP_READONLY | JSPROP_PERMANENT
+ : 0,
+ NULL);
+ if (!named)
+ goto bad;
+ }
+
+ ctor = proto;
+ } else {
+ /* Define the constructor function in obj's scope. */
+ fun = js_DefineFunction(cx, obj, atom, constructor, nargs, 0);
+ named = (fun != NULL);
+ if (!fun)
+ goto bad;
+
+ /*
+ * Remember the class this function is a constructor for so that
+ * we know to create an object of this class when we call the
+ * constructor.
+ */
+ fun->clasp = clasp;
+
+ /*
+ * Optionally construct the prototype object, before the class has
+ * been fully initialized. Allow the ctor to replace proto with a
+ * different object, as is done for operator new -- and as at least
+ * XML support requires.
+ */
+ ctor = fun->object;
+ if (clasp->flags & JSCLASS_CONSTRUCT_PROTOTYPE) {
+ cval = OBJECT_TO_JSVAL(ctor);
+ if (!js_InternalConstruct(cx, proto, cval, 0, NULL, &rval))
+ goto bad;
+ if (!JSVAL_IS_PRIMITIVE(rval) && JSVAL_TO_OBJECT(rval) != proto)
+ proto = JSVAL_TO_OBJECT(rval);
+ }
+
+ /* Connect constructor and prototype by named properties. */
+ if (!js_SetClassPrototype(cx, ctor, proto,
+ JSPROP_READONLY | JSPROP_PERMANENT)) {
+ goto bad;
+ }
+
+ /* Bootstrap Function.prototype (see also JS_InitStandardClasses). */
+ if (OBJ_GET_CLASS(cx, ctor) == clasp) {
+ JS_ASSERT(!OBJ_GET_PROTO(cx, ctor));
+ OBJ_SET_PROTO(cx, ctor, proto);
+ }
+ }
+
+ /* Add properties and methods to the prototype and the constructor. */
+ if ((ps && !JS_DefineProperties(cx, proto, ps)) ||
+ (fs && !JS_DefineFunctions(cx, proto, fs)) ||
+ (static_ps && !JS_DefineProperties(cx, ctor, static_ps)) ||
+ (static_fs && !JS_DefineFunctions(cx, ctor, static_fs))) {
+ goto bad;
+ }
+
+ /* If this is a standard class, cache its prototype. */
+ if (key != JSProto_Null && !js_SetClassObject(cx, obj, key, ctor))
+ goto bad;
+
+out:
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return proto;
+
+bad:
+ if (named)
+ (void) OBJ_DELETE_PROPERTY(cx, obj, ATOM_TO_JSID(atom), &rval);
+ proto = NULL;
+ goto out;
+}
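+
+ /*
+ * Illustrative JS_InitClass call (a sketch; my_class, my_construct, my_props
+ * and my_methods are hypothetical host definitions using the stubs above):
+ *
+ *     static JSClass my_class = {
+ *         "MyClass", 0,
+ *         JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ *         JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, JS_FinalizeStub
+ *     };
+ *
+ *     proto = JS_InitClass(cx, global, NULL, &my_class, my_construct, 0,
+ *                          my_props, my_methods, NULL, NULL);
+ */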
+
+#ifdef JS_THREADSAFE
+JS_PUBLIC_API(JSClass *)
+JS_GetClass(JSContext *cx, JSObject *obj)
+{
+ return (JSClass *)
+ JSVAL_TO_PRIVATE(GC_AWARE_GET_SLOT(cx, obj, JSSLOT_CLASS));
+}
+#else
+JS_PUBLIC_API(JSClass *)
+JS_GetClass(JSObject *obj)
+{
+ return LOCKED_OBJ_GET_CLASS(obj);
+}
+#endif
+
+JS_PUBLIC_API(JSBool)
+JS_InstanceOf(JSContext *cx, JSObject *obj, JSClass *clasp, jsval *argv)
+{
+ JSFunction *fun;
+
+ CHECK_REQUEST(cx);
+ if (OBJ_GET_CLASS(cx, obj) == clasp)
+ return JS_TRUE;
+ if (argv) {
+ fun = js_ValueToFunction(cx, &argv[-2], 0);
+ if (fun) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_INCOMPATIBLE_PROTO,
+ clasp->name, JS_GetFunctionName(fun),
+ OBJ_GET_CLASS(cx, obj)->name);
+ }
+ }
+ return JS_FALSE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_HasInstance(JSContext *cx, JSObject *obj, jsval v, JSBool *bp)
+{
+ return js_HasInstance(cx, obj, v, bp);
+}
+
+JS_PUBLIC_API(void *)
+JS_GetPrivate(JSContext *cx, JSObject *obj)
+{
+ jsval v;
+
+ JS_ASSERT(OBJ_GET_CLASS(cx, obj)->flags & JSCLASS_HAS_PRIVATE);
+ v = GC_AWARE_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
+ if (!JSVAL_IS_INT(v))
+ return NULL;
+ return JSVAL_TO_PRIVATE(v);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetPrivate(JSContext *cx, JSObject *obj, void *data)
+{
+ JS_ASSERT(OBJ_GET_CLASS(cx, obj)->flags & JSCLASS_HAS_PRIVATE);
+ OBJ_SET_SLOT(cx, obj, JSSLOT_PRIVATE, PRIVATE_TO_JSVAL(data));
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(void *)
+JS_GetInstancePrivate(JSContext *cx, JSObject *obj, JSClass *clasp,
+ jsval *argv)
+{
+ if (!JS_InstanceOf(cx, obj, clasp, argv))
+ return NULL;
+ return JS_GetPrivate(cx, obj);
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_GetPrototype(JSContext *cx, JSObject *obj)
+{
+ JSObject *proto;
+
+ CHECK_REQUEST(cx);
+ proto = JSVAL_TO_OBJECT(GC_AWARE_GET_SLOT(cx, obj, JSSLOT_PROTO));
+
+ /* Beware ref to dead object (we may be called from obj's finalizer). */
+ return proto && proto->map ? proto : NULL;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetPrototype(JSContext *cx, JSObject *obj, JSObject *proto)
+{
+ CHECK_REQUEST(cx);
+ if (obj->map->ops->setProto)
+ return obj->map->ops->setProto(cx, obj, JSSLOT_PROTO, proto);
+ OBJ_SET_SLOT(cx, obj, JSSLOT_PROTO, OBJECT_TO_JSVAL(proto));
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_GetParent(JSContext *cx, JSObject *obj)
+{
+ JSObject *parent;
+
+ parent = JSVAL_TO_OBJECT(GC_AWARE_GET_SLOT(cx, obj, JSSLOT_PARENT));
+
+ /* Beware ref to dead object (we may be called from obj's finalizer). */
+ return parent && parent->map ? parent : NULL;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetParent(JSContext *cx, JSObject *obj, JSObject *parent)
+{
+ CHECK_REQUEST(cx);
+ if (obj->map->ops->setParent)
+ return obj->map->ops->setParent(cx, obj, JSSLOT_PARENT, parent);
+ OBJ_SET_SLOT(cx, obj, JSSLOT_PARENT, OBJECT_TO_JSVAL(parent));
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_GetConstructor(JSContext *cx, JSObject *proto)
+{
+ jsval cval;
+
+ CHECK_REQUEST(cx);
+ if (!OBJ_GET_PROPERTY(cx, proto,
+ ATOM_TO_JSID(cx->runtime->atomState.constructorAtom),
+ &cval)) {
+ return NULL;
+ }
+ if (!VALUE_IS_FUNCTION(cx, cval)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_NO_CONSTRUCTOR,
+ OBJ_GET_CLASS(cx, proto)->name);
+ return NULL;
+ }
+ return JSVAL_TO_OBJECT(cval);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_GetObjectId(JSContext *cx, JSObject *obj, jsid *idp)
+{
+ JS_ASSERT(((jsid)obj & JSID_TAGMASK) == 0);
+ *idp = OBJECT_TO_JSID(obj);
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_NewObject(JSContext *cx, JSClass *clasp, JSObject *proto, JSObject *parent)
+{
+ CHECK_REQUEST(cx);
+ if (!clasp)
+ clasp = &js_ObjectClass; /* default class is Object */
+ return js_NewObject(cx, clasp, proto, parent);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SealObject(JSContext *cx, JSObject *obj, JSBool deep)
+{
+ JSScope *scope;
+ JSIdArray *ida;
+ uint32 nslots;
+ jsval v, *vp, *end;
+
+ if (!OBJ_IS_NATIVE(obj)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_CANT_SEAL_OBJECT,
+ OBJ_GET_CLASS(cx, obj)->name);
+ return JS_FALSE;
+ }
+
+ scope = OBJ_SCOPE(obj);
+
+#if defined JS_THREADSAFE && defined DEBUG
+ /* Insist on scope being used exclusively by cx's thread. */
+ if (scope->ownercx != cx) {
+ JS_LOCK_OBJ(cx, obj);
+ JS_ASSERT(OBJ_SCOPE(obj) == scope);
+ JS_ASSERT(scope->ownercx == cx);
+ JS_UNLOCK_SCOPE(cx, scope);
+ }
+#endif
+
+ /* Nothing to do if obj's scope is already sealed. */
+ if (SCOPE_IS_SEALED(scope))
+ return JS_TRUE;
+
+ /* XXX Enumerate lazy properties now, as they can't be added later. */
+ ida = JS_Enumerate(cx, obj);
+ if (!ida)
+ return JS_FALSE;
+ JS_DestroyIdArray(cx, ida);
+
+ /* Ensure that obj has its own, mutable scope, and seal that scope. */
+ JS_LOCK_OBJ(cx, obj);
+ scope = js_GetMutableScope(cx, obj);
+ if (scope)
+ SCOPE_SET_SEALED(scope);
+ JS_UNLOCK_OBJ(cx, obj);
+ if (!scope)
+ return JS_FALSE;
+
+ /* If we are not sealing an entire object graph, we're done. */
+ if (!deep)
+ return JS_TRUE;
+
+ /* Walk obj->slots and if any value is a non-null object, seal it. */
+ nslots = JS_MIN(scope->map.freeslot, scope->map.nslots);
+ for (vp = obj->slots, end = vp + nslots; vp < end; vp++) {
+ v = *vp;
+ if (JSVAL_IS_PRIMITIVE(v))
+ continue;
+ if (!JS_SealObject(cx, JSVAL_TO_OBJECT(v), deep))
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_ConstructObject(JSContext *cx, JSClass *clasp, JSObject *proto,
+ JSObject *parent)
+{
+ CHECK_REQUEST(cx);
+ if (!clasp)
+ clasp = &js_ObjectClass; /* default class is Object */
+ return js_ConstructObject(cx, clasp, proto, parent, 0, NULL);
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_ConstructObjectWithArguments(JSContext *cx, JSClass *clasp, JSObject *proto,
+ JSObject *parent, uintN argc, jsval *argv)
+{
+ CHECK_REQUEST(cx);
+ if (!clasp)
+ clasp = &js_ObjectClass; /* default class is Object */
+ return js_ConstructObject(cx, clasp, proto, parent, argc, argv);
+}
+
+static JSBool
+DefineProperty(JSContext *cx, JSObject *obj, const char *name, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter, uintN attrs,
+ uintN flags, intN tinyid)
+{
+ jsid id;
+ JSAtom *atom;
+
+ if (attrs & JSPROP_INDEX) {
+ id = INT_TO_JSID(JS_PTR_TO_INT32(name));
+ atom = NULL;
+ attrs &= ~JSPROP_INDEX;
+ } else {
+ atom = js_Atomize(cx, name, strlen(name), 0);
+ if (!atom)
+ return JS_FALSE;
+ id = ATOM_TO_JSID(atom);
+ }
+ if (flags != 0 && OBJ_IS_NATIVE(obj)) {
+ return js_DefineNativeProperty(cx, obj, id, value, getter, setter,
+ attrs, flags, tinyid, NULL);
+ }
+ return OBJ_DEFINE_PROPERTY(cx, obj, id, value, getter, setter, attrs,
+ NULL);
+}
+
+#define AUTO_NAMELEN(s,n) (((n) == (size_t)-1) ? js_strlen(s) : (n))
+
+static JSBool
+DefineUCProperty(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter, uintN attrs,
+ uintN flags, intN tinyid)
+{
+ JSAtom *atom;
+
+ atom = js_AtomizeChars(cx, name, AUTO_NAMELEN(name, namelen), 0);
+ if (!atom)
+ return JS_FALSE;
+ if (flags != 0 && OBJ_IS_NATIVE(obj)) {
+ return js_DefineNativeProperty(cx, obj, ATOM_TO_JSID(atom), value,
+ getter, setter, attrs, flags, tinyid,
+ NULL);
+ }
+ return OBJ_DEFINE_PROPERTY(cx, obj, ATOM_TO_JSID(atom), value,
+ getter, setter, attrs, NULL);
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_DefineObject(JSContext *cx, JSObject *obj, const char *name, JSClass *clasp,
+ JSObject *proto, uintN attrs)
+{
+ JSObject *nobj;
+
+ CHECK_REQUEST(cx);
+ if (!clasp)
+ clasp = &js_ObjectClass; /* default class is Object */
+ nobj = js_NewObject(cx, clasp, proto, obj);
+ if (!nobj)
+ return NULL;
+ if (!DefineProperty(cx, obj, name, OBJECT_TO_JSVAL(nobj), NULL, NULL, attrs,
+ 0, 0)) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ return NULL;
+ }
+ return nobj;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DefineConstDoubles(JSContext *cx, JSObject *obj, JSConstDoubleSpec *cds)
+{
+ JSBool ok;
+ jsval value;
+ uintN flags;
+
+ CHECK_REQUEST(cx);
+ for (ok = JS_TRUE; cds->name; cds++) {
+ ok = js_NewNumberValue(cx, cds->dval, &value);
+ if (!ok)
+ break;
+ flags = cds->flags;
+ if (!flags)
+ flags = JSPROP_READONLY | JSPROP_PERMANENT;
+ ok = DefineProperty(cx, obj, cds->name, value, NULL, NULL, flags, 0, 0);
+ if (!ok)
+ break;
+ }
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DefineProperties(JSContext *cx, JSObject *obj, JSPropertySpec *ps)
+{
+ JSBool ok;
+
+ CHECK_REQUEST(cx);
+ for (ok = JS_TRUE; ps->name; ps++) {
+ ok = DefineProperty(cx, obj, ps->name, JSVAL_VOID,
+ ps->getter, ps->setter, ps->flags,
+ SPROP_HAS_SHORTID, ps->tinyid);
+ if (!ok)
+ break;
+ }
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DefineProperty(JSContext *cx, JSObject *obj, const char *name, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter, uintN attrs)
+{
+ CHECK_REQUEST(cx);
+ return DefineProperty(cx, obj, name, value, getter, setter, attrs, 0, 0);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DefinePropertyWithTinyId(JSContext *cx, JSObject *obj, const char *name,
+ int8 tinyid, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter,
+ uintN attrs)
+{
+ CHECK_REQUEST(cx);
+ return DefineProperty(cx, obj, name, value, getter, setter, attrs,
+ SPROP_HAS_SHORTID, tinyid);
+}
+
+static JSBool
+LookupProperty(JSContext *cx, JSObject *obj, const char *name, JSObject **objp,
+ JSProperty **propp)
+{
+ JSAtom *atom;
+
+ atom = js_Atomize(cx, name, strlen(name), 0);
+ if (!atom)
+ return JS_FALSE;
+ return OBJ_LOOKUP_PROPERTY(cx, obj, ATOM_TO_JSID(atom), objp, propp);
+}
+
+static JSBool
+LookupUCProperty(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ JSObject **objp, JSProperty **propp)
+{
+ JSAtom *atom;
+
+ atom = js_AtomizeChars(cx, name, AUTO_NAMELEN(name, namelen), 0);
+ if (!atom)
+ return JS_FALSE;
+ return OBJ_LOOKUP_PROPERTY(cx, obj, ATOM_TO_JSID(atom), objp, propp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_AliasProperty(JSContext *cx, JSObject *obj, const char *name,
+ const char *alias)
+{
+ JSObject *obj2;
+ JSProperty *prop;
+ JSAtom *atom;
+ JSBool ok;
+ JSScopeProperty *sprop;
+
+ CHECK_REQUEST(cx);
+ if (!LookupProperty(cx, obj, name, &obj2, &prop))
+ return JS_FALSE;
+ if (!prop) {
+ js_ReportIsNotDefined(cx, name);
+ return JS_FALSE;
+ }
+ if (obj2 != obj || !OBJ_IS_NATIVE(obj)) {
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_CANT_ALIAS,
+ alias, name, OBJ_GET_CLASS(cx, obj2)->name);
+ return JS_FALSE;
+ }
+ atom = js_Atomize(cx, alias, strlen(alias), 0);
+ if (!atom) {
+ ok = JS_FALSE;
+ } else {
+ sprop = (JSScopeProperty *)prop;
+ ok = (js_AddNativeProperty(cx, obj, ATOM_TO_JSID(atom),
+ sprop->getter, sprop->setter, sprop->slot,
+ sprop->attrs, sprop->flags | SPROP_IS_ALIAS,
+ sprop->shortid)
+ != NULL);
+ }
+ OBJ_DROP_PROPERTY(cx, obj, prop);
+ return ok;
+}
+
+static jsval
+LookupResult(JSContext *cx, JSObject *obj, JSObject *obj2, JSProperty *prop)
+{
+ JSScopeProperty *sprop;
+ jsval rval;
+
+ if (!prop) {
+ /* XXX bad API: no way to tell "not defined" from "void value" */
+ return JSVAL_VOID;
+ }
+ if (OBJ_IS_NATIVE(obj2)) {
+ /* Peek at the native property's slot value, without doing a Get. */
+ sprop = (JSScopeProperty *)prop;
+ rval = SPROP_HAS_VALID_SLOT(sprop, OBJ_SCOPE(obj2))
+ ? LOCKED_OBJ_GET_SLOT(obj2, sprop->slot)
+ : JSVAL_TRUE;
+ } else {
+ /* XXX bad API: no way to return "defined but value unknown" */
+ rval = JSVAL_TRUE;
+ }
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ return rval;
+}
+
+static JSBool
+GetPropertyAttributes(JSContext *cx, JSObject *obj, JSAtom *atom,
+ uintN *attrsp, JSBool *foundp,
+ JSPropertyOp *getterp, JSPropertyOp *setterp)
+{
+ JSObject *obj2;
+ JSProperty *prop;
+ JSBool ok;
+
+ if (!atom)
+ return JS_FALSE;
+ if (!OBJ_LOOKUP_PROPERTY(cx, obj, ATOM_TO_JSID(atom), &obj2, &prop))
+ return JS_FALSE;
+
+ if (!prop || obj != obj2) {
+ *attrsp = 0;
+ *foundp = JS_FALSE;
+ if (getterp)
+ *getterp = NULL;
+ if (setterp)
+ *setterp = NULL;
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ return JS_TRUE;
+ }
+
+ *foundp = JS_TRUE;
+ ok = OBJ_GET_ATTRIBUTES(cx, obj, ATOM_TO_JSID(atom), prop, attrsp);
+ if (ok && OBJ_IS_NATIVE(obj)) {
+ JSScopeProperty *sprop = (JSScopeProperty *) prop;
+
+ if (getterp)
+ *getterp = sprop->getter;
+ if (setterp)
+ *setterp = sprop->setter;
+ }
+ OBJ_DROP_PROPERTY(cx, obj, prop);
+ return ok;
+}
+
+static JSBool
+SetPropertyAttributes(JSContext *cx, JSObject *obj, JSAtom *atom,
+ uintN attrs, JSBool *foundp)
+{
+ JSObject *obj2;
+ JSProperty *prop;
+ JSBool ok;
+
+ if (!atom)
+ return JS_FALSE;
+ if (!OBJ_LOOKUP_PROPERTY(cx, obj, ATOM_TO_JSID(atom), &obj2, &prop))
+ return JS_FALSE;
+ if (!prop || obj != obj2) {
+ *foundp = JS_FALSE;
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ return JS_TRUE;
+ }
+
+ *foundp = JS_TRUE;
+ ok = OBJ_SET_ATTRIBUTES(cx, obj, ATOM_TO_JSID(atom), prop, &attrs);
+ OBJ_DROP_PROPERTY(cx, obj, prop);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_GetPropertyAttributes(JSContext *cx, JSObject *obj, const char *name,
+ uintN *attrsp, JSBool *foundp)
+{
+ CHECK_REQUEST(cx);
+ return GetPropertyAttributes(cx, obj,
+ js_Atomize(cx, name, strlen(name), 0),
+ attrsp, foundp, NULL, NULL);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_GetPropertyAttrsGetterAndSetter(JSContext *cx, JSObject *obj,
+ const char *name,
+ uintN *attrsp, JSBool *foundp,
+ JSPropertyOp *getterp,
+ JSPropertyOp *setterp)
+{
+ CHECK_REQUEST(cx);
+ return GetPropertyAttributes(cx, obj,
+ js_Atomize(cx, name, strlen(name), 0),
+ attrsp, foundp, getterp, setterp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetPropertyAttributes(JSContext *cx, JSObject *obj, const char *name,
+ uintN attrs, JSBool *foundp)
+{
+ CHECK_REQUEST(cx);
+ return SetPropertyAttributes(cx, obj,
+ js_Atomize(cx, name, strlen(name), 0),
+ attrs, foundp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_HasProperty(JSContext *cx, JSObject *obj, const char *name, JSBool *foundp)
+{
+ JSBool ok;
+ JSObject *obj2;
+ JSProperty *prop;
+
+ CHECK_REQUEST(cx);
+ ok = LookupProperty(cx, obj, name, &obj2, &prop);
+ if (ok) {
+ *foundp = (prop != NULL);
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ }
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_LookupProperty(JSContext *cx, JSObject *obj, const char *name, jsval *vp)
+{
+ JSBool ok;
+ JSObject *obj2;
+ JSProperty *prop;
+
+ CHECK_REQUEST(cx);
+ ok = LookupProperty(cx, obj, name, &obj2, &prop);
+ if (ok)
+ *vp = LookupResult(cx, obj, obj2, prop);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_LookupPropertyWithFlags(JSContext *cx, JSObject *obj, const char *name,
+ uintN flags, jsval *vp)
+{
+ JSAtom *atom;
+ JSBool ok;
+ JSObject *obj2;
+ JSProperty *prop;
+
+ CHECK_REQUEST(cx);
+ atom = js_Atomize(cx, name, strlen(name), 0);
+ if (!atom)
+ return JS_FALSE;
+ ok = OBJ_IS_NATIVE(obj)
+ ? js_LookupPropertyWithFlags(cx, obj, ATOM_TO_JSID(atom), flags,
+ &obj2, &prop)
+ : OBJ_LOOKUP_PROPERTY(cx, obj, ATOM_TO_JSID(atom), &obj2, &prop);
+ if (ok)
+ *vp = LookupResult(cx, obj, obj2, prop);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_GetProperty(JSContext *cx, JSObject *obj, const char *name, jsval *vp)
+{
+ JSAtom *atom;
+
+ CHECK_REQUEST(cx);
+ atom = js_Atomize(cx, name, strlen(name), 0);
+ if (!atom)
+ return JS_FALSE;
+ return OBJ_GET_PROPERTY(cx, obj, ATOM_TO_JSID(atom), vp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_GetMethodById(JSContext *cx, JSObject *obj, jsid id, JSObject **objp,
+ jsval *vp)
+{
+ CHECK_REQUEST(cx);
+
+#if JS_HAS_XML_SUPPORT
+ if (OBJECT_IS_XML(cx, obj)) {
+ JSXMLObjectOps *ops;
+
+ ops = (JSXMLObjectOps *) obj->map->ops;
+ obj = ops->getMethod(cx, obj, id, vp);
+ if (!obj)
+ return JS_FALSE;
+ } else
+#endif
+ {
+ if (!OBJ_GET_PROPERTY(cx, obj, id, vp))
+ return JS_FALSE;
+ }
+
+ *objp = obj;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_GetMethod(JSContext *cx, JSObject *obj, const char *name, JSObject **objp,
+ jsval *vp)
+{
+ JSAtom *atom;
+
+ atom = js_Atomize(cx, name, strlen(name), 0);
+ if (!atom)
+ return JS_FALSE;
+ return JS_GetMethodById(cx, obj, ATOM_TO_JSID(atom), objp, vp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetProperty(JSContext *cx, JSObject *obj, const char *name, jsval *vp)
+{
+ JSAtom *atom;
+
+ CHECK_REQUEST(cx);
+ atom = js_Atomize(cx, name, strlen(name), 0);
+ if (!atom)
+ return JS_FALSE;
+ return OBJ_SET_PROPERTY(cx, obj, ATOM_TO_JSID(atom), vp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DeleteProperty(JSContext *cx, JSObject *obj, const char *name)
+{
+ jsval junk;
+
+ CHECK_REQUEST(cx);
+ return JS_DeleteProperty2(cx, obj, name, &junk);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DeleteProperty2(JSContext *cx, JSObject *obj, const char *name,
+ jsval *rval)
+{
+ JSAtom *atom;
+
+ CHECK_REQUEST(cx);
+ atom = js_Atomize(cx, name, strlen(name), 0);
+ if (!atom)
+ return JS_FALSE;
+ return OBJ_DELETE_PROPERTY(cx, obj, ATOM_TO_JSID(atom), rval);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DefineUCProperty(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter,
+ uintN attrs)
+{
+ CHECK_REQUEST(cx);
+ return DefineUCProperty(cx, obj, name, namelen, value, getter, setter,
+ attrs, 0, 0);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_GetUCPropertyAttributes(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ uintN *attrsp, JSBool *foundp)
+{
+ CHECK_REQUEST(cx);
+ return GetPropertyAttributes(cx, obj,
+ js_AtomizeChars(cx, name, AUTO_NAMELEN(name, namelen), 0),
+ attrsp, foundp, NULL, NULL);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_GetUCPropertyAttrsGetterAndSetter(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ uintN *attrsp, JSBool *foundp,
+ JSPropertyOp *getterp,
+ JSPropertyOp *setterp)
+{
+ CHECK_REQUEST(cx);
+ return GetPropertyAttributes(cx, obj,
+ js_AtomizeChars(cx, name, AUTO_NAMELEN(name, namelen), 0),
+ attrsp, foundp, getterp, setterp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetUCPropertyAttributes(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ uintN attrs, JSBool *foundp)
+{
+ CHECK_REQUEST(cx);
+ return SetPropertyAttributes(cx, obj,
+ js_AtomizeChars(cx, name, AUTO_NAMELEN(name, namelen), 0),
+ attrs, foundp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DefineUCPropertyWithTinyId(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ int8 tinyid, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter,
+ uintN attrs)
+{
+ CHECK_REQUEST(cx);
+ return DefineUCProperty(cx, obj, name, namelen, value, getter, setter,
+ attrs, SPROP_HAS_SHORTID, tinyid);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_HasUCProperty(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ JSBool *vp)
+{
+ JSBool ok;
+ JSObject *obj2;
+ JSProperty *prop;
+
+ CHECK_REQUEST(cx);
+ ok = LookupUCProperty(cx, obj, name, namelen, &obj2, &prop);
+ if (ok) {
+ *vp = (prop != NULL);
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ }
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_LookupUCProperty(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ jsval *vp)
+{
+ JSBool ok;
+ JSObject *obj2;
+ JSProperty *prop;
+
+ CHECK_REQUEST(cx);
+ ok = LookupUCProperty(cx, obj, name, namelen, &obj2, &prop);
+ if (ok)
+ *vp = LookupResult(cx, obj, obj2, prop);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_GetUCProperty(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ jsval *vp)
+{
+ JSAtom *atom;
+
+ CHECK_REQUEST(cx);
+ atom = js_AtomizeChars(cx, name, AUTO_NAMELEN(name, namelen), 0);
+ if (!atom)
+ return JS_FALSE;
+ return OBJ_GET_PROPERTY(cx, obj, ATOM_TO_JSID(atom), vp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetUCProperty(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ jsval *vp)
+{
+ JSAtom *atom;
+
+ CHECK_REQUEST(cx);
+ atom = js_AtomizeChars(cx, name, AUTO_NAMELEN(name, namelen), 0);
+ if (!atom)
+ return JS_FALSE;
+ return OBJ_SET_PROPERTY(cx, obj, ATOM_TO_JSID(atom), vp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DeleteUCProperty2(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ jsval *rval)
+{
+ JSAtom *atom;
+
+ CHECK_REQUEST(cx);
+ atom = js_AtomizeChars(cx, name, AUTO_NAMELEN(name, namelen), 0);
+ if (!atom)
+ return JS_FALSE;
+ return OBJ_DELETE_PROPERTY(cx, obj, ATOM_TO_JSID(atom), rval);
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_NewArrayObject(JSContext *cx, jsint length, jsval *vector)
+{
+ CHECK_REQUEST(cx);
+ /* NB: jsuint cast does ToUint32. */
+ return js_NewArrayObject(cx, (jsuint)length, vector);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_IsArrayObject(JSContext *cx, JSObject *obj)
+{
+ return OBJ_GET_CLASS(cx, obj) == &js_ArrayClass;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_GetArrayLength(JSContext *cx, JSObject *obj, jsuint *lengthp)
+{
+ CHECK_REQUEST(cx);
+ return js_GetLengthProperty(cx, obj, lengthp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetArrayLength(JSContext *cx, JSObject *obj, jsuint length)
+{
+ CHECK_REQUEST(cx);
+ return js_SetLengthProperty(cx, obj, length);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_HasArrayLength(JSContext *cx, JSObject *obj, jsuint *lengthp)
+{
+ CHECK_REQUEST(cx);
+ return js_HasLengthProperty(cx, obj, lengthp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DefineElement(JSContext *cx, JSObject *obj, jsint index, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter, uintN attrs)
+{
+ CHECK_REQUEST(cx);
+ return OBJ_DEFINE_PROPERTY(cx, obj, INT_TO_JSID(index), value,
+ getter, setter, attrs, NULL);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_AliasElement(JSContext *cx, JSObject *obj, const char *name, jsint alias)
+{
+ JSObject *obj2;
+ JSProperty *prop;
+ JSScopeProperty *sprop;
+ JSBool ok;
+
+ CHECK_REQUEST(cx);
+ if (!LookupProperty(cx, obj, name, &obj2, &prop))
+ return JS_FALSE;
+ if (!prop) {
+ js_ReportIsNotDefined(cx, name);
+ return JS_FALSE;
+ }
+ if (obj2 != obj || !OBJ_IS_NATIVE(obj)) {
+ char numBuf[12];
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ JS_snprintf(numBuf, sizeof numBuf, "%ld", (long)alias);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_CANT_ALIAS,
+ numBuf, name, OBJ_GET_CLASS(cx, obj2)->name);
+ return JS_FALSE;
+ }
+ sprop = (JSScopeProperty *)prop;
+ ok = (js_AddNativeProperty(cx, obj, INT_TO_JSID(alias),
+ sprop->getter, sprop->setter, sprop->slot,
+ sprop->attrs, sprop->flags | SPROP_IS_ALIAS,
+ sprop->shortid)
+ != NULL);
+ OBJ_DROP_PROPERTY(cx, obj, prop);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_HasElement(JSContext *cx, JSObject *obj, jsint index, JSBool *foundp)
+{
+ JSBool ok;
+ JSObject *obj2;
+ JSProperty *prop;
+
+ CHECK_REQUEST(cx);
+ ok = OBJ_LOOKUP_PROPERTY(cx, obj, INT_TO_JSID(index), &obj2, &prop);
+ if (ok) {
+ *foundp = (prop != NULL);
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ }
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_LookupElement(JSContext *cx, JSObject *obj, jsint index, jsval *vp)
+{
+ JSBool ok;
+ JSObject *obj2;
+ JSProperty *prop;
+
+ CHECK_REQUEST(cx);
+ ok = OBJ_LOOKUP_PROPERTY(cx, obj, INT_TO_JSID(index), &obj2, &prop);
+ if (ok)
+ *vp = LookupResult(cx, obj, obj2, prop);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_GetElement(JSContext *cx, JSObject *obj, jsint index, jsval *vp)
+{
+ CHECK_REQUEST(cx);
+ return OBJ_GET_PROPERTY(cx, obj, INT_TO_JSID(index), vp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetElement(JSContext *cx, JSObject *obj, jsint index, jsval *vp)
+{
+ CHECK_REQUEST(cx);
+ return OBJ_SET_PROPERTY(cx, obj, INT_TO_JSID(index), vp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DeleteElement(JSContext *cx, JSObject *obj, jsint index)
+{
+ jsval junk;
+
+ CHECK_REQUEST(cx);
+ return JS_DeleteElement2(cx, obj, index, &junk);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DeleteElement2(JSContext *cx, JSObject *obj, jsint index, jsval *rval)
+{
+ CHECK_REQUEST(cx);
+ return OBJ_DELETE_PROPERTY(cx, obj, INT_TO_JSID(index), rval);
+}
+
+JS_PUBLIC_API(void)
+JS_ClearScope(JSContext *cx, JSObject *obj)
+{
+ CHECK_REQUEST(cx);
+
+ if (obj->map->ops->clear)
+ obj->map->ops->clear(cx, obj);
+
+ /* Clear cached class objects on the global object. */
+ if (JS_GET_CLASS(cx, obj)->flags & JSCLASS_IS_GLOBAL) {
+ JSProtoKey key;
+
+ for (key = JSProto_Null; key < JSProto_LIMIT; key++)
+ JS_SetReservedSlot(cx, obj, key, JSVAL_VOID);
+ }
+}
+
+JS_PUBLIC_API(JSIdArray *)
+JS_Enumerate(JSContext *cx, JSObject *obj)
+{
+ jsint i, n;
+ jsval iter_state, num_properties;
+ jsid id;
+ JSIdArray *ida;
+ jsval *vector;
+
+ CHECK_REQUEST(cx);
+
+ ida = NULL;
+ iter_state = JSVAL_NULL;
+
+ /* Get the number of properties to enumerate. */
+ if (!OBJ_ENUMERATE(cx, obj, JSENUMERATE_INIT, &iter_state, &num_properties))
+ goto error;
+ if (!JSVAL_IS_INT(num_properties)) {
+ JS_ASSERT(0);
+ goto error;
+ }
+
+ /* Grow as needed if we don't know the exact amount ahead of time. */
+ n = JSVAL_TO_INT(num_properties);
+ if (n <= 0)
+ n = 8;
+
+ /* Create an array of jsids large enough to hold all the properties */
+ ida = js_NewIdArray(cx, n);
+ if (!ida)
+ goto error;
+
+ i = 0;
+ vector = &ida->vector[0];
+ for (;;) {
+ if (!OBJ_ENUMERATE(cx, obj, JSENUMERATE_NEXT, &iter_state, &id))
+ goto error;
+
+ /* No more jsids to enumerate? */
+ if (iter_state == JSVAL_NULL)
+ break;
+
+ if (i == ida->length) {
+ ida = js_SetIdArrayLength(cx, ida, ida->length * 2);
+ if (!ida)
+ goto error;
+ vector = &ida->vector[0];
+ }
+ vector[i++] = id;
+ }
+ return js_SetIdArrayLength(cx, ida, i);
+
+error:
+ if (iter_state != JSVAL_NULL)
+ OBJ_ENUMERATE(cx, obj, JSENUMERATE_DESTROY, &iter_state, 0);
+ if (ida)
+ JS_DestroyIdArray(cx, ida);
+ return NULL;
+}
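+
+/*
+ * Usage sketch: JS_Enumerate returns a freshly allocated JSIdArray that the
+ * caller owns and must release with JS_DestroyIdArray.  A typical embedding
+ * walks ida->vector, converting each jsid with JS_IdToValue:
+ *
+ *     JSIdArray *ida = JS_Enumerate(cx, obj);
+ *     jsint i;
+ *
+ *     if (!ida)
+ *         return JS_FALSE;
+ *     for (i = 0; i < ida->length; i++) {
+ *         jsval v;
+ *         JSString *str;
+ *
+ *         if (!JS_IdToValue(cx, ida->vector[i], &v) ||
+ *             !(str = JS_ValueToString(cx, v))) {
+ *             JS_DestroyIdArray(cx, ida);
+ *             return JS_FALSE;
+ *         }
+ *         printf("%s\n", JS_GetStringBytes(str));
+ *     }
+ *     JS_DestroyIdArray(cx, ida);
+ */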
+
+/*
+ * XXX reverse iterator for properties, unreverse and meld with jsinterp.c's
+ * prop_iterator_class somehow...
+ * + preserve the OBJ_ENUMERATE API while optimizing the native object case
+ * + native case here uses a JSScopeProperty *, but that iterates in reverse!
+ * + so we make non-native match, by reverse-iterating after JS_Enumerating
+ */
+#define JSSLOT_ITER_INDEX (JSSLOT_PRIVATE + 1)
+
+#if JSSLOT_ITER_INDEX >= JS_INITIAL_NSLOTS
+# error "JSSLOT_ITER_INDEX botch!"
+#endif
+
+static void
+prop_iter_finalize(JSContext *cx, JSObject *obj)
+{
+ jsval v;
+ jsint i;
+ JSIdArray *ida;
+
+ v = GC_AWARE_GET_SLOT(cx, obj, JSSLOT_ITER_INDEX);
+ if (JSVAL_IS_VOID(v))
+ return;
+
+ i = JSVAL_TO_INT(v);
+ if (i >= 0) {
+ /* Non-native case: destroy the ida enumerated when obj was created. */
+ ida = (JSIdArray *) JS_GetPrivate(cx, obj);
+ if (ida)
+ JS_DestroyIdArray(cx, ida);
+ }
+}
+
+static uint32
+prop_iter_mark(JSContext *cx, JSObject *obj, void *arg)
+{
+ jsval v;
+ jsint i, n;
+ JSScopeProperty *sprop;
+ JSIdArray *ida;
+ jsid id;
+
+ v = GC_AWARE_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
+ JS_ASSERT(!JSVAL_IS_VOID(v));
+
+ i = JSVAL_TO_INT(OBJ_GET_SLOT(cx, obj, JSSLOT_ITER_INDEX));
+ if (i < 0) {
+ /* Native case: just mark the next property to visit. */
+ sprop = (JSScopeProperty *) JSVAL_TO_PRIVATE(v);
+ if (sprop)
+ MARK_SCOPE_PROPERTY(cx, sprop);
+ } else {
+ /* Non-native case: mark each id in the JSIdArray private. */
+ ida = (JSIdArray *) JSVAL_TO_PRIVATE(v);
+ for (i = 0, n = ida->length; i < n; i++) {
+ id = ida->vector[i];
+ MARK_ID(cx, id);
+ }
+ }
+ return 0;
+}
+
+static JSClass prop_iter_class = {
+ "PropertyIterator",
+ JSCLASS_HAS_PRIVATE | JSCLASS_HAS_RESERVED_SLOTS(1),
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, prop_iter_finalize,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, prop_iter_mark, NULL
+};
+
+JS_PUBLIC_API(JSObject *)
+JS_NewPropertyIterator(JSContext *cx, JSObject *obj)
+{
+ JSObject *iterobj;
+ JSScope *scope;
+ void *pdata;
+ jsint index;
+ JSIdArray *ida;
+
+ CHECK_REQUEST(cx);
+ iterobj = js_NewObject(cx, &prop_iter_class, NULL, obj);
+ if (!iterobj)
+ return NULL;
+
+ if (OBJ_IS_NATIVE(obj)) {
+ /* Native case: start with the last property in obj's own scope. */
+ scope = OBJ_SCOPE(obj);
+ pdata = (scope->object == obj) ? scope->lastProp : NULL;
+ index = -1;
+ } else {
+ JSTempValueRooter tvr;
+
+ /*
+ * Non-native case: enumerate a JSIdArray and keep it via private.
+ *
+ * Note: we have to make sure that we root obj around the call to
+ * JS_Enumerate to protect against multiple allocations under it.
+ */
+ JS_PUSH_SINGLE_TEMP_ROOT(cx, OBJECT_TO_JSVAL(iterobj), &tvr);
+ ida = JS_Enumerate(cx, obj);
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ if (!ida)
+ goto bad;
+ pdata = ida;
+ index = ida->length;
+ }
+
+ /* iterobj cannot escape to other threads here. */
+ iterobj->slots[JSSLOT_PRIVATE] = PRIVATE_TO_JSVAL(pdata);
+ iterobj->slots[JSSLOT_ITER_INDEX] = INT_TO_JSVAL(index);
+ return iterobj;
+
+ bad:
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ return NULL;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_NextProperty(JSContext *cx, JSObject *iterobj, jsid *idp)
+{
+ jsint i;
+ JSObject *obj;
+ JSScope *scope;
+ JSScopeProperty *sprop;
+ JSIdArray *ida;
+
+ CHECK_REQUEST(cx);
+ i = JSVAL_TO_INT(OBJ_GET_SLOT(cx, iterobj, JSSLOT_ITER_INDEX));
+ if (i < 0) {
+ /* Native case: private data is a property tree node pointer. */
+ obj = OBJ_GET_PARENT(cx, iterobj);
+ JS_ASSERT(OBJ_IS_NATIVE(obj));
+ scope = OBJ_SCOPE(obj);
+ JS_ASSERT(scope->object == obj);
+ sprop = (JSScopeProperty *) JS_GetPrivate(cx, iterobj);
+
+ /*
+ * If the next property mapped by scope in the property tree ancestor
+ * line is not enumerable, or it's an alias, or one or more properties
+ * were deleted from the "middle" of the scope-mapped ancestor line
+ * and the next property was among those deleted, skip it and keep on
+ * trying to find an enumerable property that is still in scope.
+ */
+ while (sprop &&
+ (!(sprop->attrs & JSPROP_ENUMERATE) ||
+ (sprop->flags & SPROP_IS_ALIAS) ||
+ (SCOPE_HAD_MIDDLE_DELETE(scope) &&
+ !SCOPE_HAS_PROPERTY(scope, sprop)))) {
+ sprop = sprop->parent;
+ }
+
+ if (!sprop) {
+ *idp = JSVAL_VOID;
+ } else {
+ if (!JS_SetPrivate(cx, iterobj, sprop->parent))
+ return JS_FALSE;
+ *idp = sprop->id;
+ }
+ } else {
+ /* Non-native case: use the ida enumerated when iterobj was created. */
+ ida = (JSIdArray *) JS_GetPrivate(cx, iterobj);
+ JS_ASSERT(i <= ida->length);
+ if (i == 0) {
+ *idp = JSVAL_VOID;
+ } else {
+ *idp = ida->vector[--i];
+ OBJ_SET_SLOT(cx, iterobj, JSSLOT_ITER_INDEX, INT_TO_JSVAL(i));
+ }
+ }
+ return JS_TRUE;
+}
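+
+/*
+ * Usage sketch: the iterator protocol is "create, then call JS_NextProperty
+ * until *idp comes back as JSVAL_VOID".  The iterator object must stay
+ * reachable by the GC while in use; here it is rooted explicitly:
+ *
+ *     JSObject *iter;
+ *     jsid id;
+ *     JSBool ok;
+ *
+ *     iter = JS_NewPropertyIterator(cx, obj);
+ *     if (!iter || !JS_AddRoot(cx, &iter))
+ *         return JS_FALSE;
+ *     while ((ok = JS_NextProperty(cx, iter, &id)) && id != JSVAL_VOID) {
+ *         ... use id, e.g. via JS_IdToValue ...
+ *     }
+ *     JS_RemoveRoot(cx, &iter);
+ *     return ok;
+ */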
+
+JS_PUBLIC_API(JSBool)
+JS_CheckAccess(JSContext *cx, JSObject *obj, jsid id, JSAccessMode mode,
+ jsval *vp, uintN *attrsp)
+{
+ CHECK_REQUEST(cx);
+ return OBJ_CHECK_ACCESS(cx, obj, id, mode, vp, attrsp);
+}
+
+JS_PUBLIC_API(JSCheckAccessOp)
+JS_SetCheckObjectAccessCallback(JSRuntime *rt, JSCheckAccessOp acb)
+{
+ JSCheckAccessOp oldacb;
+
+ oldacb = rt->checkObjectAccess;
+ rt->checkObjectAccess = acb;
+ return oldacb;
+}
+
+static JSBool
+ReservedSlotIndexOK(JSContext *cx, JSObject *obj, JSClass *clasp,
+ uint32 index, uint32 limit)
+{
+ /* Check the computed, possibly per-instance, upper bound. */
+ if (clasp->reserveSlots)
+ JS_LOCK_OBJ_VOID(cx, obj, limit += clasp->reserveSlots(cx, obj));
+ if (index >= limit) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_RESERVED_SLOT_RANGE);
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_GetReservedSlot(JSContext *cx, JSObject *obj, uint32 index, jsval *vp)
+{
+ JSClass *clasp;
+ uint32 limit, slot;
+
+ CHECK_REQUEST(cx);
+ clasp = OBJ_GET_CLASS(cx, obj);
+ limit = JSCLASS_RESERVED_SLOTS(clasp);
+ if (index >= limit && !ReservedSlotIndexOK(cx, obj, clasp, index, limit))
+ return JS_FALSE;
+ slot = JSSLOT_START(clasp) + index;
+ *vp = OBJ_GET_REQUIRED_SLOT(cx, obj, slot);
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetReservedSlot(JSContext *cx, JSObject *obj, uint32 index, jsval v)
+{
+ JSClass *clasp;
+ uint32 limit, slot;
+
+ CHECK_REQUEST(cx);
+ clasp = OBJ_GET_CLASS(cx, obj);
+ limit = JSCLASS_RESERVED_SLOTS(clasp);
+ if (index >= limit && !ReservedSlotIndexOK(cx, obj, clasp, index, limit))
+ return JS_FALSE;
+ slot = JSSLOT_START(clasp) + index;
+ return OBJ_SET_REQUIRED_SLOT(cx, obj, slot, v);
+}
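+
+/*
+ * Usage sketch: reserved slots are declared in the class flags and addressed
+ * by zero-based index; the JSSLOT_START offset above is applied internally.
+ * Assuming a class with one reserved slot:
+ *
+ *     static JSClass counter_class = {
+ *         "Counter",
+ *         JSCLASS_HAS_RESERVED_SLOTS(1),
+ *         JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ *         JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, JS_FinalizeStub,
+ *         NULL, NULL, NULL, NULL,
+ *         NULL, NULL, NULL, NULL
+ *     };
+ *
+ *     JSObject *o = JS_NewObject(cx, &counter_class, NULL, NULL);
+ *     jsval v;
+ *
+ *     if (!o ||
+ *         !JS_SetReservedSlot(cx, o, 0, INT_TO_JSVAL(42)) ||
+ *         !JS_GetReservedSlot(cx, o, 0, &v))
+ *         return JS_FALSE;
+ *     JS_ASSERT(JSVAL_TO_INT(v) == 42);
+ */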
+
+#ifdef JS_THREADSAFE
+JS_PUBLIC_API(jsrefcount)
+JS_HoldPrincipals(JSContext *cx, JSPrincipals *principals)
+{
+ return JS_ATOMIC_INCREMENT(&principals->refcount);
+}
+
+JS_PUBLIC_API(jsrefcount)
+JS_DropPrincipals(JSContext *cx, JSPrincipals *principals)
+{
+ jsrefcount rc = JS_ATOMIC_DECREMENT(&principals->refcount);
+ if (rc == 0)
+ principals->destroy(cx, principals);
+ return rc;
+}
+#endif
+
+JS_PUBLIC_API(JSPrincipalsTranscoder)
+JS_SetPrincipalsTranscoder(JSRuntime *rt, JSPrincipalsTranscoder px)
+{
+ JSPrincipalsTranscoder oldpx;
+
+ oldpx = rt->principalsTranscoder;
+ rt->principalsTranscoder = px;
+ return oldpx;
+}
+
+JS_PUBLIC_API(JSObjectPrincipalsFinder)
+JS_SetObjectPrincipalsFinder(JSRuntime *rt, JSObjectPrincipalsFinder fop)
+{
+ JSObjectPrincipalsFinder oldfop;
+
+ oldfop = rt->findObjectPrincipals;
+ rt->findObjectPrincipals = fop;
+ return oldfop;
+}
+
+JS_PUBLIC_API(JSFunction *)
+JS_NewFunction(JSContext *cx, JSNative native, uintN nargs, uintN flags,
+ JSObject *parent, const char *name)
+{
+ JSAtom *atom;
+
+ CHECK_REQUEST(cx);
+
+ if (!name) {
+ atom = NULL;
+ } else {
+ atom = js_Atomize(cx, name, strlen(name), 0);
+ if (!atom)
+ return NULL;
+ }
+ return js_NewFunction(cx, NULL, native, nargs, flags, parent, atom);
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_CloneFunctionObject(JSContext *cx, JSObject *funobj, JSObject *parent)
+{
+ CHECK_REQUEST(cx);
+ if (OBJ_GET_CLASS(cx, funobj) != &js_FunctionClass) {
+ /* Indicate we cannot clone this object. */
+ return funobj;
+ }
+ return js_CloneFunctionObject(cx, funobj, parent);
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_GetFunctionObject(JSFunction *fun)
+{
+ return fun->object;
+}
+
+JS_PUBLIC_API(const char *)
+JS_GetFunctionName(JSFunction *fun)
+{
+ return fun->atom
+ ? JS_GetStringBytes(ATOM_TO_STRING(fun->atom))
+ : js_anonymous_str;
+}
+
+JS_PUBLIC_API(JSString *)
+JS_GetFunctionId(JSFunction *fun)
+{
+ return fun->atom ? ATOM_TO_STRING(fun->atom) : NULL;
+}
+
+JS_PUBLIC_API(uintN)
+JS_GetFunctionFlags(JSFunction *fun)
+{
+#ifdef MOZILLA_1_8_BRANCH
+ uintN flags = fun->flags;
+
+ return JSFUN_DISJOINT_FLAGS(flags) |
+ (JSFUN_GETTER_TEST(flags) ? JSFUN_GETTER : 0) |
+ (JSFUN_SETTER_TEST(flags) ? JSFUN_SETTER : 0) |
+ (JSFUN_BOUND_METHOD_TEST(flags) ? JSFUN_BOUND_METHOD : 0) |
+ (JSFUN_HEAVYWEIGHT_TEST(flags) ? JSFUN_HEAVYWEIGHT : 0);
+#else
+ return fun->flags;
+#endif
+}
+
+JS_PUBLIC_API(uint16)
+JS_GetFunctionArity(JSFunction *fun)
+{
+ return fun->nargs;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ObjectIsFunction(JSContext *cx, JSObject *obj)
+{
+ return OBJ_GET_CLASS(cx, obj) == &js_FunctionClass;
+}
+
+JS_STATIC_DLL_CALLBACK(JSBool)
+js_generic_native_method_dispatcher(JSContext *cx, JSObject *obj,
+ uintN argc, jsval *argv, jsval *rval)
+{
+ jsval fsv;
+ JSFunctionSpec *fs;
+ JSObject *tmp;
+
+ if (!JS_GetReservedSlot(cx, JSVAL_TO_OBJECT(argv[-2]), 0, &fsv))
+ return JS_FALSE;
+ fs = (JSFunctionSpec *) JSVAL_TO_PRIVATE(fsv);
+
+ /*
+ * We know that argv[0] is valid because JS_DefineFunctions, which is our
+ * only (indirect) referrer, defined us as requiring at least one argument
+ * (notice how it passes fs->nargs + 1 as the next-to-last argument to
+ * JS_DefineFunction).
+ */
+ if (JSVAL_IS_PRIMITIVE(argv[0])) {
+ /*
+ * Make sure that this is an object or null, as required by the generic
+ * functions.
+ */
+ if (!js_ValueToObject(cx, argv[0], &tmp))
+ return JS_FALSE;
+ argv[0] = OBJECT_TO_JSVAL(tmp);
+ }
+
+ /*
+ * Copy all actual (argc) and required but missing (fs->nargs + 1 - argc)
+ * args down over our |this| parameter, argv[-1], which is almost always
+ * the class constructor object, e.g. Array. Then call the corresponding
+ * prototype native method with our first argument passed as |this|.
+ */
+ memmove(argv - 1, argv, JS_MAX(fs->nargs + 1U, argc) * sizeof(jsval));
+
+ /*
+ * Follow Function.prototype.apply and .call by using the global object as
+ * the 'this' param if no args.
+ */
+ JS_ASSERT(cx->fp->argv == argv);
+ tmp = js_ComputeThis(cx, JSVAL_TO_OBJECT(argv[-1]), argv);
+ if (!tmp)
+ return JS_FALSE;
+ cx->fp->thisp = tmp;
+
+ /*
+ * Protect against argc - 1 underflowing below. By calling js_ComputeThis,
+ * we made it as if the static was called with one parameter.
+ */
+ if (argc == 0)
+ argc = 1;
+
+ return fs->call(cx, JSVAL_TO_OBJECT(argv[-1]), argc - 1, argv, rval);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DefineFunctions(JSContext *cx, JSObject *obj, JSFunctionSpec *fs)
+{
+ uintN flags;
+ JSObject *ctor;
+ JSFunction *fun;
+
+ CHECK_REQUEST(cx);
+ ctor = NULL;
+ for (; fs->name; fs++) {
+
+ /* High bits of fs->extra are reserved. */
+ JS_ASSERT((fs->extra & 0xFFFF0000) == 0);
+ flags = fs->flags;
+
+ /*
+ * Define a generic arity N+1 static method for the arity N prototype
+ * method if flags contains JSFUN_GENERIC_NATIVE.
+ */
+ if (flags & JSFUN_GENERIC_NATIVE) {
+ if (!ctor) {
+ ctor = JS_GetConstructor(cx, obj);
+ if (!ctor)
+ return JS_FALSE;
+ }
+
+ flags &= ~JSFUN_GENERIC_NATIVE;
+ fun = JS_DefineFunction(cx, ctor, fs->name,
+ js_generic_native_method_dispatcher,
+ fs->nargs + 1, flags);
+ if (!fun)
+ return JS_FALSE;
+ fun->u.n.extra = (uint16)fs->extra;
+
+ /*
+ * As jsapi.h notes, fs must point to storage that lives as long
+ * as fun->object lives.
+ */
+ if (!JS_SetReservedSlot(cx, fun->object, 0, PRIVATE_TO_JSVAL(fs)))
+ return JS_FALSE;
+ }
+
+ fun = JS_DefineFunction(cx, obj, fs->name, fs->call, fs->nargs, flags);
+ if (!fun)
+ return JS_FALSE;
+ fun->u.n.extra = (uint16)fs->extra;
+ }
+ return JS_TRUE;
+}
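+
+/*
+ * Usage sketch: with JSFUN_GENERIC_NATIVE set in a spec's flags,
+ * JS_DefineFunctions also defines an arity nargs+1 static method on the
+ * class constructor that forwards to the prototype method, passing its first
+ * actual as |this|.  Because the dispatcher above stashes a pointer to the
+ * spec in a reserved slot, the array needs static storage.  Assuming my_trim
+ * is a native and string_proto is the String prototype object:
+ *
+ *     static JSFunctionSpec my_string_methods[] = {
+ *         {"trim", my_trim, 0, JSFUN_GENERIC_NATIVE, 0},
+ *         {NULL, NULL, 0, 0, 0}
+ *     };
+ *
+ *     if (!JS_DefineFunctions(cx, string_proto, my_string_methods))
+ *         return JS_FALSE;
+ *
+ * after which both "abc".trim() and String.trim("abc") invoke my_trim.
+ */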
+
+JS_PUBLIC_API(JSFunction *)
+JS_DefineFunction(JSContext *cx, JSObject *obj, const char *name, JSNative call,
+ uintN nargs, uintN attrs)
+{
+ JSAtom *atom;
+
+ CHECK_REQUEST(cx);
+ atom = js_Atomize(cx, name, strlen(name), 0);
+ if (!atom)
+ return NULL;
+ return js_DefineFunction(cx, obj, atom, call, nargs, attrs);
+}
+
+JS_PUBLIC_API(JSFunction *)
+JS_DefineUCFunction(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen, JSNative call,
+ uintN nargs, uintN attrs)
+{
+ JSAtom *atom;
+
+ atom = js_AtomizeChars(cx, name, AUTO_NAMELEN(name, namelen), 0);
+ if (!atom)
+ return NULL;
+ return js_DefineFunction(cx, obj, atom, call, nargs, attrs);
+}
+
+static JSScript *
+CompileTokenStream(JSContext *cx, JSObject *obj, JSTokenStream *ts,
+ void *tempMark, JSBool *eofp)
+{
+ JSBool eof;
+ JSArenaPool codePool, notePool;
+ JSCodeGenerator cg;
+ JSScript *script;
+
+ CHECK_REQUEST(cx);
+ eof = JS_FALSE;
+ JS_InitArenaPool(&codePool, "code", 1024, sizeof(jsbytecode));
+ JS_InitArenaPool(&notePool, "note", 1024, sizeof(jssrcnote));
+ if (!js_InitCodeGenerator(cx, &cg, &codePool, &notePool,
+ ts->filename, ts->lineno,
+ ts->principals)) {
+ script = NULL;
+ } else if (!js_CompileTokenStream(cx, obj, ts, &cg)) {
+ script = NULL;
+ eof = (ts->flags & TSF_EOF) != 0;
+ } else {
+ script = js_NewScriptFromCG(cx, &cg, NULL);
+ }
+ if (eofp)
+ *eofp = eof;
+ if (!js_CloseTokenStream(cx, ts)) {
+ if (script)
+ js_DestroyScript(cx, script);
+ script = NULL;
+ }
+ cg.tempMark = tempMark;
+ js_FinishCodeGenerator(cx, &cg);
+ JS_FinishArenaPool(&codePool);
+ JS_FinishArenaPool(&notePool);
+ return script;
+}
+
+JS_PUBLIC_API(JSScript *)
+JS_CompileScript(JSContext *cx, JSObject *obj,
+ const char *bytes, size_t length,
+ const char *filename, uintN lineno)
+{
+ jschar *chars;
+ JSScript *script;
+
+ CHECK_REQUEST(cx);
+ chars = js_InflateString(cx, bytes, &length);
+ if (!chars)
+ return NULL;
+ script = JS_CompileUCScript(cx, obj, chars, length, filename, lineno);
+ JS_free(cx, chars);
+ return script;
+}
+
+JS_PUBLIC_API(JSScript *)
+JS_CompileScriptForPrincipals(JSContext *cx, JSObject *obj,
+ JSPrincipals *principals,
+ const char *bytes, size_t length,
+ const char *filename, uintN lineno)
+{
+ jschar *chars;
+ JSScript *script;
+
+ CHECK_REQUEST(cx);
+ chars = js_InflateString(cx, bytes, &length);
+ if (!chars)
+ return NULL;
+ script = JS_CompileUCScriptForPrincipals(cx, obj, principals,
+ chars, length, filename, lineno);
+ JS_free(cx, chars);
+ return script;
+}
+
+JS_PUBLIC_API(JSScript *)
+JS_CompileUCScript(JSContext *cx, JSObject *obj,
+ const jschar *chars, size_t length,
+ const char *filename, uintN lineno)
+{
+ CHECK_REQUEST(cx);
+ return JS_CompileUCScriptForPrincipals(cx, obj, NULL, chars, length,
+ filename, lineno);
+}
+
+#define LAST_FRAME_EXCEPTION_CHECK(cx,result) \
+ JS_BEGIN_MACRO \
+ if (!(result) && !((cx)->options & JSOPTION_DONT_REPORT_UNCAUGHT)) \
+ js_ReportUncaughtException(cx); \
+ JS_END_MACRO
+
+#define LAST_FRAME_CHECKS(cx,result) \
+ JS_BEGIN_MACRO \
+ if (!(cx)->fp) { \
+ (cx)->weakRoots.lastInternalResult = JSVAL_NULL; \
+ LAST_FRAME_EXCEPTION_CHECK(cx, result); \
+ } \
+ JS_END_MACRO
+
+JS_PUBLIC_API(JSScript *)
+JS_CompileUCScriptForPrincipals(JSContext *cx, JSObject *obj,
+ JSPrincipals *principals,
+ const jschar *chars, size_t length,
+ const char *filename, uintN lineno)
+{
+ void *mark;
+ JSTokenStream *ts;
+ JSScript *script;
+
+ CHECK_REQUEST(cx);
+ mark = JS_ARENA_MARK(&cx->tempPool);
+ ts = js_NewTokenStream(cx, chars, length, filename, lineno, principals);
+ if (!ts)
+ return NULL;
+ script = CompileTokenStream(cx, obj, ts, mark, NULL);
+ LAST_FRAME_CHECKS(cx, script);
+ return script;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_BufferIsCompilableUnit(JSContext *cx, JSObject *obj,
+ const char *bytes, size_t length)
+{
+ jschar *chars;
+ JSBool result;
+ JSExceptionState *exnState;
+ void *tempMark;
+ JSTokenStream *ts;
+ JSErrorReporter older;
+
+ CHECK_REQUEST(cx);
+ chars = js_InflateString(cx, bytes, &length);
+ if (!chars)
+ return JS_TRUE;
+
+ /*
+ * Return true on any out-of-memory error, so our caller doesn't try to
+ * collect more buffered source.
+ */
+ result = JS_TRUE;
+ exnState = JS_SaveExceptionState(cx);
+ tempMark = JS_ARENA_MARK(&cx->tempPool);
+ ts = js_NewTokenStream(cx, chars, length, NULL, 0, NULL);
+ if (ts) {
+ older = JS_SetErrorReporter(cx, NULL);
+ if (!js_ParseTokenStream(cx, obj, ts) &&
+ (ts->flags & TSF_UNEXPECTED_EOF)) {
+ /*
+ * We ran into an error. If it was because we ran out of source,
+ * we return false, so our caller will know to try to collect more
+ * buffered source.
+ */
+ result = JS_FALSE;
+ }
+
+ JS_SetErrorReporter(cx, older);
+ js_CloseTokenStream(cx, ts);
+ JS_ARENA_RELEASE(&cx->tempPool, tempMark);
+ }
+
+ JS_free(cx, chars);
+ JS_RestoreExceptionState(cx, exnState);
+ return result;
+}
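+
+/*
+ * Usage sketch: a line-oriented shell keeps appending input until the buffer
+ * parses as a complete unit, then evaluates it.  read_line_into() is a
+ * stand-in for however the embedding collects a line, and |global| for its
+ * global object:
+ *
+ *     char buf[4096];
+ *     size_t len = 0;
+ *     jsval rval;
+ *
+ *     do {
+ *         len += read_line_into(buf + len, sizeof buf - len);
+ *     } while (!JS_BufferIsCompilableUnit(cx, global, buf, len));
+ *     JS_EvaluateScript(cx, global, buf, len, "typein", 1, &rval);
+ */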
+
+JS_PUBLIC_API(JSScript *)
+JS_CompileFile(JSContext *cx, JSObject *obj, const char *filename)
+{
+ void *mark;
+ JSTokenStream *ts;
+ JSScript *script;
+
+ CHECK_REQUEST(cx);
+ mark = JS_ARENA_MARK(&cx->tempPool);
+ ts = js_NewFileTokenStream(cx, filename, stdin);
+ if (!ts)
+ return NULL;
+ script = CompileTokenStream(cx, obj, ts, mark, NULL);
+ LAST_FRAME_CHECKS(cx, script);
+ return script;
+}
+
+JS_PUBLIC_API(JSScript *)
+JS_CompileFileHandle(JSContext *cx, JSObject *obj, const char *filename,
+ FILE *file)
+{
+ return JS_CompileFileHandleForPrincipals(cx, obj, filename, file, NULL);
+}
+
+JS_PUBLIC_API(JSScript *)
+JS_CompileFileHandleForPrincipals(JSContext *cx, JSObject *obj,
+ const char *filename, FILE *file,
+ JSPrincipals *principals)
+{
+ void *mark;
+ JSTokenStream *ts;
+ JSScript *script;
+
+ CHECK_REQUEST(cx);
+ mark = JS_ARENA_MARK(&cx->tempPool);
+ ts = js_NewFileTokenStream(cx, NULL, file);
+ if (!ts)
+ return NULL;
+ ts->filename = filename;
+ /* XXXshaver js_NewFileTokenStream should do this, because it drops */
+ if (principals) {
+ ts->principals = principals;
+ JSPRINCIPALS_HOLD(cx, ts->principals);
+ }
+ script = CompileTokenStream(cx, obj, ts, mark, NULL);
+ LAST_FRAME_CHECKS(cx, script);
+ return script;
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_NewScriptObject(JSContext *cx, JSScript *script)
+{
+ JSObject *obj;
+
+ obj = js_NewObject(cx, &js_ScriptClass, NULL, NULL);
+ if (!obj)
+ return NULL;
+
+ if (script) {
+ if (!JS_SetPrivate(cx, obj, script))
+ return NULL;
+ script->object = obj;
+ }
+ return obj;
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_GetScriptObject(JSScript *script)
+{
+ return script->object;
+}
+
+JS_PUBLIC_API(void)
+JS_DestroyScript(JSContext *cx, JSScript *script)
+{
+ CHECK_REQUEST(cx);
+ js_DestroyScript(cx, script);
+}
+
+JS_PUBLIC_API(JSFunction *)
+JS_CompileFunction(JSContext *cx, JSObject *obj, const char *name,
+ uintN nargs, const char **argnames,
+ const char *bytes, size_t length,
+ const char *filename, uintN lineno)
+{
+ jschar *chars;
+ JSFunction *fun;
+
+ CHECK_REQUEST(cx);
+ chars = js_InflateString(cx, bytes, &length);
+ if (!chars)
+ return NULL;
+ fun = JS_CompileUCFunction(cx, obj, name, nargs, argnames, chars, length,
+ filename, lineno);
+ JS_free(cx, chars);
+ return fun;
+}
+
+JS_PUBLIC_API(JSFunction *)
+JS_CompileFunctionForPrincipals(JSContext *cx, JSObject *obj,
+ JSPrincipals *principals, const char *name,
+ uintN nargs, const char **argnames,
+ const char *bytes, size_t length,
+ const char *filename, uintN lineno)
+{
+ jschar *chars;
+ JSFunction *fun;
+
+ CHECK_REQUEST(cx);
+ chars = js_InflateString(cx, bytes, &length);
+ if (!chars)
+ return NULL;
+ fun = JS_CompileUCFunctionForPrincipals(cx, obj, principals, name,
+ nargs, argnames, chars, length,
+ filename, lineno);
+ JS_free(cx, chars);
+ return fun;
+}
+
+JS_PUBLIC_API(JSFunction *)
+JS_CompileUCFunction(JSContext *cx, JSObject *obj, const char *name,
+ uintN nargs, const char **argnames,
+ const jschar *chars, size_t length,
+ const char *filename, uintN lineno)
+{
+ CHECK_REQUEST(cx);
+ return JS_CompileUCFunctionForPrincipals(cx, obj, NULL, name,
+ nargs, argnames,
+ chars, length,
+ filename, lineno);
+}
+
+JS_PUBLIC_API(JSFunction *)
+JS_CompileUCFunctionForPrincipals(JSContext *cx, JSObject *obj,
+ JSPrincipals *principals, const char *name,
+ uintN nargs, const char **argnames,
+ const jschar *chars, size_t length,
+ const char *filename, uintN lineno)
+{
+ void *mark;
+ JSTokenStream *ts;
+ JSFunction *fun;
+ JSAtom *funAtom, *argAtom;
+ uintN i;
+
+ CHECK_REQUEST(cx);
+ mark = JS_ARENA_MARK(&cx->tempPool);
+ ts = js_NewTokenStream(cx, chars, length, filename, lineno, principals);
+ if (!ts) {
+ fun = NULL;
+ goto out;
+ }
+ if (!name) {
+ funAtom = NULL;
+ } else {
+ funAtom = js_Atomize(cx, name, strlen(name), 0);
+ if (!funAtom) {
+ fun = NULL;
+ goto out;
+ }
+ }
+ fun = js_NewFunction(cx, NULL, NULL, nargs, 0, obj, funAtom);
+ if (!fun)
+ goto out;
+ if (nargs) {
+ for (i = 0; i < nargs; i++) {
+ argAtom = js_Atomize(cx, argnames[i], strlen(argnames[i]), 0);
+ if (!argAtom)
+ break;
+ if (!js_AddHiddenProperty(cx, fun->object, ATOM_TO_JSID(argAtom),
+ js_GetArgument, js_SetArgument,
+ SPROP_INVALID_SLOT,
+ JSPROP_PERMANENT | JSPROP_SHARED,
+ SPROP_HAS_SHORTID, i)) {
+ break;
+ }
+ }
+ if (i < nargs) {
+ fun = NULL;
+ goto out;
+ }
+ }
+ if (!js_CompileFunctionBody(cx, ts, fun)) {
+ fun = NULL;
+ goto out;
+ }
+ if (obj && funAtom) {
+ if (!OBJ_DEFINE_PROPERTY(cx, obj, ATOM_TO_JSID(funAtom),
+ OBJECT_TO_JSVAL(fun->object),
+ NULL, NULL, JSPROP_ENUMERATE, NULL)) {
+ return NULL;
+ }
+ }
+out:
+ if (ts)
+ js_CloseTokenStream(cx, ts);
+ JS_ARENA_RELEASE(&cx->tempPool, mark);
+ LAST_FRAME_CHECKS(cx, fun);
+ return fun;
+}
+
+JS_PUBLIC_API(JSString *)
+JS_DecompileScript(JSContext *cx, JSScript *script, const char *name,
+ uintN indent)
+{
+ JSPrinter *jp;
+ JSString *str;
+
+ CHECK_REQUEST(cx);
+ jp = js_NewPrinter(cx, name,
+ indent & ~JS_DONT_PRETTY_PRINT,
+ !(indent & JS_DONT_PRETTY_PRINT));
+ if (!jp)
+ return NULL;
+ if (js_DecompileScript(jp, script))
+ str = js_GetPrinterOutput(jp);
+ else
+ str = NULL;
+ js_DestroyPrinter(jp);
+ return str;
+}
+
+JS_PUBLIC_API(JSString *)
+JS_DecompileFunction(JSContext *cx, JSFunction *fun, uintN indent)
+{
+ JSPrinter *jp;
+ JSString *str;
+
+ CHECK_REQUEST(cx);
+ jp = js_NewPrinter(cx, JS_GetFunctionName(fun),
+ indent & ~JS_DONT_PRETTY_PRINT,
+ !(indent & JS_DONT_PRETTY_PRINT));
+ if (!jp)
+ return NULL;
+ if (js_DecompileFunction(jp, fun))
+ str = js_GetPrinterOutput(jp);
+ else
+ str = NULL;
+ js_DestroyPrinter(jp);
+ return str;
+}
+
+JS_PUBLIC_API(JSString *)
+JS_DecompileFunctionBody(JSContext *cx, JSFunction *fun, uintN indent)
+{
+ JSPrinter *jp;
+ JSString *str;
+
+ CHECK_REQUEST(cx);
+ jp = js_NewPrinter(cx, JS_GetFunctionName(fun),
+ indent & ~JS_DONT_PRETTY_PRINT,
+ !(indent & JS_DONT_PRETTY_PRINT));
+ if (!jp)
+ return NULL;
+ if (js_DecompileFunctionBody(jp, fun))
+ str = js_GetPrinterOutput(jp);
+ else
+ str = NULL;
+ js_DestroyPrinter(jp);
+ return str;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ExecuteScript(JSContext *cx, JSObject *obj, JSScript *script, jsval *rval)
+{
+ JSBool ok;
+
+ CHECK_REQUEST(cx);
+ ok = js_Execute(cx, obj, script, NULL, 0, rval);
+ LAST_FRAME_CHECKS(cx, ok);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ExecuteScriptPart(JSContext *cx, JSObject *obj, JSScript *script,
+ JSExecPart part, jsval *rval)
+{
+ JSScript tmp;
+ JSRuntime *rt;
+ JSBool ok;
+
+ /* Make a temporary copy of the JSScript structure and farble it a bit. */
+ tmp = *script;
+ if (part == JSEXEC_PROLOG) {
+ tmp.length = PTRDIFF(tmp.main, tmp.code, jsbytecode);
+ } else {
+ tmp.length -= PTRDIFF(tmp.main, tmp.code, jsbytecode);
+ tmp.code = tmp.main;
+ }
+
+ /* Tell the debugger about our temporary copy of the script structure. */
+ rt = cx->runtime;
+ if (rt->newScriptHook) {
+ rt->newScriptHook(cx, tmp.filename, tmp.lineno, &tmp, NULL,
+ rt->newScriptHookData);
+ }
+
+ /* Execute the farbled struct and tell the debugger to forget about it. */
+ ok = JS_ExecuteScript(cx, obj, &tmp, rval);
+ if (rt->destroyScriptHook)
+ rt->destroyScriptHook(cx, &tmp, rt->destroyScriptHookData);
+ return ok;
+}
+
+/* Ancient uintN nbytes is part of API/ABI, so use size_t length local. */
+JS_PUBLIC_API(JSBool)
+JS_EvaluateScript(JSContext *cx, JSObject *obj,
+ const char *bytes, uintN nbytes,
+ const char *filename, uintN lineno,
+ jsval *rval)
+{
+ size_t length = nbytes;
+ jschar *chars;
+ JSBool ok;
+
+ CHECK_REQUEST(cx);
+ chars = js_InflateString(cx, bytes, &length);
+ if (!chars)
+ return JS_FALSE;
+ ok = JS_EvaluateUCScript(cx, obj, chars, length, filename, lineno, rval);
+ JS_free(cx, chars);
+ return ok;
+}
+
+/* Ancient uintN nbytes is part of API/ABI, so use size_t length local. */
+JS_PUBLIC_API(JSBool)
+JS_EvaluateScriptForPrincipals(JSContext *cx, JSObject *obj,
+ JSPrincipals *principals,
+ const char *bytes, uintN nbytes,
+ const char *filename, uintN lineno,
+ jsval *rval)
+{
+ size_t length = nbytes;
+ jschar *chars;
+ JSBool ok;
+
+ CHECK_REQUEST(cx);
+ chars = js_InflateString(cx, bytes, &length);
+ if (!chars)
+ return JS_FALSE;
+ ok = JS_EvaluateUCScriptForPrincipals(cx, obj, principals, chars, length,
+ filename, lineno, rval);
+ JS_free(cx, chars);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_EvaluateUCScript(JSContext *cx, JSObject *obj,
+ const jschar *chars, uintN length,
+ const char *filename, uintN lineno,
+ jsval *rval)
+{
+ CHECK_REQUEST(cx);
+ return JS_EvaluateUCScriptForPrincipals(cx, obj, NULL, chars, length,
+ filename, lineno, rval);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_EvaluateUCScriptForPrincipals(JSContext *cx, JSObject *obj,
+ JSPrincipals *principals,
+ const jschar *chars, uintN length,
+ const char *filename, uintN lineno,
+ jsval *rval)
+{
+ uint32 options;
+ JSScript *script;
+ JSBool ok;
+
+ CHECK_REQUEST(cx);
+ options = cx->options;
+ cx->options = options | JSOPTION_COMPILE_N_GO;
+ script = JS_CompileUCScriptForPrincipals(cx, obj, principals, chars, length,
+ filename, lineno);
+ cx->options = options;
+ if (!script)
+ return JS_FALSE;
+ ok = js_Execute(cx, obj, script, NULL, 0, rval);
+ LAST_FRAME_CHECKS(cx, ok);
+ JS_DestroyScript(cx, script);
+ return ok;
+}
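+
+/*
+ * Usage sketch: the Evaluate* entry points bundle compile-and-go
+ * compilation, execution and script teardown, so a one-shot evaluation needs
+ * no explicit JSScript management.  Assuming |global| is the embedding's
+ * global object:
+ *
+ *     static const char src[] = "6 * 7";
+ *     jsval rval;
+ *
+ *     if (!JS_EvaluateScript(cx, global, src, strlen(src), "inline", 1,
+ *                            &rval))
+ *         return JS_FALSE;
+ *     JS_ASSERT(JSVAL_IS_INT(rval) && JSVAL_TO_INT(rval) == 42);
+ */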
+
+JS_PUBLIC_API(JSBool)
+JS_CallFunction(JSContext *cx, JSObject *obj, JSFunction *fun, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ JSBool ok;
+
+ CHECK_REQUEST(cx);
+ ok = js_InternalCall(cx, obj, OBJECT_TO_JSVAL(fun->object), argc, argv,
+ rval);
+ LAST_FRAME_CHECKS(cx, ok);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_CallFunctionName(JSContext *cx, JSObject *obj, const char *name, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ JSBool ok;
+ jsval fval;
+
+ CHECK_REQUEST(cx);
+#if JS_HAS_XML_SUPPORT
+ if (OBJECT_IS_XML(cx, obj)) {
+ JSXMLObjectOps *ops;
+ JSAtom *atom;
+
+ ops = (JSXMLObjectOps *) obj->map->ops;
+ atom = js_Atomize(cx, name, strlen(name), 0);
+ if (!atom)
+ return JS_FALSE;
+ obj = ops->getMethod(cx, obj, ATOM_TO_JSID(atom), &fval);
+ if (!obj)
+ return JS_FALSE;
+ } else
+#endif
+ if (!JS_GetProperty(cx, obj, name, &fval))
+ return JS_FALSE;
+ ok = js_InternalCall(cx, obj, fval, argc, argv, rval);
+ LAST_FRAME_CHECKS(cx, ok);
+ return ok;
+}
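+
+/*
+ * Usage sketch: invoking a script-defined function by name from native code.
+ * Assuming the script has defined function add(a, b) on |global|:
+ *
+ *     jsval argv[2], rval;
+ *
+ *     argv[0] = INT_TO_JSVAL(2);
+ *     argv[1] = INT_TO_JSVAL(3);
+ *     if (!JS_CallFunctionName(cx, global, "add", 2, argv, &rval))
+ *         return JS_FALSE;
+ *     JS_ASSERT(JSVAL_IS_INT(rval) && JSVAL_TO_INT(rval) == 5);
+ */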
+
+JS_PUBLIC_API(JSBool)
+JS_CallFunctionValue(JSContext *cx, JSObject *obj, jsval fval, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ JSBool ok;
+
+ CHECK_REQUEST(cx);
+ ok = js_InternalCall(cx, obj, fval, argc, argv, rval);
+ LAST_FRAME_CHECKS(cx, ok);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBranchCallback)
+JS_SetBranchCallback(JSContext *cx, JSBranchCallback cb)
+{
+ JSBranchCallback oldcb;
+
+ oldcb = cx->branchCallback;
+ cx->branchCallback = cb;
+ return oldcb;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_IsRunning(JSContext *cx)
+{
+ return cx->fp != NULL;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_IsConstructing(JSContext *cx)
+{
+ return cx->fp && (cx->fp->flags & JSFRAME_CONSTRUCTING);
+}
+
+JS_FRIEND_API(JSBool)
+JS_IsAssigning(JSContext *cx)
+{
+ JSStackFrame *fp;
+ jsbytecode *pc;
+
+ for (fp = cx->fp; fp && !fp->script; fp = fp->down)
+ continue;
+ if (!fp || !(pc = fp->pc))
+ return JS_FALSE;
+ return (js_CodeSpec[*pc].format & JOF_ASSIGNING) != 0;
+}
+
+JS_PUBLIC_API(void)
+JS_SetCallReturnValue2(JSContext *cx, jsval v)
+{
+#if JS_HAS_LVALUE_RETURN
+ cx->rval2 = v;
+ cx->rval2set = JS_TRUE;
+#endif
+}
+
+JS_PUBLIC_API(JSStackFrame *)
+JS_SaveFrameChain(JSContext *cx)
+{
+ JSStackFrame *fp;
+
+ fp = cx->fp;
+ if (!fp)
+ return fp;
+
+ JS_ASSERT(!fp->dormantNext);
+ fp->dormantNext = cx->dormantFrameChain;
+ cx->dormantFrameChain = fp;
+ cx->fp = NULL;
+ return fp;
+}
+
+JS_PUBLIC_API(void)
+JS_RestoreFrameChain(JSContext *cx, JSStackFrame *fp)
+{
+ JS_ASSERT(!cx->fp);
+ if (!fp)
+ return;
+
+ JS_ASSERT(cx->dormantFrameChain == fp);
+ cx->fp = fp;
+ cx->dormantFrameChain = fp->dormantNext;
+ fp->dormantNext = NULL;
+}
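+
+/*
+ * Usage sketch: an embedding that re-enters the engine from a native but
+ * wants the nested execution to run on an apparently empty stack brackets
+ * the nested call with save/restore.  |global|, |src| and |srclen| are
+ * assumed to be supplied by the embedding:
+ *
+ *     JSStackFrame *saved = JS_SaveFrameChain(cx);
+ *     jsval rval;
+ *     JSBool ok;
+ *
+ *     ok = JS_EvaluateScript(cx, global, src, srclen, "nested", 1, &rval);
+ *     JS_RestoreFrameChain(cx, saved);
+ */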
+
+/************************************************************************/
+
+JS_PUBLIC_API(JSString *)
+JS_NewString(JSContext *cx, char *bytes, size_t nbytes)
+{
+ size_t length = nbytes;
+ jschar *chars;
+ JSString *str;
+
+ CHECK_REQUEST(cx);
+
+ /* Make a UTF-16 vector from the 8-bit char codes in bytes. */
+ chars = js_InflateString(cx, bytes, &length);
+ if (!chars)
+ return NULL;
+
+ /* Free chars (but not bytes, which caller frees on error) if we fail. */
+ str = js_NewString(cx, chars, length, 0);
+ if (!str) {
+ JS_free(cx, chars);
+ return NULL;
+ }
+
+ /* Hand off bytes to the deflated string cache, if possible. */
+ if (!js_SetStringBytes(cx->runtime, str, bytes, nbytes))
+ JS_free(cx, bytes);
+ return str;
+}
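+
+/*
+ * Usage sketch: JS_NewString takes ownership of |bytes| on success (handing
+ * it to the deflated-string cache or freeing it), so the buffer should come
+ * from JS_malloc and must not be freed by the caller afterwards.  Only on
+ * failure does the buffer still belong to the caller:
+ *
+ *     char *bytes = (char *) JS_malloc(cx, 6);
+ *     JSString *str;
+ *
+ *     if (!bytes)
+ *         return NULL;
+ *     memcpy(bytes, "hello", 6);
+ *     str = JS_NewString(cx, bytes, 5);
+ *     if (!str)
+ *         JS_free(cx, bytes);
+ *     return str;
+ */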
+
+JS_PUBLIC_API(JSString *)
+JS_NewStringCopyN(JSContext *cx, const char *s, size_t n)
+{
+ jschar *js;
+ JSString *str;
+
+ CHECK_REQUEST(cx);
+ js = js_InflateString(cx, s, &n);
+ if (!js)
+ return NULL;
+ str = js_NewString(cx, js, n, 0);
+ if (!str)
+ JS_free(cx, js);
+ return str;
+}
+
+JS_PUBLIC_API(JSString *)
+JS_NewStringCopyZ(JSContext *cx, const char *s)
+{
+ size_t n;
+ jschar *js;
+ JSString *str;
+
+ CHECK_REQUEST(cx);
+ if (!s)
+ return cx->runtime->emptyString;
+ n = strlen(s);
+ js = js_InflateString(cx, s, &n);
+ if (!js)
+ return NULL;
+ str = js_NewString(cx, js, n, 0);
+ if (!str)
+ JS_free(cx, js);
+ return str;
+}
+
+JS_PUBLIC_API(JSString *)
+JS_InternString(JSContext *cx, const char *s)
+{
+ JSAtom *atom;
+
+ CHECK_REQUEST(cx);
+ atom = js_Atomize(cx, s, strlen(s), ATOM_INTERNED);
+ if (!atom)
+ return NULL;
+ return ATOM_TO_STRING(atom);
+}
+
+JS_PUBLIC_API(JSString *)
+JS_NewUCString(JSContext *cx, jschar *chars, size_t length)
+{
+ CHECK_REQUEST(cx);
+ return js_NewString(cx, chars, length, 0);
+}
+
+JS_PUBLIC_API(JSString *)
+JS_NewUCStringCopyN(JSContext *cx, const jschar *s, size_t n)
+{
+ CHECK_REQUEST(cx);
+ return js_NewStringCopyN(cx, s, n, 0);
+}
+
+JS_PUBLIC_API(JSString *)
+JS_NewUCStringCopyZ(JSContext *cx, const jschar *s)
+{
+ CHECK_REQUEST(cx);
+ if (!s)
+ return cx->runtime->emptyString;
+ return js_NewStringCopyZ(cx, s, 0);
+}
+
+JS_PUBLIC_API(JSString *)
+JS_InternUCStringN(JSContext *cx, const jschar *s, size_t length)
+{
+ JSAtom *atom;
+
+ CHECK_REQUEST(cx);
+ atom = js_AtomizeChars(cx, s, length, ATOM_INTERNED);
+ if (!atom)
+ return NULL;
+ return ATOM_TO_STRING(atom);
+}
+
+JS_PUBLIC_API(JSString *)
+JS_InternUCString(JSContext *cx, const jschar *s)
+{
+ return JS_InternUCStringN(cx, s, js_strlen(s));
+}
+
+JS_PUBLIC_API(char *)
+JS_GetStringBytes(JSString *str)
+{
+ JSRuntime *rt;
+ char *bytes;
+
+ rt = js_GetGCStringRuntime(str);
+ bytes = js_GetStringBytes(rt, str);
+ return bytes ? bytes : "";
+}
+
+JS_PUBLIC_API(jschar *)
+JS_GetStringChars(JSString *str)
+{
+ /*
+ * API botch (again, shades of JS_GetStringBytes): we have no cx to pass
+ * to js_UndependString (called by js_GetStringChars) for out-of-memory
+ * error reports, so js_UndependString passes NULL and suppresses errors.
+ * If it fails to convert a dependent string into an independent one, our
+ * caller will not be guaranteed a \u0000 terminator as a backstop. This
+ * may break some clients who already misbehave on embedded NULs.
+ *
+ * The gain of dependent strings, which cure quadratic and cubic growth
+ * rate bugs in string concatenation, is worth this slight loss in API
+ * compatibility.
+ */
+ jschar *chars;
+
+ chars = js_GetStringChars(str);
+ return chars ? chars : JSSTRING_CHARS(str);
+}
+
+JS_PUBLIC_API(size_t)
+JS_GetStringLength(JSString *str)
+{
+ return JSSTRING_LENGTH(str);
+}
+
+JS_PUBLIC_API(intN)
+JS_CompareStrings(JSString *str1, JSString *str2)
+{
+ return js_CompareStrings(str1, str2);
+}
+
+JS_PUBLIC_API(JSString *)
+JS_NewGrowableString(JSContext *cx, jschar *chars, size_t length)
+{
+ CHECK_REQUEST(cx);
+ return js_NewString(cx, chars, length, GCF_MUTABLE);
+}
+
+JS_PUBLIC_API(JSString *)
+JS_NewDependentString(JSContext *cx, JSString *str, size_t start,
+ size_t length)
+{
+ CHECK_REQUEST(cx);
+ return js_NewDependentString(cx, str, start, length, 0);
+}
+
+JS_PUBLIC_API(JSString *)
+JS_ConcatStrings(JSContext *cx, JSString *left, JSString *right)
+{
+ CHECK_REQUEST(cx);
+ return js_ConcatStrings(cx, left, right);
+}
+
+JS_PUBLIC_API(const jschar *)
+JS_UndependString(JSContext *cx, JSString *str)
+{
+ CHECK_REQUEST(cx);
+ return js_UndependString(cx, str);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_MakeStringImmutable(JSContext *cx, JSString *str)
+{
+ CHECK_REQUEST(cx);
+ if (!js_UndependString(cx, str))
+ return JS_FALSE;
+
+ *js_GetGCThingFlags(str) &= ~GCF_MUTABLE;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_EncodeCharacters(JSContext *cx, const jschar *src, size_t srclen, char *dst,
+ size_t *dstlenp)
+{
+ return js_DeflateStringToBuffer(cx, src, srclen, dst, dstlenp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DecodeBytes(JSContext *cx, const char *src, size_t srclen, jschar *dst,
+ size_t *dstlenp)
+{
+ return js_InflateStringToBuffer(cx, src, srclen, dst, dstlenp);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_CStringsAreUTF8()
+{
+#ifdef JS_C_STRINGS_ARE_UTF8
+ return JS_TRUE;
+#else
+ return JS_FALSE;
+#endif
+}
+
+/************************************************************************/
+
+JS_PUBLIC_API(void)
+JS_ReportError(JSContext *cx, const char *format, ...)
+{
+ va_list ap;
+
+ va_start(ap, format);
+ js_ReportErrorVA(cx, JSREPORT_ERROR, format, ap);
+ va_end(ap);
+}
+
+JS_PUBLIC_API(void)
+JS_ReportErrorNumber(JSContext *cx, JSErrorCallback errorCallback,
+ void *userRef, const uintN errorNumber, ...)
+{
+ va_list ap;
+
+ va_start(ap, errorNumber);
+ js_ReportErrorNumberVA(cx, JSREPORT_ERROR, errorCallback, userRef,
+ errorNumber, JS_TRUE, ap);
+ va_end(ap);
+}
+
+JS_PUBLIC_API(void)
+JS_ReportErrorNumberUC(JSContext *cx, JSErrorCallback errorCallback,
+ void *userRef, const uintN errorNumber, ...)
+{
+ va_list ap;
+
+ va_start(ap, errorNumber);
+ js_ReportErrorNumberVA(cx, JSREPORT_ERROR, errorCallback, userRef,
+ errorNumber, JS_FALSE, ap);
+ va_end(ap);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ReportWarning(JSContext *cx, const char *format, ...)
+{
+ va_list ap;
+ JSBool ok;
+
+ va_start(ap, format);
+ ok = js_ReportErrorVA(cx, JSREPORT_WARNING, format, ap);
+ va_end(ap);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ReportErrorFlagsAndNumber(JSContext *cx, uintN flags,
+ JSErrorCallback errorCallback, void *userRef,
+ const uintN errorNumber, ...)
+{
+ va_list ap;
+ JSBool ok;
+
+ va_start(ap, errorNumber);
+ ok = js_ReportErrorNumberVA(cx, flags, errorCallback, userRef,
+ errorNumber, JS_TRUE, ap);
+ va_end(ap);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ReportErrorFlagsAndNumberUC(JSContext *cx, uintN flags,
+ JSErrorCallback errorCallback, void *userRef,
+ const uintN errorNumber, ...)
+{
+ va_list ap;
+ JSBool ok;
+
+ va_start(ap, errorNumber);
+ ok = js_ReportErrorNumberVA(cx, flags, errorCallback, userRef,
+ errorNumber, JS_FALSE, ap);
+ va_end(ap);
+ return ok;
+}
+
+JS_PUBLIC_API(void)
+JS_ReportOutOfMemory(JSContext *cx)
+{
+ js_ReportOutOfMemory(cx);
+}
+
+JS_PUBLIC_API(JSErrorReporter)
+JS_SetErrorReporter(JSContext *cx, JSErrorReporter er)
+{
+ JSErrorReporter older;
+
+ older = cx->errorReporter;
+ cx->errorReporter = er;
+ return older;
+}
+
+/************************************************************************/
+
+/*
+ * Regular Expressions.
+ */
+JS_PUBLIC_API(JSObject *)
+JS_NewRegExpObject(JSContext *cx, char *bytes, size_t length, uintN flags)
+{
+ jschar *chars;
+ JSObject *obj;
+
+ CHECK_REQUEST(cx);
+ chars = js_InflateString(cx, bytes, &length);
+ if (!chars)
+ return NULL;
+ obj = js_NewRegExpObject(cx, NULL, chars, length, flags);
+ JS_free(cx, chars);
+ return obj;
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_NewUCRegExpObject(JSContext *cx, jschar *chars, size_t length, uintN flags)
+{
+ CHECK_REQUEST(cx);
+ return js_NewRegExpObject(cx, NULL, chars, length, flags);
+}
+
+JS_PUBLIC_API(void)
+JS_SetRegExpInput(JSContext *cx, JSString *input, JSBool multiline)
+{
+ JSRegExpStatics *res;
+
+ CHECK_REQUEST(cx);
+ /* No locking required, cx is thread-private and input must be live. */
+ res = &cx->regExpStatics;
+ res->input = input;
+ res->multiline = multiline;
+ cx->runtime->gcPoke = JS_TRUE;
+}
+
+JS_PUBLIC_API(void)
+JS_ClearRegExpStatics(JSContext *cx)
+{
+ JSRegExpStatics *res;
+
+ /* No locking required, cx is thread-private and input must be live. */
+ res = &cx->regExpStatics;
+ res->input = NULL;
+ res->multiline = JS_FALSE;
+ res->parenCount = 0;
+ res->lastMatch = res->lastParen = js_EmptySubString;
+ res->leftContext = res->rightContext = js_EmptySubString;
+ cx->runtime->gcPoke = JS_TRUE;
+}
+
+JS_PUBLIC_API(void)
+JS_ClearRegExpRoots(JSContext *cx)
+{
+ JSRegExpStatics *res;
+
+ /* No locking required, cx is thread-private and input must be live. */
+ res = &cx->regExpStatics;
+ res->input = NULL;
+ cx->runtime->gcPoke = JS_TRUE;
+}
+
+/* TODO: compile, execute, get/set other statics... */
+
+/************************************************************************/
+
+JS_PUBLIC_API(void)
+JS_SetLocaleCallbacks(JSContext *cx, JSLocaleCallbacks *callbacks)
+{
+ cx->localeCallbacks = callbacks;
+}
+
+JS_PUBLIC_API(JSLocaleCallbacks *)
+JS_GetLocaleCallbacks(JSContext *cx)
+{
+ return cx->localeCallbacks;
+}
+
+/************************************************************************/
+
+JS_PUBLIC_API(JSBool)
+JS_IsExceptionPending(JSContext *cx)
+{
+ return (JSBool) cx->throwing;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_GetPendingException(JSContext *cx, jsval *vp)
+{
+ CHECK_REQUEST(cx);
+ if (!cx->throwing)
+ return JS_FALSE;
+ *vp = cx->exception;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(void)
+JS_SetPendingException(JSContext *cx, jsval v)
+{
+ CHECK_REQUEST(cx);
+ cx->throwing = JS_TRUE;
+ cx->exception = v;
+}
+
+JS_PUBLIC_API(void)
+JS_ClearPendingException(JSContext *cx)
+{
+ cx->throwing = JS_FALSE;
+ cx->exception = JSVAL_VOID;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ReportPendingException(JSContext *cx)
+{
+ JSBool save, ok;
+
+ CHECK_REQUEST(cx);
+
+ /*
+ * Set cx->creatingException to suppress the standard error-to-exception
+ * conversion done by all {js,JS}_Report* functions except for OOM. The
+ * cx->creatingException flag was added to suppress recursive divergence
+ * under js_ErrorToException, but it serves for our purposes here too.
+ */
+ save = cx->creatingException;
+ cx->creatingException = JS_TRUE;
+ ok = js_ReportUncaughtException(cx);
+ cx->creatingException = save;
+ return ok;
+}
+
+struct JSExceptionState {
+ JSBool throwing;
+ jsval exception;
+};
+
+JS_PUBLIC_API(JSExceptionState *)
+JS_SaveExceptionState(JSContext *cx)
+{
+ JSExceptionState *state;
+
+ CHECK_REQUEST(cx);
+ state = (JSExceptionState *) JS_malloc(cx, sizeof(JSExceptionState));
+ if (state) {
+ state->throwing = JS_GetPendingException(cx, &state->exception);
+ if (state->throwing && JSVAL_IS_GCTHING(state->exception))
+ js_AddRoot(cx, &state->exception, "JSExceptionState.exception");
+ }
+ return state;
+}
+
+JS_PUBLIC_API(void)
+JS_RestoreExceptionState(JSContext *cx, JSExceptionState *state)
+{
+ CHECK_REQUEST(cx);
+ if (state) {
+ if (state->throwing)
+ JS_SetPendingException(cx, state->exception);
+ else
+ JS_ClearPendingException(cx);
+ JS_DropExceptionState(cx, state);
+ }
+}
+
+JS_PUBLIC_API(void)
+JS_DropExceptionState(JSContext *cx, JSExceptionState *state)
+{
+ CHECK_REQUEST(cx);
+ if (state) {
+ if (state->throwing && JSVAL_IS_GCTHING(state->exception))
+ JS_RemoveRoot(cx, &state->exception);
+ JS_free(cx, state);
+ }
+}
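+
+/*
+ * Usage sketch: the save/restore pair lets an embedding run a speculative
+ * operation without disturbing an exception that is already pending, much as
+ * JS_BufferIsCompilableUnit does above.  try_something() stands in for any
+ * call that may set or clobber the pending exception:
+ *
+ *     JSExceptionState *state = JS_SaveExceptionState(cx);
+ *     JSBool ok = try_something(cx);
+ *
+ *     JS_RestoreExceptionState(cx, state);
+ *
+ * Note that JS_RestoreExceptionState drops |state| itself, so no separate
+ * JS_DropExceptionState call is needed on this path.
+ */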
+
+JS_PUBLIC_API(JSErrorReport *)
+JS_ErrorFromException(JSContext *cx, jsval v)
+{
+ CHECK_REQUEST(cx);
+ return js_ErrorFromException(cx, v);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ThrowReportedError(JSContext *cx, const char *message,
+ JSErrorReport *reportp)
+{
+ return js_ErrorToException(cx, message, reportp);
+}
+
+#ifdef JS_THREADSAFE
+/*
+ * Get the owning thread id of a context. Returns 0 if the context is not
+ * owned by any thread.
+ */
+JS_PUBLIC_API(jsword)
+JS_GetContextThread(JSContext *cx)
+{
+ return JS_THREAD_ID(cx);
+}
+
+/*
+ * Set the current thread as the owning thread of a context. Returns the
+ * old owning thread id, or -1 if the operation failed.
+ */
+JS_PUBLIC_API(jsword)
+JS_SetContextThread(JSContext *cx)
+{
+ jsword old = JS_THREAD_ID(cx);
+ if (!js_SetContextThread(cx))
+ return -1;
+ return old;
+}
+
+JS_PUBLIC_API(jsword)
+JS_ClearContextThread(JSContext *cx)
+{
+ jsword old = JS_THREAD_ID(cx);
+ js_ClearContextThread(cx);
+ return old;
+}
+#endif
+
+/************************************************************************/
+
+#if defined(XP_WIN)
+#include <windows.h>
+/*
+ * Initialization routine for the JS DLL...
+ */
+
+/*
+ * Global Instance handle...
+ * In Win32 this is the module handle of the DLL.
+ *
+ * In Win16 this is the instance handle of the application
+ * which loaded the DLL.
+ */
+
+#ifdef _WIN32
+BOOL WINAPI DllMain (HINSTANCE hDLL, DWORD dwReason, LPVOID lpReserved)
+{
+ return TRUE;
+}
+
+#else /* !_WIN32 */
+
+int CALLBACK LibMain( HINSTANCE hInst, WORD wDataSeg,
+ WORD cbHeapSize, LPSTR lpszCmdLine )
+{
+ return TRUE;
+}
+
+BOOL CALLBACK __loadds WEP(BOOL fSystemExit)
+{
+ return TRUE;
+}
+
+#endif /* !_WIN32 */
+#endif /* XP_WIN */
diff --git a/src/third_party/js-1.7/jsapi.h b/src/third_party/js-1.7/jsapi.h
new file mode 100644
index 00000000000..464f19ffc3a
--- /dev/null
+++ b/src/third_party/js-1.7/jsapi.h
@@ -0,0 +1,2220 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsapi_h___
+#define jsapi_h___
+/*
+ * JavaScript API.
+ */
+#include <stddef.h>
+#include <stdio.h>
+#include "jspubtd.h"
+
+JS_BEGIN_EXTERN_C
+
+/*
+ * Type tags stored in the low bits of a jsval.
+ */
+#define JSVAL_OBJECT 0x0 /* untagged reference to object */
+#define JSVAL_INT 0x1 /* tagged 31-bit integer value */
+#define JSVAL_DOUBLE 0x2 /* tagged reference to double */
+#define JSVAL_STRING 0x4 /* tagged reference to string */
+#define JSVAL_BOOLEAN 0x6 /* tagged boolean value */
+
+/* Type tag bitfield length and derived macros. */
+#define JSVAL_TAGBITS 3
+#define JSVAL_TAGMASK JS_BITMASK(JSVAL_TAGBITS)
+#define JSVAL_TAG(v) ((v) & JSVAL_TAGMASK)
+#define JSVAL_SETTAG(v,t) ((v) | (t))
+#define JSVAL_CLRTAG(v) ((v) & ~(jsval)JSVAL_TAGMASK)
+#define JSVAL_ALIGN JS_BIT(JSVAL_TAGBITS)
+
+/* Predicates for type testing. */
+#define JSVAL_IS_OBJECT(v) (JSVAL_TAG(v) == JSVAL_OBJECT)
+#define JSVAL_IS_NUMBER(v) (JSVAL_IS_INT(v) || JSVAL_IS_DOUBLE(v))
+#define JSVAL_IS_INT(v) (((v) & JSVAL_INT) && (v) != JSVAL_VOID)
+#define JSVAL_IS_DOUBLE(v) (JSVAL_TAG(v) == JSVAL_DOUBLE)
+#define JSVAL_IS_STRING(v) (JSVAL_TAG(v) == JSVAL_STRING)
+#define JSVAL_IS_BOOLEAN(v) (JSVAL_TAG(v) == JSVAL_BOOLEAN)
+#define JSVAL_IS_NULL(v) ((v) == JSVAL_NULL)
+#define JSVAL_IS_VOID(v) ((v) == JSVAL_VOID)
+#define JSVAL_IS_PRIMITIVE(v) (!JSVAL_IS_OBJECT(v) || JSVAL_IS_NULL(v))
+
+/* Objects, strings, and doubles are GC'ed. */
+#define JSVAL_IS_GCTHING(v) (!((v) & JSVAL_INT) && !JSVAL_IS_BOOLEAN(v))
+#define JSVAL_TO_GCTHING(v) ((void *)JSVAL_CLRTAG(v))
+#define JSVAL_TO_OBJECT(v) ((JSObject *)JSVAL_TO_GCTHING(v))
+#define JSVAL_TO_DOUBLE(v) ((jsdouble *)JSVAL_TO_GCTHING(v))
+#define JSVAL_TO_STRING(v) ((JSString *)JSVAL_TO_GCTHING(v))
+#define OBJECT_TO_JSVAL(obj) ((jsval)(obj))
+#define DOUBLE_TO_JSVAL(dp) JSVAL_SETTAG((jsval)(dp), JSVAL_DOUBLE)
+#define STRING_TO_JSVAL(str) JSVAL_SETTAG((jsval)(str), JSVAL_STRING)
+
+/* Lock and unlock the GC thing held by a jsval. */
+#define JSVAL_LOCK(cx,v) (JSVAL_IS_GCTHING(v) \
+ ? JS_LockGCThing(cx, JSVAL_TO_GCTHING(v)) \
+ : JS_TRUE)
+#define JSVAL_UNLOCK(cx,v) (JSVAL_IS_GCTHING(v) \
+ ? JS_UnlockGCThing(cx, JSVAL_TO_GCTHING(v)) \
+ : JS_TRUE)
+
+/* Domain limits for the jsval int type. */
+#define JSVAL_INT_BITS 31
+#define JSVAL_INT_POW2(n) ((jsval)1 << (n))
+#define JSVAL_INT_MIN ((jsval)1 - JSVAL_INT_POW2(30))
+#define JSVAL_INT_MAX (JSVAL_INT_POW2(30) - 1)
+#define INT_FITS_IN_JSVAL(i) ((jsuint)((i)+JSVAL_INT_MAX) <= 2*JSVAL_INT_MAX)
+#define JSVAL_TO_INT(v) ((jsint)(v) >> 1)
+#define INT_TO_JSVAL(i) (((jsval)(i) << 1) | JSVAL_INT)
+
+/* Convert between boolean and jsval. */
+#define JSVAL_TO_BOOLEAN(v) ((JSBool)((v) >> JSVAL_TAGBITS))
+#define BOOLEAN_TO_JSVAL(b) JSVAL_SETTAG((jsval)(b) << JSVAL_TAGBITS, \
+ JSVAL_BOOLEAN)
+
+/* A private data pointer (2-byte-aligned) can be stored as an int jsval. */
+#define JSVAL_TO_PRIVATE(v) ((void *)((v) & ~JSVAL_INT))
+#define PRIVATE_TO_JSVAL(p) ((jsval)(p) | JSVAL_INT)
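+
+/*
+ * Sketch: taken together, the macros above give the usual tag-test / untag /
+ * retag pattern.  For example, incrementing a jsval known to hold a small
+ * integer:
+ *
+ *     jsval v = INT_TO_JSVAL(41);
+ *     jsint n;
+ *
+ *     if (JSVAL_IS_INT(v)) {
+ *         n = JSVAL_TO_INT(v) + 1;
+ *         if (INT_FITS_IN_JSVAL(n))
+ *             v = INT_TO_JSVAL(n);
+ *     }
+ *
+ * after which JSVAL_TO_INT(v) yields 42.
+ */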
+
+/* Property attributes, set in JSPropertySpec and passed to API functions. */
+#define JSPROP_ENUMERATE 0x01 /* property is visible to for/in loop */
+#define JSPROP_READONLY 0x02 /* not settable: assignment is no-op */
+#define JSPROP_PERMANENT 0x04 /* property cannot be deleted */
+#define JSPROP_EXPORTED 0x08 /* property is exported from object */
+#define JSPROP_GETTER 0x10 /* property holds getter function */
+#define JSPROP_SETTER 0x20 /* property holds setter function */
+#define JSPROP_SHARED 0x40 /* don't allocate a value slot for this
+ property; don't copy the property on
+ set of the same-named property in an
+ object that delegates to a prototype
+ containing this property */
+#define JSPROP_INDEX 0x80 /* name is actually (jsint) index */
+
+/* Function flags, set in JSFunctionSpec and passed to JS_NewFunction etc. */
+#define JSFUN_LAMBDA 0x08 /* expressed, not declared, function */
+#define JSFUN_GETTER JSPROP_GETTER
+#define JSFUN_SETTER JSPROP_SETTER
+#define JSFUN_BOUND_METHOD 0x40 /* bind this to fun->object's parent */
+#define JSFUN_HEAVYWEIGHT 0x80 /* activation requires a Call object */
+
+#define JSFUN_DISJOINT_FLAGS(f) ((f) & 0x0f)
+#define JSFUN_GSFLAGS(f) ((f) & (JSFUN_GETTER | JSFUN_SETTER))
+
+#ifdef MOZILLA_1_8_BRANCH
+
+/*
+ * Squeeze three more bits into existing 8-bit flags by taking advantage of
+ * the invalid combination (JSFUN_GETTER | JSFUN_SETTER).
+ */
+#define JSFUN_GETTER_TEST(f) (JSFUN_GSFLAGS(f) == JSFUN_GETTER)
+#define JSFUN_SETTER_TEST(f) (JSFUN_GSFLAGS(f) == JSFUN_SETTER)
+#define JSFUN_FLAGS_TEST(f,t) (JSFUN_GSFLAGS(~(f)) ? (f) & (t) : 0)
+#define JSFUN_BOUND_METHOD_TEST(f) JSFUN_FLAGS_TEST(f, JSFUN_BOUND_METHOD)
+#define JSFUN_HEAVYWEIGHT_TEST(f) JSFUN_FLAGS_TEST(f, JSFUN_HEAVYWEIGHT)
+
+#define JSFUN_GSFLAG2ATTR(f) (JSFUN_GETTER_TEST(f) ? JSPROP_GETTER : \
+ JSFUN_SETTER_TEST(f) ? JSPROP_SETTER : 0)
+
+#define JSFUN_THISP_FLAGS(f) (JSFUN_GSFLAGS(~(f)) ? 0 : \
+ (f) & JSFUN_THISP_PRIMITIVE)
+#define JSFUN_THISP_TEST(f,t) ((f) == (t) || (f) == JSFUN_THISP_PRIMITIVE)
+
+#define JSFUN_THISP_STRING 0x30 /* |this| may be a primitive string */
+#define JSFUN_THISP_NUMBER 0x70 /* |this| may be a primitive number */
+#define JSFUN_THISP_BOOLEAN 0xb0 /* |this| may be a primitive boolean */
+#define JSFUN_THISP_PRIMITIVE 0xf0 /* |this| may be any primitive value */
+
+#define JSFUN_FLAGS_MASK 0xf8 /* overlay JSFUN_* attributes */
+
+#else
+
+#define JSFUN_GETTER_TEST(f) ((f) & JSFUN_GETTER)
+#define JSFUN_SETTER_TEST(f) ((f) & JSFUN_SETTER)
+#define JSFUN_BOUND_METHOD_TEST(f) ((f) & JSFUN_BOUND_METHOD)
+#define JSFUN_HEAVYWEIGHT_TEST(f) ((f) & JSFUN_HEAVYWEIGHT)
+
+#define JSFUN_GSFLAG2ATTR(f) JSFUN_GSFLAGS(f)
+
+#define JSFUN_THISP_FLAGS(f) (f)
+#define JSFUN_THISP_TEST(f,t) ((f) & t)
+
+#define JSFUN_THISP_STRING 0x0100 /* |this| may be a primitive string */
+#define JSFUN_THISP_NUMBER 0x0200 /* |this| may be a primitive number */
+#define JSFUN_THISP_BOOLEAN 0x0400 /* |this| may be a primitive boolean */
+#define JSFUN_THISP_PRIMITIVE 0x0700 /* |this| may be any primitive value */
+
+#define JSFUN_FLAGS_MASK 0x07f8 /* overlay JSFUN_* attributes --
+ note that bit #15 is used internally
+ to flag interpreted functions */
+
+#endif
+
+/*
+ * Re-use JSFUN_LAMBDA, which applies only to scripted functions, for use in
+ * JSFunctionSpec arrays that specify generic native prototype methods, i.e.,
+ * methods of a class prototype that are exposed as static methods taking an
+ * extra leading argument: the generic |this| parameter.
+ *
+ * If you set this flag in a JSFunctionSpec struct's flags initializer, then
+ * that struct must live at least as long as the native static method object
+ * created due to this flag by JS_DefineFunctions or JS_InitClass. Typically
+ * JSFunctionSpec structs are allocated in static arrays.
+ */
+#define JSFUN_GENERIC_NATIVE JSFUN_LAMBDA
+
+/*
+ * Well-known JS values. The extern'd variables are initialized when the
+ * first JSContext is created by JS_NewContext (see below).
+ */
+#define JSVAL_VOID INT_TO_JSVAL(0 - JSVAL_INT_POW2(30))
+#define JSVAL_NULL OBJECT_TO_JSVAL(0)
+#define JSVAL_ZERO INT_TO_JSVAL(0)
+#define JSVAL_ONE INT_TO_JSVAL(1)
+#define JSVAL_FALSE BOOLEAN_TO_JSVAL(JS_FALSE)
+#define JSVAL_TRUE BOOLEAN_TO_JSVAL(JS_TRUE)
+
+/*
+ * Microseconds since the epoch, midnight, January 1, 1970 UTC. See the
+ * comment in jstypes.h regarding safe int64 usage.
+ */
+extern JS_PUBLIC_API(int64)
+JS_Now();
+
+/* Don't want to export data, so provide accessors for non-inline jsvals. */
+extern JS_PUBLIC_API(jsval)
+JS_GetNaNValue(JSContext *cx);
+
+extern JS_PUBLIC_API(jsval)
+JS_GetNegativeInfinityValue(JSContext *cx);
+
+extern JS_PUBLIC_API(jsval)
+JS_GetPositiveInfinityValue(JSContext *cx);
+
+extern JS_PUBLIC_API(jsval)
+JS_GetEmptyStringValue(JSContext *cx);
+
+/*
+ * Format is a string of the following characters (spaces are insignificant),
+ * specifying the tabulated type conversions:
+ *
+ * b JSBool Boolean
+ * c uint16/jschar ECMA uint16, Unicode char
+ * i int32 ECMA int32
+ * u uint32 ECMA uint32
+ * j int32 Rounded int32 (coordinate)
+ * d jsdouble IEEE double
+ * I jsdouble Integral IEEE double
+ * s char * C string
+ * S JSString * Unicode string, accessed by a JSString pointer
+ * W jschar * Unicode character vector, 0-terminated (W for wide)
+ * o JSObject * Object reference
+ * f JSFunction * Function private
+ * v jsval Argument value (no conversion)
+ * * N/A Skip this argument (no vararg)
+ * / N/A End of required arguments
+ *
+ * The variable argument list after format must consist of &b, &c, &s, e.g.,
+ * where those variables have the types given above. For the pointer types
+ * char *, JSString *, and JSObject *, the pointed-at memory returned belongs
+ * to the JS runtime, not to the calling native code. The runtime promises
+ * to keep this memory valid so long as argv refers to allocated stack space
+ * (so long as the native function is active).
+ *
+ * Fewer arguments than format specifies may be passed only if there is a /
+ * in format after the last required argument specifier and argc is at least
+ * the number of required arguments. More arguments than format specifies
+ * may be passed without error; it is up to the caller to deal with trailing
+ * unconverted arguments.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_ConvertArguments(JSContext *cx, uintN argc, jsval *argv, const char *format,
+ ...);
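+
+/*
+ * For illustration only (not part of the original comment): a hypothetical
+ * native, my_native, taking one required string and an optional int32 might
+ * convert its actual arguments as follows.
+ *
+ *   static JSBool
+ *   my_native(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ *             jsval *rval)
+ *   {
+ *       JSString *str;
+ *       int32 count = 1;
+ *
+ *       if (!JS_ConvertArguments(cx, argc, argv, "S/i", &str, &count))
+ *           return JS_FALSE;
+ *       return JS_TRUE;
+ *   }
+ */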
+
+#ifdef va_start
+extern JS_PUBLIC_API(JSBool)
+JS_ConvertArgumentsVA(JSContext *cx, uintN argc, jsval *argv,
+ const char *format, va_list ap);
+#endif
+
+/*
+ * Inverse of JS_ConvertArguments: scan format and convert trailing arguments
+ * into jsvals, GC-rooted if necessary by the JS stack. Return null on error,
+ * and a pointer to the new argument vector on success. Also return a stack
+ * mark on success via *markp, in which case the caller must eventually clean
+ * up by calling JS_PopArguments.
+ *
+ * Note that the number of actual arguments supplied is specified exclusively
+ * by format, so there is no argc parameter.
+ */
+extern JS_PUBLIC_API(jsval *)
+JS_PushArguments(JSContext *cx, void **markp, const char *format, ...);
+
+#ifdef va_start
+extern JS_PUBLIC_API(jsval *)
+JS_PushArgumentsVA(JSContext *cx, void **markp, const char *format, va_list ap);
+#endif
+
+extern JS_PUBLIC_API(void)
+JS_PopArguments(JSContext *cx, void *mark);
+
+#ifdef JS_ARGUMENT_FORMATTER_DEFINED
+
+/*
+ * Add and remove a format string handler for JS_{Convert,Push}Arguments{,VA}.
+ * The handler function has this signature (see jspubtd.h):
+ *
+ * JSBool MyArgumentFormatter(JSContext *cx, const char *format,
+ * JSBool fromJS, jsval **vpp, va_list *app);
+ *
+ * It should return true on success, and return false after reporting an error
+ * or detecting an already-reported error.
+ *
+ * For a given format string, for example "AA", the formatter is called from
+ * JS_ConvertArgumentsVA like so:
+ *
+ * formatter(cx, "AA...", JS_TRUE, &sp, &ap);
+ *
+ * sp points into the arguments array on the JS stack, while ap points into
+ * the stdarg.h va_list on the C stack. The JS_TRUE passed for fromJS tells
+ * the formatter to convert zero or more jsvals at sp to zero or more C values
+ * accessed via pointers-to-values at ap, updating both sp (via *vpp) and ap
+ * (via *app) to point past the converted arguments and their result pointers
+ * on the C stack.
+ *
+ * When called from JS_PushArgumentsVA, the formatter is invoked thus:
+ *
+ * formatter(cx, "AA...", JS_FALSE, &sp, &ap);
+ *
+ * where JS_FALSE for fromJS means to wrap the C values at ap according to the
+ * format specifier and store them at sp, updating ap and sp appropriately.
+ *
+ * The "..." after "AA" is the rest of the format string that was passed into
+ * JS_{Convert,Push}Arguments{,VA}. The actual format trailing substring used
+ * in each Convert or PushArguments call is passed to the formatter, so that
+ * one such function may implement several formats, in order to share code.
+ *
+ * Remove just forgets about any handler associated with format. Add does not
+ * copy format, it points at the string storage allocated by the caller, which
+ * is typically a string constant. If format is in dynamic storage, it is up
+ * to the caller to keep the string alive until Remove is called.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_AddArgumentFormatter(JSContext *cx, const char *format,
+ JSArgumentFormatter formatter);
+
+extern JS_PUBLIC_API(void)
+JS_RemoveArgumentFormatter(JSContext *cx, const char *format);
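+
+/*
+ * An illustrative sketch (not from the original header) of a formatter for a
+ * hypothetical "O" format that converts an argument to a JSObject *; the
+ * push (fromJS == JS_FALSE) direction is deliberately left unhandled here.
+ *
+ *   static JSBool
+ *   ObjectFormatter(JSContext *cx, const char *format, JSBool fromJS,
+ *                   jsval **vpp, va_list *app)
+ *   {
+ *       JSObject **objp;
+ *
+ *       if (!fromJS)
+ *           return JS_FALSE;
+ *       objp = va_arg(*app, JSObject **);
+ *       if (!JS_ValueToObject(cx, **vpp, objp))
+ *           return JS_FALSE;
+ *       *vpp += 1;
+ *       return JS_TRUE;
+ *   }
+ *
+ *   JS_AddArgumentFormatter(cx, "O", ObjectFormatter);
+ */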
+
+#endif /* JS_ARGUMENT_FORMATTER_DEFINED */
+
+extern JS_PUBLIC_API(JSBool)
+JS_ConvertValue(JSContext *cx, jsval v, JSType type, jsval *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_ValueToObject(JSContext *cx, jsval v, JSObject **objp);
+
+extern JS_PUBLIC_API(JSFunction *)
+JS_ValueToFunction(JSContext *cx, jsval v);
+
+extern JS_PUBLIC_API(JSFunction *)
+JS_ValueToConstructor(JSContext *cx, jsval v);
+
+extern JS_PUBLIC_API(JSString *)
+JS_ValueToString(JSContext *cx, jsval v);
+
+extern JS_PUBLIC_API(JSBool)
+JS_ValueToNumber(JSContext *cx, jsval v, jsdouble *dp);
+
+/*
+ * Convert a value to a number, then to an int32, according to the ECMA rules
+ * for ToInt32.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_ValueToECMAInt32(JSContext *cx, jsval v, int32 *ip);
+
+/*
+ * Convert a value to a number, then to a uint32, according to the ECMA rules
+ * for ToUint32.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_ValueToECMAUint32(JSContext *cx, jsval v, uint32 *ip);
+
+/*
+ * Convert a value to a number, then to an int32 if it fits by rounding to
+ * nearest; but failing with an error report if the double is out of range
+ * or unordered.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_ValueToInt32(JSContext *cx, jsval v, int32 *ip);
+
+/*
+ * ECMA ToUint16, for mapping a jsval to a Unicode point.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_ValueToUint16(JSContext *cx, jsval v, uint16 *ip);
+
+extern JS_PUBLIC_API(JSBool)
+JS_ValueToBoolean(JSContext *cx, jsval v, JSBool *bp);
+
+extern JS_PUBLIC_API(JSType)
+JS_TypeOfValue(JSContext *cx, jsval v);
+
+extern JS_PUBLIC_API(const char *)
+JS_GetTypeName(JSContext *cx, JSType type);
+
+/************************************************************************/
+
+/*
+ * Initialization, locking, contexts, and memory allocation.
+ */
+#define JS_NewRuntime JS_Init
+#define JS_DestroyRuntime JS_Finish
+#define JS_LockRuntime JS_Lock
+#define JS_UnlockRuntime JS_Unlock
+
+extern JS_PUBLIC_API(JSRuntime *)
+JS_NewRuntime(uint32 maxbytes);
+
+extern JS_PUBLIC_API(void)
+JS_DestroyRuntime(JSRuntime *rt);
+
+extern JS_PUBLIC_API(void)
+JS_ShutDown(void);
+
+JS_PUBLIC_API(void *)
+JS_GetRuntimePrivate(JSRuntime *rt);
+
+JS_PUBLIC_API(void)
+JS_SetRuntimePrivate(JSRuntime *rt, void *data);
+
+#ifdef JS_THREADSAFE
+
+extern JS_PUBLIC_API(void)
+JS_BeginRequest(JSContext *cx);
+
+extern JS_PUBLIC_API(void)
+JS_EndRequest(JSContext *cx);
+
+/* Yield to pending GC operations, regardless of request depth */
+extern JS_PUBLIC_API(void)
+JS_YieldRequest(JSContext *cx);
+
+extern JS_PUBLIC_API(jsrefcount)
+JS_SuspendRequest(JSContext *cx);
+
+extern JS_PUBLIC_API(void)
+JS_ResumeRequest(JSContext *cx, jsrefcount saveDepth);
+
+#ifdef __cplusplus
+JS_END_EXTERN_C
+
+class JSAutoRequest {
+ public:
+ JSAutoRequest(JSContext *cx) : mContext(cx), mSaveDepth(0) {
+ JS_BeginRequest(mContext);
+ }
+ ~JSAutoRequest() {
+ JS_EndRequest(mContext);
+ }
+
+ void suspend() {
+ mSaveDepth = JS_SuspendRequest(mContext);
+ }
+ void resume() {
+ JS_ResumeRequest(mContext, mSaveDepth);
+ }
+
+ protected:
+ JSContext *mContext;
+ jsrefcount mSaveDepth;
+
+#if 0
+ private:
+ static void *operator new(size_t) CPP_THROW_NEW { return 0; };
+ static void operator delete(void *, size_t) { };
+#endif
+};
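+
+/*
+ * A usage sketch (illustrative only): hold a request for the duration of a
+ * C++ scope and suspend it around a blocking call so other threads may run
+ * the GC. callIntoJS and doBlockingWork are hypothetical helpers.
+ *
+ *   {
+ *       JSAutoRequest ar(cx);
+ *       callIntoJS(cx);
+ *       ar.suspend();
+ *       doBlockingWork();
+ *       ar.resume();
+ *       callIntoJS(cx);
+ *   }
+ */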
+
+JS_BEGIN_EXTERN_C
+#endif
+
+#endif /* JS_THREADSAFE */
+
+extern JS_PUBLIC_API(void)
+JS_Lock(JSRuntime *rt);
+
+extern JS_PUBLIC_API(void)
+JS_Unlock(JSRuntime *rt);
+
+extern JS_PUBLIC_API(JSContextCallback)
+JS_SetContextCallback(JSRuntime *rt, JSContextCallback cxCallback);
+
+extern JS_PUBLIC_API(JSContext *)
+JS_NewContext(JSRuntime *rt, size_t stackChunkSize);
+
+extern JS_PUBLIC_API(void)
+JS_DestroyContext(JSContext *cx);
+
+extern JS_PUBLIC_API(void)
+JS_DestroyContextNoGC(JSContext *cx);
+
+extern JS_PUBLIC_API(void)
+JS_DestroyContextMaybeGC(JSContext *cx);
+
+extern JS_PUBLIC_API(void *)
+JS_GetContextPrivate(JSContext *cx);
+
+extern JS_PUBLIC_API(void)
+JS_SetContextPrivate(JSContext *cx, void *data);
+
+extern JS_PUBLIC_API(JSRuntime *)
+JS_GetRuntime(JSContext *cx);
+
+extern JS_PUBLIC_API(JSContext *)
+JS_ContextIterator(JSRuntime *rt, JSContext **iterp);
+
+extern JS_PUBLIC_API(JSVersion)
+JS_GetVersion(JSContext *cx);
+
+extern JS_PUBLIC_API(JSVersion)
+JS_SetVersion(JSContext *cx, JSVersion version);
+
+extern JS_PUBLIC_API(const char *)
+JS_VersionToString(JSVersion version);
+
+extern JS_PUBLIC_API(JSVersion)
+JS_StringToVersion(const char *string);
+
+/*
+ * JS options are orthogonal to version, and may be freely composed with one
+ * another as well as with version.
+ *
+ * JSOPTION_VAROBJFIX is recommended -- see the comments associated with the
+ * prototypes for JS_ExecuteScript, JS_EvaluateScript, etc.
+ */
+#define JSOPTION_STRICT JS_BIT(0) /* warn on dubious practice */
+#define JSOPTION_WERROR JS_BIT(1) /* convert warning to error */
+#define JSOPTION_VAROBJFIX JS_BIT(2) /* make JS_EvaluateScript use
+ the last object on its 'obj'
+ param's scope chain as the
+ ECMA 'variables object' */
+#define JSOPTION_PRIVATE_IS_NSISUPPORTS \
+ JS_BIT(3) /* context private data points
+ to an nsISupports subclass */
+#define JSOPTION_COMPILE_N_GO JS_BIT(4) /* caller of JS_Compile*Script
+ promises to execute compiled
+ script once only; enables
+ compile-time scope chain
+ resolution of consts. */
+#define JSOPTION_ATLINE JS_BIT(5) /* //@line number ["filename"]
+ option supported for the
+ XUL preprocessor and kindred
+ beasts. */
+#define JSOPTION_XML JS_BIT(6) /* ECMAScript for XML support:

+ parse <!-- --> as a token,
+ not backward compatible with
+ the comment-hiding hack used
+ in HTML script tags. */
+#define JSOPTION_NATIVE_BRANCH_CALLBACK \
+ JS_BIT(7) /* the branch callback set by
+ JS_SetBranchCallback may be
+ called with a null script
+ parameter, by native code
+ that loops intensively */
+#define JSOPTION_DONT_REPORT_UNCAUGHT \
+ JS_BIT(8) /* When returning from the
+ outermost API call, prevent
+ uncaught exceptions from
+ being converted to error
+ reports */
+
+extern JS_PUBLIC_API(uint32)
+JS_GetOptions(JSContext *cx);
+
+extern JS_PUBLIC_API(uint32)
+JS_SetOptions(JSContext *cx, uint32 options);
+
+extern JS_PUBLIC_API(uint32)
+JS_ToggleOptions(JSContext *cx, uint32 options);
+
+extern JS_PUBLIC_API(const char *)
+JS_GetImplementationVersion(void);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_GetGlobalObject(JSContext *cx);
+
+extern JS_PUBLIC_API(void)
+JS_SetGlobalObject(JSContext *cx, JSObject *obj);
+
+/*
+ * Initialize standard JS class constructors, prototypes, and any top-level
+ * functions and constants associated with the standard classes (e.g. isNaN
+ * for Number).
+ *
+ * NB: This sets cx's global object to obj if it was null.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_InitStandardClasses(JSContext *cx, JSObject *obj);
+
+/*
+ * Resolve id, which must contain either a string or an int, to a standard
+ * class name in obj if possible, defining the class's constructor and/or
+ * prototype and storing true in *resolved. If id does not name a standard
+ * class or a top-level property induced by initializing a standard class,
+ * store false in *resolved and just return true. Return false on error,
+ * as usual for JSBool result-typed API entry points.
+ *
+ * This API can be called directly from a global object class's resolve op,
+ * to define standard classes lazily. The class's enumerate op should call
+ * JS_EnumerateStandardClasses(cx, obj), to define eagerly during for..in
+ * loops any classes not yet resolved lazily.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_ResolveStandardClass(JSContext *cx, JSObject *obj, jsval id,
+ JSBool *resolved);
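+
+/*
+ * A lazy-resolution sketch (illustrative, not from the original header) for
+ * a global class's resolve hook:
+ *
+ *   static JSBool
+ *   global_resolve(JSContext *cx, JSObject *obj, jsval id)
+ *   {
+ *       JSBool resolved;
+ *
+ *       return JS_ResolveStandardClass(cx, obj, id, &resolved);
+ *   }
+ */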
+
+extern JS_PUBLIC_API(JSBool)
+JS_EnumerateStandardClasses(JSContext *cx, JSObject *obj);
+
+/*
+ * Enumerate any already-resolved standard class ids into ida, or into a new
+ * JSIdArray if ida is null. Return the augmented array on success, null on
+ * failure with ida (if it was non-null on entry) destroyed.
+ */
+extern JS_PUBLIC_API(JSIdArray *)
+JS_EnumerateResolvedStandardClasses(JSContext *cx, JSObject *obj,
+ JSIdArray *ida);
+
+extern JS_PUBLIC_API(JSBool)
+JS_GetClassObject(JSContext *cx, JSObject *obj, JSProtoKey key,
+ JSObject **objp);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_GetScopeChain(JSContext *cx);
+
+extern JS_PUBLIC_API(void *)
+JS_malloc(JSContext *cx, size_t nbytes);
+
+extern JS_PUBLIC_API(void *)
+JS_realloc(JSContext *cx, void *p, size_t nbytes);
+
+extern JS_PUBLIC_API(void)
+JS_free(JSContext *cx, void *p);
+
+extern JS_PUBLIC_API(char *)
+JS_strdup(JSContext *cx, const char *s);
+
+extern JS_PUBLIC_API(jsdouble *)
+JS_NewDouble(JSContext *cx, jsdouble d);
+
+extern JS_PUBLIC_API(JSBool)
+JS_NewDoubleValue(JSContext *cx, jsdouble d, jsval *rval);
+
+extern JS_PUBLIC_API(JSBool)
+JS_NewNumberValue(JSContext *cx, jsdouble d, jsval *rval);
+
+/*
+ * A JS GC root is a pointer to a JSObject *, JSString *, or jsdouble * that
+ * itself points into the GC heap (more recently, we support this extension:
+ * a root may be a pointer to a jsval v for which JSVAL_IS_GCTHING(v) is true).
+ *
+ * Therefore, you never pass JSObject *obj to JS_AddRoot(cx, obj). You always
+ * call JS_AddRoot(cx, &obj), passing obj by reference. And later, before obj
+ * or the structure it is embedded within goes out of scope or is freed, you
+ * must call JS_RemoveRoot(cx, &obj).
+ *
+ * Also, use JS_AddNamedRoot(cx, &structPtr->memberObj, "structPtr->memberObj")
+ * in preference to JS_AddRoot(cx, &structPtr->memberObj), in order to identify
+ * roots by their source callsites. This way, you can find the callsite while
+ * debugging if you should fail to do JS_RemoveRoot(cx, &structPtr->memberObj)
+ * before freeing structPtr's memory.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_AddRoot(JSContext *cx, void *rp);
+
+#ifdef NAME_ALL_GC_ROOTS
+#define JS_DEFINE_TO_TOKEN(def) #def
+#define JS_DEFINE_TO_STRING(def) JS_DEFINE_TO_TOKEN(def)
+#define JS_AddRoot(cx,rp) JS_AddNamedRoot((cx), (rp), (__FILE__ ":" JS_DEFINE_TO_STRING(__LINE__)))
+#endif
+
+extern JS_PUBLIC_API(JSBool)
+JS_AddNamedRoot(JSContext *cx, void *rp, const char *name);
+
+extern JS_PUBLIC_API(JSBool)
+JS_AddNamedRootRT(JSRuntime *rt, void *rp, const char *name);
+
+extern JS_PUBLIC_API(JSBool)
+JS_RemoveRoot(JSContext *cx, void *rp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_RemoveRootRT(JSRuntime *rt, void *rp);
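+
+/*
+ * A rooting sketch (illustrative only): keep a newborn object alive across
+ * code that may trigger GC, then unroot it before the variable goes out of
+ * scope.
+ *
+ *   JSObject *obj = JS_NewObject(cx, NULL, NULL, NULL);
+ *
+ *   if (!obj || !JS_AddNamedRoot(cx, &obj, "obj"))
+ *       return JS_FALSE;
+ *   ... code that may allocate and GC ...
+ *   JS_RemoveRoot(cx, &obj);
+ */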
+
+/*
+ * The last GC thing of each type (object, string, double, external string
+ * types) created on a given context is kept alive until another thing of the
+ * same type is created, using a newborn root in the context. These newborn
+ * roots help native code protect newly-created GC-things from GC invocations
+ * activated before those things can be rooted using local or global roots.
+ *
+ * However, the newborn roots can also entrain great gobs of garbage, so the
+ * JS_GC entry point clears them for the context on which GC is being forced.
+ * Embeddings may need to do likewise for all contexts.
+ *
+ * See the scoped local root API immediately below for a better way to manage
+ * newborns in cases where native hooks (functions, getters, setters, etc.)
+ * create many GC-things, potentially without connecting them to predefined
+ * local roots such as *rval or argv[i] in an active native function. Using
+ * JS_EnterLocalRootScope disables updating of the context's per-gc-thing-type
+ * newborn roots, until control flow unwinds and leaves the outermost nesting
+ * local root scope.
+ */
+extern JS_PUBLIC_API(void)
+JS_ClearNewbornRoots(JSContext *cx);
+
+/*
+ * Scoped local root management allows native functions, getter/setters, etc.
+ * to avoid worrying about the newborn root pigeon-holes, overloading local
+ * roots allocated in argv and *rval, or ending up having to call JS_Add*Root
+ * and JS_RemoveRoot to manage global roots temporarily.
+ *
+ * Instead, calling JS_EnterLocalRootScope and JS_LeaveLocalRootScope around
+ * the body of the native hook causes the engine to allocate a local root for
+ * each newborn created in between the two API calls, using a local root stack
+ * associated with cx. For example:
+ *
+ * JSBool
+ * my_GetProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+ * {
+ * JSBool ok;
+ *
+ * if (!JS_EnterLocalRootScope(cx))
+ * return JS_FALSE;
+ * ok = my_GetPropertyBody(cx, obj, id, vp);
+ * JS_LeaveLocalRootScope(cx);
+ * return ok;
+ * }
+ *
+ * NB: JS_LeaveLocalRootScope must be called once for every prior successful
+ * call to JS_EnterLocalRootScope. If JS_EnterLocalRootScope fails, you must
+ * not make the matching JS_LeaveLocalRootScope call.
+ *
+ * JS_LeaveLocalRootScopeWithResult(cx, rval) is an alternative way to leave
+ * a local root scope that protects a result or return value, by effectively
+ * pushing it in the caller's local root scope.
+ *
+ * In case a native hook allocates many objects or other GC-things, but the
+ * native protects some of those GC-things by storing them as property values
+ * in an object that is itself protected, the hook can call JS_ForgetLocalRoot
+ * to free the local root automatically pushed for the now-protected GC-thing.
+ *
+ * JS_ForgetLocalRoot works on any GC-thing allocated in the current local
+ * root scope, but it's more time-efficient when called on references to more
+ * recently created GC-things. Calling it successively on other than the most
+ * recently allocated GC-thing will tend to average the time inefficiency, and
+ * may risk O(n^2) growth rate, but in any event, you shouldn't allocate too
+ * many local roots if you can root as you go (build a tree of objects from
+ * the top down, forgetting each latest-allocated GC-thing immediately upon
+ * linking it to its parent).
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_EnterLocalRootScope(JSContext *cx);
+
+extern JS_PUBLIC_API(void)
+JS_LeaveLocalRootScope(JSContext *cx);
+
+extern JS_PUBLIC_API(void)
+JS_LeaveLocalRootScopeWithResult(JSContext *cx, jsval rval);
+
+extern JS_PUBLIC_API(void)
+JS_ForgetLocalRoot(JSContext *cx, void *thing);
+
+#ifdef __cplusplus
+JS_END_EXTERN_C
+
+class JSAutoLocalRootScope {
+ public:
+ JSAutoLocalRootScope(JSContext *cx) : mContext(cx) {
+ JS_EnterLocalRootScope(mContext);
+ }
+ ~JSAutoLocalRootScope() {
+ JS_LeaveLocalRootScope(mContext);
+ }
+
+ void forget(void *thing) {
+ JS_ForgetLocalRoot(mContext, thing);
+ }
+
+ protected:
+ JSContext *mContext;
+
+#if 0
+ private:
+ static void *operator new(size_t) CPP_THROW_NEW { return 0; };
+ static void operator delete(void *, size_t) { };
+#endif
+};
+
+JS_BEGIN_EXTERN_C
+#endif
+
+#ifdef DEBUG
+extern JS_PUBLIC_API(void)
+JS_DumpNamedRoots(JSRuntime *rt,
+ void (*dump)(const char *name, void *rp, void *data),
+ void *data);
+#endif
+
+/*
+ * Call JS_MapGCRoots to map the GC's roots table using map(rp, name, data).
+ * The root is pointed at by rp; if the root is unnamed, name is null; data is
+ * supplied from the third parameter to JS_MapGCRoots.
+ *
+ * The map function should return JS_MAP_GCROOT_REMOVE to cause the currently
+ * enumerated root to be removed. To stop enumeration, set JS_MAP_GCROOT_STOP
+ * in the return value. To keep on mapping, return JS_MAP_GCROOT_NEXT. These
+ * constants are flags; you can OR them together.
+ *
+ * This function acquires and releases rt's GC lock around the mapping of the
+ * roots table, so the map function should run to completion in as few cycles
+ * as possible. Of course, map cannot call JS_GC, JS_MaybeGC, JS_BeginRequest,
+ * or any JS API entry point that acquires locks, without double-tripping or
+ * deadlocking on the GC lock.
+ *
+ * JS_MapGCRoots returns the count of roots that were successfully mapped.
+ */
+#define JS_MAP_GCROOT_NEXT 0 /* continue mapping entries */
+#define JS_MAP_GCROOT_STOP 1 /* stop mapping entries */
+#define JS_MAP_GCROOT_REMOVE 2 /* remove and free the current entry */
+
+typedef intN
+(* JS_DLL_CALLBACK JSGCRootMapFun)(void *rp, const char *name, void *data);
+
+extern JS_PUBLIC_API(uint32)
+JS_MapGCRoots(JSRuntime *rt, JSGCRootMapFun map, void *data);
+
+extern JS_PUBLIC_API(JSBool)
+JS_LockGCThing(JSContext *cx, void *thing);
+
+extern JS_PUBLIC_API(JSBool)
+JS_LockGCThingRT(JSRuntime *rt, void *thing);
+
+extern JS_PUBLIC_API(JSBool)
+JS_UnlockGCThing(JSContext *cx, void *thing);
+
+extern JS_PUBLIC_API(JSBool)
+JS_UnlockGCThingRT(JSRuntime *rt, void *thing);
+
+/*
+ * For implementors of JSObjectOps.mark, to mark a GC-thing reachable via a
+ * property or other strong ref identified for debugging purposes by name.
+ * The name argument's storage needs to live only as long as the call to
+ * this routine.
+ *
+ * The final arg is used by GC_MARK_DEBUG code to build a ref path through
+ * the GC's live thing graph. Implementors of JSObjectOps.mark should pass
+ * its final arg through to this function when marking all GC-things that are
+ * directly reachable from the object being marked.
+ *
+ * See the JSMarkOp typedef in jspubtd.h, and the JSObjectOps struct below.
+ */
+extern JS_PUBLIC_API(void)
+JS_MarkGCThing(JSContext *cx, void *thing, const char *name, void *arg);
+
+extern JS_PUBLIC_API(void)
+JS_GC(JSContext *cx);
+
+extern JS_PUBLIC_API(void)
+JS_MaybeGC(JSContext *cx);
+
+extern JS_PUBLIC_API(JSGCCallback)
+JS_SetGCCallback(JSContext *cx, JSGCCallback cb);
+
+extern JS_PUBLIC_API(JSGCCallback)
+JS_SetGCCallbackRT(JSRuntime *rt, JSGCCallback cb);
+
+extern JS_PUBLIC_API(JSBool)
+JS_IsAboutToBeFinalized(JSContext *cx, void *thing);
+
+typedef enum JSGCParamKey {
+ JSGC_MAX_BYTES = 0, /* maximum nominal heap before last ditch GC */
+ JSGC_MAX_MALLOC_BYTES = 1 /* # of JS_malloc bytes before last ditch GC */
+} JSGCParamKey;
+
+extern JS_PUBLIC_API(void)
+JS_SetGCParameter(JSRuntime *rt, JSGCParamKey key, uint32 value);
+
+/*
+ * Add a finalizer for external strings created by JS_NewExternalString (see
+ * below) using a type-code returned from this function, and that understands
+ * how to free or release the memory pointed at by JS_GetStringChars(str).
+ *
+ * Return a nonnegative type index if there is room for the finalizer in the
+ * global GC finalizers table, else return -1. If the engine is compiled
+ * JS_THREADSAFE and used in a multi-threaded environment, this function must
+ * be invoked on the primordial thread only, at startup -- or else the entire
+ * program must single-thread itself while loading a module that calls this
+ * function.
+ */
+extern JS_PUBLIC_API(intN)
+JS_AddExternalStringFinalizer(JSStringFinalizeOp finalizer);
+
+/*
+ * Remove finalizer from the global GC finalizers table, returning its type
+ * code if found, -1 if not found.
+ *
+ * As with JS_AddExternalStringFinalizer, there is a threading restriction
+ * if you compile the engine JS_THREADSAFE: this function may be called for a
+ * given finalizer pointer on only one thread; different threads may call to
+ * remove distinct finalizers safely.
+ *
+ * You must ensure that all strings with finalizer's type have been collected
+ * before calling this function. Otherwise, string data will be leaked by the
+ * GC, for want of a finalizer to call.
+ */
+extern JS_PUBLIC_API(intN)
+JS_RemoveExternalStringFinalizer(JSStringFinalizeOp finalizer);
+
+/*
+ * Create a new JSString whose chars member refers to external memory, i.e.,
+ * memory requiring special, type-specific finalization. The type code must
+ * be a nonnegative return value from JS_AddExternalStringFinalizer.
+ */
+extern JS_PUBLIC_API(JSString *)
+JS_NewExternalString(JSContext *cx, jschar *chars, size_t length, intN type);
+
+/*
+ * Returns the external-string finalizer index for this string, or -1 if it is
+ * an "internal" (native to JS engine) string.
+ */
+extern JS_PUBLIC_API(intN)
+JS_GetExternalStringGCType(JSRuntime *rt, JSString *str);
+
+/*
+ * Sets maximum (if stack grows upward) or minimum (downward) legal stack byte
+ * address in limitAddr for the thread or process stack used by cx. To disable
+ * stack size checking, pass 0 for limitAddr.
+ */
+extern JS_PUBLIC_API(void)
+JS_SetThreadStackLimit(JSContext *cx, jsuword limitAddr);
+
+/************************************************************************/
+
+/*
+ * Classes, objects, and properties.
+ */
+
+/* For detailed comments on the function pointer types, see jspubtd.h. */
+struct JSClass {
+ const char *name;
+ uint32 flags;
+
+ /* Mandatory non-null function pointer members. */
+ JSPropertyOp addProperty;
+ JSPropertyOp delProperty;
+ JSPropertyOp getProperty;
+ JSPropertyOp setProperty;
+ JSEnumerateOp enumerate;
+ JSResolveOp resolve;
+ JSConvertOp convert;
+ JSFinalizeOp finalize;
+
+ /* Optionally non-null members start here. */
+ JSGetObjectOps getObjectOps;
+ JSCheckAccessOp checkAccess;
+ JSNative call;
+ JSNative construct;
+ JSXDRObjectOp xdrObject;
+ JSHasInstanceOp hasInstance;
+ JSMarkOp mark;
+ JSReserveSlotsOp reserveSlots;
+};
+
+struct JSExtendedClass {
+ JSClass base;
+ JSEqualityOp equality;
+ JSObjectOp outerObject;
+ JSObjectOp innerObject;
+ void (*reserved0)();
+ void (*reserved1)();
+ void (*reserved2)();
+ void (*reserved3)();
+ void (*reserved4)();
+};
+
+#define JSCLASS_HAS_PRIVATE (1<<0) /* objects have private slot */
+#define JSCLASS_NEW_ENUMERATE (1<<1) /* has JSNewEnumerateOp hook */
+#define JSCLASS_NEW_RESOLVE (1<<2) /* has JSNewResolveOp hook */
+#define JSCLASS_PRIVATE_IS_NSISUPPORTS (1<<3) /* private is (nsISupports *) */
+#define JSCLASS_SHARE_ALL_PROPERTIES (1<<4) /* all properties are SHARED */
+#define JSCLASS_NEW_RESOLVE_GETS_START (1<<5) /* JSNewResolveOp gets starting
+ object in prototype chain
+ passed in via *objp in/out
+ parameter */
+#define JSCLASS_CONSTRUCT_PROTOTYPE (1<<6) /* call constructor on class
+ prototype */
+#define JSCLASS_DOCUMENT_OBSERVER (1<<7) /* DOM document observer */
+
+/*
+ * To reserve slots fetched and stored via JS_Get/SetReservedSlot, bitwise-or
+ * JSCLASS_HAS_RESERVED_SLOTS(n) into the initializer for JSClass.flags, where
+ * n is a constant in [1, 255]. Reserved slots are indexed from 0 to n-1.
+ */
+#define JSCLASS_RESERVED_SLOTS_SHIFT 8 /* room for 8 flags below */
+#define JSCLASS_RESERVED_SLOTS_WIDTH 8 /* and 16 above this field */
+#define JSCLASS_RESERVED_SLOTS_MASK JS_BITMASK(JSCLASS_RESERVED_SLOTS_WIDTH)
+#define JSCLASS_HAS_RESERVED_SLOTS(n) (((n) & JSCLASS_RESERVED_SLOTS_MASK) \
+ << JSCLASS_RESERVED_SLOTS_SHIFT)
+#define JSCLASS_RESERVED_SLOTS(clasp) (((clasp)->flags \
+ >> JSCLASS_RESERVED_SLOTS_SHIFT) \
+ & JSCLASS_RESERVED_SLOTS_MASK)
+
+#define JSCLASS_HIGH_FLAGS_SHIFT (JSCLASS_RESERVED_SLOTS_SHIFT + \
+ JSCLASS_RESERVED_SLOTS_WIDTH)
+
+/* True if JSClass is really a JSExtendedClass. */
+#define JSCLASS_IS_EXTENDED (1<<(JSCLASS_HIGH_FLAGS_SHIFT+0))
+#define JSCLASS_IS_ANONYMOUS (1<<(JSCLASS_HIGH_FLAGS_SHIFT+1))
+#define JSCLASS_IS_GLOBAL (1<<(JSCLASS_HIGH_FLAGS_SHIFT+2))
+
+/*
+ * ECMA-262 requires that most constructors used internally create objects
+ * with "the original Foo.prototype value" as their [[Prototype]] (__proto__)
+ * member initial value. The "original ... value" verbiage is there because
+ * in ECMA-262, global properties naming class objects are read/write and
+ * deleteable, for the most part.
+ *
+ * Implementing this efficiently requires that global objects have classes
+ * with the following flags. Failure to use JSCLASS_GLOBAL_FLAGS won't break
+ * anything except the ECMA-262 "original prototype value" behavior, which was
+ * broken for years in SpiderMonkey. In other words, without these flags you
+ * get backward compatibility.
+ */
+#define JSCLASS_GLOBAL_FLAGS \
+ (JSCLASS_IS_GLOBAL | JSCLASS_HAS_RESERVED_SLOTS(JSProto_LIMIT))
+
+/* Fast access to the original value of each standard class's prototype. */
+#define JSCLASS_CACHED_PROTO_SHIFT (JSCLASS_HIGH_FLAGS_SHIFT + 8)
+#define JSCLASS_CACHED_PROTO_WIDTH 8
+#define JSCLASS_CACHED_PROTO_MASK JS_BITMASK(JSCLASS_CACHED_PROTO_WIDTH)
+#define JSCLASS_HAS_CACHED_PROTO(key) ((key) << JSCLASS_CACHED_PROTO_SHIFT)
+#define JSCLASS_CACHED_PROTO_KEY(clasp) (((clasp)->flags \
+ >> JSCLASS_CACHED_PROTO_SHIFT) \
+ & JSCLASS_CACHED_PROTO_MASK)
+
+/* Initializer for unused members of statically initialized JSClass structs. */
+#define JSCLASS_NO_OPTIONAL_MEMBERS 0,0,0,0,0,0,0,0
+#define JSCLASS_NO_RESERVED_MEMBERS 0,0,0,0,0
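+
+/*
+ * For illustration (not part of the original header): a minimal statically
+ * initialized class with a private slot and one reserved slot, using the
+ * stub hooks declared later in this file.
+ *
+ *   static JSClass my_class = {
+ *       "MyClass",
+ *       JSCLASS_HAS_PRIVATE | JSCLASS_HAS_RESERVED_SLOTS(1),
+ *       JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ *       JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, JS_FinalizeStub,
+ *       JSCLASS_NO_OPTIONAL_MEMBERS
+ *   };
+ */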
+
+/* For detailed comments on these function pointer types, see jspubtd.h. */
+struct JSObjectOps {
+ /* Mandatory non-null function pointer members. */
+ JSNewObjectMapOp newObjectMap;
+ JSObjectMapOp destroyObjectMap;
+ JSLookupPropOp lookupProperty;
+ JSDefinePropOp defineProperty;
+ JSPropertyIdOp getProperty;
+ JSPropertyIdOp setProperty;
+ JSAttributesOp getAttributes;
+ JSAttributesOp setAttributes;
+ JSPropertyIdOp deleteProperty;
+ JSConvertOp defaultValue;
+ JSNewEnumerateOp enumerate;
+ JSCheckAccessIdOp checkAccess;
+
+ /* Optionally non-null members start here. */
+ JSObjectOp thisObject;
+ JSPropertyRefOp dropProperty;
+ JSNative call;
+ JSNative construct;
+ JSXDRObjectOp xdrObject;
+ JSHasInstanceOp hasInstance;
+ JSSetObjectSlotOp setProto;
+ JSSetObjectSlotOp setParent;
+ JSMarkOp mark;
+ JSFinalizeOp clear;
+ JSGetRequiredSlotOp getRequiredSlot;
+ JSSetRequiredSlotOp setRequiredSlot;
+};
+
+struct JSXMLObjectOps {
+ JSObjectOps base;
+ JSGetMethodOp getMethod;
+ JSSetMethodOp setMethod;
+ JSEnumerateValuesOp enumerateValues;
+ JSEqualityOp equality;
+ JSConcatenateOp concatenate;
+};
+
+/*
+ * Classes that expose JSObjectOps via a non-null getObjectOps class hook may
+ * derive a property structure from this struct, return a pointer to it from
+ * lookupProperty and defineProperty, and use the pointer to avoid rehashing
+ * in getAttributes and setAttributes.
+ *
+ * The jsid type contains either an int jsval (see JSVAL_IS_INT above), or an
+ * internal pointer that is opaque to users of this API, but which users may
+ * convert from and to a jsval using JS_ValueToId and JS_IdToValue.
+ */
+struct JSProperty {
+ jsid id;
+};
+
+struct JSIdArray {
+ jsint length;
+ jsid vector[1]; /* actually, length jsid words */
+};
+
+extern JS_PUBLIC_API(void)
+JS_DestroyIdArray(JSContext *cx, JSIdArray *ida);
+
+extern JS_PUBLIC_API(JSBool)
+JS_ValueToId(JSContext *cx, jsval v, jsid *idp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_IdToValue(JSContext *cx, jsid id, jsval *vp);
+
+/*
+ * The magic XML namespace id is int-tagged, but not a valid integer jsval.
+ * Global object classes in embeddings that enable JS_HAS_XML_SUPPORT (E4X)
+ * should handle this id specially before converting id via JSVAL_TO_INT.
+ */
+#define JS_DEFAULT_XML_NAMESPACE_ID ((jsid) JSVAL_VOID)
+
+/*
+ * JSNewResolveOp flag bits.
+ */
+#define JSRESOLVE_QUALIFIED 0x01 /* resolve a qualified property id */
+#define JSRESOLVE_ASSIGNING 0x02 /* resolve on the left of assignment */
+#define JSRESOLVE_DETECTING 0x04 /* 'if (o.p)...' or '(o.p) ?...:...' */
+#define JSRESOLVE_DECLARING 0x08 /* var, const, or function prolog op */
+#define JSRESOLVE_CLASSNAME 0x10 /* class name used when constructing */
+
+extern JS_PUBLIC_API(JSBool)
+JS_PropertyStub(JSContext *cx, JSObject *obj, jsval id, jsval *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_EnumerateStub(JSContext *cx, JSObject *obj);
+
+extern JS_PUBLIC_API(JSBool)
+JS_ResolveStub(JSContext *cx, JSObject *obj, jsval id);
+
+extern JS_PUBLIC_API(JSBool)
+JS_ConvertStub(JSContext *cx, JSObject *obj, JSType type, jsval *vp);
+
+extern JS_PUBLIC_API(void)
+JS_FinalizeStub(JSContext *cx, JSObject *obj);
+
+struct JSConstDoubleSpec {
+ jsdouble dval;
+ const char *name;
+ uint8 flags;
+ uint8 spare[3];
+};
+
+/*
+ * To define an array element rather than a named property member, cast the
+ * element's index to (const char *) and initialize name with it, and set the
+ * JSPROP_INDEX bit in flags.
+ */
+struct JSPropertySpec {
+ const char *name;
+ int8 tinyid;
+ uint8 flags;
+ JSPropertyOp getter;
+ JSPropertyOp setter;
+};
+
+struct JSFunctionSpec {
+ const char *name;
+ JSNative call;
+#ifdef MOZILLA_1_8_BRANCH
+ uint8 nargs;
+ uint8 flags;
+ uint16 extra;
+#else
+ uint16 nargs;
+ uint16 flags;
+ uint32 extra; /* extra & 0xFFFF:
+ number of arg slots for local GC roots
+ extra >> 16:
+ reserved, must be zero */
+#endif
+};
+
+extern JS_PUBLIC_API(JSObject *)
+JS_InitClass(JSContext *cx, JSObject *obj, JSObject *parent_proto,
+ JSClass *clasp, JSNative constructor, uintN nargs,
+ JSPropertySpec *ps, JSFunctionSpec *fs,
+ JSPropertySpec *static_ps, JSFunctionSpec *static_fs);
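+
+/*
+ * A sketch (illustrative only; my_class, my_constructor, and my_frob are
+ * hypothetical) of wiring up a class with one enumerable property and one
+ * method via statically allocated spec arrays:
+ *
+ *   static JSPropertySpec my_props[] = {
+ *       {"count", 0, JSPROP_ENUMERATE, NULL, NULL},
+ *       {NULL, 0, 0, NULL, NULL}
+ *   };
+ *
+ *   static JSFunctionSpec my_methods[] = {
+ *       {"frob", my_frob, 1, 0, 0},
+ *       {NULL, NULL, 0, 0, 0}
+ *   };
+ *
+ *   JSObject *proto = JS_InitClass(cx, global, NULL, &my_class,
+ *                                  my_constructor, 0, my_props, my_methods,
+ *                                  NULL, NULL);
+ */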
+
+#ifdef JS_THREADSAFE
+extern JS_PUBLIC_API(JSClass *)
+JS_GetClass(JSContext *cx, JSObject *obj);
+
+#define JS_GET_CLASS(cx,obj) JS_GetClass(cx, obj)
+#else
+extern JS_PUBLIC_API(JSClass *)
+JS_GetClass(JSObject *obj);
+
+#define JS_GET_CLASS(cx,obj) JS_GetClass(obj)
+#endif
+
+extern JS_PUBLIC_API(JSBool)
+JS_InstanceOf(JSContext *cx, JSObject *obj, JSClass *clasp, jsval *argv);
+
+extern JS_PUBLIC_API(JSBool)
+JS_HasInstance(JSContext *cx, JSObject *obj, jsval v, JSBool *bp);
+
+extern JS_PUBLIC_API(void *)
+JS_GetPrivate(JSContext *cx, JSObject *obj);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetPrivate(JSContext *cx, JSObject *obj, void *data);
+
+extern JS_PUBLIC_API(void *)
+JS_GetInstancePrivate(JSContext *cx, JSObject *obj, JSClass *clasp,
+ jsval *argv);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_GetPrototype(JSContext *cx, JSObject *obj);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetPrototype(JSContext *cx, JSObject *obj, JSObject *proto);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_GetParent(JSContext *cx, JSObject *obj);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetParent(JSContext *cx, JSObject *obj, JSObject *parent);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_GetConstructor(JSContext *cx, JSObject *proto);
+
+/*
+ * Get a unique identifier for obj, good for the lifetime of obj (even if it
+ * is moved by a copying GC). Return false on failure (likely out of memory),
+ * and true with *idp containing the unique id on success.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_GetObjectId(JSContext *cx, JSObject *obj, jsid *idp);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_NewObject(JSContext *cx, JSClass *clasp, JSObject *proto, JSObject *parent);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SealObject(JSContext *cx, JSObject *obj, JSBool deep);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_ConstructObject(JSContext *cx, JSClass *clasp, JSObject *proto,
+ JSObject *parent);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_ConstructObjectWithArguments(JSContext *cx, JSClass *clasp, JSObject *proto,
+ JSObject *parent, uintN argc, jsval *argv);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_DefineObject(JSContext *cx, JSObject *obj, const char *name, JSClass *clasp,
+ JSObject *proto, uintN attrs);
+
+extern JS_PUBLIC_API(JSBool)
+JS_DefineConstDoubles(JSContext *cx, JSObject *obj, JSConstDoubleSpec *cds);
+
+extern JS_PUBLIC_API(JSBool)
+JS_DefineProperties(JSContext *cx, JSObject *obj, JSPropertySpec *ps);
+
+extern JS_PUBLIC_API(JSBool)
+JS_DefineProperty(JSContext *cx, JSObject *obj, const char *name, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter, uintN attrs);
+
+/*
+ * Determine the attributes (JSPROP_* flags) of a property on a given object.
+ *
+ * If the object does not have a property by that name, *foundp will be
+ * JS_FALSE and the value of *attrsp is undefined.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_GetPropertyAttributes(JSContext *cx, JSObject *obj, const char *name,
+ uintN *attrsp, JSBool *foundp);
+
+/*
+ * The same, but if the property is native, return its getter and setter via
+ * *getterp and *setterp, respectively (and only if the out parameter pointer
+ * is not null).
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_GetPropertyAttrsGetterAndSetter(JSContext *cx, JSObject *obj,
+ const char *name,
+ uintN *attrsp, JSBool *foundp,
+ JSPropertyOp *getterp,
+ JSPropertyOp *setterp);
+
+/*
+ * Set the attributes of a property on a given object.
+ *
+ * If the object does not have a property by that name, *foundp will be
+ * JS_FALSE and nothing will be altered.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_SetPropertyAttributes(JSContext *cx, JSObject *obj, const char *name,
+ uintN attrs, JSBool *foundp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_DefinePropertyWithTinyId(JSContext *cx, JSObject *obj, const char *name,
+ int8 tinyid, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter,
+ uintN attrs);
+
+extern JS_PUBLIC_API(JSBool)
+JS_AliasProperty(JSContext *cx, JSObject *obj, const char *name,
+ const char *alias);
+
+extern JS_PUBLIC_API(JSBool)
+JS_HasProperty(JSContext *cx, JSObject *obj, const char *name, JSBool *foundp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_LookupProperty(JSContext *cx, JSObject *obj, const char *name, jsval *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_LookupPropertyWithFlags(JSContext *cx, JSObject *obj, const char *name,
+ uintN flags, jsval *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_GetProperty(JSContext *cx, JSObject *obj, const char *name, jsval *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_GetMethodById(JSContext *cx, JSObject *obj, jsid id, JSObject **objp,
+ jsval *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_GetMethod(JSContext *cx, JSObject *obj, const char *name, JSObject **objp,
+ jsval *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetProperty(JSContext *cx, JSObject *obj, const char *name, jsval *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_DeleteProperty(JSContext *cx, JSObject *obj, const char *name);
+
+extern JS_PUBLIC_API(JSBool)
+JS_DeleteProperty2(JSContext *cx, JSObject *obj, const char *name,
+ jsval *rval);
+
+extern JS_PUBLIC_API(JSBool)
+JS_DefineUCProperty(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter,
+ uintN attrs);
+
+/*
+ * Determine the attributes (JSPROP_* flags) of a property on a given object.
+ *
+ * If the object does not have a property by that name, *foundp will be
+ * JS_FALSE and the value of *attrsp is undefined.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_GetUCPropertyAttributes(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ uintN *attrsp, JSBool *foundp);
+
+/*
+ * The same, but if the property is native, return its getter and setter via
+ * *getterp and *setterp, respectively (and only if the out parameter pointer
+ * is not null).
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_GetUCPropertyAttrsGetterAndSetter(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ uintN *attrsp, JSBool *foundp,
+ JSPropertyOp *getterp,
+ JSPropertyOp *setterp);
+
+/*
+ * Set the attributes of a property on a given object.
+ *
+ * If the object does not have a property by that name, *foundp will be
+ * JS_FALSE and nothing will be altered.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_SetUCPropertyAttributes(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ uintN attrs, JSBool *foundp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_DefineUCPropertyWithTinyId(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ int8 tinyid, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter,
+ uintN attrs);
+
+extern JS_PUBLIC_API(JSBool)
+JS_HasUCProperty(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ JSBool *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_LookupUCProperty(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ jsval *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_GetUCProperty(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ jsval *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetUCProperty(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ jsval *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_DeleteUCProperty2(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen,
+ jsval *rval);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_NewArrayObject(JSContext *cx, jsint length, jsval *vector);
+
+extern JS_PUBLIC_API(JSBool)
+JS_IsArrayObject(JSContext *cx, JSObject *obj);
+
+extern JS_PUBLIC_API(JSBool)
+JS_GetArrayLength(JSContext *cx, JSObject *obj, jsuint *lengthp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetArrayLength(JSContext *cx, JSObject *obj, jsuint length);
+
+extern JS_PUBLIC_API(JSBool)
+JS_HasArrayLength(JSContext *cx, JSObject *obj, jsuint *lengthp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_DefineElement(JSContext *cx, JSObject *obj, jsint index, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter, uintN attrs);
+
+extern JS_PUBLIC_API(JSBool)
+JS_AliasElement(JSContext *cx, JSObject *obj, const char *name, jsint alias);
+
+extern JS_PUBLIC_API(JSBool)
+JS_HasElement(JSContext *cx, JSObject *obj, jsint index, JSBool *foundp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_LookupElement(JSContext *cx, JSObject *obj, jsint index, jsval *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_GetElement(JSContext *cx, JSObject *obj, jsint index, jsval *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetElement(JSContext *cx, JSObject *obj, jsint index, jsval *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_DeleteElement(JSContext *cx, JSObject *obj, jsint index);
+
+extern JS_PUBLIC_API(JSBool)
+JS_DeleteElement2(JSContext *cx, JSObject *obj, jsint index, jsval *rval);
+
+extern JS_PUBLIC_API(void)
+JS_ClearScope(JSContext *cx, JSObject *obj);
+
+extern JS_PUBLIC_API(JSIdArray *)
+JS_Enumerate(JSContext *cx, JSObject *obj);
+
+/*
+ * Create an object to iterate over enumerable properties of obj, in arbitrary
+ * property definition order. NB: This differs from longstanding for..in loop
+ * order, which uses order of property definition in obj.
+ */
+extern JS_PUBLIC_API(JSObject *)
+JS_NewPropertyIterator(JSContext *cx, JSObject *obj);
+
+/*
+ * Return true on success with *idp containing the id of the next enumerable
+ * property to visit using iterobj, or JSVAL_VOID if there is no such property
+ * left to visit. Return false on error.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_NextProperty(JSContext *cx, JSObject *iterobj, jsid *idp);
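+
+/*
+ * An iteration sketch (illustrative only): visit every enumerable property
+ * id of obj. Root iterobj if the loop body can trigger GC.
+ *
+ *   JSObject *iterobj = JS_NewPropertyIterator(cx, obj);
+ *   jsid id;
+ *
+ *   if (!iterobj)
+ *       return JS_FALSE;
+ *   for (;;) {
+ *       if (!JS_NextProperty(cx, iterobj, &id))
+ *           return JS_FALSE;
+ *       if (id == (jsid) JSVAL_VOID)
+ *           break;
+ *       ... use id, e.g. by converting it with JS_IdToValue ...
+ *   }
+ */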
+
+extern JS_PUBLIC_API(JSBool)
+JS_CheckAccess(JSContext *cx, JSObject *obj, jsid id, JSAccessMode mode,
+ jsval *vp, uintN *attrsp);
+
+extern JS_PUBLIC_API(JSCheckAccessOp)
+JS_SetCheckObjectAccessCallback(JSRuntime *rt, JSCheckAccessOp acb);
+
+extern JS_PUBLIC_API(JSBool)
+JS_GetReservedSlot(JSContext *cx, JSObject *obj, uint32 index, jsval *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetReservedSlot(JSContext *cx, JSObject *obj, uint32 index, jsval v);
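+
+/*
+ * For example (a sketch, not from the original header; cacheobj and v are
+ * hypothetical locals): an object of a class declaring
+ * JSCLASS_HAS_RESERVED_SLOTS(1) can cache a value in slot 0.
+ *
+ *   if (!JS_SetReservedSlot(cx, obj, 0, OBJECT_TO_JSVAL(cacheobj)))
+ *       return JS_FALSE;
+ *   ...
+ *   if (!JS_GetReservedSlot(cx, obj, 0, &v))
+ *       return JS_FALSE;
+ */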
+
+/************************************************************************/
+
+/*
+ * Security protocol.
+ */
+struct JSPrincipals {
+ char *codebase;
+
+ /* XXX unspecified and unused by Mozilla code -- can we remove these? */
+ void * (* JS_DLL_CALLBACK getPrincipalArray)(JSContext *cx, JSPrincipals *);
+ JSBool (* JS_DLL_CALLBACK globalPrivilegesEnabled)(JSContext *cx, JSPrincipals *);
+
+ /* Don't call "destroy"; use reference counting macros below. */
+ jsrefcount refcount;
+
+ void (* JS_DLL_CALLBACK destroy)(JSContext *cx, JSPrincipals *);
+ JSBool (* JS_DLL_CALLBACK subsume)(JSPrincipals *, JSPrincipals *);
+};
+
+#ifdef JS_THREADSAFE
+#define JSPRINCIPALS_HOLD(cx, principals) JS_HoldPrincipals(cx,principals)
+#define JSPRINCIPALS_DROP(cx, principals) JS_DropPrincipals(cx,principals)
+
+extern JS_PUBLIC_API(jsrefcount)
+JS_HoldPrincipals(JSContext *cx, JSPrincipals *principals);
+
+extern JS_PUBLIC_API(jsrefcount)
+JS_DropPrincipals(JSContext *cx, JSPrincipals *principals);
+
+#else
+#define JSPRINCIPALS_HOLD(cx, principals) (++(principals)->refcount)
+#define JSPRINCIPALS_DROP(cx, principals) \
+ ((--(principals)->refcount == 0) \
+ ? ((*(principals)->destroy)((cx), (principals)), 0) \
+ : (principals)->refcount)
+#endif
+
+extern JS_PUBLIC_API(JSPrincipalsTranscoder)
+JS_SetPrincipalsTranscoder(JSRuntime *rt, JSPrincipalsTranscoder px);
+
+extern JS_PUBLIC_API(JSObjectPrincipalsFinder)
+JS_SetObjectPrincipalsFinder(JSRuntime *rt, JSObjectPrincipalsFinder fop);
+
+/************************************************************************/
+
+/*
+ * Functions and scripts.
+ */
+extern JS_PUBLIC_API(JSFunction *)
+JS_NewFunction(JSContext *cx, JSNative call, uintN nargs, uintN flags,
+ JSObject *parent, const char *name);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_GetFunctionObject(JSFunction *fun);
+
+/*
+ * Deprecated, useful only for diagnostics. Use JS_GetFunctionId instead for
+ * anonymous vs. "anonymous" disambiguation and Unicode fidelity.
+ */
+extern JS_PUBLIC_API(const char *)
+JS_GetFunctionName(JSFunction *fun);
+
+/*
+ * Return the function's identifier as a JSString, or null if fun is unnamed.
+ * The returned string lives as long as fun, so you don't need to root a saved
+ * reference to it if fun is well-connected or rooted, and provided you bound
+ * the use of the saved reference by fun's lifetime.
+ *
+ * Prefer JS_GetFunctionId over JS_GetFunctionName because it returns null for
+ * truly anonymous functions, and because it doesn't chop to ISO-Latin-1 chars
+ * from UTF-16-ish jschars.
+ */
+extern JS_PUBLIC_API(JSString *)
+JS_GetFunctionId(JSFunction *fun);
+
+/*
+ * Return JSFUN_* flags for fun.
+ */
+extern JS_PUBLIC_API(uintN)
+JS_GetFunctionFlags(JSFunction *fun);
+
+/*
+ * Return the arity (length) of fun.
+ */
+extern JS_PUBLIC_API(uint16)
+JS_GetFunctionArity(JSFunction *fun);
+
+/*
+ * Infallible predicate to test whether obj is a function object (faster than
+ * comparing obj's class name to "Function", but equivalent unless someone has
+ * overwritten the "Function" identifier with a different constructor and then
+ * created instances using that constructor that might be passed in as obj).
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_ObjectIsFunction(JSContext *cx, JSObject *obj);
+
+extern JS_PUBLIC_API(JSBool)
+JS_DefineFunctions(JSContext *cx, JSObject *obj, JSFunctionSpec *fs);
+
+extern JS_PUBLIC_API(JSFunction *)
+JS_DefineFunction(JSContext *cx, JSObject *obj, const char *name, JSNative call,
+ uintN nargs, uintN attrs);
+
+extern JS_PUBLIC_API(JSFunction *)
+JS_DefineUCFunction(JSContext *cx, JSObject *obj,
+ const jschar *name, size_t namelen, JSNative call,
+ uintN nargs, uintN attrs);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_CloneFunctionObject(JSContext *cx, JSObject *funobj, JSObject *parent);
+
+/*
+ * Given a buffer, return JS_FALSE if the buffer might become a valid
+ * JavaScript statement with the addition of more lines. Otherwise return
+ * JS_TRUE. The intent is to support interactive compilation - accumulate
+ * lines in a buffer until JS_BufferIsCompilableUnit is true, then pass it to
+ * the compiler.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_BufferIsCompilableUnit(JSContext *cx, JSObject *obj,
+ const char *bytes, size_t length);
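+
+/*
+ * A read-eval-print sketch (illustrative only): keep appending typed lines
+ * to buf (updating len) until the accumulated text forms a compilable unit,
+ * then hand it to the compiler.
+ *
+ *   do {
+ *       ... append the next input line to buf and update len ...
+ *   } while (!JS_BufferIsCompilableUnit(cx, global, buf, len));
+ *   script = JS_CompileScript(cx, global, buf, len, "typein", startline);
+ */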
+
+/*
+ * The JSScript objects returned by the following functions refer to string and
+ * other kinds of literals, including doubles and RegExp objects. These
+ * literals are vulnerable to garbage collection; to root script objects and
+ * prevent literals from being collected, create a rootable object using
+ * JS_NewScriptObject, and root the resulting object using JS_Add[Named]Root.
+ */
+extern JS_PUBLIC_API(JSScript *)
+JS_CompileScript(JSContext *cx, JSObject *obj,
+ const char *bytes, size_t length,
+ const char *filename, uintN lineno);
+
+extern JS_PUBLIC_API(JSScript *)
+JS_CompileScriptForPrincipals(JSContext *cx, JSObject *obj,
+ JSPrincipals *principals,
+ const char *bytes, size_t length,
+ const char *filename, uintN lineno);
+
+extern JS_PUBLIC_API(JSScript *)
+JS_CompileUCScript(JSContext *cx, JSObject *obj,
+ const jschar *chars, size_t length,
+ const char *filename, uintN lineno);
+
+extern JS_PUBLIC_API(JSScript *)
+JS_CompileUCScriptForPrincipals(JSContext *cx, JSObject *obj,
+ JSPrincipals *principals,
+ const jschar *chars, size_t length,
+ const char *filename, uintN lineno);
+
+extern JS_PUBLIC_API(JSScript *)
+JS_CompileFile(JSContext *cx, JSObject *obj, const char *filename);
+
+extern JS_PUBLIC_API(JSScript *)
+JS_CompileFileHandle(JSContext *cx, JSObject *obj, const char *filename,
+ FILE *fh);
+
+extern JS_PUBLIC_API(JSScript *)
+JS_CompileFileHandleForPrincipals(JSContext *cx, JSObject *obj,
+ const char *filename, FILE *fh,
+ JSPrincipals *principals);
+
+/*
+ * NB: you must use JS_NewScriptObject and root a pointer to its return value
+ * in order to keep a JSScript and its atoms safe from garbage collection after
+ * creating the script via JS_Compile* and before a JS_ExecuteScript* call.
+ * E.g., and without error checks:
+ *
+ * JSScript *script = JS_CompileFile(cx, global, filename);
+ * JSObject *scrobj = JS_NewScriptObject(cx, script);
+ * JS_AddNamedRoot(cx, &scrobj, "scrobj");
+ * jsval result;
+ * do {
+ *     JS_ExecuteScript(cx, global, script, &result);
+ *     JS_GC(cx);
+ * } while (!JSVAL_IS_BOOLEAN(result) || JSVAL_TO_BOOLEAN(result));
+ * JS_RemoveRoot(cx, &scrobj);
+ */
+extern JS_PUBLIC_API(JSObject *)
+JS_NewScriptObject(JSContext *cx, JSScript *script);
+
+/*
+ * Infallible getter for a script's object. If JS_NewScriptObject has not been
+ * called on script yet, the return value will be null.
+ */
+extern JS_PUBLIC_API(JSObject *)
+JS_GetScriptObject(JSScript *script);
+
+extern JS_PUBLIC_API(void)
+JS_DestroyScript(JSContext *cx, JSScript *script);
+
+extern JS_PUBLIC_API(JSFunction *)
+JS_CompileFunction(JSContext *cx, JSObject *obj, const char *name,
+ uintN nargs, const char **argnames,
+ const char *bytes, size_t length,
+ const char *filename, uintN lineno);
+
+extern JS_PUBLIC_API(JSFunction *)
+JS_CompileFunctionForPrincipals(JSContext *cx, JSObject *obj,
+ JSPrincipals *principals, const char *name,
+ uintN nargs, const char **argnames,
+ const char *bytes, size_t length,
+ const char *filename, uintN lineno);
+
+extern JS_PUBLIC_API(JSFunction *)
+JS_CompileUCFunction(JSContext *cx, JSObject *obj, const char *name,
+ uintN nargs, const char **argnames,
+ const jschar *chars, size_t length,
+ const char *filename, uintN lineno);
+
+extern JS_PUBLIC_API(JSFunction *)
+JS_CompileUCFunctionForPrincipals(JSContext *cx, JSObject *obj,
+ JSPrincipals *principals, const char *name,
+ uintN nargs, const char **argnames,
+ const jschar *chars, size_t length,
+ const char *filename, uintN lineno);
+
+extern JS_PUBLIC_API(JSString *)
+JS_DecompileScript(JSContext *cx, JSScript *script, const char *name,
+ uintN indent);
+
+/*
+ * API extension: OR this into indent to avoid pretty-printing the decompiled
+ * source resulting from JS_DecompileFunction{,Body}.
+ */
+#define JS_DONT_PRETTY_PRINT ((uintN)0x8000)
+
+extern JS_PUBLIC_API(JSString *)
+JS_DecompileFunction(JSContext *cx, JSFunction *fun, uintN indent);
+
+extern JS_PUBLIC_API(JSString *)
+JS_DecompileFunctionBody(JSContext *cx, JSFunction *fun, uintN indent);
+
+/*
+ * NB: JS_ExecuteScript, JS_ExecuteScriptPart, and the JS_Evaluate*Script*
+ * quadruplets all use the obj parameter as the initial scope chain header,
+ * the 'this' keyword value, and the variables object (ECMA parlance for where
+ * 'var' and 'function' bind names) of the execution context for script.
+ *
+ * Using obj as the variables object is problematic if obj's parent (which is
+ * the scope chain link; see JS_SetParent and JS_NewObject) is not null: in
+ * this case, variables created by 'var x = 0', e.g., go in obj, but variables
+ * created by assignment to an unbound id, 'x = 0', go in the last object on
+ * the scope chain linked by parent.
+ *
+ * ECMA calls that last scoping object the "global object", but note that many
+ * embeddings have several such objects. ECMA requires that "global code" be
+ * executed with the variables object equal to this global object. But these
+ * JS API entry points provide freedom to execute code against a "sub-global",
+ * i.e., a parented or scoped object, in which case the variables object will
+ * differ from the last object on the scope chain, resulting in confusing and
+ * non-ECMA explicit vs. implicit variable creation.
+ *
+ * Caveat embedders: unless you already depend on this buggy variables object
+ * binding behavior, you should call JS_SetOptions(cx, JSOPTION_VAROBJFIX) or
+ * JS_SetOptions(cx, JS_GetOptions(cx) | JSOPTION_VAROBJFIX) -- the latter if
+ * someone may have set other options on cx already -- for each context in the
+ * application, if you pass parented objects as the obj parameter, or may ever
+ * pass such objects in the future.
+ *
+ * Why a runtime option? The alternative is to add six or so new API entry
+ * points with signatures matching the following six, and that doesn't seem
+ * worth the code bloat cost. Such new entry points would probably have less
+ * obvious names, too, so would not tend to be used. The JS_SetOption call,
+ * OTOH, can be more easily hacked into existing code that does not depend on
+ * the bug; such code can continue to use the familiar JS_EvaluateScript,
+ * etc., entry points.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_ExecuteScript(JSContext *cx, JSObject *obj, JSScript *script, jsval *rval);
+
+/*
+ * Execute either the function-defining prolog of a script, or the script's
+ * main body, but not both.
+ */
+typedef enum JSExecPart { JSEXEC_PROLOG, JSEXEC_MAIN } JSExecPart;
+
+extern JS_PUBLIC_API(JSBool)
+JS_ExecuteScriptPart(JSContext *cx, JSObject *obj, JSScript *script,
+ JSExecPart part, jsval *rval);
+
+extern JS_PUBLIC_API(JSBool)
+JS_EvaluateScript(JSContext *cx, JSObject *obj,
+ const char *bytes, uintN length,
+ const char *filename, uintN lineno,
+ jsval *rval);
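+
+/*
+ * For example (a sketch, not from the original header; src and global are
+ * assumed to be a source buffer and the global object): evaluate a string
+ * and print the result.
+ *
+ *   jsval rval;
+ *   JSString *str;
+ *
+ *   if (!JS_EvaluateScript(cx, global, src, strlen(src), "inline", 1, &rval))
+ *       return JS_FALSE;
+ *   str = JS_ValueToString(cx, rval);
+ *   if (!str)
+ *       return JS_FALSE;
+ *   printf("%s\n", JS_GetStringBytes(str));
+ */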
+
+extern JS_PUBLIC_API(JSBool)
+JS_EvaluateScriptForPrincipals(JSContext *cx, JSObject *obj,
+ JSPrincipals *principals,
+ const char *bytes, uintN length,
+ const char *filename, uintN lineno,
+ jsval *rval);
+
+extern JS_PUBLIC_API(JSBool)
+JS_EvaluateUCScript(JSContext *cx, JSObject *obj,
+ const jschar *chars, uintN length,
+ const char *filename, uintN lineno,
+ jsval *rval);
+
+extern JS_PUBLIC_API(JSBool)
+JS_EvaluateUCScriptForPrincipals(JSContext *cx, JSObject *obj,
+ JSPrincipals *principals,
+ const jschar *chars, uintN length,
+ const char *filename, uintN lineno,
+ jsval *rval);
+
+extern JS_PUBLIC_API(JSBool)
+JS_CallFunction(JSContext *cx, JSObject *obj, JSFunction *fun, uintN argc,
+ jsval *argv, jsval *rval);
+
+extern JS_PUBLIC_API(JSBool)
+JS_CallFunctionName(JSContext *cx, JSObject *obj, const char *name, uintN argc,
+ jsval *argv, jsval *rval);
+
+extern JS_PUBLIC_API(JSBool)
+JS_CallFunctionValue(JSContext *cx, JSObject *obj, jsval fval, uintN argc,
+ jsval *argv, jsval *rval);
+
+extern JS_PUBLIC_API(JSBranchCallback)
+JS_SetBranchCallback(JSContext *cx, JSBranchCallback cb);
+
+extern JS_PUBLIC_API(JSBool)
+JS_IsRunning(JSContext *cx);
+
+extern JS_PUBLIC_API(JSBool)
+JS_IsConstructing(JSContext *cx);
+
+/*
+ * Returns true if a script is executing and its current bytecode is a set
+ * (assignment) operation, even if there are native (no script) stack frames
+ * between the script and the caller to JS_IsAssigning.
+ */
+extern JS_FRIEND_API(JSBool)
+JS_IsAssigning(JSContext *cx);
+
+/*
+ * Set the second return value, which should be a string or int jsval that
+ * identifies a property in the returned object, to form an ECMA reference
+ * type value (obj, id). Only native methods can return reference types,
+ * and if the returned value is used on the left-hand side of an assignment
+ * op, the identified property will be set. If the return value is used as
+ * an r-value, the interpreter just gets obj[id]'s value.
+ */
+extern JS_PUBLIC_API(void)
+JS_SetCallReturnValue2(JSContext *cx, jsval v);
+
+/*
+ * Saving and restoring frame chains.
+ *
+ * These two functions are used to set aside cx->fp while that frame is
+ * inactive. After a call to JS_SaveFrameChain, it looks as if there is no
+ * code running on cx. Before calling JS_RestoreFrameChain, cx's call stack
+ * must be balanced and all nested calls to JS_SaveFrameChain must have had
+ * matching JS_RestoreFrameChain calls.
+ *
+ * JS_SaveFrameChain deals with cx not having any code running on it. A null
+ * return does not signify an error and JS_RestoreFrameChain handles null
+ * frames.
+ */
+extern JS_PUBLIC_API(JSStackFrame *)
+JS_SaveFrameChain(JSContext *cx);
+
+extern JS_PUBLIC_API(void)
+JS_RestoreFrameChain(JSContext *cx, JSStackFrame *fp);
+
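+/*
+ * Sketch of balanced save/restore use, assuming cx has frames that must be
+ * hidden while some unrelated code runs:
+ *
+ *   JSStackFrame *fp = JS_SaveFrameChain(cx);   // a null fp is not an error
+ *   // ... run code that must not see the saved frames ...
+ *   JS_RestoreFrameChain(cx, fp);               // handles a null fp
+ */
+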
+/************************************************************************/
+
+/*
+ * Strings.
+ *
+ * NB: JS_NewString takes ownership of bytes on success, avoiding a copy; but
+ * on error (signified by null return), it leaves bytes owned by the caller.
+ * So the caller must free bytes in the error case, if it has no use for them.
+ * In contrast, all the JS_New*StringCopy* functions do not take ownership of
+ * the character memory passed to them -- they copy it.
+ */
+extern JS_PUBLIC_API(JSString *)
+JS_NewString(JSContext *cx, char *bytes, size_t length);
+
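+/*
+ * Ownership sketch for JS_NewString, assuming cx is a live JSContext and
+ * following the NB above (the engine owns bytes on success, the caller on
+ * failure):
+ *
+ *   char *bytes = strdup("hello");
+ *   JSString *str = bytes ? JS_NewString(cx, bytes, strlen(bytes)) : NULL;
+ *   if (bytes && !str)
+ *       free(bytes);         // on failure the caller still owns bytes
+ */
+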
+extern JS_PUBLIC_API(JSString *)
+JS_NewStringCopyN(JSContext *cx, const char *s, size_t n);
+
+extern JS_PUBLIC_API(JSString *)
+JS_NewStringCopyZ(JSContext *cx, const char *s);
+
+extern JS_PUBLIC_API(JSString *)
+JS_InternString(JSContext *cx, const char *s);
+
+extern JS_PUBLIC_API(JSString *)
+JS_NewUCString(JSContext *cx, jschar *chars, size_t length);
+
+extern JS_PUBLIC_API(JSString *)
+JS_NewUCStringCopyN(JSContext *cx, const jschar *s, size_t n);
+
+extern JS_PUBLIC_API(JSString *)
+JS_NewUCStringCopyZ(JSContext *cx, const jschar *s);
+
+extern JS_PUBLIC_API(JSString *)
+JS_InternUCStringN(JSContext *cx, const jschar *s, size_t length);
+
+extern JS_PUBLIC_API(JSString *)
+JS_InternUCString(JSContext *cx, const jschar *s);
+
+extern JS_PUBLIC_API(char *)
+JS_GetStringBytes(JSString *str);
+
+extern JS_PUBLIC_API(jschar *)
+JS_GetStringChars(JSString *str);
+
+extern JS_PUBLIC_API(size_t)
+JS_GetStringLength(JSString *str);
+
+extern JS_PUBLIC_API(intN)
+JS_CompareStrings(JSString *str1, JSString *str2);
+
+/*
+ * Mutable string support. A string's characters are never mutable in this JS
+ * implementation, but a growable string has a buffer that can be reallocated,
+ * and a dependent string is a substring of another (growable, dependent, or
+ * immutable) string. The direct data members of the (opaque to API clients)
+ * JSString struct may be changed in a single-threaded way for growable and
+ * dependent strings.
+ *
+ * Therefore mutable strings cannot be used by more than one thread at a time.
+ * You may call JS_MakeStringImmutable to convert the string from a mutable
+ * (growable or dependent) string to an immutable (and therefore thread-safe)
+ * string. The engine takes care of converting growable and dependent strings
+ * to immutable for you if you store strings in multi-threaded objects using
+ * JS_SetProperty or kindred API entry points.
+ *
+ * If you store a JSString pointer in a native data structure that is (safely)
+ * accessible to multiple threads, you must call JS_MakeStringImmutable before
+ * retiring the store.
+ */
+extern JS_PUBLIC_API(JSString *)
+JS_NewGrowableString(JSContext *cx, jschar *chars, size_t length);
+
+/*
+ * Create a dependent string, i.e., a string that owns no character storage,
+ * but that refers to a slice of another string's chars. Dependent strings
+ * are mutable by definition, so the thread safety comments above apply.
+ */
+extern JS_PUBLIC_API(JSString *)
+JS_NewDependentString(JSContext *cx, JSString *str, size_t start,
+ size_t length);
+
+/*
+ * Concatenate two strings, resulting in a new growable string. If you create
+ * the left string and pass it to JS_ConcatStrings on a single thread, try to
+ * use JS_NewGrowableString to create the left string -- doing so helps Concat
+ * avoid allocating a new buffer for the result and copying left's chars into
+ * the new buffer. See above for thread safety comments.
+ */
+extern JS_PUBLIC_API(JSString *)
+JS_ConcatStrings(JSContext *cx, JSString *left, JSString *right);
+
+/*
+ * Convert a dependent string into an independent one. This function does not
+ * change the string's mutability, so the thread safety comments above apply.
+ */
+extern JS_PUBLIC_API(const jschar *)
+JS_UndependString(JSContext *cx, JSString *str);
+
+/*
+ * Convert a mutable string (either growable or dependent) into an immutable,
+ * thread-safe one.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_MakeStringImmutable(JSContext *cx, JSString *str);
+
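+/*
+ * Sketch of making a shared string thread-safe, assuming left and right are
+ * existing JSString pointers; per the comments above, JS_ConcatStrings yields
+ * a growable (mutable) string:
+ *
+ *   JSString *s = JS_ConcatStrings(cx, left, right);
+ *   if (!s || !JS_MakeStringImmutable(cx, s))
+ *       return JS_FALSE;
+ *   // s may now be stored where other threads can reach it
+ */
+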
+/*
+ * Return JS_TRUE if C (char []) strings passed via the API and internally
+ * are UTF-8. The source must be compiled with JS_C_STRINGS_ARE_UTF8 defined
+ * to get UTF-8 support.
+ */
+JS_PUBLIC_API(JSBool)
+JS_CStringsAreUTF8();
+
+/*
+ * Character encoding support.
+ *
+ * For both JS_EncodeCharacters and JS_DecodeBytes, set *dstlenp to the size
+ * of the destination buffer before the call; on return, *dstlenp contains the
+ * number of bytes (JS_EncodeCharacters) or jschars (JS_DecodeBytes) actually
+ * stored. To determine the necessary destination buffer size, make a sizing
+ * call that passes NULL for dst.
+ *
+ * On errors, the functions report the error. In that case, *dstlenp contains
+ * the number of characters or bytes transferred so far. If cx is NULL, no
+ * error is reported on failure, and the functions simply return JS_FALSE.
+ *
+ * NB: Neither function stores an additional zero byte or jschar after the
+ * transcoded string.
+ *
+ * If the source has been compiled with the #define JS_C_STRINGS_ARE_UTF8 to
+ * enable UTF-8 interpretation of C char[] strings, then JS_EncodeCharacters
+ * encodes to UTF-8, and JS_DecodeBytes decodes from UTF-8, which may create
+ * additional errors if the character sequence is malformed. If UTF-8
+ * support is disabled, the functions deflate and inflate, respectively.
+ */
+JS_PUBLIC_API(JSBool)
+JS_EncodeCharacters(JSContext *cx, const jschar *src, size_t srclen, char *dst,
+ size_t *dstlenp);
+
+JS_PUBLIC_API(JSBool)
+JS_DecodeBytes(JSContext *cx, const char *src, size_t srclen, jschar *dst,
+ size_t *dstlenp);
+
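+/*
+ * Sketch of the two-call sizing pattern described above for JS_DecodeBytes,
+ * assuming src/srclen hold the input bytes and malloc/free are available:
+ *
+ *   size_t dstlen = 0;
+ *   if (!JS_DecodeBytes(cx, src, srclen, NULL, &dstlen))   // sizing call
+ *       return JS_FALSE;
+ *   jschar *dst = (jschar *) malloc(dstlen * sizeof(jschar));
+ *   if (!dst)
+ *       return JS_FALSE;
+ *   if (!JS_DecodeBytes(cx, src, srclen, dst, &dstlen)) {
+ *       free(dst);
+ *       return JS_FALSE;
+ *   }
+ *   // NB: dst is not zero-terminated (see above)
+ */
+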
+/************************************************************************/
+
+/*
+ * Locale specific string conversion and error message callbacks.
+ */
+struct JSLocaleCallbacks {
+ JSLocaleToUpperCase localeToUpperCase;
+ JSLocaleToLowerCase localeToLowerCase;
+ JSLocaleCompare localeCompare;
+ JSLocaleToUnicode localeToUnicode;
+ JSErrorCallback localeGetErrorMessage;
+};
+
+/*
+ * Establish locale callbacks. The pointer must persist as long as the
+ * JSContext. Passing NULL restores the default behaviour.
+ */
+extern JS_PUBLIC_API(void)
+JS_SetLocaleCallbacks(JSContext *cx, JSLocaleCallbacks *callbacks);
+
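+/*
+ * Sketch of installing locale callbacks; MyToUpper, MyToLower, MyCompare and
+ * MyToUnicode are hypothetical embedder-supplied functions, and the struct
+ * has static storage so that it outlives cx as required above:
+ *
+ *   static JSLocaleCallbacks myLocaleCallbacks = {
+ *       MyToUpper, MyToLower, MyCompare, MyToUnicode,
+ *       NULL                    // keep the default error messages
+ *   };
+ *   JS_SetLocaleCallbacks(cx, &myLocaleCallbacks);
+ *   ...
+ *   JS_SetLocaleCallbacks(cx, NULL);   // restore default behaviour
+ */
+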
+/*
+ * Return the address of the current locale callbacks struct, which may
+ * be NULL.
+ */
+extern JS_PUBLIC_API(JSLocaleCallbacks *)
+JS_GetLocaleCallbacks(JSContext *cx);
+
+/************************************************************************/
+
+/*
+ * Error reporting.
+ */
+
+/*
+ * Report an exception represented by the sprintf-like conversion of format
+ * and its arguments. This exception message string is passed to a pre-set
+ * JSErrorReporter function (set by JS_SetErrorReporter; see jspubtd.h for
+ * the JSErrorReporter typedef).
+ */
+extern JS_PUBLIC_API(void)
+JS_ReportError(JSContext *cx, const char *format, ...);
+
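+/*
+ * Sketch of a typical sprintf-style report from a hypothetical native method,
+ * assuming argc holds the actual argument count:
+ *
+ *   if (argc < 2) {
+ *       JS_ReportError(cx, "expected 2 arguments, got %u", (unsigned) argc);
+ *       return JS_FALSE;
+ *   }
+ */
+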
+/*
+ * Use an errorNumber to retrieve the format string, args are char *
+ */
+extern JS_PUBLIC_API(void)
+JS_ReportErrorNumber(JSContext *cx, JSErrorCallback errorCallback,
+ void *userRef, const uintN errorNumber, ...);
+
+/*
+ * Use an errorNumber to retrieve the format string, args are jschar *
+ */
+extern JS_PUBLIC_API(void)
+JS_ReportErrorNumberUC(JSContext *cx, JSErrorCallback errorCallback,
+ void *userRef, const uintN errorNumber, ...);
+
+/*
+ * As above, but report a warning instead (JSREPORT_IS_WARNING(report.flags)).
+ * Return true if there was no error trying to issue the warning, and if the
+ * warning was not converted into an error due to the JSOPTION_WERROR option
+ * being set, false otherwise.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_ReportWarning(JSContext *cx, const char *format, ...);
+
+extern JS_PUBLIC_API(JSBool)
+JS_ReportErrorFlagsAndNumber(JSContext *cx, uintN flags,
+ JSErrorCallback errorCallback, void *userRef,
+ const uintN errorNumber, ...);
+
+extern JS_PUBLIC_API(JSBool)
+JS_ReportErrorFlagsAndNumberUC(JSContext *cx, uintN flags,
+ JSErrorCallback errorCallback, void *userRef,
+ const uintN errorNumber, ...);
+
+/*
+ * Complain when out of memory.
+ */
+extern JS_PUBLIC_API(void)
+JS_ReportOutOfMemory(JSContext *cx);
+
+struct JSErrorReport {
+ const char *filename; /* source file name, URL, etc., or null */
+ uintN lineno; /* source line number */
+ const char *linebuf; /* offending source line without final \n */
+ const char *tokenptr; /* pointer to error token in linebuf */
+ const jschar *uclinebuf; /* unicode (original) line buffer */
+ const jschar *uctokenptr; /* unicode (original) token pointer */
+ uintN flags; /* error/warning, etc. */
+ uintN errorNumber; /* the error number, e.g. see js.msg */
+ const jschar *ucmessage; /* the (default) error message */
+ const jschar **messageArgs; /* arguments for the error message */
+};
+
+/*
+ * JSErrorReport flag values. These may be freely composed.
+ */
+#define JSREPORT_ERROR 0x0 /* pseudo-flag for default case */
+#define JSREPORT_WARNING 0x1 /* reported via JS_ReportWarning */
+#define JSREPORT_EXCEPTION 0x2 /* exception was thrown */
+#define JSREPORT_STRICT 0x4 /* error or warning due to strict option */
+
+/*
+ * If JSREPORT_EXCEPTION is set, then a JavaScript-catchable exception
+ * has been thrown for this runtime error, and the host should ignore it.
+ * Exception-aware hosts should also check for JS_IsExceptionPending if
+ * JS_ExecuteScript returns failure, and signal or propagate the exception, as
+ * appropriate.
+ */
+#define JSREPORT_IS_WARNING(flags) (((flags) & JSREPORT_WARNING) != 0)
+#define JSREPORT_IS_EXCEPTION(flags) (((flags) & JSREPORT_EXCEPTION) != 0)
+#define JSREPORT_IS_STRICT(flags) (((flags) & JSREPORT_STRICT) != 0)
+
+extern JS_PUBLIC_API(JSErrorReporter)
+JS_SetErrorReporter(JSContext *cx, JSErrorReporter er);
+
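+/*
+ * Sketch of a host error reporter, assuming the (cx, message, report)
+ * JSErrorReporter signature from jspubtd.h and hypothetical use of stdio:
+ *
+ *   static void
+ *   MyErrorReporter(JSContext *cx, const char *message, JSErrorReport *report)
+ *   {
+ *       fprintf(stderr, "%s:%u: %s%s\n",
+ *               report->filename ? report->filename : "<no filename>",
+ *               (unsigned) report->lineno,
+ *               JSREPORT_IS_WARNING(report->flags) ? "warning: " : "",
+ *               message);
+ *   }
+ *   ...
+ *   JS_SetErrorReporter(cx, MyErrorReporter);
+ */
+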
+/************************************************************************/
+
+/*
+ * Regular Expressions.
+ */
+#define JSREG_FOLD 0x01 /* fold uppercase to lowercase */
+#define JSREG_GLOB 0x02 /* global exec, creates array of matches */
+#define JSREG_MULTILINE 0x04 /* treat ^ and $ as begin and end of line */
+
+extern JS_PUBLIC_API(JSObject *)
+JS_NewRegExpObject(JSContext *cx, char *bytes, size_t length, uintN flags);
+
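+/*
+ * Sketch of building a case-folding regexp object from the flags above,
+ * assuming <string.h> for strlen:
+ *
+ *   char pattern[] = "ab+c";
+ *   JSObject *re = JS_NewRegExpObject(cx, pattern, strlen(pattern),
+ *                                     JSREG_FOLD);
+ *   if (!re)
+ *       return JS_FALSE;
+ */
+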
+extern JS_PUBLIC_API(JSObject *)
+JS_NewUCRegExpObject(JSContext *cx, jschar *chars, size_t length, uintN flags);
+
+extern JS_PUBLIC_API(void)
+JS_SetRegExpInput(JSContext *cx, JSString *input, JSBool multiline);
+
+extern JS_PUBLIC_API(void)
+JS_ClearRegExpStatics(JSContext *cx);
+
+extern JS_PUBLIC_API(void)
+JS_ClearRegExpRoots(JSContext *cx);
+
+/* TODO: compile, exec, get/set other statics... */
+
+/************************************************************************/
+
+extern JS_PUBLIC_API(JSBool)
+JS_IsExceptionPending(JSContext *cx);
+
+extern JS_PUBLIC_API(JSBool)
+JS_GetPendingException(JSContext *cx, jsval *vp);
+
+extern JS_PUBLIC_API(void)
+JS_SetPendingException(JSContext *cx, jsval v);
+
+extern JS_PUBLIC_API(void)
+JS_ClearPendingException(JSContext *cx);
+
+extern JS_PUBLIC_API(JSBool)
+JS_ReportPendingException(JSContext *cx);
+
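+/*
+ * Sketch of the exception-aware failure handling suggested with the
+ * JSREPORT_EXCEPTION flag above, assuming global and script were set up
+ * earlier:
+ *
+ *   jsval rval;
+ *   if (!JS_ExecuteScript(cx, global, script, &rval)) {
+ *       if (JS_IsExceptionPending(cx))
+ *           JS_ReportPendingException(cx);   // or propagate to the caller
+ *   }
+ */
+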
+/*
+ * Save the current exception state. This takes a snapshot of cx's current
+ * exception state without making any change to that state.
+ *
+ * The returned state pointer MUST be passed later to JS_RestoreExceptionState
+ * (to restore that saved state, overriding any more recent state) or else to
+ * JS_DropExceptionState (to free the state struct in case it is not correct
+ * or desirable to restore it). Both Restore and Drop free the state struct,
+ * so callers must stop using the pointer returned from Save after calling the
+ * Restore or Drop API.
+ */
+extern JS_PUBLIC_API(JSExceptionState *)
+JS_SaveExceptionState(JSContext *cx);
+
+extern JS_PUBLIC_API(void)
+JS_RestoreExceptionState(JSContext *cx, JSExceptionState *state);
+
+extern JS_PUBLIC_API(void)
+JS_DropExceptionState(JSContext *cx, JSExceptionState *state);
+
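+/*
+ * Sketch of balanced save/restore-or-drop use, where keepOriginal is a
+ * hypothetical flag chosen by the embedding:
+ *
+ *   JSExceptionState *state = JS_SaveExceptionState(cx);
+ *   // ... run code that may set or clear cx's pending exception ...
+ *   if (keepOriginal)
+ *       JS_RestoreExceptionState(cx, state);   // frees state
+ *   else
+ *       JS_DropExceptionState(cx, state);      // frees state
+ */
+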
+/*
+ * If the given value is an exception object that originated from an error,
+ * the exception will contain an error report struct, and this API will return
+ * the address of that struct. Otherwise, it returns NULL. The lifetime of
+ * the error report struct that might be returned is the same as the lifetime
+ * of the exception object.
+ */
+extern JS_PUBLIC_API(JSErrorReport *)
+JS_ErrorFromException(JSContext *cx, jsval v);
+
+/*
+ * Given a reported error's message and JSErrorReport struct pointer, throw
+ * the corresponding exception on cx.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_ThrowReportedError(JSContext *cx, const char *message,
+ JSErrorReport *reportp);
+
+#ifdef JS_THREADSAFE
+
+/*
+ * Associate the current thread with the given context. This is done
+ * implicitly by JS_NewContext.
+ *
+ * Returns the old thread id for this context, which should be treated as
+ * an opaque value. This value is provided for comparison to 0, which
+ * indicates that ClearContextThread has been called on this context
+ * since the last SetContextThread, or non-0, which indicates the opposite.
+ */
+extern JS_PUBLIC_API(jsword)
+JS_GetContextThread(JSContext *cx);
+
+extern JS_PUBLIC_API(jsword)
+JS_SetContextThread(JSContext *cx);
+
+extern JS_PUBLIC_API(jsword)
+JS_ClearContextThread(JSContext *cx);
+
+#endif /* JS_THREADSAFE */
+
+/************************************************************************/
+
+JS_END_EXTERN_C
+
+#endif /* jsapi_h___ */
diff --git a/src/third_party/js-1.7/jsarena.c b/src/third_party/js-1.7/jsarena.c
new file mode 100644
index 00000000000..ef6ccd19a90
--- /dev/null
+++ b/src/third_party/js-1.7/jsarena.c
@@ -0,0 +1,502 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * Lifetime-based fast allocation, inspired by much prior art, including
+ * "Fast Allocation and Deallocation of Memory Based on Object Lifetimes"
+ * David R. Hanson, Software -- Practice and Experience, Vol. 20(1).
+ */
+#include "jsstddef.h"
+#include <stdlib.h>
+#include <string.h>
+#include "jstypes.h"
+#include "jsbit.h"
+#include "jsarena.h" /* Added by JSIFY */
+#include "jsutil.h" /* Added by JSIFY */
+
+#ifdef JS_ARENAMETER
+static JSArenaStats *arena_stats_list;
+
+#define COUNT(pool,what) (pool)->stats.what++
+#else
+#define COUNT(pool,what) /* nothing */
+#endif
+
+#define JS_ARENA_DEFAULT_ALIGN sizeof(double)
+
+JS_PUBLIC_API(void)
+JS_InitArenaPool(JSArenaPool *pool, const char *name, size_t size, size_t align)
+{
+ if (align == 0)
+ align = JS_ARENA_DEFAULT_ALIGN;
+ pool->mask = JS_BITMASK(JS_CeilingLog2(align));
+ pool->first.next = NULL;
+ pool->first.base = pool->first.avail = pool->first.limit =
+ JS_ARENA_ALIGN(pool, &pool->first + 1);
+ pool->current = &pool->first;
+ pool->arenasize = size;
+#ifdef JS_ARENAMETER
+ memset(&pool->stats, 0, sizeof pool->stats);
+ pool->stats.name = strdup(name);
+ pool->stats.next = arena_stats_list;
+ arena_stats_list = &pool->stats;
+#endif
+}
+
+/*
+ * An allocation that consumes more than pool->arenasize also has a header
+ * pointing back to its previous arena's next member. This header is not
+ * included in [a->base, a->limit), so its space can't be wrongly claimed.
+ *
+ * As the header is a pointer, it must be well-aligned. If pool->mask is
+ * greater than or equal to POINTER_MASK, the header just preceding a->base
+ * for an oversized arena a is well-aligned, because a->base is well-aligned.
+ * However, we may need to add more space to pad the JSArena ** back-pointer
+ * so that it lies just behind a->base, because a might not be aligned such
+ * that (jsuword)(a + 1) is on a pointer boundary.
+ *
+ * By how much must we pad? Let M be the alignment modulus for pool and P
+ * the modulus for a pointer. Given M >= P, the base of an oversized arena
+ * that satisfies M is well-aligned for P.
+ *
+ * On the other hand, if M < P, we must include enough space in the header
+ * size to align the back-pointer on a P boundary so that it can be found by
+ * subtracting P from a->base. This means a->base must be on a P boundary,
+ * even though subsequent allocations from a may be aligned on a lesser (M)
+ * boundary. Given powers of two M and P as above, the extra space needed
+ * when M < P is P-M or POINTER_MASK - pool->mask.
+ *
+ * The size of a header including padding is given by the HEADER_SIZE macro,
+ * below, for any pool (for any value of M).
+ *
+ * The mask to align a->base for any pool is (pool->mask | POINTER_MASK), or
+ * HEADER_BASE_MASK(pool).
+ *
+ * PTR_TO_HEADER computes the address of the back-pointer, given an oversized
+ * allocation at p. By definition, p must be a->base for the arena a that
+ * contains p. GET_HEADER and SET_HEADER operate on an oversized arena a, in
+ * the case of SET_HEADER with back-pointer ap.
+ */
+#define POINTER_MASK ((jsuword)(JS_ALIGN_OF_POINTER - 1))
+#define HEADER_SIZE(pool) (sizeof(JSArena **) \
+ + (((pool)->mask < POINTER_MASK) \
+ ? POINTER_MASK - (pool)->mask \
+ : 0))
+#define HEADER_BASE_MASK(pool) ((pool)->mask | POINTER_MASK)
+#define PTR_TO_HEADER(pool,p) (JS_ASSERT(((jsuword)(p) \
+ & HEADER_BASE_MASK(pool)) \
+ == 0), \
+ (JSArena ***)(p) - 1)
+#define GET_HEADER(pool,a) (*PTR_TO_HEADER(pool, (a)->base))
+#define SET_HEADER(pool,a,ap) (*PTR_TO_HEADER(pool, (a)->base) = (ap))
+
+JS_PUBLIC_API(void *)
+JS_ArenaAllocate(JSArenaPool *pool, size_t nb)
+{
+ JSArena **ap, *a, *b;
+ jsuword extra, hdrsz, gross;
+ void *p;
+
+ /*
+ * Search pool from current forward till we find or make enough space.
+ *
+ * NB: subtract nb from a->limit in the loop condition, instead of adding
+ * nb to a->avail, to avoid overflowing a 32-bit address space (possible
+ * when running a 32-bit program on a 64-bit system where the kernel maps
+ * the heap up against the top of the 32-bit address space).
+ *
+ * Thanks to Juergen Kreileder <jk@blackdown.de>, who brought this up in
+ * https://bugzilla.mozilla.org/show_bug.cgi?id=279273.
+ */
+ JS_ASSERT((nb & pool->mask) == 0);
+ for (a = pool->current; nb > a->limit || a->avail > a->limit - nb;
+ pool->current = a) {
+ ap = &a->next;
+ if (!*ap) {
+ /* Not enough space in pool, so we must malloc. */
+ extra = (nb > pool->arenasize) ? HEADER_SIZE(pool) : 0;
+ hdrsz = sizeof *a + extra + pool->mask;
+ gross = hdrsz + JS_MAX(nb, pool->arenasize);
+ if (gross < nb)
+ return NULL;
+ b = (JSArena *) malloc(gross);
+ if (!b)
+ return NULL;
+ b->next = NULL;
+ b->limit = (jsuword)b + gross;
+ JS_COUNT_ARENA(pool,++);
+ COUNT(pool, nmallocs);
+
+ /* If oversized, store ap in the header, just before a->base. */
+ *ap = a = b;
+ JS_ASSERT(gross <= JS_UPTRDIFF(a->limit, a));
+ if (extra) {
+ a->base = a->avail =
+ ((jsuword)a + hdrsz) & ~HEADER_BASE_MASK(pool);
+ SET_HEADER(pool, a, ap);
+ } else {
+ a->base = a->avail = JS_ARENA_ALIGN(pool, a + 1);
+ }
+ continue;
+ }
+ a = *ap; /* move to next arena */
+ }
+
+ p = (void *)a->avail;
+ a->avail += nb;
+ JS_ASSERT(a->base <= a->avail && a->avail <= a->limit);
+ return p;
+}
+
+JS_PUBLIC_API(void *)
+JS_ArenaRealloc(JSArenaPool *pool, void *p, size_t size, size_t incr)
+{
+ JSArena **ap, *a, *b;
+ jsuword boff, aoff, extra, hdrsz, gross;
+
+ /*
+ * Use the oversized-single-allocation header to avoid searching for ap.
+ * See JS_ArenaAllocate, the SET_HEADER call.
+ */
+ if (size > pool->arenasize) {
+ ap = *PTR_TO_HEADER(pool, p);
+ a = *ap;
+ } else {
+ ap = &pool->first.next;
+ while ((a = *ap) != pool->current)
+ ap = &a->next;
+ }
+
+ JS_ASSERT(a->base == (jsuword)p);
+ boff = JS_UPTRDIFF(a->base, a);
+ aoff = JS_ARENA_ALIGN(pool, size + incr);
+ JS_ASSERT(aoff > pool->arenasize);
+ extra = HEADER_SIZE(pool); /* oversized header holds ap */
+ hdrsz = sizeof *a + extra + pool->mask; /* header and alignment slop */
+ gross = hdrsz + aoff;
+ JS_ASSERT(gross > aoff);
+ a = (JSArena *) realloc(a, gross);
+ if (!a)
+ return NULL;
+#ifdef JS_ARENAMETER
+ pool->stats.nreallocs++;
+#endif
+
+ if (a != *ap) {
+ /* Oops, realloc moved the allocation: update other pointers to a. */
+ if (pool->current == *ap)
+ pool->current = a;
+ b = a->next;
+ if (b && b->avail - b->base > pool->arenasize) {
+ JS_ASSERT(GET_HEADER(pool, b) == &(*ap)->next);
+ SET_HEADER(pool, b, &a->next);
+ }
+
+ /* Now update *ap, the next link of the arena before a. */
+ *ap = a;
+ }
+
+ a->base = ((jsuword)a + hdrsz) & ~HEADER_BASE_MASK(pool);
+ a->limit = (jsuword)a + gross;
+ a->avail = a->base + aoff;
+ JS_ASSERT(a->base <= a->avail && a->avail <= a->limit);
+
+ /* Check whether realloc aligned differently, and copy if necessary. */
+ if (boff != JS_UPTRDIFF(a->base, a))
+ memmove((void *)a->base, (char *)a + boff, size);
+
+ /* Store ap in the oversized-load arena header. */
+ SET_HEADER(pool, a, ap);
+ return (void *)a->base;
+}
+
+JS_PUBLIC_API(void *)
+JS_ArenaGrow(JSArenaPool *pool, void *p, size_t size, size_t incr)
+{
+ void *newp;
+
+ /*
+ * If p points to an oversized allocation, it owns an entire arena, so we
+ * can simply realloc the arena.
+ */
+ if (size > pool->arenasize)
+ return JS_ArenaRealloc(pool, p, size, incr);
+
+ JS_ARENA_ALLOCATE(newp, pool, size + incr);
+ if (newp)
+ memcpy(newp, p, size);
+ return newp;
+}
+
+/*
+ * Free tail arenas linked after head, which may not be the true list head.
+ * Reset pool->current to point to head in case it pointed at a tail arena.
+ */
+static void
+FreeArenaList(JSArenaPool *pool, JSArena *head)
+{
+ JSArena **ap, *a;
+
+ ap = &head->next;
+ a = *ap;
+ if (!a)
+ return;
+
+#ifdef DEBUG
+ do {
+ JS_ASSERT(a->base <= a->avail && a->avail <= a->limit);
+ a->avail = a->base;
+ JS_CLEAR_UNUSED(a);
+ } while ((a = a->next) != NULL);
+ a = *ap;
+#endif
+
+ do {
+ *ap = a->next;
+ JS_CLEAR_ARENA(a);
+ JS_COUNT_ARENA(pool,--);
+ free(a);
+ } while ((a = *ap) != NULL);
+
+ pool->current = head;
+}
+
+JS_PUBLIC_API(void)
+JS_ArenaRelease(JSArenaPool *pool, char *mark)
+{
+ JSArena *a;
+
+ for (a = &pool->first; a; a = a->next) {
+ JS_ASSERT(a->base <= a->avail && a->avail <= a->limit);
+
+ if (JS_UPTRDIFF(mark, a->base) <= JS_UPTRDIFF(a->avail, a->base)) {
+ a->avail = JS_ARENA_ALIGN(pool, mark);
+ JS_ASSERT(a->avail <= a->limit);
+ FreeArenaList(pool, a);
+ return;
+ }
+ }
+}
+
+JS_PUBLIC_API(void)
+JS_ArenaFreeAllocation(JSArenaPool *pool, void *p, size_t size)
+{
+ JSArena **ap, *a, *b;
+ jsuword q;
+
+ /*
+ * If the allocation is oversized, it consumes an entire arena, and it has
+ * a header just before the allocation pointing back to its predecessor's
+ * next member. Otherwise, we have to search pool for a.
+ */
+ if (size > pool->arenasize) {
+ ap = *PTR_TO_HEADER(pool, p);
+ a = *ap;
+ } else {
+ q = (jsuword)p + size;
+ q = JS_ARENA_ALIGN(pool, q);
+ ap = &pool->first.next;
+ while ((a = *ap) != NULL) {
+ JS_ASSERT(a->base <= a->avail && a->avail <= a->limit);
+
+ if (a->avail == q) {
+ /*
+ * If a is consumed by the allocation at p, we can free it to
+ * the malloc heap.
+ */
+ if (a->base == (jsuword)p)
+ break;
+
+ /*
+ * We can't free a, but we can "retract" its avail cursor --
+ * whether or not there are other arenas after it in the pool.
+ */
+ a->avail = (jsuword)p;
+ return;
+ }
+ ap = &a->next;
+ }
+ }
+
+ /*
+ * At this point, a is doomed, so ensure that pool->current doesn't point
+ * at it. We must preserve LIFO order of mark/release cursors, so we use
+ * the oversized-allocation arena's back pointer (or if not oversized, we
+ * use the result of searching the entire pool) to compute the address of
+ * the arena that precedes a.
+ */
+ if (pool->current == a)
+ pool->current = (JSArena *) ((char *)ap - offsetof(JSArena, next));
+
+ /*
+ * This is a non-LIFO deallocation, so take care to fix up a->next's back
+ * pointer in its header, if a->next is oversized.
+ */
+ *ap = b = a->next;
+ if (b && b->avail - b->base > pool->arenasize) {
+ JS_ASSERT(GET_HEADER(pool, b) == &a->next);
+ SET_HEADER(pool, b, ap);
+ }
+ JS_CLEAR_ARENA(a);
+ JS_COUNT_ARENA(pool,--);
+ free(a);
+}
+
+JS_PUBLIC_API(void)
+JS_FreeArenaPool(JSArenaPool *pool)
+{
+ FreeArenaList(pool, &pool->first);
+ COUNT(pool, ndeallocs);
+}
+
+JS_PUBLIC_API(void)
+JS_FinishArenaPool(JSArenaPool *pool)
+{
+ FreeArenaList(pool, &pool->first);
+#ifdef JS_ARENAMETER
+ {
+ JSArenaStats *stats, **statsp;
+
+ if (pool->stats.name)
+ free(pool->stats.name);
+ for (statsp = &arena_stats_list; (stats = *statsp) != 0;
+ statsp = &stats->next) {
+ if (stats == &pool->stats) {
+ *statsp = stats->next;
+ return;
+ }
+ }
+ }
+#endif
+}
+
+JS_PUBLIC_API(void)
+JS_ArenaFinish()
+{
+}
+
+JS_PUBLIC_API(void)
+JS_ArenaShutDown(void)
+{
+}
+
+#ifdef JS_ARENAMETER
+JS_PUBLIC_API(void)
+JS_ArenaCountAllocation(JSArenaPool *pool, size_t nb)
+{
+ pool->stats.nallocs++;
+ pool->stats.nbytes += nb;
+ if (nb > pool->stats.maxalloc)
+ pool->stats.maxalloc = nb;
+ pool->stats.variance += nb * nb;
+}
+
+JS_PUBLIC_API(void)
+JS_ArenaCountInplaceGrowth(JSArenaPool *pool, size_t size, size_t incr)
+{
+ pool->stats.ninplace++;
+}
+
+JS_PUBLIC_API(void)
+JS_ArenaCountGrowth(JSArenaPool *pool, size_t size, size_t incr)
+{
+ pool->stats.ngrows++;
+ pool->stats.nbytes += incr;
+ pool->stats.variance -= size * size;
+ size += incr;
+ if (size > pool->stats.maxalloc)
+ pool->stats.maxalloc = size;
+ pool->stats.variance += size * size;
+}
+
+JS_PUBLIC_API(void)
+JS_ArenaCountRelease(JSArenaPool *pool, char *mark)
+{
+ pool->stats.nreleases++;
+}
+
+JS_PUBLIC_API(void)
+JS_ArenaCountRetract(JSArenaPool *pool, char *mark)
+{
+ pool->stats.nfastrels++;
+}
+
+#include <math.h>
+#include <stdio.h>
+
+JS_PUBLIC_API(void)
+JS_DumpArenaStats(FILE *fp)
+{
+ JSArenaStats *stats;
+ uint32 nallocs, nbytes;
+ double mean, variance, sigma;
+
+ for (stats = arena_stats_list; stats; stats = stats->next) {
+ nallocs = stats->nallocs;
+ if (nallocs != 0) {
+ nbytes = stats->nbytes;
+ mean = (double)nbytes / nallocs;
+ variance = stats->variance * nallocs - nbytes * nbytes;
+ if (variance < 0 || nallocs == 1)
+ variance = 0;
+ else
+ variance /= nallocs * (nallocs - 1);
+ sigma = sqrt(variance);
+ } else {
+ mean = variance = sigma = 0;
+ }
+
+ fprintf(fp, "\n%s allocation statistics:\n", stats->name);
+ fprintf(fp, " number of arenas: %u\n", stats->narenas);
+ fprintf(fp, " number of allocations: %u\n", stats->nallocs);
+ fprintf(fp, " number of free arena reclaims: %u\n", stats->nreclaims);
+ fprintf(fp, " number of malloc calls: %u\n", stats->nmallocs);
+ fprintf(fp, " number of deallocations: %u\n", stats->ndeallocs);
+ fprintf(fp, " number of allocation growths: %u\n", stats->ngrows);
+ fprintf(fp, " number of in-place growths: %u\n", stats->ninplace);
+ fprintf(fp, " number of realloc'ing growths: %u\n", stats->nreallocs);
+ fprintf(fp, "number of released allocations: %u\n", stats->nreleases);
+ fprintf(fp, " number of fast releases: %u\n", stats->nfastrels);
+ fprintf(fp, " total bytes allocated: %u\n", stats->nbytes);
+ fprintf(fp, " mean allocation size: %g\n", mean);
+ fprintf(fp, " standard deviation: %g\n", sigma);
+ fprintf(fp, " maximum allocation size: %u\n", stats->maxalloc);
+ }
+}
+#endif /* JS_ARENAMETER */
diff --git a/src/third_party/js-1.7/jsarena.h b/src/third_party/js-1.7/jsarena.h
new file mode 100644
index 00000000000..8be15d0871f
--- /dev/null
+++ b/src/third_party/js-1.7/jsarena.h
@@ -0,0 +1,303 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsarena_h___
+#define jsarena_h___
+/*
+ * Lifetime-based fast allocation, inspired by much prior art, including
+ * "Fast Allocation and Deallocation of Memory Based on Object Lifetimes"
+ * David R. Hanson, Software -- Practice and Experience, Vol. 20(1).
+ *
+ * Also supports LIFO allocation (JS_ARENA_MARK/JS_ARENA_RELEASE).
+ */
+#include <stdlib.h>
+#include "jstypes.h"
+#include "jscompat.h"
+
+JS_BEGIN_EXTERN_C
+
+typedef struct JSArena JSArena;
+typedef struct JSArenaPool JSArenaPool;
+
+struct JSArena {
+ JSArena *next; /* next arena for this lifetime */
+ jsuword base; /* aligned base address, follows this header */
+ jsuword limit; /* one beyond last byte in arena */
+ jsuword avail; /* points to next available byte */
+};
+
+#ifdef JS_ARENAMETER
+typedef struct JSArenaStats JSArenaStats;
+
+struct JSArenaStats {
+ JSArenaStats *next; /* next in arenaStats list */
+ char *name; /* name for debugging */
+ uint32 narenas; /* number of arenas in pool */
+ uint32 nallocs; /* number of JS_ARENA_ALLOCATE() calls */
+ uint32 nmallocs; /* number of malloc() calls */
+ uint32 ndeallocs; /* number of lifetime deallocations */
+ uint32 ngrows; /* number of JS_ARENA_GROW() calls */
+ uint32 ninplace; /* number of in-place growths */
+ uint32 nreallocs; /* number of arena grow extending reallocs */
+ uint32 nreleases; /* number of JS_ARENA_RELEASE() calls */
+ uint32 nfastrels; /* number of "fast path" releases */
+ size_t nbytes; /* total bytes allocated */
+ size_t maxalloc; /* maximum allocation size in bytes */
+ double variance; /* size variance accumulator */
+};
+#endif
+
+struct JSArenaPool {
+ JSArena first; /* first arena in pool list */
+ JSArena *current; /* arena from which to allocate space */
+ size_t arenasize; /* net exact size of a new arena */
+ jsuword mask; /* alignment mask (power-of-2 - 1) */
+#ifdef JS_ARENAMETER
+ JSArenaStats stats;
+#endif
+};
+
+/*
+ * If the including .c file uses only one power-of-2 alignment, it may define
+ * JS_ARENA_CONST_ALIGN_MASK to the alignment mask and save a few instructions
+ * per ALLOCATE and GROW.
+ */
+#ifdef JS_ARENA_CONST_ALIGN_MASK
+#define JS_ARENA_ALIGN(pool, n) (((jsuword)(n) + JS_ARENA_CONST_ALIGN_MASK) \
+ & ~(jsuword)JS_ARENA_CONST_ALIGN_MASK)
+
+#define JS_INIT_ARENA_POOL(pool, name, size) \
+ JS_InitArenaPool(pool, name, size, JS_ARENA_CONST_ALIGN_MASK + 1)
+#else
+#define JS_ARENA_ALIGN(pool, n) (((jsuword)(n) + (pool)->mask) & ~(pool)->mask)
+#endif
+
+#define JS_ARENA_ALLOCATE(p, pool, nb) \
+ JS_ARENA_ALLOCATE_CAST(p, void *, pool, nb)
+
+#define JS_ARENA_ALLOCATE_TYPE(p, type, pool) \
+ JS_ARENA_ALLOCATE_COMMON(p, type *, pool, sizeof(type), 0)
+
+#define JS_ARENA_ALLOCATE_CAST(p, type, pool, nb) \
+ JS_ARENA_ALLOCATE_COMMON(p, type, pool, nb, _nb > _a->limit)
+
+/*
+ * NB: In JS_ARENA_ALLOCATE_CAST and JS_ARENA_GROW_CAST, always subtract _nb
+ * from a->limit rather than adding _nb to _p, to avoid overflowing a 32-bit
+ * address space (possible when running a 32-bit program on a 64-bit system
+ * where the kernel maps the heap up against the top of the 32-bit address
+ * space).
+ *
+ * Thanks to Juergen Kreileder <jk@blackdown.de>, who brought this up in
+ * https://bugzilla.mozilla.org/show_bug.cgi?id=279273.
+ */
+#define JS_ARENA_ALLOCATE_COMMON(p, type, pool, nb, guard) \
+ JS_BEGIN_MACRO \
+ JSArena *_a = (pool)->current; \
+ size_t _nb = JS_ARENA_ALIGN(pool, nb); \
+ jsuword _p = _a->avail; \
+ if ((guard) || _p > _a->limit - _nb) \
+ _p = (jsuword)JS_ArenaAllocate(pool, _nb); \
+ else \
+ _a->avail = _p + _nb; \
+ p = (type) _p; \
+ JS_ArenaCountAllocation(pool, nb); \
+ JS_END_MACRO
+
+#define JS_ARENA_GROW(p, pool, size, incr) \
+ JS_ARENA_GROW_CAST(p, void *, pool, size, incr)
+
+#define JS_ARENA_GROW_CAST(p, type, pool, size, incr) \
+ JS_BEGIN_MACRO \
+ JSArena *_a = (pool)->current; \
+ if (_a->avail == (jsuword)(p) + JS_ARENA_ALIGN(pool, size)) { \
+ size_t _nb = (size) + (incr); \
+ _nb = JS_ARENA_ALIGN(pool, _nb); \
+ if (_a->limit >= _nb && (jsuword)(p) <= _a->limit - _nb) { \
+ _a->avail = (jsuword)(p) + _nb; \
+ JS_ArenaCountInplaceGrowth(pool, size, incr); \
+ } else if ((jsuword)(p) == _a->base) { \
+ p = (type) JS_ArenaRealloc(pool, p, size, incr); \
+ } else { \
+ p = (type) JS_ArenaGrow(pool, p, size, incr); \
+ } \
+ } else { \
+ p = (type) JS_ArenaGrow(pool, p, size, incr); \
+ } \
+ JS_ArenaCountGrowth(pool, size, incr); \
+ JS_END_MACRO
+
+#define JS_ARENA_MARK(pool) ((void *) (pool)->current->avail)
+#define JS_UPTRDIFF(p,q) ((jsuword)(p) - (jsuword)(q))
+
+#ifdef DEBUG
+#define JS_FREE_PATTERN 0xDA
+#define JS_CLEAR_UNUSED(a) (JS_ASSERT((a)->avail <= (a)->limit), \
+ memset((void*)(a)->avail, JS_FREE_PATTERN, \
+ (a)->limit - (a)->avail))
+#define JS_CLEAR_ARENA(a) memset((void*)(a), JS_FREE_PATTERN, \
+ (a)->limit - (jsuword)(a))
+#else
+#define JS_CLEAR_UNUSED(a) /* nothing */
+#define JS_CLEAR_ARENA(a) /* nothing */
+#endif
+
+#define JS_ARENA_RELEASE(pool, mark) \
+ JS_BEGIN_MACRO \
+ char *_m = (char *)(mark); \
+ JSArena *_a = (pool)->current; \
+ if (_a != &(pool)->first && \
+ JS_UPTRDIFF(_m, _a->base) <= JS_UPTRDIFF(_a->avail, _a->base)) { \
+ _a->avail = (jsuword)JS_ARENA_ALIGN(pool, _m); \
+ JS_ASSERT(_a->avail <= _a->limit); \
+ JS_CLEAR_UNUSED(_a); \
+ JS_ArenaCountRetract(pool, _m); \
+ } else { \
+ JS_ArenaRelease(pool, _m); \
+ } \
+ JS_ArenaCountRelease(pool, _m); \
+ JS_END_MACRO
+
+#ifdef JS_ARENAMETER
+#define JS_COUNT_ARENA(pool,op) ((pool)->stats.narenas op)
+#else
+#define JS_COUNT_ARENA(pool,op)
+#endif
+
+#define JS_ARENA_DESTROY(pool, a, pnext) \
+ JS_BEGIN_MACRO \
+ JS_COUNT_ARENA(pool,--); \
+ if ((pool)->current == (a)) (pool)->current = &(pool)->first; \
+ *(pnext) = (a)->next; \
+ JS_CLEAR_ARENA(a); \
+ free(a); \
+ (a) = NULL; \
+ JS_END_MACRO
+
+/*
+ * Initialize an arena pool with the given name for debugging and metering,
+ * with a minimum size per arena of size bytes.
+ */
+extern JS_PUBLIC_API(void)
+JS_InitArenaPool(JSArenaPool *pool, const char *name, size_t size,
+ size_t align);
+
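+/*
+ * Sketch of typical pool use with the macros above; the pool name, arena
+ * size and alignment are arbitrary example values:
+ *
+ *   JSArenaPool pool;
+ *   void *mark, *p;
+ *
+ *   JS_InitArenaPool(&pool, "temp", 1024, sizeof(double));
+ *   mark = JS_ARENA_MARK(&pool);
+ *   JS_ARENA_ALLOCATE(p, &pool, 64);      // p is NULL if allocation fails
+ *   // ... use p ...
+ *   JS_ARENA_RELEASE(&pool, mark);        // LIFO release back to the mark
+ *   JS_FinishArenaPool(&pool);
+ */
+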
+/*
+ * Free the arenas in pool. The user may continue to allocate from pool
+ * after calling this function. There is no need to call JS_InitArenaPool()
+ * again unless JS_FinishArenaPool(pool) has been called.
+ */
+extern JS_PUBLIC_API(void)
+JS_FreeArenaPool(JSArenaPool *pool);
+
+/*
+ * Free the arenas in pool and finish using it altogether.
+ */
+extern JS_PUBLIC_API(void)
+JS_FinishArenaPool(JSArenaPool *pool);
+
+/*
+ * Deprecated do-nothing function.
+ */
+extern JS_PUBLIC_API(void)
+JS_ArenaFinish(void);
+
+/*
+ * Deprecated do-nothing function.
+ */
+extern JS_PUBLIC_API(void)
+JS_ArenaShutDown(void);
+
+/*
+ * Friend functions used by the JS_ARENA_*() macros.
+ */
+extern JS_PUBLIC_API(void *)
+JS_ArenaAllocate(JSArenaPool *pool, size_t nb);
+
+extern JS_PUBLIC_API(void *)
+JS_ArenaRealloc(JSArenaPool *pool, void *p, size_t size, size_t incr);
+
+extern JS_PUBLIC_API(void *)
+JS_ArenaGrow(JSArenaPool *pool, void *p, size_t size, size_t incr);
+
+extern JS_PUBLIC_API(void)
+JS_ArenaRelease(JSArenaPool *pool, char *mark);
+
+/*
+ * Function to be used directly when an allocation has likely grown to consume
+ * an entire JSArena, in which case the arena is returned to the malloc heap.
+ */
+extern JS_PUBLIC_API(void)
+JS_ArenaFreeAllocation(JSArenaPool *pool, void *p, size_t size);
+
+#ifdef JS_ARENAMETER
+
+#include <stdio.h>
+
+extern JS_PUBLIC_API(void)
+JS_ArenaCountAllocation(JSArenaPool *pool, size_t nb);
+
+extern JS_PUBLIC_API(void)
+JS_ArenaCountInplaceGrowth(JSArenaPool *pool, size_t size, size_t incr);
+
+extern JS_PUBLIC_API(void)
+JS_ArenaCountGrowth(JSArenaPool *pool, size_t size, size_t incr);
+
+extern JS_PUBLIC_API(void)
+JS_ArenaCountRelease(JSArenaPool *pool, char *mark);
+
+extern JS_PUBLIC_API(void)
+JS_ArenaCountRetract(JSArenaPool *pool, char *mark);
+
+extern JS_PUBLIC_API(void)
+JS_DumpArenaStats(FILE *fp);
+
+#else /* !JS_ARENAMETER */
+
+#define JS_ArenaCountAllocation(ap, nb) /* nothing */
+#define JS_ArenaCountInplaceGrowth(ap, size, incr) /* nothing */
+#define JS_ArenaCountGrowth(ap, size, incr) /* nothing */
+#define JS_ArenaCountRelease(ap, mark) /* nothing */
+#define JS_ArenaCountRetract(ap, mark) /* nothing */
+
+#endif /* !JS_ARENAMETER */
+
+JS_END_EXTERN_C
+
+#endif /* jsarena_h___ */
diff --git a/src/third_party/js-1.7/jsarray.c b/src/third_party/js-1.7/jsarray.c
new file mode 100644
index 00000000000..532a1be389e
--- /dev/null
+++ b/src/third_party/js-1.7/jsarray.c
@@ -0,0 +1,1864 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set sw=4 ts=8 et tw=80:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS array class.
+ */
+#include "jsstddef.h"
+#include <stdlib.h>
+#include <string.h>
+#include "jstypes.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsapi.h"
+#include "jsarray.h"
+#include "jsatom.h"
+#include "jsbool.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsfun.h"
+#include "jsgc.h"
+#include "jsinterp.h"
+#include "jslock.h"
+#include "jsnum.h"
+#include "jsobj.h"
+#include "jsstr.h"
+
+/* 2^32 - 1 as a number and a string */
+#define MAXINDEX 4294967295u
+#define MAXSTR "4294967295"
+
+/*
+ * Determine if the id represents an array index or an XML property index.
+ *
+ * An id is an array index according to ECMA by (15.4):
+ *
+ * "Array objects give special treatment to a certain class of property names.
+ * A property name P (in the form of a string value) is an array index if and
+ * only if ToString(ToUint32(P)) is equal to P and ToUint32(P) is not equal
+ * to 2^32-1."
+ *
+ * In our implementation, it would be sufficient to check for JSVAL_IS_INT(id)
+ * except that by using signed 32-bit integers we miss the top half of the
+ * valid range. This function checks the string representation itself; note
+ * that calling a standard conversion routine might allow strings such as
+ * "08" or "4.0" as array indices, which they are not.
+ */
+JSBool
+js_IdIsIndex(jsval id, jsuint *indexp)
+{
+ JSString *str;
+ jschar *cp;
+
+ if (JSVAL_IS_INT(id)) {
+ jsint i;
+ i = JSVAL_TO_INT(id);
+ if (i < 0)
+ return JS_FALSE;
+ *indexp = (jsuint)i;
+ return JS_TRUE;
+ }
+
+ /* NB: id should be a string, but jsxml.c may call us with an object id. */
+ if (!JSVAL_IS_STRING(id))
+ return JS_FALSE;
+
+ str = JSVAL_TO_STRING(id);
+ cp = JSSTRING_CHARS(str);
+ if (JS7_ISDEC(*cp) && JSSTRING_LENGTH(str) < sizeof(MAXSTR)) {
+ jsuint index = JS7_UNDEC(*cp++);
+ jsuint oldIndex = 0;
+ jsuint c = 0;
+ if (index != 0) {
+ while (JS7_ISDEC(*cp)) {
+ oldIndex = index;
+ c = JS7_UNDEC(*cp);
+ index = 10*index + c;
+ cp++;
+ }
+ }
+
+ /* Ensure that all characters were consumed and we didn't overflow. */
+ if (*cp == 0 &&
+ (oldIndex < (MAXINDEX / 10) ||
+ (oldIndex == (MAXINDEX / 10) && c < (MAXINDEX % 10))))
+ {
+ *indexp = index;
+ return JS_TRUE;
+ }
+ }
+ return JS_FALSE;
+}
+
+static JSBool
+ValueIsLength(JSContext *cx, jsval v, jsuint *lengthp)
+{
+ jsint i;
+ jsdouble d;
+
+ if (JSVAL_IS_INT(v)) {
+ i = JSVAL_TO_INT(v);
+ if (i < 0) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_ARRAY_LENGTH);
+ return JS_FALSE;
+ }
+ *lengthp = (jsuint) i;
+ return JS_TRUE;
+ }
+
+ if (!js_ValueToNumber(cx, v, &d)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_ARRAY_LENGTH);
+ return JS_FALSE;
+ }
+ if (!js_DoubleToECMAUint32(cx, d, (uint32 *)lengthp)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_ARRAY_LENGTH);
+ return JS_FALSE;
+ }
+ if (JSDOUBLE_IS_NaN(d) || d != *lengthp) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_ARRAY_LENGTH);
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+
+JSBool
+js_GetLengthProperty(JSContext *cx, JSObject *obj, jsuint *lengthp)
+{
+ JSTempValueRooter tvr;
+ jsid id;
+ JSBool ok;
+ jsint i;
+
+ JS_PUSH_SINGLE_TEMP_ROOT(cx, JSVAL_NULL, &tvr);
+ id = ATOM_TO_JSID(cx->runtime->atomState.lengthAtom);
+ ok = OBJ_GET_PROPERTY(cx, obj, id, &tvr.u.value);
+ if (ok) {
+ /*
+ * Short-circuit, because js_ValueToECMAUint32 fails when called
+ * during init time.
+ */
+ if (JSVAL_IS_INT(tvr.u.value)) {
+ i = JSVAL_TO_INT(tvr.u.value);
+ *lengthp = (jsuint)i; /* jsuint cast does ToUint32 */
+ } else {
+ ok = js_ValueToECMAUint32(cx, tvr.u.value, (uint32 *)lengthp);
+ }
+ }
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return ok;
+}
+
+static JSBool
+IndexToValue(JSContext *cx, jsuint index, jsval *vp)
+{
+ if (index <= JSVAL_INT_MAX) {
+ *vp = INT_TO_JSVAL(index);
+ return JS_TRUE;
+ }
+ return js_NewDoubleValue(cx, (jsdouble)index, vp);
+}
+
+static JSBool
+BigIndexToId(JSContext *cx, JSObject *obj, jsuint index, JSBool createAtom,
+ jsid *idp)
+{
+ jschar buf[10], *start;
+ JSClass *clasp;
+ JSAtom *atom;
+ JS_STATIC_ASSERT((jsuint)-1 == 4294967295U);
+
+ JS_ASSERT(index > JSVAL_INT_MAX);
+
+ start = JS_ARRAY_END(buf);
+ do {
+ --start;
+ *start = (jschar)('0' + index % 10);
+ index /= 10;
+ } while (index != 0);
+
+ /*
+ * Skip the atomization if the class is known to store atoms corresponding
+ * to big indexes together with elements. In such a case we know that the
+ * array does not have an element at the given index if its atom does not
+ * exist.
+ */
+ if (!createAtom &&
+ ((clasp = OBJ_GET_CLASS(cx, obj)) == &js_ArrayClass ||
+ clasp == &js_ArgumentsClass ||
+ clasp == &js_ObjectClass)) {
+ atom = js_GetExistingStringAtom(cx, start, JS_ARRAY_END(buf) - start);
+ if (!atom) {
+ *idp = JSVAL_VOID;
+ return JS_TRUE;
+ }
+ } else {
+ atom = js_AtomizeChars(cx, start, JS_ARRAY_END(buf) - start, 0);
+ if (!atom)
+ return JS_FALSE;
+ }
+
+ *idp = ATOM_TO_JSID(atom);
+ return JS_TRUE;
+}
+
+/*
+ * If the property at the given index exists, get its value into location
+ * pointed by vp and set *hole to false. Otherwise set *hole to true and *vp
+ * to JSVAL_VOID. This function assumes that the location pointed by vp is
+ * properly rooted and can be used as GC-protected storage for temporaries.
+ */
+static JSBool
+GetArrayElement(JSContext *cx, JSObject *obj, jsuint index, JSBool *hole,
+ jsval *vp)
+{
+ jsid id;
+ JSObject *obj2;
+ JSProperty *prop;
+
+ if (index <= JSVAL_INT_MAX) {
+ id = INT_TO_JSID(index);
+ } else {
+ if (!BigIndexToId(cx, obj, index, JS_FALSE, &id))
+ return JS_FALSE;
+ if (id == JSVAL_VOID) {
+ *hole = JS_TRUE;
+ *vp = JSVAL_VOID;
+ return JS_TRUE;
+ }
+ }
+
+ if (!OBJ_LOOKUP_PROPERTY(cx, obj, id, &obj2, &prop))
+ return JS_FALSE;
+ if (!prop) {
+ *hole = JS_TRUE;
+ *vp = JSVAL_VOID;
+ } else {
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ if (!OBJ_GET_PROPERTY(cx, obj, id, vp))
+ return JS_FALSE;
+ *hole = JS_FALSE;
+ }
+ return JS_TRUE;
+}
+
+/*
+ * Set the value of the property at the given index to v assuming v is rooted.
+ */
+static JSBool
+SetArrayElement(JSContext *cx, JSObject *obj, jsuint index, jsval v)
+{
+ jsid id;
+
+ if (index <= JSVAL_INT_MAX) {
+ id = INT_TO_JSID(index);
+ } else {
+ if (!BigIndexToId(cx, obj, index, JS_TRUE, &id))
+ return JS_FALSE;
+ JS_ASSERT(id != JSVAL_VOID);
+ }
+ return OBJ_SET_PROPERTY(cx, obj, id, &v);
+}
+
+static JSBool
+DeleteArrayElement(JSContext *cx, JSObject *obj, jsuint index)
+{
+ jsid id;
+ jsval junk;
+
+ if (index <= JSVAL_INT_MAX) {
+ id = INT_TO_JSID(index);
+ } else {
+ if (!BigIndexToId(cx, obj, index, JS_FALSE, &id))
+ return JS_FALSE;
+ if (id == JSVAL_VOID)
+ return JS_TRUE;
+ }
+ return OBJ_DELETE_PROPERTY(cx, obj, id, &junk);
+}
+
+/*
+ * When hole is true, delete the property at the given index. Otherwise set
+ * its value to v assuming v is rooted.
+ */
+static JSBool
+SetOrDeleteArrayElement(JSContext *cx, JSObject *obj, jsuint index,
+ JSBool hole, jsval v)
+{
+ if (hole) {
+ JS_ASSERT(v == JSVAL_VOID);
+ return DeleteArrayElement(cx, obj, index);
+ } else {
+ return SetArrayElement(cx, obj, index, v);
+ }
+}
+
+
+JSBool
+js_SetLengthProperty(JSContext *cx, JSObject *obj, jsuint length)
+{
+ jsval v;
+ jsid id;
+
+ if (!IndexToValue(cx, length, &v))
+ return JS_FALSE;
+ id = ATOM_TO_JSID(cx->runtime->atomState.lengthAtom);
+ return OBJ_SET_PROPERTY(cx, obj, id, &v);
+}
+
+JSBool
+js_HasLengthProperty(JSContext *cx, JSObject *obj, jsuint *lengthp)
+{
+ JSErrorReporter older;
+ JSTempValueRooter tvr;
+ jsid id;
+ JSBool ok;
+
+ older = JS_SetErrorReporter(cx, NULL);
+ JS_PUSH_SINGLE_TEMP_ROOT(cx, JSVAL_NULL, &tvr);
+ id = ATOM_TO_JSID(cx->runtime->atomState.lengthAtom);
+ ok = OBJ_GET_PROPERTY(cx, obj, id, &tvr.u.value);
+ JS_SetErrorReporter(cx, older);
+ if (ok)
+ ok = ValueIsLength(cx, tvr.u.value, lengthp);
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return ok;
+}
+
+JSBool
+js_IsArrayLike(JSContext *cx, JSObject *obj, JSBool *answerp, jsuint *lengthp)
+{
+ JSClass *clasp;
+
+ clasp = OBJ_GET_CLASS(cx, obj);
+ *answerp = (clasp == &js_ArgumentsClass || clasp == &js_ArrayClass);
+ if (!*answerp) {
+ *lengthp = 0;
+ return JS_TRUE;
+ }
+ return js_GetLengthProperty(cx, obj, lengthp);
+}
+
+/*
+ * This get function is specific to Array.prototype.length and other array
+ * instance length properties. It calls back through the class get function
+ * in case some magic happens there (see call_getProperty in jsfun.c).
+ */
+static JSBool
+array_length_getter(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ return OBJ_GET_CLASS(cx, obj)->getProperty(cx, obj, id, vp);
+}
+
+static JSBool
+array_length_setter(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ jsuint newlen, oldlen, gap, index;
+ jsid id2;
+ jsval junk;
+ JSObject *iter;
+ JSTempValueRooter tvr;
+ JSBool ok;
+
+ if (!ValueIsLength(cx, *vp, &newlen))
+ return JS_FALSE;
+ if (!js_GetLengthProperty(cx, obj, &oldlen))
+ return JS_FALSE;
+ if (oldlen > newlen) {
+ if (oldlen - newlen < (1 << 24)) {
+ do {
+ --oldlen;
+ if (!DeleteArrayElement(cx, obj, oldlen))
+ return JS_FALSE;
+ } while (oldlen != newlen);
+ } else {
+ /*
+ * We are going to remove a lot of indexes in a presumably sparse
+ * array. So instead of looping through indexes between newlen and
+ * oldlen, we iterate through all properties and remove those that
+ * correspond to indexes from the [newlen, oldlen) range.
+ * See bug 322135.
+ */
+ iter = JS_NewPropertyIterator(cx, obj);
+ if (!iter)
+ return JS_FALSE;
+
+ /* Protect iter against GC in OBJ_DELETE_PROPERTY. */
+ JS_PUSH_TEMP_ROOT_OBJECT(cx, iter, &tvr);
+ gap = oldlen - newlen;
+ for (;;) {
+ ok = JS_NextProperty(cx, iter, &id2);
+ if (!ok)
+ break;
+ if (id2 == JSVAL_VOID)
+ break;
+ if (js_IdIsIndex(id2, &index) && index - newlen < gap) {
+ ok = OBJ_DELETE_PROPERTY(cx, obj, id2, &junk);
+ if (!ok)
+ break;
+ }
+ }
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ if (!ok)
+ return JS_FALSE;
+ }
+ }
+ return IndexToValue(cx, newlen, vp);
+}
+
+static JSBool
+array_addProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ jsuint index, length;
+
+ if (!js_IdIsIndex(id, &index))
+ return JS_TRUE;
+ if (!js_GetLengthProperty(cx, obj, &length))
+ return JS_FALSE;
+ if (index >= length) {
+ length = index + 1;
+ return js_SetLengthProperty(cx, obj, length);
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+array_convert(JSContext *cx, JSObject *obj, JSType type, jsval *vp)
+{
+ return js_TryValueOf(cx, obj, type, vp);
+}
+
+JSClass js_ArrayClass = {
+ "Array",
+ JSCLASS_HAS_CACHED_PROTO(JSProto_Array),
+ array_addProperty, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub, array_convert, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+};
+
+enum ArrayToStringOp {
+ TO_STRING,
+ TO_LOCALE_STRING,
+ TO_SOURCE
+};
+
+/*
+ * When op is TO_STRING or TO_LOCALE_STRING, sep is the separator to use;
+ * if sep is NULL, "," is used.
+ * When op is TO_SOURCE, sep must be NULL.
+ */
+static JSBool
+array_join_sub(JSContext *cx, JSObject *obj, enum ArrayToStringOp op,
+ JSString *sep, jsval *rval)
+{
+ JSBool ok, hole;
+ jsuint length, index;
+ jschar *chars, *ochars;
+ size_t nchars, growth, seplen, tmplen, extratail;
+ const jschar *sepstr;
+ JSString *str;
+ JSHashEntry *he;
+ JSTempValueRooter tvr;
+ JSAtom *atom;
+ int stackDummy;
+
+ if (!JS_CHECK_STACK_SIZE(cx, stackDummy)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_OVER_RECURSED);
+ return JS_FALSE;
+ }
+
+ ok = js_GetLengthProperty(cx, obj, &length);
+ if (!ok)
+ return JS_FALSE;
+
+ he = js_EnterSharpObject(cx, obj, NULL, &chars);
+ if (!he)
+ return JS_FALSE;
+#ifdef DEBUG
+ growth = (size_t) -1;
+#endif
+
+ if (op == TO_SOURCE) {
+ if (IS_SHARP(he)) {
+#if JS_HAS_SHARP_VARS
+ nchars = js_strlen(chars);
+#else
+ chars[0] = '[';
+ chars[1] = ']';
+ chars[2] = 0;
+ nchars = 2;
+#endif
+ goto make_string;
+ }
+
+ /*
+ * Always allocate 2 extra chars for closing ']' and terminating 0
+ * and then preallocate 1 + extratail to include starting '['.
+ */
+ extratail = 2;
+ growth = (1 + extratail) * sizeof(jschar);
+ if (!chars) {
+ nchars = 0;
+ chars = (jschar *) malloc(growth);
+ if (!chars)
+ goto done;
+ } else {
+ MAKE_SHARP(he);
+ nchars = js_strlen(chars);
+ growth += nchars * sizeof(jschar);
+ chars = (jschar *)realloc((ochars = chars), growth);
+ if (!chars) {
+ free(ochars);
+ goto done;
+ }
+ }
+ chars[nchars++] = '[';
+ JS_ASSERT(sep == NULL);
+ sepstr = NULL; /* indicates to use ", " as separator */
+ seplen = 2;
+ } else {
+ /*
+ * Free any sharp variable definition in chars. Normally, we would
+ * MAKE_SHARP(he) so that only the first sharp variable annotation is
+ * a definition, and all the rest are references, but in the current
+ * case of (op != TO_SOURCE), we don't need chars at all.
+ */
+ if (chars)
+ JS_free(cx, chars);
+ chars = NULL;
+ nchars = 0;
+ extratail = 1; /* allocate extra char for terminating 0 */
+
+ /* Return the empty string on a cycle as well as on empty join. */
+ if (IS_BUSY(he) || length == 0) {
+ js_LeaveSharpObject(cx, NULL);
+ *rval = JS_GetEmptyStringValue(cx);
+ return ok;
+ }
+
+ /* Flag he as BUSY so we can distinguish a cycle from a join-point. */
+ MAKE_BUSY(he);
+
+ if (sep) {
+ sepstr = JSSTRING_CHARS(sep);
+ seplen = JSSTRING_LENGTH(sep);
+ } else {
+ sepstr = NULL; /* indicates to use "," as separator */
+ seplen = 1;
+ }
+ }
+
+ /* Use rval to locally root each element value as we loop and convert. */
+#define v (*rval)
+
+ for (index = 0; index < length; index++) {
+ ok = GetArrayElement(cx, obj, index, &hole, &v);
+ if (!ok)
+ goto done;
+ if (hole ||
+ (op != TO_SOURCE && (JSVAL_IS_VOID(v) || JSVAL_IS_NULL(v)))) {
+ str = cx->runtime->emptyString;
+ } else {
+ if (op == TO_LOCALE_STRING) {
+ atom = cx->runtime->atomState.toLocaleStringAtom;
+ JS_PUSH_TEMP_ROOT_OBJECT(cx, NULL, &tvr);
+ ok = js_ValueToObject(cx, v, &tvr.u.object) &&
+ js_TryMethod(cx, tvr.u.object, atom, 0, NULL, &v);
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ if (!ok)
+ goto done;
+ str = js_ValueToString(cx, v);
+ } else if (op == TO_STRING) {
+ str = js_ValueToString(cx, v);
+ } else {
+ JS_ASSERT(op == TO_SOURCE);
+ str = js_ValueToSource(cx, v);
+ }
+ if (!str) {
+ ok = JS_FALSE;
+ goto done;
+ }
+ }
+
+        /*
+         * Do not append a separator after the last element unless it is a
+         * hole and we are in toSource, in which case we append a single ",".
+         */
+ if (index + 1 == length)
+ seplen = (hole && op == TO_SOURCE) ? 1 : 0;
+
+ /* Allocate 1 at end for closing bracket and zero. */
+ tmplen = JSSTRING_LENGTH(str);
+ growth = nchars + tmplen + seplen + extratail;
+ if (nchars > growth || tmplen > growth ||
+ growth > (size_t)-1 / sizeof(jschar)) {
+ if (chars) {
+ free(chars);
+ chars = NULL;
+ }
+ goto done;
+ }
+ growth *= sizeof(jschar);
+ if (!chars) {
+ chars = (jschar *) malloc(growth);
+ if (!chars)
+ goto done;
+ } else {
+ chars = (jschar *) realloc((ochars = chars), growth);
+ if (!chars) {
+ free(ochars);
+ goto done;
+ }
+ }
+
+ js_strncpy(&chars[nchars], JSSTRING_CHARS(str), tmplen);
+ nchars += tmplen;
+
+ if (seplen) {
+ if (sepstr) {
+ js_strncpy(&chars[nchars], sepstr, seplen);
+ } else {
+ JS_ASSERT(seplen == 1 || seplen == 2);
+ chars[nchars] = ',';
+ if (seplen == 2)
+ chars[nchars + 1] = ' ';
+ }
+ nchars += seplen;
+ }
+ }
+
+ done:
+ if (op == TO_SOURCE) {
+ if (chars)
+ chars[nchars++] = ']';
+ } else {
+ CLEAR_BUSY(he);
+ }
+ js_LeaveSharpObject(cx, NULL);
+ if (!ok) {
+ if (chars)
+ free(chars);
+ return ok;
+ }
+
+#undef v
+
+ make_string:
+ if (!chars) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ chars[nchars] = 0;
+ JS_ASSERT(growth == (size_t)-1 || (nchars + 1) * sizeof(jschar) == growth);
+ str = js_NewString(cx, chars, nchars, 0);
+ if (!str) {
+ free(chars);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
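The join/toSource path above grows a single flat jschar buffer, checking every size computation against size_t overflow before it multiplies by sizeof(jschar) and reallocates. A minimal standalone sketch of that append-with-overflow-guard pattern over plain char; append_n and its signature are hypothetical, not engine API:

    #include <stdlib.h>
    #include <string.h>

    /*
     * Append n bytes of src to the growing buffer *bufp (current length
     * *lenp), keeping it 0-terminated.  Returns 1 on success, 0 on
     * overflow or out-of-memory, in the spirit of the size guard in
     * array_join_sub.
     */
    static int
    append_n(char **bufp, size_t *lenp, const char *src, size_t n)
    {
        char *nbuf;
        size_t growth;

        if (n > (size_t)-1 - 1 - *lenp)     /* *lenp + n + 1 would overflow */
            return 0;
        growth = *lenp + n + 1;             /* +1 for the terminating 0 */
        nbuf = (char *) realloc(*bufp, growth);
        if (!nbuf)
            return 0;
        memcpy(nbuf + *lenp, src, n);
        nbuf[*lenp + n] = 0;
        *bufp = nbuf;
        *lenp += n;
        return 1;
    }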
+#if JS_HAS_TOSOURCE
+static JSBool
+array_toSource(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ return array_join_sub(cx, obj, TO_SOURCE, NULL, rval);
+}
+#endif
+
+static JSBool
+array_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ return array_join_sub(cx, obj, TO_STRING, NULL, rval);
+}
+
+static JSBool
+array_toLocaleString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+    /*
+     * Passing NULL here, which means the "," separator. We need a way to
+     * get a locale-specific separator.
+     */
+ return array_join_sub(cx, obj, TO_LOCALE_STRING, NULL, rval);
+}
+
+static JSBool
+InitArrayElements(JSContext *cx, JSObject *obj, jsuint start, jsuint end,
+ jsval *vector)
+{
+ while (start != end) {
+ if (!SetArrayElement(cx, obj, start++, *vector++))
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+InitArrayObject(JSContext *cx, JSObject *obj, jsuint length, jsval *vector)
+{
+ jsval v;
+ jsid id;
+
+ if (!IndexToValue(cx, length, &v))
+ return JS_FALSE;
+ id = ATOM_TO_JSID(cx->runtime->atomState.lengthAtom);
+ if (!OBJ_DEFINE_PROPERTY(cx, obj, id, v,
+ array_length_getter, array_length_setter,
+ JSPROP_PERMANENT,
+ NULL)) {
+ return JS_FALSE;
+ }
+ if (!vector)
+ return JS_TRUE;
+ return InitArrayElements(cx, obj, 0, length, vector);
+}
+
+/*
+ * Perl-inspired join, reverse, and sort.
+ */
+static JSBool
+array_join(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str;
+
+ if (JSVAL_IS_VOID(argv[0])) {
+ str = NULL;
+ } else {
+ str = js_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(str);
+ }
+ return array_join_sub(cx, obj, TO_STRING, str, rval);
+}
+
+static JSBool
+array_reverse(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsuint len, half, i;
+ JSBool hole, hole2;
+ jsval *tmproot, *tmproot2;
+
+ if (!js_GetLengthProperty(cx, obj, &len))
+ return JS_FALSE;
+
+    /*
+     * Use argv[argc] and argv[argc + 1] as local roots to temporarily hold
+     * array elements for a GC-safe swap.
+     */
+ tmproot = argv + argc;
+ tmproot2 = argv + argc + 1;
+ half = len / 2;
+ for (i = 0; i < half; i++) {
+ if (!GetArrayElement(cx, obj, i, &hole, tmproot) ||
+ !GetArrayElement(cx, obj, len - i - 1, &hole2, tmproot2) ||
+ !SetOrDeleteArrayElement(cx, obj, len - i - 1, hole, *tmproot) ||
+ !SetOrDeleteArrayElement(cx, obj, i, hole2, *tmproot2)) {
+ return JS_FALSE;
+ }
+ }
+ *rval = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+}
+
+typedef struct HSortArgs {
+ void *vec;
+ size_t elsize;
+ void *pivot;
+ JSComparator cmp;
+ void *arg;
+ JSBool fastcopy;
+} HSortArgs;
+
+static JSBool
+sort_compare(void *arg, const void *a, const void *b, int *result);
+
+static int
+sort_compare_strings(void *arg, const void *a, const void *b, int *result);
+
+static JSBool
+HeapSortHelper(JSBool building, HSortArgs *hsa, size_t lo, size_t hi)
+{
+ void *pivot, *vec, *vec2, *arg, *a, *b;
+ size_t elsize;
+ JSComparator cmp;
+ JSBool fastcopy;
+ size_t j, hiDiv2;
+ int cmp_result;
+
+ pivot = hsa->pivot;
+ vec = hsa->vec;
+ elsize = hsa->elsize;
+ vec2 = (char *)vec - 2 * elsize;
+ cmp = hsa->cmp;
+ arg = hsa->arg;
+
+ fastcopy = hsa->fastcopy;
+#define MEMCPY(p,q,n) \
+ (fastcopy ? (void)(*(jsval*)(p) = *(jsval*)(q)) : (void)memcpy(p, q, n))
+#define CALL_CMP(a, b) \
+ if (!cmp(arg, (a), (b), &cmp_result)) return JS_FALSE;
+
+ if (lo == 1) {
+ j = 2;
+ b = (char *)vec + elsize;
+ if (j < hi) {
+ CALL_CMP(vec, b);
+ if (cmp_result < 0)
+ j++;
+ }
+ a = (char *)vec + (hi - 1) * elsize;
+ b = (char *)vec2 + j * elsize;
+
+        /*
+         * During the sorting phase b points to a member of the heap that
+         * cannot be bigger than the biggest of vec[0] and vec[1], and
+         * cmp(a, b, arg) <= 0 always holds.
+         */
+ if (building || hi == 2) {
+ CALL_CMP(a, b);
+ if (cmp_result >= 0)
+ return JS_TRUE;
+ }
+
+ MEMCPY(pivot, a, elsize);
+ MEMCPY(a, b, elsize);
+ lo = j;
+ } else {
+ a = (char *)vec2 + lo * elsize;
+ MEMCPY(pivot, a, elsize);
+ }
+
+ hiDiv2 = hi/2;
+ while (lo <= hiDiv2) {
+ j = lo + lo;
+ a = (char *)vec2 + j * elsize;
+ b = (char *)vec + (j - 1) * elsize;
+ if (j < hi) {
+ CALL_CMP(a, b);
+ if (cmp_result < 0)
+ j++;
+ }
+ b = (char *)vec2 + j * elsize;
+ CALL_CMP(pivot, b);
+ if (cmp_result >= 0)
+ break;
+
+ a = (char *)vec2 + lo * elsize;
+ MEMCPY(a, b, elsize);
+ lo = j;
+ }
+
+ a = (char *)vec2 + lo * elsize;
+ MEMCPY(a, pivot, elsize);
+
+ return JS_TRUE;
+
+#undef CALL_CMP
+#undef MEMCPY
+
+}
+
+JSBool
+js_HeapSort(void *vec, size_t nel, void *pivot, size_t elsize,
+ JSComparator cmp, void *arg)
+{
+ HSortArgs hsa;
+ size_t i;
+
+ hsa.vec = vec;
+ hsa.elsize = elsize;
+ hsa.pivot = pivot;
+ hsa.cmp = cmp;
+ hsa.arg = arg;
+ hsa.fastcopy = (cmp == sort_compare || cmp == sort_compare_strings);
+
+ for (i = nel/2; i != 0; i--) {
+ if (!HeapSortHelper(JS_TRUE, &hsa, i, nel))
+ return JS_FALSE;
+ }
+ while (nel > 2) {
+ if (!HeapSortHelper(JS_FALSE, &hsa, 1, --nel))
+ return JS_FALSE;
+ }
+
+ return JS_TRUE;
+}
+
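js_HeapSort takes a JSComparator that can itself fail (the user's compare function may throw), so every comparison must be checked and its failure propagated out of the sort. A minimal sketch of that contract, using a hypothetical FallibleCmp typedef and an insertion sort purely for brevity:

    #include <stddef.h>
    #include <string.h>

    /* Same shape as JSComparator: write the ordering to *result and
     * return nonzero on success, zero if the comparison itself failed. */
    typedef int (*FallibleCmp)(void *arg, const void *a, const void *b,
                               int *result);

    /* Sort nel elements of elsize bytes using caller-provided scratch
     * space tmp (like js_HeapSort's pivot); any failed compare aborts
     * the whole sort. */
    static int
    fallible_sort(void *vec, size_t nel, size_t elsize, void *tmp,
                  FallibleCmp cmp, void *arg)
    {
        char *base = (char *) vec;
        size_t i, j;
        int r;

        for (i = 1; i < nel; i++) {
            memcpy(tmp, base + i * elsize, elsize);
            for (j = i; j > 0; j--) {
                if (!cmp(arg, base + (j - 1) * elsize, tmp, &r))
                    return 0;                   /* comparator failed */
                if (r <= 0)
                    break;
                memcpy(base + j * elsize, base + (j - 1) * elsize, elsize);
            }
            memcpy(base + j * elsize, tmp, elsize);
        }
        return 1;
    }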
+typedef struct CompareArgs {
+ JSContext *context;
+ jsval fval;
+ jsval *localroot; /* need one local root, for sort_compare */
+} CompareArgs;
+
+static JSBool
+sort_compare(void *arg, const void *a, const void *b, int *result)
+{
+ jsval av = *(const jsval *)a, bv = *(const jsval *)b;
+ CompareArgs *ca = (CompareArgs *) arg;
+ JSContext *cx = ca->context;
+ jsval fval;
+ JSBool ok;
+
+    /**
+     * array_sort deals with holes and undefs on its own, so they should
+     * not come here.
+     */
+ JS_ASSERT(av != JSVAL_VOID);
+ JS_ASSERT(bv != JSVAL_VOID);
+
+ *result = 0;
+ ok = JS_TRUE;
+ fval = ca->fval;
+ if (fval == JSVAL_NULL) {
+ JSString *astr, *bstr;
+
+ if (av != bv) {
+ /*
+ * Set our local root to astr in case the second js_ValueToString
+ * displaces the newborn root in cx, and the GC nests under that
+ * call. Don't bother guarding the local root store with an astr
+ * non-null test. If we tag null as a string, the GC will untag,
+ * null-test, and avoid dereferencing null.
+ */
+ astr = js_ValueToString(cx, av);
+ *ca->localroot = STRING_TO_JSVAL(astr);
+ if (astr && (bstr = js_ValueToString(cx, bv)))
+ *result = js_CompareStrings(astr, bstr);
+ else
+ ok = JS_FALSE;
+ }
+ } else {
+ jsdouble cmp;
+ jsval argv[2];
+
+ argv[0] = av;
+ argv[1] = bv;
+ ok = js_InternalCall(cx,
+ OBJ_GET_PARENT(cx, JSVAL_TO_OBJECT(fval)),
+ fval, 2, argv, ca->localroot);
+ if (ok) {
+ ok = js_ValueToNumber(cx, *ca->localroot, &cmp);
+
+ /* Clamp cmp to -1, 0, 1. */
+ if (ok) {
+ if (JSDOUBLE_IS_NaN(cmp)) {
+ /*
+ * XXX report some kind of error here? ECMA talks about
+ * 'consistent compare functions' that don't return NaN,
+ * but is silent about what the result should be. So we
+ * currently ignore it.
+ */
+ } else if (cmp != 0) {
+ *result = cmp > 0 ? 1 : -1;
+ }
+ }
+ }
+ }
+ return ok;
+}
+
+static int
+sort_compare_strings(void *arg, const void *a, const void *b, int *result)
+{
+ jsval av = *(const jsval *)a, bv = *(const jsval *)b;
+
+ *result = (int) js_CompareStrings(JSVAL_TO_STRING(av), JSVAL_TO_STRING(bv));
+ return JS_TRUE;
+}
+
+static JSBool
+array_sort(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsval fval, *vec, *pivotroot;
+ CompareArgs ca;
+ jsuint len, newlen, i, undefs;
+ JSTempValueRooter tvr;
+ JSBool hole, ok;
+
+ /*
+ * Optimize the default compare function case if all of obj's elements
+ * have values of type string.
+ */
+ JSBool all_strings;
+
+ if (argc > 0) {
+ if (JSVAL_IS_PRIMITIVE(argv[0])) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_SORT_ARG);
+ return JS_FALSE;
+ }
+ fval = argv[0];
+ all_strings = JS_FALSE; /* non-default compare function */
+ } else {
+ fval = JSVAL_NULL;
+ all_strings = JS_TRUE; /* check for all string values */
+ }
+
+ if (!js_GetLengthProperty(cx, obj, &len))
+ return JS_FALSE;
+ if (len == 0) {
+ *rval = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+ }
+
+ /*
+ * We need a temporary array of len jsvals to hold elements of the array.
+ * Check that its size does not overflow size_t, which would allow for
+ * indexing beyond the end of the malloc'd vector.
+ */
+ if (len > ((size_t) -1) / sizeof(jsval)) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+
+ vec = (jsval *) JS_malloc(cx, ((size_t) len) * sizeof(jsval));
+ if (!vec)
+ return JS_FALSE;
+
+ /*
+ * Initialize vec as a root. We will clear elements of vec one by
+ * one while increasing tvr.count when we know that the property at
+ * the corresponding index exists and its value must be rooted.
+ *
+     * In this way, when sorting a huge, mostly sparse array, we will not
+     * access the tail of vec corresponding to properties that do not
+     * exist, allowing the OS to avoid committing RAM. See bug 330812.
+ *
+ * After this point control must flow through label out: to exit.
+ */
+ JS_PUSH_TEMP_ROOT(cx, 0, vec, &tvr);
+
+    /*
+     * By ECMA 262, 15.4.4.11, a property that does not exist (which we
+     * call a "hole") always sorts greater than an existing property with
+     * value undefined, which in turn sorts greater than any other property.
+     * Thus to sort holes and undefs we simply count them, sort the rest of
+     * the elements, append the undefs after them, and then make holes
+     * after the undefs.
+     */
+ undefs = 0;
+ newlen = 0;
+ for (i = 0; i < len; i++) {
+ /* Clear vec[newlen] before including it in the rooted set. */
+ vec[newlen] = JSVAL_NULL;
+ tvr.count = newlen + 1;
+ ok = GetArrayElement(cx, obj, i, &hole, &vec[newlen]);
+ if (!ok)
+ goto out;
+
+ if (hole)
+ continue;
+
+ if (vec[newlen] == JSVAL_VOID) {
+ ++undefs;
+ continue;
+ }
+
+ /* We know JSVAL_IS_STRING yields 0 or 1, so avoid a branch via &=. */
+ all_strings &= JSVAL_IS_STRING(vec[newlen]);
+
+ ++newlen;
+ }
+
+ /* Here len == newlen + undefs + number_of_holes. */
+ ca.context = cx;
+ ca.fval = fval;
+ ca.localroot = argv + argc; /* local GC root for temporary string */
+ pivotroot = argv + argc + 1; /* local GC root for pivot val */
+ ok = js_HeapSort(vec, (size_t) newlen, pivotroot, sizeof(jsval),
+ all_strings ? sort_compare_strings : sort_compare,
+ &ca);
+ if (!ok)
+ goto out;
+
+ ok = InitArrayElements(cx, obj, 0, newlen, vec);
+ if (!ok)
+ goto out;
+
+ out:
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ JS_free(cx, vec);
+ if (!ok)
+ return JS_FALSE;
+
+    /* Set the undefs, which sort after the rest of the elements. */
+ while (undefs != 0) {
+ --undefs;
+ if (!SetArrayElement(cx, obj, newlen++, JSVAL_VOID))
+ return JS_FALSE;
+ }
+
+ /* Re-create any holes that sorted to the end of the array. */
+ while (len > newlen) {
+ if (!DeleteArrayElement(cx, obj, --len))
+ return JS_FALSE;
+ }
+ *rval = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+}
+
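As the comments above spell out, array_sort compacts the defined values to the front of vec, sorts only that prefix, then writes the undefs and re-creates the holes behind it. A standalone sketch of the same shape over ints, with a hypothetical UNDEF sentinel standing in for JSVAL_VOID (holes would be handled the same way one level up):

    #include <stdlib.h>

    #define UNDEF (-1)              /* hypothetical stand-in for JSVAL_VOID */

    static int
    cmp_int(const void *a, const void *b)
    {
        int x = *(const int *) a, y = *(const int *) b;
        return (x > y) - (x < y);
    }

    /* Compact defined values to the front, sort only that prefix, and
     * park the UNDEF slots at the tail.  Returns the sorted length. */
    static size_t
    sort_with_undefs(int *vec, size_t len)
    {
        size_t newlen = 0, i;

        for (i = 0; i < len; i++) {
            if (vec[i] != UNDEF)
                vec[newlen++] = vec[i];
        }
        qsort(vec, newlen, sizeof vec[0], cmp_int);
        for (i = newlen; i < len; i++)
            vec[i] = UNDEF;
        return newlen;
    }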
+/*
+ * Perl-inspired push, pop, shift, unshift, and splice methods.
+ */
+static JSBool
+array_push(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsuint length, newlength;
+
+ if (!js_GetLengthProperty(cx, obj, &length))
+ return JS_FALSE;
+ newlength = length + argc;
+ if (!InitArrayElements(cx, obj, length, newlength, argv))
+ return JS_FALSE;
+
+ /* Per ECMA-262, return the new array length. */
+ if (!IndexToValue(cx, newlength, rval))
+ return JS_FALSE;
+ return js_SetLengthProperty(cx, obj, newlength);
+}
+
+static JSBool
+array_pop(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsuint index;
+ JSBool hole;
+
+ if (!js_GetLengthProperty(cx, obj, &index))
+ return JS_FALSE;
+ if (index > 0) {
+ index--;
+
+ /* Get the to-be-deleted property's value into rval. */
+ if (!GetArrayElement(cx, obj, index, &hole, rval))
+ return JS_FALSE;
+ if (!hole && !DeleteArrayElement(cx, obj, index))
+ return JS_FALSE;
+ }
+ return js_SetLengthProperty(cx, obj, index);
+}
+
+static JSBool
+array_shift(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsuint length, i;
+ JSBool hole;
+
+ if (!js_GetLengthProperty(cx, obj, &length))
+ return JS_FALSE;
+ if (length == 0) {
+ *rval = JSVAL_VOID;
+ } else {
+ length--;
+
+ /* Get the to-be-deleted property's value into rval ASAP. */
+ if (!GetArrayElement(cx, obj, 0, &hole, rval))
+ return JS_FALSE;
+
+ /*
+ * Slide down the array above the first element.
+ */
+ for (i = 0; i != length; i++) {
+ if (!GetArrayElement(cx, obj, i + 1, &hole, &argv[0]))
+ return JS_FALSE;
+ if (!SetOrDeleteArrayElement(cx, obj, i, hole, argv[0]))
+ return JS_FALSE;
+ }
+
+        /* Delete the only or last element when it exists. */
+ if (!hole && !DeleteArrayElement(cx, obj, length))
+ return JS_FALSE;
+ }
+ return js_SetLengthProperty(cx, obj, length);
+}
+
+static JSBool
+array_unshift(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsuint length, last;
+ jsval *vp;
+ JSBool hole;
+
+ if (!js_GetLengthProperty(cx, obj, &length))
+ return JS_FALSE;
+ if (argc > 0) {
+ /* Slide up the array to make room for argc at the bottom. */
+ if (length > 0) {
+ last = length;
+ vp = argv + argc; /* local root */
+ do {
+ --last;
+ if (!GetArrayElement(cx, obj, last, &hole, vp) ||
+ !SetOrDeleteArrayElement(cx, obj, last + argc, hole, *vp)) {
+ return JS_FALSE;
+ }
+ } while (last != 0);
+ }
+
+ /* Copy from argv to the bottom of the array. */
+ if (!InitArrayElements(cx, obj, 0, argc, argv))
+ return JS_FALSE;
+
+ length += argc;
+ if (!js_SetLengthProperty(cx, obj, length))
+ return JS_FALSE;
+ }
+
+ /* Follow Perl by returning the new array length. */
+ return IndexToValue(cx, length, rval);
+}
+
+static JSBool
+array_splice(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsval *vp;
+ jsuint length, begin, end, count, delta, last;
+ jsdouble d;
+ JSBool hole;
+ JSObject *obj2;
+
+ /*
+ * Nothing to do if no args. Otherwise point vp at our one explicit local
+ * root and get length.
+ */
+ if (argc == 0)
+ return JS_TRUE;
+ vp = argv + argc;
+ if (!js_GetLengthProperty(cx, obj, &length))
+ return JS_FALSE;
+
+ /* Convert the first argument into a starting index. */
+ if (!js_ValueToNumber(cx, *argv, &d))
+ return JS_FALSE;
+ d = js_DoubleToInteger(d);
+ if (d < 0) {
+ d += length;
+ if (d < 0)
+ d = 0;
+ } else if (d > length) {
+ d = length;
+ }
+ begin = (jsuint)d; /* d has been clamped to uint32 */
+ argc--;
+ argv++;
+
+ /* Convert the second argument from a count into a fencepost index. */
+ delta = length - begin;
+ if (argc == 0) {
+ count = delta;
+ end = length;
+ } else {
+ if (!js_ValueToNumber(cx, *argv, &d))
+ return JS_FALSE;
+ d = js_DoubleToInteger(d);
+ if (d < 0)
+ d = 0;
+ else if (d > delta)
+ d = delta;
+ count = (jsuint)d;
+ end = begin + count;
+ argc--;
+ argv++;
+ }
+
+
+    /*
+     * Create a new array value to return. Our ECMA v2 proposal specifies
+     * that splice always returns an array value, even when given no
+     * arguments. We think this is best because it eliminates the need
+     * for callers to do an extra test to handle the empty splice case.
+     */
+ obj2 = js_NewArrayObject(cx, 0, NULL);
+ if (!obj2)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(obj2);
+
+ /* If there are elements to remove, put them into the return value. */
+ if (count > 0) {
+ for (last = begin; last < end; last++) {
+ if (!GetArrayElement(cx, obj, last, &hole, vp))
+ return JS_FALSE;
+
+ /* Copy *vp to new array unless it's a hole. */
+ if (!hole && !SetArrayElement(cx, obj2, last - begin, *vp))
+ return JS_FALSE;
+ }
+
+ if (!js_SetLengthProperty(cx, obj2, end - begin))
+ return JS_FALSE;
+ }
+
+ /* Find the direction (up or down) to copy and make way for argv. */
+ if (argc > count) {
+ delta = (jsuint)argc - count;
+ last = length;
+ /* (uint) end could be 0, so can't use vanilla >= test */
+ while (last-- > end) {
+ if (!GetArrayElement(cx, obj, last, &hole, vp) ||
+ !SetOrDeleteArrayElement(cx, obj, last + delta, hole, *vp)) {
+ return JS_FALSE;
+ }
+ }
+ length += delta;
+ } else if (argc < count) {
+ delta = count - (jsuint)argc;
+ for (last = end; last < length; last++) {
+ if (!GetArrayElement(cx, obj, last, &hole, vp) ||
+ !SetOrDeleteArrayElement(cx, obj, last - delta, hole, *vp)) {
+ return JS_FALSE;
+ }
+ }
+ length -= delta;
+ }
+
+ /* Copy from argv into the hole to complete the splice. */
+ if (!InitArrayElements(cx, obj, begin, begin + argc, argv))
+ return JS_FALSE;
+
+ /* Update length in case we deleted elements from the end. */
+ return js_SetLengthProperty(cx, obj, length);
+}
+
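Both array_splice and array_slice convert their index arguments with js_DoubleToInteger and then clamp them: negative values count back from length, and the result is forced into [0, length]. A small sketch of that clamping; clamp_index is a hypothetical helper and trunc stands in for js_DoubleToInteger:

    #include <math.h>

    static unsigned
    clamp_index(double d, unsigned length)
    {
        if (d != d)                 /* NaN converts to 0, as ToInteger does */
            return 0;
        d = trunc(d);               /* stand-in for js_DoubleToInteger */
        if (d < 0) {
            d += length;
            if (d < 0)
                d = 0;
        } else if (d > length) {
            d = length;
        }
        return (unsigned) d;
    }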
+/*
+ * Python-esque sequence operations.
+ */
+static JSBool
+array_concat(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsval *vp, v;
+ JSObject *nobj, *aobj;
+ jsuint length, alength, slot;
+ uintN i;
+ JSBool hole;
+
+ /* Hoist the explicit local root address computation. */
+ vp = argv + argc;
+
+ /* Treat obj as the first argument; see ECMA 15.4.4.4. */
+ --argv;
+ JS_ASSERT(obj == JSVAL_TO_OBJECT(argv[0]));
+
+ /* Create a new Array object and store it in the rval local root. */
+ nobj = js_NewArrayObject(cx, 0, NULL);
+ if (!nobj)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(nobj);
+
+ /* Loop over [0, argc] to concat args into nobj, expanding all Arrays. */
+ length = 0;
+ for (i = 0; i <= argc; i++) {
+ v = argv[i];
+ if (JSVAL_IS_OBJECT(v)) {
+ aobj = JSVAL_TO_OBJECT(v);
+ if (aobj && OBJ_GET_CLASS(cx, aobj) == &js_ArrayClass) {
+ if (!OBJ_GET_PROPERTY(cx, aobj,
+ ATOM_TO_JSID(cx->runtime->atomState
+ .lengthAtom),
+ vp)) {
+ return JS_FALSE;
+ }
+ if (!ValueIsLength(cx, *vp, &alength))
+ return JS_FALSE;
+ for (slot = 0; slot < alength; slot++) {
+ if (!GetArrayElement(cx, aobj, slot, &hole, vp))
+ return JS_FALSE;
+
+ /*
+ * Per ECMA 262, 15.4.4.4, step 9, ignore non-existent
+ * properties.
+ */
+ if (!hole && !SetArrayElement(cx, nobj, length + slot, *vp))
+ return JS_FALSE;
+ }
+ length += alength;
+ continue;
+ }
+ }
+
+ if (!SetArrayElement(cx, nobj, length, v))
+ return JS_FALSE;
+ length++;
+ }
+
+ return js_SetLengthProperty(cx, nobj, length);
+}
+
+static JSBool
+array_slice(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsval *vp;
+ JSObject *nobj;
+ jsuint length, begin, end, slot;
+ jsdouble d;
+ JSBool hole;
+
+ /* Hoist the explicit local root address computation. */
+ vp = argv + argc;
+
+ /* Create a new Array object and store it in the rval local root. */
+ nobj = js_NewArrayObject(cx, 0, NULL);
+ if (!nobj)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(nobj);
+
+ if (!js_GetLengthProperty(cx, obj, &length))
+ return JS_FALSE;
+ begin = 0;
+ end = length;
+
+ if (argc > 0) {
+ if (!js_ValueToNumber(cx, argv[0], &d))
+ return JS_FALSE;
+ d = js_DoubleToInteger(d);
+ if (d < 0) {
+ d += length;
+ if (d < 0)
+ d = 0;
+ } else if (d > length) {
+ d = length;
+ }
+ begin = (jsuint)d;
+
+ if (argc > 1) {
+ if (!js_ValueToNumber(cx, argv[1], &d))
+ return JS_FALSE;
+ d = js_DoubleToInteger(d);
+ if (d < 0) {
+ d += length;
+ if (d < 0)
+ d = 0;
+ } else if (d > length) {
+ d = length;
+ }
+ end = (jsuint)d;
+ }
+ }
+
+ if (begin > end)
+ begin = end;
+
+ for (slot = begin; slot < end; slot++) {
+ if (!GetArrayElement(cx, obj, slot, &hole, vp))
+ return JS_FALSE;
+ if (!hole && !SetArrayElement(cx, nobj, slot - begin, *vp))
+ return JS_FALSE;
+ }
+ return js_SetLengthProperty(cx, nobj, end - begin);
+}
+
+#if JS_HAS_ARRAY_EXTRAS
+
+static JSBool
+array_indexOfHelper(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval, JSBool isLast)
+{
+ jsuint length, i, stop;
+ jsint direction;
+ JSBool hole;
+
+ if (!js_GetLengthProperty(cx, obj, &length))
+ return JS_FALSE;
+ if (length == 0)
+ goto not_found;
+
+ if (argc <= 1) {
+ i = isLast ? length - 1 : 0;
+ } else {
+ jsdouble start;
+
+ if (!js_ValueToNumber(cx, argv[1], &start))
+ return JS_FALSE;
+ start = js_DoubleToInteger(start);
+ if (start < 0) {
+ start += length;
+ if (start < 0) {
+ if (isLast)
+ goto not_found;
+ i = 0;
+ } else {
+ i = (jsuint)start;
+ }
+ } else if (start >= length) {
+ if (!isLast)
+ goto not_found;
+ i = length - 1;
+ } else {
+ i = (jsuint)start;
+ }
+ }
+
+ if (isLast) {
+ stop = 0;
+ direction = -1;
+ } else {
+ stop = length - 1;
+ direction = 1;
+ }
+
+ for (;;) {
+ if (!GetArrayElement(cx, obj, (jsuint)i, &hole, rval))
+ return JS_FALSE;
+ if (!hole && js_StrictlyEqual(*rval, argv[0]))
+ return js_NewNumberValue(cx, i, rval);
+ if (i == stop)
+ goto not_found;
+ i += direction;
+ }
+
+ not_found:
+ *rval = INT_TO_JSVAL(-1);
+ return JS_TRUE;
+}
+
+static JSBool
+array_indexOf(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ return array_indexOfHelper(cx, obj, argc, argv, rval, JS_FALSE);
+}
+
+static JSBool
+array_lastIndexOf(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ return array_indexOfHelper(cx, obj, argc, argv, rval, JS_TRUE);
+}
+
+/* Order is important; extras that use a caller's predicate must follow MAP. */
+typedef enum ArrayExtraMode {
+ FOREACH,
+ MAP,
+ FILTER,
+ SOME,
+ EVERY
+} ArrayExtraMode;
+
+static JSBool
+array_extra(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval,
+ ArrayExtraMode mode)
+{
+ jsval *vp, *sp, *origsp, *oldsp;
+ jsuint length, newlen, i;
+ JSObject *callable, *thisp, *newarr;
+ void *mark;
+ JSStackFrame *fp;
+ JSBool ok, cond, hole;
+
+ /* Hoist the explicit local root address computation. */
+ vp = argv + argc;
+
+ if (!js_GetLengthProperty(cx, obj, &length))
+ return JS_FALSE;
+
+ /*
+ * First, get or compute our callee, so that we error out consistently
+ * when passed a non-callable object.
+ */
+ callable = js_ValueToCallableObject(cx, &argv[0], JSV2F_SEARCH_STACK);
+ if (!callable)
+ return JS_FALSE;
+
+ /*
+ * Set our initial return condition, used for zero-length array cases
+ * (and pre-size our map return to match our known length, for all cases).
+ */
+#ifdef __GNUC__ /* quell GCC overwarning */
+ newlen = 0;
+ newarr = NULL;
+ ok = JS_TRUE;
+#endif
+ switch (mode) {
+ case MAP:
+ case FILTER:
+ newlen = (mode == MAP) ? length : 0;
+ newarr = js_NewArrayObject(cx, newlen, NULL);
+ if (!newarr)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(newarr);
+ break;
+ case SOME:
+ *rval = JSVAL_FALSE;
+ break;
+ case EVERY:
+ *rval = JSVAL_TRUE;
+ break;
+ case FOREACH:
+ break;
+ }
+
+ if (length == 0)
+ return JS_TRUE;
+
+ if (argc > 1) {
+ if (!js_ValueToObject(cx, argv[1], &thisp))
+ return JS_FALSE;
+ argv[1] = OBJECT_TO_JSVAL(thisp);
+ } else {
+ thisp = NULL;
+ }
+
+ /* We call with 3 args (value, index, array), plus room for rval. */
+ origsp = js_AllocStack(cx, 2 + 3 + 1, &mark);
+ if (!origsp)
+ return JS_FALSE;
+
+ /* Lift current frame to include our args. */
+ fp = cx->fp;
+ oldsp = fp->sp;
+
+ for (i = 0; i < length; i++) {
+ ok = GetArrayElement(cx, obj, i, &hole, vp);
+ if (!ok)
+ break;
+ if (hole)
+ continue;
+
+ /*
+ * Push callable and 'this', then args. We must do this for every
+ * iteration around the loop since js_Invoke uses origsp[0] for rval
+ * storage and some native functions use origsp[1] for local rooting.
+ */
+ sp = origsp;
+ *sp++ = OBJECT_TO_JSVAL(callable);
+ *sp++ = OBJECT_TO_JSVAL(thisp);
+ *sp++ = *vp;
+ *sp++ = INT_TO_JSVAL(i);
+ *sp++ = OBJECT_TO_JSVAL(obj);
+
+ /* Do the call. */
+ fp->sp = sp;
+ ok = js_Invoke(cx, 3, JSINVOKE_INTERNAL);
+ vp[1] = fp->sp[-1];
+ fp->sp = oldsp;
+ if (!ok)
+ break;
+
+ if (mode > MAP) {
+ if (vp[1] == JSVAL_NULL) {
+ cond = JS_FALSE;
+ } else if (JSVAL_IS_BOOLEAN(vp[1])) {
+ cond = JSVAL_TO_BOOLEAN(vp[1]);
+ } else {
+ ok = js_ValueToBoolean(cx, vp[1], &cond);
+ if (!ok)
+ goto out;
+ }
+ }
+
+ switch (mode) {
+ case FOREACH:
+ break;
+ case MAP:
+ ok = SetArrayElement(cx, newarr, i, vp[1]);
+ if (!ok)
+ goto out;
+ break;
+ case FILTER:
+ if (!cond)
+ break;
+ /* Filter passed *vp, push as result. */
+ ok = SetArrayElement(cx, newarr, newlen++, *vp);
+ if (!ok)
+ goto out;
+ break;
+ case SOME:
+ if (cond) {
+ *rval = JSVAL_TRUE;
+ goto out;
+ }
+ break;
+ case EVERY:
+ if (!cond) {
+ *rval = JSVAL_FALSE;
+ goto out;
+ }
+ break;
+ }
+ }
+
+ out:
+ js_FreeStack(cx, mark);
+ if (ok && mode == FILTER)
+ ok = js_SetLengthProperty(cx, newarr, newlen);
+ return ok;
+}
+
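array_extra drives forEach, map, filter, some, and every through one loop, dispatching on the mode after each callback so that some and every can return early. A minimal sketch of the same dispatch over a plain int array; int_extra, ExtraMode, and the callback signature are hypothetical, and the callback cannot fail here, so the return value carries the some/every answer directly:

    #include <stddef.h>

    typedef enum { E_FOREACH, E_MAP, E_FILTER, E_SOME, E_EVERY } ExtraMode;

    /* out/outlen are used only by E_MAP and E_FILTER. */
    static int
    int_extra(ExtraMode mode, const int *in, size_t n, int *out,
              size_t *outlen, int (*fn)(int v, size_t i))
    {
        size_t i, newlen = 0;
        int r;

        for (i = 0; i < n; i++) {
            r = fn(in[i], i);
            switch (mode) {
              case E_FOREACH:
                break;
              case E_MAP:
                out[newlen++] = r;
                break;
              case E_FILTER:
                if (r)
                    out[newlen++] = in[i];
                break;
              case E_SOME:
                if (r)
                    return 1;           /* early exit, like *rval = TRUE */
                break;
              case E_EVERY:
                if (!r)
                    return 0;           /* early exit, like *rval = FALSE */
                break;
            }
        }
        if (outlen)
            *outlen = newlen;
        return mode != E_SOME;          /* exhausted: some=false, every=true */
    }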
+static JSBool
+array_forEach(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ return array_extra(cx, obj, argc, argv, rval, FOREACH);
+}
+
+static JSBool
+array_map(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ return array_extra(cx, obj, argc, argv, rval, MAP);
+}
+
+static JSBool
+array_filter(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ return array_extra(cx, obj, argc, argv, rval, FILTER);
+}
+
+static JSBool
+array_some(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ return array_extra(cx, obj, argc, argv, rval, SOME);
+}
+
+static JSBool
+array_every(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ return array_extra(cx, obj, argc, argv, rval, EVERY);
+}
+#endif
+
+static JSFunctionSpec array_methods[] = {
+#if JS_HAS_TOSOURCE
+ {js_toSource_str, array_toSource, 0,0,0},
+#endif
+ {js_toString_str, array_toString, 0,0,0},
+ {js_toLocaleString_str, array_toLocaleString, 0,0,0},
+
+ /* Perl-ish methods. */
+ {"join", array_join, 1,JSFUN_GENERIC_NATIVE,0},
+ {"reverse", array_reverse, 0,JSFUN_GENERIC_NATIVE,2},
+ {"sort", array_sort, 1,JSFUN_GENERIC_NATIVE,2},
+ {"push", array_push, 1,JSFUN_GENERIC_NATIVE,0},
+ {"pop", array_pop, 0,JSFUN_GENERIC_NATIVE,0},
+ {"shift", array_shift, 0,JSFUN_GENERIC_NATIVE,1},
+ {"unshift", array_unshift, 1,JSFUN_GENERIC_NATIVE,1},
+ {"splice", array_splice, 2,JSFUN_GENERIC_NATIVE,1},
+
+ /* Python-esque sequence methods. */
+ {"concat", array_concat, 1,JSFUN_GENERIC_NATIVE,1},
+ {"slice", array_slice, 2,JSFUN_GENERIC_NATIVE,1},
+
+#if JS_HAS_ARRAY_EXTRAS
+ {"indexOf", array_indexOf, 1,JSFUN_GENERIC_NATIVE,0},
+ {"lastIndexOf", array_lastIndexOf, 1,JSFUN_GENERIC_NATIVE,0},
+ {"forEach", array_forEach, 1,JSFUN_GENERIC_NATIVE,2},
+ {"map", array_map, 1,JSFUN_GENERIC_NATIVE,2},
+ {"filter", array_filter, 1,JSFUN_GENERIC_NATIVE,2},
+ {"some", array_some, 1,JSFUN_GENERIC_NATIVE,2},
+ {"every", array_every, 1,JSFUN_GENERIC_NATIVE,2},
+#endif
+
+ {0,0,0,0,0}
+};
+
+static JSBool
+Array(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsuint length;
+ jsval *vector;
+
+ /* If called without new, replace obj with a new Array object. */
+ if (!(cx->fp->flags & JSFRAME_CONSTRUCTING)) {
+ obj = js_NewObject(cx, &js_ArrayClass, NULL, NULL);
+ if (!obj)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(obj);
+ }
+
+ if (argc == 0) {
+ length = 0;
+ vector = NULL;
+ } else if (argc > 1) {
+ length = (jsuint) argc;
+ vector = argv;
+ } else if (!JSVAL_IS_NUMBER(argv[0])) {
+ length = 1;
+ vector = argv;
+ } else {
+ if (!ValueIsLength(cx, argv[0], &length))
+ return JS_FALSE;
+ vector = NULL;
+ }
+ return InitArrayObject(cx, obj, length, vector);
+}
+
+JSObject *
+js_InitArrayClass(JSContext *cx, JSObject *obj)
+{
+ JSObject *proto;
+
+ proto = JS_InitClass(cx, obj, NULL, &js_ArrayClass, Array, 1,
+ NULL, array_methods, NULL, NULL);
+
+ /* Initialize the Array prototype object so it gets a length property. */
+ if (!proto || !InitArrayObject(cx, proto, 0, NULL))
+ return NULL;
+ return proto;
+}
+
+JSObject *
+js_NewArrayObject(JSContext *cx, jsuint length, jsval *vector)
+{
+ JSTempValueRooter tvr;
+ JSObject *obj;
+
+ obj = js_NewObject(cx, &js_ArrayClass, NULL, NULL);
+ if (!obj)
+ return NULL;
+
+ JS_PUSH_TEMP_ROOT_OBJECT(cx, obj, &tvr);
+ if (!InitArrayObject(cx, obj, length, vector))
+ obj = NULL;
+ JS_POP_TEMP_ROOT(cx, &tvr);
+
+ /* Set/clear newborn root, in case we lost it. */
+ cx->weakRoots.newborn[GCX_OBJECT] = (JSGCThing *) obj;
+ return obj;
+}
diff --git a/src/third_party/js-1.7/jsarray.h b/src/third_party/js-1.7/jsarray.h
new file mode 100644
index 00000000000..a89561b4ca3
--- /dev/null
+++ b/src/third_party/js-1.7/jsarray.h
@@ -0,0 +1,95 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsarray_h___
+#define jsarray_h___
+/*
+ * JS Array interface.
+ */
+#include "jsprvtd.h"
+#include "jspubtd.h"
+
+JS_BEGIN_EXTERN_C
+
+/* Generous sanity-bound on length (in elements) of array initialiser. */
+#define ARRAY_INIT_LIMIT JS_BIT(24)
+
+extern JSBool
+js_IdIsIndex(jsval id, jsuint *indexp);
+
+extern JSClass js_ArrayClass;
+
+extern JSObject *
+js_InitArrayClass(JSContext *cx, JSObject *obj);
+
+extern JSObject *
+js_NewArrayObject(JSContext *cx, jsuint length, jsval *vector);
+
+extern JSBool
+js_GetLengthProperty(JSContext *cx, JSObject *obj, jsuint *lengthp);
+
+extern JSBool
+js_SetLengthProperty(JSContext *cx, JSObject *obj, jsuint length);
+
+extern JSBool
+js_HasLengthProperty(JSContext *cx, JSObject *obj, jsuint *lengthp);
+
+/*
+ * Test whether an object is "array-like". Currently this means whether obj
+ * is an Array or an arguments object. We would like an API, and probably a
+ * way in the language, to bless other objects as array-like: having indexed
+ * properties, and a 'length' property of uint32 value equal to one more than
+ * the greatest index.
+ */
+extern JSBool
+js_IsArrayLike(JSContext *cx, JSObject *obj, JSBool *answerp, jsuint *lengthp);
+
+/*
+ * JS-specific heap sort function.
+ */
+typedef JSBool (*JSComparator)(void *arg, const void *a, const void *b,
+ int *result);
+
+extern JSBool
+js_HeapSort(void *vec, size_t nel, void *pivot, size_t elsize,
+ JSComparator cmp, void *arg);
+
+JS_END_EXTERN_C
+
+#endif /* jsarray_h___ */
diff --git a/src/third_party/js-1.7/jsatom.c b/src/third_party/js-1.7/jsatom.c
new file mode 100644
index 00000000000..02ee250676f
--- /dev/null
+++ b/src/third_party/js-1.7/jsatom.c
@@ -0,0 +1,999 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS atom table.
+ */
+#include "jsstddef.h"
+#include <stdlib.h>
+#include <string.h>
+#include "jstypes.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jshash.h" /* Added by JSIFY */
+#include "jsprf.h"
+#include "jsapi.h"
+#include "jsatom.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsgc.h"
+#include "jslock.h"
+#include "jsnum.h"
+#include "jsscan.h"
+#include "jsstr.h"
+
+JS_FRIEND_API(const char *)
+js_AtomToPrintableString(JSContext *cx, JSAtom *atom)
+{
+ return js_ValueToPrintableString(cx, ATOM_KEY(atom));
+}
+
+/*
+ * Keep this in sync with jspubtd.h -- an assertion below will insist that
+ * its length match the JSType enum's JSTYPE_LIMIT limit value.
+ */
+const char *js_type_strs[] = {
+ "undefined",
+ js_object_str,
+ "function",
+ "string",
+ "number",
+ "boolean",
+ "null",
+ "xml",
+};
+
+JS_STATIC_ASSERT(JSTYPE_LIMIT ==
+ sizeof js_type_strs / sizeof js_type_strs[0]);
+
+const char *js_boolean_strs[] = {
+ js_false_str,
+ js_true_str
+};
+
+#define JS_PROTO(name,code,init) const char js_##name##_str[] = #name;
+#include "jsproto.tbl"
+#undef JS_PROTO
+
+const char *js_proto_strs[JSProto_LIMIT] = {
+#define JS_PROTO(name,code,init) js_##name##_str,
+#include "jsproto.tbl"
+#undef JS_PROTO
+};
+
+const char js_anonymous_str[] = "anonymous";
+const char js_arguments_str[] = "arguments";
+const char js_arity_str[] = "arity";
+const char js_callee_str[] = "callee";
+const char js_caller_str[] = "caller";
+const char js_class_prototype_str[] = "prototype";
+const char js_constructor_str[] = "constructor";
+const char js_count_str[] = "__count__";
+const char js_each_str[] = "each";
+const char js_eval_str[] = "eval";
+const char js_fileName_str[] = "fileName";
+const char js_get_str[] = "get";
+const char js_getter_str[] = "getter";
+const char js_index_str[] = "index";
+const char js_input_str[] = "input";
+const char js_iterator_str[] = "__iterator__";
+const char js_length_str[] = "length";
+const char js_lineNumber_str[] = "lineNumber";
+const char js_message_str[] = "message";
+const char js_name_str[] = "name";
+const char js_next_str[] = "next";
+const char js_noSuchMethod_str[] = "__noSuchMethod__";
+const char js_object_str[] = "object";
+const char js_parent_str[] = "__parent__";
+const char js_proto_str[] = "__proto__";
+const char js_setter_str[] = "setter";
+const char js_set_str[] = "set";
+const char js_stack_str[] = "stack";
+const char js_toSource_str[] = "toSource";
+const char js_toString_str[] = "toString";
+const char js_toLocaleString_str[] = "toLocaleString";
+const char js_valueOf_str[] = "valueOf";
+
+#if JS_HAS_XML_SUPPORT
+const char js_etago_str[] = "</";
+const char js_namespace_str[] = "namespace";
+const char js_ptagc_str[] = "/>";
+const char js_qualifier_str[] = "::";
+const char js_space_str[] = " ";
+const char js_stago_str[] = "<";
+const char js_star_str[] = "*";
+const char js_starQualifier_str[] = "*::";
+const char js_tagc_str[] = ">";
+const char js_xml_str[] = "xml";
+#endif
+
+#if JS_HAS_GENERATORS
+const char js_close_str[] = "close";
+const char js_send_str[] = "send";
+#endif
+
+#ifdef NARCISSUS
+const char js_call_str[] = "__call__";
+const char js_construct_str[] = "__construct__";
+const char js_hasInstance_str[] = "__hasInstance__";
+const char js_ExecutionContext_str[] = "ExecutionContext";
+const char js_current_str[] = "current";
+#endif
+
+#define HASH_OBJECT(o) (JS_PTR_TO_UINT32(o) >> JSVAL_TAGBITS)
+#define HASH_INT(i) ((JSHashNumber)(i))
+#define HASH_DOUBLE(dp) ((JSDOUBLE_HI32(*dp) ^ JSDOUBLE_LO32(*dp)))
+#define HASH_BOOLEAN(b) ((JSHashNumber)(b))
+
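HASH_DOUBLE folds the two 32-bit halves of the double's IEEE bit pattern into a single hash word. A standalone sketch of the same fold, using memcpy in place of the engine's JSDOUBLE_HI32/LO32 macros:

    #include <stdint.h>
    #include <string.h>

    static uint32_t
    hash_double(double d)
    {
        uint64_t bits;

        memcpy(&bits, &d, sizeof bits);     /* reinterpret without aliasing */
        return (uint32_t)(bits >> 32) ^ (uint32_t) bits;
    }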
+JS_STATIC_DLL_CALLBACK(JSHashNumber)
+js_hash_atom_key(const void *key)
+{
+ jsval v;
+ jsdouble *dp;
+
+ /* Order JSVAL_IS_* tests by likelihood of success. */
+ v = (jsval)key;
+ if (JSVAL_IS_STRING(v))
+ return js_HashString(JSVAL_TO_STRING(v));
+ if (JSVAL_IS_INT(v))
+ return HASH_INT(JSVAL_TO_INT(v));
+ if (JSVAL_IS_DOUBLE(v)) {
+ dp = JSVAL_TO_DOUBLE(v);
+ return HASH_DOUBLE(dp);
+ }
+ if (JSVAL_IS_OBJECT(v))
+ return HASH_OBJECT(JSVAL_TO_OBJECT(v));
+ if (JSVAL_IS_BOOLEAN(v))
+ return HASH_BOOLEAN(JSVAL_TO_BOOLEAN(v));
+ return (JSHashNumber)v;
+}
+
+JS_STATIC_DLL_CALLBACK(intN)
+js_compare_atom_keys(const void *k1, const void *k2)
+{
+ jsval v1, v2;
+
+ v1 = (jsval)k1, v2 = (jsval)k2;
+ if (JSVAL_IS_STRING(v1) && JSVAL_IS_STRING(v2))
+ return js_EqualStrings(JSVAL_TO_STRING(v1), JSVAL_TO_STRING(v2));
+ if (JSVAL_IS_DOUBLE(v1) && JSVAL_IS_DOUBLE(v2)) {
+ double d1 = *JSVAL_TO_DOUBLE(v1);
+ double d2 = *JSVAL_TO_DOUBLE(v2);
+ if (JSDOUBLE_IS_NaN(d1))
+ return JSDOUBLE_IS_NaN(d2);
+#if defined(XP_WIN)
+ /* XXX MSVC miscompiles such that (NaN == 0) */
+ if (JSDOUBLE_IS_NaN(d2))
+ return JS_FALSE;
+#endif
+ return d1 == d2;
+ }
+ return v1 == v2;
+}
+
+JS_STATIC_DLL_CALLBACK(int)
+js_compare_stub(const void *v1, const void *v2)
+{
+ return 1;
+}
+
+/* These next two are exported to jsscript.c and used similarly there. */
+void * JS_DLL_CALLBACK
+js_alloc_table_space(void *priv, size_t size)
+{
+ return malloc(size);
+}
+
+void JS_DLL_CALLBACK
+js_free_table_space(void *priv, void *item)
+{
+ free(item);
+}
+
+JS_STATIC_DLL_CALLBACK(JSHashEntry *)
+js_alloc_atom(void *priv, const void *key)
+{
+ JSAtomState *state = (JSAtomState *) priv;
+ JSAtom *atom;
+
+ atom = (JSAtom *) malloc(sizeof(JSAtom));
+ if (!atom)
+ return NULL;
+#ifdef JS_THREADSAFE
+ state->tablegen++;
+#endif
+ atom->entry.key = key;
+ atom->entry.value = NULL;
+ atom->flags = 0;
+ atom->number = state->number++;
+ return &atom->entry;
+}
+
+JS_STATIC_DLL_CALLBACK(void)
+js_free_atom(void *priv, JSHashEntry *he, uintN flag)
+{
+ if (flag != HT_FREE_ENTRY)
+ return;
+#ifdef JS_THREADSAFE
+ ((JSAtomState *)priv)->tablegen++;
+#endif
+ free(he);
+}
+
+static JSHashAllocOps atom_alloc_ops = {
+ js_alloc_table_space, js_free_table_space,
+ js_alloc_atom, js_free_atom
+};
+
+#define JS_ATOM_HASH_SIZE 1024
+
+JSBool
+js_InitAtomState(JSContext *cx, JSAtomState *state)
+{
+ state->table = JS_NewHashTable(JS_ATOM_HASH_SIZE, js_hash_atom_key,
+ js_compare_atom_keys, js_compare_stub,
+ &atom_alloc_ops, state);
+ if (!state->table) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+
+ state->runtime = cx->runtime;
+#ifdef JS_THREADSAFE
+ js_InitLock(&state->lock);
+ state->tablegen = 0;
+#endif
+
+ if (!js_InitPinnedAtoms(cx, state)) {
+ js_FreeAtomState(cx, state);
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+
+JSBool
+js_InitPinnedAtoms(JSContext *cx, JSAtomState *state)
+{
+ uintN i;
+
+#define FROB(lval,str) \
+ JS_BEGIN_MACRO \
+ if (!(state->lval = js_Atomize(cx, str, strlen(str), ATOM_PINNED))) \
+ return JS_FALSE; \
+ JS_END_MACRO
+
+ for (i = 0; i < JSTYPE_LIMIT; i++)
+ FROB(typeAtoms[i], js_type_strs[i]);
+
+ for (i = 0; i < JSProto_LIMIT; i++)
+ FROB(classAtoms[i], js_proto_strs[i]);
+
+ FROB(booleanAtoms[0], js_false_str);
+ FROB(booleanAtoms[1], js_true_str);
+ FROB(nullAtom, js_null_str);
+
+ FROB(anonymousAtom, js_anonymous_str);
+ FROB(argumentsAtom, js_arguments_str);
+ FROB(arityAtom, js_arity_str);
+ FROB(calleeAtom, js_callee_str);
+ FROB(callerAtom, js_caller_str);
+ FROB(classPrototypeAtom, js_class_prototype_str);
+ FROB(constructorAtom, js_constructor_str);
+ FROB(countAtom, js_count_str);
+ FROB(eachAtom, js_each_str);
+ FROB(evalAtom, js_eval_str);
+ FROB(fileNameAtom, js_fileName_str);
+ FROB(getAtom, js_get_str);
+ FROB(getterAtom, js_getter_str);
+ FROB(indexAtom, js_index_str);
+ FROB(inputAtom, js_input_str);
+ FROB(iteratorAtom, js_iterator_str);
+ FROB(lengthAtom, js_length_str);
+ FROB(lineNumberAtom, js_lineNumber_str);
+ FROB(messageAtom, js_message_str);
+ FROB(nameAtom, js_name_str);
+ FROB(nextAtom, js_next_str);
+ FROB(noSuchMethodAtom, js_noSuchMethod_str);
+ FROB(parentAtom, js_parent_str);
+ FROB(protoAtom, js_proto_str);
+ FROB(setAtom, js_set_str);
+ FROB(setterAtom, js_setter_str);
+ FROB(stackAtom, js_stack_str);
+ FROB(toSourceAtom, js_toSource_str);
+ FROB(toStringAtom, js_toString_str);
+ FROB(toLocaleStringAtom, js_toLocaleString_str);
+ FROB(valueOfAtom, js_valueOf_str);
+
+#if JS_HAS_XML_SUPPORT
+ FROB(etagoAtom, js_etago_str);
+ FROB(namespaceAtom, js_namespace_str);
+ FROB(ptagcAtom, js_ptagc_str);
+ FROB(qualifierAtom, js_qualifier_str);
+ FROB(spaceAtom, js_space_str);
+ FROB(stagoAtom, js_stago_str);
+ FROB(starAtom, js_star_str);
+ FROB(starQualifierAtom, js_starQualifier_str);
+ FROB(tagcAtom, js_tagc_str);
+ FROB(xmlAtom, js_xml_str);
+#endif
+
+#if JS_HAS_GENERATORS
+ FROB(closeAtom, js_close_str);
+#endif
+
+#ifdef NARCISSUS
+ FROB(callAtom, js_call_str);
+ FROB(constructAtom, js_construct_str);
+ FROB(hasInstanceAtom, js_hasInstance_str);
+ FROB(ExecutionContextAtom, js_ExecutionContext_str);
+ FROB(currentAtom, js_current_str);
+#endif
+
+#undef FROB
+
+ memset(&state->lazy, 0, sizeof state->lazy);
+ return JS_TRUE;
+}
+
+/* NB: cx unused; js_FinishAtomState calls us with null cx. */
+void
+js_FreeAtomState(JSContext *cx, JSAtomState *state)
+{
+ if (state->table)
+ JS_HashTableDestroy(state->table);
+#ifdef JS_THREADSAFE
+ js_FinishLock(&state->lock);
+#endif
+ memset(state, 0, sizeof *state);
+}
+
+typedef struct UninternArgs {
+ JSRuntime *rt;
+ jsatomid leaks;
+} UninternArgs;
+
+JS_STATIC_DLL_CALLBACK(intN)
+js_atom_uninterner(JSHashEntry *he, intN i, void *arg)
+{
+ JSAtom *atom;
+ UninternArgs *args;
+
+ atom = (JSAtom *)he;
+ args = (UninternArgs *)arg;
+ if (ATOM_IS_STRING(atom))
+ js_FinalizeStringRT(args->rt, ATOM_TO_STRING(atom));
+ else if (ATOM_IS_OBJECT(atom))
+ args->leaks++;
+ return HT_ENUMERATE_NEXT;
+}
+
+void
+js_FinishAtomState(JSAtomState *state)
+{
+ UninternArgs args;
+
+ if (!state->table)
+ return;
+ args.rt = state->runtime;
+ args.leaks = 0;
+ JS_HashTableEnumerateEntries(state->table, js_atom_uninterner, &args);
+#ifdef DEBUG
+ if (args.leaks != 0) {
+ fprintf(stderr,
+"JS engine warning: %lu atoms remain after destroying the JSRuntime.\n"
+" These atoms may point to freed memory. Things reachable\n"
+" through them have not been finalized.\n",
+ (unsigned long) args.leaks);
+ }
+#endif
+ js_FreeAtomState(NULL, state);
+}
+
+typedef struct MarkArgs {
+ JSBool keepAtoms;
+ JSGCThingMarker mark;
+ void *data;
+} MarkArgs;
+
+JS_STATIC_DLL_CALLBACK(intN)
+js_atom_marker(JSHashEntry *he, intN i, void *arg)
+{
+ JSAtom *atom;
+ MarkArgs *args;
+ jsval key;
+
+ atom = (JSAtom *)he;
+ args = (MarkArgs *)arg;
+ if ((atom->flags & (ATOM_PINNED | ATOM_INTERNED)) || args->keepAtoms) {
+ atom->flags |= ATOM_MARK;
+ key = ATOM_KEY(atom);
+ if (JSVAL_IS_GCTHING(key))
+ args->mark(JSVAL_TO_GCTHING(key), args->data);
+ }
+ return HT_ENUMERATE_NEXT;
+}
+
+void
+js_MarkAtomState(JSAtomState *state, JSBool keepAtoms, JSGCThingMarker mark,
+ void *data)
+{
+ MarkArgs args;
+
+ if (!state->table)
+ return;
+ args.keepAtoms = keepAtoms;
+ args.mark = mark;
+ args.data = data;
+ JS_HashTableEnumerateEntries(state->table, js_atom_marker, &args);
+}
+
+JS_STATIC_DLL_CALLBACK(intN)
+js_atom_sweeper(JSHashEntry *he, intN i, void *arg)
+{
+ JSAtom *atom;
+ JSAtomState *state;
+
+ atom = (JSAtom *)he;
+ if (atom->flags & ATOM_MARK) {
+ atom->flags &= ~ATOM_MARK;
+ state = (JSAtomState *)arg;
+ state->liveAtoms++;
+ return HT_ENUMERATE_NEXT;
+ }
+ JS_ASSERT((atom->flags & (ATOM_PINNED | ATOM_INTERNED)) == 0);
+ atom->entry.key = atom->entry.value = NULL;
+ atom->flags = 0;
+ return HT_ENUMERATE_REMOVE;
+}
+
+void
+js_SweepAtomState(JSAtomState *state)
+{
+ state->liveAtoms = 0;
+ if (state->table)
+ JS_HashTableEnumerateEntries(state->table, js_atom_sweeper, state);
+}
+
+JS_STATIC_DLL_CALLBACK(intN)
+js_atom_unpinner(JSHashEntry *he, intN i, void *arg)
+{
+ JSAtom *atom;
+
+ atom = (JSAtom *)he;
+ atom->flags &= ~ATOM_PINNED;
+ return HT_ENUMERATE_NEXT;
+}
+
+void
+js_UnpinPinnedAtoms(JSAtomState *state)
+{
+ if (state->table)
+ JS_HashTableEnumerateEntries(state->table, js_atom_unpinner, NULL);
+}
+
+static JSAtom *
+js_AtomizeHashedKey(JSContext *cx, jsval key, JSHashNumber keyHash, uintN flags)
+{
+ JSAtomState *state;
+ JSHashTable *table;
+ JSHashEntry *he, **hep;
+ JSAtom *atom;
+
+ state = &cx->runtime->atomState;
+ JS_LOCK(&state->lock, cx);
+ table = state->table;
+ hep = JS_HashTableRawLookup(table, keyHash, (void *)key);
+ if ((he = *hep) == NULL) {
+ he = JS_HashTableRawAdd(table, hep, keyHash, (void *)key, NULL);
+ if (!he) {
+ JS_ReportOutOfMemory(cx);
+ atom = NULL;
+ goto out;
+ }
+ }
+
+ atom = (JSAtom *)he;
+ atom->flags |= flags;
+ cx->weakRoots.lastAtom = atom;
+out:
+ JS_UNLOCK(&state->lock,cx);
+ return atom;
+}
+
+JSAtom *
+js_AtomizeObject(JSContext *cx, JSObject *obj, uintN flags)
+{
+ jsval key;
+ JSHashNumber keyHash;
+
+ /* XXX must be set in the following order or MSVC1.52 will crash */
+ keyHash = HASH_OBJECT(obj);
+ key = OBJECT_TO_JSVAL(obj);
+ return js_AtomizeHashedKey(cx, key, keyHash, flags);
+}
+
+JSAtom *
+js_AtomizeBoolean(JSContext *cx, JSBool b, uintN flags)
+{
+ jsval key;
+ JSHashNumber keyHash;
+
+ key = BOOLEAN_TO_JSVAL(b);
+ keyHash = HASH_BOOLEAN(b);
+ return js_AtomizeHashedKey(cx, key, keyHash, flags);
+}
+
+JSAtom *
+js_AtomizeInt(JSContext *cx, jsint i, uintN flags)
+{
+ jsval key;
+ JSHashNumber keyHash;
+
+ key = INT_TO_JSVAL(i);
+ keyHash = HASH_INT(i);
+ return js_AtomizeHashedKey(cx, key, keyHash, flags);
+}
+
+/* Worst-case alignment grain and aligning macro for 2x-sized buffer. */
+#define ALIGNMENT(t) JS_MAX(JSVAL_ALIGN, sizeof(t))
+#define ALIGN(b,t) ((t*) &(b)[ALIGNMENT(t) - (jsuword)(b) % ALIGNMENT(t)])
+
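The ALIGNMENT/ALIGN pair lets js_AtomizeDouble and js_Atomize carve a correctly aligned temporary double or JSString header out of a stack char buffer that is twice the worst-case grain. A small standalone sketch of the same trick with a double:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* Reserve twice the alignment grain, then point at the first
         * properly aligned byte inside the buffer, as ALIGN does. */
        char buf[2 * sizeof(double)];
        uintptr_t a = (uintptr_t) buf;
        double *dp = (double *) &buf[sizeof(double) - a % sizeof(double)];

        assert((uintptr_t) dp % sizeof(double) == 0);
        *dp = 1.5;
        return 0;
    }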
+JSAtom *
+js_AtomizeDouble(JSContext *cx, jsdouble d, uintN flags)
+{
+ jsdouble *dp;
+ JSHashNumber keyHash;
+ jsval key;
+ JSAtomState *state;
+ JSHashTable *table;
+ JSHashEntry *he, **hep;
+ JSAtom *atom;
+ char buf[2 * ALIGNMENT(double)];
+
+ dp = ALIGN(buf, double);
+ *dp = d;
+ keyHash = HASH_DOUBLE(dp);
+ key = DOUBLE_TO_JSVAL(dp);
+ state = &cx->runtime->atomState;
+ JS_LOCK(&state->lock, cx);
+ table = state->table;
+ hep = JS_HashTableRawLookup(table, keyHash, (void *)key);
+ if ((he = *hep) == NULL) {
+#ifdef JS_THREADSAFE
+ uint32 gen = state->tablegen;
+#endif
+ JS_UNLOCK(&state->lock,cx);
+ if (!js_NewDoubleValue(cx, d, &key))
+ return NULL;
+ JS_LOCK(&state->lock, cx);
+#ifdef JS_THREADSAFE
+ if (state->tablegen != gen) {
+ hep = JS_HashTableRawLookup(table, keyHash, (void *)key);
+ if ((he = *hep) != NULL) {
+ atom = (JSAtom *)he;
+ goto out;
+ }
+ }
+#endif
+ he = JS_HashTableRawAdd(table, hep, keyHash, (void *)key, NULL);
+ if (!he) {
+ JS_ReportOutOfMemory(cx);
+ atom = NULL;
+ goto out;
+ }
+ }
+
+ atom = (JSAtom *)he;
+ atom->flags |= flags;
+ cx->weakRoots.lastAtom = atom;
+out:
+ JS_UNLOCK(&state->lock,cx);
+ return atom;
+}
+
+/*
+ * To put an atom into the hidden subspace, XOR its keyHash with this value,
+ * which is (sqrt(2)-1) in 32-bit fixed point.
+ */
+#define HIDDEN_ATOM_SUBSPACE_KEYHASH 0x6A09E667
+
+JSAtom *
+js_AtomizeString(JSContext *cx, JSString *str, uintN flags)
+{
+ JSHashNumber keyHash;
+ jsval key;
+ JSAtomState *state;
+ JSHashTable *table;
+ JSHashEntry *he, **hep;
+ JSAtom *atom;
+
+ keyHash = js_HashString(str);
+ if (flags & ATOM_HIDDEN)
+ keyHash ^= HIDDEN_ATOM_SUBSPACE_KEYHASH;
+ key = STRING_TO_JSVAL(str);
+ state = &cx->runtime->atomState;
+ JS_LOCK(&state->lock, cx);
+ table = state->table;
+ hep = JS_HashTableRawLookup(table, keyHash, (void *)key);
+ if ((he = *hep) == NULL) {
+#ifdef JS_THREADSAFE
+ uint32 gen = state->tablegen;
+ JS_UNLOCK(&state->lock, cx);
+#endif
+
+ if (flags & ATOM_TMPSTR) {
+ str = (flags & ATOM_NOCOPY)
+ ? js_NewString(cx, str->chars, str->length, 0)
+ : js_NewStringCopyN(cx, str->chars, str->length, 0);
+ if (!str)
+ return NULL;
+ key = STRING_TO_JSVAL(str);
+ } else {
+ if (!JS_MakeStringImmutable(cx, str))
+ return NULL;
+ }
+
+#ifdef JS_THREADSAFE
+ JS_LOCK(&state->lock, cx);
+ if (state->tablegen != gen) {
+ hep = JS_HashTableRawLookup(table, keyHash, (void *)key);
+ if ((he = *hep) != NULL) {
+ atom = (JSAtom *)he;
+ if (flags & ATOM_NOCOPY)
+ str->chars = NULL;
+ goto out;
+ }
+ }
+#endif
+
+ he = JS_HashTableRawAdd(table, hep, keyHash, (void *)key, NULL);
+ if (!he) {
+ JS_ReportOutOfMemory(cx);
+ atom = NULL;
+ goto out;
+ }
+ }
+
+ atom = (JSAtom *)he;
+ atom->flags |= flags & (ATOM_PINNED | ATOM_INTERNED | ATOM_HIDDEN);
+ cx->weakRoots.lastAtom = atom;
+out:
+ JS_UNLOCK(&state->lock,cx);
+ return atom;
+}
+
+JS_FRIEND_API(JSAtom *)
+js_Atomize(JSContext *cx, const char *bytes, size_t length, uintN flags)
+{
+ jschar *chars;
+ JSString *str;
+ JSAtom *atom;
+ char buf[2 * ALIGNMENT(JSString)];
+
+ /*
+ * Avoiding the malloc in js_InflateString on shorter strings saves us
+ * over 20,000 malloc calls on mozilla browser startup. This compares to
+ * only 131 calls where the string is longer than a 31 char (net) buffer.
+     * The vast majority of atomized strings are already in the hashtable, so
+     * js_AtomizeString rarely has to copy the temp string we make.
+ */
+#define ATOMIZE_BUF_MAX 32
+ jschar inflated[ATOMIZE_BUF_MAX];
+ size_t inflatedLength = ATOMIZE_BUF_MAX - 1;
+
+ if (length < ATOMIZE_BUF_MAX) {
+ js_InflateStringToBuffer(cx, bytes, length, inflated, &inflatedLength);
+ inflated[inflatedLength] = 0;
+ chars = inflated;
+ } else {
+ inflatedLength = length;
+ chars = js_InflateString(cx, bytes, &inflatedLength);
+ if (!chars)
+ return NULL;
+ flags |= ATOM_NOCOPY;
+ }
+
+ str = ALIGN(buf, JSString);
+
+ str->chars = chars;
+ str->length = inflatedLength;
+ atom = js_AtomizeString(cx, str, ATOM_TMPSTR | flags);
+ if (chars != inflated && (!atom || ATOM_TO_STRING(atom)->chars != chars))
+ JS_free(cx, chars);
+ return atom;
+}
+
+JS_FRIEND_API(JSAtom *)
+js_AtomizeChars(JSContext *cx, const jschar *chars, size_t length, uintN flags)
+{
+ JSString *str;
+ char buf[2 * ALIGNMENT(JSString)];
+
+ str = ALIGN(buf, JSString);
+ str->chars = (jschar *)chars;
+ str->length = length;
+ return js_AtomizeString(cx, str, ATOM_TMPSTR | flags);
+}
+
+JSAtom *
+js_GetExistingStringAtom(JSContext *cx, const jschar *chars, size_t length)
+{
+ JSString *str;
+ char buf[2 * ALIGNMENT(JSString)];
+ JSHashNumber keyHash;
+ jsval key;
+ JSAtomState *state;
+ JSHashTable *table;
+ JSHashEntry **hep;
+
+ str = ALIGN(buf, JSString);
+ str->chars = (jschar *)chars;
+ str->length = length;
+ keyHash = js_HashString(str);
+ key = STRING_TO_JSVAL(str);
+ state = &cx->runtime->atomState;
+ JS_LOCK(&state->lock, cx);
+ table = state->table;
+ hep = JS_HashTableRawLookup(table, keyHash, (void *)key);
+ JS_UNLOCK(&state->lock, cx);
+ return (hep) ? (JSAtom *)*hep : NULL;
+}
+
+JSAtom *
+js_AtomizeValue(JSContext *cx, jsval value, uintN flags)
+{
+ if (JSVAL_IS_STRING(value))
+ return js_AtomizeString(cx, JSVAL_TO_STRING(value), flags);
+ if (JSVAL_IS_INT(value))
+ return js_AtomizeInt(cx, JSVAL_TO_INT(value), flags);
+ if (JSVAL_IS_DOUBLE(value))
+ return js_AtomizeDouble(cx, *JSVAL_TO_DOUBLE(value), flags);
+ if (JSVAL_IS_OBJECT(value))
+ return js_AtomizeObject(cx, JSVAL_TO_OBJECT(value), flags);
+ if (JSVAL_IS_BOOLEAN(value))
+ return js_AtomizeBoolean(cx, JSVAL_TO_BOOLEAN(value), flags);
+ return js_AtomizeHashedKey(cx, value, (JSHashNumber)value, flags);
+}
+
+JSAtom *
+js_ValueToStringAtom(JSContext *cx, jsval v)
+{
+ JSString *str;
+
+ str = js_ValueToString(cx, v);
+ if (!str)
+ return NULL;
+ return js_AtomizeString(cx, str, 0);
+}
+
+JS_STATIC_DLL_CALLBACK(JSHashNumber)
+js_hash_atom_ptr(const void *key)
+{
+ const JSAtom *atom = key;
+ return atom->number;
+}
+
+JS_STATIC_DLL_CALLBACK(void *)
+js_alloc_temp_space(void *priv, size_t size)
+{
+ JSContext *cx = priv;
+ void *space;
+
+ JS_ARENA_ALLOCATE(space, &cx->tempPool, size);
+ if (!space)
+ JS_ReportOutOfMemory(cx);
+ return space;
+}
+
+JS_STATIC_DLL_CALLBACK(void)
+js_free_temp_space(void *priv, void *item)
+{
+}
+
+JS_STATIC_DLL_CALLBACK(JSHashEntry *)
+js_alloc_temp_entry(void *priv, const void *key)
+{
+ JSContext *cx = priv;
+ JSAtomListElement *ale;
+
+ JS_ARENA_ALLOCATE_TYPE(ale, JSAtomListElement, &cx->tempPool);
+ if (!ale) {
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+ }
+ return &ale->entry;
+}
+
+JS_STATIC_DLL_CALLBACK(void)
+js_free_temp_entry(void *priv, JSHashEntry *he, uintN flag)
+{
+}
+
+static JSHashAllocOps temp_alloc_ops = {
+ js_alloc_temp_space, js_free_temp_space,
+ js_alloc_temp_entry, js_free_temp_entry
+};
+
+JSAtomListElement *
+js_IndexAtom(JSContext *cx, JSAtom *atom, JSAtomList *al)
+{
+ JSAtomListElement *ale, *ale2, *next;
+ JSHashEntry **hep;
+
+ ATOM_LIST_LOOKUP(ale, hep, al, atom);
+ if (!ale) {
+ if (al->count < 10) {
+ /* Few enough for linear search, no hash table needed. */
+ JS_ASSERT(!al->table);
+ ale = (JSAtomListElement *)js_alloc_temp_entry(cx, atom);
+ if (!ale)
+ return NULL;
+ ALE_SET_ATOM(ale, atom);
+ ALE_SET_NEXT(ale, al->list);
+ al->list = ale;
+ } else {
+ /* We want to hash. Have we already made a hash table? */
+ if (!al->table) {
+ /* No hash table yet, so hep had better be null! */
+ JS_ASSERT(!hep);
+ al->table = JS_NewHashTable(al->count + 1, js_hash_atom_ptr,
+ JS_CompareValues, JS_CompareValues,
+ &temp_alloc_ops, cx);
+ if (!al->table)
+ return NULL;
+
+ /*
+ * Set ht->nentries explicitly, because we are moving entries
+ * from al to ht, not calling JS_HashTable(Raw|)Add.
+ */
+ al->table->nentries = al->count;
+
+ /* Insert each ale on al->list into the new hash table. */
+ for (ale2 = al->list; ale2; ale2 = next) {
+ next = ALE_NEXT(ale2);
+ ale2->entry.keyHash = ALE_ATOM(ale2)->number;
+ hep = JS_HashTableRawLookup(al->table, ale2->entry.keyHash,
+ ale2->entry.key);
+ ALE_SET_NEXT(ale2, *hep);
+ *hep = &ale2->entry;
+ }
+ al->list = NULL;
+
+ /* Set hep for insertion of atom's ale, immediately below. */
+ hep = JS_HashTableRawLookup(al->table, atom->number, atom);
+ }
+
+ /* Finally, add an entry for atom into the hash bucket at hep. */
+ ale = (JSAtomListElement *)
+ JS_HashTableRawAdd(al->table, hep, atom->number, atom, NULL);
+ if (!ale)
+ return NULL;
+ }
+
+ ALE_SET_INDEX(ale, al->count++);
+ }
+ return ale;
+}
+
+JS_FRIEND_API(JSAtom *)
+js_GetAtom(JSContext *cx, JSAtomMap *map, jsatomid i)
+{
+ JSAtom *atom;
+ static JSAtom dummy;
+
+ JS_ASSERT(map->vector && i < map->length);
+ if (!map->vector || i >= map->length) {
+ char numBuf[12];
+ JS_snprintf(numBuf, sizeof numBuf, "%lu", (unsigned long)i);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_ATOMIC_NUMBER, numBuf);
+ return &dummy;
+ }
+ atom = map->vector[i];
+ JS_ASSERT(atom);
+ return atom;
+}
+
+JS_STATIC_DLL_CALLBACK(intN)
+js_map_atom(JSHashEntry *he, intN i, void *arg)
+{
+ JSAtomListElement *ale = (JSAtomListElement *)he;
+ JSAtom **vector = arg;
+
+ vector[ALE_INDEX(ale)] = ALE_ATOM(ale);
+ return HT_ENUMERATE_NEXT;
+}
+
+#ifdef DEBUG
+static jsrefcount js_atom_map_count;
+static jsrefcount js_atom_map_hash_table_count;
+#endif
+
+JS_FRIEND_API(JSBool)
+js_InitAtomMap(JSContext *cx, JSAtomMap *map, JSAtomList *al)
+{
+ JSAtom **vector;
+ JSAtomListElement *ale;
+ uint32 count;
+
+#ifdef DEBUG
+ JS_ATOMIC_INCREMENT(&js_atom_map_count);
+#endif
+ ale = al->list;
+ if (!ale && !al->table) {
+ map->vector = NULL;
+ map->length = 0;
+ return JS_TRUE;
+ }
+
+ count = al->count;
+ if (count >= ATOM_INDEX_LIMIT) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_TOO_MANY_LITERALS);
+ return JS_FALSE;
+ }
+ vector = (JSAtom **) JS_malloc(cx, (size_t) count * sizeof *vector);
+ if (!vector)
+ return JS_FALSE;
+
+ if (al->table) {
+#ifdef DEBUG
+ JS_ATOMIC_INCREMENT(&js_atom_map_hash_table_count);
+#endif
+ JS_HashTableEnumerateEntries(al->table, js_map_atom, vector);
+ } else {
+ do {
+ vector[ALE_INDEX(ale)] = ALE_ATOM(ale);
+ } while ((ale = ALE_NEXT(ale)) != NULL);
+ }
+ ATOM_LIST_INIT(al);
+
+ map->vector = vector;
+ map->length = (jsatomid)count;
+ return JS_TRUE;
+}
+
+JS_FRIEND_API(void)
+js_FreeAtomMap(JSContext *cx, JSAtomMap *map)
+{
+ if (map->vector) {
+ JS_free(cx, map->vector);
+ map->vector = NULL;
+ }
+ map->length = 0;
+}
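js_IndexAtom, js_InitAtomMap, and js_GetAtom above form the compiler-side pipeline for literals: js_IndexAtom assigns each atom a stable index while a script is being compiled, js_InitAtomMap freezes the accumulated JSAtomList into a flat JSAtomMap, and js_GetAtom reads atoms back by index at execution time. A minimal sketch of that flow, with a hypothetical helper name, error handling abbreviated, and jsatom.h assumed to be included:

    static JSBool
    IndexTwoLiterals(JSContext *cx, JSAtom *a, JSAtom *b, JSAtomMap *map)
    {
        JSAtomList al;
        JSAtomListElement *ale;

        ATOM_LIST_INIT(&al);

        /* Assign indexes; a repeated atom keeps its first index. */
        ale = js_IndexAtom(cx, a, &al);
        if (!ale)
            return JS_FALSE;
        ale = js_IndexAtom(cx, b, &al);
        if (!ale)
            return JS_FALSE;

        /* Freeze the list into the vector that js_GetAtom indexes. */
        if (!js_InitAtomMap(cx, map, &al))
            return JS_FALSE;

        JS_ASSERT(js_GetAtom(cx, map, 0) == a);
        return JS_TRUE;
    }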
diff --git a/src/third_party/js-1.7/jsatom.h b/src/third_party/js-1.7/jsatom.h
new file mode 100644
index 00000000000..4fb3d8d5188
--- /dev/null
+++ b/src/third_party/js-1.7/jsatom.h
@@ -0,0 +1,456 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsatom_h___
+#define jsatom_h___
+/*
+ * JS atom table.
+ */
+#include <stddef.h>
+#include "jstypes.h"
+#include "jshash.h" /* Added by JSIFY */
+#include "jsapi.h"
+#include "jsprvtd.h"
+#include "jspubtd.h"
+
+#ifdef JS_THREADSAFE
+#include "jslock.h"
+#endif
+
+JS_BEGIN_EXTERN_C
+
+#define ATOM_PINNED 0x01 /* atom is pinned against GC */
+#define ATOM_INTERNED 0x02 /* pinned variant for JS_Intern* API */
+#define ATOM_MARK 0x04 /* atom is reachable via GC */
+#define ATOM_HIDDEN 0x08 /* atom is in special hidden subspace */
+#define ATOM_NOCOPY 0x40 /* don't copy atom string bytes */
+#define ATOM_TMPSTR 0x80 /* internal, to avoid extra string */
+
+struct JSAtom {
+ JSHashEntry entry; /* key is jsval or unhidden atom
+ if ATOM_HIDDEN */
+ uint32 flags; /* pinned, interned, and mark flags */
+ jsatomid number; /* atom serial number and hash code */
+};
+
+#define ATOM_KEY(atom) ((jsval)(atom)->entry.key)
+#define ATOM_IS_OBJECT(atom) JSVAL_IS_OBJECT(ATOM_KEY(atom))
+#define ATOM_TO_OBJECT(atom) JSVAL_TO_OBJECT(ATOM_KEY(atom))
+#define ATOM_IS_INT(atom) JSVAL_IS_INT(ATOM_KEY(atom))
+#define ATOM_TO_INT(atom) JSVAL_TO_INT(ATOM_KEY(atom))
+#define ATOM_IS_DOUBLE(atom) JSVAL_IS_DOUBLE(ATOM_KEY(atom))
+#define ATOM_TO_DOUBLE(atom) JSVAL_TO_DOUBLE(ATOM_KEY(atom))
+#define ATOM_IS_STRING(atom) JSVAL_IS_STRING(ATOM_KEY(atom))
+#define ATOM_TO_STRING(atom) JSVAL_TO_STRING(ATOM_KEY(atom))
+#define ATOM_IS_BOOLEAN(atom) JSVAL_IS_BOOLEAN(ATOM_KEY(atom))
+#define ATOM_TO_BOOLEAN(atom) JSVAL_TO_BOOLEAN(ATOM_KEY(atom))
+
+/*
+ * Return a printable, lossless char[] representation of a string-type atom.
+ * The lifetime of the result extends at least until the next GC activation,
+ * longer if cx's string newborn root is not overwritten.
+ */
+extern JS_FRIEND_API(const char *)
+js_AtomToPrintableString(JSContext *cx, JSAtom *atom);
+
+struct JSAtomListElement {
+ JSHashEntry entry;
+};
+
+#define ALE_ATOM(ale) ((JSAtom *) (ale)->entry.key)
+#define ALE_INDEX(ale) ((jsatomid) JS_PTR_TO_UINT32((ale)->entry.value))
+#define ALE_JSOP(ale) ((JSOp) (ale)->entry.value)
+#define ALE_VALUE(ale) ((jsval) (ale)->entry.value)
+#define ALE_NEXT(ale) ((JSAtomListElement *) (ale)->entry.next)
+
+#define ALE_SET_ATOM(ale,atom) ((ale)->entry.key = (const void *)(atom))
+#define ALE_SET_INDEX(ale,index)((ale)->entry.value = JS_UINT32_TO_PTR(index))
+#define ALE_SET_JSOP(ale,op) ((ale)->entry.value = JS_UINT32_TO_PTR(op))
+#define ALE_SET_VALUE(ale,val) ((ale)->entry.value = (JSHashEntry *)(val))
+#define ALE_SET_NEXT(ale,link) ((ale)->entry.next = (JSHashEntry *)(link))
+
+struct JSAtomList {
+ JSAtomListElement *list; /* literals indexed for mapping */
+ JSHashTable *table; /* hash table if list gets too long */
+ jsuint count; /* count of indexed literals */
+};
+
+#define ATOM_LIST_INIT(al) ((al)->list = NULL, (al)->table = NULL, \
+ (al)->count = 0)
+
+#define ATOM_LIST_SEARCH(_ale,_al,_atom) \
+ JS_BEGIN_MACRO \
+ JSHashEntry **_hep; \
+ ATOM_LIST_LOOKUP(_ale, _hep, _al, _atom); \
+ JS_END_MACRO
+
+#define ATOM_LIST_LOOKUP(_ale,_hep,_al,_atom) \
+ JS_BEGIN_MACRO \
+ if ((_al)->table) { \
+ _hep = JS_HashTableRawLookup((_al)->table, _atom->number, _atom); \
+ _ale = *_hep ? (JSAtomListElement *) *_hep : NULL; \
+ } else { \
+ JSAtomListElement **_alep = &(_al)->list; \
+ _hep = NULL; \
+ while ((_ale = *_alep) != NULL) { \
+ if (ALE_ATOM(_ale) == (_atom)) { \
+ /* Hit, move atom's element to the front of the list. */ \
+ *_alep = ALE_NEXT(_ale); \
+ ALE_SET_NEXT(_ale, (_al)->list); \
+ (_al)->list = _ale; \
+ break; \
+ } \
+ _alep = (JSAtomListElement **)&_ale->entry.next; \
+ } \
+ } \
+ JS_END_MACRO
+
+struct JSAtomMap {
+ JSAtom **vector; /* array of ptrs to indexed atoms */
+ jsatomid length; /* count of (to-be-)indexed atoms */
+};
+
+struct JSAtomState {
+ JSRuntime *runtime; /* runtime that owns us */
+ JSHashTable *table; /* hash table containing all atoms */
+ jsatomid number; /* one beyond greatest atom number */
+ jsatomid liveAtoms; /* number of live atoms after last GC */
+
+ /* The rt->emptyString atom, see jsstr.c's js_InitRuntimeStringState. */
+ JSAtom *emptyAtom;
+
+ /* Type names and value literals. */
+ JSAtom *typeAtoms[JSTYPE_LIMIT];
+ JSAtom *booleanAtoms[2];
+ JSAtom *nullAtom;
+
+ /* Standard class constructor or prototype names. */
+ JSAtom *classAtoms[JSProto_LIMIT];
+
+ /* Various built-in or commonly-used atoms, pinned on first context. */
+ JSAtom *anonymousAtom;
+ JSAtom *argumentsAtom;
+ JSAtom *arityAtom;
+ JSAtom *calleeAtom;
+ JSAtom *callerAtom;
+ JSAtom *classPrototypeAtom;
+ JSAtom *closeAtom;
+ JSAtom *constructorAtom;
+ JSAtom *countAtom;
+ JSAtom *eachAtom;
+ JSAtom *etagoAtom;
+ JSAtom *evalAtom;
+ JSAtom *fileNameAtom;
+ JSAtom *getAtom;
+ JSAtom *getterAtom;
+ JSAtom *indexAtom;
+ JSAtom *inputAtom;
+ JSAtom *iteratorAtom;
+ JSAtom *lengthAtom;
+ JSAtom *lineNumberAtom;
+ JSAtom *messageAtom;
+ JSAtom *nameAtom;
+ JSAtom *namespaceAtom;
+ JSAtom *nextAtom;
+ JSAtom *noSuchMethodAtom;
+ JSAtom *parentAtom;
+ JSAtom *protoAtom;
+ JSAtom *ptagcAtom;
+ JSAtom *qualifierAtom;
+ JSAtom *setAtom;
+ JSAtom *setterAtom;
+ JSAtom *spaceAtom;
+ JSAtom *stackAtom;
+ JSAtom *stagoAtom;
+ JSAtom *starAtom;
+ JSAtom *starQualifierAtom;
+ JSAtom *tagcAtom;
+ JSAtom *toLocaleStringAtom;
+ JSAtom *toSourceAtom;
+ JSAtom *toStringAtom;
+ JSAtom *valueOfAtom;
+ JSAtom *xmlAtom;
+
+ /* Less frequently used atoms, pinned lazily by JS_ResolveStandardClass. */
+ struct {
+ JSAtom *InfinityAtom;
+ JSAtom *NaNAtom;
+ JSAtom *XMLListAtom;
+ JSAtom *decodeURIAtom;
+ JSAtom *decodeURIComponentAtom;
+ JSAtom *defineGetterAtom;
+ JSAtom *defineSetterAtom;
+ JSAtom *encodeURIAtom;
+ JSAtom *encodeURIComponentAtom;
+ JSAtom *escapeAtom;
+ JSAtom *functionNamespaceURIAtom;
+ JSAtom *hasOwnPropertyAtom;
+ JSAtom *isFiniteAtom;
+ JSAtom *isNaNAtom;
+ JSAtom *isPrototypeOfAtom;
+ JSAtom *isXMLNameAtom;
+ JSAtom *lookupGetterAtom;
+ JSAtom *lookupSetterAtom;
+ JSAtom *parseFloatAtom;
+ JSAtom *parseIntAtom;
+ JSAtom *propertyIsEnumerableAtom;
+ JSAtom *unescapeAtom;
+ JSAtom *unevalAtom;
+ JSAtom *unwatchAtom;
+ JSAtom *watchAtom;
+ } lazy;
+
+#ifdef JS_THREADSAFE
+ JSThinLock lock;
+ volatile uint32 tablegen;
+#endif
+#ifdef NARCISSUS
+ JSAtom *callAtom;
+ JSAtom *constructAtom;
+ JSAtom *hasInstanceAtom;
+ JSAtom *ExecutionContextAtom;
+ JSAtom *currentAtom;
+#endif
+};
+
+#define CLASS_ATOM(cx,name) \
+ ((cx)->runtime->atomState.classAtoms[JSProto_##name])
+
+/* Well-known predefined strings and their atoms. */
+extern const char *js_type_strs[];
+extern const char *js_boolean_strs[];
+extern const char *js_proto_strs[];
+
+#define JS_PROTO(name,code,init) extern const char js_##name##_str[];
+#include "jsproto.tbl"
+#undef JS_PROTO
+
+extern const char js_anonymous_str[];
+extern const char js_arguments_str[];
+extern const char js_arity_str[];
+extern const char js_callee_str[];
+extern const char js_caller_str[];
+extern const char js_class_prototype_str[];
+extern const char js_close_str[];
+extern const char js_constructor_str[];
+extern const char js_count_str[];
+extern const char js_etago_str[];
+extern const char js_each_str[];
+extern const char js_eval_str[];
+extern const char js_fileName_str[];
+extern const char js_get_str[];
+extern const char js_getter_str[];
+extern const char js_index_str[];
+extern const char js_input_str[];
+extern const char js_iterator_str[];
+extern const char js_length_str[];
+extern const char js_lineNumber_str[];
+extern const char js_message_str[];
+extern const char js_name_str[];
+extern const char js_namespace_str[];
+extern const char js_next_str[];
+extern const char js_noSuchMethod_str[];
+extern const char js_object_str[];
+extern const char js_parent_str[];
+extern const char js_private_str[];
+extern const char js_proto_str[];
+extern const char js_ptagc_str[];
+extern const char js_qualifier_str[];
+extern const char js_send_str[];
+extern const char js_setter_str[];
+extern const char js_set_str[];
+extern const char js_space_str[];
+extern const char js_stack_str[];
+extern const char js_stago_str[];
+extern const char js_star_str[];
+extern const char js_starQualifier_str[];
+extern const char js_tagc_str[];
+extern const char js_toSource_str[];
+extern const char js_toString_str[];
+extern const char js_toLocaleString_str[];
+extern const char js_valueOf_str[];
+extern const char js_xml_str[];
+
+#ifdef NARCISSUS
+extern const char js_call_str[];
+extern const char js_construct_str[];
+extern const char js_hasInstance_str[];
+extern const char js_ExecutionContext_str[];
+extern const char js_current_str[];
+#endif
+
+/*
+ * Initialize atom state. Return true on success, false with an out of
+ * memory error report on failure.
+ */
+extern JSBool
+js_InitAtomState(JSContext *cx, JSAtomState *state);
+
+/*
+ * Free and clear atom state (except for any interned string atoms).
+ */
+extern void
+js_FreeAtomState(JSContext *cx, JSAtomState *state);
+
+/*
+ * Interned strings are atoms that live until state's runtime is destroyed.
+ * This function frees all interned string atoms, and then frees and clears
+ * state's members (just as js_FreeAtomState does), unless there aren't any
+ * interned strings in state -- in which case state must be "free" already.
+ *
+ * NB: js_FreeAtomState is called for each "last" context being destroyed in
+ * a runtime, where there may yet be another context created in the runtime;
+ * whereas js_FinishAtomState is called from JS_DestroyRuntime, when we know
+ * that no more contexts will be created. Thus we minimize garbage during
+ * context-free episodes on a runtime, while preserving atoms created by the
+ * JS_Intern*String APIs for the life of the runtime.
+ */
+extern void
+js_FinishAtomState(JSAtomState *state);
+
+/*
+ * Atom garbage collection hooks.
+ */
+typedef void
+(*JSGCThingMarker)(void *thing, void *data);
+
+extern void
+js_MarkAtomState(JSAtomState *state, JSBool keepAtoms, JSGCThingMarker mark,
+ void *data);
+
+extern void
+js_SweepAtomState(JSAtomState *state);
+
+extern JSBool
+js_InitPinnedAtoms(JSContext *cx, JSAtomState *state);
+
+extern void
+js_UnpinPinnedAtoms(JSAtomState *state);
+
+/*
+ * Find or create the atom for an object. If we create a new atom, give it the
+ * type indicated in flags. Return 0 on failure to allocate memory.
+ */
+extern JSAtom *
+js_AtomizeObject(JSContext *cx, JSObject *obj, uintN flags);
+
+/*
+ * Find or create the atom for a Boolean value. If we create a new atom, give
+ * it the type indicated in flags. Return 0 on failure to allocate memory.
+ */
+extern JSAtom *
+js_AtomizeBoolean(JSContext *cx, JSBool b, uintN flags);
+
+/*
+ * Find or create the atom for an integer value. If we create a new atom, give
+ * it the type indicated in flags. Return 0 on failure to allocate memory.
+ */
+extern JSAtom *
+js_AtomizeInt(JSContext *cx, jsint i, uintN flags);
+
+/*
+ * Find or create the atom for a double value. If we create a new atom, give
+ * it the type indicated in flags. Return 0 on failure to allocate memory.
+ */
+extern JSAtom *
+js_AtomizeDouble(JSContext *cx, jsdouble d, uintN flags);
+
+/*
+ * Find or create the atom for a string. If we create a new atom, give it the
+ * type indicated in flags. Return 0 on failure to allocate memory.
+ */
+extern JSAtom *
+js_AtomizeString(JSContext *cx, JSString *str, uintN flags);
+
+extern JS_FRIEND_API(JSAtom *)
+js_Atomize(JSContext *cx, const char *bytes, size_t length, uintN flags);
+
+extern JS_FRIEND_API(JSAtom *)
+js_AtomizeChars(JSContext *cx, const jschar *chars, size_t length, uintN flags);
+
+/*
+ * Return an existing atom for the given char array or null if the char
+ * sequence is currently not atomized.
+ */
+extern JSAtom *
+js_GetExistingStringAtom(JSContext *cx, const jschar *chars, size_t length);
+
+/*
+ * This variant handles all value tag types.
+ */
+extern JSAtom *
+js_AtomizeValue(JSContext *cx, jsval value, uintN flags);
+
+/*
+ * Convert v to an atomized string.
+ */
+extern JSAtom *
+js_ValueToStringAtom(JSContext *cx, jsval v);
+
+/*
+ * Assign atom an index and insert it on al.
+ */
+extern JSAtomListElement *
+js_IndexAtom(JSContext *cx, JSAtom *atom, JSAtomList *al);
+
+/*
+ * Get the atom with index i from map.
+ */
+extern JS_FRIEND_API(JSAtom *)
+js_GetAtom(JSContext *cx, JSAtomMap *map, jsatomid i);
+
+/*
+ * For all unmapped atoms recorded in al, add a mapping from the atom's index
+ * to its address. The GC must not run until all indexed atoms in atomLists
+ * have been mapped by scripts connected to live objects (Function and Script
+ * class objects have scripts as/in their private data -- the GC knows about
+ * these two classes).
+ */
+extern JS_FRIEND_API(JSBool)
+js_InitAtomMap(JSContext *cx, JSAtomMap *map, JSAtomList *al);
+
+/*
+ * Free map->vector and clear map.
+ */
+extern JS_FRIEND_API(void)
+js_FreeAtomMap(JSContext *cx, JSAtomMap *map);
+
+JS_END_EXTERN_C
+
+#endif /* jsatom_h___ */
diff --git a/src/third_party/js-1.7/jsbit.h b/src/third_party/js-1.7/jsbit.h
new file mode 100644
index 00000000000..87bb0476a72
--- /dev/null
+++ b/src/third_party/js-1.7/jsbit.h
@@ -0,0 +1,195 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsbit_h___
+#define jsbit_h___
+
+#include "jstypes.h"
+#include "jsutil.h"
+
+JS_BEGIN_EXTERN_C
+
+/*
+** A jsbitmap_t is a long integer that can be used for bitmaps
+*/
+typedef JSUword jsbitmap_t; /* NSPR name, a la Unix system types */
+typedef jsbitmap_t jsbitmap; /* JS-style scalar typedef name */
+
+#define JS_TEST_BIT(_map,_bit) \
+ ((_map)[(_bit)>>JS_BITS_PER_WORD_LOG2] & (1L << ((_bit) & (JS_BITS_PER_WORD-1))))
+#define JS_SET_BIT(_map,_bit) \
+ ((_map)[(_bit)>>JS_BITS_PER_WORD_LOG2] |= (1L << ((_bit) & (JS_BITS_PER_WORD-1))))
+#define JS_CLEAR_BIT(_map,_bit) \
+ ((_map)[(_bit)>>JS_BITS_PER_WORD_LOG2] &= ~(1L << ((_bit) & (JS_BITS_PER_WORD-1))))
+
+/*
+** Compute the log of the least power of 2 greater than or equal to n
+*/
+extern JS_PUBLIC_API(JSIntn) JS_CeilingLog2(JSUint32 i);
+
+/*
+** Compute the log of the greatest power of 2 less than or equal to n
+*/
+extern JS_PUBLIC_API(JSIntn) JS_FloorLog2(JSUint32 i);
+
+/*
+ * Check if __builtin_clz is available; it first appeared in GCC 3.4.
+ * The built-in allows speeding up calculations of ceiling/floor log2,
+ * see bug 327129.
+ */
+#if __GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
+# define JS_HAS_GCC_BUILTIN_CLZ
+#endif
+
+/*
+** Macro version of JS_CeilingLog2: Compute the log of the least power of
+** 2 greater than or equal to _n. The result is returned in _log2.
+*/
+#ifdef JS_HAS_GCC_BUILTIN_CLZ
+/*
+ * Use __builtin_clz or count-leading-zeros to calculate ceil(log2(_n)).
+ * The macro checks for "n <= 1" and not "n != 0" as __builtin_clz(0) is
+ * undefined.
+ */
+# define JS_CEILING_LOG2(_log2,_n) \
+ JS_BEGIN_MACRO \
+ JS_STATIC_ASSERT(sizeof(unsigned int) == sizeof(JSUint32)); \
+ unsigned int j_ = (unsigned int)(_n); \
+ (_log2) = (j_ <= 1 ? 0 : 32 - __builtin_clz(j_ - 1)); \
+ JS_END_MACRO
+#else
+# define JS_CEILING_LOG2(_log2,_n) \
+ JS_BEGIN_MACRO \
+ JSUint32 j_ = (JSUint32)(_n); \
+ (_log2) = 0; \
+ if ((j_) & ((j_)-1)) \
+ (_log2) += 1; \
+ if ((j_) >> 16) \
+ (_log2) += 16, (j_) >>= 16; \
+ if ((j_) >> 8) \
+ (_log2) += 8, (j_) >>= 8; \
+ if ((j_) >> 4) \
+ (_log2) += 4, (j_) >>= 4; \
+ if ((j_) >> 2) \
+ (_log2) += 2, (j_) >>= 2; \
+ if ((j_) >> 1) \
+ (_log2) += 1; \
+ JS_END_MACRO
+#endif
+
+/*
+** Macro version of JS_FloorLog2: Compute the log of the greatest power of
+** 2 less than or equal to _n. The result is returned in _log2.
+**
+** This is equivalent to finding the highest set bit in the word.
+*/
+#ifdef JS_HAS_GCC_BUILTIN_CLZ
+/*
+ * Use __builtin_clz or count-leading-zeros to calculate floor(log2(_n)).
+ * Since __builtin_clz(0) is undefined, the macro sets the lowest bit to 1
+ * to ensure a 0 result when _n == 0.
+ */
+# define JS_FLOOR_LOG2(_log2,_n) \
+ JS_BEGIN_MACRO \
+ JS_STATIC_ASSERT(sizeof(unsigned int) == sizeof(JSUint32)); \
+ (_log2) = 31 - __builtin_clz(((unsigned int)(_n)) | 1); \
+ JS_END_MACRO
+#else
+# define JS_FLOOR_LOG2(_log2,_n) \
+ JS_BEGIN_MACRO \
+ JSUint32 j_ = (JSUint32)(_n); \
+ (_log2) = 0; \
+ if ((j_) >> 16) \
+ (_log2) += 16, (j_) >>= 16; \
+ if ((j_) >> 8) \
+ (_log2) += 8, (j_) >>= 8; \
+ if ((j_) >> 4) \
+ (_log2) += 4, (j_) >>= 4; \
+ if ((j_) >> 2) \
+ (_log2) += 2, (j_) >>= 2; \
+ if ((j_) >> 1) \
+ (_log2) += 1; \
+ JS_END_MACRO
+#endif
+
+/*
+ * Internal function.
+ * Compute the log of the least power of 2 greater than or equal to n.
+ * This is a version of JS_CeilingLog2 that operates on jsuword with
+ * CPU-dependent size.
+ */
+#define JS_CEILING_LOG2W(n) ((n) <= 1 ? 0 : 1 + JS_FLOOR_LOG2W((n) - 1))
+
+/*
+ * Internal function.
+ * Compute the log of the greatest power of 2 less than or equal to n.
+ * This is a version of JS_FloorLog2 that operates on jsuword with
+ * CPU-dependent size and requires that n != 0.
+ */
+#define JS_FLOOR_LOG2W(n) (JS_ASSERT((n) != 0), js_FloorLog2wImpl(n))
+
+#ifdef JS_HAS_GCC_BUILTIN_CLZ
+
+# if JS_BYTES_PER_WORD == 4
+JS_STATIC_ASSERT(sizeof(unsigned) == sizeof(JSUword));
+# define js_FloorLog2wImpl(n) \
+ ((JSUword)(JS_BITS_PER_WORD - 1 - __builtin_clz(n)))
+# elif JS_BYTES_PER_WORD == 8
+JS_STATIC_ASSERT(sizeof(unsigned long long) == sizeof(JSUword));
+# define js_FloorLog2wImpl(n) \
+ ((JSUword)(JS_BITS_PER_WORD - 1 - __builtin_clzll(n)))
+# else
+# error "NOT SUPPORTED"
+# endif
+
+#else
+
+# if JS_BYTES_PER_WORD == 4
+# define js_FloorLog2wImpl(n) ((JSUword)JS_FloorLog2(n))
+# elif JS_BYTES_PER_WORD == 8
+extern JSUword
+js_FloorLog2wImpl(JSUword n);
+# else
+# error "NOT SUPPORTED"
+# endif
+
+#endif
+
+
+JS_END_EXTERN_C
+#endif /* jsbit_h___ */
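The JS_CEILING_LOG2 and JS_FLOOR_LOG2 macros above write their result into the first argument rather than returning it, and JS_TEST_BIT/JS_SET_BIT index a jsbitmap array by bit number. A small usage sketch with a hypothetical helper, assuming jsbit.h and its dependencies are on the include path:

    static void
    RoundingExample(void)
    {
        JSIntn ceilLog, floorLog;
        jsbitmap map[2] = { 0, 0 };     /* room for 2 * JS_BITS_PER_WORD bits */

        JS_CEILING_LOG2(ceilLog, 48);   /* ceilLog  == 6: 2^6 = 64 >= 48 */
        JS_FLOOR_LOG2(floorLog, 48);    /* floorLog == 5: 2^5 = 32 <= 48 */

        JS_SET_BIT(map, 40);
        JS_ASSERT(JS_TEST_BIT(map, 40));
        JS_CLEAR_BIT(map, 40);

        (void) ceilLog;
        (void) floorLog;
    }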
diff --git a/src/third_party/js-1.7/jsbool.c b/src/third_party/js-1.7/jsbool.c
new file mode 100644
index 00000000000..543b4f3f60e
--- /dev/null
+++ b/src/third_party/js-1.7/jsbool.c
@@ -0,0 +1,227 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS boolean implementation.
+ */
+#include "jsstddef.h"
+#include "jstypes.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsapi.h"
+#include "jsatom.h"
+#include "jsbool.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsinterp.h"
+#include "jslock.h"
+#include "jsnum.h"
+#include "jsobj.h"
+#include "jsstr.h"
+
+JSClass js_BooleanClass = {
+ "Boolean",
+ JSCLASS_HAS_PRIVATE | JSCLASS_HAS_CACHED_PROTO(JSProto_Boolean),
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+};
+
+#if JS_HAS_TOSOURCE
+#include "jsprf.h"
+
+static JSBool
+bool_toSource(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsval v;
+ char buf[32];
+ JSString *str;
+
+ if (JSVAL_IS_BOOLEAN((jsval)obj)) {
+ v = (jsval)obj;
+ } else {
+ if (!JS_InstanceOf(cx, obj, &js_BooleanClass, argv))
+ return JS_FALSE;
+ v = OBJ_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
+ if (!JSVAL_IS_BOOLEAN(v))
+ return js_obj_toSource(cx, obj, argc, argv, rval);
+ }
+ JS_snprintf(buf, sizeof buf, "(new %s(%s))",
+ js_BooleanClass.name,
+ js_boolean_strs[JSVAL_TO_BOOLEAN(v) ? 1 : 0]);
+ str = JS_NewStringCopyZ(cx, buf);
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+#endif
+
+static JSBool
+bool_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsval v;
+ JSAtom *atom;
+ JSString *str;
+
+ if (JSVAL_IS_BOOLEAN((jsval)obj)) {
+ v = (jsval)obj;
+ } else {
+ if (!JS_InstanceOf(cx, obj, &js_BooleanClass, argv))
+ return JS_FALSE;
+ v = OBJ_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
+ if (!JSVAL_IS_BOOLEAN(v))
+ return js_obj_toString(cx, obj, argc, argv, rval);
+ }
+ atom = cx->runtime->atomState.booleanAtoms[JSVAL_TO_BOOLEAN(v) ? 1 : 0];
+ str = ATOM_TO_STRING(atom);
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+bool_valueOf(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ if (JSVAL_IS_BOOLEAN((jsval)obj)) {
+ *rval = (jsval)obj;
+ return JS_TRUE;
+ }
+ if (!JS_InstanceOf(cx, obj, &js_BooleanClass, argv))
+ return JS_FALSE;
+ *rval = OBJ_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
+ return JS_TRUE;
+}
+
+static JSFunctionSpec boolean_methods[] = {
+#if JS_HAS_TOSOURCE
+ {js_toSource_str, bool_toSource, 0,JSFUN_THISP_BOOLEAN,0},
+#endif
+ {js_toString_str, bool_toString, 0,JSFUN_THISP_BOOLEAN,0},
+ {js_valueOf_str, bool_valueOf, 0,JSFUN_THISP_BOOLEAN,0},
+ {0,0,0,0,0}
+};
+
+static JSBool
+Boolean(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSBool b;
+ jsval bval;
+
+ if (argc != 0) {
+ if (!js_ValueToBoolean(cx, argv[0], &b))
+ return JS_FALSE;
+ bval = BOOLEAN_TO_JSVAL(b);
+ } else {
+ bval = JSVAL_FALSE;
+ }
+ if (!(cx->fp->flags & JSFRAME_CONSTRUCTING)) {
+ *rval = bval;
+ return JS_TRUE;
+ }
+ OBJ_SET_SLOT(cx, obj, JSSLOT_PRIVATE, bval);
+ return JS_TRUE;
+}
+
+JSObject *
+js_InitBooleanClass(JSContext *cx, JSObject *obj)
+{
+ JSObject *proto;
+
+ proto = JS_InitClass(cx, obj, NULL, &js_BooleanClass, Boolean, 1,
+ NULL, boolean_methods, NULL, NULL);
+ if (!proto)
+ return NULL;
+ OBJ_SET_SLOT(cx, proto, JSSLOT_PRIVATE, JSVAL_FALSE);
+ return proto;
+}
+
+JSObject *
+js_BooleanToObject(JSContext *cx, JSBool b)
+{
+ JSObject *obj;
+
+ obj = js_NewObject(cx, &js_BooleanClass, NULL, NULL);
+ if (!obj)
+ return NULL;
+ OBJ_SET_SLOT(cx, obj, JSSLOT_PRIVATE, BOOLEAN_TO_JSVAL(b));
+ return obj;
+}
+
+JSString *
+js_BooleanToString(JSContext *cx, JSBool b)
+{
+ return ATOM_TO_STRING(cx->runtime->atomState.booleanAtoms[b ? 1 : 0]);
+}
+
+JSBool
+js_ValueToBoolean(JSContext *cx, jsval v, JSBool *bp)
+{
+ JSBool b;
+ jsdouble d;
+
+ if (JSVAL_IS_NULL(v) || JSVAL_IS_VOID(v)) {
+ b = JS_FALSE;
+ } else if (JSVAL_IS_OBJECT(v)) {
+ if (!JS_VERSION_IS_ECMA(cx)) {
+ if (!OBJ_DEFAULT_VALUE(cx, JSVAL_TO_OBJECT(v), JSTYPE_BOOLEAN, &v))
+ return JS_FALSE;
+ if (!JSVAL_IS_BOOLEAN(v))
+ v = JSVAL_TRUE; /* non-null object is true */
+ b = JSVAL_TO_BOOLEAN(v);
+ } else {
+ b = JS_TRUE;
+ }
+ } else if (JSVAL_IS_STRING(v)) {
+ b = JSSTRING_LENGTH(JSVAL_TO_STRING(v)) ? JS_TRUE : JS_FALSE;
+ } else if (JSVAL_IS_INT(v)) {
+ b = JSVAL_TO_INT(v) ? JS_TRUE : JS_FALSE;
+ } else if (JSVAL_IS_DOUBLE(v)) {
+ d = *JSVAL_TO_DOUBLE(v);
+ b = (!JSDOUBLE_IS_NaN(d) && d != 0) ? JS_TRUE : JS_FALSE;
+ } else {
+ JS_ASSERT(JSVAL_IS_BOOLEAN(v));
+ b = JSVAL_TO_BOOLEAN(v);
+ }
+
+ *bp = b;
+ return JS_TRUE;
+}
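From native code, js_ValueToBoolean implements the engine's ToBoolean conversion and js_BooleanToString maps the result onto the shared "true"/"false" atoms. A hedged sketch of how a native helper might combine them (hypothetical function, error paths abbreviated):

    static JSBool
    DescribeTruthiness(JSContext *cx, jsval v, JSString **strp)
    {
        JSBool b;

        if (!js_ValueToBoolean(cx, v, &b))
            return JS_FALSE;

        /* Returns the interned "true" or "false" string for b. */
        *strp = js_BooleanToString(cx, b);
        return *strp != NULL;
    }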
diff --git a/src/third_party/js-1.7/jsbool.h b/src/third_party/js-1.7/jsbool.h
new file mode 100644
index 00000000000..8dbd21813fc
--- /dev/null
+++ b/src/third_party/js-1.7/jsbool.h
@@ -0,0 +1,76 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsbool_h___
+#define jsbool_h___
+/*
+ * JS boolean interface.
+ */
+
+JS_BEGIN_EXTERN_C
+
+/*
+ * Crypto-booleans, not visible to script but used internally by the engine.
+ *
+ * JSVAL_HOLE is a useful value for identifying a hole in an array. It's also
+ * used in the interpreter to represent "no exception pending". In general it
+ * can be used to represent "no value".
+ *
+ * JSVAL_ARETURN is used to throw an asynchronous return for generator.close().
+ */
+#define JSVAL_HOLE BOOLEAN_TO_JSVAL(2)
+#define JSVAL_ARETURN BOOLEAN_TO_JSVAL(3)
+
+extern JSClass js_BooleanClass;
+
+extern JSObject *
+js_InitBooleanClass(JSContext *cx, JSObject *obj);
+
+extern JSObject *
+js_BooleanToObject(JSContext *cx, JSBool b);
+
+extern JSString *
+js_BooleanToString(JSContext *cx, JSBool b);
+
+extern JSBool
+js_ValueToBoolean(JSContext *cx, jsval v, JSBool *bp);
+
+JS_END_EXTERN_C
+
+#endif /* jsbool_h___ */
diff --git a/src/third_party/js-1.7/jsclist.h b/src/third_party/js-1.7/jsclist.h
new file mode 100644
index 00000000000..604ec0ec956
--- /dev/null
+++ b/src/third_party/js-1.7/jsclist.h
@@ -0,0 +1,139 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsclist_h___
+#define jsclist_h___
+
+#include "jstypes.h"
+
+/*
+** Circular linked list
+*/
+typedef struct JSCListStr {
+ struct JSCListStr *next;
+ struct JSCListStr *prev;
+} JSCList;
+
+/*
+** Insert element "_e" into the list, before "_l".
+*/
+#define JS_INSERT_BEFORE(_e,_l) \
+ JS_BEGIN_MACRO \
+ (_e)->next = (_l); \
+ (_e)->prev = (_l)->prev; \
+ (_l)->prev->next = (_e); \
+ (_l)->prev = (_e); \
+ JS_END_MACRO
+
+/*
+** Insert element "_e" into the list, after "_l".
+*/
+#define JS_INSERT_AFTER(_e,_l) \
+ JS_BEGIN_MACRO \
+ (_e)->next = (_l)->next; \
+ (_e)->prev = (_l); \
+ (_l)->next->prev = (_e); \
+ (_l)->next = (_e); \
+ JS_END_MACRO
+
+/*
+** Return the element following element "_e"
+*/
+#define JS_NEXT_LINK(_e) \
+ ((_e)->next)
+/*
+** Return the element preceding element "_e"
+*/
+#define JS_PREV_LINK(_e) \
+ ((_e)->prev)
+
+/*
+** Append an element "_e" to the end of the list "_l"
+*/
+#define JS_APPEND_LINK(_e,_l) JS_INSERT_BEFORE(_e,_l)
+
+/*
+** Insert an element "_e" at the head of the list "_l"
+*/
+#define JS_INSERT_LINK(_e,_l) JS_INSERT_AFTER(_e,_l)
+
+/* Return the head/tail of the list */
+#define JS_LIST_HEAD(_l) (_l)->next
+#define JS_LIST_TAIL(_l) (_l)->prev
+
+/*
+** Remove the element "_e" from it's circular list.
+*/
+#define JS_REMOVE_LINK(_e) \
+ JS_BEGIN_MACRO \
+ (_e)->prev->next = (_e)->next; \
+ (_e)->next->prev = (_e)->prev; \
+ JS_END_MACRO
+
+/*
+** Remove the element "_e" from it's circular list. Also initializes the
+** linkage.
+*/
+#define JS_REMOVE_AND_INIT_LINK(_e) \
+ JS_BEGIN_MACRO \
+ (_e)->prev->next = (_e)->next; \
+ (_e)->next->prev = (_e)->prev; \
+ (_e)->next = (_e); \
+ (_e)->prev = (_e); \
+ JS_END_MACRO
+
+/*
+** Return non-zero if the given circular list "_l" is empty, zero if the
+** circular list is not empty
+*/
+#define JS_CLIST_IS_EMPTY(_l) \
+ ((_l)->next == (_l))
+
+/*
+** Initialize a circular list
+*/
+#define JS_INIT_CLIST(_l) \
+ JS_BEGIN_MACRO \
+ (_l)->next = (_l); \
+ (_l)->prev = (_l); \
+ JS_END_MACRO
+
+#define JS_INIT_STATIC_CLIST(_l) \
+ {(_l), (_l)}
+
+#endif /* jsclist_h___ */
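The list macros above keep a sentinel node whose next/prev point back at itself when the list is empty, so traversal stops once the cursor returns to the sentinel. A short sketch of typical use, with a hypothetical node type and assuming jsclist.h is included:

    typedef struct MyNode {
        JSCList links;      /* first member, so a JSCList* can be cast back */
        int     value;
    } MyNode;

    static int
    SumList(JSCList *head)
    {
        JSCList *link;
        int sum = 0;

        for (link = JS_LIST_HEAD(head); link != head; link = JS_NEXT_LINK(link))
            sum += ((MyNode *)link)->value;
        return sum;
    }

    /* Typical setup:
     *     JSCList head;
     *     JS_INIT_CLIST(&head);
     *     JS_APPEND_LINK(&node->links, &head);
     *     ...
     *     JS_REMOVE_LINK(&node->links);
     */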
diff --git a/src/third_party/js-1.7/jscntxt.c b/src/third_party/js-1.7/jscntxt.c
new file mode 100644
index 00000000000..139ad9b8ac5
--- /dev/null
+++ b/src/third_party/js-1.7/jscntxt.c
@@ -0,0 +1,1229 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=80:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS execution context.
+ */
+#include "jsstddef.h"
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+#include "jstypes.h"
+#include "jsarena.h" /* Added by JSIFY */
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsclist.h"
+#include "jsprf.h"
+#include "jsatom.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsdbgapi.h"
+#include "jsexn.h"
+#include "jsgc.h"
+#include "jslock.h"
+#include "jsnum.h"
+#include "jsobj.h"
+#include "jsopcode.h"
+#include "jsscan.h"
+#include "jsscope.h"
+#include "jsscript.h"
+#include "jsstr.h"
+
+#ifdef JS_THREADSAFE
+
+/*
+ * Callback function to delete a JSThread info when the thread that owns it
+ * is destroyed.
+ */
+void JS_DLL_CALLBACK
+js_ThreadDestructorCB(void *ptr)
+{
+ JSThread *thread = (JSThread *)ptr;
+
+ if (!thread)
+ return;
+ while (!JS_CLIST_IS_EMPTY(&thread->contextList)) {
+ /* NB: use a temporary, as the macro evaluates its args many times. */
+ JSCList *link = thread->contextList.next;
+
+ JS_REMOVE_AND_INIT_LINK(link);
+ }
+ GSN_CACHE_CLEAR(&thread->gsnCache);
+ free(thread);
+}
+
+/*
+ * Get current thread-local JSThread info, creating one if it doesn't exist.
+ * Each thread has a unique JSThread pointer.
+ *
+ * Since we are dealing with thread-local data, no lock is needed.
+ *
+ * Return a pointer to the thread local info, NULL if the system runs out
+ * of memory, or it failed to set thread private data (neither case is very
+ * likely; both are probably due to out-of-memory). It is up to the caller
+ * to report an error, if possible.
+ */
+JSThread *
+js_GetCurrentThread(JSRuntime *rt)
+{
+ JSThread *thread;
+
+ thread = (JSThread *)PR_GetThreadPrivate(rt->threadTPIndex);
+ if (!thread) {
+ thread = (JSThread *) calloc(1, sizeof(JSThread));
+ if (!thread)
+ return NULL;
+
+ if (PR_FAILURE == PR_SetThreadPrivate(rt->threadTPIndex, thread)) {
+ free(thread);
+ return NULL;
+ }
+
+ JS_INIT_CLIST(&thread->contextList);
+ thread->id = js_CurrentThreadId();
+
+ /* js_SetContextThread initializes gcFreeLists as necessary. */
+#ifdef DEBUG
+ memset(thread->gcFreeLists, JS_FREE_PATTERN,
+ sizeof(thread->gcFreeLists));
+#endif
+ }
+ return thread;
+}
+
+/*
+ * Sets current thread as owning thread of a context by assigning the
+ * thread-private info to the context. If the current thread doesn't have
+ * private JSThread info, create one.
+ */
+JSBool
+js_SetContextThread(JSContext *cx)
+{
+ JSThread *thread = js_GetCurrentThread(cx->runtime);
+
+ if (!thread) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+
+ /*
+ * Clear gcFreeLists on each transition from 0 to 1 context active on the
+ * current thread. See bug 351602.
+ */
+ if (JS_CLIST_IS_EMPTY(&thread->contextList))
+ memset(thread->gcFreeLists, 0, sizeof(thread->gcFreeLists));
+
+ cx->thread = thread;
+ JS_REMOVE_LINK(&cx->threadLinks);
+ JS_APPEND_LINK(&cx->threadLinks, &thread->contextList);
+ return JS_TRUE;
+}
+
+/* Remove the owning thread info of a context. */
+void
+js_ClearContextThread(JSContext *cx)
+{
+ JS_REMOVE_AND_INIT_LINK(&cx->threadLinks);
+#ifdef DEBUG
+ if (JS_CLIST_IS_EMPTY(&cx->thread->contextList)) {
+ memset(cx->thread->gcFreeLists, JS_FREE_PATTERN,
+ sizeof(cx->thread->gcFreeLists));
+ }
+#endif
+ cx->thread = NULL;
+}
+
+#endif /* JS_THREADSAFE */
+
+void
+js_OnVersionChange(JSContext *cx)
+{
+#ifdef DEBUG
+ JSVersion version = JSVERSION_NUMBER(cx);
+
+ JS_ASSERT(version == JSVERSION_DEFAULT || version >= JSVERSION_ECMA_3);
+#endif
+}
+
+void
+js_SetVersion(JSContext *cx, JSVersion version)
+{
+ cx->version = version;
+ js_OnVersionChange(cx);
+}
+
+JSContext *
+js_NewContext(JSRuntime *rt, size_t stackChunkSize)
+{
+ JSContext *cx;
+ JSBool ok, first;
+ JSContextCallback cxCallback;
+
+ cx = (JSContext *) malloc(sizeof *cx);
+ if (!cx)
+ return NULL;
+ memset(cx, 0, sizeof *cx);
+
+ cx->runtime = rt;
+#if JS_STACK_GROWTH_DIRECTION > 0
+ cx->stackLimit = (jsuword)-1;
+#endif
+#ifdef JS_THREADSAFE
+ JS_INIT_CLIST(&cx->threadLinks);
+ js_SetContextThread(cx);
+#endif
+
+ JS_LOCK_GC(rt);
+ for (;;) {
+ first = (rt->contextList.next == &rt->contextList);
+ if (rt->state == JSRTS_UP) {
+ JS_ASSERT(!first);
+ break;
+ }
+ if (rt->state == JSRTS_DOWN) {
+ JS_ASSERT(first);
+ rt->state = JSRTS_LAUNCHING;
+ break;
+ }
+ JS_WAIT_CONDVAR(rt->stateChange, JS_NO_TIMEOUT);
+ }
+ JS_APPEND_LINK(&cx->links, &rt->contextList);
+ JS_UNLOCK_GC(rt);
+
+ /*
+ * First we do the infallible, every-time per-context initializations.
+ * Should a later, fallible initialization (js_InitRegExpStatics, e.g.,
+ * or the stuff under 'if (first)' below) fail, at least the version
+ * and arena-pools will be valid and safe to use (say, from the last GC
+ * done by js_DestroyContext).
+ */
+ cx->version = JSVERSION_DEFAULT;
+ cx->jsop_eq = JSOP_EQ;
+ cx->jsop_ne = JSOP_NE;
+ JS_InitArenaPool(&cx->stackPool, "stack", stackChunkSize, sizeof(jsval));
+ JS_InitArenaPool(&cx->tempPool, "temp", 1024, sizeof(jsdouble));
+
+ if (!js_InitRegExpStatics(cx, &cx->regExpStatics)) {
+ js_DestroyContext(cx, JSDCM_NEW_FAILED);
+ return NULL;
+ }
+
+ /*
+ * If cx is the first context on this runtime, initialize well-known atoms,
+ * keywords, numbers, and strings. If one of these steps should fail, the
+ * runtime will be left in a partially initialized state, with zeroes and
+ * nulls stored in the default-initialized remainder of the struct. We'll
+ * clean the runtime up under js_DestroyContext, because cx will be "last"
+ * as well as "first".
+ */
+ if (first) {
+#ifdef JS_THREADSAFE
+ JS_BeginRequest(cx);
+#endif
+ /*
+ * Both atomState and the scriptFilenameTable may be left over from a
+ * previous episode of non-zero contexts alive in rt, so don't re-init
+ * either table if it's not necessary. Just repopulate atomState with
+ * well-known internal atoms, and with the reserved identifiers added
+ * by the scanner.
+ */
+ ok = (rt->atomState.liveAtoms == 0)
+ ? js_InitAtomState(cx, &rt->atomState)
+ : js_InitPinnedAtoms(cx, &rt->atomState);
+ if (ok && !rt->scriptFilenameTable)
+ ok = js_InitRuntimeScriptState(rt);
+ if (ok)
+ ok = js_InitRuntimeNumberState(cx);
+ if (ok)
+ ok = js_InitRuntimeStringState(cx);
+#ifdef JS_THREADSAFE
+ JS_EndRequest(cx);
+#endif
+ if (!ok) {
+ js_DestroyContext(cx, JSDCM_NEW_FAILED);
+ return NULL;
+ }
+
+ JS_LOCK_GC(rt);
+ rt->state = JSRTS_UP;
+ JS_NOTIFY_ALL_CONDVAR(rt->stateChange);
+ JS_UNLOCK_GC(rt);
+ }
+
+ cxCallback = rt->cxCallback;
+ if (cxCallback && !cxCallback(cx, JSCONTEXT_NEW)) {
+ js_DestroyContext(cx, JSDCM_NEW_FAILED);
+ return NULL;
+ }
+ return cx;
+}
+
+void
+js_DestroyContext(JSContext *cx, JSDestroyContextMode mode)
+{
+ JSRuntime *rt;
+ JSContextCallback cxCallback;
+ JSBool last;
+ JSArgumentFormatMap *map;
+ JSLocalRootStack *lrs;
+ JSLocalRootChunk *lrc;
+
+ rt = cx->runtime;
+
+ if (mode != JSDCM_NEW_FAILED) {
+ cxCallback = rt->cxCallback;
+ if (cxCallback) {
+ /*
+ * JSCONTEXT_DESTROY callback is not allowed to fail and must
+ * return true.
+ */
+#ifdef DEBUG
+ JSBool callbackStatus =
+#endif
+ cxCallback(cx, JSCONTEXT_DESTROY);
+ JS_ASSERT(callbackStatus);
+ }
+ }
+
+ /* Remove cx from context list first. */
+ JS_LOCK_GC(rt);
+ JS_ASSERT(rt->state == JSRTS_UP || rt->state == JSRTS_LAUNCHING);
+ JS_REMOVE_LINK(&cx->links);
+ last = (rt->contextList.next == &rt->contextList);
+ if (last)
+ rt->state = JSRTS_LANDING;
+ JS_UNLOCK_GC(rt);
+
+ if (last) {
+#ifdef JS_THREADSAFE
+ /*
+ * If cx is not in a request already, begin one now so that we wait
+ * for any racing GC started on a not-last context to finish, before
+ * we plow ahead and unpin atoms. Note that even though we begin a
+ * request here if necessary, we end all requests on cx below before
+ * forcing a final GC. This lets any not-last context destruction
+ * racing in another thread try to force or maybe run the GC, but by
+ * that point, rt->state will not be JSRTS_UP, and that GC attempt
+ * will return early.
+ */
+ if (cx->requestDepth == 0)
+ JS_BeginRequest(cx);
+#endif
+
+ /* Unpin all pinned atoms before final GC. */
+ js_UnpinPinnedAtoms(&rt->atomState);
+
+ /* Unlock and clear GC things held by runtime pointers. */
+ js_FinishRuntimeNumberState(cx);
+ js_FinishRuntimeStringState(cx);
+
+ /* Clear debugging state to remove GC roots. */
+ JS_ClearAllTraps(cx);
+ JS_ClearAllWatchPoints(cx);
+ }
+
+ /*
+ * Remove more GC roots in regExpStatics, then collect garbage.
+ * XXX anti-modularity alert: we rely on the call to js_RemoveRoot within
+ * XXX this function call to wait for any racing GC to complete, in the
+ * XXX case where JS_DestroyContext is called outside of a request on cx
+ */
+ js_FreeRegExpStatics(cx, &cx->regExpStatics);
+
+#ifdef JS_THREADSAFE
+ /*
+ * Destroying a context implicitly calls JS_EndRequest(). Also, we must
+ * end our request here in case we are "last" -- in that event, another
+ * js_DestroyContext that was not last might be waiting in the GC for our
+ * request to end. We'll let it run below, just before we do the truly
+ * final GC and then free atom state.
+ *
+ * At this point, cx must be inaccessible to other threads. It's off the
+ * rt->contextList, and it should not be reachable via any object private
+ * data structure.
+ */
+ while (cx->requestDepth != 0)
+ JS_EndRequest(cx);
+#endif
+
+ if (last) {
+ js_GC(cx, GC_LAST_CONTEXT);
+
+ /* Try to free atom state, now that no unrooted scripts survive. */
+ if (rt->atomState.liveAtoms == 0)
+ js_FreeAtomState(cx, &rt->atomState);
+
+ /* Also free the script filename table if it exists and is empty. */
+ if (rt->scriptFilenameTable && rt->scriptFilenameTable->nentries == 0)
+ js_FinishRuntimeScriptState(rt);
+
+ /*
+ * Free the deflated string cache, but only after the last GC has
+ * collected all unleaked strings.
+ */
+ js_FinishDeflatedStringCache(rt);
+
+ /* Take the runtime down, now that it has no contexts or atoms. */
+ JS_LOCK_GC(rt);
+ rt->state = JSRTS_DOWN;
+ JS_NOTIFY_ALL_CONDVAR(rt->stateChange);
+ JS_UNLOCK_GC(rt);
+ } else {
+ if (mode == JSDCM_FORCE_GC)
+ js_GC(cx, GC_NORMAL);
+ else if (mode == JSDCM_MAYBE_GC)
+ JS_MaybeGC(cx);
+ }
+
+ /* Free the stuff hanging off of cx. */
+ JS_FinishArenaPool(&cx->stackPool);
+ JS_FinishArenaPool(&cx->tempPool);
+
+ if (cx->lastMessage)
+ free(cx->lastMessage);
+
+ /* Remove any argument formatters. */
+ map = cx->argumentFormatMap;
+ while (map) {
+ JSArgumentFormatMap *temp = map;
+ map = map->next;
+ JS_free(cx, temp);
+ }
+
+ /* Destroy the resolve recursion damper. */
+ if (cx->resolvingTable) {
+ JS_DHashTableDestroy(cx->resolvingTable);
+ cx->resolvingTable = NULL;
+ }
+
+ lrs = cx->localRootStack;
+ if (lrs) {
+ while ((lrc = lrs->topChunk) != &lrs->firstChunk) {
+ lrs->topChunk = lrc->down;
+ JS_free(cx, lrc);
+ }
+ JS_free(cx, lrs);
+ }
+
+#ifdef JS_THREADSAFE
+ js_ClearContextThread(cx);
+#endif
+
+ /* Finally, free cx itself. */
+ free(cx);
+}
+
+JSBool
+js_ValidContextPointer(JSRuntime *rt, JSContext *cx)
+{
+ JSCList *cl;
+
+ for (cl = rt->contextList.next; cl != &rt->contextList; cl = cl->next) {
+ if (cl == &cx->links)
+ return JS_TRUE;
+ }
+ JS_RUNTIME_METER(rt, deadContexts);
+ return JS_FALSE;
+}
+
+JSContext *
+js_ContextIterator(JSRuntime *rt, JSBool unlocked, JSContext **iterp)
+{
+ JSContext *cx = *iterp;
+
+ if (unlocked)
+ JS_LOCK_GC(rt);
+ if (!cx)
+ cx = (JSContext *)&rt->contextList;
+ cx = (JSContext *)cx->links.next;
+ if (&cx->links == &rt->contextList)
+ cx = NULL;
+ *iterp = cx;
+ if (unlocked)
+ JS_UNLOCK_GC(rt);
+ return cx;
+}
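js_ContextIterator walks the runtime's circular context list one step per call, using the passed-in cursor to remember its position and returning NULL when the list is exhausted. A hedged sketch of the calling convention (hypothetical helper):

    static uintN
    CountContexts(JSRuntime *rt)
    {
        JSContext *acx, *iter = NULL;
        uintN n = 0;

        /* Pass JS_TRUE so the iterator takes and drops the GC lock itself. */
        while ((acx = js_ContextIterator(rt, JS_TRUE, &iter)) != NULL)
            n++;
        return n;
    }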
+
+JS_STATIC_DLL_CALLBACK(const void *)
+resolving_GetKey(JSDHashTable *table, JSDHashEntryHdr *hdr)
+{
+ JSResolvingEntry *entry = (JSResolvingEntry *)hdr;
+
+ return &entry->key;
+}
+
+JS_STATIC_DLL_CALLBACK(JSDHashNumber)
+resolving_HashKey(JSDHashTable *table, const void *ptr)
+{
+ const JSResolvingKey *key = (const JSResolvingKey *)ptr;
+
+ return ((JSDHashNumber)JS_PTR_TO_UINT32(key->obj) >> JSVAL_TAGBITS) ^ key->id;
+}
+
+JS_PUBLIC_API(JSBool)
+resolving_MatchEntry(JSDHashTable *table,
+ const JSDHashEntryHdr *hdr,
+ const void *ptr)
+{
+ const JSResolvingEntry *entry = (const JSResolvingEntry *)hdr;
+ const JSResolvingKey *key = (const JSResolvingKey *)ptr;
+
+ return entry->key.obj == key->obj && entry->key.id == key->id;
+}
+
+static const JSDHashTableOps resolving_dhash_ops = {
+ JS_DHashAllocTable,
+ JS_DHashFreeTable,
+ resolving_GetKey,
+ resolving_HashKey,
+ resolving_MatchEntry,
+ JS_DHashMoveEntryStub,
+ JS_DHashClearEntryStub,
+ JS_DHashFinalizeStub,
+ NULL
+};
+
+JSBool
+js_StartResolving(JSContext *cx, JSResolvingKey *key, uint32 flag,
+ JSResolvingEntry **entryp)
+{
+ JSDHashTable *table;
+ JSResolvingEntry *entry;
+
+ table = cx->resolvingTable;
+ if (!table) {
+ table = JS_NewDHashTable(&resolving_dhash_ops, NULL,
+ sizeof(JSResolvingEntry),
+ JS_DHASH_MIN_SIZE);
+ if (!table)
+ goto outofmem;
+ cx->resolvingTable = table;
+ }
+
+ entry = (JSResolvingEntry *)
+ JS_DHashTableOperate(table, key, JS_DHASH_ADD);
+ if (!entry)
+ goto outofmem;
+
+ if (entry->flags & flag) {
+ /* An entry for (key, flag) exists already -- dampen recursion. */
+ entry = NULL;
+ } else {
+ /* Fill in key if we were the first to add entry, then set flag. */
+ if (!entry->key.obj)
+ entry->key = *key;
+ entry->flags |= flag;
+ }
+ *entryp = entry;
+ return JS_TRUE;
+
+outofmem:
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+}
+
+void
+js_StopResolving(JSContext *cx, JSResolvingKey *key, uint32 flag,
+ JSResolvingEntry *entry, uint32 generation)
+{
+ JSDHashTable *table;
+
+ /*
+ * Clear flag from entry->flags and return early if other flags remain.
+ * We must take care to re-lookup entry if the table has changed since
+ * it was found by js_StartResolving.
+ */
+ table = cx->resolvingTable;
+ if (!entry || table->generation != generation) {
+ entry = (JSResolvingEntry *)
+ JS_DHashTableOperate(table, key, JS_DHASH_LOOKUP);
+ }
+ JS_ASSERT(JS_DHASH_ENTRY_IS_BUSY(&entry->hdr));
+ entry->flags &= ~flag;
+ if (entry->flags)
+ return;
+
+ /*
+ * Do a raw remove only if fewer entries were removed than would cause
+ * alpha to be less than .5 (alpha is at most .75). Otherwise, we just
+ * call JS_DHashTableOperate to re-lookup the key and remove its entry,
+ * compressing or shrinking the table as needed.
+ */
+ if (table->removedCount < JS_DHASH_TABLE_SIZE(table) >> 2)
+ JS_DHashTableRawRemove(table, &entry->hdr);
+ else
+ JS_DHashTableOperate(table, key, JS_DHASH_REMOVE);
+}
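js_StartResolving and js_StopResolving bracket a potentially re-entrant resolution so that a second attempt to resolve the same (object, id) pair with the same flag is detected instead of recursing. A hedged sketch of the calling pattern; the flag value, the helper name, and the resolution step are placeholders, and the real callers live in the object and class-resolution code:

    static JSBool
    ResolveOnce(JSContext *cx, JSObject *obj, jsid id)
    {
        JSResolvingKey key;
        JSResolvingEntry *entry;
        uint32 generation;

        key.obj = obj;
        key.id = id;
        if (!js_StartResolving(cx, &key, 0x1, &entry))
            return JS_FALSE;
        if (!entry)
            return JS_TRUE;     /* already resolving this key: dampen recursion */

        /* The table may grow while resolving, so remember its generation. */
        generation = cx->resolvingTable->generation;

        /* ... perform the actual resolution here (placeholder) ... */

        js_StopResolving(cx, &key, 0x1, entry, generation);
        return JS_TRUE;
    }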
+
+JSBool
+js_EnterLocalRootScope(JSContext *cx)
+{
+ JSLocalRootStack *lrs;
+ int mark;
+
+ lrs = cx->localRootStack;
+ if (!lrs) {
+ lrs = (JSLocalRootStack *) JS_malloc(cx, sizeof *lrs);
+ if (!lrs)
+ return JS_FALSE;
+ lrs->scopeMark = JSLRS_NULL_MARK;
+ lrs->rootCount = 0;
+ lrs->topChunk = &lrs->firstChunk;
+ lrs->firstChunk.down = NULL;
+ cx->localRootStack = lrs;
+ }
+
+ /* Push lrs->scopeMark to save it for restore when leaving. */
+ mark = js_PushLocalRoot(cx, lrs, INT_TO_JSVAL(lrs->scopeMark));
+ if (mark < 0)
+ return JS_FALSE;
+ lrs->scopeMark = (uint32) mark;
+ return JS_TRUE;
+}
+
+void
+js_LeaveLocalRootScopeWithResult(JSContext *cx, jsval rval)
+{
+ JSLocalRootStack *lrs;
+ uint32 mark, m, n;
+ JSLocalRootChunk *lrc;
+
+ /* Defend against buggy native callers. */
+ lrs = cx->localRootStack;
+ JS_ASSERT(lrs && lrs->rootCount != 0);
+ if (!lrs || lrs->rootCount == 0)
+ return;
+
+ mark = lrs->scopeMark;
+ JS_ASSERT(mark != JSLRS_NULL_MARK);
+ if (mark == JSLRS_NULL_MARK)
+ return;
+
+ /* Free any chunks being popped by this leave operation. */
+ m = mark >> JSLRS_CHUNK_SHIFT;
+ n = (lrs->rootCount - 1) >> JSLRS_CHUNK_SHIFT;
+ while (n > m) {
+ lrc = lrs->topChunk;
+ JS_ASSERT(lrc != &lrs->firstChunk);
+ lrs->topChunk = lrc->down;
+ JS_free(cx, lrc);
+ --n;
+ }
+
+ /*
+ * Pop the scope, restoring lrs->scopeMark. If rval is a GC-thing, push
+ * it on the caller's scope, or store it in lastInternalResult if we are
+ * leaving the outermost scope. We don't need to allocate a new lrc
+ * because we can overwrite the old mark's slot with rval.
+ */
+ lrc = lrs->topChunk;
+ m = mark & JSLRS_CHUNK_MASK;
+ lrs->scopeMark = (uint32) JSVAL_TO_INT(lrc->roots[m]);
+ if (JSVAL_IS_GCTHING(rval) && !JSVAL_IS_NULL(rval)) {
+ if (mark == 0) {
+ cx->weakRoots.lastInternalResult = rval;
+ } else {
+ /*
+ * Increment m to avoid the "else if (m == 0)" case below. If
+ * rval is not a GC-thing, that case would take care of freeing
+ * any chunk that contained only the old mark. Since rval *is*
+ * a GC-thing here, we want to reuse that old mark's slot.
+ */
+ lrc->roots[m++] = rval;
+ ++mark;
+ }
+ }
+ lrs->rootCount = (uint32) mark;
+
+ /*
+ * Free the stack eagerly, risking malloc churn. The alternative would
+ * require an lrs->entryCount member, maintained by Enter and Leave, and
+ * tested by the GC in addition to the cx->localRootStack non-null test.
+ *
+ * That approach would risk hoarding 264 bytes (net) per context. Right
+ * now it seems better to give fresh (dirty in CPU write-back cache, and
+ * the data is no longer needed) memory back to the malloc heap.
+ */
+ if (mark == 0) {
+ cx->localRootStack = NULL;
+ JS_free(cx, lrs);
+ } else if (m == 0) {
+ lrs->topChunk = lrc->down;
+ JS_free(cx, lrc);
+ }
+}
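The local root stack gives native code a scope in which freshly created GC things stay reachable until the scope is left; leaving with a result value either re-roots that value in the enclosing scope or parks it in weakRoots.lastInternalResult. A hedged sketch of the enter/leave pattern (hypothetical helper; native callers would typically use the corresponding JS_EnterLocalRootScope API instead):

    static JSBool
    BuildRootedValue(JSContext *cx, jsval *rvalp)
    {
        jsval v;

        if (!js_EnterLocalRootScope(cx))
            return JS_FALSE;

        /* ... create a GC thing and store it in v (placeholder) ... */
        v = JSVAL_NULL;

        /* Keep v alive in the calling scope (or in lastInternalResult). */
        js_LeaveLocalRootScopeWithResult(cx, v);
        *rvalp = v;
        return JS_TRUE;
    }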
+
+void
+js_ForgetLocalRoot(JSContext *cx, jsval v)
+{
+ JSLocalRootStack *lrs;
+ uint32 i, j, m, n, mark;
+ JSLocalRootChunk *lrc, *lrc2;
+ jsval top;
+
+ lrs = cx->localRootStack;
+ JS_ASSERT(lrs && lrs->rootCount);
+ if (!lrs || lrs->rootCount == 0)
+ return;
+
+ /* Prepare to pop the top-most value from the stack. */
+ n = lrs->rootCount - 1;
+ m = n & JSLRS_CHUNK_MASK;
+ lrc = lrs->topChunk;
+ top = lrc->roots[m];
+
+ /* Be paranoid about calls on an empty scope. */
+ mark = lrs->scopeMark;
+ JS_ASSERT(mark < n);
+ if (mark >= n)
+ return;
+
+ /* If v was not the last root pushed in the top scope, find it. */
+ if (top != v) {
+ /* Search downward in case v was recently pushed. */
+ i = n;
+ j = m;
+ lrc2 = lrc;
+ while (--i > mark) {
+ if (j == 0)
+ lrc2 = lrc2->down;
+ j = i & JSLRS_CHUNK_MASK;
+ if (lrc2->roots[j] == v)
+ break;
+ }
+
+ /* If we didn't find v in this scope, assert and bail out. */
+ JS_ASSERT(i != mark);
+ if (i == mark)
+ return;
+
+ /* Swap top and v so common tail code can pop v. */
+ lrc2->roots[j] = top;
+ }
+
+ /* Pop the last value from the stack. */
+ lrc->roots[m] = JSVAL_NULL;
+ lrs->rootCount = n;
+ if (m == 0) {
+ JS_ASSERT(n != 0);
+ JS_ASSERT(lrc != &lrs->firstChunk);
+ lrs->topChunk = lrc->down;
+ JS_free(cx, lrc);
+ }
+}
+
+int
+js_PushLocalRoot(JSContext *cx, JSLocalRootStack *lrs, jsval v)
+{
+ uint32 n, m;
+ JSLocalRootChunk *lrc;
+
+ n = lrs->rootCount;
+ m = n & JSLRS_CHUNK_MASK;
+ if (n == 0 || m != 0) {
+ /*
+ * At start of first chunk, or not at start of a non-first top chunk.
+ * Check for lrs->rootCount overflow.
+ */
+ if ((uint32)(n + 1) == 0) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_TOO_MANY_LOCAL_ROOTS);
+ return -1;
+ }
+ lrc = lrs->topChunk;
+ JS_ASSERT(n != 0 || lrc == &lrs->firstChunk);
+ } else {
+ /*
+ * After lrs->firstChunk, trying to index at a power-of-two chunk
+ * boundary: need a new chunk.
+ */
+ lrc = (JSLocalRootChunk *) JS_malloc(cx, sizeof *lrc);
+ if (!lrc)
+ return -1;
+ lrc->down = lrs->topChunk;
+ lrs->topChunk = lrc;
+ }
+ lrs->rootCount = n + 1;
+ lrc->roots[m] = v;
+ return (int) n;
+}
+
+void
+js_MarkLocalRoots(JSContext *cx, JSLocalRootStack *lrs)
+{
+ uint32 n, m, mark;
+ JSLocalRootChunk *lrc;
+
+ n = lrs->rootCount;
+ if (n == 0)
+ return;
+
+ mark = lrs->scopeMark;
+ lrc = lrs->topChunk;
+ do {
+ while (--n > mark) {
+#ifdef GC_MARK_DEBUG
+ char name[22];
+ JS_snprintf(name, sizeof name, "<local root %u>", n);
+#endif
+ m = n & JSLRS_CHUNK_MASK;
+ JS_ASSERT(JSVAL_IS_GCTHING(lrc->roots[m]));
+ GC_MARK(cx, JSVAL_TO_GCTHING(lrc->roots[m]), name);
+ if (m == 0)
+ lrc = lrc->down;
+ }
+ m = n & JSLRS_CHUNK_MASK;
+ mark = JSVAL_TO_INT(lrc->roots[m]);
+ if (m == 0)
+ lrc = lrc->down;
+ } while (n != 0);
+ JS_ASSERT(!lrc);
+}
+
+static void
+ReportError(JSContext *cx, const char *message, JSErrorReport *reportp)
+{
+ /*
+ * Check the error report, and set a JavaScript-catchable exception
+ * if the error is defined to have an associated exception. If an
+ * exception is thrown, then the JSREPORT_EXCEPTION flag will be set
+ * on the error report, and exception-aware hosts should ignore it.
+ */
+ JS_ASSERT(reportp);
+ if (reportp->errorNumber == JSMSG_UNCAUGHT_EXCEPTION)
+ reportp->flags |= JSREPORT_EXCEPTION;
+
+ /*
+ * Call the error reporter only if an exception wasn't raised.
+ *
+ * If an exception was raised, then we call the debugErrorHook
+ * (if present) to give it a chance to see the error before it
+ * propagates out of scope. This is needed for compatibility
+ * with the old scheme.
+ */
+ if (!js_ErrorToException(cx, message, reportp)) {
+ js_ReportErrorAgain(cx, message, reportp);
+ } else if (cx->runtime->debugErrorHook && cx->errorReporter) {
+ JSDebugErrorHook hook = cx->runtime->debugErrorHook;
+ /* test local in case debugErrorHook changed on another thread */
+ if (hook)
+ hook(cx, message, reportp, cx->runtime->debugErrorHookData);
+ }
+}
+
+/*
+ * We don't post an exception in this case, since doing so runs into
+ * complications of pre-allocating an exception object which required
+ * running the Exception class initializer early etc.
+ * Instead we just invoke the errorReporter with an "Out Of Memory"
+ * type message, and then hope the process ends swiftly.
+ */
+void
+js_ReportOutOfMemory(JSContext *cx)
+{
+ JSStackFrame *fp;
+ JSErrorReport report;
+ JSErrorReporter onError = cx->errorReporter;
+
+ /* Get the message for this error, but we won't expand any arguments. */
+ const JSErrorFormatString *efs =
+ js_GetLocalizedErrorMessage(cx, NULL, NULL, JSMSG_OUT_OF_MEMORY);
+ const char *msg = efs ? efs->format : "Out of memory";
+
+ /* Fill out the report, but don't do anything that requires allocation. */
+ memset(&report, 0, sizeof (struct JSErrorReport));
+ report.flags = JSREPORT_ERROR;
+ report.errorNumber = JSMSG_OUT_OF_MEMORY;
+
+ /*
+ * Walk stack until we find a frame that is associated with some script
+ * rather than a native frame.
+ */
+ for (fp = cx->fp; fp; fp = fp->down) {
+ if (fp->script && fp->pc) {
+ report.filename = fp->script->filename;
+ report.lineno = js_PCToLineNumber(cx, fp->script, fp->pc);
+ break;
+ }
+ }
+
+ /*
+ * If debugErrorHook is present then we give it a chance to veto
+ * sending the error on to the regular ErrorReporter.
+ */
+ if (onError) {
+ JSDebugErrorHook hook = cx->runtime->debugErrorHook;
+ if (hook &&
+ !hook(cx, msg, &report, cx->runtime->debugErrorHookData)) {
+ onError = NULL;
+ }
+ }
+
+ if (onError)
+ onError(cx, msg, &report);
+}
+
+JSBool
+js_ReportErrorVA(JSContext *cx, uintN flags, const char *format, va_list ap)
+{
+ char *message;
+ jschar *ucmessage;
+ size_t messagelen;
+ JSStackFrame *fp;
+ JSErrorReport report;
+ JSBool warning;
+
+ if ((flags & JSREPORT_STRICT) && !JS_HAS_STRICT_OPTION(cx))
+ return JS_TRUE;
+
+ message = JS_vsmprintf(format, ap);
+ if (!message)
+ return JS_FALSE;
+ messagelen = strlen(message);
+
+ memset(&report, 0, sizeof (struct JSErrorReport));
+ report.flags = flags;
+ report.errorNumber = JSMSG_USER_DEFINED_ERROR;
+ report.ucmessage = ucmessage = js_InflateString(cx, message, &messagelen);
+
+ /* Find the top-most active script frame, for best line number blame. */
+ for (fp = cx->fp; fp; fp = fp->down) {
+ if (fp->script && fp->pc) {
+ report.filename = fp->script->filename;
+ report.lineno = js_PCToLineNumber(cx, fp->script, fp->pc);
+ break;
+ }
+ }
+
+ warning = JSREPORT_IS_WARNING(report.flags);
+ if (warning && JS_HAS_WERROR_OPTION(cx)) {
+ report.flags &= ~JSREPORT_WARNING;
+ warning = JS_FALSE;
+ }
+
+ ReportError(cx, message, &report);
+ free(message);
+ JS_free(cx, ucmessage);
+ return warning;
+}
+
+/*
+ * The arguments from ap need to be packaged up into an array and stored
+ * into the report struct.
+ *
+ * The format string addressed by the error number may contain operands
+ * identified by the format {N}, where N is a decimal digit. Each of these
+ * is to be replaced by the Nth argument from the va_list. The complete
+ * message is placed into reportp->ucmessage converted to a JSString.
+ *
+ * Returns true if the expansion succeeds (can fail if out of memory).
+ */
+JSBool
+js_ExpandErrorArguments(JSContext *cx, JSErrorCallback callback,
+ void *userRef, const uintN errorNumber,
+ char **messagep, JSErrorReport *reportp,
+ JSBool *warningp, JSBool charArgs, va_list ap)
+{
+ const JSErrorFormatString *efs;
+ int i;
+ int argCount;
+
+ *warningp = JSREPORT_IS_WARNING(reportp->flags);
+ if (*warningp && JS_HAS_WERROR_OPTION(cx)) {
+ reportp->flags &= ~JSREPORT_WARNING;
+ *warningp = JS_FALSE;
+ }
+
+ *messagep = NULL;
+
+ /* Most calls supply js_GetErrorMessage; if this is so, assume NULL. */
+ if (!callback || callback == js_GetErrorMessage)
+ efs = js_GetLocalizedErrorMessage(cx, userRef, NULL, errorNumber);
+ else
+ efs = callback(userRef, NULL, errorNumber);
+ if (efs) {
+ size_t totalArgsLength = 0;
+ size_t argLengths[10]; /* only {0} thru {9} supported */
+ argCount = efs->argCount;
+ JS_ASSERT(argCount <= 10);
+ if (argCount > 0) {
+ /*
+ * Gather the arguments into an array, and accumulate
+ * their sizes. We allocate 1 more than necessary and
+ * null it out to act as the caboose when we free the
+ * pointers later.
+ */
+ reportp->messageArgs = (const jschar **)
+ JS_malloc(cx, sizeof(jschar *) * (argCount + 1));
+ if (!reportp->messageArgs)
+ return JS_FALSE;
+ reportp->messageArgs[argCount] = NULL;
+ for (i = 0; i < argCount; i++) {
+ if (charArgs) {
+ char *charArg = va_arg(ap, char *);
+ size_t charArgLength = strlen(charArg);
+ reportp->messageArgs[i]
+ = js_InflateString(cx, charArg, &charArgLength);
+ if (!reportp->messageArgs[i])
+ goto error;
+ } else {
+ reportp->messageArgs[i] = va_arg(ap, jschar *);
+ }
+ argLengths[i] = js_strlen(reportp->messageArgs[i]);
+ totalArgsLength += argLengths[i];
+ }
+ /* NULL-terminate for easy copying. */
+ reportp->messageArgs[i] = NULL;
+ }
+ /*
+ * Parse the error format, substituting the argument X
+ * for {X} in the format.
+ */
+ if (argCount > 0) {
+ if (efs->format) {
+ jschar *buffer, *fmt, *out;
+ int expandedArgs = 0;
+ size_t expandedLength;
+ size_t len = strlen(efs->format);
+
+ buffer = fmt = js_InflateString (cx, efs->format, &len);
+ if (!buffer)
+ goto error;
+ expandedLength = len
+ - (3 * argCount) /* exclude the {n} */
+ + totalArgsLength;
+
+ /*
+ * Note - the above calculation assumes that each argument
+ * is used once and only once in the expansion !!!
+ */
+ reportp->ucmessage = out = (jschar *)
+ JS_malloc(cx, (expandedLength + 1) * sizeof(jschar));
+ if (!out) {
+ JS_free (cx, buffer);
+ goto error;
+ }
+ while (*fmt) {
+ if (*fmt == '{') {
+ if (isdigit(fmt[1])) {
+ int d = JS7_UNDEC(fmt[1]);
+ JS_ASSERT(d < argCount);
+ js_strncpy(out, reportp->messageArgs[d],
+ argLengths[d]);
+ out += argLengths[d];
+ fmt += 3;
+ expandedArgs++;
+ continue;
+ }
+ }
+ *out++ = *fmt++;
+ }
+ JS_ASSERT(expandedArgs == argCount);
+ *out = 0;
+ JS_free (cx, buffer);
+ *messagep =
+ js_DeflateString(cx, reportp->ucmessage,
+ (size_t)(out - reportp->ucmessage));
+ if (!*messagep)
+ goto error;
+ }
+ } else {
+ /*
+ * Zero arguments: the format string (if it exists) is the
+ * entire message.
+ */
+ if (efs->format) {
+ size_t len;
+ *messagep = JS_strdup(cx, efs->format);
+ if (!*messagep)
+ goto error;
+ len = strlen(*messagep);
+ reportp->ucmessage = js_InflateString(cx, *messagep, &len);
+ if (!reportp->ucmessage)
+ goto error;
+ }
+ }
+ }
+ if (*messagep == NULL) {
+ /* where's the right place for this ??? */
+ const char *defaultErrorMessage
+ = "No error message available for error number %d";
+ size_t nbytes = strlen(defaultErrorMessage) + 16;
+ *messagep = (char *)JS_malloc(cx, nbytes);
+ if (!*messagep)
+ goto error;
+ JS_snprintf(*messagep, nbytes, defaultErrorMessage, errorNumber);
+ }
+ return JS_TRUE;
+
+error:
+ if (reportp->messageArgs) {
+ /* free the arguments only if we allocated them */
+ if (charArgs) {
+ i = 0;
+ while (reportp->messageArgs[i])
+ JS_free(cx, (void *)reportp->messageArgs[i++]);
+ }
+ JS_free(cx, (void *)reportp->messageArgs);
+ reportp->messageArgs = NULL;
+ }
+ if (reportp->ucmessage) {
+ JS_free(cx, (void *)reportp->ucmessage);
+ reportp->ucmessage = NULL;
+ }
+ if (*messagep) {
+ JS_free(cx, (void *)*messagep);
+ *messagep = NULL;
+ }
+ return JS_FALSE;
+}
+
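+/*
+ * Worked example (editor's note): given the format string
+ * "{0} is not a function" and the single argument "foo", the loop above
+ * copies "foo" in place of "{0}", yielding "foo is not a function". The
+ * expanded length is strlen(format) - 3*argCount + totalArgsLength, which
+ * is why each {N} must appear exactly once in the format.
+ */
+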
+JSBool
+js_ReportErrorNumberVA(JSContext *cx, uintN flags, JSErrorCallback callback,
+ void *userRef, const uintN errorNumber,
+ JSBool charArgs, va_list ap)
+{
+ JSStackFrame *fp;
+ JSErrorReport report;
+ char *message;
+ JSBool warning;
+
+ if ((flags & JSREPORT_STRICT) && !JS_HAS_STRICT_OPTION(cx))
+ return JS_TRUE;
+
+ memset(&report, 0, sizeof (struct JSErrorReport));
+ report.flags = flags;
+ report.errorNumber = errorNumber;
+
+ /*
+ * If we can't find out where the error was based on the current frame,
+ * see if the next frame has a script/pc combo we can use.
+ */
+ for (fp = cx->fp; fp; fp = fp->down) {
+ if (fp->script && fp->pc) {
+ report.filename = fp->script->filename;
+ report.lineno = js_PCToLineNumber(cx, fp->script, fp->pc);
+ break;
+ }
+ }
+
+ if (!js_ExpandErrorArguments(cx, callback, userRef, errorNumber,
+ &message, &report, &warning, charArgs, ap)) {
+ return JS_FALSE;
+ }
+
+ ReportError(cx, message, &report);
+
+ if (message)
+ JS_free(cx, message);
+ if (report.messageArgs) {
+ /*
+ * js_ExpandErrorArguments owns its messageArgs only if it had to
+ * inflate the arguments (from regular |char *|s).
+ */
+ if (charArgs) {
+ int i = 0;
+ while (report.messageArgs[i])
+ JS_free(cx, (void *)report.messageArgs[i++]);
+ }
+ JS_free(cx, (void *)report.messageArgs);
+ }
+ if (report.ucmessage)
+ JS_free(cx, (void *)report.ucmessage);
+
+ return warning;
+}
+
+JS_FRIEND_API(void)
+js_ReportErrorAgain(JSContext *cx, const char *message, JSErrorReport *reportp)
+{
+ JSErrorReporter onError;
+
+ if (!message)
+ return;
+
+ if (cx->lastMessage)
+ free(cx->lastMessage);
+ cx->lastMessage = JS_strdup(cx, message);
+ if (!cx->lastMessage)
+ return;
+ onError = cx->errorReporter;
+
+ /*
+ * If debugErrorHook is present then we give it a chance to veto
+ * sending the error on to the regular ErrorReporter.
+ */
+ if (onError) {
+ JSDebugErrorHook hook = cx->runtime->debugErrorHook;
+ if (hook &&
+ !hook(cx, cx->lastMessage, reportp,
+ cx->runtime->debugErrorHookData)) {
+ onError = NULL;
+ }
+ }
+ if (onError)
+ onError(cx, cx->lastMessage, reportp);
+}
+
+void
+js_ReportIsNotDefined(JSContext *cx, const char *name)
+{
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_NOT_DEFINED, name);
+}
+
+#if defined DEBUG && defined XP_UNIX
+/* For gdb usage. */
+void js_traceon(JSContext *cx) { cx->tracefp = stderr; }
+void js_traceoff(JSContext *cx) { cx->tracefp = NULL; }
+#endif
+
+JSErrorFormatString js_ErrorFormatString[JSErr_Limit] = {
+#define MSG_DEF(name, number, count, exception, format) \
+ { format, count, exception } ,
+#include "js.msg"
+#undef MSG_DEF
+};
+
+const JSErrorFormatString *
+js_GetErrorMessage(void *userRef, const char *locale, const uintN errorNumber)
+{
+ if ((errorNumber > 0) && (errorNumber < JSErr_Limit))
+ return &js_ErrorFormatString[errorNumber];
+ return NULL;
+}
diff --git a/src/third_party/js-1.7/jscntxt.h b/src/third_party/js-1.7/jscntxt.h
new file mode 100644
index 00000000000..7ca678e50bd
--- /dev/null
+++ b/src/third_party/js-1.7/jscntxt.h
@@ -0,0 +1,1013 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jscntxt_h___
+#define jscntxt_h___
+/*
+ * JS execution context.
+ */
+#include "jsarena.h" /* Added by JSIFY */
+#include "jsclist.h"
+#include "jslong.h"
+#include "jsatom.h"
+#include "jsconfig.h"
+#include "jsdhash.h"
+#include "jsgc.h"
+#include "jsinterp.h"
+#include "jsobj.h"
+#include "jsprvtd.h"
+#include "jspubtd.h"
+#include "jsregexp.h"
+#include "jsutil.h"
+
+JS_BEGIN_EXTERN_C
+
+/*
+ * js_GetSrcNote cache to avoid O(n^2) growth in finding a source note for a
+ * given pc in a script.
+ */
+typedef struct JSGSNCache {
+ JSScript *script;
+ JSDHashTable table;
+#ifdef JS_GSNMETER
+ uint32 hits;
+ uint32 misses;
+ uint32 fills;
+ uint32 clears;
+# define GSN_CACHE_METER(cache,cnt) (++(cache)->cnt)
+#else
+# define GSN_CACHE_METER(cache,cnt) /* nothing */
+#endif
+} JSGSNCache;
+
+#define GSN_CACHE_CLEAR(cache) \
+ JS_BEGIN_MACRO \
+ (cache)->script = NULL; \
+ if ((cache)->table.ops) { \
+ JS_DHashTableFinish(&(cache)->table); \
+ (cache)->table.ops = NULL; \
+ } \
+ GSN_CACHE_METER(cache, clears); \
+ JS_END_MACRO
+
+/* These helper macros take a cx as parameter and operate on its GSN cache. */
+#define JS_CLEAR_GSN_CACHE(cx) GSN_CACHE_CLEAR(&JS_GSN_CACHE(cx))
+#define JS_METER_GSN_CACHE(cx,cnt) GSN_CACHE_METER(&JS_GSN_CACHE(cx), cnt)
+
+#ifdef JS_THREADSAFE
+
+/*
+ * Structure uniquely representing a thread. It holds thread-private data
+ * that can be accessed without a global lock.
+ */
+struct JSThread {
+ /* Linked list of all contexts active on this thread. */
+ JSCList contextList;
+
+ /* Opaque thread-id, from NSPR's PR_GetCurrentThread(). */
+ jsword id;
+
+ /* Thread-local gc free lists array. */
+ JSGCThing *gcFreeLists[GC_NUM_FREELISTS];
+
+ /*
+ * Thread-local version of JSRuntime.gcMallocBytes to avoid taking
+ * locks on each JS_malloc.
+ */
+ uint32 gcMallocBytes;
+
+#if JS_HAS_GENERATORS
+ /* Flag indicating that the current thread is executing close hooks. */
+ JSBool gcRunningCloseHooks;
+#endif
+
+ /*
+ * Store the GSN cache in struct JSThread, not struct JSContext, both to
+ * save space and to simplify cleanup in js_GC. Any embedding (Firefox
+ * or another Gecko application) that uses many contexts per thread is
+ * unlikely to interleave js_GetSrcNote-intensive loops in the decompiler
+ * among two or more contexts running script in one thread.
+ */
+ JSGSNCache gsnCache;
+};
+
+#define JS_GSN_CACHE(cx) ((cx)->thread->gsnCache)
+
+extern void JS_DLL_CALLBACK
+js_ThreadDestructorCB(void *ptr);
+
+extern JSBool
+js_SetContextThread(JSContext *cx);
+
+extern void
+js_ClearContextThread(JSContext *cx);
+
+extern JSThread *
+js_GetCurrentThread(JSRuntime *rt);
+
+#endif /* JS_THREADSAFE */
+
+typedef enum JSDestroyContextMode {
+ JSDCM_NO_GC,
+ JSDCM_MAYBE_GC,
+ JSDCM_FORCE_GC,
+ JSDCM_NEW_FAILED
+} JSDestroyContextMode;
+
+typedef enum JSRuntimeState {
+ JSRTS_DOWN,
+ JSRTS_LAUNCHING,
+ JSRTS_UP,
+ JSRTS_LANDING
+} JSRuntimeState;
+
+typedef struct JSPropertyTreeEntry {
+ JSDHashEntryHdr hdr;
+ JSScopeProperty *child;
+} JSPropertyTreeEntry;
+
+/*
+ * Forward declaration for opaque JSRuntime.nativeIteratorStates.
+ */
+typedef struct JSNativeIteratorState JSNativeIteratorState;
+
+struct JSRuntime {
+ /* Runtime state, synchronized by the stateChange/gcLock condvar/lock. */
+ JSRuntimeState state;
+
+ /* Context create/destroy callback. */
+ JSContextCallback cxCallback;
+
+ /* Garbage collector state, used by jsgc.c. */
+ JSGCArenaList gcArenaList[GC_NUM_FREELISTS];
+ JSDHashTable gcRootsHash;
+ JSDHashTable *gcLocksHash;
+ jsrefcount gcKeepAtoms;
+ uint32 gcBytes;
+ uint32 gcLastBytes;
+ uint32 gcMaxBytes;
+ uint32 gcMaxMallocBytes;
+ uint32 gcLevel;
+ uint32 gcNumber;
+
+ /*
+ * NB: do not pack another flag here by claiming gcPadding unless the new
+ * flag is written only by the GC thread. Atomic updates to packed bytes
+ * are not guaranteed, so stores issued by one thread may be lost due to
+ * unsynchronized read-modify-write cycles on other threads.
+ */
+ JSPackedBool gcPoke;
+ JSPackedBool gcRunning;
+ uint16 gcPadding;
+
+ JSGCCallback gcCallback;
+ uint32 gcMallocBytes;
+ JSGCArena *gcUnscannedArenaStackTop;
+#ifdef DEBUG
+ size_t gcUnscannedBagSize;
+#endif
+
+ /*
+ * API compatibility requires keeping GCX_PRIVATE bytes separate from the
+ * original GC types' byte tally. Otherwise embeddings that configure a
+ * good limit for pre-GCX_PRIVATE versions of the engine will see memory
+ * over-pressure too often, possibly leading to failed last-ditch GCs.
+ *
+ * The new XML GC-thing types do add to gcBytes, and they're larger than
+ * the original GC-thing type size (8 bytes on most architectures). So a
+ * user who enables E4X may want to increase the maxbytes value passed to
+ * JS_NewRuntime. TODO: Note this in the API docs.
+ */
+ uint32 gcPrivateBytes;
+
+ /*
+ * Table for tracking iterators to ensure that we close iterator's state
+ * before finalizing the iterable object.
+ */
+ JSPtrTable gcIteratorTable;
+
+#if JS_HAS_GENERATORS
+ /* Runtime state to support close hooks. */
+ JSGCCloseState gcCloseState;
+#endif
+
+#ifdef JS_GCMETER
+ JSGCStats gcStats;
+#endif
+
+ /* Literal table maintained by jsatom.c functions. */
+ JSAtomState atomState;
+
+ /* Random number generator state, used by jsmath.c. */
+ JSBool rngInitialized;
+ int64 rngMultiplier;
+ int64 rngAddend;
+ int64 rngMask;
+ int64 rngSeed;
+ jsdouble rngDscale;
+
+ /* Well-known numbers held for use by this runtime's contexts. */
+ jsdouble *jsNaN;
+ jsdouble *jsNegativeInfinity;
+ jsdouble *jsPositiveInfinity;
+
+#ifdef JS_THREADSAFE
+ JSLock *deflatedStringCacheLock;
+#endif
+ JSHashTable *deflatedStringCache;
+#ifdef DEBUG
+ uint32 deflatedStringCacheBytes;
+#endif
+
+ /* Empty string held for use by this runtime's contexts. */
+ JSString *emptyString;
+
+ /* List of active contexts sharing this runtime; protected by gcLock. */
+ JSCList contextList;
+
+ /* These are used for debugging -- see jsprvtd.h and jsdbgapi.h. */
+ JSTrapHandler interruptHandler;
+ void *interruptHandlerData;
+ JSNewScriptHook newScriptHook;
+ void *newScriptHookData;
+ JSDestroyScriptHook destroyScriptHook;
+ void *destroyScriptHookData;
+ JSTrapHandler debuggerHandler;
+ void *debuggerHandlerData;
+ JSSourceHandler sourceHandler;
+ void *sourceHandlerData;
+ JSInterpreterHook executeHook;
+ void *executeHookData;
+ JSInterpreterHook callHook;
+ void *callHookData;
+ JSObjectHook objectHook;
+ void *objectHookData;
+ JSTrapHandler throwHook;
+ void *throwHookData;
+ JSDebugErrorHook debugErrorHook;
+ void *debugErrorHookData;
+
+ /* More debugging state, see jsdbgapi.c. */
+ JSCList trapList;
+ JSCList watchPointList;
+
+ /* Weak links to properties, indexed by quickened get/set opcodes. */
+ /* XXX must come after JSCLists or MSVC alignment bug bites empty lists */
+ JSPropertyCache propertyCache;
+
+ /* Client opaque pointer */
+ void *data;
+
+#ifdef JS_THREADSAFE
+ /* These combine to interlock the GC and new requests. */
+ PRLock *gcLock;
+ PRCondVar *gcDone;
+ PRCondVar *requestDone;
+ uint32 requestCount;
+ JSThread *gcThread;
+
+ /* Lock and owning thread pointer for JS_LOCK_RUNTIME. */
+ PRLock *rtLock;
+#ifdef DEBUG
+ jsword rtLockOwner;
+#endif
+
+ /* Used to synchronize down/up state change; protected by gcLock. */
+ PRCondVar *stateChange;
+
+ /* Used to serialize cycle checks when setting __proto__ or __parent__. */
+ PRLock *setSlotLock;
+ PRCondVar *setSlotDone;
+ JSBool setSlotBusy;
+ JSScope *setSlotScope; /* deadlock avoidance, see jslock.c */
+
+ /*
+ * State for sharing single-threaded scopes, once a second thread tries to
+ * lock a scope. The scopeSharingDone condvar is protected by rt->gcLock,
+ * to minimize number of locks taken in JS_EndRequest.
+ *
+ * The scopeSharingTodo linked list is likewise "global" per runtime, not
+ * one-list-per-context, to conserve space over all contexts, optimizing
+ * for the likely case that scopes become shared rarely, and among a very
+ * small set of threads (contexts).
+ */
+ PRCondVar *scopeSharingDone;
+ JSScope *scopeSharingTodo;
+
+/*
+ * Magic terminator for the rt->scopeSharingTodo linked list, threaded through
+ * scope->u.link. This hack allows us to test whether a scope is on the list
+ * by asking whether scope->u.link is non-null. We use a large, likely bogus
+ * pointer here to distinguish this value from any valid u.count (small int)
+ * value.
+ */
+#define NO_SCOPE_SHARING_TODO ((JSScope *) 0xfeedbeef)
+
+ /*
+ * The index for JSThread info, returned by PR_NewThreadPrivateIndex.
+ * The value is visible and shared by all threads, but the data is
+ * private to each thread.
+ */
+ PRUintn threadTPIndex;
+#endif /* JS_THREADSAFE */
+
+ /*
+ * Check property accessibility for objects of arbitrary class. Used at
+ * present to check f.caller accessibility for any function object f.
+ */
+ JSCheckAccessOp checkObjectAccess;
+
+ /* Security principals serialization support. */
+ JSPrincipalsTranscoder principalsTranscoder;
+
+ /* Optional hook to find principals for an object in this runtime. */
+ JSObjectPrincipalsFinder findObjectPrincipals;
+
+ /*
+ * Shared scope property tree, and arena-pool for allocating its nodes.
+ * The propertyRemovals counter is incremented for every js_ClearScope,
+ * and for each js_RemoveScopeProperty that frees a slot in an object.
+ * See js_NativeGet and js_NativeSet in jsobj.c.
+ */
+ JSDHashTable propertyTreeHash;
+ JSScopeProperty *propertyFreeList;
+ JSArenaPool propertyArenaPool;
+ int32 propertyRemovals;
+
+ /* Script filename table. */
+ struct JSHashTable *scriptFilenameTable;
+ JSCList scriptFilenamePrefixes;
+#ifdef JS_THREADSAFE
+ PRLock *scriptFilenameTableLock;
+#endif
+
+ /* Number localization, used by jsnum.c */
+ const char *thousandsSeparator;
+ const char *decimalSeparator;
+ const char *numGrouping;
+
+ /*
+ * Weak references to lazily-created, well-known XML singletons.
+ *
+ * NB: Singleton objects must be carefully disconnected from the rest of
+ * the object graph usually associated with a JSContext's global object,
+ * including the set of standard class objects. See jsxml.c for details.
+ */
+ JSObject *anynameObject;
+ JSObject *functionNamespaceObject;
+
+ /*
+ * A helper list for the GC, so it can mark native iterator states. See
+ * js_MarkNativeIteratorStates for details.
+ */
+ JSNativeIteratorState *nativeIteratorStates;
+
+#ifndef JS_THREADSAFE
+ /*
+ * For thread-unsafe embeddings, the GSN cache lives in the runtime and
+ * not each context, since we expect it to be filled once when decompiling
+ * a longer script, then hit repeatedly as js_GetSrcNote is called during
+ * the decompiler activation that filled it.
+ */
+ JSGSNCache gsnCache;
+
+#define JS_GSN_CACHE(cx) ((cx)->runtime->gsnCache)
+#endif
+
+#ifdef DEBUG
+ /* Function invocation metering. */
+ jsrefcount inlineCalls;
+ jsrefcount nativeCalls;
+ jsrefcount nonInlineCalls;
+ jsrefcount constructs;
+
+ /* Scope lock and property metering. */
+ jsrefcount claimAttempts;
+ jsrefcount claimedScopes;
+ jsrefcount deadContexts;
+ jsrefcount deadlocksAvoided;
+ jsrefcount liveScopes;
+ jsrefcount sharedScopes;
+ jsrefcount totalScopes;
+ jsrefcount badUndependStrings;
+ jsrefcount liveScopeProps;
+ jsrefcount totalScopeProps;
+ jsrefcount livePropTreeNodes;
+ jsrefcount duplicatePropTreeNodes;
+ jsrefcount totalPropTreeNodes;
+ jsrefcount propTreeKidsChunks;
+ jsrefcount middleDeleteFixups;
+
+ /* String instrumentation. */
+ jsrefcount liveStrings;
+ jsrefcount totalStrings;
+ jsrefcount liveDependentStrings;
+ jsrefcount totalDependentStrings;
+ double lengthSum;
+ double lengthSquaredSum;
+ double strdepLengthSum;
+ double strdepLengthSquaredSum;
+#endif
+};
+
+#ifdef DEBUG
+# define JS_RUNTIME_METER(rt, which) JS_ATOMIC_INCREMENT(&(rt)->which)
+# define JS_RUNTIME_UNMETER(rt, which) JS_ATOMIC_DECREMENT(&(rt)->which)
+#else
+# define JS_RUNTIME_METER(rt, which) /* nothing */
+# define JS_RUNTIME_UNMETER(rt, which) /* nothing */
+#endif
+
+#define JS_KEEP_ATOMS(rt) JS_ATOMIC_INCREMENT(&(rt)->gcKeepAtoms);
+#define JS_UNKEEP_ATOMS(rt) JS_ATOMIC_DECREMENT(&(rt)->gcKeepAtoms);
+
+#ifdef JS_ARGUMENT_FORMATTER_DEFINED
+/*
+ * Linked list mapping format strings for JS_{Convert,Push}Arguments{,VA} to
+ * formatter functions. Elements are sorted in non-increasing format string
+ * length order.
+ */
+struct JSArgumentFormatMap {
+ const char *format;
+ size_t length;
+ JSArgumentFormatter formatter;
+ JSArgumentFormatMap *next;
+};
+#endif
+
+struct JSStackHeader {
+ uintN nslots;
+ JSStackHeader *down;
+};
+
+#define JS_STACK_SEGMENT(sh) ((jsval *)(sh) + 2)
+
+/*
+ * Key and entry types for the JSContext.resolvingTable hash table, typedef'd
+ * here because all consumers need to see these declarations (and not just the
+ * typedef names, as would be the case for an opaque pointer-to-typedef'd-type
+ * declaration), along with cx->resolvingTable.
+ */
+typedef struct JSResolvingKey {
+ JSObject *obj;
+ jsid id;
+} JSResolvingKey;
+
+typedef struct JSResolvingEntry {
+ JSDHashEntryHdr hdr;
+ JSResolvingKey key;
+ uint32 flags;
+} JSResolvingEntry;
+
+#define JSRESFLAG_LOOKUP 0x1 /* resolving id from lookup */
+#define JSRESFLAG_WATCH 0x2 /* resolving id from watch */
+
+typedef struct JSLocalRootChunk JSLocalRootChunk;
+
+#define JSLRS_CHUNK_SHIFT 8
+#define JSLRS_CHUNK_SIZE JS_BIT(JSLRS_CHUNK_SHIFT)
+#define JSLRS_CHUNK_MASK JS_BITMASK(JSLRS_CHUNK_SHIFT)
+
+struct JSLocalRootChunk {
+ jsval roots[JSLRS_CHUNK_SIZE];
+ JSLocalRootChunk *down;
+};
+
+typedef struct JSLocalRootStack {
+ uint32 scopeMark;
+ uint32 rootCount;
+ JSLocalRootChunk *topChunk;
+ JSLocalRootChunk firstChunk;
+} JSLocalRootStack;
+
+#define JSLRS_NULL_MARK ((uint32) -1)
+
+typedef struct JSTempValueRooter JSTempValueRooter;
+typedef void
+(* JS_DLL_CALLBACK JSTempValueMarker)(JSContext *cx, JSTempValueRooter *tvr);
+
+typedef union JSTempValueUnion {
+ jsval value;
+ JSObject *object;
+ JSString *string;
+ void *gcthing;
+ JSTempValueMarker marker;
+ JSScopeProperty *sprop;
+ JSWeakRoots *weakRoots;
+ jsval *array;
+} JSTempValueUnion;
+
+/*
+ * The following lets us reinterpret JSTempValueUnion.object as jsval using
+ * the tagging property of a generic jsval described below.
+ */
+JS_STATIC_ASSERT(sizeof(JSTempValueUnion) == sizeof(jsval));
+JS_STATIC_ASSERT(sizeof(JSTempValueUnion) == sizeof(JSObject *));
+
+/*
+ * Context-linked stack of temporary GC roots.
+ *
+ * If count is -1, then u.value contains the single value or GC-thing to root.
+ * If count is -2, then u.marker holds a mark hook called to mark the values.
+ * If count is -3, then u.sprop points to the property tree node to mark.
+ * If count is -4, then u.weakRoots points to saved weak roots.
+ * If count >= 0, then u.array points to a stack-allocated vector of jsvals.
+ *
+ * To root a single GC-thing pointer, which need not be tagged and stored as a
+ * jsval, use JS_PUSH_TEMP_ROOT_GCTHING. The macro reinterprets an arbitrary
+ * GC-thing as jsval. It works because a GC-thing is aligned on a 0 mod 8
+ * boundary, and object has the 0 jsval tag. So any GC-thing may be tagged as
+ * if it were an object and untagged, if it's then used only as an opaque
+ * pointer until discriminated by other means than tag bits (this is how the
+ * GC mark function uses its |thing| parameter -- it consults GC-thing flags
+ * stored separately from the thing to decide the type of thing).
+ *
+ * JS_PUSH_TEMP_ROOT_OBJECT and JS_PUSH_TEMP_ROOT_STRING are type-safe
+ * alternatives to JS_PUSH_TEMP_ROOT_GCTHING for JSObject and JSString. They
+ * also provide a simple way to get a single pointer to rooted JSObject or
+ * JSString via JS_PUSH_TEMP_ROOT_(OBJECT|STRING)(cx, NULL, &tvr). Then
+ * &tvr.u.object or &tvr.u.string gives the necessary pointer, which puns
+ * tvr.u.value safely because JSObject * and JSString * are GC-things and, as
+ * such, their tag bits are all zeroes.
+ *
+ * If you need to protect a result value that flows out of a C function across
+ * several layers of other functions, use the js_LeaveLocalRootScopeWithResult
+ * internal API (see further below) instead.
+ */
+struct JSTempValueRooter {
+ JSTempValueRooter *down;
+ ptrdiff_t count;
+ JSTempValueUnion u;
+};
+
+#define JSTVU_SINGLE (-1)
+#define JSTVU_MARKER (-2)
+#define JSTVU_SPROP (-3)
+#define JSTVU_WEAK_ROOTS (-4)
+
+#define JS_PUSH_TEMP_ROOT_COMMON(cx,tvr) \
+ JS_BEGIN_MACRO \
+ JS_ASSERT((cx)->tempValueRooters != (tvr)); \
+ (tvr)->down = (cx)->tempValueRooters; \
+ (cx)->tempValueRooters = (tvr); \
+ JS_END_MACRO
+
+#define JS_PUSH_SINGLE_TEMP_ROOT(cx,val,tvr) \
+ JS_BEGIN_MACRO \
+ (tvr)->count = JSTVU_SINGLE; \
+ (tvr)->u.value = val; \
+ JS_PUSH_TEMP_ROOT_COMMON(cx, tvr); \
+ JS_END_MACRO
+
+#define JS_PUSH_TEMP_ROOT(cx,cnt,arr,tvr) \
+ JS_BEGIN_MACRO \
+ JS_ASSERT((ptrdiff_t)(cnt) >= 0); \
+ (tvr)->count = (ptrdiff_t)(cnt); \
+ (tvr)->u.array = (arr); \
+ JS_PUSH_TEMP_ROOT_COMMON(cx, tvr); \
+ JS_END_MACRO
+
+#define JS_PUSH_TEMP_ROOT_MARKER(cx,marker_,tvr) \
+ JS_BEGIN_MACRO \
+ (tvr)->count = JSTVU_MARKER; \
+ (tvr)->u.marker = (marker_); \
+ JS_PUSH_TEMP_ROOT_COMMON(cx, tvr); \
+ JS_END_MACRO
+
+#define JS_PUSH_TEMP_ROOT_OBJECT(cx,obj,tvr) \
+ JS_BEGIN_MACRO \
+ (tvr)->count = JSTVU_SINGLE; \
+ (tvr)->u.object = (obj); \
+ JS_PUSH_TEMP_ROOT_COMMON(cx, tvr); \
+ JS_END_MACRO
+
+#define JS_PUSH_TEMP_ROOT_STRING(cx,str,tvr) \
+ JS_BEGIN_MACRO \
+ (tvr)->count = JSTVU_SINGLE; \
+ (tvr)->u.string = (str); \
+ JS_PUSH_TEMP_ROOT_COMMON(cx, tvr); \
+ JS_END_MACRO
+
+#define JS_PUSH_TEMP_ROOT_GCTHING(cx,thing,tvr) \
+ JS_BEGIN_MACRO \
+ JS_ASSERT(JSVAL_IS_OBJECT((jsval)thing)); \
+ (tvr)->count = JSTVU_SINGLE; \
+ (tvr)->u.gcthing = (thing); \
+ JS_PUSH_TEMP_ROOT_COMMON(cx, tvr); \
+ JS_END_MACRO
+
+#define JS_POP_TEMP_ROOT(cx,tvr) \
+ JS_BEGIN_MACRO \
+ JS_ASSERT((cx)->tempValueRooters == (tvr)); \
+ (cx)->tempValueRooters = (tvr)->down; \
+ JS_END_MACRO
+
+#define JS_TEMP_ROOT_EVAL(cx,cnt,val,expr) \
+ JS_BEGIN_MACRO \
+ JSTempValueRooter tvr; \
+ JS_PUSH_TEMP_ROOT(cx, cnt, val, &tvr); \
+ (expr); \
+ JS_POP_TEMP_ROOT(cx, &tvr); \
+ JS_END_MACRO
+
+#define JS_PUSH_TEMP_ROOT_SPROP(cx,sprop_,tvr) \
+ JS_BEGIN_MACRO \
+ (tvr)->count = JSTVU_SPROP; \
+ (tvr)->u.sprop = (sprop_); \
+ JS_PUSH_TEMP_ROOT_COMMON(cx, tvr); \
+ JS_END_MACRO
+
+#define JS_PUSH_TEMP_ROOT_WEAK_COPY(cx,weakRoots_,tvr) \
+ JS_BEGIN_MACRO \
+ (tvr)->count = JSTVU_WEAK_ROOTS; \
+ (tvr)->u.weakRoots = (weakRoots_); \
+ JS_PUSH_TEMP_ROOT_COMMON(cx, tvr); \
+ JS_END_MACRO
+
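+/*
+ * Illustrative usage sketch (editor's addition): keeping a just-computed
+ * jsval alive across an operation that may allocate and hence run the GC.
+ * The names v, ok and DoSomethingThatMayAllocate are hypothetical.
+ */
+#if 0
+    JSTempValueRooter tvr;
+
+    JS_PUSH_SINGLE_TEMP_ROOT(cx, v, &tvr);
+    ok = DoSomethingThatMayAllocate(cx);    /* v cannot be collected here */
+    JS_POP_TEMP_ROOT(cx, &tvr);
+#endif
+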
+struct JSContext {
+ /* JSRuntime contextList linkage. */
+ JSCList links;
+
+ /* Interpreter activation count. */
+ uintN interpLevel;
+
+ /* Limit pointer for checking stack consumption during recursion. */
+ jsuword stackLimit;
+
+ /* Runtime version control identifier and equality operators. */
+ uint16 version;
+ jsbytecode jsop_eq;
+ jsbytecode jsop_ne;
+
+ /* Data shared by threads in an address space. */
+ JSRuntime *runtime;
+
+ /* Stack arena pool and frame pointer register. */
+ JSArenaPool stackPool;
+ JSStackFrame *fp;
+
+ /* Temporary arena pool used while compiling and decompiling. */
+ JSArenaPool tempPool;
+
+ /* Top-level object and pointer to top stack frame's scope chain. */
+ JSObject *globalObject;
+
+ /* Storage to root recently allocated GC things and script result. */
+ JSWeakRoots weakRoots;
+
+ /* Regular expression class statics (XXX not shared globally). */
+ JSRegExpStatics regExpStatics;
+
+ /* State for object and array toSource conversion. */
+ JSSharpObjectMap sharpObjectMap;
+
+ /* Argument formatter support for JS_{Convert,Push}Arguments{,VA}. */
+ JSArgumentFormatMap *argumentFormatMap;
+
+ /* Last message string and trace file for debugging. */
+ char *lastMessage;
+#ifdef DEBUG
+ void *tracefp;
+#endif
+
+ /* Per-context optional user callbacks. */
+ JSBranchCallback branchCallback;
+ JSErrorReporter errorReporter;
+
+ /* Client opaque pointer */
+ void *data;
+
+ /* GC and thread-safe state. */
+ JSStackFrame *dormantFrameChain; /* dormant stack frame to scan */
+#ifdef JS_THREADSAFE
+ JSThread *thread;
+ jsrefcount requestDepth;
+ JSScope *scopeToShare; /* weak reference, see jslock.c */
+ JSScope *lockedSealedScope; /* weak ref, for low-cost sealed
+ scope locking */
+ JSCList threadLinks; /* JSThread contextList linkage */
+
+#define CX_FROM_THREAD_LINKS(tl) \
+ ((JSContext *)((char *)(tl) - offsetof(JSContext, threadLinks)))
+#endif
+
+#if JS_HAS_LVALUE_RETURN
+ /*
+ * Secondary return value from native method called on the left-hand side
+ * of an assignment operator. The native should store the object in which
+ * to set a property in *rval, and return the property's id expressed as a
+ * jsval by calling JS_SetCallReturnValue2(cx, idval).
+ */
+ jsval rval2;
+ JSPackedBool rval2set;
+#endif
+
+#if JS_HAS_XML_SUPPORT
+ /*
+ * Bit-set formed from binary exponentials of the XML_* tiny-ids defined
+ * for boolean settings in jsxml.c, plus an XSF_CACHE_VALID bit. Together
+ * these act as a cache of the boolean XML.ignore* and XML.prettyPrinting
+ * property values associated with this context's global object.
+ */
+ uint8 xmlSettingFlags;
+#endif
+
+ /*
+ * True if creating an exception object, to prevent runaway recursion.
+ * NB: creatingException packs with rval2set, #if JS_HAS_LVALUE_RETURN;
+ * with xmlSettingFlags, #if JS_HAS_XML_SUPPORT; and with throwing below.
+ */
+ JSPackedBool creatingException;
+
+ /*
+ * Exception state -- the exception member is a GC root by definition.
+ * NB: throwing packs with creatingException and rval2set, above.
+ */
+ JSPackedBool throwing; /* is there a pending exception? */
+ jsval exception; /* most-recently-thrown exception */
+ /* Flag to indicate that we run inside gcCallback(cx, JSGC_MARK_END). */
+ JSPackedBool insideGCMarkCallback;
+
+ /* Per-context options. */
+ uint32 options; /* see jsapi.h for JSOPTION_* */
+
+ /* Locale specific callbacks for string conversion. */
+ JSLocaleCallbacks *localeCallbacks;
+
+ /*
+ * cx->resolvingTable is non-null and non-empty if we are initializing
+ * standard classes lazily, or if we are otherwise recursing indirectly
+ * from js_LookupProperty through a JSClass.resolve hook. It is used to
+ * limit runaway recursion (see jsapi.c and jsobj.c).
+ */
+ JSDHashTable *resolvingTable;
+
+ /* PDL of stack headers describing stack slots not rooted by argv, etc. */
+ JSStackHeader *stackHeaders;
+
+ /* Optional stack of heap-allocated scoped local GC roots. */
+ JSLocalRootStack *localRootStack;
+
+ /* Stack of thread-stack-allocated temporary GC roots. */
+ JSTempValueRooter *tempValueRooters;
+
+#ifdef GC_MARK_DEBUG
+ /* Top of the GC mark stack. */
+ void *gcCurrentMarkNode;
+#endif
+};
+
+#ifdef JS_THREADSAFE
+# define JS_THREAD_ID(cx) ((cx)->thread ? (cx)->thread->id : 0)
+#endif
+
+#ifdef __cplusplus
+/* FIXME(bug 332648): Move this into a public header. */
+class JSAutoTempValueRooter
+{
+ public:
+ JSAutoTempValueRooter(JSContext *cx, size_t len, jsval *vec)
+ : mContext(cx) {
+ JS_PUSH_TEMP_ROOT(mContext, len, vec, &mTvr);
+ }
+ JSAutoTempValueRooter(JSContext *cx, jsval v)
+ : mContext(cx) {
+ JS_PUSH_SINGLE_TEMP_ROOT(mContext, v, &mTvr);
+ }
+
+ ~JSAutoTempValueRooter() {
+ JS_POP_TEMP_ROOT(mContext, &mTvr);
+ }
+
+ private:
+ static void *operator new(size_t);
+ static void operator delete(void *, size_t);
+
+ JSContext *mContext;
+ JSTempValueRooter mTvr;
+};
+#endif
+
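+/*
+ * Illustrative C++ usage sketch (editor's addition): the auto rooter pops
+ * itself in its destructor, so early returns leave the temp-root stack
+ * balanced. DoSomethingThatMayAllocate is a hypothetical helper.
+ */
+#if 0
+    {
+        JSAutoTempValueRooter root(cx, v);  /* v is the jsval to protect */
+        if (!DoSomethingThatMayAllocate(cx))
+            return JS_FALSE;
+    }   /* ~JSAutoTempValueRooter runs JS_POP_TEMP_ROOT here */
+#endif
+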
+/*
+ * Slightly more readable macros for testing per-context option settings (also
+ * to hide bitset implementation detail).
+ *
+ * JSOPTION_XML must be handled specially in order to propagate from compile-
+ * to run-time (from cx->options to script->version/cx->version). To do that,
+ * we copy JSOPTION_XML from cx->options into cx->version as JSVERSION_HAS_XML
+ * whenever options are set, and preserve this XML flag across version number
+ * changes done via the JS_SetVersion API.
+ *
+ * But when executing a script or scripted function, the interpreter changes
+ * cx->version, including the XML flag, to script->version. Thus JSOPTION_XML
+ * is a compile-time option that causes a run-time version change during each
+ * activation of the compiled script. That version change has the effect of
+ * changing JS_HAS_XML_OPTION, so that any compiling done via eval enables XML
+ * support. If an XML-enabled script or function calls a non-XML function,
+ * the flag bit will be cleared during the callee's activation.
+ *
+ * Note that JS_SetVersion API calls never pass JSVERSION_HAS_XML or'd into
+ * that API's version parameter.
+ *
+ * Note also that script->version must contain this XML option flag in order
+ * for XDR'ed scripts to serialize and deserialize with that option preserved
+ * for detection at run-time. We can't copy other compile-time options into
+ * script->version because that would break backward compatibility (certain
+ * other options, e.g. JSOPTION_VAROBJFIX, are analogous to JSOPTION_XML).
+ */
+#define JS_HAS_OPTION(cx,option) (((cx)->options & (option)) != 0)
+#define JS_HAS_STRICT_OPTION(cx) JS_HAS_OPTION(cx, JSOPTION_STRICT)
+#define JS_HAS_WERROR_OPTION(cx) JS_HAS_OPTION(cx, JSOPTION_WERROR)
+#define JS_HAS_COMPILE_N_GO_OPTION(cx) JS_HAS_OPTION(cx, JSOPTION_COMPILE_N_GO)
+#define JS_HAS_ATLINE_OPTION(cx) JS_HAS_OPTION(cx, JSOPTION_ATLINE)
+
+#define JSVERSION_MASK 0x0FFF /* see JSVersion in jspubtd.h */
+#define JSVERSION_HAS_XML 0x1000 /* flag induced by XML option */
+
+#define JSVERSION_NUMBER(cx) ((cx)->version & JSVERSION_MASK)
+#define JS_HAS_XML_OPTION(cx) ((cx)->version & JSVERSION_HAS_XML || \
+ JSVERSION_NUMBER(cx) >= JSVERSION_1_6)
+
+#define JS_HAS_NATIVE_BRANCH_CALLBACK_OPTION(cx) \
+ JS_HAS_OPTION(cx, JSOPTION_NATIVE_BRANCH_CALLBACK)
+
+/*
+ * Wrappers for the JSVERSION_IS_* macros from jspubtd.h taking JSContext *cx
+ * and masking off the XML flag and any other high order bits.
+ */
+#define JS_VERSION_IS_ECMA(cx) JSVERSION_IS_ECMA(JSVERSION_NUMBER(cx))
+
+/*
+ * Common subroutine of JS_SetVersion and js_SetVersion, to update per-context
+ * data that depends on version.
+ */
+extern void
+js_OnVersionChange(JSContext *cx);
+
+/*
+ * Unlike the JS_SetVersion API, this function stores JSVERSION_HAS_XML and
+ * any future non-version-number flags induced by compiler options.
+ */
+extern void
+js_SetVersion(JSContext *cx, JSVersion version);
+
+/*
+ * Create and destroy functions for JSContext, which is manually allocated
+ * and exclusively owned.
+ */
+extern JSContext *
+js_NewContext(JSRuntime *rt, size_t stackChunkSize);
+
+extern void
+js_DestroyContext(JSContext *cx, JSDestroyContextMode mode);
+
+/*
+ * Return true if cx points to a context in rt->contextList, else return false.
+ * NB: the caller (see jslock.c:ClaimScope) must hold rt->gcLock.
+ */
+extern JSBool
+js_ValidContextPointer(JSRuntime *rt, JSContext *cx);
+
+/*
+ * If unlocked, acquire and release rt->gcLock around *iterp update; otherwise
+ * the caller must be holding rt->gcLock.
+ */
+extern JSContext *
+js_ContextIterator(JSRuntime *rt, JSBool unlocked, JSContext **iterp);
+
+/*
+ * JSClass.resolve and watchpoint recursion damping machinery.
+ */
+extern JSBool
+js_StartResolving(JSContext *cx, JSResolvingKey *key, uint32 flag,
+ JSResolvingEntry **entryp);
+
+extern void
+js_StopResolving(JSContext *cx, JSResolvingKey *key, uint32 flag,
+ JSResolvingEntry *entry, uint32 generation);
+
+/*
+ * Local root set management.
+ *
+ * NB: the jsval parameters below may be properly tagged jsvals, or GC-thing
+ * pointers cast to (jsval). This relies on JSObject's tag being zero, but
+ * on the up side it lets us push int-jsval-encoded scopeMark values on the
+ * local root stack.
+ */
+extern JSBool
+js_EnterLocalRootScope(JSContext *cx);
+
+#define js_LeaveLocalRootScope(cx) \
+ js_LeaveLocalRootScopeWithResult(cx, JSVAL_NULL)
+
+extern void
+js_LeaveLocalRootScopeWithResult(JSContext *cx, jsval rval);
+
+extern void
+js_ForgetLocalRoot(JSContext *cx, jsval v);
+
+extern int
+js_PushLocalRoot(JSContext *cx, JSLocalRootStack *lrs, jsval v);
+
+extern void
+js_MarkLocalRoots(JSContext *cx, JSLocalRootStack *lrs);
+
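+/*
+ * Illustrative usage sketch (editor's addition, not part of the engine
+ * sources): a native that creates intermediate GC-things can bracket the
+ * work in a local root scope, so every newborn is kept alive by the scope
+ * rather than by explicit JS_AddRoot calls. The helper name BuildGreeting
+ * is hypothetical.
+ */
+#if 0
+static JSBool
+BuildGreeting(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+              jsval *rval)
+{
+    JSString *hello, *world;
+
+    if (!js_EnterLocalRootScope(cx))
+        return JS_FALSE;
+    hello = JS_NewStringCopyZ(cx, "hello");     /* newborn, scope-rooted */
+    /* The next allocation may run the GC; |hello| stays alive. */
+    world = hello ? JS_NewStringCopyZ(cx, "world") : NULL;
+    if (world)
+        *rval = STRING_TO_JSVAL(world);
+    /* Pass the result out so it survives leaving the scope. */
+    js_LeaveLocalRootScopeWithResult(cx, world ? *rval : JSVAL_NULL);
+    return world != NULL;
+}
+#endif
+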
+/*
+ * Report an exception, which is currently realized as a printf-style format
+ * string and its arguments.
+ */
+typedef enum JSErrNum {
+#define MSG_DEF(name, number, count, exception, format) \
+ name = number,
+#include "js.msg"
+#undef MSG_DEF
+ JSErr_Limit
+} JSErrNum;
+
+extern const JSErrorFormatString *
+js_GetErrorMessage(void *userRef, const char *locale, const uintN errorNumber);
+
+#ifdef va_start
+extern JSBool
+js_ReportErrorVA(JSContext *cx, uintN flags, const char *format, va_list ap);
+
+extern JSBool
+js_ReportErrorNumberVA(JSContext *cx, uintN flags, JSErrorCallback callback,
+ void *userRef, const uintN errorNumber,
+ JSBool charArgs, va_list ap);
+
+extern JSBool
+js_ExpandErrorArguments(JSContext *cx, JSErrorCallback callback,
+ void *userRef, const uintN errorNumber,
+ char **message, JSErrorReport *reportp,
+ JSBool *warningp, JSBool charArgs, va_list ap);
+#endif
+
+extern void
+js_ReportOutOfMemory(JSContext *cx);
+
+/*
+ * Report an exception using a previously composed JSErrorReport.
+ * XXXbe remove from "friend" API
+ */
+extern JS_FRIEND_API(void)
+js_ReportErrorAgain(JSContext *cx, const char *message, JSErrorReport *report);
+
+extern void
+js_ReportIsNotDefined(JSContext *cx, const char *name);
+
+extern JSErrorFormatString js_ErrorFormatString[JSErr_Limit];
+
+/*
+ * See JS_SetThreadStackLimit in jsapi.c, where we check that the stack grows
+ * in the expected direction. On Unix-y systems, JS_STACK_GROWTH_DIRECTION is
+ * computed on the build host by jscpucfg.c and written into jsautocfg.h. The
+ * macro is hardcoded in jscpucfg.h on Windows and Mac systems (for historical
+ * reasons pre-dating autoconf usage).
+ */
+#if JS_STACK_GROWTH_DIRECTION > 0
+# define JS_CHECK_STACK_SIZE(cx, lval) ((jsuword)&(lval) < (cx)->stackLimit)
+#else
+# define JS_CHECK_STACK_SIZE(cx, lval) ((jsuword)&(lval) > (cx)->stackLimit)
+#endif
+
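+/*
+ * Illustrative usage sketch (editor's addition): a recursive routine can
+ * bail out before exhausting the C stack by testing a local's address
+ * against cx->stackLimit. The function name Descend is hypothetical, and
+ * JSMSG_OVER_RECURSED is assumed to be the matching js.msg entry.
+ */
+#if 0
+static JSBool
+Descend(JSContext *cx, JSObject *obj)
+{
+    int stackDummy;
+
+    if (!JS_CHECK_STACK_SIZE(cx, stackDummy)) {
+        JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+                             JSMSG_OVER_RECURSED);
+        return JS_FALSE;
+    }
+    /* ... recurse into children, calling Descend again ... */
+    return JS_TRUE;
+}
+#endif
+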
+JS_END_EXTERN_C
+
+#endif /* jscntxt_h___ */
diff --git a/src/third_party/js-1.7/jscompat.h b/src/third_party/js-1.7/jscompat.h
new file mode 100644
index 00000000000..80d86056ecd
--- /dev/null
+++ b/src/third_party/js-1.7/jscompat.h
@@ -0,0 +1,57 @@
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998-1999
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* -*- Mode: C; tab-width: 8 -*-
+ * Copyright (C) 1996-1999 Netscape Communications Corporation, All Rights Reserved.
+ */
+#ifndef jscompat_h___
+#define jscompat_h___
+/*
+ * Compatibility glue for various NSPR versions. We must always define int8,
+ * int16, jsword, and so on to minimize differences with js/ref, no matter what
+ * the NSPR typedef names may be.
+ */
+#include "jstypes.h"
+#include "jslong.h"
+
+typedef JSIntn intN;
+typedef JSUintn uintN;
+typedef JSUword jsuword;
+typedef JSWord jsword;
+typedef float float32;
+#define allocPriv allocPool
+#endif /* jscompat_h___ */
diff --git a/src/third_party/js-1.7/jsconfig.h b/src/third_party/js-1.7/jsconfig.h
new file mode 100644
index 00000000000..d61e802986e
--- /dev/null
+++ b/src/third_party/js-1.7/jsconfig.h
@@ -0,0 +1,208 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS configuration macros.
+ */
+#ifndef JS_VERSION
+#define JS_VERSION 170
+#endif
+
+/*
+ * Compile-time JS version configuration. The JS version numbers lie on the
+ * number line like so:
+ *
+ * 1.0 1.1 1.2 1.3 1.4 ECMAv3 1.5 1.6
+ * ^ ^
+ * | |
+ * basis for ECMAv1 close to ECMAv2
+ *
+ * where ECMAv3 stands for ECMA-262 Edition 3. See the runtime version enum
+ * JSVersion in jspubtd.h. Code in the engine can therefore count on version
+ * <= JSVERSION_1_4 to mean "before the Third Edition of ECMA-262" and version
+ * > JSVERSION_1_4 to mean "at or after the Third Edition".
+ *
+ * In the (likely?) event that SpiderMonkey grows to implement JavaScript 2.0,
+ * or ECMA-262 Edition 4 (JS2 without certain extensions), the version number
+ * to use would be near 200, or greater.
+ *
+ * The JS_VERSION_ECMA_3 version is the minimal configuration conforming to
+ * the ECMA-262 Edition 3 specification. Use it for minimal embeddings, where
+ * you're sure you don't need any of the extensions disabled in this version.
+ * In order to facilitate testing, JS_HAS_OBJ_PROTO_PROP is defined as part of
+ * the JS_VERSION_ECMA_3_TEST version.
+ *
+ * To keep things sane in the modern age, where we need exceptions in order to
+ * implement, e.g., iterators and generators, we are dropping support for all
+ * versions <= 1.4.
+ */
+#define JS_VERSION_ECMA_3 148
+#define JS_VERSION_ECMA_3_TEST 149
+
+#if JS_VERSION == JS_VERSION_ECMA_3 || \
+ JS_VERSION == JS_VERSION_ECMA_3_TEST
+
+#define JS_HAS_STR_HTML_HELPERS 0 /* has str.anchor, str.bold, etc. */
+#define JS_HAS_PERL_SUBSTR 0 /* has str.substr */
+#if JS_VERSION == JS_VERSION_ECMA_3_TEST
+#define JS_HAS_OBJ_PROTO_PROP 1 /* has o.__proto__ etc. */
+#else
+#define JS_HAS_OBJ_PROTO_PROP 0 /* has o.__proto__ etc. */
+#endif
+#define JS_HAS_OBJ_WATCHPOINT 0 /* has o.watch and o.unwatch */
+#define JS_HAS_EXPORT_IMPORT 0 /* has export fun; import obj.fun */
+#define JS_HAS_EVAL_THIS_SCOPE 0 /* Math.eval is same as with (Math) */
+#define JS_HAS_SHARP_VARS 0 /* has #n=, #n# for object literals */
+#define JS_HAS_SCRIPT_OBJECT 0 /* has (new Script("x++")).exec() */
+#define JS_HAS_XDR 0 /* has XDR API and internal support */
+#define JS_HAS_XDR_FREEZE_THAW 0 /* has XDR freeze/thaw script methods */
+#define JS_HAS_TOSOURCE 0 /* has Object/Array toSource method */
+#define JS_HAS_DEBUGGER_KEYWORD 0 /* has hook for debugger keyword */
+#define JS_HAS_CATCH_GUARD 0 /* has exception handling catch guard */
+#define JS_HAS_SPARSE_ARRAYS 0 /* array methods preserve empty elems */
+#define JS_HAS_GETTER_SETTER 0 /* has JS2 getter/setter functions */
+#define JS_HAS_UNEVAL 0 /* has uneval() top-level function */
+#define JS_HAS_CONST 0 /* has JS2 const as alternative var */
+#define JS_HAS_FUN_EXPR_STMT 0 /* has function expression statement */
+#define JS_HAS_LVALUE_RETURN 1 /* has o.item(i) = j; for native item */
+#define JS_HAS_NO_SUCH_METHOD 0 /* has o.__noSuchMethod__ handler */
+#define JS_HAS_XML_SUPPORT 0 /* has ECMAScript for XML support */
+#define JS_HAS_ARRAY_EXTRAS 0 /* has indexOf and Lispy extras */
+#define JS_HAS_GENERATORS 0 /* has yield in generator function */
+#define JS_HAS_BLOCK_SCOPE 0 /* has block scope via let/arraycomp */
+#define JS_HAS_DESTRUCTURING 0 /* has [a,b] = ... or {p:a,q:b} = ... */
+
+#elif JS_VERSION < 150
+
+#error "unsupported JS_VERSION"
+
+#elif JS_VERSION == 150
+
+#define JS_HAS_STR_HTML_HELPERS 1 /* has str.anchor, str.bold, etc. */
+#define JS_HAS_PERL_SUBSTR 1 /* has str.substr */
+#define JS_HAS_OBJ_PROTO_PROP 1 /* has o.__proto__ etc. */
+#define JS_HAS_OBJ_WATCHPOINT 1 /* has o.watch and o.unwatch */
+#define JS_HAS_EXPORT_IMPORT 1 /* has export fun; import obj.fun */
+#define JS_HAS_EVAL_THIS_SCOPE 1 /* Math.eval is same as with (Math) */
+#define JS_HAS_SHARP_VARS 1 /* has #n=, #n# for object literals */
+#define JS_HAS_SCRIPT_OBJECT 1 /* has (new Script("x++")).exec() */
+#define JS_HAS_XDR 1 /* has XDR API and internal support */
+#define JS_HAS_XDR_FREEZE_THAW 0 /* has XDR freeze/thaw script methods */
+#define JS_HAS_TOSOURCE 1 /* has Object/Array toSource method */
+#define JS_HAS_DEBUGGER_KEYWORD 1 /* has hook for debugger keyword */
+#define JS_HAS_CATCH_GUARD 1 /* has exception handling catch guard */
+#define JS_HAS_SPARSE_ARRAYS 0 /* array methods preserve empty elems */
+#define JS_HAS_GETTER_SETTER 1 /* has JS2 getter/setter functions */
+#define JS_HAS_UNEVAL 1 /* has uneval() top-level function */
+#define JS_HAS_CONST 1 /* has JS2 const as alternative var */
+#define JS_HAS_FUN_EXPR_STMT 1 /* has function expression statement */
+#define JS_HAS_LVALUE_RETURN 1 /* has o.item(i) = j; for native item */
+#define JS_HAS_NO_SUCH_METHOD 1 /* has o.__noSuchMethod__ handler */
+#define JS_HAS_XML_SUPPORT 0 /* has ECMAScript for XML support */
+#define JS_HAS_ARRAY_EXTRAS 0 /* has indexOf and Lispy extras */
+#define JS_HAS_GENERATORS 0 /* has yield in generator function */
+#define JS_HAS_BLOCK_SCOPE 0 /* has block scope via let/arraycomp */
+#define JS_HAS_DESTRUCTURING 0 /* has [a,b] = ... or {p:a,q:b} = ... */
+
+#elif JS_VERSION == 160
+
+#define JS_HAS_STR_HTML_HELPERS 1 /* has str.anchor, str.bold, etc. */
+#define JS_HAS_PERL_SUBSTR 1 /* has str.substr */
+#define JS_HAS_OBJ_PROTO_PROP 1 /* has o.__proto__ etc. */
+#define JS_HAS_OBJ_WATCHPOINT 1 /* has o.watch and o.unwatch */
+#define JS_HAS_EXPORT_IMPORT 1 /* has export fun; import obj.fun */
+#define JS_HAS_EVAL_THIS_SCOPE 1 /* Math.eval is same as with (Math) */
+#define JS_HAS_SHARP_VARS 1 /* has #n=, #n# for object literals */
+#define JS_HAS_SCRIPT_OBJECT 1 /* has (new Script("x++")).exec() */
+#define JS_HAS_XDR 1 /* has XDR API and internal support */
+#define JS_HAS_XDR_FREEZE_THAW 0 /* has XDR freeze/thaw script methods */
+#define JS_HAS_TOSOURCE 1 /* has Object/Array toSource method */
+#define JS_HAS_DEBUGGER_KEYWORD 1 /* has hook for debugger keyword */
+#define JS_HAS_CATCH_GUARD 1 /* has exception handling catch guard */
+#define JS_HAS_SPARSE_ARRAYS 0 /* array methods preserve empty elems */
+#define JS_HAS_GETTER_SETTER 1 /* has JS2 getter/setter functions */
+#define JS_HAS_UNEVAL 1 /* has uneval() top-level function */
+#define JS_HAS_CONST 1 /* has JS2 const as alternative var */
+#define JS_HAS_FUN_EXPR_STMT 1 /* has function expression statement */
+#define JS_HAS_LVALUE_RETURN 1 /* has o.item(i) = j; for native item */
+#define JS_HAS_NO_SUCH_METHOD 1 /* has o.__noSuchMethod__ handler */
+#define JS_HAS_XML_SUPPORT 1 /* has ECMAScript for XML support */
+#define JS_HAS_ARRAY_EXTRAS 1 /* has indexOf and Lispy extras */
+#define JS_HAS_GENERATORS 0 /* has yield in generator function */
+#define JS_HAS_BLOCK_SCOPE 0 /* has block scope via let/arraycomp */
+#define JS_HAS_DESTRUCTURING 0 /* has [a,b] = ... or {p:a,q:b} = ... */
+
+#elif JS_VERSION == 170
+
+#define JS_HAS_STR_HTML_HELPERS 1 /* has str.anchor, str.bold, etc. */
+#define JS_HAS_PERL_SUBSTR 1 /* has str.substr */
+#define JS_HAS_OBJ_PROTO_PROP 1 /* has o.__proto__ etc. */
+#define JS_HAS_OBJ_WATCHPOINT 1 /* has o.watch and o.unwatch */
+#define JS_HAS_EXPORT_IMPORT 1 /* has export fun; import obj.fun */
+#define JS_HAS_EVAL_THIS_SCOPE 1 /* Math.eval is same as with (Math) */
+#define JS_HAS_SHARP_VARS 1 /* has #n=, #n# for object literals */
+#define JS_HAS_SCRIPT_OBJECT 1 /* has (new Script("x++")).exec() */
+#define JS_HAS_XDR 1 /* has XDR API and internal support */
+#define JS_HAS_XDR_FREEZE_THAW 0 /* has XDR freeze/thaw script methods */
+#define JS_HAS_TOSOURCE 1 /* has Object/Array toSource method */
+#define JS_HAS_DEBUGGER_KEYWORD 1 /* has hook for debugger keyword */
+#define JS_HAS_CATCH_GUARD 1 /* has exception handling catch guard */
+#define JS_HAS_SPARSE_ARRAYS 0 /* array methods preserve empty elems */
+#define JS_HAS_GETTER_SETTER 1 /* has JS2 getter/setter functions */
+#define JS_HAS_UNEVAL 1 /* has uneval() top-level function */
+#define JS_HAS_CONST 1 /* has JS2 const as alternative var */
+#define JS_HAS_FUN_EXPR_STMT 1 /* has function expression statement */
+#define JS_HAS_LVALUE_RETURN 1 /* has o.item(i) = j; for native item */
+#define JS_HAS_NO_SUCH_METHOD 1 /* has o.__noSuchMethod__ handler */
+#define JS_HAS_XML_SUPPORT 1 /* has ECMAScript for XML support */
+#define JS_HAS_ARRAY_EXTRAS 1 /* has indexOf and Lispy extras */
+#define JS_HAS_GENERATORS 1 /* has yield in generator function */
+#define JS_HAS_BLOCK_SCOPE 1 /* has block scope via let/arraycomp */
+#define JS_HAS_DESTRUCTURING 1 /* has [a,b] = ... or {p:a,q:b} = ... */
+
+#else
+
+#error "unknown JS_VERSION"
+
+#endif
+
+/* Features that are present in all versions. */
+#define JS_HAS_RESERVED_JAVA_KEYWORDS 1
+#define JS_HAS_RESERVED_ECMA_KEYWORDS 1
+
diff --git a/src/third_party/js-1.7/jsconfig.mk b/src/third_party/js-1.7/jsconfig.mk
new file mode 100644
index 00000000000..a3b88673220
--- /dev/null
+++ b/src/third_party/js-1.7/jsconfig.mk
@@ -0,0 +1,181 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998-1999
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either of the GNU General Public License Version 2 or later (the "GPL"),
+# or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+ifndef OBJDIR
+ ifdef OBJDIR_NAME
+ OBJDIR = $(OBJDIR_NAME)
+ endif
+endif
+
+NSPR_VERSION = v4.0
+NSPR_LIBSUFFIX = 4
+
+NSPR_LOCAL = $(MOZ_DEPTH)/dist/$(OBJDIR)/nspr
+NSPR_DIST = $(MOZ_DEPTH)/dist/$(OBJDIR)
+NSPR_OBJDIR = $(OBJDIR)
+ifeq ($(OS_ARCH), SunOS)
+ NSPR_OBJDIR := $(subst _sparc,,$(NSPR_OBJDIR))
+endif
+ifeq ($(OS_ARCH), Linux)
+ LINUX_REL := $(shell uname -r)
+ ifneq (,$(findstring 2.0,$(LINUX_REL)))
+ NSPR_OBJDIR := $(subst _All,2.0_x86_glibc_PTH,$(NSPR_OBJDIR))
+ else
+ NSPR_OBJDIR := $(subst _All,2.2_x86_glibc_PTH,$(NSPR_OBJDIR))
+ endif
+endif
+ifeq ($(OS_ARCH), AIX)
+ NSPR_OBJDIR := $(subst 4.1,4.2,$(NSPR_OBJDIR))
+endif
+ifeq ($(OS_CONFIG), IRIX6.2)
+ NSPR_OBJDIR := $(subst 6.2,6.2_n32_PTH,$(NSPR_OBJDIR))
+endif
+ifeq ($(OS_CONFIG), IRIX6.5)
+ NSPR_OBJDIR := $(subst 6.5,6.5_n32_PTH,$(NSPR_OBJDIR))
+endif
+ifeq ($(OS_ARCH), WINNT)
+ ifeq ($(OBJDIR), WIN32_D.OBJ)
+ NSPR_OBJDIR = WINNT4.0_DBG.OBJ
+ endif
+ ifeq ($(OBJDIR), WIN32_O.OBJ)
+ NSPR_OBJDIR = WINNT4.0_OPT.OBJ
+ endif
+endif
+NSPR_SHARED = /share/builds/components/nspr20/$(NSPR_VERSION)/$(NSPR_OBJDIR)
+ifeq ($(OS_ARCH), WINNT)
+ NSPR_SHARED = nspr20/$(NSPR_VERSION)/$(NSPR_OBJDIR)
+endif
+NSPR_VERSIONFILE = $(NSPR_LOCAL)/Version
+NSPR_CURVERSION := $(shell cat $(NSPR_VERSIONFILE))
+
+get_nspr:
+ @echo "Grabbing NSPR component..."
+ifeq ($(NSPR_VERSION), $(NSPR_CURVERSION))
+ @echo "No need, NSPR is up to date in this tree (ver=$(NSPR_VERSION))."
+else
+ mkdir -p $(NSPR_LOCAL)
+ mkdir -p $(NSPR_DIST)
+ ifneq ($(OS_ARCH), WINNT)
+ cp $(NSPR_SHARED)/*.jar $(NSPR_LOCAL)
+ else
+ sh $(MOZ_DEPTH)/../reltools/compftp.sh $(NSPR_SHARED) $(NSPR_LOCAL) *.jar
+ endif
+ unzip -o $(NSPR_LOCAL)/mdbinary.jar -d $(NSPR_DIST)
+ mkdir -p $(NSPR_DIST)/include
+ unzip -o $(NSPR_LOCAL)/mdheader.jar -d $(NSPR_DIST)/include
+ rm -rf $(NSPR_DIST)/META-INF
+ rm -rf $(NSPR_DIST)/include/META-INF
+ echo $(NSPR_VERSION) > $(NSPR_VERSIONFILE)
+endif
+
+SHIP_DIST = $(MOZ_DEPTH)/dist/$(OBJDIR)
+SHIP_DIR = $(SHIP_DIST)/SHIP
+
+SHIP_LIBS = libjs.$(SO_SUFFIX) libjs.a
+ifdef JS_LIVECONNECT
+ SHIP_LIBS += libjsj.$(SO_SUFFIX) libjsj.a
+endif
+ifeq ($(OS_ARCH), WINNT)
+ SHIP_LIBS = js32.dll js32.lib
+ ifdef JS_LIVECONNECT
+ SHIP_LIBS += jsj.dll jsj.lib
+ endif
+endif
+SHIP_LIBS += $(LCJAR)
+SHIP_LIBS := $(addprefix $(SHIP_DIST)/lib/, $(SHIP_LIBS))
+
+SHIP_INCS = js*.h prmjtime.h resource.h *.msg *.tbl
+ifdef JS_LIVECONNECT
+ SHIP_INCS += netscape*.h nsC*.h nsI*.h
+endif
+SHIP_INCS := $(addprefix $(SHIP_DIST)/include/, $(SHIP_INCS))
+
+SHIP_BINS = js
+ifdef JS_LIVECONNECT
+ SHIP_BINS += lcshell
+endif
+ifeq ($(OS_ARCH), WINNT)
+ SHIP_BINS := $(addsuffix .exe, $(SHIP_BINS))
+endif
+SHIP_BINS := $(addprefix $(SHIP_DIST)/bin/, $(SHIP_BINS))
+
+ifdef BUILD_OPT
+ JSREFJAR = jsref_opt.jar
+else
+ifdef BUILD_IDG
+ JSREFJAR = jsref_idg.jar
+else
+ JSREFJAR = jsref_dbg.jar
+endif
+endif
+
+ship:
+ mkdir -p $(SHIP_DIR)/$(LIBDIR)
+ mkdir -p $(SHIP_DIR)/include
+ mkdir -p $(SHIP_DIR)/bin
+ cp $(SHIP_LIBS) $(SHIP_DIR)/$(LIBDIR)
+ cp $(SHIP_INCS) $(SHIP_DIR)/include
+ cp $(SHIP_BINS) $(SHIP_DIR)/bin
+ cd $(SHIP_DIR); \
+ zip -r $(JSREFJAR) bin lib include
+ifdef BUILD_SHIP
+ cp $(SHIP_DIR)/$(JSREFJAR) $(BUILD_SHIP)
+endif
+
+CWD = $(shell pwd)
+shipSource: $(SHIP_DIR)/jsref_src.lst .FORCE
+ mkdir -p $(SHIP_DIR)
+ cd $(MOZ_DEPTH)/.. ; \
+ zip $(CWD)/$(SHIP_DIR)/jsref_src.jar -@ < $(CWD)/$(SHIP_DIR)/jsref_src.lst
+ifdef BUILD_SHIP
+ cp $(SHIP_DIR)/jsref_src.jar $(BUILD_SHIP)
+endif
+
+JSREFSRCDIRS := $(shell cat $(DEPTH)/SpiderMonkey.rsp)
+$(SHIP_DIR)/jsref_src.lst: .FORCE
+ mkdir -p $(SHIP_DIR)
+ rm -f $@
+ touch $@
+ for d in $(JSREFSRCDIRS); do \
+ cd $(MOZ_DEPTH)/..; \
+ ls -1 -d $$d | grep -v CVS | grep -v \.OBJ >> $(CWD)/$@; \
+ cd $(CWD); \
+ done
+
+.FORCE:
diff --git a/src/third_party/js-1.7/jscpucfg.c b/src/third_party/js-1.7/jscpucfg.c
new file mode 100644
index 00000000000..daa912188f9
--- /dev/null
+++ b/src/third_party/js-1.7/jscpucfg.c
@@ -0,0 +1,380 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ * Roland Mainz <roland.mainz@informatik.med.uni-giessen.de>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * Generate CPU-specific bit-size and similar #defines.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+
+#ifdef CROSS_COMPILE
+#include <prtypes.h>
+#define INT64 PRInt64
+#else
+
+/************************************************************************/
+
+/* Generate cpucfg.h */
+
+#if defined(XP_WIN) || defined(XP_OS2)
+#ifdef WIN32
+#if defined(__GNUC__)
+#define INT64 long long
+#else
+#define INT64 _int64
+#endif /* __GNUC__ */
+#else
+#define INT64 long
+#endif
+#else
+#if defined(HPUX) || defined(__QNX__) || defined(_SCO_DS) || defined(UNIXWARE)
+#define INT64 long
+#else
+#define INT64 long long
+#endif
+#endif
+
+#endif /* CROSS_COMPILE */
+
+#ifdef __GNUC__
+#define NS_NEVER_INLINE __attribute__((noinline))
+#else
+#define NS_NEVER_INLINE
+#endif
+
+#ifdef __SUNPRO_C
+static int StackGrowthDirection(int *dummy1addr);
+#pragma no_inline(StackGrowthDirection)
+#endif
+
+typedef void *prword;
+
+struct align_short {
+ char c;
+ short a;
+};
+struct align_int {
+ char c;
+ int a;
+};
+struct align_long {
+ char c;
+ long a;
+};
+struct align_int64 {
+ char c;
+ INT64 a;
+};
+struct align_fakelonglong {
+ char c;
+ struct {
+ long hi, lo;
+ } a;
+};
+struct align_float {
+ char c;
+ float a;
+};
+struct align_double {
+ char c;
+ double a;
+};
+struct align_pointer {
+ char c;
+ void *a;
+};
+struct align_prword {
+ char c;
+ prword a;
+};
+
+#define ALIGN_OF(type) \
+ (((char*)&(((struct align_##type *)0)->a)) - ((char*)0))
+
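The ALIGN_OF macro above reads a type's alignment off the padding the compiler inserts between a leading char and the member that follows it; it is the pre-offsetof spelling of the same idea. A rough equivalent using the standard macro, assuming <stddef.h> and the align_* structs defined above (the variable name is illustrative):

    #include <stddef.h>
    /* offset of 'a' == alignment requirement of double on this ABI
       (typically 8, or 4 on classic 32-bit x86) */
    static size_t align_of_double_check = offsetof(struct align_double, a);
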
+unsigned int bpb;
+
+static int Log2(unsigned int n)
+{
+ int log2 = 0;
+
+ if (n & (n-1))
+ log2++;
+ if (n >> 16)
+ log2 += 16, n >>= 16;
+ if (n >> 8)
+ log2 += 8, n >>= 8;
+ if (n >> 4)
+ log2 += 4, n >>= 4;
+ if (n >> 2)
+ log2 += 2, n >>= 2;
+ if (n >> 1)
+ log2++;
+ return log2;
+}
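Note that Log2 computes a ceiling: the initial n & (n-1) test adds one whenever n is not an exact power of two, so power-of-two sizes come out exact and everything else rounds up. A quick illustrative check against this file's static helper (check_log2 itself is hypothetical):

    #include <assert.h>
    static void check_log2(void)
    {
        assert(Log2(1)  == 0);
        assert(Log2(8)  == 3);   /* exact for powers of two */
        assert(Log2(9)  == 4);   /* rounded up, not down */
        assert(Log2(64) == 6);
    }
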
+
+/*
+ * Conceivably this could actually be used, but there is lots of code out
+ * there with ands and shifts in it that assumes a byte is exactly 8 bits,
+ * so forget about porting THIS code to all those non 8 bit byte machines.
+ */
+static void BitsPerByte(void)
+{
+ bpb = 8;
+}
+
+static int NS_NEVER_INLINE StackGrowthDirection(int *dummy1addr)
+{
+ int dummy2;
+
+ return (&dummy2 < dummy1addr) ? -1 : 1;
+}
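StackGrowthDirection infers the growth direction by comparing the address of a local in its own frame with the address of a local in the caller's frame; main() below uses exactly this pattern. A sketch of the call (the variable names are illustrative):

    int dummy1;
    int dir = StackGrowthDirection(&dummy1);  /* -1 if the stack grows toward
                                                 lower addresses, +1 otherwise */

The NS_NEVER_INLINE attribute and the Sun #pragma no_inline above are what make this reliable: if the call were inlined, both locals could land in the same frame and the comparison would say nothing about the stack.
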
+
+int main(int argc, char **argv)
+{
+ int sizeof_char, sizeof_short, sizeof_int, sizeof_int64, sizeof_long,
+ sizeof_float, sizeof_double, sizeof_word, sizeof_dword;
+ int bits_per_int64_log2, align_of_short, align_of_int, align_of_long,
+ align_of_int64, align_of_float, align_of_double, align_of_pointer,
+ align_of_word;
+ int dummy1;
+
+ BitsPerByte();
+
+ printf("#ifndef js_cpucfg___\n");
+ printf("#define js_cpucfg___\n\n");
+
+ printf("/* AUTOMATICALLY GENERATED - DO NOT EDIT */\n\n");
+
+#ifdef CROSS_COMPILE
+#if defined(IS_LITTLE_ENDIAN)
+ printf("#define IS_LITTLE_ENDIAN 1\n");
+ printf("#undef IS_BIG_ENDIAN\n\n");
+#elif defined(IS_BIG_ENDIAN)
+ printf("#undef IS_LITTLE_ENDIAN\n");
+ printf("#define IS_BIG_ENDIAN 1\n\n");
+#else
+#error "Endianess not defined."
+#endif
+
+ sizeof_char = PR_BYTES_PER_BYTE;
+ sizeof_short = PR_BYTES_PER_SHORT;
+ sizeof_int = PR_BYTES_PER_INT;
+ sizeof_int64 = PR_BYTES_PER_INT64;
+ sizeof_long = PR_BYTES_PER_LONG;
+ sizeof_float = PR_BYTES_PER_FLOAT;
+ sizeof_double = PR_BYTES_PER_DOUBLE;
+ sizeof_word = PR_BYTES_PER_WORD;
+ sizeof_dword = PR_BYTES_PER_DWORD;
+
+ bits_per_int64_log2 = PR_BITS_PER_INT64_LOG2;
+
+ align_of_short = PR_ALIGN_OF_SHORT;
+ align_of_int = PR_ALIGN_OF_INT;
+ align_of_long = PR_ALIGN_OF_LONG;
+ align_of_int64 = PR_ALIGN_OF_INT64;
+ align_of_float = PR_ALIGN_OF_FLOAT;
+ align_of_double = PR_ALIGN_OF_DOUBLE;
+ align_of_pointer = PR_ALIGN_OF_POINTER;
+ align_of_word = PR_ALIGN_OF_WORD;
+
+#else /* !CROSS_COMPILE */
+
+ /*
+ * We don't handle PDP-endian or similar orders: if a short is big-endian,
+ * so must int and long be big-endian for us to generate the IS_BIG_ENDIAN
+ * #define and the IS_LITTLE_ENDIAN #undef.
+ */
+ {
+ int big_endian = 0, little_endian = 0, ntests = 0;
+
+ if (sizeof(short) == 2) {
+ /* force |volatile| here to get rid of any compiler optimisations
+ * (var in register etc.) which may be applied to |auto| vars -
+ * even those in |union|s...
+ * (|static| is used to get the same functionality for compilers
+ * which do not honor |volatile|...).
+ */
+ volatile static union {
+ short i;
+ char c[2];
+ } u;
+
+ u.i = 0x0102;
+ big_endian += (u.c[0] == 0x01 && u.c[1] == 0x02);
+ little_endian += (u.c[0] == 0x02 && u.c[1] == 0x01);
+ ntests++;
+ }
+
+ if (sizeof(int) == 4) {
+ /* force |volatile| here ... */
+ volatile static union {
+ int i;
+ char c[4];
+ } u;
+
+ u.i = 0x01020304;
+ big_endian += (u.c[0] == 0x01 && u.c[1] == 0x02 &&
+ u.c[2] == 0x03 && u.c[3] == 0x04);
+ little_endian += (u.c[0] == 0x04 && u.c[1] == 0x03 &&
+ u.c[2] == 0x02 && u.c[3] == 0x01);
+ ntests++;
+ }
+
+ if (sizeof(long) == 8) {
+ /* force |volatile| here ... */
+ volatile static union {
+ long i;
+ char c[8];
+ } u;
+
+ /*
+ * Write this as portably as possible: avoid 0x0102030405060708L
+ * and <<= 32.
+ */
+ u.i = 0x01020304;
+ u.i <<= 16, u.i <<= 16;
+ u.i |= 0x05060708;
+ big_endian += (u.c[0] == 0x01 && u.c[1] == 0x02 &&
+ u.c[2] == 0x03 && u.c[3] == 0x04 &&
+ u.c[4] == 0x05 && u.c[5] == 0x06 &&
+ u.c[6] == 0x07 && u.c[7] == 0x08);
+ little_endian += (u.c[0] == 0x08 && u.c[1] == 0x07 &&
+ u.c[2] == 0x06 && u.c[3] == 0x05 &&
+ u.c[4] == 0x04 && u.c[5] == 0x03 &&
+ u.c[6] == 0x02 && u.c[7] == 0x01);
+ ntests++;
+ }
+
+ if (big_endian && big_endian == ntests) {
+ printf("#undef IS_LITTLE_ENDIAN\n");
+ printf("#define IS_BIG_ENDIAN 1\n\n");
+ } else if (little_endian && little_endian == ntests) {
+ printf("#define IS_LITTLE_ENDIAN 1\n");
+ printf("#undef IS_BIG_ENDIAN\n\n");
+ } else {
+ fprintf(stderr, "%s: unknown byte order"
+ "(big_endian=%d, little_endian=%d, ntests=%d)!\n",
+ argv[0], big_endian, little_endian, ntests);
+ return EXIT_FAILURE;
+ }
+ }
+
+ sizeof_char = sizeof(char);
+ sizeof_short = sizeof(short);
+ sizeof_int = sizeof(int);
+ sizeof_int64 = 8;
+ sizeof_long = sizeof(long);
+ sizeof_float = sizeof(float);
+ sizeof_double = sizeof(double);
+ sizeof_word = sizeof(prword);
+ sizeof_dword = 8;
+
+ bits_per_int64_log2 = 6;
+
+ align_of_short = ALIGN_OF(short);
+ align_of_int = ALIGN_OF(int);
+ align_of_long = ALIGN_OF(long);
+ if (sizeof(INT64) < 8) {
+ /* this machine doesn't actually support int64's */
+ align_of_int64 = ALIGN_OF(fakelonglong);
+ } else {
+ align_of_int64 = ALIGN_OF(int64);
+ }
+ align_of_float = ALIGN_OF(float);
+ align_of_double = ALIGN_OF(double);
+ align_of_pointer = ALIGN_OF(pointer);
+ align_of_word = ALIGN_OF(prword);
+
+#endif /* CROSS_COMPILE */
+
+ printf("#define JS_BYTES_PER_BYTE %dL\n", sizeof_char);
+ printf("#define JS_BYTES_PER_SHORT %dL\n", sizeof_short);
+ printf("#define JS_BYTES_PER_INT %dL\n", sizeof_int);
+ printf("#define JS_BYTES_PER_INT64 %dL\n", sizeof_int64);
+ printf("#define JS_BYTES_PER_LONG %dL\n", sizeof_long);
+ printf("#define JS_BYTES_PER_FLOAT %dL\n", sizeof_float);
+ printf("#define JS_BYTES_PER_DOUBLE %dL\n", sizeof_double);
+ printf("#define JS_BYTES_PER_WORD %dL\n", sizeof_word);
+ printf("#define JS_BYTES_PER_DWORD %dL\n", sizeof_dword);
+ printf("\n");
+
+ printf("#define JS_BITS_PER_BYTE %dL\n", bpb);
+ printf("#define JS_BITS_PER_SHORT %dL\n", bpb * sizeof_short);
+ printf("#define JS_BITS_PER_INT %dL\n", bpb * sizeof_int);
+ printf("#define JS_BITS_PER_INT64 %dL\n", bpb * sizeof_int64);
+ printf("#define JS_BITS_PER_LONG %dL\n", bpb * sizeof_long);
+ printf("#define JS_BITS_PER_FLOAT %dL\n", bpb * sizeof_float);
+ printf("#define JS_BITS_PER_DOUBLE %dL\n", bpb * sizeof_double);
+ printf("#define JS_BITS_PER_WORD %dL\n", bpb * sizeof_word);
+ printf("\n");
+
+ printf("#define JS_BITS_PER_BYTE_LOG2 %dL\n", Log2(bpb));
+ printf("#define JS_BITS_PER_SHORT_LOG2 %dL\n", Log2(bpb * sizeof_short));
+ printf("#define JS_BITS_PER_INT_LOG2 %dL\n", Log2(bpb * sizeof_int));
+ printf("#define JS_BITS_PER_INT64_LOG2 %dL\n", bits_per_int64_log2);
+ printf("#define JS_BITS_PER_LONG_LOG2 %dL\n", Log2(bpb * sizeof_long));
+ printf("#define JS_BITS_PER_FLOAT_LOG2 %dL\n", Log2(bpb * sizeof_float));
+ printf("#define JS_BITS_PER_DOUBLE_LOG2 %dL\n", Log2(bpb * sizeof_double));
+ printf("#define JS_BITS_PER_WORD_LOG2 %dL\n", Log2(bpb * sizeof_word));
+ printf("\n");
+
+ printf("#define JS_ALIGN_OF_SHORT %dL\n", align_of_short);
+ printf("#define JS_ALIGN_OF_INT %dL\n", align_of_int);
+ printf("#define JS_ALIGN_OF_LONG %dL\n", align_of_long);
+ printf("#define JS_ALIGN_OF_INT64 %dL\n", align_of_int64);
+ printf("#define JS_ALIGN_OF_FLOAT %dL\n", align_of_float);
+ printf("#define JS_ALIGN_OF_DOUBLE %dL\n", align_of_double);
+ printf("#define JS_ALIGN_OF_POINTER %dL\n", align_of_pointer);
+ printf("#define JS_ALIGN_OF_WORD %dL\n", align_of_word);
+ printf("\n");
+
+ printf("#define JS_BYTES_PER_WORD_LOG2 %dL\n", Log2(sizeof_word));
+ printf("#define JS_BYTES_PER_DWORD_LOG2 %dL\n", Log2(sizeof_dword));
+ printf("#define JS_WORDS_PER_DWORD_LOG2 %dL\n", Log2(sizeof_dword/sizeof_word));
+ printf("\n");
+
+ printf("#define JS_STACK_GROWTH_DIRECTION (%d)\n", StackGrowthDirection(&dummy1));
+ printf("\n");
+
+ printf("#endif /* js_cpucfg___ */\n");
+
+ return EXIT_SUCCESS;
+}
+
diff --git a/src/third_party/js-1.7/jscpucfg.h b/src/third_party/js-1.7/jscpucfg.h
new file mode 100644
index 00000000000..63ef932a690
--- /dev/null
+++ b/src/third_party/js-1.7/jscpucfg.h
@@ -0,0 +1,212 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef js_cpucfg___
+#define js_cpucfg___
+
+#include "jsosdep.h"
+
+#if defined(XP_WIN) || defined(XP_OS2) || defined(WINCE)
+
+#if defined(_WIN64)
+
+#if defined(_M_X64) || defined(_M_AMD64) || defined(_AMD64_)
+#define IS_LITTLE_ENDIAN 1
+#undef IS_BIG_ENDIAN
+
+#define JS_BYTES_PER_BYTE 1L
+#define JS_BYTES_PER_SHORT 2L
+#define JS_BYTES_PER_INT 4L
+#define JS_BYTES_PER_INT64 8L
+#define JS_BYTES_PER_LONG 4L
+#define JS_BYTES_PER_FLOAT 4L
+#define JS_BYTES_PER_DOUBLE 8L
+#define JS_BYTES_PER_WORD 8L
+#define JS_BYTES_PER_DWORD 8L
+
+#define JS_BITS_PER_BYTE 8L
+#define JS_BITS_PER_SHORT 16L
+#define JS_BITS_PER_INT 32L
+#define JS_BITS_PER_INT64 64L
+#define JS_BITS_PER_LONG 32L
+#define JS_BITS_PER_FLOAT 32L
+#define JS_BITS_PER_DOUBLE 64L
+#define JS_BITS_PER_WORD 64L
+
+#define JS_BITS_PER_BYTE_LOG2 3L
+#define JS_BITS_PER_SHORT_LOG2 4L
+#define JS_BITS_PER_INT_LOG2 5L
+#define JS_BITS_PER_INT64_LOG2 6L
+#define JS_BITS_PER_LONG_LOG2 5L
+#define JS_BITS_PER_FLOAT_LOG2 5L
+#define JS_BITS_PER_DOUBLE_LOG2 6L
+#define JS_BITS_PER_WORD_LOG2 6L
+
+#define JS_ALIGN_OF_SHORT 2L
+#define JS_ALIGN_OF_INT 4L
+#define JS_ALIGN_OF_LONG 4L
+#define JS_ALIGN_OF_INT64 8L
+#define JS_ALIGN_OF_FLOAT 4L
+#define JS_ALIGN_OF_DOUBLE 8L
+#define JS_ALIGN_OF_POINTER 8L
+#define JS_ALIGN_OF_WORD 8L
+
+#define JS_BYTES_PER_WORD_LOG2 3L
+#define JS_BYTES_PER_DWORD_LOG2 3L
+#define PR_WORDS_PER_DWORD_LOG2 0L
+#else /* !(defined(_M_X64) || defined(_M_AMD64) || defined(_AMD64_)) */
+#error "CPU type is unknown"
+#endif /* !(defined(_M_X64) || defined(_M_AMD64) || defined(_AMD64_)) */
+
+#elif defined(_WIN32) || defined(XP_OS2) || defined(WINCE)
+
+#ifdef __WATCOMC__
+#define HAVE_VA_LIST_AS_ARRAY
+#endif
+
+#define IS_LITTLE_ENDIAN 1
+#undef IS_BIG_ENDIAN
+
+#define JS_BYTES_PER_BYTE 1L
+#define JS_BYTES_PER_SHORT 2L
+#define JS_BYTES_PER_INT 4L
+#define JS_BYTES_PER_INT64 8L
+#define JS_BYTES_PER_LONG 4L
+#define JS_BYTES_PER_FLOAT 4L
+#define JS_BYTES_PER_DOUBLE 8L
+#define JS_BYTES_PER_WORD 4L
+#define JS_BYTES_PER_DWORD 8L
+
+#define JS_BITS_PER_BYTE 8L
+#define JS_BITS_PER_SHORT 16L
+#define JS_BITS_PER_INT 32L
+#define JS_BITS_PER_INT64 64L
+#define JS_BITS_PER_LONG 32L
+#define JS_BITS_PER_FLOAT 32L
+#define JS_BITS_PER_DOUBLE 64L
+#define JS_BITS_PER_WORD 32L
+
+#define JS_BITS_PER_BYTE_LOG2 3L
+#define JS_BITS_PER_SHORT_LOG2 4L
+#define JS_BITS_PER_INT_LOG2 5L
+#define JS_BITS_PER_INT64_LOG2 6L
+#define JS_BITS_PER_LONG_LOG2 5L
+#define JS_BITS_PER_FLOAT_LOG2 5L
+#define JS_BITS_PER_DOUBLE_LOG2 6L
+#define JS_BITS_PER_WORD_LOG2 5L
+
+#define JS_ALIGN_OF_SHORT 2L
+#define JS_ALIGN_OF_INT 4L
+#define JS_ALIGN_OF_LONG 4L
+#define JS_ALIGN_OF_INT64 8L
+#define JS_ALIGN_OF_FLOAT 4L
+#define JS_ALIGN_OF_DOUBLE 4L
+#define JS_ALIGN_OF_POINTER 4L
+#define JS_ALIGN_OF_WORD 4L
+
+#define JS_BYTES_PER_WORD_LOG2 2L
+#define JS_BYTES_PER_DWORD_LOG2 3L
+#define PR_WORDS_PER_DWORD_LOG2 1L
+#endif /* _WIN32 || XP_OS2 || WINCE*/
+
+#if defined(_WINDOWS) && !defined(_WIN32) /* WIN16 */
+
+#define IS_LITTLE_ENDIAN 1
+#undef IS_BIG_ENDIAN
+
+#define JS_BYTES_PER_BYTE 1L
+#define JS_BYTES_PER_SHORT 2L
+#define JS_BYTES_PER_INT 2L
+#define JS_BYTES_PER_INT64 8L
+#define JS_BYTES_PER_LONG 4L
+#define JS_BYTES_PER_FLOAT 4L
+#define JS_BYTES_PER_DOUBLE 8L
+#define JS_BYTES_PER_WORD 4L
+#define JS_BYTES_PER_DWORD 8L
+
+#define JS_BITS_PER_BYTE 8L
+#define JS_BITS_PER_SHORT 16L
+#define JS_BITS_PER_INT 16L
+#define JS_BITS_PER_INT64 64L
+#define JS_BITS_PER_LONG 32L
+#define JS_BITS_PER_FLOAT 32L
+#define JS_BITS_PER_DOUBLE 64L
+#define JS_BITS_PER_WORD 32L
+
+#define JS_BITS_PER_BYTE_LOG2 3L
+#define JS_BITS_PER_SHORT_LOG2 4L
+#define JS_BITS_PER_INT_LOG2 4L
+#define JS_BITS_PER_INT64_LOG2 6L
+#define JS_BITS_PER_LONG_LOG2 5L
+#define JS_BITS_PER_FLOAT_LOG2 5L
+#define JS_BITS_PER_DOUBLE_LOG2 6L
+#define JS_BITS_PER_WORD_LOG2 5L
+
+#define JS_ALIGN_OF_SHORT 2L
+#define JS_ALIGN_OF_INT 2L
+#define JS_ALIGN_OF_LONG 2L
+#define JS_ALIGN_OF_INT64 2L
+#define JS_ALIGN_OF_FLOAT 2L
+#define JS_ALIGN_OF_DOUBLE 2L
+#define JS_ALIGN_OF_POINTER 2L
+#define JS_ALIGN_OF_WORD 2L
+
+#define JS_BYTES_PER_WORD_LOG2 2L
+#define JS_BYTES_PER_DWORD_LOG2 3L
+#define PR_WORDS_PER_DWORD_LOG2 1L
+
+#endif /* defined(_WINDOWS) && !defined(_WIN32) */
+
+#elif defined(XP_UNIX) || defined(XP_BEOS)
+
+#error "This file is supposed to be auto-generated on UNIX platforms, but the"
+#error "static version for Mac and Windows platforms is being used."
+#error "Something's probably wrong with paths/headers/dependencies/Makefiles."
+
+#else
+
+#error "Must define one of XP_BEOS, XP_OS2, XP_WIN, or XP_UNIX"
+
+#endif
+
+#ifndef JS_STACK_GROWTH_DIRECTION
+#define JS_STACK_GROWTH_DIRECTION (-1)
+#endif
+
+#endif /* js_cpucfg___ */
diff --git a/src/third_party/js-1.7/jsdate.c b/src/third_party/js-1.7/jsdate.c
new file mode 100644
index 00000000000..9e6697f0fac
--- /dev/null
+++ b/src/third_party/js-1.7/jsdate.c
@@ -0,0 +1,2371 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS date methods.
+ */
+
+/*
+ * "For example, OS/360 devotes 26 bytes of the permanently
+ * resident date-turnover routine to the proper handling of
+ * December 31 on leap years (when it is Day 366). That
+ * might have been left to the operator."
+ *
+ * Frederick Brooks, 'The Second-System Effect'.
+ */
+
+#include "jsstddef.h"
+#include <ctype.h>
+#include <locale.h>
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+#include "jstypes.h"
+#include "jsprf.h"
+#include "prmjtime.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsapi.h"
+#include "jsconfig.h"
+#include "jscntxt.h"
+#include "jsdate.h"
+#include "jsinterp.h"
+#include "jsnum.h"
+#include "jsobj.h"
+#include "jsstr.h"
+
+/*
+ * The JS 'Date' object is patterned after the Java 'Date' object.
+ * Here is a script:
+ *
+ * today = new Date();
+ *
+ * print(today.toLocaleString());
+ *
+ * weekDay = today.getDay();
+ *
+ *
+ * These Java (and ECMA-262) methods are supported:
+ *
+ * UTC
+ * getDate (getUTCDate)
+ * getDay (getUTCDay)
+ * getHours (getUTCHours)
+ * getMinutes (getUTCMinutes)
+ * getMonth (getUTCMonth)
+ * getSeconds (getUTCSeconds)
+ * getMilliseconds (getUTCMilliseconds)
+ * getTime
+ * getTimezoneOffset
+ * getYear
+ * getFullYear (getUTCFullYear)
+ * parse
+ * setDate (setUTCDate)
+ * setHours (setUTCHours)
+ * setMinutes (setUTCMinutes)
+ * setMonth (setUTCMonth)
+ * setSeconds (setUTCSeconds)
+ * setMilliseconds (setUTCMilliseconds)
+ * setTime
+ * setYear (setFullYear, setUTCFullYear)
+ * toGMTString (toUTCString)
+ * toLocaleString
+ * toString
+ *
+ *
+ * These Java methods are not supported
+ *
+ * setDay
+ * before
+ * after
+ * equals
+ * hashCode
+ */
+
+/*
+ * 11/97 - jsdate.c has been rewritten to conform to the ECMA-262 language
+ * definition and reduce dependence on NSPR. NSPR is used to get the current
+ * time in milliseconds, the time zone offset, and the daylight savings time
+ * offset for a given time. NSPR is also used for Date.toLocaleString(), for
+ * locale-specific formatting, and to get a string representing the timezone.
+ * (Which turns out to be platform-dependent.)
+ *
+ * To do:
+ * (I did some performance tests by timing how long it took to run what
+ * I had of the js ECMA conformance tests.)
+ *
+ * - look at saving results across multiple calls to supporting
+ * functions; the toString functions compute some of the same values
+ * multiple times. Although - I took a quick stab at this, and I lost
+ * rather than gained. (Fractionally.) Hard to tell what compilers/processors
+ * are doing these days.
+ *
+ * - look at tweaking function return types to return double instead
+ * of int; this seems to make things run slightly faster sometimes.
+ * (though it could be architecture-dependent.) It'd be good to see
+ * how this does on win32. (Tried it on irix.) Types could use a
+ * general going-over.
+ */
+
+/*
+ * Supporting functions - ECMA 15.9.1.*
+ */
+
+#define HalfTimeDomain 8.64e15
+#define HoursPerDay 24.0
+#define MinutesPerDay (HoursPerDay * MinutesPerHour)
+#define MinutesPerHour 60.0
+#define SecondsPerDay (MinutesPerDay * SecondsPerMinute)
+#define SecondsPerHour (MinutesPerHour * SecondsPerMinute)
+#define SecondsPerMinute 60.0
+
+#if defined(XP_WIN) || defined(XP_OS2)
+/* Work around msvc double optimization bug by making these runtime values; if
+ * they're available at compile time, msvc optimizes division by them by
+ * computing the reciprocal and multiplying instead of dividing - this loses
+ * when the reciprocal isn't representable in a double.
+ */
+static jsdouble msPerSecond = 1000.0;
+static jsdouble msPerDay = SecondsPerDay * 1000.0;
+static jsdouble msPerHour = SecondsPerHour * 1000.0;
+static jsdouble msPerMinute = SecondsPerMinute * 1000.0;
+#else
+#define msPerDay (SecondsPerDay * msPerSecond)
+#define msPerHour (SecondsPerHour * msPerSecond)
+#define msPerMinute (SecondsPerMinute * msPerSecond)
+#define msPerSecond 1000.0
+#endif
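Concretely, the failure mode the #if above guards against looks like this (illustrative; exact code generation depends on the compiler version and flags):

    /* With msPerDay visible as a compile-time constant, MSVC may rewrite      */
    /*     d = t / msPerDay;                                                   */
    /* as                                                                      */
    /*     d = t * (1.0 / 86400000.0);  -- the reciprocal is not exactly       */
    /*                                     representable in a double           */
    /* which perturbs the low bits of d.  Runtime divisors keep the real division. */
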
+
+#define Day(t) floor((t) / msPerDay)
+
+static jsdouble
+TimeWithinDay(jsdouble t)
+{
+ jsdouble result;
+ result = fmod(t, msPerDay);
+ if (result < 0)
+ result += msPerDay;
+ return result;
+}
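C's fmod keeps the sign of the dividend, so for instants before the epoch the raw remainder is negative; the correction above shifts it back into [0, msPerDay). For example:

    jsdouble ms = TimeWithinDay(-1.0);   /* 86399999.0 (1 ms before midnight),
                                            not -1.0 */
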
+
+#define DaysInYear(y) ((y) % 4 == 0 && ((y) % 100 || ((y) % 400 == 0)) \
+ ? 366 : 365)
+
+/* math here has to be f.p, because we need
+ * floor((1968 - 1969) / 4) == -1
+ */
+#define DayFromYear(y) (365 * ((y)-1970) + floor(((y)-1969)/4.0) \
+ - floor(((y)-1901)/100.0) + floor(((y)-1601)/400.0))
+#define TimeFromYear(y) (DayFromYear(y) * msPerDay)
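The point of the comment above is that C integer division truncates toward zero, while DayFromYear needs flooring so that years before 1970 map to the correct negative day count:

    int    trunc_div = (1968 - 1969) / 4;               /*  0 -- off by one day       */
    double floor_div = floor((1968.0 - 1969.0) / 4.0);  /* -1 -- what the macro needs */
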
+
+static jsint
+YearFromTime(jsdouble t)
+{
+ jsint y = (jsint) floor(t /(msPerDay*365.2425)) + 1970;
+ jsdouble t2 = (jsdouble) TimeFromYear(y);
+
+ if (t2 > t) {
+ y--;
+ } else {
+ if (t2 + msPerDay * DaysInYear(y) <= t)
+ y++;
+ }
+ return y;
+}
+
+#define InLeapYear(t) (JSBool) (DaysInYear(YearFromTime(t)) == 366)
+
+#define DayWithinYear(t, year) ((intN) (Day(t) - DayFromYear(year)))
+
+/*
+ * The following array contains the day of year for the first day of
+ * each month, where index 0 is January, and day 0 is January 1.
+ */
+static jsdouble firstDayOfMonth[2][12] = {
+ {0.0, 31.0, 59.0, 90.0, 120.0, 151.0, 181.0, 212.0, 243.0, 273.0, 304.0, 334.0},
+ {0.0, 31.0, 60.0, 91.0, 121.0, 152.0, 182.0, 213.0, 244.0, 274.0, 305.0, 335.0}
+};
+
+#define DayFromMonth(m, leap) firstDayOfMonth[leap][(intN)m];
+
+static intN
+MonthFromTime(jsdouble t)
+{
+ intN d, step;
+ jsint year = YearFromTime(t);
+ d = DayWithinYear(t, year);
+
+ if (d < (step = 31))
+ return 0;
+ step += (InLeapYear(t) ? 29 : 28);
+ if (d < step)
+ return 1;
+ if (d < (step += 31))
+ return 2;
+ if (d < (step += 30))
+ return 3;
+ if (d < (step += 31))
+ return 4;
+ if (d < (step += 30))
+ return 5;
+ if (d < (step += 31))
+ return 6;
+ if (d < (step += 31))
+ return 7;
+ if (d < (step += 30))
+ return 8;
+ if (d < (step += 31))
+ return 9;
+ if (d < (step += 30))
+ return 10;
+ return 11;
+}
+
+static intN
+DateFromTime(jsdouble t)
+{
+ intN d, step, next;
+ jsint year = YearFromTime(t);
+ d = DayWithinYear(t, year);
+
+ if (d <= (next = 30))
+ return d + 1;
+ step = next;
+ next += (InLeapYear(t) ? 29 : 28);
+ if (d <= next)
+ return d - step;
+ step = next;
+ if (d <= (next += 31))
+ return d - step;
+ step = next;
+ if (d <= (next += 30))
+ return d - step;
+ step = next;
+ if (d <= (next += 31))
+ return d - step;
+ step = next;
+ if (d <= (next += 30))
+ return d - step;
+ step = next;
+ if (d <= (next += 31))
+ return d - step;
+ step = next;
+ if (d <= (next += 31))
+ return d - step;
+ step = next;
+ if (d <= (next += 30))
+ return d - step;
+ step = next;
+ if (d <= (next += 31))
+ return d - step;
+ step = next;
+ if (d <= (next += 30))
+ return d - step;
+ step = next;
+ return d - step;
+}
+
+static intN
+WeekDay(jsdouble t)
+{
+ jsint result;
+ result = (jsint) Day(t) + 4;
+ result = result % 7;
+ if (result < 0)
+ result += 7;
+ return (intN) result;
+}
+
+#define MakeTime(hour, min, sec, ms) \
+((((hour) * MinutesPerHour + (min)) * SecondsPerMinute + (sec)) * msPerSecond + (ms))
+
+static jsdouble
+MakeDay(jsdouble year, jsdouble month, jsdouble date)
+{
+ JSBool leap;
+ jsdouble yearday;
+ jsdouble monthday;
+
+ year += floor(month / 12);
+
+ month = fmod(month, 12.0);
+ if (month < 0)
+ month += 12;
+
+ leap = (DaysInYear((jsint) year) == 366);
+
+ yearday = floor(TimeFromYear(year) / msPerDay);
+ monthday = DayFromMonth(month, leap);
+
+ return yearday + monthday + date - 1;
+}
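MakeDay returns a day count relative to 1 January 1970 (day 0), with out-of-range month values rolling over into the year, as ECMA's MakeDay operation requires. Illustrative values:

    jsdouble d0 = MakeDay(1970, 0, 1);    /*   0.0 -> 1 Jan 1970 */
    jsdouble d1 = MakeDay(1970, 12, 1);   /* 365.0 -> 1 Jan 1971 (month 12 rolls over) */
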
+
+#define MakeDate(day, time) ((day) * msPerDay + (time))
+
+/*
+ * Years and leap years on which Jan 1 is a Sunday, Monday, etc.
+ *
+ * yearStartingWith[0][i] is an example non-leap year where
+ * Jan 1 appears on Sunday (i == 0), Monday (i == 1), etc.
+ *
+ * yearStartingWith[1][i] is an example leap year where
+ * Jan 1 appears on Sunday (i == 0), Monday (i == 1), etc.
+ */
+static jsint yearStartingWith[2][7] = {
+ {1978, 1973, 1974, 1975, 1981, 1971, 1977},
+ {1984, 1996, 1980, 1992, 1976, 1988, 1972}
+};
+
+/*
+ * Find a year for which any given date will fall on the same weekday.
+ *
+ * This function should be used with caution when used other than
+ * for determining DST; it hasn't been proven not to produce an
+ * incorrect year for times near year boundaries.
+ */
+static jsint
+EquivalentYearForDST(jsint year)
+{
+ jsint day;
+ JSBool isLeapYear;
+
+ day = (jsint) DayFromYear(year) + 4;
+ day = day % 7;
+ if (day < 0)
+ day += 7;
+
+ isLeapYear = (DaysInYear(year) == 366);
+
+ return yearStartingWith[isLeapYear][day];
+}
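The table lookup above maps an arbitrary year to a sample year between 1971 and 1996 whose 1 January falls on the same weekday and which has the same leap status, so OS-level DST rules (which key on the Nth weekday of a month) yield matching offsets. Illustrative call (y_far is hypothetical):

    jsint y_far   = 2100;                        /* beyond the range DaylightSavingTA
                                                    hands to the OS directly */
    jsint y_equiv = EquivalentYearForDST(y_far); /* one of the 1971..1996 sample years */
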
+
+/* LocalTZA gets set by js_InitDateClass() */
+static jsdouble LocalTZA;
+
+static jsdouble
+DaylightSavingTA(jsdouble t)
+{
+ volatile int64 PR_t;
+ int64 ms2us;
+ int64 offset;
+ jsdouble result;
+
+ /* abort if NaN */
+ if (JSDOUBLE_IS_NaN(t))
+ return t;
+
+ /*
+ * If earlier than 1970 or after 2038, potentially beyond the ken of
+ * many OSes, map it to an equivalent year before asking.
+ */
+ if (t < 0.0 || t > 2145916800000.0) {
+ jsint year;
+ jsdouble day;
+
+ year = EquivalentYearForDST(YearFromTime(t));
+ day = MakeDay(year, MonthFromTime(t), DateFromTime(t));
+ t = MakeDate(day, TimeWithinDay(t));
+ }
+
+ /* put our t in an LL, and map it to usec for prtime */
+ JSLL_D2L(PR_t, t);
+ JSLL_I2L(ms2us, PRMJ_USEC_PER_MSEC);
+ JSLL_MUL(PR_t, PR_t, ms2us);
+
+ offset = PRMJ_DSTOffset(PR_t);
+
+ JSLL_DIV(offset, offset, ms2us);
+ JSLL_L2D(result, offset);
+ return result;
+}
+
+
+#define AdjustTime(t) fmod(LocalTZA + DaylightSavingTA(t), msPerDay)
+
+#define LocalTime(t) ((t) + AdjustTime(t))
+
+static jsdouble
+UTC(jsdouble t)
+{
+ return t - AdjustTime(t - LocalTZA);
+}
+
+static intN
+HourFromTime(jsdouble t)
+{
+ intN result = (intN) fmod(floor(t/msPerHour), HoursPerDay);
+ if (result < 0)
+ result += (intN)HoursPerDay;
+ return result;
+}
+
+static intN
+MinFromTime(jsdouble t)
+{
+ intN result = (intN) fmod(floor(t / msPerMinute), MinutesPerHour);
+ if (result < 0)
+ result += (intN)MinutesPerHour;
+ return result;
+}
+
+static intN
+SecFromTime(jsdouble t)
+{
+ intN result = (intN) fmod(floor(t / msPerSecond), SecondsPerMinute);
+ if (result < 0)
+ result += (intN)SecondsPerMinute;
+ return result;
+}
+
+static intN
+msFromTime(jsdouble t)
+{
+ intN result = (intN) fmod(t, msPerSecond);
+ if (result < 0)
+ result += (intN)msPerSecond;
+ return result;
+}
+
+#define TIMECLIP(d) ((JSDOUBLE_IS_FINITE(d) \
+ && !((d < 0 ? -d : d) > HalfTimeDomain)) \
+ ? js_DoubleToInteger(d + (+0.)) : *cx->runtime->jsNaN)
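TIMECLIP implements the ECMA-262 TimeClip operation: finite values within +/-8.64e15 ms of the epoch (HalfTimeDomain, i.e. +/-100,000,000 days) are truncated to an integral millisecond count, and everything else becomes NaN. Inside a function with a JSContext *cx in scope, the effect is roughly:

    d = TIMECLIP(1234.5);        /* 1234.0 -- fractional milliseconds dropped */
    d = TIMECLIP(8.64e15 + 1e3); /* NaN    -- outside the representable time range */
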
+
+/**
+ * end of ECMA 'support' functions
+ */
+
+/*
+ * Other Support routines and definitions
+ */
+
+JSClass js_DateClass = {
+ js_Date_str,
+ JSCLASS_HAS_PRIVATE | JSCLASS_HAS_CACHED_PROTO(JSProto_Date),
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+};
+
+/* for use by date_parse */
+
+static const char* wtb[] = {
+ "am", "pm",
+ "monday", "tuesday", "wednesday", "thursday", "friday",
+ "saturday", "sunday",
+ "january", "february", "march", "april", "may", "june",
+ "july", "august", "september", "october", "november", "december",
+ "gmt", "ut", "utc",
+ "est", "edt",
+ "cst", "cdt",
+ "mst", "mdt",
+ "pst", "pdt"
+ /* time zone table needs to be expanded */
+};
+
+static int ttb[] = {
+ -1, -2, 0, 0, 0, 0, 0, 0, 0, /* AM/PM */
+ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
+ 10000 + 0, 10000 + 0, 10000 + 0, /* GMT/UT/UTC */
+ 10000 + 5 * 60, 10000 + 4 * 60, /* EST/EDT */
+ 10000 + 6 * 60, 10000 + 5 * 60, /* CST/CDT */
+ 10000 + 7 * 60, 10000 + 6 * 60, /* MST/MDT */
+ 10000 + 8 * 60, 10000 + 7 * 60 /* PST/PDT */
+};
+
+/* helper for date_parse */
+static JSBool
+date_regionMatches(const char* s1, int s1off, const jschar* s2, int s2off,
+ int count, int ignoreCase)
+{
+ JSBool result = JS_FALSE;
+ /* return true if matches, otherwise, false */
+
+ while (count > 0 && s1[s1off] && s2[s2off]) {
+ if (ignoreCase) {
+ if (JS_TOLOWER((jschar)s1[s1off]) != JS_TOLOWER(s2[s2off])) {
+ break;
+ }
+ } else {
+ if ((jschar)s1[s1off] != s2[s2off]) {
+ break;
+ }
+ }
+ s1off++;
+ s2off++;
+ count--;
+ }
+
+ if (count == 0) {
+ result = JS_TRUE;
+ }
+
+ return result;
+}
+
+/* find UTC time from given date... no 1900 correction! */
+static jsdouble
+date_msecFromDate(jsdouble year, jsdouble mon, jsdouble mday, jsdouble hour,
+ jsdouble min, jsdouble sec, jsdouble msec)
+{
+ jsdouble day;
+ jsdouble msec_time;
+ jsdouble result;
+
+ day = MakeDay(year, mon, mday);
+ msec_time = MakeTime(hour, min, sec, msec);
+ result = MakeDate(day, msec_time);
+ return result;
+}
+
+/*
+ * See ECMA 15.9.4.[3-10];
+ */
+/* XXX this function must be above date_parseString to avoid a
+ horrid bug in the Win16 1.52 compiler */
+#define MAXARGS 7
+static JSBool
+date_UTC(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble array[MAXARGS];
+ uintN loop;
+ jsdouble d;
+
+ for (loop = 0; loop < MAXARGS; loop++) {
+ if (loop < argc) {
+ if (!js_ValueToNumber(cx, argv[loop], &d))
+ return JS_FALSE;
+ /* return NaN if any arg is NaN */
+ if (!JSDOUBLE_IS_FINITE(d)) {
+ return js_NewNumberValue(cx, d, rval);
+ }
+ array[loop] = floor(d);
+ } else {
+ array[loop] = 0;
+ }
+ }
+
+ /* adjust 2-digit years into the 20th century */
+ if (array[0] >= 0 && array[0] <= 99)
+ array[0] += 1900;
+
+ /* if we got a 0 for 'date' (which is out of range)
+ * pretend it's a 1. (So Date.UTC(1972, 5) works) */
+ if (array[2] < 1)
+ array[2] = 1;
+
+ d = date_msecFromDate(array[0], array[1], array[2],
+ array[3], array[4], array[5], array[6]);
+ d = TIMECLIP(d);
+
+ return js_NewNumberValue(cx, d, rval);
+}
+
+static JSBool
+date_parseString(JSString *str, jsdouble *result)
+{
+ jsdouble msec;
+
+ const jschar *s = JSSTRING_CHARS(str);
+ size_t limit = JSSTRING_LENGTH(str);
+ size_t i = 0;
+ int year = -1;
+ int mon = -1;
+ int mday = -1;
+ int hour = -1;
+ int min = -1;
+ int sec = -1;
+ int c = -1;
+ int n = -1;
+ jsdouble tzoffset = -1; /* was an int, overflowed on win16!!! */
+ int prevc = 0;
+ JSBool seenplusminus = JS_FALSE;
+ int temp;
+ JSBool seenmonthname = JS_FALSE;
+
+ if (limit == 0)
+ goto syntax;
+ while (i < limit) {
+ c = s[i];
+ i++;
+ if (c <= ' ' || c == ',' || c == '-') {
+ if (c == '-' && '0' <= s[i] && s[i] <= '9') {
+ prevc = c;
+ }
+ continue;
+ }
+ if (c == '(') { /* comments) */
+ int depth = 1;
+ while (i < limit) {
+ c = s[i];
+ i++;
+ if (c == '(') depth++;
+ else if (c == ')')
+ if (--depth <= 0)
+ break;
+ }
+ continue;
+ }
+ if ('0' <= c && c <= '9') {
+ n = c - '0';
+ while (i < limit && '0' <= (c = s[i]) && c <= '9') {
+ n = n * 10 + c - '0';
+ i++;
+ }
+
+ /* allow TZA before the year, so
+ * 'Wed Nov 05 21:49:11 GMT-0800 1997'
+ * works */
+
+ /* uses of seenplusminus allow : in TZA, so Java
+ * no-timezone style of GMT+4:30 works
+ */
+
+ if ((prevc == '+' || prevc == '-')/* && year>=0 */) {
+ /* make ':' case below change tzoffset */
+ seenplusminus = JS_TRUE;
+
+ /* offset */
+ if (n < 24)
+ n = n * 60; /* EG. "GMT-3" */
+ else
+ n = n % 100 + n / 100 * 60; /* eg "GMT-0430" */
+ if (prevc == '+') /* plus means east of GMT */
+ n = -n;
+ if (tzoffset != 0 && tzoffset != -1)
+ goto syntax;
+ tzoffset = n;
+ } else if (prevc == '/' && mon >= 0 && mday >= 0 && year < 0) {
+ if (c <= ' ' || c == ',' || c == '/' || i >= limit)
+ year = n;
+ else
+ goto syntax;
+ } else if (c == ':') {
+ if (hour < 0)
+ hour = /*byte*/ n;
+ else if (min < 0)
+ min = /*byte*/ n;
+ else
+ goto syntax;
+ } else if (c == '/') {
+ /* until it is determined that mon is the actual
+ month, keep it as 1-based rather than 0-based */
+ if (mon < 0)
+ mon = /*byte*/ n;
+ else if (mday < 0)
+ mday = /*byte*/ n;
+ else
+ goto syntax;
+ } else if (i < limit && c != ',' && c > ' ' && c != '-' && c != '(') {
+ goto syntax;
+ } else if (seenplusminus && n < 60) { /* handle GMT-3:30 */
+ if (tzoffset < 0)
+ tzoffset -= n;
+ else
+ tzoffset += n;
+ } else if (hour >= 0 && min < 0) {
+ min = /*byte*/ n;
+ } else if (prevc == ':' && min >= 0 && sec < 0) {
+ sec = /*byte*/ n;
+ } else if (mon < 0) {
+ mon = /*byte*/n;
+ } else if (mon >= 0 && mday < 0) {
+ mday = /*byte*/ n;
+ } else if (mon >= 0 && mday >= 0 && year < 0) {
+ year = n;
+ } else {
+ goto syntax;
+ }
+ prevc = 0;
+ } else if (c == '/' || c == ':' || c == '+' || c == '-') {
+ prevc = c;
+ } else {
+ size_t st = i - 1;
+ int k;
+ while (i < limit) {
+ c = s[i];
+ if (!(('A' <= c && c <= 'Z') || ('a' <= c && c <= 'z')))
+ break;
+ i++;
+ }
+ if (i <= st + 1)
+ goto syntax;
+ for (k = (sizeof(wtb)/sizeof(char*)); --k >= 0;)
+ if (date_regionMatches(wtb[k], 0, s, st, i-st, 1)) {
+ int action = ttb[k];
+ if (action != 0) {
+ if (action < 0) {
+ /*
+ * AM/PM. Count 12:30 AM as 00:30, 12:30 PM as
+ * 12:30, instead of blindly adding 12 if PM.
+ */
+ JS_ASSERT(action == -1 || action == -2);
+ if (hour > 12 || hour < 0) {
+ goto syntax;
+ } else {
+ if (action == -1 && hour == 12) { /* am */
+ hour = 0;
+ } else if (action == -2 && hour != 12) { /* pm */
+ hour += 12;
+ }
+ }
+ } else if (action <= 13) { /* month! */
+ /* Adjust mon to be 1-based until the final values
+ for mon, mday and year are adjusted below */
+ if (seenmonthname) {
+ goto syntax;
+ }
+ seenmonthname = JS_TRUE;
+ temp = /*byte*/ (action - 2) + 1;
+
+ if (mon < 0) {
+ mon = temp;
+ } else if (mday < 0) {
+ mday = mon;
+ mon = temp;
+ } else if (year < 0) {
+ year = mon;
+ mon = temp;
+ } else {
+ goto syntax;
+ }
+ } else {
+ tzoffset = action - 10000;
+ }
+ }
+ break;
+ }
+ if (k < 0)
+ goto syntax;
+ prevc = 0;
+ }
+ }
+ if (year < 0 || mon < 0 || mday < 0)
+ goto syntax;
+ /*
+ Case 1. The input string contains an English month name.
+ The form of the string can be month f l, or f month l, or
+ f l month which each evaluate to the same date.
+ If f and l are both greater than or equal to 70, or
+ both less than 70, the date is invalid.
+ The year is taken to be the greater of the values f, l.
+ If the year is greater than or equal to 70 and less than 100,
+ it is considered to be the number of years after 1900.
+ Case 2. The input string is of the form "f/m/l" where f, m and l are
+ integers, e.g. 7/16/45.
+ Adjust the mon, mday and year values to achieve 100% MSIE
+ compatibility.
+ a. If 0 <= f < 70, f/m/l is interpreted as month/day/year.
+ i. If year < 100, it is the number of years after 1900
+ ii. If year >= 100, it is the number of years after 0.
+ b. If 70 <= f < 100
+ i. If m < 70, f/m/l is interpreted as
+ year/month/day where year is the number of years after
+ 1900.
+ ii. If m >= 70, the date is invalid.
+ c. If f >= 100
+ i. If m < 70, f/m/l is interpreted as
+ year/month/day where year is the number of years after 0.
+ ii. If m >= 70, the date is invalid.
+ */
+ if (seenmonthname) {
+ if ((mday >= 70 && year >= 70) || (mday < 70 && year < 70)) {
+ goto syntax;
+ }
+ if (mday > year) {
+ temp = year;
+ year = mday;
+ mday = temp;
+ }
+ if (year >= 70 && year < 100) {
+ year += 1900;
+ }
+ } else if (mon < 70) { /* (a) month/day/year */
+ if (year < 100) {
+ year += 1900;
+ }
+ } else if (mon < 100) { /* (b) year/month/day */
+ if (mday < 70) {
+ temp = year;
+ year = mon + 1900;
+ mon = mday;
+ mday = temp;
+ } else {
+ goto syntax;
+ }
+ } else { /* (c) year/month/day */
+ if (mday < 70) {
+ temp = year;
+ year = mon;
+ mon = mday;
+ mday = temp;
+ } else {
+ goto syntax;
+ }
+ }
+ mon -= 1; /* convert month to 0-based */
+ if (sec < 0)
+ sec = 0;
+ if (min < 0)
+ min = 0;
+ if (hour < 0)
+ hour = 0;
+ if (tzoffset == -1) { /* no time zone specified, have to use local */
+ jsdouble msec_time;
+ msec_time = date_msecFromDate(year, mon, mday, hour, min, sec, 0);
+
+ *result = UTC(msec_time);
+ return JS_TRUE;
+ }
+
+ msec = date_msecFromDate(year, mon, mday, hour, min, sec, 0);
+ msec += tzoffset * msPerMinute;
+ *result = msec;
+ return JS_TRUE;
+
+syntax:
+ /* syntax error */
+ *result = 0;
+ return JS_FALSE;
+}
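The long case analysis in the comment near the end of date_parseString is easier to follow with concrete inputs; assuming the usual Date.parse entry point, the f/m/l rules resolve as follows (illustrative only):

    /* "7/16/45"   -> month/day/year -> 16 Jul 1945  (f < 70, year < 100 => +1900) */
    /* "7/16/2045" -> month/day/year -> 16 Jul 2045  (f < 70, year >= 100)         */
    /* "85/7/16"   -> year/month/day -> 16 Jul 1985  (70 <= f < 100, m < 70)       */
    /* "2045/7/16" -> year/month/day -> 16 Jul 2045  (f >= 100, m < 70)            */
    /* "85/75/16"  -> rejected       (70 <= f < 100 but m >= 70)                   */
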
+
+static JSBool
+date_parse(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str;
+ jsdouble result;
+
+ str = js_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ if (!date_parseString(str, &result)) {
+ *rval = DOUBLE_TO_JSVAL(cx->runtime->jsNaN);
+ return JS_TRUE;
+ }
+
+ result = TIMECLIP(result);
+ return js_NewNumberValue(cx, result, rval);
+}
+
+static JSBool
+date_now(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ int64 us, ms, us2ms;
+ jsdouble msec_time;
+
+ us = PRMJ_Now();
+ JSLL_UI2L(us2ms, PRMJ_USEC_PER_MSEC);
+ JSLL_DIV(ms, us, us2ms);
+ JSLL_L2D(msec_time, ms);
+
+ return js_NewDoubleValue(cx, msec_time, rval);
+}
+
+/*
+ * Check that obj is an object of class Date, and get the date value.
+ * Return NULL on failure.
+ */
+static jsdouble *
+date_getProlog(JSContext *cx, JSObject *obj, jsval *argv)
+{
+ if (!JS_InstanceOf(cx, obj, &js_DateClass, argv))
+ return NULL;
+ return JSVAL_TO_DOUBLE(OBJ_GET_SLOT(cx, obj, JSSLOT_PRIVATE));
+}
+
+/*
+ * See ECMA 15.9.5.4 thru 15.9.5.23
+ */
+static JSBool
+date_getTime(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+
+ return js_NewNumberValue(cx, *date, rval);
+}
+
+static JSBool
+date_getYear(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble *date;
+ jsdouble result;
+
+ date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+
+ result = *date;
+ if (!JSDOUBLE_IS_FINITE(result))
+ return js_NewNumberValue(cx, result, rval);
+
+ result = YearFromTime(LocalTime(result));
+
+ /* Follow ECMA-262 to the letter, contrary to IE JScript. */
+ result -= 1900;
+ return js_NewNumberValue(cx, result, rval);
+}
+
+static JSBool
+date_getFullYear(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsdouble result;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ result = *date;
+
+ if (!JSDOUBLE_IS_FINITE(result))
+ return js_NewNumberValue(cx, result, rval);
+
+ result = YearFromTime(LocalTime(result));
+ return js_NewNumberValue(cx, result, rval);
+}
+
+static JSBool
+date_getUTCFullYear(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsdouble result;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ result = *date;
+
+ if (!JSDOUBLE_IS_FINITE(result))
+ return js_NewNumberValue(cx, result, rval);
+
+ result = YearFromTime(result);
+ return js_NewNumberValue(cx, result, rval);
+}
+
+static JSBool
+date_getMonth(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsdouble result;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ result = *date;
+
+ if (!JSDOUBLE_IS_FINITE(result))
+ return js_NewNumberValue(cx, result, rval);
+
+ result = MonthFromTime(LocalTime(result));
+ return js_NewNumberValue(cx, result, rval);
+}
+
+static JSBool
+date_getUTCMonth(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsdouble result;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ result = *date;
+
+ if (!JSDOUBLE_IS_FINITE(result))
+ return js_NewNumberValue(cx, result, rval);
+
+ result = MonthFromTime(result);
+ return js_NewNumberValue(cx, result, rval);
+}
+
+static JSBool
+date_getDate(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble result;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ result = *date;
+
+ if (!JSDOUBLE_IS_FINITE(result))
+ return js_NewNumberValue(cx, result, rval);
+
+ result = LocalTime(result);
+ result = DateFromTime(result);
+ return js_NewNumberValue(cx, result, rval);
+}
+
+static JSBool
+date_getUTCDate(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsdouble result;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ result = *date;
+
+ if (!JSDOUBLE_IS_FINITE(result))
+ return js_NewNumberValue(cx, result, rval);
+
+ result = DateFromTime(result);
+ return js_NewNumberValue(cx, result, rval);
+}
+
+static JSBool
+date_getDay(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble result;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ result = *date;
+
+ if (!JSDOUBLE_IS_FINITE(result))
+ return js_NewNumberValue(cx, result, rval);
+
+ result = LocalTime(result);
+ result = WeekDay(result);
+ return js_NewNumberValue(cx, result, rval);
+}
+
+static JSBool
+date_getUTCDay(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsdouble result;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ result = *date;
+
+ if (!JSDOUBLE_IS_FINITE(result))
+ return js_NewNumberValue(cx, result, rval);
+
+ result = WeekDay(result);
+ return js_NewNumberValue(cx, result, rval);
+}
+
+static JSBool
+date_getHours(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsdouble result;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ result = *date;
+
+ if (!JSDOUBLE_IS_FINITE(result))
+ return js_NewNumberValue(cx, result, rval);
+
+ result = HourFromTime(LocalTime(result));
+ return js_NewNumberValue(cx, result, rval);
+}
+
+static JSBool
+date_getUTCHours(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsdouble result;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ result = *date;
+
+ if (!JSDOUBLE_IS_FINITE(result))
+ return js_NewNumberValue(cx, result, rval);
+
+ result = HourFromTime(result);
+ return js_NewNumberValue(cx, result, rval);
+}
+
+static JSBool
+date_getMinutes(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsdouble result;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ result = *date;
+
+ if (!JSDOUBLE_IS_FINITE(result))
+ return js_NewNumberValue(cx, result, rval);
+
+ result = MinFromTime(LocalTime(result));
+ return js_NewNumberValue(cx, result, rval);
+}
+
+static JSBool
+date_getUTCMinutes(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsdouble result;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ result = *date;
+
+ if (!JSDOUBLE_IS_FINITE(result))
+ return js_NewNumberValue(cx, result, rval);
+
+ result = MinFromTime(result);
+ return js_NewNumberValue(cx, result, rval);
+}
+
+/* Date.getSeconds is mapped to getUTCSeconds */
+
+static JSBool
+date_getUTCSeconds(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsdouble result;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ result = *date;
+
+ if (!JSDOUBLE_IS_FINITE(result))
+ return js_NewNumberValue(cx, result, rval);
+
+ result = SecFromTime(result);
+ return js_NewNumberValue(cx, result, rval);
+}
+
+/* Date.getMilliseconds is mapped to getUTCMilliseconds */
+
+static JSBool
+date_getUTCMilliseconds(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsdouble result;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ result = *date;
+
+ if (!JSDOUBLE_IS_FINITE(result))
+ return js_NewNumberValue(cx, result, rval);
+
+ result = msFromTime(result);
+ return js_NewNumberValue(cx, result, rval);
+}
+
+static JSBool
+date_getTimezoneOffset(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsdouble result;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ result = *date;
+
+ /*
+ * Return the time zone offset in minutes for the current locale
+ * that is appropriate for this time. This value would be a
+ * constant except for daylight savings time.
+ */
+ result = (result - LocalTime(result)) / msPerMinute;
+ return js_NewNumberValue(cx, result, rval);
+}
+
+static JSBool
+date_setTime(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble result;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+
+ if (!js_ValueToNumber(cx, argv[0], &result))
+ return JS_FALSE;
+
+ result = TIMECLIP(result);
+
+ *date = result;
+ return js_NewNumberValue(cx, result, rval);
+}
+
+static JSBool
+date_makeTime(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ uintN maxargs, JSBool local, jsval *rval)
+{
+ uintN i;
+ jsdouble args[4], *argp, *stop;
+ jsdouble hour, min, sec, msec;
+ jsdouble lorutime; /* Local or UTC version of *date */
+
+ jsdouble msec_time;
+ jsdouble result;
+
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+
+ result = *date;
+
+ /* just return NaN if the date is already NaN */
+ if (!JSDOUBLE_IS_FINITE(result))
+ return js_NewNumberValue(cx, result, rval);
+
+ /* Satisfy the ECMA rule that if a function is called with
+ * fewer arguments than the specified formal arguments, the
+ * remaining arguments are set to undefined. Seems like all
+ * the Date.setWhatever functions in ECMA are only varargs
+ * beyond the first argument; this should be set to undefined
+ * if it's not given. This means that "d = new Date();
+ * d.setMilliseconds()" returns NaN. Blech.
+ */
+ if (argc == 0)
+ argc = 1; /* should be safe, because length of all setters is 1 */
+ else if (argc > maxargs)
+ argc = maxargs; /* clamp argc */
+
+ for (i = 0; i < argc; i++) {
+ if (!js_ValueToNumber(cx, argv[i], &args[i]))
+ return JS_FALSE;
+ if (!JSDOUBLE_IS_FINITE(args[i])) {
+ *date = *cx->runtime->jsNaN;
+ return js_NewNumberValue(cx, *date, rval);
+ }
+ args[i] = js_DoubleToInteger(args[i]);
+ }
+
+ if (local)
+ lorutime = LocalTime(result);
+ else
+ lorutime = result;
+
+ argp = args;
+ stop = argp + argc;
+ if (maxargs >= 4 && argp < stop)
+ hour = *argp++;
+ else
+ hour = HourFromTime(lorutime);
+
+ if (maxargs >= 3 && argp < stop)
+ min = *argp++;
+ else
+ min = MinFromTime(lorutime);
+
+ if (maxargs >= 2 && argp < stop)
+ sec = *argp++;
+ else
+ sec = SecFromTime(lorutime);
+
+ if (maxargs >= 1 && argp < stop)
+ msec = *argp;
+ else
+ msec = msFromTime(lorutime);
+
+ msec_time = MakeTime(hour, min, sec, msec);
+ result = MakeDate(Day(lorutime), msec_time);
+
+/* fprintf(stderr, "%f\n", result); */
+
+ if (local)
+ result = UTC(result);
+
+/* fprintf(stderr, "%f\n", result); */
+
+ *date = TIMECLIP(result);
+ return js_NewNumberValue(cx, *date, rval);
+}
+
+static JSBool
+date_setMilliseconds(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ return date_makeTime(cx, obj, argc, argv, 1, JS_TRUE, rval);
+}
+
+static JSBool
+date_setUTCMilliseconds(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ return date_makeTime(cx, obj, argc, argv, 1, JS_FALSE, rval);
+}
+
+static JSBool
+date_setSeconds(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ return date_makeTime(cx, obj, argc, argv, 2, JS_TRUE, rval);
+}
+
+static JSBool
+date_setUTCSeconds(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ return date_makeTime(cx, obj, argc, argv, 2, JS_FALSE, rval);
+}
+
+static JSBool
+date_setMinutes(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ return date_makeTime(cx, obj, argc, argv, 3, JS_TRUE, rval);
+}
+
+static JSBool
+date_setUTCMinutes(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ return date_makeTime(cx, obj, argc, argv, 3, JS_FALSE, rval);
+}
+
+static JSBool
+date_setHours(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ return date_makeTime(cx, obj, argc, argv, 4, JS_TRUE, rval);
+}
+
+static JSBool
+date_setUTCHours(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ return date_makeTime(cx, obj, argc, argv, 4, JS_FALSE, rval);
+}
+
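+/*
+ * Shared helper for the setDate/setMonth/setFullYear family below,
+ * analogous to date_makeTime: maxargs 3 = year..day, 2 = month and day,
+ * 1 = day only.  If the stored date is NaN, only the full-year setters
+ * (maxargs == 3) proceed, starting from time +0; the others return NaN.
+ */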
+static JSBool
+date_makeDate(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, uintN maxargs, JSBool local, jsval *rval)
+{
+ uintN i;
+ jsdouble lorutime; /* local or UTC version of *date */
+ jsdouble args[3], *argp, *stop;
+ jsdouble year, month, day;
+ jsdouble result;
+
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+
+ result = *date;
+
+    /* see complaint about ECMA in date_makeTime */
+ if (argc == 0)
+ argc = 1; /* should be safe, because length of all setters is 1 */
+ else if (argc > maxargs)
+ argc = maxargs; /* clamp argc */
+
+ for (i = 0; i < argc; i++) {
+ if (!js_ValueToNumber(cx, argv[i], &args[i]))
+ return JS_FALSE;
+ if (!JSDOUBLE_IS_FINITE(args[i])) {
+ *date = *cx->runtime->jsNaN;
+ return js_NewNumberValue(cx, *date, rval);
+ }
+ args[i] = js_DoubleToInteger(args[i]);
+ }
+
+ /* return NaN if date is NaN and we're not setting the year,
+ * If we are, use 0 as the time. */
+ if (!(JSDOUBLE_IS_FINITE(result))) {
+ if (maxargs < 3)
+ return js_NewNumberValue(cx, result, rval);
+ else
+ lorutime = +0.;
+ } else {
+ if (local)
+ lorutime = LocalTime(result);
+ else
+ lorutime = result;
+ }
+
+ argp = args;
+ stop = argp + argc;
+ if (maxargs >= 3 && argp < stop)
+ year = *argp++;
+ else
+ year = YearFromTime(lorutime);
+
+ if (maxargs >= 2 && argp < stop)
+ month = *argp++;
+ else
+ month = MonthFromTime(lorutime);
+
+ if (maxargs >= 1 && argp < stop)
+ day = *argp++;
+ else
+ day = DateFromTime(lorutime);
+
+ day = MakeDay(year, month, day); /* day within year */
+ result = MakeDate(day, TimeWithinDay(lorutime));
+
+ if (local)
+ result = UTC(result);
+
+ *date = TIMECLIP(result);
+ return js_NewNumberValue(cx, *date, rval);
+}
+
+static JSBool
+date_setDate(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ return date_makeDate(cx, obj, argc, argv, 1, JS_TRUE, rval);
+}
+
+static JSBool
+date_setUTCDate(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ return date_makeDate(cx, obj, argc, argv, 1, JS_FALSE, rval);
+}
+
+static JSBool
+date_setMonth(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ return date_makeDate(cx, obj, argc, argv, 2, JS_TRUE, rval);
+}
+
+static JSBool
+date_setUTCMonth(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ return date_makeDate(cx, obj, argc, argv, 2, JS_FALSE, rval);
+}
+
+static JSBool
+date_setFullYear(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ return date_makeDate(cx, obj, argc, argv, 3, JS_TRUE, rval);
+}
+
+static JSBool
+date_setUTCFullYear(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ return date_makeDate(cx, obj, argc, argv, 3, JS_FALSE, rval);
+}
+
+static JSBool
+date_setYear(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ jsdouble t;
+ jsdouble year;
+ jsdouble day;
+ jsdouble result;
+
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+
+ result = *date;
+
+ if (!js_ValueToNumber(cx, argv[0], &year))
+ return JS_FALSE;
+ if (!JSDOUBLE_IS_FINITE(year)) {
+ *date = *cx->runtime->jsNaN;
+ return js_NewNumberValue(cx, *date, rval);
+ }
+
+ year = js_DoubleToInteger(year);
+
+ if (!JSDOUBLE_IS_FINITE(result)) {
+ t = +0.0;
+ } else {
+ t = LocalTime(result);
+ }
+
+ if (year >= 0 && year <= 99)
+ year += 1900;
+
+ day = MakeDay(year, MonthFromTime(t), DateFromTime(t));
+ result = MakeDate(day, TimeWithinDay(t));
+ result = UTC(result);
+
+ *date = TIMECLIP(result);
+ return js_NewNumberValue(cx, *date, rval);
+}
+
+/* constants for toString, toUTCString */
+static char js_NaN_date_str[] = "Invalid Date";
+static const char* days[] =
+{
+ "Sun","Mon","Tue","Wed","Thu","Fri","Sat"
+};
+static const char* months[] =
+{
+ "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"
+};
+
+static JSBool
+date_toGMTString(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ char buf[100];
+ JSString *str;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+
+ if (!JSDOUBLE_IS_FINITE(*date)) {
+ JS_snprintf(buf, sizeof buf, js_NaN_date_str);
+ } else {
+ jsdouble temp = *date;
+
+ /* Avoid dependence on PRMJ_FormatTimeUSEnglish, because it
+ * requires a PRMJTime... which only has 16-bit years. Sub-ECMA.
+ */
+ JS_snprintf(buf, sizeof buf, "%s, %.2d %s %.4d %.2d:%.2d:%.2d GMT",
+ days[WeekDay(temp)],
+ DateFromTime(temp),
+ months[MonthFromTime(temp)],
+ YearFromTime(temp),
+ HourFromTime(temp),
+ MinFromTime(temp),
+ SecFromTime(temp));
+ }
+ str = JS_NewStringCopyZ(cx, buf);
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+/* for Date.toLocaleString; interface to PRMJTime date struct.
+ * If findEquivalent is true, then try to map the year to an equivalent year
+ * that's in range.
+ */
+static void
+new_explode(jsdouble timeval, PRMJTime *split, JSBool findEquivalent)
+{
+ jsint year = YearFromTime(timeval);
+ int16 adjustedYear;
+
+ /* If the year doesn't fit in a PRMJTime, find something to do about it. */
+ if (year > 32767 || year < -32768) {
+ if (findEquivalent) {
+ /* We're really just trying to get a timezone string; map the year
+ * to some equivalent year in the range 0 to 2800. Borrowed from
+ * A. D. Olsen.
+ */
+ jsint cycles;
+#define CYCLE_YEARS 2800L
+ cycles = (year >= 0) ? year / CYCLE_YEARS
+ : -1 - (-1 - year) / CYCLE_YEARS;
+ adjustedYear = (int16)(year - cycles * CYCLE_YEARS);
+ } else {
+ /* Clamp it to the nearest representable year. */
+ adjustedYear = (int16)((year > 0) ? 32767 : - 32768);
+ }
+ } else {
+ adjustedYear = (int16)year;
+ }
+
+ split->tm_usec = (int32) msFromTime(timeval) * 1000;
+ split->tm_sec = (int8) SecFromTime(timeval);
+ split->tm_min = (int8) MinFromTime(timeval);
+ split->tm_hour = (int8) HourFromTime(timeval);
+ split->tm_mday = (int8) DateFromTime(timeval);
+ split->tm_mon = (int8) MonthFromTime(timeval);
+ split->tm_wday = (int8) WeekDay(timeval);
+ split->tm_year = (int16) adjustedYear;
+ split->tm_yday = (int16) DayWithinYear(timeval, year);
+
+ /* not sure how this affects things, but it doesn't seem
+ to matter. */
+ split->tm_isdst = (DaylightSavingTA(timeval) != 0);
+}
+
+typedef enum formatspec {
+ FORMATSPEC_FULL, FORMATSPEC_DATE, FORMATSPEC_TIME
+} formatspec;
+
+/* helper function */
+static JSBool
+date_format(JSContext *cx, jsdouble date, formatspec format, jsval *rval)
+{
+ char buf[100];
+ JSString *str;
+ char tzbuf[100];
+ JSBool usetz;
+ size_t i, tzlen;
+ PRMJTime split;
+
+ if (!JSDOUBLE_IS_FINITE(date)) {
+ JS_snprintf(buf, sizeof buf, js_NaN_date_str);
+ } else {
+ jsdouble local = LocalTime(date);
+
+ /* offset from GMT in minutes. The offset includes daylight savings,
+ if it applies. */
+ jsint minutes = (jsint) floor(AdjustTime(date) / msPerMinute);
+
+ /* map 510 minutes to 0830 hours */
+ intN offset = (minutes / 60) * 100 + minutes % 60;
+
+ /* print as "Wed Nov 05 19:38:03 GMT-0800 (PST) 1997" The TZA is
+ * printed as 'GMT-0800' rather than as 'PST' to avoid
+ * operating-system dependence on strftime (which
+ * PRMJ_FormatTimeUSEnglish calls, for %Z only.) win32 prints
+ * PST as 'Pacific Standard Time.' This way we always know
+ * what we're getting, and can parse it if we produce it.
+ * The OS TZA string is included as a comment.
+ */
+
+ /* get a timezone string from the OS to include as a
+ comment. */
+ new_explode(date, &split, JS_TRUE);
+ if (PRMJ_FormatTime(tzbuf, sizeof tzbuf, "(%Z)", &split) != 0) {
+
+ /* Decide whether to use the resulting timezone string.
+ *
+ * Reject it if it contains any non-ASCII, non-alphanumeric
+ * characters. It's then likely in some other character
+ * encoding, and we probably won't display it correctly.
+ */
+ usetz = JS_TRUE;
+ tzlen = strlen(tzbuf);
+ if (tzlen > 100) {
+ usetz = JS_FALSE;
+ } else {
+ for (i = 0; i < tzlen; i++) {
+ jschar c = tzbuf[i];
+ if (c > 127 ||
+ !(isalpha(c) || isdigit(c) ||
+ c == ' ' || c == '(' || c == ')')) {
+ usetz = JS_FALSE;
+ }
+ }
+ }
+
+ /* Also reject it if it's not parenthesized or if it's '()'. */
+ if (tzbuf[0] != '(' || tzbuf[1] == ')')
+ usetz = JS_FALSE;
+ } else
+ usetz = JS_FALSE;
+
+ switch (format) {
+ case FORMATSPEC_FULL:
+ /*
+ * Avoid dependence on PRMJ_FormatTimeUSEnglish, because it
+ * requires a PRMJTime... which only has 16-bit years. Sub-ECMA.
+ */
+ /* Tue Oct 31 2000 09:41:40 GMT-0800 (PST) */
+ JS_snprintf(buf, sizeof buf,
+ "%s %s %.2d %.4d %.2d:%.2d:%.2d GMT%+.4d%s%s",
+ days[WeekDay(local)],
+ months[MonthFromTime(local)],
+ DateFromTime(local),
+ YearFromTime(local),
+ HourFromTime(local),
+ MinFromTime(local),
+ SecFromTime(local),
+ offset,
+ usetz ? " " : "",
+ usetz ? tzbuf : "");
+ break;
+ case FORMATSPEC_DATE:
+ /* Tue Oct 31 2000 */
+ JS_snprintf(buf, sizeof buf,
+ "%s %s %.2d %.4d",
+ days[WeekDay(local)],
+ months[MonthFromTime(local)],
+ DateFromTime(local),
+ YearFromTime(local));
+ break;
+ case FORMATSPEC_TIME:
+ /* 09:41:40 GMT-0800 (PST) */
+ JS_snprintf(buf, sizeof buf,
+ "%.2d:%.2d:%.2d GMT%+.4d%s%s",
+ HourFromTime(local),
+ MinFromTime(local),
+ SecFromTime(local),
+ offset,
+ usetz ? " " : "",
+ usetz ? tzbuf : "");
+ break;
+ }
+ }
+
+ str = JS_NewStringCopyZ(cx, buf);
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+date_toLocaleHelper(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval, char *format)
+{
+ char buf[100];
+ JSString *str;
+ PRMJTime split;
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+
+ if (!JSDOUBLE_IS_FINITE(*date)) {
+ JS_snprintf(buf, sizeof buf, js_NaN_date_str);
+ } else {
+ intN result_len;
+ jsdouble local = LocalTime(*date);
+ new_explode(local, &split, JS_FALSE);
+
+ /* let PRMJTime format it. */
+ result_len = PRMJ_FormatTime(buf, sizeof buf, format, &split);
+
+ /* If it failed, default to toString. */
+ if (result_len == 0)
+ return date_format(cx, *date, FORMATSPEC_FULL, rval);
+
+ /* Hacked check against undesired 2-digit year 00/00/00 form. */
+ if (strcmp(format, "%x") == 0 && result_len >= 6 &&
+ /* Format %x means use OS settings, which may have 2-digit yr, so
+ hack end of 3/11/22 or 11.03.22 or 11Mar22 to use 4-digit yr...*/
+ !isdigit(buf[result_len - 3]) &&
+ isdigit(buf[result_len - 2]) && isdigit(buf[result_len - 1]) &&
+ /* ...but not if starts with 4-digit year, like 2022/3/11. */
+ !(isdigit(buf[0]) && isdigit(buf[1]) &&
+ isdigit(buf[2]) && isdigit(buf[3]))) {
+ JS_snprintf(buf + (result_len - 2), (sizeof buf) - (result_len - 2),
+ "%d", js_DateGetYear(cx, obj));
+ }
+
+ }
+
+ if (cx->localeCallbacks && cx->localeCallbacks->localeToUnicode)
+ return cx->localeCallbacks->localeToUnicode(cx, buf, rval);
+
+ str = JS_NewStringCopyZ(cx, buf);
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+date_toLocaleString(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ /* Use '%#c' for windows, because '%c' is
+ * backward-compatible and non-y2k with msvc; '%#c' requests that a
+ * full year be used in the result string.
+ */
+ return date_toLocaleHelper(cx, obj, argc, argv, rval,
+#if defined(_WIN32) && !defined(__MWERKS__)
+ "%#c"
+#else
+ "%c"
+#endif
+ );
+}
+
+static JSBool
+date_toLocaleDateString(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ /* Use '%#x' for windows, because '%x' is
+ * backward-compatible and non-y2k with msvc; '%#x' requests that a
+ * full year be used in the result string.
+ */
+ return date_toLocaleHelper(cx, obj, argc, argv, rval,
+#if defined(_WIN32) && !defined(__MWERKS__)
+ "%#x"
+#else
+ "%x"
+#endif
+ );
+}
+
+static JSBool
+date_toLocaleTimeString(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ return date_toLocaleHelper(cx, obj, argc, argv, rval, "%X");
+}
+
+static JSBool
+date_toLocaleFormat(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *fmt;
+
+ if (argc == 0)
+ return date_toLocaleString(cx, obj, argc, argv, rval);
+
+ fmt = JS_ValueToString(cx, argv[0]);
+ if (!fmt)
+ return JS_FALSE;
+
+ return date_toLocaleHelper(cx, obj, argc, argv, rval,
+ JS_GetStringBytes(fmt));
+}
+
+static JSBool
+date_toTimeString(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ return date_format(cx, *date, FORMATSPEC_TIME, rval);
+}
+
+static JSBool
+date_toDateString(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ return date_format(cx, *date, FORMATSPEC_DATE, rval);
+}
+
+#if JS_HAS_TOSOURCE
+#include <string.h>
+#include "jsdtoa.h"
+
+static JSBool
+date_toSource(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsdouble *date;
+ char buf[DTOSTR_STANDARD_BUFFER_SIZE], *numStr, *bytes;
+ JSString *str;
+
+ date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+
+ numStr = JS_dtostr(buf, sizeof buf, DTOSTR_STANDARD, 0, *date);
+ if (!numStr) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+
+ bytes = JS_smprintf("(new %s(%s))", js_Date_str, numStr);
+ if (!bytes) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+
+ str = JS_NewString(cx, bytes, strlen(bytes));
+ if (!str) {
+ free(bytes);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+#endif
+
+static JSBool
+date_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsdouble *date = date_getProlog(cx, obj, argv);
+ if (!date)
+ return JS_FALSE;
+ return date_format(cx, *date, FORMATSPEC_FULL, rval);
+}
+
+static JSBool
+date_valueOf(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ /* It is an error to call date_valueOf on a non-date object, but we don't
+ * need to check for that explicitly here because every path calls
+ * date_getProlog, which does the check.
+ */
+
+ /* If called directly with no arguments, convert to a time number. */
+ if (argc == 0)
+ return date_getTime(cx, obj, argc, argv, rval);
+
+ /* Convert to number only if the hint was given, otherwise favor string. */
+ if (argc == 1) {
+ JSString *str, *str2;
+
+ str = js_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ str2 = ATOM_TO_STRING(cx->runtime->atomState.typeAtoms[JSTYPE_NUMBER]);
+ if (js_EqualStrings(str, str2))
+ return date_getTime(cx, obj, argc, argv, rval);
+ }
+ return date_toString(cx, obj, argc, argv, rval);
+}
+
+
+/*
+ * creation and destruction
+ */
+
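+/* Each JSFunctionSpec entry below is {name, native, nargs, flags, extra}. */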
+static JSFunctionSpec date_static_methods[] = {
+ {"UTC", date_UTC, MAXARGS,0,0 },
+ {"parse", date_parse, 1,0,0 },
+ {"now", date_now, 0,0,0 },
+ {0,0,0,0,0}
+};
+
+static JSFunctionSpec date_methods[] = {
+ {"getTime", date_getTime, 0,0,0 },
+ {"getTimezoneOffset", date_getTimezoneOffset, 0,0,0 },
+ {"getYear", date_getYear, 0,0,0 },
+ {"getFullYear", date_getFullYear, 0,0,0 },
+ {"getUTCFullYear", date_getUTCFullYear, 0,0,0 },
+ {"getMonth", date_getMonth, 0,0,0 },
+ {"getUTCMonth", date_getUTCMonth, 0,0,0 },
+ {"getDate", date_getDate, 0,0,0 },
+ {"getUTCDate", date_getUTCDate, 0,0,0 },
+ {"getDay", date_getDay, 0,0,0 },
+ {"getUTCDay", date_getUTCDay, 0,0,0 },
+ {"getHours", date_getHours, 0,0,0 },
+ {"getUTCHours", date_getUTCHours, 0,0,0 },
+ {"getMinutes", date_getMinutes, 0,0,0 },
+ {"getUTCMinutes", date_getUTCMinutes, 0,0,0 },
+ {"getSeconds", date_getUTCSeconds, 0,0,0 },
+ {"getUTCSeconds", date_getUTCSeconds, 0,0,0 },
+ {"getMilliseconds", date_getUTCMilliseconds,0,0,0 },
+ {"getUTCMilliseconds", date_getUTCMilliseconds,0,0,0 },
+ {"setTime", date_setTime, 1,0,0 },
+ {"setYear", date_setYear, 1,0,0 },
+ {"setFullYear", date_setFullYear, 3,0,0 },
+ {"setUTCFullYear", date_setUTCFullYear, 3,0,0 },
+ {"setMonth", date_setMonth, 2,0,0 },
+ {"setUTCMonth", date_setUTCMonth, 2,0,0 },
+ {"setDate", date_setDate, 1,0,0 },
+ {"setUTCDate", date_setUTCDate, 1,0,0 },
+ {"setHours", date_setHours, 4,0,0 },
+ {"setUTCHours", date_setUTCHours, 4,0,0 },
+ {"setMinutes", date_setMinutes, 3,0,0 },
+ {"setUTCMinutes", date_setUTCMinutes, 3,0,0 },
+ {"setSeconds", date_setSeconds, 2,0,0 },
+ {"setUTCSeconds", date_setUTCSeconds, 2,0,0 },
+ {"setMilliseconds", date_setMilliseconds, 1,0,0 },
+ {"setUTCMilliseconds", date_setUTCMilliseconds,1,0,0 },
+ {"toUTCString", date_toGMTString, 0,0,0 },
+ {js_toLocaleString_str, date_toLocaleString, 0,0,0 },
+ {"toLocaleDateString", date_toLocaleDateString,0,0,0 },
+ {"toLocaleTimeString", date_toLocaleTimeString,0,0,0 },
+ {"toLocaleFormat", date_toLocaleFormat, 1,0,0 },
+ {"toDateString", date_toDateString, 0,0,0 },
+ {"toTimeString", date_toTimeString, 0,0,0 },
+#if JS_HAS_TOSOURCE
+ {js_toSource_str, date_toSource, 0,0,0 },
+#endif
+ {js_toString_str, date_toString, 0,0,0 },
+ {js_valueOf_str, date_valueOf, 0,0,0 },
+ {0,0,0,0,0}
+};
+
+static jsdouble *
+date_constructor(JSContext *cx, JSObject* obj)
+{
+ jsdouble *date;
+
+ date = js_NewDouble(cx, 0.0, 0);
+ if (!date)
+ return NULL;
+ OBJ_SET_SLOT(cx, obj, JSSLOT_PRIVATE, DOUBLE_TO_JSVAL(date));
+ return date;
+}
+
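+/*
+ * Date invoked as a plain function returns the current time formatted as
+ * a string.  As a constructor it accepts no arguments (current time), a
+ * single argument (millisecond count, or a string to parse), or up to
+ * MAXARGS numeric components (year, month, day, hours, minutes, seconds,
+ * msec) interpreted in local time.
+ */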
+static JSBool
+Date(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble *date;
+ JSString *str;
+ jsdouble d;
+
+ /* Date called as function. */
+ if (!(cx->fp->flags & JSFRAME_CONSTRUCTING)) {
+ int64 us, ms, us2ms;
+ jsdouble msec_time;
+
+ /* NSPR 2.0 docs say 'We do not support PRMJ_NowMS and PRMJ_NowS',
+ * so compute ms from PRMJ_Now.
+ */
+ us = PRMJ_Now();
+ JSLL_UI2L(us2ms, PRMJ_USEC_PER_MSEC);
+ JSLL_DIV(ms, us, us2ms);
+ JSLL_L2D(msec_time, ms);
+
+ return date_format(cx, msec_time, FORMATSPEC_FULL, rval);
+ }
+
+ /* Date called as constructor. */
+ if (argc == 0) {
+ int64 us, ms, us2ms;
+ jsdouble msec_time;
+
+ date = date_constructor(cx, obj);
+ if (!date)
+ return JS_FALSE;
+
+ us = PRMJ_Now();
+ JSLL_UI2L(us2ms, PRMJ_USEC_PER_MSEC);
+ JSLL_DIV(ms, us, us2ms);
+ JSLL_L2D(msec_time, ms);
+
+ *date = msec_time;
+ } else if (argc == 1) {
+ if (!JSVAL_IS_STRING(argv[0])) {
+ /* the argument is a millisecond number */
+ if (!js_ValueToNumber(cx, argv[0], &d))
+ return JS_FALSE;
+ date = date_constructor(cx, obj);
+ if (!date)
+ return JS_FALSE;
+ *date = TIMECLIP(d);
+ } else {
+ /* the argument is a string; parse it. */
+ date = date_constructor(cx, obj);
+ if (!date)
+ return JS_FALSE;
+
+ str = js_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+
+ if (!date_parseString(str, date))
+ *date = *cx->runtime->jsNaN;
+ *date = TIMECLIP(*date);
+ }
+ } else {
+ jsdouble array[MAXARGS];
+ uintN loop;
+ jsdouble double_arg;
+ jsdouble day;
+ jsdouble msec_time;
+
+ for (loop = 0; loop < MAXARGS; loop++) {
+ if (loop < argc) {
+ if (!js_ValueToNumber(cx, argv[loop], &double_arg))
+ return JS_FALSE;
+ /* if any arg is NaN, make a NaN date object
+ and return */
+ if (!JSDOUBLE_IS_FINITE(double_arg)) {
+ date = date_constructor(cx, obj);
+ if (!date)
+ return JS_FALSE;
+ *date = *cx->runtime->jsNaN;
+ return JS_TRUE;
+ }
+ array[loop] = js_DoubleToInteger(double_arg);
+ } else {
+ if (loop == 2) {
+ array[loop] = 1; /* Default the date argument to 1. */
+ } else {
+ array[loop] = 0;
+ }
+ }
+ }
+
+ date = date_constructor(cx, obj);
+ if (!date)
+ return JS_FALSE;
+
+ /* adjust 2-digit years into the 20th century */
+ if (array[0] >= 0 && array[0] <= 99)
+ array[0] += 1900;
+
+ day = MakeDay(array[0], array[1], array[2]);
+ msec_time = MakeTime(array[3], array[4], array[5], array[6]);
+ msec_time = MakeDate(day, msec_time);
+ msec_time = UTC(msec_time);
+ *date = TIMECLIP(msec_time);
+ }
+ return JS_TRUE;
+}
+
+JSObject *
+js_InitDateClass(JSContext *cx, JSObject *obj)
+{
+ JSObject *proto;
+ jsdouble *proto_date;
+
+ /* set static LocalTZA */
+ LocalTZA = -(PRMJ_LocalGMTDifference() * msPerSecond);
+ proto = JS_InitClass(cx, obj, NULL, &js_DateClass, Date, MAXARGS,
+ NULL, date_methods, NULL, date_static_methods);
+ if (!proto)
+ return NULL;
+
+ /* Alias toUTCString with toGMTString. (ECMA B.2.6) */
+ if (!JS_AliasProperty(cx, proto, "toUTCString", "toGMTString"))
+ return NULL;
+
+ /* Set the value of the Date.prototype date to NaN */
+ proto_date = date_constructor(cx, proto);
+ if (!proto_date)
+ return NULL;
+ *proto_date = *cx->runtime->jsNaN;
+
+ return proto;
+}
+
+JS_FRIEND_API(JSObject *)
+js_NewDateObjectMsec(JSContext *cx, jsdouble msec_time)
+{
+ JSObject *obj;
+ jsdouble *date;
+
+ obj = js_NewObject(cx, &js_DateClass, NULL, NULL);
+ if (!obj)
+ return NULL;
+
+ date = date_constructor(cx, obj);
+ if (!date)
+ return NULL;
+
+ *date = msec_time;
+ return obj;
+}
+
+JS_FRIEND_API(JSObject *)
+js_NewDateObject(JSContext* cx, int year, int mon, int mday,
+ int hour, int min, int sec)
+{
+ JSObject *obj;
+ jsdouble msec_time;
+
+ msec_time = date_msecFromDate(year, mon, mday, hour, min, sec, 0);
+ obj = js_NewDateObjectMsec(cx, UTC(msec_time));
+ return obj;
+}
+
+JS_FRIEND_API(JSBool)
+js_DateIsValid(JSContext *cx, JSObject* obj)
+{
+ jsdouble *date = date_getProlog(cx, obj, NULL);
+
+ if (!date || JSDOUBLE_IS_NaN(*date))
+ return JS_FALSE;
+ else
+ return JS_TRUE;
+}
+
+JS_FRIEND_API(int)
+js_DateGetYear(JSContext *cx, JSObject* obj)
+{
+ jsdouble *date = date_getProlog(cx, obj, NULL);
+
+ /* Preserve legacy API behavior of returning 0 for invalid dates. */
+ if (!date || JSDOUBLE_IS_NaN(*date))
+ return 0;
+ return (int) YearFromTime(LocalTime(*date));
+}
+
+JS_FRIEND_API(int)
+js_DateGetMonth(JSContext *cx, JSObject* obj)
+{
+ jsdouble *date = date_getProlog(cx, obj, NULL);
+
+ if (!date || JSDOUBLE_IS_NaN(*date))
+ return 0;
+ return (int) MonthFromTime(LocalTime(*date));
+}
+
+JS_FRIEND_API(int)
+js_DateGetDate(JSContext *cx, JSObject* obj)
+{
+ jsdouble *date = date_getProlog(cx, obj, NULL);
+
+ if (!date || JSDOUBLE_IS_NaN(*date))
+ return 0;
+ return (int) DateFromTime(LocalTime(*date));
+}
+
+JS_FRIEND_API(int)
+js_DateGetHours(JSContext *cx, JSObject* obj)
+{
+ jsdouble *date = date_getProlog(cx, obj, NULL);
+
+ if (!date || JSDOUBLE_IS_NaN(*date))
+ return 0;
+ return (int) HourFromTime(LocalTime(*date));
+}
+
+JS_FRIEND_API(int)
+js_DateGetMinutes(JSContext *cx, JSObject* obj)
+{
+ jsdouble *date = date_getProlog(cx, obj, NULL);
+
+ if (!date || JSDOUBLE_IS_NaN(*date))
+ return 0;
+ return (int) MinFromTime(LocalTime(*date));
+}
+
+JS_FRIEND_API(int)
+js_DateGetSeconds(JSContext *cx, JSObject* obj)
+{
+ jsdouble *date = date_getProlog(cx, obj, NULL);
+
+ if (!date || JSDOUBLE_IS_NaN(*date))
+ return 0;
+ return (int) SecFromTime(*date);
+}
+
+JS_FRIEND_API(void)
+js_DateSetYear(JSContext *cx, JSObject *obj, int year)
+{
+ jsdouble local;
+ jsdouble *date = date_getProlog(cx, obj, NULL);
+ if (!date)
+ return;
+ local = LocalTime(*date);
+ /* reset date if it was NaN */
+ if (JSDOUBLE_IS_NaN(local))
+ local = 0;
+ local = date_msecFromDate(year,
+ MonthFromTime(local),
+ DateFromTime(local),
+ HourFromTime(local),
+ MinFromTime(local),
+ SecFromTime(local),
+ msFromTime(local));
+ *date = UTC(local);
+}
+
+JS_FRIEND_API(void)
+js_DateSetMonth(JSContext *cx, JSObject *obj, int month)
+{
+ jsdouble local;
+ jsdouble *date = date_getProlog(cx, obj, NULL);
+ if (!date)
+ return;
+ local = LocalTime(*date);
+ /* bail if date was NaN */
+ if (JSDOUBLE_IS_NaN(local))
+ return;
+ local = date_msecFromDate(YearFromTime(local),
+ month,
+ DateFromTime(local),
+ HourFromTime(local),
+ MinFromTime(local),
+ SecFromTime(local),
+ msFromTime(local));
+ *date = UTC(local);
+}
+
+JS_FRIEND_API(void)
+js_DateSetDate(JSContext *cx, JSObject *obj, int date)
+{
+ jsdouble local;
+ jsdouble *datep = date_getProlog(cx, obj, NULL);
+ if (!datep)
+ return;
+ local = LocalTime(*datep);
+ if (JSDOUBLE_IS_NaN(local))
+ return;
+ local = date_msecFromDate(YearFromTime(local),
+ MonthFromTime(local),
+ date,
+ HourFromTime(local),
+ MinFromTime(local),
+ SecFromTime(local),
+ msFromTime(local));
+ *datep = UTC(local);
+}
+
+JS_FRIEND_API(void)
+js_DateSetHours(JSContext *cx, JSObject *obj, int hours)
+{
+ jsdouble local;
+ jsdouble *date = date_getProlog(cx, obj, NULL);
+ if (!date)
+ return;
+ local = LocalTime(*date);
+ if (JSDOUBLE_IS_NaN(local))
+ return;
+ local = date_msecFromDate(YearFromTime(local),
+ MonthFromTime(local),
+ DateFromTime(local),
+ hours,
+ MinFromTime(local),
+ SecFromTime(local),
+ msFromTime(local));
+ *date = UTC(local);
+}
+
+JS_FRIEND_API(void)
+js_DateSetMinutes(JSContext *cx, JSObject *obj, int minutes)
+{
+ jsdouble local;
+ jsdouble *date = date_getProlog(cx, obj, NULL);
+ if (!date)
+ return;
+ local = LocalTime(*date);
+ if (JSDOUBLE_IS_NaN(local))
+ return;
+ local = date_msecFromDate(YearFromTime(local),
+ MonthFromTime(local),
+ DateFromTime(local),
+ HourFromTime(local),
+ minutes,
+ SecFromTime(local),
+ msFromTime(local));
+ *date = UTC(local);
+}
+
+JS_FRIEND_API(void)
+js_DateSetSeconds(JSContext *cx, JSObject *obj, int seconds)
+{
+ jsdouble local;
+ jsdouble *date = date_getProlog(cx, obj, NULL);
+ if (!date)
+ return;
+ local = LocalTime(*date);
+ if (JSDOUBLE_IS_NaN(local))
+ return;
+ local = date_msecFromDate(YearFromTime(local),
+ MonthFromTime(local),
+ DateFromTime(local),
+ HourFromTime(local),
+ MinFromTime(local),
+ seconds,
+ msFromTime(local));
+ *date = UTC(local);
+}
+
+JS_FRIEND_API(jsdouble)
+js_DateGetMsecSinceEpoch(JSContext *cx, JSObject *obj)
+{
+ jsdouble *date = date_getProlog(cx, obj, NULL);
+ if (!date || JSDOUBLE_IS_NaN(*date))
+ return 0;
+ return (*date);
+}
diff --git a/src/third_party/js-1.7/jsdate.h b/src/third_party/js-1.7/jsdate.h
new file mode 100644
index 00000000000..88bd5f586ed
--- /dev/null
+++ b/src/third_party/js-1.7/jsdate.h
@@ -0,0 +1,120 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS Date class interface.
+ */
+
+#ifndef jsdate_h___
+#define jsdate_h___
+
+JS_BEGIN_EXTERN_C
+
+extern JSClass js_DateClass;
+
+extern JSObject *
+js_InitDateClass(JSContext *cx, JSObject *obj);
+
+/*
+ * These functions provide a C interface to the date/time object
+ */
+
+/*
+ * Construct a new Date Object from a time value given in milliseconds UTC
+ * since the epoch.
+ */
+extern JS_FRIEND_API(JSObject*)
+js_NewDateObjectMsec(JSContext* cx, jsdouble msec_time);
+
+/*
+ * Construct a new Date Object from an exploded local time value.
+ */
+extern JS_FRIEND_API(JSObject*)
+js_NewDateObject(JSContext* cx, int year, int mon, int mday,
+ int hour, int min, int sec);
+
+/*
+ * Detect whether the internal date value is NaN. (Because failure is
+ * out-of-band for js_DateGet*)
+ */
+extern JS_FRIEND_API(JSBool)
+js_DateIsValid(JSContext *cx, JSObject* obj);
+
+extern JS_FRIEND_API(int)
+js_DateGetYear(JSContext *cx, JSObject* obj);
+
+extern JS_FRIEND_API(int)
+js_DateGetMonth(JSContext *cx, JSObject* obj);
+
+extern JS_FRIEND_API(int)
+js_DateGetDate(JSContext *cx, JSObject* obj);
+
+extern JS_FRIEND_API(int)
+js_DateGetHours(JSContext *cx, JSObject* obj);
+
+extern JS_FRIEND_API(int)
+js_DateGetMinutes(JSContext *cx, JSObject* obj);
+
+extern JS_FRIEND_API(int)
+js_DateGetSeconds(JSContext *cx, JSObject* obj);
+
+extern JS_FRIEND_API(void)
+js_DateSetYear(JSContext *cx, JSObject *obj, int year);
+
+extern JS_FRIEND_API(void)
+js_DateSetMonth(JSContext *cx, JSObject *obj, int month);
+
+extern JS_FRIEND_API(void)
+js_DateSetDate(JSContext *cx, JSObject *obj, int date);
+
+extern JS_FRIEND_API(void)
+js_DateSetHours(JSContext *cx, JSObject *obj, int hours);
+
+extern JS_FRIEND_API(void)
+js_DateSetMinutes(JSContext *cx, JSObject *obj, int minutes);
+
+extern JS_FRIEND_API(void)
+js_DateSetSeconds(JSContext *cx, JSObject *obj, int seconds);
+
+extern JS_FRIEND_API(jsdouble)
+js_DateGetMsecSinceEpoch(JSContext *cx, JSObject *obj);
+
+JS_END_EXTERN_C
+
+#endif /* jsdate_h___ */
diff --git a/src/third_party/js-1.7/jsdbgapi.c b/src/third_party/js-1.7/jsdbgapi.c
new file mode 100644
index 00000000000..8fa0e684f3a
--- /dev/null
+++ b/src/third_party/js-1.7/jsdbgapi.c
@@ -0,0 +1,1439 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS debugging API.
+ */
+#include "jsstddef.h"
+#include <string.h>
+#include "jstypes.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsclist.h"
+#include "jsapi.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsdbgapi.h"
+#include "jsfun.h"
+#include "jsgc.h"
+#include "jsinterp.h"
+#include "jslock.h"
+#include "jsobj.h"
+#include "jsopcode.h"
+#include "jsscope.h"
+#include "jsscript.h"
+#include "jsstr.h"
+
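+/*
+ * A trap records the original opcode at a bytecode location.  JS_SetTrap
+ * overwrites that location with JSOP_TRAP and saves the real opcode here;
+ * JS_HandleTrap runs the handler and hands the saved opcode back to the
+ * interpreter, and DestroyTrap restores it when the trap is cleared.
+ */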
+typedef struct JSTrap {
+ JSCList links;
+ JSScript *script;
+ jsbytecode *pc;
+ JSOp op;
+ JSTrapHandler handler;
+ void *closure;
+} JSTrap;
+
+static JSTrap *
+FindTrap(JSRuntime *rt, JSScript *script, jsbytecode *pc)
+{
+ JSTrap *trap;
+
+ for (trap = (JSTrap *)rt->trapList.next;
+ trap != (JSTrap *)&rt->trapList;
+ trap = (JSTrap *)trap->links.next) {
+ if (trap->script == script && trap->pc == pc)
+ return trap;
+ }
+ return NULL;
+}
+
+void
+js_PatchOpcode(JSContext *cx, JSScript *script, jsbytecode *pc, JSOp op)
+{
+ JSTrap *trap;
+
+ trap = FindTrap(cx->runtime, script, pc);
+ if (trap)
+ trap->op = op;
+ else
+ *pc = (jsbytecode)op;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetTrap(JSContext *cx, JSScript *script, jsbytecode *pc,
+ JSTrapHandler handler, void *closure)
+{
+ JSRuntime *rt;
+ JSTrap *trap;
+
+ rt = cx->runtime;
+ trap = FindTrap(rt, script, pc);
+ if (trap) {
+ JS_ASSERT(trap->script == script && trap->pc == pc);
+ JS_ASSERT(*pc == JSOP_TRAP);
+ } else {
+ trap = (JSTrap *) JS_malloc(cx, sizeof *trap);
+ if (!trap || !js_AddRoot(cx, &trap->closure, "trap->closure")) {
+ if (trap)
+ JS_free(cx, trap);
+ return JS_FALSE;
+ }
+ JS_APPEND_LINK(&trap->links, &rt->trapList);
+ trap->script = script;
+ trap->pc = pc;
+ trap->op = (JSOp)*pc;
+ *pc = JSOP_TRAP;
+ }
+ trap->handler = handler;
+ trap->closure = closure;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSOp)
+JS_GetTrapOpcode(JSContext *cx, JSScript *script, jsbytecode *pc)
+{
+ JSTrap *trap;
+
+ trap = FindTrap(cx->runtime, script, pc);
+ if (!trap) {
+ JS_ASSERT(0); /* XXX can't happen */
+ return JSOP_LIMIT;
+ }
+ return trap->op;
+}
+
+static void
+DestroyTrap(JSContext *cx, JSTrap *trap)
+{
+ JS_REMOVE_LINK(&trap->links);
+ *trap->pc = (jsbytecode)trap->op;
+ js_RemoveRoot(cx->runtime, &trap->closure);
+ JS_free(cx, trap);
+}
+
+JS_PUBLIC_API(void)
+JS_ClearTrap(JSContext *cx, JSScript *script, jsbytecode *pc,
+ JSTrapHandler *handlerp, void **closurep)
+{
+ JSTrap *trap;
+
+ trap = FindTrap(cx->runtime, script, pc);
+ if (handlerp)
+ *handlerp = trap ? trap->handler : NULL;
+ if (closurep)
+ *closurep = trap ? trap->closure : NULL;
+ if (trap)
+ DestroyTrap(cx, trap);
+}
+
+JS_PUBLIC_API(void)
+JS_ClearScriptTraps(JSContext *cx, JSScript *script)
+{
+ JSRuntime *rt;
+ JSTrap *trap, *next;
+
+ rt = cx->runtime;
+ for (trap = (JSTrap *)rt->trapList.next;
+ trap != (JSTrap *)&rt->trapList;
+ trap = next) {
+ next = (JSTrap *)trap->links.next;
+ if (trap->script == script)
+ DestroyTrap(cx, trap);
+ }
+}
+
+JS_PUBLIC_API(void)
+JS_ClearAllTraps(JSContext *cx)
+{
+ JSRuntime *rt;
+ JSTrap *trap, *next;
+
+ rt = cx->runtime;
+ for (trap = (JSTrap *)rt->trapList.next;
+ trap != (JSTrap *)&rt->trapList;
+ trap = next) {
+ next = (JSTrap *)trap->links.next;
+ DestroyTrap(cx, trap);
+ }
+}
+
+JS_PUBLIC_API(JSTrapStatus)
+JS_HandleTrap(JSContext *cx, JSScript *script, jsbytecode *pc, jsval *rval)
+{
+ JSTrap *trap;
+ JSTrapStatus status;
+ jsint op;
+
+ trap = FindTrap(cx->runtime, script, pc);
+ if (!trap) {
+ JS_ASSERT(0); /* XXX can't happen */
+ return JSTRAP_ERROR;
+ }
+ /*
+ * It's important that we not use 'trap->' after calling the callback --
+ * the callback might remove the trap!
+ */
+ op = (jsint)trap->op;
+ status = trap->handler(cx, script, pc, rval, trap->closure);
+ if (status == JSTRAP_CONTINUE) {
+ /* By convention, return the true op to the interpreter in rval. */
+ *rval = INT_TO_JSVAL(op);
+ }
+ return status;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetInterrupt(JSRuntime *rt, JSTrapHandler handler, void *closure)
+{
+ rt->interruptHandler = handler;
+ rt->interruptHandlerData = closure;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ClearInterrupt(JSRuntime *rt, JSTrapHandler *handlerp, void **closurep)
+{
+ if (handlerp)
+ *handlerp = (JSTrapHandler)rt->interruptHandler;
+ if (closurep)
+ *closurep = rt->interruptHandlerData;
+ rt->interruptHandler = 0;
+ rt->interruptHandlerData = 0;
+ return JS_TRUE;
+}
+
+/************************************************************************/
+
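+/*
+ * A watchpoint swaps the watched property's setter for js_watch_set (via
+ * js_WrapWatchedSetter), keeping the original setter in wp->setter.  On
+ * each assignment the handler runs first, then the original setter; when
+ * the last flag is dropped, DropWatchPoint restores the original setter.
+ */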
+typedef struct JSWatchPoint {
+ JSCList links;
+ JSObject *object; /* weak link, see js_FinalizeObject */
+ JSScopeProperty *sprop;
+ JSPropertyOp setter;
+ JSWatchPointHandler handler;
+ void *closure;
+ uintN flags;
+} JSWatchPoint;
+
+#define JSWP_LIVE 0x1 /* live because set and not cleared */
+#define JSWP_HELD 0x2 /* held while running handler/setter */
+
+static JSBool
+DropWatchPoint(JSContext *cx, JSWatchPoint *wp, uintN flag)
+{
+ JSBool ok;
+ JSScopeProperty *sprop;
+ JSObject *pobj;
+ JSProperty *prop;
+ JSPropertyOp setter;
+
+ ok = JS_TRUE;
+ wp->flags &= ~flag;
+ if (wp->flags != 0)
+ return JS_TRUE;
+
+ /*
+ * Remove wp from the list, then if there are no other watchpoints for
+ * wp->sprop in any scope, restore wp->sprop->setter from wp.
+ */
+ JS_REMOVE_LINK(&wp->links);
+ sprop = wp->sprop;
+
+ /*
+ * If js_ChangeNativePropertyAttrs fails, propagate failure after removing
+ * wp->closure's root and freeing wp.
+ */
+ setter = js_GetWatchedSetter(cx->runtime, NULL, sprop);
+ if (!setter) {
+ ok = js_LookupProperty(cx, wp->object, sprop->id, &pobj, &prop);
+
+ /*
+ * If the property wasn't found on wp->object or didn't exist, then
+ * someone else has dealt with this sprop, and we don't need to change
+ * the property attributes.
+ */
+ if (ok && prop) {
+ if (pobj == wp->object) {
+ JS_ASSERT(OBJ_SCOPE(pobj)->object == pobj);
+
+ sprop = js_ChangeScopePropertyAttrs(cx, OBJ_SCOPE(pobj), sprop,
+ 0, sprop->attrs,
+ sprop->getter,
+ wp->setter);
+ if (!sprop)
+ ok = JS_FALSE;
+ }
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ }
+ }
+
+ js_RemoveRoot(cx->runtime, &wp->closure);
+ JS_free(cx, wp);
+ return ok;
+}
+
+void
+js_MarkWatchPoints(JSContext *cx)
+{
+ JSRuntime *rt;
+ JSWatchPoint *wp;
+
+ rt = cx->runtime;
+ for (wp = (JSWatchPoint *)rt->watchPointList.next;
+ wp != (JSWatchPoint *)&rt->watchPointList;
+ wp = (JSWatchPoint *)wp->links.next) {
+ MARK_SCOPE_PROPERTY(cx, wp->sprop);
+ if (wp->sprop->attrs & JSPROP_SETTER)
+ JS_MarkGCThing(cx, wp->setter, "wp->setter", NULL);
+ }
+}
+
+static JSWatchPoint *
+FindWatchPoint(JSRuntime *rt, JSScope *scope, jsid id)
+{
+ JSWatchPoint *wp;
+
+ for (wp = (JSWatchPoint *)rt->watchPointList.next;
+ wp != (JSWatchPoint *)&rt->watchPointList;
+ wp = (JSWatchPoint *)wp->links.next) {
+ if (wp->object == scope->object && wp->sprop->id == id)
+ return wp;
+ }
+ return NULL;
+}
+
+JSScopeProperty *
+js_FindWatchPoint(JSRuntime *rt, JSScope *scope, jsid id)
+{
+ JSWatchPoint *wp;
+
+ wp = FindWatchPoint(rt, scope, id);
+ if (!wp)
+ return NULL;
+ return wp->sprop;
+}
+
+JSPropertyOp
+js_GetWatchedSetter(JSRuntime *rt, JSScope *scope,
+ const JSScopeProperty *sprop)
+{
+ JSWatchPoint *wp;
+
+ for (wp = (JSWatchPoint *)rt->watchPointList.next;
+ wp != (JSWatchPoint *)&rt->watchPointList;
+ wp = (JSWatchPoint *)wp->links.next) {
+ if ((!scope || wp->object == scope->object) && wp->sprop == sprop)
+ return wp->setter;
+ }
+ return NULL;
+}
+
+JSBool JS_DLL_CALLBACK
+js_watch_set(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSRuntime *rt;
+ JSWatchPoint *wp;
+ JSScopeProperty *sprop;
+ jsval propid, userid;
+ JSScope *scope;
+ JSBool ok;
+
+ rt = cx->runtime;
+ for (wp = (JSWatchPoint *)rt->watchPointList.next;
+ wp != (JSWatchPoint *)&rt->watchPointList;
+ wp = (JSWatchPoint *)wp->links.next) {
+ sprop = wp->sprop;
+ if (wp->object == obj && SPROP_USERID(sprop) == id &&
+ !(wp->flags & JSWP_HELD)) {
+ wp->flags |= JSWP_HELD;
+
+ JS_LOCK_OBJ(cx, obj);
+ propid = ID_TO_VALUE(sprop->id);
+ userid = (sprop->flags & SPROP_HAS_SHORTID)
+ ? INT_TO_JSVAL(sprop->shortid)
+ : propid;
+ scope = OBJ_SCOPE(obj);
+ JS_UNLOCK_OBJ(cx, obj);
+
+ /* NB: wp is held, so we can safely dereference it still. */
+ ok = wp->handler(cx, obj, propid,
+ SPROP_HAS_VALID_SLOT(sprop, scope)
+ ? OBJ_GET_SLOT(cx, obj, sprop->slot)
+ : JSVAL_VOID,
+ vp, wp->closure);
+ if (ok) {
+ /*
+ * Create a pseudo-frame for the setter invocation so that any
+ * stack-walking security code under the setter will correctly
+ * identify the guilty party. So that the watcher appears to
+ * be active to obj_eval and other such code, point frame.pc
+ * at the JSOP_STOP at the end of the script.
+ */
+ JSObject *closure;
+ JSClass *clasp;
+ JSFunction *fun;
+ JSScript *script;
+ uintN nslots;
+ jsval smallv[5];
+ jsval *argv;
+ JSStackFrame frame;
+
+ closure = (JSObject *) wp->closure;
+ clasp = OBJ_GET_CLASS(cx, closure);
+ if (clasp == &js_FunctionClass) {
+ fun = (JSFunction *) JS_GetPrivate(cx, closure);
+ script = FUN_SCRIPT(fun);
+ } else if (clasp == &js_ScriptClass) {
+ fun = NULL;
+ script = (JSScript *) JS_GetPrivate(cx, closure);
+ } else {
+ fun = NULL;
+ script = NULL;
+ }
+
+ nslots = 2;
+ if (fun) {
+ nslots += fun->nargs;
+ if (FUN_NATIVE(fun))
+ nslots += fun->u.n.extra;
+ }
+
+ if (nslots <= JS_ARRAY_LENGTH(smallv)) {
+ argv = smallv;
+ } else {
+ argv = JS_malloc(cx, nslots * sizeof(jsval));
+ if (!argv) {
+ DropWatchPoint(cx, wp, JSWP_HELD);
+ return JS_FALSE;
+ }
+ }
+
+ argv[0] = OBJECT_TO_JSVAL(closure);
+ argv[1] = JSVAL_NULL;
+ memset(argv + 2, 0, (nslots - 2) * sizeof(jsval));
+
+ memset(&frame, 0, sizeof(frame));
+ frame.script = script;
+ if (script) {
+ JS_ASSERT(script->length >= JSOP_STOP_LENGTH);
+ frame.pc = script->code + script->length
+ - JSOP_STOP_LENGTH;
+ }
+ frame.fun = fun;
+ frame.argv = argv + 2;
+ frame.down = cx->fp;
+ frame.scopeChain = OBJ_GET_PARENT(cx, closure);
+
+ cx->fp = &frame;
+ ok = !wp->setter ||
+ ((sprop->attrs & JSPROP_SETTER)
+ ? js_InternalCall(cx, obj, OBJECT_TO_JSVAL(wp->setter),
+ 1, vp, vp)
+ : wp->setter(cx, OBJ_THIS_OBJECT(cx, obj), userid, vp));
+ cx->fp = frame.down;
+ if (argv != smallv)
+ JS_free(cx, argv);
+ }
+ return DropWatchPoint(cx, wp, JSWP_HELD) && ok;
+ }
+ }
+ return JS_TRUE;
+}
+
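+/*
+ * Native used when the watched property had a scripted (JSPROP_SETTER)
+ * setter: argv[-2] is the callee, the cloned wrapper function, whose atom
+ * carries the property name, so the call can be forwarded to js_watch_set
+ * with the right id.
+ */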
+JSBool JS_DLL_CALLBACK
+js_watch_set_wrapper(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSObject *funobj;
+ JSFunction *wrapper;
+ jsval userid;
+
+ funobj = JSVAL_TO_OBJECT(argv[-2]);
+ JS_ASSERT(OBJ_GET_CLASS(cx, funobj) == &js_FunctionClass);
+ wrapper = (JSFunction *) JS_GetPrivate(cx, funobj);
+ userid = ATOM_KEY(wrapper->atom);
+ *rval = argv[0];
+ return js_watch_set(cx, obj, userid, rval);
+}
+
+JSPropertyOp
+js_WrapWatchedSetter(JSContext *cx, jsid id, uintN attrs, JSPropertyOp setter)
+{
+ JSAtom *atom;
+ JSFunction *wrapper;
+
+ if (!(attrs & JSPROP_SETTER))
+ return &js_watch_set; /* & to silence schoolmarmish MSVC */
+
+ if (JSID_IS_ATOM(id)) {
+ atom = JSID_TO_ATOM(id);
+ } else if (JSID_IS_INT(id)) {
+ atom = js_AtomizeInt(cx, JSID_TO_INT(id), 0);
+ if (!atom)
+ return NULL;
+ } else {
+ atom = NULL;
+ }
+ wrapper = js_NewFunction(cx, NULL, js_watch_set_wrapper, 1, 0,
+ OBJ_GET_PARENT(cx, (JSObject *)setter),
+ atom);
+ if (!wrapper)
+ return NULL;
+ return (JSPropertyOp) wrapper->object;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetWatchPoint(JSContext *cx, JSObject *obj, jsval id,
+ JSWatchPointHandler handler, void *closure)
+{
+ JSAtom *atom;
+ jsid propid;
+ JSObject *pobj;
+ JSProperty *prop;
+ JSScopeProperty *sprop;
+ JSRuntime *rt;
+ JSBool ok;
+ JSWatchPoint *wp;
+ JSPropertyOp watcher;
+
+ if (!OBJ_IS_NATIVE(obj)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_CANT_WATCH,
+ OBJ_GET_CLASS(cx, obj)->name);
+ return JS_FALSE;
+ }
+
+ if (JSVAL_IS_INT(id)) {
+ propid = INT_JSVAL_TO_JSID(id);
+ atom = NULL;
+ } else {
+ atom = js_ValueToStringAtom(cx, id);
+ if (!atom)
+ return JS_FALSE;
+ propid = ATOM_TO_JSID(atom);
+ }
+
+ if (!js_LookupProperty(cx, obj, propid, &pobj, &prop))
+ return JS_FALSE;
+ sprop = (JSScopeProperty *) prop;
+ rt = cx->runtime;
+ if (!sprop) {
+ /* Check for a deleted symbol watchpoint, which holds its property. */
+ sprop = js_FindWatchPoint(rt, OBJ_SCOPE(obj), propid);
+ if (!sprop) {
+ /* Make a new property in obj so we can watch for the first set. */
+ if (!js_DefineProperty(cx, obj, propid, JSVAL_VOID,
+ NULL, NULL, JSPROP_ENUMERATE,
+ &prop)) {
+ return JS_FALSE;
+ }
+ sprop = (JSScopeProperty *) prop;
+ }
+ } else if (pobj != obj) {
+ /* Clone the prototype property so we can watch the right object. */
+ jsval value;
+ JSPropertyOp getter, setter;
+ uintN attrs, flags;
+ intN shortid;
+
+ if (OBJ_IS_NATIVE(pobj)) {
+ value = SPROP_HAS_VALID_SLOT(sprop, OBJ_SCOPE(pobj))
+ ? LOCKED_OBJ_GET_SLOT(pobj, sprop->slot)
+ : JSVAL_VOID;
+ getter = sprop->getter;
+ setter = sprop->setter;
+ attrs = sprop->attrs;
+ flags = sprop->flags;
+ shortid = sprop->shortid;
+ } else {
+ if (!OBJ_GET_PROPERTY(cx, pobj, id, &value) ||
+ !OBJ_GET_ATTRIBUTES(cx, pobj, id, prop, &attrs)) {
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ return JS_FALSE;
+ }
+ getter = setter = NULL;
+ flags = 0;
+ shortid = 0;
+ }
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+
+ /* Recall that obj is native, whether or not pobj is native. */
+ if (!js_DefineNativeProperty(cx, obj, propid, value, getter, setter,
+ attrs, flags, shortid, &prop)) {
+ return JS_FALSE;
+ }
+ sprop = (JSScopeProperty *) prop;
+ }
+
+ /*
+ * At this point, prop/sprop exists in obj, obj is locked, and we must
+ * OBJ_DROP_PROPERTY(cx, obj, prop) before returning.
+ */
+ ok = JS_TRUE;
+ wp = FindWatchPoint(rt, OBJ_SCOPE(obj), propid);
+ if (!wp) {
+ watcher = js_WrapWatchedSetter(cx, propid, sprop->attrs, sprop->setter);
+ if (!watcher) {
+ ok = JS_FALSE;
+ goto out;
+ }
+
+ wp = (JSWatchPoint *) JS_malloc(cx, sizeof *wp);
+ if (!wp) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ wp->handler = NULL;
+ wp->closure = NULL;
+ ok = js_AddRoot(cx, &wp->closure, "wp->closure");
+ if (!ok) {
+ JS_free(cx, wp);
+ goto out;
+ }
+ wp->object = obj;
+ JS_ASSERT(sprop->setter != js_watch_set || pobj != obj);
+ wp->setter = sprop->setter;
+ wp->flags = JSWP_LIVE;
+
+ /* XXXbe nest in obj lock here */
+ sprop = js_ChangeNativePropertyAttrs(cx, obj, sprop, 0, sprop->attrs,
+ sprop->getter, watcher);
+ if (!sprop) {
+ /* Self-link so DropWatchPoint can JS_REMOVE_LINK it. */
+ JS_INIT_CLIST(&wp->links);
+ DropWatchPoint(cx, wp, JSWP_LIVE);
+ ok = JS_FALSE;
+ goto out;
+ }
+ wp->sprop = sprop;
+
+ /*
+ * Now that wp is fully initialized, append it to rt's wp list.
+ * Because obj is locked we know that no other thread could have added
+ * a watchpoint for (obj, propid).
+ */
+ JS_ASSERT(!FindWatchPoint(rt, OBJ_SCOPE(obj), propid));
+ JS_APPEND_LINK(&wp->links, &rt->watchPointList);
+ }
+ wp->handler = handler;
+ wp->closure = closure;
+
+out:
+ OBJ_DROP_PROPERTY(cx, obj, prop);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ClearWatchPoint(JSContext *cx, JSObject *obj, jsval id,
+ JSWatchPointHandler *handlerp, void **closurep)
+{
+ JSRuntime *rt;
+ JSWatchPoint *wp;
+
+ rt = cx->runtime;
+ for (wp = (JSWatchPoint *)rt->watchPointList.next;
+ wp != (JSWatchPoint *)&rt->watchPointList;
+ wp = (JSWatchPoint *)wp->links.next) {
+ if (wp->object == obj && SPROP_USERID(wp->sprop) == id) {
+ if (handlerp)
+ *handlerp = wp->handler;
+ if (closurep)
+ *closurep = wp->closure;
+ return DropWatchPoint(cx, wp, JSWP_LIVE);
+ }
+ }
+ if (handlerp)
+ *handlerp = NULL;
+ if (closurep)
+ *closurep = NULL;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ClearWatchPointsForObject(JSContext *cx, JSObject *obj)
+{
+ JSRuntime *rt;
+ JSWatchPoint *wp, *next;
+
+ rt = cx->runtime;
+ for (wp = (JSWatchPoint *)rt->watchPointList.next;
+ wp != (JSWatchPoint *)&rt->watchPointList;
+ wp = next) {
+ next = (JSWatchPoint *)wp->links.next;
+ if (wp->object == obj && !DropWatchPoint(cx, wp, JSWP_LIVE))
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ClearAllWatchPoints(JSContext *cx)
+{
+ JSRuntime *rt;
+ JSWatchPoint *wp, *next;
+
+ rt = cx->runtime;
+ for (wp = (JSWatchPoint *)rt->watchPointList.next;
+ wp != (JSWatchPoint *)&rt->watchPointList;
+ wp = next) {
+ next = (JSWatchPoint *)wp->links.next;
+ if (!DropWatchPoint(cx, wp, JSWP_LIVE))
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+
+/************************************************************************/
+
+JS_PUBLIC_API(uintN)
+JS_PCToLineNumber(JSContext *cx, JSScript *script, jsbytecode *pc)
+{
+ return js_PCToLineNumber(cx, script, pc);
+}
+
+JS_PUBLIC_API(jsbytecode *)
+JS_LineNumberToPC(JSContext *cx, JSScript *script, uintN lineno)
+{
+ return js_LineNumberToPC(script, lineno);
+}
+
+JS_PUBLIC_API(JSScript *)
+JS_GetFunctionScript(JSContext *cx, JSFunction *fun)
+{
+ return FUN_SCRIPT(fun);
+}
+
+JS_PUBLIC_API(JSNative)
+JS_GetFunctionNative(JSContext *cx, JSFunction *fun)
+{
+ return FUN_NATIVE(fun);
+}
+
+JS_PUBLIC_API(JSPrincipals *)
+JS_GetScriptPrincipals(JSContext *cx, JSScript *script)
+{
+ return script->principals;
+}
+
+/************************************************************************/
+
+/*
+ * Stack Frame Iterator
+ */
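+/*
+ * Start with *iteratorp == NULL to get the newest frame; each call walks
+ * one frame down the stack and returns NULL once the oldest frame has
+ * been visited.
+ */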
+JS_PUBLIC_API(JSStackFrame *)
+JS_FrameIterator(JSContext *cx, JSStackFrame **iteratorp)
+{
+ *iteratorp = (*iteratorp == NULL) ? cx->fp : (*iteratorp)->down;
+ return *iteratorp;
+}
+
+JS_PUBLIC_API(JSScript *)
+JS_GetFrameScript(JSContext *cx, JSStackFrame *fp)
+{
+ return fp->script;
+}
+
+JS_PUBLIC_API(jsbytecode *)
+JS_GetFramePC(JSContext *cx, JSStackFrame *fp)
+{
+ return fp->pc;
+}
+
+JS_PUBLIC_API(JSStackFrame *)
+JS_GetScriptedCaller(JSContext *cx, JSStackFrame *fp)
+{
+ if (!fp)
+ fp = cx->fp;
+ while ((fp = fp->down) != NULL) {
+ if (fp->script)
+ return fp;
+ }
+ return NULL;
+}
+
+JS_PUBLIC_API(JSPrincipals *)
+JS_StackFramePrincipals(JSContext *cx, JSStackFrame *fp)
+{
+ if (fp->fun) {
+ JSRuntime *rt = cx->runtime;
+
+ if (rt->findObjectPrincipals) {
+ JSObject *callee = JSVAL_TO_OBJECT(fp->argv[-2]);
+
+ if (fp->fun->object != callee)
+ return rt->findObjectPrincipals(cx, callee);
+ /* FALL THROUGH */
+ }
+ }
+ if (fp->script)
+ return fp->script->principals;
+ return NULL;
+}
+
+JS_PUBLIC_API(JSPrincipals *)
+JS_EvalFramePrincipals(JSContext *cx, JSStackFrame *fp, JSStackFrame *caller)
+{
+ JSRuntime *rt;
+ JSObject *callee;
+ JSPrincipals *principals, *callerPrincipals;
+
+ rt = cx->runtime;
+ if (rt->findObjectPrincipals) {
+ callee = JSVAL_TO_OBJECT(fp->argv[-2]);
+ principals = rt->findObjectPrincipals(cx, callee);
+ } else {
+ principals = NULL;
+ }
+ if (!caller)
+ return principals;
+ callerPrincipals = JS_StackFramePrincipals(cx, caller);
+ return (callerPrincipals && principals &&
+ callerPrincipals->subsume(callerPrincipals, principals))
+ ? principals
+ : callerPrincipals;
+}
+
+JS_PUBLIC_API(void *)
+JS_GetFrameAnnotation(JSContext *cx, JSStackFrame *fp)
+{
+ if (fp->annotation && fp->script) {
+ JSPrincipals *principals = JS_StackFramePrincipals(cx, fp);
+
+ if (principals && principals->globalPrivilegesEnabled(cx, principals)) {
+ /*
+ * Give out an annotation only if privileges have not been revoked
+ * or disabled globally.
+ */
+ return fp->annotation;
+ }
+ }
+
+ return NULL;
+}
+
+JS_PUBLIC_API(void)
+JS_SetFrameAnnotation(JSContext *cx, JSStackFrame *fp, void *annotation)
+{
+ fp->annotation = annotation;
+}
+
+JS_PUBLIC_API(void *)
+JS_GetFramePrincipalArray(JSContext *cx, JSStackFrame *fp)
+{
+ JSPrincipals *principals;
+
+ principals = JS_StackFramePrincipals(cx, fp);
+ if (!principals)
+ return NULL;
+ return principals->getPrincipalArray(cx, principals);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_IsNativeFrame(JSContext *cx, JSStackFrame *fp)
+{
+ return !fp->script;
+}
+
+/* this is deprecated, use JS_GetFrameScopeChain instead */
+JS_PUBLIC_API(JSObject *)
+JS_GetFrameObject(JSContext *cx, JSStackFrame *fp)
+{
+ return fp->scopeChain;
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_GetFrameScopeChain(JSContext *cx, JSStackFrame *fp)
+{
+ /* Force creation of argument and call objects if not yet created */
+ (void) JS_GetFrameCallObject(cx, fp);
+ return js_GetScopeChain(cx, fp);
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_GetFrameCallObject(JSContext *cx, JSStackFrame *fp)
+{
+ if (! fp->fun)
+ return NULL;
+
+ /* Force creation of argument object if not yet created */
+ (void) js_GetArgsObject(cx, fp);
+
+ /*
+ * XXX ill-defined: null return here means error was reported, unlike a
+ * null returned above or in the #else
+ */
+ return js_GetCallObject(cx, fp, NULL);
+}
+
+
+JS_PUBLIC_API(JSObject *)
+JS_GetFrameThis(JSContext *cx, JSStackFrame *fp)
+{
+ return fp->thisp;
+}
+
+JS_PUBLIC_API(JSFunction *)
+JS_GetFrameFunction(JSContext *cx, JSStackFrame *fp)
+{
+ return fp->fun;
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_GetFrameFunctionObject(JSContext *cx, JSStackFrame *fp)
+{
+ return fp->argv && fp->fun ? JSVAL_TO_OBJECT(fp->argv[-2]) : NULL;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_IsConstructorFrame(JSContext *cx, JSStackFrame *fp)
+{
+ return (fp->flags & JSFRAME_CONSTRUCTING) != 0;
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_GetFrameCalleeObject(JSContext *cx, JSStackFrame *fp)
+{
+ return fp->argv ? JSVAL_TO_OBJECT(fp->argv[-2]) : NULL;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_IsDebuggerFrame(JSContext *cx, JSStackFrame *fp)
+{
+ return (fp->flags & JSFRAME_DEBUGGER) != 0;
+}
+
+JS_PUBLIC_API(jsval)
+JS_GetFrameReturnValue(JSContext *cx, JSStackFrame *fp)
+{
+ return fp->rval;
+}
+
+JS_PUBLIC_API(void)
+JS_SetFrameReturnValue(JSContext *cx, JSStackFrame *fp, jsval rval)
+{
+ fp->rval = rval;
+}
+
+/************************************************************************/
+
+JS_PUBLIC_API(const char *)
+JS_GetScriptFilename(JSContext *cx, JSScript *script)
+{
+ return script->filename;
+}
+
+JS_PUBLIC_API(uintN)
+JS_GetScriptBaseLineNumber(JSContext *cx, JSScript *script)
+{
+ return script->lineno;
+}
+
+JS_PUBLIC_API(uintN)
+JS_GetScriptLineExtent(JSContext *cx, JSScript *script)
+{
+ return js_GetScriptLineExtent(script);
+}
+
+JS_PUBLIC_API(JSVersion)
+JS_GetScriptVersion(JSContext *cx, JSScript *script)
+{
+ return script->version & JSVERSION_MASK;
+}
+
+/***************************************************************************/
+
+JS_PUBLIC_API(void)
+JS_SetNewScriptHook(JSRuntime *rt, JSNewScriptHook hook, void *callerdata)
+{
+ rt->newScriptHook = hook;
+ rt->newScriptHookData = callerdata;
+}
+
+JS_PUBLIC_API(void)
+JS_SetDestroyScriptHook(JSRuntime *rt, JSDestroyScriptHook hook,
+ void *callerdata)
+{
+ rt->destroyScriptHook = hook;
+ rt->destroyScriptHookData = callerdata;
+}
+
+/***************************************************************************/
+
+JS_PUBLIC_API(JSBool)
+JS_EvaluateUCInStackFrame(JSContext *cx, JSStackFrame *fp,
+ const jschar *chars, uintN length,
+ const char *filename, uintN lineno,
+ jsval *rval)
+{
+ JSObject *scobj;
+ uint32 flags, options;
+ JSScript *script;
+ JSBool ok;
+
+ scobj = JS_GetFrameScopeChain(cx, fp);
+ if (!scobj)
+ return JS_FALSE;
+
+ /*
+ * XXX Hack around ancient compiler API to propagate the JSFRAME_SPECIAL
+ * flags to the code generator (see js_EmitTree's TOK_SEMI case).
+ */
+ flags = fp->flags;
+ fp->flags |= JSFRAME_DEBUGGER | JSFRAME_EVAL;
+ options = cx->options;
+ cx->options = options | JSOPTION_COMPILE_N_GO;
+ script = JS_CompileUCScriptForPrincipals(cx, scobj,
+ JS_StackFramePrincipals(cx, fp),
+ chars, length, filename, lineno);
+ fp->flags = flags;
+ cx->options = options;
+ if (!script)
+ return JS_FALSE;
+
+ ok = js_Execute(cx, scobj, script, fp, JSFRAME_DEBUGGER | JSFRAME_EVAL,
+ rval);
+ js_DestroyScript(cx, script);
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_EvaluateInStackFrame(JSContext *cx, JSStackFrame *fp,
+ const char *bytes, uintN length,
+ const char *filename, uintN lineno,
+ jsval *rval)
+{
+ jschar *chars;
+ JSBool ok;
+ size_t len = length;
+
+ chars = js_InflateString(cx, bytes, &len);
+ if (!chars)
+ return JS_FALSE;
+ length = (uintN) len;
+ ok = JS_EvaluateUCInStackFrame(cx, fp, chars, length, filename, lineno,
+ rval);
+ JS_free(cx, chars);
+
+ return ok;
+}
+
+/************************************************************************/
+
+/* XXXbe this all needs to be reworked to avoid requiring JSScope types. */
+
+JS_PUBLIC_API(JSScopeProperty *)
+JS_PropertyIterator(JSObject *obj, JSScopeProperty **iteratorp)
+{
+ JSScopeProperty *sprop;
+ JSScope *scope;
+
+ sprop = *iteratorp;
+ scope = OBJ_SCOPE(obj);
+
+ /* XXXbe minor(?) incompatibility: iterate in reverse definition order */
+ if (!sprop) {
+ sprop = SCOPE_LAST_PROP(scope);
+ } else {
+ while ((sprop = sprop->parent) != NULL) {
+ if (!SCOPE_HAD_MIDDLE_DELETE(scope))
+ break;
+ if (SCOPE_HAS_PROPERTY(scope, sprop))
+ break;
+ }
+ }
+ *iteratorp = sprop;
+ return sprop;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_GetPropertyDesc(JSContext *cx, JSObject *obj, JSScopeProperty *sprop,
+ JSPropertyDesc *pd)
+{
+ JSPropertyOp getter;
+ JSScope *scope;
+ JSScopeProperty *aprop;
+ jsval lastException;
+ JSBool wasThrowing;
+
+ pd->id = ID_TO_VALUE(sprop->id);
+
+ wasThrowing = cx->throwing;
+ if (wasThrowing) {
+ lastException = cx->exception;
+ if (JSVAL_IS_GCTHING(lastException) &&
+ !js_AddRoot(cx, &lastException, "lastException")) {
+ return JS_FALSE;
+ }
+ cx->throwing = JS_FALSE;
+ }
+
+ if (!js_GetProperty(cx, obj, sprop->id, &pd->value)) {
+ if (!cx->throwing) {
+ pd->flags = JSPD_ERROR;
+ pd->value = JSVAL_VOID;
+ } else {
+ pd->flags = JSPD_EXCEPTION;
+ pd->value = cx->exception;
+ }
+ } else {
+ pd->flags = 0;
+ }
+
+ cx->throwing = wasThrowing;
+ if (wasThrowing) {
+ cx->exception = lastException;
+ if (JSVAL_IS_GCTHING(lastException))
+ js_RemoveRoot(cx->runtime, &lastException);
+ }
+
+ getter = sprop->getter;
+ pd->flags |= ((sprop->attrs & JSPROP_ENUMERATE) ? JSPD_ENUMERATE : 0)
+ | ((sprop->attrs & JSPROP_READONLY) ? JSPD_READONLY : 0)
+ | ((sprop->attrs & JSPROP_PERMANENT) ? JSPD_PERMANENT : 0)
+ | ((getter == js_GetCallVariable) ? JSPD_VARIABLE : 0)
+ | ((getter == js_GetArgument) ? JSPD_ARGUMENT : 0)
+ | ((getter == js_GetLocalVariable) ? JSPD_VARIABLE : 0);
+
+    /* For the Call object, the 'real' getter isn't passed in to us. */
+ if (OBJ_GET_CLASS(cx, obj) == &js_CallClass &&
+ getter == js_CallClass.getProperty) {
+ /*
+ * Property of a heavyweight function's variable object having the
+ * class-default getter. It's either an argument if permanent, or a
+ * nested function if impermanent. Local variables have a special
+ * getter (js_GetCallVariable, tested above) and setter, and not the
+ * class default.
+ */
+ pd->flags |= (sprop->attrs & JSPROP_PERMANENT)
+ ? JSPD_ARGUMENT
+ : JSPD_VARIABLE;
+ }
+
+ pd->spare = 0;
+ pd->slot = (pd->flags & (JSPD_ARGUMENT | JSPD_VARIABLE))
+ ? sprop->shortid
+ : 0;
+ pd->alias = JSVAL_VOID;
+ scope = OBJ_SCOPE(obj);
+ if (SPROP_HAS_VALID_SLOT(sprop, scope)) {
+ for (aprop = SCOPE_LAST_PROP(scope); aprop; aprop = aprop->parent) {
+ if (aprop != sprop && aprop->slot == sprop->slot) {
+ pd->alias = ID_TO_VALUE(aprop->id);
+ break;
+ }
+ }
+ }
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_GetPropertyDescArray(JSContext *cx, JSObject *obj, JSPropertyDescArray *pda)
+{
+ JSClass *clasp;
+ JSScope *scope;
+ uint32 i, n;
+ JSPropertyDesc *pd;
+ JSScopeProperty *sprop;
+
+ clasp = OBJ_GET_CLASS(cx, obj);
+ if (!OBJ_IS_NATIVE(obj) || (clasp->flags & JSCLASS_NEW_ENUMERATE)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_CANT_DESCRIBE_PROPS, clasp->name);
+ return JS_FALSE;
+ }
+ if (!clasp->enumerate(cx, obj))
+ return JS_FALSE;
+
+    /* Object has no props, or its scope has not mutated from that of its proto. */
+ scope = OBJ_SCOPE(obj);
+ if (scope->object != obj || scope->entryCount == 0) {
+ pda->length = 0;
+ pda->array = NULL;
+ return JS_TRUE;
+ }
+
+ n = scope->entryCount;
+ if (n > scope->map.nslots)
+ n = scope->map.nslots;
+ pd = (JSPropertyDesc *) JS_malloc(cx, (size_t)n * sizeof(JSPropertyDesc));
+ if (!pd)
+ return JS_FALSE;
+ i = 0;
+ for (sprop = SCOPE_LAST_PROP(scope); sprop; sprop = sprop->parent) {
+ if (SCOPE_HAD_MIDDLE_DELETE(scope) && !SCOPE_HAS_PROPERTY(scope, sprop))
+ continue;
+ if (!js_AddRoot(cx, &pd[i].id, NULL))
+ goto bad;
+ if (!js_AddRoot(cx, &pd[i].value, NULL))
+ goto bad;
+ if (!JS_GetPropertyDesc(cx, obj, sprop, &pd[i]))
+ goto bad;
+ if ((pd[i].flags & JSPD_ALIAS) && !js_AddRoot(cx, &pd[i].alias, NULL))
+ goto bad;
+ if (++i == n)
+ break;
+ }
+ pda->length = i;
+ pda->array = pd;
+ return JS_TRUE;
+
+bad:
+ pda->length = i + 1;
+ pda->array = pd;
+ JS_PutPropertyDescArray(cx, pda);
+ return JS_FALSE;
+}
+
+JS_PUBLIC_API(void)
+JS_PutPropertyDescArray(JSContext *cx, JSPropertyDescArray *pda)
+{
+ JSPropertyDesc *pd;
+ uint32 i;
+
+ pd = pda->array;
+ for (i = 0; i < pda->length; i++) {
+ js_RemoveRoot(cx->runtime, &pd[i].id);
+ js_RemoveRoot(cx->runtime, &pd[i].value);
+ if (pd[i].flags & JSPD_ALIAS)
+ js_RemoveRoot(cx->runtime, &pd[i].alias);
+ }
+ JS_free(cx, pd);
+}
+
+/************************************************************************/
+
+JS_PUBLIC_API(JSBool)
+JS_SetDebuggerHandler(JSRuntime *rt, JSTrapHandler handler, void *closure)
+{
+ rt->debuggerHandler = handler;
+ rt->debuggerHandlerData = closure;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetSourceHandler(JSRuntime *rt, JSSourceHandler handler, void *closure)
+{
+ rt->sourceHandler = handler;
+ rt->sourceHandlerData = closure;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetExecuteHook(JSRuntime *rt, JSInterpreterHook hook, void *closure)
+{
+ rt->executeHook = hook;
+ rt->executeHookData = closure;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetCallHook(JSRuntime *rt, JSInterpreterHook hook, void *closure)
+{
+ rt->callHook = hook;
+ rt->callHookData = closure;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetObjectHook(JSRuntime *rt, JSObjectHook hook, void *closure)
+{
+ rt->objectHook = hook;
+ rt->objectHookData = closure;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetThrowHook(JSRuntime *rt, JSTrapHandler hook, void *closure)
+{
+ rt->throwHook = hook;
+ rt->throwHookData = closure;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_SetDebugErrorHook(JSRuntime *rt, JSDebugErrorHook hook, void *closure)
+{
+ rt->debugErrorHook = hook;
+ rt->debugErrorHookData = closure;
+ return JS_TRUE;
+}
+
+/************************************************************************/
+
+JS_PUBLIC_API(size_t)
+JS_GetObjectTotalSize(JSContext *cx, JSObject *obj)
+{
+ size_t nbytes;
+ JSScope *scope;
+
+ nbytes = sizeof *obj + obj->map->nslots * sizeof obj->slots[0];
+ if (OBJ_IS_NATIVE(obj)) {
+ scope = OBJ_SCOPE(obj);
+ if (scope->object == obj) {
+ nbytes += sizeof *scope;
+ nbytes += SCOPE_CAPACITY(scope) * sizeof(JSScopeProperty *);
+ }
+ }
+ return nbytes;
+}
+
+static size_t
+GetAtomTotalSize(JSContext *cx, JSAtom *atom)
+{
+ size_t nbytes;
+
+ nbytes = sizeof *atom;
+ if (ATOM_IS_STRING(atom)) {
+ nbytes += sizeof(JSString);
+ nbytes += (ATOM_TO_STRING(atom)->length + 1) * sizeof(jschar);
+ } else if (ATOM_IS_DOUBLE(atom)) {
+ nbytes += sizeof(jsdouble);
+ } else if (ATOM_IS_OBJECT(atom)) {
+ nbytes += JS_GetObjectTotalSize(cx, ATOM_TO_OBJECT(atom));
+ }
+ return nbytes;
+}
+
+JS_PUBLIC_API(size_t)
+JS_GetFunctionTotalSize(JSContext *cx, JSFunction *fun)
+{
+ size_t nbytes;
+
+ nbytes = sizeof *fun;
+ if (fun->object)
+ nbytes += JS_GetObjectTotalSize(cx, fun->object);
+ if (FUN_INTERPRETED(fun))
+ nbytes += JS_GetScriptTotalSize(cx, fun->u.i.script);
+ if (fun->atom)
+ nbytes += GetAtomTotalSize(cx, fun->atom);
+ return nbytes;
+}
+
+#include "jsemit.h"
+
+JS_PUBLIC_API(size_t)
+JS_GetScriptTotalSize(JSContext *cx, JSScript *script)
+{
+ size_t nbytes, pbytes;
+ JSObject *obj;
+ jsatomid i;
+ jssrcnote *sn, *notes;
+ JSTryNote *tn, *tnotes;
+ JSPrincipals *principals;
+
+ nbytes = sizeof *script;
+ obj = script->object;
+ if (obj)
+ nbytes += JS_GetObjectTotalSize(cx, obj);
+
+ nbytes += script->length * sizeof script->code[0];
+ nbytes += script->atomMap.length * sizeof script->atomMap.vector[0];
+ for (i = 0; i < script->atomMap.length; i++)
+ nbytes += GetAtomTotalSize(cx, script->atomMap.vector[i]);
+
+ if (script->filename)
+ nbytes += strlen(script->filename) + 1;
+
+ notes = SCRIPT_NOTES(script);
+ for (sn = notes; !SN_IS_TERMINATOR(sn); sn = SN_NEXT(sn))
+ continue;
+ nbytes += (sn - notes + 1) * sizeof *sn;
+
+ tnotes = script->trynotes;
+ if (tnotes) {
+ for (tn = tnotes; tn->catchStart; tn++)
+ continue;
+ nbytes += (tn - tnotes + 1) * sizeof *tn;
+ }
+
+ principals = script->principals;
+ if (principals) {
+ JS_ASSERT(principals->refcount);
+ pbytes = sizeof *principals;
+ if (principals->refcount > 1)
+ pbytes = JS_HOWMANY(pbytes, principals->refcount);
+ nbytes += pbytes;
+ }
+
+ return nbytes;
+}
+
+JS_PUBLIC_API(uint32)
+JS_GetTopScriptFilenameFlags(JSContext *cx, JSStackFrame *fp)
+{
+ if (!fp)
+ fp = cx->fp;
+ while (fp) {
+ if (fp->script) {
+ return JS_GetScriptFilenameFlags(fp->script);
+ }
+ fp = fp->down;
+ }
+ return 0;
+}
+
+JS_PUBLIC_API(uint32)
+JS_GetScriptFilenameFlags(JSScript *script)
+{
+ JS_ASSERT(script);
+ if (!script->filename)
+ return JSFILENAME_NULL;
+ return js_GetScriptFilenameFlags(script->filename);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_FlagScriptFilenamePrefix(JSRuntime *rt, const char *prefix, uint32 flags)
+{
+ if (!js_SaveScriptFilenameRT(rt, prefix, flags))
+ return JS_FALSE;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_IsSystemObject(JSContext *cx, JSObject *obj)
+{
+ return (*js_GetGCThingFlags(obj) & GCF_SYSTEM) != 0;
+}
+
+JS_PUBLIC_API(void)
+JS_FlagSystemObject(JSContext *cx, JSObject *obj)
+{
+ uint8 *flagp;
+
+ flagp = js_GetGCThingFlags(obj);
+ *flagp |= GCF_SYSTEM;
+}
diff --git a/src/third_party/js-1.7/jsdbgapi.h b/src/third_party/js-1.7/jsdbgapi.h
new file mode 100644
index 00000000000..d2e1f1c69ec
--- /dev/null
+++ b/src/third_party/js-1.7/jsdbgapi.h
@@ -0,0 +1,406 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsdbgapi_h___
+#define jsdbgapi_h___
+/*
+ * JS debugger API.
+ */
+#include "jsapi.h"
+#include "jsopcode.h"
+#include "jsprvtd.h"
+
+JS_BEGIN_EXTERN_C
+
+extern void
+js_PatchOpcode(JSContext *cx, JSScript *script, jsbytecode *pc, JSOp op);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetTrap(JSContext *cx, JSScript *script, jsbytecode *pc,
+ JSTrapHandler handler, void *closure);
+
+extern JS_PUBLIC_API(JSOp)
+JS_GetTrapOpcode(JSContext *cx, JSScript *script, jsbytecode *pc);
+
+extern JS_PUBLIC_API(void)
+JS_ClearTrap(JSContext *cx, JSScript *script, jsbytecode *pc,
+ JSTrapHandler *handlerp, void **closurep);
+
+extern JS_PUBLIC_API(void)
+JS_ClearScriptTraps(JSContext *cx, JSScript *script);
+
+extern JS_PUBLIC_API(void)
+JS_ClearAllTraps(JSContext *cx);
+
+extern JS_PUBLIC_API(JSTrapStatus)
+JS_HandleTrap(JSContext *cx, JSScript *script, jsbytecode *pc, jsval *rval);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetInterrupt(JSRuntime *rt, JSTrapHandler handler, void *closure);
+
+extern JS_PUBLIC_API(JSBool)
+JS_ClearInterrupt(JSRuntime *rt, JSTrapHandler *handlerp, void **closurep);
+
+/************************************************************************/
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetWatchPoint(JSContext *cx, JSObject *obj, jsval id,
+ JSWatchPointHandler handler, void *closure);
+
+extern JS_PUBLIC_API(JSBool)
+JS_ClearWatchPoint(JSContext *cx, JSObject *obj, jsval id,
+ JSWatchPointHandler *handlerp, void **closurep);
+
+extern JS_PUBLIC_API(JSBool)
+JS_ClearWatchPointsForObject(JSContext *cx, JSObject *obj);
+
+extern JS_PUBLIC_API(JSBool)
+JS_ClearAllWatchPoints(JSContext *cx);
+
+#ifdef JS_HAS_OBJ_WATCHPOINT
+/*
+ * Hide these non-API function prototypes by testing whether the internal
+ * header file "jsconfig.h" has been included.
+ */
+extern void
+js_MarkWatchPoints(JSContext *cx);
+
+extern JSScopeProperty *
+js_FindWatchPoint(JSRuntime *rt, JSScope *scope, jsid id);
+
+extern JSPropertyOp
+js_GetWatchedSetter(JSRuntime *rt, JSScope *scope,
+ const JSScopeProperty *sprop);
+
+extern JSBool JS_DLL_CALLBACK
+js_watch_set(JSContext *cx, JSObject *obj, jsval id, jsval *vp);
+
+extern JSBool JS_DLL_CALLBACK
+js_watch_set_wrapper(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval);
+
+extern JSPropertyOp
+js_WrapWatchedSetter(JSContext *cx, jsid id, uintN attrs, JSPropertyOp setter);
+
+#endif /* JS_HAS_OBJ_WATCHPOINT */
+
+/************************************************************************/
+
+extern JS_PUBLIC_API(uintN)
+JS_PCToLineNumber(JSContext *cx, JSScript *script, jsbytecode *pc);
+
+extern JS_PUBLIC_API(jsbytecode *)
+JS_LineNumberToPC(JSContext *cx, JSScript *script, uintN lineno);
+
+extern JS_PUBLIC_API(JSScript *)
+JS_GetFunctionScript(JSContext *cx, JSFunction *fun);
+
+extern JS_PUBLIC_API(JSNative)
+JS_GetFunctionNative(JSContext *cx, JSFunction *fun);
+
+extern JS_PUBLIC_API(JSPrincipals *)
+JS_GetScriptPrincipals(JSContext *cx, JSScript *script);
+
+/*
+ * Stack Frame Iterator
+ *
+ * Used to iterate through the JS stack frames to extract
+ * information from the frames.
+ */
+
+extern JS_PUBLIC_API(JSStackFrame *)
+JS_FrameIterator(JSContext *cx, JSStackFrame **iteratorp);
+
+extern JS_PUBLIC_API(JSScript *)
+JS_GetFrameScript(JSContext *cx, JSStackFrame *fp);
+
+extern JS_PUBLIC_API(jsbytecode *)
+JS_GetFramePC(JSContext *cx, JSStackFrame *fp);
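+/*
+ * Editorial sketch, not part of the original header: a typical use of the
+ * iterator with the accessors above is to walk every frame and report where
+ * each scripted frame is executing.  PrintBacktrace is a hypothetical helper
+ * name (printf assumes <stdio.h>); the API calls are the ones declared in
+ * this file.
+ *
+ *   static void
+ *   PrintBacktrace(JSContext *cx)
+ *   {
+ *       JSStackFrame *iter = NULL;
+ *       JSStackFrame *fp;
+ *
+ *       while ((fp = JS_FrameIterator(cx, &iter)) != NULL) {
+ *           JSScript *script = JS_GetFrameScript(cx, fp);
+ *           jsbytecode *pc = JS_GetFramePC(cx, fp);
+ *
+ *           if (script && pc)
+ *               printf("%s:%u\n",
+ *                      JS_GetScriptFilename(cx, script),
+ *                      JS_PCToLineNumber(cx, script, pc));
+ *       }
+ *   }
+ */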
+
+/*
+ * Get the closest scripted frame below fp. If fp is null, start from cx->fp.
+ */
+extern JS_PUBLIC_API(JSStackFrame *)
+JS_GetScriptedCaller(JSContext *cx, JSStackFrame *fp);
+
+/*
+ * Return a weak reference to fp's principals. A null return does not denote
+ * an error, it means there are no principals.
+ */
+extern JS_PUBLIC_API(JSPrincipals *)
+JS_StackFramePrincipals(JSContext *cx, JSStackFrame *fp);
+
+/*
+ * This API is like JS_StackFramePrincipals(cx, caller), except that if
+ * cx->runtime->findObjectPrincipals is non-null, it returns the weaker of
+ * the caller's principals and the object principals of fp's callee function
+ * object (fp->argv[-2]), which is eval, Function, or a similar eval-like
+ * method. The caller parameter should be JS_GetScriptedCaller(cx, fp).
+ *
+ * All eval-like methods must use JS_EvalFramePrincipals to acquire a weak
+ * reference to the correct principals for the eval call to be secure, given
+ * an embedding that calls JS_SetObjectPrincipalsFinder (see jsapi.h).
+ */
+extern JS_PUBLIC_API(JSPrincipals *)
+JS_EvalFramePrincipals(JSContext *cx, JSStackFrame *fp, JSStackFrame *caller);
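+/*
+ * Editorial sketch, not part of the original header: an eval-like native
+ * would typically acquire principals as described above before compiling the
+ * source it is about to run.  Here obj, chars, length, filename and lineno
+ * stand for the native's own arguments.
+ *
+ *   JSStackFrame *caller = JS_GetScriptedCaller(cx, fp);
+ *   JSPrincipals *principals = JS_EvalFramePrincipals(cx, fp, caller);
+ *   JSScript *script = JS_CompileUCScriptForPrincipals(cx, obj, principals,
+ *                                                      chars, length,
+ *                                                      filename, lineno);
+ */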
+
+extern JS_PUBLIC_API(void *)
+JS_GetFrameAnnotation(JSContext *cx, JSStackFrame *fp);
+
+extern JS_PUBLIC_API(void)
+JS_SetFrameAnnotation(JSContext *cx, JSStackFrame *fp, void *annotation);
+
+extern JS_PUBLIC_API(void *)
+JS_GetFramePrincipalArray(JSContext *cx, JSStackFrame *fp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_IsNativeFrame(JSContext *cx, JSStackFrame *fp);
+
+/* this is deprecated, use JS_GetFrameScopeChain instead */
+extern JS_PUBLIC_API(JSObject *)
+JS_GetFrameObject(JSContext *cx, JSStackFrame *fp);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_GetFrameScopeChain(JSContext *cx, JSStackFrame *fp);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_GetFrameCallObject(JSContext *cx, JSStackFrame *fp);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_GetFrameThis(JSContext *cx, JSStackFrame *fp);
+
+extern JS_PUBLIC_API(JSFunction *)
+JS_GetFrameFunction(JSContext *cx, JSStackFrame *fp);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_GetFrameFunctionObject(JSContext *cx, JSStackFrame *fp);
+
+/* XXXrginda Initially published with typo */
+#define JS_IsContructorFrame JS_IsConstructorFrame
+extern JS_PUBLIC_API(JSBool)
+JS_IsConstructorFrame(JSContext *cx, JSStackFrame *fp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_IsDebuggerFrame(JSContext *cx, JSStackFrame *fp);
+
+extern JS_PUBLIC_API(jsval)
+JS_GetFrameReturnValue(JSContext *cx, JSStackFrame *fp);
+
+extern JS_PUBLIC_API(void)
+JS_SetFrameReturnValue(JSContext *cx, JSStackFrame *fp, jsval rval);
+
+/**
+ * Return fp's callee function object (fp->argv[-2]) if it has one.
+ */
+extern JS_PUBLIC_API(JSObject *)
+JS_GetFrameCalleeObject(JSContext *cx, JSStackFrame *fp);
+
+/************************************************************************/
+
+extern JS_PUBLIC_API(const char *)
+JS_GetScriptFilename(JSContext *cx, JSScript *script);
+
+extern JS_PUBLIC_API(uintN)
+JS_GetScriptBaseLineNumber(JSContext *cx, JSScript *script);
+
+extern JS_PUBLIC_API(uintN)
+JS_GetScriptLineExtent(JSContext *cx, JSScript *script);
+
+extern JS_PUBLIC_API(JSVersion)
+JS_GetScriptVersion(JSContext *cx, JSScript *script);
+
+/************************************************************************/
+
+/*
+ * Hook setters for script creation and destruction, see jsprvtd.h for the
+ * typedefs. These macros provide binary compatibility and newer, shorter
+ * synonyms.
+ */
+#define JS_SetNewScriptHook JS_SetNewScriptHookProc
+#define JS_SetDestroyScriptHook JS_SetDestroyScriptHookProc
+
+extern JS_PUBLIC_API(void)
+JS_SetNewScriptHook(JSRuntime *rt, JSNewScriptHook hook, void *callerdata);
+
+extern JS_PUBLIC_API(void)
+JS_SetDestroyScriptHook(JSRuntime *rt, JSDestroyScriptHook hook,
+ void *callerdata);
+
+/************************************************************************/
+
+extern JS_PUBLIC_API(JSBool)
+JS_EvaluateUCInStackFrame(JSContext *cx, JSStackFrame *fp,
+ const jschar *chars, uintN length,
+ const char *filename, uintN lineno,
+ jsval *rval);
+
+extern JS_PUBLIC_API(JSBool)
+JS_EvaluateInStackFrame(JSContext *cx, JSStackFrame *fp,
+ const char *bytes, uintN length,
+ const char *filename, uintN lineno,
+ jsval *rval);
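+/*
+ * Editorial sketch, not part of the original header: a debugger hook (see
+ * JS_SetDebuggerHandler below) can evaluate an expression in the frame that
+ * triggered it.  The JSTrapHandler signature and JSTRAP_CONTINUE are assumed
+ * from jspubtd.h; MyDebuggerHandler and the "x + y" expression are
+ * hypothetical.
+ *
+ *   static JSTrapStatus
+ *   MyDebuggerHandler(JSContext *cx, JSScript *script, jsbytecode *pc,
+ *                     jsval *rval, void *closure)
+ *   {
+ *       JSStackFrame *iter = NULL;
+ *       JSStackFrame *fp = JS_FrameIterator(cx, &iter);   (youngest frame)
+ *       jsval result;
+ *
+ *       if (fp &&
+ *           JS_EvaluateInStackFrame(cx, fp, "x + y", 5, "debugger eval", 1,
+ *                                   &result)) {
+ *           ... inspect result ...
+ *       }
+ *       return JSTRAP_CONTINUE;
+ *   }
+ */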
+
+/************************************************************************/
+
+typedef struct JSPropertyDesc {
+ jsval id; /* primary id, a string or int */
+ jsval value; /* property value */
+ uint8 flags; /* flags, see below */
+ uint8 spare; /* unused */
+ uint16 slot; /* argument/variable slot */
+ jsval alias; /* alias id if JSPD_ALIAS flag */
+} JSPropertyDesc;
+
+#define JSPD_ENUMERATE 0x01 /* visible to for/in loop */
+#define JSPD_READONLY 0x02 /* assignment is error */
+#define JSPD_PERMANENT 0x04 /* property cannot be deleted */
+#define JSPD_ALIAS 0x08 /* property has an alias id */
+#define JSPD_ARGUMENT 0x10 /* argument to function */
+#define JSPD_VARIABLE 0x20 /* local variable in function */
+#define JSPD_EXCEPTION 0x40 /* exception occurred fetching the property, */
+ /* value is exception */
+#define JSPD_ERROR 0x80 /* native getter returned JS_FALSE without */
+ /* throwing an exception */
+
+typedef struct JSPropertyDescArray {
+ uint32 length; /* number of elements in array */
+ JSPropertyDesc *array; /* alloc'd by Get, freed by Put */
+} JSPropertyDescArray;
+
+extern JS_PUBLIC_API(JSScopeProperty *)
+JS_PropertyIterator(JSObject *obj, JSScopeProperty **iteratorp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_GetPropertyDesc(JSContext *cx, JSObject *obj, JSScopeProperty *sprop,
+ JSPropertyDesc *pd);
+
+extern JS_PUBLIC_API(JSBool)
+JS_GetPropertyDescArray(JSContext *cx, JSObject *obj, JSPropertyDescArray *pda);
+
+extern JS_PUBLIC_API(void)
+JS_PutPropertyDescArray(JSContext *cx, JSPropertyDescArray *pda);
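+/*
+ * Editorial sketch, not part of the original header: JS_GetPropertyDescArray
+ * allocates pda->array and roots the values it fills in, so every successful
+ * call must be balanced by JS_PutPropertyDescArray.
+ *
+ *   JSPropertyDescArray pda;
+ *   uint32 i;
+ *
+ *   if (JS_GetPropertyDescArray(cx, obj, &pda)) {
+ *       for (i = 0; i < pda.length; i++) {
+ *           JSPropertyDesc *pd = &pda.array[i];
+ *           if (pd->flags & JSPD_ENUMERATE) {
+ *               ... inspect pd->id and pd->value ...
+ *           }
+ *       }
+ *       JS_PutPropertyDescArray(cx, &pda);
+ *   }
+ */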
+
+/************************************************************************/
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetDebuggerHandler(JSRuntime *rt, JSTrapHandler handler, void *closure);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetSourceHandler(JSRuntime *rt, JSSourceHandler handler, void *closure);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetExecuteHook(JSRuntime *rt, JSInterpreterHook hook, void *closure);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetCallHook(JSRuntime *rt, JSInterpreterHook hook, void *closure);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetObjectHook(JSRuntime *rt, JSObjectHook hook, void *closure);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetThrowHook(JSRuntime *rt, JSTrapHandler hook, void *closure);
+
+extern JS_PUBLIC_API(JSBool)
+JS_SetDebugErrorHook(JSRuntime *rt, JSDebugErrorHook hook, void *closure);
+
+/************************************************************************/
+
+extern JS_PUBLIC_API(size_t)
+JS_GetObjectTotalSize(JSContext *cx, JSObject *obj);
+
+extern JS_PUBLIC_API(size_t)
+JS_GetFunctionTotalSize(JSContext *cx, JSFunction *fun);
+
+extern JS_PUBLIC_API(size_t)
+JS_GetScriptTotalSize(JSContext *cx, JSScript *script);
+
+/*
+ * Get the top-most running script on cx starting from fp, or from the top of
+ * cx's frame stack if fp is null, and return its script filename flags. If
+ * the script has a null filename member, return JSFILENAME_NULL.
+ */
+extern JS_PUBLIC_API(uint32)
+JS_GetTopScriptFilenameFlags(JSContext *cx, JSStackFrame *fp);
+
+/*
+ * Get the script filename flags for the script. If the script doesn't have a
+ * filename, return JSFILENAME_NULL.
+ */
+extern JS_PUBLIC_API(uint32)
+JS_GetScriptFilenameFlags(JSScript *script);
+
+/*
+ * Associate flags with a script filename prefix in rt, so that any subsequent
+ * script compilation will inherit those flags if the script's filename is the
+ * same as prefix, or if prefix is a substring of the script's filename.
+ *
+ * The API defines only one flag bit, JSFILENAME_SYSTEM, leaving the remaining
+ * 31 bits up to the API client to define. The union of all 32 bits must not
+ * be a legal combination, however, in order to preserve JSFILENAME_NULL as a
+ * unique value. API clients may depend on JSFILENAME_SYSTEM being a set bit
+ * in JSFILENAME_NULL -- a script with a null filename member is presumed to
+ * be a "system" script.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_FlagScriptFilenamePrefix(JSRuntime *rt, const char *prefix, uint32 flags);
+
+#define JSFILENAME_NULL 0xffffffff /* null script filename */
+#define JSFILENAME_SYSTEM 0x00000001 /* "system" script, see below */
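+/*
+ * Editorial sketch, not part of the original header: an embedding that loads
+ * trusted code from a "system:" filename prefix (a hypothetical prefix) could
+ * mark those scripts and later test the currently running script.  Note that
+ * JSFILENAME_NULL has the JSFILENAME_SYSTEM bit set, so scripts with a null
+ * filename also pass the test, as described above.
+ *
+ *   JS_FlagScriptFilenamePrefix(rt, "system:", JSFILENAME_SYSTEM);
+ *   ...
+ *   if (JS_GetTopScriptFilenameFlags(cx, NULL) & JSFILENAME_SYSTEM) {
+ *       ... caller is a system script (or has no filename) ...
+ *   }
+ */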
+
+/*
+ * Return true if obj is a "system" object, that is, one flagged by a prior
+ * call to JS_FlagSystemObject(cx, obj). What "system" means is up to the API
+ * client, but it can be used to coordinate access control policies based on
+ * script filenames and their prefixes, using JS_FlagScriptFilenamePrefix and
+ * JS_GetTopScriptFilenameFlags.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_IsSystemObject(JSContext *cx, JSObject *obj);
+
+/*
+ * Flag obj as a "system" object. The API client can flag system objects to
+ * optimize access control checks. The engine stores but does not interpret
+ * the per-object flag set by this call.
+ */
+extern JS_PUBLIC_API(void)
+JS_FlagSystemObject(JSContext *cx, JSObject *obj);
+
+JS_END_EXTERN_C
+
+#endif /* jsdbgapi_h___ */
diff --git a/src/third_party/js-1.7/jsdhash.c b/src/third_party/js-1.7/jsdhash.c
new file mode 100644
index 00000000000..295883b2fdf
--- /dev/null
+++ b/src/third_party/js-1.7/jsdhash.c
@@ -0,0 +1,826 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla JavaScript code.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1999-2001
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ * Brendan Eich <brendan@mozilla.org> (Original Author)
+ * Chris Waterson <waterson@netscape.com>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * Double hashing implementation.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "jsbit.h"
+#include "jsdhash.h"
+#include "jsutil.h" /* for JS_ASSERT */
+
+#ifdef JS_DHASHMETER
+# if defined MOZILLA_CLIENT && defined DEBUG_XXXbrendan
+# include "nsTraceMalloc.h"
+# endif
+# define METER(x) x
+#else
+# define METER(x) /* nothing */
+#endif
+
+/*
+ * The following DEBUG-only code is used to assert that calls to one of
+ * table->ops or to an enumerator do not cause re-entry into a call that
+ * can mutate the table. The recursion level is stored in additional
+ * space allocated at the end of the entry store to avoid changing
+ * JSDHashTable, which could cause issues when mixing DEBUG and
+ * non-DEBUG components.
+ */
+#ifdef DEBUG
+
+#define RECURSION_LEVEL(table_) (*(uint32*)(table_->entryStore + \
+ JS_DHASH_TABLE_SIZE(table_) * \
+ table_->entrySize))
+
+#define ENTRY_STORE_EXTRA sizeof(uint32)
+#define INCREMENT_RECURSION_LEVEL(table_) (++RECURSION_LEVEL(table_))
+#define DECREMENT_RECURSION_LEVEL(table_) (--RECURSION_LEVEL(table_))
+
+#else
+
+#define ENTRY_STORE_EXTRA 0
+#define INCREMENT_RECURSION_LEVEL(table_) ((void)1)
+#define DECREMENT_RECURSION_LEVEL(table_) ((void)0)
+
+#endif /* defined(DEBUG) */
+
+JS_PUBLIC_API(void *)
+JS_DHashAllocTable(JSDHashTable *table, uint32 nbytes)
+{
+ return malloc(nbytes);
+}
+
+JS_PUBLIC_API(void)
+JS_DHashFreeTable(JSDHashTable *table, void *ptr)
+{
+ free(ptr);
+}
+
+JS_PUBLIC_API(JSDHashNumber)
+JS_DHashStringKey(JSDHashTable *table, const void *key)
+{
+ JSDHashNumber h;
+ const unsigned char *s;
+
+ h = 0;
+ for (s = key; *s != '\0'; s++)
+ h = (h >> (JS_DHASH_BITS - 4)) ^ (h << 4) ^ *s;
+ return h;
+}
+
+JS_PUBLIC_API(const void *)
+JS_DHashGetKeyStub(JSDHashTable *table, JSDHashEntryHdr *entry)
+{
+ JSDHashEntryStub *stub = (JSDHashEntryStub *)entry;
+
+ return stub->key;
+}
+
+JS_PUBLIC_API(JSDHashNumber)
+JS_DHashVoidPtrKeyStub(JSDHashTable *table, const void *key)
+{
+ return (JSDHashNumber)(unsigned long)key >> 2;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DHashMatchEntryStub(JSDHashTable *table,
+ const JSDHashEntryHdr *entry,
+ const void *key)
+{
+ const JSDHashEntryStub *stub = (const JSDHashEntryStub *)entry;
+
+ return stub->key == key;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DHashMatchStringKey(JSDHashTable *table,
+ const JSDHashEntryHdr *entry,
+ const void *key)
+{
+ const JSDHashEntryStub *stub = (const JSDHashEntryStub *)entry;
+
+ /* XXX tolerate null keys on account of sloppy Mozilla callers. */
+ return stub->key == key ||
+ (stub->key && key && strcmp(stub->key, key) == 0);
+}
+
+JS_PUBLIC_API(void)
+JS_DHashMoveEntryStub(JSDHashTable *table,
+ const JSDHashEntryHdr *from,
+ JSDHashEntryHdr *to)
+{
+ memcpy(to, from, table->entrySize);
+}
+
+JS_PUBLIC_API(void)
+JS_DHashClearEntryStub(JSDHashTable *table, JSDHashEntryHdr *entry)
+{
+ memset(entry, 0, table->entrySize);
+}
+
+JS_PUBLIC_API(void)
+JS_DHashFreeStringKey(JSDHashTable *table, JSDHashEntryHdr *entry)
+{
+ const JSDHashEntryStub *stub = (const JSDHashEntryStub *)entry;
+
+ free((void *) stub->key);
+ memset(entry, 0, table->entrySize);
+}
+
+JS_PUBLIC_API(void)
+JS_DHashFinalizeStub(JSDHashTable *table)
+{
+}
+
+static const JSDHashTableOps stub_ops = {
+ JS_DHashAllocTable,
+ JS_DHashFreeTable,
+ JS_DHashGetKeyStub,
+ JS_DHashVoidPtrKeyStub,
+ JS_DHashMatchEntryStub,
+ JS_DHashMoveEntryStub,
+ JS_DHashClearEntryStub,
+ JS_DHashFinalizeStub,
+ NULL
+};
+
+JS_PUBLIC_API(const JSDHashTableOps *)
+JS_DHashGetStubOps(void)
+{
+ return &stub_ops;
+}
+
+JS_PUBLIC_API(JSDHashTable *)
+JS_NewDHashTable(const JSDHashTableOps *ops, void *data, uint32 entrySize,
+ uint32 capacity)
+{
+ JSDHashTable *table;
+
+ table = (JSDHashTable *) malloc(sizeof *table);
+ if (!table)
+ return NULL;
+ if (!JS_DHashTableInit(table, ops, data, entrySize, capacity)) {
+ free(table);
+ return NULL;
+ }
+ return table;
+}
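+
+/*
+ * Editorial sketch, not part of the original file: the stub ops are enough
+ * to build a set keyed on void * pointers.  JS_DHashTableOperate and the
+ * JS_DHASH_ADD operator are declared in jsdhash.h; key stands for the
+ * pointer being inserted.
+ *
+ *   JSDHashTable *set = JS_NewDHashTable(JS_DHashGetStubOps(), NULL,
+ *                                        sizeof(JSDHashEntryStub),
+ *                                        JS_DHASH_MIN_SIZE);
+ *   if (set) {
+ *       JSDHashEntryStub *stub = (JSDHashEntryStub *)
+ *           JS_DHashTableOperate(set, key, JS_DHASH_ADD);
+ *       if (stub && !stub->key)
+ *           stub->key = key;          (a freshly added entry: record its key)
+ *       ...
+ *       JS_DHashTableDestroy(set);
+ *   }
+ */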
+
+JS_PUBLIC_API(void)
+JS_DHashTableDestroy(JSDHashTable *table)
+{
+ JS_DHashTableFinish(table);
+ free(table);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_DHashTableInit(JSDHashTable *table, const JSDHashTableOps *ops, void *data,
+ uint32 entrySize, uint32 capacity)
+{
+ int log2;
+ uint32 nbytes;
+
+#ifdef DEBUG
+ if (entrySize > 10 * sizeof(void *)) {
+ fprintf(stderr,
+ "jsdhash: for the table at address %p, the given entrySize"
+ " of %lu %s favors chaining over double hashing.\n",
+ (void *)table,
+ (unsigned long) entrySize,
+ (entrySize > 16 * sizeof(void*)) ? "definitely" : "probably");
+ }
+#endif
+
+ table->ops = ops;
+ table->data = data;
+ if (capacity < JS_DHASH_MIN_SIZE)
+ capacity = JS_DHASH_MIN_SIZE;
+
+ JS_CEILING_LOG2(log2, capacity);
+
+ capacity = JS_BIT(log2);
+ if (capacity >= JS_DHASH_SIZE_LIMIT)
+ return JS_FALSE;
+ table->hashShift = JS_DHASH_BITS - log2;
+ table->maxAlphaFrac = 0xC0; /* .75 */
+ table->minAlphaFrac = 0x40; /* .25 */
+ table->entrySize = entrySize;
+ table->entryCount = table->removedCount = 0;
+ table->generation = 0;
+ nbytes = capacity * entrySize;
+
+ table->entryStore = ops->allocTable(table, nbytes + ENTRY_STORE_EXTRA);
+ if (!table->entryStore)
+ return JS_FALSE;
+ memset(table->entryStore, 0, nbytes);
+ METER(memset(&table->stats, 0, sizeof table->stats));
+
+#ifdef DEBUG
+ RECURSION_LEVEL(table) = 0;
+#endif
+
+ return JS_TRUE;
+}
+
+/*
+ * Compute max and min load numbers (entry counts) from table params.
+ */
+#define MAX_LOAD(table, size) (((table)->maxAlphaFrac * (size)) >> 8)
+#define MIN_LOAD(table, size) (((table)->minAlphaFrac * (size)) >> 8)
+
+JS_PUBLIC_API(void)
+JS_DHashTableSetAlphaBounds(JSDHashTable *table,
+ float maxAlpha,
+ float minAlpha)
+{
+ uint32 size;
+
+ /*
+ * Reject obviously insane bounds, rather than trying to guess what the
+ * buggy caller intended.
+ */
+ JS_ASSERT(0.5 <= maxAlpha && maxAlpha < 1 && 0 <= minAlpha);
+ if (maxAlpha < 0.5 || 1 <= maxAlpha || minAlpha < 0)
+ return;
+
+ /*
+ * Ensure that at least one entry will always be free. If maxAlpha at
+ * minimum size leaves no entries free, reduce maxAlpha based on minimum
+ * size and the precision limit of maxAlphaFrac's fixed point format.
+ */
+ JS_ASSERT(JS_DHASH_MIN_SIZE - (maxAlpha * JS_DHASH_MIN_SIZE) >= 1);
+ if (JS_DHASH_MIN_SIZE - (maxAlpha * JS_DHASH_MIN_SIZE) < 1) {
+ maxAlpha = (float)
+ (JS_DHASH_MIN_SIZE - JS_MAX(JS_DHASH_MIN_SIZE / 256, 1))
+ / JS_DHASH_MIN_SIZE;
+ }
+
+ /*
+ * Ensure that minAlpha is strictly less than half maxAlpha. Take care
+ * not to truncate an entry's worth of alpha when storing in minAlphaFrac
+ * (8-bit fixed point format).
+ */
+ JS_ASSERT(minAlpha < maxAlpha / 2);
+ if (minAlpha >= maxAlpha / 2) {
+ size = JS_DHASH_TABLE_SIZE(table);
+ minAlpha = (size * maxAlpha - JS_MAX(size / 256, 1)) / (2 * size);
+ }
+
+ table->maxAlphaFrac = (uint8)(maxAlpha * 256);
+ table->minAlphaFrac = (uint8)(minAlpha * 256);
+}
+
+/*
+ * Double hashing needs the second hash code to be relatively prime to table
+ * size, so we simply make hash2 odd.
+ */
+#define HASH1(hash0, shift) ((hash0) >> (shift))
+#define HASH2(hash0,log2,shift) ((((hash0) << (log2)) >> (shift)) | 1)
+
+/*
+ * Reserve keyHash 0 for free entries and 1 for removed-entry sentinels. Note
+ * that a removed-entry sentinel need be stored only if the removed entry had
+ * a colliding entry added after it. Therefore we can use 1 as the collision
+ * flag in addition to the removed-entry sentinel value. Multiplicative hash
+ * uses the high order bits of keyHash, so this least-significant reservation
+ * should not hurt the hash function's effectiveness much.
+ *
+ * If you change any of these magic numbers, also update JS_DHASH_ENTRY_IS_LIVE
+ * in jsdhash.h. It used to be private to jsdhash.c, but then became public to
+ * assist iterator writers who inspect table->entryStore directly.
+ */
+#define COLLISION_FLAG ((JSDHashNumber) 1)
+#define MARK_ENTRY_FREE(entry) ((entry)->keyHash = 0)
+#define MARK_ENTRY_REMOVED(entry) ((entry)->keyHash = 1)
+#define ENTRY_IS_REMOVED(entry) ((entry)->keyHash == 1)
+#define ENTRY_IS_LIVE(entry) JS_DHASH_ENTRY_IS_LIVE(entry)
+#define ENSURE_LIVE_KEYHASH(hash0) if (hash0 < 2) hash0 -= 2; else (void)0
+
+/* Match an entry's keyHash against an unstored one computed from a key. */
+#define MATCH_ENTRY_KEYHASH(entry,hash0) \
+ (((entry)->keyHash & ~COLLISION_FLAG) == (hash0))
+
+/* Compute the address of the indexed entry in table. */
+#define ADDRESS_ENTRY(table, index) \
+ ((JSDHashEntryHdr *)((table)->entryStore + (index) * (table)->entrySize))
+
+JS_PUBLIC_API(void)
+JS_DHashTableFinish(JSDHashTable *table)
+{
+ char *entryAddr, *entryLimit;
+ uint32 entrySize;
+ JSDHashEntryHdr *entry;
+
+#ifdef DEBUG_XXXbrendan
+ static FILE *dumpfp = NULL;
+ if (!dumpfp) dumpfp = fopen("/tmp/jsdhash.bigdump", "w");
+ if (dumpfp) {
+#ifdef MOZILLA_CLIENT
+ NS_TraceStack(1, dumpfp);
+#endif
+ JS_DHashTableDumpMeter(table, NULL, dumpfp);
+ fputc('\n', dumpfp);
+ }
+#endif
+
+ INCREMENT_RECURSION_LEVEL(table);
+
+ /* Call finalize before clearing entries, so it can enumerate them. */
+ table->ops->finalize(table);
+
+ /* Clear any remaining live entries. */
+ entryAddr = table->entryStore;
+ entrySize = table->entrySize;
+ entryLimit = entryAddr + JS_DHASH_TABLE_SIZE(table) * entrySize;
+ while (entryAddr < entryLimit) {
+ entry = (JSDHashEntryHdr *)entryAddr;
+ if (ENTRY_IS_LIVE(entry)) {
+ METER(table->stats.removeEnums++);
+ table->ops->clearEntry(table, entry);
+ }
+ entryAddr += entrySize;
+ }
+
+ DECREMENT_RECURSION_LEVEL(table);
+ JS_ASSERT(RECURSION_LEVEL(table) == 0);
+
+ /* Free entry storage last. */
+ table->ops->freeTable(table, table->entryStore);
+}
+
+static JSDHashEntryHdr * JS_DHASH_FASTCALL
+SearchTable(JSDHashTable *table, const void *key, JSDHashNumber keyHash,
+ JSDHashOperator op)
+{
+ JSDHashNumber hash1, hash2;
+ int hashShift, sizeLog2;
+ JSDHashEntryHdr *entry, *firstRemoved;
+ JSDHashMatchEntry matchEntry;
+ uint32 sizeMask;
+
+ METER(table->stats.searches++);
+ JS_ASSERT(!(keyHash & COLLISION_FLAG));
+
+ /* Compute the primary hash address. */
+ hashShift = table->hashShift;
+ hash1 = HASH1(keyHash, hashShift);
+ entry = ADDRESS_ENTRY(table, hash1);
+
+ /* Miss: return space for a new entry. */
+ if (JS_DHASH_ENTRY_IS_FREE(entry)) {
+ METER(table->stats.misses++);
+ return entry;
+ }
+
+ /* Hit: return entry. */
+ matchEntry = table->ops->matchEntry;
+ if (MATCH_ENTRY_KEYHASH(entry, keyHash) && matchEntry(table, entry, key)) {
+ METER(table->stats.hits++);
+ return entry;
+ }
+
+ /* Collision: double hash. */
+ sizeLog2 = JS_DHASH_BITS - table->hashShift;
+ hash2 = HASH2(keyHash, sizeLog2, hashShift);
+ sizeMask = JS_BITMASK(sizeLog2);
+
+ /* Save the first removed entry pointer so JS_DHASH_ADD can recycle it. */
+ if (ENTRY_IS_REMOVED(entry)) {
+ firstRemoved = entry;
+ } else {
+ firstRemoved = NULL;
+ if (op == JS_DHASH_ADD)
+ entry->keyHash |= COLLISION_FLAG;
+ }
+
+ for (;;) {
+ METER(table->stats.steps++);
+ hash1 -= hash2;
+ hash1 &= sizeMask;
+
+ entry = ADDRESS_ENTRY(table, hash1);
+ if (JS_DHASH_ENTRY_IS_FREE(entry)) {
+ METER(table->stats.misses++);
+ return (firstRemoved && op == JS_DHASH_ADD) ? firstRemoved : entry;
+ }
+
+ if (MATCH_ENTRY_KEYHASH(entry, keyHash) &&
+ matchEntry(table, entry, key)) {
+ METER(table->stats.hits++);
+ return entry;
+ }
+
+ if (ENTRY_IS_REMOVED(entry)) {
+ if (!firstRemoved)
+ firstRemoved = entry;
+ } else {
+ if (op == JS_DHASH_ADD)
+ entry->keyHash |= COLLISION_FLAG;
+ }
+ }
+
+ /* NOTREACHED */
+ return NULL;
+}
+
+static JSBool
+ChangeTable(JSDHashTable *table, int deltaLog2)
+{
+ int oldLog2, newLog2;
+ uint32 oldCapacity, newCapacity;
+ char *newEntryStore, *oldEntryStore, *oldEntryAddr;
+ uint32 entrySize, i, nbytes;
+ JSDHashEntryHdr *oldEntry, *newEntry;
+ JSDHashGetKey getKey;
+ JSDHashMoveEntry moveEntry;
+#ifdef DEBUG
+ uint32 recursionLevel;
+#endif
+
+ /* Look, but don't touch, until we succeed in getting new entry store. */
+ oldLog2 = JS_DHASH_BITS - table->hashShift;
+ newLog2 = oldLog2 + deltaLog2;
+ oldCapacity = JS_BIT(oldLog2);
+ newCapacity = JS_BIT(newLog2);
+ if (newCapacity >= JS_DHASH_SIZE_LIMIT)
+ return JS_FALSE;
+ entrySize = table->entrySize;
+ nbytes = newCapacity * entrySize;
+
+ newEntryStore = table->ops->allocTable(table, nbytes + ENTRY_STORE_EXTRA);
+ if (!newEntryStore)
+ return JS_FALSE;
+
+ /* We can't fail from here on, so update table parameters. */
+#ifdef DEBUG
+ recursionLevel = RECURSION_LEVEL(table);
+#endif
+ table->hashShift = JS_DHASH_BITS - newLog2;
+ table->removedCount = 0;
+ table->generation++;
+
+ /* Assign the new entry store to table. */
+ memset(newEntryStore, 0, nbytes);
+ oldEntryAddr = oldEntryStore = table->entryStore;
+ table->entryStore = newEntryStore;
+ getKey = table->ops->getKey;
+ moveEntry = table->ops->moveEntry;
+#ifdef DEBUG
+ RECURSION_LEVEL(table) = recursionLevel;
+#endif
+
+ /* Copy only live entries, leaving removed ones behind. */
+ for (i = 0; i < oldCapacity; i++) {
+ oldEntry = (JSDHashEntryHdr *)oldEntryAddr;
+ if (ENTRY_IS_LIVE(oldEntry)) {
+ oldEntry->keyHash &= ~COLLISION_FLAG;
+ newEntry = SearchTable(table, getKey(table, oldEntry),
+ oldEntry->keyHash, JS_DHASH_ADD);
+ JS_ASSERT(JS_DHASH_ENTRY_IS_FREE(newEntry));
+ moveEntry(table, oldEntry, newEntry);
+ newEntry->keyHash = oldEntry->keyHash;
+ }
+ oldEntryAddr += entrySize;
+ }
+
+ table->ops->freeTable(table, oldEntryStore);
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSDHashEntryHdr *) JS_DHASH_FASTCALL
+JS_DHashTableOperate(JSDHashTable *table, const void *key, JSDHashOperator op)
+{
+ JSDHashNumber keyHash;
+ JSDHashEntryHdr *entry;
+ uint32 size;
+ int deltaLog2;
+
+ JS_ASSERT(op == JS_DHASH_LOOKUP || RECURSION_LEVEL(table) == 0);
+ INCREMENT_RECURSION_LEVEL(table);
+
+ keyHash = table->ops->hashKey(table, key);
+ keyHash *= JS_DHASH_GOLDEN_RATIO;
+
+ /* Avoid 0 and 1 hash codes, they indicate free and removed entries. */
+ ENSURE_LIVE_KEYHASH(keyHash);
+ keyHash &= ~COLLISION_FLAG;
+
+ switch (op) {
+ case JS_DHASH_LOOKUP:
+ METER(table->stats.lookups++);
+ entry = SearchTable(table, key, keyHash, op);
+ break;
+
+ case JS_DHASH_ADD:
+ /*
+ * If alpha is >= .75, grow or compress the table. If key is already
+ * in the table, we may grow once more than necessary, but only if we
+ * are on the edge of being overloaded.
+ */
+ size = JS_DHASH_TABLE_SIZE(table);
+ if (table->entryCount + table->removedCount >= MAX_LOAD(table, size)) {
+ /* Compress if a quarter or more of all entries are removed. */
+ if (table->removedCount >= size >> 2) {
+ METER(table->stats.compresses++);
+ deltaLog2 = 0;
+ } else {
+ METER(table->stats.grows++);
+ deltaLog2 = 1;
+ }
+
+ /*
+ * Grow or compress table, returning null if ChangeTable fails and
+ * falling through might claim the last free entry.
+ */
+ if (!ChangeTable(table, deltaLog2) &&
+ table->entryCount + table->removedCount == size - 1) {
+ METER(table->stats.addFailures++);
+ entry = NULL;
+ break;
+ }
+ }
+
+ /*
+ * Look for entry after possibly growing, so we don't have to add it,
+ * then skip it while growing the table and re-add it after.
+ */
+ entry = SearchTable(table, key, keyHash, op);
+ if (!ENTRY_IS_LIVE(entry)) {
+ /* Initialize the entry, indicating that it's no longer free. */
+ METER(table->stats.addMisses++);
+ if (ENTRY_IS_REMOVED(entry)) {
+ METER(table->stats.addOverRemoved++);
+ table->removedCount--;
+ keyHash |= COLLISION_FLAG;
+ }
+ if (table->ops->initEntry &&
+ !table->ops->initEntry(table, entry, key)) {
+ /* We haven't claimed entry yet; fail with null return. */
+ memset(entry + 1, 0, table->entrySize - sizeof *entry);
+ entry = NULL;
+ break;
+ }
+ entry->keyHash = keyHash;
+ table->entryCount++;
+ }
+ METER(else table->stats.addHits++);
+ break;
+
+ case JS_DHASH_REMOVE:
+ entry = SearchTable(table, key, keyHash, op);
+ if (ENTRY_IS_LIVE(entry)) {
+ /* Clear this entry and mark it as "removed". */
+ METER(table->stats.removeHits++);
+ JS_DHashTableRawRemove(table, entry);
+
+ /* Shrink if alpha is <= .25 and table isn't too small already. */
+ size = JS_DHASH_TABLE_SIZE(table);
+ if (size > JS_DHASH_MIN_SIZE &&
+ table->entryCount <= MIN_LOAD(table, size)) {
+ METER(table->stats.shrinks++);
+ (void) ChangeTable(table, -1);
+ }
+ }
+ METER(else table->stats.removeMisses++);
+ entry = NULL;
+ break;
+
+ default:
+ JS_ASSERT(0);
+ entry = NULL;
+ }
+
+ DECREMENT_RECURSION_LEVEL(table);
+
+ return entry;
+}
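+
+/*
+ * Editorial sketch, not part of the original file: a JS_DHASH_LOOKUP never
+ * returns null, so callers test the returned header with
+ * JS_DHASH_ENTRY_IS_BUSY before downcasting to their concrete entry type.
+ *
+ *   JSDHashEntryHdr *hdr = JS_DHashTableOperate(table, key, JS_DHASH_LOOKUP);
+ *   if (JS_DHASH_ENTRY_IS_BUSY(hdr)) {
+ *       ... key is present; cast hdr to the sub-type entry ...
+ *   }
+ */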
+
+JS_PUBLIC_API(void)
+JS_DHashTableRawRemove(JSDHashTable *table, JSDHashEntryHdr *entry)
+{
+ JSDHashNumber keyHash; /* load first in case clearEntry goofs it */
+
+ JS_ASSERT(JS_DHASH_ENTRY_IS_LIVE(entry));
+ keyHash = entry->keyHash;
+ table->ops->clearEntry(table, entry);
+ if (keyHash & COLLISION_FLAG) {
+ MARK_ENTRY_REMOVED(entry);
+ table->removedCount++;
+ } else {
+ METER(table->stats.removeFrees++);
+ MARK_ENTRY_FREE(entry);
+ }
+ table->entryCount--;
+}
+
+JS_PUBLIC_API(uint32)
+JS_DHashTableEnumerate(JSDHashTable *table, JSDHashEnumerator etor, void *arg)
+{
+ char *entryAddr, *entryLimit;
+ uint32 i, capacity, entrySize, ceiling;
+ JSBool didRemove;
+ JSDHashEntryHdr *entry;
+ JSDHashOperator op;
+
+ INCREMENT_RECURSION_LEVEL(table);
+
+ entryAddr = table->entryStore;
+ entrySize = table->entrySize;
+ capacity = JS_DHASH_TABLE_SIZE(table);
+ entryLimit = entryAddr + capacity * entrySize;
+ i = 0;
+ didRemove = JS_FALSE;
+ while (entryAddr < entryLimit) {
+ entry = (JSDHashEntryHdr *)entryAddr;
+ if (ENTRY_IS_LIVE(entry)) {
+ op = etor(table, entry, i++, arg);
+ if (op & JS_DHASH_REMOVE) {
+ METER(table->stats.removeEnums++);
+ JS_DHashTableRawRemove(table, entry);
+ didRemove = JS_TRUE;
+ }
+ if (op & JS_DHASH_STOP)
+ break;
+ }
+ entryAddr += entrySize;
+ }
+
+ JS_ASSERT(!didRemove || RECURSION_LEVEL(table) == 1);
+
+ /*
+ * Shrink or compress if a quarter or more of all entries are removed, or
+ * if the table is underloaded according to the configured minimum alpha,
+ * and is not minimal-size already. Do this only if we removed above, so
+ * non-removing enumerations can count on stable table->entryStore until
+ * the next non-lookup-Operate or removing-Enumerate.
+ */
+ if (didRemove &&
+ (table->removedCount >= capacity >> 2 ||
+ (capacity > JS_DHASH_MIN_SIZE &&
+ table->entryCount <= MIN_LOAD(table, capacity)))) {
+ METER(table->stats.enumShrinks++);
+ capacity = table->entryCount;
+ capacity += capacity >> 1;
+ if (capacity < JS_DHASH_MIN_SIZE)
+ capacity = JS_DHASH_MIN_SIZE;
+
+ JS_CEILING_LOG2(ceiling, capacity);
+ ceiling -= JS_DHASH_BITS - table->hashShift;
+
+ (void) ChangeTable(table, ceiling);
+ }
+
+ DECREMENT_RECURSION_LEVEL(table);
+
+ return i;
+}
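+
+/*
+ * Editorial sketch, not part of the original file: an enumerator receives
+ * each live entry in turn and returns a JSDHashOperator; JS_DHASH_REMOVE
+ * drops the entry and JS_DHASH_STOP ends the walk early.  RemoveAllEntries
+ * is a hypothetical callback whose signature follows the JSDHashEnumerator
+ * typedef in jsdhash.h.
+ *
+ *   static JSDHashOperator
+ *   RemoveAllEntries(JSDHashTable *table, JSDHashEntryHdr *hdr,
+ *                    uint32 number, void *arg)
+ *   {
+ *       return JS_DHASH_REMOVE;
+ *   }
+ *
+ *   ...
+ *   JS_DHashTableEnumerate(table, RemoveAllEntries, NULL);
+ */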
+
+#ifdef JS_DHASHMETER
+#include <math.h>
+
+JS_PUBLIC_API(void)
+JS_DHashTableDumpMeter(JSDHashTable *table, JSDHashEnumerator dump, FILE *fp)
+{
+ char *entryAddr;
+ uint32 entrySize, entryCount;
+ int hashShift, sizeLog2;
+ uint32 i, tableSize, sizeMask, chainLen, maxChainLen, chainCount;
+ JSDHashNumber hash1, hash2, saveHash1, maxChainHash1, maxChainHash2;
+ double sqsum, mean, variance, sigma;
+ JSDHashEntryHdr *entry, *probe;
+
+ entryAddr = table->entryStore;
+ entrySize = table->entrySize;
+ hashShift = table->hashShift;
+ sizeLog2 = JS_DHASH_BITS - hashShift;
+ tableSize = JS_DHASH_TABLE_SIZE(table);
+ sizeMask = JS_BITMASK(sizeLog2);
+ chainCount = maxChainLen = 0;
+ hash2 = 0;
+ sqsum = 0;
+
+ for (i = 0; i < tableSize; i++) {
+ entry = (JSDHashEntryHdr *)entryAddr;
+ entryAddr += entrySize;
+ if (!ENTRY_IS_LIVE(entry))
+ continue;
+ hash1 = HASH1(entry->keyHash & ~COLLISION_FLAG, hashShift);
+ saveHash1 = hash1;
+ probe = ADDRESS_ENTRY(table, hash1);
+ chainLen = 1;
+ if (probe == entry) {
+ /* Start of a (possibly unit-length) chain. */
+ chainCount++;
+ } else {
+ hash2 = HASH2(entry->keyHash & ~COLLISION_FLAG, sizeLog2,
+ hashShift);
+ do {
+ chainLen++;
+ hash1 -= hash2;
+ hash1 &= sizeMask;
+ probe = ADDRESS_ENTRY(table, hash1);
+ } while (probe != entry);
+ }
+ sqsum += chainLen * chainLen;
+ if (chainLen > maxChainLen) {
+ maxChainLen = chainLen;
+ maxChainHash1 = saveHash1;
+ maxChainHash2 = hash2;
+ }
+ }
+
+ entryCount = table->entryCount;
+ if (entryCount && chainCount) {
+ mean = (double)entryCount / chainCount;
+ variance = chainCount * sqsum - entryCount * entryCount;
+ if (variance < 0 || chainCount == 1)
+ variance = 0;
+ else
+ variance /= chainCount * (chainCount - 1);
+ sigma = sqrt(variance);
+ } else {
+ mean = sigma = 0;
+ }
+
+ fprintf(fp, "Double hashing statistics:\n");
+ fprintf(fp, " table size (in entries): %u\n", tableSize);
+ fprintf(fp, " number of entries: %u\n", table->entryCount);
+ fprintf(fp, " number of removed entries: %u\n", table->removedCount);
+ fprintf(fp, " number of searches: %u\n", table->stats.searches);
+ fprintf(fp, " number of hits: %u\n", table->stats.hits);
+ fprintf(fp, " number of misses: %u\n", table->stats.misses);
+ fprintf(fp, " mean steps per search: %g\n", table->stats.searches ?
+ (double)table->stats.steps
+ / table->stats.searches :
+ 0.);
+ fprintf(fp, " mean hash chain length: %g\n", mean);
+ fprintf(fp, " standard deviation: %g\n", sigma);
+ fprintf(fp, " maximum hash chain length: %u\n", maxChainLen);
+ fprintf(fp, " number of lookups: %u\n", table->stats.lookups);
+ fprintf(fp, " adds that made a new entry: %u\n", table->stats.addMisses);
+ fprintf(fp, "adds that recycled removeds: %u\n", table->stats.addOverRemoved);
+ fprintf(fp, " adds that found an entry: %u\n", table->stats.addHits);
+ fprintf(fp, " add failures: %u\n", table->stats.addFailures);
+ fprintf(fp, " useful removes: %u\n", table->stats.removeHits);
+ fprintf(fp, " useless removes: %u\n", table->stats.removeMisses);
+ fprintf(fp, "removes that freed an entry: %u\n", table->stats.removeFrees);
+ fprintf(fp, " removes while enumerating: %u\n", table->stats.removeEnums);
+ fprintf(fp, " number of grows: %u\n", table->stats.grows);
+ fprintf(fp, " number of shrinks: %u\n", table->stats.shrinks);
+ fprintf(fp, " number of compresses: %u\n", table->stats.compresses);
+ fprintf(fp, "number of enumerate shrinks: %u\n", table->stats.enumShrinks);
+
+ if (dump && maxChainLen && hash2) {
+ fputs("Maximum hash chain:\n", fp);
+ hash1 = maxChainHash1;
+ hash2 = maxChainHash2;
+ entry = ADDRESS_ENTRY(table, hash1);
+ i = 0;
+ do {
+ if (dump(table, entry, i++, fp) != JS_DHASH_NEXT)
+ break;
+ hash1 -= hash2;
+ hash1 &= sizeMask;
+ entry = ADDRESS_ENTRY(table, hash1);
+ } while (JS_DHASH_ENTRY_IS_BUSY(entry));
+ }
+}
+#endif /* JS_DHASHMETER */
diff --git a/src/third_party/js-1.7/jsdhash.h b/src/third_party/js-1.7/jsdhash.h
new file mode 100644
index 00000000000..76867e52a61
--- /dev/null
+++ b/src/third_party/js-1.7/jsdhash.h
@@ -0,0 +1,581 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla JavaScript code.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1999-2001
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ * Brendan Eich <brendan@mozilla.org> (Original Author)
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsdhash_h___
+#define jsdhash_h___
+/*
+ * Double hashing, a la Knuth 6.
+ */
+#include "jstypes.h"
+
+JS_BEGIN_EXTERN_C
+
+#if defined(__GNUC__) && defined(__i386__) && (__GNUC__ >= 3) && !defined(XP_OS2)
+#define JS_DHASH_FASTCALL __attribute__ ((regparm (3),stdcall))
+#elif defined(XP_WIN)
+#define JS_DHASH_FASTCALL __fastcall
+#else
+#define JS_DHASH_FASTCALL
+#endif
+
+#ifdef DEBUG_XXXbrendan
+#define JS_DHASHMETER 1
+#endif
+
+/* Table size limit, do not equal or exceed (see min&maxAlphaFrac, below). */
+#undef JS_DHASH_SIZE_LIMIT
+#define JS_DHASH_SIZE_LIMIT JS_BIT(24)
+
+/* Minimum table size, or gross entry count (net is at most .75 loaded). */
+#ifndef JS_DHASH_MIN_SIZE
+#define JS_DHASH_MIN_SIZE 16
+#elif (JS_DHASH_MIN_SIZE & (JS_DHASH_MIN_SIZE - 1)) != 0
+#error "JS_DHASH_MIN_SIZE must be a power of two!"
+#endif
+
+/*
+ * Multiplicative hash uses an unsigned 32 bit integer and the golden ratio,
+ * expressed as a fixed-point 32-bit fraction.
+ */
+#define JS_DHASH_BITS 32
+#define JS_DHASH_GOLDEN_RATIO 0x9E3779B9U
+
+/* Primitive and forward-struct typedefs. */
+typedef uint32 JSDHashNumber;
+typedef struct JSDHashEntryHdr JSDHashEntryHdr;
+typedef struct JSDHashEntryStub JSDHashEntryStub;
+typedef struct JSDHashTable JSDHashTable;
+typedef struct JSDHashTableOps JSDHashTableOps;
+
+/*
+ * Table entry header structure.
+ *
+ * In order to allow in-line allocation of key and value, we do not declare
+ * either here. Instead, the API uses const void *key as a formal parameter,
+ * and asks each entry for its key when necessary via a getKey callback, used
+ * when growing or shrinking the table. Other callback types are defined
+ * below and grouped into the JSDHashTableOps structure, for single static
+ * initialization per hash table sub-type.
+ *
+ * Each hash table sub-type should nest the JSDHashEntryHdr structure at the
+ * front of its particular entry type. The keyHash member contains the result
+ * of multiplying the hash code returned from the hashKey callback (see below)
+ * by JS_DHASH_GOLDEN_RATIO, then constraining the result to avoid the magic 0
+ * and 1 values. The stored keyHash value is table size invariant, and it is
+ * maintained automatically by JS_DHashTableOperate -- users should never set
+ * it, and its only uses should be via the entry macros below.
+ *
+ * The JS_DHASH_ENTRY_IS_LIVE macro tests whether entry is neither free nor
+ * removed. An entry may be either busy or free; if busy, it may be live or
+ * removed. Consumers of this API should not access members of entries that
+ * are not live.
+ *
+ * However, use JS_DHASH_ENTRY_IS_BUSY for faster liveness testing of entries
+ * returned by JS_DHashTableOperate, as JS_DHashTableOperate never returns a
+ * non-live, busy (i.e., removed) entry pointer to its caller. See below for
+ * more details on JS_DHashTableOperate's calling rules.
+ */
+struct JSDHashEntryHdr {
+ JSDHashNumber keyHash; /* every entry must begin like this */
+};
+
+#define JS_DHASH_ENTRY_IS_FREE(entry) ((entry)->keyHash == 0)
+#define JS_DHASH_ENTRY_IS_BUSY(entry) (!JS_DHASH_ENTRY_IS_FREE(entry))
+#define JS_DHASH_ENTRY_IS_LIVE(entry) ((entry)->keyHash >= 2)
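+
+/*
+ * Editorial sketch, not part of the original header: a sub-type nests the
+ * header as its first member and adds its own key and value members in-line;
+ * the table then stores whole entries in entryStore.  NameEntry is a
+ * hypothetical entry type.
+ *
+ *   typedef struct NameEntry {
+ *       JSDHashEntryHdr hdr;       (must be the first member)
+ *       const char      *name;     (the key, owned by the entry)
+ *       void            *value;    (the payload)
+ *   } NameEntry;
+ *
+ * A table of such entries is initialized with sizeof(NameEntry) as the
+ * entrySize argument to JS_DHashTableInit or JS_NewDHashTable.
+ */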
+
+/*
+ * A JSDHashTable is currently 8 words (without the JS_DHASHMETER overhead)
+ * on most architectures, and may be allocated on the stack or within another
+ * structure or class (see below for the Init and Finish functions to use).
+ *
+ * To decide whether to use double hashing vs. chaining, we need to develop a
+ * trade-off relation, as follows:
+ *
+ * Let alpha be the load factor, esize the entry size in words, count the
+ * entry count, and pow2 the power-of-two table size in entries.
+ *
+ * (JSDHashTable overhead) > (JSHashTable overhead)
+ * (unused table entry space) > (malloc and .next overhead per entry) +
+ * (buckets overhead)
+ * (1 - alpha) * esize * pow2 > 2 * count + pow2
+ *
+ * Notice that alpha is by definition (count / pow2):
+ *
+ * (1 - alpha) * esize * pow2 > 2 * alpha * pow2 + pow2
+ * (1 - alpha) * esize > 2 * alpha + 1
+ *
+ * esize > (1 + 2 * alpha) / (1 - alpha)
+ *
+ * This assumes both tables must keep keyHash, key, and value for each entry,
+ * where key and value point to separately allocated strings or structures.
+ * If key and value can be combined into one pointer, then the trade-off is:
+ *
+ * esize > (1 + 3 * alpha) / (1 - alpha)
+ *
+ * If the entry value can be a subtype of JSDHashEntryHdr, rather than a type
+ * that must be allocated separately and referenced by an entry.value pointer
+ * member, and provided key's allocation can be fused with its entry's, then
+ * k (the words wasted per entry with chaining) is 4.
+ *
+ * To see these curves, feed gnuplot input like so:
+ *
+ * gnuplot> f(x,k) = (1 + k * x) / (1 - x)
+ * gnuplot> plot [0:.75] f(x,2), f(x,3), f(x,4)
+ *
+ * For k of 2 and a well-loaded table (alpha > .5), esize must be more than 4
+ * words for chaining to be more space-efficient than double hashing.
+ *
+ * Solving for alpha helps us decide when to shrink an underloaded table:
+ *
+ * esize > (1 + k * alpha) / (1 - alpha)
+ * esize - alpha * esize > 1 + k * alpha
+ * esize - 1 > (k + esize) * alpha
+ * (esize - 1) / (k + esize) > alpha
+ *
+ * alpha < (esize - 1) / (esize + k)
+ *
+ * Therefore double hashing should keep alpha >= (esize - 1) / (esize + k),
+ * assuming esize is not too large (in which case, chaining should probably be
+ * used for any alpha). For esize=2 and k=3, we want alpha >= .2; for esize=3
+ * and k=2, we want alpha >= .4. For k=4, esize could be 6, and alpha >= .5
+ * would still obtain. See the JS_DHASH_MIN_ALPHA macro further below.
+ *
+ * The current implementation uses a configurable lower bound on alpha, which
+ * defaults to .25, when deciding to shrink the table (while still respecting
+ * JS_DHASH_MIN_SIZE).
+ *
+ * Note a qualitative difference between chaining and double hashing: under
+ * chaining, entry addresses are stable across table shrinks and grows. With
+ * double hashing, you can't safely hold an entry pointer and use it after an
+ * ADD or REMOVE operation, unless you sample table->generation before adding
+ * or removing, and compare the sample after, dereferencing the entry pointer
+ * only if table->generation has not changed.
+ *
+ * The moral of this story: there is no one-size-fits-all hash table scheme,
+ * but for small table entry size, and assuming entry address stability is not
+ * required, double hashing wins.
+ */
+struct JSDHashTable {
+ const JSDHashTableOps *ops; /* virtual operations, see below */
+ void *data; /* ops- and instance-specific data */
+ int16 hashShift; /* multiplicative hash shift */
+ uint8 maxAlphaFrac; /* 8-bit fixed point max alpha */
+ uint8 minAlphaFrac; /* 8-bit fixed point min alpha */
+ uint32 entrySize; /* number of bytes in an entry */
+ uint32 entryCount; /* number of entries in table */
+ uint32 removedCount; /* removed entry sentinels in table */
+ uint32 generation; /* entry storage generation number */
+ char *entryStore; /* entry storage */
+#ifdef JS_DHASHMETER
+ struct JSDHashStats {
+ uint32 searches; /* total number of table searches */
+ uint32 steps; /* hash chain links traversed */
+ uint32 hits; /* searches that found key */
+ uint32 misses; /* searches that didn't find key */
+ uint32 lookups; /* number of JS_DHASH_LOOKUPs */
+ uint32 addMisses; /* adds that miss, and do work */
+ uint32 addOverRemoved; /* adds that recycled a removed entry */
+ uint32 addHits; /* adds that hit an existing entry */
+ uint32 addFailures; /* out-of-memory during add growth */
+ uint32 removeHits; /* removes that hit, and do work */
+ uint32 removeMisses; /* useless removes that miss */
+ uint32 removeFrees; /* removes that freed entry directly */
+ uint32 removeEnums; /* removes done by Enumerate */
+ uint32 grows; /* table expansions */
+ uint32 shrinks; /* table contractions */
+ uint32 compresses; /* table compressions */
+ uint32 enumShrinks; /* contractions after Enumerate */
+ } stats;
+#endif
+};
+
+/*
+ * Size in entries (gross, not net of free and removed sentinels) for table.
+ * We store hashShift rather than sizeLog2 to optimize the collision-free case
+ * in SearchTable.
+ */
+#define JS_DHASH_TABLE_SIZE(table) JS_BIT(JS_DHASH_BITS - (table)->hashShift)
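+
+/*
+ * Illustrative note (not part of the original header): a freshly initialized
+ * table of the minimum size stores hashShift = 32 - 4 = 28, so
+ * JS_DHASH_TABLE_SIZE(table) expands to JS_BIT(4) == 16.
+ */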
+
+/*
+ * Table space at entryStore is allocated and freed using these callbacks.
+ * The allocator should return null on error only (not if called with nbytes
+ * equal to 0; but note that jsdhash.c code will never call with 0 nbytes).
+ */
+typedef void *
+(* JS_DLL_CALLBACK JSDHashAllocTable)(JSDHashTable *table, uint32 nbytes);
+
+typedef void
+(* JS_DLL_CALLBACK JSDHashFreeTable) (JSDHashTable *table, void *ptr);
+
+/*
+ * When a table grows or shrinks, each entry is queried for its key using this
+ * callback. NB: in that event, entry is not in table any longer; it's in the
+ * old entryStore vector, which is due to be freed once all entries have been
+ * moved via moveEntry callbacks.
+ */
+typedef const void *
+(* JS_DLL_CALLBACK JSDHashGetKey) (JSDHashTable *table,
+ JSDHashEntryHdr *entry);
+
+/*
+ * Compute the hash code for a given key to be looked up, added, or removed
+ * from table. A hash code may have any JSDHashNumber value.
+ */
+typedef JSDHashNumber
+(* JS_DLL_CALLBACK JSDHashHashKey) (JSDHashTable *table, const void *key);
+
+/*
+ * Compare the key identifying entry in table with the provided key parameter.
+ * Return JS_TRUE if keys match, JS_FALSE otherwise.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSDHashMatchEntry)(JSDHashTable *table,
+ const JSDHashEntryHdr *entry,
+ const void *key);
+
+/*
+ * Copy the data starting at from to the new entry storage at to. Do not add
+ * reference counts for any strong references in the entry, however, as this
+ * is a "move" operation: the old entry storage at from will be freed without
+ * any reference-decrementing callback shortly.
+ */
+typedef void
+(* JS_DLL_CALLBACK JSDHashMoveEntry)(JSDHashTable *table,
+ const JSDHashEntryHdr *from,
+ JSDHashEntryHdr *to);
+
+/*
+ * Clear the entry and drop any strong references it holds. This callback is
+ * invoked during a JS_DHASH_REMOVE operation (see below for operation codes),
+ * but only if the given key is found in the table.
+ */
+typedef void
+(* JS_DLL_CALLBACK JSDHashClearEntry)(JSDHashTable *table,
+ JSDHashEntryHdr *entry);
+
+/*
+ * Called when a table (whether allocated dynamically by itself, or nested in
+ * a larger structure, or allocated on the stack) is finished. This callback
+ * allows table->ops-specific code to finalize table->data.
+ */
+typedef void
+(* JS_DLL_CALLBACK JSDHashFinalize) (JSDHashTable *table);
+
+/*
+ * Initialize a new entry, apart from keyHash. This function is called when
+ * JS_DHashTableOperate's JS_DHASH_ADD case finds no existing entry for the
+ * given key, and must add a new one. At that point, entry->keyHash is not
+ * set yet, to avoid claiming the last free entry in a severely overloaded
+ * table.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSDHashInitEntry)(JSDHashTable *table,
+ JSDHashEntryHdr *entry,
+ const void *key);
+
+/*
+ * Finally, the "vtable" structure for JSDHashTable. The first eight hooks
+ * must be provided by implementations; they're called unconditionally by the
+ * generic jsdhash.c code. Hooks after these may be null.
+ *
+ * Summary of allocation-related hook usage with C++ placement new emphasis:
+ * allocTable Allocate raw bytes with malloc, no ctors run.
+ * freeTable Free raw bytes with free, no dtors run.
+ * initEntry Call placement new using default key-based ctor.
+ * Return JS_TRUE on success, JS_FALSE on error.
+ * moveEntry Call placement new using copy ctor, run dtor on old
+ * entry storage.
+ * clearEntry Run dtor on entry.
+ * finalize Stub unless table->data was initialized and needs to
+ * be finalized.
+ *
+ * Note the reason why initEntry is optional: the default hooks (stubs) clear
+ * entry storage. On successful JS_DHashTableOperate(tbl, key, JS_DHASH_ADD),
+ * the returned entry pointer addresses an entry struct whose keyHash member
+ * has been set non-zero, but all other entry members are still clear (null).
+ * JS_DHASH_ADD callers can test such members to see whether the entry was
+ * newly created by the JS_DHASH_ADD call that just succeeded. If placement
+ * new or similar initialization is required, define an initEntry hook. Of
+ * course, the clearEntry hook must zero or null appropriately.
+ *
+ * XXX assumes 0 is null for pointer types.
+ */
+struct JSDHashTableOps {
+ /* Mandatory hooks. All implementations must provide these. */
+ JSDHashAllocTable allocTable;
+ JSDHashFreeTable freeTable;
+ JSDHashGetKey getKey;
+ JSDHashHashKey hashKey;
+ JSDHashMatchEntry matchEntry;
+ JSDHashMoveEntry moveEntry;
+ JSDHashClearEntry clearEntry;
+ JSDHashFinalize finalize;
+
+ /* Optional hooks start here. If null, these are not called. */
+ JSDHashInitEntry initEntry;
+};
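+
+/*
+ * Illustrative sketch (not part of the original header): a static ops table
+ * for entries laid out like JSDHashEntryStub but keyed by C strings, pairing
+ * the stub hooks declared below with the string-key helpers. Only the
+ * variable name is hypothetical.
+ *
+ *   static const JSDHashTableOps stringKeyOps = {
+ *       JS_DHashAllocTable,
+ *       JS_DHashFreeTable,
+ *       JS_DHashGetKeyStub,
+ *       JS_DHashStringKey,
+ *       JS_DHashMatchStringKey,
+ *       JS_DHashMoveEntryStub,
+ *       JS_DHashClearEntryStub,
+ *       JS_DHashFinalizeStub,
+ *       NULL                        // optional initEntry hook omitted
+ *   };
+ */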
+
+/*
+ * Default implementations for the above ops.
+ */
+extern JS_PUBLIC_API(void *)
+JS_DHashAllocTable(JSDHashTable *table, uint32 nbytes);
+
+extern JS_PUBLIC_API(void)
+JS_DHashFreeTable(JSDHashTable *table, void *ptr);
+
+extern JS_PUBLIC_API(JSDHashNumber)
+JS_DHashStringKey(JSDHashTable *table, const void *key);
+
+/* A minimal entry contains a keyHash header and a void key pointer. */
+struct JSDHashEntryStub {
+ JSDHashEntryHdr hdr;
+ const void *key;
+};
+
+extern JS_PUBLIC_API(const void *)
+JS_DHashGetKeyStub(JSDHashTable *table, JSDHashEntryHdr *entry);
+
+extern JS_PUBLIC_API(JSDHashNumber)
+JS_DHashVoidPtrKeyStub(JSDHashTable *table, const void *key);
+
+extern JS_PUBLIC_API(JSBool)
+JS_DHashMatchEntryStub(JSDHashTable *table,
+ const JSDHashEntryHdr *entry,
+ const void *key);
+
+extern JS_PUBLIC_API(JSBool)
+JS_DHashMatchStringKey(JSDHashTable *table,
+ const JSDHashEntryHdr *entry,
+ const void *key);
+
+extern JS_PUBLIC_API(void)
+JS_DHashMoveEntryStub(JSDHashTable *table,
+ const JSDHashEntryHdr *from,
+ JSDHashEntryHdr *to);
+
+extern JS_PUBLIC_API(void)
+JS_DHashClearEntryStub(JSDHashTable *table, JSDHashEntryHdr *entry);
+
+extern JS_PUBLIC_API(void)
+JS_DHashFreeStringKey(JSDHashTable *table, JSDHashEntryHdr *entry);
+
+extern JS_PUBLIC_API(void)
+JS_DHashFinalizeStub(JSDHashTable *table);
+
+/*
+ * If you use JSDHashEntryStub or a subclass of it as your entry struct, and
+ * if your entries move via memcpy and clear via memset(0), you can use these
+ * stub operations.
+ */
+extern JS_PUBLIC_API(const JSDHashTableOps *)
+JS_DHashGetStubOps(void);
+
+/*
+ * Dynamically allocate a new JSDHashTable using malloc, initialize it using
+ * JS_DHashTableInit, and return its address. Return null on malloc failure.
+ * Note that the entry storage at table->entryStore will be allocated using
+ * the ops->allocTable callback.
+ */
+extern JS_PUBLIC_API(JSDHashTable *)
+JS_NewDHashTable(const JSDHashTableOps *ops, void *data, uint32 entrySize,
+ uint32 capacity);
+
+/*
+ * Finalize table's data, free its entry storage (via table->ops->freeTable),
+ * and return the memory starting at table to the malloc heap.
+ */
+extern JS_PUBLIC_API(void)
+JS_DHashTableDestroy(JSDHashTable *table);
+
+/*
+ * Initialize table with ops, data, entrySize, and capacity. Capacity is a
+ * guess for the smallest table size at which the table will usually be less
+ * than 75% loaded (the table will grow or shrink as needed; capacity serves
+ * only to avoid inevitable early growth from JS_DHASH_MIN_SIZE).
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_DHashTableInit(JSDHashTable *table, const JSDHashTableOps *ops, void *data,
+ uint32 entrySize, uint32 capacity);
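+
+/*
+ * Illustrative usage sketch (not part of the original header), for a stack-
+ * allocated table of JSDHashEntryStub entries using the stub ops:
+ *
+ *   JSDHashTable table;
+ *   if (!JS_DHashTableInit(&table, JS_DHashGetStubOps(), NULL,
+ *                          sizeof(JSDHashEntryStub), 64))
+ *       return JS_FALSE;            // entry storage allocation failed
+ *   ...
+ *   JS_DHashTableFinish(&table);    // declared further below
+ */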
+
+/*
+ * Set maximum and minimum alpha for table. The defaults are 0.75 and .25.
+ * maxAlpha must be in [0.5, 0.9375] for the default JS_DHASH_MIN_SIZE; or if
+ * MinSize=JS_DHASH_MIN_SIZE <= 256, in [0.5, (float)(MinSize-1)/MinSize]; or
+ * else in [0.5, 255.0/256]. minAlpha must be in [0, maxAlpha / 2), so that
+ * we don't shrink on the very next remove after growing a table upon adding
+ * an entry that brings entryCount past maxAlpha * tableSize.
+ */
+extern JS_PUBLIC_API(void)
+JS_DHashTableSetAlphaBounds(JSDHashTable *table,
+ float maxAlpha,
+ float minAlpha);
+
+/*
+ * Call this macro with k, the number of pointer-sized words wasted per entry
+ * under chaining, to compute the minimum alpha at which double hashing still
+ * beats chaining.
+ */
+#define JS_DHASH_MIN_ALPHA(table, k) \
+ ((float)((table)->entrySize / sizeof(void *) - 1) \
+ / ((table)->entrySize / sizeof(void *) + (k)))
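+
+/*
+ * Illustrative call (not part of the original header): after Init, a caller
+ * might pair JS_DHASH_MIN_ALPHA with JS_DHashTableSetAlphaBounds, subject to
+ * the minAlpha < maxAlpha / 2 constraint documented above:
+ *
+ *   JS_DHashTableSetAlphaBounds(table, 0.875,
+ *                               JS_DHASH_MIN_ALPHA(table, 3));
+ */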
+
+/*
+ * Finalize table's data, free its entry storage using table->ops->freeTable,
+ * and leave its members unchanged from their last live values (which leaves
+ * pointers dangling). If you want to burn cycles clearing table, it's up to
+ * your code to call memset.
+ */
+extern JS_PUBLIC_API(void)
+JS_DHashTableFinish(JSDHashTable *table);
+
+/*
+ * To consolidate keyHash computation and table grow/shrink code, we use a
+ * single entry point for lookup, add, and remove operations. The operation
+ * codes are declared here, along with codes returned by JSDHashEnumerator
+ * functions, which control JS_DHashTableEnumerate's behavior.
+ */
+typedef enum JSDHashOperator {
+ JS_DHASH_LOOKUP = 0, /* lookup entry */
+ JS_DHASH_ADD = 1, /* add entry */
+ JS_DHASH_REMOVE = 2, /* remove entry, or enumerator says remove */
+ JS_DHASH_NEXT = 0, /* enumerator says continue */
+ JS_DHASH_STOP = 1 /* enumerator says stop */
+} JSDHashOperator;
+
+/*
+ * To lookup a key in table, call:
+ *
+ * entry = JS_DHashTableOperate(table, key, JS_DHASH_LOOKUP);
+ *
+ * If JS_DHASH_ENTRY_IS_BUSY(entry) is true, key was found and it identifies
+ * entry. If JS_DHASH_ENTRY_IS_FREE(entry) is true, key was not found.
+ *
+ * To add an entry identified by key to table, call:
+ *
+ * entry = JS_DHashTableOperate(table, key, JS_DHASH_ADD);
+ *
+ * If entry is null upon return, then either the table is severely overloaded,
+ * and memory can't be allocated for entry storage via table->ops->allocTable;
+ * or, if table->ops->initEntry is non-null, the table->ops->initEntry op may
+ * have returned false.
+ *
+ * Otherwise, entry->keyHash has been set so that JS_DHASH_ENTRY_IS_BUSY(entry)
+ * is true, and it is up to the caller to initialize the key and value parts
+ * of the entry sub-type, if they have not been set already (i.e. if entry was
+ * not already in the table, and if the optional initEntry hook was not used).
+ *
+ * To remove an entry identified by key from table, call:
+ *
+ * (void) JS_DHashTableOperate(table, key, JS_DHASH_REMOVE);
+ *
+ * If key's entry is found, it is cleared (via table->ops->clearEntry) and
+ * the entry is marked so that JS_DHASH_ENTRY_IS_FREE(entry). This operation
+ * returns null unconditionally; you should ignore its return value.
+ */
+extern JS_PUBLIC_API(JSDHashEntryHdr *) JS_DHASH_FASTCALL
+JS_DHashTableOperate(JSDHashTable *table, const void *key, JSDHashOperator op);
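+
+/*
+ * Illustrative JS_DHASH_ADD pattern (not part of the original header); the
+ * NameCountEntry type is the hypothetical sub-type sketched earlier:
+ *
+ *   struct NameCountEntry *e = (struct NameCountEntry *)
+ *       JS_DHashTableOperate(table, name, JS_DHASH_ADD);
+ *   if (!e)
+ *       return JS_FALSE;            // overloaded, or allocTable/initEntry failed
+ *   if (!e->name)                   // members still null => newly added entry
+ *       e->name = name;
+ *   e->count++;
+ */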
+
+/*
+ * Remove an entry already accessed via LOOKUP or ADD.
+ *
+ * NB: this is a "raw" or low-level routine, intended to be used only where
+ * the inefficiency of a full JS_DHashTableOperate (which rehashes in order
+ * to find the entry given its key) is not tolerable. This function does not
+ * shrink the table if it is underloaded. It does not update stats #ifdef
+ * JS_DHASHMETER, either.
+ */
+extern JS_PUBLIC_API(void)
+JS_DHashTableRawRemove(JSDHashTable *table, JSDHashEntryHdr *entry);
+
+/*
+ * Enumerate entries in table using etor:
+ *
+ * count = JS_DHashTableEnumerate(table, etor, arg);
+ *
+ * JS_DHashTableEnumerate calls etor like so:
+ *
+ * op = etor(table, entry, number, arg);
+ *
+ * where number is a zero-based ordinal assigned to live entries according to
+ * their order in table->entryStore.
+ *
+ * The return value, op, is treated as a set of flags. If op is JS_DHASH_NEXT,
+ * then continue enumerating. If op contains JS_DHASH_REMOVE, then clear (via
+ * table->ops->clearEntry) and free entry. Then we check whether op contains
+ * JS_DHASH_STOP; if so, stop enumerating and return the number of live entries
+ * that were enumerated so far. Return the total number of live entries when
+ * enumeration completes normally.
+ *
+ * If etor calls JS_DHashTableOperate on table with op != JS_DHASH_LOOKUP, it
+ * must return JS_DHASH_STOP; otherwise undefined behavior results.
+ *
+ * If any enumerator returns JS_DHASH_REMOVE, table->entryStore may be shrunk
+ * or compressed after enumeration, but before JS_DHashTableEnumerate returns.
+ * Such an enumerator therefore can't safely set aside entry pointers, but an
+ * enumerator that never returns JS_DHASH_REMOVE can set pointers to entries
+ * aside, e.g., to avoid copying live entries into an array of the entry type.
+ * Copying entry pointers is cheaper, and safe so long as the caller of such a
+ * "stable" Enumerate doesn't use the set-aside pointers after any call either
+ * to JS_DHashTableOperate, or to an "unstable" form of Enumerate, which might
+ * grow or shrink entryStore.
+ *
+ * If your enumerator wants to remove certain entries, but set aside pointers
+ * to other entries that it retains, it can use JS_DHashTableRawRemove on the
+ * entries to be removed, returning JS_DHASH_NEXT to skip them. Likewise, if
+ * you want to remove entries, but for some reason you do not want entryStore
+ * to be shrunk or compressed, you can call JS_DHashTableRawRemove safely on
+ * the entry being enumerated, rather than returning JS_DHASH_REMOVE.
+ */
+typedef JSDHashOperator
+(* JS_DLL_CALLBACK JSDHashEnumerator)(JSDHashTable *table, JSDHashEntryHdr *hdr,
+ uint32 number, void *arg);
+
+extern JS_PUBLIC_API(uint32)
+JS_DHashTableEnumerate(JSDHashTable *table, JSDHashEnumerator etor, void *arg);
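+
+/*
+ * Illustrative enumerator sketch (not part of the original header), removing
+ * entries whose hypothetical count member has reached zero:
+ *
+ *   static JSDHashOperator JS_DLL_CALLBACK
+ *   RemoveIfZero(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
+ *                void *arg)
+ *   {
+ *       struct NameCountEntry *e = (struct NameCountEntry *) hdr;
+ *       return e->count ? JS_DHASH_NEXT : JS_DHASH_REMOVE;
+ *   }
+ *
+ *   // live = JS_DHashTableEnumerate(table, RemoveIfZero, NULL);
+ */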
+
+#ifdef JS_DHASHMETER
+#include <stdio.h>
+
+extern JS_PUBLIC_API(void)
+JS_DHashTableDumpMeter(JSDHashTable *table, JSDHashEnumerator dump, FILE *fp);
+#endif
+
+JS_END_EXTERN_C
+
+#endif /* jsdhash_h___ */
diff --git a/src/third_party/js-1.7/jsdtoa.c b/src/third_party/js-1.7/jsdtoa.c
new file mode 100644
index 00000000000..5b0b09ff9b4
--- /dev/null
+++ b/src/third_party/js-1.7/jsdtoa.c
@@ -0,0 +1,3132 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * Portable double to alphanumeric string and back converters.
+ */
+#include "jsstddef.h"
+#include "jslibmath.h"
+#include "jstypes.h"
+#include "jsdtoa.h"
+#include "jsprf.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jspubtd.h"
+#include "jsnum.h"
+
+#ifdef JS_THREADSAFE
+#include "prlock.h"
+#endif
+
+/****************************************************************
+ *
+ * The author of this software is David M. Gay.
+ *
+ * Copyright (c) 1991 by Lucent Technologies.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose without fee is hereby granted, provided that this entire notice
+ * is included in all copies of any software which is or includes a copy
+ * or modification of this software and in all copies of the supporting
+ * documentation for such software.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED
+ * WARRANTY. IN PARTICULAR, NEITHER THE AUTHOR NOR LUCENT MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY
+ * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE.
+ *
+ ***************************************************************/
+
+/* Please send bug reports to
+ David M. Gay
+ Bell Laboratories, Room 2C-463
+ 600 Mountain Avenue
+ Murray Hill, NJ 07974-0636
+ U.S.A.
+ dmg@bell-labs.com
+ */
+
+/* On a machine with IEEE extended-precision registers, it is
+ * necessary to specify double-precision (53-bit) rounding precision
+ * before invoking strtod or dtoa. If the machine uses (the equivalent
+ * of) Intel 80x87 arithmetic, the call
+ * _control87(PC_53, MCW_PC);
+ * does this with many compilers. Whether this or another call is
+ * appropriate depends on the compiler; for this to work, it may be
+ * necessary to #include "float.h" or another system-dependent header
+ * file.
+ */
+
+/* strtod for IEEE-arithmetic machines.
+ *
+ * This strtod returns a nearest machine number to the input decimal
+ * string (or sets err to JS_DTOA_ERANGE or JS_DTOA_ENOMEM). With IEEE
+ * arithmetic, ties are broken by the IEEE round-even rule. Otherwise
+ * ties are broken by biased rounding (add half and chop).
+ *
+ * Inspired loosely by William D. Clinger's paper "How to Read Floating
+ * Point Numbers Accurately" [Proc. ACM SIGPLAN '90, pp. 92-101].
+ *
+ * Modifications:
+ *
+ * 1. We only require IEEE double-precision
+ * arithmetic (not IEEE double-extended).
+ * 2. We get by with floating-point arithmetic in a case that
+ * Clinger missed -- when we're computing d * 10^n
+ * for a small integer d and the integer n is not too
+ * much larger than 22 (the maximum integer k for which
+ * we can represent 10^k exactly), we may be able to
+ * compute (d*10^k) * 10^(e-k) with just one roundoff.
+ * 3. Rather than a bit-at-a-time adjustment of the binary
+ * result in the hard case, we use floating-point
+ * arithmetic to determine the adjustment to within
+ * one bit; only in really hard cases do we need to
+ * compute a second residual.
+ * 4. Because of 3., we don't need a large table of powers of 10
+ * for ten-to-e (just some small tables, e.g. of 10^k
+ * for 0 <= k <= 22).
+ */
+
+/*
+ * #define IEEE_8087 for IEEE-arithmetic machines where the least
+ * significant byte has the lowest address.
+ * #define IEEE_MC68k for IEEE-arithmetic machines where the most
+ * significant byte has the lowest address.
+ * #define Long int on machines with 32-bit ints and 64-bit longs.
+ * #define Sudden_Underflow for IEEE-format machines without gradual
+ * underflow (i.e., that flush to zero on underflow).
+ * #define No_leftright to omit left-right logic in fast floating-point
+ * computation of js_dtoa.
+ * #define Check_FLT_ROUNDS if FLT_ROUNDS can assume the values 2 or 3.
+ * #define RND_PRODQUOT to use rnd_prod and rnd_quot (assembly routines
+ * that use extended-precision instructions to compute rounded
+ * products and quotients) with IBM.
+ * #define ROUND_BIASED for IEEE-format with biased rounding.
+ * #define Inaccurate_Divide for IEEE-format with correctly rounded
+ * products but inaccurate quotients, e.g., for Intel i860.
+ * #define JS_HAVE_LONG_LONG on machines that have a "long long"
+ * integer type (of >= 64 bits). If long long is available and the name is
+ * something other than "long long", #define Llong to be the name,
+ * and if "unsigned Llong" does not work as an unsigned version of
+ * Llong, #define ULLong to be the corresponding unsigned type.
+ * #define Bad_float_h if your system lacks a float.h or if it does not
+ * define some or all of DBL_DIG, DBL_MAX_10_EXP, DBL_MAX_EXP,
+ * FLT_RADIX, FLT_ROUNDS, and DBL_MAX.
+ * #define MALLOC your_malloc, where your_malloc(n) acts like malloc(n)
+ * if memory is available and otherwise does something you deem
+ * appropriate. If MALLOC is undefined, malloc will be invoked
+ * directly -- and assumed always to succeed.
+ * #define Omit_Private_Memory to omit logic (added Jan. 1998) for making
+ * memory allocations from a private pool of memory when possible.
+ * When used, the private pool is PRIVATE_MEM bytes long: 2000 bytes,
+ * unless #defined to be a different length. This default length
+ * suffices to get rid of MALLOC calls except for unusual cases,
+ * such as decimal-to-binary conversion of a very long string of
+ * digits.
+ * #define INFNAN_CHECK on IEEE systems to cause strtod to check for
+ * Infinity and NaN (case insensitively). On some systems (e.g.,
+ * some HP systems), it may be necessary to #define NAN_WORD0
+ * appropriately -- to the most significant word of a quiet NaN.
+ * (On HP Series 700/800 machines, -DNAN_WORD0=0x7ff40000 works.)
+ * #define MULTIPLE_THREADS if the system offers preemptively scheduled
+ * multiple threads. In this case, you must provide (or suitably
+ * #define) two locks, acquired by ACQUIRE_DTOA_LOCK() and released
+ * by RELEASE_DTOA_LOCK(). (The second lock, accessed
+ * in pow5mult, ensures lazy evaluation of only one copy of high
+ * powers of 5; omitting this lock would introduce a small
+ * probability of wasting memory, but would otherwise be harmless.)
+ * You must also invoke freedtoa(s) to free the value s returned by
+ * dtoa. You may do so whether or not MULTIPLE_THREADS is #defined.
+ * #define NO_IEEE_Scale to disable new (Feb. 1997) logic in strtod that
+ * avoids underflows on inputs whose result does not underflow.
+ */
+#ifdef IS_LITTLE_ENDIAN
+#define IEEE_8087
+#else
+#define IEEE_MC68k
+#endif
+
+#ifndef Long
+#define Long int32
+#endif
+
+#ifndef ULong
+#define ULong uint32
+#endif
+
+#define Bug(errorMessageString) JS_ASSERT(!errorMessageString)
+
+#include "stdlib.h"
+#include "string.h"
+
+#ifdef MALLOC
+extern void *MALLOC(size_t);
+#else
+#define MALLOC malloc
+#endif
+
+#define Omit_Private_Memory
+/* Private memory currently doesn't work with JS_THREADSAFE */
+#ifndef Omit_Private_Memory
+#ifndef PRIVATE_MEM
+#define PRIVATE_MEM 2000
+#endif
+#define PRIVATE_mem ((PRIVATE_MEM+sizeof(double)-1)/sizeof(double))
+static double private_mem[PRIVATE_mem], *pmem_next = private_mem;
+#endif
+
+#ifdef Bad_float_h
+#undef __STDC__
+
+#define DBL_DIG 15
+#define DBL_MAX_10_EXP 308
+#define DBL_MAX_EXP 1024
+#define FLT_RADIX 2
+#define FLT_ROUNDS 1
+#define DBL_MAX 1.7976931348623157e+308
+
+
+
+#ifndef LONG_MAX
+#define LONG_MAX 2147483647
+#endif
+
+#else /* ifndef Bad_float_h */
+#include "float.h"
+#endif /* Bad_float_h */
+
+#ifndef __MATH_H__
+#include "math.h"
+#endif
+
+#ifndef CONST
+#define CONST const
+#endif
+
+#if defined(IEEE_8087) + defined(IEEE_MC68k) != 1
+Exactly one of IEEE_8087 or IEEE_MC68k should be defined.
+#endif
+
+#define word0(x) JSDOUBLE_HI32(x)
+#define set_word0(x, y) JSDOUBLE_SET_HI32(x, y)
+#define word1(x) JSDOUBLE_LO32(x)
+#define set_word1(x, y) JSDOUBLE_SET_LO32(x, y)
+
+#define Storeinc(a,b,c) (*(a)++ = (b) << 16 | (c) & 0xffff)
+
+/* #define P DBL_MANT_DIG */
+/* Ten_pmax = floor(P*log(2)/log(5)) */
+/* Bletch = (highest power of 2 < DBL_MAX_10_EXP) / 16 */
+/* Quick_max = floor((P-1)*log(FLT_RADIX)/log(10) - 1) */
+/* Int_max = floor(P*log(FLT_RADIX)/log(10) - 1) */
+
+#define Exp_shift 20
+#define Exp_shift1 20
+#define Exp_msk1 0x100000
+#define Exp_msk11 0x100000
+#define Exp_mask 0x7ff00000
+#define P 53
+#define Bias 1023
+#define Emin (-1022)
+#define Exp_1 0x3ff00000
+#define Exp_11 0x3ff00000
+#define Ebits 11
+#define Frac_mask 0xfffff
+#define Frac_mask1 0xfffff
+#define Ten_pmax 22
+#define Bletch 0x10
+#define Bndry_mask 0xfffff
+#define Bndry_mask1 0xfffff
+#define LSB 1
+#define Sign_bit 0x80000000
+#define Log2P 1
+#define Tiny0 0
+#define Tiny1 1
+#define Quick_max 14
+#define Int_max 14
+#define Infinite(x) (word0(x) == 0x7ff00000) /* sufficient test for here */
+#ifndef NO_IEEE_Scale
+#define Avoid_Underflow
+#endif
+
+
+
+#ifdef RND_PRODQUOT
+#define rounded_product(a,b) a = rnd_prod(a, b)
+#define rounded_quotient(a,b) a = rnd_quot(a, b)
+extern double rnd_prod(double, double), rnd_quot(double, double);
+#else
+#define rounded_product(a,b) a *= b
+#define rounded_quotient(a,b) a /= b
+#endif
+
+#define Big0 (Frac_mask1 | Exp_msk1*(DBL_MAX_EXP+Bias-1))
+#define Big1 0xffffffff
+
+#ifndef JS_HAVE_LONG_LONG
+#undef ULLong
+#else /* long long available */
+#ifndef Llong
+#define Llong JSInt64
+#endif
+#ifndef ULLong
+#define ULLong JSUint64
+#endif
+#endif /* JS_HAVE_LONG_LONG */
+
+#ifdef JS_THREADSAFE
+#define MULTIPLE_THREADS
+static PRLock *freelist_lock;
+#define ACQUIRE_DTOA_LOCK() \
+ JS_BEGIN_MACRO \
+ if (!initialized) \
+ InitDtoa(); \
+ PR_Lock(freelist_lock); \
+ JS_END_MACRO
+#define RELEASE_DTOA_LOCK() PR_Unlock(freelist_lock)
+#else
+#undef MULTIPLE_THREADS
+#define ACQUIRE_DTOA_LOCK() /*nothing*/
+#define RELEASE_DTOA_LOCK() /*nothing*/
+#endif
+
+#define Kmax 15
+
+struct Bigint {
+ struct Bigint *next; /* Free list link */
+ int32 k; /* lg2(maxwds) */
+ int32 maxwds; /* Number of words allocated for x */
+ int32 sign; /* Zero if positive, 1 if negative. Ignored by most Bigint routines! */
+ int32 wds; /* Actual number of words. If value is nonzero, the most significant word must be nonzero. */
+ ULong x[1]; /* wds words of number in little endian order */
+};
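+
+/*
+ * Illustrative note (not part of the original source): a Bigint b with
+ * b->wds == n represents the magnitude
+ *
+ *   b->x[0] + b->x[1]*2^32 + ... + b->x[n-1]*2^(32*(n-1))
+ *
+ * so, e.g., the value 2^32 + 5 is stored as wds == 2, x[0] == 5, x[1] == 1.
+ */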
+
+#ifdef ENABLE_OOM_TESTING
+/* Out-of-memory testing. Use a good testcase (over and over) and then use
+ * these routines to cause a memory failure on every possible Balloc allocation,
+ * to make sure that all out-of-memory paths can be followed. See bug 14044.
+ */
+
+static int allocationNum; /* which allocation is next? */
+static int desiredFailure; /* which allocation should fail? */
+
+/**
+ * js_BigintTestingReset
+ *
+ * Call at the beginning of a test run to set the allocation failure position.
+ * (Set to 0 to just have the engine count allocations without failing.)
+ */
+JS_PUBLIC_API(void)
+js_BigintTestingReset(int newFailure)
+{
+ allocationNum = 0;
+ desiredFailure = newFailure;
+}
+
+/**
+ * js_BigintTestingWhere
+ *
+ * Report the current allocation position. This is really only useful when you
+ * want to learn how many allocations a test run has.
+ */
+JS_PUBLIC_API(int)
+js_BigintTestingWhere()
+{
+ return allocationNum;
+}
+
+
+/*
+ * So here's what you do: Set up a fantastic test case that exercises the
+ * elements of the code you wish. Set the failure point at 0 and run the test,
+ * then get the allocation position. This number is the number of allocations
+ * your test makes. Now loop from 1 to that number, setting the failure point
+ * at each loop count, and run the test over and over, causing failures at each
+ * step. Any memory failure *should* cause an Out-Of-Memory exception; if it
+ * doesn't, then there's still an error here.
+ */
+#endif
+
+typedef struct Bigint Bigint;
+
+static Bigint *freelist[Kmax+1];
+
+/*
+ * Allocate a Bigint with 2^k words.
+ * This is not thread-safe; the caller must use thread locks.
+ */
+static Bigint *Balloc(int32 k)
+{
+ int32 x;
+ Bigint *rv;
+#ifndef Omit_Private_Memory
+ uint32 len;
+#endif
+
+#ifdef ENABLE_OOM_TESTING
+ if (++allocationNum == desiredFailure) {
+ printf("Forced Failing Allocation number %d\n", allocationNum);
+ return NULL;
+ }
+#endif
+
+ if ((rv = freelist[k]) != NULL)
+ freelist[k] = rv->next;
+ if (rv == NULL) {
+ x = 1 << k;
+#ifdef Omit_Private_Memory
+ rv = (Bigint *)MALLOC(sizeof(Bigint) + (x-1)*sizeof(ULong));
+#else
+ len = (sizeof(Bigint) + (x-1)*sizeof(ULong) + sizeof(double) - 1)
+ /sizeof(double);
+ if (pmem_next - private_mem + len <= PRIVATE_mem) {
+ rv = (Bigint*)pmem_next;
+ pmem_next += len;
+ }
+ else
+ rv = (Bigint*)MALLOC(len*sizeof(double));
+#endif
+ if (!rv)
+ return NULL;
+ rv->k = k;
+ rv->maxwds = x;
+ }
+ rv->sign = rv->wds = 0;
+ return rv;
+}
+
+static void Bfree(Bigint *v)
+{
+ if (v) {
+ v->next = freelist[v->k];
+ freelist[v->k] = v;
+ }
+}
+
+#define Bcopy(x,y) memcpy((char *)&x->sign, (char *)&y->sign, \
+ y->wds*sizeof(Long) + 2*sizeof(int32))
+
+/* Return b*m + a. Deallocate the old b. Both a and m must be between 0 and
+ * 65535 inclusive. NOTE: old b is deallocated on memory failure.
+ */
+static Bigint *multadd(Bigint *b, int32 m, int32 a)
+{
+ int32 i, wds;
+#ifdef ULLong
+ ULong *x;
+ ULLong carry, y;
+#else
+ ULong carry, *x, y;
+ ULong xi, z;
+#endif
+ Bigint *b1;
+
+#ifdef ENABLE_OOM_TESTING
+ if (++allocationNum == desiredFailure) {
+ /* Faux allocation, because I'm not getting all of the failure paths
+ * without it.
+ */
+ printf("Forced Failing Allocation number %d\n", allocationNum);
+ Bfree(b);
+ return NULL;
+ }
+#endif
+
+ wds = b->wds;
+ x = b->x;
+ i = 0;
+ carry = a;
+ do {
+#ifdef ULLong
+ y = *x * (ULLong)m + carry;
+ carry = y >> 32;
+ *x++ = (ULong)(y & 0xffffffffUL);
+#else
+ xi = *x;
+ y = (xi & 0xffff) * m + carry;
+ z = (xi >> 16) * m + (y >> 16);
+ carry = z >> 16;
+ *x++ = (z << 16) + (y & 0xffff);
+#endif
+ }
+ while(++i < wds);
+ if (carry) {
+ if (wds >= b->maxwds) {
+ b1 = Balloc(b->k+1);
+ if (!b1) {
+ Bfree(b);
+ return NULL;
+ }
+ Bcopy(b1, b);
+ Bfree(b);
+ b = b1;
+ }
+ b->x[wds++] = (ULong)carry;
+ b->wds = wds;
+ }
+ return b;
+}
+
+static Bigint *s2b(CONST char *s, int32 nd0, int32 nd, ULong y9)
+{
+ Bigint *b;
+ int32 i, k;
+ Long x, y;
+
+ x = (nd + 8) / 9;
+ for(k = 0, y = 1; x > y; y <<= 1, k++) ;
+ b = Balloc(k);
+ if (!b)
+ return NULL;
+ b->x[0] = y9;
+ b->wds = 1;
+
+ i = 9;
+ if (9 < nd0) {
+ s += 9;
+ do {
+ b = multadd(b, 10, *s++ - '0');
+ if (!b)
+ return NULL;
+ } while(++i < nd0);
+ s++;
+ }
+ else
+ s += 10;
+ for(; i < nd; i++) {
+ b = multadd(b, 10, *s++ - '0');
+ if (!b)
+ return NULL;
+ }
+ return b;
+}
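+
+/*
+ * Illustrative note (not part of the original source): the caller folds the
+ * first (up to nine) significant digits into y9, so for the digit string
+ * "12345678901" with nd0 == nd == 11, y9 is 123456789 and s2b multadd's in
+ * the two remaining digits to build the full 11-digit value.
+ */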
+
+
+/* Return the number (0 through 32) of most significant zero bits in x. */
+static int32 hi0bits(register ULong x)
+{
+ register int32 k = 0;
+
+ if (!(x & 0xffff0000)) {
+ k = 16;
+ x <<= 16;
+ }
+ if (!(x & 0xff000000)) {
+ k += 8;
+ x <<= 8;
+ }
+ if (!(x & 0xf0000000)) {
+ k += 4;
+ x <<= 4;
+ }
+ if (!(x & 0xc0000000)) {
+ k += 2;
+ x <<= 2;
+ }
+ if (!(x & 0x80000000)) {
+ k++;
+ if (!(x & 0x40000000))
+ return 32;
+ }
+ return k;
+}
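+
+/*
+ * Illustrative values (not part of the original source):
+ * hi0bits(0x80000000) == 0, hi0bits(0x00010000) == 15, hi0bits(0) == 32.
+ */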
+
+
+/* Return the number (0 through 32) of least significant zero bits in y.
+ * Also shift y to the right past these 0 through 32 zeros so that y's
+ * least significant bit will be set unless y was originally zero. */
+static int32 lo0bits(ULong *y)
+{
+ register int32 k;
+ register ULong x = *y;
+
+ if (x & 7) {
+ if (x & 1)
+ return 0;
+ if (x & 2) {
+ *y = x >> 1;
+ return 1;
+ }
+ *y = x >> 2;
+ return 2;
+ }
+ k = 0;
+ if (!(x & 0xffff)) {
+ k = 16;
+ x >>= 16;
+ }
+ if (!(x & 0xff)) {
+ k += 8;
+ x >>= 8;
+ }
+ if (!(x & 0xf)) {
+ k += 4;
+ x >>= 4;
+ }
+ if (!(x & 0x3)) {
+ k += 2;
+ x >>= 2;
+ }
+ if (!(x & 1)) {
+ k++;
+ x >>= 1;
+ if (!x & 1)
+ return 32;
+ }
+ *y = x;
+ return k;
+}
+
+/* Return a new Bigint with the given integer value, which must be nonnegative. */
+static Bigint *i2b(int32 i)
+{
+ Bigint *b;
+
+ b = Balloc(1);
+ if (!b)
+ return NULL;
+ b->x[0] = i;
+ b->wds = 1;
+ return b;
+}
+
+/* Return a newly allocated product of a and b. */
+static Bigint *mult(CONST Bigint *a, CONST Bigint *b)
+{
+ CONST Bigint *t;
+ Bigint *c;
+ int32 k, wa, wb, wc;
+ ULong y;
+ ULong *xc, *xc0, *xce;
+ CONST ULong *x, *xa, *xae, *xb, *xbe;
+#ifdef ULLong
+ ULLong carry, z;
+#else
+ ULong carry, z;
+ ULong z2;
+#endif
+
+ if (a->wds < b->wds) {
+ t = a;
+ a = b;
+ b = t;
+ }
+ k = a->k;
+ wa = a->wds;
+ wb = b->wds;
+ wc = wa + wb;
+ if (wc > a->maxwds)
+ k++;
+ c = Balloc(k);
+ if (!c)
+ return NULL;
+ for(xc = c->x, xce = xc + wc; xc < xce; xc++)
+ *xc = 0;
+ xa = a->x;
+ xae = xa + wa;
+ xb = b->x;
+ xbe = xb + wb;
+ xc0 = c->x;
+#ifdef ULLong
+ for(; xb < xbe; xc0++) {
+ if ((y = *xb++) != 0) {
+ x = xa;
+ xc = xc0;
+ carry = 0;
+ do {
+ z = *x++ * (ULLong)y + *xc + carry;
+ carry = z >> 32;
+ *xc++ = (ULong)(z & 0xffffffffUL);
+ }
+ while(x < xae);
+ *xc = (ULong)carry;
+ }
+ }
+#else
+ for(; xb < xbe; xb++, xc0++) {
+ if ((y = *xb & 0xffff) != 0) {
+ x = xa;
+ xc = xc0;
+ carry = 0;
+ do {
+ z = (*x & 0xffff) * y + (*xc & 0xffff) + carry;
+ carry = z >> 16;
+ z2 = (*x++ >> 16) * y + (*xc >> 16) + carry;
+ carry = z2 >> 16;
+ Storeinc(xc, z2, z);
+ }
+ while(x < xae);
+ *xc = carry;
+ }
+ if ((y = *xb >> 16) != 0) {
+ x = xa;
+ xc = xc0;
+ carry = 0;
+ z2 = *xc;
+ do {
+ z = (*x & 0xffff) * y + (*xc >> 16) + carry;
+ carry = z >> 16;
+ Storeinc(xc, z, z2);
+ z2 = (*x++ >> 16) * y + (*xc & 0xffff) + carry;
+ carry = z2 >> 16;
+ }
+ while(x < xae);
+ *xc = z2;
+ }
+ }
+#endif
+ for(xc0 = c->x, xc = xc0 + wc; wc > 0 && !*--xc; --wc) ;
+ c->wds = wc;
+ return c;
+}
+
+/*
+ * 'p5s' points to a linked list of Bigints that are powers of 5.
+ * This list grows on demand, and it can only grow: it won't change
+ * in any other way. So if we read 'p5s' or the 'next' field of
+ * some Bigint on the list, and it is not NULL, we know it won't
+ * change to NULL or some other value. Only when the value of
+ * 'p5s' or 'next' is NULL do we need to acquire the lock and add
+ * a new Bigint to the list.
+ */
+
+static Bigint *p5s;
+
+#ifdef JS_THREADSAFE
+static PRLock *p5s_lock;
+#endif
+
+/* Return b * 5^k. Deallocate the old b. k must be nonnegative. */
+/* NOTE: old b is deallocated on memory failure. */
+static Bigint *pow5mult(Bigint *b, int32 k)
+{
+ Bigint *b1, *p5, *p51;
+ int32 i;
+ static CONST int32 p05[3] = { 5, 25, 125 };
+
+ if ((i = k & 3) != 0) {
+ b = multadd(b, p05[i-1], 0);
+ if (!b)
+ return NULL;
+ }
+
+ if (!(k >>= 2))
+ return b;
+ if (!(p5 = p5s)) {
+#ifdef JS_THREADSAFE
+ /*
+ * We take great care to not call i2b() and Bfree()
+ * while holding the lock.
+ */
+ Bigint *wasted_effort = NULL;
+ p5 = i2b(625);
+ if (!p5) {
+ Bfree(b);
+ return NULL;
+ }
+ /* lock and check again */
+ PR_Lock(p5s_lock);
+ if (!p5s) {
+ /* first time */
+ p5s = p5;
+ p5->next = 0;
+ } else {
+ /* some other thread just beat us */
+ wasted_effort = p5;
+ p5 = p5s;
+ }
+ PR_Unlock(p5s_lock);
+ if (wasted_effort) {
+ Bfree(wasted_effort);
+ }
+#else
+ /* first time */
+ p5 = p5s = i2b(625);
+ if (!p5) {
+ Bfree(b);
+ return NULL;
+ }
+ p5->next = 0;
+#endif
+ }
+ for(;;) {
+ if (k & 1) {
+ b1 = mult(b, p5);
+ Bfree(b);
+ if (!b1)
+ return NULL;
+ b = b1;
+ }
+ if (!(k >>= 1))
+ break;
+ if (!(p51 = p5->next)) {
+#ifdef JS_THREADSAFE
+ Bigint *wasted_effort = NULL;
+ p51 = mult(p5, p5);
+ if (!p51) {
+ Bfree(b);
+ return NULL;
+ }
+ PR_Lock(p5s_lock);
+ if (!p5->next) {
+ p5->next = p51;
+ p51->next = 0;
+ } else {
+ wasted_effort = p51;
+ p51 = p5->next;
+ }
+ PR_Unlock(p5s_lock);
+ if (wasted_effort) {
+ Bfree(wasted_effort);
+ }
+#else
+ p51 = mult(p5,p5);
+ if (!p51) {
+ Bfree(b);
+ return NULL;
+ }
+ p51->next = 0;
+ p5->next = p51;
+#endif
+ }
+ p5 = p51;
+ }
+ return b;
+}
+
+/* Return b * 2^k. Deallocate the old b. k must be nonnegative.
+ * NOTE: on memory failure, old b is deallocated. */
+static Bigint *lshift(Bigint *b, int32 k)
+{
+ int32 i, k1, n, n1;
+ Bigint *b1;
+ ULong *x, *x1, *xe, z;
+
+ n = k >> 5;
+ k1 = b->k;
+ n1 = n + b->wds + 1;
+ for(i = b->maxwds; n1 > i; i <<= 1)
+ k1++;
+ b1 = Balloc(k1);
+ if (!b1)
+ goto done;
+ x1 = b1->x;
+ for(i = 0; i < n; i++)
+ *x1++ = 0;
+ x = b->x;
+ xe = x + b->wds;
+ if (k &= 0x1f) {
+ k1 = 32 - k;
+ z = 0;
+ do {
+ *x1++ = *x << k | z;
+ z = *x++ >> k1;
+ }
+ while(x < xe);
+ if ((*x1 = z) != 0)
+ ++n1;
+ }
+ else do
+ *x1++ = *x++;
+ while(x < xe);
+ b1->wds = n1 - 1;
+done:
+ Bfree(b);
+ return b1;
+}
+
+/* Return -1, 0, or 1 depending on whether a<b, a==b, or a>b, respectively. */
+static int32 cmp(Bigint *a, Bigint *b)
+{
+ ULong *xa, *xa0, *xb, *xb0;
+ int32 i, j;
+
+ i = a->wds;
+ j = b->wds;
+#ifdef DEBUG
+ if (i > 1 && !a->x[i-1])
+ Bug("cmp called with a->x[a->wds-1] == 0");
+ if (j > 1 && !b->x[j-1])
+ Bug("cmp called with b->x[b->wds-1] == 0");
+#endif
+ if (i -= j)
+ return i;
+ xa0 = a->x;
+ xa = xa0 + j;
+ xb0 = b->x;
+ xb = xb0 + j;
+ for(;;) {
+ if (*--xa != *--xb)
+ return *xa < *xb ? -1 : 1;
+ if (xa <= xa0)
+ break;
+ }
+ return 0;
+}
+
+static Bigint *diff(Bigint *a, Bigint *b)
+{
+ Bigint *c;
+ int32 i, wa, wb;
+ ULong *xa, *xae, *xb, *xbe, *xc;
+#ifdef ULLong
+ ULLong borrow, y;
+#else
+ ULong borrow, y;
+ ULong z;
+#endif
+
+ i = cmp(a,b);
+ if (!i) {
+ c = Balloc(0);
+ if (!c)
+ return NULL;
+ c->wds = 1;
+ c->x[0] = 0;
+ return c;
+ }
+ if (i < 0) {
+ c = a;
+ a = b;
+ b = c;
+ i = 1;
+ }
+ else
+ i = 0;
+ c = Balloc(a->k);
+ if (!c)
+ return NULL;
+ c->sign = i;
+ wa = a->wds;
+ xa = a->x;
+ xae = xa + wa;
+ wb = b->wds;
+ xb = b->x;
+ xbe = xb + wb;
+ xc = c->x;
+ borrow = 0;
+#ifdef ULLong
+ do {
+ y = (ULLong)*xa++ - *xb++ - borrow;
+ borrow = y >> 32 & 1UL;
+ *xc++ = (ULong)(y & 0xffffffffUL);
+ }
+ while(xb < xbe);
+ while(xa < xae) {
+ y = *xa++ - borrow;
+ borrow = y >> 32 & 1UL;
+ *xc++ = (ULong)(y & 0xffffffffUL);
+ }
+#else
+ do {
+ y = (*xa & 0xffff) - (*xb & 0xffff) - borrow;
+ borrow = (y & 0x10000) >> 16;
+ z = (*xa++ >> 16) - (*xb++ >> 16) - borrow;
+ borrow = (z & 0x10000) >> 16;
+ Storeinc(xc, z, y);
+ }
+ while(xb < xbe);
+ while(xa < xae) {
+ y = (*xa & 0xffff) - borrow;
+ borrow = (y & 0x10000) >> 16;
+ z = (*xa++ >> 16) - borrow;
+ borrow = (z & 0x10000) >> 16;
+ Storeinc(xc, z, y);
+ }
+#endif
+ while(!*--xc)
+ wa--;
+ c->wds = wa;
+ return c;
+}
+
+/* Return the absolute difference between x and the adjacent greater-magnitude double number (ignoring exponent overflows). */
+static double ulp(double x)
+{
+ register Long L;
+ double a = 0;
+
+ L = (word0(x) & Exp_mask) - (P-1)*Exp_msk1;
+#ifndef Sudden_Underflow
+ if (L > 0) {
+#endif
+ set_word0(a, L);
+ set_word1(a, 0);
+#ifndef Sudden_Underflow
+ }
+ else {
+ L = -L >> Exp_shift;
+ if (L < Exp_shift) {
+ set_word0(a, 0x80000 >> L);
+ set_word1(a, 0);
+ }
+ else {
+ set_word0(a, 0);
+ L -= Exp_shift;
+ set_word1(a, L >= 31 ? 1 : 1 << (31 - L));
+ }
+ }
+#endif
+ return a;
+}
+
+
+static double b2d(Bigint *a, int32 *e)
+{
+ ULong *xa, *xa0, w, y, z;
+ int32 k;
+ double d = 0;
+#define d0 word0(d)
+#define d1 word1(d)
+#define set_d0(x) set_word0(d, x)
+#define set_d1(x) set_word1(d, x)
+
+ xa0 = a->x;
+ xa = xa0 + a->wds;
+ y = *--xa;
+#ifdef DEBUG
+ if (!y) Bug("zero y in b2d");
+#endif
+ k = hi0bits(y);
+ *e = 32 - k;
+ if (k < Ebits) {
+ set_d0(Exp_1 | y >> (Ebits - k));
+ w = xa > xa0 ? *--xa : 0;
+ set_d1(y << (32-Ebits + k) | w >> (Ebits - k));
+ goto ret_d;
+ }
+ z = xa > xa0 ? *--xa : 0;
+ if (k -= Ebits) {
+ set_d0(Exp_1 | y << k | z >> (32 - k));
+ y = xa > xa0 ? *--xa : 0;
+ set_d1(z << k | y >> (32 - k));
+ }
+ else {
+ set_d0(Exp_1 | y);
+ set_d1(z);
+ }
+ ret_d:
+#undef d0
+#undef d1
+#undef set_d0
+#undef set_d1
+ return d;
+}
+
+
+/* Convert d into the form b*2^e, where b is an odd integer. b is the returned
+ * Bigint and e is the returned binary exponent. The number of significant
+ * bits in b is returned through *bits. d must be finite and nonzero. */
+static Bigint *d2b(double d, int32 *e, int32 *bits)
+{
+ Bigint *b;
+ int32 de, i, k;
+ ULong *x, y, z;
+#define d0 word0(d)
+#define d1 word1(d)
+#define set_d0(x) set_word0(d, x)
+#define set_d1(x) set_word1(d, x)
+
+ b = Balloc(1);
+ if (!b)
+ return NULL;
+ x = b->x;
+
+ z = d0 & Frac_mask;
+ set_d0(d0 & 0x7fffffff); /* clear sign bit, which we ignore */
+#ifdef Sudden_Underflow
+ de = (int32)(d0 >> Exp_shift);
+ z |= Exp_msk11;
+#else
+ if ((de = (int32)(d0 >> Exp_shift)) != 0)
+ z |= Exp_msk1;
+#endif
+ if ((y = d1) != 0) {
+ if ((k = lo0bits(&y)) != 0) {
+ x[0] = y | z << (32 - k);
+ z >>= k;
+ }
+ else
+ x[0] = y;
+ i = b->wds = (x[1] = z) ? 2 : 1;
+ }
+ else {
+ JS_ASSERT(z);
+ k = lo0bits(&z);
+ x[0] = z;
+ i = b->wds = 1;
+ k += 32;
+ }
+#ifndef Sudden_Underflow
+ if (de) {
+#endif
+ *e = de - Bias - (P-1) + k;
+ *bits = P - k;
+#ifndef Sudden_Underflow
+ }
+ else {
+ *e = de - Bias - (P-1) + 1 + k;
+ *bits = 32*i - hi0bits(x[i-1]);
+ }
+#endif
+ return b;
+}
+#undef d0
+#undef d1
+#undef set_d0
+#undef set_d1
+
+
+static double ratio(Bigint *a, Bigint *b)
+{
+ double da, db;
+ int32 k, ka, kb;
+
+ da = b2d(a, &ka);
+ db = b2d(b, &kb);
+ k = ka - kb + 32*(a->wds - b->wds);
+ if (k > 0)
+ set_word0(da, word0(da) + k*Exp_msk1);
+ else {
+ k = -k;
+ set_word0(db, word0(db) + k*Exp_msk1);
+ }
+ return da / db;
+}
+
+static CONST double
+tens[] = {
+ 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
+ 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
+ 1e20, 1e21, 1e22
+};
+
+static CONST double bigtens[] = { 1e16, 1e32, 1e64, 1e128, 1e256 };
+static CONST double tinytens[] = { 1e-16, 1e-32, 1e-64, 1e-128,
+#ifdef Avoid_Underflow
+ 9007199254740992.e-256
+#else
+ 1e-256
+#endif
+ };
+/* The factor of 2^53 in tinytens[4] helps us avoid setting the underflow */
+/* flag unnecessarily. It leads to a song and dance at the end of strtod. */
+#define Scale_Bit 0x10
+#define n_bigtens 5
+
+
+#ifdef INFNAN_CHECK
+
+#ifndef NAN_WORD0
+#define NAN_WORD0 0x7ff80000
+#endif
+
+#ifndef NAN_WORD1
+#define NAN_WORD1 0
+#endif
+
+static int match(CONST char **sp, char *t)
+{
+ int c, d;
+ CONST char *s = *sp;
+
+ while(d = *t++) {
+ if ((c = *++s) >= 'A' && c <= 'Z')
+ c += 'a' - 'A';
+ if (c != d)
+ return 0;
+ }
+ *sp = s + 1;
+ return 1;
+ }
+#endif /* INFNAN_CHECK */
+
+
+#ifdef JS_THREADSAFE
+static JSBool initialized = JS_FALSE;
+
+/* hacked replica of nspr _PR_InitDtoa */
+static void InitDtoa(void)
+{
+ freelist_lock = PR_NewLock();
+ p5s_lock = PR_NewLock();
+ initialized = JS_TRUE;
+}
+#endif
+
+void js_FinishDtoa(void)
+{
+ int count;
+ Bigint *temp;
+
+#ifdef JS_THREADSAFE
+ if (initialized == JS_TRUE) {
+ PR_DestroyLock(freelist_lock);
+ PR_DestroyLock(p5s_lock);
+ initialized = JS_FALSE;
+ }
+#endif
+
+ /* clear down the freelist array and p5s */
+
+ /* static Bigint *freelist[Kmax+1]; */
+ for (count = 0; count <= Kmax; count++) {
+ Bigint **listp = &freelist[count];
+ while ((temp = *listp) != NULL) {
+ *listp = temp->next;
+ free(temp);
+ }
+ freelist[count] = NULL;
+ }
+
+ /* static Bigint *p5s; */
+ while (p5s) {
+ temp = p5s;
+ p5s = p5s->next;
+ free(temp);
+ }
+}
+
+/* nspr2 watcom bug ifdef omitted */
+
+JS_FRIEND_API(double)
+JS_strtod(CONST char *s00, char **se, int *err)
+{
+ int32 scale;
+ int32 bb2, bb5, bbe, bd2, bd5, bbbits, bs2, c, dsign,
+ e, e1, esign, i, j, k, nd, nd0, nf, nz, nz0, sign;
+ CONST char *s, *s0, *s1;
+ double aadj, aadj1, adj, rv, rv0;
+ Long L;
+ ULong y, z;
+ Bigint *bb, *bb1, *bd, *bd0, *bs, *delta;
+
+ *err = 0;
+
+ bb = bd = bs = delta = NULL;
+ sign = nz0 = nz = 0;
+ rv = 0.;
+
+ /* Locking for Balloc's shared buffers that will be used in this block */
+ ACQUIRE_DTOA_LOCK();
+
+ for(s = s00;;s++) switch(*s) {
+ case '-':
+ sign = 1;
+ /* no break */
+ case '+':
+ if (*++s)
+ goto break2;
+ /* no break */
+ case 0:
+ s = s00;
+ goto ret;
+ case '\t':
+ case '\n':
+ case '\v':
+ case '\f':
+ case '\r':
+ case ' ':
+ continue;
+ default:
+ goto break2;
+ }
+break2:
+
+ if (*s == '0') {
+ nz0 = 1;
+ while(*++s == '0') ;
+ if (!*s)
+ goto ret;
+ }
+ s0 = s;
+ y = z = 0;
+ for(nd = nf = 0; (c = *s) >= '0' && c <= '9'; nd++, s++)
+ if (nd < 9)
+ y = 10*y + c - '0';
+ else if (nd < 16)
+ z = 10*z + c - '0';
+ nd0 = nd;
+ if (c == '.') {
+ c = *++s;
+ if (!nd) {
+ for(; c == '0'; c = *++s)
+ nz++;
+ if (c > '0' && c <= '9') {
+ s0 = s;
+ nf += nz;
+ nz = 0;
+ goto have_dig;
+ }
+ goto dig_done;
+ }
+ for(; c >= '0' && c <= '9'; c = *++s) {
+ have_dig:
+ nz++;
+ if (c -= '0') {
+ nf += nz;
+ for(i = 1; i < nz; i++)
+ if (nd++ < 9)
+ y *= 10;
+ else if (nd <= DBL_DIG + 1)
+ z *= 10;
+ if (nd++ < 9)
+ y = 10*y + c;
+ else if (nd <= DBL_DIG + 1)
+ z = 10*z + c;
+ nz = 0;
+ }
+ }
+ }
+dig_done:
+ e = 0;
+ if (c == 'e' || c == 'E') {
+ if (!nd && !nz && !nz0) {
+ s = s00;
+ goto ret;
+ }
+ s00 = s;
+ esign = 0;
+ switch(c = *++s) {
+ case '-':
+ esign = 1;
+ case '+':
+ c = *++s;
+ }
+ if (c >= '0' && c <= '9') {
+ while(c == '0')
+ c = *++s;
+ if (c > '0' && c <= '9') {
+ L = c - '0';
+ s1 = s;
+ while((c = *++s) >= '0' && c <= '9')
+ L = 10*L + c - '0';
+ if (s - s1 > 8 || L > 19999)
+ /* Avoid confusion from exponents
+ * so large that e might overflow.
+ */
+ e = 19999; /* safe for 16 bit ints */
+ else
+ e = (int32)L;
+ if (esign)
+ e = -e;
+ }
+ else
+ e = 0;
+ }
+ else
+ s = s00;
+ }
+ if (!nd) {
+ if (!nz && !nz0) {
+#ifdef INFNAN_CHECK
+ /* Check for Nan and Infinity */
+ switch(c) {
+ case 'i':
+ case 'I':
+ if (match(&s,"nfinity")) {
+ set_word0(rv, 0x7ff00000);
+ set_word1(rv, 0);
+ goto ret;
+ }
+ break;
+ case 'n':
+ case 'N':
+ if (match(&s, "an")) {
+ set_word0(rv, NAN_WORD0);
+ set_word1(rv, NAN_WORD1);
+ goto ret;
+ }
+ }
+#endif /* INFNAN_CHECK */
+ s = s00;
+ }
+ goto ret;
+ }
+ e1 = e -= nf;
+
+ /* Now we have nd0 digits, starting at s0, followed by a
+ * decimal point, followed by nd-nd0 digits. The number we're
+ * after is the integer represented by those digits times
+ * 10**e */
+
+ if (!nd0)
+ nd0 = nd;
+ k = nd < DBL_DIG + 1 ? nd : DBL_DIG + 1;
+ rv = y;
+ if (k > 9)
+ rv = tens[k - 9] * rv + z;
+ bd0 = 0;
+ if (nd <= DBL_DIG
+#ifndef RND_PRODQUOT
+ && FLT_ROUNDS == 1
+#endif
+ ) {
+ if (!e)
+ goto ret;
+ if (e > 0) {
+ if (e <= Ten_pmax) {
+ /* rv = */ rounded_product(rv, tens[e]);
+ goto ret;
+ }
+ i = DBL_DIG - nd;
+ if (e <= Ten_pmax + i) {
+ /* A fancier test would sometimes let us do
+ * this for larger i values.
+ */
+ e -= i;
+ rv *= tens[i];
+ /* rv = */ rounded_product(rv, tens[e]);
+ goto ret;
+ }
+ }
+#ifndef Inaccurate_Divide
+ else if (e >= -Ten_pmax) {
+ /* rv = */ rounded_quotient(rv, tens[-e]);
+ goto ret;
+ }
+#endif
+ }
+ e1 += nd - k;
+
+ scale = 0;
+
+ /* Get starting approximation = rv * 10**e1 */
+
+ if (e1 > 0) {
+ if ((i = e1 & 15) != 0)
+ rv *= tens[i];
+ if (e1 &= ~15) {
+ if (e1 > DBL_MAX_10_EXP) {
+ ovfl:
+ *err = JS_DTOA_ERANGE;
+#ifdef __STDC__
+ rv = HUGE_VAL;
+#else
+ /* Can't trust HUGE_VAL */
+ set_word0(rv, Exp_mask);
+ set_word1(rv, 0);
+#endif
+ if (bd0)
+ goto retfree;
+ goto ret;
+ }
+ e1 >>= 4;
+ for(j = 0; e1 > 1; j++, e1 >>= 1)
+ if (e1 & 1)
+ rv *= bigtens[j];
+ /* The last multiplication could overflow. */
+ set_word0(rv, word0(rv) - P*Exp_msk1);
+ rv *= bigtens[j];
+ if ((z = word0(rv) & Exp_mask) > Exp_msk1*(DBL_MAX_EXP+Bias-P))
+ goto ovfl;
+ if (z > Exp_msk1*(DBL_MAX_EXP+Bias-1-P)) {
+ /* set to largest number */
+ /* (Can't trust DBL_MAX) */
+ set_word0(rv, Big0);
+ set_word1(rv, Big1);
+ }
+ else
+ set_word0(rv, word0(rv) + P*Exp_msk1);
+ }
+ }
+ else if (e1 < 0) {
+ e1 = -e1;
+ if ((i = e1 & 15) != 0)
+ rv /= tens[i];
+ if (e1 &= ~15) {
+ e1 >>= 4;
+ if (e1 >= 1 << n_bigtens)
+ goto undfl;
+#ifdef Avoid_Underflow
+ if (e1 & Scale_Bit)
+ scale = P;
+ for(j = 0; e1 > 0; j++, e1 >>= 1)
+ if (e1 & 1)
+ rv *= tinytens[j];
+ if (scale && (j = P + 1 - ((word0(rv) & Exp_mask)
+ >> Exp_shift)) > 0) {
+ /* scaled rv is denormal; zap j low bits */
+ if (j >= 32) {
+ set_word1(rv, 0);
+ set_word0(rv, word0(rv) & (0xffffffff << (j-32)));
+ if (!word0(rv))
+ set_word0(rv, 1);
+ }
+ else
+ set_word1(rv, word1(rv) & (0xffffffff << j));
+ }
+#else
+ for(j = 0; e1 > 1; j++, e1 >>= 1)
+ if (e1 & 1)
+ rv *= tinytens[j];
+ /* The last multiplication could underflow. */
+ rv0 = rv;
+ rv *= tinytens[j];
+ if (!rv) {
+ rv = 2.*rv0;
+ rv *= tinytens[j];
+#endif
+ if (!rv) {
+ undfl:
+ rv = 0.;
+ *err = JS_DTOA_ERANGE;
+ if (bd0)
+ goto retfree;
+ goto ret;
+ }
+#ifndef Avoid_Underflow
+ set_word0(rv, Tiny0);
+ set_word1(rv, Tiny1);
+ /* The refinement below will clean
+ * this approximation up.
+ */
+ }
+#endif
+ }
+ }
+
+ /* Now the hard part -- adjusting rv to the correct value.*/
+
+ /* Put digits into bd: true value = bd * 10^e */
+
+ bd0 = s2b(s0, nd0, nd, y);
+ if (!bd0)
+ goto nomem;
+
+ for(;;) {
+ bd = Balloc(bd0->k);
+ if (!bd)
+ goto nomem;
+ Bcopy(bd, bd0);
+ bb = d2b(rv, &bbe, &bbbits); /* rv = bb * 2^bbe */
+ if (!bb)
+ goto nomem;
+ bs = i2b(1);
+ if (!bs)
+ goto nomem;
+
+ if (e >= 0) {
+ bb2 = bb5 = 0;
+ bd2 = bd5 = e;
+ }
+ else {
+ bb2 = bb5 = -e;
+ bd2 = bd5 = 0;
+ }
+ if (bbe >= 0)
+ bb2 += bbe;
+ else
+ bd2 -= bbe;
+ bs2 = bb2;
+#ifdef Sudden_Underflow
+ j = P + 1 - bbbits;
+#else
+#ifdef Avoid_Underflow
+ j = bbe - scale;
+#else
+ j = bbe;
+#endif
+ i = j + bbbits - 1; /* logb(rv) */
+ if (i < Emin) /* denormal */
+ j += P - Emin;
+ else
+ j = P + 1 - bbbits;
+#endif
+ bb2 += j;
+ bd2 += j;
+#ifdef Avoid_Underflow
+ bd2 += scale;
+#endif
+ i = bb2 < bd2 ? bb2 : bd2;
+ if (i > bs2)
+ i = bs2;
+ if (i > 0) {
+ bb2 -= i;
+ bd2 -= i;
+ bs2 -= i;
+ }
+ if (bb5 > 0) {
+ bs = pow5mult(bs, bb5);
+ if (!bs)
+ goto nomem;
+ bb1 = mult(bs, bb);
+ if (!bb1)
+ goto nomem;
+ Bfree(bb);
+ bb = bb1;
+ }
+ if (bb2 > 0) {
+ bb = lshift(bb, bb2);
+ if (!bb)
+ goto nomem;
+ }
+ if (bd5 > 0) {
+ bd = pow5mult(bd, bd5);
+ if (!bd)
+ goto nomem;
+ }
+ if (bd2 > 0) {
+ bd = lshift(bd, bd2);
+ if (!bd)
+ goto nomem;
+ }
+ if (bs2 > 0) {
+ bs = lshift(bs, bs2);
+ if (!bs)
+ goto nomem;
+ }
+ delta = diff(bb, bd);
+ if (!delta)
+ goto nomem;
+ dsign = delta->sign;
+ delta->sign = 0;
+ i = cmp(delta, bs);
+ if (i < 0) {
+ /* Error is less than half an ulp -- check for
+ * special case of mantissa a power of two.
+ */
+ if (dsign || word1(rv) || word0(rv) & Bndry_mask
+#ifdef Avoid_Underflow
+ || (word0(rv) & Exp_mask) <= Exp_msk1 + P*Exp_msk1
+#else
+ || (word0(rv) & Exp_mask) <= Exp_msk1
+#endif
+ ) {
+#ifdef Avoid_Underflow
+ if (!delta->x[0] && delta->wds == 1)
+ dsign = 2;
+#endif
+ break;
+ }
+ delta = lshift(delta,Log2P);
+ if (!delta)
+ goto nomem;
+ if (cmp(delta, bs) > 0)
+ goto drop_down;
+ break;
+ }
+ if (i == 0) {
+ /* exactly half-way between */
+ if (dsign) {
+ if ((word0(rv) & Bndry_mask1) == Bndry_mask1
+ && word1(rv) == 0xffffffff) {
+ /*boundary case -- increment exponent*/
+ set_word0(rv, (word0(rv) & Exp_mask) + Exp_msk1);
+ set_word1(rv, 0);
+#ifdef Avoid_Underflow
+ dsign = 0;
+#endif
+ break;
+ }
+ }
+ else if (!(word0(rv) & Bndry_mask) && !word1(rv)) {
+#ifdef Avoid_Underflow
+ dsign = 2;
+#endif
+ drop_down:
+ /* boundary case -- decrement exponent */
+#ifdef Sudden_Underflow
+ L = word0(rv) & Exp_mask;
+ if (L <= Exp_msk1)
+ goto undfl;
+ L -= Exp_msk1;
+#else
+ L = (word0(rv) & Exp_mask) - Exp_msk1;
+#endif
+ set_word0(rv, L | Bndry_mask1);
+ set_word1(rv, 0xffffffff);
+ break;
+ }
+#ifndef ROUND_BIASED
+ if (!(word1(rv) & LSB))
+ break;
+#endif
+ if (dsign)
+ rv += ulp(rv);
+#ifndef ROUND_BIASED
+ else {
+ rv -= ulp(rv);
+#ifndef Sudden_Underflow
+ if (!rv)
+ goto undfl;
+#endif
+ }
+#ifdef Avoid_Underflow
+ dsign = 1 - dsign;
+#endif
+#endif
+ break;
+ }
+ if ((aadj = ratio(delta, bs)) <= 2.) {
+ if (dsign)
+ aadj = aadj1 = 1.;
+ else if (word1(rv) || word0(rv) & Bndry_mask) {
+#ifndef Sudden_Underflow
+ if (word1(rv) == Tiny1 && !word0(rv))
+ goto undfl;
+#endif
+ aadj = 1.;
+ aadj1 = -1.;
+ }
+ else {
+ /* special case -- power of FLT_RADIX to be */
+ /* rounded down... */
+
+ if (aadj < 2./FLT_RADIX)
+ aadj = 1./FLT_RADIX;
+ else
+ aadj *= 0.5;
+ aadj1 = -aadj;
+ }
+ }
+ else {
+ aadj *= 0.5;
+ aadj1 = dsign ? aadj : -aadj;
+#ifdef Check_FLT_ROUNDS
+ switch(FLT_ROUNDS) {
+ case 2: /* towards +infinity */
+ aadj1 -= 0.5;
+ break;
+ case 0: /* towards 0 */
+ case 3: /* towards -infinity */
+ aadj1 += 0.5;
+ }
+#else
+ if (FLT_ROUNDS == 0)
+ aadj1 += 0.5;
+#endif
+ }
+ y = word0(rv) & Exp_mask;
+
+ /* Check for overflow */
+
+ if (y == Exp_msk1*(DBL_MAX_EXP+Bias-1)) {
+ rv0 = rv;
+ set_word0(rv, word0(rv) - P*Exp_msk1);
+ adj = aadj1 * ulp(rv);
+ rv += adj;
+ if ((word0(rv) & Exp_mask) >=
+ Exp_msk1*(DBL_MAX_EXP+Bias-P)) {
+ if (word0(rv0) == Big0 && word1(rv0) == Big1)
+ goto ovfl;
+ set_word0(rv, Big0);
+ set_word1(rv, Big1);
+ goto cont;
+ }
+ else
+ set_word0(rv, word0(rv) + P*Exp_msk1);
+ }
+ else {
+#ifdef Sudden_Underflow
+ if ((word0(rv) & Exp_mask) <= P*Exp_msk1) {
+ rv0 = rv;
+ set_word0(rv, word0(rv) + P*Exp_msk1);
+ adj = aadj1 * ulp(rv);
+ rv += adj;
+ if ((word0(rv) & Exp_mask) <= P*Exp_msk1)
+ {
+ if (word0(rv0) == Tiny0
+ && word1(rv0) == Tiny1)
+ goto undfl;
+ set_word0(rv, Tiny0);
+ set_word1(rv, Tiny1);
+ goto cont;
+ }
+ else
+ set_word0(rv, word0(rv) - P*Exp_msk1);
+ }
+ else {
+ adj = aadj1 * ulp(rv);
+ rv += adj;
+ }
+#else
+ /* Compute adj so that the IEEE rounding rules will
+ * correctly round rv + adj in some half-way cases.
+ * If rv * ulp(rv) is denormalized (i.e.,
+ * y <= (P-1)*Exp_msk1), we must adjust aadj to avoid
+ * trouble from bits lost to denormalization;
+ * example: 1.2e-307 .
+ */
+#ifdef Avoid_Underflow
+ if (y <= P*Exp_msk1 && aadj > 1.)
+#else
+ if (y <= (P-1)*Exp_msk1 && aadj > 1.)
+#endif
+ {
+ aadj1 = (double)(int32)(aadj + 0.5);
+ if (!dsign)
+ aadj1 = -aadj1;
+ }
+#ifdef Avoid_Underflow
+ if (scale && y <= P*Exp_msk1)
+ set_word0(aadj1, word0(aadj1) + (P+1)*Exp_msk1 - y);
+#endif
+ adj = aadj1 * ulp(rv);
+ rv += adj;
+#endif
+ }
+ z = word0(rv) & Exp_mask;
+#ifdef Avoid_Underflow
+ if (!scale)
+#endif
+ if (y == z) {
+ /* Can we stop now? */
+ L = (Long)aadj;
+ aadj -= L;
+ /* The tolerances below are conservative. */
+ if (dsign || word1(rv) || word0(rv) & Bndry_mask) {
+ if (aadj < .4999999 || aadj > .5000001)
+ break;
+ }
+ else if (aadj < .4999999/FLT_RADIX)
+ break;
+ }
+ cont:
+ Bfree(bb);
+ Bfree(bd);
+ Bfree(bs);
+ Bfree(delta);
+ bb = bd = bs = delta = NULL;
+ }
+#ifdef Avoid_Underflow
+ if (scale) {
+ rv0 = 0.;
+ set_word0(rv0, Exp_1 - P*Exp_msk1);
+ set_word1(rv0, 0);
+ if ((word0(rv) & Exp_mask) <= P*Exp_msk1
+ && word1(rv) & 1
+ && dsign != 2) {
+ if (dsign) {
+#ifdef Sudden_Underflow
+ /* rv will be 0, but this would give the */
+ /* right result if only rv *= rv0 worked. */
+ set_word0(rv, word0(rv) + P*Exp_msk1);
+ set_word0(rv0, Exp_1 - 2*P*Exp_msk1);
+#endif
+ rv += ulp(rv);
+ }
+ else
+ set_word1(rv, word1(rv) & ~1);
+ }
+ rv *= rv0;
+ }
+#endif /* Avoid_Underflow */
+retfree:
+ Bfree(bb);
+ Bfree(bd);
+ Bfree(bs);
+ Bfree(bd0);
+ Bfree(delta);
+ret:
+ RELEASE_DTOA_LOCK();
+ if (se)
+ *se = (char *)s;
+ return sign ? -rv : rv;
+
+nomem:
+ Bfree(bb);
+ Bfree(bd);
+ Bfree(bs);
+ Bfree(bd0);
+ Bfree(delta);
+ RELEASE_DTOA_LOCK();
+ *err = JS_DTOA_ENOMEM;
+ return 0;
+}
+
+
+/* Return floor(b/2^k) and set b to be the remainder. The returned quotient must be less than 2^32. */
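+/* Illustrative only, assuming the contract above: for a Bigint holding 77
+ * and k = 3, quorem2 returns 77 >> 3 = 9 and leaves b holding 77 & 7 = 5. */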
+static uint32 quorem2(Bigint *b, int32 k)
+{
+ ULong mask;
+ ULong result;
+ ULong *bx, *bxe;
+ int32 w;
+ int32 n = k >> 5;
+ k &= 0x1F;
+ mask = (1<<k) - 1;
+
+ w = b->wds - n;
+ if (w <= 0)
+ return 0;
+ JS_ASSERT(w <= 2);
+ bx = b->x;
+ bxe = bx + n;
+ result = *bxe >> k;
+ *bxe &= mask;
+ if (w == 2) {
+ JS_ASSERT(!(bxe[1] & ~mask));
+ if (k)
+ result |= bxe[1] << (32 - k);
+ }
+ n++;
+ while (!*bxe && bxe != bx) {
+ n--;
+ bxe--;
+ }
+ b->wds = n;
+ return result;
+}
+
+/* Return floor(b/S) and set b to be the remainder. As added restrictions, b must not have
+ * more words than S, the most significant word of S must not start with a 1 bit, and the
+ * returned quotient must be less than 36. */
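+/* Worked example, assuming the restrictions above hold: with b == 57 and
+ * S == 8 the initial guess is q = 57/(8+1) = 6; subtracting 6*S leaves
+ * b == 9 >= S, so the final cmp() pass bumps q to 7 and b ends up as 1. */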
+static int32 quorem(Bigint *b, Bigint *S)
+{
+ int32 n;
+ ULong *bx, *bxe, q, *sx, *sxe;
+#ifdef ULLong
+ ULLong borrow, carry, y, ys;
+#else
+ ULong borrow, carry, y, ys;
+ ULong si, z, zs;
+#endif
+
+ n = S->wds;
+ JS_ASSERT(b->wds <= n);
+ if (b->wds < n)
+ return 0;
+ sx = S->x;
+ sxe = sx + --n;
+ bx = b->x;
+ bxe = bx + n;
+ JS_ASSERT(*sxe <= 0x7FFFFFFF);
+ q = *bxe / (*sxe + 1); /* ensure q <= true quotient */
+ JS_ASSERT(q < 36);
+ if (q) {
+ borrow = 0;
+ carry = 0;
+ do {
+#ifdef ULLong
+ ys = *sx++ * (ULLong)q + carry;
+ carry = ys >> 32;
+ y = *bx - (ys & 0xffffffffUL) - borrow;
+ borrow = y >> 32 & 1UL;
+ *bx++ = (ULong)(y & 0xffffffffUL);
+#else
+ si = *sx++;
+ ys = (si & 0xffff) * q + carry;
+ zs = (si >> 16) * q + (ys >> 16);
+ carry = zs >> 16;
+ y = (*bx & 0xffff) - (ys & 0xffff) - borrow;
+ borrow = (y & 0x10000) >> 16;
+ z = (*bx >> 16) - (zs & 0xffff) - borrow;
+ borrow = (z & 0x10000) >> 16;
+ Storeinc(bx, z, y);
+#endif
+ }
+ while(sx <= sxe);
+ if (!*bxe) {
+ bx = b->x;
+ while(--bxe > bx && !*bxe)
+ --n;
+ b->wds = n;
+ }
+ }
+ if (cmp(b, S) >= 0) {
+ q++;
+ borrow = 0;
+ carry = 0;
+ bx = b->x;
+ sx = S->x;
+ do {
+#ifdef ULLong
+ ys = *sx++ + carry;
+ carry = ys >> 32;
+ y = *bx - (ys & 0xffffffffUL) - borrow;
+ borrow = y >> 32 & 1UL;
+ *bx++ = (ULong)(y & 0xffffffffUL);
+#else
+ si = *sx++;
+ ys = (si & 0xffff) + carry;
+ zs = (si >> 16) + (ys >> 16);
+ carry = zs >> 16;
+ y = (*bx & 0xffff) - (ys & 0xffff) - borrow;
+ borrow = (y & 0x10000) >> 16;
+ z = (*bx >> 16) - (zs & 0xffff) - borrow;
+ borrow = (z & 0x10000) >> 16;
+ Storeinc(bx, z, y);
+#endif
+ } while(sx <= sxe);
+ bx = b->x;
+ bxe = bx + n;
+ if (!*bxe) {
+ while(--bxe > bx && !*bxe)
+ --n;
+ b->wds = n;
+ }
+ }
+ return (int32)q;
+}
+
+/* dtoa for IEEE arithmetic (dmg): convert double to ASCII string.
+ *
+ * Inspired by "How to Print Floating-Point Numbers Accurately" by
+ * Guy L. Steele, Jr. and Jon L. White [Proc. ACM SIGPLAN '90, pp. 92-101].
+ *
+ * Modifications:
+ * 1. Rather than iterating, we use a simple numeric overestimate
+ * to determine k = floor(log10(d)). We scale relevant
+ * quantities using O(log2(k)) rather than O(k) multiplications.
+ * 2. For some modes > 2 (corresponding to ecvt and fcvt), we don't
+ * try to generate digits strictly left to right. Instead, we
+ * compute with fewer bits and propagate the carry if necessary
+ * when rounding the final digit up. This is often faster.
+ * 3. Under the assumption that input will be rounded nearest,
+ * mode 0 renders 1e23 as 1e23 rather than 9.999999999999999e22.
+ * That is, we allow equality in stopping tests when the
+ * round-nearest rule will give the same floating-point value
+ * as would satisfaction of the stopping test with strict
+ * inequality.
+ * 4. We remove common factors of powers of 2 from relevant
+ * quantities.
+ * 5. When converting floating-point integers less than 1e16,
+ * we use floating-point arithmetic rather than resorting
+ * to multiple-precision integers.
+ * 6. When asked to produce fewer than 15 digits, we first try
+ * to get by with floating-point arithmetic; we resort to
+ * multiple-precision integer arithmetic only if we cannot
+ * guarantee that the floating-point calculation has given
+ * the correctly rounded result. For k requested digits and
+ * "uniformly" distributed input, the probability is
+ * something like 10^(k-15) that we must resort to the Long
+ * calculation.
+ */
+
+/* Always emits at least one digit. */
+/* If biasUp is set, then rounding in modes 2 and 3 will round away from zero
+ * when the number is exactly halfway between two representable values. For example,
+ * rounding 2.5 to zero digits after the decimal point will return 3 and not 2.
+ * 2.49 will still round to 2, and 2.51 will still round to 3. */
+/* bufsize should be at least 20 for modes 0 and 1. For the other modes,
+ * bufsize should be two greater than the maximum number of output characters expected. */
+static JSBool
+js_dtoa(double d, int mode, JSBool biasUp, int ndigits,
+ int *decpt, int *sign, char **rve, char *buf, size_t bufsize)
+{
+ /* Arguments ndigits, decpt, sign are similar to those
+ of ecvt and fcvt; trailing zeros are suppressed from
+ the returned string. If not null, *rve is set to point
+ to the end of the return value. If d is +-Infinity or NaN,
+ then *decpt is set to 9999.
+
+ mode:
+ 0 ==> shortest string that yields d when read in
+ and rounded to nearest.
+ 1 ==> like 0, but with Steele & White stopping rule;
+ e.g. with IEEE P754 arithmetic, mode 0 gives
+ 1e23 whereas mode 1 gives 9.999999999999999e22.
+ 2 ==> max(1,ndigits) significant digits. This gives a
+ return value similar to that of ecvt, except
+ that trailing zeros are suppressed.
+ 3 ==> through ndigits past the decimal point. This
+ gives a return value similar to that from fcvt,
+ except that trailing zeros are suppressed, and
+ ndigits can be negative.
+ 4-9 should give the same return values as 2-3, i.e.,
+ 4 <= mode <= 9 ==> same return as mode
+ 2 + (mode & 1). These modes are mainly for
+ debugging; often they run slower but sometimes
+ faster than modes 2-3.
+ 4,5,8,9 ==> left-to-right digit generation.
+ 6-9 ==> don't try fast floating-point estimate
+ (if applicable).
+
+ Values of mode other than 0-9 are treated as mode 0.
+
+ Sufficient space is allocated to the return value
+ to hold the suppressed trailing zeros.
+ */
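+    /* Two illustrative calls (hypothetical buf/bufsize, assuming only the
+     * mode semantics documented above):
+     *   js_dtoa(0.1,    0, JS_FALSE, 0, &decpt, &sign, NULL, buf, 20)
+     *       -> buf = "1",   decpt = 0   (shortest round-trip string "0.1")
+     *   js_dtoa(1234.5, 2, JS_FALSE, 3, &decpt, &sign, NULL, buf, 20)
+     *       -> buf = "123", decpt = 4   (ecvt-like, 3 significant digits)
+     */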
+
+ int32 bbits, b2, b5, be, dig, i, ieps, ilim, ilim0, ilim1,
+ j, j1, k, k0, k_check, leftright, m2, m5, s2, s5,
+ spec_case, try_quick;
+ Long L;
+#ifndef Sudden_Underflow
+ int32 denorm;
+ ULong x;
+#endif
+ Bigint *b, *b1, *delta, *mlo, *mhi, *S;
+ double d2, ds, eps;
+ char *s;
+
+ if (word0(d) & Sign_bit) {
+ /* set sign for everything, including 0's and NaNs */
+ *sign = 1;
+ set_word0(d, word0(d) & ~Sign_bit); /* clear sign bit */
+ }
+ else
+ *sign = 0;
+
+ if ((word0(d) & Exp_mask) == Exp_mask) {
+ /* Infinity or NaN */
+ *decpt = 9999;
+ s = !word1(d) && !(word0(d) & Frac_mask) ? "Infinity" : "NaN";
+ if ((s[0] == 'I' && bufsize < 9) || (s[0] == 'N' && bufsize < 4)) {
+ JS_ASSERT(JS_FALSE);
+/* JS_SetError(JS_BUFFER_OVERFLOW_ERROR, 0); */
+ return JS_FALSE;
+ }
+ strcpy(buf, s);
+ if (rve) {
+ *rve = buf[3] ? buf + 8 : buf + 3;
+ JS_ASSERT(**rve == '\0');
+ }
+ return JS_TRUE;
+ }
+
+ b = NULL; /* initialize for abort protection */
+ S = NULL;
+ mlo = mhi = NULL;
+
+ if (!d) {
+ no_digits:
+ *decpt = 1;
+ if (bufsize < 2) {
+ JS_ASSERT(JS_FALSE);
+/* JS_SetError(JS_BUFFER_OVERFLOW_ERROR, 0); */
+ return JS_FALSE;
+ }
+ buf[0] = '0'; buf[1] = '\0'; /* copy "0" to buffer */
+ if (rve)
+ *rve = buf + 1;
+ /* We might have jumped to "no_digits" from below, so we need
+ * to be sure to free the potentially allocated Bigints to avoid
+ * memory leaks. */
+ Bfree(b);
+ Bfree(S);
+ if (mlo != mhi)
+ Bfree(mlo);
+ Bfree(mhi);
+ return JS_TRUE;
+ }
+
+ b = d2b(d, &be, &bbits);
+ if (!b)
+ goto nomem;
+#ifdef Sudden_Underflow
+ i = (int32)(word0(d) >> Exp_shift1 & (Exp_mask>>Exp_shift1));
+#else
+ if ((i = (int32)(word0(d) >> Exp_shift1 & (Exp_mask>>Exp_shift1))) != 0) {
+#endif
+ d2 = d;
+ set_word0(d2, word0(d2) & Frac_mask1);
+ set_word0(d2, word0(d2) | Exp_11);
+
+ /* log(x) ~=~ log(1.5) + (x-1.5)/1.5
+ * log10(x) = log(x) / log(10)
+ * ~=~ log(1.5)/log(10) + (x-1.5)/(1.5*log(10))
+ * log10(d) = (i-Bias)*log(2)/log(10) + log10(d2)
+ *
+ * This suggests computing an approximation k to log10(d) by
+ *
+ * k = (i - Bias)*0.301029995663981
+ * + ( (d2-1.5)*0.289529654602168 + 0.176091259055681 );
+ *
+ * We want k to be too large rather than too small.
+ * The error in the first-order Taylor series approximation
+ * is in our favor, so we just round up the constant enough
+ * to compensate for any error in the multiplication of
+ * (i - Bias) by 0.301029995663981; since |i - Bias| <= 1077,
+ * and 1077 * 0.30103 * 2^-52 ~=~ 7.2e-14,
+ * adding 1e-13 to the constant term more than suffices.
+ * Hence we adjust the constant term to 0.1760912590558.
+ * (We could get a more accurate k by invoking log10,
+ * but this is probably not worthwhile.)
+ */
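+        /* A worked instance of the estimate above (illustrative only): for
+         * d = 1000 = 1.953125 * 2^9, i - Bias = 9 and d2 ~= 1.953125, so
+         * k ~= 9*0.301029995663981 + (1.953125 - 1.5)*0.289529654602168
+         *      + 0.1760912590558 ~= 3.017,
+         * and truncation gives k = 3 = floor(log10(1000)).
+         */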
+
+ i -= Bias;
+#ifndef Sudden_Underflow
+ denorm = 0;
+ }
+ else {
+ /* d is denormalized */
+
+ i = bbits + be + (Bias + (P-1) - 1);
+ x = i > 32 ? word0(d) << (64 - i) | word1(d) >> (i - 32) : word1(d) << (32 - i);
+ d2 = x;
+ set_word0(d2, word0(d2) - 31*Exp_msk1); /* adjust exponent */
+ i -= (Bias + (P-1) - 1) + 1;
+ denorm = 1;
+ }
+#endif
+ /* At this point d = f*2^i, where 1 <= f < 2. d2 is an approximation of f. */
+ ds = (d2-1.5)*0.289529654602168 + 0.1760912590558 + i*0.301029995663981;
+ k = (int32)ds;
+ if (ds < 0. && ds != k)
+ k--; /* want k = floor(ds) */
+ k_check = 1;
+ if (k >= 0 && k <= Ten_pmax) {
+ if (d < tens[k])
+ k--;
+ k_check = 0;
+ }
+ /* At this point floor(log10(d)) <= k <= floor(log10(d))+1.
+ If k_check is zero, we're guaranteed that k = floor(log10(d)). */
+ j = bbits - i - 1;
+ /* At this point d = b/2^j, where b is an odd integer. */
+ if (j >= 0) {
+ b2 = 0;
+ s2 = j;
+ }
+ else {
+ b2 = -j;
+ s2 = 0;
+ }
+ if (k >= 0) {
+ b5 = 0;
+ s5 = k;
+ s2 += k;
+ }
+ else {
+ b2 -= k;
+ b5 = -k;
+ s5 = 0;
+ }
+ /* At this point d/10^k = (b * 2^b2 * 5^b5) / (2^s2 * 5^s5), where b is an odd integer,
+ b2 >= 0, b5 >= 0, s2 >= 0, and s5 >= 0. */
+ if (mode < 0 || mode > 9)
+ mode = 0;
+ try_quick = 1;
+ if (mode > 5) {
+ mode -= 4;
+ try_quick = 0;
+ }
+ leftright = 1;
+ ilim = ilim1 = 0;
+ switch(mode) {
+ case 0:
+ case 1:
+ ilim = ilim1 = -1;
+ i = 18;
+ ndigits = 0;
+ break;
+ case 2:
+ leftright = 0;
+ /* no break */
+ case 4:
+ if (ndigits <= 0)
+ ndigits = 1;
+ ilim = ilim1 = i = ndigits;
+ break;
+ case 3:
+ leftright = 0;
+ /* no break */
+ case 5:
+ i = ndigits + k + 1;
+ ilim = i;
+ ilim1 = i - 1;
+ if (i <= 0)
+ i = 1;
+ }
+ /* ilim is the maximum number of significant digits we want, based on k and ndigits. */
+ /* ilim1 is the maximum number of significant digits we want, based on k and ndigits,
+ when it turns out that k was computed too high by one. */
+
+ /* Ensure space for at least i+1 characters, including trailing null. */
+ if (bufsize <= (size_t)i) {
+ Bfree(b);
+ JS_ASSERT(JS_FALSE);
+ return JS_FALSE;
+ }
+ s = buf;
+
+ if (ilim >= 0 && ilim <= Quick_max && try_quick) {
+
+ /* Try to get by with floating-point arithmetic. */
+
+ i = 0;
+ d2 = d;
+ k0 = k;
+ ilim0 = ilim;
+ ieps = 2; /* conservative */
+ /* Divide d by 10^k, keeping track of the roundoff error and avoiding overflows. */
+ if (k > 0) {
+ ds = tens[k&0xf];
+ j = k >> 4;
+ if (j & Bletch) {
+ /* prevent overflows */
+ j &= Bletch - 1;
+ d /= bigtens[n_bigtens-1];
+ ieps++;
+ }
+ for(; j; j >>= 1, i++)
+ if (j & 1) {
+ ieps++;
+ ds *= bigtens[i];
+ }
+ d /= ds;
+ }
+ else if ((j1 = -k) != 0) {
+ d *= tens[j1 & 0xf];
+ for(j = j1 >> 4; j; j >>= 1, i++)
+ if (j & 1) {
+ ieps++;
+ d *= bigtens[i];
+ }
+ }
+ /* Check that k was computed correctly. */
+ if (k_check && d < 1. && ilim > 0) {
+ if (ilim1 <= 0)
+ goto fast_failed;
+ ilim = ilim1;
+ k--;
+ d *= 10.;
+ ieps++;
+ }
+ /* eps bounds the cumulative error. */
+ eps = ieps*d + 7.;
+ set_word0(eps, word0(eps) - (P-1)*Exp_msk1);
+ if (ilim == 0) {
+ S = mhi = 0;
+ d -= 5.;
+ if (d > eps)
+ goto one_digit;
+ if (d < -eps)
+ goto no_digits;
+ goto fast_failed;
+ }
+#ifndef No_leftright
+ if (leftright) {
+ /* Use Steele & White method of only
+ * generating digits needed.
+ */
+ eps = 0.5/tens[ilim-1] - eps;
+ for(i = 0;;) {
+ L = (Long)d;
+ d -= L;
+ *s++ = '0' + (char)L;
+ if (d < eps)
+ goto ret1;
+ if (1. - d < eps)
+ goto bump_up;
+ if (++i >= ilim)
+ break;
+ eps *= 10.;
+ d *= 10.;
+ }
+ }
+ else {
+#endif
+ /* Generate ilim digits, then fix them up. */
+ eps *= tens[ilim-1];
+ for(i = 1;; i++, d *= 10.) {
+ L = (Long)d;
+ d -= L;
+ *s++ = '0' + (char)L;
+ if (i == ilim) {
+ if (d > 0.5 + eps)
+ goto bump_up;
+ else if (d < 0.5 - eps) {
+ while(*--s == '0') ;
+ s++;
+ goto ret1;
+ }
+ break;
+ }
+ }
+#ifndef No_leftright
+ }
+#endif
+ fast_failed:
+ s = buf;
+ d = d2;
+ k = k0;
+ ilim = ilim0;
+ }
+
+ /* Do we have a "small" integer? */
+
+ if (be >= 0 && k <= Int_max) {
+ /* Yes. */
+ ds = tens[k];
+ if (ndigits < 0 && ilim <= 0) {
+ S = mhi = 0;
+ if (ilim < 0 || d < 5*ds || (!biasUp && d == 5*ds))
+ goto no_digits;
+ goto one_digit;
+ }
+
+ /* Use true number of digits to limit looping. */
+ for(i = 1; i<=k+1; i++) {
+ L = (Long) (d / ds);
+ d -= L*ds;
+#ifdef Check_FLT_ROUNDS
+ /* If FLT_ROUNDS == 2, L will usually be high by 1 */
+ if (d < 0) {
+ L--;
+ d += ds;
+ }
+#endif
+ *s++ = '0' + (char)L;
+ if (i == ilim) {
+ d += d;
+ if ((d > ds) || (d == ds && (L & 1 || biasUp))) {
+ bump_up:
+ while(*--s == '9')
+ if (s == buf) {
+ k++;
+ *s = '0';
+ break;
+ }
+ ++*s++;
+ }
+ break;
+ }
+ d *= 10.;
+ }
+ goto ret1;
+ }
+
+ m2 = b2;
+ m5 = b5;
+ if (leftright) {
+ if (mode < 2) {
+ i =
+#ifndef Sudden_Underflow
+ denorm ? be + (Bias + (P-1) - 1 + 1) :
+#endif
+ 1 + P - bbits;
+ /* i is 1 plus the number of trailing zero bits in d's significand. Thus,
+ (2^m2 * 5^m5) / (2^(s2+i) * 5^s5) = (1/2 lsb of d)/10^k. */
+ }
+ else {
+ j = ilim - 1;
+ if (m5 >= j)
+ m5 -= j;
+ else {
+ s5 += j -= m5;
+ b5 += j;
+ m5 = 0;
+ }
+ if ((i = ilim) < 0) {
+ m2 -= i;
+ i = 0;
+ }
+ /* (2^m2 * 5^m5) / (2^(s2+i) * 5^s5) = (1/2 * 10^(1-ilim))/10^k. */
+ }
+ b2 += i;
+ s2 += i;
+ mhi = i2b(1);
+ if (!mhi)
+ goto nomem;
+ /* (mhi * 2^m2 * 5^m5) / (2^s2 * 5^s5) = one-half of last printed (when mode >= 2) or
+ input (when mode < 2) significant digit, divided by 10^k. */
+ }
+ /* We still have d/10^k = (b * 2^b2 * 5^b5) / (2^s2 * 5^s5). Reduce common factors in
+ b2, m2, and s2 without changing the equalities. */
+ if (m2 > 0 && s2 > 0) {
+ i = m2 < s2 ? m2 : s2;
+ b2 -= i;
+ m2 -= i;
+ s2 -= i;
+ }
+
+ /* Fold b5 into b and m5 into mhi. */
+ if (b5 > 0) {
+ if (leftright) {
+ if (m5 > 0) {
+ mhi = pow5mult(mhi, m5);
+ if (!mhi)
+ goto nomem;
+ b1 = mult(mhi, b);
+ if (!b1)
+ goto nomem;
+ Bfree(b);
+ b = b1;
+ }
+ if ((j = b5 - m5) != 0) {
+ b = pow5mult(b, j);
+ if (!b)
+ goto nomem;
+ }
+ }
+ else {
+ b = pow5mult(b, b5);
+ if (!b)
+ goto nomem;
+ }
+ }
+ /* Now we have d/10^k = (b * 2^b2) / (2^s2 * 5^s5) and
+ (mhi * 2^m2) / (2^s2 * 5^s5) = one-half of last printed or input significant digit, divided by 10^k. */
+
+ S = i2b(1);
+ if (!S)
+ goto nomem;
+ if (s5 > 0) {
+ S = pow5mult(S, s5);
+ if (!S)
+ goto nomem;
+ }
+ /* Now we have d/10^k = (b * 2^b2) / (S * 2^s2) and
+ (mhi * 2^m2) / (S * 2^s2) = one-half of last printed or input significant digit, divided by 10^k. */
+
+ /* Check for special case that d is a normalized power of 2. */
+ spec_case = 0;
+ if (mode < 2) {
+ if (!word1(d) && !(word0(d) & Bndry_mask)
+#ifndef Sudden_Underflow
+ && word0(d) & (Exp_mask & Exp_mask << 1)
+#endif
+ ) {
+ /* The special case. Here we want to be within a quarter of the last input
+ significant digit instead of one half of it when the decimal output string's value is less than d. */
+ b2 += Log2P;
+ s2 += Log2P;
+ spec_case = 1;
+ }
+ }
+
+ /* Arrange for convenient computation of quotients:
+ * shift left if necessary so divisor has 4 leading 0 bits.
+ *
+ * Perhaps we should just compute leading 28 bits of S once
+ * and for all and pass them and a shift to quorem, so it
+ * can do shifts and ors to compute the numerator for q.
+ */
+ if ((i = ((s5 ? 32 - hi0bits(S->x[S->wds-1]) : 1) + s2) & 0x1f) != 0)
+ i = 32 - i;
+ /* i is the number of leading zero bits in the most significant word of S*2^s2. */
+ if (i > 4) {
+ i -= 4;
+ b2 += i;
+ m2 += i;
+ s2 += i;
+ }
+ else if (i < 4) {
+ i += 28;
+ b2 += i;
+ m2 += i;
+ s2 += i;
+ }
+ /* Now S*2^s2 has exactly four leading zero bits in its most significant word. */
+ if (b2 > 0) {
+ b = lshift(b, b2);
+ if (!b)
+ goto nomem;
+ }
+ if (s2 > 0) {
+ S = lshift(S, s2);
+ if (!S)
+ goto nomem;
+ }
+ /* Now we have d/10^k = b/S and
+ (mhi * 2^m2) / S = maximum acceptable error, divided by 10^k. */
+ if (k_check) {
+ if (cmp(b,S) < 0) {
+ k--;
+ b = multadd(b, 10, 0); /* we botched the k estimate */
+ if (!b)
+ goto nomem;
+ if (leftright) {
+ mhi = multadd(mhi, 10, 0);
+ if (!mhi)
+ goto nomem;
+ }
+ ilim = ilim1;
+ }
+ }
+ /* At this point 1 <= d/10^k = b/S < 10. */
+
+ if (ilim <= 0 && mode > 2) {
+ /* We're doing fixed-mode output and d is less than the minimum nonzero output in this mode.
+ Output either zero or the minimum nonzero output depending on which is closer to d. */
+ if (ilim < 0)
+ goto no_digits;
+ S = multadd(S,5,0);
+ if (!S)
+ goto nomem;
+ i = cmp(b,S);
+ if (i < 0 || (i == 0 && !biasUp)) {
+ /* Always emit at least one digit. If the number appears to be zero
+ using the current mode, then emit one '0' digit and set decpt to 1. */
+ /*no_digits:
+ k = -1 - ndigits;
+ goto ret; */
+ goto no_digits;
+ }
+ one_digit:
+ *s++ = '1';
+ k++;
+ goto ret;
+ }
+ if (leftright) {
+ if (m2 > 0) {
+ mhi = lshift(mhi, m2);
+ if (!mhi)
+ goto nomem;
+ }
+
+ /* Compute mlo -- check for special case
+ * that d is a normalized power of 2.
+ */
+
+ mlo = mhi;
+ if (spec_case) {
+ mhi = Balloc(mhi->k);
+ if (!mhi)
+ goto nomem;
+ Bcopy(mhi, mlo);
+ mhi = lshift(mhi, Log2P);
+ if (!mhi)
+ goto nomem;
+ }
+ /* mlo/S = maximum acceptable error, divided by 10^k, if the output is less than d. */
+ /* mhi/S = maximum acceptable error, divided by 10^k, if the output is greater than d. */
+
+ for(i = 1;;i++) {
+ dig = quorem(b,S) + '0';
+ /* Do we yet have the shortest decimal string
+ * that will round to d?
+ */
+ j = cmp(b, mlo);
+ /* j is b/S compared with mlo/S. */
+ delta = diff(S, mhi);
+ if (!delta)
+ goto nomem;
+ j1 = delta->sign ? 1 : cmp(b, delta);
+ Bfree(delta);
+ /* j1 is b/S compared with 1 - mhi/S. */
+#ifndef ROUND_BIASED
+ if (j1 == 0 && !mode && !(word1(d) & 1)) {
+ if (dig == '9')
+ goto round_9_up;
+ if (j > 0)
+ dig++;
+ *s++ = (char)dig;
+ goto ret;
+ }
+#endif
+ if ((j < 0) || (j == 0 && !mode
+#ifndef ROUND_BIASED
+ && !(word1(d) & 1)
+#endif
+ )) {
+ if (j1 > 0) {
+ /* Either dig or dig+1 would work here as the least significant decimal digit.
+ Use whichever would produce a decimal value closer to d. */
+ b = lshift(b, 1);
+ if (!b)
+ goto nomem;
+ j1 = cmp(b, S);
+ if (((j1 > 0) || (j1 == 0 && (dig & 1 || biasUp)))
+ && (dig++ == '9'))
+ goto round_9_up;
+ }
+ *s++ = (char)dig;
+ goto ret;
+ }
+ if (j1 > 0) {
+ if (dig == '9') { /* possible if i == 1 */
+ round_9_up:
+ *s++ = '9';
+ goto roundoff;
+ }
+ *s++ = (char)dig + 1;
+ goto ret;
+ }
+ *s++ = (char)dig;
+ if (i == ilim)
+ break;
+ b = multadd(b, 10, 0);
+ if (!b)
+ goto nomem;
+ if (mlo == mhi) {
+ mlo = mhi = multadd(mhi, 10, 0);
+ if (!mhi)
+ goto nomem;
+ }
+ else {
+ mlo = multadd(mlo, 10, 0);
+ if (!mlo)
+ goto nomem;
+ mhi = multadd(mhi, 10, 0);
+ if (!mhi)
+ goto nomem;
+ }
+ }
+ }
+ else
+ for(i = 1;; i++) {
+ *s++ = (char)(dig = quorem(b,S) + '0');
+ if (i >= ilim)
+ break;
+ b = multadd(b, 10, 0);
+ if (!b)
+ goto nomem;
+ }
+
+ /* Round off last digit */
+
+ b = lshift(b, 1);
+ if (!b)
+ goto nomem;
+ j = cmp(b, S);
+ if ((j > 0) || (j == 0 && (dig & 1 || biasUp))) {
+ roundoff:
+ while(*--s == '9')
+ if (s == buf) {
+ k++;
+ *s++ = '1';
+ goto ret;
+ }
+ ++*s++;
+ }
+ else {
+ /* Strip trailing zeros */
+ while(*--s == '0') ;
+ s++;
+ }
+ ret:
+ Bfree(S);
+ if (mhi) {
+ if (mlo && mlo != mhi)
+ Bfree(mlo);
+ Bfree(mhi);
+ }
+ ret1:
+ Bfree(b);
+ JS_ASSERT(s < buf + bufsize);
+ *s = '\0';
+ if (rve)
+ *rve = s;
+ *decpt = k + 1;
+ return JS_TRUE;
+
+nomem:
+ Bfree(S);
+ if (mhi) {
+ if (mlo && mlo != mhi)
+ Bfree(mlo);
+ Bfree(mhi);
+ }
+ Bfree(b);
+ return JS_FALSE;
+}
+
+
+/* Mapping of JSDToStrMode -> js_dtoa mode */
+static const int dtoaModes[] = {
+ 0, /* DTOSTR_STANDARD */
+ 0, /* DTOSTR_STANDARD_EXPONENTIAL, */
+ 3, /* DTOSTR_FIXED, */
+ 2, /* DTOSTR_EXPONENTIAL, */
+ 2}; /* DTOSTR_PRECISION */
+
+JS_FRIEND_API(char *)
+JS_dtostr(char *buffer, size_t bufferSize, JSDToStrMode mode, int precision, double d)
+{
+ int decPt; /* Position of decimal point relative to first digit returned by js_dtoa */
+ int sign; /* Nonzero if the sign bit was set in d */
+ int nDigits; /* Number of significand digits returned by js_dtoa */
+ char *numBegin = buffer+2; /* Pointer to the digits returned by js_dtoa; the +2 leaves space for */
+ /* the sign and/or decimal point */
+ char *numEnd; /* Pointer past the digits returned by js_dtoa */
+ JSBool dtoaRet;
+
+ JS_ASSERT(bufferSize >= (size_t)(mode <= DTOSTR_STANDARD_EXPONENTIAL ? DTOSTR_STANDARD_BUFFER_SIZE :
+ DTOSTR_VARIABLE_BUFFER_SIZE(precision)));
+
+ if (mode == DTOSTR_FIXED && (d >= 1e21 || d <= -1e21))
+ mode = DTOSTR_STANDARD; /* Change mode here rather than below because the buffer may not be large enough to hold a large integer. */
+
+ /* Locking for Balloc's shared buffers */
+ ACQUIRE_DTOA_LOCK();
+ dtoaRet = js_dtoa(d, dtoaModes[mode], mode >= DTOSTR_FIXED, precision, &decPt, &sign, &numEnd, numBegin, bufferSize-2);
+ RELEASE_DTOA_LOCK();
+ if (!dtoaRet)
+ return 0;
+
+ nDigits = numEnd - numBegin;
+
+ /* If Infinity, -Infinity, or NaN, return the string regardless of the mode. */
+ if (decPt != 9999) {
+ JSBool exponentialNotation = JS_FALSE;
+ int minNDigits = 0; /* Minimum number of significand digits required by mode and precision */
+ char *p;
+ char *q;
+
+ switch (mode) {
+ case DTOSTR_STANDARD:
+ if (decPt < -5 || decPt > 21)
+ exponentialNotation = JS_TRUE;
+ else
+ minNDigits = decPt;
+ break;
+
+ case DTOSTR_FIXED:
+ if (precision >= 0)
+ minNDigits = decPt + precision;
+ else
+ minNDigits = decPt;
+ break;
+
+ case DTOSTR_EXPONENTIAL:
+ JS_ASSERT(precision > 0);
+ minNDigits = precision;
+ /* Fall through */
+ case DTOSTR_STANDARD_EXPONENTIAL:
+ exponentialNotation = JS_TRUE;
+ break;
+
+ case DTOSTR_PRECISION:
+ JS_ASSERT(precision > 0);
+ minNDigits = precision;
+ if (decPt < -5 || decPt > precision)
+ exponentialNotation = JS_TRUE;
+ break;
+ }
+
+ /* If the number has fewer than minNDigits, pad it with zeros at the end */
+ if (nDigits < minNDigits) {
+ p = numBegin + minNDigits;
+ nDigits = minNDigits;
+ do {
+ *numEnd++ = '0';
+ } while (numEnd != p);
+ *numEnd = '\0';
+ }
+
+ if (exponentialNotation) {
+ /* Insert a decimal point if more than one significand digit */
+ if (nDigits != 1) {
+ numBegin--;
+ numBegin[0] = numBegin[1];
+ numBegin[1] = '.';
+ }
+ JS_snprintf(numEnd, bufferSize - (numEnd - buffer), "e%+d", decPt-1);
+ } else if (decPt != nDigits) {
+ /* Some kind of a fraction in fixed notation */
+ JS_ASSERT(decPt <= nDigits);
+ if (decPt > 0) {
+ /* dd...dd . dd...dd */
+ p = --numBegin;
+ do {
+ *p = p[1];
+ p++;
+ } while (--decPt);
+ *p = '.';
+ } else {
+ /* 0 . 00...00dd...dd */
+ p = numEnd;
+ numEnd += 1 - decPt;
+ q = numEnd;
+ JS_ASSERT(numEnd < buffer + bufferSize);
+ *numEnd = '\0';
+ while (p != numBegin)
+ *--q = *--p;
+ for (p = numBegin + 1; p != q; p++)
+ *p = '0';
+ *numBegin = '.';
+ *--numBegin = '0';
+ }
+ }
+ }
+
+ /* If negative and neither -0.0 nor NaN, output a leading '-'. */
+ if (sign &&
+ !(word0(d) == Sign_bit && word1(d) == 0) &&
+ !((word0(d) & Exp_mask) == Exp_mask &&
+ (word1(d) || (word0(d) & Frac_mask)))) {
+ *--numBegin = '-';
+ }
+ return numBegin;
+}
+
+
+/* Let b = floor(b / divisor), and return the remainder. b must be nonnegative.
+ * divisor must be between 1 and 65536.
+ * This function cannot run out of memory. */
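+/* Illustrative only, assuming the contract above: with b == 255 and
+ * divisor == 16, divrem returns 15 and leaves b == 15 (255 = 15*16 + 15);
+ * JS_dtobasestr below uses this to peel off the integer-part digits in the
+ * requested base. */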
+static uint32
+divrem(Bigint *b, uint32 divisor)
+{
+ int32 n = b->wds;
+ uint32 remainder = 0;
+ ULong *bx;
+ ULong *bp;
+
+ JS_ASSERT(divisor > 0 && divisor <= 65536);
+
+ if (!n)
+ return 0; /* b is zero */
+ bx = b->x;
+ bp = bx + n;
+ do {
+ ULong a = *--bp;
+ ULong dividend = remainder << 16 | a >> 16;
+ ULong quotientHi = dividend / divisor;
+ ULong quotientLo;
+
+ remainder = dividend - quotientHi*divisor;
+ JS_ASSERT(quotientHi <= 0xFFFF && remainder < divisor);
+ dividend = remainder << 16 | (a & 0xFFFF);
+ quotientLo = dividend / divisor;
+ remainder = dividend - quotientLo*divisor;
+ JS_ASSERT(quotientLo <= 0xFFFF && remainder < divisor);
+ *bp = quotientHi << 16 | quotientLo;
+ } while (bp != bx);
+ /* Decrease the size of the number if its most significant word is now zero. */
+ if (bx[n-1] == 0)
+ b->wds--;
+ return remainder;
+}
+
+
+/* "-0.0000...(1073 zeros after decimal point)...0001\0" is the longest string that we could produce,
+ * which occurs when printing -5e-324 in binary. We could compute a better estimate of the size of
+ * the output string and malloc fewer bytes depending on d and base, but why bother? */
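+/* Size check (illustrative): -5e-324 is the negative of the smallest
+ * subnormal, 2^-1074, whose binary expansion is "0." followed by 1073 zeros
+ * and a final 1, so the longest output is 1 (sign) + 2 ("0.") + 1073 zeros
+ * + 1 digit + 1 trailing null = 1078 characters. */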
+#define DTOBASESTR_BUFFER_SIZE 1078
+#define BASEDIGIT(digit) ((char)(((digit) >= 10) ? 'a' - 10 + (digit) : '0' + (digit)))
+
+JS_FRIEND_API(char *)
+JS_dtobasestr(int base, double d)
+{
+ char *buffer; /* The output string */
+ char *p; /* Pointer to current position in the buffer */
+ char *pInt; /* Pointer to the beginning of the integer part of the string */
+ char *q;
+ uint32 digit;
+ double di; /* d truncated to an integer */
+ double df; /* The fractional part of d */
+
+ JS_ASSERT(base >= 2 && base <= 36);
+
+ buffer = (char*) malloc(DTOBASESTR_BUFFER_SIZE);
+ if (buffer) {
+ p = buffer;
+ if (d < 0.0
+#if defined(XP_WIN) || defined(XP_OS2)
+ && !((word0(d) & Exp_mask) == Exp_mask && ((word0(d) & Frac_mask) || word1(d))) /* Visual C++ doesn't know how to compare against NaN */
+#endif
+ ) {
+ *p++ = '-';
+ d = -d;
+ }
+
+ /* Check for Infinity and NaN */
+ if ((word0(d) & Exp_mask) == Exp_mask) {
+ strcpy(p, !word1(d) && !(word0(d) & Frac_mask) ? "Infinity" : "NaN");
+ return buffer;
+ }
+
+ /* Locking for Balloc's shared buffers */
+ ACQUIRE_DTOA_LOCK();
+
+ /* Output the integer part of d with the digits in reverse order. */
+ pInt = p;
+ di = fd_floor(d);
+ if (di <= 4294967295.0) {
+ uint32 n = (uint32)di;
+ if (n)
+ do {
+ uint32 m = n / base;
+ digit = n - m*base;
+ n = m;
+ JS_ASSERT(digit < (uint32)base);
+ *p++ = BASEDIGIT(digit);
+ } while (n);
+ else *p++ = '0';
+ } else {
+ int32 e;
+ int32 bits; /* Number of significant bits in di; not used. */
+ Bigint *b = d2b(di, &e, &bits);
+ if (!b)
+ goto nomem1;
+ b = lshift(b, e);
+ if (!b) {
+ nomem1:
+ Bfree(b);
+ RELEASE_DTOA_LOCK();
+ free(buffer);
+ return NULL;
+ }
+ do {
+ digit = divrem(b, base);
+ JS_ASSERT(digit < (uint32)base);
+ *p++ = BASEDIGIT(digit);
+ } while (b->wds);
+ Bfree(b);
+ }
+ /* Reverse the digits of the integer part of d. */
+ q = p-1;
+ while (q > pInt) {
+ char ch = *pInt;
+ *pInt++ = *q;
+ *q-- = ch;
+ }
+
+ df = d - di;
+ if (df != 0.0) {
+ /* We have a fraction. */
+ int32 e, bbits, s2, done;
+ Bigint *b, *s, *mlo, *mhi;
+
+ b = s = mlo = mhi = NULL;
+
+ *p++ = '.';
+ b = d2b(df, &e, &bbits);
+ if (!b) {
+ nomem2:
+ Bfree(b);
+ Bfree(s);
+ if (mlo != mhi)
+ Bfree(mlo);
+ Bfree(mhi);
+ RELEASE_DTOA_LOCK();
+ free(buffer);
+ return NULL;
+ }
+ JS_ASSERT(e < 0);
+ /* At this point df = b * 2^e. e must be less than zero because 0 < df < 1. */
+
+ s2 = -(int32)(word0(d) >> Exp_shift1 & Exp_mask>>Exp_shift1);
+#ifndef Sudden_Underflow
+ if (!s2)
+ s2 = -1;
+#endif
+ s2 += Bias + P;
+ /* 1/2^s2 = (nextDouble(d) - d)/2 */
+ JS_ASSERT(-s2 < e);
+ mlo = i2b(1);
+ if (!mlo)
+ goto nomem2;
+ mhi = mlo;
+ if (!word1(d) && !(word0(d) & Bndry_mask)
+#ifndef Sudden_Underflow
+ && word0(d) & (Exp_mask & Exp_mask << 1)
+#endif
+ ) {
+ /* The special case. Here we want to be within a quarter of the last input
+ significant digit instead of one half of it when the output string's value is less than d. */
+ s2 += Log2P;
+ mhi = i2b(1<<Log2P);
+ if (!mhi)
+ goto nomem2;
+ }
+ b = lshift(b, e + s2);
+ if (!b)
+ goto nomem2;
+ s = i2b(1);
+ if (!s)
+ goto nomem2;
+ s = lshift(s, s2);
+ if (!s)
+ goto nomem2;
+ /* At this point we have the following:
+ * s = 2^s2;
+ * 1 > df = b/2^s2 > 0;
+ * (d - prevDouble(d))/2 = mlo/2^s2;
+ * (nextDouble(d) - d)/2 = mhi/2^s2. */
+
+ done = JS_FALSE;
+ do {
+ int32 j, j1;
+ Bigint *delta;
+
+ b = multadd(b, base, 0);
+ if (!b)
+ goto nomem2;
+ digit = quorem2(b, s2);
+ if (mlo == mhi) {
+ mlo = mhi = multadd(mlo, base, 0);
+ if (!mhi)
+ goto nomem2;
+ }
+ else {
+ mlo = multadd(mlo, base, 0);
+ if (!mlo)
+ goto nomem2;
+ mhi = multadd(mhi, base, 0);
+ if (!mhi)
+ goto nomem2;
+ }
+
+ /* Do we yet have the shortest string that will round to d? */
+ j = cmp(b, mlo);
+ /* j is b/2^s2 compared with mlo/2^s2. */
+ delta = diff(s, mhi);
+ if (!delta)
+ goto nomem2;
+ j1 = delta->sign ? 1 : cmp(b, delta);
+ Bfree(delta);
+ /* j1 is b/2^s2 compared with 1 - mhi/2^s2. */
+
+#ifndef ROUND_BIASED
+ if (j1 == 0 && !(word1(d) & 1)) {
+ if (j > 0)
+ digit++;
+ done = JS_TRUE;
+ } else
+#endif
+ if (j < 0 || (j == 0
+#ifndef ROUND_BIASED
+ && !(word1(d) & 1)
+#endif
+ )) {
+ if (j1 > 0) {
+ /* Either digit or digit+1 would work here as the least significant digit.
+ Use whichever would produce an output value closer to d. */
+ b = lshift(b, 1);
+ if (!b)
+ goto nomem2;
+ j1 = cmp(b, s);
+ if (j1 > 0) /* The even test (|| (j1 == 0 && (digit & 1))) is not here because it messes up odd base output
+ * such as 3.5 in base 3. */
+ digit++;
+ }
+ done = JS_TRUE;
+ } else if (j1 > 0) {
+ digit++;
+ done = JS_TRUE;
+ }
+ JS_ASSERT(digit < (uint32)base);
+ *p++ = BASEDIGIT(digit);
+ } while (!done);
+ Bfree(b);
+ Bfree(s);
+ if (mlo != mhi)
+ Bfree(mlo);
+ Bfree(mhi);
+ }
+ JS_ASSERT(p < buffer + DTOBASESTR_BUFFER_SIZE);
+ *p = '\0';
+ RELEASE_DTOA_LOCK();
+ }
+ return buffer;
+}
diff --git a/src/third_party/js-1.7/jsdtoa.h b/src/third_party/js-1.7/jsdtoa.h
new file mode 100644
index 00000000000..409f45454b5
--- /dev/null
+++ b/src/third_party/js-1.7/jsdtoa.h
@@ -0,0 +1,130 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsdtoa_h___
+#define jsdtoa_h___
+/*
+ * Public interface to portable double-precision floating point to string
+ * and back conversion package.
+ */
+
+#include "jscompat.h"
+
+JS_BEGIN_EXTERN_C
+
+/*
+ * JS_strtod() returns as a double-precision floating-point number
+ * the value represented by the character string pointed to by
+ * s00. The string is scanned up to the first unrecognized
+ * character.
+ * If the value of se is not (char **)NULL, a pointer to
+ * the character terminating the scan is returned in the location pointed
+ * to by se. If no number can be formed, se is set to s00, and
+ * zero is returned.
+ *
+ * *err is set to zero on success; it's set to JS_DTOA_ERANGE on range
+ * errors and JS_DTOA_ENOMEM on memory failure.
+ */
+#define JS_DTOA_ERANGE 1
+#define JS_DTOA_ENOMEM 2
+JS_FRIEND_API(double)
+JS_strtod(const char *s00, char **se, int *err);
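+/*
+ * Illustrative usage sketch, assuming only the behaviour documented above
+ * (the input string and the expected values are hypothetical):
+ *
+ *     int err;
+ *     char *end;
+ *     double d = JS_strtod("6.25e2px", &end, &err);
+ *     // expected: err == 0, d == 625.0, end points at "px"
+ */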
+
+/*
+ * Modes for converting floating-point numbers to strings.
+ *
+ * Some of the modes can round-trip; this means that if the number is converted to
+ * a string using one of these modes and then converted back to a number, the result
+ * will be identical to the original number (except that, due to ECMA, -0 will get converted
+ * to +0). These round-trip modes return the minimum number of significand digits that
+ * permit the round trip.
+ *
+ * Some of the modes take an integer parameter <precision>.
+ */
+/* NB: Keep this in sync with number_constants[]. */
+typedef enum JSDToStrMode {
+ DTOSTR_STANDARD, /* Either fixed or exponential format; round-trip */
+ DTOSTR_STANDARD_EXPONENTIAL, /* Always exponential format; round-trip */
+ DTOSTR_FIXED, /* Round to <precision> digits after the decimal point; exponential if number is large */
+ DTOSTR_EXPONENTIAL, /* Always exponential format; <precision> significant digits */
+ DTOSTR_PRECISION /* Either fixed or exponential format; <precision> significant digits */
+} JSDToStrMode;
+
+
+/* Maximum number of characters (including trailing null) that a DTOSTR_STANDARD or DTOSTR_STANDARD_EXPONENTIAL
+ * conversion can produce. This maximum is reached for a number like -0.0000012345678901234567. */
+#define DTOSTR_STANDARD_BUFFER_SIZE 26
+
+/* Maximum number of characters (including trailing null) that one of the other conversions
+ * can produce. This maximum is reached for TO_FIXED, which can generate up to 21 digits before the decimal point. */
+#define DTOSTR_VARIABLE_BUFFER_SIZE(precision) ((precision)+24 > DTOSTR_STANDARD_BUFFER_SIZE ? (precision)+24 : DTOSTR_STANDARD_BUFFER_SIZE)
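+/* Size check (illustrative): "-0.0000012345678901234567" is 1 (sign) +
+ * 2 ("0.") + 5 zeros + 17 significant digits = 25 characters plus the
+ * trailing null, i.e. 26. Likewise DTOSTR_FIXED can emit 1 (sign) + 21
+ * integer digits + 1 ('.') + <precision> fraction digits + 1 null =
+ * precision + 24 characters, which the macro above rounds up to at least
+ * DTOSTR_STANDARD_BUFFER_SIZE. */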
+
+/*
+ * Convert dval according to the given mode and return a pointer to the resulting ASCII string.
+ * The result is held somewhere in buffer, but not necessarily at the beginning. The size of
+ * buffer is given in bufferSize, and must be at least as large as given by the above macros.
+ *
+ * Return NULL if out of memory.
+ */
+JS_FRIEND_API(char *)
+JS_dtostr(char *buffer, size_t bufferSize, JSDToStrMode mode, int precision, double dval);
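+/*
+ * Illustrative usage sketch, assuming only the contract documented above:
+ *
+ *     char buf[DTOSTR_STANDARD_BUFFER_SIZE];
+ *     char *s = JS_dtostr(buf, sizeof buf, DTOSTR_STANDARD, 0, 3.25);
+ *     // s points somewhere inside buf and holds "3.25"; NULL means OOM.
+ */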
+
+/*
+ * Convert d to a string in the given base. The integral part of d will be printed exactly
+ * in that base, regardless of how large it is, because there is no exponential notation for non-base-ten
+ * numbers. The fractional part will be rounded to as few digits as possible while still preserving
+ * the round-trip property (analogous to that of printing decimal numbers). In other words, if one were
+ * to read the resulting string in via a hypothetical base-number-reading routine that rounds to the nearest
+ * IEEE double (and to an even significand if there are two equally near doubles), then the result would
+ * equal d (except for -0.0, which converts to "0", and NaN, which is not equal to itself).
+ *
+ * Return NULL if out of memory. If the result is not NULL, it must be released via free().
+ */
+JS_FRIEND_API(char *)
+JS_dtobasestr(int base, double d);
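+/*
+ * Illustrative usage sketch, assuming only the contract documented above
+ * (use() is a hypothetical consumer):
+ *
+ *     char *s = JS_dtobasestr(2, 2.5);   // expected "10.1"
+ *     if (s) {
+ *         use(s);
+ *         free(s);                       // caller owns the string
+ *     }
+ */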
+
+/*
+ * Clean up any persistent RAM allocated during the execution of DtoA
+ * routines, and remove any locks that might have been created.
+ */
+extern void js_FinishDtoa(void);
+
+JS_END_EXTERN_C
+
+#endif /* jsdtoa_h___ */
diff --git a/src/third_party/js-1.7/jsemit.c b/src/third_party/js-1.7/jsemit.c
new file mode 100644
index 00000000000..f8a06beb5fd
--- /dev/null
+++ b/src/third_party/js-1.7/jsemit.c
@@ -0,0 +1,6845 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS bytecode generation.
+ */
+#include "jsstddef.h"
+#ifdef HAVE_MEMORY_H
+#include <memory.h>
+#endif
+#include <string.h>
+#include "jstypes.h"
+#include "jsarena.h" /* Added by JSIFY */
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsbit.h"
+#include "jsprf.h"
+#include "jsapi.h"
+#include "jsatom.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsemit.h"
+#include "jsfun.h"
+#include "jsnum.h"
+#include "jsopcode.h"
+#include "jsparse.h"
+#include "jsregexp.h"
+#include "jsscan.h"
+#include "jsscope.h"
+#include "jsscript.h"
+
+/* Allocation chunk counts, must be powers of two in general. */
+#define BYTECODE_CHUNK 256 /* code allocation increment */
+#define SRCNOTE_CHUNK 64 /* initial srcnote allocation increment */
+#define TRYNOTE_CHUNK 64 /* trynote allocation increment */
+
+/* Macros to compute byte sizes from typed element counts. */
+#define BYTECODE_SIZE(n) ((n) * sizeof(jsbytecode))
+#define SRCNOTE_SIZE(n) ((n) * sizeof(jssrcnote))
+#define TRYNOTE_SIZE(n) ((n) * sizeof(JSTryNote))
+
+JS_FRIEND_API(JSBool)
+js_InitCodeGenerator(JSContext *cx, JSCodeGenerator *cg,
+ JSArenaPool *codePool, JSArenaPool *notePool,
+ const char *filename, uintN lineno,
+ JSPrincipals *principals)
+{
+ memset(cg, 0, sizeof *cg);
+ TREE_CONTEXT_INIT(&cg->treeContext);
+ cg->treeContext.flags |= TCF_COMPILING;
+ cg->codePool = codePool;
+ cg->notePool = notePool;
+ cg->codeMark = JS_ARENA_MARK(codePool);
+ cg->noteMark = JS_ARENA_MARK(notePool);
+ cg->tempMark = JS_ARENA_MARK(&cx->tempPool);
+ cg->current = &cg->main;
+ cg->filename = filename;
+ cg->firstLine = cg->prolog.currentLine = cg->main.currentLine = lineno;
+ cg->principals = principals;
+ ATOM_LIST_INIT(&cg->atomList);
+ cg->prolog.noteMask = cg->main.noteMask = SRCNOTE_CHUNK - 1;
+ ATOM_LIST_INIT(&cg->constList);
+ return JS_TRUE;
+}
+
+JS_FRIEND_API(void)
+js_FinishCodeGenerator(JSContext *cx, JSCodeGenerator *cg)
+{
+ TREE_CONTEXT_FINISH(&cg->treeContext);
+ JS_ARENA_RELEASE(cg->codePool, cg->codeMark);
+ JS_ARENA_RELEASE(cg->notePool, cg->noteMark);
+ JS_ARENA_RELEASE(&cx->tempPool, cg->tempMark);
+}
+
+static ptrdiff_t
+EmitCheck(JSContext *cx, JSCodeGenerator *cg, JSOp op, ptrdiff_t delta)
+{
+ jsbytecode *base, *limit, *next;
+ ptrdiff_t offset, length;
+ size_t incr, size;
+
+ base = CG_BASE(cg);
+ next = CG_NEXT(cg);
+ limit = CG_LIMIT(cg);
+ offset = PTRDIFF(next, base, jsbytecode);
+ if (next + delta > limit) {
+ length = offset + delta;
+ length = (length <= BYTECODE_CHUNK)
+ ? BYTECODE_CHUNK
+ : JS_BIT(JS_CeilingLog2(length));
+ incr = BYTECODE_SIZE(length);
+ if (!base) {
+ JS_ARENA_ALLOCATE_CAST(base, jsbytecode *, cg->codePool, incr);
+ } else {
+ size = BYTECODE_SIZE(PTRDIFF(limit, base, jsbytecode));
+ incr -= size;
+ JS_ARENA_GROW_CAST(base, jsbytecode *, cg->codePool, size, incr);
+ }
+ if (!base) {
+ JS_ReportOutOfMemory(cx);
+ return -1;
+ }
+ CG_BASE(cg) = base;
+ CG_LIMIT(cg) = base + length;
+ CG_NEXT(cg) = base + offset;
+ }
+ return offset;
+}
+
+static void
+UpdateDepth(JSContext *cx, JSCodeGenerator *cg, ptrdiff_t target)
+{
+ jsbytecode *pc;
+ const JSCodeSpec *cs;
+ intN nuses;
+
+ pc = CG_CODE(cg, target);
+ cs = &js_CodeSpec[pc[0]];
+ nuses = cs->nuses;
+ if (nuses < 0)
+ nuses = 2 + GET_ARGC(pc); /* stack: fun, this, [argc arguments] */
+ cg->stackDepth -= nuses;
+ JS_ASSERT(cg->stackDepth >= 0);
+ if (cg->stackDepth < 0) {
+ char numBuf[12];
+ JS_snprintf(numBuf, sizeof numBuf, "%d", target);
+ JS_ReportErrorFlagsAndNumber(cx, JSREPORT_WARNING,
+ js_GetErrorMessage, NULL,
+ JSMSG_STACK_UNDERFLOW,
+ cg->filename ? cg->filename : "stdin",
+ numBuf);
+ }
+ cg->stackDepth += cs->ndefs;
+ if ((uintN)cg->stackDepth > cg->maxStackDepth)
+ cg->maxStackDepth = cg->stackDepth;
+}
+
+ptrdiff_t
+js_Emit1(JSContext *cx, JSCodeGenerator *cg, JSOp op)
+{
+ ptrdiff_t offset = EmitCheck(cx, cg, op, 1);
+
+ if (offset >= 0) {
+ *CG_NEXT(cg)++ = (jsbytecode)op;
+ UpdateDepth(cx, cg, offset);
+ }
+ return offset;
+}
+
+ptrdiff_t
+js_Emit2(JSContext *cx, JSCodeGenerator *cg, JSOp op, jsbytecode op1)
+{
+ ptrdiff_t offset = EmitCheck(cx, cg, op, 2);
+
+ if (offset >= 0) {
+ jsbytecode *next = CG_NEXT(cg);
+ next[0] = (jsbytecode)op;
+ next[1] = op1;
+ CG_NEXT(cg) = next + 2;
+ UpdateDepth(cx, cg, offset);
+ }
+ return offset;
+}
+
+ptrdiff_t
+js_Emit3(JSContext *cx, JSCodeGenerator *cg, JSOp op, jsbytecode op1,
+ jsbytecode op2)
+{
+ ptrdiff_t offset = EmitCheck(cx, cg, op, 3);
+
+ if (offset >= 0) {
+ jsbytecode *next = CG_NEXT(cg);
+ next[0] = (jsbytecode)op;
+ next[1] = op1;
+ next[2] = op2;
+ CG_NEXT(cg) = next + 3;
+ UpdateDepth(cx, cg, offset);
+ }
+ return offset;
+}
+
+ptrdiff_t
+js_EmitN(JSContext *cx, JSCodeGenerator *cg, JSOp op, size_t extra)
+{
+ ptrdiff_t length = 1 + (ptrdiff_t)extra;
+ ptrdiff_t offset = EmitCheck(cx, cg, op, length);
+
+ if (offset >= 0) {
+ jsbytecode *next = CG_NEXT(cg);
+ *next = (jsbytecode)op;
+ memset(next + 1, 0, BYTECODE_SIZE(extra));
+ CG_NEXT(cg) = next + length;
+ UpdateDepth(cx, cg, offset);
+ }
+ return offset;
+}
+
+/* XXX too many "... statement" L10N gaffes below -- fix via js.msg! */
+const char js_with_statement_str[] = "with statement";
+const char js_finally_block_str[] = "finally block";
+const char js_script_str[] = "script";
+
+static const char *statementName[] = {
+ "label statement", /* LABEL */
+ "if statement", /* IF */
+ "else statement", /* ELSE */
+ "switch statement", /* SWITCH */
+ "block", /* BLOCK */
+ js_with_statement_str, /* WITH */
+ "catch block", /* CATCH */
+ "try block", /* TRY */
+ js_finally_block_str, /* FINALLY */
+ js_finally_block_str, /* SUBROUTINE */
+ "do loop", /* DO_LOOP */
+ "for loop", /* FOR_LOOP */
+ "for/in loop", /* FOR_IN_LOOP */
+ "while loop", /* WHILE_LOOP */
+};
+
+static const char *
+StatementName(JSCodeGenerator *cg)
+{
+ if (!cg->treeContext.topStmt)
+ return js_script_str;
+ return statementName[cg->treeContext.topStmt->type];
+}
+
+static void
+ReportStatementTooLarge(JSContext *cx, JSCodeGenerator *cg)
+{
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_NEED_DIET,
+ StatementName(cg));
+}
+
+/**
+ Span-dependent instructions in JS bytecode consist of the jump (JOF_JUMP)
+ and switch (JOF_LOOKUPSWITCH, JOF_TABLESWITCH) format opcodes, subdivided
+ into unconditional (gotos and gosubs), and conditional jumps or branches
+ (which pop a value, test it, and jump depending on its value). Most jumps
+ have just one immediate operand, a signed offset from the jump opcode's pc
+ to the target bytecode. The lookup and table switch opcodes may contain
+ many jump offsets.
+
+ Mozilla bug #80981 (http://bugzilla.mozilla.org/show_bug.cgi?id=80981) was
+ fixed by adding extended "X" counterparts to the opcodes/formats (NB: X is
+ suffixed to prefer JSOP_ORX thereby avoiding a JSOP_XOR name collision for
+ the extended form of the JSOP_OR branch opcode). The unextended or short
+ formats have 16-bit signed immediate offset operands, the extended or long
+ formats have 32-bit signed immediates. The span-dependency problem consists
+ of selecting as few long instructions as possible, or about as few -- since
+ jumps can span other jumps, extending one jump may cause another to need to
+ be extended.
+
+ Most JS scripts are short, so need no extended jumps. We optimize for this
+ case by generating short jumps until we know a long jump is needed. After
+ that point, we keep generating short jumps, but each jump's 16-bit immediate
+ offset operand is actually an unsigned index into cg->spanDeps, an array of
+ JSSpanDep structs. Each struct tells the top offset in the script of the
+ opcode, the "before" offset of the jump (which will be the same as top for
+ simplex jumps, but which will index further into the bytecode array for a
+ non-initial jump offset in a lookup or table switch), the after "offset"
+ adjusted during span-dependent instruction selection (initially the same
+ value as the "before" offset), and the jump target (more below).
+
+ Since we generate cg->spanDeps lazily, from within js_SetJumpOffset, we must
+ ensure that all bytecode generated so far can be inspected to discover where
+ the jump offset immediate operands lie within CG_CODE(cg). But the bonus is
+ that we generate span-dependency records sorted by their offsets, so we can
+ binary-search when trying to find a JSSpanDep for a given bytecode offset,
+ or the nearest JSSpanDep at or above a given pc.
+
+ To avoid limiting scripts to 64K jumps, if the cg->spanDeps index overflows
+ 65534, we store SPANDEP_INDEX_HUGE in the jump's immediate operand. This
+ tells us that we need to binary-search for the cg->spanDeps entry by the
+ jump opcode's bytecode offset (sd->before).
+
+ Jump targets need to be maintained in a data structure that lets us look
+ up an already-known target by its address (jumps may have a common target),
+ and that also lets us update the addresses (script-relative, a.k.a. absolute
+ offsets) of targets that come after a jump target (for when a jump below
+ that target needs to be extended). We use an AVL tree, implemented using
+ recursion, but with some tricky optimizations to its height-balancing code
+ (see http://www.cmcrossroads.com/bradapp/ftp/src/libs/C++/AvlTrees.html).
+
+ A final wrinkle: backpatch chains are linked by jump-to-jump offsets with
+ positive sign, even though they link "backward" (i.e., toward lower bytecode
+ address). We don't want to waste space and search time in the AVL tree for
+ such temporary backpatch deltas, so we use a single-bit wildcard scheme to
+ tag true JSJumpTarget pointers and encode untagged, signed (positive) deltas
+ in JSSpanDep.target pointers, depending on whether the JSSpanDep has a known
+ target, or is still awaiting backpatching.
+
+ Note that backpatch chains would present a problem for BuildSpanDepTable,
+ which inspects bytecode to build cg->spanDeps on demand, when the first
+ short jump offset overflows. To solve this temporary problem, we emit a
+ proxy bytecode (JSOP_BACKPATCH; JSOP_BACKPATCH_POP for branch ops) whose
+ nuses/ndefs counts help keep the stack balanced, but whose opcode format
+ distinguishes its backpatch delta immediate operand from a normal jump
+ offset.
+ */
+static int
+BalanceJumpTargets(JSJumpTarget **jtp)
+{
+ JSJumpTarget *jt, *jt2, *root;
+ int dir, otherDir, heightChanged;
+ JSBool doubleRotate;
+
+ jt = *jtp;
+ JS_ASSERT(jt->balance != 0);
+
+ if (jt->balance < -1) {
+ dir = JT_RIGHT;
+ doubleRotate = (jt->kids[JT_LEFT]->balance > 0);
+ } else if (jt->balance > 1) {
+ dir = JT_LEFT;
+ doubleRotate = (jt->kids[JT_RIGHT]->balance < 0);
+ } else {
+ return 0;
+ }
+
+ otherDir = JT_OTHER_DIR(dir);
+ if (doubleRotate) {
+ jt2 = jt->kids[otherDir];
+ *jtp = root = jt2->kids[dir];
+
+ jt->kids[otherDir] = root->kids[dir];
+ root->kids[dir] = jt;
+
+ jt2->kids[dir] = root->kids[otherDir];
+ root->kids[otherDir] = jt2;
+
+ heightChanged = 1;
+ root->kids[JT_LEFT]->balance = -JS_MAX(root->balance, 0);
+ root->kids[JT_RIGHT]->balance = -JS_MIN(root->balance, 0);
+ root->balance = 0;
+ } else {
+ *jtp = root = jt->kids[otherDir];
+ jt->kids[otherDir] = root->kids[dir];
+ root->kids[dir] = jt;
+
+ heightChanged = (root->balance != 0);
+ jt->balance = -((dir == JT_LEFT) ? --root->balance : ++root->balance);
+ }
+
+ return heightChanged;
+}
+
+typedef struct AddJumpTargetArgs {
+ JSContext *cx;
+ JSCodeGenerator *cg;
+ ptrdiff_t offset;
+ JSJumpTarget *node;
+} AddJumpTargetArgs;
+
+static int
+AddJumpTarget(AddJumpTargetArgs *args, JSJumpTarget **jtp)
+{
+ JSJumpTarget *jt;
+ int balanceDelta;
+
+ jt = *jtp;
+ if (!jt) {
+ JSCodeGenerator *cg = args->cg;
+
+ jt = cg->jtFreeList;
+ if (jt) {
+ cg->jtFreeList = jt->kids[JT_LEFT];
+ } else {
+ JS_ARENA_ALLOCATE_CAST(jt, JSJumpTarget *, &args->cx->tempPool,
+ sizeof *jt);
+ if (!jt) {
+ JS_ReportOutOfMemory(args->cx);
+ return 0;
+ }
+ }
+ jt->offset = args->offset;
+ jt->balance = 0;
+ jt->kids[JT_LEFT] = jt->kids[JT_RIGHT] = NULL;
+ cg->numJumpTargets++;
+ args->node = jt;
+ *jtp = jt;
+ return 1;
+ }
+
+ if (jt->offset == args->offset) {
+ args->node = jt;
+ return 0;
+ }
+
+ if (args->offset < jt->offset)
+ balanceDelta = -AddJumpTarget(args, &jt->kids[JT_LEFT]);
+ else
+ balanceDelta = AddJumpTarget(args, &jt->kids[JT_RIGHT]);
+ if (!args->node)
+ return 0;
+
+ jt->balance += balanceDelta;
+ return (balanceDelta && jt->balance)
+ ? 1 - BalanceJumpTargets(jtp)
+ : 0;
+}
+
+#ifdef DEBUG_brendan
+static int AVLCheck(JSJumpTarget *jt)
+{
+ int lh, rh;
+
+ if (!jt) return 0;
+ JS_ASSERT(-1 <= jt->balance && jt->balance <= 1);
+ lh = AVLCheck(jt->kids[JT_LEFT]);
+ rh = AVLCheck(jt->kids[JT_RIGHT]);
+ JS_ASSERT(jt->balance == rh - lh);
+ return 1 + JS_MAX(lh, rh);
+}
+#endif
+
+static JSBool
+SetSpanDepTarget(JSContext *cx, JSCodeGenerator *cg, JSSpanDep *sd,
+ ptrdiff_t off)
+{
+ AddJumpTargetArgs args;
+
+ if (off < JUMPX_OFFSET_MIN || JUMPX_OFFSET_MAX < off) {
+ ReportStatementTooLarge(cx, cg);
+ return JS_FALSE;
+ }
+
+ args.cx = cx;
+ args.cg = cg;
+ args.offset = sd->top + off;
+ args.node = NULL;
+ AddJumpTarget(&args, &cg->jumpTargets);
+ if (!args.node)
+ return JS_FALSE;
+
+#ifdef DEBUG_brendan
+ AVLCheck(cg->jumpTargets);
+#endif
+
+ SD_SET_TARGET(sd, args.node);
+ return JS_TRUE;
+}
+
+#define SPANDEPS_MIN 256
+#define SPANDEPS_SIZE(n) ((n) * sizeof(JSSpanDep))
+#define SPANDEPS_SIZE_MIN SPANDEPS_SIZE(SPANDEPS_MIN)
+
+static JSBool
+AddSpanDep(JSContext *cx, JSCodeGenerator *cg, jsbytecode *pc, jsbytecode *pc2,
+ ptrdiff_t off)
+{
+ uintN index;
+ JSSpanDep *sdbase, *sd;
+ size_t size;
+
+ index = cg->numSpanDeps;
+ if (index + 1 == 0) {
+ ReportStatementTooLarge(cx, cg);
+ return JS_FALSE;
+ }
+
+ if ((index & (index - 1)) == 0 &&
+ (!(sdbase = cg->spanDeps) || index >= SPANDEPS_MIN)) {
+ if (!sdbase) {
+ size = SPANDEPS_SIZE_MIN;
+ JS_ARENA_ALLOCATE_CAST(sdbase, JSSpanDep *, &cx->tempPool, size);
+ } else {
+ size = SPANDEPS_SIZE(index);
+ JS_ARENA_GROW_CAST(sdbase, JSSpanDep *, &cx->tempPool, size, size);
+ }
+ if (!sdbase)
+ return JS_FALSE;
+ cg->spanDeps = sdbase;
+ }
+
+ cg->numSpanDeps = index + 1;
+ sd = cg->spanDeps + index;
+ sd->top = PTRDIFF(pc, CG_BASE(cg), jsbytecode);
+ sd->offset = sd->before = PTRDIFF(pc2, CG_BASE(cg), jsbytecode);
+
+ if (js_CodeSpec[*pc].format & JOF_BACKPATCH) {
+ /* Jump offset will be backpatched if off is a non-zero "bpdelta". */
+ if (off != 0) {
+ JS_ASSERT(off >= 1 + JUMP_OFFSET_LEN);
+ if (off > BPDELTA_MAX) {
+ ReportStatementTooLarge(cx, cg);
+ return JS_FALSE;
+ }
+ }
+ SD_SET_BPDELTA(sd, off);
+ } else if (off == 0) {
+ /* Jump offset will be patched directly, without backpatch chaining. */
+ SD_SET_TARGET(sd, NULL);
+ } else {
+ /* The jump offset in off is non-zero, therefore it's already known. */
+ if (!SetSpanDepTarget(cx, cg, sd, off))
+ return JS_FALSE;
+ }
+
+ if (index > SPANDEP_INDEX_MAX)
+ index = SPANDEP_INDEX_HUGE;
+ SET_SPANDEP_INDEX(pc2, index);
+ return JS_TRUE;
+}
+
+static JSBool
+BuildSpanDepTable(JSContext *cx, JSCodeGenerator *cg)
+{
+ jsbytecode *pc, *end;
+ JSOp op;
+ const JSCodeSpec *cs;
+ ptrdiff_t len, off;
+
+ pc = CG_BASE(cg) + cg->spanDepTodo;
+ end = CG_NEXT(cg);
+ while (pc < end) {
+ op = (JSOp)*pc;
+ cs = &js_CodeSpec[op];
+ len = (ptrdiff_t)cs->length;
+
+ switch (cs->format & JOF_TYPEMASK) {
+ case JOF_JUMP:
+ off = GET_JUMP_OFFSET(pc);
+ if (!AddSpanDep(cx, cg, pc, pc, off))
+ return JS_FALSE;
+ break;
+
+ case JOF_TABLESWITCH:
+ {
+ jsbytecode *pc2;
+ jsint i, low, high;
+
+ pc2 = pc;
+ off = GET_JUMP_OFFSET(pc2);
+ if (!AddSpanDep(cx, cg, pc, pc2, off))
+ return JS_FALSE;
+ pc2 += JUMP_OFFSET_LEN;
+ low = GET_JUMP_OFFSET(pc2);
+ pc2 += JUMP_OFFSET_LEN;
+ high = GET_JUMP_OFFSET(pc2);
+ pc2 += JUMP_OFFSET_LEN;
+ for (i = low; i <= high; i++) {
+ off = GET_JUMP_OFFSET(pc2);
+ if (!AddSpanDep(cx, cg, pc, pc2, off))
+ return JS_FALSE;
+ pc2 += JUMP_OFFSET_LEN;
+ }
+ len = 1 + pc2 - pc;
+ break;
+ }
+
+ case JOF_LOOKUPSWITCH:
+ {
+ jsbytecode *pc2;
+ jsint npairs;
+
+ pc2 = pc;
+ off = GET_JUMP_OFFSET(pc2);
+ if (!AddSpanDep(cx, cg, pc, pc2, off))
+ return JS_FALSE;
+ pc2 += JUMP_OFFSET_LEN;
+ npairs = (jsint) GET_ATOM_INDEX(pc2);
+ pc2 += ATOM_INDEX_LEN;
+ while (npairs) {
+ pc2 += ATOM_INDEX_LEN;
+ off = GET_JUMP_OFFSET(pc2);
+ if (!AddSpanDep(cx, cg, pc, pc2, off))
+ return JS_FALSE;
+ pc2 += JUMP_OFFSET_LEN;
+ npairs--;
+ }
+ len = 1 + pc2 - pc;
+ break;
+ }
+ }
+
+ JS_ASSERT(len > 0);
+ pc += len;
+ }
+
+ return JS_TRUE;
+}
+
+static JSSpanDep *
+GetSpanDep(JSCodeGenerator *cg, jsbytecode *pc)
+{
+ uintN index;
+ ptrdiff_t offset;
+ int lo, hi, mid;
+ JSSpanDep *sd;
+
+ index = GET_SPANDEP_INDEX(pc);
+ if (index != SPANDEP_INDEX_HUGE)
+ return cg->spanDeps + index;
+
+ offset = PTRDIFF(pc, CG_BASE(cg), jsbytecode);
+ lo = 0;
+ hi = cg->numSpanDeps - 1;
+ while (lo <= hi) {
+ mid = (lo + hi) / 2;
+ sd = cg->spanDeps + mid;
+ if (sd->before == offset)
+ return sd;
+ if (sd->before < offset)
+ lo = mid + 1;
+ else
+ hi = mid - 1;
+ }
+
+ JS_ASSERT(0);
+ return NULL;
+}
+
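+/*
+ * Link pc onto a backpatch chain by recording delta, the distance back to
+ * the previous entry on the chain.  While no span dependencies exist and
+ * delta fits, it is written straight into pc's jump immediate; otherwise it
+ * is stored as the spandep's bpdelta.
+ */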
+static JSBool
+SetBackPatchDelta(JSContext *cx, JSCodeGenerator *cg, jsbytecode *pc,
+ ptrdiff_t delta)
+{
+ JSSpanDep *sd;
+
+ JS_ASSERT(delta >= 1 + JUMP_OFFSET_LEN);
+ if (!cg->spanDeps && delta < JUMP_OFFSET_MAX) {
+ SET_JUMP_OFFSET(pc, delta);
+ return JS_TRUE;
+ }
+
+ if (delta > BPDELTA_MAX) {
+ ReportStatementTooLarge(cx, cg);
+ return JS_FALSE;
+ }
+
+ if (!cg->spanDeps && !BuildSpanDepTable(cx, cg))
+ return JS_FALSE;
+
+ sd = GetSpanDep(cg, pc);
+ JS_ASSERT(SD_GET_BPDELTA(sd) == 0);
+ SD_SET_BPDELTA(sd, delta);
+ return JS_TRUE;
+}
+
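+/* Add delta to the offset of every jump target above pivot in the AVL tree. */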
+static void
+UpdateJumpTargets(JSJumpTarget *jt, ptrdiff_t pivot, ptrdiff_t delta)
+{
+ if (jt->offset > pivot) {
+ jt->offset += delta;
+ if (jt->kids[JT_LEFT])
+ UpdateJumpTargets(jt->kids[JT_LEFT], pivot, delta);
+ }
+ if (jt->kids[JT_RIGHT])
+ UpdateJumpTargets(jt->kids[JT_RIGHT], pivot, delta);
+}
+
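+/*
+ * Binary-search cg->spanDeps, starting at index lo, for the span dependency
+ * whose original (before) offset is nearest to, but not below, offset.  The
+ * caller's guard record is returned when every spandep lies below offset.
+ */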
+static JSSpanDep *
+FindNearestSpanDep(JSCodeGenerator *cg, ptrdiff_t offset, int lo,
+ JSSpanDep *guard)
+{
+ int num, hi, mid;
+ JSSpanDep *sdbase, *sd;
+
+ num = cg->numSpanDeps;
+ JS_ASSERT(num > 0);
+ hi = num - 1;
+ sdbase = cg->spanDeps;
+ while (lo <= hi) {
+ mid = (lo + hi) / 2;
+ sd = sdbase + mid;
+ if (sd->before == offset)
+ return sd;
+ if (sd->before < offset)
+ lo = mid + 1;
+ else
+ hi = mid - 1;
+ }
+ if (lo == num)
+ return guard;
+ sd = sdbase + lo;
+ JS_ASSERT(sd->before >= offset && (lo == 0 || sd[-1].before < offset));
+ return sd;
+}
+
+static void
+FreeJumpTargets(JSCodeGenerator *cg, JSJumpTarget *jt)
+{
+ if (jt->kids[JT_LEFT])
+ FreeJumpTargets(cg, jt->kids[JT_LEFT]);
+ if (jt->kids[JT_RIGHT])
+ FreeJumpTargets(cg, jt->kids[JT_RIGHT]);
+ jt->kids[JT_LEFT] = cg->jtFreeList;
+ cg->jtFreeList = jt;
+}
+
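+/*
+ * The core of span-dependency optimization: repeatedly widen any jump whose
+ * span no longer fits in a short jump immediate to its extended JSOP_*X
+ * form, since each widening can push other spans out of range.  Once the
+ * set of extended jumps is stable, slide the bytecode apart to make room
+ * for the wider JUMPX offsets, rewrite all jump immediates, and adjust
+ * source notes and try notes for the grown offsets.  Finally free the
+ * spandep and jump-target structures so cg can be reused for the next
+ * top-level statement.
+ */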
+static JSBool
+OptimizeSpanDeps(JSContext *cx, JSCodeGenerator *cg)
+{
+ jsbytecode *pc, *oldpc, *base, *limit, *next;
+ JSSpanDep *sd, *sd2, *sdbase, *sdlimit, *sdtop, guard;
+ ptrdiff_t offset, growth, delta, top, pivot, span, length, target;
+ JSBool done;
+ JSOp op;
+ uint32 type;
+ size_t size, incr;
+ jssrcnote *sn, *snlimit;
+ JSSrcNoteSpec *spec;
+ uintN i, n, noteIndex;
+ JSTryNote *tn, *tnlimit;
+#ifdef DEBUG_brendan
+ int passes = 0;
+#endif
+
+ base = CG_BASE(cg);
+ sdbase = cg->spanDeps;
+ sdlimit = sdbase + cg->numSpanDeps;
+ offset = CG_OFFSET(cg);
+ growth = 0;
+
+ do {
+ done = JS_TRUE;
+ delta = 0;
+ top = pivot = -1;
+ sdtop = NULL;
+ pc = NULL;
+ op = JSOP_NOP;
+ type = 0;
+#ifdef DEBUG_brendan
+ passes++;
+#endif
+
+ for (sd = sdbase; sd < sdlimit; sd++) {
+ JS_ASSERT(JT_HAS_TAG(sd->target));
+ sd->offset += delta;
+
+ if (sd->top != top) {
+ sdtop = sd;
+ top = sd->top;
+ JS_ASSERT(top == sd->before);
+ pivot = sd->offset;
+ pc = base + top;
+ op = (JSOp) *pc;
+ type = (js_CodeSpec[op].format & JOF_TYPEMASK);
+ if (JOF_TYPE_IS_EXTENDED_JUMP(type)) {
+ /*
+ * We already extended all the jump offset operands for
+ * the opcode at sd->top. Jumps and branches have only
+ * one jump offset operand, but switches have many, all
+ * of which are adjacent in cg->spanDeps.
+ */
+ continue;
+ }
+
+ JS_ASSERT(type == JOF_JUMP ||
+ type == JOF_TABLESWITCH ||
+ type == JOF_LOOKUPSWITCH);
+ }
+
+ if (!JOF_TYPE_IS_EXTENDED_JUMP(type)) {
+ span = SD_SPAN(sd, pivot);
+ if (span < JUMP_OFFSET_MIN || JUMP_OFFSET_MAX < span) {
+ ptrdiff_t deltaFromTop = 0;
+
+ done = JS_FALSE;
+
+ switch (op) {
+ case JSOP_GOTO: op = JSOP_GOTOX; break;
+ case JSOP_IFEQ: op = JSOP_IFEQX; break;
+ case JSOP_IFNE: op = JSOP_IFNEX; break;
+ case JSOP_OR: op = JSOP_ORX; break;
+ case JSOP_AND: op = JSOP_ANDX; break;
+ case JSOP_GOSUB: op = JSOP_GOSUBX; break;
+ case JSOP_CASE: op = JSOP_CASEX; break;
+ case JSOP_DEFAULT: op = JSOP_DEFAULTX; break;
+ case JSOP_TABLESWITCH: op = JSOP_TABLESWITCHX; break;
+ case JSOP_LOOKUPSWITCH: op = JSOP_LOOKUPSWITCHX; break;
+ default:
+ ReportStatementTooLarge(cx, cg);
+ return JS_FALSE;
+ }
+ *pc = (jsbytecode) op;
+
+ for (sd2 = sdtop; sd2 < sdlimit && sd2->top == top; sd2++) {
+ if (sd2 <= sd) {
+ /*
+ * sd2->offset already includes delta as it stood
+ * before we entered this loop, but it must also
+ * include the delta relative to top due to all the
+ * extended jump offset immediates for the opcode
+ * starting at top, which we extend in this loop.
+ *
+ * If there is only one extended jump offset, then
+ * sd2->offset won't change and this for loop will
+ * iterate once only.
+ */
+ sd2->offset += deltaFromTop;
+ deltaFromTop += JUMPX_OFFSET_LEN - JUMP_OFFSET_LEN;
+ } else {
+ /*
+ * sd2 comes after sd, and won't be revisited by
+ * the outer for loop, so we have to increase its
+ * offset by delta, not merely by deltaFromTop.
+ */
+ sd2->offset += delta;
+ }
+
+ delta += JUMPX_OFFSET_LEN - JUMP_OFFSET_LEN;
+ UpdateJumpTargets(cg->jumpTargets, sd2->offset,
+ JUMPX_OFFSET_LEN - JUMP_OFFSET_LEN);
+ }
+ sd = sd2 - 1;
+ }
+ }
+ }
+
+ growth += delta;
+ } while (!done);
+
+ if (growth) {
+#ifdef DEBUG_brendan
+ printf("%s:%u: %u/%u jumps extended in %d passes (%d=%d+%d)\n",
+ cg->filename ? cg->filename : "stdin", cg->firstLine,
+ growth / (JUMPX_OFFSET_LEN - JUMP_OFFSET_LEN), cg->numSpanDeps,
+ passes, offset + growth, offset, growth);
+#endif
+
+ /*
+ * Ensure that we have room for the extended jumps, but don't round up
+ * to a power of two -- we're done generating code, so we cut to fit.
+ */
+ limit = CG_LIMIT(cg);
+ length = offset + growth;
+ next = base + length;
+ if (next > limit) {
+ JS_ASSERT(length > BYTECODE_CHUNK);
+ size = BYTECODE_SIZE(PTRDIFF(limit, base, jsbytecode));
+ incr = BYTECODE_SIZE(length) - size;
+ JS_ARENA_GROW_CAST(base, jsbytecode *, cg->codePool, size, incr);
+ if (!base) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ CG_BASE(cg) = base;
+ CG_LIMIT(cg) = next = base + length;
+ }
+ CG_NEXT(cg) = next;
+
+ /*
+ * Set up a fake span dependency record to guard the end of the code
+ * being generated. This guard record is returned as a fencepost by
+ * FindNearestSpanDep if there is no real spandep at or above a given
+ * unextended code offset.
+ */
+ guard.top = -1;
+ guard.offset = offset + growth;
+ guard.before = offset;
+ guard.target = NULL;
+ }
+
+ /*
+ * Now work backwards through the span dependencies, copying chunks of
+ * bytecode between each extended jump toward the end of the grown code
+ * space, and restoring immediate offset operands for all jump bytecodes.
+ * The first chunk of bytecodes, starting at base and ending at the first
+ * extended jump offset (NB: this chunk includes the operation bytecode
+ * just before that immediate jump offset), doesn't need to be copied.
+ */
+ JS_ASSERT(sd == sdlimit);
+ top = -1;
+ while (--sd >= sdbase) {
+ if (sd->top != top) {
+ top = sd->top;
+ op = (JSOp) base[top];
+ type = (js_CodeSpec[op].format & JOF_TYPEMASK);
+
+ for (sd2 = sd - 1; sd2 >= sdbase && sd2->top == top; sd2--)
+ continue;
+ sd2++;
+ pivot = sd2->offset;
+ JS_ASSERT(top == sd2->before);
+ }
+
+ oldpc = base + sd->before;
+ span = SD_SPAN(sd, pivot);
+
+ /*
+ * If this jump didn't need to be extended, restore its span immediate
+ * offset operand now, overwriting the index of sd within cg->spanDeps
+ * that was stored temporarily after *pc when BuildSpanDepTable ran.
+ *
+ * Note that span might fit in 16 bits even for an extended jump op,
+ * if the op has multiple span operands, not all of which overflowed
+ * (e.g. JSOP_LOOKUPSWITCH or JSOP_TABLESWITCH where some cases are in
+ * range for a short jump, but others are not).
+ */
+ if (!JOF_TYPE_IS_EXTENDED_JUMP(type)) {
+ JS_ASSERT(JUMP_OFFSET_MIN <= span && span <= JUMP_OFFSET_MAX);
+ SET_JUMP_OFFSET(oldpc, span);
+ continue;
+ }
+
+ /*
+ * Set up parameters needed to copy the next run of bytecode starting
+ * at offset (which is a cursor into the unextended, original bytecode
+ * vector), down to sd->before (a cursor of the same scale as offset,
+ * it's the index of the original jump pc). Reuse delta to count the
+ * nominal number of bytes to copy.
+ */
+ pc = base + sd->offset;
+ delta = offset - sd->before;
+ JS_ASSERT(delta >= 1 + JUMP_OFFSET_LEN);
+
+ /*
+ * Don't bother copying the jump offset we're about to reset, but do
+ * copy the bytecode at oldpc (which comes just before its immediate
+ * jump offset operand), on the next iteration through the loop, by
+ * including it in offset's new value.
+ */
+ offset = sd->before + 1;
+ size = BYTECODE_SIZE(delta - (1 + JUMP_OFFSET_LEN));
+ if (size) {
+ memmove(pc + 1 + JUMPX_OFFSET_LEN,
+ oldpc + 1 + JUMP_OFFSET_LEN,
+ size);
+ }
+
+ SET_JUMPX_OFFSET(pc, span);
+ }
+
+ if (growth) {
+ /*
+ * Fix source note deltas. Don't hardwire the delta fixup adjustment,
+ * even though currently it must be JUMPX_OFFSET_LEN - JUMP_OFFSET_LEN
+ * at each sd that moved. The future may bring different offset sizes
+ * for span-dependent instruction operands. However, we fix only main
+ * notes here, not prolog notes -- we know that prolog opcodes are not
+ * span-dependent, and aren't likely ever to be.
+ */
+ offset = growth = 0;
+ sd = sdbase;
+ for (sn = cg->main.notes, snlimit = sn + cg->main.noteCount;
+ sn < snlimit;
+ sn = SN_NEXT(sn)) {
+ /*
+ * Recall that the offset of a given note includes its delta, and
+ * tells the offset of the annotated bytecode from the main entry
+ * point of the script.
+ */
+ offset += SN_DELTA(sn);
+ while (sd < sdlimit && sd->before < offset) {
+ /*
+ * To compute the delta to add to sn, we need to look at the
+ * spandep after sd, whose offset - (before + growth) tells by
+ * how many bytes sd's instruction grew.
+ */
+ sd2 = sd + 1;
+ if (sd2 == sdlimit)
+ sd2 = &guard;
+ delta = sd2->offset - (sd2->before + growth);
+ if (delta > 0) {
+ JS_ASSERT(delta == JUMPX_OFFSET_LEN - JUMP_OFFSET_LEN);
+ sn = js_AddToSrcNoteDelta(cx, cg, sn, delta);
+ if (!sn)
+ return JS_FALSE;
+ snlimit = cg->main.notes + cg->main.noteCount;
+ growth += delta;
+ }
+ sd++;
+ }
+
+ /*
+ * If sn has span-dependent offset operands, check whether each
+ * covers further span-dependencies, and increase those operands
+ * accordingly. Some source notes measure offset not from the
+ * annotated pc, but from that pc plus some small bias. NB: we
+ * assume that spec->offsetBias can't itself span span-dependent
+ * instructions!
+ */
+ spec = &js_SrcNoteSpec[SN_TYPE(sn)];
+ if (spec->isSpanDep) {
+ pivot = offset + spec->offsetBias;
+ n = spec->arity;
+ for (i = 0; i < n; i++) {
+ span = js_GetSrcNoteOffset(sn, i);
+ if (span == 0)
+ continue;
+ target = pivot + span * spec->isSpanDep;
+ sd2 = FindNearestSpanDep(cg, target,
+ (target >= pivot)
+ ? sd - sdbase
+ : 0,
+ &guard);
+
+ /*
+ * Increase target by sd2's before-vs-after offset delta,
+ * which is absolute (i.e., relative to start of script,
+ * as is target). Recompute the span by subtracting its
+ * adjusted pivot from target.
+ */
+ target += sd2->offset - sd2->before;
+ span = target - (pivot + growth);
+ span *= spec->isSpanDep;
+ noteIndex = sn - cg->main.notes;
+ if (!js_SetSrcNoteOffset(cx, cg, noteIndex, i, span))
+ return JS_FALSE;
+ sn = cg->main.notes + noteIndex;
+ snlimit = cg->main.notes + cg->main.noteCount;
+ }
+ }
+ }
+ cg->main.lastNoteOffset += growth;
+
+ /*
+ * Fix try/catch notes (O(numTryNotes * log2(numSpanDeps)), but it's
+ * not clear how we can beat that).
+ */
+ for (tn = cg->tryBase, tnlimit = cg->tryNext; tn < tnlimit; tn++) {
+ /*
+ * First, look for the nearest span dependency at/above tn->start.
+ * There may not be any such spandep, in which case the guard will
+ * be returned.
+ */
+ offset = tn->start;
+ sd = FindNearestSpanDep(cg, offset, 0, &guard);
+ delta = sd->offset - sd->before;
+ tn->start = offset + delta;
+
+ /*
+ * Next, find the nearest spandep at/above tn->start + tn->length.
+ * Use its delta minus tn->start's delta to increase tn->length.
+ */
+ length = tn->length;
+ sd2 = FindNearestSpanDep(cg, offset + length, sd - sdbase, &guard);
+ if (sd2 != sd)
+ tn->length = length + sd2->offset - sd2->before - delta;
+
+ /*
+ * Finally, adjust tn->catchStart upward only if it is non-zero,
+ * and provided there are spandeps below it that grew.
+ */
+ offset = tn->catchStart;
+ if (offset != 0) {
+ sd = FindNearestSpanDep(cg, offset, sd2 - sdbase, &guard);
+ tn->catchStart = offset + sd->offset - sd->before;
+ }
+ }
+ }
+
+#ifdef DEBUG_brendan
+ {
+ uintN bigspans = 0;
+ top = -1;
+ for (sd = sdbase; sd < sdlimit; sd++) {
+ offset = sd->offset;
+
+ /* NB: sd->top cursors into the original, unextended bytecode vector. */
+ if (sd->top != top) {
+ JS_ASSERT(top == -1 ||
+ !JOF_TYPE_IS_EXTENDED_JUMP(type) ||
+ bigspans != 0);
+ bigspans = 0;
+ top = sd->top;
+ JS_ASSERT(top == sd->before);
+ op = (JSOp) base[offset];
+ type = (js_CodeSpec[op].format & JOF_TYPEMASK);
+ JS_ASSERT(type == JOF_JUMP ||
+ type == JOF_JUMPX ||
+ type == JOF_TABLESWITCH ||
+ type == JOF_TABLESWITCHX ||
+ type == JOF_LOOKUPSWITCH ||
+ type == JOF_LOOKUPSWITCHX);
+ pivot = offset;
+ }
+
+ pc = base + offset;
+ if (JOF_TYPE_IS_EXTENDED_JUMP(type)) {
+ span = GET_JUMPX_OFFSET(pc);
+ if (span < JUMP_OFFSET_MIN || JUMP_OFFSET_MAX < span) {
+ bigspans++;
+ } else {
+ JS_ASSERT(type == JOF_TABLESWITCHX ||
+ type == JOF_LOOKUPSWITCHX);
+ }
+ } else {
+ span = GET_JUMP_OFFSET(pc);
+ }
+ JS_ASSERT(SD_SPAN(sd, pivot) == span);
+ }
+ JS_ASSERT(!JOF_TYPE_IS_EXTENDED_JUMP(type) || bigspans != 0);
+ }
+#endif
+
+ /*
+ * Reset so we optimize at most once -- cg may be used for further code
+ * generation of successive, independent, top-level statements. No jump
+ * can span top-level statements, because JS lacks goto.
+ */
+ size = SPANDEPS_SIZE(JS_BIT(JS_CeilingLog2(cg->numSpanDeps)));
+ JS_ArenaFreeAllocation(&cx->tempPool, cg->spanDeps,
+ JS_MAX(size, SPANDEPS_SIZE_MIN));
+ cg->spanDeps = NULL;
+ FreeJumpTargets(cg, cg->jumpTargets);
+ cg->jumpTargets = NULL;
+ cg->numSpanDeps = cg->numJumpTargets = 0;
+ cg->spanDepTodo = CG_OFFSET(cg);
+ return JS_TRUE;
+}
+
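+/*
+ * Emit op with a short jump offset immediate.  If off does not fit, or span
+ * dependencies are already being tracked, record a spandep so the jump can
+ * be extended later by OptimizeSpanDeps.
+ */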
+static JSBool
+EmitJump(JSContext *cx, JSCodeGenerator *cg, JSOp op, ptrdiff_t off)
+{
+ JSBool extend;
+ ptrdiff_t jmp;
+ jsbytecode *pc;
+
+ extend = off < JUMP_OFFSET_MIN || JUMP_OFFSET_MAX < off;
+ if (extend && !cg->spanDeps && !BuildSpanDepTable(cx, cg))
+ return JS_FALSE;
+
+ jmp = js_Emit3(cx, cg, op, JUMP_OFFSET_HI(off), JUMP_OFFSET_LO(off));
+ if (jmp >= 0 && (extend || cg->spanDeps)) {
+ pc = CG_CODE(cg, jmp);
+ if (!AddSpanDep(cx, cg, pc, pc, off))
+ return JS_FALSE;
+ }
+ return jmp;
+}
+
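+/*
+ * Read back the jump offset (or backpatch delta) for pc.  With no span
+ * dependencies this is simply pc's immediate operand; otherwise it is
+ * computed from the spandep's target, relative to the first jump operand of
+ * the owning opcode.
+ */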
+static ptrdiff_t
+GetJumpOffset(JSCodeGenerator *cg, jsbytecode *pc)
+{
+ JSSpanDep *sd;
+ JSJumpTarget *jt;
+ ptrdiff_t top;
+
+ if (!cg->spanDeps)
+ return GET_JUMP_OFFSET(pc);
+
+ sd = GetSpanDep(cg, pc);
+ jt = sd->target;
+ if (!JT_HAS_TAG(jt))
+ return JT_TO_BPDELTA(jt);
+
+ top = sd->top;
+ while (--sd >= cg->spanDeps && sd->top == top)
+ continue;
+ sd++;
+ return JT_CLR_TAG(jt)->offset - sd->offset;
+}
+
+JSBool
+js_SetJumpOffset(JSContext *cx, JSCodeGenerator *cg, jsbytecode *pc,
+ ptrdiff_t off)
+{
+ if (!cg->spanDeps) {
+ if (JUMP_OFFSET_MIN <= off && off <= JUMP_OFFSET_MAX) {
+ SET_JUMP_OFFSET(pc, off);
+ return JS_TRUE;
+ }
+
+ if (!BuildSpanDepTable(cx, cg))
+ return JS_FALSE;
+ }
+
+ return SetSpanDepTarget(cx, cg, GetSpanDep(cg, pc), off);
+}
+
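+/*
+ * Return true if some statement enclosing the current compilation point in
+ * tc has the given type.
+ */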
+JSBool
+js_InStatement(JSTreeContext *tc, JSStmtType type)
+{
+ JSStmtInfo *stmt;
+
+ for (stmt = tc->topStmt; stmt; stmt = stmt->down) {
+ if (stmt->type == type)
+ return JS_TRUE;
+ }
+ return JS_FALSE;
+}
+
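+/*
+ * Decide whether a use of atom can refer only to a global binding: return
+ * false if any enclosing with statement or block scope might capture it.
+ * *loopyp is set to true when the use occurs inside a loop.
+ */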
+JSBool
+js_IsGlobalReference(JSTreeContext *tc, JSAtom *atom, JSBool *loopyp)
+{
+ JSStmtInfo *stmt;
+ JSObject *obj;
+ JSScope *scope;
+
+ *loopyp = JS_FALSE;
+ for (stmt = tc->topStmt; stmt; stmt = stmt->down) {
+ if (stmt->type == STMT_WITH)
+ return JS_FALSE;
+ if (STMT_IS_LOOP(stmt)) {
+ *loopyp = JS_TRUE;
+ continue;
+ }
+ if (stmt->flags & SIF_SCOPE) {
+ obj = ATOM_TO_OBJECT(stmt->atom);
+ JS_ASSERT(LOCKED_OBJ_GET_CLASS(obj) == &js_BlockClass);
+ scope = OBJ_SCOPE(obj);
+ if (SCOPE_GET_PROPERTY(scope, ATOM_TO_JSID(atom)))
+ return JS_FALSE;
+ }
+ }
+ return JS_TRUE;
+}
+
+void
+js_PushStatement(JSTreeContext *tc, JSStmtInfo *stmt, JSStmtType type,
+ ptrdiff_t top)
+{
+ stmt->type = type;
+ stmt->flags = 0;
+ SET_STATEMENT_TOP(stmt, top);
+ stmt->atom = NULL;
+ stmt->down = tc->topStmt;
+ tc->topStmt = stmt;
+ if (STMT_LINKS_SCOPE(stmt)) {
+ stmt->downScope = tc->topScopeStmt;
+ tc->topScopeStmt = stmt;
+ } else {
+ stmt->downScope = NULL;
+ }
+}
+
+void
+js_PushBlockScope(JSTreeContext *tc, JSStmtInfo *stmt, JSAtom *blockAtom,
+ ptrdiff_t top)
+{
+ JSObject *blockObj;
+
+ js_PushStatement(tc, stmt, STMT_BLOCK, top);
+ stmt->flags |= SIF_SCOPE;
+ blockObj = ATOM_TO_OBJECT(blockAtom);
+ blockObj->slots[JSSLOT_PARENT] = OBJECT_TO_JSVAL(tc->blockChain);
+ stmt->downScope = tc->topScopeStmt;
+ tc->topScopeStmt = stmt;
+ tc->blockChain = blockObj;
+ stmt->atom = blockAtom;
+}
+
+/*
+ * Emit a backpatch op with offset pointing to the previous jump of this type,
+ * so that we can walk back up the chain fixing up the op and jump offset.
+ */
+static ptrdiff_t
+EmitBackPatchOp(JSContext *cx, JSCodeGenerator *cg, JSOp op, ptrdiff_t *lastp)
+{
+ ptrdiff_t offset, delta;
+
+ offset = CG_OFFSET(cg);
+ delta = offset - *lastp;
+ *lastp = offset;
+ JS_ASSERT(delta > 0);
+ return EmitJump(cx, cg, op, delta);
+}
+
+/*
+ * Macro to emit a bytecode followed by a uint16 immediate operand stored in
+ * big-endian order, used for arg and var numbers as well as for atomIndexes.
+ * NB: We use cx and cg from our caller's lexical environment, and return
+ * false on error.
+ */
+#define EMIT_UINT16_IMM_OP(op, i) \
+ JS_BEGIN_MACRO \
+ if (js_Emit3(cx, cg, op, UINT16_HI(i), UINT16_LO(i)) < 0) \
+ return JS_FALSE; \
+ JS_END_MACRO
+
+/* Emit additional bytecode(s) for non-local jumps. */
+static JSBool
+EmitNonLocalJumpFixup(JSContext *cx, JSCodeGenerator *cg, JSStmtInfo *toStmt,
+ JSOp *returnop)
+{
+ intN depth;
+ JSStmtInfo *stmt;
+ ptrdiff_t jmp;
+
+ /*
+ * Return from within a try block that has a finally clause must be split
+ * into two ops: JSOP_SETRVAL, to pop the r.v. and store it in fp->rval;
+ * and JSOP_RETRVAL, which makes control flow go back to the caller, who
+ * picks up fp->rval as usual. Otherwise, the stack will be unbalanced
+ * when executing the finally clause.
+ *
+ * We mutate *returnop once only if we find an enclosing try-block (viz,
+ * STMT_FINALLY) to ensure that we emit just one JSOP_SETRVAL before one
+ * or more JSOP_GOSUBs and other fixup opcodes emitted by this function.
+ * Our caller (the TOK_RETURN case of js_EmitTree) then emits *returnop.
+ * The fixup opcodes and gosubs must interleave in the proper order, from
+ * inner statement to outer, so that finally clauses run at the correct
+ * stack depth.
+ */
+ if (returnop) {
+ JS_ASSERT(*returnop == JSOP_RETURN);
+ for (stmt = cg->treeContext.topStmt; stmt != toStmt;
+ stmt = stmt->down) {
+ if (stmt->type == STMT_FINALLY ||
+ ((cg->treeContext.flags & TCF_FUN_HEAVYWEIGHT) &&
+ STMT_MAYBE_SCOPE(stmt))) {
+ if (js_Emit1(cx, cg, JSOP_SETRVAL) < 0)
+ return JS_FALSE;
+ *returnop = JSOP_RETRVAL;
+ break;
+ }
+ }
+
+ /*
+ * If there are no try-with-finally blocks open around this return
+ * statement, we can generate a return forthwith and skip generating
+ * any fixup code.
+ */
+ if (*returnop == JSOP_RETURN)
+ return JS_TRUE;
+ }
+
+ /*
+ * The non-local jump fixup we emit will unbalance cg->stackDepth, because
+ * the fixup replicates balanced code such as JSOP_LEAVEWITH emitted at the
+ * end of a with statement, so we save cg->stackDepth here and restore it
+ * just before a successful return.
+ */
+ depth = cg->stackDepth;
+ for (stmt = cg->treeContext.topStmt; stmt != toStmt; stmt = stmt->down) {
+ switch (stmt->type) {
+ case STMT_FINALLY:
+ if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0)
+ return JS_FALSE;
+ jmp = EmitBackPatchOp(cx, cg, JSOP_BACKPATCH, &GOSUBS(*stmt));
+ if (jmp < 0)
+ return JS_FALSE;
+ break;
+
+ case STMT_WITH:
+ /* There's a With object on the stack that we need to pop. */
+ if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0)
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_LEAVEWITH) < 0)
+ return JS_FALSE;
+ break;
+
+ case STMT_FOR_IN_LOOP:
+ /*
+ * The iterator and the object being iterated need to be popped.
+ */
+ if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0)
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_ENDITER) < 0)
+ return JS_FALSE;
+ break;
+
+ case STMT_SUBROUTINE:
+ /*
+ * There's a [exception or hole, retsub pc-index] pair on the
+ * stack that we need to pop.
+ */
+ if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0)
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_POP2) < 0)
+ return JS_FALSE;
+ break;
+
+ default:;
+ }
+
+ if (stmt->flags & SIF_SCOPE) {
+ uintN i;
+
+ /* There is a Block object with locals on the stack to pop. */
+ if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0)
+ return JS_FALSE;
+ i = OBJ_BLOCK_COUNT(cx, ATOM_TO_OBJECT(stmt->atom));
+ EMIT_UINT16_IMM_OP(JSOP_LEAVEBLOCK, i);
+ }
+ }
+
+ cg->stackDepth = depth;
+ return JS_TRUE;
+}
+
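+/*
+ * Emit a non-local goto (e.g. break or continue) out to toStmt: the fixup
+ * opcodes for the statements being exited, an optional source note carrying
+ * the label index if any, and a backpatch op linked on *lastp for later
+ * resolution by BackPatch.
+ */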
+static ptrdiff_t
+EmitGoto(JSContext *cx, JSCodeGenerator *cg, JSStmtInfo *toStmt,
+ ptrdiff_t *lastp, JSAtomListElement *label, JSSrcNoteType noteType)
+{
+ intN index;
+
+ if (!EmitNonLocalJumpFixup(cx, cg, toStmt, NULL))
+ return -1;
+
+ if (label)
+ index = js_NewSrcNote2(cx, cg, noteType, (ptrdiff_t) ALE_INDEX(label));
+ else if (noteType != SRC_NULL)
+ index = js_NewSrcNote(cx, cg, noteType);
+ else
+ index = 0;
+ if (index < 0)
+ return -1;
+
+ return EmitBackPatchOp(cx, cg, JSOP_BACKPATCH, lastp);
+}
+
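+/*
+ * Walk the backpatch chain that starts at offset last, rewriting each linked
+ * opcode to op and patching each jump offset to point at target.
+ */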
+static JSBool
+BackPatch(JSContext *cx, JSCodeGenerator *cg, ptrdiff_t last,
+ jsbytecode *target, jsbytecode op)
+{
+ jsbytecode *pc, *stop;
+ ptrdiff_t delta, span;
+
+ pc = CG_CODE(cg, last);
+ stop = CG_CODE(cg, -1);
+ while (pc != stop) {
+ delta = GetJumpOffset(cg, pc);
+ span = PTRDIFF(target, pc, jsbytecode);
+ CHECK_AND_SET_JUMP_OFFSET(cx, cg, pc, span);
+
+ /*
+ * Set *pc after jump offset in case bpdelta didn't overflow, but span
+ * does (if so, CHECK_AND_SET_JUMP_OFFSET might call BuildSpanDepTable
+ * and need to see the JSOP_BACKPATCH* op at *pc).
+ */
+ *pc = op;
+ pc -= delta;
+ }
+ return JS_TRUE;
+}
+
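+/*
+ * Pop the top statement from tc, unlinking it from the scope-statement stack
+ * and restoring tc->blockChain when it carried a block scope.
+ */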
+void
+js_PopStatement(JSTreeContext *tc)
+{
+ JSStmtInfo *stmt;
+ JSObject *blockObj;
+
+ stmt = tc->topStmt;
+ tc->topStmt = stmt->down;
+ if (STMT_LINKS_SCOPE(stmt)) {
+ tc->topScopeStmt = stmt->downScope;
+ if (stmt->flags & SIF_SCOPE) {
+ blockObj = ATOM_TO_OBJECT(stmt->atom);
+ tc->blockChain = JSVAL_TO_OBJECT(blockObj->slots[JSSLOT_PARENT]);
+ }
+ }
+}
+
+JSBool
+js_PopStatementCG(JSContext *cx, JSCodeGenerator *cg)
+{
+ JSStmtInfo *stmt;
+
+ stmt = cg->treeContext.topStmt;
+ if (!STMT_IS_TRYING(stmt) &&
+ (!BackPatch(cx, cg, stmt->breaks, CG_NEXT(cg), JSOP_GOTO) ||
+ !BackPatch(cx, cg, stmt->continues, CG_CODE(cg, stmt->update),
+ JSOP_GOTO))) {
+ return JS_FALSE;
+ }
+ js_PopStatement(&cg->treeContext);
+ return JS_TRUE;
+}
+
+JSBool
+js_DefineCompileTimeConstant(JSContext *cx, JSCodeGenerator *cg, JSAtom *atom,
+ JSParseNode *pn)
+{
+ jsdouble dval;
+ jsint ival;
+ JSAtom *valueAtom;
+ JSAtomListElement *ale;
+
+ /* XXX just do numbers for now */
+ if (pn->pn_type == TOK_NUMBER) {
+ dval = pn->pn_dval;
+ valueAtom = (JSDOUBLE_IS_INT(dval, ival) && INT_FITS_IN_JSVAL(ival))
+ ? js_AtomizeInt(cx, ival, 0)
+ : js_AtomizeDouble(cx, dval, 0);
+ if (!valueAtom)
+ return JS_FALSE;
+ ale = js_IndexAtom(cx, atom, &cg->constList);
+ if (!ale)
+ return JS_FALSE;
+ ALE_SET_VALUE(ale, ATOM_KEY(valueAtom));
+ }
+ return JS_TRUE;
+}
+
+JSStmtInfo *
+js_LexicalLookup(JSTreeContext *tc, JSAtom *atom, jsint *slotp, JSBool letdecl)
+{
+ JSStmtInfo *stmt;
+ JSObject *obj;
+ JSScope *scope;
+ JSScopeProperty *sprop;
+ jsval v;
+
+ for (stmt = tc->topScopeStmt; stmt; stmt = stmt->downScope) {
+ if (stmt->type == STMT_WITH) {
+ /* Ignore with statements enclosing a single let declaration. */
+ if (letdecl)
+ continue;
+ break;
+ }
+
+ /* Skip "maybe scope" statements that don't contain let bindings. */
+ if (!(stmt->flags & SIF_SCOPE))
+ continue;
+
+ obj = ATOM_TO_OBJECT(stmt->atom);
+ JS_ASSERT(LOCKED_OBJ_GET_CLASS(obj) == &js_BlockClass);
+ scope = OBJ_SCOPE(obj);
+ sprop = SCOPE_GET_PROPERTY(scope, ATOM_TO_JSID(atom));
+ if (sprop) {
+ JS_ASSERT(sprop->flags & SPROP_HAS_SHORTID);
+
+ if (slotp) {
+ /*
+ * Use LOCKED_OBJ_GET_SLOT since we know obj is single-
+ * threaded and owned by this compiler activation.
+ */
+ v = LOCKED_OBJ_GET_SLOT(obj, JSSLOT_BLOCK_DEPTH);
+ JS_ASSERT(JSVAL_IS_INT(v) && JSVAL_TO_INT(v) >= 0);
+ *slotp = JSVAL_TO_INT(v) + sprop->shortid;
+ }
+ return stmt;
+ }
+ }
+
+ if (slotp)
+ *slotp = -1;
+ return stmt;
+}
+
+JSBool
+js_LookupCompileTimeConstant(JSContext *cx, JSCodeGenerator *cg, JSAtom *atom,
+ jsval *vp)
+{
+ JSBool ok;
+ JSStackFrame *fp;
+ JSStmtInfo *stmt;
+ jsint slot;
+ JSAtomListElement *ale;
+ JSObject *obj, *pobj;
+ JSProperty *prop;
+ uintN attrs;
+
+ /*
+ * fp chases cg down the stack, but only until we reach the outermost cg.
+ * This enables propagating consts from top-level into switch cases in a
+ * function compiled along with the top-level script. All stack frames
+ * with matching code generators should be flagged with JSFRAME_COMPILING;
+ * we check sanity here.
+ */
+ *vp = JSVAL_VOID;
+ ok = JS_TRUE;
+ fp = cx->fp;
+ do {
+ JS_ASSERT(fp->flags & JSFRAME_COMPILING);
+
+ obj = fp->varobj;
+ if (obj == fp->scopeChain) {
+ /* XXX this will need revising when 'let const' is added. */
+ stmt = js_LexicalLookup(&cg->treeContext, atom, &slot, JS_FALSE);
+ if (stmt)
+ return JS_TRUE;
+
+ ATOM_LIST_SEARCH(ale, &cg->constList, atom);
+ if (ale) {
+ *vp = ALE_VALUE(ale);
+ return JS_TRUE;
+ }
+
+ /*
+ * Try looking in the variable object for a direct property that
+ * is readonly and permanent. We know such a property can't be
+ * shadowed by another property on obj's prototype chain, or a
+ * with object or catch variable; nor can prop's value be changed,
+ * nor can prop be deleted.
+ */
+ prop = NULL;
+ if (OBJ_GET_CLASS(cx, obj) == &js_FunctionClass) {
+ ok = js_LookupHiddenProperty(cx, obj, ATOM_TO_JSID(atom),
+ &pobj, &prop);
+ if (!ok)
+ break;
+ if (prop) {
+#ifdef DEBUG
+ JSScopeProperty *sprop = (JSScopeProperty *)prop;
+
+ /*
+ * Any hidden property must be a formal arg or local var,
+ * which will shadow a global const of the same name.
+ */
+ JS_ASSERT(sprop->getter == js_GetArgument ||
+ sprop->getter == js_GetLocalVariable);
+#endif
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ break;
+ }
+ }
+
+ ok = OBJ_LOOKUP_PROPERTY(cx, obj, ATOM_TO_JSID(atom), &pobj, &prop);
+ if (ok) {
+ if (pobj == obj &&
+ (fp->flags & (JSFRAME_EVAL | JSFRAME_COMPILE_N_GO))) {
+ /*
+ * We're compiling code that will be executed immediately,
+ * not re-executed against a different scope chain and/or
+ * variable object. Therefore we can get constant values
+ * from our variable object here.
+ */
+ ok = OBJ_GET_ATTRIBUTES(cx, obj, ATOM_TO_JSID(atom), prop,
+ &attrs);
+ if (ok && !(~attrs & (JSPROP_READONLY | JSPROP_PERMANENT)))
+ ok = OBJ_GET_PROPERTY(cx, obj, ATOM_TO_JSID(atom), vp);
+ }
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ }
+ if (!ok || prop)
+ break;
+ }
+ fp = fp->down;
+ } while ((cg = cg->parent) != NULL);
+ return ok;
+}
+
+/*
+ * Allocate an index invariant for all activations of the code being compiled
+ * in cg, that can be used to store and fetch a reference to a cloned RegExp
+ * object that shares the same JSRegExp private data created for the object
+ * literal in pn->pn_atom. We need clones to hold lastIndex and other direct
+ * properties that should not be shared among threads sharing a precompiled
+ * function or script.
+ *
+ * If the code being compiled is function code, allocate a reserved slot in
+ * the cloned function object that shares its precompiled script with other
+ * cloned function objects and with the compiler-created clone-parent. There
+ * are fun->nregexps such reserved slots in each function object cloned from
+ * fun->object. NB: during compilation, funobj slots must never be allocated,
+ * because js_AllocSlot could hand out one of the slots that should be given
+ * to a regexp clone.
+ *
+ * If the code being compiled is global code, reserve the fp->vars slot at
+ * ALE_INDEX(ale), by ensuring that cg->treeContext.numGlobalVars is at least
+ * one more than this index. For global code, fp->vars is parallel to the
+ * global script->atomMap.vector array, but possibly shorter for the common
+ * case (where var declarations and regexp literals cluster toward the front
+ * of the script or function body).
+ *
+ * Global variable name literals in script->atomMap have fast-global slot
+ * numbers (stored as int-tagged jsvals) in the corresponding fp->vars array
+ * element. The atomIndex for a regexp object literal thus also addresses an
+ * fp->vars element that is not used by any optimized global variable, so we
+ * use that GC-scanned element to keep the regexp object clone alive, as well
+ * as to lazily create and find it at run-time for the JSOP_REGEXP bytecode.
+ *
+ * In no case can cx->fp->varobj be a Call object here, because that implies
+ * we are compiling eval code, in which case (cx->fp->flags & JSFRAME_EVAL)
+ * is true, and js_GetToken will have already selected JSOP_OBJECT instead of
+ * JSOP_REGEXP, to avoid all this RegExp object cloning business.
+ *
+ * Why clone regexp objects? ECMA specifies that when a regular expression
+ * literal is scanned, a RegExp object is created. In the spec, compilation
+ * and execution happen indivisibly, but in this implementation and many of
+ * its embeddings, code is precompiled early and re-executed in multiple
+ * threads, or using multiple global objects, or both, for efficiency.
+ *
+ * In such cases, naively following ECMA leads to wrongful sharing of RegExp
+ * objects, which makes for collisions on the lastIndex property (especially
+ * for global regexps) and on any ad-hoc properties. Also, __proto__ and
+ * __parent__ refer to the pre-compilation prototype and global objects, a
+ * pigeon-hole problem for instanceof tests.
+ */
+static JSBool
+IndexRegExpClone(JSContext *cx, JSParseNode *pn, JSAtomListElement *ale,
+ JSCodeGenerator *cg)
+{
+ JSObject *varobj, *reobj;
+ JSClass *clasp;
+ JSFunction *fun;
+ JSRegExp *re;
+ uint16 *countPtr;
+ uintN cloneIndex;
+
+ JS_ASSERT(!(cx->fp->flags & (JSFRAME_EVAL | JSFRAME_COMPILE_N_GO)));
+
+ varobj = cx->fp->varobj;
+ clasp = OBJ_GET_CLASS(cx, varobj);
+ if (clasp == &js_FunctionClass) {
+ fun = (JSFunction *) JS_GetPrivate(cx, varobj);
+ countPtr = &fun->u.i.nregexps;
+ cloneIndex = *countPtr;
+ } else {
+ JS_ASSERT(clasp != &js_CallClass);
+ countPtr = &cg->treeContext.numGlobalVars;
+ cloneIndex = ALE_INDEX(ale);
+ }
+
+ if ((cloneIndex + 1) >> 16) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_NEED_DIET, js_script_str);
+ return JS_FALSE;
+ }
+ if (cloneIndex >= *countPtr)
+ *countPtr = cloneIndex + 1;
+
+ reobj = ATOM_TO_OBJECT(pn->pn_atom);
+ JS_ASSERT(OBJ_GET_CLASS(cx, reobj) == &js_RegExpClass);
+ re = (JSRegExp *) JS_GetPrivate(cx, reobj);
+ re->cloneIndex = cloneIndex;
+ return JS_TRUE;
+}
+
+/*
+ * Emit a bytecode and its 2-byte constant (atom) index immediate operand.
+ * If the atomIndex requires more than 2 bytes, emit a prefix op whose 24-bit
+ * immediate operand indexes the atom in script->atomMap.
+ *
+ * If op has JOF_NAME mode, emit JSOP_FINDNAME to find and push the object in
+ * the scope chain in which the literal name was found, followed by the name
+ * as a string. This enables us to use the JOF_ELEM counterpart to op.
+ *
+ * Otherwise, if op has JOF_PROP mode, emit JSOP_LITERAL before op, to push
+ * the atom's value key. For JOF_PROP ops, the object being operated on has
+ * already been pushed, and JSOP_LITERAL will push the id, leaving the stack
+ * in the proper state for a JOF_ELEM counterpart.
+ *
+ * Otherwise, emit JSOP_LITOPX to push the atom index, then perform a special
+ * dispatch on op, but getting op's atom index from the stack instead of from
+ * an unsigned 16-bit immediate operand.
+ */
+static JSBool
+EmitAtomIndexOp(JSContext *cx, JSOp op, jsatomid atomIndex, JSCodeGenerator *cg)
+{
+ uint32 mode;
+ JSOp prefixOp;
+ ptrdiff_t off;
+ jsbytecode *pc;
+
+ if (atomIndex >= JS_BIT(16)) {
+ mode = (js_CodeSpec[op].format & JOF_MODEMASK);
+ if (op != JSOP_SETNAME) {
+ prefixOp = ((mode != JOF_NAME && mode != JOF_PROP) ||
+#if JS_HAS_XML_SUPPORT
+ op == JSOP_GETMETHOD ||
+ op == JSOP_SETMETHOD ||
+#endif
+ op == JSOP_SETCONST)
+ ? JSOP_LITOPX
+ : (mode == JOF_NAME)
+ ? JSOP_FINDNAME
+ : JSOP_LITERAL;
+ off = js_EmitN(cx, cg, prefixOp, 3);
+ if (off < 0)
+ return JS_FALSE;
+ pc = CG_CODE(cg, off);
+ SET_LITERAL_INDEX(pc, atomIndex);
+ }
+
+ switch (op) {
+ case JSOP_DECNAME: op = JSOP_DECELEM; break;
+ case JSOP_DECPROP: op = JSOP_DECELEM; break;
+ case JSOP_DELNAME: op = JSOP_DELELEM; break;
+ case JSOP_DELPROP: op = JSOP_DELELEM; break;
+ case JSOP_FORNAME: op = JSOP_FORELEM; break;
+ case JSOP_FORPROP: op = JSOP_FORELEM; break;
+ case JSOP_GETPROP: op = JSOP_GETELEM; break;
+ case JSOP_GETXPROP: op = JSOP_GETXELEM; break;
+ case JSOP_IMPORTPROP: op = JSOP_IMPORTELEM; break;
+ case JSOP_INCNAME: op = JSOP_INCELEM; break;
+ case JSOP_INCPROP: op = JSOP_INCELEM; break;
+ case JSOP_INITPROP: op = JSOP_INITELEM; break;
+ case JSOP_NAME: op = JSOP_GETELEM; break;
+ case JSOP_NAMEDEC: op = JSOP_ELEMDEC; break;
+ case JSOP_NAMEINC: op = JSOP_ELEMINC; break;
+ case JSOP_PROPDEC: op = JSOP_ELEMDEC; break;
+ case JSOP_PROPINC: op = JSOP_ELEMINC; break;
+ case JSOP_BINDNAME: return JS_TRUE;
+ case JSOP_SETNAME: op = JSOP_SETELEM; break;
+ case JSOP_SETPROP: op = JSOP_SETELEM; break;
+#if JS_HAS_EXPORT_IMPORT
+ case JSOP_EXPORTNAME:
+ ReportStatementTooLarge(cx, cg);
+ return JS_FALSE;
+#endif
+ default:
+#if JS_HAS_XML_SUPPORT
+ JS_ASSERT(mode == 0 || op == JSOP_SETCONST ||
+ op == JSOP_GETMETHOD || op == JSOP_SETMETHOD);
+#else
+ JS_ASSERT(mode == 0 || op == JSOP_SETCONST);
+#endif
+ break;
+ }
+
+ return js_Emit1(cx, cg, op) >= 0;
+ }
+
+ EMIT_UINT16_IMM_OP(op, atomIndex);
+ return JS_TRUE;
+}
+
+/*
+ * Slight sugar for EmitAtomIndexOp, again accessing cx and cg from the macro
+ * caller's lexical environment, and embedding a false return on error.
+ * XXXbe hey, who checks for fun->nvars and fun->nargs overflow?!
+ */
+#define EMIT_ATOM_INDEX_OP(op, atomIndex) \
+ JS_BEGIN_MACRO \
+ if (!EmitAtomIndexOp(cx, op, atomIndex, cg)) \
+ return JS_FALSE; \
+ JS_END_MACRO
+
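+/*
+ * Index pn's atom in cg's atom list and emit op against that index, first
+ * reserving a regexp clone slot when op is JSOP_REGEXP.
+ */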
+static JSBool
+EmitAtomOp(JSContext *cx, JSParseNode *pn, JSOp op, JSCodeGenerator *cg)
+{
+ JSAtomListElement *ale;
+
+ ale = js_IndexAtom(cx, pn->pn_atom, &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ if (op == JSOP_REGEXP && !IndexRegExpClone(cx, pn, ale, cg))
+ return JS_FALSE;
+ return EmitAtomIndexOp(cx, op, ALE_INDEX(ale), cg);
+}
+
+/*
+ * This routine tries to optimize name gets and sets to stack slot loads and
+ * stores, given the variables object and scope chain in cx's top frame, the
+ * compile-time context in tc, and a TOK_NAME node pn. It returns false on
+ * error, true on success.
+ *
+ * The caller can inspect pn->pn_slot for a non-negative slot number to tell
+ * whether optimization occurred, in which case BindNameToSlot also updated
+ * pn->pn_op. If pn->pn_slot is still -1 on return, pn->pn_op nevertheless
+ * may have been optimized, e.g., from JSOP_NAME to JSOP_ARGUMENTS. Whether
+ * or not pn->pn_op was modified, if this function finds an argument or local
+ * variable name, pn->pn_attrs will contain the property's attributes after a
+ * successful return.
+ *
+ * NB: if you add more opcodes specialized from JSOP_NAME, etc., don't forget
+ * to update the TOK_FOR (for-in) and TOK_ASSIGN (op=, e.g. +=) special cases
+ * in js_EmitTree.
+ */
+static JSBool
+BindNameToSlot(JSContext *cx, JSTreeContext *tc, JSParseNode *pn,
+ JSBool letdecl)
+{
+ JSAtom *atom;
+ JSStmtInfo *stmt;
+ jsint slot;
+ JSOp op;
+ JSStackFrame *fp;
+ JSObject *obj, *pobj;
+ JSClass *clasp;
+ JSBool optimizeGlobals;
+ JSPropertyOp getter;
+ uintN attrs;
+ JSAtomListElement *ale;
+ JSProperty *prop;
+ JSScopeProperty *sprop;
+
+ JS_ASSERT(pn->pn_type == TOK_NAME);
+ if (pn->pn_slot >= 0 || pn->pn_op == JSOP_ARGUMENTS)
+ return JS_TRUE;
+
+ /* QNAME references can never be optimized to use arg/var storage. */
+ if (pn->pn_op == JSOP_QNAMEPART)
+ return JS_TRUE;
+
+ /*
+ * We can't optimize if we are compiling a with statement and its body,
+ * or we're in a catch block whose exception variable has the same name
+ * as this node. FIXME: we should be able to optimize catch vars to be
+ * block-locals.
+ */
+ atom = pn->pn_atom;
+ stmt = js_LexicalLookup(tc, atom, &slot, letdecl);
+ if (stmt) {
+ if (stmt->type == STMT_WITH)
+ return JS_TRUE;
+
+ JS_ASSERT(stmt->flags & SIF_SCOPE);
+ JS_ASSERT(slot >= 0);
+ op = pn->pn_op;
+ switch (op) {
+ case JSOP_NAME: op = JSOP_GETLOCAL; break;
+ case JSOP_SETNAME: op = JSOP_SETLOCAL; break;
+ case JSOP_INCNAME: op = JSOP_INCLOCAL; break;
+ case JSOP_NAMEINC: op = JSOP_LOCALINC; break;
+ case JSOP_DECNAME: op = JSOP_DECLOCAL; break;
+ case JSOP_NAMEDEC: op = JSOP_LOCALDEC; break;
+ case JSOP_FORNAME: op = JSOP_FORLOCAL; break;
+ case JSOP_DELNAME: op = JSOP_FALSE; break;
+ default: JS_ASSERT(0);
+ }
+ if (op != pn->pn_op) {
+ pn->pn_op = op;
+ pn->pn_slot = slot;
+ }
+ return JS_TRUE;
+ }
+
+ /*
+ * A Script object can be used to split an eval into a compile step done
+ * at construction time, and an execute step done separately, possibly in
+ * a different scope altogether. We therefore cannot do any name-to-slot
+     * optimizations, but must look up names at runtime. Note that script_exec
+ * ensures that its caller's frame has a Call object, so arg and var name
+ * lookups will succeed.
+ */
+ fp = cx->fp;
+ if (fp->flags & JSFRAME_SCRIPT_OBJECT)
+ return JS_TRUE;
+
+ /*
+ * We can't optimize if var and closure (a local function not in a larger
+ * expression and not at top-level within another's body) collide.
+ * XXX suboptimal: keep track of colliding names and deoptimize only those
+ */
+ if (tc->flags & TCF_FUN_CLOSURE_VS_VAR)
+ return JS_TRUE;
+
+ /*
+ * We can't optimize if we're not compiling a function body, whether via
+ * eval, or directly when compiling a function statement or expression.
+ */
+ obj = fp->varobj;
+ clasp = OBJ_GET_CLASS(cx, obj);
+ if (clasp != &js_FunctionClass && clasp != &js_CallClass) {
+ /* Check for an eval or debugger frame. */
+ if (fp->flags & JSFRAME_SPECIAL)
+ return JS_TRUE;
+
+ /*
+ * Optimize global variable accesses if there are at least 100 uses
+         * in unambiguous contexts, or failing that, if at least half of all
+         * the uses of global vars/consts/functions are in loops.
+ */
+ optimizeGlobals = (tc->globalUses >= 100 ||
+ (tc->loopyGlobalUses &&
+ tc->loopyGlobalUses >= tc->globalUses / 2));
+ if (!optimizeGlobals)
+ return JS_TRUE;
+ } else {
+ optimizeGlobals = JS_FALSE;
+ }
+
+ /*
+ * We can't optimize if we are in an eval called inside a with statement.
+ */
+ if (fp->scopeChain != obj)
+ return JS_TRUE;
+
+ op = pn->pn_op;
+ getter = NULL;
+#ifdef __GNUC__
+ attrs = slot = 0; /* quell GCC overwarning */
+#endif
+ if (optimizeGlobals) {
+ /*
+ * We are optimizing global variables, and there is no pre-existing
+ * global property named atom. If atom was declared via const or var,
+ * optimize pn to access fp->vars using the appropriate JOF_QVAR op.
+ */
+ ATOM_LIST_SEARCH(ale, &tc->decls, atom);
+ if (!ale) {
+ /* Use precedes declaration, or name is never declared. */
+ return JS_TRUE;
+ }
+
+ attrs = (ALE_JSOP(ale) == JSOP_DEFCONST)
+ ? JSPROP_ENUMERATE | JSPROP_READONLY | JSPROP_PERMANENT
+ : JSPROP_ENUMERATE | JSPROP_PERMANENT;
+
+ /* Index atom so we can map fast global number to name. */
+ JS_ASSERT(tc->flags & TCF_COMPILING);
+ ale = js_IndexAtom(cx, atom, &((JSCodeGenerator *) tc)->atomList);
+ if (!ale)
+ return JS_FALSE;
+
+ /* Defend against tc->numGlobalVars 16-bit overflow. */
+ slot = ALE_INDEX(ale);
+ if ((slot + 1) >> 16)
+ return JS_TRUE;
+
+ if ((uint16)(slot + 1) > tc->numGlobalVars)
+ tc->numGlobalVars = (uint16)(slot + 1);
+ } else {
+ /*
+ * We may be able to optimize name to stack slot. Look for an argument
+ * or variable property in the function, or its call object, not found
+ * in any prototype object. Rewrite pn_op and update pn accordingly.
+ * NB: We know that JSOP_DELNAME on an argument or variable evaluates
+ * to false, due to JSPROP_PERMANENT.
+ */
+ if (!js_LookupHiddenProperty(cx, obj, ATOM_TO_JSID(atom), &pobj, &prop))
+ return JS_FALSE;
+ sprop = (JSScopeProperty *) prop;
+ if (sprop) {
+ if (pobj == obj) {
+ getter = sprop->getter;
+ attrs = sprop->attrs;
+ slot = (sprop->flags & SPROP_HAS_SHORTID) ? sprop->shortid : -1;
+ }
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ }
+ }
+
+ if (optimizeGlobals || getter) {
+ if (optimizeGlobals) {
+ switch (op) {
+ case JSOP_NAME: op = JSOP_GETGVAR; break;
+ case JSOP_SETNAME: op = JSOP_SETGVAR; break;
+ case JSOP_SETCONST: /* NB: no change */ break;
+ case JSOP_INCNAME: op = JSOP_INCGVAR; break;
+ case JSOP_NAMEINC: op = JSOP_GVARINC; break;
+ case JSOP_DECNAME: op = JSOP_DECGVAR; break;
+ case JSOP_NAMEDEC: op = JSOP_GVARDEC; break;
+ case JSOP_FORNAME: /* NB: no change */ break;
+ case JSOP_DELNAME: /* NB: no change */ break;
+ default: JS_ASSERT(0);
+ }
+ } else if (getter == js_GetLocalVariable ||
+ getter == js_GetCallVariable) {
+ switch (op) {
+ case JSOP_NAME: op = JSOP_GETVAR; break;
+ case JSOP_SETNAME: op = JSOP_SETVAR; break;
+ case JSOP_SETCONST: op = JSOP_SETVAR; break;
+ case JSOP_INCNAME: op = JSOP_INCVAR; break;
+ case JSOP_NAMEINC: op = JSOP_VARINC; break;
+ case JSOP_DECNAME: op = JSOP_DECVAR; break;
+ case JSOP_NAMEDEC: op = JSOP_VARDEC; break;
+ case JSOP_FORNAME: op = JSOP_FORVAR; break;
+ case JSOP_DELNAME: op = JSOP_FALSE; break;
+ default: JS_ASSERT(0);
+ }
+ } else if (getter == js_GetArgument ||
+ (getter == js_CallClass.getProperty &&
+ fp->fun && (uintN) slot < fp->fun->nargs)) {
+ switch (op) {
+ case JSOP_NAME: op = JSOP_GETARG; break;
+ case JSOP_SETNAME: op = JSOP_SETARG; break;
+ case JSOP_INCNAME: op = JSOP_INCARG; break;
+ case JSOP_NAMEINC: op = JSOP_ARGINC; break;
+ case JSOP_DECNAME: op = JSOP_DECARG; break;
+ case JSOP_NAMEDEC: op = JSOP_ARGDEC; break;
+ case JSOP_FORNAME: op = JSOP_FORARG; break;
+ case JSOP_DELNAME: op = JSOP_FALSE; break;
+ default: JS_ASSERT(0);
+ }
+ }
+ if (op != pn->pn_op) {
+ pn->pn_op = op;
+ pn->pn_slot = slot;
+ }
+ pn->pn_attrs = attrs;
+ }
+
+ if (pn->pn_slot < 0) {
+ /*
+ * We couldn't optimize pn, so it's not a global or local slot name.
+ * Now we must check for the predefined arguments variable. It may be
+ * overridden by assignment, in which case the function is heavyweight
+ * and the interpreter will look up 'arguments' in the function's call
+ * object.
+ */
+ if (pn->pn_op == JSOP_NAME &&
+ atom == cx->runtime->atomState.argumentsAtom) {
+ pn->pn_op = JSOP_ARGUMENTS;
+ return JS_TRUE;
+ }
+
+ tc->flags |= TCF_FUN_USES_NONLOCALS;
+ }
+ return JS_TRUE;
+}
+
+/*
+ * If pn contains a useful expression, return true with *answer set to true.
+ * If pn contains a useless expression, return true with *answer set to false.
+ * Return false on error.
+ *
+ * The caller should initialize *answer to false and invoke this function on
+ * an expression statement or similar subtree to decide whether the tree could
+ * produce code that has any side effects. For an expression statement, we
+ * define useless code as code with no side effects, because the main effect,
+ * the value left on the stack after the code executes, will be discarded by a
+ * pop bytecode.
+ */
+static JSBool
+CheckSideEffects(JSContext *cx, JSTreeContext *tc, JSParseNode *pn,
+ JSBool *answer)
+{
+ JSBool ok;
+ JSFunction *fun;
+ JSParseNode *pn2;
+
+ ok = JS_TRUE;
+ if (!pn || *answer)
+ return ok;
+
+ switch (pn->pn_arity) {
+ case PN_FUNC:
+ /*
+ * A named function is presumed useful: we can't yet know that it is
+ * not called. The side effects are the creation of a scope object
+ * to parent this function object, and the binding of the function's
+ * name in that scope object. See comments at case JSOP_NAMEDFUNOBJ:
+ * in jsinterp.c.
+ */
+ fun = (JSFunction *) JS_GetPrivate(cx, ATOM_TO_OBJECT(pn->pn_funAtom));
+ if (fun->atom)
+ *answer = JS_TRUE;
+ break;
+
+ case PN_LIST:
+ if (pn->pn_type == TOK_NEW ||
+ pn->pn_type == TOK_LP ||
+ pn->pn_type == TOK_LB ||
+ pn->pn_type == TOK_RB ||
+ pn->pn_type == TOK_RC) {
+ /*
+ * All invocation operations (construct: TOK_NEW, call: TOK_LP)
+ * are presumed to be useful, because they may have side effects
+ * even if their main effect (their return value) is discarded.
+ *
+ * TOK_LB binary trees of 3 or more nodes are flattened into lists
+ * to avoid too much recursion. All such lists must be presumed
+ * to be useful because each index operation could invoke a getter
+ * (the JSOP_ARGUMENTS special case below, in the PN_BINARY case,
+ * does not apply here: arguments[i][j] might invoke a getter).
+ *
+ * Array and object initializers (TOK_RB and TOK_RC lists) must be
+ * considered useful, because they are sugar for constructor calls
+ * (to Array and Object, respectively).
+ */
+ *answer = JS_TRUE;
+ } else {
+ for (pn2 = pn->pn_head; pn2; pn2 = pn2->pn_next)
+ ok &= CheckSideEffects(cx, tc, pn2, answer);
+ }
+ break;
+
+ case PN_TERNARY:
+ ok = CheckSideEffects(cx, tc, pn->pn_kid1, answer) &&
+ CheckSideEffects(cx, tc, pn->pn_kid2, answer) &&
+ CheckSideEffects(cx, tc, pn->pn_kid3, answer);
+ break;
+
+ case PN_BINARY:
+ if (pn->pn_type == TOK_ASSIGN) {
+ /*
+ * Assignment is presumed to be useful, even if the next operation
+ * is another assignment overwriting this one's ostensible effect,
+ * because the left operand may be a property with a setter that
+ * has side effects.
+ *
+ * The only exception is assignment of a useless value to a const
+ * declared in the function currently being compiled.
+ */
+ pn2 = pn->pn_left;
+ if (pn2->pn_type != TOK_NAME) {
+ *answer = JS_TRUE;
+ } else {
+ if (!BindNameToSlot(cx, tc, pn2, JS_FALSE))
+ return JS_FALSE;
+ if (!CheckSideEffects(cx, tc, pn->pn_right, answer))
+ return JS_FALSE;
+ if (!*answer &&
+ (pn2->pn_slot < 0 || !(pn2->pn_attrs & JSPROP_READONLY))) {
+ *answer = JS_TRUE;
+ }
+ }
+ } else {
+ if (pn->pn_type == TOK_LB) {
+ pn2 = pn->pn_left;
+ if (pn2->pn_type == TOK_NAME &&
+ !BindNameToSlot(cx, tc, pn2, JS_FALSE)) {
+ return JS_FALSE;
+ }
+ if (pn2->pn_op != JSOP_ARGUMENTS) {
+ /*
+ * Any indexed property reference could call a getter with
+ * side effects, except for arguments[i] where arguments is
+ * unambiguous.
+ */
+ *answer = JS_TRUE;
+ }
+ }
+ ok = CheckSideEffects(cx, tc, pn->pn_left, answer) &&
+ CheckSideEffects(cx, tc, pn->pn_right, answer);
+ }
+ break;
+
+ case PN_UNARY:
+ if (pn->pn_type == TOK_INC || pn->pn_type == TOK_DEC ||
+ pn->pn_type == TOK_THROW ||
+#if JS_HAS_GENERATORS
+ pn->pn_type == TOK_YIELD ||
+#endif
+ pn->pn_type == TOK_DEFSHARP) {
+ /* All these operations have effects that we must commit. */
+ *answer = JS_TRUE;
+ } else if (pn->pn_type == TOK_DELETE) {
+ pn2 = pn->pn_kid;
+ switch (pn2->pn_type) {
+ case TOK_NAME:
+ case TOK_DOT:
+#if JS_HAS_XML_SUPPORT
+ case TOK_DBLDOT:
+#endif
+#if JS_HAS_LVALUE_RETURN
+ case TOK_LP:
+#endif
+ case TOK_LB:
+ /* All these delete addressing modes have effects too. */
+ *answer = JS_TRUE;
+ break;
+ default:
+ ok = CheckSideEffects(cx, tc, pn2, answer);
+ break;
+ }
+ } else {
+ ok = CheckSideEffects(cx, tc, pn->pn_kid, answer);
+ }
+ break;
+
+ case PN_NAME:
+ /*
+ * Take care to avoid trying to bind a label name (labels, both for
+ * statements and property values in object initialisers, have pn_op
+ * defaulted to JSOP_NOP).
+ */
+ if (pn->pn_type == TOK_NAME && pn->pn_op != JSOP_NOP) {
+ if (!BindNameToSlot(cx, tc, pn, JS_FALSE))
+ return JS_FALSE;
+ if (pn->pn_slot < 0 && pn->pn_op != JSOP_ARGUMENTS) {
+ /*
+ * Not an argument or local variable use, so this expression
+ * could invoke a getter that has side effects.
+ */
+ *answer = JS_TRUE;
+ }
+ }
+ pn2 = pn->pn_expr;
+ if (pn->pn_type == TOK_DOT) {
+ if (pn2->pn_type == TOK_NAME &&
+ !BindNameToSlot(cx, tc, pn2, JS_FALSE)) {
+ return JS_FALSE;
+ }
+ if (!(pn2->pn_op == JSOP_ARGUMENTS &&
+ pn->pn_atom == cx->runtime->atomState.lengthAtom)) {
+ /*
+ * Any dotted property reference could call a getter, except
+ * for arguments.length where arguments is unambiguous.
+ */
+ *answer = JS_TRUE;
+ }
+ }
+ ok = CheckSideEffects(cx, tc, pn2, answer);
+ break;
+
+ case PN_NULLARY:
+ if (pn->pn_type == TOK_DEBUGGER)
+ *answer = JS_TRUE;
+ break;
+ }
+ return ok;
+}
+
+/*
+ * Secret handshake with js_EmitTree's TOK_LP/TOK_NEW case logic, to flag all
+ * uses of JSOP_GETMETHOD that implicitly qualify the method property's name
+ * with a function:: prefix. All other JSOP_GETMETHOD and JSOP_SETMETHOD uses
+ * must be explicit, so we need a distinct source note (SRC_METHODBASE rather
+ * than SRC_PCBASE) for round-tripping through the beloved decompiler.
+ */
+#define JSPROP_IMPLICIT_FUNCTION_NAMESPACE 0x100
+
+static jssrcnote
+SrcNoteForPropOp(JSParseNode *pn, JSOp op)
+{
+ return ((op == JSOP_GETMETHOD &&
+ !(pn->pn_attrs & JSPROP_IMPLICIT_FUNCTION_NAMESPACE)) ||
+ op == JSOP_SETMETHOD)
+ ? SRC_METHODBASE
+ : SRC_PCBASE;
+}
+
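+/*
+ * Emit code for a dotted property access.  arguments.length is optimized to
+ * JSOP_ARGCNT; long dotted chains are walked iteratively (reversing the
+ * pn_expr links in place) to avoid deep recursion; and each name op is
+ * preceded by a SRC_PCBASE or SRC_METHODBASE note recording the offset of
+ * its base expression for the decompiler.
+ */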
+static JSBool
+EmitPropOp(JSContext *cx, JSParseNode *pn, JSOp op, JSCodeGenerator *cg)
+{
+ JSParseNode *pn2, *pndot, *pnup, *pndown;
+ ptrdiff_t top;
+
+ pn2 = pn->pn_expr;
+ if (op == JSOP_GETPROP &&
+ pn->pn_type == TOK_DOT &&
+ pn2->pn_type == TOK_NAME) {
+ /* Try to optimize arguments.length into JSOP_ARGCNT. */
+ if (!BindNameToSlot(cx, &cg->treeContext, pn2, JS_FALSE))
+ return JS_FALSE;
+ if (pn2->pn_op == JSOP_ARGUMENTS &&
+ pn->pn_atom == cx->runtime->atomState.lengthAtom) {
+ return js_Emit1(cx, cg, JSOP_ARGCNT) >= 0;
+ }
+ }
+
+ /*
+ * If the object operand is also a dotted property reference, reverse the
+ * list linked via pn_expr temporarily so we can iterate over it from the
+ * bottom up (reversing again as we go), to avoid excessive recursion.
+ */
+ if (pn2->pn_type == TOK_DOT) {
+ pndot = pn2;
+ pnup = NULL;
+ top = CG_OFFSET(cg);
+ for (;;) {
+ /* Reverse pndot->pn_expr to point up, not down. */
+ pndot->pn_offset = top;
+ pndown = pndot->pn_expr;
+ pndot->pn_expr = pnup;
+ if (pndown->pn_type != TOK_DOT)
+ break;
+ pnup = pndot;
+ pndot = pndown;
+ }
+
+ /* pndown is a primary expression, not a dotted property reference. */
+ if (!js_EmitTree(cx, cg, pndown))
+ return JS_FALSE;
+
+ do {
+ /* Walk back up the list, emitting annotated name ops. */
+ if (js_NewSrcNote2(cx, cg, SrcNoteForPropOp(pndot, pndot->pn_op),
+ CG_OFFSET(cg) - pndown->pn_offset) < 0) {
+ return JS_FALSE;
+ }
+ if (!EmitAtomOp(cx, pndot, pndot->pn_op, cg))
+ return JS_FALSE;
+
+ /* Reverse the pn_expr link again. */
+ pnup = pndot->pn_expr;
+ pndot->pn_expr = pndown;
+ pndown = pndot;
+ } while ((pndot = pnup) != NULL);
+ } else {
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ }
+
+ if (js_NewSrcNote2(cx, cg, SrcNoteForPropOp(pn, op),
+ CG_OFFSET(cg) - pn2->pn_offset) < 0) {
+ return JS_FALSE;
+ }
+ if (!pn->pn_atom) {
+ JS_ASSERT(op == JSOP_IMPORTALL);
+ if (js_Emit1(cx, cg, op) < 0)
+ return JS_FALSE;
+ } else {
+ if (!EmitAtomOp(cx, pn, op, cg))
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+
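+/*
+ * Emit code for an indexed element access, including the flattened,
+ * left-associative PN_LIST form.  arguments[i] with a small constant index
+ * is optimized to JSOP_ARGSUB; otherwise emit the base, the index, a
+ * SRC_PCBASE note, and finally op.
+ */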
+static JSBool
+EmitElemOp(JSContext *cx, JSParseNode *pn, JSOp op, JSCodeGenerator *cg)
+{
+ ptrdiff_t top;
+ JSParseNode *left, *right, *next, ltmp, rtmp;
+ jsint slot;
+
+ top = CG_OFFSET(cg);
+ if (pn->pn_arity == PN_LIST) {
+ /* Left-associative operator chain to avoid too much recursion. */
+ JS_ASSERT(pn->pn_op == JSOP_GETELEM || pn->pn_op == JSOP_IMPORTELEM);
+ JS_ASSERT(pn->pn_count >= 3);
+ left = pn->pn_head;
+ right = PN_LAST(pn);
+ next = left->pn_next;
+ JS_ASSERT(next != right);
+
+ /*
+ * Try to optimize arguments[0][j]... into JSOP_ARGSUB<0> followed by
+ * one or more index expression and JSOP_GETELEM op pairs.
+ */
+ if (left->pn_type == TOK_NAME && next->pn_type == TOK_NUMBER) {
+ if (!BindNameToSlot(cx, &cg->treeContext, left, JS_FALSE))
+ return JS_FALSE;
+ if (left->pn_op == JSOP_ARGUMENTS &&
+ JSDOUBLE_IS_INT(next->pn_dval, slot) &&
+ (jsuint)slot < JS_BIT(16)) {
+ left->pn_offset = next->pn_offset = top;
+ EMIT_UINT16_IMM_OP(JSOP_ARGSUB, (jsatomid)slot);
+ left = next;
+ next = left->pn_next;
+ }
+ }
+
+ /*
+ * Check whether we generated JSOP_ARGSUB, just above, and have only
+ * one more index expression to emit. Given arguments[0][j], we must
+ * skip the while loop altogether, falling through to emit code for j
+ * (in the subtree referenced by right), followed by the annotated op,
+ * at the bottom of this function.
+ */
+ JS_ASSERT(next != right || pn->pn_count == 3);
+ if (left == pn->pn_head) {
+ if (!js_EmitTree(cx, cg, left))
+ return JS_FALSE;
+ }
+ while (next != right) {
+ if (!js_EmitTree(cx, cg, next))
+ return JS_FALSE;
+ if (js_NewSrcNote2(cx, cg, SRC_PCBASE, CG_OFFSET(cg) - top) < 0)
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_GETELEM) < 0)
+ return JS_FALSE;
+ next = next->pn_next;
+ }
+ } else {
+ if (pn->pn_arity == PN_NAME) {
+ /*
+ * Set left and right so pn appears to be a TOK_LB node, instead
+ * of a TOK_DOT node. See the TOK_FOR/IN case in js_EmitTree, and
+ * EmitDestructuringOps nearer below. In the destructuring case,
+ * the base expression (pn_expr) of the name may be null, which
+ * means we have to emit a JSOP_BINDNAME.
+ */
+ left = pn->pn_expr;
+ if (!left) {
+ left = &ltmp;
+ left->pn_type = TOK_OBJECT;
+ left->pn_op = JSOP_BINDNAME;
+ left->pn_arity = PN_NULLARY;
+ left->pn_pos = pn->pn_pos;
+ left->pn_atom = pn->pn_atom;
+ }
+ right = &rtmp;
+ right->pn_type = TOK_STRING;
+ JS_ASSERT(ATOM_IS_STRING(pn->pn_atom));
+ right->pn_op = js_IsIdentifier(ATOM_TO_STRING(pn->pn_atom))
+ ? JSOP_QNAMEPART
+ : JSOP_STRING;
+ right->pn_arity = PN_NULLARY;
+ right->pn_pos = pn->pn_pos;
+ right->pn_atom = pn->pn_atom;
+ } else {
+ JS_ASSERT(pn->pn_arity == PN_BINARY);
+ left = pn->pn_left;
+ right = pn->pn_right;
+ }
+
+ /* Try to optimize arguments[0] (e.g.) into JSOP_ARGSUB<0>. */
+ if (op == JSOP_GETELEM &&
+ left->pn_type == TOK_NAME &&
+ right->pn_type == TOK_NUMBER) {
+ if (!BindNameToSlot(cx, &cg->treeContext, left, JS_FALSE))
+ return JS_FALSE;
+ if (left->pn_op == JSOP_ARGUMENTS &&
+ JSDOUBLE_IS_INT(right->pn_dval, slot) &&
+ (jsuint)slot < JS_BIT(16)) {
+ left->pn_offset = right->pn_offset = top;
+ EMIT_UINT16_IMM_OP(JSOP_ARGSUB, (jsatomid)slot);
+ return JS_TRUE;
+ }
+ }
+
+ if (!js_EmitTree(cx, cg, left))
+ return JS_FALSE;
+ }
+
+ /* The right side of the descendant operator is implicitly quoted. */
+ JS_ASSERT(op != JSOP_DESCENDANTS || right->pn_type != TOK_STRING ||
+ right->pn_op == JSOP_QNAMEPART);
+ if (!js_EmitTree(cx, cg, right))
+ return JS_FALSE;
+ if (js_NewSrcNote2(cx, cg, SRC_PCBASE, CG_OFFSET(cg) - top) < 0)
+ return JS_FALSE;
+ return js_Emit1(cx, cg, op) >= 0;
+}
+
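+/*
+ * Emit the most compact bytecode for the numeric literal dval: JSOP_ZERO,
+ * JSOP_ONE, JSOP_UINT16, or JSOP_UINT24 for small non-negative integers,
+ * falling back to an atomized number pushed via JSOP_NUMBER.
+ */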
+static JSBool
+EmitNumberOp(JSContext *cx, jsdouble dval, JSCodeGenerator *cg)
+{
+ jsint ival;
+ jsatomid atomIndex;
+ ptrdiff_t off;
+ jsbytecode *pc;
+ JSAtom *atom;
+ JSAtomListElement *ale;
+
+ if (JSDOUBLE_IS_INT(dval, ival) && INT_FITS_IN_JSVAL(ival)) {
+ if (ival == 0)
+ return js_Emit1(cx, cg, JSOP_ZERO) >= 0;
+ if (ival == 1)
+ return js_Emit1(cx, cg, JSOP_ONE) >= 0;
+
+ atomIndex = (jsatomid)ival;
+ if (atomIndex < JS_BIT(16)) {
+ EMIT_UINT16_IMM_OP(JSOP_UINT16, atomIndex);
+ return JS_TRUE;
+ }
+
+ if (atomIndex < JS_BIT(24)) {
+ off = js_EmitN(cx, cg, JSOP_UINT24, 3);
+ if (off < 0)
+ return JS_FALSE;
+ pc = CG_CODE(cg, off);
+ SET_LITERAL_INDEX(pc, atomIndex);
+ return JS_TRUE;
+ }
+
+ atom = js_AtomizeInt(cx, ival, 0);
+ } else {
+ atom = js_AtomizeDouble(cx, dval, 0);
+ }
+ if (!atom)
+ return JS_FALSE;
+
+ ale = js_IndexAtom(cx, atom, &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ return EmitAtomIndexOp(cx, JSOP_NUMBER, ALE_INDEX(ale), cg);
+}
+
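+/*
+ * Emit a switch statement.  switchOp starts as JSOP_TABLESWITCH, the densest
+ * form, and is downgraded below when the cases are not dense compile-time
+ * integer constants (JSOP_CONDSWITCH handles non-constant case expressions).
+ */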
+static JSBool
+EmitSwitch(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn,
+ JSStmtInfo *stmtInfo)
+{
+ JSOp switchOp;
+ JSBool ok, hasDefault, constPropagated;
+ ptrdiff_t top, off, defaultOffset;
+ JSParseNode *pn2, *pn3, *pn4;
+ uint32 caseCount, tableLength;
+ JSParseNode **table;
+ jsdouble d;
+ jsint i, low, high;
+ jsval v;
+ JSAtom *atom;
+ JSAtomListElement *ale;
+ intN noteIndex;
+ size_t switchSize, tableSize;
+ jsbytecode *pc, *savepc;
+#if JS_HAS_BLOCK_SCOPE
+ JSObject *obj;
+ jsint count;
+#endif
+
+ /* Try for most optimal, fall back if not dense ints, and per ECMAv2. */
+ switchOp = JSOP_TABLESWITCH;
+ ok = JS_TRUE;
+ hasDefault = constPropagated = JS_FALSE;
+ defaultOffset = -1;
+
+ /*
+ * If the switch contains let variables scoped by its body, model the
+ * resulting block on the stack first, before emitting the discriminant's
+ * bytecode (in case the discriminant contains a stack-model dependency
+ * such as a let expression).
+ */
+ pn2 = pn->pn_right;
+#if JS_HAS_BLOCK_SCOPE
+ if (pn2->pn_type == TOK_LEXICALSCOPE) {
+ atom = pn2->pn_atom;
+ obj = ATOM_TO_OBJECT(atom);
+ OBJ_SET_BLOCK_DEPTH(cx, obj, cg->stackDepth);
+
+ /*
+ * Push the body's block scope before discriminant code-gen for proper
+ * static block scope linkage in case the discriminant contains a let
+ * expression. The block's locals must lie under the discriminant on
+ * the stack so that case-dispatch bytecodes can find the discriminant
+ * on top of stack.
+ */
+ js_PushBlockScope(&cg->treeContext, stmtInfo, atom, -1);
+ stmtInfo->type = STMT_SWITCH;
+
+ count = OBJ_BLOCK_COUNT(cx, obj);
+ cg->stackDepth += count;
+ if ((uintN)cg->stackDepth > cg->maxStackDepth)
+ cg->maxStackDepth = cg->stackDepth;
+
+ /* Emit JSOP_ENTERBLOCK before code to evaluate the discriminant. */
+ ale = js_IndexAtom(cx, atom, &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ EMIT_ATOM_INDEX_OP(JSOP_ENTERBLOCK, ALE_INDEX(ale));
+
+ /*
+ * Pop the switch's statement info around discriminant code-gen. Note
+ * how this leaves cg->treeContext.blockChain referencing the switch's
+ * block scope object, which is necessary for correct block parenting
+ * in the case where the discriminant contains a let expression.
+ */
+ cg->treeContext.topStmt = stmtInfo->down;
+ cg->treeContext.topScopeStmt = stmtInfo->downScope;
+ }
+#ifdef __GNUC__
+ else {
+ atom = NULL;
+ count = -1;
+ }
+#endif
+#endif
+
+ /*
+ * Emit code for the discriminant first (or nearly first, in the case of a
+ * switch whose body is a block scope).
+ */
+ if (!js_EmitTree(cx, cg, pn->pn_left))
+ return JS_FALSE;
+
+ /* Switch bytecodes run from here till end of final case. */
+ top = CG_OFFSET(cg);
+#if !JS_HAS_BLOCK_SCOPE
+ js_PushStatement(&cg->treeContext, stmtInfo, STMT_SWITCH, top);
+#else
+ if (pn2->pn_type == TOK_LC) {
+ js_PushStatement(&cg->treeContext, stmtInfo, STMT_SWITCH, top);
+ } else {
+ /* Re-push the switch's statement info record. */
+ cg->treeContext.topStmt = cg->treeContext.topScopeStmt = stmtInfo;
+
+ /* Set the statement info record's idea of top. */
+ stmtInfo->update = top;
+
+ /* Advance pn2 to refer to the switch case list. */
+ pn2 = pn2->pn_expr;
+ }
+#endif
+
+ caseCount = pn2->pn_count;
+ tableLength = 0;
+ table = NULL;
+
+ if (caseCount == 0 ||
+ (caseCount == 1 &&
+ (hasDefault = (pn2->pn_head->pn_type == TOK_DEFAULT)))) {
+ caseCount = 0;
+ low = 0;
+ high = -1;
+ } else {
+#define INTMAP_LENGTH 256
+ jsbitmap intmap_space[INTMAP_LENGTH];
+ jsbitmap *intmap = NULL;
+ int32 intmap_bitlen = 0;
+
+ low = JSVAL_INT_MAX;
+ high = JSVAL_INT_MIN;
+
+ for (pn3 = pn2->pn_head; pn3; pn3 = pn3->pn_next) {
+ if (pn3->pn_type == TOK_DEFAULT) {
+ hasDefault = JS_TRUE;
+ caseCount--; /* one of the "cases" was the default */
+ continue;
+ }
+
+ JS_ASSERT(pn3->pn_type == TOK_CASE);
+ if (switchOp == JSOP_CONDSWITCH)
+ continue;
+
+ pn4 = pn3->pn_left;
+ switch (pn4->pn_type) {
+ case TOK_NUMBER:
+ d = pn4->pn_dval;
+ if (JSDOUBLE_IS_INT(d, i) && INT_FITS_IN_JSVAL(i)) {
+ pn3->pn_val = INT_TO_JSVAL(i);
+ } else {
+ atom = js_AtomizeDouble(cx, d, 0);
+ if (!atom) {
+ ok = JS_FALSE;
+ goto release;
+ }
+ pn3->pn_val = ATOM_KEY(atom);
+ }
+ break;
+ case TOK_STRING:
+ pn3->pn_val = ATOM_KEY(pn4->pn_atom);
+ break;
+ case TOK_NAME:
+ if (!pn4->pn_expr) {
+ ok = js_LookupCompileTimeConstant(cx, cg, pn4->pn_atom, &v);
+ if (!ok)
+ goto release;
+ if (!JSVAL_IS_VOID(v)) {
+ pn3->pn_val = v;
+ constPropagated = JS_TRUE;
+ break;
+ }
+ }
+ /* FALL THROUGH */
+ case TOK_PRIMARY:
+ if (pn4->pn_op == JSOP_TRUE) {
+ pn3->pn_val = JSVAL_TRUE;
+ break;
+ }
+ if (pn4->pn_op == JSOP_FALSE) {
+ pn3->pn_val = JSVAL_FALSE;
+ break;
+ }
+ /* FALL THROUGH */
+ default:
+ switchOp = JSOP_CONDSWITCH;
+ continue;
+ }
+
+ JS_ASSERT(JSVAL_IS_NUMBER(pn3->pn_val) ||
+ JSVAL_IS_STRING(pn3->pn_val) ||
+ JSVAL_IS_BOOLEAN(pn3->pn_val));
+
+ if (switchOp != JSOP_TABLESWITCH)
+ continue;
+ if (!JSVAL_IS_INT(pn3->pn_val)) {
+ switchOp = JSOP_LOOKUPSWITCH;
+ continue;
+ }
+ i = JSVAL_TO_INT(pn3->pn_val);
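+ /* Case values outside the signed 16-bit range force a lookup switch. */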
+ if ((jsuint)(i + (jsint)JS_BIT(15)) >= (jsuint)JS_BIT(16)) {
+ switchOp = JSOP_LOOKUPSWITCH;
+ continue;
+ }
+ if (i < low)
+ low = i;
+ if (high < i)
+ high = i;
+
+ /*
+ * Check for duplicates, which require a JSOP_LOOKUPSWITCH.
+ * We bias i by 65536 if it's negative, and hope that's a rare
+ * case (because it requires a malloc'd bitmap).
+ */
+ if (i < 0)
+ i += JS_BIT(16);
+ if (i >= intmap_bitlen) {
+ if (!intmap &&
+ i < (INTMAP_LENGTH << JS_BITS_PER_WORD_LOG2)) {
+ intmap = intmap_space;
+ intmap_bitlen = INTMAP_LENGTH << JS_BITS_PER_WORD_LOG2;
+ } else {
+ /* Just grab 8K for the worst-case bitmap. */
+ intmap_bitlen = JS_BIT(16);
+ intmap = (jsbitmap *)
+ JS_malloc(cx,
+ (JS_BIT(16) >> JS_BITS_PER_WORD_LOG2)
+ * sizeof(jsbitmap));
+ if (!intmap) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ }
+ memset(intmap, 0, intmap_bitlen >> JS_BITS_PER_BYTE_LOG2);
+ }
+ if (JS_TEST_BIT(intmap, i)) {
+ switchOp = JSOP_LOOKUPSWITCH;
+ continue;
+ }
+ JS_SET_BIT(intmap, i);
+ }
+
+ release:
+ if (intmap && intmap != intmap_space)
+ JS_free(cx, intmap);
+ if (!ok)
+ return JS_FALSE;
+
+ /*
+ * Compute table length and select lookup instead if overlarge or
+ * more than half-sparse.
+ */
+ if (switchOp == JSOP_TABLESWITCH) {
+ tableLength = (uint32)(high - low + 1);
+ if (tableLength >= JS_BIT(16) || tableLength > 2 * caseCount)
+ switchOp = JSOP_LOOKUPSWITCH;
+ } else if (switchOp == JSOP_LOOKUPSWITCH) {
+ /*
+ * Lookup switch supports only atom indexes below 64K limit.
+ * Conservatively estimate the maximum possible index during
+ * switch generation and use conditional switch if it exceeds
+ * the limit.
+ */
+ if (caseCount + cg->atomList.count > JS_BIT(16))
+ switchOp = JSOP_CONDSWITCH;
+ }
+ }
+
+ /*
+ * Emit a note with two offsets: first tells total switch code length,
+ * second tells offset to first JSOP_CASE if condswitch.
+ */
+ noteIndex = js_NewSrcNote3(cx, cg, SRC_SWITCH, 0, 0);
+ if (noteIndex < 0)
+ return JS_FALSE;
+
+ if (switchOp == JSOP_CONDSWITCH) {
+ /*
+ * 0 bytes of immediate for unoptimized ECMAv2 switch.
+ */
+ switchSize = 0;
+ } else if (switchOp == JSOP_TABLESWITCH) {
+ /*
+ * 3 offsets (len, low, high) before the table, 1 per entry.
+ */
+ switchSize = (size_t)(JUMP_OFFSET_LEN * (3 + tableLength));
+ } else {
+ /*
+ * JSOP_LOOKUPSWITCH:
+ * 1 offset (len) and 1 atom index (npairs) before the table,
+ * 1 atom index and 1 jump offset per entry.
+ */
+ switchSize = (size_t)(JUMP_OFFSET_LEN + ATOM_INDEX_LEN +
+ (ATOM_INDEX_LEN + JUMP_OFFSET_LEN) * caseCount);
+ }
+
+ /*
+ * Emit switchOp followed by switchSize bytes of jump or lookup table.
+ *
+ * If switchOp is JSOP_LOOKUPSWITCH or JSOP_TABLESWITCH, it is crucial
+ * to emit the immediate operand(s) by which bytecode readers such as
+ * BuildSpanDepTable discover the length of the switch opcode *before*
+ * calling js_SetJumpOffset (which may call BuildSpanDepTable). It's
+ * also important to zero all unknown jump offset immediate operands,
+ * so they can be converted to span dependencies with null targets to
+ * be computed later (js_EmitN zeros switchSize bytes after switchOp).
+ */
+ if (js_EmitN(cx, cg, switchOp, switchSize) < 0)
+ return JS_FALSE;
+
+ off = -1;
+ if (switchOp == JSOP_CONDSWITCH) {
+ intN caseNoteIndex = -1;
+ JSBool beforeCases = JS_TRUE;
+
+ /* Emit code for evaluating cases and jumping to case statements. */
+ for (pn3 = pn2->pn_head; pn3; pn3 = pn3->pn_next) {
+ pn4 = pn3->pn_left;
+ if (pn4 && !js_EmitTree(cx, cg, pn4))
+ return JS_FALSE;
+ if (caseNoteIndex >= 0) {
+ /* off is the previous JSOP_CASE's bytecode offset. */
+ if (!js_SetSrcNoteOffset(cx, cg, (uintN)caseNoteIndex, 0,
+ CG_OFFSET(cg) - off)) {
+ return JS_FALSE;
+ }
+ }
+ if (!pn4) {
+ JS_ASSERT(pn3->pn_type == TOK_DEFAULT);
+ continue;
+ }
+ caseNoteIndex = js_NewSrcNote2(cx, cg, SRC_PCDELTA, 0);
+ if (caseNoteIndex < 0)
+ return JS_FALSE;
+ off = EmitJump(cx, cg, JSOP_CASE, 0);
+ if (off < 0)
+ return JS_FALSE;
+ pn3->pn_offset = off;
+ if (beforeCases) {
+ uintN noteCount, noteCountDelta;
+
+ /* Switch note's second offset is to first JSOP_CASE. */
+ noteCount = CG_NOTE_COUNT(cg);
+ if (!js_SetSrcNoteOffset(cx, cg, (uintN)noteIndex, 1,
+ off - top)) {
+ return JS_FALSE;
+ }
+ noteCountDelta = CG_NOTE_COUNT(cg) - noteCount;
+ if (noteCountDelta != 0)
+ caseNoteIndex += noteCountDelta;
+ beforeCases = JS_FALSE;
+ }
+ }
+
+ /*
+ * If we didn't have an explicit default (which could fall in between
+ * cases, preventing us from fusing this js_SetSrcNoteOffset with the
+ * call in the loop above), link the last case to the implicit default
+ * for the decompiler.
+ */
+ if (!hasDefault &&
+ caseNoteIndex >= 0 &&
+ !js_SetSrcNoteOffset(cx, cg, (uintN)caseNoteIndex, 0,
+ CG_OFFSET(cg) - off)) {
+ return JS_FALSE;
+ }
+
+ /* Emit default even if no explicit default statement. */
+ defaultOffset = EmitJump(cx, cg, JSOP_DEFAULT, 0);
+ if (defaultOffset < 0)
+ return JS_FALSE;
+ } else {
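+ /*
+ * Skip the first jump-offset slot, which is filled in with the
+ * default (or end-of-switch) target near the end of this function;
+ * pc now addresses the low/high bounds of a table switch, or the
+ * pair count of a lookup switch.
+ */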
+ pc = CG_CODE(cg, top + JUMP_OFFSET_LEN);
+
+ if (switchOp == JSOP_TABLESWITCH) {
+ /* Fill in switch bounds, which we know fit in 16-bit offsets. */
+ SET_JUMP_OFFSET(pc, low);
+ pc += JUMP_OFFSET_LEN;
+ SET_JUMP_OFFSET(pc, high);
+ pc += JUMP_OFFSET_LEN;
+
+ /*
+ * Use malloc to avoid arena bloat for programs with many switches.
+ * We free table if non-null at label out, so all control flow must
+ * exit this function through goto out or goto bad.
+ */
+ if (tableLength != 0) {
+ tableSize = (size_t)tableLength * sizeof *table;
+ table = (JSParseNode **) JS_malloc(cx, tableSize);
+ if (!table)
+ return JS_FALSE;
+ memset(table, 0, tableSize);
+ for (pn3 = pn2->pn_head; pn3; pn3 = pn3->pn_next) {
+ if (pn3->pn_type == TOK_DEFAULT)
+ continue;
+ i = JSVAL_TO_INT(pn3->pn_val);
+ i -= low;
+ JS_ASSERT((uint32)i < tableLength);
+ table[i] = pn3;
+ }
+ }
+ } else {
+ JS_ASSERT(switchOp == JSOP_LOOKUPSWITCH);
+
+ /* Fill in the number of cases. */
+ SET_ATOM_INDEX(pc, caseCount);
+ pc += ATOM_INDEX_LEN;
+ }
+
+ /*
+ * After this point, all control flow involving JSOP_TABLESWITCH
+ * must set ok and goto out to exit this function. To keep things
+ * simple, all switchOp cases exit that way.
+ */
+ if (constPropagated) {
+ /*
+ * Skip switchOp, as we are not setting jump offsets in the two
+ * for loops below. We'll restore CG_NEXT(cg) from savepc after,
+ * unless there was an error.
+ */
+ savepc = CG_NEXT(cg);
+ CG_NEXT(cg) = pc + 1;
+ if (switchOp == JSOP_TABLESWITCH) {
+ for (i = 0; i < (jsint)tableLength; i++) {
+ pn3 = table[i];
+ if (pn3 &&
+ (pn4 = pn3->pn_left) != NULL &&
+ pn4->pn_type == TOK_NAME) {
+ /* Note a propagated constant with the const's name. */
+ JS_ASSERT(!pn4->pn_expr);
+ ale = js_IndexAtom(cx, pn4->pn_atom, &cg->atomList);
+ if (!ale)
+ goto bad;
+ CG_NEXT(cg) = pc;
+ if (js_NewSrcNote2(cx, cg, SRC_LABEL, (ptrdiff_t)
+ ALE_INDEX(ale)) < 0) {
+ goto bad;
+ }
+ }
+ pc += JUMP_OFFSET_LEN;
+ }
+ } else {
+ for (pn3 = pn2->pn_head; pn3; pn3 = pn3->pn_next) {
+ pn4 = pn3->pn_left;
+ if (pn4 && pn4->pn_type == TOK_NAME) {
+ /* Note a propagated constant with the const's name. */
+ JS_ASSERT(!pn4->pn_expr);
+ ale = js_IndexAtom(cx, pn4->pn_atom, &cg->atomList);
+ if (!ale)
+ goto bad;
+ CG_NEXT(cg) = pc;
+ if (js_NewSrcNote2(cx, cg, SRC_LABEL, (ptrdiff_t)
+ ALE_INDEX(ale)) < 0) {
+ goto bad;
+ }
+ }
+ pc += ATOM_INDEX_LEN + JUMP_OFFSET_LEN;
+ }
+ }
+ CG_NEXT(cg) = savepc;
+ }
+ }
+
+ /* Emit code for each case's statements, copying pn_offset up to pn3. */
+ for (pn3 = pn2->pn_head; pn3; pn3 = pn3->pn_next) {
+ if (switchOp == JSOP_CONDSWITCH && pn3->pn_type != TOK_DEFAULT)
+ CHECK_AND_SET_JUMP_OFFSET_AT(cx, cg, pn3->pn_offset);
+ pn4 = pn3->pn_right;
+ ok = js_EmitTree(cx, cg, pn4);
+ if (!ok)
+ goto out;
+ pn3->pn_offset = pn4->pn_offset;
+ if (pn3->pn_type == TOK_DEFAULT)
+ off = pn3->pn_offset - top;
+ }
+
+ if (!hasDefault) {
+ /* If no default case, offset for default is to end of switch. */
+ off = CG_OFFSET(cg) - top;
+ }
+
+ /* We better have set "off" by now. */
+ JS_ASSERT(off != -1);
+
+ /* Set the default offset (to end of switch if no default). */
+ if (switchOp == JSOP_CONDSWITCH) {
+ pc = NULL;
+ JS_ASSERT(defaultOffset != -1);
+ ok = js_SetJumpOffset(cx, cg, CG_CODE(cg, defaultOffset),
+ off - (defaultOffset - top));
+ if (!ok)
+ goto out;
+ } else {
+ pc = CG_CODE(cg, top);
+ ok = js_SetJumpOffset(cx, cg, pc, off);
+ if (!ok)
+ goto out;
+ pc += JUMP_OFFSET_LEN;
+ }
+
+ /* Set the SRC_SWITCH note's offset operand to tell end of switch. */
+ off = CG_OFFSET(cg) - top;
+ ok = js_SetSrcNoteOffset(cx, cg, (uintN)noteIndex, 0, off);
+ if (!ok)
+ goto out;
+
+ if (switchOp == JSOP_TABLESWITCH) {
+ /* Skip over the already-initialized switch bounds. */
+ pc += 2 * JUMP_OFFSET_LEN;
+
+ /* Fill in the jump table, if there is one. */
+ for (i = 0; i < (jsint)tableLength; i++) {
+ pn3 = table[i];
+ off = pn3 ? pn3->pn_offset - top : 0;
+ ok = js_SetJumpOffset(cx, cg, pc, off);
+ if (!ok)
+ goto out;
+ pc += JUMP_OFFSET_LEN;
+ }
+ } else if (switchOp == JSOP_LOOKUPSWITCH) {
+ /* Skip over the already-initialized number of cases. */
+ pc += ATOM_INDEX_LEN;
+
+ for (pn3 = pn2->pn_head; pn3; pn3 = pn3->pn_next) {
+ if (pn3->pn_type == TOK_DEFAULT)
+ continue;
+ atom = js_AtomizeValue(cx, pn3->pn_val, 0);
+ if (!atom)
+ goto bad;
+ ale = js_IndexAtom(cx, atom, &cg->atomList);
+ if (!ale)
+ goto bad;
+ SET_ATOM_INDEX(pc, ALE_INDEX(ale));
+ pc += ATOM_INDEX_LEN;
+
+ off = pn3->pn_offset - top;
+ ok = js_SetJumpOffset(cx, cg, pc, off);
+ if (!ok)
+ goto out;
+ pc += JUMP_OFFSET_LEN;
+ }
+ }
+
+out:
+ if (table)
+ JS_free(cx, table);
+ if (ok) {
+ ok = js_PopStatementCG(cx, cg);
+
+#if JS_HAS_BLOCK_SCOPE
+ if (ok && pn->pn_right->pn_type == TOK_LEXICALSCOPE) {
+ EMIT_UINT16_IMM_OP(JSOP_LEAVEBLOCK, count);
+ cg->stackDepth -= count;
+ }
+#endif
+ }
+ return ok;
+
+bad:
+ ok = JS_FALSE;
+ goto out;
+}
+
+JSBool
+js_EmitFunctionBytecode(JSContext *cx, JSCodeGenerator *cg, JSParseNode *body)
+{
+ if (!js_AllocTryNotes(cx, cg))
+ return JS_FALSE;
+
+ if (cg->treeContext.flags & TCF_FUN_IS_GENERATOR) {
+ /* JSOP_GENERATOR must be the first instruction. */
+ CG_SWITCH_TO_PROLOG(cg);
+ JS_ASSERT(CG_NEXT(cg) == CG_BASE(cg));
+ if (js_Emit1(cx, cg, JSOP_GENERATOR) < 0)
+ return JS_FALSE;
+ CG_SWITCH_TO_MAIN(cg);
+ }
+
+ return js_EmitTree(cx, cg, body) &&
+ js_Emit1(cx, cg, JSOP_STOP) >= 0;
+}
+
+JSBool
+js_EmitFunctionBody(JSContext *cx, JSCodeGenerator *cg, JSParseNode *body,
+ JSFunction *fun)
+{
+ JSStackFrame *fp, frame;
+ JSObject *funobj;
+ JSBool ok;
+
+ fp = cx->fp;
+ funobj = fun->object;
+ JS_ASSERT(!fp || (fp->fun != fun && fp->varobj != funobj &&
+ fp->scopeChain != funobj));
+ memset(&frame, 0, sizeof frame);
+ frame.fun = fun;
+ frame.varobj = frame.scopeChain = funobj;
+ frame.down = fp;
+ frame.flags = JS_HAS_COMPILE_N_GO_OPTION(cx)
+ ? JSFRAME_COMPILING | JSFRAME_COMPILE_N_GO
+ : JSFRAME_COMPILING;
+ cx->fp = &frame;
+ ok = js_EmitFunctionBytecode(cx, cg, body);
+ cx->fp = fp;
+ if (!ok)
+ return JS_FALSE;
+
+ if (!js_NewScriptFromCG(cx, cg, fun))
+ return JS_FALSE;
+
+ JS_ASSERT(FUN_INTERPRETED(fun));
+ return JS_TRUE;
+}
+
+/* A macro for inlining at the top of js_EmitTree (whence it came). */
+#define UPDATE_LINE_NUMBER_NOTES(cx, cg, pn) \
+ JS_BEGIN_MACRO \
+ uintN line_ = (pn)->pn_pos.begin.lineno; \
+ uintN delta_ = line_ - CG_CURRENT_LINE(cg); \
+ if (delta_ != 0) { \
+ /* \
+ * Encode any change in the current source line number by using \
+ * either several SRC_NEWLINE notes or just one SRC_SETLINE note, \
+ * whichever consumes less space. \
+ * \
+ * NB: We handle backward line number deltas (possible with for \
+ * loops where the update part is emitted after the body, but its \
+ * line number is <= any line number in the body) here by letting \
+ * unsigned delta_ wrap to a very large number, which triggers a \
+ * SRC_SETLINE. \
+ */ \
+ CG_CURRENT_LINE(cg) = line_; \
+ if (delta_ >= (uintN)(2 + ((line_ > SN_3BYTE_OFFSET_MASK)<<1))) { \
+ if (js_NewSrcNote2(cx, cg, SRC_SETLINE, (ptrdiff_t)line_) < 0)\
+ return JS_FALSE; \
+ } else { \
+ do { \
+ if (js_NewSrcNote(cx, cg, SRC_NEWLINE) < 0) \
+ return JS_FALSE; \
+ } while (--delta_ != 0); \
+ } \
+ } \
+ JS_END_MACRO
+
+/* A function, so that we avoid macro-bloating all the other callsites. */
+static JSBool
+UpdateLineNumberNotes(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
+{
+ UPDATE_LINE_NUMBER_NOTES(cx, cg, pn);
+ return JS_TRUE;
+}
+
+static JSBool
+MaybeEmitVarDecl(JSContext *cx, JSCodeGenerator *cg, JSOp prologOp,
+ JSParseNode *pn, jsatomid *result)
+{
+ jsatomid atomIndex;
+ JSAtomListElement *ale;
+
+ if (pn->pn_slot >= 0) {
+ atomIndex = (jsatomid) pn->pn_slot;
+ } else {
+ ale = js_IndexAtom(cx, pn->pn_atom, &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ atomIndex = ALE_INDEX(ale);
+ }
+
+ if ((js_CodeSpec[pn->pn_op].format & JOF_TYPEMASK) == JOF_CONST &&
+ (!(cg->treeContext.flags & TCF_IN_FUNCTION) ||
+ (cg->treeContext.flags & TCF_FUN_HEAVYWEIGHT))) {
+ /* Emit a prolog bytecode to predefine the variable. */
+ CG_SWITCH_TO_PROLOG(cg);
+ if (!UpdateLineNumberNotes(cx, cg, pn))
+ return JS_FALSE;
+ EMIT_ATOM_INDEX_OP(prologOp, atomIndex);
+ CG_SWITCH_TO_MAIN(cg);
+ }
+
+ if (result)
+ *result = atomIndex;
+ return JS_TRUE;
+}
+
+#if JS_HAS_DESTRUCTURING
+
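+/*
+ * Emitter signature shared by EmitDestructuringDecl (simple names) and
+ * EmitDestructuringDecls (nested array/object patterns), so the latter can
+ * pick the right emitter per sub-pattern node.
+ */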
+typedef JSBool
+(*DestructuringDeclEmitter)(JSContext *cx, JSCodeGenerator *cg, JSOp prologOp,
+ JSParseNode *pn);
+
+static JSBool
+EmitDestructuringDecl(JSContext *cx, JSCodeGenerator *cg, JSOp prologOp,
+ JSParseNode *pn)
+{
+ JS_ASSERT(pn->pn_type == TOK_NAME);
+ if (!BindNameToSlot(cx, &cg->treeContext, pn, prologOp == JSOP_NOP))
+ return JS_FALSE;
+
+ JS_ASSERT(pn->pn_op != JSOP_ARGUMENTS);
+ return MaybeEmitVarDecl(cx, cg, prologOp, pn, NULL);
+}
+
+static JSBool
+EmitDestructuringDecls(JSContext *cx, JSCodeGenerator *cg, JSOp prologOp,
+ JSParseNode *pn)
+{
+ JSParseNode *pn2, *pn3;
+ DestructuringDeclEmitter emitter;
+
+ if (pn->pn_type == TOK_RB) {
+ for (pn2 = pn->pn_head; pn2; pn2 = pn2->pn_next) {
+ if (pn2->pn_type == TOK_COMMA)
+ continue;
+ emitter = (pn2->pn_type == TOK_NAME)
+ ? EmitDestructuringDecl
+ : EmitDestructuringDecls;
+ if (!emitter(cx, cg, prologOp, pn2))
+ return JS_FALSE;
+ }
+ } else {
+ JS_ASSERT(pn->pn_type == TOK_RC);
+ for (pn2 = pn->pn_head; pn2; pn2 = pn2->pn_next) {
+ pn3 = pn2->pn_right;
+ emitter = (pn3->pn_type == TOK_NAME)
+ ? EmitDestructuringDecl
+ : EmitDestructuringDecls;
+ if (!emitter(cx, cg, prologOp, pn3))
+ return JS_FALSE;
+ }
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+EmitDestructuringOpsHelper(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn);
+
+static JSBool
+EmitDestructuringLHS(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn,
+ JSBool wantpop)
+{
+ jsuint slot;
+
+ /* Skip any parenthesization. */
+ while (pn->pn_type == TOK_RP)
+ pn = pn->pn_kid;
+
+ /*
+ * Now emit the lvalue opcode sequence. If the lvalue is a nested
+ * destructuring initialiser-form, call ourselves to handle it, then
+ * pop the matched value. Otherwise emit an lvalue bytecode sequence
+ * ending with a JSOP_ENUMELEM or equivalent op.
+ */
+ if (pn->pn_type == TOK_RB || pn->pn_type == TOK_RC) {
+ if (!EmitDestructuringOpsHelper(cx, cg, pn))
+ return JS_FALSE;
+ if (wantpop && js_Emit1(cx, cg, JSOP_POP) < 0)
+ return JS_FALSE;
+ } else {
+ if (pn->pn_type == TOK_NAME &&
+ !BindNameToSlot(cx, &cg->treeContext, pn, JS_FALSE)) {
+ return JS_FALSE;
+ }
+
+ switch (pn->pn_op) {
+ case JSOP_SETNAME:
+ /*
+ * NB: pn is a PN_NAME node, not a PN_BINARY. Nevertheless,
+ * we want to emit JSOP_ENUMELEM, which has format JOF_ELEM.
+ * So here and for JSOP_ENUMCONSTELEM, we use EmitElemOp.
+ */
+ if (!EmitElemOp(cx, pn, JSOP_ENUMELEM, cg))
+ return JS_FALSE;
+ break;
+
+ case JSOP_SETCONST:
+ if (!EmitElemOp(cx, pn, JSOP_ENUMCONSTELEM, cg))
+ return JS_FALSE;
+ break;
+
+ case JSOP_SETLOCAL:
+ if (wantpop) {
+ slot = (jsuint) pn->pn_slot;
+ EMIT_UINT16_IMM_OP(JSOP_SETLOCALPOP, slot);
+ break;
+ }
+ /* FALL THROUGH */
+
+ case JSOP_SETARG:
+ case JSOP_SETVAR:
+ case JSOP_SETGVAR:
+ slot = (jsuint) pn->pn_slot;
+ EMIT_UINT16_IMM_OP(pn->pn_op, slot);
+ if (wantpop && js_Emit1(cx, cg, JSOP_POP) < 0)
+ return JS_FALSE;
+ break;
+
+ default:
+#if JS_HAS_LVALUE_RETURN || JS_HAS_XML_SUPPORT
+ {
+ ptrdiff_t top;
+
+ top = CG_OFFSET(cg);
+ if (!js_EmitTree(cx, cg, pn))
+ return JS_FALSE;
+ if (js_NewSrcNote2(cx, cg, SRC_PCBASE, CG_OFFSET(cg) - top) < 0)
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_ENUMELEM) < 0)
+ return JS_FALSE;
+ break;
+ }
+#endif
+ case JSOP_ENUMELEM:
+ JS_ASSERT(0);
+ }
+ }
+
+ return JS_TRUE;
+}
+
+/*
+ * Recursive helper for EmitDestructuringOps.
+ *
+ * Given a value to destructure on the stack, walk over an object or array
+ * initialiser at pn, emitting bytecodes to match property values and store
+ * them in the lvalues identified by the matched property names.
+ */
+static JSBool
+EmitDestructuringOpsHelper(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
+{
+ jsuint index;
+ JSParseNode *pn2, *pn3;
+ JSBool doElemOp;
+
+#ifdef DEBUG
+ intN stackDepth = cg->stackDepth;
+ JS_ASSERT(stackDepth != 0);
+ JS_ASSERT(pn->pn_arity == PN_LIST);
+ JS_ASSERT(pn->pn_type == TOK_RB || pn->pn_type == TOK_RC);
+#endif
+
+ if (pn->pn_count == 0) {
+ /* Emit a DUP;POP sequence for the decompiler. */
+ return js_Emit1(cx, cg, JSOP_DUP) >= 0 &&
+ js_Emit1(cx, cg, JSOP_POP) >= 0;
+ }
+
+ index = 0;
+ for (pn2 = pn->pn_head; pn2; pn2 = pn2->pn_next) {
+ /*
+ * Duplicate the value being destructured to use as a reference base.
+ */
+ if (js_Emit1(cx, cg, JSOP_DUP) < 0)
+ return JS_FALSE;
+
+ /*
+ * Now push the property name currently being matched, which is either
+ * the array initialiser's current index, or the current property name
+ * "label" on the left of a colon in the object initialiser. Set pn3
+ * to the lvalue node, which is in the value-initializing position.
+ */
+ doElemOp = JS_TRUE;
+ if (pn->pn_type == TOK_RB) {
+ if (!EmitNumberOp(cx, index, cg))
+ return JS_FALSE;
+ pn3 = pn2;
+ } else {
+ JS_ASSERT(pn->pn_type == TOK_RC);
+ JS_ASSERT(pn2->pn_type == TOK_COLON);
+ pn3 = pn2->pn_left;
+ if (pn3->pn_type == TOK_NUMBER) {
+ /*
+ * If we are emitting an object destructuring initialiser,
+ * annotate the index op with SRC_INITPROP so we know we are
+ * not decompiling an array initialiser.
+ */
+ if (js_NewSrcNote(cx, cg, SRC_INITPROP) < 0)
+ return JS_FALSE;
+ if (!EmitNumberOp(cx, pn3->pn_dval, cg))
+ return JS_FALSE;
+ } else {
+ JS_ASSERT(pn3->pn_type == TOK_STRING ||
+ pn3->pn_type == TOK_NAME);
+ if (!EmitAtomOp(cx, pn3, JSOP_GETPROP, cg))
+ return JS_FALSE;
+ doElemOp = JS_FALSE;
+ }
+ pn3 = pn2->pn_right;
+ }
+
+ if (doElemOp) {
+ /*
+ * Ok, get the value of the matching property name. This leaves
+ * that value on top of the value being destructured, so the stack
+ * is one deeper than when we started.
+ */
+ if (js_Emit1(cx, cg, JSOP_GETELEM) < 0)
+ return JS_FALSE;
+ JS_ASSERT(cg->stackDepth == stackDepth + 1);
+ }
+
+ /* Nullary comma node makes a hole in the array destructurer. */
+ if (pn3->pn_type == TOK_COMMA && pn3->pn_arity == PN_NULLARY) {
+ JS_ASSERT(pn->pn_type == TOK_RB);
+ JS_ASSERT(pn2 == pn3);
+ if (js_Emit1(cx, cg, JSOP_POP) < 0)
+ return JS_FALSE;
+ } else {
+ if (!EmitDestructuringLHS(cx, cg, pn3, JS_TRUE))
+ return JS_FALSE;
+ }
+
+ JS_ASSERT(cg->stackDepth == stackDepth);
+ ++index;
+ }
+
+ return JS_TRUE;
+}
+
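+/*
+ * Map a declaration opcode to the matching SRC_DECL source-note type.
+ * JSOP_NOP is the pseudo-op used for 'let' declarations.
+ */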
+static ptrdiff_t
+OpToDeclType(JSOp op)
+{
+ switch (op) {
+ case JSOP_NOP:
+ return SRC_DECL_LET;
+ case JSOP_DEFCONST:
+ return SRC_DECL_CONST;
+ case JSOP_DEFVAR:
+ return SRC_DECL_VAR;
+ default:
+ return SRC_DECL_NONE;
+ }
+}
+
+static JSBool
+EmitDestructuringOps(JSContext *cx, JSCodeGenerator *cg, JSOp declOp,
+ JSParseNode *pn)
+{
+ /*
+ * If we're called from a variable declaration, help the decompiler by
+ * annotating the first JSOP_DUP that EmitDestructuringOpsHelper emits.
+ * If the destructuring initialiser is empty, our helper will emit a
+ * JSOP_DUP followed by a JSOP_POP for the decompiler.
+ */
+ if (js_NewSrcNote2(cx, cg, SRC_DESTRUCT, OpToDeclType(declOp)) < 0)
+ return JS_FALSE;
+
+ /*
+ * Call our recursive helper to emit the destructuring assignments and
+ * related stack manipulations.
+ */
+ return EmitDestructuringOpsHelper(cx, cg, pn);
+}
+
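+/*
+ * Emit a group assignment such as [a, b] = [x, y]: evaluate each right-hand
+ * element into a fresh stack slot, store those slots into the left-hand
+ * targets, then pop the temporaries back off the stack.
+ */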
+static JSBool
+EmitGroupAssignment(JSContext *cx, JSCodeGenerator *cg, JSOp declOp,
+ JSParseNode *lhs, JSParseNode *rhs)
+{
+ jsuint depth, limit, slot;
+ JSParseNode *pn;
+
+ depth = limit = (uintN) cg->stackDepth;
+ for (pn = rhs->pn_head; pn; pn = pn->pn_next) {
+ if (limit == JS_BIT(16)) {
+ js_ReportCompileErrorNumber(cx, rhs,
+ JSREPORT_PN | JSREPORT_ERROR,
+ JSMSG_ARRAY_INIT_TOO_BIG);
+ return JS_FALSE;
+ }
+
+ if (pn->pn_type == TOK_COMMA) {
+ if (js_Emit1(cx, cg, JSOP_PUSH) < 0)
+ return JS_FALSE;
+ } else {
+ JS_ASSERT(pn->pn_type != TOK_DEFSHARP);
+ if (!js_EmitTree(cx, cg, pn))
+ return JS_FALSE;
+ }
+ ++limit;
+ }
+
+ if (js_NewSrcNote2(cx, cg, SRC_GROUPASSIGN, OpToDeclType(declOp)) < 0)
+ return JS_FALSE;
+
+ slot = depth;
+ for (pn = lhs->pn_head; pn; pn = pn->pn_next) {
+ if (slot < limit) {
+ EMIT_UINT16_IMM_OP(JSOP_GETLOCAL, slot);
+ } else {
+ if (js_Emit1(cx, cg, JSOP_PUSH) < 0)
+ return JS_FALSE;
+ }
+ if (pn->pn_type == TOK_COMMA && pn->pn_arity == PN_NULLARY) {
+ if (js_Emit1(cx, cg, JSOP_POP) < 0)
+ return JS_FALSE;
+ } else {
+ if (!EmitDestructuringLHS(cx, cg, pn, pn->pn_next != NULL))
+ return JS_FALSE;
+ }
+ ++slot;
+ }
+
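+ /* Discard the right-hand temporaries, restoring the pre-assignment depth. */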
+ EMIT_UINT16_IMM_OP(JSOP_SETSP, (jsatomid)depth);
+ cg->stackDepth = (uintN) depth;
+ return JS_TRUE;
+}
+
+/*
+ * Helper called with pop out param initialized to a JSOP_POP* opcode. If we
+ * can emit a group assignment sequence, which results in 0 stack depth delta,
+ * we set *pop to JSOP_NOP so callers can veto emitting pn followed by a pop.
+ */
+static JSBool
+MaybeEmitGroupAssignment(JSContext *cx, JSCodeGenerator *cg, JSOp declOp,
+ JSParseNode *pn, JSOp *pop)
+{
+ JSParseNode *lhs, *rhs;
+
+ JS_ASSERT(pn->pn_type == TOK_ASSIGN);
+ JS_ASSERT(*pop == JSOP_POP || *pop == JSOP_POPV);
+ lhs = pn->pn_left;
+ rhs = pn->pn_right;
+ if (lhs->pn_type == TOK_RB && rhs->pn_type == TOK_RB &&
+ lhs->pn_count <= rhs->pn_count &&
+ (rhs->pn_count == 0 ||
+ rhs->pn_head->pn_type != TOK_DEFSHARP)) {
+ if (!EmitGroupAssignment(cx, cg, declOp, lhs, rhs))
+ return JS_FALSE;
+ *pop = JSOP_NOP;
+ }
+ return JS_TRUE;
+}
+
+#endif /* JS_HAS_DESTRUCTURING */
+
+static JSBool
+EmitVariables(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn,
+ JSBool inLetHead, ptrdiff_t *headNoteIndex)
+{
+ JSTreeContext *tc;
+ JSBool let, forInVar;
+#if JS_HAS_BLOCK_SCOPE
+ JSBool forInLet, popScope;
+ JSStmtInfo *stmt, *scopeStmt;
+#endif
+ ptrdiff_t off, noteIndex, tmp;
+ JSParseNode *pn2, *pn3;
+ JSOp op;
+ jsatomid atomIndex;
+ uintN oldflags;
+
+ /* Default in case of JS_HAS_BLOCK_SCOPE early return, below. */
+ *headNoteIndex = -1;
+
+ /*
+ * Let blocks and expressions have a parenthesized head in which the new
+ * scope is not yet open. Initializer evaluation uses the parent node's
+ * lexical scope. If popScope is true below, then we hide the top lexical
+ * block from any calls to BindNameToSlot hiding in pn2->pn_expr so that
+ * it won't find any names in the new let block.
+ *
+ * The same goes for let declarations in the head of any kind of for loop.
+ * Unlike a let declaration 'let x = i' within a block, where x is hoisted
+ * to the start of the block, a 'for (let x = i...) ...' loop evaluates i
+ * in the containing scope, and puts x in the loop body's scope.
+ */
+ tc = &cg->treeContext;
+ let = (pn->pn_op == JSOP_NOP);
+ forInVar = (pn->pn_extra & PNX_FORINVAR) != 0;
+#if JS_HAS_BLOCK_SCOPE
+ forInLet = let && forInVar;
+ popScope = (inLetHead || (let && (tc->flags & TCF_IN_FOR_INIT)));
+ JS_ASSERT(!popScope || let);
+#endif
+
+ off = noteIndex = -1;
+ for (pn2 = pn->pn_head; ; pn2 = pn2->pn_next) {
+#if JS_HAS_DESTRUCTURING
+ if (pn2->pn_type != TOK_NAME) {
+ if (pn2->pn_type == TOK_RB || pn2->pn_type == TOK_RC) {
+ /*
+ * Emit variable binding ops, but not destructuring ops.
+ * The parser (see Variables, jsparse.c) has ensured that
+ * our caller will be the TOK_FOR/TOK_IN case in js_EmitTree,
+ * and that case will emit the destructuring code only after
+ * emitting an enumerating opcode and a branch that tests
+ * whether the enumeration ended.
+ */
+ JS_ASSERT(forInVar);
+ JS_ASSERT(pn->pn_count == 1);
+ if (!EmitDestructuringDecls(cx, cg, pn->pn_op, pn2))
+ return JS_FALSE;
+ break;
+ }
+
+ /*
+ * A destructuring initialiser assignment preceded by var is
+ * always evaluated promptly, even if it is to the left of 'in'
+ * in a for-in loop. As with 'for (var x = i in o)...', this
+ * will cause the entire 'var [a, b] = i' to be hoisted out of
+ * the head of the loop.
+ */
+ JS_ASSERT(pn2->pn_type == TOK_ASSIGN);
+ if (pn->pn_count == 1 && !forInLet) {
+ /*
+ * If this is the only destructuring assignment in the list,
+ * try to optimize to a group assignment. If we're in a let
+ * head, pass JSOP_POP rather than the pseudo-prolog JSOP_NOP
+ * in pn->pn_op, to suppress a second (and misplaced) 'let'.
+ */
+ JS_ASSERT(noteIndex < 0 && !pn2->pn_next);
+ op = JSOP_POP;
+ if (!MaybeEmitGroupAssignment(cx, cg,
+ inLetHead ? JSOP_POP : pn->pn_op,
+ pn2, &op)) {
+ return JS_FALSE;
+ }
+ if (op == JSOP_NOP) {
+ pn->pn_extra = (pn->pn_extra & ~PNX_POPVAR) | PNX_GROUPINIT;
+ break;
+ }
+ }
+
+ pn3 = pn2->pn_left;
+ if (!EmitDestructuringDecls(cx, cg, pn->pn_op, pn3))
+ return JS_FALSE;
+
+#if JS_HAS_BLOCK_SCOPE
+ /*
+ * If this is a 'for (let [x, y] = i in o) ...' let declaration,
+ * throw away i if it is a useless expression.
+ */
+ if (forInLet) {
+ JSBool useful = JS_FALSE;
+
+ JS_ASSERT(pn->pn_count == 1);
+ if (!CheckSideEffects(cx, tc, pn2->pn_right, &useful))
+ return JS_FALSE;
+ if (!useful)
+ return JS_TRUE;
+ }
+#endif
+
+ if (!js_EmitTree(cx, cg, pn2->pn_right))
+ return JS_FALSE;
+
+#if JS_HAS_BLOCK_SCOPE
+ /*
+ * The expression i in 'for (let [x, y] = i in o) ...', which is
+ * pn2->pn_right above, appears to have side effects. We've just
+ * emitted code to evaluate i, but we must not destructure i yet.
+ * Let the TOK_FOR: code in js_EmitTree do the destructuring to
+ * emit the right combination of source notes and bytecode for the
+ * decompiler.
+ *
+ * This has the effect of hoisting the evaluation of i out of the
+ * for-in loop, without hoisting the let variables, which must of
+ * course be scoped by the loop. Set PNX_POPVAR to cause JSOP_POP
+ * to be emitted, just before returning from this function.
+ */
+ if (forInVar) {
+ pn->pn_extra |= PNX_POPVAR;
+ if (forInLet)
+ break;
+ }
+#endif
+
+ /*
+ * Veto pn->pn_op if inLetHead to avoid emitting a SRC_DESTRUCT
+ * that's redundant with respect to the SRC_DECL/SRC_DECL_LET that
+ * we will emit at the bottom of this function.
+ */
+ if (!EmitDestructuringOps(cx, cg,
+ inLetHead ? JSOP_POP : pn->pn_op,
+ pn3)) {
+ return JS_FALSE;
+ }
+ goto emit_note_pop;
+ }
+#else
+ JS_ASSERT(pn2->pn_type == TOK_NAME);
+#endif
+
+ if (!BindNameToSlot(cx, &cg->treeContext, pn2, let))
+ return JS_FALSE;
+ JS_ASSERT(pn2->pn_slot >= 0 || !let);
+
+ op = pn2->pn_op;
+ if (op == JSOP_ARGUMENTS) {
+ /* JSOP_ARGUMENTS => no initializer */
+ JS_ASSERT(!pn2->pn_expr && !let);
+ pn3 = NULL;
+#ifdef __GNUC__
+ atomIndex = 0; /* quell GCC overwarning */
+#endif
+ } else {
+ if (!MaybeEmitVarDecl(cx, cg, pn->pn_op, pn2, &atomIndex))
+ return JS_FALSE;
+
+ pn3 = pn2->pn_expr;
+ if (pn3) {
+#if JS_HAS_BLOCK_SCOPE
+ /*
+ * If this is a 'for (let x = i in o) ...' let declaration,
+ * throw away i if it is a useless expression.
+ */
+ if (forInLet) {
+ JSBool useful = JS_FALSE;
+
+ JS_ASSERT(pn->pn_count == 1);
+ if (!CheckSideEffects(cx, tc, pn3, &useful))
+ return JS_FALSE;
+ if (!useful)
+ return JS_TRUE;
+ }
+#endif
+
+ if (op == JSOP_SETNAME) {
+ JS_ASSERT(!let);
+ EMIT_ATOM_INDEX_OP(JSOP_BINDNAME, atomIndex);
+ }
+ if (pn->pn_op == JSOP_DEFCONST &&
+ !js_DefineCompileTimeConstant(cx, cg, pn2->pn_atom,
+ pn3)) {
+ return JS_FALSE;
+ }
+
+#if JS_HAS_BLOCK_SCOPE
+ /* Evaluate expr in the outer lexical scope if requested. */
+ if (popScope) {
+ stmt = tc->topStmt;
+ scopeStmt = tc->topScopeStmt;
+
+ tc->topStmt = stmt->down;
+ tc->topScopeStmt = scopeStmt->downScope;
+ }
+#ifdef __GNUC__
+ else {
+ stmt = scopeStmt = NULL; /* quell GCC overwarning */
+ }
+#endif
+#endif
+
+ oldflags = cg->treeContext.flags;
+ cg->treeContext.flags &= ~TCF_IN_FOR_INIT;
+ if (!js_EmitTree(cx, cg, pn3))
+ return JS_FALSE;
+ cg->treeContext.flags |= oldflags & TCF_IN_FOR_INIT;
+
+#if JS_HAS_BLOCK_SCOPE
+ if (popScope) {
+ tc->topStmt = stmt;
+ tc->topScopeStmt = scopeStmt;
+ }
+#endif
+ }
+ }
+
+ /*
+ * 'for (var x in o) ...' and 'for (var x = i in o) ...' call the
+ * TOK_VAR case, but only the initialized case (a strange one that
+ * falls out of ECMA-262's grammar) wants to run past this point.
+ * Both cases must conditionally emit a JSOP_DEFVAR, above. Note
+ * that the parser error-checks to ensure that pn->pn_count is 1.
+ *
+ * 'for (let x = i in o) ...' must evaluate i before the loop, and
+ * subject it to useless expression elimination. The variable list
+ * in pn is a single let declaration if pn_op == JSOP_NOP. We test
+ * the let local in order to break early in this case, as well as in
+ * the 'for (var x in o)' case.
+ *
+ * XXX Narcissus keeps track of variable declarations in the node
+ * for the script being compiled, so there's no need to share any
+ * conditional prolog code generation there. We could do likewise,
+ * but it's a big change, requiring extra allocation, so probably
+ * not worth the trouble for SpiderMonkey.
+ */
+ JS_ASSERT(pn3 == pn2->pn_expr);
+ if (forInVar && (!pn3 || let)) {
+ JS_ASSERT(pn->pn_count == 1);
+ break;
+ }
+
+ if (pn2 == pn->pn_head &&
+ !inLetHead &&
+ js_NewSrcNote2(cx, cg, SRC_DECL,
+ (pn->pn_op == JSOP_DEFCONST)
+ ? SRC_DECL_CONST
+ : (pn->pn_op == JSOP_DEFVAR)
+ ? SRC_DECL_VAR
+ : SRC_DECL_LET) < 0) {
+ return JS_FALSE;
+ }
+ if (op == JSOP_ARGUMENTS) {
+ if (js_Emit1(cx, cg, op) < 0)
+ return JS_FALSE;
+ } else if (pn2->pn_slot >= 0) {
+ EMIT_UINT16_IMM_OP(op, atomIndex);
+ } else {
+ EMIT_ATOM_INDEX_OP(op, atomIndex);
+ }
+
+#if JS_HAS_DESTRUCTURING
+ emit_note_pop:
+#endif
+ tmp = CG_OFFSET(cg);
+ if (noteIndex >= 0) {
+ if (!js_SetSrcNoteOffset(cx, cg, (uintN)noteIndex, 0, tmp-off))
+ return JS_FALSE;
+ }
+ if (!pn2->pn_next)
+ break;
+ off = tmp;
+ noteIndex = js_NewSrcNote2(cx, cg, SRC_PCDELTA, 0);
+ if (noteIndex < 0 || js_Emit1(cx, cg, JSOP_POP) < 0)
+ return JS_FALSE;
+ }
+
+ /* If this is a let head, emit and return a srcnote on the pop. */
+ if (inLetHead) {
+ *headNoteIndex = js_NewSrcNote(cx, cg, SRC_DECL);
+ if (*headNoteIndex < 0)
+ return JS_FALSE;
+ if (!(pn->pn_extra & PNX_POPVAR))
+ return js_Emit1(cx, cg, JSOP_NOP) >= 0;
+ }
+
+ return !(pn->pn_extra & PNX_POPVAR) || js_Emit1(cx, cg, JSOP_POP) >= 0;
+}
+
+#if defined DEBUG_brendan || defined DEBUG_mrbkap
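+/*
+ * Debug-only helper: return true if some source note lands exactly on the
+ * next bytecode offset and is of a type the decompiler can "get".
+ */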
+static JSBool
+GettableNoteForNextOp(JSCodeGenerator *cg)
+{
+ ptrdiff_t offset, target;
+ jssrcnote *sn, *end;
+
+ offset = 0;
+ target = CG_OFFSET(cg);
+ for (sn = CG_NOTES(cg), end = sn + CG_NOTE_COUNT(cg); sn < end;
+ sn = SN_NEXT(sn)) {
+ if (offset == target && SN_IS_GETTABLE(sn))
+ return JS_TRUE;
+ offset += SN_DELTA(sn);
+ }
+ return JS_FALSE;
+}
+#endif
+
+JSBool
+js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
+{
+ JSBool ok, useful, wantval;
+ JSStmtInfo *stmt, stmtInfo;
+ ptrdiff_t top, off, tmp, beq, jmp;
+ JSParseNode *pn2, *pn3;
+ JSAtom *atom;
+ JSAtomListElement *ale;
+ jsatomid atomIndex;
+ ptrdiff_t noteIndex;
+ JSSrcNoteType noteType;
+ jsbytecode *pc;
+ JSOp op;
+ JSTokenType type;
+ uint32 argc;
+ int stackDummy;
+
+ if (!JS_CHECK_STACK_SIZE(cx, stackDummy)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_OVER_RECURSED);
+ return JS_FALSE;
+ }
+
+ ok = JS_TRUE;
+ cg->emitLevel++;
+ pn->pn_offset = top = CG_OFFSET(cg);
+
+ /* Emit notes to tell the current bytecode's source line number. */
+ UPDATE_LINE_NUMBER_NOTES(cx, cg, pn);
+
+ switch (pn->pn_type) {
+ case TOK_FUNCTION:
+ {
+ void *cg2mark;
+ JSCodeGenerator *cg2;
+ JSFunction *fun;
+
+#if JS_HAS_XML_SUPPORT
+ if (pn->pn_arity == PN_NULLARY) {
+ if (js_Emit1(cx, cg, JSOP_GETFUNNS) < 0)
+ return JS_FALSE;
+ break;
+ }
+#endif
+
+ /* Generate code for the function's body. */
+ cg2mark = JS_ARENA_MARK(&cx->tempPool);
+ JS_ARENA_ALLOCATE_TYPE(cg2, JSCodeGenerator, &cx->tempPool);
+ if (!cg2) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ if (!js_InitCodeGenerator(cx, cg2, cg->codePool, cg->notePool,
+ cg->filename, pn->pn_pos.begin.lineno,
+ cg->principals)) {
+ return JS_FALSE;
+ }
+ cg2->treeContext.flags = (uint16) (pn->pn_flags | TCF_IN_FUNCTION);
+ cg2->treeContext.tryCount = pn->pn_tryCount;
+ cg2->parent = cg;
+ fun = (JSFunction *) JS_GetPrivate(cx, ATOM_TO_OBJECT(pn->pn_funAtom));
+ if (!js_EmitFunctionBody(cx, cg2, pn->pn_body, fun))
+ return JS_FALSE;
+
+ /*
+ * We need an activation object if an inner peeks out, or if such
+ * inner-peeking caused one of our inners to become heavyweight.
+ */
+ if (cg2->treeContext.flags &
+ (TCF_FUN_USES_NONLOCALS | TCF_FUN_HEAVYWEIGHT)) {
+ cg->treeContext.flags |= TCF_FUN_HEAVYWEIGHT;
+ }
+ js_FinishCodeGenerator(cx, cg2);
+ JS_ARENA_RELEASE(&cx->tempPool, cg2mark);
+
+ /* Make the function object a literal in the outer script's pool. */
+ ale = js_IndexAtom(cx, pn->pn_funAtom, &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ atomIndex = ALE_INDEX(ale);
+
+ /* Emit a bytecode pointing to the closure object in its immediate. */
+ if (pn->pn_op != JSOP_NOP) {
+ EMIT_ATOM_INDEX_OP(pn->pn_op, atomIndex);
+ break;
+ }
+
+ /* Top-level named functions need a nop for decompilation. */
+ noteIndex = js_NewSrcNote2(cx, cg, SRC_FUNCDEF, (ptrdiff_t)atomIndex);
+ if (noteIndex < 0 ||
+ js_Emit1(cx, cg, JSOP_NOP) < 0) {
+ return JS_FALSE;
+ }
+
+ /*
+ * Top-levels also need a prolog op to predefine their names in the
+ * variable object, or if local, to fill their stack slots.
+ */
+ CG_SWITCH_TO_PROLOG(cg);
+
+ if (cg->treeContext.flags & TCF_IN_FUNCTION) {
+ JSObject *obj, *pobj;
+ JSProperty *prop;
+ JSScopeProperty *sprop;
+ uintN slot;
+
+ obj = OBJ_GET_PARENT(cx, fun->object);
+ if (!js_LookupHiddenProperty(cx, obj, ATOM_TO_JSID(fun->atom),
+ &pobj, &prop)) {
+ return JS_FALSE;
+ }
+
+ JS_ASSERT(prop && pobj == obj);
+ sprop = (JSScopeProperty *) prop;
+ JS_ASSERT(sprop->getter == js_GetLocalVariable);
+ slot = sprop->shortid;
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+
+ /*
+ * If this local function is declared in a body block induced by
+ * let declarations, reparent fun->object to the compiler-created
+ * body block object so that JSOP_DEFLOCALFUN can clone that block
+ * into the runtime scope chain.
+ */
+ stmt = cg->treeContext.topStmt;
+ if (stmt && stmt->type == STMT_BLOCK &&
+ stmt->down && stmt->down->type == STMT_BLOCK &&
+ (stmt->down->flags & SIF_SCOPE)) {
+ obj = ATOM_TO_OBJECT(stmt->down->atom);
+ JS_ASSERT(LOCKED_OBJ_GET_CLASS(obj) == &js_BlockClass);
+ OBJ_SET_PARENT(cx, fun->object, obj);
+ }
+
+ if (atomIndex >= JS_BIT(16)) {
+ /*
+ * Lots of literals in the outer function, so we have to emit
+ * [JSOP_LITOPX, atomIndex, JSOP_DEFLOCALFUN, var slot].
+ */
+ off = js_EmitN(cx, cg, JSOP_LITOPX, 3);
+ if (off < 0)
+ return JS_FALSE;
+ pc = CG_CODE(cg, off);
+ SET_LITERAL_INDEX(pc, atomIndex);
+ EMIT_UINT16_IMM_OP(JSOP_DEFLOCALFUN, slot);
+ } else {
+ /* Emit [JSOP_DEFLOCALFUN, var slot, atomIndex]. */
+ off = js_EmitN(cx, cg, JSOP_DEFLOCALFUN,
+ VARNO_LEN + ATOM_INDEX_LEN);
+ if (off < 0)
+ return JS_FALSE;
+ pc = CG_CODE(cg, off);
+ SET_VARNO(pc, slot);
+ pc += VARNO_LEN;
+ SET_ATOM_INDEX(pc, atomIndex);
+ }
+ } else {
+ JS_ASSERT(!cg->treeContext.topStmt);
+ EMIT_ATOM_INDEX_OP(JSOP_DEFFUN, atomIndex);
+ }
+
+ CG_SWITCH_TO_MAIN(cg);
+ break;
+ }
+
+#if JS_HAS_EXPORT_IMPORT
+ case TOK_EXPORT:
+ pn2 = pn->pn_head;
+ if (pn2->pn_type == TOK_STAR) {
+ /*
+ * 'export *' must have no other elements in the list (what would
+ * be the point?).
+ */
+ if (js_Emit1(cx, cg, JSOP_EXPORTALL) < 0)
+ return JS_FALSE;
+ } else {
+ /*
+ * If not 'export *', the list consists of NAME nodes identifying
+ * properties of the variables object to flag as exported.
+ */
+ do {
+ ale = js_IndexAtom(cx, pn2->pn_atom, &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ EMIT_ATOM_INDEX_OP(JSOP_EXPORTNAME, ALE_INDEX(ale));
+ } while ((pn2 = pn2->pn_next) != NULL);
+ }
+ break;
+
+ case TOK_IMPORT:
+ for (pn2 = pn->pn_head; pn2; pn2 = pn2->pn_next) {
+ /*
+ * Each subtree on an import list is rooted by a DOT or LB node.
+ * A DOT may have a null pn_atom member, in which case pn_op must
+ * be JSOP_IMPORTALL -- see EmitPropOp above.
+ */
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ }
+ break;
+#endif /* JS_HAS_EXPORT_IMPORT */
+
+ case TOK_IF:
+ /* Initialize so we can detect else-if chains and avoid recursion. */
+ stmtInfo.type = STMT_IF;
+ beq = jmp = -1;
+ noteIndex = -1;
+
+ if_again:
+ /* Emit code for the condition before pushing stmtInfo. */
+ if (!js_EmitTree(cx, cg, pn->pn_kid1))
+ return JS_FALSE;
+ top = CG_OFFSET(cg);
+ if (stmtInfo.type == STMT_IF) {
+ js_PushStatement(&cg->treeContext, &stmtInfo, STMT_IF, top);
+ } else {
+ /*
+ * We came here from the goto further below that detects else-if
+ * chains, so we must mutate stmtInfo back into a STMT_IF record.
+ * Also (see below for why) we need a note offset for SRC_IF_ELSE
+ * to help the decompiler. Actually, we need two offsets, one for
+ * decompiling any else clause and the second for decompiling an
+ * else-if chain without bracing, overindenting, or incorrectly
+ * scoping let declarations.
+ */
+ JS_ASSERT(stmtInfo.type == STMT_ELSE);
+ stmtInfo.type = STMT_IF;
+ stmtInfo.update = top;
+ if (!js_SetSrcNoteOffset(cx, cg, noteIndex, 0, jmp - beq))
+ return JS_FALSE;
+ if (!js_SetSrcNoteOffset(cx, cg, noteIndex, 1, top - jmp))
+ return JS_FALSE;
+ }
+
+ /* Emit an annotated branch-if-false around the then part. */
+ pn3 = pn->pn_kid3;
+ noteIndex = js_NewSrcNote(cx, cg, pn3 ? SRC_IF_ELSE : SRC_IF);
+ if (noteIndex < 0)
+ return JS_FALSE;
+ beq = EmitJump(cx, cg, JSOP_IFEQ, 0);
+ if (beq < 0)
+ return JS_FALSE;
+
+ /* Emit code for the then and optional else parts. */
+ if (!js_EmitTree(cx, cg, pn->pn_kid2))
+ return JS_FALSE;
+ if (pn3) {
+ /* Modify stmtInfo so we know we're in the else part. */
+ stmtInfo.type = STMT_ELSE;
+
+ /*
+ * Emit a JSOP_BACKPATCH op to jump from the end of our then part
+ * around the else part. The js_PopStatementCG call at the bottom
+ * of this switch case will fix up the backpatch chain linked from
+ * stmtInfo.breaks.
+ */
+ jmp = EmitGoto(cx, cg, &stmtInfo, &stmtInfo.breaks, NULL, SRC_NULL);
+ if (jmp < 0)
+ return JS_FALSE;
+
+ /* Ensure the branch-if-false comes here, then emit the else. */
+ CHECK_AND_SET_JUMP_OFFSET_AT(cx, cg, beq);
+ if (pn3->pn_type == TOK_IF) {
+ pn = pn3;
+ goto if_again;
+ }
+
+ if (!js_EmitTree(cx, cg, pn3))
+ return JS_FALSE;
+
+ /*
+ * Annotate SRC_IF_ELSE with the offset from branch to jump, for
+ * the decompiler's benefit. We can't just "back up" from the pc
+ * of the else clause, because we don't know whether an extended
+ * jump was required to leap from the end of the then clause over
+ * the else clause.
+ */
+ if (!js_SetSrcNoteOffset(cx, cg, noteIndex, 0, jmp - beq))
+ return JS_FALSE;
+ } else {
+ /* No else part, fixup the branch-if-false to come here. */
+ CHECK_AND_SET_JUMP_OFFSET_AT(cx, cg, beq);
+ }
+ ok = js_PopStatementCG(cx, cg);
+ break;
+
+ case TOK_SWITCH:
+ /* Out of line to avoid bloating js_EmitTree's stack frame size. */
+ ok = EmitSwitch(cx, cg, pn, &stmtInfo);
+ break;
+
+ case TOK_WHILE:
+ js_PushStatement(&cg->treeContext, &stmtInfo, STMT_WHILE_LOOP, top);
+ if (!js_EmitTree(cx, cg, pn->pn_left))
+ return JS_FALSE;
+ noteIndex = js_NewSrcNote(cx, cg, SRC_WHILE);
+ if (noteIndex < 0)
+ return JS_FALSE;
+ beq = EmitJump(cx, cg, JSOP_IFEQ, 0);
+ if (beq < 0)
+ return JS_FALSE;
+ if (!js_EmitTree(cx, cg, pn->pn_right))
+ return JS_FALSE;
+ jmp = EmitJump(cx, cg, JSOP_GOTO, top - CG_OFFSET(cg));
+ if (jmp < 0)
+ return JS_FALSE;
+ CHECK_AND_SET_JUMP_OFFSET_AT(cx, cg, beq);
+ if (!js_SetSrcNoteOffset(cx, cg, noteIndex, 0, jmp - beq))
+ return JS_FALSE;
+ ok = js_PopStatementCG(cx, cg);
+ break;
+
+ case TOK_DO:
+ /* Emit an annotated nop so we know to decompile a 'do' keyword. */
+ if (js_NewSrcNote(cx, cg, SRC_WHILE) < 0 ||
+ js_Emit1(cx, cg, JSOP_NOP) < 0) {
+ return JS_FALSE;
+ }
+
+ /* Compile the loop body. */
+ top = CG_OFFSET(cg);
+ js_PushStatement(&cg->treeContext, &stmtInfo, STMT_DO_LOOP, top);
+ if (!js_EmitTree(cx, cg, pn->pn_left))
+ return JS_FALSE;
+
+ /* Set loop and enclosing label update offsets, for continue. */
+ stmt = &stmtInfo;
+ do {
+ stmt->update = CG_OFFSET(cg);
+ } while ((stmt = stmt->down) != NULL && stmt->type == STMT_LABEL);
+
+ /* Compile the loop condition, now that continues know where to go. */
+ if (!js_EmitTree(cx, cg, pn->pn_right))
+ return JS_FALSE;
+
+ /*
+ * No source note needed, because JSOP_IFNE is used only for do-while.
+ * If we ever use JSOP_IFNE for other purposes, we can still avoid yet
+ * another note here, by storing (jmp - top) in the SRC_WHILE note's
+ * offset, and fetching that delta in order to decompile recursively.
+ */
+ if (EmitJump(cx, cg, JSOP_IFNE, top - CG_OFFSET(cg)) < 0)
+ return JS_FALSE;
+ ok = js_PopStatementCG(cx, cg);
+ break;
+
+ case TOK_FOR:
+ beq = 0; /* suppress gcc warnings */
+ pn2 = pn->pn_left;
+ js_PushStatement(&cg->treeContext, &stmtInfo, STMT_FOR_LOOP, top);
+
+ if (pn2->pn_type == TOK_IN) {
+ JSBool emitIFEQ;
+
+ /* Set stmtInfo type for later testing. */
+ stmtInfo.type = STMT_FOR_IN_LOOP;
+ noteIndex = -1;
+
+ /*
+ * If the left part is 'var x', emit code to define x if necessary
+ * using a prolog opcode, but do not emit a pop. If the left part
+ * is 'var x = i', emit prolog code to define x if necessary; then
+ * emit code to evaluate i, assign the result to x, and pop the
+ * result off the stack.
+ *
+ * All the logic to do this is implemented in the outer switch's
+ * TOK_VAR case, conditioned on pn_extra flags set by the parser.
+ *
+ * In the 'for (var x = i in o) ...' case, the js_EmitTree(...pn3)
+ * called here will generate the proper note for the assignment
+ * op that sets x = i, hoisting the initialized var declaration
+ * out of the loop: 'var x = i; for (x in o) ...'.
+ *
+ * In the 'for (var x in o) ...' case, nothing but the prolog op
+ * (if needed) should be generated here, we must emit the note
+ * just before the JSOP_FOR* opcode in the switch on pn3->pn_type
+ * a bit below, so nothing is hoisted: 'for (var x in o) ...'.
+ *
+ * A 'for (let x = i in o)' loop must not be hoisted, since in
+ * this form the let variable is scoped by the loop body (but not
+ * the head). The initializer expression i must be evaluated for
+ * any side effects. So we hoist only i in the let case.
+ */
+ pn3 = pn2->pn_left;
+ type = pn3->pn_type;
+ cg->treeContext.flags |= TCF_IN_FOR_INIT;
+ if (TOKEN_TYPE_IS_DECL(type) && !js_EmitTree(cx, cg, pn3))
+ return JS_FALSE;
+ cg->treeContext.flags &= ~TCF_IN_FOR_INIT;
+
+ /* Emit a push to allocate the iterator. */
+ if (js_Emit1(cx, cg, JSOP_STARTITER) < 0)
+ return JS_FALSE;
+
+ /* Compile the object expression to the right of 'in'. */
+ if (!js_EmitTree(cx, cg, pn2->pn_right))
+ return JS_FALSE;
+
+ /*
+ * Emit a bytecode to convert top of stack value to the iterator
+ * object depending on the loop variant (for-in, for-each-in, or
+ * destructuring for-in).
+ */
+#if JS_HAS_DESTRUCTURING
+ JS_ASSERT(pn->pn_op == JSOP_FORIN ||
+ pn->pn_op == JSOP_FOREACHKEYVAL ||
+ pn->pn_op == JSOP_FOREACH);
+#else
+ JS_ASSERT(pn->pn_op == JSOP_FORIN || pn->pn_op == JSOP_FOREACH);
+#endif
+ if (js_Emit1(cx, cg, pn->pn_op) < 0)
+ return JS_FALSE;
+
+ top = CG_OFFSET(cg);
+ SET_STATEMENT_TOP(&stmtInfo, top);
+
+ /*
+ * Compile a JSOP_FOR* bytecode based on the left hand side.
+ *
+ * Initialize op to JSOP_SETNAME in case of |for ([a, b] in o)...|
+ * or similar, to signify assignment, rather than declaration, to
+ * the decompiler. EmitDestructuringOps takes a prolog bytecode
+ * parameter and emits the appropriate source note, defaulting to
+ * assignment, so JSOP_SETNAME is not critical here; many similar
+ * ops could be used -- just not JSOP_NOP (which means 'let').
+ */
+ emitIFEQ = JS_TRUE;
+ op = JSOP_SETNAME;
+ switch (type) {
+#if JS_HAS_BLOCK_SCOPE
+ case TOK_LET:
+#endif
+ case TOK_VAR:
+ JS_ASSERT(pn3->pn_arity == PN_LIST && pn3->pn_count == 1);
+ pn3 = pn3->pn_head;
+#if JS_HAS_DESTRUCTURING
+ if (pn3->pn_type == TOK_ASSIGN) {
+ pn3 = pn3->pn_left;
+ JS_ASSERT(pn3->pn_type == TOK_RB || pn3->pn_type == TOK_RC);
+ }
+ if (pn3->pn_type == TOK_RB || pn3->pn_type == TOK_RC) {
+ op = pn2->pn_left->pn_op;
+ goto destructuring_for;
+ }
+#else
+ JS_ASSERT(pn3->pn_type == TOK_NAME);
+#endif
+ /*
+ * Always annotate JSOP_FORLOCAL if given input of the form
+ * 'for (let x in o)' -- the decompiler must not hoist the
+ * 'let x' out of the loop head, or x will be bound in the
+ * wrong scope. Likewise, but in this case only for the sake
+ * of higher decompilation fidelity, do not hoist 'var x'
+ * when given 'for (var x in o)'. But 'for (var x = i in o)'
+ * requires hoisting in order to preserve the initializer i.
+ * The decompiler can only handle so much!
+ */
+ if ((
+#if JS_HAS_BLOCK_SCOPE
+ type == TOK_LET ||
+#endif
+ !pn3->pn_expr) &&
+ js_NewSrcNote2(cx, cg, SRC_DECL,
+ type == TOK_VAR
+ ? SRC_DECL_VAR
+ : SRC_DECL_LET) < 0) {
+ return JS_FALSE;
+ }
+ /* FALL THROUGH */
+ case TOK_NAME:
+ if (pn3->pn_slot >= 0) {
+ op = pn3->pn_op;
+ switch (op) {
+ case JSOP_GETARG: /* FALL THROUGH */
+ case JSOP_SETARG: op = JSOP_FORARG; break;
+ case JSOP_GETVAR: /* FALL THROUGH */
+ case JSOP_SETVAR: op = JSOP_FORVAR; break;
+ case JSOP_GETGVAR: /* FALL THROUGH */
+ case JSOP_SETGVAR: op = JSOP_FORNAME; break;
+ case JSOP_GETLOCAL: /* FALL THROUGH */
+ case JSOP_SETLOCAL: op = JSOP_FORLOCAL; break;
+ default: JS_ASSERT(0);
+ }
+ } else {
+ pn3->pn_op = JSOP_FORNAME;
+ if (!BindNameToSlot(cx, &cg->treeContext, pn3, JS_FALSE))
+ return JS_FALSE;
+ op = pn3->pn_op;
+ }
+ if (pn3->pn_slot >= 0) {
+ if (pn3->pn_attrs & JSPROP_READONLY) {
+ JS_ASSERT(op == JSOP_FORVAR);
+ op = JSOP_GETVAR;
+ }
+ atomIndex = (jsatomid) pn3->pn_slot;
+ EMIT_UINT16_IMM_OP(op, atomIndex);
+ } else {
+ if (!EmitAtomOp(cx, pn3, op, cg))
+ return JS_FALSE;
+ }
+ break;
+
+ case TOK_DOT:
+ useful = JS_FALSE;
+ if (!CheckSideEffects(cx, &cg->treeContext, pn3->pn_expr,
+ &useful)) {
+ return JS_FALSE;
+ }
+ if (!useful) {
+ if (!EmitPropOp(cx, pn3, JSOP_FORPROP, cg))
+ return JS_FALSE;
+ break;
+ }
+ /* FALL THROUGH */
+
+#if JS_HAS_DESTRUCTURING
+ case TOK_RB:
+ case TOK_RC:
+ destructuring_for:
+#endif
+#if JS_HAS_XML_SUPPORT
+ case TOK_UNARYOP:
+#endif
+#if JS_HAS_LVALUE_RETURN
+ case TOK_LP:
+#endif
+ case TOK_LB:
+ /*
+ * We separate the first/next bytecode from the enumerator
+ * variable binding to avoid any side-effects in the index
+ * expression (e.g., for (x[i++] in {}) should not bind x[i]
+ * or increment i at all).
+ */
+ emitIFEQ = JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_FORELEM) < 0)
+ return JS_FALSE;
+
+ /*
+ * Emit a SRC_WHILE note with offset telling the distance to
+ * the loop-closing jump (we can't reckon from the branch at
+ * the top of the loop, because the loop-closing jump might
+ * need to be an extended jump, independent of whether the
+ * branch is short or long).
+ */
+ noteIndex = js_NewSrcNote(cx, cg, SRC_WHILE);
+ if (noteIndex < 0)
+ return JS_FALSE;
+ beq = EmitJump(cx, cg, JSOP_IFEQ, 0);
+ if (beq < 0)
+ return JS_FALSE;
+
+#if JS_HAS_DESTRUCTURING
+ if (pn3->pn_type == TOK_RB || pn3->pn_type == TOK_RC) {
+ if (!EmitDestructuringOps(cx, cg, op, pn3))
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_POP) < 0)
+ return JS_FALSE;
+ break;
+ }
+#endif
+#if JS_HAS_LVALUE_RETURN
+ if (pn3->pn_type == TOK_LP) {
+ JS_ASSERT(pn3->pn_op == JSOP_SETCALL);
+ if (!js_EmitTree(cx, cg, pn3))
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_ENUMELEM) < 0)
+ return JS_FALSE;
+ break;
+ }
+#endif
+#if JS_HAS_XML_SUPPORT
+ if (pn3->pn_type == TOK_UNARYOP) {
+ JS_ASSERT(pn3->pn_op == JSOP_BINDXMLNAME);
+ if (!js_EmitTree(cx, cg, pn3))
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_ENUMELEM) < 0)
+ return JS_FALSE;
+ break;
+ }
+#endif
+
+ /* Now that we're safely past the IFEQ, commit side effects. */
+ if (!EmitElemOp(cx, pn3, JSOP_ENUMELEM, cg))
+ return JS_FALSE;
+ break;
+
+ default:
+ JS_ASSERT(0);
+ }
+
+ if (emitIFEQ) {
+ /* Annotate so the decompiler can find the loop-closing jump. */
+ noteIndex = js_NewSrcNote(cx, cg, SRC_WHILE);
+ if (noteIndex < 0)
+ return JS_FALSE;
+
+ /* Pop and test the loop condition generated by JSOP_FOR*. */
+ beq = EmitJump(cx, cg, JSOP_IFEQ, 0);
+ if (beq < 0)
+ return JS_FALSE;
+ }
+ } else {
+ op = JSOP_POP;
+ if (!pn2->pn_kid1) {
+ /* No initializer: emit an annotated nop for the decompiler. */
+ op = JSOP_NOP;
+ } else {
+ cg->treeContext.flags |= TCF_IN_FOR_INIT;
+#if JS_HAS_DESTRUCTURING
+ pn3 = pn2->pn_kid1;
+ if (pn3->pn_type == TOK_ASSIGN &&
+ !MaybeEmitGroupAssignment(cx, cg, op, pn3, &op)) {
+ return JS_FALSE;
+ }
+#endif
+ if (op == JSOP_POP) {
+ if (!js_EmitTree(cx, cg, pn3))
+ return JS_FALSE;
+ if (TOKEN_TYPE_IS_DECL(pn3->pn_type)) {
+ /*
+ * Check whether a destructuring-initialized var decl
+ * was optimized to a group assignment. If so, we do
+ * not need to emit a pop below, so switch to a nop,
+ * just for the decompiler.
+ */
+ JS_ASSERT(pn3->pn_arity == PN_LIST);
+ if (pn3->pn_extra & PNX_GROUPINIT)
+ op = JSOP_NOP;
+ }
+ }
+ cg->treeContext.flags &= ~TCF_IN_FOR_INIT;
+ }
+ noteIndex = js_NewSrcNote(cx, cg, SRC_FOR);
+ if (noteIndex < 0 ||
+ js_Emit1(cx, cg, op) < 0) {
+ return JS_FALSE;
+ }
+
+ top = CG_OFFSET(cg);
+ SET_STATEMENT_TOP(&stmtInfo, top);
+ if (!pn2->pn_kid2) {
+ /* No loop condition: flag this fact in the source notes. */
+ if (!js_SetSrcNoteOffset(cx, cg, (uintN)noteIndex, 0, 0))
+ return JS_FALSE;
+ } else {
+ if (!js_EmitTree(cx, cg, pn2->pn_kid2))
+ return JS_FALSE;
+ if (!js_SetSrcNoteOffset(cx, cg, (uintN)noteIndex, 0,
+ CG_OFFSET(cg) - top)) {
+ return JS_FALSE;
+ }
+ beq = EmitJump(cx, cg, JSOP_IFEQ, 0);
+ if (beq < 0)
+ return JS_FALSE;
+ }
+
+ /* Set pn3 (used below) here to avoid spurious gcc warnings. */
+ pn3 = pn2->pn_kid3;
+ }
+
+ /* Emit code for the loop body. */
+ if (!js_EmitTree(cx, cg, pn->pn_right))
+ return JS_FALSE;
+
+ if (pn2->pn_type != TOK_IN) {
+ /* Set the second note offset so we can find the update part. */
+ JS_ASSERT(noteIndex != -1);
+ if (!js_SetSrcNoteOffset(cx, cg, (uintN)noteIndex, 1,
+ CG_OFFSET(cg) - top)) {
+ return JS_FALSE;
+ }
+
+ if (pn3) {
+ /* Set loop and enclosing "update" offsets, for continue. */
+ stmt = &stmtInfo;
+ do {
+ stmt->update = CG_OFFSET(cg);
+ } while ((stmt = stmt->down) != NULL &&
+ stmt->type == STMT_LABEL);
+
+ op = JSOP_POP;
+#if JS_HAS_DESTRUCTURING
+ if (pn3->pn_type == TOK_ASSIGN &&
+ !MaybeEmitGroupAssignment(cx, cg, op, pn3, &op)) {
+ return JS_FALSE;
+ }
+#endif
+ if (op == JSOP_POP) {
+ if (!js_EmitTree(cx, cg, pn3))
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, op) < 0)
+ return JS_FALSE;
+ }
+
+ /* Restore the absolute line number for source note readers. */
+ off = (ptrdiff_t) pn->pn_pos.end.lineno;
+ if (CG_CURRENT_LINE(cg) != (uintN) off) {
+ if (js_NewSrcNote2(cx, cg, SRC_SETLINE, off) < 0)
+ return JS_FALSE;
+ CG_CURRENT_LINE(cg) = (uintN) off;
+ }
+ }
+
+ /* The third note offset helps us find the loop-closing jump. */
+ if (!js_SetSrcNoteOffset(cx, cg, (uintN)noteIndex, 2,
+ CG_OFFSET(cg) - top)) {
+ return JS_FALSE;
+ }
+ }
+
+ /* Emit the loop-closing jump and fixup all jump offsets. */
+ jmp = EmitJump(cx, cg, JSOP_GOTO, top - CG_OFFSET(cg));
+ if (jmp < 0)
+ return JS_FALSE;
+ if (beq > 0)
+ CHECK_AND_SET_JUMP_OFFSET_AT(cx, cg, beq);
+ if (pn2->pn_type == TOK_IN) {
+ /* Set the SRC_WHILE note offset so we can find the closing jump. */
+ JS_ASSERT(noteIndex != -1);
+ if (!js_SetSrcNoteOffset(cx, cg, (uintN)noteIndex, 0, jmp - beq))
+ return JS_FALSE;
+ }
+
+ /* Now fixup all breaks and continues (before for/in's JSOP_ENDITER). */
+ if (!js_PopStatementCG(cx, cg))
+ return JS_FALSE;
+
+ if (pn2->pn_type == TOK_IN) {
+ if (js_Emit1(cx, cg, JSOP_ENDITER) < 0)
+ return JS_FALSE;
+ }
+ break;
+
+ case TOK_BREAK:
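+ /*
+ * Find the break target: the labelled statement when a label is given,
+ * otherwise the nearest enclosing loop or switch. EmitGoto records the
+ * jump on the target's 'breaks' backpatch chain for later fixup.
+ */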
+ stmt = cg->treeContext.topStmt;
+ atom = pn->pn_atom;
+ if (atom) {
+ ale = js_IndexAtom(cx, atom, &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ while (stmt->type != STMT_LABEL || stmt->atom != atom)
+ stmt = stmt->down;
+ noteType = SRC_BREAK2LABEL;
+ } else {
+ ale = NULL;
+ while (!STMT_IS_LOOP(stmt) && stmt->type != STMT_SWITCH)
+ stmt = stmt->down;
+ noteType = SRC_NULL;
+ }
+
+ if (EmitGoto(cx, cg, stmt, &stmt->breaks, ale, noteType) < 0)
+ return JS_FALSE;
+ break;
+
+ case TOK_CONTINUE:
+ stmt = cg->treeContext.topStmt;
+ atom = pn->pn_atom;
+ if (atom) {
+ /* Find the loop statement enclosed by the matching label. */
+ JSStmtInfo *loop = NULL;
+ ale = js_IndexAtom(cx, atom, &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ while (stmt->type != STMT_LABEL || stmt->atom != atom) {
+ if (STMT_IS_LOOP(stmt))
+ loop = stmt;
+ stmt = stmt->down;
+ }
+ stmt = loop;
+ noteType = SRC_CONT2LABEL;
+ } else {
+ ale = NULL;
+ while (!STMT_IS_LOOP(stmt))
+ stmt = stmt->down;
+ noteType = SRC_CONTINUE;
+ }
+
+ if (EmitGoto(cx, cg, stmt, &stmt->continues, ale, noteType) < 0)
+ return JS_FALSE;
+ break;
+
+ case TOK_WITH:
+ if (!js_EmitTree(cx, cg, pn->pn_left))
+ return JS_FALSE;
+ js_PushStatement(&cg->treeContext, &stmtInfo, STMT_WITH, CG_OFFSET(cg));
+ if (js_Emit1(cx, cg, JSOP_ENTERWITH) < 0)
+ return JS_FALSE;
+ if (!js_EmitTree(cx, cg, pn->pn_right))
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_LEAVEWITH) < 0)
+ return JS_FALSE;
+ ok = js_PopStatementCG(cx, cg);
+ break;
+
+ case TOK_TRY:
+ {
+ ptrdiff_t start, end, catchJump, catchStart, finallyCatch;
+ intN depth;
+ JSParseNode *lastCatch;
+
+ catchJump = catchStart = finallyCatch = -1;
+
+ /*
+ * Push stmtInfo to track jumps-over-catches and gosubs-to-finally
+ * for later fixup.
+ *
+ * When a finally block is 'active' (STMT_FINALLY on the treeContext),
+ * non-local jumps (including jumps-over-catches) result in a GOSUB
+ * being written into the bytecode stream and fixed-up later (c.f.
+ * EmitBackPatchOp and BackPatch).
+ */
+ js_PushStatement(&cg->treeContext, &stmtInfo,
+ pn->pn_kid3 ? STMT_FINALLY : STMT_TRY,
+ CG_OFFSET(cg));
+
+ /*
+ * About JSOP_SETSP: an exception can be thrown while the stack is in
+ * an unbalanced state, and this imbalance causes problems with things
+ * like function invocation later on.
+ *
+ * To fix this, we compute the 'balanced' stack depth upon try entry,
+ * and then restore the stack to this depth when we hit the first catch
+ * or finally block. We can't just zero the stack, because things like
+ * for/in and with that are active upon entry to the block keep state
+ * variables on the stack.
+ */
+ depth = cg->stackDepth;
+
+ /* Mark try location for decompilation, then emit try block. */
+ if (js_Emit1(cx, cg, JSOP_TRY) < 0)
+ return JS_FALSE;
+ start = CG_OFFSET(cg);
+ if (!js_EmitTree(cx, cg, pn->pn_kid1))
+ return JS_FALSE;
+
+ /* GOSUB to finally, if present. */
+ if (pn->pn_kid3) {
+ if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0)
+ return JS_FALSE;
+ jmp = EmitBackPatchOp(cx, cg, JSOP_BACKPATCH, &GOSUBS(stmtInfo));
+ if (jmp < 0)
+ return JS_FALSE;
+
+ /* JSOP_RETSUB pops the return pc-index, balancing the stack. */
+ cg->stackDepth = depth;
+ }
+
+ /* Emit (hidden) jump over catch and/or finally. */
+ if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0)
+ return JS_FALSE;
+ jmp = EmitBackPatchOp(cx, cg, JSOP_BACKPATCH, &catchJump);
+ if (jmp < 0)
+ return JS_FALSE;
+
+ end = CG_OFFSET(cg);
+
+ /* If this try has a catch block, emit it. */
+ pn2 = pn->pn_kid2;
+ lastCatch = NULL;
+ if (pn2) {
+ jsint count = 0; /* previous catch block's population */
+
+ catchStart = end;
+
+ /*
+ * The emitted code for a catch block looks like:
+ *
+ * [throwing] only if 2nd+ catch block
+ * [leaveblock] only if 2nd+ catch block
+ * enterblock with SRC_CATCH
+ * exception
+ * [dup] only if catchguard
+ * setlocalpop <slot> or destructuring code
+ * [< catchguard code >] if there's a catchguard
+ * [ifeq <offset to next catch block>] " "
+ * [pop] only if catchguard
+ * < catch block contents >
+ * leaveblock
+ * goto <end of catch blocks> non-local; finally applies
+ *
+ * If there's no catch block without a catchguard, the last
+ * <offset to next catch block> points to rethrow code. This
+ * code will [gosub] to the finally code if appropriate, and is
+ * also used for the catch-all trynote for capturing exceptions
+ * thrown from catch{} blocks.
+ */
+ for (pn3 = pn2->pn_head; pn3; pn3 = pn3->pn_next) {
+ ptrdiff_t guardJump, catchNote;
+
+ guardJump = GUARDJUMP(stmtInfo);
+ if (guardJump == -1) {
+ /* Set stack to original depth (see SETSP comment above). */
+ EMIT_UINT16_IMM_OP(JSOP_SETSP, (jsatomid)depth);
+ cg->stackDepth = depth;
+ } else {
+ /* Fix up and clean up previous catch block. */
+ CHECK_AND_SET_JUMP_OFFSET_AT(cx, cg, guardJump);
+
+ /*
+ * Account for the pushed exception object that we still
+                 * have after jumping from the previous guard.
+ */
+ JS_ASSERT(cg->stackDepth == depth);
+ cg->stackDepth = depth + 1;
+
+ /*
+ * Move exception back to cx->exception to prepare for
+ * the next catch. We hide [throwing] from the decompiler
+ * since it compensates for the hidden JSOP_DUP at the
+ * start of the previous guarded catch.
+ */
+ if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0 ||
+ js_Emit1(cx, cg, JSOP_THROWING) < 0) {
+ return JS_FALSE;
+ }
+
+ /*
+ * Emit an unbalanced [leaveblock] for the previous catch,
+ * whose block object count is saved below.
+ */
+ if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0)
+ return JS_FALSE;
+ JS_ASSERT(count >= 0);
+ EMIT_UINT16_IMM_OP(JSOP_LEAVEBLOCK, count);
+ }
+
+ /*
+ * Annotate the JSOP_ENTERBLOCK that's about to be generated
+ * by the call to js_EmitTree immediately below. Save this
+ * source note's index in stmtInfo for use by the TOK_CATCH:
+ * case, where the length of the catch guard is set as the
+ * note's offset.
+ */
+ catchNote = js_NewSrcNote2(cx, cg, SRC_CATCH, 0);
+ if (catchNote < 0)
+ return JS_FALSE;
+ CATCHNOTE(stmtInfo) = catchNote;
+
+ /*
+ * Emit the lexical scope and catch body. Save the catch's
+ * block object population via count, for use when targeting
+ * guardJump at the next catch (the guard mismatch case).
+ */
+ JS_ASSERT(pn3->pn_type == TOK_LEXICALSCOPE);
+ count = OBJ_BLOCK_COUNT(cx, ATOM_TO_OBJECT(pn3->pn_atom));
+ if (!js_EmitTree(cx, cg, pn3))
+ return JS_FALSE;
+
+ /* gosub <finally>, if required */
+ if (pn->pn_kid3) {
+ jmp = EmitBackPatchOp(cx, cg, JSOP_BACKPATCH,
+ &GOSUBS(stmtInfo));
+ if (jmp < 0)
+ return JS_FALSE;
+ JS_ASSERT(cg->stackDepth == depth);
+ }
+
+ /*
+ * Jump over the remaining catch blocks. This will get fixed
+ * up to jump to after catch/finally.
+ */
+ if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0)
+ return JS_FALSE;
+ jmp = EmitBackPatchOp(cx, cg, JSOP_BACKPATCH, &catchJump);
+ if (jmp < 0)
+ return JS_FALSE;
+
+ /*
+ * Save a pointer to the last catch node to handle try-finally
+ * and try-catch(guard)-finally special cases.
+ */
+ lastCatch = pn3->pn_expr;
+ }
+ }
+
+ /*
+ * Last catch guard jumps to the rethrow code sequence if none of the
+ * guards match. Target guardJump at the beginning of the rethrow
+ * sequence, just in case a guard expression throws and leaves the
+ * stack unbalanced.
+ */
+ if (lastCatch && lastCatch->pn_kid2) {
+ CHECK_AND_SET_JUMP_OFFSET_AT(cx, cg, GUARDJUMP(stmtInfo));
+
+ /* Sync the stack to take into account pushed exception. */
+ JS_ASSERT(cg->stackDepth == depth);
+ cg->stackDepth = depth + 1;
+
+ /*
+             * Rethrow the exception, delegating execution of any finally
+             * block to the exception handler.
+ */
+ if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0 ||
+ js_Emit1(cx, cg, JSOP_THROW) < 0) {
+ return JS_FALSE;
+ }
+ }
+
+ JS_ASSERT(cg->stackDepth == depth);
+
+ /* Emit finally handler if any. */
+ if (pn->pn_kid3) {
+ /*
+ * We emit [setsp][gosub] to call try-finally when an exception is
+ * thrown from try or try-catch blocks. The [gosub] and [retsub]
+ * opcodes will take care of stacking and rethrowing any exception
+ * pending across the finally.
+ */
+ finallyCatch = CG_OFFSET(cg);
+ EMIT_UINT16_IMM_OP(JSOP_SETSP, (jsatomid)depth);
+
+ jmp = EmitBackPatchOp(cx, cg, JSOP_BACKPATCH,
+ &GOSUBS(stmtInfo));
+ if (jmp < 0)
+ return JS_FALSE;
+
+ JS_ASSERT(cg->stackDepth == depth);
+ JS_ASSERT((uintN)depth <= cg->maxStackDepth);
+
+ /*
+ * Fix up the gosubs that might have been emitted before non-local
+ * jumps to the finally code.
+ */
+ if (!BackPatch(cx, cg, GOSUBS(stmtInfo), CG_NEXT(cg), JSOP_GOSUB))
+ return JS_FALSE;
+
+ /*
+ * The stack budget must be balanced at this point. All [gosub]
+ * calls emitted before this point will push two stack slots, one
+ * for the pending exception (or JSVAL_HOLE if there is no pending
+ * exception) and one for the [retsub] pc-index.
+ */
+ JS_ASSERT(cg->stackDepth == depth);
+ cg->stackDepth += 2;
+ if ((uintN)cg->stackDepth > cg->maxStackDepth)
+ cg->maxStackDepth = cg->stackDepth;
+
+ /* Now indicate that we're emitting a subroutine body. */
+ stmtInfo.type = STMT_SUBROUTINE;
+ if (!UpdateLineNumberNotes(cx, cg, pn->pn_kid3))
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_FINALLY) < 0 ||
+ !js_EmitTree(cx, cg, pn->pn_kid3) ||
+ js_Emit1(cx, cg, JSOP_RETSUB) < 0) {
+ return JS_FALSE;
+ }
+
+ /* Restore stack depth budget to its balanced state. */
+ JS_ASSERT(cg->stackDepth == depth + 2);
+ cg->stackDepth = depth;
+ }
+ if (!js_PopStatementCG(cx, cg))
+ return JS_FALSE;
+
+ if (js_NewSrcNote(cx, cg, SRC_ENDBRACE) < 0 ||
+ js_Emit1(cx, cg, JSOP_NOP) < 0) {
+ return JS_FALSE;
+ }
+
+ /* Fix up the end-of-try/catch jumps to come here. */
+ if (!BackPatch(cx, cg, catchJump, CG_NEXT(cg), JSOP_GOTO))
+ return JS_FALSE;
+
+ /*
+ * Add the try note last, to let post-order give us the right ordering
+ * (first to last for a given nesting level, inner to outer by level).
+ */
+ if (pn->pn_kid2) {
+ JS_ASSERT(end != -1 && catchStart != -1);
+ if (!js_NewTryNote(cx, cg, start, end, catchStart))
+ return JS_FALSE;
+ }
+
+ /*
+ * If we've got a finally, mark try+catch region with additional
+ * trynote to catch exceptions (re)thrown from a catch block or
+ * for the try{}finally{} case.
+ */
+ if (pn->pn_kid3) {
+ JS_ASSERT(finallyCatch != -1);
+ if (!js_NewTryNote(cx, cg, start, finallyCatch, finallyCatch))
+ return JS_FALSE;
+ }
+ break;
+ }
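Editor's note, not part of this patch: the JSOP_SETSP commentary in the TOK_TRY case above boils down to recording the operand-stack depth at try entry and, when control reaches a catch or finally handler, discarding anything pushed after that point, because an exception can unwind while an expression is half evaluated. A minimal standalone sketch of that bookkeeping, using made-up names rather than SpiderMonkey's:

#include <assert.h>
#include <stdio.h>

/* Toy interpreter state: an operand stack and its current depth. */
typedef struct {
    double slots[64];
    int    depth;
} ToyStack;

/* When an exception reaches a handler, discard everything pushed after
 * try entry -- the analogue of JSOP_SETSP -- so the handler starts from
 * the balanced depth instead of a half-evaluated expression's leftovers. */
static void restore_to_depth(ToyStack *s, int depth)
{
    assert(depth >= 0 && depth <= s->depth);
    s->depth = depth;
}

int main(void)
{
    ToyStack s = { {0}, 0 };
    int saved = s.depth;                 /* depth recorded at try entry */

    s.slots[s.depth++] = 1.0;            /* half-evaluated expression ...  */
    s.slots[s.depth++] = 2.0;            /* ... when the "throw" happens   */

    restore_to_depth(&s, saved);         /* catch/finally sees a clean stack */
    printf("depth at handler entry: %d\n", s.depth);    /* prints 0 */
    return 0;
}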
+
+ case TOK_CATCH:
+ {
+ ptrdiff_t catchStart, guardJump;
+
+ /*
+ * Morph STMT_BLOCK to STMT_CATCH, note the block entry code offset,
+ * and save the block object atom.
+ */
+ stmt = cg->treeContext.topStmt;
+ JS_ASSERT(stmt->type == STMT_BLOCK && (stmt->flags & SIF_SCOPE));
+ stmt->type = STMT_CATCH;
+ catchStart = stmt->update;
+ atom = stmt->atom;
+
+ /* Go up one statement info record to the TRY or FINALLY record. */
+ stmt = stmt->down;
+ JS_ASSERT(stmt->type == STMT_TRY || stmt->type == STMT_FINALLY);
+
+ /* Pick up the pending exception and bind it to the catch variable. */
+ if (js_Emit1(cx, cg, JSOP_EXCEPTION) < 0)
+ return JS_FALSE;
+
+ /*
+         * If there is a guard, dup the exception object so it can be used
+         * later when rethrowing or in other catches.
+ */
+ if (pn->pn_kid2) {
+ if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0 ||
+ js_Emit1(cx, cg, JSOP_DUP) < 0) {
+ return JS_FALSE;
+ }
+ }
+
+ pn2 = pn->pn_kid1;
+ switch (pn2->pn_type) {
+#if JS_HAS_DESTRUCTURING
+ case TOK_RB:
+ case TOK_RC:
+ if (!EmitDestructuringOps(cx, cg, JSOP_NOP, pn2))
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_POP) < 0)
+ return JS_FALSE;
+ break;
+#endif
+
+ case TOK_NAME:
+ /* Inline BindNameToSlot, adding block depth to pn2->pn_slot. */
+ pn2->pn_slot += OBJ_BLOCK_DEPTH(cx, ATOM_TO_OBJECT(atom));
+ EMIT_UINT16_IMM_OP(JSOP_SETLOCALPOP, pn2->pn_slot);
+ break;
+
+ default:
+ JS_ASSERT(0);
+ }
+
+ /* Emit the guard expression, if there is one. */
+ if (pn->pn_kid2) {
+ if (!js_EmitTree(cx, cg, pn->pn_kid2))
+ return JS_FALSE;
+ if (!js_SetSrcNoteOffset(cx, cg, CATCHNOTE(*stmt), 0,
+ CG_OFFSET(cg) - catchStart)) {
+ return JS_FALSE;
+ }
+ /* ifeq <next block> */
+ guardJump = EmitJump(cx, cg, JSOP_IFEQ, 0);
+ if (guardJump < 0)
+ return JS_FALSE;
+ GUARDJUMP(*stmt) = guardJump;
+
+ /* Pop duplicated exception object as we no longer need it. */
+ if (js_NewSrcNote(cx, cg, SRC_HIDDEN) < 0 ||
+ js_Emit1(cx, cg, JSOP_POP) < 0) {
+ return JS_FALSE;
+ }
+ }
+
+ /* Emit the catch body. */
+ if (!js_EmitTree(cx, cg, pn->pn_kid3))
+ return JS_FALSE;
+
+ /*
+ * Annotate the JSOP_LEAVEBLOCK that will be emitted as we unwind via
+ * our TOK_LEXICALSCOPE parent, so the decompiler knows to pop.
+ */
+ off = cg->stackDepth;
+ if (js_NewSrcNote2(cx, cg, SRC_CATCH, off) < 0)
+ return JS_FALSE;
+ break;
+ }
+
+ case TOK_VAR:
+ if (!EmitVariables(cx, cg, pn, JS_FALSE, &noteIndex))
+ return JS_FALSE;
+ break;
+
+ case TOK_RETURN:
+ /* Push a return value */
+ pn2 = pn->pn_kid;
+ if (pn2) {
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ } else {
+ if (js_Emit1(cx, cg, JSOP_PUSH) < 0)
+ return JS_FALSE;
+ }
+
+ /*
+ * EmitNonLocalJumpFixup mutates op to JSOP_RETRVAL after emitting a
+ * JSOP_SETRVAL if there are open try blocks having finally clauses.
+ * We can't simply transfer control flow to our caller in that case,
+ * because we must gosub to those clauses from inner to outer, with
+ * the correct stack pointer (i.e., after popping any with, for/in,
+ * etc., slots nested inside the finally's try).
+ */
+ op = JSOP_RETURN;
+ if (!EmitNonLocalJumpFixup(cx, cg, NULL, &op))
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, op) < 0)
+ return JS_FALSE;
+ break;
+
+#if JS_HAS_GENERATORS
+ case TOK_YIELD:
+ if (pn->pn_kid) {
+ if (!js_EmitTree(cx, cg, pn->pn_kid))
+ return JS_FALSE;
+ } else {
+ if (js_Emit1(cx, cg, JSOP_PUSH) < 0)
+ return JS_FALSE;
+ }
+ if (js_Emit1(cx, cg, JSOP_YIELD) < 0)
+ return JS_FALSE;
+ break;
+#endif
+
+ case TOK_LC:
+#if JS_HAS_XML_SUPPORT
+ if (pn->pn_arity == PN_UNARY) {
+ if (!js_EmitTree(cx, cg, pn->pn_kid))
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, pn->pn_op) < 0)
+ return JS_FALSE;
+ break;
+ }
+#endif
+
+ JS_ASSERT(pn->pn_arity == PN_LIST);
+
+ noteIndex = -1;
+ tmp = CG_OFFSET(cg);
+ if (pn->pn_extra & PNX_NEEDBRACES) {
+ noteIndex = js_NewSrcNote2(cx, cg, SRC_BRACE, 0);
+ if (noteIndex < 0 || js_Emit1(cx, cg, JSOP_NOP) < 0)
+ return JS_FALSE;
+ }
+
+ js_PushStatement(&cg->treeContext, &stmtInfo, STMT_BLOCK, top);
+ for (pn2 = pn->pn_head; pn2; pn2 = pn2->pn_next) {
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ }
+
+ if (noteIndex >= 0 &&
+ !js_SetSrcNoteOffset(cx, cg, (uintN)noteIndex, 0,
+ CG_OFFSET(cg) - tmp)) {
+ return JS_FALSE;
+ }
+
+ ok = js_PopStatementCG(cx, cg);
+ break;
+
+ case TOK_BODY:
+ JS_ASSERT(pn->pn_arity == PN_LIST);
+ js_PushStatement(&cg->treeContext, &stmtInfo, STMT_BODY, top);
+ for (pn2 = pn->pn_head; pn2; pn2 = pn2->pn_next) {
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ }
+ ok = js_PopStatementCG(cx, cg);
+ break;
+
+ case TOK_SEMI:
+ pn2 = pn->pn_kid;
+ if (pn2) {
+ /*
+ * Top-level or called-from-a-native JS_Execute/EvaluateScript,
+ * debugger, and eval frames may need the value of the ultimate
+ * expression statement as the script's result, despite the fact
+ * that it appears useless to the compiler.
+ */
+ useful = wantval = !cx->fp->fun ||
+ !FUN_INTERPRETED(cx->fp->fun) ||
+ (cx->fp->flags & JSFRAME_SPECIAL);
+ if (!useful) {
+ if (!CheckSideEffects(cx, &cg->treeContext, pn2, &useful))
+ return JS_FALSE;
+ }
+
+ /*
+ * Don't eliminate apparently useless expressions if they are
+ * labeled expression statements. The tc->topStmt->update test
+ * catches the case where we are nesting in js_EmitTree for a
+ * labeled compound statement.
+ */
+ if (!useful &&
+ (!cg->treeContext.topStmt ||
+ cg->treeContext.topStmt->type != STMT_LABEL ||
+ cg->treeContext.topStmt->update < CG_OFFSET(cg))) {
+ CG_CURRENT_LINE(cg) = pn2->pn_pos.begin.lineno;
+ if (!js_ReportCompileErrorNumber(cx, cg,
+ JSREPORT_CG |
+ JSREPORT_WARNING |
+ JSREPORT_STRICT,
+ JSMSG_USELESS_EXPR)) {
+ return JS_FALSE;
+ }
+ } else {
+ op = wantval ? JSOP_POPV : JSOP_POP;
+#if JS_HAS_DESTRUCTURING
+ if (!wantval &&
+ pn2->pn_type == TOK_ASSIGN &&
+ !MaybeEmitGroupAssignment(cx, cg, op, pn2, &op)) {
+ return JS_FALSE;
+ }
+#endif
+ if (op != JSOP_NOP) {
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, op) < 0)
+ return JS_FALSE;
+ }
+ }
+ }
+ break;
+
+ case TOK_COLON:
+ /* Emit an annotated nop so we know to decompile a label. */
+ atom = pn->pn_atom;
+ ale = js_IndexAtom(cx, atom, &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ pn2 = pn->pn_expr;
+ noteType = (pn2->pn_type == TOK_LC ||
+ (pn2->pn_type == TOK_LEXICALSCOPE &&
+ pn2->pn_expr->pn_type == TOK_LC))
+ ? SRC_LABELBRACE
+ : SRC_LABEL;
+ noteIndex = js_NewSrcNote2(cx, cg, noteType,
+ (ptrdiff_t) ALE_INDEX(ale));
+ if (noteIndex < 0 ||
+ js_Emit1(cx, cg, JSOP_NOP) < 0) {
+ return JS_FALSE;
+ }
+
+ /* Emit code for the labeled statement. */
+ js_PushStatement(&cg->treeContext, &stmtInfo, STMT_LABEL,
+ CG_OFFSET(cg));
+ stmtInfo.atom = atom;
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ if (!js_PopStatementCG(cx, cg))
+ return JS_FALSE;
+
+ /* If the statement was compound, emit a note for the end brace. */
+ if (noteType == SRC_LABELBRACE) {
+ if (js_NewSrcNote(cx, cg, SRC_ENDBRACE) < 0 ||
+ js_Emit1(cx, cg, JSOP_NOP) < 0) {
+ return JS_FALSE;
+ }
+ }
+ break;
+
+ case TOK_COMMA:
+ /*
+ * Emit SRC_PCDELTA notes on each JSOP_POP between comma operands.
+ * These notes help the decompiler bracket the bytecodes generated
+ * from each sub-expression that follows a comma.
+ */
+ off = noteIndex = -1;
+ for (pn2 = pn->pn_head; ; pn2 = pn2->pn_next) {
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ tmp = CG_OFFSET(cg);
+ if (noteIndex >= 0) {
+ if (!js_SetSrcNoteOffset(cx, cg, (uintN)noteIndex, 0, tmp-off))
+ return JS_FALSE;
+ }
+ if (!pn2->pn_next)
+ break;
+ off = tmp;
+ noteIndex = js_NewSrcNote2(cx, cg, SRC_PCDELTA, 0);
+ if (noteIndex < 0 ||
+ js_Emit1(cx, cg, JSOP_POP) < 0) {
+ return JS_FALSE;
+ }
+ }
+ break;
+
+ case TOK_ASSIGN:
+ /*
+ * Check left operand type and generate specialized code for it.
+ * Specialize to avoid ECMA "reference type" values on the operand
+ * stack, which impose pervasive runtime "GetValue" costs.
+ */
+ pn2 = pn->pn_left;
+ JS_ASSERT(pn2->pn_type != TOK_RP);
+ atomIndex = (jsatomid) -1;
+ switch (pn2->pn_type) {
+ case TOK_NAME:
+ if (!BindNameToSlot(cx, &cg->treeContext, pn2, JS_FALSE))
+ return JS_FALSE;
+ if (pn2->pn_slot >= 0) {
+ atomIndex = (jsatomid) pn2->pn_slot;
+ } else {
+ ale = js_IndexAtom(cx, pn2->pn_atom, &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ atomIndex = ALE_INDEX(ale);
+ EMIT_ATOM_INDEX_OP(JSOP_BINDNAME, atomIndex);
+ }
+ break;
+ case TOK_DOT:
+ if (!js_EmitTree(cx, cg, pn2->pn_expr))
+ return JS_FALSE;
+ ale = js_IndexAtom(cx, pn2->pn_atom, &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ atomIndex = ALE_INDEX(ale);
+ break;
+ case TOK_LB:
+ JS_ASSERT(pn2->pn_arity == PN_BINARY);
+ if (!js_EmitTree(cx, cg, pn2->pn_left))
+ return JS_FALSE;
+ if (!js_EmitTree(cx, cg, pn2->pn_right))
+ return JS_FALSE;
+ break;
+#if JS_HAS_DESTRUCTURING
+ case TOK_RB:
+ case TOK_RC:
+ break;
+#endif
+#if JS_HAS_LVALUE_RETURN
+ case TOK_LP:
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ break;
+#endif
+#if JS_HAS_XML_SUPPORT
+ case TOK_UNARYOP:
+ JS_ASSERT(pn2->pn_op == JSOP_SETXMLNAME);
+ if (!js_EmitTree(cx, cg, pn2->pn_kid))
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_BINDXMLNAME) < 0)
+ return JS_FALSE;
+ break;
+#endif
+ default:
+ JS_ASSERT(0);
+ }
+
+ op = pn->pn_op;
+#if JS_HAS_GETTER_SETTER
+ if (op == JSOP_GETTER || op == JSOP_SETTER) {
+ /* We'll emit these prefix bytecodes after emitting the r.h.s. */
+ if (atomIndex != (jsatomid) -1 && atomIndex >= JS_BIT(16)) {
+ ReportStatementTooLarge(cx, cg);
+ return JS_FALSE;
+ }
+ } else
+#endif
+ /* If += or similar, dup the left operand and get its value. */
+ if (op != JSOP_NOP) {
+ switch (pn2->pn_type) {
+ case TOK_NAME:
+ if (pn2->pn_op != JSOP_SETNAME) {
+ EMIT_UINT16_IMM_OP((pn2->pn_op == JSOP_SETGVAR)
+ ? JSOP_GETGVAR
+ : (pn2->pn_op == JSOP_SETARG)
+ ? JSOP_GETARG
+ : (pn2->pn_op == JSOP_SETLOCAL)
+ ? JSOP_GETLOCAL
+ : JSOP_GETVAR,
+ atomIndex);
+ break;
+ }
+ /* FALL THROUGH */
+ case TOK_DOT:
+ if (js_Emit1(cx, cg, JSOP_DUP) < 0)
+ return JS_FALSE;
+ EMIT_ATOM_INDEX_OP((pn2->pn_type == TOK_NAME)
+ ? JSOP_GETXPROP
+ : JSOP_GETPROP,
+ atomIndex);
+ break;
+ case TOK_LB:
+#if JS_HAS_LVALUE_RETURN
+ case TOK_LP:
+#endif
+#if JS_HAS_XML_SUPPORT
+ case TOK_UNARYOP:
+#endif
+ if (js_Emit1(cx, cg, JSOP_DUP2) < 0)
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_GETELEM) < 0)
+ return JS_FALSE;
+ break;
+ default:;
+ }
+ }
+
+ /* Now emit the right operand (it may affect the namespace). */
+ if (!js_EmitTree(cx, cg, pn->pn_right))
+ return JS_FALSE;
+
+ /* If += etc., emit the binary operator with a decompiler note. */
+ if (op != JSOP_NOP) {
+ /*
+ * Take care to avoid SRC_ASSIGNOP if the left-hand side is a
+ * const declared in a function (i.e., with non-negative pn_slot
+ * and JSPROP_READONLY in pn_attrs), as in this case (just a bit
+ * further below) we will avoid emitting the assignment op.
+ */
+ if (pn2->pn_type != TOK_NAME ||
+ pn2->pn_slot < 0 ||
+ !(pn2->pn_attrs & JSPROP_READONLY)) {
+ if (js_NewSrcNote(cx, cg, SRC_ASSIGNOP) < 0)
+ return JS_FALSE;
+ }
+ if (js_Emit1(cx, cg, op) < 0)
+ return JS_FALSE;
+ }
+
+ /* Left parts such as a.b.c and a[b].c need a decompiler note. */
+ if (pn2->pn_type != TOK_NAME &&
+#if JS_HAS_DESTRUCTURING
+ pn2->pn_type != TOK_RB &&
+ pn2->pn_type != TOK_RC &&
+#endif
+ js_NewSrcNote2(cx, cg, SrcNoteForPropOp(pn2, pn2->pn_op),
+ CG_OFFSET(cg) - top) < 0) {
+ return JS_FALSE;
+ }
+
+ /* Finally, emit the specialized assignment bytecode. */
+ switch (pn2->pn_type) {
+ case TOK_NAME:
+ if (pn2->pn_slot < 0 || !(pn2->pn_attrs & JSPROP_READONLY)) {
+ if (pn2->pn_slot >= 0) {
+ EMIT_UINT16_IMM_OP(pn2->pn_op, atomIndex);
+ } else {
+ case TOK_DOT:
+ EMIT_ATOM_INDEX_OP(pn2->pn_op, atomIndex);
+ }
+ }
+ break;
+ case TOK_LB:
+#if JS_HAS_LVALUE_RETURN
+ case TOK_LP:
+#endif
+ if (js_Emit1(cx, cg, JSOP_SETELEM) < 0)
+ return JS_FALSE;
+ break;
+#if JS_HAS_DESTRUCTURING
+ case TOK_RB:
+ case TOK_RC:
+ if (!EmitDestructuringOps(cx, cg, JSOP_SETNAME, pn2))
+ return JS_FALSE;
+ break;
+#endif
+#if JS_HAS_XML_SUPPORT
+ case TOK_UNARYOP:
+ if (js_Emit1(cx, cg, JSOP_SETXMLNAME) < 0)
+ return JS_FALSE;
+ break;
+#endif
+ default:
+ JS_ASSERT(0);
+ }
+ break;
+
+ case TOK_HOOK:
+ /* Emit the condition, then branch if false to the else part. */
+ if (!js_EmitTree(cx, cg, pn->pn_kid1))
+ return JS_FALSE;
+ noteIndex = js_NewSrcNote(cx, cg, SRC_COND);
+ if (noteIndex < 0)
+ return JS_FALSE;
+ beq = EmitJump(cx, cg, JSOP_IFEQ, 0);
+ if (beq < 0 || !js_EmitTree(cx, cg, pn->pn_kid2))
+ return JS_FALSE;
+
+ /* Jump around else, fixup the branch, emit else, fixup jump. */
+ jmp = EmitJump(cx, cg, JSOP_GOTO, 0);
+ if (jmp < 0)
+ return JS_FALSE;
+ CHECK_AND_SET_JUMP_OFFSET_AT(cx, cg, beq);
+
+ /*
+ * Because each branch pushes a single value, but our stack budgeting
+ * analysis ignores branches, we now have to adjust cg->stackDepth to
+ * ignore the value pushed by the first branch. Execution will follow
+ * only one path, so we must decrement cg->stackDepth.
+ *
+ * Failing to do this will foil code, such as the try/catch/finally
+ * exception handling code generator, that samples cg->stackDepth for
+ * use at runtime (JSOP_SETSP), or in let expression and block code
+ * generation, which must use the stack depth to compute local stack
+ * indexes correctly.
+ */
+ JS_ASSERT(cg->stackDepth > 0);
+ cg->stackDepth--;
+ if (!js_EmitTree(cx, cg, pn->pn_kid3))
+ return JS_FALSE;
+ CHECK_AND_SET_JUMP_OFFSET_AT(cx, cg, jmp);
+ if (!js_SetSrcNoteOffset(cx, cg, noteIndex, 0, jmp - beq))
+ return JS_FALSE;
+ break;
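Editor's note, not part of this patch: the comment above is about static accounting only. During emission each arm of ?: appears to push one value, but at run time only one arm executes, so the emitter subtracts one before emitting the else arm. A minimal standalone model of that bookkeeping (hypothetical names, not the real code generator):

#include <stdio.h>

/* Toy static stack-depth model: the "emitter" counts every push and tracks
 * the maximum, ignoring which path actually runs -- just like cg does. */
typedef struct { int depth, maxDepth; } DepthModel;

static void model_push(DepthModel *m)
{
    if (++m->depth > m->maxDepth)
        m->maxDepth = m->depth;
}

int main(void)
{
    DepthModel m = { 0, 0 };

    model_push(&m);     /* condition value */
    m.depth--;          /* IFEQ pops the condition */

    model_push(&m);     /* then-arm result */
    m.depth--;          /* only one arm runs: undo one push before the else arm */
    model_push(&m);     /* else-arm result */

    printf("depth=%d max=%d\n", m.depth, m.maxDepth);   /* depth=1 max=1 */
    return 0;
}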
+
+ case TOK_OR:
+ case TOK_AND:
+ /*
+ * JSOP_OR converts the operand on the stack to boolean, and if true,
+ * leaves the original operand value on the stack and jumps; otherwise
+ * it pops and falls into the next bytecode, which evaluates the right
+ * operand. The jump goes around the right operand evaluation.
+ *
+ * JSOP_AND converts the operand on the stack to boolean, and if false,
+ * leaves the original operand value on the stack and jumps; otherwise
+ * it pops and falls into the right operand's bytecode.
+ *
+ * Avoid tail recursion for long ||...|| expressions and long &&...&&
+ * expressions or long mixtures of ||'s and &&'s that can easily blow
+ * the stack, by forward-linking and then backpatching all the JSOP_OR
+ * and JSOP_AND bytecodes' immediate jump-offset operands.
+ */
+ pn3 = pn;
+ if (!js_EmitTree(cx, cg, pn->pn_left))
+ return JS_FALSE;
+ top = EmitJump(cx, cg, JSOP_BACKPATCH_POP, 0);
+ if (top < 0)
+ return JS_FALSE;
+ jmp = top;
+ pn2 = pn->pn_right;
+ while (pn2->pn_type == TOK_OR || pn2->pn_type == TOK_AND) {
+ pn = pn2;
+ if (!js_EmitTree(cx, cg, pn->pn_left))
+ return JS_FALSE;
+ off = EmitJump(cx, cg, JSOP_BACKPATCH_POP, 0);
+ if (off < 0)
+ return JS_FALSE;
+ if (!SetBackPatchDelta(cx, cg, CG_CODE(cg, jmp), off - jmp))
+ return JS_FALSE;
+ jmp = off;
+ pn2 = pn->pn_right;
+ }
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ off = CG_OFFSET(cg);
+ do {
+ pc = CG_CODE(cg, top);
+ tmp = GetJumpOffset(cg, pc);
+ CHECK_AND_SET_JUMP_OFFSET(cx, cg, pc, off - top);
+ *pc = pn3->pn_op;
+ top += tmp;
+ } while ((pn3 = pn3->pn_right) != pn2);
+ break;
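Editor's note, not part of this patch: the forward-link-then-backpatch trick described in the TOK_OR/TOK_AND comment can be seen in isolation. Each short-circuit jump is emitted with its offset field temporarily holding a link to the previous unpatched jump, and once the join point is known a single walk over that chain patches every jump, so long operator chains never recurse. A self-contained sketch with a toy bytecode layout (not SpiderMonkey's encoding):

#include <stdio.h>

int main(void)
{
    /* Toy code array: each "jump" is one int that first holds a link to the
     * previous unpatched jump (0 terminates the chain) and, after
     * backpatching, holds the real relative jump distance. */
    int code[16] = { 0 };
    int placeholders[3] = { 2, 5, 9 };  /* offsets where jumps were emitted */
    int chainHead = -1;
    int target = 12;                    /* join point after the last operand */
    int i, off, next;

    /* "Emit" the short-circuit jumps iteratively, threading a chain through
     * their offset fields instead of recursing down the operator chain. */
    for (i = 0; i < 3; i++) {
        off = placeholders[i];
        code[off] = (chainHead < 0) ? 0 : chainHead;
        chainHead = off;
    }

    /* Join point reached: walk the chain once, patching each jump. */
    for (off = chainHead; ; off = next) {
        next = code[off];
        code[off] = target - off;       /* relative jump to the join point */
        if (next == 0)
            break;
    }

    for (i = 0; i < 3; i++)
        printf("jump at %d -> +%d\n", placeholders[i], code[placeholders[i]]);
    return 0;
}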
+
+ case TOK_BITOR:
+ case TOK_BITXOR:
+ case TOK_BITAND:
+ case TOK_EQOP:
+ case TOK_RELOP:
+ case TOK_IN:
+ case TOK_INSTANCEOF:
+ case TOK_SHOP:
+ case TOK_PLUS:
+ case TOK_MINUS:
+ case TOK_STAR:
+ case TOK_DIVOP:
+ if (pn->pn_arity == PN_LIST) {
+ /* Left-associative operator chain: avoid too much recursion. */
+ pn2 = pn->pn_head;
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ op = pn->pn_op;
+ while ((pn2 = pn2->pn_next) != NULL) {
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, op) < 0)
+ return JS_FALSE;
+ }
+ } else {
+#if JS_HAS_XML_SUPPORT
+ uintN oldflags;
+
+ case TOK_DBLCOLON:
+ if (pn->pn_arity == PN_NAME) {
+ if (!js_EmitTree(cx, cg, pn->pn_expr))
+ return JS_FALSE;
+ if (!EmitAtomOp(cx, pn, pn->pn_op, cg))
+ return JS_FALSE;
+ break;
+ }
+
+ /*
+ * Binary :: has a right operand that brackets arbitrary code,
+ * possibly including a let (a = b) ... expression. We must clear
+ * TCF_IN_FOR_INIT to avoid mis-compiling such beasts.
+ */
+ oldflags = cg->treeContext.flags;
+ cg->treeContext.flags &= ~TCF_IN_FOR_INIT;
+#endif
+
+ /* Binary operators that evaluate both operands unconditionally. */
+ if (!js_EmitTree(cx, cg, pn->pn_left))
+ return JS_FALSE;
+ if (!js_EmitTree(cx, cg, pn->pn_right))
+ return JS_FALSE;
+#if JS_HAS_XML_SUPPORT
+ cg->treeContext.flags |= oldflags & TCF_IN_FOR_INIT;
+#endif
+ if (js_Emit1(cx, cg, pn->pn_op) < 0)
+ return JS_FALSE;
+ }
+ break;
+
+ case TOK_THROW:
+#if JS_HAS_XML_SUPPORT
+ case TOK_AT:
+ case TOK_DEFAULT:
+ JS_ASSERT(pn->pn_arity == PN_UNARY);
+ /* FALL THROUGH */
+#endif
+ case TOK_UNARYOP:
+ {
+ uintN oldflags;
+
+ /* Unary op, including unary +/-. */
+ pn2 = pn->pn_kid;
+ op = pn->pn_op;
+ if (op == JSOP_TYPEOF) {
+ for (pn3 = pn2; pn3->pn_type == TOK_RP; pn3 = pn3->pn_kid)
+ continue;
+ if (pn3->pn_type != TOK_NAME)
+ op = JSOP_TYPEOFEXPR;
+ }
+ oldflags = cg->treeContext.flags;
+ cg->treeContext.flags &= ~TCF_IN_FOR_INIT;
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ cg->treeContext.flags |= oldflags & TCF_IN_FOR_INIT;
+#if JS_HAS_XML_SUPPORT
+ if (op == JSOP_XMLNAME &&
+ js_NewSrcNote2(cx, cg, SRC_PCBASE,
+ CG_OFFSET(cg) - pn2->pn_offset) < 0) {
+ return JS_FALSE;
+ }
+#endif
+ if (js_Emit1(cx, cg, op) < 0)
+ return JS_FALSE;
+ break;
+ }
+
+ case TOK_INC:
+ case TOK_DEC:
+ {
+ intN depth;
+
+ /* Emit lvalue-specialized code for ++/-- operators. */
+ pn2 = pn->pn_kid;
+ JS_ASSERT(pn2->pn_type != TOK_RP);
+ op = pn->pn_op;
+ depth = cg->stackDepth;
+ switch (pn2->pn_type) {
+ case TOK_NAME:
+ pn2->pn_op = op;
+ if (!BindNameToSlot(cx, &cg->treeContext, pn2, JS_FALSE))
+ return JS_FALSE;
+ op = pn2->pn_op;
+ if (pn2->pn_slot >= 0) {
+ if (pn2->pn_attrs & JSPROP_READONLY) {
+ /* Incrementing a declared const: just get its value. */
+ op = ((js_CodeSpec[op].format & JOF_TYPEMASK) == JOF_CONST)
+ ? JSOP_GETGVAR
+ : JSOP_GETVAR;
+ }
+ atomIndex = (jsatomid) pn2->pn_slot;
+ EMIT_UINT16_IMM_OP(op, atomIndex);
+ } else {
+ if (!EmitAtomOp(cx, pn2, op, cg))
+ return JS_FALSE;
+ }
+ break;
+ case TOK_DOT:
+ if (!EmitPropOp(cx, pn2, op, cg))
+ return JS_FALSE;
+ ++depth;
+ break;
+ case TOK_LB:
+ if (!EmitElemOp(cx, pn2, op, cg))
+ return JS_FALSE;
+ depth += 2;
+ break;
+#if JS_HAS_LVALUE_RETURN
+ case TOK_LP:
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ depth = cg->stackDepth;
+ if (js_NewSrcNote2(cx, cg, SRC_PCBASE,
+ CG_OFFSET(cg) - pn2->pn_offset) < 0) {
+ return JS_FALSE;
+ }
+ if (js_Emit1(cx, cg, op) < 0)
+ return JS_FALSE;
+ break;
+#endif
+#if JS_HAS_XML_SUPPORT
+ case TOK_UNARYOP:
+ JS_ASSERT(pn2->pn_op == JSOP_SETXMLNAME);
+ if (!js_EmitTree(cx, cg, pn2->pn_kid))
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_BINDXMLNAME) < 0)
+ return JS_FALSE;
+ depth = cg->stackDepth;
+ if (js_Emit1(cx, cg, op) < 0)
+ return JS_FALSE;
+ break;
+#endif
+ default:
+ JS_ASSERT(0);
+ }
+
+ /*
+ * Allocate another stack slot for GC protection in case the initial
+ * value being post-incremented or -decremented is not a number, but
+ * converts to a jsdouble. In the TOK_NAME cases, op has 0 operand
+ * uses and 1 definition, so we don't need an extra stack slot -- we
+ * can use the one allocated for the def.
+ */
+ if (pn2->pn_type != TOK_NAME &&
+ (js_CodeSpec[op].format & JOF_POST) &&
+ (uintN)depth == cg->maxStackDepth) {
+ ++cg->maxStackDepth;
+ }
+ break;
+ }
+
+ case TOK_DELETE:
+ /*
+ * Under ECMA 3, deleting a non-reference returns true -- but alas we
+ * must evaluate the operand if it appears it might have side effects.
+ */
+ pn2 = pn->pn_kid;
+ switch (pn2->pn_type) {
+ case TOK_NAME:
+ pn2->pn_op = JSOP_DELNAME;
+ if (!BindNameToSlot(cx, &cg->treeContext, pn2, JS_FALSE))
+ return JS_FALSE;
+ op = pn2->pn_op;
+ if (op == JSOP_FALSE) {
+ if (js_Emit1(cx, cg, op) < 0)
+ return JS_FALSE;
+ } else {
+ if (!EmitAtomOp(cx, pn2, op, cg))
+ return JS_FALSE;
+ }
+ break;
+ case TOK_DOT:
+ if (!EmitPropOp(cx, pn2, JSOP_DELPROP, cg))
+ return JS_FALSE;
+ break;
+#if JS_HAS_XML_SUPPORT
+ case TOK_DBLDOT:
+ if (!EmitElemOp(cx, pn2, JSOP_DELDESC, cg))
+ return JS_FALSE;
+ break;
+#endif
+#if JS_HAS_LVALUE_RETURN
+ case TOK_LP:
+ if (pn2->pn_op != JSOP_SETCALL) {
+ JS_ASSERT(pn2->pn_op == JSOP_CALL || pn2->pn_op == JSOP_EVAL);
+ pn2->pn_op = JSOP_SETCALL;
+ }
+ top = CG_OFFSET(cg);
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ if (js_NewSrcNote2(cx, cg, SRC_PCBASE, CG_OFFSET(cg) - top) < 0)
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_DELELEM) < 0)
+ return JS_FALSE;
+ break;
+#endif
+ case TOK_LB:
+ if (!EmitElemOp(cx, pn2, JSOP_DELELEM, cg))
+ return JS_FALSE;
+ break;
+ default:
+ /*
+ * If useless, just emit JSOP_TRUE; otherwise convert delete foo()
+ * to foo(), true (a comma expression, requiring SRC_PCDELTA).
+ */
+ useful = JS_FALSE;
+ if (!CheckSideEffects(cx, &cg->treeContext, pn2, &useful))
+ return JS_FALSE;
+ if (!useful) {
+ off = noteIndex = -1;
+ } else {
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ off = CG_OFFSET(cg);
+ noteIndex = js_NewSrcNote2(cx, cg, SRC_PCDELTA, 0);
+ if (noteIndex < 0 || js_Emit1(cx, cg, JSOP_POP) < 0)
+ return JS_FALSE;
+ }
+ if (js_Emit1(cx, cg, JSOP_TRUE) < 0)
+ return JS_FALSE;
+ if (noteIndex >= 0) {
+ tmp = CG_OFFSET(cg);
+ if (!js_SetSrcNoteOffset(cx, cg, (uintN)noteIndex, 0, tmp-off))
+ return JS_FALSE;
+ }
+ }
+ break;
+
+#if JS_HAS_XML_SUPPORT
+ case TOK_FILTER:
+ if (!js_EmitTree(cx, cg, pn->pn_left))
+ return JS_FALSE;
+ jmp = js_Emit3(cx, cg, JSOP_FILTER, 0, 0);
+ if (jmp < 0)
+ return JS_FALSE;
+ if (!js_EmitTree(cx, cg, pn->pn_right))
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_ENDFILTER) < 0)
+ return JS_FALSE;
+ CHECK_AND_SET_JUMP_OFFSET_AT(cx, cg, jmp);
+ break;
+#endif
+
+ case TOK_DOT:
+ /*
+ * Pop a stack operand, convert it to object, get a property named by
+ * this bytecode's immediate-indexed atom operand, and push its value
+ * (not a reference to it). This bytecode sets the virtual machine's
+ * "obj" register to the left operand's ToObject conversion result,
+ * for use by JSOP_PUSHOBJ.
+ */
+ ok = EmitPropOp(cx, pn, pn->pn_op, cg);
+ break;
+
+ case TOK_LB:
+#if JS_HAS_XML_SUPPORT
+ case TOK_DBLDOT:
+#endif
+ /*
+ * Pop two operands, convert the left one to object and the right one
+ * to property name (atom or tagged int), get the named property, and
+ * push its value. Set the "obj" register to the result of ToObject
+ * on the left operand.
+ */
+ ok = EmitElemOp(cx, pn, pn->pn_op, cg);
+ break;
+
+ case TOK_NEW:
+ case TOK_LP:
+ {
+ uintN oldflags;
+
+ /*
+ * Emit function call or operator new (constructor call) code.
+ * First, emit code for the left operand to evaluate the callable or
+ * constructable object expression.
+ *
+ * For E4X, if this expression is a dotted member reference, select
+ * JSOP_GETMETHOD instead of JSOP_GETPROP. ECMA-357 separates XML
+ * method lookup from the normal property id lookup done for native
+ * objects.
+ */
+ pn2 = pn->pn_head;
+#if JS_HAS_XML_SUPPORT
+ if (pn2->pn_type == TOK_DOT && pn2->pn_op != JSOP_GETMETHOD) {
+ JS_ASSERT(pn2->pn_op == JSOP_GETPROP);
+ pn2->pn_op = JSOP_GETMETHOD;
+ pn2->pn_attrs |= JSPROP_IMPLICIT_FUNCTION_NAMESPACE;
+ }
+#endif
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+
+ /*
+ * Push the virtual machine's "obj" register, which was set by a
+ * name, property, or element get (or set) bytecode.
+ */
+ if (js_Emit1(cx, cg, JSOP_PUSHOBJ) < 0)
+ return JS_FALSE;
+
+ /* Remember start of callable-object bytecode for decompilation hint. */
+ off = top;
+
+ /*
+ * Emit code for each argument in order, then emit the JSOP_*CALL or
+ * JSOP_NEW bytecode with a two-byte immediate telling how many args
+ * were pushed on the operand stack.
+ */
+ oldflags = cg->treeContext.flags;
+ cg->treeContext.flags &= ~TCF_IN_FOR_INIT;
+ for (pn2 = pn2->pn_next; pn2; pn2 = pn2->pn_next) {
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ }
+ cg->treeContext.flags |= oldflags & TCF_IN_FOR_INIT;
+ if (js_NewSrcNote2(cx, cg, SRC_PCBASE, CG_OFFSET(cg) - off) < 0)
+ return JS_FALSE;
+
+ argc = pn->pn_count - 1;
+ if (js_Emit3(cx, cg, pn->pn_op, ARGC_HI(argc), ARGC_LO(argc)) < 0)
+ return JS_FALSE;
+ break;
+ }
+
+ case TOK_LEXICALSCOPE:
+ {
+ JSObject *obj;
+ jsint count;
+
+ atom = pn->pn_atom;
+ obj = ATOM_TO_OBJECT(atom);
+ js_PushBlockScope(&cg->treeContext, &stmtInfo, atom, CG_OFFSET(cg));
+
+ OBJ_SET_BLOCK_DEPTH(cx, obj, cg->stackDepth);
+ count = OBJ_BLOCK_COUNT(cx, obj);
+ cg->stackDepth += count;
+ if ((uintN)cg->stackDepth > cg->maxStackDepth)
+ cg->maxStackDepth = cg->stackDepth;
+
+ /*
+ * If this lexical scope is not for a catch block, let block or let
+ * expression, or any kind of for loop (where the scope starts in the
+ * head after the first part if for (;;), else in the body if for-in);
+ * and if our container is top-level but not a function body, or else
+ * a block statement; then emit a SRC_BRACE note. All other container
+ * statements get braces by default from the decompiler.
+ */
+ noteIndex = -1;
+ type = pn->pn_expr->pn_type;
+ if (type != TOK_CATCH && type != TOK_LET && type != TOK_FOR &&
+ (!(stmt = stmtInfo.down)
+ ? !(cg->treeContext.flags & TCF_IN_FUNCTION)
+ : stmt->type == STMT_BLOCK)) {
+#if defined DEBUG_brendan || defined DEBUG_mrbkap
+ /* There must be no source note already output for the next op. */
+ JS_ASSERT(CG_NOTE_COUNT(cg) == 0 ||
+ CG_LAST_NOTE_OFFSET(cg) != CG_OFFSET(cg) ||
+ !GettableNoteForNextOp(cg));
+#endif
+ noteIndex = js_NewSrcNote2(cx, cg, SRC_BRACE, 0);
+ if (noteIndex < 0)
+ return JS_FALSE;
+ }
+
+ ale = js_IndexAtom(cx, atom, &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ JS_ASSERT(CG_OFFSET(cg) == top);
+ EMIT_ATOM_INDEX_OP(JSOP_ENTERBLOCK, ALE_INDEX(ale));
+
+ if (!js_EmitTree(cx, cg, pn->pn_expr))
+ return JS_FALSE;
+
+ op = pn->pn_op;
+ if (op == JSOP_LEAVEBLOCKEXPR) {
+ if (js_NewSrcNote2(cx, cg, SRC_PCBASE, CG_OFFSET(cg) - top) < 0)
+ return JS_FALSE;
+ } else {
+ if (noteIndex >= 0 &&
+ !js_SetSrcNoteOffset(cx, cg, (uintN)noteIndex, 0,
+ CG_OFFSET(cg) - top)) {
+ return JS_FALSE;
+ }
+ }
+
+ /* Emit the JSOP_LEAVEBLOCK or JSOP_LEAVEBLOCKEXPR opcode. */
+ EMIT_UINT16_IMM_OP(op, count);
+ cg->stackDepth -= count;
+
+ ok = js_PopStatementCG(cx, cg);
+ break;
+ }
+
+#if JS_HAS_BLOCK_SCOPE
+ case TOK_LET:
+ /* Let statements have their variable declarations on the left. */
+ if (pn->pn_arity == PN_BINARY) {
+ pn2 = pn->pn_right;
+ pn = pn->pn_left;
+ } else {
+ pn2 = NULL;
+ }
+
+ /* Non-null pn2 means that pn is the variable list from a let head. */
+ JS_ASSERT(pn->pn_arity == PN_LIST);
+ if (!EmitVariables(cx, cg, pn, pn2 != NULL, &noteIndex))
+ return JS_FALSE;
+
+ /* Thus non-null pn2 is the body of the let block or expression. */
+ tmp = CG_OFFSET(cg);
+ if (pn2 && !js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+
+ if (noteIndex >= 0 &&
+ !js_SetSrcNoteOffset(cx, cg, (uintN)noteIndex, 0,
+ CG_OFFSET(cg) - tmp)) {
+ return JS_FALSE;
+ }
+ break;
+#endif /* JS_HAS_BLOCK_SCOPE */
+
+#if JS_HAS_GENERATORS
+ case TOK_ARRAYPUSH:
+ /*
+ * The array object's stack index is in cg->arrayCompSlot. See below
+ * under the array initialiser code generator for array comprehension
+ * special casing.
+ */
+ if (!js_EmitTree(cx, cg, pn->pn_kid))
+ return JS_FALSE;
+ EMIT_UINT16_IMM_OP(pn->pn_op, cg->arrayCompSlot);
+ break;
+#endif
+
+ case TOK_RB:
+#if JS_HAS_GENERATORS
+ case TOK_ARRAYCOMP:
+#endif
+ /*
+ * Emit code for [a, b, c] of the form:
+ * t = new Array; t[0] = a; t[1] = b; t[2] = c; t;
+ * but use a stack slot for t and avoid dup'ing and popping it via
+ * the JSOP_NEWINIT and JSOP_INITELEM bytecodes.
+ */
+ ale = js_IndexAtom(cx, CLASS_ATOM(cx, Array), &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ EMIT_ATOM_INDEX_OP(JSOP_NAME, ALE_INDEX(ale));
+ if (js_Emit1(cx, cg, JSOP_PUSHOBJ) < 0)
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_NEWINIT) < 0)
+ return JS_FALSE;
+
+ pn2 = pn->pn_head;
+#if JS_HAS_SHARP_VARS
+ if (pn2 && pn2->pn_type == TOK_DEFSHARP) {
+ EMIT_UINT16_IMM_OP(JSOP_DEFSHARP, (jsatomid)pn2->pn_num);
+ pn2 = pn2->pn_next;
+ }
+#endif
+
+#if JS_HAS_GENERATORS
+ if (pn->pn_type == TOK_ARRAYCOMP) {
+ uintN saveSlot;
+
+ /*
+ * Pass the new array's stack index to the TOK_ARRAYPUSH case by
+             * storing it in cg->arrayCompSlot, then simply traverse the TOK_FOR
+ * node and its kids under pn2 to generate this comprehension.
+ */
+ JS_ASSERT(cg->stackDepth > 0);
+ saveSlot = cg->arrayCompSlot;
+ cg->arrayCompSlot = (uint32) (cg->stackDepth - 1);
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ cg->arrayCompSlot = saveSlot;
+
+ /* Emit the usual op needed for decompilation. */
+ if (js_Emit1(cx, cg, JSOP_ENDINIT) < 0)
+ return JS_FALSE;
+ break;
+ }
+#endif /* JS_HAS_GENERATORS */
+
+ for (atomIndex = 0; pn2; atomIndex++, pn2 = pn2->pn_next) {
+ if (!EmitNumberOp(cx, atomIndex, cg))
+ return JS_FALSE;
+
+ /* FIXME 260106: holes in a sparse initializer are void-filled. */
+ if (pn2->pn_type == TOK_COMMA) {
+ if (js_Emit1(cx, cg, JSOP_PUSH) < 0)
+ return JS_FALSE;
+ } else {
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ }
+
+ if (js_Emit1(cx, cg, JSOP_INITELEM) < 0)
+ return JS_FALSE;
+ }
+
+ if (pn->pn_extra & PNX_ENDCOMMA) {
+ /* Emit a source note so we know to decompile an extra comma. */
+ if (js_NewSrcNote(cx, cg, SRC_CONTINUE) < 0)
+ return JS_FALSE;
+ }
+
+ /* Emit an op for sharp array cleanup and decompilation. */
+ if (js_Emit1(cx, cg, JSOP_ENDINIT) < 0)
+ return JS_FALSE;
+ break;
+
+ case TOK_RC:
+ /*
+ * Emit code for {p:a, '%q':b, 2:c} of the form:
+ * t = new Object; t.p = a; t['%q'] = b; t[2] = c; t;
+ * but use a stack slot for t and avoid dup'ing and popping it via
+ * the JSOP_NEWINIT and JSOP_INITELEM bytecodes.
+ */
+ ale = js_IndexAtom(cx, CLASS_ATOM(cx, Object), &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ EMIT_ATOM_INDEX_OP(JSOP_NAME, ALE_INDEX(ale));
+
+ if (js_Emit1(cx, cg, JSOP_PUSHOBJ) < 0)
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_NEWINIT) < 0)
+ return JS_FALSE;
+
+ pn2 = pn->pn_head;
+#if JS_HAS_SHARP_VARS
+ if (pn2 && pn2->pn_type == TOK_DEFSHARP) {
+ EMIT_UINT16_IMM_OP(JSOP_DEFSHARP, (jsatomid)pn2->pn_num);
+ pn2 = pn2->pn_next;
+ }
+#endif
+
+ for (; pn2; pn2 = pn2->pn_next) {
+ /* Emit an index for t[2], else map an atom for t.p or t['%q']. */
+ pn3 = pn2->pn_left;
+ switch (pn3->pn_type) {
+ case TOK_NUMBER:
+ if (!EmitNumberOp(cx, pn3->pn_dval, cg))
+ return JS_FALSE;
+ break;
+ case TOK_NAME:
+ case TOK_STRING:
+ ale = js_IndexAtom(cx, pn3->pn_atom, &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ break;
+ default:
+ JS_ASSERT(0);
+ }
+
+ /* Emit code for the property initializer. */
+ if (!js_EmitTree(cx, cg, pn2->pn_right))
+ return JS_FALSE;
+
+#if JS_HAS_GETTER_SETTER
+ op = pn2->pn_op;
+ if (op == JSOP_GETTER || op == JSOP_SETTER) {
+ if (pn3->pn_type != TOK_NUMBER &&
+ ALE_INDEX(ale) >= JS_BIT(16)) {
+ ReportStatementTooLarge(cx, cg);
+ return JS_FALSE;
+ }
+ if (js_Emit1(cx, cg, op) < 0)
+ return JS_FALSE;
+ }
+#endif
+ /* Annotate JSOP_INITELEM so we decompile 2:c and not just c. */
+ if (pn3->pn_type == TOK_NUMBER) {
+ if (js_NewSrcNote(cx, cg, SRC_INITPROP) < 0)
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_INITELEM) < 0)
+ return JS_FALSE;
+ } else {
+ EMIT_ATOM_INDEX_OP(JSOP_INITPROP, ALE_INDEX(ale));
+ }
+ }
+
+ /* Emit an op for sharpArray cleanup and decompilation. */
+ if (js_Emit1(cx, cg, JSOP_ENDINIT) < 0)
+ return JS_FALSE;
+ break;
+
+#if JS_HAS_SHARP_VARS
+ case TOK_DEFSHARP:
+ if (!js_EmitTree(cx, cg, pn->pn_kid))
+ return JS_FALSE;
+ EMIT_UINT16_IMM_OP(JSOP_DEFSHARP, (jsatomid) pn->pn_num);
+ break;
+
+ case TOK_USESHARP:
+ EMIT_UINT16_IMM_OP(JSOP_USESHARP, (jsatomid) pn->pn_num);
+ break;
+#endif /* JS_HAS_SHARP_VARS */
+
+ case TOK_RP:
+ {
+ uintN oldflags;
+
+ /*
+ * The node for (e) has e as its kid, enabling users who want to nest
+ * assignment expressions in conditions to avoid the error correction
+ * done by Condition (from x = y to x == y) by double-parenthesizing.
+ */
+ oldflags = cg->treeContext.flags;
+ cg->treeContext.flags &= ~TCF_IN_FOR_INIT;
+ if (!js_EmitTree(cx, cg, pn->pn_kid))
+ return JS_FALSE;
+ cg->treeContext.flags |= oldflags & TCF_IN_FOR_INIT;
+ if (js_Emit1(cx, cg, JSOP_GROUP) < 0)
+ return JS_FALSE;
+ break;
+ }
+
+ case TOK_NAME:
+ if (!BindNameToSlot(cx, &cg->treeContext, pn, JS_FALSE))
+ return JS_FALSE;
+ op = pn->pn_op;
+ if (op == JSOP_ARGUMENTS) {
+ if (js_Emit1(cx, cg, op) < 0)
+ return JS_FALSE;
+ break;
+ }
+ if (pn->pn_slot >= 0) {
+ atomIndex = (jsatomid) pn->pn_slot;
+ EMIT_UINT16_IMM_OP(op, atomIndex);
+ break;
+ }
+ /* FALL THROUGH */
+
+#if JS_HAS_XML_SUPPORT
+ case TOK_XMLATTR:
+ case TOK_XMLSPACE:
+ case TOK_XMLTEXT:
+ case TOK_XMLCDATA:
+ case TOK_XMLCOMMENT:
+#endif
+ case TOK_STRING:
+ case TOK_OBJECT:
+ /*
+ * The scanner and parser associate JSOP_NAME with TOK_NAME, although
+ * other bytecodes may result instead (JSOP_BINDNAME/JSOP_SETNAME,
+ * JSOP_FORNAME, etc.). Among JSOP_*NAME* variants, only JSOP_NAME
+ * may generate the first operand of a call or new expression, so only
+ * it sets the "obj" virtual machine register to the object along the
+ * scope chain in which the name was found.
+ *
+ * Token types for STRING and OBJECT have corresponding bytecode ops
+ * in pn_op and emit the same format as NAME, so they share this code.
+ */
+ ok = EmitAtomOp(cx, pn, pn->pn_op, cg);
+ break;
+
+ case TOK_NUMBER:
+ ok = EmitNumberOp(cx, pn->pn_dval, cg);
+ break;
+
+#if JS_HAS_XML_SUPPORT
+ case TOK_ANYNAME:
+#endif
+ case TOK_PRIMARY:
+ if (js_Emit1(cx, cg, pn->pn_op) < 0)
+ return JS_FALSE;
+ break;
+
+#if JS_HAS_DEBUGGER_KEYWORD
+ case TOK_DEBUGGER:
+ if (js_Emit1(cx, cg, JSOP_DEBUGGER) < 0)
+ return JS_FALSE;
+ break;
+#endif /* JS_HAS_DEBUGGER_KEYWORD */
+
+#if JS_HAS_XML_SUPPORT
+ case TOK_XMLELEM:
+ case TOK_XMLLIST:
+ if (pn->pn_op == JSOP_XMLOBJECT) {
+ ok = EmitAtomOp(cx, pn, pn->pn_op, cg);
+ break;
+ }
+
+ JS_ASSERT(pn->pn_type == TOK_XMLLIST || pn->pn_count != 0);
+ switch (pn->pn_head ? pn->pn_head->pn_type : TOK_XMLLIST) {
+ case TOK_XMLETAGO:
+ JS_ASSERT(0);
+ /* FALL THROUGH */
+ case TOK_XMLPTAGC:
+ case TOK_XMLSTAGO:
+ break;
+ default:
+ if (js_Emit1(cx, cg, JSOP_STARTXML) < 0)
+ return JS_FALSE;
+ }
+
+ for (pn2 = pn->pn_head; pn2; pn2 = pn2->pn_next) {
+ if (pn2->pn_type == TOK_LC &&
+ js_Emit1(cx, cg, JSOP_STARTXMLEXPR) < 0) {
+ return JS_FALSE;
+ }
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ if (pn2 != pn->pn_head && js_Emit1(cx, cg, JSOP_ADD) < 0)
+ return JS_FALSE;
+ }
+
+ if (pn->pn_extra & PNX_XMLROOT) {
+ if (pn->pn_count == 0) {
+ JS_ASSERT(pn->pn_type == TOK_XMLLIST);
+ atom = cx->runtime->atomState.emptyAtom;
+ ale = js_IndexAtom(cx, atom, &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ EMIT_ATOM_INDEX_OP(JSOP_STRING, ALE_INDEX(ale));
+ }
+ if (js_Emit1(cx, cg, pn->pn_op) < 0)
+ return JS_FALSE;
+ }
+#ifdef DEBUG
+ else
+ JS_ASSERT(pn->pn_count != 0);
+#endif
+ break;
+
+ case TOK_XMLPTAGC:
+ if (pn->pn_op == JSOP_XMLOBJECT) {
+ ok = EmitAtomOp(cx, pn, pn->pn_op, cg);
+ break;
+ }
+ /* FALL THROUGH */
+
+ case TOK_XMLSTAGO:
+ case TOK_XMLETAGO:
+ {
+ uint32 i;
+
+ if (js_Emit1(cx, cg, JSOP_STARTXML) < 0)
+ return JS_FALSE;
+
+ ale = js_IndexAtom(cx,
+ (pn->pn_type == TOK_XMLETAGO)
+ ? cx->runtime->atomState.etagoAtom
+ : cx->runtime->atomState.stagoAtom,
+ &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ EMIT_ATOM_INDEX_OP(JSOP_STRING, ALE_INDEX(ale));
+
+ JS_ASSERT(pn->pn_count != 0);
+ pn2 = pn->pn_head;
+ if (pn2->pn_type == TOK_LC && js_Emit1(cx, cg, JSOP_STARTXMLEXPR) < 0)
+ return JS_FALSE;
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ if (js_Emit1(cx, cg, JSOP_ADD) < 0)
+ return JS_FALSE;
+
+ for (pn2 = pn2->pn_next, i = 0; pn2; pn2 = pn2->pn_next, i++) {
+ if (pn2->pn_type == TOK_LC &&
+ js_Emit1(cx, cg, JSOP_STARTXMLEXPR) < 0) {
+ return JS_FALSE;
+ }
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ if ((i & 1) && pn2->pn_type == TOK_LC) {
+ if (js_Emit1(cx, cg, JSOP_TOATTRVAL) < 0)
+ return JS_FALSE;
+ }
+ if (js_Emit1(cx, cg,
+ (i & 1) ? JSOP_ADDATTRVAL : JSOP_ADDATTRNAME) < 0) {
+ return JS_FALSE;
+ }
+ }
+
+ ale = js_IndexAtom(cx,
+ (pn->pn_type == TOK_XMLPTAGC)
+ ? cx->runtime->atomState.ptagcAtom
+ : cx->runtime->atomState.tagcAtom,
+ &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ EMIT_ATOM_INDEX_OP(JSOP_STRING, ALE_INDEX(ale));
+ if (js_Emit1(cx, cg, JSOP_ADD) < 0)
+ return JS_FALSE;
+
+ if ((pn->pn_extra & PNX_XMLROOT) && js_Emit1(cx, cg, pn->pn_op) < 0)
+ return JS_FALSE;
+ break;
+ }
+
+ case TOK_XMLNAME:
+ if (pn->pn_arity == PN_LIST) {
+ JS_ASSERT(pn->pn_count != 0);
+ for (pn2 = pn->pn_head; pn2; pn2 = pn2->pn_next) {
+ if (!js_EmitTree(cx, cg, pn2))
+ return JS_FALSE;
+ if (pn2 != pn->pn_head && js_Emit1(cx, cg, JSOP_ADD) < 0)
+ return JS_FALSE;
+ }
+ } else {
+ JS_ASSERT(pn->pn_arity == PN_NULLARY);
+ ok = EmitAtomOp(cx, pn, pn->pn_op, cg);
+ }
+ break;
+
+ case TOK_XMLPI:
+ ale = js_IndexAtom(cx, pn->pn_atom2, &cg->atomList);
+ if (!ale)
+ return JS_FALSE;
+ if (!EmitAtomIndexOp(cx, JSOP_QNAMEPART, ALE_INDEX(ale), cg))
+ return JS_FALSE;
+ if (!EmitAtomOp(cx, pn, JSOP_XMLPI, cg))
+ return JS_FALSE;
+ break;
+#endif /* JS_HAS_XML_SUPPORT */
+
+ default:
+ JS_ASSERT(0);
+ }
+
+ if (ok && --cg->emitLevel == 0 && cg->spanDeps)
+ ok = OptimizeSpanDeps(cx, cg);
+
+ return ok;
+}
+
+/* XXX get rid of offsetBias, it's used only by SRC_FOR and SRC_DECL */
+JS_FRIEND_DATA(JSSrcNoteSpec) js_SrcNoteSpec[] = {
+ {"null", 0, 0, 0},
+ {"if", 0, 0, 0},
+ {"if-else", 2, 0, 1},
+ {"while", 1, 0, 1},
+ {"for", 3, 1, 1},
+ {"continue", 0, 0, 0},
+ {"decl", 1, 1, 1},
+ {"pcdelta", 1, 0, 1},
+ {"assignop", 0, 0, 0},
+ {"cond", 1, 0, 1},
+ {"brace", 1, 0, 1},
+ {"hidden", 0, 0, 0},
+ {"pcbase", 1, 0, -1},
+ {"label", 1, 0, 0},
+ {"labelbrace", 1, 0, 0},
+ {"endbrace", 0, 0, 0},
+ {"break2label", 1, 0, 0},
+ {"cont2label", 1, 0, 0},
+ {"switch", 2, 0, 1},
+ {"funcdef", 1, 0, 0},
+ {"catch", 1, 0, 1},
+ {"extended", -1, 0, 0},
+ {"newline", 0, 0, 0},
+ {"setline", 1, 0, 0},
+ {"xdelta", 0, 0, 0},
+};
+
+static intN
+AllocSrcNote(JSContext *cx, JSCodeGenerator *cg)
+{
+ intN index;
+ JSArenaPool *pool;
+ size_t size;
+
+ index = CG_NOTE_COUNT(cg);
+ if (((uintN)index & CG_NOTE_MASK(cg)) == 0) {
+ pool = cg->notePool;
+ size = SRCNOTE_SIZE(CG_NOTE_MASK(cg) + 1);
+ if (!CG_NOTES(cg)) {
+ /* Allocate the first note array lazily; leave noteMask alone. */
+ JS_ARENA_ALLOCATE_CAST(CG_NOTES(cg), jssrcnote *, pool, size);
+ } else {
+ /* Grow by doubling note array size; update noteMask on success. */
+ JS_ARENA_GROW_CAST(CG_NOTES(cg), jssrcnote *, pool, size, size);
+ if (CG_NOTES(cg))
+ CG_NOTE_MASK(cg) = (CG_NOTE_MASK(cg) << 1) | 1;
+ }
+ if (!CG_NOTES(cg)) {
+ JS_ReportOutOfMemory(cx);
+ return -1;
+ }
+ }
+
+ CG_NOTE_COUNT(cg) = index + 1;
+ return index;
+}
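Editor's note, not part of this patch: AllocSrcNote keeps the note array's capacity as a power of two and stores it as a mask (capacity minus one), so the "is it full?" test is just (count & mask) == 0 and each growth step doubles the capacity by shifting one more bit into the mask. A small standalone illustration of that pattern, with plain malloc/realloc standing in for the arena pool:

#include <stdio.h>
#include <stdlib.h>

typedef struct {
    unsigned char *notes;
    unsigned count;
    unsigned mask;      /* capacity - 1; here the initial capacity is 8 */
} NotePool;

static int note_alloc(NotePool *p)
{
    if ((p->count & p->mask) == 0) {
        if (!p->notes) {
            /* First allocation: capacity = mask + 1, mask left alone. */
            p->notes = malloc(p->mask + 1);
        } else {
            /* Double the capacity, then widen the mask by one bit. */
            unsigned char *grown = realloc(p->notes, (size_t)(p->mask + 1) * 2);
            if (!grown)
                return -1;              /* keep the old block on failure */
            p->notes = grown;
            p->mask = (p->mask << 1) | 1;
        }
        if (!p->notes)
            return -1;                  /* out of memory */
    }
    return (int)p->count++;             /* index of the newly claimed slot */
}

int main(void)
{
    NotePool p = { NULL, 0, 7 };
    int i;

    for (i = 0; i < 20; i++)
        note_alloc(&p);
    printf("count=%u capacity=%u\n", p.count, p.mask + 1);  /* 20 and 32 */
    free(p.notes);
    return 0;
}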
+
+intN
+js_NewSrcNote(JSContext *cx, JSCodeGenerator *cg, JSSrcNoteType type)
+{
+ intN index, n;
+ jssrcnote *sn;
+ ptrdiff_t offset, delta, xdelta;
+
+ /*
+ * Claim a note slot in CG_NOTES(cg) by growing it if necessary and then
+ * incrementing CG_NOTE_COUNT(cg).
+ */
+ index = AllocSrcNote(cx, cg);
+ if (index < 0)
+ return -1;
+ sn = &CG_NOTES(cg)[index];
+
+ /*
+ * Compute delta from the last annotated bytecode's offset. If it's too
+ * big to fit in sn, allocate one or more xdelta notes and reset sn.
+ */
+ offset = CG_OFFSET(cg);
+ delta = offset - CG_LAST_NOTE_OFFSET(cg);
+ CG_LAST_NOTE_OFFSET(cg) = offset;
+ if (delta >= SN_DELTA_LIMIT) {
+ do {
+ xdelta = JS_MIN(delta, SN_XDELTA_MASK);
+ SN_MAKE_XDELTA(sn, xdelta);
+ delta -= xdelta;
+ index = AllocSrcNote(cx, cg);
+ if (index < 0)
+ return -1;
+ sn = &CG_NOTES(cg)[index];
+ } while (delta >= SN_DELTA_LIMIT);
+ }
+
+ /*
+ * Initialize type and delta, then allocate the minimum number of notes
+ * needed for type's arity. Usually, we won't need more, but if an offset
+ * does take two bytes, js_SetSrcNoteOffset will grow CG_NOTES(cg).
+ */
+ SN_MAKE_NOTE(sn, type, delta);
+ for (n = (intN)js_SrcNoteSpec[type].arity; n > 0; n--) {
+ if (js_NewSrcNote(cx, cg, SRC_NULL) < 0)
+ return -1;
+ }
+ return index;
+}
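Editor's note, not part of this patch: the delta handling in js_NewSrcNote can be summarized on its own. Each note records the bytecode distance from the previously annotated op, and when that distance is too large for a normal note the emitter first spills it into a run of extended-delta (SRC_XDELTA) notes, each carrying as much as fits. The limits below are made up for the illustration; the real ones come from the jssrcnote bit layout:

#include <stdio.h>

/* Toy stand-ins for SN_DELTA_LIMIT and SN_XDELTA_MASK. */
enum { TOY_DELTA_LIMIT = 16, TOY_XDELTA_MASK = 63 };

/* Spill "xdelta" records until the remainder fits in an ordinary note,
 * then return the remainder for that note to carry itself. */
static int split_delta(int delta)
{
    int xdelta;

    while (delta >= TOY_DELTA_LIMIT) {
        xdelta = delta < TOY_XDELTA_MASK ? delta : TOY_XDELTA_MASK;
        printf("  xdelta note: +%d\n", xdelta);
        delta -= xdelta;
    }
    return delta;
}

int main(void)
{
    printf("pc delta of 150 becomes:\n");
    printf("  typed note delta: +%d\n", split_delta(150));
    return 0;
}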
+
+intN
+js_NewSrcNote2(JSContext *cx, JSCodeGenerator *cg, JSSrcNoteType type,
+ ptrdiff_t offset)
+{
+ intN index;
+
+ index = js_NewSrcNote(cx, cg, type);
+ if (index >= 0) {
+ if (!js_SetSrcNoteOffset(cx, cg, index, 0, offset))
+ return -1;
+ }
+ return index;
+}
+
+intN
+js_NewSrcNote3(JSContext *cx, JSCodeGenerator *cg, JSSrcNoteType type,
+ ptrdiff_t offset1, ptrdiff_t offset2)
+{
+ intN index;
+
+ index = js_NewSrcNote(cx, cg, type);
+ if (index >= 0) {
+ if (!js_SetSrcNoteOffset(cx, cg, index, 0, offset1))
+ return -1;
+ if (!js_SetSrcNoteOffset(cx, cg, index, 1, offset2))
+ return -1;
+ }
+ return index;
+}
+
+static JSBool
+GrowSrcNotes(JSContext *cx, JSCodeGenerator *cg)
+{
+ JSArenaPool *pool;
+ size_t size;
+
+ /* Grow by doubling note array size; update noteMask on success. */
+ pool = cg->notePool;
+ size = SRCNOTE_SIZE(CG_NOTE_MASK(cg) + 1);
+ JS_ARENA_GROW_CAST(CG_NOTES(cg), jssrcnote *, pool, size, size);
+ if (!CG_NOTES(cg)) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ CG_NOTE_MASK(cg) = (CG_NOTE_MASK(cg) << 1) | 1;
+ return JS_TRUE;
+}
+
+jssrcnote *
+js_AddToSrcNoteDelta(JSContext *cx, JSCodeGenerator *cg, jssrcnote *sn,
+ ptrdiff_t delta)
+{
+ ptrdiff_t base, limit, newdelta, diff;
+ intN index;
+
+ /*
+ * Called only from OptimizeSpanDeps and js_FinishTakingSrcNotes to add to
+ * main script note deltas, and only by a small positive amount.
+ */
+ JS_ASSERT(cg->current == &cg->main);
+ JS_ASSERT((unsigned) delta < (unsigned) SN_XDELTA_LIMIT);
+
+ base = SN_DELTA(sn);
+ limit = SN_IS_XDELTA(sn) ? SN_XDELTA_LIMIT : SN_DELTA_LIMIT;
+ newdelta = base + delta;
+ if (newdelta < limit) {
+ SN_SET_DELTA(sn, newdelta);
+ } else {
+ index = sn - cg->main.notes;
+ if ((cg->main.noteCount & cg->main.noteMask) == 0) {
+ if (!GrowSrcNotes(cx, cg))
+ return NULL;
+ sn = cg->main.notes + index;
+ }
+ diff = cg->main.noteCount - index;
+ cg->main.noteCount++;
+ memmove(sn + 1, sn, SRCNOTE_SIZE(diff));
+ SN_MAKE_XDELTA(sn, delta);
+ sn++;
+ }
+ return sn;
+}
+
+JS_FRIEND_API(uintN)
+js_SrcNoteLength(jssrcnote *sn)
+{
+ uintN arity;
+ jssrcnote *base;
+
+ arity = (intN)js_SrcNoteSpec[SN_TYPE(sn)].arity;
+ for (base = sn++; arity; sn++, arity--) {
+ if (*sn & SN_3BYTE_OFFSET_FLAG)
+ sn += 2;
+ }
+ return sn - base;
+}
+
+JS_FRIEND_API(ptrdiff_t)
+js_GetSrcNoteOffset(jssrcnote *sn, uintN which)
+{
+ /* Find the offset numbered which (i.e., skip exactly which offsets). */
+ JS_ASSERT(SN_TYPE(sn) != SRC_XDELTA);
+ JS_ASSERT(which < js_SrcNoteSpec[SN_TYPE(sn)].arity);
+ for (sn++; which; sn++, which--) {
+ if (*sn & SN_3BYTE_OFFSET_FLAG)
+ sn += 2;
+ }
+ if (*sn & SN_3BYTE_OFFSET_FLAG) {
+ return (ptrdiff_t)(((uint32)(sn[0] & SN_3BYTE_OFFSET_MASK) << 16)
+ | (sn[1] << 8)
+ | sn[2]);
+ }
+ return (ptrdiff_t)*sn;
+}
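Editor's note, not part of this patch: the offset encoding read back above uses one byte when the value fits under the flag bit and three bytes otherwise, with the flag set in the first byte. A self-contained round trip of that scheme; the flag and mask values mirror SN_3BYTE_OFFSET_FLAG and SN_3BYTE_OFFSET_MASK, but the buffer handling is simplified:

#include <assert.h>
#include <stdio.h>

#define FLAG_3BYTE 0x80   /* mirrors SN_3BYTE_OFFSET_FLAG */
#define MASK_3BYTE 0x7f   /* mirrors SN_3BYTE_OFFSET_MASK */

/* Write offset into buf; return the number of bytes used (1 or 3). */
static int encode_offset(unsigned char *buf, long offset)
{
    if (offset <= MASK_3BYTE) {
        buf[0] = (unsigned char)offset;
        return 1;
    }
    assert(offset < ((long)(MASK_3BYTE + 1) << 16));    /* max 0x7fffff */
    buf[0] = (unsigned char)(FLAG_3BYTE | (offset >> 16));
    buf[1] = (unsigned char)(offset >> 8);
    buf[2] = (unsigned char)offset;
    return 3;
}

static long decode_offset(const unsigned char *buf)
{
    if (buf[0] & FLAG_3BYTE)
        return ((long)(buf[0] & MASK_3BYTE) << 16) | (buf[1] << 8) | buf[2];
    return buf[0];
}

int main(void)
{
    static const long samples[] = { 5, 127, 128, 70000 };
    unsigned char buf[3];
    int i, n;

    for (i = 0; i < 4; i++) {
        n = encode_offset(buf, samples[i]);
        printf("%ld -> %d byte(s) -> %ld\n", samples[i], n, decode_offset(buf));
    }
    return 0;
}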
+
+JSBool
+js_SetSrcNoteOffset(JSContext *cx, JSCodeGenerator *cg, uintN index,
+ uintN which, ptrdiff_t offset)
+{
+ jssrcnote *sn;
+ ptrdiff_t diff;
+
+ if ((jsuword)offset >= (jsuword)((ptrdiff_t)SN_3BYTE_OFFSET_FLAG << 16)) {
+ ReportStatementTooLarge(cx, cg);
+ return JS_FALSE;
+ }
+
+ /* Find the offset numbered which (i.e., skip exactly which offsets). */
+ sn = &CG_NOTES(cg)[index];
+ JS_ASSERT(SN_TYPE(sn) != SRC_XDELTA);
+ JS_ASSERT(which < js_SrcNoteSpec[SN_TYPE(sn)].arity);
+ for (sn++; which; sn++, which--) {
+ if (*sn & SN_3BYTE_OFFSET_FLAG)
+ sn += 2;
+ }
+
+ /* See if the new offset requires three bytes. */
+ if (offset > (ptrdiff_t)SN_3BYTE_OFFSET_MASK) {
+ /* Maybe this offset was already set to a three-byte value. */
+ if (!(*sn & SN_3BYTE_OFFSET_FLAG)) {
+ /* Losing, need to insert another two bytes for this offset. */
+ index = PTRDIFF(sn, CG_NOTES(cg), jssrcnote);
+
+ /*
+ * Simultaneously test to see if the source note array must grow to
+             * accommodate either the first or second byte of additional storage
+ * required by this 3-byte offset.
+ */
+ if (((CG_NOTE_COUNT(cg) + 1) & CG_NOTE_MASK(cg)) <= 1) {
+ if (!GrowSrcNotes(cx, cg))
+ return JS_FALSE;
+ sn = CG_NOTES(cg) + index;
+ }
+ CG_NOTE_COUNT(cg) += 2;
+
+ diff = CG_NOTE_COUNT(cg) - (index + 3);
+ JS_ASSERT(diff >= 0);
+ if (diff > 0)
+ memmove(sn + 3, sn + 1, SRCNOTE_SIZE(diff));
+ }
+ *sn++ = (jssrcnote)(SN_3BYTE_OFFSET_FLAG | (offset >> 16));
+ *sn++ = (jssrcnote)(offset >> 8);
+ }
+ *sn = (jssrcnote)offset;
+ return JS_TRUE;
+}
+
+#ifdef DEBUG_notme
+#define DEBUG_srcnotesize
+#endif
+
+#ifdef DEBUG_srcnotesize
+#define NBINS 10
+static uint32 hist[NBINS];
+
+void DumpSrcNoteSizeHist()
+{
+ static FILE *fp;
+ int i, n;
+
+ if (!fp) {
+ fp = fopen("/tmp/srcnotes.hist", "w");
+ if (!fp)
+ return;
+ setvbuf(fp, NULL, _IONBF, 0);
+ }
+ fprintf(fp, "SrcNote size histogram:\n");
+ for (i = 0; i < NBINS; i++) {
+ fprintf(fp, "%4u %4u ", JS_BIT(i), hist[i]);
+ for (n = (int) JS_HOWMANY(hist[i], 10); n > 0; --n)
+ fputc('*', fp);
+ fputc('\n', fp);
+ }
+ fputc('\n', fp);
+}
+#endif
+
+/*
+ * Fill in the storage at notes with prolog and main srcnotes; the space at
+ * notes was allocated using the CG_COUNT_FINAL_SRCNOTES macro from jsemit.h.
+ * SO DON'T CHANGE THIS FUNCTION WITHOUT AT LEAST CHECKING WHETHER jsemit.h's
+ * CG_COUNT_FINAL_SRCNOTES MACRO NEEDS CORRESPONDING CHANGES!
+ */
+JSBool
+js_FinishTakingSrcNotes(JSContext *cx, JSCodeGenerator *cg, jssrcnote *notes)
+{
+ uintN prologCount, mainCount, totalCount;
+ ptrdiff_t offset, delta;
+ jssrcnote *sn;
+
+ JS_ASSERT(cg->current == &cg->main);
+
+ prologCount = cg->prolog.noteCount;
+ if (prologCount && cg->prolog.currentLine != cg->firstLine) {
+ CG_SWITCH_TO_PROLOG(cg);
+ if (js_NewSrcNote2(cx, cg, SRC_SETLINE, (ptrdiff_t)cg->firstLine) < 0)
+ return JS_FALSE;
+ prologCount = cg->prolog.noteCount;
+ CG_SWITCH_TO_MAIN(cg);
+ } else {
+ /*
+ * Either no prolog srcnotes, or no line number change over prolog.
+ * We don't need a SRC_SETLINE, but we may need to adjust the offset
+ * of the first main note, by adding to its delta and possibly even
+ * prepending SRC_XDELTA notes to it to account for prolog bytecodes
+ * that came at and after the last annotated bytecode.
+ */
+ offset = CG_PROLOG_OFFSET(cg) - cg->prolog.lastNoteOffset;
+ JS_ASSERT(offset >= 0);
+ if (offset > 0 && cg->main.noteCount != 0) {
+ /* NB: Use as much of the first main note's delta as we can. */
+ sn = cg->main.notes;
+ delta = SN_IS_XDELTA(sn)
+ ? SN_XDELTA_MASK - (*sn & SN_XDELTA_MASK)
+ : SN_DELTA_MASK - (*sn & SN_DELTA_MASK);
+ if (offset < delta)
+ delta = offset;
+ for (;;) {
+ if (!js_AddToSrcNoteDelta(cx, cg, sn, delta))
+ return JS_FALSE;
+ offset -= delta;
+ if (offset == 0)
+ break;
+ delta = JS_MIN(offset, SN_XDELTA_MASK);
+ sn = cg->main.notes;
+ }
+ }
+ }
+
+ mainCount = cg->main.noteCount;
+ totalCount = prologCount + mainCount;
+ if (prologCount)
+ memcpy(notes, cg->prolog.notes, SRCNOTE_SIZE(prologCount));
+ memcpy(notes + prologCount, cg->main.notes, SRCNOTE_SIZE(mainCount));
+ SN_MAKE_TERMINATOR(&notes[totalCount]);
+
+#ifdef DEBUG_notme
+ { int bin = JS_CeilingLog2(totalCount);
+ if (bin >= NBINS)
+ bin = NBINS - 1;
+ ++hist[bin];
+ }
+#endif
+ return JS_TRUE;
+}
+
+JSBool
+js_AllocTryNotes(JSContext *cx, JSCodeGenerator *cg)
+{
+ size_t size, incr;
+ ptrdiff_t delta;
+
+ size = TRYNOTE_SIZE(cg->treeContext.tryCount);
+ if (size <= cg->tryNoteSpace)
+ return JS_TRUE;
+
+ /*
+ * Allocate trynotes from cx->tempPool.
+ * XXX Too much growing and we bloat, as other tempPool allocators block
+ * in-place growth, and we never recycle old free space in an arena.
+ * YYY But once we consume an entire arena, we'll realloc it, letting the
+ * malloc heap recycle old space, while still freeing _en masse_ via the
+ * arena pool.
+ */
+ if (!cg->tryBase) {
+ size = JS_ROUNDUP(size, TRYNOTE_SIZE(TRYNOTE_CHUNK));
+ JS_ARENA_ALLOCATE_CAST(cg->tryBase, JSTryNote *, &cx->tempPool, size);
+ if (!cg->tryBase)
+ return JS_FALSE;
+ cg->tryNoteSpace = size;
+ cg->tryNext = cg->tryBase;
+ } else {
+ delta = PTRDIFF((char *)cg->tryNext, (char *)cg->tryBase, char);
+ incr = size - cg->tryNoteSpace;
+ incr = JS_ROUNDUP(incr, TRYNOTE_SIZE(TRYNOTE_CHUNK));
+ size = cg->tryNoteSpace;
+ JS_ARENA_GROW_CAST(cg->tryBase, JSTryNote *, &cx->tempPool, size, incr);
+ if (!cg->tryBase)
+ return JS_FALSE;
+ cg->tryNoteSpace = size + incr;
+ cg->tryNext = (JSTryNote *)((char *)cg->tryBase + delta);
+ }
+ return JS_TRUE;
+}
+
+JSTryNote *
+js_NewTryNote(JSContext *cx, JSCodeGenerator *cg, ptrdiff_t start,
+ ptrdiff_t end, ptrdiff_t catchStart)
+{
+ JSTryNote *tn;
+
+ JS_ASSERT(cg->tryBase <= cg->tryNext);
+ JS_ASSERT(catchStart >= 0);
+ tn = cg->tryNext++;
+ tn->start = start;
+ tn->length = end - start;
+ tn->catchStart = catchStart;
+ return tn;
+}
+
+void
+js_FinishTakingTryNotes(JSContext *cx, JSCodeGenerator *cg, JSTryNote *notes)
+{
+ uintN count;
+
+ count = PTRDIFF(cg->tryNext, cg->tryBase, JSTryNote);
+ if (!count)
+ return;
+
+ memcpy(notes, cg->tryBase, TRYNOTE_SIZE(count));
+ notes[count].start = 0;
+ notes[count].length = CG_OFFSET(cg);
+ notes[count].catchStart = 0;
+}
diff --git a/src/third_party/js-1.7/jsemit.h b/src/third_party/js-1.7/jsemit.h
new file mode 100644
index 00000000000..90709c22ae9
--- /dev/null
+++ b/src/third_party/js-1.7/jsemit.h
@@ -0,0 +1,743 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsemit_h___
+#define jsemit_h___
+/*
+ * JS bytecode generation.
+ */
+
+#include "jsstddef.h"
+#include "jstypes.h"
+#include "jsatom.h"
+#include "jsopcode.h"
+#include "jsprvtd.h"
+#include "jspubtd.h"
+
+JS_BEGIN_EXTERN_C
+
+/*
+ * NB: If you add enumerators for scope statements, add them between STMT_WITH
+ * and STMT_CATCH, or you will break the STMT_TYPE_IS_SCOPE macro. If you add
+ * non-looping statement enumerators, add them before STMT_DO_LOOP or you will
+ * break the STMT_TYPE_IS_LOOP macro.
+ *
+ * Also remember to keep the statementName array in jsemit.c in sync.
+ */
+typedef enum JSStmtType {
+ STMT_LABEL, /* labeled statement: L: s */
+ STMT_IF, /* if (then) statement */
+ STMT_ELSE, /* else clause of if statement */
+ STMT_BODY, /* synthetic body of function with
+ destructuring formal parameters */
+ STMT_BLOCK, /* compound statement: { s1[;... sN] } */
+ STMT_SWITCH, /* switch statement */
+ STMT_WITH, /* with statement */
+ STMT_CATCH, /* catch block */
+ STMT_TRY, /* try block */
+ STMT_FINALLY, /* finally block */
+ STMT_SUBROUTINE, /* gosub-target subroutine body */
+ STMT_DO_LOOP, /* do/while loop statement */
+ STMT_FOR_LOOP, /* for loop statement */
+ STMT_FOR_IN_LOOP, /* for/in loop statement */
+ STMT_WHILE_LOOP /* while loop statement */
+} JSStmtType;
+
+#define STMT_TYPE_IN_RANGE(t,b,e) ((uint)((t) - (b)) <= (uintN)((e) - (b)))
+
+/*
+ * A comment on the encoding of the JSStmtType enum and type-testing macros:
+ *
+ * STMT_TYPE_MAYBE_SCOPE tells whether a statement type is always, or may
+ * become, a lexical scope. It therefore includes block and switch (the two
+ * low-numbered "maybe" scope types) and excludes with (with has dynamic scope
+ * pending the "reformed with" in ES4/JS2). It includes all try-catch-finally
+ * types, which are high-numbered maybe-scope types.
+ *
+ * STMT_TYPE_LINKS_SCOPE tells whether a JSStmtInfo of the given type eagerly
+ * links to other scoping statement info records. It excludes the two early
+ * "maybe" types, block and switch, as well as the try and both finally types,
+ * since try and the other trailing maybe-scope types don't need block scope
+ * unless they contain let declarations.
+ *
+ * We treat with as a static scope because it prevents lexical binding from
+ * continuing further up the static scope chain. With the "reformed with"
+ * proposal for JS2, we'll be able to model it statically, too.
+ */
+#define STMT_TYPE_MAYBE_SCOPE(type) \
+ (type != STMT_WITH && \
+ STMT_TYPE_IN_RANGE(type, STMT_BLOCK, STMT_SUBROUTINE))
+
+#define STMT_TYPE_LINKS_SCOPE(type) \
+ STMT_TYPE_IN_RANGE(type, STMT_WITH, STMT_CATCH)
+
+#define STMT_TYPE_IS_TRYING(type) \
+ STMT_TYPE_IN_RANGE(type, STMT_TRY, STMT_SUBROUTINE)
+
+#define STMT_TYPE_IS_LOOP(type) ((type) >= STMT_DO_LOOP)
+
+#define STMT_MAYBE_SCOPE(stmt) STMT_TYPE_MAYBE_SCOPE((stmt)->type)
+#define STMT_LINKS_SCOPE(stmt) (STMT_TYPE_LINKS_SCOPE((stmt)->type) || \
+ ((stmt)->flags & SIF_SCOPE))
+#define STMT_IS_TRYING(stmt) STMT_TYPE_IS_TRYING((stmt)->type)
+#define STMT_IS_LOOP(stmt) STMT_TYPE_IS_LOOP((stmt)->type)
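+
+/*
+ * For example, with the enumerator order above, STMT_TYPE_MAYBE_SCOPE applies
+ * to STMT_SWITCH (it lies in [STMT_BLOCK, STMT_SUBROUTINE] and is not
+ * STMT_WITH), while STMT_TYPE_LINKS_SCOPE does not (STMT_SWITCH lies outside
+ * [STMT_WITH, STMT_CATCH]); a switch only links a scope once SIF_SCOPE is
+ * set, which is the second condition STMT_LINKS_SCOPE checks.
+ */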
+
+typedef struct JSStmtInfo JSStmtInfo;
+
+struct JSStmtInfo {
+ uint16 type; /* statement type */
+ uint16 flags; /* flags, see below */
+ ptrdiff_t update; /* loop update offset (top if none) */
+ ptrdiff_t breaks; /* offset of last break in loop */
+ ptrdiff_t continues; /* offset of last continue in loop */
+ JSAtom *atom; /* name of LABEL, or block scope object */
+ JSStmtInfo *down; /* info for enclosing statement */
+ JSStmtInfo *downScope; /* next enclosing lexical scope */
+};
+
+#define SIF_SCOPE 0x0001 /* statement has its own lexical scope */
+#define SIF_BODY_BLOCK 0x0002 /* STMT_BLOCK type is a function body */
+
+/*
+ * To reuse space in JSStmtInfo, rename breaks and continues for use during
+ * try/catch/finally code generation and backpatching. To match most common
+ * use cases, the macro argument is a struct, not a struct pointer. Only a
+ * loop, switch, or label statement info record can have breaks and continues,
+ * and only a for loop has an update backpatch chain, so it's safe to overlay
+ * these for the "trying" JSStmtTypes.
+ */
+#define CATCHNOTE(stmt) ((stmt).update)
+#define GOSUBS(stmt) ((stmt).breaks)
+#define GUARDJUMP(stmt) ((stmt).continues)
+
+#define AT_TOP_LEVEL(tc) \
+ (!(tc)->topStmt || ((tc)->topStmt->flags & SIF_BODY_BLOCK))
+
+#define SET_STATEMENT_TOP(stmt, top) \
+ ((stmt)->update = (top), (stmt)->breaks = (stmt)->continues = (-1))
+
+struct JSTreeContext { /* tree context for semantic checks */
+ uint16 flags; /* statement state flags, see below */
+ uint16 numGlobalVars; /* max. no. of global variables/regexps */
+ uint32 tryCount; /* total count of try statements parsed */
+ uint32 globalUses; /* optimizable global var uses in total */
+ uint32 loopyGlobalUses;/* optimizable global var uses in loops */
+ JSStmtInfo *topStmt; /* top of statement info stack */
+ JSStmtInfo *topScopeStmt; /* top lexical scope statement */
+ JSObject *blockChain; /* compile time block scope chain (NB: one
+ deeper than the topScopeStmt/downScope
+ chain when in head of let block/expr) */
+ JSParseNode *blockNode; /* parse node for a lexical scope.
+ XXX combine with blockChain? */
+ JSAtomList decls; /* function, const, and var declarations */
+ JSParseNode *nodeList; /* list of recyclable parse-node structs */
+};
+
+#define TCF_COMPILING 0x01 /* generating bytecode; this tc is a cg */
+#define TCF_IN_FUNCTION 0x02 /* parsing inside function body */
+#define TCF_RETURN_EXPR 0x04 /* function has 'return expr;' */
+#define TCF_RETURN_VOID 0x08 /* function has 'return;' */
+#define TCF_RETURN_FLAGS 0x0C /* propagate these out of blocks */
+#define TCF_IN_FOR_INIT 0x10 /* parsing init expr of for; exclude 'in' */
+#define TCF_FUN_CLOSURE_VS_VAR 0x20 /* function and var with same name */
+#define TCF_FUN_USES_NONLOCALS 0x40 /* function refers to non-local names */
+#define TCF_FUN_HEAVYWEIGHT 0x80 /* function needs Call object per call */
+#define TCF_FUN_IS_GENERATOR 0x100 /* parsed yield statement in function */
+#define TCF_FUN_FLAGS 0x1E0 /* flags to propagate from FunctionBody */
+#define TCF_HAS_DEFXMLNS 0x200 /* default xml namespace = ...; parsed */
+#define TCF_HAS_FUNCTION_STMT 0x400 /* block contains a function statement */
+
+#define TREE_CONTEXT_INIT(tc) \
+ ((tc)->flags = (tc)->numGlobalVars = 0, \
+ (tc)->tryCount = (tc)->globalUses = (tc)->loopyGlobalUses = 0, \
+ (tc)->topStmt = (tc)->topScopeStmt = NULL, \
+ (tc)->blockChain = NULL, \
+ ATOM_LIST_INIT(&(tc)->decls), \
+ (tc)->nodeList = NULL, (tc)->blockNode = NULL)
+
+#define TREE_CONTEXT_FINISH(tc) \
+ ((void)0)
+
+/*
+ * Span-dependent instructions are jumps whose span (from the jump bytecode to
+ * the jump target) may require 2 or 4 bytes of immediate operand.
+ */
+typedef struct JSSpanDep JSSpanDep;
+typedef struct JSJumpTarget JSJumpTarget;
+
+struct JSSpanDep {
+ ptrdiff_t top; /* offset of first bytecode in an opcode */
+ ptrdiff_t offset; /* offset - 1 within opcode of jump operand */
+ ptrdiff_t before; /* original offset - 1 of jump operand */
+ JSJumpTarget *target; /* tagged target pointer or backpatch delta */
+};
+
+/*
+ * Jump targets are stored in an AVL tree, for O(log(n)) lookup with targets
+ * sorted by offset from left to right, so that targets after a span-dependent
+ * instruction whose jump offset operand must be extended can be found quickly
+ * and adjusted upward (toward higher offsets).
+ */
+struct JSJumpTarget {
+ ptrdiff_t offset; /* offset of span-dependent jump target */
+ int balance; /* AVL tree balance number */
+ JSJumpTarget *kids[2]; /* left and right AVL tree child pointers */
+};
+
+#define JT_LEFT 0
+#define JT_RIGHT 1
+#define JT_OTHER_DIR(dir) (1 - (dir))
+#define JT_IMBALANCE(dir) (((dir) << 1) - 1)
+#define JT_DIR(imbalance) (((imbalance) + 1) >> 1)
+
+/*
+ * Backpatch deltas are encoded in JSSpanDep.target if JT_TAG_BIT is clear,
+ * so we can maintain backpatch chains when using span dependency records to
+ * hold jump offsets that overflow 16 bits.
+ */
+#define JT_TAG_BIT ((jsword) 1)
+#define JT_UNTAG_SHIFT 1
+#define JT_SET_TAG(jt) ((JSJumpTarget *)((jsword)(jt) | JT_TAG_BIT))
+#define JT_CLR_TAG(jt) ((JSJumpTarget *)((jsword)(jt) & ~JT_TAG_BIT))
+#define JT_HAS_TAG(jt) ((jsword)(jt) & JT_TAG_BIT)
+
+#define BITS_PER_PTRDIFF (sizeof(ptrdiff_t) * JS_BITS_PER_BYTE)
+#define BITS_PER_BPDELTA (BITS_PER_PTRDIFF - 1 - JT_UNTAG_SHIFT)
+#define BPDELTA_MAX (((ptrdiff_t)1 << BITS_PER_BPDELTA) - 1)
+#define BPDELTA_TO_JT(bp) ((JSJumpTarget *)((bp) << JT_UNTAG_SHIFT))
+#define JT_TO_BPDELTA(jt) ((ptrdiff_t)((jsword)(jt) >> JT_UNTAG_SHIFT))
+
+#define SD_SET_TARGET(sd,jt) ((sd)->target = JT_SET_TAG(jt))
+#define SD_GET_TARGET(sd) (JS_ASSERT(JT_HAS_TAG((sd)->target)), \
+ JT_CLR_TAG((sd)->target))
+#define SD_SET_BPDELTA(sd,bp) ((sd)->target = BPDELTA_TO_JT(bp))
+#define SD_GET_BPDELTA(sd) (JS_ASSERT(!JT_HAS_TAG((sd)->target)), \
+ JT_TO_BPDELTA((sd)->target))
+
+/* Avoid asserting twice by expanding SD_GET_TARGET in the "then" clause. */
+#define SD_SPAN(sd,pivot) (SD_GET_TARGET(sd) \
+ ? JT_CLR_TAG((sd)->target)->offset - (pivot) \
+ : 0)
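+
+/*
+ * For example, since JSJumpTarget pointers are at least 2-byte aligned, bit 0
+ * is free to serve as JT_TAG_BIT: SD_SET_TARGET(sd, jt) stores jt with bit 0
+ * set, while SD_SET_BPDELTA(sd, 5) stores the untagged word 10 (5 shifted
+ * left by JT_UNTAG_SHIFT), from which SD_GET_BPDELTA recovers 5.
+ */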
+
+struct JSCodeGenerator {
+ JSTreeContext treeContext; /* base state: statement info stack, etc. */
+
+ JSArenaPool *codePool; /* pointer to thread code arena pool */
+ JSArenaPool *notePool; /* pointer to thread srcnote arena pool */
+ void *codeMark; /* low watermark in cg->codePool */
+ void *noteMark; /* low watermark in cg->notePool */
+ void *tempMark; /* low watermark in cx->tempPool */
+
+ struct {
+ jsbytecode *base; /* base of JS bytecode vector */
+ jsbytecode *limit; /* one byte beyond end of bytecode */
+ jsbytecode *next; /* pointer to next free bytecode */
+ jssrcnote *notes; /* source notes, see below */
+ uintN noteCount; /* number of source notes so far */
+ uintN noteMask; /* growth increment for notes */
+ ptrdiff_t lastNoteOffset; /* code offset for last source note */
+ uintN currentLine; /* line number for tree-based srcnote gen */
+ } prolog, main, *current;
+
+ const char *filename; /* null or weak link to source filename */
+ uintN firstLine; /* first line, for js_NewScriptFromCG */
+ JSPrincipals *principals; /* principals for constant folding eval */
+ JSAtomList atomList; /* literals indexed for mapping */
+
+ intN stackDepth; /* current stack depth in script frame */
+ uintN maxStackDepth; /* maximum stack depth so far */
+
+ JSTryNote *tryBase; /* first exception handling note */
+ JSTryNote *tryNext; /* next available note */
+ size_t tryNoteSpace; /* # of bytes allocated at tryBase */
+
+ JSSpanDep *spanDeps; /* span dependent instruction records */
+ JSJumpTarget *jumpTargets; /* AVL tree of jump target offsets */
+ JSJumpTarget *jtFreeList; /* JT_LEFT-linked list of free structs */
+ uintN numSpanDeps; /* number of span dependencies */
+ uintN numJumpTargets; /* number of jump targets */
+ ptrdiff_t spanDepTodo; /* offset from main.base of potentially
+ unoptimized spandeps */
+
+ uintN arrayCompSlot; /* stack slot of array in comprehension */
+
+ uintN emitLevel; /* js_EmitTree recursion level */
+ JSAtomList constList; /* compile time constants */
+ JSCodeGenerator *parent; /* Enclosing function or global context */
+};
+
+#define CG_BASE(cg) ((cg)->current->base)
+#define CG_LIMIT(cg) ((cg)->current->limit)
+#define CG_NEXT(cg) ((cg)->current->next)
+#define CG_CODE(cg,offset) (CG_BASE(cg) + (offset))
+#define CG_OFFSET(cg) PTRDIFF(CG_NEXT(cg), CG_BASE(cg), jsbytecode)
+
+#define CG_NOTES(cg) ((cg)->current->notes)
+#define CG_NOTE_COUNT(cg) ((cg)->current->noteCount)
+#define CG_NOTE_MASK(cg) ((cg)->current->noteMask)
+#define CG_LAST_NOTE_OFFSET(cg) ((cg)->current->lastNoteOffset)
+#define CG_CURRENT_LINE(cg) ((cg)->current->currentLine)
+
+#define CG_PROLOG_BASE(cg) ((cg)->prolog.base)
+#define CG_PROLOG_LIMIT(cg) ((cg)->prolog.limit)
+#define CG_PROLOG_NEXT(cg) ((cg)->prolog.next)
+#define CG_PROLOG_CODE(cg,poff) (CG_PROLOG_BASE(cg) + (poff))
+#define CG_PROLOG_OFFSET(cg) PTRDIFF(CG_PROLOG_NEXT(cg), CG_PROLOG_BASE(cg),\
+ jsbytecode)
+
+#define CG_SWITCH_TO_MAIN(cg) ((cg)->current = &(cg)->main)
+#define CG_SWITCH_TO_PROLOG(cg) ((cg)->current = &(cg)->prolog)
+
+/*
+ * Initialize cg to allocate bytecode space from codePool, source note space
+ * from notePool, and all other arena-allocated temporaries from cx->tempPool.
+ * Return true on success. Report an error and return false if the initial
+ * code segment can't be allocated.
+ */
+extern JS_FRIEND_API(JSBool)
+js_InitCodeGenerator(JSContext *cx, JSCodeGenerator *cg,
+ JSArenaPool *codePool, JSArenaPool *notePool,
+ const char *filename, uintN lineno,
+ JSPrincipals *principals);
+
+/*
+ * Release cg->codePool, cg->notePool, and cx->tempPool to marks set by
+ * js_InitCodeGenerator. Note that cgs are magic: they own the arena pool
+ * "tops-of-stack" space above their codeMark, noteMark, and tempMark points.
+ * This means you cannot alloc from tempPool and save the pointer beyond the
 * next js_FinishCodeGenerator call.
+ */
+extern JS_FRIEND_API(void)
+js_FinishCodeGenerator(JSContext *cx, JSCodeGenerator *cg);
+
+/*
+ * Emit one bytecode.
+ */
+extern ptrdiff_t
+js_Emit1(JSContext *cx, JSCodeGenerator *cg, JSOp op);
+
+/*
+ * Emit two bytecodes, an opcode (op) with a byte of immediate operand (op1).
+ */
+extern ptrdiff_t
+js_Emit2(JSContext *cx, JSCodeGenerator *cg, JSOp op, jsbytecode op1);
+
+/*
+ * Emit three bytecodes, an opcode with two bytes of immediate operands.
+ */
+extern ptrdiff_t
+js_Emit3(JSContext *cx, JSCodeGenerator *cg, JSOp op, jsbytecode op1,
+ jsbytecode op2);
+
+/*
+ * Emit (1 + extra) bytecodes, for N bytes of op and its immediate operand.
+ */
+extern ptrdiff_t
+js_EmitN(JSContext *cx, JSCodeGenerator *cg, JSOp op, size_t extra);
+
+/*
+ * Unsafe macro to call js_SetJumpOffset and return JS_FALSE if it fails.
+ */
+#define CHECK_AND_SET_JUMP_OFFSET(cx,cg,pc,off) \
+ JS_BEGIN_MACRO \
+ if (!js_SetJumpOffset(cx, cg, pc, off)) \
+ return JS_FALSE; \
+ JS_END_MACRO
+
+#define CHECK_AND_SET_JUMP_OFFSET_AT(cx,cg,off) \
+ CHECK_AND_SET_JUMP_OFFSET(cx, cg, CG_CODE(cg,off), CG_OFFSET(cg) - (off))
+
+extern JSBool
+js_SetJumpOffset(JSContext *cx, JSCodeGenerator *cg, jsbytecode *pc,
+ ptrdiff_t off);
+
+/* Test whether we're in a statement of given type. */
+extern JSBool
+js_InStatement(JSTreeContext *tc, JSStmtType type);
+
+/* Test whether we're in a with statement. */
+#define js_InWithStatement(tc) js_InStatement(tc, STMT_WITH)
+
+/*
+ * Test whether atom refers to a global variable (or is a reference error).
+ * Return true in *loopyp if any loops enclose the lexical reference, false
+ * otherwise.
+ */
+extern JSBool
+js_IsGlobalReference(JSTreeContext *tc, JSAtom *atom, JSBool *loopyp);
+
+/*
+ * Push the C-stack-allocated struct at stmt onto the stmtInfo stack.
+ */
+extern void
+js_PushStatement(JSTreeContext *tc, JSStmtInfo *stmt, JSStmtType type,
+ ptrdiff_t top);
+
+/*
+ * Push a block scope statement and link blockAtom's object-valued key into
+ * tc->blockChain. To pop this statement info record, use js_PopStatement as
+ * usual, or if appropriate (if generating code), js_PopStatementCG.
+ */
+extern void
+js_PushBlockScope(JSTreeContext *tc, JSStmtInfo *stmt, JSAtom *blockAtom,
+ ptrdiff_t top);
+
+/*
+ * Pop tc->topStmt. If the top JSStmtInfo struct is not stack-allocated, it
+ * is up to the caller to free it.
+ */
+extern void
+js_PopStatement(JSTreeContext *tc);
+
+/*
+ * Like js_PopStatement(&cg->treeContext), also patch breaks and continues
+ * unless the top statement info record represents a try-catch-finally suite.
+ * May fail if a jump offset overflows.
+ */
+extern JSBool
+js_PopStatementCG(JSContext *cx, JSCodeGenerator *cg);
+
+/*
+ * Define and lookup a primitive jsval associated with the const named by atom.
+ * js_DefineCompileTimeConstant analyzes the constant-folded initializer at pn
+ * and saves the const's value in cg->constList, if it can be used at compile
+ * time. It returns true unless an error occurred.
+ *
+ * If the initializer's value could not be saved, js_LookupCompileTimeConstant
+ * calls will return the undefined value. js_LookupCompileTimeConstant tries
+ * to find a const value memoized for atom, returning true with *vp set to a
+ * value other than undefined if the constant was found, true with *vp set to
+ * JSVAL_VOID if not found, and false on error.
+ */
+extern JSBool
+js_DefineCompileTimeConstant(JSContext *cx, JSCodeGenerator *cg, JSAtom *atom,
+ JSParseNode *pn);
+
+extern JSBool
+js_LookupCompileTimeConstant(JSContext *cx, JSCodeGenerator *cg, JSAtom *atom,
+ jsval *vp);
+
+/*
+ * Find a lexically scoped variable (one declared by let, catch, or an array
+ * comprehension) named by atom, looking in tc's compile-time scopes.
+ *
+ * If a WITH statement is reached along the scope stack, return its statement
+ * info record, so callers can tell that atom is ambiguous. If slotp is not
+ * null, then if atom is found, set *slotp to its stack slot, otherwise to -1.
+ * This means that if slotp is not null, all the block objects on the lexical
+ * scope chain must have had their depth slots computed by the code generator,
+ * so the caller must be under js_EmitTree.
+ *
+ * In any event, directly return the statement info record in which atom was
+ * found. Otherwise return null.
+ */
+extern JSStmtInfo *
+js_LexicalLookup(JSTreeContext *tc, JSAtom *atom, jsint *slotp,
+ JSBool letdecl);
+
+/*
+ * Emit code into cg for the tree rooted at pn.
+ */
+extern JSBool
+js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn);
+
+/*
+ * Emit function code into cg for the tree rooted at body.
+ */
+extern JSBool
+js_EmitFunctionBytecode(JSContext *cx, JSCodeGenerator *cg, JSParseNode *body);
+
+/*
+ * Emit code into cg for the tree rooted at body, then create a persistent
+ * script for fun from cg.
+ */
+extern JSBool
+js_EmitFunctionBody(JSContext *cx, JSCodeGenerator *cg, JSParseNode *body,
+ JSFunction *fun);
+
+/*
+ * Source notes generated along with bytecode for decompiling and debugging.
+ * A source note is a uint8 with 5 bits of type and 3 of offset from the pc of
+ * the previous note. If 3 bits of offset aren't enough, extended delta notes
+ * (SRC_XDELTA) consisting of 2 set high order bits followed by 6 offset bits
+ * are emitted before the next note. Some notes have operand offsets encoded
+ * immediately after them, in note bytes or byte-triples.
+ *
+ * Source Note Extended Delta
+ * +7-6-5-4-3+2-1-0+ +7-6-5+4-3-2-1-0+
+ * |note-type|delta| |1 1| ext-delta |
+ * +---------+-----+ +---+-----------+
+ *
+ * At most one "gettable" note (i.e., a note of type other than SRC_NEWLINE,
+ * SRC_SETLINE, and SRC_XDELTA) applies to a given bytecode.
+ *
+ * NB: the js_SrcNoteSpec array in jsemit.c is indexed by this enum, so its
+ * initializers need to match the order here.
+ *
+ * Note on adding new source notes: every pair of bytecodes (A, B) where A and
+ * B have disjoint sets of source notes that could apply to each bytecode may
+ * reuse the same note type value for two notes (snA, snB) that have the same
+ * arity, offsetBias, and isSpanDep initializers in js_SrcNoteSpec. This is
+ * why SRC_IF and SRC_INITPROP have the same value below. For bad historical
+ * reasons, some bytecodes below that could be overlayed have not been, but
+ * before using SRC_EXTENDED, consider compressing the existing note types.
+ *
+ * Don't forget to update JSXDR_BYTECODE_VERSION in jsxdrapi.h for all such
+ * incompatible source note or other bytecode changes.
+ */
+typedef enum JSSrcNoteType {
+ SRC_NULL = 0, /* terminates a note vector */
+ SRC_IF = 1, /* JSOP_IFEQ bytecode is from an if-then */
+ SRC_INITPROP = 1, /* disjoint meaning applied to JSOP_INITELEM or
+ to an index label in a regular (structuring)
+ or a destructuring object initialiser */
+ SRC_IF_ELSE = 2, /* JSOP_IFEQ bytecode is from an if-then-else */
+ SRC_WHILE = 3, /* JSOP_IFEQ is from a while loop */
+ SRC_FOR = 4, /* JSOP_NOP or JSOP_POP in for loop head */
+ SRC_CONTINUE = 5, /* JSOP_GOTO is a continue, not a break;
+ also used on JSOP_ENDINIT if extra comma
+ at end of array literal: [1,2,,] */
+ SRC_DECL = 6, /* type of a declaration (var, const, let*) */
+ SRC_DESTRUCT = 6, /* JSOP_DUP starting a destructuring assignment
+ operation, with SRC_DECL_* offset operand */
+ SRC_PCDELTA = 7, /* distance forward from comma-operator to
+ next POP, or from CONDSWITCH to first CASE
+ opcode, etc. -- always a forward delta */
+ SRC_GROUPASSIGN = 7, /* SRC_DESTRUCT variant for [a, b] = [c, d] */
+ SRC_ASSIGNOP = 8, /* += or another assign-op follows */
+ SRC_COND = 9, /* JSOP_IFEQ is from conditional ?: operator */
+ SRC_BRACE = 10, /* mandatory brace, for scope or to avoid
+ dangling else */
+ SRC_HIDDEN = 11, /* opcode shouldn't be decompiled */
+ SRC_PCBASE = 12, /* distance back from annotated getprop or
+ setprop op to left-most obj.prop.subprop
+ bytecode -- always a backward delta */
+ SRC_METHODBASE = 13, /* SRC_PCBASE variant for obj.function::foo
+ gets and sets; disjoint from SRC_LABEL by
+ bytecode to which it applies */
+ SRC_LABEL = 13, /* JSOP_NOP for label: with atomid immediate */
+ SRC_LABELBRACE = 14, /* JSOP_NOP for label: {...} begin brace */
+ SRC_ENDBRACE = 15, /* JSOP_NOP for label: {...} end brace */
+ SRC_BREAK2LABEL = 16, /* JSOP_GOTO for 'break label' with atomid */
+ SRC_CONT2LABEL = 17, /* JSOP_GOTO for 'continue label' with atomid */
+ SRC_SWITCH = 18, /* JSOP_*SWITCH with offset to end of switch,
+ 2nd off to first JSOP_CASE if condswitch */
+ SRC_FUNCDEF = 19, /* JSOP_NOP for function f() with atomid */
+ SRC_CATCH = 20, /* catch block has guard */
+ SRC_EXTENDED = 21, /* extended source note, 32-159, in next byte */
+ SRC_NEWLINE = 22, /* bytecode follows a source newline */
+ SRC_SETLINE = 23, /* a file-absolute source line number note */
+ SRC_XDELTA = 24 /* 24-31 are for extended delta notes */
+} JSSrcNoteType;
+
+/*
+ * Constants for the SRC_DECL source note. Note that span-dependent bytecode
+ * selection means that any SRC_DECL offset greater than SRC_DECL_LET may need
+ * to be adjusted, but these "offsets" are too small to span a span-dependent
+ * instruction, so can be used to denote distinct declaration syntaxes to the
+ * decompiler.
+ *
+ * NB: the var_prefix array in jsopcode.c depends on these dense indexes from
+ * SRC_DECL_VAR through SRC_DECL_LET.
+ */
+#define SRC_DECL_VAR 0
+#define SRC_DECL_CONST 1
+#define SRC_DECL_LET 2
+#define SRC_DECL_NONE 3
+
+#define SN_TYPE_BITS 5
+#define SN_DELTA_BITS 3
+#define SN_XDELTA_BITS 6
+#define SN_TYPE_MASK (JS_BITMASK(SN_TYPE_BITS) << SN_DELTA_BITS)
+#define SN_DELTA_MASK ((ptrdiff_t)JS_BITMASK(SN_DELTA_BITS))
+#define SN_XDELTA_MASK ((ptrdiff_t)JS_BITMASK(SN_XDELTA_BITS))
+
+#define SN_MAKE_NOTE(sn,t,d) (*(sn) = (jssrcnote) \
+ (((t) << SN_DELTA_BITS) \
+ | ((d) & SN_DELTA_MASK)))
+#define SN_MAKE_XDELTA(sn,d) (*(sn) = (jssrcnote) \
+ ((SRC_XDELTA << SN_DELTA_BITS) \
+ | ((d) & SN_XDELTA_MASK)))
+
+#define SN_IS_XDELTA(sn) ((*(sn) >> SN_DELTA_BITS) >= SRC_XDELTA)
+#define SN_TYPE(sn) (SN_IS_XDELTA(sn) ? SRC_XDELTA \
+ : *(sn) >> SN_DELTA_BITS)
+#define SN_SET_TYPE(sn,type) SN_MAKE_NOTE(sn, type, SN_DELTA(sn))
+#define SN_IS_GETTABLE(sn) (SN_TYPE(sn) < SRC_NEWLINE)
+
+#define SN_DELTA(sn) ((ptrdiff_t)(SN_IS_XDELTA(sn) \
+ ? *(sn) & SN_XDELTA_MASK \
+ : *(sn) & SN_DELTA_MASK))
+#define SN_SET_DELTA(sn,delta) (SN_IS_XDELTA(sn) \
+ ? SN_MAKE_XDELTA(sn, delta) \
+ : SN_MAKE_NOTE(sn, SN_TYPE(sn), delta))
+
+#define SN_DELTA_LIMIT ((ptrdiff_t)JS_BIT(SN_DELTA_BITS))
+#define SN_XDELTA_LIMIT ((ptrdiff_t)JS_BIT(SN_XDELTA_BITS))
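+
+/*
+ * For example, SN_MAKE_NOTE(sn, SRC_WHILE, 2) stores the byte
+ * (3 << SN_DELTA_BITS) | 2 == 0x1a (type 3 in the upper five bits, delta 2 in
+ * the lower three), and SN_MAKE_XDELTA(sn, 37) stores 0xe5 (the two high bits
+ * set to mark an extended delta, with 37 in the remaining six bits).
+ */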
+
+/*
+ * Offset fields follow certain notes and are frequency-encoded: an offset in
+ * [0,0x7f] consumes one byte; an offset in [0x80,0x7fffff] takes three bytes,
+ * with the high bit of the first byte set to flag the longer form.
+ */
+#define SN_3BYTE_OFFSET_FLAG 0x80
+#define SN_3BYTE_OFFSET_MASK 0x7f
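+
+/*
+ * For example, an offset operand of 0x2a is stored as the single byte 0x2a
+ * (high bit clear), whereas 0x012345 is stored as the three bytes 0x81 0x23
+ * 0x45: SN_3BYTE_OFFSET_FLAG | (offset >> 16), then (offset >> 8) & 0xff,
+ * then offset & 0xff, matching js_SetSrcNoteOffset in jsemit.c.
+ */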
+
+typedef struct JSSrcNoteSpec {
+ const char *name; /* name for disassembly/debugging output */
+ uint8 arity; /* number of offset operands */
+ uint8 offsetBias; /* bias of offset(s) from annotated pc */
+ int8 isSpanDep; /* 1 or -1 if offsets could span extended ops,
+ 0 otherwise; sign tells span direction */
+} JSSrcNoteSpec;
+
+extern JS_FRIEND_DATA(JSSrcNoteSpec) js_SrcNoteSpec[];
+extern JS_FRIEND_API(uintN) js_SrcNoteLength(jssrcnote *sn);
+
+#define SN_LENGTH(sn) ((js_SrcNoteSpec[SN_TYPE(sn)].arity == 0) ? 1 \
+ : js_SrcNoteLength(sn))
+#define SN_NEXT(sn) ((sn) + SN_LENGTH(sn))
+
+/* A source note array is terminated by an all-zero element. */
+#define SN_MAKE_TERMINATOR(sn) (*(sn) = SRC_NULL)
+#define SN_IS_TERMINATOR(sn) (*(sn) == SRC_NULL)
+
+/*
+ * Append a new source note of the given type (and therefore size) to cg's
+ * notes dynamic array, updating cg->noteCount. Return the new note's index
+ * within the array pointed at by cg->current->notes. Return -1 if out of
+ * memory.
+ */
+extern intN
+js_NewSrcNote(JSContext *cx, JSCodeGenerator *cg, JSSrcNoteType type);
+
+extern intN
+js_NewSrcNote2(JSContext *cx, JSCodeGenerator *cg, JSSrcNoteType type,
+ ptrdiff_t offset);
+
+extern intN
+js_NewSrcNote3(JSContext *cx, JSCodeGenerator *cg, JSSrcNoteType type,
+ ptrdiff_t offset1, ptrdiff_t offset2);
+
+/*
+ * NB: this function can add at most one extra extended delta note.
+ */
+extern jssrcnote *
+js_AddToSrcNoteDelta(JSContext *cx, JSCodeGenerator *cg, jssrcnote *sn,
+ ptrdiff_t delta);
+
+/*
+ * Get and set the offset operand identified by which (0 for the first, etc.).
+ */
+extern JS_FRIEND_API(ptrdiff_t)
+js_GetSrcNoteOffset(jssrcnote *sn, uintN which);
+
+extern JSBool
+js_SetSrcNoteOffset(JSContext *cx, JSCodeGenerator *cg, uintN index,
+ uintN which, ptrdiff_t offset);
+
+/*
+ * Finish taking source notes in cx's notePool, copying final notes to the new
+ * stable store allocated by the caller and passed in via notes. Return false
+ * on malloc failure, which means this function reported an error.
+ *
+ * To compute the number of jssrcnotes to allocate and pass in via notes, use
+ * the CG_COUNT_FINAL_SRCNOTES macro. This macro knows a lot about details of
+ * js_FinishTakingSrcNotes, SO DON'T CHANGE jsemit.c's js_FinishTakingSrcNotes
+ * FUNCTION WITHOUT CHECKING WHETHER THIS MACRO NEEDS CORRESPONDING CHANGES!
+ */
+#define CG_COUNT_FINAL_SRCNOTES(cg, cnt) \
+ JS_BEGIN_MACRO \
+ ptrdiff_t diff_ = CG_PROLOG_OFFSET(cg) - (cg)->prolog.lastNoteOffset; \
+ cnt = (cg)->prolog.noteCount + (cg)->main.noteCount + 1; \
+ if ((cg)->prolog.noteCount && \
+ (cg)->prolog.currentLine != (cg)->firstLine) { \
+ if (diff_ > SN_DELTA_MASK) \
+ cnt += JS_HOWMANY(diff_ - SN_DELTA_MASK, SN_XDELTA_MASK); \
+ cnt += 2 + (((cg)->firstLine > SN_3BYTE_OFFSET_MASK) << 1); \
+ } else if (diff_ > 0) { \
+ if (cg->main.noteCount) { \
+ jssrcnote *sn_ = (cg)->main.notes; \
+ diff_ -= SN_IS_XDELTA(sn_) \
+ ? SN_XDELTA_MASK - (*sn_ & SN_XDELTA_MASK) \
+ : SN_DELTA_MASK - (*sn_ & SN_DELTA_MASK); \
+ } \
+ if (diff_ > 0) \
+ cnt += JS_HOWMANY(diff_, SN_XDELTA_MASK); \
+ } \
+ JS_END_MACRO
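+
+/*
+ * A typical caller therefore does something like the following (a sketch
+ * only; how the jssrcnote array is allocated is up to the caller):
+ *
+ *     uintN count;
+ *     CG_COUNT_FINAL_SRCNOTES(cg, count);
+ *     notes = <allocate count jssrcnotes>;
+ *     if (!notes || !js_FinishTakingSrcNotes(cx, cg, notes))
+ *         <handle error>;
+ */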
+
+extern JSBool
+js_FinishTakingSrcNotes(JSContext *cx, JSCodeGenerator *cg, jssrcnote *notes);
+
+/*
+ * Allocate cg->treeContext.tryCount notes (plus one for the end sentinel)
+ * from cx->tempPool and set up cg->tryBase/tryNext for exactly tryCount
+ * js_NewTryNote calls. The storage is freed by js_FinishCodeGenerator.
+ */
+extern JSBool
+js_AllocTryNotes(JSContext *cx, JSCodeGenerator *cg);
+
+/*
+ * Grab the next trynote slot in cg, filling it in appropriately.
+ */
+extern JSTryNote *
+js_NewTryNote(JSContext *cx, JSCodeGenerator *cg, ptrdiff_t start,
+ ptrdiff_t end, ptrdiff_t catchStart);
+
+/*
+ * Finish generating exception information into the space at notes. As with
+ * js_FinishTakingSrcNotes, the caller must use CG_COUNT_FINAL_TRYNOTES(cg) to
+ * preallocate enough space in a JSTryNote[] to pass as the notes parameter of
+ * js_FinishTakingTryNotes.
+ */
+#define CG_COUNT_FINAL_TRYNOTES(cg, cnt) \
+ JS_BEGIN_MACRO \
+ cnt = ((cg)->tryNext > (cg)->tryBase) \
+ ? PTRDIFF(cg->tryNext, cg->tryBase, JSTryNote) + 1 \
+ : 0; \
+ JS_END_MACRO
+
+extern void
+js_FinishTakingTryNotes(JSContext *cx, JSCodeGenerator *cg, JSTryNote *notes);
+
+JS_END_EXTERN_C
+
+#endif /* jsemit_h___ */
diff --git a/src/third_party/js-1.7/jsexn.c b/src/third_party/js-1.7/jsexn.c
new file mode 100644
index 00000000000..e60f85e4aaa
--- /dev/null
+++ b/src/third_party/js-1.7/jsexn.c
@@ -0,0 +1,1348 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS standard exception implementation.
+ */
+
+#include "jsstddef.h"
+#include <stdlib.h>
+#include <string.h>
+#include "jstypes.h"
+#include "jsbit.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsprf.h"
+#include "jsapi.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsdbgapi.h"
+#include "jsexn.h"
+#include "jsfun.h"
+#include "jsinterp.h"
+#include "jsnum.h"
+#include "jsopcode.h"
+#include "jsscript.h"
+
+/* Forward declarations for js_ErrorClass's initializer. */
+static JSBool
+Exception(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval);
+
+static void
+exn_finalize(JSContext *cx, JSObject *obj);
+
+static uint32
+exn_mark(JSContext *cx, JSObject *obj, void *arg);
+
+static void
+exn_finalize(JSContext *cx, JSObject *obj);
+
+static JSBool
+exn_enumerate(JSContext *cx, JSObject *obj);
+
+static JSBool
+exn_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags,
+ JSObject **objp);
+
+JSClass js_ErrorClass = {
+ js_Error_str,
+ JSCLASS_HAS_PRIVATE | JSCLASS_NEW_RESOLVE |
+ JSCLASS_HAS_CACHED_PROTO(JSProto_Error),
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ exn_enumerate, (JSResolveOp)exn_resolve, JS_ConvertStub, exn_finalize,
+ NULL, NULL, NULL, Exception,
+ NULL, NULL, exn_mark, NULL
+};
+
+typedef struct JSStackTraceElem {
+ JSString *funName;
+ size_t argc;
+ const char *filename;
+ uintN ulineno;
+} JSStackTraceElem;
+
+typedef struct JSExnPrivate {
+ /* A copy of the JSErrorReport originally generated. */
+ JSErrorReport *errorReport;
+ JSString *message;
+ JSString *filename;
+ uintN lineno;
+ size_t stackDepth;
+ JSStackTraceElem stackElems[1];
+} JSExnPrivate;
+
+static JSString *
+StackTraceToString(JSContext *cx, JSExnPrivate *priv);
+
+static JSErrorReport *
+CopyErrorReport(JSContext *cx, JSErrorReport *report)
+{
+ /*
+ * We use a single malloc block to make a deep copy of JSErrorReport with
+ * the following layout:
+ * JSErrorReport
+ * array of copies of report->messageArgs
+ * jschar array with characters for all messageArgs
+ * jschar array with characters for ucmessage
+ * jschar array with characters for uclinebuf and uctokenptr
+ * char array with characters for linebuf and tokenptr
+ * char array with characters for filename
+ * Such layout together with the properties enforced by the following
+ * asserts does not need any extra alignment padding.
+ */
+ JS_STATIC_ASSERT(sizeof(JSErrorReport) % sizeof(const char *) == 0);
+ JS_STATIC_ASSERT(sizeof(const char *) % sizeof(jschar) == 0);
+
+ size_t filenameSize;
+ size_t linebufSize;
+ size_t uclinebufSize;
+ size_t ucmessageSize;
+ size_t i, argsArraySize, argsCopySize, argSize;
+ size_t mallocSize;
+ JSErrorReport *copy;
+ uint8 *cursor;
+
+#define JS_CHARS_SIZE(jschars) ((js_strlen(jschars) + 1) * sizeof(jschar))
+
+ filenameSize = report->filename ? strlen(report->filename) + 1 : 0;
+ linebufSize = report->linebuf ? strlen(report->linebuf) + 1 : 0;
+ uclinebufSize = report->uclinebuf ? JS_CHARS_SIZE(report->uclinebuf) : 0;
+ ucmessageSize = 0;
+ argsArraySize = 0;
+ argsCopySize = 0;
+ if (report->ucmessage) {
+ ucmessageSize = JS_CHARS_SIZE(report->ucmessage);
+ if (report->messageArgs) {
+ for (i = 0; report->messageArgs[i]; ++i)
+ argsCopySize += JS_CHARS_SIZE(report->messageArgs[i]);
+
+ /* Non-null messageArgs should have at least one non-null arg. */
+ JS_ASSERT(i != 0);
+ argsArraySize = (i + 1) * sizeof(const jschar *);
+ }
+ }
+
+ /*
+     * The mallocSize cannot overflow since it represents the sum of the
+ * sizes of already allocated objects.
+ */
+ mallocSize = sizeof(JSErrorReport) + argsArraySize + argsCopySize +
+ ucmessageSize + uclinebufSize + linebufSize + filenameSize;
+ cursor = (uint8 *)JS_malloc(cx, mallocSize);
+ if (!cursor)
+ return NULL;
+
+ copy = (JSErrorReport *)cursor;
+ memset(cursor, 0, sizeof(JSErrorReport));
+ cursor += sizeof(JSErrorReport);
+
+ if (argsArraySize != 0) {
+ copy->messageArgs = (const jschar **)cursor;
+ cursor += argsArraySize;
+ for (i = 0; report->messageArgs[i]; ++i) {
+ copy->messageArgs[i] = (const jschar *)cursor;
+ argSize = JS_CHARS_SIZE(report->messageArgs[i]);
+ memcpy(cursor, report->messageArgs[i], argSize);
+ cursor += argSize;
+ }
+ copy->messageArgs[i] = NULL;
+ JS_ASSERT(cursor == (uint8 *)copy->messageArgs[0] + argsCopySize);
+ }
+
+ if (report->ucmessage) {
+ copy->ucmessage = (const jschar *)cursor;
+ memcpy(cursor, report->ucmessage, ucmessageSize);
+ cursor += ucmessageSize;
+ }
+
+ if (report->uclinebuf) {
+ copy->uclinebuf = (const jschar *)cursor;
+ memcpy(cursor, report->uclinebuf, uclinebufSize);
+ cursor += uclinebufSize;
+ if (report->uctokenptr) {
+ copy->uctokenptr = copy->uclinebuf + (report->uctokenptr -
+ report->uclinebuf);
+ }
+ }
+
+ if (report->linebuf) {
+ copy->linebuf = (const char *)cursor;
+ memcpy(cursor, report->linebuf, linebufSize);
+ cursor += linebufSize;
+ if (report->tokenptr) {
+ copy->tokenptr = copy->linebuf + (report->tokenptr -
+ report->linebuf);
+ }
+ }
+
+ if (report->filename) {
+ copy->filename = (const char *)cursor;
+ memcpy(cursor, report->filename, filenameSize);
+ }
+ JS_ASSERT(cursor + filenameSize == (uint8 *)copy + mallocSize);
+
+ /* Copy non-pointer members. */
+ copy->lineno = report->lineno;
+ copy->errorNumber = report->errorNumber;
+
+ /* Note that this is before it gets flagged with JSREPORT_EXCEPTION */
+ copy->flags = report->flags;
+
+#undef JS_CHARS_SIZE
+ return copy;
+}
+
+static jsval *
+GetStackTraceValueBuffer(JSExnPrivate *priv)
+{
+ /*
+     * We use extra memory after JSExnPrivate.stackElems to store jsvals that
+     * help to produce more informative stack traces. The following
+ * assert allows us to assume that no gap after stackElems is necessary to
+ * align the buffer properly.
+ */
+ JS_STATIC_ASSERT(sizeof(JSStackTraceElem) % sizeof(jsval) == 0);
+
+ return (jsval *)(priv->stackElems + priv->stackDepth);
+}
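+
+/*
+ * The single JS_malloc block behind the helper above (see the size
+ * computation in InitExnPrivate below) holds the JSExnPrivate header, then
+ * stackDepth JSStackTraceElem entries starting at stackElems, then the jsval
+ * argument buffer returned by GetStackTraceValueBuffer.
+ */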
+
+static JSBool
+InitExnPrivate(JSContext *cx, JSObject *exnObject, JSString *message,
+ JSString *filename, uintN lineno, JSErrorReport *report)
+{
+ JSCheckAccessOp checkAccess;
+ JSErrorReporter older;
+ JSExceptionState *state;
+ jsval callerid, v;
+ JSStackFrame *fp, *fpstop;
+ size_t stackDepth, valueCount, size;
+ JSBool overflow;
+ JSExnPrivate *priv;
+ JSStackTraceElem *elem;
+ jsval *values;
+
+ JS_ASSERT(OBJ_GET_CLASS(cx, exnObject) == &js_ErrorClass);
+
+ /*
+ * Prepare stack trace data.
+ *
+ * Set aside any error reporter for cx and save its exception state
+ * so we can suppress any checkAccess failures. Such failures should stop
+ * the backtrace procedure, not result in a failure of this constructor.
+ */
+ checkAccess = cx->runtime->checkObjectAccess;
+ older = JS_SetErrorReporter(cx, NULL);
+ state = JS_SaveExceptionState(cx);
+
+ callerid = ATOM_KEY(cx->runtime->atomState.callerAtom);
+ stackDepth = 0;
+ valueCount = 0;
+ for (fp = cx->fp; fp; fp = fp->down) {
+ if (fp->fun && fp->argv) {
+ if (checkAccess) {
+ v = fp->argv[-2];
+ if (!JSVAL_IS_PRIMITIVE(v) &&
+ !checkAccess(cx, JSVAL_TO_OBJECT(v), callerid,
+ JSACC_READ, &v /* ignored */)) {
+ break;
+ }
+ }
+ valueCount += fp->argc;
+ }
+ ++stackDepth;
+ }
+ JS_RestoreExceptionState(cx, state);
+ JS_SetErrorReporter(cx, older);
+ fpstop = fp;
+
+ size = offsetof(JSExnPrivate, stackElems);
+ overflow = (stackDepth > ((size_t)-1 - size) / sizeof(JSStackTraceElem));
+ size += stackDepth * sizeof(JSStackTraceElem);
+ overflow |= (valueCount > ((size_t)-1 - size) / sizeof(jsval));
+ size += valueCount * sizeof(jsval);
+ if (overflow) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ priv = (JSExnPrivate *)JS_malloc(cx, size);
+ if (!priv)
+ return JS_FALSE;
+
+ /*
+     * We initialize errorReport with a copy of report only after setting the
+     * private slot; to prevent the GC from seeing a junk value, we clear the
+     * field here first.
+ */
+ priv->errorReport = NULL;
+ priv->message = message;
+ priv->filename = filename;
+ priv->lineno = lineno;
+ priv->stackDepth = stackDepth;
+
+ values = GetStackTraceValueBuffer(priv);
+ elem = priv->stackElems;
+ for (fp = cx->fp; fp != fpstop; fp = fp->down) {
+ if (!fp->fun) {
+ elem->funName = NULL;
+ elem->argc = 0;
+ } else {
+ elem->funName = fp->fun->atom
+ ? ATOM_TO_STRING(fp->fun->atom)
+ : cx->runtime->emptyString;
+ elem->argc = fp->argc;
+ memcpy(values, fp->argv, fp->argc * sizeof(jsval));
+ values += fp->argc;
+ }
+ elem->ulineno = 0;
+ elem->filename = NULL;
+ if (fp->script) {
+ elem->filename = fp->script->filename;
+ if (fp->pc)
+ elem->ulineno = js_PCToLineNumber(cx, fp->script, fp->pc);
+ }
+ ++elem;
+ }
+ JS_ASSERT(priv->stackElems + stackDepth == elem);
+ JS_ASSERT(GetStackTraceValueBuffer(priv) + valueCount == values);
+
+ OBJ_SET_SLOT(cx, exnObject, JSSLOT_PRIVATE, PRIVATE_TO_JSVAL(priv));
+
+ if (report) {
+ /*
+ * Construct a new copy of the error report struct. We can't use the
+ * error report struct that was passed in, because it's allocated on
+ * the stack, and also because it may point to transient data in the
+ * JSTokenStream.
+ */
+ priv->errorReport = CopyErrorReport(cx, report);
+ if (!priv->errorReport) {
+            /* The finalizer releases priv since it is in the private slot. */
+ return JS_FALSE;
+ }
+ }
+
+ return JS_TRUE;
+}
+
+static JSExnPrivate *
+GetExnPrivate(JSContext *cx, JSObject *obj)
+{
+ jsval privateValue;
+ JSExnPrivate *priv;
+
+ JS_ASSERT(OBJ_GET_CLASS(cx, obj) == &js_ErrorClass);
+ privateValue = OBJ_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
+ if (JSVAL_IS_VOID(privateValue))
+ return NULL;
+ priv = (JSExnPrivate *)JSVAL_TO_PRIVATE(privateValue);
+ JS_ASSERT(priv);
+ return priv;
+}
+
+static uint32
+exn_mark(JSContext *cx, JSObject *obj, void *arg)
+{
+ JSExnPrivate *priv;
+ JSStackTraceElem *elem;
+ size_t vcount, i;
+ jsval *vp, v;
+
+ priv = GetExnPrivate(cx, obj);
+ if (priv) {
+ GC_MARK(cx, priv->message, "exception message");
+ GC_MARK(cx, priv->filename, "exception filename");
+ elem = priv->stackElems;
+ for (vcount = i = 0; i != priv->stackDepth; ++i, ++elem) {
+ if (elem->funName)
+ GC_MARK(cx, elem->funName, "stack trace function name");
+ if (elem->filename)
+ js_MarkScriptFilename(elem->filename);
+ vcount += elem->argc;
+ }
+ vp = GetStackTraceValueBuffer(priv);
+ for (i = 0; i != vcount; ++i, ++vp) {
+ v = *vp;
+ if (JSVAL_IS_GCTHING(v))
+ GC_MARK(cx, JSVAL_TO_GCTHING(v), "stack trace argument");
+ }
+ }
+ return 0;
+}
+
+static void
+exn_finalize(JSContext *cx, JSObject *obj)
+{
+ JSExnPrivate *priv;
+
+ priv = GetExnPrivate(cx, obj);
+ if (priv) {
+ if (priv->errorReport)
+ JS_free(cx, priv->errorReport);
+ JS_free(cx, priv);
+ }
+}
+
+static JSBool
+exn_enumerate(JSContext *cx, JSObject *obj)
+{
+ JSAtomState *atomState;
+ uintN i;
+ JSAtom *atom;
+ JSObject *pobj;
+ JSProperty *prop;
+
+ JS_STATIC_ASSERT(sizeof(JSAtomState) <= (size_t)(uint16)-1);
+ static const uint16 offsets[] = {
+ (uint16)offsetof(JSAtomState, messageAtom),
+ (uint16)offsetof(JSAtomState, fileNameAtom),
+ (uint16)offsetof(JSAtomState, lineNumberAtom),
+ (uint16)offsetof(JSAtomState, stackAtom),
+ };
+
+ atomState = &cx->runtime->atomState;
+ for (i = 0; i != JS_ARRAY_LENGTH(offsets); ++i) {
+ atom = *(JSAtom **)((uint8 *)atomState + offsets[i]);
+ if (!js_LookupProperty(cx, obj, ATOM_TO_JSID(atom), &pobj, &prop))
+ return JS_FALSE;
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+exn_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags,
+ JSObject **objp)
+{
+ JSExnPrivate *priv;
+ JSString *str;
+ JSAtom *atom;
+ JSString *stack;
+ const char *prop;
+ jsval v;
+
+ *objp = NULL;
+ priv = GetExnPrivate(cx, obj);
+ if (priv && JSVAL_IS_STRING(id)) {
+ str = JSVAL_TO_STRING(id);
+
+ atom = cx->runtime->atomState.messageAtom;
+ if (str == ATOM_TO_STRING(atom)) {
+ prop = js_message_str;
+ v = STRING_TO_JSVAL(priv->message);
+ goto define;
+ }
+
+ atom = cx->runtime->atomState.fileNameAtom;
+ if (str == ATOM_TO_STRING(atom)) {
+ prop = js_fileName_str;
+ v = STRING_TO_JSVAL(priv->filename);
+ goto define;
+ }
+
+ atom = cx->runtime->atomState.lineNumberAtom;
+ if (str == ATOM_TO_STRING(atom)) {
+ prop = js_lineNumber_str;
+ v = INT_TO_JSVAL(priv->lineno);
+ goto define;
+ }
+
+ atom = cx->runtime->atomState.stackAtom;
+ if (str == ATOM_TO_STRING(atom)) {
+ stack = StackTraceToString(cx, priv);
+ if (!stack)
+ return JS_FALSE;
+
+            /* Allow the GC to collect everything used to build the stack trace. */
+ priv->stackDepth = 0;
+ prop = js_stack_str;
+ v = STRING_TO_JSVAL(stack);
+ goto define;
+ }
+ }
+ return JS_TRUE;
+
+ define:
+ if (!JS_DefineProperty(cx, obj, prop, v, NULL, NULL, JSPROP_ENUMERATE))
+ return JS_FALSE;
+ *objp = obj;
+ return JS_TRUE;
+}
+
+JSErrorReport *
+js_ErrorFromException(JSContext *cx, jsval exn)
+{
+ JSObject *obj;
+ JSExnPrivate *priv;
+
+ if (JSVAL_IS_PRIMITIVE(exn))
+ return NULL;
+ obj = JSVAL_TO_OBJECT(exn);
+ if (OBJ_GET_CLASS(cx, obj) != &js_ErrorClass)
+ return NULL;
+ priv = GetExnPrivate(cx, obj);
+ if (!priv)
+ return NULL;
+ return priv->errorReport;
+}
+
+struct JSExnSpec {
+ int protoIndex;
+ const char *name;
+ JSProtoKey key;
+ JSNative native;
+};
+
+/*
+ * All *Error constructors share the same JSClass, js_ErrorClass. But each
+ * constructor function for an *Error class must have a distinct native 'call'
+ * function pointer, in order for instanceof to work properly across multiple
+ * standard class sets. See jsfun.c:fun_hasInstance.
+ */
+#define MAKE_EXCEPTION_CTOR(name) \
+static JSBool \
+name(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) \
+{ \
+ return Exception(cx, obj, argc, argv, rval); \
+}
+
+MAKE_EXCEPTION_CTOR(Error)
+MAKE_EXCEPTION_CTOR(InternalError)
+MAKE_EXCEPTION_CTOR(EvalError)
+MAKE_EXCEPTION_CTOR(RangeError)
+MAKE_EXCEPTION_CTOR(ReferenceError)
+MAKE_EXCEPTION_CTOR(SyntaxError)
+MAKE_EXCEPTION_CTOR(TypeError)
+MAKE_EXCEPTION_CTOR(URIError)
+
+#undef MAKE_EXCEPTION_CTOR
+
+static struct JSExnSpec exceptions[] = {
+ {JSEXN_NONE, js_Error_str, JSProto_Error, Error},
+ {JSEXN_ERR, js_InternalError_str, JSProto_InternalError, InternalError},
+ {JSEXN_ERR, js_EvalError_str, JSProto_EvalError, EvalError},
+ {JSEXN_ERR, js_RangeError_str, JSProto_RangeError, RangeError},
+ {JSEXN_ERR, js_ReferenceError_str, JSProto_ReferenceError, ReferenceError},
+ {JSEXN_ERR, js_SyntaxError_str, JSProto_SyntaxError, SyntaxError},
+ {JSEXN_ERR, js_TypeError_str, JSProto_TypeError, TypeError},
+ {JSEXN_ERR, js_URIError_str, JSProto_URIError, URIError},
+ {0, NULL, JSProto_Null, NULL}
+};
+
+static JSString *
+ValueToShortSource(JSContext *cx, jsval v)
+{
+ JSString *str;
+
+ /* Avoid toSource bloat and fallibility for object types. */
+ if (JSVAL_IS_PRIMITIVE(v)) {
+ str = js_ValueToSource(cx, v);
+ } else if (VALUE_IS_FUNCTION(cx, v)) {
+ /*
+ * XXX Avoid function decompilation bloat for now.
+ */
+ str = JS_GetFunctionId(JS_ValueToFunction(cx, v));
+ if (!str && !(str = js_ValueToSource(cx, v))) {
+ /*
+ * Continue to soldier on if the function couldn't be
+ * converted into a string.
+ */
+ JS_ClearPendingException(cx);
+ str = JS_NewStringCopyZ(cx, "[unknown function]");
+ }
+ } else {
+ /*
+ * XXX Avoid toString on objects, it takes too long and uses too much
+ * memory, for too many classes (see Mozilla bug 166743).
+ */
+ char buf[100];
+ JS_snprintf(buf, sizeof buf, "[object %s]",
+ OBJ_GET_CLASS(cx, JSVAL_TO_OBJECT(v))->name);
+ str = JS_NewStringCopyZ(cx, buf);
+ }
+ return str;
+}
+
+static JSString *
+StackTraceToString(JSContext *cx, JSExnPrivate *priv)
+{
+ jschar *stackbuf;
+ size_t stacklen, stackmax;
+ JSStackTraceElem *elem, *endElem;
+ jsval *values;
+ size_t i;
+ JSString *str;
+ const char *cp;
+ char ulnbuf[11];
+
+ /* After this point, failing control flow must goto bad. */
+ stackbuf = NULL;
+ stacklen = stackmax = 0;
+
+/* Limit the stackbuf length to a reasonable value to avoid overflow checks. */
+#define STACK_LENGTH_LIMIT JS_BIT(20)
+
+#define APPEND_CHAR_TO_STACK(c) \
+ JS_BEGIN_MACRO \
+ if (stacklen == stackmax) { \
+ void *ptr_; \
+ if (stackmax >= STACK_LENGTH_LIMIT) \
+ goto done; \
+ stackmax = stackmax ? 2 * stackmax : 64; \
+ ptr_ = JS_realloc(cx, stackbuf, (stackmax+1) * sizeof(jschar)); \
+ if (!ptr_) \
+ goto bad; \
+ stackbuf = ptr_; \
+ } \
+ stackbuf[stacklen++] = (c); \
+ JS_END_MACRO
+
+#define APPEND_STRING_TO_STACK(str) \
+ JS_BEGIN_MACRO \
+ JSString *str_ = str; \
+ size_t length_ = JSSTRING_LENGTH(str_); \
+ if (length_ > stackmax - stacklen) { \
+ void *ptr_; \
+ if (stackmax >= STACK_LENGTH_LIMIT || \
+ length_ >= STACK_LENGTH_LIMIT - stacklen) { \
+ goto done; \
+ } \
+ stackmax = JS_BIT(JS_CeilingLog2(stacklen + length_)); \
+ ptr_ = JS_realloc(cx, stackbuf, (stackmax+1) * sizeof(jschar)); \
+ if (!ptr_) \
+ goto bad; \
+ stackbuf = ptr_; \
+ } \
+ js_strncpy(stackbuf + stacklen, JSSTRING_CHARS(str_), length_); \
+ stacklen += length_; \
+ JS_END_MACRO
+
+ values = GetStackTraceValueBuffer(priv);
+ elem = priv->stackElems;
+ for (endElem = elem + priv->stackDepth; elem != endElem; elem++) {
+ if (elem->funName) {
+ APPEND_STRING_TO_STACK(elem->funName);
+ APPEND_CHAR_TO_STACK('(');
+ for (i = 0; i != elem->argc; i++, values++) {
+ if (i > 0)
+ APPEND_CHAR_TO_STACK(',');
+ str = ValueToShortSource(cx, *values);
+ if (!str)
+ goto bad;
+ APPEND_STRING_TO_STACK(str);
+ }
+ APPEND_CHAR_TO_STACK(')');
+ }
+ APPEND_CHAR_TO_STACK('@');
+ if (elem->filename) {
+ for (cp = elem->filename; *cp; cp++)
+ APPEND_CHAR_TO_STACK(*cp);
+ }
+ APPEND_CHAR_TO_STACK(':');
+ JS_snprintf(ulnbuf, sizeof ulnbuf, "%u", elem->ulineno);
+ for (cp = ulnbuf; *cp; cp++)
+ APPEND_CHAR_TO_STACK(*cp);
+ APPEND_CHAR_TO_STACK('\n');
+ }
+#undef APPEND_CHAR_TO_STACK
+#undef APPEND_STRING_TO_STACK
+#undef STACK_LENGTH_LIMIT
+
+ done:
+ if (stacklen == 0) {
+ JS_ASSERT(!stackbuf);
+ return cx->runtime->emptyString;
+ }
+ if (stacklen < stackmax) {
+ /*
+ * Realloc can fail when shrinking on some FreeBSD versions, so
+ * don't use JS_realloc here; simply let the oversized allocation
+ * be owned by the string in that rare case.
+ */
+ void *shrunk = JS_realloc(cx, stackbuf, (stacklen+1) * sizeof(jschar));
+ if (shrunk)
+ stackbuf = shrunk;
+ }
+
+ stackbuf[stacklen] = 0;
+ str = js_NewString(cx, stackbuf, stacklen, 0);
+ if (str)
+ return str;
+
+ bad:
+ if (stackbuf)
+ JS_free(cx, stackbuf);
+ return NULL;
+}
+
+/* XXXbe Consolidate the ugly truth that we don't treat filename as UTF-8
+ with these two functions. */
+static JSString *
+FilenameToString(JSContext *cx, const char *filename)
+{
+ return JS_NewStringCopyZ(cx, filename);
+}
+
+static const char *
+StringToFilename(JSContext *cx, JSString *str)
+{
+ return JS_GetStringBytes(str);
+}
+
+static JSBool
+Exception(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSBool ok;
+ uint32 lineno;
+ JSString *message, *filename;
+ JSStackFrame *fp;
+
+ if (cx->creatingException)
+ return JS_FALSE;
+ cx->creatingException = JS_TRUE;
+
+ if (!(cx->fp->flags & JSFRAME_CONSTRUCTING)) {
+ /*
+ * ECMA ed. 3, 15.11.1 requires Error, etc., to construct even when
+ * called as functions, without operator new. But as we do not give
+ * each constructor a distinct JSClass, whose .name member is used by
+ * js_NewObject to find the class prototype, we must get the class
+ * prototype ourselves.
+ */
+ ok = OBJ_GET_PROPERTY(cx, JSVAL_TO_OBJECT(argv[-2]),
+ ATOM_TO_JSID(cx->runtime->atomState
+ .classPrototypeAtom),
+ rval);
+ if (!ok)
+ goto out;
+ obj = js_NewObject(cx, &js_ErrorClass, JSVAL_TO_OBJECT(*rval), NULL);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ *rval = OBJECT_TO_JSVAL(obj);
+ }
+
+ /*
+ * If it's a new object of class Exception, then null out the private
+ * data so that the finalizer doesn't attempt to free it.
+ */
+ if (OBJ_GET_CLASS(cx, obj) == &js_ErrorClass)
+ OBJ_SET_SLOT(cx, obj, JSSLOT_PRIVATE, JSVAL_VOID);
+
+ /* Set the 'message' property. */
+ if (argc != 0) {
+ message = js_ValueToString(cx, argv[0]);
+ if (!message) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ argv[0] = STRING_TO_JSVAL(message);
+ } else {
+ message = cx->runtime->emptyString;
+ }
+
+ /* Set the 'fileName' property. */
+ if (argc > 1) {
+ filename = js_ValueToString(cx, argv[1]);
+ if (!filename) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ argv[1] = STRING_TO_JSVAL(filename);
+ fp = NULL;
+ } else {
+ fp = JS_GetScriptedCaller(cx, NULL);
+ if (fp) {
+ filename = FilenameToString(cx, fp->script->filename);
+ if (!filename) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ } else {
+ filename = cx->runtime->emptyString;
+ }
+ }
+
+ /* Set the 'lineNumber' property. */
+ if (argc > 2) {
+ ok = js_ValueToECMAUint32(cx, argv[2], &lineno);
+ if (!ok)
+ goto out;
+ } else {
+ if (!fp)
+ fp = JS_GetScriptedCaller(cx, NULL);
+ lineno = (fp && fp->pc) ? js_PCToLineNumber(cx, fp->script, fp->pc) : 0;
+ }
+
+ ok = (OBJ_GET_CLASS(cx, obj) != &js_ErrorClass) ||
+ InitExnPrivate(cx, obj, message, filename, lineno, NULL);
+
+ out:
+ cx->creatingException = JS_FALSE;
+ return ok;
+}
+
+/*
+ * Convert to string.
+ *
+ * This method uses only the JavaScript-modifiable properties name and message.
+ * is left to the host to check for private data and report filename and line
+ * number information along with this message.
+ */
+static JSBool
+exn_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsval v;
+ JSString *name, *message, *result;
+ jschar *chars, *cp;
+ size_t name_length, message_length, length;
+
+ if (!OBJ_GET_PROPERTY(cx, obj,
+ ATOM_TO_JSID(cx->runtime->atomState.nameAtom),
+ &v)) {
+ return JS_FALSE;
+ }
+ name = JSVAL_IS_STRING(v) ? JSVAL_TO_STRING(v) : cx->runtime->emptyString;
+ *rval = STRING_TO_JSVAL(name);
+
+ if (!JS_GetProperty(cx, obj, js_message_str, &v))
+ return JS_FALSE;
+ message = JSVAL_IS_STRING(v) ? JSVAL_TO_STRING(v)
+ : cx->runtime->emptyString;
+
+ if (JSSTRING_LENGTH(message) != 0) {
+ name_length = JSSTRING_LENGTH(name);
+ message_length = JSSTRING_LENGTH(message);
+ length = (name_length ? name_length + 2 : 0) + message_length;
+ cp = chars = (jschar*) JS_malloc(cx, (length + 1) * sizeof(jschar));
+ if (!chars)
+ return JS_FALSE;
+
+ if (name_length) {
+ js_strncpy(cp, JSSTRING_CHARS(name), name_length);
+ cp += name_length;
+ *cp++ = ':'; *cp++ = ' ';
+ }
+ js_strncpy(cp, JSSTRING_CHARS(message), message_length);
+ cp += message_length;
+ *cp = 0;
+
+ result = js_NewString(cx, chars, length, 0);
+ if (!result) {
+ JS_free(cx, chars);
+ return JS_FALSE;
+ }
+ } else {
+ result = name;
+ }
+
+ *rval = STRING_TO_JSVAL(result);
+ return JS_TRUE;
+}
+
+#if JS_HAS_TOSOURCE
+/*
+ * Return a string that may eval to something similar to the original object.
+ */
+static JSBool
+exn_toSource(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsval *vp;
+ JSString *name, *message, *filename, *lineno_as_str, *result;
+ uint32 lineno;
+ size_t lineno_length, name_length, message_length, filename_length, length;
+ jschar *chars, *cp;
+
+ vp = argv + argc; /* beginning of explicit local roots */
+
+ if (!OBJ_GET_PROPERTY(cx, obj,
+ ATOM_TO_JSID(cx->runtime->atomState.nameAtom),
+ rval)) {
+ return JS_FALSE;
+ }
+ name = js_ValueToString(cx, *rval);
+ if (!name)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(name);
+
+ if (!JS_GetProperty(cx, obj, js_message_str, &vp[0]) ||
+ !(message = js_ValueToSource(cx, vp[0]))) {
+ return JS_FALSE;
+ }
+ vp[0] = STRING_TO_JSVAL(message);
+
+ if (!JS_GetProperty(cx, obj, js_fileName_str, &vp[1]) ||
+ !(filename = js_ValueToSource(cx, vp[1]))) {
+ return JS_FALSE;
+ }
+ vp[1] = STRING_TO_JSVAL(filename);
+
+ if (!JS_GetProperty(cx, obj, js_lineNumber_str, &vp[2]) ||
+ !js_ValueToECMAUint32 (cx, vp[2], &lineno)) {
+ return JS_FALSE;
+ }
+
+ if (lineno != 0) {
+ lineno_as_str = js_ValueToString(cx, vp[2]);
+ if (!lineno_as_str)
+ return JS_FALSE;
+ lineno_length = JSSTRING_LENGTH(lineno_as_str);
+ } else {
+ lineno_as_str = NULL;
+ lineno_length = 0;
+ }
+
+ /* Magic 8, for the characters in ``(new ())''. */
+ name_length = JSSTRING_LENGTH(name);
+ message_length = JSSTRING_LENGTH(message);
+ length = 8 + name_length + message_length;
+
+ filename_length = JSSTRING_LENGTH(filename);
+ if (filename_length != 0) {
+ /* append filename as ``, {filename}'' */
+ length += 2 + filename_length;
+ if (lineno_as_str) {
+ /* append lineno as ``, {lineno_as_str}'' */
+ length += 2 + lineno_length;
+ }
+ } else {
+ if (lineno_as_str) {
+ /*
+ * no filename, but have line number,
+ * need to append ``, "", {lineno_as_str}''
+ */
+ length += 6 + lineno_length;
+ }
+ }
+
+ cp = chars = (jschar*) JS_malloc(cx, (length + 1) * sizeof(jschar));
+ if (!chars)
+ return JS_FALSE;
+
+ *cp++ = '('; *cp++ = 'n'; *cp++ = 'e'; *cp++ = 'w'; *cp++ = ' ';
+ js_strncpy(cp, JSSTRING_CHARS(name), name_length);
+ cp += name_length;
+ *cp++ = '(';
+ if (message_length != 0) {
+ js_strncpy(cp, JSSTRING_CHARS(message), message_length);
+ cp += message_length;
+ }
+
+ if (filename_length != 0) {
+ /* append filename as ``, {filename}'' */
+ *cp++ = ','; *cp++ = ' ';
+ js_strncpy(cp, JSSTRING_CHARS(filename), filename_length);
+ cp += filename_length;
+ } else {
+ if (lineno_as_str) {
+ /*
+ * no filename, but have line number,
+ * need to append ``, "", {lineno_as_str}''
+ */
+ *cp++ = ','; *cp++ = ' '; *cp++ = '"'; *cp++ = '"';
+ }
+ }
+ if (lineno_as_str) {
+ /* append lineno as ``, {lineno_as_str}'' */
+ *cp++ = ','; *cp++ = ' ';
+ js_strncpy(cp, JSSTRING_CHARS(lineno_as_str), lineno_length);
+ cp += lineno_length;
+ }
+
+ *cp++ = ')'; *cp++ = ')'; *cp = 0;
+
+ result = js_NewString(cx, chars, length, 0);
+ if (!result) {
+ JS_free(cx, chars);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(result);
+ return JS_TRUE;
+}
+#endif
+
+static JSFunctionSpec exception_methods[] = {
+#if JS_HAS_TOSOURCE
+ {js_toSource_str, exn_toSource, 0,0,3},
+#endif
+ {js_toString_str, exn_toString, 0,0,0},
+ {0,0,0,0,0}
+};
+
+JSObject *
+js_InitExceptionClasses(JSContext *cx, JSObject *obj)
+{
+ JSObject *obj_proto, *protos[JSEXN_LIMIT];
+ int i;
+
+ /*
+ * If lazy class initialization occurs for any Error subclass, then all
+ * classes are initialized, starting with Error. To avoid reentry and
+ * redundant initialization, we must not pass a null proto parameter to
+ * js_NewObject below, when called for the Error superclass. We need to
+ * ensure that Object.prototype is the proto of Error.prototype.
+ *
+ * See the equivalent code to ensure that parent_proto is non-null when
+ * JS_InitClass calls js_NewObject, in jsapi.c.
+ */
+ if (!js_GetClassPrototype(cx, obj, INT_TO_JSID(JSProto_Object),
+ &obj_proto)) {
+ return NULL;
+ }
+
+ if (!js_EnterLocalRootScope(cx))
+ return NULL;
+
+ /* Initialize the prototypes first. */
+ for (i = 0; exceptions[i].name != 0; i++) {
+ JSAtom *atom;
+ JSFunction *fun;
+ JSObject *funobj;
+ JSString *nameString;
+ int protoIndex = exceptions[i].protoIndex;
+
+ /* Make the prototype for the current constructor name. */
+ protos[i] = js_NewObject(cx, &js_ErrorClass,
+ (protoIndex != JSEXN_NONE)
+ ? protos[protoIndex]
+ : obj_proto,
+ obj);
+ if (!protos[i])
+ break;
+
+ /* So exn_finalize knows whether to destroy private data. */
+ OBJ_SET_SLOT(cx, protos[i], JSSLOT_PRIVATE, JSVAL_VOID);
+
+ /* Make a constructor function for the current name. */
+ atom = cx->runtime->atomState.classAtoms[exceptions[i].key];
+ fun = js_DefineFunction(cx, obj, atom, exceptions[i].native, 3, 0);
+ if (!fun)
+ break;
+
+ /* Make this constructor make objects of class Exception. */
+ fun->clasp = &js_ErrorClass;
+
+ /* Extract the constructor object. */
+ funobj = fun->object;
+
+ /* Make the prototype and constructor links. */
+ if (!js_SetClassPrototype(cx, funobj, protos[i],
+ JSPROP_READONLY | JSPROP_PERMANENT)) {
+ break;
+ }
+
+ /* proto bootstrap bit from JS_InitClass omitted. */
+ nameString = JS_NewStringCopyZ(cx, exceptions[i].name);
+ if (!nameString)
+ break;
+
+ /* Add the name property to the prototype. */
+ if (!JS_DefineProperty(cx, protos[i], js_name_str,
+ STRING_TO_JSVAL(nameString),
+ NULL, NULL,
+ JSPROP_ENUMERATE)) {
+ break;
+ }
+
+ /* Finally, stash the constructor for later uses. */
+ if (!js_SetClassObject(cx, obj, exceptions[i].key, funobj))
+ break;
+ }
+
+ js_LeaveLocalRootScope(cx);
+ if (exceptions[i].name)
+ return NULL;
+
+ /*
+ * Add an empty message property. (To Exception.prototype only,
+ * because this property will be the same for all the exception
+ * protos.)
+ */
+ if (!JS_DefineProperty(cx, protos[0], js_message_str,
+ STRING_TO_JSVAL(cx->runtime->emptyString),
+ NULL, NULL, JSPROP_ENUMERATE)) {
+ return NULL;
+ }
+ if (!JS_DefineProperty(cx, protos[0], js_fileName_str,
+ STRING_TO_JSVAL(cx->runtime->emptyString),
+ NULL, NULL, JSPROP_ENUMERATE)) {
+ return NULL;
+ }
+ if (!JS_DefineProperty(cx, protos[0], js_lineNumber_str,
+ INT_TO_JSVAL(0),
+ NULL, NULL, JSPROP_ENUMERATE)) {
+ return NULL;
+ }
+
+ /*
+ * Add methods only to Exception.prototype, because ostensibly all
+ * exception types delegate to that.
+ */
+ if (!JS_DefineFunctions(cx, protos[0], exception_methods))
+ return NULL;
+
+ return protos[0];
+}
+
+const JSErrorFormatString*
+js_GetLocalizedErrorMessage(JSContext* cx, void *userRef, const char *locale, const uintN errorNumber)
+{
+ const JSErrorFormatString *errorString = NULL;
+
+ if (cx->localeCallbacks && cx->localeCallbacks->localeGetErrorMessage) {
+ errorString = cx->localeCallbacks
+ ->localeGetErrorMessage(userRef, locale, errorNumber);
+ }
+ if (!errorString)
+ errorString = js_GetErrorMessage(userRef, locale, errorNumber);
+ return errorString;
+}
+
+#if defined ( DEBUG_mccabe ) && defined ( PRINTNAMES )
+/* For use below... get character strings for error name and exception name */
+static struct exnname { char *name; char *exception; } errortoexnname[] = {
+#define MSG_DEF(name, number, count, exception, format) \
+ {#name, #exception},
+#include "js.msg"
+#undef MSG_DEF
+};
+#endif /* DEBUG */
+
+JSBool
+js_ErrorToException(JSContext *cx, const char *message, JSErrorReport *reportp)
+{
+ JSErrNum errorNumber;
+ const JSErrorFormatString *errorString;
+ JSExnType exn;
+ jsval tv[4];
+ JSTempValueRooter tvr;
+ JSBool ok;
+ JSObject *errProto, *errObject;
+ JSString *messageStr, *filenameStr;
+
+ /*
+ * Tell our caller to report immediately if cx has no active frames, or if
+ * this report is just a warning.
+ */
+ JS_ASSERT(reportp);
+ if (!cx->fp || JSREPORT_IS_WARNING(reportp->flags))
+ return JS_FALSE;
+
+ /* Find the exception index associated with this error. */
+ errorNumber = (JSErrNum) reportp->errorNumber;
+ errorString = js_GetLocalizedErrorMessage(cx, NULL, NULL, errorNumber);
+ exn = errorString ? errorString->exnType : JSEXN_NONE;
+ JS_ASSERT(exn < JSEXN_LIMIT);
+
+#if defined( DEBUG_mccabe ) && defined ( PRINTNAMES )
+ /* Print the error name and the associated exception name to stderr */
+ fprintf(stderr, "%s\t%s\n",
+ errortoexnname[errorNumber].name,
+ errortoexnname[errorNumber].exception);
+#endif
+
+ /*
+ * Return false (no exception raised) if no exception is associated
+ * with the given error number.
+ */
+ if (exn == JSEXN_NONE)
+ return JS_FALSE;
+
+ /*
+ * Prevent runaway recursion, just as the Exception native constructor
+ * must do, via cx->creatingException. If an out-of-memory error occurs,
+ * no exception object will be created, but we don't assume that OOM is
+ * the only kind of error that subroutines of this function called below
+ * might raise.
+ */
+ if (cx->creatingException)
+ return JS_FALSE;
+
+ /* After this point the control must flow through the label out. */
+ cx->creatingException = JS_TRUE;
+
+ /* Protect the newly-created strings below from nesting GCs. */
+ memset(tv, 0, sizeof tv);
+ JS_PUSH_TEMP_ROOT(cx, sizeof tv / sizeof tv[0], tv, &tvr);
+
+ /*
+ * Try to get an appropriate prototype by looking up the corresponding
+ * exception constructor name in the scope chain of the current context's
+ * top stack frame, or in the global object if no frame is active.
+ */
+ ok = js_GetClassPrototype(cx, NULL, INT_TO_JSID(exceptions[exn].key),
+ &errProto);
+ if (!ok)
+ goto out;
+ tv[0] = OBJECT_TO_JSVAL(errProto);
+
+ errObject = js_NewObject(cx, &js_ErrorClass, errProto, NULL);
+ if (!errObject) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ tv[1] = OBJECT_TO_JSVAL(errObject);
+
+ messageStr = JS_NewStringCopyZ(cx, message);
+ if (!messageStr) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ tv[2] = STRING_TO_JSVAL(messageStr);
+
+ filenameStr = JS_NewStringCopyZ(cx, reportp->filename);
+ if (!filenameStr) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ tv[3] = STRING_TO_JSVAL(filenameStr);
+
+ ok = InitExnPrivate(cx, errObject, messageStr, filenameStr,
+ reportp->lineno, reportp);
+ if (!ok)
+ goto out;
+
+ JS_SetPendingException(cx, OBJECT_TO_JSVAL(errObject));
+
+ /* Flag the error report passed in to indicate an exception was raised. */
+ reportp->flags |= JSREPORT_EXCEPTION;
+
+out:
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ cx->creatingException = JS_FALSE;
+ return ok;
+}
+
+JSBool
+js_ReportUncaughtException(JSContext *cx)
+{
+ jsval exn;
+ JSObject *exnObject;
+ jsval vp[5];
+ JSTempValueRooter tvr;
+ JSErrorReport *reportp, report;
+ JSString *str;
+ const char *bytes;
+ JSBool ok;
+
+ if (!JS_IsExceptionPending(cx))
+ return JS_TRUE;
+
+ if (!JS_GetPendingException(cx, &exn))
+ return JS_FALSE;
+
+ /*
+ * Because js_ValueToString below could error and an exception object
+ * could become unrooted, we must root exnObject. Later, if exnObject is
+ * non-null, we need to root other intermediates, so allocate an operand
+ * stack segment to protect all of these values.
+ */
+ if (JSVAL_IS_PRIMITIVE(exn)) {
+ exnObject = NULL;
+ } else {
+ exnObject = JSVAL_TO_OBJECT(exn);
+ vp[0] = exn;
+ memset(vp + 1, 0, sizeof vp - sizeof vp[0]);
+ JS_PUSH_TEMP_ROOT(cx, JS_ARRAY_LENGTH(vp), vp, &tvr);
+ }
+
+ JS_ClearPendingException(cx);
+ reportp = js_ErrorFromException(cx, exn);
+
+ /* XXX L10N angels cry once again (see also jsemit.c, /L10N gaffes/) */
+ str = js_ValueToString(cx, exn);
+ if (!str) {
+ bytes = "unknown (can't convert to string)";
+ } else {
+ if (exnObject)
+ vp[1] = STRING_TO_JSVAL(str);
+ bytes = js_GetStringBytes(cx->runtime, str);
+ }
+ ok = JS_TRUE;
+
+ if (!reportp &&
+ exnObject &&
+ OBJ_GET_CLASS(cx, exnObject) == &js_ErrorClass) {
+ const char *filename;
+ uint32 lineno;
+
+ ok = JS_GetProperty(cx, exnObject, js_message_str, &vp[2]);
+ if (!ok)
+ goto out;
+ if (JSVAL_IS_STRING(vp[2]))
+ bytes = JS_GetStringBytes(JSVAL_TO_STRING(vp[2]));
+
+ ok = JS_GetProperty(cx, exnObject, js_fileName_str, &vp[3]);
+ if (!ok)
+ goto out;
+ str = js_ValueToString(cx, vp[3]);
+ if (!str) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ filename = StringToFilename(cx, str);
+
+ ok = JS_GetProperty(cx, exnObject, js_lineNumber_str, &vp[4]);
+ if (!ok)
+ goto out;
+ ok = js_ValueToECMAUint32 (cx, vp[4], &lineno);
+ if (!ok)
+ goto out;
+
+ reportp = &report;
+ memset(&report, 0, sizeof report);
+ report.filename = filename;
+ report.lineno = (uintN) lineno;
+ }
+
+ if (!reportp) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_UNCAUGHT_EXCEPTION, bytes);
+ } else {
+ /* Flag the error as an exception. */
+ reportp->flags |= JSREPORT_EXCEPTION;
+ js_ReportErrorAgain(cx, bytes, reportp);
+ }
+
+out:
+ if (exnObject)
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return ok;
+}
diff --git a/src/third_party/js-1.7/jsexn.h b/src/third_party/js-1.7/jsexn.h
new file mode 100644
index 00000000000..58cb984d4c7
--- /dev/null
+++ b/src/third_party/js-1.7/jsexn.h
@@ -0,0 +1,96 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS runtime exception classes.
+ */
+
+#ifndef jsexn_h___
+#define jsexn_h___
+
+JS_BEGIN_EXTERN_C
+
+extern JSClass js_ErrorClass;
+
+/*
+ * Initialize the exception constructor/prototype hierarchy.
+ */
+extern JSObject *
+js_InitExceptionClasses(JSContext *cx, JSObject *obj);
+
+/*
+ * Given a JSErrorReport, check to see if there is an exception associated with
+ * the error number. If there is, then create an appropriate exception object,
+ * set it as the pending exception, and set the JSREPORT_EXCEPTION flag on the
+ * error report. Exception-aware host error reporters should probably ignore
+ * error reports so flagged. Returns JS_TRUE if an associated exception is
+ * found and set, JS_FALSE otherwise.
+ */
+extern JSBool
+js_ErrorToException(JSContext *cx, const char *message, JSErrorReport *reportp);
+
+/*
+ * Called if a JS API call to js_Execute or js_InternalCall fails; calls the
+ * error reporter with the error report associated with any uncaught exception
+ * that has been raised. Returns true if there was an exception pending, and
+ * the error reporter was actually called.
+ *
+ * The JSErrorReport * that the error reporter is called with is currently
+ * associated with a JavaScript object, and is not guaranteed to persist after
+ * the object is collected. Any persistent uses of the JSErrorReport contents
+ * should make their own copy.
+ *
+ * The flags field of the JSErrorReport will have the JSREPORT_EXCEPTION flag
+ * set; embeddings that want to silently propagate JavaScript exceptions to
+ * other contexts may want to use an error reporter that ignores errors with
+ * this flag.
+ */
+extern JSBool
+js_ReportUncaughtException(JSContext *cx);
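+
+/*
+ * Illustrative sketch only (not part of the original sources, never built):
+ * an exception-aware host error reporter of the kind described above.  It
+ * ignores reports flagged JSREPORT_EXCEPTION so that script-level try/catch
+ * sees the exception instead, and it copies nothing out of the report.
+ * Assumes only the usual jsapi.h entry points JS_SetErrorReporter and
+ * JSREPORT_IS_EXCEPTION.
+ */
+#if 0
+#include <stdio.h>
+
+static void
+ExceptionAwareReporter(JSContext *cx, const char *message, JSErrorReport *report)
+{
+    if (report && JSREPORT_IS_EXCEPTION(report->flags))
+        return;                 /* let the pending exception propagate */
+    fprintf(stderr, "%s:%u: %s\n",
+            (report && report->filename) ? report->filename : "<no filename>",
+            report ? report->lineno : 0,
+            message);
+}
+
+/* Installed once per context: JS_SetErrorReporter(cx, ExceptionAwareReporter); */
+#endif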
+
+extern JSErrorReport *
+js_ErrorFromException(JSContext *cx, jsval exn);
+
+extern const JSErrorFormatString *
+js_GetLocalizedErrorMessage(JSContext* cx, void *userRef, const char *locale,
+ const uintN errorNumber);
+
+JS_END_EXTERN_C
+
+#endif /* jsexn_h___ */
diff --git a/src/third_party/js-1.7/jsfile.c b/src/third_party/js-1.7/jsfile.c
new file mode 100644
index 00000000000..ed1c4e8b6ea
--- /dev/null
+++ b/src/third_party/js-1.7/jsfile.c
@@ -0,0 +1,2735 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=80:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS File object
+ */
+#if JS_HAS_FILE_OBJECT
+
+#include "jsstddef.h"
+#include "jsfile.h"
+
+/* ----------------- Platform-specific includes and defines ----------------- */
+#if defined(XP_WIN) || defined(XP_OS2)
+# include <direct.h>
+# include <io.h>
+# include <sys/types.h>
+# include <sys/stat.h>
+# define FILESEPARATOR '\\'
+# define FILESEPARATOR2 '/'
+# define CURRENT_DIR "c:\\"
+# define POPEN _popen
+# define PCLOSE _pclose
+#elif defined(XP_UNIX) || defined(XP_BEOS)
+# include <strings.h>
+# include <stdio.h>
+# include <stdlib.h>
+# include <unistd.h>
+# define FILESEPARATOR '/'
+# define FILESEPARATOR2 '\0'
+# define CURRENT_DIR "/"
+# define POPEN popen
+# define PCLOSE pclose
+#endif
+
+/* --------------- Platform-independent includes and defines ---------------- */
+#include "jsapi.h"
+#include "jsatom.h"
+#include "jscntxt.h"
+#include "jsdate.h"
+#include "jsdbgapi.h"
+#include "jsemit.h"
+#include "jsfun.h"
+#include "jslock.h"
+#include "jsobj.h"
+#include "jsparse.h"
+#include "jsscan.h"
+#include "jsscope.h"
+#include "jsscript.h"
+#include "jsstr.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include <string.h>
+
+/* NSPR dependencies */
+#include "prio.h"
+#include "prerror.h"
+
+#define SPECIAL_FILE_STRING "Special File"
+#define CURRENTDIR_PROPERTY "currentDir"
+#define SEPARATOR_PROPERTY "separator"
+#define FILE_CONSTRUCTOR "File"
+#define PIPE_SYMBOL '|'
+
+#define ASCII 0
+#define UTF8 1
+#define UCS2 2
+
+#define asciistring "text"
+#define utfstring "binary"
+#define unicodestring "unicode"
+
+#define MAX_PATH_LENGTH 1024
+#define MODE_SIZE 256
+#define NUMBER_SIZE 32
+#define MAX_LINE_LENGTH 256
+#define URL_PREFIX "file://"
+
+#define STDINPUT_NAME "Standard input stream"
+#define STDOUTPUT_NAME "Standard output stream"
+#define STDERROR_NAME "Standard error stream"
+
+#define RESOLVE_PATH js_canonicalPath /* js_absolutePath */
+
+/* Error handling */
+typedef enum JSFileErrNum {
+#define MSG_DEF(name, number, count, exception, format) \
+ name = number,
+#include "jsfile.msg"
+#undef MSG_DEF
+ JSFileErr_Limit
+#undef MSGDEF
+} JSFileErrNum;
+
+#define JSFILE_HAS_DFLT_MSG_STRINGS 1
+
+JSErrorFormatString JSFile_ErrorFormatString[JSFileErr_Limit] = {
+#if JSFILE_HAS_DFLT_MSG_STRINGS
+#define MSG_DEF(name, number, count, exception, format) \
+ { format, count },
+#else
+#define MSG_DEF(name, number, count, exception, format) \
+ { NULL, count },
+#endif
+#include "jsfile.msg"
+#undef MSG_DEF
+};
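+
+/*
+ * Sketch only (the entry shown is illustrative, not quoted from jsfile.msg):
+ * a message definition such as
+ *
+ *     MSG_DEF(JSFILEMSG_OP_FAILED, 2, 2, JSEXN_NONE, "operation {0} failed on {1}")
+ *
+ * expands to "JSFILEMSG_OP_FAILED = 2," in the JSFileErrNum enum above and,
+ * with JSFILE_HAS_DFLT_MSG_STRINGS set, to the initializer
+ * { "operation {0} failed on {1}", 2 } in JSFile_ErrorFormatString.
+ */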
+
+const JSErrorFormatString *
+JSFile_GetErrorMessage(void *userRef, const char *locale,
+ const uintN errorNumber)
+{
+ if ((errorNumber > 0) && (errorNumber < JSFileErr_Limit))
+ return &JSFile_ErrorFormatString[errorNumber];
+ else
+ return NULL;
+}
+
+#define JSFILE_CHECK_NATIVE(op) \
+ if (file->isNative) { \
+ JS_ReportWarning(cx, "Cannot call or access \"%s\" on native file %s",\
+ op, file->path); \
+ goto out; \
+ }
+
+#define JSFILE_CHECK_WRITE \
+ if (!file->isOpen) { \
+ JS_ReportWarning(cx, \
+ "File %s is closed, will open it for writing, proceeding", \
+ file->path); \
+ js_FileOpen(cx, obj, file, "write,append,create"); \
+ } \
+ if (!js_canWrite(cx, file)) { \
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL, \
+ JSFILEMSG_CANNOT_WRITE, file->path); \
+ goto out; \
+ }
+
+#define JSFILE_CHECK_READ \
+ if (!file->isOpen) { \
+ JS_ReportWarning(cx, \
+ "File %s is closed, will open it for reading, proceeding", \
+ file->path); \
+ js_FileOpen(cx, obj, file, "read"); \
+ } \
+ if (!js_canRead(cx, file)) { \
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL, \
+ JSFILEMSG_CANNOT_READ, file->path); \
+ goto out; \
+ }
+
+#define JSFILE_CHECK_OPEN(op) \
+ if (!file->isOpen) { \
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL, \
+ JSFILEMSG_FILE_MUST_BE_CLOSED, op); \
+ goto out; \
+ }
+
+#define JSFILE_CHECK_CLOSED(op) \
+ if (file->isOpen) { \
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL, \
+ JSFILEMSG_FILE_MUST_BE_OPEN, op); \
+ goto out; \
+ }
+
+#define JSFILE_CHECK_ONE_ARG(op) \
+ if (argc != 1) { \
+ char str[NUMBER_SIZE]; \
+ sprintf(str, "%d", argc); \
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL, \
+ JSFILEMSG_EXPECTS_ONE_ARG_ERROR, op, str); \
+ goto out; \
+ }
+
+
+/*
+ Security mechanism, should define a callback for this.
+ The parameters are as follows:
+ SECURITY_CHECK(JSContext *cx, JSPrincipals *ps, char *op_name, JSFile *file)
+ XXX Should this be a real function returning a JSBool result (and getting
+ some typesafety help from the compiler?).
+*/
+#define SECURITY_CHECK(cx, ps, op, file) \
+ /* Define a callback here... */
+
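+
+/*
+ * Illustrative sketch only (not part of the original sources, never built):
+ * one shape the security callback could take, using a hypothetical
+ * js_CheckFileAccess(cx, ps, op, file) helper that returns JS_FALSE when the
+ * operation should be denied.
+ */
+#if 0
+#define SECURITY_CHECK(cx, ps, op, file)                                     \
+    if (!js_CheckFileAccess((cx), (ps), (op), (file))) {                     \
+        JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,               \
+                             JSFILEMSG_OP_FAILED, op, (file)->path);         \
+        goto out;                                                            \
+    }
+#endif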
+
+/* Structure representing the file internally */
+typedef struct JSFile {
+ char *path; /* the path to the file. */
+ JSBool isOpen;
+ int32 mode; /* mode used to open the file: read, write, append, create, etc.. */
+ int32 type; /* Asciiz, utf, unicode */
+ char byteBuffer[3]; /* bytes read in advance by js_FileRead ( UTF8 encoding ) */
+ jsint nbBytesInBuf; /* number of bytes stored in the buffer above */
+ jschar charBuffer; /* character read in advance by readln ( mac files only ) */
+ JSBool charBufferUsed; /* flag indicating if the buffer above is being used */
+ JSBool hasRandomAccess;/* can the file be randomly accessed? false for stdin, and
+ UTF-encoded files. */
+ JSBool hasAutoflush; /* should we force a flush for each line break? */
+ JSBool isNative; /* if the file is using OS-specific file FILE type */
+ /* We can actually put the following two in a union since they should never be used at the same time */
+ PRFileDesc *handle; /* the handle for the file, if open. */
+ FILE *nativehandle; /* native handle, for stuff NSPR doesn't do. */
+ JSBool isPipe; /* if the file is really an OS pipe */
+} JSFile;
+
+/* a few forward declarations... */
+JS_PUBLIC_API(JSObject*) js_NewFileObject(JSContext *cx, char *filename);
+static JSBool file_open(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval);
+static JSBool file_close(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval);
+
+/* New filename manipulation procedures */
+/* assumes we don't have leading/trailing spaces */
+static JSBool
+js_filenameHasAPipe(const char *filename)
+{
+ if (!filename)
+ return JS_FALSE;
+
+ return filename[0] == PIPE_SYMBOL ||
+ filename[strlen(filename) - 1] == PIPE_SYMBOL;
+}
+
+static JSBool
+js_isAbsolute(const char *name)
+{
+#if defined(XP_WIN) || defined(XP_OS2)
+ return *name && name[1] == ':';
+#else
+ return (name[0]
+# if defined(XP_UNIX) || defined(XP_BEOS)
+ ==
+# else
+ !=
+# endif
+ FILESEPARATOR);
+#endif
+}
+
+/*
+ * Concatenates base and name to produce a valid filename.
+ * Returned string must be freed.
+ */
+static char*
+js_combinePath(JSContext *cx, const char *base, const char *name)
+{
+ int len = strlen(base);
+ char* result = JS_malloc(cx, len + strlen(name) + 2);
+
+ if (!result)
+ return NULL;
+
+ strcpy(result, base);
+
+ if (base[len - 1] != FILESEPARATOR && base[len - 1] != FILESEPARATOR2) {
+ result[len] = FILESEPARATOR;
+ result[len + 1] = '\0';
+ }
+ strcat(result, name);
+ return result;
+}
+
+/* Extract the last component from a path name. Returned string must be freed */
+static char *
+js_fileBaseName(JSContext *cx, const char *pathname)
+{
+ jsint index, aux;
+ char *result;
+
+ index = strlen(pathname)-1;
+
+    /* Chop off trailing separators. */
+ while (index > 0 && (pathname[index]==FILESEPARATOR ||
+ pathname[index]==FILESEPARATOR2)) {
+ --index;
+ }
+
+ aux = index;
+
+ /* Now find the next separator. */
+ while (index >= 0 && pathname[index] != FILESEPARATOR &&
+ pathname[index] != FILESEPARATOR2) {
+ --index;
+ }
+
+ /* Allocate and copy. */
+ result = JS_malloc(cx, aux - index + 1);
+ if (!result)
+ return NULL;
+ strncpy(result, pathname + index + 1, aux - index);
+ result[aux - index] = '\0';
+ return result;
+}
+
+/*
+ * Returns everything but the last component from a path name.
+ * Returned string must be freed.
+ */
+static char *
+js_fileDirectoryName(JSContext *cx, const char *pathname)
+{
+ char *result;
+ const char *cp, *end;
+ size_t pathsize;
+
+ end = pathname + strlen(pathname);
+ cp = end - 1;
+
+ /* If this is already a directory, chop off the trailing /s. */
+ while (cp >= pathname) {
+ if (*cp != FILESEPARATOR && *cp != FILESEPARATOR2)
+ break;
+ --cp;
+ }
+
+ if (cp < pathname && end != pathname) {
+ /* There were just /s, return the root. */
+        result = JS_malloc(cx, 1 + 1); /* The separator + trailing NUL. */
+        if (!result)
+            return NULL;
+        result[0] = FILESEPARATOR;
+ result[1] = '\0';
+ return result;
+ }
+
+ /* Now chop off the last portion. */
+ while (cp >= pathname) {
+ if (*cp == FILESEPARATOR || *cp == FILESEPARATOR2)
+ break;
+ --cp;
+ }
+
+ /* Check if this is a leaf. */
+ if (cp < pathname) {
+ /* It is, return "pathname/". */
+ if (end[-1] == FILESEPARATOR || end[-1] == FILESEPARATOR2) {
+ /* Already has its terminating /. */
+ return JS_strdup(cx, pathname);
+ }
+
+ pathsize = end - pathname + 1;
+ result = JS_malloc(cx, pathsize + 1);
+ if (!result)
+ return NULL;
+
+ strcpy(result, pathname);
+ result[pathsize - 1] = FILESEPARATOR;
+ result[pathsize] = '\0';
+
+ return result;
+ }
+
+    /* Return everything up to and including the separator. */
+ pathsize = cp - pathname + 1;
+ result = JS_malloc(cx, pathsize + 1);
+ if (!result)
+ return NULL;
+
+ strncpy(result, pathname, pathsize);
+ result[pathsize] = '\0';
+
+ return result;
+}
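+
+/*
+ * Sketch only: with the two helpers above,
+ *
+ *     js_fileBaseName(cx, "/tmp/foo/bar.txt")      returns "bar.txt"
+ *     js_fileDirectoryName(cx, "/tmp/foo/bar.txt") returns "/tmp/foo/"
+ *
+ * and both results must be released with JS_free.
+ */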
+
+static char *
+js_absolutePath(JSContext *cx, const char * path)
+{
+ JSObject *obj;
+ JSString *str;
+ jsval prop;
+
+ if (js_isAbsolute(path)) {
+ return JS_strdup(cx, path);
+ } else {
+ obj = JS_GetGlobalObject(cx);
+ if (!JS_GetProperty(cx, obj, FILE_CONSTRUCTOR, &prop)) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_FILE_CONSTRUCTOR_UNDEFINED_ERROR);
+ return JS_strdup(cx, path);
+ }
+
+ obj = JSVAL_TO_OBJECT(prop);
+ if (!JS_GetProperty(cx, obj, CURRENTDIR_PROPERTY, &prop)) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_FILE_CURRENTDIR_UNDEFINED_ERROR);
+ return JS_strdup(cx, path);
+ }
+
+ str = JS_ValueToString(cx, prop);
+ if (!str)
+ return JS_strdup(cx, path);
+
+ /* should we have an array of curr dirs indexed by drive for windows? */
+ return js_combinePath(cx, JS_GetStringBytes(str), path);
+ }
+}
+
+/* Side effect: removes spaces at the beginning and end of the filename */
+static char *
+js_canonicalPath(JSContext *cx, char *oldpath)
+{
+ char *tmp;
+ char *path = oldpath;
+ char *base, *dir, *current, *result;
+ jsint c;
+ jsint back = 0;
+ unsigned int i = 0, j = strlen(path)-1;
+
+ /* This is probably optional */
+ /* Remove possible spaces in the beginning and end */
+ while (i < j && path[i] == ' ')
+ i++;
+ while (j >= 0 && path[j] == ' ')
+ j--;
+
+ tmp = JS_malloc(cx, j-i+2);
+ if (!tmp)
+ return NULL;
+
+ strncpy(tmp, path + i, j - i + 1);
+ tmp[j - i + 1] = '\0';
+
+ path = tmp;
+
+ /* Pipe support. */
+ if (js_filenameHasAPipe(path))
+ return path;
+
+ /* file:// support. */
+ if (!strncmp(path, URL_PREFIX, strlen(URL_PREFIX))) {
+ tmp = js_canonicalPath(cx, path + strlen(URL_PREFIX));
+ JS_free(cx, path);
+ return tmp;
+ }
+
+ if (!js_isAbsolute(path)) {
+ tmp = js_absolutePath(cx, path);
+ if (!tmp)
+ return NULL;
+ path = tmp;
+ }
+
+ result = JS_strdup(cx, "");
+
+ current = path;
+
+ base = js_fileBaseName(cx, current);
+ dir = js_fileDirectoryName(cx, current);
+
+ while (strcmp(dir, current)) {
+ if (!strcmp(base, "..")) {
+ back++;
+ } else {
+ if (back > 0) {
+ back--;
+ } else {
+ tmp = result;
+ result = JS_malloc(cx, strlen(base) + 1 + strlen(tmp) + 1);
+ if (!result)
+ goto out;
+
+ strcpy(result, base);
+ c = strlen(result);
+ if (*tmp) {
+ result[c] = FILESEPARATOR;
+ result[c + 1] = '\0';
+ strcat(result, tmp);
+ }
+ JS_free(cx, tmp);
+ }
+ }
+ JS_free(cx, current);
+ JS_free(cx, base);
+ current = dir;
+ base = js_fileBaseName(cx, current);
+ dir = js_fileDirectoryName(cx, current);
+ }
+
+ tmp = result;
+ result = JS_malloc(cx, strlen(dir)+1+strlen(tmp)+1);
+ if (!result)
+ goto out;
+
+ strcpy(result, dir);
+ c = strlen(result);
+ if (tmp[0]!='\0') {
+ if ((result[c-1]!=FILESEPARATOR)&&(result[c-1]!=FILESEPARATOR2)) {
+ result[c] = FILESEPARATOR;
+ result[c+1] = '\0';
+ }
+ strcat(result, tmp);
+ }
+
+out:
+ if (tmp)
+ JS_free(cx, tmp);
+ if (dir)
+ JS_free(cx, dir);
+ if (base)
+ JS_free(cx, base);
+ if (current)
+ JS_free(cx, current);
+
+ return result;
+}
+
+/* -------------------------- Text conversion ------------------------------- */
+/* The following is ripped from libi18n/unicvt.c and its include files. */
+
+/*
+ * UTF8 defines and macros
+ */
+#define ONE_OCTET_BASE 0x00 /* 0xxxxxxx */
+#define ONE_OCTET_MASK 0x7F /* x1111111 */
+#define CONTINUING_OCTET_BASE 0x80 /* 10xxxxxx */
+#define CONTINUING_OCTET_MASK 0x3F /* 00111111 */
+#define TWO_OCTET_BASE 0xC0 /* 110xxxxx */
+#define TWO_OCTET_MASK 0x1F /* 00011111 */
+#define THREE_OCTET_BASE 0xE0 /* 1110xxxx */
+#define THREE_OCTET_MASK 0x0F /* 00001111 */
+#define FOUR_OCTET_BASE 0xF0 /* 11110xxx */
+#define FOUR_OCTET_MASK 0x07 /* 00000111 */
+#define FIVE_OCTET_BASE 0xF8 /* 111110xx */
+#define FIVE_OCTET_MASK 0x03 /* 00000011 */
+#define SIX_OCTET_BASE 0xFC /* 1111110x */
+#define SIX_OCTET_MASK 0x01 /* 00000001 */
+
+#define IS_UTF8_1ST_OF_1(x) (( (x)&~ONE_OCTET_MASK ) == ONE_OCTET_BASE)
+#define IS_UTF8_1ST_OF_2(x) (( (x)&~TWO_OCTET_MASK ) == TWO_OCTET_BASE)
+#define IS_UTF8_1ST_OF_3(x) (( (x)&~THREE_OCTET_MASK) == THREE_OCTET_BASE)
+#define IS_UTF8_1ST_OF_4(x) (( (x)&~FOUR_OCTET_MASK ) == FOUR_OCTET_BASE)
+#define IS_UTF8_1ST_OF_5(x) (( (x)&~FIVE_OCTET_MASK ) == FIVE_OCTET_BASE)
+#define IS_UTF8_1ST_OF_6(x) (( (x)&~SIX_OCTET_MASK ) == SIX_OCTET_BASE)
+#define IS_UTF8_2ND_THRU_6TH(x) \
+ (( (x)&~CONTINUING_OCTET_MASK ) == CONTINUING_OCTET_BASE)
+#define IS_UTF8_1ST_OF_UCS2(x) \
+ IS_UTF8_1ST_OF_1(x) \
+ || IS_UTF8_1ST_OF_2(x) \
+ || IS_UTF8_1ST_OF_3(x)
+
+
+#define MAX_UCS2 0xFFFF
+#define DEFAULT_CHAR 0x003F /* Default char is "?" */
+#define BYTE_MASK 0xBF
+#define BYTE_MARK 0x80
+
+
+/* Function: one_ucs2_to_utf8_char
+ *
+ * Function takes one UCS-2 char and writes it to a UTF-8 buffer.
+ * We need a UTF-8 buffer because we don't know before this
+ * function how many bytes of utf-8 data will be written. It also
+ * takes a pointer to the end of the UTF-8 buffer so that we don't
+ * overwrite data. This function returns the number of UTF-8 bytes
+ * of data written, or -1 if the buffer would have been overrun.
+ */
+
+#define LINE_SEPARATOR 0x2028
+#define PARAGRAPH_SEPARATOR 0x2029
+static int16 one_ucs2_to_utf8_char(unsigned char *tobufp,
+ unsigned char *tobufendp,
+ uint16 onechar)
+{
+ int16 numUTF8bytes = 0;
+
+ if (onechar == LINE_SEPARATOR || onechar == PARAGRAPH_SEPARATOR) {
+ strcpy((char*)tobufp, "\n");
+ return strlen((char*)tobufp);
+ }
+
+ if (onechar < 0x80) {
+ numUTF8bytes = 1;
+ } else if (onechar < 0x800) {
+ numUTF8bytes = 2;
+ } else {
+ /* 0x800 >= onechar <= MAX_UCS2 */
+ numUTF8bytes = 3;
+ }
+
+ tobufp += numUTF8bytes;
+
+ /* return error if we don't have space for the whole character */
+ if (tobufp > tobufendp) {
+ return(-1);
+ }
+
+ switch(numUTF8bytes) {
+ case 3: *--tobufp = (onechar | BYTE_MARK) & BYTE_MASK; onechar >>=6;
+ *--tobufp = (onechar | BYTE_MARK) & BYTE_MASK; onechar >>=6;
+ *--tobufp = onechar | THREE_OCTET_BASE;
+ break;
+
+ case 2: *--tobufp = (onechar | BYTE_MARK) & BYTE_MASK; onechar >>=6;
+ *--tobufp = onechar | TWO_OCTET_BASE;
+ break;
+
+ case 1: *--tobufp = (unsigned char)onechar;
+ break;
+ }
+
+ return numUTF8bytes;
+}
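+
+/*
+ * Illustrative sketch only (not part of the original sources, never built):
+ * encoding one UCS-2 character with the routine above.
+ */
+#if 0
+{
+    unsigned char utf8[6];
+    int16 n = one_ucs2_to_utf8_char(utf8, utf8 + sizeof utf8, 0x00E9);
+    /* n == 2 and utf8[0..1] == 0xC3 0xA9 (U+00E9, LATIN SMALL LETTER E WITH
+       ACUTE); n == -1 would mean the output buffer was too small. */
+}
+#endif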
+
+/*
+ * utf8_to_ucs2_char
+ *
+ * Convert a utf8 multibyte character to ucs2
+ *
+ * inputs: pointer to utf8 character(s)
+ * length of utf8 buffer ("read" length limit)
+ * pointer to return ucs2 character
+ *
+ * outputs: number of bytes in the utf8 character
+ * -1 if not a valid utf8 character sequence
+ * -2 if the buffer is too short
+ */
+static int16
+utf8_to_ucs2_char(const unsigned char *utf8p, int16 buflen, uint16 *ucs2p)
+{
+ uint16 lead, cont1, cont2;
+
+ /*
+ * Check for minimum buffer length
+ */
+ if ((buflen < 1) || (utf8p == NULL)) {
+ return -2;
+ }
+ lead = (uint16) (*utf8p);
+
+ /*
+ * Check for a one octet sequence
+ */
+ if (IS_UTF8_1ST_OF_1(lead)) {
+ *ucs2p = lead & ONE_OCTET_MASK;
+ return 1;
+ }
+
+ /*
+ * Check for a two octet sequence
+ */
+ if (IS_UTF8_1ST_OF_2(*utf8p)) {
+ if (buflen < 2)
+ return -2;
+ cont1 = (uint16) *(utf8p+1);
+ if (!IS_UTF8_2ND_THRU_6TH(cont1))
+ return -1;
+ *ucs2p = (lead & TWO_OCTET_MASK) << 6;
+ *ucs2p |= cont1 & CONTINUING_OCTET_MASK;
+ return 2;
+ }
+
+ /*
+ * Check for a three octet sequence
+ */
+ else if (IS_UTF8_1ST_OF_3(lead)) {
+ if (buflen < 3)
+ return -2;
+ cont1 = (uint16) *(utf8p+1);
+ cont2 = (uint16) *(utf8p+2);
+ if ( (!IS_UTF8_2ND_THRU_6TH(cont1))
+ || (!IS_UTF8_2ND_THRU_6TH(cont2)))
+ return -1;
+ *ucs2p = (lead & THREE_OCTET_MASK) << 12;
+ *ucs2p |= (cont1 & CONTINUING_OCTET_MASK) << 6;
+ *ucs2p |= cont2 & CONTINUING_OCTET_MASK;
+ return 3;
+ }
+ else { /* not a valid utf8/ucs2 character */
+ return -1;
+ }
+}
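+
+/*
+ * Illustrative sketch only (not part of the original sources, never built):
+ * decoding the two-octet sequence from the previous sketch back to UCS-2.
+ */
+#if 0
+{
+    static const unsigned char seq[] = { 0xC3, 0xA9 };
+    uint16 ucs2;
+    int16 n = utf8_to_ucs2_char(seq, (int16) sizeof seq, &ucs2);
+    /* n == 2 and ucs2 == 0x00E9; -1 means an invalid sequence, -2 means the
+       buffer was shorter than the sequence length implied by the lead octet. */
+}
+#endif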
+
+/* ----------------------------- Helper functions --------------------------- */
+/* Ripped off from lm_win.c .. */
+/* Where is strcasecmp? For now, the comparison is case sensitive.
+ *
+ * strcasecmp is in strings.h, but on Windows it's called _stricmp;
+ * this will need an #ifdef.
+ */
+
+static int32
+js_FileHasOption(JSContext *cx, const char *oldoptions, const char *name)
+{
+ char *comma, *equal, *current;
+ char *options = JS_strdup(cx, oldoptions);
+ int32 found = 0;
+
+ current = options;
+ for (;;) {
+ comma = strchr(current, ',');
+ if (comma) *comma = '\0';
+ equal = strchr(current, '=');
+ if (equal) *equal = '\0';
+ if (strcmp(current, name) == 0) {
+ if (!equal || strcmp(equal + 1, "yes") == 0)
+ found = 1;
+ else
+ found = atoi(equal + 1);
+ }
+ if (equal) *equal = '=';
+ if (comma) *comma = ',';
+ if (found || !comma)
+ break;
+ current = comma + 1;
+ }
+ JS_free(cx, options);
+ return found;
+}
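+
+/*
+ * Sketch only ("bufsize" is a made-up option name, not one this file uses):
+ * given oldoptions == "readWrite,autoflush,bufsize=4",
+ *
+ *     js_FileHasOption(cx, oldoptions, "readWrite") == 1
+ *     js_FileHasOption(cx, oldoptions, "autoflush") == 1
+ *     js_FileHasOption(cx, oldoptions, "bufsize")   == 4
+ *     js_FileHasOption(cx, oldoptions, "replace")   == 0
+ */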
+
+/* empty the buffer */
+static void
+js_ResetBuffers(JSFile * file)
+{
+ file->charBufferUsed = JS_FALSE;
+ file->nbBytesInBuf = 0;
+}
+
+/* Reset file attributes */
+static void
+js_ResetAttributes(JSFile * file)
+{
+ file->mode = file->type = 0;
+ file->isOpen = JS_FALSE;
+ file->handle = NULL;
+ file->nativehandle = NULL;
+ file->hasRandomAccess = JS_TRUE; /* Innocent until proven guilty. */
+ file->hasAutoflush = JS_FALSE;
+ file->isNative = JS_FALSE;
+ file->isPipe = JS_FALSE;
+
+ js_ResetBuffers(file);
+}
+
+static JSBool
+js_FileOpen(JSContext *cx, JSObject *obj, JSFile *file, char *mode){
+ JSString *type, *mask;
+ jsval v[2];
+ jsval rval;
+
+ type = JS_InternString(cx, asciistring);
+ mask = JS_NewStringCopyZ(cx, mode);
+ v[0] = STRING_TO_JSVAL(mask);
+ v[1] = STRING_TO_JSVAL(type);
+
+ if (!file_open(cx, obj, 2, v, &rval))
+ return JS_FALSE;
+ return JS_TRUE;
+}
+
+/* Buffered version of PR_Read. Used by js_FileRead */
+static int32
+js_BufferedRead(JSFile *f, unsigned char *buf, int32 len)
+{
+ int32 count = 0;
+
+ while (f->nbBytesInBuf>0&&len>0) {
+ buf[0] = f->byteBuffer[0];
+ f->byteBuffer[0] = f->byteBuffer[1];
+ f->byteBuffer[1] = f->byteBuffer[2];
+ f->nbBytesInBuf--;
+ len--;
+ buf+=1;
+ count++;
+ }
+
+ if (len > 0) {
+ count += (!f->isNative)
+ ? PR_Read(f->handle, buf, len)
+ : fread(buf, 1, len, f->nativehandle);
+ }
+ return count;
+}
+
+static int32
+js_FileRead(JSContext *cx, JSFile *file, jschar *buf, int32 len, int32 mode)
+{
+ unsigned char *aux;
+ int32 count = 0, i;
+ jsint remainder;
+ unsigned char utfbuf[3];
+
+ if (file->charBufferUsed) {
+ buf[0] = file->charBuffer;
+ buf++;
+ len--;
+ file->charBufferUsed = JS_FALSE;
+ }
+
+ switch (mode) {
+ case ASCII:
+ aux = (unsigned char*)JS_malloc(cx, len);
+ if (!aux)
+ return 0;
+
+ count = js_BufferedRead(file, aux, len);
+ if (count == -1) {
+ JS_free(cx, aux);
+ return 0;
+ }
+
+ for (i = 0; i < len; i++)
+ buf[i] = (jschar)aux[i];
+
+ JS_free(cx, aux);
+ break;
+
+ case UTF8:
+ remainder = 0;
+ for (count = 0;count<len;count++) {
+ i = js_BufferedRead(file, utfbuf+remainder, 3-remainder);
+ if (i<=0) {
+ return count;
+ }
+ i = utf8_to_ucs2_char(utfbuf, (int16)i, &buf[count] );
+ if (i<0) {
+ return count;
+ } else {
+ if (i==1) {
+ utfbuf[0] = utfbuf[1];
+ utfbuf[1] = utfbuf[2];
+ remainder = 2;
+ } else if (i==2) {
+ utfbuf[0] = utfbuf[2];
+ remainder = 1;
+ } else if (i==3) {
+ remainder = 0;
+ }
+ }
+ }
+ while (remainder>0) {
+ file->byteBuffer[file->nbBytesInBuf] = utfbuf[0];
+ file->nbBytesInBuf++;
+ utfbuf[0] = utfbuf[1];
+ utfbuf[1] = utfbuf[2];
+ remainder--;
+ }
+ break;
+
+ case UCS2:
+ count = js_BufferedRead(file, (unsigned char *)buf, len * 2) >> 1;
+ if (count == -1)
+ return 0;
+
+ break;
+
+ default:
+ /* Not reached. */
+ JS_ASSERT(0);
+ }
+
+ if(count == -1) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OP_FAILED, "read", file->path);
+ }
+
+ return count;
+}
+
+static int32
+js_FileSeek(JSContext *cx, JSFile *file, int32 len, int32 mode)
+{
+ int32 count = 0, i;
+ jsint remainder;
+ unsigned char utfbuf[3];
+ jschar tmp;
+
+ switch (mode) {
+ case ASCII:
+ count = PR_Seek(file->handle, len, PR_SEEK_CUR);
+ break;
+
+ case UTF8:
+ remainder = 0;
+ for (count = 0;count<len;count++) {
+ i = js_BufferedRead(file, utfbuf+remainder, 3-remainder);
+ if (i<=0) {
+ return 0;
+ }
+ i = utf8_to_ucs2_char(utfbuf, (int16)i, &tmp );
+ if (i<0) {
+ return 0;
+ } else {
+ if (i==1) {
+ utfbuf[0] = utfbuf[1];
+ utfbuf[1] = utfbuf[2];
+ remainder = 2;
+ } else if (i==2) {
+ utfbuf[0] = utfbuf[2];
+ remainder = 1;
+ } else if (i==3) {
+ remainder = 0;
+ }
+ }
+ }
+ while (remainder>0) {
+ file->byteBuffer[file->nbBytesInBuf] = utfbuf[0];
+ file->nbBytesInBuf++;
+ utfbuf[0] = utfbuf[1];
+ utfbuf[1] = utfbuf[2];
+ remainder--;
+ }
+ break;
+
+ case UCS2:
+ count = PR_Seek(file->handle, len*2, PR_SEEK_CUR)/2;
+ break;
+
+ default:
+ /* Not reached. */
+ JS_ASSERT(0);
+ }
+
+ if(count == -1) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OP_FAILED, "seek", file->path);
+ }
+
+ return count;
+}
+
+static int32
+js_FileWrite(JSContext *cx, JSFile *file, jschar *buf, int32 len, int32 mode)
+{
+ unsigned char *aux;
+ int32 count = 0, i, j;
+ unsigned char *utfbuf;
+
+ switch (mode) {
+ case ASCII:
+ aux = (unsigned char*)JS_malloc(cx, len);
+ if (!aux)
+ return 0;
+
+ for (i = 0; i<len; i++)
+ aux[i] = buf[i] % 256;
+
+ count = (!file->isNative)
+ ? PR_Write(file->handle, aux, len)
+ : fwrite(aux, 1, len, file->nativehandle);
+
+ if (count==-1) {
+ JS_free(cx, aux);
+ return 0;
+ }
+
+ JS_free(cx, aux);
+ break;
+
+ case UTF8:
+ utfbuf = (unsigned char*)JS_malloc(cx, len*3);
+ if (!utfbuf) return 0;
+ i = 0;
+ for (count = 0;count<len;count++) {
+ j = one_ucs2_to_utf8_char(utfbuf+i, utfbuf+len*3, buf[count]);
+ if (j==-1) {
+ JS_free(cx, utfbuf);
+ return 0;
+ }
+ i+=j;
+ }
+ j = (!file->isNative)
+ ? PR_Write(file->handle, utfbuf, i)
+ : fwrite(utfbuf, 1, i, file->nativehandle);
+
+ if (j<i) {
+ JS_free(cx, utfbuf);
+ return 0;
+ }
+ JS_free(cx, utfbuf);
+ break;
+
+ case UCS2:
+ count = (!file->isNative)
+ ? PR_Write(file->handle, buf, len*2) >> 1
+ : fwrite(buf, 1, len*2, file->nativehandle) >> 1;
+
+ if (count == -1)
+ return 0;
+ break;
+
+ default:
+ /* Not reached. */
+ JS_ASSERT(0);
+ }
+
+ if(count == -1) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OP_FAILED, "write", file->path);
+ }
+
+ return count;
+}
+
+/* ----------------------------- Property checkers -------------------------- */
+static JSBool
+js_exists(JSContext *cx, JSFile *file)
+{
+ if (file->isNative) {
+        /* It doesn't make sense for a pipe or stdstream. */
+ return JS_FALSE;
+ }
+
+ return PR_Access(file->path, PR_ACCESS_EXISTS) == PR_SUCCESS;
+}
+
+static JSBool
+js_canRead(JSContext *cx, JSFile *file)
+{
+ if (!file->isNative) {
+ if (file->isOpen && !(file->mode & PR_RDONLY))
+ return JS_FALSE;
+ return PR_Access(file->path, PR_ACCESS_READ_OK) == PR_SUCCESS;
+ }
+
+ if (file->isPipe) {
+ /* Is this pipe open for reading? */
+ return file->path[0] == PIPE_SYMBOL;
+ }
+
+ return !strcmp(file->path, STDINPUT_NAME);
+}
+
+static JSBool
+js_canWrite(JSContext *cx, JSFile *file)
+{
+ if (!file->isNative) {
+ if (file->isOpen && !(file->mode & PR_WRONLY))
+ return JS_FALSE;
+ return PR_Access(file->path, PR_ACCESS_WRITE_OK) == PR_SUCCESS;
+ }
+
+ if(file->isPipe) {
+ /* Is this pipe open for writing? */
+ return file->path[strlen(file->path)-1] == PIPE_SYMBOL;
+ }
+
+ return !strcmp(file->path, STDOUTPUT_NAME) ||
+ !strcmp(file->path, STDERROR_NAME);
+}
+
+static JSBool
+js_isFile(JSContext *cx, JSFile *file)
+{
+ if (!file->isNative) {
+ PRFileInfo info;
+
+ if (file->isOpen
+ ? PR_GetOpenFileInfo(file->handle, &info)
+ : PR_GetFileInfo(file->path, &info) != PR_SUCCESS) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_CANNOT_ACCESS_FILE_STATUS, file->path);
+ return JS_FALSE;
+ }
+
+ return info.type == PR_FILE_FILE;
+ }
+
+    /* This doesn't make sense for a pipe or stdstream. */
+ return JS_FALSE;
+}
+
+static JSBool
+js_isDirectory(JSContext *cx, JSFile *file)
+{
+ if(!file->isNative){
+ PRFileInfo info;
+
+ /* Hack needed to get get_property to work. */
+ if (!js_exists(cx, file))
+ return JS_FALSE;
+
+ if (file->isOpen
+ ? PR_GetOpenFileInfo(file->handle, &info)
+ : PR_GetFileInfo(file->path, &info) != PR_SUCCESS) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_CANNOT_ACCESS_FILE_STATUS, file->path);
+ return JS_FALSE;
+ }
+
+ return info.type == PR_FILE_DIRECTORY;
+ }
+
+ /* This doesn't make sense for a pipe of stdstream. */
+ return JS_FALSE;
+}
+
+static jsval
+js_size(JSContext *cx, JSFile *file)
+{
+ PRFileInfo info;
+
+ JSFILE_CHECK_NATIVE("size");
+
+ if (file->isOpen
+ ? PR_GetOpenFileInfo(file->handle, &info)
+ : PR_GetFileInfo(file->path, &info) != PR_SUCCESS) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_CANNOT_ACCESS_FILE_STATUS, file->path);
+ return JSVAL_VOID;
+ }
+
+ return INT_TO_JSVAL(info.size);
+
+out:
+ return JSVAL_VOID;
+}
+
+/*
+ * Return the parent object
+ */
+static JSBool
+js_parent(JSContext *cx, JSFile *file, jsval *resultp)
+{
+ char *str;
+
+    /* Pipes and native files have no meaningful parent; return void for them. */
+ if (file->isNative) {
+ *resultp = JSVAL_VOID;
+ return JS_TRUE;
+ }
+
+ str = js_fileDirectoryName(cx, file->path);
+ if (!str)
+ return JS_FALSE;
+
+ /* If the directory is equal to the original path, we're at the root. */
+ if (!strcmp(file->path, str)) {
+ *resultp = JSVAL_NULL;
+ } else {
+ JSObject *obj = js_NewFileObject(cx, str);
+ if (!obj) {
+ JS_free(cx, str);
+ return JS_FALSE;
+ }
+ *resultp = OBJECT_TO_JSVAL(obj);
+ }
+
+ JS_free(cx, str);
+ return JS_TRUE;
+}
+
+static JSBool
+js_name(JSContext *cx, JSFile *file, jsval *vp)
+{
+ char *name;
+ JSString *str;
+
+ if (file->isPipe) {
+ *vp = JSVAL_VOID;
+ return JS_TRUE;
+ }
+
+ name = js_fileBaseName(cx, file->path);
+ if (!name)
+ return JS_FALSE;
+
+ str = JS_NewString(cx, name, strlen(name));
+ if (!str) {
+ JS_free(cx, name);
+ return JS_FALSE;
+ }
+
+ *vp = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+/* ------------------------------ File object methods ---------------------------- */
+static JSBool
+file_open(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+ JSString *strmode, *strtype;
+ char *ctype, *mode;
+ int32 mask, type;
+ int len;
+
+ mode = NULL;
+
+ SECURITY_CHECK(cx, NULL, "open", file);
+
+ /* A native file that is already open */
+ if(file->isOpen && file->isNative) {
+ JS_ReportWarning(cx, "Native file %s is already open, proceeding",
+ file->path);
+ goto good;
+ }
+
+ /* Close before proceeding */
+ if (file->isOpen) {
+ JS_ReportWarning(cx, "File %s is already open, we will close it and "
+ "reopen, proceeding", file->path);
+ if(!file_close(cx, obj, 0, NULL, rval))
+ goto out;
+ }
+
+ if (js_isDirectory(cx, file)) {
+ JS_ReportWarning(cx, "%s seems to be a directory, there is no point in "
+ "trying to open it, proceeding", file->path);
+ goto good;
+ }
+
+ /* Path must be defined at this point */
+ len = strlen(file->path);
+
+ /* Mode */
+ if (argc >= 1) {
+ strmode = JS_ValueToString(cx, argv[0]);
+ if (!strmode) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_FIRST_ARGUMENT_OPEN_NOT_STRING_ERROR,
+ argv[0]);
+ goto out;
+ }
+ mode = JS_strdup(cx, JS_GetStringBytes(strmode));
+ } else {
+ if(file->path[0]==PIPE_SYMBOL) {
+ /* pipe default mode */
+ mode = JS_strdup(cx, "read");
+ } else if(file->path[len-1]==PIPE_SYMBOL) {
+ /* pipe default mode */
+ mode = JS_strdup(cx, "write");
+ } else {
+ /* non-destructive, permissive defaults. */
+ mode = JS_strdup(cx, "readWrite,append,create");
+ }
+ }
+
+ /* Process the mode */
+ mask = 0;
+    /* TODO: this is pretty ugly, we walk through the string too many times */
+ mask |= js_FileHasOption(cx, mode, "read") ? PR_RDONLY : 0;
+ mask |= js_FileHasOption(cx, mode, "write") ? PR_WRONLY : 0;
+ mask |= js_FileHasOption(cx, mode, "readWrite")? PR_RDWR : 0;
+ mask |= js_FileHasOption(cx, mode, "append") ? PR_APPEND : 0;
+ mask |= js_FileHasOption(cx, mode, "create") ? PR_CREATE_FILE : 0;
+ mask |= js_FileHasOption(cx, mode, "replace") ? PR_TRUNCATE : 0;
+
+ if (mask & PR_RDWR)
+ mask |= (PR_RDONLY | PR_WRONLY);
+ if ((mask & PR_RDONLY) && (mask & PR_WRONLY))
+ mask |= PR_RDWR;
+
+ file->hasAutoflush |= js_FileHasOption(cx, mode, "autoflush");
+
+ /* Type */
+ if (argc > 1) {
+ strtype = JS_ValueToString(cx, argv[1]);
+ if (!strtype) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_SECOND_ARGUMENT_OPEN_NOT_STRING_ERROR,
+ argv[1]);
+ goto out;
+ }
+ ctype = JS_GetStringBytes(strtype);
+
+ if(!strcmp(ctype, utfstring)) {
+ type = UTF8;
+ } else if (!strcmp(ctype, unicodestring)) {
+ type = UCS2;
+ } else {
+ if (strcmp(ctype, asciistring)) {
+ JS_ReportWarning(cx, "File type %s is not supported, using "
+ "'text' instead, proceeding", ctype);
+ }
+ type = ASCII;
+ }
+ } else {
+ type = ASCII;
+ }
+
+ /* Save the relevant fields */
+ file->type = type;
+ file->mode = mask;
+ file->nativehandle = NULL;
+ file->hasRandomAccess = (type != UTF8);
+
+ /*
+ * Deal with pipes here. We can't use NSPR for pipes, so we have to use
+ * POPEN.
+ */
+ if (file->path[0]==PIPE_SYMBOL || file->path[len-1]==PIPE_SYMBOL) {
+ if (file->path[0] == PIPE_SYMBOL && file->path[len-1] == PIPE_SYMBOL) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_BIDIRECTIONAL_PIPE_NOT_SUPPORTED);
+ goto out;
+ } else {
+ int i = 0;
+ char pipemode[3];
+ SECURITY_CHECK(cx, NULL, "pipe_open", file);
+
+ if(file->path[0] == PIPE_SYMBOL){
+ if(mask & (PR_WRONLY | PR_APPEND | PR_CREATE_FILE | PR_TRUNCATE)){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OPEN_MODE_NOT_SUPPORTED_WITH_PIPES,
+ mode, file->path);
+ goto out;
+ }
+ /* open(SPOOLER, "| cat -v | lpr -h 2>/dev/null") -- pipe for writing */
+ pipemode[i++] = 'r';
+#ifndef XP_UNIX
+ pipemode[i++] = file->type==UTF8 ? 'b' : 't';
+#endif
+ pipemode[i++] = '\0';
+ file->nativehandle = POPEN(&file->path[1], pipemode);
+ } else if(file->path[len-1] == PIPE_SYMBOL) {
+                char *command = JS_malloc(cx, len);
+                if (!command)
+                    goto out;
+
+                strncpy(command, file->path, len-1);
+ command[len-1] = '\0';
+ /* open(STATUS, "netstat -an 2>&1 |") */
+ pipemode[i++] = 'w';
+#ifndef XP_UNIX
+ pipemode[i++] = file->type==UTF8 ? 'b' : 't';
+#endif
+ pipemode[i++] = '\0';
+ file->nativehandle = POPEN(command, pipemode);
+ JS_free(cx, command);
+ }
+ /* set the flags */
+ file->isNative = JS_TRUE;
+ file->isPipe = JS_TRUE;
+ file->hasRandomAccess = JS_FALSE;
+ }
+ } else {
+ /* TODO: what about the permissions?? Java ignores the problem... */
+ file->handle = PR_Open(file->path, mask, 0644);
+ }
+
+ js_ResetBuffers(file);
+ JS_free(cx, mode);
+ mode = NULL;
+
+ /* Set the open flag and return result */
+ if (file->handle == NULL && file->nativehandle == NULL) {
+ file->isOpen = JS_FALSE;
+
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OP_FAILED, "open", file->path);
+ goto out;
+ }
+
+good:
+ file->isOpen = JS_TRUE;
+ *rval = JSVAL_TRUE;
+ return JS_TRUE;
+
+out:
+ if(mode)
+ JS_free(cx, mode);
+ return JS_FALSE;
+}
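+
+/*
+ * Sketch only: for a plain path, open("readWrite,create", "text") above yields
+ * mask == PR_RDWR | PR_RDONLY | PR_WRONLY | PR_CREATE_FILE and type == ASCII,
+ * and the file goes through PR_Open; a path with a leading or trailing '|'
+ * defaults to mode "read" or "write" respectively and is routed through POPEN
+ * as a pipe instead.
+ */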
+
+static JSBool
+file_close(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+
+ SECURITY_CHECK(cx, NULL, "close", file);
+
+ if(!file->isOpen){
+ JS_ReportWarning(cx, "File %s is not open, can't close it, proceeding",
+ file->path);
+ goto out;
+ }
+
+ if(!file->isPipe){
+ if(file->isNative){
+ JS_ReportWarning(cx, "Unable to close a native file, proceeding", file->path);
+ goto out;
+ }else{
+ if(file->handle && PR_Close(file->handle)){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OP_FAILED, "close", file->path);
+
+ goto out;
+ }
+ }
+ }else{
+ if(PCLOSE(file->nativehandle)==-1){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OP_FAILED, "pclose", file->path);
+ goto out;
+ }
+ }
+
+ js_ResetAttributes(file);
+ *rval = JSVAL_TRUE;
+ return JS_TRUE;
+
+out:
+ return JS_FALSE;
+}
+
+
+static JSBool
+file_remove(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+
+ SECURITY_CHECK(cx, NULL, "remove", file);
+ JSFILE_CHECK_NATIVE("remove");
+ JSFILE_CHECK_CLOSED("remove");
+
+ if ((js_isDirectory(cx, file) ?
+ PR_RmDir(file->path) : PR_Delete(file->path))==PR_SUCCESS) {
+ js_ResetAttributes(file);
+ *rval = JSVAL_TRUE;
+ return JS_TRUE;
+ } else {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OP_FAILED, "remove", file->path);
+ goto out;
+ }
+out:
+ *rval = JSVAL_FALSE;
+ return JS_FALSE;
+}
+
+/* Raw PR-based function. No text processing. Just raw data copying. */
+static JSBool
+file_copyTo(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+ char *dest = NULL;
+ PRFileDesc *handle = NULL;
+ char *buffer;
+ jsval count, size;
+ JSBool fileInitiallyOpen=JS_FALSE;
+
+ SECURITY_CHECK(cx, NULL, "copyTo", file); /* may need a second argument!*/
+ JSFILE_CHECK_ONE_ARG("copyTo");
+ JSFILE_CHECK_NATIVE("copyTo");
+    /* remember the state */
+ fileInitiallyOpen = file->isOpen;
+ JSFILE_CHECK_READ;
+
+ dest = JS_GetStringBytes(JS_ValueToString(cx, argv[0]));
+
+ /* make sure we are not reading a file open for writing */
+ if (file->isOpen && !js_canRead(cx, file)) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_CANNOT_COPY_FILE_OPEN_FOR_WRITING_ERROR, file->path);
+ goto out;
+ }
+
+ if (file->handle==NULL){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OP_FAILED, "open", file->path);
+ goto out;
+ }
+
+ handle = PR_Open(dest, PR_WRONLY|PR_CREATE_FILE|PR_TRUNCATE, 0644);
+
+ if(!handle){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OP_FAILED, "open", dest);
+ goto out;
+ }
+
+ if ((size=js_size(cx, file))==JSVAL_VOID) {
+ goto out;
+ }
+
+    buffer = JS_malloc(cx, JSVAL_TO_INT(size));
+    if (!buffer)
+        goto out;
+
+    count = INT_TO_JSVAL(PR_Read(file->handle, buffer, JSVAL_TO_INT(size)));
+
+ /* reading panic */
+ if (count!=size) {
+ JS_free(cx, buffer);
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_COPY_READ_ERROR, file->path);
+ goto out;
+ }
+
+ count = INT_TO_JSVAL(PR_Write(handle, buffer, JSVAL_TO_INT(size)));
+
+ /* writing panic */
+ if (count!=size) {
+ JS_free(cx, buffer);
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_COPY_WRITE_ERROR, file->path);
+ goto out;
+ }
+
+ JS_free(cx, buffer);
+
+ if(!fileInitiallyOpen){
+ if(!file_close(cx, obj, 0, NULL, rval)) goto out;
+ }
+
+ if(PR_Close(handle)!=PR_SUCCESS){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OP_FAILED, "close", dest);
+ goto out;
+ }
+
+ *rval = JSVAL_TRUE;
+ return JS_TRUE;
+out:
+ if(file->isOpen && !fileInitiallyOpen){
+ if(PR_Close(file->handle)!=PR_SUCCESS){
+ JS_ReportWarning(cx, "Can't close %s, proceeding", file->path);
+ }
+ }
+
+ if(handle && PR_Close(handle)!=PR_SUCCESS){
+ JS_ReportWarning(cx, "Can't close %s, proceeding", dest);
+ }
+
+ *rval = JSVAL_FALSE;
+ return JS_FALSE;
+}
+
+static JSBool
+file_renameTo(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+ char *dest;
+
+ SECURITY_CHECK(cx, NULL, "renameTo", file); /* may need a second argument!*/
+ JSFILE_CHECK_ONE_ARG("renameTo");
+ JSFILE_CHECK_NATIVE("renameTo");
+ JSFILE_CHECK_CLOSED("renameTo");
+
+ dest = RESOLVE_PATH(cx, JS_GetStringBytes(JS_ValueToString(cx, argv[0])));
+
+ if (PR_Rename(file->path, dest)==PR_SUCCESS){
+ /* copy the new filename */
+ JS_free(cx, file->path);
+ file->path = dest;
+ *rval = JSVAL_TRUE;
+ return JS_TRUE;
+ }else{
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_RENAME_FAILED, file->path, dest);
+ goto out;
+ }
+out:
+ *rval = JSVAL_FALSE;
+ return JS_FALSE;
+}
+
+static JSBool
+file_flush(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+
+ SECURITY_CHECK(cx, NULL, "flush", file);
+ JSFILE_CHECK_NATIVE("flush");
+ JSFILE_CHECK_OPEN("flush");
+
+ if (PR_Sync(file->handle)==PR_SUCCESS){
+ *rval = JSVAL_TRUE;
+ return JS_TRUE;
+ }else{
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OP_FAILED, "flush", file->path);
+ goto out;
+ }
+out:
+ *rval = JSVAL_FALSE;
+ return JS_FALSE;
+}
+
+static JSBool
+file_write(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+ JSString *str;
+ int32 count;
+ uintN i;
+
+ SECURITY_CHECK(cx, NULL, "write", file);
+ JSFILE_CHECK_WRITE;
+
+ for (i = 0; i<argc; i++) {
+ str = JS_ValueToString(cx, argv[i]);
+ count = js_FileWrite(cx, file, JS_GetStringChars(str),
+ JS_GetStringLength(str), file->type);
+ if (count==-1){
+ *rval = JSVAL_FALSE;
+ return JS_FALSE;
+ }
+ }
+
+ *rval = JSVAL_TRUE;
+ return JS_TRUE;
+out:
+ *rval = JSVAL_FALSE;
+ return JS_FALSE;
+}
+
+static JSBool
+file_writeln(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+ JSString *str;
+
+ SECURITY_CHECK(cx, NULL, "writeln", file);
+ JSFILE_CHECK_WRITE;
+
+ /* don't report an error here */
+ if(!file_write(cx, obj, argc, argv, rval)) return JS_FALSE;
+ /* don't do security here -- we passed the check in file_write */
+ str = JS_NewStringCopyZ(cx, "\n");
+
+ if (js_FileWrite(cx, file, JS_GetStringChars(str), JS_GetStringLength(str),
+ file->type)==-1){
+ *rval = JSVAL_FALSE;
+ return JS_FALSE;
+ }
+
+ /* eol causes flush if hasAutoflush is turned on */
+ if (file->hasAutoflush)
+ file_flush(cx, obj, 0, NULL, rval);
+
+ *rval = JSVAL_TRUE;
+ return JS_TRUE;
+out:
+ *rval = JSVAL_FALSE;
+ return JS_FALSE;
+}
+
+static JSBool
+file_writeAll(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+ jsuint i;
+ jsuint limit;
+ JSObject *array;
+ JSObject *elem;
+ jsval elemval;
+
+ SECURITY_CHECK(cx, NULL, "writeAll", file);
+ JSFILE_CHECK_ONE_ARG("writeAll");
+ JSFILE_CHECK_WRITE;
+
+ if (!JS_IsArrayObject(cx, JSVAL_TO_OBJECT(argv[0]))) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_FIRST_ARGUMENT_WRITEALL_NOT_ARRAY_ERROR);
+ goto out;
+ }
+
+ array = JSVAL_TO_OBJECT(argv[0]);
+
+ JS_GetArrayLength(cx, array, &limit);
+
+ for (i = 0; i<limit; i++) {
+ if (!JS_GetElement(cx, array, i, &elemval)) return JS_FALSE;
+ elem = JSVAL_TO_OBJECT(elemval);
+ file_writeln(cx, obj, 1, &elemval, rval);
+ }
+
+ *rval = JSVAL_TRUE;
+ return JS_TRUE;
+out:
+ *rval = JSVAL_FALSE;
+ return JS_FALSE;
+}
+
+static JSBool
+file_read(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+ JSString *str;
+ int32 want, count;
+ jschar *buf;
+
+ SECURITY_CHECK(cx, NULL, "read", file);
+ JSFILE_CHECK_ONE_ARG("read");
+ JSFILE_CHECK_READ;
+
+ if (!JS_ValueToInt32(cx, argv[0], &want)){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_FIRST_ARGUMENT_MUST_BE_A_NUMBER, "read", argv[0]);
+ goto out;
+ }
+
+ /* want = (want>262144)?262144:want; * arbitrary size limitation */
+
+ buf = JS_malloc(cx, want*sizeof buf[0]);
+ if (!buf) goto out;
+
+ count = js_FileRead(cx, file, buf, want, file->type);
+ if (count>0) {
+ str = JS_NewUCStringCopyN(cx, buf, count);
+ *rval = STRING_TO_JSVAL(str);
+ JS_free(cx, buf);
+ return JS_TRUE;
+ } else {
+ JS_free(cx, buf);
+ goto out;
+ }
+out:
+ *rval = JSVAL_FALSE;
+ return JS_FALSE;
+}
+
+static JSBool
+file_readln(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+ JSString *str;
+ jschar *buf = NULL, *tmp;
+ int32 offset, read;
+ intN room;
+ jschar data, data2;
+
+ SECURITY_CHECK(cx, NULL, "readln", file);
+ JSFILE_CHECK_READ;
+
+ buf = JS_malloc(cx, MAX_LINE_LENGTH * sizeof data);
+ if (!buf)
+ return JS_FALSE;
+
+ room = MAX_LINE_LENGTH - 1;
+ offset = 0;
+
+ for (;;) {
+ read = js_FileRead(cx, file, &data, 1, file->type);
+ if (read < 0)
+ goto out;
+ if (read == 0)
+ goto eof;
+
+ switch (data) {
+ case '\r':
+ read = js_FileRead(cx, file, &data2, 1, file->type);
+ if (read < 0)
+ goto out;
+
+ if (read == 1 && data2 != '\n') {
+ /* We read one char too far. Buffer it. */
+ file->charBuffer = data2;
+ file->charBufferUsed = JS_TRUE;
+ }
+
+ /* Fall through. */
+ case '\n':
+ goto done;
+
+ default:
+ if (--room < 0) {
+ tmp = JS_realloc(cx, buf,
+ (offset + MAX_LINE_LENGTH) * sizeof data);
+ if (!tmp)
+ goto out;
+
+ room = MAX_LINE_LENGTH - 1;
+ buf = tmp;
+ }
+
+ buf[offset++] = data;
+ break;
+ }
+ }
+
+eof:
+    if (offset == 0) {
+        JS_free(cx, buf);
+        *rval = JSVAL_NULL;
+        return JS_TRUE;
+    }
+
+done:
+ buf[offset] = 0;
+    tmp = JS_realloc(cx, buf, (offset + 1) * sizeof data);
+    if (!tmp)
+        goto out;
+    buf = tmp;
+
+    str = JS_NewUCString(cx, buf, offset);
+    if (!str)
+        goto out;
+
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+
+out:
+ if (buf)
+ JS_free(cx, buf);
+
+ return JS_FALSE;
+}
+
+static JSBool
+file_readAll(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+ JSObject *array;
+ jsint len;
+ jsval line;
+ JSBool lineok = JS_FALSE;
+
+ SECURITY_CHECK(cx, NULL, "readAll", file);
+ JSFILE_CHECK_READ;
+
+ array = JS_NewArrayObject(cx, 0, NULL);
+ if (!array)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(array);
+
+ len = 0;
+
+ lineok = file_readln(cx, obj, 0, NULL, &line);
+ while (lineok && !JSVAL_IS_NULL(line)) {
+ JS_SetElement(cx, array, len++, &line);
+ lineok = file_readln(cx, obj, 0, NULL, &line);
+ }
+
+out:
+ return lineok;
+}
+
+static JSBool
+file_seek(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+ int32 toskip;
+ int32 pos;
+
+ SECURITY_CHECK(cx, NULL, "seek", file);
+ JSFILE_CHECK_ONE_ARG("seek");
+ JSFILE_CHECK_NATIVE("seek");
+ JSFILE_CHECK_READ;
+
+ if (!JS_ValueToInt32(cx, argv[0], &toskip)){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_FIRST_ARGUMENT_MUST_BE_A_NUMBER, "seek", argv[0]);
+ goto out;
+ }
+
+ if(!file->hasRandomAccess){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_NO_RANDOM_ACCESS, file->path);
+ goto out;
+ }
+
+ if(js_isDirectory(cx, file)){
+ JS_ReportWarning(cx,"Seek on directories is not supported, proceeding");
+ goto out;
+ }
+
+ pos = js_FileSeek(cx, file, toskip, file->type);
+
+ if (pos!=-1) {
+ *rval = INT_TO_JSVAL(pos);
+ return JS_TRUE;
+ }
+out:
+ *rval = JSVAL_VOID;
+ return JS_FALSE;
+}
+
+static JSBool
+file_list(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ PRDir *dir;
+ PRDirEntry *entry;
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+ JSObject *array;
+ JSObject *eachFile;
+ jsint len;
+ jsval v;
+ JSRegExp *re = NULL;
+ JSFunction *func = NULL;
+ JSString *str;
+ jsval args[1];
+ char *filePath;
+
+ SECURITY_CHECK(cx, NULL, "list", file);
+ JSFILE_CHECK_NATIVE("list");
+
+ if (argc==1) {
+ if (JSVAL_IS_REGEXP(cx, argv[0])) {
+ re = JS_GetPrivate(cx, JSVAL_TO_OBJECT(argv[0]));
+ }else
+ if (VALUE_IS_FUNCTION(cx, argv[0])) {
+ func = JS_GetPrivate(cx, JSVAL_TO_OBJECT(argv[0]));
+ }else{
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_FIRST_ARGUMENT_MUST_BE_A_FUNCTION_OR_REGEX, argv[0]);
+ goto out;
+ }
+ }
+
+ if (!js_isDirectory(cx, file)) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_CANNOT_DO_LIST_ON_A_FILE, file->path);
+ goto out;
+ }
+
+ dir = PR_OpenDir(file->path);
+ if(!dir){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OP_FAILED, "open", file->path);
+ goto out;
+ }
+
+ /* create JSArray here... */
+ array = JS_NewArrayObject(cx, 0, NULL);
+ len = 0;
+
+ while ((entry = PR_ReadDir(dir, PR_SKIP_BOTH))!=NULL) {
+ /* first, check if we have a regexp */
+ if (re!=NULL) {
+ size_t index = 0;
+
+ str = JS_NewStringCopyZ(cx, entry->name);
+ if(!js_ExecuteRegExp(cx, re, str, &index, JS_TRUE, &v)){
+ /* don't report anything here */
+ goto out;
+ }
+ /* not matched! */
+ if (JSVAL_IS_NULL(v)) {
+ continue;
+ }
+ }else
+ if (func!=NULL) {
+ str = JS_NewStringCopyZ(cx, entry->name);
+ args[0] = STRING_TO_JSVAL(str);
+ if(!JS_CallFunction(cx, obj, func, 1, args, &v)){
+ goto out;
+ }
+
+ if (v==JSVAL_FALSE) {
+ continue;
+ }
+ }
+
+ filePath = js_combinePath(cx, file->path, (char*)entry->name);
+
+        eachFile = js_NewFileObject(cx, filePath);
+        if (!eachFile){
+            JS_ReportWarning(cx, "File %s cannot be retrieved", filePath);
+            JS_free(cx, filePath);
+            continue;
+        }
+        JS_free(cx, filePath);
+ v = OBJECT_TO_JSVAL(eachFile);
+ JS_SetElement(cx, array, len, &v);
+ JS_SetProperty(cx, array, entry->name, &v);
+ len++;
+ }
+
+ if(PR_CloseDir(dir)!=PR_SUCCESS){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OP_FAILED, "close", file->path);
+ goto out;
+ }
+ *rval = OBJECT_TO_JSVAL(array);
+ return JS_TRUE;
+out:
+ *rval = JSVAL_NULL;
+ return JS_FALSE;
+}
+
+static JSBool
+file_mkdir(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+
+ SECURITY_CHECK(cx, NULL, "mkdir", file);
+ JSFILE_CHECK_ONE_ARG("mkdir");
+ JSFILE_CHECK_NATIVE("mkdir");
+
+ /* if the current file is not a directory, find out the directory name */
+ if (!js_isDirectory(cx, file)) {
+ char *dir = js_fileDirectoryName(cx, file->path);
+ JSObject *dirObj = js_NewFileObject(cx, dir);
+
+ JS_free(cx, dir);
+
+ /* call file_mkdir with the right set of parameters if needed */
+ if (file_mkdir(cx, dirObj, argc, argv, rval))
+ return JS_TRUE;
+ else
+ goto out;
+ }else{
+ char *dirName = JS_GetStringBytes(JS_ValueToString(cx, argv[0]));
+ char *fullName;
+
+ fullName = js_combinePath(cx, file->path, dirName);
+ if (PR_MkDir(fullName, 0755)==PR_SUCCESS){
+ *rval = JSVAL_TRUE;
+ JS_free(cx, fullName);
+ return JS_TRUE;
+ }else{
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OP_FAILED, "mkdir", fullName);
+ JS_free(cx, fullName);
+ goto out;
+ }
+ }
+out:
+ *rval = JSVAL_FALSE;
+ return JS_FALSE;
+}
+
+static JSBool
+file_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval*rval)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+ JSString *str;
+
+ str = JS_NewStringCopyZ(cx, file->path);
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+file_toURL(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+ char url[MAX_PATH_LENGTH];
+ jschar *urlChars;
+ size_t len;
+ JSString *str;
+
+ JSFILE_CHECK_NATIVE("toURL");
+
+ sprintf(url, "file://%s", file->path);
+
+ len = strlen(url);
+ urlChars = js_InflateString(cx, url, &len);
+ if (!urlChars)
+ return JS_FALSE;
+ str = js_NewString(cx, urlChars, len, 0);
+ if (!str) {
+ JS_free(cx, urlChars);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+
+ /* TODO: js_escape in jsstr.h may go away at some point */
+ return js_str_escape(cx, obj, 0, rval, rval);
+
+out:
+ *rval = JSVAL_VOID;
+ return JS_FALSE;
+}
+
+
+static void
+file_finalize(JSContext *cx, JSObject *obj)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+
+ if(file) {
+ /* Close the file before exiting. */
+ if(file->isOpen && !file->isNative) {
+ jsval vp;
+ file_close(cx, obj, 0, NULL, &vp);
+ }
+
+ if (file->path)
+ JS_free(cx, file->path);
+
+ JS_free(cx, file);
+ }
+}
+
+/*
+ Allocates memory for the file object, sets fields to defaults.
+*/
+static JSFile*
+file_init(JSContext *cx, JSObject *obj, char *bytes)
+{
+ JSFile *file;
+
+ file = JS_malloc(cx, sizeof *file);
+ if (!file)
+ return NULL;
+ memset(file, 0 , sizeof *file);
+
+ js_ResetAttributes(file);
+
+ file->path = RESOLVE_PATH(cx, bytes);
+
+ if (!JS_SetPrivate(cx, obj, file)) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_CANNOT_SET_PRIVATE_FILE, file->path);
+ JS_free(cx, file);
+ return NULL;
+ }
+
+ return file;
+}
+
+/* Returns a JSObject. This function is globally visible */
+JS_PUBLIC_API(JSObject*)
+js_NewFileObject(JSContext *cx, char *filename)
+{
+ JSObject *obj;
+ JSFile *file;
+
+ obj = JS_NewObject(cx, &js_FileClass, NULL, NULL);
+ if (!obj){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OBJECT_CREATION_FAILED, "js_NewFileObject");
+ return NULL;
+ }
+ file = file_init(cx, obj, filename);
+ if(!file) return NULL;
+ return obj;
+}
+
+/* Internal function, used for cases which NSPR file support doesn't cover */
+JSObject*
+js_NewFileObjectFromFILE(JSContext *cx, FILE *nativehandle, char *filename,
+ int32 mode, JSBool open, JSBool randomAccess)
+{
+ JSObject *obj;
+ JSFile *file;
+
+ obj = JS_NewObject(cx, &js_FileClass, NULL, NULL);
+ if (!obj){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OBJECT_CREATION_FAILED, "js_NewFileObjectFromFILE");
+ return NULL;
+ }
+ file = file_init(cx, obj, filename);
+ if(!file) return NULL;
+
+ file->nativehandle = nativehandle;
+
+ /* free result of RESOLVE_PATH from file_init. */
+ JS_ASSERT(file->path != NULL);
+ JS_free(cx, file->path);
+
+    file->path = JS_strdup(cx, filename);
+ file->isOpen = open;
+ file->mode = mode;
+ file->hasRandomAccess = randomAccess;
+ file->isNative = JS_TRUE;
+ return obj;
+}
+
+/*
+ Real file constructor that is called from JavaScript.
+ Basically, does error processing and calls file_init.
+*/
+static JSBool
+file_constructor(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str;
+ JSFile *file;
+
+ if (!(cx->fp->flags & JSFRAME_CONSTRUCTING)) {
+ /* Replace obj with a new File object. */
+ obj = JS_NewObject(cx, &js_FileClass, NULL, NULL);
+ if (!obj)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(obj);
+ }
+
+ str = (argc == 0)
+ ? JS_InternString(cx, "")
+ : JS_ValueToString(cx, argv[0]);
+
+ if (!str) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_FIRST_ARGUMENT_CONSTRUCTOR_NOT_STRING_ERROR,
+ argv[0]);
+ return JS_FALSE;
+ }
+
+ file = file_init(cx, obj, JS_GetStringBytes(str));
+ if (!file)
+ return JS_FALSE;
+
+ SECURITY_CHECK(cx, NULL, "constructor", file);
+
+ return JS_TRUE;
+}
+
+/* -------------------- File methods and properties ------------------------- */
+static JSFunctionSpec file_functions[] = {
+ { "open", file_open, 0},
+ { "close", file_close, 0},
+ { "remove", file_remove, 0},
+ { "copyTo", file_copyTo, 0},
+ { "renameTo", file_renameTo, 0},
+ { "flush", file_flush, 0},
+ { "seek", file_seek, 0},
+ { "read", file_read, 0},
+ { "readln", file_readln, 0},
+ { "readAll", file_readAll, 0},
+ { "write", file_write, 0},
+ { "writeln", file_writeln, 0},
+ { "writeAll", file_writeAll, 0},
+ { "list", file_list, 0},
+ { "mkdir", file_mkdir, 0},
+ { "toString", file_toString, 0},
+ { "toURL", file_toURL, 0},
+ {0}
+};
+
+enum file_tinyid {
+ FILE_LENGTH = -2,
+ FILE_PARENT = -3,
+ FILE_PATH = -4,
+ FILE_NAME = -5,
+ FILE_ISDIR = -6,
+ FILE_ISFILE = -7,
+ FILE_EXISTS = -8,
+ FILE_CANREAD = -9,
+ FILE_CANWRITE = -10,
+ FILE_OPEN = -11,
+ FILE_TYPE = -12,
+ FILE_MODE = -13,
+ FILE_CREATED = -14,
+ FILE_MODIFIED = -15,
+ FILE_SIZE = -16,
+ FILE_RANDOMACCESS = -17,
+ FILE_POSITION = -18,
+ FILE_APPEND = -19,
+ FILE_REPLACE = -20,
+ FILE_AUTOFLUSH = -21,
+ FILE_ISNATIVE = -22,
+};
+
+static JSPropertySpec file_props[] = {
+ {"length", FILE_LENGTH, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"parent", FILE_PARENT, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"path", FILE_PATH, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"name", FILE_NAME, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"isDirectory", FILE_ISDIR, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"isFile", FILE_ISFILE, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"exists", FILE_EXISTS, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"canRead", FILE_CANREAD, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"canWrite", FILE_CANWRITE, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"canAppend", FILE_APPEND, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"canReplace", FILE_REPLACE, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"isOpen", FILE_OPEN, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"type", FILE_TYPE, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"mode", FILE_MODE, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"creationTime", FILE_CREATED, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"lastModified", FILE_MODIFIED, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"size", FILE_SIZE, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"hasRandomAccess", FILE_RANDOMACCESS, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"hasAutoFlush", FILE_AUTOFLUSH, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {"position", FILE_POSITION, JSPROP_ENUMERATE },
+ {"isNative", FILE_ISNATIVE, JSPROP_ENUMERATE | JSPROP_READONLY },
+ {0}
+};
+
+/* ------------------------- Property getter/setter ------------------------- */
+static JSBool
+file_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+ char *bytes;
+ JSString *str;
+ jsint tiny;
+ PRFileInfo info;
+ JSBool flag;
+ PRExplodedTime expandedTime;
+
+ tiny = JSVAL_TO_INT(id);
+ if (!file)
+ return JS_TRUE;
+
+ switch (tiny) {
+ case FILE_PARENT:
+ SECURITY_CHECK(cx, NULL, "parent", file);
+ if (!js_parent(cx, file, vp))
+ return JS_FALSE;
+ break;
+ case FILE_PATH:
+ str = JS_NewStringCopyZ(cx, file->path);
+ if (!str)
+ return JS_FALSE;
+ *vp = STRING_TO_JSVAL(str);
+ break;
+ case FILE_NAME:
+ if (!js_name(cx, file, vp))
+ return JS_FALSE;
+ break;
+ case FILE_ISDIR:
+ SECURITY_CHECK(cx, NULL, "isDirectory", file);
+ *vp = BOOLEAN_TO_JSVAL(js_isDirectory(cx, file));
+ break;
+ case FILE_ISFILE:
+ SECURITY_CHECK(cx, NULL, "isFile", file);
+ *vp = BOOLEAN_TO_JSVAL(js_isFile(cx, file));
+ break;
+ case FILE_EXISTS:
+ SECURITY_CHECK(cx, NULL, "exists", file);
+ *vp = BOOLEAN_TO_JSVAL(js_exists(cx, file));
+ break;
+ case FILE_ISNATIVE:
+ SECURITY_CHECK(cx, NULL, "isNative", file);
+ *vp = BOOLEAN_TO_JSVAL(file->isNative);
+ break;
+ case FILE_CANREAD:
+ SECURITY_CHECK(cx, NULL, "canRead", file);
+ *vp = BOOLEAN_TO_JSVAL(js_canRead(cx, file));
+ break;
+ case FILE_CANWRITE:
+ SECURITY_CHECK(cx, NULL, "canWrite", file);
+ *vp = BOOLEAN_TO_JSVAL(js_canWrite(cx, file));
+ break;
+ case FILE_OPEN:
+ SECURITY_CHECK(cx, NULL, "isOpen", file);
+ *vp = BOOLEAN_TO_JSVAL(file->isOpen);
+ break;
+ case FILE_APPEND :
+ SECURITY_CHECK(cx, NULL, "canAppend", file);
+ JSFILE_CHECK_OPEN("canAppend");
+ *vp = BOOLEAN_TO_JSVAL(!file->isNative &&
+ (file->mode&PR_APPEND)==PR_APPEND);
+ break;
+ case FILE_REPLACE :
+ SECURITY_CHECK(cx, NULL, "canReplace", file);
+ JSFILE_CHECK_OPEN("canReplace");
+ *vp = BOOLEAN_TO_JSVAL(!file->isNative &&
+ (file->mode&PR_TRUNCATE)==PR_TRUNCATE);
+ break;
+ case FILE_AUTOFLUSH :
+ SECURITY_CHECK(cx, NULL, "hasAutoFlush", file);
+ JSFILE_CHECK_OPEN("hasAutoFlush");
+ *vp = BOOLEAN_TO_JSVAL(!file->isNative && file->hasAutoflush);
+ break;
+ case FILE_TYPE:
+ SECURITY_CHECK(cx, NULL, "type", file);
+ JSFILE_CHECK_OPEN("type");
+ if(js_isDirectory(cx, file)){
+ *vp = JSVAL_VOID;
+ break;
+ }
+
+ switch (file->type) {
+ case ASCII:
+ *vp = STRING_TO_JSVAL(JS_NewStringCopyZ(cx, asciistring));
+ break;
+ case UTF8:
+ *vp = STRING_TO_JSVAL(JS_NewStringCopyZ(cx, utfstring));
+ break;
+ case UCS2:
+ *vp = STRING_TO_JSVAL(JS_NewStringCopyZ(cx, unicodestring));
+ break;
+ default:
+ JS_ReportWarning(cx, "Unsupported file type %d, proceeding",
+ file->type);
+ }
+ break;
+ case FILE_MODE:
+ SECURITY_CHECK(cx, NULL, "mode", file);
+ JSFILE_CHECK_OPEN("mode");
+ bytes = JS_malloc(cx, MODE_SIZE);
+ bytes[0] = '\0';
+ flag = JS_FALSE;
+
+ if ((file->mode&PR_RDONLY)==PR_RDONLY) {
+ if (flag) strcat(bytes, ",");
+ strcat(bytes, "read");
+ flag = JS_TRUE;
+ }
+ if ((file->mode&PR_WRONLY)==PR_WRONLY) {
+ if (flag) strcat(bytes, ",");
+ strcat(bytes, "write");
+ flag = JS_TRUE;
+ }
+ if ((file->mode&PR_RDWR)==PR_RDWR) {
+ if (flag) strcat(bytes, ",");
+ strcat(bytes, "readWrite");
+ flag = JS_TRUE;
+ }
+ if ((file->mode&PR_APPEND)==PR_APPEND) {
+ if (flag) strcat(bytes, ",");
+ strcat(bytes, "append");
+ flag = JS_TRUE;
+ }
+ if ((file->mode&PR_CREATE_FILE)==PR_CREATE_FILE) {
+ if (flag) strcat(bytes, ",");
+ strcat(bytes, "create");
+ flag = JS_TRUE;
+ }
+ if ((file->mode&PR_TRUNCATE)==PR_TRUNCATE) {
+ if (flag) strcat(bytes, ",");
+ strcat(bytes, "replace");
+ flag = JS_TRUE;
+ }
+ if (file->hasAutoflush) {
+ if (flag) strcat(bytes, ",");
+ strcat(bytes, "hasAutoFlush");
+ flag = JS_TRUE;
+ }
+ *vp = STRING_TO_JSVAL(JS_NewStringCopyZ(cx, bytes));
+ JS_free(cx, bytes);
+ break;
+ case FILE_CREATED:
+ SECURITY_CHECK(cx, NULL, "creationTime", file);
+ JSFILE_CHECK_NATIVE("creationTime");
+ if(((file->isOpen)?
+ PR_GetOpenFileInfo(file->handle, &info):
+ PR_GetFileInfo(file->path, &info))!=PR_SUCCESS){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_CANNOT_ACCESS_FILE_STATUS, file->path);
+ goto out;
+ }
+
+ PR_ExplodeTime(info.creationTime, PR_LocalTimeParameters,&expandedTime);
+ *vp = OBJECT_TO_JSVAL(js_NewDateObject(cx, expandedTime.tm_year,
+ expandedTime.tm_month,
+ expandedTime.tm_mday,
+ expandedTime.tm_hour,
+ expandedTime.tm_min,
+ expandedTime.tm_sec));
+ break;
+ case FILE_MODIFIED:
+ SECURITY_CHECK(cx, NULL, "lastModified", file);
+ JSFILE_CHECK_NATIVE("lastModified");
+ if(((file->isOpen)?
+ PR_GetOpenFileInfo(file->handle, &info):
+ PR_GetFileInfo(file->path, &info))!=PR_SUCCESS){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_CANNOT_ACCESS_FILE_STATUS, file->path);
+ goto out;
+ }
+
+ PR_ExplodeTime(info.modifyTime, PR_LocalTimeParameters, &expandedTime);
+ *vp = OBJECT_TO_JSVAL(js_NewDateObject(cx, expandedTime.tm_year,
+ expandedTime.tm_month,
+ expandedTime.tm_mday,
+ expandedTime.tm_hour,
+ expandedTime.tm_min,
+ expandedTime.tm_sec));
+ break;
+ case FILE_SIZE:
+ SECURITY_CHECK(cx, NULL, "size", file);
+ *vp = js_size(cx, file);
+ break;
+ case FILE_LENGTH:
+ SECURITY_CHECK(cx, NULL, "length", file);
+ JSFILE_CHECK_NATIVE("length");
+
+ if (js_isDirectory(cx, file)) { /* XXX debug me */
+ PRDir *dir;
+ PRDirEntry *entry;
+ jsint count = 0;
+
+ if(!(dir = PR_OpenDir(file->path))){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_CANNOT_OPEN_DIR, file->path);
+ goto out;
+ }
+
+ while ((entry = PR_ReadDir(dir, PR_SKIP_BOTH))) {
+ count++;
+ }
+
+            if(PR_CloseDir(dir)!=PR_SUCCESS){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_OP_FAILED, "close", file->path);
+
+ goto out;
+ }
+
+ *vp = INT_TO_JSVAL(count);
+ break;
+ }else{
+ /* return file size */
+ *vp = js_size(cx, file);
+ }
+ break;
+ case FILE_RANDOMACCESS:
+ SECURITY_CHECK(cx, NULL, "hasRandomAccess", file);
+ JSFILE_CHECK_OPEN("hasRandomAccess");
+ *vp = BOOLEAN_TO_JSVAL(file->hasRandomAccess);
+ break;
+ case FILE_POSITION:
+ SECURITY_CHECK(cx, NULL, "position", file);
+ JSFILE_CHECK_NATIVE("position");
+ JSFILE_CHECK_OPEN("position");
+
+ if(!file->hasRandomAccess){
+            JS_ReportWarning(cx, "File %s doesn't support random access, can't report the position, proceeding", file->path);
+ *vp = JSVAL_VOID;
+ break;
+ }
+
+ if (file->isOpen && js_isFile(cx, file)) {
+ int pos = PR_Seek(file->handle, 0, PR_SEEK_CUR);
+ if(pos!=-1){
+ *vp = INT_TO_JSVAL(pos);
+ }else{
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_CANNOT_REPORT_POSITION, file->path);
+ goto out;
+ }
+ }else {
+            JS_ReportWarning(cx, "File %s is closed or not a plain file,"
+                " can't report position, proceeding", file->path);
+ goto out;
+ }
+ break;
+ default:
+ SECURITY_CHECK(cx, NULL, "file_access", file);
+
+ /* this is some other property -- try to use the dir["file"] syntax */
+ if (js_isDirectory(cx, file)) {
+ PRDir *dir = NULL;
+ PRDirEntry *entry = NULL;
+ char *prop_name;
+
+ str = JS_ValueToString(cx, id);
+ if (!str)
+ return JS_FALSE;
+
+ prop_name = JS_GetStringBytes(str);
+
+ /* no native files past this point */
+ dir = PR_OpenDir(file->path);
+ if(!dir) {
+ /* This is probably not a directory */
+ JS_ReportWarning(cx, "Can't open directory %s", file->path);
+ return JS_FALSE;
+ }
+
+ while ((entry = PR_ReadDir(dir, PR_SKIP_NONE)) != NULL) {
+ if (!strcmp(entry->name, prop_name)){
+ bytes = js_combinePath(cx, file->path, prop_name);
+ *vp = OBJECT_TO_JSVAL(js_NewFileObject(cx, bytes));
+ PR_CloseDir(dir);
+ JS_free(cx, bytes);
+ return !JSVAL_IS_NULL(*vp);
+ }
+ }
+ PR_CloseDir(dir);
+ }
+ }
+ return JS_TRUE;
+
+out:
+ return JS_FALSE;
+}
+
+static JSBool
+file_setProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSFile *file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+ jsint slot;
+
+ if (JSVAL_IS_STRING(id)){
+ return JS_TRUE;
+ }
+
+ slot = JSVAL_TO_INT(id);
+
+ switch (slot) {
+ /* File.position = 10 */
+ case FILE_POSITION:
+ SECURITY_CHECK(cx, NULL, "set_position", file);
+ JSFILE_CHECK_NATIVE("set_position");
+
+ if(!file->hasRandomAccess){
+            JS_ReportWarning(cx, "File %s doesn't support random access, can't "
+                "set the position, proceeding", file->path);
+ goto out;
+ }
+
+ if (file->isOpen && js_isFile(cx, file)) {
+ int32 pos;
+ int32 offset;
+
+ if (!JS_ValueToInt32(cx, *vp, &offset)){
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_FIRST_ARGUMENT_MUST_BE_A_NUMBER, "position", *vp);
+ goto out;
+ }
+
+ pos = PR_Seek(file->handle, offset, PR_SEEK_SET);
+
+ if(pos!=-1){
+ *vp = INT_TO_JSVAL(pos);
+ }else{
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_CANNOT_SET_POSITION, file->path);
+ goto out;
+ }
+ } else {
+ JS_ReportWarning(cx, "File %s is closed or not a file, can't set "
+ "position, proceeding", file->path);
+ goto out;
+ }
+ }
+
+ return JS_TRUE;
+out:
+ return JS_FALSE;
+}
+
+/*
+ File.currentDir = new File("D:\") or File.currentDir = "D:\"
+*/
+static JSBool
+file_currentDirSetter(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSFile *file;
+
+ file = JS_GetInstancePrivate(cx, obj, &js_FileClass, NULL);
+
+ /* Look at the rhs and extract a file object from it */
+ if (JSVAL_IS_OBJECT(*vp)) {
+ if (JS_InstanceOf(cx, obj, &js_FileClass, NULL)) {
+ /* Braindamaged rhs -- just return the old value */
+ if (file && (!js_exists(cx, file) || !js_isDirectory(cx, file))) {
+ JS_GetProperty(cx, obj, CURRENTDIR_PROPERTY, vp);
+ return JS_FALSE;
+ } else {
+ chdir(file->path);
+ return JS_TRUE;
+ }
+ } else {
+ return JS_FALSE;
+ }
+ } else {
+ JSObject *rhsObject;
+ char *path;
+
+ path = JS_GetStringBytes(JS_ValueToString(cx, *vp));
+ rhsObject = js_NewFileObject(cx, path);
+ if (!rhsObject)
+ return JS_FALSE;
+
+ if (!file || !js_exists(cx, file) || !js_isDirectory(cx, file)){
+ JS_GetProperty(cx, obj, CURRENTDIR_PROPERTY, vp);
+ } else {
+ *vp = OBJECT_TO_JSVAL(rhsObject);
+ chdir(path);
+ }
+ }
+
+ return JS_TRUE;
+}
+
+/* Declare class */
+JSClass js_FileClass = {
+ "File", JSCLASS_HAS_PRIVATE | JSCLASS_HAS_CACHED_PROTO(JSProto_File),
+ JS_PropertyStub, JS_PropertyStub, file_getProperty, file_setProperty,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, file_finalize
+};
+
+/* -------------------- Functions exposed to the outside -------------------- */
+JS_PUBLIC_API(JSObject*)
+js_InitFileClass(JSContext *cx, JSObject* obj)
+{
+ JSObject *file, *ctor, *afile;
+ jsval vp;
+ char *currentdir;
+ char separator[2];
+
+ file = JS_InitClass(cx, obj, NULL, &js_FileClass, file_constructor, 1,
+ file_props, file_functions, NULL, NULL);
+ if (!file) {
+ JS_ReportErrorNumber(cx, JSFile_GetErrorMessage, NULL,
+ JSFILEMSG_INIT_FAILED);
+ return NULL;
+ }
+
+ ctor = JS_GetConstructor(cx, file);
+ if (!ctor) return NULL;
+
+ /* Define CURRENTDIR property. We are doing this to get a
+ slash at the end of the current dir */
+ afile = js_NewFileObject(cx, CURRENT_DIR);
+ currentdir = JS_malloc(cx, MAX_PATH_LENGTH);
+ currentdir = getcwd(currentdir, MAX_PATH_LENGTH);
+ afile = js_NewFileObject(cx, currentdir);
+ JS_free(cx, currentdir);
+ vp = OBJECT_TO_JSVAL(afile);
+ JS_DefinePropertyWithTinyId(cx, ctor, CURRENTDIR_PROPERTY, 0, vp,
+ JS_PropertyStub, file_currentDirSetter,
+ JSPROP_ENUMERATE | JSPROP_READONLY );
+
+ /* Define input */
+ vp = OBJECT_TO_JSVAL(js_NewFileObjectFromFILE(cx, stdin,
+ STDINPUT_NAME, PR_RDONLY, JS_TRUE, JS_FALSE));
+ JS_SetProperty(cx, ctor, "input", &vp);
+
+ /* Define output */
+ vp = OBJECT_TO_JSVAL(js_NewFileObjectFromFILE(cx, stdout,
+ STDOUTPUT_NAME, PR_WRONLY, JS_TRUE, JS_FALSE));
+ JS_SetProperty(cx, ctor, "output", &vp);
+
+ /* Define error */
+ vp = OBJECT_TO_JSVAL(js_NewFileObjectFromFILE(cx, stderr,
+ STDERROR_NAME, PR_WRONLY, JS_TRUE, JS_FALSE));
+ JS_SetProperty(cx, ctor, "error", &vp);
+
+ separator[0] = FILESEPARATOR;
+ separator[1] = '\0';
+ vp = STRING_TO_JSVAL(JS_NewStringCopyZ(cx, separator));
+ JS_DefinePropertyWithTinyId(cx, ctor, SEPARATOR_PROPERTY, 0, vp,
+ JS_PropertyStub, JS_PropertyStub,
+ JSPROP_ENUMERATE | JSPROP_READONLY );
+ return file;
+}
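+
+/*
+ * A minimal embedding sketch (illustrative only; global_class and the usual
+ * runtime/context setup are assumed here and are not defined in this file).
+ * An embedder built with JS_HAS_FILE_OBJECT would typically expose File as:
+ *
+ *     JSObject *global = JS_NewObject(cx, &global_class, NULL, NULL);
+ *     JS_InitStandardClasses(cx, global);
+ *     if (!js_InitFileClass(cx, global))
+ *         return JS_FALSE;
+ *
+ * after which a script can use, for example:
+ *
+ *     var f = new File("/tmp/demo.txt");
+ *     f.open("write,create");   // assumed mode string; see file_open for tokens
+ *     f.writeln("hello");
+ *     f.close();
+ */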
+#endif /* JS_HAS_FILE_OBJECT */
diff --git a/src/third_party/js-1.7/jsfile.h b/src/third_party/js-1.7/jsfile.h
new file mode 100644
index 00000000000..78707e8b72d
--- /dev/null
+++ b/src/third_party/js-1.7/jsfile.h
@@ -0,0 +1,56 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef _jsfile_h__
+#define _jsfile_h__
+
+#if JS_HAS_FILE_OBJECT
+
+#include "jsobj.h"
+
+extern JS_PUBLIC_API(JSObject*)
+js_InitFileClass(JSContext *cx, JSObject* obj);
+
+extern JS_PUBLIC_API(JSObject*)
+js_NewFileObject(JSContext *cx, char *bytes);
+
+extern JSClass js_FileClass;
+
+#endif /* JS_HAS_FILE_OBJECT */
+#endif /* _jsfile_h__ */
diff --git a/src/third_party/js-1.7/jsfile.msg b/src/third_party/js-1.7/jsfile.msg
new file mode 100644
index 00000000000..137b35d8740
--- /dev/null
+++ b/src/third_party/js-1.7/jsfile.msg
@@ -0,0 +1,90 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ Error messages for jsfile.c. See js.msg for format specification.
+*/
+
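+/*
+    Each entry is MSG_DEF(<name>, <number>, <argument count>, <exception class>,
+    <format string>); the {0}, {1}, ... placeholders in the format string are
+    filled from the trailing arguments passed to JS_ReportErrorNumber. This is
+    only a summary of the js.msg layout referenced above.
+*/
+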
+MSG_DEF(JSFILEMSG_NOT_AN_ERROR, 0, 0, JSEXN_NONE, "<Error #0 is reserved>")
+MSG_DEF(JSFILEMSG_FILE_CONSTRUCTOR_UNDEFINED_ERROR, 1, 0, JSEXN_NONE, "File constructor is undefined")
+MSG_DEF(JSFILEMSG_FILE_CURRENTDIR_UNDEFINED_ERROR, 2, 0, JSEXN_NONE, "File.currentDir is undefined")
+MSG_DEF(JSFILEMSG_FIRST_ARGUMENT_OPEN_NOT_STRING_ERROR, 3, 1, JSEXN_NONE, "The first argument {0} to file.open must be a string")
+MSG_DEF(JSFILEMSG_SECOND_ARGUMENT_OPEN_NOT_STRING_ERROR, 4, 0, JSEXN_NONE, "The second argument to file.open must be a string")
+MSG_DEF(JSFILEMSG_CANNOT_COPY_FILE_OPEN_FOR_WRITING_ERROR, 5, 1, JSEXN_NONE, "Cannot copy file {0} open for writing")
+MSG_DEF(JSFILEMSG_CANNOT_ACCESS_FILE_INFO_ERROR, 6, 1, JSEXN_NONE, "Cannot access file information for {0}")
+MSG_DEF(JSFILEMSG_COPY_READ_ERROR, 7, 1, JSEXN_NONE, "An error occurred while attempting to read a file {0} to copy")
+MSG_DEF(JSFILEMSG_COPY_WRITE_ERROR, 8, 1, JSEXN_NONE, "An error occurred while attempting to copy into file {0}")
+MSG_DEF(JSFILEMSG_EXPECTS_ONE_ARG_ERROR, 9, 0, JSEXN_NONE, "Operation {0} expects one argument, not {1}")
+MSG_DEF(JSFILEMSG_CANNOT_FLUSH_CLOSE_FILE_ERROR, 10, 1, JSEXN_NONE, "Cannot flush closed file {0}")
+MSG_DEF(JSFILEMSG_CANNOT_OPEN_WRITING_ERROR, 11, 1, JSEXN_NONE, "Cannot open file {0} for writing")
+MSG_DEF(JSFILEMSG_WRITEALL_EXPECTS_ONE_ARG_ERROR, 12, 0, JSEXN_NONE, "writeAll expects one argument")
+MSG_DEF(JSFILEMSG_FIRST_ARGUMENT_WRITEALL_NOT_ARRAY_ERROR, 13, 0, JSEXN_NONE, "writeAll expects an array as an argument")
+MSG_DEF(JSFILEMSG_UNUSED0, 14, 0, JSEXN_NONE, "Unused error message slot")
+MSG_DEF(JSFILEMSG_CANNOT_OPEN_FILE_ERROR, 15, 1, JSEXN_NONE, "Cannot open file {0}")
+MSG_DEF(JSFILEMSG_FIRST_ARGUMENT_CONSTRUCTOR_NOT_STRING_ERROR, 16, 1, JSEXN_NONE, "The argument to the File constructor {0} must be a string")
+MSG_DEF(JSFILEMSG_BIDIRECTIONAL_PIPE_NOT_SUPPORTED, 17, 0, JSEXN_NONE, "Bidirectional pipes are not supported")
+MSG_DEF(JSFILEMSG_OPEN_MODE_NOT_SUPPORTED_WITH_PIPES, 18, 2, JSEXN_NONE, "The opening mode you have chosen {0} is not supported by the pipe you are trying to open: {1}")
+MSG_DEF(JSFILEMSG_OPEN_FAILED, 19, 1, JSEXN_NONE, "open on file {0} failed")
+MSG_DEF(JSFILEMSG_CLOSE_FAILED, 20, 1, JSEXN_NONE, "close on file {0} failed")
+MSG_DEF(JSFILEMSG_PCLOSE_FAILED, 21, 1, JSEXN_NONE, "pclose on file {0} failed")
+MSG_DEF(JSFILEMSG_REMOVE_FAILED, 22, 1, JSEXN_NONE, "remove on file {0} failed")
+MSG_DEF(JSFILEMSG_CANNOT_ACCESS_FILE_STATUS, 23, 1, JSEXN_NONE, "Cannot access file status for {0}")
+MSG_DEF(JSFILEMSG_RENAME_FAILED, 24, 2, JSEXN_NONE, "Cannot rename {0} to {1}")
+MSG_DEF(JSFILEMSG_WRITE_FAILED, 25, 1, JSEXN_NONE, "Write failed on file {0}")
+MSG_DEF(JSFILEMSG_READ_FAILED, 26, 1, JSEXN_NONE, "Read failed on file {0}")
+MSG_DEF(JSFILEMSG_SKIP_FAILED, 27, 1, JSEXN_NONE, "Skip failed on file {0}")
+MSG_DEF(JSFILEMSG_FIRST_ARGUMENT_MUST_BE_A_FUNCTION_OR_REGEX, 28, 1, JSEXN_NONE, "The first argument to file.list must be a function or a regex")
+MSG_DEF(JSFILEMSG_CANNOT_DO_LIST_ON_A_FILE, 29, 1, JSEXN_NONE, "{0} must be a directory, cannot do list")
+MSG_DEF(JSFILEMSG_NATIVE_OPERATION_IS_NOT_SUPPORTED, 30, 2, JSEXN_NONE, "Native operation {0} is not supported on {1}")
+MSG_DEF(JSFILEMSG_CANNOT_SET_PRIVATE_FILE, 31, 1, JSEXN_NONE, "Cannot set private data for file {0}")
+MSG_DEF(JSFILEMSG_FIRST_ARGUMENT_MUST_BE_A_NUMBER, 32, 2, JSEXN_NONE, "First argument to {0} must be a number, not {1}")
+MSG_DEF(JSFILEMSG_CANNOT_WRITE, 33, 1, JSEXN_NONE, "Cannot write to {0}, file mode is different")
+MSG_DEF(JSFILEMSG_CANNOT_READ, 34, 1, JSEXN_NONE, "Cannot read from {0}, file mode is different")
+MSG_DEF(JSFILEMSG_CANNOT_FLUSH, 35, 1, JSEXN_NONE, "Flush failed on {0}")
+MSG_DEF(JSFILEMSG_OP_FAILED, 36, 1, JSEXN_NONE, "File operation {0} failed")
+MSG_DEF(JSFILEMSG_FILE_MUST_BE_OPEN, 37, 1, JSEXN_NONE, "File must be open for {0}")
+MSG_DEF(JSFILEMSG_FILE_MUST_BE_CLOSED, 38, 1, JSEXN_NONE, "File must be closed for {0}")
+MSG_DEF(JSFILEMSG_NO_RANDOM_ACCESS, 39, 1, JSEXN_NONE, "File {0} doesn't allow random access")
+MSG_DEF(JSFILEMSG_OBJECT_CREATION_FAILED, 40, 1, JSEXN_NONE, "Couldn't create {0}")
+MSG_DEF(JSFILEMSG_CANNOT_OPEN_DIR, 41, 1, JSEXN_NONE, "Couldn't open directory {0}")
+MSG_DEF(JSFILEMSG_CANNOT_REPORT_POSITION, 42, 1, JSEXN_NONE, "Couldn't report position for {0}")
+MSG_DEF(JSFILEMSG_CANNOT_SET_POSITION, 43, 1, JSEXN_NONE, "Couldn't set position for {0}")
+MSG_DEF(JSFILEMSG_INIT_FAILED, 44, 0, JSEXN_NONE, "File class initialization failed")
+
+
diff --git a/src/third_party/js-1.7/jsfun.c b/src/third_party/js-1.7/jsfun.c
new file mode 100644
index 00000000000..2a2df539c8d
--- /dev/null
+++ b/src/third_party/js-1.7/jsfun.c
@@ -0,0 +1,2330 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS function support.
+ */
+#include "jsstddef.h"
+#include <string.h>
+#include "jstypes.h"
+#include "jsbit.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsapi.h"
+#include "jsarray.h"
+#include "jsatom.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsdbgapi.h"
+#include "jsfun.h"
+#include "jsgc.h"
+#include "jsinterp.h"
+#include "jslock.h"
+#include "jsnum.h"
+#include "jsobj.h"
+#include "jsopcode.h"
+#include "jsparse.h"
+#include "jsscan.h"
+#include "jsscope.h"
+#include "jsscript.h"
+#include "jsstr.h"
+#include "jsexn.h"
+
+#if JS_HAS_GENERATORS
+# include "jsiter.h"
+#endif
+
+/* Generic function/call/arguments tinyids -- also reflected bit numbers. */
+enum {
+ CALL_ARGUMENTS = -1, /* predefined arguments local variable */
+ CALL_CALLEE = -2, /* reference to active function's object */
+ ARGS_LENGTH = -3, /* number of actual args, arity if inactive */
+ ARGS_CALLEE = -4, /* reference from arguments to active funobj */
+ FUN_ARITY = -5, /* number of formal parameters; desired argc */
+ FUN_NAME = -6, /* function name, "" if anonymous */
+ FUN_CALLER = -7 /* Function.prototype.caller, backward compat */
+};
+
+#if JSFRAME_OVERRIDE_BITS < 8
+# error "not enough override bits in JSStackFrame.flags!"
+#endif
+
+#define TEST_OVERRIDE_BIT(fp, tinyid) \
+ ((fp)->flags & JS_BIT(JSFRAME_OVERRIDE_SHIFT - ((tinyid) + 1)))
+
+#define SET_OVERRIDE_BIT(fp, tinyid) \
+ ((fp)->flags |= JS_BIT(JSFRAME_OVERRIDE_SHIFT - ((tinyid) + 1)))
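+
+/*
+ * Worked example, derived from the two macros above: CALL_ARGUMENTS (-1)
+ * maps to JS_BIT(JSFRAME_OVERRIDE_SHIFT - 0) and FUN_CALLER (-7) maps to
+ * JS_BIT(JSFRAME_OVERRIDE_SHIFT + 6), so the seven tinyids occupy seven
+ * consecutive flag bits, comfortably inside the JSFRAME_OVERRIDE_BITS >= 8
+ * window enforced by the #error check above.
+ */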
+
+JSBool
+js_GetArgsValue(JSContext *cx, JSStackFrame *fp, jsval *vp)
+{
+ JSObject *argsobj;
+
+ if (TEST_OVERRIDE_BIT(fp, CALL_ARGUMENTS)) {
+ JS_ASSERT(fp->callobj);
+ return OBJ_GET_PROPERTY(cx, fp->callobj,
+ ATOM_TO_JSID(cx->runtime->atomState
+ .argumentsAtom),
+ vp);
+ }
+ argsobj = js_GetArgsObject(cx, fp);
+ if (!argsobj)
+ return JS_FALSE;
+ *vp = OBJECT_TO_JSVAL(argsobj);
+ return JS_TRUE;
+}
+
+static JSBool
+MarkArgDeleted(JSContext *cx, JSStackFrame *fp, uintN slot)
+{
+ JSObject *argsobj;
+ jsval bmapval, bmapint;
+ size_t nbits, nbytes;
+ jsbitmap *bitmap;
+
+ argsobj = fp->argsobj;
+ (void) JS_GetReservedSlot(cx, argsobj, 0, &bmapval);
+ nbits = fp->argc;
+ JS_ASSERT(slot < nbits);
+ if (JSVAL_IS_VOID(bmapval)) {
+ if (nbits <= JSVAL_INT_BITS) {
+ bmapint = 0;
+ bitmap = (jsbitmap *) &bmapint;
+ } else {
+ nbytes = JS_HOWMANY(nbits, JS_BITS_PER_WORD) * sizeof(jsbitmap);
+ bitmap = (jsbitmap *) JS_malloc(cx, nbytes);
+ if (!bitmap)
+ return JS_FALSE;
+ memset(bitmap, 0, nbytes);
+ bmapval = PRIVATE_TO_JSVAL(bitmap);
+ JS_SetReservedSlot(cx, argsobj, 0, bmapval);
+ }
+ } else {
+ if (nbits <= JSVAL_INT_BITS) {
+ bmapint = JSVAL_TO_INT(bmapval);
+ bitmap = (jsbitmap *) &bmapint;
+ } else {
+ bitmap = (jsbitmap *) JSVAL_TO_PRIVATE(bmapval);
+ }
+ }
+ JS_SET_BIT(bitmap, slot);
+ if (bitmap == (jsbitmap *) &bmapint) {
+ bmapval = INT_TO_JSVAL(bmapint);
+ JS_SetReservedSlot(cx, argsobj, 0, bmapval);
+ }
+ return JS_TRUE;
+}
+
+/* NB: Infallible predicate, false does not mean error/exception. */
+static JSBool
+ArgWasDeleted(JSContext *cx, JSStackFrame *fp, uintN slot)
+{
+ JSObject *argsobj;
+ jsval bmapval, bmapint;
+ jsbitmap *bitmap;
+
+ argsobj = fp->argsobj;
+ (void) JS_GetReservedSlot(cx, argsobj, 0, &bmapval);
+ if (JSVAL_IS_VOID(bmapval))
+ return JS_FALSE;
+ if (fp->argc <= JSVAL_INT_BITS) {
+ bmapint = JSVAL_TO_INT(bmapval);
+ bitmap = (jsbitmap *) &bmapint;
+ } else {
+ bitmap = (jsbitmap *) JSVAL_TO_PRIVATE(bmapval);
+ }
+ return JS_TEST_BIT(bitmap, slot) != 0;
+}
+
+JSBool
+js_GetArgsProperty(JSContext *cx, JSStackFrame *fp, jsid id,
+ JSObject **objp, jsval *vp)
+{
+ jsval val;
+ JSObject *obj;
+ uintN slot;
+
+ if (TEST_OVERRIDE_BIT(fp, CALL_ARGUMENTS)) {
+ JS_ASSERT(fp->callobj);
+ if (!OBJ_GET_PROPERTY(cx, fp->callobj,
+ ATOM_TO_JSID(cx->runtime->atomState
+ .argumentsAtom),
+ &val)) {
+ return JS_FALSE;
+ }
+ if (JSVAL_IS_PRIMITIVE(val)) {
+ obj = js_ValueToNonNullObject(cx, val);
+ if (!obj)
+ return JS_FALSE;
+ } else {
+ obj = JSVAL_TO_OBJECT(val);
+ }
+ *objp = obj;
+ return OBJ_GET_PROPERTY(cx, obj, id, vp);
+ }
+
+ *objp = NULL;
+ *vp = JSVAL_VOID;
+ if (JSID_IS_INT(id)) {
+ slot = (uintN) JSID_TO_INT(id);
+ if (slot < fp->argc) {
+ if (fp->argsobj && ArgWasDeleted(cx, fp, slot))
+ return OBJ_GET_PROPERTY(cx, fp->argsobj, id, vp);
+ *vp = fp->argv[slot];
+ } else {
+ /*
+ * Per ECMA-262 Ed. 3, 10.1.8, last bulleted item, do not share
+ * storage between the formal parameter and arguments[k] for all
+ * k >= fp->argc && k < fp->fun->nargs. For example, in
+ *
+ * function f(x) { x = 42; return arguments[0]; }
+ * f();
+ *
+ * the call to f should return undefined, not 42. If fp->argsobj
+ * is null at this point, as it would be in the example, return
+ * undefined in *vp.
+ */
+ if (fp->argsobj)
+ return OBJ_GET_PROPERTY(cx, fp->argsobj, id, vp);
+ }
+ } else {
+ if (id == ATOM_TO_JSID(cx->runtime->atomState.lengthAtom)) {
+ if (fp->argsobj && TEST_OVERRIDE_BIT(fp, ARGS_LENGTH))
+ return OBJ_GET_PROPERTY(cx, fp->argsobj, id, vp);
+ *vp = INT_TO_JSVAL((jsint) fp->argc);
+ }
+ }
+ return JS_TRUE;
+}
+
+JSObject *
+js_GetArgsObject(JSContext *cx, JSStackFrame *fp)
+{
+ JSObject *argsobj, *global, *parent;
+
+ /*
+ * We must be in a function activation; the function must be lightweight
+ * or else fp must have a variable object.
+ */
+ JS_ASSERT(fp->fun && (!(fp->fun->flags & JSFUN_HEAVYWEIGHT) || fp->varobj));
+
+ /* Skip eval and debugger frames. */
+ while (fp->flags & JSFRAME_SPECIAL)
+ fp = fp->down;
+
+ /* Create an arguments object for fp only if it lacks one. */
+ argsobj = fp->argsobj;
+ if (argsobj)
+ return argsobj;
+
+ /* Link the new object to fp so it can get actual argument values. */
+ argsobj = js_NewObject(cx, &js_ArgumentsClass, NULL, NULL);
+ if (!argsobj || !JS_SetPrivate(cx, argsobj, fp)) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ return NULL;
+ }
+
+ /*
+ * Give arguments an intrinsic scope chain link to fp's global object.
+ * Since the arguments object lacks a prototype because js_ArgumentsClass
+ * is not initialized, js_NewObject won't assign a default parent to it.
+ *
+ * Therefore if arguments is used as the head of an eval scope chain (via
+ * a direct or indirect call to eval(program, arguments)), any reference
+ * to a standard class object in the program will fail to resolve due to
+ * js_GetClassPrototype not being able to find a global object containing
+ * the standard prototype by starting from arguments and following parent.
+ */
+ global = fp->scopeChain;
+ while ((parent = OBJ_GET_PARENT(cx, global)) != NULL)
+ global = parent;
+ argsobj->slots[JSSLOT_PARENT] = OBJECT_TO_JSVAL(global);
+ fp->argsobj = argsobj;
+ return argsobj;
+}
+
+static JSBool
+args_enumerate(JSContext *cx, JSObject *obj);
+
+JSBool
+js_PutArgsObject(JSContext *cx, JSStackFrame *fp)
+{
+ JSObject *argsobj;
+ jsval bmapval, rval;
+ JSBool ok;
+ JSRuntime *rt;
+
+ /*
+ * Reuse args_enumerate here to reflect fp's actual arguments as indexed
+ * elements of argsobj. Do this first, before clearing and freeing the
+ * deleted argument slot bitmap, because args_enumerate depends on that.
+ */
+ argsobj = fp->argsobj;
+ ok = args_enumerate(cx, argsobj);
+
+ /*
+ * Now clear the deleted argument number bitmap slot and free the bitmap,
+ * if one was actually created due to 'delete arguments[0]' or similar.
+ */
+ (void) JS_GetReservedSlot(cx, argsobj, 0, &bmapval);
+ if (!JSVAL_IS_VOID(bmapval)) {
+ JS_SetReservedSlot(cx, argsobj, 0, JSVAL_VOID);
+ if (fp->argc > JSVAL_INT_BITS)
+ JS_free(cx, JSVAL_TO_PRIVATE(bmapval));
+ }
+
+ /*
+ * Now get the prototype properties so we snapshot fp->fun and fp->argc
+ * before fp goes away.
+ */
+ rt = cx->runtime;
+ ok &= js_GetProperty(cx, argsobj, ATOM_TO_JSID(rt->atomState.calleeAtom),
+ &rval);
+ ok &= js_SetProperty(cx, argsobj, ATOM_TO_JSID(rt->atomState.calleeAtom),
+ &rval);
+ ok &= js_GetProperty(cx, argsobj, ATOM_TO_JSID(rt->atomState.lengthAtom),
+ &rval);
+ ok &= js_SetProperty(cx, argsobj, ATOM_TO_JSID(rt->atomState.lengthAtom),
+ &rval);
+
+ /*
+ * Clear the private pointer to fp, which is about to go away (js_Invoke).
+ * Do this last because the args_enumerate and js_GetProperty calls above
+ * need to follow the private slot to find fp.
+ */
+ ok &= JS_SetPrivate(cx, argsobj, NULL);
+ fp->argsobj = NULL;
+ return ok;
+}
+
+static JSBool
+args_delProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ jsint slot;
+ JSStackFrame *fp;
+
+ if (!JSVAL_IS_INT(id))
+ return JS_TRUE;
+ fp = (JSStackFrame *)
+ JS_GetInstancePrivate(cx, obj, &js_ArgumentsClass, NULL);
+ if (!fp)
+ return JS_TRUE;
+ JS_ASSERT(fp->argsobj);
+
+ slot = JSVAL_TO_INT(id);
+ switch (slot) {
+ case ARGS_CALLEE:
+ case ARGS_LENGTH:
+ SET_OVERRIDE_BIT(fp, slot);
+ break;
+
+ default:
+ if ((uintN)slot < fp->argc && !MarkArgDeleted(cx, fp, slot))
+ return JS_FALSE;
+ break;
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+args_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ jsint slot;
+ JSStackFrame *fp;
+
+ if (!JSVAL_IS_INT(id))
+ return JS_TRUE;
+ fp = (JSStackFrame *)
+ JS_GetInstancePrivate(cx, obj, &js_ArgumentsClass, NULL);
+ if (!fp)
+ return JS_TRUE;
+ JS_ASSERT(fp->argsobj);
+
+ slot = JSVAL_TO_INT(id);
+ switch (slot) {
+ case ARGS_CALLEE:
+ if (!TEST_OVERRIDE_BIT(fp, slot))
+ *vp = fp->argv ? fp->argv[-2] : OBJECT_TO_JSVAL(fp->fun->object);
+ break;
+
+ case ARGS_LENGTH:
+ if (!TEST_OVERRIDE_BIT(fp, slot))
+ *vp = INT_TO_JSVAL((jsint)fp->argc);
+ break;
+
+ default:
+ if ((uintN)slot < fp->argc && !ArgWasDeleted(cx, fp, slot))
+ *vp = fp->argv[slot];
+ break;
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+args_setProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSStackFrame *fp;
+ jsint slot;
+
+ if (!JSVAL_IS_INT(id))
+ return JS_TRUE;
+ fp = (JSStackFrame *)
+ JS_GetInstancePrivate(cx, obj, &js_ArgumentsClass, NULL);
+ if (!fp)
+ return JS_TRUE;
+ JS_ASSERT(fp->argsobj);
+
+ slot = JSVAL_TO_INT(id);
+ switch (slot) {
+ case ARGS_CALLEE:
+ case ARGS_LENGTH:
+ SET_OVERRIDE_BIT(fp, slot);
+ break;
+
+ default:
+ if (FUN_INTERPRETED(fp->fun) &&
+ (uintN)slot < fp->argc &&
+ !ArgWasDeleted(cx, fp, slot)) {
+ fp->argv[slot] = *vp;
+ }
+ break;
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+args_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags,
+ JSObject **objp)
+{
+ JSStackFrame *fp;
+ uintN slot;
+ JSString *str;
+ JSAtom *atom;
+ intN tinyid;
+ jsval value;
+
+ *objp = NULL;
+ fp = (JSStackFrame *)
+ JS_GetInstancePrivate(cx, obj, &js_ArgumentsClass, NULL);
+ if (!fp)
+ return JS_TRUE;
+ JS_ASSERT(fp->argsobj);
+
+ if (JSVAL_IS_INT(id)) {
+ slot = JSVAL_TO_INT(id);
+ if (slot < fp->argc && !ArgWasDeleted(cx, fp, slot)) {
+ /* XXX ECMA specs DontEnum, contrary to other array-like objects */
+ if (!js_DefineProperty(cx, obj, INT_JSVAL_TO_JSID(id),
+ fp->argv[slot],
+ args_getProperty, args_setProperty,
+ JS_VERSION_IS_ECMA(cx)
+ ? 0
+ : JSPROP_ENUMERATE,
+ NULL)) {
+ return JS_FALSE;
+ }
+ *objp = obj;
+ }
+ } else {
+ str = JSVAL_TO_STRING(id);
+ atom = cx->runtime->atomState.lengthAtom;
+ if (str == ATOM_TO_STRING(atom)) {
+ tinyid = ARGS_LENGTH;
+ value = INT_TO_JSVAL(fp->argc);
+ } else {
+ atom = cx->runtime->atomState.calleeAtom;
+ if (str == ATOM_TO_STRING(atom)) {
+ tinyid = ARGS_CALLEE;
+ value = fp->argv ? fp->argv[-2]
+ : OBJECT_TO_JSVAL(fp->fun->object);
+ } else {
+ atom = NULL;
+
+ /* Quell GCC overwarnings. */
+ tinyid = 0;
+ value = JSVAL_NULL;
+ }
+ }
+
+ if (atom && !TEST_OVERRIDE_BIT(fp, tinyid)) {
+ if (!js_DefineNativeProperty(cx, obj, ATOM_TO_JSID(atom), value,
+ args_getProperty, args_setProperty, 0,
+ SPROP_HAS_SHORTID, tinyid, NULL)) {
+ return JS_FALSE;
+ }
+ *objp = obj;
+ }
+ }
+
+ return JS_TRUE;
+}
+
+static JSBool
+args_enumerate(JSContext *cx, JSObject *obj)
+{
+ JSStackFrame *fp;
+ JSObject *pobj;
+ JSProperty *prop;
+ uintN slot, argc;
+
+ fp = (JSStackFrame *)
+ JS_GetInstancePrivate(cx, obj, &js_ArgumentsClass, NULL);
+ if (!fp)
+ return JS_TRUE;
+ JS_ASSERT(fp->argsobj);
+
+ /*
+ * Trigger reflection with value snapshot in args_resolve using a series
+ * of js_LookupProperty calls. We handle length, callee, and the indexed
+ * argument properties. We know that args_resolve covers all these cases
+ * and creates direct properties of obj, but that it may fail to resolve
+ * length or callee if overridden.
+ */
+ if (!js_LookupProperty(cx, obj,
+ ATOM_TO_JSID(cx->runtime->atomState.lengthAtom),
+ &pobj, &prop)) {
+ return JS_FALSE;
+ }
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+
+ if (!js_LookupProperty(cx, obj,
+ ATOM_TO_JSID(cx->runtime->atomState.calleeAtom),
+ &pobj, &prop)) {
+ return JS_FALSE;
+ }
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+
+ argc = fp->argc;
+ for (slot = 0; slot < argc; slot++) {
+ if (!js_LookupProperty(cx, obj, INT_TO_JSID((jsint)slot), &pobj, &prop))
+ return JS_FALSE;
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ }
+ return JS_TRUE;
+}
+
+#if JS_HAS_GENERATORS
+/*
+ * If a generator-iterator's arguments or call object escapes, it needs to
+ * mark its generator object.
+ */
+static uint32
+args_or_call_mark(JSContext *cx, JSObject *obj, void *arg)
+{
+ JSStackFrame *fp;
+
+ fp = JS_GetPrivate(cx, obj);
+ if (fp && (fp->flags & JSFRAME_GENERATOR))
+ GC_MARK(cx, FRAME_TO_GENERATOR(fp)->obj, "FRAME_TO_GENERATOR(fp)->obj");
+ return 0;
+}
+#else
+# define args_or_call_mark NULL
+#endif
+
+/*
+ * The Arguments class is not initialized via JS_InitClass, and must not be,
+ * because its name is "Object". Per ECMA, that causes instances of it to
+ * delegate to the object named by Object.prototype. It also ensures that
+ * arguments.toString() returns "[object Object]".
+ *
+ * The JSClass functions below collaborate to lazily reflect and synchronize
+ * actual argument values, argument count, and callee function object stored
+ * in a JSStackFrame with their corresponding property values in the frame's
+ * arguments object.
+ */
+JSClass js_ArgumentsClass = {
+ js_Object_str,
+ JSCLASS_HAS_PRIVATE | JSCLASS_NEW_RESOLVE | JSCLASS_HAS_RESERVED_SLOTS(1) |
+ JSCLASS_HAS_CACHED_PROTO(JSProto_Object),
+ JS_PropertyStub, args_delProperty,
+ args_getProperty, args_setProperty,
+ args_enumerate, (JSResolveOp) args_resolve,
+ JS_ConvertStub, JS_FinalizeStub,
+ NULL, NULL,
+ NULL, NULL,
+ NULL, NULL,
+ args_or_call_mark, NULL
+};
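+
+/*
+ * Script-level illustration of the naming consequence described above (a
+ * sketch of the expected behavior, not a test from this file):
+ *
+ *     function f() { return arguments.toString(); }
+ *     f();    // "[object Object]": arguments delegates to Object.prototype
+ */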
+
+JSObject *
+js_GetCallObject(JSContext *cx, JSStackFrame *fp, JSObject *parent)
+{
+ JSObject *callobj, *funobj;
+
+ /* Create a call object for fp only if it lacks one. */
+ JS_ASSERT(fp->fun);
+ callobj = fp->callobj;
+ if (callobj)
+ return callobj;
+ JS_ASSERT(fp->fun);
+
+ /* The default call parent is its function's parent (static link). */
+ if (!parent) {
+ funobj = fp->argv ? JSVAL_TO_OBJECT(fp->argv[-2]) : fp->fun->object;
+ if (funobj)
+ parent = OBJ_GET_PARENT(cx, funobj);
+ }
+
+ /* Create the call object and link it to its stack frame. */
+ callobj = js_NewObject(cx, &js_CallClass, NULL, parent);
+ if (!callobj || !JS_SetPrivate(cx, callobj, fp)) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ return NULL;
+ }
+ fp->callobj = callobj;
+
+ /* Make callobj be the scope chain and the variables object. */
+ JS_ASSERT(fp->scopeChain == parent);
+ fp->scopeChain = callobj;
+ fp->varobj = callobj;
+ return callobj;
+}
+
+static JSBool
+call_enumerate(JSContext *cx, JSObject *obj);
+
+JSBool
+js_PutCallObject(JSContext *cx, JSStackFrame *fp)
+{
+ JSObject *callobj;
+ JSBool ok;
+ jsid argsid;
+ jsval aval;
+
+ /*
+ * Reuse call_enumerate here to reflect all actual args and vars into the
+ * call object from fp.
+ */
+ callobj = fp->callobj;
+ if (!callobj)
+ return JS_TRUE;
+ ok = call_enumerate(cx, callobj);
+
+ /*
+ * Get the arguments object to snapshot fp's actual argument values.
+ */
+ if (fp->argsobj) {
+ argsid = ATOM_TO_JSID(cx->runtime->atomState.argumentsAtom);
+ ok &= js_GetProperty(cx, callobj, argsid, &aval);
+ ok &= js_SetProperty(cx, callobj, argsid, &aval);
+ ok &= js_PutArgsObject(cx, fp);
+ }
+
+ /*
+ * Clear the private pointer to fp, which is about to go away (js_Invoke).
+ * Do this last because the call_enumerate and js_GetProperty calls above
+ * need to follow the private slot to find fp.
+ */
+ ok &= JS_SetPrivate(cx, callobj, NULL);
+ fp->callobj = NULL;
+ return ok;
+}
+
+static JSPropertySpec call_props[] = {
+ {js_arguments_str, CALL_ARGUMENTS, JSPROP_PERMANENT,0,0},
+ {"__callee__", CALL_CALLEE, 0,0,0},
+ {0,0,0,0,0}
+};
+
+static JSBool
+call_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSStackFrame *fp;
+ jsint slot;
+
+ if (!JSVAL_IS_INT(id))
+ return JS_TRUE;
+ fp = (JSStackFrame *) JS_GetPrivate(cx, obj);
+ if (!fp)
+ return JS_TRUE;
+ JS_ASSERT(fp->fun);
+
+ slot = JSVAL_TO_INT(id);
+ switch (slot) {
+ case CALL_ARGUMENTS:
+ if (!TEST_OVERRIDE_BIT(fp, slot)) {
+ JSObject *argsobj = js_GetArgsObject(cx, fp);
+ if (!argsobj)
+ return JS_FALSE;
+ *vp = OBJECT_TO_JSVAL(argsobj);
+ }
+ break;
+
+ case CALL_CALLEE:
+ if (!TEST_OVERRIDE_BIT(fp, slot))
+ *vp = fp->argv ? fp->argv[-2] : OBJECT_TO_JSVAL(fp->fun->object);
+ break;
+
+ default:
+ if ((uintN)slot < JS_MAX(fp->argc, fp->fun->nargs))
+ *vp = fp->argv[slot];
+ break;
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+call_setProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSStackFrame *fp;
+ jsint slot;
+
+ if (!JSVAL_IS_INT(id))
+ return JS_TRUE;
+ fp = (JSStackFrame *) JS_GetPrivate(cx, obj);
+ if (!fp)
+ return JS_TRUE;
+ JS_ASSERT(fp->fun);
+
+ slot = JSVAL_TO_INT(id);
+ switch (slot) {
+ case CALL_ARGUMENTS:
+ case CALL_CALLEE:
+ SET_OVERRIDE_BIT(fp, slot);
+ break;
+
+ default:
+ if ((uintN)slot < JS_MAX(fp->argc, fp->fun->nargs))
+ fp->argv[slot] = *vp;
+ break;
+ }
+ return JS_TRUE;
+}
+
+JSBool
+js_GetCallVariable(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSStackFrame *fp;
+
+ JS_ASSERT(JSVAL_IS_INT(id));
+ fp = (JSStackFrame *) JS_GetPrivate(cx, obj);
+ if (fp) {
+ /* XXX no jsint slot commoning here to avoid MSVC1.52 crashes */
+ if ((uintN)JSVAL_TO_INT(id) < fp->nvars)
+ *vp = fp->vars[JSVAL_TO_INT(id)];
+ }
+ return JS_TRUE;
+}
+
+JSBool
+js_SetCallVariable(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSStackFrame *fp;
+
+ JS_ASSERT(JSVAL_IS_INT(id));
+ fp = (JSStackFrame *) JS_GetPrivate(cx, obj);
+ if (fp) {
+ /* XXX jsint slot is block-local here to avoid MSVC1.52 crashes */
+ jsint slot = JSVAL_TO_INT(id);
+ if ((uintN)slot < fp->nvars)
+ fp->vars[slot] = *vp;
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+call_enumerate(JSContext *cx, JSObject *obj)
+{
+ JSStackFrame *fp;
+ JSObject *funobj, *pobj;
+ JSScope *scope;
+ JSScopeProperty *sprop, *cprop;
+ JSPropertyOp getter;
+ jsval *vec;
+ JSAtom *atom;
+ JSProperty *prop;
+
+ fp = (JSStackFrame *) JS_GetPrivate(cx, obj);
+ if (!fp)
+ return JS_TRUE;
+
+ /*
+ * Do not enumerate a cloned function object at fp->argv[-2], it may have
+ * gained its own (mutable) scope (e.g., a brutally-shared XUL script sets
+ * the clone's prototype property). We must enumerate the function object
+ * that was decorated with parameter and local variable properties by the
+ * compiler when the compiler created fp->fun, namely fp->fun->object.
+ *
+ * Contrast with call_resolve, where we prefer fp->argv[-2], because we'll
+ * use js_LookupProperty to find any overridden properties in that object,
+ * if it was a mutated clone; and if not, we will search its prototype,
+ * fp->fun->object, to find compiler-created params and locals.
+ */
+ funobj = fp->fun->object;
+ if (!funobj)
+ return JS_TRUE;
+
+ /*
+ * Reflect actual args from fp->argv for formal parameters, and local vars
+ * and functions in fp->vars for declared variables and nested-at-top-level
+ * local functions.
+ */
+ scope = OBJ_SCOPE(funobj);
+ for (sprop = SCOPE_LAST_PROP(scope); sprop; sprop = sprop->parent) {
+ getter = sprop->getter;
+ if (getter == js_GetArgument)
+ vec = fp->argv;
+ else if (getter == js_GetLocalVariable)
+ vec = fp->vars;
+ else
+ continue;
+
+ /* Trigger reflection by looking up the unhidden atom for sprop->id. */
+ JS_ASSERT(JSID_IS_ATOM(sprop->id));
+ atom = JSID_TO_ATOM(sprop->id);
+ JS_ASSERT(atom->flags & ATOM_HIDDEN);
+ atom = atom->entry.value;
+
+ if (!js_LookupProperty(cx, obj, ATOM_TO_JSID(atom), &pobj, &prop))
+ return JS_FALSE;
+
+ /*
+ * If we found the property in a different object, don't try sticking
+ * it into the wrong slots vector. This can occur because we have a mutable
+ * __proto__ slot, and cloned function objects rely on their __proto__
+ * to delegate to the object that contains the var and arg properties.
+ */
+ if (!prop || pobj != obj) {
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ continue;
+ }
+ cprop = (JSScopeProperty *)prop;
+ LOCKED_OBJ_SET_SLOT(obj, cprop->slot, vec[(uint16) sprop->shortid]);
+ OBJ_DROP_PROPERTY(cx, obj, prop);
+ }
+
+ return JS_TRUE;
+}
+
+static JSBool
+call_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags,
+ JSObject **objp)
+{
+ JSStackFrame *fp;
+ JSObject *funobj;
+ JSString *str;
+ JSAtom *atom;
+ JSObject *obj2;
+ JSProperty *prop;
+ JSScopeProperty *sprop;
+ JSPropertyOp getter, setter;
+ uintN attrs, slot, nslots, spflags;
+ jsval *vp, value;
+ intN shortid;
+
+ fp = (JSStackFrame *) JS_GetPrivate(cx, obj);
+ if (!fp)
+ return JS_TRUE;
+ JS_ASSERT(fp->fun);
+
+ if (!JSVAL_IS_STRING(id))
+ return JS_TRUE;
+
+ funobj = fp->argv ? JSVAL_TO_OBJECT(fp->argv[-2]) : fp->fun->object;
+ if (!funobj)
+ return JS_TRUE;
+ JS_ASSERT((JSFunction *) JS_GetPrivate(cx, funobj) == fp->fun);
+
+ str = JSVAL_TO_STRING(id);
+ atom = js_AtomizeString(cx, str, 0);
+ if (!atom)
+ return JS_FALSE;
+ if (!js_LookupHiddenProperty(cx, funobj, ATOM_TO_JSID(atom), &obj2, &prop))
+ return JS_FALSE;
+
+ if (prop) {
+ if (!OBJ_IS_NATIVE(obj2)) {
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ return JS_TRUE;
+ }
+
+ sprop = (JSScopeProperty *) prop;
+ getter = sprop->getter;
+ attrs = sprop->attrs & ~JSPROP_SHARED;
+ slot = (uintN) sprop->shortid;
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+
+ /* Ensure we found an arg or var property for the same function. */
+ if ((sprop->flags & SPROP_IS_HIDDEN) &&
+ (obj2 == funobj ||
+ (JSFunction *) JS_GetPrivate(cx, obj2) == fp->fun)) {
+ if (getter == js_GetArgument) {
+ vp = fp->argv;
+ nslots = JS_MAX(fp->argc, fp->fun->nargs);
+ getter = setter = NULL;
+ } else {
+ JS_ASSERT(getter == js_GetLocalVariable);
+ vp = fp->vars;
+ nslots = fp->nvars;
+ getter = js_GetCallVariable;
+ setter = js_SetCallVariable;
+ }
+ if (slot < nslots) {
+ value = vp[slot];
+ spflags = SPROP_HAS_SHORTID;
+ shortid = (intN) slot;
+ } else {
+ value = JSVAL_VOID;
+ spflags = 0;
+ shortid = 0;
+ }
+ if (!js_DefineNativeProperty(cx, obj, ATOM_TO_JSID(atom), value,
+ getter, setter, attrs,
+ spflags, shortid, NULL)) {
+ return JS_FALSE;
+ }
+ *objp = obj;
+ }
+ }
+
+ return JS_TRUE;
+}
+
+static JSBool
+call_convert(JSContext *cx, JSObject *obj, JSType type, jsval *vp)
+{
+ JSStackFrame *fp;
+
+ if (type == JSTYPE_FUNCTION) {
+ fp = (JSStackFrame *) JS_GetPrivate(cx, obj);
+ if (fp) {
+ JS_ASSERT(fp->fun);
+ *vp = fp->argv ? fp->argv[-2] : OBJECT_TO_JSVAL(fp->fun->object);
+ }
+ }
+ return JS_TRUE;
+}
+
+JSClass js_CallClass = {
+ js_Call_str,
+ JSCLASS_HAS_PRIVATE | JSCLASS_NEW_RESOLVE | JSCLASS_IS_ANONYMOUS |
+ JSCLASS_HAS_CACHED_PROTO(JSProto_Call),
+ JS_PropertyStub, JS_PropertyStub,
+ call_getProperty, call_setProperty,
+ call_enumerate, (JSResolveOp)call_resolve,
+ call_convert, JS_FinalizeStub,
+ NULL, NULL,
+ NULL, NULL,
+ NULL, NULL,
+ args_or_call_mark, NULL,
+};
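+
+/*
+ * Illustrative note, not part of the original source: for a heavyweight
+ * function such as
+ *
+ *     function make(x) { return function () { return x; }; }
+ *
+ * the inner closure keeps make's Call object on its scope chain. While
+ * make() runs, call_getProperty and call_setProperty read and write x in the
+ * live stack frame; when the frame returns, js_PutCallObject (above) uses
+ * call_enumerate to snapshot the actual args and vars into the Call object's
+ * slots, so the closure continues to see the final value of x.
+ */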
+
+/*
+ * ECMA-262 specifies that length is a property of function object instances,
+ * but we can avoid that space cost by delegating to a prototype property that
+ * is JSPROP_PERMANENT and JSPROP_SHARED. Each fun_getProperty call computes
+ * a fresh length value based on the arity of the individual function object's
+ * private data.
+ *
+ * The extensions below other than length, i.e., the ones not in ECMA-262,
+ * are neither JSPROP_READONLY nor JSPROP_SHARED, because for compatibility
+ * with ECMA we must allow a delegating object to override them.
+ */
+#define LENGTH_PROP_ATTRS (JSPROP_READONLY|JSPROP_PERMANENT|JSPROP_SHARED)
+
+static JSPropertySpec function_props[] = {
+ {js_arguments_str, CALL_ARGUMENTS, JSPROP_PERMANENT, 0,0},
+ {js_arity_str, FUN_ARITY, JSPROP_PERMANENT, 0,0},
+ {js_caller_str, FUN_CALLER, JSPROP_PERMANENT, 0,0},
+ {js_length_str, ARGS_LENGTH, LENGTH_PROP_ATTRS, 0,0},
+ {js_name_str, FUN_NAME, JSPROP_PERMANENT, 0,0},
+ {0,0,0,0,0}
+};
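+
+/*
+ * Illustrative example, not part of the original source: given
+ *
+ *     function add(a, b) { return a + b; }
+ *
+ * fun_getProperty (below) reports both add.length and add.arity as 2,
+ * computed from fun->nargs on each access rather than stored in the
+ * instance; length itself lives only on Function.prototype, as the shared,
+ * permanent, read-only property declared above.
+ */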
+
+static JSBool
+fun_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ jsint slot;
+ JSFunction *fun;
+ JSStackFrame *fp;
+
+ if (!JSVAL_IS_INT(id))
+ return JS_TRUE;
+ slot = JSVAL_TO_INT(id);
+
+ /*
+ * Loop because getter and setter can be delegated from another class,
+ * but loop only for ARGS_LENGTH because we must pretend that f.length
+ * is in each function instance f, per ECMA-262, instead of only in the
+ * Function.prototype object (we use JSPROP_PERMANENT with JSPROP_SHARED
+ * to make it appear so).
+ *
+ * This code couples tightly to the attributes for the function_props[]
+ * initializers above, and to js_SetProperty and js_HasOwnPropertyHelper.
+ *
+ * It's important to allow delegating objects, even though they inherit
+ * this getter (fun_getProperty), to override arguments, arity, caller,
+ * and name. If we didn't return early for slot != ARGS_LENGTH, we would
+ * clobber *vp with the native property value, instead of letting script
+ * override that value in delegating objects.
+ *
+ * Note how that clobbering is what simulates JSPROP_READONLY for all of
+ * the non-standard properties when the directly addressed object (obj)
+ * is a function object (i.e., when this loop does not iterate).
+ */
+ while (!(fun = (JSFunction *)
+ JS_GetInstancePrivate(cx, obj, &js_FunctionClass, NULL))) {
+ if (slot != ARGS_LENGTH)
+ return JS_TRUE;
+ obj = OBJ_GET_PROTO(cx, obj);
+ if (!obj)
+ return JS_TRUE;
+ }
+
+ /* Find fun's top-most activation record. */
+ for (fp = cx->fp; fp && (fp->fun != fun || (fp->flags & JSFRAME_SPECIAL));
+ fp = fp->down) {
+ continue;
+ }
+
+ switch (slot) {
+ case CALL_ARGUMENTS:
+ /* Warn if strict about f.arguments or equivalent unqualified uses. */
+ if (!JS_ReportErrorFlagsAndNumber(cx,
+ JSREPORT_WARNING | JSREPORT_STRICT,
+ js_GetErrorMessage, NULL,
+ JSMSG_DEPRECATED_USAGE,
+ js_arguments_str)) {
+ return JS_FALSE;
+ }
+ if (fp) {
+ if (!js_GetArgsValue(cx, fp, vp))
+ return JS_FALSE;
+ } else {
+ *vp = JSVAL_NULL;
+ }
+ break;
+
+ case ARGS_LENGTH:
+ case FUN_ARITY:
+ *vp = INT_TO_JSVAL((jsint)fun->nargs);
+ break;
+
+ case FUN_NAME:
+ *vp = fun->atom
+ ? ATOM_KEY(fun->atom)
+ : STRING_TO_JSVAL(cx->runtime->emptyString);
+ break;
+
+ case FUN_CALLER:
+ while (fp && (fp->flags & JSFRAME_SKIP_CALLER) && fp->down)
+ fp = fp->down;
+ if (fp && fp->down && fp->down->fun && fp->down->argv)
+ *vp = fp->down->argv[-2];
+ else
+ *vp = JSVAL_NULL;
+ if (!JSVAL_IS_PRIMITIVE(*vp) && cx->runtime->checkObjectAccess) {
+ id = ATOM_KEY(cx->runtime->atomState.callerAtom);
+ if (!cx->runtime->checkObjectAccess(cx, obj, id, JSACC_READ, vp))
+ return JS_FALSE;
+ }
+ break;
+
+ default:
+ /* XXX fun[0] and fun.arguments[0] are equivalent. */
+ if (fp && fp->fun && (uintN)slot < fp->fun->nargs)
+ *vp = fp->argv[slot];
+ break;
+ }
+
+ return JS_TRUE;
+}
+
+static JSBool
+fun_enumerate(JSContext *cx, JSObject *obj)
+{
+ jsid prototypeId;
+ JSObject *pobj;
+ JSProperty *prop;
+
+ prototypeId = ATOM_TO_JSID(cx->runtime->atomState.classPrototypeAtom);
+ if (!OBJ_LOOKUP_PROPERTY(cx, obj, prototypeId, &pobj, &prop))
+ return JS_FALSE;
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ return JS_TRUE;
+}
+
+static JSBool
+fun_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags,
+ JSObject **objp)
+{
+ JSFunction *fun;
+ JSString *str;
+ JSAtom *prototypeAtom;
+
+ /*
+ * No need to reflect fun.prototype in 'fun.prototype = ...' or in an
+ * unqualified reference to prototype, which the emitter looks up as a
+ * hidden atom when attempting to bind to a formal parameter or local
+ * variable slot.
+ */
+ if (flags & (JSRESOLVE_ASSIGNING | JSRESOLVE_HIDDEN))
+ return JS_TRUE;
+
+ if (!JSVAL_IS_STRING(id))
+ return JS_TRUE;
+
+ /* No valid function object should lack private data, but check anyway. */
+ fun = (JSFunction *)JS_GetInstancePrivate(cx, obj, &js_FunctionClass, NULL);
+ if (!fun || !fun->object)
+ return JS_TRUE;
+
+ /*
+ * Ok, check whether id is 'prototype' and bootstrap the function object's
+ * prototype property.
+ */
+ str = JSVAL_TO_STRING(id);
+ prototypeAtom = cx->runtime->atomState.classPrototypeAtom;
+ if (str == ATOM_TO_STRING(prototypeAtom)) {
+ JSObject *proto, *parentProto;
+ jsval pval;
+
+ proto = parentProto = NULL;
+ if (fun->object != obj && fun->object) {
+ /*
+ * Clone of a function: make its prototype property value have the
+ * same class as the clone-parent's prototype.
+ */
+ if (!OBJ_GET_PROPERTY(cx, fun->object, ATOM_TO_JSID(prototypeAtom),
+ &pval)) {
+ return JS_FALSE;
+ }
+ if (!JSVAL_IS_PRIMITIVE(pval)) {
+ /*
+ * We are about to allocate a new object, so hack the newborn
+ * root until then to protect pval in case it is figuratively
+ * up in the air, with no strong refs protecting it.
+ */
+ cx->weakRoots.newborn[GCX_OBJECT] = JSVAL_TO_GCTHING(pval);
+ parentProto = JSVAL_TO_OBJECT(pval);
+ }
+ }
+
+ /*
+ * Beware of the wacky case of a user function named Object -- trying
+ * to find a prototype for that will recur back here _ad perniciem_.
+ */
+ if (!parentProto && fun->atom == CLASS_ATOM(cx, Object))
+ return JS_TRUE;
+
+ /*
+ * If resolving "prototype" in a clone, clone the parent's prototype.
+ * Pass the constructor's (obj's) parent as the prototype parent, to
+ * avoid defaulting to parentProto.constructor.__parent__.
+ */
+ proto = js_NewObject(cx, &js_ObjectClass, parentProto,
+ OBJ_GET_PARENT(cx, obj));
+ if (!proto)
+ return JS_FALSE;
+
+ /*
+ * ECMA (15.3.5.2) says that constructor.prototype is DontDelete for
+ * user-defined functions, but DontEnum | ReadOnly | DontDelete for
+ * native "system" constructors such as Object or Function. So lazily
+ * set the former here in fun_resolve, but eagerly define the latter
+ * in JS_InitClass, with the right attributes.
+ */
+ if (!js_SetClassPrototype(cx, obj, proto,
+ JSPROP_ENUMERATE | JSPROP_PERMANENT)) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ return JS_FALSE;
+ }
+ *objp = obj;
+ }
+
+ return JS_TRUE;
+}
+
+static JSBool
+fun_convert(JSContext *cx, JSObject *obj, JSType type, jsval *vp)
+{
+ switch (type) {
+ case JSTYPE_FUNCTION:
+ *vp = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+ default:
+ return js_TryValueOf(cx, obj, type, vp);
+ }
+}
+
+static void
+fun_finalize(JSContext *cx, JSObject *obj)
+{
+ JSFunction *fun;
+ JSScript *script;
+
+ /* No valid function object should lack private data, but check anyway. */
+ fun = (JSFunction *) JS_GetPrivate(cx, obj);
+ if (!fun)
+ return;
+ if (fun->object == obj)
+ fun->object = NULL;
+
+ /* Null-check required since the parser sets interpreted very early. */
+ if (FUN_INTERPRETED(fun) && fun->u.i.script &&
+ js_IsAboutToBeFinalized(cx, fun))
+ {
+ script = fun->u.i.script;
+ fun->u.i.script = NULL;
+ js_DestroyScript(cx, script);
+ }
+}
+
+#if JS_HAS_XDR
+
+#include "jsxdrapi.h"
+
+enum {
+ JSXDR_FUNARG = 1,
+ JSXDR_FUNVAR = 2,
+ JSXDR_FUNCONST = 3
+};
+
+/* XXX store parent and proto, if defined */
+static JSBool
+fun_xdrObject(JSXDRState *xdr, JSObject **objp)
+{
+ JSContext *cx;
+ JSFunction *fun;
+ uint32 nullAtom; /* flag to indicate if fun->atom is NULL */
+ JSTempValueRooter tvr;
+ uint32 flagsword; /* originally only flags was JS_XDRUint8'd */
+ uint16 extraUnused; /* variable for no longer used field */
+ JSAtom *propAtom;
+ JSScopeProperty *sprop;
+ uint32 userid; /* NB: holds a signed int-tagged jsval */
+ uintN i, n, dupflag;
+ uint32 type;
+ JSBool ok;
+#ifdef DEBUG
+ uintN nvars = 0, nargs = 0;
+#endif
+
+ cx = xdr->cx;
+ if (xdr->mode == JSXDR_ENCODE) {
+ /*
+ * No valid function object should lack private data, but fail soft
+ * (return true, no error report) in case one does due to API pilot
+ * or internal error.
+ */
+ fun = (JSFunction *) JS_GetPrivate(cx, *objp);
+ if (!fun)
+ return JS_TRUE;
+ if (!FUN_INTERPRETED(fun)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_NOT_SCRIPTED_FUNCTION,
+ JS_GetFunctionName(fun));
+ return JS_FALSE;
+ }
+ nullAtom = !fun->atom;
+ flagsword = ((uint32)fun->u.i.nregexps << 16) | fun->flags;
+ extraUnused = 0;
+ } else {
+ fun = js_NewFunction(cx, NULL, NULL, 0, 0, NULL, NULL);
+ if (!fun)
+ return JS_FALSE;
+ }
+
+ /* From here on, control flow must flow through label out. */
+ JS_PUSH_TEMP_ROOT_OBJECT(cx, fun->object, &tvr);
+ ok = JS_TRUE;
+
+ if (!JS_XDRUint32(xdr, &nullAtom))
+ goto bad;
+ if (!nullAtom && !js_XDRStringAtom(xdr, &fun->atom))
+ goto bad;
+
+ if (!JS_XDRUint16(xdr, &fun->nargs) ||
+ !JS_XDRUint16(xdr, &extraUnused) ||
+ !JS_XDRUint16(xdr, &fun->u.i.nvars) ||
+ !JS_XDRUint32(xdr, &flagsword)) {
+ goto bad;
+ }
+
+ /* Assert that all previous writes of extraUnused were writes of 0. */
+ JS_ASSERT(extraUnused == 0);
+
+ /* do arguments and local vars */
+ if (fun->object) {
+ n = fun->nargs + fun->u.i.nvars;
+ if (xdr->mode == JSXDR_ENCODE) {
+ JSScope *scope;
+ JSScopeProperty **spvec, *auto_spvec[8];
+ void *mark;
+
+ if (n <= sizeof auto_spvec / sizeof auto_spvec[0]) {
+ spvec = auto_spvec;
+ mark = NULL;
+ } else {
+ mark = JS_ARENA_MARK(&cx->tempPool);
+ JS_ARENA_ALLOCATE_CAST(spvec, JSScopeProperty **, &cx->tempPool,
+ n * sizeof(JSScopeProperty *));
+ if (!spvec) {
+ JS_ReportOutOfMemory(cx);
+ goto bad;
+ }
+ }
+ scope = OBJ_SCOPE(fun->object);
+ for (sprop = SCOPE_LAST_PROP(scope); sprop;
+ sprop = sprop->parent) {
+ if (sprop->getter == js_GetArgument) {
+ JS_ASSERT(nargs++ <= fun->nargs);
+ spvec[sprop->shortid] = sprop;
+ } else if (sprop->getter == js_GetLocalVariable) {
+ JS_ASSERT(nvars++ <= fun->u.i.nvars);
+ spvec[fun->nargs + sprop->shortid] = sprop;
+ }
+ }
+ for (i = 0; i < n; i++) {
+ sprop = spvec[i];
+ JS_ASSERT(sprop->flags & SPROP_HAS_SHORTID);
+ type = (i < fun->nargs)
+ ? JSXDR_FUNARG
+ : (sprop->attrs & JSPROP_READONLY)
+ ? JSXDR_FUNCONST
+ : JSXDR_FUNVAR;
+ userid = INT_TO_JSVAL(sprop->shortid);
+ propAtom = JSID_TO_ATOM(sprop->id);
+ if (!JS_XDRUint32(xdr, &type) ||
+ !JS_XDRUint32(xdr, &userid) ||
+ !js_XDRCStringAtom(xdr, &propAtom)) {
+ if (mark)
+ JS_ARENA_RELEASE(&cx->tempPool, mark);
+ goto bad;
+ }
+ }
+ if (mark)
+ JS_ARENA_RELEASE(&cx->tempPool, mark);
+ } else {
+ JSPropertyOp getter, setter;
+
+ for (i = n; i != 0; i--) {
+ uintN attrs = JSPROP_PERMANENT;
+
+ if (!JS_XDRUint32(xdr, &type) ||
+ !JS_XDRUint32(xdr, &userid) ||
+ !js_XDRCStringAtom(xdr, &propAtom)) {
+ goto bad;
+ }
+ JS_ASSERT(type == JSXDR_FUNARG || type == JSXDR_FUNVAR ||
+ type == JSXDR_FUNCONST);
+ if (type == JSXDR_FUNARG) {
+ getter = js_GetArgument;
+ setter = js_SetArgument;
+ JS_ASSERT(nargs++ <= fun->nargs);
+ } else if (type == JSXDR_FUNVAR || type == JSXDR_FUNCONST) {
+ getter = js_GetLocalVariable;
+ setter = js_SetLocalVariable;
+ if (type == JSXDR_FUNCONST)
+ attrs |= JSPROP_READONLY;
+ JS_ASSERT(nvars++ <= fun->u.i.nvars);
+ } else {
+ getter = NULL;
+ setter = NULL;
+ }
+
+ /* Flag duplicate argument if atom is bound in fun->object. */
+ dupflag = SCOPE_GET_PROPERTY(OBJ_SCOPE(fun->object),
+ ATOM_TO_JSID(propAtom))
+ ? SPROP_IS_DUPLICATE
+ : 0;
+
+ if (!js_AddHiddenProperty(cx, fun->object,
+ ATOM_TO_JSID(propAtom),
+ getter, setter, SPROP_INVALID_SLOT,
+ attrs | JSPROP_SHARED,
+ dupflag | SPROP_HAS_SHORTID,
+ JSVAL_TO_INT(userid))) {
+ goto bad;
+ }
+ }
+ }
+ }
+
+ if (!js_XDRScript(xdr, &fun->u.i.script, NULL))
+ goto bad;
+
+ if (xdr->mode == JSXDR_DECODE) {
+ fun->flags = (uint16) flagsword | JSFUN_INTERPRETED;
+ fun->u.i.nregexps = (uint16) (flagsword >> 16);
+
+ *objp = fun->object;
+ js_CallNewScriptHook(cx, fun->u.i.script, fun);
+ }
+
+out:
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return ok;
+
+bad:
+ ok = JS_FALSE;
+ goto out;
+}
+
+#else /* !JS_HAS_XDR */
+
+#define fun_xdrObject NULL
+
+#endif /* !JS_HAS_XDR */
+
+/*
+ * [[HasInstance]] internal method for Function objects: fetches the
+ * .prototype property of its 'this' parameter and walks the prototype chain
+ * of v (only if v is an object), returning true if .prototype is found.
+ */
+static JSBool
+fun_hasInstance(JSContext *cx, JSObject *obj, jsval v, JSBool *bp)
+{
+ jsval pval;
+ JSString *str;
+
+ if (!OBJ_GET_PROPERTY(cx, obj,
+ ATOM_TO_JSID(cx->runtime->atomState
+ .classPrototypeAtom),
+ &pval)) {
+ return JS_FALSE;
+ }
+
+ if (JSVAL_IS_PRIMITIVE(pval)) {
+ /*
+ * Throw a runtime error if instanceof is called on a function that
+ * has a non-object as its .prototype value.
+ */
+ str = js_DecompileValueGenerator(cx, -1, OBJECT_TO_JSVAL(obj), NULL);
+ if (str) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_PROTOTYPE, JS_GetStringBytes(str));
+ }
+ return JS_FALSE;
+ }
+
+ return js_IsDelegate(cx, JSVAL_TO_OBJECT(pval), v, bp);
+}
+
+static uint32
+fun_mark(JSContext *cx, JSObject *obj, void *arg)
+{
+ JSFunction *fun;
+
+ fun = (JSFunction *) JS_GetPrivate(cx, obj);
+ if (fun) {
+ GC_MARK(cx, fun, "private");
+ if (fun->atom)
+ GC_MARK_ATOM(cx, fun->atom);
+ if (FUN_INTERPRETED(fun) && fun->u.i.script)
+ js_MarkScript(cx, fun->u.i.script);
+ }
+ return 0;
+}
+
+static uint32
+fun_reserveSlots(JSContext *cx, JSObject *obj)
+{
+ JSFunction *fun;
+
+ fun = (JSFunction *) JS_GetPrivate(cx, obj);
+ return (fun && FUN_INTERPRETED(fun)) ? fun->u.i.nregexps : 0;
+}
+
+/*
+ * Reserve two slots in all function objects for XPConnect. Note that this
+ * does not bloat every instance, only those on which reserved slots are set,
+ * and those on which ad-hoc properties are defined.
+ */
+JS_FRIEND_DATA(JSClass) js_FunctionClass = {
+ js_Function_str,
+ JSCLASS_HAS_PRIVATE | JSCLASS_NEW_RESOLVE | JSCLASS_HAS_RESERVED_SLOTS(2) |
+ JSCLASS_HAS_CACHED_PROTO(JSProto_Function),
+ JS_PropertyStub, JS_PropertyStub,
+ fun_getProperty, JS_PropertyStub,
+ fun_enumerate, (JSResolveOp)fun_resolve,
+ fun_convert, fun_finalize,
+ NULL, NULL,
+ NULL, NULL,
+ fun_xdrObject, fun_hasInstance,
+ fun_mark, fun_reserveSlots
+};
+
+JSBool
+js_fun_toString(JSContext *cx, JSObject *obj, uint32 indent,
+ uintN argc, jsval *argv, jsval *rval)
+{
+ jsval fval;
+ JSFunction *fun;
+ JSString *str;
+
+ if (!argv) {
+ JS_ASSERT(JS_ObjectIsFunction(cx, obj));
+ } else {
+ fval = argv[-1];
+ if (!VALUE_IS_FUNCTION(cx, fval)) {
+ /*
+ * If we don't have a function to start off with, try converting
+ * the object to a function. If that doesn't work, complain.
+ */
+ if (JSVAL_IS_OBJECT(fval)) {
+ obj = JSVAL_TO_OBJECT(fval);
+ if (!OBJ_GET_CLASS(cx, obj)->convert(cx, obj, JSTYPE_FUNCTION,
+ &fval)) {
+ return JS_FALSE;
+ }
+ argv[-1] = fval;
+ }
+ if (!VALUE_IS_FUNCTION(cx, fval)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_INCOMPATIBLE_PROTO,
+ js_Function_str, js_toString_str,
+ JS_GetTypeName(cx,
+ JS_TypeOfValue(cx, fval)));
+ return JS_FALSE;
+ }
+ }
+
+ obj = JSVAL_TO_OBJECT(fval);
+ }
+
+ fun = (JSFunction *) JS_GetPrivate(cx, obj);
+ if (!fun)
+ return JS_TRUE;
+ if (argc && !js_ValueToECMAUint32(cx, argv[0], &indent))
+ return JS_FALSE;
+ str = JS_DecompileFunction(cx, fun, (uintN)indent);
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+fun_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return js_fun_toString(cx, obj, 0, argc, argv, rval);
+}
+
+#if JS_HAS_TOSOURCE
+static JSBool
+fun_toSource(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return js_fun_toString(cx, obj, JS_DONT_PRETTY_PRINT, argc, argv, rval);
+}
+#endif
+
+static const char call_str[] = "call";
+
+static JSBool
+fun_call(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsval fval, *sp, *oldsp;
+ JSString *str;
+ void *mark;
+ uintN i;
+ JSStackFrame *fp;
+ JSBool ok;
+
+ if (!OBJ_DEFAULT_VALUE(cx, obj, JSTYPE_FUNCTION, &argv[-1]))
+ return JS_FALSE;
+ fval = argv[-1];
+
+ if (!VALUE_IS_FUNCTION(cx, fval)) {
+ str = JS_ValueToString(cx, fval);
+ if (str) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_INCOMPATIBLE_PROTO,
+ js_Function_str, call_str,
+ JS_GetStringBytes(str));
+ }
+ return JS_FALSE;
+ }
+
+ if (argc == 0) {
+ /* Call fun with its global object as the 'this' param if no args. */
+ obj = NULL;
+ } else {
+ /* Otherwise convert the first arg to 'this' and skip over it. */
+ if (!js_ValueToObject(cx, argv[0], &obj))
+ return JS_FALSE;
+ argc--;
+ argv++;
+ }
+
+ /* Allocate stack space for fval, obj, and the args. */
+ sp = js_AllocStack(cx, 2 + argc, &mark);
+ if (!sp)
+ return JS_FALSE;
+
+ /* Push fval, obj, and the args. */
+ *sp++ = fval;
+ *sp++ = OBJECT_TO_JSVAL(obj);
+ for (i = 0; i < argc; i++)
+ *sp++ = argv[i];
+
+ /* Lift current frame to include the args and do the call. */
+ fp = cx->fp;
+ oldsp = fp->sp;
+ fp->sp = sp;
+ ok = js_Invoke(cx, argc, JSINVOKE_INTERNAL | JSINVOKE_SKIP_CALLER);
+
+ /* Store rval and pop stack back to our frame's sp. */
+ *rval = fp->sp[-1];
+ fp->sp = oldsp;
+ js_FreeStack(cx, mark);
+ return ok;
+}
+
+static JSBool
+fun_apply(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsval fval, *sp, *oldsp;
+ JSString *str;
+ JSObject *aobj;
+ jsuint length;
+ JSBool arraylike, ok;
+ void *mark;
+ uintN i;
+ JSStackFrame *fp;
+
+ if (argc == 0) {
+ /* Will get globalObject as 'this' and no other arguments. */
+ return fun_call(cx, obj, argc, argv, rval);
+ }
+
+ if (!OBJ_DEFAULT_VALUE(cx, obj, JSTYPE_FUNCTION, &argv[-1]))
+ return JS_FALSE;
+ fval = argv[-1];
+
+ if (!VALUE_IS_FUNCTION(cx, fval)) {
+ str = JS_ValueToString(cx, fval);
+ if (str) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_INCOMPATIBLE_PROTO,
+ js_Function_str, "apply",
+ JS_GetStringBytes(str));
+ }
+ return JS_FALSE;
+ }
+
+ /* Quell GCC overwarnings. */
+ aobj = NULL;
+ length = 0;
+
+ if (argc >= 2) {
+ /* If the 2nd arg is null or void, call the function with 0 args. */
+ if (JSVAL_IS_NULL(argv[1]) || JSVAL_IS_VOID(argv[1])) {
+ argc = 0;
+ } else {
+ /* The second arg must be an array (or arguments object). */
+ arraylike = JS_FALSE;
+ if (!JSVAL_IS_PRIMITIVE(argv[1])) {
+ aobj = JSVAL_TO_OBJECT(argv[1]);
+ if (!js_IsArrayLike(cx, aobj, &arraylike, &length))
+ return JS_FALSE;
+ }
+ if (!arraylike) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_APPLY_ARGS, "apply");
+ return JS_FALSE;
+ }
+ }
+ }
+
+ /* Convert the first arg to 'this' and skip over it. */
+ if (!js_ValueToObject(cx, argv[0], &obj))
+ return JS_FALSE;
+
+ /* Allocate stack space for fval, obj, and the args. */
+ argc = (uintN)JS_MIN(length, ARRAY_INIT_LIMIT - 1);
+ sp = js_AllocStack(cx, 2 + argc, &mark);
+ if (!sp)
+ return JS_FALSE;
+
+ /* Push fval, obj, and aobj's elements as args. */
+ *sp++ = fval;
+ *sp++ = OBJECT_TO_JSVAL(obj);
+ for (i = 0; i < argc; i++) {
+ ok = JS_GetElement(cx, aobj, (jsint)i, sp);
+ if (!ok)
+ goto out;
+ sp++;
+ }
+
+ /* Lift current frame to include the args and do the call. */
+ fp = cx->fp;
+ oldsp = fp->sp;
+ fp->sp = sp;
+ ok = js_Invoke(cx, argc, JSINVOKE_INTERNAL | JSINVOKE_SKIP_CALLER);
+
+ /* Store rval and pop stack back to our frame's sp. */
+ *rval = fp->sp[-1];
+ fp->sp = oldsp;
+out:
+ js_FreeStack(cx, mark);
+ return ok;
+}
+
+#ifdef NARCISSUS
+static JSBool
+fun_applyConstructor(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSObject *aobj;
+ uintN length, i;
+ void *mark;
+ jsval *sp, *newsp, *oldsp;
+ JSStackFrame *fp;
+ JSBool ok;
+
+ if (JSVAL_IS_PRIMITIVE(argv[0]) ||
+ (aobj = JSVAL_TO_OBJECT(argv[0]),
+ OBJ_GET_CLASS(cx, aobj) != &js_ArrayClass &&
+ OBJ_GET_CLASS(cx, aobj) != &js_ArgumentsClass)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_APPLY_ARGS, "__applyConstruct__");
+ return JS_FALSE;
+ }
+
+ if (!js_GetLengthProperty(cx, aobj, &length))
+ return JS_FALSE;
+
+ if (length >= ARRAY_INIT_LIMIT)
+ length = ARRAY_INIT_LIMIT - 1;
+ newsp = sp = js_AllocStack(cx, 2 + length, &mark);
+ if (!sp)
+ return JS_FALSE;
+
+ fp = cx->fp;
+ oldsp = fp->sp;
+ *sp++ = OBJECT_TO_JSVAL(obj);
+ *sp++ = JSVAL_NULL; /* This is filled automagically. */
+ for (i = 0; i < length; i++) {
+ ok = JS_GetElement(cx, aobj, (jsint)i, sp);
+ if (!ok)
+ goto out;
+ sp++;
+ }
+
+ oldsp = fp->sp;
+ fp->sp = sp;
+ ok = js_InvokeConstructor(cx, newsp, length);
+
+ *rval = fp->sp[-1];
+ fp->sp = oldsp;
+out:
+ js_FreeStack(cx, mark);
+ return ok;
+}
+#endif
+
+static JSFunctionSpec function_methods[] = {
+#if JS_HAS_TOSOURCE
+ {js_toSource_str, fun_toSource, 0,0,0},
+#endif
+ {js_toString_str, fun_toString, 1,0,0},
+ {"apply", fun_apply, 2,0,0},
+ {call_str, fun_call, 1,0,0},
+#ifdef NARCISSUS
+ {"__applyConstructor__", fun_applyConstructor, 1,0,0},
+#endif
+ {0,0,0,0,0}
+};
+
+JSBool
+js_IsIdentifier(JSString *str)
+{
+ size_t length;
+ jschar c, *chars, *end, *s;
+
+ length = JSSTRING_LENGTH(str);
+ if (length == 0)
+ return JS_FALSE;
+ chars = JSSTRING_CHARS(str);
+ c = *chars;
+ if (!JS_ISIDSTART(c))
+ return JS_FALSE;
+ end = chars + length;
+ for (s = chars + 1; s != end; ++s) {
+ c = *s;
+ if (!JS_ISIDENT(c))
+ return JS_FALSE;
+ }
+ return !js_IsKeyword(chars, length);
+}
+
+static JSBool
+Function(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSStackFrame *fp, *caller;
+ JSFunction *fun;
+ JSObject *parent;
+ uintN i, n, lineno, dupflag;
+ JSAtom *atom;
+ const char *filename;
+ JSObject *obj2;
+ JSProperty *prop;
+ JSScopeProperty *sprop;
+ JSString *str, *arg;
+ void *mark;
+ JSTokenStream *ts;
+ JSPrincipals *principals;
+ jschar *collected_args, *cp;
+ size_t arg_length, args_length, old_args_length;
+ JSTokenType tt;
+ JSBool ok;
+
+ fp = cx->fp;
+ if (!(fp->flags & JSFRAME_CONSTRUCTING)) {
+ obj = js_NewObject(cx, &js_FunctionClass, NULL, NULL);
+ if (!obj)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(obj);
+ }
+ fun = (JSFunction *) JS_GetPrivate(cx, obj);
+ if (fun)
+ return JS_TRUE;
+
+ /*
+ * NB: (new Function) is not lexically closed by its caller, it's just an
+ * anonymous function in the top-level scope that its constructor inhabits.
+ * Thus 'var x = 42; f = new Function("return x"); print(f())' prints 42,
+ * and so would a call to f from another top-level's script or function.
+ *
+ * In older versions, before call objects, a new Function was adopted by
+ * its running context's globalObject, which might be different from the
+ * top-level reachable from scopeChain (in HTML frames, e.g.).
+ */
+ parent = OBJ_GET_PARENT(cx, JSVAL_TO_OBJECT(argv[-2]));
+
+ fun = js_NewFunction(cx, obj, NULL, 0, JSFUN_LAMBDA, parent,
+ cx->runtime->atomState.anonymousAtom);
+
+ if (!fun)
+ return JS_FALSE;
+
+ /*
+ * Function is static and not called directly by other functions in this
+ * file, therefore it is callable only as a native function by js_Invoke.
+ * Find the scripted caller, possibly skipping other native frames such as
+ * are built for Function.prototype.call or .apply activations that invoke
+ * Function indirectly from a script.
+ */
+ JS_ASSERT(!fp->script && fp->fun && fp->fun->u.n.native == Function);
+ caller = JS_GetScriptedCaller(cx, fp);
+ if (caller) {
+ filename = caller->script->filename;
+ lineno = js_PCToLineNumber(cx, caller->script, caller->pc);
+ principals = JS_EvalFramePrincipals(cx, fp, caller);
+ } else {
+ filename = NULL;
+ lineno = 0;
+ principals = NULL;
+ }
+
+ /* Belt-and-braces: check that the caller has access to parent. */
+ if (!js_CheckPrincipalsAccess(cx, parent, principals,
+ CLASS_ATOM(cx, Function))) {
+ return JS_FALSE;
+ }
+
+ n = argc ? argc - 1 : 0;
+ if (n > 0) {
+ /*
+ * Collect the function-argument arguments into one string, separated
+ * by commas, then make a tokenstream from that string, and scan it to
+ * get the arguments. We need to throw the full scanner at the
+ * problem, because the argument string can legitimately contain
+ * comments and linefeeds. XXX It might be better to concatenate
+ * everything up into a function definition and pass it to the
+ * compiler, but doing it this way is less of a delta from the old
+ * code. See ECMA 15.3.2.1.
+ */
+ args_length = 0;
+ for (i = 0; i < n; i++) {
+ /* Collect the lengths for all the function-argument arguments. */
+ arg = js_ValueToString(cx, argv[i]);
+ if (!arg)
+ return JS_FALSE;
+ argv[i] = STRING_TO_JSVAL(arg);
+
+ /*
+ * Check for overflow. The < test works because the maximum
+ * JSString length fits in 2 fewer bits than size_t has.
+ */
+ old_args_length = args_length;
+ args_length = old_args_length + JSSTRING_LENGTH(arg);
+ if (args_length < old_args_length) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ }
+
+ /* Add 1 for each joining comma and check for overflow (two ways). */
+ old_args_length = args_length;
+ args_length = old_args_length + n - 1;
+ if (args_length < old_args_length ||
+ args_length >= ~(size_t)0 / sizeof(jschar)) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+
+ /*
+ * Allocate a string to hold the concatenated arguments, including room
+ * for a terminating 0. Mark cx->tempPool for later release, to free
+ * collected_args and its tokenstream in one swoop.
+ */
+ mark = JS_ARENA_MARK(&cx->tempPool);
+ JS_ARENA_ALLOCATE_CAST(cp, jschar *, &cx->tempPool,
+ (args_length+1) * sizeof(jschar));
+ if (!cp) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ collected_args = cp;
+
+ /*
+ * Concatenate the arguments into the new string, separated by commas.
+ */
+ for (i = 0; i < n; i++) {
+ arg = JSVAL_TO_STRING(argv[i]);
+ arg_length = JSSTRING_LENGTH(arg);
+ (void) js_strncpy(cp, JSSTRING_CHARS(arg), arg_length);
+ cp += arg_length;
+
+ /* Add separating comma or terminating 0. */
+ *cp++ = (i + 1 < n) ? ',' : 0;
+ }
+
+ /*
+ * Make a tokenstream (allocated from cx->tempPool) that reads from
+ * the given string.
+ */
+ ts = js_NewTokenStream(cx, collected_args, args_length, filename,
+ lineno, principals);
+ if (!ts) {
+ JS_ARENA_RELEASE(&cx->tempPool, mark);
+ return JS_FALSE;
+ }
+
+ /* The argument string may be empty or contain no tokens. */
+ tt = js_GetToken(cx, ts);
+ if (tt != TOK_EOF) {
+ for (;;) {
+ /*
+ * Check that it's a name. This also implicitly guards against
+ * TOK_ERROR, which was already reported.
+ */
+ if (tt != TOK_NAME)
+ goto bad_formal;
+
+ /*
+ * Get the atom corresponding to the name from the tokenstream;
+ * we're assured at this point that it's a valid identifier.
+ */
+ atom = CURRENT_TOKEN(ts).t_atom;
+ if (!js_LookupHiddenProperty(cx, obj, ATOM_TO_JSID(atom),
+ &obj2, &prop)) {
+ goto bad_formal;
+ }
+ sprop = (JSScopeProperty *) prop;
+ dupflag = 0;
+ if (sprop) {
+ ok = JS_TRUE;
+ if (obj2 == obj) {
+ const char *name = js_AtomToPrintableString(cx, atom);
+
+ /*
+ * A duplicate parameter name. We force a duplicate
+ * node on the SCOPE_LAST_PROP(scope) list with the
+ * same id, distinguished by the SPROP_IS_DUPLICATE
+ * flag, and not mapped by an entry in scope.
+ */
+ JS_ASSERT(sprop->getter == js_GetArgument);
+ ok = name &&
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS |
+ JSREPORT_WARNING |
+ JSREPORT_STRICT,
+ JSMSG_DUPLICATE_FORMAL,
+ name);
+
+ dupflag = SPROP_IS_DUPLICATE;
+ }
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ if (!ok)
+ goto bad_formal;
+ sprop = NULL;
+ }
+ if (!js_AddHiddenProperty(cx, fun->object, ATOM_TO_JSID(atom),
+ js_GetArgument, js_SetArgument,
+ SPROP_INVALID_SLOT,
+ JSPROP_PERMANENT | JSPROP_SHARED,
+ dupflag | SPROP_HAS_SHORTID,
+ fun->nargs)) {
+ goto bad_formal;
+ }
+ if (fun->nargs == JS_BITMASK(16)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_TOO_MANY_FUN_ARGS);
+ goto bad;
+ }
+ fun->nargs++;
+
+ /*
+ * Get the next token. Stop on end of stream. Otherwise
+ * insist on a comma, get another name, and iterate.
+ */
+ tt = js_GetToken(cx, ts);
+ if (tt == TOK_EOF)
+ break;
+ if (tt != TOK_COMMA)
+ goto bad_formal;
+ tt = js_GetToken(cx, ts);
+ }
+ }
+
+ /* Clean up. */
+ ok = js_CloseTokenStream(cx, ts);
+ JS_ARENA_RELEASE(&cx->tempPool, mark);
+ if (!ok)
+ return JS_FALSE;
+ }
+
+ if (argc) {
+ str = js_ValueToString(cx, argv[argc-1]);
+ } else {
+ /* Can't use cx->runtime->emptyString because we're called too early. */
+ str = js_NewStringCopyZ(cx, js_empty_ucstr, 0);
+ }
+ if (!str)
+ return JS_FALSE;
+ if (argv) {
+ /* Use the last arg (or this if argc == 0) as a local GC root. */
+ argv[(intN)(argc-1)] = STRING_TO_JSVAL(str);
+ }
+
+ mark = JS_ARENA_MARK(&cx->tempPool);
+ ts = js_NewTokenStream(cx, JSSTRING_CHARS(str), JSSTRING_LENGTH(str),
+ filename, lineno, principals);
+ if (!ts) {
+ ok = JS_FALSE;
+ } else {
+ ok = js_CompileFunctionBody(cx, ts, fun) &&
+ js_CloseTokenStream(cx, ts);
+ }
+ JS_ARENA_RELEASE(&cx->tempPool, mark);
+ return ok;
+
+bad_formal:
+ /*
+ * Report "malformed formal parameter" iff no illegal char or similar
+ * scanner error was already reported.
+ */
+ if (!(ts->flags & TSF_ERROR))
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_BAD_FORMAL);
+
+bad:
+ /*
+ * Clean up the arguments string and tokenstream if we failed to parse
+ * the arguments.
+ */
+ (void)js_CloseTokenStream(cx, ts);
+ JS_ARENA_RELEASE(&cx->tempPool, mark);
+ return JS_FALSE;
+}
+
+JSObject *
+js_InitFunctionClass(JSContext *cx, JSObject *obj)
+{
+ JSObject *proto;
+ JSAtom *atom;
+ JSFunction *fun;
+
+ proto = JS_InitClass(cx, obj, NULL, &js_FunctionClass, Function, 1,
+ function_props, function_methods, NULL, NULL);
+ if (!proto)
+ return NULL;
+ atom = js_Atomize(cx, js_FunctionClass.name, strlen(js_FunctionClass.name),
+ 0);
+ if (!atom)
+ goto bad;
+ fun = js_NewFunction(cx, proto, NULL, 0, 0, obj, NULL);
+ if (!fun)
+ goto bad;
+ fun->u.i.script = js_NewScript(cx, 1, 0, 0);
+ if (!fun->u.i.script)
+ goto bad;
+ fun->u.i.script->code[0] = JSOP_STOP;
+ fun->flags |= JSFUN_INTERPRETED;
+ return proto;
+
+bad:
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ return NULL;
+}
+
+JSObject *
+js_InitCallClass(JSContext *cx, JSObject *obj)
+{
+ JSObject *proto;
+
+ proto = JS_InitClass(cx, obj, NULL, &js_CallClass, NULL, 0,
+ call_props, NULL, NULL, NULL);
+ if (!proto)
+ return NULL;
+
+ /*
+ * Null Call.prototype's proto slot so that Object.prototype.* does not
+ * pollute the scope of heavyweight functions.
+ */
+ OBJ_SET_PROTO(cx, proto, NULL);
+ return proto;
+}
+
+JSFunction *
+js_NewFunction(JSContext *cx, JSObject *funobj, JSNative native, uintN nargs,
+ uintN flags, JSObject *parent, JSAtom *atom)
+{
+ JSFunction *fun;
+ JSTempValueRooter tvr;
+
+ /* If funobj is null, allocate an object for it. */
+ if (funobj) {
+ OBJ_SET_PARENT(cx, funobj, parent);
+ } else {
+ funobj = js_NewObject(cx, &js_FunctionClass, NULL, parent);
+ if (!funobj)
+ return NULL;
+ }
+
+ /* Protect fun from any potential GC callback. */
+ JS_PUSH_SINGLE_TEMP_ROOT(cx, OBJECT_TO_JSVAL(funobj), &tvr);
+
+ /*
+ * Allocate fun after allocating funobj so slot allocation in js_NewObject
+ * does not wipe out fun from newborn[GCX_PRIVATE].
+ */
+ fun = (JSFunction *) js_NewGCThing(cx, GCX_PRIVATE, sizeof(JSFunction));
+ if (!fun)
+ goto out;
+
+ /* Initialize all function members. */
+ fun->object = NULL;
+ fun->nargs = nargs;
+ fun->flags = flags & JSFUN_FLAGS_MASK;
+ fun->u.n.native = native;
+ fun->u.n.extra = 0;
+ fun->u.n.spare = 0;
+ fun->atom = atom;
+ fun->clasp = NULL;
+
+ /* Link fun to funobj and vice versa. */
+ if (!js_LinkFunctionObject(cx, fun, funobj)) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ fun = NULL;
+ }
+
+out:
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return fun;
+}
+
+JSObject *
+js_CloneFunctionObject(JSContext *cx, JSObject *funobj, JSObject *parent)
+{
+ JSObject *newfunobj;
+ JSFunction *fun;
+
+ JS_ASSERT(OBJ_GET_CLASS(cx, funobj) == &js_FunctionClass);
+ newfunobj = js_NewObject(cx, &js_FunctionClass, funobj, parent);
+ if (!newfunobj)
+ return NULL;
+ fun = (JSFunction *) JS_GetPrivate(cx, funobj);
+ if (!js_LinkFunctionObject(cx, fun, newfunobj)) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ return NULL;
+ }
+ return newfunobj;
+}
+
+JSBool
+js_LinkFunctionObject(JSContext *cx, JSFunction *fun, JSObject *funobj)
+{
+ if (!fun->object)
+ fun->object = funobj;
+ return JS_SetPrivate(cx, funobj, fun);
+}
+
+JSFunction *
+js_DefineFunction(JSContext *cx, JSObject *obj, JSAtom *atom, JSNative native,
+ uintN nargs, uintN attrs)
+{
+ JSFunction *fun;
+
+ fun = js_NewFunction(cx, NULL, native, nargs, attrs, obj, atom);
+ if (!fun)
+ return NULL;
+ if (!OBJ_DEFINE_PROPERTY(cx, obj, ATOM_TO_JSID(atom),
+ OBJECT_TO_JSVAL(fun->object),
+ NULL, NULL,
+ attrs & ~JSFUN_FLAGS_MASK, NULL)) {
+ return NULL;
+ }
+ return fun;
+}
+
+#if (JSV2F_CONSTRUCT & JSV2F_SEARCH_STACK)
+# error "JSINVOKE_CONSTRUCT and JSV2F_SEARCH_STACK are not disjoint!"
+#endif
+
+JSFunction *
+js_ValueToFunction(JSContext *cx, jsval *vp, uintN flags)
+{
+ jsval v;
+ JSObject *obj;
+
+ v = *vp;
+ obj = NULL;
+ if (JSVAL_IS_OBJECT(v)) {
+ obj = JSVAL_TO_OBJECT(v);
+ if (obj && OBJ_GET_CLASS(cx, obj) != &js_FunctionClass) {
+ if (!OBJ_DEFAULT_VALUE(cx, obj, JSTYPE_FUNCTION, &v))
+ return NULL;
+ obj = VALUE_IS_FUNCTION(cx, v) ? JSVAL_TO_OBJECT(v) : NULL;
+ }
+ }
+ if (!obj) {
+ js_ReportIsNotFunction(cx, vp, flags);
+ return NULL;
+ }
+ return (JSFunction *) JS_GetPrivate(cx, obj);
+}
+
+JSObject *
+js_ValueToFunctionObject(JSContext *cx, jsval *vp, uintN flags)
+{
+ JSFunction *fun;
+ JSObject *funobj;
+ JSStackFrame *caller;
+ JSPrincipals *principals;
+
+ if (VALUE_IS_FUNCTION(cx, *vp))
+ return JSVAL_TO_OBJECT(*vp);
+
+ fun = js_ValueToFunction(cx, vp, flags);
+ if (!fun)
+ return NULL;
+ funobj = fun->object;
+ *vp = OBJECT_TO_JSVAL(funobj);
+
+ caller = JS_GetScriptedCaller(cx, cx->fp);
+ if (caller) {
+ principals = caller->script->principals;
+ } else {
+ /* No scripted caller, don't allow access. */
+ principals = NULL;
+ }
+
+ if (!js_CheckPrincipalsAccess(cx, funobj, principals,
+ fun->atom
+ ? fun->atom
+ : cx->runtime->atomState.anonymousAtom)) {
+ return NULL;
+ }
+ return funobj;
+}
+
+JSObject *
+js_ValueToCallableObject(JSContext *cx, jsval *vp, uintN flags)
+{
+ JSObject *callable;
+
+ callable = JSVAL_IS_PRIMITIVE(*vp) ? NULL : JSVAL_TO_OBJECT(*vp);
+ if (callable &&
+ ((callable->map->ops == &js_ObjectOps)
+ ? OBJ_GET_CLASS(cx, callable)->call
+ : callable->map->ops->call)) {
+ *vp = OBJECT_TO_JSVAL(callable);
+ } else {
+ callable = js_ValueToFunctionObject(cx, vp, flags);
+ }
+ return callable;
+}
+
+void
+js_ReportIsNotFunction(JSContext *cx, jsval *vp, uintN flags)
+{
+ JSStackFrame *fp;
+ JSString *str;
+ JSTempValueRooter tvr;
+ const char *bytes, *source;
+
+ for (fp = cx->fp; fp && !fp->spbase; fp = fp->down)
+ continue;
+ str = js_DecompileValueGenerator(cx,
+ (fp && fp->spbase <= vp && vp < fp->sp)
+ ? vp - fp->sp
+ : (flags & JSV2F_SEARCH_STACK)
+ ? JSDVG_SEARCH_STACK
+ : JSDVG_IGNORE_STACK,
+ *vp,
+ NULL);
+ if (str) {
+ JS_PUSH_TEMP_ROOT_STRING(cx, str, &tvr);
+ bytes = JS_GetStringBytes(str);
+ if (flags & JSV2F_ITERATOR) {
+ source = js_ValueToPrintableSource(cx, *vp);
+ if (source) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_ITERATOR,
+ bytes, js_iterator_str, source);
+ }
+ } else {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ (uintN)((flags & JSV2F_CONSTRUCT)
+ ? JSMSG_NOT_CONSTRUCTOR
+ : JSMSG_NOT_FUNCTION),
+ bytes);
+ }
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ }
+}
diff --git a/src/third_party/js-1.7/jsfun.h b/src/third_party/js-1.7/jsfun.h
new file mode 100644
index 00000000000..8d5c1850a34
--- /dev/null
+++ b/src/third_party/js-1.7/jsfun.h
@@ -0,0 +1,170 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsfun_h___
+#define jsfun_h___
+/*
+ * JS function definitions.
+ */
+#include "jsprvtd.h"
+#include "jspubtd.h"
+
+JS_BEGIN_EXTERN_C
+
+struct JSFunction {
+ JSObject *object; /* back-pointer to GC'ed object header */
+ uint16 nargs; /* minimum number of actual arguments */
+ uint16 flags; /* bound method and other flags, see jsapi.h */
+ union {
+ struct {
+ uint16 extra; /* number of arg slots for local GC roots */
+ uint16 spare; /* reserved for future use */
+ JSNative native; /* native method pointer or null */
+ } n;
+ struct {
+ uint16 nvars; /* number of local variables */
+ uint16 nregexps; /* number of regular expressions literals */
+ JSScript *script; /* interpreted bytecode descriptor or null */
+ } i;
+ } u;
+ JSAtom *atom; /* name for diagnostics and decompiling */
+ JSClass *clasp; /* if non-null, constructor for this class */
+};
+
+#define JSFUN_INTERPRETED 0x8000 /* use u.i if set, u.n if unset */
+
+#define FUN_INTERPRETED(fun) ((fun)->flags & JSFUN_INTERPRETED)
+#define FUN_NATIVE(fun) (FUN_INTERPRETED(fun) ? NULL : (fun)->u.n.native)
+#define FUN_SCRIPT(fun) (FUN_INTERPRETED(fun) ? (fun)->u.i.script : NULL)
+
+extern JSClass js_ArgumentsClass;
+extern JSClass js_CallClass;
+
+/* JS_FRIEND_DATA so that VALUE_IS_FUNCTION is callable from the shell. */
+extern JS_FRIEND_DATA(JSClass) js_FunctionClass;
+
+/*
+ * NB: jsapi.h and jsobj.h must be included before any call to this macro.
+ */
+#define VALUE_IS_FUNCTION(cx, v) \
+ (!JSVAL_IS_PRIMITIVE(v) && \
+ OBJ_GET_CLASS(cx, JSVAL_TO_OBJECT(v)) == &js_FunctionClass)
+
+extern JSBool
+js_fun_toString(JSContext *cx, JSObject *obj, uint32 indent,
+ uintN argc, jsval *argv, jsval *rval);
+
+extern JSBool
+js_IsIdentifier(JSString *str);
+
+extern JSObject *
+js_InitFunctionClass(JSContext *cx, JSObject *obj);
+
+extern JSObject *
+js_InitArgumentsClass(JSContext *cx, JSObject *obj);
+
+extern JSObject *
+js_InitCallClass(JSContext *cx, JSObject *obj);
+
+extern JSFunction *
+js_NewFunction(JSContext *cx, JSObject *funobj, JSNative native, uintN nargs,
+ uintN flags, JSObject *parent, JSAtom *atom);
+
+extern JSObject *
+js_CloneFunctionObject(JSContext *cx, JSObject *funobj, JSObject *parent);
+
+extern JSBool
+js_LinkFunctionObject(JSContext *cx, JSFunction *fun, JSObject *object);
+
+extern JSFunction *
+js_DefineFunction(JSContext *cx, JSObject *obj, JSAtom *atom, JSNative native,
+ uintN nargs, uintN flags);
+
+/*
+ * Flags for js_ValueToFunction and js_ReportIsNotFunction. We depend on the
+ * fact that JSINVOKE_CONSTRUCT (aka JSFRAME_CONSTRUCTING) is 1, and test that
+ * with #if/#error in jsfun.c.
+ */
+#define JSV2F_CONSTRUCT JSINVOKE_CONSTRUCT
+#define JSV2F_ITERATOR JSINVOKE_ITERATOR
+#define JSV2F_SEARCH_STACK 0x10000
+
+extern JSFunction *
+js_ValueToFunction(JSContext *cx, jsval *vp, uintN flags);
+
+extern JSObject *
+js_ValueToFunctionObject(JSContext *cx, jsval *vp, uintN flags);
+
+extern JSObject *
+js_ValueToCallableObject(JSContext *cx, jsval *vp, uintN flags);
+
+extern void
+js_ReportIsNotFunction(JSContext *cx, jsval *vp, uintN flags);
+
+extern JSObject *
+js_GetCallObject(JSContext *cx, JSStackFrame *fp, JSObject *parent);
+
+extern JSBool
+js_PutCallObject(JSContext *cx, JSStackFrame *fp);
+
+extern JSBool
+js_GetCallVariable(JSContext *cx, JSObject *obj, jsval id, jsval *vp);
+
+extern JSBool
+js_SetCallVariable(JSContext *cx, JSObject *obj, jsval id, jsval *vp);
+
+extern JSBool
+js_GetArgsValue(JSContext *cx, JSStackFrame *fp, jsval *vp);
+
+extern JSBool
+js_GetArgsProperty(JSContext *cx, JSStackFrame *fp, jsid id,
+ JSObject **objp, jsval *vp);
+
+extern JSObject *
+js_GetArgsObject(JSContext *cx, JSStackFrame *fp);
+
+extern JSBool
+js_PutArgsObject(JSContext *cx, JSStackFrame *fp);
+
+extern JSBool
+js_XDRFunction(JSXDRState *xdr, JSObject **objp);
+
+JS_END_EXTERN_C
+
+#endif /* jsfun_h___ */
diff --git a/src/third_party/js-1.7/jsgc.c b/src/third_party/js-1.7/jsgc.c
new file mode 100644
index 00000000000..7fae096e0e8
--- /dev/null
+++ b/src/third_party/js-1.7/jsgc.c
@@ -0,0 +1,3201 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS Mark-and-Sweep Garbage Collector.
+ *
+ * This GC allocates fixed-sized things with sizes up to GC_NBYTES_MAX (see
+ * jsgc.h). It allocates from a special GC arena pool with each arena allocated
+ * using malloc. It uses an ideally parallel array of flag bytes to hold the
+ * mark bit, finalizer type index, etc.
+ *
+ * XXX swizzle page to freelist for better locality of reference
+ */
+#include "jsstddef.h"
+#include <stdlib.h> /* for free */
+#include <string.h> /* for memset used when DEBUG */
+#include "jstypes.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jshash.h" /* Added by JSIFY */
+#include "jsapi.h"
+#include "jsatom.h"
+#include "jsbit.h"
+#include "jsclist.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsdbgapi.h"
+#include "jsexn.h"
+#include "jsfun.h"
+#include "jsgc.h"
+#include "jsinterp.h"
+#include "jsiter.h"
+#include "jslock.h"
+#include "jsnum.h"
+#include "jsobj.h"
+#include "jsscope.h"
+#include "jsscript.h"
+#include "jsstr.h"
+
+#if JS_HAS_XML_SUPPORT
+#include "jsxml.h"
+#endif
+
+/*
+ * GC arena sizing depends on amortizing arena overhead using a large number
+ * of things per arena, and on the thing/flags ratio of 8:1 on most platforms.
+ *
+ * On 64-bit platforms, we would have half as many things per arena because
+ * pointers are twice as big, so we double the bytes for things per arena.
+ * This preserves the 1024 byte flags sub-arena size, which relates to the
+ * GC_PAGE_SIZE (see below for why).
+ */
+#if JS_BYTES_PER_WORD == 8
+# define GC_THINGS_SHIFT 14 /* 16KB for things on Alpha, etc. */
+#else
+# define GC_THINGS_SHIFT 13 /* 8KB for things on most platforms */
+#endif
+#define GC_THINGS_SIZE JS_BIT(GC_THINGS_SHIFT)
+#define GC_FLAGS_SIZE (GC_THINGS_SIZE / sizeof(JSGCThing))
+
+/*
+ * A GC arena contains one flag byte for each thing in its heap, and supports
+ * O(1) lookup of a flag given its thing's address.
+ *
+ * To implement this, we take advantage of the thing/flags numerology: given
+ * the 8K bytes worth of GC-things, there are 1K flag bytes. Within each 9K
+ * allocation for things+flags there are always 8 consecutive 1K-pages each
+ * aligned on 1K boundary. We use these pages to allocate things and the
+ * remaining 1K of space before and after the aligned pages to store flags.
+ * If we are really lucky and things+flags starts on a 1K boundary, then
+ * flags would consist of a single 1K chunk that comes after 8K of things.
+ * Otherwise there are 2 chunks of flags, one before and one after things.
+ *
+ * To be able to find the flag byte for a particular thing, we put a
+ * JSGCPageInfo record at the beginning of each 1K-aligned page to hold that
+ * page's offset from the beginning of things+flags allocation and we allocate
+ * things after this record. Thus for each thing |thing_address & ~1023|
+ * gives the address of a JSGCPageInfo record from which we read page_offset.
+ * Due to page alignment
+ * (page_offset & ~1023) + (thing_address & 1023)
+ * gives thing_offset from the beginning of 8K paged things. We then divide
+ * thing_offset by sizeof(JSGCThing) to get thing_index.
+ *
+ * Now |page_address - page_offset| is things+flags arena_address and
+ * (page_offset & 1023) is the offset of the first page from the start of
+ * things+flags area. Thus if
+ * thing_index < (page_offset & 1023)
+ * then
+ * allocation_start_address + thing_index < address_of_the_first_page
+ * and we use
+ * allocation_start_address + thing_index
+ * as the address to store thing's flags. If
+ * thing_index >= (page_offset & 1023),
+ * then we use the chunk of flags that comes after the pages with things
+ * and calculate the address for the flag byte as
+ * address_of_the_first_page + 8K + (thing_index - (page_offset & 1023))
+ * which is just
+ * allocation_start_address + thing_index + 8K.
+ *
+ * When we allocate things with size equal to sizeof(JSGCThing), the overhead
+ * of this scheme for 32 bit platforms is (8+8*(8+1))/(8+9K) or 0.87%
+ * (assuming 4 bytes for each JSGCArena header, and 8 bytes for each
+ * JSGCThing and JSGCPageInfo). When thing_size > 8, the scheme wastes the
+ * flag byte for each extra 8 bytes beyond sizeof(JSGCThing) in thing_size
+ * and the overhead is close to 1/8 or 12.5%.
+ * FIXME: How can we avoid this overhead?
+ *
+ * Here's some ASCII art showing an arena:
+ *
+ * split or the first 1-K aligned address.
+ * |
+ * V
+ * +--+-------+-------+-------+-------+-------+-------+-------+-------+-----+
+ * |fB| tp0 | tp1 | tp2 | tp3 | tp4 | tp5 | tp6 | tp7 | fA |
+ * +--+-------+-------+-------+-------+-------+-------+-------+-------+-----+
+ * ^ ^
+ * tI ---------+ |
+ * tJ -------------------------------------------+
+ *
+ * - fB are the "before split" flags, fA are the "after split" flags
+ * - tp0-tp7 are the 8 thing pages
+ * - thing tI points into tp1, whose flags are below the split, in fB
+ * - thing tJ points into tp5, clearly above the split
+ *
+ * In general, one of the thing pages will have some of its things' flags on
+ * the low side of the split, and the rest of its things' flags on the high
+ * side. All the other pages have flags only below or only above.
+ *
+ * (If we need to implement card-marking for an incremental GC write barrier,
+ * we can replace the word-sized offsetInArena in JSGCPageInfo by a pair of
+ * uint8 card_mark and uint16 offsetInArena fields, as the offset cannot exceed
+ * GC_THINGS_SIZE. This would give an extremely efficient write barrier:
+ * when mutating an object obj, just store a 1 byte at
+ * (uint8 *) ((jsuword)obj & ~1023) on 32-bit platforms.)
+ */
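+/*
+ * Worked example, added for illustration only (the concrete layout below is
+ * hypothetical, assuming the 32-bit case with sizeof(JSGCThing) == 8):
+ * suppose base, the things+flags area, starts 64 bytes below a 1K boundary,
+ * so the first aligned thing page P0 begins at base + 64 and the "before
+ * split" flag chunk fB is 64 bytes long. Every JSGCPageInfo in this arena
+ * then has (offsetInArena & 1023) == 64. For a thing at P0 + 16:
+ * thing_offset = 0 + 16, thing_index = 16/8 = 2, and 2 < 64, so its flag
+ * byte is at base + 2, inside fB. For a thing at P0 + 5*1024 + 16:
+ * page_offset = 64 + 5120, thing_offset = 5120 + 16, thing_index = 642, and
+ * 642 >= 64, so its flag byte is at base + 642 + 8K, inside fA after the
+ * eight thing pages.
+ */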
+#define GC_PAGE_SHIFT 10
+#define GC_PAGE_MASK ((jsuword) JS_BITMASK(GC_PAGE_SHIFT))
+#define GC_PAGE_SIZE JS_BIT(GC_PAGE_SHIFT)
+#define GC_PAGE_COUNT (1 << (GC_THINGS_SHIFT - GC_PAGE_SHIFT))
+
+typedef struct JSGCPageInfo {
+ jsuword offsetInArena; /* offset from the arena start */
+ jsuword unscannedBitmap; /* bitset for fast search of marked
+ but not yet scanned GC things */
+} JSGCPageInfo;
+
+struct JSGCArena {
+ JSGCArenaList *list; /* allocation list for the arena */
+ JSGCArena *prev; /* link field for allocation list */
+ JSGCArena *prevUnscanned; /* link field for the list of arenas
+ with marked but not yet scanned
+ things */
+ jsuword unscannedPages; /* bitset for fast search of pages
+ with marked but not yet scanned
+ things */
+ uint8 base[1]; /* things+flags allocation area */
+};
+
+#define GC_ARENA_SIZE \
+ (offsetof(JSGCArena, base) + GC_THINGS_SIZE + GC_FLAGS_SIZE)
+
+#define FIRST_THING_PAGE(a) \
+ (((jsuword)(a)->base + GC_FLAGS_SIZE - 1) & ~GC_PAGE_MASK)
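+
+/*
+ * FIRST_THING_PAGE(a) is, in effect, a->base rounded up to the next
+ * GC_PAGE_SIZE boundary -- the "split" in the ASCII art above -- under the
+ * assumption that GC_FLAGS_SIZE equals GC_PAGE_SIZE (1K of flag bytes for an
+ * 8K things area, as the overhead figure above implies); the static assert
+ * below only guarantees GC_FLAGS_SIZE >= GC_PAGE_SIZE.
+ */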
+
+#define PAGE_TO_ARENA(pi) \
+ ((JSGCArena *)((jsuword)(pi) - (pi)->offsetInArena \
+ - offsetof(JSGCArena, base)))
+
+#define PAGE_INDEX(pi) \
+ ((size_t)((pi)->offsetInArena >> GC_PAGE_SHIFT))
+
+#define THING_TO_PAGE(thing) \
+ ((JSGCPageInfo *)((jsuword)(thing) & ~GC_PAGE_MASK))
+
+/*
+ * Given a thing size n, return the size of the gap from the page start before
+ * the first thing. We know that any n not a power of two packs from
+ * the end of the page leaving at least enough room for one JSGCPageInfo, but
+ * not for another thing, at the front of the page (JS_ASSERTs below insist
+ * on this).
+ *
+ * This works because all allocations are a multiple of sizeof(JSGCThing) ==
+ * sizeof(JSGCPageInfo) in size.
+ */
+#define PAGE_THING_GAP(n) (((n) & ((n) - 1)) ? (GC_PAGE_SIZE % (n)) : (n))
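+
+/*
+ * Worked examples of PAGE_THING_GAP, assuming GC_PAGE_SIZE == 1024 and
+ * sizeof(JSGCPageInfo) == 8 as above: for n == 8 or n == 16 (powers of two)
+ * the gap is n, exactly one thing slot, which holds the JSGCPageInfo; for
+ * n == 24 the gap is 1024 % 24 == 16, room for the 8-byte JSGCPageInfo but
+ * not for another 24-byte thing.
+ */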
+
+#ifdef JS_THREADSAFE
+/*
+ * The maximum number of things to put to the local free list by taking
+ * several things from the global free list or from the tail of the last
+ * allocated arena to amortize the cost of rt->gcLock.
+ *
+ * We use number 8 based on benchmarks from bug 312238.
+ */
+#define MAX_THREAD_LOCAL_THINGS 8
+
+#endif
+
+JS_STATIC_ASSERT(sizeof(JSGCThing) == sizeof(JSGCPageInfo));
+JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(JSObject));
+JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(JSString));
+JS_STATIC_ASSERT(sizeof(JSGCThing) >= sizeof(jsdouble));
+JS_STATIC_ASSERT(GC_FLAGS_SIZE >= GC_PAGE_SIZE);
+JS_STATIC_ASSERT(sizeof(JSStackHeader) >= 2 * sizeof(jsval));
+
+/*
+ * JSPtrTable capacity growth descriptor. The table grows by powers of two
+ * starting from capacity JSPtrTableInfo.minCapacity, but switching to linear
+ * growth when capacity reaches JSPtrTableInfo.linearGrowthThreshold.
+ */
+typedef struct JSPtrTableInfo {
+ uint16 minCapacity;
+ uint16 linearGrowthThreshold;
+} JSPtrTableInfo;
+
+#define GC_ITERATOR_TABLE_MIN 4
+#define GC_ITERATOR_TABLE_LINEAR 1024
+
+static const JSPtrTableInfo iteratorTableInfo = {
+ GC_ITERATOR_TABLE_MIN,
+ GC_ITERATOR_TABLE_LINEAR
+};
+
+/* Calculate table capacity based on the current value of JSPtrTable.count. */
+static size_t
+PtrTableCapacity(size_t count, const JSPtrTableInfo *info)
+{
+ size_t linear, log, capacity;
+
+ linear = info->linearGrowthThreshold;
+ JS_ASSERT(info->minCapacity <= linear);
+
+ if (count == 0) {
+ capacity = 0;
+ } else if (count < linear) {
+ log = JS_CEILING_LOG2W(count);
+ JS_ASSERT(log != JS_BITS_PER_WORD);
+ capacity = (size_t)1 << log;
+ if (capacity < info->minCapacity)
+ capacity = info->minCapacity;
+ } else {
+ capacity = JS_ROUNDUP(count, linear);
+ }
+
+ JS_ASSERT(capacity >= count);
+ return capacity;
+}
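+
+/*
+ * For instance, with iteratorTableInfo above (minCapacity 4, linear growth
+ * threshold 1024): a count of 1..4 yields capacity 4, a count of 5 yields 8,
+ * a count of 100 yields 128, and a count of 1500 crosses the threshold and
+ * yields JS_ROUNDUP(1500, 1024) == 2048.
+ */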
+
+static void
+FreePtrTable(JSPtrTable *table, const JSPtrTableInfo *info)
+{
+ if (table->array) {
+ JS_ASSERT(table->count > 0);
+ free(table->array);
+ table->array = NULL;
+ table->count = 0;
+ }
+ JS_ASSERT(table->count == 0);
+}
+
+static JSBool
+AddToPtrTable(JSContext *cx, JSPtrTable *table, const JSPtrTableInfo *info,
+ void *ptr)
+{
+ size_t count, capacity;
+ void **array;
+
+ count = table->count;
+ capacity = PtrTableCapacity(count, info);
+
+ if (count == capacity) {
+ if (capacity < info->minCapacity) {
+ JS_ASSERT(capacity == 0);
+ JS_ASSERT(!table->array);
+ capacity = info->minCapacity;
+ } else {
+ /*
+ * Simplify the overflow detection assuming a pointer is bigger
+ * than a byte.
+ */
+ JS_STATIC_ASSERT(2 <= sizeof table->array[0]);
+ capacity = (capacity < info->linearGrowthThreshold)
+ ? 2 * capacity
+ : capacity + info->linearGrowthThreshold;
+ if (capacity > (size_t)-1 / sizeof table->array[0])
+ goto bad;
+ }
+ array = (void **) realloc(table->array,
+ capacity * sizeof table->array[0]);
+ if (!array)
+ goto bad;
+#ifdef DEBUG
+ memset(array + count, JS_FREE_PATTERN,
+ (capacity - count) * sizeof table->array[0]);
+#endif
+ table->array = array;
+ }
+
+ table->array[count] = ptr;
+ table->count = count + 1;
+
+ return JS_TRUE;
+
+ bad:
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+}
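+
+/*
+ * With the capacity policy above, the iterator table therefore grows through
+ * capacities 4, 8, 16, ..., 1024, 2048, 3072, ... -- doubling while below
+ * the linear growth threshold and then growing by the threshold per step.
+ */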
+
+static void
+ShrinkPtrTable(JSPtrTable *table, const JSPtrTableInfo *info,
+ size_t newCount)
+{
+ size_t oldCapacity, capacity;
+ void **array;
+
+ JS_ASSERT(newCount <= table->count);
+ if (newCount == table->count)
+ return;
+
+ oldCapacity = PtrTableCapacity(table->count, info);
+ table->count = newCount;
+ capacity = PtrTableCapacity(newCount, info);
+
+ if (oldCapacity != capacity) {
+ array = table->array;
+ JS_ASSERT(array);
+ if (capacity == 0) {
+ free(array);
+ table->array = NULL;
+ return;
+ }
+ array = (void **) realloc(array, capacity * sizeof array[0]);
+ if (array)
+ table->array = array;
+ }
+#ifdef DEBUG
+ memset(table->array + newCount, JS_FREE_PATTERN,
+ (capacity - newCount) * sizeof table->array[0]);
+#endif
+}
+
+#ifdef JS_GCMETER
+# define METER(x) x
+#else
+# define METER(x) ((void) 0)
+#endif
+
+static JSBool
+NewGCArena(JSRuntime *rt, JSGCArenaList *arenaList)
+{
+ JSGCArena *a;
+ jsuword offset;
+ JSGCPageInfo *pi;
+ uint32 *bytesptr;
+
+ /* Check if we are allowed and can allocate a new arena. */
+ if (rt->gcBytes >= rt->gcMaxBytes)
+ return JS_FALSE;
+ a = (JSGCArena *)malloc(GC_ARENA_SIZE);
+ if (!a)
+ return JS_FALSE;
+
+ /* Initialize the JSGCPageInfo records at the start of every thing page. */
+ offset = (GC_PAGE_SIZE - ((jsuword)a->base & GC_PAGE_MASK)) & GC_PAGE_MASK;
+ JS_ASSERT((jsuword)a->base + offset == FIRST_THING_PAGE(a));
+ do {
+ pi = (JSGCPageInfo *) (a->base + offset);
+ pi->offsetInArena = offset;
+ pi->unscannedBitmap = 0;
+ offset += GC_PAGE_SIZE;
+ } while (offset < GC_THINGS_SIZE);
+
+ METER(++arenaList->stats.narenas);
+ METER(arenaList->stats.maxarenas
+ = JS_MAX(arenaList->stats.maxarenas, arenaList->stats.narenas));
+
+ a->list = arenaList;
+ a->prev = arenaList->last;
+ a->prevUnscanned = NULL;
+ a->unscannedPages = 0;
+ arenaList->last = a;
+ arenaList->lastLimit = 0;
+
+ bytesptr = (arenaList == &rt->gcArenaList[0])
+ ? &rt->gcBytes
+ : &rt->gcPrivateBytes;
+ *bytesptr += GC_ARENA_SIZE;
+
+ return JS_TRUE;
+}
+
+static void
+DestroyGCArena(JSRuntime *rt, JSGCArenaList *arenaList, JSGCArena **ap)
+{
+ JSGCArena *a;
+ uint32 *bytesptr;
+
+ a = *ap;
+ JS_ASSERT(a);
+ bytesptr = (arenaList == &rt->gcArenaList[0])
+ ? &rt->gcBytes
+ : &rt->gcPrivateBytes;
+ JS_ASSERT(*bytesptr >= GC_ARENA_SIZE);
+ *bytesptr -= GC_ARENA_SIZE;
+ METER(rt->gcStats.afree++);
+ METER(--arenaList->stats.narenas);
+ if (a == arenaList->last)
+ arenaList->lastLimit = (uint16)(a->prev ? GC_THINGS_SIZE : 0);
+ *ap = a->prev;
+
+#ifdef DEBUG
+ memset(a, JS_FREE_PATTERN, GC_ARENA_SIZE);
+#endif
+ free(a);
+}
+
+static void
+InitGCArenaLists(JSRuntime *rt)
+{
+ uintN i, thingSize;
+ JSGCArenaList *arenaList;
+
+ for (i = 0; i < GC_NUM_FREELISTS; i++) {
+ arenaList = &rt->gcArenaList[i];
+ thingSize = GC_FREELIST_NBYTES(i);
+ JS_ASSERT((size_t)(uint16)thingSize == thingSize);
+ arenaList->last = NULL;
+ arenaList->lastLimit = 0;
+ arenaList->thingSize = (uint16)thingSize;
+ arenaList->freeList = NULL;
+ METER(memset(&arenaList->stats, 0, sizeof arenaList->stats));
+ }
+}
+
+static void
+FinishGCArenaLists(JSRuntime *rt)
+{
+ uintN i;
+ JSGCArenaList *arenaList;
+
+ for (i = 0; i < GC_NUM_FREELISTS; i++) {
+ arenaList = &rt->gcArenaList[i];
+ while (arenaList->last)
+ DestroyGCArena(rt, arenaList, &arenaList->last);
+ arenaList->freeList = NULL;
+ }
+}
+
+uint8 *
+js_GetGCThingFlags(void *thing)
+{
+ JSGCPageInfo *pi;
+ jsuword offsetInArena, thingIndex;
+
+ pi = THING_TO_PAGE(thing);
+ offsetInArena = pi->offsetInArena;
+ JS_ASSERT(offsetInArena < GC_THINGS_SIZE);
+ thingIndex = ((offsetInArena & ~GC_PAGE_MASK) |
+ ((jsuword)thing & GC_PAGE_MASK)) / sizeof(JSGCThing);
+ JS_ASSERT(thingIndex < GC_PAGE_SIZE);
+ if (thingIndex >= (offsetInArena & GC_PAGE_MASK))
+ thingIndex += GC_THINGS_SIZE;
+ return (uint8 *)pi - offsetInArena + thingIndex;
+}
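+
+/*
+ * A concrete example of the flag-address computation, using the sample
+ * layout from the arena comment above (8-byte things, 1K pages, 8K things
+ * area): if a->base & 1023 == 1008, the first thing page starts 16 bytes
+ * into the arena and its pi->offsetInArena is 16.  A thing 64 bytes into
+ * that page gets thingIndex == 64 / 8 == 8 < 16, so its flag byte lives at
+ * base + 8, in the flags before the split.  A thing 128 bytes into the page
+ * gets thingIndex == 16 >= 16, so its flag byte lives at base + 16 + 8192,
+ * in the flags that follow the thing pages.
+ */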
+
+JSRuntime*
+js_GetGCStringRuntime(JSString *str)
+{
+ JSGCPageInfo *pi;
+ JSGCArenaList *list;
+
+ pi = THING_TO_PAGE(str);
+ list = PAGE_TO_ARENA(pi)->list;
+
+ JS_ASSERT(list->thingSize == sizeof(JSGCThing));
+ JS_ASSERT(GC_FREELIST_INDEX(sizeof(JSGCThing)) == 0);
+
+ return (JSRuntime *)((uint8 *)list - offsetof(JSRuntime, gcArenaList));
+}
+
+JSBool
+js_IsAboutToBeFinalized(JSContext *cx, void *thing)
+{
+ uint8 flags = *js_GetGCThingFlags(thing);
+
+ return !(flags & (GCF_MARK | GCF_LOCK | GCF_FINAL));
+}
+
+typedef void (*GCFinalizeOp)(JSContext *cx, JSGCThing *thing);
+
+#ifndef DEBUG
+# define js_FinalizeDouble NULL
+#endif
+
+#if !JS_HAS_XML_SUPPORT
+# define js_FinalizeXMLNamespace NULL
+# define js_FinalizeXMLQName NULL
+# define js_FinalizeXML NULL
+#endif
+
+static GCFinalizeOp gc_finalizers[GCX_NTYPES] = {
+ (GCFinalizeOp) js_FinalizeObject, /* GCX_OBJECT */
+ (GCFinalizeOp) js_FinalizeString, /* GCX_STRING */
+ (GCFinalizeOp) js_FinalizeDouble, /* GCX_DOUBLE */
+ (GCFinalizeOp) js_FinalizeString, /* GCX_MUTABLE_STRING */
+ NULL, /* GCX_PRIVATE */
+ (GCFinalizeOp) js_FinalizeXMLNamespace, /* GCX_NAMESPACE */
+ (GCFinalizeOp) js_FinalizeXMLQName, /* GCX_QNAME */
+ (GCFinalizeOp) js_FinalizeXML, /* GCX_XML */
+ NULL, /* GCX_EXTERNAL_STRING */
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
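+
+/*
+ * The trailing NULL entries, from GCX_EXTERNAL_STRING on, are the external
+ * string slots; js_ChangeExternalStringFinalizer below scans gc_finalizers
+ * from GCX_EXTERNAL_STRING onward to install or replace finalizers for them.
+ */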
+
+#ifdef GC_MARK_DEBUG
+static const char newborn_external_string[] = "newborn external string";
+
+static const char *gc_typenames[GCX_NTYPES] = {
+ "newborn object",
+ "newborn string",
+ "newborn double",
+ "newborn mutable string",
+ "newborn private",
+ "newborn Namespace",
+ "newborn QName",
+ "newborn XML",
+ newborn_external_string,
+ newborn_external_string,
+ newborn_external_string,
+ newborn_external_string,
+ newborn_external_string,
+ newborn_external_string,
+ newborn_external_string,
+ newborn_external_string
+};
+#endif
+
+intN
+js_ChangeExternalStringFinalizer(JSStringFinalizeOp oldop,
+ JSStringFinalizeOp newop)
+{
+ uintN i;
+
+ for (i = GCX_EXTERNAL_STRING; i < GCX_NTYPES; i++) {
+ if (gc_finalizers[i] == (GCFinalizeOp) oldop) {
+ gc_finalizers[i] = (GCFinalizeOp) newop;
+ return (intN) i;
+ }
+ }
+ return -1;
+}
+
+/* This is compatible with JSDHashEntryStub. */
+typedef struct JSGCRootHashEntry {
+ JSDHashEntryHdr hdr;
+ void *root;
+ const char *name;
+} JSGCRootHashEntry;
+
+/* Initial size of the gcRootsHash table (SWAG, small enough to amortize). */
+#define GC_ROOTS_SIZE 256
+#define GC_FINALIZE_LEN 1024
+
+JSBool
+js_InitGC(JSRuntime *rt, uint32 maxbytes)
+{
+ InitGCArenaLists(rt);
+ if (!JS_DHashTableInit(&rt->gcRootsHash, JS_DHashGetStubOps(), NULL,
+ sizeof(JSGCRootHashEntry), GC_ROOTS_SIZE)) {
+ rt->gcRootsHash.ops = NULL;
+ return JS_FALSE;
+ }
+ rt->gcLocksHash = NULL; /* create lazily */
+
+ /*
+ * Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
+ * for default backward API compatibility.
+ */
+ rt->gcMaxBytes = rt->gcMaxMallocBytes = maxbytes;
+
+ return JS_TRUE;
+}
+
+#ifdef JS_GCMETER
+JS_FRIEND_API(void)
+js_DumpGCStats(JSRuntime *rt, FILE *fp)
+{
+ uintN i;
+ size_t totalThings, totalMaxThings, totalBytes;
+
+ fprintf(fp, "\nGC allocation statistics:\n");
+
+#define UL(x) ((unsigned long)(x))
+#define ULSTAT(x) UL(rt->gcStats.x)
+ totalThings = 0;
+ totalMaxThings = 0;
+ totalBytes = 0;
+ for (i = 0; i < GC_NUM_FREELISTS; i++) {
+ JSGCArenaList *list = &rt->gcArenaList[i];
+ JSGCArenaStats *stats = &list->stats;
+ if (stats->maxarenas == 0) {
+ fprintf(fp, "ARENA LIST %u (thing size %lu): NEVER USED\n",
+ i, UL(GC_FREELIST_NBYTES(i)));
+ continue;
+ }
+ fprintf(fp, "ARENA LIST %u (thing size %lu):\n",
+ i, UL(GC_FREELIST_NBYTES(i)));
+ fprintf(fp, " arenas: %lu\n", UL(stats->narenas));
+ fprintf(fp, " max arenas: %lu\n", UL(stats->maxarenas));
+ fprintf(fp, " things: %lu\n", UL(stats->nthings));
+ fprintf(fp, " max things: %lu\n", UL(stats->maxthings));
+ fprintf(fp, " free list: %lu\n", UL(stats->freelen));
+ fprintf(fp, " free list density: %.1f%%\n",
+ stats->narenas == 0
+ ? 0.0
+ : (100.0 * list->thingSize * (jsdouble)stats->freelen /
+ (GC_THINGS_SIZE * (jsdouble)stats->narenas)));
+ fprintf(fp, " average free list density: %.1f%%\n",
+ stats->totalarenas == 0
+ ? 0.0
+ : (100.0 * list->thingSize * (jsdouble)stats->totalfreelen /
+ (GC_THINGS_SIZE * (jsdouble)stats->totalarenas)));
+ fprintf(fp, " recycles: %lu\n", UL(stats->recycle));
+ fprintf(fp, " recycle/alloc ratio: %.2f\n",
+ (jsdouble)stats->recycle /
+ (jsdouble)(stats->totalnew - stats->recycle));
+ totalThings += stats->nthings;
+ totalMaxThings += stats->maxthings;
+ totalBytes += GC_FREELIST_NBYTES(i) * stats->nthings;
+ }
+ fprintf(fp, "TOTAL STATS:\n");
+ fprintf(fp, " public bytes allocated: %lu\n", UL(rt->gcBytes));
+ fprintf(fp, " private bytes allocated: %lu\n", UL(rt->gcPrivateBytes));
+ fprintf(fp, " alloc attempts: %lu\n", ULSTAT(alloc));
+#ifdef JS_THREADSAFE
+ fprintf(fp, " alloc without locks: %lu\n", ULSTAT(localalloc));
+#endif
+ fprintf(fp, " total GC things: %lu\n", UL(totalThings));
+ fprintf(fp, " max total GC things: %lu\n", UL(totalMaxThings));
+ fprintf(fp, " GC things size: %lu\n", UL(totalBytes));
+ fprintf(fp, "allocation retries after GC: %lu\n", ULSTAT(retry));
+ fprintf(fp, " allocation failures: %lu\n", ULSTAT(fail));
+ fprintf(fp, " things born locked: %lu\n", ULSTAT(lockborn));
+ fprintf(fp, " valid lock calls: %lu\n", ULSTAT(lock));
+ fprintf(fp, " valid unlock calls: %lu\n", ULSTAT(unlock));
+ fprintf(fp, " mark recursion depth: %lu\n", ULSTAT(depth));
+ fprintf(fp, " maximum mark recursion: %lu\n", ULSTAT(maxdepth));
+ fprintf(fp, " mark C recursion depth: %lu\n", ULSTAT(cdepth));
+ fprintf(fp, " maximum mark C recursion: %lu\n", ULSTAT(maxcdepth));
+ fprintf(fp, " delayed scan bag adds: %lu\n", ULSTAT(unscanned));
+#ifdef DEBUG
+ fprintf(fp, " max delayed scan bag size: %lu\n", ULSTAT(maxunscanned));
+#endif
+ fprintf(fp, " maximum GC nesting level: %lu\n", ULSTAT(maxlevel));
+ fprintf(fp, "potentially useful GC calls: %lu\n", ULSTAT(poke));
+ fprintf(fp, " useless GC calls: %lu\n", ULSTAT(nopoke));
+ fprintf(fp, " thing arenas freed so far: %lu\n", ULSTAT(afree));
+ fprintf(fp, " stack segments scanned: %lu\n", ULSTAT(stackseg));
+ fprintf(fp, "stack segment slots scanned: %lu\n", ULSTAT(segslots));
+ fprintf(fp, "reachable closeable objects: %lu\n", ULSTAT(nclose));
+ fprintf(fp, " max reachable closeable: %lu\n", ULSTAT(maxnclose));
+ fprintf(fp, " scheduled close hooks: %lu\n", ULSTAT(closelater));
+ fprintf(fp, " max scheduled close hooks: %lu\n", ULSTAT(maxcloselater));
+#undef UL
+#undef ULSTAT
+
+#ifdef JS_ARENAMETER
+ JS_DumpArenaStats(fp);
+#endif
+}
+#endif
+
+#ifdef DEBUG
+static void
+CheckLeakedRoots(JSRuntime *rt);
+#endif
+
+void
+js_FinishGC(JSRuntime *rt)
+{
+#ifdef JS_ARENAMETER
+ JS_DumpArenaStats(stdout);
+#endif
+#ifdef JS_GCMETER
+ js_DumpGCStats(rt, stdout);
+#endif
+
+ FreePtrTable(&rt->gcIteratorTable, &iteratorTableInfo);
+#if JS_HAS_GENERATORS
+ rt->gcCloseState.reachableList = NULL;
+ METER(rt->gcStats.nclose = 0);
+ rt->gcCloseState.todoQueue = NULL;
+#endif
+ FinishGCArenaLists(rt);
+
+ if (rt->gcRootsHash.ops) {
+#ifdef DEBUG
+ CheckLeakedRoots(rt);
+#endif
+ JS_DHashTableFinish(&rt->gcRootsHash);
+ rt->gcRootsHash.ops = NULL;
+ }
+ if (rt->gcLocksHash) {
+ JS_DHashTableDestroy(rt->gcLocksHash);
+ rt->gcLocksHash = NULL;
+ }
+}
+
+JSBool
+js_AddRoot(JSContext *cx, void *rp, const char *name)
+{
+ JSBool ok = js_AddRootRT(cx->runtime, rp, name);
+ if (!ok)
+ JS_ReportOutOfMemory(cx);
+ return ok;
+}
+
+JSBool
+js_AddRootRT(JSRuntime *rt, void *rp, const char *name)
+{
+ JSBool ok;
+ JSGCRootHashEntry *rhe;
+
+ /*
+ * Due to the long-standing, but now removed, use of rt->gcLock across the
+ * bulk of js_GC, API users have come to depend on JS_AddRoot etc. locking
+ * properly with a racing GC, without calling JS_AddRoot from a request.
+ * We have to preserve API compatibility here, now that we avoid holding
+ * rt->gcLock across the mark phase (including the root hashtable mark).
+ *
+ * If the GC is running and we're called on another thread, wait for this
+ * GC activation to finish. We can safely wait here (in the case where we
+ * are called within a request on another thread's context) without fear
+ * of deadlock because the GC doesn't set rt->gcRunning until after it has
+ * waited for all active requests to end.
+ */
+ JS_LOCK_GC(rt);
+#ifdef JS_THREADSAFE
+ JS_ASSERT(!rt->gcRunning || rt->gcLevel > 0);
+ if (rt->gcRunning && rt->gcThread->id != js_CurrentThreadId()) {
+ do {
+ JS_AWAIT_GC_DONE(rt);
+ } while (rt->gcLevel > 0);
+ }
+#endif
+ rhe = (JSGCRootHashEntry *) JS_DHashTableOperate(&rt->gcRootsHash, rp,
+ JS_DHASH_ADD);
+ if (rhe) {
+ rhe->root = rp;
+ rhe->name = name;
+ ok = JS_TRUE;
+ } else {
+ ok = JS_FALSE;
+ }
+ JS_UNLOCK_GC(rt);
+ return ok;
+}
+
+JSBool
+js_RemoveRoot(JSRuntime *rt, void *rp)
+{
+ /*
+ * Due to the JS_RemoveRootRT API, we may be called outside of a request.
+ * Same synchronization drill as above in js_AddRoot.
+ */
+ JS_LOCK_GC(rt);
+#ifdef JS_THREADSAFE
+ JS_ASSERT(!rt->gcRunning || rt->gcLevel > 0);
+ if (rt->gcRunning && rt->gcThread->id != js_CurrentThreadId()) {
+ do {
+ JS_AWAIT_GC_DONE(rt);
+ } while (rt->gcLevel > 0);
+ }
+#endif
+ (void) JS_DHashTableOperate(&rt->gcRootsHash, rp, JS_DHASH_REMOVE);
+ rt->gcPoke = JS_TRUE;
+ JS_UNLOCK_GC(rt);
+ return JS_TRUE;
+}
+
+#ifdef DEBUG
+
+JS_STATIC_DLL_CALLBACK(JSDHashOperator)
+js_root_printer(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 i, void *arg)
+{
+ uint32 *leakedroots = (uint32 *)arg;
+ JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
+
+ (*leakedroots)++;
+ fprintf(stderr,
+ "JS engine warning: leaking GC root \'%s\' at %p\n",
+ rhe->name ? (char *)rhe->name : "", rhe->root);
+
+ return JS_DHASH_NEXT;
+}
+
+static void
+CheckLeakedRoots(JSRuntime *rt)
+{
+ uint32 leakedroots = 0;
+
+ /* In debug builds, warn (but don't assert) about any remaining roots. */
+ JS_DHashTableEnumerate(&rt->gcRootsHash, js_root_printer,
+ &leakedroots);
+ if (leakedroots > 0) {
+ if (leakedroots == 1) {
+ fprintf(stderr,
+"JS engine warning: 1 GC root remains after destroying the JSRuntime.\n"
+" This root may point to freed memory. Objects reachable\n"
+" through it have not been finalized.\n");
+ } else {
+ fprintf(stderr,
+"JS engine warning: %lu GC roots remain after destroying the JSRuntime.\n"
+" These roots may point to freed memory. Objects reachable\n"
+" through them have not been finalized.\n",
+ (unsigned long) leakedroots);
+ }
+ }
+}
+
+typedef struct NamedRootDumpArgs {
+ void (*dump)(const char *name, void *rp, void *data);
+ void *data;
+} NamedRootDumpArgs;
+
+JS_STATIC_DLL_CALLBACK(JSDHashOperator)
+js_named_root_dumper(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
+ void *arg)
+{
+ NamedRootDumpArgs *args = (NamedRootDumpArgs *) arg;
+ JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
+
+ if (rhe->name)
+ args->dump(rhe->name, rhe->root, args->data);
+ return JS_DHASH_NEXT;
+}
+
+void
+js_DumpNamedRoots(JSRuntime *rt,
+ void (*dump)(const char *name, void *rp, void *data),
+ void *data)
+{
+ NamedRootDumpArgs args;
+
+ args.dump = dump;
+ args.data = data;
+ JS_DHashTableEnumerate(&rt->gcRootsHash, js_named_root_dumper, &args);
+}
+
+#endif /* DEBUG */
+
+typedef struct GCRootMapArgs {
+ JSGCRootMapFun map;
+ void *data;
+} GCRootMapArgs;
+
+JS_STATIC_DLL_CALLBACK(JSDHashOperator)
+js_gcroot_mapper(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
+ void *arg)
+{
+ GCRootMapArgs *args = (GCRootMapArgs *) arg;
+ JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
+ intN mapflags;
+ JSDHashOperator op;
+
+ mapflags = args->map(rhe->root, rhe->name, args->data);
+
+#if JS_MAP_GCROOT_NEXT == JS_DHASH_NEXT && \
+ JS_MAP_GCROOT_STOP == JS_DHASH_STOP && \
+ JS_MAP_GCROOT_REMOVE == JS_DHASH_REMOVE
+ op = (JSDHashOperator)mapflags;
+#else
+ op = JS_DHASH_NEXT;
+ if (mapflags & JS_MAP_GCROOT_STOP)
+ op |= JS_DHASH_STOP;
+ if (mapflags & JS_MAP_GCROOT_REMOVE)
+ op |= JS_DHASH_REMOVE;
+#endif
+
+ return op;
+}
+
+uint32
+js_MapGCRoots(JSRuntime *rt, JSGCRootMapFun map, void *data)
+{
+ GCRootMapArgs args;
+ uint32 rv;
+
+ args.map = map;
+ args.data = data;
+ JS_LOCK_GC(rt);
+ rv = JS_DHashTableEnumerate(&rt->gcRootsHash, js_gcroot_mapper, &args);
+ JS_UNLOCK_GC(rt);
+ return rv;
+}
+
+JSBool
+js_RegisterCloseableIterator(JSContext *cx, JSObject *obj)
+{
+ JSRuntime *rt;
+ JSBool ok;
+
+ rt = cx->runtime;
+ JS_ASSERT(!rt->gcRunning);
+
+ JS_LOCK_GC(rt);
+ ok = AddToPtrTable(cx, &rt->gcIteratorTable, &iteratorTableInfo, obj);
+ JS_UNLOCK_GC(rt);
+ return ok;
+}
+
+static void
+CloseIteratorStates(JSContext *cx)
+{
+ JSRuntime *rt;
+ size_t count, newCount, i;
+ void **array;
+ JSObject *obj;
+
+ rt = cx->runtime;
+ count = rt->gcIteratorTable.count;
+ array = rt->gcIteratorTable.array;
+
+ newCount = 0;
+ for (i = 0; i != count; ++i) {
+ obj = (JSObject *)array[i];
+ if (js_IsAboutToBeFinalized(cx, obj))
+ js_CloseIteratorState(cx, obj);
+ else
+ array[newCount++] = obj;
+ }
+ ShrinkPtrTable(&rt->gcIteratorTable, &iteratorTableInfo, newCount);
+}
+
+#if JS_HAS_GENERATORS
+
+void
+js_RegisterGenerator(JSContext *cx, JSGenerator *gen)
+{
+ JSRuntime *rt;
+
+ rt = cx->runtime;
+ JS_ASSERT(!rt->gcRunning);
+ JS_ASSERT(rt->state != JSRTS_LANDING);
+ JS_ASSERT(gen->state == JSGEN_NEWBORN);
+
+ JS_LOCK_GC(rt);
+ gen->next = rt->gcCloseState.reachableList;
+ rt->gcCloseState.reachableList = gen;
+ METER(rt->gcStats.nclose++);
+ METER(rt->gcStats.maxnclose = JS_MAX(rt->gcStats.maxnclose,
+ rt->gcStats.nclose));
+ JS_UNLOCK_GC(rt);
+}
+
+/*
+ * We do not run close hooks when the parent scope of the generator instance
+ * becomes unreachable, to prevent denial-of-service and resource leakage from
+ * misbehaved generators.
+ *
+ * Called from the GC.
+ */
+static JSBool
+CanScheduleCloseHook(JSGenerator *gen)
+{
+ JSObject *parent;
+ JSBool canSchedule;
+
+ /* Avoid OBJ_GET_PARENT overhead as we are in GC. */
+ parent = JSVAL_TO_OBJECT(gen->obj->slots[JSSLOT_PARENT]);
+ canSchedule = *js_GetGCThingFlags(parent) & GCF_MARK;
+#ifdef DEBUG_igor
+ if (!canSchedule) {
+ fprintf(stderr, "GEN: Kill without schedule, gen=%p parent=%p\n",
+ (void *)gen, (void *)parent);
+ }
+#endif
+ return canSchedule;
+}
+
+/*
+ * Check if we should delay execution of the close hook.
+ *
+ * Called outside GC or any locks.
+ *
+ * XXX The current implementation is a hack that embeds the knowledge of the
+ * browser embedding pending the resolution of bug 352788. In the browser we
+ * must not close any generators that came from a page that is currently in
+ * the browser history. We detect that using the fact that in the browser the
+ * scope is in history if scope->outerObject->innerObject != scope.
+ */
+static JSBool
+ShouldDeferCloseHook(JSContext *cx, JSGenerator *gen, JSBool *defer)
+{
+ JSObject *parent, *obj;
+ JSClass *clasp;
+ JSExtendedClass *xclasp;
+
+ /*
+ * This is called outside any locks, so use thread-safe macros to access
+ * parent and classes.
+ */
+ *defer = JS_FALSE;
+ parent = OBJ_GET_PARENT(cx, gen->obj);
+ clasp = OBJ_GET_CLASS(cx, parent);
+ if (clasp->flags & JSCLASS_IS_EXTENDED) {
+ xclasp = (JSExtendedClass *)clasp;
+ if (xclasp->outerObject) {
+ obj = xclasp->outerObject(cx, parent);
+ if (!obj)
+ return JS_FALSE;
+ OBJ_TO_INNER_OBJECT(cx, obj);
+ if (!obj)
+ return JS_FALSE;
+ *defer = obj != parent;
+ }
+ }
+#ifdef DEBUG_igor
+ if (*defer) {
+ fprintf(stderr, "GEN: deferring, gen=%p parent=%p\n",
+ (void *)gen, (void *)parent);
+ }
+#endif
+ return JS_TRUE;
+}
+
+/*
+ * Find all unreachable generators and move them to the todo queue from
+ * rt->gcCloseState.reachableList to execute their close hooks after the GC
+ * cycle completes. To ensure liveness during the sweep phase we mark all
+ * generators we are going to close later.
+ */
+static void
+FindAndMarkObjectsToClose(JSContext *cx, JSGCInvocationKind gckind,
+ JSGenerator **todoQueueTail)
+{
+ JSRuntime *rt;
+ JSGenerator *todo, **genp, *gen;
+
+ rt = cx->runtime;
+ todo = NULL;
+ genp = &rt->gcCloseState.reachableList;
+ while ((gen = *genp) != NULL) {
+ if (*js_GetGCThingFlags(gen->obj) & GCF_MARK) {
+ genp = &gen->next;
+ } else {
+ /* Generator must not be executing when it becomes unreachable. */
+ JS_ASSERT(gen->state == JSGEN_NEWBORN ||
+ gen->state == JSGEN_OPEN ||
+ gen->state == JSGEN_CLOSED);
+
+ *genp = gen->next;
+ if (gen->state == JSGEN_OPEN &&
+ js_FindFinallyHandler(gen->frame.script, gen->frame.pc) &&
+ CanScheduleCloseHook(gen)) {
+ /*
+ * Generator yielded inside a try with a finally block.
+ * Schedule it for closing.
+ *
+ * We keep generators that yielded outside try-with-finally
+ * with gen->state == JSGEN_OPEN. The finalizer must deal with
+ * open generators as we may skip the close hooks, see below.
+ */
+ gen->next = NULL;
+ *todoQueueTail = gen;
+ todoQueueTail = &gen->next;
+ if (!todo)
+ todo = gen;
+ METER(JS_ASSERT(rt->gcStats.nclose));
+ METER(rt->gcStats.nclose--);
+ METER(rt->gcStats.closelater++);
+ METER(rt->gcStats.maxcloselater
+ = JS_MAX(rt->gcStats.maxcloselater,
+ rt->gcStats.closelater));
+ }
+ }
+ }
+
+ if (gckind == GC_LAST_CONTEXT) {
+ /*
+ * Remove scheduled hooks on shutdown as it is too late to run them:
+ * we do not allow execution of arbitrary scripts at this point.
+ */
+ rt->gcCloseState.todoQueue = NULL;
+ } else {
+ /*
+ * Mark just-found unreachable generators *after* we scan the global
+ * list to prevent a generator that refers to other unreachable
+ * generators from keeping them on gcCloseState.reachableList.
+ */
+ for (gen = todo; gen; gen = gen->next)
+ GC_MARK(cx, gen->obj, "newly scheduled generator");
+ }
+}
+
+/*
+ * Mark unreachable generators already scheduled to close and return the tail
+ * pointer to JSGCCloseState.todoQueue.
+ */
+static JSGenerator **
+MarkScheduledGenerators(JSContext *cx)
+{
+ JSRuntime *rt;
+ JSGenerator **genp, *gen;
+
+ rt = cx->runtime;
+ genp = &rt->gcCloseState.todoQueue;
+ while ((gen = *genp) != NULL) {
+ if (CanScheduleCloseHook(gen)) {
+ GC_MARK(cx, gen->obj, "scheduled generator");
+ genp = &gen->next;
+ } else {
+ /* Discard the generator from the list if its close hook can no longer be scheduled. */
+ *genp = gen->next;
+ METER(JS_ASSERT(rt->gcStats.closelater > 0));
+ METER(rt->gcStats.closelater--);
+ }
+ }
+ return genp;
+}
+
+#ifdef JS_THREADSAFE
+# define GC_RUNNING_CLOSE_HOOKS_PTR(cx) \
+ (&(cx)->thread->gcRunningCloseHooks)
+#else
+# define GC_RUNNING_CLOSE_HOOKS_PTR(cx) \
+ (&(cx)->runtime->gcCloseState.runningCloseHook)
+#endif
+
+typedef struct JSTempCloseList {
+ JSTempValueRooter tvr;
+ JSGenerator *head;
+} JSTempCloseList;
+
+JS_STATIC_DLL_CALLBACK(void)
+mark_temp_close_list(JSContext *cx, JSTempValueRooter *tvr)
+{
+ JSTempCloseList *list = (JSTempCloseList *)tvr;
+ JSGenerator *gen;
+
+ for (gen = list->head; gen; gen = gen->next)
+ GC_MARK(cx, gen->obj, "temp list generator");
+}
+
+#define JS_PUSH_TEMP_CLOSE_LIST(cx, tempList) \
+ JS_PUSH_TEMP_ROOT_MARKER(cx, mark_temp_close_list, &(tempList)->tvr)
+
+#define JS_POP_TEMP_CLOSE_LIST(cx, tempList) \
+ JS_BEGIN_MACRO \
+ JS_ASSERT((tempList)->tvr.u.marker == mark_temp_close_list); \
+ JS_POP_TEMP_ROOT(cx, &(tempList)->tvr); \
+ JS_END_MACRO
+
+JSBool
+js_RunCloseHooks(JSContext *cx)
+{
+ JSRuntime *rt;
+ JSTempCloseList tempList;
+ JSStackFrame *fp;
+ JSGenerator **genp, *gen;
+ JSBool ok, defer;
+#if JS_GCMETER
+ uint32 deferCount = 0;
+#endif
+
+ rt = cx->runtime;
+
+ /*
+ * It is OK to access todoQueue outside the lock here. When many threads
+ * update the todo list, accessing a stale value of todoQueue in the
+ * worst case just delays the execution of close hooks.
+ */
+ if (!rt->gcCloseState.todoQueue)
+ return JS_TRUE;
+
+ /*
+ * To prevent an infinite loop when a close hook creates more objects with
+ * close hooks and then triggers a GC, we ignore recursive invocations of
+ * js_RunCloseHooks and limit the number of hooks to execute to the initial
+ * size of the list.
+ */
+ if (*GC_RUNNING_CLOSE_HOOKS_PTR(cx))
+ return JS_TRUE;
+
+ *GC_RUNNING_CLOSE_HOOKS_PTR(cx) = JS_TRUE;
+
+ JS_LOCK_GC(rt);
+ tempList.head = rt->gcCloseState.todoQueue;
+ JS_PUSH_TEMP_CLOSE_LIST(cx, &tempList);
+ rt->gcCloseState.todoQueue = NULL;
+ METER(rt->gcStats.closelater = 0);
+ rt->gcPoke = JS_TRUE;
+ JS_UNLOCK_GC(rt);
+
+ /*
+ * Set aside cx->fp since we do not want a close hook using caller or
+ * other means to backtrace into whatever stack might be active when
+ * running the hook. We store the current frame on the dormant list to
+ * protect against GC that the hook can trigger.
+ */
+ fp = cx->fp;
+ if (fp) {
+ JS_ASSERT(!fp->dormantNext);
+ fp->dormantNext = cx->dormantFrameChain;
+ cx->dormantFrameChain = fp;
+ }
+ cx->fp = NULL;
+
+ genp = &tempList.head;
+ ok = JS_TRUE;
+ while ((gen = *genp) != NULL) {
+ ok = ShouldDeferCloseHook(cx, gen, &defer);
+ if (!ok) {
+ /* Quit ASAP discarding the hook. */
+ *genp = gen->next;
+ break;
+ }
+ if (defer) {
+ genp = &gen->next;
+ METER(deferCount++);
+ continue;
+ }
+ ok = js_CloseGeneratorObject(cx, gen);
+
+ /*
+ * Unlink the generator after closing it to make sure it always stays
+ * rooted through tempList.
+ */
+ *genp = gen->next;
+
+ if (cx->throwing) {
+ /*
+ * Report the exception thrown by the close hook and continue to
+ * execute the rest of the hooks.
+ */
+ if (!js_ReportUncaughtException(cx))
+ JS_ClearPendingException(cx);
+ ok = JS_TRUE;
+ } else if (!ok) {
+ /*
+ * Assume this is a stop signal from the branch callback or
+ * other quit ASAP condition. Break execution until the next
+ * invocation of js_RunCloseHooks.
+ */
+ break;
+ }
+ }
+
+ cx->fp = fp;
+ if (fp) {
+ JS_ASSERT(cx->dormantFrameChain == fp);
+ cx->dormantFrameChain = fp->dormantNext;
+ fp->dormantNext = NULL;
+ }
+
+ if (tempList.head) {
+ /*
+ * Some close hooks were not yet executed, put them back into the
+ * scheduled list.
+ */
+ while ((gen = *genp) != NULL) {
+ genp = &gen->next;
+ METER(deferCount++);
+ }
+
+ /* Now genp is a pointer to the tail of tempList. */
+ JS_LOCK_GC(rt);
+ *genp = rt->gcCloseState.todoQueue;
+ rt->gcCloseState.todoQueue = tempList.head;
+ METER(rt->gcStats.closelater += deferCount);
+ METER(rt->gcStats.maxcloselater
+ = JS_MAX(rt->gcStats.maxcloselater, rt->gcStats.closelater));
+ JS_UNLOCK_GC(rt);
+ }
+
+ JS_POP_TEMP_CLOSE_LIST(cx, &tempList);
+ *GC_RUNNING_CLOSE_HOOKS_PTR(cx) = JS_FALSE;
+
+ return ok;
+}
+
+#endif /* JS_HAS_GENERATORS */
+
+#if defined(DEBUG_brendan) || defined(DEBUG_timeless)
+#define DEBUG_gchist
+#endif
+
+#ifdef DEBUG_gchist
+#define NGCHIST 64
+
+static struct GCHist {
+ JSBool lastDitch;
+ JSGCThing *freeList;
+} gchist[NGCHIST];
+
+unsigned gchpos;
+#endif
+
+void *
+js_NewGCThing(JSContext *cx, uintN flags, size_t nbytes)
+{
+ JSRuntime *rt;
+ uintN flindex;
+ JSBool doGC;
+ JSGCThing *thing;
+ uint8 *flagp, *firstPage;
+ JSGCArenaList *arenaList;
+ jsuword offset;
+ JSGCArena *a;
+ JSLocalRootStack *lrs;
+#ifdef JS_THREADSAFE
+ JSBool gcLocked;
+ uintN localMallocBytes;
+ JSGCThing **flbase, **lastptr;
+ JSGCThing *tmpthing;
+ uint8 *tmpflagp;
+ uintN maxFreeThings; /* max to take from the global free list */
+ METER(size_t nfree);
+#endif
+
+ rt = cx->runtime;
+ METER(rt->gcStats.alloc++); /* this is not thread-safe */
+ nbytes = JS_ROUNDUP(nbytes, sizeof(JSGCThing));
+ flindex = GC_FREELIST_INDEX(nbytes);
+
+#ifdef JS_THREADSAFE
+ gcLocked = JS_FALSE;
+ JS_ASSERT(cx->thread);
+ flbase = cx->thread->gcFreeLists;
+ JS_ASSERT(flbase);
+ thing = flbase[flindex];
+ localMallocBytes = cx->thread->gcMallocBytes;
+ if (thing && rt->gcMaxMallocBytes - rt->gcMallocBytes > localMallocBytes) {
+ flagp = thing->flagp;
+ flbase[flindex] = thing->next;
+ METER(rt->gcStats.localalloc++); /* this is not thread-safe */
+ goto success;
+ }
+
+ JS_LOCK_GC(rt);
+ gcLocked = JS_TRUE;
+
+ /* Transfer thread-local counter to global one. */
+ if (localMallocBytes != 0) {
+ cx->thread->gcMallocBytes = 0;
+ if (rt->gcMaxMallocBytes - rt->gcMallocBytes < localMallocBytes)
+ rt->gcMallocBytes = rt->gcMaxMallocBytes;
+ else
+ rt->gcMallocBytes += localMallocBytes;
+ }
+#endif
+ JS_ASSERT(!rt->gcRunning);
+ if (rt->gcRunning) {
+ METER(rt->gcStats.finalfail++);
+ JS_UNLOCK_GC(rt);
+ return NULL;
+ }
+
+#ifdef TOO_MUCH_GC
+#ifdef WAY_TOO_MUCH_GC
+ rt->gcPoke = JS_TRUE;
+#endif
+ doGC = JS_TRUE;
+#else
+ doGC = (rt->gcMallocBytes >= rt->gcMaxMallocBytes);
+#endif
+
+ arenaList = &rt->gcArenaList[flindex];
+ for (;;) {
+ if (doGC) {
+ /*
+ * Keep rt->gcLock across the call into js_GC so we don't starve
+ * and lose to racing threads who deplete the heap just after
+ * js_GC has replenished it (or has synchronized with a racing
+ * GC that collected a bunch of garbage). This unfair scheduling
+ * can happen on certain operating systems. For the gory details,
+ * see bug 162779 at https://bugzilla.mozilla.org/.
+ */
+ js_GC(cx, GC_LAST_DITCH);
+ METER(rt->gcStats.retry++);
+ }
+
+ /* Try to get thing from the free list. */
+ thing = arenaList->freeList;
+ if (thing) {
+ arenaList->freeList = thing->next;
+ flagp = thing->flagp;
+ JS_ASSERT(*flagp & GCF_FINAL);
+ METER(arenaList->stats.freelen--);
+ METER(arenaList->stats.recycle++);
+
+#ifdef JS_THREADSAFE
+ /*
+ * Refill the local free list by taking several things from the
+ * global free list unless we are still at the rt->gcMaxMallocBytes
+ * barrier or the free list is already populated. The former
+ * happens when the GC is canceled due to !gcCallback(cx, JSGC_BEGIN)
+ * or no gcPoke. The latter is caused by allocating new things
+ * in gcCallback(cx, JSGC_END).
+ */
+ if (rt->gcMallocBytes >= rt->gcMaxMallocBytes || flbase[flindex])
+ break;
+ tmpthing = arenaList->freeList;
+ if (tmpthing) {
+ maxFreeThings = MAX_THREAD_LOCAL_THINGS;
+ do {
+ if (!tmpthing->next)
+ break;
+ tmpthing = tmpthing->next;
+ } while (--maxFreeThings != 0);
+
+ flbase[flindex] = arenaList->freeList;
+ arenaList->freeList = tmpthing->next;
+ tmpthing->next = NULL;
+ }
+#endif
+ break;
+ }
+
+ /* Allocate from the tail of last arena or from new arena if we can. */
+ if ((arenaList->last && arenaList->lastLimit != GC_THINGS_SIZE) ||
+ NewGCArena(rt, arenaList)) {
+
+ offset = arenaList->lastLimit;
+ if ((offset & GC_PAGE_MASK) == 0) {
+ /*
+ * Skip JSGCPageInfo record located at GC_PAGE_SIZE boundary.
+ */
+ offset += PAGE_THING_GAP(nbytes);
+ }
+ JS_ASSERT(offset + nbytes <= GC_THINGS_SIZE);
+ arenaList->lastLimit = (uint16)(offset + nbytes);
+ a = arenaList->last;
+ firstPage = (uint8 *)FIRST_THING_PAGE(a);
+ thing = (JSGCThing *)(firstPage + offset);
+ flagp = a->base + offset / sizeof(JSGCThing);
+ if (flagp >= firstPage)
+ flagp += GC_THINGS_SIZE;
+ METER(++arenaList->stats.nthings);
+ METER(arenaList->stats.maxthings =
+ JS_MAX(arenaList->stats.nthings,
+ arenaList->stats.maxthings));
+
+#ifdef JS_THREADSAFE
+ /*
+ * Refill the local free list by taking free things from the last
+ * arena. Prefer to order free things by ascending address in the
+ * (unscientific) hope of better cache locality.
+ */
+ if (rt->gcMallocBytes >= rt->gcMaxMallocBytes || flbase[flindex])
+ break;
+ METER(nfree = 0);
+ lastptr = &flbase[flindex];
+ maxFreeThings = MAX_THREAD_LOCAL_THINGS;
+ for (offset = arenaList->lastLimit;
+ offset != GC_THINGS_SIZE && maxFreeThings-- != 0;
+ offset += nbytes) {
+ if ((offset & GC_PAGE_MASK) == 0)
+ offset += PAGE_THING_GAP(nbytes);
+ JS_ASSERT(offset + nbytes <= GC_THINGS_SIZE);
+ tmpflagp = a->base + offset / sizeof(JSGCThing);
+ if (tmpflagp >= firstPage)
+ tmpflagp += GC_THINGS_SIZE;
+
+ tmpthing = (JSGCThing *)(firstPage + offset);
+ tmpthing->flagp = tmpflagp;
+ *tmpflagp = GCF_FINAL; /* signifying that thing is free */
+
+ *lastptr = tmpthing;
+ lastptr = &tmpthing->next;
+ METER(++nfree);
+ }
+ arenaList->lastLimit = offset;
+ *lastptr = NULL;
+ METER(arenaList->stats.freelen += nfree);
+#endif
+ break;
+ }
+
+ /* Consider doing a "last ditch" GC unless already tried. */
+ if (doGC)
+ goto fail;
+ rt->gcPoke = JS_TRUE;
+ doGC = JS_TRUE;
+ }
+
+ /* We successfully allocated the thing. */
+#ifdef JS_THREADSAFE
+ success:
+#endif
+ lrs = cx->localRootStack;
+ if (lrs) {
+ /*
+ * If we're in a local root scope, don't set newborn[type] at all, to
+ * avoid entraining garbage from it for an unbounded amount of time
+ * on this context. A caller will leave the local root scope and pop
+ * this reference, allowing thing to be GC'd if it has no other refs.
+ * See JS_EnterLocalRootScope and related APIs.
+ */
+ if (js_PushLocalRoot(cx, lrs, (jsval) thing) < 0) {
+ /*
+ * When we fail for a thing allocated through the tail of the last
+ * arena, the thing's flag byte is not initialized. So to prevent the GC
+ * from accessing the uninitialized flags during finalization, we
+ * always mark the thing as final. See bug 337407.
+ */
+ *flagp = GCF_FINAL;
+ goto fail;
+ }
+ } else {
+ /*
+ * No local root scope, so we're stuck with the old, fragile model of
+ * depending on a pigeon-hole newborn per type per context.
+ */
+ cx->weakRoots.newborn[flags & GCF_TYPEMASK] = thing;
+ }
+
+ /* We can't fail now, so update flags and rt->gc{,Private}Bytes. */
+ *flagp = (uint8)flags;
+
+ /*
+ * Clear thing before unlocking in case a GC run is about to scan it,
+ * finding it via newborn[].
+ */
+ thing->next = NULL;
+ thing->flagp = NULL;
+#ifdef DEBUG_gchist
+ gchist[gchpos].lastDitch = doGC;
+ gchist[gchpos].freeList = rt->gcArenaList[flindex].freeList;
+ if (++gchpos == NGCHIST)
+ gchpos = 0;
+#endif
+ METER(if (flags & GCF_LOCK) rt->gcStats.lockborn++);
+ METER(++rt->gcArenaList[flindex].stats.totalnew);
+#ifdef JS_THREADSAFE
+ if (gcLocked)
+ JS_UNLOCK_GC(rt);
+#endif
+ return thing;
+
+fail:
+#ifdef JS_THREADSAFE
+ if (gcLocked)
+ JS_UNLOCK_GC(rt);
+#endif
+ METER(rt->gcStats.fail++);
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+}
+
+JSBool
+js_LockGCThing(JSContext *cx, void *thing)
+{
+ JSBool ok = js_LockGCThingRT(cx->runtime, thing);
+ if (!ok)
+ JS_ReportOutOfMemory(cx);
+ return ok;
+}
+
+/*
+ * Deep GC-things can't be locked just by setting the GCF_LOCK bit, because
+ * their descendants must be marked by the GC. To find them during the mark
+ * phase, they are added to rt->gcLocksHash, which is created lazily.
+ *
+ * NB: we depend on the order of GC-thing type indexes here!
+ */
+#define GC_TYPE_IS_STRING(t) ((t) == GCX_STRING || \
+ (t) >= GCX_EXTERNAL_STRING)
+#define GC_TYPE_IS_XML(t) ((unsigned)((t) - GCX_NAMESPACE) <= \
+ (unsigned)(GCX_XML - GCX_NAMESPACE))
+#define GC_TYPE_IS_DEEP(t) ((t) == GCX_OBJECT || GC_TYPE_IS_XML(t))
+
+#define IS_DEEP_STRING(t,o) (GC_TYPE_IS_STRING(t) && \
+ JSSTRING_IS_DEPENDENT((JSString *)(o)))
+
+#define GC_THING_IS_DEEP(t,o) (GC_TYPE_IS_DEEP(t) || IS_DEEP_STRING(t, o))
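+
+/*
+ * So, for example, objects, XML things (Namespace, QName, XML) and dependent
+ * strings count as "deep" and get an rt->gcLocksHash entry when locked, while
+ * flat strings and doubles are "shallow" and a single lock is recorded with
+ * just the GCF_LOCK flag bit, as described above.
+ */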
+
+/* This is compatible with JSDHashEntryStub. */
+typedef struct JSGCLockHashEntry {
+ JSDHashEntryHdr hdr;
+ const JSGCThing *thing;
+ uint32 count;
+} JSGCLockHashEntry;
+
+JSBool
+js_LockGCThingRT(JSRuntime *rt, void *thing)
+{
+ JSBool ok, deep;
+ uint8 *flagp;
+ uintN flags, lock, type;
+ JSGCLockHashEntry *lhe;
+
+ ok = JS_TRUE;
+ if (!thing)
+ return ok;
+
+ flagp = js_GetGCThingFlags(thing);
+
+ JS_LOCK_GC(rt);
+ flags = *flagp;
+ lock = (flags & GCF_LOCK);
+ type = (flags & GCF_TYPEMASK);
+ deep = GC_THING_IS_DEEP(type, thing);
+
+ /*
+ * Avoid adding a rt->gcLocksHash entry for shallow things until someone
+ * nests a lock -- then start such an entry with a count of 2, not 1.
+ */
+ if (lock || deep) {
+ if (!rt->gcLocksHash) {
+ rt->gcLocksHash =
+ JS_NewDHashTable(JS_DHashGetStubOps(), NULL,
+ sizeof(JSGCLockHashEntry),
+ GC_ROOTS_SIZE);
+ if (!rt->gcLocksHash) {
+ ok = JS_FALSE;
+ goto done;
+ }
+ } else if (lock == 0) {
+#ifdef DEBUG
+ JSDHashEntryHdr *hdr =
+ JS_DHashTableOperate(rt->gcLocksHash, thing,
+ JS_DHASH_LOOKUP);
+ JS_ASSERT(JS_DHASH_ENTRY_IS_FREE(hdr));
+#endif
+ }
+
+ lhe = (JSGCLockHashEntry *)
+ JS_DHashTableOperate(rt->gcLocksHash, thing, JS_DHASH_ADD);
+ if (!lhe) {
+ ok = JS_FALSE;
+ goto done;
+ }
+ if (!lhe->thing) {
+ lhe->thing = thing;
+ lhe->count = deep ? 1 : 2;
+ } else {
+ JS_ASSERT(lhe->count >= 1);
+ lhe->count++;
+ }
+ }
+
+ *flagp = (uint8)(flags | GCF_LOCK);
+ METER(rt->gcStats.lock++);
+ ok = JS_TRUE;
+done:
+ JS_UNLOCK_GC(rt);
+ return ok;
+}
+
+JSBool
+js_UnlockGCThingRT(JSRuntime *rt, void *thing)
+{
+ uint8 *flagp, flags;
+ JSGCLockHashEntry *lhe;
+
+ if (!thing)
+ return JS_TRUE;
+
+ flagp = js_GetGCThingFlags(thing);
+ JS_LOCK_GC(rt);
+ flags = *flagp;
+
+ if (flags & GCF_LOCK) {
+ if (!rt->gcLocksHash ||
+ (lhe = (JSGCLockHashEntry *)
+ JS_DHashTableOperate(rt->gcLocksHash, thing,
+ JS_DHASH_LOOKUP),
+ JS_DHASH_ENTRY_IS_FREE(&lhe->hdr))) {
+ /* Shallow GC-thing with an implicit lock count of 1. */
+ JS_ASSERT(!GC_THING_IS_DEEP(flags & GCF_TYPEMASK, thing));
+ } else {
+ /* Basis or nested unlock of a deep thing, or nested of shallow. */
+ if (--lhe->count != 0)
+ goto out;
+ JS_DHashTableOperate(rt->gcLocksHash, thing, JS_DHASH_REMOVE);
+ }
+ *flagp = (uint8)(flags & ~GCF_LOCK);
+ }
+
+ rt->gcPoke = JS_TRUE;
+out:
+ METER(rt->gcStats.unlock++);
+ JS_UNLOCK_GC(rt);
+ return JS_TRUE;
+}
+
+#ifdef GC_MARK_DEBUG
+
+#include <stdio.h>
+#include "jsprf.h"
+
+typedef struct GCMarkNode GCMarkNode;
+
+struct GCMarkNode {
+ void *thing;
+ const char *name;
+ GCMarkNode *next;
+ GCMarkNode *prev;
+};
+
+JS_FRIEND_DATA(FILE *) js_DumpGCHeap;
+JS_EXPORT_DATA(void *) js_LiveThingToFind;
+
+#ifdef HAVE_XPCONNECT
+#include "dump_xpc.h"
+#endif
+
+static void
+GetObjSlotName(JSScope *scope, JSObject *obj, uint32 slot, char *buf,
+ size_t bufsize)
+{
+ jsval nval;
+ JSScopeProperty *sprop;
+ JSClass *clasp;
+ uint32 key;
+ const char *slotname;
+
+ if (!scope) {
+ JS_snprintf(buf, bufsize, "**UNKNOWN OBJECT MAP ENTRY**");
+ return;
+ }
+
+ sprop = SCOPE_LAST_PROP(scope);
+ while (sprop && sprop->slot != slot)
+ sprop = sprop->parent;
+
+ if (!sprop) {
+ switch (slot) {
+ case JSSLOT_PROTO:
+ JS_snprintf(buf, bufsize, "__proto__");
+ break;
+ case JSSLOT_PARENT:
+ JS_snprintf(buf, bufsize, "__parent__");
+ break;
+ default:
+ slotname = NULL;
+ clasp = LOCKED_OBJ_GET_CLASS(obj);
+ if (clasp->flags & JSCLASS_IS_GLOBAL) {
+ key = slot - JSSLOT_START(clasp);
+#define JS_PROTO(name,code,init) \
+ if ((code) == key) { slotname = js_##name##_str; goto found; }
+#include "jsproto.tbl"
+#undef JS_PROTO
+ }
+ found:
+ if (slotname)
+ JS_snprintf(buf, bufsize, "CLASS_OBJECT(%s)", slotname);
+ else
+ JS_snprintf(buf, bufsize, "**UNKNOWN SLOT %ld**", (long)slot);
+ break;
+ }
+ } else {
+ nval = ID_TO_VALUE(sprop->id);
+ if (JSVAL_IS_INT(nval)) {
+ JS_snprintf(buf, bufsize, "%ld", (long)JSVAL_TO_INT(nval));
+ } else if (JSVAL_IS_STRING(nval)) {
+ JS_snprintf(buf, bufsize, "%s",
+ JS_GetStringBytes(JSVAL_TO_STRING(nval)));
+ } else {
+ JS_snprintf(buf, bufsize, "**FINALIZED ATOM KEY**");
+ }
+ }
+}
+
+static const char *
+gc_object_class_name(void* thing)
+{
+ uint8 *flagp = js_GetGCThingFlags(thing);
+ const char *className = "";
+ static char depbuf[32];
+
+ switch (*flagp & GCF_TYPEMASK) {
+ case GCX_OBJECT: {
+ JSObject *obj = (JSObject *)thing;
+ JSClass *clasp = JSVAL_TO_PRIVATE(obj->slots[JSSLOT_CLASS]);
+ className = clasp->name;
+#ifdef HAVE_XPCONNECT
+ if (clasp->flags & JSCLASS_PRIVATE_IS_NSISUPPORTS) {
+ jsval privateValue = obj->slots[JSSLOT_PRIVATE];
+
+ JS_ASSERT(clasp->flags & JSCLASS_HAS_PRIVATE);
+ if (!JSVAL_IS_VOID(privateValue)) {
+ void *privateThing = JSVAL_TO_PRIVATE(privateValue);
+ const char *xpcClassName = GetXPCObjectClassName(privateThing);
+
+ if (xpcClassName)
+ className = xpcClassName;
+ }
+ }
+#endif
+ break;
+ }
+
+ case GCX_STRING:
+ case GCX_MUTABLE_STRING: {
+ JSString *str = (JSString *)thing;
+ if (JSSTRING_IS_DEPENDENT(str)) {
+ JS_snprintf(depbuf, sizeof depbuf, "start:%u, length:%u",
+ JSSTRDEP_START(str), JSSTRDEP_LENGTH(str));
+ className = depbuf;
+ } else {
+ className = "string";
+ }
+ break;
+ }
+
+ case GCX_DOUBLE:
+ className = "double";
+ break;
+ }
+
+ return className;
+}
+
+static void
+gc_dump_thing(JSContext *cx, JSGCThing *thing, FILE *fp)
+{
+ GCMarkNode *prev = (GCMarkNode *)cx->gcCurrentMarkNode;
+ GCMarkNode *next = NULL;
+ char *path = NULL;
+
+ while (prev) {
+ next = prev;
+ prev = prev->prev;
+ }
+ while (next) {
+ uint8 nextFlags = *js_GetGCThingFlags(next->thing);
+ if ((nextFlags & GCF_TYPEMASK) == GCX_OBJECT) {
+ path = JS_sprintf_append(path, "%s(%s @ 0x%08p).",
+ next->name,
+ gc_object_class_name(next->thing),
+ (JSObject*)next->thing);
+ } else {
+ path = JS_sprintf_append(path, "%s(%s).",
+ next->name,
+ gc_object_class_name(next->thing));
+ }
+ next = next->next;
+ }
+ if (!path)
+ return;
+
+ fprintf(fp, "%08lx ", (long)thing);
+ switch (*js_GetGCThingFlags(thing) & GCF_TYPEMASK) {
+ case GCX_OBJECT:
+ {
+ JSObject *obj = (JSObject *)thing;
+ jsval privateValue = obj->slots[JSSLOT_PRIVATE];
+ void *privateThing = JSVAL_IS_VOID(privateValue)
+ ? NULL
+ : JSVAL_TO_PRIVATE(privateValue);
+ const char *className = gc_object_class_name(thing);
+ fprintf(fp, "object %8p %s", privateThing, className);
+ break;
+ }
+#if JS_HAS_XML_SUPPORT
+ case GCX_NAMESPACE:
+ {
+ JSXMLNamespace *ns = (JSXMLNamespace *)thing;
+ fprintf(fp, "namespace %s:%s",
+ JS_GetStringBytes(ns->prefix), JS_GetStringBytes(ns->uri));
+ break;
+ }
+ case GCX_QNAME:
+ {
+ JSXMLQName *qn = (JSXMLQName *)thing;
+ fprintf(fp, "qname %s(%s):%s",
+ JS_GetStringBytes(qn->prefix), JS_GetStringBytes(qn->uri),
+ JS_GetStringBytes(qn->localName));
+ break;
+ }
+ case GCX_XML:
+ {
+ extern const char *js_xml_class_str[];
+ JSXML *xml = (JSXML *)thing;
+ fprintf(fp, "xml %8p %s", xml, js_xml_class_str[xml->xml_class]);
+ break;
+ }
+#endif
+ case GCX_DOUBLE:
+ fprintf(fp, "double %g", *(jsdouble *)thing);
+ break;
+ case GCX_PRIVATE:
+ fprintf(fp, "private %8p", (void *)thing);
+ break;
+ default:
+ fprintf(fp, "string %s", JS_GetStringBytes((JSString *)thing));
+ break;
+ }
+ fprintf(fp, " via %s\n", path);
+ free(path);
+}
+
+void
+js_MarkNamedGCThing(JSContext *cx, void *thing, const char *name)
+{
+ GCMarkNode markNode;
+
+ if (!thing)
+ return;
+
+ markNode.thing = thing;
+ markNode.name = name;
+ markNode.next = NULL;
+ markNode.prev = (GCMarkNode *)cx->gcCurrentMarkNode;
+ if (markNode.prev)
+ markNode.prev->next = &markNode;
+ cx->gcCurrentMarkNode = &markNode;
+
+ if (thing == js_LiveThingToFind) {
+ /*
+ * Dump js_LiveThingToFind each time we reach it during the marking
+ * phase of GC to print all live references to the thing.
+ */
+ gc_dump_thing(cx, thing, stderr);
+ }
+
+ js_MarkGCThing(cx, thing);
+
+ if (markNode.prev)
+ markNode.prev->next = NULL;
+ cx->gcCurrentMarkNode = markNode.prev;
+}
+
+#endif /* GC_MARK_DEBUG */
+
+static void
+gc_mark_atom_key_thing(void *thing, void *arg)
+{
+ JSContext *cx = (JSContext *) arg;
+
+ GC_MARK(cx, thing, "atom");
+}
+
+void
+js_MarkAtom(JSContext *cx, JSAtom *atom)
+{
+ jsval key;
+
+ if (atom->flags & ATOM_MARK)
+ return;
+ atom->flags |= ATOM_MARK;
+ key = ATOM_KEY(atom);
+ if (JSVAL_IS_GCTHING(key)) {
+#ifdef GC_MARK_DEBUG
+ char name[32];
+
+ if (JSVAL_IS_STRING(key)) {
+ JS_snprintf(name, sizeof name, "'%s'",
+ JS_GetStringBytes(JSVAL_TO_STRING(key)));
+ } else {
+ JS_snprintf(name, sizeof name, "<%x>", key);
+ }
+#endif
+ GC_MARK(cx, JSVAL_TO_GCTHING(key), name);
+ }
+ if (atom->flags & ATOM_HIDDEN)
+ js_MarkAtom(cx, atom->entry.value);
+}
+
+static void
+AddThingToUnscannedBag(JSRuntime *rt, void *thing, uint8 *flagp);
+
+static void
+MarkGCThingChildren(JSContext *cx, void *thing, uint8 *flagp,
+ JSBool shouldCheckRecursion)
+{
+ JSRuntime *rt;
+ JSObject *obj;
+ jsval v, *vp, *end;
+ void *next_thing;
+ uint8 *next_flagp;
+ JSString *str;
+#ifdef JS_GCMETER
+ uint32 tailCallNesting;
+#endif
+#ifdef GC_MARK_DEBUG
+ JSScope *scope;
+ char name[32];
+#endif
+
+ /*
+ * With JS_GC_ASSUME_LOW_C_STACK defined the mark phase of GC always
+ * uses the non-recursive code that otherwise would be called only on
+ * a low C stack condition.
+ */
+#ifdef JS_GC_ASSUME_LOW_C_STACK
+# define RECURSION_TOO_DEEP() shouldCheckRecursion
+#else
+ int stackDummy;
+# define RECURSION_TOO_DEEP() (shouldCheckRecursion && \
+ !JS_CHECK_STACK_SIZE(cx, stackDummy))
+#endif
+
+ rt = cx->runtime;
+ METER(tailCallNesting = 0);
+ METER(if (++rt->gcStats.cdepth > rt->gcStats.maxcdepth)
+ rt->gcStats.maxcdepth = rt->gcStats.cdepth);
+
+#ifndef GC_MARK_DEBUG
+ start:
+#endif
+ JS_ASSERT(flagp);
+ JS_ASSERT(*flagp & GCF_MARK); /* the caller must already mark the thing */
+ METER(if (++rt->gcStats.depth > rt->gcStats.maxdepth)
+ rt->gcStats.maxdepth = rt->gcStats.depth);
+#ifdef GC_MARK_DEBUG
+ if (js_DumpGCHeap)
+ gc_dump_thing(cx, thing, js_DumpGCHeap);
+#endif
+
+ switch (*flagp & GCF_TYPEMASK) {
+ case GCX_OBJECT:
+ if (RECURSION_TOO_DEEP())
+ goto add_to_unscanned_bag;
+ /* If obj->slots is null, obj must be a newborn. */
+ obj = (JSObject *) thing;
+ vp = obj->slots;
+ if (!vp)
+ break;
+
+ /* Mark slots if they are small enough to be GC-allocated. */
+ if ((vp[-1] + 1) * sizeof(jsval) <= GC_NBYTES_MAX)
+ GC_MARK(cx, vp - 1, "slots");
+
+ /* Set up local variables to loop over unmarked things. */
+ end = vp + ((obj->map->ops->mark)
+ ? obj->map->ops->mark(cx, obj, NULL)
+ : JS_MIN(obj->map->freeslot, obj->map->nslots));
+ thing = NULL;
+ flagp = NULL;
+#ifdef GC_MARK_DEBUG
+ scope = OBJ_IS_NATIVE(obj) ? OBJ_SCOPE(obj) : NULL;
+#endif
+ for (; vp != end; ++vp) {
+ v = *vp;
+ if (!JSVAL_IS_GCTHING(v) || v == JSVAL_NULL)
+ continue;
+ next_thing = JSVAL_TO_GCTHING(v);
+ if (next_thing == thing)
+ continue;
+ next_flagp = js_GetGCThingFlags(next_thing);
+ if (*next_flagp & GCF_MARK)
+ continue;
+ JS_ASSERT(*next_flagp != GCF_FINAL);
+ if (thing) {
+#ifdef GC_MARK_DEBUG
+ GC_MARK(cx, thing, name);
+#else
+ *flagp |= GCF_MARK;
+ MarkGCThingChildren(cx, thing, flagp, JS_TRUE);
+#endif
+ if (*next_flagp & GCF_MARK) {
+ /*
+ * This happens when the recursive MarkGCThingChildren call marks
+ * the thing whose flags *next_flagp refers to.
+ */
+ thing = NULL;
+ continue;
+ }
+ }
+#ifdef GC_MARK_DEBUG
+ GetObjSlotName(scope, obj, vp - obj->slots, name, sizeof name);
+#endif
+ thing = next_thing;
+ flagp = next_flagp;
+ }
+ if (thing) {
+ /*
+ * thing came from the last unmarked GC-thing slot and we
+ * can optimize tail recursion.
+ *
+ * Since we already know that there is enough C stack space,
+ * we clear shouldCheckRecursion to avoid extra checking in
+ * RECURSION_TOO_DEEP.
+ */
+ shouldCheckRecursion = JS_FALSE;
+ goto on_tail_recursion;
+ }
+ break;
+
+#ifdef DEBUG
+ case GCX_STRING:
+ str = (JSString *)thing;
+ JS_ASSERT(!JSSTRING_IS_DEPENDENT(str));
+ break;
+#endif
+
+ case GCX_MUTABLE_STRING:
+ str = (JSString *)thing;
+ if (!JSSTRING_IS_DEPENDENT(str))
+ break;
+ thing = JSSTRDEP_BASE(str);
+ flagp = js_GetGCThingFlags(thing);
+ if (*flagp & GCF_MARK)
+ break;
+#ifdef GC_MARK_DEBUG
+ strcpy(name, "base");
+#endif
+ /* Fallthrough to code to deal with the tail recursion. */
+
+ on_tail_recursion:
+#ifdef GC_MARK_DEBUG
+ /*
+ * Do not eliminate C recursion when debugging to allow
+ * js_MarkNamedGCThing to build a full dump of live GC
+ * things.
+ */
+ GC_MARK(cx, thing, name);
+ break;
+#else
+ /* Eliminate tail recursion for the last unmarked child. */
+ JS_ASSERT(*flagp != GCF_FINAL);
+ METER(++tailCallNesting);
+ *flagp |= GCF_MARK;
+ goto start;
+#endif
+
+#if JS_HAS_XML_SUPPORT
+ case GCX_NAMESPACE:
+ if (RECURSION_TOO_DEEP())
+ goto add_to_unscanned_bag;
+ js_MarkXMLNamespace(cx, (JSXMLNamespace *)thing);
+ break;
+
+ case GCX_QNAME:
+ if (RECURSION_TOO_DEEP())
+ goto add_to_unscanned_bag;
+ js_MarkXMLQName(cx, (JSXMLQName *)thing);
+ break;
+
+ case GCX_XML:
+ if (RECURSION_TOO_DEEP())
+ goto add_to_unscanned_bag;
+ js_MarkXML(cx, (JSXML *)thing);
+ break;
+#endif
+ add_to_unscanned_bag:
+ AddThingToUnscannedBag(cx->runtime, thing, flagp);
+ break;
+ }
+
+#undef RECURSION_TOO_DEEP
+
+ METER(rt->gcStats.depth -= 1 + tailCallNesting);
+ METER(rt->gcStats.cdepth--);
+}
+
+/*
+ * Avoid using PAGE_THING_GAP inside this macro to optimize the
+ * thingsPerUnscannedChunk calculation when thingSize is a power of two.
+ */
+#define GET_GAP_AND_CHUNK_SPAN(thingSize, thingsPerUnscannedChunk, pageGap) \
+ JS_BEGIN_MACRO \
+ if (0 == ((thingSize) & ((thingSize) - 1))) { \
+ pageGap = (thingSize); \
+ thingsPerUnscannedChunk = ((GC_PAGE_SIZE / (thingSize)) \
+ + JS_BITS_PER_WORD - 1) \
+ >> JS_BITS_PER_WORD_LOG2; \
+ } else { \
+ pageGap = GC_PAGE_SIZE % (thingSize); \
+ thingsPerUnscannedChunk = JS_HOWMANY(GC_PAGE_SIZE / (thingSize), \
+ JS_BITS_PER_WORD); \
+ } \
+ JS_END_MACRO
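+
+/*
+ * For instance, assuming GC_PAGE_SIZE == 1024 and a 32-bit jsuword: for
+ * thingSize == 8 the macro yields pageGap == 8 and thingsPerUnscannedChunk ==
+ * (128 + 31) >> 5 == 4, so one bit of pi->unscannedBitmap covers a run of
+ * four things; for thingSize == 24 it yields pageGap == 16 and
+ * thingsPerUnscannedChunk == JS_HOWMANY(42, 32) == 2.
+ */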
+
+static void
+AddThingToUnscannedBag(JSRuntime *rt, void *thing, uint8 *flagp)
+{
+ JSGCPageInfo *pi;
+ JSGCArena *arena;
+ size_t thingSize;
+ size_t thingsPerUnscannedChunk;
+ size_t pageGap;
+ size_t chunkIndex;
+ jsuword bit;
+
+ /* Things from delayed scanning bag are marked as GCF_MARK | GCF_FINAL. */
+ JS_ASSERT((*flagp & (GCF_MARK | GCF_FINAL)) == GCF_MARK);
+ *flagp |= GCF_FINAL;
+
+ METER(rt->gcStats.unscanned++);
+#ifdef DEBUG
+ ++rt->gcUnscannedBagSize;
+ METER(if (rt->gcUnscannedBagSize > rt->gcStats.maxunscanned)
+ rt->gcStats.maxunscanned = rt->gcUnscannedBagSize);
+#endif
+
+ pi = THING_TO_PAGE(thing);
+ arena = PAGE_TO_ARENA(pi);
+ thingSize = arena->list->thingSize;
+ GET_GAP_AND_CHUNK_SPAN(thingSize, thingsPerUnscannedChunk, pageGap);
+ chunkIndex = (((jsuword)thing & GC_PAGE_MASK) - pageGap) /
+ (thingSize * thingsPerUnscannedChunk);
+ JS_ASSERT(chunkIndex < JS_BITS_PER_WORD);
+ bit = (jsuword)1 << chunkIndex;
+ if (pi->unscannedBitmap != 0) {
+ JS_ASSERT(rt->gcUnscannedArenaStackTop);
+ if (thingsPerUnscannedChunk != 1) {
+ if (pi->unscannedBitmap & bit) {
+ /* Chunk already contains things to scan later. */
+ return;
+ }
+ } else {
+ /*
+ * The chunk must not contain things to scan later if there is
+ * only one thing per chunk.
+ */
+ JS_ASSERT(!(pi->unscannedBitmap & bit));
+ }
+ pi->unscannedBitmap |= bit;
+ JS_ASSERT(arena->unscannedPages & ((size_t)1 << PAGE_INDEX(pi)));
+ } else {
+ /*
+ * The thing is the first unscanned thing in the page; set the bit
+ * corresponding to this page in arena->unscannedPages.
+ */
+ pi->unscannedBitmap = bit;
+ JS_ASSERT(PAGE_INDEX(pi) < JS_BITS_PER_WORD);
+ bit = (jsuword)1 << PAGE_INDEX(pi);
+ JS_ASSERT(!(arena->unscannedPages & bit));
+ if (arena->unscannedPages != 0) {
+ arena->unscannedPages |= bit;
+ JS_ASSERT(arena->prevUnscanned);
+ JS_ASSERT(rt->gcUnscannedArenaStackTop);
+ } else {
+ /*
+ * The thing is the first unscanned thing in the whole arena, push
+ * the arena on the stack of unscanned arenas unless the arena
+ * has already been pushed. We detect that through the prevUnscanned
+ * field, which is NULL only for arenas not yet pushed. To ensure
+ * that prevUnscanned != NULL even when the stack contains one
+ * element, we make prevUnscanned for the arena at the bottom
+ * to point to itself.
+ *
+ * See comments in ScanDelayedChildren.
+ */
+ arena->unscannedPages = bit;
+ if (!arena->prevUnscanned) {
+ if (!rt->gcUnscannedArenaStackTop) {
+ /* Stack was empty, mark the arena as bottom element. */
+ arena->prevUnscanned = arena;
+ } else {
+ JS_ASSERT(rt->gcUnscannedArenaStackTop->prevUnscanned);
+ arena->prevUnscanned = rt->gcUnscannedArenaStackTop;
+ }
+ rt->gcUnscannedArenaStackTop = arena;
+ }
+ }
+ }
+ JS_ASSERT(rt->gcUnscannedArenaStackTop);
+}
+
+static void
+ScanDelayedChildren(JSContext *cx)
+{
+ JSRuntime *rt;
+ JSGCArena *arena;
+ size_t thingSize;
+ size_t thingsPerUnscannedChunk;
+ size_t pageGap;
+ size_t pageIndex;
+ JSGCPageInfo *pi;
+ size_t chunkIndex;
+ size_t thingOffset, thingLimit;
+ JSGCThing *thing;
+ uint8 *flagp;
+ JSGCArena *prevArena;
+
+ rt = cx->runtime;
+ arena = rt->gcUnscannedArenaStackTop;
+ if (!arena) {
+ JS_ASSERT(rt->gcUnscannedBagSize == 0);
+ return;
+ }
+
+ init_size:
+ thingSize = arena->list->thingSize;
+ GET_GAP_AND_CHUNK_SPAN(thingSize, thingsPerUnscannedChunk, pageGap);
+ for (;;) {
+ /*
+ * The following assert verifies that the current arena belongs to
+ * the unscanned stack, since AddThingToUnscannedBag ensures that even
+ * for the stack's bottom element prevUnscanned != NULL but rather points
+ * to the arena itself.
+ */
+ JS_ASSERT(arena->prevUnscanned);
+ JS_ASSERT(rt->gcUnscannedArenaStackTop->prevUnscanned);
+ while (arena->unscannedPages != 0) {
+ pageIndex = JS_FLOOR_LOG2W(arena->unscannedPages);
+ JS_ASSERT(pageIndex < GC_PAGE_COUNT);
+ pi = (JSGCPageInfo *)(FIRST_THING_PAGE(arena) +
+ pageIndex * GC_PAGE_SIZE);
+ JS_ASSERT(pi->unscannedBitmap);
+ chunkIndex = JS_FLOOR_LOG2W(pi->unscannedBitmap);
+ pi->unscannedBitmap &= ~((jsuword)1 << chunkIndex);
+ if (pi->unscannedBitmap == 0)
+ arena->unscannedPages &= ~((jsuword)1 << pageIndex);
+ thingOffset = (pageGap
+ + chunkIndex * thingsPerUnscannedChunk * thingSize);
+ JS_ASSERT(thingOffset >= sizeof(JSGCPageInfo));
+ thingLimit = thingOffset + thingsPerUnscannedChunk * thingSize;
+ if (thingsPerUnscannedChunk != 1) {
+ /*
+ * thingLimit can go beyond the last allocated thing for the
+ * last chunk as the real limit can be inside the chunk.
+ */
+ if (arena->list->last == arena &&
+ arena->list->lastLimit < (pageIndex * GC_PAGE_SIZE +
+ thingLimit)) {
+ thingLimit = (arena->list->lastLimit -
+ pageIndex * GC_PAGE_SIZE);
+ } else if (thingLimit > GC_PAGE_SIZE) {
+ thingLimit = GC_PAGE_SIZE;
+ }
+ JS_ASSERT(thingLimit > thingOffset);
+ }
+ JS_ASSERT(arena->list->last != arena ||
+ arena->list->lastLimit >= (pageIndex * GC_PAGE_SIZE +
+ thingLimit));
+ JS_ASSERT(thingLimit <= GC_PAGE_SIZE);
+
+ for (; thingOffset != thingLimit; thingOffset += thingSize) {
+ /*
+ * XXX: inline js_GetGCThingFlags() to use already available
+ * pi.
+ */
+ thing = (void *)((jsuword)pi + thingOffset);
+ flagp = js_GetGCThingFlags(thing);
+ if (thingsPerUnscannedChunk != 1) {
+ /*
+ * Skip free or already scanned things that share the chunk
+ * with unscanned ones.
+ */
+ if ((*flagp & (GCF_MARK|GCF_FINAL)) != (GCF_MARK|GCF_FINAL))
+ continue;
+ }
+ JS_ASSERT((*flagp & (GCF_MARK|GCF_FINAL))
+ == (GCF_MARK|GCF_FINAL));
+ *flagp &= ~GCF_FINAL;
+#ifdef DEBUG
+ JS_ASSERT(rt->gcUnscannedBagSize != 0);
+ --rt->gcUnscannedBagSize;
+
+ /*
+ * Check that GC thing type is consistent with the type of
+ * things that can be put to the unscanned bag.
+ */
+ switch (*flagp & GCF_TYPEMASK) {
+ case GCX_OBJECT:
+# if JS_HAS_XML_SUPPORT
+ case GCX_NAMESPACE:
+ case GCX_QNAME:
+ case GCX_XML:
+# endif
+ break;
+ default:
+ JS_ASSERT(0);
+ }
+#endif
+ MarkGCThingChildren(cx, thing, flagp, JS_FALSE);
+ }
+ }
+ /*
+ * We finished scanning of the arena but we can only pop it from
+ * the stack if the arena is the stack's top.
+ *
+         * When MarkGCThingChildren, called above, invokes
+         * AddThingToUnscannedBag and the latter pushes new arenas onto the
+         * stack, we have to skip popping this arena until it becomes
+         * the top of the stack again.
+ */
+ if (arena == rt->gcUnscannedArenaStackTop) {
+ prevArena = arena->prevUnscanned;
+ arena->prevUnscanned = NULL;
+ if (arena == prevArena) {
+ /*
+ * prevUnscanned points to itself and we reached the bottom
+ * of the stack.
+ */
+ break;
+ }
+ rt->gcUnscannedArenaStackTop = arena = prevArena;
+ } else {
+ arena = rt->gcUnscannedArenaStackTop;
+ }
+ if (arena->list->thingSize != thingSize)
+ goto init_size;
+ }
+ JS_ASSERT(rt->gcUnscannedArenaStackTop);
+ JS_ASSERT(!rt->gcUnscannedArenaStackTop->prevUnscanned);
+ rt->gcUnscannedArenaStackTop = NULL;
+ JS_ASSERT(rt->gcUnscannedBagSize == 0);
+}
+
+void
+js_MarkGCThing(JSContext *cx, void *thing)
+{
+ uint8 *flagp;
+
+ if (!thing)
+ return;
+
+ flagp = js_GetGCThingFlags(thing);
+ JS_ASSERT(*flagp != GCF_FINAL);
+ if (*flagp & GCF_MARK)
+ return;
+ *flagp |= GCF_MARK;
+
+ if (!cx->insideGCMarkCallback) {
+ MarkGCThingChildren(cx, thing, flagp, JS_TRUE);
+ } else {
+ /*
+ * For API compatibility we allow for the callback to assume that
+ * after it calls js_MarkGCThing for the last time, the callback
+ * can start to finalize its own objects that are only referenced
+ * by unmarked GC things.
+ *
+ * Since we do not know which call from inside the callback is the
+ * last, we ensure that the unscanned bag is always empty when we
+ * return to the callback and all marked things are scanned.
+ *
+ * As an optimization we do not check for the stack size here and
+ * pass JS_FALSE as the last argument to MarkGCThingChildren.
+         * Otherwise, with a low C stack, the thing would be pushed to the bag
+         * just to be fed to MarkGCThingChildren from inside
+ * ScanDelayedChildren.
+ */
+ cx->insideGCMarkCallback = JS_FALSE;
+ MarkGCThingChildren(cx, thing, flagp, JS_FALSE);
+ ScanDelayedChildren(cx);
+ cx->insideGCMarkCallback = JS_TRUE;
+ }
+}
+
+JS_STATIC_DLL_CALLBACK(JSDHashOperator)
+gc_root_marker(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num, void *arg)
+{
+ JSGCRootHashEntry *rhe = (JSGCRootHashEntry *)hdr;
+ jsval *rp = (jsval *)rhe->root;
+ jsval v = *rp;
+
+ /* Ignore null object and scalar values. */
+ if (!JSVAL_IS_NULL(v) && JSVAL_IS_GCTHING(v)) {
+ JSContext *cx = (JSContext *)arg;
+#ifdef DEBUG
+ JSBool root_points_to_gcArenaList = JS_FALSE;
+ jsuword thing = (jsuword) JSVAL_TO_GCTHING(v);
+ uintN i;
+ JSGCArenaList *arenaList;
+ JSGCArena *a;
+ size_t limit;
+
+ for (i = 0; i < GC_NUM_FREELISTS; i++) {
+ arenaList = &cx->runtime->gcArenaList[i];
+ limit = arenaList->lastLimit;
+ for (a = arenaList->last; a; a = a->prev) {
+ if (thing - FIRST_THING_PAGE(a) < limit) {
+ root_points_to_gcArenaList = JS_TRUE;
+ break;
+ }
+ limit = GC_THINGS_SIZE;
+ }
+ }
+ if (!root_points_to_gcArenaList && rhe->name) {
+ fprintf(stderr,
+"JS API usage error: the address passed to JS_AddNamedRoot currently holds an\n"
+"invalid jsval. This is usually caused by a missing call to JS_RemoveRoot.\n"
+"The root's name is \"%s\".\n",
+ rhe->name);
+ }
+ JS_ASSERT(root_points_to_gcArenaList);
+#endif
+
+ GC_MARK(cx, JSVAL_TO_GCTHING(v), rhe->name ? rhe->name : "root");
+ }
+ return JS_DHASH_NEXT;
+}
+
+JS_STATIC_DLL_CALLBACK(JSDHashOperator)
+gc_lock_marker(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num, void *arg)
+{
+ JSGCLockHashEntry *lhe = (JSGCLockHashEntry *)hdr;
+ void *thing = (void *)lhe->thing;
+ JSContext *cx = (JSContext *)arg;
+
+ GC_MARK(cx, thing, "locked object");
+ return JS_DHASH_NEXT;
+}
+
+#define GC_MARK_JSVALS(cx, len, vec, name) \
+ JS_BEGIN_MACRO \
+ jsval _v, *_vp, *_end; \
+ \
+ for (_vp = vec, _end = _vp + len; _vp < _end; _vp++) { \
+ _v = *_vp; \
+ if (JSVAL_IS_GCTHING(_v)) \
+ GC_MARK(cx, JSVAL_TO_GCTHING(_v), name); \
+ } \
+ JS_END_MACRO
+
+void
+js_MarkStackFrame(JSContext *cx, JSStackFrame *fp)
+{
+ uintN depth, nslots;
+
+ if (fp->callobj)
+ GC_MARK(cx, fp->callobj, "call object");
+ if (fp->argsobj)
+ GC_MARK(cx, fp->argsobj, "arguments object");
+ if (fp->varobj)
+ GC_MARK(cx, fp->varobj, "variables object");
+ if (fp->script) {
+ js_MarkScript(cx, fp->script);
+ if (fp->spbase) {
+ /*
+ * Don't mark what has not been pushed yet, or what has been
+ * popped already.
+ */
+ depth = fp->script->depth;
+ nslots = (JS_UPTRDIFF(fp->sp, fp->spbase)
+ < depth * sizeof(jsval))
+ ? (uintN)(fp->sp - fp->spbase)
+ : depth;
+ GC_MARK_JSVALS(cx, nslots, fp->spbase, "operand");
+ }
+ }
+
+ /* Allow for primitive this parameter due to JSFUN_THISP_* flags. */
+ JS_ASSERT(JSVAL_IS_OBJECT((jsval)fp->thisp) ||
+ (fp->fun && JSFUN_THISP_FLAGS(fp->fun->flags)));
+ if (JSVAL_IS_GCTHING((jsval)fp->thisp))
+ GC_MARK(cx, JSVAL_TO_GCTHING((jsval)fp->thisp), "this");
+
+ /*
+ * Mark fp->argv, even though in the common case it will be marked via our
+ * caller's frame, or via a JSStackHeader if fp was pushed by an external
+ * invocation.
+ *
+ * The hard case is when there is not enough contiguous space in the stack
+ * arena for actual, missing formal, and local root (JSFunctionSpec.extra)
+ * slots. In this case, fp->argv points to new space in a new arena, and
+ * marking the caller's operand stack, or an external caller's allocated
+ * stack tracked by a JSStackHeader, will not mark all the values stored
+ * and addressable via fp->argv.
+ *
+ * So in summary, solely for the hard case of moving argv due to missing
+ * formals and extra roots, we must mark actuals, missing formals, and any
+ * local roots arrayed at fp->argv here.
+ *
+ * It would be good to avoid redundant marking of the same reference, in
+ * the case where fp->argv does point into caller-allocated space tracked
+ * by fp->down->spbase or cx->stackHeaders. This would allow callbacks
+ * such as the forthcoming rt->gcThingCallback (bug 333078) to compute JS
+ * reference counts. So this comment deserves a FIXME bug to cite.
+ */
+ if (fp->argv) {
+ nslots = fp->argc;
+ if (fp->fun) {
+ if (fp->fun->nargs > nslots)
+ nslots = fp->fun->nargs;
+ if (!FUN_INTERPRETED(fp->fun))
+ nslots += fp->fun->u.n.extra;
+ }
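+        /* argv[-2] is the callee and argv[-1] is |this|, hence the -2/+2. */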
+ GC_MARK_JSVALS(cx, nslots + 2, fp->argv - 2, "arg");
+ }
+ if (JSVAL_IS_GCTHING(fp->rval))
+ GC_MARK(cx, JSVAL_TO_GCTHING(fp->rval), "rval");
+ if (fp->vars)
+ GC_MARK_JSVALS(cx, fp->nvars, fp->vars, "var");
+ GC_MARK(cx, fp->scopeChain, "scope chain");
+ if (fp->sharpArray)
+ GC_MARK(cx, fp->sharpArray, "sharp array");
+
+ if (fp->xmlNamespace)
+ GC_MARK(cx, fp->xmlNamespace, "xmlNamespace");
+}
+
+static void
+MarkWeakRoots(JSContext *cx, JSWeakRoots *wr)
+{
+ uintN i;
+ void *thing;
+
+ for (i = 0; i < GCX_NTYPES; i++)
+ GC_MARK(cx, wr->newborn[i], gc_typenames[i]);
+ if (wr->lastAtom)
+ GC_MARK_ATOM(cx, wr->lastAtom);
+ if (JSVAL_IS_GCTHING(wr->lastInternalResult)) {
+ thing = JSVAL_TO_GCTHING(wr->lastInternalResult);
+ if (thing)
+ GC_MARK(cx, thing, "lastInternalResult");
+ }
+}
+
+/*
+ * When gckind is GC_LAST_DITCH, it indicates a call from js_NewGCThing with
+ * rt->gcLock already held and when the lock should be kept on return.
+ */
+void
+js_GC(JSContext *cx, JSGCInvocationKind gckind)
+{
+ JSRuntime *rt;
+ JSBool keepAtoms;
+ uintN i, type;
+ JSContext *iter, *acx;
+#if JS_HAS_GENERATORS
+ JSGenerator **genTodoTail;
+#endif
+ JSStackFrame *fp, *chain;
+ JSStackHeader *sh;
+ JSTempValueRooter *tvr;
+ size_t nbytes, limit, offset;
+ JSGCArena *a, **ap;
+ uint8 flags, *flagp, *firstPage;
+ JSGCThing *thing, *freeList;
+ JSGCArenaList *arenaList;
+ GCFinalizeOp finalizer;
+ JSBool allClear;
+#ifdef JS_THREADSAFE
+ uint32 requestDebit;
+#endif
+
+ rt = cx->runtime;
+#ifdef JS_THREADSAFE
+ /* Avoid deadlock. */
+ JS_ASSERT(!JS_IS_RUNTIME_LOCKED(rt));
+#endif
+
+ if (gckind == GC_LAST_DITCH) {
+ /* The last ditch GC preserves all atoms and weak roots. */
+ keepAtoms = JS_TRUE;
+ } else {
+ JS_CLEAR_WEAK_ROOTS(&cx->weakRoots);
+ rt->gcPoke = JS_TRUE;
+
+ /* Keep atoms when a suspended compile is running on another context. */
+ keepAtoms = (rt->gcKeepAtoms != 0);
+ }
+
+ /*
+ * Don't collect garbage if the runtime isn't up, and cx is not the last
+ * context in the runtime. The last context must force a GC, and nothing
+ * should suppress that final collection or there may be shutdown leaks,
+ * or runtime bloat until the next context is created.
+ */
+ if (rt->state != JSRTS_UP && gckind != GC_LAST_CONTEXT)
+ return;
+
+ restart_after_callback:
+ /*
+ * Let the API user decide to defer a GC if it wants to (unless this
+ * is the last context). Invoke the callback regardless.
+ */
+ if (rt->gcCallback &&
+ !rt->gcCallback(cx, JSGC_BEGIN) &&
+ gckind != GC_LAST_CONTEXT) {
+ return;
+ }
+
+ /* Lock out other GC allocator and collector invocations. */
+ if (gckind != GC_LAST_DITCH)
+ JS_LOCK_GC(rt);
+
+ /* Do nothing if no mutator has executed since the last GC. */
+ if (!rt->gcPoke) {
+ METER(rt->gcStats.nopoke++);
+ if (gckind != GC_LAST_DITCH)
+ JS_UNLOCK_GC(rt);
+ return;
+ }
+ METER(rt->gcStats.poke++);
+ rt->gcPoke = JS_FALSE;
+
+#ifdef JS_THREADSAFE
+ JS_ASSERT(cx->thread->id == js_CurrentThreadId());
+
+ /* Bump gcLevel and return rather than nest on this thread. */
+ if (rt->gcThread == cx->thread) {
+ JS_ASSERT(rt->gcLevel > 0);
+ rt->gcLevel++;
+ METER(if (rt->gcLevel > rt->gcStats.maxlevel)
+ rt->gcStats.maxlevel = rt->gcLevel);
+ if (gckind != GC_LAST_DITCH)
+ JS_UNLOCK_GC(rt);
+ return;
+ }
+
+ /*
+ * If we're in one or more requests (possibly on more than one context)
+ * running on the current thread, indicate, temporarily, that all these
+ * requests are inactive. If cx->thread is NULL, then cx is not using
+ * the request model, and does not contribute to rt->requestCount.
+ */
+ requestDebit = 0;
+ if (cx->thread) {
+ JSCList *head, *link;
+
+ /*
+ * Check all contexts on cx->thread->contextList for active requests,
+ * counting each such context against requestDebit.
+ */
+ head = &cx->thread->contextList;
+ for (link = head->next; link != head; link = link->next) {
+ acx = CX_FROM_THREAD_LINKS(link);
+ JS_ASSERT(acx->thread == cx->thread);
+ if (acx->requestDepth)
+ requestDebit++;
+ }
+ } else {
+ /*
+ * We assert, but check anyway, in case someone is misusing the API.
+ * Avoiding the loop over all of rt's contexts is a win in the event
+ * that the GC runs only on request-less contexts with null threads,
+ * in a special thread such as might be used by the UI/DOM/Layout
+ * "mozilla" or "main" thread in Mozilla-the-browser.
+ */
+ JS_ASSERT(cx->requestDepth == 0);
+ if (cx->requestDepth)
+ requestDebit = 1;
+ }
+ if (requestDebit) {
+ JS_ASSERT(requestDebit <= rt->requestCount);
+ rt->requestCount -= requestDebit;
+ if (rt->requestCount == 0)
+ JS_NOTIFY_REQUEST_DONE(rt);
+ }
+
+ /* If another thread is already in GC, don't attempt GC; wait instead. */
+ if (rt->gcLevel > 0) {
+ /* Bump gcLevel to restart the current GC, so it finds new garbage. */
+ rt->gcLevel++;
+ METER(if (rt->gcLevel > rt->gcStats.maxlevel)
+ rt->gcStats.maxlevel = rt->gcLevel);
+
+ /* Wait for the other thread to finish, then resume our request. */
+ while (rt->gcLevel > 0)
+ JS_AWAIT_GC_DONE(rt);
+ if (requestDebit)
+ rt->requestCount += requestDebit;
+ if (gckind != GC_LAST_DITCH)
+ JS_UNLOCK_GC(rt);
+ return;
+ }
+
+ /* No other thread is in GC, so indicate that we're now in GC. */
+ rt->gcLevel = 1;
+ rt->gcThread = cx->thread;
+
+ /* Wait for all other requests to finish. */
+ while (rt->requestCount > 0)
+ JS_AWAIT_REQUEST_DONE(rt);
+
+#else /* !JS_THREADSAFE */
+
+ /* Bump gcLevel and return rather than nest; the outer gc will restart. */
+ rt->gcLevel++;
+ METER(if (rt->gcLevel > rt->gcStats.maxlevel)
+ rt->gcStats.maxlevel = rt->gcLevel);
+ if (rt->gcLevel > 1)
+ return;
+
+#endif /* !JS_THREADSAFE */
+
+ /*
+ * Set rt->gcRunning here within the GC lock, and after waiting for any
+ * active requests to end, so that new requests that try to JS_AddRoot,
+ * JS_RemoveRoot, or JS_RemoveRootRT block in JS_BeginRequest waiting for
+ * rt->gcLevel to drop to zero, while request-less calls to the *Root*
+ * APIs block in js_AddRoot or js_RemoveRoot (see above in this file),
+ * waiting for GC to finish.
+ */
+ rt->gcRunning = JS_TRUE;
+ JS_UNLOCK_GC(rt);
+
+ /* Reset malloc counter. */
+ rt->gcMallocBytes = 0;
+
+ /* Drop atoms held by the property cache, and clear property weak links. */
+ js_DisablePropertyCache(cx);
+ js_FlushPropertyCache(cx);
+#ifdef DEBUG_scopemeters
+ { extern void js_DumpScopeMeters(JSRuntime *rt);
+ js_DumpScopeMeters(rt);
+ }
+#endif
+
+#ifdef JS_THREADSAFE
+ /*
+ * Set all thread local freelists to NULL. We may visit a thread's
+ * freelist more than once. To avoid redundant clearing we unroll the
+ * current thread's step.
+ *
+ * Also, in case a JSScript wrapped within an object was finalized, we
+ * null acx->thread->gsnCache.script and finish the cache's hashtable.
+ * Note that js_DestroyScript, called from script_finalize, will have
+ * already cleared cx->thread->gsnCache above during finalization, so we
+ * don't have to here.
+ */
+ memset(cx->thread->gcFreeLists, 0, sizeof cx->thread->gcFreeLists);
+ iter = NULL;
+ while ((acx = js_ContextIterator(rt, JS_FALSE, &iter)) != NULL) {
+ if (!acx->thread || acx->thread == cx->thread)
+ continue;
+ memset(acx->thread->gcFreeLists, 0, sizeof acx->thread->gcFreeLists);
+ GSN_CACHE_CLEAR(&acx->thread->gsnCache);
+ }
+#else
+ /* The thread-unsafe case just has to clear the runtime's GSN cache. */
+ GSN_CACHE_CLEAR(&rt->gsnCache);
+#endif
+
+restart:
+ rt->gcNumber++;
+ JS_ASSERT(!rt->gcUnscannedArenaStackTop);
+ JS_ASSERT(rt->gcUnscannedBagSize == 0);
+
+ /*
+ * Mark phase.
+ */
+ JS_DHashTableEnumerate(&rt->gcRootsHash, gc_root_marker, cx);
+ if (rt->gcLocksHash)
+ JS_DHashTableEnumerate(rt->gcLocksHash, gc_lock_marker, cx);
+ js_MarkAtomState(&rt->atomState, keepAtoms, gc_mark_atom_key_thing, cx);
+ js_MarkWatchPoints(cx);
+ js_MarkScriptFilenames(rt, keepAtoms);
+ js_MarkNativeIteratorStates(cx);
+
+#if JS_HAS_GENERATORS
+ genTodoTail = MarkScheduledGenerators(cx);
+ JS_ASSERT(!*genTodoTail);
+#endif
+
+ iter = NULL;
+ while ((acx = js_ContextIterator(rt, JS_TRUE, &iter)) != NULL) {
+ /*
+ * Iterate frame chain and dormant chains. Temporarily tack current
+ * frame onto the head of the dormant list to ease iteration.
+ *
+ * (NB: see comment on this whole "dormant" thing in js_Execute.)
+ */
+ chain = acx->fp;
+ if (chain) {
+ JS_ASSERT(!chain->dormantNext);
+ chain->dormantNext = acx->dormantFrameChain;
+ } else {
+ chain = acx->dormantFrameChain;
+ }
+
+ for (fp = chain; fp; fp = chain = chain->dormantNext) {
+ do {
+ js_MarkStackFrame(cx, fp);
+ } while ((fp = fp->down) != NULL);
+ }
+
+ /* Cleanup temporary "dormant" linkage. */
+ if (acx->fp)
+ acx->fp->dormantNext = NULL;
+
+ /* Mark other roots-by-definition in acx. */
+ GC_MARK(cx, acx->globalObject, "global object");
+ MarkWeakRoots(cx, &acx->weakRoots);
+ if (acx->throwing) {
+ if (JSVAL_IS_GCTHING(acx->exception))
+ GC_MARK(cx, JSVAL_TO_GCTHING(acx->exception), "exception");
+ } else {
+ /* Avoid keeping GC-ed junk stored in JSContext.exception. */
+ acx->exception = JSVAL_NULL;
+ }
+#if JS_HAS_LVALUE_RETURN
+ if (acx->rval2set && JSVAL_IS_GCTHING(acx->rval2))
+ GC_MARK(cx, JSVAL_TO_GCTHING(acx->rval2), "rval2");
+#endif
+
+ for (sh = acx->stackHeaders; sh; sh = sh->down) {
+ METER(rt->gcStats.stackseg++);
+ METER(rt->gcStats.segslots += sh->nslots);
+ GC_MARK_JSVALS(cx, sh->nslots, JS_STACK_SEGMENT(sh), "stack");
+ }
+
+ if (acx->localRootStack)
+ js_MarkLocalRoots(cx, acx->localRootStack);
+
+ for (tvr = acx->tempValueRooters; tvr; tvr = tvr->down) {
+ switch (tvr->count) {
+ case JSTVU_SINGLE:
+ if (JSVAL_IS_GCTHING(tvr->u.value)) {
+ GC_MARK(cx, JSVAL_TO_GCTHING(tvr->u.value),
+ "tvr->u.value");
+ }
+ break;
+ case JSTVU_MARKER:
+ tvr->u.marker(cx, tvr);
+ break;
+ case JSTVU_SPROP:
+ MARK_SCOPE_PROPERTY(cx, tvr->u.sprop);
+ break;
+ case JSTVU_WEAK_ROOTS:
+ MarkWeakRoots(cx, tvr->u.weakRoots);
+ break;
+ default:
+ JS_ASSERT(tvr->count >= 0);
+ GC_MARK_JSVALS(cx, tvr->count, tvr->u.array, "tvr->u.array");
+ }
+ }
+
+ if (acx->sharpObjectMap.depth > 0)
+ js_GCMarkSharpMap(cx, &acx->sharpObjectMap);
+ }
+
+#ifdef DUMP_CALL_TABLE
+ js_DumpCallTable(cx);
+#endif
+
+ /*
+     * Mark children of things that caused too-deep recursion during the
+     * marking phase above.
+ */
+ ScanDelayedChildren(cx);
+
+#if JS_HAS_GENERATORS
+ /*
+ * Close phase: search and mark part. See comments in
+ * FindAndMarkObjectsToClose for details.
+ */
+ FindAndMarkObjectsToClose(cx, gckind, genTodoTail);
+
+ /*
+ * Mark children of things that caused too deep recursion during the
+ * just-completed marking part of the close phase.
+ */
+ ScanDelayedChildren(cx);
+#endif
+
+ JS_ASSERT(!cx->insideGCMarkCallback);
+ if (rt->gcCallback) {
+ cx->insideGCMarkCallback = JS_TRUE;
+ (void) rt->gcCallback(cx, JSGC_MARK_END);
+ JS_ASSERT(cx->insideGCMarkCallback);
+ cx->insideGCMarkCallback = JS_FALSE;
+ }
+ JS_ASSERT(rt->gcUnscannedBagSize == 0);
+
+ /* Finalize iterator states before the objects they iterate over. */
+ CloseIteratorStates(cx);
+
+ /*
+ * Sweep phase.
+ *
+ * Finalize as we sweep, outside of rt->gcLock but with rt->gcRunning set
+ * so that any attempt to allocate a GC-thing from a finalizer will fail,
+ * rather than nest badly and leave the unmarked newborn to be swept.
+ *
+ * Finalize smaller objects before larger, to guarantee finalization of
+ * GC-allocated obj->slots after obj. See FreeSlots in jsobj.c.
+ */
+ for (i = 0; i < GC_NUM_FREELISTS; i++) {
+ arenaList = &rt->gcArenaList[i];
+ nbytes = GC_FREELIST_NBYTES(i);
+ limit = arenaList->lastLimit;
+ for (a = arenaList->last; a; a = a->prev) {
+ JS_ASSERT(!a->prevUnscanned);
+ JS_ASSERT(a->unscannedPages == 0);
+ firstPage = (uint8 *) FIRST_THING_PAGE(a);
+ for (offset = 0; offset != limit; offset += nbytes) {
+ if ((offset & GC_PAGE_MASK) == 0) {
+ JS_ASSERT(((JSGCPageInfo *)(firstPage + offset))->
+ unscannedBitmap == 0);
+ offset += PAGE_THING_GAP(nbytes);
+ }
+ JS_ASSERT(offset < limit);
+ flagp = a->base + offset / sizeof(JSGCThing);
+ if (flagp >= firstPage)
+ flagp += GC_THINGS_SIZE;
+ flags = *flagp;
+ if (flags & GCF_MARK) {
+ *flagp &= ~GCF_MARK;
+ } else if (!(flags & (GCF_LOCK | GCF_FINAL))) {
+ /* Call the finalizer with GCF_FINAL ORed into flags. */
+ type = flags & GCF_TYPEMASK;
+ finalizer = gc_finalizers[type];
+ if (finalizer) {
+ thing = (JSGCThing *)(firstPage + offset);
+ *flagp = (uint8)(flags | GCF_FINAL);
+ if (type >= GCX_EXTERNAL_STRING)
+ js_PurgeDeflatedStringCache(rt, (JSString *)thing);
+ finalizer(cx, thing);
+ }
+
+ /* Set flags to GCF_FINAL, signifying that thing is free. */
+ *flagp = GCF_FINAL;
+ }
+ }
+ limit = GC_THINGS_SIZE;
+ }
+ }
+
+ /*
+ * Sweep the runtime's property tree after finalizing objects, in case any
+ * had watchpoints referencing tree nodes. Then sweep atoms, which may be
+ * referenced from dead property ids.
+ */
+ js_SweepScopeProperties(rt);
+ js_SweepAtomState(&rt->atomState);
+
+ /*
+ * Sweep script filenames after sweeping functions in the generic loop
+ * above. In this way when a scripted function's finalizer destroys the
+ * script and calls rt->destroyScriptHook, the hook can still access the
+ * script's filename. See bug 323267.
+ */
+ js_SweepScriptFilenames(rt);
+
+ /*
+ * Free phase.
+ * Free any unused arenas and rebuild the JSGCThing freelist.
+ */
+ for (i = 0; i < GC_NUM_FREELISTS; i++) {
+ arenaList = &rt->gcArenaList[i];
+ ap = &arenaList->last;
+ a = *ap;
+ if (!a)
+ continue;
+
+ allClear = JS_TRUE;
+ arenaList->freeList = NULL;
+ freeList = NULL;
+ METER(arenaList->stats.nthings = 0);
+ METER(arenaList->stats.freelen = 0);
+
+ nbytes = GC_FREELIST_NBYTES(i);
+ limit = arenaList->lastLimit;
+ do {
+ METER(size_t nfree = 0);
+ firstPage = (uint8 *) FIRST_THING_PAGE(a);
+ for (offset = 0; offset != limit; offset += nbytes) {
+ if ((offset & GC_PAGE_MASK) == 0)
+ offset += PAGE_THING_GAP(nbytes);
+ JS_ASSERT(offset < limit);
+ flagp = a->base + offset / sizeof(JSGCThing);
+ if (flagp >= firstPage)
+ flagp += GC_THINGS_SIZE;
+
+ if (*flagp != GCF_FINAL) {
+ allClear = JS_FALSE;
+ METER(++arenaList->stats.nthings);
+ } else {
+ thing = (JSGCThing *)(firstPage + offset);
+ thing->flagp = flagp;
+ thing->next = freeList;
+ freeList = thing;
+ METER(++nfree);
+ }
+ }
+ if (allClear) {
+ /*
+                 * Forget the just-assembled free list head for the arena
+ * and destroy the arena itself.
+ */
+ freeList = arenaList->freeList;
+ DestroyGCArena(rt, arenaList, ap);
+ } else {
+ allClear = JS_TRUE;
+ arenaList->freeList = freeList;
+ ap = &a->prev;
+ METER(arenaList->stats.freelen += nfree);
+ METER(arenaList->stats.totalfreelen += nfree);
+ METER(++arenaList->stats.totalarenas);
+ }
+ limit = GC_THINGS_SIZE;
+ } while ((a = *ap) != NULL);
+ }
+
+ if (rt->gcCallback)
+ (void) rt->gcCallback(cx, JSGC_FINALIZE_END);
+#ifdef DEBUG_srcnotesize
+ { extern void DumpSrcNoteSizeHist();
+ DumpSrcNoteSizeHist();
+ printf("GC HEAP SIZE %lu (%lu)\n",
+ (unsigned long)rt->gcBytes, (unsigned long)rt->gcPrivateBytes);
+ }
+#endif
+
+ JS_LOCK_GC(rt);
+
+ /*
+ * We want to restart GC if js_GC was called recursively or if any of the
+ * finalizers called js_RemoveRoot or js_UnlockGCThingRT.
+ */
+ if (rt->gcLevel > 1 || rt->gcPoke) {
+ rt->gcLevel = 1;
+ rt->gcPoke = JS_FALSE;
+ JS_UNLOCK_GC(rt);
+ goto restart;
+ }
+ js_EnablePropertyCache(cx);
+ rt->gcLevel = 0;
+ rt->gcLastBytes = rt->gcBytes;
+ rt->gcRunning = JS_FALSE;
+
+#ifdef JS_THREADSAFE
+ /* If we were invoked during a request, pay back the temporary debit. */
+ if (requestDebit)
+ rt->requestCount += requestDebit;
+ rt->gcThread = NULL;
+ JS_NOTIFY_GC_DONE(rt);
+
+ /*
+ * Unlock unless we have GC_LAST_DITCH which requires locked GC on return.
+ */
+ if (gckind != GC_LAST_DITCH)
+ JS_UNLOCK_GC(rt);
+#endif
+
+ /* Execute JSGC_END callback outside the lock. */
+ if (rt->gcCallback) {
+ JSWeakRoots savedWeakRoots;
+ JSTempValueRooter tvr;
+
+ if (gckind == GC_LAST_DITCH) {
+ /*
+ * We allow JSGC_END implementation to force a full GC or allocate
+ * new GC things. Thus we must protect the weak roots from GC or
+ * overwrites.
+ */
+ savedWeakRoots = cx->weakRoots;
+ JS_PUSH_TEMP_ROOT_WEAK_COPY(cx, &savedWeakRoots, &tvr);
+ JS_KEEP_ATOMS(rt);
+ JS_UNLOCK_GC(rt);
+ }
+
+ (void) rt->gcCallback(cx, JSGC_END);
+
+ if (gckind == GC_LAST_DITCH) {
+ JS_LOCK_GC(rt);
+ JS_UNKEEP_ATOMS(rt);
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ } else if (gckind == GC_LAST_CONTEXT && rt->gcPoke) {
+ /*
+ * On shutdown iterate until JSGC_END callback stops creating
+ * garbage.
+ */
+ goto restart_after_callback;
+ }
+ }
+}
+
+void
+js_UpdateMallocCounter(JSContext *cx, size_t nbytes)
+{
+ uint32 *pbytes, bytes;
+
+#ifdef JS_THREADSAFE
+ pbytes = &cx->thread->gcMallocBytes;
+#else
+ pbytes = &cx->runtime->gcMallocBytes;
+#endif
+ bytes = *pbytes;
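+    /* Saturating add: clamp the counter at (uint32)-1 instead of wrapping. */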
+ *pbytes = ((uint32)-1 - bytes <= nbytes) ? (uint32)-1 : bytes + nbytes;
+}
diff --git a/src/third_party/js-1.7/jsgc.h b/src/third_party/js-1.7/jsgc.h
new file mode 100644
index 00000000000..ec623a1a331
--- /dev/null
+++ b/src/third_party/js-1.7/jsgc.h
@@ -0,0 +1,368 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsgc_h___
+#define jsgc_h___
+/*
+ * JS Garbage Collector.
+ */
+#include "jsprvtd.h"
+#include "jspubtd.h"
+#include "jsdhash.h"
+#include "jsutil.h"
+
+JS_BEGIN_EXTERN_C
+
+/* GC thing type indexes. */
+#define GCX_OBJECT 0 /* JSObject */
+#define GCX_STRING 1 /* JSString */
+#define GCX_DOUBLE 2 /* jsdouble */
+#define GCX_MUTABLE_STRING 3 /* JSString that's mutable --
+ single-threaded only! */
+#define GCX_PRIVATE 4 /* private (unscanned) data */
+#define GCX_NAMESPACE 5 /* JSXMLNamespace */
+#define GCX_QNAME 6 /* JSXMLQName */
+#define GCX_XML 7 /* JSXML */
+#define GCX_EXTERNAL_STRING 8 /* JSString w/ external chars */
+
+#define GCX_NTYPES_LOG2 4 /* type index bits */
+#define GCX_NTYPES JS_BIT(GCX_NTYPES_LOG2)
+
+/* GC flag definitions, must fit in 8 bits (type index goes in the low bits). */
+#define GCF_TYPEMASK JS_BITMASK(GCX_NTYPES_LOG2)
+#define GCF_MARK JS_BIT(GCX_NTYPES_LOG2)
+#define GCF_FINAL JS_BIT(GCX_NTYPES_LOG2 + 1)
+#define GCF_SYSTEM JS_BIT(GCX_NTYPES_LOG2 + 2)
+#define GCF_LOCKSHIFT (GCX_NTYPES_LOG2 + 3) /* lock bit shift */
+#define GCF_LOCK JS_BIT(GCF_LOCKSHIFT) /* lock request bit in API */
+
+/* Pseudo-flag that modifies GCX_STRING to make GCX_MUTABLE_STRING. */
+#define GCF_MUTABLE 2
+
+#if (GCX_STRING | GCF_MUTABLE) != GCX_MUTABLE_STRING
+# error "mutable string type index botch!"
+#endif
+
+extern uint8 *
+js_GetGCThingFlags(void *thing);
+
+/*
+ * The sole purpose of this function is to preserve public API compatibility
+ * in JS_GetStringBytes, which takes only a single JSString* argument.
+ */
+JSRuntime*
+js_GetGCStringRuntime(JSString *str);
+
+#if 1
+/*
+ * Since we're forcing a GC from JS_GC anyway, don't bother wasting cycles
+ * loading oldval. XXX remove implied force, fix jsinterp.c's "second arg
+ * ignored", etc.
+ */
+#define GC_POKE(cx, oldval) ((cx)->runtime->gcPoke = JS_TRUE)
+#else
+#define GC_POKE(cx, oldval) ((cx)->runtime->gcPoke = JSVAL_IS_GCTHING(oldval))
+#endif
+
+extern intN
+js_ChangeExternalStringFinalizer(JSStringFinalizeOp oldop,
+ JSStringFinalizeOp newop);
+
+extern JSBool
+js_InitGC(JSRuntime *rt, uint32 maxbytes);
+
+extern void
+js_FinishGC(JSRuntime *rt);
+
+extern JSBool
+js_AddRoot(JSContext *cx, void *rp, const char *name);
+
+extern JSBool
+js_AddRootRT(JSRuntime *rt, void *rp, const char *name);
+
+extern JSBool
+js_RemoveRoot(JSRuntime *rt, void *rp);
+
+#ifdef DEBUG
+extern void
+js_DumpNamedRoots(JSRuntime *rt,
+ void (*dump)(const char *name, void *rp, void *data),
+ void *data);
+#endif
+
+extern uint32
+js_MapGCRoots(JSRuntime *rt, JSGCRootMapFun map, void *data);
+
+/* Table of pointers with count valid members. */
+typedef struct JSPtrTable {
+ size_t count;
+ void **array;
+} JSPtrTable;
+
+extern JSBool
+js_RegisterCloseableIterator(JSContext *cx, JSObject *obj);
+
+#if JS_HAS_GENERATORS
+
+/*
+ * Runtime state to support generators' close hooks.
+ */
+typedef struct JSGCCloseState {
+ /*
+ * Singly linked list of generators that are reachable from GC roots or
+ * were created after the last GC.
+ */
+ JSGenerator *reachableList;
+
+ /*
+ * Head of the queue of generators that have already become unreachable but
+ * whose close hooks are not yet run.
+ */
+ JSGenerator *todoQueue;
+
+#ifndef JS_THREADSAFE
+ /*
+     * Flag indicating that the current thread is executing a close hook in
+     * the single-threaded case.
+ */
+ JSBool runningCloseHook;
+#endif
+} JSGCCloseState;
+
+extern void
+js_RegisterGenerator(JSContext *cx, JSGenerator *gen);
+
+extern JSBool
+js_RunCloseHooks(JSContext *cx);
+
+#endif
+
+/*
+ * The private JSGCThing struct, which describes a gcFreeList element.
+ */
+struct JSGCThing {
+ JSGCThing *next;
+ uint8 *flagp;
+};
+
+#define GC_NBYTES_MAX (10 * sizeof(JSGCThing))
+#define GC_NUM_FREELISTS (GC_NBYTES_MAX / sizeof(JSGCThing))
+#define GC_FREELIST_NBYTES(i) (((i) + 1) * sizeof(JSGCThing))
+#define GC_FREELIST_INDEX(n) (((n) / sizeof(JSGCThing)) - 1)
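+
+/*
+ * Illustrative sanity check (not part of the original import): the two macros
+ * above are inverses, e.g. a request of 2 * sizeof(JSGCThing) bytes maps to
+ * free list index 1 and back to the same byte count.
+ */
+JS_STATIC_ASSERT(GC_FREELIST_NBYTES(GC_FREELIST_INDEX(2 * sizeof(JSGCThing)))
+                 == 2 * sizeof(JSGCThing));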
+
+extern void *
+js_NewGCThing(JSContext *cx, uintN flags, size_t nbytes);
+
+extern JSBool
+js_LockGCThing(JSContext *cx, void *thing);
+
+extern JSBool
+js_LockGCThingRT(JSRuntime *rt, void *thing);
+
+extern JSBool
+js_UnlockGCThingRT(JSRuntime *rt, void *thing);
+
+extern JSBool
+js_IsAboutToBeFinalized(JSContext *cx, void *thing);
+
+extern void
+js_MarkAtom(JSContext *cx, JSAtom *atom);
+
+/* We avoid a large number of unnecessary calls by doing the flag check first */
+#define GC_MARK_ATOM(cx, atom) \
+ JS_BEGIN_MACRO \
+ if (!((atom)->flags & ATOM_MARK)) \
+ js_MarkAtom(cx, atom); \
+ JS_END_MACRO
+
+/*
+ * Always use the GC_MARK macro and never call js_MarkGCThing directly, so
+ * that when GC_MARK_DEBUG is defined the dump of live GC things does not miss
+ * a thing.
+ */
+extern void
+js_MarkGCThing(JSContext *cx, void *thing);
+
+#ifdef GC_MARK_DEBUG
+
+# define GC_MARK(cx, thing, name) js_MarkNamedGCThing(cx, thing, name)
+
+extern void
+js_MarkNamedGCThing(JSContext *cx, void *thing, const char *name);
+
+extern JS_FRIEND_DATA(FILE *) js_DumpGCHeap;
+JS_EXTERN_DATA(void *) js_LiveThingToFind;
+
+#else
+
+# define GC_MARK(cx, thing, name) js_MarkGCThing(cx, thing)
+
+#endif
+
+extern void
+js_MarkStackFrame(JSContext *cx, JSStackFrame *fp);
+
+/*
+ * Kinds of js_GC invocation.
+ */
+typedef enum JSGCInvocationKind {
+ /* Normal invocation. */
+ GC_NORMAL,
+
+ /*
+     * Called from js_DestroyContext for the last JSContext in a JSRuntime, when
+ * it is imperative that rt->gcPoke gets cleared early in js_GC.
+ */
+ GC_LAST_CONTEXT,
+
+ /*
+ * Called from js_NewGCThing as a last-ditch GC attempt. See comments
+ * before js_GC definition for details.
+ */
+ GC_LAST_DITCH
+} JSGCInvocationKind;
+
+extern void
+js_GC(JSContext *cx, JSGCInvocationKind gckind);
+
+/* Call this after a successful malloc of memory for GC-related things. */
+extern void
+js_UpdateMallocCounter(JSContext *cx, size_t nbytes);
+
+#ifdef DEBUG_notme
+#define JS_GCMETER 1
+#endif
+
+#ifdef JS_GCMETER
+
+typedef struct JSGCStats {
+#ifdef JS_THREADSAFE
+    uint32  localalloc; /* number of successful allocations from local lists */
+#endif
+ uint32 alloc; /* number of allocation attempts */
+ uint32 retry; /* allocation attempt retries after running the GC */
+ uint32 retryhalt; /* allocation retries halted by the branch callback */
+ uint32 fail; /* allocation failures */
+ uint32 finalfail; /* finalizer calls allocator failures */
+ uint32 lockborn; /* things born locked */
+ uint32 lock; /* valid lock calls */
+ uint32 unlock; /* valid unlock calls */
+ uint32 depth; /* mark tail recursion depth */
+ uint32 maxdepth; /* maximum mark tail recursion depth */
+ uint32 cdepth; /* mark recursion depth of C functions */
+ uint32 maxcdepth; /* maximum mark recursion depth of C functions */
+ uint32 unscanned; /* mark C stack overflows or number of times
+ GC things were put in unscanned bag */
+#ifdef DEBUG
+ uint32 maxunscanned; /* maximum size of unscanned bag */
+#endif
+ uint32 maxlevel; /* maximum GC nesting (indirect recursion) level */
+ uint32 poke; /* number of potentially useful GC calls */
+ uint32 nopoke; /* useless GC calls where js_PokeGC was not set */
+ uint32 afree; /* thing arenas freed so far */
+ uint32 stackseg; /* total extraordinary stack segments scanned */
+ uint32 segslots; /* total stack segment jsval slots scanned */
+ uint32 nclose; /* number of objects with close hooks */
+ uint32 maxnclose; /* max number of objects with close hooks */
+ uint32 closelater; /* number of close hooks scheduled to run */
+ uint32 maxcloselater; /* max number of close hooks scheduled to run */
+} JSGCStats;
+
+extern JS_FRIEND_API(void)
+js_DumpGCStats(JSRuntime *rt, FILE *fp);
+
+#endif /* JS_GCMETER */
+
+typedef struct JSGCArena JSGCArena;
+typedef struct JSGCArenaList JSGCArenaList;
+
+#ifdef JS_GCMETER
+typedef struct JSGCArenaStats JSGCArenaStats;
+
+struct JSGCArenaStats {
+    uint32  narenas;        /* number of arenas in the list */
+    uint32  maxarenas;      /* maximum number of allocated arenas */
+    uint32  nthings;        /* number of allocated JSGCThings */
+    uint32  maxthings;      /* maximum number of allocated JSGCThings */
+    uint32  totalnew;       /* number of successful calls to js_NewGCThing */
+ uint32 freelen; /* freeList lengths */
+ uint32 recycle; /* number of things recycled through freeList */
+ uint32 totalarenas; /* total number of arenas with live things that
+ GC scanned so far */
+ uint32 totalfreelen; /* total number of things that GC put to free
+ list so far */
+};
+#endif
+
+struct JSGCArenaList {
+ JSGCArena *last; /* last allocated GC arena */
+    uint16      lastLimit;      /* end offset of things allocated so far in
+                                   the last arena */
+ uint16 thingSize; /* size of things to allocate on this list */
+ JSGCThing *freeList; /* list of free GC things */
+#ifdef JS_GCMETER
+ JSGCArenaStats stats;
+#endif
+};
+
+typedef struct JSWeakRoots {
+ /* Most recently created things by type, members of the GC's root set. */
+ JSGCThing *newborn[GCX_NTYPES];
+
+ /* Atom root for the last-looked-up atom on this context. */
+ JSAtom *lastAtom;
+
+ /* Root for the result of the most recent js_InternalInvoke call. */
+ jsval lastInternalResult;
+} JSWeakRoots;
+
+JS_STATIC_ASSERT(JSVAL_NULL == 0);
+#define JS_CLEAR_WEAK_ROOTS(wr) (memset((wr), 0, sizeof(JSWeakRoots)))
+
+#ifdef DEBUG_notme
+#define TOO_MUCH_GC 1
+#endif
+
+#ifdef WAY_TOO_MUCH_GC
+#define TOO_MUCH_GC 1
+#endif
+
+JS_END_EXTERN_C
+
+#endif /* jsgc_h___ */
diff --git a/src/third_party/js-1.7/jshash.c b/src/third_party/js-1.7/jshash.c
new file mode 100644
index 00000000000..8e25517d5c0
--- /dev/null
+++ b/src/third_party/js-1.7/jshash.c
@@ -0,0 +1,483 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * PR hash table package.
+ */
+#include "jsstddef.h"
+#include <stdlib.h>
+#include <string.h>
+#include "jstypes.h"
+#include "jsbit.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jshash.h" /* Added by JSIFY */
+
+/* Compute the number of buckets in ht */
+#define NBUCKETS(ht) JS_BIT(JS_HASH_BITS - (ht)->shift)
+
+/* The smallest table has 16 buckets */
+#define MINBUCKETSLOG2 4
+#define MINBUCKETS JS_BIT(MINBUCKETSLOG2)
+
+/* Compute the maximum number of entries we will tolerate given n buckets, ~90% */
+#define OVERLOADED(n) ((n) - ((n) >> 3))
+
+/* Compute the number of entries below which we shrink the table by half */
+#define UNDERLOADED(n) (((n) > MINBUCKETS) ? ((n) >> 2) : 0)
+
+/*
+** Stubs for default hash allocator ops.
+*/
+static void *
+DefaultAllocTable(void *pool, size_t size)
+{
+ return malloc(size);
+}
+
+static void
+DefaultFreeTable(void *pool, void *item)
+{
+ free(item);
+}
+
+static JSHashEntry *
+DefaultAllocEntry(void *pool, const void *key)
+{
+ return (JSHashEntry*) malloc(sizeof(JSHashEntry));
+}
+
+static void
+DefaultFreeEntry(void *pool, JSHashEntry *he, uintN flag)
+{
+ if (flag == HT_FREE_ENTRY)
+ free(he);
+}
+
+static JSHashAllocOps defaultHashAllocOps = {
+ DefaultAllocTable, DefaultFreeTable,
+ DefaultAllocEntry, DefaultFreeEntry
+};
+
+JS_PUBLIC_API(JSHashTable *)
+JS_NewHashTable(uint32 n, JSHashFunction keyHash,
+ JSHashComparator keyCompare, JSHashComparator valueCompare,
+ JSHashAllocOps *allocOps, void *allocPriv)
+{
+ JSHashTable *ht;
+ size_t nb;
+
+ if (n <= MINBUCKETS) {
+ n = MINBUCKETSLOG2;
+ } else {
+ n = JS_CeilingLog2(n);
+ if ((int32)n < 0)
+ return NULL;
+ }
+
+ if (!allocOps) allocOps = &defaultHashAllocOps;
+
+ ht = (JSHashTable*) allocOps->allocTable(allocPriv, sizeof *ht);
+ if (!ht)
+ return NULL;
+ memset(ht, 0, sizeof *ht);
+ ht->shift = JS_HASH_BITS - n;
+ n = JS_BIT(n);
+ nb = n * sizeof(JSHashEntry *);
+ ht->buckets = (JSHashEntry**) allocOps->allocTable(allocPriv, nb);
+ if (!ht->buckets) {
+ allocOps->freeTable(allocPriv, ht);
+ return NULL;
+ }
+ memset(ht->buckets, 0, nb);
+
+ ht->keyHash = keyHash;
+ ht->keyCompare = keyCompare;
+ ht->valueCompare = valueCompare;
+ ht->allocOps = allocOps;
+ ht->allocPriv = allocPriv;
+ return ht;
+}
+
+JS_PUBLIC_API(void)
+JS_HashTableDestroy(JSHashTable *ht)
+{
+ uint32 i, n;
+ JSHashEntry *he, **hep;
+ JSHashAllocOps *allocOps = ht->allocOps;
+ void *allocPriv = ht->allocPriv;
+
+ n = NBUCKETS(ht);
+ for (i = 0; i < n; i++) {
+ hep = &ht->buckets[i];
+ while ((he = *hep) != NULL) {
+ *hep = he->next;
+ allocOps->freeEntry(allocPriv, he, HT_FREE_ENTRY);
+ }
+ }
+#ifdef DEBUG
+ memset(ht->buckets, 0xDB, n * sizeof ht->buckets[0]);
+#endif
+ allocOps->freeTable(allocPriv, ht->buckets);
+#ifdef DEBUG
+ memset(ht, 0xDB, sizeof *ht);
+#endif
+ allocOps->freeTable(allocPriv, ht);
+}
+
+/*
+ * Multiplicative hash, from Knuth 6.4.
+ */
+#define BUCKET_HEAD(ht, keyHash) \
+ (&(ht)->buckets[((keyHash) * JS_GOLDEN_RATIO) >> (ht)->shift])
+
+JS_PUBLIC_API(JSHashEntry **)
+JS_HashTableRawLookup(JSHashTable *ht, JSHashNumber keyHash, const void *key)
+{
+ JSHashEntry *he, **hep, **hep0;
+
+#ifdef HASHMETER
+ ht->nlookups++;
+#endif
+ hep = hep0 = BUCKET_HEAD(ht, keyHash);
+ while ((he = *hep) != NULL) {
+ if (he->keyHash == keyHash && ht->keyCompare(key, he->key)) {
+ /* Move to front of chain if not already there */
+ if (hep != hep0) {
+ *hep = he->next;
+ he->next = *hep0;
+ *hep0 = he;
+ }
+ return hep0;
+ }
+ hep = &he->next;
+#ifdef HASHMETER
+ ht->nsteps++;
+#endif
+ }
+ return hep;
+}
+
+static JSBool
+Resize(JSHashTable *ht, uint32 newshift)
+{
+ size_t nb, nentries, i;
+ JSHashEntry **oldbuckets, *he, *next, **hep;
+#ifdef DEBUG
+ size_t nold = NBUCKETS(ht);
+#endif
+
+ JS_ASSERT(newshift < JS_HASH_BITS);
+
+ nb = (size_t)1 << (JS_HASH_BITS - newshift);
+
+ /* Integer overflow protection. */
+ if (nb > (size_t)-1 / sizeof(JSHashEntry*))
+ return JS_FALSE;
+ nb *= sizeof(JSHashEntry*);
+
+ oldbuckets = ht->buckets;
+ ht->buckets = (JSHashEntry**)ht->allocOps->allocTable(ht->allocPriv, nb);
+ if (!ht->buckets) {
+ ht->buckets = oldbuckets;
+ return JS_FALSE;
+ }
+ memset(ht->buckets, 0, nb);
+
+ ht->shift = newshift;
+ nentries = ht->nentries;
+
+ for (i = 0; nentries != 0; i++) {
+ for (he = oldbuckets[i]; he; he = next) {
+ JS_ASSERT(nentries != 0);
+ --nentries;
+ next = he->next;
+ hep = BUCKET_HEAD(ht, he->keyHash);
+
+ /*
+ * Since he comes from the old table, it must be unique and we
+ * simply add it to the head of bucket chain without chain lookup.
+ */
+ he->next = *hep;
+ *hep = he;
+ }
+ }
+#ifdef DEBUG
+ memset(oldbuckets, 0xDB, nold * sizeof oldbuckets[0]);
+#endif
+ ht->allocOps->freeTable(ht->allocPriv, oldbuckets);
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSHashEntry *)
+JS_HashTableRawAdd(JSHashTable *ht, JSHashEntry **hep,
+ JSHashNumber keyHash, const void *key, void *value)
+{
+ uint32 n;
+ JSHashEntry *he;
+
+ /* Grow the table if it is overloaded */
+ n = NBUCKETS(ht);
+ if (ht->nentries >= OVERLOADED(n)) {
+ if (!Resize(ht, ht->shift - 1))
+ return NULL;
+#ifdef HASHMETER
+ ht->ngrows++;
+#endif
+ hep = JS_HashTableRawLookup(ht, keyHash, key);
+ }
+
+ /* Make a new key value entry */
+ he = ht->allocOps->allocEntry(ht->allocPriv, key);
+ if (!he)
+ return NULL;
+ he->keyHash = keyHash;
+ he->key = key;
+ he->value = value;
+ he->next = *hep;
+ *hep = he;
+ ht->nentries++;
+ return he;
+}
+
+JS_PUBLIC_API(JSHashEntry *)
+JS_HashTableAdd(JSHashTable *ht, const void *key, void *value)
+{
+ JSHashNumber keyHash;
+ JSHashEntry *he, **hep;
+
+ keyHash = ht->keyHash(key);
+ hep = JS_HashTableRawLookup(ht, keyHash, key);
+ if ((he = *hep) != NULL) {
+ /* Hit; see if values match */
+ if (ht->valueCompare(he->value, value)) {
+ /* key,value pair is already present in table */
+ return he;
+ }
+ if (he->value)
+ ht->allocOps->freeEntry(ht->allocPriv, he, HT_FREE_VALUE);
+ he->value = value;
+ return he;
+ }
+ return JS_HashTableRawAdd(ht, hep, keyHash, key, value);
+}
+
+JS_PUBLIC_API(void)
+JS_HashTableRawRemove(JSHashTable *ht, JSHashEntry **hep, JSHashEntry *he)
+{
+ uint32 n;
+
+ *hep = he->next;
+ ht->allocOps->freeEntry(ht->allocPriv, he, HT_FREE_ENTRY);
+
+ /* Shrink table if it's underloaded */
+ n = NBUCKETS(ht);
+ if (--ht->nentries < UNDERLOADED(n)) {
+ Resize(ht, ht->shift + 1);
+#ifdef HASHMETER
+ ht->nshrinks++;
+#endif
+ }
+}
+
+JS_PUBLIC_API(JSBool)
+JS_HashTableRemove(JSHashTable *ht, const void *key)
+{
+ JSHashNumber keyHash;
+ JSHashEntry *he, **hep;
+
+ keyHash = ht->keyHash(key);
+ hep = JS_HashTableRawLookup(ht, keyHash, key);
+ if ((he = *hep) == NULL)
+ return JS_FALSE;
+
+ /* Hit; remove element */
+ JS_HashTableRawRemove(ht, hep, he);
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(void *)
+JS_HashTableLookup(JSHashTable *ht, const void *key)
+{
+ JSHashNumber keyHash;
+ JSHashEntry *he, **hep;
+
+ keyHash = ht->keyHash(key);
+ hep = JS_HashTableRawLookup(ht, keyHash, key);
+ if ((he = *hep) != NULL) {
+ return he->value;
+ }
+ return NULL;
+}
+
+/*
+** Iterate over the entries in the hash table, calling f for each entry
+** found. Stop if f says to (return value & HT_ENUMERATE_STOP).
+** Return a count of the number of elements scanned.
+*/
+JS_PUBLIC_API(int)
+JS_HashTableEnumerateEntries(JSHashTable *ht, JSHashEnumerator f, void *arg)
+{
+ JSHashEntry *he, **hep, **bucket;
+ uint32 nlimit, n, nbuckets, newlog2;
+ int rv;
+
+ nlimit = ht->nentries;
+ n = 0;
+ for (bucket = ht->buckets; n != nlimit; ++bucket) {
+ hep = bucket;
+ while ((he = *hep) != NULL) {
+ JS_ASSERT(n < nlimit);
+ rv = f(he, n, arg);
+ n++;
+ if (rv & HT_ENUMERATE_REMOVE) {
+ *hep = he->next;
+ ht->allocOps->freeEntry(ht->allocPriv, he, HT_FREE_ENTRY);
+ --ht->nentries;
+ } else {
+ hep = &he->next;
+ }
+ if (rv & HT_ENUMERATE_STOP) {
+ goto out;
+ }
+ }
+ }
+
+out:
+ /* Shrink table if removal of entries made it underloaded */
+ if (ht->nentries != nlimit) {
+ JS_ASSERT(ht->nentries < nlimit);
+ nbuckets = NBUCKETS(ht);
+ if (MINBUCKETS < nbuckets && ht->nentries < UNDERLOADED(nbuckets)) {
+ newlog2 = JS_CeilingLog2(ht->nentries);
+ if (newlog2 < MINBUCKETSLOG2)
+ newlog2 = MINBUCKETSLOG2;
+
+ /* Check that we really shrink the table. */
+ JS_ASSERT(JS_HASH_BITS - ht->shift > newlog2);
+ Resize(ht, JS_HASH_BITS - newlog2);
+ }
+ }
+ return (int)n;
+}
+
+#ifdef HASHMETER
+#include <math.h>
+#include <stdio.h>
+
+JS_PUBLIC_API(void)
+JS_HashTableDumpMeter(JSHashTable *ht, JSHashEnumerator dump, FILE *fp)
+{
+ double sqsum, mean, variance, sigma;
+ uint32 nchains, nbuckets, nentries;
+ uint32 i, n, maxChain, maxChainLen;
+ JSHashEntry *he;
+
+ sqsum = 0;
+ nchains = 0;
+ maxChainLen = 0;
+ nbuckets = NBUCKETS(ht);
+ for (i = 0; i < nbuckets; i++) {
+ he = ht->buckets[i];
+ if (!he)
+ continue;
+ nchains++;
+ for (n = 0; he; he = he->next)
+ n++;
+ sqsum += n * n;
+ if (n > maxChainLen) {
+ maxChainLen = n;
+ maxChain = i;
+ }
+ }
+ nentries = ht->nentries;
+ mean = (double)nentries / nchains;
+ variance = nchains * sqsum - nentries * nentries;
+ if (variance < 0 || nchains == 1)
+ variance = 0;
+ else
+ variance /= nchains * (nchains - 1);
+ sigma = sqrt(variance);
+
+ fprintf(fp, "\nHash table statistics:\n");
+ fprintf(fp, " number of lookups: %u\n", ht->nlookups);
+ fprintf(fp, " number of entries: %u\n", ht->nentries);
+ fprintf(fp, " number of grows: %u\n", ht->ngrows);
+ fprintf(fp, " number of shrinks: %u\n", ht->nshrinks);
+ fprintf(fp, " mean steps per hash: %g\n", (double)ht->nsteps
+ / ht->nlookups);
+ fprintf(fp, "mean hash chain length: %g\n", mean);
+ fprintf(fp, " standard deviation: %g\n", sigma);
+ fprintf(fp, " max hash chain length: %u\n", maxChainLen);
+ fprintf(fp, " max hash chain: [%u]\n", maxChain);
+
+ for (he = ht->buckets[maxChain], i = 0; he; he = he->next, i++)
+ if (dump(he, i, fp) != HT_ENUMERATE_NEXT)
+ break;
+}
+#endif /* HASHMETER */
+
+JS_PUBLIC_API(int)
+JS_HashTableDump(JSHashTable *ht, JSHashEnumerator dump, FILE *fp)
+{
+ int count;
+
+ count = JS_HashTableEnumerateEntries(ht, dump, fp);
+#ifdef HASHMETER
+ JS_HashTableDumpMeter(ht, dump, fp);
+#endif
+ return count;
+}
+
+JS_PUBLIC_API(JSHashNumber)
+JS_HashString(const void *key)
+{
+ JSHashNumber h;
+ const unsigned char *s;
+
+ h = 0;
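+    /* Shift left 4, fold the top 4 bits back in, and xor in each byte. */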
+ for (s = (const unsigned char *)key; *s; s++)
+ h = (h >> (JS_HASH_BITS - 4)) ^ (h << 4) ^ *s;
+ return h;
+}
+
+JS_PUBLIC_API(int)
+JS_CompareValues(const void *v1, const void *v2)
+{
+ return v1 == v2;
+}
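+
+/*
+ * Illustrative usage sketch, not part of the original import: build a table
+ * keyed by C strings with the public API above. ExampleCompareCStrings and
+ * ExampleHashUsage are hypothetical helpers added only for this example and
+ * are compiled out by default.
+ */
+#ifdef JS_HASH_USAGE_EXAMPLE
+static intN
+ExampleCompareCStrings(const void *v1, const void *v2)
+{
+    return strcmp((const char *)v1, (const char *)v2) == 0;
+}
+
+static void
+ExampleHashUsage(void)
+{
+    JSHashTable *ht;
+    void *value;
+
+    /* 16 initial buckets; string hash for keys, pointer identity for values. */
+    ht = JS_NewHashTable(16, JS_HashString, ExampleCompareCStrings,
+                         JS_CompareValues, NULL, NULL);
+    if (!ht)
+        return;
+    JS_HashTableAdd(ht, "answer", (void *)42);
+    value = JS_HashTableLookup(ht, "answer");   /* yields (void *)42 */
+    (void) value;
+    JS_HashTableRemove(ht, "answer");
+    JS_HashTableDestroy(ht);
+}
+#endif /* JS_HASH_USAGE_EXAMPLE */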
diff --git a/src/third_party/js-1.7/jshash.h b/src/third_party/js-1.7/jshash.h
new file mode 100644
index 00000000000..2a125e1907d
--- /dev/null
+++ b/src/third_party/js-1.7/jshash.h
@@ -0,0 +1,151 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jshash_h___
+#define jshash_h___
+/*
+ * API to portable hash table code.
+ */
+#include <stddef.h>
+#include <stdio.h>
+#include "jstypes.h"
+#include "jscompat.h"
+
+JS_BEGIN_EXTERN_C
+
+typedef uint32 JSHashNumber;
+typedef struct JSHashEntry JSHashEntry;
+typedef struct JSHashTable JSHashTable;
+
+#define JS_HASH_BITS 32
+#define JS_GOLDEN_RATIO 0x9E3779B9U
+
+typedef JSHashNumber (* JS_DLL_CALLBACK JSHashFunction)(const void *key);
+typedef intN (* JS_DLL_CALLBACK JSHashComparator)(const void *v1, const void *v2);
+typedef intN (* JS_DLL_CALLBACK JSHashEnumerator)(JSHashEntry *he, intN i, void *arg);
+
+/* Flag bits in JSHashEnumerator's return value */
+#define HT_ENUMERATE_NEXT 0 /* continue enumerating entries */
+#define HT_ENUMERATE_STOP 1 /* stop enumerating entries */
+#define HT_ENUMERATE_REMOVE 2 /* remove and free the current entry */
+
+typedef struct JSHashAllocOps {
+ void * (*allocTable)(void *pool, size_t size);
+ void (*freeTable)(void *pool, void *item);
+ JSHashEntry * (*allocEntry)(void *pool, const void *key);
+ void (*freeEntry)(void *pool, JSHashEntry *he, uintN flag);
+} JSHashAllocOps;
+
+#define HT_FREE_VALUE 0 /* just free the entry's value */
+#define HT_FREE_ENTRY 1 /* free value and entire entry */
+
+struct JSHashEntry {
+ JSHashEntry *next; /* hash chain linkage */
+ JSHashNumber keyHash; /* key hash function result */
+ const void *key; /* ptr to opaque key */
+ void *value; /* ptr to opaque value */
+};
+
+struct JSHashTable {
+ JSHashEntry **buckets; /* vector of hash buckets */
+ uint32 nentries; /* number of entries in table */
+ uint32 shift; /* multiplicative hash shift */
+ JSHashFunction keyHash; /* key hash function */
+ JSHashComparator keyCompare; /* key comparison function */
+ JSHashComparator valueCompare; /* value comparison function */
+ JSHashAllocOps *allocOps; /* allocation operations */
+ void *allocPriv; /* allocation private data */
+#ifdef HASHMETER
+ uint32 nlookups; /* total number of lookups */
+ uint32 nsteps; /* number of hash chains traversed */
+ uint32 ngrows; /* number of table expansions */
+ uint32 nshrinks; /* number of table contractions */
+#endif
+};
+
+/*
+ * Create a new hash table.
+ * If allocOps is null, use default allocator ops built on top of malloc().
+ */
+extern JS_PUBLIC_API(JSHashTable *)
+JS_NewHashTable(uint32 n, JSHashFunction keyHash,
+ JSHashComparator keyCompare, JSHashComparator valueCompare,
+ JSHashAllocOps *allocOps, void *allocPriv);
+
+extern JS_PUBLIC_API(void)
+JS_HashTableDestroy(JSHashTable *ht);
+
+/* Low level access methods */
+extern JS_PUBLIC_API(JSHashEntry **)
+JS_HashTableRawLookup(JSHashTable *ht, JSHashNumber keyHash, const void *key);
+
+extern JS_PUBLIC_API(JSHashEntry *)
+JS_HashTableRawAdd(JSHashTable *ht, JSHashEntry **hep, JSHashNumber keyHash,
+ const void *key, void *value);
+
+extern JS_PUBLIC_API(void)
+JS_HashTableRawRemove(JSHashTable *ht, JSHashEntry **hep, JSHashEntry *he);
+
+/* Higher level access methods */
+extern JS_PUBLIC_API(JSHashEntry *)
+JS_HashTableAdd(JSHashTable *ht, const void *key, void *value);
+
+extern JS_PUBLIC_API(JSBool)
+JS_HashTableRemove(JSHashTable *ht, const void *key);
+
+extern JS_PUBLIC_API(intN)
+JS_HashTableEnumerateEntries(JSHashTable *ht, JSHashEnumerator f, void *arg);
+
+extern JS_PUBLIC_API(void *)
+JS_HashTableLookup(JSHashTable *ht, const void *key);
+
+extern JS_PUBLIC_API(intN)
+JS_HashTableDump(JSHashTable *ht, JSHashEnumerator dump, FILE *fp);
+
+/* General-purpose C string hash function. */
+extern JS_PUBLIC_API(JSHashNumber)
+JS_HashString(const void *key);
+
+/* Stub function just returns v1 == v2 */
+extern JS_PUBLIC_API(intN)
+JS_CompareValues(const void *v1, const void *v2);
+
+JS_END_EXTERN_C
+
+#endif /* jshash_h___ */
diff --git a/src/third_party/js-1.7/jsify.pl b/src/third_party/js-1.7/jsify.pl
new file mode 100644
index 00000000000..fa7f4f83c49
--- /dev/null
+++ b/src/third_party/js-1.7/jsify.pl
@@ -0,0 +1,485 @@
+#!/usr/local/bin/perl
+
+# This script modifies C code to use the hijacked NSPR routines that are
+# now baked into the JavaScript engine rather than using the NSPR
+# routines that they were based on, i.e. types like PRArenaPool are changed
+# to JSArenaPool.
+#
+# This script was used in 9/98 to facilitate the incorporation of some NSPR
+# code into the JS engine so as to minimize dependency on NSPR.
+#
+
+# Command-line: jsify.pl [options] [filename]*
+#
+# Options:
+# -r Reverse direction of transformation, i.e. JS ==> NSPR2
+# -outdir Directory in which to place output files
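+#
+# Example invocation (illustrative only; the file names are hypothetical):
+#
+#     perl jsify.pl -outdir jsified prhash.c prhash.h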
+
+
+# NSPR2 symbols that will be modified to JS symbols, e.g.
+# PRArena <==> JSArena
+
+@NSPR_symbols = (
+"PRArena",
+"PRArenaPool",
+"PRArenaStats",
+"PR_ARENAMETER",
+"PR_ARENA_",
+"PR_ARENA_ALIGN",
+"PR_ARENA_ALLOCATE",
+"PR_ARENA_CONST_ALIGN_MASK",
+"PR_ARENA_DEFAULT_ALIGN",
+"PR_ARENA_DESTROY",
+"PR_ARENA_GROW",
+"PR_ARENA_MARK",
+"PR_ARENA_RELEASE",
+
+"PR_smprintf",
+"PR_smprintf_free",
+"PR_snprintf",
+"PR_sprintf_append",
+"PR_sscanf",
+"PR_sxprintf",
+"PR_vsmprintf",
+"PR_vsnprintf",
+"PR_vsprintf_append",
+"PR_vsxprintf",
+
+"PRCList",
+"PRCListStr",
+"PRCLists",
+
+"PRDestroyEventProc",
+"PREvent",
+"PREventFunProc",
+"PREventQueue",
+"PRHandleEventProc",
+"PR_PostEvent",
+"PR_PostSynchronousEvent",
+"PR_ProcessPendingEvents",
+"PR_CreateEventQueue",
+"PR_DequeueEvent",
+"PR_DestroyEvent",
+"PR_DestroyEventQueue",
+"PR_EventAvailable",
+"PR_EventLoop",
+"PR_GetEvent",
+"PR_GetEventOwner",
+"PR_GetEventQueueMonitor",
+"PR_GetEventQueueSelectFD",
+"PR_GetMainEventQueue",
+"PR_HandleEvent",
+"PR_InitEvent",
+"PR_ENTER_EVENT_QUEUE_MONITOR",
+"PR_EXIT_EVENT_QUEUE_MONITOR",
+"PR_MapEvents",
+"PR_RevokeEvents",
+
+"PR_cnvtf",
+"PR_dtoa",
+"PR_strtod",
+
+"PRFileDesc",
+
+"PR_HASH_BITS",
+"PR_GOLDEN_RATIO",
+"PRHashAllocOps",
+"PRHashComparator",
+"PRHashEntry",
+"PRHashEnumerator",
+"PRHashFunction",
+"PRHashNumber",
+"PRHashTable",
+"PR_HashString",
+"PR_HashTableAdd",
+"PR_HashTableDestroy",
+"PR_HashTableDump",
+"PR_HashTableEnumerateEntries",
+"PR_HashTableLookup",
+"PR_HashTableRawAdd",
+"PR_HashTableRawLookup",
+"PR_HashTableRawRemove",
+"PR_HashTableRemove",
+
+"PRBool",
+"PRFloat64",
+"PRInt16",
+"PRInt32",
+"PRInt64",
+"PRInt8",
+"PRIntn",
+"PRUint16",
+"PRUint32",
+"PRUint64",
+"PRUint8",
+"PRUintn",
+"PRPtrDiff",
+"PRPtrdiff",
+"PRUptrdiff",
+"PRUword",
+"PRWord",
+"PRPackedBool",
+"PRSize",
+"PRStatus",
+"pruword",
+"prword",
+"prword_t",
+
+"PR_ALIGN_OF_DOUBLE",
+"PR_ALIGN_OF_FLOAT",
+"PR_ALIGN_OF_INT",
+"PR_ALIGN_OF_INT64",
+"PR_ALIGN_OF_LONG",
+"PR_ALIGN_OF_POINTER",
+"PR_ALIGN_OF_SHORT",
+"PR_ALIGN_OF_WORD",
+"PR_BITS_PER_BYTE",
+"PR_BITS_PER_BYTE_LOG2",
+"PR_BITS_PER_DOUBLE",
+"PR_BITS_PER_DOUBLE_LOG2",
+"PR_BITS_PER_FLOAT",
+"PR_BITS_PER_FLOAT_LOG2",
+"PR_BITS_PER_INT",
+"PR_BITS_PER_INT64",
+"PR_BITS_PER_INT64_LOG2",
+"PR_BITS_PER_INT_LOG2",
+"PR_BITS_PER_LONG",
+"PR_BITS_PER_LONG_LOG2",
+"PR_BITS_PER_SHORT",
+"PR_BITS_PER_SHORT_LOG2",
+"PR_BITS_PER_WORD",
+"PR_BITS_PER_WORD_LOG2",
+"PR_BYTES_PER_BYTE",
+"PR_BYTES_PER_DOUBLE",
+"PR_BYTES_PER_DWORD",
+"PR_BYTES_PER_DWORD_LOG2",
+"PR_BYTES_PER_FLOAT",
+"PR_BYTES_PER_INT",
+"PR_BYTES_PER_INT64",
+"PR_BYTES_PER_LONG",
+"PR_BYTES_PER_SHORT",
+"PR_BYTES_PER_WORD",
+"PR_BYTES_PER_WORD_LOG2",
+
+"PRSegment",
+"PRSegmentAccess",
+"PRStuffFunc",
+"PRThread",
+
+"PR_APPEND_LINK",
+
+"PR_ASSERT",
+
+"PR_ATOMIC_DWORD_LOAD",
+"PR_ATOMIC_DWORD_STORE",
+
+"PR_Abort",
+
+"PR_ArenaAllocate",
+"PR_ArenaCountAllocation",
+"PR_ArenaCountGrowth",
+"PR_ArenaCountInplaceGrowth",
+"PR_ArenaCountRelease",
+"PR_ArenaCountRetract",
+"PR_ArenaFinish",
+"PR_ArenaGrow",
+"PR_ArenaRelease",
+"PR_CompactArenaPool",
+"PR_DumpArenaStats",
+"PR_FinishArenaPool",
+"PR_FreeArenaPool",
+"PR_InitArenaPool",
+
+"PR_Assert",
+
+"PR_AttachThread",
+
+"PR_BEGIN_EXTERN_C",
+"PR_BEGIN_MACRO",
+
+"PR_BIT",
+"PR_BITMASK",
+
+"PR_BUFFER_OVERFLOW_ERROR",
+
+"PR_CALLBACK",
+"PR_CALLBACK_DECL",
+"PR_CALLOC",
+"PR_CEILING_LOG2",
+"PR_CLEAR_ARENA",
+"PR_CLEAR_BIT",
+"PR_CLEAR_UNUSED",
+"PR_CLIST_IS_EMPTY",
+"PR_COUNT_ARENA",
+"PR_CURRENT_THREAD",
+
+"PR_GetSegmentAccess",
+"PR_GetSegmentSize",
+"PR_GetSegmentVaddr",
+"PR_GrowSegment",
+"PR_DestroySegment",
+"PR_MapSegment",
+"PR_NewSegment",
+"PR_Segment",
+"PR_Seg",
+"PR_SEGMENT_NONE",
+"PR_SEGMENT_RDONLY",
+"PR_SEGMENT_RDWR",
+
+"PR_Calloc",
+"PR_CeilingLog2",
+"PR_CompareStrings",
+"PR_CompareValues",
+"PR_DELETE",
+"PR_END_EXTERN_C",
+"PR_END_MACRO",
+"PR_ENUMERATE_STOP",
+"PR_FAILURE",
+"PR_FALSE",
+"PR_FLOOR_LOG2",
+"PR_FREEIF",
+"PR_FREE_PATTERN",
+"PR_FloorLog2",
+"PR_FormatTime",
+"PR_Free",
+
+"PR_GetEnv",
+"PR_GetError",
+"PR_INIT_ARENA_POOL",
+"PR_INIT_CLIST",
+"PR_INIT_STATIC_CLIST",
+"PR_INLINE",
+"PR_INSERT_AFTER",
+"PR_INSERT_BEFORE",
+"PR_INSERT_LINK",
+"PR_INT32",
+"PR_INTERVAL_NO_TIMEOUT",
+"PR_INTERVAL_NO_WAIT",
+"PR_Init",
+"PR_LIST_HEAD",
+"PR_LIST_TAIL",
+"PR_LOG",
+"PR_LOGGING",
+"PR_LOG_ALWAYS",
+"PR_LOG_BEGIN",
+"PR_LOG_DEBUG",
+"PR_LOG_DEFINE",
+"PR_LOG_END",
+"PR_LOG_ERROR",
+"PR_LOG_MAX",
+"PR_LOG_MIN",
+"PR_LOG_NONE",
+"PR_LOG_NOTICE",
+"PR_LOG_TEST",
+"PR_LOG_WARN",
+"PR_LOG_WARNING",
+"PR_LogFlush",
+"PR_LogPrint",
+"PR_MALLOC",
+"PR_MAX",
+"PR_MD_calloc",
+"PR_MD_free",
+"PR_MD_malloc",
+"PR_MD_realloc",
+"PR_MIN",
+"PR_Malloc",
+"PR_NEW",
+"PR_NEWZAP",
+"PR_NEXT_LINK",
+"PR_NOT_REACHED",
+"PR_NewCondVar",
+"PR_NewHashTable",
+"PR_NewLogModule",
+"PR_PREV_LINK",
+"PR_PUBLIC_API",
+"PR_PUBLIC_DATA",
+"PR_RANGE_ERROR",
+"PR_REALLOC",
+"PR_REMOVE_AND_INIT_LINK",
+"PR_REMOVE_LINK",
+"PR_ROUNDUP",
+"PR_Realloc",
+
+"PR_SET_BIT",
+"PR_STATIC_CALLBACK",
+"PR_SUCCESS",
+"PR_SetError",
+"PR_SetLogBuffering",
+"PR_SetLogFile",
+
+"PR_TEST_BIT",
+"PR_TRUE",
+"PR_UINT32",
+"PR_UPTRDIFF",
+
+"prarena_h___",
+"prbit_h___",
+"prclist_h___",
+"prdtoa_h___",
+"prlog_h___",
+"prlong_h___",
+"prmacos_h___",
+"prmem_h___",
+"prprf_h___",
+"prtypes_h___",
+
+"prarena",
+"prbit",
+"prbitmap_t",
+"prclist",
+"prcpucfg",
+"prdtoa",
+"prhash",
+"plhash",
+"prlong",
+"prmacos",
+"prmem",
+"prosdep",
+"protypes",
+"prprf",
+"prtypes"
+);
+
+while ($ARGV[0] =~ /^-/) {
+ if ($ARGV[0] eq "-r") {
+ shift;
+ $reverse_conversion = 1;
+ } elsif ($ARGV[0] eq "-outdir") {
+ shift;
+ $outdir = shift;
+ }
+}
+
+# Given an NSPR symbol compute the JS equivalent or
+# vice-versa
+sub subst {
+ local ($replacement);
+ local ($sym) = @_;
+
+ $replacement = substr($sym,0,2) eq "pr" ? "js" : "JS";
+ $replacement .= substr($sym, 2);
+ return $replacement;
+}
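+
+# For illustration, the mapping subst produces for two of the symbols listed
+# above (examples only):
+#
+#   PRArenaPool  ==>  JSArenaPool
+#   prhash       ==>  jshash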
+
+# Build the regular expression that will convert between the NSPR
+# types and the JS types
+if ($reverse_conversion) {
+ die "Not implemented yet";
+} else {
+ foreach $sym (@NSPR_symbols) {
+ $regexp .= $sym . "|"
+ }
+    # Get rid of the last "|"
+ chop $regexp;
+
+ # Replace PR* with JS* and replace pr* with js*
+ $regexp = 's/(^|\\W)(' . $regexp . ')/$1 . &subst($2)/eg';
+# print $regexp;
+}
+
+# Pre-compile a little subroutine to perform the regexp substitution
+# between NSPR types and JS types
+eval('sub convert_from_NSPR {($line) = @_; $line =~ ' . $regexp . ';}');
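+
+# For illustration (hypothetical input line): given the regexp built above,
+# convert_from_NSPR rewrites
+#   PRHashTable *table = PR_NewHashTable(...);
+# into
+#   JSHashTable *table = JS_NewHashTable(...);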
+
+sub convert_mallocs {
+ ($line) = @_;
+ $line =~ s/PR_MALLOC/malloc/g;
+ $line =~ s/PR_REALLOC/realloc/g;
+ $line =~ s/PR_FREE/free/g;
+ return $line;
+}
+
+sub convert_includes {
+ ($line) = @_;
+ if ($line !~ /include/) {
+ return $line;
+ }
+
+ if ($line =~ /prlog\.h/) {
+ $line = '#include "jsutil.h"'. " /* Added by JSIFY */\n";
+ } elsif ($line =~ /plhash\.h/) {
+ $line = '#include "jshash.h"'. " /* Added by JSIFY */\n";
+ } elsif ($line =~ /plarena\.h/) {
+ $line = '#include "jsarena.h"'. " /* Added by JSIFY */\n";
+ } elsif ($line =~ /prmem\.h/) {
+ $line = "";
+ } elsif ($line =~ /jsmsg\.def/) {
+ $line = '#include "js.msg"' . "\n";
+ } elsif ($line =~ /shellmsg\.def/) {
+ $line = '#include "jsshell.msg"' . "\n";
+ } elsif ($line =~ /jsopcode\.def/) {
+ $line = '#include "jsopcode.tbl"' . "\n";
+ }
+ return $line;
+}
+
+sub convert_declarations {
+ ($line) = @_;
+ $line =~ s/PR_EXTERN/JS_EXTERN_API/g;
+ $line =~ s/PR_IMPLEMENT_DATA/JS_EXPORT_DATA/g;
+ $line =~ s/PR_IMPLEMENT/JS_EXPORT_API/g;
+ $line =~ s/PR_CALLBACK/JS_DLL_CALLBACK/g;
+ $line =~ s/PR_STATIC_CALLBACK/JS_STATIC_DLL_CALLBACK/g;
+ $line =~ s/PR_IMPORT/JS_IMPORT/g;
+ $line =~ s/PR_PUBLIC_API/JS_EXPORT_API/g;
+ $line =~ s/PR_PUBLIC_DATA/JS_EXPORT_DATA/g;
+ return $line;
+}
+
+sub convert_long_long_macros {
+ ($line) = @_;
+ $line =~ s/\b(LL_)/JSLL_/g;
+ return $line;
+}
+
+sub convert_asserts {
+ ($line) = @_;
+ $line =~ s/\bPR_ASSERT/JS_ASSERT/g;
+ return $line;
+}
+
+while ($#ARGV >= 0) {
+ $infile = shift;
+
+    # Change the filename prefix, e.g. prhash.h becomes jshash.h, except for
+    # legacy files that start with 'prmj', like prmjtime.h.
+ $outfile = $infile;
+ if ($infile !~ /^prmj/) {
+ $outfile =~ s/^pr/js/;
+ $outfile =~ s/^pl/js/;
+ }
+
+ if ($outdir) {
+ $outfile = $outdir . '/' . $outfile;
+ }
+
+ if ($infile eq $outfile) {
+ die "Error: refuse to overwrite $outfile, use -outdir option."
+ }
+ die "Can't open $infile" if !open(INFILE, "<$infile");
+ die "Can't open $outfile for writing" if !open(OUTFILE, ">$outfile");
+
+ while (<INFILE>) {
+ $line = $_;
+
+        # Rewrite NSPR #include lines, e.g. "prlog.h" ==> "jsutil.h"
+ &convert_includes($line);
+
+ # Rename PR_EXTERN, PR_IMPORT, etc.
+ &convert_declarations($line);
+
+ # Convert from PR_MALLOC to malloc, etc.
+ &convert_mallocs($line);
+
+ # Convert from PR_ASSERT to JS_ASSERT
+# &convert_asserts($line);
+
+        # Convert from, e.g., PRArena to JSArena
+ &convert_from_NSPR($line);
+
+ # Change LL_* macros to JSLL_*
+ &convert_long_long_macros($line);
+
+ print OUTFILE $line;
+ }
+}
diff --git a/src/third_party/js-1.7/jsinterp.c b/src/third_party/js-1.7/jsinterp.c
new file mode 100644
index 00000000000..c8c120482fa
--- /dev/null
+++ b/src/third_party/js-1.7/jsinterp.c
@@ -0,0 +1,6216 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JavaScript bytecode interpreter.
+ */
+#include "jsstddef.h"
+#include <stdio.h>
+#include <string.h>
+#include <math.h>
+#include "jstypes.h"
+#include "jsarena.h" /* Added by JSIFY */
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsprf.h"
+#include "jsapi.h"
+#include "jsarray.h"
+#include "jsatom.h"
+#include "jsbool.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsdbgapi.h"
+#include "jsfun.h"
+#include "jsgc.h"
+#include "jsinterp.h"
+#include "jsiter.h"
+#include "jslock.h"
+#include "jsnum.h"
+#include "jsobj.h"
+#include "jsopcode.h"
+#include "jsscan.h"
+#include "jsscope.h"
+#include "jsscript.h"
+#include "jsstr.h"
+
+#if JS_HAS_XML_SUPPORT
+#include "jsxml.h"
+#endif
+
+#ifdef DEBUG
+#define ASSERT_CACHE_IS_EMPTY(cache) \
+ JS_BEGIN_MACRO \
+ JSPropertyCacheEntry *end_, *pce_, entry_; \
+ JSPropertyCache *cache_ = (cache); \
+ JS_ASSERT(cache_->empty); \
+ end_ = &cache_->table[PROPERTY_CACHE_SIZE]; \
+ for (pce_ = &cache_->table[0]; pce_ < end_; pce_++) { \
+ PCE_LOAD(cache_, pce_, entry_); \
+ JS_ASSERT(!PCE_OBJECT(entry_)); \
+ JS_ASSERT(!PCE_PROPERTY(entry_)); \
+ } \
+ JS_END_MACRO
+#else
+#define ASSERT_CACHE_IS_EMPTY(cache) ((void)0)
+#endif
+
+void
+js_FlushPropertyCache(JSContext *cx)
+{
+ JSPropertyCache *cache;
+
+ cache = &cx->runtime->propertyCache;
+ if (cache->empty) {
+ ASSERT_CACHE_IS_EMPTY(cache);
+ return;
+ }
+ memset(cache->table, 0, sizeof cache->table);
+ cache->empty = JS_TRUE;
+#ifdef JS_PROPERTY_CACHE_METERING
+ cache->flushes++;
+#endif
+}
+
+void
+js_DisablePropertyCache(JSContext *cx)
+{
+ JS_ASSERT(!cx->runtime->propertyCache.disabled);
+ cx->runtime->propertyCache.disabled = JS_TRUE;
+}
+
+void
+js_EnablePropertyCache(JSContext *cx)
+{
+ JS_ASSERT(cx->runtime->propertyCache.disabled);
+ ASSERT_CACHE_IS_EMPTY(&cx->runtime->propertyCache);
+ cx->runtime->propertyCache.disabled = JS_FALSE;
+}
+
+/*
+ * Stack macros and functions. These all use a local variable, jsval *sp, to
+ * point to the next free stack slot. SAVE_SP must be called before any call
+ * to a function that may invoke the interpreter. RESTORE_SP must be called
+ * only after return from js_Invoke, because only js_Invoke changes fp->sp.
+ */
+#define PUSH(v) (*sp++ = (v))
+#define POP() (*--sp)
+#ifdef DEBUG
+#define SAVE_SP(fp) \
+ (JS_ASSERT((fp)->script || !(fp)->spbase || (sp) == (fp)->spbase), \
+ (fp)->sp = sp)
+#else
+#define SAVE_SP(fp) ((fp)->sp = sp)
+#endif
+#define RESTORE_SP(fp) (sp = (fp)->sp)
+
+/*
+ * SAVE_SP_AND_PC commits deferred stores of interpreter registers to their
+ * homes in fp, when calling out of the interpreter loop or threaded code.
+ * RESTORE_SP_AND_PC copies the other way, to update registers after a call
+ * to a subroutine that interprets a piece of the current script.
+ */
+#define SAVE_SP_AND_PC(fp) (SAVE_SP(fp), (fp)->pc = pc)
+#define RESTORE_SP_AND_PC(fp) (RESTORE_SP(fp), pc = (fp)->pc)
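+
+/*
+ * For illustration only (a sketch, not interpreter code; js_SomeCallout is a
+ * hypothetical stand-in for a subroutine that may reenter the interpreter):
+ *
+ *     SAVE_SP_AND_PC(fp);
+ *     ok = js_SomeCallout(cx, obj, &v);
+ *     if (!ok)
+ *         goto out;
+ *     RESTORE_SP_AND_PC(fp);
+ */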
+
+/*
+ * Push the generating bytecode's pc onto the parallel pc stack that runs
+ * depth slots below the operands.
+ *
+ * NB: PUSH_OPND uses sp, depth, and pc from its lexical environment. See
+ * js_Interpret for these local variables' declarations and uses.
+ */
+#define PUSH_OPND(v) (sp[-depth] = (jsval)pc, PUSH(v))
+#define STORE_OPND(n,v) (sp[(n)-depth] = (jsval)pc, sp[n] = (v))
+#define POP_OPND() POP()
+#define FETCH_OPND(n) (sp[n])
+
+/*
+ * Push the jsdouble d using sp, depth, and pc from the lexical environment.
+ * Try to convert d to a jsint that fits in a jsval, otherwise GC-alloc space
+ * for it and push a reference.
+ */
+#define STORE_NUMBER(cx, n, d) \
+ JS_BEGIN_MACRO \
+ jsint i_; \
+ jsval v_; \
+ \
+ if (JSDOUBLE_IS_INT(d, i_) && INT_FITS_IN_JSVAL(i_)) { \
+ v_ = INT_TO_JSVAL(i_); \
+ } else { \
+ ok = js_NewDoubleValue(cx, d, &v_); \
+ if (!ok) \
+ goto out; \
+ } \
+ STORE_OPND(n, v_); \
+ JS_END_MACRO
+
+#define STORE_INT(cx, n, i) \
+ JS_BEGIN_MACRO \
+ jsval v_; \
+ \
+ if (INT_FITS_IN_JSVAL(i)) { \
+ v_ = INT_TO_JSVAL(i); \
+ } else { \
+ ok = js_NewDoubleValue(cx, (jsdouble)(i), &v_); \
+ if (!ok) \
+ goto out; \
+ } \
+ STORE_OPND(n, v_); \
+ JS_END_MACRO
+
+#define STORE_UINT(cx, n, u) \
+ JS_BEGIN_MACRO \
+ jsval v_; \
+ \
+ if ((u) <= JSVAL_INT_MAX) { \
+ v_ = INT_TO_JSVAL(u); \
+ } else { \
+ ok = js_NewDoubleValue(cx, (jsdouble)(u), &v_); \
+ if (!ok) \
+ goto out; \
+ } \
+ STORE_OPND(n, v_); \
+ JS_END_MACRO
+
+#define FETCH_NUMBER(cx, n, d) \
+ JS_BEGIN_MACRO \
+ jsval v_; \
+ \
+ v_ = FETCH_OPND(n); \
+ VALUE_TO_NUMBER(cx, v_, d); \
+ JS_END_MACRO
+
+#define FETCH_INT(cx, n, i) \
+ JS_BEGIN_MACRO \
+ jsval v_ = FETCH_OPND(n); \
+ if (JSVAL_IS_INT(v_)) { \
+ i = JSVAL_TO_INT(v_); \
+ } else { \
+ SAVE_SP_AND_PC(fp); \
+ ok = js_ValueToECMAInt32(cx, v_, &i); \
+ if (!ok) \
+ goto out; \
+ } \
+ JS_END_MACRO
+
+#define FETCH_UINT(cx, n, ui) \
+ JS_BEGIN_MACRO \
+ jsval v_ = FETCH_OPND(n); \
+ jsint i_; \
+ if (JSVAL_IS_INT(v_) && (i_ = JSVAL_TO_INT(v_)) >= 0) { \
+ ui = (uint32) i_; \
+ } else { \
+ SAVE_SP_AND_PC(fp); \
+ ok = js_ValueToECMAUint32(cx, v_, &ui); \
+ if (!ok) \
+ goto out; \
+ } \
+ JS_END_MACRO
+
+/*
+ * Optimized conversion macros that test for the desired type in v before
+ * homing sp and calling a conversion function.
+ */
+#define VALUE_TO_NUMBER(cx, v, d) \
+ JS_BEGIN_MACRO \
+ if (JSVAL_IS_INT(v)) { \
+ d = (jsdouble)JSVAL_TO_INT(v); \
+ } else if (JSVAL_IS_DOUBLE(v)) { \
+ d = *JSVAL_TO_DOUBLE(v); \
+ } else { \
+ SAVE_SP_AND_PC(fp); \
+ ok = js_ValueToNumber(cx, v, &d); \
+ if (!ok) \
+ goto out; \
+ } \
+ JS_END_MACRO
+
+#define POP_BOOLEAN(cx, v, b) \
+ JS_BEGIN_MACRO \
+ v = FETCH_OPND(-1); \
+ if (v == JSVAL_NULL) { \
+ b = JS_FALSE; \
+ } else if (JSVAL_IS_BOOLEAN(v)) { \
+ b = JSVAL_TO_BOOLEAN(v); \
+ } else { \
+ SAVE_SP_AND_PC(fp); \
+ ok = js_ValueToBoolean(cx, v, &b); \
+ if (!ok) \
+ goto out; \
+ } \
+ sp--; \
+ JS_END_MACRO
+
+/*
+ * Convert a primitive string, number or boolean to a corresponding object.
+ * v must not be an object, null or undefined when using this macro.
+ */
+#define PRIMITIVE_TO_OBJECT(cx, v, obj) \
+ JS_BEGIN_MACRO \
+ SAVE_SP(fp); \
+ if (JSVAL_IS_STRING(v)) { \
+ obj = js_StringToObject(cx, JSVAL_TO_STRING(v)); \
+ } else if (JSVAL_IS_INT(v)) { \
+ obj = js_NumberToObject(cx, (jsdouble)JSVAL_TO_INT(v)); \
+ } else if (JSVAL_IS_DOUBLE(v)) { \
+ obj = js_NumberToObject(cx, *JSVAL_TO_DOUBLE(v)); \
+ } else { \
+ JS_ASSERT(JSVAL_IS_BOOLEAN(v)); \
+ obj = js_BooleanToObject(cx, JSVAL_TO_BOOLEAN(v)); \
+ } \
+ JS_END_MACRO
+
+#define VALUE_TO_OBJECT(cx, v, obj) \
+ JS_BEGIN_MACRO \
+ if (!JSVAL_IS_PRIMITIVE(v)) { \
+ obj = JSVAL_TO_OBJECT(v); \
+ } else { \
+ SAVE_SP_AND_PC(fp); \
+ obj = js_ValueToNonNullObject(cx, v); \
+ if (!obj) { \
+ ok = JS_FALSE; \
+ goto out; \
+ } \
+ } \
+ JS_END_MACRO
+
+#define FETCH_OBJECT(cx, n, v, obj) \
+ JS_BEGIN_MACRO \
+ v = FETCH_OPND(n); \
+ VALUE_TO_OBJECT(cx, v, obj); \
+ STORE_OPND(n, OBJECT_TO_JSVAL(obj)); \
+ JS_END_MACRO
+
+#define VALUE_TO_PRIMITIVE(cx, v, hint, vp) \
+ JS_BEGIN_MACRO \
+ if (JSVAL_IS_PRIMITIVE(v)) { \
+ *vp = v; \
+ } else { \
+ SAVE_SP_AND_PC(fp); \
+ ok = OBJ_DEFAULT_VALUE(cx, JSVAL_TO_OBJECT(v), hint, vp); \
+ if (!ok) \
+ goto out; \
+ } \
+ JS_END_MACRO
+
+JS_FRIEND_API(jsval *)
+js_AllocRawStack(JSContext *cx, uintN nslots, void **markp)
+{
+ jsval *sp;
+
+ if (markp)
+ *markp = JS_ARENA_MARK(&cx->stackPool);
+ JS_ARENA_ALLOCATE_CAST(sp, jsval *, &cx->stackPool, nslots * sizeof(jsval));
+ if (!sp) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_STACK_OVERFLOW,
+ (cx->fp && cx->fp->fun)
+ ? JS_GetFunctionName(cx->fp->fun)
+ : "script");
+ }
+ return sp;
+}
+
+JS_FRIEND_API(void)
+js_FreeRawStack(JSContext *cx, void *mark)
+{
+ JS_ARENA_RELEASE(&cx->stackPool, mark);
+}
+
+JS_FRIEND_API(jsval *)
+js_AllocStack(JSContext *cx, uintN nslots, void **markp)
+{
+ jsval *sp, *vp, *end;
+ JSArena *a;
+ JSStackHeader *sh;
+ JSStackFrame *fp;
+
+ /* Callers don't check for zero nslots: we do to avoid empty segments. */
+ if (nslots == 0) {
+ *markp = NULL;
+ return JS_ARENA_MARK(&cx->stackPool);
+ }
+
+ /* Allocate 2 extra slots for the stack segment header we'll likely need. */
+ sp = js_AllocRawStack(cx, 2 + nslots, markp);
+ if (!sp)
+ return NULL;
+
+ /* Try to avoid another header if we can piggyback on the last segment. */
+ a = cx->stackPool.current;
+ sh = cx->stackHeaders;
+ if (sh && JS_STACK_SEGMENT(sh) + sh->nslots == sp) {
+ /* Extend the last stack segment, give back the 2 header slots. */
+ sh->nslots += nslots;
+ a->avail -= 2 * sizeof(jsval);
+ } else {
+ /*
+ * Need a new stack segment, so we must initialize unused slots in the
+ * current frame. See js_GC, just before marking the "operand" jsvals,
+ * where we scan from fp->spbase to fp->sp or through fp->script->depth
+ * (whichever covers fewer slots).
+ */
+ fp = cx->fp;
+ if (fp && fp->script && fp->spbase) {
+#ifdef DEBUG
+ jsuword depthdiff = fp->script->depth * sizeof(jsval);
+ JS_ASSERT(JS_UPTRDIFF(fp->sp, fp->spbase) <= depthdiff);
+ JS_ASSERT(JS_UPTRDIFF(*markp, fp->spbase) >= depthdiff);
+#endif
+ end = fp->spbase + fp->script->depth;
+ for (vp = fp->sp; vp < end; vp++)
+ *vp = JSVAL_VOID;
+ }
+
+ /* Allocate and push a stack segment header from the 2 extra slots. */
+ sh = (JSStackHeader *)sp;
+ sh->nslots = nslots;
+ sh->down = cx->stackHeaders;
+ cx->stackHeaders = sh;
+ sp += 2;
+ }
+
+ /*
+ * Store JSVAL_NULL using memset, to let compilers optimize as they see
+ * fit, in case a caller allocates and pushes GC-things one by one, which
+ * could nest a last-ditch GC that will scan this segment.
+ */
+ memset(sp, 0, nslots * sizeof(jsval));
+ return sp;
+}
+
+JS_FRIEND_API(void)
+js_FreeStack(JSContext *cx, void *mark)
+{
+ JSStackHeader *sh;
+ jsuword slotdiff;
+
+ /* Check for zero nslots allocation special case. */
+ if (!mark)
+ return;
+
+ /* We can assert because js_FreeStack always balances js_AllocStack. */
+ sh = cx->stackHeaders;
+ JS_ASSERT(sh);
+
+ /* If mark is in the current segment, reduce sh->nslots, else pop sh. */
+ slotdiff = JS_UPTRDIFF(mark, JS_STACK_SEGMENT(sh)) / sizeof(jsval);
+ if (slotdiff < (jsuword)sh->nslots)
+ sh->nslots = slotdiff;
+ else
+ cx->stackHeaders = sh->down;
+
+ /* Release the stackPool space allocated since mark was set. */
+ JS_ARENA_RELEASE(&cx->stackPool, mark);
+}
+
+JSBool
+js_GetArgument(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ return JS_TRUE;
+}
+
+JSBool
+js_SetArgument(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ return JS_TRUE;
+}
+
+JSBool
+js_GetLocalVariable(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ return JS_TRUE;
+}
+
+JSBool
+js_SetLocalVariable(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ return JS_TRUE;
+}
+
+JSObject *
+js_GetScopeChain(JSContext *cx, JSStackFrame *fp)
+{
+ JSObject *obj, *cursor, *clonedChild, *parent;
+ JSTempValueRooter tvr;
+
+ obj = fp->blockChain;
+ if (!obj) {
+ /*
+ * Don't force a call object for a lightweight function call, but do
+ * insist that there is a call object for a heavyweight function call.
+ */
+ JS_ASSERT(!fp->fun ||
+ !(fp->fun->flags & JSFUN_HEAVYWEIGHT) ||
+ fp->callobj);
+ JS_ASSERT(fp->scopeChain);
+ return fp->scopeChain;
+ }
+
+ /*
+ * We have one or more lexical scopes to reflect into fp->scopeChain, so
+ * make sure there's a call object at the current head of the scope chain,
+ * if this frame is a call frame.
+ */
+ if (fp->fun && !fp->callobj) {
+ JS_ASSERT(OBJ_GET_CLASS(cx, fp->scopeChain) != &js_BlockClass ||
+ JS_GetPrivate(cx, fp->scopeChain) != fp);
+ if (!js_GetCallObject(cx, fp, fp->scopeChain))
+ return NULL;
+ }
+
+ /*
+ * Clone the block chain. To avoid recursive cloning we set the parent of
+     * the cloned child after we clone the parent. In the following loop a
+     * null clonedChild indicates the first iteration, when no special GC
+     * rooting is necessary. On the second and subsequent iterations we have
+     * to protect the chain cloned so far against the GC while cloning the
+     * cursor object.
+ */
+ cursor = obj;
+ clonedChild = NULL;
+ for (;;) {
+ parent = OBJ_GET_PARENT(cx, cursor);
+
+ /*
+ * We pass fp->scopeChain and not null even if we override the parent
+         * slot later, because passing null triggers useless calculations of
+         * the slot's value in js_NewObject, which js_CloneBlockObject calls.
+ */
+ cursor = js_CloneBlockObject(cx, cursor, fp->scopeChain, fp);
+ if (!cursor) {
+ if (clonedChild)
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return NULL;
+ }
+ if (!clonedChild) {
+ /*
+             * The first iteration. Check whether more scopes follow and, if
+             * so, root obj to protect the whole cloned chain against GC.
+ */
+ obj = cursor;
+ if (!parent)
+ break;
+ JS_PUSH_TEMP_ROOT_OBJECT(cx, obj, &tvr);
+ } else {
+ /*
+ * Avoid OBJ_SET_PARENT overhead as clonedChild cannot escape to
+ * other threads.
+ */
+ clonedChild->slots[JSSLOT_PARENT] = OBJECT_TO_JSVAL(cursor);
+ if (!parent) {
+ JS_ASSERT(tvr.u.value == OBJECT_TO_JSVAL(obj));
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ break;
+ }
+ }
+ clonedChild = cursor;
+ cursor = parent;
+ }
+ fp->flags |= JSFRAME_POP_BLOCKS;
+ fp->scopeChain = obj;
+ fp->blockChain = NULL;
+ return obj;
+}
+
+/*
+ * Walk the scope chain looking for block scopes whose locals need to be
+ * copied from stack slots into object slots before fp goes away.
+ */
+static JSBool
+PutBlockObjects(JSContext *cx, JSStackFrame *fp)
+{
+ JSBool ok;
+ JSObject *obj;
+
+ ok = JS_TRUE;
+ for (obj = fp->scopeChain; obj; obj = OBJ_GET_PARENT(cx, obj)) {
+ if (OBJ_GET_CLASS(cx, obj) == &js_BlockClass) {
+ if (JS_GetPrivate(cx, obj) != fp)
+ break;
+ ok &= js_PutBlockObject(cx, obj);
+ }
+ }
+ return ok;
+}
+
+JSObject *
+js_ComputeThis(JSContext *cx, JSObject *thisp, jsval *argv)
+{
+ if (thisp && OBJ_GET_CLASS(cx, thisp) != &js_CallClass) {
+ /* Some objects (e.g., With) delegate 'this' to another object. */
+ thisp = OBJ_THIS_OBJECT(cx, thisp);
+ if (!thisp)
+ return NULL;
+ } else {
+ /*
+ * ECMA requires "the global object", but in the presence of multiple
+ * top-level objects (windows, frames, or certain layers in the client
+ * object model), we prefer fun's parent. An example that causes this
+ * code to run:
+ *
+ * // in window w1
+ * function f() { return this }
+ * function g() { return f }
+ *
+ * // in window w2
+ * var h = w1.g()
+ * alert(h() == w1)
+ *
+ * The alert should display "true".
+ */
+ if (JSVAL_IS_PRIMITIVE(argv[-2]) ||
+ !OBJ_GET_PARENT(cx, JSVAL_TO_OBJECT(argv[-2]))) {
+ thisp = cx->globalObject;
+ } else {
+ jsid id;
+ jsval v;
+ uintN attrs;
+
+ /* Walk up the parent chain. */
+ thisp = JSVAL_TO_OBJECT(argv[-2]);
+ id = ATOM_TO_JSID(cx->runtime->atomState.parentAtom);
+ for (;;) {
+ if (!OBJ_CHECK_ACCESS(cx, thisp, id, JSACC_PARENT, &v, &attrs))
+ return NULL;
+ if (JSVAL_IS_VOID(v))
+ v = OBJ_GET_SLOT(cx, thisp, JSSLOT_PARENT);
+ if (JSVAL_IS_NULL(v))
+ break;
+ thisp = JSVAL_TO_OBJECT(v);
+ }
+ }
+ }
+ argv[-1] = OBJECT_TO_JSVAL(thisp);
+ return thisp;
+}
+
+#if JS_HAS_NO_SUCH_METHOD
+
+static JSBool
+NoSuchMethod(JSContext *cx, JSStackFrame *fp, jsval *vp, uint32 flags,
+ uintN argc)
+{
+ JSObject *thisp, *argsobj;
+ jsval *sp, roots[3];
+ JSTempValueRooter tvr;
+ jsid id;
+ JSBool ok;
+ jsbytecode *pc;
+ jsatomid atomIndex;
+
+ /*
+ * We must call js_ComputeThis here to censor Call objects. A performance
+ * hit, since we'll call it again in the normal sequence of invoke events,
+ * but at least it's idempotent.
+ *
+ * Normally, we call ComputeThis after all frame members have been set,
+ * and in particular, after any revision of the callee value at *vp due
+ * to clasp->convert (see below). This matters because ComputeThis may
+ * access *vp via fp->argv[-2], to follow the parent chain to a global
+ * object to use as the 'this' parameter.
+ *
+ * Obviously, here in the JSVAL_IS_PRIMITIVE(v) case, there can't be any
+ * such defaulting of 'this' to callee (v, *vp) ancestor.
+ */
+ JS_ASSERT(JSVAL_IS_PRIMITIVE(vp[0]));
+ RESTORE_SP(fp);
+ if (JSVAL_IS_OBJECT(vp[1])) {
+ thisp = JSVAL_TO_OBJECT(vp[1]);
+ } else {
+ PRIMITIVE_TO_OBJECT(cx, vp[1], thisp);
+ if (!thisp)
+ return JS_FALSE;
+ vp[1] = OBJECT_TO_JSVAL(thisp);
+ }
+ thisp = js_ComputeThis(cx, thisp, vp + 2);
+ if (!thisp)
+ return JS_FALSE;
+ vp[1] = OBJECT_TO_JSVAL(thisp);
+
+ /* From here on, control must flow through label out: to return. */
+ memset(roots, 0, sizeof roots);
+ JS_PUSH_TEMP_ROOT(cx, JS_ARRAY_LENGTH(roots), roots, &tvr);
+
+ id = ATOM_TO_JSID(cx->runtime->atomState.noSuchMethodAtom);
+#if JS_HAS_XML_SUPPORT
+ if (OBJECT_IS_XML(cx, thisp)) {
+ JSXMLObjectOps *ops;
+
+ ops = (JSXMLObjectOps *) thisp->map->ops;
+ thisp = ops->getMethod(cx, thisp, id, &roots[2]);
+ if (!thisp) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ vp[1] = OBJECT_TO_JSVAL(thisp);
+ } else
+#endif
+ {
+ ok = OBJ_GET_PROPERTY(cx, thisp, id, &roots[2]);
+ if (!ok)
+ goto out;
+ }
+ if (JSVAL_IS_PRIMITIVE(roots[2]))
+ goto not_function;
+
+ pc = (jsbytecode *) vp[-(intN)fp->script->depth];
+ switch ((JSOp) *pc) {
+ case JSOP_NAME:
+ case JSOP_GETPROP:
+#if JS_HAS_XML_SUPPORT
+ case JSOP_GETMETHOD:
+#endif
+ atomIndex = GET_ATOM_INDEX(pc);
+ roots[0] = ATOM_KEY(js_GetAtom(cx, &fp->script->atomMap, atomIndex));
+ argsobj = js_NewArrayObject(cx, argc, vp + 2);
+ if (!argsobj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ roots[1] = OBJECT_TO_JSVAL(argsobj);
+ ok = js_InternalInvoke(cx, thisp, roots[2], flags | JSINVOKE_INTERNAL,
+ 2, roots, &vp[0]);
+ break;
+
+ default:
+ goto not_function;
+ }
+
+ out:
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return ok;
+
+ not_function:
+ js_ReportIsNotFunction(cx, vp, flags & JSINVOKE_FUNFLAGS);
+ ok = JS_FALSE;
+ goto out;
+}
+
+#endif /* JS_HAS_NO_SUCH_METHOD */
+
+#ifdef DUMP_CALL_TABLE
+
+#include "jsclist.h"
+#include "jshash.h"
+#include "jsdtoa.h"
+
+typedef struct CallKey {
+ jsval callee; /* callee value */
+ const char *filename; /* function filename or null */
+ uintN lineno; /* function lineno or 0 */
+} CallKey;
+
+/* Compensate for typeof null == "object" brain damage. */
+#define JSTYPE_NULL JSTYPE_LIMIT
+#define TYPEOF(cx,v) (JSVAL_IS_NULL(v) ? JSTYPE_NULL : JS_TypeOfValue(cx,v))
+#define TYPENAME(t) (((t) == JSTYPE_NULL) ? js_null_str : js_type_str[t])
+#define NTYPEHIST (JSTYPE_LIMIT + 1)
+
+typedef struct CallValue {
+ uint32 total; /* total call count */
+ uint32 recycled; /* LRU-recycled calls lost */
+ uint16 minargc; /* minimum argument count */
+ uint16 maxargc; /* maximum argument count */
+ struct ArgInfo {
+ uint32 typeHist[NTYPEHIST]; /* histogram by type */
+ JSCList lruList; /* top 10 values LRU list */
+ struct ArgValCount {
+ JSCList lruLink; /* LRU list linkage */
+ jsval value; /* recently passed value */
+ uint32 count; /* number of times passed */
+ char strbuf[112]; /* string conversion buffer */
+ } topValCounts[10]; /* top 10 value storage */
+ } argInfo[8];
+} CallValue;
+
+typedef struct CallEntry {
+ JSHashEntry entry;
+ CallKey key;
+ CallValue value;
+ char name[32]; /* function name copy */
+} CallEntry;
+
+static void *
+AllocCallTable(void *pool, size_t size)
+{
+ return malloc(size);
+}
+
+static void
+FreeCallTable(void *pool, void *item)
+{
+ free(item);
+}
+
+static JSHashEntry *
+AllocCallEntry(void *pool, const void *key)
+{
+ return (JSHashEntry*) calloc(1, sizeof(CallEntry));
+}
+
+static void
+FreeCallEntry(void *pool, JSHashEntry *he, uintN flag)
+{
+ JS_ASSERT(flag == HT_FREE_ENTRY);
+ free(he);
+}
+
+static JSHashAllocOps callTableAllocOps = {
+ AllocCallTable, FreeCallTable,
+ AllocCallEntry, FreeCallEntry
+};
+
+JS_STATIC_DLL_CALLBACK(JSHashNumber)
+js_hash_call_key(const void *key)
+{
+ CallKey *ck = (CallKey *) key;
+ JSHashNumber hash = (jsuword)ck->callee >> 3;
+
+ if (ck->filename) {
+ hash = (hash << 4) ^ JS_HashString(ck->filename);
+ hash = (hash << 4) ^ ck->lineno;
+ }
+ return hash;
+}
+
+JS_STATIC_DLL_CALLBACK(intN)
+js_compare_call_keys(const void *k1, const void *k2)
+{
+ CallKey *ck1 = (CallKey *)k1, *ck2 = (CallKey *)k2;
+
+ return ck1->callee == ck2->callee &&
+ ((ck1->filename && ck2->filename)
+ ? strcmp(ck1->filename, ck2->filename) == 0
+ : ck1->filename == ck2->filename) &&
+ ck1->lineno == ck2->lineno;
+}
+
+JSHashTable *js_CallTable;
+size_t js_LogCallToSourceLimit;
+
+JS_STATIC_DLL_CALLBACK(intN)
+CallTableDumper(JSHashEntry *he, intN k, void *arg)
+{
+ CallEntry *ce = (CallEntry *)he;
+ FILE *fp = (FILE *)arg;
+ uintN argc, i, n;
+ struct ArgInfo *ai;
+ JSType save, type;
+ JSCList *cl;
+ struct ArgValCount *avc;
+ jsval argval;
+
+ if (ce->key.filename) {
+ /* We're called at the end of the mark phase, so mark our filenames. */
+ js_MarkScriptFilename(ce->key.filename);
+ fprintf(fp, "%s:%u ", ce->key.filename, ce->key.lineno);
+ } else {
+ fprintf(fp, "@%p ", (void *) ce->key.callee);
+ }
+
+ if (ce->name[0])
+ fprintf(fp, "name %s ", ce->name);
+ fprintf(fp, "calls %lu (%lu) argc %u/%u\n",
+ (unsigned long) ce->value.total,
+ (unsigned long) ce->value.recycled,
+ ce->value.minargc, ce->value.maxargc);
+
+ argc = JS_MIN(ce->value.maxargc, 8);
+ for (i = 0; i < argc; i++) {
+ ai = &ce->value.argInfo[i];
+
+ n = 0;
+ save = -1;
+ for (type = JSTYPE_VOID; type <= JSTYPE_LIMIT; type++) {
+ if (ai->typeHist[type]) {
+ save = type;
+ ++n;
+ }
+ }
+ if (n == 1) {
+ fprintf(fp, " arg %u type %s: %lu\n",
+ i, TYPENAME(save), (unsigned long) ai->typeHist[save]);
+ } else {
+ fprintf(fp, " arg %u type histogram:\n", i);
+ for (type = JSTYPE_VOID; type <= JSTYPE_LIMIT; type++) {
+ fprintf(fp, " %9s: %8lu ",
+ TYPENAME(type), (unsigned long) ai->typeHist[type]);
+ for (n = (uintN) JS_HOWMANY(ai->typeHist[type], 10); n > 0; --n)
+ fputc('*', fp);
+ fputc('\n', fp);
+ }
+ }
+
+ fprintf(fp, " arg %u top 10 values:\n", i);
+ n = 1;
+ for (cl = ai->lruList.prev; cl != &ai->lruList; cl = cl->prev) {
+ avc = (struct ArgValCount *)cl;
+ if (!avc->count)
+ break;
+ argval = avc->value;
+ fprintf(fp, " %9u: %8lu %.*s (%#lx)\n",
+ n, (unsigned long) avc->count,
+ sizeof avc->strbuf, avc->strbuf, argval);
+ ++n;
+ }
+ }
+
+ return HT_ENUMERATE_NEXT;
+}
+
+void
+js_DumpCallTable(JSContext *cx)
+{
+ char name[24];
+ FILE *fp;
+ static uintN dumpCount;
+
+ if (!js_CallTable)
+ return;
+
+ JS_snprintf(name, sizeof name, "/tmp/calltable.dump.%u", dumpCount & 7);
+ dumpCount++;
+ fp = fopen(name, "w");
+ if (!fp)
+ return;
+
+ JS_HashTableEnumerateEntries(js_CallTable, CallTableDumper, fp);
+ fclose(fp);
+}
+
+static void
+LogCall(JSContext *cx, jsval callee, uintN argc, jsval *argv)
+{
+ CallKey key;
+ const char *name, *cstr;
+ JSFunction *fun;
+ JSHashNumber keyHash;
+ JSHashEntry **hep, *he;
+ CallEntry *ce;
+ uintN i, j;
+ jsval argval;
+ JSType type;
+ struct ArgInfo *ai;
+ struct ArgValCount *avc;
+ JSString *str;
+
+ if (!js_CallTable) {
+ js_CallTable = JS_NewHashTable(1024, js_hash_call_key,
+ js_compare_call_keys, NULL,
+ &callTableAllocOps, NULL);
+ if (!js_CallTable)
+ return;
+ }
+
+ key.callee = callee;
+ key.filename = NULL;
+ key.lineno = 0;
+ name = "";
+ if (VALUE_IS_FUNCTION(cx, callee)) {
+ fun = (JSFunction *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(callee));
+ if (fun->atom)
+ name = js_AtomToPrintableString(cx, fun->atom);
+ if (FUN_INTERPRETED(fun)) {
+ key.filename = fun->u.i.script->filename;
+ key.lineno = fun->u.i.script->lineno;
+ }
+ }
+ keyHash = js_hash_call_key(&key);
+
+ hep = JS_HashTableRawLookup(js_CallTable, keyHash, &key);
+ he = *hep;
+ if (he) {
+ ce = (CallEntry *) he;
+ JS_ASSERT(strncmp(ce->name, name, sizeof ce->name) == 0);
+ } else {
+ he = JS_HashTableRawAdd(js_CallTable, hep, keyHash, &key, NULL);
+ if (!he)
+ return;
+ ce = (CallEntry *) he;
+ ce->entry.key = &ce->key;
+ ce->entry.value = &ce->value;
+ ce->key = key;
+ for (i = 0; i < 8; i++) {
+ ai = &ce->value.argInfo[i];
+ JS_INIT_CLIST(&ai->lruList);
+ for (j = 0; j < 10; j++)
+ JS_APPEND_LINK(&ai->topValCounts[j].lruLink, &ai->lruList);
+ }
+ strncpy(ce->name, name, sizeof ce->name);
+ }
+
+ ++ce->value.total;
+    if (ce->value.total == 1 || ce->value.minargc > argc)
+        ce->value.minargc = argc;
+ if (ce->value.maxargc < argc)
+ ce->value.maxargc = argc;
+ if (argc > 8)
+ argc = 8;
+ for (i = 0; i < argc; i++) {
+ ai = &ce->value.argInfo[i];
+ argval = argv[i];
+ type = TYPEOF(cx, argval);
+ ++ai->typeHist[type];
+
+ for (j = 0; ; j++) {
+ if (j == 10) {
+ avc = (struct ArgValCount *) ai->lruList.next;
+ ce->value.recycled += avc->count;
+ avc->value = argval;
+ avc->count = 1;
+ break;
+ }
+ avc = &ai->topValCounts[j];
+ if (avc->value == argval) {
+ ++avc->count;
+ break;
+ }
+ }
+
+ /* Move avc to the back of the LRU list. */
+ JS_REMOVE_LINK(&avc->lruLink);
+ JS_APPEND_LINK(&avc->lruLink, &ai->lruList);
+
+ str = NULL;
+ cstr = "";
+ switch (TYPEOF(cx, argval)) {
+ case JSTYPE_VOID:
+ cstr = js_type_str[JSTYPE_VOID];
+ break;
+ case JSTYPE_NULL:
+ cstr = js_null_str;
+ break;
+ case JSTYPE_BOOLEAN:
+ cstr = js_boolean_str[JSVAL_TO_BOOLEAN(argval)];
+ break;
+ case JSTYPE_NUMBER:
+ if (JSVAL_IS_INT(argval)) {
+ JS_snprintf(avc->strbuf, sizeof avc->strbuf, "%ld",
+ JSVAL_TO_INT(argval));
+ } else {
+ JS_dtostr(avc->strbuf, sizeof avc->strbuf, DTOSTR_STANDARD, 0,
+ *JSVAL_TO_DOUBLE(argval));
+ }
+ continue;
+ case JSTYPE_STRING:
+ str = js_QuoteString(cx, JSVAL_TO_STRING(argval), (jschar)'"');
+ break;
+ case JSTYPE_FUNCTION:
+ if (VALUE_IS_FUNCTION(cx, argval)) {
+ fun = (JSFunction *)JS_GetPrivate(cx, JSVAL_TO_OBJECT(argval));
+ if (fun && fun->atom) {
+ str = ATOM_TO_STRING(fun->atom);
+ break;
+ }
+ }
+ /* FALL THROUGH */
+ case JSTYPE_OBJECT:
+ js_LogCallToSourceLimit = sizeof avc->strbuf;
+ cx->options |= JSOPTION_LOGCALL_TOSOURCE;
+ str = js_ValueToSource(cx, argval);
+ cx->options &= ~JSOPTION_LOGCALL_TOSOURCE;
+ break;
+ }
+ if (str)
+ cstr = JS_GetStringBytes(str);
+ strncpy(avc->strbuf, cstr, sizeof avc->strbuf);
+ }
+}
+
+#endif /* DUMP_CALL_TABLE */
+
+/*
+ * Conditional assert to detect failure to clear a pending exception that is
+ * suppressed (or unintentional suppression of a wanted exception).
+ */
+#if defined DEBUG_brendan || defined DEBUG_mrbkap || defined DEBUG_shaver
+# define DEBUG_NOT_THROWING 1
+#endif
+
+#ifdef DEBUG_NOT_THROWING
+# define ASSERT_NOT_THROWING(cx) JS_ASSERT(!(cx)->throwing)
+#else
+# define ASSERT_NOT_THROWING(cx) /* nothing */
+#endif
+
+/*
+ * Find a function reference and its 'this' object implicit first parameter
+ * under argc arguments on cx's stack, and call the function. Push missing
+ * required arguments, allocate declared local variables, and pop everything
+ * when done. Then push the return value.
+ */
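+/*
+ * For illustration, the stack layout js_Invoke expects on entry (a sketch,
+ * with slots indexed relative to vp = fp->sp - (2 + argc)):
+ *
+ *     vp[0]                 callee value (frame.rval is stored here on return)
+ *     vp[1]                 'this' value
+ *     vp[2] .. vp[1+argc]   actual arguments
+ */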
+JS_FRIEND_API(JSBool)
+js_Invoke(JSContext *cx, uintN argc, uintN flags)
+{
+ void *mark;
+ JSStackFrame *fp, frame;
+ jsval *sp, *newsp, *limit;
+ jsval *vp, v, thisv;
+ JSObject *funobj, *parent, *thisp;
+ JSBool ok;
+ JSClass *clasp;
+ JSObjectOps *ops;
+ JSNative native;
+ JSFunction *fun;
+ JSScript *script;
+ uintN nslots, nvars, nalloc, surplus;
+ JSInterpreterHook hook;
+ void *hookData;
+
+ /* Mark the top of stack and load frequently-used registers. */
+ mark = JS_ARENA_MARK(&cx->stackPool);
+ fp = cx->fp;
+ sp = fp->sp;
+
+ /*
+ * Set vp to the callee value's stack slot (it's where rval goes).
+ * Once vp is set, control should flow through label out2: to return.
+ * Set frame.rval early so native class and object ops can throw and
+ * return false, causing a goto out2 with ok set to false.
+ */
+ vp = sp - (2 + argc);
+ v = *vp;
+ frame.rval = JSVAL_VOID;
+
+ /*
+ * A callee must be an object reference, unless its 'this' parameter
+ * implements the __noSuchMethod__ method, in which case that method will
+ * be called like so:
+ *
+ * thisp.__noSuchMethod__(id, args)
+ *
+ * where id is the name of the method that this invocation attempted to
+ * call by name, and args is an Array containing this invocation's actual
+ * parameters.
+ */
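+    /*
+     * For illustration (hypothetical script, not engine code): a call such
+     * as obj.foo(1, 2), where obj.foo evaluates to a primitive (typically
+     * undefined), reaches this point with a primitive callee and is routed
+     * by the NoSuchMethod call below to obj.__noSuchMethod__("foo", [1, 2]).
+     */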
+ if (JSVAL_IS_PRIMITIVE(v)) {
+#if JS_HAS_NO_SUCH_METHOD
+ if (fp->script && !(flags & JSINVOKE_INTERNAL)) {
+ ok = NoSuchMethod(cx, fp, vp, flags, argc);
+ if (ok)
+ frame.rval = *vp;
+ goto out2;
+ }
+#endif
+ goto bad;
+ }
+
+ /* Load thisv after potentially calling NoSuchMethod, which may set it. */
+ thisv = vp[1];
+
+ funobj = JSVAL_TO_OBJECT(v);
+ parent = OBJ_GET_PARENT(cx, funobj);
+ clasp = OBJ_GET_CLASS(cx, funobj);
+ if (clasp != &js_FunctionClass) {
+ /* Function is inlined, all other classes use object ops. */
+ ops = funobj->map->ops;
+
+ /*
+ * XXX this makes no sense -- why convert to function if clasp->call?
+ * XXX better to call that hook without converting
+ * XXX the only thing that needs fixing is liveconnect
+ *
+ * Try converting to function, for closure and API compatibility.
+ * We attempt the conversion under all circumstances for 1.2, but
+ * only if there is a call op defined otherwise.
+ */
+ if ((ops == &js_ObjectOps) ? clasp->call : ops->call) {
+ ok = clasp->convert(cx, funobj, JSTYPE_FUNCTION, &v);
+ if (!ok)
+ goto out2;
+
+ if (VALUE_IS_FUNCTION(cx, v)) {
+ /* Make vp refer to funobj to keep it available as argv[-2]. */
+ *vp = v;
+ funobj = JSVAL_TO_OBJECT(v);
+ parent = OBJ_GET_PARENT(cx, funobj);
+ goto have_fun;
+ }
+ }
+ fun = NULL;
+ script = NULL;
+ nslots = nvars = 0;
+
+ /* Try a call or construct native object op. */
+ native = (flags & JSINVOKE_CONSTRUCT) ? ops->construct : ops->call;
+ if (!native)
+ goto bad;
+
+ if (JSVAL_IS_OBJECT(thisv)) {
+ thisp = JSVAL_TO_OBJECT(thisv);
+ } else {
+ PRIMITIVE_TO_OBJECT(cx, thisv, thisp);
+ if (!thisp)
+ goto out2;
+ vp[1] = thisv = OBJECT_TO_JSVAL(thisp);
+ }
+ } else {
+have_fun:
+ /* Get private data and set derived locals from it. */
+ fun = (JSFunction *) JS_GetPrivate(cx, funobj);
+ nslots = (fun->nargs > argc) ? fun->nargs - argc : 0;
+ if (FUN_INTERPRETED(fun)) {
+ native = NULL;
+ script = fun->u.i.script;
+ nvars = fun->u.i.nvars;
+ } else {
+ native = fun->u.n.native;
+ script = NULL;
+ nvars = 0;
+ nslots += fun->u.n.extra;
+ }
+
+ if (JSFUN_BOUND_METHOD_TEST(fun->flags)) {
+ /* Handle bound method special case. */
+ thisp = parent;
+ } else if (JSVAL_IS_OBJECT(thisv)) {
+ thisp = JSVAL_TO_OBJECT(thisv);
+ } else {
+ uintN thispflags = JSFUN_THISP_FLAGS(fun->flags);
+
+ JS_ASSERT(!(flags & JSINVOKE_CONSTRUCT));
+ if (JSVAL_IS_STRING(thisv)) {
+ if (JSFUN_THISP_TEST(thispflags, JSFUN_THISP_STRING)) {
+ thisp = (JSObject *) thisv;
+ goto init_frame;
+ }
+ thisp = js_StringToObject(cx, JSVAL_TO_STRING(thisv));
+ } else if (JSVAL_IS_INT(thisv)) {
+ if (JSFUN_THISP_TEST(thispflags, JSFUN_THISP_NUMBER)) {
+ thisp = (JSObject *) thisv;
+ goto init_frame;
+ }
+ thisp = js_NumberToObject(cx, (jsdouble)JSVAL_TO_INT(thisv));
+ } else if (JSVAL_IS_DOUBLE(thisv)) {
+ if (JSFUN_THISP_TEST(thispflags, JSFUN_THISP_NUMBER)) {
+ thisp = (JSObject *) thisv;
+ goto init_frame;
+ }
+ thisp = js_NumberToObject(cx, *JSVAL_TO_DOUBLE(thisv));
+ } else {
+ JS_ASSERT(JSVAL_IS_BOOLEAN(thisv));
+ if (JSFUN_THISP_TEST(thispflags, JSFUN_THISP_BOOLEAN)) {
+ thisp = (JSObject *) thisv;
+ goto init_frame;
+ }
+ thisp = js_BooleanToObject(cx, JSVAL_TO_BOOLEAN(thisv));
+ }
+ if (!thisp) {
+ ok = JS_FALSE;
+ goto out2;
+ }
+ goto init_frame;
+ }
+ }
+
+ if (flags & JSINVOKE_CONSTRUCT) {
+ /* Default return value for a constructor is the new object. */
+ frame.rval = OBJECT_TO_JSVAL(thisp);
+ } else {
+ thisp = js_ComputeThis(cx, thisp, vp + 2);
+ if (!thisp) {
+ ok = JS_FALSE;
+ goto out2;
+ }
+ }
+
+ init_frame:
+ /* Initialize the rest of frame, except for sp (set by SAVE_SP later). */
+ frame.thisp = thisp;
+ frame.varobj = NULL;
+ frame.callobj = frame.argsobj = NULL;
+ frame.script = script;
+ frame.fun = fun;
+ frame.argc = argc;
+ frame.argv = sp - argc;
+ frame.nvars = nvars;
+ frame.vars = sp;
+ frame.down = fp;
+ frame.annotation = NULL;
+ frame.scopeChain = NULL; /* set below for real, after cx->fp is set */
+ frame.pc = NULL;
+ frame.spbase = NULL;
+ frame.sharpDepth = 0;
+ frame.sharpArray = NULL;
+ frame.flags = flags;
+ frame.dormantNext = NULL;
+ frame.xmlNamespace = NULL;
+ frame.blockChain = NULL;
+
+ /* From here on, control must flow through label out: to return. */
+ cx->fp = &frame;
+
+ /* Init these now in case we goto out before first hook call. */
+ hook = cx->runtime->callHook;
+ hookData = NULL;
+
+ /* Check for argument slots required by the function. */
+ if (nslots) {
+ /* All arguments must be contiguous, so we may have to copy actuals. */
+ nalloc = nslots;
+ limit = (jsval *) cx->stackPool.current->limit;
+ JS_ASSERT((jsval *) cx->stackPool.current->base <= sp && sp <= limit);
+ if (sp + nslots > limit) {
+ /* Hit end of arena: we have to copy argv[-2..(argc+nslots-1)]. */
+ nalloc += 2 + argc;
+ } else {
+ /* Take advantage of surplus slots in the caller's frame depth. */
+ JS_ASSERT((jsval *)mark >= sp);
+ surplus = (jsval *)mark - sp;
+ nalloc -= surplus;
+ }
+
+ /* Check whether we have enough space in the caller's frame. */
+ if ((intN)nalloc > 0) {
+ /* Need space for actuals plus missing formals minus surplus. */
+ newsp = js_AllocRawStack(cx, nalloc, NULL);
+ if (!newsp) {
+ ok = JS_FALSE;
+ goto out;
+ }
+
+ /* If we couldn't allocate contiguous args, copy actuals now. */
+ if (newsp != mark) {
+ JS_ASSERT(sp + nslots > limit);
+ JS_ASSERT(2 + argc + nslots == nalloc);
+ *newsp++ = vp[0];
+ *newsp++ = vp[1];
+ if (argc)
+ memcpy(newsp, frame.argv, argc * sizeof(jsval));
+ frame.argv = newsp;
+ sp = frame.vars = newsp + argc;
+ }
+ }
+
+ /* Advance frame.vars to make room for the missing args. */
+ frame.vars += nslots;
+
+ /* Push void to initialize missing args. */
+ do {
+ PUSH(JSVAL_VOID);
+ } while (--nslots != 0);
+ }
+ JS_ASSERT(nslots == 0);
+
+ /* Now allocate stack space for local variables. */
+ if (nvars) {
+ JS_ASSERT((jsval *)cx->stackPool.current->avail >= frame.vars);
+ surplus = (jsval *)cx->stackPool.current->avail - frame.vars;
+ if (surplus < nvars) {
+ newsp = js_AllocRawStack(cx, nvars, NULL);
+ if (!newsp) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ if (newsp != sp) {
+ /* NB: Discontinuity between argv and vars. */
+ sp = frame.vars = newsp;
+ }
+ }
+
+ /* Push void to initialize local variables. */
+ do {
+ PUSH(JSVAL_VOID);
+ } while (--nvars != 0);
+ }
+ JS_ASSERT(nvars == 0);
+
+ /* Store the current sp in frame before calling fun. */
+ SAVE_SP(&frame);
+
+    /* Call the hook if present. */
+ if (hook && (native || script))
+ hookData = hook(cx, &frame, JS_TRUE, 0, cx->runtime->callHookData);
+
+ /* Call the function, either a native method or an interpreted script. */
+ if (native) {
+#ifdef DEBUG_NOT_THROWING
+ JSBool alreadyThrowing = cx->throwing;
+#endif
+
+#if JS_HAS_LVALUE_RETURN
+ /* Set by JS_SetCallReturnValue2, used to return reference types. */
+ cx->rval2set = JS_FALSE;
+#endif
+
+ /* If native, use caller varobj and scopeChain for eval. */
+ frame.varobj = fp->varobj;
+ frame.scopeChain = fp->scopeChain;
+ ok = native(cx, frame.thisp, argc, frame.argv, &frame.rval);
+ JS_RUNTIME_METER(cx->runtime, nativeCalls);
+#ifdef DEBUG_NOT_THROWING
+ if (ok && !alreadyThrowing)
+ ASSERT_NOT_THROWING(cx);
+#endif
+ } else if (script) {
+#ifdef DUMP_CALL_TABLE
+ LogCall(cx, *vp, argc, frame.argv);
+#endif
+ /* Use parent scope so js_GetCallObject can find the right "Call". */
+ frame.scopeChain = parent;
+ if (JSFUN_HEAVYWEIGHT_TEST(fun->flags)) {
+ /* Scope with a call object parented by the callee's parent. */
+ if (!js_GetCallObject(cx, &frame, parent)) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ }
+ ok = js_Interpret(cx, script->code, &v);
+ } else {
+ /* fun might be onerror trying to report a syntax error in itself. */
+ frame.scopeChain = NULL;
+ ok = JS_TRUE;
+ }
+
+out:
+ if (hookData) {
+ hook = cx->runtime->callHook;
+ if (hook)
+ hook(cx, &frame, JS_FALSE, &ok, hookData);
+ }
+
+ /* If frame has a call object, sync values and clear back-pointer. */
+ if (frame.callobj)
+ ok &= js_PutCallObject(cx, &frame);
+
+ /* If frame has an arguments object, sync values and clear back-pointer. */
+ if (frame.argsobj)
+ ok &= js_PutArgsObject(cx, &frame);
+
+ /* Restore cx->fp now that we're done releasing frame objects. */
+ cx->fp = fp;
+
+out2:
+ /* Pop everything we may have allocated off the stack. */
+ JS_ARENA_RELEASE(&cx->stackPool, mark);
+
+ /* Store the return value and restore sp just above it. */
+ *vp = frame.rval;
+ fp->sp = vp + 1;
+
+ /*
+ * Store the location of the JSOP_CALL or JSOP_EVAL that generated the
+ * return value, but only if this is an external (compiled from script
+ * source) call that has stack budget for the generating pc.
+ */
+ if (fp->script && !(flags & JSINVOKE_INTERNAL))
+ vp[-(intN)fp->script->depth] = (jsval)fp->pc;
+ return ok;
+
+bad:
+ js_ReportIsNotFunction(cx, vp, flags & JSINVOKE_FUNFLAGS);
+ ok = JS_FALSE;
+ goto out2;
+}
+
+JSBool
+js_InternalInvoke(JSContext *cx, JSObject *obj, jsval fval, uintN flags,
+ uintN argc, jsval *argv, jsval *rval)
+{
+ JSStackFrame *fp, *oldfp, frame;
+ jsval *oldsp, *sp;
+ void *mark;
+ uintN i;
+ JSBool ok;
+
+ fp = oldfp = cx->fp;
+ if (!fp) {
+ memset(&frame, 0, sizeof frame);
+ cx->fp = fp = &frame;
+ }
+ oldsp = fp->sp;
+ sp = js_AllocStack(cx, 2 + argc, &mark);
+ if (!sp) {
+ ok = JS_FALSE;
+ goto out;
+ }
+
+ PUSH(fval);
+ PUSH(OBJECT_TO_JSVAL(obj));
+ for (i = 0; i < argc; i++)
+ PUSH(argv[i]);
+ SAVE_SP(fp);
+ ok = js_Invoke(cx, argc, flags | JSINVOKE_INTERNAL);
+ if (ok) {
+ RESTORE_SP(fp);
+
+ /*
+     * Store *rval in a scoped local root if a scope is open, else in
+ * the lastInternalResult pigeon-hole GC root, solely so users of
+ * js_InternalInvoke and its direct and indirect (js_ValueToString for
+ * example) callers do not need to manage roots for local, temporary
+ * references to such results.
+ */
+ *rval = POP_OPND();
+ if (JSVAL_IS_GCTHING(*rval)) {
+ if (cx->localRootStack) {
+ if (js_PushLocalRoot(cx, cx->localRootStack, *rval) < 0)
+ ok = JS_FALSE;
+ } else {
+ cx->weakRoots.lastInternalResult = *rval;
+ }
+ }
+ }
+
+ js_FreeStack(cx, mark);
+out:
+ fp->sp = oldsp;
+ if (oldfp != fp)
+ cx->fp = oldfp;
+
+ return ok;
+}
+
+JSBool
+js_InternalGetOrSet(JSContext *cx, JSObject *obj, jsid id, jsval fval,
+ JSAccessMode mode, uintN argc, jsval *argv, jsval *rval)
+{
+ int stackDummy;
+
+ /*
+     * js_InternalInvoke could result in another attempt to get or set the
+     * same id; see bug 355497.
+ */
+ if (!JS_CHECK_STACK_SIZE(cx, stackDummy)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_OVER_RECURSED);
+ return JS_FALSE;
+ }
+ /*
+ * Check general (not object-ops/class-specific) access from the running
+ * script to obj.id only if id has a scripted getter or setter that we're
+ * about to invoke. If we don't check this case, nothing else will -- no
+ * other native code has the chance to check.
+ *
+ * Contrast this non-native (scripted) case with native getter and setter
+ * accesses, where the native itself must do an access check, if security
+     * policies require it. We make a checkAccess or checkObjectAccess call
+ * back to the embedding program only in those cases where we're not going
+ * to call an embedding-defined native function, getter, setter, or class
+ * hook anyway. Where we do call such a native, there's no need for the
+ * engine to impose a separate access check callback on all embeddings --
+ * many embeddings have no security policy at all.
+ */
+ JS_ASSERT(mode == JSACC_READ || mode == JSACC_WRITE);
+ if (cx->runtime->checkObjectAccess &&
+ VALUE_IS_FUNCTION(cx, fval) &&
+ FUN_INTERPRETED((JSFunction *)
+ JS_GetPrivate(cx, JSVAL_TO_OBJECT(fval))) &&
+ !cx->runtime->checkObjectAccess(cx, obj, ID_TO_VALUE(id), mode,
+ &fval)) {
+ return JS_FALSE;
+ }
+
+ return js_InternalCall(cx, obj, fval, argc, argv, rval);
+}
+
+JSBool
+js_Execute(JSContext *cx, JSObject *chain, JSScript *script,
+ JSStackFrame *down, uintN flags, jsval *result)
+{
+ JSInterpreterHook hook;
+ void *hookData, *mark;
+ JSStackFrame *oldfp, frame;
+ JSObject *obj, *tmp;
+ JSBool ok;
+
+ hook = cx->runtime->executeHook;
+ hookData = mark = NULL;
+ oldfp = cx->fp;
+ frame.script = script;
+ if (down) {
+ /* Propagate arg/var state for eval and the debugger API. */
+ frame.callobj = down->callobj;
+ frame.argsobj = down->argsobj;
+ frame.varobj = down->varobj;
+ frame.fun = down->fun;
+ frame.thisp = down->thisp;
+ frame.argc = down->argc;
+ frame.argv = down->argv;
+ frame.nvars = down->nvars;
+ frame.vars = down->vars;
+ frame.annotation = down->annotation;
+ frame.sharpArray = down->sharpArray;
+ } else {
+ frame.callobj = frame.argsobj = NULL;
+ obj = chain;
+ if (cx->options & JSOPTION_VAROBJFIX) {
+ while ((tmp = OBJ_GET_PARENT(cx, obj)) != NULL)
+ obj = tmp;
+ }
+ frame.varobj = obj;
+ frame.fun = NULL;
+ frame.thisp = chain;
+ frame.argc = 0;
+ frame.argv = NULL;
+ frame.nvars = script->numGlobalVars;
+ if (frame.nvars) {
+ frame.vars = js_AllocRawStack(cx, frame.nvars, &mark);
+ if (!frame.vars)
+ return JS_FALSE;
+ memset(frame.vars, 0, frame.nvars * sizeof(jsval));
+ } else {
+ frame.vars = NULL;
+ }
+ frame.annotation = NULL;
+ frame.sharpArray = NULL;
+ }
+ frame.rval = JSVAL_VOID;
+ frame.down = down;
+ frame.scopeChain = chain;
+ frame.pc = NULL;
+ frame.sp = oldfp ? oldfp->sp : NULL;
+ frame.spbase = NULL;
+ frame.sharpDepth = 0;
+ frame.flags = flags;
+ frame.dormantNext = NULL;
+ frame.xmlNamespace = NULL;
+ frame.blockChain = NULL;
+
+ /*
+ * Here we wrap the call to js_Interpret with code to (conditionally)
+ * save and restore the old stack frame chain into a chain of 'dormant'
+ * frame chains. Since we are replacing cx->fp, we were running into
+ * the problem that if GC was called under this frame, some of the GC
+ * things associated with the old frame chain (available here only in
+ * the C variable 'oldfp') were not rooted and were being collected.
+ *
+ * So, now we preserve the links to these 'dormant' frame chains in cx
+ * before calling js_Interpret and cleanup afterwards. The GC walks
+ * these dormant chains and marks objects in the same way that it marks
+ * objects in the primary cx->fp chain.
+ */
+ if (oldfp && oldfp != down) {
+ JS_ASSERT(!oldfp->dormantNext);
+ oldfp->dormantNext = cx->dormantFrameChain;
+ cx->dormantFrameChain = oldfp;
+ }
+
+ cx->fp = &frame;
+ if (hook)
+ hookData = hook(cx, &frame, JS_TRUE, 0, cx->runtime->executeHookData);
+
+ /*
+ * Use frame.rval, not result, so the last result stays rooted across any
+ * GC activations nested within this js_Interpret.
+ */
+ ok = js_Interpret(cx, script->code, &frame.rval);
+ *result = frame.rval;
+
+ if (hookData) {
+ hook = cx->runtime->executeHook;
+ if (hook)
+ hook(cx, &frame, JS_FALSE, &ok, hookData);
+ }
+ if (mark)
+ js_FreeRawStack(cx, mark);
+ cx->fp = oldfp;
+
+ if (oldfp && oldfp != down) {
+ JS_ASSERT(cx->dormantFrameChain == oldfp);
+ cx->dormantFrameChain = oldfp->dormantNext;
+ oldfp->dormantNext = NULL;
+ }
+
+ return ok;
+}
+
+#if JS_HAS_EXPORT_IMPORT
+/*
+ * If id is JSVAL_VOID, import all exported properties from obj.
+ */
+static JSBool
+ImportProperty(JSContext *cx, JSObject *obj, jsid id)
+{
+ JSBool ok;
+ JSIdArray *ida;
+ JSProperty *prop;
+ JSObject *obj2, *target, *funobj, *closure;
+ JSString *str;
+ uintN attrs;
+ jsint i;
+ jsval value;
+
+ if (JSVAL_IS_VOID(id)) {
+ ida = JS_Enumerate(cx, obj);
+ if (!ida)
+ return JS_FALSE;
+ ok = JS_TRUE;
+ if (ida->length == 0)
+ goto out;
+ } else {
+ ida = NULL;
+ if (!OBJ_LOOKUP_PROPERTY(cx, obj, id, &obj2, &prop))
+ return JS_FALSE;
+ if (!prop) {
+ str = js_DecompileValueGenerator(cx, JSDVG_IGNORE_STACK,
+ ID_TO_VALUE(id), NULL);
+ if (str)
+ js_ReportIsNotDefined(cx, JS_GetStringBytes(str));
+ return JS_FALSE;
+ }
+ ok = OBJ_GET_ATTRIBUTES(cx, obj, id, prop, &attrs);
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ if (!ok)
+ return JS_FALSE;
+ if (!(attrs & JSPROP_EXPORTED)) {
+ str = js_DecompileValueGenerator(cx, JSDVG_IGNORE_STACK,
+ ID_TO_VALUE(id), NULL);
+ if (str) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_NOT_EXPORTED,
+ JS_GetStringBytes(str));
+ }
+ return JS_FALSE;
+ }
+ }
+
+ target = cx->fp->varobj;
+ i = 0;
+ do {
+ if (ida) {
+ id = ida->vector[i];
+ ok = OBJ_GET_ATTRIBUTES(cx, obj, id, NULL, &attrs);
+ if (!ok)
+ goto out;
+ if (!(attrs & JSPROP_EXPORTED))
+ continue;
+ }
+ ok = OBJ_CHECK_ACCESS(cx, obj, id, JSACC_IMPORT, &value, &attrs);
+ if (!ok)
+ goto out;
+ if (VALUE_IS_FUNCTION(cx, value)) {
+ funobj = JSVAL_TO_OBJECT(value);
+ closure = js_CloneFunctionObject(cx, funobj, obj);
+ if (!closure) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ value = OBJECT_TO_JSVAL(closure);
+ }
+
+ /*
+ * Handle the case of importing a property that refers to a local
+ * variable or formal parameter of a function activation. These
+ * properties are accessed by opcodes using stack slot numbers
+ * generated by the compiler rather than runtime name-lookup. These
+ * local references, therefore, bypass the normal scope chain lookup.
+ * So, instead of defining a new property in the activation object,
+ * modify the existing value in the stack slot.
+ */
+ if (OBJ_GET_CLASS(cx, target) == &js_CallClass) {
+ ok = OBJ_LOOKUP_PROPERTY(cx, target, id, &obj2, &prop);
+ if (!ok)
+ goto out;
+ } else {
+ prop = NULL;
+ }
+ if (prop && target == obj2) {
+ ok = OBJ_SET_PROPERTY(cx, target, id, &value);
+ } else {
+ ok = OBJ_DEFINE_PROPERTY(cx, target, id, value, NULL, NULL,
+ attrs & ~(JSPROP_EXPORTED |
+ JSPROP_GETTER |
+ JSPROP_SETTER),
+ NULL);
+ }
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ if (!ok)
+ goto out;
+ } while (ida && ++i < ida->length);
+
+out:
+ if (ida)
+ JS_DestroyIdArray(cx, ida);
+ return ok;
+}
+#endif /* JS_HAS_EXPORT_IMPORT */
+
+JSBool
+js_CheckRedeclaration(JSContext *cx, JSObject *obj, jsid id, uintN attrs,
+ JSObject **objp, JSProperty **propp)
+{
+ JSObject *obj2;
+ JSProperty *prop;
+ uintN oldAttrs, report;
+ JSBool isFunction;
+ jsval value;
+ const char *type, *name;
+
+ if (!OBJ_LOOKUP_PROPERTY(cx, obj, id, &obj2, &prop))
+ return JS_FALSE;
+ if (propp) {
+ *objp = obj2;
+ *propp = prop;
+ }
+ if (!prop)
+ return JS_TRUE;
+
+ /*
+ * Use prop as a speedup hint to OBJ_GET_ATTRIBUTES, but drop it on error.
+ * An assertion at label bad: will insist that it is null.
+ */
+ if (!OBJ_GET_ATTRIBUTES(cx, obj2, id, prop, &oldAttrs)) {
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+#ifdef DEBUG
+ prop = NULL;
+#endif
+ goto bad;
+ }
+
+ /*
+ * From here, return true, or else goto bad on failure to null out params.
+ * If our caller doesn't want prop, drop it (we don't need it any longer).
+ */
+ if (!propp) {
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ prop = NULL;
+ }
+
+ /* If either property is readonly, we have an error. */
+ report = ((oldAttrs | attrs) & JSPROP_READONLY)
+ ? JSREPORT_ERROR
+ : JSREPORT_WARNING | JSREPORT_STRICT;
+
+ if (report != JSREPORT_ERROR) {
+ /*
+ * Allow redeclaration of variables and functions, but insist that the
+ * new value is not a getter if the old value was, ditto for setters --
+ * unless prop is impermanent (in which case anyone could delete it and
+ * redefine it, willy-nilly).
+ */
+ if (!(attrs & (JSPROP_GETTER | JSPROP_SETTER)))
+ return JS_TRUE;
+ if ((~(oldAttrs ^ attrs) & (JSPROP_GETTER | JSPROP_SETTER)) == 0)
+ return JS_TRUE;
+ if (!(oldAttrs & JSPROP_PERMANENT))
+ return JS_TRUE;
+ report = JSREPORT_ERROR;
+ }
+
+ isFunction = (oldAttrs & (JSPROP_GETTER | JSPROP_SETTER)) != 0;
+ if (!isFunction) {
+ if (!OBJ_GET_PROPERTY(cx, obj, id, &value))
+ goto bad;
+ isFunction = VALUE_IS_FUNCTION(cx, value);
+ }
+ type = (oldAttrs & attrs & JSPROP_GETTER)
+ ? js_getter_str
+ : (oldAttrs & attrs & JSPROP_SETTER)
+ ? js_setter_str
+ : (oldAttrs & JSPROP_READONLY)
+ ? js_const_str
+ : isFunction
+ ? js_function_str
+ : js_var_str;
+ name = js_AtomToPrintableString(cx, JSID_TO_ATOM(id));
+ if (!name)
+ goto bad;
+ return JS_ReportErrorFlagsAndNumber(cx, report,
+ js_GetErrorMessage, NULL,
+ JSMSG_REDECLARED_VAR,
+ type, name);
+
+bad:
+ if (propp) {
+ *objp = NULL;
+ *propp = NULL;
+ }
+ JS_ASSERT(!prop);
+ return JS_FALSE;
+}
+
+JSBool
+js_StrictlyEqual(jsval lval, jsval rval)
+{
+ jsval ltag = JSVAL_TAG(lval), rtag = JSVAL_TAG(rval);
+ jsdouble ld, rd;
+
+ if (ltag == rtag) {
+ if (ltag == JSVAL_STRING) {
+ JSString *lstr = JSVAL_TO_STRING(lval),
+ *rstr = JSVAL_TO_STRING(rval);
+ return js_EqualStrings(lstr, rstr);
+ }
+ if (ltag == JSVAL_DOUBLE) {
+ ld = *JSVAL_TO_DOUBLE(lval);
+ rd = *JSVAL_TO_DOUBLE(rval);
+ return JSDOUBLE_COMPARE(ld, ==, rd, JS_FALSE);
+ }
+ return lval == rval;
+ }
+ if (ltag == JSVAL_DOUBLE && JSVAL_IS_INT(rval)) {
+ ld = *JSVAL_TO_DOUBLE(lval);
+ rd = JSVAL_TO_INT(rval);
+ return JSDOUBLE_COMPARE(ld, ==, rd, JS_FALSE);
+ }
+ if (JSVAL_IS_INT(lval) && rtag == JSVAL_DOUBLE) {
+ ld = JSVAL_TO_INT(lval);
+ rd = *JSVAL_TO_DOUBLE(rval);
+ return JSDOUBLE_COMPARE(ld, ==, rd, JS_FALSE);
+ }
+ return lval == rval;
+}
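+
+/*
+ * Note: js_StrictlyEqual above implements the strict (===) comparison used
+ * by NEW_EQUALITY_OP in the interpreter loop: same-tag values compare
+ * directly (strings via js_EqualStrings, doubles with NaN unequal to
+ * everything), int/double pairs compare numerically, and no other
+ * conversions are performed.
+ */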
+
+JSBool
+js_InvokeConstructor(JSContext *cx, jsval *vp, uintN argc)
+{
+ JSFunction *fun;
+ JSObject *obj, *obj2, *proto, *parent;
+ jsval lval, rval;
+ JSClass *clasp, *funclasp;
+
+ fun = NULL;
+ obj2 = NULL;
+ lval = *vp;
+ if (!JSVAL_IS_OBJECT(lval) ||
+ (obj2 = JSVAL_TO_OBJECT(lval)) == NULL ||
+ /* XXX clean up to avoid special cases above ObjectOps layer */
+ OBJ_GET_CLASS(cx, obj2) == &js_FunctionClass ||
+ !obj2->map->ops->construct)
+ {
+ fun = js_ValueToFunction(cx, vp, JSV2F_CONSTRUCT);
+ if (!fun)
+ return JS_FALSE;
+ }
+
+ clasp = &js_ObjectClass;
+ if (!obj2) {
+ proto = parent = NULL;
+ fun = NULL;
+ } else {
+ /*
+ * Get the constructor prototype object for this function.
+ * Use the nominal 'this' parameter slot, vp[1], as a local
+ * root to protect this prototype, in case it has no other
+ * strong refs.
+ */
+ if (!OBJ_GET_PROPERTY(cx, obj2,
+ ATOM_TO_JSID(cx->runtime->atomState
+ .classPrototypeAtom),
+ &vp[1])) {
+ return JS_FALSE;
+ }
+ rval = vp[1];
+ proto = JSVAL_IS_OBJECT(rval) ? JSVAL_TO_OBJECT(rval) : NULL;
+ parent = OBJ_GET_PARENT(cx, obj2);
+
+ if (OBJ_GET_CLASS(cx, obj2) == &js_FunctionClass) {
+ funclasp = ((JSFunction *)JS_GetPrivate(cx, obj2))->clasp;
+ if (funclasp)
+ clasp = funclasp;
+ }
+ }
+ obj = js_NewObject(cx, clasp, proto, parent);
+ if (!obj)
+ return JS_FALSE;
+
+ /* Now we have an object with a constructor method; call it. */
+ vp[1] = OBJECT_TO_JSVAL(obj);
+ if (!js_Invoke(cx, argc, JSINVOKE_CONSTRUCT)) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ return JS_FALSE;
+ }
+
+ /* Check the return value and if it's primitive, force it to be obj. */
+ rval = *vp;
+ if (JSVAL_IS_PRIMITIVE(rval)) {
+ if (!fun) {
+ /* native [[Construct]] returning primitive is error */
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_NEW_RESULT,
+ js_ValueToPrintableString(cx, rval));
+ return JS_FALSE;
+ }
+ *vp = OBJECT_TO_JSVAL(obj);
+ }
+
+ JS_RUNTIME_METER(cx->runtime, constructs);
+ return JS_TRUE;
+}
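+
+/*
+ * Note: js_InvokeConstructor above is the shared path behind JSOP_NEW: it
+ * reads the callee's .prototype to seed the new object's proto, calls the
+ * function with JSINVOKE_CONSTRUCT, and keeps the newly made object as the
+ * result when a scripted or native function returns a primitive; only a
+ * host object's [[Construct]] hook returning a primitive is an error.
+ */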
+
+static JSBool
+InternStringElementId(JSContext *cx, jsval idval, jsid *idp)
+{
+ JSAtom *atom;
+
+ atom = js_ValueToStringAtom(cx, idval);
+ if (!atom)
+ return JS_FALSE;
+ *idp = ATOM_TO_JSID(atom);
+ return JS_TRUE;
+}
+
+static JSBool
+InternNonIntElementId(JSContext *cx, jsval idval, jsid *idp)
+{
+ JS_ASSERT(!JSVAL_IS_INT(idval));
+
+#if JS_HAS_XML_SUPPORT
+ if (JSVAL_IS_OBJECT(idval)) {
+ *idp = OBJECT_JSVAL_TO_JSID(idval);
+ return JS_TRUE;
+ }
+#endif
+
+ return InternStringElementId(cx, idval, idp);
+}
+
+#if JS_HAS_XML_SUPPORT
+#define CHECK_ELEMENT_ID(obj, id) \
+ JS_BEGIN_MACRO \
+ if (JSID_IS_OBJECT(id) && !OBJECT_IS_XML(cx, obj)) { \
+ SAVE_SP_AND_PC(fp); \
+ ok = InternStringElementId(cx, OBJECT_JSID_TO_JSVAL(id), &id); \
+ if (!ok) \
+ goto out; \
+ } \
+ JS_END_MACRO
+
+#else
+#define CHECK_ELEMENT_ID(obj, id) JS_ASSERT(!JSID_IS_OBJECT(id))
+#endif
+
+#ifndef MAX_INTERP_LEVEL
+#if defined(XP_OS2)
+#define MAX_INTERP_LEVEL 250
+#else
+#define MAX_INTERP_LEVEL 1000
+#endif
+#endif
+
+#define MAX_INLINE_CALL_COUNT 1000
+
+/*
+ * Threaded interpretation via computed goto appears to be well-supported by
+ * GCC 3 and higher. IBM's C compiler when run with the right options (e.g.,
+ * -qlanglvl=extended) also supports threading. Ditto the SunPro C compiler.
+ * Currently it's broken for JS_VERSION < 160, though this isn't worth fixing.
+ * Add your compiler support macros here.
+ */
+#if JS_VERSION >= 160 && ( \
+ __GNUC__ >= 3 || \
+ (__IBMC__ >= 700 && defined __IBM_COMPUTED_GOTO) || \
+ __SUNPRO_C >= 0x570)
+# define JS_THREADED_INTERP 1
+#else
+# undef JS_THREADED_INTERP
+#endif
+
+JSBool
+js_Interpret(JSContext *cx, jsbytecode *pc, jsval *result)
+{
+ JSRuntime *rt;
+ JSStackFrame *fp;
+ JSScript *script;
+ uintN inlineCallCount;
+ JSObject *obj, *obj2, *parent;
+ JSVersion currentVersion, originalVersion;
+ JSBranchCallback onbranch;
+ JSBool ok, cond;
+ JSTrapHandler interruptHandler;
+ jsint depth, len;
+ jsval *sp, *newsp;
+ void *mark;
+ jsbytecode *endpc, *pc2;
+ JSOp op, op2;
+ jsatomid atomIndex;
+ JSAtom *atom;
+ uintN argc, attrs, flags, slot;
+ jsval *vp, lval, rval, ltmp, rtmp;
+ jsid id;
+ JSObject *withobj, *iterobj;
+ JSProperty *prop;
+ JSScopeProperty *sprop;
+ JSString *str, *str2;
+ jsint i, j;
+ jsdouble d, d2;
+ JSClass *clasp;
+ JSFunction *fun;
+ JSType type;
+#if !defined JS_THREADED_INTERP && defined DEBUG
+ FILE *tracefp = NULL;
+#endif
+#if JS_HAS_EXPORT_IMPORT
+ JSIdArray *ida;
+#endif
+ jsint low, high, off, npairs;
+ JSBool match;
+#if JS_HAS_GETTER_SETTER
+ JSPropertyOp getter, setter;
+#endif
+ int stackDummy;
+
+#ifdef __GNUC__
+# define JS_EXTENSION __extension__
+# define JS_EXTENSION_(s) __extension__ ({ s; })
+#else
+# define JS_EXTENSION
+# define JS_EXTENSION_(s) s
+#endif
+
+#ifdef JS_THREADED_INTERP
+ static void *normalJumpTable[] = {
+# define OPDEF(op,val,name,token,length,nuses,ndefs,prec,format) \
+ JS_EXTENSION &&L_##op,
+# include "jsopcode.tbl"
+# undef OPDEF
+ };
+
+ static void *interruptJumpTable[] = {
+# define OPDEF(op,val,name,token,length,nuses,ndefs,prec,format) \
+ ((op != JSOP_PUSHOBJ) \
+ ? JS_EXTENSION &&interrupt \
+ : JS_EXTENSION &&L_JSOP_PUSHOBJ),
+# include "jsopcode.tbl"
+# undef OPDEF
+ };
+
+ register void **jumpTable = normalJumpTable;
+
+# define DO_OP() JS_EXTENSION_(goto *jumpTable[op])
+# define DO_NEXT_OP(n) do { op = *(pc += (n)); DO_OP(); } while (0)
+# define BEGIN_CASE(OP) L_##OP:
+# define END_CASE(OP) DO_NEXT_OP(OP##_LENGTH);
+# define END_VARLEN_CASE DO_NEXT_OP(len);
+# define EMPTY_CASE(OP) BEGIN_CASE(OP) op = *++pc; DO_OP();
+#else
+# define DO_OP() goto do_op
+# define DO_NEXT_OP(n) goto advance_pc
+# define BEGIN_CASE(OP) case OP:
+# define END_CASE(OP) break;
+# define END_VARLEN_CASE break;
+# define EMPTY_CASE(OP) BEGIN_CASE(OP) END_CASE(OP)
+#endif
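+
+/*
+ * Illustration: under JS_THREADED_INTERP, DO_OP() expands to the computed
+ * goto "goto *jumpTable[op]", so each END_CASE jumps straight to the next
+ * opcode's label; otherwise DO_OP() is "goto do_op" and dispatch happens
+ * through the ordinary switch (op) below.
+ */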
+
+ *result = JSVAL_VOID;
+ rt = cx->runtime;
+
+ /* Set registerized frame pointer and derived script pointer. */
+ fp = cx->fp;
+ script = fp->script;
+ JS_ASSERT(script->length != 0);
+
+ /* Count of JS function calls that nest in this C js_Interpret frame. */
+ inlineCallCount = 0;
+
+ /*
+ * Optimized Get and SetVersion for proper script language versioning.
+ *
+ * If any native method or JSClass/JSObjectOps hook calls js_SetVersion
+ * and changes cx->version, the effect will "stick" and we will stop
+ * maintaining currentVersion. This is relied upon by testsuites, for
+ * the most part -- web browsers select version before compiling and not
+ * at run-time.
+ */
+ currentVersion = script->version;
+ originalVersion = cx->version;
+ if (currentVersion != originalVersion)
+ js_SetVersion(cx, currentVersion);
+
+#ifdef __GNUC__
+ flags = 0; /* suppress gcc warnings */
+ id = 0;
+#endif
+
+ /*
+ * Prepare to call a user-supplied branch handler, and abort the script
+ * if it returns false. We reload onbranch after calling out to native
+ * functions (but not to getters, setters, or other native hooks).
+ */
+#define LOAD_BRANCH_CALLBACK(cx) (onbranch = (cx)->branchCallback)
+
+ LOAD_BRANCH_CALLBACK(cx);
+#define CHECK_BRANCH(len) \
+ JS_BEGIN_MACRO \
+ if (len <= 0 && onbranch) { \
+ SAVE_SP_AND_PC(fp); \
+ if (!(ok = (*onbranch)(cx, script))) \
+ goto out; \
+ } \
+ JS_END_MACRO
+
+ /*
+ * Load the debugger's interrupt hook here and after calling out to native
+ * functions (but not to getters, setters, or other native hooks), so we do
+ * not have to reload it each time through the interpreter loop -- we hope
+ * the compiler can keep it in a register when it is non-null.
+ */
+#ifdef JS_THREADED_INTERP
+# define LOAD_JUMP_TABLE() \
+ (jumpTable = interruptHandler ? interruptJumpTable : normalJumpTable)
+#else
+# define LOAD_JUMP_TABLE() /* nothing */
+#endif
+
+#define LOAD_INTERRUPT_HANDLER(rt) \
+ JS_BEGIN_MACRO \
+ interruptHandler = (rt)->interruptHandler; \
+ LOAD_JUMP_TABLE(); \
+ JS_END_MACRO
+
+ LOAD_INTERRUPT_HANDLER(rt);
+
+ /* Check for too much js_Interpret nesting, or too deep a C stack. */
+ if (++cx->interpLevel == MAX_INTERP_LEVEL ||
+ !JS_CHECK_STACK_SIZE(cx, stackDummy)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_OVER_RECURSED);
+ ok = JS_FALSE;
+ goto out2;
+ }
+
+ /*
+ * Allocate operand and pc stack slots for the script's worst-case depth,
+ * unless we're called to interpret a part of an already active script, a
+ * filtering predicate expression for example.
+ */
+ depth = (jsint) script->depth;
+ if (JS_LIKELY(!fp->spbase)) {
+ newsp = js_AllocRawStack(cx, (uintN)(2 * depth), &mark);
+ if (!newsp) {
+ ok = JS_FALSE;
+ goto out2;
+ }
+ sp = newsp + depth;
+ fp->spbase = sp;
+ SAVE_SP(fp);
+ } else {
+ sp = fp->sp;
+ JS_ASSERT(JS_UPTRDIFF(sp, fp->spbase) <= depth * sizeof(jsval));
+ newsp = fp->spbase - depth;
+ mark = NULL;
+ }
+
+ /*
+ * To support generator_throw and to catch ignored exceptions, fail right
+ * away if cx->throwing is set. If no exception is pending, null obj in
+ * case a callable object is being sent into a yield expression, and the
+ * yield's result is invoked.
+ */
+ ok = !cx->throwing;
+ if (!ok) {
+#ifdef DEBUG_NOT_THROWING
+ printf("JS INTERPRETER CALLED WITH PENDING EXCEPTION %lx\n",
+ (unsigned long) cx->exception);
+#endif
+ goto out;
+ }
+ obj = NULL;
+
+#ifdef JS_THREADED_INTERP
+
+ /*
+ * This is a loop, but it does not look like a loop. The loop-closing
+ * jump is distributed throughout interruptJumpTable, and comes back to
+ * the interrupt label. The dispatch on op is through normalJumpTable.
+ * The trick is LOAD_INTERRUPT_HANDLER setting jumpTable appropriately.
+ *
+ * It is important that "op" be initialized before the interrupt label
+ * because it is possible for "op" to be specially assigned during the
+ * normal processing of an opcode while looping (in particular, this
+ * happens in JSOP_TRAP while debugging). We rely on DO_NEXT_OP to
+ * correctly manage "op" in all other cases.
+ */
+ op = (JSOp) *pc;
+ if (interruptHandler) {
+interrupt:
+ SAVE_SP_AND_PC(fp);
+ switch (interruptHandler(cx, script, pc, &rval,
+ rt->interruptHandlerData)) {
+ case JSTRAP_ERROR:
+ ok = JS_FALSE;
+ goto out;
+ case JSTRAP_CONTINUE:
+ break;
+ case JSTRAP_RETURN:
+ fp->rval = rval;
+ goto out;
+ case JSTRAP_THROW:
+ cx->throwing = JS_TRUE;
+ cx->exception = rval;
+ ok = JS_FALSE;
+ goto out;
+ default:;
+ }
+ LOAD_INTERRUPT_HANDLER(rt);
+ }
+
+ JS_ASSERT((uintN)op < (uintN)JSOP_LIMIT);
+ JS_EXTENSION_(goto *normalJumpTable[op]);
+
+#else /* !JS_THREADED_INTERP */
+
+ for (;;) {
+ op = (JSOp) *pc;
+ do_op:
+ len = js_CodeSpec[op].length;
+
+#ifdef DEBUG
+ tracefp = (FILE *) cx->tracefp;
+ if (tracefp) {
+ intN nuses, n;
+
+ fprintf(tracefp, "%4u: ", js_PCToLineNumber(cx, script, pc));
+ js_Disassemble1(cx, script, pc,
+ PTRDIFF(pc, script->code, jsbytecode), JS_FALSE,
+ tracefp);
+ nuses = js_CodeSpec[op].nuses;
+ if (nuses) {
+ SAVE_SP_AND_PC(fp);
+ for (n = -nuses; n < 0; n++) {
+ str = js_DecompileValueGenerator(cx, n, sp[n], NULL);
+ if (str) {
+ fprintf(tracefp, "%s %s",
+ (n == -nuses) ? " inputs:" : ",",
+ JS_GetStringBytes(str));
+ }
+ }
+ fprintf(tracefp, " @ %d\n", sp - fp->spbase);
+ }
+ }
+#endif /* DEBUG */
+
+ if (interruptHandler && op != JSOP_PUSHOBJ) {
+ SAVE_SP_AND_PC(fp);
+ switch (interruptHandler(cx, script, pc, &rval,
+ rt->interruptHandlerData)) {
+ case JSTRAP_ERROR:
+ ok = JS_FALSE;
+ goto out;
+ case JSTRAP_CONTINUE:
+ break;
+ case JSTRAP_RETURN:
+ fp->rval = rval;
+ goto out;
+ case JSTRAP_THROW:
+ cx->throwing = JS_TRUE;
+ cx->exception = rval;
+ ok = JS_FALSE;
+ goto out;
+ default:;
+ }
+ LOAD_INTERRUPT_HANDLER(rt);
+ }
+
+ switch (op) {
+
+#endif /* !JS_THREADED_INTERP */
+
+ BEGIN_CASE(JSOP_STOP)
+ goto out;
+
+ EMPTY_CASE(JSOP_NOP)
+
+ BEGIN_CASE(JSOP_GROUP)
+ obj = NULL;
+ END_CASE(JSOP_GROUP)
+
+ BEGIN_CASE(JSOP_PUSH)
+ PUSH_OPND(JSVAL_VOID);
+ END_CASE(JSOP_PUSH)
+
+ BEGIN_CASE(JSOP_POP)
+ sp--;
+ END_CASE(JSOP_POP)
+
+ BEGIN_CASE(JSOP_POP2)
+ sp -= 2;
+ END_CASE(JSOP_POP2)
+
+ BEGIN_CASE(JSOP_SWAP)
+ vp = sp - depth; /* swap generating pc's for the decompiler */
+ ltmp = vp[-1];
+ vp[-1] = vp[-2];
+ vp[-2] = ltmp;
+ rtmp = sp[-1];
+ sp[-1] = sp[-2];
+ sp[-2] = rtmp;
+ END_CASE(JSOP_SWAP)
+
+ BEGIN_CASE(JSOP_POPV)
+ *result = POP_OPND();
+ END_CASE(JSOP_POPV)
+
+ BEGIN_CASE(JSOP_ENTERWITH)
+ FETCH_OBJECT(cx, -1, rval, obj);
+ SAVE_SP_AND_PC(fp);
+ OBJ_TO_INNER_OBJECT(cx, obj);
+ if (!obj || !(obj2 = js_GetScopeChain(cx, fp))) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ withobj = js_NewWithObject(cx, obj, obj2, sp - fp->spbase - 1);
+ if (!withobj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ fp->scopeChain = withobj;
+ STORE_OPND(-1, OBJECT_TO_JSVAL(withobj));
+ END_CASE(JSOP_ENTERWITH)
+
+ BEGIN_CASE(JSOP_LEAVEWITH)
+ rval = POP_OPND();
+ JS_ASSERT(JSVAL_IS_OBJECT(rval));
+ withobj = JSVAL_TO_OBJECT(rval);
+ JS_ASSERT(OBJ_GET_CLASS(cx, withobj) == &js_WithClass);
+ fp->scopeChain = OBJ_GET_PARENT(cx, withobj);
+ JS_SetPrivate(cx, withobj, NULL);
+ END_CASE(JSOP_LEAVEWITH)
+
+ BEGIN_CASE(JSOP_SETRVAL)
+ ASSERT_NOT_THROWING(cx);
+ fp->rval = POP_OPND();
+ END_CASE(JSOP_SETRVAL)
+
+ BEGIN_CASE(JSOP_RETURN)
+ CHECK_BRANCH(-1);
+ fp->rval = POP_OPND();
+ /* FALL THROUGH */
+
+ BEGIN_CASE(JSOP_RETRVAL) /* fp->rval already set */
+ ASSERT_NOT_THROWING(cx);
+ if (inlineCallCount)
+ inline_return:
+ {
+ JSInlineFrame *ifp = (JSInlineFrame *) fp;
+ void *hookData = ifp->hookData;
+
+ /*
+ * If fp has blocks on its scope chain, home their locals now,
+ * before calling any debugger hook, and before freeing stack.
+ * This matches the order of block putting and hook calling in
+ * the "out-of-line" return code at the bottom of js_Interpret
+ * and in js_Invoke.
+ */
+ if (fp->flags & JSFRAME_POP_BLOCKS) {
+ SAVE_SP_AND_PC(fp);
+ ok &= PutBlockObjects(cx, fp);
+ }
+
+ if (hookData) {
+ JSInterpreterHook hook = rt->callHook;
+ if (hook) {
+ SAVE_SP_AND_PC(fp);
+ hook(cx, fp, JS_FALSE, &ok, hookData);
+ LOAD_INTERRUPT_HANDLER(rt);
+ }
+ }
+
+ /*
+ * If fp has a call object, sync values and clear the back-
+ * pointer. This can happen for a lightweight function if it
+ * calls eval unexpectedly (in a way that is hidden from the
+ * compiler). See bug 325540.
+ */
+ if (fp->callobj) {
+ SAVE_SP_AND_PC(fp);
+ ok &= js_PutCallObject(cx, fp);
+ }
+
+ if (fp->argsobj) {
+ SAVE_SP_AND_PC(fp);
+ ok &= js_PutArgsObject(cx, fp);
+ }
+
+ /* Restore context version only if callee hasn't set version. */
+ if (JS_LIKELY(cx->version == currentVersion)) {
+ currentVersion = ifp->callerVersion;
+ if (currentVersion != cx->version)
+ js_SetVersion(cx, currentVersion);
+ }
+
+ /* Store the return value in the caller's operand frame. */
+ vp = ifp->rvp;
+ *vp = fp->rval;
+
+ /* Restore cx->fp and release the inline frame's space. */
+ cx->fp = fp = fp->down;
+ JS_ARENA_RELEASE(&cx->stackPool, ifp->mark);
+
+ /* Restore sp to point just above the return value. */
+ fp->sp = vp + 1;
+ RESTORE_SP(fp);
+
+ /* Restore the calling script's interpreter registers. */
+ obj = NULL;
+ script = fp->script;
+ depth = (jsint) script->depth;
+ pc = fp->pc;
+#ifndef JS_THREADED_INTERP
+ endpc = script->code + script->length;
+#endif
+
+ /* Store the generating pc for the return value. */
+ vp[-depth] = (jsval)pc;
+
+ /* Resume execution in the calling frame. */
+ inlineCallCount--;
+ if (JS_LIKELY(ok)) {
+ JS_ASSERT(js_CodeSpec[*pc].length == JSOP_CALL_LENGTH);
+ len = JSOP_CALL_LENGTH;
+ DO_NEXT_OP(len);
+ }
+ }
+ goto out;
+
+ BEGIN_CASE(JSOP_DEFAULT)
+ (void) POP();
+ /* FALL THROUGH */
+ BEGIN_CASE(JSOP_GOTO)
+ len = GET_JUMP_OFFSET(pc);
+ CHECK_BRANCH(len);
+ END_VARLEN_CASE
+
+ BEGIN_CASE(JSOP_IFEQ)
+ POP_BOOLEAN(cx, rval, cond);
+ if (cond == JS_FALSE) {
+ len = GET_JUMP_OFFSET(pc);
+ CHECK_BRANCH(len);
+ DO_NEXT_OP(len);
+ }
+ END_CASE(JSOP_IFEQ)
+
+ BEGIN_CASE(JSOP_IFNE)
+ POP_BOOLEAN(cx, rval, cond);
+ if (cond != JS_FALSE) {
+ len = GET_JUMP_OFFSET(pc);
+ CHECK_BRANCH(len);
+ DO_NEXT_OP(len);
+ }
+ END_CASE(JSOP_IFNE)
+
+ BEGIN_CASE(JSOP_OR)
+ POP_BOOLEAN(cx, rval, cond);
+ if (cond == JS_TRUE) {
+ len = GET_JUMP_OFFSET(pc);
+ PUSH_OPND(rval);
+ DO_NEXT_OP(len);
+ }
+ END_CASE(JSOP_OR)
+
+ BEGIN_CASE(JSOP_AND)
+ POP_BOOLEAN(cx, rval, cond);
+ if (cond == JS_FALSE) {
+ len = GET_JUMP_OFFSET(pc);
+ PUSH_OPND(rval);
+ DO_NEXT_OP(len);
+ }
+ END_CASE(JSOP_AND)
+
+ BEGIN_CASE(JSOP_DEFAULTX)
+ (void) POP();
+ /* FALL THROUGH */
+ BEGIN_CASE(JSOP_GOTOX)
+ len = GET_JUMPX_OFFSET(pc);
+ CHECK_BRANCH(len);
+ END_VARLEN_CASE
+
+ BEGIN_CASE(JSOP_IFEQX)
+ POP_BOOLEAN(cx, rval, cond);
+ if (cond == JS_FALSE) {
+ len = GET_JUMPX_OFFSET(pc);
+ CHECK_BRANCH(len);
+ DO_NEXT_OP(len);
+ }
+ END_CASE(JSOP_IFEQX)
+
+ BEGIN_CASE(JSOP_IFNEX)
+ POP_BOOLEAN(cx, rval, cond);
+ if (cond != JS_FALSE) {
+ len = GET_JUMPX_OFFSET(pc);
+ CHECK_BRANCH(len);
+ DO_NEXT_OP(len);
+ }
+ END_CASE(JSOP_IFNEX)
+
+ BEGIN_CASE(JSOP_ORX)
+ POP_BOOLEAN(cx, rval, cond);
+ if (cond == JS_TRUE) {
+ len = GET_JUMPX_OFFSET(pc);
+ PUSH_OPND(rval);
+ DO_NEXT_OP(len);
+ }
+ END_CASE(JSOP_ORX)
+
+ BEGIN_CASE(JSOP_ANDX)
+ POP_BOOLEAN(cx, rval, cond);
+ if (cond == JS_FALSE) {
+ len = GET_JUMPX_OFFSET(pc);
+ PUSH_OPND(rval);
+ DO_NEXT_OP(len);
+ }
+ END_CASE(JSOP_ANDX)
+
+/*
+ * If the index value at sp[n] is not an int that fits in a jsval, it could
+ * be an object (an XML QName, AttributeName, or AnyName), but only if we are
+ * compiling with JS_HAS_XML_SUPPORT. Otherwise convert the index value to a
+ * string atom id.
+ */
+#define FETCH_ELEMENT_ID(n, id) \
+ JS_BEGIN_MACRO \
+ jsval idval_ = FETCH_OPND(n); \
+ if (JSVAL_IS_INT(idval_)) { \
+ id = INT_JSVAL_TO_JSID(idval_); \
+ } else { \
+ SAVE_SP_AND_PC(fp); \
+ ok = InternNonIntElementId(cx, idval_, &id); \
+ if (!ok) \
+ goto out; \
+ } \
+ JS_END_MACRO
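+
+/*
+ * Example (illustrative): for obj[3] the int-tagged index becomes an int
+ * jsid directly via INT_JSVAL_TO_JSID; for obj["x"] the value is atomized
+ * by InternNonIntElementId, and with JS_HAS_XML_SUPPORT an object id such
+ * as a QName is passed through untouched (CHECK_ELEMENT_ID re-interns it
+ * for non-XML objects).
+ */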
+
+ BEGIN_CASE(JSOP_IN)
+ SAVE_SP_AND_PC(fp);
+ rval = FETCH_OPND(-1);
+ if (JSVAL_IS_PRIMITIVE(rval)) {
+ str = js_DecompileValueGenerator(cx, -1, rval, NULL);
+ if (str) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_IN_NOT_OBJECT,
+ JS_GetStringBytes(str));
+ }
+ ok = JS_FALSE;
+ goto out;
+ }
+ obj = JSVAL_TO_OBJECT(rval);
+ FETCH_ELEMENT_ID(-2, id);
+ CHECK_ELEMENT_ID(obj, id);
+ ok = OBJ_LOOKUP_PROPERTY(cx, obj, id, &obj2, &prop);
+ if (!ok)
+ goto out;
+ sp--;
+ STORE_OPND(-1, BOOLEAN_TO_JSVAL(prop != NULL));
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ END_CASE(JSOP_IN)
+
+ BEGIN_CASE(JSOP_FOREACH)
+ flags = JSITER_ENUMERATE | JSITER_FOREACH;
+ goto value_to_iter;
+
+#if JS_HAS_DESTRUCTURING
+ BEGIN_CASE(JSOP_FOREACHKEYVAL)
+ flags = JSITER_ENUMERATE | JSITER_FOREACH | JSITER_KEYVALUE;
+ goto value_to_iter;
+#endif
+
+ BEGIN_CASE(JSOP_FORIN)
+ /*
+ * Set JSITER_ENUMERATE to indicate that for-in loop should use
+ * the enumeration protocol's iterator for compatibility if an
+ * explicit iterator is not given via the optional __iterator__
+ * method.
+ */
+ flags = JSITER_ENUMERATE;
+
+ value_to_iter:
+ JS_ASSERT(sp > fp->spbase);
+ SAVE_SP_AND_PC(fp);
+ ok = js_ValueToIterator(cx, flags, &sp[-1]);
+ if (!ok)
+ goto out;
+ JS_ASSERT(!JSVAL_IS_PRIMITIVE(sp[-1]));
+ JS_ASSERT(JSOP_FORIN_LENGTH == js_CodeSpec[op].length);
+ END_CASE(JSOP_FORIN)
+
+ BEGIN_CASE(JSOP_FORPROP)
+ /*
+ * Handle JSOP_FORPROP first, so the cost of the goto do_forinloop
+ * is not paid for the more common cases.
+ */
+ lval = FETCH_OPND(-1);
+ atom = GET_ATOM(cx, script, pc);
+ id = ATOM_TO_JSID(atom);
+ i = -2;
+ goto do_forinloop;
+
+ BEGIN_CASE(JSOP_FORNAME)
+ atom = GET_ATOM(cx, script, pc);
+ id = ATOM_TO_JSID(atom);
+
+ /*
+ * ECMA 12.6.3 says to eval the LHS after looking for properties
+ * to enumerate, and bail without LHS eval if there are no props.
+ * We do Find here to share the most code at label do_forinloop.
+ * If looking for enumerable properties could have side effects,
+ * then we'd have to move this into the common code and condition
+ * it on op == JSOP_FORNAME.
+ */
+ SAVE_SP_AND_PC(fp);
+ ok = js_FindProperty(cx, id, &obj, &obj2, &prop);
+ if (!ok)
+ goto out;
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ lval = OBJECT_TO_JSVAL(obj);
+ /* FALL THROUGH */
+
+ BEGIN_CASE(JSOP_FORARG)
+ BEGIN_CASE(JSOP_FORVAR)
+ BEGIN_CASE(JSOP_FORLOCAL)
+ /*
+ * JSOP_FORARG and JSOP_FORVAR don't require any lval computation
+ * here, because they address slots on the stack (in fp->args and
+ * fp->vars, respectively). Same applies to JSOP_FORLOCAL, which
+ * addresses fp->spbase.
+ */
+ /* FALL THROUGH */
+
+ BEGIN_CASE(JSOP_FORELEM)
+ /*
+ * JSOP_FORELEM simply initializes or updates the iteration state
+ * and leaves the index expression evaluation and assignment to the
+ * enumerator until after the next property has been acquired, via
+ * a JSOP_ENUMELEM bytecode.
+ */
+ i = -1;
+
+ do_forinloop:
+ /*
+ * Reach under the top of stack to find our property iterator, a
+ * JSObject that contains the iteration state.
+ */
+ JS_ASSERT(!JSVAL_IS_PRIMITIVE(sp[i]));
+ iterobj = JSVAL_TO_OBJECT(sp[i]);
+
+ SAVE_SP_AND_PC(fp);
+ ok = js_CallIteratorNext(cx, iterobj, &rval);
+ if (!ok)
+ goto out;
+ if (rval == JSVAL_HOLE) {
+ rval = JSVAL_FALSE;
+ goto end_forinloop;
+ }
+
+ switch (op) {
+ case JSOP_FORARG:
+ slot = GET_ARGNO(pc);
+ JS_ASSERT(slot < fp->fun->nargs);
+ fp->argv[slot] = rval;
+ break;
+
+ case JSOP_FORVAR:
+ slot = GET_VARNO(pc);
+ JS_ASSERT(slot < fp->fun->u.i.nvars);
+ fp->vars[slot] = rval;
+ break;
+
+ case JSOP_FORLOCAL:
+ slot = GET_UINT16(pc);
+ JS_ASSERT(slot < (uintN)depth);
+ vp = &fp->spbase[slot];
+ GC_POKE(cx, *vp);
+ *vp = rval;
+ break;
+
+ case JSOP_FORELEM:
+ /* FORELEM is not a SET operation, it's more like BINDNAME. */
+ PUSH_OPND(rval);
+ break;
+
+ default:
+ JS_ASSERT(op == JSOP_FORPROP || op == JSOP_FORNAME);
+
+ /* Convert lval to a non-null object containing id. */
+ VALUE_TO_OBJECT(cx, lval, obj);
+ if (op == JSOP_FORPROP)
+ STORE_OPND(-1, OBJECT_TO_JSVAL(obj));
+
+ /* Set the variable obj[id] to refer to rval. */
+ fp->flags |= JSFRAME_ASSIGNING;
+ ok = OBJ_SET_PROPERTY(cx, obj, id, &rval);
+ fp->flags &= ~JSFRAME_ASSIGNING;
+ if (!ok)
+ goto out;
+ break;
+ }
+
+ /* Push true to keep looping through properties. */
+ rval = JSVAL_TRUE;
+
+ end_forinloop:
+ sp += i + 1;
+ PUSH_OPND(rval);
+ len = js_CodeSpec[op].length;
+ DO_NEXT_OP(len);
+
+ BEGIN_CASE(JSOP_DUP)
+ JS_ASSERT(sp > fp->spbase);
+ vp = sp - 1; /* address top of stack */
+ rval = *vp;
+ vp -= depth; /* address generating pc */
+ vp[1] = *vp;
+ PUSH(rval);
+ END_CASE(JSOP_DUP)
+
+ BEGIN_CASE(JSOP_DUP2)
+ JS_ASSERT(sp - 2 >= fp->spbase);
+ vp = sp - 1; /* address top of stack */
+ lval = vp[-1];
+ rval = *vp;
+ vp -= depth; /* address generating pc */
+ vp[1] = vp[2] = *vp;
+ PUSH(lval);
+ PUSH(rval);
+ END_CASE(JSOP_DUP2)
+
+#define PROPERTY_OP(n, call) \
+ JS_BEGIN_MACRO \
+ /* Fetch the left part and resolve it to a non-null object. */ \
+ FETCH_OBJECT(cx, n, lval, obj); \
+ \
+ /* Get or set the property, set ok false if error, true if success. */\
+ SAVE_SP_AND_PC(fp); \
+ call; \
+ if (!ok) \
+ goto out; \
+ JS_END_MACRO
+
+#define ELEMENT_OP(n, call) \
+ JS_BEGIN_MACRO \
+ /* Fetch the right part and resolve it to an internal id. */ \
+ FETCH_ELEMENT_ID(n, id); \
+ \
+ /* Fetch the left part and resolve it to a non-null object. */ \
+ FETCH_OBJECT(cx, n - 1, lval, obj); \
+ \
+ /* Ensure that id has a type suitable for use with obj. */ \
+ CHECK_ELEMENT_ID(obj, id); \
+ \
+ /* Get or set the element, set ok false if error, true if success. */ \
+ SAVE_SP_AND_PC(fp); \
+ call; \
+ if (!ok) \
+ goto out; \
+ JS_END_MACRO
+
+#define NATIVE_GET(cx,obj,pobj,sprop,vp) \
+ JS_BEGIN_MACRO \
+ if (SPROP_HAS_STUB_GETTER(sprop)) { \
+ /* Fast path for Object instance properties. */ \
+ JS_ASSERT((sprop)->slot != SPROP_INVALID_SLOT || \
+ !SPROP_HAS_STUB_SETTER(sprop)); \
+ *vp = ((sprop)->slot != SPROP_INVALID_SLOT) \
+ ? LOCKED_OBJ_GET_SLOT(pobj, (sprop)->slot) \
+ : JSVAL_VOID; \
+ } else { \
+ SAVE_SP_AND_PC(fp); \
+ ok = js_NativeGet(cx, obj, pobj, sprop, vp); \
+ if (!ok) \
+ goto out; \
+ } \
+ JS_END_MACRO
+
+#define NATIVE_SET(cx,obj,sprop,vp) \
+ JS_BEGIN_MACRO \
+ if (SPROP_HAS_STUB_SETTER(sprop) && \
+ (sprop)->slot != SPROP_INVALID_SLOT) { \
+ /* Fast path for Object instance properties. */ \
+ LOCKED_OBJ_SET_SLOT(obj, (sprop)->slot, *vp); \
+ } else { \
+ SAVE_SP_AND_PC(fp); \
+ ok = js_NativeSet(cx, obj, sprop, vp); \
+ if (!ok) \
+ goto out; \
+ } \
+ JS_END_MACRO
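+
+/*
+ * Note: NATIVE_GET/NATIVE_SET above take the fast path of reading or
+ * writing the scope property's slot directly when the sprop has stub
+ * getter/setter hooks; otherwise they fall back to js_NativeGet and
+ * js_NativeSet, which may run user-visible getters or setters.
+ */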
+
+/*
+ * CACHED_GET and CACHED_SET use cx, obj, id, and rval from their callers'
+ * environments.
+ */
+#define CACHED_GET(call) \
+ JS_BEGIN_MACRO \
+ if (!OBJ_IS_NATIVE(obj)) { \
+ ok = call; \
+ } else { \
+ JS_LOCK_OBJ(cx, obj); \
+ PROPERTY_CACHE_TEST(&rt->propertyCache, obj, id, sprop); \
+ if (sprop) { \
+ NATIVE_GET(cx, obj, obj, sprop, &rval); \
+ JS_UNLOCK_OBJ(cx, obj); \
+ } else { \
+ JS_UNLOCK_OBJ(cx, obj); \
+ ok = call; \
+ /* No fill here: js_GetProperty fills the cache. */ \
+ } \
+ } \
+ JS_END_MACRO
+
+#define CACHED_SET(call) \
+ JS_BEGIN_MACRO \
+ if (!OBJ_IS_NATIVE(obj)) { \
+ ok = call; \
+ } else { \
+ JSScope *scope_; \
+ JS_LOCK_OBJ(cx, obj); \
+ PROPERTY_CACHE_TEST(&rt->propertyCache, obj, id, sprop); \
+ if (sprop && \
+ !(sprop->attrs & JSPROP_READONLY) && \
+ (scope_ = OBJ_SCOPE(obj), !SCOPE_IS_SEALED(scope_))) { \
+ NATIVE_SET(cx, obj, sprop, &rval); \
+ JS_UNLOCK_SCOPE(cx, scope_); \
+ } else { \
+ JS_UNLOCK_OBJ(cx, obj); \
+ ok = call; \
+ /* No fill here: js_SetProperty writes through the cache. */ \
+ } \
+ } \
+ JS_END_MACRO
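+
+/*
+ * Note: the cached paths consult rt->propertyCache under the object lock
+ * and fall back to the full OBJ_GET_PROPERTY/OBJ_SET_PROPERTY call on a
+ * miss (or, for sets, when the property is readonly or its scope is
+ * sealed); the slow paths are responsible for filling the cache.
+ */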
+
+#define BEGIN_LITOPX_CASE(OP,PCOFF) \
+ BEGIN_CASE(OP) \
+ pc2 = pc; \
+ atomIndex = GET_ATOM_INDEX(pc + PCOFF); \
+ do_##OP: \
+ atom = js_GetAtom(cx, &script->atomMap, atomIndex);
+
+#define END_LITOPX_CASE(OP) \
+ END_CASE(OP)
+
+ BEGIN_LITOPX_CASE(JSOP_SETCONST, 0)
+ obj = fp->varobj;
+ rval = FETCH_OPND(-1);
+ SAVE_SP_AND_PC(fp);
+ ok = OBJ_DEFINE_PROPERTY(cx, obj, ATOM_TO_JSID(atom), rval,
+ NULL, NULL,
+ JSPROP_ENUMERATE | JSPROP_PERMANENT |
+ JSPROP_READONLY,
+ NULL);
+ if (!ok)
+ goto out;
+ STORE_OPND(-1, rval);
+ END_LITOPX_CASE(JSOP_SETCONST)
+
+#if JS_HAS_DESTRUCTURING
+ BEGIN_CASE(JSOP_ENUMCONSTELEM)
+ FETCH_ELEMENT_ID(-1, id);
+ FETCH_OBJECT(cx, -2, lval, obj);
+ CHECK_ELEMENT_ID(obj, id);
+ rval = FETCH_OPND(-3);
+ SAVE_SP_AND_PC(fp);
+ ok = OBJ_DEFINE_PROPERTY(cx, obj, id, rval, NULL, NULL,
+ JSPROP_ENUMERATE | JSPROP_PERMANENT |
+ JSPROP_READONLY,
+ NULL);
+ if (!ok)
+ goto out;
+ sp -= 3;
+ END_CASE(JSOP_ENUMCONSTELEM)
+#endif
+
+ BEGIN_LITOPX_CASE(JSOP_BINDNAME, 0)
+ SAVE_SP_AND_PC(fp);
+ obj = js_FindIdentifierBase(cx, ATOM_TO_JSID(atom));
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ PUSH_OPND(OBJECT_TO_JSVAL(obj));
+ END_LITOPX_CASE(JSOP_BINDNAME)
+
+ BEGIN_CASE(JSOP_SETNAME)
+ atom = GET_ATOM(cx, script, pc);
+ id = ATOM_TO_JSID(atom);
+ rval = FETCH_OPND(-1);
+ lval = FETCH_OPND(-2);
+ JS_ASSERT(!JSVAL_IS_PRIMITIVE(lval));
+ obj = JSVAL_TO_OBJECT(lval);
+ SAVE_SP_AND_PC(fp);
+ CACHED_SET(OBJ_SET_PROPERTY(cx, obj, id, &rval));
+ if (!ok)
+ goto out;
+ sp--;
+ STORE_OPND(-1, rval);
+ obj = NULL;
+ END_CASE(JSOP_SETNAME)
+
+#define INTEGER_OP(OP, EXTRA_CODE) \
+ JS_BEGIN_MACRO \
+ FETCH_INT(cx, -1, j); \
+ FETCH_INT(cx, -2, i); \
+ EXTRA_CODE \
+ i = i OP j; \
+ sp--; \
+ STORE_INT(cx, -1, i); \
+ JS_END_MACRO
+
+#define BITWISE_OP(OP) INTEGER_OP(OP, (void) 0;)
+#define SIGNED_SHIFT_OP(OP) INTEGER_OP(OP, j &= 31;)
+
+ BEGIN_CASE(JSOP_BITOR)
+ BITWISE_OP(|);
+ END_CASE(JSOP_BITOR)
+
+ BEGIN_CASE(JSOP_BITXOR)
+ BITWISE_OP(^);
+ END_CASE(JSOP_BITXOR)
+
+ BEGIN_CASE(JSOP_BITAND)
+ BITWISE_OP(&);
+ END_CASE(JSOP_BITAND)
+
+#define RELATIONAL_OP(OP) \
+ JS_BEGIN_MACRO \
+ rval = FETCH_OPND(-1); \
+ lval = FETCH_OPND(-2); \
+ /* Optimize for two int-tagged operands (typical loop control). */ \
+ if ((lval & rval) & JSVAL_INT) { \
+ ltmp = lval ^ JSVAL_VOID; \
+ rtmp = rval ^ JSVAL_VOID; \
+ if (ltmp && rtmp) { \
+ cond = JSVAL_TO_INT(lval) OP JSVAL_TO_INT(rval); \
+ } else { \
+ d = ltmp ? JSVAL_TO_INT(lval) : *rt->jsNaN; \
+ d2 = rtmp ? JSVAL_TO_INT(rval) : *rt->jsNaN; \
+ cond = JSDOUBLE_COMPARE(d, OP, d2, JS_FALSE); \
+ } \
+ } else { \
+ VALUE_TO_PRIMITIVE(cx, lval, JSTYPE_NUMBER, &lval); \
+ sp[-2] = lval; \
+ VALUE_TO_PRIMITIVE(cx, rval, JSTYPE_NUMBER, &rval); \
+ if (JSVAL_IS_STRING(lval) && JSVAL_IS_STRING(rval)) { \
+ str = JSVAL_TO_STRING(lval); \
+ str2 = JSVAL_TO_STRING(rval); \
+ cond = js_CompareStrings(str, str2) OP 0; \
+ } else { \
+ VALUE_TO_NUMBER(cx, lval, d); \
+ VALUE_TO_NUMBER(cx, rval, d2); \
+ cond = JSDOUBLE_COMPARE(d, OP, d2, JS_FALSE); \
+ } \
+ } \
+ sp--; \
+ STORE_OPND(-1, BOOLEAN_TO_JSVAL(cond)); \
+ JS_END_MACRO
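+
+/*
+ * Illustration: int-tagged pairs (e.g. loop control such as i < n) stay in
+ * the fast path above; two strings compare via js_CompareStrings; a mixed
+ * pair such as 1 < "2" falls through to the numeric comparison after
+ * VALUE_TO_NUMBER.
+ */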
+
+/*
+ * NB: These macros can't use JS_BEGIN_MACRO/JS_END_MACRO around their bodies
+ * because they begin if/else chains, so callers must not put semicolons after
+ * the call expressions!
+ */
+#if JS_HAS_XML_SUPPORT
+#define XML_EQUALITY_OP(OP) \
+ if ((ltmp == JSVAL_OBJECT && \
+ (obj2 = JSVAL_TO_OBJECT(lval)) && \
+ OBJECT_IS_XML(cx, obj2)) || \
+ (rtmp == JSVAL_OBJECT && \
+ (obj2 = JSVAL_TO_OBJECT(rval)) && \
+ OBJECT_IS_XML(cx, obj2))) { \
+ JSXMLObjectOps *ops; \
+ \
+ ops = (JSXMLObjectOps *) obj2->map->ops; \
+ if (obj2 == JSVAL_TO_OBJECT(rval)) \
+ rval = lval; \
+ SAVE_SP_AND_PC(fp); \
+ ok = ops->equality(cx, obj2, rval, &cond); \
+ if (!ok) \
+ goto out; \
+ cond = cond OP JS_TRUE; \
+ } else
+
+#define EXTENDED_EQUALITY_OP(OP) \
+ if (ltmp == JSVAL_OBJECT && \
+ (obj2 = JSVAL_TO_OBJECT(lval)) && \
+ ((clasp = OBJ_GET_CLASS(cx, obj2))->flags & JSCLASS_IS_EXTENDED)) { \
+ JSExtendedClass *xclasp; \
+ \
+ xclasp = (JSExtendedClass *) clasp; \
+ SAVE_SP_AND_PC(fp); \
+ ok = xclasp->equality(cx, obj2, rval, &cond); \
+ if (!ok) \
+ goto out; \
+ cond = cond OP JS_TRUE; \
+ } else
+#else
+#define XML_EQUALITY_OP(OP) /* nothing */
+#define EXTENDED_EQUALITY_OP(OP) /* nothing */
+#endif
+
+#define EQUALITY_OP(OP, IFNAN) \
+ JS_BEGIN_MACRO \
+ rval = FETCH_OPND(-1); \
+ lval = FETCH_OPND(-2); \
+ ltmp = JSVAL_TAG(lval); \
+ rtmp = JSVAL_TAG(rval); \
+ XML_EQUALITY_OP(OP) \
+ if (ltmp == rtmp) { \
+ if (ltmp == JSVAL_STRING) { \
+ str = JSVAL_TO_STRING(lval); \
+ str2 = JSVAL_TO_STRING(rval); \
+ cond = js_EqualStrings(str, str2) OP JS_TRUE; \
+ } else if (ltmp == JSVAL_DOUBLE) { \
+ d = *JSVAL_TO_DOUBLE(lval); \
+ d2 = *JSVAL_TO_DOUBLE(rval); \
+ cond = JSDOUBLE_COMPARE(d, OP, d2, IFNAN); \
+ } else { \
+ EXTENDED_EQUALITY_OP(OP) \
+ /* Handle all undefined (=>NaN) and int combinations. */ \
+ cond = lval OP rval; \
+ } \
+ } else { \
+ if (JSVAL_IS_NULL(lval) || JSVAL_IS_VOID(lval)) { \
+ cond = (JSVAL_IS_NULL(rval) || JSVAL_IS_VOID(rval)) OP 1; \
+ } else if (JSVAL_IS_NULL(rval) || JSVAL_IS_VOID(rval)) { \
+ cond = 1 OP 0; \
+ } else { \
+ if (ltmp == JSVAL_OBJECT) { \
+ VALUE_TO_PRIMITIVE(cx, lval, JSTYPE_VOID, &sp[-2]); \
+ lval = sp[-2]; \
+ ltmp = JSVAL_TAG(lval); \
+ } else if (rtmp == JSVAL_OBJECT) { \
+ VALUE_TO_PRIMITIVE(cx, rval, JSTYPE_VOID, &sp[-1]); \
+ rval = sp[-1]; \
+ rtmp = JSVAL_TAG(rval); \
+ } \
+ if (ltmp == JSVAL_STRING && rtmp == JSVAL_STRING) { \
+ str = JSVAL_TO_STRING(lval); \
+ str2 = JSVAL_TO_STRING(rval); \
+ cond = js_EqualStrings(str, str2) OP JS_TRUE; \
+ } else { \
+ VALUE_TO_NUMBER(cx, lval, d); \
+ VALUE_TO_NUMBER(cx, rval, d2); \
+ cond = JSDOUBLE_COMPARE(d, OP, d2, IFNAN); \
+ } \
+ } \
+ } \
+ sp--; \
+ STORE_OPND(-1, BOOLEAN_TO_JSVAL(cond)); \
+ JS_END_MACRO
+
+ BEGIN_CASE(JSOP_EQ)
+ EQUALITY_OP(==, JS_FALSE);
+ END_CASE(JSOP_EQ)
+
+ BEGIN_CASE(JSOP_NE)
+ EQUALITY_OP(!=, JS_TRUE);
+ END_CASE(JSOP_NE)
+
+#define NEW_EQUALITY_OP(OP) \
+ JS_BEGIN_MACRO \
+ rval = FETCH_OPND(-1); \
+ lval = FETCH_OPND(-2); \
+ cond = js_StrictlyEqual(lval, rval) OP JS_TRUE; \
+ sp--; \
+ STORE_OPND(-1, BOOLEAN_TO_JSVAL(cond)); \
+ JS_END_MACRO
+
+ BEGIN_CASE(JSOP_NEW_EQ)
+ NEW_EQUALITY_OP(==);
+ END_CASE(JSOP_NEW_EQ)
+
+ BEGIN_CASE(JSOP_NEW_NE)
+ NEW_EQUALITY_OP(!=);
+ END_CASE(JSOP_NEW_NE)
+
+ BEGIN_CASE(JSOP_CASE)
+ pc2 = (jsbytecode *) sp[-2-depth];
+ NEW_EQUALITY_OP(==);
+ (void) POP();
+ if (cond) {
+ len = GET_JUMP_OFFSET(pc);
+ CHECK_BRANCH(len);
+ DO_NEXT_OP(len);
+ }
+ sp[-depth] = (jsval)pc2;
+ PUSH(lval);
+ END_CASE(JSOP_CASE)
+
+ BEGIN_CASE(JSOP_CASEX)
+ pc2 = (jsbytecode *) sp[-2-depth];
+ NEW_EQUALITY_OP(==);
+ (void) POP();
+ if (cond) {
+ len = GET_JUMPX_OFFSET(pc);
+ CHECK_BRANCH(len);
+ DO_NEXT_OP(len);
+ }
+ sp[-depth] = (jsval)pc2;
+ PUSH(lval);
+ END_CASE(JSOP_CASEX)
+
+ BEGIN_CASE(JSOP_LT)
+ RELATIONAL_OP(<);
+ END_CASE(JSOP_LT)
+
+ BEGIN_CASE(JSOP_LE)
+ RELATIONAL_OP(<=);
+ END_CASE(JSOP_LE)
+
+ BEGIN_CASE(JSOP_GT)
+ RELATIONAL_OP(>);
+ END_CASE(JSOP_GT)
+
+ BEGIN_CASE(JSOP_GE)
+ RELATIONAL_OP(>=);
+ END_CASE(JSOP_GE)
+
+#undef EQUALITY_OP
+#undef RELATIONAL_OP
+
+ BEGIN_CASE(JSOP_LSH)
+ SIGNED_SHIFT_OP(<<);
+ END_CASE(JSOP_LSH)
+
+ BEGIN_CASE(JSOP_RSH)
+ SIGNED_SHIFT_OP(>>);
+ END_CASE(JSOP_RSH)
+
+ BEGIN_CASE(JSOP_URSH)
+ {
+ uint32 u;
+
+ FETCH_INT(cx, -1, j);
+ FETCH_UINT(cx, -2, u);
+ u >>= j & 31;
+ sp--;
+ STORE_UINT(cx, -1, u);
+ }
+ END_CASE(JSOP_URSH)
+
+#undef INTEGER_OP
+#undef BITWISE_OP
+#undef SIGNED_SHIFT_OP
+
+ BEGIN_CASE(JSOP_ADD)
+ rval = FETCH_OPND(-1);
+ lval = FETCH_OPND(-2);
+#if JS_HAS_XML_SUPPORT
+ if (!JSVAL_IS_PRIMITIVE(lval) &&
+ (obj2 = JSVAL_TO_OBJECT(lval), OBJECT_IS_XML(cx, obj2)) &&
+ VALUE_IS_XML(cx, rval)) {
+ JSXMLObjectOps *ops;
+
+ ops = (JSXMLObjectOps *) obj2->map->ops;
+ SAVE_SP_AND_PC(fp);
+ ok = ops->concatenate(cx, obj2, rval, &rval);
+ if (!ok)
+ goto out;
+ sp--;
+ STORE_OPND(-1, rval);
+ } else
+#endif
+ {
+ VALUE_TO_PRIMITIVE(cx, lval, JSTYPE_VOID, &sp[-2]);
+ lval = sp[-2];
+ VALUE_TO_PRIMITIVE(cx, rval, JSTYPE_VOID, &sp[-1]);
+ rval = sp[-1];
+ if ((cond = JSVAL_IS_STRING(lval)) || JSVAL_IS_STRING(rval)) {
+ SAVE_SP_AND_PC(fp);
+ if (cond) {
+ str = JSVAL_TO_STRING(lval);
+ ok = (str2 = js_ValueToString(cx, rval)) != NULL;
+ if (!ok)
+ goto out;
+ sp[-1] = STRING_TO_JSVAL(str2);
+ } else {
+ str2 = JSVAL_TO_STRING(rval);
+ ok = (str = js_ValueToString(cx, lval)) != NULL;
+ if (!ok)
+ goto out;
+ sp[-2] = STRING_TO_JSVAL(str);
+ }
+ str = js_ConcatStrings(cx, str, str2);
+ if (!str) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ sp--;
+ STORE_OPND(-1, STRING_TO_JSVAL(str));
+ } else {
+ VALUE_TO_NUMBER(cx, lval, d);
+ VALUE_TO_NUMBER(cx, rval, d2);
+ d += d2;
+ sp--;
+ STORE_NUMBER(cx, -1, d);
+ }
+ }
+ END_CASE(JSOP_ADD)
+
+#define BINARY_OP(OP) \
+ JS_BEGIN_MACRO \
+ FETCH_NUMBER(cx, -1, d2); \
+ FETCH_NUMBER(cx, -2, d); \
+ d = d OP d2; \
+ sp--; \
+ STORE_NUMBER(cx, -1, d); \
+ JS_END_MACRO
+
+ BEGIN_CASE(JSOP_SUB)
+ BINARY_OP(-);
+ END_CASE(JSOP_SUB)
+
+ BEGIN_CASE(JSOP_MUL)
+ BINARY_OP(*);
+ END_CASE(JSOP_MUL)
+
+ BEGIN_CASE(JSOP_DIV)
+ FETCH_NUMBER(cx, -1, d2);
+ FETCH_NUMBER(cx, -2, d);
+ sp--;
+ if (d2 == 0) {
+#ifdef XP_WIN
+ /* XXX MSVC miscompiles such that (NaN == 0) */
+ if (JSDOUBLE_IS_NaN(d2))
+ rval = DOUBLE_TO_JSVAL(rt->jsNaN);
+ else
+#endif
+ if (d == 0 || JSDOUBLE_IS_NaN(d))
+ rval = DOUBLE_TO_JSVAL(rt->jsNaN);
+ else if ((JSDOUBLE_HI32(d) ^ JSDOUBLE_HI32(d2)) >> 31)
+ rval = DOUBLE_TO_JSVAL(rt->jsNegativeInfinity);
+ else
+ rval = DOUBLE_TO_JSVAL(rt->jsPositiveInfinity);
+ STORE_OPND(-1, rval);
+ } else {
+ d /= d2;
+ STORE_NUMBER(cx, -1, d);
+ }
+ END_CASE(JSOP_DIV)
+
+ BEGIN_CASE(JSOP_MOD)
+ FETCH_NUMBER(cx, -1, d2);
+ FETCH_NUMBER(cx, -2, d);
+ sp--;
+ if (d2 == 0) {
+ STORE_OPND(-1, DOUBLE_TO_JSVAL(rt->jsNaN));
+ } else {
+#ifdef XP_WIN
+ /* Workaround MS fmod bug where 42 % (1/0) => NaN, not 42. */
+ if (!(JSDOUBLE_IS_FINITE(d) && JSDOUBLE_IS_INFINITE(d2)))
+#endif
+ d = fmod(d, d2);
+ STORE_NUMBER(cx, -1, d);
+ }
+ END_CASE(JSOP_MOD)
+
+ BEGIN_CASE(JSOP_NOT)
+ POP_BOOLEAN(cx, rval, cond);
+ PUSH_OPND(BOOLEAN_TO_JSVAL(!cond));
+ END_CASE(JSOP_NOT)
+
+ BEGIN_CASE(JSOP_BITNOT)
+ FETCH_INT(cx, -1, i);
+ i = ~i;
+ STORE_INT(cx, -1, i);
+ END_CASE(JSOP_BITNOT)
+
+ BEGIN_CASE(JSOP_NEG)
+ /*
+ * Optimize the case of an int-tagged operand by noting that
+ * INT_FITS_IN_JSVAL(i) => INT_FITS_IN_JSVAL(-i) unless i is 0, in which
+ * case -i is negative zero and must be represented as a jsdouble.
+ */
+ rval = FETCH_OPND(-1);
+ if (JSVAL_IS_INT(rval) && (i = JSVAL_TO_INT(rval)) != 0) {
+ i = -i;
+ JS_ASSERT(INT_FITS_IN_JSVAL(i));
+ rval = INT_TO_JSVAL(i);
+ } else {
+ if (JSVAL_IS_DOUBLE(rval)) {
+ d = *JSVAL_TO_DOUBLE(rval);
+ } else {
+ SAVE_SP_AND_PC(fp);
+ ok = js_ValueToNumber(cx, rval, &d);
+ if (!ok)
+ goto out;
+ }
+#ifdef HPUX
+ /*
+ * Negation of a zero doesn't produce a negative
+ * zero on HPUX. Perform the operation by bit
+ * twiddling.
+ */
+ JSDOUBLE_HI32(d) ^= JSDOUBLE_HI32_SIGNBIT;
+#else
+ d = -d;
+#endif
+ ok = js_NewNumberValue(cx, d, &rval);
+ if (!ok)
+ goto out;
+ }
+ STORE_OPND(-1, rval);
+ END_CASE(JSOP_NEG)
+
+ BEGIN_CASE(JSOP_POS)
+ rval = FETCH_OPND(-1);
+ if (!JSVAL_IS_NUMBER(rval)) {
+ SAVE_SP_AND_PC(fp);
+ ok = js_ValueToNumber(cx, rval, &d);
+ if (!ok)
+ goto out;
+ ok = js_NewNumberValue(cx, d, &rval);
+ if (!ok)
+ goto out;
+ sp[-1] = rval;
+ }
+ sp[-1-depth] = (jsval)pc;
+ END_CASE(JSOP_POS)
+
+ BEGIN_CASE(JSOP_NEW)
+ /* Get immediate argc and find the constructor function. */
+ argc = GET_ARGC(pc);
+
+ do_new:
+ SAVE_SP_AND_PC(fp);
+ vp = sp - (2 + argc);
+ JS_ASSERT(vp >= fp->spbase);
+
+ ok = js_InvokeConstructor(cx, vp, argc);
+ if (!ok)
+ goto out;
+ RESTORE_SP(fp);
+ LOAD_BRANCH_CALLBACK(cx);
+ LOAD_INTERRUPT_HANDLER(rt);
+ obj = JSVAL_TO_OBJECT(*vp);
+ len = js_CodeSpec[op].length;
+ DO_NEXT_OP(len);
+
+ BEGIN_CASE(JSOP_DELNAME)
+ atom = GET_ATOM(cx, script, pc);
+ id = ATOM_TO_JSID(atom);
+
+ SAVE_SP_AND_PC(fp);
+ ok = js_FindProperty(cx, id, &obj, &obj2, &prop);
+ if (!ok)
+ goto out;
+
+ /* ECMA says to return true if name is undefined or inherited. */
+ rval = JSVAL_TRUE;
+ if (prop) {
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ ok = OBJ_DELETE_PROPERTY(cx, obj, id, &rval);
+ if (!ok)
+ goto out;
+ }
+ PUSH_OPND(rval);
+ END_CASE(JSOP_DELNAME)
+
+ BEGIN_CASE(JSOP_DELPROP)
+ atom = GET_ATOM(cx, script, pc);
+ id = ATOM_TO_JSID(atom);
+ PROPERTY_OP(-1, ok = OBJ_DELETE_PROPERTY(cx, obj, id, &rval));
+ STORE_OPND(-1, rval);
+ END_CASE(JSOP_DELPROP)
+
+ BEGIN_CASE(JSOP_DELELEM)
+ ELEMENT_OP(-1, ok = OBJ_DELETE_PROPERTY(cx, obj, id, &rval));
+ sp--;
+ STORE_OPND(-1, rval);
+ END_CASE(JSOP_DELELEM)
+
+ BEGIN_CASE(JSOP_TYPEOFEXPR)
+ BEGIN_CASE(JSOP_TYPEOF)
+ rval = FETCH_OPND(-1);
+ SAVE_SP_AND_PC(fp);
+ type = JS_TypeOfValue(cx, rval);
+ atom = rt->atomState.typeAtoms[type];
+ STORE_OPND(-1, ATOM_KEY(atom));
+ END_CASE(JSOP_TYPEOF)
+
+ BEGIN_CASE(JSOP_VOID)
+ (void) POP_OPND();
+ PUSH_OPND(JSVAL_VOID);
+ END_CASE(JSOP_VOID)
+
+ BEGIN_CASE(JSOP_INCNAME)
+ BEGIN_CASE(JSOP_DECNAME)
+ BEGIN_CASE(JSOP_NAMEINC)
+ BEGIN_CASE(JSOP_NAMEDEC)
+ atom = GET_ATOM(cx, script, pc);
+ id = ATOM_TO_JSID(atom);
+
+ SAVE_SP_AND_PC(fp);
+ ok = js_FindProperty(cx, id, &obj, &obj2, &prop);
+ if (!ok)
+ goto out;
+ if (!prop)
+ goto atom_not_defined;
+
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ lval = OBJECT_TO_JSVAL(obj);
+ i = 0;
+ goto do_incop;
+
+ BEGIN_CASE(JSOP_INCPROP)
+ BEGIN_CASE(JSOP_DECPROP)
+ BEGIN_CASE(JSOP_PROPINC)
+ BEGIN_CASE(JSOP_PROPDEC)
+ atom = GET_ATOM(cx, script, pc);
+ id = ATOM_TO_JSID(atom);
+ lval = FETCH_OPND(-1);
+ i = -1;
+ goto do_incop;
+
+ BEGIN_CASE(JSOP_INCELEM)
+ BEGIN_CASE(JSOP_DECELEM)
+ BEGIN_CASE(JSOP_ELEMINC)
+ BEGIN_CASE(JSOP_ELEMDEC)
+ FETCH_ELEMENT_ID(-1, id);
+ lval = FETCH_OPND(-2);
+ i = -2;
+
+ do_incop:
+ {
+ const JSCodeSpec *cs;
+
+ VALUE_TO_OBJECT(cx, lval, obj);
+ if (i < 0)
+ STORE_OPND(i, OBJECT_TO_JSVAL(obj));
+ CHECK_ELEMENT_ID(obj, id);
+
+ /* The operand must contain a number. */
+ SAVE_SP_AND_PC(fp);
+ CACHED_GET(OBJ_GET_PROPERTY(cx, obj, id, &rval));
+ if (!ok)
+ goto out;
+
+ /* Preload for use in the if/else immediately below. */
+ cs = &js_CodeSpec[op];
+
+ /* The expression result goes in rtmp, the updated value in rval. */
+ if (JSVAL_IS_INT(rval) &&
+ rval != INT_TO_JSVAL(JSVAL_INT_MIN) &&
+ rval != INT_TO_JSVAL(JSVAL_INT_MAX)) {
+ if (cs->format & JOF_POST) {
+ rtmp = rval;
+ (cs->format & JOF_INC) ? (rval += 2) : (rval -= 2);
+ } else {
+ (cs->format & JOF_INC) ? (rval += 2) : (rval -= 2);
+ rtmp = rval;
+ }
+ } else {
+
+/*
+ * Initially, rval contains the value to increment or decrement, which is not
+ * yet converted. As above, the expression result goes in rtmp, the updated
+ * value goes in rval. Our caller must set vp to point at a GC-rooted jsval
+ * in which we home rtmp, to protect it from GC in case the unconverted rval
+ * is not a number.
+ */
+#define NONINT_INCREMENT_OP_MIDDLE() \
+ JS_BEGIN_MACRO \
+ VALUE_TO_NUMBER(cx, rval, d); \
+ if (cs->format & JOF_POST) { \
+ rtmp = rval; \
+ if (!JSVAL_IS_NUMBER(rtmp)) { \
+ ok = js_NewNumberValue(cx, d, &rtmp); \
+ if (!ok) \
+ goto out; \
+ } \
+ *vp = rtmp; \
+ (cs->format & JOF_INC) ? d++ : d--; \
+ ok = js_NewNumberValue(cx, d, &rval); \
+ } else { \
+ (cs->format & JOF_INC) ? ++d : --d; \
+ ok = js_NewNumberValue(cx, d, &rval); \
+ rtmp = rval; \
+ } \
+ if (!ok) \
+ goto out; \
+ JS_END_MACRO
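+
+/*
+ * Worked case (illustrative): for s++ where s currently holds the string
+ * "5", VALUE_TO_NUMBER yields d = 5; the post-increment result 5 is boxed
+ * into rtmp (and homed in *vp), d becomes 6, and the new value 6 is written
+ * back through rval by the enclosing increment case.
+ */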
+
+ if (cs->format & JOF_POST) {
+ /*
+ * We must push early to protect the postfix increment
+ * or decrement result, if converted to a jsdouble from
+ * a non-number value, from GC nesting in the setter.
+ */
+ vp = sp;
+ PUSH(JSVAL_VOID);
+ SAVE_SP(fp);
+ --i;
+ }
+#ifdef __GNUC__
+ else vp = NULL; /* suppress bogus gcc warnings */
+#endif
+
+ NONINT_INCREMENT_OP_MIDDLE();
+ }
+
+ fp->flags |= JSFRAME_ASSIGNING;
+ CACHED_SET(OBJ_SET_PROPERTY(cx, obj, id, &rval));
+ fp->flags &= ~JSFRAME_ASSIGNING;
+ if (!ok)
+ goto out;
+ sp += i;
+ PUSH_OPND(rtmp);
+ len = js_CodeSpec[op].length;
+ DO_NEXT_OP(len);
+ }
+
+/* NB: This macro doesn't use JS_BEGIN_MACRO/JS_END_MACRO around its body. */
+#define FAST_INCREMENT_OP(SLOT,COUNT,BASE,PRE,OPEQ,MINMAX) \
+ slot = SLOT; \
+ JS_ASSERT(slot < fp->fun->COUNT); \
+ vp = fp->BASE + slot; \
+ rval = *vp; \
+ if (!JSVAL_IS_INT(rval) || rval == INT_TO_JSVAL(JSVAL_INT_##MINMAX)) \
+ goto do_nonint_fast_incop; \
+ PRE = rval; \
+ rval OPEQ 2; \
+ *vp = rval; \
+ PUSH_OPND(PRE); \
+ goto end_nonint_fast_incop
+
+ BEGIN_CASE(JSOP_INCARG)
+ FAST_INCREMENT_OP(GET_ARGNO(pc), nargs, argv, rval, +=, MAX);
+ BEGIN_CASE(JSOP_DECARG)
+ FAST_INCREMENT_OP(GET_ARGNO(pc), nargs, argv, rval, -=, MIN);
+ BEGIN_CASE(JSOP_ARGINC)
+ FAST_INCREMENT_OP(GET_ARGNO(pc), nargs, argv, rtmp, +=, MAX);
+ BEGIN_CASE(JSOP_ARGDEC)
+ FAST_INCREMENT_OP(GET_ARGNO(pc), nargs, argv, rtmp, -=, MIN);
+
+ BEGIN_CASE(JSOP_INCVAR)
+ FAST_INCREMENT_OP(GET_VARNO(pc), u.i.nvars, vars, rval, +=, MAX);
+ BEGIN_CASE(JSOP_DECVAR)
+ FAST_INCREMENT_OP(GET_VARNO(pc), u.i.nvars, vars, rval, -=, MIN);
+ BEGIN_CASE(JSOP_VARINC)
+ FAST_INCREMENT_OP(GET_VARNO(pc), u.i.nvars, vars, rtmp, +=, MAX);
+ BEGIN_CASE(JSOP_VARDEC)
+ FAST_INCREMENT_OP(GET_VARNO(pc), u.i.nvars, vars, rtmp, -=, MIN);
+
+ end_nonint_fast_incop:
+ len = JSOP_INCARG_LENGTH; /* all fast incops are same length */
+ DO_NEXT_OP(len);
+
+#undef FAST_INCREMENT_OP
+
+ do_nonint_fast_incop:
+ {
+ const JSCodeSpec *cs = &js_CodeSpec[op];
+
+ NONINT_INCREMENT_OP_MIDDLE();
+ *vp = rval;
+ PUSH_OPND(rtmp);
+ len = cs->length;
+ DO_NEXT_OP(len);
+ }
+
+/* NB: This macro doesn't use JS_BEGIN_MACRO/JS_END_MACRO around its body. */
+#define FAST_GLOBAL_INCREMENT_OP(SLOWOP,PRE,OPEQ,MINMAX) \
+ slot = GET_VARNO(pc); \
+ JS_ASSERT(slot < fp->nvars); \
+ lval = fp->vars[slot]; \
+ if (JSVAL_IS_NULL(lval)) { \
+ op = SLOWOP; \
+ DO_OP(); \
+ } \
+ slot = JSVAL_TO_INT(lval); \
+ obj = fp->varobj; \
+ rval = OBJ_GET_SLOT(cx, obj, slot); \
+ if (!JSVAL_IS_INT(rval) || rval == INT_TO_JSVAL(JSVAL_INT_##MINMAX)) \
+ goto do_nonint_fast_global_incop; \
+ PRE = rval; \
+ rval OPEQ 2; \
+ OBJ_SET_SLOT(cx, obj, slot, rval); \
+ PUSH_OPND(PRE); \
+ goto end_nonint_fast_global_incop
+
+ BEGIN_CASE(JSOP_INCGVAR)
+ FAST_GLOBAL_INCREMENT_OP(JSOP_INCNAME, rval, +=, MAX);
+ BEGIN_CASE(JSOP_DECGVAR)
+ FAST_GLOBAL_INCREMENT_OP(JSOP_DECNAME, rval, -=, MIN);
+ BEGIN_CASE(JSOP_GVARINC)
+ FAST_GLOBAL_INCREMENT_OP(JSOP_NAMEINC, rtmp, +=, MAX);
+ BEGIN_CASE(JSOP_GVARDEC)
+ FAST_GLOBAL_INCREMENT_OP(JSOP_NAMEDEC, rtmp, -=, MIN);
+
+ end_nonint_fast_global_incop:
+ len = JSOP_INCGVAR_LENGTH; /* all gvar incops are same length */
+ JS_ASSERT(len == js_CodeSpec[op].length);
+ DO_NEXT_OP(len);
+
+#undef FAST_GLOBAL_INCREMENT_OP
+
+ do_nonint_fast_global_incop:
+ {
+ const JSCodeSpec *cs = &js_CodeSpec[op];
+
+ vp = sp++;
+ SAVE_SP(fp);
+ NONINT_INCREMENT_OP_MIDDLE();
+ OBJ_SET_SLOT(cx, obj, slot, rval);
+ STORE_OPND(-1, rtmp);
+ len = cs->length;
+ DO_NEXT_OP(len);
+ }
+
+ BEGIN_CASE(JSOP_GETPROP)
+ BEGIN_CASE(JSOP_GETXPROP)
+ /* Get an immediate atom naming the property. */
+ atom = GET_ATOM(cx, script, pc);
+ lval = FETCH_OPND(-1);
+ if (JSVAL_IS_STRING(lval) &&
+ atom == cx->runtime->atomState.lengthAtom) {
+ rval = INT_TO_JSVAL(JSSTRING_LENGTH(JSVAL_TO_STRING(lval)));
+ obj = NULL;
+ } else {
+ id = ATOM_TO_JSID(atom);
+ VALUE_TO_OBJECT(cx, lval, obj);
+ STORE_OPND(-1, OBJECT_TO_JSVAL(obj));
+ SAVE_SP_AND_PC(fp);
+ CACHED_GET(OBJ_GET_PROPERTY(cx, obj, id, &rval));
+ if (!ok)
+ goto out;
+ }
+ STORE_OPND(-1, rval);
+ END_CASE(JSOP_GETPROP)
+
+ BEGIN_CASE(JSOP_SETPROP)
+ /* Pop the right-hand side into rval for OBJ_SET_PROPERTY. */
+ rval = FETCH_OPND(-1);
+
+ /* Get an immediate atom naming the property. */
+ atom = GET_ATOM(cx, script, pc);
+ id = ATOM_TO_JSID(atom);
+ PROPERTY_OP(-2, CACHED_SET(OBJ_SET_PROPERTY(cx, obj, id, &rval)));
+ sp--;
+ STORE_OPND(-1, rval);
+ obj = NULL;
+ END_CASE(JSOP_SETPROP)
+
+ BEGIN_CASE(JSOP_GETELEM)
+ BEGIN_CASE(JSOP_GETXELEM)
+ ELEMENT_OP(-1, CACHED_GET(OBJ_GET_PROPERTY(cx, obj, id, &rval)));
+ sp--;
+ STORE_OPND(-1, rval);
+ END_CASE(JSOP_GETELEM)
+
+ BEGIN_CASE(JSOP_SETELEM)
+ rval = FETCH_OPND(-1);
+ ELEMENT_OP(-2, CACHED_SET(OBJ_SET_PROPERTY(cx, obj, id, &rval)));
+ sp -= 2;
+ STORE_OPND(-1, rval);
+ obj = NULL;
+ END_CASE(JSOP_SETELEM)
+
+ BEGIN_CASE(JSOP_ENUMELEM)
+ /* Funky: the value to set is under the [obj, id] pair. */
+ FETCH_ELEMENT_ID(-1, id);
+ FETCH_OBJECT(cx, -2, lval, obj);
+ CHECK_ELEMENT_ID(obj, id);
+ rval = FETCH_OPND(-3);
+ SAVE_SP_AND_PC(fp);
+ ok = OBJ_SET_PROPERTY(cx, obj, id, &rval);
+ if (!ok)
+ goto out;
+ sp -= 3;
+ END_CASE(JSOP_ENUMELEM)
+
+/*
+ * LAZY_ARGS_THISP allows the JSOP_ARGSUB bytecode to defer creation of the
+ * arguments object until it is truly needed. JSOP_ARGSUB optimizes away
+ * arguments objects when the only uses of the 'arguments' parameter are to
+ * fetch individual actual parameters. But if such a use were then invoked,
+ * e.g., arguments[i](), the 'this' parameter would and must bind to the
+ * caller's arguments object. So JSOP_ARGSUB sets obj to LAZY_ARGS_THISP.
+ */
+#define LAZY_ARGS_THISP ((JSObject *) JSVAL_VOID)
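+
+/*
+ * Illustrative case: in "function f() { return arguments[0](); }" the callee
+ * invoked via arguments[0]() must get the real arguments object as |this|,
+ * so JSOP_PUSHOBJ below materializes it through js_GetArgsObject when obj
+ * is LAZY_ARGS_THISP.
+ */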
+
+ BEGIN_CASE(JSOP_PUSHOBJ)
+ if (obj == LAZY_ARGS_THISP && !(obj = js_GetArgsObject(cx, fp))) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ PUSH_OPND(OBJECT_TO_JSVAL(obj));
+ END_CASE(JSOP_PUSHOBJ)
+
+ BEGIN_CASE(JSOP_CALL)
+ BEGIN_CASE(JSOP_EVAL)
+ argc = GET_ARGC(pc);
+ vp = sp - (argc + 2);
+ lval = *vp;
+ SAVE_SP_AND_PC(fp);
+ if (VALUE_IS_FUNCTION(cx, lval) &&
+ (obj = JSVAL_TO_OBJECT(lval),
+ fun = (JSFunction *) JS_GetPrivate(cx, obj),
+ FUN_INTERPRETED(fun)))
+ /* inline_call: */
+ {
+ uintN nframeslots, nvars, nslots, missing;
+ JSArena *a;
+ jsuword avail, nbytes;
+ JSBool overflow;
+ void *newmark;
+ jsval *rvp;
+ JSInlineFrame *newifp;
+ JSInterpreterHook hook;
+
+ /* Restrict recursion of lightweight functions. */
+ if (inlineCallCount == MAX_INLINE_CALL_COUNT) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_OVER_RECURSED);
+ ok = JS_FALSE;
+ goto out;
+ }
+
+ /* Compute the total number of stack slots needed for fun. */
+ nframeslots = JS_HOWMANY(sizeof(JSInlineFrame), sizeof(jsval));
+ nvars = fun->u.i.nvars;
+ script = fun->u.i.script;
+ depth = (jsint) script->depth;
+ nslots = nframeslots + nvars + 2 * depth;
+
+ /* Allocate missing expected args adjacent to actual args. */
+ missing = (fun->nargs > argc) ? fun->nargs - argc : 0;
+ a = cx->stackPool.current;
+ avail = a->avail;
+ newmark = (void *) avail;
+ if (missing) {
+ newsp = sp + missing;
+ overflow = (jsuword) newsp > a->limit;
+ if (overflow)
+ nslots += 2 + argc + missing;
+ else if ((jsuword) newsp > avail)
+ avail = a->avail = (jsuword) newsp;
+ }
+#ifdef __GNUC__
+ else overflow = JS_FALSE; /* suppress bogus gcc warnings */
+#endif
+
+ /* Allocate the inline frame with its vars and operand slots. */
+ newsp = (jsval *) avail;
+ nbytes = nslots * sizeof(jsval);
+ avail += nbytes;
+ if (avail <= a->limit) {
+ a->avail = avail;
+ } else {
+ JS_ARENA_ALLOCATE_CAST(newsp, jsval *, &cx->stackPool,
+ nbytes);
+ if (!newsp) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_STACK_OVERFLOW,
+ (fp && fp->fun)
+ ? JS_GetFunctionName(fp->fun)
+ : "script");
+ goto bad_inline_call;
+ }
+ }
+
+ /* Move args if the missing ones overflow arena a, then push missing args. */
+ rvp = vp;
+ if (missing) {
+ if (overflow) {
+ memcpy(newsp, vp, (2 + argc) * sizeof(jsval));
+ vp = newsp;
+ sp = vp + 2 + argc;
+ newsp = sp + missing;
+ }
+ do {
+ PUSH(JSVAL_VOID);
+ } while (--missing != 0);
+ }
+
+ /* Claim space for the stack frame and initialize it. */
+ newifp = (JSInlineFrame *) newsp;
+ newsp += nframeslots;
+ newifp->frame.callobj = NULL;
+ newifp->frame.argsobj = NULL;
+ newifp->frame.varobj = NULL;
+ newifp->frame.script = script;
+ newifp->frame.fun = fun;
+ newifp->frame.argc = argc;
+ newifp->frame.argv = vp + 2;
+ newifp->frame.rval = JSVAL_VOID;
+ newifp->frame.nvars = nvars;
+ newifp->frame.vars = newsp;
+ newifp->frame.down = fp;
+ newifp->frame.annotation = NULL;
+ newifp->frame.scopeChain = parent = OBJ_GET_PARENT(cx, obj);
+ newifp->frame.sharpDepth = 0;
+ newifp->frame.sharpArray = NULL;
+ newifp->frame.flags = 0;
+ newifp->frame.dormantNext = NULL;
+ newifp->frame.xmlNamespace = NULL;
+ newifp->frame.blockChain = NULL;
+ newifp->rvp = rvp;
+ newifp->mark = newmark;
+
+ /* Compute the 'this' parameter now that argv is set. */
+ if (!JSVAL_IS_OBJECT(vp[1])) {
+ PRIMITIVE_TO_OBJECT(cx, vp[1], obj2);
+ if (!obj2)
+ goto bad_inline_call;
+ vp[1] = OBJECT_TO_JSVAL(obj2);
+ }
+ newifp->frame.thisp =
+ js_ComputeThis(cx,
+ JSFUN_BOUND_METHOD_TEST(fun->flags)
+ ? parent
+ : JSVAL_TO_OBJECT(vp[1]),
+ newifp->frame.argv);
+ if (!newifp->frame.thisp)
+ goto bad_inline_call;
+#ifdef DUMP_CALL_TABLE
+ LogCall(cx, *vp, argc, vp + 2);
+#endif
+
+ /* Push void to initialize local variables. */
+ sp = newsp;
+ while (nvars--)
+ PUSH(JSVAL_VOID);
+ sp += depth;
+ newifp->frame.spbase = sp;
+ SAVE_SP(&newifp->frame);
+
+ /* Call the debugger hook if present. */
+ hook = rt->callHook;
+ if (hook) {
+ newifp->frame.pc = NULL;
+ newifp->hookData = hook(cx, &newifp->frame, JS_TRUE, 0,
+ rt->callHookData);
+ LOAD_INTERRUPT_HANDLER(rt);
+ } else {
+ newifp->hookData = NULL;
+ }
+
+ /* Scope with a call object parented by the callee's parent. */
+ if (JSFUN_HEAVYWEIGHT_TEST(fun->flags) &&
+ !js_GetCallObject(cx, &newifp->frame, parent)) {
+ goto bad_inline_call;
+ }
+
+ /* Switch to new version if currentVersion wasn't overridden. */
+ newifp->callerVersion = cx->version;
+ if (JS_LIKELY(cx->version == currentVersion)) {
+ currentVersion = script->version;
+ if (currentVersion != cx->version)
+ js_SetVersion(cx, currentVersion);
+ }
+
+ /* Push the frame and set interpreter registers. */
+ cx->fp = fp = &newifp->frame;
+ pc = script->code;
+#ifndef JS_THREADED_INTERP
+ endpc = pc + script->length;
+#endif
+ obj = NULL;
+ inlineCallCount++;
+ JS_RUNTIME_METER(rt, inlineCalls);
+
+ /* Load first opcode and dispatch it (safe since JSOP_STOP). */
+ op = *pc;
+ DO_OP();
+
+ bad_inline_call:
+ RESTORE_SP(fp);
+ JS_ASSERT(fp->pc == pc);
+ script = fp->script;
+ depth = (jsint) script->depth;
+ js_FreeRawStack(cx, newmark);
+ ok = JS_FALSE;
+ goto out;
+ }
+
+ ok = js_Invoke(cx, argc, 0);
+ RESTORE_SP(fp);
+ LOAD_BRANCH_CALLBACK(cx);
+ LOAD_INTERRUPT_HANDLER(rt);
+ if (!ok)
+ goto out;
+ JS_RUNTIME_METER(rt, nonInlineCalls);
+#if JS_HAS_LVALUE_RETURN
+ if (cx->rval2set) {
+ /*
+ * Use the stack depth we didn't claim in our budget, but that
+ * we know is there on account of [fun, this] already having
+ * been pushed, at a minimum (if no args). Those two slots
+ * have been popped and [rval] has been pushed, which leaves
+ * one more slot for rval2 before we might overflow.
+ *
+ * NB: rval2 must be the property identifier, and rval the
+ * object from which to get the property. The pair form an
+ * ECMA "reference type", which can be used on the right- or
+ * left-hand side of assignment ops. Note well: only native
+ * methods can return reference types. See JSOP_SETCALL just
+ * below for the left-hand-side case.
+ */
+ PUSH_OPND(cx->rval2);
+ ELEMENT_OP(-1, ok = OBJ_GET_PROPERTY(cx, obj, id, &rval));
+
+ sp--;
+ STORE_OPND(-1, rval);
+ cx->rval2set = JS_FALSE;
+ }
+#endif /* JS_HAS_LVALUE_RETURN */
+ obj = NULL;
+ END_CASE(JSOP_CALL)
+
+#if JS_HAS_LVALUE_RETURN
+ BEGIN_CASE(JSOP_SETCALL)
+ argc = GET_ARGC(pc);
+ SAVE_SP_AND_PC(fp);
+ ok = js_Invoke(cx, argc, 0);
+ RESTORE_SP(fp);
+ LOAD_BRANCH_CALLBACK(cx);
+ LOAD_INTERRUPT_HANDLER(rt);
+ if (!ok)
+ goto out;
+ if (!cx->rval2set) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_LEFTSIDE_OF_ASS);
+ ok = JS_FALSE;
+ goto out;
+ }
+ PUSH_OPND(cx->rval2);
+ cx->rval2set = JS_FALSE;
+ obj = NULL;
+ END_CASE(JSOP_SETCALL)
+#endif
+
+ BEGIN_CASE(JSOP_NAME)
+ atom = GET_ATOM(cx, script, pc);
+ id = ATOM_TO_JSID(atom);
+
+ SAVE_SP_AND_PC(fp);
+ ok = js_FindProperty(cx, id, &obj, &obj2, &prop);
+ if (!ok)
+ goto out;
+ if (!prop) {
+ /* Kludge to allow (typeof foo == "undefined") tests. */
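+ /*
+ * The forward scan below also skips JSOP_GROUP, so a parenthesized
+ * operand as in typeof (foo) gets the same undefined result instead
+ * of an undefined-name error.
+ */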
+ len = JSOP_NAME_LENGTH;
+ endpc = script->code + script->length;
+ for (pc2 = pc + len; pc2 < endpc; pc2++) {
+ op2 = (JSOp)*pc2;
+ if (op2 == JSOP_TYPEOF) {
+ PUSH_OPND(JSVAL_VOID);
+ DO_NEXT_OP(len);
+ }
+ if (op2 != JSOP_GROUP)
+ break;
+ }
+ goto atom_not_defined;
+ }
+
+ /* Take the slow path if prop was not found in a native object. */
+ if (!OBJ_IS_NATIVE(obj) || !OBJ_IS_NATIVE(obj2)) {
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ ok = OBJ_GET_PROPERTY(cx, obj, id, &rval);
+ if (!ok)
+ goto out;
+ } else {
+ sprop = (JSScopeProperty *)prop;
+ NATIVE_GET(cx, obj, obj2, sprop, &rval);
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ }
+ PUSH_OPND(rval);
+ END_CASE(JSOP_NAME)
+
+ BEGIN_CASE(JSOP_UINT16)
+ i = (jsint) GET_ATOM_INDEX(pc);
+ rval = INT_TO_JSVAL(i);
+ PUSH_OPND(rval);
+ obj = NULL;
+ END_CASE(JSOP_UINT16)
+
+ BEGIN_CASE(JSOP_UINT24)
+ i = (jsint) GET_LITERAL_INDEX(pc);
+ rval = INT_TO_JSVAL(i);
+ PUSH_OPND(rval);
+ END_CASE(JSOP_UINT24)
+
+ BEGIN_CASE(JSOP_LITERAL)
+ atomIndex = GET_LITERAL_INDEX(pc);
+ atom = js_GetAtom(cx, &script->atomMap, atomIndex);
+ PUSH_OPND(ATOM_KEY(atom));
+ obj = NULL;
+ END_CASE(JSOP_LITERAL)
+
+ BEGIN_CASE(JSOP_FINDNAME)
+ atomIndex = GET_LITERAL_INDEX(pc);
+ atom = js_GetAtom(cx, &script->atomMap, atomIndex);
+ SAVE_SP_AND_PC(fp);
+ obj = js_FindIdentifierBase(cx, ATOM_TO_JSID(atom));
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ PUSH_OPND(OBJECT_TO_JSVAL(obj));
+ PUSH_OPND(ATOM_KEY(atom));
+ END_CASE(JSOP_FINDNAME)
+
+ BEGIN_CASE(JSOP_LITOPX)
+ /*
+ * Load atomIndex, which is used by code at each do_JSOP_* label.
+ *
+ * Also set pc2 to point at the bytecode extended by this prefix
+ * to have a leading 24 bit atomIndex, instead of the unextended
+ * 16-bit atomIndex that normally comes after op. This enables
+ * JOF_INDEXCONST format ops (which have multiple immediates) to
+ * collect their other immediate via GET_VARNO(pc2) or similar.
+ *
+ * Finally, load op and, if threading, adjust pc so that it will
+ * be advanced properly at the end of op's case by DO_NEXT_OP.
+ */
+ atomIndex = GET_LITERAL_INDEX(pc);
+ pc2 = pc + 1 + LITERAL_INDEX_LEN;
+ op = *pc2;
+ pc += JSOP_LITOPX_LENGTH - (1 + ATOM_INDEX_LEN);
+#ifndef JS_THREADED_INTERP
+ len = js_CodeSpec[op].length;
+#endif
+ switch (op) {
+ case JSOP_ANONFUNOBJ: goto do_JSOP_ANONFUNOBJ;
+ case JSOP_BINDNAME: goto do_JSOP_BINDNAME;
+ case JSOP_CLOSURE: goto do_JSOP_CLOSURE;
+ case JSOP_DEFCONST: goto do_JSOP_DEFCONST;
+ case JSOP_DEFFUN: goto do_JSOP_DEFFUN;
+ case JSOP_DEFLOCALFUN: goto do_JSOP_DEFLOCALFUN;
+ case JSOP_DEFVAR: goto do_JSOP_DEFVAR;
+#if JS_HAS_EXPORT_IMPORT
+ case JSOP_EXPORTNAME: goto do_JSOP_EXPORTNAME;
+#endif
+#if JS_HAS_XML_SUPPORT
+ case JSOP_GETMETHOD: goto do_JSOP_GETMETHOD;
+ case JSOP_SETMETHOD: goto do_JSOP_SETMETHOD;
+#endif
+ case JSOP_NAMEDFUNOBJ: goto do_JSOP_NAMEDFUNOBJ;
+ case JSOP_NUMBER: goto do_JSOP_NUMBER;
+ case JSOP_OBJECT: goto do_JSOP_OBJECT;
+#if JS_HAS_XML_SUPPORT
+ case JSOP_QNAMECONST: goto do_JSOP_QNAMECONST;
+ case JSOP_QNAMEPART: goto do_JSOP_QNAMEPART;
+#endif
+ case JSOP_REGEXP: goto do_JSOP_REGEXP;
+ case JSOP_SETCONST: goto do_JSOP_SETCONST;
+ case JSOP_STRING: goto do_JSOP_STRING;
+#if JS_HAS_XML_SUPPORT
+ case JSOP_XMLCDATA: goto do_JSOP_XMLCDATA;
+ case JSOP_XMLCOMMENT: goto do_JSOP_XMLCOMMENT;
+ case JSOP_XMLOBJECT: goto do_JSOP_XMLOBJECT;
+ case JSOP_XMLPI: goto do_JSOP_XMLPI;
+#endif
+ case JSOP_ENTERBLOCK: goto do_JSOP_ENTERBLOCK;
+ default: JS_ASSERT(0);
+ }
+ /* NOTREACHED */
+
+ BEGIN_CASE(JSOP_NUMBER)
+ BEGIN_CASE(JSOP_STRING)
+ BEGIN_CASE(JSOP_OBJECT)
+ atomIndex = GET_ATOM_INDEX(pc);
+
+ do_JSOP_NUMBER:
+ do_JSOP_STRING:
+ do_JSOP_OBJECT:
+ atom = js_GetAtom(cx, &script->atomMap, atomIndex);
+ PUSH_OPND(ATOM_KEY(atom));
+ obj = NULL;
+ END_CASE(JSOP_NUMBER)
+
+ BEGIN_LITOPX_CASE(JSOP_REGEXP, 0)
+ {
+ JSRegExp *re;
+ JSObject *funobj;
+
+ /*
+ * Push a regexp object for the atom mapped by the bytecode at pc,
+ * cloning the literal's regexp object if necessary, to simulate in
+ * the pre-compile/execute-later case what ECMA specifies for the
+ * compile-and-go case: that scanning each regexp literal creates
+ * a single corresponding RegExp object.
+ *
+ * To support pre-compilation transparently, we must handle the
+ * case where a regexp object literal is used in a different global
+ * at execution time from the global with which it was scanned at
+ * compile time. We do this by re-wrapping the JSRegExp private
+ * data struct with a cloned object having the right prototype and
+ * parent, and having its own lastIndex property value storage.
+ *
+ * Unlike JSOP_DEFFUN and other prolog bytecodes that may clone
+ * literal objects, we don't want to pay a script prolog execution
+ * price for all regexp literals in a script (many may not be used
+ * by a particular execution of that script, depending on control
+ * flow), so we initialize lazily here.
+ *
+ * XXX This code is specific to regular expression objects. If we
+ * need a similar op for other kinds of object literals, we should
+ * push cloning down under JSObjectOps and reuse code here.
+ */
+ JS_ASSERT(ATOM_IS_OBJECT(atom));
+ obj = ATOM_TO_OBJECT(atom);
+ JS_ASSERT(OBJ_GET_CLASS(cx, obj) == &js_RegExpClass);
+
+ re = (JSRegExp *) JS_GetPrivate(cx, obj);
+ slot = re->cloneIndex;
+ if (fp->fun) {
+ /*
+ * We're in function code, not global or eval code (in eval
+ * code, JSOP_REGEXP is never emitted). The code generator
+ * recorded in fp->fun->nregexps the number of re->cloneIndex
+ * slots that it reserved in the cloned funobj.
+ */
+ funobj = JSVAL_TO_OBJECT(fp->argv[-2]);
+ slot += JSCLASS_RESERVED_SLOTS(&js_FunctionClass);
+ if (!JS_GetReservedSlot(cx, funobj, slot, &rval))
+ return JS_FALSE;
+ if (JSVAL_IS_VOID(rval))
+ rval = JSVAL_NULL;
+ } else {
+ /*
+ * We're in global code. The code generator already arranged
+ * via script->numGlobalVars to reserve a global variable slot
+ * at cloneIndex. All global variable slots are initialized
+ * to null, not void, for faster testing in JSOP_*GVAR cases.
+ */
+ rval = fp->vars[slot];
+#ifdef __GNUC__
+ funobj = NULL; /* suppress bogus gcc warnings */
+#endif
+ }
+
+ if (JSVAL_IS_NULL(rval)) {
+ /* Compute the current global object in obj2. */
+ obj2 = fp->scopeChain;
+ while ((parent = OBJ_GET_PARENT(cx, obj2)) != NULL)
+ obj2 = parent;
+
+ /*
+ * We must home sp here, because either js_CloneRegExpObject
+ * or JS_SetReservedSlot could nest a last-ditch GC. We home
+                 * pc as well, in case js_CloneRegExpObject has to look up the
+ * "RegExp" class in the global object, which could entail a
+ * JSNewResolveOp call.
+ */
+ SAVE_SP_AND_PC(fp);
+
+ /*
+ * If obj's parent is not obj2, we must clone obj so that it
+ * has the right parent, and therefore, the right prototype.
+ *
+ * Yes, this means we assume that the correct RegExp.prototype
+ * to which regexp instances (including literals) delegate can
+ * be distinguished solely by the instance's parent, which was
+ * set to the parent of the RegExp constructor function object
+ * when the instance was created. In other words,
+ *
+ * (/x/.__parent__ == RegExp.__parent__) implies
+ * (/x/.__proto__ == RegExp.prototype)
+ *
+ * (unless you assign a different object to RegExp.prototype
+ * at runtime, in which case, ECMA doesn't specify operation,
+ * and you get what you deserve).
+ *
+ * This same coupling between instance parent and constructor
+ * parent turns up everywhere (see jsobj.c's FindClassObject,
+ * js_ConstructObject, and js_NewObject). It's fundamental to
+ * the design of the language when you consider multiple global
+ * objects and separate compilation and execution, even though
+ * it is not specified fully in ECMA.
+ */
+ if (OBJ_GET_PARENT(cx, obj) != obj2) {
+ obj = js_CloneRegExpObject(cx, obj, obj2);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ }
+ rval = OBJECT_TO_JSVAL(obj);
+
+ /* Store the regexp object value in its cloneIndex slot. */
+ if (fp->fun) {
+ if (!JS_SetReservedSlot(cx, funobj, slot, rval))
+ return JS_FALSE;
+ } else {
+ fp->vars[slot] = rval;
+ }
+ }
+
+ PUSH_OPND(rval);
+ obj = NULL;
+ }
+ END_LITOPX_CASE(JSOP_REGEXP)
+
+ BEGIN_CASE(JSOP_ZERO)
+ PUSH_OPND(JSVAL_ZERO);
+ obj = NULL;
+ END_CASE(JSOP_ZERO)
+
+ BEGIN_CASE(JSOP_ONE)
+ PUSH_OPND(JSVAL_ONE);
+ obj = NULL;
+ END_CASE(JSOP_ONE)
+
+ BEGIN_CASE(JSOP_NULL)
+ PUSH_OPND(JSVAL_NULL);
+ obj = NULL;
+ END_CASE(JSOP_NULL)
+
+ BEGIN_CASE(JSOP_THIS)
+ obj = fp->thisp;
+ clasp = OBJ_GET_CLASS(cx, obj);
+ if (clasp->flags & JSCLASS_IS_EXTENDED) {
+ JSExtendedClass *xclasp;
+
+ xclasp = (JSExtendedClass *) clasp;
+ if (xclasp->outerObject) {
+ obj = xclasp->outerObject(cx, obj);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ }
+ }
+
+ PUSH_OPND(OBJECT_TO_JSVAL(obj));
+ obj = NULL;
+ END_CASE(JSOP_THIS)
+
+ BEGIN_CASE(JSOP_FALSE)
+ PUSH_OPND(JSVAL_FALSE);
+ obj = NULL;
+ END_CASE(JSOP_FALSE)
+
+ BEGIN_CASE(JSOP_TRUE)
+ PUSH_OPND(JSVAL_TRUE);
+ obj = NULL;
+ END_CASE(JSOP_TRUE)
+
+ BEGIN_CASE(JSOP_TABLESWITCH)
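+            /*
+             * Immediate operands: default jump offset, then the low and high
+             * bounds of the dense case range, then one jump offset per case.
+             * len starts out as the default offset and is replaced only when
+             * the discriminant lands in range on a nonzero table entry.
+             */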
+ pc2 = pc;
+ len = GET_JUMP_OFFSET(pc2);
+
+ /*
+ * ECMAv2+ forbids conversion of discriminant, so we will skip to
+ * the default case if the discriminant isn't already an int jsval.
+ * (This opcode is emitted only for dense jsint-domain switches.)
+ */
+ rval = POP_OPND();
+ if (!JSVAL_IS_INT(rval))
+ DO_NEXT_OP(len);
+ i = JSVAL_TO_INT(rval);
+
+ pc2 += JUMP_OFFSET_LEN;
+ low = GET_JUMP_OFFSET(pc2);
+ pc2 += JUMP_OFFSET_LEN;
+ high = GET_JUMP_OFFSET(pc2);
+
+ i -= low;
+ if ((jsuint)i < (jsuint)(high - low + 1)) {
+ pc2 += JUMP_OFFSET_LEN + JUMP_OFFSET_LEN * i;
+ off = (jsint) GET_JUMP_OFFSET(pc2);
+ if (off)
+ len = off;
+ }
+ END_VARLEN_CASE
+
+ BEGIN_CASE(JSOP_LOOKUPSWITCH)
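+            /*
+             * Immediate operands: default jump offset, then the pair count,
+             * then (atom index, jump offset) pairs. Discriminants that are
+             * not numbers, strings, or booleans always take the default.
+             */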
+ lval = POP_OPND();
+ pc2 = pc;
+ len = GET_JUMP_OFFSET(pc2);
+
+ if (!JSVAL_IS_NUMBER(lval) &&
+ !JSVAL_IS_STRING(lval) &&
+ !JSVAL_IS_BOOLEAN(lval)) {
+ DO_NEXT_OP(len);
+ }
+
+ pc2 += JUMP_OFFSET_LEN;
+ npairs = (jsint) GET_ATOM_INDEX(pc2);
+ pc2 += ATOM_INDEX_LEN;
+
+#define SEARCH_PAIRS(MATCH_CODE) \
+ while (npairs) { \
+ atom = GET_ATOM(cx, script, pc2); \
+ rval = ATOM_KEY(atom); \
+ MATCH_CODE \
+ if (match) { \
+ pc2 += ATOM_INDEX_LEN; \
+ len = GET_JUMP_OFFSET(pc2); \
+ DO_NEXT_OP(len); \
+ } \
+ pc2 += ATOM_INDEX_LEN + JUMP_OFFSET_LEN; \
+ npairs--; \
+ }
+ if (JSVAL_IS_STRING(lval)) {
+ str = JSVAL_TO_STRING(lval);
+ SEARCH_PAIRS(
+ match = (JSVAL_IS_STRING(rval) &&
+ ((str2 = JSVAL_TO_STRING(rval)) == str ||
+ js_EqualStrings(str2, str)));
+ )
+ } else if (JSVAL_IS_DOUBLE(lval)) {
+ d = *JSVAL_TO_DOUBLE(lval);
+ SEARCH_PAIRS(
+ match = (JSVAL_IS_DOUBLE(rval) &&
+ *JSVAL_TO_DOUBLE(rval) == d);
+ )
+ } else {
+ SEARCH_PAIRS(
+ match = (lval == rval);
+ )
+ }
+#undef SEARCH_PAIRS
+ END_VARLEN_CASE
+
+ BEGIN_CASE(JSOP_TABLESWITCHX)
+ pc2 = pc;
+ len = GET_JUMPX_OFFSET(pc2);
+
+ /*
+ * ECMAv2+ forbids conversion of discriminant, so we will skip to
+ * the default case if the discriminant isn't already an int jsval.
+ * (This opcode is emitted only for dense jsint-domain switches.)
+ */
+ rval = POP_OPND();
+ if (!JSVAL_IS_INT(rval))
+ DO_NEXT_OP(len);
+ i = JSVAL_TO_INT(rval);
+
+ pc2 += JUMPX_OFFSET_LEN;
+ low = GET_JUMP_OFFSET(pc2);
+ pc2 += JUMP_OFFSET_LEN;
+ high = GET_JUMP_OFFSET(pc2);
+
+ i -= low;
+ if ((jsuint)i < (jsuint)(high - low + 1)) {
+ pc2 += JUMP_OFFSET_LEN + JUMPX_OFFSET_LEN * i;
+ off = (jsint) GET_JUMPX_OFFSET(pc2);
+ if (off)
+ len = off;
+ }
+ END_VARLEN_CASE
+
+ BEGIN_CASE(JSOP_LOOKUPSWITCHX)
+ lval = POP_OPND();
+ pc2 = pc;
+ len = GET_JUMPX_OFFSET(pc2);
+
+ if (!JSVAL_IS_NUMBER(lval) &&
+ !JSVAL_IS_STRING(lval) &&
+ !JSVAL_IS_BOOLEAN(lval)) {
+ DO_NEXT_OP(len);
+ }
+
+ pc2 += JUMPX_OFFSET_LEN;
+ npairs = (jsint) GET_ATOM_INDEX(pc2);
+ pc2 += ATOM_INDEX_LEN;
+
+#define SEARCH_EXTENDED_PAIRS(MATCH_CODE) \
+ while (npairs) { \
+ atom = GET_ATOM(cx, script, pc2); \
+ rval = ATOM_KEY(atom); \
+ MATCH_CODE \
+ if (match) { \
+ pc2 += ATOM_INDEX_LEN; \
+ len = GET_JUMPX_OFFSET(pc2); \
+ DO_NEXT_OP(len); \
+ } \
+ pc2 += ATOM_INDEX_LEN + JUMPX_OFFSET_LEN; \
+ npairs--; \
+ }
+ if (JSVAL_IS_STRING(lval)) {
+ str = JSVAL_TO_STRING(lval);
+ SEARCH_EXTENDED_PAIRS(
+ match = (JSVAL_IS_STRING(rval) &&
+ ((str2 = JSVAL_TO_STRING(rval)) == str ||
+ js_EqualStrings(str2, str)));
+ )
+ } else if (JSVAL_IS_DOUBLE(lval)) {
+ d = *JSVAL_TO_DOUBLE(lval);
+ SEARCH_EXTENDED_PAIRS(
+ match = (JSVAL_IS_DOUBLE(rval) &&
+ *JSVAL_TO_DOUBLE(rval) == d);
+ )
+ } else {
+ SEARCH_EXTENDED_PAIRS(
+ match = (lval == rval);
+ )
+ }
+#undef SEARCH_EXTENDED_PAIRS
+ END_VARLEN_CASE
+
+ EMPTY_CASE(JSOP_CONDSWITCH)
+
+#if JS_HAS_EXPORT_IMPORT
+ BEGIN_CASE(JSOP_EXPORTALL)
+ obj = fp->varobj;
+ SAVE_SP_AND_PC(fp);
+ ida = JS_Enumerate(cx, obj);
+ if (!ida) {
+ ok = JS_FALSE;
+ } else {
+ for (i = 0, j = ida->length; i < j; i++) {
+ id = ida->vector[i];
+ ok = OBJ_LOOKUP_PROPERTY(cx, obj, id, &obj2, &prop);
+ if (!ok)
+ break;
+ if (!prop)
+ continue;
+ ok = OBJ_GET_ATTRIBUTES(cx, obj, id, prop, &attrs);
+ if (ok) {
+ attrs |= JSPROP_EXPORTED;
+ ok = OBJ_SET_ATTRIBUTES(cx, obj, id, prop, &attrs);
+ }
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ if (!ok)
+ break;
+ }
+ JS_DestroyIdArray(cx, ida);
+ }
+ END_CASE(JSOP_EXPORTALL)
+
+ BEGIN_LITOPX_CASE(JSOP_EXPORTNAME, 0)
+ id = ATOM_TO_JSID(atom);
+ obj = fp->varobj;
+ SAVE_SP_AND_PC(fp);
+ ok = OBJ_LOOKUP_PROPERTY(cx, obj, id, &obj2, &prop);
+ if (!ok)
+ goto out;
+ if (!prop) {
+ ok = OBJ_DEFINE_PROPERTY(cx, obj, id, JSVAL_VOID, NULL, NULL,
+ JSPROP_EXPORTED, NULL);
+ } else {
+ ok = OBJ_GET_ATTRIBUTES(cx, obj, id, prop, &attrs);
+ if (ok) {
+ attrs |= JSPROP_EXPORTED;
+ ok = OBJ_SET_ATTRIBUTES(cx, obj, id, prop, &attrs);
+ }
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ }
+ if (!ok)
+ goto out;
+ END_LITOPX_CASE(JSOP_EXPORTNAME)
+
+ BEGIN_CASE(JSOP_IMPORTALL)
+ id = (jsid) JSVAL_VOID;
+ PROPERTY_OP(-1, ok = ImportProperty(cx, obj, id));
+ sp--;
+ END_CASE(JSOP_IMPORTALL)
+
+ BEGIN_CASE(JSOP_IMPORTPROP)
+ /* Get an immediate atom naming the property. */
+ atom = GET_ATOM(cx, script, pc);
+ id = ATOM_TO_JSID(atom);
+ PROPERTY_OP(-1, ok = ImportProperty(cx, obj, id));
+ sp--;
+ END_CASE(JSOP_IMPORTPROP)
+
+ BEGIN_CASE(JSOP_IMPORTELEM)
+ ELEMENT_OP(-1, ok = ImportProperty(cx, obj, id));
+ sp -= 2;
+ END_CASE(JSOP_IMPORTELEM)
+#endif /* JS_HAS_EXPORT_IMPORT */
+
+ BEGIN_CASE(JSOP_TRAP)
+ SAVE_SP_AND_PC(fp);
+ switch (JS_HandleTrap(cx, script, pc, &rval)) {
+ case JSTRAP_ERROR:
+ ok = JS_FALSE;
+ goto out;
+ case JSTRAP_CONTINUE:
+ JS_ASSERT(JSVAL_IS_INT(rval));
+ op = (JSOp) JSVAL_TO_INT(rval);
+ JS_ASSERT((uintN)op < (uintN)JSOP_LIMIT);
+ LOAD_INTERRUPT_HANDLER(rt);
+ DO_OP();
+ case JSTRAP_RETURN:
+ fp->rval = rval;
+ goto out;
+ case JSTRAP_THROW:
+ cx->throwing = JS_TRUE;
+ cx->exception = rval;
+ ok = JS_FALSE;
+ goto out;
+ default:;
+ }
+ LOAD_INTERRUPT_HANDLER(rt);
+ END_CASE(JSOP_TRAP)
+
+ BEGIN_CASE(JSOP_ARGUMENTS)
+ SAVE_SP_AND_PC(fp);
+ ok = js_GetArgsValue(cx, fp, &rval);
+ if (!ok)
+ goto out;
+ PUSH_OPND(rval);
+ obj = NULL;
+ END_CASE(JSOP_ARGUMENTS)
+
+ BEGIN_CASE(JSOP_ARGSUB)
+ id = INT_TO_JSID(GET_ARGNO(pc));
+ SAVE_SP_AND_PC(fp);
+ ok = js_GetArgsProperty(cx, fp, id, &obj, &rval);
+ if (!ok)
+ goto out;
+ if (!obj) {
+ /*
+ * If arguments was not overridden by eval('arguments = ...'),
+ * set obj to the magic cookie respected by JSOP_PUSHOBJ, just
+ * in case this bytecode is part of an 'arguments[i](j, k)' or
+ * similar such invocation sequence, where the function that
+ * is invoked expects its 'this' parameter to be the caller's
+ * arguments object.
+ */
+ obj = LAZY_ARGS_THISP;
+ }
+ PUSH_OPND(rval);
+ END_CASE(JSOP_ARGSUB)
+
+#undef LAZY_ARGS_THISP
+
+ BEGIN_CASE(JSOP_ARGCNT)
+ id = ATOM_TO_JSID(rt->atomState.lengthAtom);
+ SAVE_SP_AND_PC(fp);
+ ok = js_GetArgsProperty(cx, fp, id, &obj, &rval);
+ if (!ok)
+ goto out;
+ PUSH_OPND(rval);
+ END_CASE(JSOP_ARGCNT)
+
+ BEGIN_CASE(JSOP_GETARG)
+ slot = GET_ARGNO(pc);
+ JS_ASSERT(slot < fp->fun->nargs);
+ PUSH_OPND(fp->argv[slot]);
+ obj = NULL;
+ END_CASE(JSOP_GETARG)
+
+ BEGIN_CASE(JSOP_SETARG)
+ slot = GET_ARGNO(pc);
+ JS_ASSERT(slot < fp->fun->nargs);
+ vp = &fp->argv[slot];
+ GC_POKE(cx, *vp);
+ *vp = FETCH_OPND(-1);
+ obj = NULL;
+ END_CASE(JSOP_SETARG)
+
+ BEGIN_CASE(JSOP_GETVAR)
+ slot = GET_VARNO(pc);
+ JS_ASSERT(slot < fp->fun->u.i.nvars);
+ PUSH_OPND(fp->vars[slot]);
+ obj = NULL;
+ END_CASE(JSOP_GETVAR)
+
+ BEGIN_CASE(JSOP_SETVAR)
+ slot = GET_VARNO(pc);
+ JS_ASSERT(slot < fp->fun->u.i.nvars);
+ vp = &fp->vars[slot];
+ GC_POKE(cx, *vp);
+ *vp = FETCH_OPND(-1);
+ obj = NULL;
+ END_CASE(JSOP_SETVAR)
+
+ BEGIN_CASE(JSOP_GETGVAR)
+ slot = GET_VARNO(pc);
+ JS_ASSERT(slot < fp->nvars);
+ lval = fp->vars[slot];
+ if (JSVAL_IS_NULL(lval)) {
+ op = JSOP_NAME;
+ DO_OP();
+ }
+ slot = JSVAL_TO_INT(lval);
+ obj = fp->varobj;
+ rval = OBJ_GET_SLOT(cx, obj, slot);
+ PUSH_OPND(rval);
+ END_CASE(JSOP_GETGVAR)
+
+ BEGIN_CASE(JSOP_SETGVAR)
+ slot = GET_VARNO(pc);
+ JS_ASSERT(slot < fp->nvars);
+ rval = FETCH_OPND(-1);
+ lval = fp->vars[slot];
+ obj = fp->varobj;
+ if (JSVAL_IS_NULL(lval)) {
+ /*
+ * Inline-clone and specialize JSOP_SETNAME code here because
+ * JSOP_SETGVAR has arity 1: [rval], not arity 2: [obj, rval]
+ * as JSOP_SETNAME does, where [obj] is due to JSOP_BINDNAME.
+ */
+ atom = GET_ATOM(cx, script, pc);
+ id = ATOM_TO_JSID(atom);
+ SAVE_SP_AND_PC(fp);
+ CACHED_SET(OBJ_SET_PROPERTY(cx, obj, id, &rval));
+ if (!ok)
+ goto out;
+ STORE_OPND(-1, rval);
+ } else {
+ slot = JSVAL_TO_INT(lval);
+ GC_POKE(cx, obj->slots[slot]);
+ OBJ_SET_SLOT(cx, obj, slot, rval);
+ }
+ obj = NULL;
+ END_CASE(JSOP_SETGVAR)
+
+ BEGIN_CASE(JSOP_DEFCONST)
+ BEGIN_CASE(JSOP_DEFVAR)
+ atomIndex = GET_ATOM_INDEX(pc);
+
+ do_JSOP_DEFCONST:
+ do_JSOP_DEFVAR:
+ atom = js_GetAtom(cx, &script->atomMap, atomIndex);
+ obj = fp->varobj;
+ attrs = JSPROP_ENUMERATE;
+ if (!(fp->flags & JSFRAME_EVAL))
+ attrs |= JSPROP_PERMANENT;
+ if (op == JSOP_DEFCONST)
+ attrs |= JSPROP_READONLY;
+
+            /* Look up id in order to check for redeclaration problems. */
+ id = ATOM_TO_JSID(atom);
+ SAVE_SP_AND_PC(fp);
+ ok = js_CheckRedeclaration(cx, obj, id, attrs, &obj2, &prop);
+ if (!ok)
+ goto out;
+
+ /* Bind a variable only if it's not yet defined. */
+ if (!prop) {
+ ok = OBJ_DEFINE_PROPERTY(cx, obj, id, JSVAL_VOID, NULL, NULL,
+ attrs, &prop);
+ if (!ok)
+ goto out;
+ JS_ASSERT(prop);
+ obj2 = obj;
+ }
+
+ /*
+ * Try to optimize a property we either just created, or found
+ * directly in the global object, that is permanent, has a slot,
+ * and has stub getter and setter, into a "fast global" accessed
+ * by the JSOP_*GVAR opcodes.
+ */
+ if (atomIndex < script->numGlobalVars &&
+ (attrs & JSPROP_PERMANENT) &&
+ obj2 == obj &&
+ OBJ_IS_NATIVE(obj)) {
+ sprop = (JSScopeProperty *) prop;
+ if (SPROP_HAS_VALID_SLOT(sprop, OBJ_SCOPE(obj)) &&
+ SPROP_HAS_STUB_GETTER(sprop) &&
+ SPROP_HAS_STUB_SETTER(sprop)) {
+ /*
+ * Fast globals use fp->vars to map the global name's
+ * atomIndex to the permanent fp->varobj slot number,
+ * tagged as a jsval. The atomIndex for the global's
+ * name literal is identical to its fp->vars index.
+ */
+ fp->vars[atomIndex] = INT_TO_JSVAL(sprop->slot);
+ }
+ }
+
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ END_CASE(JSOP_DEFVAR)
+
+ BEGIN_LITOPX_CASE(JSOP_DEFFUN, 0)
+ obj = ATOM_TO_OBJECT(atom);
+ fun = (JSFunction *) JS_GetPrivate(cx, obj);
+ id = ATOM_TO_JSID(fun->atom);
+
+ /*
+ * We must be at top-level (either outermost block that forms a
+ * function's body, or a global) scope, not inside an expression
+ * (JSOP_{ANON,NAMED}FUNOBJ) or compound statement (JSOP_CLOSURE)
+ * in the same compilation unit (ECMA Program).
+ *
+ * However, we could be in a Program being eval'd from inside a
+ * with statement, so we need to distinguish scope chain head from
+ * variables object. Hence the obj2 vs. parent distinction below.
+ * First we make sure the function object we're defining has the
+ * right scope chain. Then we define its name in fp->varobj.
+ *
+ * If static link is not current scope, clone fun's object to link
+ * to the current scope via parent. This clause exists to enable
+ * sharing of compiled functions among multiple equivalent scopes,
+ * splitting the cost of compilation evenly among the scopes and
+ * amortizing it over a number of executions. Examples include XUL
+ * scripts and event handlers shared among Mozilla chrome windows,
+ * and server-side JS user-defined functions shared among requests.
+ *
+ * NB: The Script object exposes compile and exec in the language,
+ * such that this clause introduces an incompatible change from old
+ * JS versions that supported Script. Such a JS version supported
+ * executing a script that defined and called functions scoped by
+ * the compile-time static link, not by the exec-time scope chain.
+ *
+ * We sacrifice compatibility, breaking such scripts, in order to
+ * promote compile-cost sharing and amortizing, and because Script
+ * is not and will not be standardized.
+ */
+ JS_ASSERT(!fp->blockChain);
+ obj2 = fp->scopeChain;
+ if (OBJ_GET_PARENT(cx, obj) != obj2) {
+ obj = js_CloneFunctionObject(cx, obj, obj2);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ }
+
+ /*
+ * Protect obj from any GC hiding below OBJ_DEFINE_PROPERTY. All
+ * paths from here must flow through the "Restore fp->scopeChain"
+ * code below the OBJ_DEFINE_PROPERTY call.
+ */
+ fp->scopeChain = obj;
+ rval = OBJECT_TO_JSVAL(obj);
+
+ /*
+ * ECMA requires functions defined when entering Global code to be
+ * permanent, and functions defined when entering Eval code to be
+ * impermanent.
+ */
+ attrs = JSPROP_ENUMERATE;
+ if (!(fp->flags & JSFRAME_EVAL))
+ attrs |= JSPROP_PERMANENT;
+
+ /*
+ * Load function flags that are also property attributes. Getters
+             * and setters do not need a slot; their value is stored elsewhere
+ * in the property itself, not in obj->slots.
+ */
+ flags = JSFUN_GSFLAG2ATTR(fun->flags);
+ if (flags) {
+ attrs |= flags | JSPROP_SHARED;
+ rval = JSVAL_VOID;
+ }
+
+ /*
+ * Check for a const property of the same name -- or any kind
+ * of property if executing with the strict option. We check
+ * here at runtime as well as at compile-time, to handle eval
+ * as well as multiple HTML script tags.
+ */
+ parent = fp->varobj;
+ SAVE_SP_AND_PC(fp);
+ ok = js_CheckRedeclaration(cx, parent, id, attrs, NULL, NULL);
+ if (ok) {
+ ok = OBJ_DEFINE_PROPERTY(cx, parent, id, rval,
+ (flags & JSPROP_GETTER)
+ ? JS_EXTENSION (JSPropertyOp) obj
+ : NULL,
+ (flags & JSPROP_SETTER)
+ ? JS_EXTENSION (JSPropertyOp) obj
+ : NULL,
+ attrs,
+ &prop);
+ }
+
+ /* Restore fp->scopeChain now that obj is defined in fp->varobj. */
+ fp->scopeChain = obj2;
+ if (!ok)
+ goto out;
+
+#if 0
+ if (attrs == (JSPROP_ENUMERATE | JSPROP_PERMANENT) &&
+ script->numGlobalVars) {
+ /*
+ * As with JSOP_DEFVAR and JSOP_DEFCONST (above), fast globals
+ * use fp->vars to map the global function name's atomIndex to
+ * its permanent fp->varobj slot number, tagged as a jsval.
+ */
+ sprop = (JSScopeProperty *) prop;
+ fp->vars[atomIndex] = INT_TO_JSVAL(sprop->slot);
+ }
+#endif
+ OBJ_DROP_PROPERTY(cx, parent, prop);
+ END_LITOPX_CASE(JSOP_DEFFUN)
+
+ BEGIN_LITOPX_CASE(JSOP_DEFLOCALFUN, VARNO_LEN)
+ /*
+ * Define a local function (i.e., one nested at the top level of
+ * another function), parented by the current scope chain, and
+ * stored in a local variable slot that the compiler allocated.
+ * This is an optimization over JSOP_DEFFUN that avoids requiring
+ * a call object for the outer function's activation.
+ */
+ slot = GET_VARNO(pc2);
+ obj = ATOM_TO_OBJECT(atom);
+
+ JS_ASSERT(!fp->blockChain);
+ if (!(fp->flags & JSFRAME_POP_BLOCKS)) {
+ /*
+ * If the compiler-created function object (obj) is scoped by a
+ * let-induced body block, temporarily update fp->blockChain so
+ * that js_GetScopeChain will clone the block into the runtime
+ * scope needed to parent the function object's clone.
+ */
+ parent = OBJ_GET_PARENT(cx, obj);
+ if (OBJ_GET_CLASS(cx, parent) == &js_BlockClass)
+ fp->blockChain = parent;
+ parent = js_GetScopeChain(cx, fp);
+ } else {
+ /*
+ * We have already emulated JSOP_ENTERBLOCK for the enclosing
+ * body block, for a prior JSOP_DEFLOCALFUN in the prolog, so
+ * we just load fp->scopeChain into parent.
+ *
+ * In typical execution scenarios, the prolog bytecodes that
+ * include this JSOP_DEFLOCALFUN run, then come main bytecodes
+ * including JSOP_ENTERBLOCK for the outermost (body) block.
+ * JSOP_ENTERBLOCK will detect that it need not do anything if
+ * the body block was entered above due to a local function.
+ * Finally the matching JSOP_LEAVEBLOCK runs.
+ *
+ * If the matching JSOP_LEAVEBLOCK for the body block does not
+ * run for some reason, the body block will be properly "put"
+ * (via js_PutBlockObject) by the PutBlockObjects call at the
+ * bottom of js_Interpret.
+ */
+ parent = fp->scopeChain;
+ JS_ASSERT(OBJ_GET_CLASS(cx, parent) == &js_BlockClass);
+ JS_ASSERT(OBJ_GET_PROTO(cx, parent) == OBJ_GET_PARENT(cx, obj));
+ JS_ASSERT(OBJ_GET_CLASS(cx, OBJ_GET_PARENT(cx, parent))
+ == &js_CallClass);
+ }
+
+ /* If re-parenting, store a clone of the function object. */
+ if (OBJ_GET_PARENT(cx, obj) != parent) {
+ SAVE_SP_AND_PC(fp);
+ obj = js_CloneFunctionObject(cx, obj, parent);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ }
+ fp->vars[slot] = OBJECT_TO_JSVAL(obj);
+ END_LITOPX_CASE(JSOP_DEFLOCALFUN)
+
+ BEGIN_LITOPX_CASE(JSOP_ANONFUNOBJ, 0)
+ /* Push the specified function object literal. */
+ obj = ATOM_TO_OBJECT(atom);
+
+ /* If re-parenting, push a clone of the function object. */
+ SAVE_SP_AND_PC(fp);
+ parent = js_GetScopeChain(cx, fp);
+ if (!parent) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ if (OBJ_GET_PARENT(cx, obj) != parent) {
+ obj = js_CloneFunctionObject(cx, obj, parent);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ }
+ PUSH_OPND(OBJECT_TO_JSVAL(obj));
+ obj = NULL;
+ END_LITOPX_CASE(JSOP_ANONFUNOBJ)
+
+ BEGIN_LITOPX_CASE(JSOP_NAMEDFUNOBJ, 0)
+ /* ECMA ed. 3 FunctionExpression: function Identifier [etc.]. */
+ rval = ATOM_KEY(atom);
+ JS_ASSERT(VALUE_IS_FUNCTION(cx, rval));
+
+ /*
+ * 1. Create a new object as if by the expression new Object().
+ * 2. Add Result(1) to the front of the scope chain.
+ *
+ * Step 2 is achieved by making the new object's parent be the
+ * current scope chain, and then making the new object the parent
+ * of the Function object clone.
+ */
+ SAVE_SP_AND_PC(fp);
+ obj2 = js_GetScopeChain(cx, fp);
+ if (!obj2) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ parent = js_NewObject(cx, &js_ObjectClass, NULL, obj2);
+ if (!parent) {
+ ok = JS_FALSE;
+ goto out;
+ }
+
+ /*
+ * 3. Create a new Function object as specified in section 13.2
+ * with [parameters and body specified by the function expression
+ * that was parsed by the compiler into a Function object, and
+ * saved in the script's atom map].
+ *
+ * Protect parent from GC after js_CloneFunctionObject calls into
+ * js_NewObject, which displaces the newborn object root in cx by
+ * allocating the clone, then runs a last-ditch GC while trying
+ * to allocate the clone's slots vector. Another, multi-threaded
+ * path: js_CloneFunctionObject => js_NewObject => OBJ_GET_CLASS
+ * which may suspend the current request in ClaimScope, with the
+ * newborn displaced as in the first scenario.
+ */
+ fp->scopeChain = parent;
+ obj = js_CloneFunctionObject(cx, JSVAL_TO_OBJECT(rval), parent);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+
+ /*
+ * Protect obj from any GC hiding below OBJ_DEFINE_PROPERTY. All
+ * paths from here must flow through the "Restore fp->scopeChain"
+ * code below the OBJ_DEFINE_PROPERTY call.
+ */
+ fp->scopeChain = obj;
+ rval = OBJECT_TO_JSVAL(obj);
+
+ /*
+ * 4. Create a property in the object Result(1). The property's
+ * name is [fun->atom, the identifier parsed by the compiler],
+ * value is Result(3), and attributes are { DontDelete, ReadOnly }.
+ */
+ fun = (JSFunction *) JS_GetPrivate(cx, obj);
+ attrs = JSFUN_GSFLAG2ATTR(fun->flags);
+ if (attrs) {
+ attrs |= JSPROP_SHARED;
+ rval = JSVAL_VOID;
+ }
+ ok = OBJ_DEFINE_PROPERTY(cx, parent, ATOM_TO_JSID(fun->atom), rval,
+ (attrs & JSPROP_GETTER)
+ ? JS_EXTENSION (JSPropertyOp) obj
+ : NULL,
+ (attrs & JSPROP_SETTER)
+ ? JS_EXTENSION (JSPropertyOp) obj
+ : NULL,
+ attrs |
+ JSPROP_ENUMERATE | JSPROP_PERMANENT |
+ JSPROP_READONLY,
+ NULL);
+
+ /* Restore fp->scopeChain now that obj is defined in parent. */
+ fp->scopeChain = obj2;
+ if (!ok) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ goto out;
+ }
+
+ /*
+ * 5. Remove Result(1) from the front of the scope chain [no-op].
+ * 6. Return Result(3).
+ */
+ PUSH_OPND(OBJECT_TO_JSVAL(obj));
+ obj = NULL;
+ END_LITOPX_CASE(JSOP_NAMEDFUNOBJ)
+
+ BEGIN_LITOPX_CASE(JSOP_CLOSURE, 0)
+ /*
+ * ECMA ed. 3 extension: a named function expression in a compound
+ * statement (not at the top statement level of global code, or at
+ * the top level of a function body).
+ *
+ * Get immediate operand atom, which is a function object literal.
+ * From it, get the function to close.
+ */
+ JS_ASSERT(VALUE_IS_FUNCTION(cx, ATOM_KEY(atom)));
+ obj = ATOM_TO_OBJECT(atom);
+
+ /*
+ * Clone the function object with the current scope chain as the
+ * clone's parent. The original function object is the prototype
+ * of the clone. Do this only if re-parenting; the compiler may
+ * have seen the right parent already and created a sufficiently
+ * well-scoped function object.
+ */
+ SAVE_SP_AND_PC(fp);
+ obj2 = js_GetScopeChain(cx, fp);
+ if (!obj2) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ if (OBJ_GET_PARENT(cx, obj) != obj2) {
+ obj = js_CloneFunctionObject(cx, obj, obj2);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ }
+
+ /*
+ * Protect obj from any GC hiding below OBJ_DEFINE_PROPERTY. All
+ * paths from here must flow through the "Restore fp->scopeChain"
+ * code below the OBJ_DEFINE_PROPERTY call.
+ */
+ fp->scopeChain = obj;
+ rval = OBJECT_TO_JSVAL(obj);
+
+ /*
+ * Make a property in fp->varobj with id fun->atom and value obj,
+ * unless fun is a getter or setter (in which case, obj is cast to
+ * a JSPropertyOp and passed accordingly).
+ */
+ fun = (JSFunction *) JS_GetPrivate(cx, obj);
+ attrs = JSFUN_GSFLAG2ATTR(fun->flags);
+ if (attrs) {
+ attrs |= JSPROP_SHARED;
+ rval = JSVAL_VOID;
+ }
+ parent = fp->varobj;
+ ok = OBJ_DEFINE_PROPERTY(cx, parent, ATOM_TO_JSID(fun->atom), rval,
+ (attrs & JSPROP_GETTER)
+ ? JS_EXTENSION (JSPropertyOp) obj
+ : NULL,
+ (attrs & JSPROP_SETTER)
+ ? JS_EXTENSION (JSPropertyOp) obj
+ : NULL,
+ attrs | JSPROP_ENUMERATE
+ | JSPROP_PERMANENT,
+ &prop);
+
+ /* Restore fp->scopeChain now that obj is defined in fp->varobj. */
+ fp->scopeChain = obj2;
+ if (!ok) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ goto out;
+ }
+
+#if 0
+ if (attrs == 0 && script->numGlobalVars) {
+ /*
+ * As with JSOP_DEFVAR and JSOP_DEFCONST (above), fast globals
+ * use fp->vars to map the global function name's atomIndex to
+ * its permanent fp->varobj slot number, tagged as a jsval.
+ */
+ sprop = (JSScopeProperty *) prop;
+ fp->vars[atomIndex] = INT_TO_JSVAL(sprop->slot);
+ }
+#endif
+ OBJ_DROP_PROPERTY(cx, parent, prop);
+ END_LITOPX_CASE(JSOP_CLOSURE)
+
+#if JS_HAS_GETTER_SETTER
+ BEGIN_CASE(JSOP_GETTER)
+ BEGIN_CASE(JSOP_SETTER)
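+            /*
+             * JSOP_GETTER and JSOP_SETTER are prefix ops: decode the next
+             * opcode (a set or init op) to find the property id and object,
+             * then define the function value on top of the stack as that
+             * property's getter or setter instead of storing it as a value.
+             */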
+ op2 = (JSOp) *++pc;
+ switch (op2) {
+ case JSOP_SETNAME:
+ case JSOP_SETPROP:
+ atom = GET_ATOM(cx, script, pc);
+ id = ATOM_TO_JSID(atom);
+ rval = FETCH_OPND(-1);
+ i = -1;
+ goto gs_pop_lval;
+
+ case JSOP_SETELEM:
+ rval = FETCH_OPND(-1);
+ FETCH_ELEMENT_ID(-2, id);
+ i = -2;
+ gs_pop_lval:
+ FETCH_OBJECT(cx, i - 1, lval, obj);
+ break;
+
+ case JSOP_INITPROP:
+ JS_ASSERT(sp - fp->spbase >= 2);
+ rval = FETCH_OPND(-1);
+ i = -1;
+ atom = GET_ATOM(cx, script, pc);
+ id = ATOM_TO_JSID(atom);
+ goto gs_get_lval;
+
+ case JSOP_INITELEM:
+ JS_ASSERT(sp - fp->spbase >= 3);
+ rval = FETCH_OPND(-1);
+ FETCH_ELEMENT_ID(-2, id);
+ i = -2;
+ gs_get_lval:
+ lval = FETCH_OPND(i-1);
+ JS_ASSERT(JSVAL_IS_OBJECT(lval));
+ obj = JSVAL_TO_OBJECT(lval);
+ break;
+
+ default:
+ JS_ASSERT(0);
+ }
+
+ /* Ensure that id has a type suitable for use with obj. */
+ CHECK_ELEMENT_ID(obj, id);
+
+ SAVE_SP_AND_PC(fp);
+ if (JS_TypeOfValue(cx, rval) != JSTYPE_FUNCTION) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_GETTER_OR_SETTER,
+ (op == JSOP_GETTER)
+ ? js_getter_str
+ : js_setter_str);
+ ok = JS_FALSE;
+ goto out;
+ }
+
+ /*
+ * Getters and setters are just like watchpoints from an access
+ * control point of view.
+ */
+ ok = OBJ_CHECK_ACCESS(cx, obj, id, JSACC_WATCH, &rtmp, &attrs);
+ if (!ok)
+ goto out;
+
+ if (op == JSOP_GETTER) {
+ getter = JS_EXTENSION (JSPropertyOp) JSVAL_TO_OBJECT(rval);
+ setter = NULL;
+ attrs = JSPROP_GETTER;
+ } else {
+ getter = NULL;
+ setter = JS_EXTENSION (JSPropertyOp) JSVAL_TO_OBJECT(rval);
+ attrs = JSPROP_SETTER;
+ }
+ attrs |= JSPROP_ENUMERATE | JSPROP_SHARED;
+
+ /* Check for a readonly or permanent property of the same name. */
+ ok = js_CheckRedeclaration(cx, obj, id, attrs, NULL, NULL);
+ if (!ok)
+ goto out;
+
+ ok = OBJ_DEFINE_PROPERTY(cx, obj, id, JSVAL_VOID, getter, setter,
+ attrs, NULL);
+ if (!ok)
+ goto out;
+
+ obj = NULL;
+ sp += i;
+ if (js_CodeSpec[op2].ndefs)
+ STORE_OPND(-1, rval);
+ len = js_CodeSpec[op2].length;
+ DO_NEXT_OP(len);
+#endif /* JS_HAS_GETTER_SETTER */
+
+ BEGIN_CASE(JSOP_NEWINIT)
+ argc = 0;
+ fp->sharpDepth++;
+ goto do_new;
+
+ BEGIN_CASE(JSOP_ENDINIT)
+ if (--fp->sharpDepth == 0)
+ fp->sharpArray = NULL;
+
+ /* Re-set the newborn root to the top of this object tree. */
+ JS_ASSERT(sp - fp->spbase >= 1);
+ lval = FETCH_OPND(-1);
+ JS_ASSERT(JSVAL_IS_OBJECT(lval));
+ cx->weakRoots.newborn[GCX_OBJECT] = JSVAL_TO_GCTHING(lval);
+ END_CASE(JSOP_ENDINIT)
+
+ BEGIN_CASE(JSOP_INITPROP)
+ /* Pop the property's value into rval. */
+ JS_ASSERT(sp - fp->spbase >= 2);
+ rval = FETCH_OPND(-1);
+
+ /* Get the immediate property name into id. */
+ atom = GET_ATOM(cx, script, pc);
+ id = ATOM_TO_JSID(atom);
+ i = -1;
+ goto do_init;
+
+ BEGIN_CASE(JSOP_INITELEM)
+ /* Pop the element's value into rval. */
+ JS_ASSERT(sp - fp->spbase >= 3);
+ rval = FETCH_OPND(-1);
+
+ /* Pop and conditionally atomize the element id. */
+ FETCH_ELEMENT_ID(-2, id);
+ i = -2;
+
+ do_init:
+ /* Find the object being initialized at top of stack. */
+ lval = FETCH_OPND(i-1);
+ JS_ASSERT(JSVAL_IS_OBJECT(lval));
+ obj = JSVAL_TO_OBJECT(lval);
+
+ /* Ensure that id has a type suitable for use with obj. */
+ CHECK_ELEMENT_ID(obj, id);
+
+ /* Set the property named by obj[id] to rval. */
+ SAVE_SP_AND_PC(fp);
+ ok = OBJ_SET_PROPERTY(cx, obj, id, &rval);
+ if (!ok)
+ goto out;
+ sp += i;
+ len = js_CodeSpec[op].length;
+ DO_NEXT_OP(len);
+
+#if JS_HAS_SHARP_VARS
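+          /*
+           * Sharp variables (#n= and #n#) are a SpiderMonkey extension for
+           * expressing cyclic or shared structure in object and array
+           * literals. JSOP_DEFSHARP stores the object being initialized in
+           * fp->sharpArray under index n; JSOP_USESHARP reads it back.
+           */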
+ BEGIN_CASE(JSOP_DEFSHARP)
+ SAVE_SP_AND_PC(fp);
+ obj = fp->sharpArray;
+ if (!obj) {
+ obj = js_NewArrayObject(cx, 0, NULL);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ fp->sharpArray = obj;
+ }
+ i = (jsint) GET_ATOM_INDEX(pc);
+ id = INT_TO_JSID(i);
+ rval = FETCH_OPND(-1);
+ if (JSVAL_IS_PRIMITIVE(rval)) {
+ char numBuf[12];
+ JS_snprintf(numBuf, sizeof numBuf, "%u", (unsigned) i);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_SHARP_DEF, numBuf);
+ ok = JS_FALSE;
+ goto out;
+ }
+ ok = OBJ_SET_PROPERTY(cx, obj, id, &rval);
+ if (!ok)
+ goto out;
+ END_CASE(JSOP_DEFSHARP)
+
+ BEGIN_CASE(JSOP_USESHARP)
+ i = (jsint) GET_ATOM_INDEX(pc);
+ id = INT_TO_JSID(i);
+ obj = fp->sharpArray;
+ if (!obj) {
+ rval = JSVAL_VOID;
+ } else {
+ SAVE_SP_AND_PC(fp);
+ ok = OBJ_GET_PROPERTY(cx, obj, id, &rval);
+ if (!ok)
+ goto out;
+ }
+ if (!JSVAL_IS_OBJECT(rval)) {
+ char numBuf[12];
+ JS_snprintf(numBuf, sizeof numBuf, "%u", (unsigned) i);
+
+ SAVE_SP_AND_PC(fp);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_SHARP_USE, numBuf);
+ ok = JS_FALSE;
+ goto out;
+ }
+ PUSH_OPND(rval);
+ END_CASE(JSOP_USESHARP)
+#endif /* JS_HAS_SHARP_VARS */
+
+ /* No-ops for ease of decompilation and jit'ing. */
+ EMPTY_CASE(JSOP_TRY)
+ EMPTY_CASE(JSOP_FINALLY)
+
+ /* Reset the stack to the given depth. */
+ BEGIN_CASE(JSOP_SETSP)
+ i = (jsint) GET_ATOM_INDEX(pc);
+ JS_ASSERT(i >= 0);
+
+ for (obj = fp->blockChain; obj; obj = OBJ_GET_PARENT(cx, obj)) {
+ JS_ASSERT(OBJ_GET_CLASS(cx, obj) == &js_BlockClass);
+ if (OBJ_BLOCK_DEPTH(cx, obj) + (jsint)OBJ_BLOCK_COUNT(cx, obj) <= i) {
+ JS_ASSERT(OBJ_BLOCK_DEPTH(cx, obj) < i || OBJ_BLOCK_COUNT(cx, obj) == 0);
+ break;
+ }
+ }
+ fp->blockChain = obj;
+
+ JS_ASSERT(ok);
+ for (obj = fp->scopeChain;
+ (clasp = OBJ_GET_CLASS(cx, obj)) == &js_WithClass ||
+ clasp == &js_BlockClass;
+ obj = OBJ_GET_PARENT(cx, obj)) {
+ if (JS_GetPrivate(cx, obj) != fp ||
+ OBJ_BLOCK_DEPTH(cx, obj) < i) {
+ break;
+ }
+ if (clasp == &js_BlockClass)
+ ok &= js_PutBlockObject(cx, obj);
+ else
+ JS_SetPrivate(cx, obj, NULL);
+ }
+
+ fp->scopeChain = obj;
+
+ /* Set sp after js_PutBlockObject to avoid potential GC hazards. */
+ sp = fp->spbase + i;
+
+ /* Don't fail until after we've updated all stacks. */
+ if (!ok)
+ goto out;
+ END_CASE(JSOP_SETSP)
+
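+          /*
+           * JSOP_GOSUB and JSOP_RETSUB implement finally blocks as bytecode
+           * subroutines: GOSUB pushes either JSVAL_HOLE (no pending
+           * exception) or the pending exception, then the return address as
+           * an offset from script->main, and jumps to the finally code;
+           * RETSUB pops both and rethrows the exception if one was saved.
+           */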
+ BEGIN_CASE(JSOP_GOSUB)
+ JS_ASSERT(cx->exception != JSVAL_HOLE);
+ if (!cx->throwing) {
+ lval = JSVAL_HOLE;
+ } else {
+ lval = cx->exception;
+ cx->throwing = JS_FALSE;
+ }
+ PUSH(lval);
+ i = PTRDIFF(pc, script->main, jsbytecode) + JSOP_GOSUB_LENGTH;
+ len = GET_JUMP_OFFSET(pc);
+ PUSH(INT_TO_JSVAL(i));
+ END_VARLEN_CASE
+
+ BEGIN_CASE(JSOP_GOSUBX)
+ JS_ASSERT(cx->exception != JSVAL_HOLE);
+ if (!cx->throwing) {
+ lval = JSVAL_HOLE;
+ } else {
+ lval = cx->exception;
+ cx->throwing = JS_FALSE;
+ }
+ PUSH(lval);
+ i = PTRDIFF(pc, script->main, jsbytecode) + JSOP_GOSUBX_LENGTH;
+ len = GET_JUMPX_OFFSET(pc);
+ PUSH(INT_TO_JSVAL(i));
+ END_VARLEN_CASE
+
+ BEGIN_CASE(JSOP_RETSUB)
+ rval = POP();
+ JS_ASSERT(JSVAL_IS_INT(rval));
+ lval = POP();
+ if (lval != JSVAL_HOLE) {
+ /*
+                 * An exception was pending during finally; throw it *before* we
+ * adjust pc, because pc indexes into script->trynotes. This
+ * turns out not to be necessary, but it seems clearer. And
+ * it points out a FIXME: 350509, due to Igor Bukanov.
+ */
+ cx->throwing = JS_TRUE;
+ cx->exception = lval;
+ ok = JS_FALSE;
+ goto out;
+ }
+ len = JSVAL_TO_INT(rval);
+ pc = script->main;
+ END_VARLEN_CASE
+
+ BEGIN_CASE(JSOP_EXCEPTION)
+ JS_ASSERT(cx->throwing);
+ PUSH(cx->exception);
+ cx->throwing = JS_FALSE;
+ END_CASE(JSOP_EXCEPTION)
+
+ BEGIN_CASE(JSOP_THROWING)
+ JS_ASSERT(!cx->throwing);
+ cx->throwing = JS_TRUE;
+ cx->exception = POP_OPND();
+ END_CASE(JSOP_THROWING)
+
+ BEGIN_CASE(JSOP_THROW)
+ JS_ASSERT(!cx->throwing);
+ cx->throwing = JS_TRUE;
+ cx->exception = POP_OPND();
+ ok = JS_FALSE;
+ /* let the code at out try to catch the exception. */
+ goto out;
+
+ BEGIN_CASE(JSOP_SETLOCALPOP)
+ /*
+ * The stack must have a block with at least one local slot below
+ * the exception object.
+ */
+ JS_ASSERT(sp - fp->spbase >= 2);
+ slot = GET_UINT16(pc);
+ JS_ASSERT(slot + 1 < (uintN)depth);
+ fp->spbase[slot] = POP_OPND();
+ END_CASE(JSOP_SETLOCALPOP)
+
+ BEGIN_CASE(JSOP_INSTANCEOF)
+ SAVE_SP_AND_PC(fp);
+ rval = FETCH_OPND(-1);
+ if (JSVAL_IS_PRIMITIVE(rval) ||
+ !(obj = JSVAL_TO_OBJECT(rval))->map->ops->hasInstance) {
+ str = js_DecompileValueGenerator(cx, -1, rval, NULL);
+ if (str) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_INSTANCEOF_RHS,
+ JS_GetStringBytes(str));
+ }
+ ok = JS_FALSE;
+ goto out;
+ }
+ lval = FETCH_OPND(-2);
+ cond = JS_FALSE;
+ ok = obj->map->ops->hasInstance(cx, obj, lval, &cond);
+ if (!ok)
+ goto out;
+ sp--;
+ STORE_OPND(-1, BOOLEAN_TO_JSVAL(cond));
+ END_CASE(JSOP_INSTANCEOF)
+
+#if JS_HAS_DEBUGGER_KEYWORD
+ BEGIN_CASE(JSOP_DEBUGGER)
+ {
+ JSTrapHandler handler = rt->debuggerHandler;
+ if (handler) {
+ SAVE_SP_AND_PC(fp);
+ switch (handler(cx, script, pc, &rval,
+ rt->debuggerHandlerData)) {
+ case JSTRAP_ERROR:
+ ok = JS_FALSE;
+ goto out;
+ case JSTRAP_CONTINUE:
+ break;
+ case JSTRAP_RETURN:
+ fp->rval = rval;
+ goto out;
+ case JSTRAP_THROW:
+ cx->throwing = JS_TRUE;
+ cx->exception = rval;
+ ok = JS_FALSE;
+ goto out;
+ default:;
+ }
+ LOAD_INTERRUPT_HANDLER(rt);
+ }
+ }
+ END_CASE(JSOP_DEBUGGER)
+#endif /* JS_HAS_DEBUGGER_KEYWORD */
+
+#if JS_HAS_XML_SUPPORT
+ BEGIN_CASE(JSOP_DEFXMLNS)
+ rval = POP();
+ SAVE_SP_AND_PC(fp);
+ ok = js_SetDefaultXMLNamespace(cx, rval);
+ if (!ok)
+ goto out;
+ END_CASE(JSOP_DEFXMLNS)
+
+ BEGIN_CASE(JSOP_ANYNAME)
+ SAVE_SP_AND_PC(fp);
+ ok = js_GetAnyName(cx, &rval);
+ if (!ok)
+ goto out;
+ PUSH_OPND(rval);
+ END_CASE(JSOP_ANYNAME)
+
+ BEGIN_LITOPX_CASE(JSOP_QNAMEPART, 0)
+ PUSH_OPND(ATOM_KEY(atom));
+ END_LITOPX_CASE(JSOP_QNAMEPART)
+
+ BEGIN_LITOPX_CASE(JSOP_QNAMECONST, 0)
+ rval = ATOM_KEY(atom);
+ lval = FETCH_OPND(-1);
+ SAVE_SP_AND_PC(fp);
+ obj = js_ConstructXMLQNameObject(cx, lval, rval);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ STORE_OPND(-1, OBJECT_TO_JSVAL(obj));
+ END_LITOPX_CASE(JSOP_QNAMECONST)
+
+ BEGIN_CASE(JSOP_QNAME)
+ rval = FETCH_OPND(-1);
+ lval = FETCH_OPND(-2);
+ SAVE_SP_AND_PC(fp);
+ obj = js_ConstructXMLQNameObject(cx, lval, rval);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ sp--;
+ STORE_OPND(-1, OBJECT_TO_JSVAL(obj));
+ END_CASE(JSOP_QNAME)
+
+ BEGIN_CASE(JSOP_TOATTRNAME)
+ rval = FETCH_OPND(-1);
+ SAVE_SP_AND_PC(fp);
+ ok = js_ToAttributeName(cx, &rval);
+ if (!ok)
+ goto out;
+ STORE_OPND(-1, rval);
+ END_CASE(JSOP_TOATTRNAME)
+
+ BEGIN_CASE(JSOP_TOATTRVAL)
+ rval = FETCH_OPND(-1);
+ JS_ASSERT(JSVAL_IS_STRING(rval));
+ SAVE_SP_AND_PC(fp);
+ str = js_EscapeAttributeValue(cx, JSVAL_TO_STRING(rval));
+ if (!str) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ STORE_OPND(-1, STRING_TO_JSVAL(str));
+ END_CASE(JSOP_TOATTRVAL)
+
+ BEGIN_CASE(JSOP_ADDATTRNAME)
+ BEGIN_CASE(JSOP_ADDATTRVAL)
+ rval = FETCH_OPND(-1);
+ lval = FETCH_OPND(-2);
+ str = JSVAL_TO_STRING(lval);
+ str2 = JSVAL_TO_STRING(rval);
+ SAVE_SP_AND_PC(fp);
+ str = js_AddAttributePart(cx, op == JSOP_ADDATTRNAME, str, str2);
+ if (!str) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ sp--;
+ STORE_OPND(-1, STRING_TO_JSVAL(str));
+ END_CASE(JSOP_ADDATTRNAME)
+
+ BEGIN_CASE(JSOP_BINDXMLNAME)
+ lval = FETCH_OPND(-1);
+ SAVE_SP_AND_PC(fp);
+ ok = js_FindXMLProperty(cx, lval, &obj, &rval);
+ if (!ok)
+ goto out;
+ STORE_OPND(-1, OBJECT_TO_JSVAL(obj));
+ PUSH_OPND(rval);
+ END_CASE(JSOP_BINDXMLNAME)
+
+ BEGIN_CASE(JSOP_SETXMLNAME)
+ obj = JSVAL_TO_OBJECT(FETCH_OPND(-3));
+ lval = FETCH_OPND(-2);
+ rval = FETCH_OPND(-1);
+ SAVE_SP_AND_PC(fp);
+ ok = js_SetXMLProperty(cx, obj, lval, &rval);
+ if (!ok)
+ goto out;
+ sp -= 2;
+ STORE_OPND(-1, rval);
+ obj = NULL;
+ END_CASE(JSOP_SETXMLNAME)
+
+ BEGIN_CASE(JSOP_XMLNAME)
+ lval = FETCH_OPND(-1);
+ SAVE_SP_AND_PC(fp);
+ ok = js_FindXMLProperty(cx, lval, &obj, &rval);
+ if (!ok)
+ goto out;
+ ok = js_GetXMLProperty(cx, obj, rval, &rval);
+ if (!ok)
+ goto out;
+ STORE_OPND(-1, rval);
+ END_CASE(JSOP_XMLNAME)
+
+ BEGIN_CASE(JSOP_DESCENDANTS)
+ BEGIN_CASE(JSOP_DELDESC)
+ FETCH_OBJECT(cx, -2, lval, obj);
+ rval = FETCH_OPND(-1);
+ SAVE_SP_AND_PC(fp);
+ ok = js_GetXMLDescendants(cx, obj, rval, &rval);
+ if (!ok)
+ goto out;
+
+ if (op == JSOP_DELDESC) {
+ sp[-1] = rval; /* set local root */
+ ok = js_DeleteXMLListElements(cx, JSVAL_TO_OBJECT(rval));
+ if (!ok)
+ goto out;
+ rval = JSVAL_TRUE; /* always succeed */
+ }
+
+ sp--;
+ STORE_OPND(-1, rval);
+ END_CASE(JSOP_DESCENDANTS)
+
+ BEGIN_CASE(JSOP_FILTER)
+ FETCH_OBJECT(cx, -1, lval, obj);
+ len = GET_JUMP_OFFSET(pc);
+ SAVE_SP_AND_PC(fp);
+ ok = js_FilterXMLList(cx, obj, pc + js_CodeSpec[op].length, &rval);
+ if (!ok)
+ goto out;
+ JS_ASSERT(fp->sp == sp);
+ STORE_OPND(-1, rval);
+ END_VARLEN_CASE
+
+ BEGIN_CASE(JSOP_ENDFILTER)
+ *result = POP_OPND();
+ goto out;
+
+ EMPTY_CASE(JSOP_STARTXML)
+ EMPTY_CASE(JSOP_STARTXMLEXPR)
+
+ BEGIN_CASE(JSOP_TOXML)
+ rval = FETCH_OPND(-1);
+ SAVE_SP_AND_PC(fp);
+ obj = js_ValueToXMLObject(cx, rval);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ STORE_OPND(-1, OBJECT_TO_JSVAL(obj));
+ END_CASE(JSOP_TOXML)
+
+ BEGIN_CASE(JSOP_TOXMLLIST)
+ rval = FETCH_OPND(-1);
+ SAVE_SP_AND_PC(fp);
+ obj = js_ValueToXMLListObject(cx, rval);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ STORE_OPND(-1, OBJECT_TO_JSVAL(obj));
+ END_CASE(JSOP_TOXMLLIST)
+
+ BEGIN_CASE(JSOP_XMLTAGEXPR)
+ rval = FETCH_OPND(-1);
+ SAVE_SP_AND_PC(fp);
+ str = js_ValueToString(cx, rval);
+ if (!str) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ STORE_OPND(-1, STRING_TO_JSVAL(str));
+ END_CASE(JSOP_XMLTAGEXPR)
+
+ BEGIN_CASE(JSOP_XMLELTEXPR)
+ rval = FETCH_OPND(-1);
+ SAVE_SP_AND_PC(fp);
+ if (VALUE_IS_XML(cx, rval)) {
+ str = js_ValueToXMLString(cx, rval);
+ } else {
+ str = js_ValueToString(cx, rval);
+ if (str)
+ str = js_EscapeElementValue(cx, str);
+ }
+ if (!str) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ STORE_OPND(-1, STRING_TO_JSVAL(str));
+ END_CASE(JSOP_XMLELTEXPR)
+
+ BEGIN_LITOPX_CASE(JSOP_XMLOBJECT, 0)
+ SAVE_SP_AND_PC(fp);
+ obj = js_CloneXMLObject(cx, ATOM_TO_OBJECT(atom));
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ PUSH_OPND(OBJECT_TO_JSVAL(obj));
+ obj = NULL;
+ END_LITOPX_CASE(JSOP_XMLOBJECT)
+
+ BEGIN_LITOPX_CASE(JSOP_XMLCDATA, 0)
+ str = ATOM_TO_STRING(atom);
+ obj = js_NewXMLSpecialObject(cx, JSXML_CLASS_TEXT, NULL, str);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ PUSH_OPND(OBJECT_TO_JSVAL(obj));
+ END_LITOPX_CASE(JSOP_XMLCDATA)
+
+ BEGIN_LITOPX_CASE(JSOP_XMLCOMMENT, 0)
+ str = ATOM_TO_STRING(atom);
+ obj = js_NewXMLSpecialObject(cx, JSXML_CLASS_COMMENT, NULL, str);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ PUSH_OPND(OBJECT_TO_JSVAL(obj));
+ END_LITOPX_CASE(JSOP_XMLCOMMENT)
+
+ BEGIN_LITOPX_CASE(JSOP_XMLPI, 0)
+ str = ATOM_TO_STRING(atom);
+ rval = FETCH_OPND(-1);
+ str2 = JSVAL_TO_STRING(rval);
+ SAVE_SP_AND_PC(fp);
+ obj = js_NewXMLSpecialObject(cx,
+ JSXML_CLASS_PROCESSING_INSTRUCTION,
+ str, str2);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ STORE_OPND(-1, OBJECT_TO_JSVAL(obj));
+ END_LITOPX_CASE(JSOP_XMLPI)
+
+ BEGIN_LITOPX_CASE(JSOP_GETMETHOD, 0)
+ /* Get an immediate atom naming the property. */
+ id = ATOM_TO_JSID(atom);
+ lval = FETCH_OPND(-1);
+ SAVE_SP_AND_PC(fp);
+ if (!JSVAL_IS_PRIMITIVE(lval)) {
+ STORE_OPND(-1, lval);
+ obj = JSVAL_TO_OBJECT(lval);
+
+ /* Special-case XML object method lookup, per ECMA-357. */
+ if (OBJECT_IS_XML(cx, obj)) {
+ JSXMLObjectOps *ops;
+
+ ops = (JSXMLObjectOps *) obj->map->ops;
+ obj = ops->getMethod(cx, obj, id, &rval);
+ if (!obj)
+ ok = JS_FALSE;
+ } else {
+ CACHED_GET(OBJ_GET_PROPERTY(cx, obj, id, &rval));
+ }
+ } else {
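+                /*
+                 * lval is a primitive: look the method up on its class
+                 * prototype (String, Number, or Boolean) without creating a
+                 * wrapper object; null and undefined have no properties, so
+                 * report an error for them.
+                 */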
+ if (JSVAL_IS_STRING(lval)) {
+ i = JSProto_String;
+ } else if (JSVAL_IS_NUMBER(lval)) {
+ i = JSProto_Number;
+ } else if (JSVAL_IS_BOOLEAN(lval)) {
+ i = JSProto_Boolean;
+ } else {
+ JS_ASSERT(JSVAL_IS_NULL(lval) || JSVAL_IS_VOID(lval));
+ str = js_DecompileValueGenerator(cx, JSDVG_SEARCH_STACK,
+ lval, NULL);
+ if (str) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_NO_PROPERTIES,
+ JS_GetStringBytes(str));
+ }
+ ok = JS_FALSE;
+ goto out;
+ }
+ ok = js_GetClassPrototype(cx, NULL, INT_TO_JSID(i), &obj);
+ if (!ok)
+ goto out;
+ JS_ASSERT(obj);
+ STORE_OPND(-1, OBJECT_TO_JSVAL(obj));
+ CACHED_GET(OBJ_GET_PROPERTY(cx, obj, id, &rval));
+ obj = (JSObject *) lval; /* keep tagged as non-object */
+ }
+ if (!ok)
+ goto out;
+ STORE_OPND(-1, rval);
+ END_LITOPX_CASE(JSOP_GETMETHOD)
+
+ BEGIN_LITOPX_CASE(JSOP_SETMETHOD, 0)
+ /* Get an immediate atom naming the property. */
+ id = ATOM_TO_JSID(atom);
+ rval = FETCH_OPND(-1);
+ FETCH_OBJECT(cx, -2, lval, obj);
+ SAVE_SP_AND_PC(fp);
+
+ /* Special-case XML object method lookup, per ECMA-357. */
+ if (OBJECT_IS_XML(cx, obj)) {
+ JSXMLObjectOps *ops;
+
+ ops = (JSXMLObjectOps *) obj->map->ops;
+ ok = ops->setMethod(cx, obj, id, &rval);
+ } else {
+ CACHED_SET(OBJ_SET_PROPERTY(cx, obj, id, &rval));
+ }
+ if (!ok)
+ goto out;
+ --sp;
+ STORE_OPND(-1, rval);
+ obj = NULL;
+ END_LITOPX_CASE(JSOP_SETMETHOD)
+
+ BEGIN_CASE(JSOP_GETFUNNS)
+ SAVE_SP_AND_PC(fp);
+ ok = js_GetFunctionNamespace(cx, &rval);
+ if (!ok)
+ goto out;
+ PUSH_OPND(rval);
+ END_CASE(JSOP_GETFUNNS)
+#endif /* JS_HAS_XML_SUPPORT */
+
+ BEGIN_LITOPX_CASE(JSOP_ENTERBLOCK, 0)
+ obj = ATOM_TO_OBJECT(atom);
+ JS_ASSERT(fp->spbase + OBJ_BLOCK_DEPTH(cx, obj) == sp);
+ vp = sp + OBJ_BLOCK_COUNT(cx, obj);
+ JS_ASSERT(vp <= fp->spbase + depth);
+ while (sp < vp) {
+ STORE_OPND(0, JSVAL_VOID);
+ sp++;
+ }
+
+ /*
+ * If this frame had to reflect the compile-time block chain into
+ * the runtime scope chain, we can't optimize block scopes out of
+ * runtime any longer, because an outer block that parents obj has
+ * been cloned onto the scope chain. To avoid re-cloning such a
+ * parent and accumulating redundant clones via js_GetScopeChain,
+ * we must clone each block eagerly on entry, and push it on the
+ * scope chain, until this frame pops.
+ */
+ if (fp->flags & JSFRAME_POP_BLOCKS) {
+ JS_ASSERT(!fp->blockChain);
+
+ /*
+ * Check whether JSOP_DEFLOCALFUN emulated JSOP_ENTERBLOCK for
+ * the body block in order to correctly scope the local cloned
+ * function object it creates.
+ */
+ parent = fp->scopeChain;
+ if (OBJ_GET_PROTO(cx, parent) == obj) {
+ JS_ASSERT(OBJ_GET_CLASS(cx, parent) == &js_BlockClass);
+ } else {
+ obj = js_CloneBlockObject(cx, obj, parent, fp);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ fp->scopeChain = obj;
+ }
+ } else {
+ JS_ASSERT(!fp->blockChain ||
+ OBJ_GET_PARENT(cx, obj) == fp->blockChain);
+ fp->blockChain = obj;
+ }
+ END_LITOPX_CASE(JSOP_ENTERBLOCK)
+
+ BEGIN_CASE(JSOP_LEAVEBLOCKEXPR)
+ BEGIN_CASE(JSOP_LEAVEBLOCK)
+ {
+ JSObject **chainp;
+
+ /* Grab the result of the expression. */
+ if (op == JSOP_LEAVEBLOCKEXPR)
+ rval = FETCH_OPND(-1);
+
+ chainp = &fp->blockChain;
+ obj = *chainp;
+ if (!obj) {
+ chainp = &fp->scopeChain;
+ obj = *chainp;
+
+ /*
+ * This block was cloned, so clear its private data and sync
+ * its locals to their property slots.
+ */
+ SAVE_SP_AND_PC(fp);
+ ok = js_PutBlockObject(cx, obj);
+ if (!ok)
+ goto out;
+ }
+
+ sp -= GET_UINT16(pc);
+ JS_ASSERT(fp->spbase <= sp && sp <= fp->spbase + depth);
+
+ /* Store the result into the topmost stack slot. */
+ if (op == JSOP_LEAVEBLOCKEXPR)
+ STORE_OPND(-1, rval);
+
+ JS_ASSERT(OBJ_GET_CLASS(cx, obj) == &js_BlockClass);
+ JS_ASSERT(op == JSOP_LEAVEBLOCKEXPR
+ ? fp->spbase + OBJ_BLOCK_DEPTH(cx, obj) == sp - 1
+ : fp->spbase + OBJ_BLOCK_DEPTH(cx, obj) == sp);
+
+ *chainp = OBJ_GET_PARENT(cx, obj);
+ JS_ASSERT(chainp != &fp->blockChain ||
+ !*chainp ||
+ OBJ_GET_CLASS(cx, *chainp) == &js_BlockClass);
+ }
+ END_CASE(JSOP_LEAVEBLOCK)
+
+ BEGIN_CASE(JSOP_GETLOCAL)
+ slot = GET_UINT16(pc);
+ JS_ASSERT(slot < (uintN)depth);
+ PUSH_OPND(fp->spbase[slot]);
+ obj = NULL;
+ END_CASE(JSOP_GETLOCAL)
+
+ BEGIN_CASE(JSOP_SETLOCAL)
+ slot = GET_UINT16(pc);
+ JS_ASSERT(slot < (uintN)depth);
+ vp = &fp->spbase[slot];
+ GC_POKE(cx, *vp);
+ *vp = FETCH_OPND(-1);
+ obj = NULL;
+ END_CASE(JSOP_SETLOCAL)
+
+/* NB: This macro doesn't use JS_BEGIN_MACRO/JS_END_MACRO around its body. */
+#define FAST_LOCAL_INCREMENT_OP(PRE,OPEQ,MINMAX) \
+ slot = GET_UINT16(pc); \
+ JS_ASSERT(slot < (uintN)depth); \
+ vp = fp->spbase + slot; \
+ rval = *vp; \
+ if (!JSVAL_IS_INT(rval) || rval == INT_TO_JSVAL(JSVAL_INT_##MINMAX)) \
+ goto do_nonint_fast_incop; \
+ PRE = rval; \
+ rval OPEQ 2; \
+ *vp = rval; \
+ PUSH_OPND(PRE)
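+
+/*
+ * "OPEQ 2" above is the whole increment or decrement: int-tagged jsvals hold
+ * the integer shifted left one bit with the low tag bit set (INT_TO_JSVAL),
+ * so adding or subtracting 2 from the tagged word changes the untagged
+ * integer by exactly 1, e.g. INT_TO_JSVAL(5) == 11 and 11 + 2 == 13 ==
+ * INT_TO_JSVAL(6).
+ */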
+
+ BEGIN_CASE(JSOP_INCLOCAL)
+ FAST_LOCAL_INCREMENT_OP(rval, +=, MAX);
+ END_CASE(JSOP_INCLOCAL)
+
+ BEGIN_CASE(JSOP_DECLOCAL)
+ FAST_LOCAL_INCREMENT_OP(rval, -=, MIN);
+ END_CASE(JSOP_DECLOCAL)
+
+ BEGIN_CASE(JSOP_LOCALINC)
+ FAST_LOCAL_INCREMENT_OP(rtmp, +=, MAX);
+ END_CASE(JSOP_LOCALINC)
+
+ BEGIN_CASE(JSOP_LOCALDEC)
+ FAST_LOCAL_INCREMENT_OP(rtmp, -=, MIN);
+ END_CASE(JSOP_LOCALDEC)
+
+#undef FAST_LOCAL_INCREMENT_OP
+
+ EMPTY_CASE(JSOP_STARTITER)
+
+ BEGIN_CASE(JSOP_ENDITER)
+ JS_ASSERT(!JSVAL_IS_PRIMITIVE(sp[-1]));
+ iterobj = JSVAL_TO_OBJECT(sp[-1]);
+
+ /*
+ * js_CloseNativeIterator checks whether the iterator is not
+ * native, and also detects the case of a native iterator that
+ * has already escaped, even though a for-in loop caused it to
+ * be created. See jsiter.c.
+ */
+ SAVE_SP_AND_PC(fp);
+ js_CloseNativeIterator(cx, iterobj);
+ *--sp = JSVAL_NULL;
+ END_CASE(JSOP_ENDITER)
+
+#if JS_HAS_GENERATORS
+ BEGIN_CASE(JSOP_GENERATOR)
+ pc += JSOP_GENERATOR_LENGTH;
+ SAVE_SP_AND_PC(fp);
+ obj = js_NewGenerator(cx, fp);
+ if (!obj) {
+ ok = JS_FALSE;
+ } else {
+ JS_ASSERT(!fp->callobj && !fp->argsobj);
+ fp->rval = OBJECT_TO_JSVAL(obj);
+ }
+ goto out;
+
+ BEGIN_CASE(JSOP_YIELD)
+ ASSERT_NOT_THROWING(cx);
+ if (fp->flags & JSFRAME_FILTERING) {
+ /* FIXME: bug 309894 -- fix to eliminate this error. */
+ JS_ReportErrorNumberUC(cx, js_GetErrorMessage, NULL,
+ JSMSG_YIELD_FROM_FILTER);
+ ok = JS_FALSE;
+ goto out;
+ }
+ if (FRAME_TO_GENERATOR(fp)->state == JSGEN_CLOSING) {
+ str = js_DecompileValueGenerator(cx, JSDVG_SEARCH_STACK,
+ fp->argv[-2], NULL);
+ if (str) {
+ JS_ReportErrorNumberUC(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_GENERATOR_YIELD,
+ JSSTRING_CHARS(str));
+ }
+ ok = JS_FALSE;
+ goto out;
+ }
+ fp->rval = FETCH_OPND(-1);
+ fp->flags |= JSFRAME_YIELDING;
+ pc += JSOP_YIELD_LENGTH;
+ SAVE_SP_AND_PC(fp);
+ goto out;
+
+ BEGIN_CASE(JSOP_ARRAYPUSH)
+ slot = GET_UINT16(pc);
+ JS_ASSERT(slot < (uintN)depth);
+ lval = fp->spbase[slot];
+ obj = JSVAL_TO_OBJECT(lval);
+ JS_ASSERT(OBJ_GET_CLASS(cx, obj) == &js_ArrayClass);
+ rval = FETCH_OPND(-1);
+
+ /* We know that the array is created with only a 'length' slot. */
+ i = obj->map->freeslot - (JSSLOT_FREE(&js_ArrayClass) + 1);
+ id = INT_TO_JSID(i);
+
+ SAVE_SP_AND_PC(fp);
+ ok = OBJ_SET_PROPERTY(cx, obj, id, &rval);
+ if (!ok)
+ goto out;
+ --sp;
+ END_CASE(JSOP_ARRAYPUSH)
+#endif /* JS_HAS_GENERATORS */
+
+#if !JS_HAS_GENERATORS
+ L_JSOP_GENERATOR:
+ L_JSOP_YIELD:
+ L_JSOP_ARRAYPUSH:
+#endif
+
+#if !JS_HAS_DESTRUCTURING
+ L_JSOP_FOREACHKEYVAL:
+ L_JSOP_ENUMCONSTELEM:
+#endif
+
+#ifdef JS_THREADED_INTERP
+ L_JSOP_BACKPATCH:
+ L_JSOP_BACKPATCH_POP:
+#else
+ default:
+#endif
+ {
+ char numBuf[12];
+ JS_snprintf(numBuf, sizeof numBuf, "%d", op);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_BYTECODE, numBuf);
+ ok = JS_FALSE;
+ goto out;
+ }
+
+#ifndef JS_THREADED_INTERP
+
+ } /* switch (op) */
+
+ advance_pc:
+ pc += len;
+
+#ifdef DEBUG
+ if (tracefp) {
+ intN ndefs, n;
+ jsval *siter;
+
+ ndefs = js_CodeSpec[op].ndefs;
+ if (ndefs) {
+ SAVE_SP_AND_PC(fp);
+ if (op == JSOP_FORELEM && sp[-1] == JSVAL_FALSE)
+ --ndefs;
+ for (n = -ndefs; n < 0; n++) {
+ str = js_DecompileValueGenerator(cx, n, sp[n], NULL);
+ if (str) {
+ fprintf(tracefp, "%s %s",
+ (n == -ndefs) ? " output:" : ",",
+ JS_GetStringBytes(str));
+ }
+ }
+ fprintf(tracefp, " @ %d\n", sp - fp->spbase);
+ }
+ fprintf(tracefp, " stack: ");
+ for (siter = fp->spbase; siter < sp; siter++) {
+ str = js_ValueToSource(cx, *siter);
+ fprintf(tracefp, "%s ",
+ str ? JS_GetStringBytes(str) : "<null>");
+ }
+ fputc('\n', tracefp);
+ }
+#endif /* DEBUG */
+ }
+#endif /* !JS_THREADED_INTERP */
+
+out:
+ if (!ok) {
+ /*
+ * Has an exception been raised? Also insist that we are not in an
+ * XML filtering predicate expression, to avoid catching exceptions
+ * within the filtering predicate, such as this example taken from
+ * tests/e4x/Regress/regress-301596.js:
+ *
+ * try {
+ * <xml/>.(@a == 1);
+ * throw 5;
+ * } catch (e) {
+ * }
+ *
+ * The inner interpreter activation executing the predicate bytecode
+ * will throw "reference to undefined XML name @a" (or 5, in older
+ * versions that followed the first edition of ECMA-357 and evaluated
+ * unbound identifiers to undefined), and the exception must not be
+ * caught until control unwinds to the outer interpreter activation.
+ *
+ * Otherwise, the wrong stack depth will be restored by JSOP_SETSP,
+ * and the catch will move into the filtering predicate expression,
+ * leading to double catch execution if it rethrows.
+ *
+ * FIXME: https://bugzilla.mozilla.org/show_bug.cgi?id=309894
+ */
+ if (cx->throwing && !(fp->flags & JSFRAME_FILTERING)) {
+ /*
+ * Call debugger throw hook if set (XXX thread safety?).
+ */
+ JSTrapHandler handler = rt->throwHook;
+ if (handler) {
+ SAVE_SP_AND_PC(fp);
+ switch (handler(cx, script, pc, &rval, rt->throwHookData)) {
+ case JSTRAP_ERROR:
+ cx->throwing = JS_FALSE;
+ goto no_catch;
+ case JSTRAP_RETURN:
+ ok = JS_TRUE;
+ cx->throwing = JS_FALSE;
+ fp->rval = rval;
+ goto no_catch;
+ case JSTRAP_THROW:
+ cx->exception = rval;
+ case JSTRAP_CONTINUE:
+ default:;
+ }
+ LOAD_INTERRUPT_HANDLER(rt);
+ }
+
+ /*
+ * Look for a try block in script that can catch this exception.
+ */
+#if JS_HAS_GENERATORS
+ if (JS_LIKELY(cx->exception != JSVAL_ARETURN)) {
+ SCRIPT_FIND_CATCH_START(script, pc, pc);
+ if (!pc)
+ goto no_catch;
+ } else {
+ pc = js_FindFinallyHandler(script, pc);
+ if (!pc) {
+ cx->throwing = JS_FALSE;
+ ok = JS_TRUE;
+ fp->rval = JSVAL_VOID;
+ goto no_catch;
+ }
+ }
+#else
+ SCRIPT_FIND_CATCH_START(script, pc, pc);
+ if (!pc)
+ goto no_catch;
+#endif
+
+ /* Don't clear cx->throwing to save cx->exception from GC. */
+ len = 0;
+ ok = JS_TRUE;
+ DO_NEXT_OP(len);
+ }
+no_catch:;
+ }
+
+ /*
+ * Check whether control fell off the end of a lightweight function, or an
+ * exception thrown under such a function was not caught by it. If so, go
+ * to the inline code under JSOP_RETURN.
+ */
+ if (inlineCallCount)
+ goto inline_return;
+
+ /*
+ * Reset sp before freeing stack slots, because our caller may GC soon.
+ * Clear spbase to indicate that we've popped the 2 * depth operand slots.
+ * Restore the previous frame's execution state.
+ */
+ if (JS_LIKELY(mark != NULL)) {
+ /* If fp has blocks on its scope chain, home their locals now. */
+ if (fp->flags & JSFRAME_POP_BLOCKS) {
+ SAVE_SP_AND_PC(fp);
+ ok &= PutBlockObjects(cx, fp);
+ }
+
+ fp->sp = fp->spbase;
+ fp->spbase = NULL;
+ js_FreeRawStack(cx, mark);
+ } else {
+ SAVE_SP(fp);
+ }
+
+out2:
+ if (cx->version == currentVersion && currentVersion != originalVersion)
+ js_SetVersion(cx, originalVersion);
+ cx->interpLevel--;
+ return ok;
+
+atom_not_defined:
+ {
+ const char *printable = js_AtomToPrintableString(cx, atom);
+ if (printable)
+ js_ReportIsNotDefined(cx, printable);
+ ok = JS_FALSE;
+ goto out;
+ }
+}
diff --git a/src/third_party/js-1.7/jsinterp.h b/src/third_party/js-1.7/jsinterp.h
new file mode 100644
index 00000000000..ab60b3af78a
--- /dev/null
+++ b/src/third_party/js-1.7/jsinterp.h
@@ -0,0 +1,361 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsinterp_h___
+#define jsinterp_h___
+/*
+ * JS interpreter interface.
+ */
+#include "jsprvtd.h"
+#include "jspubtd.h"
+
+JS_BEGIN_EXTERN_C
+
+/*
+ * JS stack frame, may be allocated on the C stack by native callers. Always
+ * allocated on cx->stackPool for calls from the interpreter to an interpreted
+ * function.
+ *
+ * NB: This struct is manually initialized in jsinterp.c and jsiter.c. If you
+ * add new members, update both files. But first, try to remove members. The
+ * sharp* and xml* members should be moved onto the stack as local variables
+ * with well-known slots, if possible.
+ */
+struct JSStackFrame {
+ JSObject *callobj; /* lazily created Call object */
+ JSObject *argsobj; /* lazily created arguments object */
+ JSObject *varobj; /* variables object, where vars go */
+ JSScript *script; /* script being interpreted */
+ JSFunction *fun; /* function being called or null */
+ JSObject *thisp; /* "this" pointer if in method */
+ uintN argc; /* actual argument count */
+ jsval *argv; /* base of argument stack slots */
+ jsval rval; /* function return value */
+ uintN nvars; /* local variable count */
+ jsval *vars; /* base of variable stack slots */
+ JSStackFrame *down; /* previous frame */
+ void *annotation; /* used by Java security */
+ JSObject *scopeChain; /* scope chain */
+ jsbytecode *pc; /* program counter */
+ jsval *sp; /* stack pointer */
+ jsval *spbase; /* operand stack base */
+ uintN sharpDepth; /* array/object initializer depth */
+ JSObject *sharpArray; /* scope for #n= initializer vars */
+ uint32 flags; /* frame flags -- see below */
+ JSStackFrame *dormantNext; /* next dormant frame chain */
+ JSObject *xmlNamespace; /* null or default xml namespace in E4X */
+ JSObject *blockChain; /* active compile-time block scopes */
+};
+
+typedef struct JSInlineFrame {
+ JSStackFrame frame; /* base struct */
+ jsval *rvp; /* ptr to caller's return value slot */
+ void *mark; /* mark before inline frame */
+ void *hookData; /* debugger call hook data */
+ JSVersion callerVersion; /* dynamic version of calling script */
+} JSInlineFrame;
+
+/* JS stack frame flags. */
+#define JSFRAME_CONSTRUCTING 0x01 /* frame is for a constructor invocation */
+#define JSFRAME_INTERNAL 0x02 /* internal call, not invoked by a script */
+#define JSFRAME_SKIP_CALLER 0x04 /* skip one link when evaluating f.caller
+ for this invocation of f */
+#define JSFRAME_ASSIGNING 0x08 /* a complex (not simplex JOF_ASSIGNING) op
+ is currently assigning to a property */
+#define JSFRAME_DEBUGGER 0x10 /* frame for JS_EvaluateInStackFrame */
+#define JSFRAME_EVAL 0x20 /* frame for obj_eval */
+#define JSFRAME_SPECIAL 0x30 /* special evaluation frame flags */
+#define JSFRAME_COMPILING 0x40 /* frame is being used by compiler */
+#define JSFRAME_COMPILE_N_GO 0x80 /* compile-and-go mode, can optimize name
+ references based on scope chain */
+#define JSFRAME_SCRIPT_OBJECT 0x100 /* compiling source for a Script object */
+#define JSFRAME_YIELDING 0x200 /* js_Interpret dispatched JSOP_YIELD */
+#define JSFRAME_FILTERING 0x400 /* XML filtering predicate expression */
+#define JSFRAME_ITERATOR 0x800 /* trying to get an iterator for for-in */
+#define JSFRAME_POP_BLOCKS 0x1000 /* scope chain contains blocks to pop */
+#define JSFRAME_GENERATOR 0x2000 /* frame belongs to generator-iterator */
+
+#define JSFRAME_OVERRIDE_SHIFT 24 /* override bit-set params; see jsfun.c */
+#define JSFRAME_OVERRIDE_BITS 8
+
+/*
+ * Property cache for quickened get/set property opcodes.
+ */
+#define PROPERTY_CACHE_LOG2 10
+#define PROPERTY_CACHE_SIZE JS_BIT(PROPERTY_CACHE_LOG2)
+#define PROPERTY_CACHE_MASK JS_BITMASK(PROPERTY_CACHE_LOG2)
+
+#define PROPERTY_CACHE_HASH(obj, id) \
+ ((((jsuword)(obj) >> JSVAL_TAGBITS) ^ (jsuword)(id)) & PROPERTY_CACHE_MASK)
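+
+/*
+ * Object pointers are aligned so that their low JSVAL_TAGBITS bits are always
+ * zero (that is what makes jsval tagging possible); shifting them out before
+ * XORing with the id keeps those constant bits from biasing the hash.
+ */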
+
+#ifdef JS_THREADSAFE
+
+#if HAVE_ATOMIC_DWORD_ACCESS
+
+#define PCE_LOAD(cache, pce, entry) JS_ATOMIC_DWORD_LOAD(pce, entry)
+#define PCE_STORE(cache, pce, entry) JS_ATOMIC_DWORD_STORE(pce, entry)
+
+#else /* !HAVE_ATOMIC_DWORD_ACCESS */
+
+#define JS_PROPERTY_CACHE_METERING 1
+
+#define PCE_LOAD(cache, pce, entry) \
+ JS_BEGIN_MACRO \
+ uint32 prefills_; \
+ uint32 fills_ = (cache)->fills; \
+ do { \
+ /* Load until cache->fills is stable (see FILL macro below). */ \
+ prefills_ = fills_; \
+ (entry) = *(pce); \
+ } while ((fills_ = (cache)->fills) != prefills_); \
+ JS_END_MACRO
+
+#define PCE_STORE(cache, pce, entry) \
+ JS_BEGIN_MACRO \
+ do { \
+ /* Store until no racing collider stores half or all of pce. */ \
+ *(pce) = (entry); \
+ } while (PCE_OBJECT(*pce) != PCE_OBJECT(entry) || \
+ PCE_PROPERTY(*pce) != PCE_PROPERTY(entry)); \
+ JS_END_MACRO
+
+#endif /* !HAVE_ATOMIC_DWORD_ACCESS */
+
+#else /* !JS_THREADSAFE */
+
+#define PCE_LOAD(cache, pce, entry) ((entry) = *(pce))
+#define PCE_STORE(cache, pce, entry) (*(pce) = (entry))
+
+#endif /* !JS_THREADSAFE */
+
+typedef union JSPropertyCacheEntry {
+ struct {
+ JSObject *object; /* weak link to object */
+ JSScopeProperty *property; /* weak link to property */
+ } s;
+#ifdef HAVE_ATOMIC_DWORD_ACCESS
+ prdword align;
+#endif
+} JSPropertyCacheEntry;
+
+/* These may be called in lvalue or rvalue position. */
+#define PCE_OBJECT(entry) ((entry).s.object)
+#define PCE_PROPERTY(entry) ((entry).s.property)
+
+typedef struct JSPropertyCache {
+ JSPropertyCacheEntry table[PROPERTY_CACHE_SIZE];
+ JSBool empty;
+ JSBool disabled;
+#ifdef JS_PROPERTY_CACHE_METERING
+ uint32 fills;
+ uint32 recycles;
+ uint32 tests;
+ uint32 misses;
+ uint32 flushes;
+# define PCMETER(x) x
+#else
+# define PCMETER(x) /* nothing */
+#endif
+} JSPropertyCache;
+
+#define PROPERTY_CACHE_FILL(cache, obj, id, sprop) \
+ JS_BEGIN_MACRO \
+ JSPropertyCache *cache_ = (cache); \
+ if (!cache_->disabled) { \
+ uintN hashIndex_ = (uintN) PROPERTY_CACHE_HASH(obj, id); \
+ JSPropertyCacheEntry *pce_ = &cache_->table[hashIndex_]; \
+ JSPropertyCacheEntry entry_; \
+ JSScopeProperty *pce_sprop_; \
+ PCE_LOAD(cache_, pce_, entry_); \
+ pce_sprop_ = PCE_PROPERTY(entry_); \
+ PCMETER(if (pce_sprop_ && pce_sprop_ != sprop) \
+ cache_->recycles++); \
+ PCE_OBJECT(entry_) = obj; \
+ PCE_PROPERTY(entry_) = sprop; \
+ cache_->empty = JS_FALSE; \
+ PCMETER(cache_->fills++); \
+ PCE_STORE(cache_, pce_, entry_); \
+ } \
+ JS_END_MACRO
+
+#define PROPERTY_CACHE_TEST(cache, obj, id, sprop) \
+ JS_BEGIN_MACRO \
+ uintN hashIndex_ = (uintN) PROPERTY_CACHE_HASH(obj, id); \
+ JSPropertyCache *cache_ = (cache); \
+ JSPropertyCacheEntry *pce_ = &cache_->table[hashIndex_]; \
+ JSPropertyCacheEntry entry_; \
+ JSScopeProperty *pce_sprop_; \
+ PCE_LOAD(cache_, pce_, entry_); \
+ pce_sprop_ = PCE_PROPERTY(entry_); \
+ PCMETER(cache_->tests++); \
+ if (pce_sprop_ && \
+ PCE_OBJECT(entry_) == obj && \
+ pce_sprop_->id == id) { \
+ sprop = pce_sprop_; \
+ } else { \
+ PCMETER(cache_->misses++); \
+ sprop = NULL; \
+ } \
+ JS_END_MACRO
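+
+/*
+ * A minimal usage sketch of the two macros above, assuming the per-runtime
+ * cache (cx->runtime->propertyCache) and leaving the slow lookup path to
+ * whatever the caller normally does on a miss:
+ *
+ *     JSScopeProperty *sprop;
+ *
+ *     PROPERTY_CACHE_TEST(&cx->runtime->propertyCache, obj, id, sprop);
+ *     if (!sprop) {
+ *         ... full property lookup, yielding sprop ...
+ *         if (sprop)
+ *             PROPERTY_CACHE_FILL(&cx->runtime->propertyCache, obj, id, sprop);
+ *     }
+ */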
+
+extern void
+js_FlushPropertyCache(JSContext *cx);
+
+extern void
+js_DisablePropertyCache(JSContext *cx);
+
+extern void
+js_EnablePropertyCache(JSContext *cx);
+
+extern JS_FRIEND_API(jsval *)
+js_AllocStack(JSContext *cx, uintN nslots, void **markp);
+
+extern JS_FRIEND_API(void)
+js_FreeStack(JSContext *cx, void *mark);
+
+extern JSBool
+js_GetArgument(JSContext *cx, JSObject *obj, jsval id, jsval *vp);
+
+extern JSBool
+js_SetArgument(JSContext *cx, JSObject *obj, jsval id, jsval *vp);
+
+extern JSBool
+js_GetLocalVariable(JSContext *cx, JSObject *obj, jsval id, jsval *vp);
+
+extern JSBool
+js_SetLocalVariable(JSContext *cx, JSObject *obj, jsval id, jsval *vp);
+
+#ifdef DUMP_CALL_TABLE
+# define JSOPTION_LOGCALL_TOSOURCE JS_BIT(15)
+
+extern JSHashTable *js_CallTable;
+extern size_t js_LogCallToSourceLimit;
+
+extern void js_DumpCallTable(JSContext *cx);
+#endif
+
+/*
+ * Refresh and return fp->scopeChain. It may be stale if block scopes are
+ * active but not yet reflected by objects in the scope chain. If a block
+ * scope contains a with, eval, XML filtering predicate, or similar such
+ * dynamically scoped construct, then compile-time block scope at fp->blocks
+ * must be reflected at runtime.
+ */
+extern JSObject *
+js_GetScopeChain(JSContext *cx, JSStackFrame *fp);
+
+/*
+ * Compute the 'this' parameter for a call with nominal 'this' given by thisp
+ * and arguments including argv[-1] (nominal 'this') and argv[-2] (callee).
+ * Activation objects ("Call" objects not created with "new Call()", i.e.,
+ * "Call" objects that have private data) may not be referred to by 'this',
+ * per ECMA-262, so js_ComputeThis censors them.
+ */
+extern JSObject *
+js_ComputeThis(JSContext *cx, JSObject *thisp, jsval *argv);
+
+/*
+ * NB: js_Invoke requires that cx is currently running JS (i.e., that cx->fp
+ * is non-null), and that the callee, |this| parameter, and actual arguments
+ * are already pushed on the stack under cx->fp->sp.
+ */
+extern JS_FRIEND_API(JSBool)
+js_Invoke(JSContext *cx, uintN argc, uintN flags);
+
+/*
+ * Consolidated js_Invoke flags simply rename certain JSFRAME_* flags, so that
+ * we can share bits stored in JSStackFrame.flags and passed to:
+ *
+ * js_Invoke
+ * js_InternalInvoke
+ * js_ValueToFunction
+ * js_ValueToFunctionObject
+ * js_ValueToCallableObject
+ * js_ReportIsNotFunction
+ *
+ * See jsfun.h for the latter four and flag renaming macros.
+ */
+#define JSINVOKE_CONSTRUCT JSFRAME_CONSTRUCTING
+#define JSINVOKE_INTERNAL JSFRAME_INTERNAL
+#define JSINVOKE_SKIP_CALLER JSFRAME_SKIP_CALLER
+#define JSINVOKE_ITERATOR JSFRAME_ITERATOR
+
+/*
+ * Mask to isolate construct and iterator flags for use with jsfun.h functions.
+ */
+#define JSINVOKE_FUNFLAGS (JSINVOKE_CONSTRUCT | JSINVOKE_ITERATOR)
+
+/*
+ * "Internal" calls may come from C or C++ code using a JSContext on which no
+ * JS is running (!cx->fp), so they may need to push a dummy JSStackFrame.
+ */
+#define js_InternalCall(cx,obj,fval,argc,argv,rval) \
+ js_InternalInvoke(cx, obj, fval, 0, argc, argv, rval)
+
+#define js_InternalConstruct(cx,obj,fval,argc,argv,rval) \
+ js_InternalInvoke(cx, obj, fval, JSINVOKE_CONSTRUCT, argc, argv, rval)
+
+extern JSBool
+js_InternalInvoke(JSContext *cx, JSObject *obj, jsval fval, uintN flags,
+ uintN argc, jsval *argv, jsval *rval);
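+
+/*
+ * A minimal sketch of issuing an internal call from C, assuming the caller
+ * already holds a rooted function value fval and a |this| object obj; on a
+ * JS_FALSE return, an error or exception is pending on cx:
+ *
+ *     jsval argv[1], rval;
+ *
+ *     argv[0] = INT_TO_JSVAL(42);
+ *     if (!js_InternalCall(cx, obj, fval, 1, argv, &rval))
+ *         return JS_FALSE;
+ */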
+
+extern JSBool
+js_InternalGetOrSet(JSContext *cx, JSObject *obj, jsid id, jsval fval,
+ JSAccessMode mode, uintN argc, jsval *argv, jsval *rval);
+
+extern JSBool
+js_Execute(JSContext *cx, JSObject *chain, JSScript *script,
+ JSStackFrame *down, uintN flags, jsval *result);
+
+extern JSBool
+js_CheckRedeclaration(JSContext *cx, JSObject *obj, jsid id, uintN attrs,
+ JSObject **objp, JSProperty **propp);
+
+extern JSBool
+js_StrictlyEqual(jsval lval, jsval rval);
+
+extern JSBool
+js_InvokeConstructor(JSContext *cx, jsval *vp, uintN argc);
+
+extern JSBool
+js_Interpret(JSContext *cx, jsbytecode *pc, jsval *result);
+
+JS_END_EXTERN_C
+
+#endif /* jsinterp_h___ */
diff --git a/src/third_party/js-1.7/jsiter.c b/src/third_party/js-1.7/jsiter.c
new file mode 100644
index 00000000000..0a4de54214f
--- /dev/null
+++ b/src/third_party/js-1.7/jsiter.c
@@ -0,0 +1,1080 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JavaScript iterators.
+ */
+#include "jsstddef.h"
+#include <string.h> /* for memcpy */
+#include "jstypes.h"
+#include "jsutil.h"
+#include "jsarena.h"
+#include "jsapi.h"
+#include "jsarray.h"
+#include "jsatom.h"
+#include "jsbool.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsexn.h"
+#include "jsfun.h"
+#include "jsgc.h"
+#include "jsinterp.h"
+#include "jsiter.h"
+#include "jslock.h"
+#include "jsnum.h"
+#include "jsobj.h"
+#include "jsopcode.h"
+#include "jsscope.h"
+#include "jsscript.h"
+
+#if JS_HAS_XML_SUPPORT
+#include "jsxml.h"
+#endif
+
+extern const char js_throw_str[]; /* from jsscan.h */
+
+#define JSSLOT_ITER_STATE (JSSLOT_PRIVATE)
+#define JSSLOT_ITER_FLAGS (JSSLOT_PRIVATE + 1)
+
+#if JSSLOT_ITER_FLAGS >= JS_INITIAL_NSLOTS
+#error JS_INITIAL_NSLOTS must be greater than JSSLOT_ITER_FLAGS.
+#endif
+
+/*
+ * Shared code to close iterator's state either through an explicit call or
+ * when GC detects that the iterator is no longer reachable.
+ */
+void
+js_CloseIteratorState(JSContext *cx, JSObject *iterobj)
+{
+ jsval *slots;
+ jsval state, parent;
+ JSObject *iterable;
+
+ JS_ASSERT(JS_InstanceOf(cx, iterobj, &js_IteratorClass, NULL));
+ slots = iterobj->slots;
+
+ /* Avoid double work if js_CloseNativeIterator was called on obj. */
+ state = slots[JSSLOT_ITER_STATE];
+ if (JSVAL_IS_NULL(state))
+ return;
+
+ /* Protect against failure to fully initialize obj. */
+ parent = slots[JSSLOT_PARENT];
+ if (!JSVAL_IS_PRIMITIVE(parent)) {
+ iterable = JSVAL_TO_OBJECT(parent);
+#if JS_HAS_XML_SUPPORT
+ if ((JSVAL_TO_INT(slots[JSSLOT_ITER_FLAGS]) & JSITER_FOREACH) &&
+ OBJECT_IS_XML(cx, iterable)) {
+ ((JSXMLObjectOps *) iterable->map->ops)->
+ enumerateValues(cx, iterable, JSENUMERATE_DESTROY, &state,
+ NULL, NULL);
+ } else
+#endif
+ OBJ_ENUMERATE(cx, iterable, JSENUMERATE_DESTROY, &state, NULL);
+ }
+ slots[JSSLOT_ITER_STATE] = JSVAL_NULL;
+}
+
+JSClass js_IteratorClass = {
+ "Iterator",
+ JSCLASS_HAS_RESERVED_SLOTS(2) | /* slots for state and flags */
+ JSCLASS_HAS_CACHED_PROTO(JSProto_Iterator),
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+};
+
+static JSBool
+InitNativeIterator(JSContext *cx, JSObject *iterobj, JSObject *obj, uintN flags)
+{
+ jsval state;
+ JSBool ok;
+
+ JS_ASSERT(JSVAL_TO_PRIVATE(iterobj->slots[JSSLOT_CLASS]) ==
+ &js_IteratorClass);
+
+ /* Initialize iterobj in case of enumerate hook failure. */
+ iterobj->slots[JSSLOT_PARENT] = OBJECT_TO_JSVAL(obj);
+ iterobj->slots[JSSLOT_ITER_STATE] = JSVAL_NULL;
+ iterobj->slots[JSSLOT_ITER_FLAGS] = INT_TO_JSVAL(flags);
+ if (!js_RegisterCloseableIterator(cx, iterobj))
+ return JS_FALSE;
+ if (!obj)
+ return JS_TRUE;
+
+ ok =
+#if JS_HAS_XML_SUPPORT
+ ((flags & JSITER_FOREACH) && OBJECT_IS_XML(cx, obj))
+ ? ((JSXMLObjectOps *) obj->map->ops)->
+ enumerateValues(cx, obj, JSENUMERATE_INIT, &state, NULL, NULL)
+ :
+#endif
+ OBJ_ENUMERATE(cx, obj, JSENUMERATE_INIT, &state, NULL);
+ if (!ok)
+ return JS_FALSE;
+
+ iterobj->slots[JSSLOT_ITER_STATE] = state;
+ if (flags & JSITER_ENUMERATE) {
+ /*
+ * The enumerating iterator needs the original object to suppress
+ * enumeration of deleted or shadowed prototype properties. Since the
+ * enumerator never escapes to scripts, we use the prototype slot to
+ * store the original object.
+ */
+ JS_ASSERT(obj != iterobj);
+ iterobj->slots[JSSLOT_PROTO] = OBJECT_TO_JSVAL(obj);
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+Iterator(JSContext *cx, JSObject *iterobj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSBool keyonly;
+ uintN flags;
+ JSObject *obj;
+
+ keyonly = JS_FALSE;
+ if (!js_ValueToBoolean(cx, argv[1], &keyonly))
+ return JS_FALSE;
+ flags = keyonly ? 0 : JSITER_FOREACH;
+
+ if (cx->fp->flags & JSFRAME_CONSTRUCTING) {
+ /* XXX work around old valueOf call hidden beneath js_ValueToObject */
+ if (!JSVAL_IS_PRIMITIVE(argv[0])) {
+ obj = JSVAL_TO_OBJECT(argv[0]);
+ } else {
+ obj = js_ValueToNonNullObject(cx, argv[0]);
+ if (!obj)
+ return JS_FALSE;
+ argv[0] = OBJECT_TO_JSVAL(obj);
+ }
+ return InitNativeIterator(cx, iterobj, obj, flags);
+ }
+
+ *rval = argv[0];
+ return js_ValueToIterator(cx, flags, rval);
+}
+
+static JSBool
+NewKeyValuePair(JSContext *cx, jsid key, jsval val, jsval *rval)
+{
+ jsval vec[2];
+ JSTempValueRooter tvr;
+ JSObject *aobj;
+
+ vec[0] = ID_TO_VALUE(key);
+ vec[1] = val;
+
+ JS_PUSH_TEMP_ROOT(cx, 2, vec, &tvr);
+ aobj = js_NewArrayObject(cx, 2, vec);
+ *rval = OBJECT_TO_JSVAL(aobj);
+ JS_POP_TEMP_ROOT(cx, &tvr);
+
+ return aobj != NULL;
+}
+
+static JSBool
+IteratorNextImpl(JSContext *cx, JSObject *obj, jsval *rval)
+{
+ JSObject *iterable;
+ jsval state;
+ uintN flags;
+ JSBool foreach, ok;
+ jsid id;
+
+ JS_ASSERT(OBJ_GET_CLASS(cx, obj) == &js_IteratorClass);
+
+ iterable = OBJ_GET_PARENT(cx, obj);
+ JS_ASSERT(iterable);
+ state = OBJ_GET_SLOT(cx, obj, JSSLOT_ITER_STATE);
+ if (JSVAL_IS_NULL(state))
+ goto stop;
+
+ flags = JSVAL_TO_INT(OBJ_GET_SLOT(cx, obj, JSSLOT_ITER_FLAGS));
+ JS_ASSERT(!(flags & JSITER_ENUMERATE));
+ foreach = (flags & JSITER_FOREACH) != 0;
+ ok =
+#if JS_HAS_XML_SUPPORT
+ (foreach && OBJECT_IS_XML(cx, iterable))
+ ? ((JSXMLObjectOps *) iterable->map->ops)->
+ enumerateValues(cx, iterable, JSENUMERATE_NEXT, &state,
+ &id, rval)
+ :
+#endif
+ OBJ_ENUMERATE(cx, iterable, JSENUMERATE_NEXT, &state, &id);
+ if (!ok)
+ return JS_FALSE;
+
+ OBJ_SET_SLOT(cx, obj, JSSLOT_ITER_STATE, state);
+ if (JSVAL_IS_NULL(state))
+ goto stop;
+
+ if (foreach) {
+#if JS_HAS_XML_SUPPORT
+ if (!OBJECT_IS_XML(cx, iterable) &&
+ !OBJ_GET_PROPERTY(cx, iterable, id, rval)) {
+ return JS_FALSE;
+ }
+#endif
+ if (!NewKeyValuePair(cx, id, *rval, rval))
+ return JS_FALSE;
+ } else {
+ *rval = ID_TO_VALUE(id);
+ }
+ return JS_TRUE;
+
+ stop:
+ JS_ASSERT(OBJ_GET_SLOT(cx, obj, JSSLOT_ITER_STATE) == JSVAL_NULL);
+ *rval = JSVAL_HOLE;
+ return JS_TRUE;
+}
+
+static JSBool
+js_ThrowStopIteration(JSContext *cx, JSObject *obj)
+{
+ jsval v;
+
+ JS_ASSERT(!JS_IsExceptionPending(cx));
+ if (js_FindClassObject(cx, NULL, INT_TO_JSID(JSProto_StopIteration), &v))
+ JS_SetPendingException(cx, v);
+ return JS_FALSE;
+}
+
+static JSBool
+iterator_next(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ if (!JS_InstanceOf(cx, obj, &js_IteratorClass, argv))
+ return JS_FALSE;
+
+ if (!IteratorNextImpl(cx, obj, rval))
+ return JS_FALSE;
+
+ if (*rval == JSVAL_HOLE) {
+ *rval = JSVAL_NULL;
+ js_ThrowStopIteration(cx, obj);
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+iterator_self(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ *rval = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+}
+
+static JSFunctionSpec iterator_methods[] = {
+ {js_iterator_str, iterator_self, 0,JSPROP_READONLY|JSPROP_PERMANENT,0},
+ {js_next_str, iterator_next, 0,JSPROP_READONLY|JSPROP_PERMANENT,0},
+ {0,0,0,0,0}
+};
+
+uintN
+js_GetNativeIteratorFlags(JSContext *cx, JSObject *iterobj)
+{
+ if (OBJ_GET_CLASS(cx, iterobj) != &js_IteratorClass)
+ return 0;
+ return JSVAL_TO_INT(OBJ_GET_SLOT(cx, iterobj, JSSLOT_ITER_FLAGS));
+}
+
+void
+js_CloseNativeIterator(JSContext *cx, JSObject *iterobj)
+{
+ uintN flags;
+
+ /*
+ * If this iterator is not an instance of the native default iterator
+ * class, leave it to be GC'ed.
+ */
+ if (!JS_InstanceOf(cx, iterobj, &js_IteratorClass, NULL))
+ return;
+
+ /*
+ * If this iterator was not created by js_ValueToIterator called from the
+ * for-in loop code in js_Interpret, leave it to be GC'ed.
+ */
+ flags = JSVAL_TO_INT(OBJ_GET_SLOT(cx, iterobj, JSSLOT_ITER_FLAGS));
+ if (!(flags & JSITER_ENUMERATE))
+ return;
+
+ js_CloseIteratorState(cx, iterobj);
+}
+
+/*
+ * Call ToObject(v).__iterator__(keyonly) if ToObject(v).__iterator__ exists.
+ * Otherwise construct the default iterator.
+ */
+JSBool
+js_ValueToIterator(JSContext *cx, uintN flags, jsval *vp)
+{
+ JSObject *obj;
+ JSTempValueRooter tvr;
+ const JSAtom *atom;
+ JSBool ok;
+ JSObject *iterobj;
+ jsval arg;
+ JSString *str;
+
+ JS_ASSERT(!(flags & ~(JSITER_ENUMERATE |
+ JSITER_FOREACH |
+ JSITER_KEYVALUE)));
+
+ /* JSITER_KEYVALUE must always come with JSITER_FOREACH */
+ JS_ASSERT(!(flags & JSITER_KEYVALUE) || (flags & JSITER_FOREACH));
+
+ /* XXX work around old valueOf call hidden beneath js_ValueToObject */
+ if (!JSVAL_IS_PRIMITIVE(*vp)) {
+ obj = JSVAL_TO_OBJECT(*vp);
+ } else {
+ /*
+ * Enumerating over null and undefined gives an empty enumerator.
+ * This is contrary to ECMA-262 9.9 ToObject, invoked from step 3 of
+ * the first production in 12.6.4 and step 4 of the second production,
+ * but it's "web JS" compatible.
+ */
+ if ((flags & JSITER_ENUMERATE)) {
+ if (!js_ValueToObject(cx, *vp, &obj))
+ return JS_FALSE;
+ if (!obj)
+ goto default_iter;
+ } else {
+ obj = js_ValueToNonNullObject(cx, *vp);
+ if (!obj)
+ return JS_FALSE;
+ }
+ }
+
+ JS_ASSERT(obj);
+ JS_PUSH_TEMP_ROOT_OBJECT(cx, obj, &tvr);
+
+ atom = cx->runtime->atomState.iteratorAtom;
+#if JS_HAS_XML_SUPPORT
+ if (OBJECT_IS_XML(cx, obj)) {
+ if (!js_GetXMLFunction(cx, obj, ATOM_TO_JSID(atom), vp))
+ goto bad;
+ } else
+#endif
+ {
+ if (!OBJ_GET_PROPERTY(cx, obj, ATOM_TO_JSID(atom), vp))
+ goto bad;
+ }
+
+ if (JSVAL_IS_VOID(*vp)) {
+ default_iter:
+ /*
+ * Fail over to the default enumerating native iterator.
+ *
+ * Create iterobj with a NULL parent to ensure that we use the correct
+ * scope chain to look up the iterator's constructor. Since we use the
+ * parent slot to keep track of the iterable, we must fix it up after.
+ */
+ iterobj = js_NewObject(cx, &js_IteratorClass, NULL, NULL);
+ if (!iterobj)
+ goto bad;
+
+ /* Store iterobj in *vp to protect it from GC (callers must root vp). */
+ *vp = OBJECT_TO_JSVAL(iterobj);
+
+ if (!InitNativeIterator(cx, iterobj, obj, flags))
+ goto bad;
+ } else {
+ arg = BOOLEAN_TO_JSVAL((flags & JSITER_FOREACH) == 0);
+ if (!js_InternalInvoke(cx, obj, *vp, JSINVOKE_ITERATOR, 1, &arg, vp))
+ goto bad;
+ if (JSVAL_IS_PRIMITIVE(*vp)) {
+ str = js_DecompileValueGenerator(cx, JSDVG_SEARCH_STACK, *vp, NULL);
+ if (str) {
+ JS_ReportErrorNumberUC(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_ITERATOR_RETURN,
+ JSSTRING_CHARS(str),
+ JSSTRING_CHARS(ATOM_TO_STRING(atom)));
+ }
+ goto bad;
+ }
+ }
+
+ ok = JS_TRUE;
+ out:
+ if (obj)
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return ok;
+ bad:
+ ok = JS_FALSE;
+ goto out;
+}
+
+static JSBool
+CallEnumeratorNext(JSContext *cx, JSObject *iterobj, uintN flags, jsval *rval)
+{
+ JSObject *obj, *origobj;
+ jsval state;
+ JSBool foreach;
+ jsid id;
+ JSObject *obj2;
+ JSBool cond;
+ JSClass *clasp;
+ JSExtendedClass *xclasp;
+ JSProperty *prop;
+ JSString *str;
+
+ JS_ASSERT(flags & JSITER_ENUMERATE);
+ JS_ASSERT(JSVAL_TO_PRIVATE(iterobj->slots[JSSLOT_CLASS]) ==
+ &js_IteratorClass);
+
+ obj = JSVAL_TO_OBJECT(iterobj->slots[JSSLOT_PARENT]);
+ origobj = JSVAL_TO_OBJECT(iterobj->slots[JSSLOT_PROTO]);
+ state = iterobj->slots[JSSLOT_ITER_STATE];
+ if (JSVAL_IS_NULL(state))
+ goto stop;
+
+ foreach = (flags & JSITER_FOREACH) != 0;
+#if JS_HAS_XML_SUPPORT
+ /*
+ * Treat an XML object specially only when it starts the prototype chain.
+ * Otherwise we need to do the usual deleted and shadowed property checks.
+ */
+ if (obj == origobj && OBJECT_IS_XML(cx, obj)) {
+ if (foreach) {
+ JSXMLObjectOps *xmlops = (JSXMLObjectOps *) obj->map->ops;
+
+ if (!xmlops->enumerateValues(cx, obj, JSENUMERATE_NEXT, &state,
+ &id, rval)) {
+ return JS_FALSE;
+ }
+ } else {
+ if (!OBJ_ENUMERATE(cx, obj, JSENUMERATE_NEXT, &state, &id))
+ return JS_FALSE;
+ }
+ iterobj->slots[JSSLOT_ITER_STATE] = state;
+ if (JSVAL_IS_NULL(state))
+ goto stop;
+ } else
+#endif
+ {
+ restart:
+ if (!OBJ_ENUMERATE(cx, obj, JSENUMERATE_NEXT, &state, &id))
+ return JS_TRUE;
+
+ iterobj->slots[JSSLOT_ITER_STATE] = state;
+ if (JSVAL_IS_NULL(state)) {
+#if JS_HAS_XML_SUPPORT
+ if (OBJECT_IS_XML(cx, obj)) {
+ /*
+ * We just finished enumerating an XML obj that is present on
+ * the prototype chain of a non-XML origobj. Stop further
+ * prototype chain searches because XML objects don't
+ * enumerate prototypes.
+ */
+ JS_ASSERT(origobj != obj);
+ JS_ASSERT(!OBJECT_IS_XML(cx, origobj));
+ } else
+#endif
+ {
+ obj = OBJ_GET_PROTO(cx, obj);
+ if (obj) {
+ iterobj->slots[JSSLOT_PARENT] = OBJECT_TO_JSVAL(obj);
+ if (!OBJ_ENUMERATE(cx, obj, JSENUMERATE_INIT, &state, NULL))
+ return JS_FALSE;
+ iterobj->slots[JSSLOT_ITER_STATE] = state;
+ if (!JSVAL_IS_NULL(state))
+ goto restart;
+ }
+ }
+ goto stop;
+ }
+
+ /* Skip properties not in obj when looking from origobj. */
+ if (!OBJ_LOOKUP_PROPERTY(cx, origobj, id, &obj2, &prop))
+ return JS_FALSE;
+ if (!prop)
+ goto restart;
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+
+ /*
+ * If the id was found in a prototype object or an unrelated object
+ * (specifically, not in an inner object for obj), skip it. This step
+ * means that all OBJ_LOOKUP_PROPERTY implementations must return an
+ * object further along on the prototype chain, or else possibly an
+ * object returned by the JSExtendedClass.outerObject optional hook.
+ */
+ if (obj != obj2) {
+ cond = JS_FALSE;
+ clasp = OBJ_GET_CLASS(cx, obj2);
+ if (clasp->flags & JSCLASS_IS_EXTENDED) {
+ xclasp = (JSExtendedClass *) clasp;
+ cond = xclasp->outerObject &&
+ xclasp->outerObject(cx, obj2) == obj;
+ }
+ if (!cond)
+ goto restart;
+ }
+
+ if (foreach) {
+ /* Get property querying the original object. */
+ if (!OBJ_GET_PROPERTY(cx, origobj, id, rval))
+ return JS_FALSE;
+ }
+ }
+
+ if (foreach) {
+ if (flags & JSITER_KEYVALUE) {
+ if (!NewKeyValuePair(cx, id, *rval, rval))
+ return JS_FALSE;
+ }
+ } else {
+ /* Make rval a string for uniformity and compatibility. */
+ if (JSID_IS_ATOM(id)) {
+ *rval = ATOM_KEY(JSID_TO_ATOM(id));
+ }
+#if JS_HAS_XML_SUPPORT
+ else if (JSID_IS_OBJECT(id)) {
+ str = js_ValueToString(cx, OBJECT_JSID_TO_JSVAL(id));
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ }
+#endif
+ else {
+ str = js_NumberToString(cx, (jsdouble)JSID_TO_INT(id));
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ }
+ }
+ return JS_TRUE;
+
+ stop:
+ JS_ASSERT(iterobj->slots[JSSLOT_ITER_STATE] == JSVAL_NULL);
+ *rval = JSVAL_HOLE;
+ return JS_TRUE;
+}
+
+JSBool
+js_CallIteratorNext(JSContext *cx, JSObject *iterobj, jsval *rval)
+{
+ uintN flags;
+
+ /* Fast path for native iterators */
+ if (OBJ_GET_CLASS(cx, iterobj) == &js_IteratorClass) {
+ flags = JSVAL_TO_INT(OBJ_GET_SLOT(cx, iterobj, JSSLOT_ITER_FLAGS));
+ if (flags & JSITER_ENUMERATE)
+ return CallEnumeratorNext(cx, iterobj, flags, rval);
+
+ /*
+ * Call next directly as all the methods of the native iterator are
+ * read-only and permanent.
+ */
+ if (!IteratorNextImpl(cx, iterobj, rval))
+ return JS_FALSE;
+ } else {
+ jsid id = ATOM_TO_JSID(cx->runtime->atomState.nextAtom);
+
+ if (!JS_GetMethodById(cx, iterobj, id, &iterobj, rval))
+ return JS_FALSE;
+ if (!js_InternalCall(cx, iterobj, *rval, 0, NULL, rval)) {
+ /* Check for StopIteration. */
+ if (!cx->throwing ||
+ JSVAL_IS_PRIMITIVE(cx->exception) ||
+ OBJ_GET_CLASS(cx, JSVAL_TO_OBJECT(cx->exception))
+ != &js_StopIterationClass) {
+ return JS_FALSE;
+ }
+
+ /* Inline JS_ClearPendingException(cx). */
+ cx->throwing = JS_FALSE;
+ cx->exception = JSVAL_VOID;
+ *rval = JSVAL_HOLE;
+ return JS_TRUE;
+ }
+ }
+
+ return JS_TRUE;
+}
+
+static JSBool
+stopiter_hasInstance(JSContext *cx, JSObject *obj, jsval v, JSBool *bp)
+{
+ *bp = !JSVAL_IS_PRIMITIVE(v) &&
+ OBJ_GET_CLASS(cx, JSVAL_TO_OBJECT(v)) == &js_StopIterationClass;
+ return JS_TRUE;
+}
+
+JSClass js_StopIterationClass = {
+ js_StopIteration_str,
+ JSCLASS_HAS_CACHED_PROTO(JSProto_StopIteration),
+ JS_PropertyStub, JS_PropertyStub,
+ JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub,
+ JS_ConvertStub, JS_FinalizeStub,
+ NULL, NULL,
+ NULL, NULL,
+ NULL, stopiter_hasInstance,
+ NULL, NULL
+};
+
+#if JS_HAS_GENERATORS
+
+static void
+generator_finalize(JSContext *cx, JSObject *obj)
+{
+ JSGenerator *gen;
+
+ gen = (JSGenerator *) JS_GetPrivate(cx, obj);
+ if (gen) {
+ /*
+ * gen can be open on shutdown when close hooks are ignored or when
+ * the embedding cancels scheduled close hooks.
+ */
+ JS_ASSERT(gen->state == JSGEN_NEWBORN || gen->state == JSGEN_CLOSED ||
+ gen->state == JSGEN_OPEN);
+ JS_free(cx, gen);
+ }
+}
+
+static uint32
+generator_mark(JSContext *cx, JSObject *obj, void *arg)
+{
+ JSGenerator *gen;
+
+ gen = (JSGenerator *) JS_GetPrivate(cx, obj);
+ if (gen) {
+ /*
+ * We must mark argv[-2], as js_MarkStackFrame will not. Note that
+ * js_MarkStackFrame will mark thisp (argv[-1]) and actual arguments,
+ * plus any missing formals and local GC roots.
+ */
+ JS_ASSERT(!JSVAL_IS_PRIMITIVE(gen->frame.argv[-2]));
+ GC_MARK(cx, JSVAL_TO_GCTHING(gen->frame.argv[-2]), "generator");
+ js_MarkStackFrame(cx, &gen->frame);
+ }
+ return 0;
+}
+
+JSClass js_GeneratorClass = {
+ js_Generator_str,
+ JSCLASS_HAS_PRIVATE | JSCLASS_IS_ANONYMOUS |
+ JSCLASS_HAS_CACHED_PROTO(JSProto_Generator),
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, generator_finalize,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, generator_mark, NULL
+};
+
+/*
+ * Called from the JSOP_GENERATOR case in the interpreter, with fp referring
+ * to the frame by which the generator function was activated. Create a new
+ * JSGenerator object, which contains its own JSStackFrame that we populate
+ * from *fp. We know that upon return, the JSOP_GENERATOR opcode will return
+ * from the activation in fp, so we can steal away fp->callobj and fp->argsobj
+ * if they are non-null.
+ */
+JSObject *
+js_NewGenerator(JSContext *cx, JSStackFrame *fp)
+{
+ JSObject *obj;
+ uintN argc, nargs, nvars, depth, nslots;
+ JSGenerator *gen;
+ jsval *newsp;
+
+ /* After the following return, failing control flow must goto bad. */
+ obj = js_NewObject(cx, &js_GeneratorClass, NULL, NULL);
+ if (!obj)
+ return NULL;
+
+ /* Load and compute stack slot counts. */
+ argc = fp->argc;
+ nargs = JS_MAX(argc, fp->fun->nargs);
+ nvars = fp->nvars;
+ depth = fp->script->depth;
+ nslots = 2 + nargs + nvars + 2 * depth;
+
+ /* Allocate obj's private data struct. */
+ gen = (JSGenerator *)
+ JS_malloc(cx, sizeof(JSGenerator) + (nslots - 1) * sizeof(jsval));
+ if (!gen)
+ goto bad;
+
+ gen->obj = obj;
+
+ /* Steal away objects reflecting fp and point them at gen->frame. */
+ gen->frame.callobj = fp->callobj;
+ if (fp->callobj) {
+ JS_SetPrivate(cx, fp->callobj, &gen->frame);
+ fp->callobj = NULL;
+ }
+ gen->frame.argsobj = fp->argsobj;
+ if (fp->argsobj) {
+ JS_SetPrivate(cx, fp->argsobj, &gen->frame);
+ fp->argsobj = NULL;
+ }
+
+ /* These two references can be shared with fp until it goes away. */
+ gen->frame.varobj = fp->varobj;
+ gen->frame.thisp = fp->thisp;
+
+ /* Copy call-invariant script and function references. */
+ gen->frame.script = fp->script;
+ gen->frame.fun = fp->fun;
+
+ /* Use newsp to carve space out of gen->stack. */
+ newsp = gen->stack;
+ gen->arena.next = NULL;
+ gen->arena.base = (jsuword) newsp;
+ gen->arena.limit = gen->arena.avail = (jsuword) (newsp + nslots);
+
+#define COPY_STACK_ARRAY(vec,cnt,num) \
+ JS_BEGIN_MACRO \
+ gen->frame.cnt = cnt; \
+ gen->frame.vec = newsp; \
+ newsp += (num); \
+ memcpy(gen->frame.vec, fp->vec, (num) * sizeof(jsval)); \
+ JS_END_MACRO
+
+ /* Copy argv, rval, and vars. */
+ *newsp++ = fp->argv[-2];
+ *newsp++ = fp->argv[-1];
+ COPY_STACK_ARRAY(argv, argc, nargs);
+ gen->frame.rval = fp->rval;
+ COPY_STACK_ARRAY(vars, nvars, nvars);
+
+#undef COPY_STACK_ARRAY
+
+ /* Initialize or copy virtual machine state. */
+ gen->frame.down = NULL;
+ gen->frame.annotation = NULL;
+ gen->frame.scopeChain = fp->scopeChain;
+ gen->frame.pc = fp->pc;
+
+ /* Allocate generating pc and operand stack space. */
+ gen->frame.spbase = gen->frame.sp = newsp + depth;
+
+ /* Copy remaining state (XXX sharp* and xml* should be local vars). */
+ gen->frame.sharpDepth = 0;
+ gen->frame.sharpArray = NULL;
+ gen->frame.flags = fp->flags | JSFRAME_GENERATOR;
+ gen->frame.dormantNext = NULL;
+ gen->frame.xmlNamespace = NULL;
+ gen->frame.blockChain = NULL;
+
+ /* Note that gen is newborn. */
+ gen->state = JSGEN_NEWBORN;
+
+ if (!JS_SetPrivate(cx, obj, gen)) {
+ JS_free(cx, gen);
+ goto bad;
+ }
+
+ /*
+ * Register with GC to ensure that suspended finally blocks will be
+ * executed.
+ */
+ js_RegisterGenerator(cx, gen);
+ return obj;
+
+ bad:
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ return NULL;
+}
+
+typedef enum JSGeneratorOp {
+ JSGENOP_NEXT,
+ JSGENOP_SEND,
+ JSGENOP_THROW,
+ JSGENOP_CLOSE
+} JSGeneratorOp;
+
+/*
+ * Start newborn or restart yielding generator and perform the requested
+ * operation inside its frame.
+ */
+static JSBool
+SendToGenerator(JSContext *cx, JSGeneratorOp op, JSObject *obj,
+ JSGenerator *gen, jsval arg, jsval *rval)
+{
+ JSStackFrame *fp;
+ jsval junk;
+ JSArena *arena;
+ JSBool ok;
+
+ JS_ASSERT(gen->state == JSGEN_NEWBORN || gen->state == JSGEN_OPEN);
+ switch (op) {
+ case JSGENOP_NEXT:
+ case JSGENOP_SEND:
+ if (gen->state == JSGEN_OPEN) {
+ /*
+ * Store the argument to send as the result of the yield
+ * expression.
+ */
+ gen->frame.sp[-1] = arg;
+ }
+ gen->state = JSGEN_RUNNING;
+ break;
+
+ case JSGENOP_THROW:
+ JS_SetPendingException(cx, arg);
+ gen->state = JSGEN_RUNNING;
+ break;
+
+ default:
+ JS_ASSERT(op == JSGENOP_CLOSE);
+ JS_SetPendingException(cx, JSVAL_ARETURN);
+ gen->state = JSGEN_CLOSING;
+ break;
+ }
+
+ /* Extend the current stack pool with gen->arena. */
+ arena = cx->stackPool.current;
+ JS_ASSERT(!arena->next);
+ JS_ASSERT(!gen->arena.next);
+ JS_ASSERT(cx->stackPool.current != &gen->arena);
+ cx->stackPool.current = arena->next = &gen->arena;
+
+ /* Push gen->frame around the interpreter activation. */
+ fp = cx->fp;
+ cx->fp = &gen->frame;
+ gen->frame.down = fp;
+ ok = js_Interpret(cx, gen->frame.pc, &junk);
+ cx->fp = fp;
+ gen->frame.down = NULL;
+
+ /* Retract the stack pool and sanitize gen->arena. */
+ JS_ASSERT(!gen->arena.next);
+ JS_ASSERT(arena->next == &gen->arena);
+ JS_ASSERT(cx->stackPool.current == &gen->arena);
+ cx->stackPool.current = arena;
+ arena->next = NULL;
+
+ if (gen->frame.flags & JSFRAME_YIELDING) {
+ /* Yield cannot fail, throw or be called on closing. */
+ JS_ASSERT(ok);
+ JS_ASSERT(!cx->throwing);
+ JS_ASSERT(gen->state == JSGEN_RUNNING);
+ JS_ASSERT(op != JSGENOP_CLOSE);
+ gen->frame.flags &= ~JSFRAME_YIELDING;
+ gen->state = JSGEN_OPEN;
+ *rval = gen->frame.rval;
+ return JS_TRUE;
+ }
+
+ gen->state = JSGEN_CLOSED;
+
+ if (ok) {
+ /* Returned, explicitly or by falling off the end. */
+ if (op == JSGENOP_CLOSE)
+ return JS_TRUE;
+ return js_ThrowStopIteration(cx, obj);
+ }
+
+ /*
+ * An error, silent termination by branch callback or an exception.
+ * Propagate the condition to the caller.
+ */
+ return JS_FALSE;
+}
+
+/*
+ * Execute gen's close hook after the GC detects that the object has become
+ * unreachable.
+ */
+JSBool
+js_CloseGeneratorObject(JSContext *cx, JSGenerator *gen)
+{
+ /* We pass null as rval since SendToGenerator never uses it with CLOSE. */
+ return SendToGenerator(cx, JSGENOP_CLOSE, gen->obj, gen, JSVAL_VOID, NULL);
+}
+
+/*
+ * Common subroutine of generator_(next|send|throw|close) methods.
+ */
+static JSBool
+generator_op(JSContext *cx, JSGeneratorOp op,
+ JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSGenerator *gen;
+ JSString *str;
+ jsval arg;
+
+ if (!JS_InstanceOf(cx, obj, &js_GeneratorClass, argv))
+ return JS_FALSE;
+
+ gen = (JSGenerator *) JS_GetPrivate(cx, obj);
+ if (gen == NULL) {
+ /* This happens when obj is the generator prototype. See bug 352885. */
+ goto closed_generator;
+ }
+
+ switch (gen->state) {
+ case JSGEN_NEWBORN:
+ switch (op) {
+ case JSGENOP_NEXT:
+ case JSGENOP_THROW:
+ break;
+
+ case JSGENOP_SEND:
+ if (!JSVAL_IS_VOID(argv[0])) {
+ str = js_DecompileValueGenerator(cx, JSDVG_SEARCH_STACK,
+ argv[0], NULL);
+ if (str) {
+ JS_ReportErrorNumberUC(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_GENERATOR_SEND,
+ JSSTRING_CHARS(str));
+ }
+ return JS_FALSE;
+ }
+ break;
+
+ default:
+ JS_ASSERT(op == JSGENOP_CLOSE);
+ gen->state = JSGEN_CLOSED;
+ return JS_TRUE;
+ }
+ break;
+
+ case JSGEN_OPEN:
+ break;
+
+ case JSGEN_RUNNING:
+ case JSGEN_CLOSING:
+ str = js_DecompileValueGenerator(cx, JSDVG_SEARCH_STACK, argv[-1],
+ JS_GetFunctionId(gen->frame.fun));
+ if (str) {
+ JS_ReportErrorNumberUC(cx, js_GetErrorMessage, NULL,
+ JSMSG_NESTING_GENERATOR,
+ JSSTRING_CHARS(str));
+ }
+ return JS_FALSE;
+
+ default:
+ JS_ASSERT(gen->state == JSGEN_CLOSED);
+
+ closed_generator:
+ switch (op) {
+ case JSGENOP_NEXT:
+ case JSGENOP_SEND:
+ return js_ThrowStopIteration(cx, obj);
+ case JSGENOP_THROW:
+ JS_SetPendingException(cx, argv[0]);
+ return JS_FALSE;
+ default:
+ JS_ASSERT(op == JSGENOP_CLOSE);
+ return JS_TRUE;
+ }
+ }
+
+ arg = (op == JSGENOP_SEND || op == JSGENOP_THROW)
+ ? argv[0]
+ : JSVAL_VOID;
+ if (!SendToGenerator(cx, op, obj, gen, arg, rval))
+ return JS_FALSE;
+ return JS_TRUE;
+}
+
+static JSBool
+generator_send(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ return generator_op(cx, JSGENOP_SEND, obj, argc, argv, rval);
+}
+
+static JSBool
+generator_next(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ return generator_op(cx, JSGENOP_NEXT, obj, argc, argv, rval);
+}
+
+static JSBool
+generator_throw(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ return generator_op(cx, JSGENOP_THROW, obj, argc, argv, rval);
+}
+
+static JSBool
+generator_close(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ return generator_op(cx, JSGENOP_CLOSE, obj, argc, argv, rval);
+}
+
+static JSFunctionSpec generator_methods[] = {
+ {js_iterator_str, iterator_self, 0,JSPROP_READONLY|JSPROP_PERMANENT,0},
+ {js_next_str, generator_next, 0,JSPROP_READONLY|JSPROP_PERMANENT,0},
+ {js_send_str, generator_send, 1,JSPROP_READONLY|JSPROP_PERMANENT,0},
+ {js_throw_str, generator_throw, 1,JSPROP_READONLY|JSPROP_PERMANENT,0},
+ {js_close_str, generator_close, 0,JSPROP_READONLY|JSPROP_PERMANENT,0},
+ {0,0,0,0,0}
+};
+
+#endif /* JS_HAS_GENERATORS */
+
+JSObject *
+js_InitIteratorClasses(JSContext *cx, JSObject *obj)
+{
+ JSObject *proto, *stop;
+
+ /* Idempotency required: we initialize several things, possibly lazily. */
+ if (!js_GetClassObject(cx, obj, JSProto_StopIteration, &stop))
+ return NULL;
+ if (stop)
+ return stop;
+
+ proto = JS_InitClass(cx, obj, NULL, &js_IteratorClass, Iterator, 2,
+ NULL, iterator_methods, NULL, NULL);
+ if (!proto)
+ return NULL;
+ proto->slots[JSSLOT_ITER_STATE] = JSVAL_NULL;
+
+#if JS_HAS_GENERATORS
+ /* Initialize the generator internals if configured. */
+ if (!JS_InitClass(cx, obj, NULL, &js_GeneratorClass, NULL, 0,
+ NULL, generator_methods, NULL, NULL)) {
+ return NULL;
+ }
+#endif
+
+ return JS_InitClass(cx, obj, NULL, &js_StopIterationClass, NULL, 0,
+ NULL, NULL, NULL, NULL);
+}
diff --git a/src/third_party/js-1.7/jsiter.h b/src/third_party/js-1.7/jsiter.h
new file mode 100644
index 00000000000..1a99b6b06f3
--- /dev/null
+++ b/src/third_party/js-1.7/jsiter.h
@@ -0,0 +1,114 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsiter_h___
+#define jsiter_h___
+/*
+ * JavaScript iterators.
+ */
+#include "jsprvtd.h"
+#include "jspubtd.h"
+
+#define JSITER_ENUMERATE 0x1 /* for-in compatible hidden default iterator */
+#define JSITER_FOREACH 0x2 /* return [key, value] pair rather than key */
+#define JSITER_KEYVALUE 0x4 /* destructuring for-in wants [key, value] */
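+
+/*
+ * Note that JSITER_KEYVALUE is meaningful only in combination with
+ * JSITER_FOREACH; js_ValueToIterator asserts that pairing.
+ */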
+
+extern void
+js_CloseNativeIterator(JSContext *cx, JSObject *iterobj);
+
+extern void
+js_CloseIteratorState(JSContext *cx, JSObject *iterobj);
+
+/*
+ * Convert the value stored in *vp to its iteration object. The flags should
+ * contain JSITER_ENUMERATE if js_ValueToIterator is called for enumeration,
+ * i.e., when for-in semantics are required and the caller can guarantee that the
+ * iterator will never be exposed to scripts.
+ */
+extern JSBool
+js_ValueToIterator(JSContext *cx, uintN flags, jsval *vp);
+
+/*
+ * Given iterobj, call iterobj.next(). If the iterator stopped, set *rval to
+ * JSVAL_HOLE. Otherwise set it to the result of the next call.
+ */
+extern JSBool
+js_CallIteratorNext(JSContext *cx, JSObject *iterobj, jsval *rval);
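+
+/*
+ * A minimal driving loop, assuming iterobj came from js_ValueToIterator and
+ * the caller keeps rval rooted; a JS_FALSE return means an error or uncaught
+ * exception, while JSVAL_HOLE in rval means the iterator is exhausted:
+ *
+ *     jsval rval;
+ *
+ *     for (;;) {
+ *         if (!js_CallIteratorNext(cx, iterobj, &rval))
+ *             return JS_FALSE;
+ *         if (rval == JSVAL_HOLE)
+ *             break;
+ *         ... consume rval ...
+ *     }
+ */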
+
+#if JS_HAS_GENERATORS
+
+/*
+ * Generator state codes.
+ */
+typedef enum JSGeneratorState {
+ JSGEN_NEWBORN, /* not yet started */
+ JSGEN_OPEN, /* started by a .next() or .send(undefined) call */
+ JSGEN_RUNNING, /* currently executing via .next(), etc., call */
+ JSGEN_CLOSING, /* close method is doing asynchronous return */
+ JSGEN_CLOSED /* closed, cannot be started or closed again */
+} JSGeneratorState;
+
+struct JSGenerator {
+ JSGenerator *next;
+ JSObject *obj;
+ JSGeneratorState state;
+ JSStackFrame frame;
+ JSArena arena;
+ jsval stack[1];
+};
+
+#define FRAME_TO_GENERATOR(fp) \
+ ((JSGenerator *) ((uint8 *)(fp) - offsetof(JSGenerator, frame)))
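+
+/*
+ * The stack[1] member above is the usual over-allocation idiom: js_NewGenerator
+ * allocates sizeof(JSGenerator) plus extra jsvals so that stack[] holds the
+ * copied argv, vars and operand slots that gen->frame's pointers refer to.
+ * Given a frame known to belong to a generator (JSFRAME_GENERATOR set), the
+ * owning generator is recovered via the macro above, e.g.:
+ *
+ *     JSGenerator *gen = FRAME_TO_GENERATOR(fp);
+ *     JS_ASSERT(&gen->frame == fp);
+ */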
+
+extern JSObject *
+js_NewGenerator(JSContext *cx, JSStackFrame *fp);
+
+extern JSBool
+js_CloseGeneratorObject(JSContext *cx, JSGenerator *gen);
+
+#endif
+
+extern JSClass js_GeneratorClass;
+extern JSClass js_IteratorClass;
+extern JSClass js_StopIterationClass;
+
+extern JSObject *
+js_InitIteratorClasses(JSContext *cx, JSObject *obj);
+
+#endif /* jsiter_h___ */
diff --git a/src/third_party/js-1.7/jskeyword.tbl b/src/third_party/js-1.7/jskeyword.tbl
new file mode 100644
index 00000000000..49b9c6c8c61
--- /dev/null
+++ b/src/third_party/js-1.7/jskeyword.tbl
@@ -0,0 +1,124 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set sw=4 ts=8 et tw=80:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+JS_KEYWORD(break, TOK_BREAK, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(case, TOK_CASE, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(continue, TOK_CONTINUE, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(default, TOK_DEFAULT, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(delete, TOK_DELETE, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(do, TOK_DO, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(else, TOK_ELSE, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(export, TOK_EXPORT, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(false, TOK_PRIMARY, JSOP_FALSE, JSVERSION_DEFAULT)
+JS_KEYWORD(for, TOK_FOR, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(function, TOK_FUNCTION, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(if, TOK_IF, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(in, TOK_IN, JSOP_IN, JSVERSION_DEFAULT)
+JS_KEYWORD(new, TOK_NEW, JSOP_NEW, JSVERSION_DEFAULT)
+JS_KEYWORD(null, TOK_PRIMARY, JSOP_NULL, JSVERSION_DEFAULT)
+JS_KEYWORD(return, TOK_RETURN, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(switch, TOK_SWITCH, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(this, TOK_PRIMARY, JSOP_THIS, JSVERSION_DEFAULT)
+JS_KEYWORD(true, TOK_PRIMARY, JSOP_TRUE, JSVERSION_DEFAULT)
+JS_KEYWORD(typeof, TOK_UNARYOP, JSOP_TYPEOF, JSVERSION_DEFAULT)
+JS_KEYWORD(var, TOK_VAR, JSOP_DEFVAR, JSVERSION_DEFAULT)
+JS_KEYWORD(void, TOK_UNARYOP, JSOP_VOID, JSVERSION_DEFAULT)
+JS_KEYWORD(while, TOK_WHILE, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(with, TOK_WITH, JSOP_NOP, JSVERSION_DEFAULT)
+#if JS_HAS_CONST
+JS_KEYWORD(const, TOK_VAR, JSOP_DEFCONST, JSVERSION_DEFAULT)
+#else
+JS_KEYWORD(const, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+#endif
+
+JS_KEYWORD(try, TOK_TRY, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(catch, TOK_CATCH, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(finally, TOK_FINALLY, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(throw, TOK_THROW, JSOP_NOP, JSVERSION_DEFAULT)
+
+JS_KEYWORD(instanceof, TOK_INSTANCEOF, JSOP_INSTANCEOF,JSVERSION_DEFAULT)
+
+#if JS_HAS_RESERVED_JAVA_KEYWORDS
+JS_KEYWORD(abstract, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(boolean, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(byte, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(char, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(class, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(double, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(extends, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(final, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(float, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(goto, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(implements, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(import, TOK_IMPORT, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(int, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(interface, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(long, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(native, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(package, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(private, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(protected, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(public, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(short, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(static, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(super, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(synchronized,TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(throws, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(transient, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+JS_KEYWORD(volatile, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+#endif
+
+#if JS_HAS_RESERVED_ECMA_KEYWORDS
+JS_KEYWORD(enum, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+#endif
+
+#if JS_HAS_DEBUGGER_KEYWORD
+JS_KEYWORD(debugger, TOK_DEBUGGER, JSOP_NOP, JSVERSION_DEFAULT)
+#elif JS_HAS_RESERVED_ECMA_KEYWORDS
+JS_KEYWORD(debugger, TOK_RESERVED, JSOP_NOP, JSVERSION_DEFAULT)
+#endif
+
+#if JS_HAS_GENERATORS
+JS_KEYWORD(yield, TOK_YIELD, JSOP_NOP, JSVERSION_1_7)
+#endif
+
+#if JS_HAS_BLOCK_SCOPE
+JS_KEYWORD(let, TOK_LET, JSOP_NOP, JSVERSION_1_7)
+#endif
diff --git a/src/third_party/js-1.7/jskwgen.c b/src/third_party/js-1.7/jskwgen.c
new file mode 100644
index 00000000000..5ae39bd99db
--- /dev/null
+++ b/src/third_party/js-1.7/jskwgen.c
@@ -0,0 +1,460 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set sw=4 ts=8 et tw=80:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is String Switch Generator for JavaScript Keywords,
+ * released 2005-12-09.
+ *
+ * The Initial Developer of the Original Code is
+ * Igor Bukanov.
+ * Portions created by the Initial Developer are Copyright (C) 2005-2006
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#include "jsstddef.h"
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <ctype.h>
+
+#include "jsconfig.h"
+
+const char * const keyword_list[] = {
+#define JS_KEYWORD(keyword, type, op, version) #keyword,
+#include "jskeyword.tbl"
+#undef JS_KEYWORD
+};
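+
+/*
+ * keyword_list is built with the X-macro idiom: each inclusion of
+ * jskeyword.tbl expands every JS_KEYWORD(keyword, type, op, version) entry
+ * under a locally supplied definition. A second expansion could, for example,
+ * derive the table size at compile time (illustrative sketch only, not used
+ * below):
+ *
+ *     enum {
+ *     #define JS_KEYWORD(keyword, type, op, version) kwgen_id_##keyword,
+ *     #include "jskeyword.tbl"
+ *     #undef JS_KEYWORD
+ *         KEYWORD_COUNT
+ *     };
+ */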
+
+struct gen_opt {
+ FILE *output; /* output file for generated source */
+ unsigned use_if_threshold; /* max number of choices to generate
+ "if" selector instead of "switch" */
+ unsigned char_tail_test_threshold; /* max number of unprocessed columns
+ to use inlined char compare
+ for remaining chars and not generic
+ string compare code */
+ unsigned indent_level; /* current source indentation level */
+};
+
+static unsigned column_to_compare;
+
+static int
+length_comparator(const void *a, const void *b)
+{
+ const char *str1 = keyword_list[*(unsigned *)a];
+ const char *str2 = keyword_list[*(unsigned *)b];
+ return (int)strlen(str1) - (int)strlen(str2);
+}
+
+static int
+column_comparator(const void *a, const void *b)
+{
+ const char *str1 = keyword_list[*(unsigned *)a];
+ const char *str2 = keyword_list[*(unsigned *)b];
+ return (int)str1[column_to_compare] - (int)str2[column_to_compare];
+}
+
+static unsigned
+count_different_lengths(unsigned indexes[], unsigned nelem)
+{
+ unsigned nlength, current_length, i, l;
+
+ current_length = 0;
+ nlength = 0;
+ for (i = 0; i != nelem; ++i) {
+ l = (unsigned)strlen(keyword_list[indexes[i]]);
+ assert(l != 0);
+ if (current_length != l) {
+ ++nlength;
+ current_length = l;
+ }
+ }
+ return nlength;
+}
+
+static void
+find_char_span_and_count(unsigned indexes[], unsigned nelem, unsigned column,
+ unsigned *span_result, unsigned *count_result)
+{
+ unsigned i, count;
+ unsigned char c, prev, minc, maxc;
+
+ assert(nelem != 0);
+ minc = maxc = prev = (unsigned char)keyword_list[indexes[0]][column];
+ count = 1;
+ for (i = 1; i != nelem; ++i) {
+ c = (unsigned char)keyword_list[indexes[i]][column];
+ if (prev != c) {
+ prev = c;
+ ++count;
+ if (minc > c) {
+ minc = c;
+ } else if (maxc < c) {
+ maxc = c;
+ }
+ }
+ }
+
+ *span_result = maxc - minc + 1;
+ *count_result = count;
+}
+
+static unsigned
+find_optimal_switch_column(struct gen_opt *opt,
+ unsigned indexes[], unsigned nelem,
+ unsigned columns[], unsigned unprocessed_columns,
+ int *use_if_result)
+{
+ unsigned i;
+ unsigned span, min_span, min_span_index;
+ unsigned nchar, min_nchar, min_nchar_index;
+
+ assert(unprocessed_columns != 0);
+ i = 0;
+ min_nchar = min_span = (unsigned)-1;
+ min_nchar_index = min_span_index = 0;
+ do {
+ column_to_compare = columns[i];
+ qsort(indexes, nelem, sizeof(indexes[0]), column_comparator);
+ find_char_span_and_count(indexes, nelem, column_to_compare,
+ &span, &nchar);
+ assert(span != 0);
+ if (span == 1) {
+ assert(nchar == 1);
+ *use_if_result = 1;
+ return 1;
+ }
+ assert(nchar != 1);
+ if (min_span > span) {
+ min_span = span;
+ min_span_index = i;
+ }
+ if (min_nchar > nchar) {
+ min_nchar = nchar;
+ min_nchar_index = i;
+ }
+ } while (++i != unprocessed_columns);
+
+ if (min_nchar <= opt->use_if_threshold) {
+ *use_if_result = 1;
+ i = min_nchar_index;
+ } else {
+ *use_if_result = 0;
+ i = min_span_index;
+ }
+
+ /*
+ * Restore order corresponding to i if it was destroyed by
+ * subsequent sort.
+ */
+ if (i != unprocessed_columns - 1) {
+ column_to_compare = columns[i];
+ qsort(indexes, nelem, sizeof(indexes[0]), column_comparator);
+ }
+
+ return i;
+}
+
+
+static void
+p(struct gen_opt *opt, const char *format, ...)
+{
+ va_list ap;
+
+ va_start(ap, format);
+ vfprintf(opt->output, format, ap);
+ va_end(ap);
+}
+
+/* Size for '\xxx' where xxx is octal escape */
+#define MIN_QUOTED_CHAR_BUFFER 7
+
+static char *
+qchar(char c, char *quoted_buffer)
+{
+ char *s;
+
+ s = quoted_buffer;
+ *s++ = '\'';
+ switch (c) {
+ case '\n': c = 'n'; goto one_char_escape;
+ case '\r': c = 'r'; goto one_char_escape;
+ case '\t': c = 't'; goto one_char_escape;
+      case '\f': c = 'f'; goto one_char_escape;
+ case '\0': c = '0'; goto one_char_escape;
+ case '\'': goto one_char_escape;
+ one_char_escape:
+ *s++ = '\\';
+ break;
+ default:
+ if (!isprint(c)) {
+ *s++ = '\\';
+ *s++ = (char)('0' + (0x3 & (((unsigned char)c) >> 6)));
+ *s++ = (char)('0' + (0x7 & (((unsigned char)c) >> 3)));
+ c = (char)('0' + (0x7 & ((unsigned char)c)));
+ }
+ }
+ *s++ = c;
+ *s++ = '\'';
+ *s = '\0';
+ assert(s + 1 <= quoted_buffer + MIN_QUOTED_CHAR_BUFFER);
+ return quoted_buffer;
+}
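+
+/*
+ * Illustrative only (not part of the original source): given a buffer of
+ * MIN_QUOTED_CHAR_BUFFER bytes, qchar('\n', buf) yields "'\\n'",
+ * qchar('a', buf) yields "'a'", and a non-printable byte such as 0x01
+ * yields the octal form "'\\001'".
+ */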
+
+static void
+nl(struct gen_opt *opt)
+{
+ putc('\n', opt->output);
+}
+
+static void
+indent(struct gen_opt *opt)
+{
+ unsigned n = opt->indent_level;
+ while (n != 0) {
+ --n;
+ fputs(" ", opt->output);
+ }
+}
+
+static void
+line(struct gen_opt *opt, const char *format, ...)
+{
+ va_list ap;
+
+ indent(opt);
+ va_start(ap, format);
+ vfprintf(opt->output, format, ap);
+ va_end(ap);
+ nl(opt);
+}
+
+static void
+generate_letter_switch_r(struct gen_opt *opt,
+ unsigned indexes[], unsigned nelem,
+ unsigned columns[], unsigned unprocessed_columns)
+{
+ char qbuf[MIN_QUOTED_CHAR_BUFFER];
+
+ assert(nelem != 0);
+ if (nelem == 1) {
+ unsigned kw_index = indexes[0];
+ const char *keyword = keyword_list[kw_index];
+
+ if (unprocessed_columns == 0) {
+ line(opt, "JSKW_GOT_MATCH(%u) /* %s */", kw_index, keyword);
+ } else if (unprocessed_columns > opt->char_tail_test_threshold) {
+ line(opt, "JSKW_TEST_GUESS(%u) /* %s */", kw_index, keyword);
+ } else {
+ unsigned i, column;
+
+ indent(opt); p(opt, "if (");
+ for (i = 0; i != unprocessed_columns; ++i) {
+ column = columns[i];
+ qchar(keyword[column], qbuf);
+ p(opt, "%sJSKW_AT(%u)==%s", (i == 0) ? "" : " && ",
+ column, qbuf);
+ }
+ p(opt, ") {"); nl(opt);
+ ++opt->indent_level;
+ line(opt, "JSKW_GOT_MATCH(%u) /* %s */", kw_index, keyword);
+ --opt->indent_level;
+ line(opt, "}");
+ line(opt, "JSKW_NO_MATCH()");
+ }
+ } else {
+ unsigned optimal_column_index, optimal_column;
+ unsigned i;
+ int use_if;
+ char current;
+
+ assert(unprocessed_columns != 0);
+ optimal_column_index = find_optimal_switch_column(opt, indexes, nelem,
+ columns,
+ unprocessed_columns,
+ &use_if);
+ optimal_column = columns[optimal_column_index];
+ columns[optimal_column_index] = columns[unprocessed_columns - 1];
+
+ if (!use_if)
+ line(opt, "switch (JSKW_AT(%u)) {", optimal_column);
+
+ current = keyword_list[indexes[0]][optimal_column];
+ for (i = 0; i != nelem;) {
+ unsigned same_char_begin = i;
+ char next = current;
+
+ for (++i; i != nelem; ++i) {
+ next = keyword_list[indexes[i]][optimal_column];
+ if (next != current)
+ break;
+ }
+ qchar(current, qbuf);
+ if (use_if) {
+ line(opt, "if (JSKW_AT(%u) == %s) {", optimal_column, qbuf);
+ } else {
+ line(opt, " case %s:", qbuf);
+ }
+ ++opt->indent_level;
+ generate_letter_switch_r(opt, indexes + same_char_begin,
+ i - same_char_begin,
+ columns, unprocessed_columns - 1);
+ --opt->indent_level;
+ if (use_if) {
+ line(opt, "}");
+ }
+ current = next;
+ }
+
+ if (!use_if) {
+ line(opt, "}");
+ }
+
+ columns[optimal_column_index] = optimal_column;
+
+ line(opt, "JSKW_NO_MATCH()");
+ }
+}
+
+static void
+generate_letter_switch(struct gen_opt *opt,
+ unsigned indexes[], unsigned nelem,
+ unsigned current_length)
+{
+ unsigned *columns;
+ unsigned i;
+
+ columns = malloc(sizeof(columns[0]) * current_length);
+ if (!columns) {
+ perror("malloc");
+ exit(EXIT_FAILURE);
+ }
+ for (i = 0; i != current_length; ++i) {
+ columns[i] = i;
+ }
+ generate_letter_switch_r(opt, indexes, nelem, columns, current_length);
+ free(columns);
+}
+
+
+static void
+generate_switch(struct gen_opt *opt)
+{
+ unsigned *indexes;
+ unsigned nlength;
+ unsigned i, current;
+ int use_if;
+ unsigned nelem;
+
+ nelem = sizeof(keyword_list)/sizeof(keyword_list[0]);
+
+ line(opt, "/*");
+ line(opt, " * Generating switch for the list of %u entries:", nelem);
+ for (i = 0; i != nelem; ++i) {
+ line(opt, " * %s", keyword_list[i]);
+ }
+ line(opt, " */");
+
+ indexes = malloc(sizeof(indexes[0]) * nelem);
+ if (!indexes) {
+ perror("malloc");
+ exit(EXIT_FAILURE);
+ }
+ for (i = 0; i != nelem; ++i)
+ indexes[i] = i;
+    qsort(indexes, nelem, sizeof(indexes[0]), length_comparator);
+ nlength = count_different_lengths(indexes, nelem);
+
+ use_if = (nlength <= opt->use_if_threshold);
+
+ if (!use_if)
+ line(opt, "switch (JSKW_LENGTH()) {");
+
+ current = (unsigned)strlen(keyword_list[indexes[0]]);
+ for (i = 0; i != nelem;) {
+ unsigned same_length_begin = i;
+ unsigned next = current;
+
+ for (++i; i != nelem; ++i) {
+ next = (unsigned)strlen(keyword_list[indexes[i]]);
+ if (next != current)
+ break;
+ }
+ if (use_if) {
+ line(opt, "if (JSKW_LENGTH() == %u) {", current);
+ } else {
+ line(opt, " case %u:", current);
+ }
+ ++opt->indent_level;
+ generate_letter_switch(opt, indexes + same_length_begin,
+ i - same_length_begin,
+ current);
+ --opt->indent_level;
+ if (use_if) {
+ line(opt, "}");
+ }
+ current = next;
+ }
+ if (!use_if)
+ line(opt, "}");
+ line(opt, "JSKW_NO_MATCH()");
+ free(indexes);
+}
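+
+/*
+ * A rough sketch of the emitted shape (an illustration, not verbatim output)
+ * for a hypothetical two-keyword table {"do", "if"} with the thresholds set
+ * in main() below:
+ *
+ *     if (JSKW_LENGTH() == 2) {
+ *         if (JSKW_AT(0) == 'd') {
+ *             if (JSKW_AT(1)=='o') {
+ *                 JSKW_GOT_MATCH(0)
+ *             }
+ *             JSKW_NO_MATCH()
+ *         }
+ *         if (JSKW_AT(0) == 'i') {
+ *             if (JSKW_AT(1)=='f') {
+ *                 JSKW_GOT_MATCH(1)
+ *             }
+ *             JSKW_NO_MATCH()
+ *         }
+ *         JSKW_NO_MATCH()
+ *     }
+ *     JSKW_NO_MATCH()
+ *
+ * The generated source relies on whatever file includes it to define
+ * JSKW_LENGTH, JSKW_AT, JSKW_GOT_MATCH, JSKW_TEST_GUESS, and JSKW_NO_MATCH,
+ * as used above.
+ */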
+
+int main(int argc, char **argv)
+{
+ struct gen_opt opt;
+
+ if (argc < 2) {
+ opt.output = stdout;
+ } else {
+ opt.output = fopen(argv[1], "w");
+ if (!opt.output) {
+ perror("fopen");
+ exit(EXIT_FAILURE);
+ }
+ }
+ opt.indent_level = 1;
+ opt.use_if_threshold = 3;
+ opt.char_tail_test_threshold = 4;
+
+ generate_switch(&opt);
+
+ if (opt.output != stdout) {
+ if (fclose(opt.output)) {
+ perror("fclose");
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ return EXIT_SUCCESS;
+}
diff --git a/src/third_party/js-1.7/jslibmath.h b/src/third_party/js-1.7/jslibmath.h
new file mode 100644
index 00000000000..3f75f30b464
--- /dev/null
+++ b/src/third_party/js-1.7/jslibmath.h
@@ -0,0 +1,266 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ * IBM Corp.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * Math calls go either to fdlibm or to the native math library; the defines
+ * for each platform remap the fd_* calls accordingly.
+ */
+
+#ifndef _LIBMATH_H
+#define _LIBMATH_H
+
+#include <math.h>
+#include "jsconfig.h"
+
+/*
+ * Define on which platforms to use fdlibm. It is not used by default, on the
+ * assumption that the native math library works unless proven guilty.
+ * There can also be endianness and similar problems in fdlibm itself.
+ *
+ * fdlibm compatibility notes:
+ * - fdlibm broken on OSF1/alpha
+ */
+
+#ifndef JS_USE_FDLIBM_MATH
+#define JS_USE_FDLIBM_MATH 0
+#endif
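+
+/*
+ * For example (an assumption about the build, not something this header
+ * enforces), a port could opt into fdlibm by compiling with
+ * -DJS_USE_FDLIBM_MATH=1; left unset, the native routines below are used.
+ */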
+
+#if !JS_USE_FDLIBM_MATH
+
+/*
+ * Use system provided math routines.
+ */
+
+#define fd_acos acos
+#define fd_asin asin
+#define fd_atan atan
+#define fd_atan2 atan2
+#define fd_ceil ceil
+
+/* The right copysign function is not always named the same thing. */
+#if __GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
+#define fd_copysign __builtin_copysign
+#elif defined WINCE
+#define fd_copysign _copysign
+#elif defined _WIN32
+#if _MSC_VER < 1400
+/* Try to work around apparent _copysign bustage in VC6 and VC7. */
+#define fd_copysign js_copysign
+extern double js_copysign(double, double);
+#else
+#define fd_copysign _copysign
+#endif
+#else
+#define fd_copysign copysign
+#endif
+
+#define fd_cos cos
+#define fd_exp exp
+#define fd_fabs fabs
+#define fd_floor floor
+#define fd_fmod fmod
+#define fd_log log
+#define fd_pow pow
+#define fd_sin sin
+#define fd_sqrt sqrt
+#define fd_tan tan
+
+#else
+
+/*
+ * Use math routines in fdlibm.
+ */
+
+#undef __P
+#ifdef __STDC__
+#define __P(p) p
+#else
+#define __P(p) ()
+#endif
+
+#if (defined _WIN32 && !defined WINCE) || defined SUNOS4
+
+#define fd_acos acos
+#define fd_asin asin
+#define fd_atan atan
+#define fd_cos cos
+#define fd_sin sin
+#define fd_tan tan
+#define fd_exp exp
+#define fd_log log
+#define fd_sqrt sqrt
+#define fd_ceil ceil
+#define fd_fabs fabs
+#define fd_floor floor
+#define fd_fmod fmod
+
+extern double fd_atan2 __P((double, double));
+extern double fd_copysign __P((double, double));
+extern double fd_pow __P((double, double));
+
+#elif defined IRIX
+
+#define fd_acos acos
+#define fd_asin asin
+#define fd_atan atan
+#define fd_exp exp
+#define fd_log log
+#define fd_log10 log10
+#define fd_sqrt sqrt
+#define fd_fabs fabs
+#define fd_floor floor
+#define fd_fmod fmod
+
+extern double fd_cos __P((double));
+extern double fd_sin __P((double));
+extern double fd_tan __P((double));
+extern double fd_atan2 __P((double, double));
+extern double fd_pow __P((double, double));
+extern double fd_ceil __P((double));
+extern double fd_copysign __P((double, double));
+
+#elif defined SOLARIS
+
+#define fd_atan atan
+#define fd_cos cos
+#define fd_sin sin
+#define fd_tan tan
+#define fd_exp exp
+#define fd_sqrt sqrt
+#define fd_ceil ceil
+#define fd_fabs fabs
+#define fd_floor floor
+#define fd_fmod fmod
+
+extern double fd_acos __P((double));
+extern double fd_asin __P((double));
+extern double fd_log __P((double));
+extern double fd_atan2 __P((double, double));
+extern double fd_pow __P((double, double));
+extern double fd_copysign __P((double, double));
+
+#elif defined HPUX
+
+#define fd_cos cos
+#define fd_sin sin
+#define fd_exp exp
+#define fd_sqrt sqrt
+#define fd_fabs fabs
+#define fd_floor floor
+#define fd_fmod fmod
+
+extern double fd_ceil __P((double));
+extern double fd_acos __P((double));
+extern double fd_log __P((double));
+extern double fd_atan2 __P((double, double));
+extern double fd_tan __P((double));
+extern double fd_pow __P((double, double));
+extern double fd_asin __P((double));
+extern double fd_atan __P((double));
+extern double fd_copysign __P((double, double));
+
+#elif defined(OSF1)
+
+#define fd_acos acos
+#define fd_asin asin
+#define fd_atan atan
+#define fd_copysign copysign
+#define fd_cos cos
+#define fd_exp exp
+#define fd_fabs fabs
+#define fd_fmod fmod
+#define fd_sin sin
+#define fd_sqrt sqrt
+#define fd_tan tan
+
+extern double fd_atan2 __P((double, double));
+extern double fd_ceil __P((double));
+extern double fd_floor __P((double));
+extern double fd_log __P((double));
+extern double fd_pow __P((double, double));
+
+#elif defined(AIX)
+
+#define fd_acos acos
+#define fd_asin asin
+#define fd_atan2 atan2
+#define fd_copysign copysign
+#define fd_cos cos
+#define fd_exp exp
+#define fd_fabs fabs
+#define fd_floor floor
+#define fd_fmod fmod
+#define fd_log log
+#define fd_sin sin
+#define fd_sqrt sqrt
+
+extern double fd_atan __P((double));
+extern double fd_ceil __P((double));
+extern double fd_pow __P((double,double));
+extern double fd_tan __P((double));
+
+#else /* other platform.. generic paranoid slow fdlibm */
+
+extern double fd_acos __P((double));
+extern double fd_asin __P((double));
+extern double fd_atan __P((double));
+extern double fd_cos __P((double));
+extern double fd_sin __P((double));
+extern double fd_tan __P((double));
+
+extern double fd_exp __P((double));
+extern double fd_log __P((double));
+extern double fd_sqrt __P((double));
+
+extern double fd_ceil __P((double));
+extern double fd_fabs __P((double));
+extern double fd_floor __P((double));
+extern double fd_fmod __P((double, double));
+
+extern double fd_atan2 __P((double, double));
+extern double fd_pow __P((double, double));
+extern double fd_copysign __P((double, double));
+
+#endif
+
+#endif /* JS_USE_FDLIBM_MATH */
+
+#endif /* _LIBMATH_H */
+
diff --git a/src/third_party/js-1.7/jslock.c b/src/third_party/js-1.7/jslock.c
new file mode 100644
index 00000000000..48550099797
--- /dev/null
+++ b/src/third_party/js-1.7/jslock.c
@@ -0,0 +1,1303 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifdef JS_THREADSAFE
+
+/*
+ * JS locking stubs.
+ */
+#include "jsstddef.h"
+#include <stdlib.h>
+#include "jspubtd.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jstypes.h"
+#include "jsbit.h"
+#include "jscntxt.h"
+#include "jsdtoa.h"
+#include "jsgc.h"
+#include "jslock.h"
+#include "jsscope.h"
+#include "jsstr.h"
+
+#define ReadWord(W) (W)
+
+#ifndef NSPR_LOCK
+
+#include <memory.h>
+
+static PRLock **global_locks;
+static uint32 global_lock_count = 1;
+static uint32 global_locks_log2 = 0;
+static uint32 global_locks_mask = 0;
+
+#define GLOBAL_LOCK_INDEX(id) (((uint32)(id) >> 2) & global_locks_mask)
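+
+/*
+ * Illustrative only: with, say, global_locks_mask == 0x7 (eight global
+ * locks), an id hashes to one of the eight PRLocks; the low two bits are
+ * shifted away because ids are at least word-aligned pointers.
+ */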
+
+static void
+js_LockGlobal(void *id)
+{
+ uint32 i = GLOBAL_LOCK_INDEX(id);
+ PR_Lock(global_locks[i]);
+}
+
+static void
+js_UnlockGlobal(void *id)
+{
+ uint32 i = GLOBAL_LOCK_INDEX(id);
+ PR_Unlock(global_locks[i]);
+}
+
+/* Exclude Alpha NT. */
+#if defined(_WIN32) && defined(_M_IX86)
+#pragma warning( disable : 4035 )
+
+static JS_INLINE int
+js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
+{
+ __asm {
+ mov eax, ov
+ mov ecx, nv
+ mov ebx, w
+ lock cmpxchg [ebx], ecx
+ sete al
+ and eax, 1h
+ }
+}
+
+#elif defined(__GNUC__) && defined(__i386__)
+
+/* Note: This fails on 386 cpus, cmpxchgl is a >= 486 instruction */
+static JS_INLINE int
+js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
+{
+ unsigned int res;
+
+ __asm__ __volatile__ (
+ "lock\n"
+ "cmpxchgl %2, (%1)\n"
+ "sete %%al\n"
+ "andl $1, %%eax\n"
+ : "=a" (res)
+ : "r" (w), "r" (nv), "a" (ov)
+ : "cc", "memory");
+ return (int)res;
+}
+
+#elif (defined(__USLC__) || defined(_SCO_DS)) && defined(i386)
+
+/* Note: This fails on 386 cpus, cmpxchgl is a >= 486 instruction */
+
+asm int
+js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
+{
+%ureg w, nv;
+ movl ov,%eax
+ lock
+ cmpxchgl nv,(w)
+ sete %al
+ andl $1,%eax
+%ureg w; mem ov, nv;
+ movl ov,%eax
+ movl nv,%ecx
+ lock
+ cmpxchgl %ecx,(w)
+ sete %al
+ andl $1,%eax
+%ureg nv;
+ movl ov,%eax
+ movl w,%edx
+ lock
+ cmpxchgl nv,(%edx)
+ sete %al
+ andl $1,%eax
+%mem w, ov, nv;
+ movl ov,%eax
+ movl nv,%ecx
+ movl w,%edx
+ lock
+ cmpxchgl %ecx,(%edx)
+ sete %al
+ andl $1,%eax
+}
+#pragma asm full_optimization js_CompareAndSwap
+
+#elif defined(SOLARIS) && defined(sparc) && defined(ULTRA_SPARC)
+
+static JS_INLINE int
+js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
+{
+#if defined(__GNUC__)
+ unsigned int res;
+ JS_ASSERT(ov != nv);
+ asm volatile ("\
+stbar\n\
+cas [%1],%2,%3\n\
+cmp %2,%3\n\
+be,a 1f\n\
+mov 1,%0\n\
+mov 0,%0\n\
+1:"
+ : "=r" (res)
+ : "r" (w), "r" (ov), "r" (nv));
+ return (int)res;
+#else /* !__GNUC__ */
+ extern int compare_and_swap(jsword*, jsword, jsword);
+ JS_ASSERT(ov != nv);
+ return compare_and_swap(w, ov, nv);
+#endif
+}
+
+#elif defined(AIX)
+
+#include <sys/atomic_op.h>
+
+static JS_INLINE int
+js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
+{
+ return !_check_lock((atomic_p)w, ov, nv);
+}
+
+#else
+
+#error "Define NSPR_LOCK if your platform lacks a compare-and-swap instruction."
+
+#endif /* arch-tests */
+
+#endif /* !NSPR_LOCK */
+
+void
+js_InitLock(JSThinLock *tl)
+{
+#ifdef NSPR_LOCK
+ tl->owner = 0;
+ tl->fat = (JSFatLock*)JS_NEW_LOCK();
+#else
+ memset(tl, 0, sizeof(JSThinLock));
+#endif
+}
+
+void
+js_FinishLock(JSThinLock *tl)
+{
+#ifdef NSPR_LOCK
+ tl->owner = 0xdeadbeef;
+ if (tl->fat)
+ JS_DESTROY_LOCK(((JSLock*)tl->fat));
+#else
+ JS_ASSERT(tl->owner == 0);
+ JS_ASSERT(tl->fat == NULL);
+#endif
+}
+
+static void js_Dequeue(JSThinLock *);
+
+#ifdef DEBUG_SCOPE_COUNT
+
+#include <stdio.h>
+#include "jsdhash.h"
+
+static FILE *logfp;
+static JSDHashTable logtbl;
+
+typedef struct logentry {
+ JSDHashEntryStub stub;
+ char op;
+ const char *file;
+ int line;
+} logentry;
+
+static void
+logit(JSScope *scope, char op, const char *file, int line)
+{
+ logentry *entry;
+
+ if (!logfp) {
+ logfp = fopen("/tmp/scope.log", "w");
+ if (!logfp)
+ return;
+ setvbuf(logfp, NULL, _IONBF, 0);
+ }
+ fprintf(logfp, "%p %c %s %d\n", scope, op, file, line);
+
+ if (!logtbl.entryStore &&
+ !JS_DHashTableInit(&logtbl, JS_DHashGetStubOps(), NULL,
+ sizeof(logentry), 100)) {
+ return;
+ }
+ entry = (logentry *) JS_DHashTableOperate(&logtbl, scope, JS_DHASH_ADD);
+ if (!entry)
+ return;
+ entry->stub.key = scope;
+ entry->op = op;
+ entry->file = file;
+ entry->line = line;
+}
+
+void
+js_unlog_scope(JSScope *scope)
+{
+ if (!logtbl.entryStore)
+ return;
+ (void) JS_DHashTableOperate(&logtbl, scope, JS_DHASH_REMOVE);
+}
+
+# define LOGIT(scope,op) logit(scope, op, __FILE__, __LINE__)
+
+#else
+
+# define LOGIT(scope,op) /* nothing */
+
+#endif /* DEBUG_SCOPE_COUNT */
+
+/*
+ * Return true if scope's ownercx, or the ownercx of a single-threaded scope
+ * for which ownercx is waiting to become multi-threaded and shared, is cx.
+ * That condition implies deadlock in ClaimScope if cx's thread were to wait
+ * to share scope.
+ *
+ * (i) rt->gcLock held
+ */
+static JSBool
+WillDeadlock(JSScope *scope, JSContext *cx)
+{
+ JSContext *ownercx;
+
+ do {
+ ownercx = scope->ownercx;
+ if (ownercx == cx) {
+ JS_RUNTIME_METER(cx->runtime, deadlocksAvoided);
+ return JS_TRUE;
+ }
+ } while (ownercx && (scope = ownercx->scopeToShare) != NULL);
+ return JS_FALSE;
+}
+
+/*
+ * Make scope multi-threaded, i.e. share its ownership among contexts in rt
+ * using a "thin" or (if necessary due to contention) "fat" lock. Called only
+ * from ClaimScope, immediately below, when we detect deadlock were we to wait
+ * for scope's lock, because its ownercx is waiting on a scope owned by the
+ * calling cx.
+ *
+ * (i) rt->gcLock held
+ */
+static void
+ShareScope(JSRuntime *rt, JSScope *scope)
+{
+ JSScope **todop;
+
+ if (scope->u.link) {
+ for (todop = &rt->scopeSharingTodo; *todop != scope;
+ todop = &(*todop)->u.link) {
+ JS_ASSERT(*todop != NO_SCOPE_SHARING_TODO);
+ }
+ *todop = scope->u.link;
+ scope->u.link = NULL; /* null u.link for sanity ASAP */
+ JS_NOTIFY_ALL_CONDVAR(rt->scopeSharingDone);
+ }
+ js_InitLock(&scope->lock);
+ if (scope == rt->setSlotScope) {
+ /*
+ * Nesting locks on another thread that's using scope->ownercx: give
+ * the held lock a reentrancy count of 1 and set its lock.owner field
+ * directly (no compare-and-swap needed while scope->ownercx is still
+ * non-null). See below in ClaimScope, before the ShareScope call,
+ * for more on why this is necessary.
+ *
+ * If NSPR_LOCK is defined, we cannot deadlock holding rt->gcLock and
+ * acquiring scope->lock.fat here, against another thread holding that
+ * fat lock and trying to grab rt->gcLock. This is because no other
+ * thread can attempt to acquire scope->lock.fat until scope->ownercx
+ * is null *and* our thread has released rt->gcLock, which interlocks
+ * scope->ownercx's transition to null against tests of that member
+ * in ClaimScope.
+ */
+ scope->lock.owner = CX_THINLOCK_ID(scope->ownercx);
+#ifdef NSPR_LOCK
+ JS_ACQUIRE_LOCK((JSLock*)scope->lock.fat);
+#endif
+ scope->u.count = 1;
+ } else {
+ scope->u.count = 0;
+ }
+ js_FinishSharingScope(rt, scope);
+}
+
+/*
+ * js_FinishSharingScope is the tail part of ShareScope, split out to become a
+ * subroutine of JS_EndRequest too. The bulk of the work here involves making
+ * mutable strings in the scope's object's slots be immutable. We have to do
+ * this because such strings will soon be available to multiple threads, so
+ * their buffers can't be realloc'd any longer in js_ConcatStrings, and their
+ * members can't be modified by js_ConcatStrings, js_MinimizeDependentStrings,
+ * or js_UndependString.
+ *
+ * The last bit of work done by js_FinishSharingScope nulls scope->ownercx and
+ * updates rt->sharedScopes.
+ */
+#define MAKE_STRING_IMMUTABLE(rt, v, vp) \
+ JS_BEGIN_MACRO \
+ JSString *str_ = JSVAL_TO_STRING(v); \
+ uint8 *flagp_ = js_GetGCThingFlags(str_); \
+ if (*flagp_ & GCF_MUTABLE) { \
+ if (JSSTRING_IS_DEPENDENT(str_) && \
+ !js_UndependString(NULL, str_)) { \
+ JS_RUNTIME_METER(rt, badUndependStrings); \
+ *vp = JSVAL_VOID; \
+ } else { \
+ *flagp_ &= ~GCF_MUTABLE; \
+ } \
+ } \
+ JS_END_MACRO
+
+void
+js_FinishSharingScope(JSRuntime *rt, JSScope *scope)
+{
+ JSObject *obj;
+ uint32 nslots;
+ jsval v, *vp, *end;
+
+ obj = scope->object;
+ nslots = JS_MIN(obj->map->freeslot, obj->map->nslots);
+ for (vp = obj->slots, end = vp + nslots; vp < end; vp++) {
+ v = *vp;
+ if (JSVAL_IS_STRING(v))
+ MAKE_STRING_IMMUTABLE(rt, v, vp);
+ }
+
+ scope->ownercx = NULL; /* NB: set last, after lock init */
+ JS_RUNTIME_METER(rt, sharedScopes);
+}
+
+/*
+ * Given a scope with apparently non-null ownercx different from cx, try to
+ * set ownercx to cx, claiming exclusive (single-threaded) ownership of scope.
+ * If we claim ownership, return true. Otherwise, we wait for ownercx to be
+ * set to null (indicating that scope is multi-threaded); or if waiting would
+ * deadlock, we set ownercx to null ourselves via ShareScope. In any case,
+ * once ownercx is null we return false.
+ */
+static JSBool
+ClaimScope(JSScope *scope, JSContext *cx)
+{
+ JSRuntime *rt;
+ JSContext *ownercx;
+ jsrefcount saveDepth;
+ PRStatus stat;
+
+ rt = cx->runtime;
+ JS_RUNTIME_METER(rt, claimAttempts);
+ JS_LOCK_GC(rt);
+
+ /* Reload in case ownercx went away while we blocked on the lock. */
+ while ((ownercx = scope->ownercx) != NULL) {
+ /*
+ * Avoid selflock if ownercx is dead, or is not running a request, or
+ * has the same thread as cx. Set scope->ownercx to cx so that the
+ * matching JS_UNLOCK_SCOPE or JS_UNLOCK_OBJ macro call will take the
+ * fast path around the corresponding js_UnlockScope or js_UnlockObj
+ * function call.
+ *
+ * If scope->u.link is non-null, scope has already been inserted on
+ * the rt->scopeSharingTodo list, because another thread's context
+ * already wanted to lock scope while ownercx was running a request.
+ * We can't claim any scope whose u.link is non-null at this point,
+ * even if ownercx->requestDepth is 0 (see below where we suspend our
+ * request before waiting on rt->scopeSharingDone).
+ */
+ if (!scope->u.link &&
+ (!js_ValidContextPointer(rt, ownercx) ||
+ !ownercx->requestDepth ||
+ ownercx->thread == cx->thread)) {
+ JS_ASSERT(scope->u.count == 0);
+ scope->ownercx = cx;
+ JS_UNLOCK_GC(rt);
+ JS_RUNTIME_METER(rt, claimedScopes);
+ return JS_TRUE;
+ }
+
+ /*
+ * Avoid deadlock if scope's owner context is waiting on a scope that
+ * we own, by revoking scope's ownership. This approach to deadlock
+ * avoidance works because the engine never nests scope locks, except
+ * for the notable case of js_SetProtoOrParent (see jsobj.c).
+ *
+ * If cx could hold locks on ownercx->scopeToShare, or if ownercx
+ * could hold locks on scope, we would need to keep reentrancy counts
+ * for all such "flyweight" (ownercx != NULL) locks, so that control
+ * would unwind properly once these locks became "thin" or "fat".
+ * Apart from the js_SetProtoOrParent exception, the engine promotes
+ * a scope from exclusive to shared access only when locking, never
+ * when holding or unlocking.
+ *
+ * If ownercx's thread is calling js_SetProtoOrParent, trying to lock
+ * the inner scope (the scope of the object being set as the prototype
+ * of the outer object), ShareScope will find the outer object's scope
+ * at rt->setSlotScope. If it's the same as scope, we give it a lock
+ * held by ownercx's thread with reentrancy count of 1, then we return
+ * here and break. After that we unwind to js_[GS]etSlotThreadSafe or
+ * js_LockScope (our caller), where we wait on the newly-fattened lock
+ * until ownercx's thread unwinds from js_SetProtoOrParent.
+ *
+ * Avoid deadlock before any of this scope/context cycle detection if
+ * cx is on the active GC's thread, because in that case, no requests
+ * will run until the GC completes. Any scope wanted by the GC (from
+ * a finalizer) that can't be claimed must be slated for sharing.
+ */
+ if (rt->gcThread == cx->thread ||
+ (ownercx->scopeToShare &&
+ WillDeadlock(ownercx->scopeToShare, cx))) {
+ ShareScope(rt, scope);
+ break;
+ }
+
+ /*
+ * Thanks to the non-zero NO_SCOPE_SHARING_TODO link terminator, we
+ * can decide whether scope is on rt->scopeSharingTodo with a single
+ * non-null test, and avoid double-insertion bugs.
+ */
+ if (!scope->u.link) {
+ scope->u.link = rt->scopeSharingTodo;
+ rt->scopeSharingTodo = scope;
+ js_HoldObjectMap(cx, &scope->map);
+ }
+
+ /*
+ * Inline JS_SuspendRequest before we wait on rt->scopeSharingDone,
+ * saving and clearing cx->requestDepth so we don't deadlock if the
+ * GC needs to run on ownercx.
+ *
+ * Unlike JS_SuspendRequest and JS_EndRequest, we must take care not
+ * to decrement rt->requestCount if cx is active on the GC's thread,
+ * because the GC has already reduced rt->requestCount to exclude all
+         * such contexts.
+ */
+ saveDepth = cx->requestDepth;
+ if (saveDepth) {
+ cx->requestDepth = 0;
+ if (rt->gcThread != cx->thread) {
+ JS_ASSERT(rt->requestCount > 0);
+ rt->requestCount--;
+ if (rt->requestCount == 0)
+ JS_NOTIFY_REQUEST_DONE(rt);
+ }
+ }
+
+ /*
+ * We know that some other thread's context owns scope, which is now
+ * linked onto rt->scopeSharingTodo, awaiting the end of that other
+ * thread's request. So it is safe to wait on rt->scopeSharingDone.
+ */
+ cx->scopeToShare = scope;
+ stat = PR_WaitCondVar(rt->scopeSharingDone, PR_INTERVAL_NO_TIMEOUT);
+ JS_ASSERT(stat != PR_FAILURE);
+
+ /*
+ * Inline JS_ResumeRequest after waiting on rt->scopeSharingDone,
+ * restoring cx->requestDepth. Same note as above for the inlined,
+ * specialized JS_SuspendRequest code: beware rt->gcThread.
+ */
+ if (saveDepth) {
+ if (rt->gcThread != cx->thread) {
+ while (rt->gcLevel > 0)
+ JS_AWAIT_GC_DONE(rt);
+ rt->requestCount++;
+ }
+ cx->requestDepth = saveDepth;
+ }
+
+ /*
+ * Don't clear cx->scopeToShare until after we're through waiting on
+ * all condition variables protected by rt->gcLock -- that includes
+ * rt->scopeSharingDone *and* rt->gcDone (hidden in JS_AWAIT_GC_DONE,
+ * in the inlined JS_ResumeRequest code immediately above).
+ *
+ * Otherwise, the GC could easily deadlock with another thread that
+ * owns a scope wanted by a finalizer. By keeping cx->scopeToShare
+ * set till here, we ensure that such deadlocks are detected, which
+ * results in the finalized object's scope being shared (it must, of
+ * course, have other, live objects sharing it).
+ */
+ cx->scopeToShare = NULL;
+ }
+
+ JS_UNLOCK_GC(rt);
+ return JS_FALSE;
+}
+
+/* Exported to js.c, which calls it via OBJ_GET_* and JSVAL_IS_* macros. */
+JS_FRIEND_API(jsval)
+js_GetSlotThreadSafe(JSContext *cx, JSObject *obj, uint32 slot)
+{
+ jsval v;
+ JSScope *scope;
+#ifndef NSPR_LOCK
+ JSThinLock *tl;
+ jsword me;
+#endif
+
+ /*
+ * We handle non-native objects via JSObjectOps.getRequiredSlot, treating
+ * all slots starting from 0 as required slots. A property definition or
+ * some prior arrangement must have allocated slot.
+ *
+ * Note once again (see jspubtd.h, before JSGetRequiredSlotOp's typedef)
+ * the crucial distinction between a |required slot number| that's passed
+ * to the get/setRequiredSlot JSObjectOps, and a |reserved slot index|
+ * passed to the JS_Get/SetReservedSlot APIs.
+ */
+ if (!OBJ_IS_NATIVE(obj))
+ return OBJ_GET_REQUIRED_SLOT(cx, obj, slot);
+
+ /*
+ * Native object locking is inlined here to optimize the single-threaded
+ * and contention-free multi-threaded cases.
+ */
+ scope = OBJ_SCOPE(obj);
+ JS_ASSERT(scope->ownercx != cx);
+ JS_ASSERT(obj->slots && slot < obj->map->freeslot);
+
+ /*
+ * Avoid locking if called from the GC (see GC_AWARE_GET_SLOT in jsobj.h).
+ * Also avoid locking an object owning a sealed scope. If neither of those
+ * special cases applies, try to claim scope's flyweight lock from whatever
+ * context may have had it in an earlier request.
+ */
+ if (CX_THREAD_IS_RUNNING_GC(cx) ||
+ (SCOPE_IS_SEALED(scope) && scope->object == obj) ||
+ (scope->ownercx && ClaimScope(scope, cx))) {
+ return obj->slots[slot];
+ }
+
+#ifndef NSPR_LOCK
+ tl = &scope->lock;
+ me = CX_THINLOCK_ID(cx);
+ JS_ASSERT(CURRENT_THREAD_IS_ME(me));
+ if (js_CompareAndSwap(&tl->owner, 0, me)) {
+ /*
+ * Got the lock with one compare-and-swap. Even so, someone else may
+ * have mutated obj so it now has its own scope and lock, which would
+ * require either a restart from the top of this routine, or a thin
+ * lock release followed by fat lock acquisition.
+ */
+ if (scope == OBJ_SCOPE(obj)) {
+ v = obj->slots[slot];
+ if (!js_CompareAndSwap(&tl->owner, me, 0)) {
+ /* Assert that scope locks never revert to flyweight. */
+ JS_ASSERT(scope->ownercx != cx);
+ LOGIT(scope, '1');
+ scope->u.count = 1;
+ js_UnlockObj(cx, obj);
+ }
+ return v;
+ }
+ if (!js_CompareAndSwap(&tl->owner, me, 0))
+ js_Dequeue(tl);
+ }
+ else if (Thin_RemoveWait(ReadWord(tl->owner)) == me) {
+ return obj->slots[slot];
+ }
+#endif
+
+ js_LockObj(cx, obj);
+ v = obj->slots[slot];
+
+ /*
+ * Test whether cx took ownership of obj's scope during js_LockObj.
+ *
+ * This does not mean that a given scope reverted to flyweight from "thin"
+ * or "fat" -- it does mean that obj's map pointer changed due to another
+ * thread setting a property, requiring obj to cease sharing a prototype
+ * object's scope (whose lock was not flyweight, else we wouldn't be here
+ * in the first place!).
+ */
+ scope = OBJ_SCOPE(obj);
+ if (scope->ownercx != cx)
+ js_UnlockScope(cx, scope);
+ return v;
+}
+
+void
+js_SetSlotThreadSafe(JSContext *cx, JSObject *obj, uint32 slot, jsval v)
+{
+ JSScope *scope;
+#ifndef NSPR_LOCK
+ JSThinLock *tl;
+ jsword me;
+#endif
+
+ /* Any string stored in a thread-safe object must be immutable. */
+ if (JSVAL_IS_STRING(v))
+ MAKE_STRING_IMMUTABLE(cx->runtime, v, &v);
+
+ /*
+ * We handle non-native objects via JSObjectOps.setRequiredSlot, as above
+ * for the Get case.
+ */
+ if (!OBJ_IS_NATIVE(obj)) {
+ OBJ_SET_REQUIRED_SLOT(cx, obj, slot, v);
+ return;
+ }
+
+ /*
+ * Native object locking is inlined here to optimize the single-threaded
+ * and contention-free multi-threaded cases.
+ */
+ scope = OBJ_SCOPE(obj);
+ JS_ASSERT(scope->ownercx != cx);
+ JS_ASSERT(obj->slots && slot < obj->map->freeslot);
+
+ /*
+ * Avoid locking if called from the GC (see GC_AWARE_GET_SLOT in jsobj.h).
+ * Also avoid locking an object owning a sealed scope. If neither of those
+ * special cases applies, try to claim scope's flyweight lock from whatever
+ * context may have had it in an earlier request.
+ */
+ if (CX_THREAD_IS_RUNNING_GC(cx) ||
+ (SCOPE_IS_SEALED(scope) && scope->object == obj) ||
+ (scope->ownercx && ClaimScope(scope, cx))) {
+ obj->slots[slot] = v;
+ return;
+ }
+
+#ifndef NSPR_LOCK
+ tl = &scope->lock;
+ me = CX_THINLOCK_ID(cx);
+ JS_ASSERT(CURRENT_THREAD_IS_ME(me));
+ if (js_CompareAndSwap(&tl->owner, 0, me)) {
+ if (scope == OBJ_SCOPE(obj)) {
+ obj->slots[slot] = v;
+ if (!js_CompareAndSwap(&tl->owner, me, 0)) {
+ /* Assert that scope locks never revert to flyweight. */
+ JS_ASSERT(scope->ownercx != cx);
+ LOGIT(scope, '1');
+ scope->u.count = 1;
+ js_UnlockObj(cx, obj);
+ }
+ return;
+ }
+ if (!js_CompareAndSwap(&tl->owner, me, 0))
+ js_Dequeue(tl);
+ }
+ else if (Thin_RemoveWait(ReadWord(tl->owner)) == me) {
+ obj->slots[slot] = v;
+ return;
+ }
+#endif
+
+ js_LockObj(cx, obj);
+ obj->slots[slot] = v;
+
+ /*
+ * Same drill as above, in js_GetSlotThreadSafe. Note that we cannot
+ * assume obj has its own mutable scope (where scope->object == obj) yet,
+ * because OBJ_SET_SLOT is called for the "universal", common slots such
+ * as JSSLOT_PROTO and JSSLOT_PARENT, without a prior js_GetMutableScope.
+ * See also the JSPROP_SHARED attribute and its usage.
+ */
+ scope = OBJ_SCOPE(obj);
+ if (scope->ownercx != cx)
+ js_UnlockScope(cx, scope);
+}
+
+#ifndef NSPR_LOCK
+
+static JSFatLock *
+NewFatlock()
+{
+ JSFatLock *fl = (JSFatLock *)malloc(sizeof(JSFatLock)); /* for now */
+ if (!fl) return NULL;
+ fl->susp = 0;
+ fl->next = NULL;
+ fl->prevp = NULL;
+ fl->slock = PR_NewLock();
+ fl->svar = PR_NewCondVar(fl->slock);
+ return fl;
+}
+
+static void
+DestroyFatlock(JSFatLock *fl)
+{
+ PR_DestroyLock(fl->slock);
+ PR_DestroyCondVar(fl->svar);
+ free(fl);
+}
+
+static JSFatLock *
+ListOfFatlocks(int listc)
+{
+ JSFatLock *m;
+ JSFatLock *m0;
+ int i;
+
+ JS_ASSERT(listc>0);
+ m0 = m = NewFatlock();
+ for (i=1; i<listc; i++) {
+ m->next = NewFatlock();
+ m = m->next;
+ }
+ return m0;
+}
+
+static void
+DeleteListOfFatlocks(JSFatLock *m)
+{
+ JSFatLock *m0;
+ for (; m; m=m0) {
+ m0 = m->next;
+ DestroyFatlock(m);
+ }
+}
+
+static JSFatLockTable *fl_list_table = NULL;
+static uint32 fl_list_table_len = 0;
+static uint32 fl_list_chunk_len = 0;
+
+static JSFatLock *
+GetFatlock(void *id)
+{
+ JSFatLock *m;
+
+ uint32 i = GLOBAL_LOCK_INDEX(id);
+ if (fl_list_table[i].free == NULL) {
+#ifdef DEBUG
+ if (fl_list_table[i].taken)
+ printf("Ran out of fat locks!\n");
+#endif
+ fl_list_table[i].free = ListOfFatlocks(fl_list_chunk_len);
+ }
+ m = fl_list_table[i].free;
+ fl_list_table[i].free = m->next;
+ m->susp = 0;
+ m->next = fl_list_table[i].taken;
+ m->prevp = &fl_list_table[i].taken;
+ if (fl_list_table[i].taken)
+ fl_list_table[i].taken->prevp = &m->next;
+ fl_list_table[i].taken = m;
+ return m;
+}
+
+static void
+PutFatlock(JSFatLock *m, void *id)
+{
+ uint32 i;
+ if (m == NULL)
+ return;
+
+ /* Unlink m from fl_list_table[i].taken. */
+ *m->prevp = m->next;
+ if (m->next)
+ m->next->prevp = m->prevp;
+
+ /* Insert m in fl_list_table[i].free. */
+ i = GLOBAL_LOCK_INDEX(id);
+ m->next = fl_list_table[i].free;
+ fl_list_table[i].free = m;
+}
+
+#endif /* !NSPR_LOCK */
+
+JSBool
+js_SetupLocks(int listc, int globc)
+{
+#ifndef NSPR_LOCK
+ uint32 i;
+
+ if (global_locks)
+ return JS_TRUE;
+#ifdef DEBUG
+ if (listc > 10000 || listc < 0) /* listc == fat lock list chunk length */
+ printf("Bad number %d in js_SetupLocks()!\n", listc);
+ if (globc > 100 || globc < 0) /* globc == number of global locks */
+        printf("Bad number %d in js_SetupLocks()!\n", globc);
+#endif
+ global_locks_log2 = JS_CeilingLog2(globc);
+ global_locks_mask = JS_BITMASK(global_locks_log2);
+ global_lock_count = JS_BIT(global_locks_log2);
+ global_locks = (PRLock **) malloc(global_lock_count * sizeof(PRLock*));
+ if (!global_locks)
+ return JS_FALSE;
+ for (i = 0; i < global_lock_count; i++) {
+ global_locks[i] = PR_NewLock();
+ if (!global_locks[i]) {
+ global_lock_count = i;
+ js_CleanupLocks();
+ return JS_FALSE;
+ }
+ }
+ fl_list_table = (JSFatLockTable *) malloc(i * sizeof(JSFatLockTable));
+ if (!fl_list_table) {
+ js_CleanupLocks();
+ return JS_FALSE;
+ }
+ fl_list_table_len = global_lock_count;
+ for (i = 0; i < global_lock_count; i++)
+ fl_list_table[i].free = fl_list_table[i].taken = NULL;
+ fl_list_chunk_len = listc;
+#endif /* !NSPR_LOCK */
+ return JS_TRUE;
+}
+
+void
+js_CleanupLocks()
+{
+#ifndef NSPR_LOCK
+ uint32 i;
+
+ if (global_locks) {
+ for (i = 0; i < global_lock_count; i++)
+ PR_DestroyLock(global_locks[i]);
+ free(global_locks);
+ global_locks = NULL;
+ global_lock_count = 1;
+ global_locks_log2 = 0;
+ global_locks_mask = 0;
+ }
+ if (fl_list_table) {
+ for (i = 0; i < fl_list_table_len; i++) {
+ DeleteListOfFatlocks(fl_list_table[i].free);
+ fl_list_table[i].free = NULL;
+ DeleteListOfFatlocks(fl_list_table[i].taken);
+ fl_list_table[i].taken = NULL;
+ }
+ free(fl_list_table);
+ fl_list_table = NULL;
+ fl_list_table_len = 0;
+ }
+#endif /* !NSPR_LOCK */
+}
+
+#ifndef NSPR_LOCK
+
+/*
+ * Fast locking and unlocking is implemented by delaying the allocation of a
+ * system lock (fat lock) until contention. As long as a locking thread A
+ * runs uncontended, the lock is represented solely by storing A's identity in
+ * the object being locked.
+ *
+ * If another thread B tries to lock the object currently locked by A, B is
+ * enqueued into a fat lock structure (which might have to be allocated and
+ * pointed to by the object), and suspended using NSPR condition variables
+ * (wait). A wait bit (Bacon bit) is set in the lock word of the object,
+ * signalling to A that when releasing the lock, B must be dequeued and
+ * notified.
+ *
+ * The basic operation of the locking primitives (js_Lock, js_Unlock,
+ * js_Enqueue, and js_Dequeue) is compare-and-swap. Hence, when locking into
+ * the word pointed at by p, compare-and-swap(p, 0, A) success implies that p
+ * is unlocked. Similarly, when unlocking p, if compare-and-swap(p, A, 0)
+ * succeeds this implies that p is uncontended (no one is waiting because the
+ * wait bit is not set).
+ *
+ * When dequeueing, the lock is released, and one of the threads suspended on
+ * the lock is notified. If other threads still are waiting, the wait bit is
+ * kept (in js_Enqueue), and if not, the fat lock is deallocated.
+ *
+ * The functions js_Enqueue, js_Dequeue, js_SuspendThread, and js_ResumeThread
+ * are serialized using a global lock. For scalability, a hashtable of global
+ * locks is used, which is indexed modulo the thin lock pointer.
+ */
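+
+/*
+ * A simplified sketch (not normative) of the lock-word transitions, with A
+ * standing for a thread's CX_THINLOCK_ID and the low bit as the wait bit:
+ *
+ *     0        unlocked
+ *     A        held by A, uncontended (js_Lock: compare-and-swap 0 -> A)
+ *     A | 1    held by A, another thread parked on the fat lock
+ *              (js_Enqueue set the wait bit and suspended)
+ *     0        A released via js_Dequeue and notified one waiter
+ */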
+
+/*
+ * Invariants:
+ * (i) global lock is held
+ * (ii) fl->susp >= 0
+ */
+static int
+js_SuspendThread(JSThinLock *tl)
+{
+ JSFatLock *fl;
+ PRStatus stat;
+
+ if (tl->fat == NULL)
+ fl = tl->fat = GetFatlock(tl);
+ else
+ fl = tl->fat;
+ JS_ASSERT(fl->susp >= 0);
+ fl->susp++;
+ PR_Lock(fl->slock);
+ js_UnlockGlobal(tl);
+ stat = PR_WaitCondVar(fl->svar, PR_INTERVAL_NO_TIMEOUT);
+ JS_ASSERT(stat != PR_FAILURE);
+ PR_Unlock(fl->slock);
+ js_LockGlobal(tl);
+ fl->susp--;
+ if (fl->susp == 0) {
+ PutFatlock(fl, tl);
+ tl->fat = NULL;
+ }
+ return tl->fat == NULL;
+}
+
+/*
+ * (i) global lock is held
+ * (ii) fl->susp > 0
+ */
+static void
+js_ResumeThread(JSThinLock *tl)
+{
+ JSFatLock *fl = tl->fat;
+ PRStatus stat;
+
+ JS_ASSERT(fl != NULL);
+ JS_ASSERT(fl->susp > 0);
+ PR_Lock(fl->slock);
+ js_UnlockGlobal(tl);
+ stat = PR_NotifyCondVar(fl->svar);
+ JS_ASSERT(stat != PR_FAILURE);
+ PR_Unlock(fl->slock);
+}
+
+static void
+js_Enqueue(JSThinLock *tl, jsword me)
+{
+ jsword o, n;
+
+ js_LockGlobal(tl);
+ for (;;) {
+ o = ReadWord(tl->owner);
+ n = Thin_SetWait(o);
+ if (o != 0 && js_CompareAndSwap(&tl->owner, o, n)) {
+ if (js_SuspendThread(tl))
+ me = Thin_RemoveWait(me);
+ else
+ me = Thin_SetWait(me);
+ }
+ else if (js_CompareAndSwap(&tl->owner, 0, me)) {
+ js_UnlockGlobal(tl);
+ return;
+ }
+ }
+}
+
+static void
+js_Dequeue(JSThinLock *tl)
+{
+ jsword o;
+
+ js_LockGlobal(tl);
+ o = ReadWord(tl->owner);
+ JS_ASSERT(Thin_GetWait(o) != 0);
+ JS_ASSERT(tl->fat != NULL);
+ if (!js_CompareAndSwap(&tl->owner, o, 0)) /* release it */
+ JS_ASSERT(0);
+ js_ResumeThread(tl);
+}
+
+JS_INLINE void
+js_Lock(JSThinLock *tl, jsword me)
+{
+ JS_ASSERT(CURRENT_THREAD_IS_ME(me));
+ if (js_CompareAndSwap(&tl->owner, 0, me))
+ return;
+ if (Thin_RemoveWait(ReadWord(tl->owner)) != me)
+ js_Enqueue(tl, me);
+#ifdef DEBUG
+ else
+ JS_ASSERT(0);
+#endif
+}
+
+JS_INLINE void
+js_Unlock(JSThinLock *tl, jsword me)
+{
+ JS_ASSERT(CURRENT_THREAD_IS_ME(me));
+
+ /*
+ * Only me can hold the lock, no need to use compare and swap atomic
+ * operation for this common case.
+ */
+ if (tl->owner == me) {
+ tl->owner = 0;
+ return;
+ }
+ JS_ASSERT(Thin_GetWait(tl->owner));
+ if (Thin_RemoveWait(ReadWord(tl->owner)) == me)
+ js_Dequeue(tl);
+#ifdef DEBUG
+ else
+ JS_ASSERT(0); /* unbalanced unlock */
+#endif
+}
+
+#endif /* !NSPR_LOCK */
+
+void
+js_LockRuntime(JSRuntime *rt)
+{
+ PR_Lock(rt->rtLock);
+#ifdef DEBUG
+ rt->rtLockOwner = js_CurrentThreadId();
+#endif
+}
+
+void
+js_UnlockRuntime(JSRuntime *rt)
+{
+#ifdef DEBUG
+ rt->rtLockOwner = 0;
+#endif
+ PR_Unlock(rt->rtLock);
+}
+
+void
+js_LockScope(JSContext *cx, JSScope *scope)
+{
+ jsword me = CX_THINLOCK_ID(cx);
+
+ JS_ASSERT(CURRENT_THREAD_IS_ME(me));
+ JS_ASSERT(scope->ownercx != cx);
+ if (CX_THREAD_IS_RUNNING_GC(cx))
+ return;
+ if (scope->ownercx && ClaimScope(scope, cx))
+ return;
+
+ if (Thin_RemoveWait(ReadWord(scope->lock.owner)) == me) {
+ JS_ASSERT(scope->u.count > 0);
+ LOGIT(scope, '+');
+ scope->u.count++;
+ } else {
+ JSThinLock *tl = &scope->lock;
+ JS_LOCK0(tl, me);
+ JS_ASSERT(scope->u.count == 0);
+ LOGIT(scope, '1');
+ scope->u.count = 1;
+ }
+}
+
+void
+js_UnlockScope(JSContext *cx, JSScope *scope)
+{
+ jsword me = CX_THINLOCK_ID(cx);
+
+ /* We hope compilers use me instead of reloading cx->thread in the macro. */
+ if (CX_THREAD_IS_RUNNING_GC(cx))
+ return;
+ if (cx->lockedSealedScope == scope) {
+ cx->lockedSealedScope = NULL;
+ return;
+ }
+
+ /*
+ * If scope->ownercx is not null, it's likely that two contexts not using
+ * requests nested locks for scope. The first context, cx here, claimed
+ * scope; the second, scope->ownercx here, re-claimed it because the first
+ * was not in a request, or was on the same thread. We don't want to keep
+ * track of such nesting, because it penalizes the common non-nested case.
+ * Instead of asserting here and silently coping, we simply re-claim scope
+ * for cx and return.
+ *
+ * See http://bugzilla.mozilla.org/show_bug.cgi?id=229200 for a real world
+ * case where an asymmetric thread model (Mozilla's main thread is known
+ * to be the only thread that runs the GC) combined with multiple contexts
+ * per thread has led to such request-less nesting.
+ */
+ if (scope->ownercx) {
+ JS_ASSERT(scope->u.count == 0);
+ JS_ASSERT(scope->lock.owner == 0);
+ scope->ownercx = cx;
+ return;
+ }
+
+ JS_ASSERT(scope->u.count > 0);
+ if (Thin_RemoveWait(ReadWord(scope->lock.owner)) != me) {
+ JS_ASSERT(0); /* unbalanced unlock */
+ return;
+ }
+ LOGIT(scope, '-');
+ if (--scope->u.count == 0) {
+ JSThinLock *tl = &scope->lock;
+ JS_UNLOCK0(tl, me);
+ }
+}
+
+/*
+ * NB: oldscope may be null if our caller is js_GetMutableScope and it just
+ * dropped the last reference to oldscope.
+ */
+void
+js_TransferScopeLock(JSContext *cx, JSScope *oldscope, JSScope *newscope)
+{
+ jsword me;
+ JSThinLock *tl;
+
+ JS_ASSERT(JS_IS_SCOPE_LOCKED(cx, newscope));
+
+ /*
+ * If the last reference to oldscope went away, newscope needs no lock
+ * state update.
+ */
+ if (!oldscope)
+ return;
+ JS_ASSERT(JS_IS_SCOPE_LOCKED(cx, oldscope));
+
+ /*
+ * Special case in js_LockScope and js_UnlockScope for the GC calling
+ * code that locks, unlocks, or mutates. Nothing to do in these cases,
+ * because scope and newscope were "locked" by the GC thread, so neither
+ * was actually locked.
+ */
+ if (CX_THREAD_IS_RUNNING_GC(cx))
+ return;
+
+ /*
+ * Special case in js_LockObj and js_UnlockScope for locking the sealed
+ * scope of an object that owns that scope (the prototype or mutated obj
+ * for which OBJ_SCOPE(obj)->object == obj), and unlocking it.
+ */
+ JS_ASSERT(cx->lockedSealedScope != newscope);
+ if (cx->lockedSealedScope == oldscope) {
+ JS_ASSERT(newscope->ownercx == cx ||
+ (!newscope->ownercx && newscope->u.count == 1));
+ cx->lockedSealedScope = NULL;
+ return;
+ }
+
+ /*
+ * If oldscope is single-threaded, there's nothing to do.
+ */
+ if (oldscope->ownercx) {
+ JS_ASSERT(oldscope->ownercx == cx);
+ JS_ASSERT(newscope->ownercx == cx ||
+ (!newscope->ownercx && newscope->u.count == 1));
+ return;
+ }
+
+ /*
+ * We transfer oldscope->u.count only if newscope is not single-threaded.
+ * Flow unwinds from here through some number of JS_UNLOCK_SCOPE and/or
+ * JS_UNLOCK_OBJ macro calls, which will decrement newscope->u.count only
+ * if they find newscope->ownercx != cx.
+ */
+ if (newscope->ownercx != cx) {
+ JS_ASSERT(!newscope->ownercx);
+ newscope->u.count = oldscope->u.count;
+ }
+
+ /*
+ * Reset oldscope's lock state so that it is completely unlocked.
+ */
+ LOGIT(oldscope, '0');
+ oldscope->u.count = 0;
+ tl = &oldscope->lock;
+ me = CX_THINLOCK_ID(cx);
+ JS_UNLOCK0(tl, me);
+}
+
+void
+js_LockObj(JSContext *cx, JSObject *obj)
+{
+ JSScope *scope;
+
+ JS_ASSERT(OBJ_IS_NATIVE(obj));
+
+ /*
+ * We must test whether the GC is calling and return without mutating any
+ * state, especially cx->lockedSealedScope. Note asymmetry with respect to
+     * js_UnlockObj, which is a thin layer on top of js_UnlockScope.
+ */
+ if (CX_THREAD_IS_RUNNING_GC(cx))
+ return;
+
+ for (;;) {
+ scope = OBJ_SCOPE(obj);
+ if (SCOPE_IS_SEALED(scope) && scope->object == obj &&
+ !cx->lockedSealedScope) {
+ cx->lockedSealedScope = scope;
+ return;
+ }
+
+ js_LockScope(cx, scope);
+
+ /* If obj still has this scope, we're done. */
+ if (scope == OBJ_SCOPE(obj))
+ return;
+
+ /* Lost a race with a mutator; retry with obj's new scope. */
+ js_UnlockScope(cx, scope);
+ }
+}
+
+void
+js_UnlockObj(JSContext *cx, JSObject *obj)
+{
+ JS_ASSERT(OBJ_IS_NATIVE(obj));
+ js_UnlockScope(cx, OBJ_SCOPE(obj));
+}
+
+#ifdef DEBUG
+
+JSBool
+js_IsRuntimeLocked(JSRuntime *rt)
+{
+ return js_CurrentThreadId() == rt->rtLockOwner;
+}
+
+JSBool
+js_IsObjLocked(JSContext *cx, JSObject *obj)
+{
+ JSScope *scope = OBJ_SCOPE(obj);
+
+ return MAP_IS_NATIVE(&scope->map) && js_IsScopeLocked(cx, scope);
+}
+
+JSBool
+js_IsScopeLocked(JSContext *cx, JSScope *scope)
+{
+ /* Special case: the GC locking any object's scope, see js_LockScope. */
+ if (CX_THREAD_IS_RUNNING_GC(cx))
+ return JS_TRUE;
+
+ /* Special case: locked object owning a sealed scope, see js_LockObj. */
+ if (cx->lockedSealedScope == scope)
+ return JS_TRUE;
+
+ /*
+ * General case: the scope is either exclusively owned (by cx), or it has
+ * a thin or fat lock to cope with shared (concurrent) ownership.
+ */
+ if (scope->ownercx) {
+ JS_ASSERT(scope->ownercx == cx || scope->ownercx->thread == cx->thread);
+ return JS_TRUE;
+ }
+ return js_CurrentThreadId() ==
+ ((JSThread *)Thin_RemoveWait(ReadWord(scope->lock.owner)))->id;
+}
+
+#endif /* DEBUG */
+#endif /* JS_THREADSAFE */
diff --git a/src/third_party/js-1.7/jslock.h b/src/third_party/js-1.7/jslock.h
new file mode 100644
index 00000000000..f9ed03db553
--- /dev/null
+++ b/src/third_party/js-1.7/jslock.h
@@ -0,0 +1,266 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+#ifndef jslock_h__
+#define jslock_h__
+
+#ifdef JS_THREADSAFE
+
+#include "jstypes.h"
+#include "pratom.h"
+#include "prlock.h"
+#include "prcvar.h"
+#include "prthread.h"
+
+#include "jsprvtd.h" /* for JSScope, etc. */
+#include "jspubtd.h" /* for JSRuntime, etc. */
+
+#define Thin_GetWait(W) ((jsword)(W) & 0x1)
+#define Thin_SetWait(W) ((jsword)(W) | 0x1)
+#define Thin_RemoveWait(W) ((jsword)(W) & ~0x1)
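+
+/*
+ * For example (illustrative only): if W holds a thread id with the low bit
+ * clear, Thin_SetWait(W) turns the wait bit on, Thin_GetWait(W | 1) is 1,
+ * and Thin_RemoveWait(W | 1) recovers the original id.
+ */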
+
+typedef struct JSFatLock JSFatLock;
+
+struct JSFatLock {
+ int susp;
+ PRLock *slock;
+ PRCondVar *svar;
+ JSFatLock *next;
+ JSFatLock **prevp;
+};
+
+typedef struct JSThinLock {
+ jsword owner;
+ JSFatLock *fat;
+} JSThinLock;
+
+#define CX_THINLOCK_ID(cx) ((jsword)(cx)->thread)
+#define CURRENT_THREAD_IS_ME(me) (((JSThread *)me)->id == js_CurrentThreadId())
+
+typedef PRLock JSLock;
+
+typedef struct JSFatLockTable {
+ JSFatLock *free;
+ JSFatLock *taken;
+} JSFatLockTable;
+
+/*
+ * Atomic increment and decrement for a reference counter, given jsrefcount *p.
+ * NB: jsrefcount is int32, aka PRInt32, so that pratom.h functions work.
+ */
+#define JS_ATOMIC_INCREMENT(p) PR_AtomicIncrement((PRInt32 *)(p))
+#define JS_ATOMIC_DECREMENT(p) PR_AtomicDecrement((PRInt32 *)(p))
+#define JS_ATOMIC_ADD(p,v) PR_AtomicAdd((PRInt32 *)(p), (PRInt32)(v))
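+
+/*
+ * Hypothetical usage sketch (not taken from the engine): given
+ * "jsrefcount nrefs = 1;", JS_ATOMIC_INCREMENT(&nrefs) and
+ * JS_ATOMIC_DECREMENT(&nrefs) adjust it atomically via NSPR, and
+ * JS_ATOMIC_ADD(&nrefs, 4) adds 4.
+ */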
+
+#define js_CurrentThreadId() (jsword)PR_GetCurrentThread()
+#define JS_NEW_LOCK() PR_NewLock()
+#define JS_DESTROY_LOCK(l) PR_DestroyLock(l)
+#define JS_ACQUIRE_LOCK(l) PR_Lock(l)
+#define JS_RELEASE_LOCK(l) PR_Unlock(l)
+#define JS_LOCK0(P,M) js_Lock(P,M)
+#define JS_UNLOCK0(P,M) js_Unlock(P,M)
+
+#define JS_NEW_CONDVAR(l) PR_NewCondVar(l)
+#define JS_DESTROY_CONDVAR(cv) PR_DestroyCondVar(cv)
+#define JS_WAIT_CONDVAR(cv,to) PR_WaitCondVar(cv,to)
+#define JS_NO_TIMEOUT PR_INTERVAL_NO_TIMEOUT
+#define JS_NOTIFY_CONDVAR(cv) PR_NotifyCondVar(cv)
+#define JS_NOTIFY_ALL_CONDVAR(cv) PR_NotifyAllCondVar(cv)
+
+/*
+ * Include jsscope.h so JS_LOCK_OBJ macro callers don't have to include it.
+ * Since there is a JSThinLock member in JSScope, we can't nest this include
+ * much earlier (see JSThinLock's typedef, above). Yes, that means there is
+ * an #include cycle between jslock.h and jsscope.h: moderate-sized XXX here,
+ * to be fixed by moving JS_LOCK_SCOPE to jsscope.h, JS_LOCK_OBJ to jsobj.h,
+ * and so on.
+ */
+#include "jsscope.h"
+
+#define JS_LOCK_RUNTIME(rt) js_LockRuntime(rt)
+#define JS_UNLOCK_RUNTIME(rt) js_UnlockRuntime(rt)
+
+/*
+ * NB: The JS_LOCK_OBJ and JS_UNLOCK_OBJ macros work *only* on native objects
+ * (objects for which OBJ_IS_NATIVE returns true). All uses of these macros in
+ * the engine are predicated on OBJ_IS_NATIVE or equivalent checks. These uses
+ * are for optimizations above the JSObjectOps layer, under which object locks
+ * normally hide.
+ */
+#define JS_LOCK_OBJ(cx,obj) ((OBJ_SCOPE(obj)->ownercx == (cx)) \
+ ? (void)0 \
+ : (js_LockObj(cx, obj)))
+#define JS_UNLOCK_OBJ(cx,obj) ((OBJ_SCOPE(obj)->ownercx == (cx)) \
+ ? (void)0 : js_UnlockObj(cx, obj))
+
+#define JS_LOCK_SCOPE(cx,scope) ((scope)->ownercx == (cx) ? (void)0 \
+ : js_LockScope(cx, scope))
+#define JS_UNLOCK_SCOPE(cx,scope) ((scope)->ownercx == (cx) ? (void)0 \
+ : js_UnlockScope(cx, scope))
+#define JS_TRANSFER_SCOPE_LOCK(cx, scope, newscope) \
+ js_TransferScopeLock(cx, scope, newscope)
+
+extern void js_LockRuntime(JSRuntime *rt);
+extern void js_UnlockRuntime(JSRuntime *rt);
+extern void js_LockObj(JSContext *cx, JSObject *obj);
+extern void js_UnlockObj(JSContext *cx, JSObject *obj);
+extern void js_LockScope(JSContext *cx, JSScope *scope);
+extern void js_UnlockScope(JSContext *cx, JSScope *scope);
+extern JSBool js_SetupLocks(int,int);
+extern void js_CleanupLocks();
+extern void js_TransferScopeLock(JSContext *, JSScope *, JSScope *);
+extern JS_FRIEND_API(jsval)
+js_GetSlotThreadSafe(JSContext *, JSObject *, uint32);
+extern void js_SetSlotThreadSafe(JSContext *, JSObject *, uint32, jsval);
+extern void js_InitLock(JSThinLock *);
+extern void js_FinishLock(JSThinLock *);
+extern void js_FinishSharingScope(JSRuntime *rt, JSScope *scope);
+
+#ifdef DEBUG
+
+#define JS_IS_RUNTIME_LOCKED(rt) js_IsRuntimeLocked(rt)
+#define JS_IS_OBJ_LOCKED(cx,obj) js_IsObjLocked(cx,obj)
+#define JS_IS_SCOPE_LOCKED(cx,scope) js_IsScopeLocked(cx,scope)
+
+extern JSBool js_IsRuntimeLocked(JSRuntime *rt);
+extern JSBool js_IsObjLocked(JSContext *cx, JSObject *obj);
+extern JSBool js_IsScopeLocked(JSContext *cx, JSScope *scope);
+
+#else
+
+#define JS_IS_RUNTIME_LOCKED(rt) 0
+#define JS_IS_OBJ_LOCKED(cx,obj) 1
+#define JS_IS_SCOPE_LOCKED(cx,scope) 1
+
+#endif /* DEBUG */
+
+#define JS_LOCK_OBJ_VOID(cx, obj, e) \
+ JS_BEGIN_MACRO \
+ JS_LOCK_OBJ(cx, obj); \
+ e; \
+ JS_UNLOCK_OBJ(cx, obj); \
+ JS_END_MACRO
+
+#define JS_LOCK_VOID(cx, e) \
+ JS_BEGIN_MACRO \
+ JSRuntime *_rt = (cx)->runtime; \
+ JS_LOCK_RUNTIME_VOID(_rt, e); \
+ JS_END_MACRO
+
+/* FIXME: bug 353962 hackaround */
+#define JS_USE_ONLY_NSPR_LOCKS 1
+
+#if defined(JS_USE_ONLY_NSPR_LOCKS) || \
+ !( (defined(_WIN32) && defined(_M_IX86)) || \
+ (defined(__GNUC__) && defined(__i386__)) || \
+ ((defined(__USLC__) || defined(_SCO_DS)) && defined(i386)) || \
+ (defined(SOLARIS) && defined(sparc) && defined(ULTRA_SPARC)) || \
+ defined(AIX) )
+
+#define NSPR_LOCK 1
+
+#undef JS_LOCK0
+#undef JS_UNLOCK0
+#define JS_LOCK0(P,M) (JS_ACQUIRE_LOCK(((JSLock*)(P)->fat)), (P)->owner = (M))
+#define JS_UNLOCK0(P,M) ((P)->owner = 0, JS_RELEASE_LOCK(((JSLock*)(P)->fat)))
+
+#else /* arch-tests */
+
+#undef NSPR_LOCK
+
+extern JS_INLINE void js_Lock(JSThinLock *tl, jsword me);
+extern JS_INLINE void js_Unlock(JSThinLock *tl, jsword me);
+
+#endif /* arch-tests */
+
+#else /* !JS_THREADSAFE */
+
+#define JS_ATOMIC_INCREMENT(p) (++*(p))
+#define JS_ATOMIC_DECREMENT(p) (--*(p))
+#define JS_ATOMIC_ADD(p,v) (*(p) += (v))
+
+#define JS_CurrentThreadId() 0
+#define JS_NEW_LOCK() NULL
+#define JS_DESTROY_LOCK(l) ((void)0)
+#define JS_ACQUIRE_LOCK(l) ((void)0)
+#define JS_RELEASE_LOCK(l) ((void)0)
+#define JS_LOCK0(P,M) ((void)0)
+#define JS_UNLOCK0(P,M) ((void)0)
+
+#define JS_NEW_CONDVAR(l) NULL
+#define JS_DESTROY_CONDVAR(cv) ((void)0)
+#define JS_WAIT_CONDVAR(cv,to) ((void)0)
+#define JS_NOTIFY_CONDVAR(cv) ((void)0)
+#define JS_NOTIFY_ALL_CONDVAR(cv) ((void)0)
+
+#define JS_LOCK_RUNTIME(rt) ((void)0)
+#define JS_UNLOCK_RUNTIME(rt) ((void)0)
+#define JS_LOCK_OBJ(cx,obj) ((void)0)
+#define JS_UNLOCK_OBJ(cx,obj) ((void)0)
+#define JS_LOCK_OBJ_VOID(cx,obj,e) (e)
+#define JS_LOCK_SCOPE(cx,scope) ((void)0)
+#define JS_UNLOCK_SCOPE(cx,scope) ((void)0)
+#define JS_TRANSFER_SCOPE_LOCK(c,o,n) ((void)0)
+
+#define JS_IS_RUNTIME_LOCKED(rt) 1
+#define JS_IS_OBJ_LOCKED(cx,obj) 1
+#define JS_IS_SCOPE_LOCKED(cx,scope) 1
+#define JS_LOCK_VOID(cx, e) JS_LOCK_RUNTIME_VOID((cx)->runtime, e)
+
+#endif /* !JS_THREADSAFE */
+
+#define JS_LOCK_RUNTIME_VOID(rt,e) \
+ JS_BEGIN_MACRO \
+ JS_LOCK_RUNTIME(rt); \
+ e; \
+ JS_UNLOCK_RUNTIME(rt); \
+ JS_END_MACRO
+
+#define JS_LOCK_GC(rt) JS_ACQUIRE_LOCK((rt)->gcLock)
+#define JS_UNLOCK_GC(rt) JS_RELEASE_LOCK((rt)->gcLock)
+#define JS_LOCK_GC_VOID(rt,e) (JS_LOCK_GC(rt), (e), JS_UNLOCK_GC(rt))
+#define JS_AWAIT_GC_DONE(rt) JS_WAIT_CONDVAR((rt)->gcDone, JS_NO_TIMEOUT)
+#define JS_NOTIFY_GC_DONE(rt) JS_NOTIFY_ALL_CONDVAR((rt)->gcDone)
+#define JS_AWAIT_REQUEST_DONE(rt) JS_WAIT_CONDVAR((rt)->requestDone, \
+ JS_NO_TIMEOUT)
+#define JS_NOTIFY_REQUEST_DONE(rt) JS_NOTIFY_CONDVAR((rt)->requestDone)
+
+#define JS_LOCK(P,CX) JS_LOCK0(P, CX_THINLOCK_ID(CX))
+#define JS_UNLOCK(P,CX) JS_UNLOCK0(P, CX_THINLOCK_ID(CX))
+
+#endif /* jslock_h___ */
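
The JS_LOCK_OBJ and JS_LOCK_SCOPE macros above take no lock at all while a scope is still exclusively owned by the context that created it (ownercx == cx); only after the scope becomes shared does the fat lock come into play. A minimal standalone sketch of that single-owner fast path, using hypothetical names and a plain pthread mutex in place of the engine's fat lock:

#include <pthread.h>

/* Hypothetical scope: exclusively owned by one context until it is shared,
 * after which the fat mutex protects it. */
typedef struct Scope {
    const void     *ownercx;   /* exclusive owner, or NULL once shared */
    pthread_mutex_t fat;       /* real lock, only used after sharing */
} Scope;

/* Fast path: the owning context pays nothing for the lock. */
static void scope_lock(Scope *s, const void *cx) {
    if (s->ownercx == cx)
        return;                     /* still single-owner: no locking cost */
    pthread_mutex_lock(&s->fat);    /* slow path once the scope is shared */
}

static void scope_unlock(Scope *s, const void *cx) {
    if (s->ownercx == cx)
        return;
    pthread_mutex_unlock(&s->fat);
}

/* Invoked when a second context first touches the scope
 * (the role js_FinishSharingScope plays above). */
static void scope_share(Scope *s) {
    s->ownercx = NULL;
}

int main(void) {
    int cx1, cx2;
    Scope s = { &cx1, PTHREAD_MUTEX_INITIALIZER };
    scope_lock(&s, &cx1);      /* owner: mutex never touched */
    scope_unlock(&s, &cx1);
    scope_share(&s);           /* a second context has appeared */
    scope_lock(&s, &cx2);      /* non-owner: real mutex acquired */
    scope_unlock(&s, &cx2);
    return 0;
}
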
diff --git a/src/third_party/js-1.7/jslocko.asm b/src/third_party/js-1.7/jslocko.asm
new file mode 100644
index 00000000000..95353ba1a09
--- /dev/null
+++ b/src/third_party/js-1.7/jslocko.asm
@@ -0,0 +1,60 @@
+; -*- Mode: asm; tab-width: 8; c-basic-offset: 4 -*-
+
+; ***** BEGIN LICENSE BLOCK *****
+; Version: MPL 1.1/GPL 2.0/LGPL 2.1
+;
+; The contents of this file are subject to the Mozilla Public License Version
+; 1.1 (the "License"); you may not use this file except in compliance with
+; the License. You may obtain a copy of the License at
+; http://www.mozilla.org/MPL/
+;
+; Software distributed under the License is distributed on an "AS IS" basis,
+; WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+; for the specific language governing rights and limitations under the
+; License.
+;
+; The Original Code is an OS/2 implementation of js_CompareAndSwap in assembly.
+;
+; The Initial Developer of the Original Code is
+; IBM Corporation.
+; Portions created by the Initial Developer are Copyright (C) 2001
+; the Initial Developer. All Rights Reserved.
+;
+; Contributor(s):
+;
+; Alternatively, the contents of this file may be used under the terms of
+; either the GNU General Public License Version 2 or later (the "GPL"), or
+; the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+; in which case the provisions of the GPL or the LGPL are applicable instead
+; of those above. If you wish to allow use of your version of this file only
+; under the terms of either the GPL or the LGPL, and not to allow others to
+; use your version of this file under the terms of the MPL, indicate your
+; decision by deleting the provisions above and replace them with the notice
+; and other provisions required by the GPL or the LGPL. If you do not delete
+; the provisions above, a recipient may use your version of this file under
+; the terms of any one of the MPL, the GPL or the LGPL.
+;
+; ***** END LICENSE BLOCK *****
+
+ .486P
+ .MODEL FLAT, OPTLINK
+ .STACK
+
+ .CODE
+
+;;;---------------------------------------------------------------------
+;;; int _Optlink js_CompareAndSwap(jsword *w, jsword ov, jsword nv)
+;;;---------------------------------------------------------------------
+js_CompareAndSwap PROC OPTLINK EXPORT
+ push ebx
+ mov ebx, eax
+ mov eax, edx
+ mov edx, ebx
+ lock cmpxchg [ebx], ecx
+ sete al
+ and eax, 1h
+ pop ebx
+ ret
+js_CompareAndSwap endp
+
+ END
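
jslocko.asm supplies js_CompareAndSwap for OS/2 as a LOCK CMPXCHG. Where a compiler offers atomic builtins, the same primitive can be written in C; a sketch assuming GCC-style __sync builtins (an assumption, not something this tree relies on):

#include <stdint.h>

typedef intptr_t jsword;   /* stand-in for the engine's jsword */

/* Returns non-zero if *w held ov and has been atomically replaced by nv. */
static int compare_and_swap(volatile jsword *w, jsword ov, jsword nv)
{
    return __sync_bool_compare_and_swap(w, ov, nv);
}

int main(void)
{
    jsword w = 0;
    return (compare_and_swap(&w, 0, 1) && w == 1) ? 0 : 1;
}
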
diff --git a/src/third_party/js-1.7/jslog2.c b/src/third_party/js-1.7/jslog2.c
new file mode 100644
index 00000000000..876e5285d26
--- /dev/null
+++ b/src/third_party/js-1.7/jslog2.c
@@ -0,0 +1,94 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#include "jsstddef.h"
+#include "jsbit.h"
+#include "jsutil.h"
+
+/*
+** Compute the log of the least power of 2 greater than or equal to n
+*/
+JS_PUBLIC_API(JSIntn) JS_CeilingLog2(JSUint32 n)
+{
+ JSIntn log2;
+
+ JS_CEILING_LOG2(log2, n);
+ return log2;
+}
+
+/*
+** Compute the log of the greatest power of 2 less than or equal to n.
+** This really just finds the highest set bit in the word.
+*/
+JS_PUBLIC_API(JSIntn) JS_FloorLog2(JSUint32 n)
+{
+ JSIntn log2;
+
+ JS_FLOOR_LOG2(log2, n);
+ return log2;
+}
+
+/*
+ * js_FloorLog2wImpl has to be defined only for the 64-bit non-GCC case.
+ */
+#if !defined(JS_HAS_GCC_BUILTIN_CLZ) && JS_BYTES_PER_WORD == 8
+
+JSUword
+js_FloorLog2wImpl(JSUword n)
+{
+ JSUword log2, m;
+
+ JS_ASSERT(n != 0);
+
+ log2 = 0;
+ m = n >> 32;
+ if (m != 0) { n = m; log2 = 32; }
+ m = n >> 16;
+ if (m != 0) { n = m; log2 |= 16; }
+ m = n >> 8;
+ if (m != 0) { n = m; log2 |= 8; }
+ m = n >> 4;
+ if (m != 0) { n = m; log2 |= 4; }
+ m = n >> 2;
+ if (m != 0) { n = m; log2 |= 2; }
+ log2 |= (n >> 1);
+
+ return log2;
+}
+
+#endif
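
js_FloorLog2wImpl locates the highest set bit by testing progressively smaller bit groups (32, 16, 8, 4, 2, 1). The same binary-search idea for 32-bit words, as a standalone illustration (the engine itself uses the JS_FLOOR_LOG2 and JS_CEILING_LOG2 macros from jsbit.h):

#include <assert.h>
#include <stdint.h>

/* Floor of log2(n), n != 0: probe progressively smaller bit groups. */
static unsigned floor_log2_32(uint32_t n)
{
    unsigned log2 = 0;
    uint32_t m;

    assert(n != 0);
    if ((m = n >> 16) != 0) { n = m; log2 += 16; }
    if ((m = n >> 8)  != 0) { n = m; log2 += 8; }
    if ((m = n >> 4)  != 0) { n = m; log2 += 4; }
    if ((m = n >> 2)  != 0) { n = m; log2 += 2; }
    return log2 + (n >> 1);
}

/* Ceiling of log2(n): one more than the floor unless n is a power of two. */
static unsigned ceil_log2_32(uint32_t n)
{
    unsigned f = floor_log2_32(n);
    return f + ((n & (n - 1)) != 0);
}

int main(void)
{
    assert(floor_log2_32(1) == 0 && floor_log2_32(64) == 6 && floor_log2_32(100) == 6);
    assert(ceil_log2_32(64) == 6 && ceil_log2_32(65) == 7);
    return 0;
}
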
diff --git a/src/third_party/js-1.7/jslong.c b/src/third_party/js-1.7/jslong.c
new file mode 100644
index 00000000000..9a4a5b4d789
--- /dev/null
+++ b/src/third_party/js-1.7/jslong.c
@@ -0,0 +1,281 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#include "jsstddef.h"
+#include "jstypes.h"
+#include "jslong.h"
+
+static JSInt64 ll_zero = JSLL_INIT( 0x00000000,0x00000000 );
+static JSInt64 ll_maxint = JSLL_INIT( 0x7fffffff, 0xffffffff );
+static JSInt64 ll_minint = JSLL_INIT( 0x80000000, 0x00000000 );
+
+#ifdef HAVE_WATCOM_BUG_2
+JSInt64 __pascal __loadds __export
+ JSLL_Zero(void) { return ll_zero; }
+JSInt64 __pascal __loadds __export
+ JSLL_MaxInt(void) { return ll_maxint; }
+JSInt64 __pascal __loadds __export
+ JSLL_MinInt(void) { return ll_minint; }
+#else
+JS_PUBLIC_API(JSInt64) JSLL_Zero(void) { return ll_zero; }
+JS_PUBLIC_API(JSInt64) JSLL_MaxInt(void) { return ll_maxint; }
+JS_PUBLIC_API(JSInt64) JSLL_MinInt(void) { return ll_minint; }
+#endif
+
+#ifndef JS_HAVE_LONG_LONG
+/*
+** Divide 64-bit a by 32-bit b, which must be normalized so its high bit is 1.
+*/
+static void norm_udivmod32(JSUint32 *qp, JSUint32 *rp, JSUint64 a, JSUint32 b)
+{
+ JSUint32 d1, d0, q1, q0;
+ JSUint32 r1, r0, m;
+
+ d1 = jshi16(b);
+ d0 = jslo16(b);
+ r1 = a.hi % d1;
+ q1 = a.hi / d1;
+ m = q1 * d0;
+ r1 = (r1 << 16) | jshi16(a.lo);
+ if (r1 < m) {
+ q1--, r1 += b;
+ if (r1 >= b /* i.e., we didn't get a carry when adding to r1 */
+ && r1 < m) {
+ q1--, r1 += b;
+ }
+ }
+ r1 -= m;
+ r0 = r1 % d1;
+ q0 = r1 / d1;
+ m = q0 * d0;
+ r0 = (r0 << 16) | jslo16(a.lo);
+ if (r0 < m) {
+ q0--, r0 += b;
+ if (r0 >= b
+ && r0 < m) {
+ q0--, r0 += b;
+ }
+ }
+ *qp = (q1 << 16) | q0;
+ *rp = r0 - m;
+}
+
+static JSUint32 CountLeadingZeros(JSUint32 a)
+{
+ JSUint32 t;
+ JSUint32 r = 32;
+
+ if ((t = a >> 16) != 0)
+ r -= 16, a = t;
+ if ((t = a >> 8) != 0)
+ r -= 8, a = t;
+ if ((t = a >> 4) != 0)
+ r -= 4, a = t;
+ if ((t = a >> 2) != 0)
+ r -= 2, a = t;
+ if ((t = a >> 1) != 0)
+ r -= 1, a = t;
+ if (a & 1)
+ r--;
+ return r;
+}
+
+JS_PUBLIC_API(void) jsll_udivmod(JSUint64 *qp, JSUint64 *rp, JSUint64 a, JSUint64 b)
+{
+ JSUint32 n0, n1, n2;
+ JSUint32 q0, q1;
+ JSUint32 rsh, lsh;
+
+ n0 = a.lo;
+ n1 = a.hi;
+
+ if (b.hi == 0) {
+ if (b.lo > n1) {
+ /* (0 q0) = (n1 n0) / (0 D0) */
+
+ lsh = CountLeadingZeros(b.lo);
+
+ if (lsh) {
+ /*
+ * Normalize, i.e. make the most significant bit of the
+ * denominator be set.
+ */
+ b.lo = b.lo << lsh;
+ n1 = (n1 << lsh) | (n0 >> (32 - lsh));
+ n0 = n0 << lsh;
+ }
+
+ a.lo = n0, a.hi = n1;
+ norm_udivmod32(&q0, &n0, a, b.lo);
+ q1 = 0;
+
+ /* remainder is in n0 >> lsh */
+ } else {
+ /* (q1 q0) = (n1 n0) / (0 d0) */
+
+ if (b.lo == 0) /* user wants to divide by zero! */
+ b.lo = 1 / b.lo; /* so go ahead and crash */
+
+ lsh = CountLeadingZeros(b.lo);
+
+ if (lsh == 0) {
+ /*
+ * From (n1 >= b.lo)
+ * && (the most significant bit of b.lo is set),
+ * conclude that
+ * (the most significant bit of n1 is set)
+ * && (the leading quotient digit q1 = 1).
+ *
+ * This special case is necessary, not an optimization
+                 * (Shift counts of 32 are undefined).
+ */
+ n1 -= b.lo;
+ q1 = 1;
+ } else {
+ /*
+ * Normalize.
+ */
+ rsh = 32 - lsh;
+
+ b.lo = b.lo << lsh;
+ n2 = n1 >> rsh;
+ n1 = (n1 << lsh) | (n0 >> rsh);
+ n0 = n0 << lsh;
+
+ a.lo = n1, a.hi = n2;
+ norm_udivmod32(&q1, &n1, a, b.lo);
+ }
+
+ /* n1 != b.lo... */
+
+ a.lo = n0, a.hi = n1;
+ norm_udivmod32(&q0, &n0, a, b.lo);
+
+ /* remainder in n0 >> lsh */
+ }
+
+ if (rp) {
+ rp->lo = n0 >> lsh;
+ rp->hi = 0;
+ }
+ } else {
+ if (b.hi > n1) {
+ /* (0 0) = (n1 n0) / (D1 d0) */
+
+ q0 = 0;
+ q1 = 0;
+
+ /* remainder in (n1 n0) */
+ if (rp) {
+ rp->lo = n0;
+ rp->hi = n1;
+ }
+ } else {
+ /* (0 q0) = (n1 n0) / (d1 d0) */
+
+ lsh = CountLeadingZeros(b.hi);
+ if (lsh == 0) {
+ /*
+ * From (n1 >= b.hi)
+ * && (the most significant bit of b.hi is set),
+ * conclude that
+ * (the most significant bit of n1 is set)
+ * && (the quotient digit q0 = 0 or 1).
+ *
+ * This special case is necessary, not an optimization.
+ */
+
+ /*
+                 * The condition on the next line takes advantage of the
+                 * fact that n1 >= b.hi (true due to control flow).
+ */
+ if (n1 > b.hi || n0 >= b.lo) {
+ q0 = 1;
+ a.lo = n0, a.hi = n1;
+ JSLL_SUB(a, a, b);
+ } else {
+ q0 = 0;
+ }
+ q1 = 0;
+
+ if (rp) {
+ rp->lo = n0;
+ rp->hi = n1;
+ }
+ } else {
+ JSInt64 m;
+
+ /*
+ * Normalize.
+ */
+ rsh = 32 - lsh;
+
+ b.hi = (b.hi << lsh) | (b.lo >> rsh);
+ b.lo = b.lo << lsh;
+ n2 = n1 >> rsh;
+ n1 = (n1 << lsh) | (n0 >> rsh);
+ n0 = n0 << lsh;
+
+ a.lo = n1, a.hi = n2;
+ norm_udivmod32(&q0, &n1, a, b.hi);
+ JSLL_MUL32(m, q0, b.lo);
+
+ if ((m.hi > n1) || ((m.hi == n1) && (m.lo > n0))) {
+ q0--;
+ JSLL_SUB(m, m, b);
+ }
+
+ q1 = 0;
+
+ /* Remainder is ((n1 n0) - (m1 m0)) >> lsh */
+ if (rp) {
+ a.lo = n0, a.hi = n1;
+ JSLL_SUB(a, a, m);
+ rp->lo = (a.hi << rsh) | (a.lo >> lsh);
+ rp->hi = a.hi >> lsh;
+ }
+ }
+ }
+ }
+
+ if (qp) {
+ qp->lo = q0;
+ qp->hi = q1;
+ }
+}
+#endif /* !JS_HAVE_LONG_LONG */
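
jsll_udivmod builds 64-bit division out of 32-bit halves: it normalizes the divisor so its top bit is set, divides 16 bits at a time with norm_udivmod32, and shifts the remainder back down. One practical way to sanity-check any such emulation is to verify the division identity a == q*b + r with r < b against native 64-bit arithmetic; a sketch (hypothetical helpers, assuming a host compiler that does have uint64_t):

#include <assert.h>
#include <stdint.h>

/* Mirror of the {lo, hi} pair used by the emulation above. */
typedef struct { uint32_t lo, hi; } u64parts;

static u64parts split_u64(uint64_t x)
{
    u64parts p = { (uint32_t)x, (uint32_t)(x >> 32) };
    return p;
}

static uint64_t join_u64(u64parts p)
{
    return ((uint64_t)p.hi << 32) | p.lo;
}

/* Whatever quotient q and remainder r an emulated udivmod produced for (a, b),
 * they must satisfy a == q*b + r with r < b. */
static void check_udivmod(uint64_t a, uint64_t b, u64parts q, u64parts r)
{
    assert(b != 0);
    assert(join_u64(r) < b);
    assert(join_u64(q) * b + join_u64(r) == a);
}

int main(void)
{
    uint64_t a = 0x123456789abcdef0ULL, b = 0x1f00000000ULL;
    /* Native results stand in here for the emulated ones. */
    u64parts q = split_u64(a / b), r = split_u64(a % b);
    check_udivmod(a, b, q, r);
    return 0;
}
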
diff --git a/src/third_party/js-1.7/jslong.h b/src/third_party/js-1.7/jslong.h
new file mode 100644
index 00000000000..059cf00bb22
--- /dev/null
+++ b/src/third_party/js-1.7/jslong.h
@@ -0,0 +1,437 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+** File: jslong.h
+** Description: Portable access to 64 bit numerics
+**
+** Long-long (64-bit signed integer type) support. Some C compilers
+** don't support 64 bit integers yet, so we use these macros to
+** support both machines that do and don't.
+**/
+#ifndef jslong_h___
+#define jslong_h___
+
+#include "jstypes.h"
+
+JS_BEGIN_EXTERN_C
+
+/***********************************************************************
+** DEFINES: JSLL_MaxInt
+** JSLL_MinInt
+** JSLL_Zero
+** DESCRIPTION:
+** Various interesting constants and static variable
+** initializer
+***********************************************************************/
+#ifdef HAVE_WATCOM_BUG_2
+JSInt64 __pascal __loadds __export
+ JSLL_MaxInt(void);
+JSInt64 __pascal __loadds __export
+ JSLL_MinInt(void);
+JSInt64 __pascal __loadds __export
+ JSLL_Zero(void);
+#else
+extern JS_PUBLIC_API(JSInt64) JSLL_MaxInt(void);
+extern JS_PUBLIC_API(JSInt64) JSLL_MinInt(void);
+extern JS_PUBLIC_API(JSInt64) JSLL_Zero(void);
+#endif
+
+#define JSLL_MAXINT JSLL_MaxInt()
+#define JSLL_MININT JSLL_MinInt()
+#define JSLL_ZERO JSLL_Zero()
+
+#ifdef JS_HAVE_LONG_LONG
+
+#if JS_BYTES_PER_LONG == 8
+#define JSLL_INIT(hi, lo) ((hi ## L << 32) + lo ## L)
+#elif (defined(WIN32) || defined(WIN16)) && !defined(__GNUC__)
+#define JSLL_INIT(hi, lo) ((hi ## i64 << 32) + lo ## i64)
+#else
+#define JSLL_INIT(hi, lo) ((hi ## LL << 32) + lo ## LL)
+#endif
+
+/***********************************************************************
+** MACROS: JSLL_*
+** DESCRIPTION:
+** The following macros define portable access to the 64 bit
+** math facilities.
+**
+***********************************************************************/
+
+/***********************************************************************
+** MACROS: JSLL_<relational operators>
+**
+** JSLL_IS_ZERO Test for zero
+** JSLL_EQ Test for equality
+** JSLL_NE Test for inequality
+** JSLL_GE_ZERO Test for zero or positive
+** JSLL_CMP Compare two values
+***********************************************************************/
+#define JSLL_IS_ZERO(a) ((a) == 0)
+#define JSLL_EQ(a, b) ((a) == (b))
+#define JSLL_NE(a, b) ((a) != (b))
+#define JSLL_GE_ZERO(a) ((a) >= 0)
+#define JSLL_CMP(a, op, b) ((JSInt64)(a) op (JSInt64)(b))
+#define JSLL_UCMP(a, op, b) ((JSUint64)(a) op (JSUint64)(b))
+
+/***********************************************************************
+** MACROS: JSLL_<logical operators>
+**
+** JSLL_AND Logical and
+** JSLL_OR Logical or
+** JSLL_XOR Logical exclusion
+** JSLL_OR2 A disgusting deviation
+**      JSLL_NOT            Negation (one's complement)
+***********************************************************************/
+#define JSLL_AND(r, a, b) ((r) = (a) & (b))
+#define JSLL_OR(r, a, b) ((r) = (a) | (b))
+#define JSLL_XOR(r, a, b) ((r) = (a) ^ (b))
+#define JSLL_OR2(r, a) ((r) = (r) | (a))
+#define JSLL_NOT(r, a) ((r) = ~(a))
+
+/***********************************************************************
+** MACROS: JSLL_<mathematical operators>
+**
+**      JSLL_NEG            Negation (two's complement)
+**      JSLL_ADD            Summation (two's complement)
+**      JSLL_SUB            Difference (two's complement)
+***********************************************************************/
+#define JSLL_NEG(r, a) ((r) = -(a))
+#define JSLL_ADD(r, a, b) ((r) = (a) + (b))
+#define JSLL_SUB(r, a, b) ((r) = (a) - (b))
+
+/***********************************************************************
+** MACROS: JSLL_<mathematical operators>
+**
+**      JSLL_MUL            Product (two's complement)
+**      JSLL_DIV            Quotient (two's complement)
+**      JSLL_MOD            Modulus (two's complement)
+***********************************************************************/
+#define JSLL_MUL(r, a, b) ((r) = (a) * (b))
+#define JSLL_DIV(r, a, b) ((r) = (a) / (b))
+#define JSLL_MOD(r, a, b) ((r) = (a) % (b))
+
+/***********************************************************************
+** MACROS: JSLL_<shifting operators>
+**
+** JSLL_SHL Shift left [0..64] bits
+** JSLL_SHR Shift right [0..64] bits with sign extension
+** JSLL_USHR Unsigned shift right [0..64] bits
+** JSLL_ISHL Signed shift left [0..64] bits
+***********************************************************************/
+#define JSLL_SHL(r, a, b) ((r) = (JSInt64)(a) << (b))
+#define JSLL_SHR(r, a, b) ((r) = (JSInt64)(a) >> (b))
+#define JSLL_USHR(r, a, b) ((r) = (JSUint64)(a) >> (b))
+#define JSLL_ISHL(r, a, b) ((r) = (JSInt64)(a) << (b))
+
+/***********************************************************************
+** MACROS: JSLL_<conversion operators>
+**
+** JSLL_L2I Convert to signed 32 bit
+** JSLL_L2UI Convert to unsigned 32 bit
+** JSLL_L2F Convert to floating point
+** JSLL_L2D Convert to floating point
+** JSLL_I2L Convert signed to 64 bit
+** JSLL_UI2L Convert unsigned to 64 bit
+** JSLL_F2L Convert float to 64 bit
+**      JSLL_D2L        Convert double to 64 bit
+***********************************************************************/
+#define JSLL_L2I(i, l) ((i) = (JSInt32)(l))
+#define JSLL_L2UI(ui, l) ((ui) = (JSUint32)(l))
+#define JSLL_L2F(f, l) ((f) = (JSFloat64)(l))
+#define JSLL_L2D(d, l) ((d) = (JSFloat64)(l))
+
+#define JSLL_I2L(l, i) ((l) = (JSInt64)(i))
+#define JSLL_UI2L(l, ui) ((l) = (JSInt64)(ui))
+#define JSLL_F2L(l, f) ((l) = (JSInt64)(f))
+#define JSLL_D2L(l, d) ((l) = (JSInt64)(d))
+
+/***********************************************************************
+** MACROS: JSLL_UDIVMOD
+** DESCRIPTION:
+**     Produce both a quotient and a remainder for an unsigned division
+** INPUTS:      JSUint64 a: The dividend of the operation
+**              JSUint64 b: The divisor of the operation
+** OUTPUTS: JSUint64 *qp: pointer to quotient
+** JSUint64 *rp: pointer to remainder
+***********************************************************************/
+#define JSLL_UDIVMOD(qp, rp, a, b) \
+ (*(qp) = ((JSUint64)(a) / (b)), \
+ *(rp) = ((JSUint64)(a) % (b)))
+
+#else /* !JS_HAVE_LONG_LONG */
+
+#ifdef IS_LITTLE_ENDIAN
+#define JSLL_INIT(hi, lo) {JS_INT32(lo), JS_INT32(hi)}
+#else
+#define JSLL_INIT(hi, lo) {JS_INT32(hi), JS_INT32(lo)}
+#endif
+
+#define JSLL_IS_ZERO(a) (((a).hi == 0) && ((a).lo == 0))
+#define JSLL_EQ(a, b) (((a).hi == (b).hi) && ((a).lo == (b).lo))
+#define JSLL_NE(a, b) (((a).hi != (b).hi) || ((a).lo != (b).lo))
+#define JSLL_GE_ZERO(a) (((a).hi >> 31) == 0)
+
+#ifdef DEBUG
+#define JSLL_CMP(a, op, b) (JS_ASSERT((#op)[1] != '='), JSLL_REAL_CMP(a, op, b))
+#define JSLL_UCMP(a, op, b) (JS_ASSERT((#op)[1] != '='), JSLL_REAL_UCMP(a, op, b))
+#else
+#define JSLL_CMP(a, op, b) JSLL_REAL_CMP(a, op, b)
+#define JSLL_UCMP(a, op, b) JSLL_REAL_UCMP(a, op, b)
+#endif
+
+#define JSLL_REAL_CMP(a,op,b) (((JSInt32)(a).hi op (JSInt32)(b).hi) || \
+ (((a).hi == (b).hi) && ((a).lo op (b).lo)))
+#define JSLL_REAL_UCMP(a,op,b) (((a).hi op (b).hi) || \
+ (((a).hi == (b).hi) && ((a).lo op (b).lo)))
+
+#define JSLL_AND(r, a, b) ((r).lo = (a).lo & (b).lo, \
+ (r).hi = (a).hi & (b).hi)
+#define JSLL_OR(r, a, b) ((r).lo = (a).lo | (b).lo, \
+ (r).hi = (a).hi | (b).hi)
+#define JSLL_XOR(r, a, b) ((r).lo = (a).lo ^ (b).lo, \
+ (r).hi = (a).hi ^ (b).hi)
+#define JSLL_OR2(r, a) ((r).lo = (r).lo | (a).lo, \
+ (r).hi = (r).hi | (a).hi)
+#define JSLL_NOT(r, a) ((r).lo = ~(a).lo, \
+ (r).hi = ~(a).hi)
+
+#define JSLL_NEG(r, a) ((r).lo = -(JSInt32)(a).lo, \
+ (r).hi = -(JSInt32)(a).hi - ((r).lo != 0))
+#define JSLL_ADD(r, a, b) { \
+ JSInt64 _a, _b; \
+ _a = a; _b = b; \
+ (r).lo = _a.lo + _b.lo; \
+ (r).hi = _a.hi + _b.hi + ((r).lo < _b.lo); \
+}
+
+#define JSLL_SUB(r, a, b) { \
+ JSInt64 _a, _b; \
+ _a = a; _b = b; \
+ (r).lo = _a.lo - _b.lo; \
+ (r).hi = _a.hi - _b.hi - (_a.lo < _b.lo); \
+}
+
+#define JSLL_MUL(r, a, b) { \
+ JSInt64 _a, _b; \
+ _a = a; _b = b; \
+ JSLL_MUL32(r, _a.lo, _b.lo); \
+ (r).hi += _a.hi * _b.lo + _a.lo * _b.hi; \
+}
+
+#define jslo16(a) ((a) & JS_BITMASK(16))
+#define jshi16(a) ((a) >> 16)
+
+#define JSLL_MUL32(r, a, b) { \
+ JSUint32 _a1, _a0, _b1, _b0, _y0, _y1, _y2, _y3; \
+ _a1 = jshi16(a), _a0 = jslo16(a); \
+ _b1 = jshi16(b), _b0 = jslo16(b); \
+ _y0 = _a0 * _b0; \
+ _y1 = _a0 * _b1; \
+ _y2 = _a1 * _b0; \
+ _y3 = _a1 * _b1; \
+ _y1 += jshi16(_y0); /* can't carry */ \
+ _y1 += _y2; /* might carry */ \
+ if (_y1 < _y2) \
+ _y3 += (JSUint32)(JS_BIT(16)); /* propagate */ \
+ (r).lo = (jslo16(_y1) << 16) + jslo16(_y0); \
+ (r).hi = _y3 + jshi16(_y1); \
+}
+
+#define JSLL_UDIVMOD(qp, rp, a, b) jsll_udivmod(qp, rp, a, b)
+
+extern JS_PUBLIC_API(void) jsll_udivmod(JSUint64 *qp, JSUint64 *rp, JSUint64 a, JSUint64 b);
+
+#define JSLL_DIV(r, a, b) { \
+ JSInt64 _a, _b; \
+ JSUint32 _negative = (JSInt32)(a).hi < 0; \
+ if (_negative) { \
+ JSLL_NEG(_a, a); \
+ } else { \
+ _a = a; \
+ } \
+ if ((JSInt32)(b).hi < 0) { \
+ _negative ^= 1; \
+ JSLL_NEG(_b, b); \
+ } else { \
+ _b = b; \
+ } \
+ JSLL_UDIVMOD(&(r), 0, _a, _b); \
+ if (_negative) \
+ JSLL_NEG(r, r); \
+}
+
+#define JSLL_MOD(r, a, b) { \
+ JSInt64 _a, _b; \
+ JSUint32 _negative = (JSInt32)(a).hi < 0; \
+ if (_negative) { \
+ JSLL_NEG(_a, a); \
+ } else { \
+ _a = a; \
+ } \
+ if ((JSInt32)(b).hi < 0) { \
+ JSLL_NEG(_b, b); \
+ } else { \
+ _b = b; \
+ } \
+ JSLL_UDIVMOD(0, &(r), _a, _b); \
+ if (_negative) \
+ JSLL_NEG(r, r); \
+}
+
+#define JSLL_SHL(r, a, b) { \
+ if (b) { \
+ JSInt64 _a; \
+ _a = a; \
+ if ((b) < 32) { \
+ (r).lo = _a.lo << ((b) & 31); \
+ (r).hi = (_a.hi << ((b) & 31)) | (_a.lo >> (32 - (b))); \
+ } else { \
+ (r).lo = 0; \
+ (r).hi = _a.lo << ((b) & 31); \
+ } \
+ } else { \
+ (r) = (a); \
+ } \
+}
+
+/* a is a JSInt32, b is a JSInt32, r is a JSInt64 */
+#define JSLL_ISHL(r, a, b) { \
+ if (b) { \
+ JSInt64 _a; \
+ _a.lo = (a); \
+ _a.hi = 0; \
+ if ((b) < 32) { \
+ (r).lo = (a) << ((b) & 31); \
+ (r).hi = ((a) >> (32 - (b))); \
+ } else { \
+ (r).lo = 0; \
+ (r).hi = (a) << ((b) & 31); \
+ } \
+ } else { \
+ (r).lo = (a); \
+ (r).hi = 0; \
+ } \
+}
+
+#define JSLL_SHR(r, a, b) { \
+ if (b) { \
+ JSInt64 _a; \
+ _a = a; \
+ if ((b) < 32) { \
+ (r).lo = (_a.hi << (32 - (b))) | (_a.lo >> ((b) & 31)); \
+ (r).hi = (JSInt32)_a.hi >> ((b) & 31); \
+ } else { \
+ (r).lo = (JSInt32)_a.hi >> ((b) & 31); \
+ (r).hi = (JSInt32)_a.hi >> 31; \
+ } \
+ } else { \
+ (r) = (a); \
+ } \
+}
+
+#define JSLL_USHR(r, a, b) { \
+ if (b) { \
+ JSInt64 _a; \
+ _a = a; \
+ if ((b) < 32) { \
+ (r).lo = (_a.hi << (32 - (b))) | (_a.lo >> ((b) & 31)); \
+ (r).hi = _a.hi >> ((b) & 31); \
+ } else { \
+ (r).lo = _a.hi >> ((b) & 31); \
+ (r).hi = 0; \
+ } \
+ } else { \
+ (r) = (a); \
+ } \
+}
+
+#define JSLL_L2I(i, l) ((i) = (l).lo)
+#define JSLL_L2UI(ui, l) ((ui) = (l).lo)
+#define JSLL_L2F(f, l) { double _d; JSLL_L2D(_d, l); (f) = (JSFloat64)_d; }
+
+#define JSLL_L2D(d, l) { \
+ int _negative; \
+ JSInt64 _absval; \
+ \
+ _negative = (l).hi >> 31; \
+ if (_negative) { \
+ JSLL_NEG(_absval, l); \
+ } else { \
+ _absval = l; \
+ } \
+ (d) = (double)_absval.hi * 4.294967296e9 + _absval.lo; \
+ if (_negative) \
+ (d) = -(d); \
+}
+
+#define JSLL_I2L(l, i) { JSInt32 _i = (i) >> 31; (l).lo = (i); (l).hi = _i; }
+#define JSLL_UI2L(l, ui) ((l).lo = (ui), (l).hi = 0)
+#define JSLL_F2L(l, f) { double _d = (double)f; JSLL_D2L(l, _d); }
+
+#define JSLL_D2L(l, d) { \
+ int _negative; \
+ double _absval, _d_hi; \
+ JSInt64 _lo_d; \
+ \
+ _negative = ((d) < 0); \
+ _absval = _negative ? -(d) : (d); \
+ \
+ (l).hi = _absval / 4.294967296e9; \
+ (l).lo = 0; \
+ JSLL_L2D(_d_hi, l); \
+ _absval -= _d_hi; \
+ _lo_d.hi = 0; \
+ if (_absval < 0) { \
+ _lo_d.lo = -_absval; \
+ JSLL_SUB(l, l, _lo_d); \
+ } else { \
+ _lo_d.lo = _absval; \
+ JSLL_ADD(l, l, _lo_d); \
+ } \
+ \
+ if (_negative) \
+ JSLL_NEG(l, l); \
+}
+
+#endif /* !JS_HAVE_LONG_LONG */
+
+JS_END_EXTERN_C
+
+#endif /* jslong_h___ */
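
In the !JS_HAVE_LONG_LONG branch above, JSLL_ADD detects a carry out of the low word by checking whether the low word of the sum ended up smaller than one of the addends. A standalone sketch of that carry rule, checked against a native 64-bit add (illustrative names only):

#include <assert.h>
#include <stdint.h>

typedef struct { uint32_t lo, hi; } emu64;

/* 64-bit add on 32-bit halves: carry out of the low word iff lo wrapped,
 * i.e. the result's low word is smaller than one of the addends. */
static emu64 emu_add(emu64 a, emu64 b)
{
    emu64 r;
    r.lo = a.lo + b.lo;
    r.hi = a.hi + b.hi + (r.lo < b.lo);   /* same carry test as JSLL_ADD */
    return r;
}

int main(void)
{
    uint64_t x = 0x00000001ffffffffULL, y = 0x0000000000000002ULL;
    emu64 a = { (uint32_t)x, (uint32_t)(x >> 32) };
    emu64 b = { (uint32_t)y, (uint32_t)(y >> 32) };
    emu64 s = emu_add(a, b);

    assert((((uint64_t)s.hi << 32) | s.lo) == x + y);
    return 0;
}
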
diff --git a/src/third_party/js-1.7/jsmath.c b/src/third_party/js-1.7/jsmath.c
new file mode 100644
index 00000000000..2062916324b
--- /dev/null
+++ b/src/third_party/js-1.7/jsmath.c
@@ -0,0 +1,514 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS math package.
+ */
+#include "jsstddef.h"
+#include "jslibmath.h"
+#include <stdlib.h>
+#include "jstypes.h"
+#include "jslong.h"
+#include "prmjtime.h"
+#include "jsapi.h"
+#include "jsatom.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jslock.h"
+#include "jsmath.h"
+#include "jsnum.h"
+#include "jsobj.h"
+
+#ifndef M_E
+#define M_E 2.7182818284590452354
+#endif
+#ifndef M_LOG2E
+#define M_LOG2E 1.4426950408889634074
+#endif
+#ifndef M_LOG10E
+#define M_LOG10E 0.43429448190325182765
+#endif
+#ifndef M_LN2
+#define M_LN2 0.69314718055994530942
+#endif
+#ifndef M_LN10
+#define M_LN10 2.30258509299404568402
+#endif
+#ifndef M_PI
+#define M_PI 3.14159265358979323846
+#endif
+#ifndef M_SQRT2
+#define M_SQRT2 1.41421356237309504880
+#endif
+#ifndef M_SQRT1_2
+#define M_SQRT1_2 0.70710678118654752440
+#endif
+
+static JSConstDoubleSpec math_constants[] = {
+ {M_E, "E", 0, {0,0,0}},
+ {M_LOG2E, "LOG2E", 0, {0,0,0}},
+ {M_LOG10E, "LOG10E", 0, {0,0,0}},
+ {M_LN2, "LN2", 0, {0,0,0}},
+ {M_LN10, "LN10", 0, {0,0,0}},
+ {M_PI, "PI", 0, {0,0,0}},
+ {M_SQRT2, "SQRT2", 0, {0,0,0}},
+ {M_SQRT1_2, "SQRT1_2", 0, {0,0,0}},
+ {0,0,0,{0,0,0}}
+};
+
+JSClass js_MathClass = {
+ js_Math_str,
+ JSCLASS_HAS_CACHED_PROTO(JSProto_Math),
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+};
+
+static JSBool
+math_abs(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, z;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+ z = fd_fabs(x);
+ return js_NewNumberValue(cx, z, rval);
+}
+
+static JSBool
+math_acos(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, z;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+ z = fd_acos(x);
+ return js_NewNumberValue(cx, z, rval);
+}
+
+static JSBool
+math_asin(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, z;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+ z = fd_asin(x);
+ return js_NewNumberValue(cx, z, rval);
+}
+
+static JSBool
+math_atan(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, z;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+ z = fd_atan(x);
+ return js_NewNumberValue(cx, z, rval);
+}
+
+static JSBool
+math_atan2(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, y, z;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+ if (!js_ValueToNumber(cx, argv[1], &y))
+ return JS_FALSE;
+#if !JS_USE_FDLIBM_MATH && defined(_MSC_VER)
+ /*
+ * MSVC's atan2 does not yield the result demanded by ECMA when both x
+ * and y are infinite.
+ * - The result is a multiple of pi/4.
+ * - The sign of x determines the sign of the result.
+     * - The sign of y determines the multiplier, 1 or 3.
+ */
+ if (JSDOUBLE_IS_INFINITE(x) && JSDOUBLE_IS_INFINITE(y)) {
+ z = fd_copysign(M_PI / 4, x);
+ if (y < 0)
+ z *= 3;
+ return js_NewDoubleValue(cx, z, rval);
+ }
+#endif
+ z = fd_atan2(x, y);
+ return js_NewNumberValue(cx, z, rval);
+}
+
+static JSBool
+math_ceil(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, z;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+ z = fd_ceil(x);
+ return js_NewNumberValue(cx, z, rval);
+}
+
+static JSBool
+math_cos(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, z;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+ z = fd_cos(x);
+ return js_NewNumberValue(cx, z, rval);
+}
+
+static JSBool
+math_exp(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, z;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+#ifdef _WIN32
+ if (!JSDOUBLE_IS_NaN(x)) {
+ if (x == *cx->runtime->jsPositiveInfinity) {
+ *rval = DOUBLE_TO_JSVAL(cx->runtime->jsPositiveInfinity);
+ return JS_TRUE;
+ }
+ if (x == *cx->runtime->jsNegativeInfinity) {
+ *rval = JSVAL_ZERO;
+ return JS_TRUE;
+ }
+ }
+#endif
+ z = fd_exp(x);
+ return js_NewNumberValue(cx, z, rval);
+}
+
+static JSBool
+math_floor(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, z;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+ z = fd_floor(x);
+ return js_NewNumberValue(cx, z, rval);
+}
+
+static JSBool
+math_log(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, z;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+ z = fd_log(x);
+ return js_NewNumberValue(cx, z, rval);
+}
+
+static JSBool
+math_max(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, z = *cx->runtime->jsNegativeInfinity;
+ uintN i;
+
+ if (argc == 0) {
+ *rval = DOUBLE_TO_JSVAL(cx->runtime->jsNegativeInfinity);
+ return JS_TRUE;
+ }
+ for (i = 0; i < argc; i++) {
+ if (!js_ValueToNumber(cx, argv[i], &x))
+ return JS_FALSE;
+ if (JSDOUBLE_IS_NaN(x)) {
+ *rval = DOUBLE_TO_JSVAL(cx->runtime->jsNaN);
+ return JS_TRUE;
+ }
+ if (x == 0 && x == z && fd_copysign(1.0, z) == -1)
+ z = x;
+ else
+ z = (x > z) ? x : z;
+ }
+ return js_NewNumberValue(cx, z, rval);
+}
+
+static JSBool
+math_min(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, z = *cx->runtime->jsPositiveInfinity;
+ uintN i;
+
+ if (argc == 0) {
+ *rval = DOUBLE_TO_JSVAL(cx->runtime->jsPositiveInfinity);
+ return JS_TRUE;
+ }
+ for (i = 0; i < argc; i++) {
+ if (!js_ValueToNumber(cx, argv[i], &x))
+ return JS_FALSE;
+ if (JSDOUBLE_IS_NaN(x)) {
+ *rval = DOUBLE_TO_JSVAL(cx->runtime->jsNaN);
+ return JS_TRUE;
+ }
+ if (x == 0 && x == z && fd_copysign(1.0,x) == -1)
+ z = x;
+ else
+ z = (x < z) ? x : z;
+ }
+ return js_NewNumberValue(cx, z, rval);
+}
+
+static JSBool
+math_pow(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, y, z;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+ if (!js_ValueToNumber(cx, argv[1], &y))
+ return JS_FALSE;
+#if !JS_USE_FDLIBM_MATH
+ /*
+ * Because C99 and ECMA specify different behavior for pow(),
+ * we need to wrap the libm call to make it ECMA compliant.
+ */
+ if (!JSDOUBLE_IS_FINITE(y) && (x == 1.0 || x == -1.0)) {
+ *rval = DOUBLE_TO_JSVAL(cx->runtime->jsNaN);
+ return JS_TRUE;
+ }
+ /* pow(x, +-0) is always 1, even for x = NaN. */
+ if (y == 0) {
+ *rval = JSVAL_ONE;
+ return JS_TRUE;
+ }
+#endif
+ z = fd_pow(x, y);
+ return js_NewNumberValue(cx, z, rval);
+}
+
+/*
+ * Math.random() support, lifted from java.util.Random.java.
+ */
+static void
+random_setSeed(JSRuntime *rt, int64 seed)
+{
+ int64 tmp;
+
+ JSLL_I2L(tmp, 1000);
+ JSLL_DIV(seed, seed, tmp);
+ JSLL_XOR(tmp, seed, rt->rngMultiplier);
+ JSLL_AND(rt->rngSeed, tmp, rt->rngMask);
+}
+
+static void
+random_init(JSRuntime *rt)
+{
+ int64 tmp, tmp2;
+
+ /* Do at most once. */
+ if (rt->rngInitialized)
+ return;
+ rt->rngInitialized = JS_TRUE;
+
+ /* rt->rngMultiplier = 0x5DEECE66DL */
+ JSLL_ISHL(tmp, 0x5, 32);
+ JSLL_UI2L(tmp2, 0xDEECE66DL);
+ JSLL_OR(rt->rngMultiplier, tmp, tmp2);
+
+ /* rt->rngAddend = 0xBL */
+ JSLL_I2L(rt->rngAddend, 0xBL);
+
+ /* rt->rngMask = (1L << 48) - 1 */
+ JSLL_I2L(tmp, 1);
+ JSLL_SHL(tmp2, tmp, 48);
+ JSLL_SUB(rt->rngMask, tmp2, tmp);
+
+ /* rt->rngDscale = (jsdouble)(1L << 53) */
+ JSLL_SHL(tmp2, tmp, 53);
+ JSLL_L2D(rt->rngDscale, tmp2);
+
+ /* Finally, set the seed from current time. */
+ random_setSeed(rt, PRMJ_Now());
+}
+
+static uint32
+random_next(JSRuntime *rt, int bits)
+{
+ int64 nextseed, tmp;
+ uint32 retval;
+
+ JSLL_MUL(nextseed, rt->rngSeed, rt->rngMultiplier);
+ JSLL_ADD(nextseed, nextseed, rt->rngAddend);
+ JSLL_AND(nextseed, nextseed, rt->rngMask);
+ rt->rngSeed = nextseed;
+ JSLL_USHR(tmp, nextseed, 48 - bits);
+ JSLL_L2I(retval, tmp);
+ return retval;
+}
+
+static jsdouble
+random_nextDouble(JSRuntime *rt)
+{
+ int64 tmp, tmp2;
+ jsdouble d;
+
+ JSLL_ISHL(tmp, random_next(rt, 26), 27);
+ JSLL_UI2L(tmp2, random_next(rt, 27));
+ JSLL_ADD(tmp, tmp, tmp2);
+ JSLL_L2D(d, tmp);
+ return d / rt->rngDscale;
+}
+
+static JSBool
+math_random(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSRuntime *rt;
+ jsdouble z;
+
+ rt = cx->runtime;
+ JS_LOCK_RUNTIME(rt);
+ random_init(rt);
+ z = random_nextDouble(rt);
+ JS_UNLOCK_RUNTIME(rt);
+ return js_NewNumberValue(cx, z, rval);
+}
+
+#if defined _WIN32 && !defined WINCE && _MSC_VER < 1400
+/* Try to work around apparent _copysign bustage in VC6 and VC7. */
+double
+js_copysign(double x, double y)
+{
+ jsdpun xu, yu;
+
+ xu.d = x;
+ yu.d = y;
+ xu.s.hi &= ~JSDOUBLE_HI32_SIGNBIT;
+ xu.s.hi |= yu.s.hi & JSDOUBLE_HI32_SIGNBIT;
+ return xu.d;
+}
+#endif
+
+static JSBool
+math_round(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, z;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+ z = fd_copysign(fd_floor(x + 0.5), x);
+ return js_NewNumberValue(cx, z, rval);
+}
+
+static JSBool
+math_sin(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, z;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+ z = fd_sin(x);
+ return js_NewNumberValue(cx, z, rval);
+}
+
+static JSBool
+math_sqrt(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, z;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+ z = fd_sqrt(x);
+ return js_NewNumberValue(cx, z, rval);
+}
+
+static JSBool
+math_tan(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x, z;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+ z = fd_tan(x);
+ return js_NewNumberValue(cx, z, rval);
+}
+
+#if JS_HAS_TOSOURCE
+static JSBool
+math_toSource(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ *rval = ATOM_KEY(CLASS_ATOM(cx, Math));
+ return JS_TRUE;
+}
+#endif
+
+static JSFunctionSpec math_static_methods[] = {
+#if JS_HAS_TOSOURCE
+ {js_toSource_str, math_toSource, 0, 0, 0},
+#endif
+ {"abs", math_abs, 1, 0, 0},
+ {"acos", math_acos, 1, 0, 0},
+ {"asin", math_asin, 1, 0, 0},
+ {"atan", math_atan, 1, 0, 0},
+ {"atan2", math_atan2, 2, 0, 0},
+ {"ceil", math_ceil, 1, 0, 0},
+ {"cos", math_cos, 1, 0, 0},
+ {"exp", math_exp, 1, 0, 0},
+ {"floor", math_floor, 1, 0, 0},
+ {"log", math_log, 1, 0, 0},
+ {"max", math_max, 2, 0, 0},
+ {"min", math_min, 2, 0, 0},
+ {"pow", math_pow, 2, 0, 0},
+ {"random", math_random, 0, 0, 0},
+ {"round", math_round, 1, 0, 0},
+ {"sin", math_sin, 1, 0, 0},
+ {"sqrt", math_sqrt, 1, 0, 0},
+ {"tan", math_tan, 1, 0, 0},
+ {0,0,0,0,0}
+};
+
+JSObject *
+js_InitMathClass(JSContext *cx, JSObject *obj)
+{
+ JSObject *Math;
+
+ Math = JS_DefineObject(cx, obj, js_Math_str, &js_MathClass, NULL, 0);
+ if (!Math)
+ return NULL;
+ if (!JS_DefineFunctions(cx, Math, math_static_methods))
+ return NULL;
+ if (!JS_DefineConstDoubles(cx, Math, math_constants))
+ return NULL;
+ return Math;
+}
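
math_random reuses the 48-bit linear congruential generator from java.util.Random: the state advances as seed = (seed * 0x5DEECE66D + 0xB) mod 2^48, and two draws of 26 and 27 high bits are combined into a 53-bit value scaled by 2^53. With native 64-bit integers the same generator collapses to a few lines (standalone sketch; the code above goes through the JSLL_ macros precisely because it cannot assume long long):

#include <stdint.h>
#include <stdio.h>

static uint64_t rng_seed;

static void rng_set_seed(uint64_t seed) {
    rng_seed = (seed ^ 0x5DEECE66DULL) & ((1ULL << 48) - 1);
}

/* Advance the LCG and return the top `bits` bits of the new 48-bit state. */
static uint32_t rng_next(int bits) {
    rng_seed = (rng_seed * 0x5DEECE66DULL + 0xBULL) & ((1ULL << 48) - 1);
    return (uint32_t)(rng_seed >> (48 - bits));
}

/* 26 + 27 high bits combined into a uniformly distributed double in [0, 1). */
static double rng_next_double(void) {
    uint64_t hi = rng_next(26);            /* first draw: top 26 bits */
    uint64_t lo = rng_next(27);            /* second draw: next 27 bits */
    return (double)((hi << 27) + lo) / (double)(1ULL << 53);
}

int main(void) {
    rng_set_seed(42);
    printf("%f\n", rng_next_double());
    return 0;
}
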
diff --git a/src/third_party/js-1.7/jsmath.h b/src/third_party/js-1.7/jsmath.h
new file mode 100644
index 00000000000..1f60630bca2
--- /dev/null
+++ b/src/third_party/js-1.7/jsmath.h
@@ -0,0 +1,57 @@
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998-1999
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/* -*- Mode: C; tab-width: 8 -*-
+ * Copyright (C) 1998-1999 Netscape Communications Corporation, All Rights Reserved.
+ */
+
+#ifndef jsmath_h___
+#define jsmath_h___
+/*
+ * JS math functions.
+ */
+
+JS_BEGIN_EXTERN_C
+
+extern JSClass js_MathClass;
+
+extern JSObject *
+js_InitMathClass(JSContext *cx, JSObject *obj);
+
+JS_END_EXTERN_C
+
+#endif /* jsmath_h___ */
diff --git a/src/third_party/js-1.7/jsnum.c b/src/third_party/js-1.7/jsnum.c
new file mode 100644
index 00000000000..987619dbde6
--- /dev/null
+++ b/src/third_party/js-1.7/jsnum.c
@@ -0,0 +1,1147 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ * IBM Corp.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS number type and wrapper class.
+ */
+#include "jsstddef.h"
+#if defined(XP_WIN) || defined(XP_OS2)
+#include <float.h>
+#endif
+#include <locale.h>
+#include <limits.h>
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+#include "jstypes.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsapi.h"
+#include "jsatom.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsdtoa.h"
+#include "jsgc.h"
+#include "jsinterp.h"
+#include "jsnum.h"
+#include "jsobj.h"
+#include "jsopcode.h"
+#include "jsprf.h"
+#include "jsstr.h"
+
+static JSBool
+num_isNaN(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+ *rval = BOOLEAN_TO_JSVAL(JSDOUBLE_IS_NaN(x));
+ return JS_TRUE;
+}
+
+static JSBool
+num_isFinite(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble x;
+
+ if (!js_ValueToNumber(cx, argv[0], &x))
+ return JS_FALSE;
+ *rval = BOOLEAN_TO_JSVAL(JSDOUBLE_IS_FINITE(x));
+ return JS_TRUE;
+}
+
+static JSBool
+num_parseFloat(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str;
+ jsdouble d;
+ const jschar *bp, *ep;
+
+ str = js_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ /* XXXbe js_strtod shouldn't require NUL termination */
+ bp = js_UndependString(cx, str);
+ if (!bp)
+ return JS_FALSE;
+ if (!js_strtod(cx, bp, &ep, &d))
+ return JS_FALSE;
+ if (ep == bp) {
+ *rval = DOUBLE_TO_JSVAL(cx->runtime->jsNaN);
+ return JS_TRUE;
+ }
+ return js_NewNumberValue(cx, d, rval);
+}
+
+/* See ECMA 15.1.2.2. */
+static JSBool
+num_parseInt(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsint radix;
+ JSString *str;
+ jsdouble d;
+ const jschar *bp, *ep;
+
+ if (argc > 1) {
+ if (!js_ValueToECMAInt32(cx, argv[1], &radix))
+ return JS_FALSE;
+ } else {
+ radix = 0;
+ }
+ if (radix != 0 && (radix < 2 || radix > 36)) {
+ *rval = DOUBLE_TO_JSVAL(cx->runtime->jsNaN);
+ return JS_TRUE;
+ }
+
+ str = js_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ /* XXXbe js_strtointeger shouldn't require NUL termination */
+ bp = js_UndependString(cx, str);
+ if (!bp)
+ return JS_FALSE;
+ if (!js_strtointeger(cx, bp, &ep, radix, &d))
+ return JS_FALSE;
+ if (ep == bp) {
+ *rval = DOUBLE_TO_JSVAL(cx->runtime->jsNaN);
+ return JS_TRUE;
+ }
+ return js_NewNumberValue(cx, d, rval);
+}
+
+const char js_Infinity_str[] = "Infinity";
+const char js_NaN_str[] = "NaN";
+const char js_isNaN_str[] = "isNaN";
+const char js_isFinite_str[] = "isFinite";
+const char js_parseFloat_str[] = "parseFloat";
+const char js_parseInt_str[] = "parseInt";
+
+static JSFunctionSpec number_functions[] = {
+ {js_isNaN_str, num_isNaN, 1,0,0},
+ {js_isFinite_str, num_isFinite, 1,0,0},
+ {js_parseFloat_str, num_parseFloat, 1,0,0},
+ {js_parseInt_str, num_parseInt, 2,0,0},
+ {0,0,0,0,0}
+};
+
+JSClass js_NumberClass = {
+ js_Number_str,
+ JSCLASS_HAS_PRIVATE | JSCLASS_HAS_CACHED_PROTO(JSProto_Number),
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+};
+
+static JSBool
+Number(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsdouble d;
+ jsval v;
+
+ if (argc != 0) {
+ if (!js_ValueToNumber(cx, argv[0], &d))
+ return JS_FALSE;
+ } else {
+ d = 0.0;
+ }
+ if (!js_NewNumberValue(cx, d, &v))
+ return JS_FALSE;
+ if (!(cx->fp->flags & JSFRAME_CONSTRUCTING)) {
+ *rval = v;
+ return JS_TRUE;
+ }
+ OBJ_SET_SLOT(cx, obj, JSSLOT_PRIVATE, v);
+ return JS_TRUE;
+}
+
+#if JS_HAS_TOSOURCE
+static JSBool
+num_toSource(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsval v;
+ jsdouble d;
+ char numBuf[DTOSTR_STANDARD_BUFFER_SIZE], *numStr;
+ char buf[64];
+ JSString *str;
+
+ if (JSVAL_IS_NUMBER((jsval)obj)) {
+ v = (jsval)obj;
+ } else {
+ if (!JS_InstanceOf(cx, obj, &js_NumberClass, argv))
+ return JS_FALSE;
+ v = OBJ_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
+ JS_ASSERT(JSVAL_IS_NUMBER(v));
+ }
+ d = JSVAL_IS_INT(v) ? (jsdouble)JSVAL_TO_INT(v) : *JSVAL_TO_DOUBLE(v);
+ numStr = JS_dtostr(numBuf, sizeof numBuf, DTOSTR_STANDARD, 0, d);
+ if (!numStr) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ JS_snprintf(buf, sizeof buf, "(new %s(%s))", js_NumberClass.name, numStr);
+ str = JS_NewStringCopyZ(cx, buf);
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+#endif
+
+/* The buf must be big enough for MIN_INT to fit including '-' and '\0'. */
+static char *
+IntToString(jsint i, char *buf, size_t bufSize)
+{
+ char *cp;
+ jsuint u;
+
+ u = (i < 0) ? -i : i;
+
+ cp = buf + bufSize; /* one past last buffer cell */
+ *--cp = '\0'; /* null terminate the string to be */
+
+ /*
+ * Build the string from behind. We use multiply and subtraction
+ * instead of modulus because that's much faster.
+ */
+ do {
+ jsuint newu = u / 10;
+ *--cp = (char)(u - newu * 10) + '0';
+ u = newu;
+ } while (u != 0);
+
+ if (i < 0)
+ *--cp = '-';
+
+ return cp;
+}
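
IntToString above fills its buffer from the end and replaces the modulus with a multiply and a subtract, so each digit costs a single division. A trimmed, self-contained version of the same technique for unsigned values (a hypothetical helper, not part of jsnum.c):

#include <stdio.h>

/* Convert a non-negative value to decimal by filling the buffer from the end,
 * deriving each digit as u - (u / 10) * 10 instead of u % 10. */
static const char *uint_to_string(unsigned u, char *buf, size_t bufSize)
{
    char *cp = buf + bufSize;   /* one past the last buffer cell */
    *--cp = '\0';
    do {
        unsigned q = u / 10;
        *--cp = (char)('0' + (u - q * 10));
        u = q;
    } while (u != 0);
    return cp;
}

int main(void)
{
    char buf[16];
    printf("%s\n", uint_to_string(1234567u, buf, sizeof buf));  /* prints 1234567 */
    return 0;
}
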
+
+static JSBool
+num_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsval v;
+ jsdouble d;
+ jsint base;
+ JSString *str;
+
+ if (JSVAL_IS_NUMBER((jsval)obj)) {
+ v = (jsval)obj;
+ } else {
+ if (!JS_InstanceOf(cx, obj, &js_NumberClass, argv))
+ return JS_FALSE;
+ v = OBJ_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
+ JS_ASSERT(JSVAL_IS_NUMBER(v));
+ }
+ d = JSVAL_IS_INT(v) ? (jsdouble)JSVAL_TO_INT(v) : *JSVAL_TO_DOUBLE(v);
+ base = 10;
+ if (argc != 0) {
+ if (!js_ValueToECMAInt32(cx, argv[0], &base))
+ return JS_FALSE;
+ if (base < 2 || base > 36) {
+ char numBuf[12];
+ char *numStr = IntToString(base, numBuf, sizeof numBuf);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_BAD_RADIX,
+ numStr);
+ return JS_FALSE;
+ }
+ }
+ if (base == 10) {
+ str = js_NumberToString(cx, d);
+ } else {
+ char *dStr = JS_dtobasestr(base, d);
+ if (!dStr) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ str = JS_NewStringCopyZ(cx, dStr);
+ free(dStr);
+ }
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+num_toLocaleString(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ char thousandsLength, decimalLength;
+ const char *numGrouping, *tmpGroup;
+ JSRuntime *rt;
+ JSString *numStr, *str;
+ char *num, *buf, *dec, *end, *tmpSrc, *tmpDest;
+ int digits, size, remainder, nrepeat;
+
+ /*
+ * Create the string, move back to bytes to make string twiddling
+     * a bit easier and so we can insert platform charset separators.
+ */
+ if (!num_toString(cx, obj, 0, argv, rval))
+ return JS_FALSE;
+ JS_ASSERT(JSVAL_IS_STRING(*rval));
+ numStr = JSVAL_TO_STRING(*rval);
+ num = js_GetStringBytes(cx->runtime, numStr);
+
+ /* Find bit before the decimal. */
+ dec = strchr(num, '.');
+ digits = dec ? dec - num : (int)strlen(num);
+ end = num + digits;
+
+ rt = cx->runtime;
+ thousandsLength = strlen(rt->thousandsSeparator);
+ decimalLength = strlen(rt->decimalSeparator);
+
+ /* Figure out how long resulting string will be. */
+ size = digits + (dec ? decimalLength + strlen(dec + 1) : 0);
+
+ numGrouping = tmpGroup = rt->numGrouping;
+ remainder = digits;
+ if (*num == '-')
+ remainder--;
+
+ while (*tmpGroup != CHAR_MAX && *tmpGroup != '\0') {
+ if (*tmpGroup >= remainder)
+ break;
+ size += thousandsLength;
+ remainder -= *tmpGroup;
+ tmpGroup++;
+ }
+ if (*tmpGroup == '\0' && *numGrouping != '\0') {
+ nrepeat = (remainder - 1) / tmpGroup[-1];
+ size += thousandsLength * nrepeat;
+ remainder -= nrepeat * tmpGroup[-1];
+ } else {
+ nrepeat = 0;
+ }
+ tmpGroup--;
+
+ buf = (char *)JS_malloc(cx, size + 1);
+ if (!buf)
+ return JS_FALSE;
+
+ tmpDest = buf;
+ tmpSrc = num;
+
+ while (*tmpSrc == '-' || remainder--)
+ *tmpDest++ = *tmpSrc++;
+ while (tmpSrc < end) {
+ strcpy(tmpDest, rt->thousandsSeparator);
+ tmpDest += thousandsLength;
+ memcpy(tmpDest, tmpSrc, *tmpGroup);
+ tmpDest += *tmpGroup;
+ tmpSrc += *tmpGroup;
+ if (--nrepeat < 0)
+ tmpGroup--;
+ }
+
+ if (dec) {
+ strcpy(tmpDest, rt->decimalSeparator);
+ tmpDest += decimalLength;
+ strcpy(tmpDest, dec + 1);
+ } else {
+ *tmpDest++ = '\0';
+ }
+
+ if (cx->localeCallbacks && cx->localeCallbacks->localeToUnicode)
+ return cx->localeCallbacks->localeToUnicode(cx, buf, rval);
+
+ str = JS_NewString(cx, buf, size);
+ if (!str) {
+ JS_free(cx, buf);
+ return JS_FALSE;
+ }
+
+ *rval = STRING_TO_JSVAL(str);
+
+ return JS_TRUE;
+}
+
+static JSBool
+num_valueOf(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ if (JSVAL_IS_NUMBER((jsval)obj)) {
+ *rval = (jsval)obj;
+ return JS_TRUE;
+ }
+ if (!JS_InstanceOf(cx, obj, &js_NumberClass, argv))
+ return JS_FALSE;
+ *rval = OBJ_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
+ return JS_TRUE;
+}
+
+
+#define MAX_PRECISION 100
+
+static JSBool
+num_to(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval, JSDToStrMode zeroArgMode,
+ JSDToStrMode oneArgMode, jsint precisionMin, jsint precisionMax, jsint precisionOffset)
+{
+ jsval v;
+ jsdouble d, precision;
+ JSString *str;
+ char buf[DTOSTR_VARIABLE_BUFFER_SIZE(MAX_PRECISION+1)], *numStr; /* Use MAX_PRECISION+1 because precisionOffset can be 1 */
+
+ if (JSVAL_IS_NUMBER((jsval)obj)) {
+ v = (jsval)obj;
+ } else {
+ if (!JS_InstanceOf(cx, obj, &js_NumberClass, argv))
+ return JS_FALSE;
+ v = OBJ_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
+ JS_ASSERT(JSVAL_IS_NUMBER(v));
+ }
+ d = JSVAL_IS_INT(v) ? (jsdouble)JSVAL_TO_INT(v) : *JSVAL_TO_DOUBLE(v);
+
+ if (JSVAL_IS_VOID(argv[0])) {
+ precision = 0.0;
+ oneArgMode = zeroArgMode;
+ } else {
+ if (!js_ValueToNumber(cx, argv[0], &precision))
+ return JS_FALSE;
+ precision = js_DoubleToInteger(precision);
+ if (precision < precisionMin || precision > precisionMax) {
+ numStr = JS_dtostr(buf, sizeof buf, DTOSTR_STANDARD, 0, precision);
+ if (!numStr)
+ JS_ReportOutOfMemory(cx);
+ else
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_PRECISION_RANGE, numStr);
+ return JS_FALSE;
+ }
+ }
+
+ numStr = JS_dtostr(buf, sizeof buf, oneArgMode, (jsint)precision + precisionOffset, d);
+ if (!numStr) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ str = JS_NewStringCopyZ(cx, numStr);
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+num_toFixed(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ /* We allow a larger range of precision than ECMA requires; this is permitted by ECMA. */
+ return num_to(cx, obj, argc, argv, rval, DTOSTR_FIXED, DTOSTR_FIXED, -20, MAX_PRECISION, 0);
+}
+
+static JSBool
+num_toExponential(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ /* We allow a larger range of precision than ECMA requires; this is permitted by ECMA. */
+ return num_to(cx, obj, argc, argv, rval, DTOSTR_STANDARD_EXPONENTIAL, DTOSTR_EXPONENTIAL, 0, MAX_PRECISION, 1);
+}
+
+static JSBool
+num_toPrecision(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ /* We allow a larger range of precision than ECMA requires; this is permitted by ECMA. */
+ return num_to(cx, obj, argc, argv, rval, DTOSTR_STANDARD, DTOSTR_PRECISION, 1, MAX_PRECISION, 0);
+}
+
+static JSFunctionSpec number_methods[] = {
+#if JS_HAS_TOSOURCE
+ {js_toSource_str, num_toSource, 0,JSFUN_THISP_NUMBER,0},
+#endif
+ {js_toString_str, num_toString, 0,JSFUN_THISP_NUMBER,0},
+ {js_toLocaleString_str, num_toLocaleString, 0,JSFUN_THISP_NUMBER,0},
+ {js_valueOf_str, num_valueOf, 0,JSFUN_THISP_NUMBER,0},
+ {"toFixed", num_toFixed, 1,JSFUN_THISP_NUMBER,0},
+ {"toExponential", num_toExponential, 1,JSFUN_THISP_NUMBER,0},
+ {"toPrecision", num_toPrecision, 1,JSFUN_THISP_NUMBER,0},
+ {0,0,0,0,0}
+};
+
+/* NB: Keep this in synch with number_constants[]. */
+enum nc_slot {
+ NC_NaN,
+ NC_POSITIVE_INFINITY,
+ NC_NEGATIVE_INFINITY,
+ NC_MAX_VALUE,
+ NC_MIN_VALUE,
+ NC_LIMIT
+};
+
+/*
+ * Some, if not most, C compilers forbid spelling these constants at compile
+ * time, or barf if you try, so all but MAX_VALUE are set up by
+ * js_InitRuntimeNumberState using union jsdpun.
+ */
+static JSConstDoubleSpec number_constants[] = {
+ {0, js_NaN_str, 0,{0,0,0}},
+ {0, "POSITIVE_INFINITY", 0,{0,0,0}},
+ {0, "NEGATIVE_INFINITY", 0,{0,0,0}},
+ {1.7976931348623157E+308, "MAX_VALUE", 0,{0,0,0}},
+ {0, "MIN_VALUE", 0,{0,0,0}},
+ {0,0,0,{0,0,0}}
+};
+
+static jsdouble NaN;
+
+#if (defined XP_WIN || defined XP_OS2) && \
+ !defined WINCE && \
+ !defined __MWERKS__ && \
+ (defined _M_IX86 || \
+ (defined __GNUC__ && !defined __MINGW32__))
+
+/*
+ * Set the exception mask to mask all exceptions and set the FPU precision
+ * to a 53-bit mantissa.
+ * On the Alpha platform this is handled via a compiler option.
+ */
+#define FIX_FPU() _control87(MCW_EM | PC_53, MCW_EM | MCW_PC)
+
+#else
+
+#define FIX_FPU() ((void)0)
+
+#endif
+
+JSBool
+js_InitRuntimeNumberState(JSContext *cx)
+{
+ JSRuntime *rt;
+ jsdpun u;
+ struct lconv *locale;
+
+ rt = cx->runtime;
+ JS_ASSERT(!rt->jsNaN);
+
+ FIX_FPU();
+
+ u.s.hi = JSDOUBLE_HI32_EXPMASK | JSDOUBLE_HI32_MANTMASK;
+ u.s.lo = 0xffffffff;
+ number_constants[NC_NaN].dval = NaN = u.d;
+ rt->jsNaN = js_NewDouble(cx, NaN, GCF_LOCK);
+ if (!rt->jsNaN)
+ return JS_FALSE;
+
+ u.s.hi = JSDOUBLE_HI32_EXPMASK;
+ u.s.lo = 0x00000000;
+ number_constants[NC_POSITIVE_INFINITY].dval = u.d;
+ rt->jsPositiveInfinity = js_NewDouble(cx, u.d, GCF_LOCK);
+ if (!rt->jsPositiveInfinity)
+ return JS_FALSE;
+
+ u.s.hi = JSDOUBLE_HI32_SIGNBIT | JSDOUBLE_HI32_EXPMASK;
+ u.s.lo = 0x00000000;
+ number_constants[NC_NEGATIVE_INFINITY].dval = u.d;
+ rt->jsNegativeInfinity = js_NewDouble(cx, u.d, GCF_LOCK);
+ if (!rt->jsNegativeInfinity)
+ return JS_FALSE;
+
+ u.s.hi = 0;
+ u.s.lo = 1;
+ number_constants[NC_MIN_VALUE].dval = u.d;
+
+ locale = localeconv();
+ rt->thousandsSeparator =
+ JS_strdup(cx, locale->thousands_sep ? locale->thousands_sep : "'");
+ rt->decimalSeparator =
+ JS_strdup(cx, locale->decimal_point ? locale->decimal_point : ".");
+ rt->numGrouping =
+ JS_strdup(cx, locale->grouping ? locale->grouping : "\3\0");
+
+ return rt->thousandsSeparator && rt->decimalSeparator && rt->numGrouping;
+}
+
+void
+js_FinishRuntimeNumberState(JSContext *cx)
+{
+ JSRuntime *rt = cx->runtime;
+
+ js_UnlockGCThingRT(rt, rt->jsNaN);
+ js_UnlockGCThingRT(rt, rt->jsNegativeInfinity);
+ js_UnlockGCThingRT(rt, rt->jsPositiveInfinity);
+
+ rt->jsNaN = NULL;
+ rt->jsNegativeInfinity = NULL;
+ rt->jsPositiveInfinity = NULL;
+
+ JS_free(cx, (void *)rt->thousandsSeparator);
+ JS_free(cx, (void *)rt->decimalSeparator);
+ JS_free(cx, (void *)rt->numGrouping);
+ rt->thousandsSeparator = rt->decimalSeparator = rt->numGrouping = NULL;
+}
+
+JSObject *
+js_InitNumberClass(JSContext *cx, JSObject *obj)
+{
+ JSObject *proto, *ctor;
+ JSRuntime *rt;
+
+ /* XXX must do at least once per new thread, so do it per JSContext... */
+ FIX_FPU();
+
+ if (!JS_DefineFunctions(cx, obj, number_functions))
+ return NULL;
+
+ proto = JS_InitClass(cx, obj, NULL, &js_NumberClass, Number, 1,
+ NULL, number_methods, NULL, NULL);
+ if (!proto || !(ctor = JS_GetConstructor(cx, proto)))
+ return NULL;
+ OBJ_SET_SLOT(cx, proto, JSSLOT_PRIVATE, JSVAL_ZERO);
+ if (!JS_DefineConstDoubles(cx, ctor, number_constants))
+ return NULL;
+
+ /* ECMA 15.1.1.1 */
+ rt = cx->runtime;
+ if (!JS_DefineProperty(cx, obj, js_NaN_str, DOUBLE_TO_JSVAL(rt->jsNaN),
+ NULL, NULL, JSPROP_PERMANENT)) {
+ return NULL;
+ }
+
+ /* ECMA 15.1.1.2 */
+ if (!JS_DefineProperty(cx, obj, js_Infinity_str,
+ DOUBLE_TO_JSVAL(rt->jsPositiveInfinity),
+ NULL, NULL, JSPROP_PERMANENT)) {
+ return NULL;
+ }
+ return proto;
+}
+
+jsdouble *
+js_NewDouble(JSContext *cx, jsdouble d, uintN gcflag)
+{
+ jsdouble *dp;
+
+ dp = (jsdouble *) js_NewGCThing(cx, gcflag | GCX_DOUBLE, sizeof(jsdouble));
+ if (!dp)
+ return NULL;
+ *dp = d;
+ return dp;
+}
+
+void
+js_FinalizeDouble(JSContext *cx, jsdouble *dp)
+{
+ *dp = NaN;
+}
+
+JSBool
+js_NewDoubleValue(JSContext *cx, jsdouble d, jsval *rval)
+{
+ jsdouble *dp;
+
+ dp = js_NewDouble(cx, d, 0);
+ if (!dp)
+ return JS_FALSE;
+ *rval = DOUBLE_TO_JSVAL(dp);
+ return JS_TRUE;
+}
+
+JSBool
+js_NewNumberValue(JSContext *cx, jsdouble d, jsval *rval)
+{
+ jsint i;
+
+ if (JSDOUBLE_IS_INT(d, i) && INT_FITS_IN_JSVAL(i)) {
+ *rval = INT_TO_JSVAL(i);
+ } else {
+ if (!js_NewDoubleValue(cx, d, rval))
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
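+
+/*
+ * A minimal caller sketch for js_NewNumberValue (hypothetical locals,
+ * assuming a live JSContext *cx): small integral doubles become tagged-int
+ * jsvals, everything else gets a GC-allocated double.
+ *
+ *   jsval v;
+ *   if (!js_NewNumberValue(cx, 3.0, &v))
+ *       return JS_FALSE;               // double allocation failed
+ *   // JSVAL_IS_INT(v) holds here, since 3.0 fits in a tagged int;
+ *   // with 1e100 instead, JSVAL_IS_DOUBLE(v) would hold.
+ */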
+
+JSObject *
+js_NumberToObject(JSContext *cx, jsdouble d)
+{
+ JSObject *obj;
+ jsval v;
+
+ obj = js_NewObject(cx, &js_NumberClass, NULL, NULL);
+ if (!obj)
+ return NULL;
+ if (!js_NewNumberValue(cx, d, &v)) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ return NULL;
+ }
+ OBJ_SET_SLOT(cx, obj, JSSLOT_PRIVATE, v);
+ return obj;
+}
+
+JSString *
+js_NumberToString(JSContext *cx, jsdouble d)
+{
+ jsint i;
+ char buf[DTOSTR_STANDARD_BUFFER_SIZE];
+ char *numStr;
+
+ if (JSDOUBLE_IS_INT(d, i)) {
+ numStr = IntToString(i, buf, sizeof buf);
+ } else {
+ numStr = JS_dtostr(buf, sizeof buf, DTOSTR_STANDARD, 0, d);
+ if (!numStr) {
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+ }
+ }
+ return JS_NewStringCopyZ(cx, numStr);
+}
+
+JSBool
+js_ValueToNumber(JSContext *cx, jsval v, jsdouble *dp)
+{
+ JSObject *obj;
+ JSString *str;
+ const jschar *bp, *ep;
+
+ if (JSVAL_IS_OBJECT(v)) {
+ obj = JSVAL_TO_OBJECT(v);
+ if (!obj) {
+ *dp = 0;
+ return JS_TRUE;
+ }
+ if (!OBJ_DEFAULT_VALUE(cx, obj, JSTYPE_NUMBER, &v))
+ return JS_FALSE;
+ }
+ if (JSVAL_IS_INT(v)) {
+ *dp = (jsdouble)JSVAL_TO_INT(v);
+ } else if (JSVAL_IS_DOUBLE(v)) {
+ *dp = *JSVAL_TO_DOUBLE(v);
+ } else if (JSVAL_IS_STRING(v)) {
+ str = JSVAL_TO_STRING(v);
+ /*
+ * Note that ECMA doesn't treat a string beginning with a '0' as an
+ * octal number here. This works because all such numbers will be
+ * interpreted as decimal by js_strtod and will never get passed to
+ * js_strtointeger (which would interpret them as octal).
+ */
+ /* XXXbe js_strtod shouldn't require NUL termination */
+ bp = js_UndependString(cx, str);
+ if (!bp)
+ return JS_FALSE;
+ if ((!js_strtod(cx, bp, &ep, dp) ||
+ js_SkipWhiteSpace(ep) != bp + str->length) &&
+ (!js_strtointeger(cx, bp, &ep, 0, dp) ||
+ js_SkipWhiteSpace(ep) != bp + str->length)) {
+ goto badstr;
+ }
+ } else if (JSVAL_IS_BOOLEAN(v)) {
+ *dp = JSVAL_TO_BOOLEAN(v) ? 1 : 0;
+ } else {
+badstr:
+ *dp = *cx->runtime->jsNaN;
+ }
+ return JS_TRUE;
+}
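+
+/*
+ * Conversion examples for js_ValueToNumber (informal; failures are only
+ * possible when the defaultValue hook or string parsing runs out of memory):
+ *
+ *   null      => 0          "  12  "  => 12
+ *   true      => 1          "0x10"    => 16   (via js_strtointeger)
+ *   undefined => NaN        "12abc"   => NaN
+ */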
+
+JSBool
+js_ValueToECMAInt32(JSContext *cx, jsval v, int32 *ip)
+{
+ jsdouble d;
+
+ if (!js_ValueToNumber(cx, v, &d))
+ return JS_FALSE;
+ return js_DoubleToECMAInt32(cx, d, ip);
+}
+
+JSBool
+js_DoubleToECMAInt32(JSContext *cx, jsdouble d, int32 *ip)
+{
+ jsdouble two32 = 4294967296.0;
+ jsdouble two31 = 2147483648.0;
+
+ if (!JSDOUBLE_IS_FINITE(d) || d == 0) {
+ *ip = 0;
+ return JS_TRUE;
+ }
+ d = fmod(d, two32);
+ d = (d >= 0) ? floor(d) : ceil(d) + two32;
+ if (d >= two31)
+ *ip = (int32)(d - two32);
+ else
+ *ip = (int32)d;
+ return JS_TRUE;
+}
+
+JSBool
+js_ValueToECMAUint32(JSContext *cx, jsval v, uint32 *ip)
+{
+ jsdouble d;
+
+ if (!js_ValueToNumber(cx, v, &d))
+ return JS_FALSE;
+ return js_DoubleToECMAUint32(cx, d, ip);
+}
+
+JSBool
+js_DoubleToECMAUint32(JSContext *cx, jsdouble d, uint32 *ip)
+{
+ JSBool neg;
+ jsdouble two32 = 4294967296.0;
+
+ if (!JSDOUBLE_IS_FINITE(d) || d == 0) {
+ *ip = 0;
+ return JS_TRUE;
+ }
+
+ neg = (d < 0);
+ d = floor(neg ? -d : d);
+ d = neg ? -d : d;
+
+ d = fmod(d, two32);
+
+ d = (d >= 0) ? d : d + two32;
+ *ip = (uint32)d;
+ return JS_TRUE;
+}
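+
+/*
+ * Informal examples of the two modulo-2^32 conversions above:
+ *
+ *   js_DoubleToECMAInt32(cx, 3000000000.0, &i)  =>  i == -1294967296
+ *   js_DoubleToECMAInt32(cx, -1.0, &i)          =>  i == -1
+ *   js_DoubleToECMAUint32(cx, -1.0, &u)         =>  u == 4294967295
+ *
+ * NaN, the infinities, and 0 all map to 0 in both conversions.
+ */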
+
+JSBool
+js_ValueToInt32(JSContext *cx, jsval v, int32 *ip)
+{
+ jsdouble d;
+ JSString *str;
+
+ if (JSVAL_IS_INT(v)) {
+ *ip = JSVAL_TO_INT(v);
+ return JS_TRUE;
+ }
+ if (!js_ValueToNumber(cx, v, &d))
+ return JS_FALSE;
+ if (JSDOUBLE_IS_NaN(d) || d <= -2147483649.0 || 2147483648.0 <= d) {
+ str = js_DecompileValueGenerator(cx, JSDVG_SEARCH_STACK, v, NULL);
+ if (str) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_CANT_CONVERT, JS_GetStringBytes(str));
+
+ }
+ return JS_FALSE;
+ }
+ *ip = (int32)floor(d + 0.5); /* Round to nearest */
+ return JS_TRUE;
+}
+
+JSBool
+js_ValueToUint16(JSContext *cx, jsval v, uint16 *ip)
+{
+ jsdouble d;
+ jsuint i, m;
+ JSBool neg;
+
+ if (!js_ValueToNumber(cx, v, &d))
+ return JS_FALSE;
+ if (d == 0 || !JSDOUBLE_IS_FINITE(d)) {
+ *ip = 0;
+ return JS_TRUE;
+ }
+ i = (jsuint)d;
+ if ((jsdouble)i == d) {
+ *ip = (uint16)i;
+ return JS_TRUE;
+ }
+ neg = (d < 0);
+ d = floor(neg ? -d : d);
+ d = neg ? -d : d;
+ m = JS_BIT(16);
+ d = fmod(d, (double)m);
+ if (d < 0)
+ d += m;
+ *ip = (uint16) d;
+ return JS_TRUE;
+}
+
+jsdouble
+js_DoubleToInteger(jsdouble d)
+{
+ JSBool neg;
+
+ if (d == 0)
+ return d;
+ if (!JSDOUBLE_IS_FINITE(d)) {
+ if (JSDOUBLE_IS_NaN(d))
+ return 0;
+ return d;
+ }
+ neg = (d < 0);
+ d = floor(neg ? -d : d);
+ return neg ? -d : d;
+}
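+
+/*
+ * js_DoubleToInteger truncates toward zero: -3.7 => -3.0 and 3.7 => 3.0,
+ * while NaN => 0 and the infinities pass through unchanged.
+ */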
+
+
+JSBool
+js_strtod(JSContext *cx, const jschar *s, const jschar **ep, jsdouble *dp)
+{
+ char cbuf[32];
+ size_t i;
+ char *cstr, *istr, *estr;
+ JSBool negative;
+ jsdouble d;
+ const jschar *s1 = js_SkipWhiteSpace(s);
+ size_t length = js_strlen(s1);
+
+ /* Use cbuf to avoid malloc */
+ if (length >= sizeof cbuf) {
+ cstr = (char *) JS_malloc(cx, length + 1);
+ if (!cstr)
+ return JS_FALSE;
+ } else {
+ cstr = cbuf;
+ }
+
+ for (i = 0; i <= length; i++) {
+ if (s1[i] >> 8) {
+ cstr[i] = 0;
+ break;
+ }
+ cstr[i] = (char)s1[i];
+ }
+
+ istr = cstr;
+ if ((negative = (*istr == '-')) != 0 || *istr == '+')
+ istr++;
+ if (!strncmp(istr, js_Infinity_str, sizeof js_Infinity_str - 1)) {
+ d = *(negative ? cx->runtime->jsNegativeInfinity : cx->runtime->jsPositiveInfinity);
+ estr = istr + 8;
+ } else {
+ int err;
+ d = JS_strtod(cstr, &estr, &err);
+ if (err == JS_DTOA_ENOMEM) {
+ JS_ReportOutOfMemory(cx);
+ if (cstr != cbuf)
+ JS_free(cx, cstr);
+ return JS_FALSE;
+ }
+ if (err == JS_DTOA_ERANGE) {
+ if (d == HUGE_VAL)
+ d = *cx->runtime->jsPositiveInfinity;
+ else if (d == -HUGE_VAL)
+ d = *cx->runtime->jsNegativeInfinity;
+ }
+#ifdef HPUX
+ if (d == 0.0 && negative) {
+ /*
+ * "-0", "-1e-2000" come out as positive zero
+ * here on HPUX. Force a negative zero instead.
+ */
+ JSDOUBLE_HI32(d) = JSDOUBLE_HI32_SIGNBIT;
+ JSDOUBLE_LO32(d) = 0;
+ }
+#endif
+ }
+
+ i = estr - cstr;
+ if (cstr != cbuf)
+ JS_free(cx, cstr);
+ *ep = i ? s1 + i : s;
+ *dp = d;
+ return JS_TRUE;
+}
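+
+/*
+ * Rough examples for js_strtod (see also the comment in jsnum.h):
+ *
+ *   "  1.5e3abc" => *dp == 1500.0, *ep just past "1.5e3"
+ *   "-Infinity"  => *dp == -Infinity ("Infinity" must match case exactly)
+ *   "abc"        => *dp == 0.0, *ep == s (no number consumed)
+ */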
+
+struct BinaryDigitReader
+{
+ uintN base; /* Base of number; must be a power of 2 */
+ uintN digit; /* Current digit value in radix given by base */
+ uintN digitMask; /* Mask to extract the next bit from digit */
+ const jschar *digits; /* Pointer to the remaining digits */
+ const jschar *end; /* Pointer to first non-digit */
+};
+
+/* Return the next binary digit from the number or -1 if done */
+static intN GetNextBinaryDigit(struct BinaryDigitReader *bdr)
+{
+ intN bit;
+
+ if (bdr->digitMask == 0) {
+ uintN c;
+
+ if (bdr->digits == bdr->end)
+ return -1;
+
+ c = *bdr->digits++;
+ if ('0' <= c && c <= '9')
+ bdr->digit = c - '0';
+ else if ('a' <= c && c <= 'z')
+ bdr->digit = c - 'a' + 10;
+ else bdr->digit = c - 'A' + 10;
+ bdr->digitMask = bdr->base >> 1;
+ }
+ bit = (bdr->digit & bdr->digitMask) != 0;
+ bdr->digitMask >>= 1;
+ return bit;
+}
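+
+/*
+ * Example of the bit stream produced above for a power-of-two base: with
+ * base == 16 and digits "81", the reader yields the bits of 0x81 most
+ * significant first: 1,0,0,0 from '8', then 0,0,0,1 from '1', and finally
+ * -1 once the digits are exhausted.
+ */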
+
+JSBool
+js_strtointeger(JSContext *cx, const jschar *s, const jschar **ep, jsint base, jsdouble *dp)
+{
+ JSBool negative;
+ jsdouble value;
+ const jschar *start;
+ const jschar *s1 = js_SkipWhiteSpace(s);
+
+ if ((negative = (*s1 == '-')) != 0 || *s1 == '+')
+ s1++;
+
+ if (base == 0) {
+ /* No base supplied, or some base that evaluated to 0. */
+ if (*s1 == '0') {
+ /* It's either hex or octal; only increment char if str isn't '0' */
+ if (s1[1] == 'X' || s1[1] == 'x') { /* Hex */
+ s1 += 2;
+ base = 16;
+ } else { /* Octal */
+ base = 8;
+ }
+ } else {
+ base = 10; /* Default to decimal. */
+ }
+ } else if (base == 16 && *s1 == '0' && (s1[1] == 'X' || s1[1] == 'x')) {
+ /* If base is 16, ignore hex prefix. */
+ s1 += 2;
+ }
+
+ /*
+ * Done with the preliminaries; find some prefix of the string that's
+ * a number in the given base.
+ */
+ start = s1; /* Mark - if string is empty, we return NaN. */
+ value = 0.0;
+ for (;;) {
+ uintN digit;
+ jschar c = *s1;
+ if ('0' <= c && c <= '9')
+ digit = c - '0';
+ else if ('a' <= c && c <= 'z')
+ digit = c - 'a' + 10;
+ else if ('A' <= c && c <= 'Z')
+ digit = c - 'A' + 10;
+ else
+ break;
+ if (digit >= (uintN)base)
+ break;
+ value = value * base + digit;
+ s1++;
+ }
+
+ if (value >= 9007199254740992.0) {
+ if (base == 10) {
+ /*
+ * If we're accumulating a decimal number and the number is >=
+ * 2^53, then the result from the repeated multiply-add above may
+ * be inaccurate. Call JS_strtod to get the correct answer.
+ */
+ size_t i;
+ size_t length = s1 - start;
+ char *cstr = (char *) JS_malloc(cx, length + 1);
+ char *estr;
+ int err=0;
+
+ if (!cstr)
+ return JS_FALSE;
+ for (i = 0; i != length; i++)
+ cstr[i] = (char)start[i];
+ cstr[length] = 0;
+
+ value = JS_strtod(cstr, &estr, &err);
+ if (err == JS_DTOA_ENOMEM) {
+ JS_ReportOutOfMemory(cx);
+ JS_free(cx, cstr);
+ return JS_FALSE;
+ }
+ if (err == JS_DTOA_ERANGE && value == HUGE_VAL)
+ value = *cx->runtime->jsPositiveInfinity;
+ JS_free(cx, cstr);
+ } else if ((base & (base - 1)) == 0) {
+ /*
+ * The number may also be inaccurate for power-of-two bases. This
+ * happens if the addition in value * base + digit causes a round-
+ * down to an even least significant mantissa bit when the first
+ * dropped bit is a one. If any of the following digits in the
+ * number (which haven't been added in yet) are nonzero, then the
+ * correct action would have been to round up instead of down. An
+ * example occurs when reading the number 0x1000000000000081, which
+ * rounds to 0x1000000000000000 instead of 0x1000000000000100.
+ */
+ struct BinaryDigitReader bdr;
+ intN bit, bit2;
+ intN j;
+
+ bdr.base = base;
+ bdr.digitMask = 0;
+ bdr.digits = start;
+ bdr.end = s1;
+ value = 0.0;
+
+ /* Skip leading zeros. */
+ do {
+ bit = GetNextBinaryDigit(&bdr);
+ } while (bit == 0);
+
+ if (bit == 1) {
+ /* Gather the 53 significant bits (including the leading 1) */
+ value = 1.0;
+ for (j = 52; j; j--) {
+ bit = GetNextBinaryDigit(&bdr);
+ if (bit < 0)
+ goto done;
+ value = value*2 + bit;
+ }
+ /* bit2 is the 54th bit (the first dropped from the mantissa) */
+ bit2 = GetNextBinaryDigit(&bdr);
+ if (bit2 >= 0) {
+ jsdouble factor = 2.0;
+ intN sticky = 0; /* sticky is 1 if any bit beyond the 54th is 1 */
+ intN bit3;
+
+ while ((bit3 = GetNextBinaryDigit(&bdr)) >= 0) {
+ sticky |= bit3;
+ factor *= 2;
+ }
+ value += bit2 & (bit | sticky);
+ value *= factor;
+ }
+ done:;
+ }
+ }
+ }
+ /* We don't worry about inaccurate numbers for any other base. */
+
+ if (s1 == start) {
+ *dp = 0.0;
+ *ep = s;
+ } else {
+ *dp = negative ? -value : value;
+ *ep = s1;
+ }
+ return JS_TRUE;
+}
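+
+/*
+ * Informal examples for js_strtointeger (radix handling mirrors parseInt):
+ *
+ *   "0x10" with base 0  => 16.0  (hex prefix detected)
+ *   "017"  with base 0  => 15.0  (leading zero selects octal)
+ *   "z"    with base 36 => 35.0
+ *   "12e3" with base 10 => 12.0  ('e' stops the scan; no exponents here)
+ *
+ * Values of 2^53 and above are re-derived via JS_strtod (base 10) or the
+ * BinaryDigitReader (power-of-two bases) so the result stays correctly
+ * rounded, as described in the comments above.
+ */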
diff --git a/src/third_party/js-1.7/jsnum.h b/src/third_party/js-1.7/jsnum.h
new file mode 100644
index 00000000000..cd99501e77f
--- /dev/null
+++ b/src/third_party/js-1.7/jsnum.h
@@ -0,0 +1,268 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsnum_h___
+#define jsnum_h___
+/*
+ * JS number (IEEE double) interface.
+ *
+ * JS numbers are optimistically stored in the top 31 bits of 32-bit integers,
+ * but floating point literals, results that overflow 31 bits, and division and
+ * modulus operands and results require a 64-bit IEEE double. These are GC'ed
+ * and pointed to by 32-bit jsvals on the stack and in object properties.
+ *
+ * When a JS number is treated as an object (followed by . or []), the runtime
+ * wraps it with a JSObject whose valueOf method returns the unwrapped number.
+ */
+
+JS_BEGIN_EXTERN_C
+
+/*
+ * Stefan Hanske <sh990154@mail.uni-greifswald.de> reports:
+ * ARM is a little-endian architecture, but 64-bit doubles are stored
+ * differently: each 32-bit word is in little-endian byte order, while the
+ * two words themselves are stored in big-endian order.
+ */
+
+#if defined(__arm) || defined(__arm32__) || defined(__arm26__) || defined(__arm__)
+#define CPU_IS_ARM
+#endif
+
+typedef union jsdpun {
+ struct {
+#if defined(IS_LITTLE_ENDIAN) && !defined(CPU_IS_ARM)
+ uint32 lo, hi;
+#else
+ uint32 hi, lo;
+#endif
+ } s;
+ jsdouble d;
+} jsdpun;
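+
+/*
+ * js_InitRuntimeNumberState (jsnum.c), for example, builds the special
+ * values through this union by writing the raw IEEE-754 words directly:
+ *
+ *   jsdpun u;
+ *   u.s.hi = JSDOUBLE_HI32_EXPMASK;  u.s.lo = 0;        // u.d == +Infinity
+ *   u.s.hi |= JSDOUBLE_HI32_SIGNBIT;                    // u.d == -Infinity
+ *   u.s.hi = JSDOUBLE_HI32_EXPMASK | JSDOUBLE_HI32_MANTMASK;
+ *   u.s.lo = 0xffffffff;                                // u.d is a NaN
+ */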
+
+#if (__GNUC__ == 2 && __GNUC_MINOR__ > 95) || __GNUC__ > 2
+/*
+ * This version of the macros is safe for the alias optimizations that gcc
+ * does, but uses gcc-specific extensions.
+ */
+
+#define JSDOUBLE_HI32(x) (__extension__ ({ jsdpun u; u.d = (x); u.s.hi; }))
+#define JSDOUBLE_LO32(x) (__extension__ ({ jsdpun u; u.d = (x); u.s.lo; }))
+#define JSDOUBLE_SET_HI32(x, y) \
+ (__extension__ ({ jsdpun u; u.d = (x); u.s.hi = (y); (x) = u.d; }))
+#define JSDOUBLE_SET_LO32(x, y) \
+ (__extension__ ({ jsdpun u; u.d = (x); u.s.lo = (y); (x) = u.d; }))
+
+#else /* not GNUC, or an old GNUC */
+
+/*
+ * We don't know of any non-gcc compilers that perform alias optimization,
+ * so this code should work.
+ */
+
+#if defined(IS_LITTLE_ENDIAN) && !defined(CPU_IS_ARM)
+#define JSDOUBLE_HI32(x) (((uint32 *)&(x))[1])
+#define JSDOUBLE_LO32(x) (((uint32 *)&(x))[0])
+#else
+#define JSDOUBLE_HI32(x) (((uint32 *)&(x))[0])
+#define JSDOUBLE_LO32(x) (((uint32 *)&(x))[1])
+#endif
+
+#define JSDOUBLE_SET_HI32(x, y) (JSDOUBLE_HI32(x)=(y))
+#define JSDOUBLE_SET_LO32(x, y) (JSDOUBLE_LO32(x)=(y))
+
+#endif /* not GNUC, or an old GNUC */
+
+#define JSDOUBLE_HI32_SIGNBIT 0x80000000
+#define JSDOUBLE_HI32_EXPMASK 0x7ff00000
+#define JSDOUBLE_HI32_MANTMASK 0x000fffff
+
+#define JSDOUBLE_IS_NaN(x) \
+ ((JSDOUBLE_HI32(x) & JSDOUBLE_HI32_EXPMASK) == JSDOUBLE_HI32_EXPMASK && \
+ (JSDOUBLE_LO32(x) || (JSDOUBLE_HI32(x) & JSDOUBLE_HI32_MANTMASK)))
+
+#define JSDOUBLE_IS_INFINITE(x) \
+ ((JSDOUBLE_HI32(x) & ~JSDOUBLE_HI32_SIGNBIT) == JSDOUBLE_HI32_EXPMASK && \
+ !JSDOUBLE_LO32(x))
+
+#define JSDOUBLE_IS_FINITE(x) \
+ ((JSDOUBLE_HI32(x) & JSDOUBLE_HI32_EXPMASK) != JSDOUBLE_HI32_EXPMASK)
+
+#define JSDOUBLE_IS_NEGZERO(d) (JSDOUBLE_HI32(d) == JSDOUBLE_HI32_SIGNBIT && \
+ JSDOUBLE_LO32(d) == 0)
+
+/*
+ * JSDOUBLE_IS_INT first checks that d is neither NaN nor infinite, to avoid
+ * raising SIGFPE on platforms such as Alpha Linux, then (only if the cast is
+ * safe) leaves i as (jsint)d. This also avoids anomalous NaN floating-point
+ * comparisons under MSVC.
+ */
+#define JSDOUBLE_IS_INT(d, i) (JSDOUBLE_IS_FINITE(d) \
+ && !JSDOUBLE_IS_NEGZERO(d) \
+ && ((d) == (i = (jsint)(d))))
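+
+/*
+ * Typical use (js_NewNumberValue in jsnum.c does exactly this):
+ *
+ *   jsint i;
+ *   if (JSDOUBLE_IS_INT(d, i) && INT_FITS_IN_JSVAL(i))
+ *       *rval = INT_TO_JSVAL(i);      // store as a tagged int
+ *   else
+ *       ...allocate a GC'ed double...
+ *
+ * Note that -0.0 deliberately fails the test (see JSDOUBLE_IS_NEGZERO), so
+ * negative zero is always kept as a double.
+ */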
+
+#if defined(XP_WIN)
+#define JSDOUBLE_COMPARE(LVAL, OP, RVAL, IFNAN) \
+ ((JSDOUBLE_IS_NaN(LVAL) || JSDOUBLE_IS_NaN(RVAL)) \
+ ? (IFNAN) \
+ : (LVAL) OP (RVAL))
+#else
+#define JSDOUBLE_COMPARE(LVAL, OP, RVAL, IFNAN) ((LVAL) OP (RVAL))
+#endif
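+
+/*
+ * E.g. JSDOUBLE_COMPARE(x, <, y, JS_FALSE) yields JS_FALSE whenever either
+ * operand is NaN on XP_WIN; on other platforms it is simply (x < y).
+ */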
+
+/* Initialize number constants and runtime state for the first context. */
+extern JSBool
+js_InitRuntimeNumberState(JSContext *cx);
+
+extern void
+js_FinishRuntimeNumberState(JSContext *cx);
+
+/* Initialize the Number class, returning its prototype object. */
+extern JSClass js_NumberClass;
+
+extern JSObject *
+js_InitNumberClass(JSContext *cx, JSObject *obj);
+
+/*
+ * String constants for global function names, used in jsapi.c and jsnum.c.
+ */
+extern const char js_Infinity_str[];
+extern const char js_NaN_str[];
+extern const char js_isNaN_str[];
+extern const char js_isFinite_str[];
+extern const char js_parseFloat_str[];
+extern const char js_parseInt_str[];
+
+/* GC-allocate a new JS number. */
+extern jsdouble *
+js_NewDouble(JSContext *cx, jsdouble d, uintN gcflag);
+
+extern void
+js_FinalizeDouble(JSContext *cx, jsdouble *dp);
+
+extern JSBool
+js_NewDoubleValue(JSContext *cx, jsdouble d, jsval *rval);
+
+extern JSBool
+js_NewNumberValue(JSContext *cx, jsdouble d, jsval *rval);
+
+/* Construct a Number instance that wraps around d. */
+extern JSObject *
+js_NumberToObject(JSContext *cx, jsdouble d);
+
+/* Convert a number to a GC'ed string. */
+extern JSString *
+js_NumberToString(JSContext *cx, jsdouble d);
+
+/*
+ * Convert a value to a number, returning false after reporting any error,
+ * otherwise returning true with *dp set.
+ */
+extern JSBool
+js_ValueToNumber(JSContext *cx, jsval v, jsdouble *dp);
+
+/*
+ * Convert a value or a double to an int32, according to the ECMA rules
+ * for ToInt32.
+ */
+extern JSBool
+js_ValueToECMAInt32(JSContext *cx, jsval v, int32 *ip);
+
+extern JSBool
+js_DoubleToECMAInt32(JSContext *cx, jsdouble d, int32 *ip);
+
+/*
+ * Convert a value or a double to a uint32, according to the ECMA rules
+ * for ToUint32.
+ */
+extern JSBool
+js_ValueToECMAUint32(JSContext *cx, jsval v, uint32 *ip);
+
+extern JSBool
+js_DoubleToECMAUint32(JSContext *cx, jsdouble d, uint32 *ip);
+
+/*
+ * Convert a value to a number, then to an int32 if it fits, rounding to
+ * nearest; report an error and fail if the double is out of range or
+ * unordered (NaN).
+ */
+extern JSBool
+js_ValueToInt32(JSContext *cx, jsval v, int32 *ip);
+
+/*
+ * Convert a value to a number, then to a uint16 according to the ECMA rules
+ * for ToUint16.
+ */
+extern JSBool
+js_ValueToUint16(JSContext *cx, jsval v, uint16 *ip);
+
+/*
+ * Convert a jsdouble to an integral number, stored in a jsdouble.
+ * If d is NaN, return 0. If d is an infinity, return it without conversion.
+ */
+extern jsdouble
+js_DoubleToInteger(jsdouble d);
+
+/*
+ * Similar to strtod except that it replaces overflows with infinities of the
+ * correct sign, and underflows with zeros of the correct sign. Guaranteed to
+ * return the closest double number to the given input in dp.
+ *
+ * Also allows inputs of the form [+|-]Infinity, which produce an infinity of
+ * the appropriate sign. The case of the "Infinity" string must match exactly.
+ * If the string does not contain a number, set *ep to s and return 0.0 in dp.
+ * Return false if out of memory.
+ */
+extern JSBool
+js_strtod(JSContext *cx, const jschar *s, const jschar **ep, jsdouble *dp);
+
+/*
+ * Similar to strtol except that it handles integers of arbitrary size.
+ * Guaranteed to return the closest double number to the given input when radix
+ * is 10 or a power of 2. Callers may see round-off errors for very large
+ * numbers whose radix is neither 10 nor a power of 2.
+ *
+ * If the string does not contain a number, set *ep to s and return 0.0 in dp.
+ * Return false if out of memory.
+ */
+extern JSBool
+js_strtointeger(JSContext *cx, const jschar *s, const jschar **ep, jsint radix, jsdouble *dp);
+
+JS_END_EXTERN_C
+
+#endif /* jsnum_h___ */
diff --git a/src/third_party/js-1.7/jsobj.c b/src/third_party/js-1.7/jsobj.c
new file mode 100644
index 00000000000..b552acafe98
--- /dev/null
+++ b/src/third_party/js-1.7/jsobj.c
@@ -0,0 +1,5035 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS object implementation.
+ */
+#include "jsstddef.h"
+#include <stdlib.h>
+#include <string.h>
+#include "jstypes.h"
+#include "jsarena.h" /* Added by JSIFY */
+#include "jsbit.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jshash.h" /* Added by JSIFY */
+#include "jsdhash.h"
+#include "jsprf.h"
+#include "jsapi.h"
+#include "jsarray.h"
+#include "jsatom.h"
+#include "jsbool.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsfun.h"
+#include "jsgc.h"
+#include "jsinterp.h"
+#include "jslock.h"
+#include "jsnum.h"
+#include "jsobj.h"
+#include "jsscan.h"
+#include "jsscope.h"
+#include "jsscript.h"
+#include "jsstr.h"
+#include "jsopcode.h"
+
+#include "jsdbgapi.h" /* whether or not JS_HAS_OBJ_WATCHPOINT */
+
+#if JS_HAS_GENERATORS
+#include "jsiter.h"
+#endif
+
+#if JS_HAS_XML_SUPPORT
+#include "jsxml.h"
+#endif
+
+#if JS_HAS_XDR
+#include "jsxdrapi.h"
+#endif
+
+#ifdef JS_THREADSAFE
+#define NATIVE_DROP_PROPERTY js_DropProperty
+
+extern void
+js_DropProperty(JSContext *cx, JSObject *obj, JSProperty *prop);
+#else
+#define NATIVE_DROP_PROPERTY NULL
+#endif
+
+JS_FRIEND_DATA(JSObjectOps) js_ObjectOps = {
+ js_NewObjectMap, js_DestroyObjectMap,
+ js_LookupProperty, js_DefineProperty,
+ js_GetProperty, js_SetProperty,
+ js_GetAttributes, js_SetAttributes,
+ js_DeleteProperty, js_DefaultValue,
+ js_Enumerate, js_CheckAccess,
+ NULL, NATIVE_DROP_PROPERTY,
+ js_Call, js_Construct,
+ NULL, js_HasInstance,
+ js_SetProtoOrParent, js_SetProtoOrParent,
+ js_Mark, js_Clear,
+ js_GetRequiredSlot, js_SetRequiredSlot
+};
+
+JSClass js_ObjectClass = {
+ js_Object_str,
+ JSCLASS_HAS_CACHED_PROTO(JSProto_Object),
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+};
+
+#if JS_HAS_OBJ_PROTO_PROP
+
+static JSBool
+obj_getSlot(JSContext *cx, JSObject *obj, jsval id, jsval *vp);
+
+static JSBool
+obj_setSlot(JSContext *cx, JSObject *obj, jsval id, jsval *vp);
+
+static JSBool
+obj_getCount(JSContext *cx, JSObject *obj, jsval id, jsval *vp);
+
+static JSPropertySpec object_props[] = {
+ /* These two must come first; see object_props[slot].name usage below. */
+ {js_proto_str, JSSLOT_PROTO, JSPROP_PERMANENT|JSPROP_SHARED,
+ obj_getSlot, obj_setSlot},
+ {js_parent_str,JSSLOT_PARENT,JSPROP_READONLY|JSPROP_PERMANENT|JSPROP_SHARED,
+ obj_getSlot, obj_setSlot},
+ {js_count_str, 0, JSPROP_PERMANENT,obj_getCount, obj_getCount},
+ {0,0,0,0,0}
+};
+
+/* NB: JSSLOT_PROTO and JSSLOT_PARENT are already indexes into object_props. */
+#define JSSLOT_COUNT 2
+
+static JSBool
+ReportStrictSlot(JSContext *cx, uint32 slot)
+{
+ if (slot == JSSLOT_PROTO)
+ return JS_TRUE;
+ return JS_ReportErrorFlagsAndNumber(cx,
+ JSREPORT_WARNING | JSREPORT_STRICT,
+ js_GetErrorMessage, NULL,
+ JSMSG_DEPRECATED_USAGE,
+ object_props[slot].name);
+}
+
+static JSBool
+obj_getSlot(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ uint32 slot;
+ jsid propid;
+ JSAccessMode mode;
+ uintN attrs;
+ JSObject *pobj;
+ JSClass *clasp;
+ JSExtendedClass *xclasp;
+
+ slot = (uint32) JSVAL_TO_INT(id);
+ if (id == INT_TO_JSVAL(JSSLOT_PROTO)) {
+ propid = ATOM_TO_JSID(cx->runtime->atomState.protoAtom);
+ mode = JSACC_PROTO;
+ } else {
+ propid = ATOM_TO_JSID(cx->runtime->atomState.parentAtom);
+ mode = JSACC_PARENT;
+ }
+
+ /* Let OBJ_CHECK_ACCESS get the slot's value, based on the access mode. */
+ if (!OBJ_CHECK_ACCESS(cx, obj, propid, mode, vp, &attrs))
+ return JS_FALSE;
+
+ pobj = JSVAL_TO_OBJECT(*vp);
+ if (pobj) {
+ clasp = OBJ_GET_CLASS(cx, pobj);
+ if (clasp == &js_CallClass || clasp == &js_BlockClass) {
+ /* Censor activations and lexical scopes per ECMA-262. */
+ *vp = JSVAL_NULL;
+ } else if (clasp->flags & JSCLASS_IS_EXTENDED) {
+ xclasp = (JSExtendedClass *) clasp;
+ if (xclasp->outerObject) {
+ pobj = xclasp->outerObject(cx, pobj);
+ if (!pobj)
+ return JS_FALSE;
+ *vp = OBJECT_TO_JSVAL(pobj);
+ }
+ }
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+obj_setSlot(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSObject *pobj;
+ uint32 slot;
+ jsid propid;
+ uintN attrs;
+
+ if (!JSVAL_IS_OBJECT(*vp))
+ return JS_TRUE;
+ pobj = JSVAL_TO_OBJECT(*vp);
+
+ if (pobj) {
+ /*
+ * Innerize pobj here to avoid sticking unwanted properties on the
+ * outer object. This ensures that any with statements only grant
+ * access to the inner object.
+ */
+ OBJ_TO_INNER_OBJECT(cx, pobj);
+ if (!pobj)
+ return JS_FALSE;
+ }
+ slot = (uint32) JSVAL_TO_INT(id);
+ if (JS_HAS_STRICT_OPTION(cx) && !ReportStrictSlot(cx, slot))
+ return JS_FALSE;
+
+ /* __parent__ is readonly and permanent, only __proto__ may be set. */
+ propid = ATOM_TO_JSID(cx->runtime->atomState.protoAtom);
+ if (!OBJ_CHECK_ACCESS(cx, obj, propid, JSACC_PROTO|JSACC_WRITE, vp, &attrs))
+ return JS_FALSE;
+
+ return js_SetProtoOrParent(cx, obj, slot, pobj);
+}
+
+static JSBool
+obj_getCount(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ jsval iter_state;
+ jsid num_properties;
+ JSBool ok;
+
+ if (JS_HAS_STRICT_OPTION(cx) && !ReportStrictSlot(cx, JSSLOT_COUNT))
+ return JS_FALSE;
+
+ /* Get the number of properties to enumerate. */
+ iter_state = JSVAL_NULL;
+ ok = OBJ_ENUMERATE(cx, obj, JSENUMERATE_INIT, &iter_state, &num_properties);
+ if (!ok)
+ goto out;
+
+ if (!JSVAL_IS_INT(num_properties)) {
+ JS_ASSERT(0);
+ *vp = JSVAL_ZERO;
+ goto out;
+ }
+ *vp = num_properties;
+
+out:
+ if (iter_state != JSVAL_NULL)
+ ok = OBJ_ENUMERATE(cx, obj, JSENUMERATE_DESTROY, &iter_state, 0);
+ return ok;
+}
+
+#else /* !JS_HAS_OBJ_PROTO_PROP */
+
+#define object_props NULL
+
+#endif /* !JS_HAS_OBJ_PROTO_PROP */
+
+JSBool
+js_SetProtoOrParent(JSContext *cx, JSObject *obj, uint32 slot, JSObject *pobj)
+{
+ JSRuntime *rt;
+ JSObject *obj2, *oldproto;
+ JSScope *scope, *newscope;
+
+ /*
+ * Serialize all proto and parent setting in order to detect cycles.
+ * We nest locks in this function, and only here, in the following orders:
+ *
+ * (1) rt->setSlotLock < pobj's scope lock;
+ * rt->setSlotLock < pobj's proto-or-parent's scope lock;
+ * rt->setSlotLock < pobj's grand-proto-or-parent's scope lock;
+ * etc...
+ * (2) rt->setSlotLock < obj's scope lock < pobj's scope lock.
+ *
+ * We avoid AB-BA deadlock by restricting obj from being on pobj's parent
+ * or proto chain (pobj may already be on obj's parent or proto chain; it
+ * could be moving up or down). We finally order obj with respect to pobj
+ * at the bottom of this routine (just before releasing rt->setSlotLock),
+ * by making pobj be obj's prototype or parent.
+ *
+ * After we have set the slot and released rt->setSlotLock, another call
+ * to js_SetProtoOrParent could nest locks according to the first order
+ * list above, but it cannot deadlock with any other thread. For there
+ * to be a deadlock, other parts of the engine would have to nest scope
+ * locks in the opposite order. XXXbe ensure they don't!
+ */
+ rt = cx->runtime;
+#ifdef JS_THREADSAFE
+
+ JS_ACQUIRE_LOCK(rt->setSlotLock);
+ while (rt->setSlotBusy) {
+ jsrefcount saveDepth;
+
+ /* Take pains to avoid nesting rt->gcLock inside rt->setSlotLock! */
+ JS_RELEASE_LOCK(rt->setSlotLock);
+ saveDepth = JS_SuspendRequest(cx);
+ JS_ACQUIRE_LOCK(rt->setSlotLock);
+ if (rt->setSlotBusy)
+ JS_WAIT_CONDVAR(rt->setSlotDone, JS_NO_TIMEOUT);
+ JS_RELEASE_LOCK(rt->setSlotLock);
+ JS_ResumeRequest(cx, saveDepth);
+ JS_ACQUIRE_LOCK(rt->setSlotLock);
+ }
+ rt->setSlotBusy = JS_TRUE;
+ JS_RELEASE_LOCK(rt->setSlotLock);
+
+#define SET_SLOT_DONE(rt) \
+ JS_BEGIN_MACRO \
+ JS_ACQUIRE_LOCK((rt)->setSlotLock); \
+ (rt)->setSlotBusy = JS_FALSE; \
+ JS_NOTIFY_ALL_CONDVAR((rt)->setSlotDone); \
+ JS_RELEASE_LOCK((rt)->setSlotLock); \
+ JS_END_MACRO
+
+#else
+
+#define SET_SLOT_DONE(rt) /* nothing */
+
+#endif
+
+ obj2 = pobj;
+ while (obj2) {
+ if (obj2 == obj) {
+ SET_SLOT_DONE(rt);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_CYCLIC_VALUE,
+#if JS_HAS_OBJ_PROTO_PROP
+ object_props[slot].name
+#else
+ (slot == JSSLOT_PROTO) ? js_proto_str
+ : js_parent_str
+#endif
+ );
+ return JS_FALSE;
+ }
+ obj2 = JSVAL_TO_OBJECT(OBJ_GET_SLOT(cx, obj2, slot));
+ }
+
+ if (slot == JSSLOT_PROTO && OBJ_IS_NATIVE(obj)) {
+ /* Check to see whether obj shares its prototype's scope. */
+ JS_LOCK_OBJ(cx, obj);
+ scope = OBJ_SCOPE(obj);
+ oldproto = JSVAL_TO_OBJECT(LOCKED_OBJ_GET_SLOT(obj, JSSLOT_PROTO));
+ if (oldproto && OBJ_SCOPE(oldproto) == scope) {
+ /* Either obj needs a new empty scope, or it should share pobj's. */
+ if (!pobj ||
+ !OBJ_IS_NATIVE(pobj) ||
+ OBJ_GET_CLASS(cx, pobj) != LOCKED_OBJ_GET_CLASS(oldproto)) {
+ /*
+ * With no proto and no scope of its own, obj is truly empty.
+ *
+ * If pobj is not native, obj needs its own empty scope -- it
+ * should not continue to share oldproto's scope once oldproto
+ * is not on obj's prototype chain. That would put properties
+ * from oldproto's scope ahead of properties defined by pobj,
+ * in lookup order.
+ *
+ * If pobj's class differs from oldproto's, we may need a new
+ * scope to handle differences in private and reserved slots,
+ * so we suboptimally but safely make one.
+ */
+ scope = js_GetMutableScope(cx, obj);
+ if (!scope) {
+ JS_UNLOCK_OBJ(cx, obj);
+ SET_SLOT_DONE(rt);
+ return JS_FALSE;
+ }
+ } else if (OBJ_SCOPE(pobj) != scope) {
+#ifdef JS_THREADSAFE
+ /*
+ * We are about to nest scope locks. Help jslock.c:ShareScope
+ * keep scope->u.count balanced for the JS_UNLOCK_SCOPE, while
+ * avoiding deadlock, by recording scope in rt->setSlotScope.
+ */
+ if (scope->ownercx) {
+ JS_ASSERT(scope->ownercx == cx);
+ rt->setSlotScope = scope;
+ }
+#endif
+
+ /* We can't deadlock because we checked for cycles above (2). */
+ JS_LOCK_OBJ(cx, pobj);
+ newscope = (JSScope *) js_HoldObjectMap(cx, pobj->map);
+ obj->map = &newscope->map;
+ js_DropObjectMap(cx, &scope->map, obj);
+ JS_TRANSFER_SCOPE_LOCK(cx, scope, newscope);
+ scope = newscope;
+#ifdef JS_THREADSAFE
+ rt->setSlotScope = NULL;
+#endif
+ }
+ }
+ LOCKED_OBJ_SET_SLOT(obj, JSSLOT_PROTO, OBJECT_TO_JSVAL(pobj));
+ JS_UNLOCK_SCOPE(cx, scope);
+ } else {
+ OBJ_SET_SLOT(cx, obj, slot, OBJECT_TO_JSVAL(pobj));
+ }
+
+ SET_SLOT_DONE(rt);
+ return JS_TRUE;
+
+#undef SET_SLOT_DONE
+}
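+
+/*
+ * The cycle check above is what makes script such as
+ *
+ *   var a = {}, b = {};
+ *   a.__proto__ = b;
+ *   b.__proto__ = a;    // second assignment fails
+ *
+ * report JSMSG_CYCLIC_VALUE instead of creating an unbounded prototype
+ * chain.
+ */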
+
+JS_STATIC_DLL_CALLBACK(JSHashNumber)
+js_hash_object(const void *key)
+{
+ return (JSHashNumber)JS_PTR_TO_UINT32(key) >> JSVAL_TAGBITS;
+}
+
+static JSHashEntry *
+MarkSharpObjects(JSContext *cx, JSObject *obj, JSIdArray **idap)
+{
+ JSSharpObjectMap *map;
+ JSHashTable *table;
+ JSHashNumber hash;
+ JSHashEntry **hep, *he;
+ jsatomid sharpid;
+ JSIdArray *ida;
+ JSBool ok;
+ jsint i, length;
+ jsid id;
+#if JS_HAS_GETTER_SETTER
+ JSObject *obj2;
+ JSProperty *prop;
+ uintN attrs;
+#endif
+ jsval val;
+ int stackDummy;
+
+ if (!JS_CHECK_STACK_SIZE(cx, stackDummy)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_OVER_RECURSED);
+ return NULL;
+ }
+
+ map = &cx->sharpObjectMap;
+ table = map->table;
+ hash = js_hash_object(obj);
+ hep = JS_HashTableRawLookup(table, hash, obj);
+ he = *hep;
+ if (!he) {
+ sharpid = 0;
+ he = JS_HashTableRawAdd(table, hep, hash, obj,
+ JS_UINT32_TO_PTR(sharpid));
+ if (!he) {
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+ }
+
+ /*
+ * Increment map->depth to protect js_EnterSharpObject from reentering
+ * itself badly. Without this fix, if we reenter the basis case where
+ * map->depth == 0, when unwinding the inner call we will destroy the
+ * newly-created hash table and crash.
+ */
+ ++map->depth;
+ ida = JS_Enumerate(cx, obj);
+ --map->depth;
+ if (!ida)
+ return NULL;
+
+ ok = JS_TRUE;
+ for (i = 0, length = ida->length; i < length; i++) {
+ id = ida->vector[i];
+#if JS_HAS_GETTER_SETTER
+ ok = OBJ_LOOKUP_PROPERTY(cx, obj, id, &obj2, &prop);
+ if (!ok)
+ break;
+ if (!prop)
+ continue;
+ ok = OBJ_GET_ATTRIBUTES(cx, obj2, id, prop, &attrs);
+ if (ok) {
+ if (OBJ_IS_NATIVE(obj2) &&
+ (attrs & (JSPROP_GETTER | JSPROP_SETTER))) {
+ val = JSVAL_NULL;
+ if (attrs & JSPROP_GETTER)
+ val = (jsval) ((JSScopeProperty*)prop)->getter;
+ if (attrs & JSPROP_SETTER) {
+ if (val != JSVAL_NULL) {
+ /* Mark the getter, then set val to setter. */
+ ok = (MarkSharpObjects(cx, JSVAL_TO_OBJECT(val),
+ NULL)
+ != NULL);
+ }
+ val = (jsval) ((JSScopeProperty*)prop)->setter;
+ }
+ } else {
+ ok = OBJ_GET_PROPERTY(cx, obj, id, &val);
+ }
+ }
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+#else
+ ok = OBJ_GET_PROPERTY(cx, obj, id, &val);
+#endif
+ if (!ok)
+ break;
+ if (!JSVAL_IS_PRIMITIVE(val) &&
+ !MarkSharpObjects(cx, JSVAL_TO_OBJECT(val), NULL)) {
+ ok = JS_FALSE;
+ break;
+ }
+ }
+ if (!ok || !idap)
+ JS_DestroyIdArray(cx, ida);
+ if (!ok)
+ return NULL;
+ } else {
+ sharpid = JS_PTR_TO_UINT32(he->value);
+ if (sharpid == 0) {
+ sharpid = ++map->sharpgen << SHARP_ID_SHIFT;
+ he->value = JS_UINT32_TO_PTR(sharpid);
+ }
+ ida = NULL;
+ }
+ if (idap)
+ *idap = ida;
+ return he;
+}
+
+JSHashEntry *
+js_EnterSharpObject(JSContext *cx, JSObject *obj, JSIdArray **idap,
+ jschar **sp)
+{
+ JSSharpObjectMap *map;
+ JSHashTable *table;
+ JSIdArray *ida;
+ JSHashNumber hash;
+ JSHashEntry *he, **hep;
+ jsatomid sharpid;
+ char buf[20];
+ size_t len;
+
+ if (JS_HAS_NATIVE_BRANCH_CALLBACK_OPTION(cx) &&
+ cx->branchCallback &&
+ !cx->branchCallback(cx, NULL)) {
+ return NULL;
+ }
+
+ /* Set to null in case we return an early error. */
+ *sp = NULL;
+ map = &cx->sharpObjectMap;
+ table = map->table;
+ if (!table) {
+ table = JS_NewHashTable(8, js_hash_object, JS_CompareValues,
+ JS_CompareValues, NULL, NULL);
+ if (!table) {
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+ }
+ map->table = table;
+ JS_KEEP_ATOMS(cx->runtime);
+ }
+
+ /* From this point the control must flow either through out: or bad:. */
+ ida = NULL;
+ if (map->depth == 0) {
+ he = MarkSharpObjects(cx, obj, &ida);
+ if (!he)
+ goto bad;
+ JS_ASSERT((JS_PTR_TO_UINT32(he->value) & SHARP_BIT) == 0);
+ if (!idap) {
+ JS_DestroyIdArray(cx, ida);
+ ida = NULL;
+ }
+ } else {
+ hash = js_hash_object(obj);
+ hep = JS_HashTableRawLookup(table, hash, obj);
+ he = *hep;
+
+ /*
+ * It's possible that the value of a property has changed from the
+ * first time the object's properties are traversed (when the property
+ * ids are entered into the hash table) to the second (when they are
+ * converted to strings), i.e., the OBJ_GET_PROPERTY() call is not
+ * idempotent.
+ */
+ if (!he) {
+ he = JS_HashTableRawAdd(table, hep, hash, obj, NULL);
+ if (!he) {
+ JS_ReportOutOfMemory(cx);
+ goto bad;
+ }
+ sharpid = 0;
+ goto out;
+ }
+ }
+
+ sharpid = JS_PTR_TO_UINT32(he->value);
+ if (sharpid != 0) {
+ len = JS_snprintf(buf, sizeof buf, "#%u%c",
+ sharpid >> SHARP_ID_SHIFT,
+ (sharpid & SHARP_BIT) ? '#' : '=');
+ *sp = js_InflateString(cx, buf, &len);
+ if (!*sp) {
+ if (ida)
+ JS_DestroyIdArray(cx, ida);
+ goto bad;
+ }
+ }
+
+out:
+ JS_ASSERT(he);
+ if ((sharpid & SHARP_BIT) == 0) {
+ if (idap && !ida) {
+ ida = JS_Enumerate(cx, obj);
+ if (!ida) {
+ if (*sp) {
+ JS_free(cx, *sp);
+ *sp = NULL;
+ }
+ goto bad;
+ }
+ }
+ map->depth++;
+ }
+
+ if (idap)
+ *idap = ida;
+ return he;
+
+bad:
+ /* Clean up the sharpObjectMap table on outermost error. */
+ if (map->depth == 0) {
+ JS_UNKEEP_ATOMS(cx->runtime);
+ map->sharpgen = 0;
+ JS_HashTableDestroy(map->table);
+ map->table = NULL;
+ }
+ return NULL;
+}
+
+void
+js_LeaveSharpObject(JSContext *cx, JSIdArray **idap)
+{
+ JSSharpObjectMap *map;
+ JSIdArray *ida;
+
+ map = &cx->sharpObjectMap;
+ JS_ASSERT(map->depth > 0);
+ if (--map->depth == 0) {
+ JS_UNKEEP_ATOMS(cx->runtime);
+ map->sharpgen = 0;
+ JS_HashTableDestroy(map->table);
+ map->table = NULL;
+ }
+ if (idap) {
+ ida = *idap;
+ if (ida) {
+ JS_DestroyIdArray(cx, ida);
+ *idap = NULL;
+ }
+ }
+}
+
+JS_STATIC_DLL_CALLBACK(intN)
+gc_sharp_table_entry_marker(JSHashEntry *he, intN i, void *arg)
+{
+ GC_MARK((JSContext *)arg, (JSObject *)he->key, "sharp table entry");
+ return JS_DHASH_NEXT;
+}
+
+void
+js_GCMarkSharpMap(JSContext *cx, JSSharpObjectMap *map)
+{
+ JS_ASSERT(map->depth > 0);
+ JS_ASSERT(map->table);
+
+ /*
+     * During recursive calls to MarkSharpObjects, a non-native object or an
+     * object with a custom getProperty method can potentially return an
+     * unrooted value, or even cut an argument of one of MarkSharpObjects'
+     * recursive invocations out of the object graph. So we must protect
+     * map->table entries against GC.
+     *
+     * We cannot simply use JSTempValueRooter to mark the obj argument of
+     * MarkSharpObjects during recursion, as we have to protect *all* entries
+     * in JSSharpObjectMap, including those that contain otherwise unreachable
+     * objects just allocated through a custom getProperty. Otherwise newer
+     * allocations can re-use the address of an object stored in the hashtable,
+     * confusing js_EnterSharpObject. So to address the problem we simply
+     * mark all objects from map->table.
+     *
+     * An alternative "proper" solution is to use JSTempValueRooter in
+     * MarkSharpObjects, with code to remove entries holding otherwise
+     * unreachable objects during finalization. But this is way too complex
+     * to justify the effort.
+ */
+ JS_HashTableEnumerateEntries(map->table, gc_sharp_table_entry_marker, cx);
+}
+
+#define OBJ_TOSTRING_EXTRA 4 /* for 4 local GC roots */
+
+#if JS_HAS_TOSOURCE
+JSBool
+js_obj_toSource(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSBool ok, outermost;
+ JSHashEntry *he;
+ JSIdArray *ida;
+ jschar *chars, *ochars, *vsharp;
+ const jschar *idstrchars, *vchars;
+ size_t nchars, idstrlength, gsoplength, vlength, vsharplength, curlen;
+ char *comma;
+ jsint i, j, length, valcnt;
+ jsid id;
+#if JS_HAS_GETTER_SETTER
+ JSObject *obj2;
+ JSProperty *prop;
+ uintN attrs;
+#endif
+ jsval *val;
+ JSString *gsopold[2];
+ JSString *gsop[2];
+ JSAtom *atom;
+ JSString *idstr, *valstr, *str;
+ int stackDummy;
+
+ if (!JS_CHECK_STACK_SIZE(cx, stackDummy)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_OVER_RECURSED);
+ return JS_FALSE;
+ }
+
+ /* If outermost, we need parentheses to be an expression, not a block. */
+ outermost = (cx->sharpObjectMap.depth == 0);
+ he = js_EnterSharpObject(cx, obj, &ida, &chars);
+ if (!he)
+ return JS_FALSE;
+ if (IS_SHARP(he)) {
+ /*
+ * We didn't enter -- obj is already "sharp", meaning we've visited it
+ * already in our depth first search, and therefore chars contains a
+ * string of the form "#n#".
+ */
+ JS_ASSERT(!ida);
+#if JS_HAS_SHARP_VARS
+ nchars = js_strlen(chars);
+#else
+ chars[0] = '{';
+ chars[1] = '}';
+ chars[2] = 0;
+ nchars = 2;
+#endif
+ goto make_string;
+ }
+ JS_ASSERT(ida);
+ ok = JS_TRUE;
+
+ if (!chars) {
+ /* If outermost, allocate 4 + 1 for "({})" and the terminator. */
+ chars = (jschar *) malloc(((outermost ? 4 : 2) + 1) * sizeof(jschar));
+ nchars = 0;
+ if (!chars)
+ goto error;
+ if (outermost)
+ chars[nchars++] = '(';
+ } else {
+ /* js_EnterSharpObject returned a string of the form "#n=" in chars. */
+ MAKE_SHARP(he);
+ nchars = js_strlen(chars);
+ chars = (jschar *)
+ realloc((ochars = chars), (nchars + 2 + 1) * sizeof(jschar));
+ if (!chars) {
+ free(ochars);
+ goto error;
+ }
+ if (outermost) {
+ /*
+ * No need for parentheses around the whole shebang, because #n=
+ * unambiguously begins an object initializer, and never a block
+ * statement.
+ */
+ outermost = JS_FALSE;
+ }
+ }
+
+#ifdef DUMP_CALL_TABLE
+ if (cx->options & JSOPTION_LOGCALL_TOSOURCE) {
+ const char *classname = OBJ_GET_CLASS(cx, obj)->name;
+ size_t classnchars = strlen(classname);
+ static const char classpropid[] = "C";
+ const char *cp;
+ size_t onchars = nchars;
+
+ /* 2 for ': ', 2 quotes around classname, 2 for ', ' after. */
+ classnchars += sizeof classpropid - 1 + 2 + 2;
+ if (ida->length)
+ classnchars += 2;
+
+ /* 2 for the braces, 1 for the terminator */
+ chars = (jschar *)
+ realloc((ochars = chars),
+ (nchars + classnchars + 2 + 1) * sizeof(jschar));
+ if (!chars) {
+ free(ochars);
+ goto error;
+ }
+
+ chars[nchars++] = '{'; /* 1 from the 2 braces */
+ for (cp = classpropid; *cp; cp++)
+ chars[nchars++] = (jschar) *cp;
+ chars[nchars++] = ':';
+ chars[nchars++] = ' '; /* 2 for ': ' */
+ chars[nchars++] = '"';
+ for (cp = classname; *cp; cp++)
+ chars[nchars++] = (jschar) *cp;
+ chars[nchars++] = '"'; /* 2 quotes */
+ if (ida->length) {
+ chars[nchars++] = ',';
+ chars[nchars++] = ' '; /* 2 for ', ' */
+ }
+
+ JS_ASSERT(nchars - onchars == 1 + classnchars);
+ } else
+#endif
+ chars[nchars++] = '{';
+
+ comma = NULL;
+
+ /*
+ * We have four local roots for cooked and raw value GC safety. Hoist the
+ * "argv + 2" out of the loop using the val local, which refers to the raw
+ * (unconverted, "uncooked") values.
+ */
+ val = argv + 2;
+
+ for (i = 0, length = ida->length; i < length; i++) {
+ JSBool idIsLexicalIdentifier, needOldStyleGetterSetter;
+
+ /* Get strings for id and value and GC-root them via argv. */
+ id = ida->vector[i];
+
+#if JS_HAS_GETTER_SETTER
+ ok = OBJ_LOOKUP_PROPERTY(cx, obj, id, &obj2, &prop);
+ if (!ok)
+ goto error;
+#endif
+
+ /*
+ * Convert id to a jsval and then to a string. Decide early whether we
+ * prefer get/set or old getter/setter syntax.
+ */
+ atom = JSID_IS_ATOM(id) ? JSID_TO_ATOM(id) : NULL;
+ idstr = js_ValueToString(cx, ID_TO_VALUE(id));
+ if (!idstr) {
+ ok = JS_FALSE;
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ goto error;
+ }
+ *rval = STRING_TO_JSVAL(idstr); /* local root */
+ idIsLexicalIdentifier = js_IsIdentifier(idstr);
+ needOldStyleGetterSetter =
+ !idIsLexicalIdentifier ||
+ js_CheckKeyword(JSSTRING_CHARS(idstr),
+ JSSTRING_LENGTH(idstr)) != TOK_EOF;
+
+#if JS_HAS_GETTER_SETTER
+
+ valcnt = 0;
+ if (prop) {
+ ok = OBJ_GET_ATTRIBUTES(cx, obj2, id, prop, &attrs);
+ if (!ok) {
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ goto error;
+ }
+ if (OBJ_IS_NATIVE(obj2) &&
+ (attrs & (JSPROP_GETTER | JSPROP_SETTER))) {
+ if (attrs & JSPROP_GETTER) {
+ val[valcnt] = (jsval) ((JSScopeProperty *)prop)->getter;
+ gsopold[valcnt] =
+ ATOM_TO_STRING(cx->runtime->atomState.getterAtom);
+ gsop[valcnt] =
+ ATOM_TO_STRING(cx->runtime->atomState.getAtom);
+ valcnt++;
+ }
+ if (attrs & JSPROP_SETTER) {
+ val[valcnt] = (jsval) ((JSScopeProperty *)prop)->setter;
+ gsopold[valcnt] =
+ ATOM_TO_STRING(cx->runtime->atomState.setterAtom);
+ gsop[valcnt] =
+ ATOM_TO_STRING(cx->runtime->atomState.setAtom);
+ valcnt++;
+ }
+ } else {
+ valcnt = 1;
+ gsop[0] = NULL;
+ gsopold[0] = NULL;
+ ok = OBJ_GET_PROPERTY(cx, obj, id, &val[0]);
+ }
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ }
+
+#else /* !JS_HAS_GETTER_SETTER */
+
+ /*
+ * We simplify the source code at the price of minor dead code bloat in
+ * the ECMA version (for testing only, see jsconfig.h). The null
+ * default values in gsop[j] suffice to disable non-ECMA getter and
+ * setter code.
+ */
+ valcnt = 1;
+ gsop[0] = NULL;
+ gsopold[0] = NULL;
+ ok = OBJ_GET_PROPERTY(cx, obj, id, &val[0]);
+
+#endif /* !JS_HAS_GETTER_SETTER */
+
+ if (!ok)
+ goto error;
+
+ /*
+ * If id is a string that's not an identifier, then it needs to be
+ * quoted. Also, negative integer ids must be quoted.
+ */
+ if (atom
+ ? !idIsLexicalIdentifier
+ : (JSID_IS_OBJECT(id) || JSID_TO_INT(id) < 0)) {
+ idstr = js_QuoteString(cx, idstr, (jschar)'\'');
+ if (!idstr) {
+ ok = JS_FALSE;
+ goto error;
+ }
+ *rval = STRING_TO_JSVAL(idstr); /* local root */
+ }
+ idstrchars = JSSTRING_CHARS(idstr);
+ idstrlength = JSSTRING_LENGTH(idstr);
+
+ for (j = 0; j < valcnt; j++) {
+ /* Convert val[j] to its canonical source form. */
+ valstr = js_ValueToSource(cx, val[j]);
+ if (!valstr) {
+ ok = JS_FALSE;
+ goto error;
+ }
+ argv[j] = STRING_TO_JSVAL(valstr); /* local root */
+ vchars = JSSTRING_CHARS(valstr);
+ vlength = JSSTRING_LENGTH(valstr);
+
+ if (vchars[0] == '#')
+ needOldStyleGetterSetter = JS_TRUE;
+
+ if (needOldStyleGetterSetter)
+ gsop[j] = gsopold[j];
+
+#ifndef OLD_GETTER_SETTER
+ /*
+ * Remove '(function ' from the beginning of valstr and ')' from the
+ * end so that we can put "get" in front of the function definition.
+ */
+ if (gsop[j] && VALUE_IS_FUNCTION(cx, val[j]) &&
+ !needOldStyleGetterSetter) {
+ const jschar *start = vchars;
+ if (vchars[0] == '(')
+ vchars++;
+ vchars = js_strchr_limit(vchars, '(', vchars + vlength);
+ if (vchars) {
+ vlength -= vchars - start + 1;
+ } else {
+ gsop[j] = NULL;
+ vchars = start;
+ }
+ }
+#else
+ needOldStyleGetterSetter = JS_TRUE;
+ gsop[j] = gsopold[j];
+#endif
+
+ /* If val[j] is a non-sharp object, consider sharpening it. */
+ vsharp = NULL;
+ vsharplength = 0;
+#if JS_HAS_SHARP_VARS
+ if (!JSVAL_IS_PRIMITIVE(val[j]) && vchars[0] != '#') {
+ he = js_EnterSharpObject(cx, JSVAL_TO_OBJECT(val[j]), NULL,
+ &vsharp);
+ if (!he) {
+ ok = JS_FALSE;
+ goto error;
+ }
+ if (IS_SHARP(he)) {
+ vchars = vsharp;
+ vlength = js_strlen(vchars);
+ needOldStyleGetterSetter = JS_TRUE;
+ gsop[j] = gsopold[j];
+ } else {
+ if (vsharp) {
+ vsharplength = js_strlen(vsharp);
+ MAKE_SHARP(he);
+ needOldStyleGetterSetter = JS_TRUE;
+ gsop[j] = gsopold[j];
+ }
+ js_LeaveSharpObject(cx, NULL);
+ }
+ }
+#endif
+
+#define SAFE_ADD(n) \
+ JS_BEGIN_MACRO \
+ size_t n_ = (n); \
+ curlen += n_; \
+ if (curlen < n_) \
+ goto overflow; \
+ JS_END_MACRO
+
+ curlen = nchars;
+ if (comma)
+ SAFE_ADD(2);
+ SAFE_ADD(idstrlength + 1);
+ if (gsop[j])
+ SAFE_ADD(JSSTRING_LENGTH(gsop[j]) + 1);
+ SAFE_ADD(vsharplength);
+ SAFE_ADD(vlength);
+ /* Account for the trailing null. */
+ SAFE_ADD((outermost ? 2 : 1) + 1);
+#undef SAFE_ADD
+
+ if (curlen > (size_t)-1 / sizeof(jschar))
+ goto overflow;
+
+ /* Allocate 1 + 1 at end for closing brace and terminating 0. */
+ chars = (jschar *)
+ realloc((ochars = chars), curlen * sizeof(jschar));
+ if (!chars) {
+ /* Save code space on error: let JS_free ignore null vsharp. */
+ JS_free(cx, vsharp);
+ free(ochars);
+ goto error;
+ }
+
+ if (comma) {
+ chars[nchars++] = comma[0];
+ chars[nchars++] = comma[1];
+ }
+ comma = ", ";
+
+ if (needOldStyleGetterSetter) {
+ js_strncpy(&chars[nchars], idstrchars, idstrlength);
+ nchars += idstrlength;
+ if (gsop[j]) {
+ chars[nchars++] = ' ';
+ gsoplength = JSSTRING_LENGTH(gsop[j]);
+ js_strncpy(&chars[nchars], JSSTRING_CHARS(gsop[j]),
+ gsoplength);
+ nchars += gsoplength;
+ }
+ chars[nchars++] = ':';
+ } else { /* New style "decompilation" */
+ if (gsop[j]) {
+ gsoplength = JSSTRING_LENGTH(gsop[j]);
+ js_strncpy(&chars[nchars], JSSTRING_CHARS(gsop[j]),
+ gsoplength);
+ nchars += gsoplength;
+ chars[nchars++] = ' ';
+ }
+ js_strncpy(&chars[nchars], idstrchars, idstrlength);
+ nchars += idstrlength;
+ /* Extraneous space after id here will be extracted later */
+ chars[nchars++] = gsop[j] ? ' ' : ':';
+ }
+
+ if (vsharplength) {
+ js_strncpy(&chars[nchars], vsharp, vsharplength);
+ nchars += vsharplength;
+ }
+ js_strncpy(&chars[nchars], vchars, vlength);
+ nchars += vlength;
+
+ if (vsharp)
+ JS_free(cx, vsharp);
+#ifdef DUMP_CALL_TABLE
+ if (outermost && nchars >= js_LogCallToSourceLimit)
+ break;
+#endif
+ }
+ }
+
+ chars[nchars++] = '}';
+ if (outermost)
+ chars[nchars++] = ')';
+ chars[nchars] = 0;
+
+ error:
+ js_LeaveSharpObject(cx, &ida);
+
+ if (!ok) {
+ if (chars)
+ free(chars);
+ return ok;
+ }
+
+ if (!chars) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ make_string:
+ str = js_NewString(cx, chars, nchars, 0);
+ if (!str) {
+ free(chars);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+
+ overflow:
+ JS_free(cx, vsharp);
+ free(chars);
+ chars = NULL;
+ goto error;
+}
+#endif /* JS_HAS_TOSOURCE */
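+
+/*
+ * With JS_HAS_SHARP_VARS enabled, the sharp machinery above lets toSource
+ * express cyclic structures; roughly (format per the "#n=" / "#n#" strings
+ * built in js_EnterSharpObject):
+ *
+ *   var o = {};
+ *   o.self = o;
+ *   o.toSource();   // something like "#1={self:#1#}"
+ */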
+
+JSBool
+js_obj_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jschar *chars;
+ size_t nchars;
+ const char *clazz, *prefix;
+ JSString *str;
+
+ clazz = OBJ_GET_CLASS(cx, obj)->name;
+ nchars = 9 + strlen(clazz); /* 9 for "[object ]" */
+ chars = (jschar *) JS_malloc(cx, (nchars + 1) * sizeof(jschar));
+ if (!chars)
+ return JS_FALSE;
+
+ prefix = "[object ";
+ nchars = 0;
+ while ((chars[nchars] = (jschar)*prefix) != 0)
+ nchars++, prefix++;
+ while ((chars[nchars] = (jschar)*clazz) != 0)
+ nchars++, clazz++;
+ chars[nchars++] = ']';
+ chars[nchars] = 0;
+
+ str = js_NewString(cx, chars, nchars, 0);
+ if (!str) {
+ JS_free(cx, chars);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+js_obj_toLocaleString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str;
+
+ str = js_ValueToString(cx, argv[-1]);
+ if (!str)
+ return JS_FALSE;
+
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+obj_valueOf(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ *rval = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+}
+
+/*
+ * Check whether principals subsumes scopeobj's principals, and return true
+ * if so (or if scopeobj has no principals, for backward compatibility with
+ * the JS API, which does not require principals), and false otherwise.
+ */
+JSBool
+js_CheckPrincipalsAccess(JSContext *cx, JSObject *scopeobj,
+ JSPrincipals *principals, JSAtom *caller)
+{
+ JSRuntime *rt;
+ JSPrincipals *scopePrincipals;
+ const char *callerstr;
+
+ rt = cx->runtime;
+ if (rt->findObjectPrincipals) {
+ scopePrincipals = rt->findObjectPrincipals(cx, scopeobj);
+ if (!principals || !scopePrincipals ||
+ !principals->subsume(principals, scopePrincipals)) {
+ callerstr = js_AtomToPrintableString(cx, caller);
+ if (!callerstr)
+ return JS_FALSE;
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_INDIRECT_CALL, callerstr);
+ return JS_FALSE;
+ }
+ }
+ return JS_TRUE;
+}
+
+JSObject *
+js_CheckScopeChainValidity(JSContext *cx, JSObject *scopeobj, const char *caller)
+{
+ JSClass *clasp;
+ JSExtendedClass *xclasp;
+ JSObject *inner;
+
+ if (!scopeobj)
+ goto bad;
+
+ OBJ_TO_INNER_OBJECT(cx, scopeobj);
+ if (!scopeobj)
+ return NULL;
+
+ inner = scopeobj;
+
+ /* XXX This is an awful gross hack. */
+ while (scopeobj) {
+ clasp = OBJ_GET_CLASS(cx, scopeobj);
+ if (clasp->flags & JSCLASS_IS_EXTENDED) {
+ xclasp = (JSExtendedClass*)clasp;
+ if (xclasp->innerObject &&
+ xclasp->innerObject(cx, scopeobj) != scopeobj) {
+ goto bad;
+ }
+ }
+
+ scopeobj = OBJ_GET_PARENT(cx, scopeobj);
+ }
+
+ return inner;
+
+bad:
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_INDIRECT_CALL, caller);
+ return NULL;
+}
+
+static JSBool
+obj_eval(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSStackFrame *fp, *caller;
+ JSBool indirectCall;
+ JSObject *scopeobj;
+ JSString *str;
+ const char *file;
+ uintN line;
+ JSPrincipals *principals;
+ JSScript *script;
+ JSBool ok;
+#if JS_HAS_EVAL_THIS_SCOPE
+ JSObject *callerScopeChain = NULL, *callerVarObj = NULL;
+ JSObject *setCallerScopeChain = NULL;
+ JSBool setCallerVarObj = JS_FALSE;
+#endif
+
+ fp = cx->fp;
+ caller = JS_GetScriptedCaller(cx, fp);
+ JS_ASSERT(!caller || caller->pc);
+ indirectCall = (caller && *caller->pc != JSOP_EVAL);
+
+ if (indirectCall &&
+ !JS_ReportErrorFlagsAndNumber(cx,
+ JSREPORT_WARNING | JSREPORT_STRICT,
+ js_GetErrorMessage, NULL,
+ JSMSG_BAD_INDIRECT_CALL,
+ js_eval_str)) {
+ return JS_FALSE;
+ }
+
+ if (!JSVAL_IS_STRING(argv[0])) {
+ *rval = argv[0];
+ return JS_TRUE;
+ }
+
+ /*
+ * If the caller is a lightweight function and doesn't have a variables
+ * object, then we need to provide one for the compiler to stick any
+ * declared (var) variables into.
+ */
+ if (caller && !caller->varobj && !js_GetCallObject(cx, caller, NULL))
+ return JS_FALSE;
+
+#if JS_HAS_SCRIPT_OBJECT
+ /*
+ * Script.prototype.compile/exec and Object.prototype.eval all take an
+ * optional trailing argument that overrides the scope object.
+ */
+ scopeobj = NULL;
+ if (argc >= 2) {
+ if (!js_ValueToObject(cx, argv[1], &scopeobj))
+ return JS_FALSE;
+ argv[1] = OBJECT_TO_JSVAL(scopeobj);
+ }
+ if (!scopeobj)
+#endif
+ {
+#if JS_HAS_EVAL_THIS_SCOPE
+ /* If obj.eval(str), emulate 'with (obj) eval(str)' in the caller. */
+ if (indirectCall) {
+ callerScopeChain = js_GetScopeChain(cx, caller);
+ if (!callerScopeChain)
+ return JS_FALSE;
+ OBJ_TO_INNER_OBJECT(cx, obj);
+ if (!obj)
+ return JS_FALSE;
+ if (obj != callerScopeChain) {
+ if (!js_CheckPrincipalsAccess(cx, obj,
+ caller->script->principals,
+ cx->runtime->atomState.evalAtom))
+ {
+ return JS_FALSE;
+ }
+
+ scopeobj = js_NewWithObject(cx, obj, callerScopeChain, -1);
+ if (!scopeobj)
+ return JS_FALSE;
+
+ /* Set fp->scopeChain too, for the compiler. */
+ caller->scopeChain = fp->scopeChain = scopeobj;
+
+ /* Remember scopeobj so we can null its private when done. */
+ setCallerScopeChain = scopeobj;
+ }
+
+ callerVarObj = caller->varobj;
+ if (obj != callerVarObj) {
+ /* Set fp->varobj too, for the compiler. */
+ caller->varobj = fp->varobj = obj;
+ setCallerVarObj = JS_TRUE;
+ }
+ }
+ /* From here on, control must exit through label out with ok set. */
+#endif
+
+ /* Compile using caller's current scope object. */
+ if (caller) {
+ scopeobj = js_GetScopeChain(cx, caller);
+ if (!scopeobj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ }
+ }
+
+ /* Ensure we compile this eval with the right object in the scope chain. */
+ scopeobj = js_CheckScopeChainValidity(cx, scopeobj, js_eval_str);
+ if (!scopeobj)
+ return JS_FALSE;
+
+ str = JSVAL_TO_STRING(argv[0]);
+ if (caller) {
+ principals = JS_EvalFramePrincipals(cx, fp, caller);
+ if (principals == caller->script->principals) {
+ file = caller->script->filename;
+ line = js_PCToLineNumber(cx, caller->script, caller->pc);
+ } else {
+ file = principals->codebase;
+ line = 0;
+ }
+ } else {
+ file = NULL;
+ line = 0;
+ principals = NULL;
+ }
+
+ /*
+ * Set JSFRAME_EVAL on fp and any frames (e.g., fun_call if eval.call was
+ * invoked) between fp and its scripted caller, to help the compiler easily
+ * find the same caller whose scope and var obj we've set.
+ *
+ * XXX this nonsense could, and perhaps should, go away with a better way
+ * to pass params to the compiler than via the top-most frame.
+ */
+ do {
+ fp->flags |= JSFRAME_EVAL;
+ } while ((fp = fp->down) != caller);
+
+ script = JS_CompileUCScriptForPrincipals(cx, scopeobj, principals,
+ JSSTRING_CHARS(str),
+ JSSTRING_LENGTH(str),
+ file, line);
+ if (!script) {
+ ok = JS_FALSE;
+ goto out;
+ }
+
+#if JS_HAS_SCRIPT_OBJECT
+ if (argc < 2)
+#endif
+ {
+ /* Execute using caller's new scope object (might be a Call object). */
+ if (caller)
+ scopeobj = caller->scopeChain;
+ }
+
+ /*
+ * Belt-and-braces: check that the lesser of eval's principals and the
+ * caller's principals has access to scopeobj.
+ */
+ ok = js_CheckPrincipalsAccess(cx, scopeobj, principals,
+ cx->runtime->atomState.evalAtom);
+ if (ok)
+ ok = js_Execute(cx, scopeobj, script, caller, JSFRAME_EVAL, rval);
+
+ JS_DestroyScript(cx, script);
+
+out:
+#if JS_HAS_EVAL_THIS_SCOPE
+ /* Restore OBJ_GET_PARENT(scopeobj) not callerScopeChain in case of Call. */
+ if (setCallerScopeChain) {
+ caller->scopeChain = callerScopeChain;
+ JS_ASSERT(OBJ_GET_CLASS(cx, setCallerScopeChain) == &js_WithClass);
+ JS_SetPrivate(cx, setCallerScopeChain, NULL);
+ }
+ if (setCallerVarObj)
+ caller->varobj = callerVarObj;
+#endif
+ return ok;
+}
+
+#if JS_HAS_OBJ_WATCHPOINT
+
+static JSBool
+obj_watch_handler(JSContext *cx, JSObject *obj, jsval id, jsval old, jsval *nvp,
+ void *closure)
+{
+ JSObject *callable;
+ JSRuntime *rt;
+ JSStackFrame *caller;
+ JSPrincipals *subject, *watcher;
+ JSResolvingKey key;
+ JSResolvingEntry *entry;
+ uint32 generation;
+ jsval argv[3];
+ JSBool ok;
+
+ callable = (JSObject *) closure;
+
+ rt = cx->runtime;
+ if (rt->findObjectPrincipals) {
+ /* Skip over any obj_watch_* frames between us and the real subject. */
+ caller = JS_GetScriptedCaller(cx, cx->fp);
+ if (caller) {
+ /*
+ * Only call the watch handler if the watcher is allowed to watch
+ * the currently executing script.
+ */
+ watcher = rt->findObjectPrincipals(cx, callable);
+ subject = JS_StackFramePrincipals(cx, caller);
+
+ if (watcher && subject && !watcher->subsume(watcher, subject)) {
+ /* Silently don't call the watch handler. */
+ return JS_TRUE;
+ }
+ }
+ }
+
+ /* Avoid recursion on (obj, id) already being watched on cx. */
+ key.obj = obj;
+ key.id = id;
+ if (!js_StartResolving(cx, &key, JSRESFLAG_WATCH, &entry))
+ return JS_FALSE;
+ if (!entry)
+ return JS_TRUE;
+ generation = cx->resolvingTable->generation;
+
+ argv[0] = id;
+ argv[1] = old;
+ argv[2] = *nvp;
+ ok = js_InternalCall(cx, obj, OBJECT_TO_JSVAL(callable), 3, argv, nvp);
+ js_StopResolving(cx, &key, JSRESFLAG_WATCH, entry, generation);
+ return ok;
+}
+
+static JSBool
+obj_watch(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSObject *callable;
+ jsval userid, value;
+ jsid propid;
+ uintN attrs;
+
+ callable = js_ValueToCallableObject(cx, &argv[1], 0);
+ if (!callable)
+ return JS_FALSE;
+
+ /* Compute the unique int/atom symbol id needed by js_LookupProperty. */
+ userid = argv[0];
+ if (!JS_ValueToId(cx, userid, &propid))
+ return JS_FALSE;
+
+ if (!OBJ_CHECK_ACCESS(cx, obj, propid, JSACC_WATCH, &value, &attrs))
+ return JS_FALSE;
+ if (attrs & JSPROP_READONLY)
+ return JS_TRUE;
+ return JS_SetWatchPoint(cx, obj, userid, obj_watch_handler, callable);
+}
+
+static JSBool
+obj_unwatch(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return JS_ClearWatchPoint(cx, obj, argv[0], NULL, NULL);
+}
+
+#endif /* JS_HAS_OBJ_WATCHPOINT */
+
+/*
+ * Prototype and property query methods, to complement the 'in' and
+ * 'instanceof' operators.
+ */
+
+/* Proposed ECMA 15.2.4.5. */
+static JSBool
+obj_hasOwnProperty(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ return js_HasOwnPropertyHelper(cx, obj, obj->map->ops->lookupProperty,
+ argc, argv, rval);
+}
+
+JSBool
+js_HasOwnPropertyHelper(JSContext *cx, JSObject *obj, JSLookupPropOp lookup,
+ uintN argc, jsval *argv, jsval *rval)
+{
+ jsid id;
+ JSObject *obj2;
+ JSProperty *prop;
+ JSScopeProperty *sprop;
+
+ if (!JS_ValueToId(cx, argv[0], &id))
+ return JS_FALSE;
+ if (!lookup(cx, obj, id, &obj2, &prop))
+ return JS_FALSE;
+ if (!prop) {
+ *rval = JSVAL_FALSE;
+ } else if (obj2 == obj) {
+ *rval = JSVAL_TRUE;
+ } else {
+ JSClass *clasp;
+ JSExtendedClass *xclasp;
+
+ clasp = OBJ_GET_CLASS(cx, obj);
+ xclasp = (clasp->flags & JSCLASS_IS_EXTENDED)
+ ? (JSExtendedClass *)clasp
+ : NULL;
+ if (xclasp && xclasp->outerObject &&
+ xclasp->outerObject(cx, obj2) == obj) {
+ *rval = JSVAL_TRUE;
+ } else if (OBJ_IS_NATIVE(obj2) && OBJ_GET_CLASS(cx, obj2) == clasp) {
+ /*
+ * The combination of JSPROP_SHARED and JSPROP_PERMANENT in a
+ * delegated property makes that property appear to be direct in
+ * all delegating instances of the same native class. This hack
+ * avoids bloating every function instance with its own 'length'
+ * (AKA 'arity') property. But it must not extend across class
+ * boundaries, to avoid making hasOwnProperty lie (bug 320854).
+ *
+ * It's not really a hack, of course: a permanent property can't
+ * be deleted, and JSPROP_SHARED means "don't allocate a slot in
+             * any instance, prototype or delegating object". Without a slot, and
+ * without the ability to remove and recreate (with differences)
+ * the property, there is no way to tell whether it is directly
+ * owned, or indirectly delegated.
+ */
+ sprop = (JSScopeProperty *)prop;
+ *rval = BOOLEAN_TO_JSVAL(SPROP_IS_SHARED_PERMANENT(sprop));
+ } else {
+ *rval = JSVAL_FALSE;
+ }
+ }
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ return JS_TRUE;
+}
+
+/* Proposed ECMA 15.2.4.6. */
+static JSBool
+obj_isPrototypeOf(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSBool b;
+
+ if (!js_IsDelegate(cx, obj, *argv, &b))
+ return JS_FALSE;
+ *rval = BOOLEAN_TO_JSVAL(b);
+ return JS_TRUE;
+}
+
+/* Proposed ECMA 15.2.4.7. */
+static JSBool
+obj_propertyIsEnumerable(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsid id;
+ uintN attrs;
+ JSObject *obj2;
+ JSProperty *prop;
+ JSBool ok;
+
+ if (!JS_ValueToId(cx, argv[0], &id))
+ return JS_FALSE;
+
+ if (!OBJ_LOOKUP_PROPERTY(cx, obj, id, &obj2, &prop))
+ return JS_FALSE;
+
+ if (!prop) {
+ *rval = JSVAL_FALSE;
+ return JS_TRUE;
+ }
+
+ /*
+ * XXX ECMA spec error compatible: return false unless hasOwnProperty.
+ * The ECMA spec really should be fixed so propertyIsEnumerable and the
+ * for..in loop agree on whether prototype properties are enumerable,
+ * obviously by fixing this method (not by breaking the for..in loop!).
+ *
+ * We check here for shared permanent prototype properties, which should
+ * be treated as if they are local to obj. They are an implementation
+ * technique used to satisfy ECMA requirements; users should not be able
+ * to distinguish a shared permanent proto-property from a local one.
+ */
+ if (obj2 != obj &&
+ !(OBJ_IS_NATIVE(obj2) &&
+ SPROP_IS_SHARED_PERMANENT((JSScopeProperty *)prop))) {
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ *rval = JSVAL_FALSE;
+ return JS_TRUE;
+ }
+
+ ok = OBJ_GET_ATTRIBUTES(cx, obj2, id, prop, &attrs);
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ if (ok)
+ *rval = BOOLEAN_TO_JSVAL((attrs & JSPROP_ENUMERATE) != 0);
+ return ok;
+}
+
+#if JS_HAS_GETTER_SETTER
+static JSBool
+obj_defineGetter(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsval fval, junk;
+ jsid id;
+ uintN attrs;
+
+ fval = argv[1];
+ if (JS_TypeOfValue(cx, fval) != JSTYPE_FUNCTION) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_GETTER_OR_SETTER,
+ js_getter_str);
+ return JS_FALSE;
+ }
+
+ if (!JS_ValueToId(cx, argv[0], &id))
+ return JS_FALSE;
+ if (!js_CheckRedeclaration(cx, obj, id, JSPROP_GETTER, NULL, NULL))
+ return JS_FALSE;
+ /*
+ * Getters and setters are just like watchpoints from an access
+ * control point of view.
+ */
+ if (!OBJ_CHECK_ACCESS(cx, obj, id, JSACC_WATCH, &junk, &attrs))
+ return JS_FALSE;
+ return OBJ_DEFINE_PROPERTY(cx, obj, id, JSVAL_VOID,
+ (JSPropertyOp) JSVAL_TO_OBJECT(fval), NULL,
+ JSPROP_ENUMERATE | JSPROP_GETTER | JSPROP_SHARED,
+ NULL);
+}
+
+static JSBool
+obj_defineSetter(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsval fval, junk;
+ jsid id;
+ uintN attrs;
+
+ fval = argv[1];
+ if (JS_TypeOfValue(cx, fval) != JSTYPE_FUNCTION) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_GETTER_OR_SETTER,
+ js_setter_str);
+ return JS_FALSE;
+ }
+
+ if (!JS_ValueToId(cx, argv[0], &id))
+ return JS_FALSE;
+ if (!js_CheckRedeclaration(cx, obj, id, JSPROP_SETTER, NULL, NULL))
+ return JS_FALSE;
+ /*
+ * Getters and setters are just like watchpoints from an access
+ * control point of view.
+ */
+ if (!OBJ_CHECK_ACCESS(cx, obj, id, JSACC_WATCH, &junk, &attrs))
+ return JS_FALSE;
+ return OBJ_DEFINE_PROPERTY(cx, obj, id, JSVAL_VOID,
+ NULL, (JSPropertyOp) JSVAL_TO_OBJECT(fval),
+ JSPROP_ENUMERATE | JSPROP_SETTER | JSPROP_SHARED,
+ NULL);
+}
+
+static JSBool
+obj_lookupGetter(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsid id;
+ JSObject *pobj;
+ JSProperty *prop;
+ JSScopeProperty *sprop;
+
+ if (!JS_ValueToId(cx, argv[0], &id))
+ return JS_FALSE;
+ if (!OBJ_LOOKUP_PROPERTY(cx, obj, id, &pobj, &prop))
+ return JS_FALSE;
+ if (prop) {
+ if (OBJ_IS_NATIVE(pobj)) {
+ sprop = (JSScopeProperty *) prop;
+ if (sprop->attrs & JSPROP_GETTER)
+ *rval = OBJECT_TO_JSVAL(sprop->getter);
+ }
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+obj_lookupSetter(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsid id;
+ JSObject *pobj;
+ JSProperty *prop;
+ JSScopeProperty *sprop;
+
+ if (!JS_ValueToId(cx, argv[0], &id))
+ return JS_FALSE;
+ if (!OBJ_LOOKUP_PROPERTY(cx, obj, id, &pobj, &prop))
+ return JS_FALSE;
+ if (prop) {
+ if (OBJ_IS_NATIVE(pobj)) {
+ sprop = (JSScopeProperty *) prop;
+ if (sprop->attrs & JSPROP_SETTER)
+ *rval = OBJECT_TO_JSVAL(sprop->setter);
+ }
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ }
+ return JS_TRUE;
+}
+#endif /* JS_HAS_GETTER_SETTER */
+
+#if JS_HAS_OBJ_WATCHPOINT
+const char js_watch_str[] = "watch";
+const char js_unwatch_str[] = "unwatch";
+#endif
+const char js_hasOwnProperty_str[] = "hasOwnProperty";
+const char js_isPrototypeOf_str[] = "isPrototypeOf";
+const char js_propertyIsEnumerable_str[] = "propertyIsEnumerable";
+#if JS_HAS_GETTER_SETTER
+const char js_defineGetter_str[] = "__defineGetter__";
+const char js_defineSetter_str[] = "__defineSetter__";
+const char js_lookupGetter_str[] = "__lookupGetter__";
+const char js_lookupSetter_str[] = "__lookupSetter__";
+#endif
+
+static JSFunctionSpec object_methods[] = {
+#if JS_HAS_TOSOURCE
+ {js_toSource_str, js_obj_toSource, 0, 0, OBJ_TOSTRING_EXTRA},
+#endif
+ {js_toString_str, js_obj_toString, 0, 0, OBJ_TOSTRING_EXTRA},
+ {js_toLocaleString_str, js_obj_toLocaleString, 0, 0, OBJ_TOSTRING_EXTRA},
+ {js_valueOf_str, obj_valueOf, 0,0,0},
+ {js_eval_str, obj_eval, 1,0,0},
+#if JS_HAS_OBJ_WATCHPOINT
+ {js_watch_str, obj_watch, 2,0,0},
+ {js_unwatch_str, obj_unwatch, 1,0,0},
+#endif
+ {js_hasOwnProperty_str, obj_hasOwnProperty, 1,0,0},
+ {js_isPrototypeOf_str, obj_isPrototypeOf, 1,0,0},
+ {js_propertyIsEnumerable_str, obj_propertyIsEnumerable, 1,0,0},
+#if JS_HAS_GETTER_SETTER
+ {js_defineGetter_str, obj_defineGetter, 2,0,0},
+ {js_defineSetter_str, obj_defineSetter, 2,0,0},
+ {js_lookupGetter_str, obj_lookupGetter, 1,0,0},
+ {js_lookupSetter_str, obj_lookupSetter, 1,0,0},
+#endif
+ {0,0,0,0,0}
+};
+
+static JSBool
+Object(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ if (argc == 0) {
+ /* Trigger logic below to construct a blank object. */
+ obj = NULL;
+ } else {
+ /* If argv[0] is null or undefined, obj comes back null. */
+ if (!js_ValueToObject(cx, argv[0], &obj))
+ return JS_FALSE;
+ }
+ if (!obj) {
+ JS_ASSERT(!argc || JSVAL_IS_NULL(argv[0]) || JSVAL_IS_VOID(argv[0]));
+ if (cx->fp->flags & JSFRAME_CONSTRUCTING)
+ return JS_TRUE;
+ obj = js_NewObject(cx, &js_ObjectClass, NULL, NULL);
+ if (!obj)
+ return JS_FALSE;
+ }
+ *rval = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+}
+
+/*
+ * ObjectOps and Class for with-statement stack objects.
+ */
+static JSBool
+with_LookupProperty(JSContext *cx, JSObject *obj, jsid id, JSObject **objp,
+ JSProperty **propp)
+{
+ JSObject *proto = OBJ_GET_PROTO(cx, obj);
+ if (!proto)
+ return js_LookupProperty(cx, obj, id, objp, propp);
+ return OBJ_LOOKUP_PROPERTY(cx, proto, id, objp, propp);
+}
+
+static JSBool
+with_GetProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
+{
+ JSObject *proto = OBJ_GET_PROTO(cx, obj);
+ if (!proto)
+ return js_GetProperty(cx, obj, id, vp);
+ return OBJ_GET_PROPERTY(cx, proto, id, vp);
+}
+
+static JSBool
+with_SetProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
+{
+ JSObject *proto = OBJ_GET_PROTO(cx, obj);
+ if (!proto)
+ return js_SetProperty(cx, obj, id, vp);
+ return OBJ_SET_PROPERTY(cx, proto, id, vp);
+}
+
+static JSBool
+with_GetAttributes(JSContext *cx, JSObject *obj, jsid id, JSProperty *prop,
+ uintN *attrsp)
+{
+ JSObject *proto = OBJ_GET_PROTO(cx, obj);
+ if (!proto)
+ return js_GetAttributes(cx, obj, id, prop, attrsp);
+ return OBJ_GET_ATTRIBUTES(cx, proto, id, prop, attrsp);
+}
+
+static JSBool
+with_SetAttributes(JSContext *cx, JSObject *obj, jsid id, JSProperty *prop,
+ uintN *attrsp)
+{
+ JSObject *proto = OBJ_GET_PROTO(cx, obj);
+ if (!proto)
+ return js_SetAttributes(cx, obj, id, prop, attrsp);
+ return OBJ_SET_ATTRIBUTES(cx, proto, id, prop, attrsp);
+}
+
+static JSBool
+with_DeleteProperty(JSContext *cx, JSObject *obj, jsid id, jsval *rval)
+{
+ JSObject *proto = OBJ_GET_PROTO(cx, obj);
+ if (!proto)
+ return js_DeleteProperty(cx, obj, id, rval);
+ return OBJ_DELETE_PROPERTY(cx, proto, id, rval);
+}
+
+static JSBool
+with_DefaultValue(JSContext *cx, JSObject *obj, JSType hint, jsval *vp)
+{
+ JSObject *proto = OBJ_GET_PROTO(cx, obj);
+ if (!proto)
+ return js_DefaultValue(cx, obj, hint, vp);
+ return OBJ_DEFAULT_VALUE(cx, proto, hint, vp);
+}
+
+static JSBool
+with_Enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op,
+ jsval *statep, jsid *idp)
+{
+ JSObject *proto = OBJ_GET_PROTO(cx, obj);
+ if (!proto)
+ return js_Enumerate(cx, obj, enum_op, statep, idp);
+ return OBJ_ENUMERATE(cx, proto, enum_op, statep, idp);
+}
+
+static JSBool
+with_CheckAccess(JSContext *cx, JSObject *obj, jsid id, JSAccessMode mode,
+ jsval *vp, uintN *attrsp)
+{
+ JSObject *proto = OBJ_GET_PROTO(cx, obj);
+ if (!proto)
+ return js_CheckAccess(cx, obj, id, mode, vp, attrsp);
+ return OBJ_CHECK_ACCESS(cx, proto, id, mode, vp, attrsp);
+}
+
+static JSObject *
+with_ThisObject(JSContext *cx, JSObject *obj)
+{
+ JSObject *proto = OBJ_GET_PROTO(cx, obj);
+ if (!proto)
+ return obj;
+ return OBJ_THIS_OBJECT(cx, proto);
+}
+
+JS_FRIEND_DATA(JSObjectOps) js_WithObjectOps = {
+ js_NewObjectMap, js_DestroyObjectMap,
+ with_LookupProperty, js_DefineProperty,
+ with_GetProperty, with_SetProperty,
+ with_GetAttributes, with_SetAttributes,
+ with_DeleteProperty, with_DefaultValue,
+ with_Enumerate, with_CheckAccess,
+ with_ThisObject, NATIVE_DROP_PROPERTY,
+ NULL, NULL,
+ NULL, NULL,
+ js_SetProtoOrParent, js_SetProtoOrParent,
+ js_Mark, js_Clear,
+ NULL, NULL
+};
+
+static JSObjectOps *
+with_getObjectOps(JSContext *cx, JSClass *clasp)
+{
+ return &js_WithObjectOps;
+}
+
+JSClass js_WithClass = {
+ "With",
+ JSCLASS_HAS_PRIVATE | JSCLASS_HAS_RESERVED_SLOTS(1) | JSCLASS_IS_ANONYMOUS,
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, JS_FinalizeStub,
+ with_getObjectOps,
+ 0,0,0,0,0,0,0
+};
+
+JSObject *
+js_NewWithObject(JSContext *cx, JSObject *proto, JSObject *parent, jsint depth)
+{
+ JSObject *obj;
+
+ obj = js_NewObject(cx, &js_WithClass, proto, parent);
+ if (!obj)
+ return NULL;
+ obj->slots[JSSLOT_PRIVATE] = PRIVATE_TO_JSVAL(cx->fp);
+ OBJ_SET_BLOCK_DEPTH(cx, obj, depth);
+ return obj;
+}
+
+JSObject *
+js_NewBlockObject(JSContext *cx)
+{
+ JSObject *obj;
+
+ /*
+ * Null obj's proto slot so that Object.prototype.* does not pollute block
+ * scopes. Make sure obj has its own scope too, since clearing proto does
+ * not affect OBJ_SCOPE(obj).
+ */
+ obj = js_NewObject(cx, &js_BlockClass, NULL, NULL);
+ if (!obj || !js_GetMutableScope(cx, obj))
+ return NULL;
+ OBJ_SET_PROTO(cx, obj, NULL);
+ return obj;
+}
+
+JSObject *
+js_CloneBlockObject(JSContext *cx, JSObject *proto, JSObject *parent,
+ JSStackFrame *fp)
+{
+ JSObject *clone;
+
+ clone = js_NewObject(cx, &js_BlockClass, proto, parent);
+ if (!clone)
+ return NULL;
+ clone->slots[JSSLOT_PRIVATE] = PRIVATE_TO_JSVAL(fp);
+ clone->slots[JSSLOT_BLOCK_DEPTH] =
+ OBJ_GET_SLOT(cx, proto, JSSLOT_BLOCK_DEPTH);
+ return clone;
+}
+
+/*
+ * XXXblock this reverses a path in the property tree -- try to share
+ * the prototype's scope harder!
+ */
+JSBool
+js_PutBlockObject(JSContext *cx, JSObject *obj)
+{
+ JSStackFrame *fp;
+ uintN depth, slot;
+ JSScopeProperty *sprop;
+
+ fp = (JSStackFrame *) JS_GetPrivate(cx, obj);
+ JS_ASSERT(fp);
+ depth = OBJ_BLOCK_DEPTH(cx, obj);
+ for (sprop = OBJ_SCOPE(obj)->lastProp; sprop; sprop = sprop->parent) {
+ if (sprop->getter != js_BlockClass.getProperty)
+ continue;
+ if (!(sprop->flags & SPROP_HAS_SHORTID))
+ continue;
+ slot = depth + (uintN)sprop->shortid;
+ JS_ASSERT(slot < fp->script->depth);
+ if (!js_DefineNativeProperty(cx, obj, sprop->id,
+ fp->spbase[slot], NULL, NULL,
+ JSPROP_ENUMERATE | JSPROP_PERMANENT,
+ SPROP_HAS_SHORTID, sprop->shortid,
+ NULL)) {
+ JS_SetPrivate(cx, obj, NULL);
+ return JS_FALSE;
+ }
+ }
+
+ return JS_SetPrivate(cx, obj, NULL);
+}
+
+static JSBool
+block_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSStackFrame *fp;
+ jsint slot;
+
+ JS_ASSERT(JS_InstanceOf(cx, obj, &js_BlockClass, NULL));
+ if (!JSVAL_IS_INT(id))
+ return JS_TRUE;
+
+ fp = (JSStackFrame *) JS_GetPrivate(cx, obj);
+ if (!fp)
+ return JS_TRUE;
+
+ slot = OBJ_BLOCK_DEPTH(cx, obj) + (uint16) JSVAL_TO_INT(id);
+ JS_ASSERT((uintN)slot < fp->script->depth);
+ *vp = fp->spbase[slot];
+ return JS_TRUE;
+}
+
+static JSBool
+block_setProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSStackFrame *fp;
+ jsint slot;
+
+ JS_ASSERT(JS_InstanceOf(cx, obj, &js_BlockClass, NULL));
+ if (!JSVAL_IS_INT(id))
+ return JS_TRUE;
+
+ fp = (JSStackFrame *) JS_GetPrivate(cx, obj);
+ if (!fp)
+ return JS_TRUE;
+
+ slot = OBJ_BLOCK_DEPTH(cx, obj) + (uint16) JSVAL_TO_INT(id);
+ JS_ASSERT((uintN)slot < fp->script->depth);
+ fp->spbase[slot] = *vp;
+ return JS_TRUE;
+}
+
+#if JS_HAS_XDR
+
+#define NO_PARENT_INDEX (jsatomid)-1
+
+jsatomid
+FindObjectAtomIndex(JSAtomMap *map, JSObject *obj)
+{
+ size_t i;
+ JSAtom *atom;
+
+ for (i = 0; i < map->length; i++) {
+ atom = map->vector[i];
+ if (ATOM_KEY(atom) == OBJECT_TO_JSVAL(obj))
+ return i;
+ }
+
+ return NO_PARENT_INDEX;
+}
+
+static JSBool
+block_xdrObject(JSXDRState *xdr, JSObject **objp)
+{
+ JSContext *cx;
+ jsatomid parentId;
+ JSAtomMap *atomMap;
+ JSObject *obj, *parent;
+ uint16 depth, count, i;
+ uint32 tmp;
+ JSTempValueRooter tvr;
+ JSScopeProperty *sprop;
+ jsid propid;
+ JSAtom *atom;
+ int16 shortid;
+ JSBool ok;
+
+ cx = xdr->cx;
+#ifdef __GNUC__
+ obj = NULL; /* quell GCC overwarning */
+#endif
+
+ atomMap = &xdr->script->atomMap;
+ if (xdr->mode == JSXDR_ENCODE) {
+ obj = *objp;
+ parent = OBJ_GET_PARENT(cx, obj);
+ parentId = FindObjectAtomIndex(atomMap, parent);
+ depth = OBJ_BLOCK_DEPTH(cx, obj);
+ count = OBJ_BLOCK_COUNT(cx, obj);
+ tmp = (uint32)(depth << 16) | count;
+ }
+#ifdef __GNUC__ /* suppress bogus gcc warnings */
+ else count = 0;
+#endif
+
+ /* First, XDR the parent atomid. */
+ if (!JS_XDRUint32(xdr, &parentId))
+ return JS_FALSE;
+
+ if (xdr->mode == JSXDR_DECODE) {
+ obj = js_NewBlockObject(cx);
+ if (!obj)
+ return JS_FALSE;
+ *objp = obj;
+
+ /*
+ * If there's a parent id, then get the parent out of our script's
+         * atomMap. We know that we XDR block objects in outer-to-inner order,
+ * which means that getting the parent now will work.
+ */
+ if (parentId == NO_PARENT_INDEX) {
+ parent = NULL;
+ } else {
+ atom = js_GetAtom(cx, atomMap, parentId);
+ JS_ASSERT(ATOM_IS_OBJECT(atom));
+ parent = ATOM_TO_OBJECT(atom);
+ }
+ obj->slots[JSSLOT_PARENT] = OBJECT_TO_JSVAL(parent);
+ }
+
+ JS_PUSH_SINGLE_TEMP_ROOT(cx, OBJECT_TO_JSVAL(obj), &tvr);
+
+ if (!JS_XDRUint32(xdr, &tmp)) {
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return JS_FALSE;
+ }
+
+ if (xdr->mode == JSXDR_DECODE) {
+ depth = (uint16)(tmp >> 16);
+ count = (uint16)tmp;
+ obj->slots[JSSLOT_BLOCK_DEPTH] = INT_TO_JSVAL(depth);
+ }
+
+ /*
+ * XDR the block object's properties. We know that there are 'count'
+ * properties to XDR, stored as id/shortid pairs. We do not XDR any
+ * non-native properties, only those that the compiler created.
+ */
+ sprop = NULL;
+ ok = JS_TRUE;
+ for (i = 0; i < count; i++) {
+ if (xdr->mode == JSXDR_ENCODE) {
+ /* Find a property to XDR. */
+ do {
+ /* If sprop is NULL, this is the first property. */
+ sprop = sprop ? sprop->parent : OBJ_SCOPE(obj)->lastProp;
+ } while (!(sprop->flags & SPROP_HAS_SHORTID));
+
+ JS_ASSERT(sprop->getter == js_BlockClass.getProperty);
+ propid = sprop->id;
+ JS_ASSERT(JSID_IS_ATOM(propid));
+ atom = JSID_TO_ATOM(propid);
+ shortid = sprop->shortid;
+ JS_ASSERT(shortid >= 0);
+ }
+
+ /* XDR the real id, then the shortid. */
+ if (!js_XDRStringAtom(xdr, &atom) ||
+ !JS_XDRUint16(xdr, (uint16 *)&shortid)) {
+ ok = JS_FALSE;
+ break;
+ }
+
+ if (xdr->mode == JSXDR_DECODE) {
+ if (!js_DefineNativeProperty(cx, obj, ATOM_TO_JSID(atom),
+ JSVAL_VOID, NULL, NULL,
+ JSPROP_ENUMERATE | JSPROP_PERMANENT,
+ SPROP_HAS_SHORTID, shortid, NULL)) {
+ ok = JS_FALSE;
+ break;
+ }
+ }
+ }
+
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return ok;
+}
+
+#else
+# define block_xdrObject NULL
+#endif
+
+JSClass js_BlockClass = {
+ "Block",
+ JSCLASS_HAS_PRIVATE | JSCLASS_HAS_RESERVED_SLOTS(1) |
+ JSCLASS_IS_ANONYMOUS | JSCLASS_HAS_CACHED_PROTO(JSProto_Block),
+ JS_PropertyStub, JS_PropertyStub, block_getProperty, block_setProperty,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, JS_FinalizeStub,
+ NULL, NULL, NULL, NULL, block_xdrObject, NULL, NULL, NULL
+};
+
+JSObject*
+js_InitBlockClass(JSContext *cx, JSObject* obj)
+{
+ JSObject *proto;
+
+ proto = JS_InitClass(cx, obj, NULL, &js_BlockClass, NULL, 0, NULL,
+ NULL, NULL, NULL);
+ if (!proto)
+ return NULL;
+
+ OBJ_SET_PROTO(cx, proto, NULL);
+ return proto;
+}
+
+JSObject *
+js_InitObjectClass(JSContext *cx, JSObject *obj)
+{
+ JSObject *proto;
+ jsval eval;
+
+ proto = JS_InitClass(cx, obj, NULL, &js_ObjectClass, Object, 1,
+ object_props, object_methods, NULL, NULL);
+ if (!proto)
+ return NULL;
+
+ /* ECMA (15.1.2.1) says 'eval' is also a property of the global object. */
+ if (!OBJ_GET_PROPERTY(cx, proto,
+ ATOM_TO_JSID(cx->runtime->atomState.evalAtom),
+ &eval)) {
+ return NULL;
+ }
+ if (!OBJ_DEFINE_PROPERTY(cx, obj,
+ ATOM_TO_JSID(cx->runtime->atomState.evalAtom),
+ eval, NULL, NULL, 0, NULL)) {
+ return NULL;
+ }
+
+ return proto;
+}
+
+void
+js_InitObjectMap(JSObjectMap *map, jsrefcount nrefs, JSObjectOps *ops,
+ JSClass *clasp)
+{
+ map->nrefs = nrefs;
+ map->ops = ops;
+ map->nslots = JS_INITIAL_NSLOTS;
+ map->freeslot = JSSLOT_FREE(clasp);
+}
+
+JSObjectMap *
+js_NewObjectMap(JSContext *cx, jsrefcount nrefs, JSObjectOps *ops,
+ JSClass *clasp, JSObject *obj)
+{
+ return (JSObjectMap *) js_NewScope(cx, nrefs, ops, clasp, obj);
+}
+
+void
+js_DestroyObjectMap(JSContext *cx, JSObjectMap *map)
+{
+ js_DestroyScope(cx, (JSScope *)map);
+}
+
+JSObjectMap *
+js_HoldObjectMap(JSContext *cx, JSObjectMap *map)
+{
+ JS_ASSERT(map->nrefs >= 0);
+ JS_ATOMIC_INCREMENT(&map->nrefs);
+ return map;
+}
+
+JSObjectMap *
+js_DropObjectMap(JSContext *cx, JSObjectMap *map, JSObject *obj)
+{
+ JS_ASSERT(map->nrefs > 0);
+ JS_ATOMIC_DECREMENT(&map->nrefs);
+ if (map->nrefs == 0) {
+ map->ops->destroyObjectMap(cx, map);
+ return NULL;
+ }
+ if (MAP_IS_NATIVE(map) && ((JSScope *)map)->object == obj)
+ ((JSScope *)map)->object = NULL;
+ return map;
+}
+
+static jsval *
+AllocSlots(JSContext *cx, jsval *slots, uint32 nslots)
+{
+ size_t nbytes, obytes, minbytes;
+ uint32 i, oslots;
+ jsval *newslots;
+
+ nbytes = (nslots + 1) * sizeof(jsval);
+ if (slots) {
+ oslots = slots[-1];
+ obytes = (oslots + 1) * sizeof(jsval);
+ } else {
+ oslots = 0;
+ obytes = 0;
+ }
+
+ if (nbytes <= GC_NBYTES_MAX) {
+ newslots = (jsval *) js_NewGCThing(cx, GCX_PRIVATE, nbytes);
+ } else {
+ newslots = (jsval *)
+ JS_realloc(cx,
+ (obytes <= GC_NBYTES_MAX) ? NULL : slots - 1,
+ nbytes);
+ }
+ if (!newslots)
+ return NULL;
+
+ if (obytes != 0) {
+        /* If either nbytes or obytes fits in a GC-thing, we must copy. */
+ minbytes = JS_MIN(nbytes, obytes);
+ if (minbytes <= GC_NBYTES_MAX)
+ memcpy(newslots + 1, slots, minbytes - sizeof(jsval));
+
+ /* If nbytes are in a GC-thing but obytes aren't, free obytes. */
+ if (nbytes <= GC_NBYTES_MAX && obytes > GC_NBYTES_MAX)
+ JS_free(cx, slots - 1);
+
+ /* If we're extending an allocation, initialize free slots. */
+ if (nslots > oslots) {
+ for (i = 1 + oslots; i <= nslots; i++)
+ newslots[i] = JSVAL_VOID;
+ }
+ }
+
+ newslots[0] = nslots;
+ return ++newslots;
+}
+
+static void
+FreeSlots(JSContext *cx, jsval *slots)
+{
+ size_t nbytes;
+
+ /*
+ * NB: We count on smaller GC-things being finalized before larger things
+ * that become garbage during the same GC. Without this assumption, we
+ * couldn't load slots[-1] here without possibly loading a gcFreeList link
+ * (see struct JSGCThing in jsgc.h).
+ */
+ nbytes = (slots[-1] + 1) * sizeof(jsval);
+ if (nbytes > GC_NBYTES_MAX)
+ JS_free(cx, slots - 1);
+}
+
+extern JSBool
+js_GetClassId(JSContext *cx, JSClass *clasp, jsid *idp)
+{
+ JSProtoKey key;
+ JSAtom *atom;
+
+ key = JSCLASS_CACHED_PROTO_KEY(clasp);
+ if (key != JSProto_Null) {
+ *idp = INT_TO_JSID(key);
+ } else if (clasp->flags & JSCLASS_IS_ANONYMOUS) {
+ *idp = INT_TO_JSID(JSProto_Object);
+ } else {
+ atom = js_Atomize(cx, clasp->name, strlen(clasp->name), 0);
+ if (!atom)
+ return JS_FALSE;
+ *idp = ATOM_TO_JSID(atom);
+ }
+ return JS_TRUE;
+}
+
+JSObject *
+js_NewObject(JSContext *cx, JSClass *clasp, JSObject *proto, JSObject *parent)
+{
+ jsid id;
+ JSObject *obj;
+ JSObjectOps *ops;
+ JSObjectMap *map;
+ JSClass *protoclasp;
+ uint32 nslots, i;
+ jsval *newslots;
+ JSTempValueRooter tvr;
+
+ /* Bootstrap the ur-object, and make it the default prototype object. */
+ if (!proto) {
+ if (!js_GetClassId(cx, clasp, &id))
+ return NULL;
+ if (!js_GetClassPrototype(cx, parent, id, &proto))
+ return NULL;
+ if (!proto &&
+ !js_GetClassPrototype(cx, parent, INT_TO_JSID(JSProto_Object),
+ &proto)) {
+ return NULL;
+ }
+ }
+
+ /* Always call the class's getObjectOps hook if it has one. */
+ ops = clasp->getObjectOps
+ ? clasp->getObjectOps(cx, clasp)
+ : &js_ObjectOps;
+
+ /*
+ * Allocate a zeroed object from the GC heap. Do this *after* any other
+ * GC-thing allocations under js_GetClassPrototype or clasp->getObjectOps,
+ * to avoid displacing the newborn root for obj.
+ */
+ obj = (JSObject *) js_NewGCThing(cx, GCX_OBJECT, sizeof(JSObject));
+ if (!obj)
+ return NULL;
+
+ /*
+     * Root obj to prevent it from being collected out from under this call
+ * to js_NewObject. AllocSlots can trigger a finalizer from a last-ditch
+     * GC calling JS_ClearNewbornRoots. There's also the possibility of things
+ * happening under the objectHook call-out further below.
+ */
+ JS_PUSH_TEMP_ROOT_OBJECT(cx, obj, &tvr);
+
+ /*
+ * Share proto's map only if it has the same JSObjectOps, and only if
+ * proto's class has the same private and reserved slots as obj's map
+ * and class have. We assume that if prototype and object are of the
+ * same class, they always have the same number of computed reserved
+ * slots (returned via clasp->reserveSlots); otherwise, prototype and
+ * object classes must have the same (null or not) reserveSlots hook.
+ */
+ if (proto &&
+ (map = proto->map)->ops == ops &&
+ ((protoclasp = OBJ_GET_CLASS(cx, proto)) == clasp ||
+ (!((protoclasp->flags ^ clasp->flags) &
+ (JSCLASS_HAS_PRIVATE |
+ (JSCLASS_RESERVED_SLOTS_MASK << JSCLASS_RESERVED_SLOTS_SHIFT))) &&
+ protoclasp->reserveSlots == clasp->reserveSlots)))
+ {
+ /*
+ * Default parent to the parent of the prototype, which was set from
+ * the parent of the prototype's constructor.
+ */
+ if (!parent)
+ parent = OBJ_GET_PARENT(cx, proto);
+
+ /* Share the given prototype's map. */
+ obj->map = js_HoldObjectMap(cx, map);
+
+ /* Ensure that obj starts with the minimum slots for clasp. */
+ nslots = JS_INITIAL_NSLOTS;
+ } else {
+ /* Leave parent alone. Allocate a new map for obj. */
+ map = ops->newObjectMap(cx, 1, ops, clasp, obj);
+ if (!map)
+ goto bad;
+ obj->map = map;
+
+ /* Let ops->newObjectMap set nslots so as to reserve slots. */
+ nslots = map->nslots;
+ }
+
+ /* Allocate a slots vector, with a -1'st element telling its length. */
+ newslots = AllocSlots(cx, NULL, nslots);
+ if (!newslots) {
+ js_DropObjectMap(cx, obj->map, obj);
+ obj->map = NULL;
+ goto bad;
+ }
+
+ /* Set the proto, parent, and class properties. */
+ newslots[JSSLOT_PROTO] = OBJECT_TO_JSVAL(proto);
+ newslots[JSSLOT_PARENT] = OBJECT_TO_JSVAL(parent);
+ newslots[JSSLOT_CLASS] = PRIVATE_TO_JSVAL(clasp);
+
+ /* Clear above JSSLOT_CLASS so the GC doesn't load uninitialized memory. */
+ for (i = JSSLOT_CLASS + 1; i < nslots; i++)
+ newslots[i] = JSVAL_VOID;
+
+ /* Store newslots after initializing all of 'em, just in case. */
+ obj->slots = newslots;
+
+ if (cx->runtime->objectHook) {
+ JS_KEEP_ATOMS(cx->runtime);
+ cx->runtime->objectHook(cx, obj, JS_TRUE, cx->runtime->objectHookData);
+ JS_UNKEEP_ATOMS(cx->runtime);
+ }
+
+out:
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ cx->weakRoots.newborn[GCX_OBJECT] = (JSGCThing *) obj;
+ return obj;
+
+bad:
+ obj = NULL;
+ goto out;
+}
+
+JS_STATIC_DLL_CALLBACK(JSObject *)
+js_InitNullClass(JSContext *cx, JSObject *obj)
+{
+ JS_ASSERT(0);
+ return NULL;
+}
+
+#define JS_PROTO(name,code,init) extern JSObject *init(JSContext *, JSObject *);
+#include "jsproto.tbl"
+#undef JS_PROTO
+
+static JSObjectOp lazy_prototype_init[JSProto_LIMIT] = {
+#define JS_PROTO(name,code,init) init,
+#include "jsproto.tbl"
+#undef JS_PROTO
+};
+
+JSBool
+js_GetClassObject(JSContext *cx, JSObject *obj, JSProtoKey key,
+ JSObject **objp)
+{
+ JSBool ok;
+ JSObject *tmp, *cobj;
+ JSResolvingKey rkey;
+ JSResolvingEntry *rentry;
+ uint32 generation;
+ JSObjectOp init;
+ jsval v;
+
+ while ((tmp = OBJ_GET_PARENT(cx, obj)) != NULL)
+ obj = tmp;
+ if (!(OBJ_GET_CLASS(cx, obj)->flags & JSCLASS_IS_GLOBAL)) {
+ *objp = NULL;
+ return JS_TRUE;
+ }
+
+ ok = JS_GetReservedSlot(cx, obj, key, &v);
+ if (!ok)
+ return JS_FALSE;
+ if (!JSVAL_IS_PRIMITIVE(v)) {
+ *objp = JSVAL_TO_OBJECT(v);
+ return JS_TRUE;
+ }
+
+ rkey.obj = obj;
+ rkey.id = ATOM_TO_JSID(cx->runtime->atomState.classAtoms[key]);
+ if (!js_StartResolving(cx, &rkey, JSRESFLAG_LOOKUP, &rentry))
+ return JS_FALSE;
+ if (!rentry) {
+ /* Already caching key in obj -- suppress recursion. */
+ *objp = NULL;
+ return JS_TRUE;
+ }
+ generation = cx->resolvingTable->generation;
+
+ cobj = NULL;
+ init = lazy_prototype_init[key];
+ if (init) {
+ if (!init(cx, obj)) {
+ ok = JS_FALSE;
+ } else {
+ ok = JS_GetReservedSlot(cx, obj, key, &v);
+ if (ok && !JSVAL_IS_PRIMITIVE(v))
+ cobj = JSVAL_TO_OBJECT(v);
+ }
+ }
+
+ js_StopResolving(cx, &rkey, JSRESFLAG_LOOKUP, rentry, generation);
+ *objp = cobj;
+ return ok;
+}
+
+JSBool
+js_SetClassObject(JSContext *cx, JSObject *obj, JSProtoKey key, JSObject *cobj)
+{
+ JS_ASSERT(!OBJ_GET_PARENT(cx, obj));
+ if (!(OBJ_GET_CLASS(cx, obj)->flags & JSCLASS_IS_GLOBAL))
+ return JS_TRUE;
+
+ return JS_SetReservedSlot(cx, obj, key, OBJECT_TO_JSVAL(cobj));
+}
+
+JSBool
+js_FindClassObject(JSContext *cx, JSObject *start, jsid id, jsval *vp)
+{
+ JSObject *obj, *cobj, *pobj;
+ JSProtoKey key;
+ JSProperty *prop;
+ JSScopeProperty *sprop;
+
+ if (start || (cx->fp && (start = cx->fp->scopeChain) != NULL)) {
+ /* Find the topmost object in the scope chain. */
+ do {
+ obj = start;
+ start = OBJ_GET_PARENT(cx, obj);
+ } while (start);
+ } else {
+ obj = cx->globalObject;
+ if (!obj) {
+ *vp = JSVAL_VOID;
+ return JS_TRUE;
+ }
+ }
+
+ OBJ_TO_INNER_OBJECT(cx, obj);
+ if (!obj)
+ return JS_FALSE;
+
+ if (JSID_IS_INT(id)) {
+ key = JSID_TO_INT(id);
+ JS_ASSERT(key != JSProto_Null);
+ if (!js_GetClassObject(cx, obj, key, &cobj))
+ return JS_FALSE;
+ if (cobj) {
+ *vp = OBJECT_TO_JSVAL(cobj);
+ return JS_TRUE;
+ }
+ id = ATOM_TO_JSID(cx->runtime->atomState.classAtoms[key]);
+ }
+
+ JS_ASSERT(OBJ_IS_NATIVE(obj));
+ if (!js_LookupPropertyWithFlags(cx, obj, id, JSRESOLVE_CLASSNAME,
+ &pobj, &prop)) {
+ return JS_FALSE;
+ }
+ if (!prop) {
+ *vp = JSVAL_VOID;
+ return JS_TRUE;
+ }
+
+ JS_ASSERT(OBJ_IS_NATIVE(pobj));
+ sprop = (JSScopeProperty *) prop;
+ JS_ASSERT(SPROP_HAS_VALID_SLOT(sprop, OBJ_SCOPE(pobj)));
+ *vp = OBJ_GET_SLOT(cx, pobj, sprop->slot);
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ return JS_TRUE;
+}
+
+JSObject *
+js_ConstructObject(JSContext *cx, JSClass *clasp, JSObject *proto,
+ JSObject *parent, uintN argc, jsval *argv)
+{
+ jsid id;
+ jsval cval, rval;
+ JSTempValueRooter argtvr, tvr;
+ JSObject *obj, *ctor;
+
+ JS_PUSH_TEMP_ROOT(cx, argc, argv, &argtvr);
+
+ if (!js_GetClassId(cx, clasp, &id) ||
+ !js_FindClassObject(cx, parent, id, &cval)) {
+ JS_POP_TEMP_ROOT(cx, &argtvr);
+ return NULL;
+ }
+
+ if (JSVAL_IS_PRIMITIVE(cval)) {
+ js_ReportIsNotFunction(cx, &cval, JSV2F_CONSTRUCT | JSV2F_SEARCH_STACK);
+ JS_POP_TEMP_ROOT(cx, &argtvr);
+ return NULL;
+ }
+
+ /*
+ * Protect cval in case a crazy getter for .prototype uproots it. After
+ * this point, all control flow must exit through label out with obj set.
+ */
+ JS_PUSH_SINGLE_TEMP_ROOT(cx, cval, &tvr);
+
+ /*
+ * If proto or parent are NULL, set them to Constructor.prototype and/or
+ * Constructor.__parent__, just like JSOP_NEW does.
+ */
+ ctor = JSVAL_TO_OBJECT(cval);
+ if (!parent)
+ parent = OBJ_GET_PARENT(cx, ctor);
+ if (!proto) {
+ if (!OBJ_GET_PROPERTY(cx, ctor,
+ ATOM_TO_JSID(cx->runtime->atomState
+ .classPrototypeAtom),
+ &rval)) {
+ obj = NULL;
+ goto out;
+ }
+ if (JSVAL_IS_OBJECT(rval))
+ proto = JSVAL_TO_OBJECT(rval);
+ }
+
+ obj = js_NewObject(cx, clasp, proto, parent);
+ if (!obj)
+ goto out;
+
+ if (!js_InternalConstruct(cx, obj, cval, argc, argv, &rval))
+ goto bad;
+
+ if (JSVAL_IS_PRIMITIVE(rval))
+ goto out;
+ obj = JSVAL_TO_OBJECT(rval);
+
+ /*
+ * If the instance's class differs from what was requested, throw a type
+ * error. If the given class has both the JSCLASS_HAS_PRIVATE and the
+ * JSCLASS_CONSTRUCT_PROTOTYPE flags, and the instance does not have its
+ * private data set at this point, then the constructor was replaced and
+ * we should throw a type error.
+ */
+ if (OBJ_GET_CLASS(cx, obj) != clasp ||
+ (!(~clasp->flags & (JSCLASS_HAS_PRIVATE |
+ JSCLASS_CONSTRUCT_PROTOTYPE)) &&
+ !JS_GetPrivate(cx, obj))) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_WRONG_CONSTRUCTOR, clasp->name);
+ goto bad;
+ }
+
+out:
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ JS_POP_TEMP_ROOT(cx, &argtvr);
+ return obj;
+
+bad:
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ obj = NULL;
+ goto out;
+}
+
+void
+js_FinalizeObject(JSContext *cx, JSObject *obj)
+{
+ JSObjectMap *map;
+
+ /* Cope with stillborn objects that have no map. */
+ map = obj->map;
+ if (!map)
+ return;
+ JS_ASSERT(obj->slots);
+
+ if (cx->runtime->objectHook)
+ cx->runtime->objectHook(cx, obj, JS_FALSE, cx->runtime->objectHookData);
+
+ /* Remove all watchpoints with weak links to obj. */
+ JS_ClearWatchPointsForObject(cx, obj);
+
+ /*
+ * Finalize obj first, in case it needs map and slots. Optimized to use
+ * LOCKED_OBJ_GET_CLASS instead of OBJ_GET_CLASS, so we avoid "promoting"
+ * obj's scope from lock-free to lock-full (see jslock.c:ClaimScope) when
+ * we're called from the GC. Only the GC should call js_FinalizeObject,
+ * and no other threads run JS (and possibly racing to update obj->slots)
+ * while the GC is running.
+ */
+ LOCKED_OBJ_GET_CLASS(obj)->finalize(cx, obj);
+
+ /* Drop map and free slots. */
+ js_DropObjectMap(cx, map, obj);
+ obj->map = NULL;
+ FreeSlots(cx, obj->slots);
+ obj->slots = NULL;
+}
+
+/* XXXbe if one adds props, deletes earlier props, adds more, the last added
+ won't recycle the deleted props' slots. */
+JSBool
+js_AllocSlot(JSContext *cx, JSObject *obj, uint32 *slotp)
+{
+ JSObjectMap *map;
+ JSClass *clasp;
+ uint32 nslots;
+ jsval *newslots;
+
+ map = obj->map;
+ JS_ASSERT(!MAP_IS_NATIVE(map) || ((JSScope *)map)->object == obj);
+ clasp = LOCKED_OBJ_GET_CLASS(obj);
+ if (map->freeslot == JSSLOT_FREE(clasp)) {
+ /* Adjust map->freeslot to include computed reserved slots, if any. */
+ if (clasp->reserveSlots)
+ map->freeslot += clasp->reserveSlots(cx, obj);
+ }
+ nslots = map->nslots;
+ if (map->freeslot >= nslots) {
+ nslots = map->freeslot;
+ JS_ASSERT(nslots >= JS_INITIAL_NSLOTS);
+ nslots += (nslots + 1) / 2;
+
+ newslots = AllocSlots(cx, obj->slots, nslots);
+ if (!newslots)
+ return JS_FALSE;
+ map->nslots = nslots;
+ obj->slots = newslots;
+ }
+
+#ifdef TOO_MUCH_GC
+ obj->slots[map->freeslot] = JSVAL_VOID;
+#endif
+ *slotp = map->freeslot++;
+ return JS_TRUE;
+}
+
+void
+js_FreeSlot(JSContext *cx, JSObject *obj, uint32 slot)
+{
+ JSObjectMap *map;
+ uint32 nslots;
+ jsval *newslots;
+
+ OBJ_CHECK_SLOT(obj, slot);
+ obj->slots[slot] = JSVAL_VOID;
+ map = obj->map;
+ JS_ASSERT(!MAP_IS_NATIVE(map) || ((JSScope *)map)->object == obj);
+ if (map->freeslot == slot + 1)
+ map->freeslot = slot;
+ nslots = map->nslots;
+ if (nslots > JS_INITIAL_NSLOTS && map->freeslot < nslots / 2) {
+ nslots = map->freeslot;
+ nslots += nslots / 2;
+ if (nslots < JS_INITIAL_NSLOTS)
+ nslots = JS_INITIAL_NSLOTS;
+
+ newslots = AllocSlots(cx, obj->slots, nslots);
+ if (!newslots)
+ return;
+ map->nslots = nslots;
+ obj->slots = newslots;
+ }
+}
+
+/* JSVAL_INT_MAX as a string */
+#define JSVAL_INT_MAX_STRING "1073741823"
+
+#define CHECK_FOR_STRING_INDEX(id) \
+ JS_BEGIN_MACRO \
+ if (JSID_IS_ATOM(id)) { \
+ JSAtom *atom_ = JSID_TO_ATOM(id); \
+ JSString *str_ = ATOM_TO_STRING(atom_); \
+ const jschar *cp_ = str_->chars; \
+ JSBool negative_ = (*cp_ == '-'); \
+ if (negative_) cp_++; \
+ if (JS7_ISDEC(*cp_)) { \
+ size_t n_ = str_->length - negative_; \
+ if (n_ <= sizeof(JSVAL_INT_MAX_STRING) - 1) \
+ id = CheckForStringIndex(id, cp_, cp_ + n_, negative_); \
+ } \
+ } \
+ JS_END_MACRO
+
+static jsid
+CheckForStringIndex(jsid id, const jschar *cp, const jschar *end,
+ JSBool negative)
+{
+ jsuint index = JS7_UNDEC(*cp++);
+ jsuint oldIndex = 0;
+ jsuint c = 0;
+
+ if (index != 0) {
+ while (JS7_ISDEC(*cp)) {
+ oldIndex = index;
+ c = JS7_UNDEC(*cp);
+ index = 10 * index + c;
+ cp++;
+ }
+ }
+ if (cp == end &&
+ (oldIndex < (JSVAL_INT_MAX / 10) ||
+ (oldIndex == (JSVAL_INT_MAX / 10) &&
+ c <= (JSVAL_INT_MAX % 10)))) {
+ if (negative)
+ index = 0 - index;
+ id = INT_TO_JSID((jsint)index);
+ }
+ return id;
+}
+
+static JSBool
+HidePropertyName(JSContext *cx, jsid *idp)
+{
+ jsid id;
+ JSAtom *atom, *hidden;
+
+ id = *idp;
+ JS_ASSERT(JSID_IS_ATOM(id));
+
+ atom = JSID_TO_ATOM(id);
+ JS_ASSERT(!(atom->flags & ATOM_HIDDEN));
+ JS_ASSERT(ATOM_IS_STRING(atom));
+
+ hidden = js_AtomizeString(cx, ATOM_TO_STRING(atom), ATOM_HIDDEN);
+ if (!hidden)
+ return JS_FALSE;
+
+ /*
+ * Link hidden to unhidden atom to optimize call_enumerate -- this means
+ * the GC must mark a hidden atom's unhidden counterpart (see js_MarkAtom
+ * in jsgc.c). It uses the atom's entry.value member for this linkage.
+ */
+ hidden->entry.value = atom;
+ *idp = ATOM_TO_JSID(hidden);
+ return JS_TRUE;
+}
+
+JSScopeProperty *
+js_AddHiddenProperty(JSContext *cx, JSObject *obj, jsid id,
+ JSPropertyOp getter, JSPropertyOp setter, uint32 slot,
+ uintN attrs, uintN flags, intN shortid)
+{
+ if (!HidePropertyName(cx, &id))
+ return NULL;
+
+ flags |= SPROP_IS_HIDDEN;
+ return js_AddNativeProperty(cx, obj, id, getter, setter, slot, attrs,
+ flags, shortid);
+}
+
+JSBool
+js_LookupHiddenProperty(JSContext *cx, JSObject *obj, jsid id, JSObject **objp,
+ JSProperty **propp)
+{
+ return HidePropertyName(cx, &id) &&
+ js_LookupPropertyWithFlags(cx, obj, id, JSRESOLVE_HIDDEN,
+ objp, propp);
+}
+
+JSScopeProperty *
+js_AddNativeProperty(JSContext *cx, JSObject *obj, jsid id,
+ JSPropertyOp getter, JSPropertyOp setter, uint32 slot,
+ uintN attrs, uintN flags, intN shortid)
+{
+ JSScope *scope;
+ JSScopeProperty *sprop;
+
+ JS_LOCK_OBJ(cx, obj);
+ scope = js_GetMutableScope(cx, obj);
+ if (!scope) {
+ sprop = NULL;
+ } else {
+ /*
+ * Handle old bug that took empty string as zero index. Also convert
+ * string indices to integers if appropriate.
+ */
+ CHECK_FOR_STRING_INDEX(id);
+ sprop = js_AddScopeProperty(cx, scope, id, getter, setter, slot, attrs,
+ flags, shortid);
+ }
+ JS_UNLOCK_OBJ(cx, obj);
+ return sprop;
+}
+
+JSScopeProperty *
+js_ChangeNativePropertyAttrs(JSContext *cx, JSObject *obj,
+ JSScopeProperty *sprop, uintN attrs, uintN mask,
+ JSPropertyOp getter, JSPropertyOp setter)
+{
+ JSScope *scope;
+
+ JS_LOCK_OBJ(cx, obj);
+ scope = js_GetMutableScope(cx, obj);
+ if (!scope) {
+ sprop = NULL;
+ } else {
+ sprop = js_ChangeScopePropertyAttrs(cx, scope, sprop, attrs, mask,
+ getter, setter);
+ if (sprop) {
+ PROPERTY_CACHE_FILL(&cx->runtime->propertyCache, obj, sprop->id,
+ sprop);
+ }
+ }
+ JS_UNLOCK_OBJ(cx, obj);
+ return sprop;
+}
+
+JSBool
+js_DefineProperty(JSContext *cx, JSObject *obj, jsid id, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter, uintN attrs,
+ JSProperty **propp)
+{
+ return js_DefineNativeProperty(cx, obj, id, value, getter, setter, attrs,
+ 0, 0, propp);
+}
+
+/*
+ * Backward compatibility requires allowing addProperty hooks to mutate the
+ * nominal initial value of a slot-full property, while GC safety wants that
+ * value to be stored before the call-out through the hook. Optimize to do
+ * both while saving cycles for classes that stub their addProperty hook.
+ */
+#define ADD_PROPERTY_HELPER(cx,clasp,obj,scope,sprop,vp,cleanup) \
+ JS_BEGIN_MACRO \
+ if ((clasp)->addProperty != JS_PropertyStub) { \
+ jsval nominal_ = *(vp); \
+ if (!(clasp)->addProperty(cx, obj, SPROP_USERID(sprop), vp)) { \
+ cleanup; \
+ } \
+ if (*(vp) != nominal_) { \
+ if (SPROP_HAS_VALID_SLOT(sprop, scope)) \
+ LOCKED_OBJ_SET_SLOT(obj, (sprop)->slot, *(vp)); \
+ } \
+ } \
+ JS_END_MACRO
+
+JSBool
+js_DefineNativeProperty(JSContext *cx, JSObject *obj, jsid id, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter, uintN attrs,
+ uintN flags, intN shortid, JSProperty **propp)
+{
+ JSClass *clasp;
+ JSScope *scope;
+ JSScopeProperty *sprop;
+
+ /*
+ * Handle old bug that took empty string as zero index. Also convert
+ * string indices to integers if appropriate.
+ */
+ CHECK_FOR_STRING_INDEX(id);
+
+#if JS_HAS_GETTER_SETTER
+ /*
+ * If defining a getter or setter, we must check for its counterpart and
+ * update the attributes and property ops. A getter or setter is really
+ * only half of a property.
+ */
+ if (attrs & (JSPROP_GETTER | JSPROP_SETTER)) {
+ JSObject *pobj;
+ JSProperty *prop;
+
+ /*
+ * If JS_THREADSAFE and id is found, js_LookupProperty returns with
+ * sprop non-null and pobj locked. If pobj == obj, the property is
+ * already in obj and obj has its own (mutable) scope. So if we are
+ * defining a getter whose setter was already defined, or vice versa,
+ * finish the job via js_ChangeScopePropertyAttributes, and refresh
+ * the property cache line for (obj, id) to map sprop.
+ */
+ if (!js_LookupProperty(cx, obj, id, &pobj, &prop))
+ return JS_FALSE;
+ sprop = (JSScopeProperty *) prop;
+ if (sprop &&
+ pobj == obj &&
+ (sprop->attrs & (JSPROP_GETTER | JSPROP_SETTER))) {
+ sprop = js_ChangeScopePropertyAttrs(cx, OBJ_SCOPE(obj), sprop,
+ attrs, sprop->attrs,
+ (attrs & JSPROP_GETTER)
+ ? getter
+ : sprop->getter,
+ (attrs & JSPROP_SETTER)
+ ? setter
+ : sprop->setter);
+
+ /* NB: obj == pobj, so we can share unlock code at the bottom. */
+ if (!sprop)
+ goto bad;
+ goto out;
+ }
+
+ if (prop) {
+ /* NB: call OBJ_DROP_PROPERTY, as pobj might not be native. */
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ prop = NULL;
+ }
+ }
+#endif /* JS_HAS_GETTER_SETTER */
+
+ /* Lock if object locking is required by this implementation. */
+ JS_LOCK_OBJ(cx, obj);
+
+ /* Use the object's class getter and setter by default. */
+ clasp = LOCKED_OBJ_GET_CLASS(obj);
+ if (!getter)
+ getter = clasp->getProperty;
+ if (!setter)
+ setter = clasp->setProperty;
+
+ /* Get obj's own scope if it has one, or create a new one for obj. */
+ scope = js_GetMutableScope(cx, obj);
+ if (!scope)
+ goto bad;
+
+ /* Add the property to scope, or replace an existing one of the same id. */
+ if (clasp->flags & JSCLASS_SHARE_ALL_PROPERTIES)
+ attrs |= JSPROP_SHARED;
+ sprop = js_AddScopeProperty(cx, scope, id, getter, setter,
+ SPROP_INVALID_SLOT, attrs, flags, shortid);
+ if (!sprop)
+ goto bad;
+
+ /* Store value before calling addProperty, in case the latter GC's. */
+ if (SPROP_HAS_VALID_SLOT(sprop, scope))
+ LOCKED_OBJ_SET_SLOT(obj, sprop->slot, value);
+
+ /* XXXbe called with lock held */
+ ADD_PROPERTY_HELPER(cx, clasp, obj, scope, sprop, &value,
+ js_RemoveScopeProperty(cx, scope, id);
+ goto bad);
+
+#if JS_HAS_GETTER_SETTER
+out:
+#endif
+ PROPERTY_CACHE_FILL(&cx->runtime->propertyCache, obj, id, sprop);
+ if (propp)
+ *propp = (JSProperty *) sprop;
+ else
+ JS_UNLOCK_OBJ(cx, obj);
+ return JS_TRUE;
+
+bad:
+ JS_UNLOCK_OBJ(cx, obj);
+ return JS_FALSE;
+}
+
+/*
+ * Given pc pointing after a property accessing bytecode, return true if the
+ * access is "object-detecting" in the sense used by web scripts, e.g., when
+ * checking whether document.all is defined.
+ */
+static JSBool
+Detecting(JSContext *cx, jsbytecode *pc)
+{
+ JSScript *script;
+ jsbytecode *endpc;
+ JSOp op;
+ JSAtom *atom;
+
+ if (!cx->fp)
+ return JS_FALSE;
+ script = cx->fp->script;
+ for (endpc = script->code + script->length; pc < endpc; pc++) {
+ /* General case: a branch or equality op follows the access. */
+ op = (JSOp) *pc;
+ if (js_CodeSpec[op].format & JOF_DETECTING)
+ return JS_TRUE;
+
+ /*
+ * Special case #1: handle (document.all == null). Don't sweat about
+ * JS1.2's revision of the equality operators here.
+ */
+ if (op == JSOP_NULL) {
+ if (++pc < endpc)
+ return *pc == JSOP_EQ || *pc == JSOP_NE;
+ break;
+ }
+
+ /*
+ * Special case #2: handle (document.all == undefined). Don't worry
+ * about someone redefining undefined, which was added by Edition 3,
+ * so is read/write for backward compatibility.
+ */
+ if (op == JSOP_NAME) {
+ atom = GET_ATOM(cx, script, pc);
+ if (atom == cx->runtime->atomState.typeAtoms[JSTYPE_VOID] &&
+ (pc += js_CodeSpec[op].length) < endpc) {
+ op = (JSOp) *pc;
+ return op == JSOP_EQ || op == JSOP_NE ||
+ op == JSOP_NEW_EQ || op == JSOP_NEW_NE;
+ }
+ break;
+ }
+
+ /* At this point, anything but grouping means we're not detecting. */
+ if (op != JSOP_GROUP)
+ break;
+ }
+ return JS_FALSE;
+}
+
+JS_FRIEND_API(JSBool)
+js_LookupProperty(JSContext *cx, JSObject *obj, jsid id, JSObject **objp,
+ JSProperty **propp)
+{
+ return js_LookupPropertyWithFlags(cx, obj, id, 0, objp, propp);
+}
+
+JSBool
+js_LookupPropertyWithFlags(JSContext *cx, JSObject *obj, jsid id, uintN flags,
+ JSObject **objp, JSProperty **propp)
+{
+ JSObject *start, *obj2, *proto;
+ JSScope *scope;
+ JSScopeProperty *sprop;
+ JSClass *clasp;
+ JSResolveOp resolve;
+ JSResolvingKey key;
+ JSResolvingEntry *entry;
+ uint32 generation;
+ JSNewResolveOp newresolve;
+ jsbytecode *pc;
+ const JSCodeSpec *cs;
+ uint32 format;
+ JSBool ok;
+
+ /*
+ * Handle old bug that took empty string as zero index. Also convert
+ * string indices to integers if appropriate.
+ */
+ CHECK_FOR_STRING_INDEX(id);
+
+ /* Search scopes starting with obj and following the prototype link. */
+ start = obj;
+ for (;;) {
+ JS_LOCK_OBJ(cx, obj);
+ scope = OBJ_SCOPE(obj);
+ if (scope->object == obj) {
+ sprop = SCOPE_GET_PROPERTY(scope, id);
+ } else {
+ /* Shared prototype scope: try resolve before lookup. */
+ sprop = NULL;
+ }
+
+ /* Try obj's class resolve hook if id was not found in obj's scope. */
+ if (!sprop) {
+ clasp = LOCKED_OBJ_GET_CLASS(obj);
+ resolve = clasp->resolve;
+ if (resolve != JS_ResolveStub) {
+ /* Avoid recursion on (obj, id) already being resolved on cx. */
+ key.obj = obj;
+ key.id = id;
+
+ /*
+ * Once we have successfully added an entry for (obj, key) to
+ * cx->resolvingTable, control must go through cleanup: before
+ * returning. But note that JS_DHASH_ADD may find an existing
+ * entry, in which case we bail to suppress runaway recursion.
+ */
+ if (!js_StartResolving(cx, &key, JSRESFLAG_LOOKUP, &entry)) {
+ JS_UNLOCK_OBJ(cx, obj);
+ return JS_FALSE;
+ }
+ if (!entry) {
+ /* Already resolving id in obj -- suppress recursion. */
+ JS_UNLOCK_OBJ(cx, obj);
+ goto out;
+ }
+ generation = cx->resolvingTable->generation;
+
+ /* Null *propp here so we can test it at cleanup: safely. */
+ *propp = NULL;
+
+ if (clasp->flags & JSCLASS_NEW_RESOLVE) {
+ newresolve = (JSNewResolveOp)resolve;
+ if (!(flags & JSRESOLVE_CLASSNAME) &&
+ cx->fp &&
+ (pc = cx->fp->pc)) {
+ cs = &js_CodeSpec[*pc];
+ format = cs->format;
+ if ((format & JOF_MODEMASK) != JOF_NAME)
+ flags |= JSRESOLVE_QUALIFIED;
+ if ((format & JOF_ASSIGNING) ||
+ (cx->fp->flags & JSFRAME_ASSIGNING)) {
+ flags |= JSRESOLVE_ASSIGNING;
+ } else {
+ pc += cs->length;
+ if (Detecting(cx, pc))
+ flags |= JSRESOLVE_DETECTING;
+ }
+ if (format & JOF_DECLARING)
+ flags |= JSRESOLVE_DECLARING;
+ }
+ obj2 = (clasp->flags & JSCLASS_NEW_RESOLVE_GETS_START)
+ ? start
+ : NULL;
+ JS_UNLOCK_OBJ(cx, obj);
+
+ /* Protect id and all atoms from a GC nested in resolve. */
+ JS_KEEP_ATOMS(cx->runtime);
+ ok = newresolve(cx, obj, ID_TO_VALUE(id), flags, &obj2);
+ JS_UNKEEP_ATOMS(cx->runtime);
+ if (!ok)
+ goto cleanup;
+
+ JS_LOCK_OBJ(cx, obj);
+ if (obj2) {
+ /* Resolved: juggle locks and lookup id again. */
+ if (obj2 != obj) {
+ JS_UNLOCK_OBJ(cx, obj);
+ JS_LOCK_OBJ(cx, obj2);
+ }
+ scope = OBJ_SCOPE(obj2);
+ if (!MAP_IS_NATIVE(&scope->map)) {
+ /* Whoops, newresolve handed back a foreign obj2. */
+ JS_ASSERT(obj2 != obj);
+ JS_UNLOCK_OBJ(cx, obj2);
+ ok = OBJ_LOOKUP_PROPERTY(cx, obj2, id, objp, propp);
+ if (!ok || *propp)
+ goto cleanup;
+ JS_LOCK_OBJ(cx, obj2);
+ } else {
+ /*
+ * Require that obj2 have its own scope now, as we
+ * do for old-style resolve. If it doesn't, then
+ * id was not truly resolved, and we'll find it in
+ * the proto chain, or miss it if obj2's proto is
+ * not on obj's proto chain. That last case is a
+ * "too bad!" case.
+ */
+ if (scope->object == obj2)
+ sprop = SCOPE_GET_PROPERTY(scope, id);
+ }
+ if (sprop) {
+ JS_ASSERT(obj2 == scope->object);
+ obj = obj2;
+ } else if (obj2 != obj) {
+ JS_UNLOCK_OBJ(cx, obj2);
+ JS_LOCK_OBJ(cx, obj);
+ }
+ }
+ } else {
+ /*
+ * Old resolve always requires id re-lookup if obj owns
+ * its scope after resolve returns.
+ */
+ JS_UNLOCK_OBJ(cx, obj);
+ ok = resolve(cx, obj, ID_TO_VALUE(id));
+ if (!ok)
+ goto cleanup;
+ JS_LOCK_OBJ(cx, obj);
+ scope = OBJ_SCOPE(obj);
+ JS_ASSERT(MAP_IS_NATIVE(&scope->map));
+ if (scope->object == obj)
+ sprop = SCOPE_GET_PROPERTY(scope, id);
+ }
+
+ cleanup:
+ js_StopResolving(cx, &key, JSRESFLAG_LOOKUP, entry, generation);
+ if (!ok || *propp)
+ return ok;
+ }
+ }
+
+ if (sprop) {
+ JS_ASSERT(OBJ_SCOPE(obj) == scope);
+ *objp = scope->object; /* XXXbe hide in jsscope.[ch] */
+
+ *propp = (JSProperty *) sprop;
+ return JS_TRUE;
+ }
+
+ proto = LOCKED_OBJ_GET_PROTO(obj);
+ JS_UNLOCK_OBJ(cx, obj);
+ if (!proto)
+ break;
+ if (!OBJ_IS_NATIVE(proto))
+ return OBJ_LOOKUP_PROPERTY(cx, proto, id, objp, propp);
+ obj = proto;
+ }
+
+out:
+ *objp = NULL;
+ *propp = NULL;
+ return JS_TRUE;
+}
+
+JS_FRIEND_API(JSBool)
+js_FindProperty(JSContext *cx, jsid id, JSObject **objp, JSObject **pobjp,
+ JSProperty **propp)
+{
+ JSRuntime *rt;
+ JSObject *obj, *pobj, *lastobj;
+ JSScopeProperty *sprop;
+ JSProperty *prop;
+
+ rt = cx->runtime;
+ obj = cx->fp->scopeChain;
+ do {
+ /* Try the property cache and return immediately on cache hit. */
+ if (OBJ_IS_NATIVE(obj)) {
+ JS_LOCK_OBJ(cx, obj);
+ PROPERTY_CACHE_TEST(&rt->propertyCache, obj, id, sprop);
+ if (sprop) {
+ JS_ASSERT(OBJ_IS_NATIVE(obj));
+ *objp = obj;
+ *pobjp = obj;
+ *propp = (JSProperty *) sprop;
+ return JS_TRUE;
+ }
+ JS_UNLOCK_OBJ(cx, obj);
+ }
+
+ /* If cache miss, take the slow path. */
+ if (!OBJ_LOOKUP_PROPERTY(cx, obj, id, &pobj, &prop))
+ return JS_FALSE;
+ if (prop) {
+ if (OBJ_IS_NATIVE(pobj)) {
+ sprop = (JSScopeProperty *) prop;
+ PROPERTY_CACHE_FILL(&rt->propertyCache, pobj, id, sprop);
+ }
+ *objp = obj;
+ *pobjp = pobj;
+ *propp = prop;
+ return JS_TRUE;
+ }
+ lastobj = obj;
+ } while ((obj = OBJ_GET_PARENT(cx, obj)) != NULL);
+
+ *objp = lastobj;
+ *pobjp = NULL;
+ *propp = NULL;
+ return JS_TRUE;
+}
+
+JSObject *
+js_FindIdentifierBase(JSContext *cx, jsid id)
+{
+ JSObject *obj, *pobj;
+ JSProperty *prop;
+
+ /*
+ * Look for id's property along the "with" statement chain and the
+ * statically-linked scope chain.
+ */
+ if (!js_FindProperty(cx, id, &obj, &pobj, &prop))
+ return NULL;
+ if (prop) {
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ return obj;
+ }
+
+ /*
+ * Use the top-level scope from the scope chain, which won't end in the
+ * same scope as cx->globalObject for cross-context function calls.
+ */
+ JS_ASSERT(obj);
+
+ /*
+ * Property not found. Give a strict warning if binding an undeclared
+ * top-level variable.
+ */
+ if (JS_HAS_STRICT_OPTION(cx)) {
+ JSString *str = JSVAL_TO_STRING(ID_TO_VALUE(id));
+ if (!JS_ReportErrorFlagsAndNumber(cx,
+ JSREPORT_WARNING | JSREPORT_STRICT,
+ js_GetErrorMessage, NULL,
+ JSMSG_UNDECLARED_VAR,
+ JS_GetStringBytes(str))) {
+ return NULL;
+ }
+ }
+ return obj;
+}
+
+JSBool
+js_NativeGet(JSContext *cx, JSObject *obj, JSObject *pobj,
+ JSScopeProperty *sprop, jsval *vp)
+{
+ JSScope *scope;
+ uint32 slot;
+ int32 sample;
+ JSTempValueRooter tvr;
+ JSBool ok;
+
+ JS_ASSERT(OBJ_IS_NATIVE(pobj));
+ JS_ASSERT(JS_IS_OBJ_LOCKED(cx, pobj));
+ scope = OBJ_SCOPE(pobj);
+ JS_ASSERT(scope->object == pobj);
+
+ slot = sprop->slot;
+ *vp = (slot != SPROP_INVALID_SLOT)
+ ? LOCKED_OBJ_GET_SLOT(pobj, slot)
+ : JSVAL_VOID;
+ if (SPROP_HAS_STUB_GETTER(sprop))
+ return JS_TRUE;
+
+ sample = cx->runtime->propertyRemovals;
+ JS_UNLOCK_SCOPE(cx, scope);
+ JS_PUSH_TEMP_ROOT_SPROP(cx, sprop, &tvr);
+ ok = SPROP_GET(cx, sprop, obj, pobj, vp);
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ if (!ok)
+ return JS_FALSE;
+
+ JS_LOCK_SCOPE(cx, scope);
+ JS_ASSERT(scope->object == pobj);
+ if (SLOT_IN_SCOPE(slot, scope) &&
+ (JS_LIKELY(cx->runtime->propertyRemovals == sample) ||
+ SCOPE_GET_PROPERTY(scope, sprop->id) == sprop)) {
+ LOCKED_OBJ_SET_SLOT(pobj, slot, *vp);
+ }
+
+ return JS_TRUE;
+}
+
+JSBool
+js_NativeSet(JSContext *cx, JSObject *obj, JSScopeProperty *sprop, jsval *vp)
+{
+ JSScope *scope;
+ uint32 slot;
+ jsval pval;
+ int32 sample;
+ JSTempValueRooter tvr;
+ JSBool ok;
+
+ JS_ASSERT(OBJ_IS_NATIVE(obj));
+ JS_ASSERT(JS_IS_OBJ_LOCKED(cx, obj));
+ scope = OBJ_SCOPE(obj);
+ JS_ASSERT(scope->object == obj);
+
+ slot = sprop->slot;
+ if (slot != SPROP_INVALID_SLOT) {
+ pval = LOCKED_OBJ_GET_SLOT(obj, slot);
+
+ /* If sprop has a stub setter, keep scope locked and just store *vp. */
+ if (SPROP_HAS_STUB_SETTER(sprop))
+ goto set_slot;
+ } else {
+ /*
+ * Allow API consumers to create shared properties with stub setters.
+ * Such properties lack value storage, so setting them is like writing
+ * to /dev/null.
+ */
+ if (SPROP_HAS_STUB_SETTER(sprop))
+ return JS_TRUE;
+ pval = JSVAL_VOID;
+ }
+
+ sample = cx->runtime->propertyRemovals;
+ JS_UNLOCK_SCOPE(cx, scope);
+ JS_PUSH_TEMP_ROOT_SPROP(cx, sprop, &tvr);
+ ok = SPROP_SET(cx, sprop, obj, obj, vp);
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ if (!ok)
+ return JS_FALSE;
+
+ JS_LOCK_SCOPE(cx, scope);
+ JS_ASSERT(scope->object == obj);
+ if (SLOT_IN_SCOPE(slot, scope) &&
+ (JS_LIKELY(cx->runtime->propertyRemovals == sample) ||
+ SCOPE_GET_PROPERTY(scope, sprop->id) == sprop)) {
+ set_slot:
+ GC_POKE(cx, pval);
+ LOCKED_OBJ_SET_SLOT(obj, slot, *vp);
+ }
+
+ return JS_TRUE;
+}
+
+JSBool
+js_GetProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
+{
+ JSObject *obj2;
+ JSProperty *prop;
+ JSScopeProperty *sprop;
+
+ /*
+ * Handle old bug that took empty string as zero index. Also convert
+ * string indices to integers if appropriate.
+ */
+ CHECK_FOR_STRING_INDEX(id);
+
+ if (!js_LookupProperty(cx, obj, id, &obj2, &prop))
+ return JS_FALSE;
+ if (!prop) {
+ jsbytecode *pc;
+
+ *vp = JSVAL_VOID;
+
+ if (!OBJ_GET_CLASS(cx, obj)->getProperty(cx, obj, ID_TO_VALUE(id), vp))
+ return JS_FALSE;
+
+ /*
+ * Give a strict warning if foo.bar is evaluated by a script for an
+ * object foo with no property named 'bar'.
+ */
+ if (JSVAL_IS_VOID(*vp) && cx->fp && (pc = cx->fp->pc)) {
+ JSOp op;
+ uintN flags;
+ JSString *str;
+
+ op = *pc;
+ if (op == JSOP_GETXPROP || op == JSOP_GETXELEM) {
+ flags = JSREPORT_ERROR;
+ } else {
+ if (!JS_HAS_STRICT_OPTION(cx) ||
+ (op != JSOP_GETPROP && op != JSOP_GETELEM)) {
+ return JS_TRUE;
+ }
+
+ /*
+ * XXX do not warn about missing __iterator__ as the function
+ * may be called from JS_GetMethodById. See bug 355145.
+ */
+ if (id == ATOM_TO_JSID(cx->runtime->atomState.iteratorAtom))
+ return JS_TRUE;
+
+ /* Kludge to allow (typeof foo == "undefined") tests. */
+ JS_ASSERT(cx->fp->script);
+ pc += js_CodeSpec[op].length;
+ if (Detecting(cx, pc))
+ return JS_TRUE;
+
+ flags = JSREPORT_WARNING | JSREPORT_STRICT;
+ }
+
+ /* Ok, bad undefined property reference: whine about it. */
+ str = js_DecompileValueGenerator(cx, JSDVG_IGNORE_STACK,
+ ID_TO_VALUE(id), NULL);
+ if (!str ||
+ !JS_ReportErrorFlagsAndNumber(cx, flags,
+ js_GetErrorMessage, NULL,
+ JSMSG_UNDEFINED_PROP,
+ JS_GetStringBytes(str))) {
+ return JS_FALSE;
+ }
+ }
+ return JS_TRUE;
+ }
+
+ if (!OBJ_IS_NATIVE(obj2)) {
+ OBJ_DROP_PROPERTY(cx, obj2, prop);
+ return OBJ_GET_PROPERTY(cx, obj2, id, vp);
+ }
+
+ sprop = (JSScopeProperty *) prop;
+ if (!js_NativeGet(cx, obj, obj2, sprop, vp))
+ return JS_FALSE;
+
+ PROPERTY_CACHE_FILL(&cx->runtime->propertyCache, obj2, id, sprop);
+ JS_UNLOCK_OBJ(cx, obj2);
+ return JS_TRUE;
+}
+
+JSBool
+js_SetProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
+{
+ JSObject *pobj;
+ JSProperty *prop;
+ JSScopeProperty *sprop;
+ JSScope *scope;
+ uintN attrs, flags;
+ intN shortid;
+ JSClass *clasp;
+ JSPropertyOp getter, setter;
+
+ /*
+ * Handle old bug that took empty string as zero index. Also convert
+ * string indices to integers if appropriate.
+ */
+ CHECK_FOR_STRING_INDEX(id);
+
+ if (!js_LookupProperty(cx, obj, id, &pobj, &prop))
+ return JS_FALSE;
+
+ if (prop && !OBJ_IS_NATIVE(pobj)) {
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ prop = NULL;
+ }
+ sprop = (JSScopeProperty *) prop;
+
+ /*
+ * Now either sprop is null, meaning id was not found in obj or one of its
+ * prototypes; or sprop is non-null, meaning id was found in pobj's scope.
+ * If JS_THREADSAFE and sprop is non-null, then scope is locked, and sprop
+ * is held: we must OBJ_DROP_PROPERTY or JS_UNLOCK_SCOPE before we return
+ * (the two are equivalent for native objects, but we use JS_UNLOCK_SCOPE
+ * because it is cheaper).
+ */
+ attrs = JSPROP_ENUMERATE;
+ flags = 0;
+ shortid = 0;
+ clasp = OBJ_GET_CLASS(cx, obj);
+ getter = clasp->getProperty;
+ setter = clasp->setProperty;
+
+ if (sprop) {
+ /*
+ * Set scope for use below. It was locked by js_LookupProperty, and
+ * we know pobj owns it (i.e., scope->object == pobj). Therefore we
+ * optimize JS_UNLOCK_OBJ(cx, pobj) into JS_UNLOCK_SCOPE(cx, scope).
+ */
+ scope = OBJ_SCOPE(pobj);
+
+ attrs = sprop->attrs;
+ if ((attrs & JSPROP_READONLY) ||
+ (SCOPE_IS_SEALED(scope) && pobj == obj)) {
+ JS_UNLOCK_SCOPE(cx, scope);
+
+ /*
+ * Here, we'll either return true or goto read_only_error, which
+ * reports a strict warning or throws an error. So we redefine
+ * the |flags| local variable to be JSREPORT_* flags to pass to
+ * JS_ReportErrorFlagsAndNumberUC at label read_only_error. We
+ * must likewise re-task flags further below for the other 'goto
+ * read_only_error;' case.
+ */
+ flags = JSREPORT_ERROR;
+ if ((attrs & JSPROP_READONLY) && JS_VERSION_IS_ECMA(cx)) {
+ if (!JS_HAS_STRICT_OPTION(cx)) {
+ /* Just return true per ECMA if not in strict mode. */
+ return JS_TRUE;
+ }
+
+ /* Strict mode: report a read-only strict warning. */
+ flags = JSREPORT_STRICT | JSREPORT_WARNING;
+ }
+ goto read_only_error;
+ }
+
+ if (pobj != obj) {
+ /*
+ * We found id in a prototype object: prepare to share or shadow.
+ * NB: Thanks to the immutable, garbage-collected property tree
+ * maintained by jsscope.c in cx->runtime, we needn't worry about
+ * sprop going away behind our back after we've unlocked scope.
+ */
+ JS_UNLOCK_SCOPE(cx, scope);
+
+ /* Don't clone a shared prototype property. */
+ if (attrs & JSPROP_SHARED) {
+ if (SPROP_HAS_STUB_SETTER(sprop) &&
+ !(sprop->attrs & JSPROP_GETTER)) {
+ return JS_TRUE;
+ }
+ return SPROP_SET(cx, sprop, obj, pobj, vp);
+ }
+
+ /* Restore attrs to the ECMA default for new properties. */
+ attrs = JSPROP_ENUMERATE;
+
+ /*
+ * Preserve the shortid, getter, and setter when shadowing any
+ * property that has a shortid. An old API convention requires
+ * that the property's getter and setter functions receive the
+ * shortid, not id, when they are called on the shadow we are
+ * about to create in obj's scope.
+ */
+ if (sprop->flags & SPROP_HAS_SHORTID) {
+ flags = SPROP_HAS_SHORTID;
+ shortid = sprop->shortid;
+ getter = sprop->getter;
+ setter = sprop->setter;
+ }
+
+ /*
+ * Forget we found the proto-property now that we've copied any
+ * needed member values.
+ */
+ sprop = NULL;
+ }
+#ifdef __GNUC__ /* suppress bogus gcc warnings */
+ } else {
+ scope = NULL;
+#endif
+ }
+
+ if (!sprop) {
+ if (SCOPE_IS_SEALED(OBJ_SCOPE(obj)) && OBJ_SCOPE(obj)->object == obj) {
+ flags = JSREPORT_ERROR;
+ goto read_only_error;
+ }
+
+ /* Find or make a property descriptor with the right heritage. */
+ JS_LOCK_OBJ(cx, obj);
+ scope = js_GetMutableScope(cx, obj);
+ if (!scope) {
+ JS_UNLOCK_OBJ(cx, obj);
+ return JS_FALSE;
+ }
+ if (clasp->flags & JSCLASS_SHARE_ALL_PROPERTIES)
+ attrs |= JSPROP_SHARED;
+ sprop = js_AddScopeProperty(cx, scope, id, getter, setter,
+ SPROP_INVALID_SLOT, attrs, flags, shortid);
+ if (!sprop) {
+ JS_UNLOCK_SCOPE(cx, scope);
+ return JS_FALSE;
+ }
+
+ /*
+ * Initialize the new property value (passed to setter) to undefined.
+ * Note that we store before calling addProperty, to match the order
+ * in js_DefineNativeProperty.
+ */
+ if (SPROP_HAS_VALID_SLOT(sprop, scope))
+ LOCKED_OBJ_SET_SLOT(obj, sprop->slot, JSVAL_VOID);
+
+ /* XXXbe called with obj locked */
+ ADD_PROPERTY_HELPER(cx, clasp, obj, scope, sprop, vp,
+ js_RemoveScopeProperty(cx, scope, id);
+ JS_UNLOCK_SCOPE(cx, scope);
+ return JS_FALSE);
+
+ PROPERTY_CACHE_FILL(&cx->runtime->propertyCache, obj, id, sprop);
+ }
+
+ if (!js_NativeSet(cx, obj, sprop, vp))
+ return JS_FALSE;
+ JS_UNLOCK_SCOPE(cx, scope);
+ return JS_TRUE;
+
+ read_only_error: {
+ JSString *str = js_DecompileValueGenerator(cx,
+ JSDVG_IGNORE_STACK,
+ ID_TO_VALUE(id),
+ NULL);
+ if (!str)
+ return JS_FALSE;
+ return JS_ReportErrorFlagsAndNumberUC(cx, flags, js_GetErrorMessage,
+ NULL, JSMSG_READ_ONLY,
+ JS_GetStringChars(str));
+ }
+}
+
+JSBool
+js_GetAttributes(JSContext *cx, JSObject *obj, jsid id, JSProperty *prop,
+ uintN *attrsp)
+{
+ JSBool noprop, ok;
+ JSScopeProperty *sprop;
+
+ noprop = !prop;
+ if (noprop) {
+ if (!js_LookupProperty(cx, obj, id, &obj, &prop))
+ return JS_FALSE;
+ if (!prop) {
+ *attrsp = 0;
+ return JS_TRUE;
+ }
+ if (!OBJ_IS_NATIVE(obj)) {
+ ok = OBJ_GET_ATTRIBUTES(cx, obj, id, prop, attrsp);
+ OBJ_DROP_PROPERTY(cx, obj, prop);
+ return ok;
+ }
+ }
+ sprop = (JSScopeProperty *)prop;
+ *attrsp = sprop->attrs;
+ if (noprop)
+ OBJ_DROP_PROPERTY(cx, obj, prop);
+ return JS_TRUE;
+}
+
+JSBool
+js_SetAttributes(JSContext *cx, JSObject *obj, jsid id, JSProperty *prop,
+ uintN *attrsp)
+{
+ JSBool noprop, ok;
+ JSScopeProperty *sprop;
+
+ noprop = !prop;
+ if (noprop) {
+ if (!js_LookupProperty(cx, obj, id, &obj, &prop))
+ return JS_FALSE;
+ if (!prop)
+ return JS_TRUE;
+ if (!OBJ_IS_NATIVE(obj)) {
+ ok = OBJ_SET_ATTRIBUTES(cx, obj, id, prop, attrsp);
+ OBJ_DROP_PROPERTY(cx, obj, prop);
+ return ok;
+ }
+ }
+ sprop = (JSScopeProperty *)prop;
+ sprop = js_ChangeNativePropertyAttrs(cx, obj, sprop, *attrsp, 0,
+ sprop->getter, sprop->setter);
+ if (noprop)
+ OBJ_DROP_PROPERTY(cx, obj, prop);
+ return (sprop != NULL);
+}
+
+JSBool
+js_DeleteProperty(JSContext *cx, JSObject *obj, jsid id, jsval *rval)
+{
+ JSObject *proto;
+ JSProperty *prop;
+ JSScopeProperty *sprop;
+ JSString *str;
+ JSScope *scope;
+ JSBool ok;
+
+ *rval = JSVAL_TRUE;
+
+ /*
+ * Handle old bug that took empty string as zero index. Also convert
+ * string indices to integers if appropriate.
+ */
+ CHECK_FOR_STRING_INDEX(id);
+
+ if (!js_LookupProperty(cx, obj, id, &proto, &prop))
+ return JS_FALSE;
+ if (!prop || proto != obj) {
+ /*
+ * If the property was found in a native prototype, check whether it's
+ * shared and permanent. Such a property stands for direct properties
+ * in all delegating objects, matching ECMA semantics without bloating
+ * each delegating object.
+ */
+ if (prop) {
+ if (OBJ_IS_NATIVE(proto)) {
+ sprop = (JSScopeProperty *)prop;
+ if (SPROP_IS_SHARED_PERMANENT(sprop))
+ *rval = JSVAL_FALSE;
+ }
+ OBJ_DROP_PROPERTY(cx, proto, prop);
+ if (*rval == JSVAL_FALSE)
+ return JS_TRUE;
+ }
+
+ /*
+ * If no property, or the property comes unshared or impermanent from
+ * a prototype, call the class's delProperty hook, passing rval as the
+ * result parameter.
+ */
+ return OBJ_GET_CLASS(cx, obj)->delProperty(cx, obj, ID_TO_VALUE(id),
+ rval);
+ }
+
+ sprop = (JSScopeProperty *)prop;
+ if (sprop->attrs & JSPROP_PERMANENT) {
+ OBJ_DROP_PROPERTY(cx, obj, prop);
+ if (JS_VERSION_IS_ECMA(cx)) {
+ *rval = JSVAL_FALSE;
+ return JS_TRUE;
+ }
+ str = js_DecompileValueGenerator(cx, JSDVG_IGNORE_STACK,
+ ID_TO_VALUE(id), NULL);
+ if (str) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_PERMANENT, JS_GetStringBytes(str));
+ }
+ return JS_FALSE;
+ }
+
+ /* XXXbe called with obj locked */
+ if (!LOCKED_OBJ_GET_CLASS(obj)->delProperty(cx, obj, SPROP_USERID(sprop),
+ rval)) {
+ OBJ_DROP_PROPERTY(cx, obj, prop);
+ return JS_FALSE;
+ }
+
+ scope = OBJ_SCOPE(obj);
+ if (SPROP_HAS_VALID_SLOT(sprop, scope))
+ GC_POKE(cx, LOCKED_OBJ_GET_SLOT(obj, sprop->slot));
+
+ PROPERTY_CACHE_FILL(&cx->runtime->propertyCache, obj, id, NULL);
+ ok = js_RemoveScopeProperty(cx, scope, id);
+ OBJ_DROP_PROPERTY(cx, obj, prop);
+ return ok;
+}
+
+JSBool
+js_DefaultValue(JSContext *cx, JSObject *obj, JSType hint, jsval *vp)
+{
+ jsval v, save;
+ JSString *str;
+
+ v = save = OBJECT_TO_JSVAL(obj);
+ switch (hint) {
+ case JSTYPE_STRING:
+ /*
+ * Propagate the exception if js_TryMethod finds an appropriate
+ * method, and calling that method returned failure.
+ */
+ if (!js_TryMethod(cx, obj, cx->runtime->atomState.toStringAtom, 0, NULL,
+ &v)) {
+ return JS_FALSE;
+ }
+
+ if (!JSVAL_IS_PRIMITIVE(v)) {
+ if (!OBJ_GET_CLASS(cx, obj)->convert(cx, obj, hint, &v))
+ return JS_FALSE;
+ }
+ break;
+
+ default:
+ if (!OBJ_GET_CLASS(cx, obj)->convert(cx, obj, hint, &v))
+ return JS_FALSE;
+ if (!JSVAL_IS_PRIMITIVE(v)) {
+ JSType type = JS_TypeOfValue(cx, v);
+ if (type == hint ||
+ (type == JSTYPE_FUNCTION && hint == JSTYPE_OBJECT)) {
+ goto out;
+ }
+ if (!js_TryMethod(cx, obj, cx->runtime->atomState.toStringAtom, 0,
+ NULL, &v)) {
+ return JS_FALSE;
+ }
+ }
+ break;
+ }
+ if (!JSVAL_IS_PRIMITIVE(v)) {
+ /* Avoid recursive death through js_DecompileValueGenerator. */
+ if (hint == JSTYPE_STRING) {
+ str = JS_InternString(cx, OBJ_GET_CLASS(cx, obj)->name);
+ if (!str)
+ return JS_FALSE;
+ } else {
+ str = NULL;
+ }
+ *vp = OBJECT_TO_JSVAL(obj);
+ str = js_DecompileValueGenerator(cx, JSDVG_SEARCH_STACK, save, str);
+ if (str) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_CANT_CONVERT_TO,
+ JS_GetStringBytes(str),
+ (hint == JSTYPE_VOID)
+ ? "primitive type"
+ : js_type_strs[hint]);
+ }
+ return JS_FALSE;
+ }
+out:
+ *vp = v;
+ return JS_TRUE;
+}
+
+JSIdArray *
+js_NewIdArray(JSContext *cx, jsint length)
+{
+ JSIdArray *ida;
+
+ ida = (JSIdArray *)
+ JS_malloc(cx, sizeof(JSIdArray) + (length-1) * sizeof(jsval));
+ if (ida)
+ ida->length = length;
+ return ida;
+}
+
+JSIdArray *
+js_SetIdArrayLength(JSContext *cx, JSIdArray *ida, jsint length)
+{
+ JSIdArray *rida;
+
+ rida = (JSIdArray *)
+ JS_realloc(cx, ida, sizeof(JSIdArray) + (length-1) * sizeof(jsval));
+ if (!rida)
+ JS_DestroyIdArray(cx, ida);
+ else
+ rida->length = length;
+ return rida;
+}
+
+/* Private type used to iterate over all properties of a native JS object */
+struct JSNativeIteratorState {
+ jsint next_index; /* index into jsid array */
+ JSIdArray *ida; /* all property ids in enumeration */
+ JSNativeIteratorState *next; /* double-linked list support */
+ JSNativeIteratorState **prevp;
+};
+
+/*
+ * This function is used to enumerate the properties of native JSObjects
+ * and those host objects that do not define a JSNewEnumerateOp-style iterator
+ * function.
+ */
+JSBool
+js_Enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op,
+ jsval *statep, jsid *idp)
+{
+ JSRuntime *rt;
+ JSObject *proto;
+ JSClass *clasp;
+ JSEnumerateOp enumerate;
+ JSScopeProperty *sprop, *lastProp;
+ jsint i, length;
+ JSScope *scope;
+ JSIdArray *ida;
+ JSNativeIteratorState *state;
+
+ rt = cx->runtime;
+ clasp = OBJ_GET_CLASS(cx, obj);
+ enumerate = clasp->enumerate;
+ if (clasp->flags & JSCLASS_NEW_ENUMERATE)
+ return ((JSNewEnumerateOp) enumerate)(cx, obj, enum_op, statep, idp);
+
+ switch (enum_op) {
+ case JSENUMERATE_INIT:
+ if (!enumerate(cx, obj))
+ return JS_FALSE;
+ length = 0;
+
+ /*
+ * The set of all property ids is pre-computed when the iterator
+ * is initialized so as to avoid problems with properties being
+ * deleted during the iteration.
+ */
+ JS_LOCK_OBJ(cx, obj);
+ scope = OBJ_SCOPE(obj);
+
+ /*
+ * If this object shares a scope with its prototype, don't enumerate
+ * its properties. Otherwise they will be enumerated a second time
+ * when the prototype object is enumerated.
+ */
+ proto = OBJ_GET_PROTO(cx, obj);
+ if (proto && scope == OBJ_SCOPE(proto)) {
+ ida = js_NewIdArray(cx, 0);
+ if (!ida) {
+ JS_UNLOCK_OBJ(cx, obj);
+ return JS_FALSE;
+ }
+ } else {
+ /* Object has a private scope; Enumerate all props in scope. */
+ for (sprop = lastProp = SCOPE_LAST_PROP(scope); sprop;
+ sprop = sprop->parent) {
+ if ((
+#ifdef DUMP_CALL_TABLE
+ (cx->options & JSOPTION_LOGCALL_TOSOURCE) ||
+#endif
+ (sprop->attrs & JSPROP_ENUMERATE)) &&
+ !(sprop->flags & SPROP_IS_ALIAS) &&
+ (!SCOPE_HAD_MIDDLE_DELETE(scope) ||
+ SCOPE_HAS_PROPERTY(scope, sprop))) {
+ length++;
+ }
+ }
+ ida = js_NewIdArray(cx, length);
+ if (!ida) {
+ JS_UNLOCK_OBJ(cx, obj);
+ return JS_FALSE;
+ }
+ i = length;
+ for (sprop = lastProp; sprop; sprop = sprop->parent) {
+ if ((
+#ifdef DUMP_CALL_TABLE
+ (cx->options & JSOPTION_LOGCALL_TOSOURCE) ||
+#endif
+ (sprop->attrs & JSPROP_ENUMERATE)) &&
+ !(sprop->flags & SPROP_IS_ALIAS) &&
+ (!SCOPE_HAD_MIDDLE_DELETE(scope) ||
+ SCOPE_HAS_PROPERTY(scope, sprop))) {
+ JS_ASSERT(i > 0);
+ ida->vector[--i] = sprop->id;
+ }
+ }
+ }
+ JS_UNLOCK_OBJ(cx, obj);
+
+ state = (JSNativeIteratorState *)
+ JS_malloc(cx, sizeof(JSNativeIteratorState));
+ if (!state) {
+ JS_DestroyIdArray(cx, ida);
+ return JS_FALSE;
+ }
+ state->ida = ida;
+ state->next_index = 0;
+
+ JS_LOCK_RUNTIME(rt);
+ state->next = rt->nativeIteratorStates;
+ if (state->next)
+ state->next->prevp = &state->next;
+ state->prevp = &rt->nativeIteratorStates;
+ *state->prevp = state;
+ JS_UNLOCK_RUNTIME(rt);
+
+ *statep = PRIVATE_TO_JSVAL(state);
+ if (idp)
+ *idp = INT_TO_JSVAL(length);
+ break;
+
+ case JSENUMERATE_NEXT:
+ state = (JSNativeIteratorState *) JSVAL_TO_PRIVATE(*statep);
+ ida = state->ida;
+ length = ida->length;
+ if (state->next_index != length) {
+ *idp = ida->vector[state->next_index++];
+ break;
+ }
+ /* FALL THROUGH */
+
+ case JSENUMERATE_DESTROY:
+ state = (JSNativeIteratorState *) JSVAL_TO_PRIVATE(*statep);
+
+ JS_LOCK_RUNTIME(rt);
+ JS_ASSERT(rt->nativeIteratorStates);
+ JS_ASSERT(*state->prevp == state);
+ if (state->next) {
+ JS_ASSERT(state->next->prevp == &state->next);
+ state->next->prevp = state->prevp;
+ }
+ *state->prevp = state->next;
+ JS_UNLOCK_RUNTIME(rt);
+
+ JS_DestroyIdArray(cx, state->ida);
+ JS_free(cx, state);
+ *statep = JSVAL_NULL;
+ break;
+ }
+ return JS_TRUE;
+}
+
+void
+js_MarkNativeIteratorStates(JSContext *cx)
+{
+ JSNativeIteratorState *state;
+ jsid *cursor, *end, id;
+
+ state = cx->runtime->nativeIteratorStates;
+ if (!state)
+ return;
+
+ do {
+ JS_ASSERT(*state->prevp == state);
+ cursor = state->ida->vector;
+ end = cursor + state->ida->length;
+ for (; cursor != end; ++cursor) {
+ id = *cursor;
+ MARK_ID(cx, id);
+ }
+ } while ((state = state->next) != NULL);
+}
+
+JSBool
+js_CheckAccess(JSContext *cx, JSObject *obj, jsid id, JSAccessMode mode,
+ jsval *vp, uintN *attrsp)
+{
+ JSBool writing;
+ JSObject *pobj;
+ JSProperty *prop;
+ JSClass *clasp;
+ JSScopeProperty *sprop;
+ JSCheckAccessOp check;
+
+ writing = (mode & JSACC_WRITE) != 0;
+ switch (mode & JSACC_TYPEMASK) {
+ case JSACC_PROTO:
+ pobj = obj;
+ if (!writing)
+ *vp = OBJ_GET_SLOT(cx, obj, JSSLOT_PROTO);
+ *attrsp = JSPROP_PERMANENT;
+ break;
+
+ case JSACC_PARENT:
+ JS_ASSERT(!writing);
+ pobj = obj;
+ *vp = OBJ_GET_SLOT(cx, obj, JSSLOT_PARENT);
+ *attrsp = JSPROP_READONLY | JSPROP_PERMANENT;
+ break;
+
+ default:
+ if (!js_LookupProperty(cx, obj, id, &pobj, &prop))
+ return JS_FALSE;
+ if (!prop) {
+ if (!writing)
+ *vp = JSVAL_VOID;
+ *attrsp = 0;
+ clasp = OBJ_GET_CLASS(cx, obj);
+ return !clasp->checkAccess ||
+ clasp->checkAccess(cx, obj, ID_TO_VALUE(id), mode, vp);
+ }
+ if (!OBJ_IS_NATIVE(pobj)) {
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ return OBJ_CHECK_ACCESS(cx, pobj, id, mode, vp, attrsp);
+ }
+
+ sprop = (JSScopeProperty *)prop;
+ *attrsp = sprop->attrs;
+ if (!writing) {
+ *vp = (SPROP_HAS_VALID_SLOT(sprop, OBJ_SCOPE(pobj)))
+ ? LOCKED_OBJ_GET_SLOT(pobj, sprop->slot)
+ : JSVAL_VOID;
+ }
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ }
+
+ /*
+ * If obj's class has a stub (null) checkAccess hook, use the per-runtime
+ * checkObjectAccess callback, if configured.
+ *
+ * We don't want to require all classes to supply a checkAccess hook; we
+ * need that hook only for certain classes used when precompiling scripts
+ * and functions ("brutal sharing"). But for general safety of built-in
+ * magic properties such as __proto__ and __parent__, we route all access
+ * checks, even for classes that stub out checkAccess, through the global
+ * checkObjectAccess hook. This covers precompilation-based sharing and
+ * (possibly unintended) runtime sharing across trust boundaries.
+ */
+ clasp = OBJ_GET_CLASS(cx, pobj);
+ check = clasp->checkAccess;
+ if (!check)
+ check = cx->runtime->checkObjectAccess;
+ return !check || check(cx, pobj, ID_TO_VALUE(id), mode, vp);
+}
+
+#ifdef JS_THREADSAFE
+void
+js_DropProperty(JSContext *cx, JSObject *obj, JSProperty *prop)
+{
+ JS_UNLOCK_OBJ(cx, obj);
+}
+#endif
+
+static void
+ReportIsNotFunction(JSContext *cx, jsval *vp, uintN flags)
+{
+ /*
+ * The decompiler may need to access the args of the function in
+ * progress rather than the one we had hoped to call.
+ * So we switch the cx->fp to the frame below us. We stick the
+ * current frame in the dormantFrameChain to protect it from gc.
+ */
+
+ JSStackFrame *fp = cx->fp;
+ if (fp->down) {
+ JS_ASSERT(!fp->dormantNext);
+ fp->dormantNext = cx->dormantFrameChain;
+ cx->dormantFrameChain = fp;
+ cx->fp = fp->down;
+ }
+
+ js_ReportIsNotFunction(cx, vp, flags);
+
+ if (fp->down) {
+ JS_ASSERT(cx->dormantFrameChain == fp);
+ cx->dormantFrameChain = fp->dormantNext;
+ fp->dormantNext = NULL;
+ cx->fp = fp;
+ }
+}
+
+#ifdef NARCISSUS
+static JSBool
+GetCurrentExecutionContext(JSContext *cx, JSObject *obj, jsval *rval)
+{
+ JSObject *tmp;
+ jsval xcval;
+
+ while ((tmp = OBJ_GET_PARENT(cx, obj)) != NULL)
+ obj = tmp;
+ if (!OBJ_GET_PROPERTY(cx, obj,
+ ATOM_TO_JSID(cx->runtime->atomState
+ .ExecutionContextAtom),
+ &xcval)) {
+ return JS_FALSE;
+ }
+ if (JSVAL_IS_PRIMITIVE(xcval)) {
+ JS_ReportError(cx, "invalid ExecutionContext in global object");
+ return JS_FALSE;
+ }
+ if (!OBJ_GET_PROPERTY(cx, JSVAL_TO_OBJECT(xcval),
+ ATOM_TO_JSID(cx->runtime->atomState.currentAtom),
+ rval)) {
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+#endif
+
+JSBool
+js_Call(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSClass *clasp;
+
+ clasp = OBJ_GET_CLASS(cx, JSVAL_TO_OBJECT(argv[-2]));
+ if (!clasp->call) {
+#ifdef NARCISSUS
+ JSObject *callee, *args;
+ jsval fval, nargv[3];
+ JSBool ok;
+
+ callee = JSVAL_TO_OBJECT(argv[-2]);
+ if (!OBJ_GET_PROPERTY(cx, callee,
+ ATOM_TO_JSID(cx->runtime->atomState.callAtom),
+ &fval)) {
+ return JS_FALSE;
+ }
+ if (VALUE_IS_FUNCTION(cx, fval)) {
+ if (!GetCurrentExecutionContext(cx, obj, &nargv[2]))
+ return JS_FALSE;
+ args = js_GetArgsObject(cx, cx->fp);
+ if (!args)
+ return JS_FALSE;
+ nargv[0] = OBJECT_TO_JSVAL(obj);
+ nargv[1] = OBJECT_TO_JSVAL(args);
+ return js_InternalCall(cx, callee, fval, 3, nargv, rval);
+ }
+ if (JSVAL_IS_OBJECT(fval) && JSVAL_TO_OBJECT(fval) != callee) {
+ argv[-2] = fval;
+ ok = js_Call(cx, obj, argc, argv, rval);
+ argv[-2] = OBJECT_TO_JSVAL(callee);
+ return ok;
+ }
+#endif
+ ReportIsNotFunction(cx, &argv[-2], cx->fp->flags & JSFRAME_ITERATOR);
+ return JS_FALSE;
+ }
+ return clasp->call(cx, obj, argc, argv, rval);
+}
+
+JSBool
+js_Construct(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSClass *clasp;
+
+ clasp = OBJ_GET_CLASS(cx, JSVAL_TO_OBJECT(argv[-2]));
+ if (!clasp->construct) {
+#ifdef NARCISSUS
+ JSObject *callee, *args;
+ jsval cval, nargv[2];
+ JSBool ok;
+
+ callee = JSVAL_TO_OBJECT(argv[-2]);
+ if (!OBJ_GET_PROPERTY(cx, callee,
+ ATOM_TO_JSID(cx->runtime->atomState
+ .constructAtom),
+ &cval)) {
+ return JS_FALSE;
+ }
+ if (VALUE_IS_FUNCTION(cx, cval)) {
+ if (!GetCurrentExecutionContext(cx, obj, &nargv[1]))
+ return JS_FALSE;
+ args = js_GetArgsObject(cx, cx->fp);
+ if (!args)
+ return JS_FALSE;
+ nargv[0] = OBJECT_TO_JSVAL(args);
+ return js_InternalCall(cx, callee, cval, 2, nargv, rval);
+ }
+ if (JSVAL_IS_OBJECT(cval) && JSVAL_TO_OBJECT(cval) != callee) {
+ argv[-2] = cval;
+ ok = js_Call(cx, obj, argc, argv, rval);
+ argv[-2] = OBJECT_TO_JSVAL(callee);
+ return ok;
+ }
+#endif
+ ReportIsNotFunction(cx, &argv[-2], JSV2F_CONSTRUCT);
+ return JS_FALSE;
+ }
+ return clasp->construct(cx, obj, argc, argv, rval);
+}
+
+JSBool
+js_HasInstance(JSContext *cx, JSObject *obj, jsval v, JSBool *bp)
+{
+ JSClass *clasp;
+ JSString *str;
+
+ clasp = OBJ_GET_CLASS(cx, obj);
+ if (clasp->hasInstance)
+ return clasp->hasInstance(cx, obj, v, bp);
+#ifdef NARCISSUS
+ {
+ jsval fval, rval;
+
+ if (!OBJ_GET_PROPERTY(cx, obj,
+ ATOM_TO_JSID(cx->runtime->atomState
+ .hasInstanceAtom),
+ &fval)) {
+ return JS_FALSE;
+ }
+ if (VALUE_IS_FUNCTION(cx, fval)) {
+ return js_InternalCall(cx, obj, fval, 1, &v, &rval) &&
+ js_ValueToBoolean(cx, rval, bp);
+ }
+ }
+#endif
+ str = js_DecompileValueGenerator(cx, JSDVG_SEARCH_STACK,
+ OBJECT_TO_JSVAL(obj), NULL);
+ if (str) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_INSTANCEOF_RHS,
+ JS_GetStringBytes(str));
+ }
+ return JS_FALSE;
+}
+
+JSBool
+js_IsDelegate(JSContext *cx, JSObject *obj, jsval v, JSBool *bp)
+{
+ JSObject *obj2;
+
+ *bp = JS_FALSE;
+ if (JSVAL_IS_PRIMITIVE(v))
+ return JS_TRUE;
+ obj2 = JSVAL_TO_OBJECT(v);
+ while ((obj2 = OBJ_GET_PROTO(cx, obj2)) != NULL) {
+ if (obj2 == obj) {
+ *bp = JS_TRUE;
+ break;
+ }
+ }
+ return JS_TRUE;
+}
+
+JSBool
+js_GetClassPrototype(JSContext *cx, JSObject *scope, jsid id,
+ JSObject **protop)
+{
+ jsval v;
+ JSObject *ctor;
+
+ if (!js_FindClassObject(cx, scope, id, &v))
+ return JS_FALSE;
+ if (VALUE_IS_FUNCTION(cx, v)) {
+ ctor = JSVAL_TO_OBJECT(v);
+ if (!OBJ_GET_PROPERTY(cx, ctor,
+ ATOM_TO_JSID(cx->runtime->atomState
+ .classPrototypeAtom),
+ &v)) {
+ return JS_FALSE;
+ }
+ if (!JSVAL_IS_PRIMITIVE(v)) {
+ /*
+ * Set the newborn root in case v is otherwise unreferenced.
+ * It's ok to overwrite newborn roots here, since the getter
+ * called just above could have. Unlike the common GC rooting
+ * model, our callers do not have to protect protop thanks to
+ * this newborn root, since they all immediately create a new
+ * instance that delegates to this object, or just query the
+ * prototype for its class.
+ */
+ cx->weakRoots.newborn[GCX_OBJECT] = JSVAL_TO_GCTHING(v);
+ }
+ }
+ *protop = JSVAL_IS_OBJECT(v) ? JSVAL_TO_OBJECT(v) : NULL;
+ return JS_TRUE;
+}
+
+/*
+ * For shared precompilation of function objects, we support cloning on entry
+ * to an execution context in which the function declaration or expression
+ * should be processed as if it were not precompiled, where the precompiled
+ * function's scope chain does not match the execution context's. The cloned
+ * function object carries its execution-context scope in its parent slot; it
+ * links to the precompiled function (the "clone-parent") via its proto slot.
+ *
+ * Note that this prototype-based delegation leaves an unchecked access path
+ * from the clone to the clone-parent's 'constructor' property. If the clone
+ * lives in a less privileged or shared scope than the clone-parent, this is
+ * a security hole, a sharing hazard, or both. Therefore we check all such
+ * accesses with the following getter/setter pair, which we use when defining
+ * 'constructor' in f.prototype for all function objects f.
+ */
+static JSBool
+CheckCtorGetAccess(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSAtom *atom;
+ uintN attrs;
+
+ atom = cx->runtime->atomState.constructorAtom;
+ JS_ASSERT(id == ATOM_KEY(atom));
+ return OBJ_CHECK_ACCESS(cx, obj, ATOM_TO_JSID(atom), JSACC_READ,
+ vp, &attrs);
+}
+
+static JSBool
+CheckCtorSetAccess(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSAtom *atom;
+ uintN attrs;
+
+ atom = cx->runtime->atomState.constructorAtom;
+ JS_ASSERT(id == ATOM_KEY(atom));
+ return OBJ_CHECK_ACCESS(cx, obj, ATOM_TO_JSID(atom), JSACC_WRITE,
+ vp, &attrs);
+}
+
+JSBool
+js_SetClassPrototype(JSContext *cx, JSObject *ctor, JSObject *proto,
+ uintN attrs)
+{
+ /*
+ * Use the given attributes for the prototype property of the constructor,
+ * as user-defined constructors have a DontDelete prototype (which may be
+ * reset), while native or "system" constructors have DontEnum | ReadOnly |
+ * DontDelete.
+ */
+ if (!OBJ_DEFINE_PROPERTY(cx, ctor,
+ ATOM_TO_JSID(cx->runtime->atomState
+ .classPrototypeAtom),
+ OBJECT_TO_JSVAL(proto),
+ JS_PropertyStub, JS_PropertyStub,
+ attrs, NULL)) {
+ return JS_FALSE;
+ }
+
+ /*
+ * ECMA says that Object.prototype.constructor, or f.prototype.constructor
+ * for a user-defined function f, is DontEnum.
+ */
+ return OBJ_DEFINE_PROPERTY(cx, proto,
+ ATOM_TO_JSID(cx->runtime->atomState
+ .constructorAtom),
+ OBJECT_TO_JSVAL(ctor),
+ CheckCtorGetAccess, CheckCtorSetAccess,
+ 0, NULL);
+}
+
+JSBool
+js_ValueToObject(JSContext *cx, jsval v, JSObject **objp)
+{
+ JSObject *obj;
+
+ if (JSVAL_IS_NULL(v) || JSVAL_IS_VOID(v)) {
+ obj = NULL;
+ } else if (JSVAL_IS_OBJECT(v)) {
+ obj = JSVAL_TO_OBJECT(v);
+ if (!OBJ_DEFAULT_VALUE(cx, obj, JSTYPE_OBJECT, &v))
+ return JS_FALSE;
+ if (JSVAL_IS_OBJECT(v))
+ obj = JSVAL_TO_OBJECT(v);
+ } else {
+ if (JSVAL_IS_STRING(v)) {
+ obj = js_StringToObject(cx, JSVAL_TO_STRING(v));
+ } else if (JSVAL_IS_INT(v)) {
+ obj = js_NumberToObject(cx, (jsdouble)JSVAL_TO_INT(v));
+ } else if (JSVAL_IS_DOUBLE(v)) {
+ obj = js_NumberToObject(cx, *JSVAL_TO_DOUBLE(v));
+ } else {
+ JS_ASSERT(JSVAL_IS_BOOLEAN(v));
+ obj = js_BooleanToObject(cx, JSVAL_TO_BOOLEAN(v));
+ }
+ if (!obj)
+ return JS_FALSE;
+ }
+ *objp = obj;
+ return JS_TRUE;
+}
+
+JSObject *
+js_ValueToNonNullObject(JSContext *cx, jsval v)
+{
+ JSObject *obj;
+ JSString *str;
+
+ if (!js_ValueToObject(cx, v, &obj))
+ return NULL;
+ if (!obj) {
+ str = js_DecompileValueGenerator(cx, JSDVG_SEARCH_STACK, v, NULL);
+ if (str) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_NO_PROPERTIES, JS_GetStringBytes(str));
+ }
+ }
+ return obj;
+}
+
+JSBool
+js_TryValueOf(JSContext *cx, JSObject *obj, JSType type, jsval *rval)
+{
+ jsval argv[1];
+
+ argv[0] = ATOM_KEY(cx->runtime->atomState.typeAtoms[type]);
+ return js_TryMethod(cx, obj, cx->runtime->atomState.valueOfAtom, 1, argv,
+ rval);
+}
+
+JSBool
+js_TryMethod(JSContext *cx, JSObject *obj, JSAtom *atom,
+ uintN argc, jsval *argv, jsval *rval)
+{
+ JSErrorReporter older;
+ jsid id;
+ jsval fval;
+ JSBool ok;
+ int stackDummy;
+
+ if (!JS_CHECK_STACK_SIZE(cx, stackDummy)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_OVER_RECURSED);
+ return JS_FALSE;
+ }
+
+ /*
+ * Report failure only if an appropriate method was found, and calling it
+ * returned failure. We propagate failure in this case to make exceptions
+ * behave properly.
+ */
+ older = JS_SetErrorReporter(cx, NULL);
+ id = ATOM_TO_JSID(atom);
+ fval = JSVAL_VOID;
+#if JS_HAS_XML_SUPPORT
+ if (OBJECT_IS_XML(cx, obj)) {
+ JSXMLObjectOps *ops;
+
+ ops = (JSXMLObjectOps *) obj->map->ops;
+ obj = ops->getMethod(cx, obj, id, &fval);
+ ok = (obj != NULL);
+ } else
+#endif
+ {
+ ok = OBJ_GET_PROPERTY(cx, obj, id, &fval);
+ }
+ if (!ok)
+ JS_ClearPendingException(cx);
+ JS_SetErrorReporter(cx, older);
+
+ return JSVAL_IS_PRIMITIVE(fval) ||
+ js_InternalCall(cx, obj, fval, argc, argv, rval);
+}
+
+#if JS_HAS_XDR
+
+JSBool
+js_XDRObject(JSXDRState *xdr, JSObject **objp)
+{
+ JSContext *cx;
+ JSAtom *atom;
+ JSClass *clasp;
+ uint32 classId, classDef;
+ JSProtoKey protoKey;
+ jsid classKey;
+ JSObject *proto;
+
+ cx = xdr->cx;
+ atom = NULL;
+ if (xdr->mode == JSXDR_ENCODE) {
+ clasp = OBJ_GET_CLASS(cx, *objp);
+ classId = JS_XDRFindClassIdByName(xdr, clasp->name);
+ classDef = !classId;
+ if (classDef) {
+ if (!JS_XDRRegisterClass(xdr, clasp, &classId))
+ return JS_FALSE;
+ protoKey = JSCLASS_CACHED_PROTO_KEY(clasp);
+ if (protoKey != JSProto_Null) {
+ classDef |= (protoKey << 1);
+ } else {
+ atom = js_Atomize(cx, clasp->name, strlen(clasp->name), 0);
+ if (!atom)
+ return JS_FALSE;
+ }
+ }
+ } else {
+ clasp = NULL; /* quell GCC overwarning */
+ classDef = 0;
+ }
+
+ /*
+ * XDR a flag word, which could be 0 for a class use, in which case no
+ * name follows, only the id in xdr's class registry; 1 for a class def,
+ * in which case the flag word is followed by the class name transferred
+ * from or to atom; or a value greater than 1, an odd number that when
+ * divided by two yields the JSProtoKey for class. In the last case, as
+ * in the 0 classDef case, no name is transferred via atom.
+ */
+ if (!JS_XDRUint32(xdr, &classDef))
+ return JS_FALSE;
+ if (classDef == 1 && !js_XDRCStringAtom(xdr, &atom))
+ return JS_FALSE;
+
+ if (!JS_XDRUint32(xdr, &classId))
+ return JS_FALSE;
+
+ if (xdr->mode == JSXDR_DECODE) {
+ if (classDef) {
+ /* NB: we know that JSProto_Null is 0 here, for backward compat. */
+ protoKey = classDef >> 1;
+ classKey = (protoKey != JSProto_Null)
+ ? INT_TO_JSID(protoKey)
+ : ATOM_TO_JSID(atom);
+ if (!js_GetClassPrototype(cx, NULL, classKey, &proto))
+ return JS_FALSE;
+ clasp = OBJ_GET_CLASS(cx, proto);
+ if (!JS_XDRRegisterClass(xdr, clasp, &classId))
+ return JS_FALSE;
+ } else {
+ clasp = JS_XDRFindClassById(xdr, classId);
+ if (!clasp) {
+ char numBuf[12];
+ JS_snprintf(numBuf, sizeof numBuf, "%ld", (long)classId);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_CANT_FIND_CLASS, numBuf);
+ return JS_FALSE;
+ }
+ }
+ }
+
+ if (!clasp->xdrObject) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_CANT_XDR_CLASS, clasp->name);
+ return JS_FALSE;
+ }
+ return clasp->xdrObject(xdr, objp);
+}
+
+#endif /* JS_HAS_XDR */
+
+#ifdef DEBUG_brendan
+
+#include <stdio.h>
+#include <math.h>
+
+uint32 js_entry_count_max;
+uint32 js_entry_count_sum;
+double js_entry_count_sqsum;
+uint32 js_entry_count_hist[11];
+
+static void
+MeterEntryCount(uintN count)
+{
+ if (count) {
+ js_entry_count_sum += count;
+ js_entry_count_sqsum += (double)count * count;
+ if (count > js_entry_count_max)
+ js_entry_count_max = count;
+ }
+ js_entry_count_hist[JS_MIN(count, 10)]++;
+}
+
+#define DEBUG_scopemeters
+#endif /* DEBUG_brendan */
+
+#ifdef DEBUG_scopemeters
+void
+js_DumpScopeMeters(JSRuntime *rt)
+{
+ static FILE *logfp;
+ if (!logfp)
+ logfp = fopen("/tmp/scope.stats", "a");
+
+ {
+ double mean = 0., var = 0., sigma = 0.;
+ double nscopes = rt->liveScopes;
+ double nentrys = js_entry_count_sum;
+ if (nscopes > 0 && nentrys >= 0) {
+ mean = nentrys / nscopes;
+ var = nscopes * js_entry_count_sqsum - nentrys * nentrys;
+ if (var < 0.0 || nscopes <= 1)
+ var = 0.0;
+ else
+ var /= nscopes * (nscopes - 1);
+
+ /* Windows says sqrt(0.0) is "-1.#J" (?!) so we must test. */
+ sigma = (var != 0.) ? sqrt(var) : 0.;
+ }
+
+ fprintf(logfp,
+ "scopes %g entries %g mean %g sigma %g max %u",
+ nscopes, nentrys, mean, sigma, js_entry_count_max);
+ }
+
+ fprintf(logfp, " histogram %u %u %u %u %u %u %u %u %u %u %u\n",
+ js_entry_count_hist[0], js_entry_count_hist[1],
+ js_entry_count_hist[2], js_entry_count_hist[3],
+ js_entry_count_hist[4], js_entry_count_hist[5],
+ js_entry_count_hist[6], js_entry_count_hist[7],
+ js_entry_count_hist[8], js_entry_count_hist[9],
+ js_entry_count_hist[10]);
+ js_entry_count_sum = js_entry_count_max = 0;
+ js_entry_count_sqsum = 0;
+ memset(js_entry_count_hist, 0, sizeof js_entry_count_hist);
+ fflush(logfp);
+}
+#endif
+
+uint32
+js_Mark(JSContext *cx, JSObject *obj, void *arg)
+{
+ JSScope *scope;
+ JSScopeProperty *sprop;
+ JSClass *clasp;
+
+ JS_ASSERT(OBJ_IS_NATIVE(obj));
+ scope = OBJ_SCOPE(obj);
+#ifdef DEBUG_brendan
+ if (scope->object == obj)
+ MeterEntryCount(scope->entryCount);
+#endif
+
+ JS_ASSERT(!SCOPE_LAST_PROP(scope) ||
+ SCOPE_HAS_PROPERTY(scope, SCOPE_LAST_PROP(scope)));
+
+ for (sprop = SCOPE_LAST_PROP(scope); sprop; sprop = sprop->parent) {
+ if (SCOPE_HAD_MIDDLE_DELETE(scope) && !SCOPE_HAS_PROPERTY(scope, sprop))
+ continue;
+ MARK_SCOPE_PROPERTY(cx, sprop);
+ }
+
+ /* No one runs while the GC is running, so we can use LOCKED_... here. */
+ clasp = LOCKED_OBJ_GET_CLASS(obj);
+ if (clasp->mark)
+ (void) clasp->mark(cx, obj, NULL);
+
+ if (scope->object != obj) {
+ /*
+ * An unmutated object that shares a prototype's scope. We can't tell
+ * how many slots are allocated and in use at obj->slots by looking at
+ * scope, so we get obj->slots' length from its -1'st element.
+ */
+ return (uint32) obj->slots[-1];
+ }
+ return JS_MIN(scope->map.freeslot, scope->map.nslots);
+}
+
+void
+js_Clear(JSContext *cx, JSObject *obj)
+{
+ JSScope *scope;
+ JSRuntime *rt;
+ JSScopeProperty *sprop;
+ uint32 i, n;
+
+ /*
+ * Clear our scope and the property cache of all obj's properties only if
+ * obj owns the scope (i.e., not if obj is unmutated and therefore sharing
+ * its prototype's scope). NB: we do not clear any reserved slots lying
+ * below JSSLOT_FREE(clasp).
+ */
+ JS_LOCK_OBJ(cx, obj);
+ scope = OBJ_SCOPE(obj);
+ if (scope->object == obj) {
+ /* Clear the property cache before we clear the scope. */
+ rt = cx->runtime;
+ for (sprop = SCOPE_LAST_PROP(scope); sprop; sprop = sprop->parent) {
+ if (!SCOPE_HAD_MIDDLE_DELETE(scope) ||
+ SCOPE_HAS_PROPERTY(scope, sprop)) {
+ PROPERTY_CACHE_FILL(&rt->propertyCache, obj, sprop->id, NULL);
+ }
+ }
+
+ /* Now that we're done using scope->lastProp/table, clear scope. */
+ js_ClearScope(cx, scope);
+
+ /* Clear slot values and reset freeslot so we're consistent. */
+ i = scope->map.nslots;
+ n = JSSLOT_FREE(LOCKED_OBJ_GET_CLASS(obj));
+ while (--i >= n)
+ obj->slots[i] = JSVAL_VOID;
+ scope->map.freeslot = n;
+ }
+ JS_UNLOCK_OBJ(cx, obj);
+}
+
+jsval
+js_GetRequiredSlot(JSContext *cx, JSObject *obj, uint32 slot)
+{
+ jsval v;
+
+ JS_LOCK_OBJ(cx, obj);
+ v = (slot < (uint32) obj->slots[-1]) ? obj->slots[slot] : JSVAL_VOID;
+ JS_UNLOCK_OBJ(cx, obj);
+ return v;
+}
+
+JSBool
+js_SetRequiredSlot(JSContext *cx, JSObject *obj, uint32 slot, jsval v)
+{
+ JSScope *scope;
+ uint32 nslots;
+ JSClass *clasp;
+ jsval *newslots;
+
+ JS_LOCK_OBJ(cx, obj);
+ scope = OBJ_SCOPE(obj);
+ nslots = (uint32) obj->slots[-1];
+ if (slot >= nslots) {
+ /*
+ * At this point, obj may or may not own scope. If some path calls
+ * js_GetMutableScope but does not add a slot-owning property, then
+ * scope->object == obj but nslots will be nominal. If obj shares a
+ * prototype's scope, then we cannot update scope->map here, but we
+ * must update obj->slots[-1] when we grow obj->slots.
+ *
+ * See js_Mark, before the last return, where we make a special case
+ * for unmutated (scope->object != obj) objects.
+ */
+ JS_ASSERT(nslots == JS_INITIAL_NSLOTS);
+ clasp = LOCKED_OBJ_GET_CLASS(obj);
+ nslots = JSSLOT_FREE(clasp);
+ if (clasp->reserveSlots)
+ nslots += clasp->reserveSlots(cx, obj);
+ JS_ASSERT(slot < nslots);
+
+ newslots = AllocSlots(cx, obj->slots, nslots);
+ if (!newslots) {
+ JS_UNLOCK_SCOPE(cx, scope);
+ return JS_FALSE;
+ }
+ if (scope->object == obj)
+ scope->map.nslots = nslots;
+ obj->slots = newslots;
+ }
+
+ /* Whether or not we grew nslots, we may need to advance freeslot. */
+ if (scope->object == obj && slot >= scope->map.freeslot)
+ scope->map.freeslot = slot + 1;
+
+ obj->slots[slot] = v;
+ JS_UNLOCK_SCOPE(cx, scope);
+ return JS_TRUE;
+}
+
+#ifdef DEBUG
+
+/* Routines to print out values during debugging. */
+
+void printChar(jschar *cp) {
+ fprintf(stderr, "jschar* (0x%p) \"", (void *)cp);
+ while (*cp)
+ fputc(*cp++, stderr);
+ fputc('"', stderr);
+ fputc('\n', stderr);
+}
+
+void printString(JSString *str) {
+ size_t i, n;
+ jschar *s;
+ fprintf(stderr, "string (0x%p) \"", (void *)str);
+ s = JSSTRING_CHARS(str);
+ for (i=0, n=JSSTRING_LENGTH(str); i < n; i++)
+ fputc(s[i], stderr);
+ fputc('"', stderr);
+ fputc('\n', stderr);
+}
+
+void printVal(JSContext *cx, jsval val);
+
+void printObj(JSContext *cx, JSObject *jsobj) {
+ jsuint i;
+ jsval val;
+ JSClass *clasp;
+
+ fprintf(stderr, "object 0x%p\n", (void *)jsobj);
+ clasp = OBJ_GET_CLASS(cx, jsobj);
+ fprintf(stderr, "class 0x%p %s\n", (void *)clasp, clasp->name);
+ for (i=0; i < jsobj->map->nslots; i++) {
+ fprintf(stderr, "slot %3d ", i);
+ val = jsobj->slots[i];
+ if (JSVAL_IS_OBJECT(val))
+ fprintf(stderr, "object 0x%p\n", (void *)JSVAL_TO_OBJECT(val));
+ else
+ printVal(cx, val);
+ }
+}
+
+void printVal(JSContext *cx, jsval val) {
+ fprintf(stderr, "val %d (0x%p) = ", (int)val, (void *)val);
+ if (JSVAL_IS_NULL(val)) {
+ fprintf(stderr, "null\n");
+ } else if (JSVAL_IS_VOID(val)) {
+ fprintf(stderr, "undefined\n");
+ } else if (JSVAL_IS_OBJECT(val)) {
+ printObj(cx, JSVAL_TO_OBJECT(val));
+ } else if (JSVAL_IS_INT(val)) {
+ fprintf(stderr, "(int) %d\n", JSVAL_TO_INT(val));
+ } else if (JSVAL_IS_STRING(val)) {
+ printString(JSVAL_TO_STRING(val));
+ } else if (JSVAL_IS_DOUBLE(val)) {
+ fprintf(stderr, "(double) %g\n", *JSVAL_TO_DOUBLE(val));
+ } else {
+ JS_ASSERT(JSVAL_IS_BOOLEAN(val));
+ fprintf(stderr, "(boolean) %s\n",
+ JSVAL_TO_BOOLEAN(val) ? "true" : "false");
+ }
+ fflush(stderr);
+}
+
+void printId(JSContext *cx, jsid id) {
+ fprintf(stderr, "id %d (0x%p) is ", (int)id, (void *)id);
+ printVal(cx, ID_TO_VALUE(id));
+}
+
+void printAtom(JSAtom *atom) {
+ printString(ATOM_TO_STRING(atom));
+}
+
+#endif
diff --git a/src/third_party/js-1.7/jsobj.h b/src/third_party/js-1.7/jsobj.h
new file mode 100644
index 00000000000..eb3aedb71f9
--- /dev/null
+++ b/src/third_party/js-1.7/jsobj.h
@@ -0,0 +1,596 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=80:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsobj_h___
+#define jsobj_h___
+/*
+ * JS object definitions.
+ *
+ * A JS object consists of a possibly-shared object descriptor containing
+ * ordered property names, called the map; and a dense vector of property
+ * values, called slots. The map/slot pointer pair is GC'ed, while the map
+ * is reference counted and the slot vector is malloc'ed.
+ */
+#include "jshash.h" /* Added by JSIFY */
+#include "jsprvtd.h"
+#include "jspubtd.h"
+
+JS_BEGIN_EXTERN_C
+
+struct JSObjectMap {
+ jsrefcount nrefs; /* count of all referencing objects */
+ JSObjectOps *ops; /* high level object operation vtable */
+ uint32 nslots; /* length of obj->slots vector */
+ uint32 freeslot; /* index of next free obj->slots element */
+};
+
+/* Shorthand macros for frequently-made calls. */
+#define OBJ_LOOKUP_PROPERTY(cx,obj,id,objp,propp) \
+ (obj)->map->ops->lookupProperty(cx,obj,id,objp,propp)
+#define OBJ_DEFINE_PROPERTY(cx,obj,id,value,getter,setter,attrs,propp) \
+ (obj)->map->ops->defineProperty(cx,obj,id,value,getter,setter,attrs,propp)
+#define OBJ_GET_PROPERTY(cx,obj,id,vp) \
+ (obj)->map->ops->getProperty(cx,obj,id,vp)
+#define OBJ_SET_PROPERTY(cx,obj,id,vp) \
+ (obj)->map->ops->setProperty(cx,obj,id,vp)
+#define OBJ_GET_ATTRIBUTES(cx,obj,id,prop,attrsp) \
+ (obj)->map->ops->getAttributes(cx,obj,id,prop,attrsp)
+#define OBJ_SET_ATTRIBUTES(cx,obj,id,prop,attrsp) \
+ (obj)->map->ops->setAttributes(cx,obj,id,prop,attrsp)
+#define OBJ_DELETE_PROPERTY(cx,obj,id,rval) \
+ (obj)->map->ops->deleteProperty(cx,obj,id,rval)
+#define OBJ_DEFAULT_VALUE(cx,obj,hint,vp) \
+ (obj)->map->ops->defaultValue(cx,obj,hint,vp)
+#define OBJ_ENUMERATE(cx,obj,enum_op,statep,idp) \
+ (obj)->map->ops->enumerate(cx,obj,enum_op,statep,idp)
+#define OBJ_CHECK_ACCESS(cx,obj,id,mode,vp,attrsp) \
+ (obj)->map->ops->checkAccess(cx,obj,id,mode,vp,attrsp)
+
+/* These four are time-optimized to avoid stub calls. */
+#define OBJ_THIS_OBJECT(cx,obj) \
+ ((obj)->map->ops->thisObject \
+ ? (obj)->map->ops->thisObject(cx,obj) \
+ : (obj))
+#define OBJ_DROP_PROPERTY(cx,obj,prop) \
+ ((obj)->map->ops->dropProperty \
+ ? (obj)->map->ops->dropProperty(cx,obj,prop) \
+ : (void)0)
+#define OBJ_GET_REQUIRED_SLOT(cx,obj,slot) \
+ ((obj)->map->ops->getRequiredSlot \
+ ? (obj)->map->ops->getRequiredSlot(cx, obj, slot) \
+ : JSVAL_VOID)
+#define OBJ_SET_REQUIRED_SLOT(cx,obj,slot,v) \
+ ((obj)->map->ops->setRequiredSlot \
+ ? (obj)->map->ops->setRequiredSlot(cx, obj, slot, v) \
+ : JS_TRUE)
+
+#define OBJ_TO_INNER_OBJECT(cx,obj) \
+ JS_BEGIN_MACRO \
+ JSClass *clasp_ = OBJ_GET_CLASS(cx, obj); \
+ if (clasp_->flags & JSCLASS_IS_EXTENDED) { \
+ JSExtendedClass *xclasp_ = (JSExtendedClass*)clasp_; \
+ if (xclasp_->innerObject) \
+ obj = xclasp_->innerObject(cx, obj); \
+ } \
+ JS_END_MACRO
+
+/*
+ * In the original JS engine design, obj->slots pointed to a vector of length
+ * JS_INITIAL_NSLOTS words if obj->map was shared with a prototype object,
+ * else of length obj->map->nslots. With the advent of JS_GetReservedSlot,
+ * JS_SetReservedSlot, and JSCLASS_HAS_RESERVED_SLOTS (see jsapi.h), the size
+ * of the minimum length slots vector in the case where map is shared cannot
+ * be constant. This length starts at JS_INITIAL_NSLOTS, but may advance to
+ * include all the reserved slots.
+ *
+ * Therefore slots must be self-describing. Rather than tag its low order bit
+ * (a bit is all we need) to distinguish initial length from reserved length,
+ * we do "the BSTR thing": over-allocate slots by one jsval, and store the
+ * *net* length (counting usable slots, which have non-negative obj->slots[]
+ * indices) in obj->slots[-1]. All code that sets obj->slots must be aware of
+ * this hack -- you have been warned, and jsobj.c has been updated!
+ */
+struct JSObject {
+ JSObjectMap *map;
+ jsval *slots;
+};
+
+#define JSSLOT_PROTO 0
+#define JSSLOT_PARENT 1
+#define JSSLOT_CLASS 2
+#define JSSLOT_PRIVATE 3
+#define JSSLOT_START(clasp) (((clasp)->flags & JSCLASS_HAS_PRIVATE) \
+ ? JSSLOT_PRIVATE + 1 \
+ : JSSLOT_CLASS + 1)
+
+#define JSSLOT_FREE(clasp) (JSSLOT_START(clasp) \
+ + JSCLASS_RESERVED_SLOTS(clasp))
+
+#define JS_INITIAL_NSLOTS 5
+
+#ifdef DEBUG
+#define MAP_CHECK_SLOT(map,slot) \
+ JS_ASSERT((uint32)slot < JS_MIN((map)->freeslot, (map)->nslots))
+#define OBJ_CHECK_SLOT(obj,slot) \
+ MAP_CHECK_SLOT((obj)->map, slot)
+#else
+#define OBJ_CHECK_SLOT(obj,slot) ((void)0)
+#endif
+
+/* Fast macros for accessing obj->slots while obj is locked (if thread-safe). */
+#define LOCKED_OBJ_GET_SLOT(obj,slot) \
+ (OBJ_CHECK_SLOT(obj, slot), (obj)->slots[slot])
+#define LOCKED_OBJ_SET_SLOT(obj,slot,value) \
+ (OBJ_CHECK_SLOT(obj, slot), (obj)->slots[slot] = (value))
+#define LOCKED_OBJ_GET_PROTO(obj) \
+ JSVAL_TO_OBJECT(LOCKED_OBJ_GET_SLOT(obj, JSSLOT_PROTO))
+#define LOCKED_OBJ_GET_CLASS(obj) \
+ ((JSClass *)JSVAL_TO_PRIVATE(LOCKED_OBJ_GET_SLOT(obj, JSSLOT_CLASS)))
+
+#ifdef JS_THREADSAFE
+
+/* Thread-safe functions and wrapper macros for accessing obj->slots. */
+#define OBJ_GET_SLOT(cx,obj,slot) \
+ (OBJ_CHECK_SLOT(obj, slot), \
+ (OBJ_IS_NATIVE(obj) && OBJ_SCOPE(obj)->ownercx == cx) \
+ ? LOCKED_OBJ_GET_SLOT(obj, slot) \
+ : js_GetSlotThreadSafe(cx, obj, slot))
+
+#define OBJ_SET_SLOT(cx,obj,slot,value) \
+ (OBJ_CHECK_SLOT(obj, slot), \
+ (OBJ_IS_NATIVE(obj) && OBJ_SCOPE(obj)->ownercx == cx) \
+ ? (void) LOCKED_OBJ_SET_SLOT(obj, slot, value) \
+ : js_SetSlotThreadSafe(cx, obj, slot, value))
+
+/*
+ * If thread-safe, define an OBJ_GET_SLOT wrapper that bypasses, for a native
+ * object, the lock-free "fast path" test of (OBJ_SCOPE(obj)->ownercx == cx),
+ * to avoid needlessly switching from lock-free to lock-full scope when doing
+ * GC on a different context from the last one to own the scope. The caller
+ * in this case is probably a JSClass.mark function, e.g., fun_mark, or maybe
+ * a finalizer.
+ *
+ * The GC runs only when all threads except the one on which the GC is active
+ * are suspended at GC-safe points, so there is no hazard in directly accessing
+ * obj->slots[slot] from the GC's thread, once rt->gcRunning has been set. See
+ * jsgc.c for details.
+ */
+#define THREAD_IS_RUNNING_GC(rt, thread) \
+ ((rt)->gcRunning && (rt)->gcThread == (thread))
+
+#define CX_THREAD_IS_RUNNING_GC(cx) \
+ THREAD_IS_RUNNING_GC((cx)->runtime, (cx)->thread)
+
+#define GC_AWARE_GET_SLOT(cx, obj, slot) \
+ ((OBJ_IS_NATIVE(obj) && CX_THREAD_IS_RUNNING_GC(cx)) \
+ ? (obj)->slots[slot] \
+ : OBJ_GET_SLOT(cx, obj, slot))
+
+#else /* !JS_THREADSAFE */
+
+#define OBJ_GET_SLOT(cx,obj,slot) LOCKED_OBJ_GET_SLOT(obj,slot)
+#define OBJ_SET_SLOT(cx,obj,slot,value) LOCKED_OBJ_SET_SLOT(obj,slot,value)
+#define GC_AWARE_GET_SLOT(cx,obj,slot) LOCKED_OBJ_GET_SLOT(obj,slot)
+
+#endif /* !JS_THREADSAFE */
+
+/* Thread-safe proto, parent, and class access macros. */
+#define OBJ_GET_PROTO(cx,obj) \
+ JSVAL_TO_OBJECT(OBJ_GET_SLOT(cx, obj, JSSLOT_PROTO))
+#define OBJ_SET_PROTO(cx,obj,proto) \
+ OBJ_SET_SLOT(cx, obj, JSSLOT_PROTO, OBJECT_TO_JSVAL(proto))
+
+#define OBJ_GET_PARENT(cx,obj) \
+ JSVAL_TO_OBJECT(OBJ_GET_SLOT(cx, obj, JSSLOT_PARENT))
+#define OBJ_SET_PARENT(cx,obj,parent) \
+ OBJ_SET_SLOT(cx, obj, JSSLOT_PARENT, OBJECT_TO_JSVAL(parent))
+
+#define OBJ_GET_CLASS(cx,obj) \
+ ((JSClass *)JSVAL_TO_PRIVATE(OBJ_GET_SLOT(cx, obj, JSSLOT_CLASS)))
+
+/* Test whether a map or object is native. */
+#define MAP_IS_NATIVE(map) \
+ ((map)->ops == &js_ObjectOps || \
+ ((map)->ops && (map)->ops->newObjectMap == js_ObjectOps.newObjectMap))
+
+#define OBJ_IS_NATIVE(obj) MAP_IS_NATIVE((obj)->map)
+
+extern JS_FRIEND_DATA(JSObjectOps) js_ObjectOps;
+extern JS_FRIEND_DATA(JSObjectOps) js_WithObjectOps;
+extern JSClass js_ObjectClass;
+extern JSClass js_WithClass;
+extern JSClass js_BlockClass;
+
+/*
+ * Block scope object macros. The slots reserved by js_BlockClass are:
+ *
+ * JSSLOT_PRIVATE JSStackFrame * active frame pointer or null
+ * JSSLOT_BLOCK_DEPTH int depth of block slots in frame
+ *
+ * After JSSLOT_BLOCK_DEPTH come one or more slots for the block locals.
+ * OBJ_BLOCK_COUNT depends on this arrangement.
+ *
+ * A With object is like a Block object, in that both have one reserved slot
+ * telling the stack depth of the relevant slots (the slot whose value is the
+ * object named in the with statement, the slots containing the block's local
+ * variables); and both have a private slot referring to the JSStackFrame in
+ * whose activation they were created (or null if the with or block object
+ * outlives the frame).
+ */
+#define JSSLOT_BLOCK_DEPTH (JSSLOT_PRIVATE + 1)
+
+#define OBJ_BLOCK_COUNT(cx,obj) \
+ ((obj)->map->freeslot - (JSSLOT_BLOCK_DEPTH + 1))
+#define OBJ_BLOCK_DEPTH(cx,obj) \
+ JSVAL_TO_INT(OBJ_GET_SLOT(cx, obj, JSSLOT_BLOCK_DEPTH))
+#define OBJ_SET_BLOCK_DEPTH(cx,obj,depth) \
+ OBJ_SET_SLOT(cx, obj, JSSLOT_BLOCK_DEPTH, INT_TO_JSVAL(depth))
+
+/*
+ * To make sure this slot is well-defined, always call js_NewWithObject to
+ * create a With object, don't call js_NewObject directly. When creating a
+ * With object that does not correspond to a stack slot, pass -1 for depth.
+ *
+ * When popping the stack across this object's "with" statement, client code
+ * must call JS_SetPrivate(cx, withobj, NULL).
+ */
+extern JSObject *
+js_NewWithObject(JSContext *cx, JSObject *proto, JSObject *parent, jsint depth);
+
+/*
+ * Create a new block scope object not linked to any proto or parent object.
+ * Blocks are created by the compiler to reify let blocks and comprehensions.
+ * Only when dynamic scope is captured do they need to be cloned and spliced
+ * into an active scope chain.
+ */
+extern JSObject *
+js_NewBlockObject(JSContext *cx);
+
+extern JSObject *
+js_CloneBlockObject(JSContext *cx, JSObject *proto, JSObject *parent,
+ JSStackFrame *fp);
+
+extern JSBool
+js_PutBlockObject(JSContext *cx, JSObject *obj);
+
+struct JSSharpObjectMap {
+ jsrefcount depth;
+ jsatomid sharpgen;
+ JSHashTable *table;
+};
+
+#define SHARP_BIT ((jsatomid) 1)
+#define BUSY_BIT ((jsatomid) 2)
+#define SHARP_ID_SHIFT 2
+#define IS_SHARP(he) (JS_PTR_TO_UINT32((he)->value) & SHARP_BIT)
+#define MAKE_SHARP(he) ((he)->value = JS_UINT32_TO_PTR(JS_PTR_TO_UINT32((he)->value)|SHARP_BIT))
+#define IS_BUSY(he) (JS_PTR_TO_UINT32((he)->value) & BUSY_BIT)
+#define MAKE_BUSY(he) ((he)->value = JS_UINT32_TO_PTR(JS_PTR_TO_UINT32((he)->value)|BUSY_BIT))
+#define CLEAR_BUSY(he) ((he)->value = JS_UINT32_TO_PTR(JS_PTR_TO_UINT32((he)->value)&~BUSY_BIT))
+
+extern JSHashEntry *
+js_EnterSharpObject(JSContext *cx, JSObject *obj, JSIdArray **idap,
+ jschar **sp);
+
+extern void
+js_LeaveSharpObject(JSContext *cx, JSIdArray **idap);
+
+/*
+ * Mark objects stored in map if GC happens between js_EnterSharpObject
+ * and js_LeaveSharpObject. GC calls this when map->depth > 0.
+ */
+extern void
+js_GCMarkSharpMap(JSContext *cx, JSSharpObjectMap *map);
+
+extern JSBool
+js_obj_toSource(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval);
+
+extern JSBool
+js_obj_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval);
+
+extern JSBool
+js_HasOwnPropertyHelper(JSContext *cx, JSObject *obj, JSLookupPropOp lookup,
+ uintN argc, jsval *argv, jsval *rval);
+
+extern JSObject*
+js_InitBlockClass(JSContext *cx, JSObject* obj);
+
+extern JSObject *
+js_InitObjectClass(JSContext *cx, JSObject *obj);
+
+/* Select Object.prototype method names shared between jsapi.c and jsobj.c. */
+extern const char js_watch_str[];
+extern const char js_unwatch_str[];
+extern const char js_hasOwnProperty_str[];
+extern const char js_isPrototypeOf_str[];
+extern const char js_propertyIsEnumerable_str[];
+extern const char js_defineGetter_str[];
+extern const char js_defineSetter_str[];
+extern const char js_lookupGetter_str[];
+extern const char js_lookupSetter_str[];
+
+extern void
+js_InitObjectMap(JSObjectMap *map, jsrefcount nrefs, JSObjectOps *ops,
+ JSClass *clasp);
+
+extern JSObjectMap *
+js_NewObjectMap(JSContext *cx, jsrefcount nrefs, JSObjectOps *ops,
+ JSClass *clasp, JSObject *obj);
+
+extern void
+js_DestroyObjectMap(JSContext *cx, JSObjectMap *map);
+
+extern JSObjectMap *
+js_HoldObjectMap(JSContext *cx, JSObjectMap *map);
+
+extern JSObjectMap *
+js_DropObjectMap(JSContext *cx, JSObjectMap *map, JSObject *obj);
+
+extern JSBool
+js_GetClassId(JSContext *cx, JSClass *clasp, jsid *idp);
+
+extern JSObject *
+js_NewObject(JSContext *cx, JSClass *clasp, JSObject *proto, JSObject *parent);
+
+/*
+ * Fast access to immutable standard objects (constructors and prototypes).
+ */
+extern JSBool
+js_GetClassObject(JSContext *cx, JSObject *obj, JSProtoKey key,
+ JSObject **objp);
+
+extern JSBool
+js_SetClassObject(JSContext *cx, JSObject *obj, JSProtoKey key, JSObject *cobj);
+
+extern JSBool
+js_FindClassObject(JSContext *cx, JSObject *start, jsid id, jsval *vp);
+
+extern JSObject *
+js_ConstructObject(JSContext *cx, JSClass *clasp, JSObject *proto,
+ JSObject *parent, uintN argc, jsval *argv);
+
+extern void
+js_FinalizeObject(JSContext *cx, JSObject *obj);
+
+extern JSBool
+js_AllocSlot(JSContext *cx, JSObject *obj, uint32 *slotp);
+
+extern void
+js_FreeSlot(JSContext *cx, JSObject *obj, uint32 slot);
+
+/*
+ * Native property add and lookup variants that hide id in the hidden atom
+ * subspace, so as to avoid collisions between internal properties such as
+ * formal arguments and local variables in function objects, and externally
+ * set properties with the same ids.
+ */
+extern JSScopeProperty *
+js_AddHiddenProperty(JSContext *cx, JSObject *obj, jsid id,
+ JSPropertyOp getter, JSPropertyOp setter, uint32 slot,
+ uintN attrs, uintN flags, intN shortid);
+
+extern JSBool
+js_LookupHiddenProperty(JSContext *cx, JSObject *obj, jsid id, JSObject **objp,
+ JSProperty **propp);
+
+/*
+ * Find or create a property named by id in obj's scope, with the given getter
+ * and setter, slot, attributes, and other members.
+ */
+extern JSScopeProperty *
+js_AddNativeProperty(JSContext *cx, JSObject *obj, jsid id,
+ JSPropertyOp getter, JSPropertyOp setter, uint32 slot,
+ uintN attrs, uintN flags, intN shortid);
+
+/*
+ * Change sprop to have the given attrs, getter, and setter in scope, morphing
+ * it into a potentially new JSScopeProperty. Return a pointer to the changed
+ * or identical property.
+ */
+extern JSScopeProperty *
+js_ChangeNativePropertyAttrs(JSContext *cx, JSObject *obj,
+ JSScopeProperty *sprop, uintN attrs, uintN mask,
+ JSPropertyOp getter, JSPropertyOp setter);
+
+/*
+ * On error, return false. On success, if propp is non-null, return true with
+ * obj locked and with a held property in *propp; if propp is null, return true
+ * but release obj's lock first. Therefore all callers who pass non-null propp
+ * result parameters must later call OBJ_DROP_PROPERTY(cx, obj, *propp) both to
+ * drop the held property, and to release the lock on obj.
+ */
+extern JSBool
+js_DefineProperty(JSContext *cx, JSObject *obj, jsid id, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter, uintN attrs,
+ JSProperty **propp);
+
+extern JSBool
+js_DefineNativeProperty(JSContext *cx, JSObject *obj, jsid id, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter, uintN attrs,
+ uintN flags, intN shortid, JSProperty **propp);
+
+/*
+ * Unlike js_DefineProperty, propp must be non-null. On success, and if id was
+ * found, return true with *objp non-null and locked, and with a held property
+ * stored in *propp. If successful but id was not found, return true with both
+ * *objp and *propp null. Therefore all callers who receive a non-null *propp
+ * must later call OBJ_DROP_PROPERTY(cx, *objp, *propp).
+ */
+extern JS_FRIEND_API(JSBool)
+js_LookupProperty(JSContext *cx, JSObject *obj, jsid id, JSObject **objp,
+ JSProperty **propp);
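+
+/*
+ * Editorial sketch -- not part of the original js-1.7 source. A typical
+ * caller honoring the contract above (obj and id are illustrative names):
+ *
+ *     JSObject *pobj;
+ *     JSProperty *prop;
+ *
+ *     if (!js_LookupProperty(cx, obj, id, &pobj, &prop))
+ *         return JS_FALSE;            // error
+ *     if (prop) {
+ *         // ... use prop while pobj remains locked ...
+ *         OBJ_DROP_PROPERTY(cx, pobj, prop);
+ *     }
+ *
+ * js_DefineProperty above imposes the same obligation when its propp
+ * out-parameter is non-null.
+ */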
+
+/*
+ * Specialized subroutine that allows caller to preset JSRESOLVE_* flags.
+ * JSRESOLVE_HIDDEN flags hidden function param/local name lookups, just for
+ * internal use by fun_resolve and similar built-ins.
+ */
+extern JSBool
+js_LookupPropertyWithFlags(JSContext *cx, JSObject *obj, jsid id, uintN flags,
+ JSObject **objp, JSProperty **propp);
+
+#define JSRESOLVE_HIDDEN 0x8000
+
+extern JS_FRIEND_API(JSBool)
+js_FindProperty(JSContext *cx, jsid id, JSObject **objp, JSObject **pobjp,
+ JSProperty **propp);
+
+extern JSObject *
+js_FindIdentifierBase(JSContext *cx, jsid id);
+
+extern JSObject *
+js_FindVariableScope(JSContext *cx, JSFunction **funp);
+
+/*
+ * NB: js_NativeGet and js_NativeSet are called with the scope containing sprop
+ * (pobj's scope for Get, obj's for Set) locked, and on successful return, that
+ * scope is again locked. But on failure, both functions return false with the
+ * scope containing sprop unlocked.
+ */
+extern JSBool
+js_NativeGet(JSContext *cx, JSObject *obj, JSObject *pobj,
+ JSScopeProperty *sprop, jsval *vp);
+
+extern JSBool
+js_NativeSet(JSContext *cx, JSObject *obj, JSScopeProperty *sprop, jsval *vp);
+
+extern JSBool
+js_GetProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp);
+
+extern JSBool
+js_SetProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp);
+
+extern JSBool
+js_GetAttributes(JSContext *cx, JSObject *obj, jsid id, JSProperty *prop,
+ uintN *attrsp);
+
+extern JSBool
+js_SetAttributes(JSContext *cx, JSObject *obj, jsid id, JSProperty *prop,
+ uintN *attrsp);
+
+extern JSBool
+js_DeleteProperty(JSContext *cx, JSObject *obj, jsid id, jsval *rval);
+
+extern JSBool
+js_DefaultValue(JSContext *cx, JSObject *obj, JSType hint, jsval *vp);
+
+extern JSIdArray *
+js_NewIdArray(JSContext *cx, jsint length);
+
+/*
+ * Unlike realloc(3), this function frees ida on failure.
+ */
+extern JSIdArray *
+js_SetIdArrayLength(JSContext *cx, JSIdArray *ida, jsint length);
+
+extern JSBool
+js_Enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op,
+ jsval *statep, jsid *idp);
+
+extern void
+js_MarkNativeIteratorStates(JSContext *cx);
+
+extern JSBool
+js_CheckAccess(JSContext *cx, JSObject *obj, jsid id, JSAccessMode mode,
+ jsval *vp, uintN *attrsp);
+
+extern JSBool
+js_Call(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval);
+
+extern JSBool
+js_Construct(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval);
+
+extern JSBool
+js_HasInstance(JSContext *cx, JSObject *obj, jsval v, JSBool *bp);
+
+extern JSBool
+js_SetProtoOrParent(JSContext *cx, JSObject *obj, uint32 slot, JSObject *pobj);
+
+extern JSBool
+js_IsDelegate(JSContext *cx, JSObject *obj, jsval v, JSBool *bp);
+
+extern JSBool
+js_GetClassPrototype(JSContext *cx, JSObject *scope, jsid id,
+ JSObject **protop);
+
+extern JSBool
+js_SetClassPrototype(JSContext *cx, JSObject *ctor, JSObject *proto,
+ uintN attrs);
+
+extern JSBool
+js_ValueToObject(JSContext *cx, jsval v, JSObject **objp);
+
+extern JSObject *
+js_ValueToNonNullObject(JSContext *cx, jsval v);
+
+extern JSBool
+js_TryValueOf(JSContext *cx, JSObject *obj, JSType type, jsval *rval);
+
+extern JSBool
+js_TryMethod(JSContext *cx, JSObject *obj, JSAtom *atom,
+ uintN argc, jsval *argv, jsval *rval);
+
+extern JSBool
+js_XDRObject(JSXDRState *xdr, JSObject **objp);
+
+extern uint32
+js_Mark(JSContext *cx, JSObject *obj, void *arg);
+
+extern void
+js_Clear(JSContext *cx, JSObject *obj);
+
+extern jsval
+js_GetRequiredSlot(JSContext *cx, JSObject *obj, uint32 slot);
+
+extern JSBool
+js_SetRequiredSlot(JSContext *cx, JSObject *obj, uint32 slot, jsval v);
+
+extern JSObject *
+js_CheckScopeChainValidity(JSContext *cx, JSObject *scopeobj, const char *caller);
+
+extern JSBool
+js_CheckPrincipalsAccess(JSContext *cx, JSObject *scopeobj,
+ JSPrincipals *principals, JSAtom *caller);
+JS_END_EXTERN_C
+
+#endif /* jsobj_h___ */
diff --git a/src/third_party/js-1.7/jsopcode.c b/src/third_party/js-1.7/jsopcode.c
new file mode 100644
index 00000000000..3dec776cf69
--- /dev/null
+++ b/src/third_party/js-1.7/jsopcode.c
@@ -0,0 +1,4794 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set sw=4 ts=8 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS bytecode descriptors, disassemblers, and decompilers.
+ */
+#include "jsstddef.h"
+#ifdef HAVE_MEMORY_H
+#include <memory.h>
+#endif
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "jstypes.h"
+#include "jsarena.h" /* Added by JSIFY */
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsdtoa.h"
+#include "jsprf.h"
+#include "jsapi.h"
+#include "jsarray.h"
+#include "jsatom.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsdbgapi.h"
+#include "jsemit.h"
+#include "jsfun.h"
+#include "jslock.h"
+#include "jsobj.h"
+#include "jsopcode.h"
+#include "jsregexp.h"
+#include "jsscan.h"
+#include "jsscope.h"
+#include "jsscript.h"
+#include "jsstr.h"
+
+#if JS_HAS_DESTRUCTURING
+# include "jsnum.h"
+#endif
+
+static const char js_incop_strs[][3] = {"++", "--"};
+
+/* Pollute the namespace locally for MSVC Win16, but not for WatCom. */
+#ifdef __WINDOWS_386__
+ #ifdef FAR
+ #undef FAR
+ #endif
+#else /* !__WINDOWS_386__ */
+#ifndef FAR
+#define FAR
+#endif
+#endif /* !__WINDOWS_386__ */
+
+const JSCodeSpec FAR js_CodeSpec[] = {
+#define OPDEF(op,val,name,token,length,nuses,ndefs,prec,format) \
+ {name,token,length,nuses,ndefs,prec,format},
+#include "jsopcode.tbl"
+#undef OPDEF
+};
+
+uintN js_NumCodeSpecs = sizeof (js_CodeSpec) / sizeof js_CodeSpec[0];
+
+/************************************************************************/
+
+static ptrdiff_t
+GetJumpOffset(jsbytecode *pc, jsbytecode *pc2)
+{
+ uint32 type;
+
+ type = (js_CodeSpec[*pc].format & JOF_TYPEMASK);
+ if (JOF_TYPE_IS_EXTENDED_JUMP(type))
+ return GET_JUMPX_OFFSET(pc2);
+ return GET_JUMP_OFFSET(pc2);
+}
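+
+/*
+ * Editorial note -- not part of the original js-1.7 source. Plain JOF_JUMP
+ * opcodes carry a signed 16-bit relative offset, while the JOF_JUMPX
+ * ("extended") variants carry a signed 32-bit one; GetJumpOffset picks the
+ * right decoder from the opcode's format bits. Callers below pass pc twice
+ * for simple jumps and pass a separate pc2 when walking switch jump tables:
+ *
+ *     off = GetJumpOffset(pc, pc);     // format and operand both at pc
+ *     off = GetJumpOffset(pc, pc2);    // format from pc, entry at pc2
+ */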
+
+#ifdef DEBUG
+
+JS_FRIEND_API(JSBool)
+js_Disassemble(JSContext *cx, JSScript *script, JSBool lines, FILE *fp)
+{
+ jsbytecode *pc, *end;
+ uintN len;
+
+ pc = script->code;
+ end = pc + script->length;
+ while (pc < end) {
+ if (pc == script->main)
+ fputs("main:\n", fp);
+ len = js_Disassemble1(cx, script, pc,
+ PTRDIFF(pc, script->code, jsbytecode),
+ lines, fp);
+ if (!len)
+ return JS_FALSE;
+ pc += len;
+ }
+ return JS_TRUE;
+}
+
+const char *
+ToDisassemblySource(JSContext *cx, jsval v)
+{
+ JSObject *obj;
+ JSScopeProperty *sprop;
+ char *source;
+ const char *bytes;
+ JSString *str;
+
+ if (!JSVAL_IS_PRIMITIVE(v)) {
+ obj = JSVAL_TO_OBJECT(v);
+ if (OBJ_GET_CLASS(cx, obj) == &js_BlockClass) {
+ source = JS_sprintf_append(NULL, "depth %d {",
+ OBJ_BLOCK_DEPTH(cx, obj));
+ for (sprop = OBJ_SCOPE(obj)->lastProp; sprop;
+ sprop = sprop->parent) {
+ bytes = js_AtomToPrintableString(cx, JSID_TO_ATOM(sprop->id));
+ if (!bytes)
+ return NULL;
+ source = JS_sprintf_append(source, "%s: %d%s",
+ bytes, sprop->shortid,
+ sprop->parent ? ", " : "");
+ }
+ source = JS_sprintf_append(source, "}");
+ if (!source)
+ return NULL;
+ str = JS_NewString(cx, source, strlen(source));
+ if (!str)
+ return NULL;
+ return JS_GetStringBytes(str);
+ }
+ }
+ return js_ValueToPrintableSource(cx, v);
+}
+
+JS_FRIEND_API(uintN)
+js_Disassemble1(JSContext *cx, JSScript *script, jsbytecode *pc, uintN loc,
+ JSBool lines, FILE *fp)
+{
+ JSOp op;
+ const JSCodeSpec *cs;
+ ptrdiff_t len, off, jmplen;
+ uint32 type;
+ JSAtom *atom;
+ const char *bytes;
+
+ op = (JSOp)*pc;
+ if (op >= JSOP_LIMIT) {
+ char numBuf1[12], numBuf2[12];
+ JS_snprintf(numBuf1, sizeof numBuf1, "%d", op);
+ JS_snprintf(numBuf2, sizeof numBuf2, "%d", JSOP_LIMIT);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BYTECODE_TOO_BIG, numBuf1, numBuf2);
+ return 0;
+ }
+ cs = &js_CodeSpec[op];
+ len = (ptrdiff_t) cs->length;
+ fprintf(fp, "%05u:", loc);
+ if (lines)
+ fprintf(fp, "%4u", JS_PCToLineNumber(cx, script, pc));
+ fprintf(fp, " %s", cs->name);
+ type = cs->format & JOF_TYPEMASK;
+ switch (type) {
+ case JOF_BYTE:
+ if (op == JSOP_TRAP) {
+ op = JS_GetTrapOpcode(cx, script, pc);
+ if (op == JSOP_LIMIT)
+ return 0;
+ len = (ptrdiff_t) js_CodeSpec[op].length;
+ }
+ break;
+
+ case JOF_JUMP:
+ case JOF_JUMPX:
+ off = GetJumpOffset(pc, pc);
+ fprintf(fp, " %u (%d)", loc + off, off);
+ break;
+
+ case JOF_CONST:
+ atom = GET_ATOM(cx, script, pc);
+ bytes = ToDisassemblySource(cx, ATOM_KEY(atom));
+ if (!bytes)
+ return 0;
+ fprintf(fp, " %s", bytes);
+ break;
+
+ case JOF_UINT16:
+ case JOF_LOCAL:
+ fprintf(fp, " %u", GET_UINT16(pc));
+ break;
+
+ case JOF_TABLESWITCH:
+ case JOF_TABLESWITCHX:
+ {
+ jsbytecode *pc2;
+ jsint i, low, high;
+
+ jmplen = (type == JOF_TABLESWITCH) ? JUMP_OFFSET_LEN
+ : JUMPX_OFFSET_LEN;
+ pc2 = pc;
+ off = GetJumpOffset(pc, pc2);
+ pc2 += jmplen;
+ low = GET_JUMP_OFFSET(pc2);
+ pc2 += JUMP_OFFSET_LEN;
+ high = GET_JUMP_OFFSET(pc2);
+ pc2 += JUMP_OFFSET_LEN;
+ fprintf(fp, " defaultOffset %d low %d high %d", off, low, high);
+ for (i = low; i <= high; i++) {
+ off = GetJumpOffset(pc, pc2);
+ fprintf(fp, "\n\t%d: %d", i, off);
+ pc2 += jmplen;
+ }
+ len = 1 + pc2 - pc;
+ break;
+ }
+
+ case JOF_LOOKUPSWITCH:
+ case JOF_LOOKUPSWITCHX:
+ {
+ jsbytecode *pc2;
+ jsatomid npairs;
+
+ jmplen = (type == JOF_LOOKUPSWITCH) ? JUMP_OFFSET_LEN
+ : JUMPX_OFFSET_LEN;
+ pc2 = pc;
+ off = GetJumpOffset(pc, pc2);
+ pc2 += jmplen;
+ npairs = GET_ATOM_INDEX(pc2);
+ pc2 += ATOM_INDEX_LEN;
+ fprintf(fp, " offset %d npairs %u", off, (uintN) npairs);
+ while (npairs) {
+ atom = GET_ATOM(cx, script, pc2);
+ pc2 += ATOM_INDEX_LEN;
+ off = GetJumpOffset(pc, pc2);
+ pc2 += jmplen;
+
+ bytes = ToDisassemblySource(cx, ATOM_KEY(atom));
+ if (!bytes)
+ return 0;
+ fprintf(fp, "\n\t%s: %d", bytes, off);
+ npairs--;
+ }
+ len = 1 + pc2 - pc;
+ break;
+ }
+
+ case JOF_QARG:
+ fprintf(fp, " %u", GET_ARGNO(pc));
+ break;
+
+ case JOF_QVAR:
+ fprintf(fp, " %u", GET_VARNO(pc));
+ break;
+
+ case JOF_INDEXCONST:
+ fprintf(fp, " %u", GET_VARNO(pc));
+ pc += VARNO_LEN;
+ atom = GET_ATOM(cx, script, pc);
+ bytes = ToDisassemblySource(cx, ATOM_KEY(atom));
+ if (!bytes)
+ return 0;
+ fprintf(fp, " %s", bytes);
+ break;
+
+ case JOF_UINT24:
+ if (op == JSOP_FINDNAME) {
+ /* Special case to avoid a JOF_FINDNAME just for this op. */
+ atom = js_GetAtom(cx, &script->atomMap, GET_UINT24(pc));
+ bytes = ToDisassemblySource(cx, ATOM_KEY(atom));
+ if (!bytes)
+ return 0;
+ fprintf(fp, " %s", bytes);
+ break;
+ }
+
+ JS_ASSERT(op == JSOP_UINT24 || op == JSOP_LITERAL);
+ fprintf(fp, " %u", GET_UINT24(pc));
+ break;
+
+ case JOF_LITOPX:
+ atom = js_GetAtom(cx, &script->atomMap, GET_LITERAL_INDEX(pc));
+ bytes = ToDisassemblySource(cx, ATOM_KEY(atom));
+ if (!bytes)
+ return 0;
+
+ /*
+ * Bytecode: JSOP_LITOPX <uint24> op [<varno> if JSOP_DEFLOCALFUN].
+ * Advance pc to point at op.
+ */
+ pc += 1 + LITERAL_INDEX_LEN;
+ op = *pc;
+ cs = &js_CodeSpec[op];
+ fprintf(fp, " %s op %s", bytes, cs->name);
+ if ((cs->format & JOF_TYPEMASK) == JOF_INDEXCONST)
+ fprintf(fp, " %u", GET_VARNO(pc));
+
+ /*
+ * Set len to advance pc to skip op and any other immediates (namely,
+ * <varno> if JSOP_DEFLOCALFUN).
+ */
+ JS_ASSERT(cs->length > ATOM_INDEX_LEN);
+ len = cs->length - ATOM_INDEX_LEN;
+ break;
+
+ default: {
+ char numBuf[12];
+ JS_snprintf(numBuf, sizeof numBuf, "%lx", (unsigned long) cs->format);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_UNKNOWN_FORMAT, numBuf);
+ return 0;
+ }
+ }
+ fputs("\n", fp);
+ return len;
+}
+
+#endif /* DEBUG */
+
+/************************************************************************/
+
+/*
+ * Sprintf, but with unlimited and automatically allocated buffering.
+ */
+typedef struct Sprinter {
+ JSContext *context; /* context executing the decompiler */
+ JSArenaPool *pool; /* string allocation pool */
+ char *base; /* base address of buffer in pool */
+ size_t size; /* size of buffer allocated at base */
+ ptrdiff_t offset; /* offset of next free char in buffer */
+} Sprinter;
+
+#define INIT_SPRINTER(cx, sp, ap, off) \
+ ((sp)->context = cx, (sp)->pool = ap, (sp)->base = NULL, (sp)->size = 0, \
+ (sp)->offset = off)
+
+#define OFF2STR(sp,off) ((sp)->base + (off))
+#define STR2OFF(sp,str) ((str) - (sp)->base)
+#define RETRACT(sp,str) ((sp)->offset = STR2OFF(sp, str))
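+
+/*
+ * Editorial note -- not part of the original js-1.7 source. The decompiler
+ * stores buffer positions as ptrdiff_t offsets rather than char pointers
+ * because SprintAlloc (below) may move sp->base when growing the
+ * arena-backed buffer. The usual pattern is:
+ *
+ *     ptrdiff_t off = Sprint(sp, "%d", 42);   // remember an offset ...
+ *     // ... later Sprint calls may reallocate sp->base ...
+ *     const char *s = OFF2STR(sp, off);       // ... rebase only when needed
+ */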
+
+static JSBool
+SprintAlloc(Sprinter *sp, size_t nb)
+{
+ char *base;
+
+ base = sp->base;
+ if (!base) {
+ JS_ARENA_ALLOCATE_CAST(base, char *, sp->pool, nb);
+ } else {
+ JS_ARENA_GROW_CAST(base, char *, sp->pool, sp->size, nb);
+ }
+ if (!base) {
+ JS_ReportOutOfMemory(sp->context);
+ return JS_FALSE;
+ }
+ sp->base = base;
+ sp->size += nb;
+ return JS_TRUE;
+}
+
+static ptrdiff_t
+SprintPut(Sprinter *sp, const char *s, size_t len)
+{
+ ptrdiff_t nb, offset;
+ char *bp;
+
+ /* Allocate space for s, including the '\0' at the end. */
+ nb = (sp->offset + len + 1) - sp->size;
+ if (nb > 0 && !SprintAlloc(sp, nb))
+ return -1;
+
+ /* Advance offset and copy s into sp's buffer. */
+ offset = sp->offset;
+ sp->offset += len;
+ bp = sp->base + offset;
+ memmove(bp, s, len);
+ bp[len] = 0;
+ return offset;
+}
+
+static ptrdiff_t
+SprintCString(Sprinter *sp, const char *s)
+{
+ return SprintPut(sp, s, strlen(s));
+}
+
+static ptrdiff_t
+Sprint(Sprinter *sp, const char *format, ...)
+{
+ va_list ap;
+ char *bp;
+ ptrdiff_t offset;
+
+ va_start(ap, format);
+ bp = JS_vsmprintf(format, ap); /* XXX vsaprintf */
+ va_end(ap);
+ if (!bp) {
+ JS_ReportOutOfMemory(sp->context);
+ return -1;
+ }
+ offset = SprintCString(sp, bp);
+ free(bp);
+ return offset;
+}
+
+const jschar js_EscapeMap[] = {
+ '\b', 'b',
+ '\f', 'f',
+ '\n', 'n',
+ '\r', 'r',
+ '\t', 't',
+ '\v', 'v',
+ '"', '"',
+ '\'', '\'',
+ '\\', '\\',
+ 0
+};
+
+#define DONT_ESCAPE 0x10000
+
+static char *
+QuoteString(Sprinter *sp, JSString *str, uint32 quote)
+{
+ JSBool dontEscape, ok;
+ jschar qc, c;
+ ptrdiff_t off, len, nb;
+ const jschar *s, *t, *u, *z;
+ char *bp;
+
+ /* Sample off first for later return value pointer computation. */
+ dontEscape = (quote & DONT_ESCAPE) != 0;
+ qc = (jschar) quote;
+ off = sp->offset;
+ if (qc && Sprint(sp, "%c", (char)qc) < 0)
+ return NULL;
+
+ /* Loop control variables: z points at end of string sentinel. */
+ s = JSSTRING_CHARS(str);
+ z = s + JSSTRING_LENGTH(str);
+ for (t = s; t < z; s = ++t) {
+ /* Move t forward from s past un-quote-worthy characters. */
+ c = *t;
+ while (JS_ISPRINT(c) && c != qc && c != '\\' && !(c >> 8)) {
+ c = *++t;
+ if (t == z)
+ break;
+ }
+ len = PTRDIFF(t, s, jschar);
+
+ /* Allocate space for s, including the '\0' at the end. */
+ nb = (sp->offset + len + 1) - sp->size;
+ if (nb > 0 && !SprintAlloc(sp, nb))
+ return NULL;
+
+ /* Advance sp->offset and copy s into sp's buffer. */
+ bp = sp->base + sp->offset;
+ sp->offset += len;
+ while (--len >= 0)
+ *bp++ = (char) *s++;
+ *bp = '\0';
+
+ if (t == z)
+ break;
+
+ /* Use js_EscapeMap, \u, or \x only if necessary. */
+ if ((u = js_strchr(js_EscapeMap, c)) != NULL) {
+ ok = dontEscape
+ ? Sprint(sp, "%c", (char)c) >= 0
+ : Sprint(sp, "\\%c", (char)u[1]) >= 0;
+ } else {
+#ifdef JS_C_STRINGS_ARE_UTF8
+ /* If this is a surrogate pair, make sure to print the pair. */
+ if (c >= 0xD800 && c <= 0xDBFF) {
+ jschar buffer[3];
+ buffer[0] = c;
+ buffer[1] = *++t;
+ buffer[2] = 0;
+ if (t == z) {
+ char numbuf[10];
+ JS_snprintf(numbuf, sizeof numbuf, "0x%x", c);
+ JS_ReportErrorFlagsAndNumber(sp->context, JSREPORT_ERROR,
+ js_GetErrorMessage, NULL,
+ JSMSG_BAD_SURROGATE_CHAR,
+ numbuf);
+ ok = JS_FALSE;
+ break;
+ }
+ ok = Sprint(sp, "%hs", buffer) >= 0;
+ } else {
+ /* Print as UTF-8 string. */
+ ok = Sprint(sp, "%hc", c) >= 0;
+ }
+#else
+ /* Use \uXXXX or \xXX if the string can't be displayed as UTF-8. */
+ ok = Sprint(sp, (c >> 8) ? "\\u%04X" : "\\x%02X", c) >= 0;
+#endif
+ }
+ if (!ok)
+ return NULL;
+ }
+
+ /* Sprint the closing quote and return the quoted string. */
+ if (qc && Sprint(sp, "%c", (char)qc) < 0)
+ return NULL;
+
+ /*
+ * If we haven't Sprint'd anything yet, Sprint an empty string so that
+ * the OFF2STR below gives a valid result.
+ */
+ if (off == sp->offset && Sprint(sp, "") < 0)
+ return NULL;
+ return OFF2STR(sp, off);
+}
+
+JSString *
+js_QuoteString(JSContext *cx, JSString *str, jschar quote)
+{
+ void *mark;
+ Sprinter sprinter;
+ char *bytes;
+ JSString *escstr;
+
+ mark = JS_ARENA_MARK(&cx->tempPool);
+ INIT_SPRINTER(cx, &sprinter, &cx->tempPool, 0);
+ bytes = QuoteString(&sprinter, str, quote);
+ escstr = bytes ? JS_NewStringCopyZ(cx, bytes) : NULL;
+ JS_ARENA_RELEASE(&cx->tempPool, mark);
+ return escstr;
+}
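+
+/*
+ * Editorial sketch -- not part of the original js-1.7 source. Assuming str
+ * holds the characters   say "hi"\n   (with a literal newline), then
+ *
+ *     js_QuoteString(cx, str, '"')
+ *
+ * produces the escaped source form   "say \"hi\"\n"   -- the quote character
+ * and js_EscapeMap entries are backslash-escaped, and in the default
+ * (non-JS_C_STRINGS_ARE_UTF8) build other unprintable characters fall back
+ * to \xXX, or \uXXXX above U+00FF.
+ */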
+
+/************************************************************************/
+
+#if JS_HAS_BLOCK_SCOPE
+typedef enum JSBraceState {
+ ALWAYS_BRACE,
+ MAYBE_BRACE,
+ DONT_BRACE
+} JSBraceState;
+#endif
+
+struct JSPrinter {
+ Sprinter sprinter; /* base class state */
+ JSArenaPool pool; /* string allocation pool */
+ uintN indent; /* indentation in spaces */
+ JSPackedBool pretty; /* pretty-print: indent, use newlines */
+ JSPackedBool grouped; /* in parenthesized expression context */
+ JSScript *script; /* script being printed */
+ jsbytecode *dvgfence; /* js_DecompileValueGenerator fencepost */
+ JSScope *scope; /* script function scope */
+#if JS_HAS_BLOCK_SCOPE
+ JSBraceState braceState; /* remove braces around let declaration */
+ ptrdiff_t spaceOffset; /* -1 or offset of space before maybe-{ */
+#endif
+};
+
+/*
+ * Hack another flag, a la JS_DONT_PRETTY_PRINT, into uintN indent parameters
+ * to functions such as js_DecompileFunction and js_NewPrinter. This time, as
+ * opposed to JS_DONT_PRETTY_PRINT back in the dark ages, we can assume that a
+ * uintN is at least 32 bits.
+ */
+#define JS_IN_GROUP_CONTEXT 0x10000
+
+JSPrinter *
+js_NewPrinter(JSContext *cx, const char *name, uintN indent, JSBool pretty)
+{
+ JSPrinter *jp;
+
+ jp = (JSPrinter *) JS_malloc(cx, sizeof(JSPrinter));
+ if (!jp)
+ return NULL;
+ INIT_SPRINTER(cx, &jp->sprinter, &jp->pool, 0);
+ JS_InitArenaPool(&jp->pool, name, 256, 1);
+ jp->indent = indent & ~JS_IN_GROUP_CONTEXT;
+ jp->pretty = pretty;
+ jp->grouped = (indent & JS_IN_GROUP_CONTEXT) != 0;
+ jp->script = NULL;
+ jp->dvgfence = NULL;
+ jp->scope = NULL;
+#if JS_HAS_BLOCK_SCOPE
+ jp->braceState = ALWAYS_BRACE;
+ jp->spaceOffset = -1;
+#endif
+ return jp;
+}
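+
+/*
+ * Editorial sketch -- not part of the original js-1.7 source. A caller that
+ * wants the printer to know it is inside a parenthesized (grouped)
+ * expression ORs the flag into the indent argument, e.g.:
+ *
+ *     jp = js_NewPrinter(cx, name, indent | JS_IN_GROUP_CONTEXT, pretty);
+ *     // jp->grouped == JS_TRUE, jp->indent == indent (flag masked off)
+ */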
+
+void
+js_DestroyPrinter(JSPrinter *jp)
+{
+ JS_FinishArenaPool(&jp->pool);
+ JS_free(jp->sprinter.context, jp);
+}
+
+JSString *
+js_GetPrinterOutput(JSPrinter *jp)
+{
+ JSContext *cx;
+ JSString *str;
+
+ cx = jp->sprinter.context;
+ if (!jp->sprinter.base)
+ return cx->runtime->emptyString;
+ str = JS_NewStringCopyZ(cx, jp->sprinter.base);
+ if (!str)
+ return NULL;
+ JS_FreeArenaPool(&jp->pool);
+ INIT_SPRINTER(cx, &jp->sprinter, &jp->pool, 0);
+ return str;
+}
+
+#if !JS_HAS_BLOCK_SCOPE
+# define SET_MAYBE_BRACE(jp) jp
+# define CLEAR_MAYBE_BRACE(jp) jp
+#else
+# define SET_MAYBE_BRACE(jp) ((jp)->braceState = MAYBE_BRACE, (jp))
+# define CLEAR_MAYBE_BRACE(jp) ((jp)->braceState = ALWAYS_BRACE, (jp))
+
+static void
+SetDontBrace(JSPrinter *jp)
+{
+ ptrdiff_t offset;
+ const char *bp;
+
+ /* When not pretty-printing, newline after brace is chopped. */
+ JS_ASSERT(jp->spaceOffset < 0);
+ offset = jp->sprinter.offset - (jp->pretty ? 3 : 2);
+
+ /* The shortest case is "if (x) {". */
+ JS_ASSERT(offset >= 6);
+ bp = jp->sprinter.base;
+ if (bp[offset+0] == ' ' && bp[offset+1] == '{') {
+ JS_ASSERT(!jp->pretty || bp[offset+2] == '\n');
+ jp->spaceOffset = offset;
+ jp->braceState = DONT_BRACE;
+ }
+}
+#endif
+
+int
+js_printf(JSPrinter *jp, const char *format, ...)
+{
+ va_list ap;
+ char *bp, *fp;
+ int cc;
+
+ if (*format == '\0')
+ return 0;
+
+ va_start(ap, format);
+
+ /* If pretty-printing, expand magic tab into a run of jp->indent spaces. */
+ if (*format == '\t') {
+ format++;
+
+#if JS_HAS_BLOCK_SCOPE
+ if (*format == '}' && jp->braceState != ALWAYS_BRACE) {
+ JSBraceState braceState;
+
+ braceState = jp->braceState;
+ jp->braceState = ALWAYS_BRACE;
+ if (braceState == DONT_BRACE) {
+ ptrdiff_t offset, delta, from;
+
+ JS_ASSERT(format[1] == '\n' || format[1] == ' ');
+ offset = jp->spaceOffset;
+ JS_ASSERT(offset >= 6);
+
+ /* Replace " {\n" at the end of jp->sprinter with "\n". */
+ bp = jp->sprinter.base;
+ if (bp[offset+0] == ' ' && bp[offset+1] == '{') {
+ delta = 2;
+ if (jp->pretty) {
+ /* If pretty, we don't have to worry about 'else'. */
+ JS_ASSERT(bp[offset+2] == '\n');
+ } else if (bp[offset-1] != ')') {
+ /* Must keep ' ' to avoid 'dolet' or 'elselet'. */
+ ++offset;
+ delta = 1;
+ }
+
+ from = offset + delta;
+ memmove(bp + offset, bp + from, jp->sprinter.offset - from);
+ jp->sprinter.offset -= delta;
+ jp->spaceOffset = -1;
+
+ format += 2;
+ if (*format == '\0')
+ return 0;
+ }
+ }
+ }
+#endif
+
+ if (jp->pretty && Sprint(&jp->sprinter, "%*s", jp->indent, "") < 0)
+ return -1;
+ }
+
+ /* Suppress newlines (must be once per format, at the end) if not pretty. */
+ fp = NULL;
+ if (!jp->pretty && format[cc = strlen(format) - 1] == '\n') {
+ fp = JS_strdup(jp->sprinter.context, format);
+ if (!fp)
+ return -1;
+ fp[cc] = '\0';
+ format = fp;
+ }
+
+ /* Allocate temp space, convert format, and put. */
+ bp = JS_vsmprintf(format, ap); /* XXX vsaprintf */
+ if (fp) {
+ JS_free(jp->sprinter.context, fp);
+ format = NULL;
+ }
+ if (!bp) {
+ JS_ReportOutOfMemory(jp->sprinter.context);
+ return -1;
+ }
+
+ cc = strlen(bp);
+ if (SprintPut(&jp->sprinter, bp, (size_t)cc) < 0)
+ cc = -1;
+ free(bp);
+
+ va_end(ap);
+ return cc;
+}
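+
+/*
+ * Editorial sketch -- not part of the original js-1.7 source. The leading
+ * magic '\t' and a trailing '\n' in the format are both sensitive to
+ * jp->pretty. With jp->indent == 4 (cond is an illustrative variable):
+ *
+ *     js_printf(jp, "\tif (%s) {\n", cond);
+ *
+ * emits "    if (x) {\n" when pretty-printing, but "if (x) {" (no
+ * indentation, trailing newline chopped) when not.
+ */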
+
+JSBool
+js_puts(JSPrinter *jp, const char *s)
+{
+ return SprintCString(&jp->sprinter, s) >= 0;
+}
+
+/************************************************************************/
+
+typedef struct SprintStack {
+ Sprinter sprinter; /* sprinter for postfix to infix buffering */
+ ptrdiff_t *offsets; /* stack of postfix string offsets */
+ jsbytecode *opcodes; /* parallel stack of JS opcodes */
+ uintN top; /* top of stack index */
+ uintN inArrayInit; /* array initialiser/comprehension level */
+ JSPrinter *printer; /* permanent output goes here */
+} SprintStack;
+
+/*
+ * Get a stacked offset from ss->sprinter.base, or if the stacked value |off|
+ * is negative, lazily fetch the generating pc at |spindex = 1 + off| and try
+ * to decompile the code that generated the missing value. This is used when
+ * reporting errors, where the model stack will lack |pcdepth| non-negative
+ * offsets (see js_DecompileValueGenerator and js_DecompileCode).
+ *
+ * If the stacked offset is -1, return 0 to index the NUL padding at the start
+ * of ss->sprinter.base. If this happens, it means there is a decompiler bug
+ * to fix, but it won't violate memory safety.
+ */
+static ptrdiff_t
+GetOff(SprintStack *ss, uintN i)
+{
+ ptrdiff_t off;
+ JSString *str;
+
+ off = ss->offsets[i];
+ if (off < 0) {
+#if defined DEBUG_brendan || defined DEBUG_mrbkap || defined DEBUG_crowder
+ JS_ASSERT(off < -1);
+#endif
+ if (++off == 0) {
+ if (!ss->sprinter.base && SprintPut(&ss->sprinter, "", 0) >= 0)
+ memset(ss->sprinter.base, 0, ss->sprinter.offset);
+ return 0;
+ }
+
+ str = js_DecompileValueGenerator(ss->sprinter.context, off,
+ JSVAL_NULL, NULL);
+ if (!str)
+ return 0;
+ off = SprintCString(&ss->sprinter, JS_GetStringBytes(str));
+ if (off < 0)
+ off = 0;
+ ss->offsets[i] = off;
+ }
+ return off;
+}
+
+static const char *
+GetStr(SprintStack *ss, uintN i)
+{
+ ptrdiff_t off;
+
+ /*
+ * Must call GetOff before using ss->sprinter.base, since it may be null
+ * until bootstrapped by GetOff.
+ */
+ off = GetOff(ss, i);
+ return OFF2STR(&ss->sprinter, off);
+}
+
+/* Gap between stacked strings to allow for insertion of parens and commas. */
+#define PAREN_SLOP (2 + 1)
+
+/*
+ * These pseudo-ops help js_DecompileValueGenerator decompile JSOP_SETNAME,
+ * JSOP_SETPROP, and JSOP_SETELEM, respectively. They are never stored in
+ * bytecode, so they don't preempt valid opcodes.
+ */
+#define JSOP_GETPROP2 256
+#define JSOP_GETELEM2 257
+
+static JSBool
+PushOff(SprintStack *ss, ptrdiff_t off, JSOp op)
+{
+ uintN top;
+
+ if (!SprintAlloc(&ss->sprinter, PAREN_SLOP))
+ return JS_FALSE;
+
+ /* ss->top points to the next free slot; be paranoid about overflow. */
+ top = ss->top;
+ JS_ASSERT(top < ss->printer->script->depth);
+ if (top >= ss->printer->script->depth) {
+ JS_ReportOutOfMemory(ss->sprinter.context);
+ return JS_FALSE;
+ }
+
+ /* The opcodes stack must contain real bytecodes that index js_CodeSpec. */
+ ss->offsets[top] = off;
+ ss->opcodes[top] = (op == JSOP_GETPROP2) ? JSOP_GETPROP
+ : (op == JSOP_GETELEM2) ? JSOP_GETELEM
+ : (jsbytecode) op;
+ ss->top = ++top;
+ memset(OFF2STR(&ss->sprinter, ss->sprinter.offset), 0, PAREN_SLOP);
+ ss->sprinter.offset += PAREN_SLOP;
+ return JS_TRUE;
+}
+
+static ptrdiff_t
+PopOff(SprintStack *ss, JSOp op)
+{
+ uintN top;
+ const JSCodeSpec *cs, *topcs;
+ ptrdiff_t off;
+
+ /* ss->top points to the next free slot; be paranoid about underflow. */
+ top = ss->top;
+ JS_ASSERT(top != 0);
+ if (top == 0)
+ return 0;
+
+ ss->top = --top;
+ off = GetOff(ss, top);
+ topcs = &js_CodeSpec[ss->opcodes[top]];
+ cs = &js_CodeSpec[op];
+ if (topcs->prec != 0 && topcs->prec < cs->prec) {
+ ss->sprinter.offset = ss->offsets[top] = off - 2;
+ off = Sprint(&ss->sprinter, "(%s)", OFF2STR(&ss->sprinter, off));
+ } else {
+ ss->sprinter.offset = off;
+ }
+ return off;
+}
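+
+/*
+ * Editorial sketch -- not part of the original js-1.7 source. PushOff
+ * reserves PAREN_SLOP bytes after each stacked string precisely so PopOff
+ * can parenthesize in place. E.g. if "a + b" was pushed for JSOP_ADD and is
+ * then popped as an operand of JSOP_MUL (higher precedence), PopOff rewrites
+ * the stacked entry to "(a + b)" using the reserved slop, so the result
+ * decompiles as "(a + b) * c" rather than the re-associated "a + b * c".
+ */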
+
+static const char *
+PopStr(SprintStack *ss, JSOp op)
+{
+ ptrdiff_t off;
+
+ off = PopOff(ss, op);
+ return OFF2STR(&ss->sprinter, off);
+}
+
+typedef struct TableEntry {
+ jsval key;
+ ptrdiff_t offset;
+ JSAtom *label;
+ jsint order; /* source order for stable tableswitch sort */
+} TableEntry;
+
+static JSBool
+CompareOffsets(void *arg, const void *v1, const void *v2, int *result)
+{
+ ptrdiff_t offset_diff;
+ const TableEntry *te1 = (const TableEntry *) v1,
+ *te2 = (const TableEntry *) v2;
+
+ offset_diff = te1->offset - te2->offset;
+ *result = (offset_diff == 0 ? te1->order - te2->order
+ : offset_diff < 0 ? -1
+ : 1);
+ return JS_TRUE;
+}
+
+static jsbytecode *
+Decompile(SprintStack *ss, jsbytecode *pc, intN nb);
+
+static JSBool
+DecompileSwitch(SprintStack *ss, TableEntry *table, uintN tableLength,
+ jsbytecode *pc, ptrdiff_t switchLength,
+ ptrdiff_t defaultOffset, JSBool isCondSwitch)
+{
+ JSContext *cx;
+ JSPrinter *jp;
+ ptrdiff_t off, off2, diff, caseExprOff;
+ char *lval, *rval;
+ uintN i;
+ jsval key;
+ JSString *str;
+
+ cx = ss->sprinter.context;
+ jp = ss->printer;
+
+ /* JSOP_CONDSWITCH doesn't pop, unlike JSOP_{LOOKUP,TABLE}SWITCH. */
+ off = isCondSwitch ? GetOff(ss, ss->top-1) : PopOff(ss, JSOP_NOP);
+ lval = OFF2STR(&ss->sprinter, off);
+
+ js_printf(CLEAR_MAYBE_BRACE(jp), "\tswitch (%s) {\n", lval);
+
+ if (tableLength) {
+ diff = table[0].offset - defaultOffset;
+ if (diff > 0) {
+ jp->indent += 2;
+ js_printf(jp, "\t%s:\n", js_default_str);
+ jp->indent += 2;
+ if (!Decompile(ss, pc + defaultOffset, diff))
+ return JS_FALSE;
+ jp->indent -= 4;
+ }
+
+ caseExprOff = isCondSwitch ? JSOP_CONDSWITCH_LENGTH : 0;
+
+ for (i = 0; i < tableLength; i++) {
+ off = table[i].offset;
+ off2 = (i + 1 < tableLength) ? table[i + 1].offset : switchLength;
+
+ key = table[i].key;
+ if (isCondSwitch) {
+ ptrdiff_t nextCaseExprOff;
+
+ /*
+ * key encodes the JSOP_CASE bytecode's offset from switchtop.
+ * The next case expression follows immediately, unless we are
+ * at the last case.
+ */
+ nextCaseExprOff = (ptrdiff_t)JSVAL_TO_INT(key);
+ nextCaseExprOff += js_CodeSpec[pc[nextCaseExprOff]].length;
+ jp->indent += 2;
+ if (!Decompile(ss, pc + caseExprOff,
+ nextCaseExprOff - caseExprOff)) {
+ return JS_FALSE;
+ }
+ caseExprOff = nextCaseExprOff;
+
+ /* Balance the stack as if this JSOP_CASE matched. */
+ --ss->top;
+ } else {
+ /*
+ * key comes from an atom, not the decompiler, so we need to
+ * quote it if it's a string literal. But if table[i].label
+ * is non-null, key was constant-propagated and label is the
+ * name of the const we should show as the case label. In that
+ * case we set key to undefined so that QuoteString escapes the
+ * identifier if non-ASCII characters require it, but does not
+ * quote it.
+ */
+ if (table[i].label) {
+ str = ATOM_TO_STRING(table[i].label);
+ key = JSVAL_VOID;
+ } else {
+ str = js_ValueToString(cx, key);
+ if (!str)
+ return JS_FALSE;
+ }
+ rval = QuoteString(&ss->sprinter, str,
+ (jschar)(JSVAL_IS_STRING(key) ? '"' : 0));
+ if (!rval)
+ return JS_FALSE;
+ RETRACT(&ss->sprinter, rval);
+ jp->indent += 2;
+ js_printf(jp, "\tcase %s:\n", rval);
+ }
+
+ jp->indent += 2;
+ if (off <= defaultOffset && defaultOffset < off2) {
+ diff = defaultOffset - off;
+ if (diff != 0) {
+ if (!Decompile(ss, pc + off, diff))
+ return JS_FALSE;
+ off = defaultOffset;
+ }
+ jp->indent -= 2;
+ js_printf(jp, "\t%s:\n", js_default_str);
+ jp->indent += 2;
+ }
+ if (!Decompile(ss, pc + off, off2 - off))
+ return JS_FALSE;
+ jp->indent -= 4;
+
+ /* Re-balance as if last JSOP_CASE or JSOP_DEFAULT mismatched. */
+ if (isCondSwitch)
+ ++ss->top;
+ }
+ }
+
+ if (defaultOffset == switchLength) {
+ jp->indent += 2;
+ js_printf(jp, "\t%s:;\n", js_default_str);
+ jp->indent -= 2;
+ }
+ js_printf(jp, "\t}\n");
+
+ /* By the end of a JSOP_CONDSWITCH, the discriminant has been popped. */
+ if (isCondSwitch)
+ --ss->top;
+ return JS_TRUE;
+}
+
+static JSAtom *
+GetSlotAtom(JSPrinter *jp, JSPropertyOp getter, uintN slot)
+{
+ JSScope *scope;
+ JSScopeProperty *sprop;
+ JSObject *obj, *proto;
+
+ scope = jp->scope;
+ while (scope) {
+ for (sprop = SCOPE_LAST_PROP(scope); sprop; sprop = sprop->parent) {
+ if (sprop->getter != getter)
+ continue;
+ JS_ASSERT(sprop->flags & SPROP_HAS_SHORTID);
+ JS_ASSERT(JSID_IS_ATOM(sprop->id));
+ if ((uintN) sprop->shortid == slot)
+ return JSID_TO_ATOM(sprop->id);
+ }
+ obj = scope->object;
+ if (!obj)
+ break;
+ proto = OBJ_GET_PROTO(jp->sprinter.context, obj);
+ if (!proto)
+ break;
+ scope = OBJ_SCOPE(proto);
+ }
+ return NULL;
+}
+
+/*
+ * NB: Indexed by SRC_DECL_* defines from jsemit.h.
+ */
+static const char * const var_prefix[] = {"var ", "const ", "let "};
+
+static const char *
+VarPrefix(jssrcnote *sn)
+{
+ if (sn && (SN_TYPE(sn) == SRC_DECL || SN_TYPE(sn) == SRC_GROUPASSIGN)) {
+ ptrdiff_t type = js_GetSrcNoteOffset(sn, 0);
+ if ((uintN)type <= SRC_DECL_LET)
+ return var_prefix[type];
+ }
+ return "";
+}
+#define LOCAL_ASSERT_RV(expr, rv) \
+ JS_BEGIN_MACRO \
+ JS_ASSERT(expr); \
+ if (!(expr)) return (rv); \
+ JS_END_MACRO
+
+const char *
+GetLocal(SprintStack *ss, jsint i)
+{
+ ptrdiff_t off;
+ JSContext *cx;
+ JSScript *script;
+ jsatomid j, n;
+ JSAtom *atom;
+ JSObject *obj;
+ jsint depth, count;
+ JSScopeProperty *sprop;
+ const char *rval;
+
+#define LOCAL_ASSERT(expr) LOCAL_ASSERT_RV(expr, "")
+
+ off = ss->offsets[i];
+ if (off >= 0)
+ return OFF2STR(&ss->sprinter, off);
+
+ /*
+ * We must be called from js_DecompileValueGenerator (via Decompile) when
+ * dereferencing a local that's undefined or null. Search script->atomMap
+ * for the block containing this local by its stack index, i.
+ */
+ cx = ss->sprinter.context;
+ script = ss->printer->script;
+ for (j = 0, n = script->atomMap.length; j < n; j++) {
+ atom = script->atomMap.vector[j];
+ if (ATOM_IS_OBJECT(atom)) {
+ obj = ATOM_TO_OBJECT(atom);
+ if (OBJ_GET_CLASS(cx, obj) == &js_BlockClass) {
+ depth = OBJ_BLOCK_DEPTH(cx, obj);
+ count = OBJ_BLOCK_COUNT(cx, obj);
+ if ((jsuint)(i - depth) < (jsuint)count)
+ break;
+ }
+ }
+ }
+
+ LOCAL_ASSERT(j < n);
+ i -= depth;
+ for (sprop = OBJ_SCOPE(obj)->lastProp; sprop; sprop = sprop->parent) {
+ if (sprop->shortid == i)
+ break;
+ }
+
+ LOCAL_ASSERT(sprop && JSID_IS_ATOM(sprop->id));
+ atom = JSID_TO_ATOM(sprop->id);
+ rval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0);
+ if (!rval)
+ return NULL;
+ RETRACT(&ss->sprinter, rval);
+ return rval;
+
+#undef LOCAL_ASSERT
+}
+
+#if JS_HAS_DESTRUCTURING
+
+#define LOCAL_ASSERT(expr) LOCAL_ASSERT_RV(expr, NULL)
+#define LOAD_OP_DATA(pc) (oplen = (cs = &js_CodeSpec[op = *pc])->length)
+
+static jsbytecode *
+DecompileDestructuring(SprintStack *ss, jsbytecode *pc, jsbytecode *endpc);
+
+static jsbytecode *
+DecompileDestructuringLHS(SprintStack *ss, jsbytecode *pc, jsbytecode *endpc,
+ JSBool *hole)
+{
+ JSContext *cx;
+ JSPrinter *jp;
+ JSOp op;
+ const JSCodeSpec *cs;
+ uintN oplen, i;
+ const char *lval, *xval;
+ ptrdiff_t todo;
+ JSAtom *atom;
+
+ *hole = JS_FALSE;
+ cx = ss->sprinter.context;
+ jp = ss->printer;
+ LOAD_OP_DATA(pc);
+
+ switch (op) {
+ case JSOP_POP:
+ *hole = JS_TRUE;
+ todo = SprintPut(&ss->sprinter, ", ", 2);
+ break;
+
+ case JSOP_DUP:
+ pc = DecompileDestructuring(ss, pc, endpc);
+ if (!pc)
+ return NULL;
+ if (pc == endpc)
+ return pc;
+ LOAD_OP_DATA(pc);
+ lval = PopStr(ss, JSOP_NOP);
+ todo = SprintCString(&ss->sprinter, lval);
+ if (op == JSOP_SETSP)
+ return pc;
+ LOCAL_ASSERT(*pc == JSOP_POP);
+ break;
+
+ case JSOP_SETARG:
+ case JSOP_SETVAR:
+ case JSOP_SETGVAR:
+ case JSOP_SETLOCAL:
+ LOCAL_ASSERT(pc[oplen] == JSOP_POP || pc[oplen] == JSOP_SETSP);
+ /* FALL THROUGH */
+
+ case JSOP_SETLOCALPOP:
+ i = GET_UINT16(pc);
+ atom = NULL;
+ lval = NULL;
+ if (op == JSOP_SETARG)
+ atom = GetSlotAtom(jp, js_GetArgument, i);
+ else if (op == JSOP_SETVAR)
+ atom = GetSlotAtom(jp, js_GetLocalVariable, i);
+ else if (op == JSOP_SETGVAR)
+ atom = GET_ATOM(cx, jp->script, pc);
+ else
+ lval = GetLocal(ss, i);
+ if (atom)
+ lval = js_AtomToPrintableString(cx, atom);
+ LOCAL_ASSERT(lval);
+ todo = SprintCString(&ss->sprinter, lval);
+ if (op != JSOP_SETLOCALPOP) {
+ pc += oplen;
+ if (pc == endpc)
+ return pc;
+ LOAD_OP_DATA(pc);
+ if (op == JSOP_SETSP)
+ return pc;
+ LOCAL_ASSERT(op == JSOP_POP);
+ }
+ break;
+
+ default:
+ /*
+ * We may need to auto-parenthesize the left-most value decompiled
+ * here, so add back PAREN_SLOP temporarily. Then decompile until the
+ * opcode that would reduce the stack depth to (ss->top-1), which we
+ * pass to Decompile encoded as -(ss->top-1) - 1 or just -ss->top for
+ * the nb parameter.
+ */
+ todo = ss->sprinter.offset;
+ ss->sprinter.offset = todo + PAREN_SLOP;
+ pc = Decompile(ss, pc, -ss->top);
+ if (!pc)
+ return NULL;
+ if (pc == endpc)
+ return pc;
+ LOAD_OP_DATA(pc);
+ LOCAL_ASSERT(op == JSOP_ENUMELEM || op == JSOP_ENUMCONSTELEM);
+ xval = PopStr(ss, JSOP_NOP);
+ lval = PopStr(ss, JSOP_GETPROP);
+ ss->sprinter.offset = todo;
+ if (*lval == '\0') {
+ /* lval is from JSOP_BINDNAME, so just print xval. */
+ todo = SprintCString(&ss->sprinter, xval);
+ } else if (*xval == '\0') {
+ /* xval is from JSOP_SETCALL or JSOP_BINDXMLNAME, print lval. */
+ todo = SprintCString(&ss->sprinter, lval);
+ } else {
+ todo = Sprint(&ss->sprinter,
+ (js_CodeSpec[ss->opcodes[ss->top+1]].format
+ & JOF_XMLNAME)
+ ? "%s.%s"
+ : "%s[%s]",
+ lval, xval);
+ }
+ break;
+ }
+
+ if (todo < 0)
+ return NULL;
+
+ LOCAL_ASSERT(pc < endpc);
+ pc += oplen;
+ return pc;
+}
+
+/*
+ * Starting with a SRC_DESTRUCT-annotated JSOP_DUP, decompile a destructuring
+ * left-hand side object or array initialiser, including nested destructuring
+ * initialisers. On successful return, the decompilation will be pushed on ss
+ * and the return value will point to the POP or GROUP bytecode following the
+ * destructuring expression.
+ *
+ * At any point, if pc is equal to endpc and would otherwise advance, we stop
+ * immediately and return endpc.
+ */
+static jsbytecode *
+DecompileDestructuring(SprintStack *ss, jsbytecode *pc, jsbytecode *endpc)
+{
+ ptrdiff_t head, todo;
+ JSContext *cx;
+ JSPrinter *jp;
+ JSOp op, saveop;
+ const JSCodeSpec *cs;
+ uintN oplen;
+ jsint i, lasti;
+ jsdouble d;
+ const char *lval;
+ jsbytecode *pc2;
+ jsatomid atomIndex;
+ JSAtom *atom;
+ jssrcnote *sn;
+ JSString *str;
+ JSBool hole;
+
+ LOCAL_ASSERT(*pc == JSOP_DUP);
+ pc += JSOP_DUP_LENGTH;
+
+ /*
+ * Set head so we can rewrite '[' to '{' as needed. Back up PAREN_SLOP
+ * chars so the destructuring decompilation accumulates contiguously in
+ * ss->sprinter starting with "[".
+ */
+ head = SprintPut(&ss->sprinter, "[", 1);
+ if (head < 0 || !PushOff(ss, head, JSOP_NOP))
+ return NULL;
+ ss->sprinter.offset -= PAREN_SLOP;
+ LOCAL_ASSERT(head == ss->sprinter.offset - 1);
+ LOCAL_ASSERT(*OFF2STR(&ss->sprinter, head) == '[');
+
+ cx = ss->sprinter.context;
+ jp = ss->printer;
+ lasti = -1;
+
+ while (pc < endpc) {
+ LOAD_OP_DATA(pc);
+ saveop = op;
+
+ switch (op) {
+ case JSOP_POP:
+ pc += oplen;
+ goto out;
+
+ /* Handle the optimized number-pushing opcodes. */
+ case JSOP_ZERO: d = i = 0; goto do_getelem;
+ case JSOP_ONE: d = i = 1; goto do_getelem;
+ case JSOP_UINT16: d = i = GET_UINT16(pc); goto do_getelem;
+ case JSOP_UINT24: d = i = GET_UINT24(pc); goto do_getelem;
+
+ /* Handle the extended literal form of JSOP_NUMBER. */
+ case JSOP_LITOPX:
+ atomIndex = GET_LITERAL_INDEX(pc);
+ pc2 = pc + 1 + LITERAL_INDEX_LEN;
+ op = *pc2;
+ LOCAL_ASSERT(op == JSOP_NUMBER);
+ goto do_getatom;
+
+ case JSOP_NUMBER:
+ atomIndex = GET_ATOM_INDEX(pc);
+
+ do_getatom:
+ atom = js_GetAtom(cx, &jp->script->atomMap, atomIndex);
+ d = *ATOM_TO_DOUBLE(atom);
+ LOCAL_ASSERT(JSDOUBLE_IS_FINITE(d) && !JSDOUBLE_IS_NEGZERO(d));
+ i = (jsint)d;
+
+ do_getelem:
+ sn = js_GetSrcNote(jp->script, pc);
+ pc += oplen;
+ if (pc == endpc)
+ return pc;
+ LOAD_OP_DATA(pc);
+ LOCAL_ASSERT(op == JSOP_GETELEM);
+
+ /* Distinguish object from array by opcode or source note. */
+ if (saveop == JSOP_LITERAL ||
+ (sn && SN_TYPE(sn) == SRC_INITPROP)) {
+ *OFF2STR(&ss->sprinter, head) = '{';
+ if (Sprint(&ss->sprinter, "%g: ", d) < 0)
+ return NULL;
+ } else {
+ /* Sanity check for the gnarly control flow above. */
+ LOCAL_ASSERT(i == d);
+
+ /* Fill in any holes (holes at the end don't matter). */
+ while (++lasti < i) {
+ if (SprintPut(&ss->sprinter, ", ", 2) < 0)
+ return NULL;
+ }
+ }
+ break;
+
+ case JSOP_LITERAL:
+ atomIndex = GET_LITERAL_INDEX(pc);
+ goto do_getatom;
+
+ case JSOP_GETPROP:
+ *OFF2STR(&ss->sprinter, head) = '{';
+ atom = GET_ATOM(cx, jp->script, pc);
+ str = ATOM_TO_STRING(atom);
+ if (!QuoteString(&ss->sprinter, str,
+ js_IsIdentifier(str) ? 0 : (jschar)'\'')) {
+ return NULL;
+ }
+ if (SprintPut(&ss->sprinter, ": ", 2) < 0)
+ return NULL;
+ break;
+
+ default:
+ LOCAL_ASSERT(0);
+ }
+
+ pc += oplen;
+ if (pc == endpc)
+ return pc;
+
+ /*
+ * Decompile the left-hand side expression whose bytecode starts at pc
+ * and continues for a bounded number of bytecodes or stack operations
+ * (and which in any event stops before endpc).
+ */
+ pc = DecompileDestructuringLHS(ss, pc, endpc, &hole);
+ if (!pc)
+ return NULL;
+ if (pc == endpc || *pc != JSOP_DUP)
+ break;
+
+ /*
+ * Check for SRC_DESTRUCT on this JSOP_DUP, which would mean another
+ * destructuring initialiser abuts this one, and we should stop. This
+ * happens with source of the form '[a] = [b] = c'.
+ */
+ sn = js_GetSrcNote(jp->script, pc);
+ if (sn && SN_TYPE(sn) == SRC_DESTRUCT)
+ break;
+
+ if (!hole && SprintPut(&ss->sprinter, ", ", 2) < 0)
+ return NULL;
+
+ pc += JSOP_DUP_LENGTH;
+ }
+
+out:
+ lval = OFF2STR(&ss->sprinter, head);
+ todo = SprintPut(&ss->sprinter, (*lval == '[') ? "]" : "}", 1);
+ if (todo < 0)
+ return NULL;
+ return pc;
+}
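+
+/*
+ * Editorial sketch -- not part of the original js-1.7 source. Two shapes the
+ * routine above reconstructs from SRC_DESTRUCT-annotated bytecode:
+ *
+ *     [a, b] = rhs          // array pattern; skipped elements print as ", "
+ *     {x: a, y: b} = rhs    // object pattern; the '[' at head is rewritten
+ *                           // to '{' once a property get is seen
+ */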
+
+static jsbytecode *
+DecompileGroupAssignment(SprintStack *ss, jsbytecode *pc, jsbytecode *endpc,
+ jssrcnote *sn, ptrdiff_t *todop)
+{
+ JSOp op;
+ const JSCodeSpec *cs;
+ uintN oplen, start, end, i;
+ ptrdiff_t todo;
+ JSBool hole;
+ const char *rval;
+
+ LOAD_OP_DATA(pc);
+ LOCAL_ASSERT(op == JSOP_PUSH || op == JSOP_GETLOCAL);
+
+ todo = Sprint(&ss->sprinter, "%s[", VarPrefix(sn));
+ if (todo < 0 || !PushOff(ss, todo, JSOP_NOP))
+ return NULL;
+ ss->sprinter.offset -= PAREN_SLOP;
+
+ for (;;) {
+ pc += oplen;
+ if (pc == endpc)
+ return pc;
+ pc = DecompileDestructuringLHS(ss, pc, endpc, &hole);
+ if (!pc)
+ return NULL;
+ if (pc == endpc)
+ return pc;
+ LOAD_OP_DATA(pc);
+ if (op != JSOP_PUSH && op != JSOP_GETLOCAL)
+ break;
+ if (!hole && SprintPut(&ss->sprinter, ", ", 2) < 0)
+ return NULL;
+ }
+
+ LOCAL_ASSERT(op == JSOP_SETSP);
+ if (SprintPut(&ss->sprinter, "] = [", 5) < 0)
+ return NULL;
+
+ start = GET_UINT16(pc);
+ end = ss->top - 1;
+ for (i = start; i < end; i++) {
+ rval = GetStr(ss, i);
+ if (Sprint(&ss->sprinter, "%s%s",
+ (i == start) ? "" : ", ",
+ (i == end - 1 && *rval == '\0') ? ", " : rval) < 0) {
+ return NULL;
+ }
+ }
+
+ if (SprintPut(&ss->sprinter, "]", 1) < 0)
+ return NULL;
+ ss->sprinter.offset = ss->offsets[i];
+ ss->top = start;
+ *todop = todo;
+ return pc;
+}
+
+#undef LOCAL_ASSERT
+#undef LOAD_OP_DATA
+
+#endif /* JS_HAS_DESTRUCTURING */
+
+/*
+ * If nb is non-negative, decompile nb bytecodes starting at pc. Otherwise
+ * the decompiler starts at pc and continues until it reaches an opcode for
+ * which decompiling would result in the stack depth equaling -(nb + 1).
+ */
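+
+/*
+ * Editorial note -- not part of the original js-1.7 source. Both calling
+ * modes appear in this file: js_DecompileCode-style callers pass a
+ * non-negative byte count, while e.g. DecompileDestructuringLHS above calls
+ * Decompile(ss, pc, -ss->top) to stop as soon as the modelled stack depth
+ * would drop to ss->top - 1.
+ */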
+static jsbytecode *
+Decompile(SprintStack *ss, jsbytecode *pc, intN nb)
+{
+ JSContext *cx;
+ JSPrinter *jp, *jp2;
+ jsbytecode *startpc, *endpc, *pc2, *done, *forelem_tail, *forelem_done;
+ ptrdiff_t tail, todo, len, oplen, cond, next;
+ JSOp op, lastop, saveop;
+ const JSCodeSpec *cs;
+ jssrcnote *sn, *sn2;
+ const char *lval, *rval, *xval, *fmt;
+ jsint i, argc;
+ char **argv;
+ jsatomid atomIndex;
+ JSAtom *atom;
+ JSObject *obj;
+ JSFunction *fun;
+ JSString *str;
+ JSBool ok;
+#if JS_HAS_XML_SUPPORT
+ JSBool foreach, inXML, quoteAttr;
+#else
+#define inXML JS_FALSE
+#endif
+ jsval val;
+ int stackDummy;
+
+ static const char exception_cookie[] = "/*EXCEPTION*/";
+ static const char retsub_pc_cookie[] = "/*RETSUB_PC*/";
+ static const char forelem_cookie[] = "/*FORELEM*/";
+ static const char with_cookie[] = "/*WITH*/";
+ static const char dot_format[] = "%s.%s";
+ static const char index_format[] = "%s[%s]";
+ static const char predot_format[] = "%s%s.%s";
+ static const char postdot_format[] = "%s.%s%s";
+ static const char preindex_format[] = "%s%s[%s]";
+ static const char postindex_format[] = "%s[%s]%s";
+ static const char ss_format[] = "%s%s";
+
+/*
+ * Local macros
+ */
+#define DECOMPILE_CODE(pc,nb) if (!Decompile(ss, pc, nb)) return NULL
+#define POP_STR() PopStr(ss, op)
+#define LOCAL_ASSERT(expr) LOCAL_ASSERT_RV(expr, JS_FALSE)
+
+/*
+ * Callers know that ATOM_IS_STRING(atom), and we leave it to the optimizer to
+ * common ATOM_TO_STRING(atom) here and near the call sites.
+ */
+#define ATOM_IS_IDENTIFIER(atom) js_IsIdentifier(ATOM_TO_STRING(atom))
+#define ATOM_IS_KEYWORD(atom) \
+ (js_CheckKeyword(JSSTRING_CHARS(ATOM_TO_STRING(atom)), \
+ JSSTRING_LENGTH(ATOM_TO_STRING(atom))) != TOK_EOF)
+
+/*
+ * Given an atom already fetched from jp->script's atom map, quote/escape its
+ * string appropriately into rval, and select fmt from the quoted and unquoted
+ * alternatives.
+ */
+#define GET_QUOTE_AND_FMT(qfmt, ufmt, rval) \
+ JS_BEGIN_MACRO \
+ jschar quote_; \
+ if (!ATOM_IS_IDENTIFIER(atom)) { \
+ quote_ = '\''; \
+ fmt = qfmt; \
+ } else { \
+ quote_ = 0; \
+ fmt = ufmt; \
+ } \
+ rval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), quote_); \
+ if (!rval) \
+ return NULL; \
+ JS_END_MACRO
+
+/*
+ * Get atom from jp->script's atom map, quote/escape its string appropriately
+ * into rval, and select fmt from the quoted and unquoted alternatives.
+ */
+#define GET_ATOM_QUOTE_AND_FMT(qfmt, ufmt, rval) \
+ JS_BEGIN_MACRO \
+ atom = GET_ATOM(cx, jp->script, pc); \
+ GET_QUOTE_AND_FMT(qfmt, ufmt, rval); \
+ JS_END_MACRO
+
+ cx = ss->sprinter.context;
+ if (!JS_CHECK_STACK_SIZE(cx, stackDummy)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_OVER_RECURSED);
+ return NULL;
+ }
+
+ jp = ss->printer;
+ startpc = pc;
+ endpc = (nb < 0) ? jp->script->code + jp->script->length : pc + nb;
+ forelem_tail = forelem_done = NULL;
+ tail = -1;
+ todo = -2; /* NB: different from Sprint() error return. */
+ saveop = JSOP_NOP;
+ sn = NULL;
+ rval = NULL;
+#if JS_HAS_XML_SUPPORT
+ foreach = inXML = quoteAttr = JS_FALSE;
+#endif
+
+ while (nb < 0 || pc < endpc) {
+ /*
+ * Move saveop to lastop so prefixed bytecodes can take special action
+ * while sharing maximal code. Set op and saveop to the new bytecode,
+ * use op in POP_STR to trigger automatic parenthesization, but push
+ * saveop at the bottom of the loop if this op pushes. Thus op may be
+ * set to nop or otherwise mutated to suppress auto-parens.
+ */
+ lastop = saveop;
+ op = saveop = (JSOp) *pc;
+ cs = &js_CodeSpec[saveop];
+ len = oplen = cs->length;
+
+ if (nb < 0 && -(nb + 1) == (intN)ss->top - cs->nuses + cs->ndefs)
+ return pc;
+
+ if (pc + oplen == jp->dvgfence) {
+ JSStackFrame *fp;
+ uint32 format, mode, type;
+
+ /*
+ * Rewrite non-get ops to their "get" format if the error is in
+ * the bytecode at pc, so we don't decompile more than the error
+ * expression.
+ */
+ for (fp = cx->fp; fp && !fp->script; fp = fp->down)
+ continue;
+ format = cs->format;
+ if (((fp && pc == fp->pc) ||
+ (pc == startpc && cs->nuses != 0)) &&
+ format & (JOF_SET|JOF_DEL|JOF_INCDEC|JOF_IMPORT|JOF_FOR)) {
+ mode = (format & JOF_MODEMASK);
+ if (mode == JOF_NAME) {
+ /*
+ * JOF_NAME does not imply JOF_CONST, so we must check for
+ * the QARG and QVAR format types, and translate those to
+ * JSOP_GETARG or JSOP_GETVAR appropriately, instead of to
+ * JSOP_NAME.
+ */
+ type = format & JOF_TYPEMASK;
+ op = (type == JOF_QARG)
+ ? JSOP_GETARG
+ : (type == JOF_QVAR)
+ ? JSOP_GETVAR
+ : (type == JOF_LOCAL)
+ ? JSOP_GETLOCAL
+ : JSOP_NAME;
+
+ i = cs->nuses - js_CodeSpec[op].nuses;
+ while (--i >= 0)
+ PopOff(ss, JSOP_NOP);
+ } else {
+ /*
+ * We must replace the faulting pc's bytecode with a
+ * corresponding JSOP_GET* code. For JSOP_SET{PROP,ELEM},
+ * we must use the "2nd" form of JSOP_GET{PROP,ELEM}, to
+ * throw away the assignment op's right-hand operand and
+ * decompile it as if it were a GET of its left-hand
+ * operand.
+ */
+ if (mode == JOF_PROP) {
+ op = (format & JOF_SET) ? JSOP_GETPROP2 : JSOP_GETPROP;
+ } else if (mode == JOF_ELEM) {
+ op = (format & JOF_SET) ? JSOP_GETELEM2 : JSOP_GETELEM;
+ } else {
+ /*
+ * Zero mode means precisely that op is uncategorized
+ * for our purposes, so we must write per-op special
+ * case code here.
+ */
+ switch (op) {
+ case JSOP_ENUMELEM:
+ case JSOP_ENUMCONSTELEM:
+ op = JSOP_GETELEM;
+ break;
+#if JS_HAS_LVALUE_RETURN
+ case JSOP_SETCALL:
+ op = JSOP_CALL;
+ break;
+#endif
+ default:
+ LOCAL_ASSERT(0);
+ }
+ }
+ }
+ }
+
+ saveop = op;
+ if (op >= JSOP_LIMIT) {
+ switch (op) {
+ case JSOP_GETPROP2:
+ saveop = JSOP_GETPROP;
+ break;
+ case JSOP_GETELEM2:
+ saveop = JSOP_GETELEM;
+ break;
+ default:;
+ }
+ }
+ LOCAL_ASSERT(js_CodeSpec[saveop].length == oplen);
+
+ jp->dvgfence = NULL;
+ }
+
+ if (cs->token) {
+ switch (cs->nuses) {
+ case 2:
+ sn = js_GetSrcNote(jp->script, pc);
+ if (sn && SN_TYPE(sn) == SRC_ASSIGNOP) {
+ /*
+ * Avoid over-parenthesizing y in x op= y based on its
+ * expansion: x = x op y (replace y by z = w to see the
+ * problem).
+ */
+ op = pc[oplen];
+ LOCAL_ASSERT(op != saveop);
+ }
+ rval = POP_STR();
+ lval = POP_STR();
+ if (op != saveop) {
+ /* Print only the right operand of the assignment-op. */
+ todo = SprintCString(&ss->sprinter, rval);
+ op = saveop;
+ } else if (!inXML) {
+ todo = Sprint(&ss->sprinter, "%s %s %s",
+ lval, cs->token, rval);
+ } else {
+ /* In XML, just concatenate the two operands. */
+ LOCAL_ASSERT(op == JSOP_ADD);
+ todo = Sprint(&ss->sprinter, ss_format, lval, rval);
+ }
+ break;
+
+ case 1:
+ rval = POP_STR();
+ todo = Sprint(&ss->sprinter, ss_format, cs->token, rval);
+ break;
+
+ case 0:
+ todo = SprintCString(&ss->sprinter, cs->token);
+ break;
+
+ default:
+ todo = -2;
+ break;
+ }
+ } else {
+ switch (op) {
+#define BEGIN_LITOPX_CASE(OP) \
+ case OP: \
+ atomIndex = GET_ATOM_INDEX(pc); \
+ do_##OP: \
+ atom = js_GetAtom(cx, &jp->script->atomMap, atomIndex);
+
+#define END_LITOPX_CASE \
+ break;
+
+ case JSOP_NOP:
+ /*
+ * Check for a do-while loop, a for-loop with an empty
+ * initializer part, a labeled statement, a function
+ * definition, or try/finally.
+ */
+ sn = js_GetSrcNote(jp->script, pc);
+ todo = -2;
+ switch (sn ? SN_TYPE(sn) : SRC_NULL) {
+ case SRC_WHILE:
+ js_printf(SET_MAYBE_BRACE(jp), "\tdo {\n");
+ jp->indent += 4;
+ break;
+
+ case SRC_FOR:
+ rval = "";
+
+ do_forloop:
+ /* Skip the JSOP_NOP or JSOP_POP bytecode. */
+ pc++;
+
+ /* Get the cond, next, and loop-closing tail offsets. */
+ cond = js_GetSrcNoteOffset(sn, 0);
+ next = js_GetSrcNoteOffset(sn, 1);
+ tail = js_GetSrcNoteOffset(sn, 2);
+ LOCAL_ASSERT(tail + GetJumpOffset(pc+tail, pc+tail) == 0);
+
+ /* Print the keyword and the possibly empty init-part. */
+ js_printf(jp, "\tfor (%s;", rval);
+
+ if (pc[cond] == JSOP_IFEQ || pc[cond] == JSOP_IFEQX) {
+ /* Decompile the loop condition. */
+ DECOMPILE_CODE(pc, cond);
+ js_printf(jp, " %s", POP_STR());
+ }
+
+ /* Need a semicolon whether or not there was a cond. */
+ js_puts(jp, ";");
+
+ if (pc[next] != JSOP_GOTO && pc[next] != JSOP_GOTOX) {
+ /* Decompile the loop updater. */
+ DECOMPILE_CODE(pc + next, tail - next - 1);
+ js_printf(jp, " %s", POP_STR());
+ }
+
+ /* Do the loop body. */
+ js_printf(SET_MAYBE_BRACE(jp), ") {\n");
+ jp->indent += 4;
+ oplen = (cond) ? js_CodeSpec[pc[cond]].length : 0;
+ DECOMPILE_CODE(pc + cond + oplen, next - cond - oplen);
+ jp->indent -= 4;
+ js_printf(jp, "\t}\n");
+
+ /* Set len so pc skips over the entire loop. */
+ len = tail + js_CodeSpec[pc[tail]].length;
+ break;
+
+ case SRC_LABEL:
+ atom = js_GetAtom(cx, &jp->script->atomMap,
+ (jsatomid) js_GetSrcNoteOffset(sn, 0));
+ jp->indent -= 4;
+ rval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0);
+ if (!rval)
+ return NULL;
+ RETRACT(&ss->sprinter, rval);
+ js_printf(CLEAR_MAYBE_BRACE(jp), "\t%s:\n", rval);
+ jp->indent += 4;
+ break;
+
+ case SRC_LABELBRACE:
+ atom = js_GetAtom(cx, &jp->script->atomMap,
+ (jsatomid) js_GetSrcNoteOffset(sn, 0));
+ rval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0);
+ if (!rval)
+ return NULL;
+ RETRACT(&ss->sprinter, rval);
+ js_printf(CLEAR_MAYBE_BRACE(jp), "\t%s: {\n", rval);
+ jp->indent += 4;
+ break;
+
+ case SRC_ENDBRACE:
+ jp->indent -= 4;
+ js_printf(jp, "\t}\n");
+ break;
+
+ case SRC_FUNCDEF:
+ atom = js_GetAtom(cx, &jp->script->atomMap,
+ (jsatomid) js_GetSrcNoteOffset(sn, 0));
+ LOCAL_ASSERT(ATOM_IS_OBJECT(atom));
+ do_function:
+ obj = ATOM_TO_OBJECT(atom);
+ fun = (JSFunction *) JS_GetPrivate(cx, obj);
+ jp2 = js_NewPrinter(cx, JS_GetFunctionName(fun),
+ jp->indent, jp->pretty);
+ if (!jp2)
+ return NULL;
+ jp2->scope = jp->scope;
+ js_puts(jp2, "\n");
+ ok = js_DecompileFunction(jp2, fun);
+ if (ok) {
+ js_puts(jp2, "\n");
+ str = js_GetPrinterOutput(jp2);
+ if (str)
+ js_printf(jp, "%s\n", JS_GetStringBytes(str));
+ else
+ ok = JS_FALSE;
+ }
+ js_DestroyPrinter(jp2);
+ if (!ok)
+ return NULL;
+
+ break;
+
+ case SRC_BRACE:
+ js_printf(CLEAR_MAYBE_BRACE(jp), "\t{\n");
+ jp->indent += 4;
+ len = js_GetSrcNoteOffset(sn, 0);
+ DECOMPILE_CODE(pc + oplen, len - oplen);
+ jp->indent -= 4;
+ js_printf(jp, "\t}\n");
+ break;
+
+ default:;
+ }
+ break;
+
+ case JSOP_GROUP:
+ cs = &js_CodeSpec[lastop];
+ if ((cs->prec != 0 &&
+ cs->prec == js_CodeSpec[pc[JSOP_GROUP_LENGTH]].prec) ||
+ pc[JSOP_GROUP_LENGTH] == JSOP_PUSHOBJ ||
+ pc[JSOP_GROUP_LENGTH] == JSOP_DUP) {
+ /*
+ * Force parens if this JSOP_GROUP forced re-association
+ * against precedence, or if this is a call or constructor
+ * expression, or if it is destructured (JSOP_DUP).
+ *
+ * This is necessary to handle the operator new grammar,
+ * by which new x(y).z means (new x(y)).z. For example
+ * new (x(y).z) must decompile with the constructor
+ * parenthesized, but normal precedence has JSOP_GETPROP
+ * (for the final .z) higher than JSOP_NEW. In general,
+ * if the call or constructor expression is parenthesized,
+ * we preserve parens.
+ */
+ op = JSOP_NAME;
+ rval = POP_STR();
+ todo = SprintCString(&ss->sprinter, rval);
+ } else {
+ /*
+ * Don't explicitly parenthesize -- just fix the top
+ * opcode so that the auto-parens magic in PopOff can do
+ * its thing.
+ */
+ LOCAL_ASSERT(ss->top != 0);
+ ss->opcodes[ss->top-1] = saveop = lastop;
+ todo = -2;
+ }
+ break;
+
+ case JSOP_STARTITER:
+ todo = -2;
+ break;
+
+ case JSOP_PUSH:
+#if JS_HAS_DESTRUCTURING
+ sn = js_GetSrcNote(jp->script, pc);
+ if (sn && SN_TYPE(sn) == SRC_GROUPASSIGN) {
+ pc = DecompileGroupAssignment(ss, pc, endpc, sn, &todo);
+ if (!pc)
+ return NULL;
+ LOCAL_ASSERT(*pc == JSOP_SETSP);
+ len = oplen = JSOP_SETSP_LENGTH;
+ goto end_groupassignment;
+ }
+#endif
+ /* FALL THROUGH */
+
+ case JSOP_PUSHOBJ:
+ case JSOP_BINDNAME:
+ do_JSOP_BINDNAME:
+ todo = Sprint(&ss->sprinter, "");
+ break;
+
+ case JSOP_TRY:
+ js_printf(CLEAR_MAYBE_BRACE(jp), "\ttry {\n");
+ jp->indent += 4;
+ todo = -2;
+ break;
+
+ case JSOP_FINALLY:
+ jp->indent -= 4;
+ js_printf(CLEAR_MAYBE_BRACE(jp), "\t} finally {\n");
+ jp->indent += 4;
+
+ /*
+ * We must push an empty string placeholder for gosub's return
+ * address, popped by JSOP_RETSUB and counted by script->depth
+ * but not by ss->top (see JSOP_SETSP, below).
+ */
+ todo = Sprint(&ss->sprinter, exception_cookie);
+ if (todo < 0 || !PushOff(ss, todo, op))
+ return NULL;
+ todo = Sprint(&ss->sprinter, retsub_pc_cookie);
+ break;
+
+ case JSOP_RETSUB:
+ rval = POP_STR();
+ LOCAL_ASSERT(strcmp(rval, retsub_pc_cookie) == 0);
+ lval = POP_STR();
+ LOCAL_ASSERT(strcmp(lval, exception_cookie) == 0);
+ todo = -2;
+ break;
+
+ case JSOP_SWAP:
+ /*
+ * We don't generate this opcode currently, and previously we
+ * did not need to decompile it. If old, serialized bytecode
+ * uses it still, we should fall through and set todo = -2.
+ */
+ /* FALL THROUGH */
+
+ case JSOP_GOSUB:
+ case JSOP_GOSUBX:
+ /*
+ * JSOP_GOSUB and GOSUBX have no effect on the decompiler's
+ * string stack because the next op in bytecode order finds
+ * the stack balanced by a JSOP_RETSUB executed elsewhere.
+ */
+ todo = -2;
+ break;
+
+ case JSOP_SETSP:
+ {
+ uintN newtop, oldtop, i;
+
+ /*
+ * The compiler models operand stack depth and fixes the stack
+ * pointer on entry to a catch clause based on its depth model.
+ * The decompiler must match the code generator's model, which
+ * is why JSOP_FINALLY pushes a cookie that JSOP_RETSUB pops.
+ */
+ newtop = (uintN) GET_UINT16(pc);
+ oldtop = ss->top;
+ LOCAL_ASSERT(newtop <= oldtop);
+ todo = -2;
+
+#if JS_HAS_DESTRUCTURING
+ sn = js_GetSrcNote(jp->script, pc);
+ if (sn && SN_TYPE(sn) == SRC_GROUPASSIGN) {
+ todo = Sprint(&ss->sprinter, "%s[] = [",
+ VarPrefix(sn));
+ if (todo < 0)
+ return NULL;
+ for (i = newtop; i < oldtop; i++) {
+ rval = OFF2STR(&ss->sprinter, ss->offsets[i]);
+ if (Sprint(&ss->sprinter, ss_format,
+ (i == newtop) ? "" : ", ",
+ (i == oldtop - 1 && *rval == '\0')
+ ? ", " : rval) < 0) {
+ return NULL;
+ }
+ }
+ if (SprintPut(&ss->sprinter, "]", 1) < 0)
+ return NULL;
+
+ /*
+ * Kill newtop before the end_groupassignment: label by
+ * retracting/popping early. Control will either jump to
+ * do_forloop: or do_letheadbody: or else break from our
+ * case JSOP_SETSP: after the switch (*pc2) below.
+ */
+ if (newtop < oldtop) {
+ ss->sprinter.offset = GetOff(ss, newtop);
+ ss->top = newtop;
+ }
+
+ end_groupassignment:
+ /*
+ * Thread directly to the next opcode if we can, to handle
+ * the special cases of a group assignment in the first or
+ * last part of a for(;;) loop head, or in a let block or
+ * expression head.
+ *
+ * NB: todo at this point indexes space in ss->sprinter
+ * that is liable to be overwritten. The code below knows
+ * exactly how long rval lives, or else copies it down via
+ * SprintCString.
+ */
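+ /* Example (illustrative): a head like "for ([a, b] = [1, 2]; cond; ...)"
+ is handled by the JSOP_NOP/SRC_FOR case just below. */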
+ rval = OFF2STR(&ss->sprinter, todo);
+ todo = -2;
+ pc2 = pc + oplen;
+ switch (*pc2) {
+ case JSOP_NOP:
+ /* First part of for(;;) or let block/expr head. */
+ sn = js_GetSrcNote(jp->script, pc2);
+ if (sn) {
+ if (SN_TYPE(sn) == SRC_FOR) {
+ pc = pc2;
+ goto do_forloop;
+ }
+ if (SN_TYPE(sn) == SRC_DECL) {
+ if (ss->top == jp->script->depth) {
+ /*
+ * This must be an empty destructuring
+ * in the head of a let whose body block
+ * is also empty.
+ */
+ pc = pc2 + 1;
+ len = js_GetSrcNoteOffset(sn, 0);
+ LOCAL_ASSERT(pc[len] == JSOP_LEAVEBLOCK);
+ js_printf(jp, "\tlet (%s) {\n", rval);
+ js_printf(jp, "\t}\n");
+ goto end_setsp;
+ }
+ todo = SprintCString(&ss->sprinter, rval);
+ if (todo < 0 || !PushOff(ss, todo, JSOP_NOP))
+ return NULL;
+ op = JSOP_POP;
+ pc = pc2 + 1;
+ goto do_letheadbody;
+ }
+ }
+ break;
+
+ case JSOP_GOTO:
+ case JSOP_GOTOX:
+ /* Third part of for(;;) loop head. */
+ cond = GetJumpOffset(pc2, pc2);
+ sn = js_GetSrcNote(jp->script, pc2 + cond - 1);
+ if (sn && SN_TYPE(sn) == SRC_FOR) {
+ todo = SprintCString(&ss->sprinter, rval);
+ saveop = JSOP_NOP;
+ }
+ break;
+ }
+
+ /*
+ * If control flow reaches this point with todo still -2,
+ * just print rval as an expression statement.
+ */
+ if (todo == -2)
+ js_printf(jp, "\t%s;\n", rval);
+ end_setsp:
+ break;
+ }
+#endif
+ if (newtop < oldtop) {
+ ss->sprinter.offset = GetOff(ss, newtop);
+ ss->top = newtop;
+ }
+ break;
+ }
+
+ case JSOP_EXCEPTION:
+ /* The catch decompiler handles this op itself. */
+ LOCAL_ASSERT(JS_FALSE);
+ break;
+
+ case JSOP_POP:
+ /*
+ * By default, do not automatically parenthesize when popping
+ * a stacked expression decompilation. We auto-parenthesize
+ * only when JSOP_POP is annotated with SRC_PCDELTA, meaning
+ * comma operator.
+ */
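+ /* Example (illustrative): the comma statement "a, b;" reaches the
+ SRC_PCDELTA case below, which rebuilds it as "lval, rval". */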
+ op = JSOP_POPV;
+ /* FALL THROUGH */
+
+ case JSOP_POPV:
+ sn = js_GetSrcNote(jp->script, pc);
+ switch (sn ? SN_TYPE(sn) : SRC_NULL) {
+ case SRC_FOR:
+ /* Force parens around 'in' expression at 'for' front. */
+ if (ss->opcodes[ss->top-1] == JSOP_IN)
+ op = JSOP_LSH;
+ rval = POP_STR();
+ todo = -2;
+ goto do_forloop;
+
+ case SRC_PCDELTA:
+ /* Comma operator: use JSOP_POP for correct precedence. */
+ op = JSOP_POP;
+
+ /* Pop and save to avoid blowing stack depth budget. */
+ lval = JS_strdup(cx, POP_STR());
+ if (!lval)
+ return NULL;
+
+ /*
+ * The offset tells the distance to the end of the right-hand
+ * operand of the comma operator.
+ */
+ done = pc + len;
+ pc += js_GetSrcNoteOffset(sn, 0);
+ len = 0;
+
+ if (!Decompile(ss, done, pc - done)) {
+ JS_free(cx, (char *)lval);
+ return NULL;
+ }
+
+ /* Pop Decompile result and print comma expression. */
+ rval = POP_STR();
+ todo = Sprint(&ss->sprinter, "%s, %s", lval, rval);
+ JS_free(cx, (char *)lval);
+ break;
+
+ case SRC_HIDDEN:
+ /* Hide this pop, it's from a goto in a with or for/in. */
+ todo = -2;
+ break;
+
+ case SRC_DECL:
+ /* This pop is at the end of the let block/expr head. */
+ pc += JSOP_POP_LENGTH;
+#if JS_HAS_DESTRUCTURING
+ do_letheadbody:
+#endif
+ len = js_GetSrcNoteOffset(sn, 0);
+ if (pc[len] == JSOP_LEAVEBLOCK) {
+ js_printf(CLEAR_MAYBE_BRACE(jp), "\tlet (%s) {\n",
+ POP_STR());
+ jp->indent += 4;
+ DECOMPILE_CODE(pc, len);
+ jp->indent -= 4;
+ js_printf(jp, "\t}\n");
+ todo = -2;
+ } else {
+ LOCAL_ASSERT(pc[len] == JSOP_LEAVEBLOCKEXPR);
+
+ lval = JS_strdup(cx, POP_STR());
+ if (!lval)
+ return NULL;
+
+ if (!Decompile(ss, pc, len)) {
+ JS_free(cx, (char *)lval);
+ return NULL;
+ }
+ rval = POP_STR();
+ todo = Sprint(&ss->sprinter,
+ (*rval == '{')
+ ? "let (%s) (%s)"
+ : "let (%s) %s",
+ lval, rval);
+ JS_free(cx, (char *)lval);
+ }
+ break;
+
+ default:
+ /* Turn off parens around a yield statement. */
+ if (ss->opcodes[ss->top-1] == JSOP_YIELD)
+ op = JSOP_NOP;
+
+ rval = POP_STR();
+ if (*rval != '\0') {
+#if JS_HAS_BLOCK_SCOPE
+ /*
+ * If a let declaration is the only child of a control
+ * structure that does not require braces, it must not
+ * be braced. If it were braced explicitly, it would
+ * be bracketed by JSOP_ENTERBLOCK/JSOP_LEAVEBLOCK.
+ */
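+ /* Example (illustrative): "if (x) let y = 1;" must not gain braces
+ around the let when re-emitted. */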
+ if (jp->braceState == MAYBE_BRACE &&
+ pc + JSOP_POP_LENGTH == endpc &&
+ !strncmp(rval, var_prefix[SRC_DECL_LET], 4) &&
+ rval[4] != '(') {
+ SetDontBrace(jp);
+ }
+#endif
+ js_printf(jp,
+ (*rval == '{' ||
+ (strncmp(rval, js_function_str, 8) == 0 &&
+ rval[8] == ' '))
+ ? "\t(%s);\n"
+ : "\t%s;\n",
+ rval);
+ }
+ todo = -2;
+ break;
+ }
+ break;
+
+ case JSOP_POP2:
+ case JSOP_ENDITER:
+ sn = js_GetSrcNote(jp->script, pc);
+ todo = -2;
+ if (sn && SN_TYPE(sn) == SRC_HIDDEN)
+ break;
+ (void) PopOff(ss, op);
+ if (op == JSOP_POP2)
+ (void) PopOff(ss, op);
+ break;
+
+ case JSOP_ENTERWITH:
+ LOCAL_ASSERT(!js_GetSrcNote(jp->script, pc));
+ rval = POP_STR();
+ js_printf(SET_MAYBE_BRACE(jp), "\twith (%s) {\n", rval);
+ jp->indent += 4;
+ todo = Sprint(&ss->sprinter, with_cookie);
+ break;
+
+ case JSOP_LEAVEWITH:
+ sn = js_GetSrcNote(jp->script, pc);
+ todo = -2;
+ if (sn && SN_TYPE(sn) == SRC_HIDDEN)
+ break;
+ rval = POP_STR();
+ LOCAL_ASSERT(strcmp(rval, with_cookie) == 0);
+ jp->indent -= 4;
+ js_printf(jp, "\t}\n");
+ break;
+
+ BEGIN_LITOPX_CASE(JSOP_ENTERBLOCK)
+ {
+ JSAtom **atomv, *smallv[5];
+ JSScopeProperty *sprop;
+
+ obj = ATOM_TO_OBJECT(atom);
+ argc = OBJ_BLOCK_COUNT(cx, obj);
+ if ((size_t)argc <= sizeof smallv / sizeof smallv[0]) {
+ atomv = smallv;
+ } else {
+ atomv = (JSAtom **) JS_malloc(cx, argc * sizeof(JSAtom *));
+ if (!atomv)
+ return NULL;
+ }
+
+ /* From here on, control must flow through enterblock_out. */
+ for (sprop = OBJ_SCOPE(obj)->lastProp; sprop;
+ sprop = sprop->parent) {
+ if (!(sprop->flags & SPROP_HAS_SHORTID))
+ continue;
+ LOCAL_ASSERT(sprop->shortid < argc);
+ atomv[sprop->shortid] = JSID_TO_ATOM(sprop->id);
+ }
+ ok = JS_TRUE;
+ for (i = 0; i < argc; i++) {
+ atom = atomv[i];
+ rval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0);
+ if (!rval ||
+ !PushOff(ss, STR2OFF(&ss->sprinter, rval), op)) {
+ ok = JS_FALSE;
+ goto enterblock_out;
+ }
+ }
+
+ sn = js_GetSrcNote(jp->script, pc);
+ switch (sn ? SN_TYPE(sn) : SRC_NULL) {
+#if JS_HAS_BLOCK_SCOPE
+ case SRC_BRACE:
+ js_printf(CLEAR_MAYBE_BRACE(jp), "\t{\n");
+ jp->indent += 4;
+ len = js_GetSrcNoteOffset(sn, 0);
+ ok = Decompile(ss, pc + oplen, len - oplen) != NULL;
+ if (!ok)
+ goto enterblock_out;
+ jp->indent -= 4;
+ js_printf(jp, "\t}\n");
+ break;
+#endif
+
+ case SRC_CATCH:
+ jp->indent -= 4;
+ js_printf(CLEAR_MAYBE_BRACE(jp), "\t} catch (");
+
+ pc2 = pc;
+ pc += oplen;
+ LOCAL_ASSERT(*pc == JSOP_EXCEPTION);
+ pc += JSOP_EXCEPTION_LENGTH;
+ if (*pc == JSOP_DUP) {
+ sn2 = js_GetSrcNote(jp->script, pc);
+ if (sn2 && SN_TYPE(sn2) == SRC_HIDDEN) {
+ /*
+ * This is a hidden dup to save the exception for
+ * later. It must exist only when the catch has
+ * an exception guard.
+ */
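+ /* Example (illustrative): a guarded catch such as
+ "catch (e if e instanceof TypeError)" needs the saved exception. */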
+ LOCAL_ASSERT(js_GetSrcNoteOffset(sn, 0) != 0);
+ pc += JSOP_DUP_LENGTH;
+ }
+ }
+#if JS_HAS_DESTRUCTURING
+ if (*pc == JSOP_DUP) {
+ pc = DecompileDestructuring(ss, pc, endpc);
+ if (!pc) {
+ ok = JS_FALSE;
+ goto enterblock_out;
+ }
+ LOCAL_ASSERT(*pc == JSOP_POP);
+ pc += JSOP_POP_LENGTH;
+ lval = PopStr(ss, JSOP_NOP);
+ js_puts(jp, lval);
+ } else {
+#endif
+ LOCAL_ASSERT(*pc == JSOP_SETLOCALPOP);
+ i = GET_UINT16(pc);
+ pc += JSOP_SETLOCALPOP_LENGTH;
+ atom = atomv[i - OBJ_BLOCK_DEPTH(cx, obj)];
+ str = ATOM_TO_STRING(atom);
+ if (!QuoteString(&jp->sprinter, str, 0)) {
+ ok = JS_FALSE;
+ goto enterblock_out;
+ }
+#if JS_HAS_DESTRUCTURING
+ }
+#endif
+
+ len = js_GetSrcNoteOffset(sn, 0);
+ if (len) {
+ len -= PTRDIFF(pc, pc2, jsbytecode);
+ LOCAL_ASSERT(len > 0);
+ js_printf(jp, " if ");
+ ok = Decompile(ss, pc, len) != NULL;
+ if (!ok)
+ goto enterblock_out;
+ js_printf(jp, "%s", POP_STR());
+ pc += len;
+ LOCAL_ASSERT(*pc == JSOP_IFEQ || *pc == JSOP_IFEQX);
+ pc += js_CodeSpec[*pc].length;
+ }
+
+ js_printf(jp, ") {\n");
+ jp->indent += 4;
+ len = 0;
+ break;
+ }
+
+ todo = -2;
+
+ enterblock_out:
+ if (atomv != smallv)
+ JS_free(cx, atomv);
+ if (!ok)
+ return NULL;
+ }
+ END_LITOPX_CASE
+
+ case JSOP_LEAVEBLOCK:
+ case JSOP_LEAVEBLOCKEXPR:
+ {
+ uintN top, depth;
+
+ sn = js_GetSrcNote(jp->script, pc);
+ todo = -2;
+ if (op == JSOP_LEAVEBLOCKEXPR) {
+ LOCAL_ASSERT(SN_TYPE(sn) == SRC_PCBASE);
+ rval = POP_STR();
+ } else if (sn) {
+ LOCAL_ASSERT(op == JSOP_LEAVEBLOCK);
+ if (SN_TYPE(sn) == SRC_HIDDEN)
+ break;
+ LOCAL_ASSERT(SN_TYPE(sn) == SRC_CATCH);
+ LOCAL_ASSERT((uintN)js_GetSrcNoteOffset(sn, 0) == ss->top);
+ }
+ top = ss->top;
+ depth = GET_UINT16(pc);
+ LOCAL_ASSERT(top >= depth);
+ top -= depth;
+ ss->top = top;
+ ss->sprinter.offset = GetOff(ss, top);
+ if (op == JSOP_LEAVEBLOCKEXPR)
+ todo = SprintCString(&ss->sprinter, rval);
+ break;
+ }
+
+ case JSOP_GETLOCAL:
+ i = GET_UINT16(pc);
+ sn = js_GetSrcNote(jp->script, pc);
+ LOCAL_ASSERT((uintN)i < ss->top);
+ rval = GetLocal(ss, i);
+
+#if JS_HAS_DESTRUCTURING
+ if (sn && SN_TYPE(sn) == SRC_GROUPASSIGN) {
+ pc = DecompileGroupAssignment(ss, pc, endpc, sn, &todo);
+ if (!pc)
+ return NULL;
+ LOCAL_ASSERT(*pc == JSOP_SETSP);
+ len = oplen = JSOP_SETSP_LENGTH;
+ goto end_groupassignment;
+ }
+#endif
+
+ todo = Sprint(&ss->sprinter, ss_format, VarPrefix(sn), rval);
+ break;
+
+ case JSOP_SETLOCAL:
+ case JSOP_SETLOCALPOP:
+ i = GET_UINT16(pc);
+ lval = GetStr(ss, i);
+ rval = POP_STR();
+ goto do_setlval;
+
+ case JSOP_INCLOCAL:
+ case JSOP_DECLOCAL:
+ i = GET_UINT16(pc);
+ lval = GetLocal(ss, i);
+ goto do_inclval;
+
+ case JSOP_LOCALINC:
+ case JSOP_LOCALDEC:
+ i = GET_UINT16(pc);
+ lval = GetLocal(ss, i);
+ goto do_lvalinc;
+
+ case JSOP_FORLOCAL:
+ i = GET_UINT16(pc);
+ lval = GetStr(ss, i);
+ atom = NULL;
+ goto do_forlvalinloop;
+
+ case JSOP_RETRVAL:
+ todo = -2;
+ break;
+
+ case JSOP_SETRVAL:
+ case JSOP_RETURN:
+ rval = POP_STR();
+ if (*rval != '\0')
+ js_printf(jp, "\t%s %s;\n", js_return_str, rval);
+ else
+ js_printf(jp, "\t%s;\n", js_return_str);
+ todo = -2;
+ break;
+
+#if JS_HAS_GENERATORS
+ case JSOP_YIELD:
+ op = JSOP_SETNAME; /* turn off most parens */
+ rval = POP_STR();
+ todo = (*rval != '\0')
+ ? Sprint(&ss->sprinter,
+ (strncmp(rval, js_yield_str, 5) == 0 &&
+ (rval[5] == ' ' || rval[5] == '\0'))
+ ? "%s (%s)"
+ : "%s %s",
+ js_yield_str, rval)
+ : SprintCString(&ss->sprinter, js_yield_str);
+ break;
+
+ case JSOP_ARRAYPUSH:
+ {
+ uintN pos, blockpos, startpos;
+ ptrdiff_t start;
+
+ rval = POP_STR();
+ pos = ss->top;
+ while ((op = ss->opcodes[--pos]) != JSOP_ENTERBLOCK &&
+ op != JSOP_NEWINIT) {
+ LOCAL_ASSERT(pos != 0);
+ }
+ blockpos = pos;
+ while (ss->opcodes[pos] == JSOP_ENTERBLOCK) {
+ if (pos == 0)
+ break;
+ --pos;
+ }
+ LOCAL_ASSERT(ss->opcodes[pos] == JSOP_NEWINIT);
+ startpos = pos;
+ start = ss->offsets[pos];
+ LOCAL_ASSERT(ss->sprinter.base[start] == '[' ||
+ ss->sprinter.base[start] == '#');
+ pos = blockpos;
+ while (ss->opcodes[++pos] == JSOP_STARTITER)
+ LOCAL_ASSERT(pos < ss->top);
+ LOCAL_ASSERT(pos < ss->top);
+ xval = OFF2STR(&ss->sprinter, ss->offsets[pos]);
+ lval = OFF2STR(&ss->sprinter, start);
+ RETRACT(&ss->sprinter, lval);
+ todo = Sprint(&ss->sprinter, "%s%s%.*s",
+ lval, rval, rval - xval, xval);
+ if (todo < 0)
+ return NULL;
+ ss->offsets[startpos] = todo;
+ todo = -2;
+ break;
+ }
+#endif
+
+ case JSOP_THROWING:
+ todo = -2;
+ break;
+
+ case JSOP_THROW:
+ sn = js_GetSrcNote(jp->script, pc);
+ todo = -2;
+ if (sn && SN_TYPE(sn) == SRC_HIDDEN)
+ break;
+ rval = POP_STR();
+ js_printf(jp, "\t%s %s;\n", cs->name, rval);
+ break;
+
+ case JSOP_GOTO:
+ case JSOP_GOTOX:
+ sn = js_GetSrcNote(jp->script, pc);
+ switch (sn ? SN_TYPE(sn) : SRC_NULL) {
+ case SRC_CONT2LABEL:
+ atom = js_GetAtom(cx, &jp->script->atomMap,
+ (jsatomid) js_GetSrcNoteOffset(sn, 0));
+ rval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0);
+ if (!rval)
+ return NULL;
+ RETRACT(&ss->sprinter, rval);
+ js_printf(jp, "\tcontinue %s;\n", rval);
+ break;
+ case SRC_CONTINUE:
+ js_printf(jp, "\tcontinue;\n");
+ break;
+ case SRC_BREAK2LABEL:
+ atom = js_GetAtom(cx, &jp->script->atomMap,
+ (jsatomid) js_GetSrcNoteOffset(sn, 0));
+ rval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0);
+ if (!rval)
+ return NULL;
+ RETRACT(&ss->sprinter, rval);
+ js_printf(jp, "\tbreak %s;\n", rval);
+ break;
+ case SRC_HIDDEN:
+ break;
+ default:
+ js_printf(jp, "\tbreak;\n");
+ break;
+ }
+ todo = -2;
+ break;
+
+ case JSOP_IFEQ:
+ case JSOP_IFEQX:
+ {
+ JSBool elseif = JS_FALSE;
+
+ if_again:
+ len = GetJumpOffset(pc, pc);
+ sn = js_GetSrcNote(jp->script, pc);
+
+ switch (sn ? SN_TYPE(sn) : SRC_NULL) {
+ case SRC_IF:
+ case SRC_IF_ELSE:
+ op = JSOP_NOP; /* turn off parens */
+ rval = POP_STR();
+ if (ss->inArrayInit) {
+ LOCAL_ASSERT(SN_TYPE(sn) == SRC_IF);
+ if (Sprint(&ss->sprinter, " if (%s)", rval) < 0)
+ return NULL;
+ } else {
+ js_printf(SET_MAYBE_BRACE(jp),
+ elseif ? " if (%s) {\n" : "\tif (%s) {\n",
+ rval);
+ jp->indent += 4;
+ }
+
+ if (SN_TYPE(sn) == SRC_IF) {
+ DECOMPILE_CODE(pc + oplen, len - oplen);
+ } else {
+ LOCAL_ASSERT(!ss->inArrayInit);
+ tail = js_GetSrcNoteOffset(sn, 0);
+ DECOMPILE_CODE(pc + oplen, tail - oplen);
+ jp->indent -= 4;
+ pc += tail;
+ LOCAL_ASSERT(*pc == JSOP_GOTO || *pc == JSOP_GOTOX);
+ oplen = js_CodeSpec[*pc].length;
+ len = GetJumpOffset(pc, pc);
+ js_printf(jp, "\t} else");
+
+ /*
+ * If the second offset for sn is non-zero, it tells
+ * the distance from the goto around the else, to the
+ * ifeq for the if inside the else that forms an "if
+ * else if" chain. Thus cond spans the condition of
+ * the second if, so we simply decompile it and start
+ * over at label if_again.
+ */
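+ /* Example (illustrative): "if (a) f(); else if (b) g();" takes this
+ branch, decompiling the second condition and looping to if_again. */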
+ cond = js_GetSrcNoteOffset(sn, 1);
+ if (cond != 0) {
+ DECOMPILE_CODE(pc + oplen, cond - oplen);
+ pc += cond;
+ elseif = JS_TRUE;
+ goto if_again;
+ }
+
+ js_printf(SET_MAYBE_BRACE(jp), " {\n");
+ jp->indent += 4;
+ DECOMPILE_CODE(pc + oplen, len - oplen);
+ }
+
+ if (!ss->inArrayInit) {
+ jp->indent -= 4;
+ js_printf(jp, "\t}\n");
+ }
+ todo = -2;
+ break;
+
+ case SRC_WHILE:
+ rval = POP_STR();
+ js_printf(SET_MAYBE_BRACE(jp), "\twhile (%s) {\n", rval);
+ jp->indent += 4;
+ tail = js_GetSrcNoteOffset(sn, 0);
+ DECOMPILE_CODE(pc + oplen, tail - oplen);
+ jp->indent -= 4;
+ js_printf(jp, "\t}\n");
+ todo = -2;
+ break;
+
+ case SRC_COND:
+ xval = JS_strdup(cx, POP_STR());
+ if (!xval)
+ return NULL;
+ len = js_GetSrcNoteOffset(sn, 0);
+ DECOMPILE_CODE(pc + oplen, len - oplen);
+ lval = JS_strdup(cx, POP_STR());
+ if (!lval) {
+ JS_free(cx, (void *)xval);
+ return NULL;
+ }
+ pc += len;
+ LOCAL_ASSERT(*pc == JSOP_GOTO || *pc == JSOP_GOTOX);
+ oplen = js_CodeSpec[*pc].length;
+ len = GetJumpOffset(pc, pc);
+ DECOMPILE_CODE(pc + oplen, len - oplen);
+ rval = POP_STR();
+ todo = Sprint(&ss->sprinter, "%s ? %s : %s",
+ xval, lval, rval);
+ JS_free(cx, (void *)xval);
+ JS_free(cx, (void *)lval);
+ break;
+
+ default:
+ break;
+ }
+ break;
+ }
+
+ case JSOP_IFNE:
+ case JSOP_IFNEX:
+ /* Currently, this must be a do-while loop's upward branch. */
+ jp->indent -= 4;
+ js_printf(jp, "\t} while (%s);\n", POP_STR());
+ todo = -2;
+ break;
+
+ case JSOP_OR:
+ case JSOP_ORX:
+ xval = "||";
+
+ do_logical_connective:
+ /* Top of stack is the first clause in a disjunction (||). */
+ lval = JS_strdup(cx, POP_STR());
+ if (!lval)
+ return NULL;
+ done = pc + GetJumpOffset(pc, pc);
+ pc += len;
+ len = PTRDIFF(done, pc, jsbytecode);
+ DECOMPILE_CODE(pc, len);
+ rval = POP_STR();
+ if (jp->pretty &&
+ jp->indent + 4 + strlen(lval) + 4 + strlen(rval) > 75) {
+ rval = JS_strdup(cx, rval);
+ if (!rval) {
+ tail = -1;
+ } else {
+ todo = Sprint(&ss->sprinter, "%s %s\n", lval, xval);
+ tail = Sprint(&ss->sprinter, "%*s%s",
+ jp->indent + 4, "", rval);
+ JS_free(cx, (char *)rval);
+ }
+ if (tail < 0)
+ todo = -1;
+ } else {
+ todo = Sprint(&ss->sprinter, "%s %s %s", lval, xval, rval);
+ }
+ JS_free(cx, (char *)lval);
+ break;
+
+ case JSOP_AND:
+ case JSOP_ANDX:
+ xval = "&&";
+ goto do_logical_connective;
+
+ case JSOP_FORARG:
+ atom = GetSlotAtom(jp, js_GetArgument, GET_ARGNO(pc));
+ LOCAL_ASSERT(atom);
+ goto do_fornameinloop;
+
+ case JSOP_FORVAR:
+ atom = GetSlotAtom(jp, js_GetLocalVariable, GET_VARNO(pc));
+ LOCAL_ASSERT(atom);
+ goto do_fornameinloop;
+
+ case JSOP_FORNAME:
+ atom = GET_ATOM(cx, jp->script, pc);
+
+ do_fornameinloop:
+ lval = "";
+ do_forlvalinloop:
+ sn = js_GetSrcNote(jp->script, pc);
+ xval = NULL;
+ goto do_forinloop;
+
+ case JSOP_FORPROP:
+ xval = NULL;
+ atom = GET_ATOM(cx, jp->script, pc);
+ if (!ATOM_IS_IDENTIFIER(atom)) {
+ xval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom),
+ (jschar)'\'');
+ if (!xval)
+ return NULL;
+ atom = NULL;
+ }
+ lval = POP_STR();
+ sn = NULL;
+
+ do_forinloop:
+ pc += oplen;
+ LOCAL_ASSERT(*pc == JSOP_IFEQ || *pc == JSOP_IFEQX);
+ oplen = js_CodeSpec[*pc].length;
+ len = GetJumpOffset(pc, pc);
+ sn2 = js_GetSrcNote(jp->script, pc);
+ tail = js_GetSrcNoteOffset(sn2, 0);
+
+ do_forinhead:
+ if (!atom && xval) {
+ /*
+ * If xval is not a dummy empty string, we have to strdup
+ * it to save it from being clobbered by the first Sprint
+ * below. Standard dumb decompiler operating procedure!
+ */
+ if (*xval == '\0') {
+ xval = NULL;
+ } else {
+ xval = JS_strdup(cx, xval);
+ if (!xval)
+ return NULL;
+ }
+ }
+
+#if JS_HAS_XML_SUPPORT
+ if (foreach) {
+ foreach = JS_FALSE;
+ todo = Sprint(&ss->sprinter, "for %s (%s%s",
+ js_each_str, VarPrefix(sn), lval);
+ } else
+#endif
+ {
+ todo = Sprint(&ss->sprinter, "for (%s%s",
+ VarPrefix(sn), lval);
+ }
+ if (atom) {
+ if (*lval && SprintPut(&ss->sprinter, ".", 1) < 0)
+ return NULL;
+ xval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0);
+ if (!xval)
+ return NULL;
+ } else if (xval) {
+ LOCAL_ASSERT(*xval != '\0');
+ ok = (Sprint(&ss->sprinter,
+ (js_CodeSpec[lastop].format & JOF_XMLNAME)
+ ? ".%s"
+ : "[%s]",
+ xval)
+ >= 0);
+ JS_free(cx, (char *)xval);
+ if (!ok)
+ return NULL;
+ }
+ if (todo < 0)
+ return NULL;
+
+ lval = OFF2STR(&ss->sprinter, todo);
+ rval = GetStr(ss, ss->top-1);
+ RETRACT(&ss->sprinter, rval);
+ if (ss->inArrayInit) {
+ todo = Sprint(&ss->sprinter, " %s in %s)", lval, rval);
+ if (todo < 0)
+ return NULL;
+ ss->offsets[ss->top-1] = todo;
+ ss->sprinter.offset += PAREN_SLOP;
+ DECOMPILE_CODE(pc + oplen, tail - oplen);
+ } else {
+ js_printf(SET_MAYBE_BRACE(jp), "\t%s in %s) {\n",
+ lval, rval);
+ jp->indent += 4;
+ DECOMPILE_CODE(pc + oplen, tail - oplen);
+ jp->indent -= 4;
+ js_printf(jp, "\t}\n");
+ }
+ todo = -2;
+ break;
+
+ case JSOP_FORELEM:
+ pc++;
+ LOCAL_ASSERT(*pc == JSOP_IFEQ || *pc == JSOP_IFEQX);
+ len = js_CodeSpec[*pc].length;
+
+ /*
+ * Arrange for the JSOP_ENUMELEM case to set tail for use by
+ * do_forinhead: code that relies on it to find the loop-closing
+ * jump (whatever its format, normal or extended), in order to
+ * bound the recursively decompiled loop body.
+ */
+ sn = js_GetSrcNote(jp->script, pc);
+ LOCAL_ASSERT(!forelem_tail);
+ forelem_tail = pc + js_GetSrcNoteOffset(sn, 0);
+
+ /*
+ * This gets a little wacky. Only the length of the for loop
+ * body PLUS the element-indexing expression is known here, so
+ * we pass the after-loop pc to the JSOP_ENUMELEM case, which
+ * is immediately below, to decompile that helper bytecode via
+ * the 'forelem_done' local.
+ *
+ * Since a for..in loop can't nest in the head of another for
+ * loop, we can use forelem_{tail,done} singletons to remember
+ * state from JSOP_FORELEM to JSOP_ENUMELEM, thence (via goto)
+ * to label do_forinhead.
+ */
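+ /* Example (illustrative): "for (a[i] in o) ..." -- the indexing
+ expression a[i] sits between JSOP_FORELEM and JSOP_ENUMELEM. */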
+ LOCAL_ASSERT(!forelem_done);
+ forelem_done = pc + GetJumpOffset(pc, pc);
+
+ /* Our net stack balance after forelem;ifeq is +1. */
+ todo = SprintCString(&ss->sprinter, forelem_cookie);
+ break;
+
+ case JSOP_ENUMELEM:
+ case JSOP_ENUMCONSTELEM:
+ /*
+ * The stack has the object under the (top) index expression.
+ * The "rval" property id is underneath those two on the stack.
+ * The for loop body net and gross lengths can now be adjusted
+ * to account for the length of the indexing expression that
+ * came after JSOP_FORELEM and before JSOP_ENUMELEM.
+ */
+ atom = NULL;
+ xval = POP_STR();
+ op = JSOP_GETELEM; /* lval must have high precedence */
+ lval = POP_STR();
+ op = saveop;
+ rval = POP_STR();
+ LOCAL_ASSERT(strcmp(rval, forelem_cookie) == 0);
+ LOCAL_ASSERT(forelem_tail > pc);
+ tail = forelem_tail - pc;
+ forelem_tail = NULL;
+ LOCAL_ASSERT(forelem_done > pc);
+ len = forelem_done - pc;
+ forelem_done = NULL;
+ goto do_forinhead;
+
+#if JS_HAS_GETTER_SETTER
+ case JSOP_GETTER:
+ case JSOP_SETTER:
+ todo = -2;
+ break;
+#endif
+
+ case JSOP_DUP2:
+ rval = GetStr(ss, ss->top-2);
+ todo = SprintCString(&ss->sprinter, rval);
+ if (todo < 0 || !PushOff(ss, todo, ss->opcodes[ss->top-2]))
+ return NULL;
+ /* FALL THROUGH */
+
+ case JSOP_DUP:
+#if JS_HAS_DESTRUCTURING
+ sn = js_GetSrcNote(jp->script, pc);
+ if (sn) {
+ LOCAL_ASSERT(SN_TYPE(sn) == SRC_DESTRUCT);
+ pc = DecompileDestructuring(ss, pc, endpc);
+ if (!pc)
+ return NULL;
+ len = 0;
+ lval = POP_STR();
+ op = saveop = JSOP_ENUMELEM;
+ rval = POP_STR();
+
+ if (strcmp(rval, forelem_cookie) == 0) {
+ LOCAL_ASSERT(forelem_tail > pc);
+ tail = forelem_tail - pc;
+ forelem_tail = NULL;
+ LOCAL_ASSERT(forelem_done > pc);
+ len = forelem_done - pc;
+ forelem_done = NULL;
+ xval = NULL;
+ atom = NULL;
+
+ /*
+ * Null sn if this is a 'for (var [k, v] = i in o)'
+ * loop, because 'var [k, v] = i;' has already been
+ * hoisted.
+ */
+ if (js_GetSrcNoteOffset(sn, 0) == SRC_DECL_VAR)
+ sn = NULL;
+ goto do_forinhead;
+ }
+
+ todo = Sprint(&ss->sprinter, "%s%s = %s",
+ VarPrefix(sn), lval, rval);
+ break;
+ }
+#endif
+
+ rval = GetStr(ss, ss->top-1);
+ saveop = ss->opcodes[ss->top-1];
+ todo = SprintCString(&ss->sprinter, rval);
+ break;
+
+ case JSOP_SETARG:
+ atom = GetSlotAtom(jp, js_GetArgument, GET_ARGNO(pc));
+ LOCAL_ASSERT(atom);
+ goto do_setname;
+
+ case JSOP_SETVAR:
+ atom = GetSlotAtom(jp, js_GetLocalVariable, GET_VARNO(pc));
+ LOCAL_ASSERT(atom);
+ goto do_setname;
+
+ case JSOP_SETCONST:
+ case JSOP_SETNAME:
+ case JSOP_SETGVAR:
+ atomIndex = GET_ATOM_INDEX(pc);
+
+ do_JSOP_SETCONST:
+ atom = js_GetAtom(cx, &jp->script->atomMap, atomIndex);
+
+ do_setname:
+ lval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0);
+ if (!lval)
+ return NULL;
+ rval = POP_STR();
+ if (op == JSOP_SETNAME)
+ (void) PopOff(ss, op);
+
+ do_setlval:
+ sn = js_GetSrcNote(jp->script, pc - 1);
+ if (sn && SN_TYPE(sn) == SRC_ASSIGNOP) {
+ todo = Sprint(&ss->sprinter, "%s %s= %s",
+ lval,
+ (lastop == JSOP_GETTER)
+ ? js_getter_str
+ : (lastop == JSOP_SETTER)
+ ? js_setter_str
+ : js_CodeSpec[lastop].token,
+ rval);
+ } else {
+ sn = js_GetSrcNote(jp->script, pc);
+ todo = Sprint(&ss->sprinter, "%s%s = %s",
+ VarPrefix(sn), lval, rval);
+ }
+ if (op == JSOP_SETLOCALPOP) {
+ if (!PushOff(ss, todo, saveop))
+ return NULL;
+ rval = POP_STR();
+ LOCAL_ASSERT(*rval != '\0');
+ js_printf(jp, "\t%s;\n", rval);
+ todo = -2;
+ }
+ break;
+
+ case JSOP_NEW:
+ case JSOP_CALL:
+ case JSOP_EVAL:
+#if JS_HAS_LVALUE_RETURN
+ case JSOP_SETCALL:
+#endif
+ op = JSOP_SETNAME; /* turn off most parens */
+ argc = GET_ARGC(pc);
+ argv = (char **)
+ JS_malloc(cx, (size_t)(argc + 1) * sizeof *argv);
+ if (!argv)
+ return NULL;
+
+ ok = JS_TRUE;
+ for (i = argc; i > 0; i--) {
+ argv[i] = JS_strdup(cx, POP_STR());
+ if (!argv[i]) {
+ ok = JS_FALSE;
+ break;
+ }
+ }
+
+ /* Skip the JSOP_PUSHOBJ-created empty string. */
+ LOCAL_ASSERT(ss->top >= 2);
+ (void) PopOff(ss, op);
+
+ op = saveop;
+ argv[0] = JS_strdup(cx, POP_STR());
+ if (!argv[i])
+ ok = JS_FALSE;
+
+ lval = "(", rval = ")";
+ if (op == JSOP_NEW) {
+ if (argc == 0)
+ lval = rval = "";
+ todo = Sprint(&ss->sprinter, "%s %s%s",
+ js_new_str, argv[0], lval);
+ } else {
+ todo = Sprint(&ss->sprinter, ss_format,
+ argv[0], lval);
+ }
+ if (todo < 0)
+ ok = JS_FALSE;
+
+ for (i = 1; i <= argc; i++) {
+ if (!argv[i] ||
+ Sprint(&ss->sprinter, ss_format,
+ argv[i], (i < argc) ? ", " : "") < 0) {
+ ok = JS_FALSE;
+ break;
+ }
+ }
+ if (Sprint(&ss->sprinter, rval) < 0)
+ ok = JS_FALSE;
+
+ for (i = 0; i <= argc; i++) {
+ if (argv[i])
+ JS_free(cx, argv[i]);
+ }
+ JS_free(cx, argv);
+ if (!ok)
+ return NULL;
+#if JS_HAS_LVALUE_RETURN
+ if (op == JSOP_SETCALL) {
+ if (!PushOff(ss, todo, op))
+ return NULL;
+ todo = Sprint(&ss->sprinter, "");
+ }
+#endif
+ break;
+
+ case JSOP_DELNAME:
+ atom = GET_ATOM(cx, jp->script, pc);
+ lval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0);
+ if (!lval)
+ return NULL;
+ RETRACT(&ss->sprinter, lval);
+ do_delete_lval:
+ todo = Sprint(&ss->sprinter, "%s %s", js_delete_str, lval);
+ break;
+
+ case JSOP_DELPROP:
+ GET_ATOM_QUOTE_AND_FMT("%s %s[%s]", "%s %s.%s", rval);
+ lval = POP_STR();
+ todo = Sprint(&ss->sprinter, fmt, js_delete_str, lval, rval);
+ break;
+
+ case JSOP_DELELEM:
+ op = JSOP_NOP; /* turn off parens */
+ xval = POP_STR();
+ op = saveop;
+ lval = POP_STR();
+ if (*xval == '\0')
+ goto do_delete_lval;
+ todo = Sprint(&ss->sprinter,
+ (js_CodeSpec[lastop].format & JOF_XMLNAME)
+ ? "%s %s.%s"
+ : "%s %s[%s]",
+ js_delete_str, lval, xval);
+ break;
+
+#if JS_HAS_XML_SUPPORT
+ case JSOP_DELDESC:
+ xval = POP_STR();
+ lval = POP_STR();
+ todo = Sprint(&ss->sprinter, "%s %s..%s",
+ js_delete_str, lval, xval);
+ break;
+#endif
+
+ case JSOP_TYPEOFEXPR:
+ case JSOP_TYPEOF:
+ case JSOP_VOID:
+ rval = POP_STR();
+ todo = Sprint(&ss->sprinter, "%s %s", cs->name, rval);
+ break;
+
+ case JSOP_INCARG:
+ case JSOP_DECARG:
+ atom = GetSlotAtom(jp, js_GetArgument, GET_ARGNO(pc));
+ LOCAL_ASSERT(atom);
+ goto do_incatom;
+
+ case JSOP_INCVAR:
+ case JSOP_DECVAR:
+ atom = GetSlotAtom(jp, js_GetLocalVariable, GET_VARNO(pc));
+ LOCAL_ASSERT(atom);
+ goto do_incatom;
+
+ case JSOP_INCNAME:
+ case JSOP_DECNAME:
+ case JSOP_INCGVAR:
+ case JSOP_DECGVAR:
+ atom = GET_ATOM(cx, jp->script, pc);
+ do_incatom:
+ lval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0);
+ if (!lval)
+ return NULL;
+ RETRACT(&ss->sprinter, lval);
+ do_inclval:
+ todo = Sprint(&ss->sprinter, ss_format,
+ js_incop_strs[!(cs->format & JOF_INC)], lval);
+ break;
+
+ case JSOP_INCPROP:
+ case JSOP_DECPROP:
+ GET_ATOM_QUOTE_AND_FMT(preindex_format, predot_format, rval);
+
+ /*
+ * Force precedence below the numeric literal opcodes, so that
+ * 42..foo or 10000..toString(16), e.g., decompile with parens
+ * around the left-hand side of dot.
+ */
+ op = JSOP_GETPROP;
+ lval = POP_STR();
+ todo = Sprint(&ss->sprinter, fmt,
+ js_incop_strs[!(cs->format & JOF_INC)],
+ lval, rval);
+ break;
+
+ case JSOP_INCELEM:
+ case JSOP_DECELEM:
+ op = JSOP_NOP; /* turn off parens */
+ xval = POP_STR();
+ op = JSOP_GETELEM;
+ lval = POP_STR();
+ if (*xval != '\0') {
+ todo = Sprint(&ss->sprinter,
+ (js_CodeSpec[lastop].format & JOF_XMLNAME)
+ ? predot_format
+ : preindex_format,
+ js_incop_strs[!(cs->format & JOF_INC)],
+ lval, xval);
+ } else {
+ todo = Sprint(&ss->sprinter, ss_format,
+ js_incop_strs[!(cs->format & JOF_INC)], lval);
+ }
+ break;
+
+ case JSOP_ARGINC:
+ case JSOP_ARGDEC:
+ atom = GetSlotAtom(jp, js_GetArgument, GET_ARGNO(pc));
+ LOCAL_ASSERT(atom);
+ goto do_atominc;
+
+ case JSOP_VARINC:
+ case JSOP_VARDEC:
+ atom = GetSlotAtom(jp, js_GetLocalVariable, GET_VARNO(pc));
+ LOCAL_ASSERT(atom);
+ goto do_atominc;
+
+ case JSOP_NAMEINC:
+ case JSOP_NAMEDEC:
+ case JSOP_GVARINC:
+ case JSOP_GVARDEC:
+ atom = GET_ATOM(cx, jp->script, pc);
+ do_atominc:
+ lval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0);
+ if (!lval)
+ return NULL;
+ RETRACT(&ss->sprinter, lval);
+ do_lvalinc:
+ todo = Sprint(&ss->sprinter, ss_format,
+ lval, js_incop_strs[!(cs->format & JOF_INC)]);
+ break;
+
+ case JSOP_PROPINC:
+ case JSOP_PROPDEC:
+ GET_ATOM_QUOTE_AND_FMT(postindex_format, postdot_format, rval);
+
+ /*
+ * Force precedence below the numeric literal opcodes, so that
+ * 42..foo or 10000..toString(16), e.g., decompile with parens
+ * around the left-hand side of dot.
+ */
+ op = JSOP_GETPROP;
+ lval = POP_STR();
+ todo = Sprint(&ss->sprinter, fmt, lval, rval,
+ js_incop_strs[!(cs->format & JOF_INC)]);
+ break;
+
+ case JSOP_ELEMINC:
+ case JSOP_ELEMDEC:
+ op = JSOP_NOP; /* turn off parens */
+ xval = POP_STR();
+ op = JSOP_GETELEM;
+ lval = POP_STR();
+ if (*xval != '\0') {
+ todo = Sprint(&ss->sprinter,
+ (js_CodeSpec[lastop].format & JOF_XMLNAME)
+ ? postdot_format
+ : postindex_format,
+ lval, xval,
+ js_incop_strs[!(cs->format & JOF_INC)]);
+ } else {
+ todo = Sprint(&ss->sprinter, ss_format,
+ lval, js_incop_strs[!(cs->format & JOF_INC)]);
+ }
+ break;
+
+ case JSOP_GETPROP2:
+ op = JSOP_GETPROP;
+ (void) PopOff(ss, lastop);
+ /* FALL THROUGH */
+
+ case JSOP_GETPROP:
+ case JSOP_GETXPROP:
+ atom = GET_ATOM(cx, jp->script, pc);
+
+ do_getprop:
+ GET_QUOTE_AND_FMT(index_format, dot_format, rval);
+
+ do_getprop_lval:
+ lval = POP_STR();
+ todo = Sprint(&ss->sprinter, fmt, lval, rval);
+ break;
+
+#if JS_HAS_XML_SUPPORT
+ BEGIN_LITOPX_CASE(JSOP_GETMETHOD)
+ sn = js_GetSrcNote(jp->script, pc);
+ if (sn && SN_TYPE(sn) == SRC_PCBASE)
+ goto do_getprop;
+ GET_QUOTE_AND_FMT("%s.function::[%s]", "%s.function::%s", rval);
+ goto do_getprop_lval;
+
+ BEGIN_LITOPX_CASE(JSOP_SETMETHOD)
+ sn = js_GetSrcNote(jp->script, pc);
+ if (sn && SN_TYPE(sn) == SRC_PCBASE)
+ goto do_setprop;
+ GET_QUOTE_AND_FMT("%s.function::[%s] %s= %s",
+ "%s.function::%s %s= %s",
+ xval);
+ goto do_setprop_rval;
+#endif
+
+ case JSOP_SETPROP:
+ atom = GET_ATOM(cx, jp->script, pc);
+
+ do_setprop:
+ GET_QUOTE_AND_FMT("%s[%s] %s= %s", "%s.%s %s= %s", xval);
+
+ do_setprop_rval:
+ rval = POP_STR();
+
+ /*
+ * Force precedence below the numeric literal opcodes, so that
+ * 42..foo or 10000..toString(16), e.g., decompile with parens
+ * around the left-hand side of dot.
+ */
+ op = JSOP_GETPROP;
+ lval = POP_STR();
+ sn = js_GetSrcNote(jp->script, pc - 1);
+ todo = Sprint(&ss->sprinter, fmt, lval, xval,
+ (sn && SN_TYPE(sn) == SRC_ASSIGNOP)
+ ? (lastop == JSOP_GETTER)
+ ? js_getter_str
+ : (lastop == JSOP_SETTER)
+ ? js_setter_str
+ : js_CodeSpec[lastop].token
+ : "",
+ rval);
+ break;
+
+ case JSOP_GETELEM2:
+ op = JSOP_GETELEM;
+ (void) PopOff(ss, lastop);
+ /* FALL THROUGH */
+
+ case JSOP_GETELEM:
+ case JSOP_GETXELEM:
+ op = JSOP_NOP; /* turn off parens */
+ xval = POP_STR();
+ op = saveop;
+ lval = POP_STR();
+ if (*xval == '\0') {
+ todo = Sprint(&ss->sprinter, "%s", lval);
+ } else {
+ todo = Sprint(&ss->sprinter,
+ (js_CodeSpec[lastop].format & JOF_XMLNAME)
+ ? dot_format
+ : index_format,
+ lval, xval);
+ }
+ break;
+
+ case JSOP_SETELEM:
+ rval = POP_STR();
+ op = JSOP_NOP; /* turn off parens */
+ xval = POP_STR();
+ cs = &js_CodeSpec[ss->opcodes[ss->top]];
+ op = JSOP_GETELEM; /* lval must have high precedence */
+ lval = POP_STR();
+ op = saveop;
+ if (*xval == '\0')
+ goto do_setlval;
+ sn = js_GetSrcNote(jp->script, pc - 1);
+ todo = Sprint(&ss->sprinter,
+ (cs->format & JOF_XMLNAME)
+ ? "%s.%s %s= %s"
+ : "%s[%s] %s= %s",
+ lval, xval,
+ (sn && SN_TYPE(sn) == SRC_ASSIGNOP)
+ ? (lastop == JSOP_GETTER)
+ ? js_getter_str
+ : (lastop == JSOP_SETTER)
+ ? js_setter_str
+ : js_CodeSpec[lastop].token
+ : "",
+ rval);
+ break;
+
+ case JSOP_ARGSUB:
+ i = (jsint) GET_ATOM_INDEX(pc);
+ todo = Sprint(&ss->sprinter, "%s[%d]",
+ js_arguments_str, (int) i);
+ break;
+
+ case JSOP_ARGCNT:
+ todo = Sprint(&ss->sprinter, dot_format,
+ js_arguments_str, js_length_str);
+ break;
+
+ case JSOP_GETARG:
+ i = GET_ARGNO(pc);
+ atom = GetSlotAtom(jp, js_GetArgument, i);
+#if JS_HAS_DESTRUCTURING
+ if (!atom) {
+ todo = Sprint(&ss->sprinter, "%s[%d]", js_arguments_str, i);
+ break;
+ }
+#else
+ LOCAL_ASSERT(atom);
+#endif
+ goto do_name;
+
+ case JSOP_GETVAR:
+ atom = GetSlotAtom(jp, js_GetLocalVariable, GET_VARNO(pc));
+ LOCAL_ASSERT(atom);
+ goto do_name;
+
+ case JSOP_NAME:
+ case JSOP_GETGVAR:
+ atom = GET_ATOM(cx, jp->script, pc);
+ do_name:
+ lval = "";
+ do_qname:
+ sn = js_GetSrcNote(jp->script, pc);
+ rval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0);
+ if (!rval)
+ return NULL;
+ RETRACT(&ss->sprinter, rval);
+ todo = Sprint(&ss->sprinter, "%s%s%s",
+ VarPrefix(sn), lval, rval);
+ break;
+
+ case JSOP_UINT16:
+ i = (jsint) GET_ATOM_INDEX(pc);
+ goto do_sprint_int;
+
+ case JSOP_UINT24:
+ i = (jsint) GET_UINT24(pc);
+ do_sprint_int:
+ todo = Sprint(&ss->sprinter, "%u", (unsigned) i);
+ break;
+
+ case JSOP_LITERAL:
+ atomIndex = GET_LITERAL_INDEX(pc);
+ goto do_JSOP_STRING;
+
+ case JSOP_FINDNAME:
+ atomIndex = GET_LITERAL_INDEX(pc);
+ todo = Sprint(&ss->sprinter, "");
+ if (todo < 0 || !PushOff(ss, todo, op))
+ return NULL;
+ atom = js_GetAtom(cx, &jp->script->atomMap, atomIndex);
+ goto do_name;
+
+ case JSOP_LITOPX:
+ atomIndex = GET_LITERAL_INDEX(pc);
+ pc2 = pc + 1 + LITERAL_INDEX_LEN;
+ op = saveop = *pc2;
+ pc += len - (1 + ATOM_INDEX_LEN);
+ cs = &js_CodeSpec[op];
+ len = cs->length;
+ switch (op) {
+ case JSOP_ANONFUNOBJ: goto do_JSOP_ANONFUNOBJ;
+ case JSOP_BINDNAME: goto do_JSOP_BINDNAME;
+ case JSOP_CLOSURE: goto do_JSOP_CLOSURE;
+#if JS_HAS_EXPORT_IMPORT
+ case JSOP_EXPORTNAME: goto do_JSOP_EXPORTNAME;
+#endif
+#if JS_HAS_XML_SUPPORT
+ case JSOP_GETMETHOD: goto do_JSOP_GETMETHOD;
+ case JSOP_SETMETHOD: goto do_JSOP_SETMETHOD;
+#endif
+ case JSOP_NAMEDFUNOBJ: goto do_JSOP_NAMEDFUNOBJ;
+ case JSOP_NUMBER: goto do_JSOP_NUMBER;
+ case JSOP_OBJECT: goto do_JSOP_OBJECT;
+#if JS_HAS_XML_SUPPORT
+ case JSOP_QNAMECONST: goto do_JSOP_QNAMECONST;
+ case JSOP_QNAMEPART: goto do_JSOP_QNAMEPART;
+#endif
+ case JSOP_REGEXP: goto do_JSOP_REGEXP;
+ case JSOP_SETCONST: goto do_JSOP_SETCONST;
+ case JSOP_STRING: goto do_JSOP_STRING;
+#if JS_HAS_XML_SUPPORT
+ case JSOP_XMLCDATA: goto do_JSOP_XMLCDATA;
+ case JSOP_XMLCOMMENT: goto do_JSOP_XMLCOMMENT;
+ case JSOP_XMLOBJECT: goto do_JSOP_XMLOBJECT;
+ case JSOP_XMLPI: goto do_JSOP_XMLPI;
+#endif
+ case JSOP_ENTERBLOCK: goto do_JSOP_ENTERBLOCK;
+ default: LOCAL_ASSERT(0);
+ }
+ /* NOTREACHED */
+ break;
+
+ BEGIN_LITOPX_CASE(JSOP_NUMBER)
+ val = ATOM_KEY(atom);
+ if (JSVAL_IS_INT(val)) {
+ long ival = (long)JSVAL_TO_INT(val);
+ todo = Sprint(&ss->sprinter, "%ld", ival);
+ } else {
+ char buf[DTOSTR_STANDARD_BUFFER_SIZE];
+ char *numStr = JS_dtostr(buf, sizeof buf, DTOSTR_STANDARD,
+ 0, *JSVAL_TO_DOUBLE(val));
+ if (!numStr) {
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+ }
+ todo = Sprint(&ss->sprinter, numStr);
+ }
+ END_LITOPX_CASE
+
+ BEGIN_LITOPX_CASE(JSOP_STRING)
+ rval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom),
+ inXML ? DONT_ESCAPE : '"');
+ if (!rval)
+ return NULL;
+ todo = STR2OFF(&ss->sprinter, rval);
+ END_LITOPX_CASE
+
+ case JSOP_OBJECT:
+ case JSOP_REGEXP:
+ case JSOP_ANONFUNOBJ:
+ case JSOP_NAMEDFUNOBJ:
+ atomIndex = GET_ATOM_INDEX(pc);
+
+ do_JSOP_OBJECT:
+ do_JSOP_REGEXP:
+ do_JSOP_ANONFUNOBJ:
+ do_JSOP_NAMEDFUNOBJ:
+ atom = js_GetAtom(cx, &jp->script->atomMap, atomIndex);
+ if (op == JSOP_OBJECT || op == JSOP_REGEXP) {
+ if (!js_regexp_toString(cx, ATOM_TO_OBJECT(atom), 0, NULL,
+ &val)) {
+ return NULL;
+ }
+ } else {
+ if (!js_fun_toString(cx, ATOM_TO_OBJECT(atom),
+ JS_IN_GROUP_CONTEXT |
+ JS_DONT_PRETTY_PRINT,
+ 0, NULL, &val)) {
+ return NULL;
+ }
+ }
+ str = JSVAL_TO_STRING(val);
+ todo = SprintPut(&ss->sprinter, JS_GetStringBytes(str),
+ JSSTRING_LENGTH(str));
+ break;
+
+ case JSOP_TABLESWITCH:
+ case JSOP_TABLESWITCHX:
+ {
+ ptrdiff_t jmplen, off, off2;
+ jsint j, n, low, high;
+ TableEntry *table, pivot;
+
+ sn = js_GetSrcNote(jp->script, pc);
+ LOCAL_ASSERT(sn && SN_TYPE(sn) == SRC_SWITCH);
+ len = js_GetSrcNoteOffset(sn, 0);
+ jmplen = (op == JSOP_TABLESWITCH) ? JUMP_OFFSET_LEN
+ : JUMPX_OFFSET_LEN;
+ pc2 = pc;
+ off = GetJumpOffset(pc, pc2);
+ pc2 += jmplen;
+ low = GET_JUMP_OFFSET(pc2);
+ pc2 += JUMP_OFFSET_LEN;
+ high = GET_JUMP_OFFSET(pc2);
+ pc2 += JUMP_OFFSET_LEN;
+
+ n = high - low + 1;
+ if (n == 0) {
+ table = NULL;
+ j = 0;
+ } else {
+ table = (TableEntry *)
+ JS_malloc(cx, (size_t)n * sizeof *table);
+ if (!table)
+ return NULL;
+ for (i = j = 0; i < n; i++) {
+ table[j].label = NULL;
+ off2 = GetJumpOffset(pc, pc2);
+ if (off2) {
+ sn = js_GetSrcNote(jp->script, pc2);
+ if (sn) {
+ LOCAL_ASSERT(SN_TYPE(sn) == SRC_LABEL);
+ table[j].label =
+ js_GetAtom(cx, &jp->script->atomMap,
+ (jsatomid)
+ js_GetSrcNoteOffset(sn, 0));
+ }
+ table[j].key = INT_TO_JSVAL(low + i);
+ table[j].offset = off2;
+ table[j].order = j;
+ j++;
+ }
+ pc2 += jmplen;
+ }
+ js_HeapSort(table, (size_t) j, &pivot, sizeof(TableEntry),
+ CompareOffsets, NULL);
+ }
+
+ ok = DecompileSwitch(ss, table, (uintN)j, pc, len, off,
+ JS_FALSE);
+ JS_free(cx, table);
+ if (!ok)
+ return NULL;
+ todo = -2;
+ break;
+ }
+
+ case JSOP_LOOKUPSWITCH:
+ case JSOP_LOOKUPSWITCHX:
+ {
+ ptrdiff_t jmplen, off, off2;
+ jsatomid npairs, k;
+ TableEntry *table;
+
+ sn = js_GetSrcNote(jp->script, pc);
+ LOCAL_ASSERT(sn && SN_TYPE(sn) == SRC_SWITCH);
+ len = js_GetSrcNoteOffset(sn, 0);
+ jmplen = (op == JSOP_LOOKUPSWITCH) ? JUMP_OFFSET_LEN
+ : JUMPX_OFFSET_LEN;
+ pc2 = pc;
+ off = GetJumpOffset(pc, pc2);
+ pc2 += jmplen;
+ npairs = GET_ATOM_INDEX(pc2);
+ pc2 += ATOM_INDEX_LEN;
+
+ table = (TableEntry *)
+ JS_malloc(cx, (size_t)npairs * sizeof *table);
+ if (!table)
+ return NULL;
+ for (k = 0; k < npairs; k++) {
+ sn = js_GetSrcNote(jp->script, pc2);
+ if (sn) {
+ LOCAL_ASSERT(SN_TYPE(sn) == SRC_LABEL);
+ table[k].label =
+ js_GetAtom(cx, &jp->script->atomMap, (jsatomid)
+ js_GetSrcNoteOffset(sn, 0));
+ } else {
+ table[k].label = NULL;
+ }
+ atom = GET_ATOM(cx, jp->script, pc2);
+ pc2 += ATOM_INDEX_LEN;
+ off2 = GetJumpOffset(pc, pc2);
+ pc2 += jmplen;
+ table[k].key = ATOM_KEY(atom);
+ table[k].offset = off2;
+ }
+
+ ok = DecompileSwitch(ss, table, (uintN)npairs, pc, len, off,
+ JS_FALSE);
+ JS_free(cx, table);
+ if (!ok)
+ return NULL;
+ todo = -2;
+ break;
+ }
+
+ case JSOP_CONDSWITCH:
+ {
+ ptrdiff_t off, off2, caseOff;
+ jsint ncases;
+ TableEntry *table;
+
+ sn = js_GetSrcNote(jp->script, pc);
+ LOCAL_ASSERT(sn && SN_TYPE(sn) == SRC_SWITCH);
+ len = js_GetSrcNoteOffset(sn, 0);
+ off = js_GetSrcNoteOffset(sn, 1);
+
+ /*
+ * Count the cases using offsets from switch to first case,
+ * and case to case, stored in srcnote immediates.
+ */
+ pc2 = pc;
+ off2 = off;
+ for (ncases = 0; off2 != 0; ncases++) {
+ pc2 += off2;
+ LOCAL_ASSERT(*pc2 == JSOP_CASE || *pc2 == JSOP_DEFAULT ||
+ *pc2 == JSOP_CASEX || *pc2 == JSOP_DEFAULTX);
+ if (*pc2 == JSOP_DEFAULT || *pc2 == JSOP_DEFAULTX) {
+ /* End of cases, but count default as a case. */
+ off2 = 0;
+ } else {
+ sn = js_GetSrcNote(jp->script, pc2);
+ LOCAL_ASSERT(sn && SN_TYPE(sn) == SRC_PCDELTA);
+ off2 = js_GetSrcNoteOffset(sn, 0);
+ }
+ }
+
+ /*
+ * Allocate table and rescan the cases using their srcnotes,
+ * stashing each case's delta from switch top in table[i].key,
+ * and the distance to its statements in table[i].offset.
+ */
+ table = (TableEntry *)
+ JS_malloc(cx, (size_t)ncases * sizeof *table);
+ if (!table)
+ return NULL;
+ pc2 = pc;
+ off2 = off;
+ for (i = 0; i < ncases; i++) {
+ pc2 += off2;
+ LOCAL_ASSERT(*pc2 == JSOP_CASE || *pc2 == JSOP_DEFAULT ||
+ *pc2 == JSOP_CASEX || *pc2 == JSOP_DEFAULTX);
+ caseOff = pc2 - pc;
+ table[i].key = INT_TO_JSVAL((jsint) caseOff);
+ table[i].offset = caseOff + GetJumpOffset(pc2, pc2);
+ if (*pc2 == JSOP_CASE || *pc2 == JSOP_CASEX) {
+ sn = js_GetSrcNote(jp->script, pc2);
+ LOCAL_ASSERT(sn && SN_TYPE(sn) == SRC_PCDELTA);
+ off2 = js_GetSrcNoteOffset(sn, 0);
+ }
+ }
+
+ /*
+ * Find offset of default code by fetching the default offset
+ * from the end of table. JSOP_CONDSWITCH always has a default
+ * case at the end.
+ */
+ off = JSVAL_TO_INT(table[ncases-1].key);
+ pc2 = pc + off;
+ off += GetJumpOffset(pc2, pc2);
+
+ ok = DecompileSwitch(ss, table, (uintN)ncases, pc, len, off,
+ JS_TRUE);
+ JS_free(cx, table);
+ if (!ok)
+ return NULL;
+ todo = -2;
+ break;
+ }
+
+ case JSOP_CASE:
+ case JSOP_CASEX:
+ {
+ lval = POP_STR();
+ if (!lval)
+ return NULL;
+ js_printf(jp, "\tcase %s:\n", lval);
+ todo = -2;
+ break;
+ }
+
+ case JSOP_NEW_EQ:
+ case JSOP_NEW_NE:
+ rval = POP_STR();
+ lval = POP_STR();
+ todo = Sprint(&ss->sprinter, "%s %c== %s",
+ lval, (op == JSOP_NEW_EQ) ? '=' : '!', rval);
+ break;
+
+ BEGIN_LITOPX_CASE(JSOP_CLOSURE)
+ LOCAL_ASSERT(ATOM_IS_OBJECT(atom));
+ todo = -2;
+ goto do_function;
+ END_LITOPX_CASE
+
+#if JS_HAS_EXPORT_IMPORT
+ case JSOP_EXPORTALL:
+ js_printf(jp, "\texport *;\n");
+ todo = -2;
+ break;
+
+ BEGIN_LITOPX_CASE(JSOP_EXPORTNAME)
+ rval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0);
+ if (!rval)
+ return NULL;
+ RETRACT(&ss->sprinter, rval);
+ js_printf(jp, "\texport %s;\n", rval);
+ todo = -2;
+ END_LITOPX_CASE
+
+ case JSOP_IMPORTALL:
+ lval = POP_STR();
+ js_printf(jp, "\timport %s.*;\n", lval);
+ todo = -2;
+ break;
+
+ case JSOP_IMPORTPROP:
+ do_importprop:
+ GET_ATOM_QUOTE_AND_FMT("\timport %s[%s];\n",
+ "\timport %s.%s;\n",
+ rval);
+ lval = POP_STR();
+ js_printf(jp, fmt, lval, rval);
+ todo = -2;
+ break;
+
+ case JSOP_IMPORTELEM:
+ xval = POP_STR();
+ op = JSOP_GETELEM;
+ if (js_CodeSpec[lastop].format & JOF_XMLNAME)
+ goto do_importprop;
+ lval = POP_STR();
+ js_printf(jp, "\timport %s[%s];\n", lval, xval);
+ todo = -2;
+ break;
+#endif /* JS_HAS_EXPORT_IMPORT */
+
+ case JSOP_TRAP:
+ op = JS_GetTrapOpcode(cx, jp->script, pc);
+ if (op == JSOP_LIMIT)
+ return NULL;
+ saveop = op;
+ *pc = op;
+ cs = &js_CodeSpec[op];
+ len = cs->length;
+ DECOMPILE_CODE(pc, len);
+ *pc = JSOP_TRAP;
+ todo = -2;
+ break;
+
+ case JSOP_NEWINIT:
+ {
+ JSBool isArray;
+
+ LOCAL_ASSERT(ss->top >= 2);
+ (void) PopOff(ss, op);
+ lval = POP_STR();
+ isArray = (*lval == 'A');
+ todo = ss->sprinter.offset;
+#if JS_HAS_SHARP_VARS
+ op = (JSOp)pc[len];
+ if (op == JSOP_DEFSHARP) {
+ pc += len;
+ cs = &js_CodeSpec[op];
+ len = cs->length;
+ i = (jsint) GET_ATOM_INDEX(pc);
+ if (Sprint(&ss->sprinter, "#%u=", (unsigned) i) < 0)
+ return NULL;
+ }
+#endif /* JS_HAS_SHARP_VARS */
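+ /* Example (illustrative): with sharp variables, "#1=[...]" gets its
+ "#1=" prefix from the DEFSHARP branch above, before the bracket below. */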
+ if (isArray) {
+ ++ss->inArrayInit;
+ if (SprintCString(&ss->sprinter, "[") < 0)
+ return NULL;
+ } else {
+ if (SprintCString(&ss->sprinter, "{") < 0)
+ return NULL;
+ }
+ break;
+ }
+
+ case JSOP_ENDINIT:
+ op = JSOP_NOP; /* turn off parens */
+ rval = POP_STR();
+ sn = js_GetSrcNote(jp->script, pc);
+
+ /* Skip any #n= prefix to find the opening bracket. */
+ for (xval = rval; *xval != '[' && *xval != '{'; xval++)
+ continue;
+ if (*xval == '[')
+ --ss->inArrayInit;
+ todo = Sprint(&ss->sprinter, "%s%s%c",
+ rval,
+ (sn && SN_TYPE(sn) == SRC_CONTINUE) ? ", " : "",
+ (*xval == '[') ? ']' : '}');
+ break;
+
+ case JSOP_INITPROP:
+ atom = GET_ATOM(cx, jp->script, pc);
+ xval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom),
+ (jschar)
+ (ATOM_IS_IDENTIFIER(atom) ? 0 : '\''));
+ if (!xval)
+ return NULL;
+ rval = POP_STR();
+ lval = POP_STR();
+ do_initprop:
+#ifdef OLD_GETTER_SETTER
+ todo = Sprint(&ss->sprinter, "%s%s%s%s%s:%s",
+ lval,
+ (lval[1] != '\0') ? ", " : "",
+ xval,
+ (lastop == JSOP_GETTER || lastop == JSOP_SETTER)
+ ? " " : "",
+ (lastop == JSOP_GETTER) ? js_getter_str :
+ (lastop == JSOP_SETTER) ? js_setter_str :
+ "",
+ rval);
+#else
+ if (lastop == JSOP_GETTER || lastop == JSOP_SETTER) {
+ if (!atom || !ATOM_IS_STRING(atom) ||
+ !ATOM_IS_IDENTIFIER(atom) ||
+ ATOM_IS_KEYWORD(atom) ||
+ ((ss->opcodes[ss->top+1] != JSOP_ANONFUNOBJ ||
+ strncmp(rval, js_function_str, 8) != 0) &&
+ ss->opcodes[ss->top+1] != JSOP_NAMEDFUNOBJ)) {
+ todo = Sprint(&ss->sprinter, "%s%s%s%s%s:%s", lval,
+ (lval[1] != '\0') ? ", " : "", xval,
+ (lastop == JSOP_GETTER ||
+ lastop == JSOP_SETTER)
+ ? " " : "",
+ (lastop == JSOP_GETTER) ? js_getter_str :
+ (lastop == JSOP_SETTER) ? js_setter_str :
+ "",
+ rval);
+ } else {
+ rval += 8 + 1;
+ LOCAL_ASSERT(rval[strlen(rval)-1] == '}');
+ todo = Sprint(&ss->sprinter, "%s%s%s %s%s",
+ lval,
+ (lval[1] != '\0') ? ", " : "",
+ (lastop == JSOP_GETTER)
+ ? js_get_str : js_set_str,
+ xval,
+ rval);
+ }
+ } else {
+ todo = Sprint(&ss->sprinter, "%s%s%s:%s",
+ lval,
+ (lval[1] != '\0') ? ", " : "",
+ xval,
+ rval);
+ }
+#endif
+ break;
+
+ case JSOP_INITELEM:
+ rval = POP_STR();
+ xval = POP_STR();
+ lval = POP_STR();
+ sn = js_GetSrcNote(jp->script, pc);
+ if (sn && SN_TYPE(sn) == SRC_INITPROP) {
+ atom = NULL;
+ goto do_initprop;
+ }
+ todo = Sprint(&ss->sprinter, "%s%s%s",
+ lval,
+ (lval[1] != '\0' || *xval != '0') ? ", " : "",
+ rval);
+ break;
+
+#if JS_HAS_SHARP_VARS
+ case JSOP_DEFSHARP:
+ i = (jsint) GET_ATOM_INDEX(pc);
+ rval = POP_STR();
+ todo = Sprint(&ss->sprinter, "#%u=%s", (unsigned) i, rval);
+ break;
+
+ case JSOP_USESHARP:
+ i = (jsint) GET_ATOM_INDEX(pc);
+ todo = Sprint(&ss->sprinter, "#%u#", (unsigned) i);
+ break;
+#endif /* JS_HAS_SHARP_VARS */
+
+#if JS_HAS_DEBUGGER_KEYWORD
+ case JSOP_DEBUGGER:
+ js_printf(jp, "\tdebugger;\n");
+ todo = -2;
+ break;
+#endif /* JS_HAS_DEBUGGER_KEYWORD */
+
+#if JS_HAS_XML_SUPPORT
+ case JSOP_STARTXML:
+ case JSOP_STARTXMLEXPR:
+ inXML = op == JSOP_STARTXML;
+ todo = -2;
+ break;
+
+ case JSOP_DEFXMLNS:
+ rval = POP_STR();
+ js_printf(jp, "\t%s %s %s = %s;\n",
+ js_default_str, js_xml_str, js_namespace_str, rval);
+ todo = -2;
+ break;
+
+ case JSOP_ANYNAME:
+ if (pc[JSOP_ANYNAME_LENGTH] == JSOP_TOATTRNAME) {
+ len += JSOP_TOATTRNAME_LENGTH;
+ todo = SprintPut(&ss->sprinter, "@*", 2);
+ } else {
+ todo = SprintPut(&ss->sprinter, "*", 1);
+ }
+ break;
+
+ BEGIN_LITOPX_CASE(JSOP_QNAMEPART)
+ if (pc[JSOP_QNAMEPART_LENGTH] == JSOP_TOATTRNAME) {
+ saveop = JSOP_TOATTRNAME;
+ len += JSOP_TOATTRNAME_LENGTH;
+ lval = "@";
+ goto do_qname;
+ }
+ goto do_name;
+ END_LITOPX_CASE
+
+ BEGIN_LITOPX_CASE(JSOP_QNAMECONST)
+ rval = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0);
+ if (!rval)
+ return NULL;
+ RETRACT(&ss->sprinter, rval);
+ lval = POP_STR();
+ todo = Sprint(&ss->sprinter, "%s::%s", lval, rval);
+ END_LITOPX_CASE
+
+ case JSOP_QNAME:
+ rval = POP_STR();
+ lval = POP_STR();
+ todo = Sprint(&ss->sprinter, "%s::[%s]", lval, rval);
+ break;
+
+ case JSOP_TOATTRNAME:
+ op = JSOP_NOP; /* turn off parens */
+ rval = POP_STR();
+ todo = Sprint(&ss->sprinter, "@[%s]", rval);
+ break;
+
+ case JSOP_TOATTRVAL:
+ todo = -2;
+ break;
+
+ case JSOP_ADDATTRNAME:
+ rval = POP_STR();
+ lval = POP_STR();
+ todo = Sprint(&ss->sprinter, "%s %s", lval, rval);
+ /* This gets reset by all XML tag expressions. */
+ quoteAttr = JS_TRUE;
+ break;
+
+ case JSOP_ADDATTRVAL:
+ rval = POP_STR();
+ lval = POP_STR();
+ if (quoteAttr)
+ todo = Sprint(&ss->sprinter, "%s=\"%s\"", lval, rval);
+ else
+ todo = Sprint(&ss->sprinter, "%s=%s", lval, rval);
+ break;
+
+ case JSOP_BINDXMLNAME:
+ /* Leave the name stacked and push a dummy string. */
+ todo = Sprint(&ss->sprinter, "");
+ break;
+
+ case JSOP_SETXMLNAME:
+ /* Pop the r.h.s., the dummy string, and the name. */
+ rval = POP_STR();
+ (void) PopOff(ss, op);
+ lval = POP_STR();
+ goto do_setlval;
+
+ case JSOP_XMLELTEXPR:
+ case JSOP_XMLTAGEXPR:
+ todo = Sprint(&ss->sprinter, "{%s}", POP_STR());
+ inXML = JS_TRUE;
+ /* If we're an attribute value, we shouldn't quote this. */
+ quoteAttr = JS_FALSE;
+ break;
+
+ case JSOP_TOXMLLIST:
+ op = JSOP_NOP; /* turn off parens */
+ todo = Sprint(&ss->sprinter, "<>%s</>", POP_STR());
+ inXML = JS_FALSE;
+ break;
+
+ case JSOP_FOREACH:
+ foreach = JS_TRUE;
+ todo = -2;
+ break;
+
+ case JSOP_TOXML:
+ inXML = JS_FALSE;
+ /* FALL THROUGH */
+
+ case JSOP_XMLNAME:
+ case JSOP_FILTER:
+ /* Conversion and prefix ops do nothing in the decompiler. */
+ todo = -2;
+ break;
+
+ case JSOP_ENDFILTER:
+ rval = POP_STR();
+ lval = POP_STR();
+ todo = Sprint(&ss->sprinter, "%s.(%s)", lval, rval);
+ break;
+
+ case JSOP_DESCENDANTS:
+ rval = POP_STR();
+ lval = POP_STR();
+ todo = Sprint(&ss->sprinter, "%s..%s", lval, rval);
+ break;
+
+ BEGIN_LITOPX_CASE(JSOP_XMLOBJECT)
+ todo = Sprint(&ss->sprinter, "<xml address='%p'>",
+ ATOM_TO_OBJECT(atom));
+ END_LITOPX_CASE
+
+ BEGIN_LITOPX_CASE(JSOP_XMLCDATA)
+ todo = SprintPut(&ss->sprinter, "<![CDATA[", 9);
+ if (!QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0))
+ return NULL;
+ SprintPut(&ss->sprinter, "]]>", 3);
+ END_LITOPX_CASE
+
+ BEGIN_LITOPX_CASE(JSOP_XMLCOMMENT)
+ todo = SprintPut(&ss->sprinter, "<!--", 4);
+ if (!QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0))
+ return NULL;
+ SprintPut(&ss->sprinter, "-->", 3);
+ END_LITOPX_CASE
+
+ BEGIN_LITOPX_CASE(JSOP_XMLPI)
+ rval = JS_strdup(cx, POP_STR());
+ if (!rval)
+ return NULL;
+ todo = SprintPut(&ss->sprinter, "<?", 2);
+ ok = QuoteString(&ss->sprinter, ATOM_TO_STRING(atom), 0) &&
+ (*rval == '\0' ||
+ (SprintPut(&ss->sprinter, " ", 1) >= 0 &&
+ SprintCString(&ss->sprinter, rval)));
+ JS_free(cx, (char *)rval);
+ if (!ok)
+ return NULL;
+ SprintPut(&ss->sprinter, "?>", 2);
+ END_LITOPX_CASE
+
+ case JSOP_GETFUNNS:
+ todo = SprintPut(&ss->sprinter, js_function_str, 8);
+ break;
+#endif /* JS_HAS_XML_SUPPORT */
+
+ default:
+ todo = -2;
+ break;
+
+#undef BEGIN_LITOPX_CASE
+#undef END_LITOPX_CASE
+ }
+ }
+
+ if (todo < 0) {
+ /* -2 means "don't push", -1 means reported error. */
+ if (todo == -1)
+ return NULL;
+ } else {
+ if (!PushOff(ss, todo, saveop))
+ return NULL;
+ }
+ pc += len;
+ }
+
+/*
+ * Undefine local macros.
+ */
+#undef inXML
+#undef DECOMPILE_CODE
+#undef POP_STR
+#undef LOCAL_ASSERT
+#undef ATOM_IS_IDENTIFIER
+#undef GET_QUOTE_AND_FMT
+#undef GET_ATOM_QUOTE_AND_FMT
+
+ return pc;
+}
+
+static JSBool
+InitSprintStack(JSContext *cx, SprintStack *ss, JSPrinter *jp, uintN depth)
+{
+ size_t offsetsz, opcodesz;
+ void *space;
+
+ INIT_SPRINTER(cx, &ss->sprinter, &cx->tempPool, PAREN_SLOP);
+
+ /* Allocate the parallel (to avoid padding) offset and opcode stacks. */
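+ /* Both arrays come from a single arena allocation: offsets first,
+ then opcodes, so neither incurs separate padding. */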
+ offsetsz = depth * sizeof(ptrdiff_t);
+ opcodesz = depth * sizeof(jsbytecode);
+ JS_ARENA_ALLOCATE(space, &cx->tempPool, offsetsz + opcodesz);
+ if (!space)
+ return JS_FALSE;
+ ss->offsets = (ptrdiff_t *) space;
+ ss->opcodes = (jsbytecode *) ((char *)space + offsetsz);
+
+ ss->top = ss->inArrayInit = 0;
+ ss->printer = jp;
+ return JS_TRUE;
+}
+
+JSBool
+js_DecompileCode(JSPrinter *jp, JSScript *script, jsbytecode *pc, uintN len,
+ uintN pcdepth)
+{
+ uintN depth, i;
+ SprintStack ss;
+ JSContext *cx;
+ void *mark;
+ JSBool ok;
+ JSScript *oldscript;
+ char *last;
+
+ depth = script->depth;
+ JS_ASSERT(pcdepth <= depth);
+
+ /* Initialize a sprinter for use with the offset stack. */
+ cx = jp->sprinter.context;
+ mark = JS_ARENA_MARK(&cx->tempPool);
+ ok = InitSprintStack(cx, &ss, jp, depth);
+ if (!ok)
+ goto out;
+
+ /*
+ * If we are called from js_DecompileValueGenerator with a portion of
+ * script's bytecode that starts with a non-zero model stack depth given
+ * by pcdepth, attempt to initialize the missing string offsets in ss to
+ * |spindex| negative indexes from fp->sp for the activation fp in which
+ * the error arose.
+ *
+ * See js_DecompileValueGenerator for how its |spindex| parameter is used,
+ * and see also GetOff, which makes use of the ss.offsets[i] < -1 that are
+ * potentially stored below.
+ */
+ ss.top = pcdepth;
+ if (pcdepth != 0) {
+ JSStackFrame *fp;
+ ptrdiff_t top;
+
+ for (fp = cx->fp; fp && !fp->script; fp = fp->down)
+ continue;
+ top = fp ? fp->sp - fp->spbase : 0;
+ for (i = 0; i < pcdepth; i++) {
+ ss.offsets[i] = -1;
+ ss.opcodes[i] = JSOP_NOP;
+ }
+ if (fp && fp->pc == pc && (uintN)top == pcdepth) {
+ for (i = 0; i < pcdepth; i++) {
+ ptrdiff_t off;
+ jsbytecode *genpc;
+
+ off = (intN)i - (intN)depth;
+ genpc = (jsbytecode *) fp->spbase[off];
+ if (JS_UPTRDIFF(genpc, script->code) < script->length) {
+ ss.offsets[i] += (ptrdiff_t)i - top;
+ ss.opcodes[i] = *genpc;
+ }
+ }
+ }
+ }
+
+ /* Call recursive subroutine to do the hard work. */
+ oldscript = jp->script;
+ jp->script = script;
+ ok = Decompile(&ss, pc, len) != NULL;
+ jp->script = oldscript;
+
+ /* If the given code didn't empty the stack, do it now. */
+ if (ss.top) {
+ do {
+ last = OFF2STR(&ss.sprinter, PopOff(&ss, JSOP_POP));
+ } while (ss.top > pcdepth);
+ js_printf(jp, "%s", last);
+ }
+
+out:
+ /* Free all temporary stuff allocated under this call. */
+ JS_ARENA_RELEASE(&cx->tempPool, mark);
+ return ok;
+}
+
+JSBool
+js_DecompileScript(JSPrinter *jp, JSScript *script)
+{
+ return js_DecompileCode(jp, script, script->code, (uintN)script->length, 0);
+}
+
+static const char native_code_str[] = "\t[native code]\n";
+
+JSBool
+js_DecompileFunctionBody(JSPrinter *jp, JSFunction *fun)
+{
+ JSScript *script;
+ JSScope *scope, *save;
+ JSBool ok;
+
+ if (!FUN_INTERPRETED(fun)) {
+ js_printf(jp, native_code_str);
+ return JS_TRUE;
+ }
+ script = fun->u.i.script;
+ scope = fun->object ? OBJ_SCOPE(fun->object) : NULL;
+ save = jp->scope;
+ jp->scope = scope;
+ ok = js_DecompileCode(jp, script, script->code, (uintN)script->length, 0);
+ jp->scope = save;
+ return ok;
+}
+
+JSBool
+js_DecompileFunction(JSPrinter *jp, JSFunction *fun)
+{
+ JSContext *cx;
+ uintN i, nargs, indent;
+ void *mark;
+ JSAtom **params;
+ JSScope *scope, *oldscope;
+ JSScopeProperty *sprop;
+ jsbytecode *pc, *endpc;
+ ptrdiff_t len;
+ JSBool ok;
+
+ /*
+ * If pretty, conform to ECMA-262 Edition 3, 15.3.4.2, by decompiling a
+ * FunctionDeclaration. Otherwise, check the JSFUN_LAMBDA flag and force
+ * an expression by parenthesizing.
+ */
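+ /* Example (illustrative): a non-pretty, ungrouped lambda prints as
+ "(function (x) { ... })", with the parens added here and at the end. */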
+ if (jp->pretty) {
+ js_printf(jp, "\t");
+ } else {
+ if (!jp->grouped && (fun->flags & JSFUN_LAMBDA))
+ js_puts(jp, "(");
+ }
+ if (JSFUN_GETTER_TEST(fun->flags))
+ js_printf(jp, "%s ", js_getter_str);
+ else if (JSFUN_SETTER_TEST(fun->flags))
+ js_printf(jp, "%s ", js_setter_str);
+
+ js_printf(jp, "%s ", js_function_str);
+ if (fun->atom && !QuoteString(&jp->sprinter, ATOM_TO_STRING(fun->atom), 0))
+ return JS_FALSE;
+ js_puts(jp, "(");
+
+ if (FUN_INTERPRETED(fun) && fun->object) {
+ size_t paramsize;
+#ifdef JS_HAS_DESTRUCTURING
+ SprintStack ss;
+ JSScript *oldscript;
+#endif
+
+ /*
+ * Print the parameters.
+ *
+ * This code is complicated by the need to handle duplicate parameter
+ * names, as required by ECMA (bah!). A duplicate parameter is stored
+ * as another node with the same id (the parameter name) but different
+ * shortid (the argument index) along the property tree ancestor line
+ * starting at SCOPE_LAST_PROP(scope). Only the last duplicate param
+ * is mapped by the scope's hash table.
+ */
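+ /* Example (illustrative): in "function f(a, a) {}" both nodes share the
+ id 'a' with shortids 0 and 1; only the last one is in the hash table. */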
+ cx = jp->sprinter.context;
+ nargs = fun->nargs;
+ mark = JS_ARENA_MARK(&cx->tempPool);
+ paramsize = nargs * sizeof(JSAtom *);
+ JS_ARENA_ALLOCATE_CAST(params, JSAtom **, &cx->tempPool, paramsize);
+ if (!params) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+
+ memset(params, 0, paramsize);
+ scope = OBJ_SCOPE(fun->object);
+ for (sprop = SCOPE_LAST_PROP(scope); sprop; sprop = sprop->parent) {
+ if (sprop->getter != js_GetArgument)
+ continue;
+ JS_ASSERT(sprop->flags & SPROP_HAS_SHORTID);
+ JS_ASSERT((uint16) sprop->shortid < nargs);
+ JS_ASSERT(JSID_IS_ATOM(sprop->id));
+ params[(uint16) sprop->shortid] = JSID_TO_ATOM(sprop->id);
+ }
+
+ pc = fun->u.i.script->main;
+ endpc = pc + fun->u.i.script->length;
+ ok = JS_TRUE;
+
+#ifdef JS_HAS_DESTRUCTURING
+ /* Skip JSOP_GENERATOR in case of destructuring parameters. */
+ if (*pc == JSOP_GENERATOR)
+ pc += JSOP_GENERATOR_LENGTH;
+
+ ss.printer = NULL;
+ oldscript = jp->script;
+ jp->script = fun->u.i.script;
+ oldscope = jp->scope;
+ jp->scope = scope;
+#endif
+
+ for (i = 0; i < nargs; i++) {
+ if (i > 0)
+ js_puts(jp, ", ");
+
+#if JS_HAS_DESTRUCTURING
+#define LOCAL_ASSERT(expr) LOCAL_ASSERT_RV(expr, JS_FALSE)
+
+ if (!params[i]) {
+ ptrdiff_t todo;
+ const char *lval;
+
+ LOCAL_ASSERT(*pc == JSOP_GETARG);
+ pc += JSOP_GETARG_LENGTH;
+ LOCAL_ASSERT(*pc == JSOP_DUP);
+ if (!ss.printer) {
+ ok = InitSprintStack(cx, &ss, jp, fun->u.i.script->depth);
+ if (!ok)
+ break;
+ }
+ pc = DecompileDestructuring(&ss, pc, endpc);
+ if (!pc) {
+ ok = JS_FALSE;
+ break;
+ }
+ LOCAL_ASSERT(*pc == JSOP_POP);
+ pc += JSOP_POP_LENGTH;
+ lval = PopStr(&ss, JSOP_NOP);
+ todo = SprintCString(&jp->sprinter, lval);
+ if (todo < 0) {
+ ok = JS_FALSE;
+ break;
+ }
+ continue;
+ }
+
+#undef LOCAL_ASSERT
+#endif
+
+ if (!QuoteString(&jp->sprinter, ATOM_TO_STRING(params[i]), 0)) {
+ ok = JS_FALSE;
+ break;
+ }
+ }
+
+#ifdef JS_HAS_DESTRUCTURING
+ jp->script = oldscript;
+ jp->scope = oldscope;
+#endif
+ JS_ARENA_RELEASE(&cx->tempPool, mark);
+ if (!ok)
+ return JS_FALSE;
+#ifdef __GNUC__
+ } else {
+ scope = NULL;
+ pc = NULL;
+#endif
+ }
+
+ js_printf(jp, ") {\n");
+ indent = jp->indent;
+ jp->indent += 4;
+ if (FUN_INTERPRETED(fun) && fun->object) {
+ oldscope = jp->scope;
+ jp->scope = scope;
+ len = fun->u.i.script->code + fun->u.i.script->length - pc;
+ ok = js_DecompileCode(jp, fun->u.i.script, pc, (uintN)len, 0);
+ jp->scope = oldscope;
+ if (!ok) {
+ jp->indent = indent;
+ return JS_FALSE;
+ }
+ } else {
+ js_printf(jp, native_code_str);
+ }
+ jp->indent -= 4;
+ js_printf(jp, "\t}");
+
+ if (!jp->pretty) {
+ if (!jp->grouped && (fun->flags & JSFUN_LAMBDA))
+ js_puts(jp, ")");
+ }
+ return JS_TRUE;
+}
+
+#undef LOCAL_ASSERT_RV
+
+JSString *
+js_DecompileValueGenerator(JSContext *cx, intN spindex, jsval v,
+ JSString *fallback)
+{
+ JSStackFrame *fp, *down;
+ jsbytecode *pc, *begin, *end;
+ jsval *sp, *spbase, *base, *limit;
+ intN depth, pcdepth;
+ JSScript *script;
+ JSOp op;
+ const JSCodeSpec *cs;
+ jssrcnote *sn;
+ ptrdiff_t len, oplen;
+ JSPrinter *jp;
+ JSString *name;
+
+ for (fp = cx->fp; fp && !fp->script; fp = fp->down)
+ continue;
+ if (!fp)
+ goto do_fallback;
+
+ /* Try to find sp's generating pc depth slots under it on the stack. */
+ pc = fp->pc;
+ sp = fp->sp;
+ spbase = fp->spbase;
+ if ((uintN)(sp - spbase) > fp->script->depth) {
+ /*
+ * Preparing to make an internal invocation, using an argv stack
+ * segment pushed just above fp's operand stack space. Such an argv
+ * stack has no generating pc "basement", so we must fall back.
+ */
+ goto do_fallback;
+ }
+
+ if (spindex == JSDVG_SEARCH_STACK) {
+ if (!pc) {
+ /*
+ * Current frame is native: look under it for a scripted call
+ * in which a decompilable bytecode string that generated the
+ * value as an actual argument might exist.
+ */
+ JS_ASSERT(!fp->script && !(fp->fun && FUN_INTERPRETED(fp->fun)));
+ down = fp->down;
+ if (!down)
+ goto do_fallback;
+ script = down->script;
+ spbase = down->spbase;
+ base = fp->argv;
+ limit = base + fp->argc;
+ } else {
+ /*
+ * This should be a script activation, either a top-level
+ * script or a scripted function. But be paranoid about calls
+ * to js_DecompileValueGenerator from code that hasn't fully
+ * initialized a (default-all-zeroes) frame.
+ */
+ script = fp->script;
+ spbase = base = fp->spbase;
+ limit = fp->sp;
+ }
+
+ /*
+ * Pure paranoia about default-zeroed frames being active while
+ * js_DecompileValueGenerator is called. It can't hurt much now;
+ * error reporting performance is not an issue.
+ */
+ if (!script || !base || !limit)
+ goto do_fallback;
+
+ /*
+ * Try to find operand-generating pc depth slots below sp.
+ *
+ * In the native case, we know the arguments have generating pc's
+ * under them, on account of fp->down->script being non-null: all
+ * compiled scripts get depth slots for generating pc's allocated
+ * upon activation, at the top of js_Interpret.
+ *
+ * In the script or scripted function case, the same reasoning
+ * applies to fp rather than to fp->down.
+ *
+         * We search from limit to base to find the most recently calculated
+         * value matching v, on the assumption that it is the one that caused
+         * the exception; see bug 328664.
+ */
+ for (sp = limit;;) {
+ if (sp <= base)
+ goto do_fallback;
+ --sp;
+ if (*sp == v) {
+ depth = (intN)script->depth;
+ sp -= depth;
+ pc = (jsbytecode *) *sp;
+ break;
+ }
+ }
+ } else {
+ /*
+ * At this point, pc may or may not be null, i.e., we could be in
+ * a script activation, or we could be in a native frame that was
+ * called by another native function. Check pc and script.
+ */
+ if (!pc)
+ goto do_fallback;
+ script = fp->script;
+ if (!script)
+ goto do_fallback;
+
+ if (spindex != JSDVG_IGNORE_STACK) {
+ JS_ASSERT(spindex < 0);
+ depth = (intN)script->depth;
+#if !JS_HAS_NO_SUCH_METHOD
+ JS_ASSERT(-depth <= spindex);
+#endif
+ spindex -= depth;
+
+ base = (jsval *) cx->stackPool.current->base;
+ limit = (jsval *) cx->stackPool.current->avail;
+ sp = fp->sp + spindex;
+ if (JS_UPTRDIFF(sp, base) < JS_UPTRDIFF(limit, base))
+ pc = (jsbytecode *) *sp;
+ }
+ }
+
+ /*
+ * Again, be paranoid, this time about possibly loading an invalid pc
+ * from fp->sp[-(1+depth)].
+ */
+ if (JS_UPTRDIFF(pc, script->code) >= (jsuword)script->length) {
+ pc = fp->pc;
+ if (!pc)
+ goto do_fallback;
+ }
+ op = (JSOp) *pc;
+ if (op == JSOP_TRAP)
+ op = JS_GetTrapOpcode(cx, script, pc);
+
+ /* None of these stack-writing ops generates novel values. */
+ JS_ASSERT(op != JSOP_CASE && op != JSOP_CASEX &&
+ op != JSOP_DUP && op != JSOP_DUP2 &&
+ op != JSOP_SWAP);
+
+ /*
+ * |this| could convert to a very long object initialiser, so cite it by
+ * its keyword name instead.
+ */
+ if (op == JSOP_THIS)
+ return JS_NewStringCopyZ(cx, js_this_str);
+
+ /*
+ * JSOP_BINDNAME is special: it generates a value, the base object of a
+ * reference. But if it is the generating op for a diagnostic produced by
+ * js_DecompileValueGenerator, the name being bound is irrelevant. Just
+ * fall back to the base object.
+ */
+ if (op == JSOP_BINDNAME)
+ goto do_fallback;
+
+ /* NAME ops are self-contained, others require left or right context. */
+ cs = &js_CodeSpec[op];
+ begin = pc;
+ end = pc + cs->length;
+ if ((cs->format & JOF_MODEMASK) != JOF_NAME) {
+ JSSrcNoteType noteType;
+
+ sn = js_GetSrcNote(script, pc);
+ if (!sn)
+ goto do_fallback;
+ noteType = SN_TYPE(sn);
+ if (noteType == SRC_PCBASE) {
+ begin -= js_GetSrcNoteOffset(sn, 0);
+ } else if (noteType == SRC_PCDELTA) {
+ end = begin + js_GetSrcNoteOffset(sn, 0);
+ begin += cs->length;
+ } else {
+ goto do_fallback;
+ }
+ }
+ len = PTRDIFF(end, begin, jsbytecode);
+ if (len <= 0)
+ goto do_fallback;
+
+ /*
+ * Walk forward from script->main and compute starting stack depth.
+ * FIXME: Code to compute oplen copied from js_Disassemble1 and reduced.
+ * FIXME: Optimize to use last empty-stack sequence point.
+ */
+ pcdepth = 0;
+ for (pc = script->main; pc < begin; pc += oplen) {
+ jsbytecode *pc2;
+ uint32 type;
+ intN nuses, ndefs;
+
+ /* Let pc2 be non-null only for JSOP_LITOPX. */
+ pc2 = NULL;
+ op = (JSOp) *pc;
+ if (op == JSOP_TRAP)
+ op = JS_GetTrapOpcode(cx, script, pc);
+ cs = &js_CodeSpec[op];
+ oplen = cs->length;
+
+ if (op == JSOP_SETSP) {
+ pcdepth = GET_UINT16(pc);
+ continue;
+ }
+
+ /*
+ * A (C ? T : E) expression requires skipping either T (if begin is in
+ * E) or both T and E (if begin is after the whole expression) before
+ * adjusting pcdepth based on the JSOP_IFEQ or JSOP_IFEQX at pc that
+ * tests condition C. We know that the stack depth can't change from
+ * what it was with C on top of stack.
+ */
+ sn = js_GetSrcNote(script, pc);
+ if (sn && SN_TYPE(sn) == SRC_COND) {
+ ptrdiff_t jmpoff, jmplen;
+
+ jmpoff = js_GetSrcNoteOffset(sn, 0);
+ if (pc + jmpoff < begin) {
+ pc += jmpoff;
+ op = *pc;
+ JS_ASSERT(op == JSOP_GOTO || op == JSOP_GOTOX);
+ cs = &js_CodeSpec[op];
+ oplen = cs->length;
+ jmplen = GetJumpOffset(pc, pc);
+ if (pc + jmplen < begin) {
+ oplen = (uintN) jmplen;
+ continue;
+ }
+
+ /*
+ * Ok, begin lies in E. Manually pop C off the model stack,
+ * since we have moved beyond the IFEQ now.
+ */
+ --pcdepth;
+ }
+ }
+
+ type = cs->format & JOF_TYPEMASK;
+ switch (type) {
+ case JOF_TABLESWITCH:
+ case JOF_TABLESWITCHX:
+ {
+ jsint jmplen, i, low, high;
+
+ jmplen = (type == JOF_TABLESWITCH) ? JUMP_OFFSET_LEN
+ : JUMPX_OFFSET_LEN;
+ pc2 = pc;
+ pc2 += jmplen;
+ low = GET_JUMP_OFFSET(pc2);
+ pc2 += JUMP_OFFSET_LEN;
+ high = GET_JUMP_OFFSET(pc2);
+ pc2 += JUMP_OFFSET_LEN;
+ for (i = low; i <= high; i++)
+ pc2 += jmplen;
+ oplen = 1 + pc2 - pc;
+ break;
+ }
+
+ case JOF_LOOKUPSWITCH:
+ case JOF_LOOKUPSWITCHX:
+ {
+ jsint jmplen;
+ jsbytecode *pc2;
+ jsatomid npairs;
+
+ jmplen = (type == JOF_LOOKUPSWITCH) ? JUMP_OFFSET_LEN
+ : JUMPX_OFFSET_LEN;
+ pc2 = pc;
+ pc2 += jmplen;
+ npairs = GET_ATOM_INDEX(pc2);
+ pc2 += ATOM_INDEX_LEN;
+ while (npairs) {
+ pc2 += ATOM_INDEX_LEN;
+ pc2 += jmplen;
+ npairs--;
+ }
+ oplen = 1 + pc2 - pc;
+ break;
+ }
+
+ case JOF_LITOPX:
+ pc2 = pc + 1 + LITERAL_INDEX_LEN;
+ op = *pc2;
+ cs = &js_CodeSpec[op];
+ JS_ASSERT(cs->length > ATOM_INDEX_LEN);
+ oplen += cs->length - (1 + ATOM_INDEX_LEN);
+ break;
+
+ default:;
+ }
+
+ if (sn && SN_TYPE(sn) == SRC_HIDDEN)
+ continue;
+
+ nuses = cs->nuses;
+ if (nuses < 0) {
+ /* Call opcode pushes [callee, this, argv...]. */
+ nuses = 2 + GET_ARGC(pc);
+ } else if (op == JSOP_RETSUB) {
+ /* Pop [exception or hole, retsub pc-index]. */
+ JS_ASSERT(nuses == 0);
+ nuses = 2;
+ } else if (op == JSOP_LEAVEBLOCK || op == JSOP_LEAVEBLOCKEXPR) {
+ JS_ASSERT(nuses == 0);
+ nuses = GET_UINT16(pc);
+ }
+ pcdepth -= nuses;
+ JS_ASSERT(pcdepth >= 0);
+
+ ndefs = cs->ndefs;
+ if (op == JSOP_FINALLY) {
+ /* Push [exception or hole, retsub pc-index]. */
+ JS_ASSERT(ndefs == 0);
+ ndefs = 2;
+ } else if (op == JSOP_ENTERBLOCK) {
+ jsatomid atomIndex;
+ JSAtom *atom;
+ JSObject *obj;
+
+ JS_ASSERT(ndefs == 0);
+ atomIndex = pc2 ? GET_LITERAL_INDEX(pc) : GET_ATOM_INDEX(pc);
+ atom = js_GetAtom(cx, &script->atomMap, atomIndex);
+ obj = ATOM_TO_OBJECT(atom);
+ JS_ASSERT(OBJ_BLOCK_DEPTH(cx, obj) == pcdepth);
+ ndefs = OBJ_BLOCK_COUNT(cx, obj);
+ }
+ pcdepth += ndefs;
+ }
+
+ name = NULL;
+ jp = js_NewPrinter(cx, "js_DecompileValueGenerator", 0, JS_FALSE);
+ if (jp) {
+ if (fp->fun && fp->fun->object) {
+ JS_ASSERT(OBJ_IS_NATIVE(fp->fun->object));
+ jp->scope = OBJ_SCOPE(fp->fun->object);
+ }
+ jp->dvgfence = end;
+ if (js_DecompileCode(jp, script, begin, (uintN)len, (uintN)pcdepth))
+ name = js_GetPrinterOutput(jp);
+ js_DestroyPrinter(jp);
+ }
+ return name;
+
+ do_fallback:
+ return fallback ? fallback : js_ValueToSource(cx, v);
+}
diff --git a/src/third_party/js-1.7/jsopcode.h b/src/third_party/js-1.7/jsopcode.h
new file mode 100644
index 00000000000..3f7e1de9d5c
--- /dev/null
+++ b/src/third_party/js-1.7/jsopcode.h
@@ -0,0 +1,318 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsopcode_h___
+#define jsopcode_h___
+/*
+ * JS bytecode definitions.
+ */
+#include <stddef.h>
+#include "jsprvtd.h"
+#include "jspubtd.h"
+#include "jsutil.h"
+
+JS_BEGIN_EXTERN_C
+
+/*
+ * JS operation bytecodes.
+ */
+typedef enum JSOp {
+#define OPDEF(op,val,name,token,length,nuses,ndefs,prec,format) \
+ op = val,
+#include "jsopcode.tbl"
+#undef OPDEF
+ JSOP_LIMIT
+} JSOp;
+
+typedef enum JSOpLength {
+#define OPDEF(op,val,name,token,length,nuses,ndefs,prec,format) \
+ op##_LENGTH = length,
+#include "jsopcode.tbl"
+#undef OPDEF
+ JSOP_LIMIT_LENGTH
+} JSOpLength;
+
+/*
+ * JS bytecode formats.
+ */
+#define JOF_BYTE 0 /* single bytecode, no immediates */
+#define JOF_JUMP 1 /* signed 16-bit jump offset immediate */
+#define JOF_CONST 2 /* unsigned 16-bit constant pool index */
+#define JOF_UINT16 3 /* unsigned 16-bit immediate operand */
+#define JOF_TABLESWITCH 4 /* table switch */
+#define JOF_LOOKUPSWITCH 5 /* lookup switch */
+#define JOF_QARG 6 /* quickened get/set function argument ops */
+#define JOF_QVAR 7 /* quickened get/set local variable ops */
+#define JOF_INDEXCONST 8 /* uint16 slot index + constant pool index */
+#define JOF_JUMPX 9 /* signed 32-bit jump offset immediate */
+#define JOF_TABLESWITCHX 10 /* extended (32-bit offset) table switch */
+#define JOF_LOOKUPSWITCHX 11 /* extended (32-bit offset) lookup switch */
+#define JOF_UINT24 12 /* extended unsigned 24-bit literal (index) */
+#define JOF_LITOPX       13      /* JOF_UINT24 literal index followed by the
+                                    op being extended; when that op is
+                                    JOF_CONST it omits its own 16-bit
+                                    immediate operand */
+#define JOF_LOCAL 14 /* block-local operand stack variable */
+#define JOF_TYPEMASK 0x000f /* mask for above immediate types */
+#define JOF_NAME 0x0010 /* name operation */
+#define JOF_PROP 0x0020 /* obj.prop operation */
+#define JOF_ELEM 0x0030 /* obj[index] operation */
+#define JOF_MODEMASK 0x0030 /* mask for above addressing modes */
+#define JOF_SET 0x0040 /* set (i.e., assignment) operation */
+#define JOF_DEL 0x0080 /* delete operation */
+#define JOF_DEC 0x0100 /* decrement (--, not ++) opcode */
+#define JOF_INC 0x0200 /* increment (++, not --) opcode */
+#define JOF_INCDEC 0x0300 /* increment or decrement opcode */
+#define JOF_POST 0x0400 /* postorder increment or decrement */
+#define JOF_IMPORT 0x0800 /* import property op */
+#define JOF_FOR 0x1000 /* for-in property op */
+#define JOF_ASSIGNING JOF_SET /* hint for JSClass.resolve, used for ops
+ that do simplex assignment */
+#define JOF_DETECTING 0x2000 /* object detection flag for JSNewResolveOp */
+#define JOF_BACKPATCH 0x4000 /* backpatch placeholder during codegen */
+#define JOF_LEFTASSOC 0x8000 /* left-associative operator */
+#define JOF_DECLARING 0x10000 /* var, const, or function declaration op */
+#define JOF_XMLNAME 0x20000 /* XML name: *, a::b, @a, @a::b, etc. */
+
+#define JOF_TYPE_IS_EXTENDED_JUMP(t) \
+ ((unsigned)((t) - JOF_JUMPX) <= (unsigned)(JOF_LOOKUPSWITCHX - JOF_JUMPX))
+
+/*
+ * Immediate operand getters, setters, and bounds.
+ */
+
+/* Short (2-byte signed offset) relative jump macros. */
+#define JUMP_OFFSET_LEN 2
+#define JUMP_OFFSET_HI(off) ((jsbytecode)((off) >> 8))
+#define JUMP_OFFSET_LO(off) ((jsbytecode)(off))
+#define GET_JUMP_OFFSET(pc) ((int16)(((pc)[1] << 8) | (pc)[2]))
+#define SET_JUMP_OFFSET(pc,off) ((pc)[1] = JUMP_OFFSET_HI(off), \
+ (pc)[2] = JUMP_OFFSET_LO(off))
+#define JUMP_OFFSET_MIN ((int16)0x8000)
+#define JUMP_OFFSET_MAX ((int16)0x7fff)
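Editorial note: the SET/GET pair above stores a signed 16-bit jump displacement big-endian in the two bytes that follow the opcode byte. A self-contained round-trip sketch (illustration only, not part of the original file; jsbytecode and int16 are stand-in typedefs for the engine's own types):

#include <assert.h>
#include <stdint.h>

typedef uint8_t jsbytecode;              /* stand-in for the engine typedef */
typedef int16_t int16;                   /* stand-in for the engine typedef */

#define JUMP_OFFSET_HI(off)     ((jsbytecode)((off) >> 8))
#define JUMP_OFFSET_LO(off)     ((jsbytecode)(off))
#define GET_JUMP_OFFSET(pc)     ((int16)(((pc)[1] << 8) | (pc)[2]))
#define SET_JUMP_OFFSET(pc,off) ((pc)[1] = JUMP_OFFSET_HI(off),              \
                                 (pc)[2] = JUMP_OFFSET_LO(off))

int main(void)
{
    jsbytecode code[3] = { 6 /* JSOP_GOTO */, 0, 0 };

    /* Negative displacements (backward jumps) survive the round trip
       because the high byte keeps the sign bits. */
    SET_JUMP_OFFSET(code, -300);
    assert(GET_JUMP_OFFSET(code) == -300);
    return 0;
}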
+
+/*
+ * When a short jump won't hold a relative offset, its 2-byte immediate offset
+ * operand is an unsigned index of a span-dependency record, maintained until
+ * code generation finishes -- after which some (but we hope not nearly all)
+ * span-dependent jumps must be extended (see OptimizeSpanDeps in jsemit.c).
+ *
+ * If the span-dependency record index overflows SPANDEP_INDEX_MAX, the jump
+ * offset will contain SPANDEP_INDEX_HUGE, indicating that the record must be
+ * found (via binary search) by its "before span-dependency optimization" pc
+ * offset (from script main entry point).
+ */
+#define GET_SPANDEP_INDEX(pc) ((uint16)(((pc)[1] << 8) | (pc)[2]))
+#define SET_SPANDEP_INDEX(pc,i) ((pc)[1] = JUMP_OFFSET_HI(i), \
+ (pc)[2] = JUMP_OFFSET_LO(i))
+#define SPANDEP_INDEX_MAX ((uint16)0xfffe)
+#define SPANDEP_INDEX_HUGE ((uint16)0xffff)
+
+/* Ultimately, if short jumps won't do, emit long (4-byte signed) offsets. */
+#define JUMPX_OFFSET_LEN 4
+#define JUMPX_OFFSET_B3(off) ((jsbytecode)((off) >> 24))
+#define JUMPX_OFFSET_B2(off) ((jsbytecode)((off) >> 16))
+#define JUMPX_OFFSET_B1(off) ((jsbytecode)((off) >> 8))
+#define JUMPX_OFFSET_B0(off) ((jsbytecode)(off))
+#define GET_JUMPX_OFFSET(pc) ((int32)(((pc)[1] << 24) | ((pc)[2] << 16) \
+ | ((pc)[3] << 8) | (pc)[4]))
+#define SET_JUMPX_OFFSET(pc,off)((pc)[1] = JUMPX_OFFSET_B3(off), \
+ (pc)[2] = JUMPX_OFFSET_B2(off), \
+ (pc)[3] = JUMPX_OFFSET_B1(off), \
+ (pc)[4] = JUMPX_OFFSET_B0(off))
+#define JUMPX_OFFSET_MIN ((int32)0x80000000)
+#define JUMPX_OFFSET_MAX ((int32)0x7fffffff)
+
+/*
+ * A literal is indexed by a per-script atom map. Most scripts have relatively
+ * few literals, so the standard JOF_CONST format specifies a fixed 16 bits of
+ * immediate operand index. A script with more than 64K literals must push all
+ * high-indexed literals on the stack using JSOP_LITERAL, then use JOF_ELEM ops
+ * instead of JOF_PROP, etc.
+ */
+#define ATOM_INDEX_LEN 2
+#define ATOM_INDEX_HI(i) ((jsbytecode)((i) >> 8))
+#define ATOM_INDEX_LO(i) ((jsbytecode)(i))
+#define GET_ATOM_INDEX(pc) ((jsatomid)(((pc)[1] << 8) | (pc)[2]))
+#define SET_ATOM_INDEX(pc,i) ((pc)[1] = ATOM_INDEX_HI(i), \
+ (pc)[2] = ATOM_INDEX_LO(i))
+#define GET_ATOM(cx,script,pc) js_GetAtom((cx), &(script)->atomMap, \
+ GET_ATOM_INDEX(pc))
+
+/* A full atom index for JSOP_UINT24 uses 24 bits of immediate operand. */
+#define UINT24_HI(i) ((jsbytecode)((i) >> 16))
+#define UINT24_MID(i) ((jsbytecode)((i) >> 8))
+#define UINT24_LO(i) ((jsbytecode)(i))
+#define GET_UINT24(pc) ((jsatomid)(((pc)[1] << 16) | \
+ ((pc)[2] << 8) | \
+ (pc)[3]))
+#define SET_UINT24(pc,i) ((pc)[1] = UINT24_HI(i), \
+ (pc)[2] = UINT24_MID(i), \
+ (pc)[3] = UINT24_LO(i))
+
+/* Same format for JSOP_LITERAL, etc., but future-proof with different names. */
+#define LITERAL_INDEX_LEN 3
+#define LITERAL_INDEX_HI(i) UINT24_HI(i)
+#define LITERAL_INDEX_MID(i) UINT24_MID(i)
+#define LITERAL_INDEX_LO(i) UINT24_LO(i)
+#define GET_LITERAL_INDEX(pc) GET_UINT24(pc)
+#define SET_LITERAL_INDEX(pc,i) SET_UINT24(pc,i)
+
+/* Atom index limit is determined by SN_3BYTE_OFFSET_FLAG, see jsemit.h. */
+#define ATOM_INDEX_LIMIT_LOG2 23
+#define ATOM_INDEX_LIMIT ((uint32)1 << ATOM_INDEX_LIMIT_LOG2)
+
+JS_STATIC_ASSERT(sizeof(jsatomid) * JS_BITS_PER_BYTE >=
+ ATOM_INDEX_LIMIT_LOG2 + 1);
+
+/* Common uint16 immediate format helpers. */
+#define UINT16_HI(i) ((jsbytecode)((i) >> 8))
+#define UINT16_LO(i) ((jsbytecode)(i))
+#define GET_UINT16(pc) ((uintN)(((pc)[1] << 8) | (pc)[2]))
+#define SET_UINT16(pc,i) ((pc)[1] = UINT16_HI(i), (pc)[2] = UINT16_LO(i))
+#define UINT16_LIMIT ((uintN)1 << 16)
+
+/* Actual argument count operand format helpers. */
+#define ARGC_HI(argc) UINT16_HI(argc)
+#define ARGC_LO(argc) UINT16_LO(argc)
+#define GET_ARGC(pc) GET_UINT16(pc)
+#define ARGC_LIMIT UINT16_LIMIT
+
+/* Synonyms for quick JOF_QARG and JOF_QVAR bytecodes. */
+#define GET_ARGNO(pc) GET_UINT16(pc)
+#define SET_ARGNO(pc,argno) SET_UINT16(pc,argno)
+#define ARGNO_LEN 2
+#define ARGNO_LIMIT UINT16_LIMIT
+
+#define GET_VARNO(pc) GET_UINT16(pc)
+#define SET_VARNO(pc,varno) SET_UINT16(pc,varno)
+#define VARNO_LEN 2
+#define VARNO_LIMIT UINT16_LIMIT
+
+struct JSCodeSpec {
+ const char *name; /* JS bytecode name */
+ const char *token; /* JS source literal or null */
+ int8 length; /* length including opcode byte */
+ int8 nuses; /* arity, -1 if variadic */
+ int8 ndefs; /* number of stack results */
+ uint8 prec; /* operator precedence */
+ uint32 format; /* immediate operand format */
+};
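Editorial note: together, length, nuses, and ndefs let a consumer step through a bytecode stream and model operand-stack depth, which is what the decompiler's pc-depth pass in jsopcode.c does. A reduced sketch of that idea (illustration only; it leans on the engine's own JSScript/jsbytecode types and the js_CodeSpec table declared above, and it deliberately skips variable-length switches, cs->length < 0, and variadic calls, cs->nuses < 0, which need the extra handling shown in jsopcode.c):

static intN
ModelDepthAt(JSScript *script, jsbytecode *target)
{
    jsbytecode *pc = script->main;
    intN depth = 0;

    while (pc < target) {
        const JSCodeSpec *cs = &js_CodeSpec[(JSOp) *pc];

        JS_ASSERT(cs->length > 0 && cs->nuses >= 0); /* fixed-arity ops only */
        depth -= cs->nuses;                          /* operands consumed    */
        depth += cs->ndefs;                          /* results produced     */
        pc += cs->length;
    }
    return depth;
}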
+
+extern const JSCodeSpec js_CodeSpec[];
+extern uintN js_NumCodeSpecs;
+extern const jschar js_EscapeMap[];
+
+/*
+ * Return a GC'ed string containing the chars in str, with any non-printing
+ * chars or quotes (' or " as specified by the quote argument) escaped, and
+ * with the quote character at the beginning and end of the result string.
+ */
+extern JSString *
+js_QuoteString(JSContext *cx, JSString *str, jschar quote);
+
+/*
+ * JSPrinter operations, for printf style message formatting. The return
+ * value from js_GetPrinterOutput() is the printer's cumulative output, in
+ * a GC'ed string.
+ */
+extern JSPrinter *
+js_NewPrinter(JSContext *cx, const char *name, uintN indent, JSBool pretty);
+
+extern void
+js_DestroyPrinter(JSPrinter *jp);
+
+extern JSString *
+js_GetPrinterOutput(JSPrinter *jp);
+
+extern int
+js_printf(JSPrinter *jp, const char *format, ...);
+
+extern JSBool
+js_puts(JSPrinter *jp, const char *s);
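Editorial note: a sketch of the printer lifecycle (illustration only; ExampleDecompilerOutput is a hypothetical helper and assumes a live JSContext): create a printer, emit text with js_printf/js_puts, harvest the accumulated GC'ed string, then destroy the printer. The decompilers declared below drive a JSPrinter the same way.

static JSString *
ExampleDecompilerOutput(JSContext *cx)
{
    JSPrinter *jp;
    JSString *str;

    jp = js_NewPrinter(cx, "example", 0, JS_TRUE /* pretty */);
    if (!jp)
        return NULL;
    js_printf(jp, "\t%s = %s;\n", "x", "y");   /* leading \t requests indent */
    js_puts(jp, "/* trailing text */");
    str = js_GetPrinterOutput(jp);             /* cumulative output, GC'ed   */
    js_DestroyPrinter(jp);
    return str;
}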
+
+#ifdef DEBUG
+/*
+ * Disassemblers, for debugging only.
+ */
+#include <stdio.h>
+
+extern JS_FRIEND_API(JSBool)
+js_Disassemble(JSContext *cx, JSScript *script, JSBool lines, FILE *fp);
+
+extern JS_FRIEND_API(uintN)
+js_Disassemble1(JSContext *cx, JSScript *script, jsbytecode *pc, uintN loc,
+ JSBool lines, FILE *fp);
+#endif /* DEBUG */
+
+/*
+ * Decompilers, for script, function, and expression pretty-printing.
+ */
+extern JSBool
+js_DecompileCode(JSPrinter *jp, JSScript *script, jsbytecode *pc, uintN len,
+ uintN pcdepth);
+
+extern JSBool
+js_DecompileScript(JSPrinter *jp, JSScript *script);
+
+extern JSBool
+js_DecompileFunctionBody(JSPrinter *jp, JSFunction *fun);
+
+extern JSBool
+js_DecompileFunction(JSPrinter *jp, JSFunction *fun);
+
+/*
+ * Find the source expression that resulted in v, and return a new string
+ * containing it. Fall back on v's string conversion (fallback) if we can't
+ * find the bytecode that generated and pushed v on the operand stack.
+ *
+ * Search the current stack frame if spindex is JSDVG_SEARCH_STACK. Don't
+ * look for v on the stack if spindex is JSDVG_IGNORE_STACK. Otherwise,
+ * spindex is the negative index of v, measured from cx->fp->sp, or from a
+ * lower frame's sp if cx->fp is native.
+ */
+extern JSString *
+js_DecompileValueGenerator(JSContext *cx, intN spindex, jsval v,
+ JSString *fallback);
+
+#define JSDVG_IGNORE_STACK 0
+#define JSDVG_SEARCH_STACK 1
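Editorial note: a sketch of a typical call site (illustration only; ReportNotAFunction is a hypothetical helper, though the engine's own error paths use this function in the same spirit). Pass JSDVG_SEARCH_STACK when v should be located on the current operand stack, and use the resulting string in the diagnostic.

static void
ReportNotAFunction(JSContext *cx, jsval v)
{
    /* A null fallback means js_DecompileValueGenerator falls back to
       js_ValueToSource(cx, v) when no generating bytecode is found. */
    JSString *str = js_DecompileValueGenerator(cx, JSDVG_SEARCH_STACK, v, NULL);

    if (str)
        JS_ReportError(cx, "%s is not a function", JS_GetStringBytes(str));
}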
+
+JS_END_EXTERN_C
+
+#endif /* jsopcode_h___ */
diff --git a/src/third_party/js-1.7/jsopcode.tbl b/src/third_party/js-1.7/jsopcode.tbl
new file mode 100644
index 00000000000..4a4ca89830d
--- /dev/null
+++ b/src/third_party/js-1.7/jsopcode.tbl
@@ -0,0 +1,478 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=0 ft=C:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JavaScript operation bytecodes. If you need to allocate a bytecode, look
+ * for a name of the form JSOP_UNUSED* and claim it. Otherwise, always add at
+ * the end of the table.
+ *
+ * Includers must define an OPDEF macro of the following form:
+ *
+ * #define OPDEF(op,val,name,image,length,nuses,ndefs,prec,format) ...
+ *
+ * Selected arguments can be expanded in initializers; the op argument, for
+ * example, is expanded followed by a comma in the JSOp enum (jsopcode.h).
+ * The value field must be dense for now, because jsopcode.c uses an OPDEF()
+ * expansion inside the js_CodeSpec[] initializer.
+ *
+ * Field Description
+ * op Bytecode name, which is the JSOp enumerator name
+ * value Bytecode value, which is the JSOp enumerator value
+ * name C string containing name for disassembler
+ * image C string containing "image" for pretty-printer, null if ugly
+ * length Number of bytes including any immediate operands
+ * nuses Number of stack slots consumed by bytecode, -1 if variadic
+ * ndefs Number of stack slots produced by bytecode
+ * prec Operator precedence, zero if not an operator
+ * format Bytecode plus immediate operand encoding format
+ *
+ * Precedence Operators Opcodes
+ * 1 let (x = y) z, w JSOP_LEAVEBLOCKEXPR
+ * 2 , JSOP_POP with SRC_PCDELTA note
+ * 3 =, +=, etc. JSOP_SETNAME, etc. (all JOF_ASSIGNING)
+ * 4 ?: JSOP_IFEQ, JSOP_IFEQX
+ * 5 || JSOP_OR, JSOP_ORX
+ * 6 && JSOP_AND, JSOP_ANDX
+ * 7 | JSOP_BITOR
+ * 8 ^ JSOP_BITXOR
+ * 9 & JSOP_BITAND
+ * 10 ==, !=, etc. JSOP_EQ, JSOP_NE, etc.
+ * 11 <, in, etc. JSOP_LT, JSOP_IN, etc.
+ * 12 <<, >>, >>> JSOP_LSH, JSOP_RSH, JSOP_URSH
+ * 13 +, -, etc. JSOP_ADD, JSOP_SUB, etc.
+ * 14 *, /, % JSOP_MUL, JSOP_DIV, JSOP_MOD
+ * 15 !, ~, etc. JSOP_NOT, JSOP_BITNOT, etc.
+ * 16 0, function(){} etc. JSOP_ZERO, JSOP_ANONFUNOBJ, etc.
+ * 17 delete, new JSOP_DEL*, JSOP_NEW
+ * 18 x.y, f(), etc. JSOP_GETPROP, JSOP_CALL, etc.
+ * 19 x, null, etc. JSOP_NAME, JSOP_NULL, etc.
+ *
+ * The push-numeric-constant operators, JSOP_ZERO, JSOP_NUMBER, etc., have
+ * lower precedence than the member operators emitted for the . operator, to
+ * cause the decompiler to parenthesize the . left operand, e.g. (0).foo.
+ * Otherwise the . could be taken as a decimal point. We use the same level
+ * 16 for function expressions too, to force parenthesization.
+ *
+ * This file is best viewed with 128 columns:
+12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678
+ */
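Editorial note: a sketch of the includer pattern described above (my_opcode_names is a hypothetical array; jsopcode.h builds the JSOp enum and jsopcode.c builds js_CodeSpec[] with the same trick). Define OPDEF to select the fields you need, include this table, and the result is a dense array indexable by JSOp:

static const char * const my_opcode_names[] = {
#define OPDEF(op,val,name,image,length,nuses,ndefs,prec,format) name,
#include "jsopcode.tbl"
#undef OPDEF
};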
+
+/* legend: op val name image len use def prec format */
+
+/* Longstanding JavaScript bytecodes. */
+OPDEF(JSOP_NOP, 0, "nop", NULL, 1, 0, 0, 0, JOF_BYTE)
+OPDEF(JSOP_PUSH, 1, "push", NULL, 1, 0, 1, 0, JOF_BYTE)
+OPDEF(JSOP_POPV, 2, "popv", NULL, 1, 1, 0, 2, JOF_BYTE)
+OPDEF(JSOP_ENTERWITH, 3, "enterwith", NULL, 1, 1, 1, 0, JOF_BYTE)
+OPDEF(JSOP_LEAVEWITH, 4, "leavewith", NULL, 1, 1, 0, 0, JOF_BYTE)
+OPDEF(JSOP_RETURN, 5, "return", NULL, 1, 1, 0, 0, JOF_BYTE)
+OPDEF(JSOP_GOTO, 6, "goto", NULL, 3, 0, 0, 0, JOF_JUMP)
+OPDEF(JSOP_IFEQ, 7, "ifeq", NULL, 3, 1, 0, 4, JOF_JUMP|JOF_DETECTING)
+OPDEF(JSOP_IFNE, 8, "ifne", NULL, 3, 1, 0, 0, JOF_JUMP)
+
+/* Get the arguments object for the current, lightweight function activation. */
+OPDEF(JSOP_ARGUMENTS, 9, js_arguments_str, js_arguments_str, 1, 0, 1, 18, JOF_BYTE)
+
+/* ECMA-compliant for-in loop with argument or local variable loop control. */
+OPDEF(JSOP_FORARG, 10, "forarg", NULL, 3, 0, 1, 19, JOF_QARG|JOF_NAME|JOF_FOR)
+OPDEF(JSOP_FORVAR, 11, "forvar", NULL, 3, 0, 1, 19, JOF_QVAR|JOF_NAME|JOF_FOR)
+
+/* More longstanding bytecodes. */
+OPDEF(JSOP_DUP, 12, "dup", NULL, 1, 1, 2, 0, JOF_BYTE)
+OPDEF(JSOP_DUP2, 13, "dup2", NULL, 1, 2, 4, 0, JOF_BYTE)
+OPDEF(JSOP_SETCONST, 14, "setconst", NULL, 3, 1, 1, 3, JOF_CONST|JOF_NAME|JOF_SET|JOF_ASSIGNING)
+OPDEF(JSOP_BITOR, 15, "bitor", "|", 1, 2, 1, 7, JOF_BYTE|JOF_LEFTASSOC)
+OPDEF(JSOP_BITXOR, 16, "bitxor", "^", 1, 2, 1, 8, JOF_BYTE|JOF_LEFTASSOC)
+OPDEF(JSOP_BITAND, 17, "bitand", "&", 1, 2, 1, 9, JOF_BYTE|JOF_LEFTASSOC)
+OPDEF(JSOP_EQ, 18, "eq", "==", 1, 2, 1, 10, JOF_BYTE|JOF_LEFTASSOC|JOF_DETECTING)
+OPDEF(JSOP_NE, 19, "ne", "!=", 1, 2, 1, 10, JOF_BYTE|JOF_LEFTASSOC|JOF_DETECTING)
+OPDEF(JSOP_LT, 20, "lt", "<", 1, 2, 1, 11, JOF_BYTE|JOF_LEFTASSOC)
+OPDEF(JSOP_LE, 21, "le", "<=", 1, 2, 1, 11, JOF_BYTE|JOF_LEFTASSOC)
+OPDEF(JSOP_GT, 22, "gt", ">", 1, 2, 1, 11, JOF_BYTE|JOF_LEFTASSOC)
+OPDEF(JSOP_GE, 23, "ge", ">=", 1, 2, 1, 11, JOF_BYTE|JOF_LEFTASSOC)
+OPDEF(JSOP_LSH, 24, "lsh", "<<", 1, 2, 1, 12, JOF_BYTE|JOF_LEFTASSOC)
+OPDEF(JSOP_RSH, 25, "rsh", ">>", 1, 2, 1, 12, JOF_BYTE|JOF_LEFTASSOC)
+OPDEF(JSOP_URSH, 26, "ursh", ">>>", 1, 2, 1, 12, JOF_BYTE|JOF_LEFTASSOC)
+OPDEF(JSOP_ADD, 27, "add", "+", 1, 2, 1, 13, JOF_BYTE|JOF_LEFTASSOC)
+OPDEF(JSOP_SUB, 28, "sub", "-", 1, 2, 1, 13, JOF_BYTE|JOF_LEFTASSOC)
+OPDEF(JSOP_MUL, 29, "mul", "*", 1, 2, 1, 14, JOF_BYTE|JOF_LEFTASSOC)
+OPDEF(JSOP_DIV, 30, "div", "/", 1, 2, 1, 14, JOF_BYTE|JOF_LEFTASSOC)
+OPDEF(JSOP_MOD, 31, "mod", "%", 1, 2, 1, 14, JOF_BYTE|JOF_LEFTASSOC)
+OPDEF(JSOP_NOT, 32, "not", "!", 1, 1, 1, 15, JOF_BYTE|JOF_DETECTING)
+OPDEF(JSOP_BITNOT, 33, "bitnot", "~", 1, 1, 1, 15, JOF_BYTE)
+OPDEF(JSOP_NEG, 34, "neg", "- ", 1, 1, 1, 15, JOF_BYTE)
+OPDEF(JSOP_NEW, 35, js_new_str, NULL, 3, -1, 1, 17, JOF_UINT16)
+OPDEF(JSOP_DELNAME, 36, "delname", NULL, 3, 0, 1, 17, JOF_CONST|JOF_NAME|JOF_DEL)
+OPDEF(JSOP_DELPROP, 37, "delprop", NULL, 3, 1, 1, 17, JOF_CONST|JOF_PROP|JOF_DEL)
+OPDEF(JSOP_DELELEM, 38, "delelem", NULL, 1, 2, 1, 17, JOF_BYTE |JOF_ELEM|JOF_DEL)
+OPDEF(JSOP_TYPEOF, 39, js_typeof_str,NULL, 1, 1, 1, 15, JOF_BYTE|JOF_DETECTING)
+OPDEF(JSOP_VOID, 40, js_void_str, NULL, 1, 1, 1, 15, JOF_BYTE)
+OPDEF(JSOP_INCNAME, 41, "incname", NULL, 3, 0, 1, 15, JOF_CONST|JOF_NAME|JOF_INC)
+OPDEF(JSOP_INCPROP, 42, "incprop", NULL, 3, 1, 1, 15, JOF_CONST|JOF_PROP|JOF_INC)
+OPDEF(JSOP_INCELEM, 43, "incelem", NULL, 1, 2, 1, 15, JOF_BYTE |JOF_ELEM|JOF_INC)
+OPDEF(JSOP_DECNAME, 44, "decname", NULL, 3, 0, 1, 15, JOF_CONST|JOF_NAME|JOF_DEC)
+OPDEF(JSOP_DECPROP, 45, "decprop", NULL, 3, 1, 1, 15, JOF_CONST|JOF_PROP|JOF_DEC)
+OPDEF(JSOP_DECELEM, 46, "decelem", NULL, 1, 2, 1, 15, JOF_BYTE |JOF_ELEM|JOF_DEC)
+OPDEF(JSOP_NAMEINC, 47, "nameinc", NULL, 3, 0, 1, 15, JOF_CONST|JOF_NAME|JOF_INC|JOF_POST)
+OPDEF(JSOP_PROPINC, 48, "propinc", NULL, 3, 1, 1, 15, JOF_CONST|JOF_PROP|JOF_INC|JOF_POST)
+OPDEF(JSOP_ELEMINC, 49, "eleminc", NULL, 1, 2, 1, 15, JOF_BYTE |JOF_ELEM|JOF_INC|JOF_POST)
+OPDEF(JSOP_NAMEDEC, 50, "namedec", NULL, 3, 0, 1, 15, JOF_CONST|JOF_NAME|JOF_DEC|JOF_POST)
+OPDEF(JSOP_PROPDEC, 51, "propdec", NULL, 3, 1, 1, 15, JOF_CONST|JOF_PROP|JOF_DEC|JOF_POST)
+OPDEF(JSOP_ELEMDEC, 52, "elemdec", NULL, 1, 2, 1, 15, JOF_BYTE |JOF_ELEM|JOF_DEC|JOF_POST)
+OPDEF(JSOP_GETPROP, 53, "getprop", NULL, 3, 1, 1, 18, JOF_CONST|JOF_PROP)
+OPDEF(JSOP_SETPROP, 54, "setprop", NULL, 3, 2, 1, 3, JOF_CONST|JOF_PROP|JOF_SET|JOF_ASSIGNING|JOF_DETECTING)
+OPDEF(JSOP_GETELEM, 55, "getelem", NULL, 1, 2, 1, 18, JOF_BYTE |JOF_ELEM|JOF_LEFTASSOC)
+OPDEF(JSOP_SETELEM, 56, "setelem", NULL, 1, 3, 1, 3, JOF_BYTE |JOF_ELEM|JOF_SET|JOF_ASSIGNING|JOF_DETECTING)
+OPDEF(JSOP_PUSHOBJ, 57, "pushobj", NULL, 1, 0, 1, 0, JOF_BYTE)
+OPDEF(JSOP_CALL, 58, "call", NULL, 3, -1, 1, 18, JOF_UINT16)
+OPDEF(JSOP_NAME, 59, "name", NULL, 3, 0, 1, 19, JOF_CONST|JOF_NAME)
+OPDEF(JSOP_NUMBER, 60, "number", NULL, 3, 0, 1, 16, JOF_CONST)
+OPDEF(JSOP_STRING, 61, "string", NULL, 3, 0, 1, 19, JOF_CONST)
+OPDEF(JSOP_ZERO, 62, "zero", "0", 1, 0, 1, 16, JOF_BYTE)
+OPDEF(JSOP_ONE, 63, "one", "1", 1, 0, 1, 16, JOF_BYTE)
+OPDEF(JSOP_NULL, 64, js_null_str, js_null_str, 1, 0, 1, 19, JOF_BYTE)
+OPDEF(JSOP_THIS, 65, js_this_str, js_this_str, 1, 0, 1, 19, JOF_BYTE)
+OPDEF(JSOP_FALSE, 66, js_false_str, js_false_str, 1, 0, 1, 19, JOF_BYTE)
+OPDEF(JSOP_TRUE, 67, js_true_str, js_true_str, 1, 0, 1, 19, JOF_BYTE)
+OPDEF(JSOP_OR, 68, "or", NULL, 3, 1, 0, 5, JOF_JUMP|JOF_DETECTING)
+OPDEF(JSOP_AND, 69, "and", NULL, 3, 1, 0, 6, JOF_JUMP|JOF_DETECTING)
+
+/* The switch bytecodes have variable length. */
+OPDEF(JSOP_TABLESWITCH, 70, "tableswitch", NULL, -1, 1, 0, 0, JOF_TABLESWITCH|JOF_DETECTING)
+OPDEF(JSOP_LOOKUPSWITCH, 71, "lookupswitch", NULL, -1, 1, 0, 0, JOF_LOOKUPSWITCH|JOF_DETECTING)
+
+/* New, infallible/transitive identity ops. */
+OPDEF(JSOP_NEW_EQ, 72, "eq", NULL, 1, 2, 1, 10, JOF_BYTE|JOF_DETECTING)
+OPDEF(JSOP_NEW_NE, 73, "ne", NULL, 1, 2, 1, 10, JOF_BYTE|JOF_DETECTING)
+
+/* Lexical closure constructor. */
+OPDEF(JSOP_CLOSURE, 74, "closure", NULL, 3, 0, 0, 0, JOF_CONST)
+
+/* Export and import ops. */
+OPDEF(JSOP_EXPORTALL, 75, "exportall", NULL, 1, 0, 0, 0, JOF_BYTE)
+OPDEF(JSOP_EXPORTNAME,76, "exportname", NULL, 3, 0, 0, 0, JOF_CONST|JOF_NAME)
+OPDEF(JSOP_IMPORTALL, 77, "importall", NULL, 1, 1, 0, 0, JOF_BYTE)
+OPDEF(JSOP_IMPORTPROP,78, "importprop", NULL, 3, 1, 0, 0, JOF_CONST|JOF_PROP|JOF_IMPORT)
+OPDEF(JSOP_IMPORTELEM,79, "importelem", NULL, 1, 2, 0, 0, JOF_BYTE |JOF_ELEM|JOF_IMPORT)
+
+/* Push object literal. */
+OPDEF(JSOP_OBJECT, 80, "object", NULL, 3, 0, 1, 19, JOF_CONST)
+
+/* Pop value and discard it. */
+OPDEF(JSOP_POP, 81, "pop", NULL, 1, 1, 0, 2, JOF_BYTE)
+
+/* Convert value to number, for unary +. */
+OPDEF(JSOP_POS, 82, "pos", "+ ", 1, 1, 1, 15, JOF_BYTE)
+
+/* Trap into debugger for breakpoint, etc. */
+OPDEF(JSOP_TRAP, 83, "trap", NULL, 1, 0, 0, 0, JOF_BYTE)
+
+/* Fast get/set ops for function arguments and local variables. */
+OPDEF(JSOP_GETARG, 84, "getarg", NULL, 3, 0, 1, 19, JOF_QARG |JOF_NAME)
+OPDEF(JSOP_SETARG, 85, "setarg", NULL, 3, 1, 1, 3, JOF_QARG |JOF_NAME|JOF_SET|JOF_ASSIGNING)
+OPDEF(JSOP_GETVAR, 86, "getvar", NULL, 3, 0, 1, 19, JOF_QVAR |JOF_NAME)
+OPDEF(JSOP_SETVAR, 87, "setvar", NULL, 3, 1, 1, 3, JOF_QVAR |JOF_NAME|JOF_SET|JOF_ASSIGNING|JOF_DETECTING)
+
+/* Push unsigned 16-bit int constant. */
+OPDEF(JSOP_UINT16, 88, "uint16", NULL, 3, 0, 1, 16, JOF_UINT16)
+
+/* Object and array literal support. */
+OPDEF(JSOP_NEWINIT, 89, "newinit", NULL, 1, 2, 1, 0, JOF_BYTE)
+OPDEF(JSOP_ENDINIT, 90, "endinit", NULL, 1, 0, 0, 19, JOF_BYTE)
+OPDEF(JSOP_INITPROP, 91, "initprop", NULL, 3, 1, 0, 3, JOF_CONST|JOF_PROP|JOF_DETECTING)
+OPDEF(JSOP_INITELEM, 92, "initelem", NULL, 1, 2, 0, 3, JOF_BYTE |JOF_ELEM|JOF_DETECTING)
+OPDEF(JSOP_DEFSHARP, 93, "defsharp", NULL, 3, 0, 0, 0, JOF_UINT16)
+OPDEF(JSOP_USESHARP, 94, "usesharp", NULL, 3, 0, 1, 0, JOF_UINT16)
+
+/* Fast inc/dec ops for args and local vars. */
+OPDEF(JSOP_INCARG, 95, "incarg", NULL, 3, 0, 1, 15, JOF_QARG |JOF_NAME|JOF_INC)
+OPDEF(JSOP_INCVAR, 96, "incvar", NULL, 3, 0, 1, 15, JOF_QVAR |JOF_NAME|JOF_INC)
+OPDEF(JSOP_DECARG, 97, "decarg", NULL, 3, 0, 1, 15, JOF_QARG |JOF_NAME|JOF_DEC)
+OPDEF(JSOP_DECVAR, 98, "decvar", NULL, 3, 0, 1, 15, JOF_QVAR |JOF_NAME|JOF_DEC)
+OPDEF(JSOP_ARGINC, 99, "arginc", NULL, 3, 0, 1, 15, JOF_QARG |JOF_NAME|JOF_INC|JOF_POST)
+OPDEF(JSOP_VARINC, 100,"varinc", NULL, 3, 0, 1, 15, JOF_QVAR |JOF_NAME|JOF_INC|JOF_POST)
+OPDEF(JSOP_ARGDEC, 101,"argdec", NULL, 3, 0, 1, 15, JOF_QARG |JOF_NAME|JOF_DEC|JOF_POST)
+OPDEF(JSOP_VARDEC, 102,"vardec", NULL, 3, 0, 1, 15, JOF_QVAR |JOF_NAME|JOF_DEC|JOF_POST)
+
+/*
+ * Initialize for-in iterator. See also JSOP_FOREACH and JSOP_FOREACHKEYVAL.
+ */
+OPDEF(JSOP_FORIN, 103,"forin", NULL, 1, 1, 1, 0, JOF_BYTE)
+
+/* ECMA-compliant for/in ops. */
+OPDEF(JSOP_FORNAME, 104,"forname", NULL, 3, 0, 1, 19, JOF_CONST|JOF_NAME|JOF_FOR)
+OPDEF(JSOP_FORPROP, 105,"forprop", NULL, 3, 1, 1, 18, JOF_CONST|JOF_PROP|JOF_FOR)
+OPDEF(JSOP_FORELEM, 106,"forelem", NULL, 1, 1, 3, 18, JOF_BYTE |JOF_ELEM|JOF_FOR)
+OPDEF(JSOP_POP2, 107,"pop2", NULL, 1, 2, 0, 0, JOF_BYTE)
+
+/* ECMA-compliant assignment ops. */
+OPDEF(JSOP_BINDNAME, 108,"bindname", NULL, 3, 0, 1, 0, JOF_CONST|JOF_NAME|JOF_SET|JOF_ASSIGNING)
+OPDEF(JSOP_SETNAME, 109,"setname", NULL, 3, 2, 1, 3, JOF_CONST|JOF_NAME|JOF_SET|JOF_ASSIGNING|JOF_DETECTING)
+
+/* Exception handling ops. */
+OPDEF(JSOP_THROW, 110,"throw", NULL, 1, 1, 0, 0, JOF_BYTE)
+
+/* 'in' and 'instanceof' ops. */
+OPDEF(JSOP_IN, 111,js_in_str, js_in_str, 1, 2, 1, 11, JOF_BYTE|JOF_LEFTASSOC)
+OPDEF(JSOP_INSTANCEOF,112,js_instanceof_str,js_instanceof_str,1,2,1,11,JOF_BYTE|JOF_LEFTASSOC)
+
+/* debugger op */
+OPDEF(JSOP_DEBUGGER, 113,"debugger", NULL, 1, 0, 0, 0, JOF_BYTE)
+
+/* gosub/retsub for finally handling */
+OPDEF(JSOP_GOSUB, 114,"gosub", NULL, 3, 0, 0, 0, JOF_JUMP)
+OPDEF(JSOP_RETSUB, 115,"retsub", NULL, 1, 0, 0, 0, JOF_BYTE)
+
+/* More exception handling ops. */
+OPDEF(JSOP_EXCEPTION, 116,"exception", NULL, 1, 0, 1, 0, JOF_BYTE)
+OPDEF(JSOP_SETSP, 117,"setsp", NULL, 3, 0, 0, 0, JOF_UINT16)
+
+/*
+ * ECMA-compliant switch statement ops.
+ * CONDSWITCH is a decompilable NOP; CASE is ===, POP, jump if true, re-push
+ * lval if false; and DEFAULT is POP lval and GOTO.
+ */
+OPDEF(JSOP_CONDSWITCH,118,"condswitch", NULL, 1, 0, 0, 0, JOF_BYTE)
+OPDEF(JSOP_CASE, 119,"case", NULL, 3, 1, 0, 0, JOF_JUMP)
+OPDEF(JSOP_DEFAULT, 120,"default", NULL, 3, 1, 0, 0, JOF_JUMP)
+
+/*
+ * ECMA-compliant call to eval op
+ */
+OPDEF(JSOP_EVAL, 121,"eval", NULL, 3, -1, 1, 18, JOF_UINT16)
+
+/*
+ * ECMA-compliant helper for 'for (x[i] in o)' loops.
+ */
+OPDEF(JSOP_ENUMELEM, 122,"enumelem", NULL, 1, 3, 0, 3, JOF_BYTE |JOF_SET|JOF_ASSIGNING)
+
+/*
+ * Getter and setter prefix bytecodes. These modify the next bytecode, either
+ * an assignment or a property initializer code, which then defines a property
+ * getter or setter.
+ */
+OPDEF(JSOP_GETTER, 123,js_getter_str,NULL, 1, 0, 0, 0, JOF_BYTE)
+OPDEF(JSOP_SETTER, 124,js_setter_str,NULL, 1, 0, 0, 0, JOF_BYTE)
+
+/*
+ * Prolog bytecodes for defining function, var, and const names.
+ */
+OPDEF(JSOP_DEFFUN, 125,"deffun", NULL, 3, 0, 0, 0, JOF_CONST|JOF_DECLARING)
+OPDEF(JSOP_DEFCONST, 126,"defconst", NULL, 3, 0, 0, 0, JOF_CONST|JOF_DECLARING)
+OPDEF(JSOP_DEFVAR, 127,"defvar", NULL, 3, 0, 0, 0, JOF_CONST|JOF_DECLARING)
+
+/* Auto-clone (if needed due to re-parenting) and push an anonymous function. */
+OPDEF(JSOP_ANONFUNOBJ, 128, "anonfunobj", NULL, 3, 0, 1, 16, JOF_CONST)
+
+/* ECMA ed. 3 named function expression. */
+OPDEF(JSOP_NAMEDFUNOBJ, 129, "namedfunobj", NULL, 3, 0, 1, 16, JOF_CONST)
+
+/*
+ * Like JSOP_SETLOCAL, but specialized to avoid requiring JSOP_POP immediately
+ * after to throw away the exception value.
+ */
+OPDEF(JSOP_SETLOCALPOP, 130, "setlocalpop", NULL, 3, 1, 0, 3, JOF_LOCAL|JOF_NAME|JOF_SET)
+
+/* ECMA-mandated parenthesization opcode, which nulls the reference base register, obj; see jsinterp.c. */
+OPDEF(JSOP_GROUP, 131, "group", NULL, 1, 0, 0, 0, JOF_BYTE)
+
+/* Host object extension: given 'o.item(i) = j', the left-hand side compiles JSOP_SETCALL, rather than JSOP_CALL. */
+OPDEF(JSOP_SETCALL, 132, "setcall", NULL, 3, -1, 2, 18, JOF_UINT16|JOF_SET|JOF_ASSIGNING)
+
+/*
+ * Exception handling no-ops, for more economical byte-coding than SRC_TRYFIN
+ * srcnote-annotated JSOP_NOPs.
+ */
+OPDEF(JSOP_TRY, 133,"try", NULL, 1, 0, 0, 0, JOF_BYTE)
+OPDEF(JSOP_FINALLY, 134,"finally", NULL, 1, 0, 0, 0, JOF_BYTE)
+
+/*
+ * Swap the top two stack elements.
+ */
+OPDEF(JSOP_SWAP, 135,"swap", NULL, 1, 2, 2, 0, JOF_BYTE)
+
+/*
+ * Bytecodes that avoid making an arguments object in most cases:
+ * JSOP_ARGSUB gets arguments[i] from fp->argv, iff i is in [0, fp->argc-1].
+ * JSOP_ARGCNT returns fp->argc.
+ */
+OPDEF(JSOP_ARGSUB, 136,"argsub", NULL, 3, 0, 1, 18, JOF_QARG |JOF_NAME)
+OPDEF(JSOP_ARGCNT, 137,"argcnt", NULL, 1, 0, 1, 18, JOF_BYTE)
+
+/*
+ * Define a local function object as a local variable.
+ * The local variable's slot number is the first immediate two-byte operand.
+ * The function object's atom index is the second immediate operand.
+ */
+OPDEF(JSOP_DEFLOCALFUN, 138,"deflocalfun",NULL, 5, 0, 0, 0, JOF_INDEXCONST|JOF_DECLARING)
+
+/* Extended jumps. */
+OPDEF(JSOP_GOTOX, 139,"gotox", NULL, 5, 0, 0, 0, JOF_JUMPX)
+OPDEF(JSOP_IFEQX, 140,"ifeqx", NULL, 5, 1, 0, 3, JOF_JUMPX|JOF_DETECTING)
+OPDEF(JSOP_IFNEX, 141,"ifnex", NULL, 5, 1, 0, 0, JOF_JUMPX)
+OPDEF(JSOP_ORX, 142,"orx", NULL, 5, 1, 0, 5, JOF_JUMPX|JOF_DETECTING)
+OPDEF(JSOP_ANDX, 143,"andx", NULL, 5, 1, 0, 6, JOF_JUMPX|JOF_DETECTING)
+OPDEF(JSOP_GOSUBX, 144,"gosubx", NULL, 5, 0, 0, 0, JOF_JUMPX)
+OPDEF(JSOP_CASEX, 145,"casex", NULL, 5, 1, 0, 0, JOF_JUMPX)
+OPDEF(JSOP_DEFAULTX, 146,"defaultx", NULL, 5, 1, 0, 0, JOF_JUMPX)
+OPDEF(JSOP_TABLESWITCHX, 147,"tableswitchx",NULL, -1, 1, 0, 0, JOF_TABLESWITCHX|JOF_DETECTING)
+OPDEF(JSOP_LOOKUPSWITCHX, 148,"lookupswitchx",NULL, -1, 1, 0, 0, JOF_LOOKUPSWITCHX|JOF_DETECTING)
+
+/* Placeholders for a real jump opcode set during backpatch chain fixup. */
+OPDEF(JSOP_BACKPATCH, 149,"backpatch",NULL, 3, 0, 0, 0, JOF_JUMP|JOF_BACKPATCH)
+OPDEF(JSOP_BACKPATCH_POP, 150,"backpatch_pop",NULL, 3, 1, 0, 0, JOF_JUMP|JOF_BACKPATCH)
+
+/* Set pending exception from the stack, to trigger rethrow. */
+OPDEF(JSOP_THROWING, 151,"throwing", NULL, 1, 1, 0, 0, JOF_BYTE)
+
+/* Set and get return value pseudo-register in stack frame. */
+OPDEF(JSOP_SETRVAL, 152,"setrval", NULL, 1, 1, 0, 0, JOF_BYTE)
+OPDEF(JSOP_RETRVAL, 153,"retrval", NULL, 1, 0, 0, 0, JOF_BYTE)
+
+/* Optimized global variable ops (we don't bother doing a JSOP_FORGVAR op). */
+OPDEF(JSOP_GETGVAR, 154,"getgvar", NULL, 3, 0, 1, 19, JOF_CONST|JOF_NAME)
+OPDEF(JSOP_SETGVAR, 155,"setgvar", NULL, 3, 1, 1, 3, JOF_CONST|JOF_NAME|JOF_SET|JOF_ASSIGNING|JOF_DETECTING)
+OPDEF(JSOP_INCGVAR, 156,"incgvar", NULL, 3, 0, 1, 15, JOF_CONST|JOF_NAME|JOF_INC)
+OPDEF(JSOP_DECGVAR, 157,"decgvar", NULL, 3, 0, 1, 15, JOF_CONST|JOF_NAME|JOF_DEC)
+OPDEF(JSOP_GVARINC, 158,"gvarinc", NULL, 3, 0, 1, 15, JOF_CONST|JOF_NAME|JOF_INC|JOF_POST)
+OPDEF(JSOP_GVARDEC, 159,"gvardec", NULL, 3, 0, 1, 15, JOF_CONST|JOF_NAME|JOF_DEC|JOF_POST)
+
+/* Regular expression literal requiring special "fork on exec" handling. */
+OPDEF(JSOP_REGEXP, 160,"regexp", NULL, 3, 0, 1, 19, JOF_CONST)
+
+/* XML (ECMA-357, a.k.a. "E4X") support. */
+OPDEF(JSOP_DEFXMLNS, 161,"defxmlns", NULL, 1, 1, 0, 0, JOF_BYTE)
+OPDEF(JSOP_ANYNAME, 162,"anyname", NULL, 1, 0, 1, 19, JOF_BYTE|JOF_XMLNAME)
+OPDEF(JSOP_QNAMEPART, 163,"qnamepart", NULL, 3, 0, 1, 19, JOF_CONST|JOF_XMLNAME)
+OPDEF(JSOP_QNAMECONST, 164,"qnameconst", NULL, 3, 1, 1, 19, JOF_CONST|JOF_XMLNAME)
+OPDEF(JSOP_QNAME, 165,"qname", NULL, 1, 2, 1, 0, JOF_BYTE|JOF_XMLNAME)
+OPDEF(JSOP_TOATTRNAME, 166,"toattrname", NULL, 1, 1, 1, 19, JOF_BYTE|JOF_XMLNAME)
+OPDEF(JSOP_TOATTRVAL, 167,"toattrval", NULL, 1, 1, 1, 19, JOF_BYTE)
+OPDEF(JSOP_ADDATTRNAME, 168,"addattrname",NULL, 1, 2, 1, 13, JOF_BYTE)
+OPDEF(JSOP_ADDATTRVAL, 169,"addattrval", NULL, 1, 2, 1, 13, JOF_BYTE)
+OPDEF(JSOP_BINDXMLNAME, 170,"bindxmlname",NULL, 1, 1, 2, 3, JOF_BYTE|JOF_SET|JOF_ASSIGNING)
+OPDEF(JSOP_SETXMLNAME, 171,"setxmlname", NULL, 1, 3, 1, 3, JOF_BYTE|JOF_SET|JOF_ASSIGNING|JOF_DETECTING)
+OPDEF(JSOP_XMLNAME, 172,"xmlname", NULL, 1, 1, 1, 19, JOF_BYTE)
+OPDEF(JSOP_DESCENDANTS, 173,"descendants",NULL, 1, 2, 1, 18, JOF_BYTE)
+OPDEF(JSOP_FILTER, 174,"filter", NULL, 3, 1, 1, 0, JOF_JUMP)
+OPDEF(JSOP_ENDFILTER, 175,"endfilter", NULL, 1, 1, 0, 18, JOF_BYTE)
+OPDEF(JSOP_TOXML, 176,"toxml", NULL, 1, 1, 1, 19, JOF_BYTE)
+OPDEF(JSOP_TOXMLLIST, 177,"toxmllist", NULL, 1, 1, 1, 19, JOF_BYTE)
+OPDEF(JSOP_XMLTAGEXPR, 178,"xmltagexpr", NULL, 1, 1, 1, 0, JOF_BYTE)
+OPDEF(JSOP_XMLELTEXPR, 179,"xmleltexpr", NULL, 1, 1, 1, 0, JOF_BYTE)
+OPDEF(JSOP_XMLOBJECT, 180,"xmlobject", NULL, 3, 0, 1, 19, JOF_CONST)
+OPDEF(JSOP_XMLCDATA, 181,"xmlcdata", NULL, 3, 0, 1, 19, JOF_CONST)
+OPDEF(JSOP_XMLCOMMENT, 182,"xmlcomment", NULL, 3, 0, 1, 19, JOF_CONST)
+OPDEF(JSOP_XMLPI, 183,"xmlpi", NULL, 3, 1, 1, 19, JOF_CONST)
+OPDEF(JSOP_GETMETHOD, 184,"getmethod", NULL, 3, 1, 1, 18, JOF_CONST|JOF_PROP)
+OPDEF(JSOP_GETFUNNS, 185,"getfunns", NULL, 1, 0, 1, 19, JOF_BYTE)
+OPDEF(JSOP_FOREACH, 186,"foreach", NULL, 1, 1, 1, 0, JOF_BYTE)
+OPDEF(JSOP_DELDESC, 187,"deldesc", NULL, 1, 2, 1, 17, JOF_BYTE |JOF_ELEM|JOF_DEL)
+
+/*
+ * Opcodes for extended literal addressing, using unsigned 24-bit immediate
+ * operands to hold integer operands (JSOP_UINT24), extended atom indexes in
+ * script->atomMap (JSOP_LITERAL, JSOP_FINDNAME), and ops prefixed by such
+ * atom index immediates (JSOP_LITOPX). See jsemit.c, EmitAtomIndexOp.
+ */
+OPDEF(JSOP_UINT24, 188,"uint24", NULL, 4, 0, 1, 16, JOF_UINT24)
+OPDEF(JSOP_LITERAL, 189,"literal", NULL, 4, 0, 1, 19, JOF_UINT24)
+OPDEF(JSOP_FINDNAME, 190,"findname", NULL, 4, 0, 2, 0, JOF_UINT24)
+OPDEF(JSOP_LITOPX, 191,"litopx", NULL, 5, 0, 0, 0, JOF_LITOPX)
+
+/*
+ * Opcodes to help the decompiler deal with XML.
+ */
+OPDEF(JSOP_STARTXML, 192,"startxml", NULL, 1, 0, 0, 0, JOF_BYTE)
+OPDEF(JSOP_STARTXMLEXPR, 193,"startxmlexpr",NULL, 1, 0, 0, 0, JOF_BYTE)
+OPDEF(JSOP_SETMETHOD, 194,"setmethod", NULL, 3, 2, 1, 3, JOF_CONST|JOF_PROP|JOF_SET|JOF_ASSIGNING|JOF_DETECTING)
+
+/*
+ * Stop interpretation, emitted at end of script to save the threaded bytecode
+ * interpreter an extra branch test on every DO_NEXT_OP (see jsinterp.c).
+ */
+OPDEF(JSOP_STOP, 195,"stop", NULL, 1, 0, 0, 0, JOF_BYTE)
+
+/*
+ * Get an extant property or element value, throwing ReferenceError if the
+ * identified property does not exist.
+ */
+OPDEF(JSOP_GETXPROP, 196,"getxprop", NULL, 3, 1, 1, 18, JOF_CONST|JOF_PROP)
+OPDEF(JSOP_GETXELEM, 197,"getxelem", NULL, 1, 2, 1, 18, JOF_BYTE |JOF_ELEM|JOF_LEFTASSOC)
+
+/*
+ * Specialized JSOP_TYPEOF to avoid reporting undefined for typeof(0, undef).
+ */
+OPDEF(JSOP_TYPEOFEXPR, 198,js_typeof_str, NULL, 1, 1, 1, 15, JOF_BYTE|JOF_DETECTING)
+
+/*
+ * Block-local scope support.
+ */
+OPDEF(JSOP_ENTERBLOCK, 199,"enterblock", NULL, 3, 0, 0, 0, JOF_CONST)
+OPDEF(JSOP_LEAVEBLOCK, 200,"leaveblock", NULL, 3, 0, 0, 0, JOF_UINT16)
+OPDEF(JSOP_GETLOCAL, 201,"getlocal", NULL, 3, 0, 1, 19, JOF_LOCAL|JOF_NAME)
+OPDEF(JSOP_SETLOCAL, 202,"setlocal", NULL, 3, 1, 1, 3, JOF_LOCAL|JOF_NAME|JOF_SET)
+OPDEF(JSOP_INCLOCAL, 203,"inclocal", NULL, 3, 0, 1, 15, JOF_LOCAL|JOF_NAME|JOF_INC)
+OPDEF(JSOP_DECLOCAL, 204,"declocal", NULL, 3, 0, 1, 15, JOF_LOCAL|JOF_NAME|JOF_DEC)
+OPDEF(JSOP_LOCALINC, 205,"localinc", NULL, 3, 0, 1, 15, JOF_LOCAL|JOF_NAME|JOF_INC|JOF_POST)
+OPDEF(JSOP_LOCALDEC, 206,"localdec", NULL, 3, 0, 1, 15, JOF_LOCAL|JOF_NAME|JOF_DEC|JOF_POST)
+OPDEF(JSOP_FORLOCAL, 207,"forlocal", NULL, 3, 0, 1, 19, JOF_LOCAL|JOF_NAME|JOF_FOR)
+
+/*
+ * Iterator, generator, and array comprehension support.
+ */
+OPDEF(JSOP_STARTITER, 208,"startiter", NULL, 1, 0, 0, 0, JOF_BYTE)
+OPDEF(JSOP_ENDITER, 209,"enditer", NULL, 1, 1, 0, 0, JOF_BYTE)
+OPDEF(JSOP_GENERATOR, 210,"generator", NULL, 1, 0, 0, 0, JOF_BYTE)
+OPDEF(JSOP_YIELD, 211,"yield", NULL, 1, 1, 1, 1, JOF_BYTE)
+OPDEF(JSOP_ARRAYPUSH, 212,"arraypush", NULL, 3, 1, 0, 3, JOF_LOCAL)
+
+OPDEF(JSOP_FOREACHKEYVAL, 213,"foreachkeyval",NULL, 1, 1, 1, 0, JOF_BYTE)
+
+/*
+ * Variant of JSOP_ENUMELEM for destructuring const (const [a, b] = ...).
+ */
+OPDEF(JSOP_ENUMCONSTELEM, 214,"enumconstelem",NULL, 1, 3, 0, 3, JOF_BYTE|JOF_SET|JOF_ASSIGNING)
+
+/*
+ * Variant of JSOP_LEAVEBLOCK that has a result on the stack above the locals;
+ * the result must be moved down when the block pops.
+ */
+OPDEF(JSOP_LEAVEBLOCKEXPR,215,"leaveblockexpr",NULL, 3, 0, 0, 1, JOF_UINT16)
diff --git a/src/third_party/js-1.7/jsosdep.h b/src/third_party/js-1.7/jsosdep.h
new file mode 100644
index 00000000000..a26614485ab
--- /dev/null
+++ b/src/third_party/js-1.7/jsosdep.h
@@ -0,0 +1,115 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsosdep_h___
+#define jsosdep_h___
+/*
+ * OS (and machine, and compiler XXX) dependent information.
+ */
+
+#if defined(XP_WIN) || defined(XP_OS2)
+
+#if defined(_WIN32) || defined (XP_OS2)
+#define JS_HAVE_LONG_LONG
+#else
+#undef JS_HAVE_LONG_LONG
+#endif
+#endif /* XP_WIN || XP_OS2 */
+
+#ifdef XP_BEOS
+#define JS_HAVE_LONG_LONG
+#endif
+
+
+#ifdef XP_UNIX
+
+/*
+ * Get OS specific header information.
+ */
+#if defined(XP_MACOSX) || defined(DARWIN)
+#define JS_HAVE_LONG_LONG
+
+#elif defined(AIXV3) || defined(AIX)
+#define JS_HAVE_LONG_LONG
+
+#elif defined(BSDI)
+#define JS_HAVE_LONG_LONG
+
+#elif defined(HPUX)
+#define JS_HAVE_LONG_LONG
+
+#elif defined(IRIX)
+#define JS_HAVE_LONG_LONG
+
+#elif defined(linux)
+#define JS_HAVE_LONG_LONG
+
+#elif defined(OSF1)
+#define JS_HAVE_LONG_LONG
+
+#elif defined(_SCO_DS)
+#undef JS_HAVE_LONG_LONG
+
+#elif defined(SOLARIS)
+#define JS_HAVE_LONG_LONG
+
+#elif defined(FREEBSD)
+#define JS_HAVE_LONG_LONG
+
+#elif defined(SUNOS4)
+#undef JS_HAVE_LONG_LONG
+
+/*
+** Missing function prototypes
+*/
+
+extern void *sbrk(int);
+
+#elif defined(UNIXWARE)
+#undef JS_HAVE_LONG_LONG
+
+#elif defined(VMS) && defined(__ALPHA)
+#define JS_HAVE_LONG_LONG
+
+#endif
+
+#endif /* XP_UNIX */
+
+#endif /* jsosdep_h___ */
+
diff --git a/src/third_party/js-1.7/jsotypes.h b/src/third_party/js-1.7/jsotypes.h
new file mode 100644
index 00000000000..38d72869cca
--- /dev/null
+++ b/src/third_party/js-1.7/jsotypes.h
@@ -0,0 +1,202 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * This section typedefs the old 'native' types to the new PR<type>s.
+ * These definitions are scheduled to be eliminated at the earliest
+ * possible time. The NSPR API is implemented and documented using
+ * the new definitions.
+ */
+
+/*
+ * Note that we test for PROTYPES_H, not JSOTYPES_H. This is to avoid
+ * double-definitions of scalar types such as uint32, if NSPR's
+ * protypes.h is also included.
+ */
+#ifndef PROTYPES_H
+#define PROTYPES_H
+
+#ifdef XP_BEOS
+/* BeOS defines most int types in SupportDefs.h (int8, uint8, int16,
+ * uint16, int32, uint32, int64, uint64), so in the interest of
+ * not conflicting with other definitions elsewhere we have to skip the
+ * #ifdef jungle below, duplicate some definitions, and do our stuff.
+ */
+#include <SupportDefs.h>
+
+typedef JSUintn uintn;
+#ifndef _XP_Core_
+typedef JSIntn intn;
+#endif
+
+#else
+
+/* SVR4 typedef of uint is commonly found on UNIX machines. */
+#if defined(XP_UNIX) && !defined(__QNXNTO__)
+#include <sys/types.h>
+#else
+typedef JSUintn uint;
+#endif
+
+typedef JSUintn uintn;
+typedef JSUint64 uint64;
+#if !defined(_WIN32) && !defined(XP_OS2)
+typedef JSUint32 uint32;
+#else
+typedef unsigned long uint32;
+#endif
+typedef JSUint16 uint16;
+typedef JSUint8 uint8;
+
+#ifndef _XP_Core_
+typedef JSIntn intn;
+#endif
+
+/*
+ * On AIX 4.3, sys/inttypes.h (which is included by sys/types.h, a very
+ * common header file) defines the types int8, int16, int32, and int64.
+ * So we don't define these four types here to avoid conflicts in case
+ * the code also includes sys/types.h.
+ */
+#if defined(AIX) && defined(HAVE_SYS_INTTYPES_H)
+#include <sys/inttypes.h>
+#else
+typedef JSInt64 int64;
+
+/* /usr/include/model.h on HP-UX defines int8, int16, and int32 */
+#ifdef HPUX
+#include <model.h>
+#else
+#if !defined(_WIN32) && !defined(XP_OS2)
+typedef JSInt32 int32;
+#else
+typedef long int32;
+#endif
+typedef JSInt16 int16;
+typedef JSInt8 int8;
+#endif /* HPUX */
+#endif /* AIX && HAVE_SYS_INTTYPES_H */
+
+#endif /* XP_BEOS */
+
+typedef JSFloat64 float64;
+
+/* Re: jsbit.h */
+#define TEST_BIT JS_TEST_BIT
+#define SET_BIT JS_SET_BIT
+#define CLEAR_BIT JS_CLEAR_BIT
+
+/* Re: prarena.h->plarena.h */
+#define PRArena PLArena
+#define PRArenaPool PLArenaPool
+#define PRArenaStats PLArenaStats
+#define PR_ARENA_ALIGN PL_ARENA_ALIGN
+#define PR_INIT_ARENA_POOL PL_INIT_ARENA_POOL
+#define PR_ARENA_ALLOCATE PL_ARENA_ALLOCATE
+#define PR_ARENA_GROW PL_ARENA_GROW
+#define PR_ARENA_MARK PL_ARENA_MARK
+#define PR_CLEAR_UNUSED PL_CLEAR_UNUSED
+#define PR_CLEAR_ARENA PL_CLEAR_ARENA
+#define PR_ARENA_RELEASE PL_ARENA_RELEASE
+#define PR_COUNT_ARENA PL_COUNT_ARENA
+#define PR_ARENA_DESTROY PL_ARENA_DESTROY
+#define PR_InitArenaPool PL_InitArenaPool
+#define PR_FreeArenaPool PL_FreeArenaPool
+#define PR_FinishArenaPool PL_FinishArenaPool
+#define PR_CompactArenaPool PL_CompactArenaPool
+#define PR_ArenaFinish PL_ArenaFinish
+#define PR_ArenaAllocate PL_ArenaAllocate
+#define PR_ArenaGrow PL_ArenaGrow
+#define PR_ArenaRelease PL_ArenaRelease
+#define PR_ArenaCountAllocation PL_ArenaCountAllocation
+#define PR_ArenaCountInplaceGrowth PL_ArenaCountInplaceGrowth
+#define PR_ArenaCountGrowth PL_ArenaCountGrowth
+#define PR_ArenaCountRelease PL_ArenaCountRelease
+#define PR_ArenaCountRetract PL_ArenaCountRetract
+
+/* Re: prevent.h->plevent.h */
+#define PREvent PLEvent
+#define PREventQueue PLEventQueue
+#define PR_CreateEventQueue PL_CreateEventQueue
+#define PR_DestroyEventQueue PL_DestroyEventQueue
+#define PR_GetEventQueueMonitor PL_GetEventQueueMonitor
+#define PR_ENTER_EVENT_QUEUE_MONITOR PL_ENTER_EVENT_QUEUE_MONITOR
+#define PR_EXIT_EVENT_QUEUE_MONITOR PL_EXIT_EVENT_QUEUE_MONITOR
+#define PR_PostEvent PL_PostEvent
+#define PR_PostSynchronousEvent PL_PostSynchronousEvent
+#define PR_GetEvent PL_GetEvent
+#define PR_EventAvailable PL_EventAvailable
+#define PREventFunProc PLEventFunProc
+#define PR_MapEvents PL_MapEvents
+#define PR_RevokeEvents PL_RevokeEvents
+#define PR_ProcessPendingEvents PL_ProcessPendingEvents
+#define PR_WaitForEvent PL_WaitForEvent
+#define PR_EventLoop PL_EventLoop
+#define PR_GetEventQueueSelectFD PL_GetEventQueueSelectFD
+#define PRHandleEventProc PLHandleEventProc
+#define PRDestroyEventProc PLDestroyEventProc
+#define PR_InitEvent PL_InitEvent
+#define PR_GetEventOwner PL_GetEventOwner
+#define PR_HandleEvent PL_HandleEvent
+#define PR_DestroyEvent PL_DestroyEvent
+#define PR_DequeueEvent PL_DequeueEvent
+#define PR_GetMainEventQueue PL_GetMainEventQueue
+
+/* Re: prhash.h->plhash.h */
+#define PRHashEntry PLHashEntry
+#define PRHashTable PLHashTable
+#define PRHashNumber PLHashNumber
+#define PRHashFunction PLHashFunction
+#define PRHashComparator PLHashComparator
+#define PRHashEnumerator PLHashEnumerator
+#define PRHashAllocOps PLHashAllocOps
+#define PR_NewHashTable PL_NewHashTable
+#define PR_HashTableDestroy PL_HashTableDestroy
+#define PR_HashTableRawLookup PL_HashTableRawLookup
+#define PR_HashTableRawAdd PL_HashTableRawAdd
+#define PR_HashTableRawRemove PL_HashTableRawRemove
+#define PR_HashTableAdd PL_HashTableAdd
+#define PR_HashTableRemove PL_HashTableRemove
+#define PR_HashTableEnumerateEntries PL_HashTableEnumerateEntries
+#define PR_HashTableLookup PL_HashTableLookup
+#define PR_HashTableDump PL_HashTableDump
+#define PR_HashString PL_HashString
+#define PR_CompareStrings PL_CompareStrings
+#define PR_CompareValues PL_CompareValues
+
+#endif /* !defined(PROTYPES_H) */
diff --git a/src/third_party/js-1.7/jsparse.c b/src/third_party/js-1.7/jsparse.c
new file mode 100644
index 00000000000..132e2ad26b7
--- /dev/null
+++ b/src/third_party/js-1.7/jsparse.c
@@ -0,0 +1,6547 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS parser.
+ *
+ * This is a recursive-descent parser for the JavaScript language specified by
+ * "The JavaScript 1.5 Language Specification". It uses lexical and semantic
+ * feedback to disambiguate non-LL(1) structures. It generates trees of nodes
+ * induced by the recursive parsing (not precise syntax trees, see jsparse.h).
+ * After tree construction, it rewrites trees to fold constants and evaluate
+ * compile-time expressions. Finally, it calls js_EmitTree (see jsemit.h) to
+ * generate bytecode.
+ *
+ * This parser attempts no error recovery.
+ */
+#include "jsstddef.h"
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+#include "jstypes.h"
+#include "jsarena.h" /* Added by JSIFY */
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsapi.h"
+#include "jsarray.h"
+#include "jsatom.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsemit.h"
+#include "jsfun.h"
+#include "jsinterp.h"
+#include "jslock.h"
+#include "jsnum.h"
+#include "jsobj.h"
+#include "jsopcode.h"
+#include "jsparse.h"
+#include "jsscan.h"
+#include "jsscope.h"
+#include "jsscript.h"
+#include "jsstr.h"
+
+#if JS_HAS_XML_SUPPORT
+#include "jsxml.h"
+#endif
+
+#if JS_HAS_DESTRUCTURING
+#include "jsdhash.h"
+#endif
+
+/*
+ * JS parsers, from lowest to highest precedence.
+ *
+ * Each parser takes a context, a token stream, and a tree context struct.
+ * Each returns a parse node tree or null on error.
+ */
+
+typedef JSParseNode *
+JSParser(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc);
+
+typedef JSParseNode *
+JSMemberParser(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
+ JSBool allowCallSyntax);
+
+typedef JSParseNode *
+JSPrimaryParser(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
+ JSTokenType tt, JSBool afterDot);
+
+static JSParser FunctionStmt;
+static JSParser FunctionExpr;
+static JSParser Statements;
+static JSParser Statement;
+static JSParser Variables;
+static JSParser Expr;
+static JSParser AssignExpr;
+static JSParser CondExpr;
+static JSParser OrExpr;
+static JSParser AndExpr;
+static JSParser BitOrExpr;
+static JSParser BitXorExpr;
+static JSParser BitAndExpr;
+static JSParser EqExpr;
+static JSParser RelExpr;
+static JSParser ShiftExpr;
+static JSParser AddExpr;
+static JSParser MulExpr;
+static JSParser UnaryExpr;
+static JSMemberParser MemberExpr;
+static JSPrimaryParser PrimaryExpr;
+
+/*
+ * Insist that the next token be of type tt, or report errno and return null.
+ * NB: this macro uses cx and ts from its lexical environment.
+ */
+#define MUST_MATCH_TOKEN(tt, errno) \
+ JS_BEGIN_MACRO \
+ if (js_GetToken(cx, ts) != tt) { \
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR, \
+ errno); \
+ return NULL; \
+ } \
+ JS_END_MACRO
+
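+/*
+ * Guard against C stack exhaustion while parsing deeply nested source:
+ * report JSMSG_OVER_RECURSED and return null when the stack limit is near.
+ * NB: like MUST_MATCH_TOKEN, this macro uses cx and ts from its lexical
+ * environment.
+ */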
+#define CHECK_RECURSION() \
+ JS_BEGIN_MACRO \
+ int stackDummy; \
+ if (!JS_CHECK_STACK_SIZE(cx, stackDummy)) { \
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR, \
+ JSMSG_OVER_RECURSED); \
+ return NULL; \
+ } \
+ JS_END_MACRO
+
+#ifdef METER_PARSENODES
+static uint32 parsenodes = 0;
+static uint32 maxparsenodes = 0;
+static uint32 recyclednodes = 0;
+#endif
+
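+/*
+ * Prepend pn to tc's list of recyclable parse nodes and return the node
+ * that followed it (its old pn_next), so callers can keep walking a
+ * sibling chain while recycling it.
+ */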
+static JSParseNode *
+RecycleTree(JSParseNode *pn, JSTreeContext *tc)
+{
+ JSParseNode *next;
+
+ if (!pn)
+ return NULL;
+ JS_ASSERT(pn != tc->nodeList); /* catch back-to-back dup recycles */
+ next = pn->pn_next;
+ pn->pn_next = tc->nodeList;
+ tc->nodeList = pn;
+#ifdef METER_PARSENODES
+ recyclednodes++;
+#endif
+ return next;
+}
+
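+/*
+ * Take a node from tc's recycle list if one is available, pushing the
+ * reused node's immediate children back onto the list; otherwise allocate
+ * a fresh node from cx->tempPool, reporting out-of-memory on failure.
+ */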
+static JSParseNode *
+NewOrRecycledNode(JSContext *cx, JSTreeContext *tc)
+{
+ JSParseNode *pn;
+
+ pn = tc->nodeList;
+ if (!pn) {
+ JS_ARENA_ALLOCATE_TYPE(pn, JSParseNode, &cx->tempPool);
+ if (!pn)
+ JS_ReportOutOfMemory(cx);
+ } else {
+ tc->nodeList = pn->pn_next;
+
+ /* Recycle immediate descendants only, to save work and working set. */
+ switch (pn->pn_arity) {
+ case PN_FUNC:
+ RecycleTree(pn->pn_body, tc);
+ break;
+ case PN_LIST:
+ if (pn->pn_head) {
+ /* XXX check for dup recycles in the list */
+ *pn->pn_tail = tc->nodeList;
+ tc->nodeList = pn->pn_head;
+#ifdef METER_PARSENODES
+ recyclednodes += pn->pn_count;
+#endif
+ }
+ break;
+ case PN_TERNARY:
+ RecycleTree(pn->pn_kid1, tc);
+ RecycleTree(pn->pn_kid2, tc);
+ RecycleTree(pn->pn_kid3, tc);
+ break;
+ case PN_BINARY:
+ RecycleTree(pn->pn_left, tc);
+ RecycleTree(pn->pn_right, tc);
+ break;
+ case PN_UNARY:
+ RecycleTree(pn->pn_kid, tc);
+ break;
+ case PN_NAME:
+ RecycleTree(pn->pn_expr, tc);
+ break;
+ case PN_NULLARY:
+ break;
+ }
+ }
+#ifdef METER_PARSENODES
+ if (pn) {
+ parsenodes++;
+ if (parsenodes - recyclednodes > maxparsenodes)
+ maxparsenodes = parsenodes - recyclednodes;
+ }
+#endif
+ return pn;
+}
+
+/*
+ * Allocate a JSParseNode from cx's temporary arena.
+ */
+static JSParseNode *
+NewParseNode(JSContext *cx, JSTokenStream *ts, JSParseNodeArity arity,
+ JSTreeContext *tc)
+{
+ JSParseNode *pn;
+ JSToken *tp;
+
+ pn = NewOrRecycledNode(cx, tc);
+ if (!pn)
+ return NULL;
+ tp = &CURRENT_TOKEN(ts);
+ pn->pn_type = tp->type;
+ pn->pn_pos = tp->pos;
+ pn->pn_op = JSOP_NOP;
+ pn->pn_arity = arity;
+ pn->pn_next = NULL;
+ pn->pn_ts = ts;
+ pn->pn_source = NULL;
+ return pn;
+}
+
+static JSParseNode *
+NewBinary(JSContext *cx, JSTokenType tt,
+ JSOp op, JSParseNode *left, JSParseNode *right,
+ JSTreeContext *tc)
+{
+ JSParseNode *pn, *pn1, *pn2;
+
+ if (!left || !right)
+ return NULL;
+
+ /*
+ * Flatten a left-associative (left-heavy) tree of a given operator into
+ * a list, to reduce js_FoldConstants and js_EmitTree recursion.
+ */
+ if (left->pn_type == tt &&
+ left->pn_op == op &&
+ (js_CodeSpec[op].format & JOF_LEFTASSOC)) {
+ if (left->pn_arity != PN_LIST) {
+ pn1 = left->pn_left, pn2 = left->pn_right;
+ left->pn_arity = PN_LIST;
+ PN_INIT_LIST_1(left, pn1);
+ PN_APPEND(left, pn2);
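+ /*
+ * For +, note whether each operand is a string (PNX_STRCAT) or
+ * neither string nor number (PNX_CANTFOLD), so constant folding
+ * can tell concatenation apart from numeric addition.
+ */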
+ if (tt == TOK_PLUS) {
+ if (pn1->pn_type == TOK_STRING)
+ left->pn_extra |= PNX_STRCAT;
+ else if (pn1->pn_type != TOK_NUMBER)
+ left->pn_extra |= PNX_CANTFOLD;
+ if (pn2->pn_type == TOK_STRING)
+ left->pn_extra |= PNX_STRCAT;
+ else if (pn2->pn_type != TOK_NUMBER)
+ left->pn_extra |= PNX_CANTFOLD;
+ }
+ }
+ PN_APPEND(left, right);
+ left->pn_pos.end = right->pn_pos.end;
+ if (tt == TOK_PLUS) {
+ if (right->pn_type == TOK_STRING)
+ left->pn_extra |= PNX_STRCAT;
+ else if (right->pn_type != TOK_NUMBER)
+ left->pn_extra |= PNX_CANTFOLD;
+ }
+ return left;
+ }
+
+ /*
+ * Fold constant addition immediately, to conserve node space and, what's
+ * more, so js_FoldConstants never sees mixed addition and concatenation
+ * operations with more than one leading non-string operand in a PN_LIST
+ * generated for expressions such as 1 + 2 + "pt" (which should evaluate
+ * to "3pt", not "12pt").
+ */
+ if (tt == TOK_PLUS &&
+ left->pn_type == TOK_NUMBER &&
+ right->pn_type == TOK_NUMBER) {
+ left->pn_dval += right->pn_dval;
+ left->pn_pos.end = right->pn_pos.end;
+ RecycleTree(right, tc);
+ return left;
+ }
+
+ pn = NewOrRecycledNode(cx, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_type = tt;
+ pn->pn_pos.begin = left->pn_pos.begin;
+ pn->pn_pos.end = right->pn_pos.end;
+ pn->pn_op = op;
+ pn->pn_arity = PN_BINARY;
+ pn->pn_left = left;
+ pn->pn_right = right;
+ pn->pn_next = NULL;
+ pn->pn_ts = NULL;
+ pn->pn_source = NULL;
+ return pn;
+}
+
+#if JS_HAS_GETTER_SETTER
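+/*
+ * If the current TOK_NAME token is 'getter' or 'setter' and a token of type
+ * tt follows on the same line, consume that token, tag it with JSOP_GETTER
+ * or JSOP_SETTER, warn about the deprecated usage under the strict option,
+ * and return tt. Otherwise return TOK_NAME; return TOK_ERROR if the form is
+ * malformed.
+ */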
+static JSTokenType
+CheckGetterOrSetter(JSContext *cx, JSTokenStream *ts, JSTokenType tt)
+{
+ JSAtom *atom;
+ JSRuntime *rt;
+ JSOp op;
+ const char *name;
+
+ JS_ASSERT(CURRENT_TOKEN(ts).type == TOK_NAME);
+ atom = CURRENT_TOKEN(ts).t_atom;
+ rt = cx->runtime;
+ if (atom == rt->atomState.getterAtom)
+ op = JSOP_GETTER;
+ else if (atom == rt->atomState.setterAtom)
+ op = JSOP_SETTER;
+ else
+ return TOK_NAME;
+ if (js_PeekTokenSameLine(cx, ts) != tt)
+ return TOK_NAME;
+ (void) js_GetToken(cx, ts);
+ if (CURRENT_TOKEN(ts).t_op != JSOP_NOP) {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_GETTER_OR_SETTER,
+ (op == JSOP_GETTER)
+ ? js_getter_str
+ : js_setter_str);
+ return TOK_ERROR;
+ }
+ CURRENT_TOKEN(ts).t_op = op;
+ if (JS_HAS_STRICT_OPTION(cx)) {
+ name = js_AtomToPrintableString(cx, atom);
+ if (!name ||
+ !js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS |
+ JSREPORT_WARNING |
+ JSREPORT_STRICT,
+ JSMSG_DEPRECATED_USAGE,
+ name)) {
+ return TOK_ERROR;
+ }
+ }
+ return tt;
+}
+#endif
+
+static void
+MaybeSetupFrame(JSContext *cx, JSObject *chain, JSStackFrame *oldfp,
+ JSStackFrame *newfp)
+{
+ /*
+ * Always push a new frame if the current frame is special, so that
+ * Variables gets the correct variables object: the one from the special
+ * frame's caller.
+ */
+ if (oldfp &&
+ oldfp->varobj &&
+ oldfp->scopeChain == chain &&
+ !(oldfp->flags & JSFRAME_SPECIAL)) {
+ return;
+ }
+
+ memset(newfp, 0, sizeof *newfp);
+
+ /* Default to sharing the same variables object and scope chain. */
+ newfp->varobj = newfp->scopeChain = chain;
+ if (cx->options & JSOPTION_VAROBJFIX) {
+ while ((chain = JS_GetParent(cx, chain)) != NULL)
+ newfp->varobj = chain;
+ }
+ newfp->down = oldfp;
+ if (oldfp) {
+ /*
+ * In the case of eval and debugger frames, we need to dig down and find
+ * the real variables objects and function that our new stack frame is
+ * going to use.
+ */
+ newfp->flags = oldfp->flags & (JSFRAME_SPECIAL | JSFRAME_COMPILE_N_GO |
+ JSFRAME_SCRIPT_OBJECT);
+ while (oldfp->flags & JSFRAME_SPECIAL) {
+ oldfp = oldfp->down;
+ if (!oldfp)
+ break;
+ }
+ if (oldfp && (newfp->flags & JSFRAME_SPECIAL)) {
+ newfp->varobj = oldfp->varobj;
+ newfp->vars = oldfp->vars;
+ newfp->fun = oldfp->fun;
+ }
+ }
+ cx->fp = newfp;
+}
+
+/*
+ * Parse a top-level JS script.
+ */
+JS_FRIEND_API(JSParseNode *)
+js_ParseTokenStream(JSContext *cx, JSObject *chain, JSTokenStream *ts)
+{
+ JSStackFrame *fp, frame;
+ JSTreeContext tc;
+ JSParseNode *pn;
+
+ /*
+ * Push a compiler frame if we have no frames, or if the top frame is a
+ * lightweight function activation, or if its scope chain doesn't match
+ * the one passed to us.
+ */
+ fp = cx->fp;
+ MaybeSetupFrame(cx, chain, fp, &frame);
+
+ /*
+ * Protect atoms from being collected by a GC activation, which might
+ * - nest on this thread due to out of memory (the so-called "last ditch"
+ * GC attempted within js_NewGCThing), or
+ * - run for any reason on another thread if this thread is suspended on
+ * an object lock before it finishes generating bytecode into a script
+ * protected from the GC by a root or a stack frame reference.
+ */
+ JS_KEEP_ATOMS(cx->runtime);
+ TREE_CONTEXT_INIT(&tc);
+ pn = Statements(cx, ts, &tc);
+ if (pn) {
+ if (!js_MatchToken(cx, ts, TOK_EOF)) {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_SYNTAX_ERROR);
+ pn = NULL;
+ } else {
+ pn->pn_type = TOK_LC;
+ if (!js_FoldConstants(cx, pn, &tc))
+ pn = NULL;
+ }
+ }
+
+ TREE_CONTEXT_FINISH(&tc);
+ JS_UNKEEP_ATOMS(cx->runtime);
+ cx->fp = fp;
+ return pn;
+}
+
+/*
+ * Compile a top-level script.
+ */
+JS_FRIEND_API(JSBool)
+js_CompileTokenStream(JSContext *cx, JSObject *chain, JSTokenStream *ts,
+ JSCodeGenerator *cg)
+{
+ JSStackFrame *fp, frame;
+ uint32 flags;
+ JSParseNode *pn;
+ JSBool ok;
+#ifdef METER_PARSENODES
+ void *sbrk(ptrdiff_t), *before = sbrk(0);
+#endif
+
+ /*
+ * Push a compiler frame if we have no frames, or if the top frame is a
+ * lightweight function activation, or if its scope chain doesn't match
+ * the one passed to us.
+ */
+ fp = cx->fp;
+ MaybeSetupFrame(cx, chain, fp, &frame);
+ flags = cx->fp->flags;
+ cx->fp->flags = flags |
+ (JS_HAS_COMPILE_N_GO_OPTION(cx)
+ ? JSFRAME_COMPILING | JSFRAME_COMPILE_N_GO
+ : JSFRAME_COMPILING);
+
+ /* Prevent GC activation while compiling. */
+ JS_KEEP_ATOMS(cx->runtime);
+
+ pn = Statements(cx, ts, &cg->treeContext);
+ if (!pn) {
+ ok = JS_FALSE;
+ } else if (!js_MatchToken(cx, ts, TOK_EOF)) {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_SYNTAX_ERROR);
+ ok = JS_FALSE;
+ } else {
+#ifdef METER_PARSENODES
+ printf("Parser growth: %d (%u nodes, %u max, %u unrecycled)\n",
+ (char *)sbrk(0) - (char *)before,
+ parsenodes,
+ maxparsenodes,
+ parsenodes - recyclednodes);
+ before = sbrk(0);
+#endif
+
+ /*
+ * No need to emit bytecode here -- Statements already has, for each
+ * statement in turn. Search for TCF_COMPILING in Statements, below.
+ * That flag is set for every tc == &cg->treeContext, and it implies
+ * that the tc can be downcast to a cg and used to emit code during
+ * parsing, rather than at the end of the parse phase.
+ *
+ * Nowadays the threaded interpreter needs a stop instruction, so we
+ * do have to emit that here.
+ */
+ JS_ASSERT(cg->treeContext.flags & TCF_COMPILING);
+ ok = js_Emit1(cx, cg, JSOP_STOP) >= 0;
+ }
+
+#ifdef METER_PARSENODES
+ printf("Code-gen growth: %d (%u bytecodes, %u srcnotes)\n",
+ (char *)sbrk(0) - (char *)before, CG_OFFSET(cg), cg->noteCount);
+#endif
+#ifdef JS_ARENAMETER
+ JS_DumpArenaStats(stdout);
+#endif
+ JS_UNKEEP_ATOMS(cx->runtime);
+ cx->fp->flags = flags;
+ cx->fp = fp;
+ return ok;
+}
+
+/*
+ * Insist on a final return before control flows out of pn. Try to be a bit
+ * smart about loops: do {...; return e2;} while(0) at the end of a function
+ * that contains an early return e1 will get a strict warning. Similarly for
+ * iloops: while (true){...} is treated as though ... returns.
+ */
+#define ENDS_IN_OTHER 0
+#define ENDS_IN_RETURN 1
+#define ENDS_IN_BREAK 2
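+/*
+ * These values are chosen so that results from different branches combine
+ * with bitwise AND (see TOK_IF and TOK_SWITCH below): the combined result
+ * is ENDS_IN_RETURN only when every branch ends in a return.
+ */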
+
+static int
+HasFinalReturn(JSParseNode *pn)
+{
+ JSParseNode *pn2, *pn3;
+ uintN rv, rv2, hasDefault;
+
+ switch (pn->pn_type) {
+ case TOK_LC:
+ if (!pn->pn_head)
+ return ENDS_IN_OTHER;
+ return HasFinalReturn(PN_LAST(pn));
+
+ case TOK_IF:
+ if (!pn->pn_kid3)
+ return ENDS_IN_OTHER;
+ return HasFinalReturn(pn->pn_kid2) & HasFinalReturn(pn->pn_kid3);
+
+ case TOK_WHILE:
+ pn2 = pn->pn_left;
+ if (pn2->pn_type == TOK_PRIMARY && pn2->pn_op == JSOP_TRUE)
+ return ENDS_IN_RETURN;
+ if (pn2->pn_type == TOK_NUMBER && pn2->pn_dval)
+ return ENDS_IN_RETURN;
+ return ENDS_IN_OTHER;
+
+ case TOK_DO:
+ pn2 = pn->pn_right;
+ if (pn2->pn_type == TOK_PRIMARY) {
+ if (pn2->pn_op == JSOP_FALSE)
+ return HasFinalReturn(pn->pn_left);
+ if (pn2->pn_op == JSOP_TRUE)
+ return ENDS_IN_RETURN;
+ }
+ if (pn2->pn_type == TOK_NUMBER) {
+ if (pn2->pn_dval == 0)
+ return HasFinalReturn(pn->pn_left);
+ return ENDS_IN_RETURN;
+ }
+ return ENDS_IN_OTHER;
+
+ case TOK_FOR:
+ pn2 = pn->pn_left;
+ if (pn2->pn_arity == PN_TERNARY && !pn2->pn_kid2)
+ return ENDS_IN_RETURN;
+ return ENDS_IN_OTHER;
+
+ case TOK_SWITCH:
+ rv = ENDS_IN_RETURN;
+ hasDefault = ENDS_IN_OTHER;
+ pn2 = pn->pn_right;
+ if (pn2->pn_type == TOK_LEXICALSCOPE)
+ pn2 = pn2->pn_expr;
+ for (pn2 = pn2->pn_head; rv && pn2; pn2 = pn2->pn_next) {
+ if (pn2->pn_type == TOK_DEFAULT)
+ hasDefault = ENDS_IN_RETURN;
+ pn3 = pn2->pn_right;
+ JS_ASSERT(pn3->pn_type == TOK_LC);
+ if (pn3->pn_head) {
+ rv2 = HasFinalReturn(PN_LAST(pn3));
+ if (rv2 == ENDS_IN_OTHER && pn2->pn_next)
+ /* Falling through to next case or default. */;
+ else
+ rv &= rv2;
+ }
+ }
+ /* If a final switch has no default case, we judge it harshly. */
+ rv &= hasDefault;
+ return rv;
+
+ case TOK_BREAK:
+ return ENDS_IN_BREAK;
+
+ case TOK_WITH:
+ return HasFinalReturn(pn->pn_right);
+
+ case TOK_RETURN:
+ return ENDS_IN_RETURN;
+
+ case TOK_COLON:
+ case TOK_LEXICALSCOPE:
+ return HasFinalReturn(pn->pn_expr);
+
+ case TOK_THROW:
+ return ENDS_IN_RETURN;
+
+ case TOK_TRY:
+ /* If we have a finally block that returns, we are done. */
+ if (pn->pn_kid3) {
+ rv = HasFinalReturn(pn->pn_kid3);
+ if (rv == ENDS_IN_RETURN)
+ return rv;
+ }
+
+ /* Else check the try block and any and all catch statements. */
+ rv = HasFinalReturn(pn->pn_kid1);
+ if (pn->pn_kid2) {
+ JS_ASSERT(pn->pn_kid2->pn_arity == PN_LIST);
+ for (pn2 = pn->pn_kid2->pn_head; pn2; pn2 = pn2->pn_next)
+ rv &= HasFinalReturn(pn2);
+ }
+ return rv;
+
+ case TOK_CATCH:
+ /* Check this catch block's body. */
+ return HasFinalReturn(pn->pn_kid3);
+
+ case TOK_LET:
+ /* Non-binary let statements are let declarations. */
+ if (pn->pn_arity != PN_BINARY)
+ return ENDS_IN_OTHER;
+ return HasFinalReturn(pn->pn_right);
+
+ default:
+ return ENDS_IN_OTHER;
+ }
+}
+
+static JSBool
+ReportBadReturn(JSContext *cx, JSTokenStream *ts, uintN flags, uintN errnum,
+ uintN anonerrnum)
+{
+ JSFunction *fun;
+ const char *name;
+
+ fun = cx->fp->fun;
+ if (fun->atom) {
+ name = js_AtomToPrintableString(cx, fun->atom);
+ } else {
+ errnum = anonerrnum;
+ name = NULL;
+ }
+ return js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | flags, errnum,
+ name);
+}
+
+static JSBool
+CheckFinalReturn(JSContext *cx, JSTokenStream *ts, JSParseNode *pn)
+{
+ return HasFinalReturn(pn) == ENDS_IN_RETURN ||
+ ReportBadReturn(cx, ts, JSREPORT_WARNING | JSREPORT_STRICT,
+ JSMSG_NO_RETURN_VALUE, JSMSG_ANON_NO_RETURN_VALUE);
+}
+
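+/*
+ * Parse a function body as a block of statements, pushing a temporary stack
+ * frame for fun if cx->fp does not already match it. Under the strict option
+ * this checks for a consistent final return, and when tc is a code generator
+ * (TCF_COMPILING) it folds constants and emits the body's bytecode here.
+ */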
+static JSParseNode *
+FunctionBody(JSContext *cx, JSTokenStream *ts, JSFunction *fun,
+ JSTreeContext *tc)
+{
+ JSStackFrame *fp, frame;
+ JSObject *funobj;
+ JSStmtInfo stmtInfo;
+ uintN oldflags, firstLine;
+ JSParseNode *pn;
+
+ fp = cx->fp;
+ funobj = fun->object;
+ if (!fp || fp->fun != fun || fp->varobj != funobj ||
+ fp->scopeChain != funobj) {
+ memset(&frame, 0, sizeof frame);
+ frame.fun = fun;
+ frame.varobj = frame.scopeChain = funobj;
+ frame.down = fp;
+ if (fp)
+ frame.flags = fp->flags & JSFRAME_COMPILE_N_GO;
+ cx->fp = &frame;
+ }
+
+ /*
+ * Set interpreted early so js_EmitTree can test it to decide whether to
+ * eliminate useless expressions.
+ */
+ fun->flags |= JSFUN_INTERPRETED;
+
+ js_PushStatement(tc, &stmtInfo, STMT_BLOCK, -1);
+ stmtInfo.flags = SIF_BODY_BLOCK;
+
+ oldflags = tc->flags;
+ tc->flags &= ~(TCF_RETURN_EXPR | TCF_RETURN_VOID);
+ tc->flags |= TCF_IN_FUNCTION;
+
+ /*
+ * Save the body's first line, and store it in pn->pn_pos.begin.lineno
+ * later, because we may not have peeked in ts yet, so Statements won't
+ * acquire a valid pn->pn_pos.begin from the current token.
+ */
+ firstLine = ts->lineno;
+ pn = Statements(cx, ts, tc);
+
+ js_PopStatement(tc);
+
+ /* Check for falling off the end of a function that returns a value. */
+ if (pn && JS_HAS_STRICT_OPTION(cx) && (tc->flags & TCF_RETURN_EXPR)) {
+ if (!CheckFinalReturn(cx, ts, pn))
+ pn = NULL;
+ }
+
+ /*
+ * If we have a parse tree in pn and a code generator in tc, emit this
+ * function's code. We must do this here, not in js_CompileFunctionBody,
+ * in order to detect TCF_IN_FUNCTION among tc->flags.
+ */
+ if (pn) {
+ pn->pn_pos.begin.lineno = firstLine;
+ if ((tc->flags & TCF_COMPILING)) {
+ JSCodeGenerator *cg = (JSCodeGenerator *) tc;
+
+ if (!js_FoldConstants(cx, pn, tc) ||
+ !js_EmitFunctionBytecode(cx, cg, pn)) {
+ pn = NULL;
+ }
+ }
+ }
+
+ cx->fp = fp;
+ tc->flags = oldflags | (tc->flags & (TCF_FUN_FLAGS | TCF_HAS_DEFXMLNS));
+ return pn;
+}
+
+/*
+ * Compile a JS function body, which might appear as the value of an event
+ * handler attribute in an HTML <INPUT> tag.
+ */
+JSBool
+js_CompileFunctionBody(JSContext *cx, JSTokenStream *ts, JSFunction *fun)
+{
+ JSArenaPool codePool, notePool;
+ JSCodeGenerator funcg;
+ JSStackFrame *fp, frame;
+ JSObject *funobj;
+ JSParseNode *pn;
+
+ JS_InitArenaPool(&codePool, "code", 1024, sizeof(jsbytecode));
+ JS_InitArenaPool(&notePool, "note", 1024, sizeof(jssrcnote));
+ if (!js_InitCodeGenerator(cx, &funcg, &codePool, &notePool,
+ ts->filename, ts->lineno,
+ ts->principals)) {
+ return JS_FALSE;
+ }
+
+ /* Prevent GC activation while compiling. */
+ JS_KEEP_ATOMS(cx->runtime);
+
+ /* Push a JSStackFrame for use by FunctionBody. */
+ fp = cx->fp;
+ funobj = fun->object;
+ JS_ASSERT(!fp || (fp->fun != fun && fp->varobj != funobj &&
+ fp->scopeChain != funobj));
+ memset(&frame, 0, sizeof frame);
+ frame.fun = fun;
+ frame.varobj = frame.scopeChain = funobj;
+ frame.down = fp;
+ frame.flags = JS_HAS_COMPILE_N_GO_OPTION(cx)
+ ? JSFRAME_COMPILING | JSFRAME_COMPILE_N_GO
+ : JSFRAME_COMPILING;
+ cx->fp = &frame;
+
+ /*
+ * Farble the body so that it looks like a block statement to js_EmitTree,
+ * which is called beneath FunctionBody; see Statements, further below in
+ * this file. FunctionBody pushes a STMT_BLOCK record around its call to
+ * Statements, so Statements will not compile each statement as it loops
+ * to save JSParseNode space -- it will not compile at all, only build a
+ * JSParseNode tree.
+ *
+ * Therefore we must fold constants, allocate try notes, and generate code
+ * for this function, including a stop opcode at the end.
+ */
+ CURRENT_TOKEN(ts).type = TOK_LC;
+ pn = FunctionBody(cx, ts, fun, &funcg.treeContext);
+ if (pn && !js_NewScriptFromCG(cx, &funcg, fun))
+ pn = NULL;
+
+ /* Restore saved state and release code generation arenas. */
+ cx->fp = fp;
+ JS_UNKEEP_ATOMS(cx->runtime);
+ js_FinishCodeGenerator(cx, &funcg);
+ JS_FinishArenaPool(&codePool);
+ JS_FinishArenaPool(&notePool);
+ return pn != NULL;
+}
+
+/*
+ * Parameter block types for the several Binder functions. We use a common
+ * helper function signature in order to share code among destructuring and
+ * simple variable declaration parsers. In the destructuring case, the binder
+ * function is called indirectly from the variable declaration parser by way
+ * of CheckDestructuring and its friends.
+ */
+typedef struct BindData BindData;
+
+typedef JSBool
+(*Binder)(JSContext *cx, BindData *data, JSAtom *atom, JSTreeContext *tc);
+
+struct BindData {
+ JSParseNode *pn; /* error source coordinate */
+ JSTokenStream *ts; /* fallback if pn is null */
+ JSObject *obj; /* the variable object */
+ JSOp op; /* prolog bytecode or nop */
+ Binder binder; /* binder, discriminates u */
+ union {
+ struct {
+ JSFunction *fun; /* must come first! see next */
+ } arg;
+ struct {
+ JSFunction *fun; /* this overlays u.arg.fun */
+ JSClass *clasp;
+ JSPropertyOp getter;
+ JSPropertyOp setter;
+ uintN attrs;
+ } var;
+ struct {
+ jsuint index;
+ uintN overflow;
+ } let;
+ } u;
+};
+
+/*
+ * Given BindData *data and JSREPORT_* flags, expand to the second and third
+ * actual parameters to js_ReportCompileErrorNumber. Prefer reporting via pn
+ * to reporting via ts, for better destructuring error pointers.
+ */
+#define BIND_DATA_REPORT_ARGS(data, flags) \
+ (data)->pn ? (void *)(data)->pn : (void *)(data)->ts, \
+ ((data)->pn ? JSREPORT_PN : JSREPORT_TS) | (flags)
+
+static JSBool
+BumpFormalCount(JSContext *cx, JSFunction *fun)
+{
+ if (fun->nargs == JS_BITMASK(16)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_TOO_MANY_FUN_ARGS);
+ return JS_FALSE;
+ }
+ fun->nargs++;
+ return JS_TRUE;
+}
+
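+/*
+ * Binder for formal parameters: warns (under the strict option) about a
+ * duplicate parameter name, then adds a hidden argument property whose
+ * shortid is the next argument slot and bumps fun->nargs.
+ */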
+static JSBool
+BindArg(JSContext *cx, BindData *data, JSAtom *atom, JSTreeContext *tc)
+{
+ JSObject *obj, *pobj;
+ JSProperty *prop;
+ JSBool ok;
+ uintN dupflag;
+ JSFunction *fun;
+ const char *name;
+
+ obj = data->obj;
+ ok = js_LookupHiddenProperty(cx, obj, ATOM_TO_JSID(atom), &pobj, &prop);
+ if (!ok)
+ return JS_FALSE;
+
+ dupflag = 0;
+ if (prop) {
+ JS_ASSERT(pobj == obj);
+ name = js_AtomToPrintableString(cx, atom);
+
+ /*
+ * A duplicate parameter name, a "feature" required by ECMA-262.
+ * We force a duplicate node on the SCOPE_LAST_PROP(scope) list
+ * with the same id, distinguished by the SPROP_IS_DUPLICATE flag,
+ * and not mapped by an entry in scope.
+ */
+ ok = name &&
+ js_ReportCompileErrorNumber(cx,
+ BIND_DATA_REPORT_ARGS(data,
+ JSREPORT_WARNING |
+ JSREPORT_STRICT),
+ JSMSG_DUPLICATE_FORMAL,
+ name);
+
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ if (!ok)
+ return JS_FALSE;
+
+ dupflag = SPROP_IS_DUPLICATE;
+ }
+
+ fun = data->u.arg.fun;
+ if (!js_AddHiddenProperty(cx, data->obj, ATOM_TO_JSID(atom),
+ js_GetArgument, js_SetArgument,
+ SPROP_INVALID_SLOT,
+ JSPROP_PERMANENT | JSPROP_SHARED,
+ dupflag | SPROP_HAS_SHORTID,
+ fun->nargs)) {
+ return JS_FALSE;
+ }
+
+ return BumpFormalCount(cx, fun);
+}
+
+static JSBool
+BindLocalVariable(JSContext *cx, BindData *data, JSAtom *atom)
+{
+ JSFunction *fun;
+
+ /*
+ * Can't increase fun->nvars in an active frame, so insist that getter is
+ * js_GetLocalVariable, not js_GetCallVariable or anything else.
+ */
+ if (data->u.var.getter != js_GetLocalVariable)
+ return JS_TRUE;
+
+ /*
+ * Don't bind a variable with the hidden name 'arguments', per ECMA-262.
+ * Instead 'var arguments' always restates the predefined property of the
+ * activation objects with unhidden name 'arguments'. Assignment to such
+ * a variable must be handled specially.
+ */
+ if (atom == cx->runtime->atomState.argumentsAtom)
+ return JS_TRUE;
+
+ fun = data->u.var.fun;
+ if (!js_AddHiddenProperty(cx, data->obj, ATOM_TO_JSID(atom),
+ data->u.var.getter, data->u.var.setter,
+ SPROP_INVALID_SLOT,
+ data->u.var.attrs | JSPROP_SHARED,
+ SPROP_HAS_SHORTID, fun->u.i.nvars)) {
+ return JS_FALSE;
+ }
+ if (fun->u.i.nvars == JS_BITMASK(16)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_TOO_MANY_FUN_VARS);
+ return JS_FALSE;
+ }
+ fun->u.i.nvars++;
+ return JS_TRUE;
+}
+
+#if JS_HAS_DESTRUCTURING
+/*
+ * Forward declaration to maintain top-down presentation.
+ */
+static JSParseNode *
+DestructuringExpr(JSContext *cx, BindData *data, JSTreeContext *tc,
+ JSTokenType tt);
+
+static JSBool
+BindDestructuringArg(JSContext *cx, BindData *data, JSAtom *atom,
+ JSTreeContext *tc)
+{
+ JSAtomListElement *ale;
+ JSFunction *fun;
+ JSObject *obj, *pobj;
+ JSProperty *prop;
+ const char *name;
+
+ ATOM_LIST_SEARCH(ale, &tc->decls, atom);
+ if (!ale) {
+ ale = js_IndexAtom(cx, atom, &tc->decls);
+ if (!ale)
+ return JS_FALSE;
+ ALE_SET_JSOP(ale, data->op);
+ }
+
+ fun = data->u.var.fun;
+ obj = data->obj;
+ if (!js_LookupHiddenProperty(cx, obj, ATOM_TO_JSID(atom), &pobj, &prop))
+ return JS_FALSE;
+
+ if (prop) {
+ JS_ASSERT(pobj == obj && OBJ_IS_NATIVE(pobj));
+ name = js_AtomToPrintableString(cx, atom);
+ if (!name ||
+ !js_ReportCompileErrorNumber(cx,
+ BIND_DATA_REPORT_ARGS(data,
+ JSREPORT_WARNING |
+ JSREPORT_STRICT),
+ JSMSG_DUPLICATE_FORMAL,
+ name)) {
+ return JS_FALSE;
+ }
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ } else {
+ if (!BindLocalVariable(cx, data, atom))
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+#endif /* JS_HAS_DESTRUCTURING */
+
+static JSParseNode *
+FunctionDef(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
+ JSBool lambda)
+{
+ JSOp op, prevop;
+ JSParseNode *pn, *body, *result;
+ JSTokenType tt;
+ JSAtom *funAtom, *objAtom;
+ JSStackFrame *fp;
+ JSObject *varobj, *pobj;
+ JSAtomListElement *ale;
+ JSProperty *prop;
+ JSFunction *fun;
+ JSTreeContext funtc;
+#if JS_HAS_DESTRUCTURING
+ JSParseNode *item, *list = NULL;
+#endif
+
+ /* Make a TOK_FUNCTION node. */
+#if JS_HAS_GETTER_SETTER
+ op = CURRENT_TOKEN(ts).t_op;
+#endif
+ pn = NewParseNode(cx, ts, PN_FUNC, tc);
+ if (!pn)
+ return NULL;
+
+ /* Scan the optional function name into funAtom. */
+ ts->flags |= TSF_KEYWORD_IS_NAME;
+ tt = js_GetToken(cx, ts);
+ ts->flags &= ~TSF_KEYWORD_IS_NAME;
+ if (tt == TOK_NAME) {
+ funAtom = CURRENT_TOKEN(ts).t_atom;
+ } else {
+ funAtom = NULL;
+ js_UngetToken(ts);
+ }
+
+ /* Find the nearest variable-declaring scope and use it as our parent. */
+ fp = cx->fp;
+ varobj = fp->varobj;
+
+ /*
+ * Record names for function statements in tc->decls so we know when to
+ * avoid optimizing variable references that might name a function.
+ */
+ if (!lambda && funAtom) {
+ ATOM_LIST_SEARCH(ale, &tc->decls, funAtom);
+ if (ale) {
+ prevop = ALE_JSOP(ale);
+ if (JS_HAS_STRICT_OPTION(cx) || prevop == JSOP_DEFCONST) {
+ const char *name = js_AtomToPrintableString(cx, funAtom);
+ if (!name ||
+ !js_ReportCompileErrorNumber(cx, ts,
+ (prevop != JSOP_DEFCONST)
+ ? JSREPORT_TS |
+ JSREPORT_WARNING |
+ JSREPORT_STRICT
+ : JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_REDECLARED_VAR,
+ (prevop == JSOP_DEFFUN ||
+ prevop == JSOP_CLOSURE)
+ ? js_function_str
+ : (prevop == JSOP_DEFCONST)
+ ? js_const_str
+ : js_var_str,
+ name)) {
+ return NULL;
+ }
+ }
+ if (!AT_TOP_LEVEL(tc) && prevop == JSOP_DEFVAR)
+ tc->flags |= TCF_FUN_CLOSURE_VS_VAR;
+ } else {
+ ale = js_IndexAtom(cx, funAtom, &tc->decls);
+ if (!ale)
+ return NULL;
+ }
+ ALE_SET_JSOP(ale, AT_TOP_LEVEL(tc) ? JSOP_DEFFUN : JSOP_CLOSURE);
+
+ /*
+ * A function nested at top level inside another's body needs only a
+ * local variable to bind its name to its value, and not an activation
+ * object property (it might also need the activation property, if the
+ * outer function contains with statements, e.g., but the stack slot
+ * wins when jsemit.c's BindNameToSlot can optimize a JSOP_NAME into a
+ * JSOP_GETVAR bytecode).
+ */
+ if (AT_TOP_LEVEL(tc) && (tc->flags & TCF_IN_FUNCTION)) {
+ JSScopeProperty *sprop;
+
+ /*
+ * Define a property on the outer function so that BindNameToSlot
+ * can properly optimize accesses.
+ */
+ JS_ASSERT(OBJ_GET_CLASS(cx, varobj) == &js_FunctionClass);
+ JS_ASSERT(fp->fun == (JSFunction *) JS_GetPrivate(cx, varobj));
+ if (!js_LookupHiddenProperty(cx, varobj, ATOM_TO_JSID(funAtom),
+ &pobj, &prop)) {
+ return NULL;
+ }
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ sprop = NULL;
+ if (!prop ||
+ pobj != varobj ||
+ (sprop = (JSScopeProperty *)prop,
+ sprop->getter != js_GetLocalVariable)) {
+ uintN sflags;
+
+ /*
+ * Use SPROP_IS_DUPLICATE if there is a formal argument of the
+ * same name, so the decompiler can find the parameter name.
+ */
+ sflags = (sprop && sprop->getter == js_GetArgument)
+ ? SPROP_IS_DUPLICATE | SPROP_HAS_SHORTID
+ : SPROP_HAS_SHORTID;
+ if (!js_AddHiddenProperty(cx, varobj, ATOM_TO_JSID(funAtom),
+ js_GetLocalVariable,
+ js_SetLocalVariable,
+ SPROP_INVALID_SLOT,
+ JSPROP_PERMANENT | JSPROP_SHARED,
+ sflags, fp->fun->u.i.nvars)) {
+ return NULL;
+ }
+ if (fp->fun->u.i.nvars == JS_BITMASK(16)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_TOO_MANY_FUN_VARS);
+ return NULL;
+ }
+ fp->fun->u.i.nvars++;
+ }
+ }
+ }
+
+ fun = js_NewFunction(cx, NULL, NULL, 0, lambda ? JSFUN_LAMBDA : 0, varobj,
+ funAtom);
+ if (!fun)
+ return NULL;
+#if JS_HAS_GETTER_SETTER
+ if (op != JSOP_NOP)
+ fun->flags |= (op == JSOP_GETTER) ? JSPROP_GETTER : JSPROP_SETTER;
+#endif
+
+ /*
+ * Atomize fun->object early to protect against a last-ditch GC under
+ * js_LookupHiddenProperty.
+ *
+ * Absent use of the new scoped local GC roots API around compiler calls,
+ * we need to atomize here to protect against a GC activation. Atoms are
+ * protected from GC during compilation by the JS_FRIEND_API entry points
+ * in this file. There doesn't seem to be any gain in switching from the
+ * atom-keeping method to the bulkier, slower scoped local roots method.
+ */
+ objAtom = js_AtomizeObject(cx, fun->object, 0);
+ if (!objAtom)
+ return NULL;
+
+ /* Initialize early for possible flags mutation via DestructuringExpr. */
+ TREE_CONTEXT_INIT(&funtc);
+
+ /* Now parse formal argument list and compute fun->nargs. */
+ MUST_MATCH_TOKEN(TOK_LP, JSMSG_PAREN_BEFORE_FORMAL);
+ if (!js_MatchToken(cx, ts, TOK_RP)) {
+ BindData data;
+
+ data.pn = NULL;
+ data.ts = ts;
+ data.obj = fun->object;
+ data.op = JSOP_NOP;
+ data.binder = BindArg;
+ data.u.arg.fun = fun;
+
+ do {
+ tt = js_GetToken(cx, ts);
+ switch (tt) {
+#if JS_HAS_DESTRUCTURING
+ case TOK_LB:
+ case TOK_LC:
+ {
+ JSParseNode *lhs, *rhs;
+ jsint slot;
+
+ /*
+ * A destructuring formal parameter turns into one or more
+ * local variables initialized from properties of a single
+ * anonymous positional parameter, so here we must tweak our
+ * binder and its data.
+ */
+ data.op = JSOP_DEFVAR;
+ data.binder = BindDestructuringArg;
+ data.u.var.clasp = &js_FunctionClass;
+ data.u.var.getter = js_GetLocalVariable;
+ data.u.var.setter = js_SetLocalVariable;
+ data.u.var.attrs = JSPROP_PERMANENT;
+
+ /*
+ * Temporarily transfer the ownership of the recycle list to
+ * funtc. See bug 313967.
+ */
+ funtc.nodeList = tc->nodeList;
+ tc->nodeList = NULL;
+ lhs = DestructuringExpr(cx, &data, &funtc, tt);
+ tc->nodeList = funtc.nodeList;
+ funtc.nodeList = NULL;
+ if (!lhs)
+ return NULL;
+
+ /*
+ * Restore the formal parameter binder in case there are more
+ * non-destructuring formals in the parameter list.
+ */
+ data.binder = BindArg;
+
+ /*
+ * Adjust fun->nargs to count the single anonymous positional
+ * parameter that is to be destructured.
+ */
+ slot = fun->nargs;
+ if (!BumpFormalCount(cx, fun))
+ return NULL;
+
+ /*
+ * Synthesize a destructuring assignment from the single
+ * anonymous positional parameter into the destructuring
+ * left-hand-side expression and accumulate it in list.
+ */
+ rhs = NewParseNode(cx, ts, PN_NAME, tc);
+ if (!rhs)
+ return NULL;
+ rhs->pn_type = TOK_NAME;
+ rhs->pn_op = JSOP_GETARG;
+ rhs->pn_atom = cx->runtime->atomState.emptyAtom;
+ rhs->pn_expr = NULL;
+ rhs->pn_slot = slot;
+ rhs->pn_attrs = 0;
+
+ item = NewBinary(cx, TOK_ASSIGN, JSOP_NOP, lhs, rhs, tc);
+ if (!item)
+ return NULL;
+ if (!list) {
+ list = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!list)
+ return NULL;
+ list->pn_type = TOK_COMMA;
+ PN_INIT_LIST(list);
+ }
+ PN_APPEND(list, item);
+ break;
+ }
+#endif /* JS_HAS_DESTRUCTURING */
+
+ case TOK_NAME:
+ if (!data.binder(cx, &data, CURRENT_TOKEN(ts).t_atom, tc))
+ return NULL;
+ break;
+
+ default:
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_MISSING_FORMAL);
+ return NULL;
+ }
+ } while (js_MatchToken(cx, ts, TOK_COMMA));
+
+ MUST_MATCH_TOKEN(TOK_RP, JSMSG_PAREN_AFTER_FORMAL);
+ }
+
+ MUST_MATCH_TOKEN(TOK_LC, JSMSG_CURLY_BEFORE_BODY);
+ pn->pn_pos.begin = CURRENT_TOKEN(ts).pos.begin;
+
+ /*
+ * Temporarily transfer the ownership of the recycle list to funtc.
+ * See bug 313967.
+ */
+ funtc.nodeList = tc->nodeList;
+ tc->nodeList = NULL;
+ body = FunctionBody(cx, ts, fun, &funtc);
+ tc->nodeList = funtc.nodeList;
+ funtc.nodeList = NULL;
+
+ if (!body)
+ return NULL;
+
+ MUST_MATCH_TOKEN(TOK_RC, JSMSG_CURLY_AFTER_BODY);
+ pn->pn_pos.end = CURRENT_TOKEN(ts).pos.end;
+
+#if JS_HAS_DESTRUCTURING
+ /*
+ * If there were destructuring formal parameters, prepend the initializing
+ * comma expression that we synthesized to body. If the body is a lexical
+ * scope node, we must make a special TOK_BODY node, to prepend the formal
+ * parameter destructuring code without bracing the decompilation of the
+ * function body's lexical scope.
+ */
+ if (list) {
+ if (body->pn_arity != PN_LIST) {
+ JSParseNode *block;
+
+ JS_ASSERT(body->pn_type == TOK_LEXICALSCOPE);
+ JS_ASSERT(body->pn_arity == PN_NAME);
+
+ block = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!block)
+ return NULL;
+ block->pn_type = TOK_BODY;
+ block->pn_pos = body->pn_pos;
+ PN_INIT_LIST_1(block, body);
+
+ body = block;
+ }
+
+ item = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!item)
+ return NULL;
+
+ item->pn_type = TOK_SEMI;
+ item->pn_pos.begin = item->pn_pos.end = body->pn_pos.begin;
+ item->pn_kid = list;
+ item->pn_next = body->pn_head;
+ body->pn_head = item;
+ if (body->pn_tail == &body->pn_head)
+ body->pn_tail = &item->pn_next;
+ ++body->pn_count;
+ }
+#endif
+
+ /*
+ * If we collected flags that indicate nested heavyweight functions, or
+ * this function contains heavyweight-making statements (references to
+ * __parent__ or __proto__; use of with, eval, import, or export; and
+ * assignment to arguments), flag the function as heavyweight (requiring
+ * a call object per invocation).
+ */
+ if (funtc.flags & TCF_FUN_HEAVYWEIGHT) {
+ fun->flags |= JSFUN_HEAVYWEIGHT;
+ tc->flags |= TCF_FUN_HEAVYWEIGHT;
+ } else {
+ /*
+ * If this function is a named statement function not at top-level
+ * (i.e. a JSOP_CLOSURE, not a function definition or expression), then
+ * our enclosing function, if any, must be heavyweight.
+ *
+ * The TCF_FUN_USES_NONLOCALS flag is set only by the code generator,
+ * so it won't be set here. Assert that it's not. We have to check
+ * it later, in js_EmitTree, after js_EmitFunctionBody has traversed
+ * the function's body.
+ */
+ JS_ASSERT(!(funtc.flags & TCF_FUN_USES_NONLOCALS));
+ if (!lambda && funAtom && !AT_TOP_LEVEL(tc))
+ tc->flags |= TCF_FUN_HEAVYWEIGHT;
+ }
+
+ result = pn;
+ if (lambda) {
+ /*
+ * ECMA ed. 3 standard: function expression, possibly anonymous.
+ */
+ op = funAtom ? JSOP_NAMEDFUNOBJ : JSOP_ANONFUNOBJ;
+ } else if (!funAtom) {
+ /*
+ * If this anonymous function definition is *not* embedded within a
+ * larger expression, we treat it as an expression statement, not as
+ * a function declaration -- and not as a syntax error (as ECMA-262
+ * Edition 3 would have it). Backward compatibility trumps all.
+ */
+ result = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!result)
+ return NULL;
+ result->pn_type = TOK_SEMI;
+ result->pn_pos = pn->pn_pos;
+ result->pn_kid = pn;
+ op = JSOP_ANONFUNOBJ;
+ } else if (!AT_TOP_LEVEL(tc)) {
+ /*
+ * ECMA ed. 3 extension: a function expression statement not at the
+ * top level, e.g., in a compound statement such as the "then" part
+ * of an "if" statement, binds a closure only if control reaches that
+ * sub-statement.
+ */
+ op = JSOP_CLOSURE;
+ } else {
+ op = JSOP_NOP;
+ }
+
+ pn->pn_funAtom = objAtom;
+ pn->pn_op = op;
+ pn->pn_body = body;
+ pn->pn_flags = funtc.flags & (TCF_FUN_FLAGS | TCF_HAS_DEFXMLNS);
+ pn->pn_tryCount = funtc.tryCount;
+ TREE_CONTEXT_FINISH(&funtc);
+ return result;
+}
+
+static JSParseNode *
+FunctionStmt(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ return FunctionDef(cx, ts, tc, JS_FALSE);
+}
+
+static JSParseNode *
+FunctionExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ return FunctionDef(cx, ts, tc, JS_TRUE);
+}
+
+/*
+ * Parse the statements in a block, creating a TOK_LC node that lists the
+ * statements' trees. If called from block-parsing code, the caller must
+ * match { before and } after.
+ */
+static JSParseNode *
+Statements(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn, *pn2, *saveBlock;
+ JSTokenType tt;
+
+ CHECK_RECURSION();
+
+ pn = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!pn)
+ return NULL;
+ saveBlock = tc->blockNode;
+ tc->blockNode = pn;
+ PN_INIT_LIST(pn);
+
+ ts->flags |= TSF_OPERAND;
+ while ((tt = js_PeekToken(cx, ts)) > TOK_EOF && tt != TOK_RC) {
+ ts->flags &= ~TSF_OPERAND;
+ pn2 = Statement(cx, ts, tc);
+ if (!pn2) {
+ if (ts->flags & TSF_EOF)
+ ts->flags |= TSF_UNEXPECTED_EOF;
+ return NULL;
+ }
+ ts->flags |= TSF_OPERAND;
+
+ /* Detect a function statement for the TOK_LC case in Statement. */
+ if (pn2->pn_type == TOK_FUNCTION && !AT_TOP_LEVEL(tc))
+ tc->flags |= TCF_HAS_FUNCTION_STMT;
+
+ /* If compiling top-level statements, emit as we go to save space. */
+ if (!tc->topStmt && (tc->flags & TCF_COMPILING)) {
+ if (cx->fp->fun &&
+ JS_HAS_STRICT_OPTION(cx) &&
+ (tc->flags & TCF_RETURN_EXPR)) {
+ /*
+ * Check pn2 for lack of a final return statement if it is the
+ * last statement in the block.
+ */
+ tt = js_PeekToken(cx, ts);
+ if ((tt == TOK_EOF || tt == TOK_RC) &&
+ !CheckFinalReturn(cx, ts, pn2)) {
+ tt = TOK_ERROR;
+ break;
+ }
+
+ /*
+ * Clear TCF_RETURN_EXPR so FunctionBody doesn't try to
+ * CheckFinalReturn again.
+ */
+ tc->flags &= ~TCF_RETURN_EXPR;
+ }
+ if (!js_FoldConstants(cx, pn2, tc) ||
+ !js_AllocTryNotes(cx, (JSCodeGenerator *)tc) ||
+ !js_EmitTree(cx, (JSCodeGenerator *)tc, pn2)) {
+ tt = TOK_ERROR;
+ break;
+ }
+ RecycleTree(pn2, tc);
+ } else {
+ PN_APPEND(pn, pn2);
+ }
+ }
+
+ /*
+ * Handle the case where there was a let declaration under this block. If
+ * it replaced tc->blockNode with a new block node then we must refresh pn
+ * and then restore tc->blockNode.
+ */
+ if (tc->blockNode != pn)
+ pn = tc->blockNode;
+ tc->blockNode = saveBlock;
+
+ ts->flags &= ~TSF_OPERAND;
+ if (tt == TOK_ERROR)
+ return NULL;
+
+ pn->pn_pos.end = CURRENT_TOKEN(ts).pos.end;
+ return pn;
+}
+
+static JSParseNode *
+Condition(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn, *pn2;
+
+ MUST_MATCH_TOKEN(TOK_LP, JSMSG_PAREN_BEFORE_COND);
+ pn = Expr(cx, ts, tc);
+ if (!pn)
+ return NULL;
+ MUST_MATCH_TOKEN(TOK_RP, JSMSG_PAREN_AFTER_COND);
+
+ /*
+ * Check for (a = b) and "correct" it to (a == b) iff b's operator has
+ * greater precedence than ==.
+ * XXX not ECMA, but documented in several books -- now a strict warning.
+ */
+ if (pn->pn_type == TOK_ASSIGN &&
+ pn->pn_op == JSOP_NOP &&
+ pn->pn_right->pn_type > TOK_EQOP)
+ {
+ JSBool rewrite = !JS_VERSION_IS_ECMA(cx);
+ if (!js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS |
+ JSREPORT_WARNING |
+ JSREPORT_STRICT,
+ JSMSG_EQUAL_AS_ASSIGN,
+ rewrite
+ ? "\nAssuming equality test"
+ : "")) {
+ return NULL;
+ }
+ if (rewrite) {
+ pn->pn_type = TOK_EQOP;
+ pn->pn_op = (JSOp)cx->jsop_eq;
+ pn2 = pn->pn_left;
+ switch (pn2->pn_op) {
+ case JSOP_SETNAME:
+ pn2->pn_op = JSOP_NAME;
+ break;
+ case JSOP_SETPROP:
+ pn2->pn_op = JSOP_GETPROP;
+ break;
+ case JSOP_SETELEM:
+ pn2->pn_op = JSOP_GETELEM;
+ break;
+ default:
+ JS_ASSERT(0);
+ }
+ }
+ }
+ return pn;
+}
+
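+/*
+ * If a label name follows on the same line, consume it and record its atom
+ * in pn->pn_atom; otherwise record null. Returns false only on a scanner
+ * error.
+ */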
+static JSBool
+MatchLabel(JSContext *cx, JSTokenStream *ts, JSParseNode *pn)
+{
+ JSAtom *label;
+ JSTokenType tt;
+
+ tt = js_PeekTokenSameLine(cx, ts);
+ if (tt == TOK_ERROR)
+ return JS_FALSE;
+ if (tt == TOK_NAME) {
+ (void) js_GetToken(cx, ts);
+ label = CURRENT_TOKEN(ts).t_atom;
+ } else {
+ label = NULL;
+ }
+ pn->pn_atom = label;
+ return JS_TRUE;
+}
+
+#if JS_HAS_EXPORT_IMPORT
+static JSParseNode *
+ImportExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn, *pn2;
+ JSTokenType tt;
+
+ MUST_MATCH_TOKEN(TOK_NAME, JSMSG_NO_IMPORT_NAME);
+ pn = NewParseNode(cx, ts, PN_NAME, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_op = JSOP_NAME;
+ pn->pn_atom = CURRENT_TOKEN(ts).t_atom;
+ pn->pn_expr = NULL;
+ pn->pn_slot = -1;
+ pn->pn_attrs = 0;
+
+ ts->flags |= TSF_OPERAND;
+ while ((tt = js_GetToken(cx, ts)) == TOK_DOT || tt == TOK_LB) {
+ ts->flags &= ~TSF_OPERAND;
+ if (pn->pn_op == JSOP_IMPORTALL)
+ goto bad_import;
+
+ if (tt == TOK_DOT) {
+ pn2 = NewParseNode(cx, ts, PN_NAME, tc);
+ if (!pn2)
+ return NULL;
+ ts->flags |= TSF_KEYWORD_IS_NAME;
+ if (js_MatchToken(cx, ts, TOK_STAR)) {
+ pn2->pn_op = JSOP_IMPORTALL;
+ pn2->pn_atom = NULL;
+ pn2->pn_slot = -1;
+ pn2->pn_attrs = 0;
+ } else {
+ MUST_MATCH_TOKEN(TOK_NAME, JSMSG_NAME_AFTER_DOT);
+ pn2->pn_op = JSOP_GETPROP;
+ pn2->pn_atom = CURRENT_TOKEN(ts).t_atom;
+ pn2->pn_slot = -1;
+ pn2->pn_attrs = 0;
+ }
+ ts->flags &= ~TSF_KEYWORD_IS_NAME;
+ pn2->pn_expr = pn;
+ pn2->pn_pos.begin = pn->pn_pos.begin;
+ pn2->pn_pos.end = CURRENT_TOKEN(ts).pos.end;
+ } else {
+ /* Make a TOK_LB binary node. */
+ pn2 = NewBinary(cx, tt, JSOP_GETELEM, pn, Expr(cx, ts, tc), tc);
+ if (!pn2)
+ return NULL;
+
+ MUST_MATCH_TOKEN(TOK_RB, JSMSG_BRACKET_IN_INDEX);
+ }
+
+ pn = pn2;
+ ts->flags |= TSF_OPERAND;
+ }
+ ts->flags &= ~TSF_OPERAND;
+ if (tt == TOK_ERROR)
+ return NULL;
+ js_UngetToken(ts);
+
+ switch (pn->pn_op) {
+ case JSOP_GETPROP:
+ pn->pn_op = JSOP_IMPORTPROP;
+ break;
+ case JSOP_GETELEM:
+ pn->pn_op = JSOP_IMPORTELEM;
+ break;
+ case JSOP_IMPORTALL:
+ break;
+ default:
+ goto bad_import;
+ }
+ return pn;
+
+ bad_import:
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_IMPORT);
+ return NULL;
+}
+#endif /* JS_HAS_EXPORT_IMPORT */
+
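+/*
+ * Binder for let declarations: reports a redeclaration error if atom is
+ * already bound in the block object (or was declared const), enforces the
+ * 2^16 per-block limit, and defines atom on the block object with the next
+ * slot index as its shortid.
+ */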
+static JSBool
+BindLet(JSContext *cx, BindData *data, JSAtom *atom, JSTreeContext *tc)
+{
+ JSObject *blockObj;
+ JSScopeProperty *sprop;
+ JSAtomListElement *ale;
+
+ blockObj = data->obj;
+ sprop = SCOPE_GET_PROPERTY(OBJ_SCOPE(blockObj), ATOM_TO_JSID(atom));
+ ATOM_LIST_SEARCH(ale, &tc->decls, atom);
+ if (sprop || (ale && ALE_JSOP(ale) == JSOP_DEFCONST)) {
+ const char *name;
+
+ if (sprop) {
+ JS_ASSERT(sprop->flags & SPROP_HAS_SHORTID);
+ JS_ASSERT((uint16)sprop->shortid < data->u.let.index);
+ }
+
+ name = js_AtomToPrintableString(cx, atom);
+ if (name) {
+ js_ReportCompileErrorNumber(cx,
+ BIND_DATA_REPORT_ARGS(data,
+ JSREPORT_ERROR),
+ JSMSG_REDECLARED_VAR,
+ (ale && ALE_JSOP(ale) == JSOP_DEFCONST)
+ ? js_const_str
+ : "variable",
+ name);
+ }
+ return JS_FALSE;
+ }
+
+ if (data->u.let.index == JS_BIT(16)) {
+ js_ReportCompileErrorNumber(cx,
+ BIND_DATA_REPORT_ARGS(data, JSREPORT_ERROR),
+ data->u.let.overflow);
+ return JS_FALSE;
+ }
+
+ /* Use JSPROP_ENUMERATE to aid the disassembler. */
+ return js_DefineNativeProperty(cx, blockObj, ATOM_TO_JSID(atom),
+ JSVAL_VOID, NULL, NULL,
+ JSPROP_ENUMERATE | JSPROP_PERMANENT,
+ SPROP_HAS_SHORTID,
+ (intN)data->u.let.index++,
+ NULL);
+}
+
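+/*
+ * Binder for var and const declarations: records the declaration in
+ * tc->decls, warning or erroring on a redeclaration, and when a function's
+ * variable object is in scope either reuses an existing argument or local
+ * of the same name or binds a fresh local variable slot.
+ */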
+static JSBool
+BindVarOrConst(JSContext *cx, BindData *data, JSAtom *atom, JSTreeContext *tc)
+{
+ JSStmtInfo *stmt;
+ JSAtomListElement *ale;
+ JSOp op, prevop;
+ const char *name;
+ JSFunction *fun;
+ JSObject *obj, *pobj;
+ JSProperty *prop;
+ JSBool ok;
+ JSPropertyOp getter, setter;
+ JSScopeProperty *sprop;
+
+ stmt = js_LexicalLookup(tc, atom, NULL, JS_FALSE);
+ ATOM_LIST_SEARCH(ale, &tc->decls, atom);
+ op = data->op;
+ if ((stmt && stmt->type != STMT_WITH) || ale) {
+ prevop = ale ? ALE_JSOP(ale) : JSOP_DEFVAR;
+ if (JS_HAS_STRICT_OPTION(cx)
+ ? op != JSOP_DEFVAR || prevop != JSOP_DEFVAR
+ : op == JSOP_DEFCONST || prevop == JSOP_DEFCONST) {
+ name = js_AtomToPrintableString(cx, atom);
+ if (!name ||
+ !js_ReportCompileErrorNumber(cx,
+ BIND_DATA_REPORT_ARGS(data,
+ (op != JSOP_DEFCONST &&
+ prevop != JSOP_DEFCONST)
+ ? JSREPORT_WARNING |
+ JSREPORT_STRICT
+ : JSREPORT_ERROR),
+ JSMSG_REDECLARED_VAR,
+ (prevop == JSOP_DEFFUN ||
+ prevop == JSOP_CLOSURE)
+ ? js_function_str
+ : (prevop == JSOP_DEFCONST)
+ ? js_const_str
+ : js_var_str,
+ name)) {
+ return JS_FALSE;
+ }
+ }
+ if (op == JSOP_DEFVAR && prevop == JSOP_CLOSURE)
+ tc->flags |= TCF_FUN_CLOSURE_VS_VAR;
+ }
+ if (!ale) {
+ ale = js_IndexAtom(cx, atom, &tc->decls);
+ if (!ale)
+ return JS_FALSE;
+ }
+ ALE_SET_JSOP(ale, op);
+
+ fun = data->u.var.fun;
+ obj = data->obj;
+ if (!fun) {
+ /* Don't lookup global variables at compile time. */
+ prop = NULL;
+ } else {
+ JS_ASSERT(OBJ_IS_NATIVE(obj));
+ if (!js_LookupHiddenProperty(cx, obj, ATOM_TO_JSID(atom),
+ &pobj, &prop)) {
+ return JS_FALSE;
+ }
+ }
+
+ ok = JS_TRUE;
+ getter = data->u.var.getter;
+ setter = data->u.var.setter;
+
+ if (prop && pobj == obj && OBJ_IS_NATIVE(pobj)) {
+ sprop = (JSScopeProperty *)prop;
+ if (sprop->getter == js_GetArgument) {
+ name = js_AtomToPrintableString(cx, atom);
+ if (!name) {
+ ok = JS_FALSE;
+ } else if (op == JSOP_DEFCONST) {
+ js_ReportCompileErrorNumber(cx,
+ BIND_DATA_REPORT_ARGS(data,
+ JSREPORT_ERROR),
+ JSMSG_REDECLARED_PARAM,
+ name);
+ ok = JS_FALSE;
+ } else {
+ getter = js_GetArgument;
+ setter = js_SetArgument;
+ ok = js_ReportCompileErrorNumber(cx,
+ BIND_DATA_REPORT_ARGS(data,
+ JSREPORT_WARNING |
+ JSREPORT_STRICT),
+ JSMSG_VAR_HIDES_ARG,
+ name);
+ }
+ } else {
+ JS_ASSERT(getter == js_GetLocalVariable);
+
+ if (fun) {
+ /* Not an argument, must be a redeclared local var. */
+ if (data->u.var.clasp == &js_FunctionClass) {
+ JS_ASSERT(sprop->getter == js_GetLocalVariable);
+ JS_ASSERT((sprop->flags & SPROP_HAS_SHORTID) &&
+ (uint16) sprop->shortid < fun->u.i.nvars);
+ } else if (data->u.var.clasp == &js_CallClass) {
+ if (sprop->getter == js_GetCallVariable) {
+ /*
+ * Referencing a name introduced by a var statement in
+ * the enclosing function. Check that the slot number
+ * we have is in range.
+ */
+ JS_ASSERT((sprop->flags & SPROP_HAS_SHORTID) &&
+ (uint16) sprop->shortid < fun->u.i.nvars);
+ } else {
+ /*
+ * A variable introduced through another eval: don't
+ * use the special getters and setters since we can't
+ * allocate a slot in the frame.
+ */
+ getter = sprop->getter;
+ setter = sprop->setter;
+ }
+ }
+
+ /* Override the old getter and setter, to handle eval. */
+ sprop = js_ChangeNativePropertyAttrs(cx, obj, sprop,
+ 0, sprop->attrs,
+ getter, setter);
+ if (!sprop)
+ ok = JS_FALSE;
+ }
+ }
+ if (prop)
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ } else {
+ /*
+ * Property not found in current variable scope: we have not seen this
+ * variable before. Define a new local variable by adding a property
+ * to the function's scope, allocating one slot in the function's vars
+ * frame. Global variables and any locals declared in with statement
+ * bodies are handled at runtime, by script prolog JSOP_DEFVAR opcodes
+ * generated for slot-less vars.
+ */
+ sprop = NULL;
+ if (prop) {
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ prop = NULL;
+ }
+
+ if (cx->fp->scopeChain == obj &&
+ !js_InWithStatement(tc) &&
+ !BindLocalVariable(cx, data, atom)) {
+ return JS_FALSE;
+ }
+ }
+ return ok;
+}
+
+#if JS_HAS_DESTRUCTURING
+
+static JSBool
+BindDestructuringVar(JSContext *cx, BindData *data, JSParseNode *pn,
+ JSTreeContext *tc)
+{
+ JSAtom *atom;
+
+ /*
+ * Destructuring is a form of assignment, so just as for an initialized
+ * simple variable, we must check for assignment to 'arguments' and flag
+ * the enclosing function (if any) as heavyweight.
+ */
+ JS_ASSERT(pn->pn_type == TOK_NAME);
+ atom = pn->pn_atom;
+ if (atom == cx->runtime->atomState.argumentsAtom)
+ tc->flags |= TCF_FUN_HEAVYWEIGHT;
+
+ data->pn = pn;
+ if (!data->binder(cx, data, atom, tc))
+ return JS_FALSE;
+ data->pn = NULL;
+
+ /*
+ * Select the appropriate name-setting opcode, which may be specialized
+ * further for local variable and argument slot optimizations. At this
+ * point, we can't select the optimal final opcode, yet we must preserve
+ * the CONST bit and convey "set", not "get".
+ */
+ pn->pn_op = (data->op == JSOP_DEFCONST)
+ ? JSOP_SETCONST
+ : JSOP_SETNAME;
+ pn->pn_attrs = data->u.var.attrs;
+ return JS_TRUE;
+}
+
+/*
+ * Here, we are destructuring {... P: Q, ...} = R, where P is any id, Q is any
+ * LHS expression except a destructuring initialiser, and R is on the stack.
+ * Because R is already evaluated, the usual LHS-specialized bytecodes won't
+ * work. After pushing R[P] we need to evaluate Q's "reference base" QB and
+ * then push its property name QN. At this point the stack looks like
+ *
+ * [... R, R[P], QB, QN]
+ *
+ * We need to set QB[QN] = R[P]. This is a job for JSOP_ENUMELEM, which takes
+ * its operands with left-hand side above right-hand side:
+ *
+ * [rval, lval, xval]
+ *
+ * and pops all three values, setting lval[xval] = rval. But we cannot select
+ * JSOP_ENUMELEM yet, because the LHS may turn out to be an arg or local var,
+ * which can be optimized further. So we select JSOP_SETNAME.
+ */
+static JSBool
+BindDestructuringLHS(JSContext *cx, JSParseNode *pn, JSTreeContext *tc)
+{
+ while (pn->pn_type == TOK_RP)
+ pn = pn->pn_kid;
+
+ switch (pn->pn_type) {
+ case TOK_NAME:
+ if (pn->pn_atom == cx->runtime->atomState.argumentsAtom)
+ tc->flags |= TCF_FUN_HEAVYWEIGHT;
+ /* FALL THROUGH */
+ case TOK_DOT:
+ case TOK_LB:
+ pn->pn_op = JSOP_SETNAME;
+ break;
+
+#if JS_HAS_LVALUE_RETURN
+ case TOK_LP:
+ JS_ASSERT(pn->pn_op == JSOP_CALL || pn->pn_op == JSOP_EVAL);
+ pn->pn_op = JSOP_SETCALL;
+ break;
+#endif
+
+#if JS_HAS_XML_SUPPORT
+ case TOK_UNARYOP:
+ if (pn->pn_op == JSOP_XMLNAME) {
+ pn->pn_op = JSOP_BINDXMLNAME;
+ break;
+ }
+ /* FALL THROUGH */
+#endif
+
+ default:
+ js_ReportCompileErrorNumber(cx, pn, JSREPORT_PN | JSREPORT_ERROR,
+ JSMSG_BAD_LEFTSIDE_OF_ASS);
+ return JS_FALSE;
+ }
+
+ return JS_TRUE;
+}
+
+typedef struct FindPropValData {
+ uint32 numvars; /* # of destructuring vars in left side */
+ uint32 maxstep; /* max # of steps searching right side */
+ JSDHashTable table; /* hash table for O(1) right side search */
+} FindPropValData;
+
+typedef struct FindPropValEntry {
+ JSDHashEntryHdr hdr;
+ JSParseNode *pnkey;
+ JSParseNode *pnval;
+} FindPropValEntry;
+
+#define ASSERT_VALID_PROPERTY_KEY(pnkey) \
+ JS_ASSERT((pnkey)->pn_arity == PN_NULLARY && \
+ ((pnkey)->pn_type == TOK_NUMBER || \
+ (pnkey)->pn_type == TOK_STRING || \
+ (pnkey)->pn_type == TOK_NAME))
+
+JS_STATIC_DLL_CALLBACK(JSDHashNumber)
+HashFindPropValKey(JSDHashTable *table, const void *key)
+{
+ const JSParseNode *pnkey = (const JSParseNode *)key;
+
+ ASSERT_VALID_PROPERTY_KEY(pnkey);
+ return (pnkey->pn_type == TOK_NUMBER)
+ ? (JSDHashNumber) (JSDOUBLE_HI32(pnkey->pn_dval) ^
+ JSDOUBLE_LO32(pnkey->pn_dval))
+ : (JSDHashNumber) pnkey->pn_atom->number;
+}
+
+JS_STATIC_DLL_CALLBACK(JSBool)
+MatchFindPropValEntry(JSDHashTable *table,
+ const JSDHashEntryHdr *entry,
+ const void *key)
+{
+ const FindPropValEntry *fpve = (const FindPropValEntry *)entry;
+ const JSParseNode *pnkey = (const JSParseNode *)key;
+
+ ASSERT_VALID_PROPERTY_KEY(pnkey);
+ return pnkey->pn_type == fpve->pnkey->pn_type &&
+ ((pnkey->pn_type == TOK_NUMBER)
+ ? pnkey->pn_dval == fpve->pnkey->pn_dval
+ : pnkey->pn_atom == fpve->pnkey->pn_atom);
+}
+
+static const JSDHashTableOps FindPropValOps = {
+ JS_DHashAllocTable,
+ JS_DHashFreeTable,
+ JS_DHashGetKeyStub,
+ HashFindPropValKey,
+ MatchFindPropValEntry,
+ JS_DHashMoveEntryStub,
+ JS_DHashClearEntryStub,
+ JS_DHashFinalizeStub,
+ NULL
+};
+
+#define STEP_HASH_THRESHOLD 10
+#define BIG_DESTRUCTURING 5
+#define BIG_OBJECT_INIT 20
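+
+/*
+ * FindPropertyValue below switches from a linear scan to its hash table only
+ * after a scan has taken STEP_HASH_THRESHOLD or more steps, and only when the
+ * pattern binds at least BIG_DESTRUCTURING variables against an object
+ * initialiser with at least BIG_OBJECT_INIT properties.
+ */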
+
+static JSParseNode *
+FindPropertyValue(JSParseNode *pn, JSParseNode *pnid, FindPropValData *data)
+{
+ FindPropValEntry *entry;
+ JSParseNode *pnhit, *pnprop, *pnkey;
+ uint32 step;
+
+ /* If we have a hash table, use it as the sole source of truth. */
+ if (data->table.ops) {
+ entry = (FindPropValEntry *)
+ JS_DHashTableOperate(&data->table, pnid, JS_DHASH_LOOKUP);
+ return JS_DHASH_ENTRY_IS_BUSY(&entry->hdr) ? entry->pnval : NULL;
+ }
+
+ /* If pn is not an object initialiser node, we can't do anything here. */
+ if (pn->pn_type != TOK_RC)
+ return NULL;
+
+ /*
+ * We must search all the way through pn's list, to handle the case of an
+ * id duplicated for two or more property initialisers.
+ */
+ pnhit = NULL;
+ step = 0;
+ ASSERT_VALID_PROPERTY_KEY(pnid);
+ if (pnid->pn_type == TOK_NUMBER) {
+ for (pnprop = pn->pn_head; pnprop; pnprop = pnprop->pn_next) {
+ JS_ASSERT(pnprop->pn_type == TOK_COLON);
+ if (pnprop->pn_op == JSOP_NOP) {
+ pnkey = pnprop->pn_left;
+ ASSERT_VALID_PROPERTY_KEY(pnkey);
+ if (pnkey->pn_type == TOK_NUMBER &&
+ pnkey->pn_dval == pnid->pn_dval) {
+ pnhit = pnprop;
+ }
+ ++step;
+ }
+ }
+ } else {
+ for (pnprop = pn->pn_head; pnprop; pnprop = pnprop->pn_next) {
+ JS_ASSERT(pnprop->pn_type == TOK_COLON);
+ if (pnprop->pn_op == JSOP_NOP) {
+ pnkey = pnprop->pn_left;
+ ASSERT_VALID_PROPERTY_KEY(pnkey);
+ if (pnkey->pn_type == pnid->pn_type &&
+ pnkey->pn_atom == pnid->pn_atom) {
+ pnhit = pnprop;
+ }
+ ++step;
+ }
+ }
+ }
+ if (!pnhit)
+ return NULL;
+
+ /* Hit via full search -- see whether it's time to create the hash table. */
+ JS_ASSERT(!data->table.ops);
+ if (step > data->maxstep) {
+ data->maxstep = step;
+ if (step >= STEP_HASH_THRESHOLD &&
+ data->numvars >= BIG_DESTRUCTURING &&
+ pn->pn_count >= BIG_OBJECT_INIT &&
+ JS_DHashTableInit(&data->table, &FindPropValOps, pn,
+ sizeof(FindPropValEntry), pn->pn_count)) {
+
+ for (pn = pn->pn_head; pn; pn = pn->pn_next) {
+ ASSERT_VALID_PROPERTY_KEY(pn->pn_left);
+ entry = (FindPropValEntry *)
+ JS_DHashTableOperate(&data->table, pn->pn_left,
+ JS_DHASH_ADD);
+ entry->pnval = pn->pn_right;
+ }
+ }
+ }
+ return pnhit->pn_right;
+}
+
+/*
+ * If data is null, the caller is AssignExpr and instead of binding variables,
+ * we specialize lvalues in the property value positions of the left-hand side.
+ * If right is null, just check for well-formed lvalues.
+ */
+static JSBool
+CheckDestructuring(JSContext *cx, BindData *data,
+ JSParseNode *left, JSParseNode *right,
+ JSTreeContext *tc)
+{
+ JSBool ok;
+ FindPropValData fpvd;
+ JSParseNode *lhs, *rhs, *pn, *pn2;
+
+ if (left->pn_type == TOK_ARRAYCOMP) {
+ js_ReportCompileErrorNumber(cx, left, JSREPORT_PN | JSREPORT_ERROR,
+ JSMSG_ARRAY_COMP_LEFTSIDE);
+ return JS_FALSE;
+ }
+
+ ok = JS_TRUE;
+ fpvd.table.ops = NULL;
+ lhs = left->pn_head;
+ if (lhs && lhs->pn_type == TOK_DEFSHARP) {
+ pn = lhs;
+ goto no_var_name;
+ }
+
+ if (left->pn_type == TOK_RB) {
+ rhs = (right && right->pn_type == left->pn_type)
+ ? right->pn_head
+ : NULL;
+
+ while (lhs) {
+ pn = lhs, pn2 = rhs;
+ if (!data) {
+ /* Skip parenthesization if not in a variable declaration. */
+ while (pn->pn_type == TOK_RP)
+ pn = pn->pn_kid;
+ if (pn2) {
+ while (pn2->pn_type == TOK_RP)
+ pn2 = pn2->pn_kid;
+ }
+ }
+
+            /* Nullary comma is an elision; binary comma is an expression. */
+ if (pn->pn_type != TOK_COMMA || pn->pn_arity != PN_NULLARY) {
+ if (pn->pn_type == TOK_RB || pn->pn_type == TOK_RC) {
+ ok = CheckDestructuring(cx, data, pn, pn2, tc);
+ } else {
+ if (data) {
+ if (pn->pn_type != TOK_NAME)
+ goto no_var_name;
+
+ ok = BindDestructuringVar(cx, data, pn, tc);
+ } else {
+ ok = BindDestructuringLHS(cx, pn, tc);
+ }
+ }
+ if (!ok)
+ goto out;
+ }
+
+ lhs = lhs->pn_next;
+ if (rhs)
+ rhs = rhs->pn_next;
+ }
+ } else {
+ JS_ASSERT(left->pn_type == TOK_RC);
+ fpvd.numvars = left->pn_count;
+ fpvd.maxstep = 0;
+ rhs = NULL;
+
+ while (lhs) {
+ JS_ASSERT(lhs->pn_type == TOK_COLON);
+ pn = lhs->pn_right;
+ if (!data) {
+ /* Skip parenthesization if not in a variable declaration. */
+ while (pn->pn_type == TOK_RP)
+ pn = pn->pn_kid;
+ }
+
+ if (pn->pn_type == TOK_RB || pn->pn_type == TOK_RC) {
+ if (right) {
+ rhs = FindPropertyValue(right, lhs->pn_left, &fpvd);
+ if (rhs && !data) {
+ while (rhs->pn_type == TOK_RP)
+ rhs = rhs->pn_kid;
+ }
+ }
+
+ ok = CheckDestructuring(cx, data, pn, rhs, tc);
+ } else if (data) {
+ if (pn->pn_type != TOK_NAME)
+ goto no_var_name;
+
+ ok = BindDestructuringVar(cx, data, pn, tc);
+ } else {
+ ok = BindDestructuringLHS(cx, pn, tc);
+ }
+ if (!ok)
+ goto out;
+
+ lhs = lhs->pn_next;
+ }
+ }
+
+out:
+ if (fpvd.table.ops)
+ JS_DHashTableFinish(&fpvd.table);
+ return ok;
+
+no_var_name:
+ js_ReportCompileErrorNumber(cx, pn, JSREPORT_PN | JSREPORT_ERROR,
+ JSMSG_NO_VARIABLE_NAME);
+ ok = JS_FALSE;
+ goto out;
+}
+
+static JSParseNode *
+DestructuringExpr(JSContext *cx, BindData *data, JSTreeContext *tc,
+ JSTokenType tt)
+{
+ JSParseNode *pn;
+
+ pn = PrimaryExpr(cx, data->ts, tc, tt, JS_FALSE);
+ if (!pn)
+ return NULL;
+ if (!CheckDestructuring(cx, data, pn, NULL, tc))
+ return NULL;
+ return pn;
+}
+
+#endif /* JS_HAS_DESTRUCTURING */
+
+extern const char js_with_statement_str[];
+
+static JSParseNode *
+ContainsStmt(JSParseNode *pn, JSTokenType tt)
+{
+ JSParseNode *pn2, *pnt;
+
+ if (!pn)
+ return NULL;
+ if (pn->pn_type == tt)
+ return pn;
+ switch (pn->pn_arity) {
+ case PN_LIST:
+ for (pn2 = pn->pn_head; pn2; pn2 = pn2->pn_next) {
+ pnt = ContainsStmt(pn2, tt);
+ if (pnt)
+ return pnt;
+ }
+ break;
+ case PN_TERNARY:
+ pnt = ContainsStmt(pn->pn_kid1, tt);
+ if (pnt)
+ return pnt;
+ pnt = ContainsStmt(pn->pn_kid2, tt);
+ if (pnt)
+ return pnt;
+ return ContainsStmt(pn->pn_kid3, tt);
+ case PN_BINARY:
+ /*
+ * Limit recursion if pn is a binary expression, which can't contain a
+ * var statement.
+ */
+ if (pn->pn_op != JSOP_NOP)
+ return NULL;
+ pnt = ContainsStmt(pn->pn_left, tt);
+ if (pnt)
+ return pnt;
+ return ContainsStmt(pn->pn_right, tt);
+ case PN_UNARY:
+ if (pn->pn_op != JSOP_NOP)
+ return NULL;
+ return ContainsStmt(pn->pn_kid, tt);
+ case PN_NAME:
+ return ContainsStmt(pn->pn_expr, tt);
+ default:;
+ }
+ return NULL;
+}
+
+static JSParseNode *
+ReturnOrYield(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
+ JSParser operandParser)
+{
+ JSTokenType tt, tt2;
+ JSParseNode *pn, *pn2;
+
+ tt = CURRENT_TOKEN(ts).type;
+ if (!(tc->flags & TCF_IN_FUNCTION)) {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_RETURN_OR_YIELD,
+#if JS_HAS_GENERATORS
+ (tt == TOK_YIELD) ? js_yield_str :
+#endif
+ js_return_str);
+ return NULL;
+ }
+
+ pn = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!pn)
+ return NULL;
+
+#if JS_HAS_GENERATORS
+ if (tt == TOK_YIELD)
+ tc->flags |= TCF_FUN_IS_GENERATOR;
+#endif
+
+ /* This is ugly, but we don't want to require a semicolon. */
+ ts->flags |= TSF_OPERAND;
+ tt2 = js_PeekTokenSameLine(cx, ts);
+ ts->flags &= ~TSF_OPERAND;
+ if (tt2 == TOK_ERROR)
+ return NULL;
+
+ if (tt2 != TOK_EOF && tt2 != TOK_EOL && tt2 != TOK_SEMI && tt2 != TOK_RC
+#if JS_HAS_GENERATORS
+ && (tt != TOK_YIELD || (tt2 != tt && tt2 != TOK_RB && tt2 != TOK_RP))
+#endif
+ ) {
+ pn2 = operandParser(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+#if JS_HAS_GENERATORS
+ if (tt == TOK_RETURN)
+#endif
+ tc->flags |= TCF_RETURN_EXPR;
+ pn->pn_pos.end = pn2->pn_pos.end;
+ pn->pn_kid = pn2;
+ } else {
+#if JS_HAS_GENERATORS
+ if (tt == TOK_RETURN)
+#endif
+ tc->flags |= TCF_RETURN_VOID;
+ pn->pn_kid = NULL;
+ }
+
+ if ((~tc->flags & (TCF_RETURN_EXPR | TCF_FUN_IS_GENERATOR)) == 0) {
+ /* As in Python (see PEP-255), disallow return v; in generators. */
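+        /* E.g. 'function gen() { yield 1; return 2; }' is rejected here. */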
+ ReportBadReturn(cx, ts, JSREPORT_ERROR,
+ JSMSG_BAD_GENERATOR_RETURN,
+ JSMSG_BAD_ANON_GENERATOR_RETURN);
+ return NULL;
+ }
+
+ if (JS_HAS_STRICT_OPTION(cx) &&
+ (~tc->flags & (TCF_RETURN_EXPR | TCF_RETURN_VOID)) == 0 &&
+ !ReportBadReturn(cx, ts, JSREPORT_WARNING | JSREPORT_STRICT,
+ JSMSG_NO_RETURN_VALUE,
+ JSMSG_ANON_NO_RETURN_VALUE)) {
+ return NULL;
+ }
+
+ return pn;
+}
+
+static JSParseNode *
+PushLexicalScope(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
+ JSStmtInfo *stmtInfo)
+{
+ JSParseNode *pn;
+ JSObject *obj;
+ JSAtom *atom;
+
+ pn = NewParseNode(cx, ts, PN_NAME, tc);
+ if (!pn)
+ return NULL;
+
+ obj = js_NewBlockObject(cx);
+ if (!obj)
+ return NULL;
+
+ atom = js_AtomizeObject(cx, obj, 0);
+ if (!atom)
+ return NULL;
+
+ js_PushBlockScope(tc, stmtInfo, atom, -1);
+ pn->pn_type = TOK_LEXICALSCOPE;
+ pn->pn_op = JSOP_LEAVEBLOCK;
+ pn->pn_atom = atom;
+ pn->pn_expr = NULL;
+ pn->pn_slot = -1;
+ pn->pn_attrs = 0;
+ return pn;
+}
+
+#if JS_HAS_BLOCK_SCOPE
+
+static JSParseNode *
+LetBlock(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, JSBool statement)
+{
+ JSParseNode *pn, *pnblock, *pnlet;
+ JSStmtInfo stmtInfo;
+
+ JS_ASSERT(CURRENT_TOKEN(ts).type == TOK_LET);
+
+ /* Create the let binary node. */
+ pnlet = NewParseNode(cx, ts, PN_BINARY, tc);
+ if (!pnlet)
+ return NULL;
+
+ MUST_MATCH_TOKEN(TOK_LP, JSMSG_PAREN_BEFORE_LET);
+
+ /* This is a let block or expression of the form: let (a, b, c) .... */
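+    /* E.g. 'let (x = 1, y = 2) { use(x + y); }' or 'let (x = 1) x + 1'. */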
+ pnblock = PushLexicalScope(cx, ts, tc, &stmtInfo);
+ if (!pnblock)
+ return NULL;
+ pn = pnblock;
+ pn->pn_expr = pnlet;
+
+ pnlet->pn_left = Variables(cx, ts, tc);
+ if (!pnlet->pn_left)
+ return NULL;
+ pnlet->pn_left->pn_extra = PNX_POPVAR;
+
+ MUST_MATCH_TOKEN(TOK_RP, JSMSG_PAREN_AFTER_LET);
+
+ ts->flags |= TSF_OPERAND;
+ if (statement && !js_MatchToken(cx, ts, TOK_LC)) {
+ /*
+ * If this is really an expression in let statement guise, then we
+ * need to wrap the TOK_LET node in a TOK_SEMI node so that we pop
+ * the return value of the expression.
+ */
+ pn = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_type = TOK_SEMI;
+ pn->pn_num = -1;
+ pn->pn_kid = pnblock;
+
+ statement = JS_FALSE;
+ }
+ ts->flags &= ~TSF_OPERAND;
+
+ if (statement) {
+ pnlet->pn_right = Statements(cx, ts, tc);
+ if (!pnlet->pn_right)
+ return NULL;
+ MUST_MATCH_TOKEN(TOK_RC, JSMSG_CURLY_AFTER_LET);
+ } else {
+ /*
+ * Change pnblock's opcode to the variant that propagates the last
+ * result down after popping the block, and clear statement.
+ */
+ pnblock->pn_op = JSOP_LEAVEBLOCKEXPR;
+ pnlet->pn_right = Expr(cx, ts, tc);
+ if (!pnlet->pn_right)
+ return NULL;
+ }
+
+ js_PopStatement(tc);
+ return pn;
+}
+
+#endif /* JS_HAS_BLOCK_SCOPE */
+
+static JSParseNode *
+Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSTokenType tt;
+ JSParseNode *pn, *pn1, *pn2, *pn3, *pn4;
+ JSStmtInfo stmtInfo, *stmt, *stmt2;
+ JSAtom *label;
+
+ CHECK_RECURSION();
+
+ ts->flags |= TSF_OPERAND;
+ tt = js_GetToken(cx, ts);
+ ts->flags &= ~TSF_OPERAND;
+
+#if JS_HAS_GETTER_SETTER
+ if (tt == TOK_NAME) {
+ tt = CheckGetterOrSetter(cx, ts, TOK_FUNCTION);
+ if (tt == TOK_ERROR)
+ return NULL;
+ }
+#endif
+
+ switch (tt) {
+#if JS_HAS_EXPORT_IMPORT
+ case TOK_EXPORT:
+ pn = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!pn)
+ return NULL;
+ PN_INIT_LIST(pn);
+ if (js_MatchToken(cx, ts, TOK_STAR)) {
+ pn2 = NewParseNode(cx, ts, PN_NULLARY, tc);
+ if (!pn2)
+ return NULL;
+ PN_APPEND(pn, pn2);
+ } else {
+ do {
+ MUST_MATCH_TOKEN(TOK_NAME, JSMSG_NO_EXPORT_NAME);
+ pn2 = NewParseNode(cx, ts, PN_NAME, tc);
+ if (!pn2)
+ return NULL;
+ pn2->pn_op = JSOP_NAME;
+ pn2->pn_atom = CURRENT_TOKEN(ts).t_atom;
+ pn2->pn_expr = NULL;
+ pn2->pn_slot = -1;
+ pn2->pn_attrs = 0;
+ PN_APPEND(pn, pn2);
+ } while (js_MatchToken(cx, ts, TOK_COMMA));
+ }
+ pn->pn_pos.end = PN_LAST(pn)->pn_pos.end;
+ tc->flags |= TCF_FUN_HEAVYWEIGHT;
+ break;
+
+ case TOK_IMPORT:
+ pn = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!pn)
+ return NULL;
+ PN_INIT_LIST(pn);
+ do {
+ pn2 = ImportExpr(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ PN_APPEND(pn, pn2);
+ } while (js_MatchToken(cx, ts, TOK_COMMA));
+ pn->pn_pos.end = PN_LAST(pn)->pn_pos.end;
+ tc->flags |= TCF_FUN_HEAVYWEIGHT;
+ break;
+#endif /* JS_HAS_EXPORT_IMPORT */
+
+ case TOK_FUNCTION:
+#if JS_HAS_XML_SUPPORT
+ ts->flags |= TSF_KEYWORD_IS_NAME;
+ tt = js_PeekToken(cx, ts);
+ ts->flags &= ~TSF_KEYWORD_IS_NAME;
+ if (tt == TOK_DBLCOLON)
+ goto expression;
+#endif
+ return FunctionStmt(cx, ts, tc);
+
+ case TOK_IF:
+ /* An IF node has three kids: condition, then, and optional else. */
+ pn = NewParseNode(cx, ts, PN_TERNARY, tc);
+ if (!pn)
+ return NULL;
+ pn1 = Condition(cx, ts, tc);
+ if (!pn1)
+ return NULL;
+ js_PushStatement(tc, &stmtInfo, STMT_IF, -1);
+ pn2 = Statement(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ ts->flags |= TSF_OPERAND;
+ if (js_MatchToken(cx, ts, TOK_ELSE)) {
+ ts->flags &= ~TSF_OPERAND;
+ stmtInfo.type = STMT_ELSE;
+ pn3 = Statement(cx, ts, tc);
+ if (!pn3)
+ return NULL;
+ pn->pn_pos.end = pn3->pn_pos.end;
+ } else {
+ ts->flags &= ~TSF_OPERAND;
+ pn3 = NULL;
+ pn->pn_pos.end = pn2->pn_pos.end;
+ }
+ js_PopStatement(tc);
+ pn->pn_kid1 = pn1;
+ pn->pn_kid2 = pn2;
+ pn->pn_kid3 = pn3;
+ return pn;
+
+ case TOK_SWITCH:
+ {
+ JSParseNode *pn5, *saveBlock;
+ JSBool seenDefault = JS_FALSE;
+
+ pn = NewParseNode(cx, ts, PN_BINARY, tc);
+ if (!pn)
+ return NULL;
+ MUST_MATCH_TOKEN(TOK_LP, JSMSG_PAREN_BEFORE_SWITCH);
+
+ /* pn1 points to the switch's discriminant. */
+ pn1 = Expr(cx, ts, tc);
+ if (!pn1)
+ return NULL;
+
+ MUST_MATCH_TOKEN(TOK_RP, JSMSG_PAREN_AFTER_SWITCH);
+ MUST_MATCH_TOKEN(TOK_LC, JSMSG_CURLY_BEFORE_SWITCH);
+
+        /* pn2 is a list of case nodes. The default case has pn_left == NULL. */
+ pn2 = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!pn2)
+ return NULL;
+ saveBlock = tc->blockNode;
+ tc->blockNode = pn2;
+ PN_INIT_LIST(pn2);
+
+ js_PushStatement(tc, &stmtInfo, STMT_SWITCH, -1);
+
+ while ((tt = js_GetToken(cx, ts)) != TOK_RC) {
+ switch (tt) {
+ case TOK_DEFAULT:
+ if (seenDefault) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_TOO_MANY_DEFAULTS);
+ return NULL;
+ }
+ seenDefault = JS_TRUE;
+ /* fall through */
+
+ case TOK_CASE:
+ pn3 = NewParseNode(cx, ts, PN_BINARY, tc);
+ if (!pn3)
+ return NULL;
+ if (tt == TOK_DEFAULT) {
+ pn3->pn_left = NULL;
+ } else {
+ pn3->pn_left = Expr(cx, ts, tc);
+ if (!pn3->pn_left)
+ return NULL;
+ }
+ PN_APPEND(pn2, pn3);
+ if (pn2->pn_count == JS_BIT(16)) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_TOO_MANY_CASES);
+ return NULL;
+ }
+ break;
+
+ case TOK_ERROR:
+ return NULL;
+
+ default:
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_SWITCH);
+ return NULL;
+ }
+ MUST_MATCH_TOKEN(TOK_COLON, JSMSG_COLON_AFTER_CASE);
+
+ pn4 = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!pn4)
+ return NULL;
+ pn4->pn_type = TOK_LC;
+ PN_INIT_LIST(pn4);
+ ts->flags |= TSF_OPERAND;
+ while ((tt = js_PeekToken(cx, ts)) != TOK_RC &&
+ tt != TOK_CASE && tt != TOK_DEFAULT) {
+ ts->flags &= ~TSF_OPERAND;
+ if (tt == TOK_ERROR)
+ return NULL;
+ pn5 = Statement(cx, ts, tc);
+ if (!pn5)
+ return NULL;
+ pn4->pn_pos.end = pn5->pn_pos.end;
+ PN_APPEND(pn4, pn5);
+ ts->flags |= TSF_OPERAND;
+ }
+ ts->flags &= ~TSF_OPERAND;
+
+ /* Fix the PN_LIST so it doesn't begin at the TOK_COLON. */
+ if (pn4->pn_head)
+ pn4->pn_pos.begin = pn4->pn_head->pn_pos.begin;
+ pn3->pn_pos.end = pn4->pn_pos.end;
+ pn3->pn_right = pn4;
+ }
+
+ /*
+ * Handle the case where there was a let declaration in any case in
+ * the switch body, but not within an inner block. If it replaced
+ * tc->blockNode with a new block node then we must refresh pn2 and
+ * then restore tc->blockNode.
+ */
+ if (tc->blockNode != pn2)
+ pn2 = tc->blockNode;
+ tc->blockNode = saveBlock;
+ js_PopStatement(tc);
+
+ pn->pn_pos.end = pn2->pn_pos.end = CURRENT_TOKEN(ts).pos.end;
+ pn->pn_left = pn1;
+ pn->pn_right = pn2;
+ return pn;
+ }
+
+ case TOK_WHILE:
+ pn = NewParseNode(cx, ts, PN_BINARY, tc);
+ if (!pn)
+ return NULL;
+ js_PushStatement(tc, &stmtInfo, STMT_WHILE_LOOP, -1);
+ pn2 = Condition(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ pn->pn_left = pn2;
+ pn2 = Statement(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ js_PopStatement(tc);
+ pn->pn_pos.end = pn2->pn_pos.end;
+ pn->pn_right = pn2;
+ return pn;
+
+ case TOK_DO:
+ pn = NewParseNode(cx, ts, PN_BINARY, tc);
+ if (!pn)
+ return NULL;
+ js_PushStatement(tc, &stmtInfo, STMT_DO_LOOP, -1);
+ pn2 = Statement(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ pn->pn_left = pn2;
+ MUST_MATCH_TOKEN(TOK_WHILE, JSMSG_WHILE_AFTER_DO);
+ pn2 = Condition(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ js_PopStatement(tc);
+ pn->pn_pos.end = pn2->pn_pos.end;
+ pn->pn_right = pn2;
+ if ((cx->version & JSVERSION_MASK) != JSVERSION_ECMA_3) {
+ /*
+ * All legacy and extended versions must do automatic semicolon
+ * insertion after do-while. See the testcase and discussion in
+ * http://bugzilla.mozilla.org/show_bug.cgi?id=238945.
+ */
+ (void) js_MatchToken(cx, ts, TOK_SEMI);
+ return pn;
+ }
+ break;
+
+ case TOK_FOR:
+ {
+#if JS_HAS_BLOCK_SCOPE
+ JSParseNode *pnlet;
+ JSStmtInfo blockInfo;
+
+ pnlet = NULL;
+#endif
+
+ /* A FOR node is binary, left is loop control and right is the body. */
+ pn = NewParseNode(cx, ts, PN_BINARY, tc);
+ if (!pn)
+ return NULL;
+ js_PushStatement(tc, &stmtInfo, STMT_FOR_LOOP, -1);
+
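+        /* 'for each (v in obj)' (E4X/JS1.6) enumerates values, not keys. */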
+ pn->pn_op = JSOP_FORIN;
+ if (js_MatchToken(cx, ts, TOK_NAME)) {
+ if (CURRENT_TOKEN(ts).t_atom == cx->runtime->atomState.eachAtom)
+ pn->pn_op = JSOP_FOREACH;
+ else
+ js_UngetToken(ts);
+ }
+
+ MUST_MATCH_TOKEN(TOK_LP, JSMSG_PAREN_AFTER_FOR);
+ ts->flags |= TSF_OPERAND;
+ tt = js_PeekToken(cx, ts);
+ ts->flags &= ~TSF_OPERAND;
+ if (tt == TOK_SEMI) {
+ if (pn->pn_op == JSOP_FOREACH)
+ goto bad_for_each;
+
+ /* No initializer -- set first kid of left sub-node to null. */
+ pn1 = NULL;
+ } else {
+ /*
+ * Set pn1 to a var list or an initializing expression.
+ *
+ * Set the TCF_IN_FOR_INIT flag during parsing of the first clause
+ * of the for statement. This flag will be used by the RelExpr
+ * production; if it is set, then the 'in' keyword will not be
+ * recognized as an operator, leaving it available to be parsed as
+ * part of a for/in loop.
+ *
+ * A side effect of this restriction is that (unparenthesized)
+ * expressions involving an 'in' operator are illegal in the init
+ * clause of an ordinary for loop.
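+             * (Parenthesizing restores it, e.g. 'for ((a in b); c; d)'; see
+             * BracketedExpr.)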
+ */
+ tc->flags |= TCF_IN_FOR_INIT;
+ if (tt == TOK_VAR) {
+ (void) js_GetToken(cx, ts);
+ pn1 = Variables(cx, ts, tc);
+#if JS_HAS_BLOCK_SCOPE
+ } else if (tt == TOK_LET) {
+ (void) js_GetToken(cx, ts);
+ if (js_PeekToken(cx, ts) == TOK_LP) {
+ pn1 = LetBlock(cx, ts, tc, JS_FALSE);
+ tt = TOK_LEXICALSCOPE;
+ } else {
+ pnlet = PushLexicalScope(cx, ts, tc, &blockInfo);
+ if (!pnlet)
+ return NULL;
+ pn1 = Variables(cx, ts, tc);
+ }
+#endif
+ } else {
+ pn1 = Expr(cx, ts, tc);
+ if (pn1) {
+ while (pn1->pn_type == TOK_RP)
+ pn1 = pn1->pn_kid;
+ }
+ }
+ tc->flags &= ~TCF_IN_FOR_INIT;
+ if (!pn1)
+ return NULL;
+ }
+
+ /*
+ * We can be sure that it's a for/in loop if there's still an 'in'
+ * keyword here, even if JavaScript recognizes 'in' as an operator,
+ * as we've excluded 'in' from being parsed in RelExpr by setting
+ * the TCF_IN_FOR_INIT flag in our JSTreeContext.
+ */
+ if (pn1 && js_MatchToken(cx, ts, TOK_IN)) {
+ stmtInfo.type = STMT_FOR_IN_LOOP;
+
+ /* Check that the left side of the 'in' is valid. */
+ JS_ASSERT(!TOKEN_TYPE_IS_DECL(tt) || pn1->pn_type == tt);
+ if (TOKEN_TYPE_IS_DECL(tt)
+ ? (pn1->pn_count > 1 || pn1->pn_op == JSOP_DEFCONST
+#if JS_HAS_DESTRUCTURING
+ || (pn->pn_op == JSOP_FORIN &&
+ (pn1->pn_head->pn_type == TOK_RC ||
+ (pn1->pn_head->pn_type == TOK_RB &&
+ pn1->pn_head->pn_count != 2) ||
+ (pn1->pn_head->pn_type == TOK_ASSIGN &&
+ (pn1->pn_head->pn_left->pn_type != TOK_RB ||
+ pn1->pn_head->pn_left->pn_count != 2))))
+#endif
+ )
+ : (pn1->pn_type != TOK_NAME &&
+ pn1->pn_type != TOK_DOT &&
+#if JS_HAS_DESTRUCTURING
+ ((pn->pn_op == JSOP_FORIN)
+ ? (pn1->pn_type != TOK_RB || pn1->pn_count != 2)
+ : (pn1->pn_type != TOK_RB && pn1->pn_type != TOK_RC)) &&
+#endif
+#if JS_HAS_LVALUE_RETURN
+ pn1->pn_type != TOK_LP &&
+#endif
+#if JS_HAS_XML_SUPPORT
+ (pn1->pn_type != TOK_UNARYOP ||
+ pn1->pn_op != JSOP_XMLNAME) &&
+#endif
+ pn1->pn_type != TOK_LB)) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_FOR_LEFTSIDE);
+ return NULL;
+ }
+
+ if (TOKEN_TYPE_IS_DECL(tt)) {
+ /* Tell js_EmitTree(TOK_VAR) that pn1 is part of a for/in. */
+ pn1->pn_extra |= PNX_FORINVAR;
+
+ /*
+ * Generate a final POP only if the variable is a simple name
+ * (which means it is not a destructuring left-hand side) and
+ * it has an initializer.
+ */
+ pn2 = pn1->pn_head;
+ if (pn2->pn_type == TOK_NAME && pn2->pn_expr)
+ pn1->pn_extra |= PNX_POPVAR;
+ } else {
+ pn2 = pn1;
+#if JS_HAS_LVALUE_RETURN
+ if (pn2->pn_type == TOK_LP)
+ pn2->pn_op = JSOP_SETCALL;
+#endif
+#if JS_HAS_XML_SUPPORT
+ if (pn2->pn_type == TOK_UNARYOP)
+ pn2->pn_op = JSOP_BINDXMLNAME;
+#endif
+ }
+
+ switch (pn2->pn_type) {
+ case TOK_NAME:
+ /* Beware 'for (arguments in ...)' with or without a 'var'. */
+ if (pn2->pn_atom == cx->runtime->atomState.argumentsAtom)
+ tc->flags |= TCF_FUN_HEAVYWEIGHT;
+ break;
+
+#if JS_HAS_DESTRUCTURING
+ case TOK_ASSIGN:
+ pn2 = pn2->pn_left;
+ JS_ASSERT(pn2->pn_type == TOK_RB || pn2->pn_type == TOK_RC);
+ /* FALL THROUGH */
+ case TOK_RB:
+ case TOK_RC:
+ /* Check for valid lvalues in var-less destructuring for-in. */
+ if (pn1 == pn2 && !CheckDestructuring(cx, NULL, pn2, NULL, tc))
+ return NULL;
+
+ /* Destructuring for-in requires [key, value] enumeration. */
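+              /* E.g. 'for (var [key, value] in obj)' binds both key and value. */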
+ if (pn->pn_op != JSOP_FOREACH)
+ pn->pn_op = JSOP_FOREACHKEYVAL;
+ break;
+#endif
+
+ default:;
+ }
+
+ /* Parse the object expression as the right operand of 'in'. */
+ pn2 = NewBinary(cx, TOK_IN, JSOP_NOP, pn1, Expr(cx, ts, tc), tc);
+ if (!pn2)
+ return NULL;
+ pn->pn_left = pn2;
+ } else {
+ if (pn->pn_op == JSOP_FOREACH)
+ goto bad_for_each;
+ pn->pn_op = JSOP_NOP;
+
+ /* Parse the loop condition or null into pn2. */
+ MUST_MATCH_TOKEN(TOK_SEMI, JSMSG_SEMI_AFTER_FOR_INIT);
+ ts->flags |= TSF_OPERAND;
+ tt = js_PeekToken(cx, ts);
+ ts->flags &= ~TSF_OPERAND;
+ if (tt == TOK_SEMI) {
+ pn2 = NULL;
+ } else {
+ pn2 = Expr(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ }
+
+ /* Parse the update expression or null into pn3. */
+ MUST_MATCH_TOKEN(TOK_SEMI, JSMSG_SEMI_AFTER_FOR_COND);
+ ts->flags |= TSF_OPERAND;
+ tt = js_PeekToken(cx, ts);
+ ts->flags &= ~TSF_OPERAND;
+ if (tt == TOK_RP) {
+ pn3 = NULL;
+ } else {
+ pn3 = Expr(cx, ts, tc);
+ if (!pn3)
+ return NULL;
+ }
+
+ /* Build the RESERVED node to use as the left kid of pn. */
+ pn4 = NewParseNode(cx, ts, PN_TERNARY, tc);
+ if (!pn4)
+ return NULL;
+ pn4->pn_type = TOK_RESERVED;
+ pn4->pn_op = JSOP_NOP;
+ pn4->pn_kid1 = pn1;
+ pn4->pn_kid2 = pn2;
+ pn4->pn_kid3 = pn3;
+ pn->pn_left = pn4;
+ }
+
+ MUST_MATCH_TOKEN(TOK_RP, JSMSG_PAREN_AFTER_FOR_CTRL);
+
+ /* Parse the loop body into pn->pn_right. */
+ pn2 = Statement(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ pn->pn_right = pn2;
+
+ /* Record the absolute line number for source note emission. */
+ pn->pn_pos.end = pn2->pn_pos.end;
+
+#if JS_HAS_BLOCK_SCOPE
+ if (pnlet) {
+ js_PopStatement(tc);
+ pnlet->pn_expr = pn;
+ pn = pnlet;
+ }
+#endif
+ js_PopStatement(tc);
+ return pn;
+
+ bad_for_each:
+ js_ReportCompileErrorNumber(cx, pn,
+ JSREPORT_PN | JSREPORT_ERROR,
+ JSMSG_BAD_FOR_EACH_LOOP);
+ return NULL;
+ }
+
+ case TOK_TRY: {
+ JSParseNode *catchList, *lastCatch;
+
+ /*
+ * try nodes are ternary.
+ * kid1 is the try Statement
+ * kid2 is the catch node list or null
+ * kid3 is the finally Statement
+ *
+ * catch nodes are ternary.
+ * kid1 is the lvalue (TOK_NAME, TOK_LB, or TOK_LC)
+ * kid2 is the catch guard or null if no guard
+ * kid3 is the catch block
+ *
+ * catch lvalue nodes are either:
+ * TOK_NAME for a single identifier
+ * TOK_RB or TOK_RC for a destructuring left-hand side
+ *
+ * finally nodes are TOK_LC Statement lists.
+ */
+ pn = NewParseNode(cx, ts, PN_TERNARY, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_op = JSOP_NOP;
+
+ MUST_MATCH_TOKEN(TOK_LC, JSMSG_CURLY_BEFORE_TRY);
+ js_PushStatement(tc, &stmtInfo, STMT_TRY, -1);
+ pn->pn_kid1 = Statements(cx, ts, tc);
+ if (!pn->pn_kid1)
+ return NULL;
+ MUST_MATCH_TOKEN(TOK_RC, JSMSG_CURLY_AFTER_TRY);
+ js_PopStatement(tc);
+
+ catchList = NULL;
+ tt = js_GetToken(cx, ts);
+ if (tt == TOK_CATCH) {
+ catchList = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!catchList)
+ return NULL;
+ catchList->pn_type = TOK_RESERVED;
+ PN_INIT_LIST(catchList);
+ lastCatch = NULL;
+
+ do {
+ JSParseNode *pnblock;
+ BindData data;
+
+ /* Check for another catch after unconditional catch. */
+ if (lastCatch && !lastCatch->pn_kid2) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_CATCH_AFTER_GENERAL);
+ return NULL;
+ }
+
+ /*
+ * Create a lexical scope node around the whole catch clause,
+ * including the head.
+ */
+ pnblock = PushLexicalScope(cx, ts, tc, &stmtInfo);
+ if (!pnblock)
+ return NULL;
+ stmtInfo.type = STMT_CATCH;
+
+ /*
+ * Legal catch forms are:
+ * catch (lhs)
+ * catch (lhs if <boolean_expression>)
+ * where lhs is a name or a destructuring left-hand side.
+ * (the latter is legal only #ifdef JS_HAS_CATCH_GUARD)
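+                 * A destructuring example: 'catch ([code, message]) {...}'.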
+ */
+ pn2 = NewParseNode(cx, ts, PN_TERNARY, tc);
+ if (!pn2)
+ return NULL;
+ pnblock->pn_expr = pn2;
+ MUST_MATCH_TOKEN(TOK_LP, JSMSG_PAREN_BEFORE_CATCH);
+
+ /*
+ * Contrary to ECMA Ed. 3, the catch variable is lexically
+ * scoped, not a property of a new Object instance. This is
+ * an intentional change that anticipates ECMA Ed. 4.
+ */
+ data.pn = NULL;
+ data.ts = ts;
+ data.obj = tc->blockChain;
+ data.op = JSOP_NOP;
+ data.binder = BindLet;
+ data.u.let.index = 0;
+ data.u.let.overflow = JSMSG_TOO_MANY_CATCH_VARS;
+
+ tt = js_GetToken(cx, ts);
+ switch (tt) {
+#if JS_HAS_DESTRUCTURING
+ case TOK_LB:
+ case TOK_LC:
+ pn3 = DestructuringExpr(cx, &data, tc, tt);
+ if (!pn3)
+ return NULL;
+ break;
+#endif
+
+ case TOK_NAME:
+ label = CURRENT_TOKEN(ts).t_atom;
+ if (!data.binder(cx, &data, label, tc))
+ return NULL;
+
+ pn3 = NewParseNode(cx, ts, PN_NAME, tc);
+ if (!pn3)
+ return NULL;
+ pn3->pn_atom = label;
+ pn3->pn_expr = NULL;
+ pn3->pn_slot = 0;
+ pn3->pn_attrs = 0;
+ break;
+
+ default:
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_CATCH_IDENTIFIER);
+ return NULL;
+ }
+
+ pn2->pn_kid1 = pn3;
+ pn2->pn_kid2 = NULL;
+#if JS_HAS_CATCH_GUARD
+ /*
+ * We use 'catch (x if x === 5)' (not 'catch (x : x === 5)')
+ * to avoid conflicting with the JS2/ECMAv4 type annotation
+ * catchguard syntax.
+ */
+ if (js_MatchToken(cx, ts, TOK_IF)) {
+ pn2->pn_kid2 = Expr(cx, ts, tc);
+ if (!pn2->pn_kid2)
+ return NULL;
+ }
+#endif
+ MUST_MATCH_TOKEN(TOK_RP, JSMSG_PAREN_AFTER_CATCH);
+
+ MUST_MATCH_TOKEN(TOK_LC, JSMSG_CURLY_BEFORE_CATCH);
+ pn2->pn_kid3 = Statements(cx, ts, tc);
+ if (!pn2->pn_kid3)
+ return NULL;
+ MUST_MATCH_TOKEN(TOK_RC, JSMSG_CURLY_AFTER_CATCH);
+ js_PopStatement(tc);
+
+ PN_APPEND(catchList, pnblock);
+ lastCatch = pn2;
+ ts->flags |= TSF_OPERAND;
+ tt = js_GetToken(cx, ts);
+ ts->flags &= ~TSF_OPERAND;
+ } while (tt == TOK_CATCH);
+ }
+ pn->pn_kid2 = catchList;
+
+ if (tt == TOK_FINALLY) {
+ tc->tryCount++;
+ MUST_MATCH_TOKEN(TOK_LC, JSMSG_CURLY_BEFORE_FINALLY);
+ js_PushStatement(tc, &stmtInfo, STMT_FINALLY, -1);
+ pn->pn_kid3 = Statements(cx, ts, tc);
+ if (!pn->pn_kid3)
+ return NULL;
+ MUST_MATCH_TOKEN(TOK_RC, JSMSG_CURLY_AFTER_FINALLY);
+ js_PopStatement(tc);
+ } else {
+ js_UngetToken(ts);
+ pn->pn_kid3 = NULL;
+ }
+ if (!catchList && !pn->pn_kid3) {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_CATCH_OR_FINALLY);
+ return NULL;
+ }
+ tc->tryCount++;
+ return pn;
+ }
+
+ case TOK_THROW:
+ pn = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!pn)
+ return NULL;
+
+ /* ECMA-262 Edition 3 says 'throw [no LineTerminator here] Expr'. */
+ ts->flags |= TSF_OPERAND;
+ tt = js_PeekTokenSameLine(cx, ts);
+ ts->flags &= ~TSF_OPERAND;
+ if (tt == TOK_ERROR)
+ return NULL;
+ if (tt == TOK_EOF || tt == TOK_EOL || tt == TOK_SEMI || tt == TOK_RC) {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_SYNTAX_ERROR);
+ return NULL;
+ }
+
+ pn2 = Expr(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ pn->pn_pos.end = pn2->pn_pos.end;
+ pn->pn_op = JSOP_THROW;
+ pn->pn_kid = pn2;
+ break;
+
+ /* TOK_CATCH and TOK_FINALLY are both handled in the TOK_TRY case */
+ case TOK_CATCH:
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_CATCH_WITHOUT_TRY);
+ return NULL;
+
+ case TOK_FINALLY:
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_FINALLY_WITHOUT_TRY);
+ return NULL;
+
+ case TOK_BREAK:
+ pn = NewParseNode(cx, ts, PN_NULLARY, tc);
+ if (!pn)
+ return NULL;
+ if (!MatchLabel(cx, ts, pn))
+ return NULL;
+ stmt = tc->topStmt;
+ label = pn->pn_atom;
+ if (label) {
+ for (; ; stmt = stmt->down) {
+ if (!stmt) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_LABEL_NOT_FOUND);
+ return NULL;
+ }
+ if (stmt->type == STMT_LABEL && stmt->atom == label)
+ break;
+ }
+ } else {
+ for (; ; stmt = stmt->down) {
+ if (!stmt) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_TOUGH_BREAK);
+ return NULL;
+ }
+ if (STMT_IS_LOOP(stmt) || stmt->type == STMT_SWITCH)
+ break;
+ }
+ }
+ if (label)
+ pn->pn_pos.end = CURRENT_TOKEN(ts).pos.end;
+ break;
+
+ case TOK_CONTINUE:
+ pn = NewParseNode(cx, ts, PN_NULLARY, tc);
+ if (!pn)
+ return NULL;
+ if (!MatchLabel(cx, ts, pn))
+ return NULL;
+ stmt = tc->topStmt;
+ label = pn->pn_atom;
+ if (label) {
+ for (stmt2 = NULL; ; stmt = stmt->down) {
+ if (!stmt) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_LABEL_NOT_FOUND);
+ return NULL;
+ }
+ if (stmt->type == STMT_LABEL) {
+ if (stmt->atom == label) {
+ if (!stmt2 || !STMT_IS_LOOP(stmt2)) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS |
+ JSREPORT_ERROR,
+ JSMSG_BAD_CONTINUE);
+ return NULL;
+ }
+ break;
+ }
+ } else {
+ stmt2 = stmt;
+ }
+ }
+ } else {
+ for (; ; stmt = stmt->down) {
+ if (!stmt) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_CONTINUE);
+ return NULL;
+ }
+ if (STMT_IS_LOOP(stmt))
+ break;
+ }
+ }
+ if (label)
+ pn->pn_pos.end = CURRENT_TOKEN(ts).pos.end;
+ break;
+
+ case TOK_WITH:
+ pn = NewParseNode(cx, ts, PN_BINARY, tc);
+ if (!pn)
+ return NULL;
+ MUST_MATCH_TOKEN(TOK_LP, JSMSG_PAREN_BEFORE_WITH);
+ pn2 = Expr(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ MUST_MATCH_TOKEN(TOK_RP, JSMSG_PAREN_AFTER_WITH);
+ pn->pn_left = pn2;
+
+ js_PushStatement(tc, &stmtInfo, STMT_WITH, -1);
+ pn2 = Statement(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ js_PopStatement(tc);
+
+ pn->pn_pos.end = pn2->pn_pos.end;
+ pn->pn_right = pn2;
+ tc->flags |= TCF_FUN_HEAVYWEIGHT;
+ return pn;
+
+ case TOK_VAR:
+ pn = Variables(cx, ts, tc);
+ if (!pn)
+ return NULL;
+
+ /* Tell js_EmitTree to generate a final POP. */
+ pn->pn_extra |= PNX_POPVAR;
+ break;
+
+#if JS_HAS_BLOCK_SCOPE
+ case TOK_LET:
+ {
+ JSStmtInfo **sip;
+ JSObject *obj;
+ JSAtom *atom;
+
+ /* Check for a let statement or let expression. */
+ if (js_PeekToken(cx, ts) == TOK_LP) {
+ pn = LetBlock(cx, ts, tc, JS_TRUE);
+ if (!pn || pn->pn_op == JSOP_LEAVEBLOCK)
+ return pn;
+
+ /* Let expressions require automatic semicolon insertion. */
+ JS_ASSERT(pn->pn_type == TOK_SEMI ||
+ pn->pn_op == JSOP_LEAVEBLOCKEXPR);
+ break;
+ }
+
+ /*
+ * This is a let declaration. We must convert the nearest JSStmtInfo
+ * that is a block or a switch body to be our scope statement. Further
+ * let declarations in this block will find this scope statement and
+ * use the same block object. If we are the first let declaration in
+ * this block (i.e., when the nearest maybe-scope JSStmtInfo isn't a
+ * scope statement) then we also need to set tc->blockNode to be our
+ * TOK_LEXICALSCOPE.
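+     *
+     * E.g. in '{ let x = 1; let y = 2; }' both declarations end up in the
+     * single block object created for the enclosing braces.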
+ */
+ sip = &tc->topScopeStmt;
+ for (stmt = tc->topStmt; stmt; stmt = stmt->down) {
+ if (STMT_MAYBE_SCOPE(stmt))
+ break;
+ if (stmt == *sip)
+ sip = &stmt->downScope;
+ }
+
+ if (stmt && (stmt->flags & SIF_SCOPE)) {
+ JS_ASSERT(tc->blockChain == ATOM_TO_OBJECT(stmt->atom));
+ obj = tc->blockChain;
+ } else {
+ if (!stmt) {
+ /*
+ * FIXME: https://bugzilla.mozilla.org/show_bug.cgi?id=346749
+ *
+ * This is a hard case that requires more work. In particular,
+ * in many cases, we're trying to emit code as we go. However,
+ * this means that we haven't necessarily finished processing
+ * all let declarations in the implicit top-level block when
+ * we emit a reference to one of them. For now, punt on this
+ * and pretend this is a var declaration.
+ */
+ CURRENT_TOKEN(ts).type = TOK_VAR;
+ CURRENT_TOKEN(ts).t_op = JSOP_DEFVAR;
+
+ pn = Variables(cx, ts, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_extra |= PNX_POPVAR;
+ break;
+ }
+
+ /* Convert the block statement into a scope statement. */
+ obj = js_NewBlockObject(cx);
+ if (!obj)
+ return NULL;
+ atom = js_AtomizeObject(cx, obj, 0);
+ if (!atom)
+ return NULL;
+
+ /*
+ * Insert stmt on the tc->topScopeStmt/stmtInfo.downScope linked
+ * list stack, if it isn't already there. If it is there, but it
+ * lacks the SIF_SCOPE flag, it must be a try, catch, or finally
+ * block.
+ */
+ JS_ASSERT(!(stmt->flags & SIF_SCOPE));
+ stmt->flags |= SIF_SCOPE;
+ if (stmt != *sip) {
+ JS_ASSERT(!stmt->downScope);
+ JS_ASSERT(stmt->type == STMT_BLOCK ||
+ stmt->type == STMT_SWITCH ||
+ stmt->type == STMT_TRY ||
+ stmt->type == STMT_FINALLY);
+ stmt->downScope = *sip;
+ *sip = stmt;
+ } else {
+ JS_ASSERT(stmt->type == STMT_CATCH);
+ JS_ASSERT(stmt->downScope);
+ }
+
+ obj->slots[JSSLOT_PARENT] = OBJECT_TO_JSVAL(tc->blockChain);
+ tc->blockChain = obj;
+ stmt->atom = atom;
+
+#ifdef DEBUG
+ pn1 = tc->blockNode;
+ JS_ASSERT(!pn1 || pn1->pn_type != TOK_LEXICALSCOPE);
+#endif
+
+ /* Create a new lexical scope node for these statements. */
+ pn1 = NewParseNode(cx, ts, PN_NAME, tc);
+ if (!pn1)
+ return NULL;
+
+ pn1->pn_type = TOK_LEXICALSCOPE;
+ pn1->pn_op = JSOP_LEAVEBLOCK;
+ pn1->pn_pos = tc->blockNode->pn_pos;
+ pn1->pn_atom = atom;
+ pn1->pn_expr = tc->blockNode;
+ pn1->pn_slot = -1;
+ pn1->pn_attrs = 0;
+ tc->blockNode = pn1;
+ }
+
+ pn = Variables(cx, ts, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_extra = PNX_POPVAR;
+ break;
+ }
+#endif /* JS_HAS_BLOCK_SCOPE */
+
+ case TOK_RETURN:
+ pn = ReturnOrYield(cx, ts, tc, Expr);
+ if (!pn)
+ return NULL;
+ break;
+
+ case TOK_LC:
+ {
+ uintN oldflags;
+
+ oldflags = tc->flags;
+ tc->flags = oldflags & ~TCF_HAS_FUNCTION_STMT;
+ js_PushStatement(tc, &stmtInfo, STMT_BLOCK, -1);
+ pn = Statements(cx, ts, tc);
+ if (!pn)
+ return NULL;
+
+ MUST_MATCH_TOKEN(TOK_RC, JSMSG_CURLY_IN_COMPOUND);
+ js_PopStatement(tc);
+
+ /*
+ * If we contain a function statement and our container is top-level
+ * or another block, flag pn to preserve braces when decompiling.
+ */
+ if ((tc->flags & TCF_HAS_FUNCTION_STMT) &&
+ (!tc->topStmt || tc->topStmt->type == STMT_BLOCK)) {
+ pn->pn_extra |= PNX_NEEDBRACES;
+ }
+ tc->flags = oldflags | (tc->flags & (TCF_FUN_FLAGS | TCF_RETURN_FLAGS));
+ return pn;
+ }
+
+ case TOK_EOL:
+ case TOK_SEMI:
+ pn = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_type = TOK_SEMI;
+ pn->pn_kid = NULL;
+ return pn;
+
+#if JS_HAS_DEBUGGER_KEYWORD
+ case TOK_DEBUGGER:
+ pn = NewParseNode(cx, ts, PN_NULLARY, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_type = TOK_DEBUGGER;
+ tc->flags |= TCF_FUN_HEAVYWEIGHT;
+ break;
+#endif /* JS_HAS_DEBUGGER_KEYWORD */
+
+#if JS_HAS_XML_SUPPORT
+ case TOK_DEFAULT:
+ pn = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!pn)
+ return NULL;
+ if (!js_MatchToken(cx, ts, TOK_NAME) ||
+ CURRENT_TOKEN(ts).t_atom != cx->runtime->atomState.xmlAtom ||
+ !js_MatchToken(cx, ts, TOK_NAME) ||
+ CURRENT_TOKEN(ts).t_atom != cx->runtime->atomState.namespaceAtom ||
+ !js_MatchToken(cx, ts, TOK_ASSIGN) ||
+ CURRENT_TOKEN(ts).t_op != JSOP_NOP) {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_DEFAULT_XML_NAMESPACE);
+ return NULL;
+ }
+ pn2 = Expr(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ pn->pn_op = JSOP_DEFXMLNS;
+ pn->pn_pos.end = pn2->pn_pos.end;
+ pn->pn_kid = pn2;
+ tc->flags |= TCF_HAS_DEFXMLNS;
+ break;
+#endif
+
+ case TOK_ERROR:
+ return NULL;
+
+ default:
+#if JS_HAS_XML_SUPPORT
+ expression:
+#endif
+ js_UngetToken(ts);
+ pn2 = Expr(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+
+ if (js_PeekToken(cx, ts) == TOK_COLON) {
+ if (pn2->pn_type != TOK_NAME) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_LABEL);
+ return NULL;
+ }
+ label = pn2->pn_atom;
+ for (stmt = tc->topStmt; stmt; stmt = stmt->down) {
+ if (stmt->type == STMT_LABEL && stmt->atom == label) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_DUPLICATE_LABEL);
+ return NULL;
+ }
+ }
+ (void) js_GetToken(cx, ts);
+
+ /* Push a label struct and parse the statement. */
+ js_PushStatement(tc, &stmtInfo, STMT_LABEL, -1);
+ stmtInfo.atom = label;
+ pn = Statement(cx, ts, tc);
+ if (!pn)
+ return NULL;
+
+ /* Normalize empty statement to empty block for the decompiler. */
+ if (pn->pn_type == TOK_SEMI && !pn->pn_kid) {
+ pn->pn_type = TOK_LC;
+ pn->pn_arity = PN_LIST;
+ PN_INIT_LIST(pn);
+ }
+
+ /* Pop the label, set pn_expr, and return early. */
+ js_PopStatement(tc);
+ pn2->pn_type = TOK_COLON;
+ pn2->pn_pos.end = pn->pn_pos.end;
+ pn2->pn_expr = pn;
+ return pn2;
+ }
+
+ pn = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_type = TOK_SEMI;
+ pn->pn_pos = pn2->pn_pos;
+ pn->pn_kid = pn2;
+ break;
+ }
+
+ /* Check termination of this primitive statement. */
+ if (ON_CURRENT_LINE(ts, pn->pn_pos)) {
+ ts->flags |= TSF_OPERAND;
+ tt = js_PeekTokenSameLine(cx, ts);
+ ts->flags &= ~TSF_OPERAND;
+ if (tt == TOK_ERROR)
+ return NULL;
+ if (tt != TOK_EOF && tt != TOK_EOL && tt != TOK_SEMI && tt != TOK_RC) {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_SEMI_BEFORE_STMNT);
+ return NULL;
+ }
+ }
+
+ (void) js_MatchToken(cx, ts, TOK_SEMI);
+ return pn;
+}
+
+static JSParseNode *
+Variables(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSTokenType tt;
+ JSBool let;
+ JSStmtInfo *scopeStmt;
+ BindData data;
+ JSParseNode *pn, *pn2;
+ JSStackFrame *fp;
+ JSAtom *atom;
+
+ /*
+ * The three options here are:
+ * - TOK_LET: We are parsing a let declaration.
+ * - TOK_LP: We are parsing the head of a let block.
+ * - Otherwise, we're parsing var declarations.
+ */
+ tt = CURRENT_TOKEN(ts).type;
+ let = (tt == TOK_LET || tt == TOK_LP);
+ JS_ASSERT(let || tt == TOK_VAR);
+
+ /* Make sure that Statement set the tree context up correctly. */
+ scopeStmt = tc->topScopeStmt;
+ if (let) {
+ while (scopeStmt && !(scopeStmt->flags & SIF_SCOPE)) {
+ JS_ASSERT(!STMT_MAYBE_SCOPE(scopeStmt));
+ scopeStmt = scopeStmt->downScope;
+ }
+ JS_ASSERT(scopeStmt);
+ }
+
+ data.pn = NULL;
+ data.ts = ts;
+ data.op = let ? JSOP_NOP : CURRENT_TOKEN(ts).t_op;
+ data.binder = let ? BindLet : BindVarOrConst;
+ pn = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_op = data.op;
+ PN_INIT_LIST(pn);
+
+ /*
+ * The tricky part of this code is to create special parsenode opcodes for
+ * getting and setting variables (which will be stored as special slots in
+ * the frame). The most complicated case is an eval() inside a function.
+ * If the evaluated string references variables in the enclosing function,
+ * then we need to generate the special variable opcodes. We determine
+ * this by looking up the variable's id in the current variable object.
+ * Fortunately, we can avoid doing this for let declared variables.
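+     *
+     * E.g. when 'var y = 2' is compiled from an eval call inside a function,
+     * fp->varobj is the function's Call object, so y gets the call-variable
+     * getter and setter below.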
+ */
+ fp = cx->fp;
+ if (let) {
+ JS_ASSERT(tc->blockChain == ATOM_TO_OBJECT(scopeStmt->atom));
+ data.obj = tc->blockChain;
+ data.u.let.index = OBJ_BLOCK_COUNT(cx, data.obj);
+ data.u.let.overflow = JSMSG_TOO_MANY_FUN_VARS;
+ } else {
+ data.obj = fp->varobj;
+ data.u.var.fun = fp->fun;
+ data.u.var.clasp = OBJ_GET_CLASS(cx, data.obj);
+ if (data.u.var.fun && data.u.var.clasp == &js_FunctionClass) {
+ /* We are compiling code inside a function */
+ data.u.var.getter = js_GetLocalVariable;
+ data.u.var.setter = js_SetLocalVariable;
+ } else if (data.u.var.fun && data.u.var.clasp == &js_CallClass) {
+ /* We are compiling code from an eval inside a function */
+ data.u.var.getter = js_GetCallVariable;
+ data.u.var.setter = js_SetCallVariable;
+ } else {
+ data.u.var.getter = data.u.var.clasp->getProperty;
+ data.u.var.setter = data.u.var.clasp->setProperty;
+ }
+
+ data.u.var.attrs = (data.op == JSOP_DEFCONST)
+ ? JSPROP_PERMANENT | JSPROP_READONLY
+ : JSPROP_PERMANENT;
+ }
+
+ do {
+ tt = js_GetToken(cx, ts);
+#if JS_HAS_DESTRUCTURING
+ if (tt == TOK_LB || tt == TOK_LC) {
+ pn2 = PrimaryExpr(cx, ts, tc, tt, JS_FALSE);
+ if (!pn2)
+ return NULL;
+
+ if ((tc->flags & TCF_IN_FOR_INIT) &&
+ js_PeekToken(cx, ts) == TOK_IN) {
+ if (!CheckDestructuring(cx, &data, pn2, NULL, tc))
+ return NULL;
+ PN_APPEND(pn, pn2);
+ continue;
+ }
+
+ MUST_MATCH_TOKEN(TOK_ASSIGN, JSMSG_BAD_DESTRUCT_DECL);
+ if (CURRENT_TOKEN(ts).t_op != JSOP_NOP)
+ goto bad_var_init;
+
+ pn2 = NewBinary(cx, TOK_ASSIGN, JSOP_NOP,
+ pn2, AssignExpr(cx, ts, tc),
+ tc);
+ if (!pn2 ||
+ !CheckDestructuring(cx, &data,
+ pn2->pn_left, pn2->pn_right,
+ tc)) {
+ return NULL;
+ }
+ PN_APPEND(pn, pn2);
+ continue;
+ }
+#endif
+
+ if (tt != TOK_NAME) {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_NO_VARIABLE_NAME);
+ return NULL;
+ }
+ atom = CURRENT_TOKEN(ts).t_atom;
+ if (!data.binder(cx, &data, atom, tc))
+ return NULL;
+
+ pn2 = NewParseNode(cx, ts, PN_NAME, tc);
+ if (!pn2)
+ return NULL;
+ pn2->pn_op = JSOP_NAME;
+ pn2->pn_atom = atom;
+ pn2->pn_expr = NULL;
+ pn2->pn_slot = -1;
+ pn2->pn_attrs = let ? 0 : data.u.var.attrs;
+ PN_APPEND(pn, pn2);
+
+ if (js_MatchToken(cx, ts, TOK_ASSIGN)) {
+ if (CURRENT_TOKEN(ts).t_op != JSOP_NOP)
+ goto bad_var_init;
+
+ pn2->pn_expr = AssignExpr(cx, ts, tc);
+ if (!pn2->pn_expr)
+ return NULL;
+ pn2->pn_op = (!let && data.op == JSOP_DEFCONST)
+ ? JSOP_SETCONST
+ : JSOP_SETNAME;
+ if (!let && atom == cx->runtime->atomState.argumentsAtom)
+ tc->flags |= TCF_FUN_HEAVYWEIGHT;
+ }
+ } while (js_MatchToken(cx, ts, TOK_COMMA));
+
+ pn->pn_pos.end = PN_LAST(pn)->pn_pos.end;
+ return pn;
+
+bad_var_init:
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_VAR_INIT);
+ return NULL;
+}
+
+static JSParseNode *
+Expr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn, *pn2;
+
+ pn = AssignExpr(cx, ts, tc);
+ if (pn && js_MatchToken(cx, ts, TOK_COMMA)) {
+ pn2 = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!pn2)
+ return NULL;
+ pn2->pn_pos.begin = pn->pn_pos.begin;
+ PN_INIT_LIST_1(pn2, pn);
+ pn = pn2;
+ do {
+#if JS_HAS_GENERATORS
+ pn2 = PN_LAST(pn);
+ if (pn2->pn_type == TOK_YIELD) {
+ js_ReportCompileErrorNumber(cx, pn2,
+ JSREPORT_PN | JSREPORT_ERROR,
+ JSMSG_BAD_YIELD_SYNTAX);
+ return NULL;
+ }
+#endif
+ pn2 = AssignExpr(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ PN_APPEND(pn, pn2);
+ } while (js_MatchToken(cx, ts, TOK_COMMA));
+ pn->pn_pos.end = PN_LAST(pn)->pn_pos.end;
+ }
+ return pn;
+}
+
+static JSParseNode *
+AssignExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn, *pn2;
+ JSTokenType tt;
+ JSOp op;
+
+ CHECK_RECURSION();
+
+#if JS_HAS_GENERATORS
+ ts->flags |= TSF_OPERAND;
+ if (js_MatchToken(cx, ts, TOK_YIELD)) {
+ ts->flags &= ~TSF_OPERAND;
+ return ReturnOrYield(cx, ts, tc, AssignExpr);
+ }
+ ts->flags &= ~TSF_OPERAND;
+#endif
+
+ pn = CondExpr(cx, ts, tc);
+ if (!pn)
+ return NULL;
+
+ tt = js_GetToken(cx, ts);
+#if JS_HAS_GETTER_SETTER
+ if (tt == TOK_NAME) {
+ tt = CheckGetterOrSetter(cx, ts, TOK_ASSIGN);
+ if (tt == TOK_ERROR)
+ return NULL;
+ }
+#endif
+ if (tt != TOK_ASSIGN) {
+ js_UngetToken(ts);
+ return pn;
+ }
+
+ op = CURRENT_TOKEN(ts).t_op;
+ for (pn2 = pn; pn2->pn_type == TOK_RP; pn2 = pn2->pn_kid)
+ continue;
+ switch (pn2->pn_type) {
+ case TOK_NAME:
+ pn2->pn_op = JSOP_SETNAME;
+ if (pn2->pn_atom == cx->runtime->atomState.argumentsAtom)
+ tc->flags |= TCF_FUN_HEAVYWEIGHT;
+ break;
+ case TOK_DOT:
+ pn2->pn_op = (pn2->pn_op == JSOP_GETMETHOD)
+ ? JSOP_SETMETHOD
+ : JSOP_SETPROP;
+ break;
+ case TOK_LB:
+ pn2->pn_op = JSOP_SETELEM;
+ break;
+#if JS_HAS_DESTRUCTURING
+ case TOK_RB:
+ case TOK_RC:
+ if (op != JSOP_NOP) {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_DESTRUCT_ASS);
+ return NULL;
+ }
+ pn = AssignExpr(cx, ts, tc);
+ if (!pn || !CheckDestructuring(cx, NULL, pn2, pn, tc))
+ return NULL;
+ return NewBinary(cx, TOK_ASSIGN, op, pn2, pn, tc);
+#endif
+#if JS_HAS_LVALUE_RETURN
+ case TOK_LP:
+ JS_ASSERT(pn2->pn_op == JSOP_CALL || pn2->pn_op == JSOP_EVAL);
+ pn2->pn_op = JSOP_SETCALL;
+ break;
+#endif
+#if JS_HAS_XML_SUPPORT
+ case TOK_UNARYOP:
+ if (pn2->pn_op == JSOP_XMLNAME) {
+ pn2->pn_op = JSOP_SETXMLNAME;
+ break;
+ }
+ /* FALL THROUGH */
+#endif
+ default:
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_LEFTSIDE_OF_ASS);
+ return NULL;
+ }
+
+ return NewBinary(cx, TOK_ASSIGN, op, pn2, AssignExpr(cx, ts, tc), tc);
+}
+
+static JSParseNode *
+CondExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn, *pn1, *pn2, *pn3;
+ uintN oldflags;
+
+ pn = OrExpr(cx, ts, tc);
+ if (pn && js_MatchToken(cx, ts, TOK_HOOK)) {
+ pn1 = pn;
+ pn = NewParseNode(cx, ts, PN_TERNARY, tc);
+ if (!pn)
+ return NULL;
+ /*
+ * Always accept the 'in' operator in the middle clause of a ternary,
+ * where it's unambiguous, even if we might be parsing the init of a
+ * for statement.
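+         * E.g. 'for (x = flag ? i in o : 0; ; )' parses without extra parens
+         * around 'i in o'.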
+ */
+ oldflags = tc->flags;
+ tc->flags &= ~TCF_IN_FOR_INIT;
+ pn2 = AssignExpr(cx, ts, tc);
+ tc->flags = oldflags | (tc->flags & TCF_FUN_FLAGS);
+
+ if (!pn2)
+ return NULL;
+ MUST_MATCH_TOKEN(TOK_COLON, JSMSG_COLON_IN_COND);
+ pn3 = AssignExpr(cx, ts, tc);
+ if (!pn3)
+ return NULL;
+ pn->pn_pos.begin = pn1->pn_pos.begin;
+ pn->pn_pos.end = pn3->pn_pos.end;
+ pn->pn_kid1 = pn1;
+ pn->pn_kid2 = pn2;
+ pn->pn_kid3 = pn3;
+ }
+ return pn;
+}
+
+static JSParseNode *
+OrExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn;
+
+ pn = AndExpr(cx, ts, tc);
+ if (pn && js_MatchToken(cx, ts, TOK_OR))
+ pn = NewBinary(cx, TOK_OR, JSOP_OR, pn, OrExpr(cx, ts, tc), tc);
+ return pn;
+}
+
+static JSParseNode *
+AndExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn;
+
+ pn = BitOrExpr(cx, ts, tc);
+ if (pn && js_MatchToken(cx, ts, TOK_AND))
+ pn = NewBinary(cx, TOK_AND, JSOP_AND, pn, AndExpr(cx, ts, tc), tc);
+ return pn;
+}
+
+static JSParseNode *
+BitOrExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn;
+
+ pn = BitXorExpr(cx, ts, tc);
+ while (pn && js_MatchToken(cx, ts, TOK_BITOR)) {
+ pn = NewBinary(cx, TOK_BITOR, JSOP_BITOR, pn, BitXorExpr(cx, ts, tc),
+ tc);
+ }
+ return pn;
+}
+
+static JSParseNode *
+BitXorExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn;
+
+ pn = BitAndExpr(cx, ts, tc);
+ while (pn && js_MatchToken(cx, ts, TOK_BITXOR)) {
+ pn = NewBinary(cx, TOK_BITXOR, JSOP_BITXOR, pn, BitAndExpr(cx, ts, tc),
+ tc);
+ }
+ return pn;
+}
+
+static JSParseNode *
+BitAndExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn;
+
+ pn = EqExpr(cx, ts, tc);
+ while (pn && js_MatchToken(cx, ts, TOK_BITAND))
+ pn = NewBinary(cx, TOK_BITAND, JSOP_BITAND, pn, EqExpr(cx, ts, tc), tc);
+ return pn;
+}
+
+static JSParseNode *
+EqExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn;
+ JSOp op;
+
+ pn = RelExpr(cx, ts, tc);
+ while (pn && js_MatchToken(cx, ts, TOK_EQOP)) {
+ op = CURRENT_TOKEN(ts).t_op;
+ pn = NewBinary(cx, TOK_EQOP, op, pn, RelExpr(cx, ts, tc), tc);
+ }
+ return pn;
+}
+
+static JSParseNode *
+RelExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn;
+ JSTokenType tt;
+ JSOp op;
+ uintN inForInitFlag = tc->flags & TCF_IN_FOR_INIT;
+
+ /*
+ * Uses of the in operator in ShiftExprs are always unambiguous,
+ * so unset the flag that prohibits recognizing it.
+ */
+ tc->flags &= ~TCF_IN_FOR_INIT;
+
+ pn = ShiftExpr(cx, ts, tc);
+ while (pn &&
+ (js_MatchToken(cx, ts, TOK_RELOP) ||
+ /*
+ * Recognize the 'in' token as an operator only if we're not
+ * currently in the init expr of a for loop.
+ */
+ (inForInitFlag == 0 && js_MatchToken(cx, ts, TOK_IN)) ||
+ js_MatchToken(cx, ts, TOK_INSTANCEOF))) {
+ tt = CURRENT_TOKEN(ts).type;
+ op = CURRENT_TOKEN(ts).t_op;
+ pn = NewBinary(cx, tt, op, pn, ShiftExpr(cx, ts, tc), tc);
+ }
+ /* Restore previous state of inForInit flag. */
+ tc->flags |= inForInitFlag;
+
+ return pn;
+}
+
+static JSParseNode *
+ShiftExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn;
+ JSOp op;
+
+ pn = AddExpr(cx, ts, tc);
+ while (pn && js_MatchToken(cx, ts, TOK_SHOP)) {
+ op = CURRENT_TOKEN(ts).t_op;
+ pn = NewBinary(cx, TOK_SHOP, op, pn, AddExpr(cx, ts, tc), tc);
+ }
+ return pn;
+}
+
+static JSParseNode *
+AddExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn;
+ JSTokenType tt;
+ JSOp op;
+
+ pn = MulExpr(cx, ts, tc);
+ while (pn &&
+ (js_MatchToken(cx, ts, TOK_PLUS) ||
+ js_MatchToken(cx, ts, TOK_MINUS))) {
+ tt = CURRENT_TOKEN(ts).type;
+ op = (tt == TOK_PLUS) ? JSOP_ADD : JSOP_SUB;
+ pn = NewBinary(cx, tt, op, pn, MulExpr(cx, ts, tc), tc);
+ }
+ return pn;
+}
+
+static JSParseNode *
+MulExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn;
+ JSTokenType tt;
+ JSOp op;
+
+ pn = UnaryExpr(cx, ts, tc);
+ while (pn &&
+ (js_MatchToken(cx, ts, TOK_STAR) ||
+ js_MatchToken(cx, ts, TOK_DIVOP))) {
+ tt = CURRENT_TOKEN(ts).type;
+ op = CURRENT_TOKEN(ts).t_op;
+ pn = NewBinary(cx, tt, op, pn, UnaryExpr(cx, ts, tc), tc);
+ }
+ return pn;
+}
+
+static JSParseNode *
+SetLvalKid(JSContext *cx, JSTokenStream *ts, JSParseNode *pn, JSParseNode *kid,
+ const char *name)
+{
+ while (kid->pn_type == TOK_RP)
+ kid = kid->pn_kid;
+ if (kid->pn_type != TOK_NAME &&
+ kid->pn_type != TOK_DOT &&
+#if JS_HAS_LVALUE_RETURN
+ (kid->pn_type != TOK_LP || kid->pn_op != JSOP_CALL) &&
+#endif
+#if JS_HAS_XML_SUPPORT
+ (kid->pn_type != TOK_UNARYOP || kid->pn_op != JSOP_XMLNAME) &&
+#endif
+ kid->pn_type != TOK_LB) {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_OPERAND, name);
+ return NULL;
+ }
+ pn->pn_kid = kid;
+ return kid;
+}
+
+static const char incop_name_str[][10] = {"increment", "decrement"};
+
+static JSBool
+SetIncOpKid(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
+ JSParseNode *pn, JSParseNode *kid,
+ JSTokenType tt, JSBool preorder)
+{
+ JSOp op;
+
+ kid = SetLvalKid(cx, ts, pn, kid, incop_name_str[tt == TOK_DEC]);
+ if (!kid)
+ return JS_FALSE;
+ switch (kid->pn_type) {
+ case TOK_NAME:
+ op = (tt == TOK_INC)
+ ? (preorder ? JSOP_INCNAME : JSOP_NAMEINC)
+ : (preorder ? JSOP_DECNAME : JSOP_NAMEDEC);
+ if (kid->pn_atom == cx->runtime->atomState.argumentsAtom)
+ tc->flags |= TCF_FUN_HEAVYWEIGHT;
+ break;
+
+ case TOK_DOT:
+ op = (tt == TOK_INC)
+ ? (preorder ? JSOP_INCPROP : JSOP_PROPINC)
+ : (preorder ? JSOP_DECPROP : JSOP_PROPDEC);
+ break;
+
+#if JS_HAS_LVALUE_RETURN
+ case TOK_LP:
+ JS_ASSERT(kid->pn_op == JSOP_CALL);
+ kid->pn_op = JSOP_SETCALL;
+ /* FALL THROUGH */
+#endif
+#if JS_HAS_XML_SUPPORT
+ case TOK_UNARYOP:
+ if (kid->pn_op == JSOP_XMLNAME)
+ kid->pn_op = JSOP_SETXMLNAME;
+ /* FALL THROUGH */
+#endif
+ case TOK_LB:
+ op = (tt == TOK_INC)
+ ? (preorder ? JSOP_INCELEM : JSOP_ELEMINC)
+ : (preorder ? JSOP_DECELEM : JSOP_ELEMDEC);
+ break;
+
+ default:
+ JS_ASSERT(0);
+ op = JSOP_NOP;
+ }
+ pn->pn_op = op;
+ return JS_TRUE;
+}
+
+static JSParseNode *
+UnaryExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSTokenType tt;
+ JSParseNode *pn, *pn2;
+
+ CHECK_RECURSION();
+
+ ts->flags |= TSF_OPERAND;
+ tt = js_GetToken(cx, ts);
+ ts->flags &= ~TSF_OPERAND;
+
+ switch (tt) {
+ case TOK_UNARYOP:
+ case TOK_PLUS:
+ case TOK_MINUS:
+ pn = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_type = TOK_UNARYOP; /* PLUS and MINUS are binary */
+ pn->pn_op = CURRENT_TOKEN(ts).t_op;
+ pn2 = UnaryExpr(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ pn->pn_pos.end = pn2->pn_pos.end;
+ pn->pn_kid = pn2;
+ break;
+
+ case TOK_INC:
+ case TOK_DEC:
+ pn = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!pn)
+ return NULL;
+ pn2 = MemberExpr(cx, ts, tc, JS_TRUE);
+ if (!pn2)
+ return NULL;
+ if (!SetIncOpKid(cx, ts, tc, pn, pn2, tt, JS_TRUE))
+ return NULL;
+ pn->pn_pos.end = pn2->pn_pos.end;
+ break;
+
+ case TOK_DELETE:
+ pn = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!pn)
+ return NULL;
+ pn2 = UnaryExpr(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ pn->pn_pos.end = pn2->pn_pos.end;
+
+ /*
+ * Under ECMA3, deleting any unary expression is valid -- it simply
+ * returns true. Here we strip off any parentheses.
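+         * E.g. 'delete (2 + 2)' compiles here and simply evaluates to true.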
+ */
+ while (pn2->pn_type == TOK_RP)
+ pn2 = pn2->pn_kid;
+ pn->pn_kid = pn2;
+ break;
+
+ case TOK_ERROR:
+ return NULL;
+
+ default:
+ js_UngetToken(ts);
+ pn = MemberExpr(cx, ts, tc, JS_TRUE);
+ if (!pn)
+ return NULL;
+
+ /* Don't look across a newline boundary for a postfix incop. */
+ if (ON_CURRENT_LINE(ts, pn->pn_pos)) {
+ ts->flags |= TSF_OPERAND;
+ tt = js_PeekTokenSameLine(cx, ts);
+ ts->flags &= ~TSF_OPERAND;
+ if (tt == TOK_INC || tt == TOK_DEC) {
+ (void) js_GetToken(cx, ts);
+ pn2 = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!pn2)
+ return NULL;
+ if (!SetIncOpKid(cx, ts, tc, pn2, pn, tt, JS_FALSE))
+ return NULL;
+ pn2->pn_pos.begin = pn->pn_pos.begin;
+ pn = pn2;
+ }
+ }
+ break;
+ }
+ return pn;
+}
+
+static JSBool
+ArgumentList(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
+ JSParseNode *listNode)
+{
+ JSBool matched;
+
+ ts->flags |= TSF_OPERAND;
+ matched = js_MatchToken(cx, ts, TOK_RP);
+ ts->flags &= ~TSF_OPERAND;
+ if (!matched) {
+ do {
+ JSParseNode *argNode = AssignExpr(cx, ts, tc);
+ if (!argNode)
+ return JS_FALSE;
+#if JS_HAS_GENERATORS
+ if (argNode->pn_type == TOK_YIELD) {
+ js_ReportCompileErrorNumber(cx, argNode,
+ JSREPORT_PN | JSREPORT_ERROR,
+ JSMSG_BAD_YIELD_SYNTAX);
+ return JS_FALSE;
+ }
+#endif
+ PN_APPEND(listNode, argNode);
+ } while (js_MatchToken(cx, ts, TOK_COMMA));
+
+ if (js_GetToken(cx, ts) != TOK_RP) {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_PAREN_AFTER_ARGS);
+ return JS_FALSE;
+ }
+ }
+ return JS_TRUE;
+}
+
+static JSParseNode *
+MemberExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
+ JSBool allowCallSyntax)
+{
+ JSParseNode *pn, *pn2, *pn3;
+ JSTokenType tt;
+
+ CHECK_RECURSION();
+
+ /* Check for new expression first. */
+ ts->flags |= TSF_OPERAND;
+ tt = js_GetToken(cx, ts);
+ ts->flags &= ~TSF_OPERAND;
+ if (tt == TOK_NEW) {
+ pn = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!pn)
+ return NULL;
+ pn2 = MemberExpr(cx, ts, tc, JS_FALSE);
+ if (!pn2)
+ return NULL;
+ pn->pn_op = JSOP_NEW;
+ PN_INIT_LIST_1(pn, pn2);
+ pn->pn_pos.begin = pn2->pn_pos.begin;
+
+ if (js_MatchToken(cx, ts, TOK_LP) && !ArgumentList(cx, ts, tc, pn))
+ return NULL;
+ if (pn->pn_count > ARGC_LIMIT) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_TOO_MANY_CON_ARGS);
+ return NULL;
+ }
+ pn->pn_pos.end = PN_LAST(pn)->pn_pos.end;
+ } else {
+ pn = PrimaryExpr(cx, ts, tc, tt, JS_FALSE);
+ if (!pn)
+ return NULL;
+
+ if (pn->pn_type == TOK_ANYNAME ||
+ pn->pn_type == TOK_AT ||
+ pn->pn_type == TOK_DBLCOLON) {
+ pn2 = NewOrRecycledNode(cx, tc);
+ if (!pn2)
+ return NULL;
+ pn2->pn_type = TOK_UNARYOP;
+ pn2->pn_pos = pn->pn_pos;
+ pn2->pn_op = JSOP_XMLNAME;
+ pn2->pn_arity = PN_UNARY;
+ pn2->pn_kid = pn;
+ pn2->pn_next = NULL;
+ pn2->pn_ts = ts;
+ pn2->pn_source = NULL;
+ pn = pn2;
+ }
+ }
+
+ while ((tt = js_GetToken(cx, ts)) > TOK_EOF) {
+ if (tt == TOK_DOT) {
+ pn2 = NewParseNode(cx, ts, PN_NAME, tc);
+ if (!pn2)
+ return NULL;
+ pn2->pn_slot = -1;
+ pn2->pn_attrs = 0;
+#if JS_HAS_XML_SUPPORT
+ ts->flags |= TSF_OPERAND | TSF_KEYWORD_IS_NAME;
+ tt = js_GetToken(cx, ts);
+ ts->flags &= ~(TSF_OPERAND | TSF_KEYWORD_IS_NAME);
+ pn3 = PrimaryExpr(cx, ts, tc, tt, JS_TRUE);
+ if (!pn3)
+ return NULL;
+ tt = pn3->pn_type;
+ if (tt == TOK_NAME ||
+ (tt == TOK_DBLCOLON &&
+ pn3->pn_arity == PN_NAME &&
+ pn3->pn_expr->pn_type == TOK_FUNCTION)) {
+ pn2->pn_op = (tt == TOK_NAME) ? JSOP_GETPROP : JSOP_GETMETHOD;
+ pn2->pn_expr = pn;
+ pn2->pn_atom = pn3->pn_atom;
+ RecycleTree(pn3, tc);
+ } else {
+ if (TOKEN_TYPE_IS_XML(tt)) {
+ pn2->pn_type = TOK_LB;
+ pn2->pn_op = JSOP_GETELEM;
+ } else if (tt == TOK_RP) {
+ JSParseNode *group = pn3;
+
+ /* Recycle the useless TOK_RP/JSOP_GROUP node. */
+ pn3 = group->pn_kid;
+ group->pn_kid = NULL;
+ RecycleTree(group, tc);
+ pn2->pn_type = TOK_FILTER;
+ pn2->pn_op = JSOP_FILTER;
+
+ /* A filtering predicate is like a with statement. */
+ tc->flags |= TCF_FUN_HEAVYWEIGHT;
+ } else {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_NAME_AFTER_DOT);
+ return NULL;
+ }
+ pn2->pn_arity = PN_BINARY;
+ pn2->pn_left = pn;
+ pn2->pn_right = pn3;
+ }
+#else
+ ts->flags |= TSF_KEYWORD_IS_NAME;
+ MUST_MATCH_TOKEN(TOK_NAME, JSMSG_NAME_AFTER_DOT);
+ ts->flags &= ~TSF_KEYWORD_IS_NAME;
+ pn2->pn_op = JSOP_GETPROP;
+ pn2->pn_expr = pn;
+ pn2->pn_atom = CURRENT_TOKEN(ts).t_atom;
+#endif
+ pn2->pn_pos.begin = pn->pn_pos.begin;
+ pn2->pn_pos.end = CURRENT_TOKEN(ts).pos.end;
+#if JS_HAS_XML_SUPPORT
+ } else if (tt == TOK_DBLDOT) {
+ pn2 = NewParseNode(cx, ts, PN_BINARY, tc);
+ if (!pn2)
+ return NULL;
+ ts->flags |= TSF_OPERAND | TSF_KEYWORD_IS_NAME;
+ tt = js_GetToken(cx, ts);
+ ts->flags &= ~(TSF_OPERAND | TSF_KEYWORD_IS_NAME);
+ pn3 = PrimaryExpr(cx, ts, tc, tt, JS_TRUE);
+ if (!pn3)
+ return NULL;
+ tt = pn3->pn_type;
+ if (tt == TOK_NAME) {
+ pn3->pn_type = TOK_STRING;
+ pn3->pn_arity = PN_NULLARY;
+ pn3->pn_op = JSOP_QNAMEPART;
+ } else if (!TOKEN_TYPE_IS_XML(tt)) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_NAME_AFTER_DOT);
+ return NULL;
+ }
+ pn2->pn_op = JSOP_DESCENDANTS;
+ pn2->pn_left = pn;
+ pn2->pn_right = pn3;
+ pn2->pn_pos.begin = pn->pn_pos.begin;
+ pn2->pn_pos.end = CURRENT_TOKEN(ts).pos.end;
+#endif
+ } else if (tt == TOK_LB) {
+ pn2 = NewParseNode(cx, ts, PN_BINARY, tc);
+ if (!pn2)
+ return NULL;
+ pn3 = Expr(cx, ts, tc);
+ if (!pn3)
+ return NULL;
+
+ MUST_MATCH_TOKEN(TOK_RB, JSMSG_BRACKET_IN_INDEX);
+ pn2->pn_pos.begin = pn->pn_pos.begin;
+ pn2->pn_pos.end = CURRENT_TOKEN(ts).pos.end;
+
+ /* Optimize o['p'] to o.p by rewriting pn2. */
+ if (pn3->pn_type == TOK_STRING) {
+ pn2->pn_type = TOK_DOT;
+ pn2->pn_op = JSOP_GETPROP;
+ pn2->pn_arity = PN_NAME;
+ pn2->pn_expr = pn;
+ pn2->pn_atom = pn3->pn_atom;
+ } else {
+ pn2->pn_op = JSOP_GETELEM;
+ pn2->pn_left = pn;
+ pn2->pn_right = pn3;
+ }
+ } else if (allowCallSyntax && tt == TOK_LP) {
+ pn2 = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!pn2)
+ return NULL;
+
+ /* Pick JSOP_EVAL and flag tc as heavyweight if eval(...). */
+ pn2->pn_op = JSOP_CALL;
+ if (pn->pn_op == JSOP_NAME &&
+ pn->pn_atom == cx->runtime->atomState.evalAtom) {
+ pn2->pn_op = JSOP_EVAL;
+ tc->flags |= TCF_FUN_HEAVYWEIGHT;
+ }
+
+ PN_INIT_LIST_1(pn2, pn);
+ pn2->pn_pos.begin = pn->pn_pos.begin;
+
+ if (!ArgumentList(cx, ts, tc, pn2))
+ return NULL;
+ if (pn2->pn_count > ARGC_LIMIT) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_TOO_MANY_FUN_ARGS);
+ return NULL;
+ }
+ pn2->pn_pos.end = CURRENT_TOKEN(ts).pos.end;
+ } else {
+ js_UngetToken(ts);
+ return pn;
+ }
+
+ pn = pn2;
+ }
+ if (tt == TOK_ERROR)
+ return NULL;
+ return pn;
+}
+
+static JSParseNode *
+BracketedExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ uintN oldflags;
+ JSParseNode *pn;
+
+ /*
+ * Always accept the 'in' operator in a parenthesized expression,
+ * where it's unambiguous, even if we might be parsing the init of a
+ * for statement.
+ */
+ oldflags = tc->flags;
+ tc->flags &= ~TCF_IN_FOR_INIT;
+ pn = Expr(cx, ts, tc);
+ tc->flags = oldflags | (tc->flags & TCF_FUN_FLAGS);
+ return pn;
+}
+
+#if JS_HAS_XML_SUPPORT
+
+static JSParseNode *
+EndBracketedExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn;
+
+ pn = BracketedExpr(cx, ts, tc);
+ if (!pn)
+ return NULL;
+
+ MUST_MATCH_TOKEN(TOK_RB, JSMSG_BRACKET_AFTER_ATTR_EXPR);
+ return pn;
+}
+
+/*
+ * From the ECMA-357 grammar in 11.1.1 and 11.1.2:
+ *
+ * AttributeIdentifier:
+ * @ PropertySelector
+ * @ QualifiedIdentifier
+ * @ [ Expression ]
+ *
+ * PropertySelector:
+ * Identifier
+ * *
+ *
+ * QualifiedIdentifier:
+ * PropertySelector :: PropertySelector
+ * PropertySelector :: [ Expression ]
+ *
+ * We adapt AttributeIdentifier and QualifiedIdentifier to be LL(1), like so:
+ *
+ * AttributeIdentifier:
+ * @ QualifiedIdentifier
+ * @ [ Expression ]
+ *
+ * PropertySelector:
+ * Identifier
+ * *
+ *
+ * QualifiedIdentifier:
+ * PropertySelector :: PropertySelector
+ * PropertySelector :: [ Expression ]
+ * PropertySelector
+ *
+ * As PrimaryExpression: Identifier is in ECMA-262 and we want the semantics
+ * for that rule to result in a name node, but ECMA-357 extends the grammar
+ * to include PrimaryExpression: QualifiedIdentifier, we must factor further:
+ *
+ * QualifiedIdentifier:
+ * PropertySelector QualifiedSuffix
+ *
+ * QualifiedSuffix:
+ * :: PropertySelector
+ * :: [ Expression ]
+ * /nothing/
+ *
+ * And use this production instead of PrimaryExpression: QualifiedIdentifier:
+ *
+ * PrimaryExpression:
+ * Identifier QualifiedSuffix
+ *
+ * We hoist the :: match into callers of QualifiedSuffix, in order to tweak
+ * PropertySelector vs. Identifier pn_arity, pn_op, and other members.
+ */
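+/*
+ * For illustration, the factored rules above cover qualified-name forms such
+ * as ns::name, *::name, and ns::[expr] (QualifiedIdentifier), a bare name or
+ * * (QualifiedSuffix producing nothing), and @name, @ns::name, or @[expr]
+ * (AttributeIdentifier).
+ */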
+static JSParseNode *
+PropertySelector(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn;
+
+ pn = NewParseNode(cx, ts, PN_NULLARY, tc);
+ if (!pn)
+ return NULL;
+ if (pn->pn_type == TOK_STAR) {
+ pn->pn_type = TOK_ANYNAME;
+ pn->pn_op = JSOP_ANYNAME;
+ pn->pn_atom = cx->runtime->atomState.starAtom;
+ } else {
+ JS_ASSERT(pn->pn_type == TOK_NAME);
+ pn->pn_op = JSOP_QNAMEPART;
+ pn->pn_arity = PN_NAME;
+ pn->pn_atom = CURRENT_TOKEN(ts).t_atom;
+ pn->pn_expr = NULL;
+ pn->pn_slot = -1;
+ pn->pn_attrs = 0;
+ }
+ return pn;
+}
+
+static JSParseNode *
+QualifiedSuffix(JSContext *cx, JSTokenStream *ts, JSParseNode *pn,
+ JSTreeContext *tc)
+{
+ JSParseNode *pn2, *pn3;
+ JSTokenType tt;
+
+ JS_ASSERT(CURRENT_TOKEN(ts).type == TOK_DBLCOLON);
+ pn2 = NewParseNode(cx, ts, PN_NAME, tc);
+ if (!pn2)
+ return NULL;
+
+ /* Left operand of :: must be evaluated if it is an identifier. */
+ if (pn->pn_op == JSOP_QNAMEPART)
+ pn->pn_op = JSOP_NAME;
+
+ ts->flags |= TSF_KEYWORD_IS_NAME;
+ tt = js_GetToken(cx, ts);
+ ts->flags &= ~TSF_KEYWORD_IS_NAME;
+ if (tt == TOK_STAR || tt == TOK_NAME) {
+ /* Inline and specialize PropertySelector for JSOP_QNAMECONST. */
+ pn2->pn_op = JSOP_QNAMECONST;
+ pn2->pn_atom = (tt == TOK_STAR)
+ ? cx->runtime->atomState.starAtom
+ : CURRENT_TOKEN(ts).t_atom;
+ pn2->pn_expr = pn;
+ pn2->pn_slot = -1;
+ pn2->pn_attrs = 0;
+ return pn2;
+ }
+
+ if (tt != TOK_LB) {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_SYNTAX_ERROR);
+ return NULL;
+ }
+ pn3 = EndBracketedExpr(cx, ts, tc);
+ if (!pn3)
+ return NULL;
+
+ pn2->pn_op = JSOP_QNAME;
+ pn2->pn_arity = PN_BINARY;
+ pn2->pn_left = pn;
+ pn2->pn_right = pn3;
+ return pn2;
+}
+
+static JSParseNode *
+QualifiedIdentifier(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn;
+
+ pn = PropertySelector(cx, ts, tc);
+ if (!pn)
+ return NULL;
+ if (js_MatchToken(cx, ts, TOK_DBLCOLON))
+ pn = QualifiedSuffix(cx, ts, pn, tc);
+ return pn;
+}
+
+static JSParseNode *
+AttributeIdentifier(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn, *pn2;
+ JSTokenType tt;
+
+ JS_ASSERT(CURRENT_TOKEN(ts).type == TOK_AT);
+ pn = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_op = JSOP_TOATTRNAME;
+ ts->flags |= TSF_KEYWORD_IS_NAME;
+ tt = js_GetToken(cx, ts);
+ ts->flags &= ~TSF_KEYWORD_IS_NAME;
+ if (tt == TOK_STAR || tt == TOK_NAME) {
+ pn2 = QualifiedIdentifier(cx, ts, tc);
+ } else if (tt == TOK_LB) {
+ pn2 = EndBracketedExpr(cx, ts, tc);
+ } else {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_SYNTAX_ERROR);
+ return NULL;
+ }
+ if (!pn2)
+ return NULL;
+ pn->pn_kid = pn2;
+ return pn;
+}
+
+/*
+ * Make a TOK_LC unary node whose pn_kid is an expression.
+ */
+static JSParseNode *
+XMLExpr(JSContext *cx, JSTokenStream *ts, JSBool inTag, JSTreeContext *tc)
+{
+ JSParseNode *pn, *pn2;
+ uintN oldflags;
+
+ JS_ASSERT(CURRENT_TOKEN(ts).type == TOK_LC);
+ pn = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!pn)
+ return NULL;
+
+ /*
+ * Turn off XML tag mode, but don't restore it after parsing this braced
+ * expression. Instead, simply restore ts's old flags. This is required
+ * because XMLExpr is called both from within a tag, and from within text
+ * contained in an element, but outside of any start, end, or point tag.
+ */
+ oldflags = ts->flags;
+ ts->flags = oldflags & ~TSF_XMLTAGMODE;
+ pn2 = Expr(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+
+ MUST_MATCH_TOKEN(TOK_RC, JSMSG_CURLY_IN_XML_EXPR);
+ ts->flags = oldflags;
+ pn->pn_kid = pn2;
+ pn->pn_op = inTag ? JSOP_XMLTAGEXPR : JSOP_XMLELTEXPR;
+ return pn;
+}
+
+/*
+ * Make a terminal node for one of TOK_XMLNAME, TOK_XMLATTR, TOK_XMLSPACE,
+ * TOK_XMLTEXT, TOK_XMLCDATA, TOK_XMLCOMMENT, or TOK_XMLPI. When converting
+ * parse tree to XML, we preserve a TOK_XMLSPACE node only if it's the sole
+ * child of a container tag.
+ */
+static JSParseNode *
+XMLAtomNode(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn;
+ JSToken *tp;
+
+ pn = NewParseNode(cx, ts, PN_NULLARY, tc);
+ if (!pn)
+ return NULL;
+ tp = &CURRENT_TOKEN(ts);
+ pn->pn_op = tp->t_op;
+ pn->pn_atom = tp->t_atom;
+ if (tp->type == TOK_XMLPI)
+ pn->pn_atom2 = tp->t_atom2;
+ return pn;
+}
+
+/*
+ * Parse the productions:
+ *
+ * XMLNameExpr:
+ * XMLName XMLNameExpr?
+ * { Expr } XMLNameExpr?
+ *
+ * Return a PN_LIST, PN_UNARY, or PN_NULLARY according as XMLNameExpr produces
+ * a list of names and/or expressions, a single expression, or a single name.
+ * If PN_LIST or PN_NULLARY, pn_type will be TOK_XMLNAME; if PN_UNARY, pn_type
+ * will be TOK_LC.
+ */
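+/*
+ * For instance, a literal tag name yields a single PN_NULLARY TOK_XMLNAME
+ * node; a fully computed name such as <{expr} .../> yields the PN_UNARY
+ * TOK_LC case; and a name interleaving literal parts with braced expressions
+ * (per the productions above) yields the PN_LIST case, with PNX_CANTFOLD set
+ * on the list.
+ */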
+static JSParseNode *
+XMLNameExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
+{
+ JSParseNode *pn, *pn2, *list;
+ JSTokenType tt;
+
+ pn = list = NULL;
+ do {
+ tt = CURRENT_TOKEN(ts).type;
+ if (tt == TOK_LC) {
+ pn2 = XMLExpr(cx, ts, JS_TRUE, tc);
+ if (!pn2)
+ return NULL;
+ } else {
+ JS_ASSERT(tt == TOK_XMLNAME);
+ pn2 = XMLAtomNode(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ }
+
+ if (!pn) {
+ pn = pn2;
+ } else {
+ if (!list) {
+ list = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!list)
+ return NULL;
+ list->pn_type = TOK_XMLNAME;
+ list->pn_pos.begin = pn->pn_pos.begin;
+ PN_INIT_LIST_1(list, pn);
+ list->pn_extra = PNX_CANTFOLD;
+ pn = list;
+ }
+ pn->pn_pos.end = pn2->pn_pos.end;
+ PN_APPEND(pn, pn2);
+ }
+ } while ((tt = js_GetToken(cx, ts)) == TOK_XMLNAME || tt == TOK_LC);
+
+ js_UngetToken(ts);
+ return pn;
+}
+
+/*
+ * Macro to test whether an XMLNameExpr or XMLTagContent node can be folded
+ * at compile time into a JSXML tree.
+ */
+#define XML_FOLDABLE(pn) ((pn)->pn_arity == PN_LIST \
+ ? ((pn)->pn_extra & PNX_CANTFOLD) == 0 \
+ : (pn)->pn_type != TOK_LC)
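+
+/*
+ * For instance, <a b="1"/> consists only of literal name and attribute parts
+ * and can be folded into a single constant string by FoldXMLConstants below,
+ * whereas <a b={x}/> contains a TOK_LC expression (PNX_CANTFOLD), so its
+ * pieces must be concatenated at runtime.
+ */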
+
+/*
+ * Parse the productions:
+ *
+ * XMLTagContent:
+ * XMLNameExpr
+ * XMLTagContent S XMLNameExpr S? = S? XMLAttr
+ * XMLTagContent S XMLNameExpr S? = S? { Expr }
+ *
+ * Return a PN_LIST, PN_UNARY, or PN_NULLARY according to whether XMLTagContent
+ * produces a list of name and attribute values and/or braced expressions, a
+ * single expression, or a single name.
+ *
+ * If PN_LIST or PN_NULLARY, pn_type will be TOK_XMLNAME for the case where
+ * XMLTagContent: XMLNameExpr. If pn_type is not TOK_XMLNAME but pn_arity is
+ * PN_LIST, pn_type will be tagtype. If PN_UNARY, pn_type will be TOK_LC and
+ * we parsed exactly one expression.
+ */
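+/*
+ * For a start tag such as <a b="1" c={x}>, for example, the result is a
+ * PN_LIST of type tagtype whose children are the tag-name node followed by
+ * alternating attribute-name and attribute-value nodes; the braced value
+ * sets PNX_CANTFOLD on the list because it must be evaluated at runtime.
+ */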
+static JSParseNode *
+XMLTagContent(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
+ JSTokenType tagtype, JSAtom **namep)
+{
+ JSParseNode *pn, *pn2, *list;
+ JSTokenType tt;
+
+ pn = XMLNameExpr(cx, ts, tc);
+ if (!pn)
+ return NULL;
+ *namep = (pn->pn_arity == PN_NULLARY) ? pn->pn_atom : NULL;
+ list = NULL;
+
+ while (js_MatchToken(cx, ts, TOK_XMLSPACE)) {
+ tt = js_GetToken(cx, ts);
+ if (tt != TOK_XMLNAME && tt != TOK_LC) {
+ js_UngetToken(ts);
+ break;
+ }
+
+ pn2 = XMLNameExpr(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+ if (!list) {
+ list = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!list)
+ return NULL;
+ list->pn_type = tagtype;
+ list->pn_pos.begin = pn->pn_pos.begin;
+ PN_INIT_LIST_1(list, pn);
+ pn = list;
+ }
+ PN_APPEND(pn, pn2);
+ if (!XML_FOLDABLE(pn2))
+ pn->pn_extra |= PNX_CANTFOLD;
+
+ js_MatchToken(cx, ts, TOK_XMLSPACE);
+ MUST_MATCH_TOKEN(TOK_ASSIGN, JSMSG_NO_ASSIGN_IN_XML_ATTR);
+ js_MatchToken(cx, ts, TOK_XMLSPACE);
+
+ tt = js_GetToken(cx, ts);
+ if (tt == TOK_XMLATTR) {
+ pn2 = XMLAtomNode(cx, ts, tc);
+ } else if (tt == TOK_LC) {
+ pn2 = XMLExpr(cx, ts, JS_TRUE, tc);
+ pn->pn_extra |= PNX_CANTFOLD;
+ } else {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_XML_ATTR_VALUE);
+ return NULL;
+ }
+ if (!pn2)
+ return NULL;
+ pn->pn_pos.end = pn2->pn_pos.end;
+ PN_APPEND(pn, pn2);
+ }
+
+ return pn;
+}
+
+#define XML_CHECK_FOR_ERROR_AND_EOF(tt,result) \
+ JS_BEGIN_MACRO \
+ if ((tt) <= TOK_EOF) { \
+ if ((tt) == TOK_EOF) { \
+ js_ReportCompileErrorNumber(cx, ts, \
+ JSREPORT_TS | JSREPORT_ERROR, \
+ JSMSG_END_OF_XML_SOURCE); \
+ } \
+ return result; \
+ } \
+ JS_END_MACRO
+
+static JSParseNode *
+XMLElementOrList(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
+ JSBool allowList);
+
+/*
+ * Consume XML element tag content, including the TOK_XMLETAGO (</) sequence
+ * that opens the end tag for the container.
+ */
+static JSBool
+XMLElementContent(JSContext *cx, JSTokenStream *ts, JSParseNode *pn,
+ JSTreeContext *tc)
+{
+ JSTokenType tt;
+ JSParseNode *pn2;
+ JSAtom *textAtom;
+
+ ts->flags &= ~TSF_XMLTAGMODE;
+ for (;;) {
+ ts->flags |= TSF_XMLTEXTMODE;
+ tt = js_GetToken(cx, ts);
+ ts->flags &= ~TSF_XMLTEXTMODE;
+ XML_CHECK_FOR_ERROR_AND_EOF(tt, JS_FALSE);
+
+ JS_ASSERT(tt == TOK_XMLSPACE || tt == TOK_XMLTEXT);
+ textAtom = CURRENT_TOKEN(ts).t_atom;
+ if (textAtom) {
+ /* Non-zero-length XML text scanned. */
+ pn2 = XMLAtomNode(cx, ts, tc);
+ if (!pn2)
+ return JS_FALSE;
+ pn->pn_pos.end = pn2->pn_pos.end;
+ PN_APPEND(pn, pn2);
+ }
+
+ ts->flags |= TSF_OPERAND;
+ tt = js_GetToken(cx, ts);
+ ts->flags &= ~TSF_OPERAND;
+ XML_CHECK_FOR_ERROR_AND_EOF(tt, JS_FALSE);
+ if (tt == TOK_XMLETAGO)
+ break;
+
+ if (tt == TOK_LC) {
+ pn2 = XMLExpr(cx, ts, JS_FALSE, tc);
+ pn->pn_extra |= PNX_CANTFOLD;
+ } else if (tt == TOK_XMLSTAGO) {
+ pn2 = XMLElementOrList(cx, ts, tc, JS_FALSE);
+ if (pn2) {
+ pn2->pn_extra &= ~PNX_XMLROOT;
+ pn->pn_extra |= pn2->pn_extra;
+ }
+ } else {
+ JS_ASSERT(tt == TOK_XMLCDATA || tt == TOK_XMLCOMMENT ||
+ tt == TOK_XMLPI);
+ pn2 = XMLAtomNode(cx, ts, tc);
+ }
+ if (!pn2)
+ return JS_FALSE;
+ pn->pn_pos.end = pn2->pn_pos.end;
+ PN_APPEND(pn, pn2);
+ }
+
+ JS_ASSERT(CURRENT_TOKEN(ts).type == TOK_XMLETAGO);
+ ts->flags |= TSF_XMLTAGMODE;
+ return JS_TRUE;
+}
+
+/*
+ * Return a PN_LIST node containing an XML or XMLList Initialiser.
+ */
+static JSParseNode *
+XMLElementOrList(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
+ JSBool allowList)
+{
+ JSParseNode *pn, *pn2, *list;
+ JSBool hadSpace;
+ JSTokenType tt;
+ JSAtom *startAtom, *endAtom;
+
+ CHECK_RECURSION();
+
+ JS_ASSERT(CURRENT_TOKEN(ts).type == TOK_XMLSTAGO);
+ pn = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!pn)
+ return NULL;
+
+ ts->flags |= TSF_XMLTAGMODE;
+ hadSpace = js_MatchToken(cx, ts, TOK_XMLSPACE);
+ tt = js_GetToken(cx, ts);
+ if (tt == TOK_ERROR)
+ return NULL;
+
+ if (tt == TOK_XMLNAME || tt == TOK_LC) {
+ /*
+ * XMLElement. Append the tag and its contents, if any, to pn.
+ */
+ pn2 = XMLTagContent(cx, ts, tc, TOK_XMLSTAGO, &startAtom);
+ if (!pn2)
+ return NULL;
+ js_MatchToken(cx, ts, TOK_XMLSPACE);
+
+ tt = js_GetToken(cx, ts);
+ if (tt == TOK_XMLPTAGC) {
+ /* Point tag (/>): recycle pn if pn2 is a list of tag contents. */
+ if (pn2->pn_type == TOK_XMLSTAGO) {
+ PN_INIT_LIST(pn);
+ RecycleTree(pn, tc);
+ pn = pn2;
+ } else {
+ JS_ASSERT(pn2->pn_type == TOK_XMLNAME ||
+ pn2->pn_type == TOK_LC);
+ PN_INIT_LIST_1(pn, pn2);
+ if (!XML_FOLDABLE(pn2))
+ pn->pn_extra |= PNX_CANTFOLD;
+ }
+ pn->pn_type = TOK_XMLPTAGC;
+ pn->pn_extra |= PNX_XMLROOT;
+ } else {
+ /* We had better have a tag-close (>) at this point. */
+ if (tt != TOK_XMLTAGC) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_XML_TAG_SYNTAX);
+ return NULL;
+ }
+ pn2->pn_pos.end = CURRENT_TOKEN(ts).pos.end;
+
+ /* Make sure pn2 is a TOK_XMLSTAGO list containing tag contents. */
+ if (pn2->pn_type != TOK_XMLSTAGO) {
+ PN_INIT_LIST_1(pn, pn2);
+ if (!XML_FOLDABLE(pn2))
+ pn->pn_extra |= PNX_CANTFOLD;
+ pn2 = pn;
+ pn = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!pn)
+ return NULL;
+ }
+
+ /* Now make pn a nominal-root TOK_XMLELEM list containing pn2. */
+ pn->pn_type = TOK_XMLELEM;
+ PN_INIT_LIST_1(pn, pn2);
+ if (!XML_FOLDABLE(pn2))
+ pn->pn_extra |= PNX_CANTFOLD;
+ pn->pn_extra |= PNX_XMLROOT;
+
+ /* Get element contents and delimiting end-tag-open sequence. */
+ if (!XMLElementContent(cx, ts, pn, tc))
+ return NULL;
+
+ js_MatchToken(cx, ts, TOK_XMLSPACE);
+ tt = js_GetToken(cx, ts);
+ XML_CHECK_FOR_ERROR_AND_EOF(tt, NULL);
+ if (tt != TOK_XMLNAME && tt != TOK_LC) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_XML_TAG_SYNTAX);
+ return NULL;
+ }
+
+ /* Parse end tag; check mismatch at compile-time if we can. */
+ pn2 = XMLTagContent(cx, ts, tc, TOK_XMLETAGO, &endAtom);
+ if (!pn2)
+ return NULL;
+ if (pn2->pn_type == TOK_XMLETAGO) {
+ /* Oops, end tag has attributes! */
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_XML_TAG_SYNTAX);
+ return NULL;
+ }
+ if (endAtom && startAtom && endAtom != startAtom) {
+ JSString *str = ATOM_TO_STRING(startAtom);
+
+ /* End vs. start tag name mismatch: point to the tag name. */
+ js_ReportCompileErrorNumberUC(cx, pn2,
+ JSREPORT_PN | JSREPORT_ERROR,
+ JSMSG_XML_TAG_NAME_MISMATCH,
+ JSSTRING_CHARS(str));
+ return NULL;
+ }
+
+ /* Make a TOK_XMLETAGO list with pn2 as its single child. */
+ JS_ASSERT(pn2->pn_type == TOK_XMLNAME || pn2->pn_type == TOK_LC);
+ list = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!list)
+ return NULL;
+ list->pn_type = TOK_XMLETAGO;
+ PN_INIT_LIST_1(list, pn2);
+ PN_APPEND(pn, list);
+ if (!XML_FOLDABLE(pn2)) {
+ list->pn_extra |= PNX_CANTFOLD;
+ pn->pn_extra |= PNX_CANTFOLD;
+ }
+
+ js_MatchToken(cx, ts, TOK_XMLSPACE);
+ MUST_MATCH_TOKEN(TOK_XMLTAGC, JSMSG_BAD_XML_TAG_SYNTAX);
+ }
+
+ /* Set pn_op now that pn has been updated to its final value. */
+ pn->pn_op = JSOP_TOXML;
+ } else if (!hadSpace && allowList && tt == TOK_XMLTAGC) {
+ /* XMLList Initialiser. */
+ pn->pn_type = TOK_XMLLIST;
+ pn->pn_op = JSOP_TOXMLLIST;
+ PN_INIT_LIST(pn);
+ pn->pn_extra |= PNX_XMLROOT;
+ if (!XMLElementContent(cx, ts, pn, tc))
+ return NULL;
+
+ MUST_MATCH_TOKEN(TOK_XMLTAGC, JSMSG_BAD_XML_LIST_SYNTAX);
+ } else {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_XML_NAME_SYNTAX);
+ return NULL;
+ }
+
+ pn->pn_pos.end = CURRENT_TOKEN(ts).pos.end;
+ ts->flags &= ~TSF_XMLTAGMODE;
+ return pn;
+}
+
+static JSParseNode *
+XMLElementOrListRoot(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
+ JSBool allowList)
+{
+ uint32 oldopts;
+ JSParseNode *pn;
+
+ /*
+ * Force XML support to be enabled so that comments and CDATA literals
+ * are recognized, instead of <! followed by -- starting an HTML comment
+ * to end of line (used in script tags to hide content from old browsers
+ * that don't recognize <script>).
+ */
+ oldopts = JS_SetOptions(cx, cx->options | JSOPTION_XML);
+ pn = XMLElementOrList(cx, ts, tc, allowList);
+ JS_SetOptions(cx, oldopts);
+ return pn;
+}
+
+JS_FRIEND_API(JSParseNode *)
+js_ParseXMLTokenStream(JSContext *cx, JSObject *chain, JSTokenStream *ts,
+ JSBool allowList)
+{
+ JSStackFrame *fp, frame;
+ JSParseNode *pn;
+ JSTreeContext tc;
+ JSTokenType tt;
+
+ /*
+ * Push a compiler frame if we have no frames, or if the top frame is a
+ * lightweight function activation, or if its scope chain doesn't match
+ * the one passed to us.
+ */
+ fp = cx->fp;
+ MaybeSetupFrame(cx, chain, fp, &frame);
+ JS_KEEP_ATOMS(cx->runtime);
+ TREE_CONTEXT_INIT(&tc);
+
+ /* Set XML-only mode to turn off special treatment of {expr} in XML. */
+ ts->flags |= TSF_OPERAND | TSF_XMLONLYMODE;
+ tt = js_GetToken(cx, ts);
+ ts->flags &= ~TSF_OPERAND;
+
+ if (tt != TOK_XMLSTAGO) {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_XML_MARKUP);
+ pn = NULL;
+ } else {
+ pn = XMLElementOrListRoot(cx, ts, &tc, allowList);
+ }
+
+ ts->flags &= ~TSF_XMLONLYMODE;
+ TREE_CONTEXT_FINISH(&tc);
+ JS_UNKEEP_ATOMS(cx->runtime);
+ cx->fp = fp;
+ return pn;
+}
+
+#endif /* JS_HAS_XML_SUPPORT */
+
+static JSParseNode *
+PrimaryExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
+ JSTokenType tt, JSBool afterDot)
+{
+ JSParseNode *pn, *pn2, *pn3;
+ JSOp op;
+
+#if JS_HAS_SHARP_VARS
+ JSParseNode *defsharp;
+ JSBool notsharp;
+
+ defsharp = NULL;
+ notsharp = JS_FALSE;
+ again:
+ /*
+ * Control flows here after #n= is scanned. If the following primary is
+ * not valid after such a "sharp variable" definition, the tt switch case
+ * should set notsharp.
+ */
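+    /*
+     * For example, #1={next: #1#} scans #1= as TOK_DEFSHARP and the later
+     * #1# as TOK_USESHARP, so the object initialiser can refer back to the
+     * object being defined.
+     */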
+#endif
+
+ CHECK_RECURSION();
+
+#if JS_HAS_GETTER_SETTER
+ if (tt == TOK_NAME) {
+ tt = CheckGetterOrSetter(cx, ts, TOK_FUNCTION);
+ if (tt == TOK_ERROR)
+ return NULL;
+ }
+#endif
+
+ switch (tt) {
+ case TOK_FUNCTION:
+#if JS_HAS_XML_SUPPORT
+ ts->flags |= TSF_KEYWORD_IS_NAME;
+ if (js_MatchToken(cx, ts, TOK_DBLCOLON)) {
+ ts->flags &= ~TSF_KEYWORD_IS_NAME;
+ pn2 = NewParseNode(cx, ts, PN_NULLARY, tc);
+ if (!pn2)
+ return NULL;
+ pn2->pn_type = TOK_FUNCTION;
+ pn = QualifiedSuffix(cx, ts, pn2, tc);
+ if (!pn)
+ return NULL;
+ break;
+ }
+ ts->flags &= ~TSF_KEYWORD_IS_NAME;
+#endif
+ pn = FunctionExpr(cx, ts, tc);
+ if (!pn)
+ return NULL;
+ break;
+
+ case TOK_LB:
+ {
+ JSBool matched;
+ jsuint index;
+
+ pn = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_type = TOK_RB;
+
+#if JS_HAS_SHARP_VARS
+ if (defsharp) {
+ PN_INIT_LIST_1(pn, defsharp);
+ defsharp = NULL;
+ } else
+#endif
+ PN_INIT_LIST(pn);
+
+ ts->flags |= TSF_OPERAND;
+ matched = js_MatchToken(cx, ts, TOK_RB);
+ ts->flags &= ~TSF_OPERAND;
+ if (!matched) {
+ for (index = 0; ; index++) {
+ if (index == ARRAY_INIT_LIMIT) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_ARRAY_INIT_TOO_BIG);
+ return NULL;
+ }
+
+ ts->flags |= TSF_OPERAND;
+ tt = js_PeekToken(cx, ts);
+ ts->flags &= ~TSF_OPERAND;
+ if (tt == TOK_RB) {
+ pn->pn_extra |= PNX_ENDCOMMA;
+ break;
+ }
+
+ if (tt == TOK_COMMA) {
+ /* So CURRENT_TOKEN gets TOK_COMMA and not TOK_LB. */
+ js_MatchToken(cx, ts, TOK_COMMA);
+ pn2 = NewParseNode(cx, ts, PN_NULLARY, tc);
+ } else {
+ pn2 = AssignExpr(cx, ts, tc);
+ }
+ if (!pn2)
+ return NULL;
+ PN_APPEND(pn, pn2);
+
+ if (tt != TOK_COMMA) {
+ /* If we didn't already match TOK_COMMA in above case. */
+ if (!js_MatchToken(cx, ts, TOK_COMMA))
+ break;
+ }
+ }
+
+#if JS_HAS_GENERATORS
+ /*
+ * At this point, (index == 0 && pn->pn_count != 0) implies one
+ * element initialiser was parsed (possibly with a defsharp before
+ * the left bracket).
+ *
+ * An array comprehension of the form:
+ *
+ * [i * j for (i in o) for (j in p) if (i != j)]
+ *
+ * translates to roughly the following let expression:
+ *
+ * let (array = new Array, i, j) {
+ * for (i in o) let {
+ * for (j in p)
+ * if (i != j)
+ * array.push(i * j)
+ * }
+ * array
+ * }
+ *
+ * where array is a nameless block-local variable. The "roughly"
+ * means that an implementation may optimize away the array.push.
+ * An array comprehension opens exactly one block scope, no matter
+ * how many for heads it contains.
+ *
+ * Each let () {...} or for (let ...) ... compiles to:
+ *
+ * JSOP_ENTERBLOCK <o> ... JSOP_LEAVEBLOCK <n>
+ *
+ * where <o> is a literal object representing the block scope,
+ * with <n> properties, naming each var declared in the block.
+ *
+ * Each var declaration in a let-block binds a name in <o> at
+ * compile time, and allocates a slot on the operand stack at
+ * runtime via JSOP_ENTERBLOCK. A block-local var is accessed
+ * by the JSOP_GETLOCAL and JSOP_SETLOCAL ops, and iterated with
+ * JSOP_FORLOCAL. These ops all have an immediate operand, the
+ * local slot's stack index from fp->spbase.
+ *
+ * The array comprehension iteration step, array.push(i * j) in
+ * the example above, is done by <i * j>; JSOP_ARRAYCOMP <array>,
+ * where <array> is the index of array's stack slot.
+ */
+ if (index == 0 &&
+ pn->pn_count != 0 &&
+ js_MatchToken(cx, ts, TOK_FOR)) {
+ JSParseNode **pnp, *pnexp, *pntop, *pnlet;
+ BindData data;
+ JSRuntime *rt;
+ JSStmtInfo stmtInfo;
+ JSAtom *atom;
+
+ /* Relabel pn as an array comprehension node. */
+ pn->pn_type = TOK_ARRAYCOMP;
+
+ /*
+ * Remove the comprehension expression from pn's linked list
+ * and save it via pnexp. We'll re-install it underneath the
+ * ARRAYPUSH node after we parse the rest of the comprehension.
+ */
+ pnexp = PN_LAST(pn);
+ JS_ASSERT(pn->pn_count == 1 || pn->pn_count == 2);
+ pn->pn_tail = (--pn->pn_count == 1)
+ ? &pn->pn_head->pn_next
+ : &pn->pn_head;
+ *pn->pn_tail = NULL;
+
+ /*
+ * Make a parse-node and literal object representing the array
+ * comprehension's block scope.
+ */
+ pntop = PushLexicalScope(cx, ts, tc, &stmtInfo);
+ if (!pntop)
+ return NULL;
+ pnp = &pntop->pn_expr;
+
+ data.pn = NULL;
+ data.ts = ts;
+ data.obj = tc->blockChain;
+ data.op = JSOP_NOP;
+ data.binder = BindLet;
+ data.u.let.index = 0;
+ data.u.let.overflow = JSMSG_ARRAY_INIT_TOO_BIG;
+
+ rt = cx->runtime;
+ do {
+ /*
+ * FOR node is binary, left is control and right is body.
+ * Use index to count each block-local let-variable on the
+ * left-hand side of IN.
+ */
+ pn2 = NewParseNode(cx, ts, PN_BINARY, tc);
+ if (!pn2)
+ return NULL;
+
+ pn2->pn_op = JSOP_FORIN;
+ if (js_MatchToken(cx, ts, TOK_NAME)) {
+ if (CURRENT_TOKEN(ts).t_atom == rt->atomState.eachAtom)
+ pn2->pn_op = JSOP_FOREACH;
+ else
+ js_UngetToken(ts);
+ }
+ MUST_MATCH_TOKEN(TOK_LP, JSMSG_PAREN_AFTER_FOR);
+
+ tt = js_GetToken(cx, ts);
+ switch (tt) {
+#if JS_HAS_DESTRUCTURING
+ case TOK_LB:
+ case TOK_LC:
+ pnlet = DestructuringExpr(cx, &data, tc, tt);
+ if (!pnlet)
+ return NULL;
+
+ if (pnlet->pn_type != TOK_RB || pnlet->pn_count != 2) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS |
+ JSREPORT_ERROR,
+ JSMSG_BAD_FOR_LEFTSIDE);
+ return NULL;
+ }
+
+ /* Destructuring requires [key, value] enumeration. */
+ if (pn2->pn_op != JSOP_FOREACH)
+ pn2->pn_op = JSOP_FOREACHKEYVAL;
+ break;
+#endif
+
+ case TOK_NAME:
+ atom = CURRENT_TOKEN(ts).t_atom;
+ if (!data.binder(cx, &data, atom, tc))
+ return NULL;
+
+ /*
+ * Create a name node with op JSOP_NAME. We can't set
+ * op to JSOP_GETLOCAL here, because we don't yet know
+ * the block's depth in the operand stack frame. The
+ * code generator computes that, and it tries to bind
+ * all names to slots, so we must let it do the deed.
+ */
+ pnlet = NewParseNode(cx, ts, PN_NAME, tc);
+ if (!pnlet)
+ return NULL;
+ pnlet->pn_op = JSOP_NAME;
+ pnlet->pn_atom = atom;
+ pnlet->pn_expr = NULL;
+ pnlet->pn_slot = -1;
+ pnlet->pn_attrs = 0;
+ break;
+
+ default:
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS|JSREPORT_ERROR,
+ JSMSG_NO_VARIABLE_NAME);
+ return NULL;
+ }
+
+ MUST_MATCH_TOKEN(TOK_IN, JSMSG_IN_AFTER_FOR_NAME);
+ pn3 = NewBinary(cx, TOK_IN, JSOP_NOP, pnlet,
+ Expr(cx, ts, tc), tc);
+ if (!pn3)
+ return NULL;
+
+ MUST_MATCH_TOKEN(TOK_RP, JSMSG_PAREN_AFTER_FOR_CTRL);
+ pn2->pn_left = pn3;
+ *pnp = pn2;
+ pnp = &pn2->pn_right;
+ } while (js_MatchToken(cx, ts, TOK_FOR));
+
+ if (js_MatchToken(cx, ts, TOK_IF)) {
+ pn2 = NewParseNode(cx, ts, PN_TERNARY, tc);
+ if (!pn2)
+ return NULL;
+ pn2->pn_kid1 = Condition(cx, ts, tc);
+ if (!pn2->pn_kid1)
+ return NULL;
+ pn2->pn_kid2 = NULL;
+ pn2->pn_kid3 = NULL;
+ *pnp = pn2;
+ pnp = &pn2->pn_kid2;
+ }
+
+ pn2 = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!pn2)
+ return NULL;
+ pn2->pn_type = TOK_ARRAYPUSH;
+ pn2->pn_op = JSOP_ARRAYPUSH;
+ pn2->pn_kid = pnexp;
+ *pnp = pn2;
+ PN_APPEND(pn, pntop);
+
+ js_PopStatement(tc);
+ }
+#endif /* JS_HAS_GENERATORS */
+
+ MUST_MATCH_TOKEN(TOK_RB, JSMSG_BRACKET_AFTER_LIST);
+ }
+ pn->pn_pos.end = CURRENT_TOKEN(ts).pos.end;
+ return pn;
+ }
+
+#if JS_HAS_BLOCK_SCOPE
+ case TOK_LET:
+ pn = LetBlock(cx, ts, tc, JS_FALSE);
+ if (!pn)
+ return NULL;
+ break;
+#endif
+
+ case TOK_LC:
+ {
+ JSBool afterComma;
+
+ pn = NewParseNode(cx, ts, PN_LIST, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_type = TOK_RC;
+
+#if JS_HAS_SHARP_VARS
+ if (defsharp) {
+ PN_INIT_LIST_1(pn, defsharp);
+ defsharp = NULL;
+ } else
+#endif
+ PN_INIT_LIST(pn);
+
+ afterComma = JS_FALSE;
+ for (;;) {
+ ts->flags |= TSF_KEYWORD_IS_NAME;
+ tt = js_GetToken(cx, ts);
+ ts->flags &= ~TSF_KEYWORD_IS_NAME;
+ switch (tt) {
+ case TOK_NUMBER:
+ pn3 = NewParseNode(cx, ts, PN_NULLARY, tc);
+ if (pn3)
+ pn3->pn_dval = CURRENT_TOKEN(ts).t_dval;
+ break;
+ case TOK_NAME:
+#if JS_HAS_GETTER_SETTER
+ {
+ JSAtom *atom;
+ JSRuntime *rt;
+
+ atom = CURRENT_TOKEN(ts).t_atom;
+ rt = cx->runtime;
+ if (atom == rt->atomState.getAtom ||
+ atom == rt->atomState.setAtom) {
+ op = (atom == rt->atomState.getAtom)
+ ? JSOP_GETTER
+ : JSOP_SETTER;
+ if (js_MatchToken(cx, ts, TOK_NAME)) {
+ pn3 = NewParseNode(cx, ts, PN_NAME, tc);
+ if (!pn3)
+ return NULL;
+ pn3->pn_atom = CURRENT_TOKEN(ts).t_atom;
+ pn3->pn_expr = NULL;
+ pn3->pn_slot = -1;
+ pn3->pn_attrs = 0;
+
+ /* We have to fake a 'function' token here. */
+ CURRENT_TOKEN(ts).t_op = JSOP_NOP;
+ CURRENT_TOKEN(ts).type = TOK_FUNCTION;
+ pn2 = FunctionExpr(cx, ts, tc);
+ pn2 = NewBinary(cx, TOK_COLON, op, pn3, pn2, tc);
+ goto skip;
+ }
+ }
+ /* else fall thru ... */
+ }
+#endif
+ case TOK_STRING:
+ pn3 = NewParseNode(cx, ts, PN_NULLARY, tc);
+ if (pn3)
+ pn3->pn_atom = CURRENT_TOKEN(ts).t_atom;
+ break;
+ case TOK_RC:
+ if (afterComma &&
+ !js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS |
+ JSREPORT_WARNING |
+ JSREPORT_STRICT,
+ JSMSG_TRAILING_COMMA)) {
+ return NULL;
+ }
+ goto end_obj_init;
+ default:
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_PROP_ID);
+ return NULL;
+ }
+
+ tt = js_GetToken(cx, ts);
+#if JS_HAS_GETTER_SETTER
+ if (tt == TOK_NAME) {
+ tt = CheckGetterOrSetter(cx, ts, TOK_COLON);
+ if (tt == TOK_ERROR)
+ return NULL;
+ }
+#endif
+ if (tt != TOK_COLON) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_COLON_AFTER_ID);
+ return NULL;
+ }
+ op = CURRENT_TOKEN(ts).t_op;
+ pn2 = NewBinary(cx, TOK_COLON, op, pn3, AssignExpr(cx, ts, tc), tc);
+#if JS_HAS_GETTER_SETTER
+ skip:
+#endif
+ if (!pn2)
+ return NULL;
+ PN_APPEND(pn, pn2);
+
+ tt = js_GetToken(cx, ts);
+ if (tt == TOK_RC)
+ goto end_obj_init;
+ if (tt != TOK_COMMA) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_CURLY_AFTER_LIST);
+ return NULL;
+ }
+ afterComma = JS_TRUE;
+ }
+ end_obj_init:
+ pn->pn_pos.end = CURRENT_TOKEN(ts).pos.end;
+ return pn;
+ }
+
+#if JS_HAS_SHARP_VARS
+ case TOK_DEFSHARP:
+ if (defsharp)
+ goto badsharp;
+ defsharp = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!defsharp)
+ return NULL;
+ defsharp->pn_kid = NULL;
+ defsharp->pn_num = (jsint) CURRENT_TOKEN(ts).t_dval;
+ ts->flags |= TSF_OPERAND;
+ tt = js_GetToken(cx, ts);
+ ts->flags &= ~TSF_OPERAND;
+ goto again;
+
+ case TOK_USESHARP:
+ /* Check for forward/dangling references at runtime, to allow eval. */
+ pn = NewParseNode(cx, ts, PN_NULLARY, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_num = (jsint) CURRENT_TOKEN(ts).t_dval;
+ notsharp = JS_TRUE;
+ break;
+#endif /* JS_HAS_SHARP_VARS */
+
+ case TOK_LP:
+ pn = NewParseNode(cx, ts, PN_UNARY, tc);
+ if (!pn)
+ return NULL;
+ pn2 = BracketedExpr(cx, ts, tc);
+ if (!pn2)
+ return NULL;
+
+ MUST_MATCH_TOKEN(TOK_RP, JSMSG_PAREN_IN_PAREN);
+ if (pn2->pn_type == TOK_RP ||
+ (js_CodeSpec[pn2->pn_op].prec >= js_CodeSpec[JSOP_GETPROP].prec &&
+ !afterDot)) {
+ /*
+ * Avoid redundant JSOP_GROUP opcodes, for efficiency and mainly
+ * to help the decompiler look ahead from a JSOP_ENDINIT to see a
+ * JSOP_GROUP followed by a POP or POPV. That sequence means the
+ * parentheses are mandatory, to disambiguate object initialisers
+ * as expression statements from block statements.
+ *
+ * Also drop pn if pn2 is a member or a primary expression of any
+ * kind. This is required to avoid generating a JSOP_GROUP that
+ * will null the |obj| interpreter register, causing |this| in any
+ * call of that member expression to bind to the global object.
+ */
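+            /*
+             * For example, the statement ({p:2, q:3}).q keeps its TOK_RP
+             * node so the decompiled statement is not read as a block,
+             * whereas in (obj.m)() the group is dropped here so |this| in
+             * the call still binds to obj rather than the global object.
+             */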
+ pn->pn_kid = NULL;
+ RecycleTree(pn, tc);
+ pn = pn2;
+ } else {
+ pn->pn_type = TOK_RP;
+ pn->pn_pos.end = CURRENT_TOKEN(ts).pos.end;
+ pn->pn_kid = pn2;
+ }
+ break;
+
+#if JS_HAS_XML_SUPPORT
+ case TOK_STAR:
+ pn = QualifiedIdentifier(cx, ts, tc);
+ if (!pn)
+ return NULL;
+ notsharp = JS_TRUE;
+ break;
+
+ case TOK_AT:
+ pn = AttributeIdentifier(cx, ts, tc);
+ if (!pn)
+ return NULL;
+ notsharp = JS_TRUE;
+ break;
+
+ case TOK_XMLSTAGO:
+ pn = XMLElementOrListRoot(cx, ts, tc, JS_TRUE);
+ if (!pn)
+ return NULL;
+ notsharp = JS_TRUE; /* XXXbe could be sharp? */
+ break;
+#endif /* JS_HAS_XML_SUPPORT */
+
+ case TOK_STRING:
+#if JS_HAS_SHARP_VARS
+ notsharp = JS_TRUE;
+ /* FALL THROUGH */
+#endif
+
+#if JS_HAS_XML_SUPPORT
+ case TOK_XMLCDATA:
+ case TOK_XMLCOMMENT:
+ case TOK_XMLPI:
+#endif
+ case TOK_NAME:
+ case TOK_OBJECT:
+ pn = NewParseNode(cx, ts, PN_NULLARY, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_atom = CURRENT_TOKEN(ts).t_atom;
+#if JS_HAS_XML_SUPPORT
+ if (tt == TOK_XMLPI)
+ pn->pn_atom2 = CURRENT_TOKEN(ts).t_atom2;
+ else
+#endif
+ pn->pn_op = CURRENT_TOKEN(ts).t_op;
+ if (tt == TOK_NAME) {
+ pn->pn_arity = PN_NAME;
+ pn->pn_expr = NULL;
+ pn->pn_slot = -1;
+ pn->pn_attrs = 0;
+
+#if JS_HAS_XML_SUPPORT
+ if (js_MatchToken(cx, ts, TOK_DBLCOLON)) {
+ if (afterDot) {
+ JSString *str;
+
+ /*
+ * Here PrimaryExpr is called after '.' or '..' and we
+ * just scanned .name:: or ..name:: . This is the only
+ * case where a keyword after '.' or '..' is not
+ * treated as a property name.
+ */
+ str = ATOM_TO_STRING(pn->pn_atom);
+ tt = js_CheckKeyword(JSSTRING_CHARS(str),
+ JSSTRING_LENGTH(str));
+ if (tt == TOK_FUNCTION) {
+ pn->pn_arity = PN_NULLARY;
+ pn->pn_type = TOK_FUNCTION;
+ } else if (tt != TOK_EOF) {
+ js_ReportCompileErrorNumber(
+ cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_KEYWORD_NOT_NS);
+ return NULL;
+ }
+ }
+ pn = QualifiedSuffix(cx, ts, pn, tc);
+ if (!pn)
+ return NULL;
+ break;
+ }
+#endif
+
+ /* Unqualified __parent__ and __proto__ uses require activations. */
+ if (pn->pn_atom == cx->runtime->atomState.parentAtom ||
+ pn->pn_atom == cx->runtime->atomState.protoAtom) {
+ tc->flags |= TCF_FUN_HEAVYWEIGHT;
+ } else {
+ JSAtomListElement *ale;
+ JSStackFrame *fp;
+ JSBool loopy;
+
+ /* Measure optimizable global variable uses. */
+ ATOM_LIST_SEARCH(ale, &tc->decls, pn->pn_atom);
+ if (ale &&
+ !(fp = cx->fp)->fun &&
+ fp->scopeChain == fp->varobj &&
+ js_IsGlobalReference(tc, pn->pn_atom, &loopy)) {
+ tc->globalUses++;
+ if (loopy)
+ tc->loopyGlobalUses++;
+ }
+ }
+ }
+ break;
+
+ case TOK_NUMBER:
+ pn = NewParseNode(cx, ts, PN_NULLARY, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_dval = CURRENT_TOKEN(ts).t_dval;
+#if JS_HAS_SHARP_VARS
+ notsharp = JS_TRUE;
+#endif
+ break;
+
+ case TOK_PRIMARY:
+ pn = NewParseNode(cx, ts, PN_NULLARY, tc);
+ if (!pn)
+ return NULL;
+ pn->pn_op = CURRENT_TOKEN(ts).t_op;
+#if JS_HAS_SHARP_VARS
+ notsharp = JS_TRUE;
+#endif
+ break;
+
+#if !JS_HAS_EXPORT_IMPORT
+ case TOK_EXPORT:
+ case TOK_IMPORT:
+#endif
+ case TOK_ERROR:
+ /* The scanner or one of its subroutines reported the error. */
+ return NULL;
+
+ default:
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_SYNTAX_ERROR);
+ return NULL;
+ }
+
+#if JS_HAS_SHARP_VARS
+ if (defsharp) {
+ if (notsharp) {
+ badsharp:
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_SHARP_VAR_DEF);
+ return NULL;
+ }
+ defsharp->pn_kid = pn;
+ return defsharp;
+ }
+#endif
+ return pn;
+}
+
+/*
+ * Fold from one constant type to another.
+ * XXX handles only strings and numbers for now
+ */
+static JSBool
+FoldType(JSContext *cx, JSParseNode *pn, JSTokenType type)
+{
+ if (pn->pn_type != type) {
+ switch (type) {
+ case TOK_NUMBER:
+ if (pn->pn_type == TOK_STRING) {
+ jsdouble d;
+ if (!js_ValueToNumber(cx, ATOM_KEY(pn->pn_atom), &d))
+ return JS_FALSE;
+ pn->pn_dval = d;
+ pn->pn_type = TOK_NUMBER;
+ pn->pn_op = JSOP_NUMBER;
+ }
+ break;
+
+ case TOK_STRING:
+ if (pn->pn_type == TOK_NUMBER) {
+ JSString *str = js_NumberToString(cx, pn->pn_dval);
+ if (!str)
+ return JS_FALSE;
+ pn->pn_atom = js_AtomizeString(cx, str, 0);
+ if (!pn->pn_atom)
+ return JS_FALSE;
+ pn->pn_type = TOK_STRING;
+ pn->pn_op = JSOP_STRING;
+ }
+ break;
+
+ default:;
+ }
+ }
+ return JS_TRUE;
+}
+
+/*
+ * Fold two numeric constants. Beware that pn1 and pn2 are recycled, unless
+ * one of them aliases pn, so you can't safely fetch pn2->pn_next, e.g., after
+ * a successful call to this function.
+ */
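+/*
+ * Callers folding a list left to right must therefore capture pn2->pn_next
+ * before each call, as the do_binary_op loop in js_FoldConstants does by
+ * saving it in pn3.
+ */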
+static JSBool
+FoldBinaryNumeric(JSContext *cx, JSOp op, JSParseNode *pn1, JSParseNode *pn2,
+ JSParseNode *pn, JSTreeContext *tc)
+{
+ jsdouble d, d2;
+ int32 i, j;
+ uint32 u;
+
+ JS_ASSERT(pn1->pn_type == TOK_NUMBER && pn2->pn_type == TOK_NUMBER);
+ d = pn1->pn_dval;
+ d2 = pn2->pn_dval;
+ switch (op) {
+ case JSOP_LSH:
+ case JSOP_RSH:
+ if (!js_DoubleToECMAInt32(cx, d, &i))
+ return JS_FALSE;
+ if (!js_DoubleToECMAInt32(cx, d2, &j))
+ return JS_FALSE;
+ j &= 31;
+ d = (op == JSOP_LSH) ? i << j : i >> j;
+ break;
+
+ case JSOP_URSH:
+ if (!js_DoubleToECMAUint32(cx, d, &u))
+ return JS_FALSE;
+ if (!js_DoubleToECMAInt32(cx, d2, &j))
+ return JS_FALSE;
+ j &= 31;
+ d = u >> j;
+ break;
+
+ case JSOP_ADD:
+ d += d2;
+ break;
+
+ case JSOP_SUB:
+ d -= d2;
+ break;
+
+ case JSOP_MUL:
+ d *= d2;
+ break;
+
+ case JSOP_DIV:
+ if (d2 == 0) {
+#if defined(XP_WIN)
+ /* XXX MSVC miscompiles such that (NaN == 0) */
+ if (JSDOUBLE_IS_NaN(d2))
+ d = *cx->runtime->jsNaN;
+ else
+#endif
+ if (d == 0 || JSDOUBLE_IS_NaN(d))
+ d = *cx->runtime->jsNaN;
+ else if ((JSDOUBLE_HI32(d) ^ JSDOUBLE_HI32(d2)) >> 31)
+ d = *cx->runtime->jsNegativeInfinity;
+ else
+ d = *cx->runtime->jsPositiveInfinity;
+ } else {
+ d /= d2;
+ }
+ break;
+
+ case JSOP_MOD:
+ if (d2 == 0) {
+ d = *cx->runtime->jsNaN;
+ } else {
+#if defined(XP_WIN)
+ /* Workaround MS fmod bug where 42 % (1/0) => NaN, not 42. */
+ if (!(JSDOUBLE_IS_FINITE(d) && JSDOUBLE_IS_INFINITE(d2)))
+#endif
+ d = fmod(d, d2);
+ }
+ break;
+
+ default:;
+ }
+
+ /* Take care to allow pn1 or pn2 to alias pn. */
+ if (pn1 != pn)
+ RecycleTree(pn1, tc);
+ if (pn2 != pn)
+ RecycleTree(pn2, tc);
+ pn->pn_type = TOK_NUMBER;
+ pn->pn_op = JSOP_NUMBER;
+ pn->pn_arity = PN_NULLARY;
+ pn->pn_dval = d;
+ return JS_TRUE;
+}
+
+#if JS_HAS_XML_SUPPORT
+
+static JSBool
+FoldXMLConstants(JSContext *cx, JSParseNode *pn, JSTreeContext *tc)
+{
+ JSTokenType tt;
+ JSParseNode **pnp, *pn1, *pn2;
+ JSString *accum, *str;
+ uint32 i, j;
+
+ JS_ASSERT(pn->pn_arity == PN_LIST);
+ tt = pn->pn_type;
+ pnp = &pn->pn_head;
+ pn1 = *pnp;
+ accum = NULL;
+ if ((pn->pn_extra & PNX_CANTFOLD) == 0) {
+ if (tt == TOK_XMLETAGO)
+ accum = ATOM_TO_STRING(cx->runtime->atomState.etagoAtom);
+ else if (tt == TOK_XMLSTAGO || tt == TOK_XMLPTAGC)
+ accum = ATOM_TO_STRING(cx->runtime->atomState.stagoAtom);
+ }
+
+ for (pn2 = pn1, i = j = 0; pn2; pn2 = pn2->pn_next, i++) {
+ /* The parser already rejected end-tags with attributes. */
+ JS_ASSERT(tt != TOK_XMLETAGO || i == 0);
+ switch (pn2->pn_type) {
+ case TOK_XMLATTR:
+ if (!accum)
+ goto cantfold;
+ /* FALL THROUGH */
+ case TOK_XMLNAME:
+ case TOK_XMLSPACE:
+ case TOK_XMLTEXT:
+ case TOK_STRING:
+ if (pn2->pn_arity == PN_LIST)
+ goto cantfold;
+ str = ATOM_TO_STRING(pn2->pn_atom);
+ break;
+
+ case TOK_XMLCDATA:
+ str = js_MakeXMLCDATAString(cx, ATOM_TO_STRING(pn2->pn_atom));
+ if (!str)
+ return JS_FALSE;
+ break;
+
+ case TOK_XMLCOMMENT:
+ str = js_MakeXMLCommentString(cx, ATOM_TO_STRING(pn2->pn_atom));
+ if (!str)
+ return JS_FALSE;
+ break;
+
+ case TOK_XMLPI:
+ str = js_MakeXMLPIString(cx, ATOM_TO_STRING(pn2->pn_atom),
+ ATOM_TO_STRING(pn2->pn_atom2));
+ if (!str)
+ return JS_FALSE;
+ break;
+
+ cantfold:
+ default:
+ JS_ASSERT(*pnp == pn1);
+ if ((tt == TOK_XMLSTAGO || tt == TOK_XMLPTAGC) &&
+ (i & 1) ^ (j & 1)) {
+#ifdef DEBUG_brendanXXX
+ printf("1: %d, %d => %s\n",
+ i, j, accum ? JS_GetStringBytes(accum) : "NULL");
+#endif
+ } else if (accum && pn1 != pn2) {
+ while (pn1->pn_next != pn2) {
+ pn1 = RecycleTree(pn1, tc);
+ --pn->pn_count;
+ }
+ pn1->pn_type = TOK_XMLTEXT;
+ pn1->pn_op = JSOP_STRING;
+ pn1->pn_arity = PN_NULLARY;
+ pn1->pn_atom = js_AtomizeString(cx, accum, 0);
+ if (!pn1->pn_atom)
+ return JS_FALSE;
+ JS_ASSERT(pnp != &pn1->pn_next);
+ *pnp = pn1;
+ }
+ pnp = &pn2->pn_next;
+ pn1 = *pnp;
+ accum = NULL;
+ continue;
+ }
+
+ if (accum) {
+ str = ((tt == TOK_XMLSTAGO || tt == TOK_XMLPTAGC) && i != 0)
+ ? js_AddAttributePart(cx, i & 1, accum, str)
+ : js_ConcatStrings(cx, accum, str);
+ if (!str)
+ return JS_FALSE;
+#ifdef DEBUG_brendanXXX
+ printf("2: %d, %d => %s (%u)\n",
+ i, j, JS_GetStringBytes(str), JSSTRING_LENGTH(str));
+#endif
+ ++j;
+ }
+ accum = str;
+ }
+
+ if (accum) {
+ str = NULL;
+ if ((pn->pn_extra & PNX_CANTFOLD) == 0) {
+ if (tt == TOK_XMLPTAGC)
+ str = ATOM_TO_STRING(cx->runtime->atomState.ptagcAtom);
+ else if (tt == TOK_XMLSTAGO || tt == TOK_XMLETAGO)
+ str = ATOM_TO_STRING(cx->runtime->atomState.tagcAtom);
+ }
+ if (str) {
+ accum = js_ConcatStrings(cx, accum, str);
+ if (!accum)
+ return JS_FALSE;
+ }
+
+ JS_ASSERT(*pnp == pn1);
+ while (pn1->pn_next) {
+ pn1 = RecycleTree(pn1, tc);
+ --pn->pn_count;
+ }
+ pn1->pn_type = TOK_XMLTEXT;
+ pn1->pn_op = JSOP_STRING;
+ pn1->pn_arity = PN_NULLARY;
+ pn1->pn_atom = js_AtomizeString(cx, accum, 0);
+ if (!pn1->pn_atom)
+ return JS_FALSE;
+ JS_ASSERT(pnp != &pn1->pn_next);
+ *pnp = pn1;
+ }
+
+ if (pn1 && pn->pn_count == 1) {
+ /*
+ * Only one node under pn, and it has been folded: move pn1 onto pn
+ * unless pn is an XML root (in which case we need it to tell the code
+ * generator to emit a JSOP_TOXML or JSOP_TOXMLLIST op). If pn is an
+ * XML root *and* it's a point-tag, rewrite it to TOK_XMLELEM to avoid
+ * extra "<" and "/>" bracketing at runtime.
+ */
+ if (!(pn->pn_extra & PNX_XMLROOT)) {
+ PN_MOVE_NODE(pn, pn1);
+ } else if (tt == TOK_XMLPTAGC) {
+ pn->pn_type = TOK_XMLELEM;
+ pn->pn_op = JSOP_TOXML;
+ }
+ }
+ return JS_TRUE;
+}
+
+#endif /* JS_HAS_XML_SUPPORT */
+
+static JSBool
+StartsWith(JSParseNode *pn, JSTokenType tt)
+{
+#define TAIL_RECURSE(pn2) JS_BEGIN_MACRO pn = (pn2); goto recur; JS_END_MACRO
+
+recur:
+ if (pn->pn_type == tt)
+ return JS_TRUE;
+ switch (pn->pn_arity) {
+ case PN_FUNC:
+ return tt == TOK_FUNCTION;
+ case PN_LIST:
+ if (pn->pn_head)
+ TAIL_RECURSE(pn->pn_head);
+ break;
+ case PN_TERNARY:
+ if (pn->pn_kid1)
+ TAIL_RECURSE(pn->pn_kid1);
+ break;
+ case PN_BINARY:
+ if (pn->pn_left)
+ TAIL_RECURSE(pn->pn_left);
+ break;
+ case PN_UNARY:
+ /* A parenthesized expression starts with a left parenthesis. */
+ if (pn->pn_type == TOK_RP)
+ return tt == TOK_LP;
+ if (pn->pn_kid)
+ TAIL_RECURSE(pn->pn_kid);
+ break;
+ case PN_NAME:
+ if (pn->pn_type == TOK_DOT || pn->pn_type == TOK_DBLDOT)
+ TAIL_RECURSE(pn->pn_expr);
+ /* FALL THROUGH */
+ }
+ return JS_FALSE;
+#undef TAIL_RECURSE
+}
+
+JSBool
+js_FoldConstants(JSContext *cx, JSParseNode *pn, JSTreeContext *tc)
+{
+ JSParseNode *pn1 = NULL, *pn2 = NULL, *pn3 = NULL;
+ int stackDummy;
+
+ if (!JS_CHECK_STACK_SIZE(cx, stackDummy)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_OVER_RECURSED);
+ return JS_FALSE;
+ }
+
+ switch (pn->pn_arity) {
+ case PN_FUNC:
+ {
+ uint16 oldflags = tc->flags;
+
+ tc->flags = (uint16) pn->pn_flags;
+ if (!js_FoldConstants(cx, pn->pn_body, tc))
+ return JS_FALSE;
+ tc->flags = oldflags;
+ break;
+ }
+
+ case PN_LIST:
+#if 0 /* JS_HAS_XML_SUPPORT */
+ switch (pn->pn_type) {
+ case TOK_XMLELEM:
+ case TOK_XMLLIST:
+ case TOK_XMLPTAGC:
+ /*
+ * Try to fold this XML parse tree once, from the top down, into
+ * a JSXML tree with just one object wrapping the tree root.
+ *
+ * Certain subtrees could be folded similarly, but we'd have to
+ * ensure that none used namespace prefixes declared elsewhere in
+ * its super-tree, and we would have to convert each XML object
+ * created at runtime for such sub-trees back into a string, and
+ * concatenate and re-parse anyway.
+ */
+ if ((pn->pn_extra & (PNX_XMLROOT | PNX_CANTFOLD)) == PNX_XMLROOT &&
+ !(tc->flags & TCF_HAS_DEFXMLNS)) {
+ JSObject *obj;
+ JSAtom *atom;
+
+ obj = js_ParseNodeToXMLObject(cx, pn);
+ if (!obj)
+ return JS_FALSE;
+ atom = js_AtomizeObject(cx, obj, 0);
+ if (!atom)
+ return JS_FALSE;
+ pn->pn_op = JSOP_XMLOBJECT;
+ pn->pn_arity = PN_NULLARY;
+ pn->pn_atom = atom;
+ return JS_TRUE;
+ }
+
+ /*
+ * Can't fold from parse node to XML tree -- try folding strings
+ * as much as possible, and folding XML sub-trees bottom up to
+ * minimize string concatenation and ToXML/ToXMLList operations
+ * at runtime.
+ */
+ break;
+
+ default:;
+ }
+#endif
+
+ /* Save the list head in pn1 for later use. */
+ for (pn1 = pn2 = pn->pn_head; pn2; pn2 = pn2->pn_next) {
+ if (!js_FoldConstants(cx, pn2, tc))
+ return JS_FALSE;
+ }
+ break;
+
+ case PN_TERNARY:
+ /* Any kid may be null (e.g. for (;;)). */
+ pn1 = pn->pn_kid1;
+ pn2 = pn->pn_kid2;
+ pn3 = pn->pn_kid3;
+ if (pn1 && !js_FoldConstants(cx, pn1, tc))
+ return JS_FALSE;
+ if (pn2 && !js_FoldConstants(cx, pn2, tc))
+ return JS_FALSE;
+ if (pn3 && !js_FoldConstants(cx, pn3, tc))
+ return JS_FALSE;
+ break;
+
+ case PN_BINARY:
+ /* First kid may be null (for default case in switch). */
+ pn1 = pn->pn_left;
+ pn2 = pn->pn_right;
+ if (pn1 && !js_FoldConstants(cx, pn1, tc))
+ return JS_FALSE;
+ if (!js_FoldConstants(cx, pn2, tc))
+ return JS_FALSE;
+ break;
+
+ case PN_UNARY:
+ /* Our kid may be null (e.g. return; vs. return e;). */
+ pn1 = pn->pn_kid;
+ if (pn1 && !js_FoldConstants(cx, pn1, tc))
+ return JS_FALSE;
+ break;
+
+ case PN_NAME:
+ /*
+ * Skip pn1 down along a chain of dotted member expressions to avoid
+ * excessive recursion. Our only goal here is to fold constants (if
+ * any) in the primary expression operand to the left of the first
+ * dot in the chain.
+ */
+ pn1 = pn->pn_expr;
+ while (pn1 && pn1->pn_arity == PN_NAME)
+ pn1 = pn1->pn_expr;
+ if (pn1 && !js_FoldConstants(cx, pn1, tc))
+ return JS_FALSE;
+ break;
+
+ case PN_NULLARY:
+ break;
+ }
+
+ switch (pn->pn_type) {
+ case TOK_IF:
+ if (ContainsStmt(pn2, TOK_VAR) || ContainsStmt(pn3, TOK_VAR))
+ break;
+ /* FALL THROUGH */
+
+ case TOK_HOOK:
+ /* Reduce 'if (C) T; else E' into T for true C, E for false. */
+ while (pn1->pn_type == TOK_RP)
+ pn1 = pn1->pn_kid;
+ switch (pn1->pn_type) {
+ case TOK_NUMBER:
+ if (pn1->pn_dval == 0)
+ pn2 = pn3;
+ break;
+ case TOK_STRING:
+ if (JSSTRING_LENGTH(ATOM_TO_STRING(pn1->pn_atom)) == 0)
+ pn2 = pn3;
+ break;
+ case TOK_PRIMARY:
+ if (pn1->pn_op == JSOP_TRUE)
+ break;
+ if (pn1->pn_op == JSOP_FALSE || pn1->pn_op == JSOP_NULL) {
+ pn2 = pn3;
+ break;
+ }
+ /* FALL THROUGH */
+ default:
+ /* Early return to dodge common code that copies pn2 to pn. */
+ return JS_TRUE;
+ }
+
+ if (pn2) {
+ /*
+ * pn2 is the then- or else-statement subtree to compile. Take
+ * care not to expose an object initialiser, which would be parsed
+ * as a block, to the Statement parser via eval(uneval(e)) where e
+ * is '1 ? {p:2, q:3}[i] : r;' or the like.
+ */
+ if (pn->pn_type == TOK_HOOK && StartsWith(pn2, TOK_RC)) {
+ pn->pn_type = TOK_RP;
+ pn->pn_arity = PN_UNARY;
+ pn->pn_kid = pn2;
+ } else {
+ PN_MOVE_NODE(pn, pn2);
+ }
+ }
+ if (!pn2 || (pn->pn_type == TOK_SEMI && !pn->pn_kid)) {
+ /*
+ * False condition and no else, or an empty then-statement was
+ * moved up over pn. Either way, make pn an empty block (not an
+ * empty statement, which does not decompile, even when labeled).
+ * NB: pn must be a TOK_IF as TOK_HOOK can never have a null kid
+ * or an empty statement for a child.
+ */
+ pn->pn_type = TOK_LC;
+ pn->pn_arity = PN_LIST;
+ PN_INIT_LIST(pn);
+ }
+ RecycleTree(pn2, tc);
+ if (pn3 && pn3 != pn2)
+ RecycleTree(pn3, tc);
+ break;
+
+ case TOK_ASSIGN:
+ /*
+ * Compound operators such as *= should be subject to folding, in case
+ * the left-hand side is constant, and so that the decompiler produces
+ * the same string that you get from decompiling a script or function
+ * compiled from that same string. As with +, += is special.
+ */
+ if (pn->pn_op == JSOP_NOP)
+ break;
+ if (pn->pn_op != JSOP_ADD)
+ goto do_binary_op;
+ /* FALL THROUGH */
+
+ case TOK_PLUS:
+ if (pn->pn_arity == PN_LIST) {
+ size_t length, length2;
+ jschar *chars;
+ JSString *str, *str2;
+
+ /*
+ * Any string literal term with all others number or string means
+ * this is a concatenation. If any term is not a string or number
+ * literal, we can't fold.
+ */
+ JS_ASSERT(pn->pn_count > 2);
+ if (pn->pn_extra & PNX_CANTFOLD)
+ return JS_TRUE;
+ if (pn->pn_extra != PNX_STRCAT)
+ goto do_binary_op;
+
+ /* Ok, we're concatenating: convert non-string constant operands. */
+ length = 0;
+ for (pn2 = pn1; pn2; pn2 = pn2->pn_next) {
+ if (!FoldType(cx, pn2, TOK_STRING))
+ return JS_FALSE;
+ /* XXX fold only if all operands convert to string */
+ if (pn2->pn_type != TOK_STRING)
+ return JS_TRUE;
+ length += ATOM_TO_STRING(pn2->pn_atom)->length;
+ }
+
+ /* Allocate a new buffer and string descriptor for the result. */
+ chars = (jschar *) JS_malloc(cx, (length + 1) * sizeof(jschar));
+ if (!chars)
+ return JS_FALSE;
+ str = js_NewString(cx, chars, length, 0);
+ if (!str) {
+ JS_free(cx, chars);
+ return JS_FALSE;
+ }
+
+ /* Fill the buffer, advancing chars and recycling kids as we go. */
+ for (pn2 = pn1; pn2; pn2 = RecycleTree(pn2, tc)) {
+ str2 = ATOM_TO_STRING(pn2->pn_atom);
+ length2 = str2->length;
+ js_strncpy(chars, str2->chars, length2);
+ chars += length2;
+ }
+ *chars = 0;
+
+ /* Atomize the result string and mutate pn to refer to it. */
+ pn->pn_atom = js_AtomizeString(cx, str, 0);
+ if (!pn->pn_atom)
+ return JS_FALSE;
+ pn->pn_type = TOK_STRING;
+ pn->pn_op = JSOP_STRING;
+ pn->pn_arity = PN_NULLARY;
+ break;
+ }
+
+ /* Handle a binary string concatenation. */
+ JS_ASSERT(pn->pn_arity == PN_BINARY);
+ if (pn1->pn_type == TOK_STRING || pn2->pn_type == TOK_STRING) {
+ JSString *left, *right, *str;
+
+ if (!FoldType(cx, (pn1->pn_type != TOK_STRING) ? pn1 : pn2,
+ TOK_STRING)) {
+ return JS_FALSE;
+ }
+ if (pn1->pn_type != TOK_STRING || pn2->pn_type != TOK_STRING)
+ return JS_TRUE;
+ left = ATOM_TO_STRING(pn1->pn_atom);
+ right = ATOM_TO_STRING(pn2->pn_atom);
+ str = js_ConcatStrings(cx, left, right);
+ if (!str)
+ return JS_FALSE;
+ pn->pn_atom = js_AtomizeString(cx, str, 0);
+ if (!pn->pn_atom)
+ return JS_FALSE;
+ pn->pn_type = TOK_STRING;
+ pn->pn_op = JSOP_STRING;
+ pn->pn_arity = PN_NULLARY;
+ RecycleTree(pn1, tc);
+ RecycleTree(pn2, tc);
+ break;
+ }
+
+ /* Can't concatenate string literals, let's try numbers. */
+ goto do_binary_op;
+
+ case TOK_STAR:
+ /* The * in 'import *;' parses as a nullary star node. */
+ if (pn->pn_arity == PN_NULLARY)
+ break;
+ /* FALL THROUGH */
+
+ case TOK_SHOP:
+ case TOK_MINUS:
+ case TOK_DIVOP:
+ do_binary_op:
+ if (pn->pn_arity == PN_LIST) {
+ JS_ASSERT(pn->pn_count > 2);
+ for (pn2 = pn1; pn2; pn2 = pn2->pn_next) {
+ if (!FoldType(cx, pn2, TOK_NUMBER))
+ return JS_FALSE;
+ }
+ for (pn2 = pn1; pn2; pn2 = pn2->pn_next) {
+ /* XXX fold only if all operands convert to number */
+ if (pn2->pn_type != TOK_NUMBER)
+ break;
+ }
+ if (!pn2) {
+ JSOp op = pn->pn_op;
+
+ pn2 = pn1->pn_next;
+ pn3 = pn2->pn_next;
+ if (!FoldBinaryNumeric(cx, op, pn1, pn2, pn, tc))
+ return JS_FALSE;
+ while ((pn2 = pn3) != NULL) {
+ pn3 = pn2->pn_next;
+ if (!FoldBinaryNumeric(cx, op, pn, pn2, pn, tc))
+ return JS_FALSE;
+ }
+ }
+ } else {
+ JS_ASSERT(pn->pn_arity == PN_BINARY);
+ if (!FoldType(cx, pn1, TOK_NUMBER) ||
+ !FoldType(cx, pn2, TOK_NUMBER)) {
+ return JS_FALSE;
+ }
+ if (pn1->pn_type == TOK_NUMBER && pn2->pn_type == TOK_NUMBER) {
+ if (!FoldBinaryNumeric(cx, pn->pn_op, pn1, pn2, pn, tc))
+ return JS_FALSE;
+ }
+ }
+ break;
+
+ case TOK_UNARYOP:
+ while (pn1->pn_type == TOK_RP)
+ pn1 = pn1->pn_kid;
+ if (pn1->pn_type == TOK_NUMBER) {
+ jsdouble d;
+ int32 i;
+
+ /* Operate on one numeric constant. */
+ d = pn1->pn_dval;
+ switch (pn->pn_op) {
+ case JSOP_BITNOT:
+ if (!js_DoubleToECMAInt32(cx, d, &i))
+ return JS_FALSE;
+ d = ~i;
+ break;
+
+ case JSOP_NEG:
+#ifdef HPUX
+ /*
+ * Negation of a zero doesn't produce a negative
+ * zero on HPUX. Perform the operation by bit
+ * twiddling.
+ */
+ JSDOUBLE_HI32(d) ^= JSDOUBLE_HI32_SIGNBIT;
+#else
+ d = -d;
+#endif
+ break;
+
+ case JSOP_POS:
+ break;
+
+ case JSOP_NOT:
+ pn->pn_type = TOK_PRIMARY;
+ pn->pn_op = (d == 0) ? JSOP_TRUE : JSOP_FALSE;
+ pn->pn_arity = PN_NULLARY;
+ /* FALL THROUGH */
+
+ default:
+ /* Return early to dodge the common TOK_NUMBER code. */
+ return JS_TRUE;
+ }
+ pn->pn_type = TOK_NUMBER;
+ pn->pn_op = JSOP_NUMBER;
+ pn->pn_arity = PN_NULLARY;
+ pn->pn_dval = d;
+ RecycleTree(pn1, tc);
+ }
+ break;
+
+#if JS_HAS_XML_SUPPORT
+ case TOK_XMLELEM:
+ case TOK_XMLLIST:
+ case TOK_XMLPTAGC:
+ case TOK_XMLSTAGO:
+ case TOK_XMLETAGO:
+ case TOK_XMLNAME:
+ if (pn->pn_arity == PN_LIST) {
+ JS_ASSERT(pn->pn_type == TOK_XMLLIST || pn->pn_count != 0);
+ if (!FoldXMLConstants(cx, pn, tc))
+ return JS_FALSE;
+ }
+ break;
+
+ case TOK_AT:
+ if (pn1->pn_type == TOK_XMLNAME) {
+ jsval v;
+ JSAtom *atom;
+
+ v = ATOM_KEY(pn1->pn_atom);
+ if (!js_ToAttributeName(cx, &v))
+ return JS_FALSE;
+ JS_ASSERT(!JSVAL_IS_PRIMITIVE(v));
+ atom = js_AtomizeObject(cx, JSVAL_TO_OBJECT(v), 0);
+ if (!atom)
+ return JS_FALSE;
+
+ pn->pn_type = TOK_XMLNAME;
+ pn->pn_op = JSOP_OBJECT;
+ pn->pn_arity = PN_NULLARY;
+ pn->pn_atom = atom;
+ RecycleTree(pn1, tc);
+ }
+ break;
+#endif /* JS_HAS_XML_SUPPORT */
+
+ default:;
+ }
+
+ return JS_TRUE;
+}
diff --git a/src/third_party/js-1.7/jsparse.h b/src/third_party/js-1.7/jsparse.h
new file mode 100644
index 00000000000..7c23927eb32
--- /dev/null
+++ b/src/third_party/js-1.7/jsparse.h
@@ -0,0 +1,438 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsparse_h___
+#define jsparse_h___
+/*
+ * JS parser definitions.
+ */
+#include "jsconfig.h"
+#include "jsprvtd.h"
+#include "jspubtd.h"
+#include "jsscan.h"
+
+JS_BEGIN_EXTERN_C
+
+/*
+ * Parsing builds a tree of nodes that directs code generation. This tree is
+ * not a concrete syntax tree in all respects (for example, || and && are left
+ * associative, but (A && B && C) translates into the right-associated tree
+ * <A && <B && C>> so that code generation can emit a left-associative branch
+ * around <B && C> when A is false). Nodes are labeled by token type, with a
+ * JSOp secondary label when needed:
+ *
+ * Label Variant Members
+ * ----- ------- -------
+ * <Definitions>
+ * TOK_FUNCTION func pn_funAtom: atom holding function object containing
+ * arg and var properties. We create the function
+ * object at parse (not emit) time to specialize arg
+ * and var bytecodes early.
+ * pn_body: TOK_LC node for function body statements
+ * pn_flags: TCF_FUN_* flags (see jsemit.h) collected
+ * while parsing the function's body
+ * pn_tryCount: of try statements in function
+ *
+ * <Statements>
+ * TOK_LC list pn_head: list of pn_count statements
+ * TOK_EXPORT list pn_head: list of pn_count TOK_NAMEs or one TOK_STAR
+ * (which is not a multiply node)
+ * TOK_IMPORT list pn_head: list of pn_count sub-trees of the form
+ * a.b.*, a[b].*, a.*, a.b, or a[b] -- but never a.
+ * Each member is expressed with TOK_DOT or TOK_LB.
+ * Each sub-tree's root node has a pn_op in the set
+ * JSOP_IMPORT{ALL,PROP,ELEM}
+ * TOK_IF ternary pn_kid1: cond, pn_kid2: then, pn_kid3: else or null
+ * TOK_SWITCH binary pn_left: discriminant
+ * pn_right: list of TOK_CASE nodes, with at most one
+ * TOK_DEFAULT node, or if there are let bindings
+ * in the top level of the switch body's cases, a
+ * TOK_LEXICALSCOPE node that contains the list of
+ * TOK_CASE nodes.
+ * TOK_CASE, binary pn_left: case expr or null if TOK_DEFAULT
+ * TOK_DEFAULT pn_right: TOK_LC node for this case's statements
+ * pn_val: constant value if lookup or table switch
+ * TOK_WHILE binary pn_left: cond, pn_right: body
+ * TOK_DO binary pn_left: body, pn_right: cond
+ * TOK_FOR binary pn_left: either
+ * for/in loop: a binary TOK_IN node with
+ * pn_left: TOK_VAR or TOK_NAME to left of 'in'
+ * if TOK_VAR, its pn_extra may have PNX_POPVAR
+ * and PNX_FORINVAR bits set
+ * pn_right: object expr to right of 'in'
+ * for(;;) loop: a ternary TOK_RESERVED node with
+ * pn_kid1: init expr before first ';'
+ * pn_kid2: cond expr before second ';'
+ * pn_kid3: update expr after second ';'
+ * any kid may be null
+ * pn_right: body
+ * TOK_THROW unary pn_op: JSOP_THROW, pn_kid: exception
+ * TOK_TRY ternary pn_kid1: try block
+ * pn_kid2: null or TOK_RESERVED list of
+ * TOK_LEXICALSCOPE nodes, each with pn_expr pointing
+ * to a TOK_CATCH node
+ * pn_kid3: null or finally block
+ * TOK_CATCH ternary pn_kid1: TOK_NAME, TOK_RB, or TOK_RC catch var node
+ * (TOK_RB or TOK_RC if destructuring)
+ * pn_kid2: null or the catch guard expression
+ * pn_kid3: catch block statements
+ * TOK_BREAK name pn_atom: label or null
+ * TOK_CONTINUE name pn_atom: label or null
+ * TOK_WITH binary pn_left: head expr, pn_right: body
+ * TOK_VAR list pn_head: list of pn_count TOK_NAME nodes
+ * each name node has
+ * pn_atom: variable name
+ * pn_expr: initializer or null
+ * TOK_RETURN unary pn_kid: return expr or null
+ * TOK_SEMI unary pn_kid: expr or null statement
+ * TOK_COLON name pn_atom: label, pn_expr: labeled statement
+ *
+ * <Expressions>
+ * All left-associated binary trees of the same type are optimized into lists
+ * to avoid recursion when processing expression chains.
+ * TOK_COMMA list pn_head: list of pn_count comma-separated exprs
+ * TOK_ASSIGN binary pn_left: lvalue, pn_right: rvalue
+ * pn_op: JSOP_ADD for +=, etc.
+ * TOK_HOOK ternary pn_kid1: cond, pn_kid2: then, pn_kid3: else
+ * TOK_OR binary pn_left: first in || chain, pn_right: rest of chain
+ * TOK_AND binary pn_left: first in && chain, pn_right: rest of chain
+ * TOK_BITOR binary pn_left: left-assoc | expr, pn_right: ^ expr
+ * TOK_BITXOR binary pn_left: left-assoc ^ expr, pn_right: & expr
+ * TOK_BITAND binary pn_left: left-assoc & expr, pn_right: EQ expr
+ * TOK_EQOP binary pn_left: left-assoc EQ expr, pn_right: REL expr
+ * pn_op: JSOP_EQ, JSOP_NE, JSOP_NEW_EQ, JSOP_NEW_NE
+ * TOK_RELOP binary pn_left: left-assoc REL expr, pn_right: SH expr
+ * pn_op: JSOP_LT, JSOP_LE, JSOP_GT, JSOP_GE
+ * TOK_SHOP binary pn_left: left-assoc SH expr, pn_right: ADD expr
+ * pn_op: JSOP_LSH, JSOP_RSH, JSOP_URSH
+ * TOK_PLUS, binary pn_left: left-assoc ADD expr, pn_right: MUL expr
+ * pn_extra: if a left-associated binary TOK_PLUS
+ * tree has been flattened into a list (see above
+ * under <Expressions>), pn_extra will contain
+ * PNX_STRCAT if at least one list element is a
+ * string literal (TOK_STRING); if such a list has
+ * any non-string, non-number term, pn_extra will
+ * contain PNX_CANTFOLD.
+ * pn_
+ * TOK_MINUS pn_op: JSOP_ADD, JSOP_SUB
+ * TOK_STAR, binary pn_left: left-assoc MUL expr, pn_right: UNARY expr
+ * TOK_DIVOP pn_op: JSOP_MUL, JSOP_DIV, JSOP_MOD
+ * TOK_UNARYOP unary pn_kid: UNARY expr, pn_op: JSOP_NEG, JSOP_POS,
+ * JSOP_NOT, JSOP_BITNOT, JSOP_TYPEOF, JSOP_VOID
+ * TOK_INC, unary pn_kid: MEMBER expr
+ * TOK_DEC
+ * TOK_NEW list pn_head: list of ctor, arg1, arg2, ... argN
+ * pn_count: 1 + N (where N is number of args)
+ * ctor is a MEMBER expr
+ * TOK_DELETE unary pn_kid: MEMBER expr
+ * TOK_DOT, name pn_expr: MEMBER expr to left of .
+ * TOK_DBLDOT pn_atom: name to right of .
+ * TOK_LB binary pn_left: MEMBER expr to left of [
+ * pn_right: expr between [ and ]
+ * TOK_LP list pn_head: list of call, arg1, arg2, ... argN
+ * pn_count: 1 + N (where N is number of args)
+ * call is a MEMBER expr naming a callable object
+ * TOK_RB list pn_head: list of pn_count array element exprs
+ * [,,] holes are represented by TOK_COMMA nodes
+ * #n=[...] produces TOK_DEFSHARP at head of list
+ * pn_extra: PN_ENDCOMMA if extra comma at end
+ * TOK_RC list pn_head: list of pn_count TOK_COLON nodes where
+ * each has pn_left: property id, pn_right: value
+ * #n={...} produces TOK_DEFSHARP at head of list
+ * TOK_DEFSHARP unary pn_num: jsint value of n in #n=
+ * pn_kid: null for #n=[...] and #n={...}, primary
+ * if #n=primary for function, paren, name, object
+ * literal expressions
+ * TOK_USESHARP nullary pn_num: jsint value of n in #n#
+ * TOK_RP unary pn_kid: parenthesized expression
+ * TOK_NAME, name pn_atom: name, string, or object atom
+ * TOK_STRING, pn_op: JSOP_NAME, JSOP_STRING, or JSOP_OBJECT, or
+ * JSOP_REGEXP
+ * TOK_OBJECT If JSOP_NAME, pn_op may be JSOP_*ARG or JSOP_*VAR
+ * with pn_slot >= 0 and pn_attrs telling const-ness
+ * TOK_NUMBER dval pn_dval: double value of numeric literal
+ * TOK_PRIMARY nullary pn_op: JSOp bytecode
+ *
+ * <E4X node descriptions>
+ * TOK_ANYNAME nullary pn_op: JSOP_ANYNAME
+ * pn_atom: cx->runtime->atomState.starAtom
+ * TOK_AT unary pn_op: JSOP_TOATTRNAME; pn_kid attribute id/expr
+ * TOK_DBLCOLON binary pn_op: JSOP_QNAME
+ * pn_left: TOK_ANYNAME or TOK_NAME node
+ * pn_right: TOK_STRING "*" node, or expr within []
+ * name pn_op: JSOP_QNAMECONST
+ * pn_expr: TOK_ANYNAME or TOK_NAME left operand
+ * pn_atom: name on right of ::
+ * TOK_XMLELEM list XML element node
+ * pn_head: start tag, content1, ... contentN, end tag
+ * pn_count: 2 + N where N is number of content nodes
+ * N may be > x.length() if {expr} embedded
+ * TOK_XMLLIST list XML list node
+ * pn_head: content1, ... contentN
+ * TOK_XMLSTAGO, list XML start, end, and point tag contents
+ * TOK_XMLETAGO, pn_head: tag name or {expr}, ... XML attrs ...
+ * TOK_XMLPTAGC
+ * TOK_XMLNAME nullary pn_atom: XML name, with no {expr} embedded
+ * TOK_XMLNAME list pn_head: tag name or {expr}, ... name or {expr}
+ * TOK_XMLATTR, nullary pn_atom: attribute value string; pn_op: JSOP_STRING
+ * TOK_XMLCDATA,
+ * TOK_XMLCOMMENT
+ * TOK_XMLPI nullary pn_atom: XML processing instruction target
+ * pn_atom2: XML PI content, or null if no content
+ * TOK_XMLTEXT nullary pn_atom: marked-up text, or null if empty string
+ * TOK_LC unary {expr} in XML tag or content; pn_kid is expr
+ *
+ * So an XML tag with no {expr} and three attributes is a list with the form:
+ *
+ * (tagname attrname1 attrvalue1 attrname2 attrvalue2 attrname3 attrvalue3)
+ *
+ * An XML tag with embedded expressions like so:
+ *
+ * <name1{expr1} name2{expr2}name3={expr3}>
+ *
+ * would have the form:
+ *
+ * ((name1 {expr1}) (name2 {expr2} name3) {expr3})
+ *
+ * where () bracket a list with elements separated by spaces, and {expr} is a
+ * TOK_LC unary node with expr as its kid.
+ *
+ * Thus, the attribute name/value pairs occupy successive odd and even list
+ * locations, where pn_head is the TOK_XMLNAME node at list location 0. The
+ * parser builds the same sort of structures for elements:
+ *
+ * <a x={x}>Hi there!<b y={y}>How are you?</b><answer>{x + y}</answer></a>
+ *
+ * translates to:
+ *
+ * ((a x {x}) 'Hi there!' ((b y {y}) 'How are you?') ((answer) {x + y}))
+ *
+ * <Non-E4X node descriptions, continued>
+ *
+ * Label Variant Members
+ * ----- ------- -------
+ * TOK_LEXICALSCOPE name pn_op: JSOP_LEAVEBLOCK or JSOP_LEAVEBLOCKEXPR
+ * pn_atom: block object
+ * pn_expr: block body
+ * TOK_ARRAYCOMP list pn_head: list of pn_count (1 or 2) elements
+ * if pn_count is 2, first element is #n=[...]
+ * last element is block enclosing for loop(s)
+ * and optionally if-guarded TOK_ARRAYPUSH
+ * pn_extra: stack slot, used during code gen
+ * TOK_ARRAYPUSH unary pn_op: JSOP_ARRAYCOMP
+ * pn_kid: array comprehension expression
+ */
+typedef enum JSParseNodeArity {
+ PN_FUNC = -3,
+ PN_LIST = -2,
+ PN_TERNARY = 3,
+ PN_BINARY = 2,
+ PN_UNARY = 1,
+ PN_NAME = -1,
+ PN_NULLARY = 0
+} JSParseNodeArity;
+
+struct JSParseNode {
+ uint16 pn_type;
+ uint8 pn_op;
+ int8 pn_arity;
+ JSTokenPos pn_pos;
+ ptrdiff_t pn_offset; /* first generated bytecode offset */
+ union {
+ struct { /* TOK_FUNCTION node */
+ JSAtom *funAtom; /* atomized function object */
+ JSParseNode *body; /* TOK_LC list of statements */
+ uint32 flags; /* accumulated tree context flags */
+ uint32 tryCount; /* count of try statements in body */
+ } func;
+ struct { /* list of next-linked nodes */
+ JSParseNode *head; /* first node in list */
+ JSParseNode **tail; /* ptr to ptr to last node in list */
+ uint32 count; /* number of nodes in list */
+ uint32 extra; /* extra flags, see below */
+ } list;
+ struct { /* ternary: if, for(;;), ?: */
+ JSParseNode *kid1; /* condition, discriminant, etc. */
+ JSParseNode *kid2; /* then-part, case list, etc. */
+ JSParseNode *kid3; /* else-part, default case, etc. */
+ } ternary;
+ struct { /* two kids if binary */
+ JSParseNode *left;
+ JSParseNode *right;
+ jsval val; /* switch case value */
+ } binary;
+ struct { /* one kid if unary */
+ JSParseNode *kid;
+ jsint num; /* -1 or sharp variable number */
+ } unary;
+ struct { /* name, labeled statement, etc. */
+ JSAtom *atom; /* name or label atom, null if slot */
+ JSParseNode *expr; /* object or initializer */
+ jsint slot; /* -1 or arg or local var slot */
+ uintN attrs; /* attributes if local var or const */
+ } name;
+ struct {
+ JSAtom *atom; /* first atom in pair */
+ JSAtom *atom2; /* second atom in pair or null */
+ } apair;
+ jsdouble dval; /* aligned numeric literal value */
+ } pn_u;
+ JSParseNode *pn_next; /* to align dval and pn_u on RISCs */
+ JSTokenStream *pn_ts; /* token stream for error reports */
+ JSAtom *pn_source; /* saved source for decompilation */
+};
+
+#define pn_funAtom pn_u.func.funAtom
+#define pn_body pn_u.func.body
+#define pn_flags pn_u.func.flags
+#define pn_tryCount pn_u.func.tryCount
+#define pn_head pn_u.list.head
+#define pn_tail pn_u.list.tail
+#define pn_count pn_u.list.count
+#define pn_extra pn_u.list.extra
+#define pn_kid1 pn_u.ternary.kid1
+#define pn_kid2 pn_u.ternary.kid2
+#define pn_kid3 pn_u.ternary.kid3
+#define pn_left pn_u.binary.left
+#define pn_right pn_u.binary.right
+#define pn_val pn_u.binary.val
+#define pn_kid pn_u.unary.kid
+#define pn_num pn_u.unary.num
+#define pn_atom pn_u.name.atom
+#define pn_expr pn_u.name.expr
+#define pn_slot pn_u.name.slot
+#define pn_attrs pn_u.name.attrs
+#define pn_dval pn_u.dval
+#define pn_atom2 pn_u.apair.atom2
+
+/* PN_LIST pn_extra flags. */
+#define PNX_STRCAT 0x01 /* TOK_PLUS list has string term */
+#define PNX_CANTFOLD 0x02 /* TOK_PLUS list has unfoldable term */
+#define PNX_POPVAR 0x04 /* TOK_VAR last result needs popping */
+#define PNX_FORINVAR 0x08 /* TOK_VAR is left kid of TOK_IN node,
+ which is left kid of TOK_FOR */
+#define PNX_ENDCOMMA 0x10 /* array literal has comma at end */
+#define PNX_XMLROOT 0x20 /* top-most node in XML literal tree */
+#define PNX_GROUPINIT 0x40 /* var [a, b] = [c, d]; unit list */
+#define PNX_NEEDBRACES 0x80 /* braces necessary due to closure */
+
+/*
+ * Move pn2 into pn, preserving pn->pn_pos and pn->pn_offset and handing off
+ * any kids in pn2->pn_u, by clearing pn2.
+ */
+#define PN_MOVE_NODE(pn, pn2) \
+ JS_BEGIN_MACRO \
+ (pn)->pn_type = (pn2)->pn_type; \
+ (pn)->pn_op = (pn2)->pn_op; \
+ (pn)->pn_arity = (pn2)->pn_arity; \
+ (pn)->pn_u = (pn2)->pn_u; \
+ PN_CLEAR_NODE(pn2); \
+ JS_END_MACRO
+
+#define PN_CLEAR_NODE(pn) \
+ JS_BEGIN_MACRO \
+ (pn)->pn_type = TOK_EOF; \
+ (pn)->pn_op = JSOP_NOP; \
+ (pn)->pn_arity = PN_NULLARY; \
+ JS_END_MACRO
+
+/* True if pn is a parsenode representing a literal constant. */
+#define PN_IS_CONSTANT(pn) \
+ ((pn)->pn_type == TOK_NUMBER || \
+ (pn)->pn_type == TOK_STRING || \
+ ((pn)->pn_type == TOK_PRIMARY && (pn)->pn_op != JSOP_THIS))
+
+/*
+ * Compute a pointer to the last JSParseNode element in a singly-linked list.
+ * NB: list must be non-empty for correct PN_LAST usage!
+ */
+#define PN_LAST(list) \
+ ((JSParseNode *)((char *)(list)->pn_tail - offsetof(JSParseNode, pn_next)))
+
+#define PN_INIT_LIST(list) \
+ JS_BEGIN_MACRO \
+ (list)->pn_head = NULL; \
+ (list)->pn_tail = &(list)->pn_head; \
+ (list)->pn_count = (list)->pn_extra = 0; \
+ JS_END_MACRO
+
+#define PN_INIT_LIST_1(list, pn) \
+ JS_BEGIN_MACRO \
+ (list)->pn_head = (pn); \
+ (list)->pn_tail = &(pn)->pn_next; \
+ (list)->pn_count = 1; \
+ (list)->pn_extra = 0; \
+ JS_END_MACRO
+
+#define PN_APPEND(list, pn) \
+ JS_BEGIN_MACRO \
+ *(list)->pn_tail = (pn); \
+ (list)->pn_tail = &(pn)->pn_next; \
+ (list)->pn_count++; \
+ JS_END_MACRO
+
+/*
+ * Parse a top-level JS script.
+ *
+ * The caller must prevent the GC from running while this function is active,
+ * because atoms and function newborns are not rooted yet.
+ */
+extern JS_FRIEND_API(JSParseNode *)
+js_ParseTokenStream(JSContext *cx, JSObject *chain, JSTokenStream *ts);
+
+extern JS_FRIEND_API(JSBool)
+js_CompileTokenStream(JSContext *cx, JSObject *chain, JSTokenStream *ts,
+ JSCodeGenerator *cg);
+
+extern JSBool
+js_CompileFunctionBody(JSContext *cx, JSTokenStream *ts, JSFunction *fun);
+
+extern JSBool
+js_FoldConstants(JSContext *cx, JSParseNode *pn, JSTreeContext *tc);
+
+#if JS_HAS_XML_SUPPORT
+JS_FRIEND_API(JSParseNode *)
+js_ParseXMLTokenStream(JSContext *cx, JSObject *chain, JSTokenStream *ts,
+ JSBool allowList);
+#endif
+
+JS_END_EXTERN_C
+
+#endif /* jsparse_h___ */
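
A quick hedged sketch of how the PN_LIST plumbing in jsparse.h is typically traversed: list nodes keep their kids on pn_head, linked through pn_next, with pn_count recording how many there are. DumpNode is a hypothetical visitor used only for illustration; it is not part of the sources above.

#include "jsutil.h"    /* JS_ASSERT */
#include "jsparse.h"

/* Hypothetical visitor; not defined by the engine sources in this commit. */
static void DumpNode(JSParseNode *pn);

/* Walk a PN_LIST node using the pn_head/pn_next accessors defined above. */
static void
DumpList(JSParseNode *pn)
{
    JSParseNode *kid;

    JS_ASSERT(pn->pn_arity == PN_LIST);
    for (kid = pn->pn_head; kid; kid = kid->pn_next)
        DumpNode(kid);              /* visits pn_count kids in source order */
}
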
diff --git a/src/third_party/js-1.7/jsprf.c b/src/third_party/js-1.7/jsprf.c
new file mode 100644
index 00000000000..416c16c82c7
--- /dev/null
+++ b/src/third_party/js-1.7/jsprf.c
@@ -0,0 +1,1264 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+** Portable safe sprintf code.
+**
+** Author: Kipp E.B. Hickman
+*/
+#include "jsstddef.h"
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include "jsprf.h"
+#include "jslong.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jspubtd.h"
+#include "jsstr.h"
+
+/*
+** Note: on some platforms va_list is defined as an array,
+** and requires array notation.
+*/
+#ifdef HAVE_VA_COPY
+#define VARARGS_ASSIGN(foo, bar) VA_COPY(foo,bar)
+#elif defined(HAVE_VA_LIST_AS_ARRAY)
+#define VARARGS_ASSIGN(foo, bar) foo[0] = bar[0]
+#else
+#define VARARGS_ASSIGN(foo, bar) (foo) = (bar)
+#endif
+
+/*
+** WARNING: This code may *NOT* call JS_LOG (because JS_LOG calls it)
+*/
+
+/*
+** XXX This needs to be internationalized!
+*/
+
+typedef struct SprintfStateStr SprintfState;
+
+struct SprintfStateStr {
+ int (*stuff)(SprintfState *ss, const char *sp, JSUint32 len);
+
+ char *base;
+ char *cur;
+ JSUint32 maxlen;
+
+ int (*func)(void *arg, const char *sp, JSUint32 len);
+ void *arg;
+};
+
+/*
+** Numbered Argument State
+*/
+struct NumArgState{
+ int type; /* type of the corresponding argument */
+ va_list ap; /* saved va_list position of that argument */
+};
+
+#define NAS_DEFAULT_NUM 20 /* default size of the NumArgState array */
+
+
+#define TYPE_INT16 0
+#define TYPE_UINT16 1
+#define TYPE_INTN 2
+#define TYPE_UINTN 3
+#define TYPE_INT32 4
+#define TYPE_UINT32 5
+#define TYPE_INT64 6
+#define TYPE_UINT64 7
+#define TYPE_STRING 8
+#define TYPE_DOUBLE 9
+#define TYPE_INTSTR 10
+#define TYPE_WSTRING 11
+#define TYPE_UNKNOWN 20
+
+#define FLAG_LEFT 0x1
+#define FLAG_SIGNED 0x2
+#define FLAG_SPACED 0x4
+#define FLAG_ZEROS 0x8
+#define FLAG_NEG 0x10
+
+/*
+** Fill into the buffer using the data in src
+*/
+static int fill2(SprintfState *ss, const char *src, int srclen, int width,
+ int flags)
+{
+ char space = ' ';
+ int rv;
+
+ width -= srclen;
+ if ((width > 0) && ((flags & FLAG_LEFT) == 0)) { /* Right adjusting */
+ if (flags & FLAG_ZEROS) {
+ space = '0';
+ }
+ while (--width >= 0) {
+ rv = (*ss->stuff)(ss, &space, 1);
+ if (rv < 0) {
+ return rv;
+ }
+ }
+ }
+
+ /* Copy out the source data */
+ rv = (*ss->stuff)(ss, src, (JSUint32)srclen);
+ if (rv < 0) {
+ return rv;
+ }
+
+ if ((width > 0) && ((flags & FLAG_LEFT) != 0)) { /* Left adjusting */
+ while (--width >= 0) {
+ rv = (*ss->stuff)(ss, &space, 1);
+ if (rv < 0) {
+ return rv;
+ }
+ }
+ }
+ return 0;
+}
+
+/*
+** Fill a number. The order is: optional-sign zero-filling conversion-digits
+*/
+static int fill_n(SprintfState *ss, const char *src, int srclen, int width,
+ int prec, int type, int flags)
+{
+ int zerowidth = 0;
+ int precwidth = 0;
+ int signwidth = 0;
+ int leftspaces = 0;
+ int rightspaces = 0;
+ int cvtwidth;
+ int rv;
+ char sign;
+
+ if ((type & 1) == 0) {
+ if (flags & FLAG_NEG) {
+ sign = '-';
+ signwidth = 1;
+ } else if (flags & FLAG_SIGNED) {
+ sign = '+';
+ signwidth = 1;
+ } else if (flags & FLAG_SPACED) {
+ sign = ' ';
+ signwidth = 1;
+ }
+ }
+ cvtwidth = signwidth + srclen;
+
+ if (prec > 0) {
+ if (prec > srclen) {
+ precwidth = prec - srclen; /* Need zero filling */
+ cvtwidth += precwidth;
+ }
+ }
+
+ if ((flags & FLAG_ZEROS) && (prec < 0)) {
+ if (width > cvtwidth) {
+ zerowidth = width - cvtwidth; /* Zero filling */
+ cvtwidth += zerowidth;
+ }
+ }
+
+ if (flags & FLAG_LEFT) {
+ if (width > cvtwidth) {
+ /* Space filling on the right (i.e. left adjusting) */
+ rightspaces = width - cvtwidth;
+ }
+ } else {
+ if (width > cvtwidth) {
+ /* Space filling on the left (i.e. right adjusting) */
+ leftspaces = width - cvtwidth;
+ }
+ }
+ while (--leftspaces >= 0) {
+ rv = (*ss->stuff)(ss, " ", 1);
+ if (rv < 0) {
+ return rv;
+ }
+ }
+ if (signwidth) {
+ rv = (*ss->stuff)(ss, &sign, 1);
+ if (rv < 0) {
+ return rv;
+ }
+ }
+ while (--precwidth >= 0) {
+ rv = (*ss->stuff)(ss, "0", 1);
+ if (rv < 0) {
+ return rv;
+ }
+ }
+ while (--zerowidth >= 0) {
+ rv = (*ss->stuff)(ss, "0", 1);
+ if (rv < 0) {
+ return rv;
+ }
+ }
+ rv = (*ss->stuff)(ss, src, (JSUint32)srclen);
+ if (rv < 0) {
+ return rv;
+ }
+ while (--rightspaces >= 0) {
+ rv = (*ss->stuff)(ss, " ", 1);
+ if (rv < 0) {
+ return rv;
+ }
+ }
+ return 0;
+}
+
+/*
+** Convert a long into its printable form
+*/
+static int cvt_l(SprintfState *ss, long num, int width, int prec, int radix,
+ int type, int flags, const char *hexp)
+{
+ char cvtbuf[100];
+ char *cvt;
+ int digits;
+
+ /* according to the man page this needs to happen */
+ if ((prec == 0) && (num == 0)) {
+ return 0;
+ }
+
+ /*
+ ** Converting decimal is a little tricky. In the unsigned case we
+ ** need to stop when we hit 10 digits. In the signed case, we can
+ ** stop when the number is zero.
+ */
+ cvt = cvtbuf + sizeof(cvtbuf);
+ digits = 0;
+ while (num) {
+ int digit = (((unsigned long)num) % radix) & 0xF;
+ *--cvt = hexp[digit];
+ digits++;
+ num = (long)(((unsigned long)num) / radix);
+ }
+ if (digits == 0) {
+ *--cvt = '0';
+ digits++;
+ }
+
+ /*
+ ** Now that we have the number converted without its sign, deal with
+ ** the sign and zero padding.
+ */
+ return fill_n(ss, cvt, digits, width, prec, type, flags);
+}
+
+/*
+** Convert a 64-bit integer into its printable form
+*/
+static int cvt_ll(SprintfState *ss, JSInt64 num, int width, int prec, int radix,
+ int type, int flags, const char *hexp)
+{
+ char cvtbuf[100];
+ char *cvt;
+ int digits;
+ JSInt64 rad;
+
+ /* according to the man page this needs to happen */
+ if ((prec == 0) && (JSLL_IS_ZERO(num))) {
+ return 0;
+ }
+
+ /*
+ ** Converting decimal is a little tricky. In the unsigned case we
+ ** need to stop when we hit 10 digits. In the signed case, we can
+ ** stop when the number is zero.
+ */
+ JSLL_I2L(rad, radix);
+ cvt = cvtbuf + sizeof(cvtbuf);
+ digits = 0;
+ while (!JSLL_IS_ZERO(num)) {
+ JSInt32 digit;
+ JSInt64 quot, rem;
+ JSLL_UDIVMOD(&quot, &rem, num, rad);
+ JSLL_L2I(digit, rem);
+ *--cvt = hexp[digit & 0xf];
+ digits++;
+ num = quot;
+ }
+ if (digits == 0) {
+ *--cvt = '0';
+ digits++;
+ }
+
+ /*
+ ** Now that we have the number converted without its sign, deal with
+ ** the sign and zero padding.
+ */
+ return fill_n(ss, cvt, digits, width, prec, type, flags);
+}
+
+/*
+** Convert a double precision floating point number into its printable
+** form.
+**
+** XXX stop using sprintf to convert floating point
+*/
+static int cvt_f(SprintfState *ss, double d, const char *fmt0, const char *fmt1)
+{
+ char fin[20];
+ char fout[300];
+ int amount = fmt1 - fmt0;
+
+ JS_ASSERT((amount > 0) && (amount < (int)sizeof(fin)));
+ if (amount >= (int)sizeof(fin)) {
+ /* Totally bogus % command to sprintf. Just ignore it */
+ return 0;
+ }
+ memcpy(fin, fmt0, (size_t)amount);
+ fin[amount] = 0;
+
+ /* Convert floating point using the native sprintf code */
+#ifdef DEBUG
+ {
+ const char *p = fin;
+ while (*p) {
+ JS_ASSERT(*p != 'L');
+ p++;
+ }
+ }
+#endif
+ sprintf(fout, fin, d);
+
+ /*
+ ** This assert will catch overflow's of fout, when building with
+ ** debugging on. At least this way we can track down the evil piece
+ ** of calling code and fix it!
+ */
+ JS_ASSERT(strlen(fout) < sizeof(fout));
+
+ return (*ss->stuff)(ss, fout, strlen(fout));
+}
+
+/*
+** Convert a string into its printable form. "width" is the output
+** width. "prec" is the maximum number of characters of "s" to output,
+** where -1 means until NUL.
+*/
+static int cvt_s(SprintfState *ss, const char *s, int width, int prec,
+ int flags)
+{
+ int slen;
+
+ if (prec == 0)
+ return 0;
+
+ /* Limit string length by precision value */
+ slen = s ? strlen(s) : 6; /* 6 == strlen("(null)"), printed below when s is null */
+ if (prec > 0) {
+ if (prec < slen) {
+ slen = prec;
+ }
+ }
+
+ /* and away we go */
+ return fill2(ss, s ? s : "(null)", slen, width, flags);
+}
+
+static int cvt_ws(SprintfState *ss, const jschar *ws, int width, int prec,
+ int flags)
+{
+ int result;
+ /*
+ * Supply NULL as the JSContext; errors are not reported,
+ * and malloc() is used to allocate the buffer.
+ */
+ if (ws) {
+ int slen = js_strlen(ws);
+ char *s = js_DeflateString(NULL, ws, slen);
+ if (!s)
+ return -1; /* JSStuffFunc error indicator. */
+ result = cvt_s(ss, s, width, prec, flags);
+ free(s);
+ } else {
+ result = cvt_s(ss, NULL, width, prec, flags);
+ }
+ return result;
+}
+
+/*
+** BuildArgArray stands for Numbered Argument list Sprintf;
+** for example,
+**     fmt = "%4$i, %2$d, %3$s, %1$d";
+** The argument numbers must start from 1 and may not have gaps.
+*/
+
+static struct NumArgState* BuildArgArray( const char *fmt, va_list ap, int* rv, struct NumArgState* nasArray )
+{
+ int number = 0, cn = 0, i;
+ const char *p;
+ char c;
+ struct NumArgState *nas;
+
+
+ /*
+ ** first pass:
+ ** determine how many legal % conversions we have, then allocate space
+ */
+
+ p = fmt;
+ *rv = 0;
+ i = 0;
+ while( ( c = *p++ ) != 0 ){
+ if( c != '%' )
+ continue;
+ if( ( c = *p++ ) == '%' ) /* skip %% case */
+ continue;
+
+ while( c != 0 ){
+ if( c > '9' || c < '0' ){
+ if( c == '$' ){ /* numbered argument case */
+ if( i > 0 ){
+ *rv = -1;
+ return NULL;
+ }
+ number++;
+ } else { /* non-numbered argument case */
+ if( number > 0 ){
+ *rv = -1;
+ return NULL;
+ }
+ i = 1;
+ }
+ break;
+ }
+
+ c = *p++;
+ }
+ }
+
+ if( number == 0 ){
+ return NULL;
+ }
+
+
+ if( number > NAS_DEFAULT_NUM ){
+ nas = (struct NumArgState*)malloc( number * sizeof( struct NumArgState ) );
+ if( !nas ){
+ *rv = -1;
+ return NULL;
+ }
+ } else {
+ nas = nasArray;
+ }
+
+ for( i = 0; i < number; i++ ){
+ nas[i].type = TYPE_UNKNOWN;
+ }
+
+
+ /*
+ ** second pass:
+ ** set nas[].type
+ */
+
+ p = fmt;
+ while( ( c = *p++ ) != 0 ){
+ if( c != '%' ) continue;
+ c = *p++;
+ if( c == '%' ) continue;
+
+ cn = 0;
+ while( c && c != '$' ){ /* should improve error check later */
+ cn = cn*10 + c - '0';
+ c = *p++;
+ }
+
+ if( !c || cn < 1 || cn > number ){
+ *rv = -1;
+ break;
+ }
+
+ /* nas[cn] starts from 0, and make sure nas[cn].type is not assigned */
+ cn--;
+ if( nas[cn].type != TYPE_UNKNOWN )
+ continue;
+
+ c = *p++;
+
+ /* width */
+ if (c == '*') {
+ /* '*' is not supported here, because that extra argument would not be numbered */
+ *rv = -1;
+ break;
+ }
+
+ while ((c >= '0') && (c <= '9')) {
+ c = *p++;
+ }
+
+ /* precision */
+ if (c == '.') {
+ c = *p++;
+ if (c == '*') {
+ /* '*' is not supported here, because that extra argument would not be numbered */
+ *rv = -1;
+ break;
+ }
+
+ while ((c >= '0') && (c <= '9')) {
+ c = *p++;
+ }
+ }
+
+ /* size */
+ nas[cn].type = TYPE_INTN;
+ if (c == 'h') {
+ nas[cn].type = TYPE_INT16;
+ c = *p++;
+ } else if (c == 'L') {
+ /* XXX not quite sure here */
+ nas[cn].type = TYPE_INT64;
+ c = *p++;
+ } else if (c == 'l') {
+ nas[cn].type = TYPE_INT32;
+ c = *p++;
+ if (c == 'l') {
+ nas[cn].type = TYPE_INT64;
+ c = *p++;
+ }
+ }
+
+ /* format */
+ switch (c) {
+ case 'd':
+ case 'c':
+ case 'i':
+ case 'o':
+ case 'u':
+ case 'x':
+ case 'X':
+ break;
+
+ case 'e':
+ case 'f':
+ case 'g':
+ nas[ cn ].type = TYPE_DOUBLE;
+ break;
+
+ case 'p':
+ /* XXX should use cpp */
+ if (sizeof(void *) == sizeof(JSInt32)) {
+ nas[ cn ].type = TYPE_UINT32;
+ } else if (sizeof(void *) == sizeof(JSInt64)) {
+ nas[ cn ].type = TYPE_UINT64;
+ } else if (sizeof(void *) == sizeof(JSIntn)) {
+ nas[ cn ].type = TYPE_UINTN;
+ } else {
+ nas[ cn ].type = TYPE_UNKNOWN;
+ }
+ break;
+
+ case 'C':
+ case 'S':
+ case 'E':
+ case 'G':
+ /* XXX not supported I suppose */
+ JS_ASSERT(0);
+ nas[ cn ].type = TYPE_UNKNOWN;
+ break;
+
+ case 's':
+ nas[ cn ].type = (nas[ cn ].type == TYPE_UINT16) ? TYPE_WSTRING : TYPE_STRING;
+ break;
+
+ case 'n':
+ nas[ cn ].type = TYPE_INTSTR;
+ break;
+
+ default:
+ JS_ASSERT(0);
+ nas[ cn ].type = TYPE_UNKNOWN;
+ break;
+ }
+
+ /* make sure we got a legal conversion type */
+ if( nas[ cn ].type == TYPE_UNKNOWN ){
+ *rv = -1;
+ break;
+ }
+ }
+
+
+ /*
+ ** third pass
+ ** fill the nas[cn].ap
+ */
+
+ if( *rv < 0 ){
+ if( nas != nasArray )
+ free( nas );
+ return NULL;
+ }
+
+ cn = 0;
+ while( cn < number ){
+ if( nas[cn].type == TYPE_UNKNOWN ){
+ cn++;
+ continue;
+ }
+
+ VARARGS_ASSIGN(nas[cn].ap, ap);
+
+ switch( nas[cn].type ){
+ case TYPE_INT16:
+ case TYPE_UINT16:
+ case TYPE_INTN:
+ case TYPE_UINTN: (void)va_arg( ap, JSIntn ); break;
+
+ case TYPE_INT32: (void)va_arg( ap, JSInt32 ); break;
+
+ case TYPE_UINT32: (void)va_arg( ap, JSUint32 ); break;
+
+ case TYPE_INT64: (void)va_arg( ap, JSInt64 ); break;
+
+ case TYPE_UINT64: (void)va_arg( ap, JSUint64 ); break;
+
+ case TYPE_STRING: (void)va_arg( ap, char* ); break;
+
+ case TYPE_WSTRING: (void)va_arg( ap, jschar* ); break;
+
+ case TYPE_INTSTR: (void)va_arg( ap, JSIntn* ); break;
+
+ case TYPE_DOUBLE: (void)va_arg( ap, double ); break;
+
+ default:
+ if( nas != nasArray )
+ free( nas );
+ *rv = -1;
+ return NULL;
+ }
+
+ cn++;
+ }
+
+
+ return nas;
+}
+
+/*
+** The workhorse sprintf code.
+*/
+static int dosprintf(SprintfState *ss, const char *fmt, va_list ap)
+{
+ char c;
+ int flags, width, prec, radix, type;
+ union {
+ char ch;
+ jschar wch;
+ int i;
+ long l;
+ JSInt64 ll;
+ double d;
+ const char *s;
+ const jschar* ws;
+ int *ip;
+ } u;
+ const char *fmt0;
+ static char *hex = "0123456789abcdef";
+ static char *HEX = "0123456789ABCDEF";
+ char *hexp;
+ int rv, i;
+ struct NumArgState *nas = NULL;
+ struct NumArgState nasArray[ NAS_DEFAULT_NUM ];
+ char pattern[20];
+ const char *dolPt = NULL; /* in "%4$.2f", dolPt will point to the '.' */
+#ifdef JS_C_STRINGS_ARE_UTF8
+ char utf8buf[6];
+ int utf8len;
+#endif
+
+ /*
+ ** build an argument array, IF the fmt is numbered argument
+ ** list style, to contain the Numbered Argument list pointers
+ */
+
+ nas = BuildArgArray( fmt, ap, &rv, nasArray );
+ if( rv < 0 ){
+ /* the fmt string contains an invalid numbered-argument format, jliu@netscape.com */
+ JS_ASSERT(0);
+ return rv;
+ }
+
+ while ((c = *fmt++) != 0) {
+ if (c != '%') {
+ rv = (*ss->stuff)(ss, fmt - 1, 1);
+ if (rv < 0) {
+ return rv;
+ }
+ continue;
+ }
+ fmt0 = fmt - 1;
+
+ /*
+ ** Gobble up the % format string. Hopefully we have handled all
+ ** of the strange cases!
+ */
+ flags = 0;
+ c = *fmt++;
+ if (c == '%') {
+ /* quoting a % with %% */
+ rv = (*ss->stuff)(ss, fmt - 1, 1);
+ if (rv < 0) {
+ return rv;
+ }
+ continue;
+ }
+
+ if( nas != NULL ){
+ /* the fmt contains the Numbered Arguments feature */
+ i = 0;
+ while( c && c != '$' ){ /* should improve error check later */
+ i = ( i * 10 ) + ( c - '0' );
+ c = *fmt++;
+ }
+
+ if( nas[i-1].type == TYPE_UNKNOWN ){
+ if( nas && ( nas != nasArray ) )
+ free( nas );
+ return -1;
+ }
+
+ ap = nas[i-1].ap;
+ dolPt = fmt;
+ c = *fmt++;
+ }
+
+ /*
+ * Examine optional flags. Note that we do not implement the
+ * '#' flag of sprintf(). The ANSI C spec. of the '#' flag is
+ * somewhat ambiguous and not ideal, which is perhaps why
+ * the various sprintf() implementations are inconsistent
+ * on this feature.
+ */
+ while ((c == '-') || (c == '+') || (c == ' ') || (c == '0')) {
+ if (c == '-') flags |= FLAG_LEFT;
+ if (c == '+') flags |= FLAG_SIGNED;
+ if (c == ' ') flags |= FLAG_SPACED;
+ if (c == '0') flags |= FLAG_ZEROS;
+ c = *fmt++;
+ }
+ if (flags & FLAG_SIGNED) flags &= ~FLAG_SPACED;
+ if (flags & FLAG_LEFT) flags &= ~FLAG_ZEROS;
+
+ /* width */
+ if (c == '*') {
+ c = *fmt++;
+ width = va_arg(ap, int);
+ } else {
+ width = 0;
+ while ((c >= '0') && (c <= '9')) {
+ width = (width * 10) + (c - '0');
+ c = *fmt++;
+ }
+ }
+
+ /* precision */
+ prec = -1;
+ if (c == '.') {
+ c = *fmt++;
+ if (c == '*') {
+ c = *fmt++;
+ prec = va_arg(ap, int);
+ } else {
+ prec = 0;
+ while ((c >= '0') && (c <= '9')) {
+ prec = (prec * 10) + (c - '0');
+ c = *fmt++;
+ }
+ }
+ }
+
+ /* size */
+ type = TYPE_INTN;
+ if (c == 'h') {
+ type = TYPE_INT16;
+ c = *fmt++;
+ } else if (c == 'L') {
+ /* XXX not quite sure here */
+ type = TYPE_INT64;
+ c = *fmt++;
+ } else if (c == 'l') {
+ type = TYPE_INT32;
+ c = *fmt++;
+ if (c == 'l') {
+ type = TYPE_INT64;
+ c = *fmt++;
+ }
+ }
+
+ /* format */
+ hexp = hex;
+ switch (c) {
+ case 'd': case 'i': /* decimal/integer */
+ radix = 10;
+ goto fetch_and_convert;
+
+ case 'o': /* octal */
+ radix = 8;
+ type |= 1;
+ goto fetch_and_convert;
+
+ case 'u': /* unsigned decimal */
+ radix = 10;
+ type |= 1;
+ goto fetch_and_convert;
+
+ case 'x': /* unsigned hex */
+ radix = 16;
+ type |= 1;
+ goto fetch_and_convert;
+
+ case 'X': /* unsigned HEX */
+ radix = 16;
+ hexp = HEX;
+ type |= 1;
+ goto fetch_and_convert;
+
+ fetch_and_convert:
+ switch (type) {
+ case TYPE_INT16:
+ u.l = va_arg(ap, int);
+ if (u.l < 0) {
+ u.l = -u.l;
+ flags |= FLAG_NEG;
+ }
+ goto do_long;
+ case TYPE_UINT16:
+ u.l = va_arg(ap, int) & 0xffff;
+ goto do_long;
+ case TYPE_INTN:
+ u.l = va_arg(ap, int);
+ if (u.l < 0) {
+ u.l = -u.l;
+ flags |= FLAG_NEG;
+ }
+ goto do_long;
+ case TYPE_UINTN:
+ u.l = (long)va_arg(ap, unsigned int);
+ goto do_long;
+
+ case TYPE_INT32:
+ u.l = va_arg(ap, JSInt32);
+ if (u.l < 0) {
+ u.l = -u.l;
+ flags |= FLAG_NEG;
+ }
+ goto do_long;
+ case TYPE_UINT32:
+ u.l = (long)va_arg(ap, JSUint32);
+ do_long:
+ rv = cvt_l(ss, u.l, width, prec, radix, type, flags, hexp);
+ if (rv < 0) {
+ return rv;
+ }
+ break;
+
+ case TYPE_INT64:
+ u.ll = va_arg(ap, JSInt64);
+ if (!JSLL_GE_ZERO(u.ll)) {
+ JSLL_NEG(u.ll, u.ll);
+ flags |= FLAG_NEG;
+ }
+ goto do_longlong;
+ case TYPE_UINT64:
+ u.ll = va_arg(ap, JSUint64);
+ do_longlong:
+ rv = cvt_ll(ss, u.ll, width, prec, radix, type, flags, hexp);
+ if (rv < 0) {
+ return rv;
+ }
+ break;
+ }
+ break;
+
+ case 'e':
+ case 'E':
+ case 'f':
+ case 'g':
+ u.d = va_arg(ap, double);
+ if( nas != NULL ){
+ i = fmt - dolPt;
+ if( i < (int)sizeof( pattern ) ){
+ pattern[0] = '%';
+ memcpy( &pattern[1], dolPt, (size_t)i );
+ rv = cvt_f(ss, u.d, pattern, &pattern[i+1] );
+ }
+ } else
+ rv = cvt_f(ss, u.d, fmt0, fmt);
+
+ if (rv < 0) {
+ return rv;
+ }
+ break;
+
+ case 'c':
+ if ((flags & FLAG_LEFT) == 0) {
+ while (width-- > 1) {
+ rv = (*ss->stuff)(ss, " ", 1);
+ if (rv < 0) {
+ return rv;
+ }
+ }
+ }
+ switch (type) {
+ case TYPE_INT16:
+ /* Treat %hc as %c if JS_C_STRINGS_ARE_UTF8 is undefined. */
+#ifdef JS_C_STRINGS_ARE_UTF8
+ u.wch = va_arg(ap, int);
+ utf8len = js_OneUcs4ToUtf8Char (utf8buf, u.wch);
+ rv = (*ss->stuff)(ss, utf8buf, utf8len);
+ break;
+#endif
+ case TYPE_INTN:
+ u.ch = va_arg(ap, int);
+ rv = (*ss->stuff)(ss, &u.ch, 1);
+ break;
+ }
+ if (rv < 0) {
+ return rv;
+ }
+ if (flags & FLAG_LEFT) {
+ while (width-- > 1) {
+ rv = (*ss->stuff)(ss, " ", 1);
+ if (rv < 0) {
+ return rv;
+ }
+ }
+ }
+ break;
+
+ case 'p':
+ if (sizeof(void *) == sizeof(JSInt32)) {
+ type = TYPE_UINT32;
+ } else if (sizeof(void *) == sizeof(JSInt64)) {
+ type = TYPE_UINT64;
+ } else if (sizeof(void *) == sizeof(int)) {
+ type = TYPE_UINTN;
+ } else {
+ JS_ASSERT(0);
+ break;
+ }
+ radix = 16;
+ goto fetch_and_convert;
+
+#if 0
+ case 'C':
+ case 'S':
+ case 'E':
+ case 'G':
+ /* XXX not supported I suppose */
+ JS_ASSERT(0);
+ break;
+#endif
+
+ case 's':
+ if(type == TYPE_INT16) {
+ /*
+ * This would do a simple string/byte conversion
+ * if JS_C_STRINGS_ARE_UTF8 is not defined.
+ */
+ u.ws = va_arg(ap, const jschar*);
+ rv = cvt_ws(ss, u.ws, width, prec, flags);
+ } else {
+ u.s = va_arg(ap, const char*);
+ rv = cvt_s(ss, u.s, width, prec, flags);
+ }
+ if (rv < 0) {
+ return rv;
+ }
+ break;
+
+ case 'n':
+ u.ip = va_arg(ap, int*);
+ if (u.ip) {
+ *u.ip = ss->cur - ss->base;
+ }
+ break;
+
+ default:
+ /* Not a % token after all... skip it */
+#if 0
+ JS_ASSERT(0);
+#endif
+ rv = (*ss->stuff)(ss, "%", 1);
+ if (rv < 0) {
+ return rv;
+ }
+ rv = (*ss->stuff)(ss, fmt - 1, 1);
+ if (rv < 0) {
+ return rv;
+ }
+ }
+ }
+
+ /* Stuff trailing NUL */
+ rv = (*ss->stuff)(ss, "\0", 1);
+
+ if( nas && ( nas != nasArray ) ){
+ free( nas );
+ }
+
+ return rv;
+}
+
+/************************************************************************/
+
+static int FuncStuff(SprintfState *ss, const char *sp, JSUint32 len)
+{
+ int rv;
+
+ rv = (*ss->func)(ss->arg, sp, len);
+ if (rv < 0) {
+ return rv;
+ }
+ ss->maxlen += len;
+ return 0;
+}
+
+JS_PUBLIC_API(JSUint32) JS_sxprintf(JSStuffFunc func, void *arg,
+ const char *fmt, ...)
+{
+ va_list ap;
+ int rv;
+
+ va_start(ap, fmt);
+ rv = JS_vsxprintf(func, arg, fmt, ap);
+ va_end(ap);
+ return rv;
+}
+
+JS_PUBLIC_API(JSUint32) JS_vsxprintf(JSStuffFunc func, void *arg,
+ const char *fmt, va_list ap)
+{
+ SprintfState ss;
+ int rv;
+
+ ss.stuff = FuncStuff;
+ ss.func = func;
+ ss.arg = arg;
+ ss.maxlen = 0;
+ rv = dosprintf(&ss, fmt, ap);
+ return (rv < 0) ? (JSUint32)-1 : ss.maxlen;
+}
+
+/*
+** Stuff routine that automatically grows the malloc'd output buffer
+** before it overflows.
+*/
+static int GrowStuff(SprintfState *ss, const char *sp, JSUint32 len)
+{
+ ptrdiff_t off;
+ char *newbase;
+ JSUint32 newlen;
+
+ off = ss->cur - ss->base;
+ if (off + len >= ss->maxlen) {
+ /* Grow the buffer */
+ newlen = ss->maxlen + ((len > 32) ? len : 32);
+ if (ss->base) {
+ newbase = (char*) realloc(ss->base, newlen);
+ } else {
+ newbase = (char*) malloc(newlen);
+ }
+ if (!newbase) {
+ /* Ran out of memory */
+ return -1;
+ }
+ ss->base = newbase;
+ ss->maxlen = newlen;
+ ss->cur = ss->base + off;
+ }
+
+ /* Copy data */
+ while (len) {
+ --len;
+ *ss->cur++ = *sp++;
+ }
+ JS_ASSERT((JSUint32)(ss->cur - ss->base) <= ss->maxlen);
+ return 0;
+}
+
+/*
+** sprintf into a malloc'd buffer
+*/
+JS_PUBLIC_API(char *) JS_smprintf(const char *fmt, ...)
+{
+ va_list ap;
+ char *rv;
+
+ va_start(ap, fmt);
+ rv = JS_vsmprintf(fmt, ap);
+ va_end(ap);
+ return rv;
+}
+
+/*
+** Free memory allocated, for the caller, by JS_smprintf
+*/
+JS_PUBLIC_API(void) JS_smprintf_free(char *mem)
+{
+ free(mem);
+}
+
+JS_PUBLIC_API(char *) JS_vsmprintf(const char *fmt, va_list ap)
+{
+ SprintfState ss;
+ int rv;
+
+ ss.stuff = GrowStuff;
+ ss.base = 0;
+ ss.cur = 0;
+ ss.maxlen = 0;
+ rv = dosprintf(&ss, fmt, ap);
+ if (rv < 0) {
+ if (ss.base) {
+ free(ss.base);
+ }
+ return 0;
+ }
+ return ss.base;
+}
+
+/*
+** Stuff routine that discards overflow data
+*/
+static int LimitStuff(SprintfState *ss, const char *sp, JSUint32 len)
+{
+ JSUint32 limit = ss->maxlen - (ss->cur - ss->base);
+
+ if (len > limit) {
+ len = limit;
+ }
+ while (len) {
+ --len;
+ *ss->cur++ = *sp++;
+ }
+ return 0;
+}
+
+/*
+** sprintf into a fixed size buffer. Make sure there is a NUL at the end
+** when finished.
+*/
+JS_PUBLIC_API(JSUint32) JS_snprintf(char *out, JSUint32 outlen, const char *fmt, ...)
+{
+ va_list ap;
+ int rv;
+
+ JS_ASSERT((JSInt32)outlen > 0);
+ if ((JSInt32)outlen <= 0) {
+ return 0;
+ }
+
+ va_start(ap, fmt);
+ rv = JS_vsnprintf(out, outlen, fmt, ap);
+ va_end(ap);
+ return rv;
+}
+
+JS_PUBLIC_API(JSUint32) JS_vsnprintf(char *out, JSUint32 outlen,const char *fmt,
+ va_list ap)
+{
+ SprintfState ss;
+ JSUint32 n;
+
+ JS_ASSERT((JSInt32)outlen > 0);
+ if ((JSInt32)outlen <= 0) {
+ return 0;
+ }
+
+ ss.stuff = LimitStuff;
+ ss.base = out;
+ ss.cur = out;
+ ss.maxlen = outlen;
+ (void) dosprintf(&ss, fmt, ap);
+
+ /* If we added chars, and we didn't append a null, do it now. */
+ if( (ss.cur != ss.base) && (ss.cur[-1] != '\0') )
+ ss.cur[-1] = '\0';
+
+ n = ss.cur - ss.base;
+ return n ? n - 1 : n;
+}
+
+JS_PUBLIC_API(char *) JS_sprintf_append(char *last, const char *fmt, ...)
+{
+ va_list ap;
+ char *rv;
+
+ va_start(ap, fmt);
+ rv = JS_vsprintf_append(last, fmt, ap);
+ va_end(ap);
+ return rv;
+}
+
+JS_PUBLIC_API(char *) JS_vsprintf_append(char *last, const char *fmt, va_list ap)
+{
+ SprintfState ss;
+ int rv;
+
+ ss.stuff = GrowStuff;
+ if (last) {
+ int lastlen = strlen(last);
+ ss.base = last;
+ ss.cur = last + lastlen;
+ ss.maxlen = lastlen;
+ } else {
+ ss.base = 0;
+ ss.cur = 0;
+ ss.maxlen = 0;
+ }
+ rv = dosprintf(&ss, fmt, ap);
+ if (rv < 0) {
+ if (ss.base) {
+ free(ss.base);
+ }
+ return 0;
+ }
+ return ss.base;
+}
+
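
A hedged usage sketch of the numbered-argument support that BuildArgArray and dosprintf implement above: every conversion must carry an "n$" index, starting at 1 with no gaps. The format string and the expected output in the comment are purely illustrative.

#include "jsprf.h"

static void
demo_positional(void)
{
    /* %2$s consumes the second argument, %1$d the first. */
    char *msg = JS_smprintf("%2$s = %1$d", 42, "answer");  /* "answer = 42" */
    if (msg) {
        /* ... use msg ... */
        JS_smprintf_free(msg);      /* release the buffer JS_smprintf malloc'd */
    }
}
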
diff --git a/src/third_party/js-1.7/jsprf.h b/src/third_party/js-1.7/jsprf.h
new file mode 100644
index 00000000000..0eb910f279d
--- /dev/null
+++ b/src/third_party/js-1.7/jsprf.h
@@ -0,0 +1,150 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsprf_h___
+#define jsprf_h___
+
+/*
+** API for PR printf like routines. Supports the following formats
+** %d - decimal
+** %u - unsigned decimal
+** %x - unsigned hex
+** %X - unsigned uppercase hex
+** %o - unsigned octal
+** %hd, %hu, %hx, %hX, %ho - 16-bit versions of above
+** %ld, %lu, %lx, %lX, %lo - 32-bit versions of above
+** %lld, %llu, %llx, %llX, %llo - 64 bit versions of above
+** %s - string
+** %hs - 16-bit version of above (only available if compiled with JS_C_STRINGS_ARE_UTF8)
+** %c - character
+** %hc - 16-bit version of above (only available if compiled with JS_C_STRINGS_ARE_UTF8)
+** %p - pointer (deals with machine dependent pointer size)
+** %f - float
+** %g - float
+*/
+#include "jstypes.h"
+#include <stdio.h>
+#include <stdarg.h>
+
+JS_BEGIN_EXTERN_C
+
+/*
+** sprintf into a fixed size buffer. Guarantees that a NUL is at the end
+** of the buffer. Returns the length of the written output, NOT including
+** the NUL, or (JSUint32)-1 if an error occurs.
+*/
+extern JS_PUBLIC_API(JSUint32) JS_snprintf(char *out, JSUint32 outlen, const char *fmt, ...);
+
+/*
+** sprintf into a malloc'd buffer. Return a pointer to the malloc'd
+** buffer on success, NULL on failure. Call "JS_smprintf_free" to release
+** the memory returned.
+*/
+extern JS_PUBLIC_API(char*) JS_smprintf(const char *fmt, ...);
+
+/*
+** Free the memory allocated, for the caller, by JS_smprintf
+*/
+extern JS_PUBLIC_API(void) JS_smprintf_free(char *mem);
+
+/*
+** "append" sprintf into a malloc'd buffer. "last" is the last value of
+** the malloc'd buffer. sprintf will append data to the end of last,
+** growing it as necessary using realloc. If last is NULL, JS_sprintf_append
+** will allocate the initial string. The return value is the new value of
+** last for subsequent calls, or NULL if there is a malloc failure.
+*/
+extern JS_PUBLIC_API(char*) JS_sprintf_append(char *last, const char *fmt, ...);
+
+/*
+** sprintf into a function. The function "f" is called with a string to
+** place into the output. "arg" is an opaque pointer used by the stuff
+** function to hold any state needed to do the storage of the output
+** data. The return value is a count of the number of characters fed to
+** the stuff function, or (JSUint32)-1 if an error occurs.
+*/
+typedef JSIntn (*JSStuffFunc)(void *arg, const char *s, JSUint32 slen);
+
+extern JS_PUBLIC_API(JSUint32) JS_sxprintf(JSStuffFunc f, void *arg, const char *fmt, ...);
+
+/*
+** va_list forms of the above.
+*/
+extern JS_PUBLIC_API(JSUint32) JS_vsnprintf(char *out, JSUint32 outlen, const char *fmt, va_list ap);
+extern JS_PUBLIC_API(char*) JS_vsmprintf(const char *fmt, va_list ap);
+extern JS_PUBLIC_API(char*) JS_vsprintf_append(char *last, const char *fmt, va_list ap);
+extern JS_PUBLIC_API(JSUint32) JS_vsxprintf(JSStuffFunc f, void *arg, const char *fmt, va_list ap);
+
+/*
+***************************************************************************
+** FUNCTION: JS_sscanf
+** DESCRIPTION:
+** JS_sscanf() scans the input character string, performs data
+** conversions, and stores the converted values in the data objects
+** pointed to by its arguments according to the format control
+** string.
+**
+** JS_sscanf() behaves the same way as the sscanf() function in the
+** Standard C Library (stdio.h), with the following exceptions:
+** - JS_sscanf() handles the NSPR integer and floating point types,
+** such as JSInt16, JSInt32, JSInt64, and JSFloat64, whereas
+** sscanf() handles the standard C types like short, int, long,
+** and double.
+** - JS_sscanf() has no multibyte character support, while sscanf()
+** does.
+** INPUTS:
+** const char *buf
+** a character string holding the input to scan
+** const char *fmt
+** the format control string for the conversions
+** ...
+** variable number of arguments, each of them is a pointer to
+** a data object in which the converted value will be stored
+** OUTPUTS: none
+** RETURNS: JSInt32
+** The number of values converted and stored.
+** RESTRICTIONS:
+** Multibyte characters in 'buf' or 'fmt' are not allowed.
+***************************************************************************
+*/
+
+extern JS_PUBLIC_API(JSInt32) JS_sscanf(const char *buf, const char *fmt, ...);
+
+JS_END_EXTERN_C
+
+#endif /* jsprf_h___ */
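
A minimal sketch of the JSStuffFunc callback form declared above, assuming a caller that only counts the bytes produced; CountStuff and CountFormatted are hypothetical names, not part of this API.

#include "jsprf.h"

/* Receives successive chunks of formatted output, trailing NUL included. */
static JSIntn
CountStuff(void *arg, const char *s, JSUint32 slen)
{
    *(JSUint32 *)arg += slen;
    return 0;                   /* a negative return would abort formatting */
}

static JSUint32
CountFormatted(void)
{
    JSUint32 total = 0;
    JS_sxprintf(CountStuff, &total, "%d widgets", 7);
    return total;               /* same count that JS_sxprintf itself returns */
}
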
diff --git a/src/third_party/js-1.7/jsproto.tbl b/src/third_party/js-1.7/jsproto.tbl
new file mode 100644
index 00000000000..18f2355945c
--- /dev/null
+++ b/src/third_party/js-1.7/jsproto.tbl
@@ -0,0 +1,116 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set sw=4 ts=8 et tw=80 ft=c:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is SpiderMonkey 1.7 work in progress, released
+ * February 14, 2006.
+ *
+ * The Initial Developer of the Original Code is
+ * Brendan Eich <brendan@mozilla.org>
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#include "jsconfig.h"
+
+#if JS_HAS_SCRIPT_OBJECT
+# define SCRIPT_INIT js_InitScriptClass
+#else
+# define SCRIPT_INIT js_InitNullClass
+#endif
+
+#if JS_HAS_XML_SUPPORT
+# define XML_INIT js_InitXMLClass
+# define NAMESPACE_INIT js_InitNamespaceClass
+# define QNAME_INIT js_InitQNameClass
+# define ANYNAME_INIT js_InitAnyNameClass
+# define ATTRIBUTE_INIT js_InitAttributeNameClass
+#else
+# define XML_INIT js_InitNullClass
+# define NAMESPACE_INIT js_InitNullClass
+# define QNAME_INIT js_InitNullClass
+# define ANYNAME_INIT js_InitNullClass
+# define ATTRIBUTE_INIT js_InitNullClass
+#endif
+
+#if JS_HAS_GENERATORS
+# define GENERATOR_INIT js_InitIteratorClasses
+#else
+# define GENERATOR_INIT js_InitNullClass
+#endif
+
+#if JS_HAS_FILE_OBJECT
+# define FILE_INIT js_InitFileClass
+#else
+# define FILE_INIT js_InitNullClass
+#endif
+
+/*
+ * Enumerator codes in the second column must not change -- they are part of
+ * the JS XDR API.
+ */
+JS_PROTO(Null, 0, js_InitNullClass)
+JS_PROTO(Object, 1, js_InitFunctionAndObjectClasses)
+JS_PROTO(Function, 2, js_InitFunctionAndObjectClasses)
+JS_PROTO(Array, 3, js_InitArrayClass)
+JS_PROTO(Boolean, 4, js_InitBooleanClass)
+JS_PROTO(Call, 5, js_InitCallClass)
+JS_PROTO(Date, 6, js_InitDateClass)
+JS_PROTO(Math, 7, js_InitMathClass)
+JS_PROTO(Number, 8, js_InitNumberClass)
+JS_PROTO(String, 9, js_InitStringClass)
+JS_PROTO(RegExp, 10, js_InitRegExpClass)
+JS_PROTO(Script, 11, SCRIPT_INIT)
+JS_PROTO(XML, 12, XML_INIT)
+JS_PROTO(Namespace, 13, NAMESPACE_INIT)
+JS_PROTO(QName, 14, QNAME_INIT)
+JS_PROTO(AnyName, 15, ANYNAME_INIT)
+JS_PROTO(AttributeName, 16, ATTRIBUTE_INIT)
+JS_PROTO(Error, 17, js_InitExceptionClasses)
+JS_PROTO(InternalError, 18, js_InitExceptionClasses)
+JS_PROTO(EvalError, 19, js_InitExceptionClasses)
+JS_PROTO(RangeError, 20, js_InitExceptionClasses)
+JS_PROTO(ReferenceError, 21, js_InitExceptionClasses)
+JS_PROTO(SyntaxError, 22, js_InitExceptionClasses)
+JS_PROTO(TypeError, 23, js_InitExceptionClasses)
+JS_PROTO(URIError, 24, js_InitExceptionClasses)
+JS_PROTO(Generator, 25, GENERATOR_INIT)
+JS_PROTO(Iterator, 26, js_InitIteratorClasses)
+JS_PROTO(StopIteration, 27, js_InitIteratorClasses)
+JS_PROTO(UnusedProto28, 28, js_InitNullClass)
+JS_PROTO(File, 29, FILE_INIT)
+JS_PROTO(Block, 30, js_InitBlockClass)
+
+#undef SCRIPT_INIT
+#undef XML_INIT
+#undef NAMESPACE_INIT
+#undef QNAME_INIT
+#undef ANYNAME_INIT
+#undef ATTRIBUTE_INIT
+#undef GENERATOR_INIT
+#undef FILE_INIT
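
jsproto.tbl is an X-macro table: each consumer defines JS_PROTO(name, code, init) before including the file and undefines it afterwards, so the fixed XDR codes in the second column flow straight into the generated definitions. The sketch below shows the usual enum-building expansion; the JSProtoKey spelling mirrors how the engine's public headers consume the table, but treat it as illustrative rather than a verbatim copy of them.

/* Expand each row into an enumerator fixed to its XDR code. */
typedef enum JSProtoKey {
#define JS_PROTO(name, code, init) JSProto_##name = code,
#include "jsproto.tbl"
#undef JS_PROTO
    JSProto_LIMIT
} JSProtoKey;
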
diff --git a/src/third_party/js-1.7/jsprvtd.h b/src/third_party/js-1.7/jsprvtd.h
new file mode 100644
index 00000000000..f71b9a508b0
--- /dev/null
+++ b/src/third_party/js-1.7/jsprvtd.h
@@ -0,0 +1,202 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsprvtd_h___
+#define jsprvtd_h___
+/*
+ * JS private typename definitions.
+ *
+ * This header is included only in other .h files, for convenience and for
+ * simplicity of type naming. The alternative for structures is to use tags,
+ * which are named the same as their typedef names (legal in C/C++, and less
+ * noisy than suffixing the typedef name with "Struct" or "Str"). Instead,
+ * all .h files that include this file may use the same typedef name, whether
+ * declaring a pointer to struct type, or defining a member of struct type.
+ *
+ * A few fundamental scalar types are defined here too. Neither the scalar
+ * nor the struct typedefs should change much, therefore the nearly-global
+ * make dependency induced by this file should not prove painful.
+ */
+
+#include "jspubtd.h"
+
+/* Internal identifier (jsid) macros. */
+#define JSID_ATOM 0x0
+#define JSID_INT 0x1
+#define JSID_OBJECT 0x2
+#define JSID_TAGMASK 0x3
+#define JSID_TAG(id) ((id) & JSID_TAGMASK)
+#define JSID_SETTAG(id,t) ((id) | (t))
+#define JSID_CLRTAG(id) ((id) & ~(jsid)JSID_TAGMASK)
+
+#define JSID_IS_ATOM(id) (JSID_TAG(id) == JSID_ATOM)
+#define JSID_TO_ATOM(id) ((JSAtom *)(id))
+#define ATOM_TO_JSID(atom) ((jsid)(atom))
+#define ATOM_JSID_TO_JSVAL(id) ATOM_KEY(JSID_TO_ATOM(id))
+
+#define JSID_IS_INT(id) ((id) & JSID_INT)
+#define JSID_TO_INT(id) ((jsint)(id) >> 1)
+#define INT_TO_JSID(i) (((jsint)(i) << 1) | JSID_INT)
+#define INT_JSID_TO_JSVAL(id) (id)
+#define INT_JSVAL_TO_JSID(v) (v)
+
+#define JSID_IS_OBJECT(id) (JSID_TAG(id) == JSID_OBJECT)
+#define JSID_TO_OBJECT(id) ((JSObject *) JSID_CLRTAG(id))
+#define OBJECT_TO_JSID(obj) ((jsid)(obj) | JSID_OBJECT)
+#define OBJECT_JSID_TO_JSVAL(id) OBJECT_TO_JSVAL(JSID_CLRTAG(id))
+#define OBJECT_JSVAL_TO_JSID(v) OBJECT_TO_JSID(JSVAL_TO_OBJECT(v))
+
+/* Scalar typedefs. */
+typedef uint8 jsbytecode;
+typedef uint8 jssrcnote;
+typedef uint32 jsatomid;
+
+/* Struct typedefs. */
+typedef struct JSArgumentFormatMap JSArgumentFormatMap;
+typedef struct JSCodeGenerator JSCodeGenerator;
+typedef struct JSDependentString JSDependentString;
+typedef struct JSGCThing JSGCThing;
+typedef struct JSGenerator JSGenerator;
+typedef struct JSParseNode JSParseNode;
+typedef struct JSSharpObjectMap JSSharpObjectMap;
+typedef struct JSThread JSThread;
+typedef struct JSToken JSToken;
+typedef struct JSTokenPos JSTokenPos;
+typedef struct JSTokenPtr JSTokenPtr;
+typedef struct JSTokenStream JSTokenStream;
+typedef struct JSTreeContext JSTreeContext;
+typedef struct JSTryNote JSTryNote;
+
+/* Friend "Advanced API" typedefs. */
+typedef struct JSAtom JSAtom;
+typedef struct JSAtomList JSAtomList;
+typedef struct JSAtomListElement JSAtomListElement;
+typedef struct JSAtomMap JSAtomMap;
+typedef struct JSAtomState JSAtomState;
+typedef struct JSCodeSpec JSCodeSpec;
+typedef struct JSPrinter JSPrinter;
+typedef struct JSRegExp JSRegExp;
+typedef struct JSRegExpStatics JSRegExpStatics;
+typedef struct JSScope JSScope;
+typedef struct JSScopeOps JSScopeOps;
+typedef struct JSScopeProperty JSScopeProperty;
+typedef struct JSStackHeader JSStackHeader;
+typedef struct JSStringBuffer JSStringBuffer;
+typedef struct JSSubString JSSubString;
+typedef struct JSXML JSXML;
+typedef struct JSXMLNamespace JSXMLNamespace;
+typedef struct JSXMLQName JSXMLQName;
+typedef struct JSXMLArray JSXMLArray;
+typedef struct JSXMLArrayCursor JSXMLArrayCursor;
+
+/* "Friend" types used by jscntxt.h and jsdbgapi.h. */
+typedef enum JSTrapStatus {
+ JSTRAP_ERROR,
+ JSTRAP_CONTINUE,
+ JSTRAP_RETURN,
+ JSTRAP_THROW,
+ JSTRAP_LIMIT
+} JSTrapStatus;
+
+typedef JSTrapStatus
+(* JS_DLL_CALLBACK JSTrapHandler)(JSContext *cx, JSScript *script,
+ jsbytecode *pc, jsval *rval, void *closure);
+
+typedef JSBool
+(* JS_DLL_CALLBACK JSWatchPointHandler)(JSContext *cx, JSObject *obj, jsval id,
+ jsval old, jsval *newp, void *closure);
+
+/* called just after script creation */
+typedef void
+(* JS_DLL_CALLBACK JSNewScriptHook)(JSContext *cx,
+ const char *filename, /* URL of script */
+ uintN lineno, /* first line */
+ JSScript *script,
+ JSFunction *fun,
+ void *callerdata);
+
+/* called just before script destruction */
+typedef void
+(* JS_DLL_CALLBACK JSDestroyScriptHook)(JSContext *cx,
+ JSScript *script,
+ void *callerdata);
+
+typedef void
+(* JS_DLL_CALLBACK JSSourceHandler)(const char *filename, uintN lineno,
+ jschar *str, size_t length,
+ void **listenerTSData, void *closure);
+
+/*
+ * This hook captures high level script execution and function calls (JS or
+ * native). It is used by JS_SetExecuteHook to hook top level scripts and by
+ * JS_SetCallHook to hook function calls. It will get called twice per script
+ * or function call: just before execution begins and just after it finishes.
+ * In both cases the 'current' frame is that of the executing code.
+ *
+ * The 'before' param is JS_TRUE for the hook invocation before the execution
+ * and JS_FALSE for the invocation after the code has run.
+ *
+ * The 'ok' param is significant only on the post execution invocation to
+ * signify whether or not the code completed 'normally'.
+ *
+ * The 'closure' param is as passed to JS_SetExecuteHook or JS_SetCallHook
+ * for the 'before' invocation, but is whatever value is returned from that
+ * invocation for the 'after' invocation. Thus, the hook implementor *could*
+ * allocate a structure in the 'before' invocation and return a pointer to that
+ * structure. The pointer would then be handed to the hook for the 'after'
+ * invocation. Alternately, the 'before' could just return the same value as
+ * in 'closure' to cause the 'after' invocation to be called with the same
+ * 'closure' value as the 'before'.
+ *
+ * Returning NULL in the 'before' hook will cause the 'after' hook *not* to
+ * be called.
+ */
+typedef void *
+(* JS_DLL_CALLBACK JSInterpreterHook)(JSContext *cx, JSStackFrame *fp, JSBool before,
+ JSBool *ok, void *closure);
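+
+/*
+ * Illustrative sketch of an interpreter hook (hypothetical, not part of the
+ * original header; kept under #if 0): the 'before' call returns a
+ * per-activation record that the engine hands back as 'closure' on the
+ * matching 'after' call, as described above. Assumes JS_malloc/JS_free from
+ * jsapi.h and a caller-defined TraceRecord type.
+ */
+#if 0
+typedef struct TraceRecord { JSStackFrame *fp; } TraceRecord;
+
+static void * JS_DLL_CALLBACK
+ExampleInterpreterHook(JSContext *cx, JSStackFrame *fp, JSBool before,
+                       JSBool *ok, void *closure)
+{
+    if (before) {
+        TraceRecord *rec = (TraceRecord *) JS_malloc(cx, sizeof *rec);
+        if (!rec)
+            return NULL;        /* NULL suppresses the 'after' invocation */
+        rec->fp = fp;
+        return rec;             /* becomes 'closure' for the 'after' call */
+    }
+    /* 'after' invocation: *ok says whether the code completed normally */
+    JS_free(cx, closure);
+    return NULL;                /* return value is ignored after execution */
+}
+#endif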
+
+typedef void
+(* JS_DLL_CALLBACK JSObjectHook)(JSContext *cx, JSObject *obj, JSBool isNew,
+ void *closure);
+
+typedef JSBool
+(* JS_DLL_CALLBACK JSDebugErrorHook)(JSContext *cx, const char *message,
+ JSErrorReport *report, void *closure);
+
+#endif /* jsprvtd_h___ */
diff --git a/src/third_party/js-1.7/jspubtd.h b/src/third_party/js-1.7/jspubtd.h
new file mode 100644
index 00000000000..4e8c92a61f0
--- /dev/null
+++ b/src/third_party/js-1.7/jspubtd.h
@@ -0,0 +1,667 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jspubtd_h___
+#define jspubtd_h___
+/*
+ * JS public API typedefs.
+ */
+#include "jstypes.h"
+#include "jscompat.h"
+
+JS_BEGIN_EXTERN_C
+
+/* Scalar typedefs. */
+typedef uint16 jschar;
+typedef int32 jsint;
+typedef uint32 jsuint;
+typedef float64 jsdouble;
+typedef jsword jsval;
+typedef jsword jsid;
+typedef int32 jsrefcount; /* PRInt32 if JS_THREADSAFE, see jslock.h */
+
+/*
+ * Run-time version enumeration. See jsconfig.h for compile-time counterparts
+ * to these values that may be selected by the JS_VERSION macro, and tested by
+ * #if expressions.
+ */
+typedef enum JSVersion {
+ JSVERSION_1_0 = 100,
+ JSVERSION_1_1 = 110,
+ JSVERSION_1_2 = 120,
+ JSVERSION_1_3 = 130,
+ JSVERSION_1_4 = 140,
+ JSVERSION_ECMA_3 = 148,
+ JSVERSION_1_5 = 150,
+ JSVERSION_1_6 = 160,
+ JSVERSION_1_7 = 170,
+ JSVERSION_DEFAULT = 0,
+ JSVERSION_UNKNOWN = -1
+} JSVersion;
+
+#define JSVERSION_IS_ECMA(version) \
+ ((version) == JSVERSION_DEFAULT || (version) >= JSVERSION_1_3)
+
+/* Result of typeof operator enumeration. */
+typedef enum JSType {
+ JSTYPE_VOID, /* undefined */
+ JSTYPE_OBJECT, /* object */
+ JSTYPE_FUNCTION, /* function */
+ JSTYPE_STRING, /* string */
+ JSTYPE_NUMBER, /* number */
+ JSTYPE_BOOLEAN, /* boolean */
+ JSTYPE_NULL, /* null */
+ JSTYPE_XML, /* xml object */
+ JSTYPE_LIMIT
+} JSType;
+
+/* Dense index into cached prototypes and class atoms for standard objects. */
+typedef enum JSProtoKey {
+#define JS_PROTO(name,code,init) JSProto_##name = code,
+#include "jsproto.tbl"
+#undef JS_PROTO
+ JSProto_LIMIT
+} JSProtoKey;
+
+/* JSObjectOps.checkAccess mode enumeration. */
+typedef enum JSAccessMode {
+ JSACC_PROTO = 0, /* XXXbe redundant w.r.t. id */
+ JSACC_PARENT = 1, /* XXXbe redundant w.r.t. id */
+ JSACC_IMPORT = 2, /* import foo.bar */
+ JSACC_WATCH = 3, /* a watchpoint on object foo for id 'bar' */
+ JSACC_READ = 4, /* a "get" of foo.bar */
+ JSACC_WRITE = 8, /* a "set" of foo.bar = baz */
+ JSACC_LIMIT
+} JSAccessMode;
+
+#define JSACC_TYPEMASK (JSACC_WRITE - 1)
+
+/*
+ * This enum type is used to control the behavior of a JSObject property
+ * iterator function that has type JSNewEnumerate.
+ */
+typedef enum JSIterateOp {
+ JSENUMERATE_INIT, /* Create new iterator state */
+ JSENUMERATE_NEXT, /* Iterate once */
+ JSENUMERATE_DESTROY /* Destroy iterator state */
+} JSIterateOp;
+
+/* Struct typedefs. */
+typedef struct JSClass JSClass;
+typedef struct JSExtendedClass JSExtendedClass;
+typedef struct JSConstDoubleSpec JSConstDoubleSpec;
+typedef struct JSContext JSContext;
+typedef struct JSErrorReport JSErrorReport;
+typedef struct JSFunction JSFunction;
+typedef struct JSFunctionSpec JSFunctionSpec;
+typedef struct JSIdArray JSIdArray;
+typedef struct JSProperty JSProperty;
+typedef struct JSPropertySpec JSPropertySpec;
+typedef struct JSObject JSObject;
+typedef struct JSObjectMap JSObjectMap;
+typedef struct JSObjectOps JSObjectOps;
+typedef struct JSXMLObjectOps JSXMLObjectOps;
+typedef struct JSRuntime JSRuntime;
+typedef struct JSRuntime JSTaskState; /* XXX deprecated name */
+typedef struct JSScript JSScript;
+typedef struct JSStackFrame JSStackFrame;
+typedef struct JSString JSString;
+typedef struct JSXDRState JSXDRState;
+typedef struct JSExceptionState JSExceptionState;
+typedef struct JSLocaleCallbacks JSLocaleCallbacks;
+
+/* JSClass (and JSObjectOps where appropriate) function pointer typedefs. */
+
+/*
+ * Add, delete, get or set a property named by id in obj. Note the jsval id
+ * type -- id may be a string (Unicode property identifier) or an int (element
+ * index). The *vp out parameter, on success, is the new property value after
+ * an add, get, or set. After a successful delete, *vp is JSVAL_FALSE iff
+ * obj[id] can't be deleted (because it's permanent).
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSPropertyOp)(JSContext *cx, JSObject *obj, jsval id,
+ jsval *vp);
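+
+/*
+ * Illustrative sketch of a JSPropertyOp (hypothetical, not part of the
+ * original header; kept under #if 0): a getter that reports a constant value
+ * for every property it is attached to. Note the jsval id, which may be a
+ * string or an int. Assumes INT_TO_JSVAL from jsapi.h.
+ */
+#if 0
+static JSBool JS_DLL_CALLBACK
+ExampleGetProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+    *vp = INT_TO_JSVAL(42);     /* value handed back to the engine */
+    return JS_TRUE;
+}
+#endif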
+
+/*
+ * This function type is used for callbacks that enumerate the properties of
+ * a JSObject. The behavior depends on the value of enum_op:
+ *
+ * JSENUMERATE_INIT
+ * A new, opaque iterator state should be allocated and stored in *statep.
+ * (You can use PRIVATE_TO_JSVAL() to tag the pointer to be stored).
+ *
+ * The number of properties that will be enumerated should be returned as
+ * an integer jsval in *idp, if idp is non-null, and provided the number of
+ * enumerable properties is known. If idp is non-null and the number of
+ * enumerable properties can't be computed in advance, *idp should be set
+ * to JSVAL_ZERO.
+ *
+ * JSENUMERATE_NEXT
+ * A previously allocated opaque iterator state is passed in via statep.
+ * Return the next jsid in the iteration using *idp. The opaque iterator
+ * state pointed at by statep is destroyed and *statep is set to JSVAL_NULL
+ * if there are no properties left to enumerate.
+ *
+ * JSENUMERATE_DESTROY
+ * Destroy the opaque iterator state previously allocated in *statep by a
+ * call to this function when enum_op was JSENUMERATE_INIT.
+ *
+ * The return value is used to indicate success, with a value of JS_FALSE
+ * indicating failure.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSNewEnumerateOp)(JSContext *cx, JSObject *obj,
+ JSIterateOp enum_op,
+ jsval *statep, jsid *idp);
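+
+/*
+ * Illustrative sketch of a JSNewEnumerateOp (hypothetical, not part of the
+ * original header; kept under #if 0): iterates the integer ids 0..3 using a
+ * malloc'd cursor tagged into *statep with PRIVATE_TO_JSVAL, following the
+ * protocol above. Assumes JS_malloc/JS_free, PRIVATE_TO_JSVAL/JSVAL_TO_PRIVATE
+ * and INT_TO_JSVAL from jsapi.h, and INT_TO_JSID from jsprvtd.h.
+ */
+#if 0
+static JSBool JS_DLL_CALLBACK
+ExampleNewEnumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op,
+                    jsval *statep, jsid *idp)
+{
+    jsint *cursor;
+
+    switch (enum_op) {
+      case JSENUMERATE_INIT:
+        cursor = (jsint *) JS_malloc(cx, sizeof *cursor);
+        if (!cursor)
+            return JS_FALSE;
+        *cursor = 0;
+        *statep = PRIVATE_TO_JSVAL(cursor);
+        if (idp)
+            *idp = INT_TO_JSVAL(4);     /* we will produce exactly 4 ids */
+        return JS_TRUE;
+      case JSENUMERATE_NEXT:
+        cursor = (jsint *) JSVAL_TO_PRIVATE(*statep);
+        if (*cursor < 4) {
+            *idp = INT_TO_JSID((*cursor)++);
+            return JS_TRUE;
+        }
+        /* FALL THROUGH: nothing left, destroy the state and null *statep */
+      case JSENUMERATE_DESTROY:
+        JS_free(cx, JSVAL_TO_PRIVATE(*statep));
+        *statep = JSVAL_NULL;
+        return JS_TRUE;
+    }
+    return JS_TRUE;
+}
+#endif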
+
+/*
+ * The old-style JSClass.enumerate op should define all lazy properties not
+ * yet reflected in obj.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSEnumerateOp)(JSContext *cx, JSObject *obj);
+
+/*
+ * Resolve a lazy property named by id in obj by defining it directly in obj.
+ * Lazy properties are those reflected from some peer native property space
+ * (e.g., the DOM attributes for a given node reflected as obj) on demand.
+ *
+ * JS looks for a property in an object, and if not found, tries to resolve
+ * the given id. If resolve succeeds, the engine looks again in case resolve
+ * defined obj[id]. If no such property exists directly in obj, the process
+ * is repeated with obj's prototype, etc.
+ *
+ * NB: JSNewResolveOp provides a cheaper way to resolve lazy properties.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSResolveOp)(JSContext *cx, JSObject *obj, jsval id);
+
+/*
+ * Like JSResolveOp, but flags provide contextual information as follows:
+ *
+ * JSRESOLVE_QUALIFIED a qualified property id: obj.id or obj[id], not id
+ * JSRESOLVE_ASSIGNING obj[id] is on the left-hand side of an assignment
+ * JSRESOLVE_DETECTING 'if (o.p)...' or similar detection opcode sequence
+ * JSRESOLVE_DECLARING var, const, or function prolog declaration opcode
+ * JSRESOLVE_CLASSNAME class name used when constructing
+ *
+ * The *objp out parameter, on success, should be null to indicate that id
+ * was not resolved; and non-null, referring to obj or one of its prototypes,
+ * if id was resolved.
+ *
+ * This hook instead of JSResolveOp is called via the JSClass.resolve member
+ * if JSCLASS_NEW_RESOLVE is set in JSClass.flags.
+ *
+ * Setting JSCLASS_NEW_RESOLVE and JSCLASS_NEW_RESOLVE_GETS_START further
+ * extends this hook by passing in the starting object on the prototype chain
+ * via *objp. Thus a resolve hook implementation may define the property id
+ * being resolved in the object in which the id was first sought, rather than
+ * in a prototype object whose class led to the resolve hook being called.
+ *
+ * When using JSCLASS_NEW_RESOLVE_GETS_START, the resolve hook must therefore
+ * null *objp to signify "not resolved". With only JSCLASS_NEW_RESOLVE and no
+ * JSCLASS_NEW_RESOLVE_GETS_START, the hook can assume *objp is null on entry.
+ * This is not good practice, but enough existing hook implementations count
+ * on it that we can't break compatibility by passing the starting object in
+ * *objp without a new JSClass flag.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSNewResolveOp)(JSContext *cx, JSObject *obj, jsval id,
+ uintN flags, JSObject **objp);
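+
+/*
+ * Illustrative sketch of a JSNewResolveOp (hypothetical, not part of the
+ * original header; kept under #if 0, and only meaningful for a class with
+ * JSCLASS_NEW_RESOLVE set): lazily defines a "length" property the first time
+ * it is looked up, and signals "not resolved" for every other id by nulling
+ * *objp. Assumes JS_DefineProperty, JSVAL_IS_STRING, JSVAL_TO_STRING,
+ * JS_GetStringBytes, INT_TO_JSVAL and JSPROP_ENUMERATE from jsapi.h, plus
+ * strcmp from <string.h>.
+ */
+#if 0
+static JSBool JS_DLL_CALLBACK
+ExampleNewResolve(JSContext *cx, JSObject *obj, jsval id, uintN flags,
+                  JSObject **objp)
+{
+    if (JSVAL_IS_STRING(id) &&
+        strcmp(JS_GetStringBytes(JSVAL_TO_STRING(id)), "length") == 0) {
+        if (!JS_DefineProperty(cx, obj, "length", INT_TO_JSVAL(0),
+                               NULL, NULL, JSPROP_ENUMERATE))
+            return JS_FALSE;
+        *objp = obj;            /* resolved: the property now lives on obj */
+        return JS_TRUE;
+    }
+    *objp = NULL;               /* not resolved */
+    return JS_TRUE;
+}
+#endif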
+
+/*
+ * Convert obj to the given type, returning true with the resulting value in
+ * *vp on success, and returning false on error or exception.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSConvertOp)(JSContext *cx, JSObject *obj, JSType type,
+ jsval *vp);
+
+/*
+ * Finalize obj, which the garbage collector has determined to be unreachable
+ * from other live objects or from GC roots. Obviously, finalizers must never
+ * store a reference to obj.
+ */
+typedef void
+(* JS_DLL_CALLBACK JSFinalizeOp)(JSContext *cx, JSObject *obj);
+
+/*
+ * Used by JS_AddExternalStringFinalizer and JS_RemoveExternalStringFinalizer
+ * to extend and reduce the set of string types finalized by the GC.
+ */
+typedef void
+(* JS_DLL_CALLBACK JSStringFinalizeOp)(JSContext *cx, JSString *str);
+
+/*
+ * The signature for JSClass.getObjectOps, used by JS_NewObject's internals
+ * to discover the set of high-level object operations to use for new objects
+ * of the given class. All native objects have a JSClass, which is stored as
+ * a private (int-tagged) pointer in obj->slots[JSSLOT_CLASS]. In contrast,
+ * all native and host objects have a JSObjectMap at obj->map, which may be
+ * shared among a number of objects, and which contains the JSObjectOps *ops
+ * pointer used to dispatch object operations from API calls.
+ *
+ * Thus JSClass (which pre-dates JSObjectOps in the API) provides a low-level
+ * interface to class-specific code and data, while JSObjectOps allows for a
+ * higher level of operation, which does not use the object's class except to
+ * find the class's JSObjectOps struct, by calling clasp->getObjectOps, and to
+ * finalize the object.
+ *
+ * If this seems backwards, that's because it is! API compatibility requires
+ * a JSClass *clasp parameter to JS_NewObject, etc. Most host objects do not
+ * need to implement the larger JSObjectOps, and can share the common JSScope
+ * code and data used by the native (js_ObjectOps, see jsobj.c) ops.
+ *
+ * Further extension to preserve API compatibility: if this function returns
+ * a pointer to JSXMLObjectOps.base, not to JSObjectOps, then the engine calls
+ * extended hooks needed for E4X.
+ */
+typedef JSObjectOps *
+(* JS_DLL_CALLBACK JSGetObjectOps)(JSContext *cx, JSClass *clasp);
+
+/*
+ * JSClass.checkAccess type: check whether obj[id] may be accessed per mode,
+ * returning false on error/exception, true on success with obj[id]'s last-got
+ * value in *vp, and its attributes in *attrsp. As for JSPropertyOp above, id
+ * is either a string or an int jsval.
+ *
+ * See JSCheckAccessIdOp, below, for the JSObjectOps counterpart, which takes
+ * a jsid (a tagged int or aligned, unique identifier pointer) rather than a
+ * jsval. The native js_ObjectOps.checkAccess simply forwards to the object's
+ * clasp->checkAccess, so that both JSClass and JSObjectOps implementors may
+ * specialize access checks.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSCheckAccessOp)(JSContext *cx, JSObject *obj, jsval id,
+ JSAccessMode mode, jsval *vp);
+
+/*
+ * Encode or decode an object, given an XDR state record representing external
+ * data. See jsxdrapi.h.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSXDRObjectOp)(JSXDRState *xdr, JSObject **objp);
+
+/*
+ * Check whether v is an instance of obj. Return false on error or exception,
+ * true on success with JS_TRUE in *bp if v is an instance of obj, JS_FALSE in
+ * *bp otherwise.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSHasInstanceOp)(JSContext *cx, JSObject *obj, jsval v,
+ JSBool *bp);
+
+/*
+ * Function type for JSClass.mark and JSObjectOps.mark, called from the GC to
+ * scan live GC-things reachable from obj's private data structure. For each
+ * such thing, a mark implementation must call
+ *
+ * JS_MarkGCThing(cx, thing, name, arg);
+ *
+ * The trailing name and arg parameters are used for GC_MARK_DEBUG-mode heap
+ * dumping and ref-path tracing. The mark function should pass a (typically
+ * literal) string naming the private data member for name, and it must pass
+ * the opaque arg parameter through from its caller.
+ *
+ * For the JSObjectOps.mark hook, the return value is the number of slots at
+ * obj->slots to scan. For JSClass.mark, the return value is ignored.
+ *
+ * NB: JSMarkOp implementations cannot allocate new GC-things (JS_NewObject
+ * called from a mark function will fail silently, e.g.).
+ */
+typedef uint32
+(* JS_DLL_CALLBACK JSMarkOp)(JSContext *cx, JSObject *obj, void *arg);
+
+/*
+ * The optional JSClass.reserveSlots hook allows a class to make computed
+ * per-instance object slots reservations, in addition to or instead of using
+ * JSCLASS_HAS_RESERVED_SLOTS(n) in the JSClass.flags initializer to reserve
+ * a constant-per-class number of slots. Implementations of this hook should
+ * return the number of slots to reserve, not including any reserved by using
+ * JSCLASS_HAS_RESERVED_SLOTS(n) in JSClass.flags.
+ *
+ * NB: called with obj locked by the JSObjectOps-specific mutual exclusion
+ * mechanism appropriate for obj, so don't nest other operations that might
+ * also lock obj.
+ */
+typedef uint32
+(* JS_DLL_CALLBACK JSReserveSlotsOp)(JSContext *cx, JSObject *obj);
+
+/* JSObjectOps function pointer typedefs. */
+
+/*
+ * Create a new subclass of JSObjectMap (see jsobj.h), with the nrefs and ops
+ * members initialized from the same-named parameters, and with the nslots and
+ * freeslot members initialized according to ops and clasp. Return null on
+ * error, non-null on success.
+ *
+ * JSObjectMaps are reference-counted by generic code in the engine. Usually,
+ * the nrefs parameter to JSObjectOps.newObjectMap will be 1, to count the ref
+ * returned to the caller on success. After a successful construction, some
+ * number of js_HoldObjectMap and js_DropObjectMap calls ensue. When nrefs
+ * reaches 0 due to a js_DropObjectMap call, JSObjectOps.destroyObjectMap will
+ * be called to dispose of the map.
+ */
+typedef JSObjectMap *
+(* JS_DLL_CALLBACK JSNewObjectMapOp)(JSContext *cx, jsrefcount nrefs,
+ JSObjectOps *ops, JSClass *clasp,
+ JSObject *obj);
+
+/*
+ * Generic type for an infallible JSObjectMap operation, used currently by
+ * JSObjectOps.destroyObjectMap.
+ */
+typedef void
+(* JS_DLL_CALLBACK JSObjectMapOp)(JSContext *cx, JSObjectMap *map);
+
+/*
+ * Look for id in obj and its prototype chain, returning false on error or
+ * exception, true on success. On success, return null in *propp if id was
+ * not found. If id was found, return the first object searching from obj
+ * along its prototype chain in which id names a direct property in *objp, and
+ * return a non-null, opaque property pointer in *propp.
+ *
+ * If JSLookupPropOp succeeds and returns with *propp non-null, that pointer
+ * may be passed as the prop parameter to a JSAttributesOp, as a short-cut
+ * that bypasses id re-lookup. In any case, a non-null *propp result after a
+ * successful lookup must be dropped via JSObjectOps.dropProperty.
+ *
+ * NB: successful return with non-null *propp means the implementation may
+ * have locked *objp and added a reference count associated with *propp, so
+ * callers should not risk deadlock by nesting or interleaving other lookups
+ * or any obj-bearing ops before dropping *propp.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSLookupPropOp)(JSContext *cx, JSObject *obj, jsid id,
+ JSObject **objp, JSProperty **propp);
+
+/*
+ * Define obj[id], a direct property of obj named id, having the given initial
+ * value, with the specified getter, setter, and attributes. If the propp out
+ * param is non-null, *propp on successful return contains an opaque property
+ * pointer usable as a speedup hint with JSAttributesOp. But note that propp
+ * may be null, indicating that the caller is not interested in recovering an
+ * opaque pointer to the newly-defined property.
+ *
+ * If propp is non-null and JSDefinePropOp succeeds, its caller must be sure
+ * to drop *propp using JSObjectOps.dropProperty in short order, just as with
+ * JSLookupPropOp.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSDefinePropOp)(JSContext *cx, JSObject *obj,
+ jsid id, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter,
+ uintN attrs, JSProperty **propp);
+
+/*
+ * Get, set, or delete obj[id], returning false on error or exception, true
+ * on success. If getting or setting, the new value is returned in *vp on
+ * success. If deleting without error, *vp will be JSVAL_FALSE if obj[id] is
+ * permanent, and JSVAL_TRUE if id named a direct property of obj that was in
+ * fact deleted, or if id names no direct property of obj (id could name a
+ * prototype property, or no property in obj or its prototype chain).
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSPropertyIdOp)(JSContext *cx, JSObject *obj, jsid id,
+ jsval *vp);
+
+/*
+ * Get or set attributes of the property obj[id]. Return false on error or
+ * exception, true with current attributes in *attrsp. If prop is non-null,
+ * it must come from the *propp out parameter of a prior JSDefinePropOp or
+ * JSLookupPropOp call.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSAttributesOp)(JSContext *cx, JSObject *obj, jsid id,
+ JSProperty *prop, uintN *attrsp);
+
+/*
+ * JSObjectOps.checkAccess type: check whether obj[id] may be accessed per
+ * mode, returning false on error/exception, true on success with obj[id]'s
+ * last-got value in *vp, and its attributes in *attrsp.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSCheckAccessIdOp)(JSContext *cx, JSObject *obj, jsid id,
+ JSAccessMode mode, jsval *vp,
+ uintN *attrsp);
+
+/*
+ * A generic type for functions mapping an object to another object, or null
+ * if an error or exception was thrown on cx. Used by JSObjectOps.thisObject
+ * at present.
+ */
+typedef JSObject *
+(* JS_DLL_CALLBACK JSObjectOp)(JSContext *cx, JSObject *obj);
+
+/*
+ * A generic type for functions taking a context, object, and property, with
+ * no return value. Used by JSObjectOps.dropProperty currently (see above,
+ * JSDefinePropOp and JSLookupPropOp, for the object-locking protocol in which
+ * dropProperty participates).
+ */
+typedef void
+(* JS_DLL_CALLBACK JSPropertyRefOp)(JSContext *cx, JSObject *obj,
+ JSProperty *prop);
+
+/*
+ * Function type for JSObjectOps.setProto and JSObjectOps.setParent. These
+ * hooks must check for cycles without deadlocking, and otherwise take special
+ * steps. See jsobj.c, js_SetProtoOrParent, for an example.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSSetObjectSlotOp)(JSContext *cx, JSObject *obj,
+ uint32 slot, JSObject *pobj);
+
+/*
+ * Get and set a required slot, one that should already have been allocated.
+ * These operations are infallible, so required slots must be pre-allocated,
+ * or implementations must suppress out-of-memory errors. The native ops
+ * (js_ObjectOps, see jsobj.c) access slots reserved by including a call to
+ * the JSCLASS_HAS_RESERVED_SLOTS(n) macro in the JSClass.flags initializer.
+ *
+ * NB: the slot parameter is a zero-based index into obj->slots[], unlike the
+ * index parameter to the JS_GetReservedSlot and JS_SetReservedSlot API entry
+ * points, which is a zero-based index into the JSCLASS_RESERVED_SLOTS(clasp)
+ * reserved slots that come after the initial well-known slots: proto, parent,
+ * class, and optionally, the private data slot.
+ */
+typedef jsval
+(* JS_DLL_CALLBACK JSGetRequiredSlotOp)(JSContext *cx, JSObject *obj,
+ uint32 slot);
+
+typedef JSBool
+(* JS_DLL_CALLBACK JSSetRequiredSlotOp)(JSContext *cx, JSObject *obj,
+ uint32 slot, jsval v);
+
+typedef JSObject *
+(* JS_DLL_CALLBACK JSGetMethodOp)(JSContext *cx, JSObject *obj, jsid id,
+ jsval *vp);
+
+typedef JSBool
+(* JS_DLL_CALLBACK JSSetMethodOp)(JSContext *cx, JSObject *obj, jsid id,
+ jsval *vp);
+
+typedef JSBool
+(* JS_DLL_CALLBACK JSEnumerateValuesOp)(JSContext *cx, JSObject *obj,
+ JSIterateOp enum_op,
+ jsval *statep, jsid *idp, jsval *vp);
+
+typedef JSBool
+(* JS_DLL_CALLBACK JSEqualityOp)(JSContext *cx, JSObject *obj, jsval v,
+ JSBool *bp);
+
+typedef JSBool
+(* JS_DLL_CALLBACK JSConcatenateOp)(JSContext *cx, JSObject *obj, jsval v,
+ jsval *vp);
+
+/* Typedef for native functions called by the JS VM. */
+
+typedef JSBool
+(* JS_DLL_CALLBACK JSNative)(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval);
+
+/* Callbacks and their arguments. */
+
+typedef enum JSContextOp {
+ JSCONTEXT_NEW,
+ JSCONTEXT_DESTROY
+} JSContextOp;
+
+/*
+ * The possible values for contextOp when the runtime calls the callback are:
+ * JSCONTEXT_NEW JS_NewContext successfully created a new JSContext
+ * instance. The callback can initialize the instance as
+ * required. If the callback returns false, the instance
+ * will be destroyed and JS_NewContext returns null. In
+ * this case the callback is not called again.
+ * JSCONTEXT_DESTROY One of JS_DestroyContext* methods is called. The
+ * callback may perform its own cleanup and must always
+ * return true.
+ * Any other value For future compatibility the callback must do nothing
+ * and return true in this case.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSContextCallback)(JSContext *cx, uintN contextOp);
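+
+/*
+ * Illustrative sketch of a JSContextCallback (hypothetical, not part of the
+ * original header; kept under #if 0): initializes new contexts and obeys the
+ * "do nothing and return true for unknown contextOp values" rule above.
+ * Assumes JS_SetVersion and JS_SetErrorReporter from jsapi.h and a
+ * caller-defined MyReportError error reporter.
+ */
+#if 0
+static JSBool JS_DLL_CALLBACK
+ExampleContextCallback(JSContext *cx, uintN contextOp)
+{
+    if (contextOp == JSCONTEXT_NEW) {
+        JS_SetVersion(cx, JSVERSION_1_7);
+        JS_SetErrorReporter(cx, MyReportError);
+    }
+    /* JSCONTEXT_DESTROY and any future values: nothing to do, report success */
+    return JS_TRUE;
+}
+#endif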
+
+typedef enum JSGCStatus {
+ JSGC_BEGIN,
+ JSGC_END,
+ JSGC_MARK_END,
+ JSGC_FINALIZE_END
+} JSGCStatus;
+
+typedef JSBool
+(* JS_DLL_CALLBACK JSGCCallback)(JSContext *cx, JSGCStatus status);
+
+typedef JSBool
+(* JS_DLL_CALLBACK JSBranchCallback)(JSContext *cx, JSScript *script);
+
+typedef void
+(* JS_DLL_CALLBACK JSErrorReporter)(JSContext *cx, const char *message,
+ JSErrorReport *report);
+
+/*
+ * Possible exception types. These types are part of a JSErrorFormatString
+ * structure. They define which error to throw in case of a runtime error.
+ * JSEXN_NONE marks an unthrowable error.
+ */
+typedef enum JSExnType {
+ JSEXN_NONE = -1,
+ JSEXN_ERR,
+ JSEXN_INTERNALERR,
+ JSEXN_EVALERR,
+ JSEXN_RANGEERR,
+ JSEXN_REFERENCEERR,
+ JSEXN_SYNTAXERR,
+ JSEXN_TYPEERR,
+ JSEXN_URIERR,
+ JSEXN_LIMIT
+} JSExnType;
+
+typedef struct JSErrorFormatString {
+ /* The error format string (UTF-8 if JS_C_STRINGS_ARE_UTF8 is defined). */
+ const char *format;
+
+ /* The number of arguments to expand in the formatted error message. */
+ uint16 argCount;
+
+ /* One of the JSExnType constants above. */
+ int16 exnType;
+} JSErrorFormatString;
+
+typedef const JSErrorFormatString *
+(* JS_DLL_CALLBACK JSErrorCallback)(void *userRef, const char *locale,
+ const uintN errorNumber);
+
+#ifdef va_start
+#define JS_ARGUMENT_FORMATTER_DEFINED 1
+
+typedef JSBool
+(* JS_DLL_CALLBACK JSArgumentFormatter)(JSContext *cx, const char *format,
+ JSBool fromJS, jsval **vpp,
+ va_list *app);
+#endif
+
+typedef JSBool
+(* JS_DLL_CALLBACK JSLocaleToUpperCase)(JSContext *cx, JSString *src,
+ jsval *rval);
+
+typedef JSBool
+(* JS_DLL_CALLBACK JSLocaleToLowerCase)(JSContext *cx, JSString *src,
+ jsval *rval);
+
+typedef JSBool
+(* JS_DLL_CALLBACK JSLocaleCompare)(JSContext *cx,
+ JSString *src1, JSString *src2,
+ jsval *rval);
+
+typedef JSBool
+(* JS_DLL_CALLBACK JSLocaleToUnicode)(JSContext *cx, char *src, jsval *rval);
+
+/*
+ * Security protocol types.
+ */
+typedef struct JSPrincipals JSPrincipals;
+
+/*
+ * XDR-encode or -decode a principals instance, based on whether xdr->mode is
+ * JSXDR_ENCODE, in which case *principalsp should be encoded; or JSXDR_DECODE,
+ * in which case implementations must return a held (via JSPRINCIPALS_HOLD),
+ * non-null *principalsp out parameter. Return true on success, false on any
+ * error, which the implementation must have reported.
+ */
+typedef JSBool
+(* JS_DLL_CALLBACK JSPrincipalsTranscoder)(JSXDRState *xdr,
+ JSPrincipals **principalsp);
+
+/*
+ * Return a weak reference to the principals associated with obj, possibly via
+ * the immutable parent chain leading from obj to a top-level container (e.g.,
+ * a window object in the DOM level 0). If there are no principals associated
+ * with obj, return null. Therefore null does not mean an error was reported;
+ * in no event should an error be reported or an exception be thrown by this
+ * callback's implementation.
+ */
+typedef JSPrincipals *
+(* JS_DLL_CALLBACK JSObjectPrincipalsFinder)(JSContext *cx, JSObject *obj);
+
+JS_END_EXTERN_C
+
+#endif /* jspubtd_h___ */
diff --git a/src/third_party/js-1.7/jsregexp.c b/src/third_party/js-1.7/jsregexp.c
new file mode 100644
index 00000000000..5d2fce48d12
--- /dev/null
+++ b/src/third_party/js-1.7/jsregexp.c
@@ -0,0 +1,4206 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set sw=4 ts=8 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS regular expressions, after Perl.
+ */
+#include "jsstddef.h"
+#include <stdlib.h>
+#include <string.h>
+#include "jstypes.h"
+#include "jsarena.h" /* Added by JSIFY */
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsapi.h"
+#include "jsarray.h"
+#include "jsatom.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsfun.h"
+#include "jsgc.h"
+#include "jsinterp.h"
+#include "jslock.h"
+#include "jsnum.h"
+#include "jsobj.h"
+#include "jsopcode.h"
+#include "jsregexp.h"
+#include "jsscan.h"
+#include "jsstr.h"
+
+/* Note: contiguity of 'simple opcodes' is important for SimpleMatch() */
+typedef enum REOp {
+ REOP_EMPTY = 0, /* match rest of input against rest of r.e. */
+ REOP_ALT = 1, /* alternative subexpressions in kid and next */
+ REOP_SIMPLE_START = 2, /* start of 'simple opcodes' */
+ REOP_BOL = 2, /* beginning of input (or line if multiline) */
+ REOP_EOL = 3, /* end of input (or line if multiline) */
+ REOP_WBDRY = 4, /* match "" at word boundary */
+ REOP_WNONBDRY = 5, /* match "" at word non-boundary */
+ REOP_DOT = 6, /* stands for any character */
+ REOP_DIGIT = 7, /* match a digit char: [0-9] */
+ REOP_NONDIGIT = 8, /* match a non-digit char: [^0-9] */
+ REOP_ALNUM = 9, /* match an alphanumeric char: [0-9a-z_A-Z] */
+ REOP_NONALNUM = 10, /* match a non-alphanumeric char: [^0-9a-z_A-Z] */
+ REOP_SPACE = 11, /* match a whitespace char */
+ REOP_NONSPACE = 12, /* match a non-whitespace char */
+ REOP_BACKREF = 13, /* back-reference (e.g., \1) to a parenthetical */
+ REOP_FLAT = 14, /* match a flat string */
+ REOP_FLAT1 = 15, /* match a single char */
+ REOP_FLATi = 16, /* case-independent REOP_FLAT */
+ REOP_FLAT1i = 17, /* case-independent REOP_FLAT1 */
+ REOP_UCFLAT1 = 18, /* single Unicode char */
+ REOP_UCFLAT1i = 19, /* case-independent REOP_UCFLAT1 */
+ REOP_UCFLAT = 20, /* flat Unicode string; len immediate counts chars */
+ REOP_UCFLATi = 21, /* case-independent REOP_UCFLAT */
+ REOP_CLASS = 22, /* character class with index */
+ REOP_NCLASS = 23, /* negated character class with index */
+ REOP_SIMPLE_END = 23, /* end of 'simple opcodes' */
+ REOP_QUANT = 25, /* quantified atom: atom{1,2} */
+ REOP_STAR = 26, /* zero or more occurrences of kid */
+ REOP_PLUS = 27, /* one or more occurrences of kid */
+ REOP_OPT = 28, /* optional subexpression in kid */
+ REOP_LPAREN = 29, /* left paren bytecode: kid is u.num'th sub-regexp */
+ REOP_RPAREN = 30, /* right paren bytecode */
+ REOP_JUMP = 31, /* for deoptimized closure loops */
+ REOP_DOTSTAR = 32, /* optimize .* to use a single opcode */
+ REOP_ANCHOR = 33, /* like .* but skips left context to unanchored r.e. */
+ REOP_EOLONLY = 34, /* $ not preceded by any pattern */
+ REOP_BACKREFi = 37, /* case-independent REOP_BACKREF */
+ REOP_LPARENNON = 41, /* non-capturing version of REOP_LPAREN */
+ REOP_ASSERT = 43, /* zero width positive lookahead assertion */
+ REOP_ASSERT_NOT = 44, /* zero width negative lookahead assertion */
+ REOP_ASSERTTEST = 45, /* sentinel at end of assertion child */
+ REOP_ASSERTNOTTEST = 46, /* sentinel at end of !assertion child */
+ REOP_MINIMALSTAR = 47, /* non-greedy version of * */
+ REOP_MINIMALPLUS = 48, /* non-greedy version of + */
+ REOP_MINIMALOPT = 49, /* non-greedy version of ? */
+ REOP_MINIMALQUANT = 50, /* non-greedy version of {} */
+ REOP_ENDCHILD = 51, /* sentinel at end of quantifier child */
+ REOP_REPEAT = 52, /* directs execution of greedy quantifier */
+ REOP_MINIMALREPEAT = 53, /* directs execution of non-greedy quantifier */
+ REOP_ALTPREREQ = 54, /* prerequisite for ALT, either of two chars */
+ REOP_ALTPREREQ2 = 55, /* prerequisite for ALT, a char or a class */
+ REOP_ENDALT = 56, /* end of final alternate */
+ REOP_CONCAT = 57, /* concatenation of terms (parse time only) */
+
+ REOP_END
+} REOp;
+
+#define REOP_IS_SIMPLE(op) ((unsigned)((op) - REOP_SIMPLE_START) < \
+ (unsigned)REOP_SIMPLE_END)
+
+struct RENode {
+ REOp op; /* r.e. op bytecode */
+ RENode *next; /* next in concatenation order */
+ void *kid; /* first operand */
+ union {
+ void *kid2; /* second operand */
+ jsint num; /* could be a number */
+ size_t parenIndex; /* or a parenthesis index */
+ struct { /* or a quantifier range */
+ uintN min;
+ uintN max;
+ JSPackedBool greedy;
+ } range;
+ struct { /* or a character class */
+ size_t startIndex;
+ size_t kidlen; /* length of string at kid, in jschars */
+ size_t index; /* index into class list */
+ uint16 bmsize; /* bitmap size, based on max char code */
+ JSPackedBool sense;
+ } ucclass;
+ struct { /* or a literal sequence */
+ jschar chr; /* of one character */
+ size_t length; /* or many (via the kid) */
+ } flat;
+ struct {
+ RENode *kid2; /* second operand from ALT */
+ jschar ch1; /* match char for ALTPREREQ */
+ jschar ch2; /* ditto, or class index for ALTPREREQ2 */
+ } altprereq;
+ } u;
+};
+
+#define RE_IS_LETTER(c) (((c >= 'A') && (c <= 'Z')) || \
+ ((c >= 'a') && (c <= 'z')) )
+#define RE_IS_LINE_TERM(c) ((c == '\n') || (c == '\r') || \
+ (c == LINE_SEPARATOR) || (c == PARA_SEPARATOR))
+
+#define CLASS_CACHE_SIZE 4
+
+typedef struct CompilerState {
+ JSContext *context;
+ JSTokenStream *tokenStream; /* For reporting errors */
+ const jschar *cpbegin;
+ const jschar *cpend;
+ const jschar *cp;
+ size_t parenCount;
+ size_t classCount; /* number of [] encountered */
+ size_t treeDepth; /* maximum depth of parse tree */
+ size_t progLength; /* estimated bytecode length */
+ RENode *result;
+ size_t classBitmapsMem; /* memory to hold all class bitmaps */
+ struct {
+ const jschar *start; /* small cache of class strings */
+ size_t length; /* since they're often the same */
+ size_t index;
+ } classCache[CLASS_CACHE_SIZE];
+ uint16 flags;
+} CompilerState;
+
+typedef struct EmitStateStackEntry {
+ jsbytecode *altHead; /* start of REOP_ALT* opcode */
+ jsbytecode *nextAltFixup; /* fixup pointer to next-alt offset */
+ jsbytecode *nextTermFixup; /* fixup ptr. to REOP_JUMP offset */
+ jsbytecode *endTermFixup; /* fixup ptr. to REOPT_ALTPREREQ* offset */
+ RENode *continueNode; /* original REOP_ALT* node being stacked */
+ jsbytecode continueOp; /* REOP_JUMP or REOP_ENDALT continuation */
+ JSPackedBool jumpToJumpFlag; /* true if we've patched jump-to-jump to
+ avoid 16-bit unsigned offset overflow */
+} EmitStateStackEntry;
+
+/*
+ * Immediate operand sizes and getter/setters. Unlike the ones in jsopcode.h,
+ * the getters and setters take the pc of the offset, not of the opcode before
+ * the offset.
+ */
+#define ARG_LEN 2
+#define GET_ARG(pc) ((uint16)(((pc)[0] << 8) | (pc)[1]))
+#define SET_ARG(pc, arg) ((pc)[0] = (jsbytecode) ((arg) >> 8), \
+ (pc)[1] = (jsbytecode) (arg))
+
+#define OFFSET_LEN ARG_LEN
+#define OFFSET_MAX (JS_BIT(ARG_LEN * 8) - 1)
+#define GET_OFFSET(pc) GET_ARG(pc)
+
+/*
+ * Maximum supported tree depth is maximum size of EmitStateStackEntry stack.
+ * For sanity, we limit it to 2^24 bytes.
+ */
+#define TREE_DEPTH_MAX (JS_BIT(24) / sizeof(EmitStateStackEntry))
+
+/*
+ * The maximum memory that can be allocated for class bitmaps.
+ * For sanity, we limit it to 2^24 bytes.
+ */
+#define CLASS_BITMAPS_MEM_LIMIT JS_BIT(24)
+
+/*
+ * Functions to get the size of, and to write/read, bytecode that represents
+ * small indexes compactly.
+ * Each byte in the code represents a 7-bit chunk of the index. The 8th bit,
+ * when set, indicates that the following byte brings more bits to the index;
+ * otherwise this is the last byte of the index bytecode and it carries the
+ * highest index bits.
+ */
+static size_t
+GetCompactIndexWidth(size_t index)
+{
+ size_t width;
+
+ for (width = 1; (index >>= 7) != 0; ++width) { }
+ return width;
+}
+
+static jsbytecode *
+WriteCompactIndex(jsbytecode *pc, size_t index)
+{
+ size_t next;
+
+ while ((next = index >> 7) != 0) {
+ *pc++ = (jsbytecode)(index | 0x80);
+ index = next;
+ }
+ *pc++ = (jsbytecode)index;
+ return pc;
+}
+
+static jsbytecode *
+ReadCompactIndex(jsbytecode *pc, size_t *result)
+{
+ size_t nextByte;
+
+ nextByte = *pc++;
+ if ((nextByte & 0x80) == 0) {
+ /*
+ * Short-circuit the most common case when compact index <= 127.
+ */
+ *result = nextByte;
+ } else {
+ size_t shift = 7;
+ *result = 0x7F & nextByte;
+ do {
+ nextByte = *pc++;
+ *result |= (nextByte & 0x7F) << shift;
+ shift += 7;
+ } while ((nextByte & 0x80) != 0);
+ }
+ return pc;
+}
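+
+/*
+ * Illustrative round trip through the compact index encoding above
+ * (hypothetical check, not part of the original source; kept under #if 0):
+ * 300 = 0x12C encodes as the two bytes 0xAC 0x02, low 7 bits first with the
+ * continuation bit set on the first byte.
+ */
+#if 0
+static void
+ExampleCompactIndexRoundTrip(void)
+{
+    jsbytecode buf[8], *end;
+    size_t decoded;
+
+    JS_ASSERT(GetCompactIndexWidth(300) == 2);
+    end = WriteCompactIndex(buf, 300);
+    JS_ASSERT(end - buf == 2 && buf[0] == 0xAC && buf[1] == 0x02);
+    ReadCompactIndex(buf, &decoded);
+    JS_ASSERT(decoded == 300);
+}
+#endif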
+
+typedef struct RECapture {
+ ptrdiff_t index; /* start of contents, -1 for empty */
+ size_t length; /* length of capture */
+} RECapture;
+
+typedef struct REMatchState {
+ const jschar *cp;
+ RECapture parens[1]; /* first of 're->parenCount' captures,
+ allocated at end of this struct */
+} REMatchState;
+
+struct REBackTrackData;
+
+typedef struct REProgState {
+ jsbytecode *continue_pc; /* current continuation data */
+ jsbytecode continue_op;
+ ptrdiff_t index; /* progress in text */
+ size_t parenSoFar; /* highest indexed paren started */
+ union {
+ struct {
+ uintN min; /* current quantifier limits */
+ uintN max;
+ } quantifier;
+ struct {
+ size_t top; /* backtrack stack state */
+ size_t sz;
+ } assertion;
+ } u;
+} REProgState;
+
+typedef struct REBackTrackData {
+ size_t sz; /* size of previous stack entry */
+ jsbytecode *backtrack_pc; /* where to backtrack to */
+ jsbytecode backtrack_op;
+ const jschar *cp; /* index in text of match at backtrack */
+ size_t parenIndex; /* start index of saved paren contents */
+ size_t parenCount; /* # of saved paren contents */
+ size_t saveStateStackTop; /* number of parent states */
+ /* saved parent states follow */
+ /* saved paren contents follow */
+} REBackTrackData;
+
+#define INITIAL_STATESTACK 100
+#define INITIAL_BACKTRACK 8000
+
+typedef struct REGlobalData {
+ JSContext *cx;
+ JSRegExp *regexp; /* the RE in execution */
+ JSBool ok; /* runtime error (out_of_memory only?) */
+ size_t start; /* offset to start at */
+ ptrdiff_t skipped; /* chars skipped anchoring this r.e. */
+ const jschar *cpbegin; /* text base address */
+ const jschar *cpend; /* text limit address */
+
+ REProgState *stateStack; /* stack of state of current parents */
+ size_t stateStackTop;
+ size_t stateStackLimit;
+
+ REBackTrackData *backTrackStack;/* stack of matched-so-far positions */
+ REBackTrackData *backTrackSP;
+ size_t backTrackStackSize;
+ size_t cursz; /* size of current stack entry */
+
+ JSArenaPool pool; /* It's faster to use one malloc'd pool
+ than to malloc/free the three items
+ that are allocated from this pool */
+} REGlobalData;
+
+/*
+ * 1. If IgnoreCase is false, return ch.
+ * 2. Let u be ch converted to upper case as if by calling
+ * String.prototype.toUpperCase on the one-character string ch.
+ * 3. If u does not consist of a single character, return ch.
+ * 4. Let cu be u's character.
+ * 5. If ch's code point value is greater than or equal to decimal 128 and cu's
+ * code point value is less than decimal 128, then return ch.
+ * 6. Return cu.
+ */
+static jschar
+upcase(jschar ch)
+{
+ jschar cu = JS_TOUPPER(ch);
+ if (ch >= 128 && cu < 128)
+ return ch;
+ return cu;
+}
+
+static jschar
+downcase(jschar ch)
+{
+ jschar cl = JS_TOLOWER(ch);
+ if (cl >= 128 && ch < 128)
+ return ch;
+ return cl;
+}
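+
+/*
+ * Worked example for the Canonicalize steps above (illustrative note, not in
+ * the original source): upcase(0x017F), LATIN SMALL LETTER LONG S, would
+ * uppercase to 'S' (0x53), but because the input is >= 128 and the result is
+ * < 128 the original character is returned, so a case-folded comparison built
+ * on upcase() keeps U+017F distinct from 's'.
+ */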
+
+/* Construct and initialize an RENode, returning NULL for out-of-memory */
+static RENode *
+NewRENode(CompilerState *state, REOp op)
+{
+ JSContext *cx;
+ RENode *ren;
+
+ cx = state->context;
+ JS_ARENA_ALLOCATE_CAST(ren, RENode *, &cx->tempPool, sizeof *ren);
+ if (!ren) {
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+ }
+ ren->op = op;
+ ren->next = NULL;
+ ren->kid = NULL;
+ return ren;
+}
+
+/*
+ * Validates and converts a hex ASCII digit.
+ */
+static JSBool
+isASCIIHexDigit(jschar c, uintN *digit)
+{
+ uintN cv = c;
+
+ if (cv < '0')
+ return JS_FALSE;
+ if (cv <= '9') {
+ *digit = cv - '0';
+ return JS_TRUE;
+ }
+ cv |= 0x20;
+ if (cv >= 'a' && cv <= 'f') {
+ *digit = cv - 'a' + 10;
+ return JS_TRUE;
+ }
+ return JS_FALSE;
+}
+
+
+typedef struct {
+ REOp op;
+ const jschar *errPos;
+ size_t parenIndex;
+} REOpData;
+
+
+/*
+ * Process the op against the two top operands, reducing them to a single
+ * operand in the penultimate slot. Update progLength and treeDepth.
+ */
+static JSBool
+ProcessOp(CompilerState *state, REOpData *opData, RENode **operandStack,
+ intN operandSP)
+{
+ RENode *result;
+
+ switch (opData->op) {
+ case REOP_ALT:
+ result = NewRENode(state, REOP_ALT);
+ if (!result)
+ return JS_FALSE;
+ result->kid = operandStack[operandSP - 2];
+ result->u.kid2 = operandStack[operandSP - 1];
+ operandStack[operandSP - 2] = result;
+
+ if (state->treeDepth == TREE_DEPTH_MAX) {
+ js_ReportCompileErrorNumber(state->context, state->tokenStream,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_REGEXP_TOO_COMPLEX);
+ return JS_FALSE;
+ }
+ ++state->treeDepth;
+
+ /*
+ * Look at both alternates to see if there's a FLAT or a CLASS at
+ * the start of each. If so, use a prerequisite match.
+ */
+ if (((RENode *) result->kid)->op == REOP_FLAT &&
+ ((RENode *) result->u.kid2)->op == REOP_FLAT &&
+ (state->flags & JSREG_FOLD) == 0) {
+ result->op = REOP_ALTPREREQ;
+ result->u.altprereq.ch1 = ((RENode *) result->kid)->u.flat.chr;
+ result->u.altprereq.ch2 = ((RENode *) result->u.kid2)->u.flat.chr;
+ /* ALTPREREQ, <end>, uch1, uch2, <next>, ...,
+ JUMP, <end> ... ENDALT */
+ state->progLength += 13;
+ }
+ else
+ if (((RENode *) result->kid)->op == REOP_CLASS &&
+ ((RENode *) result->kid)->u.ucclass.index < 256 &&
+ ((RENode *) result->u.kid2)->op == REOP_FLAT &&
+ (state->flags & JSREG_FOLD) == 0) {
+ result->op = REOP_ALTPREREQ2;
+ result->u.altprereq.ch1 = ((RENode *) result->u.kid2)->u.flat.chr;
+ result->u.altprereq.ch2 = ((RENode *) result->kid)->u.ucclass.index;
+ /* ALTPREREQ2, <end>, uch1, uch2, <next>, ...,
+ JUMP, <end> ... ENDALT */
+ state->progLength += 13;
+ }
+ else
+ if (((RENode *) result->kid)->op == REOP_FLAT &&
+ ((RENode *) result->u.kid2)->op == REOP_CLASS &&
+ ((RENode *) result->u.kid2)->u.ucclass.index < 256 &&
+ (state->flags & JSREG_FOLD) == 0) {
+ result->op = REOP_ALTPREREQ2;
+ result->u.altprereq.ch1 = ((RENode *) result->kid)->u.flat.chr;
+ result->u.altprereq.ch2 =
+ ((RENode *) result->u.kid2)->u.ucclass.index;
+ /* ALTPREREQ2, <end>, uch1, uch2, <next>, ...,
+ JUMP, <end> ... ENDALT */
+ state->progLength += 13;
+ }
+ else {
+ /* ALT, <next>, ..., JUMP, <end> ... ENDALT */
+ state->progLength += 7;
+ }
+ break;
+
+ case REOP_CONCAT:
+ result = operandStack[operandSP - 2];
+ while (result->next)
+ result = result->next;
+ result->next = operandStack[operandSP - 1];
+ break;
+
+ case REOP_ASSERT:
+ case REOP_ASSERT_NOT:
+ case REOP_LPARENNON:
+ case REOP_LPAREN:
+ /* These should have been processed by a close paren. */
+ js_ReportCompileErrorNumberUC(state->context, state->tokenStream,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_MISSING_PAREN, opData->errPos);
+ return JS_FALSE;
+
+ default:;
+ }
+ return JS_TRUE;
+}
+
+/*
+ * Parser forward declarations.
+ */
+static JSBool ParseTerm(CompilerState *state);
+static JSBool ParseQuantifier(CompilerState *state);
+static intN ParseMinMaxQuantifier(CompilerState *state, JSBool ignoreValues);
+
+/*
+ * Top-down regular expression grammar, based closely on Perl4.
+ *
+ * regexp: altern A regular expression is one or more
+ * altern '|' regexp alternatives separated by vertical bar.
+ */
+#define INITIAL_STACK_SIZE 128
+
+static JSBool
+ParseRegExp(CompilerState *state)
+{
+ size_t parenIndex;
+ RENode *operand;
+ REOpData *operatorStack;
+ RENode **operandStack;
+ REOp op;
+ intN i;
+ JSBool result = JS_FALSE;
+
+ intN operatorSP = 0, operatorStackSize = INITIAL_STACK_SIZE;
+ intN operandSP = 0, operandStackSize = INITIAL_STACK_SIZE;
+
+ /* Watch out for empty regexp */
+ if (state->cp == state->cpend) {
+ state->result = NewRENode(state, REOP_EMPTY);
+ return (state->result != NULL);
+ }
+
+ operatorStack = (REOpData *)
+ JS_malloc(state->context, sizeof(REOpData) * operatorStackSize);
+ if (!operatorStack)
+ return JS_FALSE;
+
+ operandStack = (RENode **)
+ JS_malloc(state->context, sizeof(RENode *) * operandStackSize);
+ if (!operandStack)
+ goto out;
+
+ for (;;) {
+ parenIndex = state->parenCount;
+ if (state->cp == state->cpend) {
+ /*
+ * If we are at the end of the regexp and we're short one or more
+ * operands, the regexp must have the form /x|/ or some such, with
+ * left parentheses making us short more than one operand.
+ */
+ if (operatorSP >= operandSP) {
+ operand = NewRENode(state, REOP_EMPTY);
+ if (!operand)
+ goto out;
+ goto pushOperand;
+ }
+ } else {
+ switch (*state->cp) {
+ case '(':
+ ++state->cp;
+ if (state->cp + 1 < state->cpend &&
+ *state->cp == '?' &&
+ (state->cp[1] == '=' ||
+ state->cp[1] == '!' ||
+ state->cp[1] == ':')) {
+ switch (state->cp[1]) {
+ case '=':
+ op = REOP_ASSERT;
+ /* ASSERT, <next>, ... ASSERTTEST */
+ state->progLength += 4;
+ break;
+ case '!':
+ op = REOP_ASSERT_NOT;
+ /* ASSERTNOT, <next>, ... ASSERTNOTTEST */
+ state->progLength += 4;
+ break;
+ default:
+ op = REOP_LPARENNON;
+ break;
+ }
+ state->cp += 2;
+ } else {
+ op = REOP_LPAREN;
+ /* LPAREN, <index>, ... RPAREN, <index> */
+ state->progLength
+ += 2 * (1 + GetCompactIndexWidth(parenIndex));
+ state->parenCount++;
+ if (state->parenCount == 65535) {
+ js_ReportCompileErrorNumber(state->context,
+ state->tokenStream,
+ JSREPORT_TS |
+ JSREPORT_ERROR,
+ JSMSG_TOO_MANY_PARENS);
+ goto out;
+ }
+ }
+ goto pushOperator;
+
+ case ')':
+ /*
+ * If there's no stacked open parenthesis, throw syntax error.
+ */
+ for (i = operatorSP - 1; ; i--) {
+ if (i < 0) {
+ js_ReportCompileErrorNumber(state->context,
+ state->tokenStream,
+ JSREPORT_TS |
+ JSREPORT_ERROR,
+ JSMSG_UNMATCHED_RIGHT_PAREN);
+ goto out;
+ }
+ if (operatorStack[i].op == REOP_ASSERT ||
+ operatorStack[i].op == REOP_ASSERT_NOT ||
+ operatorStack[i].op == REOP_LPARENNON ||
+ operatorStack[i].op == REOP_LPAREN) {
+ break;
+ }
+ }
+ /* FALL THROUGH */
+
+ case '|':
+ /* Expected an operand before these, so make an empty one */
+ operand = NewRENode(state, REOP_EMPTY);
+ if (!operand)
+ goto out;
+ goto pushOperand;
+
+ default:
+ if (!ParseTerm(state))
+ goto out;
+ operand = state->result;
+pushOperand:
+ if (operandSP == operandStackSize) {
+ operandStackSize += operandStackSize;
+ operandStack = (RENode **)
+ JS_realloc(state->context, operandStack,
+ sizeof(RENode *) * operandStackSize);
+ if (!operandStack)
+ goto out;
+ }
+ operandStack[operandSP++] = operand;
+ break;
+ }
+ }
+
+ /* At the end; process remaining operators. */
+restartOperator:
+ if (state->cp == state->cpend) {
+ while (operatorSP) {
+ --operatorSP;
+ if (!ProcessOp(state, &operatorStack[operatorSP],
+ operandStack, operandSP))
+ goto out;
+ --operandSP;
+ }
+ JS_ASSERT(operandSP == 1);
+ state->result = operandStack[0];
+ result = JS_TRUE;
+ goto out;
+ }
+
+ switch (*state->cp) {
+ case '|':
+ /* Process any stacked 'concat' operators */
+ ++state->cp;
+ while (operatorSP &&
+ operatorStack[operatorSP - 1].op == REOP_CONCAT) {
+ --operatorSP;
+ if (!ProcessOp(state, &operatorStack[operatorSP],
+ operandStack, operandSP)) {
+ goto out;
+ }
+ --operandSP;
+ }
+ op = REOP_ALT;
+ goto pushOperator;
+
+ case ')':
+ /*
+ * If there's no stacked open parenthesis, throw syntax error.
+ */
+ for (i = operatorSP - 1; ; i--) {
+ if (i < 0) {
+ js_ReportCompileErrorNumber(state->context,
+ state->tokenStream,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_UNMATCHED_RIGHT_PAREN);
+ goto out;
+ }
+ if (operatorStack[i].op == REOP_ASSERT ||
+ operatorStack[i].op == REOP_ASSERT_NOT ||
+ operatorStack[i].op == REOP_LPARENNON ||
+ operatorStack[i].op == REOP_LPAREN) {
+ break;
+ }
+ }
+ ++state->cp;
+
+ /* Process everything on the stack until the open parenthesis. */
+ for (;;) {
+ JS_ASSERT(operatorSP);
+ --operatorSP;
+ switch (operatorStack[operatorSP].op) {
+ case REOP_ASSERT:
+ case REOP_ASSERT_NOT:
+ case REOP_LPAREN:
+ operand = NewRENode(state, operatorStack[operatorSP].op);
+ if (!operand)
+ goto out;
+ operand->u.parenIndex =
+ operatorStack[operatorSP].parenIndex;
+ JS_ASSERT(operandSP);
+ operand->kid = operandStack[operandSP - 1];
+ operandStack[operandSP - 1] = operand;
+ if (state->treeDepth == TREE_DEPTH_MAX) {
+ js_ReportCompileErrorNumber(state->context,
+ state->tokenStream,
+ JSREPORT_TS |
+ JSREPORT_ERROR,
+ JSMSG_REGEXP_TOO_COMPLEX);
+ goto out;
+ }
+ ++state->treeDepth;
+ /* FALL THROUGH */
+
+ case REOP_LPARENNON:
+ state->result = operandStack[operandSP - 1];
+ if (!ParseQuantifier(state))
+ goto out;
+ operandStack[operandSP - 1] = state->result;
+ goto restartOperator;
+ default:
+ if (!ProcessOp(state, &operatorStack[operatorSP],
+ operandStack, operandSP))
+ goto out;
+ --operandSP;
+ break;
+ }
+ }
+ break;
+
+ case '{':
+ {
+ const jschar *errp = state->cp;
+
+ if (ParseMinMaxQuantifier(state, JS_TRUE) < 0) {
+ /*
+ * This didn't even scan correctly as a quantifier, so we should
+ * treat it as flat.
+ */
+ op = REOP_CONCAT;
+ goto pushOperator;
+ }
+
+ state->cp = errp;
+ /* FALL THROUGH */
+ }
+
+ case '+':
+ case '*':
+ case '?':
+ js_ReportCompileErrorNumberUC(state->context, state->tokenStream,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_QUANTIFIER, state->cp);
+ result = JS_FALSE;
+ goto out;
+
+ default:
+ /* Anything else is the start of the next term. */
+ op = REOP_CONCAT;
+pushOperator:
+ if (operatorSP == operatorStackSize) {
+ operatorStackSize += operatorStackSize;
+ operatorStack = (REOpData *)
+ JS_realloc(state->context, operatorStack,
+ sizeof(REOpData) * operatorStackSize);
+ if (!operatorStack)
+ goto out;
+ }
+ operatorStack[operatorSP].op = op;
+ operatorStack[operatorSP].errPos = state->cp;
+ operatorStack[operatorSP++].parenIndex = parenIndex;
+ break;
+ }
+ }
+out:
+ if (operatorStack)
+ JS_free(state->context, operatorStack);
+ if (operandStack)
+ JS_free(state->context, operandStack);
+ return result;
+}
+
+/*
+ * Hack two bits in CompilerState.flags, for use within FindParenCount to flag
+ * its being on the stack, and to propagate errors to its callers.
+ */
+#define JSREG_FIND_PAREN_COUNT 0x8000
+#define JSREG_FIND_PAREN_ERROR 0x4000
+
+/*
+ * Magic return value from FindParenCount and GetDecimalValue, to indicate
+ * overflow beyond GetDecimalValue's max parameter, or a computed maximum if
+ * its findMax parameter is non-null.
+ */
+#define OVERFLOW_VALUE ((uintN)-1)
+
+static uintN
+FindParenCount(CompilerState *state)
+{
+ CompilerState temp;
+ int i;
+
+ if (state->flags & JSREG_FIND_PAREN_COUNT)
+ return OVERFLOW_VALUE;
+
+ /*
+ * Copy state into temp, flag it so we never report an invalid backref,
+ * and reset its members to parse the entire regexp. This is obviously
+ * suboptimal, but GetDecimalValue calls us only if a backref appears to
+ * refer to a forward parenthetical, which is rare.
+ */
+ temp = *state;
+ temp.flags |= JSREG_FIND_PAREN_COUNT;
+ temp.cp = temp.cpbegin;
+ temp.parenCount = 0;
+ temp.classCount = 0;
+ temp.progLength = 0;
+ temp.treeDepth = 0;
+ temp.classBitmapsMem = 0;
+ for (i = 0; i < CLASS_CACHE_SIZE; i++)
+ temp.classCache[i].start = NULL;
+
+ if (!ParseRegExp(&temp)) {
+ state->flags |= JSREG_FIND_PAREN_ERROR;
+ return OVERFLOW_VALUE;
+ }
+ return temp.parenCount;
+}
+
+/*
+ * Extract and return a decimal value at state->cp. The initial character c
+ * has already been read. Return OVERFLOW_VALUE if the result exceeds max.
+ * Callers who pass a non-null findMax should test JSREG_FIND_PAREN_ERROR in
+ * state->flags to discover whether an error occurred under findMax.
+ */
+static uintN
+GetDecimalValue(jschar c, uintN max, uintN (*findMax)(CompilerState *state),
+ CompilerState *state)
+{
+ uintN value = JS7_UNDEC(c);
+ JSBool overflow = (value > max && (!findMax || value > findMax(state)));
+
+ /* The following restriction allows simpler overflow checks. */
+ JS_ASSERT(max <= ((uintN)-1 - 9) / 10);
+ while (state->cp < state->cpend) {
+ c = *state->cp;
+ if (!JS7_ISDEC(c))
+ break;
+ value = 10 * value + JS7_UNDEC(c);
+ if (!overflow && value > max && (!findMax || value > findMax(state)))
+ overflow = JS_TRUE;
+ ++state->cp;
+ }
+ return overflow ? OVERFLOW_VALUE : value;
+}
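+
+/*
+ * Worked example (illustrative note, not in the original source): with
+ * max = 0xFFFF, findMax = NULL, and state->cp pointing at "23}", a call
+ * GetDecimalValue('1', max, NULL, state) consumes the "23", leaves state->cp
+ * at the '}', and returns 123.
+ */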
+
+/*
+ * Calculate the total size of the bitmap required for a class expression.
+ */
+static JSBool
+CalculateBitmapSize(CompilerState *state, RENode *target, const jschar *src,
+ const jschar *end)
+{
+ uintN max = 0;
+ JSBool inRange = JS_FALSE;
+ jschar c, rangeStart = 0;
+ uintN n, digit, nDigits, i;
+
+ target->u.ucclass.bmsize = 0;
+ target->u.ucclass.sense = JS_TRUE;
+
+ if (src == end)
+ return JS_TRUE;
+
+ if (*src == '^') {
+ ++src;
+ target->u.ucclass.sense = JS_FALSE;
+ }
+
+ while (src != end) {
+ uintN localMax = 0;
+ switch (*src) {
+ case '\\':
+ ++src;
+ c = *src++;
+ switch (c) {
+ case 'b':
+ localMax = 0x8;
+ break;
+ case 'f':
+ localMax = 0xC;
+ break;
+ case 'n':
+ localMax = 0xA;
+ break;
+ case 'r':
+ localMax = 0xD;
+ break;
+ case 't':
+ localMax = 0x9;
+ break;
+ case 'v':
+ localMax = 0xB;
+ break;
+ case 'c':
+ if (src < end && RE_IS_LETTER(*src)) {
+ localMax = (jschar) (*src++ & 0x1F);
+ } else {
+ --src;
+ localMax = '\\';
+ }
+ break;
+ case 'x':
+ nDigits = 2;
+ goto lexHex;
+ case 'u':
+ nDigits = 4;
+lexHex:
+ n = 0;
+ for (i = 0; (i < nDigits) && (src < end); i++) {
+ c = *src++;
+ if (!isASCIIHexDigit(c, &digit)) {
+ /*
+ * Back off to accepting the original
+ * '\' as a literal.
+ */
+ src -= i + 1;
+ n = '\\';
+ break;
+ }
+ n = (n << 4) | digit;
+ }
+ localMax = n;
+ break;
+ case 'd':
+ if (inRange) {
+ JS_ReportErrorNumber(state->context,
+ js_GetErrorMessage, NULL,
+ JSMSG_BAD_CLASS_RANGE);
+ return JS_FALSE;
+ }
+ localMax = '9';
+ break;
+ case 'D':
+ case 's':
+ case 'S':
+ case 'w':
+ case 'W':
+ if (inRange) {
+ JS_ReportErrorNumber(state->context,
+ js_GetErrorMessage, NULL,
+ JSMSG_BAD_CLASS_RANGE);
+ return JS_FALSE;
+ }
+ target->u.ucclass.bmsize = 65535;
+ return JS_TRUE;
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ /*
+ * This is a non-ECMA extension - decimal escapes (in this
+ * case, octal!) are supposed to be an error inside class
+ * ranges, but supported here for backwards compatibility.
+                 */
+ n = JS7_UNDEC(c);
+ c = *src;
+ if ('0' <= c && c <= '7') {
+ src++;
+ n = 8 * n + JS7_UNDEC(c);
+ c = *src;
+ if ('0' <= c && c <= '7') {
+ src++;
+ i = 8 * n + JS7_UNDEC(c);
+ if (i <= 0377)
+ n = i;
+ else
+ src--;
+ }
+ }
+ localMax = n;
+ break;
+
+ default:
+ localMax = c;
+ break;
+ }
+ break;
+ default:
+ localMax = *src++;
+ break;
+ }
+ if (state->flags & JSREG_FOLD) {
+ c = JS_MAX(upcase((jschar) localMax), downcase((jschar) localMax));
+ if (c > localMax)
+ localMax = c;
+ }
+ if (inRange) {
+ if (rangeStart > localMax) {
+ JS_ReportErrorNumber(state->context,
+ js_GetErrorMessage, NULL,
+ JSMSG_BAD_CLASS_RANGE);
+ return JS_FALSE;
+ }
+ inRange = JS_FALSE;
+ } else {
+ if (src < end - 1) {
+ if (*src == '-') {
+ ++src;
+ inRange = JS_TRUE;
+ rangeStart = (jschar)localMax;
+ continue;
+ }
+ }
+ }
+ if (localMax > max)
+ max = localMax;
+ }
+ target->u.ucclass.bmsize = max;
+ return JS_TRUE;
+}
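+
+/*
+ * Illustrative note, not part of the original source: the bmsize computed
+ * above is the highest character code the class can match, and the bitmap
+ * built for it later needs (bmsize >> 3) + 1 bytes to cover bits 0..bmsize
+ * (see the classBitmapsMem accounting in ParseTerm and byteLength in
+ * ProcessCharSet).  For example, the class [a-f] without folding has
+ * bmsize == 'f' == 102, so it needs (102 >> 3) + 1 == 13 bytes.
+ * SketchClassBitmapBytes is a made-up name for this arithmetic.
+ */
+#if 0
+static unsigned
+SketchClassBitmapBytes(unsigned bmsize)
+{
+    return (bmsize >> 3) + 1;           /* bytes needed for bits 0..bmsize */
+}
+#endif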
+
+/*
+ * item: assertion An item is either an assertion or
+ * quantatom a quantified atom.
+ *
+ * assertion: '^' Assertions match beginning of string
+ * (or line if the class static property
+ * RegExp.multiline is true).
+ * '$' End of string (or line if the class
+ * static property RegExp.multiline is
+ * true).
+ * '\b' Word boundary (between \w and \W).
+ * '\B' Word non-boundary.
+ *
+ * quantatom: atom An unquantified atom.
+ * quantatom '{' n ',' m '}'
+ * Atom must occur between n and m times.
+ * quantatom '{' n ',' '}' Atom must occur at least n times.
+ * quantatom '{' n '}' Atom must occur exactly n times.
+ * quantatom '*' Zero or more times (same as {0,}).
+ * quantatom '+' One or more times (same as {1,}).
+ * quantatom '?' Zero or one time (same as {0,1}).
+ *
+ * any of which can be optionally followed by '?' for ungreedy
+ *
+ * atom: '(' regexp ')' A parenthesized regexp (what matched
+ * can be addressed using a backreference,
+ * see '\' n below).
+ * '.' Matches any char except '\n'.
+ * '[' classlist ']' A character class.
+ * '[' '^' classlist ']' A negated character class.
+ * '\f' Form Feed.
+ * '\n' Newline (Line Feed).
+ * '\r' Carriage Return.
+ * '\t' Horizontal Tab.
+ * '\v' Vertical Tab.
+ * '\d' A digit (same as [0-9]).
+ * '\D' A non-digit.
+ * '\w' A word character, [0-9a-z_A-Z].
+ * '\W' A non-word character.
+ * '\s' A whitespace character, [ \b\f\n\r\t\v].
+ * '\S' A non-whitespace character.
+ * '\' n A backreference to the nth (n decimal
+ * and positive) parenthesized expression.
+ * '\' octal An octal escape sequence (octal must be
+ * two or three digits long, unless it is
+ * 0 for the null character).
+ * '\x' hex A hex escape (hex must be two digits).
+ * '\u' unicode A unicode escape (must be four digits).
+ * '\c' ctrl A control character, ctrl is a letter.
+ * '\' literalatomchar Any character except one of the above
+ * that follow '\' in an atom.
+ * otheratomchar Any character not first among the other
+ * atom right-hand sides.
+ */
+static JSBool
+ParseTerm(CompilerState *state)
+{
+ jschar c = *state->cp++;
+ uintN nDigits;
+ uintN num, tmp, n, i;
+ const jschar *termStart;
+
+ switch (c) {
+ /* assertions and atoms */
+ case '^':
+ state->result = NewRENode(state, REOP_BOL);
+ if (!state->result)
+ return JS_FALSE;
+ state->progLength++;
+ return JS_TRUE;
+ case '$':
+ state->result = NewRENode(state, REOP_EOL);
+ if (!state->result)
+ return JS_FALSE;
+ state->progLength++;
+ return JS_TRUE;
+ case '\\':
+ if (state->cp >= state->cpend) {
+ /* a trailing '\' is an error */
+ js_ReportCompileErrorNumber(state->context, state->tokenStream,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_TRAILING_SLASH);
+ return JS_FALSE;
+ }
+ c = *state->cp++;
+ switch (c) {
+ /* assertion escapes */
+ case 'b' :
+ state->result = NewRENode(state, REOP_WBDRY);
+ if (!state->result)
+ return JS_FALSE;
+ state->progLength++;
+ return JS_TRUE;
+ case 'B':
+ state->result = NewRENode(state, REOP_WNONBDRY);
+ if (!state->result)
+ return JS_FALSE;
+ state->progLength++;
+ return JS_TRUE;
+ /* Decimal escape */
+ case '0':
+ /* Give a strict warning. See also the note below. */
+ if (!js_ReportCompileErrorNumber(state->context,
+ state->tokenStream,
+ JSREPORT_TS |
+ JSREPORT_WARNING |
+ JSREPORT_STRICT,
+ JSMSG_INVALID_BACKREF)) {
+ return JS_FALSE;
+ }
+ doOctal:
+ num = 0;
+ while (state->cp < state->cpend) {
+ c = *state->cp;
+ if (c < '0' || '7' < c)
+ break;
+ state->cp++;
+ tmp = 8 * num + (uintN)JS7_UNDEC(c);
+ if (tmp > 0377)
+ break;
+ num = tmp;
+ }
+ c = (jschar)num;
+ doFlat:
+ state->result = NewRENode(state, REOP_FLAT);
+ if (!state->result)
+ return JS_FALSE;
+ state->result->u.flat.chr = c;
+ state->result->u.flat.length = 1;
+ state->progLength += 3;
+ break;
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ termStart = state->cp - 1;
+ num = GetDecimalValue(c, state->parenCount, FindParenCount, state);
+ if (state->flags & JSREG_FIND_PAREN_ERROR)
+ return JS_FALSE;
+ if (num == OVERFLOW_VALUE) {
+ /* Give a strict mode warning. */
+ if (!js_ReportCompileErrorNumber(state->context,
+ state->tokenStream,
+ JSREPORT_TS |
+ JSREPORT_WARNING |
+ JSREPORT_STRICT,
+ (c >= '8')
+ ? JSMSG_INVALID_BACKREF
+ : JSMSG_BAD_BACKREF)) {
+ return JS_FALSE;
+ }
+
+ /*
+ * Note: ECMA 262, 15.10.2.9 says that we should throw a syntax
+ * error here. However, for compatibility with IE, we treat the
+ * whole backref as flat if the first character in it is not a
+ * valid octal character, and as an octal escape otherwise.
+ */
+ state->cp = termStart;
+ if (c >= '8') {
+ /* Treat this as flat. termStart - 1 is the \. */
+ c = '\\';
+ goto asFlat;
+ }
+
+ /* Treat this as an octal escape. */
+ goto doOctal;
+ }
+ JS_ASSERT(1 <= num && num <= 0x10000);
+ state->result = NewRENode(state, REOP_BACKREF);
+ if (!state->result)
+ return JS_FALSE;
+ state->result->u.parenIndex = num - 1;
+ state->progLength
+ += 1 + GetCompactIndexWidth(state->result->u.parenIndex);
+ break;
+ /* Control escape */
+ case 'f':
+ c = 0xC;
+ goto doFlat;
+ case 'n':
+ c = 0xA;
+ goto doFlat;
+ case 'r':
+ c = 0xD;
+ goto doFlat;
+ case 't':
+ c = 0x9;
+ goto doFlat;
+ case 'v':
+ c = 0xB;
+ goto doFlat;
+ /* Control letter */
+ case 'c':
+ if (state->cp < state->cpend && RE_IS_LETTER(*state->cp)) {
+ c = (jschar) (*state->cp++ & 0x1F);
+ } else {
+ /* back off to accepting the original '\' as a literal */
+ --state->cp;
+ c = '\\';
+ }
+ goto doFlat;
+ /* HexEscapeSequence */
+ case 'x':
+ nDigits = 2;
+ goto lexHex;
+ /* UnicodeEscapeSequence */
+ case 'u':
+ nDigits = 4;
+lexHex:
+ n = 0;
+ for (i = 0; i < nDigits && state->cp < state->cpend; i++) {
+ uintN digit;
+ c = *state->cp++;
+ if (!isASCIIHexDigit(c, &digit)) {
+ /*
+ * Back off to accepting the original 'u' or 'x' as a
+ * literal.
+ */
+ state->cp -= i + 2;
+ n = *state->cp++;
+ break;
+ }
+ n = (n << 4) | digit;
+ }
+ c = (jschar) n;
+ goto doFlat;
+ /* Character class escapes */
+ case 'd':
+ state->result = NewRENode(state, REOP_DIGIT);
+doSimple:
+ if (!state->result)
+ return JS_FALSE;
+ state->progLength++;
+ break;
+ case 'D':
+ state->result = NewRENode(state, REOP_NONDIGIT);
+ goto doSimple;
+ case 's':
+ state->result = NewRENode(state, REOP_SPACE);
+ goto doSimple;
+ case 'S':
+ state->result = NewRENode(state, REOP_NONSPACE);
+ goto doSimple;
+ case 'w':
+ state->result = NewRENode(state, REOP_ALNUM);
+ goto doSimple;
+ case 'W':
+ state->result = NewRENode(state, REOP_NONALNUM);
+ goto doSimple;
+ /* IdentityEscape */
+ default:
+ state->result = NewRENode(state, REOP_FLAT);
+ if (!state->result)
+ return JS_FALSE;
+ state->result->u.flat.chr = c;
+ state->result->u.flat.length = 1;
+ state->result->kid = (void *) (state->cp - 1);
+ state->progLength += 3;
+ break;
+ }
+ break;
+ case '[':
+ state->result = NewRENode(state, REOP_CLASS);
+ if (!state->result)
+ return JS_FALSE;
+ termStart = state->cp;
+ state->result->u.ucclass.startIndex = termStart - state->cpbegin;
+ for (;;) {
+ if (state->cp == state->cpend) {
+ js_ReportCompileErrorNumberUC(state->context, state->tokenStream,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_UNTERM_CLASS, termStart);
+
+ return JS_FALSE;
+ }
+ if (*state->cp == '\\') {
+ state->cp++;
+ if (state->cp != state->cpend)
+ state->cp++;
+ continue;
+ }
+ if (*state->cp == ']') {
+ state->result->u.ucclass.kidlen = state->cp - termStart;
+ break;
+ }
+ state->cp++;
+ }
+ for (i = 0; i < CLASS_CACHE_SIZE; i++) {
+ if (!state->classCache[i].start) {
+ state->classCache[i].start = termStart;
+ state->classCache[i].length = state->result->u.ucclass.kidlen;
+ state->classCache[i].index = state->classCount;
+ break;
+ }
+ if (state->classCache[i].length ==
+ state->result->u.ucclass.kidlen) {
+ for (n = 0; ; n++) {
+ if (n == state->classCache[i].length) {
+ state->result->u.ucclass.index
+ = state->classCache[i].index;
+ goto claim;
+ }
+ if (state->classCache[i].start[n] != termStart[n])
+ break;
+ }
+ }
+ }
+ state->result->u.ucclass.index = state->classCount++;
+
+ claim:
+ /*
+ * Call CalculateBitmapSize now as we want any errors it finds
+ * to be reported during the parse phase, not at execution.
+ */
+ if (!CalculateBitmapSize(state, state->result, termStart, state->cp++))
+ return JS_FALSE;
+ /*
+ * Update classBitmapsMem with number of bytes to hold bmsize bits,
+ * which is (bitsCount + 7) / 8 or (highest_bit + 1 + 7) / 8
+ * or highest_bit / 8 + 1 where highest_bit is u.ucclass.bmsize.
+ */
+ n = (state->result->u.ucclass.bmsize >> 3) + 1;
+ if (n > CLASS_BITMAPS_MEM_LIMIT - state->classBitmapsMem) {
+ js_ReportCompileErrorNumber(state->context, state->tokenStream,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_REGEXP_TOO_COMPLEX);
+ return JS_FALSE;
+ }
+ state->classBitmapsMem += n;
+ /* CLASS, <index> */
+ state->progLength
+ += 1 + GetCompactIndexWidth(state->result->u.ucclass.index);
+ break;
+
+ case '.':
+ state->result = NewRENode(state, REOP_DOT);
+ goto doSimple;
+
+ case '{':
+ {
+ const jschar *errp = state->cp--;
+ intN err;
+
+ err = ParseMinMaxQuantifier(state, JS_TRUE);
+ state->cp = errp;
+
+ if (err < 0)
+ goto asFlat;
+
+ /* FALL THROUGH */
+ }
+ case '*':
+ case '+':
+ case '?':
+ js_ReportCompileErrorNumberUC(state->context, state->tokenStream,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_QUANTIFIER, state->cp - 1);
+ return JS_FALSE;
+ default:
+asFlat:
+ state->result = NewRENode(state, REOP_FLAT);
+ if (!state->result)
+ return JS_FALSE;
+ state->result->u.flat.chr = c;
+ state->result->u.flat.length = 1;
+ state->result->kid = (void *) (state->cp - 1);
+ state->progLength += 3;
+ break;
+ }
+ return ParseQuantifier(state);
+}
+
+static JSBool
+ParseQuantifier(CompilerState *state)
+{
+ RENode *term;
+ term = state->result;
+ if (state->cp < state->cpend) {
+ switch (*state->cp) {
+ case '+':
+ state->result = NewRENode(state, REOP_QUANT);
+ if (!state->result)
+ return JS_FALSE;
+ state->result->u.range.min = 1;
+ state->result->u.range.max = (uintN)-1;
+ /* <PLUS>, <next> ... <ENDCHILD> */
+ state->progLength += 4;
+ goto quantifier;
+ case '*':
+ state->result = NewRENode(state, REOP_QUANT);
+ if (!state->result)
+ return JS_FALSE;
+ state->result->u.range.min = 0;
+ state->result->u.range.max = (uintN)-1;
+ /* <STAR>, <next> ... <ENDCHILD> */
+ state->progLength += 4;
+ goto quantifier;
+ case '?':
+ state->result = NewRENode(state, REOP_QUANT);
+ if (!state->result)
+ return JS_FALSE;
+ state->result->u.range.min = 0;
+ state->result->u.range.max = 1;
+ /* <OPT>, <next> ... <ENDCHILD> */
+ state->progLength += 4;
+ goto quantifier;
+ case '{': /* balance '}' */
+ {
+ intN err;
+ const jschar *errp = state->cp;
+
+ err = ParseMinMaxQuantifier(state, JS_FALSE);
+ if (err == 0)
+ goto quantifier;
+ if (err == -1)
+ return JS_TRUE;
+
+ js_ReportCompileErrorNumberUC(state->context,
+ state->tokenStream,
+ JSREPORT_TS | JSREPORT_ERROR,
+ err, errp);
+ return JS_FALSE;
+ }
+ default:;
+ }
+ }
+ return JS_TRUE;
+
+quantifier:
+ if (state->treeDepth == TREE_DEPTH_MAX) {
+ js_ReportCompileErrorNumber(state->context, state->tokenStream,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_REGEXP_TOO_COMPLEX);
+ return JS_FALSE;
+ }
+
+ ++state->treeDepth;
+ ++state->cp;
+ state->result->kid = term;
+ if (state->cp < state->cpend && *state->cp == '?') {
+ ++state->cp;
+ state->result->u.range.greedy = JS_FALSE;
+ } else {
+ state->result->u.range.greedy = JS_TRUE;
+ }
+ return JS_TRUE;
+}
+
+static intN
+ParseMinMaxQuantifier(CompilerState *state, JSBool ignoreValues)
+{
+ uintN min, max;
+ jschar c;
+ const jschar *errp = state->cp++;
+
+ c = *state->cp;
+ if (JS7_ISDEC(c)) {
+ ++state->cp;
+ min = GetDecimalValue(c, 0xFFFF, NULL, state);
+ c = *state->cp;
+
+ if (!ignoreValues && min == OVERFLOW_VALUE)
+ return JSMSG_MIN_TOO_BIG;
+
+ if (c == ',') {
+ c = *++state->cp;
+ if (JS7_ISDEC(c)) {
+ ++state->cp;
+ max = GetDecimalValue(c, 0xFFFF, NULL, state);
+ c = *state->cp;
+ if (!ignoreValues && max == OVERFLOW_VALUE)
+ return JSMSG_MAX_TOO_BIG;
+ if (!ignoreValues && min > max)
+ return JSMSG_OUT_OF_ORDER;
+ } else {
+ max = (uintN)-1;
+ }
+ } else {
+ max = min;
+ }
+ if (c == '}') {
+ state->result = NewRENode(state, REOP_QUANT);
+ if (!state->result)
+ return JS_FALSE;
+ state->result->u.range.min = min;
+ state->result->u.range.max = max;
+ /*
+ * QUANT, <min>, <max>, <next> ... <ENDCHILD>
+             * where <max> is written as compact(max+1) so that the (uintN)-1
+             * sentinel occupies 1 byte rather than width_of(max)+1 bytes.
+ */
+ state->progLength += (1 + GetCompactIndexWidth(min)
+ + GetCompactIndexWidth(max + 1)
+                                  + 3);
+ return 0;
+ }
+ }
+
+ state->cp = errp;
+ return -1;
+}
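+
+/*
+ * Illustrative sketch, not part of the original source: the quantifier
+ * bytecode stores compact(max + 1) so that the "no upper bound" sentinel
+ * (uintN)-1 wraps to 0 and takes a single byte.  The function below uses a
+ * generic LEB128-style varint purely to show why small values are cheaper;
+ * the real GetCompactIndexWidth/WriteCompactIndex encoding may differ in
+ * detail, and SketchVarintWidth is a made-up name.
+ */
+#if 0
+static unsigned
+SketchVarintWidth(unsigned v)
+{
+    unsigned bytes = 1;
+    while (v >= 0x80) {                 /* 7 value bits per byte */
+        v >>= 7;
+        bytes++;
+    }
+    return bytes;                       /* 0 -> 1 byte, 0xFFFFFFFF -> 5 bytes */
+}
+#endif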
+
+static JSBool
+SetForwardJumpOffset(jsbytecode *jump, jsbytecode *target)
+{
+ ptrdiff_t offset = target - jump;
+
+ /* Check that target really points forward. */
+ JS_ASSERT(offset >= 2);
+ if ((size_t)offset > OFFSET_MAX)
+ return JS_FALSE;
+
+ jump[0] = JUMP_OFFSET_HI(offset);
+ jump[1] = JUMP_OFFSET_LO(offset);
+ return JS_TRUE;
+}
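+
+/*
+ * Illustrative sketch, not part of the original source: forward jumps are
+ * patched as a 16-bit offset split across two bytecode bytes, with
+ * OFFSET_MAX bounding the reachable distance.  Assuming the plain
+ * high-byte/low-byte split the JUMP_OFFSET_HI/LO names suggest, a
+ * stand-alone version of the write and the matching read looks like this
+ * (the Sketch* names are made up).
+ */
+#if 0
+static int
+SketchWriteJumpOffset(unsigned char *jump, unsigned offset)
+{
+    if (offset > 0xFFFF)
+        return 0;                       /* out of range for two bytes */
+    jump[0] = (unsigned char)(offset >> 8);
+    jump[1] = (unsigned char)(offset & 0xFF);
+    return 1;
+}
+
+static unsigned
+SketchReadJumpOffset(const unsigned char *jump)
+{
+    return ((unsigned)jump[0] << 8) | jump[1];
+}
+#endif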
+
+/*
+ * Generate bytecode for the tree rooted at t using an explicit stack instead
+ * of recursion.
+ */
+static jsbytecode *
+EmitREBytecode(CompilerState *state, JSRegExp *re, size_t treeDepth,
+ jsbytecode *pc, RENode *t)
+{
+ EmitStateStackEntry *emitStateSP, *emitStateStack;
+ RECharSet *charSet;
+ REOp op;
+
+ if (treeDepth == 0) {
+ emitStateStack = NULL;
+ } else {
+ emitStateStack =
+ (EmitStateStackEntry *)JS_malloc(state->context,
+ sizeof(EmitStateStackEntry) *
+ treeDepth);
+ if (!emitStateStack)
+ return NULL;
+ }
+ emitStateSP = emitStateStack;
+ op = t->op;
+
+ for (;;) {
+ *pc++ = op;
+ switch (op) {
+ case REOP_EMPTY:
+ --pc;
+ break;
+
+ case REOP_ALTPREREQ2:
+ case REOP_ALTPREREQ:
+ JS_ASSERT(emitStateSP);
+ emitStateSP->altHead = pc - 1;
+ emitStateSP->endTermFixup = pc;
+ pc += OFFSET_LEN;
+ SET_ARG(pc, t->u.altprereq.ch1);
+ pc += ARG_LEN;
+ SET_ARG(pc, t->u.altprereq.ch2);
+ pc += ARG_LEN;
+
+ emitStateSP->nextAltFixup = pc; /* offset to next alternate */
+ pc += OFFSET_LEN;
+
+ emitStateSP->continueNode = t;
+ emitStateSP->continueOp = REOP_JUMP;
+ emitStateSP->jumpToJumpFlag = JS_FALSE;
+ ++emitStateSP;
+ JS_ASSERT((size_t)(emitStateSP - emitStateStack) <= treeDepth);
+ t = (RENode *) t->kid;
+ op = t->op;
+ continue;
+
+ case REOP_JUMP:
+ emitStateSP->nextTermFixup = pc; /* offset to following term */
+ pc += OFFSET_LEN;
+ if (!SetForwardJumpOffset(emitStateSP->nextAltFixup, pc))
+ goto jump_too_big;
+ emitStateSP->continueOp = REOP_ENDALT;
+ ++emitStateSP;
+ JS_ASSERT((size_t)(emitStateSP - emitStateStack) <= treeDepth);
+ t = t->u.kid2;
+ op = t->op;
+ continue;
+
+ case REOP_ENDALT:
+ /*
+ * If we already patched emitStateSP->nextTermFixup to jump to
+ * a nearer jump, to avoid 16-bit immediate offset overflow, we
+ * are done here.
+ */
+ if (emitStateSP->jumpToJumpFlag)
+ break;
+
+ /*
+ * Fix up the REOP_JUMP offset to go to the op after REOP_ENDALT.
+ * REOP_ENDALT is executed only on successful match of the last
+ * alternate in a group.
+ */
+ if (!SetForwardJumpOffset(emitStateSP->nextTermFixup, pc))
+ goto jump_too_big;
+ if (t->op != REOP_ALT) {
+ if (!SetForwardJumpOffset(emitStateSP->endTermFixup, pc))
+ goto jump_too_big;
+ }
+
+ /*
+ * If the program is bigger than the REOP_JUMP offset range, then
+ * we must check for alternates before this one that are part of
+ * the same group, and fix up their jump offsets to target jumps
+ * close enough to fit in a 16-bit unsigned offset immediate.
+ */
+ if ((size_t)(pc - re->program) > OFFSET_MAX &&
+ emitStateSP > emitStateStack) {
+ EmitStateStackEntry *esp, *esp2;
+ jsbytecode *alt, *jump;
+ ptrdiff_t span, header;
+
+ esp2 = emitStateSP;
+ alt = esp2->altHead;
+ for (esp = esp2 - 1; esp >= emitStateStack; --esp) {
+ if (esp->continueOp == REOP_ENDALT &&
+ !esp->jumpToJumpFlag &&
+ esp->nextTermFixup + OFFSET_LEN == alt &&
+ (size_t)(pc - ((esp->continueNode->op != REOP_ALT)
+ ? esp->endTermFixup
+ : esp->nextTermFixup)) > OFFSET_MAX) {
+ alt = esp->altHead;
+ jump = esp->nextTermFixup;
+
+ /*
+ * The span must be 1 less than the distance from
+ * jump offset to jump offset, so we actually jump
+ * to a REOP_JUMP bytecode, not to its offset!
+ */
+ for (;;) {
+ JS_ASSERT(jump < esp2->nextTermFixup);
+ span = esp2->nextTermFixup - jump - 1;
+ if ((size_t)span <= OFFSET_MAX)
+ break;
+ do {
+ if (--esp2 == esp)
+ goto jump_too_big;
+ } while (esp2->continueOp != REOP_ENDALT);
+ }
+
+ jump[0] = JUMP_OFFSET_HI(span);
+ jump[1] = JUMP_OFFSET_LO(span);
+
+ if (esp->continueNode->op != REOP_ALT) {
+ /*
+ * We must patch the offset at esp->endTermFixup
+ * as well, for the REOP_ALTPREREQ{,2} opcodes.
+ * If we're unlucky and endTermFixup is more than
+ * OFFSET_MAX bytes from its target, we cheat by
+ * jumping 6 bytes to the jump whose offset is at
+ * esp->nextTermFixup, which has the same target.
+ */
+ jump = esp->endTermFixup;
+ header = esp->nextTermFixup - jump;
+ span += header;
+ if ((size_t)span > OFFSET_MAX)
+ span = header;
+
+ jump[0] = JUMP_OFFSET_HI(span);
+ jump[1] = JUMP_OFFSET_LO(span);
+ }
+
+ esp->jumpToJumpFlag = JS_TRUE;
+ }
+ }
+ }
+ break;
+
+ case REOP_ALT:
+ JS_ASSERT(emitStateSP);
+ emitStateSP->altHead = pc - 1;
+ emitStateSP->nextAltFixup = pc; /* offset to next alternate */
+ pc += OFFSET_LEN;
+ emitStateSP->continueNode = t;
+ emitStateSP->continueOp = REOP_JUMP;
+ emitStateSP->jumpToJumpFlag = JS_FALSE;
+ ++emitStateSP;
+ JS_ASSERT((size_t)(emitStateSP - emitStateStack) <= treeDepth);
+ t = t->kid;
+ op = t->op;
+ continue;
+
+ case REOP_FLAT:
+ /*
+             * Coalesce FLATs if possible, but only if doing so would not grow
+             * the bytecode beyond the preallocated limit. The latter happens
+             * only when the bytecode for a coalesced string with offset p and
+             * length 2 exceeds the 6 bytes preallocated for 2 single-char
+             * nodes, i.e. when
+             * 1 + GetCompactIndexWidth(p) + GetCompactIndexWidth(2) > 6, or
+             * GetCompactIndexWidth(p) > 4.
+             * Since coalescing 3 or more nodes strictly decreases bytecode
+             * size when GetCompactIndexWidth(p) <= 4, the check has to be
+             * done only for the first coalescing.
+ */
+ if (t->kid &&
+ GetCompactIndexWidth((jschar *)t->kid - state->cpbegin) <= 4)
+ {
+ while (t->next &&
+ t->next->op == REOP_FLAT &&
+ (jschar*)t->kid + t->u.flat.length ==
+ (jschar*)t->next->kid) {
+ t->u.flat.length += t->next->u.flat.length;
+ t->next = t->next->next;
+ }
+ }
+ if (t->kid && t->u.flat.length > 1) {
+ pc[-1] = (state->flags & JSREG_FOLD) ? REOP_FLATi : REOP_FLAT;
+ pc = WriteCompactIndex(pc, (jschar *)t->kid - state->cpbegin);
+ pc = WriteCompactIndex(pc, t->u.flat.length);
+ } else if (t->u.flat.chr < 256) {
+ pc[-1] = (state->flags & JSREG_FOLD) ? REOP_FLAT1i : REOP_FLAT1;
+ *pc++ = (jsbytecode) t->u.flat.chr;
+ } else {
+ pc[-1] = (state->flags & JSREG_FOLD)
+ ? REOP_UCFLAT1i
+ : REOP_UCFLAT1;
+ SET_ARG(pc, t->u.flat.chr);
+ pc += ARG_LEN;
+ }
+ break;
+
+ case REOP_LPAREN:
+ JS_ASSERT(emitStateSP);
+ pc = WriteCompactIndex(pc, t->u.parenIndex);
+ emitStateSP->continueNode = t;
+ emitStateSP->continueOp = REOP_RPAREN;
+ ++emitStateSP;
+ JS_ASSERT((size_t)(emitStateSP - emitStateStack) <= treeDepth);
+ t = (RENode *) t->kid;
+ op = t->op;
+ continue;
+
+ case REOP_RPAREN:
+ pc = WriteCompactIndex(pc, t->u.parenIndex);
+ break;
+
+ case REOP_BACKREF:
+ pc = WriteCompactIndex(pc, t->u.parenIndex);
+ break;
+
+ case REOP_ASSERT:
+ JS_ASSERT(emitStateSP);
+ emitStateSP->nextTermFixup = pc;
+ pc += OFFSET_LEN;
+ emitStateSP->continueNode = t;
+ emitStateSP->continueOp = REOP_ASSERTTEST;
+ ++emitStateSP;
+ JS_ASSERT((size_t)(emitStateSP - emitStateStack) <= treeDepth);
+ t = (RENode *) t->kid;
+ op = t->op;
+ continue;
+
+ case REOP_ASSERTTEST:
+ case REOP_ASSERTNOTTEST:
+ if (!SetForwardJumpOffset(emitStateSP->nextTermFixup, pc))
+ goto jump_too_big;
+ break;
+
+ case REOP_ASSERT_NOT:
+ JS_ASSERT(emitStateSP);
+ emitStateSP->nextTermFixup = pc;
+ pc += OFFSET_LEN;
+ emitStateSP->continueNode = t;
+ emitStateSP->continueOp = REOP_ASSERTNOTTEST;
+ ++emitStateSP;
+ JS_ASSERT((size_t)(emitStateSP - emitStateStack) <= treeDepth);
+ t = (RENode *) t->kid;
+ op = t->op;
+ continue;
+
+ case REOP_QUANT:
+ JS_ASSERT(emitStateSP);
+ if (t->u.range.min == 0 && t->u.range.max == (uintN)-1) {
+ pc[-1] = (t->u.range.greedy) ? REOP_STAR : REOP_MINIMALSTAR;
+ } else if (t->u.range.min == 0 && t->u.range.max == 1) {
+ pc[-1] = (t->u.range.greedy) ? REOP_OPT : REOP_MINIMALOPT;
+ } else if (t->u.range.min == 1 && t->u.range.max == (uintN) -1) {
+ pc[-1] = (t->u.range.greedy) ? REOP_PLUS : REOP_MINIMALPLUS;
+ } else {
+ if (!t->u.range.greedy)
+ pc[-1] = REOP_MINIMALQUANT;
+ pc = WriteCompactIndex(pc, t->u.range.min);
+ /*
+                 * Write max + 1 so that the (uintN)-1 sentinel is stored as 0
+                 * and occupies a single byte (see ParseMinMaxQuantifier).
+ */
+ pc = WriteCompactIndex(pc, t->u.range.max + 1);
+ }
+ emitStateSP->nextTermFixup = pc;
+ pc += OFFSET_LEN;
+ emitStateSP->continueNode = t;
+ emitStateSP->continueOp = REOP_ENDCHILD;
+ ++emitStateSP;
+ JS_ASSERT((size_t)(emitStateSP - emitStateStack) <= treeDepth);
+ t = (RENode *) t->kid;
+ op = t->op;
+ continue;
+
+ case REOP_ENDCHILD:
+ if (!SetForwardJumpOffset(emitStateSP->nextTermFixup, pc))
+ goto jump_too_big;
+ break;
+
+ case REOP_CLASS:
+ if (!t->u.ucclass.sense)
+ pc[-1] = REOP_NCLASS;
+ pc = WriteCompactIndex(pc, t->u.ucclass.index);
+ charSet = &re->classList[t->u.ucclass.index];
+ charSet->converted = JS_FALSE;
+ charSet->length = t->u.ucclass.bmsize;
+ charSet->u.src.startIndex = t->u.ucclass.startIndex;
+ charSet->u.src.length = t->u.ucclass.kidlen;
+ charSet->sense = t->u.ucclass.sense;
+ break;
+
+ default:
+ break;
+ }
+
+ t = t->next;
+ if (t) {
+ op = t->op;
+ } else {
+ if (emitStateSP == emitStateStack)
+ break;
+ --emitStateSP;
+ t = emitStateSP->continueNode;
+ op = emitStateSP->continueOp;
+ }
+ }
+
+ cleanup:
+ if (emitStateStack)
+ JS_free(state->context, emitStateStack);
+ return pc;
+
+ jump_too_big:
+ js_ReportCompileErrorNumber(state->context, state->tokenStream,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_REGEXP_TOO_COMPLEX);
+ pc = NULL;
+ goto cleanup;
+}
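+
+/*
+ * Illustrative sketch, not part of the original source: stripped of the
+ * bytecode details, EmitREBytecode above walks the kid/next node tree with
+ * an explicit stack (bounded by the precomputed tree depth) instead of
+ * recursion.  SketchNode and SketchEmitTree are made-up names; the real
+ * emitter also emits a continuation op each time it pops.
+ */
+#if 0
+typedef struct SketchNode {
+    struct SketchNode *kid;             /* first child, may be NULL */
+    struct SketchNode *next;            /* next sibling, may be NULL */
+} SketchNode;
+
+static void
+SketchEmitTree(SketchNode *t, SketchNode **stack /* sized to tree depth */)
+{
+    size_t sp = 0;
+
+    while (t) {
+        /* ... emit bytecode for t here ... */
+        if (t->kid) {
+            stack[sp++] = t;            /* remember where to resume */
+            t = t->kid;
+            continue;
+        }
+        while (!t->next && sp != 0)
+            t = stack[--sp];            /* pop back to an unfinished parent */
+        t = t->next;                    /* go on with the sibling chain */
+    }
+}
+#endif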
+
+
+JSRegExp *
+js_NewRegExp(JSContext *cx, JSTokenStream *ts,
+ JSString *str, uintN flags, JSBool flat)
+{
+ JSRegExp *re;
+ void *mark;
+ CompilerState state;
+ size_t resize;
+ jsbytecode *endPC;
+ uintN i;
+ size_t len;
+
+ re = NULL;
+ mark = JS_ARENA_MARK(&cx->tempPool);
+ len = JSSTRING_LENGTH(str);
+
+ state.context = cx;
+ state.tokenStream = ts;
+ state.cp = js_UndependString(cx, str);
+ if (!state.cp)
+ goto out;
+ state.cpbegin = state.cp;
+ state.cpend = state.cp + len;
+ state.flags = flags;
+ state.parenCount = 0;
+ state.classCount = 0;
+ state.progLength = 0;
+ state.treeDepth = 0;
+ state.classBitmapsMem = 0;
+ for (i = 0; i < CLASS_CACHE_SIZE; i++)
+ state.classCache[i].start = NULL;
+
+ if (len != 0 && flat) {
+ state.result = NewRENode(&state, REOP_FLAT);
+ state.result->u.flat.chr = *state.cpbegin;
+ state.result->u.flat.length = len;
+ state.result->kid = (void *) state.cpbegin;
+ /* Flat bytecode: REOP_FLAT compact(string_offset) compact(len). */
+ state.progLength += 1 + GetCompactIndexWidth(0)
+ + GetCompactIndexWidth(len);
+ } else {
+ if (!ParseRegExp(&state))
+ goto out;
+ }
+ resize = offsetof(JSRegExp, program) + state.progLength + 1;
+ re = (JSRegExp *) JS_malloc(cx, resize);
+ if (!re)
+ goto out;
+
+ re->nrefs = 1;
+ JS_ASSERT(state.classBitmapsMem <= CLASS_BITMAPS_MEM_LIMIT);
+ re->classCount = state.classCount;
+ if (re->classCount) {
+ re->classList = (RECharSet *)
+ JS_malloc(cx, re->classCount * sizeof(RECharSet));
+ if (!re->classList) {
+ js_DestroyRegExp(cx, re);
+ re = NULL;
+ goto out;
+ }
+ for (i = 0; i < re->classCount; i++)
+ re->classList[i].converted = JS_FALSE;
+ } else {
+ re->classList = NULL;
+ }
+ endPC = EmitREBytecode(&state, re, state.treeDepth, re->program, state.result);
+ if (!endPC) {
+ js_DestroyRegExp(cx, re);
+ re = NULL;
+ goto out;
+ }
+ *endPC++ = REOP_END;
+ /*
+ * Check whether size was overestimated and shrink using realloc.
+     * This is safe since no pointers to the newly parsed regexp or its
+     * parts, other than re, exist here.
+ */
+ if ((size_t)(endPC - re->program) != state.progLength + 1) {
+ JSRegExp *tmp;
+ JS_ASSERT((size_t)(endPC - re->program) < state.progLength + 1);
+ resize = offsetof(JSRegExp, program) + (endPC - re->program);
+ tmp = (JSRegExp *) JS_realloc(cx, re, resize);
+ if (tmp)
+ re = tmp;
+ }
+
+ re->flags = flags;
+ re->cloneIndex = 0;
+ re->parenCount = state.parenCount;
+ re->source = str;
+
+out:
+ JS_ARENA_RELEASE(&cx->tempPool, mark);
+ return re;
+}
+
+JSRegExp *
+js_NewRegExpOpt(JSContext *cx, JSTokenStream *ts,
+ JSString *str, JSString *opt, JSBool flat)
+{
+ uintN flags;
+ jschar *s;
+ size_t i, n;
+ char charBuf[2];
+
+ flags = 0;
+ if (opt) {
+ s = JSSTRING_CHARS(opt);
+ for (i = 0, n = JSSTRING_LENGTH(opt); i < n; i++) {
+ switch (s[i]) {
+ case 'g':
+ flags |= JSREG_GLOB;
+ break;
+ case 'i':
+ flags |= JSREG_FOLD;
+ break;
+ case 'm':
+ flags |= JSREG_MULTILINE;
+ break;
+ default:
+ charBuf[0] = (char)s[i];
+ charBuf[1] = '\0';
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_FLAG, charBuf);
+ return NULL;
+ }
+ }
+ }
+ return js_NewRegExp(cx, ts, str, flags, flat);
+}
+
+/*
+ * Save the current state of the match - the position in the input
+ * text as well as the position in the bytecode. The state of any
+ * parent expressions is also saved (preceding state).
+ * Contents of parenCount parentheses from parenIndex are also saved.
+ */
+static REBackTrackData *
+PushBackTrackState(REGlobalData *gData, REOp op,
+ jsbytecode *target, REMatchState *x, const jschar *cp,
+ size_t parenIndex, size_t parenCount)
+{
+ size_t i;
+ REBackTrackData *result =
+ (REBackTrackData *) ((char *)gData->backTrackSP + gData->cursz);
+
+ size_t sz = sizeof(REBackTrackData) +
+ gData->stateStackTop * sizeof(REProgState) +
+ parenCount * sizeof(RECapture);
+
+ ptrdiff_t btsize = gData->backTrackStackSize;
+ ptrdiff_t btincr = ((char *)result + sz) -
+ ((char *)gData->backTrackStack + btsize);
+
+ if (btincr > 0) {
+ ptrdiff_t offset = (char *)result - (char *)gData->backTrackStack;
+
+ btincr = JS_ROUNDUP(btincr, btsize);
+ JS_ARENA_GROW_CAST(gData->backTrackStack, REBackTrackData *,
+ &gData->pool, btsize, btincr);
+ if (!gData->backTrackStack) {
+ JS_ReportOutOfMemory(gData->cx);
+ gData->ok = JS_FALSE;
+ return NULL;
+ }
+ gData->backTrackStackSize = btsize + btincr;
+ result = (REBackTrackData *) ((char *)gData->backTrackStack + offset);
+ }
+ gData->backTrackSP = result;
+ result->sz = gData->cursz;
+ gData->cursz = sz;
+
+ result->backtrack_op = op;
+ result->backtrack_pc = target;
+ result->cp = cp;
+ result->parenCount = parenCount;
+
+ result->saveStateStackTop = gData->stateStackTop;
+ JS_ASSERT(gData->stateStackTop);
+ memcpy(result + 1, gData->stateStack,
+ sizeof(REProgState) * result->saveStateStackTop);
+
+ if (parenCount != 0) {
+ result->parenIndex = parenIndex;
+ memcpy((char *)(result + 1) +
+ sizeof(REProgState) * result->saveStateStackTop,
+ &x->parens[parenIndex],
+ sizeof(RECapture) * parenCount);
+ for (i = 0; i != parenCount; i++)
+ x->parens[parenIndex + i].index = -1;
+ }
+
+ return result;
+}
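+
+/*
+ * Illustrative note, not part of the original source: each backtrack record
+ * is laid out in the arena as a fixed header followed by two variable-length
+ * arrays,
+ *
+ *   | REBackTrackData | saveStateStackTop REProgStates | parenCount RECaptures |
+ *
+ * records are stacked back to back, cursz holds the size of the record on
+ * top, and each record's sz field remembers the size of the record below it,
+ * so popping is a single pointer subtraction (see ExecuteREBytecode).
+ * SketchBackTrackRecordSize is a made-up name for the size bookkeeping above.
+ */
+#if 0
+static size_t
+SketchBackTrackRecordSize(size_t stateStackTop, size_t parenCount)
+{
+    return sizeof(REBackTrackData) +
+           stateStackTop * sizeof(REProgState) +
+           parenCount * sizeof(RECapture);
+}
+#endif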
+
+
+/*
+ * Consecutive literal characters.
+ */
+#if 0
+static REMatchState *
+FlatNMatcher(REGlobalData *gData, REMatchState *x, jschar *matchChars,
+ size_t length)
+{
+ size_t i;
+ if (length > gData->cpend - x->cp)
+ return NULL;
+ for (i = 0; i != length; i++) {
+ if (matchChars[i] != x->cp[i])
+ return NULL;
+ }
+ x->cp += length;
+ return x;
+}
+#endif
+
+static REMatchState *
+FlatNIMatcher(REGlobalData *gData, REMatchState *x, jschar *matchChars,
+ size_t length)
+{
+ size_t i;
+ JS_ASSERT(gData->cpend >= x->cp);
+ if (length > (size_t)(gData->cpend - x->cp))
+ return NULL;
+ for (i = 0; i != length; i++) {
+ if (upcase(matchChars[i]) != upcase(x->cp[i]))
+ return NULL;
+ }
+ x->cp += length;
+ return x;
+}
+
+/*
+ * 1. Evaluate DecimalEscape to obtain an EscapeValue E.
+ * 2. If E is not a character then go to step 6.
+ * 3. Let ch be E's character.
+ * 4. Let A be a one-element RECharSet containing the character ch.
+ * 5. Call CharacterSetMatcher(A, false) and return its Matcher result.
+ * 6. E must be an integer. Let n be that integer.
+ * 7. If n=0 or n>NCapturingParens then throw a SyntaxError exception.
+ * 8. Return an internal Matcher closure that takes two arguments, a State x
+ * and a Continuation c, and performs the following:
+ * 1. Let cap be x's captures internal array.
+ * 2. Let s be cap[n].
+ * 3. If s is undefined, then call c(x) and return its result.
+ * 4. Let e be x's endIndex.
+ * 5. Let len be s's length.
+ * 6. Let f be e+len.
+ * 7. If f>InputLength, return failure.
+ * 8. If there exists an integer i between 0 (inclusive) and len (exclusive)
+ * such that Canonicalize(s[i]) is not the same character as
+ * Canonicalize(Input [e+i]), then return failure.
+ * 9. Let y be the State (f, cap).
+ * 10. Call c(y) and return its result.
+ */
+static REMatchState *
+BackrefMatcher(REGlobalData *gData, REMatchState *x, size_t parenIndex)
+{
+ size_t len, i;
+ const jschar *parenContent;
+ RECapture *cap = &x->parens[parenIndex];
+
+ if (cap->index == -1)
+ return x;
+
+ len = cap->length;
+ if (x->cp + len > gData->cpend)
+ return NULL;
+
+ parenContent = &gData->cpbegin[cap->index];
+ if (gData->regexp->flags & JSREG_FOLD) {
+ for (i = 0; i < len; i++) {
+ if (upcase(parenContent[i]) != upcase(x->cp[i]))
+ return NULL;
+ }
+ } else {
+ for (i = 0; i < len; i++) {
+ if (parenContent[i] != x->cp[i])
+ return NULL;
+ }
+ }
+ x->cp += len;
+ return x;
+}
+
+
+/* Add a single character to the RECharSet */
+static void
+AddCharacterToCharSet(RECharSet *cs, jschar c)
+{
+ uintN byteIndex = (uintN)(c >> 3);
+ JS_ASSERT(c <= cs->length);
+ cs->u.bits[byteIndex] |= 1 << (c & 0x7);
+}
+
+
+/* Add a character range, c1 to c2 (inclusive) to the RECharSet */
+static void
+AddCharacterRangeToCharSet(RECharSet *cs, jschar c1, jschar c2)
+{
+ uintN i;
+
+ uintN byteIndex1 = (uintN)(c1 >> 3);
+ uintN byteIndex2 = (uintN)(c2 >> 3);
+
+ JS_ASSERT((c2 <= cs->length) && (c1 <= c2));
+
+ c1 &= 0x7;
+ c2 &= 0x7;
+
+ if (byteIndex1 == byteIndex2) {
+ cs->u.bits[byteIndex1] |= ((uint8)0xFF >> (7 - (c2 - c1))) << c1;
+ } else {
+ cs->u.bits[byteIndex1] |= 0xFF << c1;
+ for (i = byteIndex1 + 1; i < byteIndex2; i++)
+ cs->u.bits[i] = 0xFF;
+ cs->u.bits[byteIndex2] |= (uint8)0xFF >> (7 - c2);
+ }
+}
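+
+/*
+ * Illustrative sketch, not part of the original source: within a single
+ * byte, the range [c1, c2] of bit positions (0 <= c1 <= c2 <= 7) is set
+ * with the mask ((uint8)0xFF >> (7 - (c2 - c1))) << c1 used above.  For
+ * example, bits 2..5 give (0xFF >> 4) << 2 == 0x0F << 2 == 0x3C.
+ * SketchRangeMask is a made-up name.
+ */
+#if 0
+static unsigned char
+SketchRangeMask(unsigned c1, unsigned c2)
+{
+    return (unsigned char)((0xFFu >> (7 - (c2 - c1))) << c1);
+}
+#endif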
+
+/* Compile the source of the class into a RECharSet */
+static JSBool
+ProcessCharSet(REGlobalData *gData, RECharSet *charSet)
+{
+ const jschar *src, *end;
+ JSBool inRange = JS_FALSE;
+ jschar rangeStart = 0;
+ uintN byteLength, n;
+ jschar c, thisCh;
+ intN nDigits, i;
+
+ JS_ASSERT(!charSet->converted);
+ /*
+     * Assert that startIndex and length point to chars inside the [...] in
+     * the source string.
+ */
+ JS_ASSERT(1 <= charSet->u.src.startIndex);
+ JS_ASSERT(charSet->u.src.startIndex
+ < JSSTRING_LENGTH(gData->regexp->source));
+ JS_ASSERT(charSet->u.src.length <= JSSTRING_LENGTH(gData->regexp->source)
+ - 1 - charSet->u.src.startIndex);
+
+ charSet->converted = JS_TRUE;
+ src = JSSTRING_CHARS(gData->regexp->source) + charSet->u.src.startIndex;
+ end = src + charSet->u.src.length;
+ JS_ASSERT(src[-1] == '[');
+ JS_ASSERT(end[0] == ']');
+
+ byteLength = (charSet->length >> 3) + 1;
+ charSet->u.bits = (uint8 *)JS_malloc(gData->cx, byteLength);
+ if (!charSet->u.bits) {
+ JS_ReportOutOfMemory(gData->cx);
+ gData->ok = JS_FALSE;
+ return JS_FALSE;
+ }
+ memset(charSet->u.bits, 0, byteLength);
+
+ if (src == end)
+ return JS_TRUE;
+
+ if (*src == '^') {
+ JS_ASSERT(charSet->sense == JS_FALSE);
+ ++src;
+ } else {
+ JS_ASSERT(charSet->sense == JS_TRUE);
+ }
+
+ while (src != end) {
+ switch (*src) {
+ case '\\':
+ ++src;
+ c = *src++;
+ switch (c) {
+ case 'b':
+ thisCh = 0x8;
+ break;
+ case 'f':
+ thisCh = 0xC;
+ break;
+ case 'n':
+ thisCh = 0xA;
+ break;
+ case 'r':
+ thisCh = 0xD;
+ break;
+ case 't':
+ thisCh = 0x9;
+ break;
+ case 'v':
+ thisCh = 0xB;
+ break;
+ case 'c':
+ if (src < end && JS_ISWORD(*src)) {
+ thisCh = (jschar)(*src++ & 0x1F);
+ } else {
+ --src;
+ thisCh = '\\';
+ }
+ break;
+ case 'x':
+ nDigits = 2;
+ goto lexHex;
+ case 'u':
+ nDigits = 4;
+ lexHex:
+ n = 0;
+ for (i = 0; (i < nDigits) && (src < end); i++) {
+ uintN digit;
+ c = *src++;
+ if (!isASCIIHexDigit(c, &digit)) {
+ /*
+ * Back off to accepting the original '\'
+ * as a literal
+ */
+ src -= i + 1;
+ n = '\\';
+ break;
+ }
+ n = (n << 4) | digit;
+ }
+ thisCh = (jschar)n;
+ break;
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ /*
+ * This is a non-ECMA extension - decimal escapes (in this
+ * case, octal!) are supposed to be an error inside class
+ * ranges, but supported here for backwards compatibility.
+ */
+ n = JS7_UNDEC(c);
+ c = *src;
+ if ('0' <= c && c <= '7') {
+ src++;
+ n = 8 * n + JS7_UNDEC(c);
+ c = *src;
+ if ('0' <= c && c <= '7') {
+ src++;
+ i = 8 * n + JS7_UNDEC(c);
+ if (i <= 0377)
+ n = i;
+ else
+ src--;
+ }
+ }
+ thisCh = (jschar)n;
+ break;
+
+ case 'd':
+ AddCharacterRangeToCharSet(charSet, '0', '9');
+ continue; /* don't need range processing */
+ case 'D':
+ AddCharacterRangeToCharSet(charSet, 0, '0' - 1);
+ AddCharacterRangeToCharSet(charSet,
+ (jschar)('9' + 1),
+ (jschar)charSet->length);
+ continue;
+ case 's':
+ for (i = (intN)charSet->length; i >= 0; i--)
+ if (JS_ISSPACE(i))
+ AddCharacterToCharSet(charSet, (jschar)i);
+ continue;
+ case 'S':
+ for (i = (intN)charSet->length; i >= 0; i--)
+ if (!JS_ISSPACE(i))
+ AddCharacterToCharSet(charSet, (jschar)i);
+ continue;
+ case 'w':
+ for (i = (intN)charSet->length; i >= 0; i--)
+ if (JS_ISWORD(i))
+ AddCharacterToCharSet(charSet, (jschar)i);
+ continue;
+ case 'W':
+ for (i = (intN)charSet->length; i >= 0; i--)
+ if (!JS_ISWORD(i))
+ AddCharacterToCharSet(charSet, (jschar)i);
+ continue;
+ default:
+ thisCh = c;
+ break;
+
+ }
+ break;
+
+ default:
+ thisCh = *src++;
+ break;
+
+ }
+ if (inRange) {
+ if (gData->regexp->flags & JSREG_FOLD) {
+ AddCharacterRangeToCharSet(charSet, upcase(rangeStart),
+ upcase(thisCh));
+ AddCharacterRangeToCharSet(charSet, downcase(rangeStart),
+ downcase(thisCh));
+ } else {
+ AddCharacterRangeToCharSet(charSet, rangeStart, thisCh);
+ }
+ inRange = JS_FALSE;
+ } else {
+ if (gData->regexp->flags & JSREG_FOLD) {
+ AddCharacterToCharSet(charSet, upcase(thisCh));
+ AddCharacterToCharSet(charSet, downcase(thisCh));
+ } else {
+ AddCharacterToCharSet(charSet, thisCh);
+ }
+ if (src < end - 1) {
+ if (*src == '-') {
+ ++src;
+ inRange = JS_TRUE;
+ rangeStart = thisCh;
+ }
+ }
+ }
+ }
+ return JS_TRUE;
+}
+
+void
+js_DestroyRegExp(JSContext *cx, JSRegExp *re)
+{
+ if (JS_ATOMIC_DECREMENT(&re->nrefs) == 0) {
+ if (re->classList) {
+ uintN i;
+ for (i = 0; i < re->classCount; i++) {
+ if (re->classList[i].converted)
+ JS_free(cx, re->classList[i].u.bits);
+ re->classList[i].u.bits = NULL;
+ }
+ JS_free(cx, re->classList);
+ }
+ JS_free(cx, re);
+ }
+}
+
+static JSBool
+ReallocStateStack(REGlobalData *gData)
+{
+ size_t limit = gData->stateStackLimit;
+ size_t sz = sizeof(REProgState) * limit;
+
+ JS_ARENA_GROW_CAST(gData->stateStack, REProgState *, &gData->pool, sz, sz);
+ if (!gData->stateStack) {
+ gData->ok = JS_FALSE;
+ return JS_FALSE;
+ }
+ gData->stateStackLimit = limit + limit;
+ return JS_TRUE;
+}
+
+#define PUSH_STATE_STACK(data) \
+ JS_BEGIN_MACRO \
+ ++(data)->stateStackTop; \
+ if ((data)->stateStackTop == (data)->stateStackLimit && \
+ !ReallocStateStack((data))) { \
+ return NULL; \
+ } \
+ JS_END_MACRO
+
+/*
+ * Apply the current op against the given input to see if it matches. Return
+ * NULL if we don't get a match, the updated match state if we do. If updatecp
+ * is true, leave the state's cp advanced past the match; otherwise restore it.
+ * On a match, startpc is advanced to the next op.
+ */
+static REMatchState *
+SimpleMatch(REGlobalData *gData, REMatchState *x, REOp op,
+ jsbytecode **startpc, JSBool updatecp)
+{
+ REMatchState *result = NULL;
+ jschar matchCh;
+ size_t parenIndex;
+ size_t offset, length, index;
+ jsbytecode *pc = *startpc; /* pc has already been incremented past op */
+ jschar *source;
+ const jschar *startcp = x->cp;
+ jschar ch;
+ RECharSet *charSet;
+
+ switch (op) {
+ case REOP_BOL:
+ if (x->cp != gData->cpbegin) {
+ if (!gData->cx->regExpStatics.multiline &&
+ !(gData->regexp->flags & JSREG_MULTILINE)) {
+ break;
+ }
+ if (!RE_IS_LINE_TERM(x->cp[-1]))
+ break;
+ }
+ result = x;
+ break;
+ case REOP_EOL:
+ if (x->cp != gData->cpend) {
+ if (!gData->cx->regExpStatics.multiline &&
+ !(gData->regexp->flags & JSREG_MULTILINE)) {
+ break;
+ }
+ if (!RE_IS_LINE_TERM(*x->cp))
+ break;
+ }
+ result = x;
+ break;
+ case REOP_WBDRY:
+ if ((x->cp == gData->cpbegin || !JS_ISWORD(x->cp[-1])) ^
+ !(x->cp != gData->cpend && JS_ISWORD(*x->cp))) {
+ result = x;
+ }
+ break;
+ case REOP_WNONBDRY:
+ if ((x->cp == gData->cpbegin || !JS_ISWORD(x->cp[-1])) ^
+ (x->cp != gData->cpend && JS_ISWORD(*x->cp))) {
+ result = x;
+ }
+ break;
+ case REOP_DOT:
+ if (x->cp != gData->cpend && !RE_IS_LINE_TERM(*x->cp)) {
+ result = x;
+ result->cp++;
+ }
+ break;
+ case REOP_DIGIT:
+ if (x->cp != gData->cpend && JS_ISDIGIT(*x->cp)) {
+ result = x;
+ result->cp++;
+ }
+ break;
+ case REOP_NONDIGIT:
+ if (x->cp != gData->cpend && !JS_ISDIGIT(*x->cp)) {
+ result = x;
+ result->cp++;
+ }
+ break;
+ case REOP_ALNUM:
+ if (x->cp != gData->cpend && JS_ISWORD(*x->cp)) {
+ result = x;
+ result->cp++;
+ }
+ break;
+ case REOP_NONALNUM:
+ if (x->cp != gData->cpend && !JS_ISWORD(*x->cp)) {
+ result = x;
+ result->cp++;
+ }
+ break;
+ case REOP_SPACE:
+ if (x->cp != gData->cpend && JS_ISSPACE(*x->cp)) {
+ result = x;
+ result->cp++;
+ }
+ break;
+ case REOP_NONSPACE:
+ if (x->cp != gData->cpend && !JS_ISSPACE(*x->cp)) {
+ result = x;
+ result->cp++;
+ }
+ break;
+ case REOP_BACKREF:
+ pc = ReadCompactIndex(pc, &parenIndex);
+ JS_ASSERT(parenIndex < gData->regexp->parenCount);
+ result = BackrefMatcher(gData, x, parenIndex);
+ break;
+ case REOP_FLAT:
+ pc = ReadCompactIndex(pc, &offset);
+ JS_ASSERT(offset < JSSTRING_LENGTH(gData->regexp->source));
+ pc = ReadCompactIndex(pc, &length);
+ JS_ASSERT(1 <= length);
+ JS_ASSERT(length <= JSSTRING_LENGTH(gData->regexp->source) - offset);
+ if (length <= (size_t)(gData->cpend - x->cp)) {
+ source = JSSTRING_CHARS(gData->regexp->source) + offset;
+ for (index = 0; index != length; index++) {
+ if (source[index] != x->cp[index])
+ return NULL;
+ }
+ x->cp += length;
+ result = x;
+ }
+ break;
+ case REOP_FLAT1:
+ matchCh = *pc++;
+ if (x->cp != gData->cpend && *x->cp == matchCh) {
+ result = x;
+ result->cp++;
+ }
+ break;
+ case REOP_FLATi:
+ pc = ReadCompactIndex(pc, &offset);
+ JS_ASSERT(offset < JSSTRING_LENGTH(gData->regexp->source));
+ pc = ReadCompactIndex(pc, &length);
+ JS_ASSERT(1 <= length);
+ JS_ASSERT(length <= JSSTRING_LENGTH(gData->regexp->source) - offset);
+ source = JSSTRING_CHARS(gData->regexp->source);
+ result = FlatNIMatcher(gData, x, source + offset, length);
+ break;
+ case REOP_FLAT1i:
+ matchCh = *pc++;
+ if (x->cp != gData->cpend && upcase(*x->cp) == upcase(matchCh)) {
+ result = x;
+ result->cp++;
+ }
+ break;
+ case REOP_UCFLAT1:
+ matchCh = GET_ARG(pc);
+ pc += ARG_LEN;
+ if (x->cp != gData->cpend && *x->cp == matchCh) {
+ result = x;
+ result->cp++;
+ }
+ break;
+ case REOP_UCFLAT1i:
+ matchCh = GET_ARG(pc);
+ pc += ARG_LEN;
+ if (x->cp != gData->cpend && upcase(*x->cp) == upcase(matchCh)) {
+ result = x;
+ result->cp++;
+ }
+ break;
+ case REOP_CLASS:
+ pc = ReadCompactIndex(pc, &index);
+ JS_ASSERT(index < gData->regexp->classCount);
+ if (x->cp != gData->cpend) {
+ charSet = &gData->regexp->classList[index];
+ JS_ASSERT(charSet->converted);
+ ch = *x->cp;
+ index = ch >> 3;
+ if (charSet->length != 0 &&
+ ch <= charSet->length &&
+ (charSet->u.bits[index] & (1 << (ch & 0x7)))) {
+ result = x;
+ result->cp++;
+ }
+ }
+ break;
+ case REOP_NCLASS:
+ pc = ReadCompactIndex(pc, &index);
+ JS_ASSERT(index < gData->regexp->classCount);
+ if (x->cp != gData->cpend) {
+ charSet = &gData->regexp->classList[index];
+ JS_ASSERT(charSet->converted);
+ ch = *x->cp;
+ index = ch >> 3;
+ if (charSet->length == 0 ||
+ ch > charSet->length ||
+ !(charSet->u.bits[index] & (1 << (ch & 0x7)))) {
+ result = x;
+ result->cp++;
+ }
+ }
+ break;
+ default:
+ JS_ASSERT(JS_FALSE);
+ }
+ if (result) {
+ if (!updatecp)
+ x->cp = startcp;
+ *startpc = pc;
+ return result;
+ }
+ x->cp = startcp;
+ return NULL;
+}
+
+static REMatchState *
+ExecuteREBytecode(REGlobalData *gData, REMatchState *x)
+{
+ REMatchState *result = NULL;
+ REBackTrackData *backTrackData;
+ jsbytecode *nextpc, *testpc;
+ REOp nextop;
+ RECapture *cap;
+ REProgState *curState;
+ const jschar *startcp;
+ size_t parenIndex, k;
+ size_t parenSoFar = 0;
+
+ jschar matchCh1, matchCh2;
+ RECharSet *charSet;
+
+ JSBranchCallback onbranch = gData->cx->branchCallback;
+ uintN onbranchCalls = 0;
+#define ONBRANCH_CALLS_MASK 127
+#define CHECK_BRANCH() \
+ JS_BEGIN_MACRO \
+ if (onbranch && \
+ (++onbranchCalls & ONBRANCH_CALLS_MASK) == 0 && \
+ !(*onbranch)(gData->cx, NULL)) { \
+ gData->ok = JS_FALSE; \
+ return NULL; \
+ } \
+ JS_END_MACRO
+
+ JSBool anchor;
+ jsbytecode *pc = gData->regexp->program;
+ REOp op = (REOp) *pc++;
+
+ /*
+ * If the first node is a simple match, step the index into the string
+ * until that match is made, or fail if it can't be found at all.
+ */
+ if (REOP_IS_SIMPLE(op)) {
+ anchor = JS_FALSE;
+ while (x->cp <= gData->cpend) {
+ nextpc = pc; /* reset back to start each time */
+ result = SimpleMatch(gData, x, op, &nextpc, JS_TRUE);
+ if (result) {
+ anchor = JS_TRUE;
+ x = result;
+ pc = nextpc; /* accept skip to next opcode */
+ op = (REOp) *pc++;
+ break;
+ }
+ gData->skipped++;
+ x->cp++;
+ }
+ if (!anchor)
+ return NULL;
+ }
+
+ for (;;) {
+ if (REOP_IS_SIMPLE(op)) {
+ result = SimpleMatch(gData, x, op, &pc, JS_TRUE);
+ } else {
+ curState = &gData->stateStack[gData->stateStackTop];
+ switch (op) {
+ case REOP_EMPTY:
+ result = x;
+ break;
+
+ case REOP_ALTPREREQ2:
+ nextpc = pc + GET_OFFSET(pc); /* start of next op */
+ pc += ARG_LEN;
+ matchCh2 = GET_ARG(pc);
+ pc += ARG_LEN;
+ k = GET_ARG(pc);
+ pc += ARG_LEN;
+
+ if (x->cp != gData->cpend) {
+ if (*x->cp == matchCh2)
+ goto doAlt;
+
+ charSet = &gData->regexp->classList[k];
+ if (!charSet->converted && !ProcessCharSet(gData, charSet))
+ return NULL;
+ matchCh1 = *x->cp;
+ k = matchCh1 >> 3;
+ if ((charSet->length == 0 ||
+ matchCh1 > charSet->length ||
+ !(charSet->u.bits[k] & (1 << (matchCh1 & 0x7)))) ^
+ charSet->sense) {
+ goto doAlt;
+ }
+ }
+ result = NULL;
+ break;
+
+ case REOP_ALTPREREQ:
+ nextpc = pc + GET_OFFSET(pc); /* start of next op */
+ pc += ARG_LEN;
+ matchCh1 = GET_ARG(pc);
+ pc += ARG_LEN;
+ matchCh2 = GET_ARG(pc);
+ pc += ARG_LEN;
+ if (x->cp == gData->cpend ||
+ (*x->cp != matchCh1 && *x->cp != matchCh2)) {
+ result = NULL;
+ break;
+ }
+                /* else fall through... */
+
+ case REOP_ALT:
+ doAlt:
+ nextpc = pc + GET_OFFSET(pc); /* start of next alternate */
+ pc += ARG_LEN; /* start of this alternate */
+ curState->parenSoFar = parenSoFar;
+ PUSH_STATE_STACK(gData);
+ op = (REOp) *pc++;
+ startcp = x->cp;
+ if (REOP_IS_SIMPLE(op)) {
+ if (!SimpleMatch(gData, x, op, &pc, JS_TRUE)) {
+ op = (REOp) *nextpc++;
+ pc = nextpc;
+ continue;
+ }
+ result = x;
+ op = (REOp) *pc++;
+ }
+ nextop = (REOp) *nextpc++;
+ if (!PushBackTrackState(gData, nextop, nextpc, x, startcp, 0, 0))
+ return NULL;
+ continue;
+
+ /*
+               * Occurs at (successful) end of REOP_ALT.
+ */
+ case REOP_JUMP:
+ --gData->stateStackTop;
+ pc += GET_OFFSET(pc);
+ op = (REOp) *pc++;
+ continue;
+
+ /*
+               * Occurs at the last (successful) end of REOP_ALT.
+ */
+ case REOP_ENDALT:
+ --gData->stateStackTop;
+ op = (REOp) *pc++;
+ continue;
+
+ case REOP_LPAREN:
+ pc = ReadCompactIndex(pc, &parenIndex);
+ JS_ASSERT(parenIndex < gData->regexp->parenCount);
+ if (parenIndex + 1 > parenSoFar)
+ parenSoFar = parenIndex + 1;
+ x->parens[parenIndex].index = x->cp - gData->cpbegin;
+ x->parens[parenIndex].length = 0;
+ op = (REOp) *pc++;
+ continue;
+
+ case REOP_RPAREN:
+ pc = ReadCompactIndex(pc, &parenIndex);
+ JS_ASSERT(parenIndex < gData->regexp->parenCount);
+ cap = &x->parens[parenIndex];
+
+ /*
+ * FIXME: https://bugzilla.mozilla.org/show_bug.cgi?id=346090
+ * This wallpaper prevents a case where we somehow took a step
+ * backward in input while minimally-matching an empty string.
+ */
+ if (x->cp < gData->cpbegin + cap->index)
+ cap->index = -1;
+ cap->length = x->cp - (gData->cpbegin + cap->index);
+ op = (REOp) *pc++;
+ continue;
+
+ case REOP_ASSERT:
+ nextpc = pc + GET_OFFSET(pc); /* start of term after ASSERT */
+ pc += ARG_LEN; /* start of ASSERT child */
+ op = (REOp) *pc++;
+ testpc = pc;
+ if (REOP_IS_SIMPLE(op) &&
+ !SimpleMatch(gData, x, op, &testpc, JS_FALSE)) {
+ result = NULL;
+ break;
+ }
+ curState->u.assertion.top =
+ (char *)gData->backTrackSP - (char *)gData->backTrackStack;
+ curState->u.assertion.sz = gData->cursz;
+ curState->index = x->cp - gData->cpbegin;
+ curState->parenSoFar = parenSoFar;
+ PUSH_STATE_STACK(gData);
+ if (!PushBackTrackState(gData, REOP_ASSERTTEST,
+ nextpc, x, x->cp, 0, 0)) {
+ return NULL;
+ }
+ continue;
+
+ case REOP_ASSERT_NOT:
+ nextpc = pc + GET_OFFSET(pc);
+ pc += ARG_LEN;
+ op = (REOp) *pc++;
+ testpc = pc;
+ if (REOP_IS_SIMPLE(op) /* Note - fail to fail! */ &&
+ SimpleMatch(gData, x, op, &testpc, JS_FALSE) &&
+ *testpc == REOP_ASSERTNOTTEST) {
+ result = NULL;
+ break;
+ }
+ curState->u.assertion.top
+ = (char *)gData->backTrackSP -
+ (char *)gData->backTrackStack;
+ curState->u.assertion.sz = gData->cursz;
+ curState->index = x->cp - gData->cpbegin;
+ curState->parenSoFar = parenSoFar;
+ PUSH_STATE_STACK(gData);
+ if (!PushBackTrackState(gData, REOP_ASSERTNOTTEST,
+ nextpc, x, x->cp, 0, 0)) {
+ return NULL;
+ }
+ continue;
+
+ case REOP_ASSERTTEST:
+ --gData->stateStackTop;
+ --curState;
+ x->cp = gData->cpbegin + curState->index;
+ gData->backTrackSP =
+ (REBackTrackData *) ((char *)gData->backTrackStack +
+ curState->u.assertion.top);
+ gData->cursz = curState->u.assertion.sz;
+ if (result)
+ result = x;
+ break;
+
+ case REOP_ASSERTNOTTEST:
+ --gData->stateStackTop;
+ --curState;
+ x->cp = gData->cpbegin + curState->index;
+ gData->backTrackSP =
+ (REBackTrackData *) ((char *)gData->backTrackStack +
+ curState->u.assertion.top);
+ gData->cursz = curState->u.assertion.sz;
+ result = (!result) ? x : NULL;
+ break;
+
+ case REOP_END:
+ if (x)
+ return x;
+ break;
+
+ case REOP_STAR:
+ curState->u.quantifier.min = 0;
+ curState->u.quantifier.max = (uintN)-1;
+ goto quantcommon;
+ case REOP_PLUS:
+ curState->u.quantifier.min = 1;
+ curState->u.quantifier.max = (uintN)-1;
+ goto quantcommon;
+ case REOP_OPT:
+ curState->u.quantifier.min = 0;
+ curState->u.quantifier.max = 1;
+ goto quantcommon;
+ case REOP_QUANT:
+ pc = ReadCompactIndex(pc, &k);
+ curState->u.quantifier.min = k;
+ pc = ReadCompactIndex(pc, &k);
+ /* max is k - 1 to use one byte for (uintN)-1 sentinel. */
+ curState->u.quantifier.max = k - 1;
+ JS_ASSERT(curState->u.quantifier.min
+ <= curState->u.quantifier.max);
+ quantcommon:
+ if (curState->u.quantifier.max == 0) {
+ pc = pc + GET_OFFSET(pc);
+ op = (REOp) *pc++;
+ result = x;
+ continue;
+ }
+ /* Step over <next> */
+ nextpc = pc + ARG_LEN;
+ op = (REOp) *nextpc++;
+ startcp = x->cp;
+ if (REOP_IS_SIMPLE(op)) {
+ if (!SimpleMatch(gData, x, op, &nextpc, JS_TRUE)) {
+ if (curState->u.quantifier.min == 0)
+ result = x;
+ else
+ result = NULL;
+ pc = pc + GET_OFFSET(pc);
+ break;
+ }
+ op = (REOp) *nextpc++;
+ result = x;
+ }
+ curState->index = startcp - gData->cpbegin;
+ curState->continue_op = REOP_REPEAT;
+ curState->continue_pc = pc;
+ curState->parenSoFar = parenSoFar;
+ PUSH_STATE_STACK(gData);
+ if (curState->u.quantifier.min == 0 &&
+ !PushBackTrackState(gData, REOP_REPEAT, pc, x, startcp,
+ 0, 0)) {
+ return NULL;
+ }
+ pc = nextpc;
+ continue;
+
+ case REOP_ENDCHILD: /* marks the end of a quantifier child */
+ pc = curState[-1].continue_pc;
+ op = curState[-1].continue_op;
+ continue;
+
+ case REOP_REPEAT:
+ CHECK_BRANCH();
+ --curState;
+ do {
+ --gData->stateStackTop;
+ if (!result) {
+ /* Failed, see if we have enough children. */
+ if (curState->u.quantifier.min == 0)
+ goto repeatDone;
+ goto break_switch;
+ }
+ if (curState->u.quantifier.min == 0 &&
+ x->cp == gData->cpbegin + curState->index) {
+ /* matched an empty string, that'll get us nowhere */
+ result = NULL;
+ goto break_switch;
+ }
+ if (curState->u.quantifier.min != 0)
+ curState->u.quantifier.min--;
+ if (curState->u.quantifier.max != (uintN) -1)
+ curState->u.quantifier.max--;
+ if (curState->u.quantifier.max == 0)
+ goto repeatDone;
+ nextpc = pc + ARG_LEN;
+ nextop = (REOp) *nextpc;
+ startcp = x->cp;
+ if (REOP_IS_SIMPLE(nextop)) {
+ nextpc++;
+ if (!SimpleMatch(gData, x, nextop, &nextpc, JS_TRUE)) {
+ if (curState->u.quantifier.min == 0)
+ goto repeatDone;
+ result = NULL;
+ goto break_switch;
+ }
+ result = x;
+ }
+ curState->index = startcp - gData->cpbegin;
+ PUSH_STATE_STACK(gData);
+ if (curState->u.quantifier.min == 0 &&
+ !PushBackTrackState(gData, REOP_REPEAT,
+ pc, x, startcp,
+ curState->parenSoFar,
+ parenSoFar -
+ curState->parenSoFar)) {
+ return NULL;
+ }
+ } while (*nextpc == REOP_ENDCHILD);
+ pc = nextpc;
+ op = (REOp) *pc++;
+ parenSoFar = curState->parenSoFar;
+ continue;
+
+ repeatDone:
+ result = x;
+ pc += GET_OFFSET(pc);
+ goto break_switch;
+
+ case REOP_MINIMALSTAR:
+ curState->u.quantifier.min = 0;
+ curState->u.quantifier.max = (uintN)-1;
+ goto minimalquantcommon;
+ case REOP_MINIMALPLUS:
+ curState->u.quantifier.min = 1;
+ curState->u.quantifier.max = (uintN)-1;
+ goto minimalquantcommon;
+ case REOP_MINIMALOPT:
+ curState->u.quantifier.min = 0;
+ curState->u.quantifier.max = 1;
+ goto minimalquantcommon;
+ case REOP_MINIMALQUANT:
+ pc = ReadCompactIndex(pc, &k);
+ curState->u.quantifier.min = k;
+ pc = ReadCompactIndex(pc, &k);
+ /* See REOP_QUANT comments about k - 1. */
+ curState->u.quantifier.max = k - 1;
+ JS_ASSERT(curState->u.quantifier.min
+ <= curState->u.quantifier.max);
+ minimalquantcommon:
+ curState->index = x->cp - gData->cpbegin;
+ curState->parenSoFar = parenSoFar;
+ PUSH_STATE_STACK(gData);
+ if (curState->u.quantifier.min != 0) {
+ curState->continue_op = REOP_MINIMALREPEAT;
+ curState->continue_pc = pc;
+ /* step over <next> */
+ pc += OFFSET_LEN;
+ op = (REOp) *pc++;
+ } else {
+ if (!PushBackTrackState(gData, REOP_MINIMALREPEAT,
+ pc, x, x->cp, 0, 0)) {
+ return NULL;
+ }
+ --gData->stateStackTop;
+ pc = pc + GET_OFFSET(pc);
+ op = (REOp) *pc++;
+ }
+ continue;
+
+ case REOP_MINIMALREPEAT:
+ CHECK_BRANCH();
+ --gData->stateStackTop;
+ --curState;
+
+ if (!result) {
+ /*
+ * Non-greedy failure - try to consume another child.
+ */
+ if (curState->u.quantifier.max == (uintN) -1 ||
+ curState->u.quantifier.max > 0) {
+ curState->index = x->cp - gData->cpbegin;
+ curState->continue_op = REOP_MINIMALREPEAT;
+ curState->continue_pc = pc;
+ pc += ARG_LEN;
+ for (k = curState->parenSoFar; k < parenSoFar; k++)
+ x->parens[k].index = -1;
+ PUSH_STATE_STACK(gData);
+ op = (REOp) *pc++;
+ continue;
+ }
+ /* Don't need to adjust pc since we're going to pop. */
+ break;
+ }
+ if (curState->u.quantifier.min == 0 &&
+ x->cp == gData->cpbegin + curState->index) {
+ /* Matched an empty string, that'll get us nowhere. */
+ result = NULL;
+ break;
+ }
+ if (curState->u.quantifier.min != 0)
+ curState->u.quantifier.min--;
+ if (curState->u.quantifier.max != (uintN) -1)
+ curState->u.quantifier.max--;
+ if (curState->u.quantifier.min != 0) {
+ curState->continue_op = REOP_MINIMALREPEAT;
+ curState->continue_pc = pc;
+ pc += ARG_LEN;
+ for (k = curState->parenSoFar; k < parenSoFar; k++)
+ x->parens[k].index = -1;
+ curState->index = x->cp - gData->cpbegin;
+ PUSH_STATE_STACK(gData);
+ op = (REOp) *pc++;
+ continue;
+ }
+ curState->index = x->cp - gData->cpbegin;
+ curState->parenSoFar = parenSoFar;
+ PUSH_STATE_STACK(gData);
+ if (!PushBackTrackState(gData, REOP_MINIMALREPEAT,
+ pc, x, x->cp,
+ curState->parenSoFar,
+ parenSoFar - curState->parenSoFar)) {
+ return NULL;
+ }
+ --gData->stateStackTop;
+ pc = pc + GET_OFFSET(pc);
+ op = (REOp) *pc++;
+ continue;
+
+ default:
+ JS_ASSERT(JS_FALSE);
+ result = NULL;
+ }
+ break_switch:;
+ }
+
+ /*
+ * If the match failed and there's a backtrack option, take it.
+ * Otherwise this is a complete and utter failure.
+ */
+ if (!result) {
+ if (gData->cursz == 0)
+ return NULL;
+ backTrackData = gData->backTrackSP;
+ gData->cursz = backTrackData->sz;
+ gData->backTrackSP =
+ (REBackTrackData *) ((char *)backTrackData - backTrackData->sz);
+ x->cp = backTrackData->cp;
+ pc = backTrackData->backtrack_pc;
+ op = backTrackData->backtrack_op;
+ gData->stateStackTop = backTrackData->saveStateStackTop;
+ JS_ASSERT(gData->stateStackTop);
+
+ memcpy(gData->stateStack, backTrackData + 1,
+ sizeof(REProgState) * backTrackData->saveStateStackTop);
+ curState = &gData->stateStack[gData->stateStackTop - 1];
+
+ if (backTrackData->parenCount) {
+ memcpy(&x->parens[backTrackData->parenIndex],
+ (char *)(backTrackData + 1) +
+ sizeof(REProgState) * backTrackData->saveStateStackTop,
+ sizeof(RECapture) * backTrackData->parenCount);
+ parenSoFar = backTrackData->parenIndex + backTrackData->parenCount;
+ } else {
+ for (k = curState->parenSoFar; k < parenSoFar; k++)
+ x->parens[k].index = -1;
+ parenSoFar = curState->parenSoFar;
+ }
+ continue;
+ }
+ x = result;
+
+ /*
+ * Continue with the expression.
+ */
+ op = (REOp)*pc++;
+ }
+ return NULL;
+}
+
+static REMatchState *
+MatchRegExp(REGlobalData *gData, REMatchState *x)
+{
+ REMatchState *result;
+ const jschar *cp = x->cp;
+ const jschar *cp2;
+ uintN j;
+
+ /*
+ * Have to include the position beyond the last character
+ * in order to detect end-of-input/line condition.
+ */
+ for (cp2 = cp; cp2 <= gData->cpend; cp2++) {
+ gData->skipped = cp2 - cp;
+ x->cp = cp2;
+ for (j = 0; j < gData->regexp->parenCount; j++)
+ x->parens[j].index = -1;
+ result = ExecuteREBytecode(gData, x);
+ if (!gData->ok || result)
+ return result;
+ gData->backTrackSP = gData->backTrackStack;
+ gData->cursz = 0;
+ gData->stateStackTop = 0;
+ cp2 = cp + gData->skipped;
+ }
+ return NULL;
+}
+
+
+static REMatchState *
+InitMatch(JSContext *cx, REGlobalData *gData, JSRegExp *re)
+{
+ REMatchState *result;
+ uintN i;
+
+ gData->backTrackStackSize = INITIAL_BACKTRACK;
+ JS_ARENA_ALLOCATE_CAST(gData->backTrackStack, REBackTrackData *,
+ &gData->pool,
+ INITIAL_BACKTRACK);
+ if (!gData->backTrackStack)
+ goto bad;
+
+ gData->backTrackSP = gData->backTrackStack;
+ gData->cursz = 0;
+
+ gData->stateStackLimit = INITIAL_STATESTACK;
+ JS_ARENA_ALLOCATE_CAST(gData->stateStack, REProgState *,
+ &gData->pool,
+ sizeof(REProgState) * INITIAL_STATESTACK);
+ if (!gData->stateStack)
+ goto bad;
+
+ gData->stateStackTop = 0;
+ gData->cx = cx;
+ gData->regexp = re;
+ gData->ok = JS_TRUE;
+
+ JS_ARENA_ALLOCATE_CAST(result, REMatchState *,
+ &gData->pool,
+ offsetof(REMatchState, parens)
+ + re->parenCount * sizeof(RECapture));
+ if (!result)
+ goto bad;
+
+ for (i = 0; i < re->classCount; i++) {
+ if (!re->classList[i].converted &&
+ !ProcessCharSet(gData, &re->classList[i])) {
+ return NULL;
+ }
+ }
+
+ return result;
+
+bad:
+ JS_ReportOutOfMemory(cx);
+ gData->ok = JS_FALSE;
+ return NULL;
+}
+
+JSBool
+js_ExecuteRegExp(JSContext *cx, JSRegExp *re, JSString *str, size_t *indexp,
+ JSBool test, jsval *rval)
+{
+ REGlobalData gData;
+ REMatchState *x, *result;
+
+ const jschar *cp, *ep;
+ size_t i, length, start;
+ JSSubString *morepar;
+ JSBool ok;
+ JSRegExpStatics *res;
+ ptrdiff_t matchlen;
+ uintN num, morenum;
+ JSString *parstr, *matchstr;
+ JSObject *obj;
+
+ RECapture *parsub = NULL;
+
+ /*
+ * It's safe to load from cp because JSStrings have a zero at the end,
+ * and we never let cp get beyond cpend.
+ */
+ start = *indexp;
+ length = JSSTRING_LENGTH(str);
+ if (start > length)
+ start = length;
+ cp = JSSTRING_CHARS(str);
+ gData.cpbegin = cp;
+ gData.cpend = cp + length;
+ cp += start;
+ gData.start = start;
+ gData.skipped = 0;
+
+ JS_InitArenaPool(&gData.pool, "RegExpPool", 8096, 4);
+ x = InitMatch(cx, &gData, re);
+ if (!x) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ x->cp = cp;
+
+ /*
+ * Call the recursive matcher to do the real work. Return null on mismatch
+ * whether testing or not. On match, return an extended Array object.
+ */
+ result = MatchRegExp(&gData, x);
+ ok = gData.ok;
+ if (!ok)
+ goto out;
+ if (!result) {
+ *rval = JSVAL_NULL;
+ goto out;
+ }
+ cp = result->cp;
+ i = cp - gData.cpbegin;
+ *indexp = i;
+ matchlen = i - (start + gData.skipped);
+ ep = cp;
+ cp -= matchlen;
+
+ if (test) {
+ /*
+ * Testing for a match and updating cx->regExpStatics: don't allocate
+ * an array object, do return true.
+ */
+ *rval = JSVAL_TRUE;
+
+ /* Avoid warning. (gcc doesn't detect that obj is needed iff !test.) */
+ obj = NULL;
+ } else {
+ /*
+ * The array returned on match has element 0 bound to the matched
+ * string, elements 1 through state.parenCount bound to the paren
+ * matches, an index property telling the length of the left context,
+ * and an input property referring to the input string.
+ */
+ obj = js_NewArrayObject(cx, 0, NULL);
+ if (!obj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ *rval = OBJECT_TO_JSVAL(obj);
+
+#define DEFVAL(val, id) { \
+ ok = js_DefineProperty(cx, obj, id, val, \
+ JS_PropertyStub, JS_PropertyStub, \
+ JSPROP_ENUMERATE, NULL); \
+ if (!ok) { \
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL; \
+ cx->weakRoots.newborn[GCX_STRING] = NULL; \
+ goto out; \
+ } \
+}
+
+ matchstr = js_NewStringCopyN(cx, cp, matchlen, 0);
+ if (!matchstr) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ ok = JS_FALSE;
+ goto out;
+ }
+ DEFVAL(STRING_TO_JSVAL(matchstr), INT_TO_JSID(0));
+ }
+
+ res = &cx->regExpStatics;
+ res->input = str;
+ res->parenCount = re->parenCount;
+ if (re->parenCount == 0) {
+ res->lastParen = js_EmptySubString;
+ } else {
+ for (num = 0; num < re->parenCount; num++) {
+ parsub = &result->parens[num];
+ if (num < 9) {
+ if (parsub->index == -1) {
+ res->parens[num].chars = NULL;
+ res->parens[num].length = 0;
+ } else {
+ res->parens[num].chars = gData.cpbegin + parsub->index;
+ res->parens[num].length = parsub->length;
+ }
+ } else {
+ morenum = num - 9;
+ morepar = res->moreParens;
+ if (!morepar) {
+ res->moreLength = 10;
+ morepar = (JSSubString*)
+ JS_malloc(cx, 10 * sizeof(JSSubString));
+ } else if (morenum >= res->moreLength) {
+ res->moreLength += 10;
+ morepar = (JSSubString*)
+ JS_realloc(cx, morepar,
+ res->moreLength * sizeof(JSSubString));
+ }
+ if (!morepar) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ cx->weakRoots.newborn[GCX_STRING] = NULL;
+ ok = JS_FALSE;
+ goto out;
+ }
+ res->moreParens = morepar;
+ if (parsub->index == -1) {
+ morepar[morenum].chars = NULL;
+ morepar[morenum].length = 0;
+ } else {
+ morepar[morenum].chars = gData.cpbegin + parsub->index;
+ morepar[morenum].length = parsub->length;
+ }
+ }
+ if (test)
+ continue;
+ if (parsub->index == -1) {
+ ok = js_DefineProperty(cx, obj, INT_TO_JSID(num + 1),
+ JSVAL_VOID, NULL, NULL,
+ JSPROP_ENUMERATE, NULL);
+ } else {
+ parstr = js_NewStringCopyN(cx, gData.cpbegin + parsub->index,
+ parsub->length, 0);
+ if (!parstr) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ cx->weakRoots.newborn[GCX_STRING] = NULL;
+ ok = JS_FALSE;
+ goto out;
+ }
+ ok = js_DefineProperty(cx, obj, INT_TO_JSID(num + 1),
+ STRING_TO_JSVAL(parstr), NULL, NULL,
+ JSPROP_ENUMERATE, NULL);
+ }
+ if (!ok) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ cx->weakRoots.newborn[GCX_STRING] = NULL;
+ goto out;
+ }
+ }
+ if (parsub->index == -1) {
+ res->lastParen = js_EmptySubString;
+ } else {
+ res->lastParen.chars = gData.cpbegin + parsub->index;
+ res->lastParen.length = parsub->length;
+ }
+ }
+
+ if (!test) {
+ /*
+ * Define the index and input properties last for better for/in loop
+ * order (so they come after the elements).
+ */
+ DEFVAL(INT_TO_JSVAL(start + gData.skipped),
+ ATOM_TO_JSID(cx->runtime->atomState.indexAtom));
+ DEFVAL(STRING_TO_JSVAL(str),
+ ATOM_TO_JSID(cx->runtime->atomState.inputAtom));
+ }
+
+#undef DEFVAL
+
+ res->lastMatch.chars = cp;
+ res->lastMatch.length = matchlen;
+
+ /*
+ * For JS1.3 and ECMAv2, emulate Perl5 exactly:
+ *
+ * js1.3 "hi", "hi there" "hihitherehi therebye"
+ */
+ res->leftContext.chars = JSSTRING_CHARS(str);
+ res->leftContext.length = start + gData.skipped;
+ res->rightContext.chars = ep;
+ res->rightContext.length = gData.cpend - ep;
+
+out:
+ JS_FinishArenaPool(&gData.pool);
+ return ok;
+}
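
The assignments just above carve the input into three statics: leftContext ends where the match begins, lastMatch covers the matched text, and rightContext runs to the end of the string, so the three lengths always sum to the input length. A minimal sketch of that invariant, assuming a hypothetical debugging helper (CheckStaticsPartition is not part of this file):

    /*
     * Illustrative only: after a successful, non-test js_ExecuteRegExp
     * call, the regexp statics partition the input string.
     */
    static void
    CheckStaticsPartition(JSContext *cx)
    {
        JSRegExpStatics *res = &cx->regExpStatics;

        JS_ASSERT(res->leftContext.length +
                  res->lastMatch.length +
                  res->rightContext.length == JSSTRING_LENGTH(res->input));
    }
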
+
+/************************************************************************/
+
+enum regexp_tinyid {
+ REGEXP_SOURCE = -1,
+ REGEXP_GLOBAL = -2,
+ REGEXP_IGNORE_CASE = -3,
+ REGEXP_LAST_INDEX = -4,
+ REGEXP_MULTILINE = -5
+};
+
+#define REGEXP_PROP_ATTRS (JSPROP_PERMANENT|JSPROP_SHARED)
+
+static JSPropertySpec regexp_props[] = {
+ {"source", REGEXP_SOURCE, REGEXP_PROP_ATTRS | JSPROP_READONLY,0,0},
+ {"global", REGEXP_GLOBAL, REGEXP_PROP_ATTRS | JSPROP_READONLY,0,0},
+ {"ignoreCase", REGEXP_IGNORE_CASE, REGEXP_PROP_ATTRS | JSPROP_READONLY,0,0},
+ {"lastIndex", REGEXP_LAST_INDEX, REGEXP_PROP_ATTRS,0,0},
+ {"multiline", REGEXP_MULTILINE, REGEXP_PROP_ATTRS | JSPROP_READONLY,0,0},
+ {0,0,0,0,0}
+};
+
+static JSBool
+regexp_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ jsint slot;
+ JSRegExp *re;
+
+ if (!JSVAL_IS_INT(id))
+ return JS_TRUE;
+ slot = JSVAL_TO_INT(id);
+ if (slot == REGEXP_LAST_INDEX)
+ return JS_GetReservedSlot(cx, obj, 0, vp);
+
+ JS_LOCK_OBJ(cx, obj);
+ re = (JSRegExp *) JS_GetInstancePrivate(cx, obj, &js_RegExpClass, NULL);
+ if (re) {
+ switch (slot) {
+ case REGEXP_SOURCE:
+ *vp = STRING_TO_JSVAL(re->source);
+ break;
+ case REGEXP_GLOBAL:
+ *vp = BOOLEAN_TO_JSVAL((re->flags & JSREG_GLOB) != 0);
+ break;
+ case REGEXP_IGNORE_CASE:
+ *vp = BOOLEAN_TO_JSVAL((re->flags & JSREG_FOLD) != 0);
+ break;
+ case REGEXP_MULTILINE:
+ *vp = BOOLEAN_TO_JSVAL((re->flags & JSREG_MULTILINE) != 0);
+ break;
+ }
+ }
+ JS_UNLOCK_OBJ(cx, obj);
+ return JS_TRUE;
+}
+
+static JSBool
+regexp_setProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSBool ok;
+ jsint slot;
+ jsdouble lastIndex;
+
+ ok = JS_TRUE;
+ if (!JSVAL_IS_INT(id))
+ return ok;
+ slot = JSVAL_TO_INT(id);
+ if (slot == REGEXP_LAST_INDEX) {
+ if (!js_ValueToNumber(cx, *vp, &lastIndex))
+ return JS_FALSE;
+ lastIndex = js_DoubleToInteger(lastIndex);
+ ok = js_NewNumberValue(cx, lastIndex, vp) &&
+ JS_SetReservedSlot(cx, obj, 0, *vp);
+ }
+ return ok;
+}
+
+/*
+ * RegExp class static properties and their Perl counterparts:
+ *
+ * RegExp.input $_
+ * RegExp.multiline $*
+ * RegExp.lastMatch $&
+ * RegExp.lastParen $+
+ * RegExp.leftContext $`
+ * RegExp.rightContext $'
+ */
+enum regexp_static_tinyid {
+ REGEXP_STATIC_INPUT = -1,
+ REGEXP_STATIC_MULTILINE = -2,
+ REGEXP_STATIC_LAST_MATCH = -3,
+ REGEXP_STATIC_LAST_PAREN = -4,
+ REGEXP_STATIC_LEFT_CONTEXT = -5,
+ REGEXP_STATIC_RIGHT_CONTEXT = -6
+};
+
+JSBool
+js_InitRegExpStatics(JSContext *cx, JSRegExpStatics *res)
+{
+ JS_ClearRegExpStatics(cx);
+ return js_AddRoot(cx, &res->input, "res->input");
+}
+
+void
+js_FreeRegExpStatics(JSContext *cx, JSRegExpStatics *res)
+{
+ if (res->moreParens) {
+ JS_free(cx, res->moreParens);
+ res->moreParens = NULL;
+ }
+ js_RemoveRoot(cx->runtime, &res->input);
+}
+
+static JSBool
+regexp_static_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ jsint slot;
+ JSRegExpStatics *res;
+ JSString *str;
+ JSSubString *sub;
+
+ res = &cx->regExpStatics;
+ if (!JSVAL_IS_INT(id))
+ return JS_TRUE;
+ slot = JSVAL_TO_INT(id);
+ switch (slot) {
+ case REGEXP_STATIC_INPUT:
+ *vp = res->input ? STRING_TO_JSVAL(res->input)
+ : JS_GetEmptyStringValue(cx);
+ return JS_TRUE;
+ case REGEXP_STATIC_MULTILINE:
+ *vp = BOOLEAN_TO_JSVAL(res->multiline);
+ return JS_TRUE;
+ case REGEXP_STATIC_LAST_MATCH:
+ sub = &res->lastMatch;
+ break;
+ case REGEXP_STATIC_LAST_PAREN:
+ sub = &res->lastParen;
+ break;
+ case REGEXP_STATIC_LEFT_CONTEXT:
+ sub = &res->leftContext;
+ break;
+ case REGEXP_STATIC_RIGHT_CONTEXT:
+ sub = &res->rightContext;
+ break;
+ default:
+ sub = REGEXP_PAREN_SUBSTRING(res, slot);
+ break;
+ }
+ str = js_NewStringCopyN(cx, sub->chars, sub->length, 0);
+ if (!str)
+ return JS_FALSE;
+ *vp = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+regexp_static_setProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSRegExpStatics *res;
+
+ if (!JSVAL_IS_INT(id))
+ return JS_TRUE;
+ res = &cx->regExpStatics;
+ /* XXX use if-else rather than switch to keep MSVC1.52 from crashing */
+ if (JSVAL_TO_INT(id) == REGEXP_STATIC_INPUT) {
+ if (!JSVAL_IS_STRING(*vp) &&
+ !JS_ConvertValue(cx, *vp, JSTYPE_STRING, vp)) {
+ return JS_FALSE;
+ }
+ res->input = JSVAL_TO_STRING(*vp);
+ } else if (JSVAL_TO_INT(id) == REGEXP_STATIC_MULTILINE) {
+ if (!JSVAL_IS_BOOLEAN(*vp) &&
+ !JS_ConvertValue(cx, *vp, JSTYPE_BOOLEAN, vp)) {
+ return JS_FALSE;
+ }
+ res->multiline = JSVAL_TO_BOOLEAN(*vp);
+ }
+ return JS_TRUE;
+}
+
+static JSPropertySpec regexp_static_props[] = {
+ {"input",
+ REGEXP_STATIC_INPUT,
+ JSPROP_ENUMERATE|JSPROP_SHARED,
+ regexp_static_getProperty, regexp_static_setProperty},
+ {"multiline",
+ REGEXP_STATIC_MULTILINE,
+ JSPROP_ENUMERATE|JSPROP_SHARED,
+ regexp_static_getProperty, regexp_static_setProperty},
+ {"lastMatch",
+ REGEXP_STATIC_LAST_MATCH,
+ JSPROP_ENUMERATE|JSPROP_READONLY|JSPROP_SHARED,
+ regexp_static_getProperty, regexp_static_getProperty},
+ {"lastParen",
+ REGEXP_STATIC_LAST_PAREN,
+ JSPROP_ENUMERATE|JSPROP_READONLY|JSPROP_SHARED,
+ regexp_static_getProperty, regexp_static_getProperty},
+ {"leftContext",
+ REGEXP_STATIC_LEFT_CONTEXT,
+ JSPROP_ENUMERATE|JSPROP_READONLY|JSPROP_SHARED,
+ regexp_static_getProperty, regexp_static_getProperty},
+ {"rightContext",
+ REGEXP_STATIC_RIGHT_CONTEXT,
+ JSPROP_ENUMERATE|JSPROP_READONLY|JSPROP_SHARED,
+ regexp_static_getProperty, regexp_static_getProperty},
+
+ /* XXX should have block scope and local $1, etc. */
+ {"$1", 0, JSPROP_ENUMERATE|JSPROP_READONLY|JSPROP_SHARED,
+ regexp_static_getProperty, regexp_static_getProperty},
+ {"$2", 1, JSPROP_ENUMERATE|JSPROP_READONLY|JSPROP_SHARED,
+ regexp_static_getProperty, regexp_static_getProperty},
+ {"$3", 2, JSPROP_ENUMERATE|JSPROP_READONLY|JSPROP_SHARED,
+ regexp_static_getProperty, regexp_static_getProperty},
+ {"$4", 3, JSPROP_ENUMERATE|JSPROP_READONLY|JSPROP_SHARED,
+ regexp_static_getProperty, regexp_static_getProperty},
+ {"$5", 4, JSPROP_ENUMERATE|JSPROP_READONLY|JSPROP_SHARED,
+ regexp_static_getProperty, regexp_static_getProperty},
+ {"$6", 5, JSPROP_ENUMERATE|JSPROP_READONLY|JSPROP_SHARED,
+ regexp_static_getProperty, regexp_static_getProperty},
+ {"$7", 6, JSPROP_ENUMERATE|JSPROP_READONLY|JSPROP_SHARED,
+ regexp_static_getProperty, regexp_static_getProperty},
+ {"$8", 7, JSPROP_ENUMERATE|JSPROP_READONLY|JSPROP_SHARED,
+ regexp_static_getProperty, regexp_static_getProperty},
+ {"$9", 8, JSPROP_ENUMERATE|JSPROP_READONLY|JSPROP_SHARED,
+ regexp_static_getProperty, regexp_static_getProperty},
+
+ {0,0,0,0,0}
+};
+
+static void
+regexp_finalize(JSContext *cx, JSObject *obj)
+{
+ JSRegExp *re;
+
+ re = (JSRegExp *) JS_GetPrivate(cx, obj);
+ if (!re)
+ return;
+ js_DestroyRegExp(cx, re);
+}
+
+/* Forward static prototype. */
+static JSBool
+regexp_exec(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval);
+
+static JSBool
+regexp_call(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return regexp_exec(cx, JSVAL_TO_OBJECT(argv[-2]), argc, argv, rval);
+}
+
+#if JS_HAS_XDR
+
+#include "jsxdrapi.h"
+
+static JSBool
+regexp_xdrObject(JSXDRState *xdr, JSObject **objp)
+{
+ JSRegExp *re;
+ JSString *source;
+ uint32 flagsword;
+ JSObject *obj;
+
+ if (xdr->mode == JSXDR_ENCODE) {
+ re = (JSRegExp *) JS_GetPrivate(xdr->cx, *objp);
+ if (!re)
+ return JS_FALSE;
+ source = re->source;
+ flagsword = ((uint32)re->cloneIndex << 16) | re->flags;
+ }
+ if (!JS_XDRString(xdr, &source) ||
+ !JS_XDRUint32(xdr, &flagsword)) {
+ return JS_FALSE;
+ }
+ if (xdr->mode == JSXDR_DECODE) {
+ obj = js_NewObject(xdr->cx, &js_RegExpClass, NULL, NULL);
+ if (!obj)
+ return JS_FALSE;
+ re = js_NewRegExp(xdr->cx, NULL, source, (uint16)flagsword, JS_FALSE);
+ if (!re)
+ return JS_FALSE;
+ if (!JS_SetPrivate(xdr->cx, obj, re) ||
+ !js_SetLastIndex(xdr->cx, obj, 0)) {
+ js_DestroyRegExp(xdr->cx, re);
+ return JS_FALSE;
+ }
+ re->cloneIndex = (uint16)(flagsword >> 16);
+ *objp = obj;
+ }
+ return JS_TRUE;
+}
+
+#else /* !JS_HAS_XDR */
+
+#define regexp_xdrObject NULL
+
+#endif /* !JS_HAS_XDR */
+
+static uint32
+regexp_mark(JSContext *cx, JSObject *obj, void *arg)
+{
+ JSRegExp *re = (JSRegExp *) JS_GetPrivate(cx, obj);
+ if (re)
+ GC_MARK(cx, re->source, "source");
+ return 0;
+}
+
+JSClass js_RegExpClass = {
+ js_RegExp_str,
+ JSCLASS_HAS_PRIVATE | JSCLASS_HAS_RESERVED_SLOTS(1) |
+ JSCLASS_HAS_CACHED_PROTO(JSProto_RegExp),
+ JS_PropertyStub, JS_PropertyStub,
+ regexp_getProperty, regexp_setProperty,
+ JS_EnumerateStub, JS_ResolveStub,
+ JS_ConvertStub, regexp_finalize,
+ NULL, NULL,
+ regexp_call, NULL,
+ regexp_xdrObject, NULL,
+ regexp_mark, 0
+};
+
+static const jschar empty_regexp_ucstr[] = {'(', '?', ':', ')', 0};
+
+JSBool
+js_regexp_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSRegExp *re;
+ const jschar *source;
+ jschar *chars;
+ size_t length, nflags;
+ uintN flags;
+ JSString *str;
+
+ if (!JS_InstanceOf(cx, obj, &js_RegExpClass, argv))
+ return JS_FALSE;
+ JS_LOCK_OBJ(cx, obj);
+ re = (JSRegExp *) JS_GetPrivate(cx, obj);
+ if (!re) {
+ JS_UNLOCK_OBJ(cx, obj);
+ *rval = STRING_TO_JSVAL(cx->runtime->emptyString);
+ return JS_TRUE;
+ }
+
+ source = JSSTRING_CHARS(re->source);
+ length = JSSTRING_LENGTH(re->source);
+ if (length == 0) {
+ source = empty_regexp_ucstr;
+ length = sizeof(empty_regexp_ucstr) / sizeof(jschar) - 1;
+ }
+ length += 2;
+ nflags = 0;
+ for (flags = re->flags; flags != 0; flags &= flags - 1)
+ nflags++;
+ chars = (jschar*) JS_malloc(cx, (length + nflags + 1) * sizeof(jschar));
+ if (!chars) {
+ JS_UNLOCK_OBJ(cx, obj);
+ return JS_FALSE;
+ }
+
+ chars[0] = '/';
+ js_strncpy(&chars[1], source, length - 2);
+ chars[length-1] = '/';
+ if (nflags) {
+ if (re->flags & JSREG_GLOB)
+ chars[length++] = 'g';
+ if (re->flags & JSREG_FOLD)
+ chars[length++] = 'i';
+ if (re->flags & JSREG_MULTILINE)
+ chars[length++] = 'm';
+ }
+ JS_UNLOCK_OBJ(cx, obj);
+ chars[length] = 0;
+
+ str = js_NewString(cx, chars, length, 0);
+ if (!str) {
+ JS_free(cx, chars);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+regexp_compile(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *opt, *str;
+ JSRegExp *oldre, *re;
+ JSBool ok, ok2;
+ JSObject *obj2;
+ size_t length, nbytes;
+ const jschar *cp, *start, *end;
+ jschar *nstart, *ncp, *tmp;
+
+ if (!JS_InstanceOf(cx, obj, &js_RegExpClass, argv))
+ return JS_FALSE;
+ opt = NULL;
+ if (argc == 0) {
+ str = cx->runtime->emptyString;
+ } else {
+ if (JSVAL_IS_OBJECT(argv[0])) {
+ /*
+ * If we get passed in a RegExp object we construct a new
+ * RegExp that is a duplicate of it by re-compiling the
+ * original source code. ECMA requires that it be an error
+ * here if the flags are specified. (We must use the flags
+ * from the original RegExp also).
+ */
+ obj2 = JSVAL_TO_OBJECT(argv[0]);
+ if (obj2 && OBJ_GET_CLASS(cx, obj2) == &js_RegExpClass) {
+ if (argc >= 2 && !JSVAL_IS_VOID(argv[1])) { /* 'flags' passed */
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_NEWREGEXP_FLAGGED);
+ return JS_FALSE;
+ }
+ JS_LOCK_OBJ(cx, obj2);
+ re = (JSRegExp *) JS_GetPrivate(cx, obj2);
+ if (!re) {
+ JS_UNLOCK_OBJ(cx, obj2);
+ return JS_FALSE;
+ }
+ re = js_NewRegExp(cx, NULL, re->source, re->flags, JS_FALSE);
+ JS_UNLOCK_OBJ(cx, obj2);
+ goto created;
+ }
+ }
+ str = js_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(str);
+ if (argc > 1) {
+ if (JSVAL_IS_VOID(argv[1])) {
+ opt = NULL;
+ } else {
+ opt = js_ValueToString(cx, argv[1]);
+ if (!opt)
+ return JS_FALSE;
+ argv[1] = STRING_TO_JSVAL(opt);
+ }
+ }
+
+ /* Escape any naked slashes in the regexp source. */
+ length = JSSTRING_LENGTH(str);
+ start = JSSTRING_CHARS(str);
+ end = start + length;
+ nstart = ncp = NULL;
+ for (cp = start; cp < end; cp++) {
+ if (*cp == '/' && (cp == start || cp[-1] != '\\')) {
+ nbytes = (++length + 1) * sizeof(jschar);
+ if (!nstart) {
+ nstart = (jschar *) JS_malloc(cx, nbytes);
+ if (!nstart)
+ return JS_FALSE;
+ ncp = nstart + (cp - start);
+ js_strncpy(nstart, start, cp - start);
+ } else {
+ tmp = (jschar *) JS_realloc(cx, nstart, nbytes);
+ if (!tmp) {
+ JS_free(cx, nstart);
+ return JS_FALSE;
+ }
+ ncp = tmp + (ncp - nstart);
+ nstart = tmp;
+ }
+ *ncp++ = '\\';
+ }
+ if (nstart)
+ *ncp++ = *cp;
+ }
+
+ if (nstart) {
+ /* Don't forget to store the backstop after the new string. */
+ JS_ASSERT((size_t)(ncp - nstart) == length);
+ *ncp = 0;
+ str = js_NewString(cx, nstart, length, 0);
+ if (!str) {
+ JS_free(cx, nstart);
+ return JS_FALSE;
+ }
+ argv[0] = STRING_TO_JSVAL(str);
+ }
+ }
+
+ re = js_NewRegExpOpt(cx, NULL, str, opt, JS_FALSE);
+created:
+ if (!re)
+ return JS_FALSE;
+ JS_LOCK_OBJ(cx, obj);
+ oldre = (JSRegExp *) JS_GetPrivate(cx, obj);
+ ok = JS_SetPrivate(cx, obj, re);
+ ok2 = js_SetLastIndex(cx, obj, 0);
+ JS_UNLOCK_OBJ(cx, obj);
+ if (!ok) {
+ js_DestroyRegExp(cx, re);
+ return JS_FALSE;
+ }
+ if (oldre)
+ js_DestroyRegExp(cx, oldre);
+ *rval = OBJECT_TO_JSVAL(obj);
+ return ok2;
+}
+
+static JSBool
+regexp_exec_sub(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ JSBool test, jsval *rval)
+{
+ JSBool ok;
+ JSRegExp *re;
+ jsdouble lastIndex;
+ JSString *str;
+ size_t i;
+
+ ok = JS_InstanceOf(cx, obj, &js_RegExpClass, argv);
+ if (!ok)
+ return JS_FALSE;
+ JS_LOCK_OBJ(cx, obj);
+ re = (JSRegExp *) JS_GetPrivate(cx, obj);
+ if (!re) {
+ JS_UNLOCK_OBJ(cx, obj);
+ return JS_TRUE;
+ }
+
+ /* NB: after this paragraph we must reach the out: label, in order to drop re. */
+ HOLD_REGEXP(cx, re);
+ if (re->flags & JSREG_GLOB) {
+ ok = js_GetLastIndex(cx, obj, &lastIndex);
+ } else {
+ lastIndex = 0;
+ }
+ JS_UNLOCK_OBJ(cx, obj);
+ if (!ok)
+ goto out;
+
+ /* Now that obj is unlocked, it's safe to (potentially) grab the GC lock. */
+ if (argc == 0) {
+ str = cx->regExpStatics.input;
+ if (!str) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_NO_INPUT,
+ JS_GetStringBytes(re->source),
+ (re->flags & JSREG_GLOB) ? "g" : "",
+ (re->flags & JSREG_FOLD) ? "i" : "",
+ (re->flags & JSREG_MULTILINE) ? "m" : "");
+ ok = JS_FALSE;
+ goto out;
+ }
+ } else {
+ str = js_ValueToString(cx, argv[0]);
+ if (!str) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ argv[0] = STRING_TO_JSVAL(str);
+ }
+
+ if (lastIndex < 0 || JSSTRING_LENGTH(str) < lastIndex) {
+ ok = js_SetLastIndex(cx, obj, 0);
+ *rval = JSVAL_NULL;
+ } else {
+ i = (size_t) lastIndex;
+ ok = js_ExecuteRegExp(cx, re, str, &i, test, rval);
+ if (ok && (re->flags & JSREG_GLOB))
+ ok = js_SetLastIndex(cx, obj, (*rval == JSVAL_NULL) ? 0 : i);
+ }
+
+out:
+ DROP_REGEXP(cx, re);
+ return ok;
+}
+
+static JSBool
+regexp_exec(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return regexp_exec_sub(cx, obj, argc, argv, JS_FALSE, rval);
+}
+
+static JSBool
+regexp_test(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ if (!regexp_exec_sub(cx, obj, argc, argv, JS_TRUE, rval))
+ return JS_FALSE;
+ if (*rval != JSVAL_TRUE)
+ *rval = JSVAL_FALSE;
+ return JS_TRUE;
+}
+
+static JSFunctionSpec regexp_methods[] = {
+#if JS_HAS_TOSOURCE
+ {js_toSource_str, js_regexp_toString, 0,0,0},
+#endif
+ {js_toString_str, js_regexp_toString, 0,0,0},
+ {"compile", regexp_compile, 1,0,0},
+ {"exec", regexp_exec, 0,0,0},
+ {"test", regexp_test, 0,0,0},
+ {0,0,0,0,0}
+};
+
+static JSBool
+RegExp(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ if (!(cx->fp->flags & JSFRAME_CONSTRUCTING)) {
+ /*
+ * If first arg is regexp and no flags are given, just return the arg.
+ * (regexp_compile detects the regexp + flags case and throws a
+ * TypeError.) See ECMA-262 section 15.10.3.1.
+ */
+ if ((argc < 2 || JSVAL_IS_VOID(argv[1])) &&
+ !JSVAL_IS_PRIMITIVE(argv[0]) &&
+ OBJ_GET_CLASS(cx, JSVAL_TO_OBJECT(argv[0])) == &js_RegExpClass) {
+ *rval = argv[0];
+ return JS_TRUE;
+ }
+
+ /* Otherwise, replace obj with a new RegExp object. */
+ obj = js_NewObject(cx, &js_RegExpClass, NULL, NULL);
+ if (!obj)
+ return JS_FALSE;
+
+ /*
+ * regexp_compile does not use rval to root its temporaries
+ * so we can use it to root obj.
+ */
+ *rval = OBJECT_TO_JSVAL(obj);
+ }
+ return regexp_compile(cx, obj, argc, argv, rval);
+}
+
+JSObject *
+js_InitRegExpClass(JSContext *cx, JSObject *obj)
+{
+ JSObject *proto, *ctor;
+ jsval rval;
+
+ proto = JS_InitClass(cx, obj, NULL, &js_RegExpClass, RegExp, 1,
+ regexp_props, regexp_methods,
+ regexp_static_props, NULL);
+
+ if (!proto || !(ctor = JS_GetConstructor(cx, proto)))
+ return NULL;
+ if (!JS_AliasProperty(cx, ctor, "input", "$_") ||
+ !JS_AliasProperty(cx, ctor, "multiline", "$*") ||
+ !JS_AliasProperty(cx, ctor, "lastMatch", "$&") ||
+ !JS_AliasProperty(cx, ctor, "lastParen", "$+") ||
+ !JS_AliasProperty(cx, ctor, "leftContext", "$`") ||
+ !JS_AliasProperty(cx, ctor, "rightContext", "$'")) {
+ goto bad;
+ }
+
+ /* Give RegExp.prototype private data so it matches the empty string. */
+ if (!regexp_compile(cx, proto, 0, NULL, &rval))
+ goto bad;
+ return proto;
+
+bad:
+ JS_DeleteProperty(cx, obj, js_RegExpClass.name);
+ return NULL;
+}
+
+JSObject *
+js_NewRegExpObject(JSContext *cx, JSTokenStream *ts,
+ jschar *chars, size_t length, uintN flags)
+{
+ JSString *str;
+ JSObject *obj;
+ JSRegExp *re;
+ JSTempValueRooter tvr;
+
+ str = js_NewStringCopyN(cx, chars, length, 0);
+ if (!str)
+ return NULL;
+ re = js_NewRegExp(cx, ts, str, flags, JS_FALSE);
+ if (!re)
+ return NULL;
+ JS_PUSH_TEMP_ROOT_STRING(cx, str, &tvr);
+ obj = js_NewObject(cx, &js_RegExpClass, NULL, NULL);
+ if (!obj || !JS_SetPrivate(cx, obj, re)) {
+ js_DestroyRegExp(cx, re);
+ obj = NULL;
+ }
+ if (obj && !js_SetLastIndex(cx, obj, 0))
+ obj = NULL;
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return obj;
+}
+
+JSObject *
+js_CloneRegExpObject(JSContext *cx, JSObject *obj, JSObject *parent)
+{
+ JSObject *clone;
+ JSRegExp *re;
+
+ JS_ASSERT(OBJ_GET_CLASS(cx, obj) == &js_RegExpClass);
+ clone = js_NewObject(cx, &js_RegExpClass, NULL, parent);
+ if (!clone)
+ return NULL;
+ re = JS_GetPrivate(cx, obj);
+ if (!JS_SetPrivate(cx, clone, re) || !js_SetLastIndex(cx, clone, 0)) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ return NULL;
+ }
+ HOLD_REGEXP(cx, re);
+ return clone;
+}
+
+JSBool
+js_GetLastIndex(JSContext *cx, JSObject *obj, jsdouble *lastIndex)
+{
+ jsval v;
+
+ return JS_GetReservedSlot(cx, obj, 0, &v) &&
+ js_ValueToNumber(cx, v, lastIndex);
+}
+
+JSBool
+js_SetLastIndex(JSContext *cx, JSObject *obj, jsdouble lastIndex)
+{
+ jsval v;
+
+ return js_NewNumberValue(cx, lastIndex, &v) &&
+ JS_SetReservedSlot(cx, obj, 0, v);
+}
diff --git a/src/third_party/js-1.7/jsregexp.h b/src/third_party/js-1.7/jsregexp.h
new file mode 100644
index 00000000000..50789832d09
--- /dev/null
+++ b/src/third_party/js-1.7/jsregexp.h
@@ -0,0 +1,183 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsregexp_h___
+#define jsregexp_h___
+/*
+ * JS regular expression interface.
+ */
+#include <stddef.h>
+#include "jspubtd.h"
+#include "jsstr.h"
+
+#ifdef JS_THREADSAFE
+#include "jsdhash.h"
+#endif
+
+struct JSRegExpStatics {
+ JSString *input; /* input string to match (perl $_, GC root) */
+ JSBool multiline; /* whether input contains newlines (perl $*) */
+ uint16 parenCount; /* number of valid elements in parens[] */
+ uint16 moreLength; /* number of allocated elements in moreParens */
+ JSSubString parens[9]; /* last set of parens matched (perl $1, $2) */
+ JSSubString *moreParens; /* null or realloc'd vector for $10, etc. */
+ JSSubString lastMatch; /* last string matched (perl $&) */
+ JSSubString lastParen; /* last paren matched (perl $+) */
+ JSSubString leftContext; /* input to left of last match (perl $`) */
+ JSSubString rightContext; /* input to right of last match (perl $') */
+};
+
+/*
+ * This struct holds a bitmap representation of a class from a regexp.
+ * There's a list of these referenced by the classList field in the JSRegExp
+ * struct below. The initial state has startIndex set to the offset in the
+ * original regexp source of the beginning of the class contents. The first
+ * use of the class converts the source representation into a bitmap.
+ *
+ */
+typedef struct RECharSet {
+ JSPackedBool converted;
+ JSPackedBool sense;
+ uint16 length;
+ union {
+ uint8 *bits;
+ struct {
+ size_t startIndex;
+ size_t length;
+ } src;
+ } u;
+} RECharSet;
+
+/*
+ * This macro is safe because moreParens is guaranteed to be allocated and big
+ * enough to hold parenCount, or else be null when parenCount is 0.
+ */
+#define REGEXP_PAREN_SUBSTRING(res, num) \
+ (((jsuint)(num) < (jsuint)(res)->parenCount) \
+ ? ((jsuint)(num) < 9) \
+ ? &(res)->parens[num] \
+ : &(res)->moreParens[(num) - 9] \
+ : &js_EmptySubString)
+
+typedef struct RENode RENode;
+
+struct JSRegExp {
+ jsrefcount nrefs; /* reference count */
+ uint16 flags; /* flags, see jsapi.h's JSREG_* defines */
+ uint16 cloneIndex; /* index in fp->vars or funobj->slots of
+ cloned regexp object */
+ size_t parenCount; /* number of parenthesized submatches */
+ size_t classCount; /* count [...] bitmaps */
+ RECharSet *classList; /* list of [...] bitmaps */
+ JSString *source; /* locked source string, sans // */
+ jsbytecode program[1]; /* regular expression bytecode */
+};
+
+extern JSRegExp *
+js_NewRegExp(JSContext *cx, JSTokenStream *ts,
+ JSString *str, uintN flags, JSBool flat);
+
+extern JSRegExp *
+js_NewRegExpOpt(JSContext *cx, JSTokenStream *ts,
+ JSString *str, JSString *opt, JSBool flat);
+
+#define HOLD_REGEXP(cx, re) JS_ATOMIC_INCREMENT(&(re)->nrefs)
+#define DROP_REGEXP(cx, re) js_DestroyRegExp(cx, re)
+
+extern void
+js_DestroyRegExp(JSContext *cx, JSRegExp *re);
+
+/*
+ * Execute re on input str at *indexp, returning null in *rval on mismatch.
+ * On match, return true if test is true, otherwise return an array object.
+ * Update *indexp and cx->regExpStatics always on match.
+ */
+extern JSBool
+js_ExecuteRegExp(JSContext *cx, JSRegExp *re, JSString *str, size_t *indexp,
+ JSBool test, jsval *rval);
+
+/*
+ * These two add and remove GC roots, respectively, so their calls must be
+ * well-ordered.
+ */
+extern JSBool
+js_InitRegExpStatics(JSContext *cx, JSRegExpStatics *res);
+
+extern void
+js_FreeRegExpStatics(JSContext *cx, JSRegExpStatics *res);
+
+#define JSVAL_IS_REGEXP(cx, v) \
+ (JSVAL_IS_OBJECT(v) && JSVAL_TO_OBJECT(v) && \
+ OBJ_GET_CLASS(cx, JSVAL_TO_OBJECT(v)) == &js_RegExpClass)
+
+extern JSClass js_RegExpClass;
+
+extern JSObject *
+js_InitRegExpClass(JSContext *cx, JSObject *obj);
+
+/*
+ * Export js_regexp_toString to the decompiler.
+ */
+extern JSBool
+js_regexp_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval);
+
+/*
+ * Create, serialize/deserialize, or clone a RegExp object.
+ */
+extern JSObject *
+js_NewRegExpObject(JSContext *cx, JSTokenStream *ts,
+ jschar *chars, size_t length, uintN flags);
+
+extern JSBool
+js_XDRRegExp(JSXDRState *xdr, JSObject **objp);
+
+extern JSObject *
+js_CloneRegExpObject(JSContext *cx, JSObject *obj, JSObject *parent);
+
+/*
+ * Get and set the per-object (clone or clone-parent) lastIndex slot.
+ */
+extern JSBool
+js_GetLastIndex(JSContext *cx, JSObject *obj, jsdouble *lastIndex);
+
+extern JSBool
+js_SetLastIndex(JSContext *cx, JSObject *obj, jsdouble lastIndex);
+
+#endif /* jsregexp_h___ */
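
Taken together, these declarations are the compile/execute/destroy lifecycle that jsregexp.c builds the RegExp methods on. A minimal sketch of driving it directly, assuming a hypothetical helper (RegExpMatchesSomewhere is illustrative, not part of the tree) and eliding the rooting a real caller would need:

    /*
     * Illustrative only: compile `source' with flag string `opt' and
     * report whether it matches anywhere in `str'.  Passing test=JS_TRUE
     * makes js_ExecuteRegExp store JSVAL_TRUE in rval on a match and
     * JSVAL_NULL on a mismatch instead of building a result array.
     */
    static JSBool
    RegExpMatchesSomewhere(JSContext *cx, JSString *source, JSString *opt,
                           JSString *str, JSBool *matchedp)
    {
        JSRegExp *re;
        size_t index = 0;
        jsval rval;
        JSBool ok;

        re = js_NewRegExpOpt(cx, NULL, source, opt, JS_FALSE);
        if (!re)
            return JS_FALSE;
        ok = js_ExecuteRegExp(cx, re, str, &index, JS_TRUE, &rval);
        *matchedp = ok && rval == JSVAL_TRUE;
        js_DestroyRegExp(cx, re);
        return ok;
    }
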
diff --git a/src/third_party/js-1.7/jsscan.c b/src/third_party/js-1.7/jsscan.c
new file mode 100644
index 00000000000..f9f7436f956
--- /dev/null
+++ b/src/third_party/js-1.7/jsscan.c
@@ -0,0 +1,2101 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set sw=4 ts=8 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS lexical scanner.
+ */
+#include "jsstddef.h"
+#include <stdio.h> /* first to avoid trouble on some systems */
+#include <errno.h>
+#include <limits.h>
+#include <math.h>
+#ifdef HAVE_MEMORY_H
+#include <memory.h>
+#endif
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+#include "jstypes.h"
+#include "jsarena.h" /* Added by JSIFY */
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsdtoa.h"
+#include "jsprf.h"
+#include "jsapi.h"
+#include "jsatom.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsemit.h"
+#include "jsexn.h"
+#include "jsnum.h"
+#include "jsopcode.h"
+#include "jsregexp.h"
+#include "jsscan.h"
+#include "jsscript.h"
+
+#if JS_HAS_XML_SUPPORT
+#include "jsparse.h"
+#include "jsxml.h"
+#endif
+
+#define JS_KEYWORD(keyword, type, op, version) \
+ const char js_##keyword##_str[] = #keyword;
+#include "jskeyword.tbl"
+#undef JS_KEYWORD
+
+struct keyword {
+ const char *chars; /* C string with keyword text */
+ JSTokenType tokentype; /* JSTokenType */
+ JSOp op; /* JSOp */
+ JSVersion version; /* JSVersion */
+};
+
+static const struct keyword keyword_defs[] = {
+#define JS_KEYWORD(keyword, type, op, version) \
+ {js_##keyword##_str, type, op, version},
+#include "jskeyword.tbl"
+#undef JS_KEYWORD
+};
+
+#define KEYWORD_COUNT (sizeof keyword_defs / sizeof keyword_defs[0])
+
+static const struct keyword *
+FindKeyword(const jschar *s, size_t length)
+{
+ register size_t i;
+ const struct keyword *kw;
+ const char *chars;
+
+ JS_ASSERT(length != 0);
+
+#define JSKW_LENGTH() length
+#define JSKW_AT(column) s[column]
+#define JSKW_GOT_MATCH(index) i = (index); goto got_match;
+#define JSKW_TEST_GUESS(index) i = (index); goto test_guess;
+#define JSKW_NO_MATCH() goto no_match;
+#include "jsautokw.h"
+#undef JSKW_NO_MATCH
+#undef JSKW_TEST_GUESS
+#undef JSKW_GOT_MATCH
+#undef JSKW_AT
+#undef JSKW_LENGTH
+
+ got_match:
+ return &keyword_defs[i];
+
+ test_guess:
+ kw = &keyword_defs[i];
+ chars = kw->chars;
+ do {
+ if (*s++ != (unsigned char)(*chars++))
+ goto no_match;
+ } while (--length != 0);
+ return kw;
+
+ no_match:
+ return NULL;
+}
+
+JSTokenType
+js_CheckKeyword(const jschar *str, size_t length)
+{
+ const struct keyword *kw;
+
+ JS_ASSERT(length != 0);
+ kw = FindKeyword(str, length);
+ return kw ? kw->tokentype : TOK_EOF;
+}
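
js_CheckKeyword exposes the keyword table to callers outside the scanner: it returns the keyword's token type, or TOK_EOF when the name is not reserved. A small sketch, with a hypothetical IsWhileKeyword helper used purely for illustration:

    /*
     * Illustrative only: look up "while" in the keyword table.  The
     * scanner itself goes through FindKeyword with the characters it
     * has accumulated in its token buffer.
     */
    static JSBool
    IsWhileKeyword(void)
    {
        static const jschar kw[] = {'w', 'h', 'i', 'l', 'e'};

        return js_CheckKeyword(kw, 5) != TOK_EOF;
    }
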
+
+JS_FRIEND_API(void)
+js_MapKeywords(void (*mapfun)(const char *))
+{
+ size_t i;
+
+ for (i = 0; i != KEYWORD_COUNT; ++i)
+ mapfun(keyword_defs[i].chars);
+}
+
+JSTokenStream *
+js_NewTokenStream(JSContext *cx, const jschar *base, size_t length,
+ const char *filename, uintN lineno,
+ JSPrincipals *principals)
+{
+ JSTokenStream *ts;
+
+ ts = js_NewBufferTokenStream(cx, base, length);
+ if (!ts)
+ return NULL;
+ ts->filename = filename;
+ ts->lineno = lineno;
+ if (principals)
+ JSPRINCIPALS_HOLD(cx, principals);
+ ts->principals = principals;
+ return ts;
+}
+
+#define TBMIN 64
+
+static JSBool
+GrowTokenBuf(JSStringBuffer *sb, size_t newlength)
+{
+ JSContext *cx;
+ jschar *base;
+ ptrdiff_t offset, length;
+ size_t tbsize;
+ JSArenaPool *pool;
+
+ cx = sb->data;
+ base = sb->base;
+ offset = PTRDIFF(sb->ptr, base, jschar);
+ pool = &cx->tempPool;
+ if (!base) {
+ tbsize = TBMIN * sizeof(jschar);
+ length = TBMIN - 1;
+ JS_ARENA_ALLOCATE_CAST(base, jschar *, pool, tbsize);
+ } else {
+ length = PTRDIFF(sb->limit, base, jschar);
+ if ((size_t)length >= ~(size_t)0 / sizeof(jschar)) {
+ base = NULL;
+ } else {
+ tbsize = (length + 1) * sizeof(jschar);
+ length += length + 1;
+ JS_ARENA_GROW_CAST(base, jschar *, pool, tbsize, tbsize);
+ }
+ }
+ if (!base) {
+ JS_ReportOutOfMemory(cx);
+ sb->base = STRING_BUFFER_ERROR_BASE;
+ return JS_FALSE;
+ }
+ sb->base = base;
+ sb->limit = base + length;
+ sb->ptr = base + offset;
+ return JS_TRUE;
+}
+
+JS_FRIEND_API(JSTokenStream *)
+js_NewBufferTokenStream(JSContext *cx, const jschar *base, size_t length)
+{
+ size_t nb;
+ JSTokenStream *ts;
+
+ nb = sizeof(JSTokenStream) + JS_LINE_LIMIT * sizeof(jschar);
+ JS_ARENA_ALLOCATE_CAST(ts, JSTokenStream *, &cx->tempPool, nb);
+ if (!ts) {
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+ }
+ memset(ts, 0, nb);
+ ts->lineno = 1;
+ ts->linebuf.base = ts->linebuf.limit = ts->linebuf.ptr = (jschar *)(ts + 1);
+ ts->userbuf.base = (jschar *)base;
+ ts->userbuf.limit = (jschar *)base + length;
+ ts->userbuf.ptr = (jschar *)base;
+ ts->tokenbuf.grow = GrowTokenBuf;
+ ts->tokenbuf.data = cx;
+ ts->listener = cx->runtime->sourceHandler;
+ ts->listenerData = cx->runtime->sourceHandlerData;
+ return ts;
+}
+
+JS_FRIEND_API(JSTokenStream *)
+js_NewFileTokenStream(JSContext *cx, const char *filename, FILE *defaultfp)
+{
+ jschar *base;
+ JSTokenStream *ts;
+ FILE *file;
+
+ JS_ARENA_ALLOCATE_CAST(base, jschar *, &cx->tempPool,
+ JS_LINE_LIMIT * sizeof(jschar));
+ if (!base)
+ return NULL;
+ ts = js_NewBufferTokenStream(cx, base, JS_LINE_LIMIT);
+ if (!ts)
+ return NULL;
+ if (!filename || strcmp(filename, "-") == 0) {
+ file = defaultfp;
+ } else {
+ file = fopen(filename, "r");
+ if (!file) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_CANT_OPEN,
+ filename, "No such file or directory");
+ return NULL;
+ }
+ }
+ ts->userbuf.ptr = ts->userbuf.limit;
+ ts->file = file;
+ ts->filename = filename;
+ return ts;
+}
+
+JS_FRIEND_API(JSBool)
+js_CloseTokenStream(JSContext *cx, JSTokenStream *ts)
+{
+ if (ts->flags & TSF_OWNFILENAME)
+ JS_free(cx, (void *) ts->filename);
+ if (ts->principals)
+ JSPRINCIPALS_DROP(cx, ts->principals);
+ return !ts->file || fclose(ts->file) == 0;
+}
+
+JS_FRIEND_API(int)
+js_fgets(char *buf, int size, FILE *file)
+{
+ int n, i, c;
+ JSBool crflag;
+
+ n = size - 1;
+ if (n < 0)
+ return -1;
+
+ crflag = JS_FALSE;
+ for (i = 0; i < n && (c = getc(file)) != EOF; i++) {
+ buf[i] = c;
+ if (c == '\n') { /* any \n ends a line */
+ i++; /* keep the \n; we know there is room for \0 */
+ break;
+ }
+ if (crflag) { /* \r not followed by \n ends line at the \r */
+ ungetc(c, file);
+ break; /* and overwrite c in buf with \0 */
+ }
+ crflag = (c == '\r');
+ }
+
+ buf[i] = '\0';
+ return i;
+}
+
+static int32
+GetChar(JSTokenStream *ts)
+{
+ int32 c;
+ ptrdiff_t i, j, len, olen;
+ JSBool crflag;
+ char cbuf[JS_LINE_LIMIT];
+ jschar *ubuf, *nl;
+
+ if (ts->ungetpos != 0) {
+ c = ts->ungetbuf[--ts->ungetpos];
+ } else {
+ do {
+ if (ts->linebuf.ptr == ts->linebuf.limit) {
+ len = PTRDIFF(ts->userbuf.limit, ts->userbuf.ptr, jschar);
+ if (len <= 0) {
+ if (!ts->file) {
+ ts->flags |= TSF_EOF;
+ return EOF;
+ }
+
+ /* Fill ts->userbuf so that \r and \r\n convert to \n. */
+ crflag = (ts->flags & TSF_CRFLAG) != 0;
+ len = js_fgets(cbuf, JS_LINE_LIMIT - crflag, ts->file);
+ if (len <= 0) {
+ ts->flags |= TSF_EOF;
+ return EOF;
+ }
+ olen = len;
+ ubuf = ts->userbuf.base;
+ i = 0;
+ if (crflag) {
+ ts->flags &= ~TSF_CRFLAG;
+ if (cbuf[0] != '\n') {
+ ubuf[i++] = '\n';
+ len++;
+ ts->linepos--;
+ }
+ }
+ for (j = 0; i < len; i++, j++)
+ ubuf[i] = (jschar) (unsigned char) cbuf[j];
+ ts->userbuf.limit = ubuf + len;
+ ts->userbuf.ptr = ubuf;
+ }
+ if (ts->listener) {
+ ts->listener(ts->filename, ts->lineno, ts->userbuf.ptr, len,
+ &ts->listenerTSData, ts->listenerData);
+ }
+
+ nl = ts->saveEOL;
+ if (!nl) {
+ /*
+ * Any one of \n, \r, or \r\n ends a line (the longest
+ * match wins). Also allow the Unicode line and paragraph
+ * separators.
+ */
+ for (nl = ts->userbuf.ptr; nl < ts->userbuf.limit; nl++) {
+ /*
+ * Try to prevent value-testing on most characters by
+ * filtering out characters that aren't 000x or 202x.
+ */
+ if ((*nl & 0xDFD0) == 0) {
+ if (*nl == '\n')
+ break;
+ if (*nl == '\r') {
+ if (nl + 1 < ts->userbuf.limit && nl[1] == '\n')
+ nl++;
+ break;
+ }
+ if (*nl == LINE_SEPARATOR || *nl == PARA_SEPARATOR)
+ break;
+ }
+ }
+ }
+
+ /*
+ * If there was a line terminator, copy thru it into linebuf.
+ * Else copy JS_LINE_LIMIT-1 bytes into linebuf.
+ */
+ if (nl < ts->userbuf.limit)
+ len = PTRDIFF(nl, ts->userbuf.ptr, jschar) + 1;
+ if (len >= JS_LINE_LIMIT) {
+ len = JS_LINE_LIMIT - 1;
+ ts->saveEOL = nl;
+ } else {
+ ts->saveEOL = NULL;
+ }
+ js_strncpy(ts->linebuf.base, ts->userbuf.ptr, len);
+ ts->userbuf.ptr += len;
+ olen = len;
+
+ /*
+ * Make sure linebuf contains \n for EOL (don't do this in
+ * userbuf because the user's string might be readonly).
+ */
+ if (nl < ts->userbuf.limit) {
+ if (*nl == '\r') {
+ if (ts->linebuf.base[len-1] == '\r') {
+ /*
+ * Does the line segment end in \r? We must check
+ * for a \n at the front of the next segment before
+ * storing a \n into linebuf. This case matters
+ * only when we're reading from a file.
+ */
+ if (nl + 1 == ts->userbuf.limit && ts->file) {
+ len--;
+ ts->flags |= TSF_CRFLAG; /* clear NLFLAG? */
+ if (len == 0) {
+ /*
+ * This can happen when a segment ends in
+ * \r\r. Start over. ptr == limit in this
+ * case, so we'll fall into buffer-filling
+ * code.
+ */
+ return GetChar(ts);
+ }
+ } else {
+ ts->linebuf.base[len-1] = '\n';
+ }
+ }
+ } else if (*nl == '\n') {
+ if (nl > ts->userbuf.base &&
+ nl[-1] == '\r' &&
+ ts->linebuf.base[len-2] == '\r') {
+ len--;
+ JS_ASSERT(ts->linebuf.base[len] == '\n');
+ ts->linebuf.base[len-1] = '\n';
+ }
+ } else if (*nl == LINE_SEPARATOR || *nl == PARA_SEPARATOR) {
+ ts->linebuf.base[len-1] = '\n';
+ }
+ }
+
+ /* Reset linebuf based on adjusted segment length. */
+ ts->linebuf.limit = ts->linebuf.base + len;
+ ts->linebuf.ptr = ts->linebuf.base;
+
+ /* Update position of linebuf within physical userbuf line. */
+ if (!(ts->flags & TSF_NLFLAG))
+ ts->linepos += ts->linelen;
+ else
+ ts->linepos = 0;
+ if (ts->linebuf.limit[-1] == '\n')
+ ts->flags |= TSF_NLFLAG;
+ else
+ ts->flags &= ~TSF_NLFLAG;
+
+ /* Update linelen from original segment length. */
+ ts->linelen = olen;
+ }
+ c = *ts->linebuf.ptr++;
+ } while (JS_ISFORMAT(c));
+ }
+ if (c == '\n')
+ ts->lineno++;
+ return c;
+}
+
+static void
+UngetChar(JSTokenStream *ts, int32 c)
+{
+ if (c == EOF)
+ return;
+ JS_ASSERT(ts->ungetpos < sizeof ts->ungetbuf / sizeof ts->ungetbuf[0]);
+ if (c == '\n')
+ ts->lineno--;
+ ts->ungetbuf[ts->ungetpos++] = (jschar)c;
+}
+
+static int32
+PeekChar(JSTokenStream *ts)
+{
+ int32 c;
+
+ c = GetChar(ts);
+ UngetChar(ts, c);
+ return c;
+}
+
+/*
+ * Peek n chars ahead into ts. Return true if n chars were read, false if
+ * there weren't enough characters in the input stream. This function cannot
+ * be used to peek into or past a newline.
+ */
+static JSBool
+PeekChars(JSTokenStream *ts, intN n, jschar *cp)
+{
+ intN i, j;
+ int32 c;
+
+ for (i = 0; i < n; i++) {
+ c = GetChar(ts);
+ if (c == EOF)
+ break;
+ if (c == '\n') {
+ UngetChar(ts, c);
+ break;
+ }
+ cp[i] = (jschar)c;
+ }
+ for (j = i - 1; j >= 0; j--)
+ UngetChar(ts, cp[j]);
+ return i == n;
+}
+
+static void
+SkipChars(JSTokenStream *ts, intN n)
+{
+ while (--n >= 0)
+ GetChar(ts);
+}
+
+static JSBool
+MatchChar(JSTokenStream *ts, int32 expect)
+{
+ int32 c;
+
+ c = GetChar(ts);
+ if (c == expect)
+ return JS_TRUE;
+ UngetChar(ts, c);
+ return JS_FALSE;
+}
+
+static JSBool
+ReportCompileErrorNumber(JSContext *cx, void *handle, uintN flags,
+ uintN errorNumber, JSErrorReport *report,
+ JSBool charArgs, va_list ap)
+{
+ JSTempValueRooter linetvr;
+ JSString *linestr = NULL;
+ JSTokenStream *ts = NULL;
+ JSCodeGenerator *cg = NULL;
+ JSParseNode *pn = NULL;
+ JSErrorReporter onError;
+ JSTokenPos *tp;
+ JSStackFrame *fp;
+ uintN index;
+ char *message;
+ JSBool warning;
+
+ memset(report, 0, sizeof (struct JSErrorReport));
+ report->flags = flags;
+ report->errorNumber = errorNumber;
+ message = NULL;
+
+ if (!js_ExpandErrorArguments(cx, js_GetErrorMessage, NULL,
+ errorNumber, &message, report, &warning,
+ charArgs, ap)) {
+ return JS_FALSE;
+ }
+
+ JS_PUSH_TEMP_ROOT_STRING(cx, NULL, &linetvr);
+
+ switch (flags & JSREPORT_HANDLE) {
+ case JSREPORT_TS:
+ ts = handle;
+ break;
+ case JSREPORT_CG:
+ cg = handle;
+ break;
+ case JSREPORT_PN:
+ pn = handle;
+ ts = pn->pn_ts;
+ break;
+ }
+
+ JS_ASSERT(!ts || ts->linebuf.limit < ts->linebuf.base + JS_LINE_LIMIT);
+ /*
+ * We are typically called with non-null ts and null cg from jsparse.c.
+ * We can be called with null ts from the regexp compilation functions.
+ * The code generator (jsemit.c) may pass null ts and non-null cg.
+ */
+ do {
+ if (ts) {
+ report->filename = ts->filename;
+ if (pn) {
+ report->lineno = pn->pn_pos.begin.lineno;
+ if (report->lineno != ts->lineno)
+ break;
+ }
+ report->lineno = ts->lineno;
+ linestr = js_NewStringCopyN(cx, ts->linebuf.base,
+ PTRDIFF(ts->linebuf.limit,
+ ts->linebuf.base,
+ jschar),
+ 0);
+ linetvr.u.string = linestr;
+ report->linebuf = linestr
+ ? JS_GetStringBytes(linestr)
+ : NULL;
+ tp = &ts->tokens[(ts->cursor+ts->lookahead) & NTOKENS_MASK].pos;
+ if (pn)
+ tp = &pn->pn_pos;
+
+ /*
+ * FIXME: What should instead happen here is that we should
+ * find error-tokens in userbuf, if !ts->file. That will
+ * allow us to deliver a more helpful error message, which
+ * includes all or part of the bad string or bad token. The
+ * code here yields something that looks truncated.
+ * See https://bugzilla.mozilla.org/show_bug.cgi?id=352970
+ */
+ index = 0;
+ if (tp->begin.lineno == tp->end.lineno) {
+ if (tp->begin.index < ts->linepos)
+ break;
+
+ index = tp->begin.index - ts->linepos;
+ }
+
+ report->tokenptr = linestr ? report->linebuf + index : NULL;
+ report->uclinebuf = linestr ? JS_GetStringChars(linestr) : NULL;
+ report->uctokenptr = linestr ? report->uclinebuf + index : NULL;
+ break;
+ }
+
+ if (cg) {
+ report->filename = cg->filename;
+ report->lineno = CG_CURRENT_LINE(cg);
+ break;
+ }
+
+ /*
+ * If we can't find out where the error was based on the current
+ * frame, see if the next frame has a script/pc combo we can use.
+ */
+ for (fp = cx->fp; fp; fp = fp->down) {
+ if (fp->script && fp->pc) {
+ report->filename = fp->script->filename;
+ report->lineno = js_PCToLineNumber(cx, fp->script, fp->pc);
+ break;
+ }
+ }
+ } while (0);
+
+ /*
+ * If there's a runtime exception type associated with this error
+ * number, set that as the pending exception. For errors occurring at
+ * compile time, this is very likely to be a JSEXN_SYNTAXERR.
+ *
+ * If an exception is thrown but not caught, the JSREPORT_EXCEPTION
+ * flag will be set in report.flags. Proper behavior for an error
+ * reporter is to ignore a report with this flag for all but top-level
+ * compilation errors. The exception will remain pending, and so long
+ * as the non-top-level "load", "eval", or "compile" native function
+ * returns false, the top-level reporter will eventually receive the
+ * uncaught exception report.
+ *
+ * XXX it'd probably be best if there was only one call to this
+ * function, but there seem to be two error reporter call points.
+ */
+ onError = cx->errorReporter;
+
+ /*
+ * Try to raise an exception only if there isn't one already set --
+ * otherwise the exception will describe the last compile-time error,
+ * which is likely spurious.
+ */
+ if (!ts || !(ts->flags & TSF_ERROR)) {
+ if (js_ErrorToException(cx, message, report))
+ onError = NULL;
+ }
+
+ /*
+ * Suppress any compile-time errors that don't occur at the top level.
+ * This may still fail, as interpLevel may be zero in contexts where we
+ * don't really want to call the error reporter, as when js is called
+ * by other code which could catch the error.
+ */
+ if (cx->interpLevel != 0 && !JSREPORT_IS_WARNING(flags))
+ onError = NULL;
+
+ if (onError) {
+ JSDebugErrorHook hook = cx->runtime->debugErrorHook;
+
+ /*
+ * If debugErrorHook is present then we give it a chance to veto
+ * sending the error on to the regular error reporter.
+ */
+ if (hook && !hook(cx, message, report,
+ cx->runtime->debugErrorHookData)) {
+ onError = NULL;
+ }
+ }
+ if (onError)
+ (*onError)(cx, message, report);
+
+ if (message)
+ JS_free(cx, message);
+ if (report->ucmessage)
+ JS_free(cx, (void *)report->ucmessage);
+
+ JS_POP_TEMP_ROOT(cx, &linetvr);
+
+ if (ts && !JSREPORT_IS_WARNING(flags)) {
+ /* Set the error flag to suppress spurious reports. */
+ ts->flags |= TSF_ERROR;
+ }
+
+ return warning;
+}
+
+JSBool
+js_ReportCompileErrorNumber(JSContext *cx, void *handle, uintN flags,
+ uintN errorNumber, ...)
+{
+ va_list ap;
+ JSErrorReport report;
+ JSBool warning;
+
+ if ((flags & JSREPORT_STRICT) && !JS_HAS_STRICT_OPTION(cx))
+ return JS_TRUE;
+
+ va_start(ap, errorNumber);
+ warning = ReportCompileErrorNumber(cx, handle, flags, errorNumber,
+ &report, JS_TRUE, ap);
+ va_end(ap);
+
+ /*
+ * We have to do this here because js_ReportCompileErrorNumberUC doesn't
+ * need to do this.
+ */
+ if (report.messageArgs) {
+ int i = 0;
+ while (report.messageArgs[i])
+ JS_free(cx, (void *)report.messageArgs[i++]);
+ JS_free(cx, (void *)report.messageArgs);
+ }
+
+ return warning;
+}
+
+JSBool
+js_ReportCompileErrorNumberUC(JSContext *cx, void *handle, uintN flags,
+ uintN errorNumber, ...)
+{
+ va_list ap;
+ JSErrorReport report;
+ JSBool warning;
+
+ if ((flags & JSREPORT_STRICT) && !JS_HAS_STRICT_OPTION(cx))
+ return JS_TRUE;
+
+ va_start(ap, errorNumber);
+ warning = ReportCompileErrorNumber(cx, handle, flags, errorNumber,
+ &report, JS_FALSE, ap);
+ va_end(ap);
+
+ if (report.messageArgs)
+ JS_free(cx, (void *)report.messageArgs);
+
+ return warning;
+}
+
+static JSBool
+GrowStringBuffer(JSStringBuffer *sb, size_t newlength)
+{
+ ptrdiff_t offset;
+ jschar *bp;
+
+ offset = PTRDIFF(sb->ptr, sb->base, jschar);
+ JS_ASSERT(offset >= 0);
+ newlength += offset + 1;
+ if ((size_t)offset < newlength && newlength < ~(size_t)0 / sizeof(jschar))
+ bp = realloc(sb->base, newlength * sizeof(jschar));
+ else
+ bp = NULL;
+ if (!bp) {
+ free(sb->base);
+ sb->base = STRING_BUFFER_ERROR_BASE;
+ return JS_FALSE;
+ }
+ sb->base = bp;
+ sb->ptr = bp + offset;
+ sb->limit = bp + newlength - 1;
+ return JS_TRUE;
+}
+
+static void
+FreeStringBuffer(JSStringBuffer *sb)
+{
+ JS_ASSERT(STRING_BUFFER_OK(sb));
+ if (sb->base)
+ free(sb->base);
+}
+
+void
+js_InitStringBuffer(JSStringBuffer *sb)
+{
+ sb->base = sb->limit = sb->ptr = NULL;
+ sb->data = NULL;
+ sb->grow = GrowStringBuffer;
+ sb->free = FreeStringBuffer;
+}
+
+void
+js_FinishStringBuffer(JSStringBuffer *sb)
+{
+ sb->free(sb);
+}
+
+#define ENSURE_STRING_BUFFER(sb,n) \
+ ((sb)->ptr + (n) <= (sb)->limit || sb->grow(sb, n))
+
+static void
+FastAppendChar(JSStringBuffer *sb, jschar c)
+{
+ if (!STRING_BUFFER_OK(sb))
+ return;
+ if (!ENSURE_STRING_BUFFER(sb, 1))
+ return;
+ *sb->ptr++ = c;
+}
+
+void
+js_AppendChar(JSStringBuffer *sb, jschar c)
+{
+ jschar *bp;
+
+ if (!STRING_BUFFER_OK(sb))
+ return;
+ if (!ENSURE_STRING_BUFFER(sb, 1))
+ return;
+ bp = sb->ptr;
+ *bp++ = c;
+ *bp = 0;
+ sb->ptr = bp;
+}
+
+#if JS_HAS_XML_SUPPORT
+
+void
+js_RepeatChar(JSStringBuffer *sb, jschar c, uintN count)
+{
+ jschar *bp;
+
+ if (!STRING_BUFFER_OK(sb) || count == 0)
+ return;
+ if (!ENSURE_STRING_BUFFER(sb, count))
+ return;
+ for (bp = sb->ptr; count; --count)
+ *bp++ = c;
+ *bp = 0;
+ sb->ptr = bp;
+}
+
+void
+js_AppendCString(JSStringBuffer *sb, const char *asciiz)
+{
+ size_t length;
+ jschar *bp;
+
+ if (!STRING_BUFFER_OK(sb) || *asciiz == '\0')
+ return;
+ length = strlen(asciiz);
+ if (!ENSURE_STRING_BUFFER(sb, length))
+ return;
+ for (bp = sb->ptr; length; --length)
+ *bp++ = (jschar) *asciiz++;
+ *bp = 0;
+ sb->ptr = bp;
+}
+
+void
+js_AppendJSString(JSStringBuffer *sb, JSString *str)
+{
+ size_t length;
+ jschar *bp;
+
+ if (!STRING_BUFFER_OK(sb))
+ return;
+ length = JSSTRING_LENGTH(str);
+ if (length == 0 || !ENSURE_STRING_BUFFER(sb, length))
+ return;
+ bp = sb->ptr;
+ js_strncpy(bp, JSSTRING_CHARS(str), length);
+ bp += length;
+ *bp = 0;
+ sb->ptr = bp;
+}
+
+static JSBool
+GetXMLEntity(JSContext *cx, JSTokenStream *ts)
+{
+ ptrdiff_t offset, length, i;
+ int32 c, d;
+ JSBool ispair;
+ jschar *bp, digit;
+ char *bytes;
+ JSErrNum msg;
+
+ /* Put the entity, including the '&' already scanned, in ts->tokenbuf. */
+ offset = PTRDIFF(ts->tokenbuf.ptr, ts->tokenbuf.base, jschar);
+ FastAppendChar(&ts->tokenbuf, '&');
+ while ((c = GetChar(ts)) != ';') {
+ if (c == EOF || c == '\n') {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_END_OF_XML_ENTITY);
+ return JS_FALSE;
+ }
+ FastAppendChar(&ts->tokenbuf, (jschar) c);
+ }
+
+ /* Let length be the number of jschars after the '&', including the ';'. */
+ length = PTRDIFF(ts->tokenbuf.ptr, ts->tokenbuf.base, jschar) - offset;
+ bp = ts->tokenbuf.base + offset;
+ c = d = 0;
+ ispair = JS_FALSE;
+ if (length > 2 && bp[1] == '#') {
+ /* Match a well-formed XML Character Reference. */
+ i = 2;
+ if (length > 3 && JS_TOLOWER(bp[i]) == 'x') {
+ if (length > 9) /* at most 6 hex digits allowed */
+ goto badncr;
+ while (++i < length) {
+ digit = bp[i];
+ if (!JS7_ISHEX(digit))
+ goto badncr;
+ c = (c << 4) + JS7_UNHEX(digit);
+ }
+ } else {
+ while (i < length) {
+ digit = bp[i++];
+ if (!JS7_ISDEC(digit))
+ goto badncr;
+ c = (c * 10) + JS7_UNDEC(digit);
+ if (c < 0)
+ goto badncr;
+ }
+ }
+
+ if (0x10000 <= c && c <= 0x10FFFF) {
+ /* Form a surrogate pair (c, d) -- c is the high surrogate. */
+ d = 0xDC00 + (c & 0x3FF);
+ c = 0xD7C0 + (c >> 10);
+ ispair = JS_TRUE;
+ } else {
+ /* Enforce the http://www.w3.org/TR/REC-xml/#wf-Legalchar WFC. */
+ if (c != 0x9 && c != 0xA && c != 0xD &&
+ !(0x20 <= c && c <= 0xD7FF) &&
+ !(0xE000 <= c && c <= 0xFFFD)) {
+ goto badncr;
+ }
+ }
+ } else {
+ /* Try to match one of the five XML 1.0 predefined entities. */
+ switch (length) {
+ case 3:
+ if (bp[2] == 't') {
+ if (bp[1] == 'l')
+ c = '<';
+ else if (bp[1] == 'g')
+ c = '>';
+ }
+ break;
+ case 4:
+ if (bp[1] == 'a' && bp[2] == 'm' && bp[3] == 'p')
+ c = '&';
+ break;
+ case 5:
+ if (bp[3] == 'o') {
+ if (bp[1] == 'a' && bp[2] == 'p' && bp[4] == 's')
+ c = '\'';
+ else if (bp[1] == 'q' && bp[2] == 'u' && bp[4] == 't')
+ c = '"';
+ }
+ break;
+ }
+ if (c == 0) {
+ msg = JSMSG_UNKNOWN_XML_ENTITY;
+ goto bad;
+ }
+ }
+
+ /* If we matched, retract ts->tokenbuf and store the entity's value. */
+ *bp++ = (jschar) c;
+ if (ispair)
+ *bp++ = (jschar) d;
+ *bp = 0;
+ ts->tokenbuf.ptr = bp;
+ return JS_TRUE;
+
+badncr:
+ msg = JSMSG_BAD_XML_NCR;
+bad:
+ /* No match: throw a TypeError per ECMA-357 10.3.2.1 step 8(a). */
+ bytes = js_DeflateString(cx, bp + 1,
+ PTRDIFF(ts->tokenbuf.ptr, bp, jschar) - 1);
+ if (bytes) {
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ msg, bytes);
+ JS_free(cx, bytes);
+ }
+ return JS_FALSE;
+}
+
+#endif /* JS_HAS_XML_SUPPORT */
+
+JSTokenType
+js_PeekToken(JSContext *cx, JSTokenStream *ts)
+{
+ JSTokenType tt;
+
+ if (ts->lookahead != 0) {
+ tt = ts->tokens[(ts->cursor + ts->lookahead) & NTOKENS_MASK].type;
+ } else {
+ tt = js_GetToken(cx, ts);
+ js_UngetToken(ts);
+ }
+ return tt;
+}
+
+JSTokenType
+js_PeekTokenSameLine(JSContext *cx, JSTokenStream *ts)
+{
+ JSTokenType tt;
+
+ if (!ON_CURRENT_LINE(ts, CURRENT_TOKEN(ts).pos))
+ return TOK_EOL;
+ ts->flags |= TSF_NEWLINES;
+ tt = js_PeekToken(cx, ts);
+ ts->flags &= ~TSF_NEWLINES;
+ return tt;
+}
+
+/*
+ * We have encountered a '\': check for a Unicode escape sequence after it,
+ * returning the character code value if we found a Unicode escape sequence.
+ * Otherwise, non-destructively return the original '\'.
+ */
+static int32
+GetUnicodeEscape(JSTokenStream *ts)
+{
+ jschar cp[5];
+ int32 c;
+
+ if (PeekChars(ts, 5, cp) && cp[0] == 'u' &&
+ JS7_ISHEX(cp[1]) && JS7_ISHEX(cp[2]) &&
+ JS7_ISHEX(cp[3]) && JS7_ISHEX(cp[4]))
+ {
+ c = (((((JS7_UNHEX(cp[1]) << 4)
+ + JS7_UNHEX(cp[2])) << 4)
+ + JS7_UNHEX(cp[3])) << 4)
+ + JS7_UNHEX(cp[4]);
+ SkipChars(ts, 5);
+ return c;
+ }
+ return '\\';
+}
+
+static JSToken *
+NewToken(JSTokenStream *ts, ptrdiff_t adjust)
+{
+ JSToken *tp;
+
+ ts->cursor = (ts->cursor + 1) & NTOKENS_MASK;
+ tp = &CURRENT_TOKEN(ts);
+ tp->ptr = ts->linebuf.ptr + adjust;
+ tp->pos.begin.index = ts->linepos +
+ PTRDIFF(tp->ptr, ts->linebuf.base, jschar) -
+ ts->ungetpos;
+ tp->pos.begin.lineno = tp->pos.end.lineno = (uint16)ts->lineno;
+ return tp;
+}
+
+JSTokenType
+js_GetToken(JSContext *cx, JSTokenStream *ts)
+{
+ JSTokenType tt;
+ int32 c, qc;
+ JSToken *tp;
+ JSAtom *atom;
+ JSBool hadUnicodeEscape;
+ const struct keyword *kw;
+
+#define INIT_TOKENBUF() (ts->tokenbuf.ptr = ts->tokenbuf.base)
+#define TOKENBUF_LENGTH() PTRDIFF(ts->tokenbuf.ptr, ts->tokenbuf.base, jschar)
+#define TOKENBUF_OK() STRING_BUFFER_OK(&ts->tokenbuf)
+#define TOKENBUF_TO_ATOM() (TOKENBUF_OK() \
+ ? js_AtomizeChars(cx, \
+ TOKENBUF_BASE(), \
+ TOKENBUF_LENGTH(), \
+ 0) \
+ : NULL)
+#define ADD_TO_TOKENBUF(c) FastAppendChar(&ts->tokenbuf, (jschar) (c))
+
+/* The following 4 macros should only be used when TOKENBUF_OK() is true. */
+#define TOKENBUF_BASE() (ts->tokenbuf.base)
+#define TOKENBUF_CHAR(i) (ts->tokenbuf.base[i])
+#define TRIM_TOKENBUF(i) (ts->tokenbuf.ptr = ts->tokenbuf.base + i)
+#define NUL_TERM_TOKENBUF() (*ts->tokenbuf.ptr = 0)
+
+ /* Check for a pushed-back token resulting from mismatching lookahead. */
+ while (ts->lookahead != 0) {
+ JS_ASSERT(!(ts->flags & TSF_XMLTEXTMODE));
+ ts->lookahead--;
+ ts->cursor = (ts->cursor + 1) & NTOKENS_MASK;
+ tt = CURRENT_TOKEN(ts).type;
+ if (tt != TOK_EOL || (ts->flags & TSF_NEWLINES))
+ return tt;
+ }
+
+ /* If there was a fatal error, keep returning TOK_ERROR. */
+ if (ts->flags & TSF_ERROR)
+ return TOK_ERROR;
+
+#if JS_HAS_XML_SUPPORT
+ if (ts->flags & TSF_XMLTEXTMODE) {
+ tt = TOK_XMLSPACE; /* veto if non-space, return TOK_XMLTEXT */
+ tp = NewToken(ts, 0);
+ INIT_TOKENBUF();
+ qc = (ts->flags & TSF_XMLONLYMODE) ? '<' : '{';
+
+ while ((c = GetChar(ts)) != qc && c != '<' && c != EOF) {
+ if (c == '&' && qc == '<') {
+ if (!GetXMLEntity(cx, ts))
+ goto error;
+ tt = TOK_XMLTEXT;
+ continue;
+ }
+
+ if (!JS_ISXMLSPACE(c))
+ tt = TOK_XMLTEXT;
+ ADD_TO_TOKENBUF(c);
+ }
+ UngetChar(ts, c);
+
+ if (TOKENBUF_LENGTH() == 0) {
+ atom = NULL;
+ } else {
+ atom = TOKENBUF_TO_ATOM();
+ if (!atom)
+ goto error;
+ }
+ tp->pos.end.lineno = (uint16)ts->lineno;
+ tp->t_op = JSOP_STRING;
+ tp->t_atom = atom;
+ goto out;
+ }
+
+ if (ts->flags & TSF_XMLTAGMODE) {
+ tp = NewToken(ts, 0);
+ c = GetChar(ts);
+ if (JS_ISXMLSPACE(c)) {
+ do {
+ c = GetChar(ts);
+ } while (JS_ISXMLSPACE(c));
+ UngetChar(ts, c);
+ tt = TOK_XMLSPACE;
+ goto out;
+ }
+
+ if (c == EOF) {
+ tt = TOK_EOF;
+ goto out;
+ }
+
+ INIT_TOKENBUF();
+ if (JS_ISXMLNSSTART(c)) {
+ JSBool sawColon = JS_FALSE;
+
+ ADD_TO_TOKENBUF(c);
+ while ((c = GetChar(ts)) != EOF && JS_ISXMLNAME(c)) {
+ if (c == ':') {
+ int nextc;
+
+ if (sawColon ||
+ (nextc = PeekChar(ts),
+ ((ts->flags & TSF_XMLONLYMODE) || nextc != '{') &&
+ !JS_ISXMLNAME(nextc))) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS |
+ JSREPORT_ERROR,
+ JSMSG_BAD_XML_QNAME);
+ goto error;
+ }
+ sawColon = JS_TRUE;
+ }
+
+ ADD_TO_TOKENBUF(c);
+ }
+
+ UngetChar(ts, c);
+ atom = TOKENBUF_TO_ATOM();
+ if (!atom)
+ goto error;
+ tp->t_op = JSOP_STRING;
+ tp->t_atom = atom;
+ tt = TOK_XMLNAME;
+ goto out;
+ }
+
+ switch (c) {
+ case '{':
+ if (ts->flags & TSF_XMLONLYMODE)
+ goto bad_xml_char;
+ tt = TOK_LC;
+ goto out;
+
+ case '=':
+ tt = TOK_ASSIGN;
+ goto out;
+
+ case '"':
+ case '\'':
+ qc = c;
+ while ((c = GetChar(ts)) != qc) {
+ if (c == EOF) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_UNTERMINATED_STRING);
+ goto error;
+ }
+
+ /*
+ * XML attribute values are double-quoted when pretty-printed,
+ * so escape " if it is expressed directly in a single-quoted
+ * attribute value.
+ */
+ if (c == '"' && !(ts->flags & TSF_XMLONLYMODE)) {
+ JS_ASSERT(qc == '\'');
+ js_AppendCString(&ts->tokenbuf, js_quot_entity_str);
+ continue;
+ }
+
+ if (c == '&' && (ts->flags & TSF_XMLONLYMODE)) {
+ if (!GetXMLEntity(cx, ts))
+ goto error;
+ continue;
+ }
+
+ ADD_TO_TOKENBUF(c);
+ }
+ atom = TOKENBUF_TO_ATOM();
+ if (!atom)
+ goto error;
+ tp->pos.end.lineno = (uint16)ts->lineno;
+ tp->t_op = JSOP_STRING;
+ tp->t_atom = atom;
+ tt = TOK_XMLATTR;
+ goto out;
+
+ case '>':
+ tt = TOK_XMLTAGC;
+ goto out;
+
+ case '/':
+ if (MatchChar(ts, '>')) {
+ tt = TOK_XMLPTAGC;
+ goto out;
+ }
+ /* FALL THROUGH */
+
+ bad_xml_char:
+ default:
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_XML_CHARACTER);
+ goto error;
+ }
+ /* NOTREACHED */
+ }
+#endif /* JS_HAS_XML_SUPPORT */
+
+retry:
+ do {
+ c = GetChar(ts);
+ if (c == '\n') {
+ ts->flags &= ~TSF_DIRTYLINE;
+ if (ts->flags & TSF_NEWLINES)
+ break;
+ }
+ } while (JS_ISSPACE(c));
+
+ tp = NewToken(ts, -1);
+ if (c == EOF) {
+ tt = TOK_EOF;
+ goto out;
+ }
+
+ hadUnicodeEscape = JS_FALSE;
+ if (JS_ISIDSTART(c) ||
+ (c == '\\' &&
+ (c = GetUnicodeEscape(ts),
+ hadUnicodeEscape = JS_ISIDSTART(c)))) {
+ INIT_TOKENBUF();
+ for (;;) {
+ ADD_TO_TOKENBUF(c);
+ c = GetChar(ts);
+ if (c == '\\') {
+ c = GetUnicodeEscape(ts);
+ if (!JS_ISIDENT(c))
+ break;
+ hadUnicodeEscape = JS_TRUE;
+ } else {
+ if (!JS_ISIDENT(c))
+ break;
+ }
+ }
+ UngetChar(ts, c);
+
+ /*
+         * Check for keywords unless we saw a Unicode escape or the parser
+         * asks us to ignore keywords.
+ */
+ if (!hadUnicodeEscape &&
+ !(ts->flags & TSF_KEYWORD_IS_NAME) &&
+ TOKENBUF_OK() &&
+ (kw = FindKeyword(TOKENBUF_BASE(), TOKENBUF_LENGTH()))) {
+ if (kw->tokentype == TOK_RESERVED) {
+ if (!js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS |
+ JSREPORT_WARNING |
+ JSREPORT_STRICT,
+ JSMSG_RESERVED_ID,
+ kw->chars)) {
+ goto error;
+ }
+ } else if (kw->version <= JSVERSION_NUMBER(cx)) {
+ tt = kw->tokentype;
+ tp->t_op = (JSOp) kw->op;
+ goto out;
+ }
+ }
+
+ atom = TOKENBUF_TO_ATOM();
+ if (!atom)
+ goto error;
+ tp->t_op = JSOP_NAME;
+ tp->t_atom = atom;
+ tt = TOK_NAME;
+ goto out;
+ }
+
+ if (JS7_ISDEC(c) || (c == '.' && JS7_ISDEC(PeekChar(ts)))) {
+ jsint radix;
+ const jschar *endptr;
+ jsdouble dval;
+
+ radix = 10;
+ INIT_TOKENBUF();
+
+ if (c == '0') {
+ ADD_TO_TOKENBUF(c);
+ c = GetChar(ts);
+ if (JS_TOLOWER(c) == 'x') {
+ ADD_TO_TOKENBUF(c);
+ c = GetChar(ts);
+ radix = 16;
+ } else if (JS7_ISDEC(c)) {
+ radix = 8;
+ }
+ }
+
+ while (JS7_ISHEX(c)) {
+ if (radix < 16) {
+ if (JS7_ISLET(c))
+ break;
+
+ /*
+ * We permit 08 and 09 as decimal numbers, which makes our
+ * behaviour a superset of the ECMA numeric grammar. We might
+ * not always be so permissive, so we warn about it.
+ */
+ if (radix == 8 && c >= '8') {
+ if (!js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS |
+ JSREPORT_WARNING,
+ JSMSG_BAD_OCTAL,
+ c == '8' ? "08" : "09")) {
+ goto error;
+ }
+ radix = 10;
+ }
+ }
+ ADD_TO_TOKENBUF(c);
+ c = GetChar(ts);
+ }
+
+ if (radix == 10 && (c == '.' || JS_TOLOWER(c) == 'e')) {
+ if (c == '.') {
+ do {
+ ADD_TO_TOKENBUF(c);
+ c = GetChar(ts);
+ } while (JS7_ISDEC(c));
+ }
+ if (JS_TOLOWER(c) == 'e') {
+ ADD_TO_TOKENBUF(c);
+ c = GetChar(ts);
+ if (c == '+' || c == '-') {
+ ADD_TO_TOKENBUF(c);
+ c = GetChar(ts);
+ }
+ if (!JS7_ISDEC(c)) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_MISSING_EXPONENT);
+ goto error;
+ }
+ do {
+ ADD_TO_TOKENBUF(c);
+ c = GetChar(ts);
+ } while (JS7_ISDEC(c));
+ }
+ }
+
+ /* Put back the next char and NUL-terminate tokenbuf for js_strto*. */
+ UngetChar(ts, c);
+ ADD_TO_TOKENBUF(0);
+
+ if (!TOKENBUF_OK())
+ goto error;
+ if (radix == 10) {
+ if (!js_strtod(cx, TOKENBUF_BASE(), &endptr, &dval)) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_OUT_OF_MEMORY);
+ goto error;
+ }
+ } else {
+ if (!js_strtointeger(cx, TOKENBUF_BASE(), &endptr, radix, &dval)) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_OUT_OF_MEMORY);
+ goto error;
+ }
+ }
+ tp->t_dval = dval;
+ tt = TOK_NUMBER;
+ goto out;
+ }
+
+ if (c == '"' || c == '\'') {
+ qc = c;
+ INIT_TOKENBUF();
+ while ((c = GetChar(ts)) != qc) {
+ if (c == '\n' || c == EOF) {
+ UngetChar(ts, c);
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_UNTERMINATED_STRING);
+ goto error;
+ }
+ if (c == '\\') {
+ switch (c = GetChar(ts)) {
+ case 'b': c = '\b'; break;
+ case 'f': c = '\f'; break;
+ case 'n': c = '\n'; break;
+ case 'r': c = '\r'; break;
+ case 't': c = '\t'; break;
+ case 'v': c = '\v'; break;
+
+ default:
+ if ('0' <= c && c < '8') {
+ int32 val = JS7_UNDEC(c);
+
+ c = PeekChar(ts);
+ if ('0' <= c && c < '8') {
+ val = 8 * val + JS7_UNDEC(c);
+ GetChar(ts);
+ c = PeekChar(ts);
+ if ('0' <= c && c < '8') {
+ int32 save = val;
+ val = 8 * val + JS7_UNDEC(c);
+ if (val <= 0377)
+ GetChar(ts);
+ else
+ val = save;
+ }
+ }
+
+ c = (jschar)val;
+ } else if (c == 'u') {
+ jschar cp[4];
+ if (PeekChars(ts, 4, cp) &&
+ JS7_ISHEX(cp[0]) && JS7_ISHEX(cp[1]) &&
+ JS7_ISHEX(cp[2]) && JS7_ISHEX(cp[3])) {
+ c = (((((JS7_UNHEX(cp[0]) << 4)
+ + JS7_UNHEX(cp[1])) << 4)
+ + JS7_UNHEX(cp[2])) << 4)
+ + JS7_UNHEX(cp[3]);
+ SkipChars(ts, 4);
+ }
+ } else if (c == 'x') {
+ jschar cp[2];
+ if (PeekChars(ts, 2, cp) &&
+ JS7_ISHEX(cp[0]) && JS7_ISHEX(cp[1])) {
+ c = (JS7_UNHEX(cp[0]) << 4) + JS7_UNHEX(cp[1]);
+ SkipChars(ts, 2);
+ }
+ } else if (c == '\n' && JS_VERSION_IS_ECMA(cx)) {
+ /* ECMA follows C by removing escaped newlines. */
+ continue;
+ }
+ break;
+ }
+ }
+ ADD_TO_TOKENBUF(c);
+ }
+ atom = TOKENBUF_TO_ATOM();
+ if (!atom)
+ goto error;
+ tp->pos.end.lineno = (uint16)ts->lineno;
+ tp->t_op = JSOP_STRING;
+ tp->t_atom = atom;
+ tt = TOK_STRING;
+ goto out;
+ }
+
+ switch (c) {
+ case '\n': tt = TOK_EOL; goto eol_out;
+ case ';': tt = TOK_SEMI; break;
+ case '[': tt = TOK_LB; break;
+ case ']': tt = TOK_RB; break;
+ case '{': tt = TOK_LC; break;
+ case '}': tt = TOK_RC; break;
+ case '(': tt = TOK_LP; break;
+ case ')': tt = TOK_RP; break;
+ case ',': tt = TOK_COMMA; break;
+ case '?': tt = TOK_HOOK; break;
+
+ case '.':
+#if JS_HAS_XML_SUPPORT
+ if (MatchChar(ts, c))
+ tt = TOK_DBLDOT;
+ else
+#endif
+ tt = TOK_DOT;
+ break;
+
+ case ':':
+#if JS_HAS_XML_SUPPORT
+ if (MatchChar(ts, c)) {
+ tt = TOK_DBLCOLON;
+ break;
+ }
+#endif
+ /*
+ * Default so compiler can modify to JSOP_GETTER if 'p getter: v' in an
+ * object initializer, likewise for setter.
+ */
+ tp->t_op = JSOP_NOP;
+ tt = TOK_COLON;
+ break;
+
+ case '|':
+ if (MatchChar(ts, c)) {
+ tt = TOK_OR;
+ } else if (MatchChar(ts, '=')) {
+ tp->t_op = JSOP_BITOR;
+ tt = TOK_ASSIGN;
+ } else {
+ tt = TOK_BITOR;
+ }
+ break;
+
+ case '^':
+ if (MatchChar(ts, '=')) {
+ tp->t_op = JSOP_BITXOR;
+ tt = TOK_ASSIGN;
+ } else {
+ tt = TOK_BITXOR;
+ }
+ break;
+
+ case '&':
+ if (MatchChar(ts, c)) {
+ tt = TOK_AND;
+ } else if (MatchChar(ts, '=')) {
+ tp->t_op = JSOP_BITAND;
+ tt = TOK_ASSIGN;
+ } else {
+ tt = TOK_BITAND;
+ }
+ break;
+
+ case '=':
+ if (MatchChar(ts, c)) {
+ tp->t_op = MatchChar(ts, c) ? JSOP_NEW_EQ : (JSOp)cx->jsop_eq;
+ tt = TOK_EQOP;
+ } else {
+ tp->t_op = JSOP_NOP;
+ tt = TOK_ASSIGN;
+ }
+ break;
+
+ case '!':
+ if (MatchChar(ts, '=')) {
+ tp->t_op = MatchChar(ts, '=') ? JSOP_NEW_NE : (JSOp)cx->jsop_ne;
+ tt = TOK_EQOP;
+ } else {
+ tp->t_op = JSOP_NOT;
+ tt = TOK_UNARYOP;
+ }
+ break;
+
+#if JS_HAS_XML_SUPPORT
+ case '@':
+ tt = TOK_AT;
+ break;
+#endif
+
+ case '<':
+#if JS_HAS_XML_SUPPORT
+ /*
+ * After much testing, it's clear that Postel's advice to protocol
+ * designers ("be liberal in what you accept, and conservative in what
+ * you send") invites a natural-law repercussion for JS as "protocol":
+ *
+ * "If you are liberal in what you accept, others will utterly fail to
+ * be conservative in what they send."
+ *
+ * Which means you will get <!-- comments to end of line in the middle
+ * of .js files, and after if conditions whose then statements are on
+ * the next line, and other wonders. See at least the following bugs:
+ * https://bugzilla.mozilla.org/show_bug.cgi?id=309242
+ * https://bugzilla.mozilla.org/show_bug.cgi?id=309712
+ * https://bugzilla.mozilla.org/show_bug.cgi?id=310993
+ *
+ * So without JSOPTION_XML, we never scan an XML comment or CDATA
+ * literal. We always scan <! as the start of an HTML comment hack
+ * to end of line, used since Netscape 2 to hide script tag content
+ * from script-unaware browsers.
+ */
+ if ((ts->flags & TSF_OPERAND) &&
+ (JS_HAS_XML_OPTION(cx) || PeekChar(ts) != '!')) {
+ /* Check for XML comment or CDATA section. */
+ if (MatchChar(ts, '!')) {
+ INIT_TOKENBUF();
+
+ /* Scan XML comment. */
+ if (MatchChar(ts, '-')) {
+ if (!MatchChar(ts, '-'))
+ goto bad_xml_markup;
+ while ((c = GetChar(ts)) != '-' || !MatchChar(ts, '-')) {
+ if (c == EOF)
+ goto bad_xml_markup;
+ ADD_TO_TOKENBUF(c);
+ }
+ tt = TOK_XMLCOMMENT;
+ tp->t_op = JSOP_XMLCOMMENT;
+ goto finish_xml_markup;
+ }
+
+ /* Scan CDATA section. */
+ if (MatchChar(ts, '[')) {
+ jschar cp[6];
+ if (PeekChars(ts, 6, cp) &&
+ cp[0] == 'C' &&
+ cp[1] == 'D' &&
+ cp[2] == 'A' &&
+ cp[3] == 'T' &&
+ cp[4] == 'A' &&
+ cp[5] == '[') {
+ SkipChars(ts, 6);
+ while ((c = GetChar(ts)) != ']' ||
+ !PeekChars(ts, 2, cp) ||
+ cp[0] != ']' ||
+ cp[1] != '>') {
+ if (c == EOF)
+ goto bad_xml_markup;
+ ADD_TO_TOKENBUF(c);
+ }
+ GetChar(ts); /* discard ] but not > */
+ tt = TOK_XMLCDATA;
+ tp->t_op = JSOP_XMLCDATA;
+ goto finish_xml_markup;
+ }
+ goto bad_xml_markup;
+ }
+ }
+
+ /* Check for processing instruction. */
+ if (MatchChar(ts, '?')) {
+ JSBool inTarget = JS_TRUE;
+ size_t targetLength = 0;
+ ptrdiff_t contentIndex = -1;
+
+ INIT_TOKENBUF();
+ while ((c = GetChar(ts)) != '?' || PeekChar(ts) != '>') {
+ if (c == EOF)
+ goto bad_xml_markup;
+ if (inTarget) {
+ if (JS_ISXMLSPACE(c)) {
+ if (TOKENBUF_LENGTH() == 0)
+ goto bad_xml_markup;
+ inTarget = JS_FALSE;
+ } else {
+ if (!((TOKENBUF_LENGTH() == 0)
+ ? JS_ISXMLNSSTART(c)
+ : JS_ISXMLNS(c))) {
+ goto bad_xml_markup;
+ }
+ ++targetLength;
+ }
+ } else {
+ if (contentIndex < 0 && !JS_ISXMLSPACE(c))
+ contentIndex = TOKENBUF_LENGTH();
+ }
+ ADD_TO_TOKENBUF(c);
+ }
+ if (targetLength == 0)
+ goto bad_xml_markup;
+ if (!TOKENBUF_OK())
+ goto error;
+ if (contentIndex < 0) {
+ atom = cx->runtime->atomState.emptyAtom;
+ } else {
+ atom = js_AtomizeChars(cx,
+ &TOKENBUF_CHAR(contentIndex),
+ TOKENBUF_LENGTH() - contentIndex,
+ 0);
+ if (!atom)
+ goto error;
+ }
+ TRIM_TOKENBUF(targetLength);
+ tp->t_atom2 = atom;
+ tt = TOK_XMLPI;
+
+ finish_xml_markup:
+ if (!MatchChar(ts, '>'))
+ goto bad_xml_markup;
+ atom = TOKENBUF_TO_ATOM();
+ if (!atom)
+ goto error;
+ tp->t_atom = atom;
+ tp->pos.end.lineno = (uint16)ts->lineno;
+ goto out;
+ }
+
+ /* An XML start-of-tag character. */
+ tt = MatchChar(ts, '/') ? TOK_XMLETAGO : TOK_XMLSTAGO;
+ goto out;
+
+ bad_xml_markup:
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_XML_MARKUP);
+ goto error;
+ }
+#endif /* JS_HAS_XML_SUPPORT */
+
+ /* NB: treat HTML begin-comment as comment-till-end-of-line */
+ if (MatchChar(ts, '!')) {
+ if (MatchChar(ts, '-')) {
+ if (MatchChar(ts, '-')) {
+ ts->flags |= TSF_IN_HTML_COMMENT;
+ goto skipline;
+ }
+ UngetChar(ts, '-');
+ }
+ UngetChar(ts, '!');
+ }
+ if (MatchChar(ts, c)) {
+ tp->t_op = JSOP_LSH;
+ tt = MatchChar(ts, '=') ? TOK_ASSIGN : TOK_SHOP;
+ } else {
+ tp->t_op = MatchChar(ts, '=') ? JSOP_LE : JSOP_LT;
+ tt = TOK_RELOP;
+ }
+ break;
+
+ case '>':
+ if (MatchChar(ts, c)) {
+ tp->t_op = MatchChar(ts, c) ? JSOP_URSH : JSOP_RSH;
+ tt = MatchChar(ts, '=') ? TOK_ASSIGN : TOK_SHOP;
+ } else {
+ tp->t_op = MatchChar(ts, '=') ? JSOP_GE : JSOP_GT;
+ tt = TOK_RELOP;
+ }
+ break;
+
+ case '*':
+ tp->t_op = JSOP_MUL;
+ tt = MatchChar(ts, '=') ? TOK_ASSIGN : TOK_STAR;
+ break;
+
+ case '/':
+ if (MatchChar(ts, '/')) {
+ /*
+ * Hack for source filters such as the Mozilla XUL preprocessor:
+ * "//@line 123\n" sets the number of the *next* line after the
+ * comment to 123.
+ */
+ if (JS_HAS_ATLINE_OPTION(cx)) {
+ jschar cp[5];
+ uintN i, line, temp;
+ char filename[1024];
+
+ if (PeekChars(ts, 5, cp) &&
+ cp[0] == '@' &&
+ cp[1] == 'l' &&
+ cp[2] == 'i' &&
+ cp[3] == 'n' &&
+ cp[4] == 'e') {
+ SkipChars(ts, 5);
+ while ((c = GetChar(ts)) != '\n' && JS_ISSPACE(c))
+ continue;
+ if (JS7_ISDEC(c)) {
+ line = JS7_UNDEC(c);
+ while ((c = GetChar(ts)) != EOF && JS7_ISDEC(c)) {
+ temp = 10 * line + JS7_UNDEC(c);
+ if (temp < line) {
+ /* Ignore overlarge line numbers. */
+ goto skipline;
+ }
+ line = temp;
+ }
+ while (c != '\n' && JS_ISSPACE(c))
+ c = GetChar(ts);
+ i = 0;
+ if (c == '"') {
+ while ((c = GetChar(ts)) != EOF && c != '"') {
+ if (c == '\n') {
+ UngetChar(ts, c);
+ goto skipline;
+ }
+ if ((c >> 8) != 0 || i >= sizeof filename - 1)
+ goto skipline;
+ filename[i++] = (char) c;
+ }
+ if (c == '"') {
+ while ((c = GetChar(ts)) != '\n' &&
+ JS_ISSPACE(c)) {
+ continue;
+ }
+ }
+ }
+ filename[i] = '\0';
+ if (c == '\n') {
+ if (i > 0) {
+ if (ts->flags & TSF_OWNFILENAME)
+ JS_free(cx, (void *) ts->filename);
+ ts->filename = JS_strdup(cx, filename);
+ if (!ts->filename)
+ goto error;
+ ts->flags |= TSF_OWNFILENAME;
+ }
+ ts->lineno = line;
+ }
+ }
+ UngetChar(ts, c);
+ }
+ }
+
+skipline:
+ /* Optimize line skipping if we are not in an HTML comment. */
+ if (ts->flags & TSF_IN_HTML_COMMENT) {
+ while ((c = GetChar(ts)) != EOF && c != '\n') {
+ if (c == '-' && MatchChar(ts, '-') && MatchChar(ts, '>'))
+ ts->flags &= ~TSF_IN_HTML_COMMENT;
+ }
+ } else {
+ while ((c = GetChar(ts)) != EOF && c != '\n')
+ continue;
+ }
+ UngetChar(ts, c);
+ ts->cursor = (ts->cursor - 1) & NTOKENS_MASK;
+ goto retry;
+ }
+
+ if (MatchChar(ts, '*')) {
+ while ((c = GetChar(ts)) != EOF &&
+ !(c == '*' && MatchChar(ts, '/'))) {
+ /* Ignore all characters until comment close. */
+ }
+ if (c == EOF) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_UNTERMINATED_COMMENT);
+ goto error;
+ }
+ ts->cursor = (ts->cursor - 1) & NTOKENS_MASK;
+ goto retry;
+ }
+
+ if (ts->flags & TSF_OPERAND) {
+ JSObject *obj;
+ uintN flags;
+ JSBool inCharClass = JS_FALSE;
+
+ INIT_TOKENBUF();
+ for (;;) {
+ c = GetChar(ts);
+ if (c == '\n' || c == EOF) {
+ UngetChar(ts, c);
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_UNTERMINATED_REGEXP);
+ goto error;
+ }
+ if (c == '\\') {
+ ADD_TO_TOKENBUF(c);
+ c = GetChar(ts);
+ } else if (c == '[') {
+ inCharClass = JS_TRUE;
+ } else if (c == ']') {
+ inCharClass = JS_FALSE;
+ } else if (c == '/' && !inCharClass) {
+ /* For compat with IE, allow unescaped / in char classes. */
+ break;
+ }
+ ADD_TO_TOKENBUF(c);
+ }
+ for (flags = 0; ; ) {
+ if (MatchChar(ts, 'g'))
+ flags |= JSREG_GLOB;
+ else if (MatchChar(ts, 'i'))
+ flags |= JSREG_FOLD;
+ else if (MatchChar(ts, 'm'))
+ flags |= JSREG_MULTILINE;
+ else
+ break;
+ }
+ c = PeekChar(ts);
+ if (JS7_ISLET(c)) {
+ tp->ptr = ts->linebuf.ptr - 1;
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_BAD_REGEXP_FLAG);
+ (void) GetChar(ts);
+ goto error;
+ }
+ /* XXXbe fix jsregexp.c so it doesn't depend on NUL termination */
+ if (!TOKENBUF_OK())
+ goto error;
+ NUL_TERM_TOKENBUF();
+ obj = js_NewRegExpObject(cx, ts,
+ TOKENBUF_BASE(),
+ TOKENBUF_LENGTH(),
+ flags);
+ if (!obj)
+ goto error;
+ atom = js_AtomizeObject(cx, obj, 0);
+ if (!atom)
+ goto error;
+
+ /*
+ * If the regexp's script is one-shot, we can avoid the extra
+ * fork-on-exec costs of JSOP_REGEXP by selecting JSOP_OBJECT.
+ * Otherwise, to avoid incorrect proto, parent, and lastIndex
+ * sharing among threads and sequentially across re-execution,
+ * select JSOP_REGEXP.
+ */
+ tp->t_op = (cx->fp->flags & (JSFRAME_EVAL | JSFRAME_COMPILE_N_GO))
+ ? JSOP_OBJECT
+ : JSOP_REGEXP;
+ tp->t_atom = atom;
+ tt = TOK_OBJECT;
+ break;
+ }
+
+ tp->t_op = JSOP_DIV;
+ tt = MatchChar(ts, '=') ? TOK_ASSIGN : TOK_DIVOP;
+ break;
+
+ case '%':
+ tp->t_op = JSOP_MOD;
+ tt = MatchChar(ts, '=') ? TOK_ASSIGN : TOK_DIVOP;
+ break;
+
+ case '~':
+ tp->t_op = JSOP_BITNOT;
+ tt = TOK_UNARYOP;
+ break;
+
+ case '+':
+ if (MatchChar(ts, '=')) {
+ tp->t_op = JSOP_ADD;
+ tt = TOK_ASSIGN;
+ } else if (MatchChar(ts, c)) {
+ tt = TOK_INC;
+ } else {
+ tp->t_op = JSOP_POS;
+ tt = TOK_PLUS;
+ }
+ break;
+
+ case '-':
+ if (MatchChar(ts, '=')) {
+ tp->t_op = JSOP_SUB;
+ tt = TOK_ASSIGN;
+ } else if (MatchChar(ts, c)) {
+ if (PeekChar(ts) == '>' && !(ts->flags & TSF_DIRTYLINE)) {
+ ts->flags &= ~TSF_IN_HTML_COMMENT;
+ goto skipline;
+ }
+ tt = TOK_DEC;
+ } else {
+ tp->t_op = JSOP_NEG;
+ tt = TOK_MINUS;
+ }
+ break;
+
+#if JS_HAS_SHARP_VARS
+ case '#':
+ {
+ uint32 n;
+
+ c = GetChar(ts);
+ if (!JS7_ISDEC(c)) {
+ UngetChar(ts, c);
+ goto badchar;
+ }
+ n = (uint32)JS7_UNDEC(c);
+ for (;;) {
+ c = GetChar(ts);
+ if (!JS7_ISDEC(c))
+ break;
+ n = 10 * n + JS7_UNDEC(c);
+ if (n >= UINT16_LIMIT) {
+ js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_SHARPVAR_TOO_BIG);
+ goto error;
+ }
+ }
+ tp->t_dval = (jsdouble) n;
+ if (JS_HAS_STRICT_OPTION(cx) &&
+ (c == '=' || c == '#')) {
+ char buf[20];
+ JS_snprintf(buf, sizeof buf, "#%u%c", n, c);
+ if (!js_ReportCompileErrorNumber(cx, ts,
+ JSREPORT_TS |
+ JSREPORT_WARNING |
+ JSREPORT_STRICT,
+ JSMSG_DEPRECATED_USAGE,
+ buf)) {
+ goto error;
+ }
+ }
+ if (c == '=')
+ tt = TOK_DEFSHARP;
+ else if (c == '#')
+ tt = TOK_USESHARP;
+ else
+ goto badchar;
+ break;
+ }
+#endif /* JS_HAS_SHARP_VARS */
+
+#if JS_HAS_SHARP_VARS || JS_HAS_XML_SUPPORT
+ badchar:
+#endif
+
+ default:
+ js_ReportCompileErrorNumber(cx, ts, JSREPORT_TS | JSREPORT_ERROR,
+ JSMSG_ILLEGAL_CHARACTER);
+ goto error;
+ }
+
+out:
+ JS_ASSERT(tt != TOK_EOL);
+ ts->flags |= TSF_DIRTYLINE;
+
+eol_out:
+ if (!STRING_BUFFER_OK(&ts->tokenbuf))
+ tt = TOK_ERROR;
+ JS_ASSERT(tt < TOK_LIMIT);
+ tp->pos.end.index = ts->linepos +
+ PTRDIFF(ts->linebuf.ptr, ts->linebuf.base, jschar) -
+ ts->ungetpos;
+ tp->type = tt;
+ return tt;
+
+error:
+ tt = TOK_ERROR;
+ ts->flags |= TSF_ERROR;
+ goto out;
+
+#undef INIT_TOKENBUF
+#undef TOKENBUF_LENGTH
+#undef TOKENBUF_OK
+#undef TOKENBUF_TO_ATOM
+#undef ADD_TO_TOKENBUF
+#undef TOKENBUF_BASE
+#undef TOKENBUF_CHAR
+#undef TRIM_TOKENBUF
+#undef NUL_TERM_TOKENBUF
+}
+
+void
+js_UngetToken(JSTokenStream *ts)
+{
+ JS_ASSERT(ts->lookahead < NTOKENS_MASK);
+ ts->lookahead++;
+ ts->cursor = (ts->cursor - 1) & NTOKENS_MASK;
+}
+
+JSBool
+js_MatchToken(JSContext *cx, JSTokenStream *ts, JSTokenType tt)
+{
+ if (js_GetToken(cx, ts) == tt)
+ return JS_TRUE;
+ js_UngetToken(ts);
+ return JS_FALSE;
+}
diff --git a/src/third_party/js-1.7/jsscan.h b/src/third_party/js-1.7/jsscan.h
new file mode 100644
index 00000000000..08cb0956028
--- /dev/null
+++ b/src/third_party/js-1.7/jsscan.h
@@ -0,0 +1,389 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsscan_h___
+#define jsscan_h___
+/*
+ * JS lexical scanner interface.
+ */
+#include <stddef.h>
+#include <stdio.h>
+#include "jsconfig.h"
+#include "jsopcode.h"
+#include "jsprvtd.h"
+#include "jspubtd.h"
+
+JS_BEGIN_EXTERN_C
+
+#define JS_KEYWORD(keyword, type, op, version) \
+ extern const char js_##keyword##_str[];
+#include "jskeyword.tbl"
+#undef JS_KEYWORD
+
+typedef enum JSTokenType {
+ TOK_ERROR = -1, /* well-known as the only code < EOF */
+ TOK_EOF = 0, /* end of file */
+ TOK_EOL = 1, /* end of line */
+ TOK_SEMI = 2, /* semicolon */
+ TOK_COMMA = 3, /* comma operator */
+ TOK_ASSIGN = 4, /* assignment ops (= += -= etc.) */
+ TOK_HOOK = 5, TOK_COLON = 6, /* conditional (?:) */
+ TOK_OR = 7, /* logical or (||) */
+ TOK_AND = 8, /* logical and (&&) */
+ TOK_BITOR = 9, /* bitwise-or (|) */
+ TOK_BITXOR = 10, /* bitwise-xor (^) */
+ TOK_BITAND = 11, /* bitwise-and (&) */
+ TOK_EQOP = 12, /* equality ops (== !=) */
+ TOK_RELOP = 13, /* relational ops (< <= > >=) */
+ TOK_SHOP = 14, /* shift ops (<< >> >>>) */
+ TOK_PLUS = 15, /* plus */
+ TOK_MINUS = 16, /* minus */
+ TOK_STAR = 17, TOK_DIVOP = 18, /* multiply/divide ops (* / %) */
+ TOK_UNARYOP = 19, /* unary prefix operator */
+ TOK_INC = 20, TOK_DEC = 21, /* increment/decrement (++ --) */
+ TOK_DOT = 22, /* member operator (.) */
+ TOK_LB = 23, TOK_RB = 24, /* left and right brackets */
+ TOK_LC = 25, TOK_RC = 26, /* left and right curlies (braces) */
+ TOK_LP = 27, TOK_RP = 28, /* left and right parentheses */
+ TOK_NAME = 29, /* identifier */
+ TOK_NUMBER = 30, /* numeric constant */
+ TOK_STRING = 31, /* string constant */
+ TOK_OBJECT = 32, /* RegExp or other object constant */
+ TOK_PRIMARY = 33, /* true, false, null, this, super */
+ TOK_FUNCTION = 34, /* function keyword */
+ TOK_EXPORT = 35, /* export keyword */
+ TOK_IMPORT = 36, /* import keyword */
+ TOK_IF = 37, /* if keyword */
+ TOK_ELSE = 38, /* else keyword */
+ TOK_SWITCH = 39, /* switch keyword */
+ TOK_CASE = 40, /* case keyword */
+ TOK_DEFAULT = 41, /* default keyword */
+ TOK_WHILE = 42, /* while keyword */
+ TOK_DO = 43, /* do keyword */
+ TOK_FOR = 44, /* for keyword */
+ TOK_BREAK = 45, /* break keyword */
+ TOK_CONTINUE = 46, /* continue keyword */
+ TOK_IN = 47, /* in keyword */
+ TOK_VAR = 48, /* var keyword */
+ TOK_WITH = 49, /* with keyword */
+ TOK_RETURN = 50, /* return keyword */
+ TOK_NEW = 51, /* new keyword */
+ TOK_DELETE = 52, /* delete keyword */
+ TOK_DEFSHARP = 53, /* #n= for object/array initializers */
+ TOK_USESHARP = 54, /* #n# for object/array initializers */
+ TOK_TRY = 55, /* try keyword */
+ TOK_CATCH = 56, /* catch keyword */
+ TOK_FINALLY = 57, /* finally keyword */
+ TOK_THROW = 58, /* throw keyword */
+ TOK_INSTANCEOF = 59, /* instanceof keyword */
+ TOK_DEBUGGER = 60, /* debugger keyword */
+ TOK_XMLSTAGO = 61, /* XML start tag open (<) */
+ TOK_XMLETAGO = 62, /* XML end tag open (</) */
+ TOK_XMLPTAGC = 63, /* XML point tag close (/>) */
+ TOK_XMLTAGC = 64, /* XML start or end tag close (>) */
+ TOK_XMLNAME = 65, /* XML start-tag non-final fragment */
+ TOK_XMLATTR = 66, /* XML quoted attribute value */
+ TOK_XMLSPACE = 67, /* XML whitespace */
+ TOK_XMLTEXT = 68, /* XML text */
+ TOK_XMLCOMMENT = 69, /* XML comment */
+ TOK_XMLCDATA = 70, /* XML CDATA section */
+ TOK_XMLPI = 71, /* XML processing instruction */
+ TOK_AT = 72, /* XML attribute op (@) */
+ TOK_DBLCOLON = 73, /* namespace qualified name op (::) */
+ TOK_ANYNAME = 74, /* XML AnyName singleton (*) */
+ TOK_DBLDOT = 75, /* XML descendant op (..) */
+ TOK_FILTER = 76, /* XML filtering predicate op (.()) */
+ TOK_XMLELEM = 77, /* XML element node type (no token) */
+ TOK_XMLLIST = 78, /* XML list node type (no token) */
+ TOK_YIELD = 79, /* yield from generator function */
+ TOK_ARRAYCOMP = 80, /* array comprehension initialiser */
+ TOK_ARRAYPUSH = 81, /* array push within comprehension */
+ TOK_LEXICALSCOPE = 82, /* block scope AST node label */
+ TOK_LET = 83, /* let keyword */
+ TOK_BODY = 84, /* synthetic body of function with
+ destructuring formal parameters */
+ TOK_RESERVED, /* reserved keywords */
+ TOK_LIMIT /* domain size */
+} JSTokenType;
+
+#define IS_PRIMARY_TOKEN(tt) \
+ ((uintN)((tt) - TOK_NAME) <= (uintN)(TOK_PRIMARY - TOK_NAME))
+
+#define TOKEN_TYPE_IS_XML(tt) \
+ (tt == TOK_AT || tt == TOK_DBLCOLON || tt == TOK_ANYNAME)
+
+#if JS_HAS_BLOCK_SCOPE
+# define TOKEN_TYPE_IS_DECL(tt) ((tt) == TOK_VAR || (tt) == TOK_LET)
+#else
+# define TOKEN_TYPE_IS_DECL(tt) ((tt) == TOK_VAR)
+#endif
+
+struct JSStringBuffer {
+ jschar *base;
+ jschar *limit; /* length limit for quick bounds check */
+ jschar *ptr; /* slot for next non-NUL char to store */
+ void *data;
+ JSBool (*grow)(JSStringBuffer *sb, size_t newlength);
+ void (*free)(JSStringBuffer *sb);
+};
+
+#define STRING_BUFFER_ERROR_BASE ((jschar *) 1)
+#define STRING_BUFFER_OK(sb) ((sb)->base != STRING_BUFFER_ERROR_BASE)
+#define STRING_BUFFER_OFFSET(sb) ((sb)->ptr -(sb)->base)
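+
+/*
+ * Error-handling sketch (assumed convention, not a declaration from this
+ * header): a failed grow is expected to leave sb->base pointing at
+ * STRING_BUFFER_ERROR_BASE, so a caller can issue several appends and test
+ * once afterwards:
+ *
+ *     js_AppendChar(&sb, c1);
+ *     js_AppendCString(&sb, "suffix");
+ *     if (!STRING_BUFFER_OK(&sb))
+ *         return JS_FALSE;
+ */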
+
+extern void
+js_InitStringBuffer(JSStringBuffer *sb);
+
+extern void
+js_FinishStringBuffer(JSStringBuffer *sb);
+
+extern void
+js_AppendChar(JSStringBuffer *sb, jschar c);
+
+extern void
+js_RepeatChar(JSStringBuffer *sb, jschar c, uintN count);
+
+extern void
+js_AppendCString(JSStringBuffer *sb, const char *asciiz);
+
+extern void
+js_AppendJSString(JSStringBuffer *sb, JSString *str);
+
+struct JSTokenPtr {
+ uint16 index; /* index of char in physical line */
+ uint16 lineno; /* physical line number */
+};
+
+struct JSTokenPos {
+ JSTokenPtr begin; /* first character and line of token */
+ JSTokenPtr end; /* index 1 past last char, last line */
+};
+
+struct JSToken {
+ JSTokenType type; /* char value or above enumerator */
+ JSTokenPos pos; /* token position in file */
+ jschar *ptr; /* beginning of token in line buffer */
+ union {
+ struct { /* non-numeric literal */
+ JSOp op; /* operator, for minimal parser */
+ JSAtom *atom; /* atom table entry */
+ } s;
+ struct { /* atom pair, for XML PIs */
+ JSAtom *atom2; /* auxiliary atom table entry */
+ JSAtom *atom; /* main atom table entry */
+ } p;
+ jsdouble dval; /* floating point number */
+ } u;
+};
+
+#define t_op u.s.op
+#define t_atom u.s.atom
+#define t_atom2 u.p.atom2
+#define t_dval u.dval
+
+typedef struct JSTokenBuf {
+ jschar *base; /* base of line or stream buffer */
+ jschar *limit; /* limit for quick bounds check */
+ jschar *ptr; /* next char to get, or slot to use */
+} JSTokenBuf;
+
+#define JS_LINE_LIMIT 256 /* logical line buffer size limit --
+ physical line length is unlimited */
+#define NTOKENS 4 /* 1 current + 2 lookahead, rounded */
+#define NTOKENS_MASK (NTOKENS-1) /* to power of 2 to avoid divmod by 3 */
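+
+/*
+ * Cursor arithmetic sketch: because NTOKENS is a power of two, advancing and
+ * retreating in the circular token buffer wrap with a mask rather than a
+ * modulo.  With cursor == 3, for example,
+ *
+ *     ((3 + 1) & NTOKENS_MASK) == 0    and    ((0 - 1) & NTOKENS_MASK) == 3
+ *
+ * the second because uintN arithmetic wraps around on underflow.
+ */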
+
+struct JSTokenStream {
+ JSToken tokens[NTOKENS];/* circular token buffer */
+ uintN cursor; /* index of last parsed token */
+ uintN lookahead; /* count of lookahead tokens */
+ uintN lineno; /* current line number */
+ uintN ungetpos; /* next free char slot in ungetbuf */
+ jschar ungetbuf[6]; /* at most 6, for \uXXXX lookahead */
+ uintN flags; /* flags -- see below */
+ ptrdiff_t linelen; /* physical linebuf segment length */
+ ptrdiff_t linepos; /* linebuf offset in physical line */
+ JSTokenBuf linebuf; /* line buffer for diagnostics */
+ JSTokenBuf userbuf; /* user input buffer if !file */
+ JSStringBuffer tokenbuf; /* current token string buffer */
+ const char *filename; /* input filename or null */
+ FILE *file; /* stdio stream if reading from file */
+ JSPrincipals *principals; /* principals associated with source */
+ JSSourceHandler listener; /* callback for source; eg debugger */
+ void *listenerData; /* listener 'this' data */
+ void *listenerTSData;/* listener data for this TokenStream */
+ jschar *saveEOL; /* save next end of line in userbuf, to
+ optimize for very long lines */
+};
+
+#define CURRENT_TOKEN(ts) ((ts)->tokens[(ts)->cursor])
+#define ON_CURRENT_LINE(ts,pos) ((uint16)(ts)->lineno == (pos).end.lineno)
+
+/* JSTokenStream flags */
+#define TSF_ERROR 0x01 /* fatal error while compiling */
+#define TSF_EOF 0x02 /* hit end of file */
+#define TSF_NEWLINES 0x04 /* tokenize newlines */
+#define TSF_OPERAND 0x08 /* looking for operand, not operator */
+#define TSF_NLFLAG 0x20 /* last linebuf ended with \n */
+#define TSF_CRFLAG 0x40 /* linebuf would have ended with \r */
+#define TSF_DIRTYLINE 0x80 /* non-whitespace since start of line */
+#define TSF_OWNFILENAME 0x100 /* ts->filename is malloc'd */
+#define TSF_XMLTAGMODE 0x200 /* scanning within an XML tag in E4X */
+#define TSF_XMLTEXTMODE 0x400 /* scanning XMLText terminal from E4X */
+#define TSF_XMLONLYMODE 0x800 /* don't scan {expr} within text/tag */
+
+/* Flag indicating unexpected end of input, i.e. TOK_EOF not at top-level. */
+#define TSF_UNEXPECTED_EOF 0x1000
+
+/*
+ * To handle the hard case of contiguous HTML comments, we want to clear the
+ * TSF_DIRTYLINE flag at the end of each such comment.  But we'd rather not
+ * scan for --> within every //-style comment unless we have to.  So we set
+ * TSF_IN_HTML_COMMENT when a <!-- is scanned as an HTML begin-comment, and
+ * clear it (and TSF_DIRTYLINE) when we scan --> either on a clean line, or
+ * only if (ts->flags & TSF_IN_HTML_COMMENT), in a //-style comment.
+ *
+ * This still works as before given a malformed comment hiding hack such as:
+ *
+ * <script>
+ * <!-- comment hiding hack #1
+ * code goes here
+ * // --> oops, markup for script-unaware browsers goes here!
+ * </script>
+ *
+ * It does not cope with malformed comment hiding hacks where --> is hidden
+ * by C-style comments, or on a dirty line. Such cases are already broken.
+ */
+#define TSF_IN_HTML_COMMENT 0x2000
+
+/* Ignore keywords and return TOK_NAME instead to the parser. */
+#define TSF_KEYWORD_IS_NAME 0x4000
+
+/* Unicode separators that are treated as line terminators, in addition to \n, \r */
+#define LINE_SEPARATOR 0x2028
+#define PARA_SEPARATOR 0x2029
+
+/*
+ * Create a new token stream, either from an input buffer or from a file.
+ * Return null on file-open or memory-allocation failure.
+ *
+ * NB: All of js_New{,Buffer,File}TokenStream() return a pointer to transient
+ * memory in the current context's temp pool. This memory is deallocated via
+ * JS_ARENA_RELEASE() after parsing is finished.
+ */
+extern JSTokenStream *
+js_NewTokenStream(JSContext *cx, const jschar *base, size_t length,
+ const char *filename, uintN lineno, JSPrincipals *principals);
+
+extern JS_FRIEND_API(JSTokenStream *)
+js_NewBufferTokenStream(JSContext *cx, const jschar *base, size_t length);
+
+extern JS_FRIEND_API(JSTokenStream *)
+js_NewFileTokenStream(JSContext *cx, const char *filename, FILE *defaultfp);
+
+extern JS_FRIEND_API(JSBool)
+js_CloseTokenStream(JSContext *cx, JSTokenStream *ts);
+
+extern JS_FRIEND_API(int)
+js_fgets(char *buf, int size, FILE *file);
+
+/*
+ * If the given char array forms a JavaScript keyword, return the
+ * corresponding token type.  Otherwise return TOK_EOF.
+ */
+extern JSTokenType
+js_CheckKeyword(const jschar *chars, size_t length);
+
+#define js_IsKeyword(chars, length) \
+ (js_CheckKeyword(chars, length) != TOK_EOF)
+
+/*
+ * Friend-exported API entry point to call a mapping function on each reserved
+ * identifier in the scanner's keyword table.
+ */
+extern JS_FRIEND_API(void)
+js_MapKeywords(void (*mapfun)(const char *));
+
+/*
+ * Report a compile-time error by its number, using ts or cg to show context.
+ * Return true for a warning, false for an error.
+ */
+extern JSBool
+js_ReportCompileErrorNumber(JSContext *cx, void *handle, uintN flags,
+ uintN errorNumber, ...);
+
+extern JSBool
+js_ReportCompileErrorNumberUC(JSContext *cx, void *handle, uintN flags,
+ uintN errorNumber, ...);
+
+/* Steal some JSREPORT_* bits (see jsapi.h) to tell handle's type. */
+#define JSREPORT_HANDLE 0x300
+#define JSREPORT_TS 0x000
+#define JSREPORT_CG 0x100
+#define JSREPORT_PN 0x200
+
+/*
+ * Look ahead one token and return its type.
+ */
+extern JSTokenType
+js_PeekToken(JSContext *cx, JSTokenStream *ts);
+
+extern JSTokenType
+js_PeekTokenSameLine(JSContext *cx, JSTokenStream *ts);
+
+/*
+ * Get the next token from ts.
+ */
+extern JSTokenType
+js_GetToken(JSContext *cx, JSTokenStream *ts);
+
+/*
+ * Push back the last scanned token onto ts.
+ */
+extern void
+js_UngetToken(JSTokenStream *ts);
+
+/*
+ * Get the next token from ts if its type is tt.
+ */
+extern JSBool
+js_MatchToken(JSContext *cx, JSTokenStream *ts, JSTokenType tt);
+
+JS_END_EXTERN_C
+
+#endif /* jsscan_h___ */
diff --git a/src/third_party/js-1.7/jsscope.c b/src/third_party/js-1.7/jsscope.c
new file mode 100644
index 00000000000..49b55a6fbfd
--- /dev/null
+++ b/src/third_party/js-1.7/jsscope.c
@@ -0,0 +1,1776 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS symbol tables.
+ */
+#include "jsstddef.h"
+#include <stdlib.h>
+#include <string.h>
+#include "jstypes.h"
+#include "jsarena.h"
+#include "jsbit.h"
+#include "jsclist.h"
+#include "jsdhash.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsapi.h"
+#include "jsatom.h"
+#include "jscntxt.h"
+#include "jsdbgapi.h"
+#include "jslock.h"
+#include "jsnum.h"
+#include "jsscope.h"
+#include "jsstr.h"
+
+JSScope *
+js_GetMutableScope(JSContext *cx, JSObject *obj)
+{
+ JSScope *scope, *newscope;
+
+ scope = OBJ_SCOPE(obj);
+ JS_ASSERT(JS_IS_SCOPE_LOCKED(cx, scope));
+ if (scope->object == obj)
+ return scope;
+ newscope = js_NewScope(cx, 0, scope->map.ops, LOCKED_OBJ_GET_CLASS(obj),
+ obj);
+ if (!newscope)
+ return NULL;
+ JS_LOCK_SCOPE(cx, newscope);
+ obj->map = js_HoldObjectMap(cx, &newscope->map);
+ scope = (JSScope *) js_DropObjectMap(cx, &scope->map, obj);
+ JS_TRANSFER_SCOPE_LOCK(cx, scope, newscope);
+ return newscope;
+}
+
+/*
+ * JSScope uses multiplicative hashing, _a la_ jsdhash.[ch], but specialized
+ * to minimize footprint. But if a scope has fewer than SCOPE_HASH_THRESHOLD
+ * entries, we use linear search and avoid allocating scope->table.
+ */
+#define SCOPE_HASH_THRESHOLD 6
+#define MIN_SCOPE_SIZE_LOG2 4
+#define MIN_SCOPE_SIZE JS_BIT(MIN_SCOPE_SIZE_LOG2)
+#define SCOPE_TABLE_NBYTES(n) ((n) * sizeof(JSScopeProperty *))
+
+static void
+InitMinimalScope(JSScope *scope)
+{
+ scope->hashShift = JS_DHASH_BITS - MIN_SCOPE_SIZE_LOG2;
+ scope->entryCount = scope->removedCount = 0;
+ scope->table = NULL;
+ scope->lastProp = NULL;
+}
+
+static JSBool
+CreateScopeTable(JSContext *cx, JSScope *scope, JSBool report)
+{
+ int sizeLog2;
+ JSScopeProperty *sprop, **spp;
+
+ JS_ASSERT(!scope->table);
+ JS_ASSERT(scope->lastProp);
+
+ if (scope->entryCount > SCOPE_HASH_THRESHOLD) {
+ /*
+ * Ouch: calloc failed at least once already -- let's try again,
+ * overallocating to hold at least twice the current population.
+ */
+ sizeLog2 = JS_CeilingLog2(2 * scope->entryCount);
+ scope->hashShift = JS_DHASH_BITS - sizeLog2;
+ } else {
+ JS_ASSERT(scope->hashShift == JS_DHASH_BITS - MIN_SCOPE_SIZE_LOG2);
+ sizeLog2 = MIN_SCOPE_SIZE_LOG2;
+ }
+
+ scope->table = (JSScopeProperty **)
+ calloc(JS_BIT(sizeLog2), sizeof(JSScopeProperty *));
+ if (!scope->table) {
+ if (report)
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ js_UpdateMallocCounter(cx, JS_BIT(sizeLog2) * sizeof(JSScopeProperty *));
+
+ scope->hashShift = JS_DHASH_BITS - sizeLog2;
+ for (sprop = scope->lastProp; sprop; sprop = sprop->parent) {
+ spp = js_SearchScope(scope, sprop->id, JS_TRUE);
+ SPROP_STORE_PRESERVING_COLLISION(spp, sprop);
+ }
+ return JS_TRUE;
+}
+
+JSScope *
+js_NewScope(JSContext *cx, jsrefcount nrefs, JSObjectOps *ops, JSClass *clasp,
+ JSObject *obj)
+{
+ JSScope *scope;
+
+ scope = (JSScope *) JS_malloc(cx, sizeof(JSScope));
+ if (!scope)
+ return NULL;
+
+ js_InitObjectMap(&scope->map, nrefs, ops, clasp);
+ scope->object = obj;
+ scope->flags = 0;
+ InitMinimalScope(scope);
+
+#ifdef JS_THREADSAFE
+ scope->ownercx = cx;
+ memset(&scope->lock, 0, sizeof scope->lock);
+
+ /*
+ * Set u.link = NULL, not u.count = 0, in case the target architecture's
+ * null pointer has a non-zero integer representation.
+ */
+ scope->u.link = NULL;
+
+#ifdef DEBUG
+ scope->file[0] = scope->file[1] = scope->file[2] = scope->file[3] = NULL;
+ scope->line[0] = scope->line[1] = scope->line[2] = scope->line[3] = 0;
+#endif
+#endif
+
+ JS_RUNTIME_METER(cx->runtime, liveScopes);
+ JS_RUNTIME_METER(cx->runtime, totalScopes);
+ return scope;
+}
+
+#ifdef DEBUG_SCOPE_COUNT
+extern void
+js_unlog_scope(JSScope *scope);
+#endif
+
+void
+js_DestroyScope(JSContext *cx, JSScope *scope)
+{
+#ifdef DEBUG_SCOPE_COUNT
+ js_unlog_scope(scope);
+#endif
+
+#ifdef JS_THREADSAFE
+ /* Scope must be single-threaded at this point, so set scope->ownercx. */
+ JS_ASSERT(scope->u.count == 0);
+ scope->ownercx = cx;
+ js_FinishLock(&scope->lock);
+#endif
+ if (scope->table)
+ JS_free(cx, scope->table);
+
+#ifdef DEBUG
+ JS_LOCK_RUNTIME_VOID(cx->runtime,
+ cx->runtime->liveScopeProps -= scope->entryCount);
+#endif
+ JS_RUNTIME_UNMETER(cx->runtime, liveScopes);
+ JS_free(cx, scope);
+}
+
+#ifdef DUMP_SCOPE_STATS
+typedef struct JSScopeStats {
+ jsrefcount searches;
+ jsrefcount steps;
+ jsrefcount hits;
+ jsrefcount misses;
+ jsrefcount stepHits;
+ jsrefcount stepMisses;
+ jsrefcount adds;
+ jsrefcount redundantAdds;
+ jsrefcount addFailures;
+ jsrefcount changeFailures;
+ jsrefcount compresses;
+ jsrefcount grows;
+ jsrefcount removes;
+ jsrefcount removeFrees;
+ jsrefcount uselessRemoves;
+ jsrefcount shrinks;
+} JSScopeStats;
+
+JS_FRIEND_DATA(JSScopeStats) js_scope_stats;
+
+# define METER(x) JS_ATOMIC_INCREMENT(&js_scope_stats.x)
+#else
+# define METER(x) /* nothing */
+#endif
+
+/*
+ * Double hashing needs the second hash code to be relatively prime to table
+ * size, so we simply make hash2 odd. The inputs to multiplicative hash are
+ * the golden ratio, expressed as a fixed-point 32 bit fraction, and the int
+ * property index or named property's atom number (observe that most objects
+ * have either no indexed properties, or almost all indexed and a few names,
+ * so collisions between index and atom number are unlikely).
+ */
+#define SCOPE_HASH0(id) (HASH_ID(id) * JS_GOLDEN_RATIO)
+#define SCOPE_HASH1(hash0,shift) ((hash0) >> (shift))
+#define SCOPE_HASH2(hash0,log2,shift) ((((hash0) << (log2)) >> (shift)) | 1)
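+
+/*
+ * Probe-sequence sketch (illustrative numbers only): with a 16-slot table
+ * (sizeLog2 == 4), hash1 == 5 and hash2 == 3, js_SearchScope below visits
+ * slots 5, 2, 15, 12, 9, ... and eventually every slot exactly once, since
+ * the forced-odd hash2 is relatively prime to the power-of-two table size.
+ */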
+
+JS_FRIEND_API(JSScopeProperty **)
+js_SearchScope(JSScope *scope, jsid id, JSBool adding)
+{
+ JSHashNumber hash0, hash1, hash2;
+ int hashShift, sizeLog2;
+ JSScopeProperty *stored, *sprop, **spp, **firstRemoved;
+ uint32 sizeMask;
+
+ METER(searches);
+ if (!scope->table) {
+ /* Not enough properties to justify hashing: search from lastProp. */
+ JS_ASSERT(!SCOPE_HAD_MIDDLE_DELETE(scope));
+ for (spp = &scope->lastProp; (sprop = *spp); spp = &sprop->parent) {
+ if (sprop->id == id) {
+ METER(hits);
+ return spp;
+ }
+ }
+ METER(misses);
+ return spp;
+ }
+
+ /* Compute the primary hash address. */
+ hash0 = SCOPE_HASH0(id);
+ hashShift = scope->hashShift;
+ hash1 = SCOPE_HASH1(hash0, hashShift);
+ spp = scope->table + hash1;
+
+ /* Miss: return space for a new entry. */
+ stored = *spp;
+ if (SPROP_IS_FREE(stored)) {
+ METER(misses);
+ return spp;
+ }
+
+ /* Hit: return entry. */
+ sprop = SPROP_CLEAR_COLLISION(stored);
+ if (sprop && sprop->id == id) {
+ METER(hits);
+ return spp;
+ }
+
+ /* Collision: double hash. */
+ sizeLog2 = JS_DHASH_BITS - hashShift;
+ hash2 = SCOPE_HASH2(hash0, sizeLog2, hashShift);
+ sizeMask = JS_BITMASK(sizeLog2);
+
+ /* Save the first removed entry pointer so we can recycle it if adding. */
+ if (SPROP_IS_REMOVED(stored)) {
+ firstRemoved = spp;
+ } else {
+ firstRemoved = NULL;
+ if (adding && !SPROP_HAD_COLLISION(stored))
+ SPROP_FLAG_COLLISION(spp, sprop);
+ }
+
+ for (;;) {
+ METER(steps);
+ hash1 -= hash2;
+ hash1 &= sizeMask;
+ spp = scope->table + hash1;
+
+ stored = *spp;
+ if (SPROP_IS_FREE(stored)) {
+ METER(stepMisses);
+ return (adding && firstRemoved) ? firstRemoved : spp;
+ }
+
+ sprop = SPROP_CLEAR_COLLISION(stored);
+ if (sprop && sprop->id == id) {
+ METER(stepHits);
+ return spp;
+ }
+
+ if (SPROP_IS_REMOVED(stored)) {
+ if (!firstRemoved)
+ firstRemoved = spp;
+ } else {
+ if (adding && !SPROP_HAD_COLLISION(stored))
+ SPROP_FLAG_COLLISION(spp, sprop);
+ }
+ }
+
+ /* NOTREACHED */
+ return NULL;
+}
+
+static JSBool
+ChangeScope(JSContext *cx, JSScope *scope, int change)
+{
+ int oldlog2, newlog2;
+ uint32 oldsize, newsize, nbytes;
+ JSScopeProperty **table, **oldtable, **spp, **oldspp, *sprop;
+
+ /* Grow, shrink, or compress by changing scope->table. */
+ oldlog2 = JS_DHASH_BITS - scope->hashShift;
+ newlog2 = oldlog2 + change;
+ oldsize = JS_BIT(oldlog2);
+ newsize = JS_BIT(newlog2);
+ nbytes = SCOPE_TABLE_NBYTES(newsize);
+ table = (JSScopeProperty **) calloc(nbytes, 1);
+ if (!table) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+
+ /* Now that we have a new table allocated, update scope members. */
+ scope->hashShift = JS_DHASH_BITS - newlog2;
+ scope->removedCount = 0;
+ oldtable = scope->table;
+ scope->table = table;
+
+ /* Treat the above calloc as a JS_malloc, to match CreateScopeTable. */
+ cx->runtime->gcMallocBytes += nbytes;
+
+ /* Copy only live entries, leaving removed and free ones behind. */
+ for (oldspp = oldtable; oldsize != 0; oldspp++) {
+ sprop = SPROP_FETCH(oldspp);
+ if (sprop) {
+ spp = js_SearchScope(scope, sprop->id, JS_TRUE);
+ JS_ASSERT(SPROP_IS_FREE(*spp));
+ *spp = sprop;
+ }
+ oldsize--;
+ }
+
+ /* Finally, free the old table storage. */
+ JS_free(cx, oldtable);
+ return JS_TRUE;
+}
+
+/*
+ * Take care to exclude the mark and duplicate bits, in case we're called from
+ * the GC, or we are searching for a property that has not yet been flagged as
+ * a duplicate when making a duplicate formal parameter.
+ */
+#define SPROP_FLAGS_NOT_MATCHED (SPROP_MARK | SPROP_IS_DUPLICATE)
+
+JS_STATIC_DLL_CALLBACK(JSDHashNumber)
+js_HashScopeProperty(JSDHashTable *table, const void *key)
+{
+ const JSScopeProperty *sprop = (const JSScopeProperty *)key;
+ JSDHashNumber hash;
+ JSPropertyOp gsop;
+
+ /* Accumulate from least to most random so the low bits are most random. */
+ hash = 0;
+ gsop = sprop->getter;
+ if (gsop)
+ hash = (hash >> (JS_DHASH_BITS - 4)) ^ (hash << 4) ^ (jsword)gsop;
+ gsop = sprop->setter;
+ if (gsop)
+ hash = (hash >> (JS_DHASH_BITS - 4)) ^ (hash << 4) ^ (jsword)gsop;
+
+ hash = (hash >> (JS_DHASH_BITS - 4)) ^ (hash << 4)
+ ^ (sprop->flags & ~SPROP_FLAGS_NOT_MATCHED);
+
+ hash = (hash >> (JS_DHASH_BITS - 4)) ^ (hash << 4) ^ sprop->attrs;
+ hash = (hash >> (JS_DHASH_BITS - 4)) ^ (hash << 4) ^ sprop->shortid;
+ hash = (hash >> (JS_DHASH_BITS - 4)) ^ (hash << 4) ^ sprop->slot;
+ hash = (hash >> (JS_DHASH_BITS - 4)) ^ (hash << 4) ^ sprop->id;
+ return hash;
+}
+
+#define SPROP_MATCH(sprop, child) \
+ SPROP_MATCH_PARAMS(sprop, (child)->id, (child)->getter, (child)->setter, \
+ (child)->slot, (child)->attrs, (child)->flags, \
+ (child)->shortid)
+
+#define SPROP_MATCH_PARAMS(sprop, aid, agetter, asetter, aslot, aattrs, \
+ aflags, ashortid) \
+ ((sprop)->id == (aid) && \
+ SPROP_MATCH_PARAMS_AFTER_ID(sprop, agetter, asetter, aslot, aattrs, \
+ aflags, ashortid))
+
+#define SPROP_MATCH_PARAMS_AFTER_ID(sprop, agetter, asetter, aslot, aattrs, \
+ aflags, ashortid) \
+ ((sprop)->getter == (agetter) && \
+ (sprop)->setter == (asetter) && \
+ (sprop)->slot == (aslot) && \
+ (sprop)->attrs == (aattrs) && \
+ (((sprop)->flags ^ (aflags)) & ~SPROP_FLAGS_NOT_MATCHED) == 0 && \
+ (sprop)->shortid == (ashortid))
+
+JS_STATIC_DLL_CALLBACK(JSBool)
+js_MatchScopeProperty(JSDHashTable *table,
+ const JSDHashEntryHdr *hdr,
+ const void *key)
+{
+ const JSPropertyTreeEntry *entry = (const JSPropertyTreeEntry *)hdr;
+ const JSScopeProperty *sprop = entry->child;
+ const JSScopeProperty *kprop = (const JSScopeProperty *)key;
+
+ return SPROP_MATCH(sprop, kprop);
+}
+
+static const JSDHashTableOps PropertyTreeHashOps = {
+ JS_DHashAllocTable,
+ JS_DHashFreeTable,
+ JS_DHashGetKeyStub,
+ js_HashScopeProperty,
+ js_MatchScopeProperty,
+ JS_DHashMoveEntryStub,
+ JS_DHashClearEntryStub,
+ JS_DHashFinalizeStub,
+ NULL
+};
+
+/*
+ * A property tree node on rt->propertyFreeList overlays the following prefix
+ * struct on JSScopeProperty.
+ */
+typedef struct FreeNode {
+ jsid id;
+ JSScopeProperty *next;
+ JSScopeProperty **prevp;
+} FreeNode;
+
+#define FREENODE(sprop) ((FreeNode *) (sprop))
+
+#define FREENODE_INSERT(list, sprop) \
+ JS_BEGIN_MACRO \
+ FREENODE(sprop)->next = (list); \
+ FREENODE(sprop)->prevp = &(list); \
+ if (list) \
+ FREENODE(list)->prevp = &FREENODE(sprop)->next; \
+ (list) = (sprop); \
+ JS_END_MACRO
+
+#define FREENODE_REMOVE(sprop) \
+ JS_BEGIN_MACRO \
+ *FREENODE(sprop)->prevp = FREENODE(sprop)->next; \
+ if (FREENODE(sprop)->next) \
+ FREENODE(FREENODE(sprop)->next)->prevp = FREENODE(sprop)->prevp; \
+ JS_END_MACRO
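+
+/*
+ * Freelist bookkeeping sketch: prevp always points at whichever field holds
+ * the node (rt->propertyFreeList itself, or the previous node's next), so
+ * FREENODE_REMOVE needs no list-head argument and unlinks in O(1):
+ *
+ *     FREENODE_INSERT(rt->propertyFreeList, sprop);
+ *     ...
+ *     FREENODE_REMOVE(sprop);
+ */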
+
+/* NB: Called with the runtime lock held. */
+static JSScopeProperty *
+NewScopeProperty(JSRuntime *rt)
+{
+ JSScopeProperty *sprop;
+
+ sprop = rt->propertyFreeList;
+ if (sprop) {
+ FREENODE_REMOVE(sprop);
+ } else {
+ JS_ARENA_ALLOCATE_CAST(sprop, JSScopeProperty *,
+ &rt->propertyArenaPool,
+ sizeof(JSScopeProperty));
+ if (!sprop)
+ return NULL;
+ }
+
+ JS_RUNTIME_METER(rt, livePropTreeNodes);
+ JS_RUNTIME_METER(rt, totalPropTreeNodes);
+ return sprop;
+}
+
+#define CHUNKY_KIDS_TAG ((jsuword)1)
+#define KIDS_IS_CHUNKY(kids) ((jsuword)(kids) & CHUNKY_KIDS_TAG)
+#define KIDS_TO_CHUNK(kids) ((PropTreeKidsChunk *) \
+ ((jsuword)(kids) & ~CHUNKY_KIDS_TAG))
+#define CHUNK_TO_KIDS(chunk) ((JSScopeProperty *) \
+ ((jsuword)(chunk) | CHUNKY_KIDS_TAG))
+#define MAX_KIDS_PER_CHUNK 10
+
+typedef struct PropTreeKidsChunk PropTreeKidsChunk;
+
+struct PropTreeKidsChunk {
+ JSScopeProperty *kids[MAX_KIDS_PER_CHUNK];
+ PropTreeKidsChunk *next;
+};
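+
+/*
+ * Tagged-pointer sketch: chunks come from calloc and are at least pointer
+ * aligned, so the low bit of a chunk address is free to serve as
+ * CHUNKY_KIDS_TAG (NewPropTreeKidsChunk asserts this below).  A reader of
+ * parent->kids distinguishes the two encodings like so:
+ *
+ *     if (KIDS_IS_CHUNKY(parent->kids))
+ *         chunk = KIDS_TO_CHUNK(parent->kids);
+ *     else
+ *         onlyKid = parent->kids;
+ */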
+
+static PropTreeKidsChunk *
+NewPropTreeKidsChunk(JSRuntime *rt)
+{
+ PropTreeKidsChunk *chunk;
+
+ chunk = calloc(1, sizeof *chunk);
+ if (!chunk)
+ return NULL;
+ JS_ASSERT(((jsuword)chunk & CHUNKY_KIDS_TAG) == 0);
+ JS_RUNTIME_METER(rt, propTreeKidsChunks);
+ return chunk;
+}
+
+static void
+DestroyPropTreeKidsChunk(JSRuntime *rt, PropTreeKidsChunk *chunk)
+{
+ JS_RUNTIME_UNMETER(rt, propTreeKidsChunks);
+ free(chunk);
+}
+
+/* NB: Called with the runtime lock held. */
+static JSBool
+InsertPropertyTreeChild(JSRuntime *rt, JSScopeProperty *parent,
+ JSScopeProperty *child, PropTreeKidsChunk *sweptChunk)
+{
+ JSPropertyTreeEntry *entry;
+ JSScopeProperty **childp, *kids, *sprop;
+ PropTreeKidsChunk *chunk, **chunkp;
+ uintN i;
+
+ JS_ASSERT(!parent || child->parent != parent);
+
+ if (!parent) {
+ entry = (JSPropertyTreeEntry *)
+ JS_DHashTableOperate(&rt->propertyTreeHash, child, JS_DHASH_ADD);
+ if (!entry)
+ return JS_FALSE;
+ childp = &entry->child;
+ sprop = *childp;
+ if (!sprop) {
+ *childp = child;
+ } else {
+ /*
+ * A "Duplicate child" case.
+ *
+ * We can't do away with child, as at least one live scope entry
+ * still points at it. What's more, that scope's lastProp chains
+ * through an ancestor line to reach child, and js_Enumerate and
+ * others count on this linkage. We must leave child out of the
+ * hash table, and not require it to be there when we eventually
+ * GC it (see RemovePropertyTreeChild, below).
+ *
+ * It is necessary to leave the duplicate child out of the hash
+ * table to preserve entry uniqueness. It is safe to leave the
+ * child out of the hash table (unlike the duplicate child cases
+ * below), because the child's parent link will be null, which
+ * can't dangle.
+ */
+ JS_ASSERT(sprop != child && SPROP_MATCH(sprop, child));
+ JS_RUNTIME_METER(rt, duplicatePropTreeNodes);
+ }
+ } else {
+ childp = &parent->kids;
+ kids = *childp;
+ if (kids) {
+ if (KIDS_IS_CHUNKY(kids)) {
+ chunk = KIDS_TO_CHUNK(kids);
+ do {
+ for (i = 0; i < MAX_KIDS_PER_CHUNK; i++) {
+ childp = &chunk->kids[i];
+ sprop = *childp;
+ if (!sprop)
+ goto insert;
+
+ JS_ASSERT(sprop != child);
+ if (SPROP_MATCH(sprop, child)) {
+ /*
+ * Duplicate child, see comment above. In this
+ * case, we must let the duplicate be inserted at
+ * this level in the tree, so we keep iterating,
+ * looking for an empty slot in which to insert.
+ */
+ JS_ASSERT(sprop != child);
+ JS_RUNTIME_METER(rt, duplicatePropTreeNodes);
+ }
+ }
+ chunkp = &chunk->next;
+ } while ((chunk = *chunkp) != NULL);
+
+ if (sweptChunk) {
+ chunk = sweptChunk;
+ } else {
+ chunk = NewPropTreeKidsChunk(rt);
+ if (!chunk)
+ return JS_FALSE;
+ }
+ *chunkp = chunk;
+ childp = &chunk->kids[0];
+ } else {
+ sprop = kids;
+ JS_ASSERT(sprop != child);
+ if (SPROP_MATCH(sprop, child)) {
+ /*
+ * Duplicate child, see comment above. Once again, we
+ * must let duplicates created by deletion pile up in a
+ * kids-chunk-list, in order to find them when sweeping
+ * and thereby avoid dangling parent pointers.
+ */
+ JS_RUNTIME_METER(rt, duplicatePropTreeNodes);
+ }
+ if (sweptChunk) {
+ chunk = sweptChunk;
+ } else {
+ chunk = NewPropTreeKidsChunk(rt);
+ if (!chunk)
+ return JS_FALSE;
+ }
+ parent->kids = CHUNK_TO_KIDS(chunk);
+ chunk->kids[0] = sprop;
+ childp = &chunk->kids[1];
+ }
+ }
+ insert:
+ *childp = child;
+ }
+
+ child->parent = parent;
+ return JS_TRUE;
+}
+
+/* NB: Called with the runtime lock held. */
+static PropTreeKidsChunk *
+RemovePropertyTreeChild(JSRuntime *rt, JSScopeProperty *child)
+{
+ JSPropertyTreeEntry *entry;
+ JSScopeProperty *parent, *kids, *kid;
+ PropTreeKidsChunk *list, *chunk, **chunkp, *lastChunk;
+ uintN i, j;
+
+ parent = child->parent;
+ if (!parent) {
+ /*
+ * Don't remove child if it is not in rt->propertyTreeHash, but only
+ * matches a root child in the table that has compatible members. See
+ * the "Duplicate child" comments in InsertPropertyTreeChild, above.
+ */
+ entry = (JSPropertyTreeEntry *)
+ JS_DHashTableOperate(&rt->propertyTreeHash, child, JS_DHASH_LOOKUP);
+
+ if (entry->child == child)
+ JS_DHashTableRawRemove(&rt->propertyTreeHash, &entry->hdr);
+ } else {
+ kids = parent->kids;
+ if (KIDS_IS_CHUNKY(kids)) {
+ list = chunk = KIDS_TO_CHUNK(kids);
+ chunkp = &list;
+
+ do {
+ for (i = 0; i < MAX_KIDS_PER_CHUNK; i++) {
+ if (chunk->kids[i] == child) {
+ lastChunk = chunk;
+ if (!lastChunk->next) {
+ j = i + 1;
+ } else {
+ j = 0;
+ do {
+ chunkp = &lastChunk->next;
+ lastChunk = *chunkp;
+ } while (lastChunk->next);
+ }
+ for (; j < MAX_KIDS_PER_CHUNK; j++) {
+ if (!lastChunk->kids[j])
+ break;
+ }
+ --j;
+ if (chunk != lastChunk || j > i)
+ chunk->kids[i] = lastChunk->kids[j];
+ lastChunk->kids[j] = NULL;
+ if (j == 0) {
+ *chunkp = NULL;
+ if (!list)
+ parent->kids = NULL;
+ return lastChunk;
+ }
+ return NULL;
+ }
+ }
+
+ chunkp = &chunk->next;
+ } while ((chunk = *chunkp) != NULL);
+ } else {
+ kid = kids;
+ if (kid == child)
+ parent->kids = NULL;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * Called *without* the runtime lock held, this function acquires that lock
+ * only when inserting a new child. Thus there may be races to find or add
+ * a node that result in duplicates. We expect such races to be rare!
+ */
+static JSScopeProperty *
+GetPropertyTreeChild(JSContext *cx, JSScopeProperty *parent,
+ JSScopeProperty *child)
+{
+ JSRuntime *rt;
+ JSPropertyTreeEntry *entry;
+ JSScopeProperty *sprop;
+ PropTreeKidsChunk *chunk;
+ uintN i;
+
+ rt = cx->runtime;
+ if (!parent) {
+ JS_LOCK_RUNTIME(rt);
+
+ entry = (JSPropertyTreeEntry *)
+ JS_DHashTableOperate(&rt->propertyTreeHash, child, JS_DHASH_ADD);
+ if (!entry)
+ goto out_of_memory;
+
+ sprop = entry->child;
+ if (sprop)
+ goto out;
+ } else {
+ /*
+ * Because chunks are appended at the end and never deleted except by
+ * the GC, we can search without taking the runtime lock. We may miss
+ * a matching sprop added by another thread, and make a duplicate one,
+ * but that is an unlikely, therefore small, cost. The property tree
+ * has extremely low fan-out below its root in popular embeddings with
+ * real-world workloads.
+ *
+ * If workload changes so as to increase fan-out significantly below
+ * the property tree root, we'll want to add another tag bit stored in
+ * parent->kids that indicates a JSDHashTable pointer.
+ */
+ entry = NULL;
+ sprop = parent->kids;
+ if (sprop) {
+ if (KIDS_IS_CHUNKY(sprop)) {
+ chunk = KIDS_TO_CHUNK(sprop);
+ do {
+ for (i = 0; i < MAX_KIDS_PER_CHUNK; i++) {
+ sprop = chunk->kids[i];
+ if (!sprop)
+ goto not_found;
+
+ if (SPROP_MATCH(sprop, child))
+ return sprop;
+ }
+ } while ((chunk = chunk->next) != NULL);
+ } else {
+ if (SPROP_MATCH(sprop, child))
+ return sprop;
+ }
+ }
+
+ not_found:
+ JS_LOCK_RUNTIME(rt);
+ }
+
+ sprop = NewScopeProperty(rt);
+ if (!sprop)
+ goto out_of_memory;
+
+ sprop->id = child->id;
+ sprop->getter = child->getter;
+ sprop->setter = child->setter;
+ sprop->slot = child->slot;
+ sprop->attrs = child->attrs;
+ sprop->flags = child->flags;
+ sprop->shortid = child->shortid;
+ sprop->parent = sprop->kids = NULL;
+ if (!parent) {
+ entry->child = sprop;
+ } else {
+ if (!InsertPropertyTreeChild(rt, parent, sprop, NULL))
+ goto out_of_memory;
+ }
+
+out:
+ JS_UNLOCK_RUNTIME(rt);
+ return sprop;
+
+out_of_memory:
+ JS_UNLOCK_RUNTIME(rt);
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+}
+
+#ifdef DEBUG_notbrendan
+#define CHECK_ANCESTOR_LINE(scope, sparse) \
+ JS_BEGIN_MACRO \
+ if ((scope)->table) CheckAncestorLine(scope, sparse); \
+ JS_END_MACRO
+
+static void
+CheckAncestorLine(JSScope *scope, JSBool sparse)
+{
+ uint32 size;
+ JSScopeProperty **spp, **start, **end, *ancestorLine, *sprop, *aprop;
+ uint32 entryCount, ancestorCount;
+
+ ancestorLine = SCOPE_LAST_PROP(scope);
+ if (ancestorLine)
+ JS_ASSERT(SCOPE_HAS_PROPERTY(scope, ancestorLine));
+
+ entryCount = 0;
+ size = SCOPE_CAPACITY(scope);
+ start = scope->table;
+ for (spp = start, end = start + size; spp < end; spp++) {
+ sprop = SPROP_FETCH(spp);
+ if (sprop) {
+ entryCount++;
+ for (aprop = ancestorLine; aprop; aprop = aprop->parent) {
+ if (aprop == sprop)
+ break;
+ }
+ JS_ASSERT(aprop);
+ }
+ }
+ JS_ASSERT(entryCount == scope->entryCount);
+
+ ancestorCount = 0;
+ for (sprop = ancestorLine; sprop; sprop = sprop->parent) {
+ if (SCOPE_HAD_MIDDLE_DELETE(scope) &&
+ !SCOPE_HAS_PROPERTY(scope, sprop)) {
+ JS_ASSERT(sparse || (sprop->flags & SPROP_IS_DUPLICATE));
+ continue;
+ }
+ ancestorCount++;
+ }
+ JS_ASSERT(ancestorCount == scope->entryCount);
+}
+#else
+#define CHECK_ANCESTOR_LINE(scope, sparse) /* nothing */
+#endif
+
+static void
+ReportReadOnlyScope(JSContext *cx, JSScope *scope)
+{
+ JSString *str;
+
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(scope->object));
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_READ_ONLY,
+ str
+ ? JS_GetStringBytes(str)
+ : LOCKED_OBJ_GET_CLASS(scope->object)->name);
+}
+
+JSScopeProperty *
+js_AddScopeProperty(JSContext *cx, JSScope *scope, jsid id,
+ JSPropertyOp getter, JSPropertyOp setter, uint32 slot,
+ uintN attrs, uintN flags, intN shortid)
+{
+ JSScopeProperty **spp, *sprop, *overwriting, **spvec, **spp2, child;
+ uint32 size, splen, i;
+ int change;
+ JSTempValueRooter tvr;
+
+ JS_ASSERT(JS_IS_SCOPE_LOCKED(cx, scope));
+ CHECK_ANCESTOR_LINE(scope, JS_TRUE);
+
+ /*
+ * You can't add properties to a sealed scope. But note well that you can
+ * change property attributes in a sealed scope, even though that replaces
+ * a JSScopeProperty * in the scope's hash table -- but no id is added, so
+ * the scope remains sealed.
+ */
+ if (SCOPE_IS_SEALED(scope)) {
+ ReportReadOnlyScope(cx, scope);
+ return NULL;
+ }
+
+ /*
+ * Normalize stub getter and setter values for faster is-stub testing in
+ * the SPROP_CALL_[GS]ETTER macros.
+ */
+ if (getter == JS_PropertyStub)
+ getter = NULL;
+ if (setter == JS_PropertyStub)
+ setter = NULL;
+
+ /*
+ * Search for id in order to claim its entry, allocating a property tree
+ * node if one doesn't already exist for our parameters.
+ */
+ spp = js_SearchScope(scope, id, JS_TRUE);
+ sprop = overwriting = SPROP_FETCH(spp);
+ if (!sprop) {
+ /* Check whether we need to grow, if the load factor is >= .75. */
+ size = SCOPE_CAPACITY(scope);
+ if (scope->entryCount + scope->removedCount >= size - (size >> 2)) {
+ if (scope->removedCount >= size >> 2) {
+ METER(compresses);
+ change = 0;
+ } else {
+ METER(grows);
+ change = 1;
+ }
+ if (!ChangeScope(cx, scope, change) &&
+ scope->entryCount + scope->removedCount == size - 1) {
+ METER(addFailures);
+ return NULL;
+ }
+ spp = js_SearchScope(scope, id, JS_TRUE);
+ JS_ASSERT(!SPROP_FETCH(spp));
+ }
+ } else {
+ /* Property exists: js_SearchScope must have returned a valid entry. */
+ JS_ASSERT(!SPROP_IS_REMOVED(*spp));
+
+ /*
+ * If all property members match, this is a redundant add and we can
+ * return early. If the caller wants to allocate a slot, but doesn't
+ * care which slot, copy sprop->slot into slot so we can match sprop,
+ * if all other members match.
+ */
+ if (!(attrs & JSPROP_SHARED) &&
+ slot == SPROP_INVALID_SLOT &&
+ SPROP_HAS_VALID_SLOT(sprop, scope)) {
+ slot = sprop->slot;
+ }
+ if (SPROP_MATCH_PARAMS_AFTER_ID(sprop, getter, setter, slot, attrs,
+ flags, shortid)) {
+ METER(redundantAdds);
+ return sprop;
+ }
+
+ /*
+ * Duplicate formal parameters require us to leave the old property
+ * on the ancestor line, so the decompiler can find it, even though
+ * its entry in scope->table is overwritten to point at a new property
+ * descending from the old one. The SPROP_IS_DUPLICATE flag helps us
+ * cope with the consequent disparity between ancestor line height and
+ * scope->entryCount.
+ */
+ if (flags & SPROP_IS_DUPLICATE) {
+ sprop->flags |= SPROP_IS_DUPLICATE;
+ } else {
+ /*
+ * If we are clearing sprop to force an existing property to be
+ * overwritten (apart from a duplicate formal parameter), we must
+ * unlink it from the ancestor line at scope->lastProp, lazily if
+ * sprop is not lastProp. And we must remove the entry at *spp,
+ * precisely so the lazy "middle delete" fixup code further below
+ * won't find sprop in scope->table, in spite of sprop being on
+ * the ancestor line.
+ *
+ * When we finally succeed in finding or creating a new sprop
+ * and storing its pointer at *spp, we'll use the |overwriting|
+ * local saved when we first looked up id to decide whether we're
+ * indeed creating a new entry, or merely overwriting an existing
+ * property.
+ */
+ if (sprop == SCOPE_LAST_PROP(scope)) {
+ do {
+ SCOPE_REMOVE_LAST_PROP(scope);
+ if (!SCOPE_HAD_MIDDLE_DELETE(scope))
+ break;
+ sprop = SCOPE_LAST_PROP(scope);
+ } while (sprop && !SCOPE_HAS_PROPERTY(scope, sprop));
+ } else if (!SCOPE_HAD_MIDDLE_DELETE(scope)) {
+ /*
+ * If we have no hash table yet, we need one now. The middle
+ * delete code is simple-minded that way!
+ */
+ if (!scope->table) {
+ if (!CreateScopeTable(cx, scope, JS_TRUE))
+ return NULL;
+ spp = js_SearchScope(scope, id, JS_TRUE);
+ sprop = overwriting = SPROP_FETCH(spp);
+ }
+ SCOPE_SET_MIDDLE_DELETE(scope);
+ }
+ }
+
+ /*
+ * If we fail later on trying to find or create a new sprop, we will
+ * goto fail_overwrite and restore *spp from |overwriting|. Note that
+ * we don't bother to keep scope->removedCount in sync, because we'll
+ * fix up *spp and scope->entryCount shortly, no matter how control
+ * flow returns from this function.
+ */
+ if (scope->table)
+ SPROP_STORE_PRESERVING_COLLISION(spp, NULL);
+ scope->entryCount--;
+ CHECK_ANCESTOR_LINE(scope, JS_TRUE);
+ sprop = NULL;
+ }
+
+ if (!sprop) {
+ /*
+ * If properties were deleted from the middle of the list starting at
+ * scope->lastProp, we may need to fork the property tree and squeeze
+ * all deleted properties out of scope's ancestor line. Otherwise we
+ * risk adding a node with the same id as a "middle" node, violating
+ * the rule that properties along an ancestor line have distinct ids
+ * (unless flagged SPROP_IS_DUPLICATE).
+ */
+ if (SCOPE_HAD_MIDDLE_DELETE(scope)) {
+ JS_ASSERT(scope->table);
+ CHECK_ANCESTOR_LINE(scope, JS_TRUE);
+
+ splen = scope->entryCount;
+ if (splen == 0) {
+ JS_ASSERT(scope->lastProp == NULL);
+ } else {
+ /*
+ * Enumerate live entries in scope->table using a temporary
+ * vector, by walking the (possibly sparse, due to deletions)
+ * ancestor line from scope->lastProp.
+ */
+ spvec = (JSScopeProperty **)
+ JS_malloc(cx, SCOPE_TABLE_NBYTES(splen));
+ if (!spvec)
+ goto fail_overwrite;
+ i = splen;
+ sprop = SCOPE_LAST_PROP(scope);
+ JS_ASSERT(sprop);
+ do {
+ /*
+ * NB: test SCOPE_GET_PROPERTY, not SCOPE_HAS_PROPERTY --
+ * the latter insists that sprop->id maps to sprop, while
+ * the former simply tests whether sprop->id is bound in
+ * scope. We must allow for duplicate formal parameters
+ * along the ancestor line, and fork them as needed.
+ */
+ if (!SCOPE_GET_PROPERTY(scope, sprop->id))
+ continue;
+
+ JS_ASSERT(sprop != overwriting);
+ if (i == 0) {
+ /*
+ * If our original splen estimate, scope->entryCount,
+ * is less than the ancestor line height, there must
+ * be duplicate formal parameters in this (function
+ * object) scope. Count remaining ancestors in order
+ * to realloc spvec.
+ */
+ JSScopeProperty *tmp = sprop;
+ do {
+ if (SCOPE_GET_PROPERTY(scope, tmp->id))
+ i++;
+ } while ((tmp = tmp->parent) != NULL);
+ spp2 = (JSScopeProperty **)
+ JS_realloc(cx, spvec, SCOPE_TABLE_NBYTES(splen+i));
+ if (!spp2) {
+ JS_free(cx, spvec);
+ goto fail_overwrite;
+ }
+
+ spvec = spp2;
+ memmove(spvec + i, spvec, SCOPE_TABLE_NBYTES(splen));
+ splen += i;
+ }
+
+ spvec[--i] = sprop;
+ } while ((sprop = sprop->parent) != NULL);
+ JS_ASSERT(i == 0);
+
+ /*
+ * Now loop forward through spvec, forking the property tree
+ * whenever we see a "parent gap" due to deletions from scope.
+ * NB: sprop is null on first entry to the loop body.
+ */
+ do {
+ if (spvec[i]->parent == sprop) {
+ sprop = spvec[i];
+ } else {
+ sprop = GetPropertyTreeChild(cx, sprop, spvec[i]);
+ if (!sprop) {
+ JS_free(cx, spvec);
+ goto fail_overwrite;
+ }
+
+ spp2 = js_SearchScope(scope, sprop->id, JS_FALSE);
+ JS_ASSERT(SPROP_FETCH(spp2) == spvec[i]);
+ SPROP_STORE_PRESERVING_COLLISION(spp2, sprop);
+ }
+ } while (++i < splen);
+ JS_free(cx, spvec);
+
+ /*
+ * Now sprop points to the last property in scope, where the
+ * ancestor line from sprop to the root is dense w.r.t. scope:
+ * it contains no nodes not mapped by scope->table, apart from
+ * any stinking ECMA-mandated duplicate formal parameters.
+ */
+ scope->lastProp = sprop;
+ CHECK_ANCESTOR_LINE(scope, JS_FALSE);
+ JS_RUNTIME_METER(cx->runtime, middleDeleteFixups);
+ }
+
+ SCOPE_CLR_MIDDLE_DELETE(scope);
+ }
+
+ /*
+ * Aliases share another property's slot, passed in the |slot| param.
+ * Shared properties have no slot. Unshared properties that do not
+ * alias another property's slot get one here, but may lose it due to
+ * a JS_ClearScope call.
+ */
+ if (!(flags & SPROP_IS_ALIAS)) {
+ if (attrs & JSPROP_SHARED) {
+ slot = SPROP_INVALID_SLOT;
+ } else {
+ /*
+ * We may have set slot from a nearly-matching sprop, above.
+ * If so, we're overwriting that nearly-matching sprop, so we
+ * can reuse its slot -- we don't need to allocate a new one.
+ * Callers should therefore pass SPROP_INVALID_SLOT for all
+ * non-alias, unshared property adds.
+ */
+ if (slot != SPROP_INVALID_SLOT)
+ JS_ASSERT(overwriting);
+ else if (!js_AllocSlot(cx, scope->object, &slot))
+ goto fail_overwrite;
+ }
+ }
+
+ /*
+ * Check for a watchpoint on a deleted property; if one exists, change
+ * setter to js_watch_set.
+ * XXXbe this could get expensive with lots of watchpoints...
+ */
+ if (!JS_CLIST_IS_EMPTY(&cx->runtime->watchPointList) &&
+ js_FindWatchPoint(cx->runtime, scope, id)) {
+ JS_PUSH_TEMP_ROOT_SPROP(cx, overwriting, &tvr);
+ setter = js_WrapWatchedSetter(cx, id, attrs, setter);
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ if (!setter)
+ goto fail_overwrite;
+ }
+
+ /* Find or create a property tree node labeled by our arguments. */
+ child.id = id;
+ child.getter = getter;
+ child.setter = setter;
+ child.slot = slot;
+ child.attrs = attrs;
+ child.flags = flags;
+ child.shortid = shortid;
+ sprop = GetPropertyTreeChild(cx, scope->lastProp, &child);
+ if (!sprop)
+ goto fail_overwrite;
+
+ /* Store the tree node pointer in the table entry for id. */
+ if (scope->table)
+ SPROP_STORE_PRESERVING_COLLISION(spp, sprop);
+ scope->entryCount++;
+ scope->lastProp = sprop;
+ CHECK_ANCESTOR_LINE(scope, JS_FALSE);
+ if (!overwriting) {
+ JS_RUNTIME_METER(cx->runtime, liveScopeProps);
+ JS_RUNTIME_METER(cx->runtime, totalScopeProps);
+ }
+
+ /*
+ * If we reach the hashing threshold, try to allocate scope->table.
+ * If we can't (a rare event, preceded by swapping to death on most
+ * modern OSes), stick with linear search rather than whining about
+ * this little set-back. Therefore we must test !scope->table and
+ * scope->entryCount >= SCOPE_HASH_THRESHOLD, not merely whether the
+ * entry count just reached the threshold.
+ */
+ if (!scope->table && scope->entryCount >= SCOPE_HASH_THRESHOLD)
+ (void) CreateScopeTable(cx, scope, JS_FALSE);
+ }
+
+ METER(adds);
+ return sprop;
+
+fail_overwrite:
+ if (overwriting) {
+ /*
+ * We may or may not have forked overwriting out of scope's ancestor
+ * line, so we must check (the alternative is to set a flag above, but
+ * that hurts the common, non-error case). If we did fork overwriting
+ * out, we'll add it back at scope->lastProp. This means enumeration
+ * order can change due to a failure to overwrite an id.
+ * XXXbe very minor incompatibility
+ */
+ for (sprop = SCOPE_LAST_PROP(scope); ; sprop = sprop->parent) {
+ if (!sprop) {
+ sprop = SCOPE_LAST_PROP(scope);
+ if (overwriting->parent == sprop) {
+ scope->lastProp = overwriting;
+ } else {
+ sprop = GetPropertyTreeChild(cx, sprop, overwriting);
+ if (sprop) {
+ JS_ASSERT(sprop != overwriting);
+ scope->lastProp = sprop;
+ }
+ overwriting = sprop;
+ }
+ break;
+ }
+ if (sprop == overwriting)
+ break;
+ }
+ if (overwriting) {
+ if (scope->table)
+ SPROP_STORE_PRESERVING_COLLISION(spp, overwriting);
+ scope->entryCount++;
+ }
+ CHECK_ANCESTOR_LINE(scope, JS_TRUE);
+ }
+ METER(addFailures);
+ return NULL;
+}
+
+JSScopeProperty *
+js_ChangeScopePropertyAttrs(JSContext *cx, JSScope *scope,
+ JSScopeProperty *sprop, uintN attrs, uintN mask,
+ JSPropertyOp getter, JSPropertyOp setter)
+{
+ JSScopeProperty child, *newsprop, **spp;
+
+ CHECK_ANCESTOR_LINE(scope, JS_TRUE);
+
+ /* Allow only shared (slot-less) => unshared (slot-full) transition. */
+ attrs |= sprop->attrs & mask;
+ JS_ASSERT(!((attrs ^ sprop->attrs) & JSPROP_SHARED) ||
+ !(attrs & JSPROP_SHARED));
+ if (getter == JS_PropertyStub)
+ getter = NULL;
+ if (setter == JS_PropertyStub)
+ setter = NULL;
+ if (sprop->attrs == attrs &&
+ sprop->getter == getter &&
+ sprop->setter == setter) {
+ return sprop;
+ }
+
+ child.id = sprop->id;
+ child.getter = getter;
+ child.setter = setter;
+ child.slot = sprop->slot;
+ child.attrs = attrs;
+ child.flags = sprop->flags;
+ child.shortid = sprop->shortid;
+
+ if (SCOPE_LAST_PROP(scope) == sprop) {
+ /*
+ * Optimize the case where the last property added to scope is changed
+ * to have a different attrs, getter, or setter. In the last property
+ * case, we need not fork the property tree. But since we do not call
+ * js_AddScopeProperty, we may need to allocate a new slot directly.
+ */
+ if ((sprop->attrs & JSPROP_SHARED) && !(attrs & JSPROP_SHARED)) {
+ JS_ASSERT(child.slot == SPROP_INVALID_SLOT);
+ if (!js_AllocSlot(cx, scope->object, &child.slot))
+ return NULL;
+ }
+
+ newsprop = GetPropertyTreeChild(cx, sprop->parent, &child);
+ if (newsprop) {
+ spp = js_SearchScope(scope, sprop->id, JS_FALSE);
+ JS_ASSERT(SPROP_FETCH(spp) == sprop);
+
+ if (scope->table)
+ SPROP_STORE_PRESERVING_COLLISION(spp, newsprop);
+ scope->lastProp = newsprop;
+ CHECK_ANCESTOR_LINE(scope, JS_TRUE);
+ }
+ } else {
+ /*
+ * Let js_AddScopeProperty handle this |overwriting| case, including
+ * the conservation of sprop->slot (if it's valid). We must not call
+ * js_RemoveScopeProperty here, it will free a valid sprop->slot and
+ * js_AddScopeProperty won't re-allocate it.
+ */
+ newsprop = js_AddScopeProperty(cx, scope, child.id,
+ child.getter, child.setter, child.slot,
+ child.attrs, child.flags, child.shortid);
+ }
+
+#ifdef DUMP_SCOPE_STATS
+ if (!newsprop)
+ METER(changeFailures);
+#endif
+ return newsprop;
+}
+
+JSBool
+js_RemoveScopeProperty(JSContext *cx, JSScope *scope, jsid id)
+{
+ JSScopeProperty **spp, *stored, *sprop;
+ uint32 size;
+
+ JS_ASSERT(JS_IS_SCOPE_LOCKED(cx, scope));
+ CHECK_ANCESTOR_LINE(scope, JS_TRUE);
+ if (SCOPE_IS_SEALED(scope)) {
+ ReportReadOnlyScope(cx, scope);
+ return JS_FALSE;
+ }
+ METER(removes);
+
+ spp = js_SearchScope(scope, id, JS_FALSE);
+ stored = *spp;
+ sprop = SPROP_CLEAR_COLLISION(stored);
+ if (!sprop) {
+ METER(uselessRemoves);
+ return JS_TRUE;
+ }
+
+ /* Convert from a list to a hash so we can handle "middle deletes". */
+ if (!scope->table && sprop != scope->lastProp) {
+ if (!CreateScopeTable(cx, scope, JS_TRUE))
+ return JS_FALSE;
+ spp = js_SearchScope(scope, id, JS_FALSE);
+ stored = *spp;
+ sprop = SPROP_CLEAR_COLLISION(stored);
+ }
+
+ /* First, if sprop is unshared and not cleared, free its slot number. */
+ if (SPROP_HAS_VALID_SLOT(sprop, scope)) {
+ js_FreeSlot(cx, scope->object, sprop->slot);
+ JS_ATOMIC_INCREMENT(&cx->runtime->propertyRemovals);
+ }
+
+ /* Next, remove id by setting its entry to a removed or free sentinel. */
+ if (SPROP_HAD_COLLISION(stored)) {
+ JS_ASSERT(scope->table);
+ *spp = SPROP_REMOVED;
+ scope->removedCount++;
+ } else {
+ METER(removeFrees);
+ if (scope->table)
+ *spp = NULL;
+ }
+ scope->entryCount--;
+ JS_RUNTIME_UNMETER(cx->runtime, liveScopeProps);
+
+ /* Update scope->lastProp directly, or set its deferred update flag. */
+ if (sprop == SCOPE_LAST_PROP(scope)) {
+ do {
+ SCOPE_REMOVE_LAST_PROP(scope);
+ if (!SCOPE_HAD_MIDDLE_DELETE(scope))
+ break;
+ sprop = SCOPE_LAST_PROP(scope);
+ } while (sprop && !SCOPE_HAS_PROPERTY(scope, sprop));
+ } else if (!SCOPE_HAD_MIDDLE_DELETE(scope)) {
+ SCOPE_SET_MIDDLE_DELETE(scope);
+ }
+ CHECK_ANCESTOR_LINE(scope, JS_TRUE);
+
+ /* Last, consider shrinking scope's table if its load factor is <= .25. */
+ size = SCOPE_CAPACITY(scope);
+ if (size > MIN_SCOPE_SIZE && scope->entryCount <= size >> 2) {
+ METER(shrinks);
+ (void) ChangeScope(cx, scope, -1);
+ }
+
+ return JS_TRUE;
+}
+
+void
+js_ClearScope(JSContext *cx, JSScope *scope)
+{
+ CHECK_ANCESTOR_LINE(scope, JS_TRUE);
+#ifdef DEBUG
+ JS_LOCK_RUNTIME_VOID(cx->runtime,
+ cx->runtime->liveScopeProps -= scope->entryCount);
+#endif
+
+ if (scope->table)
+ free(scope->table);
+ SCOPE_CLR_MIDDLE_DELETE(scope);
+ InitMinimalScope(scope);
+ JS_ATOMIC_INCREMENT(&cx->runtime->propertyRemovals);
+}
+
+void
+js_MarkId(JSContext *cx, jsid id)
+{
+ if (JSID_IS_ATOM(id))
+ GC_MARK_ATOM(cx, JSID_TO_ATOM(id));
+ else if (JSID_IS_OBJECT(id))
+ GC_MARK(cx, JSID_TO_OBJECT(id), "id");
+ else
+ JS_ASSERT(JSID_IS_INT(id));
+}
+
+#if defined GC_MARK_DEBUG || defined DUMP_SCOPE_STATS
+# include "jsprf.h"
+#endif
+
+void
+js_MarkScopeProperty(JSContext *cx, JSScopeProperty *sprop)
+{
+ sprop->flags |= SPROP_MARK;
+ MARK_ID(cx, sprop->id);
+
+#if JS_HAS_GETTER_SETTER
+ if (sprop->attrs & (JSPROP_GETTER | JSPROP_SETTER)) {
+#ifdef GC_MARK_DEBUG
+ char buf[64];
+ char buf2[11];
+ const char *id;
+
+ if (JSID_IS_ATOM(sprop->id)) {
+ JSAtom *atom = JSID_TO_ATOM(sprop->id);
+
+ id = (atom && ATOM_IS_STRING(atom))
+ ? JS_GetStringBytes(ATOM_TO_STRING(atom))
+ : "unknown";
+ } else if (JSID_IS_INT(sprop->id)) {
+ JS_snprintf(buf2, sizeof buf2, "%d", JSID_TO_INT(sprop->id));
+ id = buf2;
+ } else {
+ id = "<object>";
+ }
+#endif
+
+ if (sprop->attrs & JSPROP_GETTER) {
+#ifdef GC_MARK_DEBUG
+ JS_snprintf(buf, sizeof buf, "%s %s",
+ id, js_getter_str);
+#endif
+ GC_MARK(cx, JSVAL_TO_GCTHING((jsval) sprop->getter), buf);
+ }
+ if (sprop->attrs & JSPROP_SETTER) {
+#ifdef GC_MARK_DEBUG
+ JS_snprintf(buf, sizeof buf, "%s %s",
+ id, js_setter_str);
+#endif
+ GC_MARK(cx, JSVAL_TO_GCTHING((jsval) sprop->setter), buf);
+ }
+ }
+#endif /* JS_HAS_GETTER_SETTER */
+}
+
+#ifdef DUMP_SCOPE_STATS
+
+#include <stdio.h>
+#include <math.h>
+
+uint32 js_nkids_max;
+uint32 js_nkids_sum;
+double js_nkids_sqsum;
+uint32 js_nkids_hist[11];
+
+static void
+MeterKidCount(uintN nkids)
+{
+ if (nkids) {
+ js_nkids_sum += nkids;
+ js_nkids_sqsum += (double)nkids * nkids;
+ if (nkids > js_nkids_max)
+ js_nkids_max = nkids;
+ }
+ js_nkids_hist[JS_MIN(nkids, 10)]++;
+}
+
+static void
+MeterPropertyTree(JSScopeProperty *node)
+{
+ uintN i, nkids;
+ JSScopeProperty *kids, *kid;
+ PropTreeKidsChunk *chunk;
+
+ nkids = 0;
+ kids = node->kids;
+ if (kids) {
+ if (KIDS_IS_CHUNKY(kids)) {
+ for (chunk = KIDS_TO_CHUNK(kids); chunk; chunk = chunk->next) {
+ for (i = 0; i < MAX_KIDS_PER_CHUNK; i++) {
+ kid = chunk->kids[i];
+ if (!kid)
+ break;
+ MeterPropertyTree(kid);
+ nkids++;
+ }
+ }
+ } else {
+ MeterPropertyTree(kids);
+ nkids = 1;
+ }
+ }
+
+ MeterKidCount(nkids);
+}
+
+JS_STATIC_DLL_CALLBACK(JSDHashOperator)
+js_MeterPropertyTree(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
+ void *arg)
+{
+ JSPropertyTreeEntry *entry = (JSPropertyTreeEntry *)hdr;
+
+ MeterPropertyTree(entry->child);
+ return JS_DHASH_NEXT;
+}
+
+static void
+DumpSubtree(JSScopeProperty *sprop, int level, FILE *fp)
+{
+ char buf[10];
+ JSScopeProperty *kids, *kid;
+ PropTreeKidsChunk *chunk;
+ uintN i;
+
+ fprintf(fp, "%*sid %s g/s %p/%p slot %lu attrs %x flags %x shortid %d\n",
+ level, "",
+ JSID_IS_ATOM(sprop->id)
+ ? JS_GetStringBytes(ATOM_TO_STRING(JSID_TO_ATOM(sprop->id)))
+ : JSID_IS_OBJECT(sprop->id)
+ ? js_ValueToPrintableString(cx, OBJECT_JSID_TO_JSVAL(sprop->id))
+ : (JS_snprintf(buf, sizeof buf, "%ld", JSVAL_TO_INT(sprop->id)),
+               buf),
+ (void *) sprop->getter, (void *) sprop->setter,
+ (unsigned long) sprop->slot, sprop->attrs, sprop->flags,
+ sprop->shortid);
+ kids = sprop->kids;
+ if (kids) {
+ ++level;
+ if (KIDS_IS_CHUNKY(kids)) {
+ chunk = KIDS_TO_CHUNK(kids);
+ do {
+ for (i = 0; i < MAX_KIDS_PER_CHUNK; i++) {
+ kid = chunk->kids[i];
+ if (!kid)
+ break;
+ JS_ASSERT(kid->parent == sprop);
+ DumpSubtree(kid, level, fp);
+ }
+ } while ((chunk = chunk->next) != NULL);
+ } else {
+ kid = kids;
+ DumpSubtree(kid, level, fp);
+ }
+ }
+}
+
+#endif /* DUMP_SCOPE_STATS */
+
+void
+js_SweepScopeProperties(JSRuntime *rt)
+{
+ JSArena **ap, *a;
+ JSScopeProperty *limit, *sprop, *parent, *kids, *kid;
+ uintN liveCount;
+ PropTreeKidsChunk *chunk, *nextChunk, *freeChunk;
+ uintN i;
+
+#ifdef DUMP_SCOPE_STATS
+ uint32 livePropCapacity = 0, totalLiveCount = 0;
+ static FILE *logfp;
+ if (!logfp)
+ logfp = fopen("/tmp/proptree.stats", "a");
+
+ MeterKidCount(rt->propertyTreeHash.entryCount);
+ JS_DHashTableEnumerate(&rt->propertyTreeHash, js_MeterPropertyTree, NULL);
+
+ {
+ double mean = 0.0, var = 0.0, sigma = 0.0;
+ double nodesum = rt->livePropTreeNodes;
+ double kidsum = js_nkids_sum;
+ if (nodesum > 0 && kidsum >= 0) {
+ mean = kidsum / nodesum;
+ var = nodesum * js_nkids_sqsum - kidsum * kidsum;
+ if (var < 0.0 || nodesum <= 1)
+ var = 0.0;
+ else
+ var /= nodesum * (nodesum - 1);
+
+ /* Windows says sqrt(0.0) is "-1.#J" (?!) so we must test. */
+ sigma = (var != 0.0) ? sqrt(var) : 0.0;
+ }
+
+ fprintf(logfp,
+ "props %u nodes %g beta %g meankids %g sigma %g max %u",
+ rt->liveScopeProps, nodesum, nodesum / rt->liveScopeProps,
+ mean, sigma, js_nkids_max);
+ }
+
+ fprintf(logfp, " histogram %u %u %u %u %u %u %u %u %u %u %u",
+ js_nkids_hist[0], js_nkids_hist[1],
+ js_nkids_hist[2], js_nkids_hist[3],
+ js_nkids_hist[4], js_nkids_hist[5],
+ js_nkids_hist[6], js_nkids_hist[7],
+ js_nkids_hist[8], js_nkids_hist[9],
+ js_nkids_hist[10]);
+ js_nkids_sum = js_nkids_max = 0;
+ js_nkids_sqsum = 0;
+ memset(js_nkids_hist, 0, sizeof js_nkids_hist);
+#endif
+
+ ap = &rt->propertyArenaPool.first.next;
+ while ((a = *ap) != NULL) {
+ limit = (JSScopeProperty *) a->avail;
+ liveCount = 0;
+ for (sprop = (JSScopeProperty *) a->base; sprop < limit; sprop++) {
+ /* If the id is null, sprop is already on the freelist. */
+ if (sprop->id == JSVAL_NULL)
+ continue;
+
+ /* If the mark bit is set, sprop is alive, so we skip it. */
+ if (sprop->flags & SPROP_MARK) {
+ sprop->flags &= ~SPROP_MARK;
+ liveCount++;
+ continue;
+ }
+
+ /* Ok, sprop is garbage to collect: unlink it from its parent. */
+ freeChunk = RemovePropertyTreeChild(rt, sprop);
+
+ /*
+ * Take care to reparent all sprop's kids to their grandparent.
+ * InsertPropertyTreeChild can potentially fail for two reasons:
+ *
+ * 1. If parent is null, insertion into the root property hash
+ * table may fail. We are forced to leave the kid out of the
+ * table (as can already happen with duplicates) but ensure
+ * that the kid's parent pointer is set to null.
+ *
+ * 2. If parent is non-null, allocation of a new KidsChunk can
+             *    fail. To prevent this from happening, we allow sprop's own
+ * chunks to be reused by the grandparent, which removes the
+ * need for InsertPropertyTreeChild to malloc a new KidsChunk.
+ *
+ * If sprop does not have chunky kids, then we rely on the
+ * RemovePropertyTreeChild call above (which removed sprop from
+ * its parent) either leaving one free entry, or else returning
+ * the now-unused chunk to us so we can reuse it.
+ *
+ * We also require the grandparent to have either no kids or else
+ * chunky kids. A single non-chunky kid would force a new chunk to
+ * be malloced in some cases (if sprop had a single non-chunky
+ * kid, or a multiple of MAX_KIDS_PER_CHUNK kids). Note that
+ * RemovePropertyTreeChild never converts a single-entry chunky
+ * kid back to a non-chunky kid, so we are assured of correct
+ * behaviour.
+ */
+ kids = sprop->kids;
+ if (kids) {
+ sprop->kids = NULL;
+ parent = sprop->parent;
+ /* Validate that grandparent has no kids or chunky kids. */
+ JS_ASSERT(!parent || !parent->kids ||
+ KIDS_IS_CHUNKY(parent->kids));
+ if (KIDS_IS_CHUNKY(kids)) {
+ chunk = KIDS_TO_CHUNK(kids);
+ do {
+ nextChunk = chunk->next;
+ chunk->next = NULL;
+ for (i = 0; i < MAX_KIDS_PER_CHUNK; i++) {
+ kid = chunk->kids[i];
+ if (!kid)
+ break;
+ JS_ASSERT(kid->parent == sprop);
+
+ /*
+ * Clear a space in the kids array for possible
+ * re-use by InsertPropertyTreeChild.
+ */
+ chunk->kids[i] = NULL;
+ if (!InsertPropertyTreeChild(rt, parent, kid,
+ chunk)) {
+ /*
+ * This can happen only if we failed to add an
+ * entry to the root property hash table.
+ */
+ JS_ASSERT(!parent);
+ kid->parent = NULL;
+ }
+ }
+ if (!chunk->kids[0]) {
+ /* The chunk wasn't reused, so we must free it. */
+ DestroyPropTreeKidsChunk(rt, chunk);
+ }
+ } while ((chunk = nextChunk) != NULL);
+ } else {
+ kid = kids;
+ if (!InsertPropertyTreeChild(rt, parent, kid, freeChunk)) {
+ /*
+ * This can happen only if we failed to add an entry
+ * to the root property hash table.
+ */
+ JS_ASSERT(!parent);
+ kid->parent = NULL;
+ }
+ }
+ }
+
+ if (freeChunk && !freeChunk->kids[0]) {
+ /* The chunk wasn't reused, so we must free it. */
+ DestroyPropTreeKidsChunk(rt, freeChunk);
+ }
+
+ /* Clear id so we know (above) that sprop is on the freelist. */
+ sprop->id = JSVAL_NULL;
+ FREENODE_INSERT(rt->propertyFreeList, sprop);
+ JS_RUNTIME_UNMETER(rt, livePropTreeNodes);
+ }
+
+ /* If a contains no live properties, return it to the malloc heap. */
+ if (liveCount == 0) {
+ for (sprop = (JSScopeProperty *) a->base; sprop < limit; sprop++)
+ FREENODE_REMOVE(sprop);
+ JS_ARENA_DESTROY(&rt->propertyArenaPool, a, ap);
+ } else {
+#ifdef DUMP_SCOPE_STATS
+ livePropCapacity += limit - (JSScopeProperty *) a->base;
+ totalLiveCount += liveCount;
+#endif
+ ap = &a->next;
+ }
+ }
+
+#ifdef DUMP_SCOPE_STATS
+ fprintf(logfp, " arenautil %g%%\n",
+ (totalLiveCount * 100.0) / livePropCapacity);
+ fflush(logfp);
+#endif
+
+#ifdef DUMP_PROPERTY_TREE
+ {
+ FILE *dumpfp = fopen("/tmp/proptree.dump", "w");
+ if (dumpfp) {
+ JSPropertyTreeEntry *pte, *end;
+
+ pte = (JSPropertyTreeEntry *) rt->propertyTreeHash.entryStore;
+ end = pte + JS_DHASH_TABLE_SIZE(&rt->propertyTreeHash);
+ while (pte < end) {
+ if (pte->child)
+ DumpSubtree(pte->child, 0, dumpfp);
+ pte++;
+ }
+ fclose(dumpfp);
+ }
+ }
+#endif
+}
+
+JSBool
+js_InitPropertyTree(JSRuntime *rt)
+{
+ if (!JS_DHashTableInit(&rt->propertyTreeHash, &PropertyTreeHashOps, NULL,
+ sizeof(JSPropertyTreeEntry), JS_DHASH_MIN_SIZE)) {
+ rt->propertyTreeHash.ops = NULL;
+ return JS_FALSE;
+ }
+ JS_InitArenaPool(&rt->propertyArenaPool, "properties",
+ 256 * sizeof(JSScopeProperty), sizeof(void *));
+ return JS_TRUE;
+}
+
+void
+js_FinishPropertyTree(JSRuntime *rt)
+{
+ if (rt->propertyTreeHash.ops) {
+ JS_DHashTableFinish(&rt->propertyTreeHash);
+ rt->propertyTreeHash.ops = NULL;
+ }
+ JS_FinishArenaPool(&rt->propertyArenaPool);
+}
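As a reading aid for the four runtime-level entry points above, here is a minimal sketch of the order in which an embedding would drive them; the function name is hypothetical, it assumes a JSRuntime *rt created elsewhere, and it elides the marking pass (MARK_SCOPE_PROPERTY over live scopes) that normally precedes the sweep. It is a sketch that only builds inside the js-1.7 tree, where jsscope.h resolves.

    #include "jsscope.h"

    static JSBool
    PropertyTreeLifetimeSketch(JSRuntime *rt)
    {
        if (!js_InitPropertyTree(rt))       /* at runtime creation */
            return JS_FALSE;

        /* ... requests run; scopes add and remove properties ... */

        /*
         * At GC time the collector marks every reachable node, then
         * reclaims the unmarked remainder and compacts kid chunks.
         */
        js_SweepScopeProperties(rt);

        js_FinishPropertyTree(rt);          /* at runtime destruction */
        return JS_TRUE;
    }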
diff --git a/src/third_party/js-1.7/jsscope.h b/src/third_party/js-1.7/jsscope.h
new file mode 100644
index 00000000000..0565d4d8115
--- /dev/null
+++ b/src/third_party/js-1.7/jsscope.h
@@ -0,0 +1,407 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsscope_h___
+#define jsscope_h___
+/*
+ * JS symbol tables.
+ */
+#include "jstypes.h"
+#include "jsobj.h"
+#include "jsprvtd.h"
+#include "jspubtd.h"
+
+#ifdef JS_THREADSAFE
+# include "jslock.h"
+#endif
+
+/*
+ * Given P independent, non-unique properties each of size S words mapped by
+ * all scopes in a runtime, construct a property tree of N nodes each of size
+ * S+L words (L for tree linkage). A nominal L value is 2 for leftmost-child
+ * and right-sibling links. We hope that the N < P by enough that the space
+ * overhead of L, and the overhead of scope entries pointing at property tree
+ * nodes, is worth it.
+ *
+ * The tree construction goes as follows. If any empty scope in the runtime
+ * has a property X added to it, find or create a node under the tree root
+ * labeled X, and set scope->lastProp to point at that node. If any non-empty
+ * scope whose most recently added property is labeled Y has another property
+ * labeled Z added, find or create a node for Z under the node that was added
+ * for Y, and set scope->lastProp to point at that node.
+ *
+ * A property is labeled by its members' values: id, getter, setter, slot,
+ * attributes, tiny or short id, and a field telling for..in order. Note that
+ * labels are not unique in the tree, but they are unique among a node's kids
+ * (barring rare and benign multi-threaded race condition outcomes, see below)
+ * and along any ancestor line from the tree root to a given leaf node (except
+ * for the hard case of duplicate formal parameters to a function).
+ *
+ * Thus the root of the tree represents all empty scopes, and the first ply
+ * of the tree represents all scopes containing one property, etc. Each node
+ * in the tree can stand for any number of scopes having the same ordered set
+ * of properties, where that node was the last added to the scope. (We need
+ * not store the root of the tree as a node, and do not -- all we need are
+ * links to its kids.)
+ *
+ * Sidebar on for..in loop order: ECMA requires no particular order, but this
+ * implementation has promised and delivered property definition order, and
+ * compatibility is king. We could use an order number per property, which
+ * would require a sort in js_Enumerate, and an entry order generation number
+ * per scope. An order number beats a list, which should be doubly-linked for
+ * O(1) delete. An even better scheme is to use a parent link in the property
+ * tree, so that the ancestor line can be iterated from scope->lastProp when
+ * filling in a JSIdArray from back to front. This parent link also helps the
+ * GC to sweep properties iteratively.
+ *
+ * What if a property Y is deleted from a scope? If Y is the last property in
+ * the scope, we simply adjust the scope's lastProp member after we remove the
+ * scope's hash-table entry pointing at that property node. The parent link
+ * mentioned in the for..in sidebar above makes this adjustment O(1). But if
+ * Y comes between X and Z in the scope, then we might have to "fork" the tree
+ * at X, leaving X->Y->Z in case other scopes have those properties added in
+ * that order; and to finish the fork, we'd add a node labeled Z with the path
+ * X->Z, if it doesn't exist. This could lead to lots of extra nodes, and to
+ * O(n^2) growth when deleting lots of properties.
+ *
+ * Rather, for O(1) growth all around, we should share the path X->Y->Z among
+ * scopes having those three properties added in that order, and among scopes
+ * having only X->Z where Y was deleted. All such scopes have a lastProp that
+ * points to the Z child of Y. But a scope in which Y was deleted does not
+ * have a table entry for Y, and when iterating that scope by traversing the
+ * ancestor line from Z, we will have to test for a table entry for each node,
+ * skipping nodes that lack entries.
+ *
+ * What if we add Y again? X->Y->Z->Y is wrong and we'll enumerate Y twice.
+ * Therefore we must fork in such a case, if not earlier. Because delete is
+ * "bursty", we should not fork eagerly. Delaying a fork till we are at risk
+ * of adding Y after it was deleted already requires a flag in the JSScope, to
+ * wit, SCOPE_MIDDLE_DELETE.
+ *
+ * What about thread safety? If the property tree operations done by requests
+ * are find-node and insert-node, then the only hazard is duplicate insertion.
+ * This is harmless except for minor bloat. When all requests have ended or
+ * been suspended, the GC is free to sweep the tree after marking all nodes
+ * reachable from scopes, performing remove-node operations as needed. Note
+ * also that the stable storage of the property nodes during active requests
+ * permits the property cache (see jsinterp.h) to dereference JSScopeProperty
+ * weak references safely.
+ *
+ * Is the property tree worth it compared to property storage in each table's
+ * entries? To decide, we must find the relation <> between the words used
+ * with a property tree and the words required without a tree.
+ *
+ * Model all scopes as one super-scope of capacity T entries (T a power of 2).
+ * Let alpha be the load factor of this double hash-table. With the property
+ * tree, each entry in the table is a word-sized pointer to a node that can be
+ * shared by many scopes. But all such pointers are overhead compared to the
+ * situation without the property tree, where the table stores property nodes
+ * directly, as entries each of size S words. With the property tree, we need
+ * L=2 extra words per node for siblings and kids pointers. Without the tree,
+ * (1-alpha)*S*T words are wasted on free or removed sentinel-entries required
+ * by double hashing.
+ *
+ * Therefore,
+ *
+ * (property tree) <> (no property tree)
+ * N*(S+L) + T <> S*T
+ * N*(S+L) + T <> P*S + (1-alpha)*S*T
+ * N*(S+L) + alpha*T + (1-alpha)*T <> P*S + (1-alpha)*S*T
+ *
+ * Note that P is alpha*T by definition, so
+ *
+ * N*(S+L) + P + (1-alpha)*T <> P*S + (1-alpha)*S*T
+ * N*(S+L) <> P*S - P + (1-alpha)*S*T - (1-alpha)*T
+ * N*(S+L) <> (P + (1-alpha)*T) * (S-1)
+ * N*(S+L) <> (P + (1-alpha)*P/alpha) * (S-1)
+ * N*(S+L) <> P * (1/alpha) * (S-1)
+ *
+ * Let N = P*beta for a compression ratio beta, beta <= 1:
+ *
+ * P*beta*(S+L) <> P * (1/alpha) * (S-1)
+ * beta*(S+L) <> (S-1)/alpha
+ * beta <> (S-1)/((S+L)*alpha)
+ *
+ * For S = 6 (32-bit architectures) and L = 2, the property tree wins iff
+ *
+ * beta < 5/(8*alpha)
+ *
+ * We ensure that alpha <= .75, so the property tree wins if beta < .83_. An
+ * average beta from recent Mozilla browser startups was around .6.
+ *
+ * Can we reduce L? Observe that the property tree degenerates into a list of
+ * lists if at most one property Y follows X in all scopes. In or near such a
+ * case, we waste a word on the right-sibling link outside of the root ply of
+ * the tree. Note also that the root ply tends to be large, so O(n^2) growth
+ * searching it is likely, indicating the need for hashing (but with increased
+ * thread safety costs).
+ *
+ * If only K out of N nodes in the property tree have more than one child, we
+ * could eliminate the sibling link and overlay a children list or hash-table
+ * pointer on the leftmost-child link (which would then be either null or an
+ * only-child link; the overlay could be tagged in the low bit of the pointer,
+ * or flagged elsewhere in the property tree node, although such a flag must
+ * not be considered when comparing node labels during tree search).
+ *
+ * For such a system, L = 1 + (K * averageChildrenTableSize) / N instead of 2.
+ * If K << N, L approaches 1 and the property tree wins if beta < .95.
+ *
+ * We observe that fan-out below the root ply of the property tree appears to
+ * have extremely low degree (see the MeterPropertyTree code that histograms
+ * child-counts in jsscope.c), so instead of a hash-table we use a linked list
+ * of child node pointer arrays ("kid chunks"). The details are isolated in
+ * jsscope.c; others must treat JSScopeProperty.kids as opaque. We leave it
+ * strongly typed for debug-ability of the common (null or one-kid) cases.
+ *
+ * One final twist (can you stand it?): the mean number of entries per scope
+ * in Mozilla is < 5, with a large standard deviation (~8). Instead of always
+ * allocating scope->table, we leave it null while initializing all the other
+ * scope members as if it were non-null and minimal-length. Until a property
+ * is added that crosses the threshold of 6 or more entries for hashing, or
+ * until a "middle delete" occurs, we use linear search from scope->lastProp
+ * to find a given id, and save on the space overhead of a hash table.
+ */
+
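As a quick sanity check of the break-even derivation in the comment above, a small standalone C sketch that plugs in the values the comment assumes (S = 6 words per property on 32-bit targets, L = 2 linkage words, maximum load factor alpha = 0.75):

    #include <stdio.h>

    int main(void)
    {
        double S = 6.0;        /* words per property, 32-bit architectures */
        double L = 2.0;        /* tree-linkage words per node */
        double alpha = 0.75;   /* maximum hash-table load factor */

        /* beta must stay below (S-1)/((S+L)*alpha), i.e. 5/(8*alpha). */
        double beta_max = (S - 1.0) / ((S + L) * alpha);

        printf("break-even beta = %.4f\n", beta_max);   /* prints 0.8333 */
        return 0;
    }

This reproduces the ".83_" figure quoted above, against which the observed startup beta of roughly .6 wins comfortably.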
+struct JSScope {
+ JSObjectMap map; /* base class state */
+ JSObject *object; /* object that owns this scope */
+ uint8 flags; /* flags, see below */
+ int8 hashShift; /* multiplicative hash shift */
+ uint16 spare; /* reserved */
+ uint32 entryCount; /* number of entries in table */
+ uint32 removedCount; /* removed entry sentinels in table */
+ JSScopeProperty **table; /* table of ptrs to shared tree nodes */
+ JSScopeProperty *lastProp; /* pointer to last property added */
+#ifdef JS_THREADSAFE
+ JSContext *ownercx; /* creating context, NULL if shared */
+ JSThinLock lock; /* binary semaphore protecting scope */
+ union { /* union lockful and lock-free state: */
+ jsrefcount count; /* lock entry count for reentrancy */
+ JSScope *link; /* next link in rt->scopeSharingTodo */
+ } u;
+#ifdef DEBUG
+ const char *file[4]; /* file where lock was (re-)taken */
+ unsigned int line[4]; /* line where lock was (re-)taken */
+#endif
+#endif
+};
+
+#define OBJ_SCOPE(obj) ((JSScope *)(obj)->map)
+
+/* By definition, hashShift = JS_DHASH_BITS - log2(capacity). */
+#define SCOPE_CAPACITY(scope) JS_BIT(JS_DHASH_BITS-(scope)->hashShift)
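For concreteness, assuming JS_DHASH_BITS is 32 (its value in jsdhash.h): a scope whose table holds 16 entries stores hashShift = 32 - log2(16) = 28, and SCOPE_CAPACITY(scope) recovers JS_BIT(32 - 28) = 16.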
+
+/* Scope flags and some macros to hide them from other files than jsscope.c. */
+#define SCOPE_MIDDLE_DELETE 0x0001
+#define SCOPE_SEALED 0x0002
+
+#define SCOPE_HAD_MIDDLE_DELETE(scope) ((scope)->flags & SCOPE_MIDDLE_DELETE)
+#define SCOPE_SET_MIDDLE_DELETE(scope) ((scope)->flags |= SCOPE_MIDDLE_DELETE)
+#define SCOPE_CLR_MIDDLE_DELETE(scope) ((scope)->flags &= ~SCOPE_MIDDLE_DELETE)
+
+#define SCOPE_IS_SEALED(scope) ((scope)->flags & SCOPE_SEALED)
+#define SCOPE_SET_SEALED(scope) ((scope)->flags |= SCOPE_SEALED)
+#if 0
+/*
+ * Don't define this, it can't be done safely because JS_LOCK_OBJ will avoid
+ * taking the lock if the object owns its scope and the scope is sealed.
+ */
+#define SCOPE_CLR_SEALED(scope) ((scope)->flags &= ~SCOPE_SEALED)
+#endif
+
+/*
+ * A little information hiding for scope->lastProp, in case it ever becomes
+ * a tagged pointer again.
+ */
+#define SCOPE_LAST_PROP(scope) ((scope)->lastProp)
+#define SCOPE_REMOVE_LAST_PROP(scope) ((scope)->lastProp = \
+ (scope)->lastProp->parent)
+
+struct JSScopeProperty {
+ jsid id; /* int-tagged jsval/untagged JSAtom* */
+ JSPropertyOp getter; /* getter and setter hooks or objects */
+ JSPropertyOp setter;
+ uint32 slot; /* index in obj->slots vector */
+ uint8 attrs; /* attributes, see jsapi.h JSPROP_* */
+ uint8 flags; /* flags, see below for defines */
+ int16 shortid; /* tinyid, or local arg/var index */
+ JSScopeProperty *parent; /* parent node, reverse for..in order */
+ JSScopeProperty *kids; /* null, single child, or a tagged ptr
+ to many-kids data structure */
+};
+
+/* JSScopeProperty pointer tag bit indicating a collision. */
+#define SPROP_COLLISION ((jsuword)1)
+#define SPROP_REMOVED ((JSScopeProperty *) SPROP_COLLISION)
+
+/* Macros to get and set sprop pointer values and collision flags. */
+#define SPROP_IS_FREE(sprop) ((sprop) == NULL)
+#define SPROP_IS_REMOVED(sprop) ((sprop) == SPROP_REMOVED)
+#define SPROP_IS_LIVE(sprop) ((sprop) > SPROP_REMOVED)
+#define SPROP_FLAG_COLLISION(spp,sprop) (*(spp) = (JSScopeProperty *) \
+ ((jsuword)(sprop) | SPROP_COLLISION))
+#define SPROP_HAD_COLLISION(sprop) ((jsuword)(sprop) & SPROP_COLLISION)
+#define SPROP_FETCH(spp) SPROP_CLEAR_COLLISION(*(spp))
+
+#define SPROP_CLEAR_COLLISION(sprop) \
+ ((JSScopeProperty *) ((jsuword)(sprop) & ~SPROP_COLLISION))
+
+#define SPROP_STORE_PRESERVING_COLLISION(spp, sprop) \
+ (*(spp) = (JSScopeProperty *) ((jsuword)(sprop) \
+ | SPROP_HAD_COLLISION(*(spp))))
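The SPROP_* macros above are a low-bit pointer tag: the collision flag rides in bit 0 of the table entry, which is free because property nodes come from the pointer-aligned arena pool. A self-contained sketch of the same scheme using standard C types; the demo_* names are illustrative, and the only assumption is that the stand-in objects are at least 2-byte aligned:

    #include <assert.h>
    #include <stdint.h>

    #define DEMO_COLLISION ((uintptr_t)1)
    #define DEMO_CLEAR(p)  ((void *)((uintptr_t)(p) & ~DEMO_COLLISION))
    #define DEMO_HAD(p)    ((uintptr_t)(p) & DEMO_COLLISION)

    int main(void)
    {
        static long prop, other;   /* stand-ins for JSScopeProperty nodes */
        void *slot = &prop;

        /* SPROP_FLAG_COLLISION: remember that this entry collided. */
        slot = (void *)((uintptr_t)slot | DEMO_COLLISION);
        assert(DEMO_HAD(slot));
        assert(DEMO_CLEAR(slot) == (void *)&prop);   /* SPROP_FETCH-style */

        /* SPROP_STORE_PRESERVING_COLLISION: new pointer, same flag. */
        slot = (void *)((uintptr_t)&other | DEMO_HAD(slot));
        assert(DEMO_HAD(slot) && DEMO_CLEAR(slot) == (void *)&other);
        return 0;
    }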
+
+/* Bits stored in sprop->flags. */
+#define SPROP_MARK 0x01
+#define SPROP_IS_DUPLICATE 0x02
+#define SPROP_IS_ALIAS 0x04
+#define SPROP_HAS_SHORTID 0x08
+#define SPROP_IS_HIDDEN 0x10 /* a normally-hidden property,
+ e.g., function arg or var */
+
+/*
+ * If SPROP_HAS_SHORTID is set in sprop->flags, we use sprop->shortid rather
+ * than id when calling sprop's getter or setter.
+ */
+#define SPROP_USERID(sprop) \
+ (((sprop)->flags & SPROP_HAS_SHORTID) ? INT_TO_JSVAL((sprop)->shortid) \
+ : ID_TO_VALUE((sprop)->id))
+
+#define SPROP_INVALID_SLOT 0xffffffff
+
+#define SLOT_IN_SCOPE(slot,scope) ((slot) < (scope)->map.freeslot)
+#define SPROP_HAS_VALID_SLOT(sprop,scope) SLOT_IN_SCOPE((sprop)->slot, scope)
+
+#define SPROP_HAS_STUB_GETTER(sprop) (!(sprop)->getter)
+#define SPROP_HAS_STUB_SETTER(sprop) (!(sprop)->setter)
+
+/*
+ * NB: SPROP_GET must not be called if SPROP_HAS_STUB_GETTER(sprop).
+ */
+#define SPROP_GET(cx,sprop,obj,obj2,vp) \
+ (((sprop)->attrs & JSPROP_GETTER) \
+ ? js_InternalGetOrSet(cx, obj, (sprop)->id, \
+ OBJECT_TO_JSVAL((sprop)->getter), JSACC_READ, \
+ 0, 0, vp) \
+ : (sprop)->getter(cx, OBJ_THIS_OBJECT(cx,obj), SPROP_USERID(sprop), vp))
+
+/*
+ * NB: SPROP_SET must not be called if (SPROP_HAS_STUB_SETTER(sprop) &&
+ * !(sprop->attrs & JSPROP_GETTER)).
+ */
+#define SPROP_SET(cx,sprop,obj,obj2,vp) \
+ (((sprop)->attrs & JSPROP_SETTER) \
+ ? js_InternalGetOrSet(cx, obj, (sprop)->id, \
+ OBJECT_TO_JSVAL((sprop)->setter), JSACC_WRITE, \
+ 1, vp, vp) \
+ : ((sprop)->attrs & JSPROP_GETTER) \
+ ? (JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, \
+ JSMSG_GETTER_ONLY, NULL), JS_FALSE) \
+ : (sprop)->setter(cx, OBJ_THIS_OBJECT(cx,obj), SPROP_USERID(sprop), vp))
+
+/* Macro for common expression to test for shared permanent attributes. */
+#define SPROP_IS_SHARED_PERMANENT(sprop) \
+ ((~(sprop)->attrs & (JSPROP_SHARED | JSPROP_PERMANENT)) == 0)
+
+extern JSScope *
+js_GetMutableScope(JSContext *cx, JSObject *obj);
+
+extern JSScope *
+js_NewScope(JSContext *cx, jsrefcount nrefs, JSObjectOps *ops, JSClass *clasp,
+ JSObject *obj);
+
+extern void
+js_DestroyScope(JSContext *cx, JSScope *scope);
+
+#define ID_TO_VALUE(id) (JSID_IS_ATOM(id) ? ATOM_JSID_TO_JSVAL(id) : \
+ JSID_IS_OBJECT(id) ? OBJECT_JSID_TO_JSVAL(id) : \
+ (jsval)(id))
+#define HASH_ID(id) (JSID_IS_ATOM(id) ? JSID_TO_ATOM(id)->number : \
+ JSID_IS_OBJECT(id) ? (jsatomid) JSID_CLRTAG(id) : \
+ (jsatomid) JSID_TO_INT(id))
+
+extern JS_FRIEND_API(JSScopeProperty **)
+js_SearchScope(JSScope *scope, jsid id, JSBool adding);
+
+#define SCOPE_GET_PROPERTY(scope, id) \
+ SPROP_FETCH(js_SearchScope(scope, id, JS_FALSE))
+
+#define SCOPE_HAS_PROPERTY(scope, sprop) \
+ (SCOPE_GET_PROPERTY(scope, (sprop)->id) == (sprop))
+
+extern JSScopeProperty *
+js_AddScopeProperty(JSContext *cx, JSScope *scope, jsid id,
+ JSPropertyOp getter, JSPropertyOp setter, uint32 slot,
+ uintN attrs, uintN flags, intN shortid);
+
+extern JSScopeProperty *
+js_ChangeScopePropertyAttrs(JSContext *cx, JSScope *scope,
+ JSScopeProperty *sprop, uintN attrs, uintN mask,
+ JSPropertyOp getter, JSPropertyOp setter);
+
+extern JSBool
+js_RemoveScopeProperty(JSContext *cx, JSScope *scope, jsid id);
+
+extern void
+js_ClearScope(JSContext *cx, JSScope *scope);
+
+/*
+ * These macros used to inline short code sequences, but they grew over time.
+ * We retain them for internal backward compatibility, and in case one or both
+ * ever shrink to inline-able size.
+ */
+#define MARK_ID(cx,id) js_MarkId(cx, id)
+#define MARK_SCOPE_PROPERTY(cx,sprop) js_MarkScopeProperty(cx, sprop)
+
+extern void
+js_MarkId(JSContext *cx, jsid id);
+
+extern void
+js_MarkScopeProperty(JSContext *cx, JSScopeProperty *sprop);
+
+extern void
+js_SweepScopeProperties(JSRuntime *rt);
+
+extern JSBool
+js_InitPropertyTree(JSRuntime *rt);
+
+extern void
+js_FinishPropertyTree(JSRuntime *rt);
+
+#endif /* jsscope_h___ */
diff --git a/src/third_party/js-1.7/jsscript.c b/src/third_party/js-1.7/jsscript.c
new file mode 100644
index 00000000000..73298a4244e
--- /dev/null
+++ b/src/third_party/js-1.7/jsscript.c
@@ -0,0 +1,1717 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS script operations.
+ */
+#include "jsstddef.h"
+#include <string.h>
+#include "jstypes.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsprf.h"
+#include "jsapi.h"
+#include "jsatom.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsdbgapi.h"
+#include "jsemit.h"
+#include "jsfun.h"
+#include "jsinterp.h"
+#include "jslock.h"
+#include "jsnum.h"
+#include "jsopcode.h"
+#include "jsscript.h"
+#if JS_HAS_XDR
+#include "jsxdrapi.h"
+#endif
+
+#if JS_HAS_SCRIPT_OBJECT
+
+static const char js_script_exec[] = "Script.prototype.exec";
+static const char js_script_compile[] = "Script.prototype.compile";
+
+/*
+ * This routine requires that obj has been locked previously.
+ */
+static jsint
+GetScriptExecDepth(JSContext *cx, JSObject *obj)
+{
+ jsval v;
+
+ JS_ASSERT(JS_IS_OBJ_LOCKED(cx, obj));
+ v = LOCKED_OBJ_GET_SLOT(obj, JSSLOT_START(&js_ScriptClass));
+ return JSVAL_TO_INT(v);
+}
+
+static void
+AdjustScriptExecDepth(JSContext *cx, JSObject *obj, jsint delta)
+{
+ jsint execDepth;
+
+ JS_LOCK_OBJ(cx, obj);
+ execDepth = GetScriptExecDepth(cx, obj);
+ LOCKED_OBJ_SET_SLOT(obj, JSSLOT_START(&js_ScriptClass),
+ INT_TO_JSVAL(execDepth + delta));
+ JS_UNLOCK_OBJ(cx, obj);
+}
+
+#if JS_HAS_TOSOURCE
+static JSBool
+script_toSource(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ uint32 indent;
+ JSScript *script;
+ size_t i, j, k, n;
+ char buf[16];
+ jschar *s, *t;
+ JSString *str;
+
+ if (!JS_InstanceOf(cx, obj, &js_ScriptClass, argv))
+ return JS_FALSE;
+
+ indent = 0;
+ if (argc && !js_ValueToECMAUint32(cx, argv[0], &indent))
+ return JS_FALSE;
+
+ script = (JSScript *) JS_GetPrivate(cx, obj);
+
+ /* Let n count the source string length, j the "front porch" length. */
+ j = JS_snprintf(buf, sizeof buf, "(new %s(", js_ScriptClass.name);
+ n = j + 2;
+ if (!script) {
+ /* Let k count the constructor argument string length. */
+ k = 0;
+ s = NULL; /* quell GCC overwarning */
+ } else {
+ str = JS_DecompileScript(cx, script, "Script.prototype.toSource",
+ (uintN)indent);
+ if (!str)
+ return JS_FALSE;
+ str = js_QuoteString(cx, str, '\'');
+ if (!str)
+ return JS_FALSE;
+ s = JSSTRING_CHARS(str);
+ k = JSSTRING_LENGTH(str);
+ n += k;
+ }
+
+ /* Allocate the source string and copy into it. */
+ t = (jschar *) JS_malloc(cx, (n + 1) * sizeof(jschar));
+ if (!t)
+ return JS_FALSE;
+ for (i = 0; i < j; i++)
+ t[i] = buf[i];
+ for (j = 0; j < k; i++, j++)
+ t[i] = s[j];
+ t[i++] = ')';
+ t[i++] = ')';
+ t[i] = 0;
+
+ /* Create and return a JS string for t. */
+ str = JS_NewUCString(cx, t, n);
+ if (!str) {
+ JS_free(cx, t);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+#endif /* JS_HAS_TOSOURCE */
+
+static JSBool
+script_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ uint32 indent;
+ JSScript *script;
+ JSString *str;
+
+ indent = 0;
+ if (argc && !js_ValueToECMAUint32(cx, argv[0], &indent))
+ return JS_FALSE;
+
+ if (!JS_InstanceOf(cx, obj, &js_ScriptClass, argv))
+ return JS_FALSE;
+ script = (JSScript *) JS_GetPrivate(cx, obj);
+ if (!script) {
+ *rval = STRING_TO_JSVAL(cx->runtime->emptyString);
+ return JS_TRUE;
+ }
+
+ str = JS_DecompileScript(cx, script, "Script.prototype.toString",
+ (uintN)indent);
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+script_compile(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str;
+ JSObject *scopeobj;
+ jsval v;
+ JSScript *script, *oldscript;
+ JSStackFrame *fp, *caller;
+ const char *file;
+ uintN line;
+ JSPrincipals *principals;
+ jsint execDepth;
+
+ /* Make sure obj is a Script object. */
+ if (!JS_InstanceOf(cx, obj, &js_ScriptClass, argv))
+ return JS_FALSE;
+
+ /* If no args, leave private undefined and return early. */
+ if (argc == 0)
+ goto out;
+
+ /* Otherwise, the first arg is the script source to compile. */
+ str = js_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(str);
+
+ scopeobj = NULL;
+ if (argc >= 2) {
+ if (!js_ValueToObject(cx, argv[1], &scopeobj))
+ return JS_FALSE;
+ argv[1] = OBJECT_TO_JSVAL(scopeobj);
+ }
+
+ /* Compile using the caller's scope chain, which js_Invoke passes to fp. */
+ fp = cx->fp;
+ caller = JS_GetScriptedCaller(cx, fp);
+ JS_ASSERT(!caller || fp->scopeChain == caller->scopeChain);
+
+ if (caller) {
+ if (!scopeobj) {
+ scopeobj = js_GetScopeChain(cx, caller);
+ if (!scopeobj)
+ return JS_FALSE;
+ fp->scopeChain = scopeobj; /* for the compiler's benefit */
+ }
+
+ principals = JS_EvalFramePrincipals(cx, fp, caller);
+ if (principals == caller->script->principals) {
+ file = caller->script->filename;
+ line = js_PCToLineNumber(cx, caller->script, caller->pc);
+ } else {
+ file = principals->codebase;
+ line = 0;
+ }
+ } else {
+ file = NULL;
+ line = 0;
+ principals = NULL;
+ }
+
+ /* Ensure we compile this script with the right (inner) principals. */
+ scopeobj = js_CheckScopeChainValidity(cx, scopeobj, js_script_compile);
+ if (!scopeobj)
+ return JS_FALSE;
+
+ /*
+ * Compile the new script using the caller's scope chain, a la eval().
+ * Unlike jsobj.c:obj_eval, however, we do not set JSFRAME_EVAL in fp's
+ * flags, because compilation is here separated from execution, and the
+ * run-time scope chain may not match the compile-time. JSFRAME_EVAL is
+ * tested in jsemit.c and jsscan.c to optimize based on identity of run-
+ * and compile-time scope.
+ */
+ fp->flags |= JSFRAME_SCRIPT_OBJECT;
+ script = JS_CompileUCScriptForPrincipals(cx, scopeobj, principals,
+ JSSTRING_CHARS(str),
+ JSSTRING_LENGTH(str),
+ file, line);
+ if (!script)
+ return JS_FALSE;
+
+ JS_LOCK_OBJ(cx, obj);
+ execDepth = GetScriptExecDepth(cx, obj);
+
+ /*
+ * execDepth must be 0 to allow compilation here, otherwise the JSScript
+ * struct can be released while running.
+ */
+ if (execDepth > 0) {
+ JS_UNLOCK_OBJ(cx, obj);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_COMPILE_EXECED_SCRIPT);
+ return JS_FALSE;
+ }
+
+ /* Swap script for obj's old script, if any. */
+ v = LOCKED_OBJ_GET_SLOT(obj, JSSLOT_PRIVATE);
+ oldscript = !JSVAL_IS_VOID(v) ? (JSScript *) JSVAL_TO_PRIVATE(v) : NULL;
+ LOCKED_OBJ_SET_SLOT(obj, JSSLOT_PRIVATE, PRIVATE_TO_JSVAL(script));
+ JS_UNLOCK_OBJ(cx, obj);
+
+ if (oldscript)
+ js_DestroyScript(cx, oldscript);
+
+ script->object = obj;
+ js_CallNewScriptHook(cx, script, NULL);
+
+out:
+ /* Return the object. */
+ *rval = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+}
+
+static JSBool
+script_exec(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSObject *scopeobj, *parent;
+ JSStackFrame *fp, *caller;
+ JSScript *script;
+ JSBool ok;
+
+ if (!JS_InstanceOf(cx, obj, &js_ScriptClass, argv))
+ return JS_FALSE;
+
+ scopeobj = NULL;
+ if (argc) {
+ if (!js_ValueToObject(cx, argv[0], &scopeobj))
+ return JS_FALSE;
+ argv[0] = OBJECT_TO_JSVAL(scopeobj);
+ }
+
+ /*
+ * Emulate eval() by using caller's this, var object, sharp array, etc.,
+ * all propagated by js_Execute via a non-null fourth (down) argument to
+ * js_Execute. If there is no scripted caller, js_Execute uses its second
+ * (chain) argument to set the exec frame's varobj, thisp, and scopeChain.
+ *
+ * Unlike eval, which the compiler detects, Script.prototype.exec may be
+ * called from a lightweight function, or even from native code (in which
+ * case fp->varobj and fp->scopeChain are null). If exec is called from
+ * a lightweight function, we will need to get a Call object representing
+ * its frame, to act as the var object and scope chain head.
+ */
+ fp = cx->fp;
+ caller = JS_GetScriptedCaller(cx, fp);
+ if (caller && !caller->varobj) {
+ /* Called from a lightweight function. */
+ JS_ASSERT(caller->fun && !JSFUN_HEAVYWEIGHT_TEST(caller->fun->flags));
+
+ /* Scope chain links from Call object to callee's parent. */
+ parent = OBJ_GET_PARENT(cx, JSVAL_TO_OBJECT(caller->argv[-2]));
+ if (!js_GetCallObject(cx, caller, parent))
+ return JS_FALSE;
+ }
+
+ if (!scopeobj) {
+ /* No scope object passed in: try to use the caller's scope chain. */
+ if (caller) {
+ /*
+ * Load caller->scopeChain after the conditional js_GetCallObject
+ * call above, which resets scopeChain as well as varobj.
+ */
+ scopeobj = js_GetScopeChain(cx, caller);
+ if (!scopeobj)
+ return JS_FALSE;
+ } else {
+ /*
+ * Called from native code, so we don't know what scope object to
+ * use. We could use parent (see above), but Script.prototype.exec
+ * might be a shared/sealed "superglobal" method. A more general
+ * approach would use cx->globalObject, which will be the same as
+ * exec.__parent__ in the non-superglobal case. In the superglobal
+ * case it's the right object: the global, not the superglobal.
+ */
+ scopeobj = cx->globalObject;
+ }
+ }
+
+ scopeobj = js_CheckScopeChainValidity(cx, scopeobj, js_script_exec);
+ if (!scopeobj)
+ return JS_FALSE;
+
+ /* Keep track of nesting depth for the script. */
+ AdjustScriptExecDepth(cx, obj, 1);
+
+ /* Must get to out label after this */
+ script = (JSScript *) JS_GetPrivate(cx, obj);
+ if (!script) {
+ ok = JS_FALSE;
+ goto out;
+ }
+
+ /* Belt-and-braces: check that this script object has access to scopeobj. */
+ ok = js_CheckPrincipalsAccess(cx, scopeobj, script->principals,
+ CLASS_ATOM(cx, Script));
+ if (!ok)
+ goto out;
+
+ ok = js_Execute(cx, scopeobj, script, caller, JSFRAME_EVAL, rval);
+
+out:
+ AdjustScriptExecDepth(cx, obj, -1);
+ return ok;
+}
+
+#if JS_HAS_XDR
+
+static JSBool
+XDRAtomMap(JSXDRState *xdr, JSAtomMap *map)
+{
+ JSContext *cx;
+ uint32 natoms, i, index;
+ JSAtom **atoms;
+
+ cx = xdr->cx;
+
+ if (xdr->mode == JSXDR_ENCODE)
+ natoms = (uint32)map->length;
+
+ if (!JS_XDRUint32(xdr, &natoms))
+ return JS_FALSE;
+
+ if (xdr->mode == JSXDR_ENCODE) {
+ atoms = map->vector;
+ } else {
+ if (natoms == 0) {
+ atoms = NULL;
+ } else {
+ atoms = (JSAtom **) JS_malloc(cx, (size_t)natoms * sizeof *atoms);
+ if (!atoms)
+ return JS_FALSE;
+#ifdef DEBUG
+ memset(atoms, 0, (size_t)natoms * sizeof *atoms);
+#endif
+ }
+
+ map->vector = atoms;
+ map->length = natoms;
+ }
+
+ for (i = 0; i != natoms; ++i) {
+ if (xdr->mode == JSXDR_ENCODE)
+ index = i;
+ if (!JS_XDRUint32(xdr, &index))
+ goto bad;
+
+ /*
+ * Assert that, when decoding, the read index is valid and points to
+ * an unoccupied element of atoms array.
+ */
+ JS_ASSERT(index < natoms);
+ JS_ASSERT(xdr->mode == JSXDR_ENCODE || !atoms[index]);
+ if (!js_XDRAtom(xdr, &atoms[index]))
+ goto bad;
+ }
+
+ return JS_TRUE;
+
+ bad:
+ if (xdr->mode == JSXDR_DECODE) {
+ JS_free(cx, atoms);
+ map->vector = NULL;
+ map->length = 0;
+ }
+
+ return JS_FALSE;
+}
+
+JSBool
+js_XDRScript(JSXDRState *xdr, JSScript **scriptp, JSBool *hasMagic)
+{
+ JSContext *cx;
+ JSScript *script, *newscript, *oldscript;
+ uint32 length, lineno, depth, magic, nsrcnotes, ntrynotes;
+ uint32 prologLength, version;
+ JSBool filenameWasSaved;
+ jssrcnote *notes, *sn;
+
+ cx = xdr->cx;
+ script = *scriptp;
+ nsrcnotes = ntrynotes = 0;
+ filenameWasSaved = JS_FALSE;
+ notes = NULL;
+
+ /*
+ * Encode prologLength and version after script->length (_2 or greater),
+ * but decode both new (>= _2) and old, prolog&version-free (_1) scripts.
+ * Version _3 supports principals serialization. Version _4 reorders the
+ * nsrcnotes and ntrynotes fields to come before everything except magic,
+ * length, prologLength, and version, so that srcnote and trynote storage
+ * can be allocated as part of the JSScript (along with bytecode storage).
+ *
+ * So far, the magic number has not changed for every jsopcode.tbl change.
+ * We stipulate forward compatibility by requiring old bytecodes never to
+ * change or go away (modulo a few exceptions before the XDR interfaces
+ * evolved, and a few exceptions during active trunk development). With
+ * the addition of JSOP_STOP to support JS_THREADED_INTERP, we make a new
+ * magic number (_5) so that we know to append JSOP_STOP to old scripts
+ * when deserializing.
+ */
+ if (xdr->mode == JSXDR_ENCODE)
+ magic = JSXDR_MAGIC_SCRIPT_CURRENT;
+ if (!JS_XDRUint32(xdr, &magic))
+ return JS_FALSE;
+ JS_ASSERT((uint32)JSXDR_MAGIC_SCRIPT_5 - (uint32)JSXDR_MAGIC_SCRIPT_1 == 4);
+ if (magic - (uint32)JSXDR_MAGIC_SCRIPT_1 > 4) {
+ if (!hasMagic) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_SCRIPT_MAGIC);
+ return JS_FALSE;
+ }
+ *hasMagic = JS_FALSE;
+ return JS_TRUE;
+ }
+ if (hasMagic)
+ *hasMagic = JS_TRUE;
+
+ if (xdr->mode == JSXDR_ENCODE) {
+ length = script->length;
+ prologLength = PTRDIFF(script->main, script->code, jsbytecode);
+ JS_ASSERT((int16)script->version != JSVERSION_UNKNOWN);
+ version = (uint32)script->version | (script->numGlobalVars << 16);
+ lineno = (uint32)script->lineno;
+ depth = (uint32)script->depth;
+
+ /* Count the srcnotes, keeping notes pointing at the first one. */
+ notes = SCRIPT_NOTES(script);
+ for (sn = notes; !SN_IS_TERMINATOR(sn); sn = SN_NEXT(sn))
+ continue;
+ nsrcnotes = PTRDIFF(sn, notes, jssrcnote);
+ nsrcnotes++; /* room for the terminator */
+
+ /* Count the trynotes. */
+ if (script->trynotes) {
+ while (script->trynotes[ntrynotes].catchStart)
+ ntrynotes++;
+ ntrynotes++; /* room for the end marker */
+ }
+ }
+
+ if (!JS_XDRUint32(xdr, &length))
+ return JS_FALSE;
+ if (magic >= JSXDR_MAGIC_SCRIPT_2) {
+ if (!JS_XDRUint32(xdr, &prologLength))
+ return JS_FALSE;
+ if (!JS_XDRUint32(xdr, &version))
+ return JS_FALSE;
+
+ /* To fuse allocations, we need srcnote and trynote counts early. */
+ if (magic >= JSXDR_MAGIC_SCRIPT_4) {
+ if (!JS_XDRUint32(xdr, &nsrcnotes))
+ return JS_FALSE;
+ if (!JS_XDRUint32(xdr, &ntrynotes))
+ return JS_FALSE;
+ }
+ }
+
+ if (xdr->mode == JSXDR_DECODE) {
+ size_t alloclength = length;
+ if (magic < JSXDR_MAGIC_SCRIPT_5)
+ ++alloclength; /* add a byte for JSOP_STOP */
+
+ script = js_NewScript(cx, alloclength, nsrcnotes, ntrynotes);
+ if (!script)
+ return JS_FALSE;
+ if (magic >= JSXDR_MAGIC_SCRIPT_2) {
+ script->main += prologLength;
+ script->version = (JSVersion) (version & 0xffff);
+ script->numGlobalVars = (uint16) (version >> 16);
+
+ /* If we know nsrcnotes, we allocated space for notes in script. */
+ if (magic >= JSXDR_MAGIC_SCRIPT_4)
+ notes = SCRIPT_NOTES(script);
+ }
+ *scriptp = script;
+ }
+
+ /*
+ * Control hereafter must goto error on failure, in order for the DECODE
+ * case to destroy script and conditionally free notes, which if non-null
+ * in the (DECODE and magic < _4) case must point at a temporary vector
+ * allocated just below.
+ */
+ oldscript = xdr->script;
+ xdr->script = script;
+ if (!JS_XDRBytes(xdr, (char *)script->code, length * sizeof(jsbytecode)) ||
+ !XDRAtomMap(xdr, &script->atomMap)) {
+ goto error;
+ }
+
+ if (magic < JSXDR_MAGIC_SCRIPT_5) {
+ if (xdr->mode == JSXDR_DECODE) {
+ /*
+ * Append JSOP_STOP to old scripts, to relieve the interpreter
+ * from having to bounds-check pc. Also take care to increment
+ * length, as it is used below and must count all bytecode.
+ */
+ script->code[length++] = JSOP_STOP;
+ }
+
+ if (magic < JSXDR_MAGIC_SCRIPT_4) {
+ if (!JS_XDRUint32(xdr, &nsrcnotes))
+ goto error;
+ if (xdr->mode == JSXDR_DECODE) {
+ notes = (jssrcnote *)
+ JS_malloc(cx, nsrcnotes * sizeof(jssrcnote));
+ if (!notes)
+ goto error;
+ }
+ }
+ }
+
+ if (!JS_XDRBytes(xdr, (char *)notes, nsrcnotes * sizeof(jssrcnote)) ||
+ !JS_XDRCStringOrNull(xdr, (char **)&script->filename) ||
+ !JS_XDRUint32(xdr, &lineno) ||
+ !JS_XDRUint32(xdr, &depth) ||
+ (magic < JSXDR_MAGIC_SCRIPT_4 && !JS_XDRUint32(xdr, &ntrynotes))) {
+ goto error;
+ }
+
+ /* Script principals transcoding support comes with versions >= _3. */
+ if (magic >= JSXDR_MAGIC_SCRIPT_3) {
+ JSPrincipals *principals;
+ uint32 encodeable;
+
+ if (xdr->mode == JSXDR_ENCODE) {
+ principals = script->principals;
+ encodeable = (cx->runtime->principalsTranscoder != NULL);
+ if (!JS_XDRUint32(xdr, &encodeable))
+ goto error;
+ if (encodeable &&
+ !cx->runtime->principalsTranscoder(xdr, &principals)) {
+ goto error;
+ }
+ } else {
+ if (!JS_XDRUint32(xdr, &encodeable))
+ goto error;
+ if (encodeable) {
+ if (!cx->runtime->principalsTranscoder) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_CANT_DECODE_PRINCIPALS);
+ goto error;
+ }
+ if (!cx->runtime->principalsTranscoder(xdr, &principals))
+ goto error;
+ script->principals = principals;
+ }
+ }
+ }
+
+ if (xdr->mode == JSXDR_DECODE) {
+ const char *filename = script->filename;
+ if (filename) {
+ filename = js_SaveScriptFilename(cx, filename);
+ if (!filename)
+ goto error;
+ JS_free(cx, (void *) script->filename);
+ script->filename = filename;
+ filenameWasSaved = JS_TRUE;
+ }
+ script->lineno = (uintN)lineno;
+ script->depth = (uintN)depth;
+
+ if (magic < JSXDR_MAGIC_SCRIPT_4) {
+ /*
+ * Argh, we have to reallocate script, copy notes into the extra
+ * space after the bytecodes, and free the temporary notes vector.
+ * First, add enough slop to nsrcnotes so we can align the address
+ * after the srcnotes of the first trynote.
+ */
+ uint32 osrcnotes = nsrcnotes;
+
+ if (ntrynotes)
+ nsrcnotes += JSTRYNOTE_ALIGNMASK;
+ newscript = (JSScript *) JS_realloc(cx, script,
+ sizeof(JSScript) +
+ length * sizeof(jsbytecode) +
+ nsrcnotes * sizeof(jssrcnote) +
+ ntrynotes * sizeof(JSTryNote));
+ if (!newscript)
+ goto error;
+
+ *scriptp = script = newscript;
+ script->code = (jsbytecode *)(script + 1);
+ script->main = script->code + prologLength;
+ memcpy(script->code + length, notes, osrcnotes * sizeof(jssrcnote));
+ JS_free(cx, (void *) notes);
+ notes = NULL;
+ if (ntrynotes) {
+ script->trynotes = (JSTryNote *)
+ ((jsword)(SCRIPT_NOTES(script) + nsrcnotes) &
+ ~(jsword)JSTRYNOTE_ALIGNMASK);
+ memset(script->trynotes, 0, ntrynotes * sizeof(JSTryNote));
+ }
+ }
+ }
+
+ while (ntrynotes) {
+ JSTryNote *tn = &script->trynotes[--ntrynotes];
+ uint32 start = (uint32) tn->start,
+ catchLength = (uint32) tn->length,
+ catchStart = (uint32) tn->catchStart;
+
+ if (!JS_XDRUint32(xdr, &start) ||
+ !JS_XDRUint32(xdr, &catchLength) ||
+ !JS_XDRUint32(xdr, &catchStart)) {
+ goto error;
+ }
+ tn->start = (ptrdiff_t) start;
+ tn->length = (ptrdiff_t) catchLength;
+ tn->catchStart = (ptrdiff_t) catchStart;
+ }
+
+ xdr->script = oldscript;
+ return JS_TRUE;
+
+ error:
+ if (xdr->mode == JSXDR_DECODE) {
+ if (script->filename && !filenameWasSaved) {
+ JS_free(cx, (void *) script->filename);
+ script->filename = NULL;
+ }
+ if (notes && magic < JSXDR_MAGIC_SCRIPT_4)
+ JS_free(cx, (void *) notes);
+ js_DestroyScript(cx, script);
+ *scriptp = NULL;
+ }
+ return JS_FALSE;
+}
+
+#if JS_HAS_XDR_FREEZE_THAW
+/*
+ * These cannot be exposed to web content, and chrome does not need them, so
+ * we take them out of the Mozilla client altogether. Fortunately, there is
+ * no way to serialize a native function (see fun_xdrObject in jsfun.c).
+ */
+
+static JSBool
+script_freeze(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXDRState *xdr;
+ JSScript *script;
+ JSBool ok, hasMagic;
+ uint32 len;
+ void *buf;
+ JSString *str;
+
+ if (!JS_InstanceOf(cx, obj, &js_ScriptClass, argv))
+ return JS_FALSE;
+ script = (JSScript *) JS_GetPrivate(cx, obj);
+ if (!script)
+ return JS_TRUE;
+
+ /* create new XDR */
+ xdr = JS_XDRNewMem(cx, JSXDR_ENCODE);
+ if (!xdr)
+ return JS_FALSE;
+
+ /* write */
+ ok = js_XDRScript(xdr, &script, &hasMagic);
+ if (!ok)
+ goto out;
+ if (!hasMagic) {
+ *rval = JSVAL_VOID;
+ goto out;
+ }
+
+ buf = JS_XDRMemGetData(xdr, &len);
+ if (!buf) {
+ ok = JS_FALSE;
+ goto out;
+ }
+
+ JS_ASSERT((jsword)buf % sizeof(jschar) == 0);
+ len /= sizeof(jschar);
+ str = JS_NewUCStringCopyN(cx, (jschar *)buf, len);
+ if (!str) {
+ ok = JS_FALSE;
+ goto out;
+ }
+
+#if IS_BIG_ENDIAN
+ {
+ jschar *chars;
+ uint32 i;
+
+ /* Swap bytes in Unichars to keep frozen strings machine-independent. */
+ chars = JS_GetStringChars(str);
+ for (i = 0; i < len; i++)
+ chars[i] = JSXDR_SWAB16(chars[i]);
+ }
+#endif
+ *rval = STRING_TO_JSVAL(str);
+
+out:
+ JS_XDRDestroy(xdr);
+ return ok;
+}
+
+static JSBool
+script_thaw(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXDRState *xdr;
+ JSString *str;
+ void *buf;
+ uint32 len;
+ jsval v;
+ JSScript *script, *oldscript;
+    JSBool ok, hasMagic;
+    jsint execDepth;
+
+ if (!JS_InstanceOf(cx, obj, &js_ScriptClass, argv))
+ return JS_FALSE;
+
+ if (argc == 0)
+ return JS_TRUE;
+ str = js_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(str);
+
+ /* create new XDR */
+ xdr = JS_XDRNewMem(cx, JSXDR_DECODE);
+ if (!xdr)
+ return JS_FALSE;
+
+ buf = JS_GetStringChars(str);
+ len = JS_GetStringLength(str);
+#if IS_BIG_ENDIAN
+ {
+ jschar *from, *to;
+ uint32 i;
+
+ /* Swap bytes in Unichars to keep frozen strings machine-independent. */
+ from = (jschar *)buf;
+ to = (jschar *) JS_malloc(cx, len * sizeof(jschar));
+ if (!to) {
+ JS_XDRDestroy(xdr);
+ return JS_FALSE;
+ }
+ for (i = 0; i < len; i++)
+ to[i] = JSXDR_SWAB16(from[i]);
+ buf = (char *)to;
+ }
+#endif
+ len *= sizeof(jschar);
+ JS_XDRMemSetData(xdr, buf, len);
+
+ /* XXXbe should magic mismatch be error, or false return value? */
+ ok = js_XDRScript(xdr, &script, &hasMagic);
+ if (!ok)
+ goto out;
+ if (!hasMagic) {
+ *rval = JSVAL_FALSE;
+ goto out;
+ }
+
+ JS_LOCK_OBJ(cx, obj);
+ execDepth = GetScriptExecDepth(cx, obj);
+
+ /*
+ * execDepth must be 0 to allow compilation here, otherwise the JSScript
+ * struct can be released while running.
+ */
+ if (execDepth > 0) {
+ JS_UNLOCK_OBJ(cx, obj);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_COMPILE_EXECED_SCRIPT);
+ goto out;
+ }
+
+ /* Swap script for obj's old script, if any. */
+    v = LOCKED_OBJ_GET_SLOT(obj, JSSLOT_PRIVATE);
+    oldscript = !JSVAL_IS_VOID(v) ? (JSScript *) JSVAL_TO_PRIVATE(v) : NULL;
+    LOCKED_OBJ_SET_SLOT(obj, JSSLOT_PRIVATE, PRIVATE_TO_JSVAL(script));
+ JS_UNLOCK_OBJ(cx, obj);
+
+ if (oldscript)
+ js_DestroyScript(cx, oldscript);
+
+ script->object = obj;
+ js_CallNewScriptHook(cx, script, NULL);
+
+out:
+ /*
+ * We reset the buffer to be NULL so that it doesn't free the chars
+ * memory owned by str (argv[0]).
+ */
+ JS_XDRMemSetData(xdr, NULL, 0);
+ JS_XDRDestroy(xdr);
+#if IS_BIG_ENDIAN
+ JS_free(cx, buf);
+#endif
+ *rval = JSVAL_TRUE;
+ return ok;
+}
+
+static const char js_thaw_str[] = "thaw";
+
+#endif /* JS_HAS_XDR_FREEZE_THAW */
+#endif /* JS_HAS_XDR */
+
+static JSFunctionSpec script_methods[] = {
+#if JS_HAS_TOSOURCE
+ {js_toSource_str, script_toSource, 0,0,0},
+#endif
+ {js_toString_str, script_toString, 0,0,0},
+ {"compile", script_compile, 2,0,0},
+ {"exec", script_exec, 1,0,0},
+#if JS_HAS_XDR_FREEZE_THAW
+ {"freeze", script_freeze, 0,0,0},
+ {js_thaw_str, script_thaw, 1,0,0},
+#endif /* JS_HAS_XDR_FREEZE_THAW */
+ {0,0,0,0,0}
+};
+
+#endif /* JS_HAS_SCRIPT_OBJECT */
+
+static void
+script_finalize(JSContext *cx, JSObject *obj)
+{
+ JSScript *script;
+
+ script = (JSScript *) JS_GetPrivate(cx, obj);
+ if (script)
+ js_DestroyScript(cx, script);
+}
+
+static JSBool
+script_call(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+#if JS_HAS_SCRIPT_OBJECT
+ return script_exec(cx, JSVAL_TO_OBJECT(argv[-2]), argc, argv, rval);
+#else
+ return JS_FALSE;
+#endif
+}
+
+static uint32
+script_mark(JSContext *cx, JSObject *obj, void *arg)
+{
+ JSScript *script;
+
+ script = (JSScript *) JS_GetPrivate(cx, obj);
+ if (script)
+ js_MarkScript(cx, script);
+ return 0;
+}
+
+#if !JS_HAS_SCRIPT_OBJECT
+const char js_Script_str[] = "Script";
+
+#define JSProto_Script JSProto_Object
+#endif
+
+JS_FRIEND_DATA(JSClass) js_ScriptClass = {
+ js_Script_str,
+ JSCLASS_HAS_PRIVATE | JSCLASS_HAS_CACHED_PROTO(JSProto_Script) |
+ JSCLASS_HAS_RESERVED_SLOTS(1),
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, script_finalize,
+ NULL, NULL, script_call, NULL,/*XXXbe xdr*/
+ NULL, NULL, script_mark, 0
+};
+
+#if JS_HAS_SCRIPT_OBJECT
+
+static JSBool
+Script(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ /* If not constructing, replace obj with a new Script object. */
+ if (!(cx->fp->flags & JSFRAME_CONSTRUCTING)) {
+ obj = js_NewObject(cx, &js_ScriptClass, NULL, NULL);
+ if (!obj)
+ return JS_FALSE;
+
+ /*
+ * script_compile does not use rval to root its temporaries
+ * so we can use it to root obj.
+ */
+ *rval = OBJECT_TO_JSVAL(obj);
+ }
+
+ if (!JS_SetReservedSlot(cx, obj, 0, INT_TO_JSVAL(0)))
+ return JS_FALSE;
+
+ return script_compile(cx, obj, argc, argv, rval);
+}
+
+#if JS_HAS_XDR_FREEZE_THAW
+
+static JSBool
+script_static_thaw(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ obj = js_NewObject(cx, &js_ScriptClass, NULL, NULL);
+ if (!obj)
+ return JS_FALSE;
+ if (!script_thaw(cx, obj, argc, argv, rval))
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+}
+
+static JSFunctionSpec script_static_methods[] = {
+ {js_thaw_str, script_static_thaw, 1,0,0},
+ {0,0,0,0,0}
+};
+
+#else /* !JS_HAS_XDR_FREEZE_THAW */
+
+#define script_static_methods NULL
+
+#endif /* !JS_HAS_XDR_FREEZE_THAW */
+
+JSObject *
+js_InitScriptClass(JSContext *cx, JSObject *obj)
+{
+ return JS_InitClass(cx, obj, NULL, &js_ScriptClass, Script, 1,
+ NULL, script_methods, NULL, script_static_methods);
+}
+
+#endif /* JS_HAS_SCRIPT_OBJECT */
+
+/*
+ * Shared script filename management.
+ */
+JS_STATIC_DLL_CALLBACK(int)
+js_compare_strings(const void *k1, const void *k2)
+{
+ return strcmp(k1, k2) == 0;
+}
+
+/* Shared with jsatom.c to save code space. */
+extern void * JS_DLL_CALLBACK
+js_alloc_table_space(void *priv, size_t size);
+
+extern void JS_DLL_CALLBACK
+js_free_table_space(void *priv, void *item);
+
+/* NB: This struct overlays JSHashEntry -- see jshash.h, do not reorganize. */
+typedef struct ScriptFilenameEntry {
+ JSHashEntry *next; /* hash chain linkage */
+ JSHashNumber keyHash; /* key hash function result */
+ const void *key; /* ptr to filename, below */
+ uint32 flags; /* user-defined filename prefix flags */
+ JSPackedBool mark; /* GC mark flag */
+ char filename[3]; /* two or more bytes, NUL-terminated */
+} ScriptFilenameEntry;
+
+JS_STATIC_DLL_CALLBACK(JSHashEntry *)
+js_alloc_sftbl_entry(void *priv, const void *key)
+{
+ size_t nbytes = offsetof(ScriptFilenameEntry, filename) + strlen(key) + 1;
+
+ return (JSHashEntry *) malloc(JS_MAX(nbytes, sizeof(JSHashEntry)));
+}
+
+JS_STATIC_DLL_CALLBACK(void)
+js_free_sftbl_entry(void *priv, JSHashEntry *he, uintN flag)
+{
+ if (flag != HT_FREE_ENTRY)
+ return;
+ free(he);
+}
+
+static JSHashAllocOps sftbl_alloc_ops = {
+ js_alloc_table_space, js_free_table_space,
+ js_alloc_sftbl_entry, js_free_sftbl_entry
+};
+
+JSBool
+js_InitRuntimeScriptState(JSRuntime *rt)
+{
+#ifdef JS_THREADSAFE
+ JS_ASSERT(!rt->scriptFilenameTableLock);
+ rt->scriptFilenameTableLock = JS_NEW_LOCK();
+ if (!rt->scriptFilenameTableLock)
+ return JS_FALSE;
+#endif
+ JS_ASSERT(!rt->scriptFilenameTable);
+ rt->scriptFilenameTable =
+ JS_NewHashTable(16, JS_HashString, js_compare_strings, NULL,
+ &sftbl_alloc_ops, NULL);
+ if (!rt->scriptFilenameTable) {
+ js_FinishRuntimeScriptState(rt); /* free lock if threadsafe */
+ return JS_FALSE;
+ }
+ JS_INIT_CLIST(&rt->scriptFilenamePrefixes);
+ return JS_TRUE;
+}
+
+typedef struct ScriptFilenamePrefix {
+ JSCList links; /* circular list linkage for easy deletion */
+ const char *name; /* pointer to pinned ScriptFilenameEntry string */
+ size_t length; /* prefix string length, precomputed */
+ uint32 flags; /* user-defined flags to inherit from this prefix */
+} ScriptFilenamePrefix;
+
+void
+js_FinishRuntimeScriptState(JSRuntime *rt)
+{
+ if (rt->scriptFilenameTable) {
+ JS_HashTableDestroy(rt->scriptFilenameTable);
+ rt->scriptFilenameTable = NULL;
+ }
+#ifdef JS_THREADSAFE
+ if (rt->scriptFilenameTableLock) {
+ JS_DESTROY_LOCK(rt->scriptFilenameTableLock);
+ rt->scriptFilenameTableLock = NULL;
+ }
+#endif
+}
+
+void
+js_FreeRuntimeScriptState(JSRuntime *rt)
+{
+ ScriptFilenamePrefix *sfp;
+
+ if (!rt->scriptFilenameTable)
+ return;
+
+ while (!JS_CLIST_IS_EMPTY(&rt->scriptFilenamePrefixes)) {
+ sfp = (ScriptFilenamePrefix *) rt->scriptFilenamePrefixes.next;
+ JS_REMOVE_LINK(&sfp->links);
+ free(sfp);
+ }
+ js_FinishRuntimeScriptState(rt);
+}
+
+#ifdef DEBUG_brendan
+#define DEBUG_SFTBL
+#endif
+#ifdef DEBUG_SFTBL
+size_t sftbl_savings = 0;
+#endif
+
+static ScriptFilenameEntry *
+SaveScriptFilename(JSRuntime *rt, const char *filename, uint32 flags)
+{
+ JSHashTable *table;
+ JSHashNumber hash;
+ JSHashEntry **hep;
+ ScriptFilenameEntry *sfe;
+ size_t length;
+ JSCList *head, *link;
+ ScriptFilenamePrefix *sfp;
+
+ table = rt->scriptFilenameTable;
+ hash = JS_HashString(filename);
+ hep = JS_HashTableRawLookup(table, hash, filename);
+ sfe = (ScriptFilenameEntry *) *hep;
+#ifdef DEBUG_SFTBL
+ if (sfe)
+ sftbl_savings += strlen(sfe->filename);
+#endif
+
+ if (!sfe) {
+ sfe = (ScriptFilenameEntry *)
+ JS_HashTableRawAdd(table, hep, hash, filename, NULL);
+ if (!sfe)
+ return NULL;
+ sfe->key = strcpy(sfe->filename, filename);
+ sfe->flags = 0;
+ sfe->mark = JS_FALSE;
+ }
+
+ /* If saving a prefix, add it to the set in rt->scriptFilenamePrefixes. */
+ if (flags != 0) {
+ /* Search in case filename was saved already; we must be idempotent. */
+ sfp = NULL;
+ length = strlen(filename);
+ for (head = link = &rt->scriptFilenamePrefixes;
+ link->next != head;
+ link = link->next) {
+ /* Lag link behind sfp to insert in non-increasing length order. */
+ sfp = (ScriptFilenamePrefix *) link->next;
+ if (!strcmp(sfp->name, filename))
+ break;
+ if (sfp->length <= length) {
+ sfp = NULL;
+ break;
+ }
+ sfp = NULL;
+ }
+
+ if (!sfp) {
+ /* No such prefix: add one now. */
+ sfp = (ScriptFilenamePrefix *) malloc(sizeof(ScriptFilenamePrefix));
+ if (!sfp)
+ return NULL;
+ JS_INSERT_AFTER(&sfp->links, link);
+ sfp->name = sfe->filename;
+ sfp->length = length;
+ sfp->flags = 0;
+ }
+
+ /*
+ * Accumulate flags in both sfe and sfp: sfe for later access from the
+ * JS_GetScriptedCallerFilenameFlags debug-API, and sfp so that longer
+ * filename entries can inherit by prefix.
+ */
+ sfe->flags |= flags;
+ sfp->flags |= flags;
+ }
+
+ return sfe;
+}
+
+const char *
+js_SaveScriptFilename(JSContext *cx, const char *filename)
+{
+ JSRuntime *rt;
+ ScriptFilenameEntry *sfe;
+ JSCList *head, *link;
+ ScriptFilenamePrefix *sfp;
+
+ rt = cx->runtime;
+ JS_ACQUIRE_LOCK(rt->scriptFilenameTableLock);
+ sfe = SaveScriptFilename(rt, filename, 0);
+ if (!sfe) {
+ JS_RELEASE_LOCK(rt->scriptFilenameTableLock);
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+ }
+
+ /*
+ * Try to inherit flags by prefix. We assume there won't be more than a
+ * few (dozen! ;-) prefixes, so linear search is tolerable.
+ * XXXbe every time I've assumed that in the JS engine, I've been wrong!
+ */
+ for (head = &rt->scriptFilenamePrefixes, link = head->next;
+ link != head;
+ link = link->next) {
+ sfp = (ScriptFilenamePrefix *) link;
+ if (!strncmp(sfp->name, filename, sfp->length)) {
+ sfe->flags |= sfp->flags;
+ break;
+ }
+ }
+ JS_RELEASE_LOCK(rt->scriptFilenameTableLock);
+ return sfe->filename;
+}
+
+const char *
+js_SaveScriptFilenameRT(JSRuntime *rt, const char *filename, uint32 flags)
+{
+ ScriptFilenameEntry *sfe;
+
+ /* This may be called very early, via the jsdbgapi.h entry point. */
+ if (!rt->scriptFilenameTable && !js_InitRuntimeScriptState(rt))
+ return NULL;
+
+ JS_ACQUIRE_LOCK(rt->scriptFilenameTableLock);
+ sfe = SaveScriptFilename(rt, filename, flags);
+ JS_RELEASE_LOCK(rt->scriptFilenameTableLock);
+ if (!sfe)
+ return NULL;
+
+ return sfe->filename;
+}
+
+/*
+ * Back up from a saved filename by its offset within its hash table entry.
+ */
+#define FILENAME_TO_SFE(fn) \
+ ((ScriptFilenameEntry *) ((fn) - offsetof(ScriptFilenameEntry, filename)))
+
+/*
+ * The sfe->key member, redundant given sfe->filename but required by the old
+ * jshash.c code, here gives us a useful sanity check. This assertion will
+ * very likely botch if someone tries to mark a string that wasn't allocated
+ * as an sfe->filename.
+ */
+#define ASSERT_VALID_SFE(sfe) JS_ASSERT((sfe)->key == (sfe)->filename)
+
+uint32
+js_GetScriptFilenameFlags(const char *filename)
+{
+ ScriptFilenameEntry *sfe;
+
+ sfe = FILENAME_TO_SFE(filename);
+ ASSERT_VALID_SFE(sfe);
+ return sfe->flags;
+}
+
+void
+js_MarkScriptFilename(const char *filename)
+{
+ ScriptFilenameEntry *sfe;
+
+ sfe = FILENAME_TO_SFE(filename);
+ ASSERT_VALID_SFE(sfe);
+ sfe->mark = JS_TRUE;
+}
+
+JS_STATIC_DLL_CALLBACK(intN)
+js_script_filename_marker(JSHashEntry *he, intN i, void *arg)
+{
+ ScriptFilenameEntry *sfe = (ScriptFilenameEntry *) he;
+
+ sfe->mark = JS_TRUE;
+ return HT_ENUMERATE_NEXT;
+}
+
+void
+js_MarkScriptFilenames(JSRuntime *rt, JSBool keepAtoms)
+{
+ JSCList *head, *link;
+ ScriptFilenamePrefix *sfp;
+
+ if (!rt->scriptFilenameTable)
+ return;
+
+ if (keepAtoms) {
+ JS_HashTableEnumerateEntries(rt->scriptFilenameTable,
+ js_script_filename_marker,
+ rt);
+ }
+ for (head = &rt->scriptFilenamePrefixes, link = head->next;
+ link != head;
+ link = link->next) {
+ sfp = (ScriptFilenamePrefix *) link;
+ js_MarkScriptFilename(sfp->name);
+ }
+}
+
+JS_STATIC_DLL_CALLBACK(intN)
+js_script_filename_sweeper(JSHashEntry *he, intN i, void *arg)
+{
+ ScriptFilenameEntry *sfe = (ScriptFilenameEntry *) he;
+
+ if (!sfe->mark)
+ return HT_ENUMERATE_REMOVE;
+ sfe->mark = JS_FALSE;
+ return HT_ENUMERATE_NEXT;
+}
+
+void
+js_SweepScriptFilenames(JSRuntime *rt)
+{
+ if (!rt->scriptFilenameTable)
+ return;
+
+ JS_HashTableEnumerateEntries(rt->scriptFilenameTable,
+ js_script_filename_sweeper,
+ rt);
+#ifdef DEBUG_notme
+#ifdef DEBUG_SFTBL
+    printf("script filename table savings so far: %lu\n",
+           (unsigned long) sftbl_savings);
+#endif
+#endif
+}
+
+JSScript *
+js_NewScript(JSContext *cx, uint32 length, uint32 nsrcnotes, uint32 ntrynotes)
+{
+ JSScript *script;
+
+ /* Round up source note count to align script->trynotes for its type. */
+ if (ntrynotes)
+ nsrcnotes += JSTRYNOTE_ALIGNMASK;
+ script = (JSScript *) JS_malloc(cx,
+ sizeof(JSScript) +
+ length * sizeof(jsbytecode) +
+ nsrcnotes * sizeof(jssrcnote) +
+ ntrynotes * sizeof(JSTryNote));
+ if (!script)
+ return NULL;
+ memset(script, 0, sizeof(JSScript));
+ script->code = script->main = (jsbytecode *)(script + 1);
+ script->length = length;
+ script->version = cx->version;
+ if (ntrynotes) {
+ script->trynotes = (JSTryNote *)
+ ((jsword)(SCRIPT_NOTES(script) + nsrcnotes) &
+ ~(jsword)JSTRYNOTE_ALIGNMASK);
+ memset(script->trynotes, 0, ntrynotes * sizeof(JSTryNote));
+ }
+ return script;
+}
+
+JS_FRIEND_API(JSScript *)
+js_NewScriptFromCG(JSContext *cx, JSCodeGenerator *cg, JSFunction *fun)
+{
+ uint32 mainLength, prologLength, nsrcnotes, ntrynotes;
+ JSScript *script;
+ const char *filename;
+
+ mainLength = CG_OFFSET(cg);
+ prologLength = CG_PROLOG_OFFSET(cg);
+ CG_COUNT_FINAL_SRCNOTES(cg, nsrcnotes);
+ CG_COUNT_FINAL_TRYNOTES(cg, ntrynotes);
+ script = js_NewScript(cx, prologLength + mainLength, nsrcnotes, ntrynotes);
+ if (!script)
+ return NULL;
+
+ /* Now that we have script, error control flow must go to label bad. */
+ script->main += prologLength;
+ memcpy(script->code, CG_PROLOG_BASE(cg), prologLength * sizeof(jsbytecode));
+ memcpy(script->main, CG_BASE(cg), mainLength * sizeof(jsbytecode));
+ script->numGlobalVars = cg->treeContext.numGlobalVars;
+ if (!js_InitAtomMap(cx, &script->atomMap, &cg->atomList))
+ goto bad;
+
+ filename = cg->filename;
+ if (filename) {
+ script->filename = js_SaveScriptFilename(cx, filename);
+ if (!script->filename)
+ goto bad;
+ }
+ script->lineno = cg->firstLine;
+ script->depth = cg->maxStackDepth;
+ if (cg->principals) {
+ script->principals = cg->principals;
+ JSPRINCIPALS_HOLD(cx, script->principals);
+ }
+
+ if (!js_FinishTakingSrcNotes(cx, cg, SCRIPT_NOTES(script)))
+ goto bad;
+ if (script->trynotes)
+ js_FinishTakingTryNotes(cx, cg, script->trynotes);
+
+ /*
+ * We initialize fun->u.script to be the script constructed above
+ * so that the debugger has a valid FUN_SCRIPT(fun).
+ */
+ if (fun) {
+ JS_ASSERT(FUN_INTERPRETED(fun) && !FUN_SCRIPT(fun));
+ fun->u.i.script = script;
+ if (cg->treeContext.flags & TCF_FUN_HEAVYWEIGHT)
+ fun->flags |= JSFUN_HEAVYWEIGHT;
+ }
+
+ /* Tell the debugger about this compiled script. */
+ js_CallNewScriptHook(cx, script, fun);
+ return script;
+
+bad:
+ js_DestroyScript(cx, script);
+ return NULL;
+}
+
+JS_FRIEND_API(void)
+js_CallNewScriptHook(JSContext *cx, JSScript *script, JSFunction *fun)
+{
+ JSRuntime *rt;
+ JSNewScriptHook hook;
+
+ rt = cx->runtime;
+ hook = rt->newScriptHook;
+ if (hook) {
+ JS_KEEP_ATOMS(rt);
+ hook(cx, script->filename, script->lineno, script, fun,
+ rt->newScriptHookData);
+ JS_UNKEEP_ATOMS(rt);
+ }
+}
+
+JS_FRIEND_API(void)
+js_CallDestroyScriptHook(JSContext *cx, JSScript *script)
+{
+ JSRuntime *rt;
+ JSDestroyScriptHook hook;
+
+ rt = cx->runtime;
+ hook = rt->destroyScriptHook;
+ if (hook)
+ hook(cx, script, rt->destroyScriptHookData);
+}
+
+void
+js_DestroyScript(JSContext *cx, JSScript *script)
+{
+ js_CallDestroyScriptHook(cx, script);
+
+ JS_ClearScriptTraps(cx, script);
+ js_FreeAtomMap(cx, &script->atomMap);
+ if (script->principals)
+ JSPRINCIPALS_DROP(cx, script->principals);
+ if (JS_GSN_CACHE(cx).script == script)
+ JS_CLEAR_GSN_CACHE(cx);
+ JS_free(cx, script);
+}
+
+void
+js_MarkScript(JSContext *cx, JSScript *script)
+{
+ JSAtomMap *map;
+ uintN i, length;
+ JSAtom **vector;
+
+ map = &script->atomMap;
+ length = map->length;
+ vector = map->vector;
+ for (i = 0; i < length; i++)
+ GC_MARK_ATOM(cx, vector[i]);
+
+ if (script->filename)
+ js_MarkScriptFilename(script->filename);
+}
+
+typedef struct GSNCacheEntry {
+ JSDHashEntryHdr hdr;
+ jsbytecode *pc;
+ jssrcnote *sn;
+} GSNCacheEntry;
+
+#define GSN_CACHE_THRESHOLD 100
+
+jssrcnote *
+js_GetSrcNoteCached(JSContext *cx, JSScript *script, jsbytecode *pc)
+{
+ ptrdiff_t target, offset;
+ GSNCacheEntry *entry;
+ jssrcnote *sn, *result;
+ uintN nsrcnotes;
+
+ target = PTRDIFF(pc, script->code, jsbytecode);
+ if ((uint32)target >= script->length)
+ return NULL;
+
+ if (JS_GSN_CACHE(cx).script == script) {
+ JS_METER_GSN_CACHE(cx, hits);
+ entry = (GSNCacheEntry *)
+ JS_DHashTableOperate(&JS_GSN_CACHE(cx).table, pc,
+ JS_DHASH_LOOKUP);
+ return entry->sn;
+ }
+
+ JS_METER_GSN_CACHE(cx, misses);
+ offset = 0;
+ for (sn = SCRIPT_NOTES(script); ; sn = SN_NEXT(sn)) {
+ if (SN_IS_TERMINATOR(sn)) {
+ result = NULL;
+ break;
+ }
+ offset += SN_DELTA(sn);
+ if (offset == target && SN_IS_GETTABLE(sn)) {
+ result = sn;
+ break;
+ }
+ }
+
+ if (JS_GSN_CACHE(cx).script != script &&
+ script->length >= GSN_CACHE_THRESHOLD) {
+ JS_CLEAR_GSN_CACHE(cx);
+ nsrcnotes = 0;
+ for (sn = SCRIPT_NOTES(script); !SN_IS_TERMINATOR(sn);
+ sn = SN_NEXT(sn)) {
+ if (SN_IS_GETTABLE(sn))
+ ++nsrcnotes;
+ }
+ if (!JS_DHashTableInit(&JS_GSN_CACHE(cx).table, JS_DHashGetStubOps(),
+ NULL, sizeof(GSNCacheEntry), nsrcnotes)) {
+ JS_GSN_CACHE(cx).table.ops = NULL;
+ } else {
+ pc = script->code;
+ for (sn = SCRIPT_NOTES(script); !SN_IS_TERMINATOR(sn);
+ sn = SN_NEXT(sn)) {
+ pc += SN_DELTA(sn);
+ if (SN_IS_GETTABLE(sn)) {
+ entry = (GSNCacheEntry *)
+ JS_DHashTableOperate(&JS_GSN_CACHE(cx).table, pc,
+ JS_DHASH_ADD);
+ entry->pc = pc;
+ entry->sn = sn;
+ }
+ }
+ JS_GSN_CACHE(cx).script = script;
+ JS_METER_GSN_CACHE(cx, fills);
+ }
+ }
+
+ return result;
+}
+
+uintN
+js_PCToLineNumber(JSContext *cx, JSScript *script, jsbytecode *pc)
+{
+ JSAtom *atom;
+ JSFunction *fun;
+ uintN lineno;
+ ptrdiff_t offset, target;
+ jssrcnote *sn;
+ JSSrcNoteType type;
+
+ /* Cope with JSStackFrame.pc value prior to entering js_Interpret. */
+ if (!pc)
+ return 0;
+
+ /*
+ * Special case: function definition needs no line number note because
+ * the function's script contains its starting line number.
+ */
+ if (*pc == JSOP_DEFFUN ||
+ (*pc == JSOP_LITOPX && pc[1 + LITERAL_INDEX_LEN] == JSOP_DEFFUN)) {
+ atom = js_GetAtom(cx, &script->atomMap,
+ (*pc == JSOP_DEFFUN)
+ ? GET_ATOM_INDEX(pc)
+ : GET_LITERAL_INDEX(pc));
+ fun = (JSFunction *) JS_GetPrivate(cx, ATOM_TO_OBJECT(atom));
+ JS_ASSERT(FUN_INTERPRETED(fun));
+ return fun->u.i.script->lineno;
+ }
+
+ /*
+ * General case: walk through source notes accumulating their deltas,
+ * keeping track of line-number notes, until we pass the note for pc's
+ * offset within script->code.
+ */
+ lineno = script->lineno;
+ offset = 0;
+ target = PTRDIFF(pc, script->code, jsbytecode);
+ for (sn = SCRIPT_NOTES(script); !SN_IS_TERMINATOR(sn); sn = SN_NEXT(sn)) {
+ offset += SN_DELTA(sn);
+ type = (JSSrcNoteType) SN_TYPE(sn);
+ if (type == SRC_SETLINE) {
+ if (offset <= target)
+ lineno = (uintN) js_GetSrcNoteOffset(sn, 0);
+ } else if (type == SRC_NEWLINE) {
+ if (offset <= target)
+ lineno++;
+ }
+ if (offset > target)
+ break;
+ }
+ return lineno;
+}
+
+/* The line number limit is the same as the jssrcnote offset limit. */
+#define SN_LINE_LIMIT (SN_3BYTE_OFFSET_FLAG << 16)
+
+jsbytecode *
+js_LineNumberToPC(JSScript *script, uintN target)
+{
+ ptrdiff_t offset, best;
+ uintN lineno, bestdiff, diff;
+ jssrcnote *sn;
+ JSSrcNoteType type;
+
+ offset = 0;
+ best = -1;
+ lineno = script->lineno;
+ bestdiff = SN_LINE_LIMIT;
+ for (sn = SCRIPT_NOTES(script); !SN_IS_TERMINATOR(sn); sn = SN_NEXT(sn)) {
+ if (lineno == target)
+ goto out;
+ if (lineno > target) {
+ diff = lineno - target;
+ if (diff < bestdiff) {
+ bestdiff = diff;
+ best = offset;
+ }
+ }
+ offset += SN_DELTA(sn);
+ type = (JSSrcNoteType) SN_TYPE(sn);
+ if (type == SRC_SETLINE) {
+ lineno = (uintN) js_GetSrcNoteOffset(sn, 0);
+ } else if (type == SRC_NEWLINE) {
+ lineno++;
+ }
+ }
+ if (best >= 0)
+ offset = best;
+out:
+ return script->code + offset;
+}
+
+JS_FRIEND_API(uintN)
+js_GetScriptLineExtent(JSScript *script)
+{
+ uintN lineno;
+ jssrcnote *sn;
+ JSSrcNoteType type;
+
+ lineno = script->lineno;
+ for (sn = SCRIPT_NOTES(script); !SN_IS_TERMINATOR(sn); sn = SN_NEXT(sn)) {
+ type = (JSSrcNoteType) SN_TYPE(sn);
+ if (type == SRC_SETLINE) {
+ lineno = (uintN) js_GetSrcNoteOffset(sn, 0);
+ } else if (type == SRC_NEWLINE) {
+ lineno++;
+ }
+ }
+ return 1 + lineno - script->lineno;
+}
+
+#if JS_HAS_GENERATORS
+
+jsbytecode *
+js_FindFinallyHandler(JSScript *script, jsbytecode *pc)
+{
+ JSTryNote *tn;
+ ptrdiff_t off;
+ JSOp op2;
+
+ tn = script->trynotes;
+ if (!tn)
+ return NULL;
+
+ off = pc - script->main;
+ if (off < 0)
+ return NULL;
+
+ JS_ASSERT(tn->catchStart != 0);
+ do {
+ if ((jsuword)(off - tn->start) < (jsuword)tn->length) {
+ /*
+ * We have a handler: is it the finally one, or a catch handler?
+ *
+ * Catch bytecode begins with: JSOP_SETSP JSOP_ENTERBLOCK
+ * Finally bytecode begins with: JSOP_SETSP JSOP_(GOSUB|EXCEPTION)
+ */
+ pc = script->main + tn->catchStart;
+ JS_ASSERT(*pc == JSOP_SETSP);
+ op2 = pc[JSOP_SETSP_LENGTH];
+ if (op2 != JSOP_ENTERBLOCK) {
+ JS_ASSERT(op2 == JSOP_GOSUB || op2 == JSOP_EXCEPTION);
+ return pc;
+ }
+ }
+ } while ((++tn)->catchStart != 0);
+ return NULL;
+}
+
+#endif
diff --git a/src/third_party/js-1.7/jsscript.h b/src/third_party/js-1.7/jsscript.h
new file mode 100644
index 00000000000..18ad373d007
--- /dev/null
+++ b/src/third_party/js-1.7/jsscript.h
@@ -0,0 +1,225 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsscript_h___
+#define jsscript_h___
+/*
+ * JS script descriptor.
+ */
+#include "jsatom.h"
+#include "jsprvtd.h"
+
+JS_BEGIN_EXTERN_C
+
+/*
+ * Exception handling runtime information.
+ *
+ * All fields except length are code offsets relative to the main entry point
+ * of the script. If script->trynotes is not null, it points to a vector of
+ * these structs terminated by one with catchStart == 0.
+ */
+struct JSTryNote {
+ ptrdiff_t start; /* start of try statement */
+ ptrdiff_t length; /* count of try statement bytecodes */
+ ptrdiff_t catchStart; /* start of catch block (0 if end) */
+};
+
+#define JSTRYNOTE_GRAIN sizeof(ptrdiff_t)
+#define JSTRYNOTE_ALIGNMASK (JSTRYNOTE_GRAIN - 1)
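As a hedged illustration (editorial, not part of the original header), the terminated-vector layout described above can be walked like this; the loop mirrors the trynote counting in js_XDRScript and the handler scan in js_FindFinallyHandler elsewhere in this commit, and the helper name is an assumption:

static void
walk_trynotes(JSScript *script)
{
    JSTryNote *tn = script->trynotes;

    if (!tn)
        return;                         /* script has no exception table */
    for (; tn->catchStart != 0; tn++) {
        /* tn->start .. tn->start + tn->length brackets the try body and
         * tn->catchStart is the handler offset, all relative to
         * script->main. */
    }
}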
+
+struct JSScript {
+ jsbytecode *code; /* bytecodes and their immediate operands */
+ uint32 length; /* length of code vector */
+ jsbytecode *main; /* main entry point, after predef'ing prolog */
+ uint16 version; /* JS version under which script was compiled */
+ uint16 numGlobalVars; /* declared global var/const/function count */
+ JSAtomMap atomMap; /* maps immediate index to literal struct */
+ const char *filename; /* source filename or null */
+ uintN lineno; /* base line number of script */
+ uintN depth; /* maximum stack depth in slots */
+ JSTryNote *trynotes; /* exception table for this script */
+ JSPrincipals *principals; /* principals for this script */
+ JSObject *object; /* optional Script-class object wrapper */
+};
+
+/* No need to store script->notes now that it is allocated right after code. */
+#define SCRIPT_NOTES(script) ((jssrcnote*)((script)->code+(script)->length))
+
+#define SCRIPT_FIND_CATCH_START(script, pc, catchpc) \
+ JS_BEGIN_MACRO \
+ JSTryNote *tn_ = (script)->trynotes; \
+ jsbytecode *catchpc_ = NULL; \
+ if (tn_) { \
+ ptrdiff_t off_ = PTRDIFF(pc, (script)->main, jsbytecode); \
+ if (off_ >= 0) { \
+ while ((jsuword)(off_ - tn_->start) >= (jsuword)tn_->length) \
+ ++tn_; \
+ if (tn_->catchStart) \
+ catchpc_ = (script)->main + tn_->catchStart; \
+ } \
+ } \
+ catchpc = catchpc_; \
+ JS_END_MACRO
+
+/*
+ * Find the innermost finally block that handles the given pc. This is a
+ * version of SCRIPT_FIND_CATCH_START that ignores catch blocks and is used
+ * to implement generator.close().
+ */
+jsbytecode *
+js_FindFinallyHandler(JSScript *script, jsbytecode *pc);
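A minimal sketch (editorial, not in the original header) of how SCRIPT_FIND_CATCH_START above is meant to be consulted when an exception is thrown at pc; the wrapper name is illustrative only:

static jsbytecode *
find_catch_pc(JSScript *script, jsbytecode *pc)
{
    jsbytecode *catchpc;

    /* Sets catchpc to the catch block's pc, or to NULL when no enclosing
     * try statement in this script supplies a catch handler for pc. */
    SCRIPT_FIND_CATCH_START(script, pc, catchpc);
    return catchpc;
}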
+
+extern JS_FRIEND_DATA(JSClass) js_ScriptClass;
+
+extern JSObject *
+js_InitScriptClass(JSContext *cx, JSObject *obj);
+
+/*
+ * On first new context in rt, initialize script runtime state, specifically
+ * the script filename table and its lock.
+ */
+extern JSBool
+js_InitRuntimeScriptState(JSRuntime *rt);
+
+/*
+ * On last context destroy for rt, if script filenames are all GC'd, free the
+ * script filename table and its lock.
+ */
+extern void
+js_FinishRuntimeScriptState(JSRuntime *rt);
+
+/*
+ * On JS_DestroyRuntime(rt), forcibly free script filename prefixes and any
+ * script filename table entries that have not been GC'd, the latter using
+ * js_FinishRuntimeScriptState.
+ *
+ * This allows script filename prefixes to outlive any context in rt.
+ */
+extern void
+js_FreeRuntimeScriptState(JSRuntime *rt);
+
+extern const char *
+js_SaveScriptFilename(JSContext *cx, const char *filename);
+
+extern const char *
+js_SaveScriptFilenameRT(JSRuntime *rt, const char *filename, uint32 flags);
+
+extern uint32
+js_GetScriptFilenameFlags(const char *filename);
+
+extern void
+js_MarkScriptFilename(const char *filename);
+
+extern void
+js_MarkScriptFilenames(JSRuntime *rt, JSBool keepAtoms);
+
+extern void
+js_SweepScriptFilenames(JSRuntime *rt);
+
+/*
+ * Two successively less primitive ways to make a new JSScript. The first
+ * does *not* call a non-null cx->runtime->newScriptHook -- only the second,
+ * js_NewScriptFromCG, calls this optional debugger hook.
+ *
+ * The js_NewScript function can't know whether the script it creates belongs
+ * to a function, or is top-level or eval code, but the debugger wants access
+ * to the newly made script's function, if any -- so callers of js_NewScript
+ * are responsible for notifying the debugger after successfully creating any
+ * kind (function or other) of new JSScript.
+ */
+extern JSScript *
+js_NewScript(JSContext *cx, uint32 length, uint32 snlength, uint32 tnlength);
+
+extern JS_FRIEND_API(JSScript *)
+js_NewScriptFromCG(JSContext *cx, JSCodeGenerator *cg, JSFunction *fun);
+
+/*
+ * New-script-hook calling is factored from js_NewScriptFromCG so that it
+ * and callers of js_XDRScript can share this code. In the case of callers
+ * of js_XDRScript, the hook should be invoked only after successful decode
+ * of any owning function (the fun parameter) or script object (null fun).
+ */
+extern JS_FRIEND_API(void)
+js_CallNewScriptHook(JSContext *cx, JSScript *script, JSFunction *fun);
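A hedged sketch of the calling convention the two comments above describe, for a caller that builds a script without going through js_NewScriptFromCG; the helper name and the elided setup steps are illustrative assumptions, not part of the original source:

static JSScript *
make_script_and_notify(JSContext *cx, uint32 length,
                       uint32 nsrcnotes, uint32 ntrynotes)
{
    JSScript *script = js_NewScript(cx, length, nsrcnotes, ntrynotes);

    if (!script)
        return NULL;
    /* ... fill in bytecode, atom map, filename, lineno, etc. ... */
    js_CallNewScriptHook(cx, script, NULL);     /* NULL: no owning function */
    return script;
}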
+
+extern JS_FRIEND_API(void)
+js_CallDestroyScriptHook(JSContext *cx, JSScript *script);
+
+extern void
+js_DestroyScript(JSContext *cx, JSScript *script);
+
+extern void
+js_MarkScript(JSContext *cx, JSScript *script);
+
+/*
+ * To perturb as little code as possible, we introduce a js_GetSrcNote lookup
+ * cache without adding an explicit cx parameter. Thus js_GetSrcNote becomes
+ * a macro that uses cx from its calls' lexical environments.
+ */
+#define js_GetSrcNote(script,pc) js_GetSrcNoteCached(cx, script, pc)
+
+extern jssrcnote *
+js_GetSrcNoteCached(JSContext *cx, JSScript *script, jsbytecode *pc);
+
+/* XXX need cx to lock function objects declared by prolog bytecodes. */
+extern uintN
+js_PCToLineNumber(JSContext *cx, JSScript *script, jsbytecode *pc);
+
+extern jsbytecode *
+js_LineNumberToPC(JSScript *script, uintN lineno);
+
+extern JS_FRIEND_API(uintN)
+js_GetScriptLineExtent(JSScript *script);
+
+/*
+ * If magic is non-null, js_XDRScript succeeds on magic number mismatch but
+ * returns false in *magic; it reflects a match via a true *magic out param.
+ * If magic is null, js_XDRScript returns false on bad magic number errors,
+ * which it reports.
+ *
+ * NB: callers must call js_CallNewScriptHook after successful JSXDR_DECODE
+ * and subsequent set-up of owning function or script object, if any.
+ */
+extern JSBool
+js_XDRScript(JSXDRState *xdr, JSScript **scriptp, JSBool *magic);
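A sketch of the magic-number contract spelled out above on the encode side, modelled on script_freeze in jsscript.c; the helper name is an assumption, and the encoded bytes would have to be copied out before JS_XDRDestroy, as script_freeze does:

static JSBool
encode_script(JSContext *cx, JSScript *script)
{
    JSXDRState *xdr = JS_XDRNewMem(cx, JSXDR_ENCODE);
    JSBool ok, hasMagic;
    uint32 len;

    if (!xdr)
        return JS_FALSE;
    ok = js_XDRScript(xdr, &script, &hasMagic);
    if (ok && hasMagic) {
        void *buf = JS_XDRMemGetData(xdr, &len);
        /* copy buf[0..len) into caller-owned storage here */
        (void) buf;
    }
    JS_XDRDestroy(xdr);                 /* frees the XDR-owned buffer */
    return ok;
}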
+
+JS_END_EXTERN_C
+
+#endif /* jsscript_h___ */
diff --git a/src/third_party/js-1.7/jsshell.msg b/src/third_party/js-1.7/jsshell.msg
new file mode 100644
index 00000000000..4b811ac01da
--- /dev/null
+++ b/src/third_party/js-1.7/jsshell.msg
@@ -0,0 +1,50 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ Error messages for JSShell. See js.msg for format.
+*/
+
+MSG_DEF(JSSMSG_NOT_AN_ERROR, 0, 0, JSEXN_NONE, "<Error #0 is reserved>")
+MSG_DEF(JSSMSG_CANT_OPEN, 1, 2, JSEXN_NONE, "can't open {0}: {1}")
+MSG_DEF(JSSMSG_TRAP_USAGE, 2, 0, JSEXN_NONE, "usage: trap [fun] [pc] expr")
+MSG_DEF(JSSMSG_LINE2PC_USAGE, 3, 0, JSEXN_NONE, "usage: line2pc [fun] line")
+MSG_DEF(JSSMSG_FILE_SCRIPTS_ONLY, 4, 0, JSEXN_NONE, "only works on JS scripts read from files")
+MSG_DEF(JSSMSG_UNEXPECTED_EOF, 5, 1, JSEXN_NONE, "unexpected EOF in {0}")
+MSG_DEF(JSSMSG_DOEXP_USAGE, 6, 0, JSEXN_NONE, "usage: doexp obj id")
diff --git a/src/third_party/js-1.7/jsstddef.h b/src/third_party/js-1.7/jsstddef.h
new file mode 100644
index 00000000000..addaa88ff6f
--- /dev/null
+++ b/src/third_party/js-1.7/jsstddef.h
@@ -0,0 +1,83 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * stddef is included here so that ptrdiff_t is first declared as a signed
+ * long instead of a signed int.
+ */
+
+#ifdef _WINDOWS
+# ifndef XP_WIN
+# define XP_WIN
+# endif
+#if defined(_WIN32) || defined(WIN32)
+# ifndef XP_WIN32
+# define XP_WIN32
+# endif
+#else
+# ifndef XP_WIN16
+# define XP_WIN16
+# endif
+#endif
+#endif
+
+#ifdef XP_WIN16
+#ifndef _PTRDIFF_T_DEFINED
+typedef long ptrdiff_t;
+
+/*
+ * The Win16 compiler treats pointer differences as 16-bit signed values.
+ * This macro allows us to treat them as 17-bit signed values, stored in
+ * a 32-bit type.
+ */
+#define PTRDIFF(p1, p2, type) \
+ ((((unsigned long)(p1)) - ((unsigned long)(p2))) / sizeof(type))
+
+#define _PTRDIFF_T_DEFINED
+#endif /*_PTRDIFF_T_DEFINED*/
+#else /*WIN16*/
+
+#define PTRDIFF(p1, p2, type) \
+ ((p1) - (p2))
+
+#endif
+
+#include <stddef.h>
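A small editorial illustration of how this macro is used elsewhere in this commit, e.g. PTRDIFF(pc, script->code, jsbytecode) in jsscript.c; the helper below is an assumption, not part of the header:

static ptrdiff_t
byte_offset(const char *p, const char *base)
{
    return PTRDIFF(p, base, char);      /* plain p - base on non-Win16 builds */
}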
+
+
diff --git a/src/third_party/js-1.7/jsstr.c b/src/third_party/js-1.7/jsstr.c
new file mode 100644
index 00000000000..e38f6525dc7
--- /dev/null
+++ b/src/third_party/js-1.7/jsstr.c
@@ -0,0 +1,4818 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=80:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * JS string type implementation.
+ *
+ * In order to avoid unnecessary js_LockGCThing/js_UnlockGCThing calls, these
+ * native methods store strings (possibly newborn) converted from their 'this'
+ * parameter and arguments on the stack: 'this' conversions at argv[-1], arg
+ * conversions at their index (argv[0], argv[1]). This is a legitimate method
+ * of rooting things that might lose their newborn root due to subsequent GC
+ * allocations in the same native method.
+ */
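A minimal sketch of the rooting idiom this comment describes (editorial only; the same pattern appears below in js_str_escape and the other string natives):

static JSBool
example_native(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
               jsval *rval)
{
    JSString *str;

    str = js_ValueToString(cx, argv[0]);
    if (!str)
        return JS_FALSE;
    argv[0] = STRING_TO_JSVAL(str);     /* roots the possibly newborn string */
    *rval = STRING_TO_JSVAL(str);
    return JS_TRUE;
}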
+#include "jsstddef.h"
+#include <stdlib.h>
+#include <string.h>
+#include "jstypes.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jshash.h" /* Added by JSIFY */
+#include "jsprf.h"
+#include "jsapi.h"
+#include "jsarray.h"
+#include "jsatom.h"
+#include "jsbool.h"
+#include "jscntxt.h"
+#include "jsconfig.h"
+#include "jsgc.h"
+#include "jsinterp.h"
+#include "jslock.h"
+#include "jsnum.h"
+#include "jsobj.h"
+#include "jsopcode.h"
+#include "jsregexp.h"
+#include "jsstr.h"
+
+#define JSSTRDEP_RECURSION_LIMIT 100
+
+size_t
+js_MinimizeDependentStrings(JSString *str, int level, JSString **basep)
+{
+ JSString *base;
+ size_t start, length;
+
+ JS_ASSERT(JSSTRING_IS_DEPENDENT(str));
+ base = JSSTRDEP_BASE(str);
+ start = JSSTRDEP_START(str);
+ if (JSSTRING_IS_DEPENDENT(base)) {
+ if (level < JSSTRDEP_RECURSION_LIMIT) {
+ start += js_MinimizeDependentStrings(base, level + 1, &base);
+ } else {
+ do {
+ start += JSSTRDEP_START(base);
+ base = JSSTRDEP_BASE(base);
+ } while (JSSTRING_IS_DEPENDENT(base));
+ }
+ if (start == 0) {
+ JS_ASSERT(JSSTRING_IS_PREFIX(str));
+ JSPREFIX_SET_BASE(str, base);
+ } else if (start <= JSSTRDEP_START_MASK) {
+ length = JSSTRDEP_LENGTH(str);
+ JSSTRDEP_SET_START_AND_LENGTH(str, start, length);
+ JSSTRDEP_SET_BASE(str, base);
+ }
+ }
+ *basep = base;
+ return start;
+}
+
+jschar *
+js_GetDependentStringChars(JSString *str)
+{
+ size_t start;
+ JSString *base;
+
+ start = js_MinimizeDependentStrings(str, 0, &base);
+ JS_ASSERT(!JSSTRING_IS_DEPENDENT(base));
+ JS_ASSERT(start < base->length);
+ return base->chars + start;
+}
+
+jschar *
+js_GetStringChars(JSString *str)
+{
+ if (JSSTRING_IS_DEPENDENT(str) && !js_UndependString(NULL, str))
+ return NULL;
+
+ *js_GetGCThingFlags(str) &= ~GCF_MUTABLE;
+ return str->chars;
+}
+
+JSString *
+js_ConcatStrings(JSContext *cx, JSString *left, JSString *right)
+{
+ size_t rn, ln, lrdist, n;
+ jschar *rs, *ls, *s;
+ JSDependentString *ldep; /* non-null if left should become dependent */
+ JSString *str;
+
+ if (JSSTRING_IS_DEPENDENT(right)) {
+ rn = JSSTRDEP_LENGTH(right);
+ rs = JSSTRDEP_CHARS(right);
+ } else {
+ rn = right->length;
+ rs = right->chars;
+ }
+ if (rn == 0)
+ return left;
+
+ if (JSSTRING_IS_DEPENDENT(left) ||
+ !(*js_GetGCThingFlags(left) & GCF_MUTABLE)) {
+ /* We must copy if left does not own a buffer to realloc. */
+ ln = JSSTRING_LENGTH(left);
+ if (ln == 0)
+ return right;
+ ls = JSSTRING_CHARS(left);
+ s = (jschar *) JS_malloc(cx, (ln + rn + 1) * sizeof(jschar));
+ if (!s)
+ return NULL;
+ js_strncpy(s, ls, ln);
+ ldep = NULL;
+ } else {
+ /* We can realloc left's space and make it depend on our result. */
+ ln = left->length;
+ if (ln == 0)
+ return right;
+ ls = left->chars;
+ s = (jschar *) JS_realloc(cx, ls, (ln + rn + 1) * sizeof(jschar));
+ if (!s)
+ return NULL;
+
+ /* Take care: right could depend on left! */
+ lrdist = (size_t)(rs - ls);
+ if (lrdist < ln)
+ rs = s + lrdist;
+ left->chars = ls = s;
+ ldep = JSSTRDEP(left);
+ }
+
+ js_strncpy(s + ln, rs, rn);
+ n = ln + rn;
+ s[n] = 0;
+ str = js_NewString(cx, s, n, GCF_MUTABLE);
+ if (!str) {
+ /* Out of memory: clean up any space we (re-)allocated. */
+ if (!ldep) {
+ JS_free(cx, s);
+ } else {
+ s = JS_realloc(cx, ls, (ln + 1) * sizeof(jschar));
+ if (s)
+ left->chars = s;
+ }
+ } else {
+ /* Morph left into a dependent prefix if we realloc'd its buffer. */
+ if (ldep) {
+ JSPREFIX_SET_LENGTH(ldep, ln);
+ JSPREFIX_SET_BASE(ldep, str);
+#ifdef DEBUG
+ {
+ JSRuntime *rt = cx->runtime;
+ JS_RUNTIME_METER(rt, liveDependentStrings);
+ JS_RUNTIME_METER(rt, totalDependentStrings);
+ JS_LOCK_RUNTIME_VOID(rt,
+ (rt->strdepLengthSum += (double)ln,
+ rt->strdepLengthSquaredSum += (double)ln * (double)ln));
+ }
+#endif
+ }
+ }
+
+ return str;
+}
+
+/*
+ * May be called with null cx by js_GetStringChars, above; and by the jslock.c
+ * MAKE_STRING_IMMUTABLE file-local macro.
+ */
+const jschar *
+js_UndependString(JSContext *cx, JSString *str)
+{
+ size_t n, size;
+ jschar *s;
+
+ if (JSSTRING_IS_DEPENDENT(str)) {
+ n = JSSTRDEP_LENGTH(str);
+ size = (n + 1) * sizeof(jschar);
+ s = (jschar *) (cx ? JS_malloc(cx, size) : malloc(size));
+ if (!s)
+ return NULL;
+
+ js_strncpy(s, JSSTRDEP_CHARS(str), n);
+ s[n] = 0;
+ str->length = n;
+ str->chars = s;
+
+#ifdef DEBUG
+ if (cx) {
+ JSRuntime *rt = cx->runtime;
+ JS_RUNTIME_UNMETER(rt, liveDependentStrings);
+ JS_RUNTIME_UNMETER(rt, totalDependentStrings);
+ JS_LOCK_RUNTIME_VOID(rt,
+ (rt->strdepLengthSum -= (double)n,
+ rt->strdepLengthSquaredSum -= (double)n * (double)n));
+ }
+#endif
+ }
+
+ return str->chars;
+}
+
+/*
+ * Forward declarations for URI encode/decode and helper routines
+ */
+static JSBool
+str_decodeURI(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval);
+
+static JSBool
+str_decodeURI_Component(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval);
+
+static JSBool
+str_encodeURI(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval);
+
+static JSBool
+str_encodeURI_Component(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval);
+
+static uint32
+Utf8ToOneUcs4Char(const uint8 *utf8Buffer, int utf8Length);
+
+/*
+ * Contributions from the String class to the set of methods defined for the
+ * global object. escape and unescape used to be defined in the Mocha library,
+ * but as ECMA decided to spec them, they've been moved to the core engine
+ * and made ECMA-compliant. (Incomplete escapes are interpreted as literal
+ * characters by unescape.)
+ */
+
+/*
+ * Stuff to emulate the old libmocha escape, which took a second argument
+ * giving the type of escape to perform. Retained for compatibility, and
+ * copied here to avoid reliance on net.h, mkparse.c/NET_EscapeBytes.
+ */
+
+#define URL_XALPHAS ((uint8) 1)
+#define URL_XPALPHAS ((uint8) 2)
+#define URL_PATH ((uint8) 4)
+
+static const uint8 urlCharType[256] =
+/* Bit 0 xalpha -- the alphas
+ * Bit 1 xpalpha -- as xalpha but
+ * converts spaces to plus and plus to %20
+ * Bit 2 ... path -- as xalphas but doesn't escape '/'
+ */
+ /* 0 1 2 3 4 5 6 7 8 9 A B C D E F */
+ { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0x */
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 1x */
+ 0,0,0,0,0,0,0,0,0,0,7,4,0,7,7,4, /* 2x !"#$%&'()*+,-./ */
+ 7,7,7,7,7,7,7,7,7,7,0,0,0,0,0,0, /* 3x 0123456789:;<=>? */
+ 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, /* 4x @ABCDEFGHIJKLMNO */
+ 7,7,7,7,7,7,7,7,7,7,7,0,0,0,0,7, /* 5X PQRSTUVWXYZ[\]^_ */
+ 0,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, /* 6x `abcdefghijklmno */
+ 7,7,7,7,7,7,7,7,7,7,7,0,0,0,0,0, /* 7X pqrstuvwxyz{|}~ DEL */
+ 0, };
+
+/* This matches the ECMA escape set when mask is 7 (the default). */
+
+#define IS_OK(C, mask) (urlCharType[((uint8) (C))] & (mask))
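+
+/*
+ * For example: '/' has type 4, so IS_OK('/', URL_PATH) is nonzero and '/'
+ * survives a path-style escape, while IS_OK('/', URL_XPALPHAS) is 0 and the
+ * character is encoded as %2F.  '*' has type 7 and is never escaped; ' ' has
+ * type 0 and is always encoded ('+' in xpalpha mode, %20 otherwise).
+ */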
+
+/* See ECMA-262 15.1.2.4. */
+JSBool
+js_str_escape(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str;
+ size_t i, ni, length, newlength;
+ const jschar *chars;
+ jschar *newchars;
+ jschar ch;
+ jsint mask;
+ jsdouble d;
+ const char digits[] = {'0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' };
+
+ mask = URL_XALPHAS | URL_XPALPHAS | URL_PATH;
+ if (argc > 1) {
+ if (!js_ValueToNumber(cx, argv[1], &d))
+ return JS_FALSE;
+ if (!JSDOUBLE_IS_FINITE(d) ||
+ (mask = (jsint)d) != d ||
+ mask & ~(URL_XALPHAS | URL_XPALPHAS | URL_PATH))
+ {
+ char numBuf[12];
+ JS_snprintf(numBuf, sizeof numBuf, "%lx", (unsigned long) mask);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_STRING_MASK, numBuf);
+ return JS_FALSE;
+ }
+ }
+
+ str = js_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(str);
+
+ chars = JSSTRING_CHARS(str);
+ length = newlength = JSSTRING_LENGTH(str);
+
+ /* Take a first pass and see how big the result string will need to be. */
+ for (i = 0; i < length; i++) {
+ if ((ch = chars[i]) < 128 && IS_OK(ch, mask))
+ continue;
+ if (ch < 256) {
+ if (mask == URL_XPALPHAS && ch == ' ')
+ continue; /* The character will be encoded as '+' */
+ newlength += 2; /* The character will be encoded as %XX */
+ } else {
+ newlength += 5; /* The character will be encoded as %uXXXX */
+ }
+
+ /*
+ * This overflow test works because newlength is incremented by at
+ * most 5 on each iteration.
+ */
+ if (newlength < length) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ }
+
+ if (newlength >= ~(size_t)0 / sizeof(jschar)) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+
+ newchars = (jschar *) JS_malloc(cx, (newlength + 1) * sizeof(jschar));
+ if (!newchars)
+ return JS_FALSE;
+ for (i = 0, ni = 0; i < length; i++) {
+ if ((ch = chars[i]) < 128 && IS_OK(ch, mask)) {
+ newchars[ni++] = ch;
+ } else if (ch < 256) {
+ if (mask == URL_XPALPHAS && ch == ' ') {
+ newchars[ni++] = '+'; /* convert spaces to pluses */
+ } else {
+ newchars[ni++] = '%';
+ newchars[ni++] = digits[ch >> 4];
+ newchars[ni++] = digits[ch & 0xF];
+ }
+ } else {
+ newchars[ni++] = '%';
+ newchars[ni++] = 'u';
+ newchars[ni++] = digits[ch >> 12];
+ newchars[ni++] = digits[(ch & 0xF00) >> 8];
+ newchars[ni++] = digits[(ch & 0xF0) >> 4];
+ newchars[ni++] = digits[ch & 0xF];
+ }
+ }
+ JS_ASSERT(ni == newlength);
+ newchars[newlength] = 0;
+
+ str = js_NewString(cx, newchars, newlength, 0);
+ if (!str) {
+ JS_free(cx, newchars);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+#undef IS_OK
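+
+/*
+ * Worked examples (default mask 7, the ECMA escape set):
+ *   escape("a b~")    => "a%20b%7E"  (letters, digits and @*_+-./ pass through)
+ *   escape("a b", 2)  => "a+b"       (URL_XPALPHAS turns spaces into '+')
+ *   escape("\u2028")  => "%u2028"    (chars above 255 use the %uXXXX form)
+ */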
+
+/* See ECMA-262 15.1.2.5 */
+static JSBool
+str_unescape(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str;
+ size_t i, ni, length;
+ const jschar *chars;
+ jschar *newchars;
+ jschar ch;
+
+ str = js_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(str);
+
+ chars = JSSTRING_CHARS(str);
+ length = JSSTRING_LENGTH(str);
+
+ /* Don't bother allocating less space for the new string. */
+ newchars = (jschar *) JS_malloc(cx, (length + 1) * sizeof(jschar));
+ if (!newchars)
+ return JS_FALSE;
+ ni = i = 0;
+ while (i < length) {
+ ch = chars[i++];
+ if (ch == '%') {
+ if (i + 1 < length &&
+ JS7_ISHEX(chars[i]) && JS7_ISHEX(chars[i + 1]))
+ {
+ ch = JS7_UNHEX(chars[i]) * 16 + JS7_UNHEX(chars[i + 1]);
+ i += 2;
+ } else if (i + 4 < length && chars[i] == 'u' &&
+ JS7_ISHEX(chars[i + 1]) && JS7_ISHEX(chars[i + 2]) &&
+ JS7_ISHEX(chars[i + 3]) && JS7_ISHEX(chars[i + 4]))
+ {
+ ch = (((((JS7_UNHEX(chars[i + 1]) << 4)
+ + JS7_UNHEX(chars[i + 2])) << 4)
+ + JS7_UNHEX(chars[i + 3])) << 4)
+ + JS7_UNHEX(chars[i + 4]);
+ i += 5;
+ }
+ }
+ newchars[ni++] = ch;
+ }
+ newchars[ni] = 0;
+
+ str = js_NewString(cx, newchars, ni, 0);
+ if (!str) {
+ JS_free(cx, newchars);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
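+
+/*
+ * Worked examples:
+ *   unescape("a%20b")  => "a b"
+ *   unescape("%u2028") => "\u2028"
+ *   unescape("%zz")    => "%zz"   (incomplete escapes pass through literally)
+ */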
+
+#if JS_HAS_UNEVAL
+static JSBool
+str_uneval(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str;
+
+ str = js_ValueToSource(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+#endif
+
+const char js_escape_str[] = "escape";
+const char js_unescape_str[] = "unescape";
+#if JS_HAS_UNEVAL
+const char js_uneval_str[] = "uneval";
+#endif
+const char js_decodeURI_str[] = "decodeURI";
+const char js_encodeURI_str[] = "encodeURI";
+const char js_decodeURIComponent_str[] = "decodeURIComponent";
+const char js_encodeURIComponent_str[] = "encodeURIComponent";
+
+static JSFunctionSpec string_functions[] = {
+ {js_escape_str, js_str_escape, 1,0,0},
+ {js_unescape_str, str_unescape, 1,0,0},
+#if JS_HAS_UNEVAL
+ {js_uneval_str, str_uneval, 1,0,0},
+#endif
+ {js_decodeURI_str, str_decodeURI, 1,0,0},
+ {js_encodeURI_str, str_encodeURI, 1,0,0},
+ {js_decodeURIComponent_str, str_decodeURI_Component, 1,0,0},
+ {js_encodeURIComponent_str, str_encodeURI_Component, 1,0,0},
+
+ {0,0,0,0,0}
+};
+
+jschar js_empty_ucstr[] = {0};
+JSSubString js_EmptySubString = {0, js_empty_ucstr};
+
+enum string_tinyid {
+ STRING_LENGTH = -1
+};
+
+static JSPropertySpec string_props[] = {
+ {js_length_str, STRING_LENGTH,
+ JSPROP_READONLY|JSPROP_PERMANENT|JSPROP_SHARED, 0,0},
+ {0,0,0,0,0}
+};
+
+static JSBool
+str_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ jsval v;
+ JSString *str;
+ jsint slot;
+
+ if (!JSVAL_IS_INT(id))
+ return JS_TRUE;
+
+ slot = JSVAL_TO_INT(id);
+ if (slot == STRING_LENGTH) {
+ if (OBJ_GET_CLASS(cx, obj) == &js_StringClass) {
+ /* Follow ECMA-262 by fetching intrinsic length of our string. */
+ v = OBJ_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
+ JS_ASSERT(JSVAL_IS_STRING(v));
+ str = JSVAL_TO_STRING(v);
+ } else {
+ /* Preserve compatibility: convert obj to a string primitive. */
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ }
+
+ *vp = INT_TO_JSVAL((jsint) JSSTRING_LENGTH(str));
+ }
+ return JS_TRUE;
+}
+
+#define STRING_ELEMENT_ATTRS (JSPROP_ENUMERATE|JSPROP_READONLY|JSPROP_PERMANENT)
+
+static JSBool
+str_enumerate(JSContext *cx, JSObject *obj)
+{
+ jsval v;
+ JSString *str, *str1;
+ size_t i, length;
+
+ v = OBJ_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
+ JS_ASSERT(JSVAL_IS_STRING(v));
+ str = JSVAL_TO_STRING(v);
+
+ length = JSSTRING_LENGTH(str);
+ for (i = 0; i < length; i++) {
+ str1 = js_NewDependentString(cx, str, i, 1, 0);
+ if (!str1)
+ return JS_FALSE;
+ if (!OBJ_DEFINE_PROPERTY(cx, obj, INT_TO_JSID(i),
+ STRING_TO_JSVAL(str1), NULL, NULL,
+ STRING_ELEMENT_ATTRS, NULL)) {
+ return JS_FALSE;
+ }
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+str_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags,
+ JSObject **objp)
+{
+ jsval v;
+ JSString *str, *str1;
+ jsint slot;
+
+ if (!JSVAL_IS_INT(id) || (flags & JSRESOLVE_ASSIGNING))
+ return JS_TRUE;
+
+ v = OBJ_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
+ JS_ASSERT(JSVAL_IS_STRING(v));
+ str = JSVAL_TO_STRING(v);
+
+ slot = JSVAL_TO_INT(id);
+ if ((size_t)slot < JSSTRING_LENGTH(str)) {
+ str1 = js_NewDependentString(cx, str, (size_t)slot, 1, 0);
+ if (!str1)
+ return JS_FALSE;
+ if (!OBJ_DEFINE_PROPERTY(cx, obj, INT_TO_JSID(slot),
+ STRING_TO_JSVAL(str1), NULL, NULL,
+ STRING_ELEMENT_ATTRS, NULL)) {
+ return JS_FALSE;
+ }
+ *objp = obj;
+ }
+ return JS_TRUE;
+}
+
+JSClass js_StringClass = {
+ js_String_str,
+ JSCLASS_HAS_PRIVATE | JSCLASS_NEW_RESOLVE |
+ JSCLASS_HAS_CACHED_PROTO(JSProto_String),
+ JS_PropertyStub, JS_PropertyStub, str_getProperty, JS_PropertyStub,
+ str_enumerate, (JSResolveOp)str_resolve, JS_ConvertStub, JS_FinalizeStub,
+ JSCLASS_NO_OPTIONAL_MEMBERS
+};
+
+#if JS_HAS_TOSOURCE
+
+/*
+ * String.prototype.quote is generic (as are most string methods), unlike
+ * toSource, toString, and valueOf.
+ */
+static JSBool
+str_quote(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str;
+
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+
+ str = js_QuoteString(cx, str, '"');
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+str_toSource(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsval v;
+ JSString *str;
+ size_t i, j, k, n;
+ char buf[16];
+ jschar *s, *t;
+
+ if (JSVAL_IS_STRING((jsval)obj)) {
+ v = (jsval)obj;
+ } else {
+ if (!JS_InstanceOf(cx, obj, &js_StringClass, argv))
+ return JS_FALSE;
+ v = OBJ_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
+ if (!JSVAL_IS_STRING(v))
+ return js_obj_toSource(cx, obj, argc, argv, rval);
+ }
+ str = js_QuoteString(cx, JSVAL_TO_STRING(v), '"');
+ if (!str)
+ return JS_FALSE;
+ j = JS_snprintf(buf, sizeof buf, "(new %s(", js_StringClass.name);
+ s = JSSTRING_CHARS(str);
+ k = JSSTRING_LENGTH(str);
+ n = j + k + 2;
+ t = (jschar *) JS_malloc(cx, (n + 1) * sizeof(jschar));
+ if (!t)
+ return JS_FALSE;
+ for (i = 0; i < j; i++)
+ t[i] = buf[i];
+ for (j = 0; j < k; i++, j++)
+ t[i] = s[j];
+ t[i++] = ')';
+ t[i++] = ')';
+ t[i] = 0;
+ str = js_NewString(cx, t, n, 0);
+ if (!str) {
+ JS_free(cx, t);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+#endif /* JS_HAS_TOSOURCE */
+
+static JSBool
+str_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsval v;
+
+ if (JSVAL_IS_STRING((jsval)obj)) {
+ *rval = (jsval)obj;
+ return JS_TRUE;
+ }
+ if (!JS_InstanceOf(cx, obj, &js_StringClass, argv))
+ return JS_FALSE;
+ v = OBJ_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
+ if (!JSVAL_IS_STRING(v))
+ return js_obj_toString(cx, obj, argc, argv, rval);
+ *rval = v;
+ return JS_TRUE;
+}
+
+static JSBool
+str_valueOf(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ if (JSVAL_IS_STRING((jsval)obj)) {
+ *rval = (jsval)obj;
+ return JS_TRUE;
+ }
+ if (!JS_InstanceOf(cx, obj, &js_StringClass, argv))
+ return JS_FALSE;
+ *rval = OBJ_GET_SLOT(cx, obj, JSSLOT_PRIVATE);
+ return JS_TRUE;
+}
+
+/*
+ * Java-like string native methods.
+ */
+static JSBool
+str_substring(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str;
+ jsdouble d;
+ jsdouble length, begin, end;
+
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+
+ if (argc != 0) {
+ if (!js_ValueToNumber(cx, argv[0], &d))
+ return JS_FALSE;
+ length = JSSTRING_LENGTH(str);
+ begin = js_DoubleToInteger(d);
+ if (begin < 0)
+ begin = 0;
+ else if (begin > length)
+ begin = length;
+
+ if (argc == 1) {
+ end = length;
+ } else {
+ if (!js_ValueToNumber(cx, argv[1], &d))
+ return JS_FALSE;
+ end = js_DoubleToInteger(d);
+ if (end < 0)
+ end = 0;
+ else if (end > length)
+ end = length;
+ if (end < begin) {
+ /* ECMA emulates old JDK1.0 java.lang.String.substring. */
+ jsdouble tmp = begin;
+ begin = end;
+ end = tmp;
+ }
+ }
+
+ str = js_NewDependentString(cx, str, (size_t)begin,
+ (size_t)(end - begin), 0);
+ if (!str)
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+str_toLowerCase(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str;
+ size_t i, n;
+ jschar *s, *news;
+
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+
+ n = JSSTRING_LENGTH(str);
+ news = (jschar *) JS_malloc(cx, (n + 1) * sizeof(jschar));
+ if (!news)
+ return JS_FALSE;
+ s = JSSTRING_CHARS(str);
+ for (i = 0; i < n; i++)
+ news[i] = JS_TOLOWER(s[i]);
+ news[n] = 0;
+ str = js_NewString(cx, news, n, 0);
+ if (!str) {
+ JS_free(cx, news);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+str_toLocaleLowerCase(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str;
+
+ /*
+ * Forcefully ignore the first (or any) argument and return toLowerCase();
+ * ECMA has reserved that argument, presumably for defining the locale.
+ */
+ if (cx->localeCallbacks && cx->localeCallbacks->localeToLowerCase) {
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+ return cx->localeCallbacks->localeToLowerCase(cx, str, rval);
+ }
+ return str_toLowerCase(cx, obj, 0, argv, rval);
+}
+
+static JSBool
+str_toUpperCase(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str;
+ size_t i, n;
+ jschar *s, *news;
+
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+
+ n = JSSTRING_LENGTH(str);
+ news = (jschar *) JS_malloc(cx, (n + 1) * sizeof(jschar));
+ if (!news)
+ return JS_FALSE;
+ s = JSSTRING_CHARS(str);
+ for (i = 0; i < n; i++)
+ news[i] = JS_TOUPPER(s[i]);
+ news[n] = 0;
+ str = js_NewString(cx, news, n, 0);
+ if (!str) {
+ JS_free(cx, news);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+str_toLocaleUpperCase(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str;
+
+ /*
+ * Forcefully ignore the first (or any) argument and return toUpperCase();
+ * ECMA has reserved that argument, presumably for defining the locale.
+ */
+ if (cx->localeCallbacks && cx->localeCallbacks->localeToUpperCase) {
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+ return cx->localeCallbacks->localeToUpperCase(cx, str, rval);
+ }
+ return str_toUpperCase(cx, obj, 0, argv, rval);
+}
+
+static JSBool
+str_localeCompare(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str, *thatStr;
+
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+
+ if (argc == 0) {
+ *rval = JSVAL_ZERO;
+ } else {
+ thatStr = js_ValueToString(cx, argv[0]);
+ if (!thatStr)
+ return JS_FALSE;
+ if (cx->localeCallbacks && cx->localeCallbacks->localeCompare) {
+ argv[0] = STRING_TO_JSVAL(thatStr);
+ return cx->localeCallbacks->localeCompare(cx, str, thatStr, rval);
+ }
+ *rval = INT_TO_JSVAL(js_CompareStrings(str, thatStr));
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+str_charAt(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str;
+ jsdouble d;
+ size_t index;
+
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+
+ if (argc == 0) {
+ d = 0.0;
+ } else {
+ if (!js_ValueToNumber(cx, argv[0], &d))
+ return JS_FALSE;
+ d = js_DoubleToInteger(d);
+ }
+
+ if (d < 0 || JSSTRING_LENGTH(str) <= d) {
+ *rval = JS_GetEmptyStringValue(cx);
+ } else {
+ index = (size_t)d;
+ str = js_NewDependentString(cx, str, index, 1, 0);
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+str_charCodeAt(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str;
+ jsdouble d;
+ size_t index;
+
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+
+ if (argc == 0) {
+ d = 0.0;
+ } else {
+ if (!js_ValueToNumber(cx, argv[0], &d))
+ return JS_FALSE;
+ d = js_DoubleToInteger(d);
+ }
+
+ if (d < 0 || JSSTRING_LENGTH(str) <= d) {
+ *rval = JS_GetNaNValue(cx);
+ } else {
+ index = (size_t)d;
+ *rval = INT_TO_JSVAL((jsint) JSSTRING_CHARS(str)[index]);
+ }
+ return JS_TRUE;
+}
+
+jsint
+js_BoyerMooreHorspool(const jschar *text, jsint textlen,
+ const jschar *pat, jsint patlen,
+ jsint start)
+{
+ jsint i, j, k, m;
+ uint8 skip[BMH_CHARSET_SIZE];
+ jschar c;
+
+ JS_ASSERT(0 < patlen && patlen <= BMH_PATLEN_MAX);
+ for (i = 0; i < BMH_CHARSET_SIZE; i++)
+ skip[i] = (uint8)patlen;
+ m = patlen - 1;
+ for (i = 0; i < m; i++) {
+ c = pat[i];
+ if (c >= BMH_CHARSET_SIZE)
+ return BMH_BAD_PATTERN;
+ skip[c] = (uint8)(m - i);
+ }
+ for (k = start + m;
+ k < textlen;
+ k += ((c = text[k]) >= BMH_CHARSET_SIZE) ? patlen : skip[c]) {
+ for (i = k, j = m; ; i--, j--) {
+ if (j < 0)
+ return i + 1;
+ if (text[i] != pat[j])
+ break;
+ }
+ }
+ return -1;
+}
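+
+/*
+ * Sketch of the skip table for pat = "abc" (patlen 3, m 2): skip['a'] = 2,
+ * skip['b'] = 1, and every other byte (including 'c', since the last pattern
+ * char is not entered) keeps the default of 3.  The text character aligned
+ * with the pattern's last position picks the shift, and characters outside
+ * the table's range always shift by the full pattern length.
+ */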
+
+static JSBool
+str_indexOf(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str, *str2;
+ jsint i, j, index, textlen, patlen;
+ const jschar *text, *pat;
+ jsdouble d;
+
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+ text = JSSTRING_CHARS(str);
+ textlen = (jsint) JSSTRING_LENGTH(str);
+
+ str2 = js_ValueToString(cx, argv[0]);
+ if (!str2)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(str2);
+ pat = JSSTRING_CHARS(str2);
+ patlen = (jsint) JSSTRING_LENGTH(str2);
+
+ if (argc > 1) {
+ if (!js_ValueToNumber(cx, argv[1], &d))
+ return JS_FALSE;
+ d = js_DoubleToInteger(d);
+ if (d < 0)
+ i = 0;
+ else if (d > textlen)
+ i = textlen;
+ else
+ i = (jsint)d;
+ } else {
+ i = 0;
+ }
+ if (patlen == 0) {
+ *rval = INT_TO_JSVAL(i);
+ return JS_TRUE;
+ }
+
+ /* XXX tune the BMH threshold (512) */
+ if ((jsuint)(patlen - 2) <= BMH_PATLEN_MAX - 2 && textlen >= 512) {
+ index = js_BoyerMooreHorspool(text, textlen, pat, patlen, i);
+ if (index != BMH_BAD_PATTERN)
+ goto out;
+ }
+
+ index = -1;
+ j = 0;
+ while (i + j < textlen) {
+ if (text[i + j] == pat[j]) {
+ if (++j == patlen) {
+ index = i;
+ break;
+ }
+ } else {
+ i++;
+ j = 0;
+ }
+ }
+
+out:
+ *rval = INT_TO_JSVAL(index);
+ return JS_TRUE;
+}
+
+static JSBool
+str_lastIndexOf(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str, *str2;
+ const jschar *text, *pat;
+ jsint i, j, textlen, patlen;
+ jsdouble d;
+
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+ text = JSSTRING_CHARS(str);
+ textlen = (jsint) JSSTRING_LENGTH(str);
+
+ str2 = js_ValueToString(cx, argv[0]);
+ if (!str2)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(str2);
+ pat = JSSTRING_CHARS(str2);
+ patlen = (jsint) JSSTRING_LENGTH(str2);
+
+ if (argc > 1) {
+ if (!js_ValueToNumber(cx, argv[1], &d))
+ return JS_FALSE;
+ if (JSDOUBLE_IS_NaN(d)) {
+ i = textlen;
+ } else {
+ d = js_DoubleToInteger(d);
+ if (d < 0)
+ i = 0;
+ else if (d > textlen)
+ i = textlen;
+ else
+ i = (jsint)d;
+ }
+ } else {
+ i = textlen;
+ }
+
+ if (patlen == 0) {
+ *rval = INT_TO_JSVAL(i);
+ return JS_TRUE;
+ }
+
+ j = 0;
+ while (i >= 0) {
+ /* Don't assume that text is NUL-terminated: it could be dependent. */
+ if (i + j < textlen && text[i + j] == pat[j]) {
+ if (++j == patlen)
+ break;
+ } else {
+ i--;
+ j = 0;
+ }
+ }
+ *rval = INT_TO_JSVAL(i);
+ return JS_TRUE;
+}
+
+/*
+ * Perl-inspired string functions.
+ */
+typedef struct GlobData {
+ uintN flags; /* inout: mode and flag bits, see below */
+ uintN optarg; /* in: index of optional flags argument */
+ JSString *str; /* out: 'this' parameter object as string */
+ JSRegExp *regexp; /* out: regexp parameter object private data */
+} GlobData;
+
+/*
+ * Mode and flag bit definitions for match_or_replace's GlobData.flags field.
+ */
+#define MODE_MATCH 0x00 /* in: return match array on success */
+#define MODE_REPLACE 0x01 /* in: match and replace */
+#define MODE_SEARCH 0x02 /* in: search only, return match index or -1 */
+#define GET_MODE(f) ((f) & 0x03)
+#define FORCE_FLAT 0x04 /* in: force flat (non-regexp) string match */
+#define KEEP_REGEXP 0x08 /* inout: keep GlobData.regexp alive for caller
+ of match_or_replace; if set on input
+ but clear on output, regexp ownership
+ does not pass to caller */
+#define GLOBAL_REGEXP 0x10 /* out: regexp had the 'g' flag */
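+
+/*
+ * Example: str_replace below starts with
+ * MODE_REPLACE | KEEP_REGEXP | FORCE_FLAT (0x0d), so GET_MODE() yields
+ * MODE_REPLACE; match_or_replace then ORs in GLOBAL_REGEXP on output when
+ * the pattern was compiled with the 'g' flag.
+ */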
+
+static JSBool
+match_or_replace(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ JSBool (*glob)(JSContext *cx, jsint count, GlobData *data),
+ GlobData *data, jsval *rval)
+{
+ JSString *str, *src, *opt;
+ JSObject *reobj;
+ JSRegExp *re;
+ size_t index, length;
+ JSBool ok, test;
+ jsint count;
+
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+ data->str = str;
+
+ if (JSVAL_IS_REGEXP(cx, argv[0])) {
+ reobj = JSVAL_TO_OBJECT(argv[0]);
+ re = (JSRegExp *) JS_GetPrivate(cx, reobj);
+ } else {
+ src = js_ValueToString(cx, argv[0]);
+ if (!src)
+ return JS_FALSE;
+ if (data->optarg < argc) {
+ argv[0] = STRING_TO_JSVAL(src);
+ opt = js_ValueToString(cx, argv[data->optarg]);
+ if (!opt)
+ return JS_FALSE;
+ } else {
+ opt = NULL;
+ }
+ re = js_NewRegExpOpt(cx, NULL, src, opt,
+ (data->flags & FORCE_FLAT) != 0);
+ if (!re)
+ return JS_FALSE;
+ reobj = NULL;
+ }
+ /* From here on, all control flow must reach the matching DROP. */
+ data->regexp = re;
+ HOLD_REGEXP(cx, re);
+
+ if (re->flags & JSREG_GLOB)
+ data->flags |= GLOBAL_REGEXP;
+ index = 0;
+ if (GET_MODE(data->flags) == MODE_SEARCH) {
+ ok = js_ExecuteRegExp(cx, re, str, &index, JS_TRUE, rval);
+ if (ok) {
+ *rval = (*rval == JSVAL_TRUE)
+ ? INT_TO_JSVAL(cx->regExpStatics.leftContext.length)
+ : INT_TO_JSVAL(-1);
+ }
+ } else if (data->flags & GLOBAL_REGEXP) {
+ if (reobj) {
+ /* Set the lastIndex property's reserved slot to 0. */
+ ok = js_SetLastIndex(cx, reobj, 0);
+ } else {
+ ok = JS_TRUE;
+ }
+ if (ok) {
+ length = JSSTRING_LENGTH(str);
+ for (count = 0; index <= length; count++) {
+ ok = js_ExecuteRegExp(cx, re, str, &index, JS_TRUE, rval);
+ if (!ok || *rval != JSVAL_TRUE)
+ break;
+ ok = glob(cx, count, data);
+ if (!ok)
+ break;
+ if (cx->regExpStatics.lastMatch.length == 0) {
+ if (index == length)
+ break;
+ index++;
+ }
+ }
+ }
+ } else {
+ if (GET_MODE(data->flags) == MODE_REPLACE) {
+ test = JS_TRUE;
+ } else {
+ /*
+ * MODE_MATCH implies str_match is being called from a script or a
+ * scripted function. If the caller cares only about testing null
+ * vs. non-null return value, optimize away the array object that
+ * would normally be returned in *rval.
+ */
+ JSStackFrame *fp = cx->fp->down;
+
+ /* Skip Function.prototype.call and .apply frames. */
+ while (fp && !fp->pc) {
+ JS_ASSERT(!fp->script);
+ fp = fp->down;
+ }
+
+ /* Assume a full array result is required, then prove otherwise. */
+ test = JS_FALSE;
+ if (fp) {
+ JS_ASSERT(*fp->pc == JSOP_CALL || *fp->pc == JSOP_NEW);
+ JS_ASSERT(js_CodeSpec[*fp->pc].length == 3);
+ switch (fp->pc[3]) {
+ case JSOP_POP:
+ case JSOP_IFEQ:
+ case JSOP_IFNE:
+ case JSOP_IFEQX:
+ case JSOP_IFNEX:
+ test = JS_TRUE;
+ break;
+ default:;
+ }
+ }
+ }
+ ok = js_ExecuteRegExp(cx, re, str, &index, test, rval);
+ }
+
+ DROP_REGEXP(cx, re);
+ if (reobj) {
+ /* Tell our caller that it doesn't need to destroy data->regexp. */
+ data->flags &= ~KEEP_REGEXP;
+ } else if (!(data->flags & KEEP_REGEXP)) {
+ /* Caller didn't want to keep data->regexp, so null and destroy it. */
+ data->regexp = NULL;
+ js_DestroyRegExp(cx, re);
+ }
+
+ return ok;
+}
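+
+/*
+ * For a global pattern, e.g. "aaa".replace(/a/g, ...), the loop above runs
+ * the regexp once per match and invokes the glob callback (match_glob or
+ * replace_glob) each time, bumping index past zero-length matches so the
+ * scan always terminates.
+ */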
+
+typedef struct MatchData {
+ GlobData base;
+ jsval *arrayval; /* NB: local root pointer */
+} MatchData;
+
+static JSBool
+match_glob(JSContext *cx, jsint count, GlobData *data)
+{
+ MatchData *mdata;
+ JSObject *arrayobj;
+ JSSubString *matchsub;
+ JSString *matchstr;
+ jsval v;
+
+ mdata = (MatchData *)data;
+ arrayobj = JSVAL_TO_OBJECT(*mdata->arrayval);
+ if (!arrayobj) {
+ arrayobj = js_ConstructObject(cx, &js_ArrayClass, NULL, NULL, 0, NULL);
+ if (!arrayobj)
+ return JS_FALSE;
+ *mdata->arrayval = OBJECT_TO_JSVAL(arrayobj);
+ }
+ matchsub = &cx->regExpStatics.lastMatch;
+ matchstr = js_NewStringCopyN(cx, matchsub->chars, matchsub->length, 0);
+ if (!matchstr)
+ return JS_FALSE;
+ v = STRING_TO_JSVAL(matchstr);
+ return js_SetProperty(cx, arrayobj, INT_TO_JSID(count), &v);
+}
+
+static JSBool
+str_match(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ MatchData mdata;
+ JSBool ok;
+
+ mdata.base.flags = MODE_MATCH;
+ mdata.base.optarg = 1;
+ mdata.arrayval = &argv[2];
+ *mdata.arrayval = JSVAL_NULL;
+ ok = match_or_replace(cx, obj, argc, argv, match_glob, &mdata.base, rval);
+ if (ok && !JSVAL_IS_NULL(*mdata.arrayval))
+ *rval = *mdata.arrayval;
+ return ok;
+}
+
+static JSBool
+str_search(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ GlobData data;
+
+ data.flags = MODE_SEARCH;
+ data.optarg = 1;
+ return match_or_replace(cx, obj, argc, argv, NULL, &data, rval);
+}
+
+typedef struct ReplaceData {
+ GlobData base; /* base struct state */
+ JSObject *lambda; /* replacement function object or null */
+ JSString *repstr; /* replacement string */
+ jschar *dollar; /* null or pointer to first $ in repstr */
+ jschar *dollarEnd; /* limit pointer for js_strchr_limit */
+ jschar *chars; /* result chars, null initially */
+ size_t length; /* result length, 0 initially */
+ jsint index; /* index in result of next replacement */
+ jsint leftIndex; /* left context index in base.str->chars */
+ JSSubString dollarStr; /* for "$$" interpret_dollar result */
+} ReplaceData;
+
+static JSSubString *
+interpret_dollar(JSContext *cx, jschar *dp, jschar *ep, ReplaceData *rdata,
+ size_t *skip)
+{
+ JSRegExpStatics *res;
+ jschar dc, *cp;
+ uintN num, tmp;
+
+ JS_ASSERT(*dp == '$');
+
+ /* If there is only a dollar, bail now */
+ if (dp + 1 >= ep)
+ return NULL;
+
+ /* Interpret all Perl match-induced dollar variables. */
+ res = &cx->regExpStatics;
+ dc = dp[1];
+ if (JS7_ISDEC(dc)) {
+ /* ECMA-262 Edition 3: 1-9 or 01-99 */
+ num = JS7_UNDEC(dc);
+ if (num > res->parenCount)
+ return NULL;
+
+ cp = dp + 2;
+ if (cp < ep && (dc = *cp, JS7_ISDEC(dc))) {
+ tmp = 10 * num + JS7_UNDEC(dc);
+ if (tmp <= res->parenCount) {
+ cp++;
+ num = tmp;
+ }
+ }
+ if (num == 0)
+ return NULL;
+
+ /* Adjust num from 1 $n-origin to 0 array-index-origin. */
+ num--;
+ *skip = cp - dp;
+ return REGEXP_PAREN_SUBSTRING(res, num);
+ }
+
+ *skip = 2;
+ switch (dc) {
+ case '$':
+ rdata->dollarStr.chars = dp;
+ rdata->dollarStr.length = 1;
+ return &rdata->dollarStr;
+ case '&':
+ return &res->lastMatch;
+ case '+':
+ return &res->lastParen;
+ case '`':
+ return &res->leftContext;
+ case '\'':
+ return &res->rightContext;
+ }
+ return NULL;
+}
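+
+/*
+ * Replacement-text examples (informal):
+ *   "abc".replace(/b/, "[$&]")   => "a[b]c"  ($& is the whole match)
+ *   "abc".replace(/(b)/, "$1$1") => "abbc"   ($1..$9, $01..$99 are parens)
+ *   "x".replace(/x/, "$$")       => "$"      ($$ is a literal dollar)
+ * $` and $' expand to the left and right context of the match.
+ */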
+
+static JSBool
+find_replen(JSContext *cx, ReplaceData *rdata, size_t *sizep)
+{
+ JSString *repstr;
+ size_t replen, skip;
+ jschar *dp, *ep;
+ JSSubString *sub;
+ JSObject *lambda;
+
+ lambda = rdata->lambda;
+ if (lambda) {
+ uintN argc, i, j, m, n, p;
+ jsval *sp, *oldsp, rval;
+ void *mark;
+ JSStackFrame *fp;
+ JSBool ok;
+
+ /*
+ * Save the regExpStatics from the current regexp, since they may be
+ * clobbered by a RegExp usage in the lambda function. Note that all
+ * members of JSRegExpStatics are JSSubStrings, so not GC roots, save
+ * input, which is rooted otherwise via argv[-1] in str_replace.
+ */
+ JSRegExpStatics save = cx->regExpStatics;
+ JSBool freeMoreParens = JS_FALSE;
+
+ /*
+ * In the lambda case, not only do we find the replacement string's
+ * length, we compute repstr and return it via rdata for use within
+ * do_replace. The lambda is called with arguments ($&, $1, $2, ...,
+ * index, input), i.e., all the properties of a regexp match array.
+ * For $&, etc., we must create string jsvals from cx->regExpStatics.
+ * We grab up stack space to keep the newborn strings GC-rooted.
+ */
+ p = rdata->base.regexp->parenCount;
+ argc = 1 + p + 2;
+ sp = js_AllocStack(cx, 2 + argc, &mark);
+ if (!sp)
+ return JS_FALSE;
+
+ /* Push lambda and its 'this' parameter. */
+ *sp++ = OBJECT_TO_JSVAL(lambda);
+ *sp++ = OBJECT_TO_JSVAL(OBJ_GET_PARENT(cx, lambda));
+
+#define PUSH_REGEXP_STATIC(sub) \
+ JS_BEGIN_MACRO \
+ JSString *str = js_NewStringCopyN(cx, \
+ cx->regExpStatics.sub.chars, \
+ cx->regExpStatics.sub.length, \
+ 0); \
+ if (!str) { \
+ ok = JS_FALSE; \
+ goto lambda_out; \
+ } \
+ *sp++ = STRING_TO_JSVAL(str); \
+ JS_END_MACRO
+
+ /* Push $&, $1, $2, ... */
+ PUSH_REGEXP_STATIC(lastMatch);
+ i = 0;
+ m = cx->regExpStatics.parenCount;
+ n = JS_MIN(m, 9);
+ for (j = 0; i < n; i++, j++)
+ PUSH_REGEXP_STATIC(parens[j]);
+ for (j = 0; i < m; i++, j++)
+ PUSH_REGEXP_STATIC(moreParens[j]);
+
+ /*
+ * We need to clear moreParens in the top-of-stack cx->regExpStatics
+ * so it won't possibly be realloc'ed, leaving the bottom-of-stack
+ * moreParens pointing to freed memory.
+ */
+ cx->regExpStatics.moreParens = NULL;
+ freeMoreParens = JS_TRUE;
+
+#undef PUSH_REGEXP_STATIC
+
+ /* Make sure to push undefined for any unmatched parens. */
+ for (; i < p; i++)
+ *sp++ = JSVAL_VOID;
+
+ /* Push match index and input string. */
+ *sp++ = INT_TO_JSVAL((jsint)cx->regExpStatics.leftContext.length);
+ *sp++ = STRING_TO_JSVAL(rdata->base.str);
+
+ /* Lift current frame to include the args and do the call. */
+ fp = cx->fp;
+ oldsp = fp->sp;
+ fp->sp = sp;
+ ok = js_Invoke(cx, argc, JSINVOKE_INTERNAL);
+ rval = fp->sp[-1];
+ fp->sp = oldsp;
+
+ if (ok) {
+ /*
+ * NB: we count on the newborn string root to hold any string
+ * created by this js_ValueToString that would otherwise be GC-
+ * able, until we use rdata->repstr in do_replace.
+ */
+ repstr = js_ValueToString(cx, rval);
+ if (!repstr) {
+ ok = JS_FALSE;
+ } else {
+ rdata->repstr = repstr;
+ *sizep = JSSTRING_LENGTH(repstr);
+ }
+ }
+
+ lambda_out:
+ js_FreeStack(cx, mark);
+ if (freeMoreParens)
+ JS_free(cx, cx->regExpStatics.moreParens);
+ cx->regExpStatics = save;
+ return ok;
+ }
+
+ repstr = rdata->repstr;
+ replen = JSSTRING_LENGTH(repstr);
+ for (dp = rdata->dollar, ep = rdata->dollarEnd; dp;
+ dp = js_strchr_limit(dp, '$', ep)) {
+ sub = interpret_dollar(cx, dp, ep, rdata, &skip);
+ if (sub) {
+ replen += sub->length - skip;
+ dp += skip;
+ }
+ else
+ dp++;
+ }
+ *sizep = replen;
+ return JS_TRUE;
+}
+
+static void
+do_replace(JSContext *cx, ReplaceData *rdata, jschar *chars)
+{
+ JSString *repstr;
+ jschar *bp, *cp, *dp, *ep;
+ size_t len, skip;
+ JSSubString *sub;
+
+ repstr = rdata->repstr;
+ bp = cp = JSSTRING_CHARS(repstr);
+ for (dp = rdata->dollar, ep = rdata->dollarEnd; dp;
+ dp = js_strchr_limit(dp, '$', ep)) {
+ len = dp - cp;
+ js_strncpy(chars, cp, len);
+ chars += len;
+ cp = dp;
+ sub = interpret_dollar(cx, dp, ep, rdata, &skip);
+ if (sub) {
+ len = sub->length;
+ js_strncpy(chars, sub->chars, len);
+ chars += len;
+ cp += skip;
+ dp += skip;
+ } else {
+ dp++;
+ }
+ }
+ js_strncpy(chars, cp, JSSTRING_LENGTH(repstr) - (cp - bp));
+}
+
+static JSBool
+replace_glob(JSContext *cx, jsint count, GlobData *data)
+{
+ ReplaceData *rdata;
+ JSString *str;
+ size_t leftoff, leftlen, replen, growth;
+ const jschar *left;
+ jschar *chars;
+
+ rdata = (ReplaceData *)data;
+ str = data->str;
+ leftoff = rdata->leftIndex;
+ left = JSSTRING_CHARS(str) + leftoff;
+ leftlen = cx->regExpStatics.lastMatch.chars - left;
+ rdata->leftIndex = cx->regExpStatics.lastMatch.chars - JSSTRING_CHARS(str);
+ rdata->leftIndex += cx->regExpStatics.lastMatch.length;
+ if (!find_replen(cx, rdata, &replen))
+ return JS_FALSE;
+ growth = leftlen + replen;
+ chars = (jschar *)
+ (rdata->chars
+ ? JS_realloc(cx, rdata->chars, (rdata->length + growth + 1)
+ * sizeof(jschar))
+ : JS_malloc(cx, (growth + 1) * sizeof(jschar)));
+ if (!chars) {
+ JS_free(cx, rdata->chars);
+ rdata->chars = NULL;
+ return JS_FALSE;
+ }
+ rdata->chars = chars;
+ rdata->length += growth;
+ chars += rdata->index;
+ rdata->index += growth;
+ js_strncpy(chars, left, leftlen);
+ chars += leftlen;
+ do_replace(cx, rdata, chars);
+ return JS_TRUE;
+}
+
+static JSBool
+str_replace(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSObject *lambda;
+ JSString *repstr, *str;
+ ReplaceData rdata;
+ JSBool ok;
+ jschar *chars;
+ size_t leftlen, rightlen, length;
+
+ if (JS_TypeOfValue(cx, argv[1]) == JSTYPE_FUNCTION) {
+ lambda = JSVAL_TO_OBJECT(argv[1]);
+ repstr = NULL;
+ } else {
+ if (!JS_ConvertValue(cx, argv[1], JSTYPE_STRING, &argv[1]))
+ return JS_FALSE;
+ repstr = JSVAL_TO_STRING(argv[1]);
+ lambda = NULL;
+ }
+
+ /*
+ * For ECMA Edition 3, the first argument is to be converted to a string
+ * to match in a "flat" sense (without regular expression metachars having
+ * special meanings) UNLESS the first arg is a RegExp object.
+ */
+ rdata.base.flags = MODE_REPLACE | KEEP_REGEXP | FORCE_FLAT;
+ rdata.base.optarg = 2;
+
+ rdata.lambda = lambda;
+ rdata.repstr = repstr;
+ if (repstr) {
+ rdata.dollarEnd = JSSTRING_CHARS(repstr) + JSSTRING_LENGTH(repstr);
+ rdata.dollar = js_strchr_limit(JSSTRING_CHARS(repstr), '$',
+ rdata.dollarEnd);
+ } else {
+ rdata.dollar = rdata.dollarEnd = NULL;
+ }
+ rdata.chars = NULL;
+ rdata.length = 0;
+ rdata.index = 0;
+ rdata.leftIndex = 0;
+
+ ok = match_or_replace(cx, obj, argc, argv, replace_glob, &rdata.base, rval);
+ if (!ok)
+ return JS_FALSE;
+
+ if (!rdata.chars) {
+ if ((rdata.base.flags & GLOBAL_REGEXP) || *rval != JSVAL_TRUE) {
+ /* Didn't match even once. */
+ *rval = STRING_TO_JSVAL(rdata.base.str);
+ goto out;
+ }
+ leftlen = cx->regExpStatics.leftContext.length;
+ ok = find_replen(cx, &rdata, &length);
+ if (!ok)
+ goto out;
+ length += leftlen;
+ chars = (jschar *) JS_malloc(cx, (length + 1) * sizeof(jschar));
+ if (!chars) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ js_strncpy(chars, cx->regExpStatics.leftContext.chars, leftlen);
+ do_replace(cx, &rdata, chars + leftlen);
+ rdata.chars = chars;
+ rdata.length = length;
+ }
+
+ rightlen = cx->regExpStatics.rightContext.length;
+ length = rdata.length + rightlen;
+ chars = (jschar *)
+ JS_realloc(cx, rdata.chars, (length + 1) * sizeof(jschar));
+ if (!chars) {
+ JS_free(cx, rdata.chars);
+ ok = JS_FALSE;
+ goto out;
+ }
+ js_strncpy(chars + rdata.length, cx->regExpStatics.rightContext.chars,
+ rightlen);
+ chars[length] = 0;
+
+ str = js_NewString(cx, chars, length, 0);
+ if (!str) {
+ JS_free(cx, chars);
+ ok = JS_FALSE;
+ goto out;
+ }
+ *rval = STRING_TO_JSVAL(str);
+
+out:
+ /* If KEEP_REGEXP is still set, it's our job to destroy regexp now. */
+ if (rdata.base.flags & KEEP_REGEXP)
+ js_DestroyRegExp(cx, rdata.base.regexp);
+ return ok;
+}
+
+/*
+ * Subroutine used by str_split to find the next split point in str, starting
+ * at offset *ip and looking either for the separator substring given by sep,
+ * or for the next re match. In the re case, return the matched separator in
+ * *sep, and the possibly updated offset in *ip.
+ *
+ * Return -2 on error, -1 on end of string, >= 0 for a valid index of the next
+ * separator occurrence if found, or str->length if no separator is found.
+ */
+static jsint
+find_split(JSContext *cx, JSString *str, JSRegExp *re, jsint *ip,
+ JSSubString *sep)
+{
+ jsint i, j, k;
+ size_t length;
+ jschar *chars;
+
+ /*
+ * Stop if past end of string. If at end of string, we will compare the
+ * null char stored there (by js_NewString*) to sep->chars[j] in the while
+ * loop at the end of this function, so that
+ *
+ * "ab,".split(',') => ["ab", ""]
+ *
+ * and the resulting array converts back to the string "ab," for symmetry.
+ * However, we ape Perl and do this only if there is a sufficiently large
+ * limit argument (see str_split).
+ */
+ i = *ip;
+ length = JSSTRING_LENGTH(str);
+ if ((size_t)i > length)
+ return -1;
+
+ chars = JSSTRING_CHARS(str);
+
+ /*
+ * Match a regular expression against the separator at or above index i.
+ * Call js_ExecuteRegExp with true for the test argument. On successful
+ * match, get the separator from cx->regExpStatics.lastMatch.
+ */
+ if (re) {
+ size_t index;
+ jsval rval;
+
+ again:
+ /* JS1.2 deviated from Perl by never matching at end of string. */
+ index = (size_t)i;
+ if (!js_ExecuteRegExp(cx, re, str, &index, JS_TRUE, &rval))
+ return -2;
+ if (rval != JSVAL_TRUE) {
+ /* Mismatch: ensure our caller advances i past end of string. */
+ sep->length = 1;
+ return length;
+ }
+ i = (jsint)index;
+ *sep = cx->regExpStatics.lastMatch;
+ if (sep->length == 0) {
+ /*
+ * Empty string match: never split on an empty match at the start
+ * of a find_split cycle. Same rule as for an empty global match
+ * in match_or_replace.
+ */
+ if (i == *ip) {
+ /*
+ * "Bump-along" to avoid sticking at an empty match, but don't
+ * bump past end of string -- our caller must do that by adding
+ * sep->length to our return value.
+ */
+ if ((size_t)i == length)
+ return -1;
+ i++;
+ goto again;
+ }
+ if ((size_t)i == length) {
+ /*
+ * If there was a trivial zero-length match at the end of the
+ * split, then we shouldn't output the matched string at the end
+ * of the split array. See ECMA-262 Ed. 3, 15.5.4.14, Step 15.
+ */
+ sep->chars = NULL;
+ }
+ }
+ JS_ASSERT((size_t)i >= sep->length);
+ return i - sep->length;
+ }
+
+ /*
+ * Deviate from ECMA by never splitting an empty string by any separator
+ * string into a non-empty array (an array of length 1 that contains the
+ * empty string).
+ */
+ if (!JS_VERSION_IS_ECMA(cx) && length == 0)
+ return -1;
+
+ /*
+ * Special case: if sep is the empty string, split str into one character
+ * substrings. Let our caller worry about whether to split once at end of
+ * string into an empty substring.
+ */
+ if (sep->length == 0)
+ return ((size_t)i == length) ? -1 : i + 1;
+
+ /*
+ * Now that we know sep is non-empty, search starting at i in str for an
+ * occurrence of all of sep's chars. If we find them, return the index of
+ * the first separator char. Otherwise, return length.
+ */
+ j = 0;
+ while ((size_t)(k = i + j) < length) {
+ if (chars[k] == sep->chars[j]) {
+ if ((size_t)++j == sep->length)
+ return i;
+ } else {
+ i++;
+ j = 0;
+ }
+ }
+ return k;
+}
+
+static JSBool
+str_split(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str, *sub;
+ JSObject *arrayobj;
+ jsval v;
+ JSBool ok, limited;
+ JSRegExp *re;
+ JSSubString *sep, tmp;
+ jsdouble d;
+ jsint i, j;
+ uint32 len, limit;
+
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+
+ arrayobj = js_ConstructObject(cx, &js_ArrayClass, NULL, NULL, 0, NULL);
+ if (!arrayobj)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(arrayobj);
+
+ if (argc == 0) {
+ v = STRING_TO_JSVAL(str);
+ ok = JS_SetElement(cx, arrayobj, 0, &v);
+ } else {
+ if (JSVAL_IS_REGEXP(cx, argv[0])) {
+ re = (JSRegExp *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(argv[0]));
+ sep = &tmp;
+
+ /* Set a magic value so we can detect a successful re match. */
+ sep->chars = NULL;
+ sep->length = 0;
+ } else {
+ JSString *str2 = js_ValueToString(cx, argv[0]);
+ if (!str2)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(str2);
+
+ /*
+ * Point sep at a local copy of str2's header because find_split
+ * will modify sep->length.
+ */
+ tmp.length = JSSTRING_LENGTH(str2);
+ tmp.chars = JSSTRING_CHARS(str2);
+ sep = &tmp;
+ re = NULL;
+ }
+
+ /* Use the second argument as the split limit, if given. */
+ limited = (argc > 1) && !JSVAL_IS_VOID(argv[1]);
+ limit = 0; /* Avoid warning. */
+ if (limited) {
+ if (!js_ValueToNumber(cx, argv[1], &d))
+ return JS_FALSE;
+
+ /* Clamp limit between 0 and 1 + string length. */
+ if (!js_DoubleToECMAUint32(cx, d, &limit))
+ return JS_FALSE;
+ if (limit > JSSTRING_LENGTH(str))
+ limit = 1 + JSSTRING_LENGTH(str);
+ }
+
+ len = i = 0;
+ while ((j = find_split(cx, str, re, &i, sep)) >= 0) {
+ if (limited && len >= limit)
+ break;
+ sub = js_NewDependentString(cx, str, i, (size_t)(j - i), 0);
+ if (!sub)
+ return JS_FALSE;
+ v = STRING_TO_JSVAL(sub);
+ if (!JS_SetElement(cx, arrayobj, len, &v))
+ return JS_FALSE;
+ len++;
+
+ /*
+ * Imitate perl's feature of including parenthesized substrings
+ * that matched part of the delimiter in the new array, after the
+ * split substring that was delimited.
+ */
+ if (re && sep->chars) {
+ uintN num;
+ JSSubString *parsub;
+
+ for (num = 0; num < cx->regExpStatics.parenCount; num++) {
+ if (limited && len >= limit)
+ break;
+ parsub = REGEXP_PAREN_SUBSTRING(&cx->regExpStatics, num);
+ sub = js_NewStringCopyN(cx, parsub->chars, parsub->length,
+ 0);
+ if (!sub)
+ return JS_FALSE;
+ v = STRING_TO_JSVAL(sub);
+ if (!JS_SetElement(cx, arrayobj, len, &v))
+ return JS_FALSE;
+ len++;
+ }
+ sep->chars = NULL;
+ }
+
+ i = j + sep->length;
+ if (!JS_VERSION_IS_ECMA(cx)) {
+ /*
+ * Deviate from ECMA to imitate Perl, which omits a final
+ * split unless a limit argument is given and big enough.
+ */
+ if (!limited && (size_t)i == JSSTRING_LENGTH(str))
+ break;
+ }
+ }
+ ok = (j != -2);
+ }
+ return ok;
+}
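+
+/*
+ * Split examples (informal):
+ *   "a,b,c".split(",")      => ["a", "b", "c"]
+ *   "a,b,c".split(",", 2)   => ["a", "b"]        (limit argument)
+ *   "a1b".split(/([0-9])/)  => ["a", "1", "b"]   (captured separator parts
+ *                                                 are spliced in, as above)
+ */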
+
+#if JS_HAS_PERL_SUBSTR
+static JSBool
+str_substr(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str;
+ jsdouble d;
+ jsdouble length, begin, end;
+
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+
+ if (argc != 0) {
+ if (!js_ValueToNumber(cx, argv[0], &d))
+ return JS_FALSE;
+ length = JSSTRING_LENGTH(str);
+ begin = js_DoubleToInteger(d);
+ if (begin < 0) {
+ begin += length;
+ if (begin < 0)
+ begin = 0;
+ } else if (begin > length) {
+ begin = length;
+ }
+
+ if (argc == 1) {
+ end = length;
+ } else {
+ if (!js_ValueToNumber(cx, argv[1], &d))
+ return JS_FALSE;
+ end = js_DoubleToInteger(d);
+ if (end < 0)
+ end = 0;
+ end += begin;
+ if (end > length)
+ end = length;
+ }
+
+ str = js_NewDependentString(cx, str, (size_t)begin,
+ (size_t)(end - begin), 0);
+ if (!str)
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+#endif /* JS_HAS_PERL_SUBSTR */
+
+/*
+ * Python-esque sequence operations.
+ */
+static JSBool
+str_concat(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str, *str2;
+ uintN i;
+
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+
+ for (i = 0; i < argc; i++) {
+ str2 = js_ValueToString(cx, argv[i]);
+ if (!str2)
+ return JS_FALSE;
+ argv[i] = STRING_TO_JSVAL(str2);
+
+ str = js_ConcatStrings(cx, str, str2);
+ if (!str)
+ return JS_FALSE;
+ }
+
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+str_slice(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str;
+ jsdouble d;
+ jsdouble length, begin, end;
+
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+
+ if (argc != 0) {
+ if (!js_ValueToNumber(cx, argv[0], &d))
+ return JS_FALSE;
+ length = JSSTRING_LENGTH(str);
+ begin = js_DoubleToInteger(d);
+ if (begin < 0) {
+ begin += length;
+ if (begin < 0)
+ begin = 0;
+ } else if (begin > length) {
+ begin = length;
+ }
+
+ if (argc == 1) {
+ end = length;
+ } else {
+ if (!js_ValueToNumber(cx, argv[1], &d))
+ return JS_FALSE;
+ end = js_DoubleToInteger(d);
+ if (end < 0) {
+ end += length;
+ if (end < 0)
+ end = 0;
+ } else if (end > length) {
+ end = length;
+ }
+ if (end < begin)
+ end = begin;
+ }
+
+ str = js_NewDependentString(cx, str, (size_t)begin,
+ (size_t)(end - begin), 0);
+ if (!str)
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
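+
+/*
+ * Examples (informal): negative indexes count from the end for slice and for
+ * substr's first argument, while substr's second argument is a length:
+ *   "hello".slice(1, 3)   => "el"
+ *   "hello".slice(-3)     => "llo"
+ *   "hello".substr(-3, 2) => "ll"
+ * Both go through js_NewDependentString, so the results typically share the
+ * original string's characters.
+ */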
+
+#if JS_HAS_STR_HTML_HELPERS
+/*
+ * HTML composition aids.
+ */
+static JSBool
+tagify(JSContext *cx, JSObject *obj, jsval *argv,
+ const char *begin, JSString *param, const char *end,
+ jsval *rval)
+{
+ JSString *str;
+ jschar *tagbuf;
+ size_t beglen, endlen, parlen, taglen;
+ size_t i, j;
+
+ if (JSVAL_IS_STRING((jsval)obj)) {
+ str = JSVAL_TO_STRING((jsval)obj);
+ } else {
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ argv[-1] = STRING_TO_JSVAL(str);
+ }
+
+ if (!end)
+ end = begin;
+
+ beglen = strlen(begin);
+ taglen = 1 + beglen + 1; /* '<begin' + '>' */
+ parlen = 0; /* Avoid warning. */
+ if (param) {
+ parlen = JSSTRING_LENGTH(param);
+ taglen += 2 + parlen + 1; /* '="param"' */
+ }
+ endlen = strlen(end);
+ taglen += JSSTRING_LENGTH(str) + 2 + endlen + 1; /* 'str</end>' */
+
+ if (taglen >= ~(size_t)0 / sizeof(jschar)) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+
+ tagbuf = (jschar *) JS_malloc(cx, (taglen + 1) * sizeof(jschar));
+ if (!tagbuf)
+ return JS_FALSE;
+
+ j = 0;
+ tagbuf[j++] = '<';
+ for (i = 0; i < beglen; i++)
+ tagbuf[j++] = (jschar)begin[i];
+ if (param) {
+ tagbuf[j++] = '=';
+ tagbuf[j++] = '"';
+ js_strncpy(&tagbuf[j], JSSTRING_CHARS(param), parlen);
+ j += parlen;
+ tagbuf[j++] = '"';
+ }
+ tagbuf[j++] = '>';
+ js_strncpy(&tagbuf[j], JSSTRING_CHARS(str), JSSTRING_LENGTH(str));
+ j += JSSTRING_LENGTH(str);
+ tagbuf[j++] = '<';
+ tagbuf[j++] = '/';
+ for (i = 0; i < endlen; i++)
+ tagbuf[j++] = (jschar)end[i];
+ tagbuf[j++] = '>';
+ JS_ASSERT(j == taglen);
+ tagbuf[j] = 0;
+
+ str = js_NewString(cx, tagbuf, taglen, 0);
+ if (!str) {
+ free((char *)tagbuf);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+tagify_value(JSContext *cx, JSObject *obj, jsval *argv,
+ const char *begin, const char *end,
+ jsval *rval)
+{
+ JSString *param;
+
+ param = js_ValueToString(cx, argv[0]);
+ if (!param)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(param);
+ return tagify(cx, obj, argv, begin, param, end, rval);
+}
+
+static JSBool
+str_bold(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return tagify(cx, obj, argv, "b", NULL, NULL, rval);
+}
+
+static JSBool
+str_italics(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return tagify(cx, obj, argv, "i", NULL, NULL, rval);
+}
+
+static JSBool
+str_fixed(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return tagify(cx, obj, argv, "tt", NULL, NULL, rval);
+}
+
+static JSBool
+str_fontsize(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return tagify_value(cx, obj, argv, "font size", "font", rval);
+}
+
+static JSBool
+str_fontcolor(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ return tagify_value(cx, obj, argv, "font color", "font", rval);
+}
+
+static JSBool
+str_link(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return tagify_value(cx, obj, argv, "a href", "a", rval);
+}
+
+static JSBool
+str_anchor(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return tagify_value(cx, obj, argv, "a name", "a", rval);
+}
+
+static JSBool
+str_strike(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return tagify(cx, obj, argv, "strike", NULL, NULL, rval);
+}
+
+static JSBool
+str_small(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return tagify(cx, obj, argv, "small", NULL, NULL, rval);
+}
+
+static JSBool
+str_big(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return tagify(cx, obj, argv, "big", NULL, NULL, rval);
+}
+
+static JSBool
+str_blink(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return tagify(cx, obj, argv, "blink", NULL, NULL, rval);
+}
+
+static JSBool
+str_sup(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return tagify(cx, obj, argv, "sup", NULL, NULL, rval);
+}
+
+static JSBool
+str_sub(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ return tagify(cx, obj, argv, "sub", NULL, NULL, rval);
+}
+#endif /* JS_HAS_STR_HTML_HELPERS */
+
+static JSFunctionSpec string_methods[] = {
+#if JS_HAS_TOSOURCE
+ {"quote", str_quote, 0,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+ {js_toSource_str, str_toSource, 0,JSFUN_THISP_STRING,0},
+#endif
+
+ /* Java-like methods. */
+ {js_toString_str, str_toString, 0,JSFUN_THISP_STRING,0},
+ {js_valueOf_str, str_valueOf, 0,JSFUN_THISP_STRING,0},
+ {"substring", str_substring, 2,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+ {"toLowerCase", str_toLowerCase, 0,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+ {"toUpperCase", str_toUpperCase, 0,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+ {"charAt", str_charAt, 1,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+ {"charCodeAt", str_charCodeAt, 1,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+ {"indexOf", str_indexOf, 1,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+ {"lastIndexOf", str_lastIndexOf, 1,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+ {"toLocaleLowerCase", str_toLocaleLowerCase, 0,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+ {"toLocaleUpperCase", str_toLocaleUpperCase, 0,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+ {"localeCompare", str_localeCompare, 1,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+
+ /* Perl-ish methods (search is actually Python-esque). */
+ {"match", str_match, 1,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,2},
+ {"search", str_search, 1,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+ {"replace", str_replace, 2,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+ {"split", str_split, 2,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+#if JS_HAS_PERL_SUBSTR
+ {"substr", str_substr, 2,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+#endif
+
+ /* Python-esque sequence methods. */
+ {"concat", str_concat, 0,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+ {"slice", str_slice, 0,JSFUN_GENERIC_NATIVE|
+ JSFUN_THISP_PRIMITIVE,0},
+
+ /* HTML string methods. */
+#if JS_HAS_STR_HTML_HELPERS
+ {"bold", str_bold, 0,JSFUN_THISP_PRIMITIVE,0},
+ {"italics", str_italics, 0,JSFUN_THISP_PRIMITIVE,0},
+ {"fixed", str_fixed, 0,JSFUN_THISP_PRIMITIVE,0},
+ {"fontsize", str_fontsize, 1,JSFUN_THISP_PRIMITIVE,0},
+ {"fontcolor", str_fontcolor, 1,JSFUN_THISP_PRIMITIVE,0},
+ {"link", str_link, 1,JSFUN_THISP_PRIMITIVE,0},
+ {"anchor", str_anchor, 1,JSFUN_THISP_PRIMITIVE,0},
+ {"strike", str_strike, 0,JSFUN_THISP_PRIMITIVE,0},
+ {"small", str_small, 0,JSFUN_THISP_PRIMITIVE,0},
+ {"big", str_big, 0,JSFUN_THISP_PRIMITIVE,0},
+ {"blink", str_blink, 0,JSFUN_THISP_PRIMITIVE,0},
+ {"sup", str_sup, 0,JSFUN_THISP_PRIMITIVE,0},
+ {"sub", str_sub, 0,JSFUN_THISP_PRIMITIVE,0},
+#endif
+
+ {0,0,0,0,0}
+};
+
+static JSBool
+String(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSString *str;
+
+ if (argc > 0) {
+ str = js_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(str);
+ } else {
+ str = cx->runtime->emptyString;
+ }
+ if (!(cx->fp->flags & JSFRAME_CONSTRUCTING)) {
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+ }
+ OBJ_SET_SLOT(cx, obj, JSSLOT_PRIVATE, STRING_TO_JSVAL(str));
+ return JS_TRUE;
+}
+
+static JSBool
+str_fromCharCode(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jschar *chars;
+ uintN i;
+ uint16 code;
+ JSString *str;
+
+ JS_ASSERT(argc < ARRAY_INIT_LIMIT);
+ chars = (jschar *) JS_malloc(cx, (argc + 1) * sizeof(jschar));
+ if (!chars)
+ return JS_FALSE;
+ for (i = 0; i < argc; i++) {
+ if (!js_ValueToUint16(cx, argv[i], &code)) {
+ JS_free(cx, chars);
+ return JS_FALSE;
+ }
+ chars[i] = (jschar)code;
+ }
+ chars[i] = 0;
+ str = js_NewString(cx, chars, argc, 0);
+ if (!str) {
+ JS_free(cx, chars);
+ return JS_FALSE;
+ }
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
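+
+/*
+ * Example: String.fromCharCode(0x68, 0x69) => "hi"; each argument is forced
+ * to uint16 via js_ValueToUint16 before being stored.
+ */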
+
+static JSFunctionSpec string_static_methods[] = {
+ {"fromCharCode", str_fromCharCode, 1,0,0},
+ {0,0,0,0,0}
+};
+
+JSBool
+js_InitRuntimeStringState(JSContext *cx)
+{
+ JSRuntime *rt;
+ JSString *empty;
+ JSAtom *atom;
+
+ rt = cx->runtime;
+
+ /* Initialize string cache */
+#ifdef JS_THREADSAFE
+ JS_ASSERT(!rt->deflatedStringCacheLock);
+ rt->deflatedStringCacheLock = JS_NEW_LOCK();
+ if (!rt->deflatedStringCacheLock)
+ return JS_FALSE;
+#endif
+
+ /* Make a permanently locked empty string. */
+ JS_ASSERT(!rt->emptyString);
+ empty = js_NewStringCopyN(cx, js_empty_ucstr, 0, GCF_LOCK);
+ if (!empty)
+ goto bad;
+
+ /* Atomize it for scripts that use '' + x to convert x to string. */
+ atom = js_AtomizeString(cx, empty, ATOM_PINNED);
+ if (!atom)
+ goto bad;
+
+ rt->emptyString = empty;
+ rt->atomState.emptyAtom = atom;
+
+ return JS_TRUE;
+
+ bad:
+#ifdef JS_THREADSAFE
+ JS_DESTROY_LOCK(rt->deflatedStringCacheLock);
+ rt->deflatedStringCacheLock = NULL;
+#endif
+ return JS_FALSE;
+
+}
+
+void
+js_FinishRuntimeStringState(JSContext *cx)
+{
+ JSRuntime *rt = cx->runtime;
+
+ js_UnlockGCThingRT(rt, rt->emptyString);
+ rt->emptyString = NULL;
+}
+
+void
+js_FinishDeflatedStringCache(JSRuntime *rt)
+{
+ if (rt->deflatedStringCache) {
+ JS_HashTableDestroy(rt->deflatedStringCache);
+ rt->deflatedStringCache = NULL;
+ }
+#ifdef JS_THREADSAFE
+ if (rt->deflatedStringCacheLock) {
+ JS_DESTROY_LOCK(rt->deflatedStringCacheLock);
+ rt->deflatedStringCacheLock = NULL;
+ }
+#endif
+}
+
+JSObject *
+js_InitStringClass(JSContext *cx, JSObject *obj)
+{
+ JSObject *proto;
+
+ /* Define the escape, unescape functions in the global object. */
+ if (!JS_DefineFunctions(cx, obj, string_functions))
+ return NULL;
+
+ proto = JS_InitClass(cx, obj, NULL, &js_StringClass, String, 1,
+ string_props, string_methods,
+ NULL, string_static_methods);
+ if (!proto)
+ return NULL;
+ OBJ_SET_SLOT(cx, proto, JSSLOT_PRIVATE,
+ STRING_TO_JSVAL(cx->runtime->emptyString));
+ return proto;
+}
+
+JSString *
+js_NewString(JSContext *cx, jschar *chars, size_t length, uintN gcflag)
+{
+ JSString *str;
+
+ if (length > JSSTRING_LENGTH_MASK) {
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+ }
+
+ str = (JSString *) js_NewGCThing(cx, gcflag | GCX_STRING, sizeof(JSString));
+ if (!str)
+ return NULL;
+ str->length = length;
+ str->chars = chars;
+#ifdef DEBUG
+ {
+ JSRuntime *rt = cx->runtime;
+ JS_RUNTIME_METER(rt, liveStrings);
+ JS_RUNTIME_METER(rt, totalStrings);
+ JS_LOCK_RUNTIME_VOID(rt,
+ (rt->lengthSum += (double)length,
+ rt->lengthSquaredSum += (double)length * (double)length));
+ }
+#endif
+ return str;
+}
+
+JSString *
+js_NewDependentString(JSContext *cx, JSString *base, size_t start,
+ size_t length, uintN gcflag)
+{
+ JSDependentString *ds;
+
+ if (length == 0)
+ return cx->runtime->emptyString;
+
+ if (start == 0 && length == JSSTRING_LENGTH(base))
+ return base;
+
+ if (start > JSSTRDEP_START_MASK ||
+ (start != 0 && length > JSSTRDEP_LENGTH_MASK)) {
+ return js_NewStringCopyN(cx, JSSTRING_CHARS(base) + start, length,
+ gcflag);
+ }
+
+ ds = (JSDependentString *)
+ js_NewGCThing(cx, gcflag | GCX_MUTABLE_STRING, sizeof(JSString));
+ if (!ds)
+ return NULL;
+ if (start == 0) {
+ JSPREFIX_SET_LENGTH(ds, length);
+ JSPREFIX_SET_BASE(ds, base);
+ } else {
+ JSSTRDEP_SET_START_AND_LENGTH(ds, start, length);
+ JSSTRDEP_SET_BASE(ds, base);
+ }
+#ifdef DEBUG
+ {
+ JSRuntime *rt = cx->runtime;
+ JS_RUNTIME_METER(rt, liveDependentStrings);
+ JS_RUNTIME_METER(rt, totalDependentStrings);
+ JS_RUNTIME_METER(rt, liveStrings);
+ JS_RUNTIME_METER(rt, totalStrings);
+ JS_LOCK_RUNTIME_VOID(rt,
+ (rt->strdepLengthSum += (double)length,
+ rt->strdepLengthSquaredSum += (double)length * (double)length));
+ JS_LOCK_RUNTIME_VOID(rt,
+ (rt->lengthSum += (double)length,
+ rt->lengthSquaredSum += (double)length * (double)length));
+ }
+#endif
+ return (JSString *)ds;
+}
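+
+/*
+ * Rough picture: a dependent string records only (start, length, base) and
+ * reads its characters out of base's buffer, so substring/slice/charAt above
+ * cost O(1) space per call.  js_UndependString (earlier in this file) gives
+ * such a string its own NUL-terminated copy when flat storage is required.
+ */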
+
+#ifdef DEBUG
+#include <math.h>
+
+void printJSStringStats(JSRuntime *rt) {
+ double mean = 0., var = 0., sigma = 0.;
+ jsrefcount count = rt->totalStrings;
+ if (count > 0 && rt->lengthSum >= 0) {
+ mean = rt->lengthSum / count;
+ var = count * rt->lengthSquaredSum - rt->lengthSum * rt->lengthSum;
+ if (var < 0.0 || count <= 1)
+ var = 0.0;
+ else
+ var /= count * (count - 1);
+
+ /* Windows says sqrt(0.0) is "-1.#J" (?!) so we must test. */
+ sigma = (var != 0.) ? sqrt(var) : 0.;
+ }
+ fprintf(stderr, "%lu total strings, mean length %g (sigma %g)\n",
+ (unsigned long)count, mean, sigma);
+
+ mean = var = sigma = 0.;
+ count = rt->totalDependentStrings;
+ if (count > 0 && rt->strdepLengthSum >= 0) {
+ mean = rt->strdepLengthSum / count;
+ var = count * rt->strdepLengthSquaredSum
+ - rt->strdepLengthSum * rt->strdepLengthSum;
+ if (var < 0.0 || count <= 1)
+ var = 0.0;
+ else
+ var /= count * (count - 1);
+
+ /* Windows says sqrt(0.0) is "-1.#J" (?!) so we must test. */
+ sigma = (var != 0.) ? sqrt(var) : 0.;
+ }
+ fprintf(stderr, "%lu total dependent strings, mean length %g (sigma %g)\n",
+ (unsigned long)count, mean, sigma);
+}
+#endif
+
+JSString *
+js_NewStringCopyN(JSContext *cx, const jschar *s, size_t n, uintN gcflag)
+{
+ jschar *news;
+ JSString *str;
+
+ news = (jschar *)JS_malloc(cx, (n + 1) * sizeof(jschar));
+ if (!news)
+ return NULL;
+ js_strncpy(news, s, n);
+ news[n] = 0;
+ str = js_NewString(cx, news, n, gcflag);
+ if (!str)
+ JS_free(cx, news);
+ return str;
+}
+
+JSString *
+js_NewStringCopyZ(JSContext *cx, const jschar *s, uintN gcflag)
+{
+ size_t n, m;
+ jschar *news;
+ JSString *str;
+
+ n = js_strlen(s);
+ m = (n + 1) * sizeof(jschar);
+ news = (jschar *) JS_malloc(cx, m);
+ if (!news)
+ return NULL;
+ memcpy(news, s, m);
+ str = js_NewString(cx, news, n, gcflag);
+ if (!str)
+ JS_free(cx, news);
+ return str;
+}
+
+JS_STATIC_DLL_CALLBACK(JSHashNumber)
+js_hash_string_pointer(const void *key)
+{
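+    /* GC-thing pointers are aligned, so the low JSVAL_TAGBITS bits carry no entropy. */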
+ return (JSHashNumber)JS_PTR_TO_UINT32(key) >> JSVAL_TAGBITS;
+}
+
+void
+js_PurgeDeflatedStringCache(JSRuntime *rt, JSString *str)
+{
+ JSHashNumber hash;
+ JSHashEntry *he, **hep;
+
+ if (!rt->deflatedStringCache)
+ return;
+
+ hash = js_hash_string_pointer(str);
+ JS_ACQUIRE_LOCK(rt->deflatedStringCacheLock);
+ hep = JS_HashTableRawLookup(rt->deflatedStringCache, hash, str);
+ he = *hep;
+ if (he) {
+#ifdef DEBUG
+ rt->deflatedStringCacheBytes -= JSSTRING_LENGTH(str);
+#endif
+ free(he->value);
+ JS_HashTableRawRemove(rt->deflatedStringCache, hep, he);
+ }
+ JS_RELEASE_LOCK(rt->deflatedStringCacheLock);
+}
+
+void
+js_FinalizeString(JSContext *cx, JSString *str)
+{
+ js_FinalizeStringRT(cx->runtime, str);
+}
+
+void
+js_FinalizeStringRT(JSRuntime *rt, JSString *str)
+{
+ JSBool valid;
+
+ JS_RUNTIME_UNMETER(rt, liveStrings);
+ if (JSSTRING_IS_DEPENDENT(str)) {
+ /* If JSSTRFLAG_DEPENDENT is set, this string must be valid. */
+ JS_ASSERT(JSSTRDEP_BASE(str));
+ JS_RUNTIME_UNMETER(rt, liveDependentStrings);
+ valid = JS_TRUE;
+ } else {
+ /* A stillborn string has null chars, so is not valid. */
+ valid = (str->chars != NULL);
+ if (valid)
+ free(str->chars);
+ }
+ if (valid) {
+ js_PurgeDeflatedStringCache(rt, str);
+ str->chars = NULL;
+ }
+ str->length = 0;
+}
+
+JSObject *
+js_StringToObject(JSContext *cx, JSString *str)
+{
+ JSObject *obj;
+
+ obj = js_NewObject(cx, &js_StringClass, NULL, NULL);
+ if (!obj)
+ return NULL;
+ OBJ_SET_SLOT(cx, obj, JSSLOT_PRIVATE, STRING_TO_JSVAL(str));
+ return obj;
+}
+
+JS_FRIEND_API(const char *)
+js_ValueToPrintable(JSContext *cx, jsval v, JSValueToStringFun v2sfun)
+{
+ JSString *str;
+ const char *bytes;
+
+ str = v2sfun(cx, v);
+ if (!str)
+ return NULL;
+ str = js_QuoteString(cx, str, 0);
+ if (!str)
+ return NULL;
+ bytes = js_GetStringBytes(cx->runtime, str);
+ if (!bytes)
+ JS_ReportOutOfMemory(cx);
+ return bytes;
+}
+
+JS_FRIEND_API(JSString *)
+js_ValueToString(JSContext *cx, jsval v)
+{
+ JSObject *obj;
+ JSString *str;
+
+ if (JSVAL_IS_OBJECT(v)) {
+ obj = JSVAL_TO_OBJECT(v);
+ if (!obj)
+ return ATOM_TO_STRING(cx->runtime->atomState.nullAtom);
+ if (!OBJ_DEFAULT_VALUE(cx, obj, JSTYPE_STRING, &v))
+ return NULL;
+ }
+ if (JSVAL_IS_STRING(v)) {
+ str = JSVAL_TO_STRING(v);
+ } else if (JSVAL_IS_INT(v)) {
+ str = js_NumberToString(cx, JSVAL_TO_INT(v));
+ } else if (JSVAL_IS_DOUBLE(v)) {
+ str = js_NumberToString(cx, *JSVAL_TO_DOUBLE(v));
+ } else if (JSVAL_IS_BOOLEAN(v)) {
+ str = js_BooleanToString(cx, JSVAL_TO_BOOLEAN(v));
+ } else {
+ str = ATOM_TO_STRING(cx->runtime->atomState.typeAtoms[JSTYPE_VOID]);
+ }
+ return str;
+}
+
+JS_FRIEND_API(JSString *)
+js_ValueToSource(JSContext *cx, jsval v)
+{
+ JSTempValueRooter tvr;
+ JSString *str;
+
+ if (JSVAL_IS_STRING(v))
+ return js_QuoteString(cx, JSVAL_TO_STRING(v), '"');
+ if (JSVAL_IS_PRIMITIVE(v)) {
+ /* Special case to preserve negative zero, _contra_ toString. */
+ if (JSVAL_IS_DOUBLE(v) && JSDOUBLE_IS_NEGZERO(*JSVAL_TO_DOUBLE(v))) {
+ /* NB: _ucNstr rather than _ucstr to indicate non-terminated. */
+ static const jschar js_negzero_ucNstr[] = {'-', '0'};
+
+ return js_NewStringCopyN(cx, js_negzero_ucNstr, 2, 0);
+ }
+ return js_ValueToString(cx, v);
+ }
+
+ JS_PUSH_SINGLE_TEMP_ROOT(cx, JSVAL_NULL, &tvr);
+ if (!js_TryMethod(cx, JSVAL_TO_OBJECT(v),
+ cx->runtime->atomState.toSourceAtom,
+ 0, NULL, &tvr.u.value)) {
+ str = NULL;
+ } else {
+ str = js_ValueToString(cx, tvr.u.value);
+ }
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return str;
+}
+
+JSHashNumber
+js_HashString(JSString *str)
+{
+ JSHashNumber h;
+ const jschar *s;
+ size_t n;
+
+ h = 0;
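+    /* Rotate the hash left by 4 bits and xor in each code unit. */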
+ for (s = JSSTRING_CHARS(str), n = JSSTRING_LENGTH(str); n; s++, n--)
+ h = (h >> (JS_HASH_BITS - 4)) ^ (h << 4) ^ *s;
+ return h;
+}
+
+intN
+js_CompareStrings(JSString *str1, JSString *str2)
+{
+ size_t l1, l2, n, i;
+ const jschar *s1, *s2;
+ intN cmp;
+
+ JS_ASSERT(str1);
+ JS_ASSERT(str2);
+
+ /* Fast case: pointer equality could be a quick win. */
+ if (str1 == str2)
+ return 0;
+
+ l1 = JSSTRING_LENGTH(str1), l2 = JSSTRING_LENGTH(str2);
+ s1 = JSSTRING_CHARS(str1), s2 = JSSTRING_CHARS(str2);
+ n = JS_MIN(l1, l2);
+ for (i = 0; i < n; i++) {
+ cmp = s1[i] - s2[i];
+ if (cmp != 0)
+ return cmp;
+ }
+ return (intN)(l1 - l2);
+}
+
+JSBool
+js_EqualStrings(JSString *str1, JSString *str2)
+{
+ size_t n;
+ const jschar *s1, *s2;
+
+ JS_ASSERT(str1);
+ JS_ASSERT(str2);
+
+ /* Fast case: pointer equality could be a quick win. */
+ if (str1 == str2)
+ return JS_TRUE;
+
+ n = JSSTRING_LENGTH(str1);
+ if (n != JSSTRING_LENGTH(str2))
+ return JS_FALSE;
+
+ if (n == 0)
+ return JS_TRUE;
+
+ s1 = JSSTRING_CHARS(str1), s2 = JSSTRING_CHARS(str2);
+ do {
+ if (*s1 != *s2)
+ return JS_FALSE;
+ ++s1, ++s2;
+ } while (--n != 0);
+
+ return JS_TRUE;
+}
+
+size_t
+js_strlen(const jschar *s)
+{
+ const jschar *t;
+
+ for (t = s; *t != 0; t++)
+ continue;
+ return (size_t)(t - s);
+}
+
+jschar *
+js_strchr(const jschar *s, jschar c)
+{
+ while (*s != 0) {
+ if (*s == c)
+ return (jschar *)s;
+ s++;
+ }
+ return NULL;
+}
+
+jschar *
+js_strchr_limit(const jschar *s, jschar c, const jschar *limit)
+{
+ while (s < limit) {
+ if (*s == c)
+ return (jschar *)s;
+ s++;
+ }
+ return NULL;
+}
+
+const jschar *
+js_SkipWhiteSpace(const jschar *s)
+{
+ /* JS_ISSPACE is false on a null. */
+ while (JS_ISSPACE(*s))
+ s++;
+ return s;
+}
+
+#ifdef JS_C_STRINGS_ARE_UTF8
+
+jschar *
+js_InflateString(JSContext *cx, const char *bytes, size_t *length)
+{
+ jschar *chars = NULL;
+ size_t dstlen = 0;
+
+ if (!js_InflateStringToBuffer(cx, bytes, *length, NULL, &dstlen))
+ return NULL;
+ chars = (jschar *) JS_malloc(cx, (dstlen + 1) * sizeof (jschar));
+ if (!chars)
+ return NULL;
+ js_InflateStringToBuffer(cx, bytes, *length, chars, &dstlen);
+ chars[dstlen] = 0;
+ *length = dstlen;
+ return chars;
+}
+
+/*
+ * May be called with null cx by js_GetStringBytes, see below.
+ */
+char *
+js_DeflateString(JSContext *cx, const jschar *chars, size_t length)
+{
+ size_t size = 0;
+ char *bytes = NULL;
+ if (!js_DeflateStringToBuffer(cx, chars, length, NULL, &size))
+ return NULL;
+ bytes = (char *) (cx ? JS_malloc(cx, size+1) : malloc(size+1));
+ if (!bytes)
+ return NULL;
+ js_DeflateStringToBuffer(cx, chars, length, bytes, &size);
+ bytes[size] = 0;
+ return bytes;
+}
+
+JSBool
+js_DeflateStringToBuffer(JSContext *cx, const jschar *src, size_t srclen,
+ char *dst, size_t *dstlenp)
+{
+ size_t i, utf8Len, dstlen = *dstlenp, origDstlen = dstlen;
+ jschar c, c2;
+ uint32 v;
+ uint8 utf8buf[6];
+
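+    /*
+     * A null dst puts the function in measure-only mode: the buffer is treated
+     * as unbounded and *dstlenp reports the number of bytes required.
+     */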
+ if (!dst)
+ dstlen = origDstlen = (size_t) -1;
+
+ while (srclen) {
+ c = *src++;
+ srclen--;
+ if ((c >= 0xDC00) && (c <= 0xDFFF))
+ goto badSurrogate;
+ if (c < 0xD800 || c > 0xDBFF) {
+ v = c;
+ } else {
+ if (srclen < 1)
+ goto bufferTooSmall;
+ c2 = *src++;
+ srclen--;
+ if ((c2 < 0xDC00) || (c2 > 0xDFFF)) {
+ c = c2;
+ goto badSurrogate;
+ }
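+            /* Combine the high/low surrogate pair into a single code point above U+FFFF. */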
+ v = ((c - 0xD800) << 10) + (c2 - 0xDC00) + 0x10000;
+ }
+ if (v < 0x0080) {
+ /* no encoding necessary - performance hack */
+ if (!dstlen)
+ goto bufferTooSmall;
+ if (dst)
+ *dst++ = (char) v;
+ utf8Len = 1;
+ } else {
+ utf8Len = js_OneUcs4ToUtf8Char(utf8buf, v);
+ if (utf8Len > dstlen)
+ goto bufferTooSmall;
+ if (dst) {
+ for (i = 0; i < utf8Len; i++)
+ *dst++ = (char) utf8buf[i];
+ }
+ }
+ dstlen -= utf8Len;
+ }
+ *dstlenp = (origDstlen - dstlen);
+ return JS_TRUE;
+
+badSurrogate:
+ *dstlenp = (origDstlen - dstlen);
+ if (cx) {
+ char buffer[10];
+ JS_snprintf(buffer, 10, "0x%x", c);
+ JS_ReportErrorFlagsAndNumber(cx, JSREPORT_ERROR,
+ js_GetErrorMessage, NULL,
+ JSMSG_BAD_SURROGATE_CHAR,
+ buffer);
+ }
+ return JS_FALSE;
+
+bufferTooSmall:
+ *dstlenp = (origDstlen - dstlen);
+ if (cx) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BUFFER_TOO_SMALL);
+ }
+ return JS_FALSE;
+}
+
+JSBool
+js_InflateStringToBuffer(JSContext *cx, const char *src, size_t srclen,
+ jschar *dst, size_t *dstlenp)
+{
+ uint32 v;
+ size_t offset = 0, j, n, dstlen = *dstlenp, origDstlen = dstlen;
+
+ if (!dst)
+ dstlen = origDstlen = (size_t) -1;
+
+ while (srclen) {
+ v = (uint8) *src;
+ n = 1;
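+        /* For a multibyte sequence, the run of leading 1-bits in the first byte gives its length. */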
+ if (v & 0x80) {
+ while (v & (0x80 >> n))
+ n++;
+ if (n > srclen)
+ goto bufferTooSmall;
+ if (n == 1 || n > 6)
+ goto badCharacter;
+ for (j = 1; j < n; j++) {
+ if ((src[j] & 0xC0) != 0x80)
+ goto badCharacter;
+ }
+ v = Utf8ToOneUcs4Char(src, n);
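+            /* Code points beyond the BMP are re-encoded as a UTF-16 surrogate pair. */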
+ if (v >= 0x10000) {
+ v -= 0x10000;
+ if (v > 0xFFFFF || dstlen < 2) {
+ *dstlenp = (origDstlen - dstlen);
+ if (cx) {
+ char buffer[10];
+ JS_snprintf(buffer, 10, "0x%x", v + 0x10000);
+ JS_ReportErrorFlagsAndNumber(cx,
+ JSREPORT_ERROR,
+ js_GetErrorMessage, NULL,
+ JSMSG_UTF8_CHAR_TOO_LARGE,
+ buffer);
+ }
+ return JS_FALSE;
+ }
+ if (dstlen < 2)
+ goto bufferTooSmall;
+ if (dst) {
+ *dst++ = (jschar)((v >> 10) + 0xD800);
+ v = (jschar)((v & 0x3FF) + 0xDC00);
+ }
+ dstlen--;
+ }
+ }
+ if (!dstlen)
+ goto bufferTooSmall;
+ if (dst)
+ *dst++ = (jschar) v;
+ dstlen--;
+ offset += n;
+ src += n;
+ srclen -= n;
+ }
+ *dstlenp = (origDstlen - dstlen);
+ return JS_TRUE;
+
+badCharacter:
+ *dstlenp = (origDstlen - dstlen);
+ if (cx) {
+ char buffer[10];
+ JS_snprintf(buffer, 10, "%d", offset);
+ JS_ReportErrorFlagsAndNumber(cx, JSREPORT_ERROR,
+ js_GetErrorMessage, NULL,
+ JSMSG_MALFORMED_UTF8_CHAR,
+ buffer);
+ }
+ return JS_FALSE;
+
+bufferTooSmall:
+ *dstlenp = (origDstlen - dstlen);
+ if (cx) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BUFFER_TOO_SMALL);
+ }
+ return JS_FALSE;
+}
+
+#else
+
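+/*
+ * Without JS_C_STRINGS_ARE_UTF8, C strings are treated as Latin-1: inflation
+ * zero-extends each byte to a jschar and deflation truncates each jschar back
+ * to a byte.
+ */
+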
+JSBool
+js_InflateStringToBuffer(JSContext* cx, const char *bytes, size_t length,
+ jschar *chars, size_t* charsLength)
+{
+ size_t i;
+
+ if (length > *charsLength) {
+ for (i = 0; i < *charsLength; i++)
+ chars[i] = (unsigned char) bytes[i];
+ if (cx) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BUFFER_TOO_SMALL);
+ }
+ return JS_FALSE;
+ }
+ for (i = 0; i < length; i++)
+ chars[i] = (unsigned char) bytes[i];
+ *charsLength = length;
+ return JS_TRUE;
+}
+
+jschar *
+js_InflateString(JSContext *cx, const char *bytes, size_t *bytesLength)
+{
+ jschar *chars;
+ size_t i, length = *bytesLength;
+
+ chars = (jschar *) JS_malloc(cx, (length + 1) * sizeof(jschar));
+ if (!chars) {
+ *bytesLength = 0;
+ return NULL;
+ }
+ for (i = 0; i < length; i++)
+ chars[i] = (unsigned char) bytes[i];
+ chars[length] = 0;
+ *bytesLength = length;
+ return chars;
+}
+
+JSBool
+js_DeflateStringToBuffer(JSContext* cx, const jschar *chars, size_t length,
+ char *bytes, size_t* bytesLength)
+{
+ size_t i;
+
+ if (length > *bytesLength) {
+ for (i = 0; i < *bytesLength; i++)
+ bytes[i] = (char) chars[i];
+ if (cx) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BUFFER_TOO_SMALL);
+ }
+ return JS_FALSE;
+ }
+ for (i = 0; i < length; i++)
+ bytes[i] = (char) chars[i];
+ *bytesLength = length;
+ return JS_TRUE;
+}
+
+/*
+ * May be called with null cx by js_GetStringBytes, see below.
+ */
+char *
+js_DeflateString(JSContext *cx, const jschar *chars, size_t length)
+{
+ size_t i, size;
+ char *bytes;
+
+ size = (length + 1) * sizeof(char);
+ bytes = (char *) (cx ? JS_malloc(cx, size) : malloc(size));
+ if (!bytes)
+ return NULL;
+
+ for (i = 0; i < length; i++)
+ bytes[i] = (char) chars[i];
+
+ bytes[length] = 0;
+ return bytes;
+}
+
+#endif
+
+static JSHashTable *
+GetDeflatedStringCache(JSRuntime *rt)
+{
+ JSHashTable *cache;
+
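+    /* Called with deflatedStringCacheLock held; the table is created lazily on first use. */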
+ cache = rt->deflatedStringCache;
+ if (!cache) {
+ cache = JS_NewHashTable(8, js_hash_string_pointer,
+ JS_CompareValues, JS_CompareValues,
+ NULL, NULL);
+ rt->deflatedStringCache = cache;
+ }
+ return cache;
+}
+
+JSBool
+js_SetStringBytes(JSRuntime *rt, JSString *str, char *bytes, size_t length)
+{
+ JSHashTable *cache;
+ JSBool ok;
+ JSHashNumber hash;
+ JSHashEntry **hep;
+
+ JS_ACQUIRE_LOCK(rt->deflatedStringCacheLock);
+
+ cache = GetDeflatedStringCache(rt);
+ if (!cache) {
+ ok = JS_FALSE;
+ } else {
+ hash = js_hash_string_pointer(str);
+ hep = JS_HashTableRawLookup(cache, hash, str);
+ JS_ASSERT(*hep == NULL);
+ ok = JS_HashTableRawAdd(cache, hep, hash, str, bytes) != NULL;
+#ifdef DEBUG
+ if (ok)
+ rt->deflatedStringCacheBytes += length;
+#endif
+ }
+
+ JS_RELEASE_LOCK(rt->deflatedStringCacheLock);
+ return ok;
+}
+
+char *
+js_GetStringBytes(JSRuntime *rt, JSString *str)
+{
+ JSHashTable *cache;
+ char *bytes;
+ JSHashNumber hash;
+ JSHashEntry *he, **hep;
+
+ JS_ACQUIRE_LOCK(rt->deflatedStringCacheLock);
+
+ cache = GetDeflatedStringCache(rt);
+ if (!cache) {
+ bytes = NULL;
+ } else {
+ hash = js_hash_string_pointer(str);
+ hep = JS_HashTableRawLookup(cache, hash, str);
+ he = *hep;
+ if (he) {
+ bytes = (char *) he->value;
+
+ /* Try to catch failure to JS_ShutDown between runtime epochs. */
+ JS_ASSERT((*bytes == '\0' && JSSTRING_LENGTH(str) == 0) ||
+ *bytes == (char) JSSTRING_CHARS(str)[0]);
+ } else {
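+            /* Not cached yet: deflate with a null cx (so no error reporting) and cache the result. */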
+ bytes = js_DeflateString(NULL, JSSTRING_CHARS(str),
+ JSSTRING_LENGTH(str));
+ if (bytes) {
+ if (JS_HashTableRawAdd(cache, hep, hash, str, bytes)) {
+#ifdef DEBUG
+ rt->deflatedStringCacheBytes += JSSTRING_LENGTH(str);
+#endif
+ } else {
+ free(bytes);
+ bytes = NULL;
+ }
+ }
+ }
+ }
+
+ JS_RELEASE_LOCK(rt->deflatedStringCacheLock);
+ return bytes;
+}
+
+/*
+ * From java.lang.Character.java:
+ *
+ * The character properties are currently encoded into 32 bits in the
+ * following manner:
+ *
+ * 10 bits signed offset used for converting case
+ * 1 bit if 1, adding the signed offset converts the character to
+ * lowercase
+ * 1 bit if 1, subtracting the signed offset converts the character to
+ * uppercase
+ * 1 bit if 1, character has a titlecase equivalent (possibly itself)
+ * 3 bits 0 may not be part of an identifier
+ * 1 ignorable control; may continue a Unicode identifier or JS
+ * identifier
+ * 2 may continue a JS identifier but not a Unicode identifier
+ * (unused)
+ * 3 may continue a Unicode identifier or JS identifier
+ * 4 is a JS whitespace character
+ * 5 may start or continue a JS identifier;
+ * may continue but not start a Unicode identifier (_)
+ * 6 may start or continue a JS identifier but not a Unicode
+ * identifier ($)
+ * 7 may start or continue a Unicode identifier or JS identifier
+ * Thus:
+ * 5, 6, 7 may start a JS identifier
+ * 1, 2, 3, 5, 6, 7 may continue a JS identifier
+ * 7 may start a Unicode identifier
+ * 1, 3, 5, 7 may continue a Unicode identifier
+ * 1 is ignorable within an identifier
+ * 4 is JS whitespace
+ * 2 bits 0 this character has no numeric property
+ * 1 adding the digit offset to the character code and then
+ * masking with 0x1F will produce the desired numeric value
+ * 2 this character has a "strange" numeric value
+ * 3 a JS supradecimal digit: adding the digit offset to the
+ * character code, then masking with 0x1F, then adding 10
+ * will produce the desired numeric value
+ * 5 bits digit offset
+ * 1 bit XML 1.0 name start character
+ * 1 bit XML 1.0 name character
+ * 2 bits reserved for future use
+ * 5 bits character type
+ */
+
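+/*
+ * These tables are consulted in two steps: js_X maps the upper 10 bits of a
+ * 16-bit code unit to a 64-entry block, and js_Y maps (block << 6) | (c & 0x3F)
+ * to an index into the packed 32-bit attribute words described above.
+ */
+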
+/* The X table has 1024 entries for a total of 1024 bytes. */
+
+const uint8 js_X[] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, /* 0x0000 */
+ 8, 9, 10, 11, 12, 13, 14, 15, /* 0x0200 */
+ 16, 17, 18, 19, 20, 21, 22, 23, /* 0x0400 */
+ 24, 25, 26, 27, 28, 28, 28, 28, /* 0x0600 */
+ 28, 28, 28, 28, 29, 30, 31, 32, /* 0x0800 */
+ 33, 34, 35, 36, 37, 38, 39, 40, /* 0x0A00 */
+ 41, 42, 43, 44, 45, 46, 28, 28, /* 0x0C00 */
+ 47, 48, 49, 50, 51, 52, 53, 28, /* 0x0E00 */
+ 28, 28, 54, 55, 56, 57, 58, 59, /* 0x1000 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x1200 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x1400 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x1600 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x1800 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x1A00 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x1C00 */
+ 60, 60, 61, 62, 63, 64, 65, 66, /* 0x1E00 */
+ 67, 68, 69, 70, 71, 72, 73, 74, /* 0x2000 */
+ 75, 75, 75, 76, 77, 78, 28, 28, /* 0x2200 */
+ 79, 80, 81, 82, 83, 83, 84, 85, /* 0x2400 */
+ 86, 85, 28, 28, 87, 88, 89, 28, /* 0x2600 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x2800 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x2A00 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x2C00 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x2E00 */
+ 90, 91, 92, 93, 94, 56, 95, 28, /* 0x3000 */
+ 96, 97, 98, 99, 83, 100, 83, 101, /* 0x3200 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x3400 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x3600 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x3800 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x3A00 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x3C00 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x3E00 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x4000 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x4200 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x4400 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x4600 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x4800 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x4A00 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0x4C00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x4E00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x5000 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x5200 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x5400 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x5600 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x5800 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x5A00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x5C00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x5E00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x6000 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x6200 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x6400 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x6600 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x6800 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x6A00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x6C00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x6E00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x7000 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x7200 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x7400 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x7600 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x7800 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x7A00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x7C00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x7E00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x8000 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x8200 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x8400 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x8600 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x8800 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x8A00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x8C00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x8E00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x9000 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x9200 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x9400 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x9600 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x9800 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x9A00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0x9C00 */
+ 56, 56, 56, 56, 56, 56, 102, 28, /* 0x9E00 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0xA000 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0xA200 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0xA400 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0xA600 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0xA800 */
+ 28, 28, 28, 28, 28, 28, 28, 28, /* 0xAA00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xAC00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xAE00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xB000 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xB200 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xB400 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xB600 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xB800 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xBA00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xBC00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xBE00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xC000 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xC200 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xC400 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xC600 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xC800 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xCA00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xCC00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xCE00 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xD000 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xD200 */
+ 56, 56, 56, 56, 56, 56, 56, 56, /* 0xD400 */
+ 56, 56, 56, 56, 56, 56, 103, 28, /* 0xD600 */
+104, 104, 104, 104, 104, 104, 104, 104, /* 0xD800 */
+104, 104, 104, 104, 104, 104, 104, 104, /* 0xDA00 */
+104, 104, 104, 104, 104, 104, 104, 104, /* 0xDC00 */
+104, 104, 104, 104, 104, 104, 104, 104, /* 0xDE00 */
+105, 105, 105, 105, 105, 105, 105, 105, /* 0xE000 */
+105, 105, 105, 105, 105, 105, 105, 105, /* 0xE200 */
+105, 105, 105, 105, 105, 105, 105, 105, /* 0xE400 */
+105, 105, 105, 105, 105, 105, 105, 105, /* 0xE600 */
+105, 105, 105, 105, 105, 105, 105, 105, /* 0xE800 */
+105, 105, 105, 105, 105, 105, 105, 105, /* 0xEA00 */
+105, 105, 105, 105, 105, 105, 105, 105, /* 0xEC00 */
+105, 105, 105, 105, 105, 105, 105, 105, /* 0xEE00 */
+105, 105, 105, 105, 105, 105, 105, 105, /* 0xF000 */
+105, 105, 105, 105, 105, 105, 105, 105, /* 0xF200 */
+105, 105, 105, 105, 105, 105, 105, 105, /* 0xF400 */
+105, 105, 105, 105, 105, 105, 105, 105, /* 0xF600 */
+105, 105, 105, 105, 56, 56, 56, 56, /* 0xF800 */
+106, 28, 28, 28, 107, 108, 109, 110, /* 0xFA00 */
+ 56, 56, 56, 56, 111, 112, 113, 114, /* 0xFC00 */
+115, 116, 56, 117, 118, 119, 120, 121 /* 0xFE00 */
+};
+
+/* The Y table has 7808 entries for a total of 7808 bytes. */
+
+const uint8 js_Y[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 0 */
+ 0, 1, 1, 1, 1, 1, 0, 0, /* 0 */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 0 */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 0 */
+ 2, 3, 3, 3, 4, 3, 3, 3, /* 0 */
+ 5, 6, 3, 7, 3, 8, 3, 3, /* 0 */
+ 9, 9, 9, 9, 9, 9, 9, 9, /* 0 */
+ 9, 9, 3, 3, 7, 7, 7, 3, /* 0 */
+ 3, 10, 10, 10, 10, 10, 10, 10, /* 1 */
+ 10, 10, 10, 10, 10, 10, 10, 10, /* 1 */
+ 10, 10, 10, 10, 10, 10, 10, 10, /* 1 */
+ 10, 10, 10, 5, 3, 6, 11, 12, /* 1 */
+ 11, 13, 13, 13, 13, 13, 13, 13, /* 1 */
+ 13, 13, 13, 13, 13, 13, 13, 13, /* 1 */
+ 13, 13, 13, 13, 13, 13, 13, 13, /* 1 */
+ 13, 13, 13, 5, 7, 6, 7, 0, /* 1 */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */
+ 2, 3, 4, 4, 4, 4, 15, 15, /* 2 */
+ 11, 15, 16, 5, 7, 8, 15, 11, /* 2 */
+ 15, 7, 17, 17, 11, 16, 15, 3, /* 2 */
+ 11, 18, 16, 6, 19, 19, 19, 3, /* 2 */
+ 20, 20, 20, 20, 20, 20, 20, 20, /* 3 */
+ 20, 20, 20, 20, 20, 20, 20, 20, /* 3 */
+ 20, 20, 20, 20, 20, 20, 20, 7, /* 3 */
+ 20, 20, 20, 20, 20, 20, 20, 16, /* 3 */
+ 21, 21, 21, 21, 21, 21, 21, 21, /* 3 */
+ 21, 21, 21, 21, 21, 21, 21, 21, /* 3 */
+ 21, 21, 21, 21, 21, 21, 21, 7, /* 3 */
+ 21, 21, 21, 21, 21, 21, 21, 22, /* 3 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 4 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 4 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 4 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 4 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 4 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 4 */
+ 25, 26, 23, 24, 23, 24, 23, 24, /* 4 */
+ 16, 23, 24, 23, 24, 23, 24, 23, /* 4 */
+ 24, 23, 24, 23, 24, 23, 24, 23, /* 5 */
+ 24, 16, 23, 24, 23, 24, 23, 24, /* 5 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 5 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 5 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 5 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 5 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 5 */
+ 27, 23, 24, 23, 24, 23, 24, 28, /* 5 */
+ 16, 29, 23, 24, 23, 24, 30, 23, /* 6 */
+ 24, 31, 31, 23, 24, 16, 32, 32, /* 6 */
+ 33, 23, 24, 31, 34, 16, 35, 36, /* 6 */
+ 23, 24, 16, 16, 35, 37, 16, 38, /* 6 */
+ 23, 24, 23, 24, 23, 24, 38, 23, /* 6 */
+ 24, 39, 40, 16, 23, 24, 39, 23, /* 6 */
+ 24, 41, 41, 23, 24, 23, 24, 42, /* 6 */
+ 23, 24, 16, 40, 23, 24, 40, 40, /* 6 */
+ 40, 40, 40, 40, 43, 44, 45, 43, /* 7 */
+ 44, 45, 43, 44, 45, 23, 24, 23, /* 7 */
+ 24, 23, 24, 23, 24, 23, 24, 23, /* 7 */
+ 24, 23, 24, 23, 24, 16, 23, 24, /* 7 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 7 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 7 */
+ 16, 43, 44, 45, 23, 24, 46, 46, /* 7 */
+ 46, 46, 23, 24, 23, 24, 23, 24, /* 7 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 8 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 8 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 8 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 8 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 8 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 8 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 8 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 8 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 9 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 9 */
+ 16, 16, 16, 47, 48, 16, 49, 49, /* 9 */
+ 50, 50, 16, 51, 16, 16, 16, 16, /* 9 */
+ 49, 16, 16, 52, 16, 16, 16, 16, /* 9 */
+ 53, 54, 16, 16, 16, 16, 16, 54, /* 9 */
+ 16, 16, 55, 16, 16, 16, 16, 16, /* 9 */
+ 16, 16, 16, 16, 16, 16, 16, 16, /* 9 */
+ 16, 16, 16, 56, 16, 16, 16, 16, /* 10 */
+ 56, 16, 57, 57, 16, 16, 16, 16, /* 10 */
+ 16, 16, 58, 16, 16, 16, 16, 16, /* 10 */
+ 16, 16, 16, 16, 16, 16, 16, 16, /* 10 */
+ 16, 16, 16, 16, 16, 16, 16, 16, /* 10 */
+ 16, 46, 46, 46, 46, 46, 46, 46, /* 10 */
+ 59, 59, 59, 59, 59, 59, 59, 59, /* 10 */
+ 59, 11, 11, 59, 59, 59, 59, 59, /* 10 */
+ 59, 59, 11, 11, 11, 11, 11, 11, /* 11 */
+ 11, 11, 11, 11, 11, 11, 11, 11, /* 11 */
+ 59, 59, 11, 11, 11, 11, 11, 11, /* 11 */
+ 11, 11, 11, 11, 11, 11, 11, 46, /* 11 */
+ 59, 59, 59, 59, 59, 11, 11, 11, /* 11 */
+ 11, 11, 46, 46, 46, 46, 46, 46, /* 11 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 11 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 11 */
+ 60, 60, 60, 60, 60, 60, 60, 60, /* 12 */
+ 60, 60, 60, 60, 60, 60, 60, 60, /* 12 */
+ 60, 60, 60, 60, 60, 60, 60, 60, /* 12 */
+ 60, 60, 60, 60, 60, 60, 60, 60, /* 12 */
+ 60, 60, 60, 60, 60, 60, 60, 60, /* 12 */
+ 60, 60, 60, 60, 60, 60, 60, 60, /* 12 */
+ 60, 60, 60, 60, 60, 60, 60, 60, /* 12 */
+ 60, 60, 60, 60, 60, 60, 60, 60, /* 12 */
+ 60, 60, 60, 60, 60, 60, 46, 46, /* 13 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 13 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 13 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 13 */
+ 60, 60, 46, 46, 46, 46, 46, 46, /* 13 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 13 */
+ 46, 46, 46, 46, 3, 3, 46, 46, /* 13 */
+ 46, 46, 59, 46, 46, 46, 3, 46, /* 13 */
+ 46, 46, 46, 46, 11, 11, 61, 3, /* 14 */
+ 62, 62, 62, 46, 63, 46, 64, 64, /* 14 */
+ 16, 20, 20, 20, 20, 20, 20, 20, /* 14 */
+ 20, 20, 20, 20, 20, 20, 20, 20, /* 14 */
+ 20, 20, 46, 20, 20, 20, 20, 20, /* 14 */
+ 20, 20, 20, 20, 65, 66, 66, 66, /* 14 */
+ 16, 21, 21, 21, 21, 21, 21, 21, /* 14 */
+ 21, 21, 21, 21, 21, 21, 21, 21, /* 14 */
+ 21, 21, 16, 21, 21, 21, 21, 21, /* 15 */
+ 21, 21, 21, 21, 67, 68, 68, 46, /* 15 */
+ 69, 70, 38, 38, 38, 71, 72, 46, /* 15 */
+ 46, 46, 38, 46, 38, 46, 38, 46, /* 15 */
+ 38, 46, 23, 24, 23, 24, 23, 24, /* 15 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 15 */
+ 73, 74, 16, 40, 46, 46, 46, 46, /* 15 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 15 */
+ 46, 75, 75, 75, 75, 75, 75, 75, /* 16 */
+ 75, 75, 75, 75, 75, 46, 75, 75, /* 16 */
+ 20, 20, 20, 20, 20, 20, 20, 20, /* 16 */
+ 20, 20, 20, 20, 20, 20, 20, 20, /* 16 */
+ 20, 20, 20, 20, 20, 20, 20, 20, /* 16 */
+ 20, 20, 20, 20, 20, 20, 20, 20, /* 16 */
+ 21, 21, 21, 21, 21, 21, 21, 21, /* 16 */
+ 21, 21, 21, 21, 21, 21, 21, 21, /* 16 */
+ 21, 21, 21, 21, 21, 21, 21, 21, /* 17 */
+ 21, 21, 21, 21, 21, 21, 21, 21, /* 17 */
+ 46, 74, 74, 74, 74, 74, 74, 74, /* 17 */
+ 74, 74, 74, 74, 74, 46, 74, 74, /* 17 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 17 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 17 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 17 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 17 */
+ 23, 24, 15, 60, 60, 60, 60, 46, /* 18 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 18 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 18 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 18 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 18 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 18 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 18 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 18 */
+ 40, 23, 24, 23, 24, 46, 46, 23, /* 19 */
+ 24, 46, 46, 23, 24, 46, 46, 46, /* 19 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 19 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 19 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 19 */
+ 23, 24, 23, 24, 46, 46, 23, 24, /* 19 */
+ 23, 24, 23, 24, 23, 24, 46, 46, /* 19 */
+ 23, 24, 46, 46, 46, 46, 46, 46, /* 19 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 20 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 20 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 20 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 20 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 20 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 20 */
+ 46, 76, 76, 76, 76, 76, 76, 76, /* 20 */
+ 76, 76, 76, 76, 76, 76, 76, 76, /* 20 */
+ 76, 76, 76, 76, 76, 76, 76, 76, /* 21 */
+ 76, 76, 76, 76, 76, 76, 76, 76, /* 21 */
+ 76, 76, 76, 76, 76, 76, 76, 46, /* 21 */
+ 46, 59, 3, 3, 3, 3, 3, 3, /* 21 */
+ 46, 77, 77, 77, 77, 77, 77, 77, /* 21 */
+ 77, 77, 77, 77, 77, 77, 77, 77, /* 21 */
+ 77, 77, 77, 77, 77, 77, 77, 77, /* 21 */
+ 77, 77, 77, 77, 77, 77, 77, 77, /* 21 */
+ 77, 77, 77, 77, 77, 77, 77, 16, /* 22 */
+ 46, 3, 46, 46, 46, 46, 46, 46, /* 22 */
+ 46, 60, 60, 60, 60, 60, 60, 60, /* 22 */
+ 60, 60, 60, 60, 60, 60, 60, 60, /* 22 */
+ 60, 60, 46, 60, 60, 60, 60, 60, /* 22 */
+ 60, 60, 60, 60, 60, 60, 60, 60, /* 22 */
+ 60, 60, 60, 60, 60, 60, 60, 60, /* 22 */
+ 60, 60, 46, 60, 60, 60, 3, 60, /* 22 */
+ 3, 60, 60, 3, 60, 46, 46, 46, /* 23 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 23 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 23 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 23 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 23 */
+ 40, 40, 40, 46, 46, 46, 46, 46, /* 23 */
+ 40, 40, 40, 3, 3, 46, 46, 46, /* 23 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 23 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 24 */
+ 46, 46, 46, 46, 3, 46, 46, 46, /* 24 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 24 */
+ 46, 46, 46, 3, 46, 46, 46, 3, /* 24 */
+ 46, 40, 40, 40, 40, 40, 40, 40, /* 24 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 24 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 24 */
+ 40, 40, 40, 46, 46, 46, 46, 46, /* 24 */
+ 59, 40, 40, 40, 40, 40, 40, 40, /* 25 */
+ 40, 40, 40, 60, 60, 60, 60, 60, /* 25 */
+ 60, 60, 60, 46, 46, 46, 46, 46, /* 25 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 25 */
+ 78, 78, 78, 78, 78, 78, 78, 78, /* 25 */
+ 78, 78, 3, 3, 3, 3, 46, 46, /* 25 */
+ 60, 40, 40, 40, 40, 40, 40, 40, /* 25 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 25 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 26 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 26 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 26 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 26 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 26 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 26 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 26 */
+ 46, 46, 40, 40, 40, 40, 40, 46, /* 26 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 27 */
+ 40, 40, 40, 40, 40, 40, 40, 46, /* 27 */
+ 40, 40, 40, 40, 3, 40, 60, 60, /* 27 */
+ 60, 60, 60, 60, 60, 79, 79, 60, /* 27 */
+ 60, 60, 60, 60, 60, 59, 59, 60, /* 27 */
+ 60, 15, 60, 60, 60, 60, 46, 46, /* 27 */
+ 9, 9, 9, 9, 9, 9, 9, 9, /* 27 */
+ 9, 9, 46, 46, 46, 46, 46, 46, /* 27 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 28 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 28 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 28 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 28 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 28 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 28 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 28 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 28 */
+ 46, 60, 60, 80, 46, 40, 40, 40, /* 29 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 29 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 29 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 29 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 29 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 29 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 29 */
+ 40, 40, 46, 46, 60, 40, 80, 80, /* 29 */
+ 80, 60, 60, 60, 60, 60, 60, 60, /* 30 */
+ 60, 80, 80, 80, 80, 60, 46, 46, /* 30 */
+ 15, 60, 60, 60, 60, 46, 46, 46, /* 30 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 30 */
+ 40, 40, 60, 60, 3, 3, 81, 81, /* 30 */
+ 81, 81, 81, 81, 81, 81, 81, 81, /* 30 */
+ 3, 46, 46, 46, 46, 46, 46, 46, /* 30 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 30 */
+ 46, 60, 80, 80, 46, 40, 40, 40, /* 31 */
+ 40, 40, 40, 40, 40, 46, 46, 40, /* 31 */
+ 40, 46, 46, 40, 40, 40, 40, 40, /* 31 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 31 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 31 */
+ 40, 46, 40, 40, 40, 40, 40, 40, /* 31 */
+ 40, 46, 40, 46, 46, 46, 40, 40, /* 31 */
+ 40, 40, 46, 46, 60, 46, 80, 80, /* 31 */
+ 80, 60, 60, 60, 60, 46, 46, 80, /* 32 */
+ 80, 46, 46, 80, 80, 60, 46, 46, /* 32 */
+ 46, 46, 46, 46, 46, 46, 46, 80, /* 32 */
+ 46, 46, 46, 46, 40, 40, 46, 40, /* 32 */
+ 40, 40, 60, 60, 46, 46, 81, 81, /* 32 */
+ 81, 81, 81, 81, 81, 81, 81, 81, /* 32 */
+ 40, 40, 4, 4, 82, 82, 82, 82, /* 32 */
+ 19, 83, 15, 46, 46, 46, 46, 46, /* 32 */
+ 46, 46, 60, 46, 46, 40, 40, 40, /* 33 */
+ 40, 40, 40, 46, 46, 46, 46, 40, /* 33 */
+ 40, 46, 46, 40, 40, 40, 40, 40, /* 33 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 33 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 33 */
+ 40, 46, 40, 40, 40, 40, 40, 40, /* 33 */
+ 40, 46, 40, 40, 46, 40, 40, 46, /* 33 */
+ 40, 40, 46, 46, 60, 46, 80, 80, /* 33 */
+ 80, 60, 60, 46, 46, 46, 46, 60, /* 34 */
+ 60, 46, 46, 60, 60, 60, 46, 46, /* 34 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 34 */
+ 46, 40, 40, 40, 40, 46, 40, 46, /* 34 */
+ 46, 46, 46, 46, 46, 46, 81, 81, /* 34 */
+ 81, 81, 81, 81, 81, 81, 81, 81, /* 34 */
+ 60, 60, 40, 40, 40, 46, 46, 46, /* 34 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 34 */
+ 46, 60, 60, 80, 46, 40, 40, 40, /* 35 */
+ 40, 40, 40, 40, 46, 40, 46, 40, /* 35 */
+ 40, 40, 46, 40, 40, 40, 40, 40, /* 35 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 35 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 35 */
+ 40, 46, 40, 40, 40, 40, 40, 40, /* 35 */
+ 40, 46, 40, 40, 46, 40, 40, 40, /* 35 */
+ 40, 40, 46, 46, 60, 40, 80, 80, /* 35 */
+ 80, 60, 60, 60, 60, 60, 46, 60, /* 36 */
+ 60, 80, 46, 80, 80, 60, 46, 46, /* 36 */
+ 15, 46, 46, 46, 46, 46, 46, 46, /* 36 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 36 */
+ 40, 46, 46, 46, 46, 46, 81, 81, /* 36 */
+ 81, 81, 81, 81, 81, 81, 81, 81, /* 36 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 36 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 36 */
+ 46, 60, 80, 80, 46, 40, 40, 40, /* 37 */
+ 40, 40, 40, 40, 40, 46, 46, 40, /* 37 */
+ 40, 46, 46, 40, 40, 40, 40, 40, /* 37 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 37 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 37 */
+ 40, 46, 40, 40, 40, 40, 40, 40, /* 37 */
+ 40, 46, 40, 40, 46, 46, 40, 40, /* 37 */
+ 40, 40, 46, 46, 60, 40, 80, 60, /* 37 */
+ 80, 60, 60, 60, 46, 46, 46, 80, /* 38 */
+ 80, 46, 46, 80, 80, 60, 46, 46, /* 38 */
+ 46, 46, 46, 46, 46, 46, 60, 80, /* 38 */
+ 46, 46, 46, 46, 40, 40, 46, 40, /* 38 */
+ 40, 40, 46, 46, 46, 46, 81, 81, /* 38 */
+ 81, 81, 81, 81, 81, 81, 81, 81, /* 38 */
+ 15, 46, 46, 46, 46, 46, 46, 46, /* 38 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 38 */
+ 46, 46, 60, 80, 46, 40, 40, 40, /* 39 */
+ 40, 40, 40, 46, 46, 46, 40, 40, /* 39 */
+ 40, 46, 40, 40, 40, 40, 46, 46, /* 39 */
+ 46, 40, 40, 46, 40, 46, 40, 40, /* 39 */
+ 46, 46, 46, 40, 40, 46, 46, 46, /* 39 */
+ 40, 40, 40, 46, 46, 46, 40, 40, /* 39 */
+ 40, 40, 40, 40, 40, 40, 46, 40, /* 39 */
+ 40, 40, 46, 46, 46, 46, 80, 80, /* 39 */
+ 60, 80, 80, 46, 46, 46, 80, 80, /* 40 */
+ 80, 46, 80, 80, 80, 60, 46, 46, /* 40 */
+ 46, 46, 46, 46, 46, 46, 46, 80, /* 40 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 40 */
+ 46, 46, 46, 46, 46, 46, 46, 81, /* 40 */
+ 81, 81, 81, 81, 81, 81, 81, 81, /* 40 */
+ 84, 19, 19, 46, 46, 46, 46, 46, /* 40 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 40 */
+ 46, 80, 80, 80, 46, 40, 40, 40, /* 41 */
+ 40, 40, 40, 40, 40, 46, 40, 40, /* 41 */
+ 40, 46, 40, 40, 40, 40, 40, 40, /* 41 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 41 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 41 */
+ 40, 46, 40, 40, 40, 40, 40, 40, /* 41 */
+ 40, 40, 40, 40, 46, 40, 40, 40, /* 41 */
+ 40, 40, 46, 46, 46, 46, 60, 60, /* 41 */
+ 60, 80, 80, 80, 80, 46, 60, 60, /* 42 */
+ 60, 46, 60, 60, 60, 60, 46, 46, /* 42 */
+ 46, 46, 46, 46, 46, 60, 60, 46, /* 42 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 42 */
+ 40, 40, 46, 46, 46, 46, 81, 81, /* 42 */
+ 81, 81, 81, 81, 81, 81, 81, 81, /* 42 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 42 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 42 */
+ 46, 46, 80, 80, 46, 40, 40, 40, /* 43 */
+ 40, 40, 40, 40, 40, 46, 40, 40, /* 43 */
+ 40, 46, 40, 40, 40, 40, 40, 40, /* 43 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 43 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 43 */
+ 40, 46, 40, 40, 40, 40, 40, 40, /* 43 */
+ 40, 40, 40, 40, 46, 40, 40, 40, /* 43 */
+ 40, 40, 46, 46, 46, 46, 80, 60, /* 43 */
+ 80, 80, 80, 80, 80, 46, 60, 80, /* 44 */
+ 80, 46, 80, 80, 60, 60, 46, 46, /* 44 */
+ 46, 46, 46, 46, 46, 80, 80, 46, /* 44 */
+ 46, 46, 46, 46, 46, 46, 40, 46, /* 44 */
+ 40, 40, 46, 46, 46, 46, 81, 81, /* 44 */
+ 81, 81, 81, 81, 81, 81, 81, 81, /* 44 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 44 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 44 */
+ 46, 46, 80, 80, 46, 40, 40, 40, /* 45 */
+ 40, 40, 40, 40, 40, 46, 40, 40, /* 45 */
+ 40, 46, 40, 40, 40, 40, 40, 40, /* 45 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 45 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 45 */
+ 40, 46, 40, 40, 40, 40, 40, 40, /* 45 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 45 */
+ 40, 40, 46, 46, 46, 46, 80, 80, /* 45 */
+ 80, 60, 60, 60, 46, 46, 80, 80, /* 46 */
+ 80, 46, 80, 80, 80, 60, 46, 46, /* 46 */
+ 46, 46, 46, 46, 46, 46, 46, 80, /* 46 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 46 */
+ 40, 40, 46, 46, 46, 46, 81, 81, /* 46 */
+ 81, 81, 81, 81, 81, 81, 81, 81, /* 46 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 46 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 46 */
+ 46, 40, 40, 40, 40, 40, 40, 40, /* 47 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 47 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 47 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 47 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 47 */
+ 40, 40, 40, 40, 40, 40, 40, 3, /* 47 */
+ 40, 60, 40, 40, 60, 60, 60, 60, /* 47 */
+ 60, 60, 60, 46, 46, 46, 46, 4, /* 47 */
+ 40, 40, 40, 40, 40, 40, 59, 60, /* 48 */
+ 60, 60, 60, 60, 60, 60, 60, 15, /* 48 */
+ 9, 9, 9, 9, 9, 9, 9, 9, /* 48 */
+ 9, 9, 3, 3, 46, 46, 46, 46, /* 48 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 48 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 48 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 48 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 48 */
+ 46, 40, 40, 46, 40, 46, 46, 40, /* 49 */
+ 40, 46, 40, 46, 46, 40, 46, 46, /* 49 */
+ 46, 46, 46, 46, 40, 40, 40, 40, /* 49 */
+ 46, 40, 40, 40, 40, 40, 40, 40, /* 49 */
+ 46, 40, 40, 40, 46, 40, 46, 40, /* 49 */
+ 46, 46, 40, 40, 46, 40, 40, 3, /* 49 */
+ 40, 60, 40, 40, 60, 60, 60, 60, /* 49 */
+ 60, 60, 46, 60, 60, 40, 46, 46, /* 49 */
+ 40, 40, 40, 40, 40, 46, 59, 46, /* 50 */
+ 60, 60, 60, 60, 60, 60, 46, 46, /* 50 */
+ 9, 9, 9, 9, 9, 9, 9, 9, /* 50 */
+ 9, 9, 46, 46, 40, 40, 46, 46, /* 50 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 50 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 50 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 50 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 50 */
+ 15, 15, 15, 15, 3, 3, 3, 3, /* 51 */
+ 3, 3, 3, 3, 3, 3, 3, 3, /* 51 */
+ 3, 3, 3, 15, 15, 15, 15, 15, /* 51 */
+ 60, 60, 15, 15, 15, 15, 15, 15, /* 51 */
+ 78, 78, 78, 78, 78, 78, 78, 78, /* 51 */
+ 78, 78, 85, 85, 85, 85, 85, 85, /* 51 */
+ 85, 85, 85, 85, 15, 60, 15, 60, /* 51 */
+ 15, 60, 5, 6, 5, 6, 80, 80, /* 51 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 52 */
+ 46, 40, 40, 40, 40, 40, 40, 40, /* 52 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 52 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 52 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 52 */
+ 40, 40, 46, 46, 46, 46, 46, 46, /* 52 */
+ 46, 60, 60, 60, 60, 60, 60, 60, /* 52 */
+ 60, 60, 60, 60, 60, 60, 60, 80, /* 52 */
+ 60, 60, 60, 60, 60, 3, 60, 60, /* 53 */
+ 60, 60, 60, 60, 46, 46, 46, 46, /* 53 */
+ 60, 60, 60, 60, 60, 60, 46, 60, /* 53 */
+ 46, 60, 60, 60, 60, 60, 60, 60, /* 53 */
+ 60, 60, 60, 60, 60, 60, 60, 60, /* 53 */
+ 60, 60, 60, 60, 60, 60, 46, 46, /* 53 */
+ 46, 60, 60, 60, 60, 60, 60, 60, /* 53 */
+ 46, 60, 46, 46, 46, 46, 46, 46, /* 53 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 54 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 54 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 54 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 54 */
+ 76, 76, 76, 76, 76, 76, 76, 76, /* 54 */
+ 76, 76, 76, 76, 76, 76, 76, 76, /* 54 */
+ 76, 76, 76, 76, 76, 76, 76, 76, /* 54 */
+ 76, 76, 76, 76, 76, 76, 76, 76, /* 54 */
+ 76, 76, 76, 76, 76, 76, 46, 46, /* 55 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 55 */
+ 16, 16, 16, 16, 16, 16, 16, 16, /* 55 */
+ 16, 16, 16, 16, 16, 16, 16, 16, /* 55 */
+ 16, 16, 16, 16, 16, 16, 16, 16, /* 55 */
+ 16, 16, 16, 16, 16, 16, 16, 16, /* 55 */
+ 16, 16, 16, 16, 16, 16, 16, 46, /* 55 */
+ 46, 46, 46, 3, 46, 46, 46, 46, /* 55 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 56 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 56 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 56 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 56 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 56 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 56 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 56 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 56 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 57 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 57 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 57 */
+ 40, 40, 46, 46, 46, 46, 46, 40, /* 57 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 57 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 57 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 57 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 57 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 58 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 58 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 58 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 58 */
+ 40, 40, 40, 46, 46, 46, 46, 46, /* 58 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 58 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 58 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 58 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 59 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 59 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 59 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 59 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 59 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 59 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 59 */
+ 40, 40, 46, 46, 46, 46, 46, 46, /* 59 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 60 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 60 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 60 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 60 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 60 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 60 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 60 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 60 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 61 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 61 */
+ 23, 24, 23, 24, 23, 24, 16, 16, /* 61 */
+ 16, 16, 16, 16, 46, 46, 46, 46, /* 61 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 61 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 61 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 61 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 61 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 62 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 62 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 62 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 62 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 62 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 62 */
+ 23, 24, 23, 24, 23, 24, 23, 24, /* 62 */
+ 23, 24, 46, 46, 46, 46, 46, 46, /* 62 */
+ 86, 86, 86, 86, 86, 86, 86, 86, /* 63 */
+ 87, 87, 87, 87, 87, 87, 87, 87, /* 63 */
+ 86, 86, 86, 86, 86, 86, 46, 46, /* 63 */
+ 87, 87, 87, 87, 87, 87, 46, 46, /* 63 */
+ 86, 86, 86, 86, 86, 86, 86, 86, /* 63 */
+ 87, 87, 87, 87, 87, 87, 87, 87, /* 63 */
+ 86, 86, 86, 86, 86, 86, 86, 86, /* 63 */
+ 87, 87, 87, 87, 87, 87, 87, 87, /* 63 */
+ 86, 86, 86, 86, 86, 86, 46, 46, /* 64 */
+ 87, 87, 87, 87, 87, 87, 46, 46, /* 64 */
+ 16, 86, 16, 86, 16, 86, 16, 86, /* 64 */
+ 46, 87, 46, 87, 46, 87, 46, 87, /* 64 */
+ 86, 86, 86, 86, 86, 86, 86, 86, /* 64 */
+ 87, 87, 87, 87, 87, 87, 87, 87, /* 64 */
+ 88, 88, 89, 89, 89, 89, 90, 90, /* 64 */
+ 91, 91, 92, 92, 93, 93, 46, 46, /* 64 */
+ 86, 86, 86, 86, 86, 86, 86, 86, /* 65 */
+ 87, 87, 87, 87, 87, 87, 87, 87, /* 65 */
+ 86, 86, 86, 86, 86, 86, 86, 86, /* 65 */
+ 87, 87, 87, 87, 87, 87, 87, 87, /* 65 */
+ 86, 86, 86, 86, 86, 86, 86, 86, /* 65 */
+ 87, 87, 87, 87, 87, 87, 87, 87, /* 65 */
+ 86, 86, 16, 94, 16, 46, 16, 16, /* 65 */
+ 87, 87, 95, 95, 96, 11, 38, 11, /* 65 */
+ 11, 11, 16, 94, 16, 46, 16, 16, /* 66 */
+ 97, 97, 97, 97, 96, 11, 11, 11, /* 66 */
+ 86, 86, 16, 16, 46, 46, 16, 16, /* 66 */
+ 87, 87, 98, 98, 46, 11, 11, 11, /* 66 */
+ 86, 86, 16, 16, 16, 99, 16, 16, /* 66 */
+ 87, 87, 100, 100, 101, 11, 11, 11, /* 66 */
+ 46, 46, 16, 94, 16, 46, 16, 16, /* 66 */
+102, 102, 103, 103, 96, 11, 11, 46, /* 66 */
+ 2, 2, 2, 2, 2, 2, 2, 2, /* 67 */
+ 2, 2, 2, 2, 104, 104, 104, 104, /* 67 */
+ 8, 8, 8, 8, 8, 8, 3, 3, /* 67 */
+ 5, 6, 5, 5, 5, 6, 5, 5, /* 67 */
+ 3, 3, 3, 3, 3, 3, 3, 3, /* 67 */
+105, 106, 104, 104, 104, 104, 104, 46, /* 67 */
+ 3, 3, 3, 3, 3, 3, 3, 3, /* 67 */
+ 3, 5, 6, 3, 3, 3, 3, 12, /* 67 */
+ 12, 3, 3, 3, 7, 5, 6, 46, /* 68 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 68 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 68 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 68 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 68 */
+ 46, 46, 104, 104, 104, 104, 104, 104, /* 68 */
+ 17, 46, 46, 46, 17, 17, 17, 17, /* 68 */
+ 17, 17, 7, 7, 7, 5, 6, 16, /* 68 */
+107, 107, 107, 107, 107, 107, 107, 107, /* 69 */
+107, 107, 7, 7, 7, 5, 6, 46, /* 69 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 69 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 69 */
+ 4, 4, 4, 4, 4, 4, 4, 4, /* 69 */
+ 4, 4, 4, 4, 46, 46, 46, 46, /* 69 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 69 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 69 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 70 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 70 */
+ 60, 60, 60, 60, 60, 60, 60, 60, /* 70 */
+ 60, 60, 60, 60, 60, 79, 79, 79, /* 70 */
+ 79, 60, 46, 46, 46, 46, 46, 46, /* 70 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 70 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 70 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 70 */
+ 15, 15, 38, 15, 15, 15, 15, 38, /* 71 */
+ 15, 15, 16, 38, 38, 38, 16, 16, /* 71 */
+ 38, 38, 38, 16, 15, 38, 15, 15, /* 71 */
+ 38, 38, 38, 38, 38, 38, 15, 15, /* 71 */
+ 15, 15, 15, 15, 38, 15, 38, 15, /* 71 */
+ 38, 15, 38, 38, 38, 38, 16, 16, /* 71 */
+ 38, 38, 15, 38, 16, 40, 40, 40, /* 71 */
+ 40, 46, 46, 46, 46, 46, 46, 46, /* 71 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 72 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 72 */
+ 46, 46, 46, 19, 19, 19, 19, 19, /* 72 */
+ 19, 19, 19, 19, 19, 19, 19, 108, /* 72 */
+109, 109, 109, 109, 109, 109, 109, 109, /* 72 */
+109, 109, 109, 109, 110, 110, 110, 110, /* 72 */
+111, 111, 111, 111, 111, 111, 111, 111, /* 72 */
+111, 111, 111, 111, 112, 112, 112, 112, /* 72 */
+113, 113, 113, 46, 46, 46, 46, 46, /* 73 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 73 */
+ 7, 7, 7, 7, 7, 15, 15, 15, /* 73 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 73 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 73 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 73 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 73 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 73 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 74 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 74 */
+ 15, 15, 7, 15, 7, 15, 15, 15, /* 74 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 74 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 74 */
+ 15, 15, 15, 46, 46, 46, 46, 46, /* 74 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 74 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 74 */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* 75 */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* 75 */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* 75 */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* 75 */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* 75 */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* 75 */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* 75 */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* 75 */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* 76 */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* 76 */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* 76 */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* 76 */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* 76 */
+ 7, 7, 7, 7, 7, 7, 7, 7, /* 76 */
+ 7, 7, 46, 46, 46, 46, 46, 46, /* 76 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 76 */
+ 15, 46, 15, 15, 15, 15, 15, 15, /* 77 */
+ 7, 7, 7, 7, 15, 15, 15, 15, /* 77 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 77 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 77 */
+ 7, 7, 15, 15, 15, 15, 15, 15, /* 77 */
+ 15, 5, 6, 15, 15, 15, 15, 15, /* 77 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 77 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 77 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 78 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 78 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 78 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 78 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 78 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 78 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 78 */
+ 15, 15, 15, 46, 46, 46, 46, 46, /* 78 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 79 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 79 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 79 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 79 */
+ 15, 15, 15, 15, 15, 46, 46, 46, /* 79 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 79 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 79 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 79 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 80 */
+ 15, 15, 15, 46, 46, 46, 46, 46, /* 80 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 80 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 80 */
+114, 114, 114, 114, 114, 114, 114, 114, /* 80 */
+114, 114, 114, 114, 114, 114, 114, 114, /* 80 */
+114, 114, 114, 114, 82, 82, 82, 82, /* 80 */
+ 82, 82, 82, 82, 82, 82, 82, 82, /* 80 */
+ 82, 82, 82, 82, 82, 82, 82, 82, /* 81 */
+115, 115, 115, 115, 115, 115, 115, 115, /* 81 */
+115, 115, 115, 115, 115, 115, 115, 115, /* 81 */
+115, 115, 115, 115, 15, 15, 15, 15, /* 81 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 81 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 81 */
+ 15, 15, 15, 15, 15, 15, 116, 116, /* 81 */
+116, 116, 116, 116, 116, 116, 116, 116, /* 81 */
+116, 116, 116, 116, 116, 116, 116, 116, /* 82 */
+116, 116, 116, 116, 116, 116, 116, 116, /* 82 */
+117, 117, 117, 117, 117, 117, 117, 117, /* 82 */
+117, 117, 117, 117, 117, 117, 117, 117, /* 82 */
+117, 117, 117, 117, 117, 117, 117, 117, /* 82 */
+117, 117, 118, 46, 46, 46, 46, 46, /* 82 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 82 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 82 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 83 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 83 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 83 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 83 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 83 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 83 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 83 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 83 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 84 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 84 */
+ 15, 15, 15, 15, 15, 15, 46, 46, /* 84 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 84 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 84 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 84 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 84 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 84 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 85 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 85 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 85 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 85 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 85 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 85 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 85 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 85 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 86 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 86 */
+ 15, 15, 15, 15, 46, 46, 46, 46, /* 86 */
+ 46, 46, 15, 15, 15, 15, 15, 15, /* 86 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 86 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 86 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 86 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 86 */
+ 46, 15, 15, 15, 15, 46, 15, 15, /* 87 */
+ 15, 15, 46, 46, 15, 15, 15, 15, /* 87 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 87 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 87 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 87 */
+ 46, 15, 15, 15, 15, 15, 15, 15, /* 87 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 87 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 87 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 88 */
+ 15, 15, 15, 15, 46, 15, 46, 15, /* 88 */
+ 15, 15, 15, 46, 46, 46, 15, 46, /* 88 */
+ 15, 15, 15, 15, 15, 15, 15, 46, /* 88 */
+ 46, 15, 15, 15, 15, 15, 15, 15, /* 88 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 88 */
+ 46, 46, 46, 46, 46, 46, 119, 119, /* 88 */
+119, 119, 119, 119, 119, 119, 119, 119, /* 88 */
+114, 114, 114, 114, 114, 114, 114, 114, /* 89 */
+114, 114, 83, 83, 83, 83, 83, 83, /* 89 */
+ 83, 83, 83, 83, 15, 46, 46, 46, /* 89 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 89 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 89 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 89 */
+ 46, 15, 15, 15, 15, 15, 15, 15, /* 89 */
+ 15, 15, 15, 15, 15, 15, 15, 46, /* 89 */
+ 2, 3, 3, 3, 15, 59, 3, 120, /* 90 */
+ 5, 6, 5, 6, 5, 6, 5, 6, /* 90 */
+ 5, 6, 15, 15, 5, 6, 5, 6, /* 90 */
+ 5, 6, 5, 6, 8, 5, 6, 5, /* 90 */
+ 15, 121, 121, 121, 121, 121, 121, 121, /* 90 */
+121, 121, 60, 60, 60, 60, 60, 60, /* 90 */
+ 8, 59, 59, 59, 59, 59, 15, 15, /* 90 */
+ 46, 46, 46, 46, 46, 46, 46, 15, /* 90 */
+ 46, 40, 40, 40, 40, 40, 40, 40, /* 91 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 91 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 91 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 91 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 91 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 91 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 91 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 91 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 92 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 92 */
+ 40, 40, 40, 40, 40, 46, 46, 46, /* 92 */
+ 46, 60, 60, 59, 59, 59, 59, 46, /* 92 */
+ 46, 40, 40, 40, 40, 40, 40, 40, /* 92 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 92 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 92 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 92 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 93 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 93 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 93 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 93 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 93 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 93 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 93 */
+ 40, 40, 40, 3, 59, 59, 59, 46, /* 93 */
+ 46, 46, 46, 46, 46, 40, 40, 40, /* 94 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 94 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 94 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 94 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 94 */
+ 40, 40, 40, 40, 40, 46, 46, 46, /* 94 */
+ 46, 40, 40, 40, 40, 40, 40, 40, /* 94 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 94 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 95 */
+ 40, 40, 40, 40, 40, 40, 40, 46, /* 95 */
+ 15, 15, 85, 85, 85, 85, 15, 15, /* 95 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 95 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 95 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 95 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 95 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 95 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 96 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 96 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 96 */
+ 15, 15, 15, 15, 15, 46, 46, 46, /* 96 */
+ 85, 85, 85, 85, 85, 85, 85, 85, /* 96 */
+ 85, 85, 15, 15, 15, 15, 15, 15, /* 96 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 96 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 96 */
+ 15, 15, 15, 15, 46, 46, 46, 46, /* 97 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 97 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 97 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 97 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 97 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 97 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 97 */
+ 15, 15, 15, 15, 46, 46, 46, 15, /* 97 */
+114, 114, 114, 114, 114, 114, 114, 114, /* 98 */
+114, 114, 15, 15, 15, 15, 15, 15, /* 98 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 98 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 98 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 98 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 98 */
+ 15, 46, 46, 46, 46, 46, 46, 46, /* 98 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 98 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 99 */
+ 15, 15, 15, 15, 46, 46, 46, 46, /* 99 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 99 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 99 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 99 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 99 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 99 */
+ 15, 15, 15, 15, 15, 15, 15, 46, /* 99 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 100 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 100 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 100 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 100 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 100 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 100 */
+ 15, 15, 15, 15, 15, 15, 15, 46, /* 100 */
+ 46, 46, 46, 15, 15, 15, 15, 15, /* 100 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 101 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 101 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 101 */
+ 15, 15, 15, 15, 15, 15, 46, 46, /* 101 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 101 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 101 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 101 */
+ 15, 15, 15, 15, 15, 15, 15, 46, /* 101 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 102 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 102 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 102 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 102 */
+ 40, 40, 40, 40, 40, 40, 46, 46, /* 102 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 102 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 102 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 102 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 103 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 103 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 103 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 103 */
+ 40, 40, 40, 40, 46, 46, 46, 46, /* 103 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 103 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 103 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 103 */
+122, 122, 122, 122, 122, 122, 122, 122, /* 104 */
+122, 122, 122, 122, 122, 122, 122, 122, /* 104 */
+122, 122, 122, 122, 122, 122, 122, 122, /* 104 */
+122, 122, 122, 122, 122, 122, 122, 122, /* 104 */
+122, 122, 122, 122, 122, 122, 122, 122, /* 104 */
+122, 122, 122, 122, 122, 122, 122, 122, /* 104 */
+122, 122, 122, 122, 122, 122, 122, 122, /* 104 */
+122, 122, 122, 122, 122, 122, 122, 122, /* 104 */
+123, 123, 123, 123, 123, 123, 123, 123, /* 105 */
+123, 123, 123, 123, 123, 123, 123, 123, /* 105 */
+123, 123, 123, 123, 123, 123, 123, 123, /* 105 */
+123, 123, 123, 123, 123, 123, 123, 123, /* 105 */
+123, 123, 123, 123, 123, 123, 123, 123, /* 105 */
+123, 123, 123, 123, 123, 123, 123, 123, /* 105 */
+123, 123, 123, 123, 123, 123, 123, 123, /* 105 */
+123, 123, 123, 123, 123, 123, 123, 123, /* 105 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 106 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 106 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 106 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 106 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 106 */
+ 40, 40, 40, 40, 40, 40, 46, 46, /* 106 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 106 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 106 */
+ 16, 16, 16, 16, 16, 16, 16, 46, /* 107 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 107 */
+ 46, 46, 46, 16, 16, 16, 16, 16, /* 107 */
+ 46, 46, 46, 46, 46, 46, 60, 40, /* 107 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 107 */
+ 40, 7, 40, 40, 40, 40, 40, 40, /* 107 */
+ 40, 40, 40, 40, 40, 40, 40, 46, /* 107 */
+ 40, 40, 40, 40, 40, 46, 40, 46, /* 107 */
+ 40, 40, 46, 40, 40, 46, 40, 40, /* 108 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 108 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 108 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 108 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 108 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 108 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 108 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 108 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 109 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 109 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 109 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 109 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 109 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 109 */
+ 40, 40, 46, 46, 46, 46, 46, 46, /* 109 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 109 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 110 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 110 */
+ 46, 46, 46, 40, 40, 40, 40, 40, /* 110 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 110 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 110 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 110 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 110 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 110 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 111 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 111 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 111 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 111 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 111 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 111 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 111 */
+ 40, 40, 40, 40, 40, 40, 5, 6, /* 111 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 112 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 112 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 112 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 112 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 112 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 112 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 112 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 112 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 113 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 113 */
+ 46, 46, 40, 40, 40, 40, 40, 40, /* 113 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 113 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 113 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 113 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 113 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 113 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 114 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 114 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 114 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 114 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 114 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 114 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 114 */
+ 40, 40, 40, 40, 46, 46, 46, 46, /* 114 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 115 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 115 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 115 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 115 */
+ 60, 60, 60, 60, 46, 46, 46, 46, /* 115 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 115 */
+ 3, 8, 8, 12, 12, 5, 6, 5, /* 115 */
+ 6, 5, 6, 5, 6, 5, 6, 5, /* 115 */
+ 6, 5, 6, 5, 6, 46, 46, 46, /* 116 */
+ 46, 3, 3, 3, 3, 12, 12, 12, /* 116 */
+ 3, 3, 3, 46, 3, 3, 3, 3, /* 116 */
+ 8, 5, 6, 5, 6, 5, 6, 3, /* 116 */
+ 3, 3, 7, 8, 7, 7, 7, 46, /* 116 */
+ 3, 4, 3, 3, 46, 46, 46, 46, /* 116 */
+ 40, 40, 40, 46, 40, 46, 40, 40, /* 116 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 116 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 117 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 117 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 117 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 117 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 117 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 117 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 117 */
+ 40, 40, 40, 40, 40, 46, 46, 104, /* 117 */
+ 46, 3, 3, 3, 4, 3, 3, 3, /* 118 */
+ 5, 6, 3, 7, 3, 8, 3, 3, /* 118 */
+ 9, 9, 9, 9, 9, 9, 9, 9, /* 118 */
+ 9, 9, 3, 3, 7, 7, 7, 3, /* 118 */
+ 3, 10, 10, 10, 10, 10, 10, 10, /* 118 */
+ 10, 10, 10, 10, 10, 10, 10, 10, /* 118 */
+ 10, 10, 10, 10, 10, 10, 10, 10, /* 118 */
+ 10, 10, 10, 5, 3, 6, 11, 12, /* 118 */
+ 11, 13, 13, 13, 13, 13, 13, 13, /* 119 */
+ 13, 13, 13, 13, 13, 13, 13, 13, /* 119 */
+ 13, 13, 13, 13, 13, 13, 13, 13, /* 119 */
+ 13, 13, 13, 5, 7, 6, 7, 46, /* 119 */
+ 46, 3, 5, 6, 3, 3, 40, 40, /* 119 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 119 */
+ 59, 40, 40, 40, 40, 40, 40, 40, /* 119 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 119 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 120 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 120 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 120 */
+ 40, 40, 40, 40, 40, 40, 59, 59, /* 120 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 120 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 120 */
+ 40, 40, 40, 40, 40, 40, 40, 40, /* 120 */
+ 40, 40, 40, 40, 40, 40, 40, 46, /* 120 */
+ 46, 46, 40, 40, 40, 40, 40, 40, /* 121 */
+ 46, 46, 40, 40, 40, 40, 40, 40, /* 121 */
+ 46, 46, 40, 40, 40, 40, 40, 40, /* 121 */
+ 46, 46, 40, 40, 40, 46, 46, 46, /* 121 */
+ 4, 4, 7, 11, 15, 4, 4, 46, /* 121 */
+ 7, 7, 7, 7, 7, 15, 15, 46, /* 121 */
+ 46, 46, 46, 46, 46, 46, 46, 46, /* 121 */
+ 46, 46, 46, 46, 46, 15, 46, 46 /* 121 */
+};
+
+/* The A table has 124 entries for a total of 496 bytes. */
+
+const uint32 js_A[] = {
+0x0001000F, /* 0 Cc, ignorable */
+0x0004000F, /* 1 Cc, whitespace */
+0x0004000C, /* 2 Zs, whitespace */
+0x00000018, /* 3 Po */
+0x0006001A, /* 4 Sc, currency */
+0x00000015, /* 5 Ps */
+0x00000016, /* 6 Pe */
+0x00000019, /* 7 Sm */
+0x00000014, /* 8 Pd */
+0x00036089, /* 9 Nd, identifier part, decimal 16 */
+0x0827FF81, /* 10 Lu, hasLower (add 32), identifier start, supradecimal 31 */
+0x0000001B, /* 11 Sk */
+0x00050017, /* 12 Pc, underscore */
+0x0817FF82, /* 13 Ll, hasUpper (subtract 32), identifier start, supradecimal 31 */
+0x0000000C, /* 14 Zs */
+0x0000001C, /* 15 So */
+0x00070182, /* 16 Ll, identifier start */
+0x0000600B, /* 17 No, decimal 16 */
+0x0000500B, /* 18 No, decimal 8 */
+0x0000800B, /* 19 No, strange */
+0x08270181, /* 20 Lu, hasLower (add 32), identifier start */
+0x08170182, /* 21 Ll, hasUpper (subtract 32), identifier start */
+0xE1D70182, /* 22 Ll, hasUpper (subtract -121), identifier start */
+0x00670181, /* 23 Lu, hasLower (add 1), identifier start */
+0x00570182, /* 24 Ll, hasUpper (subtract 1), identifier start */
+0xCE670181, /* 25 Lu, hasLower (add -199), identifier start */
+0x3A170182, /* 26 Ll, hasUpper (subtract 232), identifier start */
+0xE1E70181, /* 27 Lu, hasLower (add -121), identifier start */
+0x4B170182, /* 28 Ll, hasUpper (subtract 300), identifier start */
+0x34A70181, /* 29 Lu, hasLower (add 210), identifier start */
+0x33A70181, /* 30 Lu, hasLower (add 206), identifier start */
+0x33670181, /* 31 Lu, hasLower (add 205), identifier start */
+0x32A70181, /* 32 Lu, hasLower (add 202), identifier start */
+0x32E70181, /* 33 Lu, hasLower (add 203), identifier start */
+0x33E70181, /* 34 Lu, hasLower (add 207), identifier start */
+0x34E70181, /* 35 Lu, hasLower (add 211), identifier start */
+0x34670181, /* 36 Lu, hasLower (add 209), identifier start */
+0x35670181, /* 37 Lu, hasLower (add 213), identifier start */
+0x00070181, /* 38 Lu, identifier start */
+0x36A70181, /* 39 Lu, hasLower (add 218), identifier start */
+0x00070185, /* 40 Lo, identifier start */
+0x36670181, /* 41 Lu, hasLower (add 217), identifier start */
+0x36E70181, /* 42 Lu, hasLower (add 219), identifier start */
+0x00AF0181, /* 43 Lu, hasLower (add 2), hasTitle, identifier start */
+0x007F0183, /* 44 Lt, hasUpper (subtract 1), hasLower (add 1), hasTitle, identifier start */
+0x009F0182, /* 45 Ll, hasUpper (subtract 2), hasTitle, identifier start */
+0x00000000, /* 46 unassigned */
+0x34970182, /* 47 Ll, hasUpper (subtract 210), identifier start */
+0x33970182, /* 48 Ll, hasUpper (subtract 206), identifier start */
+0x33570182, /* 49 Ll, hasUpper (subtract 205), identifier start */
+0x32970182, /* 50 Ll, hasUpper (subtract 202), identifier start */
+0x32D70182, /* 51 Ll, hasUpper (subtract 203), identifier start */
+0x33D70182, /* 52 Ll, hasUpper (subtract 207), identifier start */
+0x34570182, /* 53 Ll, hasUpper (subtract 209), identifier start */
+0x34D70182, /* 54 Ll, hasUpper (subtract 211), identifier start */
+0x35570182, /* 55 Ll, hasUpper (subtract 213), identifier start */
+0x36970182, /* 56 Ll, hasUpper (subtract 218), identifier start */
+0x36570182, /* 57 Ll, hasUpper (subtract 217), identifier start */
+0x36D70182, /* 58 Ll, hasUpper (subtract 219), identifier start */
+0x00070084, /* 59 Lm, identifier start */
+0x00030086, /* 60 Mn, identifier part */
+0x09A70181, /* 61 Lu, hasLower (add 38), identifier start */
+0x09670181, /* 62 Lu, hasLower (add 37), identifier start */
+0x10270181, /* 63 Lu, hasLower (add 64), identifier start */
+0x0FE70181, /* 64 Lu, hasLower (add 63), identifier start */
+0x09970182, /* 65 Ll, hasUpper (subtract 38), identifier start */
+0x09570182, /* 66 Ll, hasUpper (subtract 37), identifier start */
+0x10170182, /* 67 Ll, hasUpper (subtract 64), identifier start */
+0x0FD70182, /* 68 Ll, hasUpper (subtract 63), identifier start */
+0x0F970182, /* 69 Ll, hasUpper (subtract 62), identifier start */
+0x0E570182, /* 70 Ll, hasUpper (subtract 57), identifier start */
+0x0BD70182, /* 71 Ll, hasUpper (subtract 47), identifier start */
+0x0D970182, /* 72 Ll, hasUpper (subtract 54), identifier start */
+0x15970182, /* 73 Ll, hasUpper (subtract 86), identifier start */
+0x14170182, /* 74 Ll, hasUpper (subtract 80), identifier start */
+0x14270181, /* 75 Lu, hasLower (add 80), identifier start */
+0x0C270181, /* 76 Lu, hasLower (add 48), identifier start */
+0x0C170182, /* 77 Ll, hasUpper (subtract 48), identifier start */
+0x00034089, /* 78 Nd, identifier part, decimal 0 */
+0x00000087, /* 79 Me */
+0x00030088, /* 80 Mc, identifier part */
+0x00037489, /* 81 Nd, identifier part, decimal 26 */
+0x00005A0B, /* 82 No, decimal 13 */
+0x00006E0B, /* 83 No, decimal 23 */
+0x0000740B, /* 84 No, decimal 26 */
+0x0000000B, /* 85 No */
+0xFE170182, /* 86 Ll, hasUpper (subtract -8), identifier start */
+0xFE270181, /* 87 Lu, hasLower (add -8), identifier start */
+0xED970182, /* 88 Ll, hasUpper (subtract -74), identifier start */
+0xEA970182, /* 89 Ll, hasUpper (subtract -86), identifier start */
+0xE7170182, /* 90 Ll, hasUpper (subtract -100), identifier start */
+0xE0170182, /* 91 Ll, hasUpper (subtract -128), identifier start */
+0xE4170182, /* 92 Ll, hasUpper (subtract -112), identifier start */
+0xE0970182, /* 93 Ll, hasUpper (subtract -126), identifier start */
+0xFDD70182, /* 94 Ll, hasUpper (subtract -9), identifier start */
+0xEDA70181, /* 95 Lu, hasLower (add -74), identifier start */
+0xFDE70181, /* 96 Lu, hasLower (add -9), identifier start */
+0xEAA70181, /* 97 Lu, hasLower (add -86), identifier start */
+0xE7270181, /* 98 Lu, hasLower (add -100), identifier start */
+0xFE570182, /* 99 Ll, hasUpper (subtract -7), identifier start */
+0xE4270181, /* 100 Lu, hasLower (add -112), identifier start */
+0xFE670181, /* 101 Lu, hasLower (add -7), identifier start */
+0xE0270181, /* 102 Lu, hasLower (add -128), identifier start */
+0xE0A70181, /* 103 Lu, hasLower (add -126), identifier start */
+0x00010010, /* 104 Cf, ignorable */
+0x0004000D, /* 105 Zl, whitespace */
+0x0004000E, /* 106 Zp, whitespace */
+0x0000400B, /* 107 No, decimal 0 */
+0x0000440B, /* 108 No, decimal 2 */
+0x0427438A, /* 109 Nl, hasLower (add 16), identifier start, decimal 1 */
+0x0427818A, /* 110 Nl, hasLower (add 16), identifier start, strange */
+0x0417638A, /* 111 Nl, hasUpper (subtract 16), identifier start, decimal 17 */
+0x0417818A, /* 112 Nl, hasUpper (subtract 16), identifier start, strange */
+0x0007818A, /* 113 Nl, identifier start, strange */
+0x0000420B, /* 114 No, decimal 1 */
+0x0000720B, /* 115 No, decimal 25 */
+0x06A0001C, /* 116 So, hasLower (add 26) */
+0x0690001C, /* 117 So, hasUpper (subtract 26) */
+0x00006C0B, /* 118 No, decimal 22 */
+0x0000560B, /* 119 No, decimal 11 */
+0x0007738A, /* 120 Nl, identifier start, decimal 25 */
+0x0007418A, /* 121 Nl, identifier start, decimal 0 */
+0x00000013, /* 122 Cs */
+0x00000012 /* 123 Co */
+};
+
+const jschar js_uriReservedPlusPound_ucstr[] =
+ {';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '#', 0};
+const jschar js_uriUnescaped_ucstr[] =
+ {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
+ 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
+ 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
+ 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
+ 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
+ '-', '_', '.', '!', '~', '*', '\'', '(', ')', 0};
+
+#define URI_CHUNK 64U
+
+/* Concatenate jschars onto an unshared/newborn JSString. */
+static JSBool
+AddCharsToURI(JSContext *cx, JSString *str, const jschar *chars, size_t length)
+{
+ size_t total;
+
+ JS_ASSERT(!JSSTRING_IS_DEPENDENT(str));
+ total = str->length + length + 1;
+ if (!str->chars ||
+ JS_HOWMANY(total, URI_CHUNK) > JS_HOWMANY(str->length + 1, URI_CHUNK)) {
+ total = JS_ROUNDUP(total, URI_CHUNK);
+ str->chars = JS_realloc(cx, str->chars, total * sizeof(jschar));
+ if (!str->chars)
+ return JS_FALSE;
+ }
+ js_strncpy(str->chars + str->length, chars, length);
+ str->length += length;
+ str->chars[str->length] = 0;
+ return JS_TRUE;
+}
+
+/*
+ * ECMA 3, 15.1.3 URI Handling Function Properties
+ *
+ * The following are implementations of the algorithms
+ * given in the ECMA specification for the hidden functions
+ * 'Encode' and 'Decode'.
+ */
+static JSBool
+Encode(JSContext *cx, JSString *str, const jschar *unescapedSet,
+ const jschar *unescapedSet2, jsval *rval)
+{
+ size_t length, j, k, L;
+ jschar *chars, c, c2;
+ uint32 v;
+ uint8 utf8buf[6];
+ jschar hexBuf[4];
+ static const char HexDigits[] = "0123456789ABCDEF"; /* NB: uppercase */
+ JSString *R;
+
+ length = JSSTRING_LENGTH(str);
+ if (length == 0) {
+ *rval = STRING_TO_JSVAL(cx->runtime->emptyString);
+ return JS_TRUE;
+ }
+
+ R = js_NewString(cx, NULL, 0, 0);
+ if (!R)
+ return JS_FALSE;
+
+ hexBuf[0] = '%';
+ hexBuf[3] = 0;
+ chars = JSSTRING_CHARS(str);
+ for (k = 0; k < length; k++) {
+ c = chars[k];
+ if (js_strchr(unescapedSet, c) ||
+ (unescapedSet2 && js_strchr(unescapedSet2, c))) {
+ if (!AddCharsToURI(cx, R, &c, 1))
+ return JS_FALSE;
+ } else {
+ if ((c >= 0xDC00) && (c <= 0xDFFF)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_URI, NULL);
+ return JS_FALSE;
+ }
+ if (c < 0xD800 || c > 0xDBFF) {
+ v = c;
+ } else {
+ k++;
+ if (k == length) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_URI, NULL);
+ return JS_FALSE;
+ }
+ c2 = chars[k];
+ if ((c2 < 0xDC00) || (c2 > 0xDFFF)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_URI, NULL);
+ return JS_FALSE;
+ }
+ v = ((c - 0xD800) << 10) + (c2 - 0xDC00) + 0x10000;
+ }
+ L = js_OneUcs4ToUtf8Char(utf8buf, v);
+ for (j = 0; j < L; j++) {
+ hexBuf[1] = HexDigits[utf8buf[j] >> 4];
+ hexBuf[2] = HexDigits[utf8buf[j] & 0xf];
+ if (!AddCharsToURI(cx, R, hexBuf, 3))
+ return JS_FALSE;
+ }
+ }
+ }
+
+ /*
+ * Shrinking realloc can fail (e.g., with a BSD-style allocator), but we
+ * don't worry about that case here. Worst case, R hangs onto URI_CHUNK-1
+ * more jschars than it needs.
+ */
+ chars = (jschar *) JS_realloc(cx, R->chars, (R->length+1) * sizeof(jschar));
+ if (chars)
+ R->chars = chars;
+ *rval = STRING_TO_JSVAL(R);
+ return JS_TRUE;
+}
+
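As a standalone illustration of the arithmetic Encode() applies to characters outside the unescaped sets, the sketch below folds a surrogate pair into one code point, expands it to UTF-8 with the same loop used by js_OneUcs4ToUtf8Char() further down, and prints the %XX escapes. It is a sketch only: the plain C types and the helper name are local stand-ins, not SpiderMonkey API.

    #include <stdio.h>

    /* Mirrors the loop in js_OneUcs4ToUtf8Char(); local stand-in, not the engine routine. */
    static int ucs4_to_utf8(unsigned char *buf, unsigned long v)
    {
        int len = 1;
        if (v < 0x80) {
            buf[0] = (unsigned char)v;
        } else {
            unsigned long a = v >> 11;
            int i;
            len = 2;
            while (a) {            /* each additional byte adds 5 payload bits beyond the first 11 */
                a >>= 5;
                len++;
            }
            i = len;
            while (--i) {          /* fill continuation bytes from the low bits up */
                buf[i] = (unsigned char)((v & 0x3F) | 0x80);
                v >>= 6;
            }
            buf[0] = (unsigned char)(0x100 - (1 << (8 - len)) + v);
        }
        return len;
    }

    int main(void)
    {
        /* U+1F600 arrives from the JS string as the surrogate pair 0xD83D 0xDE00. */
        unsigned c = 0xD83D, c2 = 0xDE00;
        unsigned long v = ((unsigned long)(c - 0xD800) << 10) + (c2 - 0xDC00) + 0x10000;
        unsigned char utf8[6];
        int i, n = ucs4_to_utf8(utf8, v);

        for (i = 0; i < n; i++)
            printf("%%%02X", utf8[i]);    /* prints %F0%9F%98%80 */
        putchar('\n');
        return 0;
    }

Compiled on its own this prints %F0%9F%98%80, the escape sequence encodeURIComponent produces for U+1F600.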
+static JSBool
+Decode(JSContext *cx, JSString *str, const jschar *reservedSet, jsval *rval)
+{
+ size_t length, start, k;
+ jschar *chars, c, H;
+ uint32 v;
+ jsuint B;
+ uint8 octets[6];
+ JSString *R;
+ intN j, n;
+
+ length = JSSTRING_LENGTH(str);
+ if (length == 0) {
+ *rval = STRING_TO_JSVAL(cx->runtime->emptyString);
+ return JS_TRUE;
+ }
+
+ R = js_NewString(cx, NULL, 0, 0);
+ if (!R)
+ return JS_FALSE;
+
+ chars = JSSTRING_CHARS(str);
+ for (k = 0; k < length; k++) {
+ c = chars[k];
+ if (c == '%') {
+ start = k;
+ if ((k + 2) >= length)
+ goto bad;
+ if (!JS7_ISHEX(chars[k+1]) || !JS7_ISHEX(chars[k+2]))
+ goto bad;
+ B = JS7_UNHEX(chars[k+1]) * 16 + JS7_UNHEX(chars[k+2]);
+ k += 2;
+ if (!(B & 0x80)) {
+ c = (jschar)B;
+ } else {
+ n = 1;
+ while (B & (0x80 >> n))
+ n++;
+ if (n == 1 || n > 6)
+ goto bad;
+ octets[0] = (uint8)B;
+ if (k + 3 * (n - 1) >= length)
+ goto bad;
+ for (j = 1; j < n; j++) {
+ k++;
+ if (chars[k] != '%')
+ goto bad;
+ if (!JS7_ISHEX(chars[k+1]) || !JS7_ISHEX(chars[k+2]))
+ goto bad;
+ B = JS7_UNHEX(chars[k+1]) * 16 + JS7_UNHEX(chars[k+2]);
+ if ((B & 0xC0) != 0x80)
+ goto bad;
+ k += 2;
+ octets[j] = (char)B;
+ }
+ v = Utf8ToOneUcs4Char(octets, n);
+ if (v >= 0x10000) {
+ v -= 0x10000;
+ if (v > 0xFFFFF)
+ goto bad;
+ c = (jschar)((v & 0x3FF) + 0xDC00);
+ H = (jschar)((v >> 10) + 0xD800);
+ if (!AddCharsToURI(cx, R, &H, 1))
+ return JS_FALSE;
+ } else {
+ c = (jschar)v;
+ }
+ }
+ if (js_strchr(reservedSet, c)) {
+ if (!AddCharsToURI(cx, R, &chars[start], (k - start + 1)))
+ return JS_FALSE;
+ } else {
+ if (!AddCharsToURI(cx, R, &c, 1))
+ return JS_FALSE;
+ }
+ } else {
+ if (!AddCharsToURI(cx, R, &c, 1))
+ return JS_FALSE;
+ }
+ }
+
+ /*
+ * Shrinking realloc can fail (e.g., with a BSD-style allocator), but we
+ * don't worry about that case here. Worst case, R hangs onto URI_CHUNK-1
+ * more jschars than it needs.
+ */
+ chars = (jschar *) JS_realloc(cx, R->chars, (R->length+1) * sizeof(jschar));
+ if (chars)
+ R->chars = chars;
+ *rval = STRING_TO_JSVAL(R);
+ return JS_TRUE;
+
+bad:
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_BAD_URI);
+ return JS_FALSE;
+}
+
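Going the other way, the multi-byte branch of Decode() counts continuation bytes from the high bits of the leading byte, then folds six payload bits per %XX continuation into a single code point. A minimal standalone sketch of that folding (the hex parsing here is local; the engine uses the JS7_ISHEX/JS7_UNHEX macros and performs additional validation):

    #include <ctype.h>
    #include <stdio.h>

    /* Local hex-digit helper, standing in for JS7_UNHEX. */
    static unsigned hexval(char c)
    {
        return (unsigned)(isdigit((unsigned char)c) ? c - '0'
                                                    : 10 + tolower((unsigned char)c) - 'a');
    }

    int main(void)
    {
        const char *s = "%E2%82%AC";               /* percent-escaped UTF-8 for U+20AC */
        unsigned B = hexval(s[1]) * 16 + hexval(s[2]);
        unsigned long v;
        int n = 1, j;

        while (B & (0x80 >> n))                    /* leading byte 0xE2 => 3-byte sequence */
            n++;
        v = B & ((1u << (7 - n)) - 1);             /* payload bits of the leading byte */
        for (j = 1; j < n; j++) {
            B = hexval(s[3 * j + 1]) * 16 + hexval(s[3 * j + 2]);
            v = (v << 6) | (B & 0x3F);             /* six payload bits per continuation */
        }
        printf("U+%04lX\n", v);                    /* prints U+20AC */
        return 0;
    }

For the input %E2%82%AC this prints U+20AC; Decode() additionally validates each continuation byte, maps overlong forms to the replacement character via Utf8ToOneUcs4Char(), and re-splits values above 0xFFFF into surrogate pairs.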
+static JSBool
+str_decodeURI(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str;
+
+ str = js_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(str);
+ return Decode(cx, str, js_uriReservedPlusPound_ucstr, rval);
+}
+
+static JSBool
+str_decodeURI_Component(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str;
+
+ str = js_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(str);
+ return Decode(cx, str, js_empty_ucstr, rval);
+}
+
+static JSBool
+str_encodeURI(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str;
+
+ str = js_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(str);
+ return Encode(cx, str, js_uriReservedPlusPound_ucstr, js_uriUnescaped_ucstr,
+ rval);
+}
+
+static JSBool
+str_encodeURI_Component(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str;
+
+ str = js_ValueToString(cx, argv[0]);
+ if (!str)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(str);
+ return Encode(cx, str, js_uriUnescaped_ucstr, NULL, rval);
+}
+
+/*
+ * Convert one UCS-4 char and write it into a UTF-8 buffer, which must be at
+ * least 6 bytes long. Return the number of UTF-8 bytes of data written.
+ */
+int
+js_OneUcs4ToUtf8Char(uint8 *utf8Buffer, uint32 ucs4Char)
+{
+ int utf8Length = 1;
+
+ JS_ASSERT(ucs4Char <= 0x7FFFFFFF);
+ if (ucs4Char < 0x80) {
+ *utf8Buffer = (uint8)ucs4Char;
+ } else {
+ int i;
+ uint32 a = ucs4Char >> 11;
+ utf8Length = 2;
+ while (a) {
+ a >>= 5;
+ utf8Length++;
+ }
+ i = utf8Length;
+ while (--i) {
+ utf8Buffer[i] = (uint8)((ucs4Char & 0x3F) | 0x80);
+ ucs4Char >>= 6;
+ }
+ *utf8Buffer = (uint8)(0x100 - (1 << (8-utf8Length)) + ucs4Char);
+ }
+ return utf8Length;
+}
+
+/*
+ * Convert a utf8 character sequence into a UCS-4 character and return that
+ * character. It is assumed that the caller already checked that the sequence
+ * is valid.
+ */
+static uint32
+Utf8ToOneUcs4Char(const uint8 *utf8Buffer, int utf8Length)
+{
+ uint32 ucs4Char;
+ uint32 minucs4Char;
+ /* from Unicode 3.1, non-shortest form is illegal */
+ static const uint32 minucs4Table[] = {
+        0x00000080, 0x00000800, 0x00010000, 0x00200000, 0x04000000
+ };
+
+ JS_ASSERT(utf8Length >= 1 && utf8Length <= 6);
+ if (utf8Length == 1) {
+ ucs4Char = *utf8Buffer;
+ JS_ASSERT(!(ucs4Char & 0x80));
+ } else {
+ JS_ASSERT((*utf8Buffer & (0x100 - (1 << (7-utf8Length)))) ==
+ (0x100 - (1 << (8-utf8Length))));
+ ucs4Char = *utf8Buffer++ & ((1<<(7-utf8Length))-1);
+ minucs4Char = minucs4Table[utf8Length-2];
+ while (--utf8Length) {
+ JS_ASSERT((*utf8Buffer & 0xC0) == 0x80);
+ ucs4Char = ucs4Char<<6 | (*utf8Buffer++ & 0x3F);
+ }
+ if (ucs4Char < minucs4Char ||
+ ucs4Char == 0xFFFE || ucs4Char == 0xFFFF) {
+ ucs4Char = 0xFFFD;
+ }
+ }
+ return ucs4Char;
+}
diff --git a/src/third_party/js-1.7/jsstr.h b/src/third_party/js-1.7/jsstr.h
new file mode 100644
index 00000000000..708c69acbcf
--- /dev/null
+++ b/src/third_party/js-1.7/jsstr.h
@@ -0,0 +1,500 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsstr_h___
+#define jsstr_h___
+/*
+ * JS string type implementation.
+ *
+ * A JS string is a counted array of unicode characters. To support handoff
+ * of API client memory, the chars are allocated separately from the length,
+ * necessitating a pointer after the count, to form a separately allocated
+ * string descriptor. String descriptors are GC'ed, while their chars are
+ * allocated from the malloc heap.
+ *
+ * When a string is treated as an object (by following it with . or []), the
+ * runtime wraps it with a JSObject whose valueOf method returns the unwrapped
+ * string descriptor.
+ */
+#include <ctype.h>
+#include "jspubtd.h"
+#include "jsprvtd.h"
+#include "jshash.h"
+
+JS_BEGIN_EXTERN_C
+
+/*
+ * The original GC-thing "string" type, a flat character string owned by its
+ * GC-thing descriptor. The chars member points to a vector having byte size
+ * (length + 1) * sizeof(jschar), terminated at index length by a zero jschar.
+ * The terminator is purely a backstop, in case the chars pointer flows out to
+ * native code that requires \u0000 termination.
+ *
+ * NB: Always use the JSSTRING_LENGTH and JSSTRING_CHARS accessor macros,
+ * unless you guard str->member uses with !JSSTRING_IS_DEPENDENT(str).
+ */
+struct JSString {
+ size_t length;
+ jschar *chars;
+};
+
+/*
+ * Overlay structure for a string that depends on another string's characters.
+ * Distinguished by the JSSTRFLAG_DEPENDENT bit being set in length. The base
+ * member may point to another dependent string if JSSTRING_CHARS has not been
+ * called yet. The length chars in a dependent string are stored starting at
+ * base->chars + start, and are not necessarily zero-terminated. If start is
+ * 0, it is not stored, length is a full size_t (minus the JSSTRFLAG_* bits in
+ * the high two positions), and the JSSTRFLAG_PREFIX flag is set.
+ */
+struct JSDependentString {
+ size_t length;
+ JSString *base;
+};
+
+/* Definitions for flags stored in the high order bits of JSString.length. */
+#define JSSTRFLAG_BITS 2
+#define JSSTRFLAG_SHIFT(flg) ((size_t)(flg) << JSSTRING_LENGTH_BITS)
+#define JSSTRFLAG_MASK JSSTRFLAG_SHIFT(JS_BITMASK(JSSTRFLAG_BITS))
+#define JSSTRFLAG_DEPENDENT JSSTRFLAG_SHIFT(1)
+#define JSSTRFLAG_PREFIX JSSTRFLAG_SHIFT(2)
+
+/* Universal JSString type inquiry and accessor macros. */
+#define JSSTRING_BIT(n) ((size_t)1 << (n))
+#define JSSTRING_BITMASK(n) (JSSTRING_BIT(n) - 1)
+#define JSSTRING_HAS_FLAG(str,flg) ((str)->length & (flg))
+#define JSSTRING_IS_DEPENDENT(str) JSSTRING_HAS_FLAG(str, JSSTRFLAG_DEPENDENT)
+#define JSSTRING_IS_PREFIX(str) JSSTRING_HAS_FLAG(str, JSSTRFLAG_PREFIX)
+#define JSSTRING_CHARS(str) (JSSTRING_IS_DEPENDENT(str) \
+ ? JSSTRDEP_CHARS(str) \
+ : (str)->chars)
+#define JSSTRING_LENGTH(str) (JSSTRING_IS_DEPENDENT(str) \
+ ? JSSTRDEP_LENGTH(str) \
+ : (str)->length)
+#define JSSTRING_LENGTH_BITS (sizeof(size_t) * JS_BITS_PER_BYTE \
+ - JSSTRFLAG_BITS)
+#define JSSTRING_LENGTH_MASK JSSTRING_BITMASK(JSSTRING_LENGTH_BITS)
+
+/* Specific JSDependentString shift/mask accessor and mutator macros. */
+#define JSSTRDEP_START_BITS (JSSTRING_LENGTH_BITS-JSSTRDEP_LENGTH_BITS)
+#define JSSTRDEP_START_SHIFT JSSTRDEP_LENGTH_BITS
+#define JSSTRDEP_START_MASK JSSTRING_BITMASK(JSSTRDEP_START_BITS)
+#define JSSTRDEP_LENGTH_BITS (JSSTRING_LENGTH_BITS / 2)
+#define JSSTRDEP_LENGTH_MASK JSSTRING_BITMASK(JSSTRDEP_LENGTH_BITS)
+
+#define JSSTRDEP(str) ((JSDependentString *)(str))
+#define JSSTRDEP_START(str) (JSSTRING_IS_PREFIX(str) ? 0 \
+ : ((JSSTRDEP(str)->length \
+ >> JSSTRDEP_START_SHIFT) \
+ & JSSTRDEP_START_MASK))
+#define JSSTRDEP_LENGTH(str) (JSSTRDEP(str)->length \
+ & (JSSTRING_IS_PREFIX(str) \
+ ? JSSTRING_LENGTH_MASK \
+ : JSSTRDEP_LENGTH_MASK))
+
+#define JSSTRDEP_SET_START_AND_LENGTH(str,off,len) \
+ (JSSTRDEP(str)->length = JSSTRFLAG_DEPENDENT \
+ | ((off) << JSSTRDEP_START_SHIFT) \
+ | (len))
+#define JSPREFIX_SET_LENGTH(str,len) \
+ (JSSTRDEP(str)->length = JSSTRFLAG_DEPENDENT | JSSTRFLAG_PREFIX | (len))
+
+#define JSSTRDEP_BASE(str) (JSSTRDEP(str)->base)
+#define JSSTRDEP_SET_BASE(str,bstr) (JSSTRDEP(str)->base = (bstr))
+#define JSPREFIX_BASE(str) JSSTRDEP_BASE(str)
+#define JSPREFIX_SET_BASE(str,bstr) JSSTRDEP_SET_BASE(str,bstr)
+
+#define JSSTRDEP_CHARS(str) \
+ (JSSTRING_IS_DEPENDENT(JSSTRDEP_BASE(str)) \
+ ? js_GetDependentStringChars(str) \
+ : JSSTRDEP_BASE(str)->chars + JSSTRDEP_START(str))
+
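To make the packed length word concrete, here is a standalone sketch of the JSSTRDEP_SET_START_AND_LENGTH / JSSTRDEP_START / JSSTRDEP_LENGTH arithmetic. It assumes a 32-bit size_t purely to fix the field widths (the real macros derive them from sizeof(size_t)), and the macro names are local stand-ins:

    #include <stdio.h>
    #include <stdint.h>

    #define FLAG_BITS       2
    #define LENGTH_BITS     (32 - FLAG_BITS)              /* 30 */
    #define DEP_LEN_BITS    (LENGTH_BITS / 2)             /* 15 */
    #define DEP_START_BITS  (LENGTH_BITS - DEP_LEN_BITS)  /* 15 */
    #define MASK(n)         (((uint32_t)1 << (n)) - 1)
    #define FLAG_DEPENDENT  ((uint32_t)1 << LENGTH_BITS)

    int main(void)
    {
        uint32_t start = 5, len = 7;
        uint32_t packed = FLAG_DEPENDENT | (start << DEP_LEN_BITS) | len;

        printf("packed = 0x%08X\n", (unsigned)packed);    /* 0x40028007 */
        printf("start  = %u\n", (unsigned)((packed >> DEP_LEN_BITS) & MASK(DEP_START_BITS)));
        printf("length = %u\n", (unsigned)(packed & MASK(DEP_LEN_BITS)));
        return 0;
    }

With 30 usable length bits, a dependent string gets 15 bits each for start and length, while a prefix string keeps the full 30-bit length and stores no start at all.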
+extern size_t
+js_MinimizeDependentStrings(JSString *str, int level, JSString **basep);
+
+extern jschar *
+js_GetDependentStringChars(JSString *str);
+
+extern jschar *
+js_GetStringChars(JSString *str);
+
+extern JSString *
+js_ConcatStrings(JSContext *cx, JSString *left, JSString *right);
+
+extern const jschar *
+js_UndependString(JSContext *cx, JSString *str);
+
+struct JSSubString {
+ size_t length;
+ const jschar *chars;
+};
+
+extern jschar js_empty_ucstr[];
+extern JSSubString js_EmptySubString;
+
+/* Unicode character attribute lookup tables. */
+extern const uint8 js_X[];
+extern const uint8 js_Y[];
+extern const uint32 js_A[];
+
+/* Enumerated Unicode general category types. */
+typedef enum JSCharType {
+ JSCT_UNASSIGNED = 0,
+ JSCT_UPPERCASE_LETTER = 1,
+ JSCT_LOWERCASE_LETTER = 2,
+ JSCT_TITLECASE_LETTER = 3,
+ JSCT_MODIFIER_LETTER = 4,
+ JSCT_OTHER_LETTER = 5,
+ JSCT_NON_SPACING_MARK = 6,
+ JSCT_ENCLOSING_MARK = 7,
+ JSCT_COMBINING_SPACING_MARK = 8,
+ JSCT_DECIMAL_DIGIT_NUMBER = 9,
+ JSCT_LETTER_NUMBER = 10,
+ JSCT_OTHER_NUMBER = 11,
+ JSCT_SPACE_SEPARATOR = 12,
+ JSCT_LINE_SEPARATOR = 13,
+ JSCT_PARAGRAPH_SEPARATOR = 14,
+ JSCT_CONTROL = 15,
+ JSCT_FORMAT = 16,
+ JSCT_PRIVATE_USE = 18,
+ JSCT_SURROGATE = 19,
+ JSCT_DASH_PUNCTUATION = 20,
+ JSCT_START_PUNCTUATION = 21,
+ JSCT_END_PUNCTUATION = 22,
+ JSCT_CONNECTOR_PUNCTUATION = 23,
+ JSCT_OTHER_PUNCTUATION = 24,
+ JSCT_MATH_SYMBOL = 25,
+ JSCT_CURRENCY_SYMBOL = 26,
+ JSCT_MODIFIER_SYMBOL = 27,
+ JSCT_OTHER_SYMBOL = 28
+} JSCharType;
+
+/* Character classifying and mapping macros, based on java.lang.Character. */
+#define JS_CCODE(c) (js_A[js_Y[(js_X[(uint16)(c)>>6]<<6)|((c)&0x3F)]])
+#define JS_CTYPE(c) (JS_CCODE(c) & 0x1F)
+
+#define JS_ISALPHA(c) ((((1 << JSCT_UPPERCASE_LETTER) | \
+ (1 << JSCT_LOWERCASE_LETTER) | \
+ (1 << JSCT_TITLECASE_LETTER) | \
+ (1 << JSCT_MODIFIER_LETTER) | \
+ (1 << JSCT_OTHER_LETTER)) \
+ >> JS_CTYPE(c)) & 1)
+
+#define JS_ISALNUM(c) ((((1 << JSCT_UPPERCASE_LETTER) | \
+ (1 << JSCT_LOWERCASE_LETTER) | \
+ (1 << JSCT_TITLECASE_LETTER) | \
+ (1 << JSCT_MODIFIER_LETTER) | \
+ (1 << JSCT_OTHER_LETTER) | \
+ (1 << JSCT_DECIMAL_DIGIT_NUMBER)) \
+ >> JS_CTYPE(c)) & 1)
+
+/* A unicode letter, suitable for use in an identifier. */
+#define JS_ISLETTER(c) ((((1 << JSCT_UPPERCASE_LETTER) | \
+ (1 << JSCT_LOWERCASE_LETTER) | \
+ (1 << JSCT_TITLECASE_LETTER) | \
+ (1 << JSCT_MODIFIER_LETTER) | \
+ (1 << JSCT_OTHER_LETTER) | \
+ (1 << JSCT_LETTER_NUMBER)) \
+ >> JS_CTYPE(c)) & 1)
+
+/*
+ * 'IdentifierPart' from the ECMA grammar: a Unicode letter, combining mark,
+ * digit, or connector punctuation.
+ */
+#define JS_ISIDPART(c) ((((1 << JSCT_UPPERCASE_LETTER) | \
+ (1 << JSCT_LOWERCASE_LETTER) | \
+ (1 << JSCT_TITLECASE_LETTER) | \
+ (1 << JSCT_MODIFIER_LETTER) | \
+ (1 << JSCT_OTHER_LETTER) | \
+ (1 << JSCT_LETTER_NUMBER) | \
+ (1 << JSCT_NON_SPACING_MARK) | \
+ (1 << JSCT_COMBINING_SPACING_MARK) | \
+ (1 << JSCT_DECIMAL_DIGIT_NUMBER) | \
+ (1 << JSCT_CONNECTOR_PUNCTUATION)) \
+ >> JS_CTYPE(c)) & 1)
+
+/* Unicode control-format characters, ignored in input */
+#define JS_ISFORMAT(c) (((1 << JSCT_FORMAT) >> JS_CTYPE(c)) & 1)
+
+/*
+ * Per ECMA-262 15.10.2.6, these characters are the only ones that make up a
+ * "word", as far as a RegExp is concerned. If we want a Unicode-friendlier
+ * definition of "word", we should rename this macro to something regexp-y.
+ */
+#define JS_ISWORD(c) ((c) < 128 && (isalnum(c) || (c) == '_'))
+
+#define JS_ISIDSTART(c) (JS_ISLETTER(c) || (c) == '_' || (c) == '$')
+#define JS_ISIDENT(c) (JS_ISIDPART(c) || (c) == '_' || (c) == '$')
+
+#define JS_ISXMLSPACE(c) ((c) == ' ' || (c) == '\t' || (c) == '\r' || \
+ (c) == '\n')
+#define JS_ISXMLNSSTART(c) ((JS_CCODE(c) & 0x00000100) || (c) == '_')
+#define JS_ISXMLNS(c) ((JS_CCODE(c) & 0x00000080) || (c) == '.' || \
+ (c) == '-' || (c) == '_')
+#define JS_ISXMLNAMESTART(c) (JS_ISXMLNSSTART(c) || (c) == ':')
+#define JS_ISXMLNAME(c) (JS_ISXMLNS(c) || (c) == ':')
+
+#define JS_ISDIGIT(c) (JS_CTYPE(c) == JSCT_DECIMAL_DIGIT_NUMBER)
+
+/* XXXbe unify on A/X/Y tbls, avoid ctype.h? */
+/* XXXbe fs, etc. ? */
+#define JS_ISSPACE(c) ((JS_CCODE(c) & 0x00070000) == 0x00040000)
+#define JS_ISPRINT(c) ((c) < 128 && isprint(c))
+
+#define JS_ISUPPER(c) (JS_CTYPE(c) == JSCT_UPPERCASE_LETTER)
+#define JS_ISLOWER(c) (JS_CTYPE(c) == JSCT_LOWERCASE_LETTER)
+
+#define JS_TOUPPER(c) ((jschar) ((JS_CCODE(c) & 0x00100000) \
+ ? (c) - ((int32)JS_CCODE(c) >> 22) \
+ : (c)))
+#define JS_TOLOWER(c) ((jschar) ((JS_CCODE(c) & 0x00200000) \
+ ? (c) + ((int32)JS_CCODE(c) >> 22) \
+ : (c)))
+
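The attribute words in js_A pack several of these fields into 32 bits: the low five bits hold the JSCharType, bits 0x00100000 and 0x00200000 are the hasUpper/hasLower flags tested by JS_TOUPPER/JS_TOLOWER, and the signed top bits (arithmetic shift right by 22) give the case-mapping offset. A standalone decode of entry 10 from the table above ("Lu, hasLower (add 32)"), the word the ASCII uppercase letters resolve to:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t word = 0x0827FF81;            /* js_A[10]: "Lu, hasLower (add 32)" */
        unsigned type = word & 0x1F;           /* 1 == JSCT_UPPERCASE_LETTER */
        int hasLower = (word & 0x00200000) != 0;
        int offset = (int32_t)word >> 22;      /* case-mapping offset: 32 */

        printf("type=%u hasLower=%d offset=%d\n", type, hasLower, offset);
        printf("'A' lowercases to '%c'\n", 'A' + offset);   /* prints 'a' */
        return 0;
    }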
+/*
+ * Shorthands for ASCII (7-bit) decimal and hex conversion.
+ * Manually inline isdigit for performance; MSVC doesn't do this for us.
+ */
+#define JS7_ISDEC(c) ((((unsigned)(c)) - '0') <= 9)
+#define JS7_UNDEC(c) ((c) - '0')
+#define JS7_ISHEX(c) ((c) < 128 && isxdigit(c))
+#define JS7_UNHEX(c) (uintN)(JS7_ISDEC(c) ? (c) - '0' : 10 + tolower(c) - 'a')
+#define JS7_ISLET(c) ((c) < 128 && isalpha(c))
+
+/* Initialize per-runtime string state for the first context in the runtime. */
+extern JSBool
+js_InitRuntimeStringState(JSContext *cx);
+
+extern void
+js_FinishRuntimeStringState(JSContext *cx);
+
+extern void
+js_FinishDeflatedStringCache(JSRuntime *rt);
+
+/* Initialize the String class, returning its prototype object. */
+extern JSClass js_StringClass;
+
+extern JSObject *
+js_InitStringClass(JSContext *cx, JSObject *obj);
+
+extern const char js_escape_str[];
+extern const char js_unescape_str[];
+extern const char js_uneval_str[];
+extern const char js_decodeURI_str[];
+extern const char js_encodeURI_str[];
+extern const char js_decodeURIComponent_str[];
+extern const char js_encodeURIComponent_str[];
+
+/* GC-allocate a string descriptor for the given malloc-allocated chars. */
+extern JSString *
+js_NewString(JSContext *cx, jschar *chars, size_t length, uintN gcflag);
+
+extern JSString *
+js_NewDependentString(JSContext *cx, JSString *base, size_t start,
+ size_t length, uintN gcflag);
+
+/* Copy a counted string and GC-allocate a descriptor for it. */
+extern JSString *
+js_NewStringCopyN(JSContext *cx, const jschar *s, size_t n, uintN gcflag);
+
+/* Copy a C string and GC-allocate a descriptor for it. */
+extern JSString *
+js_NewStringCopyZ(JSContext *cx, const jschar *s, uintN gcflag);
+
+/* Free the chars held by str when it is finalized by the GC. */
+extern void
+js_FinalizeString(JSContext *cx, JSString *str);
+
+extern void
+js_FinalizeStringRT(JSRuntime *rt, JSString *str);
+
+/* Wrap a string value in a String object. */
+extern JSObject *
+js_StringToObject(JSContext *cx, JSString *str);
+
+/*
+ * Convert a value to a printable C string.
+ */
+typedef JSString *(*JSValueToStringFun)(JSContext *cx, jsval v);
+
+extern JS_FRIEND_API(const char *)
+js_ValueToPrintable(JSContext *cx, jsval v, JSValueToStringFun v2sfun);
+
+#define js_ValueToPrintableString(cx,v) \
+ js_ValueToPrintable(cx, v, js_ValueToString)
+
+#define js_ValueToPrintableSource(cx,v) \
+ js_ValueToPrintable(cx, v, js_ValueToSource)
+
+/*
+ * Convert a value to a string, returning null after reporting an error,
+ * otherwise returning a new string reference.
+ */
+extern JS_FRIEND_API(JSString *)
+js_ValueToString(JSContext *cx, jsval v);
+
+/*
+ * Convert a value to its source expression, returning null after reporting
+ * an error, otherwise returning a new string reference.
+ */
+extern JS_FRIEND_API(JSString *)
+js_ValueToSource(JSContext *cx, jsval v);
+
+#ifdef HT_ENUMERATE_NEXT /* XXX don't require jshash.h */
+/*
+ * Compute a hash function from str.
+ */
+extern JSHashNumber
+js_HashString(JSString *str);
+#endif
+
+/*
+ * Return less than, equal to, or greater than zero depending on whether
+ * str1 is less than, equal to, or greater than str2.
+ */
+extern intN
+js_CompareStrings(JSString *str1, JSString *str2);
+
+/*
+ * Test if strings are equal.
+ */
+extern JSBool
+js_EqualStrings(JSString *str1, JSString *str2);
+
+/*
+ * Boyer-Moore-Horspool superlinear search for pat:patlen in text:textlen.
+ * The patlen argument must be positive and no greater than BMH_PATLEN_MAX.
+ * The start argument tells where in text to begin the search.
+ *
+ * Return the index of pat in text, or -1 if not found.
+ */
+#define BMH_CHARSET_SIZE 256 /* ISO-Latin-1 */
+#define BMH_PATLEN_MAX 255 /* skip table element is uint8 */
+
+#define BMH_BAD_PATTERN (-2) /* return value if pat is not ISO-Latin-1 */
+
+extern jsint
+js_BoyerMooreHorspool(const jschar *text, jsint textlen,
+ const jschar *pat, jsint patlen,
+ jsint start);
+
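For reference, the Horspool skip-table idea behind js_BoyerMooreHorspool() looks like this over plain char. This is only an illustrative sketch; the engine version operates on jschar, caps patterns at BMH_PATLEN_MAX, and returns BMH_BAD_PATTERN when the pattern is not ISO-Latin-1:

    #include <stdio.h>
    #include <string.h>

    static int bmh(const char *text, int textlen,
                   const char *pat, int patlen, int start)
    {
        unsigned char skip[256];
        int i, j, k;

        for (i = 0; i < 256; i++)              /* default: skip a whole pattern length */
            skip[i] = (unsigned char)patlen;
        for (i = 0; i < patlen - 1; i++)       /* smaller skips for chars inside the pattern */
            skip[(unsigned char)pat[i]] = (unsigned char)(patlen - 1 - i);

        for (k = start + patlen - 1; k < textlen; k += skip[(unsigned char)text[k]]) {
            for (i = k, j = patlen - 1; j >= 0 && text[i] == pat[j]; i--, j--)
                continue;
            if (j < 0)
                return i + 1;                  /* index of the match */
        }
        return -1;                             /* not found */
    }

    int main(void)
    {
        const char *text = "hello world";
        printf("%d\n", bmh(text, (int)strlen(text), "world", 5, 0));   /* prints 6 */
        return 0;
    }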
+extern size_t
+js_strlen(const jschar *s);
+
+extern jschar *
+js_strchr(const jschar *s, jschar c);
+
+extern jschar *
+js_strchr_limit(const jschar *s, jschar c, const jschar *limit);
+
+#define js_strncpy(t, s, n) memcpy((t), (s), (n) * sizeof(jschar))
+
+/*
+ * Return s advanced past any Unicode white space characters.
+ */
+extern const jschar *
+js_SkipWhiteSpace(const jschar *s);
+
+/*
+ * Inflate bytes to JS chars and vice versa. Report out of memory via cx
+ * and return null on error, otherwise return the jschar or byte vector that
+ * was JS_malloc'ed.  For js_InflateString, *length is updated with the length
+ * of the new string in jschars.
+ */
+extern jschar *
+js_InflateString(JSContext *cx, const char *bytes, size_t *length);
+
+extern char *
+js_DeflateString(JSContext *cx, const jschar *chars, size_t length);
+
+/*
+ * Inflate bytes to JS chars into a buffer.
+ * 'chars' must be large enough for 'length' jschars.
+ * The buffer is NOT null-terminated.
+ * cx may be NULL, which means no errors are thrown.
+ * The destination length must be initialized with the buffer size and, on
+ * return, holds the number of chars moved.
+ */
+extern JSBool
+js_InflateStringToBuffer(JSContext* cx, const char *bytes, size_t length,
+ jschar *chars, size_t* charsLength);
+
+/*
+ * Deflate JS chars to bytes into a buffer.
+ * 'bytes' must be large enough for 'length' chars.
+ * The buffer is NOT null-terminated.
+ * cx may be NULL, which means no errors are thrown.
+ * The destination length must be initialized with the buffer size and, on
+ * return, holds the number of bytes moved.
+ */
+extern JSBool
+js_DeflateStringToBuffer(JSContext* cx, const jschar *chars,
+ size_t charsLength, char *bytes, size_t* length);
+
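A small sketch of the in/out length convention these buffer routines describe: the destination length goes in holding the buffer capacity and comes back holding the number of units actually moved. The function below is a local illustration of that convention (simple byte-to-16-bit widening), not the engine routine:

    #include <stddef.h>
    #include <stdio.h>

    typedef unsigned short char16;             /* local stand-in for jschar */

    static int inflate_to_buffer(const char *bytes, size_t nbytes,
                                 char16 *chars, size_t *dstlen)
    {
        size_t i, n = nbytes < *dstlen ? nbytes : *dstlen;
        for (i = 0; i < n; i++)
            chars[i] = (unsigned char)bytes[i];   /* widen each byte to a 16-bit char */
        *dstlen = n;                              /* out: units moved */
        return n == nbytes;                       /* 0 if the buffer was too small */
    }

    int main(void)
    {
        char16 buf[8];
        size_t len = 8;                           /* in: capacity of buf */
        inflate_to_buffer("abc", 3, buf, &len);
        printf("moved %u chars, first is U+%04X\n", (unsigned)len, buf[0]);
        return 0;
    }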
+/*
+ * Associate bytes with str in the deflated string cache, returning true on
+ * successful association, false on out of memory.
+ */
+extern JSBool
+js_SetStringBytes(JSRuntime *rt, JSString *str, char *bytes, size_t length);
+
+/*
+ * Find or create a deflated string cache entry for str that contains its
+ * characters chopped from Unicode code points into bytes.
+ */
+extern char *
+js_GetStringBytes(JSRuntime *rt, JSString *str);
+
+/* Remove a deflated string cache entry associated with str if any. */
+extern void
+js_PurgeDeflatedStringCache(JSRuntime *rt, JSString *str);
+
+JSBool
+js_str_escape(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval);
+
+/*
+ * Convert one UCS-4 char and write it into a UTF-8 buffer, which must be at
+ * least 6 bytes long. Return the number of UTF-8 bytes of data written.
+ */
+extern int
+js_OneUcs4ToUtf8Char(uint8 *utf8Buffer, uint32 ucs4Char);
+
+JS_END_EXTERN_C
+
+#endif /* jsstr_h___ */
diff --git a/src/third_party/js-1.7/jstypes.h b/src/third_party/js-1.7/jstypes.h
new file mode 100644
index 00000000000..8aca929ca73
--- /dev/null
+++ b/src/third_party/js-1.7/jstypes.h
@@ -0,0 +1,464 @@
+/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ * IBM Corp.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+** File: jstypes.h
+** Description: Definitions of NSPR's basic types
+**
+** Prototypes and macros used to make up for deficiencies in ANSI environments
+** that we have found.
+**
+** Since we do not wrap <stdlib.h> and all the other standard headers, authors
+** of portable code will not know in general that they need these definitions.
+** Instead of requiring these authors to find the dependent uses in their code
+** and take the following steps only in those C files, we take steps once here
+** for all C files.
+**/
+
+#ifndef jstypes_h___
+#define jstypes_h___
+
+#include <stddef.h>
+
+/***********************************************************************
+** MACROS: JS_EXTERN_API
+** JS_EXPORT_API
+** DESCRIPTION:
+** These are only for externally visible routines and globals. For
+** internal routines, just use "extern" for type checking and that
+** will not export internal cross-file or forward-declared symbols.
+** Define a macro for declaring procedure return types. We use this to
+** deal with windoze specific type hackery for DLL definitions. Use
+** JS_EXTERN_API when the prototype for the method is declared. Use
+** JS_EXPORT_API for the implementation of the method.
+**
+** Example:
+** in dowhim.h
+** JS_EXTERN_API( void ) DoWhatIMean( void );
+** in dowhim.c
+** JS_EXPORT_API( void ) DoWhatIMean( void ) { return; }
+**
+**
+***********************************************************************/
+#ifdef WIN32
+/* These also work for __MWERKS__ */
+#define JS_EXTERN_API(__type) extern __declspec(dllexport) __type
+#define JS_EXPORT_API(__type) __declspec(dllexport) __type
+#define JS_EXTERN_DATA(__type) extern __declspec(dllexport) __type
+#define JS_EXPORT_DATA(__type) __declspec(dllexport) __type
+
+#define JS_DLL_CALLBACK
+#define JS_STATIC_DLL_CALLBACK(__x) static __x
+
+#elif defined(XP_OS2) && defined(__declspec)
+
+#define JS_EXTERN_API(__type) extern __declspec(dllexport) __type
+#define JS_EXPORT_API(__type) __declspec(dllexport) __type
+#define JS_EXTERN_DATA(__type) extern __declspec(dllexport) __type
+#define JS_EXPORT_DATA(__type) __declspec(dllexport) __type
+
+#define JS_DLL_CALLBACK
+#define JS_STATIC_DLL_CALLBACK(__x) static __x
+
+#elif defined(WIN16)
+
+#ifdef _WINDLL
+#define JS_EXTERN_API(__type) extern __type _cdecl _export _loadds
+#define JS_EXPORT_API(__type) __type _cdecl _export _loadds
+#define JS_EXTERN_DATA(__type) extern __type _export
+#define JS_EXPORT_DATA(__type) __type _export
+
+#define JS_DLL_CALLBACK __cdecl __loadds
+#define JS_STATIC_DLL_CALLBACK(__x) static __x CALLBACK
+
+#else /* this must be .EXE */
+#define JS_EXTERN_API(__type) extern __type _cdecl _export
+#define JS_EXPORT_API(__type) __type _cdecl _export
+#define JS_EXTERN_DATA(__type) extern __type _export
+#define JS_EXPORT_DATA(__type) __type _export
+
+#define JS_DLL_CALLBACK __cdecl __loadds
+#define JS_STATIC_DLL_CALLBACK(__x) __x JS_DLL_CALLBACK
+#endif /* _WINDLL */
+
+#else /* Unix */
+
+#ifdef HAVE_VISIBILITY_ATTRIBUTE
+#define JS_EXTERNAL_VIS __attribute__((visibility ("default")))
+#else
+#define JS_EXTERNAL_VIS
+#endif
+
+#define JS_EXTERN_API(__type) extern JS_EXTERNAL_VIS __type
+#define JS_EXPORT_API(__type) JS_EXTERNAL_VIS __type
+#define JS_EXTERN_DATA(__type) extern JS_EXTERNAL_VIS __type
+#define JS_EXPORT_DATA(__type) JS_EXTERNAL_VIS __type
+
+#define JS_DLL_CALLBACK
+#define JS_STATIC_DLL_CALLBACK(__x) static __x
+
+#endif
+
+#ifdef _WIN32
+# if defined(__MWERKS__) || defined(__GNUC__)
+# define JS_IMPORT_API(__x) __x
+# else
+# define JS_IMPORT_API(__x) __declspec(dllimport) __x
+# endif
+#elif defined(XP_OS2) && defined(__declspec)
+# define JS_IMPORT_API(__x) __declspec(dllimport) __x
+#else
+# define JS_IMPORT_API(__x) JS_EXPORT_API (__x)
+#endif
+
+#if defined(_WIN32) && !defined(__MWERKS__)
+# define JS_IMPORT_DATA(__x) __declspec(dllimport) __x
+#elif defined(XP_OS2) && defined(__declspec)
+# define JS_IMPORT_DATA(__x) __declspec(dllimport) __x
+#else
+# define JS_IMPORT_DATA(__x) JS_EXPORT_DATA (__x)
+#endif
+
+/*
+ * The linkage of JS API functions differs depending on whether the file is
+ * used within the JS library or not. Any source file within the JS
+ * interpreter should define EXPORT_JS_API whereas any client of the library
+ * should not.
+ */
+#ifdef EXPORT_JS_API
+#define JS_PUBLIC_API(t) JS_EXPORT_API(t)
+#define JS_PUBLIC_DATA(t) JS_EXPORT_DATA(t)
+#else
+#define JS_PUBLIC_API(t) JS_IMPORT_API(t)
+#define JS_PUBLIC_DATA(t) JS_IMPORT_DATA(t)
+#endif
+
+#define JS_FRIEND_API(t) JS_PUBLIC_API(t)
+#define JS_FRIEND_DATA(t) JS_PUBLIC_DATA(t)
+
+#ifdef _WIN32
+# define JS_INLINE __inline
+#elif defined(__GNUC__)
+# define JS_INLINE
+#else
+# define JS_INLINE
+#endif
+
+/***********************************************************************
+** MACROS: JS_BEGIN_MACRO
+** JS_END_MACRO
+** DESCRIPTION:
+** Macro body brackets so that macros with compound statement definitions
+** behave syntactically more like functions when called.
+***********************************************************************/
+#define JS_BEGIN_MACRO do {
+#define JS_END_MACRO } while (0)
+
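The do { } while (0) bracketing matters when a macro body contains more than one statement: the expansion behaves as a single statement, so it can sit in an unbraced if/else. A standalone toy example (the macro names here are local to the snippet):

    #include <stdio.h>

    #define BEGIN_MACRO  do {
    #define END_MACRO    } while (0)

    #define SWAP_INT(a, b)  BEGIN_MACRO int tmp_ = (a); (a) = (b); (b) = tmp_; END_MACRO

    int main(void)
    {
        int x = 1, y = 2;

        if (x < y)
            SWAP_INT(x, y);     /* one statement after expansion, so the else still binds */
        else
            puts("already ordered");
        printf("x=%d y=%d\n", x, y);    /* prints x=2 y=1 */
        return 0;
    }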
+/***********************************************************************
+** MACROS: JS_BEGIN_EXTERN_C
+** JS_END_EXTERN_C
+** DESCRIPTION:
+** Macro shorthands for conditional C++ extern block delimiters.
+***********************************************************************/
+#ifdef __cplusplus
+#define JS_BEGIN_EXTERN_C extern "C" {
+#define JS_END_EXTERN_C }
+#else
+#define JS_BEGIN_EXTERN_C
+#define JS_END_EXTERN_C
+#endif
+
+/***********************************************************************
+** MACROS: JS_BIT
+** JS_BITMASK
+** DESCRIPTION:
+** Bit masking macros. XXX n must be <= 31 to be portable
+***********************************************************************/
+#define JS_BIT(n) ((JSUint32)1 << (n))
+#define JS_BITMASK(n) (JS_BIT(n) - 1)
+
+/***********************************************************************
+** MACROS: JS_PTR_TO_INT32
+** JS_PTR_TO_UINT32
+** JS_INT32_TO_PTR
+** JS_UINT32_TO_PTR
+** DESCRIPTION:
+** Integer to pointer and pointer to integer conversion macros.
+***********************************************************************/
+#define JS_PTR_TO_INT32(x) ((jsint)((char *)(x) - (char *)0))
+#define JS_PTR_TO_UINT32(x) ((jsuint)((char *)(x) - (char *)0))
+#define JS_INT32_TO_PTR(x) ((void *)((char *)0 + (jsint)(x)))
+#define JS_UINT32_TO_PTR(x) ((void *)((char *)0 + (jsuint)(x)))
+
+/***********************************************************************
+** MACROS: JS_HOWMANY
+** JS_ROUNDUP
+** JS_MIN
+** JS_MAX
+** DESCRIPTION:
+** Commonly used macros for operations on compatible types.
+***********************************************************************/
+#define JS_HOWMANY(x,y) (((x)+(y)-1)/(y))
+#define JS_ROUNDUP(x,y) (JS_HOWMANY(x,y)*(y))
+#define JS_MIN(x,y) ((x)<(y)?(x):(y))
+#define JS_MAX(x,y) ((x)>(y)?(x):(y))
+
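A quick worked example of the rounding helpers, with the definitions copied so the snippet compiles on its own; this is the same arithmetic AddCharsToURI() in jsstr.c uses to grow URI buffers in URI_CHUNK (64) steps:

    #include <stdio.h>

    #define HOWMANY(x,y)  (((x)+(y)-1)/(y))   /* copied from JS_HOWMANY */
    #define ROUNDUP(x,y)  (HOWMANY(x,y)*(y))  /* copied from JS_ROUNDUP */

    int main(void)
    {
        printf("HOWMANY(10,4)  = %d\n", HOWMANY(10, 4));    /* 3 chunks of 4 cover 10 */
        printf("ROUNDUP(10,4)  = %d\n", ROUNDUP(10, 4));    /* 12 */
        printf("ROUNDUP(65,64) = %d\n", ROUNDUP(65, 64));   /* 128: next URI_CHUNK boundary */
        return 0;
    }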
+#if (defined(XP_WIN) && !defined(CROSS_COMPILE)) || defined (WINCE)
+# include "jscpucfg.h" /* Use standard Mac or Windows configuration */
+#elif defined(XP_UNIX) || defined(XP_BEOS) || defined(XP_OS2) || defined(CROSS_COMPILE)
+# include "jsautocfg.h" /* Use auto-detected configuration */
+# include "jsosdep.h" /* ...and platform-specific flags */
+#else
+# error "Must define one of XP_BEOS, XP_OS2, XP_WIN or XP_UNIX"
+#endif
+
+JS_BEGIN_EXTERN_C
+
+/************************************************************************
+** TYPES: JSUint8
+** JSInt8
+** DESCRIPTION:
+** The int8 types are known to be 8 bits each. There is no type that
+** is equivalent to a plain "char".
+************************************************************************/
+#if JS_BYTES_PER_BYTE == 1
+typedef unsigned char JSUint8;
+typedef signed char JSInt8;
+#else
+#error No suitable type for JSInt8/JSUint8
+#endif
+
+/************************************************************************
+** TYPES: JSUint16
+** JSInt16
+** DESCRIPTION:
+** The int16 types are known to be 16 bits each.
+************************************************************************/
+#if JS_BYTES_PER_SHORT == 2
+typedef unsigned short JSUint16;
+typedef short JSInt16;
+#else
+#error No suitable type for JSInt16/JSUint16
+#endif
+
+/************************************************************************
+** TYPES: JSUint32
+** JSInt32
+** DESCRIPTION:
+** The int32 types are known to be 32 bits each.
+************************************************************************/
+#if JS_BYTES_PER_INT == 4
+typedef unsigned int JSUint32;
+typedef int JSInt32;
+#define JS_INT32(x) x
+#define JS_UINT32(x) x ## U
+#elif JS_BYTES_PER_LONG == 4
+typedef unsigned long JSUint32;
+typedef long JSInt32;
+#define JS_INT32(x) x ## L
+#define JS_UINT32(x) x ## UL
+#else
+#error No suitable type for JSInt32/JSUint32
+#endif
+
+/************************************************************************
+** TYPES: JSUint64
+** JSInt64
+** DESCRIPTION:
+** The int64 types are known to be 64 bits each. Care must be used when
+** declaring variables of type JSUint64 or JSInt64. Different hardware
+** architectures and even different compilers have varying support for
+** 64 bit values. The only guaranteed portability requires the use of
+** the JSLL_ macros (see jslong.h).
+************************************************************************/
+#ifdef JS_HAVE_LONG_LONG
+#if JS_BYTES_PER_LONG == 8
+typedef long JSInt64;
+typedef unsigned long JSUint64;
+#elif defined(WIN16)
+typedef __int64 JSInt64;
+typedef unsigned __int64 JSUint64;
+#elif defined(WIN32) && !defined(__GNUC__)
+typedef __int64 JSInt64;
+typedef unsigned __int64 JSUint64;
+#else
+typedef long long JSInt64;
+typedef unsigned long long JSUint64;
+#endif /* JS_BYTES_PER_LONG == 8 */
+#else /* !JS_HAVE_LONG_LONG */
+typedef struct {
+#ifdef IS_LITTLE_ENDIAN
+ JSUint32 lo, hi;
+#else
+ JSUint32 hi, lo;
+#endif
+} JSInt64;
+typedef JSInt64 JSUint64;
+#endif /* !JS_HAVE_LONG_LONG */
+
+/************************************************************************
+** TYPES: JSUintn
+** JSIntn
+** DESCRIPTION:
+** The JSIntn types are most appropriate for automatic variables. They are
+** guaranteed to be at least 16 bits, though various architectures may
+** define them to be wider (e.g., 32 or even 64 bits). These types are
+** never valid for fields of a structure.
+************************************************************************/
+#if JS_BYTES_PER_INT >= 2
+typedef int JSIntn;
+typedef unsigned int JSUintn;
+#else
+#error 'sizeof(int)' not sufficient for platform use
+#endif
+
+/************************************************************************
+** TYPES: JSFloat64
+** DESCRIPTION:
+** NSPR's floating point type is always 64 bits.
+************************************************************************/
+typedef double JSFloat64;
+
+/************************************************************************
+** TYPES: JSSize
+** DESCRIPTION:
+** A type for representing the size of objects.
+************************************************************************/
+typedef size_t JSSize;
+
+/************************************************************************
+** TYPES:       JSPtrdiff
+** DESCRIPTION:
+** A type for pointer difference. Variables of this type are suitable
+** for storing a pointer or pointer subtraction.
+************************************************************************/
+typedef ptrdiff_t JSPtrdiff;
+
+/************************************************************************
+** TYPES: JSUptrdiff
+** DESCRIPTION:
+** An unsigned type for pointer difference. Variables of this type are
+** suitable for storing a pointer or pointer subtraction.
+************************************************************************/
+#if JS_BYTES_PER_WORD == 8 && JS_BYTES_PER_LONG != 8
+typedef JSUint64 JSUptrdiff;
+#else
+typedef unsigned long JSUptrdiff;
+#endif
+
+/************************************************************************
+** TYPES: JSBool
+** DESCRIPTION:
+** Use JSBool for variables and parameter types. Use JS_FALSE and JS_TRUE
+** for clarity of target type in assignments and actual arguments. Use
+** 'if (bool)', 'while (!bool)', '(bool) ? x : y' etc., to test booleans
+** just as you would C int-valued conditions.
+************************************************************************/
+typedef JSIntn JSBool;
+#define JS_TRUE (JSIntn)1
+#define JS_FALSE (JSIntn)0
+
+/************************************************************************
+** TYPES: JSPackedBool
+** DESCRIPTION:
+** Use JSPackedBool within structs where bitfields are not desirable
+** but minimum and consistent overhead matters.
+************************************************************************/
+typedef JSUint8 JSPackedBool;
+
+/*
+** A JSWord is an integer that is the same size as a void*
+*/
+#if JS_BYTES_PER_WORD == 8 && JS_BYTES_PER_LONG != 8
+typedef JSInt64 JSWord;
+typedef JSUint64 JSUword;
+#else
+typedef long JSWord;
+typedef unsigned long JSUword;
+#endif
+
+#include "jsotypes.h"
+
+/***********************************************************************
+** MACROS: JS_LIKELY
+** JS_UNLIKELY
+** DESCRIPTION:
+** These macros allow you to give a hint to the compiler about branch
+** probability so that it can better optimize. Use them like this:
+**
+** if (JS_LIKELY(v == 1)) {
+** ... expected code path ...
+** }
+**
+** if (JS_UNLIKELY(v == 0)) {
+** ... non-expected code path ...
+** }
+**
+***********************************************************************/
+#if defined(__GNUC__) && (__GNUC__ > 2)
+#define JS_LIKELY(x) (__builtin_expect((x), 1))
+#define JS_UNLIKELY(x) (__builtin_expect((x), 0))
+#else
+#define JS_LIKELY(x) (x)
+#define JS_UNLIKELY(x) (x)
+#endif
+
+/***********************************************************************
+** MACROS: JS_ARRAY_LENGTH
+** JS_ARRAY_END
+** DESCRIPTION:
+** Macros to get the number of elements and the pointer to one past the
+** last element of a C array. Use them like this:
+**
+** jschar buf[10], *s;
+** JSString *str;
+** ...
+** for (s = buf; s != JS_ARRAY_END(buf); ++s) *s = ...;
+** ...
+** str = JS_NewStringCopyN(cx, buf, JS_ARRAY_LENGTH(buf));
+** ...
+**
+***********************************************************************/
+
+#define JS_ARRAY_LENGTH(array) (sizeof (array) / sizeof (array)[0])
+#define JS_ARRAY_END(array) ((array) + JS_ARRAY_LENGTH(array))
+
+JS_END_EXTERN_C
+
+#endif /* jstypes_h___ */
diff --git a/src/third_party/js-1.7/jsutil.c b/src/third_party/js-1.7/jsutil.c
new file mode 100644
index 00000000000..1bb9f939910
--- /dev/null
+++ b/src/third_party/js-1.7/jsutil.c
@@ -0,0 +1,198 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ * IBM Corp.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * PR assertion checker.
+ */
+#include "jsstddef.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include "jstypes.h"
+#include "jsutil.h"
+
+#ifdef WIN32
+# include <windows.h>
+#endif
+
+JS_PUBLIC_API(void) JS_Assert(const char *s, const char *file, JSIntn ln)
+{
+ fprintf(stderr, "Assertion failure: %s, at %s:%d\n", s, file, ln);
+#if defined(WIN32)
+ DebugBreak();
+ exit(3);
+#elif defined(XP_OS2) || (defined(__GNUC__) && defined(__i386))
+ asm("int $3");
+#endif
+ abort();
+}
+
+#if defined DEBUG_notme && defined XP_UNIX
+
+#define __USE_GNU 1
+#include <dlfcn.h>
+#include <string.h>
+#include "jshash.h"
+#include "jsprf.h"
+
+JSCallsite js_calltree_root = {0, NULL, NULL, 0, NULL, NULL, NULL, NULL};
+
+static JSCallsite *
+CallTree(void **bp)
+{
+ void **bpup, **bpdown, *pc;
+ JSCallsite *parent, *site, **csp;
+ Dl_info info;
+ int ok, offset;
+ const char *symbol;
+ char *method;
+
+ /* Reverse the stack frame list to avoid recursion. */
+ bpup = NULL;
+ for (;;) {
+ bpdown = (void**) bp[0];
+ bp[0] = (void*) bpup;
+ if ((void**) bpdown[0] < bpdown)
+ break;
+ bpup = bp;
+ bp = bpdown;
+ }
+
+ /* Reverse the stack again, finding and building a path in the tree. */
+ parent = &js_calltree_root;
+ do {
+ bpup = (void**) bp[0];
+ bp[0] = (void*) bpdown;
+ pc = bp[1];
+
+ csp = &parent->kids;
+ while ((site = *csp) != NULL) {
+ if (site->pc == pc) {
+ /* Put the most recently used site at the front of siblings. */
+ *csp = site->siblings;
+ site->siblings = parent->kids;
+ parent->kids = site;
+
+ /* Site already built -- go up the stack. */
+ goto upward;
+ }
+ csp = &site->siblings;
+ }
+
+ /* Check for recursion: see if pc is on our ancestor line. */
+ for (site = parent; site; site = site->parent) {
+ if (site->pc == pc)
+ goto upward;
+ }
+
+ /*
+ * Not in tree at all: let's find our symbolic callsite info.
+ * XXX static syms are masked by nearest lower global
+ */
+ info.dli_fname = info.dli_sname = NULL;
+ ok = dladdr(pc, &info);
+ if (ok < 0) {
+ fprintf(stderr, "dladdr failed!\n");
+ return NULL;
+ }
+
+/* XXXbe sub 0x08040000? or something, see dbaron bug with tenthumbs comment */
+ symbol = info.dli_sname;
+ offset = (char*)pc - (char*)info.dli_fbase;
+ method = symbol
+ ? strdup(symbol)
+ : JS_smprintf("%s+%X",
+ info.dli_fname ? info.dli_fname : "main",
+ offset);
+ if (!method)
+ return NULL;
+
+ /* Create a new callsite record. */
+ site = (JSCallsite *) malloc(sizeof(JSCallsite));
+ if (!site)
+ return NULL;
+
+ /* Insert the new site into the tree. */
+ site->pc = pc;
+ site->name = method;
+ site->library = info.dli_fname;
+ site->offset = offset;
+ site->parent = parent;
+ site->siblings = parent->kids;
+ parent->kids = site;
+ site->kids = NULL;
+
+ upward:
+ parent = site;
+ bpdown = bp;
+ bp = bpup;
+ } while (bp);
+
+ return site;
+}
+
+JSCallsite *
+JS_Backtrace(int skip)
+{
+ void **bp, **bpdown;
+
+ /* Stack walking code adapted from Kipp's "leaky". */
+#if defined(__i386)
+ __asm__( "movl %%ebp, %0" : "=g"(bp));
+#elif defined(__x86_64__)
+ __asm__( "movq %%rbp, %0" : "=g"(bp));
+#else
+ /*
+ * It would be nice if this worked uniformly, but at least on i386 and
+ * x86_64, it stopped working with gcc 4.1, because it points to the
+ * end of the saved registers instead of the start.
+ */
+ bp = (void**) __builtin_frame_address(0);
+#endif
+ while (--skip >= 0) {
+ bpdown = (void**) *bp++;
+ if (bpdown < bp)
+ break;
+ bp = bpdown;
+ }
+
+ return CallTree(bp);
+}
+
+#endif /* DEBUG_notme && XP_UNIX */
diff --git a/src/third_party/js-1.7/jsutil.h b/src/third_party/js-1.7/jsutil.h
new file mode 100644
index 00000000000..efcb614cb21
--- /dev/null
+++ b/src/third_party/js-1.7/jsutil.h
@@ -0,0 +1,106 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * PR assertion checker.
+ */
+
+#ifndef jsutil_h___
+#define jsutil_h___
+
+JS_BEGIN_EXTERN_C
+
+#ifdef DEBUG
+
+extern JS_PUBLIC_API(void)
+JS_Assert(const char *s, const char *file, JSIntn ln);
+#define JS_ASSERT(_expr) \
+ ((_expr)?((void)0):JS_Assert(# _expr,__FILE__,__LINE__))
+
+#define JS_NOT_REACHED(_reasonStr) \
+ JS_Assert(_reasonStr,__FILE__,__LINE__)
+
+#else
+
+#define JS_ASSERT(expr) ((void) 0)
+#define JS_NOT_REACHED(reasonStr)
+
+#endif /* defined(DEBUG) */
+
+/*
+ * Compile-time assert. "condition" must be a constant expression.
+ * The macro should be used only once per source line in places where
+ * a "typedef" declaration is allowed.
+ */
+#define JS_STATIC_ASSERT(condition) \
+ JS_STATIC_ASSERT_IMPL(condition, __LINE__)
+#define JS_STATIC_ASSERT_IMPL(condition, line) \
+ JS_STATIC_ASSERT_IMPL2(condition, line)
+#define JS_STATIC_ASSERT_IMPL2(condition, line) \
+ typedef int js_static_assert_line_##line[(condition) ? 1 : -1]
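+
+/*
+ * Illustrative usage (example only):
+ *
+ *   JS_STATIC_ASSERT(sizeof(JSUint32) == 4);
+ *
+ * The macro expands to a typedef of an int array whose size is -1 when the
+ * condition is false, so a violated assertion fails to compile.
+ */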
+
+/*
+** Abort the process in a non-graceful manner. This will cause a core file,
+** a call to the debugger, or the moral equivalent, as well as causing the
+** entire process to stop.
+*/
+extern JS_PUBLIC_API(void) JS_Abort(void);
+
+#ifdef XP_UNIX
+
+typedef struct JSCallsite JSCallsite;
+
+struct JSCallsite {
+ uint32 pc;
+ char *name;
+ const char *library;
+ int offset;
+ JSCallsite *parent;
+ JSCallsite *siblings;
+ JSCallsite *kids;
+ void *handy;
+};
+
+extern JSCallsite *JS_Backtrace(int skip);
+
+#endif
+
+JS_END_EXTERN_C
+
+#endif /* jsutil_h___ */
diff --git a/src/third_party/js-1.7/jsxdrapi.c b/src/third_party/js-1.7/jsxdrapi.c
new file mode 100644
index 00000000000..2855c608e92
--- /dev/null
+++ b/src/third_party/js-1.7/jsxdrapi.c
@@ -0,0 +1,835 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+#include "jsstddef.h"
+#include "jsconfig.h"
+
+#if JS_HAS_XDR
+
+#include <string.h>
+#include "jstypes.h"
+#include "jsutil.h" /* Added by JSIFY */
+#include "jsdhash.h"
+#include "jsprf.h"
+#include "jsapi.h"
+#include "jscntxt.h"
+#include "jsnum.h"
+#include "jsobj.h" /* js_XDRObject */
+#include "jsscript.h" /* js_XDRScript */
+#include "jsstr.h"
+#include "jsxdrapi.h"
+
+#ifdef DEBUG
+#define DBG(x) x
+#else
+#define DBG(x) ((void)0)
+#endif
+
+typedef struct JSXDRMemState {
+ JSXDRState state;
+ char *base;
+ uint32 count;
+ uint32 limit;
+} JSXDRMemState;
+
+#define MEM_BLOCK 8192
+#define MEM_PRIV(xdr) ((JSXDRMemState *)(xdr))
+
+#define MEM_BASE(xdr) (MEM_PRIV(xdr)->base)
+#define MEM_COUNT(xdr) (MEM_PRIV(xdr)->count)
+#define MEM_LIMIT(xdr) (MEM_PRIV(xdr)->limit)
+
+#define MEM_LEFT(xdr, bytes) \
+ JS_BEGIN_MACRO \
+ if ((xdr)->mode == JSXDR_DECODE && \
+ MEM_COUNT(xdr) + bytes > MEM_LIMIT(xdr)) { \
+ JS_ReportErrorNumber((xdr)->cx, js_GetErrorMessage, NULL, \
+ JSMSG_END_OF_DATA); \
+ return 0; \
+ } \
+ JS_END_MACRO
+
+#define MEM_NEED(xdr, bytes) \
+ JS_BEGIN_MACRO \
+ if ((xdr)->mode == JSXDR_ENCODE) { \
+ if (MEM_LIMIT(xdr) && \
+ MEM_COUNT(xdr) + bytes > MEM_LIMIT(xdr)) { \
+ uint32 limit_ = JS_ROUNDUP(MEM_COUNT(xdr) + bytes, MEM_BLOCK);\
+ void *data_ = JS_realloc((xdr)->cx, MEM_BASE(xdr), limit_); \
+ if (!data_) \
+ return 0; \
+ MEM_BASE(xdr) = data_; \
+ MEM_LIMIT(xdr) = limit_; \
+ } \
+ } else { \
+ MEM_LEFT(xdr, bytes); \
+ } \
+ JS_END_MACRO
+
+#define MEM_DATA(xdr) ((void *)(MEM_BASE(xdr) + MEM_COUNT(xdr)))
+#define MEM_INCR(xdr,bytes) (MEM_COUNT(xdr) += (bytes))
+
+static JSBool
+mem_get32(JSXDRState *xdr, uint32 *lp)
+{
+ MEM_LEFT(xdr, 4);
+ *lp = *(uint32 *)MEM_DATA(xdr);
+ MEM_INCR(xdr, 4);
+ return JS_TRUE;
+}
+
+static JSBool
+mem_set32(JSXDRState *xdr, uint32 *lp)
+{
+ MEM_NEED(xdr, 4);
+ *(uint32 *)MEM_DATA(xdr) = *lp;
+ MEM_INCR(xdr, 4);
+ return JS_TRUE;
+}
+
+static JSBool
+mem_getbytes(JSXDRState *xdr, char *bytes, uint32 len)
+{
+ MEM_LEFT(xdr, len);
+ memcpy(bytes, MEM_DATA(xdr), len);
+ MEM_INCR(xdr, len);
+ return JS_TRUE;
+}
+
+static JSBool
+mem_setbytes(JSXDRState *xdr, char *bytes, uint32 len)
+{
+ MEM_NEED(xdr, len);
+ memcpy(MEM_DATA(xdr), bytes, len);
+ MEM_INCR(xdr, len);
+ return JS_TRUE;
+}
+
+static void *
+mem_raw(JSXDRState *xdr, uint32 len)
+{
+ void *data;
+ if (xdr->mode == JSXDR_ENCODE) {
+ MEM_NEED(xdr, len);
+ } else if (xdr->mode == JSXDR_DECODE) {
+ MEM_LEFT(xdr, len);
+ }
+ data = MEM_DATA(xdr);
+ MEM_INCR(xdr, len);
+ return data;
+}
+
+static JSBool
+mem_seek(JSXDRState *xdr, int32 offset, JSXDRWhence whence)
+{
+ switch (whence) {
+ case JSXDR_SEEK_CUR:
+ if ((int32)MEM_COUNT(xdr) + offset < 0) {
+ JS_ReportErrorNumber(xdr->cx, js_GetErrorMessage, NULL,
+ JSMSG_SEEK_BEYOND_START);
+ return JS_FALSE;
+ }
+ if (offset > 0)
+ MEM_NEED(xdr, offset);
+ MEM_COUNT(xdr) += offset;
+ return JS_TRUE;
+ case JSXDR_SEEK_SET:
+ if (offset < 0) {
+ JS_ReportErrorNumber(xdr->cx, js_GetErrorMessage, NULL,
+ JSMSG_SEEK_BEYOND_START);
+ return JS_FALSE;
+ }
+ if (xdr->mode == JSXDR_ENCODE) {
+ if ((uint32)offset > MEM_COUNT(xdr))
+ MEM_NEED(xdr, offset - MEM_COUNT(xdr));
+ MEM_COUNT(xdr) = offset;
+ } else {
+ if ((uint32)offset > MEM_LIMIT(xdr)) {
+ JS_ReportErrorNumber(xdr->cx, js_GetErrorMessage, NULL,
+ JSMSG_SEEK_BEYOND_END);
+ return JS_FALSE;
+ }
+ MEM_COUNT(xdr) = offset;
+ }
+ return JS_TRUE;
+ case JSXDR_SEEK_END:
+ if (offset >= 0 ||
+ xdr->mode == JSXDR_ENCODE ||
+ (int32)MEM_LIMIT(xdr) + offset < 0) {
+ JS_ReportErrorNumber(xdr->cx, js_GetErrorMessage, NULL,
+ JSMSG_END_SEEK);
+ return JS_FALSE;
+ }
+ MEM_COUNT(xdr) = MEM_LIMIT(xdr) + offset;
+ return JS_TRUE;
+ default: {
+ char numBuf[12];
+ JS_snprintf(numBuf, sizeof numBuf, "%d", whence);
+ JS_ReportErrorNumber(xdr->cx, js_GetErrorMessage, NULL,
+ JSMSG_WHITHER_WHENCE, numBuf);
+ return JS_FALSE;
+ }
+ }
+}
+
+static uint32
+mem_tell(JSXDRState *xdr)
+{
+ return MEM_COUNT(xdr);
+}
+
+static void
+mem_finalize(JSXDRState *xdr)
+{
+ JS_free(xdr->cx, MEM_BASE(xdr));
+}
+
+static JSXDROps xdrmem_ops = {
+ mem_get32, mem_set32, mem_getbytes, mem_setbytes,
+ mem_raw, mem_seek, mem_tell, mem_finalize
+};
+
+JS_PUBLIC_API(void)
+JS_XDRInitBase(JSXDRState *xdr, JSXDRMode mode, JSContext *cx)
+{
+ xdr->mode = mode;
+ xdr->cx = cx;
+ xdr->registry = NULL;
+ xdr->numclasses = xdr->maxclasses = 0;
+ xdr->reghash = NULL;
+ xdr->userdata = NULL;
+ xdr->script = NULL;
+}
+
+JS_PUBLIC_API(JSXDRState *)
+JS_XDRNewMem(JSContext *cx, JSXDRMode mode)
+{
+ JSXDRState *xdr = (JSXDRState *) JS_malloc(cx, sizeof(JSXDRMemState));
+ if (!xdr)
+ return NULL;
+ JS_XDRInitBase(xdr, mode, cx);
+ if (mode == JSXDR_ENCODE) {
+ if (!(MEM_BASE(xdr) = JS_malloc(cx, MEM_BLOCK))) {
+ JS_free(cx, xdr);
+ return NULL;
+ }
+ } else {
+ /* XXXbe ok, so better not deref MEM_BASE(xdr) if not ENCODE */
+ MEM_BASE(xdr) = NULL;
+ }
+ xdr->ops = &xdrmem_ops;
+ MEM_COUNT(xdr) = 0;
+ MEM_LIMIT(xdr) = MEM_BLOCK;
+ return xdr;
+}
+
+JS_PUBLIC_API(void *)
+JS_XDRMemGetData(JSXDRState *xdr, uint32 *lp)
+{
+ if (xdr->ops != &xdrmem_ops)
+ return NULL;
+ *lp = MEM_COUNT(xdr);
+ return MEM_BASE(xdr);
+}
+
+JS_PUBLIC_API(void)
+JS_XDRMemSetData(JSXDRState *xdr, void *data, uint32 len)
+{
+ if (xdr->ops != &xdrmem_ops)
+ return;
+ MEM_LIMIT(xdr) = len;
+ MEM_BASE(xdr) = data;
+ MEM_COUNT(xdr) = 0;
+}
+
+JS_PUBLIC_API(uint32)
+JS_XDRMemDataLeft(JSXDRState *xdr)
+{
+ if (xdr->ops != &xdrmem_ops)
+ return 0;
+ return MEM_LIMIT(xdr) - MEM_COUNT(xdr);
+}
+
+JS_PUBLIC_API(void)
+JS_XDRMemResetData(JSXDRState *xdr)
+{
+ if (xdr->ops != &xdrmem_ops)
+ return;
+ MEM_COUNT(xdr) = 0;
+}
+
+JS_PUBLIC_API(void)
+JS_XDRDestroy(JSXDRState *xdr)
+{
+ JSContext *cx = xdr->cx;
+ xdr->ops->finalize(xdr);
+ if (xdr->registry) {
+ JS_free(cx, xdr->registry);
+ if (xdr->reghash)
+ JS_DHashTableDestroy(xdr->reghash);
+ }
+ JS_free(cx, xdr);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_XDRUint8(JSXDRState *xdr, uint8 *b)
+{
+ uint32 l = *b;
+ if (!JS_XDRUint32(xdr, &l))
+ return JS_FALSE;
+ *b = (uint8) l;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_XDRUint16(JSXDRState *xdr, uint16 *s)
+{
+ uint32 l = *s;
+ if (!JS_XDRUint32(xdr, &l))
+ return JS_FALSE;
+ *s = (uint16) l;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_XDRUint32(JSXDRState *xdr, uint32 *lp)
+{
+ JSBool ok = JS_TRUE;
+ if (xdr->mode == JSXDR_ENCODE) {
+ uint32 xl = JSXDR_SWAB32(*lp);
+ ok = xdr->ops->set32(xdr, &xl);
+ } else if (xdr->mode == JSXDR_DECODE) {
+ ok = xdr->ops->get32(xdr, lp);
+ *lp = JSXDR_SWAB32(*lp);
+ }
+ return ok;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_XDRBytes(JSXDRState *xdr, char *bytes, uint32 len)
+{
+ uint32 padlen;
+ static char padbuf[JSXDR_ALIGN-1];
+
+ if (xdr->mode == JSXDR_ENCODE) {
+ if (!xdr->ops->setbytes(xdr, bytes, len))
+ return JS_FALSE;
+ } else {
+ if (!xdr->ops->getbytes(xdr, bytes, len))
+ return JS_FALSE;
+ }
+ len = xdr->ops->tell(xdr);
+ if (len % JSXDR_ALIGN) {
+ padlen = JSXDR_ALIGN - (len % JSXDR_ALIGN);
+ if (xdr->mode == JSXDR_ENCODE) {
+ if (!xdr->ops->setbytes(xdr, padbuf, padlen))
+ return JS_FALSE;
+ } else {
+ if (!xdr->ops->seek(xdr, padlen, JSXDR_SEEK_CUR))
+ return JS_FALSE;
+ }
+ }
+ return JS_TRUE;
+}
+
+/**
+ * Convert between a C string and the XDR representation:
+ * leading 32-bit count, then counted vector of chars,
+ * then possibly \0 padding to a multiple of 4.
+ */
+JS_PUBLIC_API(JSBool)
+JS_XDRCString(JSXDRState *xdr, char **sp)
+{
+ uint32 len;
+
+ if (xdr->mode == JSXDR_ENCODE)
+ len = strlen(*sp);
+ JS_XDRUint32(xdr, &len);
+ if (xdr->mode == JSXDR_DECODE) {
+ if (!(*sp = (char *) JS_malloc(xdr->cx, len + 1)))
+ return JS_FALSE;
+ }
+ if (!JS_XDRBytes(xdr, *sp, len)) {
+ if (xdr->mode == JSXDR_DECODE)
+ JS_free(xdr->cx, *sp);
+ return JS_FALSE;
+ }
+ if (xdr->mode == JSXDR_DECODE) {
+ (*sp)[len] = '\0';
+ } else if (xdr->mode == JSXDR_FREE) {
+ JS_free(xdr->cx, *sp);
+ *sp = NULL;
+ }
+ return JS_TRUE;
+}
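+
+/*
+ * Illustrative worked example (not normative): encoding the C string "hello"
+ * with JS_XDRCString writes a little-endian 32-bit count, the five bytes of
+ * the string, then \0 padding so the stream stays 4-byte aligned:
+ *
+ *   05 00 00 00  68 65 6c 6c 6f  00 00 00     (12 bytes total)
+ */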
+
+JS_PUBLIC_API(JSBool)
+JS_XDRCStringOrNull(JSXDRState *xdr, char **sp)
+{
+ uint32 null = (*sp == NULL);
+ if (!JS_XDRUint32(xdr, &null))
+ return JS_FALSE;
+ if (null) {
+ *sp = NULL;
+ return JS_TRUE;
+ }
+ return JS_XDRCString(xdr, sp);
+}
+
+static JSBool
+XDRChars(JSXDRState *xdr, jschar *chars, uint32 nchars)
+{
+ uint32 i, padlen, nbytes;
+ jschar *raw;
+
+ nbytes = nchars * sizeof(jschar);
+ padlen = nbytes % JSXDR_ALIGN;
+ if (padlen) {
+ padlen = JSXDR_ALIGN - padlen;
+ nbytes += padlen;
+ }
+ if (!(raw = (jschar *) xdr->ops->raw(xdr, nbytes)))
+ return JS_FALSE;
+ if (xdr->mode == JSXDR_ENCODE) {
+ for (i = 0; i != nchars; i++)
+ raw[i] = JSXDR_SWAB16(chars[i]);
+ if (padlen)
+ memset((char *)raw + nbytes - padlen, 0, padlen);
+ } else if (xdr->mode == JSXDR_DECODE) {
+ for (i = 0; i != nchars; i++)
+ chars[i] = JSXDR_SWAB16(raw[i]);
+ }
+ return JS_TRUE;
+}
+
+/*
+ * Convert between a JS (Unicode) string and the XDR representation.
+ */
+JS_PUBLIC_API(JSBool)
+JS_XDRString(JSXDRState *xdr, JSString **strp)
+{
+ uint32 nchars;
+ jschar *chars;
+
+ if (xdr->mode == JSXDR_ENCODE)
+ nchars = JSSTRING_LENGTH(*strp);
+ if (!JS_XDRUint32(xdr, &nchars))
+ return JS_FALSE;
+
+ if (xdr->mode == JSXDR_DECODE) {
+ chars = (jschar *) JS_malloc(xdr->cx, (nchars + 1) * sizeof(jschar));
+ if (!chars)
+ return JS_FALSE;
+ } else {
+ chars = JSSTRING_CHARS(*strp);
+ }
+
+ if (!XDRChars(xdr, chars, nchars))
+ goto bad;
+ if (xdr->mode == JSXDR_DECODE) {
+ chars[nchars] = 0;
+ *strp = JS_NewUCString(xdr->cx, chars, nchars);
+ if (!*strp)
+ goto bad;
+ }
+ return JS_TRUE;
+
+bad:
+ if (xdr->mode == JSXDR_DECODE)
+ JS_free(xdr->cx, chars);
+ return JS_FALSE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_XDRStringOrNull(JSXDRState *xdr, JSString **strp)
+{
+ uint32 null = (*strp == NULL);
+ if (!JS_XDRUint32(xdr, &null))
+ return JS_FALSE;
+ if (null) {
+ *strp = NULL;
+ return JS_TRUE;
+ }
+ return JS_XDRString(xdr, strp);
+}
+
+static JSBool
+XDRDoubleValue(JSXDRState *xdr, jsdouble *dp)
+{
+ jsdpun u;
+
+ if (xdr->mode == JSXDR_ENCODE)
+ u.d = *dp;
+ if (!JS_XDRUint32(xdr, &u.s.lo) || !JS_XDRUint32(xdr, &u.s.hi))
+ return JS_FALSE;
+ if (xdr->mode == JSXDR_DECODE)
+ *dp = u.d;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_XDRDouble(JSXDRState *xdr, jsdouble **dpp)
+{
+ jsdouble d;
+
+ if (xdr->mode == JSXDR_ENCODE)
+ d = **dpp;
+ if (!XDRDoubleValue(xdr, &d))
+ return JS_FALSE;
+ if (xdr->mode == JSXDR_DECODE) {
+ *dpp = JS_NewDouble(xdr->cx, d);
+ if (!*dpp)
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+
+/* These are magic pseudo-tags: see jsapi.h, near the top, for real tags. */
+#define JSVAL_XDRNULL 0x8
+#define JSVAL_XDRVOID 0xA
+
+static JSBool
+XDRValueBody(JSXDRState *xdr, uint32 type, jsval *vp)
+{
+ switch (type) {
+ case JSVAL_XDRNULL:
+ *vp = JSVAL_NULL;
+ break;
+ case JSVAL_XDRVOID:
+ *vp = JSVAL_VOID;
+ break;
+ case JSVAL_STRING: {
+ JSString *str;
+ if (xdr->mode == JSXDR_ENCODE)
+ str = JSVAL_TO_STRING(*vp);
+ if (!JS_XDRString(xdr, &str))
+ return JS_FALSE;
+ if (xdr->mode == JSXDR_DECODE)
+ *vp = STRING_TO_JSVAL(str);
+ break;
+ }
+ case JSVAL_DOUBLE: {
+ jsdouble *dp;
+ if (xdr->mode == JSXDR_ENCODE)
+ dp = JSVAL_TO_DOUBLE(*vp);
+ if (!JS_XDRDouble(xdr, &dp))
+ return JS_FALSE;
+ if (xdr->mode == JSXDR_DECODE)
+ *vp = DOUBLE_TO_JSVAL(dp);
+ break;
+ }
+ case JSVAL_OBJECT: {
+ JSObject *obj;
+ if (xdr->mode == JSXDR_ENCODE)
+ obj = JSVAL_TO_OBJECT(*vp);
+ if (!js_XDRObject(xdr, &obj))
+ return JS_FALSE;
+ if (xdr->mode == JSXDR_DECODE)
+ *vp = OBJECT_TO_JSVAL(obj);
+ break;
+ }
+ case JSVAL_BOOLEAN: {
+ uint32 b;
+ if (xdr->mode == JSXDR_ENCODE)
+ b = (uint32) JSVAL_TO_BOOLEAN(*vp);
+ if (!JS_XDRUint32(xdr, &b))
+ return JS_FALSE;
+ if (xdr->mode == JSXDR_DECODE)
+ *vp = BOOLEAN_TO_JSVAL((JSBool) b);
+ break;
+ }
+ default: {
+ uint32 i;
+
+ JS_ASSERT(type & JSVAL_INT);
+ if (xdr->mode == JSXDR_ENCODE)
+ i = (uint32) JSVAL_TO_INT(*vp);
+ if (!JS_XDRUint32(xdr, &i))
+ return JS_FALSE;
+ if (xdr->mode == JSXDR_DECODE)
+ *vp = INT_TO_JSVAL((int32) i);
+ break;
+ }
+ }
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_XDRValue(JSXDRState *xdr, jsval *vp)
+{
+ uint32 type;
+
+ if (xdr->mode == JSXDR_ENCODE) {
+ if (JSVAL_IS_NULL(*vp))
+ type = JSVAL_XDRNULL;
+ else if (JSVAL_IS_VOID(*vp))
+ type = JSVAL_XDRVOID;
+ else
+ type = JSVAL_TAG(*vp);
+ }
+ return JS_XDRUint32(xdr, &type) && XDRValueBody(xdr, type, vp);
+}
+
+JSBool
+js_XDRAtom(JSXDRState *xdr, JSAtom **atomp)
+{
+ jsval v;
+ uint32 type;
+ jsdouble d;
+ JSAtom *atom;
+
+ if (xdr->mode == JSXDR_ENCODE) {
+ v = ATOM_KEY(*atomp);
+ return JS_XDRValue(xdr, &v);
+ }
+
+ /*
+ * Inline JS_XDRValue when decoding to avoid creation of GC things when
+ * the corresponding atom already exists. See bug 321985.
+ */
+ if (!JS_XDRUint32(xdr, &type))
+ return JS_FALSE;
+ if (type == JSVAL_STRING)
+ return js_XDRStringAtom(xdr, atomp);
+
+ if (type == JSVAL_DOUBLE) {
+ if (!XDRDoubleValue(xdr, &d))
+ return JS_FALSE;
+ atom = js_AtomizeDouble(xdr->cx, d, 0);
+ } else {
+ if (!XDRValueBody(xdr, type, &v))
+ return JS_FALSE;
+ atom = js_AtomizeValue(xdr->cx, v, 0);
+ }
+
+ if (!atom)
+ return JS_FALSE;
+ *atomp = atom;
+ return JS_TRUE;
+}
+
+extern JSBool
+js_XDRStringAtom(JSXDRState *xdr, JSAtom **atomp)
+{
+ JSString *str;
+ uint32 nchars;
+ JSAtom *atom;
+ JSContext *cx;
+ void *mark;
+ jschar *chars;
+
+ if (xdr->mode == JSXDR_ENCODE) {
+ JS_ASSERT(ATOM_IS_STRING(*atomp));
+ str = ATOM_TO_STRING(*atomp);
+ return JS_XDRString(xdr, &str);
+ }
+
+ /*
+ * Inline JS_XDRString when decoding to avoid JSString allocation
+ * for already existing atoms. See bug 321985.
+ */
+ if (!JS_XDRUint32(xdr, &nchars))
+ return JS_FALSE;
+ atom = NULL;
+ cx = xdr->cx;
+ mark = JS_ARENA_MARK(&cx->tempPool);
+ JS_ARENA_ALLOCATE_CAST(chars, jschar *, &cx->tempPool,
+ nchars * sizeof(jschar));
+ if (!chars)
+ JS_ReportOutOfMemory(cx);
+ else if (XDRChars(xdr, chars, nchars))
+ atom = js_AtomizeChars(cx, chars, nchars, 0);
+ JS_ARENA_RELEASE(&cx->tempPool, mark);
+ if (!atom)
+ return JS_FALSE;
+ *atomp = atom;
+ return JS_TRUE;
+}
+
+/*
+ * FIXME: This performs lossy conversion and we need to switch to
+ * js_XDRStringAtom while still allowing older XDR files to be read. See bug 325202.
+ */
+JSBool
+js_XDRCStringAtom(JSXDRState *xdr, JSAtom **atomp)
+{
+ char *bytes;
+ uint32 nbytes;
+ JSAtom *atom;
+ JSContext *cx;
+ void *mark;
+
+ if (xdr->mode == JSXDR_ENCODE) {
+ JS_ASSERT(ATOM_IS_STRING(*atomp));
+ bytes = JS_GetStringBytes(ATOM_TO_STRING(*atomp));
+ return JS_XDRCString(xdr, &bytes);
+ }
+
+ /*
+ * Inline JS_XDRCString when decoding to avoid mallocing a temporary buffer
+ * just to free it again after atomization. See bug 321985.
+ */
+ if (!JS_XDRUint32(xdr, &nbytes))
+ return JS_FALSE;
+ atom = NULL;
+ cx = xdr->cx;
+ mark = JS_ARENA_MARK(&cx->tempPool);
+ JS_ARENA_ALLOCATE_CAST(bytes, char *, &cx->tempPool,
+ nbytes * sizeof *bytes);
+ if (!bytes)
+ JS_ReportOutOfMemory(cx);
+ else if (JS_XDRBytes(xdr, bytes, nbytes))
+ atom = js_Atomize(cx, bytes, nbytes, 0);
+ JS_ARENA_RELEASE(&cx->tempPool, mark);
+ if (!atom)
+ return JS_FALSE;
+ *atomp = atom;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(JSBool)
+JS_XDRScript(JSXDRState *xdr, JSScript **scriptp)
+{
+ if (!js_XDRScript(xdr, scriptp, NULL))
+ return JS_FALSE;
+ if (xdr->mode == JSXDR_DECODE)
+ js_CallNewScriptHook(xdr->cx, *scriptp, NULL);
+ return JS_TRUE;
+}
+
+#define CLASS_REGISTRY_MIN 8
+#define CLASS_INDEX_TO_ID(i) ((i)+1)
+#define CLASS_ID_TO_INDEX(id) ((id)-1)
+
+typedef struct JSRegHashEntry {
+ JSDHashEntryHdr hdr;
+ const char *name;
+ uint32 index;
+} JSRegHashEntry;
+
+JS_PUBLIC_API(JSBool)
+JS_XDRRegisterClass(JSXDRState *xdr, JSClass *clasp, uint32 *idp)
+{
+ uintN numclasses, maxclasses;
+ JSClass **registry;
+
+ numclasses = xdr->numclasses;
+ maxclasses = xdr->maxclasses;
+ if (numclasses == maxclasses) {
+ maxclasses = (maxclasses == 0) ? CLASS_REGISTRY_MIN : maxclasses << 1;
+ registry = (JSClass **)
+ JS_realloc(xdr->cx, xdr->registry, maxclasses * sizeof(JSClass *));
+ if (!registry)
+ return JS_FALSE;
+ xdr->registry = registry;
+ xdr->maxclasses = maxclasses;
+ } else {
+ JS_ASSERT(numclasses && numclasses < maxclasses);
+ registry = xdr->registry;
+ }
+
+ registry[numclasses] = clasp;
+ if (xdr->reghash) {
+ JSRegHashEntry *entry = (JSRegHashEntry *)
+ JS_DHashTableOperate(xdr->reghash, clasp->name, JS_DHASH_ADD);
+ if (!entry) {
+ JS_ReportOutOfMemory(xdr->cx);
+ return JS_FALSE;
+ }
+ entry->name = clasp->name;
+ entry->index = numclasses;
+ }
+ *idp = CLASS_INDEX_TO_ID(numclasses);
+ xdr->numclasses = ++numclasses;
+ return JS_TRUE;
+}
+
+JS_PUBLIC_API(uint32)
+JS_XDRFindClassIdByName(JSXDRState *xdr, const char *name)
+{
+ uintN i, numclasses;
+
+ numclasses = xdr->numclasses;
+ if (numclasses >= 10) {
+ JSRegHashEntry *entry;
+
+ /* Bootstrap reghash from registry on first overpopulated Find. */
+ if (!xdr->reghash) {
+ xdr->reghash = JS_NewDHashTable(JS_DHashGetStubOps(), NULL,
+ sizeof(JSRegHashEntry),
+ numclasses);
+ if (xdr->reghash) {
+ for (i = 0; i < numclasses; i++) {
+ JSClass *clasp = xdr->registry[i];
+ entry = (JSRegHashEntry *)
+ JS_DHashTableOperate(xdr->reghash, clasp->name,
+ JS_DHASH_ADD);
+ entry->name = clasp->name;
+ entry->index = i;
+ }
+ }
+ }
+
+ /* If we managed to create reghash, use it for O(1) Find. */
+ if (xdr->reghash) {
+ entry = (JSRegHashEntry *)
+ JS_DHashTableOperate(xdr->reghash, name, JS_DHASH_LOOKUP);
+ if (JS_DHASH_ENTRY_IS_BUSY(&entry->hdr))
+ return CLASS_INDEX_TO_ID(entry->index);
+ }
+ }
+
+ /* Only a few classes, or we couldn't malloc reghash: use linear search. */
+ for (i = 0; i < numclasses; i++) {
+ if (!strcmp(name, xdr->registry[i]->name))
+ return CLASS_INDEX_TO_ID(i);
+ }
+ return 0;
+}
+
+JS_PUBLIC_API(JSClass *)
+JS_XDRFindClassById(JSXDRState *xdr, uint32 id)
+{
+ uintN i = CLASS_ID_TO_INDEX(id);
+
+ if (i >= xdr->numclasses)
+ return NULL;
+ return xdr->registry[i];
+}
+
+#endif /* JS_HAS_XDR */
diff --git a/src/third_party/js-1.7/jsxdrapi.h b/src/third_party/js-1.7/jsxdrapi.h
new file mode 100644
index 00000000000..35d9918a302
--- /dev/null
+++ b/src/third_party/js-1.7/jsxdrapi.h
@@ -0,0 +1,223 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsxdrapi_h___
+#define jsxdrapi_h___
+
+/*
+ * JS external data representation interface API.
+ *
+ * The XDR system comprises three major parts:
+ *
+ * - the state serialization/deserialization APIs, which allow consumers
+ * of the API to serialize JS runtime state (script bytecodes, atom maps,
+ * object graphs, etc.) for later restoration. These portions
+ * are implemented in various appropriate files, such as jsscript.c
+ * for the script portions and jsobj.c for object state.
+ * - the callback APIs through which the runtime requests an opaque
+ * representation of a native object, and through which the runtime
+ * constructs a live native object from an opaque representation. These
+ * portions are the responsibility of the native object implementor.
+ * - utility functions for en/decoding of primitive types, such as
+ * JSStrings. This portion is implemented in jsxdrapi.c.
+ *
+ * Spiritually guided by Sun's XDR, where appropriate.
+ */
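+
+/*
+ * Illustrative sketch (assumes an existing JSContext *cx and JSScript
+ * *script; error handling elided): a typical in-memory encode with the API
+ * declared below looks like
+ *
+ *   JSXDRState *xdr = JS_XDRNewMem(cx, JSXDR_ENCODE);
+ *   uint32 len;
+ *   void *data;
+ *   if (JS_XDRScript(xdr, &script))
+ *       data = JS_XDRMemGetData(xdr, &len);
+ *   ...   (use data/len here; JS_XDRDestroy frees the underlying buffer)
+ *   JS_XDRDestroy(xdr);
+ */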
+
+#include "jspubtd.h"
+#include "jsprvtd.h"
+
+JS_BEGIN_EXTERN_C
+
+/* We use little-endian byte order for all encoded data. */
+
+#if defined IS_LITTLE_ENDIAN
+#define JSXDR_SWAB32(x) x
+#define JSXDR_SWAB16(x) x
+#elif defined IS_BIG_ENDIAN
+#define JSXDR_SWAB32(x) (((uint32)(x) >> 24) | \
+ (((uint32)(x) >> 8) & 0xff00) | \
+ (((uint32)(x) << 8) & 0xff0000) | \
+ ((uint32)(x) << 24))
+#define JSXDR_SWAB16(x) (((uint16)(x) >> 8) | ((uint16)(x) << 8))
+#else
+#error "unknown byte order"
+#endif
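+
+/*
+ * Worked example (illustrative only): on a big-endian host
+ * JSXDR_SWAB32(0x11223344) yields 0x44332211, while on a little-endian host
+ * both macros are the identity, so encoded data is always little-endian.
+ */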
+
+#define JSXDR_ALIGN 4
+
+typedef enum JSXDRMode {
+ JSXDR_ENCODE,
+ JSXDR_DECODE,
+ JSXDR_FREE
+} JSXDRMode;
+
+typedef enum JSXDRWhence {
+ JSXDR_SEEK_SET,
+ JSXDR_SEEK_CUR,
+ JSXDR_SEEK_END
+} JSXDRWhence;
+
+typedef struct JSXDROps {
+ JSBool (*get32)(JSXDRState *, uint32 *);
+ JSBool (*set32)(JSXDRState *, uint32 *);
+ JSBool (*getbytes)(JSXDRState *, char *, uint32);
+ JSBool (*setbytes)(JSXDRState *, char *, uint32);
+ void * (*raw)(JSXDRState *, uint32);
+ JSBool (*seek)(JSXDRState *, int32, JSXDRWhence);
+ uint32 (*tell)(JSXDRState *);
+ void (*finalize)(JSXDRState *);
+} JSXDROps;
+
+struct JSXDRState {
+ JSXDRMode mode;
+ JSXDROps *ops;
+ JSContext *cx;
+ JSClass **registry;
+ uintN numclasses;
+ uintN maxclasses;
+ void *reghash;
+ void *userdata;
+ JSScript *script;
+};
+
+extern JS_PUBLIC_API(void)
+JS_XDRInitBase(JSXDRState *xdr, JSXDRMode mode, JSContext *cx);
+
+extern JS_PUBLIC_API(JSXDRState *)
+JS_XDRNewMem(JSContext *cx, JSXDRMode mode);
+
+extern JS_PUBLIC_API(void *)
+JS_XDRMemGetData(JSXDRState *xdr, uint32 *lp);
+
+extern JS_PUBLIC_API(void)
+JS_XDRMemSetData(JSXDRState *xdr, void *data, uint32 len);
+
+extern JS_PUBLIC_API(uint32)
+JS_XDRMemDataLeft(JSXDRState *xdr);
+
+extern JS_PUBLIC_API(void)
+JS_XDRMemResetData(JSXDRState *xdr);
+
+extern JS_PUBLIC_API(void)
+JS_XDRDestroy(JSXDRState *xdr);
+
+extern JS_PUBLIC_API(JSBool)
+JS_XDRUint8(JSXDRState *xdr, uint8 *b);
+
+extern JS_PUBLIC_API(JSBool)
+JS_XDRUint16(JSXDRState *xdr, uint16 *s);
+
+extern JS_PUBLIC_API(JSBool)
+JS_XDRUint32(JSXDRState *xdr, uint32 *lp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_XDRBytes(JSXDRState *xdr, char *bytes, uint32 len);
+
+extern JS_PUBLIC_API(JSBool)
+JS_XDRCString(JSXDRState *xdr, char **sp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_XDRCStringOrNull(JSXDRState *xdr, char **sp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_XDRString(JSXDRState *xdr, JSString **strp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_XDRStringOrNull(JSXDRState *xdr, JSString **strp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_XDRDouble(JSXDRState *xdr, jsdouble **dp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_XDRValue(JSXDRState *xdr, jsval *vp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_XDRScript(JSXDRState *xdr, JSScript **scriptp);
+
+extern JS_PUBLIC_API(JSBool)
+JS_XDRRegisterClass(JSXDRState *xdr, JSClass *clasp, uint32 *lp);
+
+extern JS_PUBLIC_API(uint32)
+JS_XDRFindClassIdByName(JSXDRState *xdr, const char *name);
+
+extern JS_PUBLIC_API(JSClass *)
+JS_XDRFindClassById(JSXDRState *xdr, uint32 id);
+
+/*
+ * Magic numbers.
+ */
+#define JSXDR_MAGIC_SCRIPT_1 0xdead0001
+#define JSXDR_MAGIC_SCRIPT_2 0xdead0002
+#define JSXDR_MAGIC_SCRIPT_3 0xdead0003
+#define JSXDR_MAGIC_SCRIPT_4 0xdead0004
+#define JSXDR_MAGIC_SCRIPT_5 0xdead0005
+#define JSXDR_MAGIC_SCRIPT_CURRENT JSXDR_MAGIC_SCRIPT_5
+
+/*
+ * Bytecode version number. Decrement the second term whenever JS bytecode
+ * changes incompatibly.
+ *
+ * This version number should be XDR'ed once near the front of any file or
+ * larger storage unit containing XDR'ed bytecode and other data, and checked
+ * before deserialization of bytecode. If the saved version does not match
+ * the current version, abort deserialization and invalidate the file.
+ */
+#define JSXDR_BYTECODE_VERSION (0xb973c0de - 16)
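+
+/*
+ * Illustrative sketch (error reporting elided): XDR the version first and
+ * abort decoding on mismatch, e.g.
+ *
+ *   uint32 version = JSXDR_BYTECODE_VERSION;
+ *   if (!JS_XDRUint32(xdr, &version))
+ *       return JS_FALSE;
+ *   if (xdr->mode == JSXDR_DECODE && version != JSXDR_BYTECODE_VERSION)
+ *       return JS_FALSE;
+ */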
+
+/*
+ * Library-private functions.
+ */
+extern JSBool
+js_XDRAtom(JSXDRState *xdr, JSAtom **atomp);
+
+extern JSBool
+js_XDRStringAtom(JSXDRState *xdr, JSAtom **atomp);
+
+/*
+ * FIXME: This is the non-Unicode version of js_XDRStringAtom and performs
+ * lossy conversion. Do not use it in new code! See bug 325202.
+ */
+extern JSBool
+js_XDRCStringAtom(JSXDRState *xdr, JSAtom **atomp);
+
+JS_END_EXTERN_C
+
+#endif /* ! jsxdrapi_h___ */
diff --git a/src/third_party/js-1.7/jsxml.c b/src/third_party/js-1.7/jsxml.c
new file mode 100644
index 00000000000..1266255ba2f
--- /dev/null
+++ b/src/third_party/js-1.7/jsxml.c
@@ -0,0 +1,8357 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=78:
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is SpiderMonkey E4X code, released August, 2004.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#include "jsstddef.h"
+#include "jsconfig.h"
+
+#if JS_HAS_XML_SUPPORT
+
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+#include "jstypes.h"
+#include "jsbit.h"
+#include "jsprf.h"
+#include "jsutil.h"
+#include "jsapi.h"
+#include "jsarray.h"
+#include "jsatom.h"
+#include "jsbool.h"
+#include "jscntxt.h"
+#include "jsfun.h"
+#include "jsgc.h"
+#include "jsinterp.h"
+#include "jslock.h"
+#include "jsnum.h"
+#include "jsobj.h"
+#include "jsopcode.h"
+#include "jsparse.h"
+#include "jsscan.h"
+#include "jsscope.h"
+#include "jsscript.h"
+#include "jsstr.h"
+#include "jsxml.h"
+
+#ifdef DEBUG
+#include <string.h> /* for #ifdef DEBUG memset calls */
+#endif
+
+/*
+ * NOTES
+ * - in the js shell, you must use the -x command line option, or call
+ * options('xml') before compiling anything that uses XML literals
+ *
+ * TODO
+ * - XXXbe patrol
+ * - Fuse objects and their JSXML* private data into single GC-things
+ * - fix function::foo vs. x.(foo == 42) collision using proper namespacing
+ * - fix the !TCF_HAS_DEFXMLNS optimization in js_FoldConstants
+ * - JSCLASS_DOCUMENT_OBSERVER support -- live two-way binding to Gecko's DOM!
+ * - JS_TypeOfValue sure could use a cleaner interface to "types"
+ */
+
+#ifdef DEBUG_brendan
+#define METERING 1
+#endif
+
+#ifdef METERING
+static struct {
+ jsrefcount qname;
+ jsrefcount qnameobj;
+ jsrefcount liveqname;
+ jsrefcount liveqnameobj;
+ jsrefcount namespace;
+ jsrefcount namespaceobj;
+ jsrefcount livenamespace;
+ jsrefcount livenamespaceobj;
+ jsrefcount xml;
+ jsrefcount xmlobj;
+ jsrefcount livexml;
+ jsrefcount livexmlobj;
+} xml_stats;
+
+#define METER(x) JS_ATOMIC_INCREMENT(&(x))
+#define UNMETER(x) JS_ATOMIC_DECREMENT(&(x))
+#else
+#define METER(x) /* nothing */
+#define UNMETER(x) /* nothing */
+#endif
+
+/*
+ * Random utilities and global functions.
+ */
+const char js_isXMLName_str[] = "isXMLName";
+const char js_XMLList_str[] = "XMLList";
+const char js_localName_str[] = "localName";
+const char js_xml_parent_str[] = "parent";
+const char js_prefix_str[] = "prefix";
+const char js_toXMLString_str[] = "toXMLString";
+const char js_uri_str[] = "uri";
+
+const char js_amp_entity_str[] = "&amp;";
+const char js_gt_entity_str[] = "&gt;";
+const char js_lt_entity_str[] = "&lt;";
+const char js_quot_entity_str[] = "&quot;";
+
+#define IS_EMPTY(str) (JSSTRING_LENGTH(str) == 0)
+#define IS_STAR(str) (JSSTRING_LENGTH(str) == 1 && *JSSTRING_CHARS(str) == '*')
+
+static JSBool
+xml_isXMLName(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ *rval = BOOLEAN_TO_JSVAL(js_IsXMLName(cx, argv[0]));
+ return JS_TRUE;
+}
+
+/*
+ * Namespace class and library functions.
+ */
+enum namespace_tinyid {
+ NAMESPACE_PREFIX = -1,
+ NAMESPACE_URI = -2
+};
+
+static JSBool
+namespace_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSXMLNamespace *ns;
+
+ if (!JSVAL_IS_INT(id))
+ return JS_TRUE;
+
+ ns = (JSXMLNamespace *)
+ JS_GetInstancePrivate(cx, obj, &js_NamespaceClass.base, NULL);
+ if (!ns)
+ return JS_TRUE;
+
+ switch (JSVAL_TO_INT(id)) {
+ case NAMESPACE_PREFIX:
+ *vp = ns->prefix ? STRING_TO_JSVAL(ns->prefix) : JSVAL_VOID;
+ break;
+ case NAMESPACE_URI:
+ *vp = STRING_TO_JSVAL(ns->uri);
+ break;
+ }
+ return JS_TRUE;
+}
+
+static void
+namespace_finalize(JSContext *cx, JSObject *obj)
+{
+ JSXMLNamespace *ns;
+ JSRuntime *rt;
+
+ ns = (JSXMLNamespace *) JS_GetPrivate(cx, obj);
+ if (!ns)
+ return;
+ JS_ASSERT(ns->object == obj);
+ ns->object = NULL;
+ UNMETER(xml_stats.livenamespaceobj);
+
+ rt = cx->runtime;
+ if (rt->functionNamespaceObject == obj)
+ rt->functionNamespaceObject = NULL;
+}
+
+static void
+namespace_mark_vector(JSContext *cx, JSXMLNamespace **vec, uint32 len)
+{
+ uint32 i;
+ JSXMLNamespace *ns;
+
+ for (i = 0; i < len; i++) {
+ ns = vec[i];
+ {
+#ifdef GC_MARK_DEBUG
+ char buf[100];
+
+ JS_snprintf(buf, sizeof buf, "%s=%s",
+ ns->prefix ? JS_GetStringBytes(ns->prefix) : "",
+ JS_GetStringBytes(ns->uri));
+#endif
+ GC_MARK(cx, ns, buf);
+ }
+ }
+}
+
+static uint32
+namespace_mark(JSContext *cx, JSObject *obj, void *arg)
+{
+ JSXMLNamespace *ns;
+
+ ns = (JSXMLNamespace *) JS_GetPrivate(cx, obj);
+ GC_MARK(cx, ns, "private");
+ return 0;
+}
+
+static JSBool
+namespace_equality(JSContext *cx, JSObject *obj, jsval v, JSBool *bp)
+{
+ JSXMLNamespace *ns, *ns2;
+ JSObject *obj2;
+
+ ns = (JSXMLNamespace *) JS_GetPrivate(cx, obj);
+ JS_ASSERT(JSVAL_IS_OBJECT(v));
+ obj2 = JSVAL_TO_OBJECT(v);
+ if (!obj2 || OBJ_GET_CLASS(cx, obj2) != &js_NamespaceClass.base) {
+ *bp = JS_FALSE;
+ } else {
+ ns2 = (JSXMLNamespace *) JS_GetPrivate(cx, obj2);
+ *bp = js_EqualStrings(ns->uri, ns2->uri);
+ }
+ return JS_TRUE;
+}
+
+JS_FRIEND_DATA(JSExtendedClass) js_NamespaceClass = {
+ { "Namespace",
+ JSCLASS_HAS_PRIVATE | JSCLASS_CONSTRUCT_PROTOTYPE | JSCLASS_IS_EXTENDED |
+ JSCLASS_HAS_CACHED_PROTO(JSProto_Namespace),
+ JS_PropertyStub, JS_PropertyStub, namespace_getProperty, NULL,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, namespace_finalize,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, namespace_mark, NULL },
+ namespace_equality,NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL
+};
+
+#define NAMESPACE_ATTRS \
+ (JSPROP_ENUMERATE | JSPROP_READONLY | JSPROP_PERMANENT | JSPROP_SHARED)
+
+static JSPropertySpec namespace_props[] = {
+ {js_prefix_str, NAMESPACE_PREFIX, NAMESPACE_ATTRS, 0, 0},
+ {js_uri_str, NAMESPACE_URI, NAMESPACE_ATTRS, 0, 0},
+ {0,0,0,0,0}
+};
+
+static JSBool
+namespace_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXMLNamespace *ns;
+
+ ns = (JSXMLNamespace *)
+ JS_GetInstancePrivate(cx, obj, &js_NamespaceClass.base, argv);
+ if (!ns)
+ return JS_FALSE;
+
+ *rval = STRING_TO_JSVAL(ns->uri);
+ return JS_TRUE;
+}
+
+static JSFunctionSpec namespace_methods[] = {
+ {js_toString_str, namespace_toString, 0,0,0},
+ {0,0,0,0,0}
+};
+
+JSXMLNamespace *
+js_NewXMLNamespace(JSContext *cx, JSString *prefix, JSString *uri,
+ JSBool declared)
+{
+ JSXMLNamespace *ns;
+
+ ns = (JSXMLNamespace *)
+ js_NewGCThing(cx, GCX_NAMESPACE, sizeof(JSXMLNamespace));
+ if (!ns)
+ return NULL;
+ ns->object = NULL;
+ ns->prefix = prefix;
+ ns->uri = uri;
+ ns->declared = declared;
+ METER(xml_stats.namespace);
+ METER(xml_stats.livenamespace);
+ return ns;
+}
+
+void
+js_MarkXMLNamespace(JSContext *cx, JSXMLNamespace *ns)
+{
+ GC_MARK(cx, ns->object, "object");
+ GC_MARK(cx, ns->prefix, "prefix");
+ GC_MARK(cx, ns->uri, "uri");
+}
+
+void
+js_FinalizeXMLNamespace(JSContext *cx, JSXMLNamespace *ns)
+{
+ UNMETER(xml_stats.livenamespace);
+}
+
+JSObject *
+js_NewXMLNamespaceObject(JSContext *cx, JSString *prefix, JSString *uri,
+ JSBool declared)
+{
+ JSXMLNamespace *ns;
+
+ ns = js_NewXMLNamespace(cx, prefix, uri, declared);
+ if (!ns)
+ return NULL;
+ return js_GetXMLNamespaceObject(cx, ns);
+}
+
+JSObject *
+js_GetXMLNamespaceObject(JSContext *cx, JSXMLNamespace *ns)
+{
+ JSObject *obj;
+
+ obj = ns->object;
+ if (obj) {
+ JS_ASSERT(JS_GetPrivate(cx, obj) == ns);
+ return obj;
+ }
+ obj = js_NewObject(cx, &js_NamespaceClass.base, NULL, NULL);
+ if (!obj || !JS_SetPrivate(cx, obj, ns)) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ return NULL;
+ }
+ ns->object = obj;
+ METER(xml_stats.namespaceobj);
+ METER(xml_stats.livenamespaceobj);
+ return obj;
+}
+
+/*
+ * QName class and library functions.
+ */
+enum qname_tinyid {
+ QNAME_URI = -1,
+ QNAME_LOCALNAME = -2
+};
+
+static JSBool
+qname_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSXMLQName *qn;
+
+ if (!JSVAL_IS_INT(id))
+ return JS_TRUE;
+
+ qn = (JSXMLQName *)
+ JS_GetInstancePrivate(cx, obj, &js_QNameClass.base, NULL);
+ if (!qn)
+ return JS_TRUE;
+
+ switch (JSVAL_TO_INT(id)) {
+ case QNAME_URI:
+ *vp = qn->uri ? STRING_TO_JSVAL(qn->uri) : JSVAL_NULL;
+ break;
+ case QNAME_LOCALNAME:
+ *vp = STRING_TO_JSVAL(qn->localName);
+ break;
+ }
+ return JS_TRUE;
+}
+
+static void
+qname_finalize(JSContext *cx, JSObject *obj)
+{
+ JSXMLQName *qn;
+
+ qn = (JSXMLQName *) JS_GetPrivate(cx, obj);
+ if (!qn)
+ return;
+ JS_ASSERT(qn->object == obj);
+ qn->object = NULL;
+ UNMETER(xml_stats.liveqnameobj);
+}
+
+static void
+anyname_finalize(JSContext* cx, JSObject* obj)
+{
+ JSRuntime *rt;
+
+ /* Make sure the next call to js_GetAnyName doesn't try to use obj. */
+ rt = cx->runtime;
+ if (rt->anynameObject == obj)
+ rt->anynameObject = NULL;
+
+ qname_finalize(cx, obj);
+}
+
+static uint32
+qname_mark(JSContext *cx, JSObject *obj, void *arg)
+{
+ JSXMLQName *qn;
+
+ qn = (JSXMLQName *) JS_GetPrivate(cx, obj);
+ GC_MARK(cx, qn, "private");
+ return 0;
+}
+
+static JSBool
+qname_identity(JSXMLQName *qna, JSXMLQName *qnb)
+{
+ if (!qna->uri ^ !qnb->uri)
+ return JS_FALSE;
+ if (qna->uri && !js_EqualStrings(qna->uri, qnb->uri))
+ return JS_FALSE;
+ return js_EqualStrings(qna->localName, qnb->localName);
+}
+
+static JSBool
+qname_equality(JSContext *cx, JSObject *obj, jsval v, JSBool *bp)
+{
+ JSXMLQName *qn, *qn2;
+ JSObject *obj2;
+
+ qn = (JSXMLQName *) JS_GetPrivate(cx, obj);
+ JS_ASSERT(JSVAL_IS_OBJECT(v));
+ obj2 = JSVAL_TO_OBJECT(v);
+ if (!obj2 || OBJ_GET_CLASS(cx, obj2) != &js_QNameClass.base) {
+ *bp = JS_FALSE;
+ } else {
+ qn2 = (JSXMLQName *) JS_GetPrivate(cx, obj2);
+ *bp = qname_identity(qn, qn2);
+ }
+ return JS_TRUE;
+}
+
+JS_FRIEND_DATA(JSExtendedClass) js_QNameClass = {
+ { "QName",
+ JSCLASS_HAS_PRIVATE | JSCLASS_CONSTRUCT_PROTOTYPE | JSCLASS_IS_EXTENDED |
+ JSCLASS_HAS_CACHED_PROTO(JSProto_QName),
+ JS_PropertyStub, JS_PropertyStub, qname_getProperty, NULL,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, qname_finalize,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, qname_mark, NULL },
+ qname_equality, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL
+};
+
+/*
+ * Classes for the ECMA-357-internal types AttributeName and AnyName, which
+ * are like QName, except that they have no property getters. They share the
+ * qname_toString method, and therefore are exposed as constructable objects
+ * in this implementation.
+ */
+JS_FRIEND_DATA(JSClass) js_AttributeNameClass = {
+ js_AttributeName_str,
+ JSCLASS_HAS_PRIVATE | JSCLASS_CONSTRUCT_PROTOTYPE |
+ JSCLASS_HAS_CACHED_PROTO(JSProto_AttributeName),
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, qname_finalize,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, qname_mark, NULL
+};
+
+JS_FRIEND_DATA(JSClass) js_AnyNameClass = {
+ js_AnyName_str,
+ JSCLASS_HAS_PRIVATE | JSCLASS_CONSTRUCT_PROTOTYPE |
+ JSCLASS_HAS_CACHED_PROTO(JSProto_AnyName),
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, anyname_finalize,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, qname_mark, NULL
+};
+
+#define QNAME_ATTRS \
+ (JSPROP_ENUMERATE | JSPROP_READONLY | JSPROP_PERMANENT | JSPROP_SHARED)
+
+static JSPropertySpec qname_props[] = {
+ {js_uri_str, QNAME_URI, QNAME_ATTRS, 0, 0},
+ {js_localName_str, QNAME_LOCALNAME, QNAME_ATTRS, 0, 0},
+ {0,0,0,0,0}
+};
+
+static JSBool
+qname_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSClass *clasp;
+ JSXMLQName *qn;
+ JSString *str, *qualstr;
+ size_t length;
+ jschar *chars;
+
+ clasp = OBJ_GET_CLASS(cx, obj);
+ if (clasp == &js_AttributeNameClass || clasp == &js_AnyNameClass) {
+ qn = (JSXMLQName *) JS_GetPrivate(cx, obj);
+ } else {
+ qn = (JSXMLQName *)
+ JS_GetInstancePrivate(cx, obj, &js_QNameClass.base, argv);
+ if (!qn)
+ return JS_FALSE;
+ }
+
+ if (!qn->uri) {
+ /* No uri means wildcard qualifier. */
+ str = ATOM_TO_STRING(cx->runtime->atomState.starQualifierAtom);
+ } else if (IS_EMPTY(qn->uri)) {
+ /* Empty string for uri means localName is in no namespace. */
+ str = cx->runtime->emptyString;
+ } else {
+ qualstr = ATOM_TO_STRING(cx->runtime->atomState.qualifierAtom);
+ str = js_ConcatStrings(cx, qn->uri, qualstr);
+ if (!str)
+ return JS_FALSE;
+ }
+ str = js_ConcatStrings(cx, str, qn->localName);
+ if (!str)
+ return JS_FALSE;
+
+ if (str && clasp == &js_AttributeNameClass) {
+ length = JSSTRING_LENGTH(str);
+ chars = (jschar *) JS_malloc(cx, (length + 2) * sizeof(jschar));
+ if (!chars)
+ return JS_FALSE;
+ *chars = '@';
+ js_strncpy(chars + 1, JSSTRING_CHARS(str), length);
+ chars[++length] = 0;
+ str = js_NewString(cx, chars, length, 0);
+ if (!str) {
+ JS_free(cx, chars);
+ return JS_FALSE;
+ }
+ }
+
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSFunctionSpec qname_methods[] = {
+ {js_toString_str, qname_toString, 0,0,0},
+ {0,0,0,0,0}
+};
+
+JSXMLQName *
+js_NewXMLQName(JSContext *cx, JSString *uri, JSString *prefix,
+ JSString *localName)
+{
+ JSXMLQName *qn;
+
+ qn = (JSXMLQName *) js_NewGCThing(cx, GCX_QNAME, sizeof(JSXMLQName));
+ if (!qn)
+ return NULL;
+ qn->object = NULL;
+ qn->uri = uri;
+ qn->prefix = prefix;
+ qn->localName = localName;
+ METER(xml_stats.qname);
+ METER(xml_stats.liveqname);
+ return qn;
+}
+
+void
+js_MarkXMLQName(JSContext *cx, JSXMLQName *qn)
+{
+ GC_MARK(cx, qn->object, "object");
+ GC_MARK(cx, qn->uri, "uri");
+ GC_MARK(cx, qn->prefix, "prefix");
+ GC_MARK(cx, qn->localName, "localName");
+}
+
+void
+js_FinalizeXMLQName(JSContext *cx, JSXMLQName *qn)
+{
+ UNMETER(xml_stats.liveqname);
+}
+
+JSObject *
+js_NewXMLQNameObject(JSContext *cx, JSString *uri, JSString *prefix,
+ JSString *localName)
+{
+ JSXMLQName *qn;
+
+ qn = js_NewXMLQName(cx, uri, prefix, localName);
+ if (!qn)
+ return NULL;
+ return js_GetXMLQNameObject(cx, qn);
+}
+
+JSObject *
+js_GetXMLQNameObject(JSContext *cx, JSXMLQName *qn)
+{
+ JSObject *obj;
+
+ obj = qn->object;
+ if (obj) {
+ JS_ASSERT(JS_GetPrivate(cx, obj) == qn);
+ return obj;
+ }
+ obj = js_NewObject(cx, &js_QNameClass.base, NULL, NULL);
+ if (!obj || !JS_SetPrivate(cx, obj, qn)) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ return NULL;
+ }
+ qn->object = obj;
+ METER(xml_stats.qnameobj);
+ METER(xml_stats.liveqnameobj);
+ return obj;
+}
+
+JSObject *
+js_GetAttributeNameObject(JSContext *cx, JSXMLQName *qn)
+{
+ JSObject *obj;
+
+ obj = qn->object;
+ if (obj) {
+ if (OBJ_GET_CLASS(cx, obj) == &js_AttributeNameClass)
+ return obj;
+ qn = js_NewXMLQName(cx, qn->uri, qn->prefix, qn->localName);
+ if (!qn)
+ return NULL;
+ }
+
+ obj = js_NewObject(cx, &js_AttributeNameClass, NULL, NULL);
+ if (!obj || !JS_SetPrivate(cx, obj, qn)) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ return NULL;
+ }
+
+ qn->object = obj;
+ METER(xml_stats.qnameobj);
+ METER(xml_stats.liveqnameobj);
+ return obj;
+}
+
+JSObject *
+js_ConstructXMLQNameObject(JSContext *cx, jsval nsval, jsval lnval)
+{
+ jsval argv[2];
+
+ /*
+ * ECMA-357 11.1.2,
+ * The _QualifiedIdentifier : PropertySelector :: PropertySelector_
+ * production, step 2.
+ */
+ if (!JSVAL_IS_PRIMITIVE(nsval) &&
+ OBJ_GET_CLASS(cx, JSVAL_TO_OBJECT(nsval)) == &js_AnyNameClass) {
+ nsval = JSVAL_NULL;
+ }
+
+ argv[0] = nsval;
+ argv[1] = lnval;
+ return js_ConstructObject(cx, &js_QNameClass.base, NULL, NULL, 2, argv);
+}
+
+static JSBool
+IsXMLName(const jschar *cp, size_t n)
+{
+ JSBool rv;
+ jschar c;
+
+ rv = JS_FALSE;
+ if (n != 0 && JS_ISXMLNSSTART(*cp)) {
+ while (--n != 0) {
+ c = *++cp;
+ if (!JS_ISXMLNS(c))
+ return rv;
+ }
+ rv = JS_TRUE;
+ }
+ return rv;
+}
+
+JSBool
+js_IsXMLName(JSContext *cx, jsval v)
+{
+ JSClass *clasp;
+ JSXMLQName *qn;
+ JSString *name;
+ JSErrorReporter older;
+
+ /*
+ * Inline specialization of the QName constructor called with v passed as
+ * the only argument, to compute the localName for the constructed qname,
+ * without actually allocating the object or computing its uri and prefix.
+ * See ECMA-357 13.1.2.1 step 1 and 13.3.2.
+ */
+ if (!JSVAL_IS_PRIMITIVE(v) &&
+ (clasp = OBJ_GET_CLASS(cx, JSVAL_TO_OBJECT(v)),
+ clasp == &js_QNameClass.base ||
+ clasp == &js_AttributeNameClass ||
+ clasp == &js_AnyNameClass)) {
+ qn = (JSXMLQName *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(v));
+ name = qn->localName;
+ } else {
+ older = JS_SetErrorReporter(cx, NULL);
+ name = js_ValueToString(cx, v);
+ JS_SetErrorReporter(cx, older);
+ if (!name) {
+ JS_ClearPendingException(cx);
+ return JS_FALSE;
+ }
+ }
+
+ return IsXMLName(JSSTRING_CHARS(name), JSSTRING_LENGTH(name));
+}
+
+static JSBool
+Namespace(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsval urival, prefixval;
+ JSObject *uriobj;
+ JSBool isNamespace, isQName;
+ JSClass *clasp;
+ JSString *empty, *prefix;
+ JSXMLNamespace *ns, *ns2;
+ JSXMLQName *qn;
+
+ urival = argv[argc > 1];
+ isNamespace = isQName = JS_FALSE;
+ if (!JSVAL_IS_PRIMITIVE(urival)) {
+ uriobj = JSVAL_TO_OBJECT(urival);
+ clasp = OBJ_GET_CLASS(cx, uriobj);
+ isNamespace = (clasp == &js_NamespaceClass.base);
+ isQName = (clasp == &js_QNameClass.base);
+ }
+#ifdef __GNUC__ /* suppress bogus gcc warnings */
+ else uriobj = NULL;
+#endif
+
+ if (!(cx->fp->flags & JSFRAME_CONSTRUCTING)) {
+ /* Namespace called as function. */
+ if (argc == 1 && isNamespace) {
+ /* Namespace called with one Namespace argument is identity. */
+ *rval = urival;
+ return JS_TRUE;
+ }
+
+ /* Create and return a new Namespace object exactly as if constructed. */
+ obj = js_NewObject(cx, &js_NamespaceClass.base, NULL, NULL);
+ if (!obj)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(obj);
+ }
+ METER(xml_stats.namespaceobj);
+ METER(xml_stats.livenamespaceobj);
+
+ /*
+ * Create and connect private data to rooted obj early, so we don't have
+ * to worry about rooting string newborns hanging off of the private data
+ * further below.
+ */
+ empty = cx->runtime->emptyString;
+ ns = js_NewXMLNamespace(cx, empty, empty, JS_FALSE);
+ if (!ns)
+ return JS_FALSE;
+ if (!JS_SetPrivate(cx, obj, ns))
+ return JS_FALSE;
+ ns->object = obj;
+
+ if (argc == 1) {
+ if (isNamespace) {
+ ns2 = (JSXMLNamespace *) JS_GetPrivate(cx, uriobj);
+ ns->uri = ns2->uri;
+ ns->prefix = ns2->prefix;
+ } else if (isQName &&
+ (qn = (JSXMLQName *) JS_GetPrivate(cx, uriobj))->uri) {
+ ns->uri = qn->uri;
+ ns->prefix = qn->prefix;
+ } else {
+ ns->uri = js_ValueToString(cx, urival);
+ if (!ns->uri)
+ return JS_FALSE;
+
+ /* NULL here represents *undefined* in ECMA-357 13.2.2 3(c)iii. */
+ if (!IS_EMPTY(ns->uri))
+ ns->prefix = NULL;
+ }
+ } else if (argc == 2) {
+ if (isQName &&
+ (qn = (JSXMLQName *) JS_GetPrivate(cx, uriobj))->uri) {
+ ns->uri = qn->uri;
+ } else {
+ ns->uri = js_ValueToString(cx, urival);
+ if (!ns->uri)
+ return JS_FALSE;
+ }
+
+ prefixval = argv[0];
+ if (IS_EMPTY(ns->uri)) {
+ if (!JSVAL_IS_VOID(prefixval)) {
+ prefix = js_ValueToString(cx, prefixval);
+ if (!prefix)
+ return JS_FALSE;
+ if (!IS_EMPTY(prefix)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_XML_NAMESPACE,
+ js_ValueToPrintableString(cx,
+ STRING_TO_JSVAL(prefix)));
+ return JS_FALSE;
+ }
+ }
+ } else if (JSVAL_IS_VOID(prefixval) || !js_IsXMLName(cx, prefixval)) {
+ /* NULL here represents *undefined* in ECMA-357 13.2.2 4(d) etc. */
+ ns->prefix = NULL;
+ } else {
+ prefix = js_ValueToString(cx, prefixval);
+ if (!prefix)
+ return JS_FALSE;
+ ns->prefix = prefix;
+ }
+ }
+
+ return JS_TRUE;
+}
+
+static JSBool
+QName(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsval nameval, nsval;
+ JSBool isQName, isNamespace;
+ JSXMLQName *qn;
+ JSString *uri, *prefix, *name;
+ JSObject *nsobj;
+ JSClass *clasp;
+ JSXMLNamespace *ns;
+
+ nameval = argv[argc > 1];
+ isQName =
+ !JSVAL_IS_PRIMITIVE(nameval) &&
+ OBJ_GET_CLASS(cx, JSVAL_TO_OBJECT(nameval)) == &js_QNameClass.base;
+
+ if (!(cx->fp->flags & JSFRAME_CONSTRUCTING)) {
+ /* QName called as function. */
+ if (argc == 1 && isQName) {
+ /* QName called with one QName argument is identity. */
+ *rval = nameval;
+ return JS_TRUE;
+ }
+
+ /*
+ * Create and return a new QName object exactly as if constructed.
+ * Use the constructor's clasp so we can be shared by AttributeName
+ * (see below after this function).
+ */
+ obj = js_NewObject(cx,
+ JS_ValueToFunction(cx, argv[-2])->clasp,
+ NULL, NULL);
+ if (!obj)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(obj);
+ }
+ METER(xml_stats.qnameobj);
+ METER(xml_stats.liveqnameobj);
+
+ if (isQName) {
+ /* If namespace is not specified and name is a QName, clone it. */
+ qn = (JSXMLQName *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(nameval));
+ if (argc == 1) {
+ uri = qn->uri;
+ prefix = qn->prefix;
+ name = qn->localName;
+ goto out;
+ }
+
+ /* Namespace and qname were passed -- use the qname's localName. */
+ nameval = STRING_TO_JSVAL(qn->localName);
+ }
+
+ if (argc == 0) {
+ name = cx->runtime->emptyString;
+ } else {
+ name = js_ValueToString(cx, nameval);
+ if (!name)
+ return JS_FALSE;
+
+ /* Use argv[1] as a local root for name, even if it was not passed. */
+ argv[1] = STRING_TO_JSVAL(name);
+ }
+
+ nsval = argv[0];
+ if (argc == 1 || JSVAL_IS_VOID(nsval)) {
+ if (IS_STAR(name)) {
+ nsval = JSVAL_NULL;
+ } else {
+ if (!js_GetDefaultXMLNamespace(cx, &nsval))
+ return JS_FALSE;
+ }
+ }
+
+ if (JSVAL_IS_NULL(nsval)) {
+ /* NULL prefix represents *undefined* in ECMA-357 13.3.2 5(a). */
+ uri = prefix = NULL;
+ } else {
+ /*
+ * Inline specialization of the Namespace constructor called with
+ * nsval passed as the only argument, to compute the uri and prefix
+ * for the constructed namespace, without actually allocating the
+ * object or computing other members. See ECMA-357 13.3.2 6(a) and
+ * 13.2.2.
+ */
+ isNamespace = isQName = JS_FALSE;
+ if (!JSVAL_IS_PRIMITIVE(nsval)) {
+ nsobj = JSVAL_TO_OBJECT(nsval);
+ clasp = OBJ_GET_CLASS(cx, nsobj);
+ isNamespace = (clasp == &js_NamespaceClass.base);
+ isQName = (clasp == &js_QNameClass.base);
+ }
+#ifdef __GNUC__ /* suppress bogus gcc warnings */
+ else nsobj = NULL;
+#endif
+
+ if (isNamespace) {
+ ns = (JSXMLNamespace *) JS_GetPrivate(cx, nsobj);
+ uri = ns->uri;
+ prefix = ns->prefix;
+ } else if (isQName &&
+ (qn = (JSXMLQName *) JS_GetPrivate(cx, nsobj))->uri) {
+ uri = qn->uri;
+ prefix = qn->prefix;
+ } else {
+ uri = js_ValueToString(cx, nsval);
+ if (!uri)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(uri); /* local root */
+
+ /* NULL here represents *undefined* in ECMA-357 13.2.2 3(c)iii. */
+ prefix = IS_EMPTY(uri) ? cx->runtime->emptyString : NULL;
+ }
+ }
+
+out:
+ qn = js_NewXMLQName(cx, uri, prefix, name);
+ if (!qn)
+ return JS_FALSE;
+ if (!JS_SetPrivate(cx, obj, qn))
+ return JS_FALSE;
+ qn->object = obj;
+ return JS_TRUE;
+}
+
+static JSBool
+AttributeName(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ /*
+ * Since js_AttributeNameClass was initialized, obj will have that as its
+ * class, not js_QNameClass.
+ */
+ return QName(cx, obj, argc, argv, rval);
+}
+
+/*
+ * XMLArray library functions.
+ */
+static JSBool
+namespace_identity(const void *a, const void *b)
+{
+ const JSXMLNamespace *nsa = (const JSXMLNamespace *) a;
+ const JSXMLNamespace *nsb = (const JSXMLNamespace *) b;
+
+ if (nsa->prefix && nsb->prefix) {
+ if (!js_EqualStrings(nsa->prefix, nsb->prefix))
+ return JS_FALSE;
+ } else {
+ if (nsa->prefix || nsb->prefix)
+ return JS_FALSE;
+ }
+ return js_EqualStrings(nsa->uri, nsb->uri);
+}
+
+static JSBool
+attr_identity(const void *a, const void *b)
+{
+ const JSXML *xmla = (const JSXML *) a;
+ const JSXML *xmlb = (const JSXML *) b;
+
+ return qname_identity(xmla->name, xmlb->name);
+}
+
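+/*
+ * Cursors over a JSXMLArray are kept on a doubly-linked list rooted at
+ * array->cursors (threaded via cursor->next and cursor->prevp), so that
+ * XMLArrayInsert and XMLArrayDelete below can adjust the index of every
+ * live cursor, and XMLArrayCursorMark can keep each cursor's last-returned
+ * element (cursor->root) from being garbage collected.
+ */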
+static void
+XMLArrayCursorInit(JSXMLArrayCursor *cursor, JSXMLArray *array)
+{
+ JSXMLArrayCursor *next;
+
+ cursor->array = array;
+ cursor->index = 0;
+ next = cursor->next = array->cursors;
+ if (next)
+ next->prevp = &cursor->next;
+ cursor->prevp = &array->cursors;
+ array->cursors = cursor;
+ cursor->root = NULL;
+}
+
+static void
+XMLArrayCursorFinish(JSXMLArrayCursor *cursor)
+{
+ JSXMLArrayCursor *next;
+
+ if (!cursor->array)
+ return;
+ next = cursor->next;
+ if (next)
+ next->prevp = cursor->prevp;
+ *cursor->prevp = next;
+ cursor->array = NULL;
+}
+
+static void *
+XMLArrayCursorNext(JSXMLArrayCursor *cursor)
+{
+ JSXMLArray *array;
+
+ array = cursor->array;
+ if (!array || cursor->index >= array->length)
+ return NULL;
+ return cursor->root = array->vector[cursor->index++];
+}
+
+static void *
+XMLArrayCursorItem(JSXMLArrayCursor *cursor)
+{
+ JSXMLArray *array;
+
+ array = cursor->array;
+ if (!array || cursor->index >= array->length)
+ return NULL;
+ return cursor->root = array->vector[cursor->index];
+}
+
+static void
+XMLArrayCursorMark(JSContext *cx, JSXMLArrayCursor *cursor)
+{
+ while (cursor) {
+ GC_MARK(cx, cursor->root, "cursor->root");
+ cursor = cursor->next;
+ }
+}
+
+/* NB: called with null cx from the GC, via xml_mark => XMLArrayTrim. */
+static JSBool
+XMLArraySetCapacity(JSContext *cx, JSXMLArray *array, uint32 capacity)
+{
+ void **vector;
+
+ if (capacity == 0) {
+ /* We could let realloc(p, 0) free this, but purify gets confused. */
+ if (array->vector)
+ free(array->vector);
+ vector = NULL;
+ } else {
+ if ((size_t)capacity > ~(size_t)0 / sizeof(void *) ||
+ !(vector = (void **)
+ realloc(array->vector, capacity * sizeof(void *)))) {
+ if (cx)
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ }
+ array->capacity = JSXML_PRESET_CAPACITY | capacity;
+ array->vector = vector;
+ return JS_TRUE;
+}
+
+static void
+XMLArrayTrim(JSXMLArray *array)
+{
+ if (array->capacity & JSXML_PRESET_CAPACITY)
+ return;
+ if (array->length < array->capacity)
+ XMLArraySetCapacity(NULL, array, array->length);
+}
+
+static JSBool
+XMLArrayInit(JSContext *cx, JSXMLArray *array, uint32 capacity)
+{
+ array->length = array->capacity = 0;
+ array->vector = NULL;
+ array->cursors = NULL;
+ return capacity == 0 || XMLArraySetCapacity(cx, array, capacity);
+}
+
+static void
+XMLArrayFinish(JSContext *cx, JSXMLArray *array)
+{
+ JSXMLArrayCursor *cursor;
+
+ JS_free(cx, array->vector);
+
+ while ((cursor = array->cursors) != NULL)
+ XMLArrayCursorFinish(cursor);
+
+#ifdef DEBUG
+ memset(array, 0xd5, sizeof *array);
+#endif
+}
+
+#define XML_NOT_FOUND ((uint32) -1)
+
+static uint32
+XMLArrayFindMember(const JSXMLArray *array, void *elt, JSIdentityOp identity)
+{
+ void **vector;
+ uint32 i, n;
+
+ /* The identity op must not reallocate array->vector. */
+ vector = array->vector;
+ if (identity) {
+ for (i = 0, n = array->length; i < n; i++) {
+ if (identity(vector[i], elt))
+ return i;
+ }
+ } else {
+ for (i = 0, n = array->length; i < n; i++) {
+ if (vector[i] == elt)
+ return i;
+ }
+ }
+ return XML_NOT_FOUND;
+}
+
+/*
+ * Grow array vector capacity by powers of two to LINEAR_THRESHOLD, and after
+ * that, grow by LINEAR_INCREMENT. Both must be powers of two, and threshold
+ * should be greater than increment.
+ */
+#define LINEAR_THRESHOLD 256
+#define LINEAR_INCREMENT 32
+
+static JSBool
+XMLArrayAddMember(JSContext *cx, JSXMLArray *array, uint32 index, void *elt)
+{
+ uint32 capacity, i;
+ int log2;
+ void **vector;
+
+ if (index >= array->length) {
+ if (index >= JSXML_CAPACITY(array)) {
+ /* Arrange to clear JSXML_PRESET_CAPACITY from array->capacity. */
+ capacity = index + 1;
+ if (index >= LINEAR_THRESHOLD) {
+ capacity = JS_ROUNDUP(capacity, LINEAR_INCREMENT);
+ } else {
+ JS_CEILING_LOG2(log2, capacity);
+ capacity = JS_BIT(log2);
+ }
+ if ((size_t)capacity > ~(size_t)0 / sizeof(void *) ||
+ !(vector = (void **)
+ realloc(array->vector, capacity * sizeof(void *)))) {
+ JS_ReportOutOfMemory(cx);
+ return JS_FALSE;
+ }
+ array->capacity = capacity;
+ array->vector = vector;
+ for (i = array->length; i < index; i++)
+ vector[i] = NULL;
+ }
+ array->length = index + 1;
+ }
+
+ array->vector[index] = elt;
+ return JS_TRUE;
+}
+
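+/*
+ * Open a gap of n slots at index i, shifting existing members up and
+ * bumping the index of any live cursor positioned after i.
+ */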
+static JSBool
+XMLArrayInsert(JSContext *cx, JSXMLArray *array, uint32 i, uint32 n)
+{
+ uint32 j;
+ JSXMLArrayCursor *cursor;
+
+ j = array->length;
+ JS_ASSERT(i <= j);
+ if (!XMLArraySetCapacity(cx, array, j + n))
+ return JS_FALSE;
+
+ array->length = j + n;
+ JS_ASSERT(n != (uint32)-1);
+ while (j != i) {
+ --j;
+ array->vector[j + n] = array->vector[j];
+ }
+
+ for (cursor = array->cursors; cursor; cursor = cursor->next) {
+ if (cursor->index > i)
+ cursor->index += n;
+ }
+ return JS_TRUE;
+}
+
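+/*
+ * Remove and return the member at index. If compress is true, shift later
+ * members down and shrink the logical length; otherwise leave a NULL hole.
+ * Live cursors positioned after index are adjusted to compensate.
+ */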
+static void *
+XMLArrayDelete(JSContext *cx, JSXMLArray *array, uint32 index, JSBool compress)
+{
+ uint32 length;
+ void **vector, *elt;
+ JSXMLArrayCursor *cursor;
+
+ length = array->length;
+ if (index >= length)
+ return NULL;
+
+ vector = array->vector;
+ elt = vector[index];
+ if (compress) {
+ while (++index < length)
+ vector[index-1] = vector[index];
+ array->length = length - 1;
+ array->capacity = JSXML_CAPACITY(array);
+ } else {
+ vector[index] = NULL;
+ }
+
+ for (cursor = array->cursors; cursor; cursor = cursor->next) {
+ if (cursor->index > index)
+ --cursor->index;
+ }
+ return elt;
+}
+
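+/*
+ * Shrink the array to length members, freeing the vector entirely when the
+ * new length is 0. Callers must not have live cursors on the array.
+ */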
+static void
+XMLArrayTruncate(JSContext *cx, JSXMLArray *array, uint32 length)
+{
+ void **vector;
+
+ JS_ASSERT(!array->cursors);
+ if (length >= array->length)
+ return;
+
+ if (length == 0) {
+ if (array->vector)
+ free(array->vector);
+ vector = NULL;
+ } else {
+ vector = realloc(array->vector, length * sizeof(void *));
+ if (!vector)
+ return;
+ }
+
+ if (array->length > length)
+ array->length = length;
+ array->capacity = length;
+ array->vector = vector;
+}
+
+#define XMLARRAY_FIND_MEMBER(a,e,f) XMLArrayFindMember(a, (void *)(e), f)
+#define XMLARRAY_HAS_MEMBER(a,e,f) (XMLArrayFindMember(a, (void *)(e), f) != \
+ XML_NOT_FOUND)
+#define XMLARRAY_MEMBER(a,i,t) (((i) < (a)->length) \
+ ? (t *) (a)->vector[i] \
+ : NULL)
+#define XMLARRAY_SET_MEMBER(a,i,e) JS_BEGIN_MACRO \
+ if ((a)->length <= (i)) \
+ (a)->length = (i) + 1; \
+ ((a)->vector[i] = (void *)(e)); \
+ JS_END_MACRO
+#define XMLARRAY_ADD_MEMBER(x,a,i,e) XMLArrayAddMember(x, a, i, (void *)(e))
+#define XMLARRAY_INSERT(x,a,i,n) XMLArrayInsert(x, a, i, n)
+#define XMLARRAY_APPEND(x,a,e) XMLARRAY_ADD_MEMBER(x, a, (a)->length, (e))
+#define XMLARRAY_DELETE(x,a,i,c,t) ((t *) XMLArrayDelete(x, a, i, c))
+#define XMLARRAY_TRUNCATE(x,a,n) XMLArrayTruncate(x, a, n)
+
+/*
+ * Define XML setting property strings and constants early, so everyone can
+ * use the same names and their magic numbers (tinyids, flags).
+ */
+static const char js_ignoreComments_str[] = "ignoreComments";
+static const char js_ignoreProcessingInstructions_str[]
+ = "ignoreProcessingInstructions";
+static const char js_ignoreWhitespace_str[] = "ignoreWhitespace";
+static const char js_prettyPrinting_str[] = "prettyPrinting";
+static const char js_prettyIndent_str[] = "prettyIndent";
+
+/*
+ * NB: These XML static property tinyids must
+ * (a) not collide with the generic negative tinyids at the top of jsfun.c;
+ * (b) index their corresponding xml_static_props array elements.
+ * Don't change 'em!
+ */
+enum xml_static_tinyid {
+ XML_IGNORE_COMMENTS,
+ XML_IGNORE_PROCESSING_INSTRUCTIONS,
+ XML_IGNORE_WHITESPACE,
+ XML_PRETTY_PRINTING,
+ XML_PRETTY_INDENT
+};
+
+static JSBool
+xml_setting_getter(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ return JS_TRUE;
+}
+
+static JSBool
+xml_setting_setter(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSBool b;
+ uint8 flag;
+
+ JS_ASSERT(JSVAL_IS_INT(id));
+ if (!js_ValueToBoolean(cx, *vp, &b))
+ return JS_FALSE;
+
+ flag = JS_BIT(JSVAL_TO_INT(id));
+ if (b)
+ cx->xmlSettingFlags |= flag;
+ else
+ cx->xmlSettingFlags &= ~flag;
+ return JS_TRUE;
+}
+
+static JSPropertySpec xml_static_props[] = {
+ {js_ignoreComments_str, XML_IGNORE_COMMENTS, JSPROP_PERMANENT,
+ xml_setting_getter, xml_setting_setter},
+ {js_ignoreProcessingInstructions_str,
+ XML_IGNORE_PROCESSING_INSTRUCTIONS, JSPROP_PERMANENT,
+ xml_setting_getter, xml_setting_setter},
+ {js_ignoreWhitespace_str, XML_IGNORE_WHITESPACE, JSPROP_PERMANENT,
+ xml_setting_getter, xml_setting_setter},
+ {js_prettyPrinting_str, XML_PRETTY_PRINTING, JSPROP_PERMANENT,
+ xml_setting_getter, xml_setting_setter},
+ {js_prettyIndent_str, XML_PRETTY_INDENT, JSPROP_PERMANENT,
+ xml_setting_getter, NULL},
+ {0,0,0,0,0}
+};
+
+/* Derive cx->xmlSettingFlags bits from xml_static_props tinyids. */
+#define XSF_IGNORE_COMMENTS JS_BIT(XML_IGNORE_COMMENTS)
+#define XSF_IGNORE_PROCESSING_INSTRUCTIONS \
+ JS_BIT(XML_IGNORE_PROCESSING_INSTRUCTIONS)
+#define XSF_IGNORE_WHITESPACE JS_BIT(XML_IGNORE_WHITESPACE)
+#define XSF_PRETTY_PRINTING JS_BIT(XML_PRETTY_PRINTING)
+#define XSF_CACHE_VALID JS_BIT(XML_PRETTY_INDENT)
+
+/*
+ * Extra, unrelated but necessarily disjoint flag used by ParseNodeToXML.
+ * This flag means a couple of things:
+ *
+ * - The top JSXML created for a parse tree must have an object owning it.
+ *
+ * - The default namespace normally inherited from the temporary
+ * <parent xmlns='...'> tag that wraps a runtime-concatenated XML source
+ * string must, in the case of a precompiled XML object tree, be inherited
+ * via ad-hoc code in ParseNodeToXML.
+ *
+ * Because of the second purpose, we name this flag XSF_PRECOMPILED_ROOT.
+ */
+#define XSF_PRECOMPILED_ROOT (XSF_CACHE_VALID << 1)
+
+/* Macros for special-casing xml:, xmlns= and xmlns:foo= in ParseNodeToQName. */
+#define IS_XML(str) \
+ (JSSTRING_LENGTH(str) == 3 && IS_XML_CHARS(JSSTRING_CHARS(str)))
+
+#define IS_XMLNS(str) \
+ (JSSTRING_LENGTH(str) == 5 && IS_XMLNS_CHARS(JSSTRING_CHARS(str)))
+
+#define IS_XML_CHARS(chars) \
+ (JS_TOLOWER((chars)[0]) == 'x' && \
+ JS_TOLOWER((chars)[1]) == 'm' && \
+ JS_TOLOWER((chars)[2]) == 'l')
+
+#define HAS_NS_AFTER_XML(chars) \
+ (JS_TOLOWER((chars)[3]) == 'n' && \
+ JS_TOLOWER((chars)[4]) == 's')
+
+#define IS_XMLNS_CHARS(chars) \
+ (IS_XML_CHARS(chars) && HAS_NS_AFTER_XML(chars))
+
+#define STARTS_WITH_XML(chars,length) \
+ (length >= 3 && IS_XML_CHARS(chars))
+
+static const char xml_namespace_str[] = "http://www.w3.org/XML/1998/namespace";
+static const char xmlns_namespace_str[] = "http://www.w3.org/2000/xmlns/";
+
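+/*
+ * Map a nullary XML-name parse node, possibly of the form prefix:localName,
+ * to a JSXMLQName, resolving any prefix against inScopeNSes and special-
+ * casing the reserved xml and xmlns prefixes.
+ */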
+static JSXMLQName *
+ParseNodeToQName(JSContext *cx, JSParseNode *pn, JSXMLArray *inScopeNSes,
+ JSBool isAttributeName)
+{
+ JSString *str, *uri, *prefix, *localName;
+ size_t length, offset;
+ const jschar *start, *limit, *colon;
+ uint32 n;
+ JSXMLNamespace *ns;
+
+ JS_ASSERT(pn->pn_arity == PN_NULLARY);
+ str = ATOM_TO_STRING(pn->pn_atom);
+ length = JSSTRING_LENGTH(str);
+ start = JSSTRING_CHARS(str);
+ JS_ASSERT(length != 0 && *start != '@');
+ JS_ASSERT(length != 1 || *start != '*');
+
+ uri = cx->runtime->emptyString;
+ limit = start + length;
+ colon = js_strchr_limit(start, ':', limit);
+ if (colon) {
+ offset = PTRDIFF(colon, start, jschar);
+ prefix = js_NewDependentString(cx, str, 0, offset, 0);
+ if (!prefix)
+ return NULL;
+
+ if (STARTS_WITH_XML(start, offset)) {
+ if (offset == 3) {
+ uri = JS_InternString(cx, xml_namespace_str);
+ if (!uri)
+ return NULL;
+ } else if (offset == 5 && HAS_NS_AFTER_XML(start)) {
+ uri = JS_InternString(cx, xmlns_namespace_str);
+ if (!uri)
+ return NULL;
+ } else {
+ uri = NULL;
+ }
+ } else {
+ uri = NULL;
+ n = inScopeNSes->length;
+ while (n != 0) {
+ --n;
+ ns = XMLARRAY_MEMBER(inScopeNSes, n, JSXMLNamespace);
+ if (ns->prefix && js_EqualStrings(ns->prefix, prefix)) {
+ uri = ns->uri;
+ break;
+ }
+ }
+ }
+
+ if (!uri) {
+ js_ReportCompileErrorNumber(cx, pn,
+ JSREPORT_PN | JSREPORT_ERROR,
+ JSMSG_BAD_XML_NAMESPACE,
+ js_ValueToPrintableString(cx,
+ STRING_TO_JSVAL(prefix)));
+ return NULL;
+ }
+
+ localName = js_NewStringCopyN(cx, colon + 1, length - (offset + 1), 0);
+ if (!localName)
+ return NULL;
+ } else {
+ if (isAttributeName) {
+ /*
+ * An unprefixed attribute is not in any namespace, so set prefix
+ * as well as uri to the empty string.
+ */
+ prefix = uri;
+ } else {
+ /*
+ * Loop from back to front looking for the closest declared default
+ * namespace.
+ */
+ n = inScopeNSes->length;
+ while (n != 0) {
+ --n;
+ ns = XMLARRAY_MEMBER(inScopeNSes, n, JSXMLNamespace);
+ if (!ns->prefix || IS_EMPTY(ns->prefix)) {
+ uri = ns->uri;
+ break;
+ }
+ }
+ prefix = IS_EMPTY(uri) ? cx->runtime->emptyString : NULL;
+ }
+ localName = str;
+ }
+
+ return js_NewXMLQName(cx, uri, prefix, localName);
+}
+
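+/*
+ * Strip leading and trailing XML whitespace from str, returning str itself
+ * when nothing needs to be removed and a dependent substring otherwise.
+ */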
+static JSString *
+ChompXMLWhitespace(JSContext *cx, JSString *str)
+{
+ size_t length, newlength, offset;
+ const jschar *cp, *start, *end;
+ jschar c;
+
+ length = JSSTRING_LENGTH(str);
+ for (cp = start = JSSTRING_CHARS(str), end = cp + length; cp < end; cp++) {
+ c = *cp;
+ if (!JS_ISXMLSPACE(c))
+ break;
+ }
+ while (end > cp) {
+ c = end[-1];
+ if (!JS_ISXMLSPACE(c))
+ break;
+ --end;
+ }
+ newlength = PTRDIFF(end, cp, jschar);
+ if (newlength == length)
+ return str;
+ offset = PTRDIFF(cp, start, jschar);
+ return js_NewDependentString(cx, str, offset, newlength, 0);
+}
+
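+/*
+ * Convert a parse-node tree for an XML literal or XML source string into a
+ * JSXML tree, honoring the XSF_* flags passed in via flags.
+ */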
+static JSXML *
+ParseNodeToXML(JSContext *cx, JSParseNode *pn, JSXMLArray *inScopeNSes,
+ uintN flags)
+{
+ JSXML *xml, *kid, *attr, *attrj;
+ JSString *str;
+ uint32 length, n, i, j;
+ JSParseNode *pn2, *pn3, *head, **pnp;
+ JSXMLNamespace *ns;
+ JSXMLQName *qn, *attrjqn;
+ JSXMLClass xml_class;
+ int stackDummy;
+
+ if (!JS_CHECK_STACK_SIZE(cx, stackDummy)) {
+ js_ReportCompileErrorNumber(cx, pn, JSREPORT_PN | JSREPORT_ERROR,
+ JSMSG_OVER_RECURSED);
+ return NULL;
+ }
+
+#define PN2X_SKIP_CHILD ((JSXML *) 1)
+
+ /*
+ * Cases return early to avoid common code that gets an outermost xml's
+ * object, which protects GC-things owned by xml and its descendants from
+ * garbage collection.
+ */
+ xml = NULL;
+ if (!js_EnterLocalRootScope(cx))
+ return NULL;
+ switch (pn->pn_type) {
+ case TOK_XMLELEM:
+ length = inScopeNSes->length;
+ pn2 = pn->pn_head;
+ xml = ParseNodeToXML(cx, pn2, inScopeNSes, flags);
+ if (!xml)
+ goto fail;
+
+ flags &= ~XSF_PRECOMPILED_ROOT;
+ n = pn->pn_count;
+ JS_ASSERT(n >= 2);
+ n -= 2;
+ if (!XMLArraySetCapacity(cx, &xml->xml_kids, n))
+ goto fail;
+
+ i = 0;
+ while ((pn2 = pn2->pn_next) != NULL) {
+ if (!pn2->pn_next) {
+ /* Don't append the end tag! */
+ JS_ASSERT(pn2->pn_type == TOK_XMLETAGO);
+ break;
+ }
+
+ if ((flags & XSF_IGNORE_WHITESPACE) &&
+ n > 1 && pn2->pn_type == TOK_XMLSPACE) {
+ --n;
+ continue;
+ }
+
+ kid = ParseNodeToXML(cx, pn2, inScopeNSes, flags);
+ if (kid == PN2X_SKIP_CHILD) {
+ --n;
+ continue;
+ }
+
+ if (!kid)
+ goto fail;
+
+ /* Store kid in xml right away, to protect it from GC. */
+ XMLARRAY_SET_MEMBER(&xml->xml_kids, i, kid);
+ kid->parent = xml;
+ ++i;
+
+ /* XXX where is this documented in an XML spec, or in E4X? */
+ if ((flags & XSF_IGNORE_WHITESPACE) &&
+ n > 1 && kid->xml_class == JSXML_CLASS_TEXT) {
+ str = ChompXMLWhitespace(cx, kid->xml_value);
+ if (!str)
+ goto fail;
+ kid->xml_value = str;
+ }
+ }
+
+ JS_ASSERT(i == n);
+ if (n < pn->pn_count - 2)
+ XMLArrayTrim(&xml->xml_kids);
+ XMLARRAY_TRUNCATE(cx, inScopeNSes, length);
+ break;
+
+ case TOK_XMLLIST:
+ xml = js_NewXML(cx, JSXML_CLASS_LIST);
+ if (!xml)
+ goto fail;
+
+ n = pn->pn_count;
+ if (!XMLArraySetCapacity(cx, &xml->xml_kids, n))
+ goto fail;
+
+ i = 0;
+ for (pn2 = pn->pn_head; pn2; pn2 = pn2->pn_next) {
+ /*
+ * Always ignore insignificant whitespace in lists -- we shouldn't
+ * condition this on an XML.ignoreWhitespace setting when the list
+ * constructor is XMLList (note XML/XMLList unification hazard).
+ */
+ if (pn2->pn_type == TOK_XMLSPACE) {
+ --n;
+ continue;
+ }
+
+ kid = ParseNodeToXML(cx, pn2, inScopeNSes, flags);
+ if (kid == PN2X_SKIP_CHILD) {
+ --n;
+ continue;
+ }
+
+ if (!kid)
+ goto fail;
+
+ XMLARRAY_SET_MEMBER(&xml->xml_kids, i, kid);
+ ++i;
+ }
+
+ if (n < pn->pn_count)
+ XMLArrayTrim(&xml->xml_kids);
+ break;
+
+ case TOK_XMLSTAGO:
+ case TOK_XMLPTAGC:
+ length = inScopeNSes->length;
+ pn2 = pn->pn_head;
+ JS_ASSERT(pn2->pn_type == TOK_XMLNAME);
+ if (pn2->pn_arity == PN_LIST)
+ goto syntax;
+
+ xml = js_NewXML(cx, JSXML_CLASS_ELEMENT);
+ if (!xml)
+ goto fail;
+
+ /* First pass: check syntax and process namespace declarations. */
+ JS_ASSERT(pn->pn_count >= 1);
+ n = pn->pn_count - 1;
+ pnp = &pn2->pn_next;
+ head = *pnp;
+ while ((pn2 = *pnp) != NULL) {
+ size_t length;
+ const jschar *chars;
+
+ if (pn2->pn_type != TOK_XMLNAME || pn2->pn_arity != PN_NULLARY)
+ goto syntax;
+
+ /* Enforce "Well-formedness constraint: Unique Att Spec". */
+ for (pn3 = head; pn3 != pn2; pn3 = pn3->pn_next->pn_next) {
+ if (pn3->pn_atom == pn2->pn_atom) {
+ js_ReportCompileErrorNumber(cx, pn2,
+ JSREPORT_PN | JSREPORT_ERROR,
+ JSMSG_DUPLICATE_XML_ATTR,
+ js_ValueToPrintableString(cx,
+ ATOM_KEY(pn2->pn_atom)));
+ goto fail;
+ }
+ }
+
+ str = ATOM_TO_STRING(pn2->pn_atom);
+ pn2 = pn2->pn_next;
+ JS_ASSERT(pn2);
+ if (pn2->pn_type != TOK_XMLATTR)
+ goto syntax;
+
+ length = JSSTRING_LENGTH(str);
+ chars = JSSTRING_CHARS(str);
+ if (length >= 5 &&
+ IS_XMLNS_CHARS(chars) &&
+ (length == 5 || chars[5] == ':')) {
+ JSString *uri, *prefix;
+
+ uri = ATOM_TO_STRING(pn2->pn_atom);
+ if (length == 5) {
+ /* 10.3.2.1. Step 6(h)(i)(1)(a). */
+ prefix = cx->runtime->emptyString;
+ } else {
+ prefix = js_NewStringCopyN(cx, chars + 6, length - 6, 0);
+ if (!prefix)
+ goto fail;
+ }
+
+ /*
+ * Once the new ns is appended to xml->xml_namespaces, it is
+ * protected from GC by the object that owns xml -- which is
+ * either xml->object if outermost, or the object owning xml's
+ * oldest ancestor if !outermost.
+ */
+ ns = js_NewXMLNamespace(cx, prefix, uri, JS_TRUE);
+ if (!ns)
+ goto fail;
+
+ /*
+ * Don't add a namespace that's already in scope. If someone
+ * extracts a child property from its parent via [[Get]], then
+ * we enforce the invariant, noted many times in ECMA-357, that
+ * the child's namespaces form a possibly-improper superset of
+ * its ancestors' namespaces.
+ */
+ if (!XMLARRAY_HAS_MEMBER(inScopeNSes, ns, namespace_identity)) {
+ if (!XMLARRAY_APPEND(cx, inScopeNSes, ns) ||
+ !XMLARRAY_APPEND(cx, &xml->xml_namespaces, ns)) {
+ goto fail;
+ }
+ }
+
+ JS_ASSERT(n >= 2);
+ n -= 2;
+ *pnp = pn2->pn_next;
+ /* XXXbe recycle pn2 */
+ continue;
+ }
+
+ pnp = &pn2->pn_next;
+ }
+
+ /*
+ * If called from js_ParseNodeToXMLObject, emulate the effect of the
+ * <parent xmlns='%s'>...</parent> wrapping done by "ToXML Applied to
+ * the String Type" (ECMA-357 10.3.1).
+ */
+ if (flags & XSF_PRECOMPILED_ROOT) {
+ JS_ASSERT(length >= 1);
+ ns = XMLARRAY_MEMBER(inScopeNSes, 0, JSXMLNamespace);
+ JS_ASSERT(!XMLARRAY_HAS_MEMBER(&xml->xml_namespaces, ns,
+ namespace_identity));
+ ns = js_NewXMLNamespace(cx, ns->prefix, ns->uri, JS_FALSE);
+ if (!ns)
+ goto fail;
+ if (!XMLARRAY_APPEND(cx, &xml->xml_namespaces, ns))
+ goto fail;
+ }
+ XMLArrayTrim(&xml->xml_namespaces);
+
+ /* Second pass: process tag name and attributes, using namespaces. */
+ pn2 = pn->pn_head;
+ qn = ParseNodeToQName(cx, pn2, inScopeNSes, JS_FALSE);
+ if (!qn)
+ goto fail;
+ xml->name = qn;
+
+ JS_ASSERT((n & 1) == 0);
+ n >>= 1;
+ if (!XMLArraySetCapacity(cx, &xml->xml_attrs, n))
+ goto fail;
+
+ for (i = 0; (pn2 = pn2->pn_next) != NULL; i++) {
+ qn = ParseNodeToQName(cx, pn2, inScopeNSes, JS_TRUE);
+ if (!qn) {
+ xml->xml_attrs.length = i;
+ goto fail;
+ }
+
+ /*
+ * Enforce "Well-formedness constraint: Unique Att Spec", part 2:
+ * this time checking local name and namespace URI.
+ */
+ for (j = 0; j < i; j++) {
+ attrj = XMLARRAY_MEMBER(&xml->xml_attrs, j, JSXML);
+ attrjqn = attrj->name;
+ if (js_EqualStrings(attrjqn->uri, qn->uri) &&
+ js_EqualStrings(attrjqn->localName, qn->localName)) {
+ js_ReportCompileErrorNumber(cx, pn2,
+ JSREPORT_PN | JSREPORT_ERROR,
+ JSMSG_DUPLICATE_XML_ATTR,
+ js_ValueToPrintableString(cx,
+ ATOM_KEY(pn2->pn_atom)));
+ goto fail;
+ }
+ }
+
+ pn2 = pn2->pn_next;
+ JS_ASSERT(pn2);
+ JS_ASSERT(pn2->pn_type == TOK_XMLATTR);
+
+ attr = js_NewXML(cx, JSXML_CLASS_ATTRIBUTE);
+ if (!attr)
+ goto fail;
+
+ XMLARRAY_SET_MEMBER(&xml->xml_attrs, i, attr);
+ attr->parent = xml;
+ attr->name = qn;
+ attr->xml_value = ATOM_TO_STRING(pn2->pn_atom);
+ }
+
+ /* Point tag closes its own namespace scope. */
+ if (pn->pn_type == TOK_XMLPTAGC)
+ XMLARRAY_TRUNCATE(cx, inScopeNSes, length);
+ break;
+
+ case TOK_XMLSPACE:
+ case TOK_XMLTEXT:
+ case TOK_XMLCDATA:
+ case TOK_XMLCOMMENT:
+ case TOK_XMLPI:
+ str = ATOM_TO_STRING(pn->pn_atom);
+ qn = NULL;
+ if (pn->pn_type == TOK_XMLCOMMENT) {
+ if (flags & XSF_IGNORE_COMMENTS)
+ goto skip_child;
+ xml_class = JSXML_CLASS_COMMENT;
+ } else if (pn->pn_type == TOK_XMLPI) {
+ if (IS_XML(str)) {
+ js_ReportCompileErrorNumber(cx, pn,
+ JSREPORT_PN | JSREPORT_ERROR,
+ JSMSG_RESERVED_ID,
+ js_ValueToPrintableString(cx,
+ STRING_TO_JSVAL(str)));
+ goto fail;
+ }
+
+ if (flags & XSF_IGNORE_PROCESSING_INSTRUCTIONS)
+ goto skip_child;
+
+ qn = ParseNodeToQName(cx, pn, inScopeNSes, JS_FALSE);
+ if (!qn)
+ goto fail;
+
+ str = pn->pn_atom2
+ ? ATOM_TO_STRING(pn->pn_atom2)
+ : cx->runtime->emptyString;
+ xml_class = JSXML_CLASS_PROCESSING_INSTRUCTION;
+ } else {
+ /* CDATA section content, or element text. */
+ xml_class = JSXML_CLASS_TEXT;
+ }
+
+ xml = js_NewXML(cx, xml_class);
+ if (!xml)
+ goto fail;
+ xml->name = qn;
+ if (pn->pn_type == TOK_XMLSPACE)
+ xml->xml_flags |= XMLF_WHITESPACE_TEXT;
+ xml->xml_value = str;
+ break;
+
+ default:
+ goto syntax;
+ }
+
+ js_LeaveLocalRootScopeWithResult(cx, (jsval) xml);
+ if ((flags & XSF_PRECOMPILED_ROOT) && !js_GetXMLObject(cx, xml))
+ return NULL;
+ return xml;
+
+skip_child:
+ js_LeaveLocalRootScope(cx);
+ return PN2X_SKIP_CHILD;
+
+#undef PN2X_SKIP_CHILD
+
+syntax:
+ js_ReportCompileErrorNumber(cx, pn, JSREPORT_PN | JSREPORT_ERROR,
+ JSMSG_BAD_XML_MARKUP);
+fail:
+ js_LeaveLocalRootScope(cx);
+ return NULL;
+}
+
+/*
+ * XML helper, object-ops, and library functions. We start with the helpers,
+ * in ECMA-357 order, but merging XML (9.1) and XMLList (9.2) helpers.
+ */
+static JSBool
+GetXMLSetting(JSContext *cx, const char *name, jsval *vp)
+{
+ jsval v;
+
+ if (!js_FindClassObject(cx, NULL, INT_TO_JSID(JSProto_XML), &v))
+ return JS_FALSE;
+ if (!VALUE_IS_FUNCTION(cx, v)) {
+ *vp = JSVAL_VOID;
+ return JS_TRUE;
+ }
+ return JS_GetProperty(cx, JSVAL_TO_OBJECT(v), name, vp);
+}
+
+static JSBool
+FillSettingsCache(JSContext *cx)
+{
+ int i;
+ const char *name;
+ jsval v;
+ JSBool isSet;
+
+ /* Note: XML_PRETTY_INDENT is not a boolean setting. */
+ for (i = XML_IGNORE_COMMENTS; i < XML_PRETTY_INDENT; i++) {
+ name = xml_static_props[i].name;
+ if (!GetXMLSetting(cx, name, &v) || !js_ValueToBoolean(cx, v, &isSet))
+ return JS_FALSE;
+ if (isSet)
+ cx->xmlSettingFlags |= JS_BIT(i);
+ else
+ cx->xmlSettingFlags &= ~JS_BIT(i);
+ }
+
+ cx->xmlSettingFlags |= XSF_CACHE_VALID;
+ return JS_TRUE;
+}
+
+static JSBool
+GetBooleanXMLSetting(JSContext *cx, const char *name, JSBool *bp)
+{
+ int i;
+
+ if (!(cx->xmlSettingFlags & XSF_CACHE_VALID) && !FillSettingsCache(cx))
+ return JS_FALSE;
+
+ for (i = 0; xml_static_props[i].name; i++) {
+ if (!strcmp(xml_static_props[i].name, name)) {
+ *bp = (cx->xmlSettingFlags & JS_BIT(i)) != 0;
+ return JS_TRUE;
+ }
+ }
+ *bp = JS_FALSE;
+ return JS_TRUE;
+}
+
+static JSBool
+GetUint32XMLSetting(JSContext *cx, const char *name, uint32 *uip)
+{
+ jsval v;
+
+ return GetXMLSetting(cx, name, &v) && js_ValueToECMAUint32(cx, v, uip);
+}
+
+static JSBool
+GetXMLSettingFlags(JSContext *cx, uintN *flagsp)
+{
+ JSBool flag;
+
+ /* Just get the first flag to validate the setting flags cache. */
+ if (!GetBooleanXMLSetting(cx, js_ignoreComments_str, &flag))
+ return JS_FALSE;
+ *flagsp = cx->xmlSettingFlags;
+ return JS_TRUE;
+}
+
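+/*
+ * Parse an XML source string by wrapping it in a temporary
+ * <parent xmlns='...'>...</parent> element carrying the default XML
+ * namespace, then handing the resulting parse tree to ParseNodeToXML
+ * (see "ToXML Applied to the String Type", ECMA-357 10.3.1).
+ */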
+static JSXML *
+ParseXMLSource(JSContext *cx, JSString *src)
+{
+ jsval nsval;
+ JSXMLNamespace *ns;
+ size_t urilen, srclen, length, offset, dstlen;
+ jschar *chars;
+ const jschar *srcp, *endp;
+ void *mark;
+ JSTokenStream *ts;
+ uintN lineno;
+ JSStackFrame *fp;
+ JSOp op;
+ JSParseNode *pn;
+ JSXML *xml;
+ JSXMLArray nsarray;
+ uintN flags;
+
+ static const char prefix[] = "<parent xmlns='";
+ static const char middle[] = "'>";
+ static const char suffix[] = "</parent>";
+
+#define constrlen(constr) (sizeof(constr) - 1)
+
+ if (!js_GetDefaultXMLNamespace(cx, &nsval))
+ return NULL;
+ ns = (JSXMLNamespace *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(nsval));
+
+ urilen = JSSTRING_LENGTH(ns->uri);
+ srclen = JSSTRING_LENGTH(src);
+ length = constrlen(prefix) + urilen + constrlen(middle) + srclen +
+ constrlen(suffix);
+
+ chars = (jschar *) JS_malloc(cx, (length + 1) * sizeof(jschar));
+ if (!chars)
+ return NULL;
+
+ dstlen = length;
+ js_InflateStringToBuffer(cx, prefix, constrlen(prefix), chars, &dstlen);
+ offset = dstlen;
+ js_strncpy(chars + offset, JSSTRING_CHARS(ns->uri), urilen);
+ offset += urilen;
+ dstlen = length - offset + 1;
+ js_InflateStringToBuffer(cx, middle, constrlen(middle), chars + offset,
+ &dstlen);
+ offset += dstlen;
+ srcp = JSSTRING_CHARS(src);
+ js_strncpy(chars + offset, srcp, srclen);
+ offset += srclen;
+ dstlen = length - offset + 1;
+ js_InflateStringToBuffer(cx, suffix, constrlen(suffix), chars + offset,
+ &dstlen);
+ chars[offset + dstlen] = 0;
+
+ mark = JS_ARENA_MARK(&cx->tempPool);
+ ts = js_NewBufferTokenStream(cx, chars, length);
+ if (!ts)
+ return NULL;
+ for (fp = cx->fp; fp && !fp->pc; fp = fp->down)
+ continue;
+ if (fp) {
+ op = (JSOp) *fp->pc;
+ if (op == JSOP_TOXML || op == JSOP_TOXMLLIST) {
+ ts->filename = fp->script->filename;
+ lineno = js_PCToLineNumber(cx, fp->script, fp->pc);
+ for (endp = srcp + srclen; srcp < endp; srcp++)
+ if (*srcp == '\n')
+ --lineno;
+ ts->lineno = lineno;
+ }
+ }
+
+ JS_KEEP_ATOMS(cx->runtime);
+ pn = js_ParseXMLTokenStream(cx, cx->fp->scopeChain, ts, JS_FALSE);
+ xml = NULL;
+ if (pn && XMLArrayInit(cx, &nsarray, 1)) {
+ if (GetXMLSettingFlags(cx, &flags))
+ xml = ParseNodeToXML(cx, pn, &nsarray, flags);
+
+ XMLArrayFinish(cx, &nsarray);
+ }
+ JS_UNKEEP_ATOMS(cx->runtime);
+
+ JS_ARENA_RELEASE(&cx->tempPool, mark);
+ JS_free(cx, chars);
+ return xml;
+
+#undef constrlen
+}
+
+/*
+ * Errata in 10.3.1, 10.4.1, and 13.4.4.24 (at least).
+ *
+ * 10.3.1 Step 6(a) fails to NOTE that implementations that do not enforce
+ * the constraint:
+ *
+ * for all x belonging to XML:
+ * x.[[InScopeNamespaces]] >= x.[[Parent]].[[InScopeNamespaces]]
+ *
+ * must union x.[[InScopeNamespaces]] into x[0].[[InScopeNamespaces]] here
+ * (in new sub-step 6(a), renumbering the others to (b) and (c)).
+ *
+ * Same goes for 10.4.1 Step 7(a).
+ *
+ * In order for XML.prototype.namespaceDeclarations() to work correctly, the
+ * default namespace thereby unioned into x[0].[[InScopeNamespaces]] must be
+ * flagged as not declared, so that 13.4.4.24 Step 8(a) can exclude all such
+ * undeclared namespaces associated with x not belonging to ancestorNS.
+ */
+static JSXML *
+OrphanXMLChild(JSContext *cx, JSXML *xml, uint32 i)
+{
+ JSXMLNamespace *ns;
+
+ ns = XMLARRAY_MEMBER(&xml->xml_namespaces, 0, JSXMLNamespace);
+ xml = XMLARRAY_MEMBER(&xml->xml_kids, i, JSXML);
+ if (!ns || !xml)
+ return xml;
+ if (xml->xml_class == JSXML_CLASS_ELEMENT) {
+ if (!XMLARRAY_APPEND(cx, &xml->xml_namespaces, ns))
+ return NULL;
+ ns->declared = JS_FALSE;
+ }
+ xml->parent = NULL;
+ return xml;
+}
+
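+/* ECMA-357 10.3: convert an arbitrary value to a single XML object. */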
+static JSObject *
+ToXML(JSContext *cx, jsval v)
+{
+ JSObject *obj;
+ JSXML *xml;
+ JSClass *clasp;
+ JSString *str;
+ uint32 length;
+
+ if (JSVAL_IS_PRIMITIVE(v)) {
+ if (JSVAL_IS_NULL(v) || JSVAL_IS_VOID(v))
+ goto bad;
+ } else {
+ obj = JSVAL_TO_OBJECT(v);
+ if (OBJECT_IS_XML(cx, obj)) {
+ xml = (JSXML *) JS_GetPrivate(cx, obj);
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ if (xml->xml_kids.length != 1)
+ goto bad;
+ xml = XMLARRAY_MEMBER(&xml->xml_kids, 0, JSXML);
+ if (xml) {
+ JS_ASSERT(xml->xml_class != JSXML_CLASS_LIST);
+ return js_GetXMLObject(cx, xml);
+ }
+ }
+ return obj;
+ }
+
+ clasp = OBJ_GET_CLASS(cx, obj);
+ if (clasp->flags & JSCLASS_DOCUMENT_OBSERVER) {
+ JS_ASSERT(0);
+ }
+
+ if (clasp != &js_StringClass &&
+ clasp != &js_NumberClass &&
+ clasp != &js_BooleanClass) {
+ goto bad;
+ }
+ }
+
+ str = js_ValueToString(cx, v);
+ if (!str)
+ return NULL;
+ if (IS_EMPTY(str)) {
+ length = 0;
+#ifdef __GNUC__ /* suppress bogus gcc warnings */
+ xml = NULL;
+#endif
+ } else {
+ xml = ParseXMLSource(cx, str);
+ if (!xml)
+ return NULL;
+ length = JSXML_LENGTH(xml);
+ }
+
+ if (length == 0) {
+ obj = js_NewXMLObject(cx, JSXML_CLASS_TEXT);
+ if (!obj)
+ return NULL;
+ } else if (length == 1) {
+ xml = OrphanXMLChild(cx, xml, 0);
+ if (!xml)
+ return NULL;
+ obj = js_GetXMLObject(cx, xml);
+ if (!obj)
+ return NULL;
+ } else {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_SYNTAX_ERROR);
+ return NULL;
+ }
+ return obj;
+
+bad:
+ str = js_DecompileValueGenerator(cx, JSDVG_IGNORE_STACK, v, NULL);
+ if (str) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_XML_CONVERSION,
+ JS_GetStringBytes(str));
+ }
+ return NULL;
+}
+
+static JSBool
+Append(JSContext *cx, JSXML *list, JSXML *kid);
+
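+/* ECMA-357 10.4: convert an arbitrary value to an XMLList object. */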
+static JSObject *
+ToXMLList(JSContext *cx, jsval v)
+{
+ JSObject *obj, *listobj;
+ JSXML *xml, *list, *kid;
+ JSClass *clasp;
+ JSString *str;
+ uint32 i, length;
+
+ if (JSVAL_IS_PRIMITIVE(v)) {
+ if (JSVAL_IS_NULL(v) || JSVAL_IS_VOID(v))
+ goto bad;
+ } else {
+ obj = JSVAL_TO_OBJECT(v);
+ if (OBJECT_IS_XML(cx, obj)) {
+ xml = (JSXML *) JS_GetPrivate(cx, obj);
+ if (xml->xml_class != JSXML_CLASS_LIST) {
+ listobj = js_NewXMLObject(cx, JSXML_CLASS_LIST);
+ if (!listobj)
+ return NULL;
+ list = (JSXML *) JS_GetPrivate(cx, listobj);
+ if (!Append(cx, list, xml))
+ return NULL;
+ return listobj;
+ }
+ return obj;
+ }
+
+ clasp = OBJ_GET_CLASS(cx, obj);
+ if (clasp->flags & JSCLASS_DOCUMENT_OBSERVER) {
+ JS_ASSERT(0);
+ }
+
+ if (clasp != &js_StringClass &&
+ clasp != &js_NumberClass &&
+ clasp != &js_BooleanClass) {
+ goto bad;
+ }
+ }
+
+ str = js_ValueToString(cx, v);
+ if (!str)
+ return NULL;
+ if (IS_EMPTY(str)) {
+ xml = NULL;
+ length = 0;
+ } else {
+ if (!js_EnterLocalRootScope(cx))
+ return NULL;
+ xml = ParseXMLSource(cx, str);
+ if (!xml) {
+ js_LeaveLocalRootScope(cx);
+ return NULL;
+ }
+ length = JSXML_LENGTH(xml);
+ }
+
+ listobj = js_NewXMLObject(cx, JSXML_CLASS_LIST);
+ if (listobj) {
+ list = (JSXML *) JS_GetPrivate(cx, listobj);
+ for (i = 0; i < length; i++) {
+ kid = OrphanXMLChild(cx, xml, i);
+ if (!kid || !Append(cx, list, kid)) {
+ listobj = NULL;
+ break;
+ }
+ }
+ }
+
+ if (xml)
+ js_LeaveLocalRootScopeWithResult(cx, (jsval) listobj);
+ return listobj;
+
+bad:
+ str = js_DecompileValueGenerator(cx, JSDVG_IGNORE_STACK, v, NULL);
+ if (str) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_XMLLIST_CONVERSION,
+ JS_GetStringBytes(str));
+ }
+ return NULL;
+}
+
+/*
+ * ECMA-357 10.2.1 Steps 5-7 pulled out as common subroutines of XMLToXMLString
+ * and their library-public js_* counterparts. The guts of MakeXMLCDataString,
+ * MakeXMLCommentString, and MakeXMLPIString are further factored into a common
+ * MakeXMLSpecialString subroutine.
+ *
+ * These functions take ownership of sb->base, if sb is non-null, in all cases
+ * of success or failure.
+ */
+static JSString *
+MakeXMLSpecialString(JSContext *cx, JSStringBuffer *sb,
+ JSString *str, JSString *str2,
+ const jschar *prefix, size_t prefixlength,
+ const jschar *suffix, size_t suffixlength)
+{
+ JSStringBuffer localSB;
+ size_t length, length2, newlength;
+ jschar *bp, *base;
+
+ if (!sb) {
+ sb = &localSB;
+ js_InitStringBuffer(sb);
+ }
+
+ length = JSSTRING_LENGTH(str);
+ length2 = str2 ? JSSTRING_LENGTH(str2) : 0;
+ newlength = STRING_BUFFER_OFFSET(sb) +
+ prefixlength + length + ((length2 != 0) ? 1 + length2 : 0) +
+ suffixlength;
+ bp = base = (jschar *)
+ JS_realloc(cx, sb->base, (newlength + 1) * sizeof(jschar));
+ if (!bp) {
+ js_FinishStringBuffer(sb);
+ return NULL;
+ }
+
+ bp += STRING_BUFFER_OFFSET(sb);
+ js_strncpy(bp, prefix, prefixlength);
+ bp += prefixlength;
+ js_strncpy(bp, JSSTRING_CHARS(str), length);
+ bp += length;
+ if (length2 != 0) {
+ *bp++ = (jschar) ' ';
+ js_strncpy(bp, JSSTRING_CHARS(str2), length2);
+ bp += length2;
+ }
+ js_strncpy(bp, suffix, suffixlength);
+ bp[suffixlength] = 0;
+
+ str = js_NewString(cx, base, newlength, 0);
+ if (!str)
+ free(base);
+ return str;
+}
+
+static JSString *
+MakeXMLCDATAString(JSContext *cx, JSStringBuffer *sb, JSString *str)
+{
+ static const jschar cdata_prefix_ucNstr[] = {'<', '!', '[',
+ 'C', 'D', 'A', 'T', 'A',
+ '['};
+ static const jschar cdata_suffix_ucNstr[] = {']', ']', '>'};
+
+ return MakeXMLSpecialString(cx, sb, str, NULL,
+ cdata_prefix_ucNstr, 9,
+ cdata_suffix_ucNstr, 3);
+}
+
+static JSString *
+MakeXMLCommentString(JSContext *cx, JSStringBuffer *sb, JSString *str)
+{
+ static const jschar comment_prefix_ucNstr[] = {'<', '!', '-', '-'};
+ static const jschar comment_suffix_ucNstr[] = {'-', '-', '>'};
+
+ return MakeXMLSpecialString(cx, sb, str, NULL,
+ comment_prefix_ucNstr, 4,
+ comment_suffix_ucNstr, 3);
+}
+
+static JSString *
+MakeXMLPIString(JSContext *cx, JSStringBuffer *sb, JSString *name,
+ JSString *value)
+{
+ static const jschar pi_prefix_ucNstr[] = {'<', '?'};
+ static const jschar pi_suffix_ucNstr[] = {'?', '>'};
+
+ return MakeXMLSpecialString(cx, sb, name, value,
+ pi_prefix_ucNstr, 2,
+ pi_suffix_ucNstr, 2);
+}
+
+/*
+ * ECMA-357 10.2.1 17(d-g) pulled out into a common subroutine that appends
+ * equals, a double quote, an attribute value, and a closing double quote.
+ */
+static void
+AppendAttributeValue(JSContext *cx, JSStringBuffer *sb, JSString *valstr)
+{
+ js_AppendCString(sb, "=\"");
+ valstr = js_EscapeAttributeValue(cx, valstr);
+ if (!valstr) {
+ free(sb->base);
+ sb->base = STRING_BUFFER_ERROR_BASE;
+ return;
+ }
+ js_AppendJSString(sb, valstr);
+ js_AppendChar(sb, '"');
+}
+
+/*
+ * ECMA-357 10.2.1.1 EscapeElementValue helper method.
+ *
+ * This function takes ownership of sb->base, if sb is non-null, in all cases
+ * of success or failure.
+ */
+static JSString *
+EscapeElementValue(JSContext *cx, JSStringBuffer *sb, JSString *str)
+{
+ size_t length, newlength;
+ const jschar *cp, *start, *end;
+ jschar c;
+
+ length = newlength = JSSTRING_LENGTH(str);
+ for (cp = start = JSSTRING_CHARS(str), end = cp + length; cp < end; cp++) {
+ c = *cp;
+ if (c == '<' || c == '>')
+ newlength += 3;
+ else if (c == '&')
+ newlength += 4;
+
+ if (newlength < length) {
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+ }
+ }
+ if ((sb && STRING_BUFFER_OFFSET(sb) != 0) || newlength > length) {
+ JSStringBuffer localSB;
+ if (!sb) {
+ sb = &localSB;
+ js_InitStringBuffer(sb);
+ }
+ if (!sb->grow(sb, newlength)) {
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+ }
+ for (cp = start; cp < end; cp++) {
+ c = *cp;
+ if (c == '<')
+ js_AppendCString(sb, js_lt_entity_str);
+ else if (c == '>')
+ js_AppendCString(sb, js_gt_entity_str);
+ else if (c == '&')
+ js_AppendCString(sb, js_amp_entity_str);
+ else
+ js_AppendChar(sb, c);
+ }
+ JS_ASSERT(STRING_BUFFER_OK(sb));
+ str = js_NewString(cx, sb->base, STRING_BUFFER_OFFSET(sb), 0);
+ if (!str)
+ js_FinishStringBuffer(sb);
+ }
+ return str;
+}
+
+/*
+ * ECMA-357 10.2.1.2 EscapeAttributeValue helper method.
+ * This function takes ownership of sb->base, if sb is non-null, in all cases.
+ */
+static JSString *
+EscapeAttributeValue(JSContext *cx, JSStringBuffer *sb, JSString *str)
+{
+ size_t length, newlength;
+ const jschar *cp, *start, *end;
+ jschar c;
+
+ length = newlength = JSSTRING_LENGTH(str);
+ for (cp = start = JSSTRING_CHARS(str), end = cp + length; cp < end; cp++) {
+ c = *cp;
+ if (c == '"')
+ newlength += 5;
+ else if (c == '<')
+ newlength += 3;
+ else if (c == '&' || c == '\n' || c == '\r' || c == '\t')
+ newlength += 4;
+
+ if (newlength < length) {
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+ }
+ }
+ if ((sb && STRING_BUFFER_OFFSET(sb) != 0) || newlength > length) {
+ JSStringBuffer localSB;
+ if (!sb) {
+ sb = &localSB;
+ js_InitStringBuffer(sb);
+ }
+ if (!sb->grow(sb, newlength)) {
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+ }
+ for (cp = start; cp < end; cp++) {
+ c = *cp;
+ if (c == '"')
+ js_AppendCString(sb, js_quot_entity_str);
+ else if (c == '<')
+ js_AppendCString(sb, js_lt_entity_str);
+ else if (c == '&')
+ js_AppendCString(sb, js_amp_entity_str);
+ else if (c == '\n')
+ js_AppendCString(sb, "&#xA;");
+ else if (c == '\r')
+ js_AppendCString(sb, "&#xD;");
+ else if (c == '\t')
+ js_AppendCString(sb, "&#x9;");
+ else
+ js_AppendChar(sb, c);
+ }
+ JS_ASSERT(STRING_BUFFER_OK(sb));
+ str = js_NewString(cx, sb->base, STRING_BUFFER_OFFSET(sb), 0);
+ if (!str)
+ js_FinishStringBuffer(sb);
+ }
+ return str;
+}
+
+/* 13.3.5.4 [[GetNamespace]]([InScopeNamespaces]) */
+static JSXMLNamespace *
+GetNamespace(JSContext *cx, JSXMLQName *qn, const JSXMLArray *inScopeNSes)
+{
+ JSXMLNamespace *match, *ns;
+ uint32 i, n;
+ jsval argv[2];
+ JSObject *nsobj;
+
+ JS_ASSERT(qn->uri);
+ if (!qn->uri) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_XML_NAMESPACE,
+ qn->prefix
+ ? js_ValueToPrintableString(cx,
+ STRING_TO_JSVAL(qn->prefix))
+ : js_type_strs[JSTYPE_VOID]);
+ return NULL;
+ }
+
+ /* Look for a matching namespace in inScopeNSes, if provided. */
+ match = NULL;
+ if (inScopeNSes) {
+ for (i = 0, n = inScopeNSes->length; i < n; i++) {
+ ns = XMLARRAY_MEMBER(inScopeNSes, i, JSXMLNamespace);
+ if (!ns)
+ continue;
+
+ /*
+ * Erratum, very tricky, and not specified in ECMA-357 13.3.5.4:
+ * If we preserve prefixes, we must match null qn->prefix against
+ * an empty ns->prefix, in order to avoid generating redundant
+ * prefixed and default namespaces for cases such as:
+ *
+ * x = <t xmlns="http://foo.com"/>
+ * print(x.toXMLString());
+ *
+ * Per 10.3.2.1, the namespace attribute in t has an empty string
+ * prefix (*not* a null prefix), per 10.3.2.1 Step 6(h)(i)(1):
+ *
+ * 1. If the [local name] property of a is "xmlns"
+ * a. Map ns.prefix to the empty string
+ *
+ * But t's name has a null prefix in this implementation, meaning
+ * *undefined*, per 10.3.2.1 Step 6(c)'s NOTE (which refers to
+ * the http://www.w3.org/TR/xml-infoset/ spec, item 2.2.3, without
+ * saying how "no value" maps to an ECMA-357 value -- but it must
+ * map to the *undefined* prefix value).
+ *
+ * Since "" != undefined (or null, in the current implementation)
+ * the ECMA-357 spec will fail to match in [[GetNamespace]] called
+ * on t with argument {} U {(prefix="", uri="http://foo.com")}.
+ * This spec bug leads to ToXMLString results that duplicate the
+ * declared namespace.
+ */
+ if (js_EqualStrings(ns->uri, qn->uri) &&
+ (ns->prefix == qn->prefix ||
+ ((ns->prefix && qn->prefix)
+ ? js_EqualStrings(ns->prefix, qn->prefix)
+ : IS_EMPTY(ns->prefix ? ns->prefix : qn->prefix)))) {
+ match = ns;
+ break;
+ }
+ }
+ }
+
+ /* If we didn't match, make a new namespace from qn. */
+ if (!match) {
+ argv[0] = qn->prefix ? STRING_TO_JSVAL(qn->prefix) : JSVAL_VOID;
+ argv[1] = STRING_TO_JSVAL(qn->uri);
+ nsobj = js_ConstructObject(cx, &js_NamespaceClass.base, NULL, NULL,
+ 2, argv);
+ if (!nsobj)
+ return NULL;
+ match = (JSXMLNamespace *) JS_GetPrivate(cx, nsobj);
+ }
+ return match;
+}
+
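+/*
+ * Generate a namespace prefix for uri that does not collide with any prefix
+ * already declared in decls; see the comments below for the heuristics used.
+ */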
+static JSString *
+GeneratePrefix(JSContext *cx, JSString *uri, JSXMLArray *decls)
+{
+ const jschar *cp, *start, *end;
+ size_t length, newlength, offset;
+ uint32 i, n, m, serial;
+ jschar *bp, *dp;
+ JSBool done;
+ JSXMLNamespace *ns;
+ JSString *prefix;
+
+ JS_ASSERT(!IS_EMPTY(uri));
+
+ /*
+ * If there are no *declared* namespaces, skip all collision detection and
+ * return a short prefix quickly; an example of such a situation:
+ *
+ * var x = <f/>;
+ * var n = new Namespace("http://example.com/");
+ * x.@n::att = "val";
+ * x.toXMLString();
+ *
+ * This is necessary for various log10 uses below to be valid.
+ */
+ if (decls->length == 0)
+ return JS_NewStringCopyZ(cx, "a");
+
+ /*
+ * Try peeling off the last filename suffix or pathname component till
+ * we have a valid XML name. This heuristic will prefer "xul" given
+ * ".../there.is.only.xul", "xbl" given ".../xbl", and "xbl2" given any
+ * likely URI of the form ".../xbl2/2005".
+ */
+ start = JSSTRING_CHARS(uri);
+ cp = end = start + JSSTRING_LENGTH(uri);
+ while (--cp > start) {
+ if (*cp == '.' || *cp == '/' || *cp == ':') {
+ ++cp;
+ length = PTRDIFF(end, cp, jschar);
+ if (IsXMLName(cp, length) && !STARTS_WITH_XML(cp, length))
+ break;
+ end = --cp;
+ }
+ }
+ length = PTRDIFF(end, cp, jschar);
+
+ /*
+ * If the namespace consisted only of non-XML names or names that begin
+ * case-insensitively with "xml", arbitrarily create a prefix consisting
+ * of 'a's of size length (allowing dp-calculating code to work with or
+ * without this branch executing) plus the space for storing a hyphen and
+ * the serial number (avoiding reallocation if a collision happens).
+ */
+ bp = (jschar *) cp;
+ newlength = length;
+ if (STARTS_WITH_XML(cp, length) || !IsXMLName(cp, length)) {
+ newlength = length + 2 + (size_t) log10(decls->length);
+ bp = (jschar *)
+ JS_malloc(cx, (newlength + 1) * sizeof(jschar));
+ if (!bp)
+ return NULL;
+
+ bp[newlength] = 0;
+ for (i = 0; i < newlength; i++)
+ bp[i] = 'a';
+ }
+
+ /*
+ * Now search through decls looking for a collision. If we collide with
+ * an existing prefix, start tacking on a hyphen and a serial number.
+ */
+ serial = 0;
+ do {
+ done = JS_TRUE;
+ for (i = 0, n = decls->length; i < n; i++) {
+ ns = XMLARRAY_MEMBER(decls, i, JSXMLNamespace);
+ if (ns && ns->prefix &&
+ JSSTRING_LENGTH(ns->prefix) == newlength &&
+ !memcmp(JSSTRING_CHARS(ns->prefix), bp,
+ newlength * sizeof(jschar))) {
+ if (bp == cp) {
+ newlength = length + 2 + (size_t) log10(n);
+ bp = (jschar *)
+ JS_malloc(cx, (newlength + 1) * sizeof(jschar));
+ if (!bp)
+ return NULL;
+ js_strncpy(bp, cp, length);
+ }
+
+ ++serial;
+ JS_ASSERT(serial <= n);
+ dp = bp + length + 2 + (size_t) log10(serial);
+ *dp = 0;
+ for (m = serial; m != 0; m /= 10)
+ *--dp = (jschar)('0' + m % 10);
+ *--dp = '-';
+ JS_ASSERT(dp == bp + length);
+
+ done = JS_FALSE;
+ break;
+ }
+ }
+ } while (!done);
+
+ if (bp == cp) {
+ offset = PTRDIFF(cp, start, jschar);
+ prefix = js_NewDependentString(cx, uri, offset, length, 0);
+ } else {
+ prefix = js_NewString(cx, bp, newlength, 0);
+ if (!prefix)
+ JS_free(cx, bp);
+ }
+ return prefix;
+}
+
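+/*
+ * Looser counterpart of namespace_identity above: when b has a prefix, match
+ * on prefix alone, otherwise match on uri alone. Used below to find a
+ * redundant default namespace declaration in decls.
+ */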
+static JSBool
+namespace_match(const void *a, const void *b)
+{
+ const JSXMLNamespace *nsa = (const JSXMLNamespace *) a;
+ const JSXMLNamespace *nsb = (const JSXMLNamespace *) b;
+
+ if (nsb->prefix)
+ return nsa->prefix && js_EqualStrings(nsa->prefix, nsb->prefix);
+ return js_EqualStrings(nsa->uri, nsb->uri);
+}
+
+/* ECMA-357 10.2.1 and 10.2.2 */
+static JSString *
+XMLToXMLString(JSContext *cx, JSXML *xml, const JSXMLArray *ancestorNSes,
+ uintN indentLevel)
+{
+ JSBool pretty, indentKids;
+ JSStringBuffer sb;
+ JSString *str, *prefix, *kidstr;
+ JSXMLArrayCursor cursor;
+ uint32 i, n;
+ JSXMLArray empty, decls, ancdecls;
+ JSXMLNamespace *ns, *ns2;
+ uintN nextIndentLevel;
+ JSXML *attr, *kid;
+
+ if (!GetBooleanXMLSetting(cx, js_prettyPrinting_str, &pretty))
+ return NULL;
+
+ js_InitStringBuffer(&sb);
+ if (pretty)
+ js_RepeatChar(&sb, ' ', indentLevel);
+ str = NULL;
+
+ switch (xml->xml_class) {
+ case JSXML_CLASS_TEXT:
+ /* Step 4. */
+ if (pretty) {
+ str = ChompXMLWhitespace(cx, xml->xml_value);
+ if (!str)
+ return NULL;
+ } else {
+ str = xml->xml_value;
+ }
+ return EscapeElementValue(cx, &sb, str);
+
+ case JSXML_CLASS_ATTRIBUTE:
+ /* Step 5. */
+ return EscapeAttributeValue(cx, &sb, xml->xml_value);
+
+ case JSXML_CLASS_COMMENT:
+ /* Step 6. */
+ return MakeXMLCommentString(cx, &sb, xml->xml_value);
+
+ case JSXML_CLASS_PROCESSING_INSTRUCTION:
+ /* Step 7. */
+ return MakeXMLPIString(cx, &sb, xml->name->localName, xml->xml_value);
+
+ case JSXML_CLASS_LIST:
+ /* ECMA-357 10.2.2. */
+ XMLArrayCursorInit(&cursor, &xml->xml_kids);
+ i = 0;
+ while ((kid = (JSXML *) XMLArrayCursorNext(&cursor)) != NULL) {
+ if (pretty && i != 0)
+ js_AppendChar(&sb, '\n');
+
+ kidstr = XMLToXMLString(cx, kid, ancestorNSes, indentLevel);
+ if (!kidstr)
+ break;
+
+ js_AppendJSString(&sb, kidstr);
+ ++i;
+ }
+ XMLArrayCursorFinish(&cursor);
+ if (kid)
+ goto list_out;
+
+ if (!sb.base) {
+ if (!STRING_BUFFER_OK(&sb)) {
+ JS_ReportOutOfMemory(cx);
+ return NULL;
+ }
+ return cx->runtime->emptyString;
+ }
+
+ str = js_NewString(cx, sb.base, STRING_BUFFER_OFFSET(&sb), 0);
+ list_out:
+ if (!str)
+ js_FinishStringBuffer(&sb);
+ return str;
+
+ default:;
+ }
+
+ /* After this point, control must flow through label out: to exit. */
+ if (!js_EnterLocalRootScope(cx))
+ return NULL;
+
+ /* ECMA-357 10.2.1 step 8 onward: handle ToXMLString on an XML element. */
+ if (!ancestorNSes) {
+ XMLArrayInit(cx, &empty, 0);
+ ancestorNSes = &empty;
+ }
+ XMLArrayInit(cx, &decls, 0);
+ ancdecls.capacity = 0;
+
+ /* Clone in-scope namespaces not in ancestorNSes into decls. */
+ XMLArrayCursorInit(&cursor, &xml->xml_namespaces);
+ while ((ns = (JSXMLNamespace *) XMLArrayCursorNext(&cursor)) != NULL) {
+ if (!ns->declared)
+ continue;
+ if (!XMLARRAY_HAS_MEMBER(ancestorNSes, ns, namespace_identity)) {
+ /* NOTE: may want to exclude unused namespaces here. */
+ ns2 = js_NewXMLNamespace(cx, ns->prefix, ns->uri, JS_TRUE);
+ if (!ns2 || !XMLARRAY_APPEND(cx, &decls, ns2))
+ break;
+ }
+ }
+ XMLArrayCursorFinish(&cursor);
+ if (ns)
+ goto out;
+
+ /*
+ * Union ancestorNSes and decls into ancdecls. Note that ancdecls does
+ * not own its member references. In the spec, ancdecls has no name, but
+ * is always written out as (AncestorNamespaces U namespaceDeclarations).
+ */
+ if (!XMLArrayInit(cx, &ancdecls, ancestorNSes->length + decls.length))
+ goto out;
+ for (i = 0, n = ancestorNSes->length; i < n; i++) {
+ ns2 = XMLARRAY_MEMBER(ancestorNSes, i, JSXMLNamespace);
+ if (!ns2)
+ continue;
+ JS_ASSERT(!XMLARRAY_HAS_MEMBER(&decls, ns2, namespace_identity));
+ if (!XMLARRAY_APPEND(cx, &ancdecls, ns2))
+ goto out;
+ }
+ for (i = 0, n = decls.length; i < n; i++) {
+ ns2 = XMLARRAY_MEMBER(&decls, i, JSXMLNamespace);
+ if (!ns2)
+ continue;
+ JS_ASSERT(!XMLARRAY_HAS_MEMBER(&ancdecls, ns2, namespace_identity));
+ if (!XMLARRAY_APPEND(cx, &ancdecls, ns2))
+ goto out;
+ }
+
+ /* Step 11, except we don't clone ns unless its prefix is undefined. */
+ ns = GetNamespace(cx, xml->name, &ancdecls);
+ if (!ns)
+ goto out;
+
+ /* Step 12 (NULL means *undefined* here), plus the deferred ns cloning. */
+ if (!ns->prefix) {
+ /*
+ * Create a namespace prefix that isn't used by any member of decls.
+ * Assign the new prefix to a copy of ns. Flag this namespace as if
+ * it were declared, for assertion-testing's sake later below.
+ *
+ * Erratum: if ns->prefix and xml->name are both null (*undefined* in
+ * ECMA-357), we know that xml was named using the default namespace
+ * (proof: see GetNamespace and the Namespace constructor called with
+ * two arguments). So we ought not generate a new prefix here, when
+ * we can declare ns as the default namespace for xml.
+ *
+ * This helps descendants inherit the namespace instead of redundantly
+ * redeclaring it with generated prefixes in each descendant.
+ */
+ if (!xml->name->prefix) {
+ prefix = cx->runtime->emptyString;
+ } else {
+ prefix = GeneratePrefix(cx, ns->uri, &ancdecls);
+ if (!prefix)
+ goto out;
+ }
+ ns = js_NewXMLNamespace(cx, prefix, ns->uri, JS_TRUE);
+ if (!ns)
+ goto out;
+
+ /*
+ * If the xml->name was unprefixed, we must remove any declared default
+ * namespace from decls before appending ns. How can you get a default
+ * namespace in decls that doesn't match the one from name? Apparently
+ * by calling x.setNamespace(ns) where ns has no prefix. The other way
+ * to fix this is to update x's in-scope namespaces when setNamespace
+ * is called, but that's not specified by ECMA-357.
+ *
+ * Likely Erratum here, depending on whether the lack of update to x's
+ * in-scope namespace in XML.prototype.setNamespace (13.4.4.36) is an
+ * erratum or not. Note that changing setNamespace to update the list
+ * of in-scope namespaces will change x.namespaceDeclarations().
+ */
+ if (IS_EMPTY(prefix)) {
+ i = XMLArrayFindMember(&decls, ns, namespace_match);
+ if (i != XML_NOT_FOUND)
+ XMLArrayDelete(cx, &decls, i, JS_TRUE);
+ }
+
+ /*
+ * In the spec, ancdecls has no name, but is always written out as
+ * (AncestorNamespaces U namespaceDeclarations). Since we compute
+ * that union in ancdecls, any time we append a namespace strong
+ * ref to decls, we must also append a weak ref to ancdecls. Order
+ * matters here: code at label out: releases strong refs in decls.
+ */
+ if (!XMLARRAY_APPEND(cx, &ancdecls, ns) ||
+ !XMLARRAY_APPEND(cx, &decls, ns)) {
+ goto out;
+ }
+ }
+
+ /* Format the element or point-tag into sb. */
+ js_AppendChar(&sb, '<');
+
+ if (ns->prefix && !IS_EMPTY(ns->prefix)) {
+ js_AppendJSString(&sb, ns->prefix);
+ js_AppendChar(&sb, ':');
+ }
+ js_AppendJSString(&sb, xml->name->localName);
+
+ /*
+ * Step 16 makes a union to avoid writing two loops in step 17, to share
+ * common attribute value appending spec-code. We prefer two loops for
+ * faster code and less data overhead.
+ */
+
+ /* Step 17(b): append attributes. */
+ XMLArrayCursorInit(&cursor, &xml->xml_attrs);
+ while ((attr = (JSXML *) XMLArrayCursorNext(&cursor)) != NULL) {
+ js_AppendChar(&sb, ' ');
+ ns2 = GetNamespace(cx, attr->name, &ancdecls);
+ if (!ns2)
+ break;
+
+ /* 17(b)(ii): NULL means *undefined* here. */
+ if (!ns2->prefix) {
+ prefix = GeneratePrefix(cx, ns2->uri, &ancdecls);
+ if (!prefix)
+ break;
+
+ /* Again, we avoid copying ns2 until we know it's prefix-less. */
+ ns2 = js_NewXMLNamespace(cx, prefix, ns2->uri, JS_TRUE);
+ if (!ns2)
+ break;
+
+ /*
+ * In the spec, ancdecls has no name, but is always written out as
+ * (AncestorNamespaces U namespaceDeclarations). Since we compute
+ * that union in ancdecls, any time we append a namespace strong
+ * ref to decls, we must also append a weak ref to ancdecls. Order
+ * matters here: code at label out: releases strong refs in decls.
+ */
+ if (!XMLARRAY_APPEND(cx, &ancdecls, ns2) ||
+ !XMLARRAY_APPEND(cx, &decls, ns2)) {
+ break;
+ }
+ }
+
+ /* 17(b)(iii). */
+ if (!IS_EMPTY(ns2->prefix)) {
+ js_AppendJSString(&sb, ns2->prefix);
+ js_AppendChar(&sb, ':');
+ }
+
+ /* 17(b)(iv). */
+ js_AppendJSString(&sb, attr->name->localName);
+
+ /* 17(d-g). */
+ AppendAttributeValue(cx, &sb, attr->xml_value);
+ }
+ XMLArrayCursorFinish(&cursor);
+ if (attr)
+ goto out;
+
+ /* Step 17(c): append XML namespace declarations. */
+ XMLArrayCursorInit(&cursor, &decls);
+ while ((ns2 = (JSXMLNamespace *) XMLArrayCursorNext(&cursor)) != NULL) {
+ JS_ASSERT(ns2->declared);
+
+ js_AppendCString(&sb, " xmlns");
+
+ /* 17(c)(ii): NULL means *undefined* here. */
+ if (!ns2->prefix) {
+ prefix = GeneratePrefix(cx, ns2->uri, &ancdecls);
+ if (!prefix)
+ break;
+ ns2->prefix = prefix;
+ }
+
+ /* 17(c)(iii). */
+ if (!IS_EMPTY(ns2->prefix)) {
+ js_AppendChar(&sb, ':');
+ js_AppendJSString(&sb, ns2->prefix);
+ }
+
+ /* 17(d-g). */
+ AppendAttributeValue(cx, &sb, ns2->uri);
+ }
+ XMLArrayCursorFinish(&cursor);
+ if (ns2)
+ goto out;
+
+ /* Step 18: handle point tags. */
+ n = xml->xml_kids.length;
+ if (n == 0) {
+ js_AppendCString(&sb, "/>");
+ } else {
+ /* Steps 19 through 25: handle element content, and open the end-tag. */
+ js_AppendChar(&sb, '>');
+ indentKids = n > 1 ||
+ (n == 1 &&
+ (kid = XMLARRAY_MEMBER(&xml->xml_kids, 0, JSXML)) &&
+ kid->xml_class != JSXML_CLASS_TEXT);
+
+ if (pretty && indentKids) {
+ if (!GetUint32XMLSetting(cx, js_prettyIndent_str, &i))
+ goto out;
+ nextIndentLevel = indentLevel + i;
+ } else {
+ nextIndentLevel = 0;
+ }
+
+ XMLArrayCursorInit(&cursor, &xml->xml_kids);
+ while ((kid = (JSXML *) XMLArrayCursorNext(&cursor)) != NULL) {
+ if (pretty && indentKids)
+ js_AppendChar(&sb, '\n');
+
+ kidstr = XMLToXMLString(cx, kid, &ancdecls, nextIndentLevel);
+ if (!kidstr)
+ break;
+
+ js_AppendJSString(&sb, kidstr);
+ }
+ XMLArrayCursorFinish(&cursor);
+ if (kid)
+ goto out;
+
+ if (pretty && indentKids) {
+ js_AppendChar(&sb, '\n');
+ js_RepeatChar(&sb, ' ', indentLevel);
+ }
+ js_AppendCString(&sb, "</");
+
+ /* Step 26. */
+ if (ns->prefix && !IS_EMPTY(ns->prefix)) {
+ js_AppendJSString(&sb, ns->prefix);
+ js_AppendChar(&sb, ':');
+ }
+
+ /* Step 27. */
+ js_AppendJSString(&sb, xml->name->localName);
+ js_AppendChar(&sb, '>');
+ }
+
+ if (!STRING_BUFFER_OK(&sb)) {
+ JS_ReportOutOfMemory(cx);
+ goto out;
+ }
+
+ str = js_NewString(cx, sb.base, STRING_BUFFER_OFFSET(&sb), 0);
+out:
+ js_LeaveLocalRootScopeWithResult(cx, STRING_TO_JSVAL(str));
+ if (!str && STRING_BUFFER_OK(&sb))
+ js_FinishStringBuffer(&sb);
+ XMLArrayFinish(cx, &decls);
+ if (ancdecls.capacity != 0)
+ XMLArrayFinish(cx, &ancdecls);
+ return str;
+}
+
+/* ECMA-357 10.2 */
+static JSString *
+ToXMLString(JSContext *cx, jsval v)
+{
+ JSObject *obj;
+ JSString *str;
+ JSXML *xml;
+
+ if (JSVAL_IS_NULL(v) || JSVAL_IS_VOID(v)) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_XML_CONVERSION,
+ js_type_strs[JSVAL_IS_NULL(v)
+ ? JSTYPE_NULL
+ : JSTYPE_VOID]);
+ return NULL;
+ }
+
+ if (JSVAL_IS_BOOLEAN(v) || JSVAL_IS_NUMBER(v))
+ return js_ValueToString(cx, v);
+
+ if (JSVAL_IS_STRING(v))
+ return EscapeElementValue(cx, NULL, JSVAL_TO_STRING(v));
+
+ obj = JSVAL_TO_OBJECT(v);
+ if (!OBJECT_IS_XML(cx, obj)) {
+ if (!OBJ_DEFAULT_VALUE(cx, obj, JSTYPE_STRING, &v))
+ return NULL;
+ str = js_ValueToString(cx, v);
+ if (!str)
+ return NULL;
+ return EscapeElementValue(cx, NULL, str);
+ }
+
+    /* v is an XML or XMLList object: serialize it via XMLToXMLString. */
+ xml = (JSXML *) JS_GetPrivate(cx, obj);
+ return XMLToXMLString(cx, xml, NULL, 0);
+}
+
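+/*
+ * Coerce v to an AttributeName QName.  A string maps to a QName with empty
+ * uri and prefix; an AttributeName object returns its private QName; a QName
+ * contributes its uri, prefix, and localName; AnyName becomes "*"; any other
+ * object is converted with ToString, and any other primitive is reported as
+ * a bad attribute name.  The new QName is bound to an AttributeName object
+ * before returning.
+ */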
+static JSXMLQName *
+ToAttributeName(JSContext *cx, jsval v)
+{
+ JSString *name, *uri, *prefix;
+ JSObject *obj;
+ JSClass *clasp;
+ JSXMLQName *qn;
+ JSTempValueRooter tvr;
+
+ if (JSVAL_IS_STRING(v)) {
+ name = JSVAL_TO_STRING(v);
+ uri = prefix = cx->runtime->emptyString;
+ } else {
+ if (JSVAL_IS_PRIMITIVE(v)) {
+ name = js_DecompileValueGenerator(cx, JSDVG_IGNORE_STACK, v, NULL);
+ if (name) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_XML_ATTR_NAME,
+ JS_GetStringBytes(name));
+ }
+ return NULL;
+ }
+
+ obj = JSVAL_TO_OBJECT(v);
+ clasp = OBJ_GET_CLASS(cx, obj);
+ if (clasp == &js_AttributeNameClass)
+ return (JSXMLQName *) JS_GetPrivate(cx, obj);
+
+ if (clasp == &js_QNameClass.base) {
+ qn = (JSXMLQName *) JS_GetPrivate(cx, obj);
+ uri = qn->uri;
+ prefix = qn->prefix;
+ name = qn->localName;
+ } else {
+ if (clasp == &js_AnyNameClass) {
+ name = ATOM_TO_STRING(cx->runtime->atomState.starAtom);
+ } else {
+ name = js_ValueToString(cx, v);
+ if (!name)
+ return NULL;
+ }
+ uri = prefix = cx->runtime->emptyString;
+ }
+ }
+
+ qn = js_NewXMLQName(cx, uri, prefix, name);
+ if (!qn)
+ return NULL;
+
+ JS_PUSH_TEMP_ROOT_GCTHING(cx, qn, &tvr);
+ obj = js_GetAttributeNameObject(cx, qn);
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ if (!obj)
+ return NULL;
+ return qn;
+}
+
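+/*
+ * Coerce v to a QName usable as an XML property name.  Strings beginning
+ * with '@' are routed to ToAttributeName; if the resulting QName's uri is
+ * the function:: namespace, *funidp is set so callers can fall back to
+ * ordinary property lookup; numeric-looking strings are rejected (see the
+ * erratum comment below).
+ */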
+static JSXMLQName *
+ToXMLName(JSContext *cx, jsval v, jsid *funidp)
+{
+ JSString *name;
+ JSObject *obj;
+ JSClass *clasp;
+ uint32 index;
+ JSXMLQName *qn;
+ JSAtom *atom;
+
+ if (JSVAL_IS_STRING(v)) {
+ name = JSVAL_TO_STRING(v);
+ } else {
+ if (JSVAL_IS_PRIMITIVE(v)) {
+ name = js_DecompileValueGenerator(cx, JSDVG_IGNORE_STACK, v, NULL);
+ if (name)
+ goto bad;
+ return NULL;
+ }
+
+ obj = JSVAL_TO_OBJECT(v);
+ clasp = OBJ_GET_CLASS(cx, obj);
+ if (clasp == &js_AttributeNameClass || clasp == &js_QNameClass.base)
+ goto out;
+ if (clasp == &js_AnyNameClass) {
+ name = ATOM_TO_STRING(cx->runtime->atomState.starAtom);
+ goto construct;
+ }
+ name = js_ValueToString(cx, v);
+ if (!name)
+ return NULL;
+ }
+
+ /*
+ * ECMA-357 10.6.1 step 1 seems to be incorrect. The spec says:
+ *
+ * 1. If ToString(ToNumber(P)) == ToString(P), throw a TypeError exception
+ *
+ * First, _P_ should be _s_, to refer to the given string.
+ *
+ * Second, why does ToXMLName applied to the string type throw TypeError
+ * only for numeric literals without any leading or trailing whitespace?
+ *
+ * If the idea is to reject uint32 property names, then the check needs to
+ * be stricter, to exclude hexadecimal and floating point literals.
+ */
+ if (js_IdIsIndex(STRING_TO_JSVAL(name), &index))
+ goto bad;
+
+ if (*JSSTRING_CHARS(name) == '@') {
+ name = js_NewDependentString(cx, name, 1, JSSTRING_LENGTH(name) - 1, 0);
+ if (!name)
+ return NULL;
+ *funidp = 0;
+ return ToAttributeName(cx, STRING_TO_JSVAL(name));
+ }
+
+construct:
+ v = STRING_TO_JSVAL(name);
+ obj = js_ConstructObject(cx, &js_QNameClass.base, NULL, NULL, 1, &v);
+ if (!obj)
+ return NULL;
+
+out:
+ qn = (JSXMLQName *) JS_GetPrivate(cx, obj);
+ atom = cx->runtime->atomState.lazy.functionNamespaceURIAtom;
+ if (qn->uri && atom &&
+ (qn->uri == ATOM_TO_STRING(atom) ||
+ js_EqualStrings(qn->uri, ATOM_TO_STRING(atom)))) {
+ if (!JS_ValueToId(cx, STRING_TO_JSVAL(qn->localName), funidp))
+ return NULL;
+ } else {
+ *funidp = 0;
+ }
+ return qn;
+
+bad:
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_XML_NAME,
+ js_ValueToPrintableString(cx, STRING_TO_JSVAL(name)));
+ return NULL;
+}
+
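+/*
+ * Illustrative example (not from the spec text): re-declaring an in-scope
+ * prefix with a different URI, e.g.
+ *
+ *     var x = <a xmlns:p="http://one"/>;
+ *     x.addNamespace(new Namespace("p", "http://two"));
+ *
+ * takes the prefix-conflict branch below: the old declaration stays in scope
+ * but loses its prefix, and the new declaration is appended.
+ */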
+/* ECMA-357 9.1.1.13 XML [[AddInScopeNamespace]]. */
+static JSBool
+AddInScopeNamespace(JSContext *cx, JSXML *xml, JSXMLNamespace *ns)
+{
+ JSXMLNamespace *match, *ns2;
+ uint32 i, n, m;
+
+ if (xml->xml_class != JSXML_CLASS_ELEMENT)
+ return JS_TRUE;
+
+ /* NULL means *undefined* here -- see ECMA-357 9.1.1.13 step 2. */
+ if (!ns->prefix) {
+ match = NULL;
+ for (i = 0, n = xml->xml_namespaces.length; i < n; i++) {
+ ns2 = XMLARRAY_MEMBER(&xml->xml_namespaces, i, JSXMLNamespace);
+ if (ns2 && js_EqualStrings(ns2->uri, ns->uri)) {
+ match = ns2;
+ break;
+ }
+ }
+ if (!match && !XMLARRAY_ADD_MEMBER(cx, &xml->xml_namespaces, n, ns))
+ return JS_FALSE;
+ } else {
+ if (IS_EMPTY(ns->prefix) && IS_EMPTY(xml->name->uri))
+ return JS_TRUE;
+ match = NULL;
+#ifdef __GNUC__ /* suppress bogus gcc warnings */
+ m = XML_NOT_FOUND;
+#endif
+ for (i = 0, n = xml->xml_namespaces.length; i < n; i++) {
+ ns2 = XMLARRAY_MEMBER(&xml->xml_namespaces, i, JSXMLNamespace);
+ if (ns2 && ns2->prefix &&
+ js_EqualStrings(ns2->prefix, ns->prefix)) {
+ match = ns2;
+ m = i;
+ break;
+ }
+ }
+ if (match && !js_EqualStrings(match->uri, ns->uri)) {
+ ns2 = XMLARRAY_DELETE(cx, &xml->xml_namespaces, m, JS_TRUE,
+ JSXMLNamespace);
+ JS_ASSERT(ns2 == match);
+ match->prefix = NULL;
+ if (!AddInScopeNamespace(cx, xml, match))
+ return JS_FALSE;
+ }
+ if (!XMLARRAY_APPEND(cx, &xml->xml_namespaces, ns))
+ return JS_FALSE;
+ }
+
+ /* OPTION: enforce that descendants have superset namespaces. */
+ return JS_TRUE;
+}
+
+/* ECMA-357 9.2.1.6 XMLList [[Append]]. */
+static JSBool
+Append(JSContext *cx, JSXML *list, JSXML *xml)
+{
+ uint32 i, j, k, n;
+ JSXML *kid;
+
+ JS_ASSERT(list->xml_class == JSXML_CLASS_LIST);
+ i = list->xml_kids.length;
+ n = 1;
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ list->xml_target = xml->xml_target;
+ list->xml_targetprop = xml->xml_targetprop;
+ n = JSXML_LENGTH(xml);
+ k = i + n;
+ if (!XMLArraySetCapacity(cx, &list->xml_kids, k))
+ return JS_FALSE;
+ for (j = 0; j < n; j++) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, j, JSXML);
+ if (kid)
+ XMLARRAY_SET_MEMBER(&list->xml_kids, i + j, kid);
+ }
+ return JS_TRUE;
+ }
+
+ list->xml_target = xml->parent;
+ if (xml->xml_class == JSXML_CLASS_PROCESSING_INSTRUCTION)
+ list->xml_targetprop = NULL;
+ else
+ list->xml_targetprop = xml->name;
+ if (!XMLARRAY_ADD_MEMBER(cx, &list->xml_kids, i, xml))
+ return JS_FALSE;
+ return JS_TRUE;
+}
+
+/* ECMA-357 9.1.1.7 XML [[DeepCopy]] and 9.2.1.7 XMLList [[DeepCopy]]. */
+static JSXML *
+DeepCopyInLRS(JSContext *cx, JSXML *xml, uintN flags);
+
+static JSXML *
+DeepCopy(JSContext *cx, JSXML *xml, JSObject *obj, uintN flags)
+{
+ JSXML *copy;
+ JSBool ok;
+
+ /* Our caller may not be protecting newborns with a local root scope. */
+ if (!js_EnterLocalRootScope(cx))
+ return NULL;
+ copy = DeepCopyInLRS(cx, xml, flags);
+ if (copy) {
+ if (obj) {
+ /* Caller provided the object for this copy, hook 'em up. */
+ ok = JS_SetPrivate(cx, obj, copy);
+ if (ok)
+ copy->object = obj;
+ } else {
+ ok = js_GetXMLObject(cx, copy) != NULL;
+ }
+ if (!ok)
+ copy = NULL;
+ }
+ js_LeaveLocalRootScopeWithResult(cx, (jsval) copy);
+ return copy;
+}
+
+/*
+ * (i) We must be in a local root scope (InLRS).
+ * (ii) parent must have a rooted object.
+ * (iii) from's owning object must be locked if not thread-local.
+ */
+static JSBool
+DeepCopySetInLRS(JSContext *cx, JSXMLArray *from, JSXMLArray *to, JSXML *parent,
+ uintN flags)
+{
+ uint32 j, n;
+ JSXMLArrayCursor cursor;
+ JSBool ok;
+ JSXML *kid, *kid2;
+ JSString *str;
+
+ JS_ASSERT(cx->localRootStack);
+
+ n = from->length;
+ if (!XMLArraySetCapacity(cx, to, n))
+ return JS_FALSE;
+
+ XMLArrayCursorInit(&cursor, from);
+ j = 0;
+ ok = JS_TRUE;
+ while ((kid = (JSXML *) XMLArrayCursorNext(&cursor)) != NULL) {
+ if ((flags & XSF_IGNORE_COMMENTS) &&
+ kid->xml_class == JSXML_CLASS_COMMENT) {
+ continue;
+ }
+ if ((flags & XSF_IGNORE_PROCESSING_INSTRUCTIONS) &&
+ kid->xml_class == JSXML_CLASS_PROCESSING_INSTRUCTION) {
+ continue;
+ }
+ if ((flags & XSF_IGNORE_WHITESPACE) &&
+ (kid->xml_flags & XMLF_WHITESPACE_TEXT)) {
+ continue;
+ }
+ kid2 = DeepCopyInLRS(cx, kid, flags);
+ if (!kid2) {
+ to->length = j;
+ ok = JS_FALSE;
+ break;
+ }
+
+ if ((flags & XSF_IGNORE_WHITESPACE) &&
+ n > 1 && kid2->xml_class == JSXML_CLASS_TEXT) {
+ str = ChompXMLWhitespace(cx, kid2->xml_value);
+ if (!str) {
+ to->length = j;
+ ok = JS_FALSE;
+ break;
+ }
+ kid2->xml_value = str;
+ }
+
+ XMLARRAY_SET_MEMBER(to, j, kid2);
+ ++j;
+ if (parent->xml_class != JSXML_CLASS_LIST)
+ kid2->parent = parent;
+ }
+ XMLArrayCursorFinish(&cursor);
+ if (!ok)
+ return JS_FALSE;
+
+ if (j < n)
+ XMLArrayTrim(to);
+ return JS_TRUE;
+}
+
+static JSXML *
+DeepCopyInLRS(JSContext *cx, JSXML *xml, uintN flags)
+{
+ JSXML *copy;
+ JSXMLQName *qn;
+ JSBool ok;
+ uint32 i, n;
+ JSXMLNamespace *ns, *ns2;
+
+ /* Our caller must be protecting newborn objects. */
+ JS_ASSERT(cx->localRootStack);
+
+ copy = js_NewXML(cx, xml->xml_class);
+ if (!copy)
+ return NULL;
+ qn = xml->name;
+ if (qn) {
+ qn = js_NewXMLQName(cx, qn->uri, qn->prefix, qn->localName);
+ if (!qn) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ }
+ copy->name = qn;
+ copy->xml_flags = xml->xml_flags;
+
+ if (JSXML_HAS_VALUE(xml)) {
+ copy->xml_value = xml->xml_value;
+ ok = JS_TRUE;
+ } else {
+ ok = DeepCopySetInLRS(cx, &xml->xml_kids, &copy->xml_kids, copy, flags);
+ if (!ok)
+ goto out;
+
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ copy->xml_target = xml->xml_target;
+ copy->xml_targetprop = xml->xml_targetprop;
+ } else {
+ n = xml->xml_namespaces.length;
+ ok = XMLArraySetCapacity(cx, &copy->xml_namespaces, n);
+ if (!ok)
+ goto out;
+ for (i = 0; i < n; i++) {
+ ns = XMLARRAY_MEMBER(&xml->xml_namespaces, i, JSXMLNamespace);
+ if (!ns)
+ continue;
+ ns2 = js_NewXMLNamespace(cx, ns->prefix, ns->uri, ns->declared);
+ if (!ns2) {
+ copy->xml_namespaces.length = i;
+ ok = JS_FALSE;
+ goto out;
+ }
+ XMLARRAY_SET_MEMBER(&copy->xml_namespaces, i, ns2);
+ }
+
+ ok = DeepCopySetInLRS(cx, &xml->xml_attrs, &copy->xml_attrs, copy,
+ 0);
+ if (!ok)
+ goto out;
+ }
+ }
+
+out:
+ if (!ok)
+ return NULL;
+ return copy;
+}
+
+static void
+ReportBadXMLName(JSContext *cx, jsval id)
+{
+ JSString *name;
+
+ name = js_DecompileValueGenerator(cx, JSDVG_IGNORE_STACK, id, NULL);
+ if (name) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_XML_NAME,
+ JS_GetStringBytes(name));
+ }
+}
+
+/* ECMA-357 9.1.1.4 XML [[DeleteByIndex]]. */
+static JSBool
+DeleteByIndex(JSContext *cx, JSXML *xml, jsval id, jsval *vp)
+{
+ uint32 index;
+ JSXML *kid;
+
+ if (!js_IdIsIndex(id, &index)) {
+ ReportBadXMLName(cx, id);
+ return JS_FALSE;
+ }
+
+ if (JSXML_HAS_KIDS(xml) && index < xml->xml_kids.length) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, index, JSXML);
+ if (kid)
+ kid->parent = NULL;
+ XMLArrayDelete(cx, &xml->xml_kids, index, JS_TRUE);
+ }
+
+ *vp = JSVAL_TRUE;
+ return JS_TRUE;
+}
+
+typedef JSBool (*JSXMLNameMatcher)(JSXMLQName *nameqn, JSXML *xml);
+
+static JSBool
+MatchAttrName(JSXMLQName *nameqn, JSXML *attr)
+{
+ JSXMLQName *attrqn = attr->name;
+
+ return (IS_STAR(nameqn->localName) ||
+ js_EqualStrings(attrqn->localName, nameqn->localName)) &&
+ (!nameqn->uri ||
+ js_EqualStrings(attrqn->uri, nameqn->uri));
+}
+
+static JSBool
+MatchElemName(JSXMLQName *nameqn, JSXML *elem)
+{
+ return (IS_STAR(nameqn->localName) ||
+ (elem->xml_class == JSXML_CLASS_ELEMENT &&
+ js_EqualStrings(elem->name->localName, nameqn->localName))) &&
+ (!nameqn->uri ||
+ (elem->xml_class == JSXML_CLASS_ELEMENT &&
+ js_EqualStrings(elem->name->uri, nameqn->uri)));
+}
+
+/* ECMA-357 9.1.1.8 XML [[Descendants]] and 9.2.1.8 XMLList [[Descendants]]. */
+static JSBool
+DescendantsHelper(JSContext *cx, JSXML *xml, JSXMLQName *nameqn, JSXML *list)
+{
+ uint32 i, n;
+ JSXML *attr, *kid;
+
+ if (xml->xml_class == JSXML_CLASS_ELEMENT &&
+ OBJ_GET_CLASS(cx, nameqn->object) == &js_AttributeNameClass) {
+ for (i = 0, n = xml->xml_attrs.length; i < n; i++) {
+ attr = XMLARRAY_MEMBER(&xml->xml_attrs, i, JSXML);
+ if (attr && MatchAttrName(nameqn, attr)) {
+ if (!Append(cx, list, attr))
+ return JS_FALSE;
+ }
+ }
+ }
+
+ for (i = 0, n = JSXML_LENGTH(xml); i < n; i++) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, i, JSXML);
+ if (!kid)
+ continue;
+ if (OBJ_GET_CLASS(cx, nameqn->object) != &js_AttributeNameClass &&
+ MatchElemName(nameqn, kid)) {
+ if (!Append(cx, list, kid))
+ return JS_FALSE;
+ }
+ if (!DescendantsHelper(cx, kid, nameqn, list))
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+
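+/*
+ * Backs the descendants accessor (x..name and the .descendants() method):
+ * build an XMLList of every attribute or element beneath xml whose name
+ * matches id, ancestors before their descendants.
+ */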
+static JSXML *
+Descendants(JSContext *cx, JSXML *xml, jsval id)
+{
+ jsid funid;
+ JSXMLQName *nameqn;
+ JSObject *listobj;
+ JSXML *list, *kid;
+ uint32 i, n;
+ JSBool ok;
+
+ nameqn = ToXMLName(cx, id, &funid);
+ if (!nameqn)
+ return NULL;
+
+ listobj = js_NewXMLObject(cx, JSXML_CLASS_LIST);
+ if (!listobj)
+ return NULL;
+ list = (JSXML *) JS_GetPrivate(cx, listobj);
+ if (funid)
+ return list;
+
+ /*
+ * Protect nameqn's object and strings from GC by linking list to it
+ * temporarily. The cx->newborn[GCX_OBJECT] GC root protects listobj,
+ * which protects list. Any other object allocations occuring beneath
+ * DescendantsHelper use local roots.
+ */
+ list->name = nameqn;
+ if (!js_EnterLocalRootScope(cx))
+ return NULL;
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ ok = JS_TRUE;
+ for (i = 0, n = xml->xml_kids.length; i < n; i++) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, i, JSXML);
+ if (kid && kid->xml_class == JSXML_CLASS_ELEMENT) {
+ ok = DescendantsHelper(cx, kid, nameqn, list);
+ if (!ok)
+ break;
+ }
+ }
+ } else {
+ ok = DescendantsHelper(cx, xml, nameqn, list);
+ }
+ js_LeaveLocalRootScopeWithResult(cx, (jsval) list);
+ if (!ok)
+ return NULL;
+ list->name = NULL;
+ return list;
+}
+
+static JSBool
+xml_equality(JSContext *cx, JSObject *obj, jsval v, JSBool *bp);
+
+/* Recursive (JSXML *) parameterized version of Equals. */
+static JSBool
+XMLEquals(JSContext *cx, JSXML *xml, JSXML *vxml, JSBool *bp)
+{
+ JSXMLQName *qn, *vqn;
+ uint32 i, j, n;
+ JSXMLArrayCursor cursor, vcursor;
+ JSXML *kid, *vkid, *attr, *vattr;
+ JSBool ok;
+ JSObject *xobj, *vobj;
+
+retry:
+ if (xml->xml_class != vxml->xml_class) {
+ if (xml->xml_class == JSXML_CLASS_LIST && xml->xml_kids.length == 1) {
+ xml = XMLARRAY_MEMBER(&xml->xml_kids, 0, JSXML);
+ if (xml)
+ goto retry;
+ }
+ if (vxml->xml_class == JSXML_CLASS_LIST && vxml->xml_kids.length == 1) {
+ vxml = XMLARRAY_MEMBER(&vxml->xml_kids, 0, JSXML);
+ if (vxml)
+ goto retry;
+ }
+ *bp = JS_FALSE;
+ return JS_TRUE;
+ }
+
+ qn = xml->name;
+ vqn = vxml->name;
+ if (qn) {
+ *bp = vqn &&
+ js_EqualStrings(qn->localName, vqn->localName) &&
+ js_EqualStrings(qn->uri, vqn->uri);
+ } else {
+ *bp = vqn == NULL;
+ }
+ if (!*bp)
+ return JS_TRUE;
+
+ if (JSXML_HAS_VALUE(xml)) {
+ *bp = js_EqualStrings(xml->xml_value, vxml->xml_value);
+ } else if (xml->xml_kids.length != vxml->xml_kids.length) {
+ *bp = JS_FALSE;
+ } else {
+ XMLArrayCursorInit(&cursor, &xml->xml_kids);
+ XMLArrayCursorInit(&vcursor, &vxml->xml_kids);
+ for (;;) {
+ kid = (JSXML *) XMLArrayCursorNext(&cursor);
+ vkid = (JSXML *) XMLArrayCursorNext(&vcursor);
+ if (!kid || !vkid) {
+ *bp = !kid && !vkid;
+ ok = JS_TRUE;
+ break;
+ }
+ xobj = js_GetXMLObject(cx, kid);
+ vobj = js_GetXMLObject(cx, vkid);
+ ok = xobj && vobj &&
+ xml_equality(cx, xobj, OBJECT_TO_JSVAL(vobj), bp);
+ if (!ok || !*bp)
+ break;
+ }
+ XMLArrayCursorFinish(&vcursor);
+ XMLArrayCursorFinish(&cursor);
+ if (!ok)
+ return JS_FALSE;
+
+ if (*bp && xml->xml_class == JSXML_CLASS_ELEMENT) {
+ n = xml->xml_attrs.length;
+ if (n != vxml->xml_attrs.length)
+ *bp = JS_FALSE;
+ for (i = 0; *bp && i < n; i++) {
+ attr = XMLARRAY_MEMBER(&xml->xml_attrs, i, JSXML);
+ if (!attr)
+ continue;
+ j = XMLARRAY_FIND_MEMBER(&vxml->xml_attrs, attr, attr_identity);
+ if (j == XML_NOT_FOUND) {
+ *bp = JS_FALSE;
+ break;
+ }
+ vattr = XMLARRAY_MEMBER(&vxml->xml_attrs, j, JSXML);
+ if (!vattr)
+ continue;
+ *bp = js_EqualStrings(attr->xml_value, vattr->xml_value);
+ }
+ }
+ }
+
+ return JS_TRUE;
+}
+
+/* ECMA-357 9.1.1.9 XML [[Equals]] and 9.2.1.9 XMLList [[Equals]]. */
+static JSBool
+Equals(JSContext *cx, JSXML *xml, jsval v, JSBool *bp)
+{
+ JSObject *vobj;
+ JSXML *vxml;
+
+ if (JSVAL_IS_PRIMITIVE(v)) {
+ *bp = JS_FALSE;
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ if (xml->xml_kids.length == 1) {
+ vxml = XMLARRAY_MEMBER(&xml->xml_kids, 0, JSXML);
+ if (!vxml)
+ return JS_TRUE;
+ vobj = js_GetXMLObject(cx, vxml);
+ if (!vobj)
+ return JS_FALSE;
+ return js_XMLObjectOps.equality(cx, vobj, v, bp);
+ }
+ if (JSVAL_IS_VOID(v) && xml->xml_kids.length == 0)
+ *bp = JS_TRUE;
+ }
+ } else {
+ vobj = JSVAL_TO_OBJECT(v);
+ if (!OBJECT_IS_XML(cx, vobj)) {
+ *bp = JS_FALSE;
+ } else {
+ vxml = (JSXML *) JS_GetPrivate(cx, vobj);
+ if (!XMLEquals(cx, xml, vxml, bp))
+ return JS_FALSE;
+ }
+ }
+ return JS_TRUE;
+}
+
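+/*
+ * Report a cyclic-value TypeError if kid is xml itself or one of xml's
+ * ancestors, since inserting kid beneath xml would then create a cycle.
+ */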
+static JSBool
+CheckCycle(JSContext *cx, JSXML *xml, JSXML *kid)
+{
+ JS_ASSERT(kid->xml_class != JSXML_CLASS_LIST);
+
+ do {
+ if (xml == kid) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_CYCLIC_VALUE, js_XML_str);
+ return JS_FALSE;
+ }
+ } while ((xml = xml->parent) != NULL);
+
+ return JS_TRUE;
+}
+
+/* ECMA-357 9.1.1.11 XML [[Insert]]. */
+static JSBool
+Insert(JSContext *cx, JSXML *xml, uint32 i, jsval v)
+{
+ uint32 j, n;
+ JSXML *vxml, *kid;
+ JSObject *vobj;
+ JSString *str;
+
+ if (!JSXML_HAS_KIDS(xml))
+ return JS_TRUE;
+
+ n = 1;
+ vxml = NULL;
+ if (!JSVAL_IS_PRIMITIVE(v)) {
+ vobj = JSVAL_TO_OBJECT(v);
+ if (OBJECT_IS_XML(cx, vobj)) {
+ vxml = (JSXML *) JS_GetPrivate(cx, vobj);
+ if (vxml->xml_class == JSXML_CLASS_LIST) {
+ n = vxml->xml_kids.length;
+ if (n == 0)
+ return JS_TRUE;
+ for (j = 0; j < n; j++) {
+ kid = XMLARRAY_MEMBER(&vxml->xml_kids, j, JSXML);
+ if (!kid)
+ continue;
+ if (!CheckCycle(cx, xml, kid))
+ return JS_FALSE;
+ }
+ } else if (vxml->xml_class == JSXML_CLASS_ELEMENT) {
+ /* OPTION: enforce that descendants have superset namespaces. */
+ if (!CheckCycle(cx, xml, vxml))
+ return JS_FALSE;
+ }
+ }
+ }
+ if (!vxml) {
+ str = js_ValueToString(cx, v);
+ if (!str)
+ return JS_FALSE;
+
+ vxml = js_NewXML(cx, JSXML_CLASS_TEXT);
+ if (!vxml)
+ return JS_FALSE;
+ vxml->xml_value = str;
+ }
+
+ if (i > xml->xml_kids.length)
+ i = xml->xml_kids.length;
+
+ if (!XMLArrayInsert(cx, &xml->xml_kids, i, n))
+ return JS_FALSE;
+
+ if (vxml->xml_class == JSXML_CLASS_LIST) {
+ for (j = 0; j < n; j++) {
+ kid = XMLARRAY_MEMBER(&vxml->xml_kids, j, JSXML);
+ if (!kid)
+ continue;
+ kid->parent = xml;
+ XMLARRAY_SET_MEMBER(&xml->xml_kids, i + j, kid);
+
+ /* OPTION: enforce that descendants have superset namespaces. */
+ }
+ } else {
+ vxml->parent = xml;
+ XMLARRAY_SET_MEMBER(&xml->xml_kids, i, vxml);
+ }
+ return JS_TRUE;
+}
+
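+/*
+ * Convert an array index to a jsval id: a tagged int when it fits in
+ * JSVAL_INT_MAX, otherwise a numeric string.
+ */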
+static JSBool
+IndexToIdVal(JSContext *cx, uint32 index, jsval *idvp)
+{
+ JSString *str;
+
+ if (index <= JSVAL_INT_MAX) {
+ *idvp = INT_TO_JSVAL(index);
+ } else {
+ str = js_NumberToString(cx, (jsdouble) index);
+ if (!str)
+ return JS_FALSE;
+ *idvp = STRING_TO_JSVAL(str);
+ }
+ return JS_TRUE;
+}
+
+/* ECMA-357 9.1.1.12 XML [[Replace]]. */
+static JSBool
+Replace(JSContext *cx, JSXML *xml, jsval id, jsval v)
+{
+ uint32 i, n;
+ JSXML *vxml, *kid;
+ JSObject *vobj;
+ jsval junk;
+ JSString *str;
+
+ if (!JSXML_HAS_KIDS(xml))
+ return JS_TRUE;
+
+ if (!js_IdIsIndex(id, &i)) {
+ ReportBadXMLName(cx, id);
+ return JS_FALSE;
+ }
+
+ /*
+ * 9.1.1.12
+     * [[Replace]] handles _i >= x.[[Length]]_ by incrementing _x.[[Length]]_.
+ * It should therefore constrain callers to pass in _i <= x.[[Length]]_.
+ */
+ n = xml->xml_kids.length;
+ if (i >= n) {
+ if (!IndexToIdVal(cx, n, &id))
+ return JS_FALSE;
+ i = n;
+ }
+
+ vxml = NULL;
+ if (!JSVAL_IS_PRIMITIVE(v)) {
+ vobj = JSVAL_TO_OBJECT(v);
+ if (OBJECT_IS_XML(cx, vobj))
+ vxml = (JSXML *) JS_GetPrivate(cx, vobj);
+ }
+
+ switch (vxml ? vxml->xml_class : JSXML_CLASS_LIMIT) {
+ case JSXML_CLASS_ELEMENT:
+ /* OPTION: enforce that descendants have superset namespaces. */
+ if (!CheckCycle(cx, xml, vxml))
+ return JS_FALSE;
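+        /* FALL THROUGH */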
+ case JSXML_CLASS_COMMENT:
+ case JSXML_CLASS_PROCESSING_INSTRUCTION:
+ case JSXML_CLASS_TEXT:
+ goto do_replace;
+
+ case JSXML_CLASS_LIST:
+ if (i < n && !DeleteByIndex(cx, xml, id, &junk))
+ return JS_FALSE;
+ if (!Insert(cx, xml, i, v))
+ return JS_FALSE;
+ break;
+
+ default:
+ str = js_ValueToString(cx, v);
+ if (!str)
+ return JS_FALSE;
+
+ vxml = js_NewXML(cx, JSXML_CLASS_TEXT);
+ if (!vxml)
+ return JS_FALSE;
+ vxml->xml_value = str;
+
+ do_replace:
+ vxml->parent = xml;
+ if (i < n) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, i, JSXML);
+ if (kid)
+ kid->parent = NULL;
+ }
+ if (!XMLARRAY_ADD_MEMBER(cx, &xml->xml_kids, i, vxml))
+ return JS_FALSE;
+ break;
+ }
+
+ return JS_TRUE;
+}
+
+/* Forward declared -- its implementation uses other statics that call it. */
+static JSBool
+ResolveValue(JSContext *cx, JSXML *list, JSXML **result);
+
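+/*
+ * For example (illustrative): "delete x.kid" removes every element kid named
+ * kid, "delete x.@id" removes matching attributes, and "delete list[0]"
+ * removes the first list item and also removes it from its parent element.
+ */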
+/* ECMA-357 9.1.1.3 XML [[Delete]], 9.2.1.3 XMLList [[Delete]]. */
+static JSBool
+DeleteProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSXML *xml, *kid, *parent;
+ JSBool isIndex;
+ JSXMLArray *array;
+ uint32 length, index, kidIndex, deleteCount;
+ JSXMLQName *nameqn;
+ jsid funid;
+ JSObject *nameobj, *kidobj;
+ JSXMLNameMatcher matcher;
+
+ xml = (JSXML *) JS_GetPrivate(cx, obj);
+ isIndex = js_IdIsIndex(id, &index);
+ if (JSXML_HAS_KIDS(xml)) {
+ array = &xml->xml_kids;
+ length = array->length;
+ } else {
+ array = NULL;
+ length = 0;
+ }
+
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ /* ECMA-357 9.2.1.3. */
+ if (isIndex && index < length) {
+ kid = XMLARRAY_MEMBER(array, index, JSXML);
+ if (!kid)
+ goto out;
+ parent = kid->parent;
+ if (parent) {
+ JS_ASSERT(parent != xml);
+ JS_ASSERT(JSXML_HAS_KIDS(parent));
+
+ if (kid->xml_class == JSXML_CLASS_ATTRIBUTE) {
+ nameqn = kid->name;
+ nameobj = js_GetAttributeNameObject(cx, nameqn);
+ if (!nameobj || !js_GetXMLObject(cx, parent))
+ return JS_FALSE;
+
+ id = OBJECT_TO_JSVAL(nameobj);
+ if (!DeleteProperty(cx, parent->object, id, vp))
+ return JS_FALSE;
+ } else {
+ kidIndex = XMLARRAY_FIND_MEMBER(&parent->xml_kids, kid,
+ NULL);
+ JS_ASSERT(kidIndex != XML_NOT_FOUND);
+ if (!IndexToIdVal(cx, kidIndex, &id))
+ return JS_FALSE;
+ if (!DeleteByIndex(cx, parent, id, vp))
+ return JS_FALSE;
+ }
+ }
+
+ XMLArrayDelete(cx, array, index, JS_TRUE);
+ } else {
+ for (index = 0; index < length; index++) {
+ kid = XMLARRAY_MEMBER(array, index, JSXML);
+ if (kid && kid->xml_class == JSXML_CLASS_ELEMENT) {
+ kidobj = js_GetXMLObject(cx, kid);
+ if (!kidobj || !DeleteProperty(cx, kidobj, id, vp))
+ return JS_FALSE;
+ }
+ }
+ }
+ } else {
+ /* ECMA-357 9.1.1.3. */
+ if (isIndex) {
+ /* See NOTE in spec: this variation is reserved for future use. */
+ ReportBadXMLName(cx, id);
+ return JS_FALSE;
+ }
+
+ nameqn = ToXMLName(cx, id, &funid);
+ if (!nameqn)
+ return JS_FALSE;
+ if (funid)
+ goto out;
+ nameobj = nameqn->object;
+
+ if (OBJ_GET_CLASS(cx, nameobj) == &js_AttributeNameClass) {
+ if (xml->xml_class != JSXML_CLASS_ELEMENT)
+ goto out;
+ array = &xml->xml_attrs;
+ length = array->length;
+ matcher = MatchAttrName;
+ } else {
+ matcher = MatchElemName;
+ }
+ if (length != 0) {
+ deleteCount = 0;
+ for (index = 0; index < length; index++) {
+ kid = XMLARRAY_MEMBER(array, index, JSXML);
+ if (kid && matcher(nameqn, kid)) {
+ kid->parent = NULL;
+ XMLArrayDelete(cx, array, index, JS_FALSE);
+ ++deleteCount;
+ } else if (deleteCount != 0) {
+ XMLARRAY_SET_MEMBER(array,
+ index - deleteCount,
+ array->vector[index]);
+ }
+ }
+ array->length -= deleteCount;
+ }
+ }
+
+out:
+ *vp = JSVAL_TRUE;
+ return JS_TRUE;
+}
+
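+/*
+ * Copy every namespace declared on xml's ancestors into xml's own in-scope
+ * namespace array, skipping duplicates, so a kid handed out by [[Get]]
+ * keeps the declarations it inherited.
+ */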
+static JSBool
+SyncInScopeNamespaces(JSContext *cx, JSXML *xml)
+{
+ JSXMLArray *nsarray;
+ uint32 i, n;
+ JSXMLNamespace *ns;
+
+ nsarray = &xml->xml_namespaces;
+ while ((xml = xml->parent) != NULL) {
+ for (i = 0, n = xml->xml_namespaces.length; i < n; i++) {
+ ns = XMLARRAY_MEMBER(&xml->xml_namespaces, i, JSXMLNamespace);
+ if (ns && !XMLARRAY_HAS_MEMBER(nsarray, ns, namespace_identity)) {
+ if (!XMLARRAY_APPEND(cx, nsarray, ns))
+ return JS_FALSE;
+ }
+ }
+ }
+ return JS_TRUE;
+}
+
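+/*
+ * Append to list each attribute (attributes == JS_TRUE) or kid of xml whose
+ * name matches nameqn; matching element kids get their inherited namespaces
+ * synced first.
+ */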
+static JSBool
+GetNamedProperty(JSContext *cx, JSXML *xml, JSXMLQName* nameqn,
+ JSBool attributes, JSXML *list)
+{
+ JSXMLArray *array;
+ JSXMLNameMatcher matcher;
+ JSXMLArrayCursor cursor;
+ JSXML *kid;
+ JSBool ok;
+
+ if (!JSXML_HAS_KIDS(xml))
+ return JS_TRUE;
+
+ if (attributes) {
+ array = &xml->xml_attrs;
+ matcher = MatchAttrName;
+ } else {
+ array = &xml->xml_kids;
+ matcher = MatchElemName;
+ }
+
+ XMLArrayCursorInit(&cursor, array);
+ while ((kid = (JSXML *) XMLArrayCursorNext(&cursor)) != NULL) {
+ if (matcher(nameqn, kid)) {
+ if (!attributes && kid->xml_class == JSXML_CLASS_ELEMENT) {
+ ok = SyncInScopeNamespaces(cx, kid);
+ if (!ok)
+ goto out;
+ }
+ ok = Append(cx, list, kid);
+ if (!ok)
+ goto out;
+ }
+ }
+ ok = JS_TRUE;
+
+ out:
+ XMLArrayCursorFinish(&cursor);
+ return ok;
+}
+
+/* ECMA-357 9.1.1.1 XML [[Get]] and 9.2.1.1 XMLList [[Get]]. */
+static JSBool
+GetProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSXML *xml, *list, *kid;
+ uint32 index;
+ JSObject *kidobj, *listobj;
+ JSXMLQName *nameqn;
+ jsid funid;
+ jsval roots[2];
+ JSTempValueRooter tvr;
+ JSBool attributes;
+ JSXMLArrayCursor cursor;
+
+ xml = (JSXML *) JS_GetInstancePrivate(cx, obj, &js_XMLClass, NULL);
+ if (!xml)
+ return JS_TRUE;
+
+ if (js_IdIsIndex(id, &index)) {
+ if (xml->xml_class != JSXML_CLASS_LIST) {
+ *vp = (index == 0) ? OBJECT_TO_JSVAL(obj) : JSVAL_VOID;
+ } else {
+ /*
+ * ECMA-357 9.2.1.1 starts here.
+ *
+ * Erratum: 9.2 is not completely clear that indexed properties
+ * correspond to kids, but that's what it seems to say, and it's
+ * what any sane user would want.
+ */
+ if (index < xml->xml_kids.length) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, index, JSXML);
+ if (!kid) {
+ *vp = JSVAL_VOID;
+ return JS_TRUE;
+ }
+ kidobj = js_GetXMLObject(cx, kid);
+ if (!kidobj)
+ return JS_FALSE;
+
+ *vp = OBJECT_TO_JSVAL(kidobj);
+ } else {
+ *vp = JSVAL_VOID;
+ }
+ }
+ return JS_TRUE;
+ }
+
+ /*
+ * ECMA-357 9.2.1.1/9.1.1.1 qname case.
+ */
+ nameqn = ToXMLName(cx, id, &funid);
+ if (!nameqn)
+ return JS_FALSE;
+ if (funid)
+ return js_GetXMLFunction(cx, obj, funid, vp);
+
+ roots[0] = OBJECT_TO_JSVAL(nameqn->object);
+ JS_PUSH_TEMP_ROOT(cx, 1, roots, &tvr);
+
+ listobj = js_NewXMLObject(cx, JSXML_CLASS_LIST);
+ if (listobj) {
+ roots[1] = OBJECT_TO_JSVAL(listobj);
+ tvr.count++;
+
+ list = (JSXML *) JS_GetPrivate(cx, listobj);
+ attributes = (OBJ_GET_CLASS(cx, nameqn->object) ==
+ &js_AttributeNameClass);
+
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ XMLArrayCursorInit(&cursor, &xml->xml_kids);
+ while ((kid = (JSXML *) XMLArrayCursorNext(&cursor)) != NULL) {
+ if (kid->xml_class == JSXML_CLASS_ELEMENT &&
+ !GetNamedProperty(cx, kid, nameqn, attributes, list)) {
+ listobj = NULL;
+ break;
+ }
+ }
+ XMLArrayCursorFinish(&cursor);
+ } else {
+ if (!GetNamedProperty(cx, xml, nameqn, attributes, list))
+ listobj = NULL;
+ }
+
+ /*
+ * Erratum: ECMA-357 9.1.1.1 misses that [[Append]] sets the given
+ * list's [[TargetProperty]] to the property that is being appended.
+ * This means that any use of the internal [[Get]] property returns
+ * a list which, when used by e.g. [[Insert]] duplicates the last
+ * element matched by id.
+ * See bug 336921.
+ */
+ list->xml_target = xml;
+ list->xml_targetprop = nameqn;
+ *vp = OBJECT_TO_JSVAL(listobj);
+ }
+
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return listobj != NULL;
+}
+
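+/*
+ * Copy-on-write support: when obj's private JSXML is still owned by another
+ * object, deep-copy it and bind the copy to obj, so mutators such as
+ * PutProperty never modify shared XML data.
+ */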
+static JSXML *
+CopyOnWrite(JSContext *cx, JSXML *xml, JSObject *obj)
+{
+ JS_ASSERT(xml->object != obj);
+
+ xml = DeepCopy(cx, xml, obj, 0);
+ if (!xml)
+ return NULL;
+
+ JS_ASSERT(xml->object == obj);
+ return xml;
+}
+
+#define CHECK_COPY_ON_WRITE(cx,xml,obj) \
+ (xml->object == obj ? xml : CopyOnWrite(cx, xml, obj))
+
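+/*
+ * ToString of the index'th kid's reflected object, or the empty string when
+ * that array slot is a hole.
+ */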
+static JSString *
+KidToString(JSContext *cx, JSXML *xml, uint32 index)
+{
+ JSXML *kid;
+ JSObject *kidobj;
+
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, index, JSXML);
+ if (!kid)
+ return cx->runtime->emptyString;
+ kidobj = js_GetXMLObject(cx, kid);
+ if (!kidobj)
+ return NULL;
+ return js_ValueToString(cx, OBJECT_TO_JSVAL(kidobj));
+}
+
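+/*
+ * A few illustrative entry points into the cases below: "list[0] = v" takes
+ * the 9.2.1.2 index branch, "x.@id = v" takes the attribute branch at step 7
+ * of 9.1.1.2, and "x.kid = 'text'" takes the primitive-assign branch at
+ * steps 12-14.
+ */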
+/* ECMA-357 9.1.1.2 XML [[Put]] and 9.2.1.2 XMLList [[Put]]. */
+static JSBool
+PutProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSBool ok, primitiveAssign;
+ enum { OBJ_ROOT, ID_ROOT, VAL_ROOT };
+ jsval roots[3];
+ JSTempValueRooter tvr;
+ JSXML *xml, *vxml, *rxml, *kid, *attr, *parent, *copy, *kid2, *match;
+ JSObject *vobj, *nameobj, *attrobj, *parentobj, *kidobj, *copyobj;
+ JSXMLQName *targetprop, *nameqn, *attrqn;
+ uint32 index, i, j, k, n, q;
+ jsval attrval, nsval, junk;
+ jsid funid;
+ JSString *left, *right, *space;
+ JSXMLNamespace *ns;
+
+ xml = (JSXML *) JS_GetInstancePrivate(cx, obj, &js_XMLClass, NULL);
+ if (!xml)
+ return JS_TRUE;
+
+ xml = CHECK_COPY_ON_WRITE(cx, xml, obj);
+ if (!xml)
+ return JS_FALSE;
+
+ /* Precompute vxml for 9.2.1.2 2(c)(vii)(2-3) and 2(d) and 9.1.1.2 1. */
+ vxml = NULL;
+ if (!JSVAL_IS_PRIMITIVE(*vp)) {
+ vobj = JSVAL_TO_OBJECT(*vp);
+ if (OBJECT_IS_XML(cx, vobj))
+ vxml = (JSXML *) JS_GetPrivate(cx, vobj);
+ }
+
+ /* Control flow after here must exit via label out. */
+ ok = js_EnterLocalRootScope(cx);
+ if (!ok)
+ return JS_FALSE;
+ roots[OBJ_ROOT] = OBJECT_TO_JSVAL(obj);
+ roots[ID_ROOT] = id;
+ roots[VAL_ROOT] = *vp;
+ JS_PUSH_TEMP_ROOT(cx, 3, roots, &tvr);
+
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ /* ECMA-357 9.2.1.2. */
+ if (js_IdIsIndex(id, &index)) {
+ /* Step 1 sets i to the property index. */
+ i = index;
+
+ /* 2(a-b). */
+ if (xml->xml_target) {
+ ok = ResolveValue(cx, xml->xml_target, &rxml);
+ if (!ok)
+ goto out;
+ if (!rxml)
+ goto out;
+ JS_ASSERT(rxml->object);
+ } else {
+ rxml = NULL;
+ }
+
+ /* 2(c). */
+ if (index >= xml->xml_kids.length) {
+ /* 2(c)(i). */
+ if (rxml) {
+ if (rxml->xml_class == JSXML_CLASS_LIST) {
+ if (rxml->xml_kids.length != 1)
+ goto out;
+ rxml = XMLARRAY_MEMBER(&rxml->xml_kids, 0, JSXML);
+ if (!rxml)
+ goto out;
+ ok = js_GetXMLObject(cx, rxml) != NULL;
+ if (!ok)
+ goto out;
+ }
+
+ /*
+ * Erratum: ECMA-357 9.2.1.2 step 2(c)(ii) sets
+ * _y.[[Parent]] = r_ where _r_ is the result of
+ * [[ResolveValue]] called on _x.[[TargetObject]] in
+ * 2(a)(i). This can result in text parenting text:
+ *
+ * var MYXML = new XML();
+ * MYXML.appendChild(new XML("<TEAM>Giants</TEAM>"));
+ *
+ * (testcase from Werner Sharp <wsharp@macromedia.com>).
+ *
+ * To match insertChildAfter, insertChildBefore,
+ * prependChild, and setChildren, we should silently
+ * do nothing in this case.
+ */
+ if (!JSXML_HAS_KIDS(rxml))
+ goto out;
+ }
+
+ /* 2(c)(ii) is distributed below as several js_NewXML calls. */
+ targetprop = xml->xml_targetprop;
+ if (!targetprop || IS_STAR(targetprop->localName)) {
+ /* 2(c)(iv)(1-2), out of order w.r.t. 2(c)(iii). */
+ kid = js_NewXML(cx, JSXML_CLASS_TEXT);
+ if (!kid)
+ goto bad;
+ } else {
+ nameobj = js_GetXMLQNameObject(cx, targetprop);
+ if (!nameobj)
+ goto bad;
+ if (OBJ_GET_CLASS(cx, nameobj) == &js_AttributeNameClass) {
+ /*
+ * 2(c)(iii)(1-3).
+ * Note that rxml can't be null here, because target
+ * and targetprop are non-null.
+ */
+ ok = GetProperty(cx, rxml->object, id, &attrval);
+ if (!ok)
+ goto out;
+ if (JSVAL_IS_PRIMITIVE(attrval)) /* no such attribute */
+ goto out;
+ attrobj = JSVAL_TO_OBJECT(attrval);
+ attr = (JSXML *) JS_GetPrivate(cx, attrobj);
+ if (JSXML_LENGTH(attr) != 0)
+ goto out;
+
+ kid = js_NewXML(cx, JSXML_CLASS_ATTRIBUTE);
+ } else {
+ /* 2(c)(v). */
+ kid = js_NewXML(cx, JSXML_CLASS_ELEMENT);
+ }
+ if (!kid)
+ goto bad;
+
+ /* An important bit of 2(c)(ii). */
+ kid->name = targetprop;
+ }
+
+ /* Final important bit of 2(c)(ii). */
+ kid->parent = rxml;
+
+ /* 2(c)(vi-vii). */
+ i = xml->xml_kids.length;
+ if (kid->xml_class != JSXML_CLASS_ATTRIBUTE) {
+ /*
+ * 2(c)(vii)(1) tests whether _y.[[Parent]]_ is not null.
+ * y.[[Parent]] is here called kid->parent, which we know
+ * from 2(c)(ii) is _r_, here called rxml. So let's just
+ * test that! Erratum, the spec should be simpler here.
+ */
+ if (rxml) {
+ JS_ASSERT(JSXML_HAS_KIDS(rxml));
+ n = rxml->xml_kids.length;
+ j = n - 1;
+ if (n != 0 && i != 0) {
+ for (n = j, j = 0; j < n; j++) {
+ if (rxml->xml_kids.vector[j] ==
+ xml->xml_kids.vector[i-1]) {
+ break;
+ }
+ }
+ }
+
+ kidobj = js_GetXMLObject(cx, kid);
+ if (!kidobj)
+ goto bad;
+ ok = Insert(cx, rxml, j + 1, OBJECT_TO_JSVAL(kidobj));
+ if (!ok)
+ goto out;
+ }
+
+ /*
+ * 2(c)(vii)(2-3).
+ * Erratum: [[PropertyName]] in 2(c)(vii)(3) must be a
+ * typo for [[TargetProperty]].
+ */
+ if (vxml) {
+ kid->name = (vxml->xml_class == JSXML_CLASS_LIST)
+ ? vxml->xml_targetprop
+ : vxml->name;
+ }
+ }
+
+ /* 2(c)(viii). */
+ ok = Append(cx, xml, kid);
+ if (!ok)
+ goto out;
+ }
+
+ /* 2(d). */
+ if (!vxml ||
+ vxml->xml_class == JSXML_CLASS_TEXT ||
+ vxml->xml_class == JSXML_CLASS_ATTRIBUTE) {
+ ok = JS_ConvertValue(cx, *vp, JSTYPE_STRING, vp);
+ if (!ok)
+ goto out;
+ roots[VAL_ROOT] = *vp;
+ }
+
+ /* 2(e). */
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, i, JSXML);
+ if (!kid)
+ goto out;
+ parent = kid->parent;
+ if (kid->xml_class == JSXML_CLASS_ATTRIBUTE) {
+ nameobj = js_GetAttributeNameObject(cx, kid->name);
+ if (!nameobj)
+ goto bad;
+ id = OBJECT_TO_JSVAL(nameobj);
+
+ if (parent) {
+ /* 2(e)(i). */
+ parentobj = js_GetXMLObject(cx, parent);
+ if (!parentobj)
+ goto bad;
+ ok = PutProperty(cx, parentobj, id, vp);
+ if (!ok)
+ goto out;
+
+ /* 2(e)(ii). */
+ ok = GetProperty(cx, parentobj, id, vp);
+ if (!ok)
+ goto out;
+ attr = (JSXML *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(*vp));
+
+ /* 2(e)(iii). */
+ xml->xml_kids.vector[i] = attr->xml_kids.vector[0];
+ }
+ }
+
+ /* 2(f). */
+ else if (vxml && vxml->xml_class == JSXML_CLASS_LIST) {
+ /* 2(f)(i) Create a shallow copy _c_ of _V_. */
+ copyobj = js_NewXMLObject(cx, JSXML_CLASS_LIST);
+ if (!copyobj)
+ goto bad;
+ copy = (JSXML *) JS_GetPrivate(cx, copyobj);
+ n = vxml->xml_kids.length;
+ ok = XMLArraySetCapacity(cx, &copy->xml_kids, n);
+ if (!ok)
+ goto out;
+ for (k = 0; k < n; k++) {
+ kid2 = XMLARRAY_MEMBER(&vxml->xml_kids, k, JSXML);
+ XMLARRAY_SET_MEMBER(&copy->xml_kids, k, kid2);
+ }
+
+ JS_ASSERT(parent != xml);
+ if (parent) {
+ q = XMLARRAY_FIND_MEMBER(&parent->xml_kids, kid, NULL);
+ JS_ASSERT(q != XML_NOT_FOUND);
+
+ ok = IndexToIdVal(cx, q, &id);
+ if (!ok)
+ goto out;
+ ok = Replace(cx, parent, id, OBJECT_TO_JSVAL(copyobj));
+ if (!ok)
+ goto out;
+
+#ifdef DEBUG
+ /* Erratum: this loop in the spec is useless. */
+ for (j = 0, n = copy->xml_kids.length; j < n; j++) {
+ kid2 = XMLARRAY_MEMBER(&parent->xml_kids, q + j, JSXML);
+ JS_ASSERT(XMLARRAY_MEMBER(&copy->xml_kids, j, JSXML)
+ == kid2);
+ }
+#endif
+ }
+
+ /*
+ * 2(f)(iv-vi).
+ * Erratum: notice the unhandled zero-length V basis case and
+ * the off-by-one errors for the n != 0 cases in the spec.
+ */
+ if (n == 0) {
+ XMLArrayDelete(cx, &xml->xml_kids, i, JS_TRUE);
+ } else {
+ ok = XMLArrayInsert(cx, &xml->xml_kids, i + 1, n - 1);
+ if (!ok)
+ goto out;
+
+ for (j = 0; j < n; j++)
+ xml->xml_kids.vector[i + j] = copy->xml_kids.vector[j];
+ }
+ }
+
+ /* 2(g). */
+ else if (vxml || JSXML_HAS_VALUE(kid)) {
+ if (parent) {
+ q = XMLARRAY_FIND_MEMBER(&parent->xml_kids, kid, NULL);
+ JS_ASSERT(q != XML_NOT_FOUND);
+
+ ok = IndexToIdVal(cx, q, &id);
+ if (!ok)
+ goto out;
+ ok = Replace(cx, parent, id, *vp);
+ if (!ok)
+ goto out;
+
+ vxml = XMLARRAY_MEMBER(&parent->xml_kids, q, JSXML);
+ if (!vxml)
+ goto out;
+ roots[VAL_ROOT] = *vp = OBJECT_TO_JSVAL(vxml->object);
+ }
+
+ /*
+ * 2(g)(iii).
+ * Erratum: _V_ may not be of type XML, but all index-named
+ * properties _x[i]_ in an XMLList _x_ must be of type XML,
+ * according to 9.2.1.1 Overview and other places in the spec.
+ *
+ * Thanks to 2(d), we know _V_ (*vp here) is either a string
+ * or an XML/XMLList object. If *vp is a string, call ToXML
+ * on it to satisfy the constraint.
+ */
+ if (!vxml) {
+ JS_ASSERT(JSVAL_IS_STRING(*vp));
+ vobj = ToXML(cx, *vp);
+ if (!vobj)
+ goto bad;
+ roots[VAL_ROOT] = *vp = OBJECT_TO_JSVAL(vobj);
+ vxml = (JSXML *) JS_GetPrivate(cx, vobj);
+ }
+ XMLARRAY_SET_MEMBER(&xml->xml_kids, i, vxml);
+ }
+
+ /* 2(h). */
+ else {
+ kidobj = js_GetXMLObject(cx, kid);
+ if (!kidobj)
+ goto bad;
+ id = ATOM_KEY(cx->runtime->atomState.starAtom);
+ ok = PutProperty(cx, kidobj, id, vp);
+ if (!ok)
+ goto out;
+ }
+ } else {
+ /*
+ * 3.
+ * Erratum: if x.[[Length]] > 1 or [[ResolveValue]] returns null
+ * or an r with r.[[Length]] != 1, throw TypeError.
+ */
+ n = JSXML_LENGTH(xml);
+ if (n > 1)
+ goto type_error;
+ if (n == 0) {
+ ok = ResolveValue(cx, xml, &rxml);
+ if (!ok)
+ goto out;
+ if (!rxml || JSXML_LENGTH(rxml) != 1)
+ goto type_error;
+ ok = Append(cx, xml, rxml);
+ if (!ok)
+ goto out;
+ }
+ JS_ASSERT(JSXML_LENGTH(xml) == 1);
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, 0, JSXML);
+ if (!kid)
+ goto out;
+ kidobj = js_GetXMLObject(cx, kid);
+ if (!kidobj)
+ goto bad;
+ ok = PutProperty(cx, kidobj, id, vp);
+ if (!ok)
+ goto out;
+ }
+ } else {
+ /*
+ * ECMA-357 9.1.1.2.
+ * Erratum: move steps 3 and 4 to before 1 and 2, to avoid wasted
+ * effort in ToString or [[DeepCopy]].
+ */
+ if (js_IdIsIndex(id, &index)) {
+ /* See NOTE in spec: this variation is reserved for future use. */
+ ReportBadXMLName(cx, id);
+ goto bad;
+ }
+
+ nameqn = ToXMLName(cx, id, &funid);
+ if (!nameqn)
+ goto bad;
+ if (funid) {
+ ok = js_SetProperty(cx, obj, funid, vp);
+ goto out;
+ }
+ nameobj = nameqn->object;
+
+ if (JSXML_HAS_VALUE(xml))
+ goto out;
+
+ if (!vxml ||
+ vxml->xml_class == JSXML_CLASS_TEXT ||
+ vxml->xml_class == JSXML_CLASS_ATTRIBUTE) {
+ ok = JS_ConvertValue(cx, *vp, JSTYPE_STRING, vp);
+ if (!ok)
+ goto out;
+ } else {
+ rxml = DeepCopyInLRS(cx, vxml, 0);
+ if (!rxml || !js_GetXMLObject(cx, rxml))
+ goto bad;
+ vxml = rxml;
+ *vp = OBJECT_TO_JSVAL(vxml->object);
+ }
+ roots[VAL_ROOT] = *vp;
+
+ /*
+ * 6.
+ * Erratum: why is this done here, so early? use is way later....
+ */
+ ok = js_GetDefaultXMLNamespace(cx, &nsval);
+ if (!ok)
+ goto out;
+
+ if (OBJ_GET_CLASS(cx, nameobj) == &js_AttributeNameClass) {
+ /* 7(a). */
+ if (!js_IsXMLName(cx, OBJECT_TO_JSVAL(nameobj)))
+ goto out;
+
+ /* 7(b-c). */
+ if (vxml && vxml->xml_class == JSXML_CLASS_LIST) {
+ n = vxml->xml_kids.length;
+ if (n == 0) {
+ *vp = STRING_TO_JSVAL(cx->runtime->emptyString);
+ } else {
+ left = KidToString(cx, vxml, 0);
+ if (!left)
+ goto bad;
+
+ space = ATOM_TO_STRING(cx->runtime->atomState.spaceAtom);
+ for (i = 1; i < n; i++) {
+ left = js_ConcatStrings(cx, left, space);
+ if (!left)
+ goto bad;
+ right = KidToString(cx, vxml, i);
+ if (!right)
+ goto bad;
+ left = js_ConcatStrings(cx, left, right);
+ if (!left)
+ goto bad;
+ }
+
+ roots[VAL_ROOT] = *vp = STRING_TO_JSVAL(left);
+ }
+ } else {
+ ok = JS_ConvertValue(cx, *vp, JSTYPE_STRING, vp);
+ if (!ok)
+ goto out;
+ roots[VAL_ROOT] = *vp;
+ }
+
+ /* 7(d-e). */
+ match = NULL;
+ for (i = 0, n = xml->xml_attrs.length; i < n; i++) {
+ attr = XMLARRAY_MEMBER(&xml->xml_attrs, i, JSXML);
+ if (!attr)
+ continue;
+ attrqn = attr->name;
+ if (js_EqualStrings(attrqn->localName, nameqn->localName) &&
+ (!nameqn->uri ||
+ js_EqualStrings(attrqn->uri, nameqn->uri))) {
+ if (!match) {
+ match = attr;
+ } else {
+ nameobj = js_GetAttributeNameObject(cx, attrqn);
+ if (!nameobj)
+ goto bad;
+
+ id = OBJECT_TO_JSVAL(nameobj);
+ ok = DeleteProperty(cx, obj, id, &junk);
+ if (!ok)
+ goto out;
+ --i;
+ }
+ }
+ }
+
+ /* 7(f). */
+ attr = match;
+ if (!attr) {
+ /* 7(f)(i-ii). */
+ if (!nameqn->uri) {
+ left = right = cx->runtime->emptyString;
+ } else {
+ left = nameqn->uri;
+ right = nameqn->prefix;
+ }
+ nameqn = js_NewXMLQName(cx, left, right, nameqn->localName);
+ if (!nameqn)
+ goto bad;
+
+ /* 7(f)(iii). */
+ attr = js_NewXML(cx, JSXML_CLASS_ATTRIBUTE);
+ if (!attr)
+ goto bad;
+ attr->parent = xml;
+ attr->name = nameqn;
+
+ /* 7(f)(iv). */
+ ok = XMLARRAY_ADD_MEMBER(cx, &xml->xml_attrs, n, attr);
+ if (!ok)
+ goto out;
+
+ /* 7(f)(v-vi). */
+ ns = GetNamespace(cx, nameqn, NULL);
+ if (!ns)
+ goto bad;
+ ok = AddInScopeNamespace(cx, xml, ns);
+ if (!ok)
+ goto out;
+ }
+
+ /* 7(g). */
+ attr->xml_value = JSVAL_TO_STRING(*vp);
+ goto out;
+ }
+
+ /* 8-9. */
+ if (!js_IsXMLName(cx, OBJECT_TO_JSVAL(nameobj)) &&
+ !IS_STAR(nameqn->localName)) {
+ goto out;
+ }
+
+ /* 10-11. */
+ id = JSVAL_VOID;
+ primitiveAssign = !vxml && !IS_STAR(nameqn->localName);
+
+ /* 12. */
+ k = n = xml->xml_kids.length;
+ kid2 = NULL;
+ while (k != 0) {
+ --k;
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, k, JSXML);
+ if (kid && MatchElemName(nameqn, kid)) {
+ if (!JSVAL_IS_VOID(id)) {
+ ok = DeleteByIndex(cx, xml, id, &junk);
+ if (!ok)
+ goto out;
+ }
+ ok = IndexToIdVal(cx, k, &id);
+ if (!ok)
+ goto out;
+ kid2 = kid;
+ }
+ }
+
+ /*
+ * Erratum: ECMA-357 specified child insertion inconsistently:
+ * insertChildBefore and insertChildAfter insert an arbitrary XML
+ * instance, and therefore can create cycles, but appendChild as
+ * specified by the "Overview" of 13.4.4.3 calls [[DeepCopy]] on
+ * its argument. But the "Semantics" in 13.4.4.3 do not include
+ * any [[DeepCopy]] call.
+ *
+ * Fixing this (https://bugzilla.mozilla.org/show_bug.cgi?id=312692)
+ * required adding cycle detection, and allowing duplicate kids to
+ * be created (see comment 6 in the bug). Allowing duplicate kid
+ * references means the loop above will delete all but the lowest
+ * indexed reference, and each [[DeleteByIndex]] nulls the kid's
+ * parent. Thus the need to restore parent here. This is covered
+ * by https://bugzilla.mozilla.org/show_bug.cgi?id=327564.
+ */
+ if (kid2) {
+ JS_ASSERT(kid2->parent == xml || !kid2->parent);
+ if (!kid2->parent)
+ kid2->parent = xml;
+ }
+
+ /* 13. */
+ if (JSVAL_IS_VOID(id)) {
+ /* 13(a). */
+ ok = IndexToIdVal(cx, n, &id);
+ if (!ok)
+ goto out;
+
+ /* 13(b). */
+ if (primitiveAssign) {
+ if (!nameqn->uri) {
+ ns = (JSXMLNamespace *)
+ JS_GetPrivate(cx, JSVAL_TO_OBJECT(nsval));
+ left = ns->uri;
+ right = ns->prefix;
+ } else {
+ left = nameqn->uri;
+ right = nameqn->prefix;
+ }
+ nameqn = js_NewXMLQName(cx, left, right, nameqn->localName);
+ if (!nameqn)
+ goto bad;
+
+ /* 13(b)(iii). */
+ vobj = js_NewXMLObject(cx, JSXML_CLASS_ELEMENT);
+ if (!vobj)
+ goto bad;
+ vxml = (JSXML *) JS_GetPrivate(cx, vobj);
+ vxml->parent = xml;
+ vxml->name = nameqn;
+
+ /* 13(b)(iv-vi). */
+ ns = GetNamespace(cx, nameqn, NULL);
+ if (!ns)
+ goto bad;
+ ok = Replace(cx, xml, id, OBJECT_TO_JSVAL(vobj));
+ if (!ok)
+ goto out;
+ ok = AddInScopeNamespace(cx, vxml, ns);
+ if (!ok)
+ goto out;
+ }
+ }
+
+ /* 14. */
+ if (primitiveAssign) {
+ JSXMLArrayCursor cursor;
+
+ js_IdIsIndex(id, &index);
+ XMLArrayCursorInit(&cursor, &xml->xml_kids);
+ cursor.index = index;
+ kid = (JSXML *) XMLArrayCursorItem(&cursor);
+ if (JSXML_HAS_KIDS(kid)) {
+ XMLArrayFinish(cx, &kid->xml_kids);
+ ok = XMLArrayInit(cx, &kid->xml_kids, 1);
+ }
+
+ /* 14(b-c). */
+ /* XXXbe Erratum? redundant w.r.t. 7(b-c) else clause above */
+ if (ok) {
+ ok = JS_ConvertValue(cx, *vp, JSTYPE_STRING, vp);
+ if (ok && !IS_EMPTY(JSVAL_TO_STRING(*vp))) {
+ roots[VAL_ROOT] = *vp;
+ if ((JSXML *) XMLArrayCursorItem(&cursor) == kid)
+ ok = Replace(cx, kid, JSVAL_ZERO, *vp);
+ }
+ }
+ XMLArrayCursorFinish(&cursor);
+ } else {
+ /* 15(a). */
+ ok = Replace(cx, xml, id, *vp);
+ }
+ }
+
+out:
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ js_LeaveLocalRootScope(cx);
+ return ok;
+
+type_error:
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_BAD_XMLLIST_PUT,
+ js_ValueToPrintableString(cx, id));
+bad:
+ ok = JS_FALSE;
+ goto out;
+}
+
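+/*
+ * ResolveValue is what lets assignments through an empty XMLList auto-create
+ * intermediate elements (e.g. x.a.b = "v" when x has no <a> child yet): an
+ * empty list remembers its [[TargetObject]] and [[TargetProperty]], and
+ * resolving it performs a [[Put]] of the empty string to create the element
+ * that the pending assignment will land on.
+ */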
+/* ECMA-357 9.1.1.10 XML [[ResolveValue]], 9.2.1.10 XMLList [[ResolveValue]]. */
+static JSBool
+ResolveValue(JSContext *cx, JSXML *list, JSXML **result)
+{
+ JSXML *target, *base;
+ JSXMLQName *targetprop;
+ JSObject *targetpropobj;
+ jsval id, tv;
+
+ /* Our caller must be protecting newborn objects. */
+ JS_ASSERT(cx->localRootStack);
+
+ if (list->xml_class != JSXML_CLASS_LIST || list->xml_kids.length != 0) {
+ if (!js_GetXMLObject(cx, list))
+ return JS_FALSE;
+ *result = list;
+ return JS_TRUE;
+ }
+
+ target = list->xml_target;
+ targetprop = list->xml_targetprop;
+ if (!target || !targetprop || IS_STAR(targetprop->localName)) {
+ *result = NULL;
+ return JS_TRUE;
+ }
+
+ targetpropobj = js_GetXMLQNameObject(cx, targetprop);
+ if (!targetpropobj)
+ return JS_FALSE;
+ if (OBJ_GET_CLASS(cx, targetpropobj) == &js_AttributeNameClass) {
+ *result = NULL;
+ return JS_TRUE;
+ }
+
+ if (!ResolveValue(cx, target, &base))
+ return JS_FALSE;
+ if (!base) {
+ *result = NULL;
+ return JS_TRUE;
+ }
+ if (!js_GetXMLObject(cx, base))
+ return JS_FALSE;
+
+ id = OBJECT_TO_JSVAL(targetpropobj);
+ if (!GetProperty(cx, base->object, id, &tv))
+ return JS_FALSE;
+ target = (JSXML *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(tv));
+
+ if (JSXML_LENGTH(target) == 0) {
+ if (base->xml_class == JSXML_CLASS_LIST && JSXML_LENGTH(base) > 1) {
+ *result = NULL;
+ return JS_TRUE;
+ }
+ tv = STRING_TO_JSVAL(cx->runtime->emptyString);
+ if (!PutProperty(cx, base->object, id, &tv))
+ return JS_FALSE;
+ if (!GetProperty(cx, base->object, id, &tv))
+ return JS_FALSE;
+ target = (JSXML *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(tv));
+ }
+
+ *result = target;
+ return JS_TRUE;
+}
+
+/*
+ * HasProperty must be able to return a found JSProperty and the object in
+ * which it was found, if id is of the form function::name. For other ids,
+ * if they index or name an XML child, we return FOUND_XML_PROPERTY in *propp
+ * and null in *objp.
+ *
+ * DROP_PROPERTY helps HasProperty callers drop function properties without
+ * trying to drop the magic FOUND_XML_PROPERTY cookie.
+ */
+#define FOUND_XML_PROPERTY ((JSProperty *) 1)
+#define DROP_PROPERTY(cx,pobj,prop) (((prop) != FOUND_XML_PROPERTY) \
+ ? OBJ_DROP_PROPERTY(cx, pobj, prop) \
+ : (void) 0)
+
+/* ECMA-357 9.1.1.6 XML [[HasProperty]] and 9.2.1.5 XMLList [[HasProperty]]. */
+static JSBool
+HasProperty(JSContext *cx, JSObject *obj, jsval id, JSObject **objp,
+ JSProperty **propp)
+{
+ JSXML *xml, *kid;
+ JSXMLArrayCursor cursor;
+ JSObject *kidobj;
+ JSXMLQName *qn;
+ jsid funid;
+ JSXMLArray *array;
+ JSXMLNameMatcher matcher;
+ uint32 i, n;
+
+ *objp = NULL;
+ *propp = NULL;
+
+ xml = (JSXML *) JS_GetPrivate(cx, obj);
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ n = JSXML_LENGTH(xml);
+ if (js_IdIsIndex(id, &i)) {
+ if (i < n)
+ *propp = FOUND_XML_PROPERTY;
+ return JS_TRUE;
+ }
+
+ XMLArrayCursorInit(&cursor, &xml->xml_kids);
+ while ((kid = (JSXML *) XMLArrayCursorNext(&cursor)) != NULL) {
+ if (kid->xml_class == JSXML_CLASS_ELEMENT) {
+ kidobj = js_GetXMLObject(cx, kid);
+ if (!kidobj || !HasProperty(cx, kidobj, id, objp, propp))
+ break;
+ if (*propp)
+ break;
+ }
+ }
+ XMLArrayCursorFinish(&cursor);
+ if (kid)
+ return *propp != NULL;
+ } else {
+ if (xml->xml_class == JSXML_CLASS_ELEMENT && js_IdIsIndex(id, &i)) {
+ if (i == 0)
+ *propp = FOUND_XML_PROPERTY;
+ return JS_TRUE;
+ }
+
+ qn = ToXMLName(cx, id, &funid);
+ if (!qn)
+ return JS_FALSE;
+ if (funid)
+ return js_LookupProperty(cx, obj, funid, objp, propp);
+
+ if (xml->xml_class != JSXML_CLASS_ELEMENT)
+ return JS_TRUE;
+
+ if (OBJ_GET_CLASS(cx, qn->object) == &js_AttributeNameClass) {
+ array = &xml->xml_attrs;
+ matcher = MatchAttrName;
+ } else {
+ array = &xml->xml_kids;
+ matcher = MatchElemName;
+ }
+ for (i = 0, n = array->length; i < n; i++) {
+ kid = XMLARRAY_MEMBER(array, i, JSXML);
+ if (kid && matcher(qn, kid)) {
+ *propp = FOUND_XML_PROPERTY;
+ return JS_TRUE;
+ }
+ }
+ }
+
+ return JS_TRUE;
+}
+
+static void
+xml_finalize(JSContext *cx, JSObject *obj)
+{
+ JSXML *xml;
+
+ xml = (JSXML *) JS_GetPrivate(cx, obj);
+ if (!xml)
+ return;
+ if (xml->object == obj)
+ xml->object = NULL;
+ UNMETER(xml_stats.livexmlobj);
+}
+
+static void
+xml_mark_vector(JSContext *cx, JSXML **vec, uint32 len)
+{
+ uint32 i;
+ JSXML *elt;
+
+ for (i = 0; i < len; i++) {
+ elt = vec[i];
+ {
+#ifdef GC_MARK_DEBUG
+ char buf[120];
+
+ if (elt->xml_class == JSXML_CLASS_LIST) {
+ strcpy(buf, js_XMLList_str);
+ } else if (JSXML_HAS_NAME(elt)) {
+ JSXMLQName *qn = elt->name;
+
+ JS_snprintf(buf, sizeof buf, "%s::%s",
+ qn->uri ? JS_GetStringBytes(qn->uri) : "*",
+ JS_GetStringBytes(qn->localName));
+ } else {
+ JSString *str = elt->xml_value;
+ size_t srclen = JSSTRING_LENGTH(str);
+ size_t dstlen = sizeof buf;
+
+ if (srclen >= sizeof buf / 6)
+ srclen = sizeof buf / 6 - 1;
+ js_DeflateStringToBuffer(cx, JSSTRING_CHARS(str), srclen,
+ buf, &dstlen);
+ }
+#endif
+ GC_MARK(cx, elt, buf);
+ }
+ }
+}
+
+/*
+ * js_XMLObjectOps.newObjectMap == js_NewObjectMap, so XML objects appear to
+ * be native. Therefore, xml_lookupProperty must return a valid JSProperty
+ * pointer parameter via *propp to signify "property found". Since the only
+ * call to xml_lookupProperty is via OBJ_LOOKUP_PROPERTY, and then only from
+ * js_FindXMLProperty (in this file), js_FindProperty (in jsobj.c, called from
+ * jsinterp.c) or from JSOP_IN case in the interpreter, the only time we add a
+ * JSScopeProperty here is when an unqualified name or XML name is being
+ * accessed or when "name in xml" is called.
+ *
+ * This scope property keeps the JSOP_NAME code in js_Interpret happy by
+ * giving it an sprop with (getter, setter) == (GetProperty, PutProperty).
+ *
+ * NB: xml_deleteProperty must take care to remove any property added here.
+ *
+ * FIXME This clashes with the function namespace implementation which also
+ * uses native properties. Effectively after xml_lookupProperty any property
+ * stored previously using assignments to xml.function::name will be removed.
+ * We partially workaround the problem in js_GetXMLFunction. There we take
+ * advantage of the fact that typically function:: is used to access the
+ * functions from XML.prototype. So when js_GetProperty returns a non-function
+ * property, we assume that it represents the result of GetProperty setter
+ * hiding the function and use an extra prototype chain lookup to recover it.
+ * For a proper solution see bug 355257.
+ */
+static JSBool
+xml_lookupProperty(JSContext *cx, JSObject *obj, jsid id, JSObject **objp,
+ JSProperty **propp)
+{
+ JSScopeProperty *sprop;
+
+ if (!HasProperty(cx, obj, ID_TO_VALUE(id), objp, propp))
+ return JS_FALSE;
+
+ if (*propp == FOUND_XML_PROPERTY) {
+ sprop = js_AddNativeProperty(cx, obj, id, GetProperty, PutProperty,
+ SPROP_INVALID_SLOT, JSPROP_ENUMERATE,
+ 0, 0);
+ if (!sprop)
+ return JS_FALSE;
+
+ JS_LOCK_OBJ(cx, obj);
+ *objp = obj;
+ *propp = (JSProperty *) sprop;
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+xml_defineProperty(JSContext *cx, JSObject *obj, jsid id, jsval value,
+ JSPropertyOp getter, JSPropertyOp setter, uintN attrs,
+ JSProperty **propp)
+{
+ if (VALUE_IS_FUNCTION(cx, value) || getter || setter ||
+ (attrs & JSPROP_ENUMERATE) == 0 ||
+ (attrs & (JSPROP_READONLY | JSPROP_PERMANENT | JSPROP_SHARED))) {
+ return js_DefineProperty(cx, obj, id, value, getter, setter, attrs,
+ propp);
+ }
+
+ if (!PutProperty(cx, obj, ID_TO_VALUE(id), &value))
+ return JS_FALSE;
+ if (propp)
+ *propp = NULL;
+ return JS_TRUE;
+}
+
+static JSBool
+xml_getProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
+{
+ if (id == JS_DEFAULT_XML_NAMESPACE_ID) {
+ *vp = JSVAL_VOID;
+ return JS_TRUE;
+ }
+
+ return GetProperty(cx, obj, ID_TO_VALUE(id), vp);
+}
+
+static JSBool
+xml_setProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
+{
+ return PutProperty(cx, obj, ID_TO_VALUE(id), vp);
+}
+
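+/*
+ * Report in *foundp whether obj has a property named id: a caller-supplied
+ * prop is trivially found, otherwise consult HasProperty and drop any native
+ * property it may have returned.
+ */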
+static JSBool
+FoundProperty(JSContext *cx, JSObject *obj, jsid id, JSProperty *prop,
+ JSBool *foundp)
+{
+ JSObject *pobj;
+
+ if (prop) {
+ *foundp = JS_TRUE;
+ } else {
+ if (!HasProperty(cx, obj, ID_TO_VALUE(id), &pobj, &prop))
+ return JS_FALSE;
+ if (prop)
+ DROP_PROPERTY(cx, pobj, prop);
+ *foundp = (prop != NULL);
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+xml_getAttributes(JSContext *cx, JSObject *obj, jsid id, JSProperty *prop,
+ uintN *attrsp)
+{
+ JSBool found;
+
+ if (!FoundProperty(cx, obj, id, prop, &found))
+ return JS_FALSE;
+ *attrsp = found ? JSPROP_ENUMERATE : 0;
+ return JS_TRUE;
+}
+
+static JSBool
+xml_setAttributes(JSContext *cx, JSObject *obj, jsid id, JSProperty *prop,
+ uintN *attrsp)
+{
+ JSBool found;
+
+ if (!FoundProperty(cx, obj, id, prop, &found))
+ return JS_FALSE;
+ if (found) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_CANT_SET_XML_ATTRS);
+ }
+ return !found;
+}
+
+static JSBool
+xml_deleteProperty(JSContext *cx, JSObject *obj, jsid id, jsval *rval)
+{
+ /*
+ * If this object has its own (mutable) scope, and if id isn't an index,
+ * then we may have added a property to the scope in xml_lookupProperty
+ * for it to return to mean "found" and to provide a handle for access
+ * operations to call the property's getter or setter. The property also
+ * helps speed up unqualified accesses via the property cache, avoiding
+ * what amount to two HasProperty searches.
+ *
+ * But now it's time to remove any such property, to purge the property
+ * cache and remove the scope entry.
+ */
+ if (OBJ_SCOPE(obj)->object == obj && !JSID_IS_INT(id)) {
+ if (!js_DeleteProperty(cx, obj, id, rval))
+ return JS_FALSE;
+ }
+
+ return DeleteProperty(cx, obj, ID_TO_VALUE(id), rval);
+}
+
+static JSBool
+xml_defaultValue(JSContext *cx, JSObject *obj, JSType hint, jsval *vp)
+{
+ JSXML *xml;
+
+ if (hint == JSTYPE_OBJECT) {
+ /* Called from for..in code in js_Interpret: return an XMLList. */
+ xml = (JSXML *) JS_GetPrivate(cx, obj);
+ if (xml->xml_class != JSXML_CLASS_LIST) {
+ obj = ToXMLList(cx, OBJECT_TO_JSVAL(obj));
+ if (!obj)
+ return JS_FALSE;
+ }
+ *vp = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+ }
+
+ return JS_CallFunctionName(cx, obj, js_toString_str, 0, NULL, vp);
+}
+
+static JSBool
+xml_enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op,
+ jsval *statep, jsid *idp)
+{
+ JSXML *xml;
+ uint32 length, index;
+ JSXMLArrayCursor *cursor;
+
+ xml = (JSXML *) JS_GetPrivate(cx, obj);
+ length = JSXML_LENGTH(xml);
+
+ switch (enum_op) {
+ case JSENUMERATE_INIT:
+ if (length == 0) {
+ cursor = NULL;
+ } else {
+ cursor = (JSXMLArrayCursor *) JS_malloc(cx, sizeof *cursor);
+ if (!cursor)
+ return JS_FALSE;
+ XMLArrayCursorInit(cursor, &xml->xml_kids);
+ }
+ *statep = PRIVATE_TO_JSVAL(cursor);
+ if (idp)
+ *idp = INT_TO_JSID(length);
+ break;
+
+ case JSENUMERATE_NEXT:
+ cursor = JSVAL_TO_PRIVATE(*statep);
+ if (cursor && cursor->array && (index = cursor->index) < length) {
+ *idp = INT_TO_JSID(index);
+ cursor->index = index + 1;
+ break;
+ }
+ /* FALL THROUGH */
+
+ case JSENUMERATE_DESTROY:
+ cursor = JSVAL_TO_PRIVATE(*statep);
+ if (cursor) {
+ XMLArrayCursorFinish(cursor);
+ JS_free(cx, cursor);
+ }
+ *statep = JSVAL_NULL;
+ break;
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+xml_hasInstance(JSContext *cx, JSObject *obj, jsval v, JSBool *bp)
+{
+ return JS_TRUE;
+}
+
+static uint32
+xml_mark(JSContext *cx, JSObject *obj, void *arg)
+{
+ JSXML *xml;
+
+ xml = (JSXML *) JS_GetPrivate(cx, obj);
+ GC_MARK(cx, xml, "private");
+ return js_Mark(cx, obj, NULL);
+}
+
+static void
+xml_clear(JSContext *cx, JSObject *obj)
+{
+}
+
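+/*
+ * Mirrors the hasSimpleContent() predicate: comments and processing
+ * instructions are never simple, a one-kid list defers to its kid, and
+ * anything else is simple exactly when it has no element children.
+ */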
+static JSBool
+HasSimpleContent(JSXML *xml)
+{
+ JSXML *kid;
+ JSBool simple;
+ uint32 i, n;
+
+again:
+ switch (xml->xml_class) {
+ case JSXML_CLASS_COMMENT:
+ case JSXML_CLASS_PROCESSING_INSTRUCTION:
+ return JS_FALSE;
+ case JSXML_CLASS_LIST:
+ if (xml->xml_kids.length == 0)
+ return JS_TRUE;
+ if (xml->xml_kids.length == 1) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, 0, JSXML);
+ if (kid) {
+ xml = kid;
+ goto again;
+ }
+ }
+ /* FALL THROUGH */
+ default:
+ simple = JS_TRUE;
+ for (i = 0, n = JSXML_LENGTH(xml); i < n; i++) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, i, JSXML);
+ if (kid && kid->xml_class == JSXML_CLASS_ELEMENT) {
+ simple = JS_FALSE;
+ break;
+ }
+ }
+ return simple;
+ }
+}
+
+/*
+ * 11.2.2.1 Step 3(d) onward.
+ */
+static JSObject *
+xml_getMethod(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
+{
+ JSTempValueRooter tvr;
+
+ JS_ASSERT(JS_InstanceOf(cx, obj, &js_XMLClass, NULL));
+
+ /*
+ * As our callers have a bad habit of passing a pointer to an unrooted
+ * local value as vp, we use a proper root here.
+ */
+ JS_PUSH_SINGLE_TEMP_ROOT(cx, JSVAL_NULL, &tvr);
+ if (!js_GetXMLFunction(cx, obj, id, &tvr.u.value))
+ obj = NULL;
+ *vp = tvr.u.value;
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return obj;
+}
+
+static JSBool
+xml_setMethod(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
+{
+ return js_SetProperty(cx, obj, id, vp);
+}
+
+static JSBool
+xml_enumerateValues(JSContext *cx, JSObject *obj, JSIterateOp enum_op,
+ jsval *statep, jsid *idp, jsval *vp)
+{
+ JSXML *xml, *kid;
+ uint32 length, index;
+ JSXMLArrayCursor *cursor;
+ JSObject *kidobj;
+
+ xml = (JSXML *) JS_GetPrivate(cx, obj);
+ length = JSXML_LENGTH(xml);
+ JS_ASSERT(INT_FITS_IN_JSVAL(length));
+
+ switch (enum_op) {
+ case JSENUMERATE_INIT:
+ if (length == 0) {
+ cursor = NULL;
+ } else {
+ cursor = (JSXMLArrayCursor *) JS_malloc(cx, sizeof *cursor);
+ if (!cursor)
+ return JS_FALSE;
+ XMLArrayCursorInit(cursor, &xml->xml_kids);
+ }
+ *statep = PRIVATE_TO_JSVAL(cursor);
+ if (idp)
+ *idp = INT_TO_JSID(length);
+ if (vp)
+ *vp = JSVAL_VOID;
+ break;
+
+ case JSENUMERATE_NEXT:
+ cursor = JSVAL_TO_PRIVATE(*statep);
+ if (cursor && cursor->array && (index = cursor->index) < length) {
+ while (!(kid = XMLARRAY_MEMBER(&xml->xml_kids, index, JSXML))) {
+ if (++index == length)
+ goto destroy;
+ }
+ kidobj = js_GetXMLObject(cx, kid);
+ if (!kidobj)
+ return JS_FALSE;
+ JS_ASSERT(INT_FITS_IN_JSVAL(index));
+ *idp = INT_TO_JSID(index);
+ *vp = OBJECT_TO_JSVAL(kidobj);
+ cursor->index = index + 1;
+ break;
+ }
+ /* FALL THROUGH */
+
+ case JSENUMERATE_DESTROY:
+ cursor = JSVAL_TO_PRIVATE(*statep);
+ if (cursor) {
+ destroy:
+ XMLArrayCursorFinish(cursor);
+ JS_free(cx, cursor);
+ }
+ *statep = JSVAL_NULL;
+ break;
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+xml_equality(JSContext *cx, JSObject *obj, jsval v, JSBool *bp)
+{
+ JSXML *xml, *vxml;
+ JSObject *vobj;
+ JSBool ok;
+ JSString *str, *vstr;
+ jsdouble d, d2;
+
+ xml = (JSXML *) JS_GetPrivate(cx, obj);
+ vxml = NULL;
+ if (!JSVAL_IS_PRIMITIVE(v)) {
+ vobj = JSVAL_TO_OBJECT(v);
+ if (OBJECT_IS_XML(cx, vobj))
+ vxml = (JSXML *) JS_GetPrivate(cx, vobj);
+ }
+
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ ok = Equals(cx, xml, v, bp);
+ } else if (vxml) {
+ if (vxml->xml_class == JSXML_CLASS_LIST) {
+ ok = Equals(cx, vxml, OBJECT_TO_JSVAL(obj), bp);
+ } else {
+ if (((xml->xml_class == JSXML_CLASS_TEXT ||
+ xml->xml_class == JSXML_CLASS_ATTRIBUTE) &&
+ HasSimpleContent(vxml)) ||
+ ((vxml->xml_class == JSXML_CLASS_TEXT ||
+ vxml->xml_class == JSXML_CLASS_ATTRIBUTE) &&
+ HasSimpleContent(xml))) {
+ ok = js_EnterLocalRootScope(cx);
+ if (ok) {
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ vstr = js_ValueToString(cx, v);
+ ok = str && vstr;
+ if (ok)
+ *bp = js_EqualStrings(str, vstr);
+ js_LeaveLocalRootScope(cx);
+ }
+ } else {
+ ok = XMLEquals(cx, xml, vxml, bp);
+ }
+ }
+ } else {
+ ok = js_EnterLocalRootScope(cx);
+ if (ok) {
+ if (HasSimpleContent(xml)) {
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ vstr = js_ValueToString(cx, v);
+ ok = str && vstr;
+ if (ok)
+ *bp = js_EqualStrings(str, vstr);
+ } else if (JSVAL_IS_STRING(v) || JSVAL_IS_NUMBER(v)) {
+ str = js_ValueToString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str) {
+ ok = JS_FALSE;
+ } else if (JSVAL_IS_STRING(v)) {
+ *bp = js_EqualStrings(str, JSVAL_TO_STRING(v));
+ } else {
+ ok = js_ValueToNumber(cx, STRING_TO_JSVAL(str), &d);
+ if (ok) {
+ d2 = JSVAL_IS_INT(v) ? JSVAL_TO_INT(v)
+ : *JSVAL_TO_DOUBLE(v);
+ *bp = JSDOUBLE_COMPARE(d, ==, d2, JS_FALSE);
+ }
+ }
+ } else {
+ *bp = JS_FALSE;
+ }
+ js_LeaveLocalRootScope(cx);
+ }
+ }
+ return ok;
+}
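+
+/*
+ * Per the branches above, an XML object with simple content compares equal
+ * to primitives via string conversion, so (roughly) <a>5</a> == "5" and
+ * <a>5</a> == 5 both hold, while two non-list element trees are compared
+ * structurally by XMLEquals.
+ */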
+
+static JSBool
+xml_concatenate(JSContext *cx, JSObject *obj, jsval v, jsval *vp)
+{
+ JSBool ok;
+ JSObject *listobj, *robj;
+ JSXML *list, *lxml, *rxml;
+
+ ok = js_EnterLocalRootScope(cx);
+ if (!ok)
+ return JS_FALSE;
+
+ listobj = js_NewXMLObject(cx, JSXML_CLASS_LIST);
+ if (!listobj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+
+ list = (JSXML *) JS_GetPrivate(cx, listobj);
+ lxml = (JSXML *) JS_GetPrivate(cx, obj);
+ ok = Append(cx, list, lxml);
+ if (!ok)
+ goto out;
+
+ if (VALUE_IS_XML(cx, v)) {
+ rxml = (JSXML *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(v));
+ } else {
+ robj = ToXML(cx, v);
+ if (!robj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ rxml = (JSXML *) JS_GetPrivate(cx, robj);
+ }
+ ok = Append(cx, list, rxml);
+ if (!ok)
+ goto out;
+
+ *vp = OBJECT_TO_JSVAL(listobj);
+out:
+ js_LeaveLocalRootScopeWithResult(cx, *vp);
+ return ok;
+}
+
+/* Use js_NewObjectMap so XML objects satisfy OBJ_IS_NATIVE tests. */
+JS_FRIEND_DATA(JSXMLObjectOps) js_XMLObjectOps = {
+ { js_NewObjectMap, js_DestroyObjectMap,
+ xml_lookupProperty, xml_defineProperty,
+ xml_getProperty, xml_setProperty,
+ xml_getAttributes, xml_setAttributes,
+ xml_deleteProperty, xml_defaultValue,
+ xml_enumerate, js_CheckAccess,
+ NULL, NULL,
+ NULL, NULL,
+ NULL, xml_hasInstance,
+ js_SetProtoOrParent, js_SetProtoOrParent,
+ xml_mark, xml_clear,
+ NULL, NULL },
+ xml_getMethod, xml_setMethod,
+ xml_enumerateValues, xml_equality,
+ xml_concatenate
+};
+
+static JSObjectOps *
+xml_getObjectOps(JSContext *cx, JSClass *clasp)
+{
+ return &js_XMLObjectOps.base;
+}
+
+JS_FRIEND_DATA(JSClass) js_XMLClass = {
+ js_XML_str,
+ JSCLASS_HAS_PRIVATE | JSCLASS_HAS_CACHED_PROTO(JSProto_XML),
+ JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
+ JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, xml_finalize,
+ xml_getObjectOps, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL
+};
+
+static JSObject *
+CallConstructorFunction(JSContext *cx, JSObject *obj, JSClass *clasp,
+ uintN argc, jsval *argv)
+{
+ JSObject *tmp;
+ jsval rval;
+
+ while ((tmp = OBJ_GET_PARENT(cx, obj)) != NULL)
+ obj = tmp;
+ if (!JS_CallFunctionName(cx, obj, clasp->name, argc, argv, &rval))
+ return NULL;
+ JS_ASSERT(!JSVAL_IS_PRIMITIVE(rval));
+ return JSVAL_TO_OBJECT(rval);
+}
+
+static JSXML *
+StartNonListXMLMethod(JSContext *cx, JSObject **objp, jsval *argv)
+{
+ JSXML *xml;
+ JSFunction *fun;
+
+ JS_ASSERT(VALUE_IS_FUNCTION(cx, argv[-2]));
+
+ xml = (JSXML *) JS_GetInstancePrivate(cx, *objp, &js_XMLClass, argv);
+ if (!xml || xml->xml_class != JSXML_CLASS_LIST)
+ return xml;
+
+ if (xml->xml_kids.length == 1) {
+ xml = XMLARRAY_MEMBER(&xml->xml_kids, 0, JSXML);
+ if (xml) {
+ *objp = js_GetXMLObject(cx, xml);
+ if (!*objp)
+ return NULL;
+ argv[-1] = OBJECT_TO_JSVAL(*objp);
+ return xml;
+ }
+ }
+
+ fun = (JSFunction *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(argv[-2]));
+ if (fun) {
+ char numBuf[12];
+ JS_snprintf(numBuf, sizeof numBuf, "%u", xml->xml_kids.length);
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_NON_LIST_XML_METHOD,
+ JS_GetFunctionName(fun), numBuf);
+ }
+ return NULL;
+}
+
+#define XML_METHOD_PROLOG \
+ JS_BEGIN_MACRO \
+ xml = (JSXML *) JS_GetInstancePrivate(cx, obj, &js_XMLClass, argv); \
+ if (!xml) \
+ return JS_FALSE; \
+ JS_END_MACRO
+
+#define NON_LIST_XML_METHOD_PROLOG \
+ JS_BEGIN_MACRO \
+ xml = StartNonListXMLMethod(cx, &obj, argv); \
+ if (!xml) \
+ return JS_FALSE; \
+ JS_ASSERT(xml->xml_class != JSXML_CLASS_LIST); \
+ JS_END_MACRO
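+
+/*
+ * Both prologs assume cx, obj, argv and a local "JSXML *xml" are in scope.
+ * NON_LIST_XML_METHOD_PROLOG additionally retargets a length-1 XMLList at
+ * its only kid via StartNonListXMLMethod and, roughly, reports an error
+ * for any other list, so the method body only ever sees a non-list JSXML.
+ */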
+
+static JSBool
+xml_addNamespace(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml;
+ JSObject *nsobj;
+ JSXMLNamespace *ns;
+
+ NON_LIST_XML_METHOD_PROLOG;
+ if (xml->xml_class != JSXML_CLASS_ELEMENT)
+ return JS_TRUE;
+ xml = CHECK_COPY_ON_WRITE(cx, xml, obj);
+ if (!xml)
+ return JS_FALSE;
+
+ nsobj = CallConstructorFunction(cx, obj, &js_NamespaceClass.base, 1, argv);
+ if (!nsobj)
+ return JS_FALSE;
+ argv[0] = OBJECT_TO_JSVAL(nsobj);
+
+ ns = (JSXMLNamespace *) JS_GetPrivate(cx, nsobj);
+ if (!AddInScopeNamespace(cx, xml, ns))
+ return JS_FALSE;
+ ns->declared = JS_TRUE;
+ *rval = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+}
+
+static JSBool
+xml_appendChild(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml, *vxml;
+ jsval name, v;
+ JSObject *vobj;
+
+ NON_LIST_XML_METHOD_PROLOG;
+ xml = CHECK_COPY_ON_WRITE(cx, xml, obj);
+ if (!xml)
+ return JS_FALSE;
+
+ if (!js_GetAnyName(cx, &name))
+ return JS_FALSE;
+
+ if (!GetProperty(cx, obj, name, &v))
+ return JS_FALSE;
+
+ JS_ASSERT(!JSVAL_IS_PRIMITIVE(v));
+ vobj = JSVAL_TO_OBJECT(v);
+ JS_ASSERT(OBJECT_IS_XML(cx, vobj));
+ vxml = (JSXML *) JS_GetPrivate(cx, vobj);
+ JS_ASSERT(vxml->xml_class == JSXML_CLASS_LIST);
+
+ if (!IndexToIdVal(cx, vxml->xml_kids.length, &name))
+ return JS_FALSE;
+ if (!PutProperty(cx, JSVAL_TO_OBJECT(v), name, &argv[0]))
+ return JS_FALSE;
+
+ *rval = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+}
+
+/* XML and XMLList */
+static JSBool
+xml_attribute(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXMLQName *qn;
+
+ qn = ToAttributeName(cx, argv[0]);
+ if (!qn)
+ return JS_FALSE;
+ argv[0] = OBJECT_TO_JSVAL(qn->object); /* local root */
+ return GetProperty(cx, obj, argv[0], rval);
+}
+
+/* XML and XMLList */
+static JSBool
+xml_attributes(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsval name;
+ JSXMLQName *qn;
+ JSTempValueRooter tvr;
+ JSBool ok;
+
+ name = ATOM_KEY(cx->runtime->atomState.starAtom);
+ qn = ToAttributeName(cx, name);
+ if (!qn)
+ return JS_FALSE;
+ name = OBJECT_TO_JSVAL(qn->object);
+ JS_PUSH_SINGLE_TEMP_ROOT(cx, name, &tvr);
+ ok = GetProperty(cx, obj, name, rval);
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return ok;
+}
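+
+/*
+ * In E4X script terms, x.attribute(name) is roughly equivalent to x.@name
+ * and x.attributes() to x.@*: both simply funnel an AttributeName through
+ * GetProperty above.
+ */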
+
+static JSXML *
+xml_list_helper(JSContext *cx, JSXML *xml, jsval *rval)
+{
+ JSObject *listobj;
+ JSXML *list;
+
+ listobj = js_NewXMLObject(cx, JSXML_CLASS_LIST);
+ if (!listobj)
+ return NULL;
+
+ *rval = OBJECT_TO_JSVAL(listobj);
+ list = (JSXML *) JS_GetPrivate(cx, listobj);
+ list->xml_target = xml;
+ return list;
+}
+
+static JSBool
+xml_child_helper(JSContext *cx, JSObject *obj, JSXML *xml, jsval name,
+ jsval *rval)
+{
+ uint32 index;
+ JSXML *kid;
+ JSObject *kidobj;
+
+ /* ECMA-357 13.4.4.6 */
+ JS_ASSERT(xml->xml_class != JSXML_CLASS_LIST);
+
+ if (js_IdIsIndex(name, &index)) {
+ if (index >= JSXML_LENGTH(xml)) {
+ *rval = JSVAL_VOID;
+ } else {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, index, JSXML);
+ if (!kid) {
+ *rval = JSVAL_VOID;
+ } else {
+ kidobj = js_GetXMLObject(cx, kid);
+ if (!kidobj)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(kidobj);
+ }
+ }
+ return JS_TRUE;
+ }
+
+ return GetProperty(cx, obj, name, rval);
+}
+
+/* XML and XMLList */
+static JSBool
+xml_child(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSXML *xml, *list, *kid, *vxml;
+ JSXMLArrayCursor cursor;
+ jsval name, v;
+ JSObject *kidobj;
+
+ XML_METHOD_PROLOG;
+ name = argv[0];
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ /* ECMA-357 13.5.4.4 */
+ list = xml_list_helper(cx, xml, rval);
+ if (!list)
+ return JS_FALSE;
+
+ XMLArrayCursorInit(&cursor, &xml->xml_kids);
+ while ((kid = (JSXML *) XMLArrayCursorNext(&cursor)) != NULL) {
+ kidobj = js_GetXMLObject(cx, kid);
+ if (!kidobj)
+ break;
+ if (!xml_child_helper(cx, kidobj, kid, name, &v))
+ break;
+ if (JSVAL_IS_VOID(v)) {
+ /* The property didn't exist in this kid. */
+ continue;
+ }
+
+ JS_ASSERT(!JSVAL_IS_PRIMITIVE(v));
+ vxml = (JSXML *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(v));
+ if ((!JSXML_HAS_KIDS(vxml) || vxml->xml_kids.length != 0) &&
+ !Append(cx, list, vxml)) {
+ break;
+ }
+ }
+ XMLArrayCursorFinish(&cursor);
+ return !kid;
+ }
+
+ /* ECMA-357 Edition 2 13.3.4.6 (note 13.3, not 13.4 as in Edition 1). */
+ if (!xml_child_helper(cx, obj, xml, name, rval))
+ return JS_FALSE;
+ if (JSVAL_IS_VOID(*rval) && !xml_list_helper(cx, xml, rval))
+ return JS_FALSE;
+ return JS_TRUE;
+}
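+
+/*
+ * E.g. x.child("kid") behaves like x.kid, while x.child(0) selects the
+ * kid at index 0 (or yields an empty list when out of range), as handled
+ * by xml_child_helper above.
+ */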
+
+static JSBool
+xml_childIndex(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml, *parent;
+ uint32 i, n;
+
+ NON_LIST_XML_METHOD_PROLOG;
+ parent = xml->parent;
+ if (!parent || xml->xml_class == JSXML_CLASS_ATTRIBUTE) {
+ *rval = DOUBLE_TO_JSVAL(cx->runtime->jsNaN);
+ return JS_TRUE;
+ }
+ for (i = 0, n = JSXML_LENGTH(parent); i < n; i++) {
+ if (XMLARRAY_MEMBER(&parent->xml_kids, i, JSXML) == xml)
+ break;
+ }
+ JS_ASSERT(i < n);
+ return js_NewNumberValue(cx, i, rval);
+}
+
+/* XML and XMLList */
+static JSBool
+xml_children(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsval name;
+
+ name = ATOM_KEY(cx->runtime->atomState.starAtom);
+ return GetProperty(cx, obj, name, rval);
+}
+
+/* XML and XMLList */
+static JSBool
+xml_comments(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml, *list, *kid, *vxml;
+ JSBool ok;
+ uint32 i, n;
+ JSObject *kidobj;
+ jsval v;
+
+ XML_METHOD_PROLOG;
+ list = xml_list_helper(cx, xml, rval);
+ if (!list)
+ return JS_FALSE;
+
+ ok = JS_TRUE;
+
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ /* 13.5.4.6 Step 2. */
+ for (i = 0, n = JSXML_LENGTH(xml); i < n; i++) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, i, JSXML);
+ if (kid && kid->xml_class == JSXML_CLASS_ELEMENT) {
+ ok = js_EnterLocalRootScope(cx);
+ if (!ok)
+ break;
+ kidobj = js_GetXMLObject(cx, kid);
+ if (kidobj) {
+ ok = xml_comments(cx, kidobj, argc, argv, &v);
+ } else {
+ ok = JS_FALSE;
+ v = JSVAL_NULL;
+ }
+ js_LeaveLocalRootScopeWithResult(cx, v);
+ if (!ok)
+ break;
+ vxml = (JSXML *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(v));
+ if (JSXML_LENGTH(vxml) != 0) {
+ ok = Append(cx, list, vxml);
+ if (!ok)
+ break;
+ }
+ }
+ }
+ } else {
+ /* 13.4.4.9 Step 2. */
+ for (i = 0, n = JSXML_LENGTH(xml); i < n; i++) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, i, JSXML);
+ if (kid && kid->xml_class == JSXML_CLASS_COMMENT) {
+ ok = Append(cx, list, kid);
+ if (!ok)
+ break;
+ }
+ }
+ }
+
+ return ok;
+}
+
+/* XML and XMLList */
+static JSBool
+xml_contains(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml, *kid;
+ jsval value;
+ JSBool eq;
+ JSXMLArrayCursor cursor;
+ JSObject *kidobj;
+
+ XML_METHOD_PROLOG;
+ value = argv[0];
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ eq = JS_FALSE;
+ XMLArrayCursorInit(&cursor, &xml->xml_kids);
+ while ((kid = (JSXML *) XMLArrayCursorNext(&cursor)) != NULL) {
+ kidobj = js_GetXMLObject(cx, kid);
+ if (!kidobj || !xml_equality(cx, kidobj, value, &eq))
+ break;
+ if (eq)
+ break;
+ }
+ XMLArrayCursorFinish(&cursor);
+ if (kid && !eq)
+ return JS_FALSE;
+ } else {
+ if (!xml_equality(cx, obj, value, &eq))
+ return JS_FALSE;
+ }
+ *rval = BOOLEAN_TO_JSVAL(eq);
+ return JS_TRUE;
+}
+
+/* XML and XMLList */
+static JSBool
+xml_copy(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSXML *xml, *copy;
+
+ XML_METHOD_PROLOG;
+ copy = DeepCopy(cx, xml, NULL, 0);
+ if (!copy)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(copy->object);
+ return JS_TRUE;
+}
+
+/* XML and XMLList */
+static JSBool
+xml_descendants(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml, *list;
+ jsval name;
+
+ XML_METHOD_PROLOG;
+ name = (argc == 0) ? ATOM_KEY(cx->runtime->atomState.starAtom) : argv[0];
+ list = Descendants(cx, xml, name);
+ if (!list)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(list->object);
+ return JS_TRUE;
+}
+
+/* XML and XMLList */
+static JSBool
+xml_elements(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml, *list, *kid, *vxml;
+ jsval name, v;
+ JSXMLQName *nameqn;
+ jsid funid;
+ JSBool ok;
+ JSXMLArrayCursor cursor;
+ JSObject *kidobj;
+ uint32 i, n;
+
+ XML_METHOD_PROLOG;
+ name = (argc == 0) ? ATOM_KEY(cx->runtime->atomState.starAtom) : argv[0];
+ nameqn = ToXMLName(cx, name, &funid);
+ if (!nameqn)
+ return JS_FALSE;
+ argv[0] = OBJECT_TO_JSVAL(nameqn->object);
+
+ list = xml_list_helper(cx, xml, rval);
+ if (!list)
+ return JS_FALSE;
+ if (funid)
+ return JS_TRUE;
+
+ list->xml_targetprop = nameqn;
+ ok = JS_TRUE;
+
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ /* 13.5.4.6 */
+ XMLArrayCursorInit(&cursor, &xml->xml_kids);
+ while ((kid = (JSXML *) XMLArrayCursorNext(&cursor)) != NULL) {
+ if (kid->xml_class == JSXML_CLASS_ELEMENT) {
+ ok = js_EnterLocalRootScope(cx);
+ if (!ok)
+ break;
+ kidobj = js_GetXMLObject(cx, kid);
+ if (kidobj) {
+ ok = xml_elements(cx, kidobj, argc, argv, &v);
+ } else {
+ ok = JS_FALSE;
+ v = JSVAL_NULL;
+ }
+ js_LeaveLocalRootScopeWithResult(cx, v);
+ if (!ok)
+ break;
+ vxml = (JSXML *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(v));
+ if (JSXML_LENGTH(vxml) != 0) {
+ ok = Append(cx, list, vxml);
+ if (!ok)
+ break;
+ }
+ }
+ }
+ XMLArrayCursorFinish(&cursor);
+ } else {
+ for (i = 0, n = JSXML_LENGTH(xml); i < n; i++) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, i, JSXML);
+ if (kid && kid->xml_class == JSXML_CLASS_ELEMENT &&
+ MatchElemName(nameqn, kid)) {
+ ok = Append(cx, list, kid);
+ if (!ok)
+ break;
+ }
+ }
+ }
+
+ return ok;
+}
+
+/* XML and XMLList */
+static JSBool
+xml_hasOwnProperty(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsval name;
+ JSObject *pobj;
+ JSProperty *prop;
+
+ if (!JS_InstanceOf(cx, obj, &js_XMLClass, argv))
+ return JS_FALSE;
+
+ name = argv[0];
+ if (!HasProperty(cx, obj, name, &pobj, &prop))
+ return JS_FALSE;
+ if (!prop) {
+ return js_HasOwnPropertyHelper(cx, obj, js_LookupProperty, argc, argv,
+ rval);
+ }
+ DROP_PROPERTY(cx, pobj, prop);
+ *rval = JSVAL_TRUE;
+ return JS_TRUE;
+}
+
+/* XML and XMLList */
+static JSBool
+xml_hasComplexContent(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml, *kid;
+ JSObject *kidobj;
+ uint32 i, n;
+
+ XML_METHOD_PROLOG;
+again:
+ switch (xml->xml_class) {
+ case JSXML_CLASS_ATTRIBUTE:
+ case JSXML_CLASS_COMMENT:
+ case JSXML_CLASS_PROCESSING_INSTRUCTION:
+ case JSXML_CLASS_TEXT:
+ *rval = JSVAL_FALSE;
+ break;
+ case JSXML_CLASS_LIST:
+ if (xml->xml_kids.length == 0) {
+ *rval = JSVAL_TRUE;
+ } else if (xml->xml_kids.length == 1) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, 0, JSXML);
+ if (kid) {
+ kidobj = js_GetXMLObject(cx, kid);
+ if (!kidobj)
+ return JS_FALSE;
+ obj = kidobj;
+ xml = (JSXML *) JS_GetPrivate(cx, obj);
+ goto again;
+ }
+ }
+ /* FALL THROUGH */
+ default:
+ *rval = JSVAL_FALSE;
+ for (i = 0, n = xml->xml_kids.length; i < n; i++) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, i, JSXML);
+ if (kid && kid->xml_class == JSXML_CLASS_ELEMENT) {
+ *rval = JSVAL_TRUE;
+ break;
+ }
+ }
+ break;
+ }
+ return JS_TRUE;
+}
+
+/* XML and XMLList */
+static JSBool
+xml_hasSimpleContent(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml;
+
+ XML_METHOD_PROLOG;
+ *rval = BOOLEAN_TO_JSVAL(HasSimpleContent(xml));
+ return JS_TRUE;
+}
+
+typedef struct JSTempRootedNSArray {
+ JSTempValueRooter tvr;
+ JSXMLArray array;
+ jsval value; /* extra root for temporaries */
+} JSTempRootedNSArray;
+
+JS_STATIC_DLL_CALLBACK(void)
+mark_temp_ns_array(JSContext *cx, JSTempValueRooter *tvr)
+{
+ JSTempRootedNSArray *tmp = (JSTempRootedNSArray *)tvr;
+
+ namespace_mark_vector(cx,
+ (JSXMLNamespace **)tmp->array.vector,
+ tmp->array.length);
+ XMLArrayCursorMark(cx, tmp->array.cursors);
+ if (JSVAL_IS_GCTHING(tmp->value))
+ GC_MARK(cx, JSVAL_TO_GCTHING(tmp->value), "temp_ns_array_value");
+}
+
+static void
+InitTempNSArray(JSContext *cx, JSTempRootedNSArray *tmp)
+{
+ XMLArrayInit(cx, &tmp->array, 0);
+ tmp->value = JSVAL_NULL;
+ JS_PUSH_TEMP_ROOT_MARKER(cx, mark_temp_ns_array, &tmp->tvr);
+}
+
+static void
+FinishTempNSArray(JSContext *cx, JSTempRootedNSArray *tmp)
+{
+ JS_ASSERT(tmp->tvr.u.marker == mark_temp_ns_array);
+ JS_POP_TEMP_ROOT(cx, &tmp->tvr);
+ XMLArrayFinish(cx, &tmp->array);
+}
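+
+/*
+ * Usage sketch: InitTempNSArray pushes a marking temp root so the
+ * namespaces collected in tmp->array (and tmp->value) stay alive across
+ * GC; every Init must be paired with a FinishTempNSArray, and nested
+ * pairs must finish in LIFO order (see xml_namespaceDeclarations).
+ */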
+
+/*
+ * Populate a new JS array with elements of JSTempRootedNSArray.array and
+ * place the result into rval. rval must point to a rooted location.
+ */
+static JSBool
+TempNSArrayToJSArray(JSContext *cx, JSTempRootedNSArray *tmp, jsval *rval)
+{
+ JSObject *arrayobj;
+ uint32 i, n;
+ JSXMLNamespace *ns;
+ JSObject *nsobj;
+
+ arrayobj = js_NewArrayObject(cx, 0, NULL);
+ if (!arrayobj)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(arrayobj);
+ for (i = 0, n = tmp->array.length; i < n; i++) {
+ ns = XMLARRAY_MEMBER(&tmp->array, i, JSXMLNamespace);
+ if (!ns)
+ continue;
+ nsobj = js_GetXMLNamespaceObject(cx, ns);
+ if (!nsobj)
+ return JS_FALSE;
+ tmp->value = OBJECT_TO_JSVAL(nsobj);
+ if (!OBJ_SET_PROPERTY(cx, arrayobj, INT_TO_JSID(i), &tmp->value))
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+FindInScopeNamespaces(JSContext *cx, JSXML *xml, JSXMLArray *nsarray)
+{
+ uint32 length, i, j, n;
+ JSXMLNamespace *ns, *ns2;
+
+ length = nsarray->length;
+ do {
+ if (xml->xml_class != JSXML_CLASS_ELEMENT)
+ continue;
+ for (i = 0, n = xml->xml_namespaces.length; i < n; i++) {
+ ns = XMLARRAY_MEMBER(&xml->xml_namespaces, i, JSXMLNamespace);
+ if (!ns)
+ continue;
+
+ for (j = 0; j < length; j++) {
+ ns2 = XMLARRAY_MEMBER(nsarray, j, JSXMLNamespace);
+ if (ns2 &&
+ ((ns2->prefix && ns->prefix)
+ ? js_EqualStrings(ns2->prefix, ns->prefix)
+ : js_EqualStrings(ns2->uri, ns->uri))) {
+ break;
+ }
+ }
+
+ if (j == length) {
+ if (!XMLARRAY_APPEND(cx, nsarray, ns))
+ return JS_FALSE;
+ ++length;
+ }
+ }
+ } while ((xml = xml->parent) != NULL);
+ JS_ASSERT(length == nsarray->length);
+
+ return JS_TRUE;
+}
+
+static JSBool
+xml_inScopeNamespaces(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml;
+ JSTempRootedNSArray namespaces;
+ JSBool ok;
+
+ NON_LIST_XML_METHOD_PROLOG;
+
+ InitTempNSArray(cx, &namespaces);
+ ok = FindInScopeNamespaces(cx, xml, &namespaces.array) &&
+ TempNSArrayToJSArray(cx, &namespaces, rval);
+ FinishTempNSArray(cx, &namespaces);
+ return ok;
+}
+
+static JSBool
+xml_insertChildAfter(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml, *kid;
+ jsval arg;
+ uint32 i;
+
+ NON_LIST_XML_METHOD_PROLOG;
+ if (!JSXML_HAS_KIDS(xml))
+ return JS_TRUE;
+
+ arg = argv[0];
+ if (JSVAL_IS_NULL(arg)) {
+ kid = NULL;
+ i = 0;
+ } else {
+ if (!VALUE_IS_XML(cx, arg))
+ return JS_TRUE;
+ kid = (JSXML *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(arg));
+ i = XMLARRAY_FIND_MEMBER(&xml->xml_kids, kid, NULL);
+ if (i == XML_NOT_FOUND)
+ return JS_TRUE;
+ ++i;
+ }
+
+ xml = CHECK_COPY_ON_WRITE(cx, xml, obj);
+ if (!xml)
+ return JS_FALSE;
+ if (!Insert(cx, xml, i, argv[1]))
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+}
+
+static JSBool
+xml_insertChildBefore(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml, *kid;
+ jsval arg;
+ uint32 i;
+
+ NON_LIST_XML_METHOD_PROLOG;
+ if (!JSXML_HAS_KIDS(xml))
+ return JS_TRUE;
+
+ arg = argv[0];
+ if (JSVAL_IS_NULL(arg)) {
+ kid = NULL;
+ i = xml->xml_kids.length;
+ } else {
+ if (!VALUE_IS_XML(cx, arg))
+ return JS_TRUE;
+ kid = (JSXML *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(arg));
+ i = XMLARRAY_FIND_MEMBER(&xml->xml_kids, kid, NULL);
+ if (i == XML_NOT_FOUND)
+ return JS_TRUE;
+ }
+
+ xml = CHECK_COPY_ON_WRITE(cx, xml, obj);
+ if (!xml)
+ return JS_FALSE;
+ if (!Insert(cx, xml, i, argv[1]))
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+}
+
+/* XML and XMLList */
+static JSBool
+xml_length(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSXML *xml;
+
+ XML_METHOD_PROLOG;
+ if (xml->xml_class != JSXML_CLASS_LIST) {
+ *rval = JSVAL_ONE;
+ } else {
+ if (!js_NewNumberValue(cx, xml->xml_kids.length, rval))
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+xml_localName(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml;
+
+ NON_LIST_XML_METHOD_PROLOG;
+ *rval = xml->name ? STRING_TO_JSVAL(xml->name->localName) : JSVAL_NULL;
+ return JS_TRUE;
+}
+
+static JSBool
+xml_name(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSXML *xml;
+ JSObject *nameobj;
+
+ NON_LIST_XML_METHOD_PROLOG;
+ if (!xml->name) {
+ *rval = JSVAL_NULL;
+ } else {
+ nameobj = js_GetXMLQNameObject(cx, xml->name);
+ if (!nameobj)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(nameobj);
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+xml_namespace(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml;
+ JSString *prefix;
+ JSTempRootedNSArray inScopeNSes;
+ JSBool ok;
+ jsuint i, length;
+ JSXMLNamespace *ns;
+ JSObject *nsobj;
+
+ NON_LIST_XML_METHOD_PROLOG;
+ if (argc == 0 && !JSXML_HAS_NAME(xml)) {
+ *rval = JSVAL_NULL;
+ return JS_TRUE;
+ }
+
+ if (argc == 0) {
+ prefix = NULL;
+ } else {
+ prefix = js_ValueToString(cx, argv[0]);
+ if (!prefix)
+ return JS_FALSE;
+ argv[0] = STRING_TO_JSVAL(prefix); /* local root */
+ }
+
+    /* From this point on, control must flow through the out: label. */
+ InitTempNSArray(cx, &inScopeNSes);
+ ok = FindInScopeNamespaces(cx, xml, &inScopeNSes.array);
+ if (!ok)
+ goto out;
+
+ if (!prefix) {
+ ns = GetNamespace(cx, xml->name, &inScopeNSes.array);
+ if (!ns) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ } else {
+ ns = NULL;
+ for (i = 0, length = inScopeNSes.array.length; i < length; i++) {
+ ns = XMLARRAY_MEMBER(&inScopeNSes.array, i, JSXMLNamespace);
+ if (ns && ns->prefix && js_EqualStrings(ns->prefix, prefix))
+ break;
+ ns = NULL;
+ }
+ }
+
+ if (!ns) {
+ *rval = JSVAL_VOID;
+ } else {
+ nsobj = js_GetXMLNamespaceObject(cx, ns);
+ if (!nsobj) {
+ ok = JS_FALSE;
+ goto out;
+ }
+ *rval = OBJECT_TO_JSVAL(nsobj);
+ }
+
+ out:
+    FinishTempNSArray(cx, &inScopeNSes);
+    return ok;
+}
+
+static JSBool
+xml_namespaceDeclarations(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml, *yml;
+ JSBool ok;
+ JSTempRootedNSArray ancestors, declared;
+ uint32 i, n;
+ JSXMLNamespace *ns;
+
+ NON_LIST_XML_METHOD_PROLOG;
+ if (JSXML_HAS_VALUE(xml))
+ return JS_TRUE;
+
+ /* From here, control flow must goto out to finish these arrays. */
+ ok = JS_TRUE;
+ InitTempNSArray(cx, &ancestors);
+ InitTempNSArray(cx, &declared);
+ yml = xml;
+
+ while ((yml = yml->parent) != NULL) {
+ JS_ASSERT(yml->xml_class == JSXML_CLASS_ELEMENT);
+ for (i = 0, n = yml->xml_namespaces.length; i < n; i++) {
+ ns = XMLARRAY_MEMBER(&yml->xml_namespaces, i, JSXMLNamespace);
+ if (ns &&
+ !XMLARRAY_HAS_MEMBER(&ancestors.array, ns, namespace_match)) {
+ ok = XMLARRAY_APPEND(cx, &ancestors.array, ns);
+ if (!ok)
+ goto out;
+ }
+ }
+ }
+
+ for (i = 0, n = xml->xml_namespaces.length; i < n; i++) {
+ ns = XMLARRAY_MEMBER(&xml->xml_namespaces, i, JSXMLNamespace);
+ if (!ns)
+ continue;
+ if (!ns->declared)
+ continue;
+ if (!XMLARRAY_HAS_MEMBER(&ancestors.array, ns, namespace_match)) {
+ ok = XMLARRAY_APPEND(cx, &declared.array, ns);
+ if (!ok)
+ goto out;
+ }
+ }
+
+ ok = TempNSArrayToJSArray(cx, &declared, rval);
+
+out:
+ /* Finishing must be in reverse order of initialization to follow LIFO. */
+ FinishTempNSArray(cx, &declared);
+ FinishTempNSArray(cx, &ancestors);
+ return ok;
+}
+
+static const char js_attribute_str[] = "attribute";
+static const char js_text_str[] = "text";
+
+/* Exported to jsgc.c #ifdef GC_MARK_DEBUG. */
+const char *js_xml_class_str[] = {
+ "list",
+ "element",
+ js_attribute_str,
+ "processing-instruction",
+ js_text_str,
+ "comment"
+};
+
+static JSBool
+xml_nodeKind(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml;
+ JSString *str;
+
+ NON_LIST_XML_METHOD_PROLOG;
+ str = JS_InternString(cx, js_xml_class_str[xml->xml_class]);
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+static JSBool
+NormalizingDelete(JSContext *cx, JSObject *obj, JSXML *xml, jsval id)
+{
+ jsval junk;
+
+ if (xml->xml_class == JSXML_CLASS_LIST)
+ return DeleteProperty(cx, obj, id, &junk);
+ return DeleteByIndex(cx, xml, id, &junk);
+}
+
+/*
+ * Erratum? The testcase js/tests/e4x/XML/13.4.4.26.js expects normalize to
+ * remove all-whitespace text between tags.
+ */
+static JSBool
+IsXMLSpace(JSString *str)
+{
+ const jschar *cp, *end;
+
+ cp = JSSTRING_CHARS(str);
+ end = cp + JSSTRING_LENGTH(str);
+ while (cp < end) {
+ if (!JS_ISXMLSPACE(*cp))
+ return JS_FALSE;
+ ++cp;
+ }
+ return JS_TRUE;
+}
+
+/* XML and XMLList */
+static JSBool
+xml_normalize(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml, *kid, *kid2;
+ uint32 i, n;
+ JSObject *kidobj;
+ JSString *str;
+ jsval junk;
+
+ XML_METHOD_PROLOG;
+ *rval = OBJECT_TO_JSVAL(obj);
+ if (!JSXML_HAS_KIDS(xml))
+ return JS_TRUE;
+
+ xml = CHECK_COPY_ON_WRITE(cx, xml, obj);
+ if (!xml)
+ return JS_FALSE;
+
+ for (i = 0, n = xml->xml_kids.length; i < n; i++) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, i, JSXML);
+ if (!kid)
+ continue;
+ if (kid->xml_class == JSXML_CLASS_ELEMENT) {
+ kidobj = js_GetXMLObject(cx, kid);
+ if (!kidobj || !xml_normalize(cx, kidobj, argc, argv, &junk))
+ return JS_FALSE;
+ } else if (kid->xml_class == JSXML_CLASS_TEXT) {
+ while (i + 1 < n &&
+ (kid2 = XMLARRAY_MEMBER(&xml->xml_kids, i + 1, JSXML)) &&
+ kid2->xml_class == JSXML_CLASS_TEXT) {
+ str = js_ConcatStrings(cx, kid->xml_value, kid2->xml_value);
+ if (!str)
+ return JS_FALSE;
+ if (!NormalizingDelete(cx, obj, xml, INT_TO_JSVAL(i + 1)))
+ return JS_FALSE;
+ n = xml->xml_kids.length;
+ kid->xml_value = str;
+ }
+ if (IS_EMPTY(kid->xml_value) || IsXMLSpace(kid->xml_value)) {
+ if (!NormalizingDelete(cx, obj, xml, INT_TO_JSVAL(i)))
+ return JS_FALSE;
+ n = xml->xml_kids.length;
+ --i;
+ }
+ }
+ }
+
+ return JS_TRUE;
+}
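+
+/*
+ * E.g. normalize() on an <a> element whose kids are "  ", <b/>, "  "
+ * coalesces adjacent text kids and then drops the ones that are empty or
+ * all XML whitespace (per the erratum note above), leaving just <b/>.
+ */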
+
+/* XML and XMLList */
+static JSBool
+xml_parent(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSXML *xml, *parent, *kid;
+ uint32 i, n;
+ JSObject *parentobj;
+
+ XML_METHOD_PROLOG;
+ parent = xml->parent;
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ *rval = JSVAL_VOID;
+ n = xml->xml_kids.length;
+ if (n == 0)
+ return JS_TRUE;
+
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, 0, JSXML);
+ if (!kid)
+ return JS_TRUE;
+ parent = kid->parent;
+ for (i = 1; i < n; i++) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, i, JSXML);
+ if (kid && kid->parent != parent)
+ return JS_TRUE;
+ }
+ }
+
+ if (!parent) {
+ *rval = JSVAL_NULL;
+ return JS_TRUE;
+ }
+
+ parentobj = js_GetXMLObject(cx, parent);
+ if (!parentobj)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(parentobj);
+ return JS_TRUE;
+}
+
+/* XML and XMLList */
+static JSBool
+xml_processingInstructions(JSContext *cx, JSObject *obj, uintN argc,
+ jsval *argv, jsval *rval)
+{
+ JSXML *xml, *list, *kid, *vxml;
+ jsval name, v;
+ JSXMLQName *nameqn;
+ jsid funid;
+ JSBool ok;
+ JSXMLArrayCursor cursor;
+ JSObject *kidobj;
+ uint32 i, n;
+
+ XML_METHOD_PROLOG;
+ name = (argc == 0) ? ATOM_KEY(cx->runtime->atomState.starAtom) : argv[0];
+ nameqn = ToXMLName(cx, name, &funid);
+ if (!nameqn)
+ return JS_FALSE;
+ argv[0] = OBJECT_TO_JSVAL(nameqn->object);
+
+ list = xml_list_helper(cx, xml, rval);
+ if (!list)
+ return JS_FALSE;
+ if (funid)
+ return JS_TRUE;
+
+ list->xml_targetprop = nameqn;
+ ok = JS_TRUE;
+
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ /* 13.5.4.17 Step 4 (misnumbered 9 -- Erratum?). */
+ XMLArrayCursorInit(&cursor, &xml->xml_kids);
+ while ((kid = (JSXML *) XMLArrayCursorNext(&cursor)) != NULL) {
+ if (kid->xml_class == JSXML_CLASS_ELEMENT) {
+ ok = js_EnterLocalRootScope(cx);
+ if (!ok)
+ break;
+ kidobj = js_GetXMLObject(cx, kid);
+ if (kidobj) {
+ ok = xml_processingInstructions(cx, kidobj, argc, argv, &v);
+ } else {
+ ok = JS_FALSE;
+ v = JSVAL_NULL;
+ }
+ js_LeaveLocalRootScopeWithResult(cx, v);
+ if (!ok)
+ break;
+ vxml = (JSXML *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(v));
+ if (JSXML_LENGTH(vxml) != 0) {
+ ok = Append(cx, list, vxml);
+ if (!ok)
+ break;
+ }
+ }
+ }
+ XMLArrayCursorFinish(&cursor);
+ } else {
+ /* 13.4.4.28 Step 4. */
+ for (i = 0, n = JSXML_LENGTH(xml); i < n; i++) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, i, JSXML);
+ if (kid && kid->xml_class == JSXML_CLASS_PROCESSING_INSTRUCTION &&
+ (IS_STAR(nameqn->localName) ||
+ js_EqualStrings(nameqn->localName, kid->name->localName))) {
+ ok = Append(cx, list, kid);
+ if (!ok)
+ break;
+ }
+ }
+ }
+
+ return ok;
+}
+
+static JSBool
+xml_prependChild(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml;
+
+ NON_LIST_XML_METHOD_PROLOG;
+ xml = CHECK_COPY_ON_WRITE(cx, xml, obj);
+ if (!xml)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(obj);
+ return Insert(cx, xml, 0, argv[0]);
+}
+
+/* XML and XMLList */
+static JSBool
+xml_propertyIsEnumerable(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml;
+ jsval name;
+ uint32 index;
+
+ XML_METHOD_PROLOG;
+ name = argv[0];
+ *rval = JSVAL_FALSE;
+ if (js_IdIsIndex(name, &index)) {
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ /* 13.5.4.18. */
+ *rval = BOOLEAN_TO_JSVAL(index < xml->xml_kids.length);
+ } else {
+ /* 13.4.4.30. */
+ *rval = BOOLEAN_TO_JSVAL(index == 0);
+ }
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+namespace_full_match(const void *a, const void *b)
+{
+ const JSXMLNamespace *nsa = (const JSXMLNamespace *) a;
+ const JSXMLNamespace *nsb = (const JSXMLNamespace *) b;
+
+ if (nsa->prefix && nsb->prefix &&
+ !js_EqualStrings(nsa->prefix, nsb->prefix)) {
+ return JS_FALSE;
+ }
+ return js_EqualStrings(nsa->uri, nsb->uri);
+}
+
+static JSBool
+xml_removeNamespace_helper(JSContext *cx, JSXML *xml, JSXMLNamespace *ns)
+{
+ JSXMLNamespace *thisns, *attrns;
+ uint32 i, n;
+ JSXML *attr, *kid;
+
+ thisns = GetNamespace(cx, xml->name, &xml->xml_namespaces);
+ JS_ASSERT(thisns);
+ if (thisns == ns)
+ return JS_TRUE;
+
+ for (i = 0, n = xml->xml_attrs.length; i < n; i++) {
+ attr = XMLARRAY_MEMBER(&xml->xml_attrs, i, JSXML);
+ if (!attr)
+ continue;
+ attrns = GetNamespace(cx, attr->name, &xml->xml_namespaces);
+ JS_ASSERT(attrns);
+ if (attrns == ns)
+ return JS_TRUE;
+ }
+
+ i = XMLARRAY_FIND_MEMBER(&xml->xml_namespaces, ns, namespace_full_match);
+ if (i != XML_NOT_FOUND)
+ XMLArrayDelete(cx, &xml->xml_namespaces, i, JS_TRUE);
+
+ for (i = 0, n = xml->xml_kids.length; i < n; i++) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, i, JSXML);
+ if (kid && kid->xml_class == JSXML_CLASS_ELEMENT) {
+ if (!xml_removeNamespace_helper(cx, kid, ns))
+ return JS_FALSE;
+ }
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+xml_removeNamespace(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml;
+ JSObject *nsobj;
+ JSXMLNamespace *ns;
+
+ NON_LIST_XML_METHOD_PROLOG;
+ *rval = OBJECT_TO_JSVAL(obj);
+ if (xml->xml_class != JSXML_CLASS_ELEMENT)
+ return JS_TRUE;
+ xml = CHECK_COPY_ON_WRITE(cx, xml, obj);
+ if (!xml)
+ return JS_FALSE;
+
+ nsobj = CallConstructorFunction(cx, obj, &js_NamespaceClass.base, 1, argv);
+ if (!nsobj)
+ return JS_FALSE;
+ argv[0] = OBJECT_TO_JSVAL(nsobj);
+ ns = (JSXMLNamespace *) JS_GetPrivate(cx, nsobj);
+
+    /* NOTE: remove ns from this element and its descendants where unused. */
+ return xml_removeNamespace_helper(cx, xml, ns);
+}
+
+static JSBool
+xml_replace(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSXML *xml, *vxml, *kid;
+ jsval name, value, id, junk;
+ uint32 index;
+ JSObject *nameobj;
+ JSXMLQName *nameqn;
+
+ NON_LIST_XML_METHOD_PROLOG;
+ *rval = OBJECT_TO_JSVAL(obj);
+ if (xml->xml_class != JSXML_CLASS_ELEMENT)
+ return JS_TRUE;
+
+ value = argv[1];
+ vxml = VALUE_IS_XML(cx, value)
+ ? (JSXML *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(value))
+ : NULL;
+ if (!vxml) {
+ if (!JS_ConvertValue(cx, value, JSTYPE_STRING, &argv[1]))
+ return JS_FALSE;
+ value = argv[1];
+ } else {
+ vxml = DeepCopy(cx, vxml, NULL, 0);
+ if (!vxml)
+ return JS_FALSE;
+ value = argv[1] = OBJECT_TO_JSVAL(vxml->object);
+ }
+
+ xml = CHECK_COPY_ON_WRITE(cx, xml, obj);
+ if (!xml)
+ return JS_FALSE;
+
+ name = argv[0];
+ if (js_IdIsIndex(name, &index))
+ return Replace(cx, xml, name, value);
+
+ /* Call function QName per spec, not ToXMLName, to avoid attribute names. */
+ nameobj = CallConstructorFunction(cx, obj, &js_QNameClass.base, 1, &name);
+ if (!nameobj)
+ return JS_FALSE;
+ argv[0] = OBJECT_TO_JSVAL(nameobj);
+ nameqn = (JSXMLQName *) JS_GetPrivate(cx, nameobj);
+
+ id = JSVAL_VOID;
+ index = xml->xml_kids.length;
+ while (index != 0) {
+ --index;
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, index, JSXML);
+ if (kid && MatchElemName(nameqn, kid)) {
+ if (!JSVAL_IS_VOID(id) && !DeleteByIndex(cx, xml, id, &junk))
+ return JS_FALSE;
+ if (!IndexToIdVal(cx, index, &id))
+ return JS_FALSE;
+ }
+ }
+ if (JSVAL_IS_VOID(id))
+ return JS_TRUE;
+ return Replace(cx, xml, id, value);
+}
+
+static JSBool
+xml_setChildren(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ if (!StartNonListXMLMethod(cx, &obj, argv))
+ return JS_FALSE;
+
+ if (!PutProperty(cx, obj, ATOM_KEY(cx->runtime->atomState.starAtom),
+ &argv[0])) {
+ return JS_FALSE;
+ }
+
+ *rval = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+}
+
+static JSBool
+xml_setLocalName(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml;
+ jsval name;
+ JSXMLQName *nameqn;
+ JSString *namestr;
+
+ NON_LIST_XML_METHOD_PROLOG;
+ if (!JSXML_HAS_NAME(xml))
+ return JS_TRUE;
+
+ name = argv[0];
+ if (!JSVAL_IS_PRIMITIVE(name) &&
+ OBJ_GET_CLASS(cx, JSVAL_TO_OBJECT(name)) == &js_QNameClass.base) {
+ nameqn = (JSXMLQName *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(name));
+ namestr = nameqn->localName;
+ } else {
+ if (!JS_ConvertValue(cx, name, JSTYPE_STRING, &argv[0]))
+ return JS_FALSE;
+ name = argv[0];
+ namestr = JSVAL_TO_STRING(name);
+ }
+
+ xml = CHECK_COPY_ON_WRITE(cx, xml, obj);
+ if (!xml)
+ return JS_FALSE;
+ xml->name->localName = namestr;
+ return JS_TRUE;
+}
+
+static JSBool
+xml_setName(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSXML *xml, *nsowner;
+ jsval name;
+ JSXMLQName *nameqn;
+ JSObject *nameobj;
+ JSXMLArray *nsarray;
+ uint32 i, n;
+ JSXMLNamespace *ns;
+
+ NON_LIST_XML_METHOD_PROLOG;
+ if (!JSXML_HAS_NAME(xml))
+ return JS_TRUE;
+
+ name = argv[0];
+ if (!JSVAL_IS_PRIMITIVE(name) &&
+ OBJ_GET_CLASS(cx, JSVAL_TO_OBJECT(name)) == &js_QNameClass.base &&
+ !(nameqn = (JSXMLQName *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(name)))
+ ->uri) {
+ name = argv[0] = STRING_TO_JSVAL(nameqn->localName);
+ }
+
+ nameobj = js_ConstructObject(cx, &js_QNameClass.base, NULL, NULL, 1, &name);
+ if (!nameobj)
+ return JS_FALSE;
+ nameqn = (JSXMLQName *) JS_GetPrivate(cx, nameobj);
+
+ /* ECMA-357 13.4.4.35 Step 4. */
+ if (xml->xml_class == JSXML_CLASS_PROCESSING_INSTRUCTION)
+ nameqn->uri = cx->runtime->emptyString;
+
+ xml = CHECK_COPY_ON_WRITE(cx, xml, obj);
+ if (!xml)
+ return JS_FALSE;
+ xml->name = nameqn;
+
+ /*
+ * Erratum: nothing in 13.4.4.35 talks about making the name match the
+ * in-scope namespaces, either by finding an in-scope namespace with a
+ * matching uri and setting the new name's prefix to that namespace's
+ * prefix, or by extending the in-scope namespaces for xml (which are in
+ * xml->parent if xml is an attribute or a PI).
+ */
+ if (xml->xml_class == JSXML_CLASS_ELEMENT) {
+ nsowner = xml;
+ } else {
+ if (!xml->parent || xml->parent->xml_class != JSXML_CLASS_ELEMENT)
+ return JS_TRUE;
+ nsowner = xml->parent;
+ }
+
+ if (nameqn->prefix) {
+ /*
+ * The name being set has a prefix, which originally came from some
+ * namespace object (which may be the null namespace, where both the
+ * prefix and uri are the empty string). We must go through a full
+ * GetNamespace in case that namespace is in-scope in nsowner.
+ *
+ * If we find such an in-scope namespace, we return true right away,
+ * in this block. Otherwise, we fall through to the final return of
+ * AddInScopeNamespace(cx, nsowner, ns).
+ */
+ ns = GetNamespace(cx, nameqn, &nsowner->xml_namespaces);
+ if (!ns)
+ return JS_FALSE;
+
+ /* XXXbe have to test membership to see whether GetNamespace added */
+ if (XMLARRAY_HAS_MEMBER(&nsowner->xml_namespaces, ns, NULL))
+ return JS_TRUE;
+ } else {
+ /*
+ * At this point, we know nameqn->prefix is null, so nameqn->uri can't
+ * be the empty string (the null namespace always uses the empty string
+ * for both prefix and uri).
+ *
+ * This means we must inline GetNamespace and specialize it to match
+ * uri only, never prefix. If we find a namespace with nameqn's uri
+ * already in nsowner->xml_namespaces, then all that we need do is set
+ * nameqn->prefix to that namespace's prefix.
+ *
+ * If no such namespace exists, we can create one without going through
+ * the constructor, because we know nameqn->uri is non-empty (so prefix
+ * does not need to be converted from null to empty by QName).
+ */
+ JS_ASSERT(!IS_EMPTY(nameqn->uri));
+
+ nsarray = &nsowner->xml_namespaces;
+ for (i = 0, n = nsarray->length; i < n; i++) {
+ ns = XMLARRAY_MEMBER(nsarray, i, JSXMLNamespace);
+ if (ns && js_EqualStrings(ns->uri, nameqn->uri)) {
+ nameqn->prefix = ns->prefix;
+ return JS_TRUE;
+ }
+ }
+
+ ns = js_NewXMLNamespace(cx, NULL, nameqn->uri, JS_TRUE);
+ if (!ns)
+ return JS_FALSE;
+ }
+
+ return AddInScopeNamespace(cx, nsowner, ns);
+}
+
+static JSBool
+xml_setNamespace(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml, *nsowner;
+ JSObject *nsobj, *qnobj;
+ JSXMLNamespace *ns;
+ jsval qnargv[2];
+
+ NON_LIST_XML_METHOD_PROLOG;
+ if (!JSXML_HAS_NAME(xml))
+ return JS_TRUE;
+
+ xml = CHECK_COPY_ON_WRITE(cx, xml, obj);
+ if (!xml || !js_GetXMLQNameObject(cx, xml->name))
+ return JS_FALSE;
+
+ nsobj = js_ConstructObject(cx, &js_NamespaceClass.base, NULL, obj, 1, argv);
+ if (!nsobj)
+ return JS_FALSE;
+ ns = (JSXMLNamespace *) JS_GetPrivate(cx, nsobj);
+ ns->declared = JS_TRUE;
+
+ qnargv[0] = argv[0] = OBJECT_TO_JSVAL(nsobj);
+ qnargv[1] = OBJECT_TO_JSVAL(xml->name->object);
+ qnobj = js_ConstructObject(cx, &js_QNameClass.base, NULL, NULL, 2, qnargv);
+ if (!qnobj)
+ return JS_FALSE;
+
+ xml->name = (JSXMLQName *) JS_GetPrivate(cx, qnobj);
+
+ /*
+ * Erratum: the spec fails to update the governing in-scope namespaces.
+ * See the erratum noted in xml_setName, above.
+ */
+ if (xml->xml_class == JSXML_CLASS_ELEMENT) {
+ nsowner = xml;
+ } else {
+ if (!xml->parent || xml->parent->xml_class != JSXML_CLASS_ELEMENT)
+ return JS_TRUE;
+ nsowner = xml->parent;
+ }
+ return AddInScopeNamespace(cx, nsowner, ns);
+}
+
+/* XML and XMLList */
+static JSBool
+xml_text(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSXML *xml, *list, *kid, *vxml;
+ uint32 i, n;
+ JSBool ok;
+ JSObject *kidobj;
+ jsval v;
+
+ XML_METHOD_PROLOG;
+ list = xml_list_helper(cx, xml, rval);
+ if (!list)
+ return JS_FALSE;
+
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ ok = JS_TRUE;
+ for (i = 0, n = xml->xml_kids.length; i < n; i++) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, i, JSXML);
+ if (kid && kid->xml_class == JSXML_CLASS_ELEMENT) {
+ ok = js_EnterLocalRootScope(cx);
+ if (!ok)
+ break;
+ kidobj = js_GetXMLObject(cx, kid);
+ if (kidobj) {
+ ok = xml_text(cx, kidobj, argc, argv, &v);
+ } else {
+ ok = JS_FALSE;
+ v = JSVAL_NULL;
+ }
+ js_LeaveLocalRootScopeWithResult(cx, v);
+ if (!ok)
+ return JS_FALSE;
+ vxml = (JSXML *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(v));
+ if (JSXML_LENGTH(vxml) != 0 && !Append(cx, list, vxml))
+ return JS_FALSE;
+ }
+ }
+ } else {
+ for (i = 0, n = JSXML_LENGTH(xml); i < n; i++) {
+ kid = XMLARRAY_MEMBER(&xml->xml_kids, i, JSXML);
+ if (kid && kid->xml_class == JSXML_CLASS_TEXT) {
+ if (!Append(cx, list, kid))
+ return JS_FALSE;
+ }
+ }
+ }
+ return JS_TRUE;
+}
+
+/* XML and XMLList */
+static JSBool
+xml_toXMLString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSString *str;
+
+ str = ToXMLString(cx, OBJECT_TO_JSVAL(obj));
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+/* XML and XMLList */
+static JSString *
+xml_toString_helper(JSContext *cx, JSXML *xml)
+{
+ JSString *str, *kidstr;
+ JSXML *kid;
+ JSXMLArrayCursor cursor;
+
+ if (xml->xml_class == JSXML_CLASS_ATTRIBUTE ||
+ xml->xml_class == JSXML_CLASS_TEXT) {
+ return xml->xml_value;
+ }
+
+ if (!HasSimpleContent(xml))
+ return ToXMLString(cx, OBJECT_TO_JSVAL(xml->object));
+
+ str = cx->runtime->emptyString;
+ js_EnterLocalRootScope(cx);
+ XMLArrayCursorInit(&cursor, &xml->xml_kids);
+ while ((kid = (JSXML *) XMLArrayCursorNext(&cursor)) != NULL) {
+ if (kid->xml_class != JSXML_CLASS_COMMENT &&
+ kid->xml_class != JSXML_CLASS_PROCESSING_INSTRUCTION) {
+ kidstr = xml_toString_helper(cx, kid);
+ if (!kidstr) {
+ str = NULL;
+ break;
+ }
+ str = js_ConcatStrings(cx, str, kidstr);
+ if (!str)
+ break;
+ }
+ }
+ XMLArrayCursorFinish(&cursor);
+ js_LeaveLocalRootScopeWithResult(cx, STRING_TO_JSVAL(str));
+ return str;
+}
+
+static JSBool
+xml_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSXML *xml;
+ JSString *str;
+
+ XML_METHOD_PROLOG;
+ str = xml_toString_helper(cx, xml);
+ if (!str)
+ return JS_FALSE;
+ *rval = STRING_TO_JSVAL(str);
+ return JS_TRUE;
+}
+
+/* XML and XMLList */
+static JSBool
+xml_valueOf(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ *rval = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+}
+
+static JSFunctionSpec xml_methods[] = {
+ {"addNamespace", xml_addNamespace, 1,0,0},
+ {"appendChild", xml_appendChild, 1,0,0},
+ {js_attribute_str, xml_attribute, 1,0,0},
+ {"attributes", xml_attributes, 0,0,0},
+ {"child", xml_child, 1,0,0},
+ {"childIndex", xml_childIndex, 0,0,0},
+ {"children", xml_children, 0,0,0},
+ {"comments", xml_comments, 0,0,0},
+ {"contains", xml_contains, 1,0,0},
+ {"copy", xml_copy, 0,0,0},
+ {"descendants", xml_descendants, 1,0,0},
+ {"elements", xml_elements, 1,0,0},
+ {"hasOwnProperty", xml_hasOwnProperty, 1,0,0},
+ {"hasComplexContent", xml_hasComplexContent, 1,0,0},
+ {"hasSimpleContent", xml_hasSimpleContent, 1,0,0},
+ {"inScopeNamespaces", xml_inScopeNamespaces, 0,0,0},
+ {"insertChildAfter", xml_insertChildAfter, 2,0,0},
+ {"insertChildBefore", xml_insertChildBefore, 2,0,0},
+ {js_length_str, xml_length, 0,0,0},
+ {js_localName_str, xml_localName, 0,0,0},
+ {js_name_str, xml_name, 0,0,0},
+ {js_namespace_str, xml_namespace, 1,0,0},
+ {"namespaceDeclarations", xml_namespaceDeclarations, 0,0,0},
+ {"nodeKind", xml_nodeKind, 0,0,0},
+ {"normalize", xml_normalize, 0,0,0},
+ {js_xml_parent_str, xml_parent, 0,0,0},
+ {"processingInstructions",xml_processingInstructions,1,0,0},
+ {"prependChild", xml_prependChild, 1,0,0},
+ {"propertyIsEnumerable", xml_propertyIsEnumerable, 1,0,0},
+ {"removeNamespace", xml_removeNamespace, 1,0,0},
+ {"replace", xml_replace, 2,0,0},
+ {"setChildren", xml_setChildren, 1,0,0},
+ {"setLocalName", xml_setLocalName, 1,0,0},
+ {"setName", xml_setName, 1,0,0},
+ {"setNamespace", xml_setNamespace, 1,0,0},
+ {js_text_str, xml_text, 0,0,0},
+ {js_toString_str, xml_toString, 0,0,0},
+ {js_toXMLString_str, xml_toXMLString, 0,0,0},
+ {js_toSource_str, xml_toXMLString, 0,0,0},
+ {js_valueOf_str, xml_valueOf, 0,0,0},
+ {0,0,0,0,0}
+};
+
+static JSBool
+CopyXMLSettings(JSContext *cx, JSObject *from, JSObject *to)
+{
+ int i;
+ const char *name;
+ jsval v;
+
+ for (i = XML_IGNORE_COMMENTS; i < XML_PRETTY_INDENT; i++) {
+ name = xml_static_props[i].name;
+ if (!JS_GetProperty(cx, from, name, &v))
+ return JS_FALSE;
+ if (JSVAL_IS_BOOLEAN(v) && !JS_SetProperty(cx, to, name, &v))
+ return JS_FALSE;
+ }
+
+ name = xml_static_props[i].name;
+ if (!JS_GetProperty(cx, from, name, &v))
+ return JS_FALSE;
+ if (JSVAL_IS_NUMBER(v) && !JS_SetProperty(cx, to, name, &v))
+ return JS_FALSE;
+ return JS_TRUE;
+}
+
+static JSBool
+SetDefaultXMLSettings(JSContext *cx, JSObject *obj)
+{
+ int i;
+ jsval v;
+
+ for (i = XML_IGNORE_COMMENTS; i < XML_PRETTY_INDENT; i++) {
+ v = JSVAL_TRUE;
+ if (!JS_SetProperty(cx, obj, xml_static_props[i].name, &v))
+ return JS_FALSE;
+ }
+ v = INT_TO_JSVAL(2);
+ return JS_SetProperty(cx, obj, xml_static_props[i].name, &v);
+}
+
+static JSBool
+xml_settings(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ JSObject *settings;
+
+ settings = JS_NewObject(cx, NULL, NULL, NULL);
+ if (!settings)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(settings);
+ return CopyXMLSettings(cx, obj, settings);
+}
+
+static JSBool
+xml_setSettings(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ jsval v;
+ JSBool ok;
+ JSObject *settings;
+
+ v = argv[0];
+ if (JSVAL_IS_NULL(v) || JSVAL_IS_VOID(v)) {
+ cx->xmlSettingFlags = 0;
+ ok = SetDefaultXMLSettings(cx, obj);
+ } else {
+ if (JSVAL_IS_PRIMITIVE(v))
+ return JS_TRUE;
+ settings = JSVAL_TO_OBJECT(v);
+ cx->xmlSettingFlags = 0;
+ ok = CopyXMLSettings(cx, settings, obj);
+ }
+ if (ok)
+ cx->xmlSettingFlags |= XSF_CACHE_VALID;
+ return ok;
+}
+
+static JSBool
+xml_defaultSettings(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ JSObject *settings;
+
+ settings = JS_NewObject(cx, NULL, NULL, NULL);
+ if (!settings)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(settings);
+ return SetDefaultXMLSettings(cx, settings);
+}
+
+static JSFunctionSpec xml_static_methods[] = {
+ {"settings", xml_settings, 0,0,0},
+ {"setSettings", xml_setSettings, 1,0,0},
+ {"defaultSettings", xml_defaultSettings, 0,0,0},
+ {0,0,0,0,0}
+};
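+
+/*
+ * For illustration, from E4X script these statics manage the per-context
+ * serialization flags declared in xml_static_props, e.g.:
+ *
+ *   var saved = XML.settings();
+ *   XML.prettyIndent = 4;       // or ignoreComments, prettyPrinting, ...
+ *   ...
+ *   XML.setSettings(saved);     // XML.setSettings() restores the defaults
+ */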
+
+static JSBool
+XML(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsval v;
+ JSXML *xml, *copy;
+ JSObject *xobj, *vobj;
+ JSClass *clasp;
+
+ v = argv[0];
+ if (JSVAL_IS_NULL(v) || JSVAL_IS_VOID(v))
+ v = STRING_TO_JSVAL(cx->runtime->emptyString);
+
+ xobj = ToXML(cx, v);
+ if (!xobj)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(xobj);
+ xml = (JSXML *) JS_GetPrivate(cx, xobj);
+
+ if ((cx->fp->flags & JSFRAME_CONSTRUCTING) && !JSVAL_IS_PRIMITIVE(v)) {
+ vobj = JSVAL_TO_OBJECT(v);
+ clasp = OBJ_GET_CLASS(cx, vobj);
+ if (clasp == &js_XMLClass ||
+ (clasp->flags & JSCLASS_DOCUMENT_OBSERVER)) {
+ /* No need to lock obj, it's newly constructed and thread local. */
+ copy = DeepCopy(cx, xml, obj, 0);
+ if (!copy)
+ return JS_FALSE;
+ JS_ASSERT(copy->object == obj);
+ *rval = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+ }
+ }
+ return JS_TRUE;
+}
+
+static JSBool
+XMLList(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
+{
+ jsval v;
+ JSObject *vobj, *listobj;
+ JSXML *xml, *list;
+
+ v = argv[0];
+ if (JSVAL_IS_NULL(v) || JSVAL_IS_VOID(v))
+ v = STRING_TO_JSVAL(cx->runtime->emptyString);
+
+ if ((cx->fp->flags & JSFRAME_CONSTRUCTING) && !JSVAL_IS_PRIMITIVE(v)) {
+ vobj = JSVAL_TO_OBJECT(v);
+ if (OBJECT_IS_XML(cx, vobj)) {
+ xml = (JSXML *) JS_GetPrivate(cx, vobj);
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ listobj = js_NewXMLObject(cx, JSXML_CLASS_LIST);
+ if (!listobj)
+ return JS_FALSE;
+ *rval = OBJECT_TO_JSVAL(listobj);
+
+ list = (JSXML *) JS_GetPrivate(cx, listobj);
+ if (!Append(cx, list, xml))
+ return JS_FALSE;
+ return JS_TRUE;
+ }
+ }
+ }
+
+ /* Toggle on XML support since the script has explicitly requested it. */
+ listobj = ToXMLList(cx, v);
+ if (!listobj)
+ return JS_FALSE;
+
+ *rval = OBJECT_TO_JSVAL(listobj);
+ return JS_TRUE;
+}
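+
+/*
+ * Roughly, in script: XMLList("<a/><b/>") parses into a two-element list,
+ * while new XMLList(existingList) takes the branch above and builds a
+ * fresh list wrapping the same kids rather than re-parsing.
+ */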
+
+#define JSXML_LIST_SIZE (offsetof(JSXML, u) + sizeof(struct JSXMLListVar))
+#define JSXML_ELEMENT_SIZE (offsetof(JSXML, u) + sizeof(struct JSXMLVar))
+#define JSXML_LEAF_SIZE (offsetof(JSXML, u) + sizeof(JSString *))
+
+static size_t sizeof_JSXML[JSXML_CLASS_LIMIT] = {
+ JSXML_LIST_SIZE, /* JSXML_CLASS_LIST */
+ JSXML_ELEMENT_SIZE, /* JSXML_CLASS_ELEMENT */
+ JSXML_LEAF_SIZE, /* JSXML_CLASS_ATTRIBUTE */
+ JSXML_LEAF_SIZE, /* JSXML_CLASS_PROCESSING_INSTRUCTION */
+ JSXML_LEAF_SIZE, /* JSXML_CLASS_TEXT */
+ JSXML_LEAF_SIZE /* JSXML_CLASS_COMMENT */
+};
+
+#ifdef DEBUG_notme
+JSCList xml_leaks = JS_INIT_STATIC_CLIST(&xml_leaks);
+uint32 xml_serial;
+#endif
+
+JSXML *
+js_NewXML(JSContext *cx, JSXMLClass xml_class)
+{
+ JSXML *xml;
+
+ xml = (JSXML *) js_NewGCThing(cx, GCX_XML, sizeof_JSXML[xml_class]);
+ if (!xml)
+ return NULL;
+
+ xml->object = NULL;
+ xml->domnode = NULL;
+ xml->parent = NULL;
+ xml->name = NULL;
+ xml->xml_class = xml_class;
+ xml->xml_flags = 0;
+ if (JSXML_CLASS_HAS_VALUE(xml_class)) {
+ xml->xml_value = cx->runtime->emptyString;
+ } else {
+ XMLArrayInit(cx, &xml->xml_kids, 0);
+ if (xml_class == JSXML_CLASS_LIST) {
+ xml->xml_target = NULL;
+ xml->xml_targetprop = NULL;
+ } else {
+ XMLArrayInit(cx, &xml->xml_namespaces, 0);
+ XMLArrayInit(cx, &xml->xml_attrs, 0);
+ }
+ }
+
+#ifdef DEBUG_notme
+ JS_APPEND_LINK(&xml->links, &xml_leaks);
+ xml->serial = xml_serial++;
+#endif
+ METER(xml_stats.xml);
+ METER(xml_stats.livexml);
+ return xml;
+}
+
+void
+js_MarkXML(JSContext *cx, JSXML *xml)
+{
+ GC_MARK(cx, xml->object, "object");
+ GC_MARK(cx, xml->name, "name");
+ GC_MARK(cx, xml->parent, "xml_parent");
+
+ if (JSXML_HAS_VALUE(xml)) {
+ GC_MARK(cx, xml->xml_value, "value");
+ return;
+ }
+
+ xml_mark_vector(cx,
+ (JSXML **) xml->xml_kids.vector,
+ xml->xml_kids.length);
+ XMLArrayCursorMark(cx, xml->xml_kids.cursors);
+ XMLArrayTrim(&xml->xml_kids);
+
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ if (xml->xml_target)
+ GC_MARK(cx, xml->xml_target, "target");
+ if (xml->xml_targetprop)
+ GC_MARK(cx, xml->xml_targetprop, "targetprop");
+ } else {
+ namespace_mark_vector(cx,
+ (JSXMLNamespace **) xml->xml_namespaces.vector,
+ xml->xml_namespaces.length);
+ XMLArrayCursorMark(cx, xml->xml_namespaces.cursors);
+ XMLArrayTrim(&xml->xml_namespaces);
+
+ xml_mark_vector(cx,
+ (JSXML **) xml->xml_attrs.vector,
+ xml->xml_attrs.length);
+ XMLArrayCursorMark(cx, xml->xml_attrs.cursors);
+ XMLArrayTrim(&xml->xml_attrs);
+ }
+}
+
+void
+js_FinalizeXML(JSContext *cx, JSXML *xml)
+{
+ if (JSXML_HAS_KIDS(xml)) {
+ XMLArrayFinish(cx, &xml->xml_kids);
+ if (xml->xml_class == JSXML_CLASS_ELEMENT) {
+ XMLArrayFinish(cx, &xml->xml_namespaces);
+ XMLArrayFinish(cx, &xml->xml_attrs);
+ }
+ }
+
+#ifdef DEBUG_notme
+ JS_REMOVE_LINK(&xml->links);
+#endif
+
+ UNMETER(xml_stats.livexml);
+}
+
+JSObject *
+js_ParseNodeToXMLObject(JSContext *cx, JSParseNode *pn)
+{
+ jsval nsval;
+ JSXMLNamespace *ns;
+ JSXMLArray nsarray;
+ JSXML *xml;
+
+ if (!js_GetDefaultXMLNamespace(cx, &nsval))
+ return NULL;
+ JS_ASSERT(!JSVAL_IS_PRIMITIVE(nsval));
+ ns = (JSXMLNamespace *) JS_GetPrivate(cx, JSVAL_TO_OBJECT(nsval));
+
+ if (!XMLArrayInit(cx, &nsarray, 1))
+ return NULL;
+
+ XMLARRAY_APPEND(cx, &nsarray, ns);
+ xml = ParseNodeToXML(cx, pn, &nsarray, XSF_PRECOMPILED_ROOT);
+ XMLArrayFinish(cx, &nsarray);
+ if (!xml)
+ return NULL;
+
+ return xml->object;
+}
+
+JSObject *
+js_NewXMLObject(JSContext *cx, JSXMLClass xml_class)
+{
+ JSXML *xml;
+ JSObject *obj;
+ JSTempValueRooter tvr;
+
+ xml = js_NewXML(cx, xml_class);
+ if (!xml)
+ return NULL;
+ JS_PUSH_TEMP_ROOT_GCTHING(cx, xml, &tvr);
+ obj = js_GetXMLObject(cx, xml);
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return obj;
+}
+
+static JSObject *
+NewXMLObject(JSContext *cx, JSXML *xml)
+{
+ JSObject *obj;
+
+ obj = js_NewObject(cx, &js_XMLClass, NULL, NULL);
+ if (!obj || !JS_SetPrivate(cx, obj, xml)) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ return NULL;
+ }
+ METER(xml_stats.xmlobj);
+ METER(xml_stats.livexmlobj);
+ return obj;
+}
+
+JSObject *
+js_GetXMLObject(JSContext *cx, JSXML *xml)
+{
+ JSObject *obj;
+
+ obj = xml->object;
+ if (obj) {
+ JS_ASSERT(JS_GetPrivate(cx, obj) == xml);
+ return obj;
+ }
+
+ /*
+ * A JSXML cannot be shared among threads unless it has an object.
+ * A JSXML cannot be given an object unless:
+ * (a) it has no parent; or
+ * (b) its parent has no object (therefore is thread-private); or
+ * (c) its parent's object is locked.
+ *
+ * Once given an object, a JSXML is immutable.
+ */
+ JS_ASSERT(!xml->parent ||
+ !xml->parent->object ||
+ JS_IS_OBJ_LOCKED(cx, xml->parent->object));
+
+ obj = NewXMLObject(cx, xml);
+ if (!obj)
+ return NULL;
+ xml->object = obj;
+ return obj;
+}
+
+JSObject *
+js_InitNamespaceClass(JSContext *cx, JSObject *obj)
+{
+ return JS_InitClass(cx, obj, NULL, &js_NamespaceClass.base, Namespace, 2,
+ namespace_props, namespace_methods, NULL, NULL);
+}
+
+JSObject *
+js_InitQNameClass(JSContext *cx, JSObject *obj)
+{
+ return JS_InitClass(cx, obj, NULL, &js_QNameClass.base, QName, 2,
+ qname_props, qname_methods, NULL, NULL);
+}
+
+JSObject *
+js_InitAttributeNameClass(JSContext *cx, JSObject *obj)
+{
+ return JS_InitClass(cx, obj, NULL, &js_AttributeNameClass, AttributeName, 2,
+ qname_props, qname_methods, NULL, NULL);
+}
+
+JSObject *
+js_InitAnyNameClass(JSContext *cx, JSObject *obj)
+{
+ jsval v;
+
+ if (!js_GetAnyName(cx, &v))
+ return NULL;
+ return JSVAL_TO_OBJECT(v);
+}
+
+JSObject *
+js_InitXMLClass(JSContext *cx, JSObject *obj)
+{
+ JSObject *proto, *pobj, *ctor;
+ JSFunction *fun;
+ JSXML *xml;
+ JSProperty *prop;
+ JSScopeProperty *sprop;
+ jsval cval, argv[1], junk;
+
+ /* Define the isXMLName function. */
+ if (!JS_DefineFunction(cx, obj, js_isXMLName_str, xml_isXMLName, 1, 0))
+ return NULL;
+
+ /* Define the XML class constructor and prototype. */
+ proto = JS_InitClass(cx, obj, NULL, &js_XMLClass, XML, 1,
+ NULL, xml_methods,
+ xml_static_props, xml_static_methods);
+ if (!proto)
+ return NULL;
+
+ xml = js_NewXML(cx, JSXML_CLASS_TEXT);
+ if (!xml || !JS_SetPrivate(cx, proto, xml))
+ return NULL;
+ xml->object = proto;
+ METER(xml_stats.xmlobj);
+ METER(xml_stats.livexmlobj);
+
+ /*
+ * Prepare to set default settings on the XML constructor we just made.
+ * NB: We can't use JS_GetConstructor, because it calls OBJ_GET_PROPERTY,
+ * which is xml_getProperty, which creates a new XMLList every time! We
+ * must instead call js_LookupProperty directly.
+ */
+ if (!js_LookupProperty(cx, proto,
+ ATOM_TO_JSID(cx->runtime->atomState.constructorAtom),
+ &pobj, &prop)) {
+ return NULL;
+ }
+ JS_ASSERT(prop);
+ sprop = (JSScopeProperty *) prop;
+ JS_ASSERT(SPROP_HAS_VALID_SLOT(sprop, OBJ_SCOPE(pobj)));
+ cval = OBJ_GET_SLOT(cx, pobj, sprop->slot);
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+ JS_ASSERT(VALUE_IS_FUNCTION(cx, cval));
+
+ /* Set default settings. */
+ ctor = JSVAL_TO_OBJECT(cval);
+ argv[0] = JSVAL_VOID;
+ if (!xml_setSettings(cx, ctor, 1, argv, &junk))
+ return NULL;
+
+ /* Define the XMLList function and give it the same prototype as XML. */
+ fun = JS_DefineFunction(cx, obj, js_XMLList_str, XMLList, 1, 0);
+ if (!fun)
+ return NULL;
+ if (!js_SetClassPrototype(cx, fun->object, proto,
+ JSPROP_READONLY | JSPROP_PERMANENT)) {
+ return NULL;
+ }
+ return proto;
+}
+
+JSObject *
+js_InitXMLClasses(JSContext *cx, JSObject *obj)
+{
+ if (!js_InitNamespaceClass(cx, obj))
+ return NULL;
+ if (!js_InitQNameClass(cx, obj))
+ return NULL;
+ if (!js_InitAttributeNameClass(cx, obj))
+ return NULL;
+ if (!js_InitAnyNameClass(cx, obj))
+ return NULL;
+ return js_InitXMLClass(cx, obj);
+}
+
+JSBool
+js_GetFunctionNamespace(JSContext *cx, jsval *vp)
+{
+ JSRuntime *rt;
+ JSObject *obj;
+ JSAtom *atom;
+ JSString *prefix, *uri;
+
+ /* An invalid URI, for internal use only, guaranteed not to collide. */
+ static const char anti_uri[] = "@mozilla.org/js/function";
+
+ /* Optimize by avoiding JS_LOCK_GC(rt) for the common case. */
+ rt = cx->runtime;
+ obj = rt->functionNamespaceObject;
+ if (!obj) {
+ JS_LOCK_GC(rt);
+ obj = rt->functionNamespaceObject;
+ if (!obj) {
+ JS_UNLOCK_GC(rt);
+ atom = js_Atomize(cx, js_function_str, 8, 0);
+ JS_ASSERT(atom);
+ prefix = ATOM_TO_STRING(atom);
+
+ /*
+ * Note that any race to atomize anti_uri here is resolved by
+ * the atom table code, such that at most one atom for anti_uri
+ * is created. We store in rt->atomState.lazy unconditionally,
+ * since we are guaranteed to overwrite either null or the same
+ * atom pointer.
+ */
+ atom = js_Atomize(cx, anti_uri, sizeof anti_uri - 1, ATOM_PINNED);
+ if (!atom)
+ return JS_FALSE;
+ rt->atomState.lazy.functionNamespaceURIAtom = atom;
+
+ uri = ATOM_TO_STRING(atom);
+ obj = js_NewXMLNamespaceObject(cx, prefix, uri, JS_FALSE);
+ if (!obj)
+ return JS_FALSE;
+
+ /*
+ * Avoid entraining any in-scope Object.prototype. The loss of
+ * Namespace.prototype is not detectable, as there is no way to
+ * refer to this instance in scripts. When used to qualify method
+ * names, its prefix and uri references are copied to the QName.
+ */
+ OBJ_SET_PROTO(cx, obj, NULL);
+ OBJ_SET_PARENT(cx, obj, NULL);
+
+ JS_LOCK_GC(rt);
+ if (!rt->functionNamespaceObject)
+ rt->functionNamespaceObject = obj;
+ else
+ obj = rt->functionNamespaceObject;
+ }
+ JS_UNLOCK_GC(rt);
+ }
+ *vp = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+}
+
+/*
+ * Note the asymmetry between js_GetDefaultXMLNamespace and js_SetDefaultXML-
+ * Namespace. Get searches fp->scopeChain for JS_DEFAULT_XML_NAMESPACE_ID,
+ * while Set sets JS_DEFAULT_XML_NAMESPACE_ID in fp->varobj (unless fp is a
+ * lightweight function activation). There's no requirement that fp->varobj
+ * lie directly on fp->scopeChain, although it should be reachable using the
+ * prototype chain from a scope object (cf. JSOPTION_VAROBJFIX in jsapi.h).
+ *
+ * If Get can't find JS_DEFAULT_XML_NAMESPACE_ID along the scope chain, it
+ * creates a default namespace via 'new Namespace()'. In contrast, Set uses
+ * its v argument as the uri of a new Namespace, with "" as the prefix. See
+ * ECMA-357 12.1 and 12.1.1. Note that if Set is called with a Namespace n,
+ * the default XML namespace will be set to ("", n.uri). So the uri string
+ * is really the only usefully stored value of the default namespace.
+ */
+JSBool
+js_GetDefaultXMLNamespace(JSContext *cx, jsval *vp)
+{
+ JSStackFrame *fp;
+ JSObject *nsobj, *obj, *tmp;
+ jsval v;
+
+ fp = cx->fp;
+ nsobj = fp->xmlNamespace;
+ if (nsobj) {
+ *vp = OBJECT_TO_JSVAL(nsobj);
+ return JS_TRUE;
+ }
+
+ obj = NULL;
+ for (tmp = fp->scopeChain; tmp; tmp = OBJ_GET_PARENT(cx, obj)) {
+ obj = tmp;
+ if (!OBJ_GET_PROPERTY(cx, obj, JS_DEFAULT_XML_NAMESPACE_ID, &v))
+ return JS_FALSE;
+ if (!JSVAL_IS_PRIMITIVE(v)) {
+ fp->xmlNamespace = JSVAL_TO_OBJECT(v);
+ *vp = v;
+ return JS_TRUE;
+ }
+ }
+
+ nsobj = js_ConstructObject(cx, &js_NamespaceClass.base, NULL, obj, 0, NULL);
+ if (!nsobj)
+ return JS_FALSE;
+ v = OBJECT_TO_JSVAL(nsobj);
+ if (obj &&
+ !OBJ_DEFINE_PROPERTY(cx, obj, JS_DEFAULT_XML_NAMESPACE_ID, v,
+ JS_PropertyStub, JS_PropertyStub,
+ JSPROP_PERMANENT, NULL)) {
+ return JS_FALSE;
+ }
+ fp->xmlNamespace = nsobj;
+ *vp = v;
+ return JS_TRUE;
+}
+
+JSBool
+js_SetDefaultXMLNamespace(JSContext *cx, jsval v)
+{
+ jsval argv[2];
+ JSObject *nsobj, *varobj;
+ JSStackFrame *fp;
+
+ argv[0] = STRING_TO_JSVAL(cx->runtime->emptyString);
+ argv[1] = v;
+ nsobj = js_ConstructObject(cx, &js_NamespaceClass.base, NULL, NULL,
+ 2, argv);
+ if (!nsobj)
+ return JS_FALSE;
+ v = OBJECT_TO_JSVAL(nsobj);
+
+ fp = cx->fp;
+ varobj = fp->varobj;
+ if (varobj) {
+ if (!OBJ_DEFINE_PROPERTY(cx, varobj, JS_DEFAULT_XML_NAMESPACE_ID, v,
+ JS_PropertyStub, JS_PropertyStub,
+ JSPROP_PERMANENT, NULL)) {
+ return JS_FALSE;
+ }
+ } else {
+ JS_ASSERT(fp->fun && !JSFUN_HEAVYWEIGHT_TEST(fp->fun->flags));
+ }
+ fp->xmlNamespace = JSVAL_TO_OBJECT(v);
+ return JS_TRUE;
+}
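+
+/*
+ * For illustration only (a hypothetical E4X usage sketch, not code from this
+ * file), the get/set asymmetry described above plays out in script as:
+ *
+ *     default xml namespace = "http://example.org/ns";  // Set: uri, prefix ""
+ *     var x = <a/>;                                     // Get finds that uri
+ *     x.name().uri                                      // "http://example.org/ns"
+ *
+ * Only the uri survives, as noted in the comment before these two functions.
+ */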
+
+JSBool
+js_ToAttributeName(JSContext *cx, jsval *vp)
+{
+ JSXMLQName *qn;
+
+ qn = ToAttributeName(cx, *vp);
+ if (!qn)
+ return JS_FALSE;
+ *vp = OBJECT_TO_JSVAL(qn->object);
+ return JS_TRUE;
+}
+
+JSString *
+js_EscapeAttributeValue(JSContext *cx, JSString *str)
+{
+ return EscapeAttributeValue(cx, NULL, str);
+}
+
+JSString *
+js_AddAttributePart(JSContext *cx, JSBool isName, JSString *str, JSString *str2)
+{
+ size_t len, len2, newlen;
+ jschar *chars;
+
+ if (JSSTRING_IS_DEPENDENT(str) ||
+ !(*js_GetGCThingFlags(str) & GCF_MUTABLE)) {
+ str = js_NewStringCopyN(cx, JSSTRING_CHARS(str), JSSTRING_LENGTH(str),
+ 0);
+ if (!str)
+ return NULL;
+ }
+
+ len = str->length;
+ len2 = JSSTRING_LENGTH(str2);
+ newlen = (isName) ? len + 1 + len2 : len + 2 + len2 + 1;
+ chars = (jschar *) JS_realloc(cx, str->chars, (newlen+1) * sizeof(jschar));
+ if (!chars)
+ return NULL;
+
+ /*
+ * Reallocating str (because we know it has no other references) requires
+ * purging any deflated string cached for it.
+ */
+ js_PurgeDeflatedStringCache(cx->runtime, str);
+
+ str->chars = chars;
+ str->length = newlen;
+ chars += len;
+ if (isName) {
+ *chars++ = ' ';
+ js_strncpy(chars, JSSTRING_CHARS(str2), len2);
+ chars += len2;
+ } else {
+ *chars++ = '=';
+ *chars++ = '"';
+ js_strncpy(chars, JSSTRING_CHARS(str2), len2);
+ chars += len2;
+ *chars++ = '"';
+ }
+ *chars = 0;
+ return str;
+}
+
+JSString *
+js_EscapeElementValue(JSContext *cx, JSString *str)
+{
+ return EscapeElementValue(cx, NULL, str);
+}
+
+JSString *
+js_ValueToXMLString(JSContext *cx, jsval v)
+{
+ return ToXMLString(cx, v);
+}
+
+static JSBool
+anyname_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
+ jsval *rval)
+{
+ *rval = ATOM_KEY(cx->runtime->atomState.starAtom);
+ return JS_TRUE;
+}
+
+JSBool
+js_GetAnyName(JSContext *cx, jsval *vp)
+{
+ JSRuntime *rt;
+ JSObject *obj;
+ JSXMLQName *qn;
+ JSBool ok;
+
+ /* Optimize by avoiding JS_LOCK_GC(rt) for the common case. */
+ rt = cx->runtime;
+ obj = rt->anynameObject;
+ if (!obj) {
+ JS_LOCK_GC(rt);
+ obj = rt->anynameObject;
+ if (!obj) {
+ JS_UNLOCK_GC(rt);
+
+ /*
+ * Protect multiple newborns created below, in the do-while(0)
+ * loop used to ensure that we leave this local root scope.
+ */
+ ok = js_EnterLocalRootScope(cx);
+ if (!ok)
+ return JS_FALSE;
+
+ do {
+ qn = js_NewXMLQName(cx, rt->emptyString, rt->emptyString,
+ ATOM_TO_STRING(rt->atomState.starAtom));
+ if (!qn) {
+ ok = JS_FALSE;
+ break;
+ }
+
+ obj = js_NewObject(cx, &js_AnyNameClass, NULL, NULL);
+ if (!obj || !JS_SetPrivate(cx, obj, qn)) {
+ cx->weakRoots.newborn[GCX_OBJECT] = NULL;
+ ok = JS_FALSE;
+ break;
+ }
+ qn->object = obj;
+ METER(xml_stats.qnameobj);
+ METER(xml_stats.liveqnameobj);
+
+ /*
+ * Avoid entraining any Object.prototype found via cx's scope
+ * chain or global object. This loses the default toString,
+ * but no big deal: we want to customize toString anyway for
+ * clearer diagnostics.
+ */
+ if (!JS_DefineFunction(cx, obj, js_toString_str,
+ anyname_toString, 0, 0)) {
+ ok = JS_FALSE;
+ break;
+ }
+ OBJ_SET_PROTO(cx, obj, NULL);
+ JS_ASSERT(!OBJ_GET_PARENT(cx, obj));
+ } while (0);
+
+ js_LeaveLocalRootScopeWithResult(cx, OBJECT_TO_JSVAL(obj));
+ if (!ok)
+ return JS_FALSE;
+
+ JS_LOCK_GC(rt);
+ if (!rt->anynameObject)
+ rt->anynameObject = obj;
+ else
+ obj = rt->anynameObject;
+ }
+ JS_UNLOCK_GC(rt);
+ }
+ *vp = OBJECT_TO_JSVAL(obj);
+ return JS_TRUE;
+}
+
+JSBool
+js_FindXMLProperty(JSContext *cx, jsval name, JSObject **objp, jsval *namep)
+{
+ JSXMLQName *qn;
+ jsid funid, id;
+ JSObject *obj, *pobj, *lastobj;
+ JSProperty *prop;
+ const char *printable;
+
+ qn = ToXMLName(cx, name, &funid);
+ if (!qn)
+ return JS_FALSE;
+ id = OBJECT_TO_JSID(qn->object);
+
+ obj = cx->fp->scopeChain;
+ do {
+ if (!OBJ_LOOKUP_PROPERTY(cx, obj, id, &pobj, &prop))
+ return JS_FALSE;
+ if (prop) {
+ OBJ_DROP_PROPERTY(cx, pobj, prop);
+
+ /*
+ * Call OBJ_THIS_OBJECT to skip any With object that wraps an XML
+ * object to carry scope chain linkage in js_FilterXMLList.
+ */
+ pobj = OBJ_THIS_OBJECT(cx, obj);
+ if (OBJECT_IS_XML(cx, pobj)) {
+ *objp = pobj;
+ *namep = ID_TO_VALUE(id);
+ return JS_TRUE;
+ }
+ }
+
+ lastobj = obj;
+ } while ((obj = OBJ_GET_PARENT(cx, obj)) != NULL);
+
+ printable = js_ValueToPrintableString(cx, name);
+ if (printable) {
+ JS_ReportErrorFlagsAndNumber(cx, JSREPORT_ERROR,
+ js_GetErrorMessage, NULL,
+ JSMSG_UNDEFINED_XML_NAME, printable);
+ }
+ return JS_FALSE;
+}
+
+JSBool
+js_GetXMLProperty(JSContext *cx, JSObject *obj, jsval name, jsval *vp)
+{
+ return GetProperty(cx, obj, name, vp);
+}
+
+JSBool
+js_GetXMLFunction(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
+{
+ JSObject *target;
+ JSXML *xml;
+ JSTempValueRooter tvr;
+ JSBool ok;
+
+ JS_ASSERT(OBJECT_IS_XML(cx, obj));
+
+ /* After this point, control must flow through label out: to exit. */
+ JS_PUSH_TEMP_ROOT_OBJECT(cx, NULL, &tvr);
+
+ /*
+ * See comments before xml_lookupProperty about the need for the proto
+ * chain lookup.
+ */
+ target = obj;
+ for (;;) {
+ ok = js_GetProperty(cx, target, id, vp);
+ if (!ok)
+ goto out;
+ if (VALUE_IS_FUNCTION(cx, *vp)) {
+ ok = JS_TRUE;
+ goto out;
+ }
+ target = OBJ_GET_PROTO(cx, target);
+ if (target == NULL)
+ break;
+ tvr.u.object = target;
+ }
+
+ xml = (JSXML *) JS_GetPrivate(cx, obj);
+ if (HasSimpleContent(xml)) {
+ /* Search in String.prototype to implement 11.2.2.1 Step 3(f). */
+ ok = js_GetClassPrototype(cx, NULL, INT_TO_JSID(JSProto_String),
+ &tvr.u.object);
+ if (!ok)
+ goto out;
+ JS_ASSERT(tvr.u.object);
+ ok = OBJ_GET_PROPERTY(cx, tvr.u.object, id, vp);
+ }
+
+ out:
+ JS_POP_TEMP_ROOT(cx, &tvr);
+ return ok;
+}
+
+JSBool
+js_SetXMLProperty(JSContext *cx, JSObject *obj, jsval name, jsval *vp)
+{
+ return PutProperty(cx, obj, name, vp);
+}
+
+static JSXML *
+GetPrivate(JSContext *cx, JSObject *obj, const char *method)
+{
+ JSXML *xml;
+
+ xml = (JSXML *) JS_GetInstancePrivate(cx, obj, &js_XMLClass, NULL);
+ if (!xml) {
+ JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
+ JSMSG_INCOMPATIBLE_METHOD,
+ js_XML_str, method, OBJ_GET_CLASS(cx, obj)->name);
+ }
+ return xml;
+}
+
+JSBool
+js_GetXMLDescendants(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
+{
+ JSXML *xml, *list;
+
+ xml = GetPrivate(cx, obj, "descendants internal method");
+ if (!xml)
+ return JS_FALSE;
+
+ list = Descendants(cx, xml, id);
+ if (!list)
+ return JS_FALSE;
+ *vp = OBJECT_TO_JSVAL(list->object);
+ return JS_TRUE;
+}
+
+JSBool
+js_DeleteXMLListElements(JSContext *cx, JSObject *listobj)
+{
+ JSXML *list;
+ uint32 n;
+ jsval junk;
+
+ list = (JSXML *) JS_GetPrivate(cx, listobj);
+ for (n = list->xml_kids.length; n != 0; --n) {
+ if (!DeleteProperty(cx, listobj, INT_TO_JSID(0), &junk))
+ return JS_FALSE;
+ }
+ return JS_TRUE;
+}
+
+JSBool
+js_FilterXMLList(JSContext *cx, JSObject *obj, jsbytecode *pc, jsval *vp)
+{
+ JSBool ok, match;
+ JSStackFrame *fp;
+ uint32 flags;
+ JSObject *scobj, *listobj, *resobj, *withobj, *kidobj;
+ JSXML *xml, *list, *result, *kid;
+ JSXMLArrayCursor cursor;
+
+ ok = js_EnterLocalRootScope(cx);
+ if (!ok)
+ return JS_FALSE;
+
+ /* All control flow after this point must exit via label out or bad. */
+ *vp = JSVAL_NULL;
+ fp = cx->fp;
+ flags = fp->flags;
+ fp->flags = flags | JSFRAME_FILTERING;
+ scobj = js_GetScopeChain(cx, fp);
+ withobj = NULL;
+ if (!scobj)
+ goto bad;
+ xml = GetPrivate(cx, obj, "filtering predicate operator");
+ if (!xml)
+ goto bad;
+
+ if (xml->xml_class == JSXML_CLASS_LIST) {
+ list = xml;
+ } else {
+ listobj = js_NewXMLObject(cx, JSXML_CLASS_LIST);
+ if (!listobj)
+ goto bad;
+ list = (JSXML *) JS_GetPrivate(cx, listobj);
+ ok = Append(cx, list, xml);
+ if (!ok)
+ goto out;
+ }
+
+ resobj = js_NewXMLObject(cx, JSXML_CLASS_LIST);
+ if (!resobj)
+ goto bad;
+ result = (JSXML *) JS_GetPrivate(cx, resobj);
+
+ /* Hoist the scope chain update out of the loop over kids. */
+ withobj = js_NewWithObject(cx, NULL, scobj, -1);
+ if (!withobj)
+ goto bad;
+ fp->scopeChain = withobj;
+
+ XMLArrayCursorInit(&cursor, &list->xml_kids);
+ while ((kid = (JSXML *) XMLArrayCursorNext(&cursor)) != NULL) {
+ kidobj = js_GetXMLObject(cx, kid);
+ if (!kidobj)
+ break;
+ OBJ_SET_PROTO(cx, withobj, kidobj);
+ ok = js_Interpret(cx, pc, vp) && js_ValueToBoolean(cx, *vp, &match);
+ if (ok && match)
+ ok = Append(cx, result, kid);
+ if (!ok)
+ break;
+ }
+ XMLArrayCursorFinish(&cursor);
+ if (!ok)
+ goto out;
+ if (kid)
+ goto bad;
+
+ *vp = OBJECT_TO_JSVAL(resobj);
+
+out:
+ fp->flags = flags | (fp->flags & JSFRAME_POP_BLOCKS);
+ if (withobj) {
+ fp->scopeChain = scobj;
+ JS_SetPrivate(cx, withobj, NULL);
+ }
+ js_LeaveLocalRootScopeWithResult(cx, *vp);
+ return ok;
+bad:
+ ok = JS_FALSE;
+ goto out;
+}
+
+JSObject *
+js_ValueToXMLObject(JSContext *cx, jsval v)
+{
+ return ToXML(cx, v);
+}
+
+JSObject *
+js_ValueToXMLListObject(JSContext *cx, jsval v)
+{
+ return ToXMLList(cx, v);
+}
+
+JSObject *
+js_CloneXMLObject(JSContext *cx, JSObject *obj)
+{
+ uintN flags;
+ JSXML *xml;
+
+ if (!GetXMLSettingFlags(cx, &flags))
+ return NULL;
+ xml = (JSXML *) JS_GetPrivate(cx, obj);
+ if (flags & (XSF_IGNORE_COMMENTS |
+ XSF_IGNORE_PROCESSING_INSTRUCTIONS |
+ XSF_IGNORE_WHITESPACE)) {
+ xml = DeepCopy(cx, xml, NULL, flags);
+ if (!xml)
+ return NULL;
+ return xml->object;
+ }
+ return NewXMLObject(cx, xml);
+}
+
+JSObject *
+js_NewXMLSpecialObject(JSContext *cx, JSXMLClass xml_class, JSString *name,
+ JSString *value)
+{
+ uintN flags;
+ JSObject *obj;
+ JSXML *xml;
+ JSXMLQName *qn;
+
+ if (!GetXMLSettingFlags(cx, &flags))
+ return NULL;
+
+ if ((xml_class == JSXML_CLASS_COMMENT &&
+ (flags & XSF_IGNORE_COMMENTS)) ||
+ (xml_class == JSXML_CLASS_PROCESSING_INSTRUCTION &&
+ (flags & XSF_IGNORE_PROCESSING_INSTRUCTIONS))) {
+ return js_NewXMLObject(cx, JSXML_CLASS_TEXT);
+ }
+
+ obj = js_NewXMLObject(cx, xml_class);
+ if (!obj)
+ return NULL;
+ xml = (JSXML *) JS_GetPrivate(cx, obj);
+ if (name) {
+ qn = js_NewXMLQName(cx, cx->runtime->emptyString, NULL, name);
+ if (!qn)
+ return NULL;
+ xml->name = qn;
+ }
+ xml->xml_value = value;
+ return obj;
+}
+
+JSString *
+js_MakeXMLCDATAString(JSContext *cx, JSString *str)
+{
+ return MakeXMLCDATAString(cx, NULL, str);
+}
+
+JSString *
+js_MakeXMLCommentString(JSContext *cx, JSString *str)
+{
+ return MakeXMLCommentString(cx, NULL, str);
+}
+
+JSString *
+js_MakeXMLPIString(JSContext *cx, JSString *name, JSString *str)
+{
+ return MakeXMLPIString(cx, NULL, name, str);
+}
+
+#endif /* JS_HAS_XML_SUPPORT */
diff --git a/src/third_party/js-1.7/jsxml.h b/src/third_party/js-1.7/jsxml.h
new file mode 100644
index 00000000000..71e591acc57
--- /dev/null
+++ b/src/third_party/js-1.7/jsxml.h
@@ -0,0 +1,332 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is SpiderMonkey E4X code, released August, 2004.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jsxml_h___
+#define jsxml_h___
+
+#include "jsstddef.h"
+#include "jspubtd.h"
+
+extern const char js_AnyName_str[];
+extern const char js_AttributeName_str[];
+extern const char js_isXMLName_str[];
+extern const char js_XMLList_str[];
+
+extern const char js_amp_entity_str[];
+extern const char js_gt_entity_str[];
+extern const char js_lt_entity_str[];
+extern const char js_quot_entity_str[];
+
+struct JSXMLNamespace {
+ JSObject *object;
+ JSString *prefix;
+ JSString *uri;
+ JSBool declared; /* true if declared in its XML tag */
+};
+
+extern JSXMLNamespace *
+js_NewXMLNamespace(JSContext *cx, JSString *prefix, JSString *uri,
+ JSBool declared);
+
+extern void
+js_MarkXMLNamespace(JSContext *cx, JSXMLNamespace *ns);
+
+extern void
+js_FinalizeXMLNamespace(JSContext *cx, JSXMLNamespace *ns);
+
+extern JSObject *
+js_NewXMLNamespaceObject(JSContext *cx, JSString *prefix, JSString *uri,
+ JSBool declared);
+
+extern JSObject *
+js_GetXMLNamespaceObject(JSContext *cx, JSXMLNamespace *ns);
+
+struct JSXMLQName {
+ JSObject *object;
+ JSString *uri;
+ JSString *prefix;
+ JSString *localName;
+};
+
+extern JSXMLQName *
+js_NewXMLQName(JSContext *cx, JSString *uri, JSString *prefix,
+ JSString *localName);
+
+extern void
+js_MarkXMLQName(JSContext *cx, JSXMLQName *qn);
+
+extern void
+js_FinalizeXMLQName(JSContext *cx, JSXMLQName *qn);
+
+extern JSObject *
+js_NewXMLQNameObject(JSContext *cx, JSString *uri, JSString *prefix,
+ JSString *localName);
+
+extern JSObject *
+js_GetXMLQNameObject(JSContext *cx, JSXMLQName *qn);
+
+extern JSObject *
+js_GetAttributeNameObject(JSContext *cx, JSXMLQName *qn);
+
+extern JSObject *
+js_ConstructXMLQNameObject(JSContext *cx, jsval nsval, jsval lnval);
+
+typedef JSBool
+(* JS_DLL_CALLBACK JSIdentityOp)(const void *a, const void *b);
+
+struct JSXMLArray {
+ uint32 length;
+ uint32 capacity;
+ void **vector;
+ JSXMLArrayCursor *cursors;
+};
+
+#define JSXML_PRESET_CAPACITY JS_BIT(31)
+#define JSXML_CAPACITY_MASK JS_BITMASK(31)
+#define JSXML_CAPACITY(array) ((array)->capacity & JSXML_CAPACITY_MASK)
+
+struct JSXMLArrayCursor {
+ JSXMLArray *array;
+ uint32 index;
+ JSXMLArrayCursor *next;
+ JSXMLArrayCursor **prevp;
+ void *root;
+};
+
+/*
+ * NB: don't reorder this enum without changing all array initializers that
+ * depend on it in jsxml.c.
+ */
+typedef enum JSXMLClass {
+ JSXML_CLASS_LIST,
+ JSXML_CLASS_ELEMENT,
+ JSXML_CLASS_ATTRIBUTE,
+ JSXML_CLASS_PROCESSING_INSTRUCTION,
+ JSXML_CLASS_TEXT,
+ JSXML_CLASS_COMMENT,
+ JSXML_CLASS_LIMIT
+} JSXMLClass;
+
+#define JSXML_CLASS_HAS_KIDS(class_) ((class_) < JSXML_CLASS_ATTRIBUTE)
+#define JSXML_CLASS_HAS_VALUE(class_) ((class_) >= JSXML_CLASS_ATTRIBUTE)
+#define JSXML_CLASS_HAS_NAME(class_) \
+ ((uintN)((class_) - JSXML_CLASS_ELEMENT) <= \
+ (uintN)(JSXML_CLASS_PROCESSING_INSTRUCTION - JSXML_CLASS_ELEMENT))
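+
+/*
+ * With the enum order above, HAS_KIDS selects LIST and ELEMENT, HAS_VALUE
+ * selects ATTRIBUTE, PROCESSING_INSTRUCTION, TEXT and COMMENT, and HAS_NAME
+ * selects ELEMENT, ATTRIBUTE and PROCESSING_INSTRUCTION.
+ */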
+
+#ifdef DEBUG_notme
+#include "jsclist.h"
+#endif
+
+struct JSXML {
+#ifdef DEBUG_notme
+ JSCList links;
+ uint32 serial;
+#endif
+ JSObject *object;
+ void *domnode; /* DOM node if mapped info item */
+ JSXML *parent;
+ JSXMLQName *name;
+ uint16 xml_class; /* discriminates u, below */
+ uint16 xml_flags; /* flags, see below */
+ union {
+ struct JSXMLListVar {
+ JSXMLArray kids; /* NB: must come first */
+ JSXML *target;
+ JSXMLQName *targetprop;
+ } list;
+ struct JSXMLVar {
+ JSXMLArray kids; /* NB: must come first */
+ JSXMLArray namespaces;
+ JSXMLArray attrs;
+ } elem;
+ JSString *value;
+ } u;
+
+ /* Don't add anything after u -- see js_NewXML for why. */
+};
+
+/* union member shorthands */
+#define xml_kids u.list.kids
+#define xml_target u.list.target
+#define xml_targetprop u.list.targetprop
+#define xml_namespaces u.elem.namespaces
+#define xml_attrs u.elem.attrs
+#define xml_value u.value
+
+/* xml_flags values */
+#define XMLF_WHITESPACE_TEXT 0x1
+
+/* xml_class-testing macros */
+#define JSXML_HAS_KIDS(xml) JSXML_CLASS_HAS_KIDS((xml)->xml_class)
+#define JSXML_HAS_VALUE(xml) JSXML_CLASS_HAS_VALUE((xml)->xml_class)
+#define JSXML_HAS_NAME(xml) JSXML_CLASS_HAS_NAME((xml)->xml_class)
+#define JSXML_LENGTH(xml) (JSXML_CLASS_HAS_KIDS((xml)->xml_class) \
+ ? (xml)->xml_kids.length \
+ : 0)
+
+extern JSXML *
+js_NewXML(JSContext *cx, JSXMLClass xml_class);
+
+extern void
+js_MarkXML(JSContext *cx, JSXML *xml);
+
+extern void
+js_FinalizeXML(JSContext *cx, JSXML *xml);
+
+extern JSObject *
+js_ParseNodeToXMLObject(JSContext *cx, JSParseNode *pn);
+
+extern JSObject *
+js_NewXMLObject(JSContext *cx, JSXMLClass xml_class);
+
+extern JSObject *
+js_GetXMLObject(JSContext *cx, JSXML *xml);
+
+extern JS_FRIEND_DATA(JSXMLObjectOps) js_XMLObjectOps;
+extern JS_FRIEND_DATA(JSClass) js_XMLClass;
+extern JS_FRIEND_DATA(JSExtendedClass) js_NamespaceClass;
+extern JS_FRIEND_DATA(JSExtendedClass) js_QNameClass;
+extern JS_FRIEND_DATA(JSClass) js_AttributeNameClass;
+extern JS_FRIEND_DATA(JSClass) js_AnyNameClass;
+
+/*
+ * Macros to test whether an object or a value is of type "xml" (per typeof).
+ * NB: jsapi.h must be included before any call to VALUE_IS_XML.
+ */
+#define OBJECT_IS_XML(cx,obj) ((obj)->map->ops == &js_XMLObjectOps.base)
+#define VALUE_IS_XML(cx,v) (!JSVAL_IS_PRIMITIVE(v) && \
+ OBJECT_IS_XML(cx, JSVAL_TO_OBJECT(v)))
+
+extern JSObject *
+js_InitNamespaceClass(JSContext *cx, JSObject *obj);
+
+extern JSObject *
+js_InitQNameClass(JSContext *cx, JSObject *obj);
+
+extern JSObject *
+js_InitAttributeNameClass(JSContext *cx, JSObject *obj);
+
+extern JSObject *
+js_InitAnyNameClass(JSContext *cx, JSObject *obj);
+
+extern JSObject *
+js_InitXMLClass(JSContext *cx, JSObject *obj);
+
+extern JSObject *
+js_InitXMLClasses(JSContext *cx, JSObject *obj);
+
+extern JSBool
+js_GetFunctionNamespace(JSContext *cx, jsval *vp);
+
+extern JSBool
+js_GetDefaultXMLNamespace(JSContext *cx, jsval *vp);
+
+extern JSBool
+js_SetDefaultXMLNamespace(JSContext *cx, jsval v);
+
+/*
+ * Return true if v is an XML QName object, or if it converts to a string that
+ * contains a valid XML qualified name (one containing no :), false otherwise.
+ * NB: This function is an infallible predicate; it hides exceptions.
+ */
+extern JSBool
+js_IsXMLName(JSContext *cx, jsval v);
+
+extern JSBool
+js_ToAttributeName(JSContext *cx, jsval *vp);
+
+extern JSString *
+js_EscapeAttributeValue(JSContext *cx, JSString *str);
+
+extern JSString *
+js_AddAttributePart(JSContext *cx, JSBool isName, JSString *str,
+ JSString *str2);
+
+extern JSString *
+js_EscapeElementValue(JSContext *cx, JSString *str);
+
+extern JSString *
+js_ValueToXMLString(JSContext *cx, jsval v);
+
+extern JSBool
+js_GetAnyName(JSContext *cx, jsval *vp);
+
+extern JSBool
+js_FindXMLProperty(JSContext *cx, jsval name, JSObject **objp, jsval *namep);
+
+extern JSBool
+js_GetXMLProperty(JSContext *cx, JSObject *obj, jsval name, jsval *vp);
+
+extern JSBool
+js_GetXMLFunction(JSContext *cx, JSObject *obj, jsid id, jsval *vp);
+
+extern JSBool
+js_SetXMLProperty(JSContext *cx, JSObject *obj, jsval name, jsval *vp);
+
+extern JSBool
+js_GetXMLDescendants(JSContext *cx, JSObject *obj, jsval id, jsval *vp);
+
+extern JSBool
+js_DeleteXMLListElements(JSContext *cx, JSObject *listobj);
+
+extern JSBool
+js_FilterXMLList(JSContext *cx, JSObject *obj, jsbytecode *pc, jsval *vp);
+
+extern JSObject *
+js_ValueToXMLObject(JSContext *cx, jsval v);
+
+extern JSObject *
+js_ValueToXMLListObject(JSContext *cx, jsval v);
+
+extern JSObject *
+js_CloneXMLObject(JSContext *cx, JSObject *obj);
+
+extern JSObject *
+js_NewXMLSpecialObject(JSContext *cx, JSXMLClass xml_class, JSString *name,
+ JSString *value);
+
+extern JSString *
+js_MakeXMLCDATAString(JSContext *cx, JSString *str);
+
+extern JSString *
+js_MakeXMLCommentString(JSContext *cx, JSString *str);
+
+extern JSString *
+js_MakeXMLPIString(JSContext *cx, JSString *name, JSString *str);
+
+#endif /* jsxml_h___ */
diff --git a/src/third_party/js-1.7/lock_SunOS.s b/src/third_party/js-1.7/lock_SunOS.s
new file mode 100644
index 00000000000..7a842d186e0
--- /dev/null
+++ b/src/third_party/js-1.7/lock_SunOS.s
@@ -0,0 +1,114 @@
+!
+! The contents of this file are subject to the Netscape Public
+! License Version 1.1 (the "License"); you may not use this file
+! except in compliance with the License. You may obtain a copy of
+! the License at http://www.mozilla.org/NPL/
+!
+! Software distributed under the License is distributed on an "AS
+! IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
+! implied. See the License for the specific language governing
+! rights and limitations under the License.
+!
+! The Original Code is Mozilla Communicator client code, released
+! March 31, 1998.
+!
+! The Initial Developer of the Original Code is Netscape
+! Communications Corporation. Portions created by Netscape are
+! Copyright (C) 1998-1999 Netscape Communications Corporation. All
+! Rights Reserved.
+!
+! Contributor(s):
+!
+! Alternatively, the contents of this file may be used under the
+! terms of the GNU Public License (the "GPL"), in which case the
+! provisions of the GPL are applicable instead of those above.
+! If you wish to allow use of your version of this file only
+! under the terms of the GPL and not to allow others to use your
+! version of this file under the NPL, indicate your decision by
+! deleting the provisions above and replace them with the notice
+! and other provisions required by the GPL. If you do not delete
+! the provisions above, a recipient may use your version of this
+! file under either the NPL or the GPL.
+!
+
+!
+! atomic compare-and-swap routines for V8 sparc
+! and for V8+ (ultrasparc)
+!
+!
+! standard asm linkage macros; this module must be compiled
+! with the -P option (use C preprocessor)
+
+#include <sys/asm_linkage.h>
+
+! ======================================================================
+!
+! Perform the sequence *a = b atomically with respect to previous value
+! of a (a0). If *a==a0 then assign b to *a, all in one atomic operation.
+! Returns 1 if the assignment happened, and 0 otherwise.
+!
+! usage : swapped = compare_and_swap(address, oldval, newval)
+!
+! -----------------------
+! Note on REGISTER USAGE:
+! as this is a LEAF procedure, a new stack frame is not created;
+! we use the caller stack frame so what would normally be %i (input)
+! registers are actually %o (output registers). Also, we must not
+! overwrite the contents of %l (local) registers as they are not
+! assumed to be volatile during calls.
+!
+! So, the registers used are:
+! %o0 [input] - the address of the value to compare and swap
+! %o1 [input] - the old value to compare with
+! %o2 [input] - the new value to set for [%o0]
+! %o3 [local] - work register
+! -----------------------
+#ifndef ULTRA_SPARC
+! v8
+
+ ENTRY(compare_and_swap) ! standard assembler/ELF prologue
+
+ stbar
+ mov -1,%o3 ! busy flag
+ swap [%o0],%o3 ! get current value
+l1: cmp %o3,-1 ! busy?
+ be,a l1 ! if so, spin
+ swap [%o0],%o3 ! using branch-delay to swap back value
+ cmp %o1,%o3 ! compare old with current
+ be,a l2 ! if equal then swap in new value
+ swap [%o0],%o2 ! done.
+ swap [%o0],%o3 ! otherwise, swap back current value
+ retl
+ mov 0,%o0 ! return false
+l2: retl
+ mov 1,%o0 ! return true
+
+ SET_SIZE(compare_and_swap) ! standard assembler/ELF epilogue
+
+!
+! end
+!
+#else /* ULTRA_SPARC */
+! ======================================================================
+!
+! v9
+
+ ENTRY(compare_and_swap) ! standard assembler/ELF prologue
+
+ stbar
+ cas [%o0],%o1,%o2 ! compare *w with old value and set to new if equal
+ cmp %o1,%o2 ! did we succeed?
+ be,a m1 ! yes
+ mov 1,%o0 ! return true (annulled when no jump)
+ mov 0,%o0 ! return false
+m1: retl
+ nop
+
+ SET_SIZE(compare_and_swap) ! standard assembler/ELF epilogue
+
+!
+! end
+!
+! ======================================================================
+!
+#endif
diff --git a/src/third_party/js-1.7/perfect.js b/src/third_party/js-1.7/perfect.js
new file mode 100644
index 00000000000..aeca121186b
--- /dev/null
+++ b/src/third_party/js-1.7/perfect.js
@@ -0,0 +1,39 @@
+// Some simple testing of new, eval and some string stuff.
+
+// constructor -- expression array initialization
+function ExprArray(n,v)
+{
+ // Initializes n values to v coerced to a string.
+ for (var i = 0; i < n; i++) {
+ this[i] = "" + v;
+ }
+}
+
+
+// Print the perfect numbers up to n and the sum expression for n's divisors.
+function perfect(n)
+{
+ print("The perfect numbers up to " + n + " are:");
+
+ // We build sumOfDivisors[i] to hold a string expression for
+ // the sum of the divisors of i, excluding i itself.
+ var sumOfDivisors = new ExprArray(n+1,1);
+ for (var divisor = 2; divisor <= n; divisor++) {
+ for (var j = divisor + divisor; j <= n; j += divisor) {
+ sumOfDivisors[j] += " + " + divisor;
+ }
+ // At this point everything up to 'divisor' has its sumOfDivisors
+ // expression calculated, so we can determine whether it's perfect
+ // already by evaluating.
+ if (eval(sumOfDivisors[divisor]) == divisor) {
+ print("" + divisor + " = " + sumOfDivisors[divisor]);
+ }
+ }
+ print("That's all.");
+}
+
+
+print("\nA number is 'perfect' if it is equal to the sum of its")
+print("divisors (excluding itself).\n");
+perfect(500);
+
diff --git a/src/third_party/js-1.7/plify_jsdhash.sed b/src/third_party/js-1.7/plify_jsdhash.sed
new file mode 100644
index 00000000000..eff4901c5fe
--- /dev/null
+++ b/src/third_party/js-1.7/plify_jsdhash.sed
@@ -0,0 +1,33 @@
+/ * Double hashing implementation./a\
+ * GENERATED BY js/src/plify_jsdhash.sed -- DO NOT EDIT!!!
+/ * Double hashing, a la Knuth 6./a\
+ * GENERATED BY js/src/plify_jsdhash.sed -- DO NOT EDIT!!!
+s/jsdhash_h___/pldhash_h___/
+s/jsdhash\.bigdump/pldhash.bigdump/
+s/jstypes\.h/nscore.h/
+s/jsbit\.h/prbit.h/
+s/jsdhash\.h/pldhash.h/
+s/jsdhash\.c/pldhash.c/
+s/jsdhash:/pldhash:/
+s/jsutil\.h/nsDebug.h/
+s/JS_DHASH/PL_DHASH/g
+s/JS_DHash/PL_DHash/g
+s/JSDHash/PLDHash/g
+s/JSHash/PLHash/g
+s/uint32 /PRUint32/g
+s/\([^U]\)int32 /\1PRInt32/g
+s/uint16 /PRUint16/g
+s/\([^U]\)int16 /\1PRInt16/g
+s/uint32/PRUint32/g
+s/\([^U]\)int32/\1PRInt32/g
+s/uint16/PRUint16/g
+s/\([^U]\)int16/\1PRInt16/g
+s/JSBool/PRBool/g
+s/extern JS_PUBLIC_API(\([^()]*\))/NS_COM_GLUE \1/
+s/JS_PUBLIC_API(\([^()]*\))/\1/
+s/JS_DLL_CALLBACK/PR_CALLBACK/
+s/JS_STATIC_DLL_CALLBACK/PR_STATIC_CALLBACK/
+s/JS_NewDHashTable/PL_NewDHashTable/
+s/JS_ASSERT(0)/NS_NOTREACHED("0")/
+s/\( *\)JS_ASSERT(\(.*\));/\1NS_ASSERTION(\2,\n\1 "\2");/
+s/JS_/PR_/g
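+# Illustrative example (a hypothetical declaration, not an exact quote of
+# jsdhash.h): the rules above rewrite
+#   extern JS_PUBLIC_API(JSDHashTable *) JS_NewDHashTable(/* ... */);
+# into
+#   NS_COM_GLUE PLDHashTable * PL_NewDHashTable(/* ... */);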
diff --git a/src/third_party/js-1.7/prmjtime.c b/src/third_party/js-1.7/prmjtime.c
new file mode 100644
index 00000000000..3228af8be89
--- /dev/null
+++ b/src/third_party/js-1.7/prmjtime.c
@@ -0,0 +1,439 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+/*
+ * PR time code.
+ */
+#include "jsstddef.h"
+#ifdef SOLARIS
+#define _REENTRANT 1
+#endif
+#include <string.h>
+#include <time.h>
+#include "jstypes.h"
+#include "jsutil.h"
+
+#include "jsprf.h"
+#include "prmjtime.h"
+
+#define PRMJ_DO_MILLISECONDS 1
+
+#ifdef XP_OS2
+#include <sys/timeb.h>
+#endif
+#ifdef XP_WIN
+#include <windows.h>
+#endif
+
+#if defined(XP_UNIX) || defined(XP_BEOS)
+
+#ifdef _SVID_GETTOD /* Defined only on Solaris, see Solaris <sys/types.h> */
+extern int gettimeofday(struct timeval *tv);
+#endif
+
+#include <sys/time.h>
+
+#endif /* XP_UNIX */
+
+#define IS_LEAP(year) \
+ (year != 0 && ((((year & 0x3) == 0) && \
+ ((year - ((year/100) * 100)) != 0)) || \
+ (year - ((year/400) * 400)) == 0))
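+
+/*
+ * IS_LEAP spells out the Gregorian rule: a year is a leap year when it is
+ * divisible by 4 but not by 100, or when it is divisible by 400 (2000 is a
+ * leap year, 1900 is not); year 0 is treated as non-leap.  The (year & 0x3)
+ * test checks divisibility by 4, and the subtraction forms compute
+ * year mod 100 and year mod 400.
+ */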
+
+#define PRMJ_HOUR_SECONDS 3600L
+#define PRMJ_DAY_SECONDS (24L * PRMJ_HOUR_SECONDS)
+#define PRMJ_YEAR_SECONDS (PRMJ_DAY_SECONDS * 365L)
+#define PRMJ_MAX_UNIX_TIMET 2145859200L /*time_t value equiv. to 12/31/2037 */
+/* function prototypes */
+static void PRMJ_basetime(JSInt64 tsecs, PRMJTime *prtm);
+/*
+ * get the difference in seconds between this time zone and UTC (GMT)
+ */
+JSInt32
+PRMJ_LocalGMTDifference()
+{
+#if defined(XP_UNIX) || defined(XP_WIN) || defined(XP_OS2) || defined(XP_BEOS)
+ struct tm ltime;
+
+ /* get the difference between this time zone and GMT */
+ memset((char *)&ltime,0,sizeof(ltime));
+ ltime.tm_mday = 2;
+ ltime.tm_year = 70;
+#ifdef SUNOS4
+ ltime.tm_zone = 0;
+ ltime.tm_gmtoff = 0;
+ return timelocal(&ltime) - (24 * 3600);
+#else
+ return mktime(&ltime) - (24L * 3600L);
+#endif
+#endif
+}
+
+/* Constants for GMT offset from 1970 */
+#define G1970GMTMICROHI 0x00dcdcad /* micro secs to 1970 hi */
+#define G1970GMTMICROLOW 0x8b3fa000 /* micro secs to 1970 low */
+
+#define G2037GMTMICROHI 0x00e45fab /* micro secs to 2037 high */
+#define G2037GMTMICROLOW 0x7a238000 /* micro secs to 2037 low */
+
+/* Convert from base time to extended time */
+static JSInt64
+PRMJ_ToExtendedTime(JSInt32 base_time)
+{
+ JSInt64 exttime;
+ JSInt64 g1970GMTMicroSeconds;
+ JSInt64 low;
+ JSInt32 diff;
+ JSInt64 tmp;
+ JSInt64 tmp1;
+
+ diff = PRMJ_LocalGMTDifference();
+ JSLL_UI2L(tmp, PRMJ_USEC_PER_SEC);
+ JSLL_I2L(tmp1,diff);
+ JSLL_MUL(tmp,tmp,tmp1);
+
+ JSLL_UI2L(g1970GMTMicroSeconds,G1970GMTMICROHI);
+ JSLL_UI2L(low,G1970GMTMICROLOW);
+#ifndef JS_HAVE_LONG_LONG
+ JSLL_SHL(g1970GMTMicroSeconds,g1970GMTMicroSeconds,16);
+ JSLL_SHL(g1970GMTMicroSeconds,g1970GMTMicroSeconds,16);
+#else
+ JSLL_SHL(g1970GMTMicroSeconds,g1970GMTMicroSeconds,32);
+#endif
+ JSLL_ADD(g1970GMTMicroSeconds,g1970GMTMicroSeconds,low);
+
+ JSLL_I2L(exttime,base_time);
+ JSLL_ADD(exttime,exttime,g1970GMTMicroSeconds);
+ JSLL_SUB(exttime,exttime,tmp);
+ return exttime;
+}
+
+JSInt64
+PRMJ_Now(void)
+{
+#ifdef XP_OS2
+ JSInt64 s, us, ms2us, s2us;
+ struct timeb b;
+#endif
+#ifdef XP_WIN
+ JSInt64 s, us,
+ win2un = JSLL_INIT(0x19DB1DE, 0xD53E8000),
+ ten = JSLL_INIT(0, 10);
+ FILETIME time, midnight;
+#endif
+#if defined(XP_UNIX) || defined(XP_BEOS)
+ struct timeval tv;
+ JSInt64 s, us, s2us;
+#endif /* XP_UNIX */
+
+#ifdef XP_OS2
+ ftime(&b);
+ JSLL_UI2L(ms2us, PRMJ_USEC_PER_MSEC);
+ JSLL_UI2L(s2us, PRMJ_USEC_PER_SEC);
+ JSLL_UI2L(s, b.time);
+ JSLL_UI2L(us, b.millitm);
+ JSLL_MUL(us, us, ms2us);
+ JSLL_MUL(s, s, s2us);
+ JSLL_ADD(s, s, us);
+ return s;
+#endif
+#ifdef XP_WIN
+ /* The windows epoch is around 1600. The unix epoch is around 1970.
+ win2un is the difference (in windows time units which are 10 times
+ more precise than the JS time unit) */
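+ /* Concretely, 1601-01-01 and 1970-01-01 are 11644473600 seconds apart, and
+ 11644473600 * 10^7 100-nanosecond units = 0x019DB1DED53E8000, the value
+ used to initialize win2un above. */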
+ GetSystemTimeAsFileTime(&time);
+ /* Win9x gets confused at midnight
+ http://support.microsoft.com/default.aspx?scid=KB;en-us;q224423
+ So if the low part (precision <8mins) is 0 then we get the time
+ again. */
+ if (!time.dwLowDateTime) {
+ GetSystemTimeAsFileTime(&midnight);
+ time.dwHighDateTime = midnight.dwHighDateTime;
+ }
+ JSLL_UI2L(s, time.dwHighDateTime);
+ JSLL_UI2L(us, time.dwLowDateTime);
+ JSLL_SHL(s, s, 32);
+ JSLL_ADD(s, s, us);
+ JSLL_SUB(s, s, win2un);
+ JSLL_DIV(s, s, ten);
+ return s;
+#endif
+
+#if defined(XP_UNIX) || defined(XP_BEOS)
+#ifdef _SVID_GETTOD /* Defined only on Solaris, see Solaris <sys/types.h> */
+ gettimeofday(&tv);
+#else
+ gettimeofday(&tv, 0);
+#endif /* _SVID_GETTOD */
+ JSLL_UI2L(s2us, PRMJ_USEC_PER_SEC);
+ JSLL_UI2L(s, tv.tv_sec);
+ JSLL_UI2L(us, tv.tv_usec);
+ JSLL_MUL(s, s, s2us);
+ JSLL_ADD(s, s, us);
+ return s;
+#endif /* XP_UNIX */
+}
+
+/* Get the DST timezone offset for the time passed in */
+JSInt64
+PRMJ_DSTOffset(JSInt64 local_time)
+{
+ JSInt64 us2s;
+ time_t local;
+ JSInt32 diff;
+ JSInt64 maxtimet;
+ struct tm tm;
+ PRMJTime prtm;
+#ifndef HAVE_LOCALTIME_R
+ struct tm *ptm;
+#endif
+
+
+ JSLL_UI2L(us2s, PRMJ_USEC_PER_SEC);
+ JSLL_DIV(local_time, local_time, us2s);
+
+ /* get the maximum of time_t value */
+ JSLL_UI2L(maxtimet,PRMJ_MAX_UNIX_TIMET);
+
+ if(JSLL_CMP(local_time,>,maxtimet)){
+ JSLL_UI2L(local_time,PRMJ_MAX_UNIX_TIMET);
+ } else if(!JSLL_GE_ZERO(local_time)){
+ /*go ahead a day to make localtime work (does not work with 0) */
+ JSLL_UI2L(local_time,PRMJ_DAY_SECONDS);
+ }
+ JSLL_L2UI(local,local_time);
+ PRMJ_basetime(local_time,&prtm);
+#ifndef HAVE_LOCALTIME_R
+ ptm = localtime(&local);
+ if(!ptm){
+ return JSLL_ZERO;
+ }
+ tm = *ptm;
+#else
+ localtime_r(&local,&tm); /* get dst information */
+#endif
+
+ diff = ((tm.tm_hour - prtm.tm_hour) * PRMJ_HOUR_SECONDS) +
+ ((tm.tm_min - prtm.tm_min) * 60);
+
+ if(diff < 0){
+ diff += PRMJ_DAY_SECONDS;
+ }
+
+ JSLL_UI2L(local_time,diff);
+
+ JSLL_MUL(local_time,local_time,us2s);
+
+ return(local_time);
+}
+
+/* Format a time value into a buffer. Same semantics as strftime() */
+size_t
+PRMJ_FormatTime(char *buf, int buflen, char *fmt, PRMJTime *prtm)
+{
+#if defined(XP_UNIX) || defined(XP_WIN) || defined(XP_OS2) || defined(XP_BEOS)
+ struct tm a;
+
+ /* Zero out the tm struct. Linux, SunOS 4 struct tm has extra members int
+ * tm_gmtoff, char *tm_zone; when tm_zone is garbage, strftime gets
+ * confused and dumps core. NSPR20 prtime.c attempts to fill these in by
+ * calling mktime on the partially filled struct, but this doesn't seem to
+ * work as well; the result string has "can't get timezone" for ECMA-valid
+ * years. Might still make sense to use this, but find the range of years
+ * for which valid tz information exists, and map (per ECMA hint) from the
+ * given year into that range.
+
+ * N.B. This hasn't been tested with anything that actually _uses_
+ * tm_gmtoff; zero might be the wrong thing to set it to if you really need
+ * to format a time. This fix is for jsdate.c, which only uses
+ * JS_FormatTime to get a string representing the time zone. */
+ memset(&a, 0, sizeof(struct tm));
+
+ a.tm_sec = prtm->tm_sec;
+ a.tm_min = prtm->tm_min;
+ a.tm_hour = prtm->tm_hour;
+ a.tm_mday = prtm->tm_mday;
+ a.tm_mon = prtm->tm_mon;
+ a.tm_wday = prtm->tm_wday;
+ a.tm_year = prtm->tm_year - 1900;
+ a.tm_yday = prtm->tm_yday;
+ a.tm_isdst = prtm->tm_isdst;
+
+ /* Even with the above, SunOS 4 seems to detonate if tm_zone and tm_gmtoff
+ * are null. This doesn't quite work, though - the timezone is off by
+ * tzoff + dst. (And mktime seems to return -1 for the exact dst
+ * changeover time.)
+
+ */
+
+#if defined(SUNOS4)
+ if (mktime(&a) == -1) {
+ /* Seems to fail whenever the requested date is outside of the 32-bit
+ * UNIX epoch. We could proceed at this point (setting a.tm_zone to
+ * "") but then strftime returns a string with a 2-digit field of
+ * garbage for the year. So we return 0 and hope jsdate.c
+ * will fall back on toString.
+ */
+ return 0;
+ }
+#endif
+
+ return strftime(buf, buflen, fmt, &a);
+#endif
+}
+
+/* table for number of days in a month */
+static int mtab[] = {
+ /* jan, feb,mar,apr,may,jun */
+ 31,28,31,30,31,30,
+ /* july,aug,sep,oct,nov,dec */
+ 31,31,30,31,30,31
+};
+
+/*
+ * basic time calculation functionality for localtime and gmtime
+ * sets up the prtm argument with correct values based upon the input number
+ * of seconds.
+ */
+static void
+PRMJ_basetime(JSInt64 tsecs, PRMJTime *prtm)
+{
+ /* convert tsecs back to year,month,day,hour,secs */
+ JSInt32 year = 0;
+ JSInt32 month = 0;
+ JSInt32 yday = 0;
+ JSInt32 mday = 0;
+ JSInt32 wday = 6; /* start on a Sunday */
+ JSInt32 days = 0;
+ JSInt32 seconds = 0;
+ JSInt32 minutes = 0;
+ JSInt32 hours = 0;
+ JSInt32 isleap = 0;
+ JSInt64 result;
+ JSInt64 result1;
+ JSInt64 result2;
+ JSInt64 base;
+
+ JSLL_UI2L(result,0);
+ JSLL_UI2L(result1,0);
+ JSLL_UI2L(result2,0);
+
+ /* get the base time via UTC */
+ base = PRMJ_ToExtendedTime(0);
+ JSLL_UI2L(result, PRMJ_USEC_PER_SEC);
+ JSLL_DIV(base,base,result);
+ JSLL_ADD(tsecs,tsecs,base);
+
+ JSLL_UI2L(result, PRMJ_YEAR_SECONDS);
+ JSLL_UI2L(result1,PRMJ_DAY_SECONDS);
+ JSLL_ADD(result2,result,result1);
+
+ /* get the year */
+ while ((isleap == 0) ? !JSLL_CMP(tsecs,<,result) : !JSLL_CMP(tsecs,<,result2)) {
+ /* subtract a year from tsecs */
+ JSLL_SUB(tsecs,tsecs,result);
+ days += 365;
+ /* is it a leap year ? */
+ if(IS_LEAP(year)){
+ JSLL_SUB(tsecs,tsecs,result1);
+ days++;
+ }
+ year++;
+ isleap = IS_LEAP(year);
+ }
+
+ JSLL_UI2L(result1,PRMJ_DAY_SECONDS);
+
+ JSLL_DIV(result,tsecs,result1);
+ JSLL_L2I(mday,result);
+
+ /* let's find the month */
+ while(((month == 1 && isleap) ?
+ (mday >= mtab[month] + 1) :
+ (mday >= mtab[month]))){
+ yday += mtab[month];
+ days += mtab[month];
+
+ mday -= mtab[month];
+
+ /* it's a Feb, check if this is a leap year */
+ if(month == 1 && isleap != 0){
+ yday++;
+ days++;
+ mday--;
+ }
+ month++;
+ }
+
+ /* now adjust tsecs */
+ JSLL_MUL(result,result,result1);
+ JSLL_SUB(tsecs,tsecs,result);
+
+ mday++; /* day of month always start with 1 */
+ days += mday;
+ wday = (days + wday) % 7;
+
+ yday += mday;
+
+ /* get the hours */
+ JSLL_UI2L(result1,PRMJ_HOUR_SECONDS);
+ JSLL_DIV(result,tsecs,result1);
+ JSLL_L2I(hours,result);
+ JSLL_MUL(result,result,result1);
+ JSLL_SUB(tsecs,tsecs,result);
+
+ /* get minutes */
+ JSLL_UI2L(result1,60);
+ JSLL_DIV(result,tsecs,result1);
+ JSLL_L2I(minutes,result);
+ JSLL_MUL(result,result,result1);
+ JSLL_SUB(tsecs,tsecs,result);
+
+ JSLL_L2I(seconds,tsecs);
+
+ prtm->tm_usec = 0L;
+ prtm->tm_sec = (JSInt8)seconds;
+ prtm->tm_min = (JSInt8)minutes;
+ prtm->tm_hour = (JSInt8)hours;
+ prtm->tm_mday = (JSInt8)mday;
+ prtm->tm_mon = (JSInt8)month;
+ prtm->tm_wday = (JSInt8)wday;
+ prtm->tm_year = (JSInt16)year;
+ prtm->tm_yday = (JSInt16)yday;
+}
diff --git a/src/third_party/js-1.7/prmjtime.h b/src/third_party/js-1.7/prmjtime.h
new file mode 100644
index 00000000000..b74fe845eb6
--- /dev/null
+++ b/src/third_party/js-1.7/prmjtime.h
@@ -0,0 +1,95 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is Mozilla Communicator client code, released
+ * March 31, 1998.
+ *
+ * The Initial Developer of the Original Code is
+ * Netscape Communications Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef prmjtime_h___
+#define prmjtime_h___
+/*
+ * PR date stuff for mocha and java. Placed here temporarily not to break
+ * Navigator and localize changes to mocha.
+ */
+#include <time.h>
+#include "jslong.h"
+#ifdef MOZILLA_CLIENT
+#include "jscompat.h"
+#endif
+
+JS_BEGIN_EXTERN_C
+
+typedef struct PRMJTime PRMJTime;
+
+/*
+ * Broken down form of 64 bit time value.
+ */
+struct PRMJTime {
+ JSInt32 tm_usec; /* microseconds of second (0-999999) */
+ JSInt8 tm_sec; /* seconds of minute (0-59) */
+ JSInt8 tm_min; /* minutes of hour (0-59) */
+ JSInt8 tm_hour; /* hour of day (0-23) */
+ JSInt8 tm_mday; /* day of month (1-31) */
+ JSInt8 tm_mon; /* month of year (0-11) */
+ JSInt8 tm_wday; /* 0=sunday, 1=monday, ... */
+ JSInt16 tm_year; /* absolute year, AD */
+ JSInt16 tm_yday; /* day of year (0 to 365) */
+ JSInt8 tm_isdst; /* non-zero if DST in effect */
+};
+
+/* Some handy constants */
+#define PRMJ_USEC_PER_SEC 1000000L
+#define PRMJ_USEC_PER_MSEC 1000L
+
+/* Return the current local time in micro-seconds */
+extern JSInt64
+PRMJ_Now(void);
+
+/* get the difference between this time zone and gmt timezone in seconds */
+extern JSInt32
+PRMJ_LocalGMTDifference(void);
+
+/* Format a time value into a buffer. Same semantics as strftime() */
+extern size_t
+PRMJ_FormatTime(char *buf, int buflen, char *fmt, PRMJTime *tm);
+
+/* Get the DST offset for the local time passed in */
+extern JSInt64
+PRMJ_DSTOffset(JSInt64 local_time);
+
+JS_END_EXTERN_C
+
+#endif /* prmjtime_h___ */
+
diff --git a/src/third_party/js-1.7/resource.h b/src/third_party/js-1.7/resource.h
new file mode 100644
index 00000000000..9301810e444
--- /dev/null
+++ b/src/third_party/js-1.7/resource.h
@@ -0,0 +1,15 @@
+//{{NO_DEPENDENCIES}}
+// Microsoft Developer Studio generated include file.
+// Used by js3240.rc
+//
+
+// Next default values for new objects
+//
+#ifdef APSTUDIO_INVOKED
+#ifndef APSTUDIO_READONLY_SYMBOLS
+#define _APS_NEXT_RESOURCE_VALUE 101
+#define _APS_NEXT_COMMAND_VALUE 40001
+#define _APS_NEXT_CONTROL_VALUE 1000
+#define _APS_NEXT_SYMED_VALUE 101
+#endif
+#endif
diff --git a/src/third_party/js-1.7/rules.mk b/src/third_party/js-1.7/rules.mk
new file mode 100644
index 00000000000..8d484db9a5f
--- /dev/null
+++ b/src/third_party/js-1.7/rules.mk
@@ -0,0 +1,193 @@
+# -*- Mode: makefile -*-
+#
+# ***** BEGIN LICENSE BLOCK *****
+# Version: MPL 1.1/GPL 2.0/LGPL 2.1
+#
+# The contents of this file are subject to the Mozilla Public License Version
+# 1.1 (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+# http://www.mozilla.org/MPL/
+#
+# Software distributed under the License is distributed on an "AS IS" basis,
+# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+# for the specific language governing rights and limitations under the
+# License.
+#
+# The Original Code is Mozilla Communicator client code, released
+# March 31, 1998.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998-1999
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Michael Ang <mang@subcarrier.org>
+#
+# Alternatively, the contents of this file may be used under the terms of
+# either of the GNU General Public License Version 2 or later (the "GPL"),
+# or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+# in which case the provisions of the GPL or the LGPL are applicable instead
+# of those above. If you wish to allow use of your version of this file only
+# under the terms of either the GPL or the LGPL, and not to allow others to
+# use your version of this file under the terms of the MPL, indicate your
+# decision by deleting the provisions above and replace them with the notice
+# and other provisions required by the GPL or the LGPL. If you do not delete
+# the provisions above, a recipient may use your version of this file under
+# the terms of any one of the MPL, the GPL or the LGPL.
+#
+# ***** END LICENSE BLOCK *****
+
+#
+# JSRef GNUmake makefile rules
+#
+
+ifdef USE_MSVC
+LIB_OBJS = $(addprefix $(OBJDIR)/, $(LIB_CFILES:.c=.obj))
+PROG_OBJS = $(addprefix $(OBJDIR)/, $(PROG_CFILES:.c=.obj))
+else
+LIB_OBJS = $(addprefix $(OBJDIR)/, $(LIB_CFILES:.c=.o))
+LIB_OBJS += $(addprefix $(OBJDIR)/, $(LIB_ASFILES:.s=.o))
+PROG_OBJS = $(addprefix $(OBJDIR)/, $(PROG_CFILES:.c=.o))
+endif
+
+CFILES = $(LIB_CFILES) $(PROG_CFILES)
+OBJS = $(LIB_OBJS) $(PROG_OBJS)
+
+ifdef USE_MSVC
+# TARGETS = $(LIBRARY) # $(PROGRAM) not supported for MSVC yet
+TARGETS += $(SHARED_LIBRARY) $(PROGRAM) # it is now
+else
+TARGETS += $(LIBRARY) $(SHARED_LIBRARY) $(PROGRAM)
+endif
+
+all:
+ +$(LOOP_OVER_PREDIRS)
+ifneq "$(strip $(TARGETS))" ""
+ $(MAKE) -f Makefile.ref $(TARGETS)
+endif
+ +$(LOOP_OVER_DIRS)
+
+$(OBJDIR)/%: %.c
+ @$(MAKE_OBJDIR)
+ $(CC) -o $@ $(CFLAGS) $*.c $(LDFLAGS)
+
+# This rule must come before the rule with no dep on header
+$(OBJDIR)/%.o: %.c %.h
+ @$(MAKE_OBJDIR)
+ $(CC) -o $@ -c $(CFLAGS) $*.c
+
+
+$(OBJDIR)/%.o: %.c
+ @$(MAKE_OBJDIR)
+ $(CC) -o $@ -c $(CFLAGS) $*.c
+
+$(OBJDIR)/%.o: %.s
+ @$(MAKE_OBJDIR)
+ $(AS) -o $@ $(ASFLAGS) $*.s
+
+# This rule must come before rule with no dep on header
+$(OBJDIR)/%.obj: %.c %.h
+ @$(MAKE_OBJDIR)
+ $(CC) -Fo$(OBJDIR)/ -c $(CFLAGS) $(JSDLL_CFLAGS) $*.c
+
+$(OBJDIR)/%.obj: %.c
+ @$(MAKE_OBJDIR)
+ $(CC) -Fo$(OBJDIR)/ -c $(CFLAGS) $(JSDLL_CFLAGS) $*.c
+
+$(OBJDIR)/js.obj: js.c
+ @$(MAKE_OBJDIR)
+ $(CC) -Fo$(OBJDIR)/ -c $(CFLAGS) $<
+
+ifeq ($(OS_ARCH),OS2)
+$(LIBRARY): $(LIB_OBJS)
+ $(AR) $@ $? $(AR_OS2_SUFFIX)
+ $(RANLIB) $@
+else
+ifdef USE_MSVC
+$(SHARED_LIBRARY): $(LIB_OBJS)
+ link.exe $(LIB_LINK_FLAGS) /base:0x61000000 $(OTHER_LIBS) \
+ /out:"$@" /pdb:none\
+ /implib:"$(OBJDIR)/$(@F:.dll=.lib)" $^
+else
+$(LIBRARY): $(LIB_OBJS)
+ $(AR) rv $@ $?
+ $(RANLIB) $@
+
+$(SHARED_LIBRARY): $(LIB_OBJS)
+ $(MKSHLIB) -o $@ $(LIB_OBJS) $(LDFLAGS) $(OTHER_LIBS)
+endif
+endif
+
+# Java stuff
+$(CLASSDIR)/$(OBJDIR)/$(JARPATH)/%.class: %.java
+ mkdir -p $(@D)
+ $(JAVAC) $(JAVAC_FLAGS) $<
+
+define MAKE_OBJDIR
+if test ! -d $(@D); then rm -rf $(@D); mkdir -p $(@D); fi
+endef
+
+ifdef DIRS
+LOOP_OVER_DIRS = \
+ @for d in $(DIRS); do \
+ if test -d $$d; then \
+ set -e; \
+ echo "cd $$d; $(MAKE) -f Makefile.ref $@"; \
+ cd $$d; $(MAKE) -f Makefile.ref $@; cd ..; \
+ set +e; \
+ else \
+ echo "Skipping non-directory $$d..."; \
+ fi; \
+ done
+endif
+
+ifdef PREDIRS
+LOOP_OVER_PREDIRS = \
+ @for d in $(PREDIRS); do \
+ if test -d $$d; then \
+ set -e; \
+ echo "cd $$d; $(MAKE) -f Makefile.ref $@"; \
+ cd $$d; $(MAKE) -f Makefile.ref $@; cd ..; \
+ set +e; \
+ else \
+ echo "Skipping non-directory $$d..."; \
+ fi; \
+ done
+endif
+
+export:
+ +$(LOOP_OVER_PREDIRS)
+ mkdir -p $(DIST)/include $(DIST)/$(LIBDIR) $(DIST)/bin
+ifneq "$(strip $(HFILES))" ""
+ $(CP) $(HFILES) $(DIST)/include
+endif
+ifneq "$(strip $(LIBRARY))" ""
+ $(CP) $(LIBRARY) $(DIST)/$(LIBDIR)
+endif
+ifneq "$(strip $(JARS))" ""
+ $(CP) $(JARS) $(DIST)/$(LIBDIR)
+endif
+ifneq "$(strip $(SHARED_LIBRARY))" ""
+ $(CP) $(SHARED_LIBRARY) $(DIST)/$(LIBDIR)
+endif
+ifneq "$(strip $(PROGRAM))" ""
+ $(CP) $(PROGRAM) $(DIST)/bin
+endif
+ +$(LOOP_OVER_DIRS)
+
+clean:
+ rm -rf $(OBJS) $(GARBAGE)
+ @cd fdlibm; $(MAKE) -f Makefile.ref clean
+
+clobber:
+ rm -rf $(OBJS) $(TARGETS) $(DEPENDENCIES)
+ @cd fdlibm; $(MAKE) -f Makefile.ref clobber
+
+depend:
+ gcc -MM $(CFLAGS) $(LIB_CFILES)
+
+tar:
+ tar cvf $(TARNAME) $(TARFILES)
+ gzip $(TARNAME)
+
diff --git a/src/third_party/js-1.7/win32.order b/src/third_party/js-1.7/win32.order
new file mode 100644
index 00000000000..cf4e8c41ce5
--- /dev/null
+++ b/src/third_party/js-1.7/win32.order
@@ -0,0 +1,391 @@
+js_MarkGCThing ; 5893956
+JS_GetPrivate ; 2090130
+JS_HashTableRawLookup ; 1709984
+js_Mark ; 1547496
+js_GetToken ; 1406677
+js_UngetToken ; 1154416
+js_MarkAtom ; 992874
+js_MatchToken ; 980277
+js_CompareStrings ; 662772
+js_Lock ; 628184
+js_Unlock ; 628184
+js_AtomizeString ; 611102
+js_HashString ; 611102
+js_DropScopeProperty ; 546476
+JS_malloc ; 484350
+js_Atomize ; 464433
+js_InflateStringToBuffer ; 460739
+js_HoldScopeProperty ; 442612
+JS_free ; 382991
+js_MarkScript ; 376942
+js_HashId ; 365238
+JS_CompareValues ; 352366
+js_IdToValue ; 337594
+JS_GetClass ; 325296
+js_LookupProperty ; 324680
+js_GetAtom ; 244669
+js_DropProperty ; 223217
+JS_GetParent ; 209680
+js_LiveContext ; 205767
+js_PeekToken ; 200646
+js_GetSlotThreadSafe ; 198839
+JS_GetStringChars ; 190862
+JS_HashTableRawAdd ; 179156
+js_FoldConstants ; 162626
+js_EmitTree ; 145634
+JS_EnumerateStub ; 140640
+js_NewSrcNote ; 136983
+js_GetProperty ; 135639
+js_NewScopeProperty ; 135057
+js_MutateScope ; 135057
+js_GetMutableScope ; 135057
+js_AllocSlot ; 132401
+JS_GetRuntime ; 127316
+JS_FrameIterator ; 121963
+JS_GetFrameFunctionObject ; 120567
+js_AllocGCThing ; 119828
+js_DestroyScopeProperty ; 115989
+js_Emit3 ; 109135
+js_AtomizeChars ; 108038
+JS_HashTableLookup ; 107154
+JS_InstanceOf ; 103905
+js_DefineProperty ; 99514
+js_strncpy ; 88276
+js_PeekTokenSameLine ; 87197
+js_HoldObjectMap ; 79084
+js_DropObjectMap ; 77824
+js_NewObject ; 72421
+js_ValueToString ; 72143
+js_GetClassPrototype ; 66235
+js_UnlockRuntime ; 64699
+js_LockRuntime ; 64699
+js_ContextIterator ; 64586
+JS_ClearWatchPointsForObject ; 64155
+js_FinalizeObject ; 63925
+js_IndexAtom ; 63789
+JS_SetPrivate ; 63702
+JS_GetGlobalObject ; 63546
+js_Emit1 ; 63012
+JS_ContextIterator ; 57847
+JS_GetInstancePrivate ; 57817
+JS_HashTableRawRemove ; 57057
+js_AllocRawStack ; 54181
+js_Invoke ; 53568
+js_FindProperty ; 53150
+JS_GetFrameScript ; 51395
+js_LinkFunctionObject ; 50651
+js_SetSrcNoteOffset ; 47735
+js_InWithStatement ; 47346
+js_NewFunction ; 47074
+js_NewSrcNote2 ; 46165
+JS_HashTableAdd ; 45503
+JS_HashTableRemove ; 45213
+js_InCatchBlock ; 42198
+js_AddRootRT ; 40587
+js_AddRoot ; 40587
+js_SetProperty ; 40558
+JS_AddNamedRoot ; 40462
+js_RemoveRoot ; 40384
+JS_RemoveRootRT ; 38129
+js_NewString ; 37471
+js_DefineFunction ; 36629
+JS_GetContextThread ; 36498
+JS_LookupProperty ; 35137
+JS_ValueToString ; 34072
+JS_realloc ; 33776
+JS_DefineFunction ; 33268
+JS_SetErrorReporter ; 32851
+js_FinalizeString ; 30311
+js_FinalizeStringRT ; 30311
+JS_ArenaAllocate ; 30099
+JS_BeginRequest ; 29323
+JS_EndRequest ; 29323
+JS_GetContextPrivate ; 29189
+JS_CompactArenaPool ; 28874
+js_ValueToStringAtom ; 27934
+JS_ValueToId ; 26517
+js_ValueToBoolean ; 25908
+JS_InternString ; 25467
+js_PopStatement ; 24364
+js_PushStatement ; 24364
+js_NewStringCopyN ; 23911
+js_FlushPropertyCacheByProp ; 23883
+js_GetStringBytes ; 23421
+JS_ArenaRelease ; 23267
+JS_GetStringBytes ; 23106
+js_FreeStack ; 22399
+js_AllocStack ; 22399
+JS_SetProperty ; 21240
+js_InitObjectMap ; 19991
+js_NewScope ; 19991
+js_strlen ; 19070
+JS_GetScriptPrincipals ; 18063
+js_SrcNoteLength ; 17369
+js_DestroyObjectMap ; 17198
+js_DestroyScope ; 17198
+JS_GetStringLength ; 16306
+js_PopStatementCG ; 15418
+JS_GetFrameAnnotation ; 14949
+js_FreeRawStack ; 14032
+js_Interpret ; 14032
+js_TransferScopeLock ; 13899
+JS_ResolveStandardClass ; 13645
+JS_ResumeRequest ; 12837
+JS_SuspendRequest ; 12837
+JS_GetProperty ; 12488
+JS_NewObject ; 11660
+js_AllocTryNotes ; 11418
+js_NewNumberValue ; 10859
+js_InternalInvoke ; 10051
+js_NewDouble ; 9936
+js_SetJumpOffset ; 9886
+js_SkipWhiteSpace ; 9299
+js_NewDoubleValue ; 7474
+JS_GetPendingException ; 7404
+js_NewObjectMap ; 7236
+JS_ClearPendingException ; 7092
+JS_strtod ; 7053
+js_strtod ; 7053
+js_InflateString ; 7004
+JS_GetFunctionName ; 6808
+JS_NewHashTable ; 6794
+JS_NewFunction ; 6575
+js_FreeSlot ; 6476
+js_LockScope ; 6332
+JS_HashTableEnumerateEntries ; 6285
+js_GetLengthProperty ; 6162
+js_LockObj ; 6149
+JS_NewUCStringCopyN ; 5994
+JS_NewNumberValue ; 5904
+js_NewStringCopyZ ; 5809
+JS_NewUCStringCopyZ ; 5809
+js_DeflateString ; 5612
+js_ValueToNumber ; 5456
+JS_SetOptions ; 5322
+js_NewScript ; 4941
+js_InitCodeGenerator ; 4810
+js_FinishTakingSrcNotes ; 4810
+js_NewScriptFromParams ; 4810
+js_InitAtomMap ; 4810
+js_FinishTakingTryNotes ; 4810
+js_NewScriptFromCG ; 4810
+js_FinishCodeGenerator ; 4810
+JS_strdup ; 4534
+JS_HashTableDestroy ; 4119
+js_CheckRedeclaration ; 3965
+JS_DefineFunctions ; 3808
+js_EmitFunctionBody ; 3739
+js_TryMethod ; 3685
+js_DefaultValue ; 3610
+js_CloneFunctionObject ; 3577
+JS_InitClass ; 3546
+js_SetClassPrototype ; 3377
+JS_GetPrototype ; 3268
+JS_DefineProperties ; 3115
+js_FindVariable ; 3093
+js_DestroyScript ; 3041
+JS_ClearScriptTraps ; 3041
+js_FreeAtomMap ; 3041
+JS_NewStringCopyZ ; 2953
+js_AtomizeObject ; 2709
+JS_ValueToBoolean ; 2643
+js_SetLengthProperty ; 2637
+JS_GetOptions ; 2593
+js_ValueToObject ; 2522
+js_ValueToNonNullObject ; 2510
+js_StringToObject ; 2482
+JS_SetElement ; 2448
+js_NumberToString ; 2407
+JS_TypeOfValue ; 2275
+js_NewBufferTokenStream ; 2253
+js_NewTokenStream ; 2253
+js_CloseTokenStream ; 2253
+JS_RemoveRoot ; 2148
+JS_NewDouble ; 2129
+JS_vsnprintf ; 1937
+JS_snprintf ; 1937
+JS_CallFunctionValue ; 1844
+JS_DHashVoidPtrKeyStub ; 1840
+JS_DHashTableOperate ; 1840
+js_SetProtoOrParent ; 1758
+js_DoubleToInteger ; 1729
+JS_SetVersion ; 1531
+js_ValueToFunction ; 1476
+JS_SetPrototype ; 1408
+JS_CeilingLog2 ; 1317
+js_Execute ; 1199
+js_CompileFunctionBody ; 1182
+JS_CompileUCFunctionForPrincipals ; 1182
+js_GetSrcNoteOffset ; 1139
+JS_DHashMatchEntryStub ; 1094
+JS_VersionToString ; 1090
+JS_CompileUCScriptForPrincipals ; 1071
+js_CompileTokenStream ; 1071
+js_CurrentThreadId ; 1058
+JS_IdToValue ; 1046
+js_ConstructObject ; 974
+JS_DestroyScript ; 967
+js_PCToLineNumber ; 967
+JS_DefineProperty ; 930
+JS_GetScriptFilename ; 924
+JS_GetFramePC ; 899
+JS_EvaluateUCScriptForPrincipals ; 892
+JS_PCToLineNumber ; 848
+JS_StringToVersion ; 761
+js_ExecuteRegExp ; 755
+JS_MaybeGC ; 717
+JS_ValueToNumber ; 698
+JS_GetVersion ; 698
+JS_AliasProperty ; 693
+js_AtomizeValue ; 664
+js_BooleanToString ; 664
+js_SetSlotThreadSafe ; 596
+JS_DHashClearEntryStub ; 584
+JS_DHashTableRawRemove ; 584
+JS_DefineObject ; 557
+js_PutCallObject ; 516
+js_GetCallObject ; 516
+js_strchr ; 511
+JS_DefineUCProperty ; 480
+JS_dtostr ; 475
+JS_ValueToInt32 ; 464
+js_ValueToInt32 ; 464
+JS_FinishArenaPool ; 453
+js_NewTryNote ; 441
+js_strtointeger ; 437
+JS_vsmprintf ; 428
+JS_DHashTableInit ; 423
+JS_DHashAllocTable ; 423
+JS_DHashGetStubOps ; 423
+JS_NewDHashTable ; 423
+JS_DHashTableDestroy ; 423
+JS_DHashFreeTable ; 423
+JS_DHashTableFinish ; 423
+js_EmitBreak ; 412
+js_GetAttributes ; 412
+JS_DefineConstDoubles ; 407
+JS_ArenaGrow ; 374
+js_AtomizeInt ; 372
+JS_SetParent ; 345
+JS_CloneFunctionObject ; 343
+JS_IsNativeFrame ; 343
+JS_ReportErrorNumber ; 340
+js_ErrorToException ; 340
+js_ReportErrorNumberVA ; 340
+js_GetErrorMessage ; 340
+js_ExpandErrorArguments ; 340
+js_ReportUncaughtException ; 315
+JS_IsExceptionPending ; 315
+js_ReportErrorAgain ; 315
+js_ErrorFromException ; 315
+JS_LookupUCProperty ; 307
+JS_InitArenaPool ; 293
+PRMJ_Now ; 262
+DllMain@12 ; 235
+JS_ExecuteScript ; 232
+JS_GetFrameFunction ; 226
+PRMJ_LocalGMTDifference ; 175
+JS_GetConstructor ; 175
+JS_SetGlobalObject ; 164
+js_LockGCThing ; 155
+js_NewRegExpObject ; 152
+js_NewRegExp ; 152
+js_InitObjectClass ; 131
+js_InitFunctionClass ; 131
+js_EmitN ; 128
+JS_ArenaFinish ; 124
+js_GC ; 124
+js_SweepAtomState ; 124
+js_MarkAtomState ; 124
+JS_ArenaRealloc ; 124
+js_ForceGC ; 124
+js_FlushPropertyCache ; 122
+js_InitNumberClass ; 114
+JS_smprintf ; 112
+js_DoubleToECMAInt32 ; 112
+js_ValueToECMAInt32 ; 111
+JS_ValueToECMAInt32 ; 111
+JS_SetContextPrivate ; 109
+PRMJ_DSTOffset ; 108
+js_Clear ; 105
+JS_ClearScope ; 105
+JS_NewScriptObject ; 104
+JS_smprintf_free ; 104
+JS_ConvertValue ; 99
+js_GetSrcNote ; 98
+JS_ValueToECMAUint32 ; 93
+js_ValueToECMAUint32 ; 93
+js_printf ; 93
+js_DoubleToECMAUint32 ; 93
+js_DestroyRegExp ; 89
+js_UnlockGCThing ; 89
+js_TryValueOf ; 87
+js_NewSrcNote3 ; 86
+JS_ConvertStub ; 81
+JS_SetPendingException ; 80
+js_InitStringClass ; 79
+JS_GC ; 78
+js_InitArrayClass ; 74
+js_InitDateClass ; 67
+JS_NewContext ; 64
+JS_AddArgumentFormatter ; 64
+js_InitContextForLocking ; 64
+js_NewContext ; 64
+JS_SetBranchCallback ; 64
+JS_ClearRegExpStatics ; 64
+js_InitRegExpStatics ; 64
+js_InitCallClass ; 63
+js_InitRegExpClass ; 61
+js_Enumerate ; 58
+JS_DestroyContext ; 46
+js_DestroyContext ; 46
+js_FreeRegExpStatics ; 46
+js_InitScanner ; 39
+js_NewPrinter ; 36
+js_DestroyPrinter ; 36
+js_GetPrinterOutput ; 36
+JS_FreeArenaPool ; 36
+js_DecompileCode ; 34
+js_EmitContinue ; 33
+js_CheckAccess ; 30
+js_DecompileValueGenerator ; 28
+js_InitMathClass ; 27
+js_InitExceptionClasses ; 25
+js_NewArrayObject ; 24
+js_InitArgumentsClass ; 21
+js_puts ; 20
+js_InitBooleanClass ; 19
+JS_InitStandardClasses ; 19
+js_InitScriptClass ; 19
+js_obj_toString ; 15
+js_GetArgsValue ; 14
+js_GetArgsObject ; 14
+js_AtomizeDouble ; 12
+JS_DestroyIdArray ; 11
+js_NewIdArray ; 11
+JS_GetElement ; 11
+JS_EvaluateScript ; 9
+JS_EvaluateUCScript ; 9
+JS_DecompileFunction ; 8
+js_DecompileFunction ; 8
+JS_NewString ; 8
+js_SetStringBytes ; 8
+JS_GetArrayLength ; 7
+JS_NewArrayObject ; 7
+JS_IsArrayObject ; 7
+JS_ValueToObject ; 7
+JS_DefineElement ; 6
+js_DecompileScript ; 6
+JS_PushArguments ; 4
+JS_PopArguments ; 4
+JS_PushArgumentsVA ; 4
+js_PutArgsObject ; 2
+JS_SetGCCallbackRT ; 2
+JS_Init ; 1
+js_SetupLocks ; 1
+js_InitRuntimeNumberState ; 1
+js_InitRuntimeStringState ; 1
+js_InitLock ; 1
+js_InitGC ; 1
+js_InitAtomState ; 1
+js_InitStringGlobals ; 1
diff --git a/src/third_party/linenoise/Makefile b/src/third_party/linenoise/Makefile
new file mode 100644
index 00000000000..b335b658524
--- /dev/null
+++ b/src/third_party/linenoise/Makefile
@@ -0,0 +1,7 @@
+linenoise_example: linenoise.h linenoise.cpp
+
+linenoise_example: linenoise.cpp example.c
+ $(CXX) -Wall -W -Os -g -o linenoise_example linenoise.cpp example.c
+
+clean:
+ rm -f linenoise_example
diff --git a/src/third_party/linenoise/README.markdown b/src/third_party/linenoise/README.markdown
new file mode 100644
index 00000000000..6c693ed0ba1
--- /dev/null
+++ b/src/third_party/linenoise/README.markdown
@@ -0,0 +1,47 @@
+# Linenoise
+
+A minimal, zero-config, BSD licensed, readline replacement.
+
+News: linenoise now includes minimal completion support, thanks to Pieter Noordhuis (@pnoordhuis).
+
+News: linenoise is now part of [Android](http://android.git.kernel.org/?p=platform/system/core.git;a=tree;f=liblinenoise;h=56450eaed7f783760e5e6a5993ef75cde2e29dea;hb=HEAD)!
+
+## Can a line editing library be 20k lines of code?
+
+Line editing with some support for history is a really important feature for command line utilities. Instead of retyping almost the same stuff again and again, it's just much better to hit the up arrow and edit the line after a syntax error, or to try a slightly different command. But apparently code dealing with terminals is some sort of Black Magic: readline is 30k lines of code, libedit 20k. Is it reasonable to link small utilities to huge libraries just to get minimal support for line editing?
+
+So what usually happens is either:
+
+ * Large programs with configure scripts disabling line editing if readline is not present in the system, or not supporting it at all since readline is GPL licensed and libedit (the BSD clone) is not as well known and as widely available as readline (real-world example of this problem: Tclsh).
+ * Smaller programs without a configure script simply not supporting line editing at all (a problem we had with Redis-cli, for instance).
+
+The result is a pollution of binaries without line editing support.
+
+So I spent more or less two hours doing a reality check resulting in this little library: does a line editing library *really* need to be 20k lines of code? Apparently not: it is possible to get a very small, zero-configuration, trivial-to-embed library that solves the problem. Smaller programs will just include this, supporting line editing out of the box. Larger programs may use this little library, or just check with configure whether readline/libedit is available and resort to linenoise if not.
+
+## Terminals, in 2010.
+
+Apparently almost every terminal you happen to use today has some kind of support for VT100-like escape sequences. So I tried to write a lib using just very basic VT100 features. The resulting library appears to work everywhere I tried to use it.
+
+Since it's so young I guess there are a few bugs, or the lib may not compile or work with some operating system, but it's a matter of a few weeks and eventually we'll get it right, and there will be no excuse for shipping command line tools without built-in line editing support.
+
+The library is currently less than 400 lines of code. In order to use it in your project just look at the *example.c* file in the source distribution; it is trivial. Linenoise is BSD-licensed code, so you can use it both in free software and in commercial software.
+
+## Tested with...
+
+ * Linux text only console ($TERM = linux)
+ * Linux KDE terminal application ($TERM = xterm)
+ * Linux xterm ($TERM = xterm)
+ * Mac OS X iTerm ($TERM = xterm)
+ * Mac OS X default Terminal.app ($TERM = xterm)
+ * OpenBSD 4.5 through an OSX Terminal.app ($TERM = screen)
+ * IBM AIX 6.1
+ * FreeBSD xterm ($TERM = xterm)
+
+Please test it everywhere you can and report back!
+
+## Let's push this forward!
+
+Please fork it, add something interesting, and send me a pull request. Especially interesting are fixes, new key bindings, and completion support.
+
+Send feedback to antirez at gmail
diff --git a/src/third_party/linenoise/example.c b/src/third_party/linenoise/example.c
new file mode 100644
index 00000000000..ea0b515c1fc
--- /dev/null
+++ b/src/third_party/linenoise/example.c
@@ -0,0 +1,27 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include "linenoise.h"
+
+
+void completion(const char *buf, linenoiseCompletions *lc) {
+ if (buf[0] == 'h') {
+ linenoiseAddCompletion(lc,"hello");
+ linenoiseAddCompletion(lc,"hello there");
+ }
+}
+
+int main(void) {
+ char *line;
+
+ linenoiseSetCompletionCallback(completion);
+ linenoiseHistoryLoad("history.txt"); /* Load the history at startup */
+ while((line = linenoise("hello> ")) != NULL) {
+ if (line[0] != '\0') {
+ printf("echo: '%s'\n", line);
+ linenoiseHistoryAdd(line);
+ linenoiseHistorySave("history.txt"); /* Save every new entry */
+ }
+ free(line);
+ }
+ return 0;
+}
diff --git a/src/third_party/linenoise/history.txt b/src/third_party/linenoise/history.txt
new file mode 100644
index 00000000000..70858d8e9a1
--- /dev/null
+++ b/src/third_party/linenoise/history.txt
@@ -0,0 +1,3 @@
+hi
+this is fun
+hel
diff --git a/src/third_party/linenoise/linenoise.cpp b/src/third_party/linenoise/linenoise.cpp
new file mode 100644
index 00000000000..98e48415066
--- /dev/null
+++ b/src/third_party/linenoise/linenoise.cpp
@@ -0,0 +1,2077 @@
+/* linenoise.c -- guerrilla line editing library against the idea that a
+ * line editing lib needs to be 20,000 lines of C code.
+ *
+ * You can find the latest source code at:
+ *
+ * http://github.com/antirez/linenoise
+ *
+ * Makes a number of crazy assumptions that happen to be true in 99.9999% of
+ * the 2010 UNIX computers around.
+ *
+ * Copyright (c) 2010, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2010, Pieter Noordhuis <pcnoordhuis at gmail dot com>
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Redis nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * References:
+ * - http://invisible-island.net/xterm/ctlseqs/ctlseqs.html
+ * - http://www.3waylabs.com/nw/WWW/products/wizcon/vt220.html
+ *
+ * Todo list:
+ * - Switch to gets() if $TERM is something we can't support.
+ * - Filter bogus Ctrl+<char> combinations.
+ * - Win32 support
+ *
+ * Bloat:
+ * - Completion?
+ * - History search like Ctrl+r in readline?
+ *
+ * List of escape sequences used by this program; we do everything with
+ * just three sequences. Being this cheap may cause some flickering on
+ * slow terminals, but the fewer the sequences, the more compatible we are.
+ *
+ * CHA (Cursor Horizontal Absolute)
+ * Sequence: ESC [ n G
+ * Effect: moves cursor to column n (1 based)
+ *
+ * EL (Erase Line)
+ * Sequence: ESC [ n K
+ * Effect: if n is 0 or missing, clear from cursor to end of line
+ * Effect: if n is 1, clear from beginning of line to cursor
+ * Effect: if n is 2, clear entire line
+ *
+ * CUF (CUrsor Forward)
+ * Sequence: ESC [ n C
+ * Effect: moves cursor forward of n chars
+ *
+ * The following are used to clear the screen: ESC [ H ESC [ 2 J
+ * This is actually composed of two sequences:
+ *
+ * cursorhome
+ * Sequence: ESC [ H
+ * Effect: moves the cursor to upper left corner
+ *
+ * ED2 (Clear entire screen)
+ * Sequence: ESC [ 2 J
+ * Effect: clear the whole screen
+ *
+ */
+
+#ifdef _WIN32
+
+#include <conio.h>
+#include <windows.h>
+#include <stdio.h>
+#include <io.h>
+#include <errno.h>
+#define snprintf _snprintf // Microsoft headers use underscores in some names
+#define strcasecmp _stricmp
+#define strdup _strdup
+#define isatty _isatty
+#define write _write
+#define STDIN_FILENO 0
+
+#else /* _WIN32 */
+#include <signal.h>
+#include <termios.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+#include <cctype>
+
+#endif /* _WIN32 */
+
+#include "linenoise.h"
+#include <string>
+#include <vector>
+
+using std::string;
+using std::vector;
+
+#define LINENOISE_DEFAULT_HISTORY_MAX_LEN 100
+#define LINENOISE_MAX_LINE 4096
+
+// make control-characters more readable
+#define ctrlChar( upperCaseASCII ) ( upperCaseASCII - 0x40 )
+
+/**
+ * Calculate a new screen position given a starting position, screen width and character count
+ * @param x initial x position (zero-based)
+ * @param y initial y position (zero-based)
+ * @param screenColumns screen column count
+ * @param charCount character positions to advance
+ * @param xOut returned x position (zero-based)
+ * @param yOut returned y position (zero-based)
+ */
+static void calculateScreenPosition(int x, int y, int screenColumns, int charCount, int& xOut, int& yOut) {
+ xOut = x;
+ yOut = y;
+ int charsRemaining = charCount;
+ while ( charsRemaining > 0 ) {
+ int charsThisRow = (x + charsRemaining < screenColumns) ? charsRemaining : screenColumns - x;
+ xOut = x + charsThisRow;
+ yOut = y;
+ charsRemaining -= charsThisRow;
+ x = 0;
+ ++y;
+ }
+ if ( xOut == screenColumns ) { // we have to special-case line wrap
+ xOut = 0;
+ ++yOut;
+ }
+}
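+
+// Minimal usage sketch for calculateScreenPosition() (illustrative values only):
+// on an 80-column screen, laying out 5 characters starting from zero-based
+// position x == 78, y == 0 wraps onto the next row:
+//
+//     int xOut, yOut;
+//     calculateScreenPosition( 78, 0, 80, 5, xOut, yOut );  // xOut == 3, yOut == 1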
+
+struct PromptBase { // a convenience struct for grouping prompt info
+ char* promptText; // our copy of the prompt text, edited
+ int promptChars; // bytes or chars (until UTF-8) in promptText
+ int promptExtraLines; // extra lines (beyond 1) occupied by prompt
+ int promptIndentation; // column offset to end of prompt
+ int promptLastLinePosition; // index into promptText where last line begins
+ int promptPreviousInputLen; // promptChars of previous input line, for clearing
+ int promptCursorRowOffset; // where the cursor is relative to the start of the prompt
+ int promptScreenColumns; // width of screen in columns
+ int previousPromptLen; // help erasing
+};
+
+struct PromptInfo : public PromptBase {
+
+ PromptInfo( const char* textPtr, int columns ) {
+ promptScreenColumns = columns;
+
+ promptText = new char[strlen( textPtr ) + 1];
+ strcpy( promptText, textPtr );
+
+ // strip evil characters from the prompt -- we do allow newline
+ unsigned char* pIn = reinterpret_cast<unsigned char *>( promptText );
+ unsigned char* pOut = pIn;
+ while ( *pIn ) {
+ unsigned char c = *pIn; // we need unsigned so chars 0x80 and above are allowed
+ if ( '\n' == c || c >= ' ' ) {
+ *pOut = c;
+ ++pOut;
+ }
+ ++pIn;
+ }
+ *pOut = 0;
+ promptChars = pOut - reinterpret_cast<unsigned char *>( promptText );
+ promptExtraLines = 0;
+ promptLastLinePosition = 0;
+ promptPreviousInputLen = 0;
+ int x = 0;
+ for ( int i = 0; i < promptChars; ++i ) {
+ char c = promptText[i];
+ if ( '\n' == c ) {
+ x = 0;
+ ++promptExtraLines;
+ promptLastLinePosition = i + 1;
+ }
+ else {
+ ++x;
+ if ( x >= promptScreenColumns ) {
+ x = 0;
+ ++promptExtraLines;
+ promptLastLinePosition = i + 1;
+ }
+ }
+ }
+ promptIndentation = promptChars - promptLastLinePosition;
+ promptCursorRowOffset = promptExtraLines;
+ }
+ ~PromptInfo() {
+ delete [] promptText;
+ }
+};
+
+// Used with DynamicPrompt (history search)
+//
+static const char forwardSearchBasePrompt[] = "(i-search)`";
+static const char reverseSearchBasePrompt[] = "(reverse-i-search)`";
+static const char endSearchBasePrompt[] = "': ";
+static string previousSearchText;
+
+// changing prompt for "(reverse-i-search)`text':" etc.
+//
+struct DynamicPrompt : public PromptBase {
+ char* searchText; // text we are searching for
+ int searchTextLen; // chars in searchText
+ int direction; // current search direction, 1=forward, -1=reverse
+ int forwardSearchBasePromptLen; // prompt component lengths
+ int reverseSearchBasePromptLen;
+ int endSearchBasePromptLen;
+
+ DynamicPrompt( PromptInfo& pi, int initialDirection ) : direction( initialDirection ) {
+ forwardSearchBasePromptLen = strlen( forwardSearchBasePrompt ); // store constant text lengths
+ reverseSearchBasePromptLen = strlen( reverseSearchBasePrompt );
+ endSearchBasePromptLen = strlen( endSearchBasePrompt );
+ promptScreenColumns = pi.promptScreenColumns;
+ promptCursorRowOffset = 0;
+ searchTextLen = 0;
+ searchText = new char[1]; // start with empty search string
+ searchText[0] = 0;
+ promptChars = endSearchBasePromptLen +
+ ( ( direction > 0 ) ? forwardSearchBasePromptLen : reverseSearchBasePromptLen );
+ promptLastLinePosition = promptChars; // TODO fix this, we are assuming that the history prompt won't wrap (!)
+ promptPreviousInputLen = 0;
+ previousPromptLen = promptChars;
+ promptText = new char[promptChars + 1];
+ strcpy( promptText, ( direction > 0 ) ? forwardSearchBasePrompt : reverseSearchBasePrompt );
+ strcpy( &promptText[promptChars - endSearchBasePromptLen], endSearchBasePrompt );
+ calculateScreenPosition( 0, 0, pi.promptScreenColumns, promptChars, promptIndentation, promptExtraLines );
+ }
+
+ void updateSearchPrompt( void ) {
+ delete [] promptText;
+ promptChars = endSearchBasePromptLen + searchTextLen +
+ ( ( direction > 0 ) ? forwardSearchBasePromptLen : reverseSearchBasePromptLen );
+ promptText = new char[promptChars + 1];
+ strcpy( promptText, ( direction > 0 ) ? forwardSearchBasePrompt : reverseSearchBasePrompt );
+ strcat( promptText, searchText );
+ strcpy( &promptText[promptChars - endSearchBasePromptLen], endSearchBasePrompt );
+ }
+
+ void updateSearchText( const char* textPtr ) {
+ delete [] searchText;
+ searchTextLen = strlen( textPtr );
+ searchText = new char[searchTextLen + 1];
+ strcpy( searchText, textPtr );
+ updateSearchPrompt();
+ }
+
+ ~DynamicPrompt() {
+ delete [] searchText;
+ delete [] promptText;
+ }
+};
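+
+// Illustrative sketch (example search text assumed): for a reverse search where
+// the user has typed "his",
+//
+//     DynamicPrompt dp( pi, -1 );
+//     dp.updateSearchText( "his" );
+//     // dp.promptText is now "(reverse-i-search)`his': "
+//     // dp.promptChars == 19 + 3 + 3 == 25
+//
+// and dynamicRefresh() further below draws the matching history line right after
+// this prompt.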
+
+class KillRing {
+ static const int capacity = 10;
+ int size;
+ int index;
+ char indexToSlot[10];
+ vector < string > theRing;
+
+public:
+ enum action { actionOther, actionKill, actionYank };
+ action lastAction;
+ int lastYankSize;
+
+ KillRing() : size( 0 ), index( 0 ), lastAction( actionOther ) {
+ theRing.reserve( capacity );
+ }
+
+ void kill( const char* text, int textLen, bool forward ) {
+ if ( textLen == 0 ) {
+ return;
+ }
+ char* textCopy = new char[ textLen + 1 ];
+ memcpy( textCopy, text, textLen );
+ textCopy[ textLen ] = 0;
+ string textCopyString( textCopy );
+ if ( lastAction == actionKill ) {
+ int slot = indexToSlot[0];
+ theRing[slot] = forward ?
+ theRing[slot] + textCopyString :
+ textCopyString + theRing[slot];
+ }
+ else {
+ if ( size < capacity ) {
+ if ( size > 0 ) {
+ memmove( &indexToSlot[1], &indexToSlot[0], size );
+ }
+ indexToSlot[0] = size;
+ size++;
+ theRing.push_back( textCopyString );
+ }
+ else {
+ int slot = indexToSlot[capacity - 1];
+ theRing[slot] = textCopyString;
+ memmove( &indexToSlot[1], &indexToSlot[0], capacity - 1 );
+ indexToSlot[0] = slot;
+ }
+ index = 0;
+ }
+ delete[] textCopy;
+ }
+
+ string* yank() {
+ return ( size > 0 ) ? &theRing[indexToSlot[index]] : 0;
+ }
+
+ string* yankPop() {
+ if ( size == 0 ) {
+ return 0;
+ }
+ ++index;
+ if ( index == size ) {
+ index = 0;
+ }
+ return &theRing[indexToSlot[index]];
+ }
+
+};
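+
+// Minimal KillRing usage sketch (standalone, values assumed for illustration).
+// kill() stores killed text; consecutive kills are appended to the same slot only
+// when the caller has set lastAction to actionKill in between, otherwise each kill
+// gets its own slot. yank() returns the most recent kill, yankPop() rotates to
+// older entries:
+//
+//     KillRing ring;
+//     ring.kill( "foo", 3, true );      // new slot: "foo"
+//     ring.kill( "bar", 3, true );      // lastAction still actionOther -> new slot: "bar"
+//     std::string* s = ring.yank();     // -> "bar" (most recent kill)
+//     s = ring.yankPop();               // -> "foo" (older entry)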
+
+class InputBuffer {
+ char* buf;
+ int buflen;
+ int len;
+ int pos;
+
+ void clearScreen( PromptInfo& pi );
+ int incrementalHistorySearch( PromptInfo& pi, int startChar );
+ int completeLine( PromptInfo& pi );
+ void refreshLine( PromptBase& pi );
+
+public:
+ InputBuffer( char* buffer, int bufferLen ) : buf( buffer ), buflen( bufferLen - 1 ), len( 0 ), pos( 0 ) {
+ buf[0] = 0;
+ }
+ int getInputLine( PromptInfo& pi );
+
+};
+
+// Special codes for keyboard input:
+//
+// Between Windows and the various Linux "terminal" programs, there is some
+// pretty diverse behavior in the "scan codes" and escape sequences we are
+// presented with. So ... we'll translate them all into our own pidgin
+// pseudocode, trying to stay out of the way of UTF-8 and international
+// characters. Here's the general plan.
+//
+// "User input keystrokes" (key chords, whatever) will be encoded as a single
+// value. The low 8 bits are reserved for ASCII and UTF-8 characters.
+// Popular function-type keys get their own codes in the range 0x101 to (if
+// needed) 0x1FF, currently just arrow keys, Home, End and Delete.
+// Keypresses with Ctrl get or-ed with 0x200, with Alt get or-ed with 0x400.
+// So, Ctrl+Alt+Home is encoded as 0x200 + 0x400 + 0x105 == 0x705. To keep
+// things complicated, the Alt key is equivalent to prefixing the keystroke
+// with ESC, so ESC followed by D is treated the same as Alt + D ... we'll
+// just use Emacs terminology and call this "Meta". So, we will encode both
+// ESC followed by D and Alt held down while D is pressed the same, as Meta-D,
+// encoded as 0x464.
+//
+// Here are the definitions of our component constants:
+//
+static const int UP_ARROW_KEY = 0x101;
+static const int DOWN_ARROW_KEY = 0x102;
+static const int RIGHT_ARROW_KEY = 0x103;
+static const int LEFT_ARROW_KEY = 0x104;
+static const int HOME_KEY = 0x105;
+static const int END_KEY = 0x106;
+static const int DELETE_KEY = 0x107;
+
+static const int CTRL = 0x200;
+static const int META = 0x400;
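+
+// Illustrative sketch of how these constants combine when reading input (assumed
+// consumer code, mirroring the switch statements further below):
+//
+//     int key = linenoiseReadChar();
+//     if ( key == ctrlChar( 'A' ) )           { /* 0x001: plain Ctrl-A */ }
+//     if ( key == ( CTRL | LEFT_ARROW_KEY ) ) { /* 0x304: Ctrl+Left    */ }
+//     if ( key == ( META | 'd' ) )            { /* 0x464: Meta-D       */ }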
+
+static linenoiseCompletionCallback* completionCallback = NULL;
+
+#ifdef _WIN32
+static HANDLE console_in, console_out;
+static DWORD oldMode;
+static WORD oldDisplayAttribute;
+#else
+static struct termios orig_termios; /* in order to restore at exit */
+#endif
+
+static KillRing killRing;
+
+static int rawmode = 0; /* for atexit() function to check if restore is needed*/
+static int atexit_registered = 0; /* register atexit just 1 time */
+static int historyMaxLen = LINENOISE_DEFAULT_HISTORY_MAX_LEN;
+static int historyLen = 0;
+static int historyIndex = 0;
+static char** history = NULL;
+
+static void linenoiseAtExit( void );
+
+static void beep() {
+ fprintf( stderr, "\x7" ); // ctrl-G == bell/beep
+ fflush( stderr );
+}
+
+void linenoiseHistoryFree( void ) {
+ if ( history ) {
+ for ( int j = 0; j < historyLen; ++j )
+ free( history[j] );
+ historyLen = 0;
+ free( history );
+ history = 0;
+ }
+}
+
+static int enableRawMode( void ) {
+#ifdef _WIN32
+ if ( ! console_in ) {
+ console_in = GetStdHandle( STD_INPUT_HANDLE );
+ console_out = GetStdHandle( STD_OUTPUT_HANDLE );
+
+ GetConsoleMode( console_in, &oldMode );
+ SetConsoleMode( console_in, oldMode & ~( ENABLE_LINE_INPUT | ENABLE_ECHO_INPUT | ENABLE_PROCESSED_INPUT ) );
+ }
+ return 0;
+#else
+ struct termios raw;
+
+ if ( ! isatty( 0 ) ) goto fatal;
+ if ( ! atexit_registered ) {
+ atexit( linenoiseAtExit );
+ atexit_registered = 1;
+ }
+ if ( tcgetattr( 0, &orig_termios ) == -1 ) goto fatal;
+
+ raw = orig_termios; /* modify the original mode */
+ /* input modes: no break, no CR to NL, no parity check, no strip char,
+ * no start/stop output control. */
+ raw.c_iflag &= ~( BRKINT | ICRNL | INPCK | ISTRIP | IXON );
+ /* output modes - disable post processing */
+ // this is wrong, we don't want raw output, it turns newlines into straight linefeeds
+ //raw.c_oflag &= ~(OPOST);
+ /* control modes - set 8 bit chars */
+ raw.c_cflag |= ( CS8 );
+ /* local modes - echoing off, canonical off, no extended functions,
+ * no signal chars (^Z,^C) */
+ raw.c_lflag &= ~( ECHO | ICANON | IEXTEN | ISIG );
+ /* control chars - set return condition: min number of bytes and timer.
+ * We want read to return every single byte, without timeout. */
+ raw.c_cc[VMIN] = 1; raw.c_cc[VTIME] = 0; /* 1 byte, no timer */
+
+ /* put terminal in raw mode after flushing */
+ if ( tcsetattr( 0, TCSADRAIN, &raw ) < 0 ) goto fatal;
+ rawmode = 1;
+ return 0;
+
+fatal:
+ errno = ENOTTY;
+ return -1;
+#endif
+}
+
+static void disableRawMode( void ) {
+#ifdef _WIN32
+ SetConsoleMode( console_in, oldMode );
+ console_in = 0;
+ console_out = 0;
+#else
+ if ( rawmode && tcsetattr ( 0, TCSADRAIN, &orig_termios ) != -1 )
+ rawmode = 0;
+#endif
+}
+
+// At exit we'll try to fix the terminal to the initial conditions
+static void linenoiseAtExit( void ) {
+ disableRawMode();
+}
+
+static int getColumns( void ) {
+ int cols;
+#ifdef _WIN32
+ CONSOLE_SCREEN_BUFFER_INFO inf;
+ GetConsoleScreenBufferInfo( console_out, &inf );
+ cols = inf.dwSize.X;
+#else
+ struct winsize ws;
+ cols = ( ioctl( 1, TIOCGWINSZ, &ws ) == -1 ) ? 80 : ws.ws_col;
+#endif
+ // cols is 0 in certain circumstances like inside debugger, which creates further issues
+ return (cols > 0) ? cols : 80;
+}
+
+static void setDisplayAttribute( bool enhancedDisplay ) {
+#ifdef _WIN32
+ if ( enhancedDisplay ) {
+ CONSOLE_SCREEN_BUFFER_INFO inf;
+ GetConsoleScreenBufferInfo( console_out, &inf );
+ oldDisplayAttribute = inf.wAttributes;
+ BYTE oldLowByte = oldDisplayAttribute & 0xFF;
+ BYTE newLowByte;
+ switch ( oldLowByte ) {
+ case 0x07:
+ //newLowByte = FOREGROUND_BLUE | FOREGROUND_INTENSITY; // too dim
+ //newLowByte = FOREGROUND_BLUE; // even dimmer
+ newLowByte = FOREGROUND_BLUE | FOREGROUND_GREEN; // most similar to xterm appearance
+ break;
+ case 0x70:
+ newLowByte = BACKGROUND_BLUE | BACKGROUND_INTENSITY;
+ break;
+ default:
+ newLowByte = oldLowByte ^ 0xFF; // default to inverse video
+ break;
+ }
+ inf.wAttributes = ( inf.wAttributes & 0xFF00 ) | newLowByte;
+ SetConsoleTextAttribute( console_out, inf.wAttributes );
+ }
+ else {
+ SetConsoleTextAttribute( console_out, oldDisplayAttribute );
+ }
+#else
+ if ( enhancedDisplay ) {
+ if ( write( 1, "\x1b[1;34m", 7 ) == -1 ) return; /* bright blue (visible with both B&W bg) */
+ }
+ else {
+ if ( write( 1, "\x1b[0m", 4 ) == -1 ) return; /* reset */
+ }
+#endif
+}
+
+/**
+ * Display the dynamic incremental search prompt and the current user input line.
+ * @param pi PromptInfo struct holding information about the prompt and our screen position
+ * @param buf input buffer to be displayed
+ * @param len count of characters in the buffer
+ * @param pos current cursor position within the buffer (0 <= pos <= len)
+ */
+//static void dynamicRefresh( DynamicPrompt& pi, char *buf, int len, int pos ) {
+static void dynamicRefresh( PromptBase& pi, char *buf, int len, int pos ) {
+
+ // calculate the position of the end of the prompt
+ int xEndOfPrompt, yEndOfPrompt;
+ calculateScreenPosition( 0, 0, pi.promptScreenColumns, pi.promptChars, xEndOfPrompt, yEndOfPrompt );
+ pi.promptIndentation = xEndOfPrompt;
+
+ // calculate the position of the end of the input line
+ int xEndOfInput, yEndOfInput;
+ calculateScreenPosition( xEndOfPrompt, yEndOfPrompt, pi.promptScreenColumns, len, xEndOfInput, yEndOfInput );
+
+ // calculate the desired position of the cursor
+ int xCursorPos, yCursorPos;
+ calculateScreenPosition( xEndOfPrompt, yEndOfPrompt, pi.promptScreenColumns, pos, xCursorPos, yCursorPos );
+
+#ifdef _WIN32
+ // position at the start of the prompt, clear to end of previous input
+ CONSOLE_SCREEN_BUFFER_INFO inf;
+ GetConsoleScreenBufferInfo( console_out, &inf );
+ inf.dwCursorPosition.X = 0;
+ inf.dwCursorPosition.Y -= pi.promptCursorRowOffset /*- pi.promptExtraLines*/;
+ SetConsoleCursorPosition( console_out, inf.dwCursorPosition );
+ DWORD count;
+ FillConsoleOutputCharacterA( console_out, ' ', pi.previousPromptLen + pi.promptPreviousInputLen, inf.dwCursorPosition, &count );
+ pi.previousPromptLen = pi.promptIndentation;
+ pi.promptPreviousInputLen = len;
+
+ // display the prompt
+ if ( write( 1, pi.promptText, pi.promptChars ) == -1 ) return;
+
+ // display the input line
+ if ( write( 1, buf, len ) == -1 ) return;
+
+ // position the cursor
+ GetConsoleScreenBufferInfo( console_out, &inf );
+ inf.dwCursorPosition.X = xCursorPos; // 0-based on Win32
+ inf.dwCursorPosition.Y -= yEndOfInput - yCursorPos;
+ SetConsoleCursorPosition( console_out, inf.dwCursorPosition );
+#else // _WIN32
+ char seq[64];
+ int cursorRowMovement = pi.promptCursorRowOffset - pi.promptExtraLines;
+ if ( cursorRowMovement > 0 ) { // move the cursor up as required
+ snprintf( seq, sizeof seq, "\x1b[%dA", cursorRowMovement );
+ if ( write( 1, seq, strlen( seq ) ) == -1 ) return;
+ }
+ // position at the start of the prompt, clear to end of screen
+ snprintf( seq, sizeof seq, "\x1b[1G\x1b[J" ); // 1-based on VT100
+ if ( write( 1, seq, strlen( seq ) ) == -1 ) return;
+
+ // display the prompt
+ if ( write( 1, pi.promptText, pi.promptChars ) == -1 ) return;
+
+ // display the input line
+ if ( write( 1, buf, len ) == -1 ) return;
+
+ // we have to generate our own newline on line wrap
+ if ( xEndOfInput == 0 && yEndOfInput > 0 )
+ if ( write( 1, "\n", 1 ) == -1 ) return;
+
+ // position the cursor
+ cursorRowMovement = yEndOfInput - yCursorPos;
+ if ( cursorRowMovement > 0 ) { // move the cursor up as required
+ snprintf( seq, sizeof seq, "\x1b[%dA", cursorRowMovement );
+ if ( write( 1, seq, strlen( seq ) ) == -1 ) return;
+ }
+ // position the cursor within the line
+ snprintf( seq, sizeof seq, "\x1b[%dG", xCursorPos + 1 ); // 1-based on VT100
+ if ( write( 1, seq, strlen( seq ) ) == -1 ) return;
+#endif
+
+ pi.promptCursorRowOffset = pi.promptExtraLines + yCursorPos; // remember row for next pass
+}
+
+/**
+ * Refresh the user's input line: the prompt is already onscreen and is not redrawn here
+ * @param pi PromptInfo struct holding information about the prompt and our screen position
+ */
+void InputBuffer::refreshLine( PromptBase& pi ) {
+
+ // check for a matching brace/bracket/paren, remember its position if found
+ int highlight = -1;
+ if ( pos < len ) {
+ /* this scans for a brace matching buf[pos] to highlight */
+ int scanDirection = 0;
+ if ( strchr( "}])", buf[pos] ) )
+ scanDirection = -1; /* backwards */
+ else if ( strchr( "{[(", buf[pos] ) )
+ scanDirection = 1; /* forwards */
+
+ if ( scanDirection ) {
+ int unmatched = scanDirection;
+ for ( int i = pos + scanDirection; i >= 0 && i < len; i += scanDirection ) {
+ /* TODO: the right thing when inside a string */
+ if ( strchr( "}])", buf[i] ) )
+ --unmatched;
+ else if ( strchr( "{[(", buf[i] ) )
+ ++unmatched;
+
+ if ( unmatched == 0 ) {
+ highlight = i;
+ break;
+ }
+ }
+ }
+ }
+
+ // calculate the position of the end of the input line
+ int xEndOfInput, yEndOfInput;
+ calculateScreenPosition( pi.promptIndentation, 0, pi.promptScreenColumns, len, xEndOfInput, yEndOfInput );
+
+ // calculate the desired position of the cursor
+ int xCursorPos, yCursorPos;
+ calculateScreenPosition( pi.promptIndentation, 0, pi.promptScreenColumns, pos, xCursorPos, yCursorPos );
+
+#ifdef _WIN32
+ // position at the end of the prompt, clear to end of previous input
+ CONSOLE_SCREEN_BUFFER_INFO inf;
+ GetConsoleScreenBufferInfo( console_out, &inf );
+ inf.dwCursorPosition.X = pi.promptIndentation; // 0-based on Win32
+ inf.dwCursorPosition.Y -= pi.promptCursorRowOffset - pi.promptExtraLines;
+ SetConsoleCursorPosition( console_out, inf.dwCursorPosition );
+ DWORD count;
+ if ( len < pi.promptPreviousInputLen )
+ FillConsoleOutputCharacterA( console_out, ' ', pi.promptPreviousInputLen, inf.dwCursorPosition, &count );
+ pi.promptPreviousInputLen = len;
+
+ // display the input line
+ if (highlight == -1) {
+ if ( write( 1, buf, len ) == -1 ) return;
+ }
+ else {
+ if (write( 1, buf, highlight ) == -1 ) return;
+ setDisplayAttribute( true ); /* bright blue (visible with both B&W bg) */
+ if ( write( 1, &buf[highlight], 1 ) == -1 ) return;
+ setDisplayAttribute( false );
+ if ( write( 1, buf + highlight + 1, len - highlight - 1 ) == -1 ) return;
+ }
+
+ // position the cursor
+ GetConsoleScreenBufferInfo( console_out, &inf );
+ inf.dwCursorPosition.X = xCursorPos; // 0-based on Win32
+ inf.dwCursorPosition.Y -= yEndOfInput - yCursorPos;
+ SetConsoleCursorPosition( console_out, inf.dwCursorPosition );
+#else // _WIN32
+ char seq[64];
+ int cursorRowMovement = pi.promptCursorRowOffset - pi.promptExtraLines;
+ if ( cursorRowMovement > 0 ) { // move the cursor up as required
+ snprintf( seq, sizeof seq, "\x1b[%dA", cursorRowMovement );
+ if ( write( 1, seq, strlen( seq ) ) == -1 ) return;
+ }
+ // position at the end of the prompt, clear to end of screen
+ snprintf( seq, sizeof seq, "\x1b[%dG\x1b[J", pi.promptIndentation + 1 ); // 1-based on VT100
+ if ( write( 1, seq, strlen( seq ) ) == -1 ) return;
+
+ if ( highlight == -1 ) { // write unhighlighted text
+ if ( write( 1, buf, len ) == -1 ) return;
+ }
+ else { // highlight the matching brace/bracket/parenthesis
+ if ( write( 1, buf, highlight ) == -1 ) return;
+ setDisplayAttribute( true );
+ if ( write( 1, &buf[highlight], 1 ) == -1 ) return;
+ setDisplayAttribute( false );
+ if ( write( 1, buf + highlight + 1, len - highlight - 1 ) == -1 ) return;
+ }
+
+ // we have to generate our own newline on line wrap
+ if ( xEndOfInput == 0 && yEndOfInput > 0 )
+ if ( write( 1, "\n", 1 ) == -1 ) return;
+
+ // position the cursor
+ cursorRowMovement = yEndOfInput - yCursorPos;
+ if ( cursorRowMovement > 0 ) { // move the cursor up as required
+ snprintf( seq, sizeof seq, "\x1b[%dA", cursorRowMovement );
+ if ( write( 1, seq, strlen( seq ) ) == -1 ) return;
+ }
+ // position the cursor within the line
+ snprintf( seq, sizeof seq, "\x1b[%dG", xCursorPos + 1 ); // 1-based on VT100
+ if ( write( 1, seq, strlen( seq ) ) == -1 ) return;
+#endif
+
+ pi.promptCursorRowOffset = pi.promptExtraLines + yCursorPos; // remember row for next pass
+}
+
+#ifndef _WIN32
+
+namespace EscapeSequenceProcessing { // move these out of global namespace
+
+// This chunk of code does parsing of the escape sequences sent by various Linux terminals.
+//
+// It handles arrow keys, Home, End and Delete keys by interpreting the sequences sent by
+// gnome terminal, xterm, rxvt, konsole, aterm and yakuake including the Alt and Ctrl key
+// combinations that are understood by linenoise.
+//
+// The parsing uses tables, a bunch of intermediate dispatch routines and a doDispatch
+// loop that reads the tables and sends control to "deeper" routines to continue the
+// parsing. The starting call to doDispatch( c, initialDispatch ) will eventually return
+// either a character (with optional CTRL and META bits set), or -1 if parsing fails, or
+// zero if an attempt to read from the keyboard fails.
+//
+// This is rather sloppy escape sequence processing, since we're not paying attention to what the
+// actual TERM is set to and are processing all key sequences for all terminals, but it works with
+// the most common keystrokes on the most common terminals. It's intricate, but the nested 'if'
+// statements required to do it directly would be worse. This way has the advantage of allowing
+// changes and extensions without having to touch a lot of code.
+//
+
+// This is a typedef for the routine called by doDispatch(). It takes the current character
+// as input, does any required processing including reading more characters and calling other
+// dispatch routines, then eventually returns the final (possibly extended or special) character.
+//
+typedef unsigned int ( *CharacterDispatchRoutine )( unsigned int );
+
+// This structure is used by doDispatch() to hold a list of characters to test for and
+// a list of routines to call if the character matches. The dispatch routine list is one
+// longer than the character list; the final entry is used if no character matches.
+//
+struct CharacterDispatch {
+ unsigned int len; // length of the chars list
+ const char* chars; // chars to test
+ CharacterDispatchRoutine* dispatch; // array of routines to call
+};
+
+// This dispatch routine is given a dispatch table and then farms work out to routines
+// listed in the table based on the character it is called with. The dispatch routines can
+// read more input characters to decide what should eventually be returned. Eventually,
+// a called routine returns either a character or -1 to indicate parsing failure.
+//
+static unsigned int doDispatch( unsigned int c, CharacterDispatch& dispatchTable ) {
+ for ( unsigned int i = 0; i < dispatchTable.len ; ++i ) {
+ if ( static_cast<unsigned char>( dispatchTable.chars[i] ) == c ) {
+ return dispatchTable.dispatch[i]( c );
+ }
+ }
+ return dispatchTable.dispatch[dispatchTable.len]( c );
+}
+
+static unsigned int thisKeyMetaCtrl = 0; // holds pre-set Meta and/or Ctrl modifiers
+
+// Final dispatch routines -- return something
+//
+static unsigned int normalKeyRoutine( unsigned int c ) { return thisKeyMetaCtrl | c; }
+static unsigned int upArrowKeyRoutine( unsigned int c ) { return thisKeyMetaCtrl | UP_ARROW_KEY; }
+static unsigned int downArrowKeyRoutine( unsigned int c ) { return thisKeyMetaCtrl | DOWN_ARROW_KEY; }
+static unsigned int rightArrowKeyRoutine( unsigned int c ) { return thisKeyMetaCtrl | RIGHT_ARROW_KEY; }
+static unsigned int leftArrowKeyRoutine( unsigned int c ) { return thisKeyMetaCtrl | LEFT_ARROW_KEY; }
+static unsigned int homeKeyRoutine( unsigned int c ) { return thisKeyMetaCtrl | HOME_KEY; }
+static unsigned int endKeyRoutine( unsigned int c ) { return thisKeyMetaCtrl | END_KEY; }
+static unsigned int deleteCharRoutine( unsigned int c ) { return thisKeyMetaCtrl | ctrlChar( 'H' ); } // key labeled Backspace
+static unsigned int deleteKeyRoutine( unsigned int c ) { return thisKeyMetaCtrl | DELETE_KEY; } // key labeled Delete
+static unsigned int ctrlUpArrowKeyRoutine( unsigned int c ) { return thisKeyMetaCtrl | CTRL | UP_ARROW_KEY; }
+static unsigned int ctrlDownArrowKeyRoutine( unsigned int c ) { return thisKeyMetaCtrl | CTRL | DOWN_ARROW_KEY; }
+static unsigned int ctrlRightArrowKeyRoutine( unsigned int c ) { return thisKeyMetaCtrl | CTRL | RIGHT_ARROW_KEY; }
+static unsigned int ctrlLeftArrowKeyRoutine( unsigned int c ) { return thisKeyMetaCtrl | CTRL | LEFT_ARROW_KEY; }
+static unsigned int escFailureRoutine( unsigned int c ) { beep(); return -1; }
+
+// Handle ESC [ 1 ; 3 (or 5) <more stuff> escape sequences
+//
+static CharacterDispatchRoutine escLeftBracket1Semicolon3or5Routines[] = { upArrowKeyRoutine, downArrowKeyRoutine, rightArrowKeyRoutine, leftArrowKeyRoutine, escFailureRoutine };
+static CharacterDispatch escLeftBracket1Semicolon3or5Dispatch = { 4, "ABCD", escLeftBracket1Semicolon3or5Routines };
+
+// Handle ESC [ 1 ; <more stuff> escape sequences
+//
+static unsigned int escLeftBracket1Semicolon3Routine( unsigned int c ) {
+ if ( read( 0, &c, 1 ) <= 0 ) return 0;
+ thisKeyMetaCtrl |= META;
+ return doDispatch( c, escLeftBracket1Semicolon3or5Dispatch );
+}
+static unsigned int escLeftBracket1Semicolon5Routine( unsigned int c ) {
+ if ( read( 0, &c, 1 ) <= 0 ) return 0;
+ thisKeyMetaCtrl |= CTRL;
+ return doDispatch( c, escLeftBracket1Semicolon3or5Dispatch );
+}
+static CharacterDispatchRoutine escLeftBracket1SemicolonRoutines[] = { escLeftBracket1Semicolon3Routine, escLeftBracket1Semicolon5Routine, escFailureRoutine };
+static CharacterDispatch escLeftBracket1SemicolonDispatch = { 2, "35", escLeftBracket1SemicolonRoutines };
+
+// Handle ESC [ 1 <more stuff> escape sequences
+//
+static unsigned int escLeftBracket1SemicolonRoutine( unsigned int c ) {
+ if ( read( 0, &c, 1 ) <= 0 ) return 0;
+ return doDispatch( c, escLeftBracket1SemicolonDispatch );
+}
+static CharacterDispatchRoutine escLeftBracket1Routines[] = { homeKeyRoutine, escLeftBracket1SemicolonRoutine, escFailureRoutine };
+static CharacterDispatch escLeftBracket1Dispatch = { 2, "~;", escLeftBracket1Routines };
+
+// Handle ESC [ 3 <more stuff> escape sequences
+//
+static CharacterDispatchRoutine escLeftBracket3Routines[] = { deleteKeyRoutine, escFailureRoutine };
+static CharacterDispatch escLeftBracket3Dispatch = { 1, "~", escLeftBracket3Routines };
+
+// Handle ESC [ 4 <more stuff> escape sequences
+//
+static CharacterDispatchRoutine escLeftBracket4Routines[] = { endKeyRoutine, escFailureRoutine };
+static CharacterDispatch escLeftBracket4Dispatch = { 1, "~", escLeftBracket4Routines };
+
+// Handle ESC [ 7 <more stuff> escape sequences
+//
+static CharacterDispatchRoutine escLeftBracket7Routines[] = { homeKeyRoutine, escFailureRoutine };
+static CharacterDispatch escLeftBracket7Dispatch = { 1, "~", escLeftBracket7Routines };
+
+// Handle ESC [ 8 <more stuff> escape sequences
+//
+static CharacterDispatchRoutine escLeftBracket8Routines[] = { endKeyRoutine, escFailureRoutine };
+static CharacterDispatch escLeftBracket8Dispatch = { 1, "~", escLeftBracket8Routines };
+
+// Handle ESC [ <digit> escape sequences
+//
+static unsigned int escLeftBracket0Routine( unsigned int c ) {
+ return escFailureRoutine( c );
+}
+static unsigned int escLeftBracket1Routine( unsigned int c ) {
+ if ( read( 0, &c, 1 ) <= 0 ) return 0;
+ return doDispatch( c, escLeftBracket1Dispatch );
+}
+static unsigned int escLeftBracket2Routine( unsigned int c ) {
+ return escFailureRoutine( c );
+}
+static unsigned int escLeftBracket3Routine( unsigned int c ) {
+ if ( read( 0, &c, 1 ) <= 0 ) return 0;
+ return doDispatch( c, escLeftBracket3Dispatch );
+}
+static unsigned int escLeftBracket4Routine( unsigned int c ) {
+ if ( read( 0, &c, 1 ) <= 0 ) return 0;
+ return doDispatch( c, escLeftBracket4Dispatch );
+}
+static unsigned int escLeftBracket5Routine( unsigned int c ) {
+ return escFailureRoutine( c );
+}
+static unsigned int escLeftBracket6Routine( unsigned int c ) {
+ return escFailureRoutine( c );
+}
+static unsigned int escLeftBracket7Routine( unsigned int c ) {
+ if ( read( 0, &c, 1 ) <= 0 ) return 0;
+ return doDispatch( c, escLeftBracket7Dispatch );
+}
+static unsigned int escLeftBracket8Routine( unsigned int c ) {
+ if ( read( 0, &c, 1 ) <= 0 ) return 0;
+ return doDispatch( c, escLeftBracket8Dispatch );
+}
+static unsigned int escLeftBracket9Routine( unsigned int c ) {
+ return escFailureRoutine( c );
+}
+
+// Handle ESC [ <more stuff> escape sequences
+//
+static CharacterDispatchRoutine escLeftBracketRoutines[] = {
+ upArrowKeyRoutine,
+ downArrowKeyRoutine,
+ rightArrowKeyRoutine,
+ leftArrowKeyRoutine,
+ homeKeyRoutine,
+ endKeyRoutine,
+ escLeftBracket0Routine,
+ escLeftBracket1Routine,
+ escLeftBracket2Routine,
+ escLeftBracket3Routine,
+ escLeftBracket4Routine,
+ escLeftBracket5Routine,
+ escLeftBracket6Routine,
+ escLeftBracket7Routine,
+ escLeftBracket8Routine,
+ escLeftBracket9Routine,
+ escFailureRoutine
+};
+static CharacterDispatch escLeftBracketDispatch = { 16, "ABCDHF0123456789", escLeftBracketRoutines };
+
+// Handle ESC O <char> escape sequences
+//
+static CharacterDispatchRoutine escORoutines[] = {
+ upArrowKeyRoutine,
+ downArrowKeyRoutine,
+ rightArrowKeyRoutine,
+ leftArrowKeyRoutine,
+ homeKeyRoutine,
+ endKeyRoutine,
+ ctrlUpArrowKeyRoutine,
+ ctrlDownArrowKeyRoutine,
+ ctrlRightArrowKeyRoutine,
+ ctrlLeftArrowKeyRoutine,
+ escFailureRoutine
+};
+static CharacterDispatch escODispatch = { 10, "ABCDHFabcd", escORoutines };
+
+// Initial ESC dispatch -- could be a Meta prefix or the start of an escape sequence
+//
+static unsigned int escLeftBracketRoutine( unsigned int c ) {
+ if ( read( 0, &c, 1 ) <= 0 ) return 0;
+ return doDispatch( c, escLeftBracketDispatch );
+}
+static unsigned int escORoutine( unsigned int c ) {
+ if ( read( 0, &c, 1 ) <= 0 ) return 0;
+ return doDispatch( c, escODispatch );
+}
+static unsigned int setMetaRoutine( unsigned int c ); // need forward reference
+static CharacterDispatchRoutine escRoutines[] = { escLeftBracketRoutine, escORoutine, setMetaRoutine };
+static CharacterDispatch escDispatch = { 2, "[O", escRoutines };
+
+// Initial dispatch -- we are not in the middle of anything yet
+//
+static unsigned int escRoutine( unsigned int c ) {
+ if ( read( 0, &c, 1 ) <= 0 ) return 0;
+ return doDispatch( c, escDispatch );
+}
+static unsigned int hibitCRoutine( unsigned int c ) {
+ // xterm sends a bizarre sequence for Alt combos: 'C'+0x80 then '!'+0x80 for Alt-a for example
+ if ( read( 0, &c, 1 ) <= 0 ) return 0;
+ if ( c >= ( ' ' | 0x80 ) && c <= ( '?' | 0x80 ) ) {
+ if ( c == ( '?' | 0x80 ) ) { // have to special case this, normal code would send
+ return META | ctrlChar( 'H' ); // Meta-_ (thank you xterm)
+ }
+ return META | ( c - 0x40 );
+ }
+ return escFailureRoutine( c );
+}
+static CharacterDispatchRoutine initialRoutines[] = { escRoutine, deleteCharRoutine, hibitCRoutine, normalKeyRoutine };
+static CharacterDispatch initialDispatch = { 3, "\x1B\x7F\xC3", initialRoutines };
+
+// Special handling for the ESC key because it does double duty
+//
+static unsigned int setMetaRoutine( unsigned int c ) {
+ thisKeyMetaCtrl = META;
+ if ( c == 0x1B ) { // another ESC, stay in ESC processing mode
+ if ( read( 0, &c, 1 ) <= 0 ) return 0;
+ return doDispatch( c, escDispatch );
+ }
+ return doDispatch( c, initialDispatch );
+}
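+
+// Illustrative walk-through of the tables above (no new behavior assumed): for the
+// common xterm up-arrow sequence ESC [ A, linenoiseReadChar() reads ESC and calls
+// doDispatch( 0x1B, initialDispatch ), which selects escRoutine(); escRoutine()
+// reads '[' and dispatches to escLeftBracketRoutine(), which reads 'A' and lands
+// on upArrowKeyRoutine(), returning thisKeyMetaCtrl | UP_ARROW_KEY == 0x101 when
+// no modifiers are pending.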
+
+} // namespace EscapeSequenceProcessing // move these out of global namespace
+
+#endif // #ifndef _WIN32
+
+// linenoiseReadChar -- read a keystroke or keychord from the keyboard, and translate it
+// into an encoded "keystroke". When convenient, extended keys are translated into their
+// simpler Emacs keystrokes, so an unmodified "left arrow" becomes Ctrl-B.
+//
+// A return value of zero means "no input available", and a return value of -1 means "invalid key".
+//
+static int linenoiseReadChar( void ){
+#ifdef _WIN32
+ INPUT_RECORD rec;
+ DWORD count;
+ int modifierKeys = 0;
+ bool escSeen = false;
+ while ( true ) {
+ ReadConsoleInputA( console_in, &rec, 1, &count );
+ if ( rec.EventType != KEY_EVENT || !rec.Event.KeyEvent.bKeyDown ) {
+ continue;
+ }
+ modifierKeys = 0;
+ if ( rec.Event.KeyEvent.dwControlKeyState & ( RIGHT_CTRL_PRESSED | LEFT_CTRL_PRESSED ) ) {
+ modifierKeys |= CTRL;
+ }
+ if ( rec.Event.KeyEvent.dwControlKeyState & ( RIGHT_ALT_PRESSED | LEFT_ALT_PRESSED ) ) {
+ modifierKeys |= META;
+ }
+ if ( escSeen ) {
+ modifierKeys |= META;
+ }
+ if ( rec.Event.KeyEvent.uChar.AsciiChar == 0 ) {
+ switch ( rec.Event.KeyEvent.wVirtualKeyCode ) {
+ case VK_LEFT: return modifierKeys | LEFT_ARROW_KEY;
+ case VK_RIGHT: return modifierKeys | RIGHT_ARROW_KEY;
+ case VK_UP: return modifierKeys | UP_ARROW_KEY;
+ case VK_DOWN: return modifierKeys | DOWN_ARROW_KEY;
+ case VK_DELETE: return modifierKeys | DELETE_KEY;
+ case VK_HOME: return modifierKeys | HOME_KEY;
+ case VK_END: return modifierKeys | END_KEY;
+ default: continue; // in raw mode, ReadConsoleInput shows shift, ctrl ...
+ } // ... ignore them
+ }
+ else if ( rec.Event.KeyEvent.uChar.AsciiChar == ctrlChar( '[' ) ) { // ESC, set flag for later
+ escSeen = true;
+ continue;
+ }
+ else {
+ // we got a real character, return it
+ return modifierKeys | rec.Event.KeyEvent.uChar.AsciiChar;
+ }
+ }
+#else
+ unsigned int c;
+ unsigned char oneChar;
+ int nread;
+
+ nread = read( 0, &oneChar, 1 );
+ if ( nread <= 0 )
+ return 0;
+ c = static_cast<unsigned int>( oneChar );
+
+ // If _DEBUG_LINUX_KEYBOARD is set, then ctrl-\ puts us into a keyboard debugging mode
+ // where we print out decimal and decoded values for whatever the "terminal" program
+ // gives us on different keystrokes. Hit ctrl-C to exit this mode.
+ //
+#define _DEBUG_LINUX_KEYBOARD
+#if defined(_DEBUG_LINUX_KEYBOARD)
+ if ( c == 28 ) { // ctrl-\, special debug mode, prints all keys hit, ctrl-C to get out.
+ printf( "\x1b[1G\n" ); /* go to first column of new line */
+ while ( true ) {
+ unsigned char keys[10];
+ int ret = read( 0, keys, 10 );
+
+ if ( ret <= 0 ) {
+ printf( "\nret: %d\n", ret );
+ }
+ for ( int i = 0; i < ret; ++i ) {
+ unsigned int key = static_cast<unsigned int>( keys[i] );
+ char * friendlyTextPtr;
+ char friendlyTextBuf[10];
+ const char * prefixText = (key < 0x80) ? "" : "highbit-";
+ unsigned int keyCopy = (key < 0x80) ? key : key - 0x80;
+ if ( keyCopy >= '!' && keyCopy <= '~' ) { // printable
+ friendlyTextBuf[0] = '\'';
+ friendlyTextBuf[1] = keyCopy;
+ friendlyTextBuf[2] = '\'';
+ friendlyTextBuf[3] = 0;
+ friendlyTextPtr = friendlyTextBuf;
+ }
+ else if ( keyCopy == ' ' ) {
+ friendlyTextPtr = (char *)"space";
+ }
+ else if (keyCopy == 27 ) {
+ friendlyTextPtr = (char *)"ESC";
+ }
+ else if (keyCopy == 0 ) {
+ friendlyTextPtr = (char *)"NUL";
+ }
+ else if (keyCopy == 127 ) {
+ friendlyTextPtr = (char *)"DEL";
+ }
+ else {
+ friendlyTextBuf[0] = '^';
+ friendlyTextBuf[1] = keyCopy + 0x40;
+ friendlyTextBuf[2] = 0;
+ friendlyTextPtr = friendlyTextBuf;
+ }
+ printf( "%d (%s%s) ", key, prefixText, friendlyTextPtr );
+ }
+ printf( "\x1b[1G\n" ); // go to first column of new line
+
+ // drop out of this loop on ctrl-C
+ if ( keys[0] == ctrlChar( 'C' ) ) {
+ return -1;
+ }
+ }
+ }
+#endif // _DEBUG_LINUX_KEYBOARD
+
+ EscapeSequenceProcessing::thisKeyMetaCtrl = 0; // no modifiers yet at initialDispatch
+ return EscapeSequenceProcessing::doDispatch( c, EscapeSequenceProcessing::initialDispatch );
+#endif // #_WIN32
+}
+
+static void freeCompletions( linenoiseCompletions* lc ) {
+ for ( size_t i = 0; i < lc->len; ++i )
+ free( lc->cvec[i] );
+ if ( lc->cvec )
+ free( lc->cvec );
+}
+
+// break characters that may precede items to be completed
+static const char breakChars[] = " =+-/\\*?\"'`&<>;|@{([])}";
+
+/**
+ * Handle command completion, using a completionCallback() routine to provide possible substitutions
+ * This routine handles the mechanics of updating the user's input buffer with possible replacement of
+ * text as the user selects a proposed completion string, or cancels the completion attempt.
+ * @param pi PromptInfo struct holding information about the prompt and our screen position
+ */
+int InputBuffer::completeLine( PromptInfo& pi ) {
+ linenoiseCompletions lc = { 0, NULL };
+ char c = 0;
+
+ // completionCallback() expects a parsable entity, so find the previous break character and extract
+ // a copy to parse. we also handle the case where tab is hit while not at end-of-line.
+ int startIndex = pos;
+ while ( --startIndex >= 0 ) {
+ if ( strchr( breakChars, buf[startIndex] ) ) {
+ break;
+ }
+ }
+ ++startIndex;
+ int itemLength = pos - startIndex;
+ char* parseItem = reinterpret_cast<char *>( malloc( itemLength + 1 ) );
+ int i = 0;
+ for ( ; i < itemLength; ++i ) {
+ parseItem[i] = buf[startIndex+i];
+ }
+ parseItem[i] = 0;
+
+ // get a list of completions
+ completionCallback( parseItem, &lc );
+ free( parseItem );
+ int displayLength = 0;
+ char * displayText = 0;
+ if ( lc.len == 0 ) {
+ beep();
+ }
+ else {
+ size_t i = 0;
+ size_t clen;
+
+ bool stop = false;
+ while ( ! stop ) {
+ /* Show completion or original buffer */
+ if ( i < lc.len ) {
+ clen = strlen( lc.cvec[i] );
+ displayLength = len + clen - itemLength;
+ displayText = reinterpret_cast<char *>( malloc( displayLength + 1 ) );
+ InputBuffer temp( displayText, displayLength + 1 );
+ temp.len = displayLength;
+ temp.pos = startIndex + clen;
+ int j = 0;
+ for ( ; j < startIndex; ++j )
+ displayText[j] = buf[j];
+ strcpy( &displayText[j], lc.cvec[i] );
+ strcpy( &displayText[j+clen], &buf[pos] );
+ displayText[displayLength] = 0;
+ temp.refreshLine( pi );
+ free( displayText );
+ }
+ else {
+ refreshLine( pi );
+ }
+
+ do {
+ c = linenoiseReadChar();
+ } while ( c == static_cast<char>( -1 ) );
+
+ switch ( c ) {
+
+ case 0:
+ freeCompletions( &lc );
+ return -1;
+
+ case ctrlChar( 'I' ): // ctrl-I/tab
+ i = ( i + 1 ) % ( lc.len + 1 );
+ if ( i == lc.len )
+ beep(); // beep after completing cycle
+ break;
+
+#if 0 // SERVER-4011 -- Escape only works to end command completion in Windows
+ // leaving code here for now in case this is where we will add Meta-R (revert-line) later
+ case 27: /* escape */
+ /* Re-show original buffer */
+ if ( i < lc.len )
+ refreshLine( pi, buf, *len, *pos );
+ stop = true;
+ break;
+#endif // SERVER-4011 -- Escape only works to end command completion in Windows
+
+ default:
+ /* Update buffer and return */
+ if ( i < lc.len ) {
+ clen = strlen( lc.cvec[i] );
+ displayLength = len + clen - itemLength;
+ displayText = (char *)malloc( displayLength + 1 );
+ int j = 0;
+ for ( ; j < startIndex; ++j )
+ displayText[j] = buf[j];
+ strcpy( &displayText[j], lc.cvec[i] );
+ strcpy( &displayText[j+clen], &buf[pos] );
+ displayText[displayLength] = 0;
+ strcpy( buf, displayText );
+ free( displayText );
+ pos = startIndex + clen;
+ len = displayLength;
+ }
+ stop = true;
+ break;
+ }
+ }
+ }
+
+ freeCompletions( &lc );
+ return c; /* Return last read character */
+}
+
+/**
+ * Clear the screen ONLY (no redisplay of anything)
+ */
+void linenoiseClearScreen( void ) {
+#ifdef _WIN32
+ COORD coord = {0, 0};
+ CONSOLE_SCREEN_BUFFER_INFO inf;
+ HANDLE screenHandle = GetStdHandle( STD_OUTPUT_HANDLE );
+ GetConsoleScreenBufferInfo( screenHandle, &inf );
+ SetConsoleCursorPosition( screenHandle, coord );
+ DWORD count;
+ FillConsoleOutputCharacterA( screenHandle, ' ', inf.dwSize.X * inf.dwSize.Y, coord, &count );
+#else
+ if ( write( 1, "\x1b[H\x1b[2J", 7 ) <= 0 ) return;
+#endif
+}
+
+void InputBuffer::clearScreen( PromptInfo& pi ) {
+ linenoiseClearScreen();
+ if ( write( 1, pi.promptText, pi.promptChars ) == -1 ) return;
+#ifndef _WIN32
+ // we have to generate our own newline on line wrap on Linux
+ if ( pi.promptIndentation == 0 && pi.promptExtraLines > 0 )
+ if ( write( 1, "\n", 1 ) == -1 ) return;
+#endif
+ pi.promptCursorRowOffset = pi.promptExtraLines;
+ refreshLine( pi );
+}
+
+// convert {CTRL + 'A'}, {CTRL + 'a'} and {CTRL + ctrlChar( 'A' )} into ctrlChar( 'A' )
+// leave META alone
+//
+static int cleanupCtrl( int c ) {
+ if ( c & CTRL ) {
+ int d = c & 0x1FF;
+ if ( d >= 'a' && d <= 'z' ) {
+ c = ( c + ( 'a' - ctrlChar( 'A' ) ) ) & ~CTRL;
+ }
+ if ( d >= 'A' && d <= 'Z' ) {
+ c = ( c + ( 'A' - ctrlChar( 'A' ) ) ) & ~CTRL;
+ }
+ if ( d >= ctrlChar( 'A' ) && d <= ctrlChar( 'Z' ) ) {
+ c = c & ~CTRL;
+ }
+ }
+ return c;
+}
+
+/**
+ * Incremental history search -- take over the prompt and keyboard as the user types a search string,
+ * deletes characters from it, changes direction, and either accepts the found line (for execution or
+ * editing) or cancels.
+ * @param pi PromptInfo struct holding information about the (old, static) prompt and our screen position
+ * @param startChar the character that began the search, used to set the initial direction
+ */
+int InputBuffer::incrementalHistorySearch( PromptInfo& pi, int startChar ) {
+
+ // add the current line to the history list so we don't have to special case it
+ history[historyLen - 1] = reinterpret_cast<char *>( realloc( history[historyLen - 1], len + 1 ) );
+ strcpy( history[historyLen - 1], buf );
+ int historyLineLength = len;
+ int historyLinePosition = pos;
+ char emptyBuffer[1];
+ InputBuffer empty( emptyBuffer, 1 );
+ empty.refreshLine( pi ); // erase the old input first
+ DynamicPrompt dp( pi, ( startChar == ctrlChar( 'R' ) ) ? -1 : 1 );
+
+ dp.previousPromptLen = pi.previousPromptLen;
+ dp.promptPreviousInputLen = pi.promptPreviousInputLen;
+ dynamicRefresh( dp, history[historyLen - 1], historyLineLength, historyLinePosition ); // draw user's text with our prompt
+
+ // loop until we get an exit character
+ int c;
+ bool keepLooping = true;
+ bool useSearchedLine = true;
+ bool searchAgain = false;
+ while ( keepLooping ) {
+ c = linenoiseReadChar();
+ c = cleanupCtrl( c ); // convert CTRL + <char> into normal ctrl
+ switch ( c ) {
+
+ // these characters keep the selected text but do not execute it
+ case ctrlChar( 'A' ): // ctrl-A, move cursor to start of line
+ case HOME_KEY:
+ case ctrlChar( 'B' ): // ctrl-B, move cursor left by one character
+ case LEFT_ARROW_KEY:
+ case META + 'b': // meta-B, move cursor left by one word
+ case META + 'B':
+ case CTRL + LEFT_ARROW_KEY:
+ case META + LEFT_ARROW_KEY: // Emacs allows Meta, bash & readline don't
+ case ctrlChar( 'D' ):
+ case META + 'd': // meta-D, kill word to right of cursor
+ case META + 'D':
+ case ctrlChar( 'E' ): // ctrl-E, move cursor to end of line
+ case END_KEY:
+ case ctrlChar( 'F' ): // ctrl-F, move cursor right by one character
+ case RIGHT_ARROW_KEY:
+ case META + 'f': // meta-F, move cursor right by one word
+ case META + 'F':
+ case CTRL + RIGHT_ARROW_KEY:
+ case META + RIGHT_ARROW_KEY: // Emacs allows Meta, bash & readline don't
+ case META + ctrlChar( 'H' ):
+ case ctrlChar( 'J' ):
+ case ctrlChar( 'K' ): // ctrl-K, kill from cursor to end of line
+ case ctrlChar( 'M' ):
+ case ctrlChar( 'N' ): // ctrl-N, recall next line in history
+ case ctrlChar( 'P' ): // ctrl-P, recall previous line in history
+ case DOWN_ARROW_KEY:
+ case UP_ARROW_KEY:
+ case ctrlChar( 'T' ): // ctrl-T, transpose characters
+ case ctrlChar( 'U' ): // ctrl-U, kill all characters to the left of the cursor
+ case ctrlChar( 'W' ):
+ case META + 'y': // meta-Y, "yank-pop", rotate popped text
+ case META + 'Y':
+ case 127:
+ case DELETE_KEY:
+ keepLooping = false;
+ break;
+
+ // these characters revert the input line to its previous state
+ case ctrlChar( 'C' ): // ctrl-C, abort this line
+ case ctrlChar( 'G' ):
+ case ctrlChar( 'L' ): // ctrl-L, clear screen and redisplay line
+ keepLooping = false;
+ useSearchedLine = false;
+ if ( c != ctrlChar( 'L' ) ) {
+ c = -1; // ctrl-C and ctrl-G just abort the search and do nothing else
+ }
+ break;
+
+ // these characters stay in search mode and update the display
+ case ctrlChar( 'S' ):
+ case ctrlChar( 'R' ):
+ if ( dp.searchTextLen == 0 ) { // if no current search text, recall previous text
+ dp.updateSearchText( previousSearchText.c_str() );
+ }
+ if ( ( dp.direction == 1 && c == ctrlChar( 'R' ) ) ||
+ ( dp.direction == -1 && c == ctrlChar( 'S' ) ) ) {
+ dp.direction = 0 - dp.direction; // reverse direction
+ dp.updateSearchPrompt(); // change the prompt
+ }
+ else {
+ searchAgain = true; // same direction, search again
+ }
+ break;
+
+ // job control is its own thing
+#ifndef _WIN32
+ case ctrlChar( 'Z' ): // ctrl-Z, job control
+ disableRawMode(); // Returning to Linux (whatever) shell, leave raw mode
+ raise( SIGSTOP ); // Break out in mid-line
+ enableRawMode(); // Back from Linux shell, re-enter raw mode
+ dynamicRefresh( dp, history[historyIndex], historyLineLength, historyLinePosition );
+ continue;
+ break;
+#endif
+
+ // these characters update the search string, and hence the selected input line
+ case ctrlChar( 'H' ): // backspace/ctrl-H, delete char to left of cursor
+ if ( dp.searchTextLen > 0 ) {
+ --dp.searchTextLen;
+ dp.searchText[dp.searchTextLen] = 0;
+ string newSearchText( dp.searchText );
+ dp.updateSearchText( newSearchText.c_str() );
+ }
+ else {
+ beep();
+ }
+ break;
+
+ case ctrlChar( 'Y' ): // ctrl-Y, yank killed text
+ break;
+
+ default:
+ if ( c >= ' ' && c < 256 ) { // not an action character
+ string newSearchText = string( dp.searchText ) + static_cast<char>( c );
+ dp.updateSearchText( newSearchText.c_str() );
+ }
+ else {
+ beep();
+ }
+ } // switch
+
+ // if we are staying in search mode, search now
+ if ( keepLooping ) {
+ if ( dp.searchTextLen > 0 ) {
+ bool found = false;
+ int historySearchIndex = historyIndex;
+ int lineLength = historyLineLength;
+ int lineSearchPos = historyLinePosition;
+ if ( searchAgain ) {
+ lineSearchPos += dp.direction;
+ }
+ searchAgain = false;
+ while ( true ) {
+ while ( ( dp.direction > 0 ) ? ( lineSearchPos < lineLength ) : ( lineSearchPos >= 0 ) ) {
+ if ( strncmp( dp.searchText, &history[historySearchIndex][lineSearchPos], dp.searchTextLen) == 0 ) {
+ found = true;
+ break;
+ }
+ lineSearchPos += dp.direction;
+ }
+ if ( found ) {
+ historyIndex = historySearchIndex;
+ historyLineLength = lineLength;
+ historyLinePosition = lineSearchPos;
+ break;
+ }
+ else if ( ( dp.direction > 0 ) ? ( historySearchIndex < historyLen - 1 ) : ( historySearchIndex > 0 ) ) {
+ historySearchIndex += dp.direction;
+ lineLength = strlen( history[historySearchIndex] );
+ lineSearchPos = ( dp.direction > 0 ) ? 0 : ( lineLength - dp.searchTextLen );
+ }
+ else {
+ beep();
+ break;
+ }
+ }; // while
+ }
+ dynamicRefresh( dp, history[historyIndex], historyLineLength, historyLinePosition ); // draw user's text with our prompt
+ }
+ } // while
+
+ // leaving history search, restore previous prompt, maybe make searched line current
+ PromptBase pb;
+ pb.promptText = &pi.promptText[pi.promptLastLinePosition];
+ pb.promptChars = pi.promptIndentation;
+ pb.promptExtraLines = 0;
+ pb.promptIndentation = pi.promptIndentation;
+ pb.promptLastLinePosition = 0;
+ pb.promptPreviousInputLen = historyLineLength;
+ pb.promptCursorRowOffset = dp.promptCursorRowOffset;
+ pb.promptScreenColumns = pi.promptScreenColumns;
+ pb.previousPromptLen = dp.promptChars;
+ if ( useSearchedLine ) {
+ strcpy( buf, history[historyIndex] );
+ len = historyLineLength;
+ pos = historyLinePosition;
+ }
+ dynamicRefresh( pb, buf, len, pos ); // redraw the original prompt with current input
+ pi.promptPreviousInputLen = len;
+ pi.promptCursorRowOffset = pi.promptExtraLines + pb.promptCursorRowOffset;
+
+ previousSearchText = dp.searchText; // save search text for possible reuse on ctrl-R ctrl-R
+ return c; // pass a character or -1 back to main loop
+}
+
+int InputBuffer::getInputLine( PromptInfo& pi ) {
+ // The latest history entry is always our current buffer
+ linenoiseHistoryAdd( "" );
+ historyIndex = historyLen - 1;
+
+ // display the prompt
+ if ( write( 1, pi.promptText, pi.promptChars ) == -1 ) return -1;
+
+#ifndef _WIN32
+ // we have to generate our own newline on line wrap on Linux
+ if ( pi.promptIndentation == 0 && pi.promptExtraLines > 0 )
+ if ( write( 1, "\n", 1 ) == -1 ) return -1;
+#endif
+
+ // the cursor starts out at the end of the prompt
+ pi.promptCursorRowOffset = pi.promptExtraLines;
+
+ // kill and yank start in "other" mode
+ killRing.lastAction = KillRing::actionOther;
+
+ // when history search returns control to us, we execute its terminating keystroke
+ int terminatingKeystroke = -1;
+
+ // loop collecting characters, responding to ctrl characters
+ while ( true ) {
+ int c;
+ if ( terminatingKeystroke == -1 ) {
+ c = linenoiseReadChar(); // get a new keystroke
+ }
+ else {
+ c = terminatingKeystroke; // use the terminating keystroke from search
+ terminatingKeystroke = -1; // clear it once we've used it
+ }
+ c = cleanupCtrl( c ); // convert CTRL + <char> into normal ctrl
+
+ if ( c == 0 )
+ return len;
+
+ if ( c == -1 ) {
+ refreshLine( pi );
+ continue;
+ }
+
+ // ctrl-I/tab, command completion, needs to be before switch statement
+ if ( c == ctrlChar( 'I' ) && completionCallback ) {
+ killRing.lastAction = KillRing::actionOther;
+
+ // completeLine does the actual completion and replacement
+ c = completeLine( pi );
+
+ if ( c < 0 ) // return on error
+ return len;
+
+ if ( c == 0 ) // read next character when 0
+ continue;
+
+ // deliberate fall-through here, so we use the terminating character
+ }
+
+ switch ( c ) {
+
+ case ctrlChar( 'A' ): // ctrl-A, move cursor to start of line
+ case HOME_KEY:
+ killRing.lastAction = KillRing::actionOther;
+ pos = 0;
+ refreshLine( pi );
+ break;
+
+ case ctrlChar( 'B' ): // ctrl-B, move cursor left by one character
+ case LEFT_ARROW_KEY:
+ killRing.lastAction = KillRing::actionOther;
+ if ( pos > 0 ) {
+ --pos;
+ refreshLine( pi );
+ }
+ break;
+
+ case META + 'b': // meta-B, move cursor left by one word
+ case META + 'B':
+ case CTRL + LEFT_ARROW_KEY:
+ case META + LEFT_ARROW_KEY: // Emacs allows Meta, bash & readline don't
+ killRing.lastAction = KillRing::actionOther;
+ if ( pos > 0 ) {
+ while ( pos > 0 && !isalnum( buf[pos - 1] ) ) {
+ --pos;
+ }
+ while ( pos > 0 && isalnum( buf[pos - 1] ) ) {
+ --pos;
+ }
+ refreshLine( pi );
+ }
+ break;
+
+ case ctrlChar( 'C' ): // ctrl-C, abort this line
+ killRing.lastAction = KillRing::actionOther;
+ errno = EAGAIN;
+ --historyLen;
+ free( history[historyLen] );
+ // we need one last refresh with the cursor at the end of the line
+ // so we don't display the next prompt over the previous input line
+ pos = len; // pass len as pos for EOL
+ refreshLine( pi );
+ if ( write( 1, "^C", 2 ) == -1 ) return -1; // Display the ^C we got
+ return -1;
+
+ case META + 'c': // meta-C, give word initial Cap
+ case META + 'C':
+ killRing.lastAction = KillRing::actionOther;
+ if ( pos < len ) {
+ while ( pos < len && !isalnum( buf[pos] ) ) {
+ ++pos;
+ }
+ if ( pos < len && isalnum( buf[pos] ) ) {
+ if ( buf[pos] >= 'a' && buf[pos] <= 'z' ) {
+ buf[pos] += 'A' - 'a';
+ }
+ ++pos;
+ }
+ while ( pos < len && isalnum( buf[pos] ) ) {
+ if ( buf[pos] >= 'A' && buf[pos] <= 'Z' ) {
+ buf[pos] += 'a' - 'A';
+ }
+ ++pos;
+ }
+ refreshLine( pi );
+ }
+ break;
+
+ // ctrl-D, delete the character under the cursor
+ // on an empty line, exit the shell
+ case ctrlChar( 'D' ):
+ killRing.lastAction = KillRing::actionOther;
+ if ( len > 0 && pos < len ) {
+ memmove( buf + pos, buf + pos + 1, len - pos );
+ --len;
+ refreshLine( pi );
+ }
+ else if ( len == 0 ) {
+ --historyLen;
+ free( history[historyLen] );
+ return -1;
+ }
+ break;
+
+ case META + 'd': // meta-D, kill word to right of cursor
+ case META + 'D':
+ if ( pos < len ) {
+ int endingPos = pos;
+ while ( endingPos < len && !isalnum( buf[endingPos] ) ) {
+ ++endingPos;
+ }
+ while ( endingPos < len && isalnum( buf[endingPos] ) ) {
+ ++endingPos;
+ }
+ killRing.kill( &buf[pos], endingPos - pos, true );
+ memmove( buf + pos, buf + endingPos, len - endingPos + 1 );
+ len -= endingPos - pos;
+ refreshLine( pi );
+ }
+ killRing.lastAction = KillRing::actionKill;
+ break;
+
+ case ctrlChar( 'E' ): // ctrl-E, move cursor to end of line
+ case END_KEY:
+ killRing.lastAction = KillRing::actionOther;
+ pos = len;
+ refreshLine( pi );
+ break;
+
+ case ctrlChar( 'F' ): // ctrl-F, move cursor right by one character
+ case RIGHT_ARROW_KEY:
+ killRing.lastAction = KillRing::actionOther;
+ if ( pos < len ) {
+ ++pos;
+ refreshLine( pi );
+ }
+ break;
+
+ case META + 'f': // meta-F, move cursor right by one word
+ case META + 'F':
+ case CTRL + RIGHT_ARROW_KEY:
+ case META + RIGHT_ARROW_KEY: // Emacs allows Meta, bash & readline don't
+ killRing.lastAction = KillRing::actionOther;
+ if ( pos < len ) {
+ while ( pos < len && !isalnum( buf[pos] ) ) {
+ ++pos;
+ }
+ while ( pos < len && isalnum( buf[pos] ) ) {
+ ++pos;
+ }
+ refreshLine( pi );
+ }
+ break;
+
+ case ctrlChar( 'H' ): // backspace/ctrl-H, delete char to left of cursor
+ killRing.lastAction = KillRing::actionOther;
+ if ( pos > 0 ) {
+ memmove( buf + pos - 1, buf + pos, 1 + len - pos );
+ --pos;
+ --len;
+ refreshLine( pi );
+ }
+ break;
+
+ // meta-Backspace, kill word to left of cursor
+ case META + ctrlChar( 'H' ):
+ if ( pos > 0 ) {
+ int startingPos = pos;
+ while ( pos > 0 && !isalnum( buf[pos - 1] ) ) {
+ --pos;
+ }
+ while ( pos > 0 && isalnum( buf[pos - 1] ) ) {
+ --pos;
+ }
+ killRing.kill( &buf[pos], startingPos - pos, false );
+ memmove( buf + pos, buf + startingPos, len - startingPos + 1 );
+ len -= startingPos - pos;
+ refreshLine( pi );
+ }
+ killRing.lastAction = KillRing::actionKill;
+ break;
+
+ case ctrlChar( 'J' ): // ctrl-J/linefeed/newline, accept line
+ case ctrlChar( 'M' ): // ctrl-M/return/enter
+ killRing.lastAction = KillRing::actionOther;
+ // we need one last refresh with the cursor at the end of the line
+ // so we don't display the next prompt over the previous input line
+ pos = len; // pass len as pos for EOL
+ refreshLine( pi );
+ --historyLen;
+ free( history[historyLen] );
+ return len;
+
+ case ctrlChar( 'K' ): // ctrl-K, kill from cursor to end of line
+ killRing.kill( &buf[pos], len - pos, true );
+ buf[pos] = '\0';
+ len = pos;
+ refreshLine( pi );
+ killRing.lastAction = KillRing::actionKill;
+ break;
+
+ case ctrlChar( 'L' ): // ctrl-L, clear screen and redisplay line
+ clearScreen( pi );
+ break;
+
+ case META + 'l': // meta-L, lowercase word
+ case META + 'L':
+ killRing.lastAction = KillRing::actionOther;
+ if ( pos < len ) {
+ while ( pos < len && !isalnum( buf[pos] ) ) {
+ ++pos;
+ }
+ while ( pos < len && isalnum( buf[pos] ) ) {
+ if ( buf[pos] >= 'A' && buf[pos] <= 'Z' ) {
+ buf[pos] += 'a' - 'A';
+ }
+ ++pos;
+ }
+ refreshLine( pi );
+ }
+ break;
+
+ case ctrlChar( 'N' ): // ctrl-N, recall next line in history
+ case ctrlChar( 'P' ): // ctrl-P, recall previous line in history
+ case DOWN_ARROW_KEY:
+ case UP_ARROW_KEY:
+ killRing.lastAction = KillRing::actionOther;
+ if ( historyLen > 1 ) {
+ /* Update the current history entry before we
+ * overwrite it with the next one. */
+ free( history[historyIndex] );
+ history[historyIndex] = strdup( buf );
+ /* Show the new entry */
+ if ( c == UP_ARROW_KEY ) {
+ c = ctrlChar( 'P' );
+ }
+ historyIndex += ( c == ctrlChar( 'P' ) ) ? -1 : 1;
+ if ( historyIndex < 0 ) {
+ historyIndex = 0;
+ break;
+ }
+ else if ( historyIndex >= historyLen ) {
+ historyIndex = historyLen - 1;
+ break;
+ }
+ strncpy( buf, history[historyIndex], buflen );
+ buf[buflen] = '\0';
+ len = pos = strlen( buf ); // place cursor at end of line
+ refreshLine( pi );
+ }
+ break;
+
+ case ctrlChar( 'R' ): // ctrl-R, reverse history search
+ case ctrlChar( 'S' ): // ctrl-S, forward history search
+ terminatingKeystroke = incrementalHistorySearch( pi, c );
+ break;
+
+ case ctrlChar( 'T' ): // ctrl-T, transpose characters
+ killRing.lastAction = KillRing::actionOther;
+ if ( pos > 0 && len > 1 ) {
+ size_t leftCharPos = ( pos == len ) ? pos - 2 : pos - 1;
+ char aux = buf[leftCharPos];
+ buf[leftCharPos] = buf[leftCharPos+1];
+ buf[leftCharPos+1] = aux;
+ if ( pos != len )
+ ++pos;
+ refreshLine( pi );
+ }
+ break;
+
+ case ctrlChar( 'U' ): // ctrl-U, kill all characters to the left of the cursor
+ if ( pos > 0 ) {
+ killRing.kill( &buf[0], pos, false );
+ len -= pos;
+ memmove( buf, buf + pos, len + 1 );
+ pos = 0;
+ refreshLine( pi );
+ }
+ killRing.lastAction = KillRing::actionKill;
+ break;
+
+ case META + 'u': // meta-U, uppercase word
+ case META + 'U':
+ killRing.lastAction = KillRing::actionOther;
+ if ( pos < len ) {
+ while ( pos < len && !isalnum( buf[pos] ) ) {
+ ++pos;
+ }
+ while ( pos < len && isalnum( buf[pos] ) ) {
+ if ( buf[pos] >= 'a' && buf[pos] <= 'z' ) {
+ buf[pos] += 'A' - 'a';
+ }
+ ++pos;
+ }
+ refreshLine( pi );
+ }
+ break;
+
+ // ctrl-W, kill to whitespace (not word) to left of cursor
+ case ctrlChar( 'W' ):
+ if ( pos > 0 ) {
+ int startingPos = pos;
+ while ( pos > 0 && buf[pos - 1] == ' ' ) {
+ --pos;
+ }
+ while ( pos > 0 && buf[pos - 1] != ' ' ) {
+ --pos;
+ }
+ killRing.kill( &buf[pos], startingPos - pos, false );
+ memmove( buf + pos, buf + startingPos, len - startingPos + 1 );
+ len -= startingPos - pos;
+ refreshLine( pi );
+ }
+ killRing.lastAction = KillRing::actionKill;
+ break;
+
+ case ctrlChar( 'Y' ): // ctrl-Y, yank killed text
+ {
+ string* restoredText = killRing.yank();
+ if ( restoredText ) {
+ int restoredTextLen = restoredText->length();
+ memmove( buf + pos + restoredTextLen, buf + pos, len - pos + 1 );
+ memmove( buf + pos, restoredText->c_str(), restoredTextLen );
+ pos += restoredTextLen;
+ len += restoredTextLen;
+ refreshLine( pi );
+ killRing.lastAction = KillRing::actionYank;
+ killRing.lastYankSize = restoredTextLen;
+ }
+ else {
+ beep();
+ }
+ }
+ break;
+
+ case META + 'y': // meta-Y, "yank-pop", rotate popped text
+ case META + 'Y':
+ if ( killRing.lastAction == KillRing::actionYank ) {
+ string* restoredText = killRing.yankPop();
+ if ( restoredText ) {
+ int restoredTextLen = restoredText->length();
+ if ( restoredTextLen > killRing.lastYankSize ) {
+ memmove( buf + pos + restoredTextLen - killRing.lastYankSize, buf + pos, len - pos + 1 );
+ memmove( buf + pos - killRing.lastYankSize, restoredText->c_str(), restoredTextLen );
+ }
+ else {
+ memmove( buf + pos - killRing.lastYankSize, restoredText->c_str(), restoredTextLen );
+ memmove( buf + pos + restoredTextLen - killRing.lastYankSize, buf + pos, len - pos + 1 );
+ }
+ pos += restoredTextLen - killRing.lastYankSize;
+ len += restoredTextLen - killRing.lastYankSize;
+ killRing.lastYankSize = restoredTextLen;
+ refreshLine( pi );
+ break;
+ }
+ }
+ beep();
+ break;
+
+#ifndef _WIN32
+ case ctrlChar( 'Z' ): // ctrl-Z, job control
+ disableRawMode(); // Returning to Linux (whatever) shell, leave raw mode
+ raise( SIGSTOP ); // Break out in mid-line
+ enableRawMode(); // Back from Linux shell, re-enter raw mode
+ if ( write( 1, pi.promptText, pi.promptChars ) == -1 ) break; // Redraw prompt
+ refreshLine( pi ); // Refresh the line
+ break;
+#endif
+
+ // DEL, delete the character under the cursor
+ case 127:
+ case DELETE_KEY:
+ killRing.lastAction = KillRing::actionOther;
+ if ( len > 0 && pos < len ) {
+ memmove( buf + pos, buf + pos + 1, len - pos );
+ --len;
+ refreshLine( pi );
+ }
+ break;
+
+ // not one of our special characters, maybe insert it in the buffer
+ default:
+ killRing.lastAction = KillRing::actionOther;
+ if ( c > 0xFF ) { // beep on unknown Ctrl and/or Meta keys
+ beep();
+ break;
+ }
+ if ( len < buflen ) {
+ if ( static_cast<unsigned char>( c ) < 32 ) { // don't insert control characters
+ beep();
+ break;
+ }
+ if ( len == pos ) { // at end of buffer
+ buf[pos] = c;
+ ++pos;
+ ++len;
+ buf[len] = '\0';
+ if ( pi.promptIndentation + len < pi.promptScreenColumns ) {
+ if ( len > pi.promptPreviousInputLen )
+ pi.promptPreviousInputLen = len;
+ /* Avoid a full update of the line in the
+ * trivial case. */
+ if ( write( 1, &c, 1) == -1 ) return -1;
+ }
+ else {
+ refreshLine( pi );
+ }
+ }
+ else { // not at end of buffer, have to move characters to our right
+ memmove( buf + pos + 1, buf + pos, len - pos );
+ buf[pos] = c;
+ ++len;
+ ++pos;
+ buf[len] = '\0';
+ refreshLine( pi );
+ }
+ }
+ break;
+ }
+ }
+ return len;
+}
+
+/**
+ * linenoise is a readline replacement.
+ *
+ * call it with a prompt to display and it will return a line of input from the user
+ *
+ * @param prompt text of prompt to display to the user
+ * @return the returned string belongs to the caller and must be freed to prevent memory leaks
+ */
+char* linenoise( const char* prompt ) {
+ char buf[LINENOISE_MAX_LINE]; // buffer for user's input
+ int count;
+ if ( isatty( STDIN_FILENO ) ) { // input is from a terminal
+ if ( enableRawMode() == -1 )
+ return NULL;
+ PromptInfo pi( prompt, getColumns() ); // struct to hold edited copy of prompt & misc prompt info
+ InputBuffer ib( buf, LINENOISE_MAX_LINE );
+ count = ib.getInputLine( pi );
+ disableRawMode();
+ printf( "\n" );
+ if ( count == -1 )
+ return NULL;
+ }
+ else { // input not from a terminal, we should work with piped input, i.e. redirected stdin
+ if ( fgets( buf, sizeof buf, stdin ) == NULL )
+ return NULL;
+
+ // if fgets() gave us the newline, remove it
+ int count = strlen( buf );
+ if ( count && buf[count-1] == '\n' ) {
+ --count;
+ buf[count] = '\0';
+ }
+ }
+ return strdup( buf ); // caller must free buffer
+}
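A minimal sketch of how a caller might drive this entry point, matching the contract in the comment above (NULL on error or end of input, otherwise a heap copy the caller must free). The prompt string and the echoing behaviour are invented for illustration:

// Hypothetical read-eval loop, assuming linenoise.h is on the include path.
#include <cstdio>
#include <cstdlib>
#include "linenoise.h"

int main() {
    char* line;
    while ( ( line = linenoise( "demo> " ) ) != NULL ) {  // NULL on EOF or error
        if ( line[0] != '\0' ) {
            linenoiseHistoryAdd( line );                   // recallable via arrows / ctrl-R
            printf( "echo: %s\n", line );
        }
        free( line );                                      // the copy belongs to the caller
    }
    return 0;
}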
+
+/* Register a callback function to be called for tab-completion. */
+void linenoiseSetCompletionCallback( linenoiseCompletionCallback* fn ) {
+ completionCallback = fn;
+}
+
+void linenoiseAddCompletion( linenoiseCompletions* lc, const char* str ) {
+ size_t len = strlen( str );
+ char* copy = reinterpret_cast<char *>( malloc( len + 1 ) );
+ memcpy( copy, str, len + 1 );
+ lc->cvec = reinterpret_cast<char**>( realloc( lc->cvec, sizeof( char* ) * ( lc->len + 1 ) ) );
+ lc->cvec[lc->len++] = copy;
+}
+
+int linenoiseHistoryAdd( const char* line ) {
+ if ( historyMaxLen == 0 )
+ return 0;
+ if ( history == NULL ) {
+ history = reinterpret_cast<char**>( malloc( sizeof( char* ) * historyMaxLen ) );
+ if (history == NULL)
+ return 0;
+ memset( history, 0, ( sizeof(char*) * historyMaxLen ) );
+ }
+ char* linecopy = strdup( line );
+ if ( ! linecopy )
+ return 0;
+ if ( historyLen == historyMaxLen ) {
+ free( history[0] );
+ memmove( history, history + 1, sizeof(char*) * ( historyMaxLen - 1 ) );
+ --historyLen;
+ }
+
+ // convert newlines in multi-line code to spaces before storing
+ char *p = linecopy;
+ while ( *p ) {
+ if ( *p == '\n' )
+ *p = ' ';
+ ++p;
+ }
+ history[historyLen] = linecopy;
+ ++historyLen;
+ return 1;
+}
+
+int linenoiseHistorySetMaxLen( int len ) {
+ if ( len < 1 )
+ return 0;
+ if ( history ) {
+ int tocopy = historyLen;
+ char** newHistory = reinterpret_cast<char**>( malloc( sizeof(char*) * len ) );
+ if ( newHistory == NULL )
+ return 0;
+ if ( len < tocopy )
+ tocopy = len;
+ memcpy( newHistory, history + historyMaxLen - tocopy, sizeof(char*) * tocopy );
+ free( history );
+ history = newHistory;
+ }
+ historyMaxLen = len;
+ if ( historyLen > historyMaxLen )
+ historyLen = historyMaxLen;
+ return 1;
+}
+
+/* Save the history in the specified file. On success 0 is returned
+ * otherwise -1 is returned. */
+int linenoiseHistorySave( const char* filename ) {
+ FILE* fp = fopen( filename, "wt" );
+ if ( fp == NULL )
+ return -1;
+
+ for ( int j = 0; j < historyLen; ++j ) {
+ if ( history[j][0] != '\0' )
+ fprintf ( fp, "%s\n", history[j] );
+ }
+ fclose( fp );
+ return 0;
+}
+
+/* Load the history from the specified file. If the file does not exist
+ * zero is returned and no operation is performed.
+ *
+ * If the file exists and the operation succeeded 0 is returned, otherwise
+ * on error -1 is returned. */
+int linenoiseHistoryLoad( const char* filename ) {
+ FILE *fp = fopen( filename, "rt" );
+ if ( fp == NULL )
+ return -1;
+
+ char buf[LINENOISE_MAX_LINE];
+ while ( fgets( buf, LINENOISE_MAX_LINE, fp ) != NULL ) {
+ char* p = strchr( buf, '\r' );
+ if ( ! p )
+ p = strchr( buf, '\n' );
+ if ( p )
+ *p = '\0';
+ if ( p != buf )
+ linenoiseHistoryAdd( buf );
+ }
+ fclose( fp );
+ return 0;
+}
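Taken together, the history calls above make persistence across sessions a two-line affair: load an existing file at startup (load returns -1 if the file does not exist yet, which is harmless here) and save on exit. A sketch, with the file path chosen arbitrarily for illustration:

#include <cstdlib>
#include "linenoise.h"

int main() {
    const char* historyFile = "demo_history.txt";  // hypothetical path
    linenoiseHistorySetMaxLen( 500 );              // cap the in-memory list at 500 entries
    linenoiseHistoryLoad( historyFile );           // returns -1 (ignored) if the file is missing

    char* line;
    while ( ( line = linenoise( "demo> " ) ) != NULL ) {
        if ( line[0] != '\0' )
            linenoiseHistoryAdd( line );           // newlines are flattened to spaces when stored
        free( line );
    }

    linenoiseHistorySave( historyFile );           // one entry per line of the file
    return 0;
}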
diff --git a/src/third_party/linenoise/linenoise.h b/src/third_party/linenoise/linenoise.h
new file mode 100644
index 00000000000..fdbbaaab187
--- /dev/null
+++ b/src/third_party/linenoise/linenoise.h
@@ -0,0 +1,56 @@
+/* linenoise.h -- guerrilla line editing library against the idea that a
+ * line editing lib needs to be 20,000 lines of C code.
+ *
+ * See linenoise.c for more information.
+ *
+ * Copyright (c) 2010, Salvatore Sanfilippo <antirez at gmail dot com>
+ * Copyright (c) 2010, Pieter Noordhuis <pcnoordhuis at gmail dot com>
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Redis nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __LINENOISE_H
+#define __LINENOISE_H
+
+struct linenoiseCompletions {
+ size_t len;
+ char** cvec;
+};
+
+typedef void( linenoiseCompletionCallback )( const char *, linenoiseCompletions * );
+void linenoiseSetCompletionCallback( linenoiseCompletionCallback * fn );
+void linenoiseAddCompletion( linenoiseCompletions * lc, const char * str );
+
+char *linenoise( const char* prompt );
+int linenoiseHistoryAdd( const char* line );
+int linenoiseHistorySetMaxLen( int len );
+int linenoiseHistorySave( const char* filename );
+int linenoiseHistoryLoad( const char* filename );
+void linenoiseHistoryFree( void );
+void linenoiseClearScreen( void );
+
+#endif /* __LINENOISE_H */
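Since this header is the whole public surface for completion, a callback sketch may help make the flow concrete: completeLine() hands the callback the word between the last break character and the cursor, and the callback reports each candidate through linenoiseAddCompletion(). The command names below are invented for illustration:

#include <cstring>
#include "linenoise.h"

// Hypothetical callback: offer fixed command names whose prefix matches
// the text the user has typed so far.
static void demoCompletion( const char* prefix, linenoiseCompletions* lc ) {
    static const char* const commands[] = { "help", "history", "quit" };
    size_t prefixLen = strlen( prefix );
    for ( size_t i = 0; i < sizeof( commands ) / sizeof( commands[0] ); ++i ) {
        if ( strncmp( commands[i], prefix, prefixLen ) == 0 )
            linenoiseAddCompletion( lc, commands[i] );
    }
}

// Registered once at startup; tab then cycles through the matches:
//     linenoiseSetCompletionCallback( demoCompletion );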
diff --git a/src/third_party/linenoise/linenoise_win32.cpp b/src/third_party/linenoise/linenoise_win32.cpp
new file mode 100644
index 00000000000..0a2c33485b1
--- /dev/null
+++ b/src/third_party/linenoise/linenoise_win32.cpp
@@ -0,0 +1,442 @@
+/* linenoise_win32.c -- Linenoise win32 port.
+ *
+ * Modifications copyright 2010, Jon Griffiths <jon_p_griffiths at yahoo dot com>.
+ * All rights reserved.
+ * Based on linenoise, copyright 2010, Salvatore Sanfilippo <antirez at gmail dot com>.
+ * The original linenoise can be found at: http://github.com/antirez/linenoise
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Redis nor the names of its contributors may be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Todo list:
+ * Actually switch to/from raw mode so emacs key combos work.
+ * Set a console handler to clean up on exit.
+ */
+#include <conio.h>
+#include <windows.h>
+#include <stdio.h>
+
+/* If ALT_KEYS is defined, emacs key combos using ALT instead of CTRL are
+ * available. At this time, you don't get key repeats when enabled though. */
+/* #define ALT_KEYS */
+
+static HANDLE console_in, console_out;
+
+#define LINENOISE_DEFAULT_HISTORY_MAX_LEN 100
+#define LINENOISE_MAX_LINE 4096
+
+static int history_max_len = LINENOISE_DEFAULT_HISTORY_MAX_LEN;
+static int history_len = 0;
+char** history = NULL;
+
+int linenoiseHistoryAdd(const char* line);
+
+static int enableRawMode()
+{
+ if (!console_in)
+ {
+ console_in = GetStdHandle(STD_INPUT_HANDLE);
+ console_out = GetStdHandle(STD_OUTPUT_HANDLE);
+ }
+ return 0;
+}
+
+static void disableRawMode()
+{
+ /* Nothing to do yet */
+}
+
+static void output(const char* str,
+ size_t len,
+ int x,
+ int y)
+{
+ COORD pos = { (SHORT)x, (SHORT)y };
+ DWORD count = 0;
+ WriteConsoleOutputCharacterA(console_out, str, len, pos, &count);
+}
+
+static void refreshLine(const char* prompt,
+ char* buf,
+ size_t len,
+ size_t pos,
+ size_t cols)
+{
+ size_t plen = strlen(prompt);
+
+ while ((plen + pos) >= cols)
+ {
+ buf++;
+ len--;
+ pos--;
+ }
+ while (plen + len > cols)
+ {
+ len--;
+ }
+
+ CONSOLE_SCREEN_BUFFER_INFO inf = { 0 };
+ GetConsoleScreenBufferInfo(console_out, &inf);
+ size_t prompt_len = strlen(prompt);
+ output(prompt, prompt_len, 0, inf.dwCursorPosition.Y);
+ output(buf, len, prompt_len, inf.dwCursorPosition.Y);
+ if (prompt_len + len < (size_t)inf.dwSize.X)
+ {
+ /* Blank to EOL */
+ char* tmp = (char*)malloc(inf.dwSize.X - (prompt_len + len));
+ memset(tmp, ' ', inf.dwSize.X - (prompt_len + len));
+ output(tmp, inf.dwSize.X - (prompt_len + len), len + prompt_len, inf.dwCursorPosition.Y);
+ free(tmp);
+ }
+ inf.dwCursorPosition.X = (SHORT)(pos + prompt_len);
+ SetConsoleCursorPosition(console_out, inf.dwCursorPosition);
+}
+
+static int linenoisePrompt(char* buf,
+ size_t buflen,
+ const char* prompt)
+{
+ size_t plen = strlen(prompt);
+ size_t pos = 0;
+ size_t len = 0;
+ int history_index = 0;
+#ifdef ALT_KEYS
+ unsigned char last_down = 0;
+#endif
+ buf[0] = '\0';
+ buflen--; /* Make sure there is always space for the nulterm */
+
+ /* The latest history entry is always our current buffer, which
+ * is initially just an empty string. */
+ linenoiseHistoryAdd("");
+
+ CONSOLE_SCREEN_BUFFER_INFO inf = { 0 };
+ GetConsoleScreenBufferInfo(console_out, &inf);
+ size_t cols = inf.dwSize.X;
+ output(prompt, plen, 0, inf.dwCursorPosition.Y);
+ inf.dwCursorPosition.X = (SHORT)plen;
+ SetConsoleCursorPosition(console_out, inf.dwCursorPosition);
+
+ for ( ; ; )
+ {
+ INPUT_RECORD rec;
+ DWORD count;
+ ReadConsoleInputA(console_in, &rec, 1, &count);
+ if (rec.EventType != KEY_EVENT)
+ continue;
+#ifdef ALT_KEYS
+ if (rec.Event.KeyEvent.bKeyDown)
+ {
+ last_down = rec.Event.KeyEvent.uChar.AsciiChar;
+ continue;
+ }
+#else
+ if (!rec.Event.KeyEvent.bKeyDown)
+ {
+ continue;
+ }
+#endif
+ switch (rec.Event.KeyEvent.wVirtualKeyCode)
+ {
+ case VK_RETURN: /* enter */
+ history_len--;
+ free(history[history_len]);
+ return (int)len;
+ case VK_BACK: /* backspace */
+#ifdef ALT_KEYS
+ backspace:
+#endif
+ if (pos > 0 && len > 0)
+ {
+ memmove(buf + pos - 1, buf + pos, len - pos);
+ pos--;
+ len--;
+ buf[len] = '\0';
+ refreshLine(prompt, buf, len, pos, cols);
+ }
+ break;
+ case VK_LEFT:
+#ifdef ALT_KEYS
+ left_arrow:
+#endif
+ /* left arrow */
+ if (pos > 0)
+ {
+ pos--;
+ refreshLine(prompt, buf, len, pos, cols);
+ }
+ break;
+ case VK_RIGHT:
+#ifdef ALT_KEYS
+ right_arrow:
+#endif
+ /* right arrow */
+ if (pos != len)
+ {
+ pos++;
+ refreshLine(prompt, buf, len, pos, cols);
+ }
+ break;
+ case VK_UP:
+ case VK_DOWN:
+#ifdef ALT_KEYS
+ up_down_arrow:
+#endif
+ /* up and down arrow: history */
+ if (history_len > 1)
+ {
+ /* Update the current history entry before we
+ * overwrite it with the next one. */
+ free(history[history_len - 1 - history_index]);
+ history[history_len - 1 - history_index] = _strdup(buf);
+ /* Show the new entry */
+ history_index += (rec.Event.KeyEvent.wVirtualKeyCode == VK_UP) ? 1 : -1;
+ if (history_index < 0)
+ {
+ history_index = 0;
+ break;
+ }
+ else if (history_index >= history_len)
+ {
+ history_index = history_len - 1;
+ break;
+ }
+ strncpy(buf, history[history_len - 1 - history_index], buflen);
+ buf[buflen] = '\0';
+ len = pos = strlen(buf);
+ refreshLine(prompt, buf, len, pos, cols);
+ }
+ break;
+ case VK_DELETE:
+ /* delete */
+ if (len > 0 && pos < len)
+ {
+ memmove(buf + pos, buf + pos + 1, len - pos - 1);
+ len--;
+ buf[len] = '\0';
+ refreshLine(prompt, buf, len, pos, cols);
+ }
+ break;
+ case VK_HOME: /* Ctrl+a, go to the start of the line */
+#ifdef ALT_KEYS
+ home:
+#endif
+ pos = 0;
+ refreshLine(prompt, buf, len, pos, cols);
+ break;
+ case VK_END: /* ctrl+e, go to the end of the line */
+#ifdef ALT_KEYS
+ end:
+#endif
+ pos = len;
+ refreshLine(prompt, buf, len, pos, cols);
+ break;
+ default:
+#ifdef ALT_KEYS
+ /* Use alt instead of CTRL since Windows eats CTRL+char combos */
+ if (rec.Event.KeyEvent.dwControlKeyState & (LEFT_ALT_PRESSED | RIGHT_ALT_PRESSED))
+ {
+ switch (last_down)
+ {
+ case 'a': /* ctrl-a */
+ goto home;
+ case 'e': /* ctrl-e */
+ goto end;
+ case 't': /* ctrl-t */
+ if (pos > 0 && pos < len)
+ {
+ int aux = buf[pos - 1];
+ buf[pos - 1] = buf[pos];
+ buf[pos] = aux;
+ if (pos != len - 1)
+ pos++;
+ refreshLine(prompt, buf, len, pos, cols);
+ }
+ break;
+ case 'h': /* ctrl-h */
+ goto backspace;
+ case 'b': /* ctrl-b */
+ goto left_arrow;
+ case 'f': /* ctrl-f */
+ goto right_arrow;
+ case 'p': /* ctrl-p */
+ rec.Event.KeyEvent.wVirtualKeyCode = VK_UP;
+ goto up_down_arrow;
+ case 'n': /* ctrl-n */
+ rec.Event.KeyEvent.wVirtualKeyCode = VK_DOWN;
+ goto up_down_arrow;
+ case 'u': /* Ctrl+u, delete the whole line. */
+ buf[0] = '\0';
+ pos = len = 0;
+ refreshLine(prompt, buf, len, pos, cols);
+ break;
+ case 'k': /* Ctrl+k, delete from current to end of line. */
+ buf[pos] = '\0';
+ len = pos;
+ refreshLine(prompt, buf, len, pos, cols);
+ break;
+ }
+ continue;
+ }
+#endif /* ALT_KEYS */
+ if (rec.Event.KeyEvent.uChar.AsciiChar < ' ' ||
+ rec.Event.KeyEvent.uChar.AsciiChar > '~')
+ continue;
+
+ if (len < buflen)
+ {
+ if (len != pos)
+ memmove(buf + pos + 1, buf + pos, len - pos);
+ buf[pos] = rec.Event.KeyEvent.uChar.AsciiChar;
+ len++;
+ pos++;
+ buf[len] = '\0';
+ refreshLine(prompt, buf, len, pos, cols);
+ }
+ break;
+ }
+ }
+}
+
+static int linenoiseRaw(char* buf,
+ size_t buflen,
+ const char* prompt)
+{
+ int count = -1;
+
+ if (buflen != 0)
+ {
+ if (enableRawMode() == -1)
+ return -1;
+ count = linenoisePrompt(buf, buflen, prompt);
+ disableRawMode();
+ printf("\n");
+ }
+ return count;
+}
+
+char* linenoise(const char* prompt)
+{
+ char buf[LINENOISE_MAX_LINE];
+ int count = linenoiseRaw(buf, LINENOISE_MAX_LINE, prompt);
+ if (count == -1)
+ return NULL;
+ return _strdup(buf);
+}
+
+/* Using a circular buffer is smarter, but a bit more complex to handle. */
+int linenoiseHistoryAdd(const char* line)
+{
+ char* linecopy;
+
+ if (history_max_len == 0)
+ return 0;
+ if (history == NULL)
+ {
+ history = (char**)malloc(sizeof(char*) * history_max_len);
+ if (history == NULL)
+ return 0;
+ memset(history, 0, (sizeof(char*) * history_max_len));
+ }
+ linecopy = _strdup(line);
+ if (!linecopy)
+ return 0;
+ if (history_len == history_max_len)
+ {
+ free(history[0]);
+ memmove(history, history + 1, sizeof(char*) * (history_max_len - 1));
+ history_len--;
+ }
+ history[history_len] = linecopy;
+ history_len++;
+ return 1;
+}
+
+int linenoiseHistorySetMaxLen(int len)
+{
+ char** new_history;
+
+ if (len < 1)
+ return 0;
+ if (history)
+ {
+ int tocopy = history_len;
+
+ new_history = (char**)malloc(sizeof(char*) * len);
+ if (new_history == NULL)
+ return 0;
+ if (len < tocopy)
+ tocopy = len;
+ memcpy(new_history, history + (history_max_len - tocopy), sizeof(char*) * tocopy);
+ free(history);
+ history = new_history;
+ }
+ history_max_len = len;
+ if (history_len > history_max_len)
+ history_len = history_max_len;
+ return 1;
+}
+
+/* Save the history in the specified file. On success 0 is returned
+ * otherwise -1 is returned. */
+int linenoiseHistorySave(const char* filename)
+{
+ FILE* fp = fopen(filename, "w");
+ int j;
+
+ if (fp == NULL)
+ return -1;
+ for (j = 0; j < history_len; j++)
+ fprintf(fp, "%s\n", history[j]);
+ fclose(fp);
+ return 0;
+}
+
+/* Load the history from the specified file. If the file does not exist
+ * zero is returned and no operation is performed.
+ *
+ * If the file exists and the operation succeeded 0 is returned, otherwise
+ * on error -1 is returned. */
+int linenoiseHistoryLoad(const char* filename)
+{
+ FILE* fp = fopen(filename, "r");
+ char buf[LINENOISE_MAX_LINE];
+
+ if (fp == NULL)
+ return -1;
+
+ while (fgets(buf, LINENOISE_MAX_LINE, fp) != NULL)
+ {
+ char* p;
+
+ p = strchr(buf, '\r');
+ if (!p)
+ p = strchr(buf, '\n');
+ if (p)
+ *p = '\0';
+ linenoiseHistoryAdd(buf);
+ }
+ fclose(fp);
+ return 0;
+}
diff --git a/src/third_party/pcre-7.4/config-cmake.h.in b/src/third_party/pcre-7.4/config-cmake.h.in
new file mode 100644
index 00000000000..27a2d02cb27
--- /dev/null
+++ b/src/third_party/pcre-7.4/config-cmake.h.in
@@ -0,0 +1,31 @@
+/* config.h for CMake builds */
+
+#cmakedefine HAVE_DIRENT_H
+#cmakedefine HAVE_UNISTD_H
+#cmakedefine HAVE_SYS_STAT_H
+#cmakedefine HAVE_SYS_TYPES_H
+#cmakedefine HAVE_TYPE_TRAITS_H
+#cmakedefine HAVE_BITS_TYPE_TRAITS_H
+
+#cmakedefine HAVE_BCOPY
+#cmakedefine HAVE_MEMMOVE
+#cmakedefine HAVE_STRERROR
+
+#cmakedefine PCRE_STATIC
+
+#cmakedefine SUPPORT_UTF8
+#cmakedefine SUPPORT_UCP
+#cmakedefine EBCDIC
+#cmakedefine BSR_ANYCRLF
+#cmakedefine NO_RECURSE
+
+#define NEWLINE @NEWLINE@
+#define POSIX_MALLOC_THRESHOLD @PCRE_POSIX_MALLOC_THRESHOLD@
+#define LINK_SIZE @PCRE_LINK_SIZE@
+#define MATCH_LIMIT @PCRE_MATCH_LIMIT@
+#define MATCH_LIMIT_RECURSION @PCRE_MATCH_LIMIT_RECURSION@
+
+#define MAX_NAME_SIZE 32
+#define MAX_NAME_COUNT 10000
+
+/* end config.h for CMake builds */
diff --git a/src/third_party/pcre-7.4/config.h b/src/third_party/pcre-7.4/config.h
new file mode 100644
index 00000000000..101d071b211
--- /dev/null
+++ b/src/third_party/pcre-7.4/config.h
@@ -0,0 +1,239 @@
+/* config.h. Generated from config.h.in by configure. */
+/* config.h.in. Generated from configure.ac by autoheader. */
+
+
+/* On Unix-like systems config.h.in is converted by "configure" into config.h.
+Some other environments also support the use of "configure". PCRE is written in
+Standard C, but there are a few non-standard things it can cope with, allowing
+it to run on SunOS4 and other "close to standard" systems.
+
+If you are going to build PCRE "by hand" on a system without "configure" you
+should copy the distributed config.h.generic to config.h, and then set up the
+macro definitions the way you need them. You must then add -DHAVE_CONFIG_H to
+all of your compile commands, so that config.h is included at the start of
+every source.
+
+Alternatively, you can avoid editing by using -D on the compiler command line
+to set the macro values. In this case, you do not have to set -DHAVE_CONFIG_H.
+
+PCRE uses memmove() if HAVE_MEMMOVE is set to 1; otherwise it uses bcopy() if
+HAVE_BCOPY is set to 1. If your system has neither bcopy() nor memmove(), set
+them both to 0; an emulation function will be used. */
+
+/* By default, the \R escape sequence matches any Unicode line ending
+ character or sequence of characters. If BSR_ANYCRLF is defined, this is
+ changed so that backslash-R matches only CR, LF, or CRLF. The build-time
+ default can be overridden by the user of PCRE at runtime. On systems that
+ support it, "configure" can be used to override the default. */
+/* #undef BSR_ANYCRLF */
+
+/* If you are compiling for a system that uses EBCDIC instead of ASCII
+ character codes, define this macro as 1. On systems that can use
+ "configure", this can be done via --enable-ebcdic. */
+/* #undef EBCDIC */
+
+/* Define to 1 if you have the `bcopy' function. */
+#define HAVE_BCOPY 1
+
+/* Define to 1 if you have the <bits/type_traits.h> header file. */
+/* #undef HAVE_BITS_TYPE_TRAITS_H */
+
+/* Define to 1 if you have the <dirent.h> header file. */
+// #define HAVE_DIRENT_H 1 ERH
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#define HAVE_DLFCN_H 1
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define HAVE_INTTYPES_H 1
+
+/* Define to 1 if you have the <limits.h> header file. */
+#define HAVE_LIMITS_H 1
+
+/* Define to 1 if the system has the type `long long'. */
+#define HAVE_LONG_LONG 1
+
+/* Define to 1 if you have the `memmove' function. */
+#define HAVE_MEMMOVE 1
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the `strerror' function. */
+#define HAVE_STRERROR 1
+
+/* Define to 1 if you have the <string> header file. */
+#define HAVE_STRING 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the `strtoll' function. */
+// dm: visual studio
+#ifndef _WIN32
+#define HAVE_STRTOLL 1
+#endif
+
+/* Define to 1 if you have the `strtoq' function. */
+// dm: visual studio
+//#define HAVE_STRTOQ 1
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define HAVE_SYS_TYPES_H 1
+
+/* Define to 1 if you have the <type_traits.h> header file. */
+/* #undef HAVE_TYPE_TRAITS_H */
+
+/* Define to 1 if you have the <unistd.h> header file. */
+//#define HAVE_UNISTD_H 1 ERH
+
+/* Define to 1 if the system has the type `unsigned long long'. */
+#define HAVE_UNSIGNED_LONG_LONG 1
+
+/* Define to 1 if you have the <windows.h> header file. */
+/* #undef HAVE_WINDOWS_H */
+
+/* Define to 1 if you have the `_strtoi64' function. */
+/* #undef HAVE__STRTOI64 */
+// dm: visual studio
+#ifdef _WIN32
+#define HAVE__STRTOI64 1
+#endif
+
+/* The value of LINK_SIZE determines the number of bytes used to store links
+ as offsets within the compiled regex. The default is 2, which allows for
+ compiled patterns up to 64K long. This covers the vast majority of cases.
+ However, PCRE can also be compiled to use 3 or 4 bytes instead. This allows
+ for longer patterns in extreme cases. On systems that support it,
+ "configure" can be used to override this default. */
+#define LINK_SIZE 2
+
+/* The value of MATCH_LIMIT determines the default number of times the
+ internal match() function can be called during a single execution of
+ pcre_exec(). There is a runtime interface for setting a different limit.
+ The limit exists in order to catch runaway regular expressions that take
+ for ever to determine that they do not match. The default is set very large
+ so that it does not accidentally catch legitimate cases. On systems that
+ support it, "configure" can be used to override this default. */
+#define MATCH_LIMIT 200000
+
+/* The above limit applies to all calls of match(), whether or not they
+ increase the recursion depth. In some environments it is desirable to limit
+ the depth of recursive calls of match() more strictly, in order to restrict
+ the maximum amount of stack (or heap, if NO_RECURSE is defined) that is
+ used. The value of MATCH_LIMIT_RECURSION applies only to recursive calls of
+ match(). To have any useful effect, it must be less than the value of
+ MATCH_LIMIT. The default is to use the same value as MATCH_LIMIT. There is
+ a runtime method for setting a different limit. On systems that support it,
+ "configure" can be used to override the default. */
+#define MATCH_LIMIT_RECURSION 4000
+
+/* This limit is parameterized just in case anybody ever wants to change it.
+ Care must be taken if it is increased, because it guards against integer
+ overflow caused by enormously large patterns. */
+#define MAX_NAME_COUNT 10000
+
+/* This limit is parameterized just in case anybody ever wants to change it.
+ Care must be taken if it is increased, because it guards against integer
+ overflow caused by enormously large patterns. */
+#define MAX_NAME_SIZE 32
+
+/* The value of NEWLINE determines the newline character sequence. On systems
+ that support it, "configure" can be used to override the default, which is
+ 10. The possible values are 10 (LF), 13 (CR), 3338 (CRLF), -1 (ANY), or -2
+ (ANYCRLF). */
+#define NEWLINE 10
+
+/* PCRE uses recursive function calls to handle backtracking while matching.
+ This can sometimes be a problem on systems that have stacks of limited
+ size. Define NO_RECURSE to get a version that doesn't use recursion in the
+ match() function; instead it creates its own stack by steam using
+ pcre_recurse_malloc() to obtain memory from the heap. For more detail, see
+ the comments and other stuff just above the match() function. On systems
+ that support it, "configure" can be used to set this in the Makefile (use
+ --disable-stack-for-recursion). */
+/* #undef NO_RECURSE */
+/* mongodb: don't recurse, don't want to use much stack or blow stack */
+#ifndef __sunos__
+/* TODO this doesn't compile on sunos?? */
+#define NO_RECURSE 1
+#endif
+
+/* Name of package */
+#define PACKAGE "pcre"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT ""
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME "PCRE"
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING "PCRE 7.4"
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME "pcre"
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION "7.4"
+
+
+/* If you are compiling for a system other than a Unix-like system or
+ Win32, and it needs some magic to be inserted before the definition
+ of a function that is exported by the library, define this macro to
+ contain the relevant magic. If you do not define this macro, it
+ defaults to "extern" for a C compiler and "extern C" for a C++
+ compiler on non-Win32 systems. This macro appears at the start of
+ every exported function that is part of the external API. It does
+ not appear on functions that are "external" in the C sense, but
+ which are internal to the library. */
+/* #undef PCRE_EXP_DEFN */
+
+/* Define if linking statically (TODO: make nice with Libtool) */
+/* mongodb */
+#define PCRE_STATIC
+#define PCRECPP_STATIC
+
+/* When calling PCRE via the POSIX interface, additional working storage is
+ required for holding the pointers to capturing substrings because PCRE
+ requires three integers per substring, whereas the POSIX interface provides
+ only two. If the number of expected substrings is small, the wrapper
+ function uses space on the stack, because this is faster than using
+ malloc() for each call. The threshold above which the stack is no longer
+ used is defined by POSIX_MALLOC_THRESHOLD. On systems that support it,
+ "configure" can be used to override this default. */
+#define POSIX_MALLOC_THRESHOLD 10
+
+/* Define to 1 if you have the ANSI C header files. */
+#define STDC_HEADERS 1
+
+/* Define to enable support for Unicode properties */
+/* mongodb */
+#define SUPPORT_UCP
+
+
+/* Define to enable support for the UTF-8 Unicode encoding. */
+/* mongodb */
+#define SUPPORT_UTF8
+
+
+/* Version number of package */
+#define VERSION "7.4"
+
+/* Define to empty if `const' does not conform to ANSI C. */
+/* #undef const */
+
+/* Define to `unsigned int' if <sys/types.h> does not define. */
+/* #undef size_t */
diff --git a/src/third_party/pcre-7.4/config.h.generic b/src/third_party/pcre-7.4/config.h.generic
new file mode 100644
index 00000000000..b6d7ab8f3b0
--- /dev/null
+++ b/src/third_party/pcre-7.4/config.h.generic
@@ -0,0 +1,278 @@
+/* config.h. Generated from config.h.in by configure. */
+/* config.h.in. Generated from configure.ac by autoheader. */
+
+
+/* On Unix-like systems config.h.in is converted by "configure" into config.h.
+Some other environments also support the use of "configure". PCRE is written in
+Standard C, but there are a few non-standard things it can cope with, allowing
+it to run on SunOS4 and other "close to standard" systems.
+
+If you are going to build PCRE "by hand" on a system without "configure" you
+should copy the distributed config.h.generic to config.h, and then set up the
+macro definitions the way you need them. You must then add -DHAVE_CONFIG_H to
+all of your compile commands, so that config.h is included at the start of
+every source.
+
+Alternatively, you can avoid editing by using -D on the compiler command line
+to set the macro values. In this case, you do not have to set -DHAVE_CONFIG_H.
+
+PCRE uses memmove() if HAVE_MEMMOVE is set to 1; otherwise it uses bcopy() if
+HAVE_BCOPY is set to 1. If your system has neither bcopy() nor memmove(), set
+them both to 0; an emulation function will be used. */
+
+/* By default, the \R escape sequence matches any Unicode line ending
+ character or sequence of characters. If BSR_ANYCRLF is defined, this is
+ changed so that backslash-R matches only CR, LF, or CRLF. The build-time
+ default can be overridden by the user of PCRE at runtime. On systems that
+ support it, "configure" can be used to override the default. */
+/* #undef BSR_ANYCRLF */
+
+/* If you are compiling for a system that uses EBCDIC instead of ASCII
+ character codes, define this macro as 1. On systems that can use
+ "configure", this can be done via --enable-ebcdic. */
+/* #undef EBCDIC */
+
+/* Define to 1 if you have the `bcopy' function. */
+#ifndef HAVE_BCOPY
+#define HAVE_BCOPY 1
+#endif
+
+/* Define to 1 if you have the <bits/type_traits.h> header file. */
+/* #undef HAVE_BITS_TYPE_TRAITS_H */
+
+/* Define to 1 if you have the <dirent.h> header file. */
+#ifndef HAVE_DIRENT_H
+#define HAVE_DIRENT_H 1
+#endif
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#ifndef HAVE_DLFCN_H
+#define HAVE_DLFCN_H 1
+#endif
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#ifndef HAVE_INTTYPES_H
+#define HAVE_INTTYPES_H 1
+#endif
+
+/* Define to 1 if you have the <limits.h> header file. */
+#ifndef HAVE_LIMITS_H
+#define HAVE_LIMITS_H 1
+#endif
+
+/* Define to 1 if the system has the type `long long'. */
+#ifndef HAVE_LONG_LONG
+#define HAVE_LONG_LONG 1
+#endif
+
+/* Define to 1 if you have the `memmove' function. */
+#ifndef HAVE_MEMMOVE
+#define HAVE_MEMMOVE 1
+#endif
+
+/* Define to 1 if you have the <memory.h> header file. */
+#ifndef HAVE_MEMORY_H
+#define HAVE_MEMORY_H 1
+#endif
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#ifndef HAVE_STDINT_H
+#define HAVE_STDINT_H 1
+#endif
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#ifndef HAVE_STDLIB_H
+#define HAVE_STDLIB_H 1
+#endif
+
+/* Define to 1 if you have the `strerror' function. */
+#ifndef HAVE_STRERROR
+#define HAVE_STRERROR 1
+#endif
+
+/* Define to 1 if you have the <string> header file. */
+#ifndef HAVE_STRING
+#define HAVE_STRING 1
+#endif
+
+/* Define to 1 if you have the <strings.h> header file. */
+#ifndef HAVE_STRINGS_H
+#define HAVE_STRINGS_H 1
+#endif
+
+/* Define to 1 if you have the <string.h> header file. */
+#ifndef HAVE_STRING_H
+#define HAVE_STRING_H 1
+#endif
+
+/* Define to 1 if you have the `strtoll' function. */
+#ifndef HAVE_STRTOLL
+#define HAVE_STRTOLL 1
+#endif
+
+/* Define to 1 if you have the `strtoq' function. */
+#ifndef HAVE_STRTOQ
+#define HAVE_STRTOQ 1
+#endif
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#ifndef HAVE_SYS_STAT_H
+#define HAVE_SYS_STAT_H 1
+#endif
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#ifndef HAVE_SYS_TYPES_H
+#define HAVE_SYS_TYPES_H 1
+#endif
+
+/* Define to 1 if you have the <type_traits.h> header file. */
+/* #undef HAVE_TYPE_TRAITS_H */
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#ifndef HAVE_UNISTD_H
+#define HAVE_UNISTD_H 1
+#endif
+
+/* Define to 1 if the system has the type `unsigned long long'. */
+#ifndef HAVE_UNSIGNED_LONG_LONG
+#define HAVE_UNSIGNED_LONG_LONG 1
+#endif
+
+/* Define to 1 if you have the <windows.h> header file. */
+/* #undef HAVE_WINDOWS_H */
+
+/* Define to 1 if you have the `_strtoi64' function. */
+/* #undef HAVE__STRTOI64 */
+
+/* The value of LINK_SIZE determines the number of bytes used to store links
+ as offsets within the compiled regex. The default is 2, which allows for
+ compiled patterns up to 64K long. This covers the vast majority of cases.
+ However, PCRE can also be compiled to use 3 or 4 bytes instead. This allows
+ for longer patterns in extreme cases. On systems that support it,
+ "configure" can be used to override this default. */
+#ifndef LINK_SIZE
+#define LINK_SIZE 2
+#endif
+
+/* The value of MATCH_LIMIT determines the default number of times the
+ internal match() function can be called during a single execution of
+ pcre_exec(). There is a runtime interface for setting a different limit.
+ The limit exists in order to catch runaway regular expressions that take
+ for ever to determine that they do not match. The default is set very large
+ so that it does not accidentally catch legitimate cases. On systems that
+ support it, "configure" can be used to override this default. */
+#ifndef MATCH_LIMIT
+#define MATCH_LIMIT 10000000
+#endif
+
+/* The above limit applies to all calls of match(), whether or not they
+ increase the recursion depth. In some environments it is desirable to limit
+ the depth of recursive calls of match() more strictly, in order to restrict
+ the maximum amount of stack (or heap, if NO_RECURSE is defined) that is
+ used. The value of MATCH_LIMIT_RECURSION applies only to recursive calls of
+ match(). To have any useful effect, it must be less than the value of
+ MATCH_LIMIT. The default is to use the same value as MATCH_LIMIT. There is
+ a runtime method for setting a different limit. On systems that support it,
+ "configure" can be used to override the default. */
+#ifndef MATCH_LIMIT_RECURSION
+#define MATCH_LIMIT_RECURSION MATCH_LIMIT
+#endif
+
+/* This limit is parameterized just in case anybody ever wants to change it.
+ Care must be taken if it is increased, because it guards against integer
+ overflow caused by enormously large patterns. */
+#ifndef MAX_NAME_COUNT
+#define MAX_NAME_COUNT 10000
+#endif
+
+/* This limit is parameterized just in case anybody ever wants to change it.
+ Care must be taken if it is increased, because it guards against integer
+ overflow caused by enormously large patterns. */
+#ifndef MAX_NAME_SIZE
+#define MAX_NAME_SIZE 32
+#endif
+
+/* The value of NEWLINE determines the newline character sequence. On systems
+ that support it, "configure" can be used to override the default, which is
+ 10. The possible values are 10 (LF), 13 (CR), 3338 (CRLF), -1 (ANY), or -2
+ (ANYCRLF). */
+#ifndef NEWLINE
+#define NEWLINE 10
+#endif
+
+/* PCRE uses recursive function calls to handle backtracking while matching.
+ This can sometimes be a problem on systems that have stacks of limited
+ size. Define NO_RECURSE to get a version that doesn't use recursion in the
+ match() function; instead it creates its own stack by steam using
+ pcre_recurse_malloc() to obtain memory from the heap. For more detail, see
+ the comments and other stuff just above the match() function. On systems
+ that support it, "configure" can be used to set this in the Makefile (use
+ --disable-stack-for-recursion). */
+/* #undef NO_RECURSE */
+
+/* Name of package */
+#define PACKAGE "pcre"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define PACKAGE_BUGREPORT ""
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME "PCRE"
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING "PCRE 7.4"
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME "pcre"
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION "7.4"
+
+
+/* If you are compiling for a system other than a Unix-like system or
+ Win32, and it needs some magic to be inserted before the definition
+ of a function that is exported by the library, define this macro to
+ contain the relevant magic. If you do not define this macro, it
+ defaults to "extern" for a C compiler and "extern C" for a C++
+ compiler on non-Win32 systems. This macro appears at the start of
+ every exported function that is part of the external API. It does
+ not appear on functions that are "external" in the C sense, but
+ which are internal to the library. */
+/* #undef PCRE_EXP_DEFN */
+
+/* Define if linking statically (TODO: make nice with Libtool) */
+/* #undef PCRE_STATIC */
+
+/* When calling PCRE via the POSIX interface, additional working storage is
+ required for holding the pointers to capturing substrings because PCRE
+ requires three integers per substring, whereas the POSIX interface provides
+ only two. If the number of expected substrings is small, the wrapper
+ function uses space on the stack, because this is faster than using
+ malloc() for each call. The threshold above which the stack is no longer
+ used is defined by POSIX_MALLOC_THRESHOLD. On systems that support it,
+ "configure" can be used to override this default. */
+#ifndef POSIX_MALLOC_THRESHOLD
+#define POSIX_MALLOC_THRESHOLD 10
+#endif
+
+/* Define to 1 if you have the ANSI C header files. */
+#ifndef STDC_HEADERS
+#define STDC_HEADERS 1
+#endif
+
+/* Define to enable support for Unicode properties */
+/* #undef SUPPORT_UCP */
+
+/* Define to enable support for the UTF-8 Unicode encoding. */
+/* #undef SUPPORT_UTF8 */
+
+/* Version number of package */
+#ifndef VERSION
+#define VERSION "7.4"
+#endif
+
+/* Define to empty if `const' does not conform to ANSI C. */
+/* #undef const */
+
+/* Define to `unsigned int' if <sys/types.h> does not define. */
+/* #undef size_t */
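
The values baked into config.h above (NEWLINE, LINK_SIZE, MATCH_LIMIT, UTF-8 support, and so on) can be inspected by a client at runtime through pcre_config(), which is declared in pcre.h later in this patch. A small sketch, assuming PCRE is installed where <pcre.h> resolves:

#include <stdio.h>
#include <pcre.h>

int main(void)
{
    int newline, link_size, utf8;
    unsigned long int match_limit;

    pcre_config(PCRE_CONFIG_NEWLINE, &newline);       /* 10, 13, 3338, -1 or -2 */
    pcre_config(PCRE_CONFIG_LINK_SIZE, &link_size);   /* 2, 3 or 4 */
    pcre_config(PCRE_CONFIG_MATCH_LIMIT, &match_limit);
    pcre_config(PCRE_CONFIG_UTF8, &utf8);             /* 0 here, since SUPPORT_UTF8 is unset */

    printf("newline=%d link_size=%d match_limit=%lu utf8=%d\n",
           newline, link_size, match_limit, utf8);
    return 0;
}
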
diff --git a/src/third_party/pcre-7.4/config.h.in b/src/third_party/pcre-7.4/config.h.in
new file mode 100644
index 00000000000..c4ea3c4b367
--- /dev/null
+++ b/src/third_party/pcre-7.4/config.h.in
@@ -0,0 +1,219 @@
+/* config.h.in. Generated from configure.ac by autoheader. */
+
+
+/* On Unix-like systems config.h.in is converted by "configure" into config.h.
+Some other environments also support the use of "configure". PCRE is written in
+Standard C, but there are a few non-standard things it can cope with, allowing
+it to run on SunOS4 and other "close to standard" systems.
+
+If you are going to build PCRE "by hand" on a system without "configure" you
+should copy the distributed config.h.generic to config.h, and then set up the
+macro definitions the way you need them. You must then add -DHAVE_CONFIG_H to
+all of your compile commands, so that config.h is included at the start of
+every source.
+
+Alternatively, you can avoid editing by using -D on the compiler command line
+to set the macro values. In this case, you do not have to set -DHAVE_CONFIG_H.
+
+PCRE uses memmove() if HAVE_MEMMOVE is set to 1; otherwise it uses bcopy() if
+HAVE_BCOPY is set to 1. If your system has neither bcopy() nor memmove(), set
+them both to 0; an emulation function will be used. */
+
+/* By default, the \R escape sequence matches any Unicode line ending
+ character or sequence of characters. If BSR_ANYCRLF is defined, this is
+ changed so that backslash-R matches only CR, LF, or CRLF. The build-time
+ default can be overridden by the user of PCRE at runtime. On systems that
+ support it, "configure" can be used to override the default. */
+#undef BSR_ANYCRLF
+
+/* If you are compiling for a system that uses EBCDIC instead of ASCII
+ character codes, define this macro as 1. On systems that can use
+ "configure", this can be done via --enable-ebcdic. */
+#undef EBCDIC
+
+/* Define to 1 if you have the `bcopy' function. */
+#undef HAVE_BCOPY
+
+/* Define to 1 if you have the <bits/type_traits.h> header file. */
+#undef HAVE_BITS_TYPE_TRAITS_H
+
+/* Define to 1 if you have the <dirent.h> header file. */
+#undef HAVE_DIRENT_H
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#undef HAVE_DLFCN_H
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#undef HAVE_INTTYPES_H
+
+/* Define to 1 if you have the <limits.h> header file. */
+#undef HAVE_LIMITS_H
+
+/* Define to 1 if the system has the type `long long'. */
+#undef HAVE_LONG_LONG
+
+/* Define to 1 if you have the `memmove' function. */
+#undef HAVE_MEMMOVE
+
+/* Define to 1 if you have the <memory.h> header file. */
+#undef HAVE_MEMORY_H
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#undef HAVE_STDINT_H
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#undef HAVE_STDLIB_H
+
+/* Define to 1 if you have the `strerror' function. */
+#undef HAVE_STRERROR
+
+/* Define to 1 if you have the <string> header file. */
+#undef HAVE_STRING
+
+/* Define to 1 if you have the <strings.h> header file. */
+#undef HAVE_STRINGS_H
+
+/* Define to 1 if you have the <string.h> header file. */
+#undef HAVE_STRING_H
+
+/* Define to 1 if you have the `strtoll' function. */
+#undef HAVE_STRTOLL
+
+/* Define to 1 if you have the `strtoq' function. */
+#undef HAVE_STRTOQ
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#undef HAVE_SYS_STAT_H
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#undef HAVE_SYS_TYPES_H
+
+/* Define to 1 if you have the <type_traits.h> header file. */
+#undef HAVE_TYPE_TRAITS_H
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#undef HAVE_UNISTD_H
+
+/* Define to 1 if the system has the type `unsigned long long'. */
+#undef HAVE_UNSIGNED_LONG_LONG
+
+/* Define to 1 if you have the <windows.h> header file. */
+#undef HAVE_WINDOWS_H
+
+/* Define to 1 if you have the `_strtoi64' function. */
+#undef HAVE__STRTOI64
+
+/* The value of LINK_SIZE determines the number of bytes used to store links
+ as offsets within the compiled regex. The default is 2, which allows for
+ compiled patterns up to 64K long. This covers the vast majority of cases.
+ However, PCRE can also be compiled to use 3 or 4 bytes instead. This allows
+ for longer patterns in extreme cases. On systems that support it,
+ "configure" can be used to override this default. */
+#undef LINK_SIZE
+
+/* The value of MATCH_LIMIT determines the default number of times the
+ internal match() function can be called during a single execution of
+ pcre_exec(). There is a runtime interface for setting a different limit.
+ The limit exists in order to catch runaway regular expressions that take
+ for ever to determine that they do not match. The default is set very large
+ so that it does not accidentally catch legitimate cases. On systems that
+ support it, "configure" can be used to override this default. */
+#undef MATCH_LIMIT
+
+/* The above limit applies to all calls of match(), whether or not they
+ increase the recursion depth. In some environments it is desirable to limit
+ the depth of recursive calls of match() more strictly, in order to restrict
+ the maximum amount of stack (or heap, if NO_RECURSE is defined) that is
+ used. The value of MATCH_LIMIT_RECURSION applies only to recursive calls of
+ match(). To have any useful effect, it must be less than the value of
+ MATCH_LIMIT. The default is to use the same value as MATCH_LIMIT. There is
+ a runtime method for setting a different limit. On systems that support it,
+ "configure" can be used to override the default. */
+#undef MATCH_LIMIT_RECURSION
+
+/* This limit is parameterized just in case anybody ever wants to change it.
+ Care must be taken if it is increased, because it guards against integer
+ overflow caused by enormously large patterns. */
+#undef MAX_NAME_COUNT
+
+/* This limit is parameterized just in case anybody ever wants to change it.
+ Care must be taken if it is increased, because it guards against integer
+ overflow caused by enormously large patterns. */
+#undef MAX_NAME_SIZE
+
+/* The value of NEWLINE determines the newline character sequence. On systems
+ that support it, "configure" can be used to override the default, which is
+ 10. The possible values are 10 (LF), 13 (CR), 3338 (CRLF), -1 (ANY), or -2
+ (ANYCRLF). */
+#undef NEWLINE
+
+/* PCRE uses recursive function calls to handle backtracking while matching.
+ This can sometimes be a problem on systems that have stacks of limited
+ size. Define NO_RECURSE to get a version that doesn't use recursion in the
+ match() function; instead it creates its own stack by steam using
+ pcre_recurse_malloc() to obtain memory from the heap. For more detail, see
+ the comments and other stuff just above the match() function. On systems
+ that support it, "configure" can be used to set this in the Makefile (use
+ --disable-stack-for-recursion). */
+#undef NO_RECURSE
+
+/* Name of package */
+#undef PACKAGE
+
+/* Define to the address where bug reports for this package should be sent. */
+#undef PACKAGE_BUGREPORT
+
+/* Define to the full name of this package. */
+#undef PACKAGE_NAME
+
+/* Define to the full name and version of this package. */
+#undef PACKAGE_STRING
+
+/* Define to the one symbol short name of this package. */
+#undef PACKAGE_TARNAME
+
+/* Define to the version of this package. */
+#undef PACKAGE_VERSION
+
+
+/* If you are compiling for a system other than a Unix-like system or
+ Win32, and it needs some magic to be inserted before the definition
+ of a function that is exported by the library, define this macro to
+ contain the relevant magic. If you do not define this macro, it
+ defaults to "extern" for a C compiler and "extern C" for a C++
+ compiler on non-Win32 systems. This macro appears at the start of
+ every exported function that is part of the external API. It does
+ not appear on functions that are "external" in the C sense, but
+ which are internal to the library. */
+#undef PCRE_EXP_DEFN
+
+/* Define if linking statically (TODO: make nice with Libtool) */
+#undef PCRE_STATIC
+
+/* When calling PCRE via the POSIX interface, additional working storage is
+ required for holding the pointers to capturing substrings because PCRE
+ requires three integers per substring, whereas the POSIX interface provides
+ only two. If the number of expected substrings is small, the wrapper
+ function uses space on the stack, because this is faster than using
+ malloc() for each call. The threshold above which the stack is no longer
+ used is defined by POSIX_MALLOC_THRESHOLD. On systems that support it,
+ "configure" can be used to override this default. */
+#undef POSIX_MALLOC_THRESHOLD
+
+/* Define to 1 if you have the ANSI C header files. */
+#undef STDC_HEADERS
+
+/* Define to enable support for Unicode properties */
+#undef SUPPORT_UCP
+
+/* Define to enable support for the UTF-8 Unicode encoding. */
+#undef SUPPORT_UTF8
+
+/* Version number of package */
+#undef VERSION
+
+/* Define to empty if `const' does not conform to ANSI C. */
+#undef const
+
+/* Define to `unsigned int' if <sys/types.h> does not define. */
+#undef size_t
diff --git a/src/third_party/pcre-7.4/dftables.c b/src/third_party/pcre-7.4/dftables.c
new file mode 100644
index 00000000000..67bca539fa7
--- /dev/null
+++ b/src/third_party/pcre-7.4/dftables.c
@@ -0,0 +1,199 @@
+/*************************************************
+* Perl-Compatible Regular Expressions *
+*************************************************/
+
+/* PCRE is a library of functions to support regular expressions whose syntax
+and semantics are as close as possible to those of the Perl 5 language.
+
+ Written by Philip Hazel
+ Copyright (c) 1997-2007 University of Cambridge
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+
+/* This is a freestanding support program to generate a file containing
+character tables for PCRE. The tables are built according to the current
+locale. Now that pcre_maketables is a function visible to the outside world, we
+make use of its code from here in order to be consistent. */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <ctype.h>
+#include <stdio.h>
+#include <string.h>
+#include <locale.h>
+
+#include "pcre_internal.h"
+
+#define DFTABLES /* pcre_maketables.c notices this */
+#include "pcre_maketables.c"
+
+
+int main(int argc, char **argv)
+{
+FILE *f;
+int i = 1;
+const unsigned char *tables;
+const unsigned char *base_of_tables;
+
+/* By default, the default C locale is used rather than what the building user
+happens to have set. However, if the -L option is given, set the locale from
+the LC_xxx environment variables. */
+
+if (argc > 1 && strcmp(argv[1], "-L") == 0)
+ {
+ setlocale(LC_ALL, ""); /* Set from environment variables */
+ i++;
+ }
+
+if (argc < i + 1)
+ {
+ fprintf(stderr, "dftables: one filename argument is required\n");
+ return 1;
+ }
+
+tables = pcre_maketables();
+base_of_tables = tables;
+
+f = fopen(argv[i], "wb");
+if (f == NULL)
+ {
+ fprintf(stderr, "dftables: failed to open %s for writing\n", argv[1]);
+ return 1;
+ }
+
+/* There are several fprintf() calls here, because gcc in pedantic mode
+complains about the very long string otherwise. */
+
+fprintf(f,
+ "/*************************************************\n"
+ "* Perl-Compatible Regular Expressions *\n"
+ "*************************************************/\n\n"
+ "/* This file was automatically written by the dftables auxiliary\n"
+ "program. It contains character tables that are used when no external\n"
+ "tables are passed to PCRE by the application that calls it. The tables\n"
+ "are used only for characters whose code values are less than 256.\n\n");
+fprintf(f,
+ "The following #includes are present because without them gcc 4.x may remove\n"
+ "the array definition from the final binary if PCRE is built into a static\n"
+ "library and dead code stripping is activated. This leads to link errors.\n"
+ "Pulling in the header ensures that the array gets flagged as \"someone\n"
+ "outside this compilation unit might reference this\" and so it will always\n"
+ "be supplied to the linker. */\n\n"
+ "#ifdef HAVE_CONFIG_H\n"
+ "#include \"config.h\"\n"
+ "#endif\n\n"
+ "#include \"pcre_internal.h\"\n\n");
+fprintf(f,
+ "const unsigned char _pcre_default_tables[] = {\n\n"
+ "/* This table is a lower casing table. */\n\n");
+
+fprintf(f, " ");
+for (i = 0; i < 256; i++)
+ {
+ if ((i & 7) == 0 && i != 0) fprintf(f, "\n ");
+ fprintf(f, "%3d", *tables++);
+ if (i != 255) fprintf(f, ",");
+ }
+fprintf(f, ",\n\n");
+
+fprintf(f, "/* This table is a case flipping table. */\n\n");
+
+fprintf(f, " ");
+for (i = 0; i < 256; i++)
+ {
+ if ((i & 7) == 0 && i != 0) fprintf(f, "\n ");
+ fprintf(f, "%3d", *tables++);
+ if (i != 255) fprintf(f, ",");
+ }
+fprintf(f, ",\n\n");
+
+fprintf(f,
+ "/* This table contains bit maps for various character classes.\n"
+ "Each map is 32 bytes long and the bits run from the least\n"
+ "significant end of each byte. The classes that have their own\n"
+ "maps are: space, xdigit, digit, upper, lower, word, graph\n"
+ "print, punct, and cntrl. Other classes are built from combinations. */\n\n");
+
+fprintf(f, " ");
+for (i = 0; i < cbit_length; i++)
+ {
+ if ((i & 7) == 0 && i != 0)
+ {
+ if ((i & 31) == 0) fprintf(f, "\n");
+ fprintf(f, "\n ");
+ }
+ fprintf(f, "0x%02x", *tables++);
+ if (i != cbit_length - 1) fprintf(f, ",");
+ }
+fprintf(f, ",\n\n");
+
+fprintf(f,
+ "/* This table identifies various classes of character by individual bits:\n"
+ " 0x%02x white space character\n"
+ " 0x%02x letter\n"
+ " 0x%02x decimal digit\n"
+ " 0x%02x hexadecimal digit\n"
+ " 0x%02x alphanumeric or '_'\n"
+ " 0x%02x regular expression metacharacter or binary zero\n*/\n\n",
+ ctype_space, ctype_letter, ctype_digit, ctype_xdigit, ctype_word,
+ ctype_meta);
+
+fprintf(f, " ");
+for (i = 0; i < 256; i++)
+ {
+ if ((i & 7) == 0 && i != 0)
+ {
+ fprintf(f, " /* ");
+ if (isprint(i-8)) fprintf(f, " %c -", i-8);
+ else fprintf(f, "%3d-", i-8);
+ if (isprint(i-1)) fprintf(f, " %c ", i-1);
+ else fprintf(f, "%3d", i-1);
+ fprintf(f, " */\n ");
+ }
+ fprintf(f, "0x%02x", *tables++);
+ if (i != 255) fprintf(f, ",");
+ }
+
+fprintf(f, "};/* ");
+if (isprint(i-8)) fprintf(f, " %c -", i-8);
+ else fprintf(f, "%3d-", i-8);
+if (isprint(i-1)) fprintf(f, " %c ", i-1);
+ else fprintf(f, "%3d", i-1);
+fprintf(f, " */\n\n/* End of pcre_chartables.c */\n");
+
+fclose(f);
+free((void *)base_of_tables);
+return 0;
+}
+
+/* End of dftables.c */
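
dftables only regenerates pcre_chartables.c at build time; an application that wants locale-specific tables without rebuilding the library can call pcre_maketables() directly and pass the result to pcre_compile(). A hedged sketch (the helper and the locale name are illustrative):

#include <locale.h>
#include <pcre.h>

/* Sketch: build character tables for a locale at runtime instead of via dftables. */
pcre *compile_in_locale(const char *pattern, const char *locale_name,
                        const char **err, int *erroffset)
{
    setlocale(LC_CTYPE, locale_name);                 /* e.g. "fr_FR" -- illustrative */
    const unsigned char *tables = pcre_maketables();  /* obtained via pcre_malloc */
    return pcre_compile(pattern, 0, err, erroffset, tables);
    /* The tables must outlive every pattern compiled with them; free them with
       pcre_free() only once nothing references them any more. */
}
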
diff --git a/src/third_party/pcre-7.4/pcre.h b/src/third_party/pcre-7.4/pcre.h
new file mode 100644
index 00000000000..701699b61a8
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcre.h
@@ -0,0 +1,304 @@
+/*************************************************
+* Perl-Compatible Regular Expressions *
+*************************************************/
+
+/* This is the public header file for the PCRE library, to be #included by
+applications that call the PCRE functions.
+
+ Copyright (c) 1997-2007 University of Cambridge
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+#ifndef _PCRE_H
+#define _PCRE_H
+
+/* The current PCRE version information. */
+
+#define PCRE_MAJOR 7
+#define PCRE_MINOR 4
+#define PCRE_PRERELEASE
+#define PCRE_DATE 2007-09-21
+
+/* When an application links to a PCRE DLL in Windows, the symbols that are
+imported have to be identified as such. When building PCRE, the appropriate
+export setting is defined in pcre_internal.h, which includes this file. So we
+don't change existing definitions of PCRE_EXP_DECL and PCRECPP_EXP_DECL. */
+
+/*#if defined(_WIN32) && !defined(PCRE_STATIC)
+#error why are we here?
+# ifndef PCRE_EXP_DECL
+# define PCRE_EXP_DECL extern __declspec(dllimport)
+# endif
+# ifdef __cplusplus
+# ifndef PCRECPP_EXP_DECL
+# define PCRECPP_EXP_DECL extern __declspec(dllimport)
+# endif
+# ifndef PCRECPP_EXP_DEFN
+# define PCRECPP_EXP_DEFN __declspec(dllimport)
+# endif
+# endif
+#endif*/
+
+/* By default, we use the standard "extern" declarations. */
+
+#ifndef PCRE_EXP_DECL
+# ifdef __cplusplus
+# define PCRE_EXP_DECL extern "C"
+# else
+# define PCRE_EXP_DECL extern
+# endif
+#endif
+
+#ifdef __cplusplus
+# ifndef PCRECPP_EXP_DECL
+# define PCRECPP_EXP_DECL extern
+# endif
+# ifndef PCRECPP_EXP_DEFN
+# define PCRECPP_EXP_DEFN
+# endif
+#endif
+
+/* Have to include stdlib.h in order to ensure that size_t is defined;
+it is needed here for malloc. */
+
+#include <stdlib.h>
+
+/* Allow for C++ users */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Options */
+
+#define PCRE_CASELESS 0x00000001
+#define PCRE_MULTILINE 0x00000002
+#define PCRE_DOTALL 0x00000004
+#define PCRE_EXTENDED 0x00000008
+#define PCRE_ANCHORED 0x00000010
+#define PCRE_DOLLAR_ENDONLY 0x00000020
+#define PCRE_EXTRA 0x00000040
+#define PCRE_NOTBOL 0x00000080
+#define PCRE_NOTEOL 0x00000100
+#define PCRE_UNGREEDY 0x00000200
+#define PCRE_NOTEMPTY 0x00000400
+#define PCRE_UTF8 0x00000800
+#define PCRE_NO_AUTO_CAPTURE 0x00001000
+#define PCRE_NO_UTF8_CHECK 0x00002000
+#define PCRE_AUTO_CALLOUT 0x00004000
+#define PCRE_PARTIAL 0x00008000
+#define PCRE_DFA_SHORTEST 0x00010000
+#define PCRE_DFA_RESTART 0x00020000
+#define PCRE_FIRSTLINE 0x00040000
+#define PCRE_DUPNAMES 0x00080000
+#define PCRE_NEWLINE_CR 0x00100000
+#define PCRE_NEWLINE_LF 0x00200000
+#define PCRE_NEWLINE_CRLF 0x00300000
+#define PCRE_NEWLINE_ANY 0x00400000
+#define PCRE_NEWLINE_ANYCRLF 0x00500000
+#define PCRE_BSR_ANYCRLF 0x00800000
+#define PCRE_BSR_UNICODE 0x01000000
+
+/* Exec-time and get/set-time error codes */
+
+#define PCRE_ERROR_NOMATCH (-1)
+#define PCRE_ERROR_NULL (-2)
+#define PCRE_ERROR_BADOPTION (-3)
+#define PCRE_ERROR_BADMAGIC (-4)
+#define PCRE_ERROR_UNKNOWN_OPCODE (-5)
+#define PCRE_ERROR_UNKNOWN_NODE (-5) /* For backward compatibility */
+#define PCRE_ERROR_NOMEMORY (-6)
+#define PCRE_ERROR_NOSUBSTRING (-7)
+#define PCRE_ERROR_MATCHLIMIT (-8)
+#define PCRE_ERROR_CALLOUT (-9) /* Never used by PCRE itself */
+#define PCRE_ERROR_BADUTF8 (-10)
+#define PCRE_ERROR_BADUTF8_OFFSET (-11)
+#define PCRE_ERROR_PARTIAL (-12)
+#define PCRE_ERROR_BADPARTIAL (-13)
+#define PCRE_ERROR_INTERNAL (-14)
+#define PCRE_ERROR_BADCOUNT (-15)
+#define PCRE_ERROR_DFA_UITEM (-16)
+#define PCRE_ERROR_DFA_UCOND (-17)
+#define PCRE_ERROR_DFA_UMLIMIT (-18)
+#define PCRE_ERROR_DFA_WSSIZE (-19)
+#define PCRE_ERROR_DFA_RECURSE (-20)
+#define PCRE_ERROR_RECURSIONLIMIT (-21)
+#define PCRE_ERROR_NULLWSLIMIT (-22) /* No longer actually used */
+#define PCRE_ERROR_BADNEWLINE (-23)
+
+/* Request types for pcre_fullinfo() */
+
+#define PCRE_INFO_OPTIONS 0
+#define PCRE_INFO_SIZE 1
+#define PCRE_INFO_CAPTURECOUNT 2
+#define PCRE_INFO_BACKREFMAX 3
+#define PCRE_INFO_FIRSTBYTE 4
+#define PCRE_INFO_FIRSTCHAR 4 /* For backwards compatibility */
+#define PCRE_INFO_FIRSTTABLE 5
+#define PCRE_INFO_LASTLITERAL 6
+#define PCRE_INFO_NAMEENTRYSIZE 7
+#define PCRE_INFO_NAMECOUNT 8
+#define PCRE_INFO_NAMETABLE 9
+#define PCRE_INFO_STUDYSIZE 10
+#define PCRE_INFO_DEFAULT_TABLES 11
+#define PCRE_INFO_OKPARTIAL 12
+#define PCRE_INFO_JCHANGED 13
+#define PCRE_INFO_HASCRORLF 14
+
+/* Request types for pcre_config(). Do not re-arrange, in order to remain
+compatible. */
+
+#define PCRE_CONFIG_UTF8 0
+#define PCRE_CONFIG_NEWLINE 1
+#define PCRE_CONFIG_LINK_SIZE 2
+#define PCRE_CONFIG_POSIX_MALLOC_THRESHOLD 3
+#define PCRE_CONFIG_MATCH_LIMIT 4
+#define PCRE_CONFIG_STACKRECURSE 5
+#define PCRE_CONFIG_UNICODE_PROPERTIES 6
+#define PCRE_CONFIG_MATCH_LIMIT_RECURSION 7
+#define PCRE_CONFIG_BSR 8
+
+/* Bit flags for the pcre_extra structure. Do not re-arrange or redefine
+these bits, just add new ones on the end, in order to remain compatible. */
+
+#define PCRE_EXTRA_STUDY_DATA 0x0001
+#define PCRE_EXTRA_MATCH_LIMIT 0x0002
+#define PCRE_EXTRA_CALLOUT_DATA 0x0004
+#define PCRE_EXTRA_TABLES 0x0008
+#define PCRE_EXTRA_MATCH_LIMIT_RECURSION 0x0010
+
+/* Types */
+
+struct real_pcre; /* declaration; the definition is private */
+typedef struct real_pcre pcre;
+
+/* When PCRE is compiled as a C++ library, the subject pointer type can be
+replaced with a custom type. For conventional use, the public interface is a
+const char *. */
+
+#ifndef PCRE_SPTR
+#define PCRE_SPTR const char *
+#endif
+
+/* The structure for passing additional data to pcre_exec(). This is defined in
+such a way as to be extensible. Always add new fields at the end, in order to
+remain compatible. */
+
+typedef struct pcre_extra {
+ unsigned long int flags; /* Bits for which fields are set */
+ void *study_data; /* Opaque data from pcre_study() */
+ unsigned long int match_limit; /* Maximum number of calls to match() */
+ void *callout_data; /* Data passed back in callouts */
+ const unsigned char *tables; /* Pointer to character tables */
+ unsigned long int match_limit_recursion; /* Max recursive calls to match() */
+} pcre_extra;
+
+/* The structure for passing out data via the pcre_callout_function. We use a
+structure so that new fields can be added on the end in future versions,
+without changing the API of the function, thereby allowing old clients to work
+without modification. */
+
+typedef struct pcre_callout_block {
+ int version; /* Identifies version of block */
+ /* ------------------------ Version 0 ------------------------------- */
+ int callout_number; /* Number compiled into pattern */
+ int *offset_vector; /* The offset vector */
+ PCRE_SPTR subject; /* The subject being matched */
+ int subject_length; /* The length of the subject */
+ int start_match; /* Offset to start of this match attempt */
+ int current_position; /* Where we currently are in the subject */
+ int capture_top; /* Max current capture */
+ int capture_last; /* Most recently closed capture */
+ void *callout_data; /* Data passed in with the call */
+ /* ------------------- Added for Version 1 -------------------------- */
+ int pattern_position; /* Offset to next item in the pattern */
+ int next_item_length; /* Length of next item in the pattern */
+ /* ------------------------------------------------------------------ */
+} pcre_callout_block;
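
The block above is what a user-supplied callout receives. A minimal sketch of wiring one up through the pcre_callout pointer declared just below (the trace function is illustrative; callouts are triggered by (?Cn) items in a pattern, or everywhere when PCRE_AUTO_CALLOUT is used):

#include <stdio.h>
#include <pcre.h>

static int trace_callout(pcre_callout_block *cb)
{
    fprintf(stderr, "callout %d at subject offset %d\n",
            cb->callout_number, cb->current_position);
    return 0;   /* 0 continues matching; >0 fails at this point; <0 aborts with that error */
}

void install_trace_callout(void)
{
    pcre_callout = trace_callout;
}
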
+
+/* Indirection for store get and free functions. These can be set to
+alternative malloc/free functions if required. Special ones are used in the
+non-recursive case for "frames". There is also an optional callout function
+that is triggered by the (?) regex item. For Virtual Pascal, these definitions
+have to take another form. */
+
+#ifndef VPCOMPAT
+PCRE_EXP_DECL void *(*pcre_malloc)(size_t);
+PCRE_EXP_DECL void (*pcre_free)(void *);
+PCRE_EXP_DECL void *(*pcre_stack_malloc)(size_t);
+PCRE_EXP_DECL void (*pcre_stack_free)(void *);
+PCRE_EXP_DECL int (*pcre_callout)(pcre_callout_block *);
+#else /* VPCOMPAT */
+PCRE_EXP_DECL void *pcre_malloc(size_t);
+PCRE_EXP_DECL void pcre_free(void *);
+PCRE_EXP_DECL void *pcre_stack_malloc(size_t);
+PCRE_EXP_DECL void pcre_stack_free(void *);
+PCRE_EXP_DECL int pcre_callout(pcre_callout_block *);
+#endif /* VPCOMPAT */
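
In the normal (non-VPCOMPAT) build these are plain function pointers, so a host application can swap in its own allocators before compiling any pattern. A small sketch with illustrative bookkeeping:

#include <stdlib.h>
#include <pcre.h>

static size_t pcre_bytes_requested = 0;   /* illustrative counter, not part of PCRE */

static void *counting_malloc(size_t n) { pcre_bytes_requested += n; return malloc(n); }
static void  counting_free(void *p)    { free(p); }

void install_counting_allocators(void)
{
    pcre_malloc = counting_malloc;
    pcre_free   = counting_free;
}
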
+
+/* Exported PCRE functions */
+
+PCRE_EXP_DECL pcre *pcre_compile(const char *, int, const char **, int *,
+ const unsigned char *);
+PCRE_EXP_DECL pcre *pcre_compile2(const char *, int, int *, const char **,
+ int *, const unsigned char *);
+PCRE_EXP_DECL int pcre_config(int, void *);
+PCRE_EXP_DECL int pcre_copy_named_substring(const pcre *, const char *,
+ int *, int, const char *, char *, int);
+PCRE_EXP_DECL int pcre_copy_substring(const char *, int *, int, int, char *,
+ int);
+PCRE_EXP_DECL int pcre_dfa_exec(const pcre *, const pcre_extra *,
+ const char *, int, int, int, int *, int , int *, int);
+PCRE_EXP_DECL int pcre_exec(const pcre *, const pcre_extra *, PCRE_SPTR,
+ int, int, int, int *, int);
+PCRE_EXP_DECL void pcre_free_substring(const char *);
+PCRE_EXP_DECL void pcre_free_substring_list(const char **);
+PCRE_EXP_DECL int pcre_fullinfo(const pcre *, const pcre_extra *, int,
+ void *);
+PCRE_EXP_DECL int pcre_get_named_substring(const pcre *, const char *,
+ int *, int, const char *, const char **);
+PCRE_EXP_DECL int pcre_get_stringnumber(const pcre *, const char *);
+PCRE_EXP_DECL int pcre_get_stringtable_entries(const pcre *, const char *,
+ char **, char **);
+PCRE_EXP_DECL int pcre_get_substring(const char *, int *, int, int,
+ const char **);
+PCRE_EXP_DECL int pcre_get_substring_list(const char *, int *, int,
+ const char ***);
+PCRE_EXP_DECL int pcre_info(const pcre *, int *, int *);
+PCRE_EXP_DECL const unsigned char *pcre_maketables(void);
+PCRE_EXP_DECL int pcre_refcount(pcre *, int);
+PCRE_EXP_DECL pcre_extra *pcre_study(const pcre *, int, const char **);
+PCRE_EXP_DECL const char *pcre_version(void);
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* End of pcre.h */
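
Putting the exported functions together, a minimal end-to-end sketch of the API this header exposes (the pattern, subject, and buffer size are illustrative):

#include <stdio.h>
#include <string.h>
#include <pcre.h>

int main(void)
{
    const char *error;
    int erroffset;
    int ovector[30];                      /* room for 10 captures: 3 ints each */
    const char *subject = "From: someone@example.com";

    pcre *re = pcre_compile("From:\\s+(\\S+)", PCRE_CASELESS,
                            &error, &erroffset, NULL);
    if (re == NULL) {
        fprintf(stderr, "compile failed at offset %d: %s\n", erroffset, error);
        return 1;
    }

    int rc = pcre_exec(re, NULL, subject, (int)strlen(subject), 0, 0, ovector, 30);
    if (rc == PCRE_ERROR_NOMATCH) {
        printf("no match\n");
    } else if (rc > 0) {
        const char *addr;
        if (pcre_get_substring(subject, ovector, rc, 1, &addr) >= 0) {
            printf("captured: %s\n", addr);
            pcre_free_substring(addr);
        }
    }

    pcre_free(re);
    return 0;
}
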
diff --git a/src/third_party/pcre-7.4/pcre.h.generic b/src/third_party/pcre-7.4/pcre.h.generic
new file mode 100644
index 00000000000..58a83c369a6
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcre.h.generic
@@ -0,0 +1,303 @@
+/*************************************************
+* Perl-Compatible Regular Expressions *
+*************************************************/
+
+/* This is the public header file for the PCRE library, to be #included by
+applications that call the PCRE functions.
+
+ Copyright (c) 1997-2007 University of Cambridge
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+#ifndef _PCRE_H
+#define _PCRE_H
+
+/* The current PCRE version information. */
+
+#define PCRE_MAJOR 7
+#define PCRE_MINOR 4
+#define PCRE_PRERELEASE
+#define PCRE_DATE 2007-09-21
+
+/* When an application links to a PCRE DLL in Windows, the symbols that are
+imported have to be identified as such. When building PCRE, the appropriate
+export setting is defined in pcre_internal.h, which includes this file. So we
+don't change existing definitions of PCRE_EXP_DECL and PCRECPP_EXP_DECL. */
+
+#if defined(_WIN32) && !defined(PCRE_STATIC)
+# ifndef PCRE_EXP_DECL
+# define PCRE_EXP_DECL extern __declspec(dllimport)
+# endif
+# ifdef __cplusplus
+# ifndef PCRECPP_EXP_DECL
+# define PCRECPP_EXP_DECL extern __declspec(dllimport)
+# endif
+# ifndef PCRECPP_EXP_DEFN
+# define PCRECPP_EXP_DEFN __declspec(dllimport)
+# endif
+# endif
+#endif
+
+/* By default, we use the standard "extern" declarations. */
+
+#ifndef PCRE_EXP_DECL
+# ifdef __cplusplus
+# define PCRE_EXP_DECL extern "C"
+# else
+# define PCRE_EXP_DECL extern
+# endif
+#endif
+
+#ifdef __cplusplus
+# ifndef PCRECPP_EXP_DECL
+# define PCRECPP_EXP_DECL extern
+# endif
+# ifndef PCRECPP_EXP_DEFN
+# define PCRECPP_EXP_DEFN
+# endif
+#endif
+
+/* Have to include stdlib.h in order to ensure that size_t is defined;
+it is needed here for malloc. */
+
+#include <stdlib.h>
+
+/* Allow for C++ users */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Options */
+
+#define PCRE_CASELESS 0x00000001
+#define PCRE_MULTILINE 0x00000002
+#define PCRE_DOTALL 0x00000004
+#define PCRE_EXTENDED 0x00000008
+#define PCRE_ANCHORED 0x00000010
+#define PCRE_DOLLAR_ENDONLY 0x00000020
+#define PCRE_EXTRA 0x00000040
+#define PCRE_NOTBOL 0x00000080
+#define PCRE_NOTEOL 0x00000100
+#define PCRE_UNGREEDY 0x00000200
+#define PCRE_NOTEMPTY 0x00000400
+#define PCRE_UTF8 0x00000800
+#define PCRE_NO_AUTO_CAPTURE 0x00001000
+#define PCRE_NO_UTF8_CHECK 0x00002000
+#define PCRE_AUTO_CALLOUT 0x00004000
+#define PCRE_PARTIAL 0x00008000
+#define PCRE_DFA_SHORTEST 0x00010000
+#define PCRE_DFA_RESTART 0x00020000
+#define PCRE_FIRSTLINE 0x00040000
+#define PCRE_DUPNAMES 0x00080000
+#define PCRE_NEWLINE_CR 0x00100000
+#define PCRE_NEWLINE_LF 0x00200000
+#define PCRE_NEWLINE_CRLF 0x00300000
+#define PCRE_NEWLINE_ANY 0x00400000
+#define PCRE_NEWLINE_ANYCRLF 0x00500000
+#define PCRE_BSR_ANYCRLF 0x00800000
+#define PCRE_BSR_UNICODE 0x01000000
+
+/* Exec-time and get/set-time error codes */
+
+#define PCRE_ERROR_NOMATCH (-1)
+#define PCRE_ERROR_NULL (-2)
+#define PCRE_ERROR_BADOPTION (-3)
+#define PCRE_ERROR_BADMAGIC (-4)
+#define PCRE_ERROR_UNKNOWN_OPCODE (-5)
+#define PCRE_ERROR_UNKNOWN_NODE (-5) /* For backward compatibility */
+#define PCRE_ERROR_NOMEMORY (-6)
+#define PCRE_ERROR_NOSUBSTRING (-7)
+#define PCRE_ERROR_MATCHLIMIT (-8)
+#define PCRE_ERROR_CALLOUT (-9) /* Never used by PCRE itself */
+#define PCRE_ERROR_BADUTF8 (-10)
+#define PCRE_ERROR_BADUTF8_OFFSET (-11)
+#define PCRE_ERROR_PARTIAL (-12)
+#define PCRE_ERROR_BADPARTIAL (-13)
+#define PCRE_ERROR_INTERNAL (-14)
+#define PCRE_ERROR_BADCOUNT (-15)
+#define PCRE_ERROR_DFA_UITEM (-16)
+#define PCRE_ERROR_DFA_UCOND (-17)
+#define PCRE_ERROR_DFA_UMLIMIT (-18)
+#define PCRE_ERROR_DFA_WSSIZE (-19)
+#define PCRE_ERROR_DFA_RECURSE (-20)
+#define PCRE_ERROR_RECURSIONLIMIT (-21)
+#define PCRE_ERROR_NULLWSLIMIT (-22) /* No longer actually used */
+#define PCRE_ERROR_BADNEWLINE (-23)
+
+/* Request types for pcre_fullinfo() */
+
+#define PCRE_INFO_OPTIONS 0
+#define PCRE_INFO_SIZE 1
+#define PCRE_INFO_CAPTURECOUNT 2
+#define PCRE_INFO_BACKREFMAX 3
+#define PCRE_INFO_FIRSTBYTE 4
+#define PCRE_INFO_FIRSTCHAR 4 /* For backwards compatibility */
+#define PCRE_INFO_FIRSTTABLE 5
+#define PCRE_INFO_LASTLITERAL 6
+#define PCRE_INFO_NAMEENTRYSIZE 7
+#define PCRE_INFO_NAMECOUNT 8
+#define PCRE_INFO_NAMETABLE 9
+#define PCRE_INFO_STUDYSIZE 10
+#define PCRE_INFO_DEFAULT_TABLES 11
+#define PCRE_INFO_OKPARTIAL 12
+#define PCRE_INFO_JCHANGED 13
+#define PCRE_INFO_HASCRORLF 14
+
+/* Request types for pcre_config(). Do not re-arrange, in order to remain
+compatible. */
+
+#define PCRE_CONFIG_UTF8 0
+#define PCRE_CONFIG_NEWLINE 1
+#define PCRE_CONFIG_LINK_SIZE 2
+#define PCRE_CONFIG_POSIX_MALLOC_THRESHOLD 3
+#define PCRE_CONFIG_MATCH_LIMIT 4
+#define PCRE_CONFIG_STACKRECURSE 5
+#define PCRE_CONFIG_UNICODE_PROPERTIES 6
+#define PCRE_CONFIG_MATCH_LIMIT_RECURSION 7
+#define PCRE_CONFIG_BSR 8
+
+/* Bit flags for the pcre_extra structure. Do not re-arrange or redefine
+these bits, just add new ones on the end, in order to remain compatible. */
+
+#define PCRE_EXTRA_STUDY_DATA 0x0001
+#define PCRE_EXTRA_MATCH_LIMIT 0x0002
+#define PCRE_EXTRA_CALLOUT_DATA 0x0004
+#define PCRE_EXTRA_TABLES 0x0008
+#define PCRE_EXTRA_MATCH_LIMIT_RECURSION 0x0010
+
+/* Types */
+
+struct real_pcre; /* declaration; the definition is private */
+typedef struct real_pcre pcre;
+
+/* When PCRE is compiled as a C++ library, the subject pointer type can be
+replaced with a custom type. For conventional use, the public interface is a
+const char *. */
+
+#ifndef PCRE_SPTR
+#define PCRE_SPTR const char *
+#endif
+
+/* The structure for passing additional data to pcre_exec(). This is defined in
+such a way as to be extensible. Always add new fields at the end, in order to
+remain compatible. */
+
+typedef struct pcre_extra {
+ unsigned long int flags; /* Bits for which fields are set */
+ void *study_data; /* Opaque data from pcre_study() */
+ unsigned long int match_limit; /* Maximum number of calls to match() */
+ void *callout_data; /* Data passed back in callouts */
+ const unsigned char *tables; /* Pointer to character tables */
+ unsigned long int match_limit_recursion; /* Max recursive calls to match() */
+} pcre_extra;
+
+/* The structure for passing out data via the pcre_callout_function. We use a
+structure so that new fields can be added on the end in future versions,
+without changing the API of the function, thereby allowing old clients to work
+without modification. */
+
+typedef struct pcre_callout_block {
+ int version; /* Identifies version of block */
+ /* ------------------------ Version 0 ------------------------------- */
+ int callout_number; /* Number compiled into pattern */
+ int *offset_vector; /* The offset vector */
+ PCRE_SPTR subject; /* The subject being matched */
+ int subject_length; /* The length of the subject */
+ int start_match; /* Offset to start of this match attempt */
+ int current_position; /* Where we currently are in the subject */
+ int capture_top; /* Max current capture */
+ int capture_last; /* Most recently closed capture */
+ void *callout_data; /* Data passed in with the call */
+ /* ------------------- Added for Version 1 -------------------------- */
+ int pattern_position; /* Offset to next item in the pattern */
+ int next_item_length; /* Length of next item in the pattern */
+ /* ------------------------------------------------------------------ */
+} pcre_callout_block;
+
+/* Indirection for store get and free functions. These can be set to
+alternative malloc/free functions if required. Special ones are used in the
+non-recursive case for "frames". There is also an optional callout function
+that is triggered by the (?) regex item. For Virtual Pascal, these definitions
+have to take another form. */
+
+#ifndef VPCOMPAT
+PCRE_EXP_DECL void *(*pcre_malloc)(size_t);
+PCRE_EXP_DECL void (*pcre_free)(void *);
+PCRE_EXP_DECL void *(*pcre_stack_malloc)(size_t);
+PCRE_EXP_DECL void (*pcre_stack_free)(void *);
+PCRE_EXP_DECL int (*pcre_callout)(pcre_callout_block *);
+#else /* VPCOMPAT */
+PCRE_EXP_DECL void *pcre_malloc(size_t);
+PCRE_EXP_DECL void pcre_free(void *);
+PCRE_EXP_DECL void *pcre_stack_malloc(size_t);
+PCRE_EXP_DECL void pcre_stack_free(void *);
+PCRE_EXP_DECL int pcre_callout(pcre_callout_block *);
+#endif /* VPCOMPAT */
+
+/* Exported PCRE functions */
+
+PCRE_EXP_DECL pcre *pcre_compile(const char *, int, const char **, int *,
+ const unsigned char *);
+PCRE_EXP_DECL pcre *pcre_compile2(const char *, int, int *, const char **,
+ int *, const unsigned char *);
+PCRE_EXP_DECL int pcre_config(int, void *);
+PCRE_EXP_DECL int pcre_copy_named_substring(const pcre *, const char *,
+ int *, int, const char *, char *, int);
+PCRE_EXP_DECL int pcre_copy_substring(const char *, int *, int, int, char *,
+ int);
+PCRE_EXP_DECL int pcre_dfa_exec(const pcre *, const pcre_extra *,
+ const char *, int, int, int, int *, int , int *, int);
+PCRE_EXP_DECL int pcre_exec(const pcre *, const pcre_extra *, PCRE_SPTR,
+ int, int, int, int *, int);
+PCRE_EXP_DECL void pcre_free_substring(const char *);
+PCRE_EXP_DECL void pcre_free_substring_list(const char **);
+PCRE_EXP_DECL int pcre_fullinfo(const pcre *, const pcre_extra *, int,
+ void *);
+PCRE_EXP_DECL int pcre_get_named_substring(const pcre *, const char *,
+ int *, int, const char *, const char **);
+PCRE_EXP_DECL int pcre_get_stringnumber(const pcre *, const char *);
+PCRE_EXP_DECL int pcre_get_stringtable_entries(const pcre *, const char *,
+ char **, char **);
+PCRE_EXP_DECL int pcre_get_substring(const char *, int *, int, int,
+ const char **);
+PCRE_EXP_DECL int pcre_get_substring_list(const char *, int *, int,
+ const char ***);
+PCRE_EXP_DECL int pcre_info(const pcre *, int *, int *);
+PCRE_EXP_DECL const unsigned char *pcre_maketables(void);
+PCRE_EXP_DECL int pcre_refcount(pcre *, int);
+PCRE_EXP_DECL pcre_extra *pcre_study(const pcre *, int, const char **);
+PCRE_EXP_DECL const char *pcre_version(void);
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* End of pcre.h */
diff --git a/src/third_party/pcre-7.4/pcre.h.in b/src/third_party/pcre-7.4/pcre.h.in
new file mode 100644
index 00000000000..8bebbb47696
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcre.h.in
@@ -0,0 +1,303 @@
+/*************************************************
+* Perl-Compatible Regular Expressions *
+*************************************************/
+
+/* This is the public header file for the PCRE library, to be #included by
+applications that call the PCRE functions.
+
+ Copyright (c) 1997-2007 University of Cambridge
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+#ifndef _PCRE_H
+#define _PCRE_H
+
+/* The current PCRE version information. */
+
+#define PCRE_MAJOR @PCRE_MAJOR@
+#define PCRE_MINOR @PCRE_MINOR@
+#define PCRE_PRERELEASE @PCRE_PRERELEASE@
+#define PCRE_DATE @PCRE_DATE@
+
+/* When an application links to a PCRE DLL in Windows, the symbols that are
+imported have to be identified as such. When building PCRE, the appropriate
+export setting is defined in pcre_internal.h, which includes this file. So we
+don't change existing definitions of PCRE_EXP_DECL and PCRECPP_EXP_DECL. */
+
+#if defined(_WIN32) && !defined(PCRE_STATIC)
+# ifndef PCRE_EXP_DECL
+# define PCRE_EXP_DECL extern __declspec(dllimport)
+# endif
+# ifdef __cplusplus
+# ifndef PCRECPP_EXP_DECL
+# define PCRECPP_EXP_DECL extern __declspec(dllimport)
+# endif
+# ifndef PCRECPP_EXP_DEFN
+# define PCRECPP_EXP_DEFN __declspec(dllimport)
+# endif
+# endif
+#endif
+
+/* By default, we use the standard "extern" declarations. */
+
+#ifndef PCRE_EXP_DECL
+# ifdef __cplusplus
+# define PCRE_EXP_DECL extern "C"
+# else
+# define PCRE_EXP_DECL extern
+# endif
+#endif
+
+#ifdef __cplusplus
+# ifndef PCRECPP_EXP_DECL
+# define PCRECPP_EXP_DECL extern
+# endif
+# ifndef PCRECPP_EXP_DEFN
+# define PCRECPP_EXP_DEFN
+# endif
+#endif
+
+/* Have to include stdlib.h in order to ensure that size_t is defined;
+it is needed here for malloc. */
+
+#include <stdlib.h>
+
+/* Allow for C++ users */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Options */
+
+#define PCRE_CASELESS 0x00000001
+#define PCRE_MULTILINE 0x00000002
+#define PCRE_DOTALL 0x00000004
+#define PCRE_EXTENDED 0x00000008
+#define PCRE_ANCHORED 0x00000010
+#define PCRE_DOLLAR_ENDONLY 0x00000020
+#define PCRE_EXTRA 0x00000040
+#define PCRE_NOTBOL 0x00000080
+#define PCRE_NOTEOL 0x00000100
+#define PCRE_UNGREEDY 0x00000200
+#define PCRE_NOTEMPTY 0x00000400
+#define PCRE_UTF8 0x00000800
+#define PCRE_NO_AUTO_CAPTURE 0x00001000
+#define PCRE_NO_UTF8_CHECK 0x00002000
+#define PCRE_AUTO_CALLOUT 0x00004000
+#define PCRE_PARTIAL 0x00008000
+#define PCRE_DFA_SHORTEST 0x00010000
+#define PCRE_DFA_RESTART 0x00020000
+#define PCRE_FIRSTLINE 0x00040000
+#define PCRE_DUPNAMES 0x00080000
+#define PCRE_NEWLINE_CR 0x00100000
+#define PCRE_NEWLINE_LF 0x00200000
+#define PCRE_NEWLINE_CRLF 0x00300000
+#define PCRE_NEWLINE_ANY 0x00400000
+#define PCRE_NEWLINE_ANYCRLF 0x00500000
+#define PCRE_BSR_ANYCRLF 0x00800000
+#define PCRE_BSR_UNICODE 0x01000000
+
+/* Exec-time and get/set-time error codes */
+
+#define PCRE_ERROR_NOMATCH (-1)
+#define PCRE_ERROR_NULL (-2)
+#define PCRE_ERROR_BADOPTION (-3)
+#define PCRE_ERROR_BADMAGIC (-4)
+#define PCRE_ERROR_UNKNOWN_OPCODE (-5)
+#define PCRE_ERROR_UNKNOWN_NODE (-5) /* For backward compatibility */
+#define PCRE_ERROR_NOMEMORY (-6)
+#define PCRE_ERROR_NOSUBSTRING (-7)
+#define PCRE_ERROR_MATCHLIMIT (-8)
+#define PCRE_ERROR_CALLOUT (-9) /* Never used by PCRE itself */
+#define PCRE_ERROR_BADUTF8 (-10)
+#define PCRE_ERROR_BADUTF8_OFFSET (-11)
+#define PCRE_ERROR_PARTIAL (-12)
+#define PCRE_ERROR_BADPARTIAL (-13)
+#define PCRE_ERROR_INTERNAL (-14)
+#define PCRE_ERROR_BADCOUNT (-15)
+#define PCRE_ERROR_DFA_UITEM (-16)
+#define PCRE_ERROR_DFA_UCOND (-17)
+#define PCRE_ERROR_DFA_UMLIMIT (-18)
+#define PCRE_ERROR_DFA_WSSIZE (-19)
+#define PCRE_ERROR_DFA_RECURSE (-20)
+#define PCRE_ERROR_RECURSIONLIMIT (-21)
+#define PCRE_ERROR_NULLWSLIMIT (-22) /* No longer actually used */
+#define PCRE_ERROR_BADNEWLINE (-23)
+
+/* Request types for pcre_fullinfo() */
+
+#define PCRE_INFO_OPTIONS 0
+#define PCRE_INFO_SIZE 1
+#define PCRE_INFO_CAPTURECOUNT 2
+#define PCRE_INFO_BACKREFMAX 3
+#define PCRE_INFO_FIRSTBYTE 4
+#define PCRE_INFO_FIRSTCHAR 4 /* For backwards compatibility */
+#define PCRE_INFO_FIRSTTABLE 5
+#define PCRE_INFO_LASTLITERAL 6
+#define PCRE_INFO_NAMEENTRYSIZE 7
+#define PCRE_INFO_NAMECOUNT 8
+#define PCRE_INFO_NAMETABLE 9
+#define PCRE_INFO_STUDYSIZE 10
+#define PCRE_INFO_DEFAULT_TABLES 11
+#define PCRE_INFO_OKPARTIAL 12
+#define PCRE_INFO_JCHANGED 13
+#define PCRE_INFO_HASCRORLF 14
+
+/* Request types for pcre_config(). Do not re-arrange, in order to remain
+compatible. */
+
+#define PCRE_CONFIG_UTF8 0
+#define PCRE_CONFIG_NEWLINE 1
+#define PCRE_CONFIG_LINK_SIZE 2
+#define PCRE_CONFIG_POSIX_MALLOC_THRESHOLD 3
+#define PCRE_CONFIG_MATCH_LIMIT 4
+#define PCRE_CONFIG_STACKRECURSE 5
+#define PCRE_CONFIG_UNICODE_PROPERTIES 6
+#define PCRE_CONFIG_MATCH_LIMIT_RECURSION 7
+#define PCRE_CONFIG_BSR 8
+
+/* Bit flags for the pcre_extra structure. Do not re-arrange or redefine
+these bits, just add new ones on the end, in order to remain compatible. */
+
+#define PCRE_EXTRA_STUDY_DATA 0x0001
+#define PCRE_EXTRA_MATCH_LIMIT 0x0002
+#define PCRE_EXTRA_CALLOUT_DATA 0x0004
+#define PCRE_EXTRA_TABLES 0x0008
+#define PCRE_EXTRA_MATCH_LIMIT_RECURSION 0x0010
+
+/* Types */
+
+struct real_pcre; /* declaration; the definition is private */
+typedef struct real_pcre pcre;
+
+/* When PCRE is compiled as a C++ library, the subject pointer type can be
+replaced with a custom type. For conventional use, the public interface is a
+const char *. */
+
+#ifndef PCRE_SPTR
+#define PCRE_SPTR const char *
+#endif
+
+/* The structure for passing additional data to pcre_exec(). This is defined in
+such a way as to be extensible. Always add new fields at the end, in order to
+remain compatible. */
+
+typedef struct pcre_extra {
+ unsigned long int flags; /* Bits for which fields are set */
+ void *study_data; /* Opaque data from pcre_study() */
+ unsigned long int match_limit; /* Maximum number of calls to match() */
+ void *callout_data; /* Data passed back in callouts */
+ const unsigned char *tables; /* Pointer to character tables */
+ unsigned long int match_limit_recursion; /* Max recursive calls to match() */
+} pcre_extra;
+
+/* The structure for passing out data via the pcre_callout_function. We use a
+structure so that new fields can be added on the end in future versions,
+without changing the API of the function, thereby allowing old clients to work
+without modification. */
+
+typedef struct pcre_callout_block {
+ int version; /* Identifies version of block */
+ /* ------------------------ Version 0 ------------------------------- */
+ int callout_number; /* Number compiled into pattern */
+ int *offset_vector; /* The offset vector */
+ PCRE_SPTR subject; /* The subject being matched */
+ int subject_length; /* The length of the subject */
+ int start_match; /* Offset to start of this match attempt */
+ int current_position; /* Where we currently are in the subject */
+ int capture_top; /* Max current capture */
+ int capture_last; /* Most recently closed capture */
+ void *callout_data; /* Data passed in with the call */
+ /* ------------------- Added for Version 1 -------------------------- */
+ int pattern_position; /* Offset to next item in the pattern */
+ int next_item_length; /* Length of next item in the pattern */
+ /* ------------------------------------------------------------------ */
+} pcre_callout_block;
+
+/* Indirection for store get and free functions. These can be set to
+alternative malloc/free functions if required. Special ones are used in the
+non-recursive case for "frames". There is also an optional callout function
+that is triggered by the (?) regex item. For Virtual Pascal, these definitions
+have to take another form. */
+
+#ifndef VPCOMPAT
+PCRE_EXP_DECL void *(*pcre_malloc)(size_t);
+PCRE_EXP_DECL void (*pcre_free)(void *);
+PCRE_EXP_DECL void *(*pcre_stack_malloc)(size_t);
+PCRE_EXP_DECL void (*pcre_stack_free)(void *);
+PCRE_EXP_DECL int (*pcre_callout)(pcre_callout_block *);
+#else /* VPCOMPAT */
+PCRE_EXP_DECL void *pcre_malloc(size_t);
+PCRE_EXP_DECL void pcre_free(void *);
+PCRE_EXP_DECL void *pcre_stack_malloc(size_t);
+PCRE_EXP_DECL void pcre_stack_free(void *);
+PCRE_EXP_DECL int pcre_callout(pcre_callout_block *);
+#endif /* VPCOMPAT */
+
+/* Exported PCRE functions */
+
+PCRE_EXP_DECL pcre *pcre_compile(const char *, int, const char **, int *,
+ const unsigned char *);
+PCRE_EXP_DECL pcre *pcre_compile2(const char *, int, int *, const char **,
+ int *, const unsigned char *);
+PCRE_EXP_DECL int pcre_config(int, void *);
+PCRE_EXP_DECL int pcre_copy_named_substring(const pcre *, const char *,
+ int *, int, const char *, char *, int);
+PCRE_EXP_DECL int pcre_copy_substring(const char *, int *, int, int, char *,
+ int);
+PCRE_EXP_DECL int pcre_dfa_exec(const pcre *, const pcre_extra *,
+ const char *, int, int, int, int *, int , int *, int);
+PCRE_EXP_DECL int pcre_exec(const pcre *, const pcre_extra *, PCRE_SPTR,
+ int, int, int, int *, int);
+PCRE_EXP_DECL void pcre_free_substring(const char *);
+PCRE_EXP_DECL void pcre_free_substring_list(const char **);
+PCRE_EXP_DECL int pcre_fullinfo(const pcre *, const pcre_extra *, int,
+ void *);
+PCRE_EXP_DECL int pcre_get_named_substring(const pcre *, const char *,
+ int *, int, const char *, const char **);
+PCRE_EXP_DECL int pcre_get_stringnumber(const pcre *, const char *);
+PCRE_EXP_DECL int pcre_get_stringtable_entries(const pcre *, const char *,
+ char **, char **);
+PCRE_EXP_DECL int pcre_get_substring(const char *, int *, int, int,
+ const char **);
+PCRE_EXP_DECL int pcre_get_substring_list(const char *, int *, int,
+ const char ***);
+PCRE_EXP_DECL int pcre_info(const pcre *, int *, int *);
+PCRE_EXP_DECL const unsigned char *pcre_maketables(void);
+PCRE_EXP_DECL int pcre_refcount(pcre *, int);
+PCRE_EXP_DECL pcre_extra *pcre_study(const pcre *, int, const char **);
+PCRE_EXP_DECL const char *pcre_version(void);
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* End of pcre.h */
diff --git a/src/third_party/pcre-7.4/pcre_chartables.c b/src/third_party/pcre-7.4/pcre_chartables.c
new file mode 100644
index 00000000000..ae45db0ca35
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcre_chartables.c
@@ -0,0 +1,198 @@
+/*************************************************
+* Perl-Compatible Regular Expressions *
+*************************************************/
+
+/* This file contains character tables that are used when no external tables
+are passed to PCRE by the application that calls it. The tables are used only
+for characters whose code values are less than 256.
+
+This is a default version of the tables that assumes ASCII encoding. A program
+called dftables (which is distributed with PCRE) can be used to build
+alternative versions of this file. This is necessary if you are running in an
+EBCDIC environment, or if you want to default to a different encoding, for
+example ISO-8859-1. When dftables is run, it creates these tables in the
+current locale. If PCRE is configured with --enable-rebuild-chartables, this
+happens automatically.
+
+The following #includes are present because without them gcc 4.x may remove the
+array definition from the final binary if PCRE is built into a static library
+and dead code stripping is activated. This leads to link errors. Pulling in the
+header ensures that the array gets flagged as "someone outside this compilation
+unit might reference this" and so it will always be supplied to the linker. */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "pcre_internal.h"
+
+const unsigned char _pcre_default_tables[] = {
+
+/* This table is a lower casing table. */
+
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 97, 98, 99,100,101,102,103,
+ 104,105,106,107,108,109,110,111,
+ 112,113,114,115,116,117,118,119,
+ 120,121,122, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99,100,101,102,103,
+ 104,105,106,107,108,109,110,111,
+ 112,113,114,115,116,117,118,119,
+ 120,121,122,123,124,125,126,127,
+ 128,129,130,131,132,133,134,135,
+ 136,137,138,139,140,141,142,143,
+ 144,145,146,147,148,149,150,151,
+ 152,153,154,155,156,157,158,159,
+ 160,161,162,163,164,165,166,167,
+ 168,169,170,171,172,173,174,175,
+ 176,177,178,179,180,181,182,183,
+ 184,185,186,187,188,189,190,191,
+ 192,193,194,195,196,197,198,199,
+ 200,201,202,203,204,205,206,207,
+ 208,209,210,211,212,213,214,215,
+ 216,217,218,219,220,221,222,223,
+ 224,225,226,227,228,229,230,231,
+ 232,233,234,235,236,237,238,239,
+ 240,241,242,243,244,245,246,247,
+ 248,249,250,251,252,253,254,255,
+
+/* This table is a case flipping table. */
+
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 97, 98, 99,100,101,102,103,
+ 104,105,106,107,108,109,110,111,
+ 112,113,114,115,116,117,118,119,
+ 120,121,122, 91, 92, 93, 94, 95,
+ 96, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90,123,124,125,126,127,
+ 128,129,130,131,132,133,134,135,
+ 136,137,138,139,140,141,142,143,
+ 144,145,146,147,148,149,150,151,
+ 152,153,154,155,156,157,158,159,
+ 160,161,162,163,164,165,166,167,
+ 168,169,170,171,172,173,174,175,
+ 176,177,178,179,180,181,182,183,
+ 184,185,186,187,188,189,190,191,
+ 192,193,194,195,196,197,198,199,
+ 200,201,202,203,204,205,206,207,
+ 208,209,210,211,212,213,214,215,
+ 216,217,218,219,220,221,222,223,
+ 224,225,226,227,228,229,230,231,
+ 232,233,234,235,236,237,238,239,
+ 240,241,242,243,244,245,246,247,
+ 248,249,250,251,252,253,254,255,
+
+/* This table contains bit maps for various character classes. Each map is 32
+bytes long and the bits run from the least significant end of each byte. The
+classes that have their own maps are: space, xdigit, digit, upper, lower, word,
+graph, print, punct, and cntrl. Other classes are built from combinations. */
+
+ 0x00,0x3e,0x00,0x00,0x01,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+
+ 0x00,0x00,0x00,0x00,0x00,0x00,0xff,0x03,
+ 0x7e,0x00,0x00,0x00,0x7e,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+
+ 0x00,0x00,0x00,0x00,0x00,0x00,0xff,0x03,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0xfe,0xff,0xff,0x07,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0xfe,0xff,0xff,0x07,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+
+ 0x00,0x00,0x00,0x00,0x00,0x00,0xff,0x03,
+ 0xfe,0xff,0xff,0x87,0xfe,0xff,0xff,0x07,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+
+ 0x00,0x00,0x00,0x00,0xfe,0xff,0xff,0xff,
+ 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x7f,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+
+ 0x00,0x00,0x00,0x00,0xff,0xff,0xff,0xff,
+ 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x7f,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+
+ 0x00,0x00,0x00,0x00,0xfe,0xff,0x00,0xfc,
+ 0x01,0x00,0x00,0xf8,0x01,0x00,0x00,0x78,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+
+ 0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+
+/* This table identifies various classes of character by individual bits:
+ 0x01 white space character
+ 0x02 letter
+ 0x04 decimal digit
+ 0x08 hexadecimal digit
+ 0x10 alphanumeric or '_'
+ 0x80 regular expression metacharacter or binary zero
+*/
+
+ 0x80,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 0- 7 */
+ 0x00,0x01,0x01,0x00,0x01,0x01,0x00,0x00, /* 8- 15 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 16- 23 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 24- 31 */
+ 0x01,0x00,0x00,0x00,0x80,0x00,0x00,0x00, /* - ' */
+ 0x80,0x80,0x80,0x80,0x00,0x00,0x80,0x00, /* ( - / */
+ 0x1c,0x1c,0x1c,0x1c,0x1c,0x1c,0x1c,0x1c, /* 0 - 7 */
+ 0x1c,0x1c,0x00,0x00,0x00,0x00,0x00,0x80, /* 8 - ? */
+ 0x00,0x1a,0x1a,0x1a,0x1a,0x1a,0x1a,0x12, /* @ - G */
+ 0x12,0x12,0x12,0x12,0x12,0x12,0x12,0x12, /* H - O */
+ 0x12,0x12,0x12,0x12,0x12,0x12,0x12,0x12, /* P - W */
+ 0x12,0x12,0x12,0x80,0x80,0x00,0x80,0x10, /* X - _ */
+ 0x00,0x1a,0x1a,0x1a,0x1a,0x1a,0x1a,0x12, /* ` - g */
+ 0x12,0x12,0x12,0x12,0x12,0x12,0x12,0x12, /* h - o */
+ 0x12,0x12,0x12,0x12,0x12,0x12,0x12,0x12, /* p - w */
+ 0x12,0x12,0x12,0x80,0x80,0x00,0x00,0x00, /* x -127 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 128-135 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 136-143 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 144-151 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 152-159 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 160-167 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 168-175 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 176-183 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 184-191 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 192-199 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 200-207 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 208-215 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 216-223 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 224-231 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 232-239 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 240-247 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00};/* 248-255 */
+
+/* End of pcre_chartables.c */
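A minimal sketch of how the tables above are consulted, assuming the layout in this file together with the conventional offsets from pcre_internal.h (lower-casing table at 0, case-flipping table at 256, the ten 32-byte class bitmaps at 512 in the order space, xdigit, digit, upper, lower, word, graph, print, punct, cntrl, and the ctype table at 832); the helper names are illustrative, not part of PCRE:

#include <stdio.h>

extern const unsigned char _pcre_default_tables[];

/* Test membership in one of the 32-byte class bitmaps. The bits run from the
least significant end of each byte, so character c maps to byte c/8, bit c%8. */
static int in_class(int class_index, unsigned char c)
{
const unsigned char *map = _pcre_default_tables + 512 + 32 * class_index;
return (map[c/8] & (1 << (c%8))) != 0;
}

int main(void)
{
const unsigned char *ctypes = _pcre_default_tables + 832;
printf("'7' in digit map:  %d\n", in_class(2, '7'));   /* 1 */
printf("'g' in xdigit map: %d\n", in_class(1, 'g'));   /* 0 */
printf("'_' ctype bits:    0x%02x\n", ctypes['_']);    /* 0x10: word character */
return 0;
}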
diff --git a/src/third_party/pcre-7.4/pcre_chartables.c.dist b/src/third_party/pcre-7.4/pcre_chartables.c.dist
new file mode 100644
index 00000000000..ae45db0ca35
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcre_chartables.c.dist
@@ -0,0 +1,198 @@
+/*************************************************
+* Perl-Compatible Regular Expressions *
+*************************************************/
+
+/* This file contains character tables that are used when no external tables
+are passed to PCRE by the application that calls it. The tables are used only
+for characters whose code values are less than 256.
+
+This is a default version of the tables that assumes ASCII encoding. A program
+called dftables (which is distributed with PCRE) can be used to build
+alternative versions of this file. This is necessary if you are running in an
+EBCDIC environment, or if you want to default to a different encoding, for
+example ISO-8859-1. When dftables is run, it creates these tables in the
+current locale. If PCRE is configured with --enable-rebuild-chartables, this
+happens automatically.
+
+The following #includes are present because without them gcc 4.x may remove the
+array definition from the final binary if PCRE is built into a static library
+and dead code stripping is activated. This leads to link errors. Pulling in the
+header ensures that the array gets flagged as "someone outside this compilation
+unit might reference this" and so it will always be supplied to the linker. */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "pcre_internal.h"
+
+const unsigned char _pcre_default_tables[] = {
+
+/* This table is a lower casing table. */
+
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 97, 98, 99,100,101,102,103,
+ 104,105,106,107,108,109,110,111,
+ 112,113,114,115,116,117,118,119,
+ 120,121,122, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99,100,101,102,103,
+ 104,105,106,107,108,109,110,111,
+ 112,113,114,115,116,117,118,119,
+ 120,121,122,123,124,125,126,127,
+ 128,129,130,131,132,133,134,135,
+ 136,137,138,139,140,141,142,143,
+ 144,145,146,147,148,149,150,151,
+ 152,153,154,155,156,157,158,159,
+ 160,161,162,163,164,165,166,167,
+ 168,169,170,171,172,173,174,175,
+ 176,177,178,179,180,181,182,183,
+ 184,185,186,187,188,189,190,191,
+ 192,193,194,195,196,197,198,199,
+ 200,201,202,203,204,205,206,207,
+ 208,209,210,211,212,213,214,215,
+ 216,217,218,219,220,221,222,223,
+ 224,225,226,227,228,229,230,231,
+ 232,233,234,235,236,237,238,239,
+ 240,241,242,243,244,245,246,247,
+ 248,249,250,251,252,253,254,255,
+
+/* This table is a case flipping table. */
+
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 97, 98, 99,100,101,102,103,
+ 104,105,106,107,108,109,110,111,
+ 112,113,114,115,116,117,118,119,
+ 120,121,122, 91, 92, 93, 94, 95,
+ 96, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90,123,124,125,126,127,
+ 128,129,130,131,132,133,134,135,
+ 136,137,138,139,140,141,142,143,
+ 144,145,146,147,148,149,150,151,
+ 152,153,154,155,156,157,158,159,
+ 160,161,162,163,164,165,166,167,
+ 168,169,170,171,172,173,174,175,
+ 176,177,178,179,180,181,182,183,
+ 184,185,186,187,188,189,190,191,
+ 192,193,194,195,196,197,198,199,
+ 200,201,202,203,204,205,206,207,
+ 208,209,210,211,212,213,214,215,
+ 216,217,218,219,220,221,222,223,
+ 224,225,226,227,228,229,230,231,
+ 232,233,234,235,236,237,238,239,
+ 240,241,242,243,244,245,246,247,
+ 248,249,250,251,252,253,254,255,
+
+/* This table contains bit maps for various character classes. Each map is 32
+bytes long and the bits run from the least significant end of each byte. The
+classes that have their own maps are: space, xdigit, digit, upper, lower, word,
+graph, print, punct, and cntrl. Other classes are built from combinations. */
+
+ 0x00,0x3e,0x00,0x00,0x01,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+
+ 0x00,0x00,0x00,0x00,0x00,0x00,0xff,0x03,
+ 0x7e,0x00,0x00,0x00,0x7e,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+
+ 0x00,0x00,0x00,0x00,0x00,0x00,0xff,0x03,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0xfe,0xff,0xff,0x07,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0xfe,0xff,0xff,0x07,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+
+ 0x00,0x00,0x00,0x00,0x00,0x00,0xff,0x03,
+ 0xfe,0xff,0xff,0x87,0xfe,0xff,0xff,0x07,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+
+ 0x00,0x00,0x00,0x00,0xfe,0xff,0xff,0xff,
+ 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x7f,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+
+ 0x00,0x00,0x00,0x00,0xff,0xff,0xff,0xff,
+ 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0x7f,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+
+ 0x00,0x00,0x00,0x00,0xfe,0xff,0x00,0xfc,
+ 0x01,0x00,0x00,0xf8,0x01,0x00,0x00,0x78,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+
+ 0xff,0xff,0xff,0xff,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x80,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
+
+/* This table identifies various classes of character by individual bits:
+ 0x01 white space character
+ 0x02 letter
+ 0x04 decimal digit
+ 0x08 hexadecimal digit
+ 0x10 alphanumeric or '_'
+ 0x80 regular expression metacharacter or binary zero
+*/
+
+ 0x80,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 0- 7 */
+ 0x00,0x01,0x01,0x00,0x01,0x01,0x00,0x00, /* 8- 15 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 16- 23 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 24- 31 */
+ 0x01,0x00,0x00,0x00,0x80,0x00,0x00,0x00, /* - ' */
+ 0x80,0x80,0x80,0x80,0x00,0x00,0x80,0x00, /* ( - / */
+ 0x1c,0x1c,0x1c,0x1c,0x1c,0x1c,0x1c,0x1c, /* 0 - 7 */
+ 0x1c,0x1c,0x00,0x00,0x00,0x00,0x00,0x80, /* 8 - ? */
+ 0x00,0x1a,0x1a,0x1a,0x1a,0x1a,0x1a,0x12, /* @ - G */
+ 0x12,0x12,0x12,0x12,0x12,0x12,0x12,0x12, /* H - O */
+ 0x12,0x12,0x12,0x12,0x12,0x12,0x12,0x12, /* P - W */
+ 0x12,0x12,0x12,0x80,0x80,0x00,0x80,0x10, /* X - _ */
+ 0x00,0x1a,0x1a,0x1a,0x1a,0x1a,0x1a,0x12, /* ` - g */
+ 0x12,0x12,0x12,0x12,0x12,0x12,0x12,0x12, /* h - o */
+ 0x12,0x12,0x12,0x12,0x12,0x12,0x12,0x12, /* p - w */
+ 0x12,0x12,0x12,0x80,0x80,0x00,0x00,0x00, /* x -127 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 128-135 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 136-143 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 144-151 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 152-159 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 160-167 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 168-175 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 176-183 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 184-191 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 192-199 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 200-207 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 208-215 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 216-223 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 224-231 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 232-239 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 240-247 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00};/* 248-255 */
+
+/* End of pcre_chartables.c */
diff --git a/src/third_party/pcre-7.4/pcre_compile.c b/src/third_party/pcre-7.4/pcre_compile.c
new file mode 100644
index 00000000000..3994781a587
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcre_compile.c
@@ -0,0 +1,6145 @@
+/*************************************************
+* Perl-Compatible Regular Expressions *
+*************************************************/
+
+/* PCRE is a library of functions to support regular expressions whose syntax
+and semantics are as close as possible to those of the Perl 5 language.
+
+ Written by Philip Hazel
+ Copyright (c) 1997-2007 University of Cambridge
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+
+/* This module contains the external function pcre_compile(), along with
+supporting internal functions that are not used by other modules. */
+
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#define NLBLOCK cd /* Block containing newline information */
+#define PSSTART start_pattern /* Field containing processed string start */
+#define PSEND end_pattern /* Field containing processed string end */
+
+#include "pcre_internal.h"
+
+
+/* When DEBUG is defined, we need the pcre_printint() function, which is also
+used by pcretest. DEBUG is not defined when building a production library. */
+
+#ifdef DEBUG
+#include "pcre_printint.src"
+#endif
+
+
+/* Macro for setting individual bits in class bitmaps. */
+
+#define SETBIT(a,b) a[b/8] |= (1 << (b%8))
+
+/* Maximum length value to check against when making sure that the integer that
+holds the compiled pattern length does not overflow. We make it a bit less than
+INT_MAX to allow for adding in group terminating bytes, so that we don't have
+to check them every time. */
+
+#define OFLOW_MAX (INT_MAX - 20)
+
+
+/*************************************************
+* Code parameters and static tables *
+*************************************************/
+
+/* This value specifies the size of stack workspace that is used during the
+first pre-compile phase that determines how much memory is required. The regex
+is partly compiled into this space, but the compiled parts are discarded as
+soon as they can be, so that hopefully there will never be an overrun. The code
+does, however, check for an overrun. The largest amount I've seen used is 218,
+so this number is very generous.
+
+The same workspace is used during the second, actual compile phase for
+remembering forward references to groups so that they can be filled in at the
+end. Each entry in this list occupies LINK_SIZE bytes, so even when LINK_SIZE
+is 4 there is plenty of room. */
+
+#define COMPILE_WORK_SIZE (4096)
+
+
+/* Table for handling escaped characters in the range '0'-'z'. Positive returns
+are simple data values; negative values are for special things like \d and so
+on. Zero means further processing is needed (for things like \x), or the escape
+is invalid. */
+
+#ifndef EBCDIC /* This is the "normal" table for ASCII systems */
+static const short int escapes[] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, /* 0 - 7 */
+ 0, 0, ':', ';', '<', '=', '>', '?', /* 8 - ? */
+ '@', -ESC_A, -ESC_B, -ESC_C, -ESC_D, -ESC_E, 0, -ESC_G, /* @ - G */
+-ESC_H, 0, 0, -ESC_K, 0, 0, 0, 0, /* H - O */
+-ESC_P, -ESC_Q, -ESC_R, -ESC_S, 0, 0, -ESC_V, -ESC_W, /* P - W */
+-ESC_X, 0, -ESC_Z, '[', '\\', ']', '^', '_', /* X - _ */
+ '`', 7, -ESC_b, 0, -ESC_d, ESC_e, ESC_f, 0, /* ` - g */
+-ESC_h, 0, 0, -ESC_k, 0, 0, ESC_n, 0, /* h - o */
+-ESC_p, 0, ESC_r, -ESC_s, ESC_tee, 0, -ESC_v, -ESC_w, /* p - w */
+ 0, 0, -ESC_z /* x - z */
+};
+
+#else /* This is the "abnormal" table for EBCDIC systems */
+static const short int escapes[] = {
+/* 48 */ 0, 0, 0, '.', '<', '(', '+', '|',
+/* 50 */ '&', 0, 0, 0, 0, 0, 0, 0,
+/* 58 */ 0, 0, '!', '$', '*', ')', ';', '~',
+/* 60 */ '-', '/', 0, 0, 0, 0, 0, 0,
+/* 68 */ 0, 0, '|', ',', '%', '_', '>', '?',
+/* 70 */ 0, 0, 0, 0, 0, 0, 0, 0,
+/* 78 */ 0, '`', ':', '#', '@', '\'', '=', '"',
+/* 80 */ 0, 7, -ESC_b, 0, -ESC_d, ESC_e, ESC_f, 0,
+/* 88 */-ESC_h, 0, 0, '{', 0, 0, 0, 0,
+/* 90 */ 0, 0, -ESC_k, 'l', 0, ESC_n, 0, -ESC_p,
+/* 98 */ 0, ESC_r, 0, '}', 0, 0, 0, 0,
+/* A0 */ 0, '~', -ESC_s, ESC_tee, 0,-ESC_v, -ESC_w, 0,
+/* A8 */ 0,-ESC_z, 0, 0, 0, '[', 0, 0,
+/* B0 */ 0, 0, 0, 0, 0, 0, 0, 0,
+/* B8 */ 0, 0, 0, 0, 0, ']', '=', '-',
+/* C0 */ '{',-ESC_A, -ESC_B, -ESC_C, -ESC_D,-ESC_E, 0, -ESC_G,
+/* C8 */-ESC_H, 0, 0, 0, 0, 0, 0, 0,
+/* D0 */ '}', 0, -ESC_K, 0, 0, 0, 0, -ESC_P,
+/* D8 */-ESC_Q,-ESC_R, 0, 0, 0, 0, 0, 0,
+/* E0 */ '\\', 0, -ESC_S, 0, 0,-ESC_V, -ESC_W, -ESC_X,
+/* E8 */ 0,-ESC_Z, 0, 0, 0, 0, 0, 0,
+/* F0 */ 0, 0, 0, 0, 0, 0, 0, 0,
+/* F8 */ 0, 0, 0, 0, 0, 0, 0, 0
+};
+#endif
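For example, reading the ASCII table above: \n indexes escapes['n' - '0'] and yields the positive ESC_n value (a newline data character), \d yields the negative value -ESC_d (the "decimal digit" special), and \c yields zero, signalling that further processing is needed by check_escape() below.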
+
+
+/* Table of special "verbs" like (*PRUNE). This is a short table, so it is
+searched linearly. Put all the names into a single string, in order to reduce
+the number of relocations when a shared library is dynamically linked. */
+
+typedef struct verbitem {
+ int len;
+ int op;
+} verbitem;
+
+static const char verbnames[] =
+ "ACCEPT\0"
+ "COMMIT\0"
+ "F\0"
+ "FAIL\0"
+ "PRUNE\0"
+ "SKIP\0"
+ "THEN";
+
+static verbitem verbs[] = {
+ { 6, OP_ACCEPT },
+ { 6, OP_COMMIT },
+ { 1, OP_FAIL },
+ { 4, OP_FAIL },
+ { 5, OP_PRUNE },
+ { 4, OP_SKIP },
+ { 4, OP_THEN }
+};
+
+static int verbcount = sizeof(verbs)/sizeof(verbitem);
+
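A hedged sketch (not PCRE's own matching code) of how a parser could look up a name such as PRUNE against the NUL-separated verbnames string and the parallel verbs table above; it assumes strncmp/strlen from <string.h>:

#include <string.h>

static int find_verb(const char *p, int len)
{
int i;
const char *name = verbnames;
for (i = 0; i < verbcount; i++)
  {
  if (len == verbs[i].len && strncmp(p, name, len) == 0) return verbs[i].op;
  name += strlen(name) + 1;        /* step past this name and its terminating NUL */
  }
return -1;                         /* not a recognized verb */
}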
+
+/* Tables of names of POSIX character classes and their lengths. The names are
+now all in a single string, to reduce the number of relocations when a shared
+library is dynamically loaded. The list of lengths is terminated by a zero
+length entry. The first three must be alpha, lower, upper, as this is assumed
+for handling case independence. */
+
+static const char posix_names[] =
+ "alpha\0" "lower\0" "upper\0" "alnum\0" "ascii\0" "blank\0"
+ "cntrl\0" "digit\0" "graph\0" "print\0" "punct\0" "space\0"
+ "word\0" "xdigit";
+
+static const uschar posix_name_lengths[] = {
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 4, 6, 0 };
+
+/* Table of class bit maps for each POSIX class. Each class is formed from a
+base map, with an optional addition or removal of another map. Then, for some
+classes, there is some additional tweaking: for [:blank:] the vertical space
+characters are removed, and for [:alpha:] and [:alnum:] the underscore
+character is removed. The triples in the table consist of the base map offset,
+second map offset or -1 if no second map, and a non-negative value for map
+addition or a negative value for map subtraction (if there are two maps). The
+absolute value of the third field has these meanings: 0 => no tweaking, 1 =>
+remove vertical space characters, 2 => remove underscore. */
+
+static const int posix_class_maps[] = {
+ cbit_word, cbit_digit, -2, /* alpha */
+ cbit_lower, -1, 0, /* lower */
+ cbit_upper, -1, 0, /* upper */
+ cbit_word, -1, 2, /* alnum - word without underscore */
+ cbit_print, cbit_cntrl, 0, /* ascii */
+ cbit_space, -1, 1, /* blank - a GNU extension */
+ cbit_cntrl, -1, 0, /* cntrl */
+ cbit_digit, -1, 0, /* digit */
+ cbit_graph, -1, 0, /* graph */
+ cbit_print, -1, 0, /* print */
+ cbit_punct, -1, 0, /* punct */
+ cbit_space, -1, 0, /* space */
+ cbit_word, -1, 0, /* word - a Perl extension */
+ cbit_xdigit,-1, 0 /* xdigit */
+};
+
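A sketch, under stated assumptions, of how one triple from posix_class_maps can be expanded into a finished 32-byte class map. It assumes cbits points at the class-bitmap block of the character tables and that the cbit_* names are byte offsets within that block (as defined in pcre_internal.h); the function is illustrative, not PCRE's actual code:

static void build_posix_map(const unsigned char *cbits, int class_index,
  unsigned char out[32])
{
int i;
const int *triple = posix_class_maps + 3 * class_index;

for (i = 0; i < 32; i++) out[i] = cbits[triple[0] + i];        /* base map */

if (triple[1] >= 0)                          /* optional second map */
  {
  if (triple[2] >= 0)                        /* non-negative third field: add */
    for (i = 0; i < 32; i++) out[i] |= cbits[triple[1] + i];
  else                                       /* negative third field: subtract */
    for (i = 0; i < 32; i++) out[i] &= ~cbits[triple[1] + i];
  }

switch ((triple[2] < 0)? -triple[2] : triple[2])
  {
  case 1:                                    /* [:blank:]: remove vertical space */
  for (i = 0x0a; i <= 0x0d; i++) out[i/8] &= ~(1 << (i%8));
  break;

  case 2:                                    /* remove underscore */
  out['_'/8] &= ~(1 << ('_'%8));
  break;
  }
}

With this reading, [:alpha:] (triple {cbit_word, cbit_digit, -2}) is the word map minus the digit map with underscore removed, and [:ascii:] (triple {cbit_print, cbit_cntrl, 0}) is the print map plus the cntrl map.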
+
+#define STRING(a) # a
+#define XSTRING(s) STRING(s)
+
+/* The texts of compile-time error messages. These are "char *" because they
+are passed to the outside world. Do not ever re-use any error number, because
+they are documented. Always add a new error instead. Messages marked DEAD below
+are no longer used. This used to be a table of strings, but in order to reduce
+the number of relocations needed when a shared library is loaded dynamically,
+it is now one long string. We cannot use a table of offsets, because the
+lengths of inserts such as XSTRING(MAX_NAME_SIZE) are not known. Instead, we
+simply count through to the one we want - this isn't a performance issue
+because these strings are used only when there is a compilation error. */
+
+static const char error_texts[] =
+ "no error\0"
+ "\\ at end of pattern\0"
+ "\\c at end of pattern\0"
+ "unrecognized character follows \\\0"
+ "numbers out of order in {} quantifier\0"
+ /* 5 */
+ "number too big in {} quantifier\0"
+ "missing terminating ] for character class\0"
+ "invalid escape sequence in character class\0"
+ "range out of order in character class\0"
+ "nothing to repeat\0"
+ /* 10 */
+ "operand of unlimited repeat could match the empty string\0" /** DEAD **/
+ "internal error: unexpected repeat\0"
+ "unrecognized character after (?\0"
+ "POSIX named classes are supported only within a class\0"
+ "missing )\0"
+ /* 15 */
+ "reference to non-existent subpattern\0"
+ "erroffset passed as NULL\0"
+ "unknown option bit(s) set\0"
+ "missing ) after comment\0"
+ "parentheses nested too deeply\0" /** DEAD **/
+ /* 20 */
+ "regular expression is too large\0"
+ "failed to get memory\0"
+ "unmatched parentheses\0"
+ "internal error: code overflow\0"
+ "unrecognized character after (?<\0"
+ /* 25 */
+ "lookbehind assertion is not fixed length\0"
+ "malformed number or name after (?(\0"
+ "conditional group contains more than two branches\0"
+ "assertion expected after (?(\0"
+ "(?R or (?[+-]digits must be followed by )\0"
+ /* 30 */
+ "unknown POSIX class name\0"
+ "POSIX collating elements are not supported\0"
+ "this version of PCRE is not compiled with PCRE_UTF8 support\0"
+ "spare error\0" /** DEAD **/
+ "character value in \\x{...} sequence is too large\0"
+ /* 35 */
+ "invalid condition (?(0)\0"
+ "\\C not allowed in lookbehind assertion\0"
+ "PCRE does not support \\L, \\l, \\N, \\U, or \\u\0"
+ "number after (?C is > 255\0"
+ "closing ) for (?C expected\0"
+ /* 40 */
+ "recursive call could loop indefinitely\0"
+ "unrecognized character after (?P\0"
+ "syntax error in subpattern name (missing terminator)\0"
+ "two named subpatterns have the same name\0"
+ "invalid UTF-8 string\0"
+ /* 45 */
+ "support for \\P, \\p, and \\X has not been compiled\0"
+ "malformed \\P or \\p sequence\0"
+ "unknown property name after \\P or \\p\0"
+ "subpattern name is too long (maximum " XSTRING(MAX_NAME_SIZE) " characters)\0"
+ "too many named subpatterns (maximum " XSTRING(MAX_NAME_COUNT) ")\0"
+ /* 50 */
+ "repeated subpattern is too long\0" /** DEAD **/
+ "octal value is greater than \\377 (not in UTF-8 mode)\0"
+ "internal error: overran compiling workspace\0"
+ "internal error: previously-checked referenced subpattern not found\0"
+ "DEFINE group contains more than one branch\0"
+ /* 55 */
+ "repeating a DEFINE group is not allowed\0"
+ "inconsistent NEWLINE options\0"
+ "\\g is not followed by a braced name or an optionally braced non-zero number\0"
+ "(?+ or (?- or (?(+ or (?(- must be followed by a non-zero number\0"
+ "(*VERB) with an argument is not supported\0"
+ /* 60 */
+ "(*VERB) not recognized\0"
+ "number is too big";
+
+
+/* Table to identify digits and hex digits. This is used when compiling
+patterns. Note that the tables in chartables are dependent on the locale, and
+may mark arbitrary characters as digits - but the PCRE compiling code expects
+to handle only 0-9, a-z, and A-Z as digits when compiling. That is why we have
+a private table here. It costs 256 bytes, but it is a lot faster than doing
+character value tests (at least in some simple cases I timed), and in some
+applications one wants PCRE to compile efficiently as well as match
+efficiently.
+
+For convenience, we use the same bit definitions as in chartables:
+
+ 0x04 decimal digit
+ 0x08 hexadecimal digit
+
+Then we can use ctype_digit and ctype_xdigit in the code. */
+
+#ifndef EBCDIC /* This is the "normal" case, for ASCII systems */
+static const unsigned char digitab[] =
+ {
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 0- 7 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 8- 15 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 16- 23 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 24- 31 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* - ' */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* ( - / */
+ 0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c, /* 0 - 7 */
+ 0x0c,0x0c,0x00,0x00,0x00,0x00,0x00,0x00, /* 8 - ? */
+ 0x00,0x08,0x08,0x08,0x08,0x08,0x08,0x00, /* @ - G */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* H - O */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* P - W */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* X - _ */
+ 0x00,0x08,0x08,0x08,0x08,0x08,0x08,0x00, /* ` - g */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* h - o */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* p - w */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* x -127 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 128-135 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 136-143 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 144-151 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 152-159 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 160-167 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 168-175 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 176-183 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 184-191 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 192-199 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 200-207 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 208-215 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 216-223 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 224-231 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 232-239 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 240-247 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00};/* 248-255 */
+
+#else /* This is the "abnormal" case, for EBCDIC systems */
+static const unsigned char digitab[] =
+ {
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 0- 7 0 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 8- 15 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 16- 23 10 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 24- 31 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 32- 39 20 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 40- 47 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 48- 55 30 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 56- 63 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* - 71 40 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 72- | */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* & - 87 50 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 88- 95 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* - -103 60 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 104- ? */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 112-119 70 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 120- " */
+ 0x00,0x08,0x08,0x08,0x08,0x08,0x08,0x00, /* 128- g 80 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* h -143 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 144- p 90 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* q -159 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 160- x A0 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* y -175 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* ^ -183 B0 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 184-191 */
+ 0x00,0x08,0x08,0x08,0x08,0x08,0x08,0x00, /* { - G C0 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* H -207 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* } - P D0 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* Q -223 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* \ - X E0 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* Y -239 */
+ 0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c,0x0c, /* 0 - 7 F0 */
+ 0x0c,0x0c,0x00,0x00,0x00,0x00,0x00,0x00};/* 8 -255 */
+
+static const unsigned char ebcdic_chartab[] = { /* chartable partial dup */
+ 0x80,0x00,0x00,0x00,0x00,0x01,0x00,0x00, /* 0- 7 */
+ 0x00,0x00,0x00,0x00,0x01,0x01,0x00,0x00, /* 8- 15 */
+ 0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00, /* 16- 23 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 24- 31 */
+ 0x00,0x00,0x00,0x00,0x00,0x01,0x00,0x00, /* 32- 39 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 40- 47 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 48- 55 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 56- 63 */
+ 0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* - 71 */
+ 0x00,0x00,0x00,0x80,0x00,0x80,0x80,0x80, /* 72- | */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* & - 87 */
+ 0x00,0x00,0x00,0x80,0x80,0x80,0x00,0x00, /* 88- 95 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* - -103 */
+ 0x00,0x00,0x00,0x00,0x00,0x10,0x00,0x80, /* 104- ? */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 112-119 */
+ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* 120- " */
+ 0x00,0x1a,0x1a,0x1a,0x1a,0x1a,0x1a,0x12, /* 128- g */
+ 0x12,0x12,0x00,0x00,0x00,0x00,0x00,0x00, /* h -143 */
+ 0x00,0x12,0x12,0x12,0x12,0x12,0x12,0x12, /* 144- p */
+ 0x12,0x12,0x00,0x00,0x00,0x00,0x00,0x00, /* q -159 */
+ 0x00,0x00,0x12,0x12,0x12,0x12,0x12,0x12, /* 160- x */
+ 0x12,0x12,0x00,0x00,0x00,0x00,0x00,0x00, /* y -175 */
+ 0x80,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* ^ -183 */
+ 0x00,0x00,0x80,0x00,0x00,0x00,0x00,0x00, /* 184-191 */
+ 0x80,0x1a,0x1a,0x1a,0x1a,0x1a,0x1a,0x12, /* { - G */
+ 0x12,0x12,0x00,0x00,0x00,0x00,0x00,0x00, /* H -207 */
+ 0x00,0x12,0x12,0x12,0x12,0x12,0x12,0x12, /* } - P */
+ 0x12,0x12,0x00,0x00,0x00,0x00,0x00,0x00, /* Q -223 */
+ 0x00,0x00,0x12,0x12,0x12,0x12,0x12,0x12, /* \ - X */
+ 0x12,0x12,0x00,0x00,0x00,0x00,0x00,0x00, /* Y -239 */
+ 0x1c,0x1c,0x1c,0x1c,0x1c,0x1c,0x1c,0x1c, /* 0 - 7 */
+ 0x1c,0x1c,0x00,0x00,0x00,0x00,0x00,0x00};/* 8 -255 */
+#endif
+
+
+/* Definition to allow mutual recursion */
+
+static BOOL
+ compile_regex(int, int, uschar **, const uschar **, int *, BOOL, BOOL, int,
+ int *, int *, branch_chain *, compile_data *, int *);
+
+
+
+/*************************************************
+* Find an error text *
+*************************************************/
+
+/* The error texts are now all in one long string, to save on relocations. As
+some of the text is of unknown length, we can't use a table of offsets.
+Instead, just count through the strings. This is not a performance issue
+because it happens only when there has been a compilation error.
+
+Argument: the error number
+Returns: pointer to the error string
+*/
+
+static const char *
+find_error_text(int n)
+{
+const char *s = error_texts;
+for (; n > 0; n--) while (*s++ != 0);
+return s;
+}
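For instance, find_error_text(5) skips the first five NUL-terminated strings above and returns a pointer to "number too big in {} quantifier".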
+
+
+/*************************************************
+* Handle escapes *
+*************************************************/
+
+/* This function is called when a \ has been encountered. It either returns a
+positive value for a simple escape such as \n, or a negative value which
+encodes one of the more complicated things such as \d. A backreference to group
+n is returned as -(ESC_REF + n); ESC_REF is the highest ESC_xxx macro. When
+UTF-8 is enabled, a positive value greater than 255 may be returned. On entry,
+ptr is pointing at the \. On exit, it is on the final character of the escape
+sequence.
+
+Arguments:
+ ptrptr points to the pattern position pointer
+ errorcodeptr points to the errorcode variable
+ bracount number of previous extracting brackets
+ options the options bits
+ isclass TRUE if inside a character class
+
+Returns: zero or positive => a data character
+ negative => a special escape sequence
+ on error, errorcodeptr is set
+*/
+
+static int
+check_escape(const uschar **ptrptr, int *errorcodeptr, int bracount,
+ int options, BOOL isclass)
+{
+BOOL utf8 = (options & PCRE_UTF8) != 0;
+const uschar *ptr = *ptrptr + 1;
+int c, i;
+
+GETCHARINCTEST(c, ptr); /* Get character value, increment pointer */
+ptr--; /* Set pointer back to the last byte */
+
+/* If backslash is at the end of the pattern, it's an error. */
+
+if (c == 0) *errorcodeptr = ERR1;
+
+/* Non-alphamerics are literals. For digits or letters, do an initial lookup in
+a table. A non-zero result is something that can be returned immediately.
+Otherwise further processing may be required. */
+
+#ifndef EBCDIC /* ASCII coding */
+else if (c < '0' || c > 'z') {} /* Not alphameric */
+else if ((i = escapes[c - '0']) != 0) c = i;
+
+#else /* EBCDIC coding */
+else if (c < 'a' || (ebcdic_chartab[c] & 0x0E) == 0) {} /* Not alphameric */
+else if ((i = escapes[c - 0x48]) != 0) c = i;
+#endif
+
+/* Escapes that need further processing, or are illegal. */
+
+else
+ {
+ const uschar *oldptr;
+ BOOL braced, negated;
+
+ switch (c)
+ {
+ /* A number of Perl escapes are not handled by PCRE. We give an explicit
+ error. */
+
+ case 'l':
+ case 'L':
+ case 'N':
+ case 'u':
+ case 'U':
+ *errorcodeptr = ERR37;
+ break;
+
+ /* \g must be followed by a number, either plain or braced. If positive, it
+ is an absolute backreference. If negative, it is a relative backreference.
+ This is a Perl 5.10 feature. Perl 5.10 also supports \g{name} as a
+ reference to a named group. This is part of Perl's movement towards a
+ unified syntax for back references. As this is synonymous with \k{name}, we
+ fudge it up by pretending it really was \k. */
+
+ case 'g':
+ if (ptr[1] == '{')
+ {
+ const uschar *p;
+ for (p = ptr+2; *p != 0 && *p != '}'; p++)
+ if (*p != '-' && (digitab[*p] & ctype_digit) == 0) break;
+ if (*p != 0 && *p != '}')
+ {
+ c = -ESC_k;
+ break;
+ }
+ braced = TRUE;
+ ptr++;
+ }
+ else braced = FALSE;
+
+ if (ptr[1] == '-')
+ {
+ negated = TRUE;
+ ptr++;
+ }
+ else negated = FALSE;
+
+ c = 0;
+ while ((digitab[ptr[1]] & ctype_digit) != 0)
+ c = c * 10 + *(++ptr) - '0';
+
+ if (c < 0)
+ {
+ *errorcodeptr = ERR61;
+ break;
+ }
+
+ if (c == 0 || (braced && *(++ptr) != '}'))
+ {
+ *errorcodeptr = ERR57;
+ break;
+ }
+
+ if (negated)
+ {
+ if (c > bracount)
+ {
+ *errorcodeptr = ERR15;
+ break;
+ }
+ c = bracount - (c - 1);
+ }
+
+ c = -(ESC_REF + c);
+ break;
+
+ /* The handling of escape sequences consisting of a string of digits
+ starting with one that is not zero is not straightforward. By experiment,
+ the way Perl works seems to be as follows:
+
+ Outside a character class, the digits are read as a decimal number. If the
+ number is less than 10, or if there are that many previous extracting
+ left brackets, then it is a back reference. Otherwise, up to three octal
+ digits are read to form an escaped byte. Thus \123 is likely to be octal
+ 123 (cf \0123, which is octal 012 followed by the literal 3). If the octal
+ value is greater than 377, the least significant 8 bits are taken. Inside a
+ character class, \ followed by a digit is always an octal number. */
+
+ case '1': case '2': case '3': case '4': case '5':
+ case '6': case '7': case '8': case '9':
+
+ if (!isclass)
+ {
+ oldptr = ptr;
+ c -= '0';
+ while ((digitab[ptr[1]] & ctype_digit) != 0)
+ c = c * 10 + *(++ptr) - '0';
+ if (c < 0)
+ {
+ *errorcodeptr = ERR61;
+ break;
+ }
+ if (c < 10 || c <= bracount)
+ {
+ c = -(ESC_REF + c);
+ break;
+ }
+ ptr = oldptr; /* Put the pointer back and fall through */
+ }
+
+ /* Handle an octal number following \. If the first digit is 8 or 9, Perl
+ generates a binary zero byte and treats the digit as a following literal.
+ Thus we have to pull back the pointer by one. */
+
+ if ((c = *ptr) >= '8')
+ {
+ ptr--;
+ c = 0;
+ break;
+ }
+
+ /* \0 always starts an octal number, but we may drop through to here with a
+ larger first octal digit. The original code used just to take the least
+ significant 8 bits of octal numbers (I think this is what early Perls used
+ to do). Nowadays we allow for larger numbers in UTF-8 mode, but no more
+ than 3 octal digits. */
+
+ case '0':
+ c -= '0';
+ while(i++ < 2 && ptr[1] >= '0' && ptr[1] <= '7')
+ c = c * 8 + *(++ptr) - '0';
+ if (!utf8 && c > 255) *errorcodeptr = ERR51;
+ break;
+
+ /* \x is complicated. \x{ddd} is a character number which can be greater
+ than 0xff in utf8 mode, but only if the ddd are hex digits. If not, { is
+ treated as a data character. */
+
+ case 'x':
+ if (ptr[1] == '{')
+ {
+ const uschar *pt = ptr + 2;
+ int count = 0;
+
+ c = 0;
+ while ((digitab[*pt] & ctype_xdigit) != 0)
+ {
+ register int cc = *pt++;
+ if (c == 0 && cc == '0') continue; /* Leading zeroes */
+ count++;
+
+#ifndef EBCDIC /* ASCII coding */
+ if (cc >= 'a') cc -= 32; /* Convert to upper case */
+ c = (c << 4) + cc - ((cc < 'A')? '0' : ('A' - 10));
+#else /* EBCDIC coding */
+ if (cc >= 'a' && cc <= 'z') cc += 64; /* Convert to upper case */
+ c = (c << 4) + cc - ((cc >= '0')? '0' : ('A' - 10));
+#endif
+ }
+
+ if (*pt == '}')
+ {
+ if (c < 0 || count > (utf8? 8 : 2)) *errorcodeptr = ERR34;
+ ptr = pt;
+ break;
+ }
+
+ /* If the sequence of hex digits does not end with '}', then we don't
+ recognize this construct; fall through to the normal \x handling. */
+ }
+
+ /* Read just a single-byte hex-defined char */
+
+ c = 0;
+ while (i++ < 2 && (digitab[ptr[1]] & ctype_xdigit) != 0)
+ {
+ int cc; /* Some compilers don't like ++ */
+ cc = *(++ptr); /* in initializers */
+#ifndef EBCDIC /* ASCII coding */
+ if (cc >= 'a') cc -= 32; /* Convert to upper case */
+ c = c * 16 + cc - ((cc < 'A')? '0' : ('A' - 10));
+#else /* EBCDIC coding */
+ if (cc <= 'z') cc += 64; /* Convert to upper case */
+ c = c * 16 + cc - ((cc >= '0')? '0' : ('A' - 10));
+#endif
+ }
+ break;
+
+ /* For \c, a following letter is upper-cased; then the 0x40 bit is flipped.
+ This coding is ASCII-specific, but then the whole concept of \cx is
+ ASCII-specific. (However, an EBCDIC equivalent has now been added.) */
+
+ case 'c':
+ c = *(++ptr);
+ if (c == 0)
+ {
+ *errorcodeptr = ERR2;
+ break;
+ }
+
+#ifndef EBCDIC /* ASCII coding */
+ if (c >= 'a' && c <= 'z') c -= 32;
+ c ^= 0x40;
+#else /* EBCDIC coding */
+ if (c >= 'a' && c <= 'z') c += 64;
+ c ^= 0xC0;
+#endif
+ break;
+
+ /* PCRE_EXTRA enables extensions to Perl in the matter of escapes. Any
+ other alphameric following \ is an error if PCRE_EXTRA was set; otherwise,
+ for Perl compatibility, it is a literal. This code looks a bit odd, but
+ there used to be some cases other than the default, and there may be again
+ in future, so I haven't "optimized" it. */
+
+ default:
+ if ((options & PCRE_EXTRA) != 0) switch(c)
+ {
+ default:
+ *errorcodeptr = ERR3;
+ break;
+ }
+ break;
+ }
+ }
+
+*ptrptr = ptr;
+return c;
+}
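A minimal sketch of how a caller might classify the value returned above, following the convention described in the function's header comment (ESC_REF is assumed to be the highest ESC_xxx value, as stated there); this is an illustration, not PCRE's actual caller:

static const char *classify_escape(int c)
{
if (c >= 0) return "literal data character";      /* e.g. \n or \x41 */
if (-c > ESC_REF) return "back reference";        /* encoded as -(ESC_REF + n) */
return "special escape such as \\d or \\w";       /* one of the -ESC_xxx values */
}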
+
+
+
+#ifdef SUPPORT_UCP
+/*************************************************
+* Handle \P and \p *
+*************************************************/
+
+/* This function is called after \P or \p has been encountered, provided that
+PCRE is compiled with support for Unicode properties. On entry, ptrptr is
+pointing at the P or p. On exit, it is pointing at the final character of the
+escape sequence.
+
+Argument:
+ ptrptr points to the pattern position pointer
+ negptr points to a boolean that is set TRUE for negation else FALSE
+ dptr points to an int that is set to the detailed property value
+ errorcodeptr points to the error code variable
+
+Returns: type value from ucp_type_table, or -1 for an invalid type
+*/
+
+static int
+get_ucp(const uschar **ptrptr, BOOL *negptr, int *dptr, int *errorcodeptr)
+{
+int c, i, bot, top;
+const uschar *ptr = *ptrptr;
+char name[32];
+
+c = *(++ptr);
+if (c == 0) goto ERROR_RETURN;
+
+*negptr = FALSE;
+
+/* \P or \p can be followed by a name in {}, optionally preceded by ^ for
+negation. */
+
+if (c == '{')
+ {
+ if (ptr[1] == '^')
+ {
+ *negptr = TRUE;
+ ptr++;
+ }
+ for (i = 0; i < (int)sizeof(name) - 1; i++)
+ {
+ c = *(++ptr);
+ if (c == 0) goto ERROR_RETURN;
+ if (c == '}') break;
+ name[i] = c;
+ }
+ if (c !='}') goto ERROR_RETURN;
+ name[i] = 0;
+ }
+
+/* Otherwise there is just one following character */
+
+else
+ {
+ name[0] = c;
+ name[1] = 0;
+ }
+
+*ptrptr = ptr;
+
+/* Search for a recognized property name using binary chop */
+
+bot = 0;
+top = _pcre_utt_size;
+
+while (bot < top)
+ {
+ i = (bot + top) >> 1;
+ c = strcmp(name, _pcre_utt_names + _pcre_utt[i].name_offset);
+ if (c == 0)
+ {
+ *dptr = _pcre_utt[i].value;
+ return _pcre_utt[i].type;
+ }
+ if (c > 0) bot = i + 1; else top = i;
+ }
+
+*errorcodeptr = ERR47;
+*ptrptr = ptr;
+return -1;
+
+ERROR_RETURN:
+*errorcodeptr = ERR46;
+*ptrptr = ptr;
+return -1;
+}
+#endif
+
+
+
+
+/*************************************************
+* Check for counted repeat *
+*************************************************/
+
+/* This function is called when a '{' is encountered in a place where it might
+start a quantifier. It looks ahead to see if it really is a quantifier or not.
+It is only a quantifier if it is one of the forms {ddd} {ddd,} or {ddd,ddd}
+where the ddds are digits.
+
+Arguments:
+ p pointer to the first char after '{'
+
+Returns: TRUE or FALSE
+*/
+
+static BOOL
+is_counted_repeat(const uschar *p)
+{
+if ((digitab[*p++] & ctype_digit) == 0) return FALSE;
+while ((digitab[*p] & ctype_digit) != 0) p++;
+if (*p == '}') return TRUE;
+
+if (*p++ != ',') return FALSE;
+if (*p == '}') return TRUE;
+
+if ((digitab[*p++] & ctype_digit) == 0) return FALSE;
+while ((digitab[*p] & ctype_digit) != 0) p++;
+
+return (*p == '}');
+}
+
+
+
+/*************************************************
+* Read repeat counts *
+*************************************************/
+
+/* Read an item of the form {n,m} and return the values. This is called only
+after is_counted_repeat() has confirmed that a repeat-count quantifier exists,
+so the syntax is guaranteed to be correct, but we need to check the values.
+
+Arguments:
+ p pointer to first char after '{'
+ minp pointer to int for min
+ maxp pointer to int for max
+ returned as -1 if no max
+ errorcodeptr points to error code variable
+
+Returns: pointer to '}' on success;
+ current ptr on error, with errorcodeptr set non-zero
+*/
+
+static const uschar *
+read_repeat_counts(const uschar *p, int *minp, int *maxp, int *errorcodeptr)
+{
+int min = 0;
+int max = -1;
+
+/* Read the minimum value and do a paranoid check: a negative value indicates
+an integer overflow. */
+
+while ((digitab[*p] & ctype_digit) != 0) min = min * 10 + *p++ - '0';
+if (min < 0 || min > 65535)
+ {
+ *errorcodeptr = ERR5;
+ return p;
+ }
+
+/* Read the maximum value if there is one, and again do a paranoid check on its
+size. Also, max must not be less than min. */
+Also, max must not be less than min. */
+
+if (*p == '}') max = min; else
+ {
+ if (*(++p) != '}')
+ {
+ max = 0;
+ while((digitab[*p] & ctype_digit) != 0) max = max * 10 + *p++ - '0';
+ if (max < 0 || max > 65535)
+ {
+ *errorcodeptr = ERR5;
+ return p;
+ }
+ if (max < min)
+ {
+ *errorcodeptr = ERR4;
+ return p;
+ }
+ }
+ }
+
+/* Fill in the required variables, and pass back the pointer to the terminating
+'}'. */
+
+*minp = min;
+*maxp = max;
+return p;
+}
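A brief worked illustration of the values this produces, assuming p points just past the '{':

/*  "2,5}"  ->  *minp = 2, *maxp = 5
    "3,}"   ->  *minp = 3, *maxp = -1   (no upper limit)
    "4}"    ->  *minp = 4, *maxp = 4
   In each case the returned pointer addresses the closing '}'. */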
+
+
+
+/*************************************************
+* Find forward referenced subpattern *
+*************************************************/
+
+/* This function scans along a pattern's text looking for capturing
+subpatterns, and counting them. If it finds a named pattern that matches the
+name it is given, it returns its number. Alternatively, if the name is NULL, it
+returns when it reaches a given numbered subpattern. This is used for forward
+references to subpatterns. We know that if (?P< is encountered, the name will
+be terminated by '>' because that is checked in the first pass.
+
+Arguments:
+ ptr current position in the pattern
+ count current count of capturing parens so far encountered
+ name name to seek, or NULL if seeking a numbered subpattern
+ lorn name length, or subpattern number if name is NULL
+ xmode TRUE if we are in /x mode
+
+Returns: the number of the named subpattern, or -1 if not found
+*/
+
+static int
+find_parens(const uschar *ptr, int count, const uschar *name, int lorn,
+ BOOL xmode)
+{
+const uschar *thisname;
+
+for (; *ptr != 0; ptr++)
+ {
+ int term;
+
+ /* Skip over backslashed characters and also entire \Q...\E */
+
+ if (*ptr == '\\')
+ {
+ if (*(++ptr) == 0) return -1;
+ if (*ptr == 'Q') for (;;)
+ {
+ while (*(++ptr) != 0 && *ptr != '\\');
+ if (*ptr == 0) return -1;
+ if (*(++ptr) == 'E') break;
+ }
+ continue;
+ }
+
+ /* Skip over character classes */
+
+ if (*ptr == '[')
+ {
+ while (*(++ptr) != ']')
+ {
+ if (*ptr == 0) return -1;
+ if (*ptr == '\\')
+ {
+ if (*(++ptr) == 0) return -1;
+ if (*ptr == 'Q') for (;;)
+ {
+ while (*(++ptr) != 0 && *ptr != '\\');
+ if (*ptr == 0) return -1;
+ if (*(++ptr) == 'E') break;
+ }
+ continue;
+ }
+ }
+ continue;
+ }
+
+ /* Skip comments in /x mode */
+
+ if (xmode && *ptr == '#')
+ {
+ while (*(++ptr) != 0 && *ptr != '\n');
+ if (*ptr == 0) return -1;
+ continue;
+ }
+
+ /* An opening parens must now be a real metacharacter */
+
+ if (*ptr != '(') continue;
+ if (ptr[1] != '?' && ptr[1] != '*')
+ {
+ count++;
+ if (name == NULL && count == lorn) return count;
+ continue;
+ }
+
+ ptr += 2;
+ if (*ptr == 'P') ptr++; /* Allow optional P */
+
+ /* We have to disambiguate (?<! and (?<= from (?<name> */
+
+ if ((*ptr != '<' || ptr[1] == '!' || ptr[1] == '=') &&
+ *ptr != '\'')
+ continue;
+
+ count++;
+
+ if (name == NULL && count == lorn) return count;
+ term = *ptr++;
+ if (term == '<') term = '>';
+ thisname = ptr;
+ while (*ptr != term) ptr++;
+ if (name != NULL && lorn == ptr - thisname &&
+ strncmp((const char *)name, (const char *)thisname, lorn) == 0)
+ return count;
+ }
+
+return -1;
+}
+
+
+
+/*************************************************
+* Find first significant op code *
+*************************************************/
+
+/* This is called by several functions that scan a compiled expression looking
+for a fixed first character, or an anchoring op code etc. It skips over things
+that do not influence this. For some calls, a change of option is important.
+For some calls, it makes sense to skip negative forward and all backward
+assertions, and also the \b assertion; for others it does not.
+
+Arguments:
+ code pointer to the start of the group
+ options pointer to external options
+ optbit the option bit whose changing is significant, or
+ zero if none are
+ skipassert TRUE if certain assertions are to be skipped
+
+Returns: pointer to the first significant opcode
+*/
+
+static const uschar*
+first_significant_code(const uschar *code, int *options, int optbit,
+ BOOL skipassert)
+{
+for (;;)
+ {
+ switch ((int)*code)
+ {
+ case OP_OPT:
+ if (optbit > 0 && ((int)code[1] & optbit) != (*options & optbit))
+ *options = (int)code[1];
+ code += 2;
+ break;
+
+ case OP_ASSERT_NOT:
+ case OP_ASSERTBACK:
+ case OP_ASSERTBACK_NOT:
+ if (!skipassert) return code;
+ do code += GET(code, 1); while (*code == OP_ALT);
+ code += _pcre_OP_lengths[*code];
+ break;
+
+ case OP_WORD_BOUNDARY:
+ case OP_NOT_WORD_BOUNDARY:
+ if (!skipassert) return code;
+ /* Fall through */
+
+ case OP_CALLOUT:
+ case OP_CREF:
+ case OP_RREF:
+ case OP_DEF:
+ code += _pcre_OP_lengths[*code];
+ break;
+
+ default:
+ return code;
+ }
+ }
+/* Control never reaches here */
+}
+
+
+
+
+/*************************************************
+* Find the fixed length of a pattern *
+*************************************************/
+
+/* Scan a pattern and compute the fixed length of subject that will match it,
+if the length is fixed. This is needed for dealing with backward assertions.
+In UTF8 mode, the result is in characters rather than bytes.
+
+Arguments:
+ code points to the start of the pattern (the bracket)
+ options the compiling options
+
+Returns: the fixed length, or -1 if there is no fixed length,
+ or -2 if \C was encountered
+*/
+
+static int
+find_fixedlength(uschar *code, int options)
+{
+int length = -1;
+
+register int branchlength = 0;
+register uschar *cc = code + 1 + LINK_SIZE;
+
+/* Scan along the opcodes for this branch. If we get to the end of the
+branch, check the length against that of the other branches. */
+
+for (;;)
+ {
+ int d;
+ register int op = *cc;
+ switch (op)
+ {
+ case OP_CBRA:
+ case OP_BRA:
+ case OP_ONCE:
+ case OP_COND:
+ d = find_fixedlength(cc + ((op == OP_CBRA)? 2:0), options);
+ if (d < 0) return d;
+ branchlength += d;
+ do cc += GET(cc, 1); while (*cc == OP_ALT);
+ cc += 1 + LINK_SIZE;
+ break;
+
+ /* Reached end of a branch; if it's a ket it is the end of a nested
+ call. If it's ALT it is an alternation in a nested call. If it is
+ END it's the end of the outer call. All can be handled by the same code. */
+
+ case OP_ALT:
+ case OP_KET:
+ case OP_KETRMAX:
+ case OP_KETRMIN:
+ case OP_END:
+ if (length < 0) length = branchlength;
+ else if (length != branchlength) return -1;
+ if (*cc != OP_ALT) return length;
+ cc += 1 + LINK_SIZE;
+ branchlength = 0;
+ break;
+
+ /* Skip over assertive subpatterns */
+
+ case OP_ASSERT:
+ case OP_ASSERT_NOT:
+ case OP_ASSERTBACK:
+ case OP_ASSERTBACK_NOT:
+ do cc += GET(cc, 1); while (*cc == OP_ALT);
+ /* Fall through */
+
+ /* Skip over things that don't match chars */
+
+ case OP_REVERSE:
+ case OP_CREF:
+ case OP_RREF:
+ case OP_DEF:
+ case OP_OPT:
+ case OP_CALLOUT:
+ case OP_SOD:
+ case OP_SOM:
+ case OP_EOD:
+ case OP_EODN:
+ case OP_CIRC:
+ case OP_DOLL:
+ case OP_NOT_WORD_BOUNDARY:
+ case OP_WORD_BOUNDARY:
+ cc += _pcre_OP_lengths[*cc];
+ break;
+
+ /* Handle literal characters */
+
+ case OP_CHAR:
+ case OP_CHARNC:
+ case OP_NOT:
+ branchlength++;
+ cc += 2;
+#ifdef SUPPORT_UTF8
+ if ((options & PCRE_UTF8) != 0)
+ {
+ while ((*cc & 0xc0) == 0x80) cc++;
+ }
+#endif
+ break;
+
+ /* Handle exact repetitions. The count is already in characters, but we
+ need to skip over a multibyte character in UTF8 mode. */
+
+ case OP_EXACT:
+ branchlength += GET2(cc,1);
+ cc += 4;
+#ifdef SUPPORT_UTF8
+ if ((options & PCRE_UTF8) != 0)
+ {
+ while((*cc & 0x80) == 0x80) cc++;
+ }
+#endif
+ break;
+
+ case OP_TYPEEXACT:
+ branchlength += GET2(cc,1);
+ if (cc[3] == OP_PROP || cc[3] == OP_NOTPROP) cc += 2;
+ cc += 4;
+ break;
+
+ /* Handle single-char matchers */
+
+ case OP_PROP:
+ case OP_NOTPROP:
+ cc += 2;
+ /* Fall through */
+
+ case OP_NOT_DIGIT:
+ case OP_DIGIT:
+ case OP_NOT_WHITESPACE:
+ case OP_WHITESPACE:
+ case OP_NOT_WORDCHAR:
+ case OP_WORDCHAR:
+ case OP_ANY:
+ branchlength++;
+ cc++;
+ break;
+
+ /* The single-byte matcher isn't allowed */
+
+ case OP_ANYBYTE:
+ return -2;
+
+ /* Check a class for variable quantification */
+
+#ifdef SUPPORT_UTF8
+ case OP_XCLASS:
+ cc += GET(cc, 1) - 33;
+ /* Fall through */
+#endif
+
+ case OP_CLASS:
+ case OP_NCLASS:
+ cc += 33;
+
+ switch (*cc)
+ {
+ case OP_CRSTAR:
+ case OP_CRMINSTAR:
+ case OP_CRQUERY:
+ case OP_CRMINQUERY:
+ return -1;
+
+ case OP_CRRANGE:
+ case OP_CRMINRANGE:
+ if (GET2(cc,1) != GET2(cc,3)) return -1;
+ branchlength += GET2(cc,1);
+ cc += 5;
+ break;
+
+ default:
+ branchlength++;
+ }
+ break;
+
+ /* Anything else is variable length */
+
+ default:
+ return -1;
+ }
+ }
+/* Control never gets here */
+}
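As an illustration (not taken from PCRE's documentation): a lookbehind such as (?<=foo\d) has a fixed length of 4, (?<=a+) has no fixed length and yields -1, and a lookbehind containing \C yields -2.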
+
+
+
+
+/*************************************************
+* Scan compiled regex for numbered bracket *
+*************************************************/
+
+/* This little function scans through a compiled pattern until it finds a
+capturing bracket with the given number.
+
+Arguments:
+ code points to start of expression
+ utf8 TRUE in UTF-8 mode
+ number the required bracket number
+
+Returns: pointer to the opcode for the bracket, or NULL if not found
+*/
+
+static const uschar *
+find_bracket(const uschar *code, BOOL utf8, int number)
+{
+for (;;)
+ {
+ register int c = *code;
+ if (c == OP_END) return NULL;
+
+ /* XCLASS is used for classes that cannot be represented just by a bit
+ map. This includes negated single high-valued characters. The length in
+ the table is zero; the actual length is stored in the compiled code. */
+
+ if (c == OP_XCLASS) code += GET(code, 1);
+
+ /* Handle capturing bracket */
+
+ else if (c == OP_CBRA)
+ {
+ int n = GET2(code, 1+LINK_SIZE);
+ if (n == number) return (uschar *)code;
+ code += _pcre_OP_lengths[c];
+ }
+
+ /* Otherwise, we can get the item's length from the table, except that for
+ repeated character types, we have to test for \p and \P, which have an extra
+ two bytes of parameters. */
+
+ else
+ {
+ switch(c)
+ {
+ case OP_TYPESTAR:
+ case OP_TYPEMINSTAR:
+ case OP_TYPEPLUS:
+ case OP_TYPEMINPLUS:
+ case OP_TYPEQUERY:
+ case OP_TYPEMINQUERY:
+ case OP_TYPEPOSSTAR:
+ case OP_TYPEPOSPLUS:
+ case OP_TYPEPOSQUERY:
+ if (code[1] == OP_PROP || code[1] == OP_NOTPROP) code += 2;
+ break;
+
+ case OP_TYPEUPTO:
+ case OP_TYPEMINUPTO:
+ case OP_TYPEEXACT:
+ case OP_TYPEPOSUPTO:
+ if (code[3] == OP_PROP || code[3] == OP_NOTPROP) code += 2;
+ break;
+ }
+
+ /* Add in the fixed length from the table */
+
+ code += _pcre_OP_lengths[c];
+
+ /* In UTF-8 mode, opcodes that are followed by a character may be followed by
+ a multi-byte character. The length in the table is a minimum, so we have to
+ arrange to skip the extra bytes. */
+
+#ifdef SUPPORT_UTF8
+ if (utf8) switch(c)
+ {
+ case OP_CHAR:
+ case OP_CHARNC:
+ case OP_EXACT:
+ case OP_UPTO:
+ case OP_MINUPTO:
+ case OP_POSUPTO:
+ case OP_STAR:
+ case OP_MINSTAR:
+ case OP_POSSTAR:
+ case OP_PLUS:
+ case OP_MINPLUS:
+ case OP_POSPLUS:
+ case OP_QUERY:
+ case OP_MINQUERY:
+ case OP_POSQUERY:
+ if (code[-1] >= 0xc0) code += _pcre_utf8_table4[code[-1] & 0x3f];
+ break;
+ }
+#endif
+ }
+ }
+}
+
+
+
+/*************************************************
+* Scan compiled regex for recursion reference *
+*************************************************/
+
+/* This little function scans through a compiled pattern until it finds an
+instance of OP_RECURSE.
+
+Arguments:
+ code points to start of expression
+ utf8 TRUE in UTF-8 mode
+
+Returns: pointer to the opcode for OP_RECURSE, or NULL if not found
+*/
+
+static const uschar *
+find_recurse(const uschar *code, BOOL utf8)
+{
+for (;;)
+ {
+ register int c = *code;
+ if (c == OP_END) return NULL;
+ if (c == OP_RECURSE) return code;
+
+ /* XCLASS is used for classes that cannot be represented just by a bit
+ map. This includes negated single high-valued characters. The length in
+ the table is zero; the actual length is stored in the compiled code. */
+
+ if (c == OP_XCLASS) code += GET(code, 1);
+
+ /* Otherwise, we can get the item's length from the table, except that for
+ repeated character types, we have to test for \p and \P, which have an extra
+ two bytes of parameters. */
+
+ else
+ {
+ switch(c)
+ {
+ case OP_TYPESTAR:
+ case OP_TYPEMINSTAR:
+ case OP_TYPEPLUS:
+ case OP_TYPEMINPLUS:
+ case OP_TYPEQUERY:
+ case OP_TYPEMINQUERY:
+ case OP_TYPEPOSSTAR:
+ case OP_TYPEPOSPLUS:
+ case OP_TYPEPOSQUERY:
+ if (code[1] == OP_PROP || code[1] == OP_NOTPROP) code += 2;
+ break;
+
+ case OP_TYPEPOSUPTO:
+ case OP_TYPEUPTO:
+ case OP_TYPEMINUPTO:
+ case OP_TYPEEXACT:
+ if (code[3] == OP_PROP || code[3] == OP_NOTPROP) code += 2;
+ break;
+ }
+
+ /* Add in the fixed length from the table */
+
+ code += _pcre_OP_lengths[c];
+
+ /* In UTF-8 mode, opcodes that are followed by a character may be followed
+ by a multi-byte character. The length in the table is a minimum, so we have
+ to arrange to skip the extra bytes. */
+
+#ifdef SUPPORT_UTF8
+ if (utf8) switch(c)
+ {
+ case OP_CHAR:
+ case OP_CHARNC:
+ case OP_EXACT:
+ case OP_UPTO:
+ case OP_MINUPTO:
+ case OP_POSUPTO:
+ case OP_STAR:
+ case OP_MINSTAR:
+ case OP_POSSTAR:
+ case OP_PLUS:
+ case OP_MINPLUS:
+ case OP_POSPLUS:
+ case OP_QUERY:
+ case OP_MINQUERY:
+ case OP_POSQUERY:
+ if (code[-1] >= 0xc0) code += _pcre_utf8_table4[code[-1] & 0x3f];
+ break;
+ }
+#endif
+ }
+ }
+}
+
+
+
+/*************************************************
+* Scan compiled branch for non-emptiness *
+*************************************************/
+
+/* This function scans through a branch of a compiled pattern to see whether it
+can match the empty string or not. It is called from could_be_empty()
+below and from compile_branch() when checking for an unlimited repeat of a
+group that can match nothing. Note that first_significant_code() skips over
+assertions. If we hit an unclosed bracket, we return "empty" - this means we've
+struck an inner bracket whose current branch will already have been scanned.
+
+Arguments:
+ code points to start of search
+ endcode points to where to stop
+ utf8 TRUE if in UTF8 mode
+
+Returns: TRUE if what is matched could be empty
+*/
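+
+/* For example, the single branch inside (a*) can match an empty string, so
+this function returns TRUE for it, whereas a branch such as "ab" must consume
+at least one character and yields FALSE. */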
+
+static BOOL
+could_be_empty_branch(const uschar *code, const uschar *endcode, BOOL utf8)
+{
+register int c;
+for (code = first_significant_code(code + _pcre_OP_lengths[*code], NULL, 0, TRUE);
+ code < endcode;
+ code = first_significant_code(code + _pcre_OP_lengths[c], NULL, 0, TRUE))
+ {
+ const uschar *ccode;
+
+ c = *code;
+
+ /* Groups with zero repeats can of course be empty; skip them. */
+
+ if (c == OP_BRAZERO || c == OP_BRAMINZERO)
+ {
+ code += _pcre_OP_lengths[c];
+ do code += GET(code, 1); while (*code == OP_ALT);
+ c = *code;
+ continue;
+ }
+
+ /* For other groups, scan the branches. */
+
+ if (c == OP_BRA || c == OP_CBRA || c == OP_ONCE || c == OP_COND)
+ {
+ BOOL empty_branch;
+ if (GET(code, 1) == 0) return TRUE; /* Hit unclosed bracket */
+
+ /* Scan a closed bracket */
+
+ empty_branch = FALSE;
+ do
+ {
+ if (!empty_branch && could_be_empty_branch(code, endcode, utf8))
+ empty_branch = TRUE;
+ code += GET(code, 1);
+ }
+ while (*code == OP_ALT);
+ if (!empty_branch) return FALSE; /* All branches are non-empty */
+ c = *code;
+ continue;
+ }
+
+ /* Handle the other opcodes */
+
+ switch (c)
+ {
+ /* Check for quantifiers after a class. XCLASS is used for classes that
+ cannot be represented just by a bit map. This includes negated single
+ high-valued characters. The length in _pcre_OP_lengths[] is zero; the
+ actual length is stored in the compiled code, so we must update "code"
+ here. */
+
+#ifdef SUPPORT_UTF8
+ case OP_XCLASS:
+ ccode = code += GET(code, 1);
+ goto CHECK_CLASS_REPEAT;
+#endif
+
+ case OP_CLASS:
+ case OP_NCLASS:
+ ccode = code + 33;
+
+#ifdef SUPPORT_UTF8
+ CHECK_CLASS_REPEAT:
+#endif
+
+ switch (*ccode)
+ {
+ case OP_CRSTAR: /* These could be empty; continue */
+ case OP_CRMINSTAR:
+ case OP_CRQUERY:
+ case OP_CRMINQUERY:
+ break;
+
+ default: /* Non-repeat => class must match */
+ case OP_CRPLUS: /* These repeats aren't empty */
+ case OP_CRMINPLUS:
+ return FALSE;
+
+ case OP_CRRANGE:
+ case OP_CRMINRANGE:
+ if (GET2(ccode, 1) > 0) return FALSE; /* Minimum > 0 */
+ break;
+ }
+ break;
+
+ /* Opcodes that must match a character */
+
+ case OP_PROP:
+ case OP_NOTPROP:
+ case OP_EXTUNI:
+ case OP_NOT_DIGIT:
+ case OP_DIGIT:
+ case OP_NOT_WHITESPACE:
+ case OP_WHITESPACE:
+ case OP_NOT_WORDCHAR:
+ case OP_WORDCHAR:
+ case OP_ANY:
+ case OP_ANYBYTE:
+ case OP_CHAR:
+ case OP_CHARNC:
+ case OP_NOT:
+ case OP_PLUS:
+ case OP_MINPLUS:
+ case OP_POSPLUS:
+ case OP_EXACT:
+ case OP_NOTPLUS:
+ case OP_NOTMINPLUS:
+ case OP_NOTPOSPLUS:
+ case OP_NOTEXACT:
+ case OP_TYPEPLUS:
+ case OP_TYPEMINPLUS:
+ case OP_TYPEPOSPLUS:
+ case OP_TYPEEXACT:
+ return FALSE;
+
+ /* These are going to continue, as they may be empty, but we have to
+ fudge the length for the \p and \P cases. */
+
+ case OP_TYPESTAR:
+ case OP_TYPEMINSTAR:
+ case OP_TYPEPOSSTAR:
+ case OP_TYPEQUERY:
+ case OP_TYPEMINQUERY:
+ case OP_TYPEPOSQUERY:
+ if (code[1] == OP_PROP || code[1] == OP_NOTPROP) code += 2;
+ break;
+
+ /* Same for these */
+
+ case OP_TYPEUPTO:
+ case OP_TYPEMINUPTO:
+ case OP_TYPEPOSUPTO:
+ if (code[3] == OP_PROP || code[3] == OP_NOTPROP) code += 2;
+ break;
+
+ /* End of branch */
+
+ case OP_KET:
+ case OP_KETRMAX:
+ case OP_KETRMIN:
+ case OP_ALT:
+ return TRUE;
+
+ /* In UTF-8 mode, STAR, MINSTAR, POSSTAR, QUERY, MINQUERY, POSQUERY, UPTO,
+ MINUPTO, and POSUPTO may be followed by a multibyte character */
+
+#ifdef SUPPORT_UTF8
+ case OP_STAR:
+ case OP_MINSTAR:
+ case OP_POSSTAR:
+ case OP_QUERY:
+ case OP_MINQUERY:
+ case OP_POSQUERY:
+ case OP_UPTO:
+ case OP_MINUPTO:
+ case OP_POSUPTO:
+ if (utf8) while ((code[2] & 0xc0) == 0x80) code++;
+ break;
+#endif
+ }
+ }
+
+return TRUE;
+}
+
+
+
+/*************************************************
+* Scan compiled regex for non-emptiness *
+*************************************************/
+
+/* This function is called to check for left recursive calls. We want to check
+the current branch of the current pattern to see if it could match the empty
+string. If it could, we must look outwards for branches at other levels,
+stopping when we pass beyond the bracket which is the subject of the recursion.
+
+Arguments:
+ code points to start of the recursion
+ endcode points to where to stop (current RECURSE item)
+ bcptr points to the chain of current (unclosed) branch starts
+ utf8 TRUE if in UTF-8 mode
+
+Returns: TRUE if what is matched could be empty
+*/
+
+static BOOL
+could_be_empty(const uschar *code, const uschar *endcode, branch_chain *bcptr,
+ BOOL utf8)
+{
+while (bcptr != NULL && bcptr->current >= code)
+ {
+ if (!could_be_empty_branch(bcptr->current, endcode, utf8)) return FALSE;
+ bcptr = bcptr->outer;
+ }
+return TRUE;
+}
+
+
+
+/*************************************************
+* Check for POSIX class syntax *
+*************************************************/
+
+/* This function is called when the sequence "[:" or "[." or "[=" is
+encountered in a character class. It checks whether this is followed by an
+optional ^ and then a sequence of letters, terminated by a matching ":]" or
+".]" or "=]".
+
+Argument:
+ ptr pointer to the initial [
+ endptr where to return the end pointer
+ cd pointer to compile data
+
+Returns: TRUE or FALSE
+*/
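+
+/* For example, with ptr at the '[' of "[:alpha:]" this returns TRUE and sets
+*endptr to the ':' just before the closing ']'; for a malformed item such as
+"[:al pha:]" the letter scan stops at the space and FALSE is returned. */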
+
+static BOOL
+check_posix_syntax(const uschar *ptr, const uschar **endptr, compile_data *cd)
+{
+int terminator; /* Don't combine these lines; the Solaris cc */
+terminator = *(++ptr); /* compiler warns about "non-constant" initializer. */
+if (*(++ptr) == '^') ptr++;
+while ((cd->ctypes[*ptr] & ctype_letter) != 0) ptr++;
+if (*ptr == terminator && ptr[1] == ']')
+ {
+ *endptr = ptr;
+ return TRUE;
+ }
+return FALSE;
+}
+
+
+
+
+/*************************************************
+* Check POSIX class name *
+*************************************************/
+
+/* This function is called to check the name given in a POSIX-style class entry
+such as [:alnum:].
+
+Arguments:
+ ptr points to the first letter
+ len the length of the name
+
+Returns: a value representing the name, or -1 if unknown
+*/
+
+static int
+check_posix_name(const uschar *ptr, int len)
+{
+const char *pn = posix_names;
+register int yield = 0;
+while (posix_name_lengths[yield] != 0)
+ {
+ if (len == posix_name_lengths[yield] &&
+ strncmp((const char *)ptr, pn, len) == 0) return yield;
+ pn += posix_name_lengths[yield] + 1;
+ yield++;
+ }
+return -1;
+}
+
+
+/*************************************************
+* Adjust OP_RECURSE items in repeated group *
+*************************************************/
+
+/* OP_RECURSE items contain an offset from the start of the regex to the group
+that is referenced. This means that groups can be replicated for fixed
+repetition simply by copying (because the recursion is allowed to refer to
+earlier groups that are outside the current group). However, when a group is
+optional (i.e. the minimum quantifier is zero), OP_BRAZERO is inserted before
+it, after it has been compiled. This means that any OP_RECURSE items within it
+that refer to the group itself or any contained groups have to have their
+offsets adjusted. That is one of the jobs of this function. Before it is called,
+the partially compiled regex must be temporarily terminated with OP_END.
+
+This function has been extended with the possibility of forward references for
+recursions and subroutine calls. It must also check the list of such references
+for the group we are dealing with. If it finds that one of the recursions in
+the current group is on this list, it adjusts the offset in the list, not the
+value in the reference (which is a group number).
+
+Arguments:
+ group points to the start of the group
+ adjust the amount by which the group is to be moved
+ utf8 TRUE in UTF-8 mode
+ cd contains pointers to tables etc.
+ save_hwm the hwm forward reference pointer at the start of the group
+
+Returns: nothing
+*/
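+
+/* For example, if a group that starts at offset 20 is moved up by one byte to
+make room for an inserted OP_BRAZERO, an OP_RECURSE inside it whose stored
+offset is 25 is changed to 26 (unless it is handled via the forward reference
+list described above), while a recursion that points back to offset 10 is left
+alone. */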
+
+static void
+adjust_recurse(uschar *group, int adjust, BOOL utf8, compile_data *cd,
+ uschar *save_hwm)
+{
+uschar *ptr = group;
+
+while ((ptr = (uschar *)find_recurse(ptr, utf8)) != NULL)
+ {
+ int offset;
+ uschar *hc;
+
+ /* See if this recursion is on the forward reference list. If so, adjust the
+ reference. */
+
+ for (hc = save_hwm; hc < cd->hwm; hc += LINK_SIZE)
+ {
+ offset = GET(hc, 0);
+ if (cd->start_code + offset == ptr + 1)
+ {
+ PUT(hc, 0, offset + adjust);
+ break;
+ }
+ }
+
+ /* Otherwise, adjust the recursion offset if it's after the start of this
+ group. */
+
+ if (hc >= cd->hwm)
+ {
+ offset = GET(ptr, 1);
+ if (cd->start_code + offset >= group) PUT(ptr, 1, offset + adjust);
+ }
+
+ ptr += 1 + LINK_SIZE;
+ }
+}
+
+
+
+/*************************************************
+* Insert an automatic callout point *
+*************************************************/
+
+/* This function is called when the PCRE_AUTO_CALLOUT option is set, to insert
+callout points before each pattern item.
+
+Arguments:
+ code current code pointer
+ ptr current pattern pointer
+ cd pointers to tables etc
+
+Returns: new code pointer
+*/
+
+static uschar *
+auto_callout(uschar *code, const uschar *ptr, compile_data *cd)
+{
+*code++ = OP_CALLOUT;
+*code++ = 255;
+PUT(code, 0, ptr - cd->start_pattern); /* Pattern offset */
+PUT(code, LINK_SIZE, 0); /* Default length */
+return code + 2*LINK_SIZE;
+}
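+
+/* The item written above is OP_CALLOUT, the callout number 255 (used for all
+automatic callouts), the offset of the current position in the pattern, and a
+length field that complete_callout() fills in later - 2 + 2*LINK_SIZE bytes in
+total. */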
+
+
+
+/*************************************************
+* Complete a callout item *
+*************************************************/
+
+/* A callout item contains the length of the next item in the pattern, which
+we can't fill in till after we have reached the relevant point. This is used
+for both automatic and manual callouts.
+
+Arguments:
+ previous_callout points to previous callout item
+ ptr current pattern pointer
+ cd pointers to tables etc
+
+Returns: nothing
+*/
+
+static void
+complete_callout(uschar *previous_callout, const uschar *ptr, compile_data *cd)
+{
+int length = ptr - cd->start_pattern - GET(previous_callout, 2);
+PUT(previous_callout, 2 + LINK_SIZE, length);
+}
+
+
+
+#ifdef SUPPORT_UCP
+/*************************************************
+* Get othercase range *
+*************************************************/
+
+/* This function is passed the start and end of a class range, in UTF-8 mode
+with UCP support. It searches up the characters, looking for internal ranges of
+characters in the "other" case. Each call returns the next one, updating the
+start address.
+
+Arguments:
+ cptr points to starting character value; updated
+ d end value
+ ocptr where to put start of othercase range
+ odptr where to put end of othercase range
+
+Yield: TRUE when range returned; FALSE when no more
+*/
+
+static BOOL
+get_othercase_range(unsigned int *cptr, unsigned int d, unsigned int *ocptr,
+ unsigned int *odptr)
+{
+unsigned int c, othercase, next;
+
+for (c = *cptr; c <= d; c++)
+ { if ((othercase = _pcre_ucp_othercase(c)) != NOTACHAR) break; }
+
+if (c > d) return FALSE;
+
+*ocptr = othercase;
+next = othercase + 1;
+
+for (++c; c <= d; c++)
+ {
+ if (_pcre_ucp_othercase(c) != next) break;
+ next++;
+ }
+
+*odptr = next - 1;
+*cptr = c;
+
+return TRUE;
+}
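+
+/* For example, starting with *cptr == 'a' and d == 'z', the first call
+returns the range 'A' to 'Z' (the othercase values of a..z are consecutive)
+and advances *cptr past 'z', so the following call returns FALSE. */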
+#endif /* SUPPORT_UCP */
+
+
+
+/*************************************************
+* Check if auto-possessifying is possible *
+*************************************************/
+
+/* This function is called for unlimited repeats of certain items, to see
+whether the next thing could possibly match the repeated item. If not, it makes
+sense to automatically possessify the repeated item.
+
+Arguments:
+ op_code the repeated op code
+  item         data for this item, depends on the opcode
+ utf8 TRUE in UTF-8 mode
+ utf8_char used for utf8 character bytes, NULL if not relevant
+ ptr next character in pattern
+ options options bits
+ cd contains pointers to tables etc.
+
+Returns: TRUE if possessifying is wanted
+*/
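+
+/* For example, in the pattern a+b the following 'b' can never match the
+repeated 'a', so TRUE is returned and a+ is compiled as if it were the
+possessive a++; for a+a the answer is FALSE and nothing changes. */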
+
+static BOOL
+check_auto_possessive(int op_code, int item, BOOL utf8, uschar *utf8_char,
+ const uschar *ptr, int options, compile_data *cd)
+{
+int next;
+
+/* Skip whitespace and comments in extended mode */
+
+if ((options & PCRE_EXTENDED) != 0)
+ {
+ for (;;)
+ {
+ while ((cd->ctypes[*ptr] & ctype_space) != 0) ptr++;
+ if (*ptr == '#')
+ {
+ while (*(++ptr) != 0)
+ if (IS_NEWLINE(ptr)) { ptr += cd->nllen; break; }
+ }
+ else break;
+ }
+ }
+
+/* If the next item is one that we can handle, get its value. A non-negative
+value is a character, a negative value is an escape value. */
+
+if (*ptr == '\\')
+ {
+ int temperrorcode = 0;
+ next = check_escape(&ptr, &temperrorcode, cd->bracount, options, FALSE);
+ if (temperrorcode != 0) return FALSE;
+ ptr++; /* Point after the escape sequence */
+ }
+
+else if ((cd->ctypes[*ptr] & ctype_meta) == 0)
+ {
+#ifdef SUPPORT_UTF8
+ if (utf8) { GETCHARINC(next, ptr); } else
+#endif
+ next = *ptr++;
+ }
+
+else return FALSE;
+
+/* Skip whitespace and comments in extended mode */
+
+if ((options & PCRE_EXTENDED) != 0)
+ {
+ for (;;)
+ {
+ while ((cd->ctypes[*ptr] & ctype_space) != 0) ptr++;
+ if (*ptr == '#')
+ {
+ while (*(++ptr) != 0)
+ if (IS_NEWLINE(ptr)) { ptr += cd->nllen; break; }
+ }
+ else break;
+ }
+ }
+
+/* If the next thing is itself optional, we have to give up. */
+
+if (*ptr == '*' || *ptr == '?' || strncmp((char *)ptr, "{0,", 3) == 0)
+ return FALSE;
+
+/* Now compare the next item with the previous opcode. If the previous is a
+positive single character match, "item" either contains the character or, if
+"item" is greater than 127 in utf8 mode, the character's bytes are in
+utf8_char. */
+
+
+/* Handle cases when the next item is a character. */
+
+if (next >= 0) switch(op_code)
+ {
+ case OP_CHAR:
+#ifdef SUPPORT_UTF8
+ if (utf8 && item > 127) { GETCHAR(item, utf8_char); }
+#endif
+ return item != next;
+
+ /* For CHARNC (caseless character) we must check the other case. If we have
+ Unicode property support, we can use it to test the other case of
+ high-valued characters. */
+
+ case OP_CHARNC:
+#ifdef SUPPORT_UTF8
+ if (utf8 && item > 127) { GETCHAR(item, utf8_char); }
+#endif
+ if (item == next) return FALSE;
+#ifdef SUPPORT_UTF8
+ if (utf8)
+ {
+ unsigned int othercase;
+ if (next < 128) othercase = cd->fcc[next]; else
+#ifdef SUPPORT_UCP
+ othercase = _pcre_ucp_othercase((unsigned int)next);
+#else
+ othercase = NOTACHAR;
+#endif
+ return (unsigned int)item != othercase;
+ }
+ else
+#endif /* SUPPORT_UTF8 */
+ return (item != cd->fcc[next]); /* Non-UTF-8 mode */
+
+ /* For OP_NOT, "item" must be a single-byte character. */
+
+ case OP_NOT:
+ if (next < 0) return FALSE; /* Not a character */
+ if (item == next) return TRUE;
+ if ((options & PCRE_CASELESS) == 0) return FALSE;
+#ifdef SUPPORT_UTF8
+ if (utf8)
+ {
+ unsigned int othercase;
+ if (next < 128) othercase = cd->fcc[next]; else
+#ifdef SUPPORT_UCP
+ othercase = _pcre_ucp_othercase(next);
+#else
+ othercase = NOTACHAR;
+#endif
+ return (unsigned int)item == othercase;
+ }
+ else
+#endif /* SUPPORT_UTF8 */
+ return (item == cd->fcc[next]); /* Non-UTF-8 mode */
+
+ case OP_DIGIT:
+ return next > 127 || (cd->ctypes[next] & ctype_digit) == 0;
+
+ case OP_NOT_DIGIT:
+ return next <= 127 && (cd->ctypes[next] & ctype_digit) != 0;
+
+ case OP_WHITESPACE:
+ return next > 127 || (cd->ctypes[next] & ctype_space) == 0;
+
+ case OP_NOT_WHITESPACE:
+ return next <= 127 && (cd->ctypes[next] & ctype_space) != 0;
+
+ case OP_WORDCHAR:
+ return next > 127 || (cd->ctypes[next] & ctype_word) == 0;
+
+ case OP_NOT_WORDCHAR:
+ return next <= 127 && (cd->ctypes[next] & ctype_word) != 0;
+
+ case OP_HSPACE:
+ case OP_NOT_HSPACE:
+ switch(next)
+ {
+ case 0x09:
+ case 0x20:
+ case 0xa0:
+ case 0x1680:
+ case 0x180e:
+ case 0x2000:
+ case 0x2001:
+ case 0x2002:
+ case 0x2003:
+ case 0x2004:
+ case 0x2005:
+ case 0x2006:
+ case 0x2007:
+ case 0x2008:
+ case 0x2009:
+ case 0x200A:
+ case 0x202f:
+ case 0x205f:
+ case 0x3000:
+ return op_code != OP_HSPACE;
+ default:
+ return op_code == OP_HSPACE;
+ }
+
+ case OP_VSPACE:
+ case OP_NOT_VSPACE:
+ switch(next)
+ {
+ case 0x0a:
+ case 0x0b:
+ case 0x0c:
+ case 0x0d:
+ case 0x85:
+ case 0x2028:
+ case 0x2029:
+ return op_code != OP_VSPACE;
+ default:
+ return op_code == OP_VSPACE;
+ }
+
+ default:
+ return FALSE;
+ }
+
+
+/* Handle the case when the next item is \d, \s, etc. */
+
+switch(op_code)
+ {
+ case OP_CHAR:
+ case OP_CHARNC:
+#ifdef SUPPORT_UTF8
+ if (utf8 && item > 127) { GETCHAR(item, utf8_char); }
+#endif
+ switch(-next)
+ {
+ case ESC_d:
+ return item > 127 || (cd->ctypes[item] & ctype_digit) == 0;
+
+ case ESC_D:
+ return item <= 127 && (cd->ctypes[item] & ctype_digit) != 0;
+
+ case ESC_s:
+ return item > 127 || (cd->ctypes[item] & ctype_space) == 0;
+
+ case ESC_S:
+ return item <= 127 && (cd->ctypes[item] & ctype_space) != 0;
+
+ case ESC_w:
+ return item > 127 || (cd->ctypes[item] & ctype_word) == 0;
+
+ case ESC_W:
+ return item <= 127 && (cd->ctypes[item] & ctype_word) != 0;
+
+ case ESC_h:
+ case ESC_H:
+ switch(item)
+ {
+ case 0x09:
+ case 0x20:
+ case 0xa0:
+ case 0x1680:
+ case 0x180e:
+ case 0x2000:
+ case 0x2001:
+ case 0x2002:
+ case 0x2003:
+ case 0x2004:
+ case 0x2005:
+ case 0x2006:
+ case 0x2007:
+ case 0x2008:
+ case 0x2009:
+ case 0x200A:
+ case 0x202f:
+ case 0x205f:
+ case 0x3000:
+ return -next != ESC_h;
+ default:
+ return -next == ESC_h;
+ }
+
+ case ESC_v:
+ case ESC_V:
+ switch(item)
+ {
+ case 0x0a:
+ case 0x0b:
+ case 0x0c:
+ case 0x0d:
+ case 0x85:
+ case 0x2028:
+ case 0x2029:
+ return -next != ESC_v;
+ default:
+ return -next == ESC_v;
+ }
+
+ default:
+ return FALSE;
+ }
+
+ case OP_DIGIT:
+ return next == -ESC_D || next == -ESC_s || next == -ESC_W ||
+ next == -ESC_h || next == -ESC_v;
+
+ case OP_NOT_DIGIT:
+ return next == -ESC_d;
+
+ case OP_WHITESPACE:
+ return next == -ESC_S || next == -ESC_d || next == -ESC_w;
+
+ case OP_NOT_WHITESPACE:
+ return next == -ESC_s || next == -ESC_h || next == -ESC_v;
+
+ case OP_HSPACE:
+ return next == -ESC_S || next == -ESC_H || next == -ESC_d || next == -ESC_w;
+
+ case OP_NOT_HSPACE:
+ return next == -ESC_h;
+
+ /* Can't have \S in here because VT matches \S (Perl anomaly) */
+ case OP_VSPACE:
+ return next == -ESC_V || next == -ESC_d || next == -ESC_w;
+
+ case OP_NOT_VSPACE:
+ return next == -ESC_v;
+
+ case OP_WORDCHAR:
+ return next == -ESC_W || next == -ESC_s || next == -ESC_h || next == -ESC_v;
+
+ case OP_NOT_WORDCHAR:
+ return next == -ESC_w || next == -ESC_d;
+
+ default:
+ return FALSE;
+ }
+
+/* Control does not reach here */
+}
+
+
+
+/*************************************************
+* Compile one branch *
+*************************************************/
+
+/* Scan the pattern, compiling it into the code vector. If the options are
+changed during the branch, the pointer is used to change the external options
+bits. This function is used during the pre-compile phase when we are trying
+to find out the amount of memory needed, as well as during the real compile
+phase. The value of lengthptr distinguishes the two phases.
+
+Arguments:
+ optionsptr pointer to the option bits
+ codeptr points to the pointer to the current code point
+ ptrptr points to the current pattern pointer
+ errorcodeptr points to error code variable
+ firstbyteptr set to initial literal character, or < 0 (REQ_UNSET, REQ_NONE)
+ reqbyteptr set to the last literal character required, else < 0
+ bcptr points to current branch chain
+ cd contains pointers to tables etc.
+ lengthptr NULL during the real compile phase
+ points to length accumulator during pre-compile phase
+
+Returns: TRUE on success
+ FALSE, with *errorcodeptr set non-zero on error
+*/
+
+static BOOL
+compile_branch(int *optionsptr, uschar **codeptr, const uschar **ptrptr,
+ int *errorcodeptr, int *firstbyteptr, int *reqbyteptr, branch_chain *bcptr,
+ compile_data *cd, int *lengthptr)
+{
+int repeat_type, op_type;
+int repeat_min = 0, repeat_max = 0; /* To please picky compilers */
+int bravalue = 0;
+int greedy_default, greedy_non_default;
+int firstbyte, reqbyte;
+int zeroreqbyte, zerofirstbyte;
+int req_caseopt, reqvary, tempreqvary;
+int options = *optionsptr;
+int after_manual_callout = 0;
+int length_prevgroup = 0;
+register int c;
+register uschar *code = *codeptr;
+uschar *last_code = code;
+uschar *orig_code = code;
+uschar *tempcode;
+BOOL inescq = FALSE;
+BOOL groupsetfirstbyte = FALSE;
+const uschar *ptr = *ptrptr;
+const uschar *tempptr;
+uschar *previous = NULL;
+uschar *previous_callout = NULL;
+uschar *save_hwm = NULL;
+uschar classbits[32];
+
+#ifdef SUPPORT_UTF8
+BOOL class_utf8;
+BOOL utf8 = (options & PCRE_UTF8) != 0;
+uschar *class_utf8data;
+uschar utf8_char[6];
+#else
+BOOL utf8 = FALSE;
+uschar *utf8_char = NULL;
+#endif
+
+#ifdef DEBUG
+if (lengthptr != NULL) DPRINTF((">> start branch\n"));
+#endif
+
+/* Set up the default and non-default settings for greediness */
+
+greedy_default = ((options & PCRE_UNGREEDY) != 0);
+greedy_non_default = greedy_default ^ 1;
+
+/* Initialize no first byte, no required byte. REQ_UNSET means "no char
+matching encountered yet". It gets changed to REQ_NONE if we hit something that
+matches a non-fixed char first char; reqbyte just remains unset if we never
+find one.
+
+When we hit a repeat whose minimum is zero, we may have to adjust these values
+to take the zero repeat into account. This is implemented by setting them to
+zerofirstbyte and zeroreqbyte when such a repeat is encountered. The individual
+item types that can be repeated set these backoff variables appropriately. */
+
+firstbyte = reqbyte = zerofirstbyte = zeroreqbyte = REQ_UNSET;
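+
+/* Roughly speaking (ignoring the case bits), for a branch such as xy*z the
+result is firstbyte == 'x' and reqbyte == 'z'; the zero-minimum repeat y* puts
+back the values saved in zerofirstbyte and zeroreqbyte so that the optional y
+does not disturb them. */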
+
+/* The variable req_caseopt contains either the REQ_CASELESS value or zero,
+according to the current setting of the caseless flag. REQ_CASELESS is a bit
+value > 255. It is added into the firstbyte or reqbyte variables to record the
+case status of the value. This is used only for ASCII characters. */
+
+req_caseopt = ((options & PCRE_CASELESS) != 0)? REQ_CASELESS : 0;
+
+/* Switch on next character until the end of the branch */
+
+for (;; ptr++)
+ {
+ BOOL negate_class;
+ BOOL possessive_quantifier;
+ BOOL is_quantifier;
+ BOOL is_recurse;
+ BOOL reset_bracount;
+ int class_charcount;
+ int class_lastchar;
+ int newoptions;
+ int recno;
+ int refsign;
+ int skipbytes;
+ int subreqbyte;
+ int subfirstbyte;
+ int terminator;
+ int mclength;
+ uschar mcbuffer[8];
+
+ /* Get next byte in the pattern */
+
+ c = *ptr;
+
+ /* If we are in the pre-compile phase, accumulate the length used for the
+ previous cycle of this loop. */
+
+ if (lengthptr != NULL)
+ {
+#ifdef DEBUG
+ if (code > cd->hwm) cd->hwm = code; /* High water info */
+#endif
+ if (code > cd->start_workspace + COMPILE_WORK_SIZE) /* Check for overrun */
+ {
+ *errorcodeptr = ERR52;
+ goto FAILED;
+ }
+
+ /* There is at least one situation where code goes backwards: this is the
+ case of a zero quantifier after a class (e.g. [ab]{0}). At compile time,
+ the class is simply eliminated. However, it is created first, so we have to
+ allow memory for it. Therefore, don't ever reduce the length at this point.
+ */
+
+ if (code < last_code) code = last_code;
+
+ /* Paranoid check for integer overflow */
+
+ if (OFLOW_MAX - *lengthptr < code - last_code)
+ {
+ *errorcodeptr = ERR20;
+ goto FAILED;
+ }
+
+ *lengthptr += code - last_code;
+ DPRINTF(("length=%d added %d c=%c\n", *lengthptr, code - last_code, c));
+
+ /* If "previous" is set and it is not at the start of the work space, move
+ it back to there, in order to avoid filling up the work space. Otherwise,
+ if "previous" is NULL, reset the current code pointer to the start. */
+
+ if (previous != NULL)
+ {
+ if (previous > orig_code)
+ {
+ memmove(orig_code, previous, code - previous);
+ code -= previous - orig_code;
+ previous = orig_code;
+ }
+ }
+ else code = orig_code;
+
+ /* Remember where this code item starts so we can pick up the length
+ next time round. */
+
+ last_code = code;
+ }
+
+ /* In the real compile phase, just check the workspace used by the forward
+ reference list. */
+
+ else if (cd->hwm > cd->start_workspace + COMPILE_WORK_SIZE)
+ {
+ *errorcodeptr = ERR52;
+ goto FAILED;
+ }
+
+ /* If in \Q...\E, check for the end; if not, we have a literal */
+
+ if (inescq && c != 0)
+ {
+ if (c == '\\' && ptr[1] == 'E')
+ {
+ inescq = FALSE;
+ ptr++;
+ continue;
+ }
+ else
+ {
+ if (previous_callout != NULL)
+ {
+ if (lengthptr == NULL) /* Don't attempt in pre-compile phase */
+ complete_callout(previous_callout, ptr, cd);
+ previous_callout = NULL;
+ }
+ if ((options & PCRE_AUTO_CALLOUT) != 0)
+ {
+ previous_callout = code;
+ code = auto_callout(code, ptr, cd);
+ }
+ goto NORMAL_CHAR;
+ }
+ }
+
+ /* Fill in length of a previous callout, except when the next thing is
+ a quantifier. */
+
+ is_quantifier = c == '*' || c == '+' || c == '?' ||
+ (c == '{' && is_counted_repeat(ptr+1));
+
+ if (!is_quantifier && previous_callout != NULL &&
+ after_manual_callout-- <= 0)
+ {
+ if (lengthptr == NULL) /* Don't attempt in pre-compile phase */
+ complete_callout(previous_callout, ptr, cd);
+ previous_callout = NULL;
+ }
+
+ /* In extended mode, skip white space and comments */
+
+ if ((options & PCRE_EXTENDED) != 0)
+ {
+ if ((cd->ctypes[c] & ctype_space) != 0) continue;
+ if (c == '#')
+ {
+ while (*(++ptr) != 0)
+ {
+ if (IS_NEWLINE(ptr)) { ptr += cd->nllen - 1; break; }
+ }
+ if (*ptr != 0) continue;
+
+ /* Else fall through to handle end of string */
+ c = 0;
+ }
+ }
+
+ /* No auto callout for quantifiers. */
+
+ if ((options & PCRE_AUTO_CALLOUT) != 0 && !is_quantifier)
+ {
+ previous_callout = code;
+ code = auto_callout(code, ptr, cd);
+ }
+
+ switch(c)
+ {
+ /* ===================================================================*/
+ case 0: /* The branch terminates at string end */
+ case '|': /* or | or ) */
+ case ')':
+ *firstbyteptr = firstbyte;
+ *reqbyteptr = reqbyte;
+ *codeptr = code;
+ *ptrptr = ptr;
+ if (lengthptr != NULL)
+ {
+ if (OFLOW_MAX - *lengthptr < code - last_code)
+ {
+ *errorcodeptr = ERR20;
+ goto FAILED;
+ }
+ *lengthptr += code - last_code; /* To include callout length */
+ DPRINTF((">> end branch\n"));
+ }
+ return TRUE;
+
+
+ /* ===================================================================*/
+ /* Handle single-character metacharacters. In multiline mode, ^ disables
+ the setting of any following char as a first character. */
+
+ case '^':
+ if ((options & PCRE_MULTILINE) != 0)
+ {
+ if (firstbyte == REQ_UNSET) firstbyte = REQ_NONE;
+ }
+ previous = NULL;
+ *code++ = OP_CIRC;
+ break;
+
+ case '$':
+ previous = NULL;
+ *code++ = OP_DOLL;
+ break;
+
+ /* There can never be a first char if '.' is first, whatever happens about
+ repeats. The value of reqbyte doesn't change either. */
+
+ case '.':
+ if (firstbyte == REQ_UNSET) firstbyte = REQ_NONE;
+ zerofirstbyte = firstbyte;
+ zeroreqbyte = reqbyte;
+ previous = code;
+ *code++ = OP_ANY;
+ break;
+
+
+ /* ===================================================================*/
+ /* Character classes. If the included characters are all < 256, we build a
+ 32-byte bitmap of the permitted characters, except in the special case
+ where there is only one such character. For negated classes, we build the
+ map as usual, then invert it at the end. However, we use a different opcode
+ so that data characters > 255 can be handled correctly.
+
+ If the class contains characters outside the 0-255 range, a different
+ opcode is compiled. It may optionally have a bit map for characters < 256,
+    but those above are explicitly listed afterwards. A flag byte tells
+ whether the bitmap is present, and whether this is a negated class or not.
+ */
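+
+    /* For example, [aeiou] compiles to OP_CLASS followed by a 32-byte map
+    with the five vowel bits set, whereas in UTF-8 mode a class containing a
+    character above 255 becomes an OP_XCLASS item as described above. */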
+
+ case '[':
+ previous = code;
+
+ /* PCRE supports POSIX class stuff inside a class. Perl gives an error if
+ they are encountered at the top level, so we'll do that too. */
+
+ if ((ptr[1] == ':' || ptr[1] == '.' || ptr[1] == '=') &&
+ check_posix_syntax(ptr, &tempptr, cd))
+ {
+ *errorcodeptr = (ptr[1] == ':')? ERR13 : ERR31;
+ goto FAILED;
+ }
+
+ /* If the first character is '^', set the negation flag and skip it. Also,
+ if the first few characters (either before or after ^) are \Q\E or \E we
+ skip them too. This makes for compatibility with Perl. */
+
+ negate_class = FALSE;
+ for (;;)
+ {
+ c = *(++ptr);
+ if (c == '\\')
+ {
+ if (ptr[1] == 'E') ptr++;
+ else if (strncmp((const char *)ptr+1, "Q\\E", 3) == 0) ptr += 3;
+ else break;
+ }
+ else if (!negate_class && c == '^')
+ negate_class = TRUE;
+ else break;
+ }
+
+ /* Keep a count of chars with values < 256 so that we can optimize the case
+    of just a single character (as long as it's < 256). However, for higher
+ valued UTF-8 characters, we don't yet do any optimization. */
+
+ class_charcount = 0;
+ class_lastchar = -1;
+
+ /* Initialize the 32-char bit map to all zeros. We build the map in a
+ temporary bit of memory, in case the class contains only 1 character (less
+ than 256), because in that case the compiled code doesn't use the bit map.
+ */
+
+ memset(classbits, 0, 32 * sizeof(uschar));
+
+#ifdef SUPPORT_UTF8
+ class_utf8 = FALSE; /* No chars >= 256 */
+ class_utf8data = code + LINK_SIZE + 2; /* For UTF-8 items */
+#endif
+
+ /* Process characters until ] is reached. By writing this as a "do" it
+ means that an initial ] is taken as a data character. At the start of the
+ loop, c contains the first byte of the character. */
+
+ if (c != 0) do
+ {
+ const uschar *oldptr;
+
+#ifdef SUPPORT_UTF8
+ if (utf8 && c > 127)
+ { /* Braces are required because the */
+ GETCHARLEN(c, ptr, ptr); /* macro generates multiple statements */
+ }
+#endif
+
+ /* Inside \Q...\E everything is literal except \E */
+
+ if (inescq)
+ {
+ if (c == '\\' && ptr[1] == 'E') /* If we are at \E */
+ {
+ inescq = FALSE; /* Reset literal state */
+ ptr++; /* Skip the 'E' */
+ continue; /* Carry on with next */
+ }
+ goto CHECK_RANGE; /* Could be range if \E follows */
+ }
+
+ /* Handle POSIX class names. Perl allows a negation extension of the
+ form [:^name:]. A square bracket that doesn't match the syntax is
+ treated as a literal. We also recognize the POSIX constructions
+ [.ch.] and [=ch=] ("collating elements") and fault them, as Perl
+ 5.6 and 5.8 do. */
+
+ if (c == '[' &&
+ (ptr[1] == ':' || ptr[1] == '.' || ptr[1] == '=') &&
+ check_posix_syntax(ptr, &tempptr, cd))
+ {
+ BOOL local_negate = FALSE;
+ int posix_class, taboffset, tabopt;
+ register const uschar *cbits = cd->cbits;
+ uschar pbits[32];
+
+ if (ptr[1] != ':')
+ {
+ *errorcodeptr = ERR31;
+ goto FAILED;
+ }
+
+ ptr += 2;
+ if (*ptr == '^')
+ {
+ local_negate = TRUE;
+ ptr++;
+ }
+
+ posix_class = check_posix_name(ptr, tempptr - ptr);
+ if (posix_class < 0)
+ {
+ *errorcodeptr = ERR30;
+ goto FAILED;
+ }
+
+ /* If matching is caseless, upper and lower are converted to
+ alpha. This relies on the fact that the class table starts with
+ alpha, lower, upper as the first 3 entries. */
+
+ if ((options & PCRE_CASELESS) != 0 && posix_class <= 2)
+ posix_class = 0;
+
+ /* We build the bit map for the POSIX class in a chunk of local store
+ because we may be adding and subtracting from it, and we don't want to
+ subtract bits that may be in the main map already. At the end we or the
+ result into the bit map that is being built. */
+
+ posix_class *= 3;
+
+ /* Copy in the first table (always present) */
+
+ memcpy(pbits, cbits + posix_class_maps[posix_class],
+ 32 * sizeof(uschar));
+
+ /* If there is a second table, add or remove it as required. */
+
+ taboffset = posix_class_maps[posix_class + 1];
+ tabopt = posix_class_maps[posix_class + 2];
+
+ if (taboffset >= 0)
+ {
+ if (tabopt >= 0)
+ for (c = 0; c < 32; c++) pbits[c] |= cbits[c + taboffset];
+ else
+ for (c = 0; c < 32; c++) pbits[c] &= ~cbits[c + taboffset];
+ }
+
+        /* Now see if we need to remove any special characters. An option
+ value of 1 removes vertical space and 2 removes underscore. */
+
+ if (tabopt < 0) tabopt = -tabopt;
+ if (tabopt == 1) pbits[1] &= ~0x3c;
+ else if (tabopt == 2) pbits[11] &= 0x7f;
+
+ /* Add the POSIX table or its complement into the main table that is
+ being built and we are done. */
+
+ if (local_negate)
+ for (c = 0; c < 32; c++) classbits[c] |= ~pbits[c];
+ else
+ for (c = 0; c < 32; c++) classbits[c] |= pbits[c];
+
+ ptr = tempptr + 1;
+ class_charcount = 10; /* Set > 1; assumes more than 1 per class */
+ continue; /* End of POSIX syntax handling */
+ }
+
+ /* Backslash may introduce a single character, or it may introduce one
+ of the specials, which just set a flag. The sequence \b is a special
+ case. Inside a class (and only there) it is treated as backspace.
+ Elsewhere it marks a word boundary. Other escapes have preset maps ready
+ to 'or' into the one we are building. We assume they have more than one
+ character in them, so set class_charcount bigger than one. */
+
+ if (c == '\\')
+ {
+ c = check_escape(&ptr, errorcodeptr, cd->bracount, options, TRUE);
+ if (*errorcodeptr != 0) goto FAILED;
+
+        if (-c == ESC_b) c = '\b'; /* \b is backspace in a class */
+ else if (-c == ESC_X) c = 'X'; /* \X is literal X in a class */
+ else if (-c == ESC_R) c = 'R'; /* \R is literal R in a class */
+ else if (-c == ESC_Q) /* Handle start of quoted string */
+ {
+ if (ptr[1] == '\\' && ptr[2] == 'E')
+ {
+ ptr += 2; /* avoid empty string */
+ }
+ else inescq = TRUE;
+ continue;
+ }
+ else if (-c == ESC_E) continue; /* Ignore orphan \E */
+
+ if (c < 0)
+ {
+ register const uschar *cbits = cd->cbits;
+ class_charcount += 2; /* Greater than 1 is what matters */
+
+ /* Save time by not doing this in the pre-compile phase. */
+
+ if (lengthptr == NULL) switch (-c)
+ {
+ case ESC_d:
+ for (c = 0; c < 32; c++) classbits[c] |= cbits[c+cbit_digit];
+ continue;
+
+ case ESC_D:
+ for (c = 0; c < 32; c++) classbits[c] |= ~cbits[c+cbit_digit];
+ continue;
+
+ case ESC_w:
+ for (c = 0; c < 32; c++) classbits[c] |= cbits[c+cbit_word];
+ continue;
+
+ case ESC_W:
+ for (c = 0; c < 32; c++) classbits[c] |= ~cbits[c+cbit_word];
+ continue;
+
+ case ESC_s:
+ for (c = 0; c < 32; c++) classbits[c] |= cbits[c+cbit_space];
+ classbits[1] &= ~0x08; /* Perl 5.004 onwards omits VT from \s */
+ continue;
+
+ case ESC_S:
+ for (c = 0; c < 32; c++) classbits[c] |= ~cbits[c+cbit_space];
+ classbits[1] |= 0x08; /* Perl 5.004 onwards omits VT from \s */
+ continue;
+
+ case ESC_E: /* Perl ignores an orphan \E */
+ continue;
+
+ default: /* Not recognized; fall through */
+ break; /* Need "default" setting to stop compiler warning. */
+ }
+
+ /* In the pre-compile phase, just do the recognition. */
+
+ else if (c == -ESC_d || c == -ESC_D || c == -ESC_w ||
+ c == -ESC_W || c == -ESC_s || c == -ESC_S) continue;
+
+ /* We need to deal with \H, \h, \V, and \v in both phases because
+ they use extra memory. */
+
+ if (-c == ESC_h)
+ {
+            SETBIT(classbits, 0x09); /* TAB */
+ SETBIT(classbits, 0x20); /* SPACE */
+            SETBIT(classbits, 0xa0); /* NBSP */
+#ifdef SUPPORT_UTF8
+ if (utf8)
+ {
+ class_utf8 = TRUE;
+ *class_utf8data++ = XCL_SINGLE;
+ class_utf8data += _pcre_ord2utf8(0x1680, class_utf8data);
+ *class_utf8data++ = XCL_SINGLE;
+ class_utf8data += _pcre_ord2utf8(0x180e, class_utf8data);
+ *class_utf8data++ = XCL_RANGE;
+ class_utf8data += _pcre_ord2utf8(0x2000, class_utf8data);
+ class_utf8data += _pcre_ord2utf8(0x200A, class_utf8data);
+ *class_utf8data++ = XCL_SINGLE;
+ class_utf8data += _pcre_ord2utf8(0x202f, class_utf8data);
+ *class_utf8data++ = XCL_SINGLE;
+ class_utf8data += _pcre_ord2utf8(0x205f, class_utf8data);
+ *class_utf8data++ = XCL_SINGLE;
+ class_utf8data += _pcre_ord2utf8(0x3000, class_utf8data);
+ }
+#endif
+ continue;
+ }
+
+ if (-c == ESC_H)
+ {
+ for (c = 0; c < 32; c++)
+ {
+ int x = 0xff;
+ switch (c)
+ {
+ case 0x09/8: x ^= 1 << (0x09%8); break;
+ case 0x20/8: x ^= 1 << (0x20%8); break;
+ case 0xa0/8: x ^= 1 << (0xa0%8); break;
+ default: break;
+ }
+ classbits[c] |= x;
+ }
+
+#ifdef SUPPORT_UTF8
+ if (utf8)
+ {
+ class_utf8 = TRUE;
+ *class_utf8data++ = XCL_RANGE;
+ class_utf8data += _pcre_ord2utf8(0x0100, class_utf8data);
+ class_utf8data += _pcre_ord2utf8(0x167f, class_utf8data);
+ *class_utf8data++ = XCL_RANGE;
+ class_utf8data += _pcre_ord2utf8(0x1681, class_utf8data);
+ class_utf8data += _pcre_ord2utf8(0x180d, class_utf8data);
+ *class_utf8data++ = XCL_RANGE;
+ class_utf8data += _pcre_ord2utf8(0x180f, class_utf8data);
+ class_utf8data += _pcre_ord2utf8(0x1fff, class_utf8data);
+ *class_utf8data++ = XCL_RANGE;
+ class_utf8data += _pcre_ord2utf8(0x200B, class_utf8data);
+ class_utf8data += _pcre_ord2utf8(0x202e, class_utf8data);
+ *class_utf8data++ = XCL_RANGE;
+ class_utf8data += _pcre_ord2utf8(0x2030, class_utf8data);
+ class_utf8data += _pcre_ord2utf8(0x205e, class_utf8data);
+ *class_utf8data++ = XCL_RANGE;
+ class_utf8data += _pcre_ord2utf8(0x2060, class_utf8data);
+ class_utf8data += _pcre_ord2utf8(0x2fff, class_utf8data);
+ *class_utf8data++ = XCL_RANGE;
+ class_utf8data += _pcre_ord2utf8(0x3001, class_utf8data);
+ class_utf8data += _pcre_ord2utf8(0x7fffffff, class_utf8data);
+ }
+#endif
+ continue;
+ }
+
+ if (-c == ESC_v)
+ {
+ SETBIT(classbits, 0x0a); /* LF */
+ SETBIT(classbits, 0x0b); /* VT */
+ SETBIT(classbits, 0x0c); /* FF */
+ SETBIT(classbits, 0x0d); /* CR */
+ SETBIT(classbits, 0x85); /* NEL */
+#ifdef SUPPORT_UTF8
+ if (utf8)
+ {
+ class_utf8 = TRUE;
+ *class_utf8data++ = XCL_RANGE;
+ class_utf8data += _pcre_ord2utf8(0x2028, class_utf8data);
+ class_utf8data += _pcre_ord2utf8(0x2029, class_utf8data);
+ }
+#endif
+ continue;
+ }
+
+ if (-c == ESC_V)
+ {
+ for (c = 0; c < 32; c++)
+ {
+ int x = 0xff;
+ switch (c)
+ {
+ case 0x0a/8: x ^= 1 << (0x0a%8);
+ x ^= 1 << (0x0b%8);
+ x ^= 1 << (0x0c%8);
+ x ^= 1 << (0x0d%8);
+ break;
+ case 0x85/8: x ^= 1 << (0x85%8); break;
+ default: break;
+ }
+ classbits[c] |= x;
+ }
+
+#ifdef SUPPORT_UTF8
+ if (utf8)
+ {
+ class_utf8 = TRUE;
+ *class_utf8data++ = XCL_RANGE;
+ class_utf8data += _pcre_ord2utf8(0x0100, class_utf8data);
+ class_utf8data += _pcre_ord2utf8(0x2027, class_utf8data);
+ *class_utf8data++ = XCL_RANGE;
+ class_utf8data += _pcre_ord2utf8(0x2029, class_utf8data);
+ class_utf8data += _pcre_ord2utf8(0x7fffffff, class_utf8data);
+ }
+#endif
+ continue;
+ }
+
+ /* We need to deal with \P and \p in both phases. */
+
+#ifdef SUPPORT_UCP
+ if (-c == ESC_p || -c == ESC_P)
+ {
+ BOOL negated;
+ int pdata;
+ int ptype = get_ucp(&ptr, &negated, &pdata, errorcodeptr);
+ if (ptype < 0) goto FAILED;
+ class_utf8 = TRUE;
+ *class_utf8data++ = ((-c == ESC_p) != negated)?
+ XCL_PROP : XCL_NOTPROP;
+ *class_utf8data++ = ptype;
+ *class_utf8data++ = pdata;
+ class_charcount -= 2; /* Not a < 256 character */
+ continue;
+ }
+#endif
+ /* Unrecognized escapes are faulted if PCRE is running in its
+ strict mode. By default, for compatibility with Perl, they are
+ treated as literals. */
+
+ if ((options & PCRE_EXTRA) != 0)
+ {
+ *errorcodeptr = ERR7;
+ goto FAILED;
+ }
+
+ class_charcount -= 2; /* Undo the default count from above */
+ c = *ptr; /* Get the final character and fall through */
+ }
+
+ /* Fall through if we have a single character (c >= 0). This may be
+ greater than 256 in UTF-8 mode. */
+
+ } /* End of backslash handling */
+
+ /* A single character may be followed by '-' to form a range. However,
+ Perl does not permit ']' to be the end of the range. A '-' character
+ at the end is treated as a literal. Perl ignores orphaned \E sequences
+ entirely. The code for handling \Q and \E is messy. */
+
+ CHECK_RANGE:
+ while (ptr[1] == '\\' && ptr[2] == 'E')
+ {
+ inescq = FALSE;
+ ptr += 2;
+ }
+
+ oldptr = ptr;
+
+ /* Remember \r or \n */
+
+ if (c == '\r' || c == '\n') cd->external_flags |= PCRE_HASCRORLF;
+
+ /* Check for range */
+
+ if (!inescq && ptr[1] == '-')
+ {
+ int d;
+ ptr += 2;
+ while (*ptr == '\\' && ptr[1] == 'E') ptr += 2;
+
+ /* If we hit \Q (not followed by \E) at this point, go into escaped
+ mode. */
+
+ while (*ptr == '\\' && ptr[1] == 'Q')
+ {
+ ptr += 2;
+ if (*ptr == '\\' && ptr[1] == 'E') { ptr += 2; continue; }
+ inescq = TRUE;
+ break;
+ }
+
+ if (*ptr == 0 || (!inescq && *ptr == ']'))
+ {
+ ptr = oldptr;
+ goto LONE_SINGLE_CHARACTER;
+ }
+
+#ifdef SUPPORT_UTF8
+ if (utf8)
+ { /* Braces are required because the */
+ GETCHARLEN(d, ptr, ptr); /* macro generates multiple statements */
+ }
+ else
+#endif
+ d = *ptr; /* Not UTF-8 mode */
+
+ /* The second part of a range can be a single-character escape, but
+ not any of the other escapes. Perl 5.6 treats a hyphen as a literal
+ in such circumstances. */
+
+ if (!inescq && d == '\\')
+ {
+ d = check_escape(&ptr, errorcodeptr, cd->bracount, options, TRUE);
+ if (*errorcodeptr != 0) goto FAILED;
+
+          /* \b is backspace; \X is literal X; \R is literal R; any other
+ special means the '-' was literal */
+
+ if (d < 0)
+ {
+ if (d == -ESC_b) d = '\b';
+ else if (d == -ESC_X) d = 'X';
+ else if (d == -ESC_R) d = 'R'; else
+ {
+ ptr = oldptr;
+ goto LONE_SINGLE_CHARACTER; /* A few lines below */
+ }
+ }
+ }
+
+ /* Check that the two values are in the correct order. Optimize
+ one-character ranges */
+
+ if (d < c)
+ {
+ *errorcodeptr = ERR8;
+ goto FAILED;
+ }
+
+ if (d == c) goto LONE_SINGLE_CHARACTER; /* A few lines below */
+
+ /* Remember \r or \n */
+
+ if (d == '\r' || d == '\n') cd->external_flags |= PCRE_HASCRORLF;
+
+ /* In UTF-8 mode, if the upper limit is > 255, or > 127 for caseless
+ matching, we have to use an XCLASS with extra data items. Caseless
+ matching for characters > 127 is available only if UCP support is
+ available. */
+
+#ifdef SUPPORT_UTF8
+ if (utf8 && (d > 255 || ((options & PCRE_CASELESS) != 0 && d > 127)))
+ {
+ class_utf8 = TRUE;
+
+ /* With UCP support, we can find the other case equivalents of
+ the relevant characters. There may be several ranges. Optimize how
+ they fit with the basic range. */
+
+#ifdef SUPPORT_UCP
+ if ((options & PCRE_CASELESS) != 0)
+ {
+ unsigned int occ, ocd;
+ unsigned int cc = c;
+ unsigned int origd = d;
+ while (get_othercase_range(&cc, origd, &occ, &ocd))
+ {
+ if (occ >= (unsigned int)c &&
+ ocd <= (unsigned int)d)
+ continue; /* Skip embedded ranges */
+
+ if (occ < (unsigned int)c &&
+ ocd >= (unsigned int)c - 1) /* Extend the basic range */
+ { /* if there is overlap, */
+ c = occ; /* noting that if occ < c */
+ continue; /* we can't have ocd > d */
+ } /* because a subrange is */
+ if (ocd > (unsigned int)d &&
+ occ <= (unsigned int)d + 1) /* always shorter than */
+ { /* the basic range. */
+ d = ocd;
+ continue;
+ }
+
+ if (occ == ocd)
+ {
+ *class_utf8data++ = XCL_SINGLE;
+ }
+ else
+ {
+ *class_utf8data++ = XCL_RANGE;
+ class_utf8data += _pcre_ord2utf8(occ, class_utf8data);
+ }
+ class_utf8data += _pcre_ord2utf8(ocd, class_utf8data);
+ }
+ }
+#endif /* SUPPORT_UCP */
+
+ /* Now record the original range, possibly modified for UCP caseless
+ overlapping ranges. */
+
+ *class_utf8data++ = XCL_RANGE;
+ class_utf8data += _pcre_ord2utf8(c, class_utf8data);
+ class_utf8data += _pcre_ord2utf8(d, class_utf8data);
+
+ /* With UCP support, we are done. Without UCP support, there is no
+ caseless matching for UTF-8 characters > 127; we can use the bit map
+ for the smaller ones. */
+
+#ifdef SUPPORT_UCP
+ continue; /* With next character in the class */
+#else
+ if ((options & PCRE_CASELESS) == 0 || c > 127) continue;
+
+ /* Adjust upper limit and fall through to set up the map */
+
+ d = 127;
+
+#endif /* SUPPORT_UCP */
+ }
+#endif /* SUPPORT_UTF8 */
+
+ /* We use the bit map for all cases when not in UTF-8 mode; else
+ ranges that lie entirely within 0-127 when there is UCP support; else
+ for partial ranges without UCP support. */
+
+ class_charcount += d - c + 1;
+ class_lastchar = d;
+
+ /* We can save a bit of time by skipping this in the pre-compile. */
+
+ if (lengthptr == NULL) for (; c <= d; c++)
+ {
+ classbits[c/8] |= (1 << (c&7));
+ if ((options & PCRE_CASELESS) != 0)
+ {
+ int uc = cd->fcc[c]; /* flip case */
+ classbits[uc/8] |= (1 << (uc&7));
+ }
+ }
+
+ continue; /* Go get the next char in the class */
+ }
+
+ /* Handle a lone single character - we can get here for a normal
+ non-escape char, or after \ that introduces a single character or for an
+ apparent range that isn't. */
+
+ LONE_SINGLE_CHARACTER:
+
+ /* Handle a character that cannot go in the bit map */
+
+#ifdef SUPPORT_UTF8
+ if (utf8 && (c > 255 || ((options & PCRE_CASELESS) != 0 && c > 127)))
+ {
+ class_utf8 = TRUE;
+ *class_utf8data++ = XCL_SINGLE;
+ class_utf8data += _pcre_ord2utf8(c, class_utf8data);
+
+#ifdef SUPPORT_UCP
+ if ((options & PCRE_CASELESS) != 0)
+ {
+ unsigned int othercase;
+ if ((othercase = _pcre_ucp_othercase(c)) != NOTACHAR)
+ {
+ *class_utf8data++ = XCL_SINGLE;
+ class_utf8data += _pcre_ord2utf8(othercase, class_utf8data);
+ }
+ }
+#endif /* SUPPORT_UCP */
+
+ }
+ else
+#endif /* SUPPORT_UTF8 */
+
+ /* Handle a single-byte character */
+ {
+ classbits[c/8] |= (1 << (c&7));
+ if ((options & PCRE_CASELESS) != 0)
+ {
+ c = cd->fcc[c]; /* flip case */
+ classbits[c/8] |= (1 << (c&7));
+ }
+ class_charcount++;
+ class_lastchar = c;
+ }
+ }
+
+ /* Loop until ']' reached. This "while" is the end of the "do" above. */
+
+ while ((c = *(++ptr)) != 0 && (c != ']' || inescq));
+
+ if (c == 0) /* Missing terminating ']' */
+ {
+ *errorcodeptr = ERR6;
+ goto FAILED;
+ }
+
+
+/* This code has been disabled because it would mean that \s counts as
+an explicit \r or \n reference, and that's not really what is wanted. Now
+we set the flag only if there is a literal "\r" or "\n" in the class. */
+
+#if 0
+ /* Remember whether \r or \n are in this class */
+
+ if (negate_class)
+ {
+ if ((classbits[1] & 0x24) != 0x24) cd->external_flags |= PCRE_HASCRORLF;
+ }
+ else
+ {
+ if ((classbits[1] & 0x24) != 0) cd->external_flags |= PCRE_HASCRORLF;
+ }
+#endif
+
+
+ /* If class_charcount is 1, we saw precisely one character whose value is
+ less than 256. As long as there were no characters >= 128 and there was no
+ use of \p or \P, in other words, no use of any XCLASS features, we can
+ optimize.
+
+ In UTF-8 mode, we can optimize the negative case only if there were no
+ characters >= 128 because OP_NOT and the related opcodes like OP_NOTSTAR
+ operate on single-bytes only. This is an historical hangover. Maybe one day
+ we can tidy these opcodes to handle multi-byte characters.
+
+ The optimization throws away the bit map. We turn the item into a
+ 1-character OP_CHAR[NC] if it's positive, or OP_NOT if it's negative. Note
+ that OP_NOT does not support multibyte characters. In the positive case, it
+ can cause firstbyte to be set. Otherwise, there can be no first char if
+ this item is first, whatever repeat count may follow. In the case of
+ reqbyte, save the previous value for reinstating. */
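+
+  /* For example, [q] is compiled here exactly as if the pattern had contained
+  a plain q, and [^q] becomes OP_NOT followed by the byte 'q'. */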
+
+#ifdef SUPPORT_UTF8
+ if (class_charcount == 1 && !class_utf8 &&
+ (!utf8 || !negate_class || class_lastchar < 128))
+#else
+ if (class_charcount == 1)
+#endif
+ {
+ zeroreqbyte = reqbyte;
+
+ /* The OP_NOT opcode works on one-byte characters only. */
+
+ if (negate_class)
+ {
+ if (firstbyte == REQ_UNSET) firstbyte = REQ_NONE;
+ zerofirstbyte = firstbyte;
+ *code++ = OP_NOT;
+ *code++ = class_lastchar;
+ break;
+ }
+
+ /* For a single, positive character, get the value into mcbuffer, and
+ then we can handle this with the normal one-character code. */
+
+#ifdef SUPPORT_UTF8
+ if (utf8 && class_lastchar > 127)
+ mclength = _pcre_ord2utf8(class_lastchar, mcbuffer);
+ else
+#endif
+ {
+ mcbuffer[0] = class_lastchar;
+ mclength = 1;
+ }
+ goto ONE_CHAR;
+ } /* End of 1-char optimization */
+
+ /* The general case - not the one-char optimization. If this is the first
+ thing in the branch, there can be no first char setting, whatever the
+ repeat count. Any reqbyte setting must remain unchanged after any kind of
+ repeat. */
+
+ if (firstbyte == REQ_UNSET) firstbyte = REQ_NONE;
+ zerofirstbyte = firstbyte;
+ zeroreqbyte = reqbyte;
+
+ /* If there are characters with values > 255, we have to compile an
+ extended class, with its own opcode. If there are no characters < 256,
+ we can omit the bitmap in the actual compiled code. */
+
+#ifdef SUPPORT_UTF8
+ if (class_utf8)
+ {
+ *class_utf8data++ = XCL_END; /* Marks the end of extra data */
+ *code++ = OP_XCLASS;
+ code += LINK_SIZE;
+ *code = negate_class? XCL_NOT : 0;
+
+ /* If the map is required, move up the extra data to make room for it;
+ otherwise just move the code pointer to the end of the extra data. */
+
+ if (class_charcount > 0)
+ {
+ *code++ |= XCL_MAP;
+ memmove(code + 32, code, class_utf8data - code);
+ memcpy(code, classbits, 32);
+ code = class_utf8data + 32;
+ }
+ else code = class_utf8data;
+
+ /* Now fill in the complete length of the item */
+
+ PUT(previous, 1, code - previous);
+ break; /* End of class handling */
+ }
+#endif
+
+ /* If there are no characters > 255, negate the 32-byte map if necessary,
+ and copy it into the code vector. If this is the first thing in the branch,
+ there can be no first char setting, whatever the repeat count. Any reqbyte
+ setting must remain unchanged after any kind of repeat. */
+
+ if (negate_class)
+ {
+ *code++ = OP_NCLASS;
+ if (lengthptr == NULL) /* Save time in the pre-compile phase */
+ for (c = 0; c < 32; c++) code[c] = ~classbits[c];
+ }
+ else
+ {
+ *code++ = OP_CLASS;
+ memcpy(code, classbits, 32);
+ }
+ code += 32;
+ break;
+
+
+ /* ===================================================================*/
+ /* Various kinds of repeat; '{' is not necessarily a quantifier, but this
+ has been tested above. */
+
+ case '{':
+ if (!is_quantifier) goto NORMAL_CHAR;
+ ptr = read_repeat_counts(ptr+1, &repeat_min, &repeat_max, errorcodeptr);
+ if (*errorcodeptr != 0) goto FAILED;
+ goto REPEAT;
+
+ case '*':
+ repeat_min = 0;
+ repeat_max = -1;
+ goto REPEAT;
+
+ case '+':
+ repeat_min = 1;
+ repeat_max = -1;
+ goto REPEAT;
+
+ case '?':
+ repeat_min = 0;
+ repeat_max = 1;
+
+ REPEAT:
+ if (previous == NULL)
+ {
+ *errorcodeptr = ERR9;
+ goto FAILED;
+ }
+
+ if (repeat_min == 0)
+ {
+ firstbyte = zerofirstbyte; /* Adjust for zero repeat */
+ reqbyte = zeroreqbyte; /* Ditto */
+ }
+
+ /* Remember whether this is a variable length repeat */
+
+ reqvary = (repeat_min == repeat_max)? 0 : REQ_VARY;
+
+ op_type = 0; /* Default single-char op codes */
+ possessive_quantifier = FALSE; /* Default not possessive quantifier */
+
+ /* Save start of previous item, in case we have to move it up to make space
+ for an inserted OP_ONCE for the additional '+' extension. */
+
+ tempcode = previous;
+
+ /* If the next character is '+', we have a possessive quantifier. This
+ implies greediness, whatever the setting of the PCRE_UNGREEDY option.
+ If the next character is '?' this is a minimizing repeat, by default,
+ but if PCRE_UNGREEDY is set, it works the other way round. We change the
+ repeat type to the non-default. */
+
+ if (ptr[1] == '+')
+ {
+ repeat_type = 0; /* Force greedy */
+ possessive_quantifier = TRUE;
+ ptr++;
+ }
+ else if (ptr[1] == '?')
+ {
+ repeat_type = greedy_non_default;
+ ptr++;
+ }
+ else repeat_type = greedy_default;
+
+ /* If previous was a character match, abolish the item and generate a
+    repeat item instead. If a char item has a minimum of more than one, ensure
+ that it is set in reqbyte - it might not be if a sequence such as x{3} is
+ the first thing in a branch because the x will have gone into firstbyte
+ instead. */
+
+ if (*previous == OP_CHAR || *previous == OP_CHARNC)
+ {
+ /* Deal with UTF-8 characters that take up more than one byte. It's
+ easier to write this out separately than try to macrify it. Use c to
+ hold the length of the character in bytes, plus 0x80 to flag that it's a
+ length rather than a small character. */
+
+#ifdef SUPPORT_UTF8
+ if (utf8 && (code[-1] & 0x80) != 0)
+ {
+ uschar *lastchar = code - 1;
+ while((*lastchar & 0xc0) == 0x80) lastchar--;
+ c = code - lastchar; /* Length of UTF-8 character */
+ memcpy(utf8_char, lastchar, c); /* Save the char */
+ c |= 0x80; /* Flag c as a length */
+ }
+ else
+#endif
+
+ /* Handle the case of a single byte - either with no UTF8 support, or
+ with UTF-8 disabled, or for a UTF-8 character < 128. */
+
+ {
+ c = code[-1];
+ if (repeat_min > 1) reqbyte = c | req_caseopt | cd->req_varyopt;
+ }
+
+ /* If the repetition is unlimited, it pays to see if the next thing on
+ the line is something that cannot possibly match this character. If so,
+ automatically possessifying this item gains some performance in the case
+ where the match fails. */
+
+ if (!possessive_quantifier &&
+ repeat_max < 0 &&
+ check_auto_possessive(*previous, c, utf8, utf8_char, ptr + 1,
+ options, cd))
+ {
+ repeat_type = 0; /* Force greedy */
+ possessive_quantifier = TRUE;
+ }
+
+ goto OUTPUT_SINGLE_REPEAT; /* Code shared with single character types */
+ }
+
+ /* If previous was a single negated character ([^a] or similar), we use
+ one of the special opcodes, replacing it. The code is shared with single-
+    character repeats by setting op_type to add a suitable offset into
+ repeat_type. We can also test for auto-possessification. OP_NOT is
+ currently used only for single-byte chars. */
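+
+    /* For example, [^a]+ ends up as OP_NOTPLUS followed by the byte 'a',
+    because adding OP_NOTSTAR - OP_STAR maps each plain repeat opcode onto its
+    "not" counterpart. */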
+
+ else if (*previous == OP_NOT)
+ {
+ op_type = OP_NOTSTAR - OP_STAR; /* Use "not" opcodes */
+ c = previous[1];
+ if (!possessive_quantifier &&
+ repeat_max < 0 &&
+ check_auto_possessive(OP_NOT, c, utf8, NULL, ptr + 1, options, cd))
+ {
+ repeat_type = 0; /* Force greedy */
+ possessive_quantifier = TRUE;
+ }
+ goto OUTPUT_SINGLE_REPEAT;
+ }
+
+ /* If previous was a character type match (\d or similar), abolish it and
+ create a suitable repeat item. The code is shared with single-character
+ repeats by setting op_type to add a suitable offset into repeat_type. Note
+    that the Unicode property types will be present only when SUPPORT_UCP is
+ defined, but we don't wrap the little bits of code here because it just
+ makes it horribly messy. */
+
+ else if (*previous < OP_EODN)
+ {
+ uschar *oldcode;
+ int prop_type, prop_value;
+ op_type = OP_TYPESTAR - OP_STAR; /* Use type opcodes */
+ c = *previous;
+
+ if (!possessive_quantifier &&
+ repeat_max < 0 &&
+ check_auto_possessive(c, 0, utf8, NULL, ptr + 1, options, cd))
+ {
+ repeat_type = 0; /* Force greedy */
+ possessive_quantifier = TRUE;
+ }
+
+ OUTPUT_SINGLE_REPEAT:
+ if (*previous == OP_PROP || *previous == OP_NOTPROP)
+ {
+ prop_type = previous[1];
+ prop_value = previous[2];
+ }
+ else prop_type = prop_value = -1;
+
+ oldcode = code;
+ code = previous; /* Usually overwrite previous item */
+
+ /* If the maximum is zero then the minimum must also be zero; Perl allows
+ this case, so we do too - by simply omitting the item altogether. */
+
+ if (repeat_max == 0) goto END_REPEAT;
+
+ /* All real repeats make it impossible to handle partial matching (maybe
+ one day we will be able to remove this restriction). */
+
+ if (repeat_max != 1) cd->external_flags |= PCRE_NOPARTIAL;
+
+ /* Combine the op_type with the repeat_type */
+
+ repeat_type += op_type;
+
+ /* A minimum of zero is handled either as the special case * or ?, or as
+ an UPTO, with the maximum given. */
+
+ if (repeat_min == 0)
+ {
+ if (repeat_max == -1) *code++ = OP_STAR + repeat_type;
+ else if (repeat_max == 1) *code++ = OP_QUERY + repeat_type;
+ else
+ {
+ *code++ = OP_UPTO + repeat_type;
+ PUT2INC(code, 0, repeat_max);
+ }
+ }
+
+ /* A repeat minimum of 1 is optimized into some special cases. If the
+ maximum is unlimited, we use OP_PLUS. Otherwise, the original item is
+ left in place and, if the maximum is greater than 1, we use OP_UPTO with
+ one less than the maximum. */
+
+ else if (repeat_min == 1)
+ {
+ if (repeat_max == -1)
+ *code++ = OP_PLUS + repeat_type;
+ else
+ {
+ code = oldcode; /* leave previous item in place */
+ if (repeat_max == 1) goto END_REPEAT;
+ *code++ = OP_UPTO + repeat_type;
+ PUT2INC(code, 0, repeat_max - 1);
+ }
+ }
+
+ /* The case {n,n} is just an EXACT, while the general case {n,m} is
+ handled as an EXACT followed by an UPTO. */
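+ /* For example, a{2,4} is compiled as EXACT with a count of 2 followed by
+ UPTO with a count of 2 (the residual maximum 4 - 2), each followed by the
+ character itself. */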
+
+ else
+ {
+ *code++ = OP_EXACT + op_type; /* NB EXACT doesn't have repeat_type */
+ PUT2INC(code, 0, repeat_min);
+
+ /* If the maximum is unlimited, insert an OP_STAR. Before doing so,
+ we have to insert the character for the previous code. For a repeated
+ Unicode property match, there are two extra bytes that define the
+ required property. In UTF-8 mode, long characters have their length in
+ c, with the 0x80 bit as a flag. */
+
+ if (repeat_max < 0)
+ {
+#ifdef SUPPORT_UTF8
+ if (utf8 && c >= 128)
+ {
+ memcpy(code, utf8_char, c & 7);
+ code += c & 7;
+ }
+ else
+#endif
+ {
+ *code++ = c;
+ if (prop_type >= 0)
+ {
+ *code++ = prop_type;
+ *code++ = prop_value;
+ }
+ }
+ *code++ = OP_STAR + repeat_type;
+ }
+
+ /* Else insert an UPTO if the max is greater than the min, again
+ preceded by the character, for the previously inserted code. If the
+ UPTO is just for 1 instance, we can use QUERY instead. */
+
+ else if (repeat_max != repeat_min)
+ {
+#ifdef SUPPORT_UTF8
+ if (utf8 && c >= 128)
+ {
+ memcpy(code, utf8_char, c & 7);
+ code += c & 7;
+ }
+ else
+#endif
+ *code++ = c;
+ if (prop_type >= 0)
+ {
+ *code++ = prop_type;
+ *code++ = prop_value;
+ }
+ repeat_max -= repeat_min;
+
+ if (repeat_max == 1)
+ {
+ *code++ = OP_QUERY + repeat_type;
+ }
+ else
+ {
+ *code++ = OP_UPTO + repeat_type;
+ PUT2INC(code, 0, repeat_max);
+ }
+ }
+ }
+
+ /* The character or character type itself comes last in all cases. */
+
+#ifdef SUPPORT_UTF8
+ if (utf8 && c >= 128)
+ {
+ memcpy(code, utf8_char, c & 7);
+ code += c & 7;
+ }
+ else
+#endif
+ *code++ = c;
+
+ /* For a repeated Unicode property match, there are two extra bytes that
+ define the required property. */
+
+#ifdef SUPPORT_UCP
+ if (prop_type >= 0)
+ {
+ *code++ = prop_type;
+ *code++ = prop_value;
+ }
+#endif
+ }
+
+ /* If previous was a character class or a back reference, we put the repeat
+ stuff after it, but just skip the item if the repeat was {0,0}. */
+
+ else if (*previous == OP_CLASS ||
+ *previous == OP_NCLASS ||
+#ifdef SUPPORT_UTF8
+ *previous == OP_XCLASS ||
+#endif
+ *previous == OP_REF)
+ {
+ if (repeat_max == 0)
+ {
+ code = previous;
+ goto END_REPEAT;
+ }
+
+ /* All real repeats make it impossible to handle partial matching (maybe
+ one day we will be able to remove this restriction). */
+
+ if (repeat_max != 1) cd->external_flags |= PCRE_NOPARTIAL;
+
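+ /* For example, [abc]{2,5} leaves the class item in place and appends
+ OP_CRRANGE with the two 2-byte counts 2 and 5; an unlimited maximum is
+ encoded as 0 in the second count. */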
+ if (repeat_min == 0 && repeat_max == -1)
+ *code++ = OP_CRSTAR + repeat_type;
+ else if (repeat_min == 1 && repeat_max == -1)
+ *code++ = OP_CRPLUS + repeat_type;
+ else if (repeat_min == 0 && repeat_max == 1)
+ *code++ = OP_CRQUERY + repeat_type;
+ else
+ {
+ *code++ = OP_CRRANGE + repeat_type;
+ PUT2INC(code, 0, repeat_min);
+ if (repeat_max == -1) repeat_max = 0; /* 2-byte encoding for max */
+ PUT2INC(code, 0, repeat_max);
+ }
+ }
+
+ /* If previous was a bracket group, we may have to replicate it in certain
+ cases. */
+
+ else if (*previous == OP_BRA || *previous == OP_CBRA ||
+ *previous == OP_ONCE || *previous == OP_COND)
+ {
+ register int i;
+ int ketoffset = 0;
+ int len = code - previous;
+ uschar *bralink = NULL;
+
+ /* Repeating a DEFINE group is pointless */
+
+ if (*previous == OP_COND && previous[LINK_SIZE+1] == OP_DEF)
+ {
+ *errorcodeptr = ERR55;
+ goto FAILED;
+ }
+
+ /* If the maximum repeat count is unlimited, find the end of the bracket
+ by scanning through from the start, and compute the offset back to it
+ from the current code pointer. There may be an OP_OPT setting following
+ the final KET, so we can't find the end just by going back from the code
+ pointer. */
+
+ if (repeat_max == -1)
+ {
+ register uschar *ket = previous;
+ do ket += GET(ket, 1); while (*ket != OP_KET);
+ ketoffset = code - ket;
+ }
+
+ /* The case of a zero minimum is special because of the need to stick
+ OP_BRAZERO in front of it, and because the group appears once in the
+ data, whereas in other cases it appears the minimum number of times. For
+ this reason, it is simplest to treat this case separately, as otherwise
+ the code gets far too messy. There are several special subcases when the
+ minimum is zero. */
+
+ if (repeat_min == 0)
+ {
+ /* If the maximum is also zero, we just omit the group from the output
+ altogether. */
+
+ if (repeat_max == 0)
+ {
+ code = previous;
+ goto END_REPEAT;
+ }
+
+ /* If the maximum is 1 or unlimited, we just have to stick in the
+ BRAZERO and do no more at this point. However, we do need to adjust
+ any OP_RECURSE calls inside the group that refer to the group itself or
+ any internal or forward referenced group, because the offset is from
+ the start of the whole regex. Temporarily terminate the pattern while
+ doing this. */
+
+ if (repeat_max <= 1)
+ {
+ *code = OP_END;
+ adjust_recurse(previous, 1, utf8, cd, save_hwm);
+ memmove(previous+1, previous, len);
+ code++;
+ *previous++ = OP_BRAZERO + repeat_type;
+ }
+
+ /* If the maximum is greater than 1 and limited, we have to replicate
+ in a nested fashion, sticking OP_BRAZERO before each set of brackets.
+ The first one has to be handled carefully because it's the original
+ copy, which has to be moved up. The remainder can be handled by code
+ that is common with the non-zero minimum case below. We have to
+ adjust the value of repeat_max, since one less copy is required. Once
+ again, we may have to adjust any OP_RECURSE calls inside the group. */
+
+ else
+ {
+ int offset;
+ *code = OP_END;
+ adjust_recurse(previous, 2 + LINK_SIZE, utf8, cd, save_hwm);
+ memmove(previous + 2 + LINK_SIZE, previous, len);
+ code += 2 + LINK_SIZE;
+ *previous++ = OP_BRAZERO + repeat_type;
+ *previous++ = OP_BRA;
+
+ /* We chain together the bracket offset fields that have to be
+ filled in later when the ends of the brackets are reached. */
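+ /* The value stored is the distance back to the previous pending bracket
+ (zero for the first one), so the chain can be unwound when the closing
+ KETs are written below. */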
+
+ offset = (bralink == NULL)? 0 : previous - bralink;
+ bralink = previous;
+ PUTINC(previous, 0, offset);
+ }
+
+ repeat_max--;
+ }
+
+ /* If the minimum is greater than zero, replicate the group as many
+ times as necessary, and adjust the maximum to the number of subsequent
+ copies that we need. If we set a first char from the group, and didn't
+ set a required char, copy the latter from the former. If there are any
+ forward reference subroutine calls in the group, there will be entries on
+ the workspace list; replicate these with an appropriate increment. */
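+ /* For example, (ab){3} is compiled once and then, in the real compile
+ phase, copied twice more by the loop below; the pre-compile phase only
+ adds the equivalent length. */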
+
+ else
+ {
+ if (repeat_min > 1)
+ {
+ /* In the pre-compile phase, we don't actually do the replication. We
+ just adjust the length as if we had. Do some paranoid checks for
+ potential integer overflow. */
+
+ if (lengthptr != NULL)
+ {
+ int delta = (repeat_min - 1)*length_prevgroup;
+ if ((double)(repeat_min - 1)*(double)length_prevgroup >
+ (double)INT_MAX ||
+ OFLOW_MAX - *lengthptr < delta)
+ {
+ *errorcodeptr = ERR20;
+ goto FAILED;
+ }
+ *lengthptr += delta;
+ }
+
+ /* This is compiling for real */
+
+ else
+ {
+ if (groupsetfirstbyte && reqbyte < 0) reqbyte = firstbyte;
+ for (i = 1; i < repeat_min; i++)
+ {
+ uschar *hc;
+ uschar *this_hwm = cd->hwm;
+ memcpy(code, previous, len);
+ for (hc = save_hwm; hc < this_hwm; hc += LINK_SIZE)
+ {
+ PUT(cd->hwm, 0, GET(hc, 0) + len);
+ cd->hwm += LINK_SIZE;
+ }
+ save_hwm = this_hwm;
+ code += len;
+ }
+ }
+ }
+
+ if (repeat_max > 0) repeat_max -= repeat_min;
+ }
+
+ /* This code is common to both the zero and non-zero minimum cases. If
+ the maximum is limited, it replicates the group in a nested fashion,
+ remembering the bracket starts on a stack. In the case of a zero minimum,
+ the first one was set up above. In all cases the repeat_max now specifies
+ the number of additional copies needed. Again, we must remember to
+ replicate entries on the forward reference list. */
+
+ if (repeat_max >= 0)
+ {
+ /* In the pre-compile phase, we don't actually do the replication. We
+ just adjust the length as if we had. For each repetition we must add 1
+ to the length for BRAZERO and for all but the last repetition we must
+ add 2 + 2*LINK_SIZE to allow for the nesting that occurs. Do some
+ paranoid checks to avoid integer overflow. */
+
+ if (lengthptr != NULL && repeat_max > 0)
+ {
+ int delta = repeat_max * (length_prevgroup + 1 + 2 + 2*LINK_SIZE) -
+ 2 - 2*LINK_SIZE; /* Last one doesn't nest */
+ if ((double)repeat_max *
+ (double)(length_prevgroup + 1 + 2 + 2*LINK_SIZE)
+ > (double)INT_MAX ||
+ OFLOW_MAX - *lengthptr < delta)
+ {
+ *errorcodeptr = ERR20;
+ goto FAILED;
+ }
+ *lengthptr += delta;
+ }
+
+ /* This is compiling for real */
+
+ else for (i = repeat_max - 1; i >= 0; i--)
+ {
+ uschar *hc;
+ uschar *this_hwm = cd->hwm;
+
+ *code++ = OP_BRAZERO + repeat_type;
+
+ /* All but the final copy start a new nesting, maintaining the
+ chain of brackets outstanding. */
+
+ if (i != 0)
+ {
+ int offset;
+ *code++ = OP_BRA;
+ offset = (bralink == NULL)? 0 : code - bralink;
+ bralink = code;
+ PUTINC(code, 0, offset);
+ }
+
+ memcpy(code, previous, len);
+ for (hc = save_hwm; hc < this_hwm; hc += LINK_SIZE)
+ {
+ PUT(cd->hwm, 0, GET(hc, 0) + len + ((i != 0)? 2+LINK_SIZE : 1));
+ cd->hwm += LINK_SIZE;
+ }
+ save_hwm = this_hwm;
+ code += len;
+ }
+
+ /* Now chain through the pending brackets, and fill in their length
+ fields (which are holding the chain links pro tem). */
+
+ while (bralink != NULL)
+ {
+ int oldlinkoffset;
+ int offset = code - bralink + 1;
+ uschar *bra = code - offset;
+ oldlinkoffset = GET(bra, 1);
+ bralink = (oldlinkoffset == 0)? NULL : bralink - oldlinkoffset;
+ *code++ = OP_KET;
+ PUTINC(code, 0, offset);
+ PUT(bra, 1, offset);
+ }
+ }
+
+ /* If the maximum is unlimited, set a repeater in the final copy. We
+ can't just offset backwards from the current code point, because we
+ don't know if there's been an options resetting after the ket. The
+ correct offset was computed above.
+
+ Then, when we are doing the actual compile phase, check to see whether
+ this group is a non-atomic one that could match an empty string. If so,
+ convert the initial operator to the S form (e.g. OP_BRA -> OP_SBRA) so
+ that runtime checking can be done. [This check is also applied to
+ atomic groups at runtime, but in a different way.] */
+
+ else
+ {
+ uschar *ketcode = code - ketoffset;
+ uschar *bracode = ketcode - GET(ketcode, 1);
+ *ketcode = OP_KETRMAX + repeat_type;
+ if (lengthptr == NULL && *bracode != OP_ONCE)
+ {
+ uschar *scode = bracode;
+ do
+ {
+ if (could_be_empty_branch(scode, ketcode, utf8))
+ {
+ *bracode += OP_SBRA - OP_BRA;
+ break;
+ }
+ scode += GET(scode, 1);
+ }
+ while (*scode == OP_ALT);
+ }
+ }
+ }
+
+ /* Else there's some kind of shambles */
+
+ else
+ {
+ *errorcodeptr = ERR11;
+ goto FAILED;
+ }
+
+ /* If the character following a repeat is '+', or if certain optimization
+ tests above succeeded, possessive_quantifier is TRUE. For some of the
+ simpler opcodes, there is a special alternative opcode for this. For
+ anything else, we wrap the entire repeated item inside OP_ONCE brackets.
+ The '+' notation is just syntactic sugar, taken from Sun's Java package,
+ but the special opcodes can optimize it a bit. The repeated item starts at
+ tempcode, not at previous, which might be the first part of a string whose
+ (former) last char we repeated.
+
+ Possessifying an 'exact' quantifier has no effect, so we can ignore it. But
+ an 'upto' may follow. We skip over an 'exact' item, and then test the
+ length of what remains before proceeding. */
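+ /* For example, /a{2,5}+/ compiles as EXACT with count 2 followed by UPTO
+ with count 3; the code below steps over the EXACT and converts the UPTO
+ into OP_POSUPTO. */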
+
+ if (possessive_quantifier)
+ {
+ int len;
+ if (*tempcode == OP_EXACT || *tempcode == OP_TYPEEXACT ||
+ *tempcode == OP_NOTEXACT)
+ tempcode += _pcre_OP_lengths[*tempcode];
+ len = code - tempcode;
+ if (len > 0) switch (*tempcode)
+ {
+ case OP_STAR: *tempcode = OP_POSSTAR; break;
+ case OP_PLUS: *tempcode = OP_POSPLUS; break;
+ case OP_QUERY: *tempcode = OP_POSQUERY; break;
+ case OP_UPTO: *tempcode = OP_POSUPTO; break;
+
+ case OP_TYPESTAR: *tempcode = OP_TYPEPOSSTAR; break;
+ case OP_TYPEPLUS: *tempcode = OP_TYPEPOSPLUS; break;
+ case OP_TYPEQUERY: *tempcode = OP_TYPEPOSQUERY; break;
+ case OP_TYPEUPTO: *tempcode = OP_TYPEPOSUPTO; break;
+
+ case OP_NOTSTAR: *tempcode = OP_NOTPOSSTAR; break;
+ case OP_NOTPLUS: *tempcode = OP_NOTPOSPLUS; break;
+ case OP_NOTQUERY: *tempcode = OP_NOTPOSQUERY; break;
+ case OP_NOTUPTO: *tempcode = OP_NOTPOSUPTO; break;
+
+ default:
+ memmove(tempcode + 1+LINK_SIZE, tempcode, len);
+ code += 1 + LINK_SIZE;
+ len += 1 + LINK_SIZE;
+ tempcode[0] = OP_ONCE;
+ *code++ = OP_KET;
+ PUTINC(code, 0, len);
+ PUT(tempcode, 1, len);
+ break;
+ }
+ }
+
+ /* In all cases we no longer have a previous item. We also set the
+ "follows varying string" flag for subsequently encountered reqbytes if
+ it isn't already set and we have just passed a varying length item. */
+
+ END_REPEAT:
+ previous = NULL;
+ cd->req_varyopt |= reqvary;
+ break;
+
+
+ /* ===================================================================*/
+ /* Start of nested parenthesized sub-expression, or comment or lookahead or
+ lookbehind or option setting or condition or all the other extended
+ parenthesis forms. */
+
+ case '(':
+ newoptions = options;
+ skipbytes = 0;
+ bravalue = OP_CBRA;
+ save_hwm = cd->hwm;
+ reset_bracount = FALSE;
+
+ /* First deal with various "verbs" that can be introduced by '*'. */
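+ /* These are backtracking control verbs such as (*ACCEPT): the name is
+ looked up in the verbs table and the corresponding opcode is emitted
+ directly, with no operand. */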
+
+ if (*(++ptr) == '*' && (cd->ctypes[ptr[1]] & ctype_letter) != 0)
+ {
+ int i, namelen;
+ const char *vn = verbnames;
+ const uschar *name = ++ptr;
+ previous = NULL;
+ while ((cd->ctypes[*++ptr] & ctype_letter) != 0);
+ if (*ptr == ':')
+ {
+ *errorcodeptr = ERR59; /* Not supported */
+ goto FAILED;
+ }
+ if (*ptr != ')')
+ {
+ *errorcodeptr = ERR60;
+ goto FAILED;
+ }
+ namelen = ptr - name;
+ for (i = 0; i < verbcount; i++)
+ {
+ if (namelen == verbs[i].len &&
+ strncmp((char *)name, vn, namelen) == 0)
+ {
+ *code = verbs[i].op;
+ if (*code++ == OP_ACCEPT) cd->had_accept = TRUE;
+ break;
+ }
+ vn += verbs[i].len + 1;
+ }
+ if (i < verbcount) continue;
+ *errorcodeptr = ERR60;
+ goto FAILED;
+ }
+
+ /* Deal with the extended parentheses; all are introduced by '?', and the
+ appearance of any of them means that this is not a capturing group. */
+
+ else if (*ptr == '?')
+ {
+ int i, set, unset, namelen;
+ int *optset;
+ const uschar *name;
+ uschar *slot;
+
+ switch (*(++ptr))
+ {
+ case '#': /* Comment; skip to ket */
+ ptr++;
+ while (*ptr != 0 && *ptr != ')') ptr++;
+ if (*ptr == 0)
+ {
+ *errorcodeptr = ERR18;
+ goto FAILED;
+ }
+ continue;
+
+
+ /* ------------------------------------------------------------ */
+ case '|': /* Reset capture count for each branch */
+ reset_bracount = TRUE;
+ /* Fall through */
+
+ /* ------------------------------------------------------------ */
+ case ':': /* Non-capturing bracket */
+ bravalue = OP_BRA;
+ ptr++;
+ break;
+
+
+ /* ------------------------------------------------------------ */
+ case '(':
+ bravalue = OP_COND; /* Conditional group */
+
+ /* A condition can be an assertion, a number (referring to a numbered
+ group), a name (referring to a named group), or 'R', referring to
+ recursion. R<digits> and R&name are also permitted for recursion tests.
+
+ There are several syntaxes for testing a named group: (?(name)) is used
+ by Python; Perl 5.10 onwards uses (?(<name>) or (?('name')).
+
+ There are two unfortunate ambiguities, caused by history. (a) 'R' can
+ be the recursive thing or the name 'R' (and similarly for 'R' followed
+ by digits), and (b) a number could be a name that consists of digits.
+ In both cases, we look for a name first; if not found, we try the other
+ cases. */
+
+ /* For conditions that are assertions, check the syntax, and then exit
+ the switch. This will take control down to where bracketed groups,
+ including assertions, are processed. */
+
+ if (ptr[1] == '?' && (ptr[2] == '=' || ptr[2] == '!' || ptr[2] == '<'))
+ break;
+
+ /* Most other conditions use OP_CREF (a couple change to OP_RREF
+ below), and all need to skip 3 bytes at the start of the group. */
+
+ code[1+LINK_SIZE] = OP_CREF;
+ skipbytes = 3;
+ refsign = -1;
+
+ /* Check for a test for recursion in a named group. */
+
+ if (ptr[1] == 'R' && ptr[2] == '&')
+ {
+ terminator = -1;
+ ptr += 2;
+ code[1+LINK_SIZE] = OP_RREF; /* Change the type of test */
+ }
+
+ /* Check for a test for a named group's having been set, using the Perl
+ syntax (?(<name>) or (?('name') */
+
+ else if (ptr[1] == '<')
+ {
+ terminator = '>';
+ ptr++;
+ }
+ else if (ptr[1] == '\'')
+ {
+ terminator = '\'';
+ ptr++;
+ }
+ else
+ {
+ terminator = 0;
+ if (ptr[1] == '-' || ptr[1] == '+') refsign = *(++ptr);
+ }
+
+ /* We now expect to read a name; anything else is an error */
+
+ if ((cd->ctypes[ptr[1]] & ctype_word) == 0)
+ {
+ ptr += 1; /* To get the right offset */
+ *errorcodeptr = ERR28;
+ goto FAILED;
+ }
+
+ /* Read the name, but also get it as a number if it's all digits */
+
+ recno = 0;
+ name = ++ptr;
+ while ((cd->ctypes[*ptr] & ctype_word) != 0)
+ {
+ if (recno >= 0)
+ recno = ((digitab[*ptr] & ctype_digit) != 0)?
+ recno * 10 + *ptr - '0' : -1;
+ ptr++;
+ }
+ namelen = ptr - name;
+
+ if ((terminator > 0 && *ptr++ != terminator) || *ptr++ != ')')
+ {
+ ptr--; /* Error offset */
+ *errorcodeptr = ERR26;
+ goto FAILED;
+ }
+
+ /* Do no further checking in the pre-compile phase. */
+
+ if (lengthptr != NULL) break;
+
+ /* In the real compile we do the work of looking for the actual
+ reference. If the string started with "+" or "-" we require the rest to
+ be digits, in which case recno will be set. */
+
+ if (refsign > 0)
+ {
+ if (recno <= 0)
+ {
+ *errorcodeptr = ERR58;
+ goto FAILED;
+ }
+ if (refsign == '-')
+ {
+ recno = cd->bracount - recno + 1;
+ if (recno <= 0)
+ {
+ *errorcodeptr = ERR15;
+ goto FAILED;
+ }
+ }
+ else recno += cd->bracount;
+ PUT2(code, 2+LINK_SIZE, recno);
+ break;
+ }
+
+ /* Otherwise (did not start with "+" or "-"), start by looking for the
+ name. */
+
+ slot = cd->name_table;
+ for (i = 0; i < cd->names_found; i++)
+ {
+ if (strncmp((char *)name, (char *)slot+2, namelen) == 0) break;
+ slot += cd->name_entry_size;
+ }
+
+ /* Found a previous named subpattern */
+
+ if (i < cd->names_found)
+ {
+ recno = GET2(slot, 0);
+ PUT2(code, 2+LINK_SIZE, recno);
+ }
+
+ /* Search the pattern for a forward reference */
+
+ else if ((i = find_parens(ptr, cd->bracount, name, namelen,
+ (options & PCRE_EXTENDED) != 0)) > 0)
+ {
+ PUT2(code, 2+LINK_SIZE, i);
+ }
+
+ /* If terminator == 0 it means that the name followed directly after
+ the opening parenthesis [e.g. (?(abc)...] and in this case there are
+ some further alternatives to try. For the cases where terminator != 0
+ [things like (?(<name>... or (?('name')... or (?(R&name)... ] we have
+ now checked all the possibilities, so give an error. */
+
+ else if (terminator != 0)
+ {
+ *errorcodeptr = ERR15;
+ goto FAILED;
+ }
+
+ /* Check for (?(R) for recursion. Allow digits after R to specify a
+ specific group number. */
+
+ else if (*name == 'R')
+ {
+ recno = 0;
+ for (i = 1; i < namelen; i++)
+ {
+ if ((digitab[name[i]] & ctype_digit) == 0)
+ {
+ *errorcodeptr = ERR15;
+ goto FAILED;
+ }
+ recno = recno * 10 + name[i] - '0';
+ }
+ if (recno == 0) recno = RREF_ANY;
+ code[1+LINK_SIZE] = OP_RREF; /* Change test type */
+ PUT2(code, 2+LINK_SIZE, recno);
+ }
+
+ /* Similarly, check for the (?(DEFINE) "condition", which is always
+ false. */
+
+ else if (namelen == 6 && strncmp((char *)name, "DEFINE", 6) == 0)
+ {
+ code[1+LINK_SIZE] = OP_DEF;
+ skipbytes = 1;
+ }
+
+ /* Check for the "name" actually being a subpattern number. */
+
+ else if (recno > 0)
+ {
+ PUT2(code, 2+LINK_SIZE, recno);
+ }
+
+ /* Either an unidentified subpattern, or a reference to (?(0) */
+
+ else
+ {
+ *errorcodeptr = (recno == 0)? ERR35: ERR15;
+ goto FAILED;
+ }
+ break;
+
+
+ /* ------------------------------------------------------------ */
+ case '=': /* Positive lookahead */
+ bravalue = OP_ASSERT;
+ ptr++;
+ break;
+
+
+ /* ------------------------------------------------------------ */
+ case '!': /* Negative lookahead */
+ ptr++;
+ if (*ptr == ')') /* Optimize (?!) */
+ {
+ *code++ = OP_FAIL;
+ previous = NULL;
+ continue;
+ }
+ bravalue = OP_ASSERT_NOT;
+ break;
+
+
+ /* ------------------------------------------------------------ */
+ case '<': /* Lookbehind or named define */
+ switch (ptr[1])
+ {
+ case '=': /* Positive lookbehind */
+ bravalue = OP_ASSERTBACK;
+ ptr += 2;
+ break;
+
+ case '!': /* Negative lookbehind */
+ bravalue = OP_ASSERTBACK_NOT;
+ ptr += 2;
+ break;
+
+ default: /* Could be name define, else bad */
+ if ((cd->ctypes[ptr[1]] & ctype_word) != 0) goto DEFINE_NAME;
+ ptr++; /* Correct offset for error */
+ *errorcodeptr = ERR24;
+ goto FAILED;
+ }
+ break;
+
+
+ /* ------------------------------------------------------------ */
+ case '>': /* One-time brackets */
+ bravalue = OP_ONCE;
+ ptr++;
+ break;
+
+
+ /* ------------------------------------------------------------ */
+ case 'C': /* Callout - may be followed by digits; */
+ previous_callout = code; /* Save for later completion */
+ after_manual_callout = 1; /* Skip one item before completing */
+ *code++ = OP_CALLOUT;
+ {
+ int n = 0;
+ while ((digitab[*(++ptr)] & ctype_digit) != 0)
+ n = n * 10 + *ptr - '0';
+ if (*ptr != ')')
+ {
+ *errorcodeptr = ERR39;
+ goto FAILED;
+ }
+ if (n > 255)
+ {
+ *errorcodeptr = ERR38;
+ goto FAILED;
+ }
+ *code++ = n;
+ PUT(code, 0, ptr - cd->start_pattern + 1); /* Pattern offset */
+ PUT(code, LINK_SIZE, 0); /* Default length */
+ code += 2 * LINK_SIZE;
+ }
+ previous = NULL;
+ continue;
+
+
+ /* ------------------------------------------------------------ */
+ case 'P': /* Python-style named subpattern handling */
+ if (*(++ptr) == '=' || *ptr == '>') /* Reference or recursion */
+ {
+ is_recurse = *ptr == '>';
+ terminator = ')';
+ goto NAMED_REF_OR_RECURSE;
+ }
+ else if (*ptr != '<') /* Test for Python-style definition */
+ {
+ *errorcodeptr = ERR41;
+ goto FAILED;
+ }
+ /* Fall through to handle (?P< in the same way as (?< */
+
+
+ /* ------------------------------------------------------------ */
+ DEFINE_NAME: /* Come here from (?< handling */
+ case '\'':
+ {
+ terminator = (*ptr == '<')? '>' : '\'';
+ name = ++ptr;
+
+ while ((cd->ctypes[*ptr] & ctype_word) != 0) ptr++;
+ namelen = ptr - name;
+
+ /* In the pre-compile phase, just do a syntax check. */
+
+ if (lengthptr != NULL)
+ {
+ if (*ptr != terminator)
+ {
+ *errorcodeptr = ERR42;
+ goto FAILED;
+ }
+ if (cd->names_found >= MAX_NAME_COUNT)
+ {
+ *errorcodeptr = ERR49;
+ goto FAILED;
+ }
+ if (namelen + 3 > cd->name_entry_size)
+ {
+ cd->name_entry_size = namelen + 3;
+ if (namelen > MAX_NAME_SIZE)
+ {
+ *errorcodeptr = ERR48;
+ goto FAILED;
+ }
+ }
+ }
+
+ /* In the real compile, create the entry in the table */
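+ /* The name table is kept sorted: scan for the insertion point, then
+ memmove() the remaining entries along to open a slot for the new name. */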
+
+ else
+ {
+ slot = cd->name_table;
+ for (i = 0; i < cd->names_found; i++)
+ {
+ int crc = memcmp(name, slot+2, namelen);
+ if (crc == 0)
+ {
+ if (slot[2+namelen] == 0)
+ {
+ if ((options & PCRE_DUPNAMES) == 0)
+ {
+ *errorcodeptr = ERR43;
+ goto FAILED;
+ }
+ }
+ else crc = -1; /* Current name is substring */
+ }
+ if (crc < 0)
+ {
+ memmove(slot + cd->name_entry_size, slot,
+ (cd->names_found - i) * cd->name_entry_size);
+ break;
+ }
+ slot += cd->name_entry_size;
+ }
+
+ PUT2(slot, 0, cd->bracount + 1);
+ memcpy(slot + 2, name, namelen);
+ slot[2+namelen] = 0;
+ }
+ }
+
+ /* In both cases, count the number of names we've encountered. */
+
+ ptr++; /* Move past > or ' */
+ cd->names_found++;
+ goto NUMBERED_GROUP;
+
+
+ /* ------------------------------------------------------------ */
+ case '&': /* Perl recursion/subroutine syntax */
+ terminator = ')';
+ is_recurse = TRUE;
+ /* Fall through */
+
+ /* We come here from the Python syntax above that handles both
+ references (?P=name) and recursion (?P>name), as well as falling
+ through from the Perl recursion syntax (?&name). */
+
+ NAMED_REF_OR_RECURSE:
+ name = ++ptr;
+ while ((cd->ctypes[*ptr] & ctype_word) != 0) ptr++;
+ namelen = ptr - name;
+
+ /* In the pre-compile phase, do a syntax check and set a dummy
+ reference number. */
+
+ if (lengthptr != NULL)
+ {
+ if (*ptr != terminator)
+ {
+ *errorcodeptr = ERR42;
+ goto FAILED;
+ }
+ if (namelen > MAX_NAME_SIZE)
+ {
+ *errorcodeptr = ERR48;
+ goto FAILED;
+ }
+ recno = 0;
+ }
+
+ /* In the real compile, seek the name in the table */
+
+ else
+ {
+ slot = cd->name_table;
+ for (i = 0; i < cd->names_found; i++)
+ {
+ if (strncmp((char *)name, (char *)slot+2, namelen) == 0) break;
+ slot += cd->name_entry_size;
+ }
+
+ if (i < cd->names_found) /* Back reference */
+ {
+ recno = GET2(slot, 0);
+ }
+ else if ((recno = /* Forward back reference */
+ find_parens(ptr, cd->bracount, name, namelen,
+ (options & PCRE_EXTENDED) != 0)) <= 0)
+ {
+ *errorcodeptr = ERR15;
+ goto FAILED;
+ }
+ }
+
+ /* In both phases, we can now go to the code that handles numerical
+ recursion or backreferences. */
+
+ if (is_recurse) goto HANDLE_RECURSION;
+ else goto HANDLE_REFERENCE;
+
+
+ /* ------------------------------------------------------------ */
+ case 'R': /* Recursion */
+ ptr++; /* Same as (?0) */
+ /* Fall through */
+
+
+ /* ------------------------------------------------------------ */
+ case '-': case '+':
+ case '0': case '1': case '2': case '3': case '4': /* Recursion or */
+ case '5': case '6': case '7': case '8': case '9': /* subroutine */
+ {
+ const uschar *called;
+
+ if ((refsign = *ptr) == '+') ptr++;
+ else if (refsign == '-')
+ {
+ if ((digitab[ptr[1]] & ctype_digit) == 0)
+ goto OTHER_CHAR_AFTER_QUERY;
+ ptr++;
+ }
+
+ recno = 0;
+ while((digitab[*ptr] & ctype_digit) != 0)
+ recno = recno * 10 + *ptr++ - '0';
+
+ if (*ptr != ')')
+ {
+ *errorcodeptr = ERR29;
+ goto FAILED;
+ }
+
+ if (refsign == '-')
+ {
+ if (recno == 0)
+ {
+ *errorcodeptr = ERR58;
+ goto FAILED;
+ }
+ recno = cd->bracount - recno + 1;
+ if (recno <= 0)
+ {
+ *errorcodeptr = ERR15;
+ goto FAILED;
+ }
+ }
+ else if (refsign == '+')
+ {
+ if (recno == 0)
+ {
+ *errorcodeptr = ERR58;
+ goto FAILED;
+ }
+ recno += cd->bracount;
+ }
+
+ /* Come here from code above that handles a named recursion */
+
+ HANDLE_RECURSION:
+
+ previous = code;
+ called = cd->start_code;
+
+ /* When we are actually compiling, find the bracket that is being
+ referenced. Temporarily end the regex in case it doesn't exist before
+ this point. If we end up with a forward reference, first check that
+ the bracket does occur later so we can give the error (and position)
+ now. Then remember this forward reference in the workspace so it can
+ be filled in at the end. */
+
+ if (lengthptr == NULL)
+ {
+ *code = OP_END;
+ if (recno != 0) called = find_bracket(cd->start_code, utf8, recno);
+
+ /* Forward reference */
+
+ if (called == NULL)
+ {
+ if (find_parens(ptr, cd->bracount, NULL, recno,
+ (options & PCRE_EXTENDED) != 0) < 0)
+ {
+ *errorcodeptr = ERR15;
+ goto FAILED;
+ }
+ called = cd->start_code + recno;
+ PUTINC(cd->hwm, 0, code + 2 + LINK_SIZE - cd->start_code);
+ }
+
+ /* If not a forward reference, and the subpattern is still open,
+ this is a recursive call. We check to see if this is a left
+ recursion that could loop for ever, and diagnose that case. */
+
+ else if (GET(called, 1) == 0 &&
+ could_be_empty(called, code, bcptr, utf8))
+ {
+ *errorcodeptr = ERR40;
+ goto FAILED;
+ }
+ }
+
+ /* Insert the recursion/subroutine item, automatically wrapped inside
+ "once" brackets. Set up a "previous group" length so that a
+ subsequent quantifier will work. */
+
+ *code = OP_ONCE;
+ PUT(code, 1, 2 + 2*LINK_SIZE);
+ code += 1 + LINK_SIZE;
+
+ *code = OP_RECURSE;
+ PUT(code, 1, called - cd->start_code);
+ code += 1 + LINK_SIZE;
+
+ *code = OP_KET;
+ PUT(code, 1, 2 + 2*LINK_SIZE);
+ code += 1 + LINK_SIZE;
+
+ length_prevgroup = 3 + 3*LINK_SIZE;
+ }
+
+ /* Can't determine a first byte now */
+
+ if (firstbyte == REQ_UNSET) firstbyte = REQ_NONE;
+ continue;
+
+
+ /* ------------------------------------------------------------ */
+ default: /* Other characters: check option setting */
+ OTHER_CHAR_AFTER_QUERY:
+ set = unset = 0;
+ optset = &set;
+
+ while (*ptr != ')' && *ptr != ':')
+ {
+ switch (*ptr++)
+ {
+ case '-': optset = &unset; break;
+
+ case 'J': /* Record that it changed in the external options */
+ *optset |= PCRE_DUPNAMES;
+ cd->external_flags |= PCRE_JCHANGED;
+ break;
+
+ case 'i': *optset |= PCRE_CASELESS; break;
+ case 'm': *optset |= PCRE_MULTILINE; break;
+ case 's': *optset |= PCRE_DOTALL; break;
+ case 'x': *optset |= PCRE_EXTENDED; break;
+ case 'U': *optset |= PCRE_UNGREEDY; break;
+ case 'X': *optset |= PCRE_EXTRA; break;
+
+ default: *errorcodeptr = ERR12;
+ ptr--; /* Correct the offset */
+ goto FAILED;
+ }
+ }
+
+ /* Set up the changed option bits, but don't change anything yet. */
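+ /* For example, (?i-m) puts PCRE_CASELESS into "set" and PCRE_MULTILINE
+ into "unset", so the result is (options | PCRE_CASELESS) & ~PCRE_MULTILINE. */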
+
+ newoptions = (options | set) & (~unset);
+
+ /* If the options ended with ')' this is not the start of a nested
+ group with option changes, so the options change at this level. If this
+ item is right at the start of the pattern, the options can be
+ abstracted and made external in the pre-compile phase, and ignored in
+ the compile phase. This can be helpful when matching -- for instance in
+ caseless checking of required bytes.
+
+ If the code pointer is not (cd->start_code + 1 + LINK_SIZE), we are
+ definitely *not* at the start of the pattern because something has been
+ compiled. In the pre-compile phase, however, the code pointer can have
+ that value after the start, because it gets reset as code is discarded
+ during the pre-compile. However, this can happen only at top level - if
+ we are within parentheses, the starting BRA will still be present. At
+ any parenthesis level, the length value can be used to test if anything
+ has been compiled at that level. Thus, a test for both these conditions
+ is necessary to ensure we correctly detect the start of the pattern in
+ both phases.
+
+ If we are not at the pattern start, compile code to change the ims
+ options if this setting actually changes any of them. We also pass the
+ new setting back so that it can be put at the start of any following
+ branches, and when this group ends (if we are in a group), a resetting
+ item can be compiled. */
+
+ if (*ptr == ')')
+ {
+ if (code == cd->start_code + 1 + LINK_SIZE &&
+ (lengthptr == NULL || *lengthptr == 2 + 2*LINK_SIZE))
+ {
+ cd->external_options = newoptions;
+ options = newoptions;
+ }
+ else
+ {
+ if ((options & PCRE_IMS) != (newoptions & PCRE_IMS))
+ {
+ *code++ = OP_OPT;
+ *code++ = newoptions & PCRE_IMS;
+ }
+
+ /* Change options at this level, and pass them back for use
+ in subsequent branches. Reset the greedy defaults and the case
+ value for firstbyte and reqbyte. */
+
+ *optionsptr = options = newoptions;
+ greedy_default = ((newoptions & PCRE_UNGREEDY) != 0);
+ greedy_non_default = greedy_default ^ 1;
+ req_caseopt = ((options & PCRE_CASELESS) != 0)? REQ_CASELESS : 0;
+ }
+
+ previous = NULL; /* This item can't be repeated */
+ continue; /* It is complete */
+ }
+
+ /* If the options ended with ':' we are heading into a nested group
+ with possible change of options. Such groups are non-capturing and are
+ not assertions of any kind. All we need to do is skip over the ':';
+ the newoptions value is handled below. */
+
+ bravalue = OP_BRA;
+ ptr++;
+ } /* End of switch for character following (? */
+ } /* End of (? handling */
+
+ /* Opening parenthesis not followed by '?'. If PCRE_NO_AUTO_CAPTURE is set,
+ all unadorned brackets become non-capturing and behave like (?:...)
+ brackets. */
+
+ else if ((options & PCRE_NO_AUTO_CAPTURE) != 0)
+ {
+ bravalue = OP_BRA;
+ }
+
+ /* Else we have a capturing group. */
+
+ else
+ {
+ NUMBERED_GROUP:
+ cd->bracount += 1;
+ PUT2(code, 1+LINK_SIZE, cd->bracount);
+ skipbytes = 2;
+ }
+
+ /* Process nested bracketed regex. Assertions may not be repeated, but
+ other kinds can be. All their opcodes are >= OP_ONCE. We copy code into a
+ non-register variable in order to be able to pass its address because some
+ compilers complain otherwise. Pass in a new setting for the ims options if
+ they have changed. */
+
+ previous = (bravalue >= OP_ONCE)? code : NULL;
+ *code = bravalue;
+ tempcode = code;
+ tempreqvary = cd->req_varyopt; /* Save value before bracket */
+ length_prevgroup = 0; /* Initialize for pre-compile phase */
+
+ if (!compile_regex(
+ newoptions, /* The complete new option state */
+ options & PCRE_IMS, /* The previous ims option state */
+ &tempcode, /* Where to put code (updated) */
+ &ptr, /* Input pointer (updated) */
+ errorcodeptr, /* Where to put an error message */
+ (bravalue == OP_ASSERTBACK ||
+ bravalue == OP_ASSERTBACK_NOT), /* TRUE if back assert */
+ reset_bracount, /* True if (?| group */
+ skipbytes, /* Skip over bracket number */
+ &subfirstbyte, /* For possible first char */
+ &subreqbyte, /* For possible last char */
+ bcptr, /* Current branch chain */
+ cd, /* Tables block */
+ (lengthptr == NULL)? NULL : /* Actual compile phase */
+ &length_prevgroup /* Pre-compile phase */
+ ))
+ goto FAILED;
+
+ /* At the end of compiling, code is still pointing to the start of the
+ group, while tempcode has been updated to point past the end of the group
+ and any option resetting that may follow it. The pattern pointer (ptr)
+ is on the bracket. */
+
+ /* If this is a conditional bracket, check that there are no more than
+ two branches in the group, or just one if it's a DEFINE group. We do this
+ in the real compile phase, not in the pre-pass, where the whole group may
+ not be available. */
+
+ if (bravalue == OP_COND && lengthptr == NULL)
+ {
+ uschar *tc = code;
+ int condcount = 0;
+
+ do {
+ condcount++;
+ tc += GET(tc,1);
+ }
+ while (*tc != OP_KET);
+
+ /* A DEFINE group is never obeyed inline (the "condition" is always
+ false). It must have only one branch. */
+
+ if (code[LINK_SIZE+1] == OP_DEF)
+ {
+ if (condcount > 1)
+ {
+ *errorcodeptr = ERR54;
+ goto FAILED;
+ }
+ bravalue = OP_DEF; /* Just a flag to suppress char handling below */
+ }
+
+ /* A "normal" conditional group. If there is just one branch, we must not
+ make use of its firstbyte or reqbyte, because this is equivalent to an
+ empty second branch. */
+
+ else
+ {
+ if (condcount > 2)
+ {
+ *errorcodeptr = ERR27;
+ goto FAILED;
+ }
+ if (condcount == 1) subfirstbyte = subreqbyte = REQ_NONE;
+ }
+ }
+
+ /* Error if hit end of pattern */
+
+ if (*ptr != ')')
+ {
+ *errorcodeptr = ERR14;
+ goto FAILED;
+ }
+
+ /* In the pre-compile phase, update the length by the length of the group,
+ less the brackets at either end. Then reduce the compiled code to just a
+ set of non-capturing brackets so that it doesn't use much memory if it is
+ duplicated by a quantifier. */
+
+ if (lengthptr != NULL)
+ {
+ if (OFLOW_MAX - *lengthptr < length_prevgroup - 2 - 2*LINK_SIZE)
+ {
+ *errorcodeptr = ERR20;
+ goto FAILED;
+ }
+ *lengthptr += length_prevgroup - 2 - 2*LINK_SIZE;
+ *code++ = OP_BRA;
+ PUTINC(code, 0, 1 + LINK_SIZE);
+ *code++ = OP_KET;
+ PUTINC(code, 0, 1 + LINK_SIZE);
+ break; /* No need to waste time with special character handling */
+ }
+
+ /* Otherwise update the main code pointer to the end of the group. */
+
+ code = tempcode;
+
+ /* For a DEFINE group, required and first character settings are not
+ relevant. */
+
+ if (bravalue == OP_DEF) break;
+
+ /* Handle updating of the required and first characters for other types of
+ group. Update for normal brackets of all kinds, and conditions with two
+ branches (see code above). If the bracket is followed by a quantifier with
+ zero repeat, we have to back off. Hence the definition of zeroreqbyte and
+ zerofirstbyte outside the main loop so that they can be accessed for the
+ back off. */
+
+ zeroreqbyte = reqbyte;
+ zerofirstbyte = firstbyte;
+ groupsetfirstbyte = FALSE;
+
+ if (bravalue >= OP_ONCE)
+ {
+ /* If we have not yet set a firstbyte in this branch, take it from the
+ subpattern, remembering that it was set here so that a repeat of more
+ than one can replicate it as reqbyte if necessary. If the subpattern has
+ no firstbyte, set "none" for the whole branch. In both cases, a zero
+ repeat forces firstbyte to "none". */
+
+ if (firstbyte == REQ_UNSET)
+ {
+ if (subfirstbyte >= 0)
+ {
+ firstbyte = subfirstbyte;
+ groupsetfirstbyte = TRUE;
+ }
+ else firstbyte = REQ_NONE;
+ zerofirstbyte = REQ_NONE;
+ }
+
+ /* If firstbyte was previously set, convert the subpattern's firstbyte
+ into reqbyte if there wasn't one, using the vary flag that was in
+ existence beforehand. */
+
+ else if (subfirstbyte >= 0 && subreqbyte < 0)
+ subreqbyte = subfirstbyte | tempreqvary;
+
+ /* If the subpattern set a required byte (or set a first byte that isn't
+ really the first byte - see above), set it. */
+
+ if (subreqbyte >= 0) reqbyte = subreqbyte;
+ }
+
+ /* For a forward assertion, we take the reqbyte, if set. This can be
+ helpful if the pattern that follows the assertion doesn't set a different
+ char. For example, it's useful for /(?=abcde).+/. We can't set firstbyte
+ for an assertion, however, because it leads to an incorrect effect for patterns
+ such as /(?=a)a.+/ when the "real" "a" would then become a reqbyte instead
+ of a firstbyte. This is overcome by a scan at the end if there's no
+ firstbyte, looking for an asserted first char. */
+
+ else if (bravalue == OP_ASSERT && subreqbyte >= 0) reqbyte = subreqbyte;
+ break; /* End of processing '(' */
+
+
+ /* ===================================================================*/
+ /* Handle metasequences introduced by \. For ones like \d, the ESC_ values
+ are arranged to be the negation of the corresponding OP_values. For the
+ back references, the values are ESC_REF plus the reference number. Only
+ back references and those types that consume a character may be repeated.
+ We can test for values between ESC_b and ESC_Z for the latter; this may
+ have to change if any new ones are ever created. */
+
+ case '\\':
+ tempptr = ptr;
+ c = check_escape(&ptr, errorcodeptr, cd->bracount, options, FALSE);
+ if (*errorcodeptr != 0) goto FAILED;
+
+ if (c < 0)
+ {
+ if (-c == ESC_Q) /* Handle start of quoted string */
+ {
+ if (ptr[1] == '\\' && ptr[2] == 'E') ptr += 2; /* avoid empty string */
+ else inescq = TRUE;
+ continue;
+ }
+
+ if (-c == ESC_E) continue; /* Perl ignores an orphan \E */
+
+ /* For metasequences that actually match a character, we disable the
+ setting of a first character if it hasn't already been set. */
+
+ if (firstbyte == REQ_UNSET && -c > ESC_b && -c < ESC_Z)
+ firstbyte = REQ_NONE;
+
+ /* Set values to reset to if this is followed by a zero repeat. */
+
+ zerofirstbyte = firstbyte;
+ zeroreqbyte = reqbyte;
+
+ /* \k<name> or \k'name' is a back reference by name (Perl syntax).
+ We also support \k{name} (.NET syntax) */
+
+ if (-c == ESC_k && (ptr[1] == '<' || ptr[1] == '\'' || ptr[1] == '{'))
+ {
+ is_recurse = FALSE;
+ terminator = (*(++ptr) == '<')? '>' : (*ptr == '\'')? '\'' : '}';
+ goto NAMED_REF_OR_RECURSE;
+ }
+
+ /* Back references are handled specially; must disable firstbyte if
+ not set to cope with cases like (?=(\w+))\1: which would otherwise set
+ ':' later. */
+
+ if (-c >= ESC_REF)
+ {
+ recno = -c - ESC_REF;
+
+ HANDLE_REFERENCE: /* Come here from named backref handling */
+ if (firstbyte == REQ_UNSET) firstbyte = REQ_NONE;
+ previous = code;
+ *code++ = OP_REF;
+ PUT2INC(code, 0, recno);
+ cd->backref_map |= (recno < 32)? (1 << recno) : 1;
+ if (recno > cd->top_backref) cd->top_backref = recno;
+ }
+
+ /* So are Unicode property matches, if supported. */
+
+#ifdef SUPPORT_UCP
+ else if (-c == ESC_P || -c == ESC_p)
+ {
+ BOOL negated;
+ int pdata;
+ int ptype = get_ucp(&ptr, &negated, &pdata, errorcodeptr);
+ if (ptype < 0) goto FAILED;
+ previous = code;
+ *code++ = ((-c == ESC_p) != negated)? OP_PROP : OP_NOTPROP;
+ *code++ = ptype;
+ *code++ = pdata;
+ }
+#else
+
+ /* If Unicode properties are not supported, \X, \P, and \p are not
+ allowed. */
+
+ else if (-c == ESC_X || -c == ESC_P || -c == ESC_p)
+ {
+ *errorcodeptr = ERR45;
+ goto FAILED;
+ }
+#endif
+
+ /* For the rest (including \X when Unicode properties are supported), we
+ can obtain the OP value by negating the escape value. */
+
+ else
+ {
+ previous = (-c > ESC_b && -c < ESC_Z)? code : NULL;
+ *code++ = -c;
+ }
+ continue;
+ }
+
+ /* We have a data character whose value is in c. In UTF-8 mode it may have
+ a value > 127. We set its representation in the length/buffer, and then
+ handle it as a data character. */
+
+#ifdef SUPPORT_UTF8
+ if (utf8 && c > 127)
+ mclength = _pcre_ord2utf8(c, mcbuffer);
+ else
+#endif
+
+ {
+ mcbuffer[0] = c;
+ mclength = 1;
+ }
+ goto ONE_CHAR;
+
+
+ /* ===================================================================*/
+ /* Handle a literal character. It is guaranteed not to be whitespace or #
+ when the extended flag is set. If we are in UTF-8 mode, it may be a
+ multi-byte literal character. */
+
+ default:
+ NORMAL_CHAR:
+ mclength = 1;
+ mcbuffer[0] = c;
+
+#ifdef SUPPORT_UTF8
+ if (utf8 && c >= 0xc0)
+ {
+ while ((ptr[1] & 0xc0) == 0x80)
+ mcbuffer[mclength++] = *(++ptr);
+ }
+#endif
+
+ /* At this point we have the character's bytes in mcbuffer, and the length
+ in mclength. When not in UTF-8 mode, the length is always 1. */
+
+ ONE_CHAR:
+ previous = code;
+ *code++ = ((options & PCRE_CASELESS) != 0)? OP_CHARNC : OP_CHAR;
+ for (c = 0; c < mclength; c++) *code++ = mcbuffer[c];
+
+ /* Remember if \r or \n were seen */
+
+ if (mcbuffer[0] == '\r' || mcbuffer[0] == '\n')
+ cd->external_flags |= PCRE_HASCRORLF;
+
+ /* Set the first and required bytes appropriately. If no previous first
+ byte, set it from this character, but revert to none on a zero repeat.
+ Otherwise, leave the firstbyte value alone, and don't change it on a zero
+ repeat. */
+
+ if (firstbyte == REQ_UNSET)
+ {
+ zerofirstbyte = REQ_NONE;
+ zeroreqbyte = reqbyte;
+
+ /* If the character is more than one byte long, we can set firstbyte
+ only if it is not to be matched caselessly. */
+
+ if (mclength == 1 || req_caseopt == 0)
+ {
+ firstbyte = mcbuffer[0] | req_caseopt;
+ if (mclength != 1) reqbyte = code[-1] | cd->req_varyopt;
+ }
+ else firstbyte = reqbyte = REQ_NONE;
+ }
+
+ /* firstbyte was previously set; we can set reqbyte only if the length is
+ 1 or the matching is caseful. */
+
+ else
+ {
+ zerofirstbyte = firstbyte;
+ zeroreqbyte = reqbyte;
+ if (mclength == 1 || req_caseopt == 0)
+ reqbyte = code[-1] | req_caseopt | cd->req_varyopt;
+ }
+
+ break; /* End of literal character handling */
+ }
+ } /* end of big loop */
+
+
+/* Control never reaches here by falling through, only by a goto for all the
+error states. Pass back the position in the pattern so that it can be displayed
+to the user for diagnosing the error. */
+
+FAILED:
+*ptrptr = ptr;
+return FALSE;
+}
+
+
+
+
+/*************************************************
+* Compile sequence of alternatives *
+*************************************************/
+
+/* On entry, ptr is pointing past the bracket character, but on return it
+points to the closing bracket, or vertical bar, or end of string. The code
+variable is pointing at the byte into which the BRA operator has been stored.
+If the ims options are changed at the start (for a (?ims: group) or during any
+branch, we need to insert an OP_OPT item at the start of every following branch
+to ensure they get set correctly at run time, and also pass the new options
+into every subsequent branch compile.
+
+This function is used during the pre-compile phase when we are trying to find
+out the amount of memory needed, as well as during the real compile phase. The
+value of lengthptr distinguishes the two phases.
+
+Arguments:
+ options option bits, including any changes for this subpattern
+ oldims previous settings of ims option bits
+ codeptr -> the address of the current code pointer
+ ptrptr -> the address of the current pattern pointer
+ errorcodeptr -> pointer to error code variable
+ lookbehind TRUE if this is a lookbehind assertion
+ reset_bracount TRUE to reset the count for each branch
+ skipbytes skip this many bytes at start (for brackets and OP_COND)
+ firstbyteptr place to put the first required character, or a negative number
+ reqbyteptr place to put the last required character, or a negative number
+ bcptr pointer to the chain of currently open branches
+ cd points to the data block with tables pointers etc.
+ lengthptr NULL during the real compile phase
+ points to length accumulator during pre-compile phase
+
+Returns: TRUE on success
+*/
+
+static BOOL
+compile_regex(int options, int oldims, uschar **codeptr, const uschar **ptrptr,
+ int *errorcodeptr, BOOL lookbehind, BOOL reset_bracount, int skipbytes,
+ int *firstbyteptr, int *reqbyteptr, branch_chain *bcptr, compile_data *cd,
+ int *lengthptr)
+{
+const uschar *ptr = *ptrptr;
+uschar *code = *codeptr;
+uschar *last_branch = code;
+uschar *start_bracket = code;
+uschar *reverse_count = NULL;
+int firstbyte, reqbyte;
+int branchfirstbyte, branchreqbyte;
+int length;
+int orig_bracount;
+int max_bracount;
+branch_chain bc;
+
+bc.outer = bcptr;
+bc.current = code;
+
+firstbyte = reqbyte = REQ_UNSET;
+
+/* Accumulate the length for use in the pre-compile phase. Start with the
+length of the BRA and KET and any extra bytes that are required at the
+beginning. We accumulate in a local variable to save frequent testing of
+ lengthptr for NULL. We cannot do this by looking at the value of code at the
+start and end of each alternative, because compiled items are discarded during
+the pre-compile phase so that the work space is not exceeded. */
+
+length = 2 + 2*LINK_SIZE + skipbytes;
+
+/* WARNING: If the above line is changed for any reason, you must also change
+the code that abstracts option settings at the start of the pattern and makes
+them global. It tests the value of length for (2 + 2*LINK_SIZE) in the
+pre-compile phase to find out whether anything has yet been compiled or not. */
+
+/* Offset is set zero to mark that this bracket is still open */
+
+PUT(code, 1, 0);
+code += 1 + LINK_SIZE + skipbytes;
+
+/* Loop for each alternative branch */
+
+orig_bracount = max_bracount = cd->bracount;
+for (;;)
+ {
+ /* For a (?| group, reset the capturing bracket count so that each branch
+ uses the same numbers. */
+
+ if (reset_bracount) cd->bracount = orig_bracount;
+
+ /* Handle a change of ims options at the start of the branch */
+
+ if ((options & PCRE_IMS) != oldims)
+ {
+ *code++ = OP_OPT;
+ *code++ = options & PCRE_IMS;
+ length += 2;
+ }
+
+ /* Set up dummy OP_REVERSE if lookbehind assertion */
+
+ if (lookbehind)
+ {
+ *code++ = OP_REVERSE;
+ reverse_count = code;
+ PUTINC(code, 0, 0);
+ length += 1 + LINK_SIZE;
+ }
+
+ /* Now compile the branch; in the pre-compile phase its length gets added
+ into the length. */
+
+ if (!compile_branch(&options, &code, &ptr, errorcodeptr, &branchfirstbyte,
+ &branchreqbyte, &bc, cd, (lengthptr == NULL)? NULL : &length))
+ {
+ *ptrptr = ptr;
+ return FALSE;
+ }
+
+ /* Keep the highest bracket count in case (?| was used and some branch
+ has fewer than the rest. */
+
+ if (cd->bracount > max_bracount) max_bracount = cd->bracount;
+
+ /* In the real compile phase, there is some post-processing to be done. */
+
+ if (lengthptr == NULL)
+ {
+ /* If this is the first branch, the firstbyte and reqbyte values for the
+ branch become the values for the regex. */
+
+ if (*last_branch != OP_ALT)
+ {
+ firstbyte = branchfirstbyte;
+ reqbyte = branchreqbyte;
+ }
+
+ /* If this is not the first branch, the first char and reqbyte have to
+ match the values from all the previous branches, except that if the
+ previous value for reqbyte didn't have REQ_VARY set, it can still match,
+ and we set REQ_VARY for the regex. */
+
+ else
+ {
+ /* If we previously had a firstbyte, but it doesn't match the new branch,
+ we have to abandon the firstbyte for the regex, but if there was
+ previously no reqbyte, it takes on the value of the old firstbyte. */
+
+ if (firstbyte >= 0 && firstbyte != branchfirstbyte)
+ {
+ if (reqbyte < 0) reqbyte = firstbyte;
+ firstbyte = REQ_NONE;
+ }
+
+ /* If we (now or from before) have no firstbyte, a firstbyte from the
+ branch becomes a reqbyte if there isn't a branch reqbyte. */
+
+ if (firstbyte < 0 && branchfirstbyte >= 0 && branchreqbyte < 0)
+ branchreqbyte = branchfirstbyte;
+
+ /* Now ensure that the reqbytes match */
+
+ if ((reqbyte & ~REQ_VARY) != (branchreqbyte & ~REQ_VARY))
+ reqbyte = REQ_NONE;
+ else reqbyte |= branchreqbyte; /* To "or" REQ_VARY */
+ }
+
+ /* If lookbehind, check that this branch matches a fixed-length string, and
+ put the length into the OP_REVERSE item. Temporarily mark the end of the
+ branch with OP_END. */
+
+ if (lookbehind)
+ {
+ int fixed_length;
+ *code = OP_END;
+ fixed_length = find_fixedlength(last_branch, options);
+ DPRINTF(("fixed length = %d\n", fixed_length));
+ if (fixed_length < 0)
+ {
+ *errorcodeptr = (fixed_length == -2)? ERR36 : ERR25;
+ *ptrptr = ptr;
+ return FALSE;
+ }
+ PUT(reverse_count, 0, fixed_length);
+ }
+ }
+
+ /* Reached end of expression, either ')' or end of pattern. In the real
+ compile phase, go back through the alternative branches and reverse the chain
+ of offsets, with the field in the BRA item now becoming an offset to the
+ first alternative. If there are no alternatives, it points to the end of the
+ group. The length in the terminating ket is always the length of the whole
+ bracketed item. If any of the ims options were changed inside the group,
+ compile a resetting op-code following, except at the very end of the pattern.
+ Return leaving the pointer at the terminating char. */
+
+ if (*ptr != '|')
+ {
+ if (lengthptr == NULL)
+ {
+ int branch_length = code - last_branch;
+ do
+ {
+ int prev_length = GET(last_branch, 1);
+ PUT(last_branch, 1, branch_length);
+ branch_length = prev_length;
+ last_branch -= branch_length;
+ }
+ while (branch_length > 0);
+ }
+
+ /* Fill in the ket */
+
+ *code = OP_KET;
+ PUT(code, 1, code - start_bracket);
+ code += 1 + LINK_SIZE;
+
+ /* Resetting option if needed */
+
+ if ((options & PCRE_IMS) != oldims && *ptr == ')')
+ {
+ *code++ = OP_OPT;
+ *code++ = oldims;
+ length += 2;
+ }
+
+ /* Retain the highest bracket number, in case resetting was used. */
+
+ cd->bracount = max_bracount;
+
+ /* Set values to pass back */
+
+ *codeptr = code;
+ *ptrptr = ptr;
+ *firstbyteptr = firstbyte;
+ *reqbyteptr = reqbyte;
+ if (lengthptr != NULL)
+ {
+ if (OFLOW_MAX - *lengthptr < length)
+ {
+ *errorcodeptr = ERR20;
+ return FALSE;
+ }
+ *lengthptr += length;
+ }
+ return TRUE;
+ }
+
+ /* Another branch follows. In the pre-compile phase, we can move the code
+ pointer back to where it was for the start of the first branch. (That is,
+ pretend that each branch is the only one.)
+
+ In the real compile phase, insert an ALT node. Its length field points back
+ to the previous branch while the bracket remains open. At the end the chain
+ is reversed. It's done like this so that the start of the bracket has a
+ zero offset until it is closed, making it possible to detect recursion. */
+
+ if (lengthptr != NULL)
+ {
+ code = *codeptr + 1 + LINK_SIZE + skipbytes;
+ length += 1 + LINK_SIZE;
+ }
+ else
+ {
+ *code = OP_ALT;
+ PUT(code, 1, code - last_branch);
+ bc.current = last_branch = code;
+ code += 1 + LINK_SIZE;
+ }
+
+ ptr++;
+ }
+/* Control never reaches here */
+}
+
+
+
+
+/*************************************************
+* Check for anchored expression *
+*************************************************/
+
+/* Try to find out if this is an anchored regular expression. Consider each
+alternative branch. If they all start with OP_SOD or OP_CIRC, or with a bracket
+all of whose alternatives start with OP_SOD or OP_CIRC (recurse ad lib), then
+it's anchored. However, if this is a multiline pattern, then only OP_SOD
+counts, since OP_CIRC can match in the middle.
+
+We can also consider a regex to be anchored if OP_SOM starts all its branches.
+This is the code for \G, which means "match at start of match position, taking
+into account the match offset".
+
+A branch is also implicitly anchored if it starts with .* and DOTALL is set,
+because that will try the rest of the pattern at all possible matching points,
+so there is no point trying again.... er ....
+
+.... except when the .* appears inside capturing parentheses, and there is a
+subsequent back reference to those parentheses. We haven't enough information
+to catch that case precisely.
+
+At first, the best we could do was to detect when .* was in capturing brackets
+and the highest back reference was greater than or equal to that level.
+However, by keeping a bitmap of the first 31 back references, we can catch some
+of the more common cases more precisely.
+
+Arguments:
+ code points to start of expression (the bracket)
+ options points to the options setting
+ bracket_map a bitmap of which brackets we are inside while testing; this
+ handles up to substring 31; after that we just have to take
+ the less precise approach
+ backref_map the back reference bitmap
+
+Returns: TRUE or FALSE
+*/
+
+static BOOL
+is_anchored(register const uschar *code, int *options, unsigned int bracket_map,
+ unsigned int backref_map)
+{
+do {
+ const uschar *scode = first_significant_code(code + _pcre_OP_lengths[*code],
+ options, PCRE_MULTILINE, FALSE);
+ register int op = *scode;
+
+ /* Non-capturing brackets */
+
+ if (op == OP_BRA)
+ {
+ if (!is_anchored(scode, options, bracket_map, backref_map)) return FALSE;
+ }
+
+ /* Capturing brackets */
+
+ else if (op == OP_CBRA)
+ {
+ int n = GET2(scode, 1+LINK_SIZE);
+ int new_map = bracket_map | ((n < 32)? (1 << n) : 1);
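+ /* Group numbers of 32 or more all map to bit 0, which gives the less
+ precise behaviour described in the function comment above. */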
+ if (!is_anchored(scode, options, new_map, backref_map)) return FALSE;
+ }
+
+ /* Other brackets */
+
+ else if (op == OP_ASSERT || op == OP_ONCE || op == OP_COND)
+ {
+ if (!is_anchored(scode, options, bracket_map, backref_map)) return FALSE;
+ }
+
+ /* .* is not anchored unless DOTALL is set and it isn't in brackets that
+ are or may be referenced. */
+
+ else if ((op == OP_TYPESTAR || op == OP_TYPEMINSTAR ||
+ op == OP_TYPEPOSSTAR) &&
+ (*options & PCRE_DOTALL) != 0)
+ {
+ if (scode[1] != OP_ANY || (bracket_map & backref_map) != 0) return FALSE;
+ }
+
+ /* Check for explicit anchoring */
+
+ else if (op != OP_SOD && op != OP_SOM &&
+ ((*options & PCRE_MULTILINE) != 0 || op != OP_CIRC))
+ return FALSE;
+ code += GET(code, 1);
+ }
+while (*code == OP_ALT); /* Loop for each alternative */
+return TRUE;
+}
+
+
+
+/*************************************************
+* Check for starting with ^ or .* *
+*************************************************/
+
+/* This is called to find out if every branch starts with ^ or .* so that
+"first char" processing can be done to speed things up in multiline
+matching and for non-DOTALL patterns that start with .* (which must start at
+the beginning or after \n). As in the case of is_anchored() (see above), we
+have to take account of back references to capturing brackets that contain .*
+because in that case we can't make the assumption.
+
+Arguments:
+ code points to start of expression (the bracket)
+ bracket_map a bitmap of which brackets we are inside while testing; this
+ handles up to substring 31; after that we just have to take
+ the less precise approach
+ backref_map the back reference bitmap
+
+Returns: TRUE or FALSE
+*/
+
+static BOOL
+is_startline(const uschar *code, unsigned int bracket_map,
+ unsigned int backref_map)
+{
+do {
+ const uschar *scode = first_significant_code(code + _pcre_OP_lengths[*code],
+ NULL, 0, FALSE);
+ register int op = *scode;
+
+ /* Non-capturing brackets */
+
+ if (op == OP_BRA)
+ {
+ if (!is_startline(scode, bracket_map, backref_map)) return FALSE;
+ }
+
+ /* Capturing brackets */
+
+ else if (op == OP_CBRA)
+ {
+ int n = GET2(scode, 1+LINK_SIZE);
+ int new_map = bracket_map | ((n < 32)? (1 << n) : 1);
+ if (!is_startline(scode, new_map, backref_map)) return FALSE;
+ }
+
+ /* Other brackets */
+
+ else if (op == OP_ASSERT || op == OP_ONCE || op == OP_COND)
+ { if (!is_startline(scode, bracket_map, backref_map)) return FALSE; }
+
+ /* .* means "start at start or after \n" if it isn't in brackets that
+ may be referenced. */
+
+ else if (op == OP_TYPESTAR || op == OP_TYPEMINSTAR || op == OP_TYPEPOSSTAR)
+ {
+ if (scode[1] != OP_ANY || (bracket_map & backref_map) != 0) return FALSE;
+ }
+
+ /* Check for explicit circumflex */
+
+ else if (op != OP_CIRC) return FALSE;
+
+ /* Move on to the next alternative */
+
+ code += GET(code, 1);
+ }
+while (*code == OP_ALT); /* Loop for each alternative */
+return TRUE;
+}
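+
+/* Illustrative note (not part of the original source): patterns such as
+/^foo|^bar/ or /.*token/ (without DOTALL) pass this test, so PCRE_STARTLINE
+can be set and match attempts restricted to the start of the subject or to
+positions just after a newline. */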
+
+
+
+/*************************************************
+* Check for asserted fixed first char *
+*************************************************/
+
+/* During compilation, the "first char" settings from forward assertions are
+discarded, because they can cause conflicts with actual literals that follow.
+However, if we end up without a first char setting for an unanchored pattern,
+it is worth scanning the regex to see if there is an initial asserted first
+char. If all branches start with the same asserted char, or with a bracket all
+of whose alternatives start with the same asserted char (recurse ad lib), then
+we return that char, otherwise -1.
+
+Arguments:
+ code points to start of expression (the bracket)
+ options pointer to the options (used to check casing changes)
+ inassert TRUE if in an assertion
+
+Returns: -1 or the fixed first char
+*/
+
+static int
+find_firstassertedchar(const uschar *code, int *options, BOOL inassert)
+{
+register int c = -1;
+do {
+ int d;
+ const uschar *scode =
+ first_significant_code(code + 1+LINK_SIZE, options, PCRE_CASELESS, TRUE);
+ register int op = *scode;
+
+ switch(op)
+ {
+ default:
+ return -1;
+
+ case OP_BRA:
+ case OP_CBRA:
+ case OP_ASSERT:
+ case OP_ONCE:
+ case OP_COND:
+ if ((d = find_firstassertedchar(scode, options, op == OP_ASSERT)) < 0)
+ return -1;
+ if (c < 0) c = d; else if (c != d) return -1;
+ break;
+
+ case OP_EXACT: /* Fall through */
+ scode += 2;
+
+ case OP_CHAR:
+ case OP_CHARNC:
+ case OP_PLUS:
+ case OP_MINPLUS:
+ case OP_POSPLUS:
+ if (!inassert) return -1;
+ if (c < 0)
+ {
+ c = scode[1];
+ if ((*options & PCRE_CASELESS) != 0) c |= REQ_CASELESS;
+ }
+ else if (c != scode[1]) return -1;
+ break;
+ }
+
+ code += GET(code, 1);
+ }
+while (*code == OP_ALT);
+return c;
+}
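+
+/* Illustrative note (not part of the original source): for a pattern whose
+branches all begin with a lookahead asserting the same character, for example
+/(?=a)\w+|(?=a)\d+/, this scan returns 'a', which can then be used to speed
+up unanchored matching. */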
+
+
+
+/*************************************************
+* Compile a Regular Expression *
+*************************************************/
+
+/* This function takes a string and returns a pointer to a block of store
+holding a compiled version of the expression. The original API for this
+function had no error code return variable; it is retained for backwards
+compatibility. The new function is given a new name.
+
+Arguments:
+ pattern the regular expression
+ options various option bits
+ errorcodeptr pointer to error code variable (pcre_compile2() only)
+ can be NULL if you don't want a code value
+ errorptr pointer to pointer to error text
+ erroroffset ptr offset in pattern where error was detected
+ tables pointer to character tables or NULL
+
+Returns: pointer to compiled data block, or NULL on error,
+ with errorptr and erroroffset set
+*/
+
+PCRE_EXP_DEFN pcre *
+pcre_compile(const char *pattern, int options, const char **errorptr,
+ int *erroroffset, const unsigned char *tables)
+{
+return pcre_compile2(pattern, options, NULL, errorptr, erroroffset, tables);
+}
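+
+/* Illustrative usage (not part of the original source): a typical caller of
+pcre_compile2() looks something like this, reporting the error text and
+offset if compilation fails.
+
+ const char *errptr;
+ int errcode, erroffset;
+ pcre *re = pcre_compile2("^(\\d+)-(\\d+)$", PCRE_MULTILINE,
+ &errcode, &errptr, &erroffset, NULL);
+ if (re == NULL)
+ fprintf(stderr, "compile failed at offset %d: %s (code %d)\n",
+ erroffset, errptr, errcode);
+*/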
+
+
+PCRE_EXP_DEFN pcre *
+pcre_compile2(const char *pattern, int options, int *errorcodeptr,
+ const char **errorptr, int *erroroffset, const unsigned char *tables)
+{
+real_pcre *re;
+int length = 1; /* For final END opcode */
+int firstbyte, reqbyte, newline;
+int errorcode = 0;
+int skipatstart = 0;
+#ifdef SUPPORT_UTF8
+BOOL utf8;
+#endif
+size_t size;
+uschar *code;
+const uschar *codestart;
+const uschar *ptr;
+compile_data compile_block;
+compile_data *cd = &compile_block;
+
+/* This space is used for "compiling" into during the first phase, when we are
+computing the amount of memory that is needed. Compiled items are thrown away
+as soon as possible, so that a fairly large buffer should be sufficient for
+this purpose. The same space is used in the second phase for remembering where
+to fill in forward references to subpatterns. */
+
+uschar cworkspace[COMPILE_WORK_SIZE];
+
+
+/* Set this early so that early errors get offset 0. */
+
+ptr = (const uschar *)pattern;
+
+/* We can't pass back an error message if errorptr is NULL; I guess the best we
+can do is just return NULL, but we can set a code value if there is a code
+pointer. */
+
+if (errorptr == NULL)
+ {
+ if (errorcodeptr != NULL) *errorcodeptr = 99;
+ return NULL;
+ }
+
+*errorptr = NULL;
+if (errorcodeptr != NULL) *errorcodeptr = ERR0;
+
+/* However, we can give a message for this error */
+
+if (erroroffset == NULL)
+ {
+ errorcode = ERR16;
+ goto PCRE_EARLY_ERROR_RETURN2;
+ }
+
+*erroroffset = 0;
+
+/* Can't support UTF8 unless PCRE has been compiled to include the code. */
+
+#ifdef SUPPORT_UTF8
+utf8 = (options & PCRE_UTF8) != 0;
+if (utf8 && (options & PCRE_NO_UTF8_CHECK) == 0 &&
+ (*erroroffset = _pcre_valid_utf8((uschar *)pattern, -1)) >= 0)
+ {
+ errorcode = ERR44;
+ goto PCRE_EARLY_ERROR_RETURN2;
+ }
+#else
+if ((options & PCRE_UTF8) != 0)
+ {
+ errorcode = ERR32;
+ goto PCRE_EARLY_ERROR_RETURN;
+ }
+#endif
+
+if ((options & ~PUBLIC_OPTIONS) != 0)
+ {
+ errorcode = ERR17;
+ goto PCRE_EARLY_ERROR_RETURN;
+ }
+
+/* Set up pointers to the individual character tables */
+
+if (tables == NULL) tables = _pcre_default_tables;
+cd->lcc = tables + lcc_offset;
+cd->fcc = tables + fcc_offset;
+cd->cbits = tables + cbits_offset;
+cd->ctypes = tables + ctypes_offset;
+
+/* Check for global one-time settings at the start of the pattern, and remember
+the offset for later. */
+
+while (ptr[skipatstart] == '(' && ptr[skipatstart+1] == '*')
+ {
+ int newnl = 0;
+ int newbsr = 0;
+
+ if (strncmp((char *)(ptr+skipatstart+2), "CR)", 3) == 0)
+ { skipatstart += 5; newnl = PCRE_NEWLINE_CR; }
+ else if (strncmp((char *)(ptr+skipatstart+2), "LF)", 3) == 0)
+ { skipatstart += 5; newnl = PCRE_NEWLINE_LF; }
+ else if (strncmp((char *)(ptr+skipatstart+2), "CRLF)", 5) == 0)
+ { skipatstart += 7; newnl = PCRE_NEWLINE_CR + PCRE_NEWLINE_LF; }
+ else if (strncmp((char *)(ptr+skipatstart+2), "ANY)", 4) == 0)
+ { skipatstart += 6; newnl = PCRE_NEWLINE_ANY; }
+ else if (strncmp((char *)(ptr+skipatstart+2), "ANYCRLF)", 8) == 0)
+ { skipatstart += 10; newnl = PCRE_NEWLINE_ANYCRLF; }
+
+ else if (strncmp((char *)(ptr+skipatstart+2), "BSR_ANYCRLF)", 12) == 0)
+ { skipatstart += 14; newbsr = PCRE_BSR_ANYCRLF; }
+ else if (strncmp((char *)(ptr+skipatstart+2), "BSR_UNICODE)", 12) == 0)
+ { skipatstart += 14; newbsr = PCRE_BSR_UNICODE; }
+
+ if (newnl != 0)
+ options = (options & ~PCRE_NEWLINE_BITS) | newnl;
+ else if (newbsr != 0)
+ options = (options & ~(PCRE_BSR_ANYCRLF|PCRE_BSR_UNICODE)) | newbsr;
+ else break;
+ }
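+
+/* Illustrative note (not part of the original source): a pattern such as
+"(*CRLF)^abc$" is handled by the loop above, which selects the CRLF newline
+convention and advances skipatstart by 7 so that compilation proper starts at
+the '^'. */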
+
+/* Check validity of \R options. */
+
+switch (options & (PCRE_BSR_ANYCRLF|PCRE_BSR_UNICODE))
+ {
+ case 0:
+ case PCRE_BSR_ANYCRLF:
+ case PCRE_BSR_UNICODE:
+ break;
+ default: errorcode = ERR56; goto PCRE_EARLY_ERROR_RETURN;
+ }
+
+/* Handle different types of newline. The three bits give seven cases. The
+current code allows for fixed one- or two-byte sequences, plus "any" and
+"anycrlf". */
+
+switch (options & PCRE_NEWLINE_BITS)
+ {
+ case 0: newline = NEWLINE; break; /* Build-time default */
+ case PCRE_NEWLINE_CR: newline = '\r'; break;
+ case PCRE_NEWLINE_LF: newline = '\n'; break;
+ case PCRE_NEWLINE_CR+
+ PCRE_NEWLINE_LF: newline = ('\r' << 8) | '\n'; break;
+ case PCRE_NEWLINE_ANY: newline = -1; break;
+ case PCRE_NEWLINE_ANYCRLF: newline = -2; break;
+ default: errorcode = ERR56; goto PCRE_EARLY_ERROR_RETURN;
+ }
+
+if (newline == -2)
+ {
+ cd->nltype = NLTYPE_ANYCRLF;
+ }
+else if (newline < 0)
+ {
+ cd->nltype = NLTYPE_ANY;
+ }
+else
+ {
+ cd->nltype = NLTYPE_FIXED;
+ if (newline > 255)
+ {
+ cd->nllen = 2;
+ cd->nl[0] = (newline >> 8) & 255;
+ cd->nl[1] = newline & 255;
+ }
+ else
+ {
+ cd->nllen = 1;
+ cd->nl[0] = newline;
+ }
+ }
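+
+/* Illustrative note (not part of the original source): for the CRLF case the
+value of newline is ('\r' << 8) | '\n', which is greater than 255, so nllen
+becomes 2 and cd->nl holds the two bytes 0x0d, 0x0a. */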
+
+/* Maximum back reference and backref bitmap. The bitmap records up to 31 back
+references to help in deciding whether (.*) can be treated as anchored or not.
+*/
+
+cd->top_backref = 0;
+cd->backref_map = 0;
+
+/* Reflect pattern for debugging output */
+
+DPRINTF(("------------------------------------------------------------------\n"));
+DPRINTF(("%s\n", pattern));
+
+/* Pretend to compile the pattern while actually just accumulating the length
+of memory required. This behaviour is triggered by passing a non-NULL final
+argument to compile_regex(). We pass a block of workspace (cworkspace) for it
+to compile parts of the pattern into; the compiled code is discarded when it is
+no longer needed, so hopefully this workspace will never overflow, though there
+is a test for its doing so. */
+
+cd->bracount = 0;
+cd->names_found = 0;
+cd->name_entry_size = 0;
+cd->name_table = NULL;
+cd->start_workspace = cworkspace;
+cd->start_code = cworkspace;
+cd->hwm = cworkspace;
+cd->start_pattern = (const uschar *)pattern;
+cd->end_pattern = (const uschar *)(pattern + strlen(pattern));
+cd->req_varyopt = 0;
+cd->external_options = options;
+cd->external_flags = 0;
+
+/* Now do the pre-compile. On error, errorcode will be set non-zero, so we
+don't need to look at the result of the function here. The initial options have
+been put into the cd block so that they can be changed if an option setting is
+found within the regex right at the beginning. Bringing initial option settings
+outside can help speed up starting point checks. */
+
+ptr += skipatstart;
+code = cworkspace;
+*code = OP_BRA;
+(void)compile_regex(cd->external_options, cd->external_options & PCRE_IMS,
+ &code, &ptr, &errorcode, FALSE, FALSE, 0, &firstbyte, &reqbyte, NULL, cd,
+ &length);
+if (errorcode != 0) goto PCRE_EARLY_ERROR_RETURN;
+
+DPRINTF(("end pre-compile: length=%d workspace=%d\n", length,
+ cd->hwm - cworkspace));
+
+if (length > MAX_PATTERN_SIZE)
+ {
+ errorcode = ERR20;
+ goto PCRE_EARLY_ERROR_RETURN;
+ }
+
+/* Compute the size of data block needed and get it, either from malloc or
+externally provided function. Integer overflow should no longer be possible
+because nowadays we limit the maximum value of cd->names_found and
+cd->name_entry_size. */
+
+size = length + sizeof(real_pcre) + cd->names_found * (cd->name_entry_size + 3);
+re = (real_pcre *)(pcre_malloc)(size);
+
+if (re == NULL)
+ {
+ errorcode = ERR21;
+ goto PCRE_EARLY_ERROR_RETURN;
+ }
+
+/* Put in the magic number, and save the sizes, initial options, internal
+flags, and character table pointer. NULL is used for the default character
+tables. The nullpad field is at the end; it's there to help in the case when a
+regex compiled on a system with 4-byte pointers is run on another with 8-byte
+pointers. */
+
+re->magic_number = MAGIC_NUMBER;
+re->size = size;
+re->options = cd->external_options;
+re->flags = cd->external_flags;
+re->dummy1 = 0;
+re->first_byte = 0;
+re->req_byte = 0;
+re->name_table_offset = sizeof(real_pcre);
+re->name_entry_size = cd->name_entry_size;
+re->name_count = cd->names_found;
+re->ref_count = 0;
+re->tables = (tables == _pcre_default_tables)? NULL : tables;
+re->nullpad = NULL;
+
+/* The starting points of the name/number translation table and of the code are
+passed around in the compile data block. The start/end pattern and initial
+options are already set from the pre-compile phase, as is the name_entry_size
+field. Reset the bracket count and the names_found field. Also reset the hwm
+field; this time it's used for remembering forward references to subpatterns.
+*/
+
+cd->bracount = 0;
+cd->names_found = 0;
+cd->name_table = (uschar *)re + re->name_table_offset;
+codestart = cd->name_table + re->name_entry_size * re->name_count;
+cd->start_code = codestart;
+cd->hwm = cworkspace;
+cd->req_varyopt = 0;
+cd->had_accept = FALSE;
+
+/* Set up a starting, non-extracting bracket, then compile the expression. On
+error, errorcode will be set non-zero, so we don't need to look at the result
+of the function here. */
+
+ptr = (const uschar *)pattern + skipatstart;
+code = (uschar *)codestart;
+*code = OP_BRA;
+(void)compile_regex(re->options, re->options & PCRE_IMS, &code, &ptr,
+ &errorcode, FALSE, FALSE, 0, &firstbyte, &reqbyte, NULL, cd, NULL);
+re->top_bracket = cd->bracount;
+re->top_backref = cd->top_backref;
+re->flags = cd->external_flags;
+
+if (cd->had_accept) reqbyte = -1; /* Must disable after (*ACCEPT) */
+
+/* If not reached end of pattern on success, there's an excess bracket. */
+
+if (errorcode == 0 && *ptr != 0) errorcode = ERR22;
+
+/* Fill in the terminating state and check for disastrous overflow, but
+if debugging, leave the test till after things are printed out. */
+
+*code++ = OP_END;
+
+#ifndef DEBUG
+if (code - codestart > length) errorcode = ERR23;
+#endif
+
+/* Fill in any forward references that are required. */
+
+while (errorcode == 0 && cd->hwm > cworkspace)
+ {
+ int offset, recno;
+ const uschar *groupptr;
+ cd->hwm -= LINK_SIZE;
+ offset = GET(cd->hwm, 0);
+ recno = GET(codestart, offset);
+ groupptr = find_bracket(codestart, (re->options & PCRE_UTF8) != 0, recno);
+ if (groupptr == NULL) errorcode = ERR53;
+ else PUT(((uschar *)codestart), offset, groupptr - codestart);
+ }
+
+/* Give an error if there's back reference to a non-existent capturing
+subpattern. */
+
+if (errorcode == 0 && re->top_backref > re->top_bracket) errorcode = ERR15;
+
+/* Failed to compile, or error while post-processing */
+
+if (errorcode != 0)
+ {
+ (pcre_free)(re);
+ PCRE_EARLY_ERROR_RETURN:
+ *erroroffset = ptr - (const uschar *)pattern;
+ PCRE_EARLY_ERROR_RETURN2:
+ *errorptr = find_error_text(errorcode);
+ if (errorcodeptr != NULL) *errorcodeptr = errorcode;
+ return NULL;
+ }
+
+/* If the anchored option was not passed, set the flag if we can determine that
+the pattern is anchored by virtue of ^ characters or \A or anything else (such
+as starting with .* when DOTALL is set).
+
+Otherwise, if we know what the first byte has to be, save it, because that
+speeds up unanchored matches no end. If not, see if we can set the
+PCRE_STARTLINE flag. This is helpful for multiline matches when all branches
+start with ^, and also when all branches start with .* for non-DOTALL matches.
+*/
+
+if ((re->options & PCRE_ANCHORED) == 0)
+ {
+ int temp_options = re->options; /* May get changed during these scans */
+ if (is_anchored(codestart, &temp_options, 0, cd->backref_map))
+ re->options |= PCRE_ANCHORED;
+ else
+ {
+ if (firstbyte < 0)
+ firstbyte = find_firstassertedchar(codestart, &temp_options, FALSE);
+ if (firstbyte >= 0) /* Remove caseless flag for non-caseable chars */
+ {
+ int ch = firstbyte & 255;
+ re->first_byte = ((firstbyte & REQ_CASELESS) != 0 &&
+ cd->fcc[ch] == ch)? ch : firstbyte;
+ re->flags |= PCRE_FIRSTSET;
+ }
+ else if (is_startline(codestart, 0, cd->backref_map))
+ re->flags |= PCRE_STARTLINE;
+ }
+ }
+
+/* For an anchored pattern, we use the "required byte" only if it follows a
+variable length item in the regex. Remove the caseless flag for non-caseable
+bytes. */
+
+if (reqbyte >= 0 &&
+ ((re->options & PCRE_ANCHORED) == 0 || (reqbyte & REQ_VARY) != 0))
+ {
+ int ch = reqbyte & 255;
+ re->req_byte = ((reqbyte & REQ_CASELESS) != 0 &&
+ cd->fcc[ch] == ch)? (reqbyte & ~REQ_CASELESS) : reqbyte;
+ re->flags |= PCRE_REQCHSET;
+ }
+
+/* Print out the compiled data if debugging is enabled. This is never the
+case when building a production library. */
+
+#ifdef DEBUG
+
+printf("Length = %d top_bracket = %d top_backref = %d\n",
+ length, re->top_bracket, re->top_backref);
+
+printf("Options=%08x\n", re->options);
+
+if ((re->flags & PCRE_FIRSTSET) != 0)
+ {
+ int ch = re->first_byte & 255;
+ const char *caseless = ((re->first_byte & REQ_CASELESS) == 0)?
+ "" : " (caseless)";
+ if (isprint(ch)) printf("First char = %c%s\n", ch, caseless);
+ else printf("First char = \\x%02x%s\n", ch, caseless);
+ }
+
+if ((re->flags & PCRE_REQCHSET) != 0)
+ {
+ int ch = re->req_byte & 255;
+ const char *caseless = ((re->req_byte & REQ_CASELESS) == 0)?
+ "" : " (caseless)";
+ if (isprint(ch)) printf("Req char = %c%s\n", ch, caseless);
+ else printf("Req char = \\x%02x%s\n", ch, caseless);
+ }
+
+pcre_printint(re, stdout, TRUE);
+
+/* This check is done here in the debugging case so that the code that
+was compiled can be seen. */
+
+if (code - codestart > length)
+ {
+ (pcre_free)(re);
+ *errorptr = find_error_text(ERR23);
+ *erroroffset = ptr - (uschar *)pattern;
+ if (errorcodeptr != NULL) *errorcodeptr = ERR23;
+ return NULL;
+ }
+#endif /* DEBUG */
+
+return (pcre *)re;
+}
+
+/* End of pcre_compile.c */
diff --git a/src/third_party/pcre-7.4/pcre_config.c b/src/third_party/pcre-7.4/pcre_config.c
new file mode 100644
index 00000000000..220ef93cb10
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcre_config.c
@@ -0,0 +1,128 @@
+/*************************************************
+* Perl-Compatible Regular Expressions *
+*************************************************/
+
+/* PCRE is a library of functions to support regular expressions whose syntax
+and semantics are as close as possible to those of the Perl 5 language.
+
+ Written by Philip Hazel
+ Copyright (c) 1997-2007 University of Cambridge
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+
+/* This module contains the external function pcre_config(). */
+
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "pcre_internal.h"
+
+
+/*************************************************
+* Return info about what features are configured *
+*************************************************/
+
+/* This function has an extensible interface so that additional items can be
+added compatibly.
+
+Arguments:
+ what what information is required
+ where where to put the information
+
+Returns: 0 if data returned, negative on error
+*/
+
+PCRE_EXP_DEFN int
+pcre_config(int what, void *where)
+{
+switch (what)
+ {
+ case PCRE_CONFIG_UTF8:
+#ifdef SUPPORT_UTF8
+ *((int *)where) = 1;
+#else
+ *((int *)where) = 0;
+#endif
+ break;
+
+ case PCRE_CONFIG_UNICODE_PROPERTIES:
+#ifdef SUPPORT_UCP
+ *((int *)where) = 1;
+#else
+ *((int *)where) = 0;
+#endif
+ break;
+
+ case PCRE_CONFIG_NEWLINE:
+ *((int *)where) = NEWLINE;
+ break;
+
+ case PCRE_CONFIG_BSR:
+#ifdef BSR_ANYCRLF
+ *((int *)where) = 1;
+#else
+ *((int *)where) = 0;
+#endif
+ break;
+
+ case PCRE_CONFIG_LINK_SIZE:
+ *((int *)where) = LINK_SIZE;
+ break;
+
+ case PCRE_CONFIG_POSIX_MALLOC_THRESHOLD:
+ *((int *)where) = POSIX_MALLOC_THRESHOLD;
+ break;
+
+ case PCRE_CONFIG_MATCH_LIMIT:
+ *((unsigned int *)where) = MATCH_LIMIT;
+ break;
+
+ case PCRE_CONFIG_MATCH_LIMIT_RECURSION:
+ *((unsigned int *)where) = MATCH_LIMIT_RECURSION;
+ break;
+
+ case PCRE_CONFIG_STACKRECURSE:
+#ifdef NO_RECURSE
+ *((int *)where) = 0;
+#else
+ *((int *)where) = 1;
+#endif
+ break;
+
+ default: return PCRE_ERROR_BADOPTION;
+ }
+
+return 0;
+}
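+
+/* Illustrative usage (not part of the original source): callers pass a
+pointer of the documented type for the item being queried, for example
+
+ int utf8_supported;
+ if (pcre_config(PCRE_CONFIG_UTF8, &utf8_supported) == 0 && utf8_supported)
+ printf("this build of PCRE supports UTF-8\n");
+*/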
+
+/* End of pcre_config.c */
diff --git a/src/third_party/pcre-7.4/pcre_dfa_exec.c b/src/third_party/pcre-7.4/pcre_dfa_exec.c
new file mode 100644
index 00000000000..e590fbb145f
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcre_dfa_exec.c
@@ -0,0 +1,2896 @@
+/*************************************************
+* Perl-Compatible Regular Expressions *
+*************************************************/
+
+/* PCRE is a library of functions to support regular expressions whose syntax
+and semantics are as close as possible to those of the Perl 5 language.
+
+ Written by Philip Hazel
+ Copyright (c) 1997-2007 University of Cambridge
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+
+/* This module contains the external function pcre_dfa_exec(), which is an
+alternative matching function that uses a sort of DFA algorithm (not a true
+FSM). This is NOT Perl-compatible, but it has advantages in certain
+applications. */
+
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#define NLBLOCK md /* Block containing newline information */
+#define PSSTART start_subject /* Field containing processed string start */
+#define PSEND end_subject /* Field containing processed string end */
+
+#include "pcre_internal.h"
+
+
+/* For use to indent debugging output */
+
+#define SP " "
+
+
+
+/*************************************************
+* Code parameters and static tables *
+*************************************************/
+
+/* These are offsets that are used to turn the OP_TYPESTAR and friends opcodes
+into others, under special conditions. A gap of 20 between the blocks should be
+enough. The resulting opcodes don't have to be less than 256 because they are
+never stored, so we push them well clear of the normal opcodes. */
+
+#define OP_PROP_EXTRA 300
+#define OP_EXTUNI_EXTRA 320
+#define OP_ANYNL_EXTRA 340
+#define OP_HSPACE_EXTRA 360
+#define OP_VSPACE_EXTRA 380
+
+
+/* This table identifies those opcodes that are followed immediately by a
+character that is to be tested in some way. This makes it possible to
+centralize the loading of these characters. In the case of Type * etc, the
+"character" is the opcode for \D, \d, \S, \s, \W, or \w, which will always be a
+small value. ***NOTE*** If the start of this table is modified, the two tables
+that follow must also be modified. */
+
+static uschar coptable[] = {
+ 0, /* End */
+ 0, 0, 0, 0, 0, /* \A, \G, \K, \B, \b */
+ 0, 0, 0, 0, 0, 0, /* \D, \d, \S, \s, \W, \w */
+ 0, 0, /* Any, Anybyte */
+ 0, 0, 0, /* NOTPROP, PROP, EXTUNI */
+ 0, 0, 0, 0, 0, /* \R, \H, \h, \V, \v */
+ 0, 0, 0, 0, 0, /* \Z, \z, Opt, ^, $ */
+ 1, /* Char */
+ 1, /* Charnc */
+ 1, /* not */
+ /* Positive single-char repeats */
+ 1, 1, 1, 1, 1, 1, /* *, *?, +, +?, ?, ?? */
+ 3, 3, 3, /* upto, minupto, exact */
+ 1, 1, 1, 3, /* *+, ++, ?+, upto+ */
+ /* Negative single-char repeats - only for chars < 256 */
+ 1, 1, 1, 1, 1, 1, /* NOT *, *?, +, +?, ?, ?? */
+ 3, 3, 3, /* NOT upto, minupto, exact */
+ 1, 1, 1, 3, /* NOT *+, ++, ?+, upto+ */
+ /* Positive type repeats */
+ 1, 1, 1, 1, 1, 1, /* Type *, *?, +, +?, ?, ?? */
+ 3, 3, 3, /* Type upto, minupto, exact */
+ 1, 1, 1, 3, /* Type *+, ++, ?+, upto+ */
+ /* Character class & ref repeats */
+ 0, 0, 0, 0, 0, 0, /* *, *?, +, +?, ?, ?? */
+ 0, 0, /* CRRANGE, CRMINRANGE */
+ 0, /* CLASS */
+ 0, /* NCLASS */
+ 0, /* XCLASS - variable length */
+ 0, /* REF */
+ 0, /* RECURSE */
+ 0, /* CALLOUT */
+ 0, /* Alt */
+ 0, /* Ket */
+ 0, /* KetRmax */
+ 0, /* KetRmin */
+ 0, /* Assert */
+ 0, /* Assert not */
+ 0, /* Assert behind */
+ 0, /* Assert behind not */
+ 0, /* Reverse */
+ 0, 0, 0, 0, /* ONCE, BRA, CBRA, COND */
+ 0, 0, 0, /* SBRA, SCBRA, SCOND */
+ 0, /* CREF */
+ 0, /* RREF */
+ 0, /* DEF */
+ 0, 0, /* BRAZERO, BRAMINZERO */
+ 0, 0, 0, 0, /* PRUNE, SKIP, THEN, COMMIT */
+ 0, 0 /* FAIL, ACCEPT */
+};
+
+/* These 2 tables allow for compact code for testing for \D, \d, \S, \s, \W,
+and \w */
+
+static uschar toptable1[] = {
+ 0, 0, 0, 0, 0, 0,
+ ctype_digit, ctype_digit,
+ ctype_space, ctype_space,
+ ctype_word, ctype_word,
+ 0 /* OP_ANY */
+};
+
+static uschar toptable2[] = {
+ 0, 0, 0, 0, 0, 0,
+ ctype_digit, 0,
+ ctype_space, 0,
+ ctype_word, 0,
+ 1 /* OP_ANY */
+};
+
+
+/* Structure for holding data about a particular state, which is in effect the
+current data for an active path through the match tree. It must consist
+entirely of ints because the working vector we are passed, and which we put
+these structures in, is a vector of ints. */
+
+typedef struct stateblock {
+ int offset; /* Offset to opcode */
+ int count; /* Count for repeats */
+ int ims; /* ims flag bits */
+ int data; /* Some use extra data */
+} stateblock;
+
+#define INTS_PER_STATEBLOCK (sizeof(stateblock)/sizeof(int))
+
+
+#ifdef DEBUG
+/*************************************************
+* Print character string *
+*************************************************/
+
+/* Character string printing function for debugging.
+
+Arguments:
+ p points to string
+ length number of bytes
+ f where to print
+
+Returns: nothing
+*/
+
+static void
+pchars(unsigned char *p, int length, FILE *f)
+{
+int c;
+while (length-- > 0)
+ {
+ if (isprint(c = *(p++)))
+ fprintf(f, "%c", c);
+ else
+ fprintf(f, "\\x%02x", c);
+ }
+}
+#endif
+
+
+
+/*************************************************
+* Execute a Regular Expression - DFA engine *
+*************************************************/
+
+/* This internal function applies a compiled pattern to a subject string,
+starting at a given point, using a DFA engine. This function is called from the
+external one, possibly multiple times if the pattern is not anchored. The
+function calls itself recursively for some kinds of subpattern.
+
+Arguments:
+ md the match_data block with fixed information
+ this_start_code the opening bracket of this subexpression's code
+ current_subject where we currently are in the subject string
+ start_offset start offset in the subject string
+ offsets vector to contain the matching string offsets
+ offsetcount size of same
+ workspace vector of workspace
+ wscount size of same
+ ims the current ims flags
+ rlevel function call recursion level
+ recursing regex recursive call level
+
+Returns: > 0 => number of match offset pairs placed in offsets
+ = 0 => offsets overflowed; longest matches are present
+ -1 => failed to match
+ < -1 => some kind of unexpected problem
+
+The following macros are used for adding states to the two state vectors (one
+for the current character, one for the following character). */
+
+#define ADD_ACTIVE(x,y) \
+ if (active_count++ < wscount) \
+ { \
+ next_active_state->offset = (x); \
+ next_active_state->count = (y); \
+ next_active_state->ims = ims; \
+ next_active_state++; \
+ DPRINTF(("%.*sADD_ACTIVE(%d,%d)\n", rlevel*2-2, SP, (x), (y))); \
+ } \
+ else return PCRE_ERROR_DFA_WSSIZE
+
+#define ADD_ACTIVE_DATA(x,y,z) \
+ if (active_count++ < wscount) \
+ { \
+ next_active_state->offset = (x); \
+ next_active_state->count = (y); \
+ next_active_state->ims = ims; \
+ next_active_state->data = (z); \
+ next_active_state++; \
+ DPRINTF(("%.*sADD_ACTIVE_DATA(%d,%d,%d)\n", rlevel*2-2, SP, (x), (y), (z))); \
+ } \
+ else return PCRE_ERROR_DFA_WSSIZE
+
+#define ADD_NEW(x,y) \
+ if (new_count++ < wscount) \
+ { \
+ next_new_state->offset = (x); \
+ next_new_state->count = (y); \
+ next_new_state->ims = ims; \
+ next_new_state++; \
+ DPRINTF(("%.*sADD_NEW(%d,%d)\n", rlevel*2-2, SP, (x), (y))); \
+ } \
+ else return PCRE_ERROR_DFA_WSSIZE
+
+#define ADD_NEW_DATA(x,y,z) \
+ if (new_count++ < wscount) \
+ { \
+ next_new_state->offset = (x); \
+ next_new_state->count = (y); \
+ next_new_state->ims = ims; \
+ next_new_state->data = (z); \
+ next_new_state++; \
+ DPRINTF(("%.*sADD_NEW_DATA(%d,%d,%d)\n", rlevel*2-2, SP, (x), (y), (z))); \
+ } \
+ else return PCRE_ERROR_DFA_WSSIZE
+
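+/* Illustrative usage (not part of the original source): the workspace that
+these macros fill with state blocks is supplied by the caller of the public
+wrapper pcre_dfa_exec(), for example (sizes here are arbitrary):
+
+ int ovector[20];
+ int workspace[1000];
+ int rc = pcre_dfa_exec(re, NULL, subject, (int)strlen(subject), 0, 0,
+ ovector, 20, workspace, 1000);
+
+If the workspace is too small for the pattern and subject, matching fails
+with PCRE_ERROR_DFA_WSSIZE, as coded in the macros above. */
+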
+/* And now, here is the code */
+
+static int
+internal_dfa_exec(
+ dfa_match_data *md,
+ const uschar *this_start_code,
+ const uschar *current_subject,
+ int start_offset,
+ int *offsets,
+ int offsetcount,
+ int *workspace,
+ int wscount,
+ int ims,
+ int rlevel,
+ int recursing)
+{
+stateblock *active_states, *new_states, *temp_states;
+stateblock *next_active_state, *next_new_state;
+
+const uschar *ctypes, *lcc, *fcc;
+const uschar *ptr;
+const uschar *end_code, *first_op;
+
+int active_count, new_count, match_count;
+
+/* Some fields in the md block are frequently referenced, so we load them into
+independent variables in the hope that this will perform better. */
+
+const uschar *start_subject = md->start_subject;
+const uschar *end_subject = md->end_subject;
+const uschar *start_code = md->start_code;
+
+#ifdef SUPPORT_UTF8
+BOOL utf8 = (md->poptions & PCRE_UTF8) != 0;
+#else
+BOOL utf8 = FALSE;
+#endif
+
+rlevel++;
+offsetcount &= (-2);
+
+wscount -= 2;
+wscount = (wscount - (wscount % (INTS_PER_STATEBLOCK * 2))) /
+ (2 * INTS_PER_STATEBLOCK);
+
+DPRINTF(("\n%.*s---------------------\n"
+ "%.*sCall to internal_dfa_exec f=%d r=%d\n",
+ rlevel*2-2, SP, rlevel*2-2, SP, rlevel, recursing));
+
+ctypes = md->tables + ctypes_offset;
+lcc = md->tables + lcc_offset;
+fcc = md->tables + fcc_offset;
+
+match_count = PCRE_ERROR_NOMATCH; /* A negative number */
+
+active_states = (stateblock *)(workspace + 2);
+next_new_state = new_states = active_states + wscount;
+new_count = 0;
+
+first_op = this_start_code + 1 + LINK_SIZE +
+ ((*this_start_code == OP_CBRA || *this_start_code == OP_SCBRA)? 2:0);
+
+/* The first thing in any (sub) pattern is a bracket of some sort. Push all
+the alternative states onto the list, and find out where the end is. This
+makes it possible to use this function recursively when we want to stop at a
+matching internal ket rather than at the end.
+
+If the first opcode in the first alternative is OP_REVERSE, we are dealing with
+a backward assertion. In that case, we have to find out the maximum amount to
+move back, and set up each alternative appropriately. */
+
+if (*first_op == OP_REVERSE)
+ {
+ int max_back = 0;
+ int gone_back;
+
+ end_code = this_start_code;
+ do
+ {
+ int back = GET(end_code, 2+LINK_SIZE);
+ if (back > max_back) max_back = back;
+ end_code += GET(end_code, 1);
+ }
+ while (*end_code == OP_ALT);
+
+ /* If we can't go back the amount required for the longest lookbehind
+ pattern, go back as far as we can; some alternatives may still be viable. */
+
+#ifdef SUPPORT_UTF8
+ /* In character mode we have to step back character by character */
+
+ if (utf8)
+ {
+ for (gone_back = 0; gone_back < max_back; gone_back++)
+ {
+ if (current_subject <= start_subject) break;
+ current_subject--;
+ while (current_subject > start_subject &&
+ (*current_subject & 0xc0) == 0x80)
+ current_subject--;
+ }
+ }
+ else
+#endif
+
+ /* In byte-mode we can do this quickly. */
+
+ {
+ gone_back = (current_subject - max_back < start_subject)?
+ current_subject - start_subject : max_back;
+ current_subject -= gone_back;
+ }
+
+ /* Now we can process the individual branches. */
+
+ end_code = this_start_code;
+ do
+ {
+ int back = GET(end_code, 2+LINK_SIZE);
+ if (back <= gone_back)
+ {
+ int bstate = end_code - start_code + 2 + 2*LINK_SIZE;
+ ADD_NEW_DATA(-bstate, 0, gone_back - back);
+ }
+ end_code += GET(end_code, 1);
+ }
+ while (*end_code == OP_ALT);
+ }
+
+/* This is the code for a "normal" subpattern (not a backward assertion). The
+start of a whole pattern is always one of these. If we are at the top level,
+we may be asked to restart matching from the same point that we reached for a
+previous partial match. We still have to scan through the top-level branches to
+find the end state. */
+
+else
+ {
+ end_code = this_start_code;
+
+ /* Restarting */
+
+ if (rlevel == 1 && (md->moptions & PCRE_DFA_RESTART) != 0)
+ {
+ do { end_code += GET(end_code, 1); } while (*end_code == OP_ALT);
+ new_count = workspace[1];
+ if (!workspace[0])
+ memcpy(new_states, active_states, new_count * sizeof(stateblock));
+ }
+
+ /* Not restarting */
+
+ else
+ {
+ int length = 1 + LINK_SIZE +
+ ((*this_start_code == OP_CBRA || *this_start_code == OP_SCBRA)? 2:0);
+ do
+ {
+ ADD_NEW(end_code - start_code + length, 0);
+ end_code += GET(end_code, 1);
+ length = 1 + LINK_SIZE;
+ }
+ while (*end_code == OP_ALT);
+ }
+ }
+
+workspace[0] = 0; /* Bit indicating which vector is current */
+
+DPRINTF(("%.*sEnd state = %d\n", rlevel*2-2, SP, end_code - start_code));
+
+/* Loop for scanning the subject */
+
+ptr = current_subject;
+for (;;)
+ {
+ int i, j;
+ int clen, dlen;
+ unsigned int c, d;
+
+ /* Make the new state list into the active state list and empty the
+ new state list. */
+
+ temp_states = active_states;
+ active_states = new_states;
+ new_states = temp_states;
+ active_count = new_count;
+ new_count = 0;
+
+ workspace[0] ^= 1; /* Remember for the restarting feature */
+ workspace[1] = active_count;
+
+#ifdef DEBUG
+ printf("%.*sNext character: rest of subject = \"", rlevel*2-2, SP);
+ pchars((uschar *)ptr, strlen((char *)ptr), stdout);
+ printf("\"\n");
+
+ printf("%.*sActive states: ", rlevel*2-2, SP);
+ for (i = 0; i < active_count; i++)
+ printf("%d/%d ", active_states[i].offset, active_states[i].count);
+ printf("\n");
+#endif
+
+ /* Set the pointers for adding new states */
+
+ next_active_state = active_states + active_count;
+ next_new_state = new_states;
+
+ /* Load the current character from the subject outside the loop, as many
+ different states may want to look at it, and we assume that at least one
+ will. */
+
+ if (ptr < end_subject)
+ {
+ clen = 1; /* Number of bytes in the character */
+#ifdef SUPPORT_UTF8
+ if (utf8) { GETCHARLEN(c, ptr, clen); } else
+#endif /* SUPPORT_UTF8 */
+ c = *ptr;
+ }
+ else
+ {
+ clen = 0; /* This indicates the end of the subject */
+ c = NOTACHAR; /* This value should never actually be used */
+ }
+
+ /* Scan up the active states and act on each one. The result of an action
+ may be to add more states to the currently active list (e.g. on hitting a
+ parenthesis) or it may be to put states on the new list, for considering
+ when we move the character pointer on. */
+
+ for (i = 0; i < active_count; i++)
+ {
+ stateblock *current_state = active_states + i;
+ const uschar *code;
+ int state_offset = current_state->offset;
+ int count, codevalue;
+#ifdef SUPPORT_UCP
+ int chartype, script;
+#endif
+
+#ifdef DEBUG
+ printf ("%.*sProcessing state %d c=", rlevel*2-2, SP, state_offset);
+ if (clen == 0) printf("EOL\n");
+ else if (c > 32 && c < 127) printf("'%c'\n", c);
+ else printf("0x%02x\n", c);
+#endif
+
+ /* This variable is referred to implicitly in the ADD_xxx macros. */
+
+ ims = current_state->ims;
+
+ /* A negative offset is a special case meaning "hold off going to this
+ (negated) state until the number of characters in the data field have
+ been skipped". */
+
+ if (state_offset < 0)
+ {
+ if (current_state->data > 0)
+ {
+ DPRINTF(("%.*sSkipping this character\n", rlevel*2-2, SP));
+ ADD_NEW_DATA(state_offset, current_state->count,
+ current_state->data - 1);
+ continue;
+ }
+ else
+ {
+ current_state->offset = state_offset = -state_offset;
+ }
+ }
+
+ /* Check for a duplicate state with the same count, and skip if found. */
+
+ for (j = 0; j < i; j++)
+ {
+ if (active_states[j].offset == state_offset &&
+ active_states[j].count == current_state->count)
+ {
+ DPRINTF(("%.*sDuplicate state: skipped\n", rlevel*2-2, SP));
+ goto NEXT_ACTIVE_STATE;
+ }
+ }
+
+ /* The state offset is the offset to the opcode */
+
+ code = start_code + state_offset;
+ codevalue = *code;
+
+ /* If this opcode is followed by an inline character, load it. It is
+ tempting to test for the presence of a subject character here, but that
+ is wrong, because sometimes zero repetitions of the subject are
+ permitted.
+
+ We also use this mechanism for opcodes such as OP_TYPEPLUS that take an
+ argument that is not a data character - but is always one byte long. We
+ have to take special action to deal with \P, \p, \H, \h, \V, \v and \X in
+ this case. To keep the other cases fast, convert these ones to new opcodes.
+ */
+
+ if (coptable[codevalue] > 0)
+ {
+ dlen = 1;
+#ifdef SUPPORT_UTF8
+ if (utf8) { GETCHARLEN(d, (code + coptable[codevalue]), dlen); } else
+#endif /* SUPPORT_UTF8 */
+ d = code[coptable[codevalue]];
+ if (codevalue >= OP_TYPESTAR)
+ {
+ switch(d)
+ {
+ case OP_ANYBYTE: return PCRE_ERROR_DFA_UITEM;
+ case OP_NOTPROP:
+ case OP_PROP: codevalue += OP_PROP_EXTRA; break;
+ case OP_ANYNL: codevalue += OP_ANYNL_EXTRA; break;
+ case OP_EXTUNI: codevalue += OP_EXTUNI_EXTRA; break;
+ case OP_NOT_HSPACE:
+ case OP_HSPACE: codevalue += OP_HSPACE_EXTRA; break;
+ case OP_NOT_VSPACE:
+ case OP_VSPACE: codevalue += OP_VSPACE_EXTRA; break;
+ default: break;
+ }
+ }
+ }
+ else
+ {
+ dlen = 0; /* Not strictly necessary, but compilers moan */
+ d = NOTACHAR; /* if these variables are not set. */
+ }
+
+
+ /* Now process the individual opcodes */
+
+ switch (codevalue)
+ {
+
+/* ========================================================================== */
+ /* Reached a closing bracket. If not at the end of the pattern, carry
+ on with the next opcode. Otherwise, unless we have an empty string and
+ PCRE_NOTEMPTY is set, save the match data, shifting up all previous
+ matches so we always have the longest first. */
+
+ case OP_KET:
+ case OP_KETRMIN:
+ case OP_KETRMAX:
+ if (code != end_code)
+ {
+ ADD_ACTIVE(state_offset + 1 + LINK_SIZE, 0);
+ if (codevalue != OP_KET)
+ {
+ ADD_ACTIVE(state_offset - GET(code, 1), 0);
+ }
+ }
+ else if (ptr > current_subject || (md->moptions & PCRE_NOTEMPTY) == 0)
+ {
+ if (match_count < 0) match_count = (offsetcount >= 2)? 1 : 0;
+ else if (match_count > 0 && ++match_count * 2 >= offsetcount)
+ match_count = 0;
+ count = ((match_count == 0)? offsetcount : match_count * 2) - 2;
+ if (count > 0) memmove(offsets + 2, offsets, count * sizeof(int));
+ if (offsetcount >= 2)
+ {
+ offsets[0] = current_subject - start_subject;
+ offsets[1] = ptr - start_subject;
+ DPRINTF(("%.*sSet matched string = \"%.*s\"\n", rlevel*2-2, SP,
+ offsets[1] - offsets[0], current_subject));
+ }
+ if ((md->moptions & PCRE_DFA_SHORTEST) != 0)
+ {
+ DPRINTF(("%.*sEnd of internal_dfa_exec %d: returning %d\n"
+ "%.*s---------------------\n\n", rlevel*2-2, SP, rlevel,
+ match_count, rlevel*2-2, SP));
+ return match_count;
+ }
+ }
+ break;
+
+/* ========================================================================== */
+ /* These opcodes add to the current list of states without looking
+ at the current character. */
+
+ /*-----------------------------------------------------------------*/
+ case OP_ALT:
+ do { code += GET(code, 1); } while (*code == OP_ALT);
+ ADD_ACTIVE(code - start_code, 0);
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_BRA:
+ case OP_SBRA:
+ do
+ {
+ ADD_ACTIVE(code - start_code + 1 + LINK_SIZE, 0);
+ code += GET(code, 1);
+ }
+ while (*code == OP_ALT);
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_CBRA:
+ case OP_SCBRA:
+ ADD_ACTIVE(code - start_code + 3 + LINK_SIZE, 0);
+ code += GET(code, 1);
+ while (*code == OP_ALT)
+ {
+ ADD_ACTIVE(code - start_code + 1 + LINK_SIZE, 0);
+ code += GET(code, 1);
+ }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_BRAZERO:
+ case OP_BRAMINZERO:
+ ADD_ACTIVE(state_offset + 1, 0);
+ code += 1 + GET(code, 2);
+ while (*code == OP_ALT) code += GET(code, 1);
+ ADD_ACTIVE(code - start_code + 1 + LINK_SIZE, 0);
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_CIRC:
+ if ((ptr == start_subject && (md->moptions & PCRE_NOTBOL) == 0) ||
+ ((ims & PCRE_MULTILINE) != 0 &&
+ ptr != end_subject &&
+ WAS_NEWLINE(ptr)))
+ { ADD_ACTIVE(state_offset + 1, 0); }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_EOD:
+ if (ptr >= end_subject) { ADD_ACTIVE(state_offset + 1, 0); }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_OPT:
+ ims = code[1];
+ ADD_ACTIVE(state_offset + 2, 0);
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_SOD:
+ if (ptr == start_subject) { ADD_ACTIVE(state_offset + 1, 0); }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_SOM:
+ if (ptr == start_subject + start_offset) { ADD_ACTIVE(state_offset + 1, 0); }
+ break;
+
+
+/* ========================================================================== */
+ /* These opcodes inspect the next subject character, and sometimes
+ the previous one as well, but do not have an argument. The variable
+ clen contains the length of the current character and is zero if we are
+ at the end of the subject. */
+
+ /*-----------------------------------------------------------------*/
+ case OP_ANY:
+ if (clen > 0 && ((ims & PCRE_DOTALL) != 0 || !IS_NEWLINE(ptr)))
+ { ADD_NEW(state_offset + 1, 0); }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_EODN:
+ if (clen == 0 || (IS_NEWLINE(ptr) && ptr == end_subject - md->nllen))
+ { ADD_ACTIVE(state_offset + 1, 0); }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_DOLL:
+ if ((md->moptions & PCRE_NOTEOL) == 0)
+ {
+ if (clen == 0 ||
+ (IS_NEWLINE(ptr) &&
+ ((ims & PCRE_MULTILINE) != 0 || ptr == end_subject - md->nllen)
+ ))
+ { ADD_ACTIVE(state_offset + 1, 0); }
+ }
+ else if ((ims & PCRE_MULTILINE) != 0 && IS_NEWLINE(ptr))
+ { ADD_ACTIVE(state_offset + 1, 0); }
+ break;
+
+ /*-----------------------------------------------------------------*/
+
+ case OP_DIGIT:
+ case OP_WHITESPACE:
+ case OP_WORDCHAR:
+ if (clen > 0 && c < 256 &&
+ ((ctypes[c] & toptable1[codevalue]) ^ toptable2[codevalue]) != 0)
+ { ADD_NEW(state_offset + 1, 0); }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_NOT_DIGIT:
+ case OP_NOT_WHITESPACE:
+ case OP_NOT_WORDCHAR:
+ if (clen > 0 && (c >= 256 ||
+ ((ctypes[c] & toptable1[codevalue]) ^ toptable2[codevalue]) != 0))
+ { ADD_NEW(state_offset + 1, 0); }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_WORD_BOUNDARY:
+ case OP_NOT_WORD_BOUNDARY:
+ {
+ int left_word, right_word;
+
+ if (ptr > start_subject)
+ {
+ const uschar *temp = ptr - 1;
+#ifdef SUPPORT_UTF8
+ if (utf8) BACKCHAR(temp);
+#endif
+ GETCHARTEST(d, temp);
+ left_word = d < 256 && (ctypes[d] & ctype_word) != 0;
+ }
+ else left_word = 0;
+
+ if (clen > 0) right_word = c < 256 && (ctypes[c] & ctype_word) != 0;
+ else right_word = 0;
+
+ if ((left_word == right_word) == (codevalue == OP_NOT_WORD_BOUNDARY))
+ { ADD_ACTIVE(state_offset + 1, 0); }
+ }
+ break;
+
+
+ /*-----------------------------------------------------------------*/
+ /* Check the next character by Unicode property. We will get here only
+ if the support is in the binary; otherwise a compile-time error occurs.
+ */
+
+#ifdef SUPPORT_UCP
+ case OP_PROP:
+ case OP_NOTPROP:
+ if (clen > 0)
+ {
+ BOOL OK;
+ int category = _pcre_ucp_findprop(c, &chartype, &script);
+ switch(code[1])
+ {
+ case PT_ANY:
+ OK = TRUE;
+ break;
+
+ case PT_LAMP:
+ OK = chartype == ucp_Lu || chartype == ucp_Ll || chartype == ucp_Lt;
+ break;
+
+ case PT_GC:
+ OK = category == code[2];
+ break;
+
+ case PT_PC:
+ OK = chartype == code[2];
+ break;
+
+ case PT_SC:
+ OK = script == code[2];
+ break;
+
+ /* Should never occur, but keep compilers from grumbling. */
+
+ default:
+ OK = codevalue != OP_PROP;
+ break;
+ }
+
+ if (OK == (codevalue == OP_PROP)) { ADD_NEW(state_offset + 3, 0); }
+ }
+ break;
+#endif
+
+
+
+/* ========================================================================== */
+ /* These opcodes likewise inspect the subject character, but have an
+ argument that is not a data character. It is one of these opcodes:
+ OP_ANY, OP_DIGIT, OP_NOT_DIGIT, OP_WHITESPACE, OP_NOT_WHITESPACE, OP_WORDCHAR,
+ OP_NOT_WORDCHAR. The value is loaded into d. */
+
+ case OP_TYPEPLUS:
+ case OP_TYPEMINPLUS:
+ case OP_TYPEPOSPLUS:
+ count = current_state->count; /* Already matched */
+ if (count > 0) { ADD_ACTIVE(state_offset + 2, 0); }
+ if (clen > 0)
+ {
+ if ((c >= 256 && d != OP_DIGIT && d != OP_WHITESPACE && d != OP_WORDCHAR) ||
+ (c < 256 &&
+ (d != OP_ANY ||
+ (ims & PCRE_DOTALL) != 0 ||
+ !IS_NEWLINE(ptr)
+ ) &&
+ ((ctypes[c] & toptable1[d]) ^ toptable2[d]) != 0))
+ {
+ if (count > 0 && codevalue == OP_TYPEPOSPLUS)
+ {
+ active_count--; /* Remove non-match possibility */
+ next_active_state--;
+ }
+ count++;
+ ADD_NEW(state_offset, count);
+ }
+ }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_TYPEQUERY:
+ case OP_TYPEMINQUERY:
+ case OP_TYPEPOSQUERY:
+ ADD_ACTIVE(state_offset + 2, 0);
+ if (clen > 0)
+ {
+ if ((c >= 256 && d != OP_DIGIT && d != OP_WHITESPACE && d != OP_WORDCHAR) ||
+ (c < 256 &&
+ (d != OP_ANY ||
+ (ims & PCRE_DOTALL) != 0 ||
+ !IS_NEWLINE(ptr)
+ ) &&
+ ((ctypes[c] & toptable1[d]) ^ toptable2[d]) != 0))
+ {
+ if (codevalue == OP_TYPEPOSQUERY)
+ {
+ active_count--; /* Remove non-match possibility */
+ next_active_state--;
+ }
+ ADD_NEW(state_offset + 2, 0);
+ }
+ }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_TYPESTAR:
+ case OP_TYPEMINSTAR:
+ case OP_TYPEPOSSTAR:
+ ADD_ACTIVE(state_offset + 2, 0);
+ if (clen > 0)
+ {
+ if ((c >= 256 && d != OP_DIGIT && d != OP_WHITESPACE && d != OP_WORDCHAR) ||
+ (c < 256 &&
+ (d != OP_ANY ||
+ (ims & PCRE_DOTALL) != 0 ||
+ !IS_NEWLINE(ptr)
+ ) &&
+ ((ctypes[c] & toptable1[d]) ^ toptable2[d]) != 0))
+ {
+ if (codevalue == OP_TYPEPOSSTAR)
+ {
+ active_count--; /* Remove non-match possibility */
+ next_active_state--;
+ }
+ ADD_NEW(state_offset, 0);
+ }
+ }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_TYPEEXACT:
+ count = current_state->count; /* Number already matched */
+ if (clen > 0)
+ {
+ if ((c >= 256 && d != OP_DIGIT && d != OP_WHITESPACE && d != OP_WORDCHAR) ||
+ (c < 256 &&
+ (d != OP_ANY ||
+ (ims & PCRE_DOTALL) != 0 ||
+ !IS_NEWLINE(ptr)
+ ) &&
+ ((ctypes[c] & toptable1[d]) ^ toptable2[d]) != 0))
+ {
+ if (++count >= GET2(code, 1))
+ { ADD_NEW(state_offset + 4, 0); }
+ else
+ { ADD_NEW(state_offset, count); }
+ }
+ }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_TYPEUPTO:
+ case OP_TYPEMINUPTO:
+ case OP_TYPEPOSUPTO:
+ ADD_ACTIVE(state_offset + 4, 0);
+ count = current_state->count; /* Number already matched */
+ if (clen > 0)
+ {
+ if ((c >= 256 && d != OP_DIGIT && d != OP_WHITESPACE && d != OP_WORDCHAR) ||
+ (c < 256 &&
+ (d != OP_ANY ||
+ (ims & PCRE_DOTALL) != 0 ||
+ !IS_NEWLINE(ptr)
+ ) &&
+ ((ctypes[c] & toptable1[d]) ^ toptable2[d]) != 0))
+ {
+ if (codevalue == OP_TYPEPOSUPTO)
+ {
+ active_count--; /* Remove non-match possibility */
+ next_active_state--;
+ }
+ if (++count >= GET2(code, 1))
+ { ADD_NEW(state_offset + 4, 0); }
+ else
+ { ADD_NEW(state_offset, count); }
+ }
+ }
+ break;
+
+/* ========================================================================== */
+ /* These are virtual opcodes that are used when something like
+ OP_TYPEPLUS has OP_PROP, OP_NOTPROP, OP_ANYNL, or OP_EXTUNI as its
+ argument. It keeps the code above fast for the other cases. The argument
+ is in the d variable. */
+
+#ifdef SUPPORT_UCP
+ case OP_PROP_EXTRA + OP_TYPEPLUS:
+ case OP_PROP_EXTRA + OP_TYPEMINPLUS:
+ case OP_PROP_EXTRA + OP_TYPEPOSPLUS:
+ count = current_state->count; /* Already matched */
+ if (count > 0) { ADD_ACTIVE(state_offset + 4, 0); }
+ if (clen > 0)
+ {
+ BOOL OK;
+ int category = _pcre_ucp_findprop(c, &chartype, &script);
+ switch(code[2])
+ {
+ case PT_ANY:
+ OK = TRUE;
+ break;
+
+ case PT_LAMP:
+ OK = chartype == ucp_Lu || chartype == ucp_Ll || chartype == ucp_Lt;
+ break;
+
+ case PT_GC:
+ OK = category == code[3];
+ break;
+
+ case PT_PC:
+ OK = chartype == code[3];
+ break;
+
+ case PT_SC:
+ OK = script == code[3];
+ break;
+
+ /* Should never occur, but keep compilers from grumbling. */
+
+ default:
+ OK = codevalue != OP_PROP;
+ break;
+ }
+
+ if (OK == (d == OP_PROP))
+ {
+ if (count > 0 && codevalue == OP_PROP_EXTRA + OP_TYPEPOSPLUS)
+ {
+ active_count--; /* Remove non-match possibility */
+ next_active_state--;
+ }
+ count++;
+ ADD_NEW(state_offset, count);
+ }
+ }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_EXTUNI_EXTRA + OP_TYPEPLUS:
+ case OP_EXTUNI_EXTRA + OP_TYPEMINPLUS:
+ case OP_EXTUNI_EXTRA + OP_TYPEPOSPLUS:
+ count = current_state->count; /* Already matched */
+ if (count > 0) { ADD_ACTIVE(state_offset + 2, 0); }
+ if (clen > 0 && _pcre_ucp_findprop(c, &chartype, &script) != ucp_M)
+ {
+ const uschar *nptr = ptr + clen;
+ int ncount = 0;
+ if (count > 0 && codevalue == OP_EXTUNI_EXTRA + OP_TYPEPOSPLUS)
+ {
+ active_count--; /* Remove non-match possibility */
+ next_active_state--;
+ }
+ while (nptr < end_subject)
+ {
+ int nd;
+ int ndlen = 1;
+ GETCHARLEN(nd, nptr, ndlen);
+ if (_pcre_ucp_findprop(nd, &chartype, &script) != ucp_M) break;
+ ncount++;
+ nptr += ndlen;
+ }
+ count++;
+ ADD_NEW_DATA(-state_offset, count, ncount);
+ }
+ break;
+#endif
+
+ /*-----------------------------------------------------------------*/
+ case OP_ANYNL_EXTRA + OP_TYPEPLUS:
+ case OP_ANYNL_EXTRA + OP_TYPEMINPLUS:
+ case OP_ANYNL_EXTRA + OP_TYPEPOSPLUS:
+ count = current_state->count; /* Already matched */
+ if (count > 0) { ADD_ACTIVE(state_offset + 2, 0); }
+ if (clen > 0)
+ {
+ int ncount = 0;
+ switch (c)
+ {
+ case 0x000b:
+ case 0x000c:
+ case 0x0085:
+ case 0x2028:
+ case 0x2029:
+ if ((md->moptions & PCRE_BSR_ANYCRLF) != 0) break;
+ goto ANYNL01;
+
+ case 0x000d:
+ if (ptr + 1 < end_subject && ptr[1] == 0x0a) ncount = 1;
+ /* Fall through */
+
+ ANYNL01:
+ case 0x000a:
+ if (count > 0 && codevalue == OP_ANYNL_EXTRA + OP_TYPEPOSPLUS)
+ {
+ active_count--; /* Remove non-match possibility */
+ next_active_state--;
+ }
+ count++;
+ ADD_NEW_DATA(-state_offset, count, ncount);
+ break;
+
+ default:
+ break;
+ }
+ }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_VSPACE_EXTRA + OP_TYPEPLUS:
+ case OP_VSPACE_EXTRA + OP_TYPEMINPLUS:
+ case OP_VSPACE_EXTRA + OP_TYPEPOSPLUS:
+ count = current_state->count; /* Already matched */
+ if (count > 0) { ADD_ACTIVE(state_offset + 2, 0); }
+ if (clen > 0)
+ {
+ BOOL OK;
+ switch (c)
+ {
+ case 0x000a:
+ case 0x000b:
+ case 0x000c:
+ case 0x000d:
+ case 0x0085:
+ case 0x2028:
+ case 0x2029:
+ OK = TRUE;
+ break;
+
+ default:
+ OK = FALSE;
+ break;
+ }
+
+ if (OK == (d == OP_VSPACE))
+ {
+ if (count > 0 && codevalue == OP_VSPACE_EXTRA + OP_TYPEPOSPLUS)
+ {
+ active_count--; /* Remove non-match possibility */
+ next_active_state--;
+ }
+ count++;
+ ADD_NEW_DATA(-state_offset, count, 0);
+ }
+ }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_HSPACE_EXTRA + OP_TYPEPLUS:
+ case OP_HSPACE_EXTRA + OP_TYPEMINPLUS:
+ case OP_HSPACE_EXTRA + OP_TYPEPOSPLUS:
+ count = current_state->count; /* Already matched */
+ if (count > 0) { ADD_ACTIVE(state_offset + 2, 0); }
+ if (clen > 0)
+ {
+ BOOL OK;
+ switch (c)
+ {
+ case 0x09: /* HT */
+ case 0x20: /* SPACE */
+ case 0xa0: /* NBSP */
+ case 0x1680: /* OGHAM SPACE MARK */
+ case 0x180e: /* MONGOLIAN VOWEL SEPARATOR */
+ case 0x2000: /* EN QUAD */
+ case 0x2001: /* EM QUAD */
+ case 0x2002: /* EN SPACE */
+ case 0x2003: /* EM SPACE */
+ case 0x2004: /* THREE-PER-EM SPACE */
+ case 0x2005: /* FOUR-PER-EM SPACE */
+ case 0x2006: /* SIX-PER-EM SPACE */
+ case 0x2007: /* FIGURE SPACE */
+ case 0x2008: /* PUNCTUATION SPACE */
+ case 0x2009: /* THIN SPACE */
+ case 0x200A: /* HAIR SPACE */
+ case 0x202f: /* NARROW NO-BREAK SPACE */
+ case 0x205f: /* MEDIUM MATHEMATICAL SPACE */
+ case 0x3000: /* IDEOGRAPHIC SPACE */
+ OK = TRUE;
+ break;
+
+ default:
+ OK = FALSE;
+ break;
+ }
+
+ if (OK == (d == OP_HSPACE))
+ {
+ if (count > 0 && codevalue == OP_HSPACE_EXTRA + OP_TYPEPOSPLUS)
+ {
+ active_count--; /* Remove non-match possibility */
+ next_active_state--;
+ }
+ count++;
+ ADD_NEW_DATA(-state_offset, count, 0);
+ }
+ }
+ break;
+
+ /*-----------------------------------------------------------------*/
+#ifdef SUPPORT_UCP
+ case OP_PROP_EXTRA + OP_TYPEQUERY:
+ case OP_PROP_EXTRA + OP_TYPEMINQUERY:
+ case OP_PROP_EXTRA + OP_TYPEPOSQUERY:
+ count = 4;
+ goto QS1;
+
+ case OP_PROP_EXTRA + OP_TYPESTAR:
+ case OP_PROP_EXTRA + OP_TYPEMINSTAR:
+ case OP_PROP_EXTRA + OP_TYPEPOSSTAR:
+ count = 0;
+
+ QS1:
+
+ ADD_ACTIVE(state_offset + 4, 0);
+ if (clen > 0)
+ {
+ BOOL OK;
+ int category = _pcre_ucp_findprop(c, &chartype, &script);
+ switch(code[2])
+ {
+ case PT_ANY:
+ OK = TRUE;
+ break;
+
+ case PT_LAMP:
+ OK = chartype == ucp_Lu || chartype == ucp_Ll || chartype == ucp_Lt;
+ break;
+
+ case PT_GC:
+ OK = category == code[3];
+ break;
+
+ case PT_PC:
+ OK = chartype == code[3];
+ break;
+
+ case PT_SC:
+ OK = script == code[3];
+ break;
+
+ /* Should never occur, but keep compilers from grumbling. */
+
+ default:
+ OK = codevalue != OP_PROP;
+ break;
+ }
+
+ if (OK == (d == OP_PROP))
+ {
+ if (codevalue == OP_PROP_EXTRA + OP_TYPEPOSSTAR ||
+ codevalue == OP_PROP_EXTRA + OP_TYPEPOSQUERY)
+ {
+ active_count--; /* Remove non-match possibility */
+ next_active_state--;
+ }
+ ADD_NEW(state_offset + count, 0);
+ }
+ }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_EXTUNI_EXTRA + OP_TYPEQUERY:
+ case OP_EXTUNI_EXTRA + OP_TYPEMINQUERY:
+ case OP_EXTUNI_EXTRA + OP_TYPEPOSQUERY:
+ count = 2;
+ goto QS2;
+
+ case OP_EXTUNI_EXTRA + OP_TYPESTAR:
+ case OP_EXTUNI_EXTRA + OP_TYPEMINSTAR:
+ case OP_EXTUNI_EXTRA + OP_TYPEPOSSTAR:
+ count = 0;
+
+ QS2:
+
+ ADD_ACTIVE(state_offset + 2, 0);
+ if (clen > 0 && _pcre_ucp_findprop(c, &chartype, &script) != ucp_M)
+ {
+ const uschar *nptr = ptr + clen;
+ int ncount = 0;
+ if (codevalue == OP_EXTUNI_EXTRA + OP_TYPEPOSSTAR ||
+ codevalue == OP_EXTUNI_EXTRA + OP_TYPEPOSQUERY)
+ {
+ active_count--; /* Remove non-match possibility */
+ next_active_state--;
+ }
+ while (nptr < end_subject)
+ {
+ int nd;
+ int ndlen = 1;
+ GETCHARLEN(nd, nptr, ndlen);
+ if (_pcre_ucp_findprop(nd, &chartype, &script) != ucp_M) break;
+ ncount++;
+ nptr += ndlen;
+ }
+ ADD_NEW_DATA(-(state_offset + count), 0, ncount);
+ }
+ break;
+#endif
+
+ /*-----------------------------------------------------------------*/
+ case OP_ANYNL_EXTRA + OP_TYPEQUERY:
+ case OP_ANYNL_EXTRA + OP_TYPEMINQUERY:
+ case OP_ANYNL_EXTRA + OP_TYPEPOSQUERY:
+ count = 2;
+ goto QS3;
+
+ case OP_ANYNL_EXTRA + OP_TYPESTAR:
+ case OP_ANYNL_EXTRA + OP_TYPEMINSTAR:
+ case OP_ANYNL_EXTRA + OP_TYPEPOSSTAR:
+ count = 0;
+
+ QS3:
+ ADD_ACTIVE(state_offset + 2, 0);
+ if (clen > 0)
+ {
+ int ncount = 0;
+ switch (c)
+ {
+ case 0x000b:
+ case 0x000c:
+ case 0x0085:
+ case 0x2028:
+ case 0x2029:
+ if ((md->moptions & PCRE_BSR_ANYCRLF) != 0) break;
+ goto ANYNL02;
+
+ case 0x000d:
+ if (ptr + 1 < end_subject && ptr[1] == 0x0a) ncount = 1;
+ /* Fall through */
+
+ ANYNL02:
+ case 0x000a:
+ if (codevalue == OP_ANYNL_EXTRA + OP_TYPEPOSSTAR ||
+ codevalue == OP_ANYNL_EXTRA + OP_TYPEPOSQUERY)
+ {
+ active_count--; /* Remove non-match possibility */
+ next_active_state--;
+ }
+ ADD_NEW_DATA(-(state_offset + count), 0, ncount);
+ break;
+
+ default:
+ break;
+ }
+ }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_VSPACE_EXTRA + OP_TYPEQUERY:
+ case OP_VSPACE_EXTRA + OP_TYPEMINQUERY:
+ case OP_VSPACE_EXTRA + OP_TYPEPOSQUERY:
+ count = 2;
+ goto QS4;
+
+ case OP_VSPACE_EXTRA + OP_TYPESTAR:
+ case OP_VSPACE_EXTRA + OP_TYPEMINSTAR:
+ case OP_VSPACE_EXTRA + OP_TYPEPOSSTAR:
+ count = 0;
+
+ QS4:
+ ADD_ACTIVE(state_offset + 2, 0);
+ if (clen > 0)
+ {
+ BOOL OK;
+ switch (c)
+ {
+ case 0x000a:
+ case 0x000b:
+ case 0x000c:
+ case 0x000d:
+ case 0x0085:
+ case 0x2028:
+ case 0x2029:
+ OK = TRUE;
+ break;
+
+ default:
+ OK = FALSE;
+ break;
+ }
+ if (OK == (d == OP_VSPACE))
+ {
+ if (codevalue == OP_VSPACE_EXTRA + OP_TYPEPOSSTAR ||
+ codevalue == OP_VSPACE_EXTRA + OP_TYPEPOSQUERY)
+ {
+ active_count--; /* Remove non-match possibility */
+ next_active_state--;
+ }
+ ADD_NEW_DATA(-(state_offset + count), 0, 0);
+ }
+ }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_HSPACE_EXTRA + OP_TYPEQUERY:
+ case OP_HSPACE_EXTRA + OP_TYPEMINQUERY:
+ case OP_HSPACE_EXTRA + OP_TYPEPOSQUERY:
+ count = 2;
+ goto QS5;
+
+ case OP_HSPACE_EXTRA + OP_TYPESTAR:
+ case OP_HSPACE_EXTRA + OP_TYPEMINSTAR:
+ case OP_HSPACE_EXTRA + OP_TYPEPOSSTAR:
+ count = 0;
+
+ QS5:
+ ADD_ACTIVE(state_offset + 2, 0);
+ if (clen > 0)
+ {
+ BOOL OK;
+ switch (c)
+ {
+ case 0x09: /* HT */
+ case 0x20: /* SPACE */
+ case 0xa0: /* NBSP */
+ case 0x1680: /* OGHAM SPACE MARK */
+ case 0x180e: /* MONGOLIAN VOWEL SEPARATOR */
+ case 0x2000: /* EN QUAD */
+ case 0x2001: /* EM QUAD */
+ case 0x2002: /* EN SPACE */
+ case 0x2003: /* EM SPACE */
+ case 0x2004: /* THREE-PER-EM SPACE */
+ case 0x2005: /* FOUR-PER-EM SPACE */
+ case 0x2006: /* SIX-PER-EM SPACE */
+ case 0x2007: /* FIGURE SPACE */
+ case 0x2008: /* PUNCTUATION SPACE */
+ case 0x2009: /* THIN SPACE */
+ case 0x200A: /* HAIR SPACE */
+ case 0x202f: /* NARROW NO-BREAK SPACE */
+ case 0x205f: /* MEDIUM MATHEMATICAL SPACE */
+ case 0x3000: /* IDEOGRAPHIC SPACE */
+ OK = TRUE;
+ break;
+
+ default:
+ OK = FALSE;
+ break;
+ }
+
+ if (OK == (d == OP_HSPACE))
+ {
+ if (codevalue == OP_HSPACE_EXTRA + OP_TYPEPOSSTAR ||
+ codevalue == OP_HSPACE_EXTRA + OP_TYPEPOSQUERY)
+ {
+ active_count--; /* Remove non-match possibility */
+ next_active_state--;
+ }
+ ADD_NEW_DATA(-(state_offset + count), 0, 0);
+ }
+ }
+ break;
+
+ /*-----------------------------------------------------------------*/
+#ifdef SUPPORT_UCP
+ case OP_PROP_EXTRA + OP_TYPEEXACT:
+ case OP_PROP_EXTRA + OP_TYPEUPTO:
+ case OP_PROP_EXTRA + OP_TYPEMINUPTO:
+ case OP_PROP_EXTRA + OP_TYPEPOSUPTO:
+ if (codevalue != OP_PROP_EXTRA + OP_TYPEEXACT)
+ { ADD_ACTIVE(state_offset + 6, 0); }
+ count = current_state->count; /* Number already matched */
+ if (clen > 0)
+ {
+ BOOL OK;
+ int category = _pcre_ucp_findprop(c, &chartype, &script);
+ switch(code[4])
+ {
+ case PT_ANY:
+ OK = TRUE;
+ break;
+
+ case PT_LAMP:
+ OK = chartype == ucp_Lu || chartype == ucp_Ll || chartype == ucp_Lt;
+ break;
+
+ case PT_GC:
+ OK = category == code[5];
+ break;
+
+ case PT_PC:
+ OK = chartype == code[5];
+ break;
+
+ case PT_SC:
+ OK = script == code[5];
+ break;
+
+ /* Should never occur, but keep compilers from grumbling. */
+
+ default:
+ OK = codevalue != OP_PROP;
+ break;
+ }
+
+ if (OK == (d == OP_PROP))
+ {
+ if (codevalue == OP_PROP_EXTRA + OP_TYPEPOSUPTO)
+ {
+ active_count--; /* Remove non-match possibility */
+ next_active_state--;
+ }
+ if (++count >= GET2(code, 1))
+ { ADD_NEW(state_offset + 6, 0); }
+ else
+ { ADD_NEW(state_offset, count); }
+ }
+ }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_EXTUNI_EXTRA + OP_TYPEEXACT:
+ case OP_EXTUNI_EXTRA + OP_TYPEUPTO:
+ case OP_EXTUNI_EXTRA + OP_TYPEMINUPTO:
+ case OP_EXTUNI_EXTRA + OP_TYPEPOSUPTO:
+ if (codevalue != OP_EXTUNI_EXTRA + OP_TYPEEXACT)
+ { ADD_ACTIVE(state_offset + 4, 0); }
+ count = current_state->count; /* Number already matched */
+ if (clen > 0 && _pcre_ucp_findprop(c, &chartype, &script) != ucp_M)
+ {
+ const uschar *nptr = ptr + clen;
+ int ncount = 0;
+ if (codevalue == OP_EXTUNI_EXTRA + OP_TYPEPOSUPTO)
+ {
+ active_count--; /* Remove non-match possibility */
+ next_active_state--;
+ }
+ while (nptr < end_subject)
+ {
+ int nd;
+ int ndlen = 1;
+ GETCHARLEN(nd, nptr, ndlen);
+ if (_pcre_ucp_findprop(nd, &chartype, &script) != ucp_M) break;
+ ncount++;
+ nptr += ndlen;
+ }
+ if (++count >= GET2(code, 1))
+ { ADD_NEW_DATA(-(state_offset + 4), 0, ncount); }
+ else
+ { ADD_NEW_DATA(-state_offset, count, ncount); }
+ }
+ break;
+#endif
+
+ /*-----------------------------------------------------------------*/
+ case OP_ANYNL_EXTRA + OP_TYPEEXACT:
+ case OP_ANYNL_EXTRA + OP_TYPEUPTO:
+ case OP_ANYNL_EXTRA + OP_TYPEMINUPTO:
+ case OP_ANYNL_EXTRA + OP_TYPEPOSUPTO:
+ if (codevalue != OP_ANYNL_EXTRA + OP_TYPEEXACT)
+ { ADD_ACTIVE(state_offset + 4, 0); }
+ count = current_state->count; /* Number already matched */
+ if (clen > 0)
+ {
+ int ncount = 0;
+ switch (c)
+ {
+ case 0x000b:
+ case 0x000c:
+ case 0x0085:
+ case 0x2028:
+ case 0x2029:
+ if ((md->moptions & PCRE_BSR_ANYCRLF) != 0) break;
+ goto ANYNL03;
+
+ case 0x000d:
+ if (ptr + 1 < end_subject && ptr[1] == 0x0a) ncount = 1;
+ /* Fall through */
+
+ ANYNL03:
+ case 0x000a:
+ if (codevalue == OP_ANYNL_EXTRA + OP_TYPEPOSUPTO)
+ {
+ active_count--; /* Remove non-match possibility */
+ next_active_state--;
+ }
+ if (++count >= GET2(code, 1))
+ { ADD_NEW_DATA(-(state_offset + 4), 0, ncount); }
+ else
+ { ADD_NEW_DATA(-state_offset, count, ncount); }
+ break;
+
+ default:
+ break;
+ }
+ }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_VSPACE_EXTRA + OP_TYPEEXACT:
+ case OP_VSPACE_EXTRA + OP_TYPEUPTO:
+ case OP_VSPACE_EXTRA + OP_TYPEMINUPTO:
+ case OP_VSPACE_EXTRA + OP_TYPEPOSUPTO:
+ if (codevalue != OP_VSPACE_EXTRA + OP_TYPEEXACT)
+ { ADD_ACTIVE(state_offset + 4, 0); }
+ count = current_state->count; /* Number already matched */
+ if (clen > 0)
+ {
+ BOOL OK;
+ switch (c)
+ {
+ case 0x000a:
+ case 0x000b:
+ case 0x000c:
+ case 0x000d:
+ case 0x0085:
+ case 0x2028:
+ case 0x2029:
+ OK = TRUE;
+ break;
+
+ default:
+ OK = FALSE;
+ }
+
+ if (OK == (d == OP_VSPACE))
+ {
+ if (codevalue == OP_VSPACE_EXTRA + OP_TYPEPOSUPTO)
+ {
+ active_count--; /* Remove non-match possibility */
+ next_active_state--;
+ }
+ if (++count >= GET2(code, 1))
+ { ADD_NEW_DATA(-(state_offset + 4), 0, 0); }
+ else
+ { ADD_NEW_DATA(-state_offset, count, 0); }
+ }
+ }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_HSPACE_EXTRA + OP_TYPEEXACT:
+ case OP_HSPACE_EXTRA + OP_TYPEUPTO:
+ case OP_HSPACE_EXTRA + OP_TYPEMINUPTO:
+ case OP_HSPACE_EXTRA + OP_TYPEPOSUPTO:
+ if (codevalue != OP_HSPACE_EXTRA + OP_TYPEEXACT)
+ { ADD_ACTIVE(state_offset + 4, 0); }
+ count = current_state->count; /* Number already matched */
+ if (clen > 0)
+ {
+ BOOL OK;
+ switch (c)
+ {
+ case 0x09: /* HT */
+ case 0x20: /* SPACE */
+ case 0xa0: /* NBSP */
+ case 0x1680: /* OGHAM SPACE MARK */
+ case 0x180e: /* MONGOLIAN VOWEL SEPARATOR */
+ case 0x2000: /* EN QUAD */
+ case 0x2001: /* EM QUAD */
+ case 0x2002: /* EN SPACE */
+ case 0x2003: /* EM SPACE */
+ case 0x2004: /* THREE-PER-EM SPACE */
+ case 0x2005: /* FOUR-PER-EM SPACE */
+ case 0x2006: /* SIX-PER-EM SPACE */
+ case 0x2007: /* FIGURE SPACE */
+ case 0x2008: /* PUNCTUATION SPACE */
+ case 0x2009: /* THIN SPACE */
+ case 0x200A: /* HAIR SPACE */
+ case 0x202f: /* NARROW NO-BREAK SPACE */
+ case 0x205f: /* MEDIUM MATHEMATICAL SPACE */
+ case 0x3000: /* IDEOGRAPHIC SPACE */
+ OK = TRUE;
+ break;
+
+ default:
+ OK = FALSE;
+ break;
+ }
+
+ if (OK == (d == OP_HSPACE))
+ {
+ if (codevalue == OP_HSPACE_EXTRA + OP_TYPEPOSUPTO)
+ {
+ active_count--; /* Remove non-match possibility */
+ next_active_state--;
+ }
+ if (++count >= GET2(code, 1))
+ { ADD_NEW_DATA(-(state_offset + 4), 0, 0); }
+ else
+ { ADD_NEW_DATA(-state_offset, count, 0); }
+ }
+ }
+ break;
+
+/* ========================================================================== */
+ /* These opcodes are followed by a character that is usually compared
+ to the current subject character; it is loaded into d. We still get
+ here even if there is no subject character, because in some cases zero
+ repetitions are permitted. */
+
+ /*-----------------------------------------------------------------*/
+ case OP_CHAR:
+ if (clen > 0 && c == d) { ADD_NEW(state_offset + dlen + 1, 0); }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_CHARNC:
+ if (clen == 0) break;
+
+#ifdef SUPPORT_UTF8
+ if (utf8)
+ {
+ if (c == d) { ADD_NEW(state_offset + dlen + 1, 0); } else
+ {
+ unsigned int othercase;
+ if (c < 128) othercase = fcc[c]; else
+
+ /* If we have Unicode property support, we can use it to test the
+ other case of the character. */
+
+#ifdef SUPPORT_UCP
+ othercase = _pcre_ucp_othercase(c);
+#else
+ othercase = NOTACHAR;
+#endif
+
+ if (d == othercase) { ADD_NEW(state_offset + dlen + 1, 0); }
+ }
+ }
+ else
+#endif /* SUPPORT_UTF8 */
+
+ /* Non-UTF-8 mode */
+ {
+ if (lcc[c] == lcc[d]) { ADD_NEW(state_offset + 2, 0); }
+ }
+ break;
+
+
+#ifdef SUPPORT_UCP
+ /*-----------------------------------------------------------------*/
+ /* This is a tricky one because it can match more than one character.
+ Find out how many characters to skip, and then set up a negative state
+ to wait for them to pass before continuing. */
+
+ case OP_EXTUNI:
+ if (clen > 0 && _pcre_ucp_findprop(c, &chartype, &script) != ucp_M)
+ {
+ const uschar *nptr = ptr + clen;
+ int ncount = 0;
+ while (nptr < end_subject)
+ {
+ int nclen = 1;
+ GETCHARLEN(c, nptr, nclen);
+ if (_pcre_ucp_findprop(c, &chartype, &script) != ucp_M) break;
+ ncount++;
+ nptr += nclen;
+ }
+ ADD_NEW_DATA(-(state_offset + 1), 0, ncount);
+ }
+ break;
+#endif
+
+ /*-----------------------------------------------------------------*/
+      /* This is tricky, like EXTUNI, because it too can match more than one
+ character (when CR is followed by LF). In this case, set up a negative
+ state to wait for one character to pass before continuing. */
+
+ case OP_ANYNL:
+ if (clen > 0) switch(c)
+ {
+ case 0x000b:
+ case 0x000c:
+ case 0x0085:
+ case 0x2028:
+ case 0x2029:
+ if ((md->moptions & PCRE_BSR_ANYCRLF) != 0) break;
+
+ case 0x000a:
+ ADD_NEW(state_offset + 1, 0);
+ break;
+
+ case 0x000d:
+ if (ptr + 1 < end_subject && ptr[1] == 0x0a)
+ {
+ ADD_NEW_DATA(-(state_offset + 1), 0, 1);
+ }
+ else
+ {
+ ADD_NEW(state_offset + 1, 0);
+ }
+ break;
+ }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_NOT_VSPACE:
+ if (clen > 0) switch(c)
+ {
+ case 0x000a:
+ case 0x000b:
+ case 0x000c:
+ case 0x000d:
+ case 0x0085:
+ case 0x2028:
+ case 0x2029:
+ break;
+
+ default:
+ ADD_NEW(state_offset + 1, 0);
+ break;
+ }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_VSPACE:
+ if (clen > 0) switch(c)
+ {
+ case 0x000a:
+ case 0x000b:
+ case 0x000c:
+ case 0x000d:
+ case 0x0085:
+ case 0x2028:
+ case 0x2029:
+ ADD_NEW(state_offset + 1, 0);
+ break;
+
+ default: break;
+ }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_NOT_HSPACE:
+ if (clen > 0) switch(c)
+ {
+ case 0x09: /* HT */
+ case 0x20: /* SPACE */
+ case 0xa0: /* NBSP */
+ case 0x1680: /* OGHAM SPACE MARK */
+ case 0x180e: /* MONGOLIAN VOWEL SEPARATOR */
+ case 0x2000: /* EN QUAD */
+ case 0x2001: /* EM QUAD */
+ case 0x2002: /* EN SPACE */
+ case 0x2003: /* EM SPACE */
+ case 0x2004: /* THREE-PER-EM SPACE */
+ case 0x2005: /* FOUR-PER-EM SPACE */
+ case 0x2006: /* SIX-PER-EM SPACE */
+ case 0x2007: /* FIGURE SPACE */
+ case 0x2008: /* PUNCTUATION SPACE */
+ case 0x2009: /* THIN SPACE */
+ case 0x200A: /* HAIR SPACE */
+ case 0x202f: /* NARROW NO-BREAK SPACE */
+ case 0x205f: /* MEDIUM MATHEMATICAL SPACE */
+ case 0x3000: /* IDEOGRAPHIC SPACE */
+ break;
+
+ default:
+ ADD_NEW(state_offset + 1, 0);
+ break;
+ }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_HSPACE:
+ if (clen > 0) switch(c)
+ {
+ case 0x09: /* HT */
+ case 0x20: /* SPACE */
+ case 0xa0: /* NBSP */
+ case 0x1680: /* OGHAM SPACE MARK */
+ case 0x180e: /* MONGOLIAN VOWEL SEPARATOR */
+ case 0x2000: /* EN QUAD */
+ case 0x2001: /* EM QUAD */
+ case 0x2002: /* EN SPACE */
+ case 0x2003: /* EM SPACE */
+ case 0x2004: /* THREE-PER-EM SPACE */
+ case 0x2005: /* FOUR-PER-EM SPACE */
+ case 0x2006: /* SIX-PER-EM SPACE */
+ case 0x2007: /* FIGURE SPACE */
+ case 0x2008: /* PUNCTUATION SPACE */
+ case 0x2009: /* THIN SPACE */
+ case 0x200A: /* HAIR SPACE */
+ case 0x202f: /* NARROW NO-BREAK SPACE */
+ case 0x205f: /* MEDIUM MATHEMATICAL SPACE */
+ case 0x3000: /* IDEOGRAPHIC SPACE */
+ ADD_NEW(state_offset + 1, 0);
+ break;
+ }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ /* Match a negated single character. This is only used for one-byte
+ characters, that is, we know that d < 256. The character we are
+ checking (c) can be multibyte. */
+
+ case OP_NOT:
+ if (clen > 0)
+ {
+ unsigned int otherd = ((ims & PCRE_CASELESS) != 0)? fcc[d] : d;
+ if (c != d && c != otherd) { ADD_NEW(state_offset + dlen + 1, 0); }
+ }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_PLUS:
+ case OP_MINPLUS:
+ case OP_POSPLUS:
+ case OP_NOTPLUS:
+ case OP_NOTMINPLUS:
+ case OP_NOTPOSPLUS:
+ count = current_state->count; /* Already matched */
+ if (count > 0) { ADD_ACTIVE(state_offset + dlen + 1, 0); }
+ if (clen > 0)
+ {
+ unsigned int otherd = NOTACHAR;
+ if ((ims & PCRE_CASELESS) != 0)
+ {
+#ifdef SUPPORT_UTF8
+ if (utf8 && d >= 128)
+ {
+#ifdef SUPPORT_UCP
+ otherd = _pcre_ucp_othercase(d);
+#endif /* SUPPORT_UCP */
+ }
+ else
+#endif /* SUPPORT_UTF8 */
+ otherd = fcc[d];
+ }
+ if ((c == d || c == otherd) == (codevalue < OP_NOTSTAR))
+ {
+ if (count > 0 &&
+ (codevalue == OP_POSPLUS || codevalue == OP_NOTPOSPLUS))
+ {
+ active_count--; /* Remove non-match possibility */
+ next_active_state--;
+ }
+ count++;
+ ADD_NEW(state_offset, count);
+ }
+ }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_QUERY:
+ case OP_MINQUERY:
+ case OP_POSQUERY:
+ case OP_NOTQUERY:
+ case OP_NOTMINQUERY:
+ case OP_NOTPOSQUERY:
+ ADD_ACTIVE(state_offset + dlen + 1, 0);
+ if (clen > 0)
+ {
+ unsigned int otherd = NOTACHAR;
+ if ((ims & PCRE_CASELESS) != 0)
+ {
+#ifdef SUPPORT_UTF8
+ if (utf8 && d >= 128)
+ {
+#ifdef SUPPORT_UCP
+ otherd = _pcre_ucp_othercase(d);
+#endif /* SUPPORT_UCP */
+ }
+ else
+#endif /* SUPPORT_UTF8 */
+ otherd = fcc[d];
+ }
+ if ((c == d || c == otherd) == (codevalue < OP_NOTSTAR))
+ {
+ if (codevalue == OP_POSQUERY || codevalue == OP_NOTPOSQUERY)
+ {
+ active_count--; /* Remove non-match possibility */
+ next_active_state--;
+ }
+ ADD_NEW(state_offset + dlen + 1, 0);
+ }
+ }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_STAR:
+ case OP_MINSTAR:
+ case OP_POSSTAR:
+ case OP_NOTSTAR:
+ case OP_NOTMINSTAR:
+ case OP_NOTPOSSTAR:
+ ADD_ACTIVE(state_offset + dlen + 1, 0);
+ if (clen > 0)
+ {
+ unsigned int otherd = NOTACHAR;
+ if ((ims & PCRE_CASELESS) != 0)
+ {
+#ifdef SUPPORT_UTF8
+ if (utf8 && d >= 128)
+ {
+#ifdef SUPPORT_UCP
+ otherd = _pcre_ucp_othercase(d);
+#endif /* SUPPORT_UCP */
+ }
+ else
+#endif /* SUPPORT_UTF8 */
+ otherd = fcc[d];
+ }
+ if ((c == d || c == otherd) == (codevalue < OP_NOTSTAR))
+ {
+ if (codevalue == OP_POSSTAR || codevalue == OP_NOTPOSSTAR)
+ {
+ active_count--; /* Remove non-match possibility */
+ next_active_state--;
+ }
+ ADD_NEW(state_offset, 0);
+ }
+ }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_EXACT:
+ case OP_NOTEXACT:
+ count = current_state->count; /* Number already matched */
+ if (clen > 0)
+ {
+ unsigned int otherd = NOTACHAR;
+ if ((ims & PCRE_CASELESS) != 0)
+ {
+#ifdef SUPPORT_UTF8
+ if (utf8 && d >= 128)
+ {
+#ifdef SUPPORT_UCP
+ otherd = _pcre_ucp_othercase(d);
+#endif /* SUPPORT_UCP */
+ }
+ else
+#endif /* SUPPORT_UTF8 */
+ otherd = fcc[d];
+ }
+ if ((c == d || c == otherd) == (codevalue < OP_NOTSTAR))
+ {
+ if (++count >= GET2(code, 1))
+ { ADD_NEW(state_offset + dlen + 3, 0); }
+ else
+ { ADD_NEW(state_offset, count); }
+ }
+ }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_UPTO:
+ case OP_MINUPTO:
+ case OP_POSUPTO:
+ case OP_NOTUPTO:
+ case OP_NOTMINUPTO:
+ case OP_NOTPOSUPTO:
+ ADD_ACTIVE(state_offset + dlen + 3, 0);
+ count = current_state->count; /* Number already matched */
+ if (clen > 0)
+ {
+ unsigned int otherd = NOTACHAR;
+ if ((ims & PCRE_CASELESS) != 0)
+ {
+#ifdef SUPPORT_UTF8
+ if (utf8 && d >= 128)
+ {
+#ifdef SUPPORT_UCP
+ otherd = _pcre_ucp_othercase(d);
+#endif /* SUPPORT_UCP */
+ }
+ else
+#endif /* SUPPORT_UTF8 */
+ otherd = fcc[d];
+ }
+ if ((c == d || c == otherd) == (codevalue < OP_NOTSTAR))
+ {
+ if (codevalue == OP_POSUPTO || codevalue == OP_NOTPOSUPTO)
+ {
+ active_count--; /* Remove non-match possibility */
+ next_active_state--;
+ }
+ if (++count >= GET2(code, 1))
+ { ADD_NEW(state_offset + dlen + 3, 0); }
+ else
+ { ADD_NEW(state_offset, count); }
+ }
+ }
+ break;
+
+
+/* ========================================================================== */
+ /* These are the class-handling opcodes */
+
+ case OP_CLASS:
+ case OP_NCLASS:
+ case OP_XCLASS:
+ {
+ BOOL isinclass = FALSE;
+ int next_state_offset;
+ const uschar *ecode;
+
+ /* For a simple class, there is always just a 32-byte table, and we
+ can set isinclass from it. */
+
+ if (codevalue != OP_XCLASS)
+ {
+ ecode = code + 33;
+ if (clen > 0)
+ {
+ isinclass = (c > 255)? (codevalue == OP_NCLASS) :
+ ((code[1 + c/8] & (1 << (c&7))) != 0);
+ }
+ }
+
+ /* An extended class may have a table or a list of single characters,
+ ranges, or both, and it may be positive or negative. There's a
+ function that sorts all this out. */
+
+ else
+ {
+ ecode = code + GET(code, 1);
+ if (clen > 0) isinclass = _pcre_xclass(c, code + 1 + LINK_SIZE);
+ }
+
+ /* At this point, isinclass is set for all kinds of class, and ecode
+ points to the byte after the end of the class. If there is a
+ quantifier, this is where it will be. */
+
+ next_state_offset = ecode - start_code;
+
+ switch (*ecode)
+ {
+ case OP_CRSTAR:
+ case OP_CRMINSTAR:
+ ADD_ACTIVE(next_state_offset + 1, 0);
+ if (isinclass) { ADD_NEW(state_offset, 0); }
+ break;
+
+ case OP_CRPLUS:
+ case OP_CRMINPLUS:
+ count = current_state->count; /* Already matched */
+ if (count > 0) { ADD_ACTIVE(next_state_offset + 1, 0); }
+ if (isinclass) { count++; ADD_NEW(state_offset, count); }
+ break;
+
+ case OP_CRQUERY:
+ case OP_CRMINQUERY:
+ ADD_ACTIVE(next_state_offset + 1, 0);
+ if (isinclass) { ADD_NEW(next_state_offset + 1, 0); }
+ break;
+
+ case OP_CRRANGE:
+ case OP_CRMINRANGE:
+ count = current_state->count; /* Already matched */
+ if (count >= GET2(ecode, 1))
+ { ADD_ACTIVE(next_state_offset + 5, 0); }
+ if (isinclass)
+ {
+ int max = GET2(ecode, 3);
+ if (++count >= max && max != 0) /* Max 0 => no limit */
+ { ADD_NEW(next_state_offset + 5, 0); }
+ else
+ { ADD_NEW(state_offset, count); }
+ }
+ break;
+
+ default:
+ if (isinclass) { ADD_NEW(next_state_offset, 0); }
+ break;
+ }
+ }
+ break;
+
+/* ========================================================================== */
+ /* These are the opcodes for fancy brackets of various kinds. We have
+ to use recursion in order to handle them. */
+
+ case OP_ASSERT:
+ case OP_ASSERT_NOT:
+ case OP_ASSERTBACK:
+ case OP_ASSERTBACK_NOT:
+ {
+ int rc;
+ int local_offsets[2];
+ int local_workspace[1000];
+ const uschar *endasscode = code + GET(code, 1);
+
+ while (*endasscode == OP_ALT) endasscode += GET(endasscode, 1);
+
+ rc = internal_dfa_exec(
+ md, /* static match data */
+ code, /* this subexpression's code */
+ ptr, /* where we currently are */
+ ptr - start_subject, /* start offset */
+ local_offsets, /* offset vector */
+ sizeof(local_offsets)/sizeof(int), /* size of same */
+ local_workspace, /* workspace vector */
+ sizeof(local_workspace)/sizeof(int), /* size of same */
+ ims, /* the current ims flags */
+ rlevel, /* function recursion level */
+ recursing); /* pass on regex recursion */
+
+ if ((rc >= 0) == (codevalue == OP_ASSERT || codevalue == OP_ASSERTBACK))
+ { ADD_ACTIVE(endasscode + LINK_SIZE + 1 - start_code, 0); }
+ }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_COND:
+ case OP_SCOND:
+ {
+ int local_offsets[1000];
+ int local_workspace[1000];
+ int condcode = code[LINK_SIZE+1];
+
+ /* Back reference conditions are not supported */
+
+ if (condcode == OP_CREF) return PCRE_ERROR_DFA_UCOND;
+
+ /* The DEFINE condition is always false */
+
+ if (condcode == OP_DEF)
+ {
+ ADD_ACTIVE(state_offset + GET(code, 1) + LINK_SIZE + 1, 0);
+ }
+
+ /* The only supported version of OP_RREF is for the value RREF_ANY,
+ which means "test if in any recursion". We can't test for specifically
+ recursed groups. */
+
+ else if (condcode == OP_RREF)
+ {
+ int value = GET2(code, LINK_SIZE+2);
+ if (value != RREF_ANY) return PCRE_ERROR_DFA_UCOND;
+ if (recursing > 0) { ADD_ACTIVE(state_offset + LINK_SIZE + 4, 0); }
+ else { ADD_ACTIVE(state_offset + GET(code, 1) + LINK_SIZE + 1, 0); }
+ }
+
+ /* Otherwise, the condition is an assertion */
+
+ else
+ {
+ int rc;
+ const uschar *asscode = code + LINK_SIZE + 1;
+ const uschar *endasscode = asscode + GET(asscode, 1);
+
+ while (*endasscode == OP_ALT) endasscode += GET(endasscode, 1);
+
+ rc = internal_dfa_exec(
+ md, /* fixed match data */
+ asscode, /* this subexpression's code */
+ ptr, /* where we currently are */
+ ptr - start_subject, /* start offset */
+ local_offsets, /* offset vector */
+ sizeof(local_offsets)/sizeof(int), /* size of same */
+ local_workspace, /* workspace vector */
+ sizeof(local_workspace)/sizeof(int), /* size of same */
+ ims, /* the current ims flags */
+ rlevel, /* function recursion level */
+ recursing); /* pass on regex recursion */
+
+ if ((rc >= 0) ==
+ (condcode == OP_ASSERT || condcode == OP_ASSERTBACK))
+ { ADD_ACTIVE(endasscode + LINK_SIZE + 1 - start_code, 0); }
+ else
+ { ADD_ACTIVE(state_offset + GET(code, 1) + LINK_SIZE + 1, 0); }
+ }
+ }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_RECURSE:
+ {
+ int local_offsets[1000];
+ int local_workspace[1000];
+ int rc;
+
+ DPRINTF(("%.*sStarting regex recursion %d\n", rlevel*2-2, SP,
+ recursing + 1));
+
+ rc = internal_dfa_exec(
+ md, /* fixed match data */
+ start_code + GET(code, 1), /* this subexpression's code */
+ ptr, /* where we currently are */
+ ptr - start_subject, /* start offset */
+ local_offsets, /* offset vector */
+ sizeof(local_offsets)/sizeof(int), /* size of same */
+ local_workspace, /* workspace vector */
+ sizeof(local_workspace)/sizeof(int), /* size of same */
+ ims, /* the current ims flags */
+ rlevel, /* function recursion level */
+ recursing + 1); /* regex recurse level */
+
+ DPRINTF(("%.*sReturn from regex recursion %d: rc=%d\n", rlevel*2-2, SP,
+ recursing + 1, rc));
+
+ /* Ran out of internal offsets */
+
+ if (rc == 0) return PCRE_ERROR_DFA_RECURSE;
+
+ /* For each successful matched substring, set up the next state with a
+ count of characters to skip before trying it. Note that the count is in
+ characters, not bytes. */
+
+ if (rc > 0)
+ {
+ for (rc = rc*2 - 2; rc >= 0; rc -= 2)
+ {
+ const uschar *p = start_subject + local_offsets[rc];
+ const uschar *pp = start_subject + local_offsets[rc+1];
+ int charcount = local_offsets[rc+1] - local_offsets[rc];
+ while (p < pp) if ((*p++ & 0xc0) == 0x80) charcount--;
+ if (charcount > 0)
+ {
+ ADD_NEW_DATA(-(state_offset + LINK_SIZE + 1), 0, (charcount - 1));
+ }
+ else
+ {
+ ADD_ACTIVE(state_offset + LINK_SIZE + 1, 0);
+ }
+ }
+ }
+ else if (rc != PCRE_ERROR_NOMATCH) return rc;
+ }
+ break;
+
+ /*-----------------------------------------------------------------*/
+ case OP_ONCE:
+ {
+ int local_offsets[2];
+ int local_workspace[1000];
+
+ int rc = internal_dfa_exec(
+ md, /* fixed match data */
+ code, /* this subexpression's code */
+ ptr, /* where we currently are */
+ ptr - start_subject, /* start offset */
+ local_offsets, /* offset vector */
+ sizeof(local_offsets)/sizeof(int), /* size of same */
+ local_workspace, /* workspace vector */
+ sizeof(local_workspace)/sizeof(int), /* size of same */
+ ims, /* the current ims flags */
+ rlevel, /* function recursion level */
+ recursing); /* pass on regex recursion */
+
+ if (rc >= 0)
+ {
+ const uschar *end_subpattern = code;
+ int charcount = local_offsets[1] - local_offsets[0];
+ int next_state_offset, repeat_state_offset;
+
+ do { end_subpattern += GET(end_subpattern, 1); }
+ while (*end_subpattern == OP_ALT);
+ next_state_offset = end_subpattern - start_code + LINK_SIZE + 1;
+
+ /* If the end of this subpattern is KETRMAX or KETRMIN, we must
+ arrange for the repeat state also to be added to the relevant list.
+ Calculate the offset, or set -1 for no repeat. */
+
+ repeat_state_offset = (*end_subpattern == OP_KETRMAX ||
+ *end_subpattern == OP_KETRMIN)?
+ end_subpattern - start_code - GET(end_subpattern, 1) : -1;
+
+ /* If we have matched an empty string, add the next state at the
+ current character pointer. This is important so that the duplicate
+ checking kicks in, which is what breaks infinite loops that match an
+ empty string. */
+
+ if (charcount == 0)
+ {
+ ADD_ACTIVE(next_state_offset, 0);
+ }
+
+ /* Optimization: if there are no more active states, and there
+ are no new states yet set up, then skip over the subject string
+ right here, to save looping. Otherwise, set up the new state to swing
+ into action when the end of the substring is reached. */
+
+ else if (i + 1 >= active_count && new_count == 0)
+ {
+ ptr += charcount;
+ clen = 0;
+ ADD_NEW(next_state_offset, 0);
+
+ /* If we are adding a repeat state at the new character position,
+ we must fudge things so that it is the only current state.
+ Otherwise, it might be a duplicate of one we processed before, and
+ that would cause it to be skipped. */
+
+ if (repeat_state_offset >= 0)
+ {
+ next_active_state = active_states;
+ active_count = 0;
+ i = -1;
+ ADD_ACTIVE(repeat_state_offset, 0);
+ }
+ }
+ else
+ {
+ const uschar *p = start_subject + local_offsets[0];
+ const uschar *pp = start_subject + local_offsets[1];
+ while (p < pp) if ((*p++ & 0xc0) == 0x80) charcount--;
+ ADD_NEW_DATA(-next_state_offset, 0, (charcount - 1));
+ if (repeat_state_offset >= 0)
+ { ADD_NEW_DATA(-repeat_state_offset, 0, (charcount - 1)); }
+ }
+
+ }
+ else if (rc != PCRE_ERROR_NOMATCH) return rc;
+ }
+ break;
+
+
+/* ========================================================================== */
+ /* Handle callouts */
+
+ case OP_CALLOUT:
+ if (pcre_callout != NULL)
+ {
+ int rrc;
+ pcre_callout_block cb;
+ cb.version = 1; /* Version 1 of the callout block */
+ cb.callout_number = code[1];
+ cb.offset_vector = offsets;
+ cb.subject = (PCRE_SPTR)start_subject;
+ cb.subject_length = end_subject - start_subject;
+ cb.start_match = current_subject - start_subject;
+ cb.current_position = ptr - start_subject;
+ cb.pattern_position = GET(code, 2);
+ cb.next_item_length = GET(code, 2 + LINK_SIZE);
+ cb.capture_top = 1;
+ cb.capture_last = -1;
+ cb.callout_data = md->callout_data;
+ if ((rrc = (*pcre_callout)(&cb)) < 0) return rrc; /* Abandon */
+ if (rrc == 0) { ADD_ACTIVE(state_offset + 2 + 2*LINK_SIZE, 0); }
+ }
+ break;
+
+
+/* ========================================================================== */
+ default: /* Unsupported opcode */
+ return PCRE_ERROR_DFA_UITEM;
+ }
+
+ NEXT_ACTIVE_STATE: continue;
+
+ } /* End of loop scanning active states */
+
+ /* We have finished the processing at the current subject character. If no
+ new states have been set for the next character, we have found all the
+ matches that we are going to find. If we are at the top level and partial
+ matching has been requested, check for appropriate conditions. */
+
+ if (new_count <= 0)
+ {
+ if (match_count < 0 && /* No matches found */
+ rlevel == 1 && /* Top level match function */
+ (md->moptions & PCRE_PARTIAL) != 0 && /* Want partial matching */
+ ptr >= end_subject && /* Reached end of subject */
+ ptr > current_subject) /* Matched non-empty string */
+ {
+ if (offsetcount >= 2)
+ {
+ offsets[0] = current_subject - start_subject;
+ offsets[1] = end_subject - start_subject;
+ }
+ match_count = PCRE_ERROR_PARTIAL;
+ }
+
+ DPRINTF(("%.*sEnd of internal_dfa_exec %d: returning %d\n"
+ "%.*s---------------------\n\n", rlevel*2-2, SP, rlevel, match_count,
+ rlevel*2-2, SP));
+ break; /* In effect, "return", but see the comment below */
+ }
+
+ /* One or more states are active for the next character. */
+
+ ptr += clen; /* Advance to next subject character */
+ } /* Loop to move along the subject string */
+
+/* Control gets here from "break" a few lines above. We do it this way because
+if we use "return" above, we have compiler trouble. Some compilers warn if
+there's nothing here because they think the function doesn't return a value. On
+the other hand, if we put a dummy statement here, some more clever compilers
+complain that it can't be reached. Sigh. */
+
+return match_count;
+}
+
+
+
+
+/*************************************************
+* Execute a Regular Expression - DFA engine *
+*************************************************/
+
+/* This external function applies a compiled re to a subject string using a DFA
+engine. This function calls the internal function multiple times if the pattern
+is not anchored.
+
+Arguments:
+ argument_re points to the compiled expression
+ extra_data points to extra data or is NULL
+ subject points to the subject string
+ length length of subject string (may contain binary zeros)
+ start_offset where to start in the subject string
+ options option bits
+ offsets vector of match offsets
+ offsetcount size of same
+ workspace workspace vector
+ wscount size of same
+
+Returns: > 0 => number of match offset pairs placed in offsets
+ = 0 => offsets overflowed; longest matches are present
+ -1 => failed to match
+ < -1 => some kind of unexpected problem
+*/
+
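+/* Illustrative usage sketch (not part of the original source; the variable
+names are hypothetical and error handling is omitted). It simply instantiates
+the argument list documented above:
+
+    const char *err;
+    int erroff;
+    const char *subject = "see <b>PCRE</b>";
+    int ovector[30];
+    int wspace[1000];
+    pcre *re = pcre_compile("<[^>]*>", 0, &err, &erroff, NULL);
+    int rc = pcre_dfa_exec(re, NULL, subject, (int)strlen(subject), 0, 0,
+                           ovector, 30, wspace, 1000);
+
+A positive rc gives the number of offset pairs placed in ovector; the
+workspace vector must hold at least 20 ints (see the plausibility checks
+below). */
+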
+PCRE_EXP_DEFN int
+pcre_dfa_exec(const pcre *argument_re, const pcre_extra *extra_data,
+ const char *subject, int length, int start_offset, int options, int *offsets,
+ int offsetcount, int *workspace, int wscount)
+{
+real_pcre *re = (real_pcre *)argument_re;
+dfa_match_data match_block;
+dfa_match_data *md = &match_block;
+BOOL utf8, anchored, startline, firstline;
+const uschar *current_subject, *end_subject, *lcc;
+
+pcre_study_data internal_study;
+const pcre_study_data *study = NULL;
+real_pcre internal_re;
+
+const uschar *req_byte_ptr;
+const uschar *start_bits = NULL;
+BOOL first_byte_caseless = FALSE;
+BOOL req_byte_caseless = FALSE;
+int first_byte = -1;
+int req_byte = -1;
+int req_byte2 = -1;
+int newline;
+
+/* Plausibility checks */
+
+if ((options & ~PUBLIC_DFA_EXEC_OPTIONS) != 0) return PCRE_ERROR_BADOPTION;
+if (re == NULL || subject == NULL || workspace == NULL ||
+ (offsets == NULL && offsetcount > 0)) return PCRE_ERROR_NULL;
+if (offsetcount < 0) return PCRE_ERROR_BADCOUNT;
+if (wscount < 20) return PCRE_ERROR_DFA_WSSIZE;
+
+/* We need to find the pointer to any study data before we test for byte
+flipping, so we scan the extra_data block first. This may set two fields in the
+match block, so we must initialize them beforehand. However, the other fields
+in the match block must not be set until after the byte flipping. */
+
+md->tables = re->tables;
+md->callout_data = NULL;
+
+if (extra_data != NULL)
+ {
+ unsigned int flags = extra_data->flags;
+ if ((flags & PCRE_EXTRA_STUDY_DATA) != 0)
+ study = (const pcre_study_data *)extra_data->study_data;
+ if ((flags & PCRE_EXTRA_MATCH_LIMIT) != 0) return PCRE_ERROR_DFA_UMLIMIT;
+ if ((flags & PCRE_EXTRA_MATCH_LIMIT_RECURSION) != 0)
+ return PCRE_ERROR_DFA_UMLIMIT;
+ if ((flags & PCRE_EXTRA_CALLOUT_DATA) != 0)
+ md->callout_data = extra_data->callout_data;
+ if ((flags & PCRE_EXTRA_TABLES) != 0)
+ md->tables = extra_data->tables;
+ }
+
+/* Check that the first field in the block is the magic number. If it is not,
+test for a regex that was compiled on a host of opposite endianness. If this is
+the case, flipped values are put in internal_re and internal_study if there was
+study data too. */
+
+if (re->magic_number != MAGIC_NUMBER)
+ {
+ re = _pcre_try_flipped(re, &internal_re, study, &internal_study);
+ if (re == NULL) return PCRE_ERROR_BADMAGIC;
+ if (study != NULL) study = &internal_study;
+ }
+
+/* Set some local values */
+
+current_subject = (const unsigned char *)subject + start_offset;
+end_subject = (const unsigned char *)subject + length;
+req_byte_ptr = current_subject - 1;
+
+#ifdef SUPPORT_UTF8
+utf8 = (re->options & PCRE_UTF8) != 0;
+#else
+utf8 = FALSE;
+#endif
+
+anchored = (options & (PCRE_ANCHORED|PCRE_DFA_RESTART)) != 0 ||
+ (re->options & PCRE_ANCHORED) != 0;
+
+/* The remaining fixed data for passing around. */
+
+md->start_code = (const uschar *)argument_re +
+ re->name_table_offset + re->name_count * re->name_entry_size;
+md->start_subject = (const unsigned char *)subject;
+md->end_subject = end_subject;
+md->moptions = options;
+md->poptions = re->options;
+
+/* If the BSR option is not set at match time, copy what was set
+at compile time. */
+
+if ((md->moptions & (PCRE_BSR_ANYCRLF|PCRE_BSR_UNICODE)) == 0)
+ {
+ if ((re->options & (PCRE_BSR_ANYCRLF|PCRE_BSR_UNICODE)) != 0)
+ md->moptions |= re->options & (PCRE_BSR_ANYCRLF|PCRE_BSR_UNICODE);
+#ifdef BSR_ANYCRLF
+ else md->moptions |= PCRE_BSR_ANYCRLF;
+#endif
+ }
+
+/* Handle different types of newline. The three bits give eight cases. If
+nothing is set at run time, whatever was used at compile time applies. */
+
+switch ((((options & PCRE_NEWLINE_BITS) == 0)? re->options : (pcre_uint32)options) &
+ PCRE_NEWLINE_BITS)
+ {
+ case 0: newline = NEWLINE; break; /* Compile-time default */
+ case PCRE_NEWLINE_CR: newline = '\r'; break;
+ case PCRE_NEWLINE_LF: newline = '\n'; break;
+ case PCRE_NEWLINE_CR+
+ PCRE_NEWLINE_LF: newline = ('\r' << 8) | '\n'; break;
+ case PCRE_NEWLINE_ANY: newline = -1; break;
+ case PCRE_NEWLINE_ANYCRLF: newline = -2; break;
+ default: return PCRE_ERROR_BADNEWLINE;
+ }
+
+if (newline == -2)
+ {
+ md->nltype = NLTYPE_ANYCRLF;
+ }
+else if (newline < 0)
+ {
+ md->nltype = NLTYPE_ANY;
+ }
+else
+ {
+ md->nltype = NLTYPE_FIXED;
+ if (newline > 255)
+ {
+ md->nllen = 2;
+ md->nl[0] = (newline >> 8) & 255;
+ md->nl[1] = newline & 255;
+ }
+ else
+ {
+ md->nllen = 1;
+ md->nl[0] = newline;
+ }
+ }
+
+/* Check a UTF-8 string if required. Unfortunately there's no way of passing
+back the character offset. */
+
+#ifdef SUPPORT_UTF8
+if (utf8 && (options & PCRE_NO_UTF8_CHECK) == 0)
+ {
+ if (_pcre_valid_utf8((uschar *)subject, length) >= 0)
+ return PCRE_ERROR_BADUTF8;
+ if (start_offset > 0 && start_offset < length)
+ {
+ int tb = ((uschar *)subject)[start_offset];
+ if (tb > 127)
+ {
+ tb &= 0xc0;
+ if (tb != 0 && tb != 0xc0) return PCRE_ERROR_BADUTF8_OFFSET;
+ }
+ }
+ }
+#endif
+
+/* If the exec call supplied NULL for tables, use the inbuilt ones. This
+is a feature that makes it possible to save compiled regexes and re-use them
+in other programs later. */
+
+if (md->tables == NULL) md->tables = _pcre_default_tables;
+
+/* The lower casing table and the "must be at the start of a line" flag are
+used in a loop when finding where to start. */
+
+lcc = md->tables + lcc_offset;
+startline = (re->flags & PCRE_STARTLINE) != 0;
+firstline = (re->options & PCRE_FIRSTLINE) != 0;
+
+/* Set up the first character to match, if available. The first_byte value is
+never set for an anchored regular expression, but the anchoring may be forced
+at run time, so we have to test for anchoring. The first char may be unset for
+an unanchored pattern, of course. If there's no first char and the pattern was
+studied, there may be a bitmap of possible first characters. */
+
+if (!anchored)
+ {
+ if ((re->flags & PCRE_FIRSTSET) != 0)
+ {
+ first_byte = re->first_byte & 255;
+ if ((first_byte_caseless = ((re->first_byte & REQ_CASELESS) != 0)) == TRUE)
+ first_byte = lcc[first_byte];
+ }
+ else
+ {
+ if (startline && study != NULL &&
+ (study->options & PCRE_STUDY_MAPPED) != 0)
+ start_bits = study->start_bits;
+ }
+ }
+
+/* For anchored or unanchored matches, there may be a "last known required
+character" set. */
+
+if ((re->flags & PCRE_REQCHSET) != 0)
+ {
+ req_byte = re->req_byte & 255;
+ req_byte_caseless = (re->req_byte & REQ_CASELESS) != 0;
+ req_byte2 = (md->tables + fcc_offset)[req_byte]; /* case flipped */
+ }
+
+/* Call the main matching function, looping for a non-anchored regex after a
+failed match. Unless restarting, optimize by moving to the first match
+character if possible, when not anchored. Then unless wanting a partial match,
+check for a required later character. */
+
+for (;;)
+ {
+ int rc;
+
+ if ((options & PCRE_DFA_RESTART) == 0)
+ {
+ const uschar *save_end_subject = end_subject;
+
+ /* Advance to a unique first char if possible. If firstline is TRUE, the
+ start of the match is constrained to the first line of a multiline string.
+ Implement this by temporarily adjusting end_subject so that we stop
+ scanning at a newline. If the match fails at the newline, later code breaks
+ this loop. */
+
+ if (firstline)
+ {
+ const uschar *t = current_subject;
+ while (t < md->end_subject && !IS_NEWLINE(t)) t++;
+ end_subject = t;
+ }
+
+ if (first_byte >= 0)
+ {
+ if (first_byte_caseless)
+ while (current_subject < end_subject &&
+ lcc[*current_subject] != first_byte)
+ current_subject++;
+ else
+ while (current_subject < end_subject && *current_subject != first_byte)
+ current_subject++;
+ }
+
+ /* Or to just after a linebreak for a multiline match if possible */
+
+ else if (startline)
+ {
+ if (current_subject > md->start_subject + start_offset)
+ {
+ while (current_subject <= end_subject && !WAS_NEWLINE(current_subject))
+ current_subject++;
+
+ /* If we have just passed a CR and the newline option is ANY or
+ ANYCRLF, and we are now at a LF, advance the match position by one more
+ character. */
+
+ if (current_subject[-1] == '\r' &&
+ (md->nltype == NLTYPE_ANY || md->nltype == NLTYPE_ANYCRLF) &&
+ current_subject < end_subject &&
+ *current_subject == '\n')
+ current_subject++;
+ }
+ }
+
+ /* Or to a non-unique first char after study */
+
+ else if (start_bits != NULL)
+ {
+ while (current_subject < end_subject)
+ {
+ register unsigned int c = *current_subject;
+ if ((start_bits[c/8] & (1 << (c&7))) == 0) current_subject++;
+ else break;
+ }
+ }
+
+ /* Restore fudged end_subject */
+
+ end_subject = save_end_subject;
+ }
+
+ /* If req_byte is set, we know that that character must appear in the subject
+ for the match to succeed. If the first character is set, req_byte must be
+ later in the subject; otherwise the test starts at the match point. This
+ optimization can save a huge amount of work in patterns with nested unlimited
+ repeats that aren't going to match. Writing separate code for cased/caseless
+ versions makes it go faster, as does using an autoincrement and backing off
+ on a match.
+
+ HOWEVER: when the subject string is very, very long, searching to its end can
+ take a long time, and give bad performance on quite ordinary patterns. This
+ showed up when somebody was matching /^C/ on a 32-megabyte string... so we
+ don't do this when the string is sufficiently long.
+
+ ALSO: this processing is disabled when partial matching is requested.
+ */
+
+ if (req_byte >= 0 &&
+ end_subject - current_subject < REQ_BYTE_MAX &&
+ (options & PCRE_PARTIAL) == 0)
+ {
+ register const uschar *p = current_subject + ((first_byte >= 0)? 1 : 0);
+
+ /* We don't need to repeat the search if we haven't yet reached the
+ place we found it at last time. */
+
+ if (p > req_byte_ptr)
+ {
+ if (req_byte_caseless)
+ {
+ while (p < end_subject)
+ {
+ register int pp = *p++;
+ if (pp == req_byte || pp == req_byte2) { p--; break; }
+ }
+ }
+ else
+ {
+ while (p < end_subject)
+ {
+ if (*p++ == req_byte) { p--; break; }
+ }
+ }
+
+ /* If we can't find the required character, break the matching loop,
+  which will cause a return of PCRE_ERROR_NOMATCH. */
+
+ if (p >= end_subject) break;
+
+ /* If we have found the required character, save the point where we
+ found it, so that we don't search again next time round the loop if
+ the start hasn't passed this character yet. */
+
+ req_byte_ptr = p;
+ }
+ }
+
+ /* OK, now we can do the business */
+
+ rc = internal_dfa_exec(
+ md, /* fixed match data */
+ md->start_code, /* this subexpression's code */
+ current_subject, /* where we currently are */
+ start_offset, /* start offset in subject */
+ offsets, /* offset vector */
+ offsetcount, /* size of same */
+ workspace, /* workspace vector */
+ wscount, /* size of same */
+ re->options & (PCRE_CASELESS|PCRE_MULTILINE|PCRE_DOTALL), /* ims flags */
+ 0, /* function recurse level */
+ 0); /* regex recurse level */
+
+ /* Anything other than "no match" means we are done, always; otherwise, carry
+ on only if not anchored. */
+
+ if (rc != PCRE_ERROR_NOMATCH || anchored) return rc;
+
+ /* Advance to the next subject character unless we are at the end of a line
+ and firstline is set. */
+
+ if (firstline && IS_NEWLINE(current_subject)) break;
+ current_subject++;
+ if (utf8)
+ {
+ while (current_subject < end_subject && (*current_subject & 0xc0) == 0x80)
+ current_subject++;
+ }
+ if (current_subject > end_subject) break;
+
+ /* If we have just passed a CR and we are now at a LF, and the pattern does
+ not contain any explicit matches for \r or \n, and the newline option is CRLF
+ or ANY or ANYCRLF, advance the match position by one more character. */
+
+ if (current_subject[-1] == '\r' &&
+ current_subject < end_subject &&
+ *current_subject == '\n' &&
+ (re->flags & PCRE_HASCRORLF) == 0 &&
+ (md->nltype == NLTYPE_ANY ||
+ md->nltype == NLTYPE_ANYCRLF ||
+ md->nllen == 2))
+ current_subject++;
+
+ } /* "Bumpalong" loop */
+
+return PCRE_ERROR_NOMATCH;
+}
+
+/* End of pcre_dfa_exec.c */
diff --git a/src/third_party/pcre-7.4/pcre_exec.c b/src/third_party/pcre-7.4/pcre_exec.c
new file mode 100644
index 00000000000..657c142c061
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcre_exec.c
@@ -0,0 +1,4938 @@
+/*************************************************
+* Perl-Compatible Regular Expressions *
+*************************************************/
+
+/* PCRE is a library of functions to support regular expressions whose syntax
+and semantics are as close as possible to those of the Perl 5 language.
+
+ Written by Philip Hazel
+ Copyright (c) 1997-2007 University of Cambridge
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+
+/* This module contains pcre_exec(), the externally visible function that does
+pattern matching using an NFA algorithm, trying to mimic Perl as closely as
+possible. There are also some static supporting functions. */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#define NLBLOCK md /* Block containing newline information */
+#define PSSTART start_subject /* Field containing processed string start */
+#define PSEND end_subject /* Field containing processed string end */
+
+#include "pcre_internal.h"
+
+/* Undefine some potentially clashing cpp symbols */
+
+#undef min
+#undef max
+
+/* Flag bits for the match() function */
+
+#define match_condassert 0x01 /* Called to check a condition assertion */
+#define match_cbegroup 0x02 /* Could-be-empty unlimited repeat group */
+
+/* Non-error returns from the match() function. Error returns are externally
+defined PCRE_ERROR_xxx codes, which are all negative. */
+
+#define MATCH_MATCH 1
+#define MATCH_NOMATCH 0
+
+/* Special internal returns from the match() function. Make them sufficiently
+negative to avoid the external error codes. */
+
+#define MATCH_COMMIT (-999)
+#define MATCH_PRUNE (-998)
+#define MATCH_SKIP (-997)
+#define MATCH_THEN (-996)
+
+/* Maximum number of ints of offset to save on the stack for recursive calls.
+If the offset vector is bigger, malloc is used. This should be a multiple of 3,
+because the offset vector is always a multiple of 3 long. */
+
+#define REC_STACK_SAVE_MAX 30
+
+/* Min and max values for the common repeats; for the maxima, 0 => infinity */
+
+static const char rep_min[] = { 0, 0, 1, 1, 0, 0 };
+static const char rep_max[] = { 0, 0, 0, 0, 1, 1 };
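+
+/* These tables are presumably indexed by quantifier opcode in the order STAR,
+MINSTAR, PLUS, MINPLUS, QUERY, MINQUERY, so that, for example, PLUS gets
+min = 1 with no upper limit and QUERY gets min = 0, max = 1. */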
+
+
+
+#ifdef DEBUG
+/*************************************************
+* Debugging function to print chars *
+*************************************************/
+
+/* Print a sequence of chars in printable format, stopping at the end of the
+subject if the requested length runs past it.
+
+Arguments:
+ p points to characters
+ length number to print
+ is_subject TRUE if printing from within md->start_subject
+ md pointer to matching data block, if is_subject is TRUE
+
+Returns: nothing
+*/
+
+static void
+pchars(const uschar *p, int length, BOOL is_subject, match_data *md)
+{
+unsigned int c;
+if (is_subject && length > md->end_subject - p) length = md->end_subject - p;
+while (length-- > 0)
+ if (isprint(c = *(p++))) printf("%c", c); else printf("\\x%02x", c);
+}
+#endif
+
+
+
+/*************************************************
+* Match a back-reference *
+*************************************************/
+
+/* If a back reference hasn't been set, the length that is passed is greater
+than the number of characters left in the string, so the match fails.
+
+Arguments:
+ offset index into the offset vector
+ eptr points into the subject
+ length length to be matched
+ md points to match data block
+ ims the ims flags
+
+Returns: TRUE if matched
+*/
+
+static BOOL
+match_ref(int offset, register USPTR eptr, int length, match_data *md,
+ unsigned long int ims)
+{
+USPTR p = md->start_subject + md->offset_vector[offset];
+
+#ifdef DEBUG
+if (eptr >= md->end_subject)
+ printf("matching subject <null>");
+else
+ {
+ printf("matching subject ");
+ pchars(eptr, length, TRUE, md);
+ }
+printf(" against backref ");
+pchars(p, length, FALSE, md);
+printf("\n");
+#endif
+
+/* Always fail if not enough characters left */
+
+if (length > md->end_subject - eptr) return FALSE;
+
+/* Separate the caseless case for speed */
+
+if ((ims & PCRE_CASELESS) != 0)
+ {
+ while (length-- > 0)
+ if (md->lcc[*p++] != md->lcc[*eptr++]) return FALSE;
+ }
+else
+ { while (length-- > 0) if (*p++ != *eptr++) return FALSE; }
+
+return TRUE;
+}
+
+
+
+/***************************************************************************
+****************************************************************************
+ RECURSION IN THE match() FUNCTION
+
+The match() function is highly recursive, though not every recursive call
+increases the recursive depth. Nevertheless, some regular expressions can cause
+it to recurse to a great depth. I was writing for Unix, so I just let it call
+itself recursively. This uses the stack for saving everything that has to be
+saved for a recursive call. On Unix, the stack can be large, and this works
+fine.
+
+It turns out that on some non-Unix-like systems there are problems with
+programs that use a lot of stack. (This despite the fact that every last chip
+has oodles of memory these days, and techniques for extending the stack have
+been known for decades.) So....
+
+There is a fudge, triggered by defining NO_RECURSE, which avoids recursive
+calls by keeping local variables that need to be preserved in blocks of memory
+obtained from malloc() instead of on the stack. Macros are used to
+achieve this so that the actual code doesn't look very different to what it
+always used to.
+
+The original heap-recursive code used longjmp(). However, it seems that this
+can be very slow on some operating systems. Following a suggestion from Stan
+Switzer, the use of longjmp() has been abolished, at the cost of having to
+provide a unique number for each call to RMATCH. There is no way of generating
+a sequence of numbers at compile time in C. I have given them names, to make
+them stand out more clearly.
+
+Crude tests on x86 Linux show a small speedup of around 5-8%. However, on
+FreeBSD, avoiding longjmp() more than halves the time taken to run the standard
+tests. Furthermore, not using longjmp() means that local dynamic variables
+don't have indeterminate values; this has meant that the frame size can be
+reduced because the result can be "passed back" by straight setting of the
+variable instead of being passed in the frame.
+****************************************************************************
+***************************************************************************/
+
+/* Numbers for RMATCH calls. When this list is changed, the code at HEAP_RETURN
+below must be updated in sync. */
+
+enum { RM1=1, RM2, RM3, RM4, RM5, RM6, RM7, RM8, RM9, RM10,
+ RM11, RM12, RM13, RM14, RM15, RM16, RM17, RM18, RM19, RM20,
+ RM21, RM22, RM23, RM24, RM25, RM26, RM27, RM28, RM29, RM30,
+ RM31, RM32, RM33, RM34, RM35, RM36, RM37, RM38, RM39, RM40,
+ RM41, RM42, RM43, RM44, RM45, RM46, RM47, RM48, RM49, RM50,
+ RM51, RM52, RM53, RM54 };
+
+/* These versions of the macros use the stack, as normal. There are debugging
+versions and production versions. Note that the "rw" argument of RMATCH isn't
+actually used in this definition. */
+
+#ifndef NO_RECURSE
+#define REGISTER register
+
+#ifdef DEBUG
+#define RMATCH(ra,rb,rc,rd,re,rf,rg,rw) \
+ { \
+ printf("match() called in line %d\n", __LINE__); \
+ rrc = match(ra,rb,mstart,rc,rd,re,rf,rg,rdepth+1); \
+ printf("to line %d\n", __LINE__); \
+ }
+#define RRETURN(ra) \
+ { \
+ printf("match() returned %d from line %d ", ra, __LINE__); \
+ return ra; \
+ }
+#else
+#define RMATCH(ra,rb,rc,rd,re,rf,rg,rw) \
+ rrc = match(ra,rb,mstart,rc,rd,re,rf,rg,rdepth+1)
+#define RRETURN(ra) return ra
+#endif
+
+#else
+
+
+/* These versions of the macros manage a private stack on the heap. Note that
+the "rd" argument of RMATCH isn't actually used in this definition. It's the md
+argument of match(), which never changes. */
+
+#define REGISTER
+
+#define RMATCH(ra,rb,rc,rd,re,rf,rg,rw)\
+ {\
+ heapframe *newframe = ((heapframe*)(pcre_stack_malloc)(sizeof(heapframe)));\
+ frame->Xwhere = rw; \
+ newframe->Xeptr = ra;\
+ newframe->Xecode = rb;\
+ newframe->Xmstart = mstart;\
+ newframe->Xoffset_top = rc;\
+ newframe->Xims = re;\
+ newframe->Xeptrb = rf;\
+ newframe->Xflags = rg;\
+ newframe->Xrdepth = frame->Xrdepth + 1;\
+ newframe->Xprevframe = frame;\
+ frame = newframe;\
+ DPRINTF(("restarting from line %d\n", __LINE__));\
+ goto HEAP_RECURSE;\
+ L_##rw:\
+ DPRINTF(("jumped back to line %d\n", __LINE__));\
+ }
+
+#define RRETURN(ra)\
+ {\
+ heapframe *newframe = frame;\
+ frame = newframe->Xprevframe;\
+ (pcre_stack_free)(newframe);\
+ if (frame != NULL)\
+ {\
+ rrc = ra;\
+ goto HEAP_RETURN;\
+ }\
+ return ra;\
+ }
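+
+/* In outline: this version of RMATCH records the resume point (rw) in the
+current frame, allocates a new frame holding the "recursive" arguments, and
+jumps to HEAP_RECURSE to re-enter the main body with that frame. RRETURN frees
+the current frame and, unless it was the top-level frame, jumps to HEAP_RETURN,
+which dispatches control back to the saved L_xx label. */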
+
+
+/* Structure for remembering the local variables in a private frame */
+
+typedef struct heapframe {
+ struct heapframe *Xprevframe;
+
+ /* Function arguments that may change */
+
+ const uschar *Xeptr;
+ const uschar *Xecode;
+ const uschar *Xmstart;
+ int Xoffset_top;
+ long int Xims;
+ eptrblock *Xeptrb;
+ int Xflags;
+ unsigned int Xrdepth;
+
+ /* Function local variables */
+
+ const uschar *Xcallpat;
+ const uschar *Xcharptr;
+ const uschar *Xdata;
+ const uschar *Xnext;
+ const uschar *Xpp;
+ const uschar *Xprev;
+ const uschar *Xsaved_eptr;
+
+ recursion_info Xnew_recursive;
+
+ BOOL Xcur_is_word;
+ BOOL Xcondition;
+ BOOL Xprev_is_word;
+
+ unsigned long int Xoriginal_ims;
+
+#ifdef SUPPORT_UCP
+ int Xprop_type;
+ int Xprop_value;
+ int Xprop_fail_result;
+ int Xprop_category;
+ int Xprop_chartype;
+ int Xprop_script;
+ int Xoclength;
+ uschar Xocchars[8];
+#endif
+
+ int Xctype;
+ unsigned int Xfc;
+ int Xfi;
+ int Xlength;
+ int Xmax;
+ int Xmin;
+ int Xnumber;
+ int Xoffset;
+ int Xop;
+ int Xsave_capture_last;
+ int Xsave_offset1, Xsave_offset2, Xsave_offset3;
+ int Xstacksave[REC_STACK_SAVE_MAX];
+
+ eptrblock Xnewptrb;
+
+ /* Where to jump back to */
+
+ int Xwhere;
+
+} heapframe;
+
+#endif
+
+
+/***************************************************************************
+***************************************************************************/
+
+
+
+/*************************************************
+* Match from current position *
+*************************************************/
+
+/* This function is called recursively in many circumstances. Whenever it
+returns a negative (error) response, the outer incarnation must also return the
+same response.
+
+Performance note: It might be tempting to extract commonly used fields from the
+md structure (e.g. utf8, end_subject) into individual variables to improve
+performance. Tests using gcc on a SPARC disproved this; in the first case, it
+made performance worse.
+
+Arguments:
+ eptr pointer to current character in subject
+ ecode pointer to current position in compiled code
+ mstart pointer to the current match start position (can be modified
+ by encountering \K)
+ offset_top current top pointer
+ md pointer to "static" info for the match
+ ims current /i, /m, and /s options
+ eptrb pointer to chain of blocks containing eptr at start of
+ brackets - for testing for empty matches
+ flags can contain
+ match_condassert - this is an assertion condition
+ match_cbegroup - this is the start of an unlimited repeat
+ group that can match an empty string
+ rdepth the recursion depth
+
+Returns: MATCH_MATCH if matched ) these values are >= 0
+ MATCH_NOMATCH if failed to match )
+ a negative PCRE_ERROR_xxx value if aborted by an error condition
+ (e.g. stopped by repeated call or recursion limit)
+*/
+
+static int
+match(REGISTER USPTR eptr, REGISTER const uschar *ecode, const uschar *mstart,
+ int offset_top, match_data *md, unsigned long int ims, eptrblock *eptrb,
+ int flags, unsigned int rdepth)
+{
+/* These variables do not need to be preserved over recursion in this function,
+so they can be ordinary variables in all cases. Mark some of them with
+"register" because they are used a lot in loops. */
+
+register int rrc; /* Returns from recursive calls */
+register int i; /* Used for loops not involving calls to RMATCH() */
+register unsigned int c; /* Character values not kept over RMATCH() calls */
+register BOOL utf8; /* Local copy of UTF-8 flag for speed */
+
+BOOL minimize, possessive; /* Quantifier options */
+
+/* When recursion is not being used, all "local" variables that have to be
+preserved over calls to RMATCH() are part of a "frame" which is obtained from
+heap storage. Set up the top-level frame here; others are obtained from the
+heap whenever RMATCH() does a "recursion". See the macro definitions above. */
+
+#ifdef NO_RECURSE
+heapframe *frame = (heapframe*) ((pcre_stack_malloc)(sizeof(heapframe)));
+frame->Xprevframe = NULL; /* Marks the top level */
+
+/* Copy in the original argument variables */
+
+frame->Xeptr = eptr;
+frame->Xecode = ecode;
+frame->Xmstart = mstart;
+frame->Xoffset_top = offset_top;
+frame->Xims = ims;
+frame->Xeptrb = eptrb;
+frame->Xflags = flags;
+frame->Xrdepth = rdepth;
+
+/* This is where control jumps back to in order to effect "recursion" */
+
+HEAP_RECURSE:
+
+/* Macros make the argument variables come from the current frame */
+
+#define eptr frame->Xeptr
+#define ecode frame->Xecode
+#define mstart frame->Xmstart
+#define offset_top frame->Xoffset_top
+#define ims frame->Xims
+#define eptrb frame->Xeptrb
+#define flags frame->Xflags
+#define rdepth frame->Xrdepth
+
+/* Ditto for the local variables */
+
+#ifdef SUPPORT_UTF8
+#define charptr frame->Xcharptr
+#endif
+#define callpat frame->Xcallpat
+#define data frame->Xdata
+#define next frame->Xnext
+#define pp frame->Xpp
+#define prev frame->Xprev
+#define saved_eptr frame->Xsaved_eptr
+
+#define new_recursive frame->Xnew_recursive
+
+#define cur_is_word frame->Xcur_is_word
+#define condition frame->Xcondition
+#define prev_is_word frame->Xprev_is_word
+
+#define original_ims frame->Xoriginal_ims
+
+#ifdef SUPPORT_UCP
+#define prop_type frame->Xprop_type
+#define prop_value frame->Xprop_value
+#define prop_fail_result frame->Xprop_fail_result
+#define prop_category frame->Xprop_category
+#define prop_chartype frame->Xprop_chartype
+#define prop_script frame->Xprop_script
+#define oclength frame->Xoclength
+#define occhars frame->Xocchars
+#endif
+
+#define ctype frame->Xctype
+#define fc frame->Xfc
+#define fi frame->Xfi
+#define length frame->Xlength
+#define max frame->Xmax
+#define min frame->Xmin
+#define number frame->Xnumber
+#define offset frame->Xoffset
+#define op frame->Xop
+#define save_capture_last frame->Xsave_capture_last
+#define save_offset1 frame->Xsave_offset1
+#define save_offset2 frame->Xsave_offset2
+#define save_offset3 frame->Xsave_offset3
+#define stacksave frame->Xstacksave
+
+#define newptrb frame->Xnewptrb
+
+/* When recursion is being used, local variables are allocated on the stack and
+get preserved during recursion in the normal way. In this environment, fi and
+i, and fc and c, can be the same variables. */
+
+#else /* NO_RECURSE not defined */
+#define fi i
+#define fc c
+
+
+#ifdef SUPPORT_UTF8 /* Many of these variables are used only */
+const uschar *charptr; /* in small blocks of the code. My normal */
+#endif /* style of coding would have declared */
+const uschar *callpat; /* them within each of those blocks. */
+const uschar *data; /* However, in order to accommodate the */
+const uschar *next; /* version of this code that uses an */
+USPTR pp; /* external "stack" implemented on the */
+const uschar *prev; /* heap, it is easier to declare them all */
+USPTR saved_eptr; /* here, so the declarations can be cut */
+ /* out in a block. The only declarations */
+recursion_info new_recursive; /* within blocks below are for variables */
+ /* that do not have to be preserved over */
+BOOL cur_is_word; /* a recursive call to RMATCH(). */
+BOOL condition;
+BOOL prev_is_word;
+
+unsigned long int original_ims;
+
+#ifdef SUPPORT_UCP
+int prop_type;
+int prop_value;
+int prop_fail_result;
+int prop_category;
+int prop_chartype;
+int prop_script;
+int oclength;
+uschar occhars[8];
+#endif
+
+int ctype;
+int length;
+int max;
+int min;
+int number;
+int offset;
+int op;
+int save_capture_last;
+int save_offset1, save_offset2, save_offset3;
+int stacksave[REC_STACK_SAVE_MAX];
+
+eptrblock newptrb;
+#endif /* NO_RECURSE */
+
+/* These statements are here to stop the compiler complaining about uninitialized
+variables. */
+
+#ifdef SUPPORT_UCP
+prop_value = 0;
+prop_fail_result = 0;
+#endif
+
+
+/* This label is used for tail recursion, which is used in a few cases even
+when NO_RECURSE is not defined, in order to reduce the amount of stack that is
+used. Thanks to Ian Taylor for noticing this possibility and sending the
+original patch. */
+
+TAIL_RECURSE:
+
+/* OK, now we can get on with the real code of the function. Recursive calls
+are specified by the macro RMATCH and RRETURN is used to return. When
+NO_RECURSE is *not* defined, these just turn into a recursive call to match()
+and a "return", respectively (possibly with some debugging if DEBUG is
+defined). However, RMATCH isn't like a function call because it's quite a
+complicated macro. It has to be used in one particular way. This shouldn't,
+however, impact performance when true recursion is being used. */
+
+#ifdef SUPPORT_UTF8
+utf8 = md->utf8; /* Local copy of the flag */
+#else
+utf8 = FALSE;
+#endif
+
+/* First check that we haven't called match() too many times, or that we
+haven't exceeded the recursive call limit. */
+
+if (md->match_call_count++ >= md->match_limit) RRETURN(PCRE_ERROR_MATCHLIMIT);
+if (rdepth >= md->match_limit_recursion) RRETURN(PCRE_ERROR_RECURSIONLIMIT);
+
+original_ims = ims; /* Save for resetting on ')' */
+
+/* At the start of a group with an unlimited repeat that may match an empty
+string, the match_cbegroup flag is set. When this is the case, add the current
+subject pointer to the chain of such remembered pointers, to be checked when we
+hit the closing ket, in order to break infinite loops that match no characters.
+When match() is called in other circumstances, don't add to the chain. The
+match_cbegroup flag must NOT be used with tail recursion, because the memory
+block that is used is on the stack, so a new one may be required for each
+match(). */
+
+if ((flags & match_cbegroup) != 0)
+ {
+ newptrb.epb_saved_eptr = eptr;
+ newptrb.epb_prev = eptrb;
+ eptrb = &newptrb;
+ }
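+
+/* For example, in a pattern such as (a*)* the bracketed group can match an
+empty string; the chain maintained here is what lets the code at the closing
+ket detect that no characters were consumed, and so break what would otherwise
+be an infinite loop. */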
+
+/* Now start processing the opcodes. */
+
+for (;;)
+ {
+ minimize = possessive = FALSE;
+ op = *ecode;
+
+ /* For partial matching, remember if we ever hit the end of the subject after
+ matching at least one subject character. */
+
+ if (md->partial &&
+ eptr >= md->end_subject &&
+ eptr > mstart)
+ md->hitend = TRUE;
+
+ switch(op)
+ {
+ case OP_FAIL:
+ RRETURN(MATCH_NOMATCH);
+
+ case OP_PRUNE:
+ RMATCH(eptr, ecode + _pcre_OP_lengths[*ecode], offset_top, md,
+ ims, eptrb, flags, RM51);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ RRETURN(MATCH_PRUNE);
+
+ case OP_COMMIT:
+ RMATCH(eptr, ecode + _pcre_OP_lengths[*ecode], offset_top, md,
+ ims, eptrb, flags, RM52);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ RRETURN(MATCH_COMMIT);
+
+ case OP_SKIP:
+ RMATCH(eptr, ecode + _pcre_OP_lengths[*ecode], offset_top, md,
+ ims, eptrb, flags, RM53);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ md->start_match_ptr = eptr; /* Pass back current position */
+ RRETURN(MATCH_SKIP);
+
+ case OP_THEN:
+ RMATCH(eptr, ecode + _pcre_OP_lengths[*ecode], offset_top, md,
+ ims, eptrb, flags, RM54);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ RRETURN(MATCH_THEN);
+
+ /* Handle a capturing bracket. If there is space in the offset vector, save
+ the current subject position in the working slot at the top of the vector.
+ We mustn't change the current values of the data slot, because they may be
+ set from a previous iteration of this group, and be referred to by a
+ reference inside the group.
+
+ If the bracket fails to match, we need to restore this value and also the
+ values of the final offsets, in case they were set by a previous iteration
+ of the same bracket.
+
+ If there isn't enough space in the offset vector, treat this as if it were
+ a non-capturing bracket. Don't worry about setting the flag for the error
+ case here; that is handled in the code for KET. */
+
+ case OP_CBRA:
+ case OP_SCBRA:
+ number = GET2(ecode, 1+LINK_SIZE);
+ offset = number << 1;
+
+#ifdef DEBUG
+ printf("start bracket %d\n", number);
+ printf("subject=");
+ pchars(eptr, 16, TRUE, md);
+ printf("\n");
+#endif
+
+ if (offset < md->offset_max)
+ {
+ save_offset1 = md->offset_vector[offset];
+ save_offset2 = md->offset_vector[offset+1];
+ save_offset3 = md->offset_vector[md->offset_end - number];
+ save_capture_last = md->capture_last;
+
+ DPRINTF(("saving %d %d %d\n", save_offset1, save_offset2, save_offset3));
+ md->offset_vector[md->offset_end - number] = eptr - md->start_subject;
+
+ flags = (op == OP_SCBRA)? match_cbegroup : 0;
+ do
+ {
+ RMATCH(eptr, ecode + _pcre_OP_lengths[*ecode], offset_top, md,
+ ims, eptrb, flags, RM1);
+ if (rrc != MATCH_NOMATCH && rrc != MATCH_THEN) RRETURN(rrc);
+ md->capture_last = save_capture_last;
+ ecode += GET(ecode, 1);
+ }
+ while (*ecode == OP_ALT);
+
+ DPRINTF(("bracket %d failed\n", number));
+
+ md->offset_vector[offset] = save_offset1;
+ md->offset_vector[offset+1] = save_offset2;
+ md->offset_vector[md->offset_end - number] = save_offset3;
+
+ RRETURN(MATCH_NOMATCH);
+ }
+
+ /* FALL THROUGH ... Insufficient room for saving captured contents. Treat
+ as a non-capturing bracket. */
+
+ /* VVVVVVVVVVVVVVVVVVVVVVVVV */
+ /* VVVVVVVVVVVVVVVVVVVVVVVVV */
+
+ DPRINTF(("insufficient capture room: treat as non-capturing\n"));
+
+ /* VVVVVVVVVVVVVVVVVVVVVVVVV */
+ /* VVVVVVVVVVVVVVVVVVVVVVVVV */
+
+ /* Non-capturing bracket. Loop for all the alternatives. When we get to the
+ final alternative within the brackets, we would return the result of a
+ recursive call to match() whatever happened. We can reduce stack usage by
+ turning this into a tail recursion, except in the case when match_cbegroup
+ is set.*/
+
+ case OP_BRA:
+ case OP_SBRA:
+ DPRINTF(("start non-capturing bracket\n"));
+ flags = (op >= OP_SBRA)? match_cbegroup : 0;
+ for (;;)
+ {
+ if (ecode[GET(ecode, 1)] != OP_ALT) /* Final alternative */
+ {
+ if (flags == 0) /* Not a possibly empty group */
+ {
+ ecode += _pcre_OP_lengths[*ecode];
+ DPRINTF(("bracket 0 tail recursion\n"));
+ goto TAIL_RECURSE;
+ }
+
+ /* Possibly empty group; can't use tail recursion. */
+
+ RMATCH(eptr, ecode + _pcre_OP_lengths[*ecode], offset_top, md, ims,
+ eptrb, flags, RM48);
+ RRETURN(rrc);
+ }
+
+ /* For non-final alternatives, continue the loop for a NOMATCH result;
+ otherwise return. */
+
+ RMATCH(eptr, ecode + _pcre_OP_lengths[*ecode], offset_top, md, ims,
+ eptrb, flags, RM2);
+ if (rrc != MATCH_NOMATCH && rrc != MATCH_THEN) RRETURN(rrc);
+ ecode += GET(ecode, 1);
+ }
+ /* Control never reaches here. */
+
+ /* Conditional group: compilation checked that there are no more than
+ two branches. If the condition is false, skipping the first branch takes us
+ past the end if there is only one branch, but that's OK because that is
+ exactly what going to the ket would do. As there is only one branch to be
+ obeyed, we can use tail recursion to avoid using another stack frame. */
+
+ case OP_COND:
+ case OP_SCOND:
+ if (ecode[LINK_SIZE+1] == OP_RREF) /* Recursion test */
+ {
+ offset = GET2(ecode, LINK_SIZE + 2); /* Recursion group number*/
+ condition = md->recursive != NULL &&
+ (offset == RREF_ANY || offset == md->recursive->group_num);
+ ecode += condition? 3 : GET(ecode, 1);
+ }
+
+ else if (ecode[LINK_SIZE+1] == OP_CREF) /* Group used test */
+ {
+ offset = GET2(ecode, LINK_SIZE+2) << 1; /* Doubled ref number */
+ condition = offset < offset_top && md->offset_vector[offset] >= 0;
+ ecode += condition? 3 : GET(ecode, 1);
+ }
+
+ else if (ecode[LINK_SIZE+1] == OP_DEF) /* DEFINE - always false */
+ {
+ condition = FALSE;
+ ecode += GET(ecode, 1);
+ }
+
+ /* The condition is an assertion. Call match() to evaluate it - setting
+ the final argument match_condassert causes it to stop at the end of an
+ assertion. */
+
+ else
+ {
+ RMATCH(eptr, ecode + 1 + LINK_SIZE, offset_top, md, ims, NULL,
+ match_condassert, RM3);
+ if (rrc == MATCH_MATCH)
+ {
+ condition = TRUE;
+ ecode += 1 + LINK_SIZE + GET(ecode, LINK_SIZE + 2);
+ while (*ecode == OP_ALT) ecode += GET(ecode, 1);
+ }
+ else if (rrc != MATCH_NOMATCH && rrc != MATCH_THEN)
+ {
+ RRETURN(rrc); /* Need braces because of following else */
+ }
+ else
+ {
+ condition = FALSE;
+ ecode += GET(ecode, 1);
+ }
+ }
+
+ /* We are now at the branch that is to be obeyed. As there is only one,
+ we can use tail recursion to avoid using another stack frame, except when
+ match_cbegroup is required for an unlimited repeat of a possibly empty
+ group. If the second alternative doesn't exist, we can just plough on. */
+
+ if (condition || *ecode == OP_ALT)
+ {
+ ecode += 1 + LINK_SIZE;
+ if (op == OP_SCOND) /* Possibly empty group */
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, match_cbegroup, RM49);
+ RRETURN(rrc);
+ }
+ else /* Group must match something */
+ {
+ flags = 0;
+ goto TAIL_RECURSE;
+ }
+ }
+ else /* Condition false & no 2nd alternative */
+ {
+ ecode += 1 + LINK_SIZE;
+ }
+ break;
+
+
+ /* End of the pattern, either real or forced. If we are in a top-level
+ recursion, we should restore the offsets appropriately and continue from
+ after the call. */
+
+ case OP_ACCEPT:
+ case OP_END:
+ if (md->recursive != NULL && md->recursive->group_num == 0)
+ {
+ recursion_info *rec = md->recursive;
+ DPRINTF(("End of pattern in a (?0) recursion\n"));
+ md->recursive = rec->prevrec;
+ memmove(md->offset_vector, rec->offset_save,
+ rec->saved_max * sizeof(int));
+ mstart = rec->save_start;
+ ims = original_ims;
+ ecode = rec->after_call;
+ break;
+ }
+
+ /* Otherwise, if PCRE_NOTEMPTY is set, fail if we have matched an empty
+ string - backtracking will then try other alternatives, if any. */
+
+ if (md->notempty && eptr == mstart) RRETURN(MATCH_NOMATCH);
+ md->end_match_ptr = eptr; /* Record where we ended */
+ md->end_offset_top = offset_top; /* and how many extracts were taken */
+ md->start_match_ptr = mstart; /* and the start (\K can modify) */
+ RRETURN(MATCH_MATCH);
+
+ /* Change option settings */
+
+ case OP_OPT:
+ ims = ecode[1];
+ ecode += 2;
+ DPRINTF(("ims set to %02lx\n", ims));
+ break;
+
+ /* Assertion brackets. Check the alternative branches in turn - the
+ matching won't pass the KET for an assertion. If any one branch matches,
+ the assertion is true. Lookbehind assertions have an OP_REVERSE item at the
+ start of each branch to move the current point backwards, so the code at
+ this level is identical to the lookahead case. */
+
+ case OP_ASSERT:
+ case OP_ASSERTBACK:
+ do
+ {
+ RMATCH(eptr, ecode + 1 + LINK_SIZE, offset_top, md, ims, NULL, 0,
+ RM4);
+ if (rrc == MATCH_MATCH) break;
+ if (rrc != MATCH_NOMATCH && rrc != MATCH_THEN) RRETURN(rrc);
+ ecode += GET(ecode, 1);
+ }
+ while (*ecode == OP_ALT);
+ if (*ecode == OP_KET) RRETURN(MATCH_NOMATCH);
+
+ /* If checking an assertion for a condition, return MATCH_MATCH. */
+
+ if ((flags & match_condassert) != 0) RRETURN(MATCH_MATCH);
+
+ /* Continue from after the assertion, updating the offsets high water
+ mark, since extracts may have been taken during the assertion. */
+
+ do ecode += GET(ecode,1); while (*ecode == OP_ALT);
+ ecode += 1 + LINK_SIZE;
+ offset_top = md->end_offset_top;
+ continue;
+
+ /* Negative assertion: all branches must fail to match */
+
+ case OP_ASSERT_NOT:
+ case OP_ASSERTBACK_NOT:
+ do
+ {
+ RMATCH(eptr, ecode + 1 + LINK_SIZE, offset_top, md, ims, NULL, 0,
+ RM5);
+ if (rrc == MATCH_MATCH) RRETURN(MATCH_NOMATCH);
+ if (rrc != MATCH_NOMATCH && rrc != MATCH_THEN) RRETURN(rrc);
+ ecode += GET(ecode,1);
+ }
+ while (*ecode == OP_ALT);
+
+ if ((flags & match_condassert) != 0) RRETURN(MATCH_MATCH);
+
+ ecode += 1 + LINK_SIZE;
+ continue;
+
+ /* Move the subject pointer back. This occurs only at the start of
+ each branch of a lookbehind assertion. If we are too close to the start to
+ move back, this match function fails. When working with UTF-8 we move
+ back a number of characters, not bytes. */
+
+ case OP_REVERSE:
+#ifdef SUPPORT_UTF8
+ if (utf8)
+ {
+ i = GET(ecode, 1);
+ while (i-- > 0)
+ {
+ eptr--;
+ if (eptr < md->start_subject) RRETURN(MATCH_NOMATCH);
+ BACKCHAR(eptr);
+ }
+ }
+ else
+#endif
+
+ /* No UTF-8 support, or not in UTF-8 mode: count is byte count */
+
+ {
+ eptr -= GET(ecode, 1);
+ if (eptr < md->start_subject) RRETURN(MATCH_NOMATCH);
+ }
+
+ /* Skip to next op code */
+
+ ecode += 1 + LINK_SIZE;
+ break;
+
+ /* The callout item calls an external function, if one is provided, passing
+ details of the match so far. This is mainly for debugging, though the
+ function is able to force a failure. */
+
+ case OP_CALLOUT:
+ if (pcre_callout != NULL)
+ {
+ pcre_callout_block cb;
+ cb.version = 1; /* Version 1 of the callout block */
+ cb.callout_number = ecode[1];
+ cb.offset_vector = md->offset_vector;
+ cb.subject = (PCRE_SPTR)md->start_subject;
+ cb.subject_length = md->end_subject - md->start_subject;
+ cb.start_match = mstart - md->start_subject;
+ cb.current_position = eptr - md->start_subject;
+ cb.pattern_position = GET(ecode, 2);
+ cb.next_item_length = GET(ecode, 2 + LINK_SIZE);
+ cb.capture_top = offset_top/2;
+ cb.capture_last = md->capture_last;
+ cb.callout_data = md->callout_data;
+ if ((rrc = (*pcre_callout)(&cb)) > 0) RRETURN(MATCH_NOMATCH);
+ if (rrc < 0) RRETURN(rrc);
+ }
+ ecode += 2 + 2*LINK_SIZE;
+ break;
+
+ /* Recursion either matches the current regex, or some subexpression. The
+ offset data is the offset to the starting bracket from the start of the
+ whole pattern. (This is so that it works from duplicated subpatterns.)
+
+ If there are any capturing brackets started but not finished, we have to
+ save their starting points and reinstate them after the recursion. However,
+ we don't know how many such there are (offset_top records the completed
+ total) so we just have to save all the potential data. There may be up to
+ 65535 such values, which is too large to put on the stack, but using malloc
+ for small numbers seems expensive. As a compromise, the stack is used when
+ there are no more than REC_STACK_SAVE_MAX values to store; otherwise malloc
+ is used. A problem is what to do if the malloc fails ... there is no way of
+ returning to the top level with an error. Save the top REC_STACK_SAVE_MAX
+ values on the stack, and accept that the rest may be wrong.
+
+ There are also other values that have to be saved. We use a chained
+ sequence of blocks that actually live on the stack. Thanks to Robin Houston
+ for the original version of this logic. */
+
+ case OP_RECURSE:
+ {
+ callpat = md->start_code + GET(ecode, 1);
+ new_recursive.group_num = (callpat == md->start_code)? 0 :
+ GET2(callpat, 1 + LINK_SIZE);
+
+ /* Add to "recursing stack" */
+
+ new_recursive.prevrec = md->recursive;
+ md->recursive = &new_recursive;
+
+ /* Find where to continue from afterwards */
+
+ ecode += 1 + LINK_SIZE;
+ new_recursive.after_call = ecode;
+
+ /* Now save the offset data. */
+
+ new_recursive.saved_max = md->offset_end;
+ if (new_recursive.saved_max <= REC_STACK_SAVE_MAX)
+ new_recursive.offset_save = stacksave;
+ else
+ {
+ new_recursive.offset_save =
+ (int *)(pcre_malloc)(new_recursive.saved_max * sizeof(int));
+ if (new_recursive.offset_save == NULL) RRETURN(PCRE_ERROR_NOMEMORY);
+ }
+
+ memcpy(new_recursive.offset_save, md->offset_vector,
+ new_recursive.saved_max * sizeof(int));
+ new_recursive.save_start = mstart;
+ mstart = eptr;
+
+ /* OK, now we can do the recursion. For each top-level alternative we
+ restore the offset and recursion data. */
+
+ DPRINTF(("Recursing into group %d\n", new_recursive.group_num));
+ flags = (*callpat >= OP_SBRA)? match_cbegroup : 0;
+ do
+ {
+ RMATCH(eptr, callpat + _pcre_OP_lengths[*callpat], offset_top,
+ md, ims, eptrb, flags, RM6);
+ if (rrc == MATCH_MATCH)
+ {
+ DPRINTF(("Recursion matched\n"));
+ md->recursive = new_recursive.prevrec;
+ if (new_recursive.offset_save != stacksave)
+ (pcre_free)(new_recursive.offset_save);
+ RRETURN(MATCH_MATCH);
+ }
+ else if (rrc != MATCH_NOMATCH && rrc != MATCH_THEN)
+ {
+ DPRINTF(("Recursion gave error %d\n", rrc));
+ RRETURN(rrc);
+ }
+
+ md->recursive = &new_recursive;
+ memcpy(md->offset_vector, new_recursive.offset_save,
+ new_recursive.saved_max * sizeof(int));
+ callpat += GET(callpat, 1);
+ }
+ while (*callpat == OP_ALT);
+
+ DPRINTF(("Recursion didn't match\n"));
+ md->recursive = new_recursive.prevrec;
+ if (new_recursive.offset_save != stacksave)
+ (pcre_free)(new_recursive.offset_save);
+ RRETURN(MATCH_NOMATCH);
+ }
+ /* Control never reaches here */
+
+ /* "Once" brackets are like assertion brackets except that after a match,
+ the point in the subject string is not moved back. Thus there can never be
+ a move back into the brackets. Friedl calls these "atomic" subpatterns.
+ Check the alternative branches in turn - the matching won't pass the KET
+ for this kind of subpattern. If any one branch matches, we carry on as at
+ the end of a normal bracket, leaving the subject pointer. */
+
+ case OP_ONCE:
+ prev = ecode;
+ saved_eptr = eptr;
+
+ do
+ {
+ RMATCH(eptr, ecode + 1 + LINK_SIZE, offset_top, md, ims, eptrb, 0, RM7);
+ if (rrc == MATCH_MATCH) break;
+ if (rrc != MATCH_NOMATCH && rrc != MATCH_THEN) RRETURN(rrc);
+ ecode += GET(ecode,1);
+ }
+ while (*ecode == OP_ALT);
+
+ /* If hit the end of the group (which could be repeated), fail */
+
+ if (*ecode != OP_ONCE && *ecode != OP_ALT) RRETURN(MATCH_NOMATCH);
+
+ /* Continue as from after the assertion, updating the offsets high water
+ mark, since extracts may have been taken. */
+
+ do ecode += GET(ecode, 1); while (*ecode == OP_ALT);
+
+ offset_top = md->end_offset_top;
+ eptr = md->end_match_ptr;
+
+ /* For a non-repeating ket, just continue at this level. This also
+ happens for a repeating ket if no characters were matched in the group.
+ This is the forcible breaking of infinite loops as implemented in Perl
+ 5.005. If there is an options reset, it will get obeyed in the normal
+ course of events. */
+
+ if (*ecode == OP_KET || eptr == saved_eptr)
+ {
+ ecode += 1+LINK_SIZE;
+ break;
+ }
+
+ /* The repeating kets try the rest of the pattern or restart from the
+ preceding bracket, in the appropriate order. The second "call" of match()
+ uses tail recursion, to avoid using another stack frame. We need to reset
+ any options that changed within the bracket before re-running it, so
+ check the next opcode. */
+
+ if (ecode[1+LINK_SIZE] == OP_OPT)
+ {
+ ims = (ims & ~PCRE_IMS) | ecode[4];
+ DPRINTF(("ims set to %02lx at group repeat\n", ims));
+ }
+
+ if (*ecode == OP_KETRMIN)
+ {
+ RMATCH(eptr, ecode + 1 + LINK_SIZE, offset_top, md, ims, eptrb, 0, RM8);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ ecode = prev;
+ flags = 0;
+ goto TAIL_RECURSE;
+ }
+ else /* OP_KETRMAX */
+ {
+ RMATCH(eptr, prev, offset_top, md, ims, eptrb, match_cbegroup, RM9);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ ecode += 1 + LINK_SIZE;
+ flags = 0;
+ goto TAIL_RECURSE;
+ }
+ /* Control never gets here */
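+
+  /* As an illustration of the "once" semantics handled above: against the
+  subject "aaa", a pattern such as (?>a+)a can never match, because the atomic
+  group consumes all the a's and no backtracking into the group is possible to
+  give one back. */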
+
+ /* An alternation is the end of a branch; scan along to find the end of the
+ bracketed group and go to there. */
+
+ case OP_ALT:
+ do ecode += GET(ecode,1); while (*ecode == OP_ALT);
+ break;
+
+ /* BRAZERO and BRAMINZERO occur just before a bracket group, indicating
+ that it may occur zero times. It may repeat infinitely, or not at all -
+ i.e. it could be ()* or ()? in the pattern. Brackets with fixed upper
+ repeat limits are compiled as a number of copies, with the optional ones
+ preceded by BRAZERO or BRAMINZERO. */
+
+ case OP_BRAZERO:
+ {
+ next = ecode+1;
+ RMATCH(eptr, next, offset_top, md, ims, eptrb, 0, RM10);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ do next += GET(next,1); while (*next == OP_ALT);
+ ecode = next + 1 + LINK_SIZE;
+ }
+ break;
+
+ case OP_BRAMINZERO:
+ {
+ next = ecode+1;
+ do next += GET(next, 1); while (*next == OP_ALT);
+ RMATCH(eptr, next + 1+LINK_SIZE, offset_top, md, ims, eptrb, 0, RM11);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ ecode++;
+ }
+ break;
+
+ /* End of a group, repeated or non-repeating. */
+
+ case OP_KET:
+ case OP_KETRMIN:
+ case OP_KETRMAX:
+ prev = ecode - GET(ecode, 1);
+
+ /* If this was a group that remembered the subject start, in order to break
+ infinite repeats of empty string matches, retrieve the subject start from
+ the chain. Otherwise, set it NULL. */
+
+ if (*prev >= OP_SBRA)
+ {
+ saved_eptr = eptrb->epb_saved_eptr; /* Value at start of group */
+ eptrb = eptrb->epb_prev; /* Backup to previous group */
+ }
+ else saved_eptr = NULL;
+
+ /* If we are at the end of an assertion group, stop matching and return
+ MATCH_MATCH, but record the current high water mark for use by positive
+ assertions. Do this also for the "once" (atomic) groups. */
+
+ if (*prev == OP_ASSERT || *prev == OP_ASSERT_NOT ||
+ *prev == OP_ASSERTBACK || *prev == OP_ASSERTBACK_NOT ||
+ *prev == OP_ONCE)
+ {
+ md->end_match_ptr = eptr; /* For ONCE */
+ md->end_offset_top = offset_top;
+ RRETURN(MATCH_MATCH);
+ }
+
+ /* For capturing groups we have to check the group number back at the start
+ and if necessary complete handling an extraction by setting the offsets and
+ bumping the high water mark. Note that whole-pattern recursion is coded as
+ a recurse into group 0, so it won't be picked up here. Instead, we catch it
+ when the OP_END is reached. Other recursion is handled here. */
+
+ if (*prev == OP_CBRA || *prev == OP_SCBRA)
+ {
+ number = GET2(prev, 1+LINK_SIZE);
+ offset = number << 1;
+
+#ifdef DEBUG
+ printf("end bracket %d", number);
+ printf("\n");
+#endif
+
+ md->capture_last = number;
+ if (offset >= md->offset_max) md->offset_overflow = TRUE; else
+ {
+ md->offset_vector[offset] =
+ md->offset_vector[md->offset_end - number];
+ md->offset_vector[offset+1] = eptr - md->start_subject;
+ if (offset_top <= offset) offset_top = offset + 2;
+ }
+
+ /* Handle a recursively called group. Restore the offsets
+ appropriately and continue from after the call. */
+
+ if (md->recursive != NULL && md->recursive->group_num == number)
+ {
+ recursion_info *rec = md->recursive;
+ DPRINTF(("Recursion (%d) succeeded - continuing\n", number));
+ md->recursive = rec->prevrec;
+ mstart = rec->save_start;
+ memcpy(md->offset_vector, rec->offset_save,
+ rec->saved_max * sizeof(int));
+ ecode = rec->after_call;
+ ims = original_ims;
+ break;
+ }
+ }
+
+ /* For both capturing and non-capturing groups, reset the value of the ims
+ flags, in case they got changed during the group. */
+
+ ims = original_ims;
+ DPRINTF(("ims reset to %02lx\n", ims));
+
+ /* For a non-repeating ket, just continue at this level. This also
+ happens for a repeating ket if no characters were matched in the group.
+ This is the forcible breaking of infinite loops as implemented in Perl
+ 5.005. If there is an options reset, it will get obeyed in the normal
+ course of events. */
+
+ if (*ecode == OP_KET || eptr == saved_eptr)
+ {
+ ecode += 1 + LINK_SIZE;
+ break;
+ }
+
+ /* The repeating kets try the rest of the pattern or restart from the
+ preceding bracket, in the appropriate order. In the second case, we can use
+ tail recursion to avoid using another stack frame, unless we have an
+ unlimited repeat of a group that can match an empty string. */
+
+ flags = (*prev >= OP_SBRA)? match_cbegroup : 0;
+
+ if (*ecode == OP_KETRMIN)
+ {
+ RMATCH(eptr, ecode + 1 + LINK_SIZE, offset_top, md, ims, eptrb, 0, RM12);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ if (flags != 0) /* Could match an empty string */
+ {
+ RMATCH(eptr, prev, offset_top, md, ims, eptrb, flags, RM50);
+ RRETURN(rrc);
+ }
+ ecode = prev;
+ goto TAIL_RECURSE;
+ }
+ else /* OP_KETRMAX */
+ {
+ RMATCH(eptr, prev, offset_top, md, ims, eptrb, flags, RM13);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ ecode += 1 + LINK_SIZE;
+ flags = 0;
+ goto TAIL_RECURSE;
+ }
+ /* Control never gets here */
+
+ /* Start of subject unless notbol, or after internal newline if multiline */
+
+ case OP_CIRC:
+ if (md->notbol && eptr == md->start_subject) RRETURN(MATCH_NOMATCH);
+ if ((ims & PCRE_MULTILINE) != 0)
+ {
+ if (eptr != md->start_subject &&
+ (eptr == md->end_subject || !WAS_NEWLINE(eptr)))
+ RRETURN(MATCH_NOMATCH);
+ ecode++;
+ break;
+ }
+ /* ... else fall through */
+
+ /* Start of subject assertion */
+
+ case OP_SOD:
+ if (eptr != md->start_subject) RRETURN(MATCH_NOMATCH);
+ ecode++;
+ break;
+
+ /* Start of match assertion */
+
+ case OP_SOM:
+ if (eptr != md->start_subject + md->start_offset) RRETURN(MATCH_NOMATCH);
+ ecode++;
+ break;
+
+ /* Reset the start of match point */
+
+ case OP_SET_SOM:
+ mstart = eptr;
+ ecode++;
+ break;
+
+ /* Assert before internal newline if multiline, or before a terminating
+ newline unless endonly is set, else end of subject unless noteol is set. */
+
+ case OP_DOLL:
+ if ((ims & PCRE_MULTILINE) != 0)
+ {
+ if (eptr < md->end_subject)
+ { if (!IS_NEWLINE(eptr)) RRETURN(MATCH_NOMATCH); }
+ else
+ { if (md->noteol) RRETURN(MATCH_NOMATCH); }
+ ecode++;
+ break;
+ }
+ else
+ {
+ if (md->noteol) RRETURN(MATCH_NOMATCH);
+ if (!md->endonly)
+ {
+ if (eptr != md->end_subject &&
+ (!IS_NEWLINE(eptr) || eptr != md->end_subject - md->nllen))
+ RRETURN(MATCH_NOMATCH);
+ ecode++;
+ break;
+ }
+ }
+ /* ... else fall through for endonly */
+
+ /* End of subject assertion (\z) */
+
+ case OP_EOD:
+ if (eptr < md->end_subject) RRETURN(MATCH_NOMATCH);
+ ecode++;
+ break;
+
+ /* End of subject or ending \n assertion (\Z) */
+
+ case OP_EODN:
+ if (eptr != md->end_subject &&
+ (!IS_NEWLINE(eptr) || eptr != md->end_subject - md->nllen))
+ RRETURN(MATCH_NOMATCH);
+ ecode++;
+ break;
+
+ /* Word boundary assertions */
+
+ case OP_NOT_WORD_BOUNDARY:
+ case OP_WORD_BOUNDARY:
+ {
+
+ /* Find out if the previous and current characters are "word" characters.
+ It takes a bit more work in UTF-8 mode. Characters > 255 are assumed to
+ be "non-word" characters. */
+
+#ifdef SUPPORT_UTF8
+ if (utf8)
+ {
+ if (eptr == md->start_subject) prev_is_word = FALSE; else
+ {
+ const uschar *lastptr = eptr - 1;
+ while((*lastptr & 0xc0) == 0x80) lastptr--;
+ GETCHAR(c, lastptr);
+ prev_is_word = c < 256 && (md->ctypes[c] & ctype_word) != 0;
+ }
+ if (eptr >= md->end_subject) cur_is_word = FALSE; else
+ {
+ GETCHAR(c, eptr);
+ cur_is_word = c < 256 && (md->ctypes[c] & ctype_word) != 0;
+ }
+ }
+ else
+#endif
+
+ /* More streamlined when not in UTF-8 mode */
+
+ {
+ prev_is_word = (eptr != md->start_subject) &&
+ ((md->ctypes[eptr[-1]] & ctype_word) != 0);
+ cur_is_word = (eptr < md->end_subject) &&
+ ((md->ctypes[*eptr] & ctype_word) != 0);
+ }
+
+ /* Now see if the situation is what we want */
+
+ if ((*ecode++ == OP_WORD_BOUNDARY)?
+ cur_is_word == prev_is_word : cur_is_word != prev_is_word)
+ RRETURN(MATCH_NOMATCH);
+ }
+ break;
+
+ /* Match a single character type; inline for speed */
+
+ case OP_ANY:
+ if ((ims & PCRE_DOTALL) == 0)
+ {
+ if (IS_NEWLINE(eptr)) RRETURN(MATCH_NOMATCH);
+ }
+ if (eptr++ >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ if (utf8)
+ while (eptr < md->end_subject && (*eptr & 0xc0) == 0x80) eptr++;
+ ecode++;
+ break;
+
+ /* Match a single byte, even in UTF-8 mode. This opcode really does match
+ any byte, even newline, independent of the setting of PCRE_DOTALL. */
+
+ case OP_ANYBYTE:
+ if (eptr++ >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ ecode++;
+ break;
+
+ case OP_NOT_DIGIT:
+ if (eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINCTEST(c, eptr);
+ if (
+#ifdef SUPPORT_UTF8
+ c < 256 &&
+#endif
+ (md->ctypes[c] & ctype_digit) != 0
+ )
+ RRETURN(MATCH_NOMATCH);
+ ecode++;
+ break;
+
+ case OP_DIGIT:
+ if (eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINCTEST(c, eptr);
+ if (
+#ifdef SUPPORT_UTF8
+ c >= 256 ||
+#endif
+ (md->ctypes[c] & ctype_digit) == 0
+ )
+ RRETURN(MATCH_NOMATCH);
+ ecode++;
+ break;
+
+ case OP_NOT_WHITESPACE:
+ if (eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINCTEST(c, eptr);
+ if (
+#ifdef SUPPORT_UTF8
+ c < 256 &&
+#endif
+ (md->ctypes[c] & ctype_space) != 0
+ )
+ RRETURN(MATCH_NOMATCH);
+ ecode++;
+ break;
+
+ case OP_WHITESPACE:
+ if (eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINCTEST(c, eptr);
+ if (
+#ifdef SUPPORT_UTF8
+ c >= 256 ||
+#endif
+ (md->ctypes[c] & ctype_space) == 0
+ )
+ RRETURN(MATCH_NOMATCH);
+ ecode++;
+ break;
+
+ case OP_NOT_WORDCHAR:
+ if (eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINCTEST(c, eptr);
+ if (
+#ifdef SUPPORT_UTF8
+ c < 256 &&
+#endif
+ (md->ctypes[c] & ctype_word) != 0
+ )
+ RRETURN(MATCH_NOMATCH);
+ ecode++;
+ break;
+
+ case OP_WORDCHAR:
+ if (eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINCTEST(c, eptr);
+ if (
+#ifdef SUPPORT_UTF8
+ c >= 256 ||
+#endif
+ (md->ctypes[c] & ctype_word) == 0
+ )
+ RRETURN(MATCH_NOMATCH);
+ ecode++;
+ break;
+
+ case OP_ANYNL:
+ if (eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINCTEST(c, eptr);
+ switch(c)
+ {
+ default: RRETURN(MATCH_NOMATCH);
+ case 0x000d:
+ if (eptr < md->end_subject && *eptr == 0x0a) eptr++;
+ break;
+
+ case 0x000a:
+ break;
+
+ case 0x000b:
+ case 0x000c:
+ case 0x0085:
+ case 0x2028:
+ case 0x2029:
+ if (md->bsr_anycrlf) RRETURN(MATCH_NOMATCH);
+ break;
+ }
+ ecode++;
+ break;
+
+ case OP_NOT_HSPACE:
+ if (eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINCTEST(c, eptr);
+ switch(c)
+ {
+ default: break;
+ case 0x09: /* HT */
+ case 0x20: /* SPACE */
+ case 0xa0: /* NBSP */
+ case 0x1680: /* OGHAM SPACE MARK */
+ case 0x180e: /* MONGOLIAN VOWEL SEPARATOR */
+ case 0x2000: /* EN QUAD */
+ case 0x2001: /* EM QUAD */
+ case 0x2002: /* EN SPACE */
+ case 0x2003: /* EM SPACE */
+ case 0x2004: /* THREE-PER-EM SPACE */
+ case 0x2005: /* FOUR-PER-EM SPACE */
+ case 0x2006: /* SIX-PER-EM SPACE */
+ case 0x2007: /* FIGURE SPACE */
+ case 0x2008: /* PUNCTUATION SPACE */
+ case 0x2009: /* THIN SPACE */
+ case 0x200A: /* HAIR SPACE */
+ case 0x202f: /* NARROW NO-BREAK SPACE */
+ case 0x205f: /* MEDIUM MATHEMATICAL SPACE */
+ case 0x3000: /* IDEOGRAPHIC SPACE */
+ RRETURN(MATCH_NOMATCH);
+ }
+ ecode++;
+ break;
+
+ case OP_HSPACE:
+ if (eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINCTEST(c, eptr);
+ switch(c)
+ {
+ default: RRETURN(MATCH_NOMATCH);
+ case 0x09: /* HT */
+ case 0x20: /* SPACE */
+ case 0xa0: /* NBSP */
+ case 0x1680: /* OGHAM SPACE MARK */
+ case 0x180e: /* MONGOLIAN VOWEL SEPARATOR */
+ case 0x2000: /* EN QUAD */
+ case 0x2001: /* EM QUAD */
+ case 0x2002: /* EN SPACE */
+ case 0x2003: /* EM SPACE */
+ case 0x2004: /* THREE-PER-EM SPACE */
+ case 0x2005: /* FOUR-PER-EM SPACE */
+ case 0x2006: /* SIX-PER-EM SPACE */
+ case 0x2007: /* FIGURE SPACE */
+ case 0x2008: /* PUNCTUATION SPACE */
+ case 0x2009: /* THIN SPACE */
+ case 0x200A: /* HAIR SPACE */
+ case 0x202f: /* NARROW NO-BREAK SPACE */
+ case 0x205f: /* MEDIUM MATHEMATICAL SPACE */
+ case 0x3000: /* IDEOGRAPHIC SPACE */
+ break;
+ }
+ ecode++;
+ break;
+
+ case OP_NOT_VSPACE:
+ if (eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINCTEST(c, eptr);
+ switch(c)
+ {
+ default: break;
+ case 0x0a: /* LF */
+ case 0x0b: /* VT */
+ case 0x0c: /* FF */
+ case 0x0d: /* CR */
+ case 0x85: /* NEL */
+ case 0x2028: /* LINE SEPARATOR */
+ case 0x2029: /* PARAGRAPH SEPARATOR */
+ RRETURN(MATCH_NOMATCH);
+ }
+ ecode++;
+ break;
+
+ case OP_VSPACE:
+ if (eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINCTEST(c, eptr);
+ switch(c)
+ {
+ default: RRETURN(MATCH_NOMATCH);
+ case 0x0a: /* LF */
+ case 0x0b: /* VT */
+ case 0x0c: /* FF */
+ case 0x0d: /* CR */
+ case 0x85: /* NEL */
+ case 0x2028: /* LINE SEPARATOR */
+ case 0x2029: /* PARAGRAPH SEPARATOR */
+ break;
+ }
+ ecode++;
+ break;
+
+#ifdef SUPPORT_UCP
+ /* Check the next character by Unicode property. We will get here only
+ if the support is in the binary; otherwise a compile-time error occurs. */
+
+ case OP_PROP:
+ case OP_NOTPROP:
+ if (eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINCTEST(c, eptr);
+ {
+ int chartype, script;
+ int category = _pcre_ucp_findprop(c, &chartype, &script);
+
+ switch(ecode[1])
+ {
+ case PT_ANY:
+ if (op == OP_NOTPROP) RRETURN(MATCH_NOMATCH);
+ break;
+
+ case PT_LAMP:
+ if ((chartype == ucp_Lu ||
+ chartype == ucp_Ll ||
+ chartype == ucp_Lt) == (op == OP_NOTPROP))
+ RRETURN(MATCH_NOMATCH);
+ break;
+
+ case PT_GC:
+ if ((ecode[2] != category) == (op == OP_PROP))
+ RRETURN(MATCH_NOMATCH);
+ break;
+
+ case PT_PC:
+ if ((ecode[2] != chartype) == (op == OP_PROP))
+ RRETURN(MATCH_NOMATCH);
+ break;
+
+ case PT_SC:
+ if ((ecode[2] != script) == (op == OP_PROP))
+ RRETURN(MATCH_NOMATCH);
+ break;
+
+ default:
+ RRETURN(PCRE_ERROR_INTERNAL);
+ }
+
+ ecode += 3;
+ }
+ break;
+
+ /* Match an extended Unicode sequence. We will get here only if the support
+ is in the binary; otherwise a compile-time error occurs. */
+
+ case OP_EXTUNI:
+ if (eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINCTEST(c, eptr);
+ {
+ int chartype, script;
+ int category = _pcre_ucp_findprop(c, &chartype, &script);
+ if (category == ucp_M) RRETURN(MATCH_NOMATCH);
+ while (eptr < md->end_subject)
+ {
+ int len = 1;
+ if (!utf8) c = *eptr; else
+ {
+ GETCHARLEN(c, eptr, len);
+ }
+ category = _pcre_ucp_findprop(c, &chartype, &script);
+ if (category != ucp_M) break;
+ eptr += len;
+ }
+ }
+ ecode++;
+ break;
+#endif
+
+
+ /* Match a back reference, possibly repeatedly. Look past the end of the
+ item to see if there is repeat information following. The code is similar
+ to that for character classes, but repeated for efficiency. Then obey
+ similar code to character type repeats - written out again for speed.
+ However, if the referenced string is the empty string, always treat
+ it as matched, any number of times (otherwise there could be infinite
+ loops). */
+
+ case OP_REF:
+ {
+ offset = GET2(ecode, 1) << 1; /* Doubled ref number */
+ ecode += 3; /* Advance past item */
+
+ /* If the reference is unset, set the length to be longer than the amount
+ of subject left; this ensures that every attempt at a match fails. We
+ can't just fail here, because of the possibility of quantifiers with zero
+ minima. */
+
+ length = (offset >= offset_top || md->offset_vector[offset] < 0)?
+ md->end_subject - eptr + 1 :
+ md->offset_vector[offset+1] - md->offset_vector[offset];
+
+ /* Set up for repetition, or handle the non-repeated case */
+
+ switch (*ecode)
+ {
+ case OP_CRSTAR:
+ case OP_CRMINSTAR:
+ case OP_CRPLUS:
+ case OP_CRMINPLUS:
+ case OP_CRQUERY:
+ case OP_CRMINQUERY:
+ c = *ecode++ - OP_CRSTAR;
+ minimize = (c & 1) != 0;
+ min = rep_min[c]; /* Pick up values from tables; */
+ max = rep_max[c]; /* zero for max => infinity */
+ if (max == 0) max = INT_MAX;
+ break;
+
+ case OP_CRRANGE:
+ case OP_CRMINRANGE:
+ minimize = (*ecode == OP_CRMINRANGE);
+ min = GET2(ecode, 1);
+ max = GET2(ecode, 3);
+ if (max == 0) max = INT_MAX;
+ ecode += 5;
+ break;
+
+ default: /* No repeat follows */
+ if (!match_ref(offset, eptr, length, md, ims)) RRETURN(MATCH_NOMATCH);
+ eptr += length;
+ continue; /* With the main loop */
+ }
+
+ /* If the length of the reference is zero, just continue with the
+ main loop. */
+
+ if (length == 0) continue;
+
+ /* First, ensure the minimum number of matches are present. We get back
+ the length of the reference string explicitly rather than passing the
+ address of eptr, so that eptr can be a register variable. */
+
+ for (i = 1; i <= min; i++)
+ {
+ if (!match_ref(offset, eptr, length, md, ims)) RRETURN(MATCH_NOMATCH);
+ eptr += length;
+ }
+
+ /* If min = max, continue at the same level without recursion.
+ They are not both allowed to be zero. */
+
+ if (min == max) continue;
+
+ /* If minimizing, keep trying and advancing the pointer */
+
+ if (minimize)
+ {
+ for (fi = min;; fi++)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM14);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ if (fi >= max || !match_ref(offset, eptr, length, md, ims))
+ RRETURN(MATCH_NOMATCH);
+ eptr += length;
+ }
+ /* Control never gets here */
+ }
+
+ /* If maximizing, find the longest string and work backwards */
+
+ else
+ {
+ pp = eptr;
+ for (i = min; i < max; i++)
+ {
+ if (!match_ref(offset, eptr, length, md, ims)) break;
+ eptr += length;
+ }
+ while (eptr >= pp)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM15);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ eptr -= length;
+ }
+ RRETURN(MATCH_NOMATCH);
+ }
+ }
+ /* Control never gets here */
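+
+    /* Back references of this kind are what make patterns such as
+    (sens|respons)e and \1ibility work: \1 must match exactly the text most
+    recently captured by group 1. */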
+
+
+
+ /* Match a bit-mapped character class, possibly repeatedly. This op code is
+ used when all the characters in the class have values in the range 0-255,
+ and either the matching is caseful, or the characters are in the range
+ 0-127 when UTF-8 processing is enabled. The only difference between
+ OP_CLASS and OP_NCLASS occurs when a data character outside the range is
+ encountered.
+
+ First, look past the end of the item to see if there is repeat information
+ following. Then obey similar code to character type repeats - written out
+ again for speed. */
+
+ case OP_NCLASS:
+ case OP_CLASS:
+ {
+ data = ecode + 1; /* Save for matching */
+ ecode += 33; /* Advance past the item */
+
+ switch (*ecode)
+ {
+ case OP_CRSTAR:
+ case OP_CRMINSTAR:
+ case OP_CRPLUS:
+ case OP_CRMINPLUS:
+ case OP_CRQUERY:
+ case OP_CRMINQUERY:
+ c = *ecode++ - OP_CRSTAR;
+ minimize = (c & 1) != 0;
+ min = rep_min[c]; /* Pick up values from tables; */
+ max = rep_max[c]; /* zero for max => infinity */
+ if (max == 0) max = INT_MAX;
+ break;
+
+ case OP_CRRANGE:
+ case OP_CRMINRANGE:
+ minimize = (*ecode == OP_CRMINRANGE);
+ min = GET2(ecode, 1);
+ max = GET2(ecode, 3);
+ if (max == 0) max = INT_MAX;
+ ecode += 5;
+ break;
+
+ default: /* No repeat follows */
+ min = max = 1;
+ break;
+ }
+
+ /* First, ensure the minimum number of matches are present. */
+
+#ifdef SUPPORT_UTF8
+ /* UTF-8 mode */
+ if (utf8)
+ {
+ for (i = 1; i <= min; i++)
+ {
+ if (eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINC(c, eptr);
+ if (c > 255)
+ {
+ if (op == OP_CLASS) RRETURN(MATCH_NOMATCH);
+ }
+ else
+ {
+ if ((data[c/8] & (1 << (c&7))) == 0) RRETURN(MATCH_NOMATCH);
+ }
+ }
+ }
+ else
+#endif
+ /* Not UTF-8 mode */
+ {
+ for (i = 1; i <= min; i++)
+ {
+ if (eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ c = *eptr++;
+ if ((data[c/8] & (1 << (c&7))) == 0) RRETURN(MATCH_NOMATCH);
+ }
+ }
+
+ /* If max == min we can continue with the main loop without the
+ need to recurse. */
+
+ if (min == max) continue;
+
+ /* If minimizing, keep testing the rest of the expression and advancing
+ the pointer while it matches the class. */
+
+ if (minimize)
+ {
+#ifdef SUPPORT_UTF8
+ /* UTF-8 mode */
+ if (utf8)
+ {
+ for (fi = min;; fi++)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM16);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ if (fi >= max || eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINC(c, eptr);
+ if (c > 255)
+ {
+ if (op == OP_CLASS) RRETURN(MATCH_NOMATCH);
+ }
+ else
+ {
+ if ((data[c/8] & (1 << (c&7))) == 0) RRETURN(MATCH_NOMATCH);
+ }
+ }
+ }
+ else
+#endif
+ /* Not UTF-8 mode */
+ {
+ for (fi = min;; fi++)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM17);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ if (fi >= max || eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ c = *eptr++;
+ if ((data[c/8] & (1 << (c&7))) == 0) RRETURN(MATCH_NOMATCH);
+ }
+ }
+ /* Control never gets here */
+ }
+
+ /* If maximizing, find the longest possible run, then work backwards. */
+
+ else
+ {
+ pp = eptr;
+
+#ifdef SUPPORT_UTF8
+ /* UTF-8 mode */
+ if (utf8)
+ {
+ for (i = min; i < max; i++)
+ {
+ int len = 1;
+ if (eptr >= md->end_subject) break;
+ GETCHARLEN(c, eptr, len);
+ if (c > 255)
+ {
+ if (op == OP_CLASS) break;
+ }
+ else
+ {
+ if ((data[c/8] & (1 << (c&7))) == 0) break;
+ }
+ eptr += len;
+ }
+ for (;;)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM18);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ if (eptr-- == pp) break; /* Stop if tried at original pos */
+ BACKCHAR(eptr);
+ }
+ }
+ else
+#endif
+ /* Not UTF-8 mode */
+ {
+ for (i = min; i < max; i++)
+ {
+ if (eptr >= md->end_subject) break;
+ c = *eptr;
+ if ((data[c/8] & (1 << (c&7))) == 0) break;
+ eptr++;
+ }
+ while (eptr >= pp)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM19);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ eptr--;
+ }
+ }
+
+ RRETURN(MATCH_NOMATCH);
+ }
+ }
+ /* Control never gets here */
+
+
+ /* Match an extended character class. This opcode is encountered only
+ in UTF-8 mode, because that's the only time it is compiled. */
+
+#ifdef SUPPORT_UTF8
+ case OP_XCLASS:
+ {
+ data = ecode + 1 + LINK_SIZE; /* Save for matching */
+ ecode += GET(ecode, 1); /* Advance past the item */
+
+ switch (*ecode)
+ {
+ case OP_CRSTAR:
+ case OP_CRMINSTAR:
+ case OP_CRPLUS:
+ case OP_CRMINPLUS:
+ case OP_CRQUERY:
+ case OP_CRMINQUERY:
+ c = *ecode++ - OP_CRSTAR;
+ minimize = (c & 1) != 0;
+ min = rep_min[c]; /* Pick up values from tables; */
+ max = rep_max[c]; /* zero for max => infinity */
+ if (max == 0) max = INT_MAX;
+ break;
+
+ case OP_CRRANGE:
+ case OP_CRMINRANGE:
+ minimize = (*ecode == OP_CRMINRANGE);
+ min = GET2(ecode, 1);
+ max = GET2(ecode, 3);
+ if (max == 0) max = INT_MAX;
+ ecode += 5;
+ break;
+
+ default: /* No repeat follows */
+ min = max = 1;
+ break;
+ }
+
+ /* First, ensure the minimum number of matches are present. */
+
+ for (i = 1; i <= min; i++)
+ {
+ if (eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINC(c, eptr);
+ if (!_pcre_xclass(c, data)) RRETURN(MATCH_NOMATCH);
+ }
+
+ /* If max == min we can continue with the main loop without the
+ need to recurse. */
+
+ if (min == max) continue;
+
+ /* If minimizing, keep testing the rest of the expression and advancing
+ the pointer while it matches the class. */
+
+ if (minimize)
+ {
+ for (fi = min;; fi++)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM20);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ if (fi >= max || eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINC(c, eptr);
+ if (!_pcre_xclass(c, data)) RRETURN(MATCH_NOMATCH);
+ }
+ /* Control never gets here */
+ }
+
+ /* If maximizing, find the longest possible run, then work backwards. */
+
+ else
+ {
+ pp = eptr;
+ for (i = min; i < max; i++)
+ {
+ int len = 1;
+ if (eptr >= md->end_subject) break;
+ GETCHARLEN(c, eptr, len);
+ if (!_pcre_xclass(c, data)) break;
+ eptr += len;
+ }
+ for(;;)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM21);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ if (eptr-- == pp) break; /* Stop if tried at original pos */
+ if (utf8) BACKCHAR(eptr);
+ }
+ RRETURN(MATCH_NOMATCH);
+ }
+
+ /* Control never gets here */
+ }
+#endif /* End of XCLASS */
+
+ /* Match a single character, casefully */
+
+ case OP_CHAR:
+#ifdef SUPPORT_UTF8
+ if (utf8)
+ {
+ length = 1;
+ ecode++;
+ GETCHARLEN(fc, ecode, length);
+ if (length > md->end_subject - eptr) RRETURN(MATCH_NOMATCH);
+ while (length-- > 0) if (*ecode++ != *eptr++) RRETURN(MATCH_NOMATCH);
+ }
+ else
+#endif
+
+ /* Non-UTF-8 mode */
+ {
+ if (md->end_subject - eptr < 1) RRETURN(MATCH_NOMATCH);
+ if (ecode[1] != *eptr++) RRETURN(MATCH_NOMATCH);
+ ecode += 2;
+ }
+ break;
+
+ /* Match a single character, caselessly */
+
+ case OP_CHARNC:
+#ifdef SUPPORT_UTF8
+ if (utf8)
+ {
+ length = 1;
+ ecode++;
+ GETCHARLEN(fc, ecode, length);
+
+ if (length > md->end_subject - eptr) RRETURN(MATCH_NOMATCH);
+
+ /* If the pattern character's value is < 128, we have only one byte, and
+ can use the fast lookup table. */
+
+ if (fc < 128)
+ {
+ if (md->lcc[*ecode++] != md->lcc[*eptr++]) RRETURN(MATCH_NOMATCH);
+ }
+
+ /* Otherwise we must pick up the subject character */
+
+ else
+ {
+ unsigned int dc;
+ GETCHARINC(dc, eptr);
+ ecode += length;
+
+ /* If we have Unicode property support, we can use it to test the other
+ case of the character, if there is one. */
+
+ if (fc != dc)
+ {
+#ifdef SUPPORT_UCP
+ if (dc != _pcre_ucp_othercase(fc))
+#endif
+ RRETURN(MATCH_NOMATCH);
+ }
+ }
+ }
+ else
+#endif /* SUPPORT_UTF8 */
+
+ /* Non-UTF-8 mode */
+ {
+ if (md->end_subject - eptr < 1) RRETURN(MATCH_NOMATCH);
+ if (md->lcc[ecode[1]] != md->lcc[*eptr++]) RRETURN(MATCH_NOMATCH);
+ ecode += 2;
+ }
+ break;
+
+ /* Match a single character repeatedly. */
+
+ case OP_EXACT:
+ min = max = GET2(ecode, 1);
+ ecode += 3;
+ goto REPEATCHAR;
+
+ case OP_POSUPTO:
+ possessive = TRUE;
+ /* Fall through */
+
+ case OP_UPTO:
+ case OP_MINUPTO:
+ min = 0;
+ max = GET2(ecode, 1);
+ minimize = *ecode == OP_MINUPTO;
+ ecode += 3;
+ goto REPEATCHAR;
+
+ case OP_POSSTAR:
+ possessive = TRUE;
+ min = 0;
+ max = INT_MAX;
+ ecode++;
+ goto REPEATCHAR;
+
+ case OP_POSPLUS:
+ possessive = TRUE;
+ min = 1;
+ max = INT_MAX;
+ ecode++;
+ goto REPEATCHAR;
+
+ case OP_POSQUERY:
+ possessive = TRUE;
+ min = 0;
+ max = 1;
+ ecode++;
+ goto REPEATCHAR;
+
+ case OP_STAR:
+ case OP_MINSTAR:
+ case OP_PLUS:
+ case OP_MINPLUS:
+ case OP_QUERY:
+ case OP_MINQUERY:
+ c = *ecode++ - OP_STAR;
+ minimize = (c & 1) != 0;
+ min = rep_min[c]; /* Pick up values from tables; */
+ max = rep_max[c]; /* zero for max => infinity */
+ if (max == 0) max = INT_MAX;
+
+ /* Common code for all repeated single-character matches. We can give
+ up quickly if there are fewer than the minimum number of characters left in
+ the subject. */
+
+ REPEATCHAR:
+#ifdef SUPPORT_UTF8
+ if (utf8)
+ {
+ length = 1;
+ charptr = ecode;
+ GETCHARLEN(fc, ecode, length);
+ if (min * length > md->end_subject - eptr) RRETURN(MATCH_NOMATCH);
+ ecode += length;
+
+ /* Handle multibyte character matching specially here. There is
+ support for caseless matching if UCP support is present. */
+
+ if (length > 1)
+ {
+#ifdef SUPPORT_UCP
+ unsigned int othercase;
+ if ((ims & PCRE_CASELESS) != 0 &&
+ (othercase = _pcre_ucp_othercase(fc)) != NOTACHAR)
+ oclength = _pcre_ord2utf8(othercase, occhars);
+ else oclength = 0;
+#endif /* SUPPORT_UCP */
+
+ for (i = 1; i <= min; i++)
+ {
+ if (memcmp(eptr, charptr, length) == 0) eptr += length;
+#ifdef SUPPORT_UCP
+ /* Need braces because of following else */
+ else if (oclength == 0) { RRETURN(MATCH_NOMATCH); }
+ else
+ {
+ if (memcmp(eptr, occhars, oclength) != 0) RRETURN(MATCH_NOMATCH);
+ eptr += oclength;
+ }
+#else /* without SUPPORT_UCP */
+ else { RRETURN(MATCH_NOMATCH); }
+#endif /* SUPPORT_UCP */
+ }
+
+ if (min == max) continue;
+
+ if (minimize)
+ {
+ for (fi = min;; fi++)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM22);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ if (fi >= max || eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ if (memcmp(eptr, charptr, length) == 0) eptr += length;
+#ifdef SUPPORT_UCP
+ /* Need braces because of following else */
+ else if (oclength == 0) { RRETURN(MATCH_NOMATCH); }
+ else
+ {
+ if (memcmp(eptr, occhars, oclength) != 0) RRETURN(MATCH_NOMATCH);
+ eptr += oclength;
+ }
+#else /* without SUPPORT_UCP */
+ else { RRETURN (MATCH_NOMATCH); }
+#endif /* SUPPORT_UCP */
+ }
+ /* Control never gets here */
+ }
+
+ else /* Maximize */
+ {
+ pp = eptr;
+ for (i = min; i < max; i++)
+ {
+ if (eptr > md->end_subject - length) break;
+ if (memcmp(eptr, charptr, length) == 0) eptr += length;
+#ifdef SUPPORT_UCP
+ else if (oclength == 0) break;
+ else
+ {
+ if (memcmp(eptr, occhars, oclength) != 0) break;
+ eptr += oclength;
+ }
+#else /* without SUPPORT_UCP */
+ else break;
+#endif /* SUPPORT_UCP */
+ }
+
+ if (possessive) continue;
+ for(;;)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM23);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ if (eptr == pp) RRETURN(MATCH_NOMATCH);
+#ifdef SUPPORT_UCP
+ eptr--;
+ BACKCHAR(eptr);
+#else /* without SUPPORT_UCP */
+ eptr -= length;
+#endif /* SUPPORT_UCP */
+ }
+ }
+ /* Control never gets here */
+ }
+
+ /* If the length of a UTF-8 character is 1, we fall through here, and
+ obey the code as for non-UTF-8 characters below, though in this case the
+ value of fc will always be < 128. */
+ }
+ else
+#endif /* SUPPORT_UTF8 */
+
+ /* When not in UTF-8 mode, load a single-byte character. */
+ {
+ if (min > md->end_subject - eptr) RRETURN(MATCH_NOMATCH);
+ fc = *ecode++;
+ }
+
+ /* The value of fc at this point is always less than 256, though we may or
+ may not be in UTF-8 mode. The code is duplicated for the caseless and
+ caseful cases, for speed, since matching characters is likely to be quite
+ common. First, ensure the minimum number of matches are present. If min =
+ max, continue at the same level without recursing. Otherwise, if
+ minimizing, keep trying the rest of the expression and advancing one
+ matching character if failing, up to the maximum. Alternatively, if
+ maximizing, find the maximum number of characters and work backwards. */
+
+ DPRINTF(("matching %c{%d,%d} against subject %.*s\n", fc, min, max,
+ max, eptr));
+
+ if ((ims & PCRE_CASELESS) != 0)
+ {
+ fc = md->lcc[fc];
+ for (i = 1; i <= min; i++)
+ if (fc != md->lcc[*eptr++]) RRETURN(MATCH_NOMATCH);
+ if (min == max) continue;
+ if (minimize)
+ {
+ for (fi = min;; fi++)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM24);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ if (fi >= max || eptr >= md->end_subject ||
+ fc != md->lcc[*eptr++])
+ RRETURN(MATCH_NOMATCH);
+ }
+ /* Control never gets here */
+ }
+ else /* Maximize */
+ {
+ pp = eptr;
+ for (i = min; i < max; i++)
+ {
+ if (eptr >= md->end_subject || fc != md->lcc[*eptr]) break;
+ eptr++;
+ }
+ if (possessive) continue;
+ while (eptr >= pp)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM25);
+ eptr--;
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ }
+ RRETURN(MATCH_NOMATCH);
+ }
+ /* Control never gets here */
+ }
+
+ /* Caseful comparisons (includes all multi-byte characters) */
+
+ else
+ {
+ for (i = 1; i <= min; i++) if (fc != *eptr++) RRETURN(MATCH_NOMATCH);
+ if (min == max) continue;
+ if (minimize)
+ {
+ for (fi = min;; fi++)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM26);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ if (fi >= max || eptr >= md->end_subject || fc != *eptr++)
+ RRETURN(MATCH_NOMATCH);
+ }
+ /* Control never gets here */
+ }
+ else /* Maximize */
+ {
+ pp = eptr;
+ for (i = min; i < max; i++)
+ {
+ if (eptr >= md->end_subject || fc != *eptr) break;
+ eptr++;
+ }
+ if (possessive) continue;
+ while (eptr >= pp)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM27);
+ eptr--;
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ }
+ RRETURN(MATCH_NOMATCH);
+ }
+ }
+ /* Control never gets here */
+
+ /* Match a negated single one-byte character. The character we are
+ checking can be multibyte. */
+
+ case OP_NOT:
+ if (eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ ecode++;
+ GETCHARINCTEST(c, eptr);
+ if ((ims & PCRE_CASELESS) != 0)
+ {
+#ifdef SUPPORT_UTF8
+ if (c < 256)
+#endif
+ c = md->lcc[c];
+ if (md->lcc[*ecode++] == c) RRETURN(MATCH_NOMATCH);
+ }
+ else
+ {
+ if (*ecode++ == c) RRETURN(MATCH_NOMATCH);
+ }
+ break;
+
+ /* Match a negated single one-byte character repeatedly. This is almost a
+ repeat of the code for a repeated single character, but I haven't found a
+ nice way of commoning these up that doesn't require a test of the
+ positive/negative option for each character match. Maybe that wouldn't add
+ very much to the time taken, but character matching *is* what this is all
+ about... */
+
+ case OP_NOTEXACT:
+ min = max = GET2(ecode, 1);
+ ecode += 3;
+ goto REPEATNOTCHAR;
+
+ case OP_NOTUPTO:
+ case OP_NOTMINUPTO:
+ min = 0;
+ max = GET2(ecode, 1);
+ minimize = *ecode == OP_NOTMINUPTO;
+ ecode += 3;
+ goto REPEATNOTCHAR;
+
+ case OP_NOTPOSSTAR:
+ possessive = TRUE;
+ min = 0;
+ max = INT_MAX;
+ ecode++;
+ goto REPEATNOTCHAR;
+
+ case OP_NOTPOSPLUS:
+ possessive = TRUE;
+ min = 1;
+ max = INT_MAX;
+ ecode++;
+ goto REPEATNOTCHAR;
+
+ case OP_NOTPOSQUERY:
+ possessive = TRUE;
+ min = 0;
+ max = 1;
+ ecode++;
+ goto REPEATNOTCHAR;
+
+ case OP_NOTPOSUPTO:
+ possessive = TRUE;
+ min = 0;
+ max = GET2(ecode, 1);
+ ecode += 3;
+ goto REPEATNOTCHAR;
+
+ case OP_NOTSTAR:
+ case OP_NOTMINSTAR:
+ case OP_NOTPLUS:
+ case OP_NOTMINPLUS:
+ case OP_NOTQUERY:
+ case OP_NOTMINQUERY:
+ c = *ecode++ - OP_NOTSTAR;
+ minimize = (c & 1) != 0;
+ min = rep_min[c]; /* Pick up values from tables; */
+ max = rep_max[c]; /* zero for max => infinity */
+ if (max == 0) max = INT_MAX;
+
+ /* Common code for all repeated single-byte matches. We can give up quickly
+ if there are fewer than the minimum number of bytes left in the
+ subject. */
+
+ REPEATNOTCHAR:
+ if (min > md->end_subject - eptr) RRETURN(MATCH_NOMATCH);
+ fc = *ecode++;
+
+ /* The code is duplicated for the caseless and caseful cases, for speed,
+ since matching characters is likely to be quite common. First, ensure the
+ minimum number of matches are present. If min = max, continue at the same
+ level without recursing. Otherwise, if minimizing, keep trying the rest of
+ the expression and advancing one matching character if failing, up to the
+ maximum. Alternatively, if maximizing, find the maximum number of
+ characters and work backwards. */
+
+ DPRINTF(("negative matching %c{%d,%d} against subject %.*s\n", fc, min, max,
+ max, eptr));
+
+ if ((ims & PCRE_CASELESS) != 0)
+ {
+ fc = md->lcc[fc];
+
+#ifdef SUPPORT_UTF8
+ /* UTF-8 mode */
+ if (utf8)
+ {
+ register unsigned int d;
+ for (i = 1; i <= min; i++)
+ {
+ GETCHARINC(d, eptr);
+ if (d < 256) d = md->lcc[d];
+ if (fc == d) RRETURN(MATCH_NOMATCH);
+ }
+ }
+ else
+#endif
+
+ /* Not UTF-8 mode */
+ {
+ for (i = 1; i <= min; i++)
+ if (fc == md->lcc[*eptr++]) RRETURN(MATCH_NOMATCH);
+ }
+
+ if (min == max) continue;
+
+ if (minimize)
+ {
+#ifdef SUPPORT_UTF8
+ /* UTF-8 mode */
+ if (utf8)
+ {
+ register unsigned int d;
+ for (fi = min;; fi++)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM28);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ GETCHARINC(d, eptr);
+ if (d < 256) d = md->lcc[d];
+ if (fi >= max || eptr >= md->end_subject || fc == d)
+ RRETURN(MATCH_NOMATCH);
+ }
+ }
+ else
+#endif
+ /* Not UTF-8 mode */
+ {
+ for (fi = min;; fi++)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM29);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ if (fi >= max || eptr >= md->end_subject || fc == md->lcc[*eptr++])
+ RRETURN(MATCH_NOMATCH);
+ }
+ }
+ /* Control never gets here */
+ }
+
+ /* Maximize case */
+
+ else
+ {
+ pp = eptr;
+
+#ifdef SUPPORT_UTF8
+ /* UTF-8 mode */
+ if (utf8)
+ {
+ register unsigned int d;
+ for (i = min; i < max; i++)
+ {
+ int len = 1;
+ if (eptr >= md->end_subject) break;
+ GETCHARLEN(d, eptr, len);
+ if (d < 256) d = md->lcc[d];
+ if (fc == d) break;
+ eptr += len;
+ }
+ if (possessive) continue;
+ for(;;)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM30);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ if (eptr-- == pp) break; /* Stop if tried at original pos */
+ BACKCHAR(eptr);
+ }
+ }
+ else
+#endif
+ /* Not UTF-8 mode */
+ {
+ for (i = min; i < max; i++)
+ {
+ if (eptr >= md->end_subject || fc == md->lcc[*eptr]) break;
+ eptr++;
+ }
+ if (possessive) continue;
+ while (eptr >= pp)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM31);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ eptr--;
+ }
+ }
+
+ RRETURN(MATCH_NOMATCH);
+ }
+ /* Control never gets here */
+ }
+
+ /* Caseful comparisons */
+
+ else
+ {
+#ifdef SUPPORT_UTF8
+ /* UTF-8 mode */
+ if (utf8)
+ {
+ register unsigned int d;
+ for (i = 1; i <= min; i++)
+ {
+ GETCHARINC(d, eptr);
+ if (fc == d) RRETURN(MATCH_NOMATCH);
+ }
+ }
+ else
+#endif
+ /* Not UTF-8 mode */
+ {
+ for (i = 1; i <= min; i++)
+ if (fc == *eptr++) RRETURN(MATCH_NOMATCH);
+ }
+
+ if (min == max) continue;
+
+ if (minimize)
+ {
+#ifdef SUPPORT_UTF8
+ /* UTF-8 mode */
+ if (utf8)
+ {
+ register unsigned int d;
+ for (fi = min;; fi++)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM32);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ GETCHARINC(d, eptr);
+ if (fi >= max || eptr >= md->end_subject || fc == d)
+ RRETURN(MATCH_NOMATCH);
+ }
+ }
+ else
+#endif
+ /* Not UTF-8 mode */
+ {
+ for (fi = min;; fi++)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM33);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ if (fi >= max || eptr >= md->end_subject || fc == *eptr++)
+ RRETURN(MATCH_NOMATCH);
+ }
+ }
+ /* Control never gets here */
+ }
+
+ /* Maximize case */
+
+ else
+ {
+ pp = eptr;
+
+#ifdef SUPPORT_UTF8
+ /* UTF-8 mode */
+ if (utf8)
+ {
+ register unsigned int d;
+ for (i = min; i < max; i++)
+ {
+ int len = 1;
+ if (eptr >= md->end_subject) break;
+ GETCHARLEN(d, eptr, len);
+ if (fc == d) break;
+ eptr += len;
+ }
+ if (possessive) continue;
+ for(;;)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM34);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ if (eptr-- == pp) break; /* Stop if tried at original pos */
+ BACKCHAR(eptr);
+ }
+ }
+ else
+#endif
+ /* Not UTF-8 mode */
+ {
+ for (i = min; i < max; i++)
+ {
+ if (eptr >= md->end_subject || fc == *eptr) break;
+ eptr++;
+ }
+ if (possessive) continue;
+ while (eptr >= pp)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM35);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ eptr--;
+ }
+ }
+
+ RRETURN(MATCH_NOMATCH);
+ }
+ }
+ /* Control never gets here */
+
+ /* Match a single character type repeatedly; several different opcodes
+ share code. This is very similar to the code for single characters, but we
+ repeat it in the interests of efficiency. */
+
+ case OP_TYPEEXACT:
+ min = max = GET2(ecode, 1);
+ minimize = TRUE;
+ ecode += 3;
+ goto REPEATTYPE;
+
+ case OP_TYPEUPTO:
+ case OP_TYPEMINUPTO:
+ min = 0;
+ max = GET2(ecode, 1);
+ minimize = *ecode == OP_TYPEMINUPTO;
+ ecode += 3;
+ goto REPEATTYPE;
+
+ case OP_TYPEPOSSTAR:
+ possessive = TRUE;
+ min = 0;
+ max = INT_MAX;
+ ecode++;
+ goto REPEATTYPE;
+
+ case OP_TYPEPOSPLUS:
+ possessive = TRUE;
+ min = 1;
+ max = INT_MAX;
+ ecode++;
+ goto REPEATTYPE;
+
+ case OP_TYPEPOSQUERY:
+ possessive = TRUE;
+ min = 0;
+ max = 1;
+ ecode++;
+ goto REPEATTYPE;
+
+ case OP_TYPEPOSUPTO:
+ possessive = TRUE;
+ min = 0;
+ max = GET2(ecode, 1);
+ ecode += 3;
+ goto REPEATTYPE;
+
+ case OP_TYPESTAR:
+ case OP_TYPEMINSTAR:
+ case OP_TYPEPLUS:
+ case OP_TYPEMINPLUS:
+ case OP_TYPEQUERY:
+ case OP_TYPEMINQUERY:
+ c = *ecode++ - OP_TYPESTAR;
+ minimize = (c & 1) != 0;
+ min = rep_min[c]; /* Pick up values from tables; */
+ max = rep_max[c]; /* zero for max => infinity */
+ if (max == 0) max = INT_MAX;
+
+ /* Common code for all repeated single character type matches. Note that
+ in UTF-8 mode, '.' matches a character of any length, but for the other
+ character types, the valid characters are all one-byte long. */
+
+ REPEATTYPE:
+ ctype = *ecode++; /* Code for the character type */
+
+#ifdef SUPPORT_UCP
+ if (ctype == OP_PROP || ctype == OP_NOTPROP)
+ {
+ prop_fail_result = ctype == OP_NOTPROP;
+ prop_type = *ecode++;
+ prop_value = *ecode++;
+ }
+ else prop_type = -1;
+#endif
+
+ /* First, ensure the minimum number of matches are present. Use inline
+ code for maximizing the speed, and do the type test once at the start
+ (i.e. keep it out of the loop). Also we can test that there are at least
+ the minimum number of bytes before we start. This isn't as effective in
+ UTF-8 mode, but it does no harm. Separate the UTF-8 code completely as that
+ is tidier. Also separate the UCP code, which can be the same for both UTF-8
+ and single-bytes. */
+
+ if (min > md->end_subject - eptr) RRETURN(MATCH_NOMATCH);
+ if (min > 0)
+ {
+#ifdef SUPPORT_UCP
+ if (prop_type >= 0)
+ {
+ switch(prop_type)
+ {
+ case PT_ANY:
+ if (prop_fail_result) RRETURN(MATCH_NOMATCH);
+ for (i = 1; i <= min; i++)
+ {
+ if (eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINCTEST(c, eptr);
+ }
+ break;
+
+ case PT_LAMP:
+ for (i = 1; i <= min; i++)
+ {
+ if (eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINCTEST(c, eptr);
+ prop_category = _pcre_ucp_findprop(c, &prop_chartype, &prop_script);
+ if ((prop_chartype == ucp_Lu ||
+ prop_chartype == ucp_Ll ||
+ prop_chartype == ucp_Lt) == prop_fail_result)
+ RRETURN(MATCH_NOMATCH);
+ }
+ break;
+
+ case PT_GC:
+ for (i = 1; i <= min; i++)
+ {
+ if (eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINCTEST(c, eptr);
+ prop_category = _pcre_ucp_findprop(c, &prop_chartype, &prop_script);
+ if ((prop_category == prop_value) == prop_fail_result)
+ RRETURN(MATCH_NOMATCH);
+ }
+ break;
+
+ case PT_PC:
+ for (i = 1; i <= min; i++)
+ {
+ if (eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINCTEST(c, eptr);
+ prop_category = _pcre_ucp_findprop(c, &prop_chartype, &prop_script);
+ if ((prop_chartype == prop_value) == prop_fail_result)
+ RRETURN(MATCH_NOMATCH);
+ }
+ break;
+
+ case PT_SC:
+ for (i = 1; i <= min; i++)
+ {
+ if (eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINCTEST(c, eptr);
+ prop_category = _pcre_ucp_findprop(c, &prop_chartype, &prop_script);
+ if ((prop_script == prop_value) == prop_fail_result)
+ RRETURN(MATCH_NOMATCH);
+ }
+ break;
+
+ default:
+ RRETURN(PCRE_ERROR_INTERNAL);
+ }
+ }
+
+ /* Match extended Unicode sequences. We will get here only if the
+ support is in the binary; otherwise a compile-time error occurs. */
+
+ else if (ctype == OP_EXTUNI)
+ {
+ for (i = 1; i <= min; i++)
+ {
+ GETCHARINCTEST(c, eptr);
+ prop_category = _pcre_ucp_findprop(c, &prop_chartype, &prop_script);
+ if (prop_category == ucp_M) RRETURN(MATCH_NOMATCH);
+ while (eptr < md->end_subject)
+ {
+ int len = 1;
+ if (!utf8) c = *eptr; else
+ {
+ GETCHARLEN(c, eptr, len);
+ }
+ prop_category = _pcre_ucp_findprop(c, &prop_chartype, &prop_script);
+ if (prop_category != ucp_M) break;
+ eptr += len;
+ }
+ }
+ }
+
+ else
+#endif /* SUPPORT_UCP */
+
+/* Handle all other cases when the coding is UTF-8 */
+
+#ifdef SUPPORT_UTF8
+ if (utf8) switch(ctype)
+ {
+ case OP_ANY:
+ for (i = 1; i <= min; i++)
+ {
+ if (eptr >= md->end_subject ||
+ ((ims & PCRE_DOTALL) == 0 && IS_NEWLINE(eptr)))
+ RRETURN(MATCH_NOMATCH);
+ eptr++;
+ while (eptr < md->end_subject && (*eptr & 0xc0) == 0x80) eptr++;
+ }
+ break;
+
+ case OP_ANYBYTE:
+ eptr += min;
+ break;
+
+ case OP_ANYNL:
+ for (i = 1; i <= min; i++)
+ {
+ if (eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINC(c, eptr);
+ switch(c)
+ {
+ default: RRETURN(MATCH_NOMATCH);
+ case 0x000d:
+ if (eptr < md->end_subject && *eptr == 0x0a) eptr++;
+ break;
+
+ case 0x000a:
+ break;
+
+ case 0x000b:
+ case 0x000c:
+ case 0x0085:
+ case 0x2028:
+ case 0x2029:
+ if (md->bsr_anycrlf) RRETURN(MATCH_NOMATCH);
+ break;
+ }
+ }
+ break;
+
+ case OP_NOT_HSPACE:
+ for (i = 1; i <= min; i++)
+ {
+ if (eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINC(c, eptr);
+ switch(c)
+ {
+ default: break;
+ case 0x09: /* HT */
+ case 0x20: /* SPACE */
+ case 0xa0: /* NBSP */
+ case 0x1680: /* OGHAM SPACE MARK */
+ case 0x180e: /* MONGOLIAN VOWEL SEPARATOR */
+ case 0x2000: /* EN QUAD */
+ case 0x2001: /* EM QUAD */
+ case 0x2002: /* EN SPACE */
+ case 0x2003: /* EM SPACE */
+ case 0x2004: /* THREE-PER-EM SPACE */
+ case 0x2005: /* FOUR-PER-EM SPACE */
+ case 0x2006: /* SIX-PER-EM SPACE */
+ case 0x2007: /* FIGURE SPACE */
+ case 0x2008: /* PUNCTUATION SPACE */
+ case 0x2009: /* THIN SPACE */
+ case 0x200A: /* HAIR SPACE */
+ case 0x202f: /* NARROW NO-BREAK SPACE */
+ case 0x205f: /* MEDIUM MATHEMATICAL SPACE */
+ case 0x3000: /* IDEOGRAPHIC SPACE */
+ RRETURN(MATCH_NOMATCH);
+ }
+ }
+ break;
+
+ case OP_HSPACE:
+ for (i = 1; i <= min; i++)
+ {
+ if (eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINC(c, eptr);
+ switch(c)
+ {
+ default: RRETURN(MATCH_NOMATCH);
+ case 0x09: /* HT */
+ case 0x20: /* SPACE */
+ case 0xa0: /* NBSP */
+ case 0x1680: /* OGHAM SPACE MARK */
+ case 0x180e: /* MONGOLIAN VOWEL SEPARATOR */
+ case 0x2000: /* EN QUAD */
+ case 0x2001: /* EM QUAD */
+ case 0x2002: /* EN SPACE */
+ case 0x2003: /* EM SPACE */
+ case 0x2004: /* THREE-PER-EM SPACE */
+ case 0x2005: /* FOUR-PER-EM SPACE */
+ case 0x2006: /* SIX-PER-EM SPACE */
+ case 0x2007: /* FIGURE SPACE */
+ case 0x2008: /* PUNCTUATION SPACE */
+ case 0x2009: /* THIN SPACE */
+ case 0x200A: /* HAIR SPACE */
+ case 0x202f: /* NARROW NO-BREAK SPACE */
+ case 0x205f: /* MEDIUM MATHEMATICAL SPACE */
+ case 0x3000: /* IDEOGRAPHIC SPACE */
+ break;
+ }
+ }
+ break;
+
+ case OP_NOT_VSPACE:
+ for (i = 1; i <= min; i++)
+ {
+ if (eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINC(c, eptr);
+ switch(c)
+ {
+ default: break;
+ case 0x0a: /* LF */
+ case 0x0b: /* VT */
+ case 0x0c: /* FF */
+ case 0x0d: /* CR */
+ case 0x85: /* NEL */
+ case 0x2028: /* LINE SEPARATOR */
+ case 0x2029: /* PARAGRAPH SEPARATOR */
+ RRETURN(MATCH_NOMATCH);
+ }
+ }
+ break;
+
+ case OP_VSPACE:
+ for (i = 1; i <= min; i++)
+ {
+ if (eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINC(c, eptr);
+ switch(c)
+ {
+ default: RRETURN(MATCH_NOMATCH);
+ case 0x0a: /* LF */
+ case 0x0b: /* VT */
+ case 0x0c: /* FF */
+ case 0x0d: /* CR */
+ case 0x85: /* NEL */
+ case 0x2028: /* LINE SEPARATOR */
+ case 0x2029: /* PARAGRAPH SEPARATOR */
+ break;
+ }
+ }
+ break;
+
+ case OP_NOT_DIGIT:
+ for (i = 1; i <= min; i++)
+ {
+ if (eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINC(c, eptr);
+ if (c < 128 && (md->ctypes[c] & ctype_digit) != 0)
+ RRETURN(MATCH_NOMATCH);
+ }
+ break;
+
+ case OP_DIGIT:
+ for (i = 1; i <= min; i++)
+ {
+ if (eptr >= md->end_subject ||
+ *eptr >= 128 || (md->ctypes[*eptr++] & ctype_digit) == 0)
+ RRETURN(MATCH_NOMATCH);
+ /* No need to skip more bytes - we know it's a 1-byte character */
+ }
+ break;
+
+ case OP_NOT_WHITESPACE:
+ for (i = 1; i <= min; i++)
+ {
+ if (eptr >= md->end_subject ||
+ (*eptr < 128 && (md->ctypes[*eptr] & ctype_space) != 0))
+ RRETURN(MATCH_NOMATCH);
+ while (++eptr < md->end_subject && (*eptr & 0xc0) == 0x80);
+ }
+ break;
+
+ case OP_WHITESPACE:
+ for (i = 1; i <= min; i++)
+ {
+ if (eptr >= md->end_subject ||
+ *eptr >= 128 || (md->ctypes[*eptr++] & ctype_space) == 0)
+ RRETURN(MATCH_NOMATCH);
+ /* No need to skip more bytes - we know it's a 1-byte character */
+ }
+ break;
+
+ case OP_NOT_WORDCHAR:
+ for (i = 1; i <= min; i++)
+ {
+ if (eptr >= md->end_subject ||
+ (*eptr < 128 && (md->ctypes[*eptr] & ctype_word) != 0))
+ RRETURN(MATCH_NOMATCH);
+ while (++eptr < md->end_subject && (*eptr & 0xc0) == 0x80);
+ }
+ break;
+
+ case OP_WORDCHAR:
+ for (i = 1; i <= min; i++)
+ {
+ if (eptr >= md->end_subject ||
+ *eptr >= 128 || (md->ctypes[*eptr++] & ctype_word) == 0)
+ RRETURN(MATCH_NOMATCH);
+ /* No need to skip more bytes - we know it's a 1-byte character */
+ }
+ break;
+
+ default:
+ RRETURN(PCRE_ERROR_INTERNAL);
+ } /* End switch(ctype) */
+
+ else
+#endif /* SUPPORT_UTF8 */
+
+ /* Code for the non-UTF-8 case for minimum matching of operators other
+ than OP_PROP and OP_NOTPROP. We can assume that there are the minimum
+ number of bytes present, as this was tested above. */
+
+ switch(ctype)
+ {
+ case OP_ANY:
+ if ((ims & PCRE_DOTALL) == 0)
+ {
+ for (i = 1; i <= min; i++)
+ {
+ if (IS_NEWLINE(eptr)) RRETURN(MATCH_NOMATCH);
+ eptr++;
+ }
+ }
+ else eptr += min;
+ break;
+
+ case OP_ANYBYTE:
+ eptr += min;
+ break;
+
+ /* Because of the CRLF case, we can't assume the minimum number of
+ bytes are present in this case. */
+
+ case OP_ANYNL:
+ for (i = 1; i <= min; i++)
+ {
+ if (eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ switch(*eptr++)
+ {
+ default: RRETURN(MATCH_NOMATCH);
+ case 0x000d:
+ if (eptr < md->end_subject && *eptr == 0x0a) eptr++;
+ break;
+ case 0x000a:
+ break;
+
+ case 0x000b:
+ case 0x000c:
+ case 0x0085:
+ if (md->bsr_anycrlf) RRETURN(MATCH_NOMATCH);
+ break;
+ }
+ }
+ break;
+
+ case OP_NOT_HSPACE:
+ for (i = 1; i <= min; i++)
+ {
+ if (eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ switch(*eptr++)
+ {
+ default: break;
+ case 0x09: /* HT */
+ case 0x20: /* SPACE */
+ case 0xa0: /* NBSP */
+ RRETURN(MATCH_NOMATCH);
+ }
+ }
+ break;
+
+ case OP_HSPACE:
+ for (i = 1; i <= min; i++)
+ {
+ if (eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ switch(*eptr++)
+ {
+ default: RRETURN(MATCH_NOMATCH);
+ case 0x09: /* HT */
+ case 0x20: /* SPACE */
+ case 0xa0: /* NBSP */
+ break;
+ }
+ }
+ break;
+
+ case OP_NOT_VSPACE:
+ for (i = 1; i <= min; i++)
+ {
+ if (eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ switch(*eptr++)
+ {
+ default: break;
+ case 0x0a: /* LF */
+ case 0x0b: /* VT */
+ case 0x0c: /* FF */
+ case 0x0d: /* CR */
+ case 0x85: /* NEL */
+ RRETURN(MATCH_NOMATCH);
+ }
+ }
+ break;
+
+ case OP_VSPACE:
+ for (i = 1; i <= min; i++)
+ {
+ if (eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ switch(*eptr++)
+ {
+ default: RRETURN(MATCH_NOMATCH);
+ case 0x0a: /* LF */
+ case 0x0b: /* VT */
+ case 0x0c: /* FF */
+ case 0x0d: /* CR */
+ case 0x85: /* NEL */
+ break;
+ }
+ }
+ break;
+
+ case OP_NOT_DIGIT:
+ for (i = 1; i <= min; i++)
+ if ((md->ctypes[*eptr++] & ctype_digit) != 0) RRETURN(MATCH_NOMATCH);
+ break;
+
+ case OP_DIGIT:
+ for (i = 1; i <= min; i++)
+ if ((md->ctypes[*eptr++] & ctype_digit) == 0) RRETURN(MATCH_NOMATCH);
+ break;
+
+ case OP_NOT_WHITESPACE:
+ for (i = 1; i <= min; i++)
+ if ((md->ctypes[*eptr++] & ctype_space) != 0) RRETURN(MATCH_NOMATCH);
+ break;
+
+ case OP_WHITESPACE:
+ for (i = 1; i <= min; i++)
+ if ((md->ctypes[*eptr++] & ctype_space) == 0) RRETURN(MATCH_NOMATCH);
+ break;
+
+ case OP_NOT_WORDCHAR:
+ for (i = 1; i <= min; i++)
+ if ((md->ctypes[*eptr++] & ctype_word) != 0)
+ RRETURN(MATCH_NOMATCH);
+ break;
+
+ case OP_WORDCHAR:
+ for (i = 1; i <= min; i++)
+ if ((md->ctypes[*eptr++] & ctype_word) == 0)
+ RRETURN(MATCH_NOMATCH);
+ break;
+
+ default:
+ RRETURN(PCRE_ERROR_INTERNAL);
+ }
+ }
+
+ /* If min = max, continue at the same level without recursing */
+
+ if (min == max) continue;
+
+ /* If minimizing, we have to test the rest of the pattern before each
+ subsequent match. Again, separate the UTF-8 case for speed, and also
+ separate the UCP cases. */
+
+ if (minimize)
+ {
+#ifdef SUPPORT_UCP
+ if (prop_type >= 0)
+ {
+ switch(prop_type)
+ {
+ case PT_ANY:
+ for (fi = min;; fi++)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM36);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ if (fi >= max || eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINC(c, eptr);
+ if (prop_fail_result) RRETURN(MATCH_NOMATCH);
+ }
+ /* Control never gets here */
+
+ case PT_LAMP:
+ for (fi = min;; fi++)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM37);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ if (fi >= max || eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINC(c, eptr);
+ prop_category = _pcre_ucp_findprop(c, &prop_chartype, &prop_script);
+ if ((prop_chartype == ucp_Lu ||
+ prop_chartype == ucp_Ll ||
+ prop_chartype == ucp_Lt) == prop_fail_result)
+ RRETURN(MATCH_NOMATCH);
+ }
+ /* Control never gets here */
+
+ case PT_GC:
+ for (fi = min;; fi++)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM38);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ if (fi >= max || eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINC(c, eptr);
+ prop_category = _pcre_ucp_findprop(c, &prop_chartype, &prop_script);
+ if ((prop_category == prop_value) == prop_fail_result)
+ RRETURN(MATCH_NOMATCH);
+ }
+ /* Control never gets here */
+
+ case PT_PC:
+ for (fi = min;; fi++)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM39);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ if (fi >= max || eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINC(c, eptr);
+ prop_category = _pcre_ucp_findprop(c, &prop_chartype, &prop_script);
+ if ((prop_chartype == prop_value) == prop_fail_result)
+ RRETURN(MATCH_NOMATCH);
+ }
+ /* Control never gets here */
+
+ case PT_SC:
+ for (fi = min;; fi++)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM40);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ if (fi >= max || eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINC(c, eptr);
+ prop_category = _pcre_ucp_findprop(c, &prop_chartype, &prop_script);
+ if ((prop_script == prop_value) == prop_fail_result)
+ RRETURN(MATCH_NOMATCH);
+ }
+ /* Control never gets here */
+
+ default:
+ RRETURN(PCRE_ERROR_INTERNAL);
+ }
+ }
+
+ /* Match extended Unicode sequences. We will get here only if the
+ support is in the binary; otherwise a compile-time error occurs. */
+
+ else if (ctype == OP_EXTUNI)
+ {
+ for (fi = min;; fi++)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM41);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ if (fi >= max || eptr >= md->end_subject) RRETURN(MATCH_NOMATCH);
+ GETCHARINCTEST(c, eptr);
+ prop_category = _pcre_ucp_findprop(c, &prop_chartype, &prop_script);
+ if (prop_category == ucp_M) RRETURN(MATCH_NOMATCH);
+ while (eptr < md->end_subject)
+ {
+ int len = 1;
+ if (!utf8) c = *eptr; else
+ {
+ GETCHARLEN(c, eptr, len);
+ }
+ prop_category = _pcre_ucp_findprop(c, &prop_chartype, &prop_script);
+ if (prop_category != ucp_M) break;
+ eptr += len;
+ }
+ }
+ }
+
+ else
+#endif /* SUPPORT_UCP */
+
+#ifdef SUPPORT_UTF8
+ /* UTF-8 mode */
+ if (utf8)
+ {
+ for (fi = min;; fi++)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM42);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ if (fi >= max || eptr >= md->end_subject ||
+ (ctype == OP_ANY && (ims & PCRE_DOTALL) == 0 &&
+ IS_NEWLINE(eptr)))
+ RRETURN(MATCH_NOMATCH);
+
+ GETCHARINC(c, eptr);
+ switch(ctype)
+ {
+ case OP_ANY: /* This is the DOTALL case */
+ break;
+
+ case OP_ANYBYTE:
+ break;
+
+ case OP_ANYNL:
+ switch(c)
+ {
+ default: RRETURN(MATCH_NOMATCH);
+ case 0x000d:
+ if (eptr < md->end_subject && *eptr == 0x0a) eptr++;
+ break;
+ case 0x000a:
+ break;
+
+ case 0x000b:
+ case 0x000c:
+ case 0x0085:
+ case 0x2028:
+ case 0x2029:
+ if (md->bsr_anycrlf) RRETURN(MATCH_NOMATCH);
+ break;
+ }
+ break;
+
+ case OP_NOT_HSPACE:
+ switch(c)
+ {
+ default: break;
+ case 0x09: /* HT */
+ case 0x20: /* SPACE */
+ case 0xa0: /* NBSP */
+ case 0x1680: /* OGHAM SPACE MARK */
+ case 0x180e: /* MONGOLIAN VOWEL SEPARATOR */
+ case 0x2000: /* EN QUAD */
+ case 0x2001: /* EM QUAD */
+ case 0x2002: /* EN SPACE */
+ case 0x2003: /* EM SPACE */
+ case 0x2004: /* THREE-PER-EM SPACE */
+ case 0x2005: /* FOUR-PER-EM SPACE */
+ case 0x2006: /* SIX-PER-EM SPACE */
+ case 0x2007: /* FIGURE SPACE */
+ case 0x2008: /* PUNCTUATION SPACE */
+ case 0x2009: /* THIN SPACE */
+ case 0x200A: /* HAIR SPACE */
+ case 0x202f: /* NARROW NO-BREAK SPACE */
+ case 0x205f: /* MEDIUM MATHEMATICAL SPACE */
+ case 0x3000: /* IDEOGRAPHIC SPACE */
+ RRETURN(MATCH_NOMATCH);
+ }
+ break;
+
+ case OP_HSPACE:
+ switch(c)
+ {
+ default: RRETURN(MATCH_NOMATCH);
+ case 0x09: /* HT */
+ case 0x20: /* SPACE */
+ case 0xa0: /* NBSP */
+ case 0x1680: /* OGHAM SPACE MARK */
+ case 0x180e: /* MONGOLIAN VOWEL SEPARATOR */
+ case 0x2000: /* EN QUAD */
+ case 0x2001: /* EM QUAD */
+ case 0x2002: /* EN SPACE */
+ case 0x2003: /* EM SPACE */
+ case 0x2004: /* THREE-PER-EM SPACE */
+ case 0x2005: /* FOUR-PER-EM SPACE */
+ case 0x2006: /* SIX-PER-EM SPACE */
+ case 0x2007: /* FIGURE SPACE */
+ case 0x2008: /* PUNCTUATION SPACE */
+ case 0x2009: /* THIN SPACE */
+ case 0x200A: /* HAIR SPACE */
+ case 0x202f: /* NARROW NO-BREAK SPACE */
+ case 0x205f: /* MEDIUM MATHEMATICAL SPACE */
+ case 0x3000: /* IDEOGRAPHIC SPACE */
+ break;
+ }
+ break;
+
+ case OP_NOT_VSPACE:
+ switch(c)
+ {
+ default: break;
+ case 0x0a: /* LF */
+ case 0x0b: /* VT */
+ case 0x0c: /* FF */
+ case 0x0d: /* CR */
+ case 0x85: /* NEL */
+ case 0x2028: /* LINE SEPARATOR */
+ case 0x2029: /* PARAGRAPH SEPARATOR */
+ RRETURN(MATCH_NOMATCH);
+ }
+ break;
+
+ case OP_VSPACE:
+ switch(c)
+ {
+ default: RRETURN(MATCH_NOMATCH);
+ case 0x0a: /* LF */
+ case 0x0b: /* VT */
+ case 0x0c: /* FF */
+ case 0x0d: /* CR */
+ case 0x85: /* NEL */
+ case 0x2028: /* LINE SEPARATOR */
+ case 0x2029: /* PARAGRAPH SEPARATOR */
+ break;
+ }
+ break;
+
+ case OP_NOT_DIGIT:
+ if (c < 256 && (md->ctypes[c] & ctype_digit) != 0)
+ RRETURN(MATCH_NOMATCH);
+ break;
+
+ case OP_DIGIT:
+ if (c >= 256 || (md->ctypes[c] & ctype_digit) == 0)
+ RRETURN(MATCH_NOMATCH);
+ break;
+
+ case OP_NOT_WHITESPACE:
+ if (c < 256 && (md->ctypes[c] & ctype_space) != 0)
+ RRETURN(MATCH_NOMATCH);
+ break;
+
+ case OP_WHITESPACE:
+ if (c >= 256 || (md->ctypes[c] & ctype_space) == 0)
+ RRETURN(MATCH_NOMATCH);
+ break;
+
+ case OP_NOT_WORDCHAR:
+ if (c < 256 && (md->ctypes[c] & ctype_word) != 0)
+ RRETURN(MATCH_NOMATCH);
+ break;
+
+ case OP_WORDCHAR:
+ if (c >= 256 || (md->ctypes[c] & ctype_word) == 0)
+ RRETURN(MATCH_NOMATCH);
+ break;
+
+ default:
+ RRETURN(PCRE_ERROR_INTERNAL);
+ }
+ }
+ }
+ else
+#endif
+ /* Not UTF-8 mode */
+ {
+ for (fi = min;; fi++)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM43);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ if (fi >= max || eptr >= md->end_subject ||
+ ((ims & PCRE_DOTALL) == 0 && IS_NEWLINE(eptr)))
+ RRETURN(MATCH_NOMATCH);
+
+ c = *eptr++;
+ switch(ctype)
+ {
+ case OP_ANY: /* This is the DOTALL case */
+ break;
+
+ case OP_ANYBYTE:
+ break;
+
+ case OP_ANYNL:
+ switch(c)
+ {
+ default: RRETURN(MATCH_NOMATCH);
+ case 0x000d:
+ if (eptr < md->end_subject && *eptr == 0x0a) eptr++;
+ break;
+
+ case 0x000a:
+ break;
+
+ case 0x000b:
+ case 0x000c:
+ case 0x0085:
+ if (md->bsr_anycrlf) RRETURN(MATCH_NOMATCH);
+ break;
+ }
+ break;
+
+ case OP_NOT_HSPACE:
+ switch(c)
+ {
+ default: break;
+ case 0x09: /* HT */
+ case 0x20: /* SPACE */
+ case 0xa0: /* NBSP */
+ RRETURN(MATCH_NOMATCH);
+ }
+ break;
+
+ case OP_HSPACE:
+ switch(c)
+ {
+ default: RRETURN(MATCH_NOMATCH);
+ case 0x09: /* HT */
+ case 0x20: /* SPACE */
+ case 0xa0: /* NBSP */
+ break;
+ }
+ break;
+
+ case OP_NOT_VSPACE:
+ switch(c)
+ {
+ default: break;
+ case 0x0a: /* LF */
+ case 0x0b: /* VT */
+ case 0x0c: /* FF */
+ case 0x0d: /* CR */
+ case 0x85: /* NEL */
+ RRETURN(MATCH_NOMATCH);
+ }
+ break;
+
+ case OP_VSPACE:
+ switch(c)
+ {
+ default: RRETURN(MATCH_NOMATCH);
+ case 0x0a: /* LF */
+ case 0x0b: /* VT */
+ case 0x0c: /* FF */
+ case 0x0d: /* CR */
+ case 0x85: /* NEL */
+ break;
+ }
+ break;
+
+ case OP_NOT_DIGIT:
+ if ((md->ctypes[c] & ctype_digit) != 0) RRETURN(MATCH_NOMATCH);
+ break;
+
+ case OP_DIGIT:
+ if ((md->ctypes[c] & ctype_digit) == 0) RRETURN(MATCH_NOMATCH);
+ break;
+
+ case OP_NOT_WHITESPACE:
+ if ((md->ctypes[c] & ctype_space) != 0) RRETURN(MATCH_NOMATCH);
+ break;
+
+ case OP_WHITESPACE:
+ if ((md->ctypes[c] & ctype_space) == 0) RRETURN(MATCH_NOMATCH);
+ break;
+
+ case OP_NOT_WORDCHAR:
+ if ((md->ctypes[c] & ctype_word) != 0) RRETURN(MATCH_NOMATCH);
+ break;
+
+ case OP_WORDCHAR:
+ if ((md->ctypes[c] & ctype_word) == 0) RRETURN(MATCH_NOMATCH);
+ break;
+
+ default:
+ RRETURN(PCRE_ERROR_INTERNAL);
+ }
+ }
+ }
+ /* Control never gets here */
+ }
+
+ /* If maximizing, it is worth using inline code for speed, doing the type
+ test once at the start (i.e. keep it out of the loop). Again, keep the
+ UTF-8 and UCP stuff separate. */
+
+ else
+ {
+ pp = eptr; /* Remember where we started */
+
+#ifdef SUPPORT_UCP
+ if (prop_type >= 0)
+ {
+ switch(prop_type)
+ {
+ case PT_ANY:
+ for (i = min; i < max; i++)
+ {
+ int len = 1;
+ if (eptr >= md->end_subject) break;
+ GETCHARLEN(c, eptr, len);
+ if (prop_fail_result) break;
+ eptr+= len;
+ }
+ break;
+
+ case PT_LAMP:
+ for (i = min; i < max; i++)
+ {
+ int len = 1;
+ if (eptr >= md->end_subject) break;
+ GETCHARLEN(c, eptr, len);
+ prop_category = _pcre_ucp_findprop(c, &prop_chartype, &prop_script);
+ if ((prop_chartype == ucp_Lu ||
+ prop_chartype == ucp_Ll ||
+ prop_chartype == ucp_Lt) == prop_fail_result)
+ break;
+ eptr+= len;
+ }
+ break;
+
+ case PT_GC:
+ for (i = min; i < max; i++)
+ {
+ int len = 1;
+ if (eptr >= md->end_subject) break;
+ GETCHARLEN(c, eptr, len);
+ prop_category = _pcre_ucp_findprop(c, &prop_chartype, &prop_script);
+ if ((prop_category == prop_value) == prop_fail_result)
+ break;
+ eptr+= len;
+ }
+ break;
+
+ case PT_PC:
+ for (i = min; i < max; i++)
+ {
+ int len = 1;
+ if (eptr >= md->end_subject) break;
+ GETCHARLEN(c, eptr, len);
+ prop_category = _pcre_ucp_findprop(c, &prop_chartype, &prop_script);
+ if ((prop_chartype == prop_value) == prop_fail_result)
+ break;
+ eptr+= len;
+ }
+ break;
+
+ case PT_SC:
+ for (i = min; i < max; i++)
+ {
+ int len = 1;
+ if (eptr >= md->end_subject) break;
+ GETCHARLEN(c, eptr, len);
+ prop_category = _pcre_ucp_findprop(c, &prop_chartype, &prop_script);
+ if ((prop_script == prop_value) == prop_fail_result)
+ break;
+ eptr+= len;
+ }
+ break;
+ }
+
+ /* eptr is now past the end of the maximum run */
+
+ if (possessive) continue;
+ for(;;)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM44);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ if (eptr-- == pp) break; /* Stop if tried at original pos */
+ if (utf8) BACKCHAR(eptr);
+ }
+ }
+
+ /* Match extended Unicode sequences. We will get here only if the
+ support is in the binary; otherwise a compile-time error occurs. */
+
+ else if (ctype == OP_EXTUNI)
+ {
+ for (i = min; i < max; i++)
+ {
+ if (eptr >= md->end_subject) break;
+ GETCHARINCTEST(c, eptr);
+ prop_category = _pcre_ucp_findprop(c, &prop_chartype, &prop_script);
+ if (prop_category == ucp_M) break;
+ while (eptr < md->end_subject)
+ {
+ int len = 1;
+ if (!utf8) c = *eptr; else
+ {
+ GETCHARLEN(c, eptr, len);
+ }
+ prop_category = _pcre_ucp_findprop(c, &prop_chartype, &prop_script);
+ if (prop_category != ucp_M) break;
+ eptr += len;
+ }
+ }
+
+ /* eptr is now past the end of the maximum run */
+
+ if (possessive) continue;
+ for(;;)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM45);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ if (eptr-- == pp) break; /* Stop if tried at original pos */
+ for (;;) /* Move back over one extended */
+ {
+ int len = 1;
+ if (!utf8) c = *eptr; else
+ {
+ BACKCHAR(eptr);
+ GETCHARLEN(c, eptr, len);
+ }
+ prop_category = _pcre_ucp_findprop(c, &prop_chartype, &prop_script);
+ if (prop_category != ucp_M) break;
+ eptr--;
+ }
+ }
+ }
+
+ else
+#endif /* SUPPORT_UCP */
+
+#ifdef SUPPORT_UTF8
+ /* UTF-8 mode */
+
+ if (utf8)
+ {
+ switch(ctype)
+ {
+ case OP_ANY:
+ if (max < INT_MAX)
+ {
+ if ((ims & PCRE_DOTALL) == 0)
+ {
+ for (i = min; i < max; i++)
+ {
+ if (eptr >= md->end_subject || IS_NEWLINE(eptr)) break;
+ eptr++;
+ while (eptr < md->end_subject && (*eptr & 0xc0) == 0x80) eptr++;
+ }
+ }
+ else
+ {
+ for (i = min; i < max; i++)
+ {
+ if (eptr >= md->end_subject) break;
+ eptr++;
+ while (eptr < md->end_subject && (*eptr & 0xc0) == 0x80) eptr++;
+ }
+ }
+ }
+
+ /* Handle unlimited UTF-8 repeat */
+
+ else
+ {
+ if ((ims & PCRE_DOTALL) == 0)
+ {
+ for (i = min; i < max; i++)
+ {
+ if (eptr >= md->end_subject || IS_NEWLINE(eptr)) break;
+ eptr++;
+ while (eptr < md->end_subject && (*eptr & 0xc0) == 0x80) eptr++;
+ }
+ }
+ else
+ {
+ eptr = md->end_subject;
+ }
+ }
+ break;
+
+ /* The byte case is the same as non-UTF8 */
+
+ case OP_ANYBYTE:
+ c = max - min;
+ if (c > (unsigned int)(md->end_subject - eptr))
+ c = md->end_subject - eptr;
+ eptr += c;
+ break;
+
+ case OP_ANYNL:
+ for (i = min; i < max; i++)
+ {
+ int len = 1;
+ if (eptr >= md->end_subject) break;
+ GETCHARLEN(c, eptr, len);
+ if (c == 0x000d)
+ {
+ if (++eptr >= md->end_subject) break;
+ if (*eptr == 0x000a) eptr++;
+ }
+ else
+ {
+ if (c != 0x000a &&
+ (md->bsr_anycrlf ||
+ (c != 0x000b && c != 0x000c &&
+ c != 0x0085 && c != 0x2028 && c != 0x2029)))
+ break;
+ eptr += len;
+ }
+ }
+ break;
+
+ case OP_NOT_HSPACE:
+ case OP_HSPACE:
+ for (i = min; i < max; i++)
+ {
+ BOOL gotspace;
+ int len = 1;
+ if (eptr >= md->end_subject) break;
+ GETCHARLEN(c, eptr, len);
+ switch(c)
+ {
+ default: gotspace = FALSE; break;
+ case 0x09: /* HT */
+ case 0x20: /* SPACE */
+ case 0xa0: /* NBSP */
+ case 0x1680: /* OGHAM SPACE MARK */
+ case 0x180e: /* MONGOLIAN VOWEL SEPARATOR */
+ case 0x2000: /* EN QUAD */
+ case 0x2001: /* EM QUAD */
+ case 0x2002: /* EN SPACE */
+ case 0x2003: /* EM SPACE */
+ case 0x2004: /* THREE-PER-EM SPACE */
+ case 0x2005: /* FOUR-PER-EM SPACE */
+ case 0x2006: /* SIX-PER-EM SPACE */
+ case 0x2007: /* FIGURE SPACE */
+ case 0x2008: /* PUNCTUATION SPACE */
+ case 0x2009: /* THIN SPACE */
+ case 0x200A: /* HAIR SPACE */
+ case 0x202f: /* NARROW NO-BREAK SPACE */
+ case 0x205f: /* MEDIUM MATHEMATICAL SPACE */
+ case 0x3000: /* IDEOGRAPHIC SPACE */
+ gotspace = TRUE;
+ break;
+ }
+ if (gotspace == (ctype == OP_NOT_HSPACE)) break;
+ eptr += len;
+ }
+ break;
+
+ case OP_NOT_VSPACE:
+ case OP_VSPACE:
+ for (i = min; i < max; i++)
+ {
+ BOOL gotspace;
+ int len = 1;
+ if (eptr >= md->end_subject) break;
+ GETCHARLEN(c, eptr, len);
+ switch(c)
+ {
+ default: gotspace = FALSE; break;
+ case 0x0a: /* LF */
+ case 0x0b: /* VT */
+ case 0x0c: /* FF */
+ case 0x0d: /* CR */
+ case 0x85: /* NEL */
+ case 0x2028: /* LINE SEPARATOR */
+ case 0x2029: /* PARAGRAPH SEPARATOR */
+ gotspace = TRUE;
+ break;
+ }
+ if (gotspace == (ctype == OP_NOT_VSPACE)) break;
+ eptr += len;
+ }
+ break;
+
+ case OP_NOT_DIGIT:
+ for (i = min; i < max; i++)
+ {
+ int len = 1;
+ if (eptr >= md->end_subject) break;
+ GETCHARLEN(c, eptr, len);
+ if (c < 256 && (md->ctypes[c] & ctype_digit) != 0) break;
+ eptr+= len;
+ }
+ break;
+
+ case OP_DIGIT:
+ for (i = min; i < max; i++)
+ {
+ int len = 1;
+ if (eptr >= md->end_subject) break;
+ GETCHARLEN(c, eptr, len);
+ if (c >= 256 ||(md->ctypes[c] & ctype_digit) == 0) break;
+ eptr+= len;
+ }
+ break;
+
+ case OP_NOT_WHITESPACE:
+ for (i = min; i < max; i++)
+ {
+ int len = 1;
+ if (eptr >= md->end_subject) break;
+ GETCHARLEN(c, eptr, len);
+ if (c < 256 && (md->ctypes[c] & ctype_space) != 0) break;
+ eptr+= len;
+ }
+ break;
+
+ case OP_WHITESPACE:
+ for (i = min; i < max; i++)
+ {
+ int len = 1;
+ if (eptr >= md->end_subject) break;
+ GETCHARLEN(c, eptr, len);
+ if (c >= 256 ||(md->ctypes[c] & ctype_space) == 0) break;
+ eptr+= len;
+ }
+ break;
+
+ case OP_NOT_WORDCHAR:
+ for (i = min; i < max; i++)
+ {
+ int len = 1;
+ if (eptr >= md->end_subject) break;
+ GETCHARLEN(c, eptr, len);
+ if (c < 256 && (md->ctypes[c] & ctype_word) != 0) break;
+ eptr+= len;
+ }
+ break;
+
+ case OP_WORDCHAR:
+ for (i = min; i < max; i++)
+ {
+ int len = 1;
+ if (eptr >= md->end_subject) break;
+ GETCHARLEN(c, eptr, len);
+ if (c >= 256 || (md->ctypes[c] & ctype_word) == 0) break;
+ eptr+= len;
+ }
+ break;
+
+ default:
+ RRETURN(PCRE_ERROR_INTERNAL);
+ }
+
+ /* eptr is now past the end of the maximum run */
+
+ if (possessive) continue;
+ for(;;)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM46);
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ if (eptr-- == pp) break; /* Stop if tried at original pos */
+ BACKCHAR(eptr);
+ }
+ }
+ else
+#endif /* SUPPORT_UTF8 */
+
+ /* Not UTF-8 mode */
+ {
+ switch(ctype)
+ {
+ case OP_ANY:
+ if ((ims & PCRE_DOTALL) == 0)
+ {
+ for (i = min; i < max; i++)
+ {
+ if (eptr >= md->end_subject || IS_NEWLINE(eptr)) break;
+ eptr++;
+ }
+ break;
+ }
+ /* For DOTALL case, fall through and treat as \C */
+
+ case OP_ANYBYTE:
+ c = max - min;
+ if (c > (unsigned int)(md->end_subject - eptr))
+ c = md->end_subject - eptr;
+ eptr += c;
+ break;
+
+ case OP_ANYNL:
+ for (i = min; i < max; i++)
+ {
+ if (eptr >= md->end_subject) break;
+ c = *eptr;
+ if (c == 0x000d)
+ {
+ if (++eptr >= md->end_subject) break;
+ if (*eptr == 0x000a) eptr++;
+ }
+ else
+ {
+ if (c != 0x000a &&
+ (md->bsr_anycrlf ||
+ (c != 0x000b && c != 0x000c && c != 0x0085)))
+ break;
+ eptr++;
+ }
+ }
+ break;
+
+ case OP_NOT_HSPACE:
+ for (i = min; i < max; i++)
+ {
+ if (eptr >= md->end_subject) break;
+ c = *eptr;
+ if (c == 0x09 || c == 0x20 || c == 0xa0) break;
+ eptr++;
+ }
+ break;
+
+ case OP_HSPACE:
+ for (i = min; i < max; i++)
+ {
+ if (eptr >= md->end_subject) break;
+ c = *eptr;
+ if (c != 0x09 && c != 0x20 && c != 0xa0) break;
+ eptr++;
+ }
+ break;
+
+ case OP_NOT_VSPACE:
+ for (i = min; i < max; i++)
+ {
+ if (eptr >= md->end_subject) break;
+ c = *eptr;
+ if (c == 0x0a || c == 0x0b || c == 0x0c || c == 0x0d || c == 0x85)
+ break;
+ eptr++;
+ }
+ break;
+
+ case OP_VSPACE:
+ for (i = min; i < max; i++)
+ {
+ if (eptr >= md->end_subject) break;
+ c = *eptr;
+ if (c != 0x0a && c != 0x0b && c != 0x0c && c != 0x0d && c != 0x85)
+ break;
+ eptr++;
+ }
+ break;
+
+ case OP_NOT_DIGIT:
+ for (i = min; i < max; i++)
+ {
+ if (eptr >= md->end_subject || (md->ctypes[*eptr] & ctype_digit) != 0)
+ break;
+ eptr++;
+ }
+ break;
+
+ case OP_DIGIT:
+ for (i = min; i < max; i++)
+ {
+ if (eptr >= md->end_subject || (md->ctypes[*eptr] & ctype_digit) == 0)
+ break;
+ eptr++;
+ }
+ break;
+
+ case OP_NOT_WHITESPACE:
+ for (i = min; i < max; i++)
+ {
+ if (eptr >= md->end_subject || (md->ctypes[*eptr] & ctype_space) != 0)
+ break;
+ eptr++;
+ }
+ break;
+
+ case OP_WHITESPACE:
+ for (i = min; i < max; i++)
+ {
+ if (eptr >= md->end_subject || (md->ctypes[*eptr] & ctype_space) == 0)
+ break;
+ eptr++;
+ }
+ break;
+
+ case OP_NOT_WORDCHAR:
+ for (i = min; i < max; i++)
+ {
+ if (eptr >= md->end_subject || (md->ctypes[*eptr] & ctype_word) != 0)
+ break;
+ eptr++;
+ }
+ break;
+
+ case OP_WORDCHAR:
+ for (i = min; i < max; i++)
+ {
+ if (eptr >= md->end_subject || (md->ctypes[*eptr] & ctype_word) == 0)
+ break;
+ eptr++;
+ }
+ break;
+
+ default:
+ RRETURN(PCRE_ERROR_INTERNAL);
+ }
+
+ /* eptr is now past the end of the maximum run */
+
+ if (possessive) continue;
+ while (eptr >= pp)
+ {
+ RMATCH(eptr, ecode, offset_top, md, ims, eptrb, 0, RM47);
+ eptr--;
+ if (rrc != MATCH_NOMATCH) RRETURN(rrc);
+ }
+ }
+
+ /* Get here if we can't make it match with any permitted repetitions */
+
+ RRETURN(MATCH_NOMATCH);
+ }
+ /* Control never gets here */
+
+ /* There's been some horrible disaster. Arrival here can only mean there is
+ something seriously wrong in the code above or the OP_xxx definitions. */
+
+ default:
+ DPRINTF(("Unknown opcode %d\n", *ecode));
+ RRETURN(PCRE_ERROR_UNKNOWN_OPCODE);
+ }
+
+ /* Do not stick any code in here without much thought; it is assumed
+ that "continue" in the code above comes out to here to repeat the main
+ loop. */
+
+ } /* End of main loop */
+/* Control never reaches here */
+
+
+/* When compiling to use the heap rather than the stack for recursive calls to
+match(), the RRETURN() macro jumps here. The number that is saved in
+frame->Xwhere indicates which label we actually want to return to. */
+
+#ifdef NO_RECURSE
+#define LBL(val) case val: goto L_RM##val;
+HEAP_RETURN:
+switch (frame->Xwhere)
+ {
+ LBL( 1) LBL( 2) LBL( 3) LBL( 4) LBL( 5) LBL( 6) LBL( 7) LBL( 8)
+ LBL( 9) LBL(10) LBL(11) LBL(12) LBL(13) LBL(14) LBL(15) LBL(17)
+ LBL(19) LBL(24) LBL(25) LBL(26) LBL(27) LBL(29) LBL(31) LBL(33)
+ LBL(35) LBL(43) LBL(47) LBL(48) LBL(49) LBL(50) LBL(51) LBL(52)
+ LBL(53) LBL(54)
+#ifdef SUPPORT_UTF8
+ LBL(16) LBL(18) LBL(20) LBL(21) LBL(22) LBL(23) LBL(28) LBL(30)
+ LBL(32) LBL(34) LBL(42) LBL(46)
+#ifdef SUPPORT_UCP
+ LBL(36) LBL(37) LBL(38) LBL(39) LBL(40) LBL(41) LBL(44) LBL(45)
+#endif /* SUPPORT_UCP */
+#endif /* SUPPORT_UTF8 */
+ default:
+ DPRINTF(("jump error in pcre match: label %d non-existent\n", frame->Xwhere));
+ return PCRE_ERROR_INTERNAL;
+ }
+#undef LBL
+#endif /* NO_RECURSE */
+}
+
+
+/***************************************************************************
+****************************************************************************
+ RECURSION IN THE match() FUNCTION
+
+Undefine all the macros that were defined above to handle this. */
+
+#ifdef NO_RECURSE
+#undef eptr
+#undef ecode
+#undef mstart
+#undef offset_top
+#undef ims
+#undef eptrb
+#undef flags
+
+#undef callpat
+#undef charptr
+#undef data
+#undef next
+#undef pp
+#undef prev
+#undef saved_eptr
+
+#undef new_recursive
+
+#undef cur_is_word
+#undef condition
+#undef prev_is_word
+
+#undef original_ims
+
+#undef ctype
+#undef length
+#undef max
+#undef min
+#undef number
+#undef offset
+#undef op
+#undef save_capture_last
+#undef save_offset1
+#undef save_offset2
+#undef save_offset3
+#undef stacksave
+
+#undef newptrb
+
+#endif
+
+/* These two are defined as macros in both cases */
+
+#undef fc
+#undef fi
+
+/***************************************************************************
+***************************************************************************/
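[Editor's note, not part of the patch.] The NO_RECURSE machinery that was just undefined works by recording, in each heap-allocated frame, the RMnn label at which the caller should resume once the simulated callee returns; the HEAP_RETURN switch above then dispatches on that saved number. The following standalone sketch uses entirely made-up names (frame, fact, RECURSE, L1) and a trivial factorial, purely to illustrate the same frame-plus-resume-label technique; it is not PCRE code.

    /* Minimal sketch of heap-simulated recursion with resume labels
       (hypothetical names, not PCRE's actual macros). */

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct frame {
      struct frame *prev;   /* caller's frame */
      int n;                /* argument of this simulated call */
      int where;            /* resume label in the caller; 0 = outermost */
    } frame;

    /* Computes n! iteratively in the style of the NO_RECURSE match(). */
    static int fact(int n)
    {
      frame *f = calloc(1, sizeof(frame));
      int rc;

      f->n = n;

    RECURSE:                       /* entry point of a simulated call */
      if (f->n <= 1) { rc = 1; goto RETURN; }

      {                            /* simulate "rc = fact(f->n - 1)" */
        frame *g = calloc(1, sizeof(frame));
        g->prev = f;
        g->n = f->n - 1;
        f->where = 1;              /* resume at label L1 when g finishes */
        f = g;
        goto RECURSE;
      }

    L1:                            /* back in the caller; rc holds the result */
      rc = f->n * rc;
      goto RETURN;

    RETURN:                        /* pop the frame, dispatch on saved "where" */
      {
        frame *prev = f->prev;
        free(f);
        f = prev;
        if (f == NULL) return rc;  /* outermost call finished */
        switch (f->where)
          {
          case 1: goto L1;
          default: abort();        /* analogue of the "jump error" branch */
          }
      }
    }

    int main(void)
    {
      printf("5! = %d\n", fact(5));   /* prints 120 */
      return 0;
    }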
+
+
+
+/*************************************************
+* Execute a Regular Expression *
+*************************************************/
+
+/* This function applies a compiled re to a subject string and picks out
+portions of the string if it matches. Two elements in the vector are set for
+each substring: the offsets to the start and end of the substring.
+
+Arguments:
+ argument_re points to the compiled expression
+ extra_data points to extra data or is NULL
+ subject points to the subject string
+ length length of subject string (may contain binary zeros)
+ start_offset where to start in the subject string
+ options option bits
+ offsets points to a vector of ints to be filled in with offsets
+ offsetcount the number of elements in the vector
+
+Returns: > 0 => success; value is the number of elements filled in
+ = 0 => success, but offsets is not big enough
+ -1 => failed to match
+ < -1 => some kind of unexpected problem
+*/
+
+PCRE_EXP_DEFN int
+pcre_exec(const pcre *argument_re, const pcre_extra *extra_data,
+ PCRE_SPTR subject, int length, int start_offset, int options, int *offsets,
+ int offsetcount)
+{
+int rc, resetcount, ocount;
+int first_byte = -1;
+int req_byte = -1;
+int req_byte2 = -1;
+int newline;
+unsigned long int ims;
+BOOL using_temporary_offsets = FALSE;
+BOOL anchored;
+BOOL startline;
+BOOL firstline;
+BOOL first_byte_caseless = FALSE;
+BOOL req_byte_caseless = FALSE;
+BOOL utf8;
+match_data match_block;
+match_data *md = &match_block;
+const uschar *tables;
+const uschar *start_bits = NULL;
+USPTR start_match = (USPTR)subject + start_offset;
+USPTR end_subject;
+USPTR req_byte_ptr = start_match - 1;
+
+pcre_study_data internal_study;
+const pcre_study_data *study;
+
+real_pcre internal_re;
+const real_pcre *external_re = (const real_pcre *)argument_re;
+const real_pcre *re = external_re;
+
+/* Plausibility checks */
+
+if ((options & ~PUBLIC_EXEC_OPTIONS) != 0) return PCRE_ERROR_BADOPTION;
+if (re == NULL || subject == NULL ||
+ (offsets == NULL && offsetcount > 0)) return PCRE_ERROR_NULL;
+if (offsetcount < 0) return PCRE_ERROR_BADCOUNT;
+
+/* Fish out the optional data from the extra_data structure, first setting
+the default values. */
+
+study = NULL;
+md->match_limit = MATCH_LIMIT;
+md->match_limit_recursion = MATCH_LIMIT_RECURSION;
+md->callout_data = NULL;
+
+/* The table pointer is always in native byte order. */
+
+tables = external_re->tables;
+
+if (extra_data != NULL)
+ {
+ register unsigned int flags = extra_data->flags;
+ if ((flags & PCRE_EXTRA_STUDY_DATA) != 0)
+ study = (const pcre_study_data *)extra_data->study_data;
+ if ((flags & PCRE_EXTRA_MATCH_LIMIT) != 0)
+ md->match_limit = extra_data->match_limit;
+ if ((flags & PCRE_EXTRA_MATCH_LIMIT_RECURSION) != 0)
+ md->match_limit_recursion = extra_data->match_limit_recursion;
+ if ((flags & PCRE_EXTRA_CALLOUT_DATA) != 0)
+ md->callout_data = extra_data->callout_data;
+ if ((flags & PCRE_EXTRA_TABLES) != 0) tables = extra_data->tables;
+ }
+
+/* If the exec call supplied NULL for tables, use the inbuilt ones. This
+is a feature that makes it possible to save compiled regex and re-use them
+in other programs later. */
+
+if (tables == NULL) tables = _pcre_default_tables;
+
+/* Check that the first field in the block is the magic number. If it is not,
+test for a regex that was compiled on a host of opposite endianness. If this is
+the case, flipped values are put in internal_re and internal_study if there was
+study data too. */
+
+if (re->magic_number != MAGIC_NUMBER)
+ {
+ re = _pcre_try_flipped(re, &internal_re, study, &internal_study);
+ if (re == NULL) return PCRE_ERROR_BADMAGIC;
+ if (study != NULL) study = &internal_study;
+ }
+
+/* Set up other data */
+
+anchored = ((re->options | options) & PCRE_ANCHORED) != 0;
+startline = (re->flags & PCRE_STARTLINE) != 0;
+firstline = (re->options & PCRE_FIRSTLINE) != 0;
+
+/* The code starts after the real_pcre block and the capture name table. */
+
+md->start_code = (const uschar *)external_re + re->name_table_offset +
+ re->name_count * re->name_entry_size;
+
+md->start_subject = (USPTR)subject;
+md->start_offset = start_offset;
+md->end_subject = md->start_subject + length;
+end_subject = md->end_subject;
+
+md->endonly = (re->options & PCRE_DOLLAR_ENDONLY) != 0;
+utf8 = md->utf8 = (re->options & PCRE_UTF8) != 0;
+
+md->notbol = (options & PCRE_NOTBOL) != 0;
+md->noteol = (options & PCRE_NOTEOL) != 0;
+md->notempty = (options & PCRE_NOTEMPTY) != 0;
+md->partial = (options & PCRE_PARTIAL) != 0;
+md->hitend = FALSE;
+
+md->recursive = NULL; /* No recursion at top level */
+
+md->lcc = tables + lcc_offset;
+md->ctypes = tables + ctypes_offset;
+
+/* Handle different \R options. */
+
+switch (options & (PCRE_BSR_ANYCRLF|PCRE_BSR_UNICODE))
+ {
+ case 0:
+ if ((re->options & (PCRE_BSR_ANYCRLF|PCRE_BSR_UNICODE)) != 0)
+ md->bsr_anycrlf = (re->options & PCRE_BSR_ANYCRLF) != 0;
+ else
+#ifdef BSR_ANYCRLF
+ md->bsr_anycrlf = TRUE;
+#else
+ md->bsr_anycrlf = FALSE;
+#endif
+ break;
+
+ case PCRE_BSR_ANYCRLF:
+ md->bsr_anycrlf = TRUE;
+ break;
+
+ case PCRE_BSR_UNICODE:
+ md->bsr_anycrlf = FALSE;
+ break;
+
+ default: return PCRE_ERROR_BADNEWLINE;
+ }
+
+/* Handle different types of newline. The three bits give eight cases. If
+nothing is set at run time, whatever was used at compile time applies. */
+
+switch ((((options & PCRE_NEWLINE_BITS) == 0)? re->options :
+ (pcre_uint32)options) & PCRE_NEWLINE_BITS)
+ {
+ case 0: newline = NEWLINE; break; /* Compile-time default */
+ case PCRE_NEWLINE_CR: newline = '\r'; break;
+ case PCRE_NEWLINE_LF: newline = '\n'; break;
+ case PCRE_NEWLINE_CR+
+ PCRE_NEWLINE_LF: newline = ('\r' << 8) | '\n'; break;
+ case PCRE_NEWLINE_ANY: newline = -1; break;
+ case PCRE_NEWLINE_ANYCRLF: newline = -2; break;
+ default: return PCRE_ERROR_BADNEWLINE;
+ }
+
+if (newline == -2)
+ {
+ md->nltype = NLTYPE_ANYCRLF;
+ }
+else if (newline < 0)
+ {
+ md->nltype = NLTYPE_ANY;
+ }
+else
+ {
+ md->nltype = NLTYPE_FIXED;
+ if (newline > 255)
+ {
+ md->nllen = 2;
+ md->nl[0] = (newline >> 8) & 255;
+ md->nl[1] = newline & 255;
+ }
+ else
+ {
+ md->nllen = 1;
+ md->nl[0] = newline;
+ }
+ }
+
+/* Partial matching is supported only for a restricted set of regexes at the
+moment. */
+
+if (md->partial && (re->flags & PCRE_NOPARTIAL) != 0)
+ return PCRE_ERROR_BADPARTIAL;
+
+/* Check a UTF-8 string if required. Unfortunately there's no way of passing
+back the character offset. */
+
+#ifdef SUPPORT_UTF8
+if (utf8 && (options & PCRE_NO_UTF8_CHECK) == 0)
+ {
+ if (_pcre_valid_utf8((uschar *)subject, length) >= 0)
+ return PCRE_ERROR_BADUTF8;
+ if (start_offset > 0 && start_offset < length)
+ {
+ int tb = ((uschar *)subject)[start_offset];
+ if (tb > 127)
+ {
+ tb &= 0xc0;
+ if (tb != 0 && tb != 0xc0) return PCRE_ERROR_BADUTF8_OFFSET;
+ }
+ }
+ }
+#endif
+
+/* The ims options can vary during the matching as a result of the presence
+of (?ims) items in the pattern. They are kept in a local variable so that
+restoring at the exit of a group is easy. */
+
+ims = re->options & (PCRE_CASELESS|PCRE_MULTILINE|PCRE_DOTALL);
+
+/* If the expression has got more back references than the offsets supplied can
+hold, we get a temporary chunk of working store to use during the matching.
+Otherwise, we can use the vector supplied, rounding down its size to a multiple
+of 3. */
+
+ocount = offsetcount - (offsetcount % 3);
+
+if (re->top_backref > 0 && re->top_backref >= ocount/3)
+ {
+ ocount = re->top_backref * 3 + 3;
+ md->offset_vector = (int *)(pcre_malloc)(ocount * sizeof(int));
+ if (md->offset_vector == NULL) return PCRE_ERROR_NOMEMORY;
+ using_temporary_offsets = TRUE;
+ DPRINTF(("Got memory to hold back references\n"));
+ }
+else md->offset_vector = offsets;
+
+md->offset_end = ocount;
+md->offset_max = (2*ocount)/3;
+md->offset_overflow = FALSE;
+md->capture_last = -1;
+
+/* Compute the minimum number of offsets that we need to reset each time. Doing
+this makes a huge difference to execution time when there aren't many brackets
+in the pattern. */
+
+resetcount = 2 + re->top_bracket * 2;
+if (resetcount > offsetcount) resetcount = ocount;
+
+/* Reset the working variable associated with each extraction. These should
+never be used unless previously set, but they get saved and restored, and so we
+initialize them to avoid reading uninitialized locations. */
+
+if (md->offset_vector != NULL)
+ {
+ register int *iptr = md->offset_vector + ocount;
+ register int *iend = iptr - resetcount/2 + 1;
+ while (--iptr >= iend) *iptr = -1;
+ }
+
+/* Set up the first character to match, if available. The first_byte value is
+never set for an anchored regular expression, but the anchoring may be forced
+at run time, so we have to test for anchoring. The first char may be unset for
+an unanchored pattern, of course. If there's no first char and the pattern was
+studied, there may be a bitmap of possible first characters. */
+
+if (!anchored)
+ {
+ if ((re->flags & PCRE_FIRSTSET) != 0)
+ {
+ first_byte = re->first_byte & 255;
+ if ((first_byte_caseless = ((re->first_byte & REQ_CASELESS) != 0)) == TRUE)
+ first_byte = md->lcc[first_byte];
+ }
+ else
+ if (!startline && study != NULL &&
+ (study->options & PCRE_STUDY_MAPPED) != 0)
+ start_bits = study->start_bits;
+ }
+
+/* For anchored or unanchored matches, there may be a "last known required
+character" set. */
+
+if ((re->flags & PCRE_REQCHSET) != 0)
+ {
+ req_byte = re->req_byte & 255;
+ req_byte_caseless = (re->req_byte & REQ_CASELESS) != 0;
+ req_byte2 = (tables + fcc_offset)[req_byte]; /* case flipped */
+ }
+
+
+/* ==========================================================================*/
+
+/* Loop for handling unanchored repeated matching attempts; for anchored regexs
+the loop runs just once. */
+
+for(;;)
+ {
+ USPTR save_end_subject = end_subject;
+ USPTR new_start_match;
+
+ /* Reset the maximum number of extractions we might see. */
+
+ if (md->offset_vector != NULL)
+ {
+ register int *iptr = md->offset_vector;
+ register int *iend = iptr + resetcount;
+ while (iptr < iend) *iptr++ = -1;
+ }
+
+ /* Advance to a unique first char if possible. If firstline is TRUE, the
+ start of the match is constrained to the first line of a multiline string.
+ That is, the match must be before or at the first newline. Implement this by
+ temporarily adjusting end_subject so that we stop scanning at a newline. If
+ the match fails at the newline, later code breaks this loop. */
+
+ if (firstline)
+ {
+ USPTR t = start_match;
+ while (t < md->end_subject && !IS_NEWLINE(t)) t++;
+ end_subject = t;
+ }
+
+ /* Now test for a unique first byte */
+
+ if (first_byte >= 0)
+ {
+ if (first_byte_caseless)
+ while (start_match < end_subject &&
+ md->lcc[*start_match] != first_byte)
+ start_match++;
+ else
+ while (start_match < end_subject && *start_match != first_byte)
+ start_match++;
+ }
+
+ /* Or to just after a linebreak for a multiline match if possible */
+
+ else if (startline)
+ {
+ if (start_match > md->start_subject + start_offset)
+ {
+ while (start_match <= end_subject && !WAS_NEWLINE(start_match))
+ start_match++;
+
+ /* If we have just passed a CR and the newline option is ANY or ANYCRLF,
+ and we are now at a LF, advance the match position by one more character.
+ */
+
+ if (start_match[-1] == '\r' &&
+ (md->nltype == NLTYPE_ANY || md->nltype == NLTYPE_ANYCRLF) &&
+ start_match < end_subject &&
+ *start_match == '\n')
+ start_match++;
+ }
+ }
+
+ /* Or to a non-unique first char after study */
+
+ else if (start_bits != NULL)
+ {
+ while (start_match < end_subject)
+ {
+ register unsigned int c = *start_match;
+ if ((start_bits[c/8] & (1 << (c&7))) == 0) start_match++; else break;
+ }
+ }
+
+ /* Restore fudged end_subject */
+
+ end_subject = save_end_subject;
+
+#ifdef DEBUG /* Sigh. Some compilers never learn. */
+ printf(">>>> Match against: ");
+ pchars(start_match, end_subject - start_match, TRUE, md);
+ printf("\n");
+#endif
+
+ /* If req_byte is set, we know that that character must appear in the subject
+ for the match to succeed. If the first character is set, req_byte must be
+ later in the subject; otherwise the test starts at the match point. This
+ optimization can save a huge amount of backtracking in patterns with nested
+ unlimited repeats that aren't going to match. Writing separate code for
+ cased/caseless versions makes it go faster, as does using an autoincrement
+ and backing off on a match.
+
+ HOWEVER: when the subject string is very, very long, searching to its end can
+ take a long time, and give bad performance on quite ordinary patterns. This
+ showed up when somebody was matching something like /^\d+C/ on a 32-megabyte
+ string... so we don't do this when the string is sufficiently long.
+
+ ALSO: this processing is disabled when partial matching is requested.
+ */
+
+ if (req_byte >= 0 &&
+ end_subject - start_match < REQ_BYTE_MAX &&
+ !md->partial)
+ {
+ register USPTR p = start_match + ((first_byte >= 0)? 1 : 0);
+
+ /* We don't need to repeat the search if we haven't yet reached the
+ place we found it at last time. */
+
+ if (p > req_byte_ptr)
+ {
+ if (req_byte_caseless)
+ {
+ while (p < end_subject)
+ {
+ register int pp = *p++;
+ if (pp == req_byte || pp == req_byte2) { p--; break; }
+ }
+ }
+ else
+ {
+ while (p < end_subject)
+ {
+ if (*p++ == req_byte) { p--; break; }
+ }
+ }
+
+ /* If we can't find the required character, break the matching loop,
+ forcing a match failure. */
+
+ if (p >= end_subject)
+ {
+ rc = MATCH_NOMATCH;
+ break;
+ }
+
+ /* If we have found the required character, save the point where we
+ found it, so that we don't search again next time round the loop if
+ the start hasn't passed this character yet. */
+
+ req_byte_ptr = p;
+ }
+ }
+
+ /* OK, we can now run the match. */
+
+ md->start_match_ptr = start_match;
+ md->match_call_count = 0;
+ rc = match(start_match, md->start_code, start_match, 2, md, ims, NULL, 0, 0);
+
+ switch(rc)
+ {
+ /* NOMATCH and PRUNE advance by one character. THEN at this level acts
+ exactly like PRUNE. */
+
+ case MATCH_NOMATCH:
+ case MATCH_PRUNE:
+ case MATCH_THEN:
+ new_start_match = start_match + 1;
+#ifdef SUPPORT_UTF8
+ if (utf8)
+ while(new_start_match < end_subject && (*new_start_match & 0xc0) == 0x80)
+ new_start_match++;
+#endif
+ break;
+
+ /* SKIP passes back the next starting point explicitly. */
+
+ case MATCH_SKIP:
+ new_start_match = md->start_match_ptr;
+ break;
+
+ /* COMMIT disables the bumpalong, but otherwise behaves as NOMATCH. */
+
+ case MATCH_COMMIT:
+ rc = MATCH_NOMATCH;
+ goto ENDLOOP;
+
+ /* Any other return is some kind of error. */
+
+ default:
+ goto ENDLOOP;
+ }
+
+ /* Control reaches here for the various types of "no match at this point"
+ result. Reset the code to MATCH_NOMATCH for subsequent checking. */
+
+ rc = MATCH_NOMATCH;
+
+ /* If PCRE_FIRSTLINE is set, the match must happen before or at the first
+ newline in the subject (though it may continue over the newline). Therefore,
+ if we have just failed to match, starting at a newline, do not continue. */
+
+ if (firstline && IS_NEWLINE(start_match)) break;
+
+ /* Advance to new matching position */
+
+ start_match = new_start_match;
+
+ /* Break the loop if the pattern is anchored or if we have passed the end of
+ the subject. */
+
+ if (anchored || start_match > end_subject) break;
+
+ /* If we have just passed a CR and we are now at a LF, and the pattern does
+ not contain any explicit matches for \r or \n, and the newline option is CRLF
+ or ANY or ANYCRLF, advance the match position by one more character. */
+
+ if (start_match[-1] == '\r' &&
+ start_match < end_subject &&
+ *start_match == '\n' &&
+ (re->flags & PCRE_HASCRORLF) == 0 &&
+ (md->nltype == NLTYPE_ANY ||
+ md->nltype == NLTYPE_ANYCRLF ||
+ md->nllen == 2))
+ start_match++;
+
+ } /* End of for(;;) "bumpalong" loop */
+
+/* ==========================================================================*/
+
+/* We reach here when rc is not MATCH_NOMATCH, or if one of the stopping
+conditions is true:
+
+(1) The pattern is anchored or the match was failed by (*COMMIT);
+
+(2) We are past the end of the subject;
+
+(3) PCRE_FIRSTLINE is set and we have failed to match at a newline, because
+ this option requests that a match occur at or before the first newline in
+ the subject.
+
+When we have a match and the offset vector is big enough to deal with any
+backreferences, captured substring offsets will already be set up. In the case
+where we had to get some local store to hold offsets for backreference
+processing, copy those that we can. In this case there need not be overflow if
+certain parts of the pattern were not used, even though there are more
+capturing parentheses than vector slots. */
+
+ENDLOOP:
+
+if (rc == MATCH_MATCH)
+ {
+ if (using_temporary_offsets)
+ {
+ if (offsetcount >= 4)
+ {
+ memcpy(offsets + 2, md->offset_vector + 2,
+ (offsetcount - 2) * sizeof(int));
+ DPRINTF(("Copied offsets from temporary memory\n"));
+ }
+ if (md->end_offset_top > offsetcount) md->offset_overflow = TRUE;
+ DPRINTF(("Freeing temporary memory\n"));
+ (pcre_free)(md->offset_vector);
+ }
+
+ /* Set the return code to the number of captured strings, or 0 if there are
+ too many to fit into the vector. */
+
+ rc = md->offset_overflow? 0 : md->end_offset_top/2;
+
+ /* If there is space, set up the whole thing as substring 0. The value of
+ md->start_match_ptr might be modified if \K was encountered on the success
+ matching path. */
+
+ if (offsetcount < 2) rc = 0; else
+ {
+ offsets[0] = md->start_match_ptr - md->start_subject;
+ offsets[1] = md->end_match_ptr - md->start_subject;
+ }
+
+ DPRINTF((">>>> returning %d\n", rc));
+ return rc;
+ }
+
+/* Control gets here if there has been an error, or if the overall match
+attempt has failed at all permitted starting positions. */
+
+if (using_temporary_offsets)
+ {
+ DPRINTF(("Freeing temporary memory\n"));
+ (pcre_free)(md->offset_vector);
+ }
+
+if (rc != MATCH_NOMATCH)
+ {
+ DPRINTF((">>>> error: returning %d\n", rc));
+ return rc;
+ }
+else if (md->partial && md->hitend)
+ {
+ DPRINTF((">>>> returning PCRE_ERROR_PARTIAL\n"));
+ return PCRE_ERROR_PARTIAL;
+ }
+else
+ {
+ DPRINTF((">>>> returning PCRE_ERROR_NOMATCH\n"));
+ return PCRE_ERROR_NOMATCH;
+ }
+}
+
+/* End of pcre_exec.c */
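[Editor's note, not part of the patch.] As a reference for the argument and return-value conventions documented above pcre_exec(), here is a minimal, hedged usage sketch. It assumes only the standard PCRE 7.x API shipped in this directory (pcre_compile, pcre_exec, pcre_free and pcre.h); the pattern, subject string, and ovector size are illustrative choices. The offset vector must be a multiple of three (the top third is working space), the first offset pair delimits the whole match, and later pairs delimit capturing groups.

    /* Sketch: compile a pattern, run pcre_exec(), and print the match
       and capture substrings from the offset vector. */

    #include <stdio.h>
    #include <string.h>
    #include <pcre.h>

    int main(void)
    {
      const char *pattern = "(\\w+)@(\\w+)";
      const char *subject = "user@example";
      const char *error;
      int erroffset;
      int ovector[30];                 /* must be a multiple of 3 */
      int rc, i;

      pcre *re = pcre_compile(pattern, 0, &error, &erroffset, NULL);
      if (re == NULL)
        {
        fprintf(stderr, "compile failed at offset %d: %s\n", erroffset, error);
        return 1;
        }

      rc = pcre_exec(re, NULL, subject, (int)strlen(subject), 0, 0,
                     ovector, 30);

      if (rc == PCRE_ERROR_NOMATCH)
        printf("no match\n");
      else if (rc < 0)
        printf("matching error %d\n", rc);
      else
        {
        /* rc == 0 means the vector was too small; otherwise rc is the number
           of offset pairs set (whole match plus captures). */
        if (rc == 0) rc = 30 / 3;
        for (i = 0; i < rc; i++)
          printf("%d: %.*s\n", i,
                 ovector[2*i+1] - ovector[2*i], subject + ovector[2*i]);
        }

      pcre_free(re);
      return 0;
    }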
diff --git a/src/third_party/pcre-7.4/pcre_fullinfo.c b/src/third_party/pcre-7.4/pcre_fullinfo.c
new file mode 100644
index 00000000000..04e31f69adf
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcre_fullinfo.c
@@ -0,0 +1,165 @@
+/*************************************************
+* Perl-Compatible Regular Expressions *
+*************************************************/
+
+/* PCRE is a library of functions to support regular expressions whose syntax
+and semantics are as close as possible to those of the Perl 5 language.
+
+ Written by Philip Hazel
+ Copyright (c) 1997-2007 University of Cambridge
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+
+/* This module contains the external function pcre_fullinfo(), which returns
+information about a compiled pattern. */
+
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "pcre_internal.h"
+
+
+/*************************************************
+* Return info about compiled pattern *
+*************************************************/
+
+/* This is a newer "info" function which has an extensible interface so
+that additional items can be added compatibly.
+
+Arguments:
+ argument_re points to compiled code
+ extra_data points to extra data, or NULL
+ what what information is required
+ where where to put the information
+
+Returns: 0 if data returned, negative on error
+*/
+
+PCRE_EXP_DEFN int
+pcre_fullinfo(const pcre *argument_re, const pcre_extra *extra_data, int what,
+ void *where)
+{
+real_pcre internal_re;
+pcre_study_data internal_study;
+const real_pcre *re = (const real_pcre *)argument_re;
+const pcre_study_data *study = NULL;
+
+if (re == NULL || where == NULL) return PCRE_ERROR_NULL;
+
+if (extra_data != NULL && (extra_data->flags & PCRE_EXTRA_STUDY_DATA) != 0)
+ study = (const pcre_study_data *)extra_data->study_data;
+
+if (re->magic_number != MAGIC_NUMBER)
+ {
+ re = _pcre_try_flipped(re, &internal_re, study, &internal_study);
+ if (re == NULL) return PCRE_ERROR_BADMAGIC;
+ if (study != NULL) study = &internal_study;
+ }
+
+switch (what)
+ {
+ case PCRE_INFO_OPTIONS:
+ *((unsigned long int *)where) = re->options & PUBLIC_OPTIONS;
+ break;
+
+ case PCRE_INFO_SIZE:
+ *((size_t *)where) = re->size;
+ break;
+
+ case PCRE_INFO_STUDYSIZE:
+ *((size_t *)where) = (study == NULL)? 0 : study->size;
+ break;
+
+ case PCRE_INFO_CAPTURECOUNT:
+ *((int *)where) = re->top_bracket;
+ break;
+
+ case PCRE_INFO_BACKREFMAX:
+ *((int *)where) = re->top_backref;
+ break;
+
+ case PCRE_INFO_FIRSTBYTE:
+ *((int *)where) =
+ ((re->flags & PCRE_FIRSTSET) != 0)? re->first_byte :
+ ((re->flags & PCRE_STARTLINE) != 0)? -1 : -2;
+ break;
+
+ /* Make sure we pass back the pointer to the bit vector in the external
+ block, not the internal copy (with flipped integer fields). */
+
+ case PCRE_INFO_FIRSTTABLE:
+ *((const uschar **)where) =
+ (study != NULL && (study->options & PCRE_STUDY_MAPPED) != 0)?
+ ((const pcre_study_data *)extra_data->study_data)->start_bits : NULL;
+ break;
+
+ case PCRE_INFO_LASTLITERAL:
+ *((int *)where) =
+ ((re->flags & PCRE_REQCHSET) != 0)? re->req_byte : -1;
+ break;
+
+ case PCRE_INFO_NAMEENTRYSIZE:
+ *((int *)where) = re->name_entry_size;
+ break;
+
+ case PCRE_INFO_NAMECOUNT:
+ *((int *)where) = re->name_count;
+ break;
+
+ case PCRE_INFO_NAMETABLE:
+ *((const uschar **)where) = (const uschar *)re + re->name_table_offset;
+ break;
+
+ case PCRE_INFO_DEFAULT_TABLES:
+ *((const uschar **)where) = (const uschar *)(_pcre_default_tables);
+ break;
+
+ case PCRE_INFO_OKPARTIAL:
+ *((int *)where) = (re->flags & PCRE_NOPARTIAL) == 0;
+ break;
+
+ case PCRE_INFO_JCHANGED:
+ *((int *)where) = (re->flags & PCRE_JCHANGED) != 0;
+ break;
+
+ case PCRE_INFO_HASCRORLF:
+ *((int *)where) = (re->flags & PCRE_HASCRORLF) != 0;
+ break;
+
+ default: return PCRE_ERROR_BADOPTION;
+ }
+
+return 0;
+}
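+
+/* Illustrative usage sketch: querying a compiled pattern with pcre_fullinfo().
+The pattern text and variable names are hypothetical and error handling is
+abbreviated.
+
+     const char *err;
+     int erroff, captures = 0;
+     size_t size = 0;
+     pcre *re = pcre_compile("(\\d{4})-(\\d{2})", 0, &err, &erroff, NULL);
+     if (re != NULL &&
+         pcre_fullinfo(re, NULL, PCRE_INFO_CAPTURECOUNT, &captures) == 0 &&
+         pcre_fullinfo(re, NULL, PCRE_INFO_SIZE, &size) == 0)
+       printf("%d capturing groups, %lu bytes\n", captures, (unsigned long)size);
+     (pcre_free)(re);
+*/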
+
+/* End of pcre_fullinfo.c */
diff --git a/src/third_party/pcre-7.4/pcre_get.c b/src/third_party/pcre-7.4/pcre_get.c
new file mode 100644
index 00000000000..fc283c88e1a
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcre_get.c
@@ -0,0 +1,465 @@
+/*************************************************
+* Perl-Compatible Regular Expressions *
+*************************************************/
+
+/* PCRE is a library of functions to support regular expressions whose syntax
+and semantics are as close as possible to those of the Perl 5 language.
+
+ Written by Philip Hazel
+ Copyright (c) 1997-2007 University of Cambridge
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+
+/* This module contains some convenience functions for extracting substrings
+from the subject string after a regex match has succeeded. The original idea
+for these functions came from Scott Wimer. */
+
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "pcre_internal.h"
+
+
+/*************************************************
+* Find number for named string *
+*************************************************/
+
+/* This function is used by the get_first_set() function below, as well
+as being generally available. It assumes that names are unique.
+
+Arguments:
+ code the compiled regex
+ stringname the name whose number is required
+
+Returns: the number of the named parentheses, or a negative number
+ (PCRE_ERROR_NOSUBSTRING) if not found
+*/
+
+int
+pcre_get_stringnumber(const pcre *code, const char *stringname)
+{
+int rc;
+int entrysize;
+int top, bot;
+uschar *nametable;
+
+if ((rc = pcre_fullinfo(code, NULL, PCRE_INFO_NAMECOUNT, &top)) != 0)
+ return rc;
+if (top <= 0) return PCRE_ERROR_NOSUBSTRING;
+
+if ((rc = pcre_fullinfo(code, NULL, PCRE_INFO_NAMEENTRYSIZE, &entrysize)) != 0)
+ return rc;
+if ((rc = pcre_fullinfo(code, NULL, PCRE_INFO_NAMETABLE, &nametable)) != 0)
+ return rc;
+
+bot = 0;
+while (top > bot)
+ {
+ int mid = (top + bot) / 2;
+ uschar *entry = nametable + entrysize*mid;
+ int c = strcmp(stringname, (char *)(entry + 2));
+ if (c == 0) return (entry[0] << 8) + entry[1];
+ if (c > 0) bot = mid + 1; else top = mid;
+ }
+
+return PCRE_ERROR_NOSUBSTRING;
+}
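+
+/* Illustrative usage sketch: each name table entry holds a 2-byte, big-endian
+group number followed by the zero-terminated name, which is what the binary
+search above decodes. With a hypothetical pattern containing (?P<year>\d{4}):
+
+     int n = pcre_get_stringnumber(re, "year");
+     if (n > 0) printf("group 'year' is number %d\n", n);
+     else printf("no such name (error %d)\n", n);
+*/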
+
+
+
+/*************************************************
+* Find (multiple) entries for named string *
+*************************************************/
+
+/* This is used by the get_first_set() function below, as well as being
+generally available. It is used when duplicated names are permitted.
+
+Arguments:
+ code the compiled regex
+ stringname the name whose entries are required
+ firstptr where to put the pointer to the first entry
+ lastptr where to put the pointer to the last entry
+
+Returns: the length of each entry, or a negative number
+ (PCRE_ERROR_NOSUBSTRING) if not found
+*/
+
+int
+pcre_get_stringtable_entries(const pcre *code, const char *stringname,
+ char **firstptr, char **lastptr)
+{
+int rc;
+int entrysize;
+int top, bot;
+uschar *nametable, *lastentry;
+
+if ((rc = pcre_fullinfo(code, NULL, PCRE_INFO_NAMECOUNT, &top)) != 0)
+ return rc;
+if (top <= 0) return PCRE_ERROR_NOSUBSTRING;
+
+if ((rc = pcre_fullinfo(code, NULL, PCRE_INFO_NAMEENTRYSIZE, &entrysize)) != 0)
+ return rc;
+if ((rc = pcre_fullinfo(code, NULL, PCRE_INFO_NAMETABLE, &nametable)) != 0)
+ return rc;
+
+lastentry = nametable + entrysize * (top - 1);
+bot = 0;
+while (top > bot)
+ {
+ int mid = (top + bot) / 2;
+ uschar *entry = nametable + entrysize*mid;
+ int c = strcmp(stringname, (char *)(entry + 2));
+ if (c == 0)
+ {
+ uschar *first = entry;
+ uschar *last = entry;
+ while (first > nametable)
+ {
+ if (strcmp(stringname, (char *)(first - entrysize + 2)) != 0) break;
+ first -= entrysize;
+ }
+ while (last < lastentry)
+ {
+ if (strcmp(stringname, (char *)(last + entrysize + 2)) != 0) break;
+ last += entrysize;
+ }
+ *firstptr = (char *)first;
+ *lastptr = (char *)last;
+ return entrysize;
+ }
+ if (c > 0) bot = mid + 1; else top = mid;
+ }
+
+return PCRE_ERROR_NOSUBSTRING;
+}
+
+
+
+/*************************************************
+* Find first set of multiple named strings *
+*************************************************/
+
+/* This function allows for duplicate names in the table of named substrings.
+It returns the number of the first one that was set in a pattern match.
+
+Arguments:
+ code the compiled regex
+ stringname the name of the capturing substring
+ ovector the vector of matched substrings
+
+Returns: the number of the first that is set,
+ or the number of the last one if none are set,
+ or a negative number on error
+*/
+
+static int
+get_first_set(const pcre *code, const char *stringname, int *ovector)
+{
+const real_pcre *re = (const real_pcre *)code;
+int entrysize;
+char *first, *last;
+uschar *entry;
+if ((re->options & PCRE_DUPNAMES) == 0 && (re->flags & PCRE_JCHANGED) == 0)
+ return pcre_get_stringnumber(code, stringname);
+entrysize = pcre_get_stringtable_entries(code, stringname, &first, &last);
+if (entrysize <= 0) return entrysize;
+for (entry = (uschar *)first; entry <= (uschar *)last; entry += entrysize)
+ {
+ int n = (entry[0] << 8) + entry[1];
+ if (ovector[n*2] >= 0) return n;
+ }
+return (first[0] << 8) + first[1];
+}
+
+
+
+
+/*************************************************
+* Copy captured string to given buffer *
+*************************************************/
+
+/* This function copies a single captured substring into a given buffer.
+Note that we use memcpy() rather than strncpy() in case there are binary zeros
+in the string.
+
+Arguments:
+ subject the subject string that was matched
+ ovector pointer to the offsets table
+ stringcount the number of substrings that were captured
+ (i.e. the yield of the pcre_exec call, unless
+ that was zero, in which case it should be 1/3
+ of the offset table size)
+ stringnumber the number of the required substring
+ buffer where to put the substring
+ size the size of the buffer
+
+Returns: if successful:
+ the length of the copied string, not including the zero
+ that is put on the end; can be zero
+ if not successful:
+ PCRE_ERROR_NOMEMORY (-6) buffer too small
+ PCRE_ERROR_NOSUBSTRING (-7) no such captured substring
+*/
+
+int
+pcre_copy_substring(const char *subject, int *ovector, int stringcount,
+ int stringnumber, char *buffer, int size)
+{
+int yield;
+if (stringnumber < 0 || stringnumber >= stringcount)
+ return PCRE_ERROR_NOSUBSTRING;
+stringnumber *= 2;
+yield = ovector[stringnumber+1] - ovector[stringnumber];
+if (size < yield + 1) return PCRE_ERROR_NOMEMORY;
+memcpy(buffer, subject + ovector[stringnumber], yield);
+buffer[yield] = 0;
+return yield;
+}
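+
+/* Illustrative usage sketch: copying capture 1 into a fixed buffer after a
+successful match. `re' and `subject' are hypothetical.
+
+     int ovector[30];
+     char buf[64];
+     int rc = pcre_exec(re, NULL, subject, (int)strlen(subject), 0, 0,
+       ovector, 30);
+     if (rc > 1)
+       {
+       int len = pcre_copy_substring(subject, ovector, rc, 1, buf,
+         (int)sizeof(buf));
+       if (len >= 0) printf("capture 1: %s (%d bytes)\n", buf, len);
+       }
+*/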
+
+
+
+/*************************************************
+* Copy named captured string to given buffer *
+*************************************************/
+
+/* This function copies a single captured substring into a given buffer,
+identifying it by name. If the regex permits duplicate names, the first
+substring that is set is chosen.
+
+Arguments:
+ code the compiled regex
+ subject the subject string that was matched
+ ovector pointer to the offsets table
+ stringcount the number of substrings that were captured
+ (i.e. the yield of the pcre_exec call, unless
+ that was zero, in which case it should be 1/3
+ of the offset table size)
+ stringname the name of the required substring
+ buffer where to put the substring
+ size the size of the buffer
+
+Returns: if successful:
+ the length of the copied string, not including the zero
+ that is put on the end; can be zero
+ if not successful:
+ PCRE_ERROR_NOMEMORY (-6) buffer too small
+ PCRE_ERROR_NOSUBSTRING (-7) no such captured substring
+*/
+
+int
+pcre_copy_named_substring(const pcre *code, const char *subject, int *ovector,
+ int stringcount, const char *stringname, char *buffer, int size)
+{
+int n = get_first_set(code, stringname, ovector);
+if (n <= 0) return n;
+return pcre_copy_substring(subject, ovector, stringcount, n, buffer, size);
+}
+
+
+
+/*************************************************
+* Copy all captured strings to new store *
+*************************************************/
+
+/* This function gets one chunk of store and builds a list of pointers and all
+of the captured substrings in it. A NULL pointer is put on the end of the list.
+
+Arguments:
+ subject the subject string that was matched
+ ovector pointer to the offsets table
+ stringcount the number of substrings that were captured
+ (i.e. the yield of the pcre_exec call, unless
+ that was zero, in which case it should be 1/3
+ of the offset table size)
+ listptr set to point to the list of pointers
+
+Returns: if successful: 0
+ if not successful:
+ PCRE_ERROR_NOMEMORY (-6) failed to get store
+*/
+
+int
+pcre_get_substring_list(const char *subject, int *ovector, int stringcount,
+ const char ***listptr)
+{
+int i;
+int size = sizeof(char *);
+int double_count = stringcount * 2;
+char **stringlist;
+char *p;
+
+for (i = 0; i < double_count; i += 2)
+ size += sizeof(char *) + ovector[i+1] - ovector[i] + 1;
+
+stringlist = (char **)(pcre_malloc)(size);
+if (stringlist == NULL) return PCRE_ERROR_NOMEMORY;
+
+*listptr = (const char **)stringlist;
+p = (char *)(stringlist + stringcount + 1);
+
+for (i = 0; i < double_count; i += 2)
+ {
+ int len = ovector[i+1] - ovector[i];
+ memcpy(p, subject + ovector[i], len);
+ *stringlist++ = p;
+ p += len;
+ *p++ = 0;
+ }
+
+*stringlist = NULL;
+return 0;
+}
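+
+/* Illustrative usage sketch: extracting all captured substrings in one call
+and releasing the block afterwards. `rc' is assumed to be a positive yield
+from pcre_exec() and `ovector' its offset vector.
+
+     const char **list;
+     if (pcre_get_substring_list(subject, ovector, rc, &list) == 0)
+       {
+       int i;
+       for (i = 0; i < rc; i++) printf("%d: %s\n", i, list[i]);
+       pcre_free_substring_list(list);
+       }
+*/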
+
+
+
+/*************************************************
+* Free store obtained by get_substring_list *
+*************************************************/
+
+/* This function exists for the benefit of people calling PCRE from non-C
+programs that can call its functions, but not free() or (pcre_free)() directly.
+
+Argument: the result of a previous pcre_get_substring_list()
+Returns: nothing
+*/
+
+void
+pcre_free_substring_list(const char **pointer)
+{
+(pcre_free)((void *)pointer);
+}
+
+
+
+/*************************************************
+* Copy captured string to new store *
+*************************************************/
+
+/* This function copies a single captured substring into a piece of new
+store.
+
+Arguments:
+ subject the subject string that was matched
+ ovector pointer to the offsets table
+ stringcount the number of substrings that were captured
+ (i.e. the yield of the pcre_exec call, unless
+ that was zero, in which case it should be 1/3
+ of the offset table size)
+ stringnumber the number of the required substring
+ stringptr where to put a pointer to the substring
+
+Returns: if successful:
+ the length of the string, not including the zero that
+ is put on the end; can be zero
+ if not successful:
+ PCRE_ERROR_NOMEMORY (-6) failed to get store
+ PCRE_ERROR_NOSUBSTRING (-7) substring not present
+*/
+
+int
+pcre_get_substring(const char *subject, int *ovector, int stringcount,
+ int stringnumber, const char **stringptr)
+{
+int yield;
+char *substring;
+if (stringnumber < 0 || stringnumber >= stringcount)
+ return PCRE_ERROR_NOSUBSTRING;
+stringnumber *= 2;
+yield = ovector[stringnumber+1] - ovector[stringnumber];
+substring = (char *)(pcre_malloc)(yield + 1);
+if (substring == NULL) return PCRE_ERROR_NOMEMORY;
+memcpy(substring, subject + ovector[stringnumber], yield);
+substring[yield] = 0;
+*stringptr = substring;
+return yield;
+}
+
+
+
+/*************************************************
+* Copy named captured string to new store *
+*************************************************/
+
+/* This function copies a single captured substring, identified by name, into
+new store. If the regex permits duplicate names, the first substring that is
+set is chosen.
+
+Arguments:
+ code the compiled regex
+ subject the subject string that was matched
+ ovector pointer to the offsets table
+ stringcount the number of substrings that were captured
+ (i.e. the yield of the pcre_exec call, unless
+ that was zero, in which case it should be 1/3
+ of the offset table size)
+ stringname the name of the required substring
+ stringptr where to put the pointer
+
+Returns: if successful:
+ the length of the copied string, not including the zero
+ that is put on the end; can be zero
+ if not successful:
+ PCRE_ERROR_NOMEMORY (-6) couldn't get memory
+ PCRE_ERROR_NOSUBSTRING (-7) no such captured substring
+*/
+
+int
+pcre_get_named_substring(const pcre *code, const char *subject, int *ovector,
+ int stringcount, const char *stringname, const char **stringptr)
+{
+int n = get_first_set(code, stringname, ovector);
+if (n <= 0) return n;
+return pcre_get_substring(subject, ovector, stringcount, n, stringptr);
+}
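+
+/* Illustrative usage sketch: fetching a capture by name into new store and
+freeing it with pcre_free_substring() below. Assumes a hypothetical pattern
+with a group named "word" and a successful pcre_exec() yield `rc'.
+
+     const char *value;
+     int len = pcre_get_named_substring(re, subject, ovector, rc, "word",
+       &value);
+     if (len >= 0)
+       {
+       printf("word = %.*s\n", len, value);
+       pcre_free_substring(value);
+       }
+*/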
+
+
+
+
+/*************************************************
+* Free store obtained by get_substring *
+*************************************************/
+
+/* This function exists for the benefit of people calling PCRE from non-C
+programs that can call its functions, but not free() or (pcre_free)() directly.
+
+Argument: the result of a previous pcre_get_substring()
+Returns: nothing
+*/
+
+void
+pcre_free_substring(const char *pointer)
+{
+(pcre_free)((void *)pointer);
+}
+
+/* End of pcre_get.c */
diff --git a/src/third_party/pcre-7.4/pcre_globals.c b/src/third_party/pcre-7.4/pcre_globals.c
new file mode 100644
index 00000000000..4794819148a
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcre_globals.c
@@ -0,0 +1,63 @@
+/*************************************************
+* Perl-Compatible Regular Expressions *
+*************************************************/
+
+/* PCRE is a library of functions to support regular expressions whose syntax
+and semantics are as close as possible to those of the Perl 5 language.
+
+ Written by Philip Hazel
+ Copyright (c) 1997-2007 University of Cambridge
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+
+/* This module contains global variables that are exported by the PCRE library.
+PCRE is thread-clean and doesn't use any global variables in the normal sense.
+However, it calls memory allocation and freeing functions via the four
+indirections below, and it can optionally do callouts, using the fifth
+indirection. These values can be changed by the caller, but are shared between
+all threads. However, when compiling for Virtual Pascal, things are done
+differently, and global variables are not used (see pcre.in). */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "pcre_internal.h"
+
+#ifndef VPCOMPAT
+PCRE_EXP_DATA_DEFN void *(*pcre_malloc)(size_t) = malloc;
+PCRE_EXP_DATA_DEFN void (*pcre_free)(void *) = free;
+PCRE_EXP_DATA_DEFN void *(*pcre_stack_malloc)(size_t) = malloc;
+PCRE_EXP_DATA_DEFN void (*pcre_stack_free)(void *) = free;
+PCRE_EXP_DATA_DEFN int (*pcre_callout)(pcre_callout_block *) = NULL;
+#endif
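+
+/* Illustrative usage sketch: a caller can redirect PCRE's memory management
+by assigning to the indirections above before any pattern is compiled. The
+counting wrappers here are hypothetical.
+
+     static size_t bytes_requested = 0;
+     static void *counting_malloc(size_t n) { bytes_requested += n; return malloc(n); }
+     static void counting_free(void *p) { free(p); }
+
+     pcre_malloc = counting_malloc;
+     pcre_free = counting_free;
+*/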
+
+/* End of pcre_globals.c */
diff --git a/src/third_party/pcre-7.4/pcre_info.c b/src/third_party/pcre-7.4/pcre_info.c
new file mode 100644
index 00000000000..9bcccbcab37
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcre_info.c
@@ -0,0 +1,93 @@
+/*************************************************
+* Perl-Compatible Regular Expressions *
+*************************************************/
+
+/* PCRE is a library of functions to support regular expressions whose syntax
+and semantics are as close as possible to those of the Perl 5 language.
+
+ Written by Philip Hazel
+ Copyright (c) 1997-2007 University of Cambridge
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+
+/* This module contains the external function pcre_info(), which gives some
+information about a compiled pattern. However, use of this function is now
+deprecated, as it has been superseded by pcre_fullinfo(). */
+
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "pcre_internal.h"
+
+
+/*************************************************
+* (Obsolete) Return info about compiled pattern *
+*************************************************/
+
+/* This is the original "info" function. It picks potentially useful data out
+of the private structure, but its interface was too rigid. It remains for
+backwards compatibility. The public options are passed back in an int - though
+the re->options field has been expanded to a long int, all the public options are
+at the low end of it, and so even on 16-bit systems this will still be OK.
+Therefore, I haven't changed the API for pcre_info().
+
+Arguments:
+ argument_re points to compiled code
+ optptr where to pass back the options
+ first_byte where to pass back the first character,
+ or -1 if multiline and all branches start ^,
+ or -2 otherwise
+
+Returns: number of capturing subpatterns
+ or negative values on error
+*/
+
+PCRE_EXP_DEFN int
+pcre_info(const pcre *argument_re, int *optptr, int *first_byte)
+{
+real_pcre internal_re;
+const real_pcre *re = (const real_pcre *)argument_re;
+if (re == NULL) return PCRE_ERROR_NULL;
+if (re->magic_number != MAGIC_NUMBER)
+ {
+ re = _pcre_try_flipped(re, &internal_re, NULL, NULL);
+ if (re == NULL) return PCRE_ERROR_BADMAGIC;
+ }
+if (optptr != NULL) *optptr = (int)(re->options & PUBLIC_OPTIONS);
+if (first_byte != NULL)
+ *first_byte = ((re->flags & PCRE_FIRSTSET) != 0)? re->first_byte :
+ ((re->flags & PCRE_STARTLINE) != 0)? -1 : -2;
+return re->top_bracket;
+}
+
+/* End of pcre_info.c */
diff --git a/src/third_party/pcre-7.4/pcre_internal.h b/src/third_party/pcre-7.4/pcre_internal.h
new file mode 100644
index 00000000000..5fbb3443333
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcre_internal.h
@@ -0,0 +1,1117 @@
+/*************************************************
+* Perl-Compatible Regular Expressions *
+*************************************************/
+
+
+/* PCRE is a library of functions to support regular expressions whose syntax
+and semantics are as close as possible to those of the Perl 5 language.
+
+ Written by Philip Hazel
+ Copyright (c) 1997-2007 University of Cambridge
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+/* This header contains definitions that are shared between the different
+modules, but which are not relevant to the exported API. This includes some
+functions whose names all begin with "_pcre_". */
+
+#ifndef PCRE_INTERNAL_H
+#define PCRE_INTERNAL_H
+
+/* Define DEBUG to get debugging output on stdout. */
+
+#if 0
+#define DEBUG
+#endif
+
+/* Use a macro for debugging printing, 'cause that eliminates the use of #ifdef
+inline, and there are *still* stupid compilers about that don't like indented
+pre-processor statements, or at least there were when I first wrote this. After
+all, it had only been about 10 years then...
+
+It turns out that the Mac Debugging.h header also defines the macro DPRINTF, so
+be absolutely sure we get our version. */
+
+#undef DPRINTF
+#ifdef DEBUG
+#define DPRINTF(p) printf p
+#else
+#define DPRINTF(p) /* Nothing */
+#endif
+
+
+/* Standard C headers plus the external interface definition. The only time
+setjmp and stdarg are used is when NO_RECURSE is set. */
+
+#include <ctype.h>
+#include <limits.h>
+#include <setjmp.h>
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+/* When compiling a DLL for Windows, the exported symbols have to be declared
+using some MS magic. I found some useful information on this web page:
+http://msdn2.microsoft.com/en-us/library/y4h7bcy6(VS.80).aspx. According to the
+information there, using __declspec(dllexport) without "extern" we have a
+definition; with "extern" we have a declaration. The settings here override the
+setting in pcre.h (which is included below); it defines only PCRE_EXP_DECL,
+which is all that is needed for applications (they just import the symbols). We
+use:
+
+ PCRE_EXP_DECL for declarations
+ PCRE_EXP_DEFN for definitions of exported functions
+ PCRE_EXP_DATA_DEFN for definitions of exported variables
+
+The reason for the two DEFN macros is that in non-Windows environments, one
+does not want to have "extern" before variable definitions because it leads to
+compiler warnings. So we distinguish between functions and variables. In
+Windows, the two should always be the same.
+
+The reason for wrapping this in #ifndef PCRE_EXP_DECL is so that pcretest,
+which is an application, but needs to import this file in order to "peek" at
+internals, can #include pcre.h first to get an application's-eye view.
+
+In principle, people compiling for non-Windows, non-Unix-like (i.e. uncommon,
+special-purpose environments) might want to stick other stuff in front of
+exported symbols. That's why, in the non-Windows case, we set PCRE_EXP_DEFN and
+PCRE_EXP_DATA_DEFN only if they are not already set. */
+
+#ifndef PCRE_EXP_DECL
+/*# ifdef _WIN32
+# ifndef PCRE_STATIC
+# define PCRE_EXP_DECL extern __declspec(dllexport)
+# define PCRE_EXP_DEFN __declspec(dllexport)
+# define PCRE_EXP_DATA_DEFN __declspec(dllexport)
+# else
+# define PCRE_EXP_DECL extern
+# define PCRE_EXP_DEFN
+# define PCRE_EXP_DATA_DEFN
+# endif
+# else*/
+# ifdef __cplusplus
+# define PCRE_EXP_DECL extern "C"
+# else
+# define PCRE_EXP_DECL extern
+# endif
+# ifndef PCRE_EXP_DEFN
+# define PCRE_EXP_DEFN PCRE_EXP_DECL
+# endif
+# ifndef PCRE_EXP_DATA_DEFN
+# define PCRE_EXP_DATA_DEFN
+# endif
+# endif
+/*#endif*/
+
+/* We need to have types that specify unsigned 16-bit and 32-bit integers. We
+cannot determine these outside the compilation (e.g. by running a program as
+part of "configure") because PCRE is often cross-compiled for use on other
+systems. Instead we make use of the maximum sizes that are available at
+preprocessor time in standard C environments. */
+
+#if USHRT_MAX == 65535
+ typedef unsigned short pcre_uint16;
+#elif UINT_MAX == 65535
+ typedef unsigned int pcre_uint16;
+#else
+ #error Cannot determine a type for 16-bit unsigned integers
+#endif
+
+#if UINT_MAX == 4294967295
+ typedef unsigned int pcre_uint32;
+#elif ULONG_MAX == 4294967295
+ typedef unsigned long int pcre_uint32;
+#else
+ #error Cannot determine a type for 32-bit unsigned integers
+#endif
+
+/* All character handling must be done as unsigned characters. Otherwise there
+are problems with top-bit-set characters and functions such as isspace().
+However, we leave the interface to the outside world as char *, because that
+should make things easier for callers. We define a short type for unsigned char
+to save lots of typing. I tried "uchar", but it causes problems on Digital
+Unix, where it is defined in sys/types, so use "uschar" instead. */
+
+typedef unsigned char uschar;
+
+/* This is an unsigned int value that no character can ever have. UTF-8
+characters only go up to 0x7fffffff (though Unicode doesn't go beyond
+0x0010ffff). */
+
+#define NOTACHAR 0xffffffff
+
+/* PCRE is able to support several different kinds of newline (CR, LF, CRLF,
+"any" and "anycrlf" at present). The following macros are used to package up
+testing for newlines. NLBLOCK, PSSTART, and PSEND are defined in the various
+modules to indicate in which datablock the parameters exist, and what the
+start/end of string field names are. */
+
+#define NLTYPE_FIXED 0 /* Newline is a fixed length string */
+#define NLTYPE_ANY 1 /* Newline is any Unicode line ending */
+#define NLTYPE_ANYCRLF 2 /* Newline is CR, LF, or CRLF */
+
+/* This macro checks for a newline at the given position */
+
+#define IS_NEWLINE(p) \
+ ((NLBLOCK->nltype != NLTYPE_FIXED)? \
+ ((p) < NLBLOCK->PSEND && \
+ _pcre_is_newline((p), NLBLOCK->nltype, NLBLOCK->PSEND, &(NLBLOCK->nllen),\
+ utf8)) \
+ : \
+ ((p) <= NLBLOCK->PSEND - NLBLOCK->nllen && \
+ (p)[0] == NLBLOCK->nl[0] && \
+ (NLBLOCK->nllen == 1 || (p)[1] == NLBLOCK->nl[1]) \
+ ) \
+ )
+
+/* This macro checks for a newline immediately preceding the given position */
+
+#define WAS_NEWLINE(p) \
+ ((NLBLOCK->nltype != NLTYPE_FIXED)? \
+ ((p) > NLBLOCK->PSSTART && \
+ _pcre_was_newline((p), NLBLOCK->nltype, NLBLOCK->PSSTART, \
+ &(NLBLOCK->nllen), utf8)) \
+ : \
+ ((p) >= NLBLOCK->PSSTART + NLBLOCK->nllen && \
+ (p)[-NLBLOCK->nllen] == NLBLOCK->nl[0] && \
+ (NLBLOCK->nllen == 1 || (p)[-NLBLOCK->nllen+1] == NLBLOCK->nl[1]) \
+ ) \
+ )
+
+/* When PCRE is compiled as a C++ library, the subject pointer can be replaced
+with a custom type. This makes it possible, for example, to allow pcre_exec()
+to process subject strings that are discontinuous by using a smart pointer
+class. It must always be possible to inspect all of the subject string in
+pcre_exec() because of the way it backtracks. Two macros are required in the
+normal case, for sign-unspecified and unsigned char pointers. The former is
+used for the external interface and appears in pcre.h, which is why its name
+must begin with PCRE_. */
+
+#ifdef CUSTOM_SUBJECT_PTR
+#define PCRE_SPTR CUSTOM_SUBJECT_PTR
+#define USPTR CUSTOM_SUBJECT_PTR
+#else
+#define PCRE_SPTR const char *
+#define USPTR const unsigned char *
+#endif
+
+
+
+/* Include the public PCRE header and the definitions of UCP character property
+values. */
+
+#include "pcre.h"
+#include "ucp.h"
+
+/* When compiling for use with the Virtual Pascal compiler, these functions
+need to have their names changed. PCRE must be compiled with the -DVPCOMPAT
+option on the command line. */
+
+#ifdef VPCOMPAT
+#define strlen(s) _strlen(s)
+#define strncmp(s1,s2,m) _strncmp(s1,s2,m)
+#define memcmp(s,c,n) _memcmp(s,c,n)
+#define memcpy(d,s,n) _memcpy(d,s,n)
+#define memmove(d,s,n) _memmove(d,s,n)
+#define memset(s,c,n) _memset(s,c,n)
+#else /* VPCOMPAT */
+
+/* To cope with SunOS4 and other systems that lack memmove() but have bcopy(),
+define a macro for memmove() if HAVE_MEMMOVE is false, provided that HAVE_BCOPY
+is set. Otherwise, include an emulating function for those systems that have
+neither (there are some non-Unix environments where this is the case).
+
+#ifndef HAVE_MEMMOVE
+#undef memmove /* some systems may have a macro */
+#ifdef HAVE_BCOPY
+#define memmove(a, b, c) bcopy(b, a, c)
+#else /* HAVE_BCOPY */
+static void *
+pcre_memmove(void *d, const void *s, size_t n)
+{
+size_t i;
+unsigned char *dest = (unsigned char *)d;
+const unsigned char *src = (const unsigned char *)s;
+if (dest > src)
+ {
+ dest += n;
+ src += n;
+ for (i = 0; i < n; ++i) *(--dest) = *(--src);
+ return (void *)dest;
+ }
+else
+ {
+ for (i = 0; i < n; ++i) *dest++ = *src++;
+ return (void *)(dest - n);
+ }
+}
+#define memmove(a, b, c) pcre_memmove(a, b, c)
+#endif /* not HAVE_BCOPY */
+#endif /* not HAVE_MEMMOVE */
+#endif /* not VPCOMPAT */
+
+
+/* PCRE keeps offsets in its compiled code as 2-byte quantities (always stored
+in big-endian order) by default. These are used, for example, to link from the
+start of a subpattern to its alternatives and its end. The use of 2 bytes per
+offset limits the size of the compiled regex to around 64K, which is big enough
+for almost everybody. However, I received a request for an even bigger limit.
+For this reason, and also to make the code easier to maintain, the storing and
+loading of offsets from the byte string is now handled by the macros that are
+defined here.
+
+The macros are controlled by the value of LINK_SIZE. This defaults to 2 in
+the config.h file, but can be overridden by using -D on the command line. This
+is automated on Unix systems via the "configure" command. */
+
+#if LINK_SIZE == 2
+
+#define PUT(a,n,d) \
+ (a[n] = (d) >> 8), \
+ (a[(n)+1] = (d) & 255)
+
+#define GET(a,n) \
+ (((a)[n] << 8) | (a)[(n)+1])
+
+#define MAX_PATTERN_SIZE (1 << 16)
+
+
+#elif LINK_SIZE == 3
+
+#define PUT(a,n,d) \
+ (a[n] = (d) >> 16), \
+ (a[(n)+1] = (d) >> 8), \
+ (a[(n)+2] = (d) & 255)
+
+#define GET(a,n) \
+ (((a)[n] << 16) | ((a)[(n)+1] << 8) | (a)[(n)+2])
+
+#define MAX_PATTERN_SIZE (1 << 24)
+
+
+#elif LINK_SIZE == 4
+
+#define PUT(a,n,d) \
+ (a[n] = (d) >> 24), \
+ (a[(n)+1] = (d) >> 16), \
+ (a[(n)+2] = (d) >> 8), \
+ (a[(n)+3] = (d) & 255)
+
+#define GET(a,n) \
+ (((a)[n] << 24) | ((a)[(n)+1] << 16) | ((a)[(n)+2] << 8) | (a)[(n)+3])
+
+#define MAX_PATTERN_SIZE (1 << 30) /* Keep it positive */
+
+
+#else
+#error LINK_SIZE must be either 2, 3, or 4
+#endif
+
+
+/* Convenience macro defined in terms of the others */
+
+#define PUTINC(a,n,d) PUT(a,n,d), a += LINK_SIZE
+
+
+/* PCRE uses some other 2-byte quantities that do not change when the size of
+offsets changes. These are used for repeat counts and for other things such as
+capturing parenthesis numbers in back references. */
+
+#define PUT2(a,n,d) \
+ a[n] = (d) >> 8; \
+ a[(n)+1] = (d) & 255
+
+#define GET2(a,n) \
+ (((a)[n] << 8) | (a)[(n)+1])
+
+#define PUT2INC(a,n,d) PUT2(a,n,d), a += 2
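+
+/* Worked example (LINK_SIZE == 2): PUT(a, n, 0x1234) stores a[n] = 0x12 and
+a[(n)+1] = 0x34, and GET(a, n) reassembles 0x1234 from those two bytes, so the
+layout of offsets in the code vector does not depend on the host byte order. */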
+
+
+/* When UTF-8 encoding is being used, a character is no longer just a single
+byte. The macros for character handling generate simple sequences when used in
+byte-mode, and more complicated ones for UTF-8 characters. BACKCHAR should
+never be called in byte mode. To make sure it can never even appear when UTF-8
+support is omitted, we don't even define it. */
+
+#ifndef SUPPORT_UTF8
+#define GETCHAR(c, eptr) c = *eptr;
+#define GETCHARTEST(c, eptr) c = *eptr;
+#define GETCHARINC(c, eptr) c = *eptr++;
+#define GETCHARINCTEST(c, eptr) c = *eptr++;
+#define GETCHARLEN(c, eptr, len) c = *eptr;
+/* #define BACKCHAR(eptr) */
+
+#else /* SUPPORT_UTF8 */
+
+/* Get the next UTF-8 character, not advancing the pointer. This is called when
+we know we are in UTF-8 mode. */
+
+#define GETCHAR(c, eptr) \
+ c = *eptr; \
+ if (c >= 0xc0) \
+ { \
+ int gcii; \
+ int gcaa = _pcre_utf8_table4[c & 0x3f]; /* Number of additional bytes */ \
+ int gcss = 6*gcaa; \
+ c = (c & _pcre_utf8_table3[gcaa]) << gcss; \
+ for (gcii = 1; gcii <= gcaa; gcii++) \
+ { \
+ gcss -= 6; \
+ c |= (eptr[gcii] & 0x3f) << gcss; \
+ } \
+ }
+
+/* Get the next UTF-8 character, testing for UTF-8 mode, and not advancing the
+pointer. */
+
+#define GETCHARTEST(c, eptr) \
+ c = *eptr; \
+ if (utf8 && c >= 0xc0) \
+ { \
+ int gcii; \
+ int gcaa = _pcre_utf8_table4[c & 0x3f]; /* Number of additional bytes */ \
+ int gcss = 6*gcaa; \
+ c = (c & _pcre_utf8_table3[gcaa]) << gcss; \
+ for (gcii = 1; gcii <= gcaa; gcii++) \
+ { \
+ gcss -= 6; \
+ c |= (eptr[gcii] & 0x3f) << gcss; \
+ } \
+ }
+
+/* Get the next UTF-8 character, advancing the pointer. This is called when we
+know we are in UTF-8 mode. */
+
+#define GETCHARINC(c, eptr) \
+ c = *eptr++; \
+ if (c >= 0xc0) \
+ { \
+ int gcaa = _pcre_utf8_table4[c & 0x3f]; /* Number of additional bytes */ \
+ int gcss = 6*gcaa; \
+ c = (c & _pcre_utf8_table3[gcaa]) << gcss; \
+ while (gcaa-- > 0) \
+ { \
+ gcss -= 6; \
+ c |= (*eptr++ & 0x3f) << gcss; \
+ } \
+ }
+
+/* Get the next character, testing for UTF-8 mode, and advancing the pointer */
+
+#define GETCHARINCTEST(c, eptr) \
+ c = *eptr++; \
+ if (utf8 && c >= 0xc0) \
+ { \
+ int gcaa = _pcre_utf8_table4[c & 0x3f]; /* Number of additional bytes */ \
+ int gcss = 6*gcaa; \
+ c = (c & _pcre_utf8_table3[gcaa]) << gcss; \
+ while (gcaa-- > 0) \
+ { \
+ gcss -= 6; \
+ c |= (*eptr++ & 0x3f) << gcss; \
+ } \
+ }
+
+/* Get the next UTF-8 character, not advancing the pointer, incrementing length
+if there are extra bytes. This is called when we know we are in UTF-8 mode. */
+
+#define GETCHARLEN(c, eptr, len) \
+ c = *eptr; \
+ if (c >= 0xc0) \
+ { \
+ int gcii; \
+ int gcaa = _pcre_utf8_table4[c & 0x3f]; /* Number of additional bytes */ \
+ int gcss = 6*gcaa; \
+ c = (c & _pcre_utf8_table3[gcaa]) << gcss; \
+ for (gcii = 1; gcii <= gcaa; gcii++) \
+ { \
+ gcss -= 6; \
+ c |= (eptr[gcii] & 0x3f) << gcss; \
+ } \
+ len += gcaa; \
+ }
+
+/* If the pointer is not at the start of a character, move it back until
+it is. This is called only in UTF-8 mode - we don't put a test within the macro
+because almost all calls are already within a block of UTF-8 only code. */
+
+#define BACKCHAR(eptr) while((*eptr & 0xc0) == 0x80) eptr--
+
+#endif
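+
+/* Worked example for the macros above (assuming the helper tables encode
+standard UTF-8): for the two-byte sequence 0xc3 0xa9 (U+00E9), the lead byte
+0xc3 calls for one additional byte, so c = (0xc3 & 0x1f) << 6 = 0xc0, and then
+c |= 0xa9 & 0x3f gives 0xe9. BACKCHAR applied at the 0xa9 byte steps back one
+position, because (0xa9 & 0xc0) == 0x80 marks it as a continuation byte. */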
+
+
+/* In case there is no definition of offsetof() provided - though any proper
+Standard C system should have one. */
+
+#ifndef offsetof
+#define offsetof(p_type,field) ((size_t)&(((p_type *)0)->field))
+#endif
+
+
+/* These are the public options that can change during matching. */
+
+#define PCRE_IMS (PCRE_CASELESS|PCRE_MULTILINE|PCRE_DOTALL)
+
+/* Private flags containing information about the compiled regex. They used to
+live at the top end of the options word, but that got almost full, so now they
+are in a 16-bit flags word. */
+
+#define PCRE_NOPARTIAL 0x0001 /* can't use partial with this regex */
+#define PCRE_FIRSTSET 0x0002 /* first_byte is set */
+#define PCRE_REQCHSET 0x0004 /* req_byte is set */
+#define PCRE_STARTLINE 0x0008 /* start after \n for multiline */
+#define PCRE_JCHANGED 0x0010 /* j option used in regex */
+#define PCRE_HASCRORLF 0x0020 /* explicit \r or \n in pattern */
+
+/* Options for the "extra" block produced by pcre_study(). */
+
+#define PCRE_STUDY_MAPPED 0x01 /* a map of starting chars exists */
+
+/* Masks for identifying the public options that are permitted at compile
+time, run time, or study time, respectively. */
+
+#define PCRE_NEWLINE_BITS (PCRE_NEWLINE_CR|PCRE_NEWLINE_LF|PCRE_NEWLINE_ANY| \
+ PCRE_NEWLINE_ANYCRLF)
+
+#define PUBLIC_OPTIONS \
+ (PCRE_CASELESS|PCRE_EXTENDED|PCRE_ANCHORED|PCRE_MULTILINE| \
+ PCRE_DOTALL|PCRE_DOLLAR_ENDONLY|PCRE_EXTRA|PCRE_UNGREEDY|PCRE_UTF8| \
+ PCRE_NO_AUTO_CAPTURE|PCRE_NO_UTF8_CHECK|PCRE_AUTO_CALLOUT|PCRE_FIRSTLINE| \
+ PCRE_DUPNAMES|PCRE_NEWLINE_BITS|PCRE_BSR_ANYCRLF|PCRE_BSR_UNICODE)
+
+#define PUBLIC_EXEC_OPTIONS \
+ (PCRE_ANCHORED|PCRE_NOTBOL|PCRE_NOTEOL|PCRE_NOTEMPTY|PCRE_NO_UTF8_CHECK| \
+ PCRE_PARTIAL|PCRE_NEWLINE_BITS|PCRE_BSR_ANYCRLF|PCRE_BSR_UNICODE)
+
+#define PUBLIC_DFA_EXEC_OPTIONS \
+ (PCRE_ANCHORED|PCRE_NOTBOL|PCRE_NOTEOL|PCRE_NOTEMPTY|PCRE_NO_UTF8_CHECK| \
+ PCRE_PARTIAL|PCRE_DFA_SHORTEST|PCRE_DFA_RESTART|PCRE_NEWLINE_BITS| \
+ PCRE_BSR_ANYCRLF|PCRE_BSR_UNICODE)
+
+#define PUBLIC_STUDY_OPTIONS 0 /* None defined */
+
+/* Magic number to provide a small check against being handed junk. Also used
+to detect whether a pattern was compiled on a host of different endianness. */
+
+#define MAGIC_NUMBER 0x50435245UL /* 'PCRE' */
+
+/* Negative values for the firstchar and reqchar variables */
+
+#define REQ_UNSET (-2)
+#define REQ_NONE (-1)
+
+/* The maximum remaining length of subject we are prepared to search for a
+req_byte match. */
+
+#define REQ_BYTE_MAX 1000
+
+/* Flags added to firstbyte or reqbyte; a "non-literal" item is either a
+variable-length repeat, or anything other than literal characters. */
+
+#define REQ_CASELESS 0x0100 /* indicates caselessness */
+#define REQ_VARY 0x0200 /* reqbyte followed non-literal item */
+
+/* Miscellaneous definitions */
+
+typedef int BOOL;
+
+#define FALSE 0
+#define TRUE 1
+
+/* Escape items that are just an encoding of a particular data value. */
+
+#ifndef ESC_e
+#define ESC_e 27
+#endif
+
+#ifndef ESC_f
+#define ESC_f '\f'
+#endif
+
+#ifndef ESC_n
+#define ESC_n '\n'
+#endif
+
+#ifndef ESC_r
+#define ESC_r '\r'
+#endif
+
+/* We can't officially use ESC_t because it is a POSIX reserved identifier
+(presumably because of all the others like size_t). */
+
+#ifndef ESC_tee
+#define ESC_tee '\t'
+#endif
+
+/* Codes for different types of Unicode property */
+
+#define PT_ANY 0 /* Any property - matches all chars */
+#define PT_LAMP 1 /* L& - the union of Lu, Ll, Lt */
+#define PT_GC 2 /* General characteristic (e.g. L) */
+#define PT_PC 3 /* Particular characteristic (e.g. Lu) */
+#define PT_SC 4 /* Script (e.g. Han) */
+
+/* Flag bits and data types for the extended class (OP_XCLASS) for classes that
+contain UTF-8 characters with values greater than 255. */
+
+#define XCL_NOT 0x01 /* Flag: this is a negative class */
+#define XCL_MAP 0x02 /* Flag: a 32-byte map is present */
+
+#define XCL_END 0 /* Marks end of individual items */
+#define XCL_SINGLE 1 /* Single item (one multibyte char) follows */
+#define XCL_RANGE 2 /* A range (two multibyte chars) follows */
+#define XCL_PROP 3 /* Unicode property (2-byte property code follows) */
+#define XCL_NOTPROP 4 /* Unicode inverted property (ditto) */
+
+/* These are escaped items that aren't just an encoding of a particular data
+value such as \n. They must have non-zero values, as check_escape() returns
+their negation. Also, they must appear in the same order as in the opcode
+definitions below, up to ESC_z. There's a dummy for OP_ANY because it
+corresponds to "." rather than an escape sequence. The final one must be
+ESC_REF as subsequent values are used for backreferences (\1, \2, \3, etc).
+There are two tests in the code for an escape greater than ESC_b and less than
+ESC_Z to detect the types that may be repeated. These are the types that
+consume characters. If any new escapes are put in between that don't consume a
+character, that code will have to change. */
+
+enum { ESC_A = 1, ESC_G, ESC_K, ESC_B, ESC_b, ESC_D, ESC_d, ESC_S, ESC_s,
+ ESC_W, ESC_w, ESC_dum1, ESC_C, ESC_P, ESC_p, ESC_R, ESC_H, ESC_h,
+ ESC_V, ESC_v, ESC_X, ESC_Z, ESC_z, ESC_E, ESC_Q, ESC_k, ESC_REF };
+
+
+/* Opcode table: Starting from 1 (i.e. after OP_END), the values up to
+OP_EOD must correspond in order to the list of escapes immediately above.
+
+*** NOTE NOTE NOTE *** Whenever this list is updated, the two macro definitions
+that follow must also be updated to match. There is also a table called
+"coptable" in pcre_dfa_exec.c that must be updated. */
+
+enum {
+ OP_END, /* 0 End of pattern */
+
+ /* Values corresponding to backslashed metacharacters */
+
+ OP_SOD, /* 1 Start of data: \A */
+ OP_SOM, /* 2 Start of match (subject + offset): \G */
+ OP_SET_SOM, /* 3 Set start of match (\K) */
+ OP_NOT_WORD_BOUNDARY, /* 4 \B */
+ OP_WORD_BOUNDARY, /* 5 \b */
+ OP_NOT_DIGIT, /* 6 \D */
+ OP_DIGIT, /* 7 \d */
+ OP_NOT_WHITESPACE, /* 8 \S */
+ OP_WHITESPACE, /* 9 \s */
+ OP_NOT_WORDCHAR, /* 10 \W */
+ OP_WORDCHAR, /* 11 \w */
+ OP_ANY, /* 12 Match any character */
+ OP_ANYBYTE, /* 13 Match any byte (\C); different to OP_ANY for UTF-8 */
+ OP_NOTPROP, /* 14 \P (not Unicode property) */
+ OP_PROP, /* 15 \p (Unicode property) */
+ OP_ANYNL, /* 16 \R (any newline sequence) */
+ OP_NOT_HSPACE, /* 17 \H (not horizontal whitespace) */
+ OP_HSPACE, /* 18 \h (horizontal whitespace) */
+ OP_NOT_VSPACE, /* 19 \V (not vertical whitespace) */
+ OP_VSPACE, /* 20 \v (vertical whitespace) */
+ OP_EXTUNI, /* 21 \X (extended Unicode sequence) */
+ OP_EODN, /* 22 End of data or \n at end of data: \Z. */
+ OP_EOD, /* 23 End of data: \z */
+
+ OP_OPT, /* 24 Set runtime options */
+ OP_CIRC, /* 25 Start of line - varies with multiline switch */
+ OP_DOLL, /* 26 End of line - varies with multiline switch */
+ OP_CHAR, /* 27 Match one character, casefully */
+ OP_CHARNC, /* 28 Match one character, caselessly */
+ OP_NOT, /* 29 Match one character, not the following one */
+
+ OP_STAR, /* 30 The maximizing and minimizing versions of */
+ OP_MINSTAR, /* 31 these six opcodes must come in pairs, with */
+ OP_PLUS, /* 32 the minimizing one second. */
+ OP_MINPLUS, /* 33 This first set applies to single characters.*/
+ OP_QUERY, /* 34 */
+ OP_MINQUERY, /* 35 */
+
+ OP_UPTO, /* 36 From 0 to n matches */
+ OP_MINUPTO, /* 37 */
+ OP_EXACT, /* 38 Exactly n matches */
+
+ OP_POSSTAR, /* 39 Possessified star */
+ OP_POSPLUS, /* 40 Possessified plus */
+ OP_POSQUERY, /* 41 Possessified query */
+ OP_POSUPTO, /* 42 Possessified upto */
+
+ OP_NOTSTAR, /* 43 The maximizing and minimizing versions of */
+ OP_NOTMINSTAR, /* 44 these six opcodes must come in pairs, with */
+ OP_NOTPLUS, /* 45 the minimizing one second. They must be in */
+ OP_NOTMINPLUS, /* 46 exactly the same order as those above. */
+ OP_NOTQUERY, /* 47 This set applies to "not" single characters. */
+ OP_NOTMINQUERY, /* 48 */
+
+ OP_NOTUPTO, /* 49 From 0 to n matches */
+ OP_NOTMINUPTO, /* 50 */
+ OP_NOTEXACT, /* 51 Exactly n matches */
+
+ OP_NOTPOSSTAR, /* 52 Possessified versions */
+ OP_NOTPOSPLUS, /* 53 */
+ OP_NOTPOSQUERY, /* 54 */
+ OP_NOTPOSUPTO, /* 55 */
+
+ OP_TYPESTAR, /* 56 The maximizing and minimizing versions of */
+ OP_TYPEMINSTAR, /* 57 these six opcodes must come in pairs, with */
+ OP_TYPEPLUS, /* 58 the minimizing one second. These codes must */
+ OP_TYPEMINPLUS, /* 59 be in exactly the same order as those above. */
+ OP_TYPEQUERY, /* 60 This set applies to character types such as \d */
+ OP_TYPEMINQUERY, /* 61 */
+
+ OP_TYPEUPTO, /* 62 From 0 to n matches */
+ OP_TYPEMINUPTO, /* 63 */
+ OP_TYPEEXACT, /* 64 Exactly n matches */
+
+ OP_TYPEPOSSTAR, /* 65 Possessified versions */
+ OP_TYPEPOSPLUS, /* 66 */
+ OP_TYPEPOSQUERY, /* 67 */
+ OP_TYPEPOSUPTO, /* 68 */
+
+ OP_CRSTAR, /* 69 The maximizing and minimizing versions of */
+ OP_CRMINSTAR, /* 70 all these opcodes must come in pairs, with */
+ OP_CRPLUS, /* 71 the minimizing one second. These codes must */
+ OP_CRMINPLUS, /* 72 be in exactly the same order as those above. */
+ OP_CRQUERY, /* 73 These are for character classes and back refs */
+ OP_CRMINQUERY, /* 74 */
+ OP_CRRANGE, /* 75 These are different to the three sets above. */
+ OP_CRMINRANGE, /* 76 */
+
+ OP_CLASS, /* 77 Match a character class, chars < 256 only */
+ OP_NCLASS, /* 78 Same, but the bitmap was created from a negative
+ class - the difference is relevant only when a UTF-8
+ character > 255 is encountered. */
+
+ OP_XCLASS, /* 79 Extended class for handling UTF-8 chars within the
+ class. This does both positive and negative. */
+
+ OP_REF, /* 80 Match a back reference */
+ OP_RECURSE, /* 81 Match a numbered subpattern (possibly recursive) */
+ OP_CALLOUT, /* 82 Call out to external function if provided */
+
+ OP_ALT, /* 83 Start of alternation */
+ OP_KET, /* 84 End of group that doesn't have an unbounded repeat */
+ OP_KETRMAX, /* 85 These two must remain together and in this */
+ OP_KETRMIN, /* 86 order. They are for groups that repeat for ever. */
+
+ /* The assertions must come before BRA, CBRA, ONCE, and COND.*/
+
+ OP_ASSERT, /* 87 Positive lookahead */
+ OP_ASSERT_NOT, /* 88 Negative lookahead */
+ OP_ASSERTBACK, /* 89 Positive lookbehind */
+ OP_ASSERTBACK_NOT, /* 90 Negative lookbehind */
+ OP_REVERSE, /* 91 Move pointer back - used in lookbehind assertions */
+
+ /* ONCE, BRA, CBRA, and COND must come after the assertions, with ONCE first,
+ as there's a test for >= ONCE for a subpattern that isn't an assertion. */
+
+ OP_ONCE, /* 92 Atomic group */
+ OP_BRA, /* 93 Start of non-capturing bracket */
+ OP_CBRA, /* 94 Start of capturing bracket */
+ OP_COND, /* 95 Conditional group */
+
+ /* These three must follow the previous three, in the same order. There's a
+ check for >= SBRA to distinguish the two sets. */
+
+ OP_SBRA, /* 96 Start of non-capturing bracket, check empty */
+ OP_SCBRA, /* 97 Start of capturing bracket, check empty */
+ OP_SCOND, /* 98 Conditional group, check empty */
+
+ OP_CREF, /* 99 Used to hold a capture number as condition */
+ OP_RREF, /* 100 Used to hold a recursion number as condition */
+ OP_DEF, /* 101 The DEFINE condition */
+
+ OP_BRAZERO, /* 102 These two must remain together and in this */
+ OP_BRAMINZERO, /* 103 order. */
+
+ /* These are backtracking control verbs */
+
+ OP_PRUNE, /* 104 */
+ OP_SKIP, /* 105 */
+ OP_THEN, /* 106 */
+ OP_COMMIT, /* 107 */
+
+ /* These are forced failure and success verbs */
+
+ OP_FAIL, /* 108 */
+ OP_ACCEPT /* 109 */
+};
+
+
+/* This macro defines textual names for all the opcodes. These are used only
+for debugging. The macro is referenced only in pcre_printint.c. */
+
+#define OP_NAME_LIST \
+ "End", "\\A", "\\G", "\\K", "\\B", "\\b", "\\D", "\\d", \
+ "\\S", "\\s", "\\W", "\\w", "Any", "Anybyte", \
+ "notprop", "prop", "\\R", "\\H", "\\h", "\\V", "\\v", \
+ "extuni", "\\Z", "\\z", \
+ "Opt", "^", "$", "char", "charnc", "not", \
+ "*", "*?", "+", "+?", "?", "??", "{", "{", "{", \
+ "*+","++", "?+", "{", \
+ "*", "*?", "+", "+?", "?", "??", "{", "{", "{", \
+ "*+","++", "?+", "{", \
+ "*", "*?", "+", "+?", "?", "??", "{", "{", "{", \
+ "*+","++", "?+", "{", \
+ "*", "*?", "+", "+?", "?", "??", "{", "{", \
+ "class", "nclass", "xclass", "Ref", "Recurse", "Callout", \
+ "Alt", "Ket", "KetRmax", "KetRmin", "Assert", "Assert not", \
+ "AssertB", "AssertB not", "Reverse", \
+ "Once", "Bra", "CBra", "Cond", "SBra", "SCBra", "SCond", \
+ "Cond ref", "Cond rec", "Cond def", "Brazero", "Braminzero", \
+ "*PRUNE", "*SKIP", "*THEN", "*COMMIT", "*FAIL", "*ACCEPT"
+
+
+/* This macro defines the length of fixed length operations in the compiled
+regex. The lengths are used when searching for specific things, and also in the
+debugging printing of a compiled regex. We use a macro so that it can be
+defined close to the definitions of the opcodes themselves.
+
+As things have been extended, some of these are no longer fixed lengths, but are
+minima instead. For example, the length of a single-character repeat may vary
+in UTF-8 mode. The code that uses this table must know about such things. */
+
+#define OP_LENGTHS \
+ 1, /* End */ \
+ 1, 1, 1, 1, 1, /* \A, \G, \K, \B, \b */ \
+ 1, 1, 1, 1, 1, 1, /* \D, \d, \S, \s, \W, \w */ \
+ 1, 1, /* Any, Anybyte */ \
+ 3, 3, 1, /* NOTPROP, PROP, EXTUNI */ \
+ 1, 1, 1, 1, 1, /* \R, \H, \h, \V, \v */ \
+ 1, 1, 2, 1, 1, /* \Z, \z, Opt, ^, $ */ \
+ 2, /* Char - the minimum length */ \
+ 2, /* Charnc - the minimum length */ \
+ 2, /* not */ \
+ /* Positive single-char repeats ** These are */ \
+ 2, 2, 2, 2, 2, 2, /* *, *?, +, +?, ?, ?? ** minima in */ \
+ 4, 4, 4, /* upto, minupto, exact ** UTF-8 mode */ \
+ 2, 2, 2, 4, /* *+, ++, ?+, upto+ */ \
+ /* Negative single-char repeats - only for chars < 256 */ \
+ 2, 2, 2, 2, 2, 2, /* NOT *, *?, +, +?, ?, ?? */ \
+ 4, 4, 4, /* NOT upto, minupto, exact */ \
+ 2, 2, 2, 4, /* Possessive *, +, ?, upto */ \
+ /* Positive type repeats */ \
+ 2, 2, 2, 2, 2, 2, /* Type *, *?, +, +?, ?, ?? */ \
+ 4, 4, 4, /* Type upto, minupto, exact */ \
+ 2, 2, 2, 4, /* Possessive *+, ++, ?+, upto+ */ \
+ /* Character class & ref repeats */ \
+ 1, 1, 1, 1, 1, 1, /* *, *?, +, +?, ?, ?? */ \
+ 5, 5, /* CRRANGE, CRMINRANGE */ \
+ 33, /* CLASS */ \
+ 33, /* NCLASS */ \
+ 0, /* XCLASS - variable length */ \
+ 3, /* REF */ \
+ 1+LINK_SIZE, /* RECURSE */ \
+ 2+2*LINK_SIZE, /* CALLOUT */ \
+ 1+LINK_SIZE, /* Alt */ \
+ 1+LINK_SIZE, /* Ket */ \
+ 1+LINK_SIZE, /* KetRmax */ \
+ 1+LINK_SIZE, /* KetRmin */ \
+ 1+LINK_SIZE, /* Assert */ \
+ 1+LINK_SIZE, /* Assert not */ \
+ 1+LINK_SIZE, /* Assert behind */ \
+ 1+LINK_SIZE, /* Assert behind not */ \
+ 1+LINK_SIZE, /* Reverse */ \
+ 1+LINK_SIZE, /* ONCE */ \
+ 1+LINK_SIZE, /* BRA */ \
+ 3+LINK_SIZE, /* CBRA */ \
+ 1+LINK_SIZE, /* COND */ \
+ 1+LINK_SIZE, /* SBRA */ \
+ 3+LINK_SIZE, /* SCBRA */ \
+ 1+LINK_SIZE, /* SCOND */ \
+ 3, /* CREF */ \
+ 3, /* RREF */ \
+ 1, /* DEF */ \
+ 1, 1, /* BRAZERO, BRAMINZERO */ \
+ 1, 1, 1, 1, /* PRUNE, SKIP, THEN, COMMIT, */ \
+ 1, 1 /* FAIL, ACCEPT */
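
[Editorial sketch, not part of the PCRE 7.4 sources or of this commit: the table above is
exported as _pcre_OP_lengths[] (declared further down in this header). As an illustration
only, fixed-length items in compiled code can be skipped by indexing that table with the
opcode; real consumers also special-case the variable-length items (OP_XCLASS, UTF-8
characters) that the comment above warns about. "start_of_compiled_code" is hypothetical.]

    /* Illustrative only: step over a run of fixed-length items. */
    const uschar *code = start_of_compiled_code;   /* assumed to point at an opcode */
    while (*code != OP_END)
      {
      code += _pcre_OP_lengths[*code];             /* advance by this item's length */
      }
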
+
+
+/* A magic value for OP_RREF to indicate the "any recursion" condition. */
+
+#define RREF_ANY 0xffff
+
+/* Error code numbers. They are given names so that they can more easily be
+tracked. */
+
+enum { ERR0, ERR1, ERR2, ERR3, ERR4, ERR5, ERR6, ERR7, ERR8, ERR9,
+ ERR10, ERR11, ERR12, ERR13, ERR14, ERR15, ERR16, ERR17, ERR18, ERR19,
+ ERR20, ERR21, ERR22, ERR23, ERR24, ERR25, ERR26, ERR27, ERR28, ERR29,
+ ERR30, ERR31, ERR32, ERR33, ERR34, ERR35, ERR36, ERR37, ERR38, ERR39,
+ ERR40, ERR41, ERR42, ERR43, ERR44, ERR45, ERR46, ERR47, ERR48, ERR49,
+ ERR50, ERR51, ERR52, ERR53, ERR54, ERR55, ERR56, ERR57, ERR58, ERR59,
+ ERR60, ERR61 };
+
+/* The real format of the start of the pcre block; the index of names and the
+code vector run on as long as necessary after the end. We store an explicit
+offset to the name table so that if a regex is compiled on one host, saved, and
+then run on another where the size of pointers is different, all might still
+be well. For the case of compiled-on-4 and run-on-8, we include an extra
+pointer that is always NULL. For future-proofing, a few dummy fields were
+originally included - even though you can never get this planning right - but
+there is only one left now.
+
+NOTE NOTE NOTE:
+Because people can now save and re-use compiled patterns, any additions to this
+structure should be made at the end, and something earlier (e.g. a new
+flag in the options or one of the dummy fields) should indicate that the new
+fields are present. Currently PCRE always sets the dummy fields to zero.
+NOTE NOTE NOTE:
+*/
+
+typedef struct real_pcre {
+ pcre_uint32 magic_number;
+ pcre_uint32 size; /* Total that was malloced */
+ pcre_uint32 options; /* Public options */
+ pcre_uint16 flags; /* Private flags */
+ pcre_uint16 dummy1; /* For future use */
+ pcre_uint16 top_bracket;
+ pcre_uint16 top_backref;
+ pcre_uint16 first_byte;
+ pcre_uint16 req_byte;
+ pcre_uint16 name_table_offset; /* Offset to name table that follows */
+ pcre_uint16 name_entry_size; /* Size of any name items */
+ pcre_uint16 name_count; /* Number of name items */
+ pcre_uint16 ref_count; /* Reference count */
+
+ const unsigned char *tables; /* Pointer to tables or NULL for std */
+ const unsigned char *nullpad; /* NULL padding */
+} real_pcre;
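
[Editorial sketch, not part of the PCRE 7.4 sources or of this commit: the name_count,
name_entry_size and name_table_offset fields above back the public PCRE_INFO_NAMECOUNT,
PCRE_INFO_NAMEENTRYSIZE and PCRE_INFO_NAMETABLE queries. A hedged example of walking the
name table through pcre_fullinfo() rather than through this private struct; "re" is assumed
to be a pcre* compiled elsewhere, and each entry holds a big-endian capture number in its
first two bytes followed by the zero-terminated name.]

    int namecount = 0, entrysize = 0;
    unsigned char *nametable = NULL;
    pcre_fullinfo(re, NULL, PCRE_INFO_NAMECOUNT, &namecount);
    pcre_fullinfo(re, NULL, PCRE_INFO_NAMEENTRYSIZE, &entrysize);
    pcre_fullinfo(re, NULL, PCRE_INFO_NAMETABLE, &nametable);
    for (int i = 0; i < namecount; i++)
      {
      unsigned char *entry = nametable + i * entrysize;
      int group = (entry[0] << 8) | entry[1];      /* capture group number */
      printf("group %d is named \"%s\"\n", group, entry + 2);
      }
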
+
+/* The format of the block used to store data from pcre_study(). The same
+remark (see NOTE above) about extending this structure applies. */
+
+typedef struct pcre_study_data {
+ pcre_uint32 size; /* Total that was malloced */
+ pcre_uint32 options;
+ uschar start_bits[32];
+} pcre_study_data;
+
+/* Structure for passing "static" information around between the functions
+doing the compiling, so that they are thread-safe. */
+
+typedef struct compile_data {
+ const uschar *lcc; /* Points to lower casing table */
+ const uschar *fcc; /* Points to case-flipping table */
+ const uschar *cbits; /* Points to character type table */
+ const uschar *ctypes; /* Points to table of type maps */
+ const uschar *start_workspace;/* The start of working space */
+ const uschar *start_code; /* The start of the compiled code */
+ const uschar *start_pattern; /* The start of the pattern */
+ const uschar *end_pattern; /* The end of the pattern */
+ uschar *hwm; /* High watermark of workspace */
+ uschar *name_table; /* The name/number table */
+ int names_found; /* Number of entries so far */
+ int name_entry_size; /* Size of each entry */
+ int bracount; /* Count of capturing parens */
+ int top_backref; /* Maximum back reference */
+ unsigned int backref_map; /* Bitmap of low back refs */
+ int external_options; /* External (initial) options */
+ int external_flags; /* External flag bits to be set */
+ int req_varyopt; /* "After variable item" flag for reqbyte */
+ BOOL had_accept; /* (*ACCEPT) encountered */
+ int nltype; /* Newline type */
+ int nllen; /* Newline string length */
+ uschar nl[4]; /* Newline string when fixed length */
+} compile_data;
+
+/* Structure for maintaining a chain of pointers to the currently incomplete
+branches, for testing for left recursion. */
+
+typedef struct branch_chain {
+ struct branch_chain *outer;
+ uschar *current;
+} branch_chain;
+
+/* Structure for items in a linked list that represents an explicit recursive
+call within the pattern. */
+
+typedef struct recursion_info {
+ struct recursion_info *prevrec; /* Previous recursion record (or NULL) */
+ int group_num; /* Number of group that was called */
+ const uschar *after_call; /* "Return value": points after the call in the expr */
+ USPTR save_start; /* Old value of mstart */
+ int *offset_save; /* Pointer to start of saved offsets */
+ int saved_max; /* Number of saved offsets */
+} recursion_info;
+
+/* Structure for building a chain of data for holding the values of the subject
+pointer at the start of each subpattern, so as to detect when an empty string
+has been matched by a subpattern - to break infinite loops. */
+
+typedef struct eptrblock {
+ struct eptrblock *epb_prev;
+ USPTR epb_saved_eptr;
+} eptrblock;
+
+
+/* Structure for passing "static" information around between the functions
+doing traditional NFA matching, so that they are thread-safe. */
+
+typedef struct match_data {
+ unsigned long int match_call_count; /* As it says */
+ unsigned long int match_limit; /* As it says */
+ unsigned long int match_limit_recursion; /* As it says */
+ int *offset_vector; /* Offset vector */
+ int offset_end; /* One past the end */
+ int offset_max; /* The maximum usable for return data */
+ int nltype; /* Newline type */
+ int nllen; /* Newline string length */
+ uschar nl[4]; /* Newline string when fixed */
+ const uschar *lcc; /* Points to lower casing table */
+ const uschar *ctypes; /* Points to table of type maps */
+ BOOL offset_overflow; /* Set if too many extractions */
+ BOOL notbol; /* NOTBOL flag */
+ BOOL noteol; /* NOTEOL flag */
+ BOOL utf8; /* UTF8 flag */
+ BOOL endonly; /* Dollar not before final \n */
+ BOOL notempty; /* Empty string match not wanted */
+ BOOL partial; /* PARTIAL flag */
+ BOOL hitend; /* Hit the end of the subject at some point */
+ BOOL bsr_anycrlf; /* \R is just any CRLF, not full Unicode */
+ const uschar *start_code; /* For use when recursing */
+ USPTR start_subject; /* Start of the subject string */
+ USPTR end_subject; /* End of the subject string */
+ USPTR start_match_ptr; /* Start of matched string */
+ USPTR end_match_ptr; /* Subject position at end match */
+ int end_offset_top; /* Highwater mark at end of match */
+ int capture_last; /* Most recent capture number */
+ int start_offset; /* The start offset value */
+ eptrblock *eptrchain; /* Chain of eptrblocks for tail recursions */
+ int eptrn; /* Next free eptrblock */
+ recursion_info *recursive; /* Linked list of recursion data */
+ void *callout_data; /* To pass back to callouts */
+} match_data;
+
+/* A similar structure is used for the same purpose by the DFA matching
+functions. */
+
+typedef struct dfa_match_data {
+ const uschar *start_code; /* Start of the compiled pattern */
+ const uschar *start_subject; /* Start of the subject string */
+ const uschar *end_subject; /* End of subject string */
+ const uschar *tables; /* Character tables */
+ int moptions; /* Match options */
+ int poptions; /* Pattern options */
+ int nltype; /* Newline type */
+ int nllen; /* Newline string length */
+ uschar nl[4]; /* Newline string when fixed */
+ void *callout_data; /* To pass back to callouts */
+} dfa_match_data;
+
+/* Bit definitions for entries in the pcre_ctypes table. */
+
+#define ctype_space 0x01
+#define ctype_letter 0x02
+#define ctype_digit 0x04
+#define ctype_xdigit 0x08
+#define ctype_word 0x10 /* alphameric or '_' */
+#define ctype_meta 0x80 /* regexp meta char or zero (end pattern) */
+
+/* Offsets for the bitmap tables in pcre_cbits. Each table contains a set
+of bits for a class map. Some classes are built by combining these tables. */
+
+#define cbit_space 0 /* [:space:] or \s */
+#define cbit_xdigit 32 /* [:xdigit:] */
+#define cbit_digit 64 /* [:digit:] or \d */
+#define cbit_upper 96 /* [:upper:] */
+#define cbit_lower 128 /* [:lower:] */
+#define cbit_word 160 /* [:word:] or \w */
+#define cbit_graph 192 /* [:graph:] */
+#define cbit_print 224 /* [:print:] */
+#define cbit_punct 256 /* [:punct:] */
+#define cbit_cntrl 288 /* [:cntrl:] */
+#define cbit_length 320 /* Length of the cbits table */
+
+/* Offsets of the various tables from the base tables pointer, and
+total length. */
+
+#define lcc_offset 0
+#define fcc_offset 256
+#define cbits_offset 512
+#define ctypes_offset (cbits_offset + cbit_length)
+#define tables_length (ctypes_offset + 256)
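
[Editorial sketch, not part of the PCRE 7.4 sources or of this commit: putting the two
blocks above together, a byte's class membership is answered either from the 32-byte
per-class bitmaps in the cbits table or from the per-character type byte in the ctypes
table. A minimal illustration, mirroring how pcre_maketables() (below) fills the tables.]

    const unsigned char *tables = pcre_maketables();   /* or _pcre_default_tables */
    const unsigned char *cbits  = tables + cbits_offset;
    const unsigned char *ctypes = tables + ctypes_offset;
    int c = 'a';

    int in_word_class = (cbits[cbit_word + c/8] & (1 << (c & 7))) != 0;  /* \w via bitmap   */
    int is_digit      = (ctypes[c] & ctype_digit) != 0;                  /* \d via type map */
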
+
+/* Layout of the UCP type table that translates property names into types and
+codes. Each entry used to point directly to a name, but to reduce the number of
+relocations in shared libraries, it now has an offset into a single string
+instead. */
+
+typedef struct {
+ pcre_uint16 name_offset;
+ pcre_uint16 type;
+ pcre_uint16 value;
+} ucp_type_table;
+
+
+/* Internal shared data tables. These are tables that are used by more than one
+of the exported public functions. They have to be "external" in the C sense,
+but are not part of the PCRE public API. The data for these tables is in the
+pcre_tables.c module. */
+
+extern const int _pcre_utf8_table1[];
+extern const int _pcre_utf8_table2[];
+extern const int _pcre_utf8_table3[];
+extern const uschar _pcre_utf8_table4[];
+
+extern const int _pcre_utf8_table1_size;
+
+extern const char _pcre_utt_names[];
+extern const ucp_type_table _pcre_utt[];
+extern const int _pcre_utt_size;
+
+extern const uschar _pcre_default_tables[];
+
+extern const uschar _pcre_OP_lengths[];
+
+
+/* Internal shared functions. These are functions that are used by more than
+one of the exported public functions. They have to be "external" in the C
+sense, but are not part of the PCRE public API. */
+
+extern BOOL _pcre_is_newline(const uschar *, int, const uschar *,
+ int *, BOOL);
+extern int _pcre_ord2utf8(int, uschar *);
+extern real_pcre *_pcre_try_flipped(const real_pcre *, real_pcre *,
+ const pcre_study_data *, pcre_study_data *);
+extern int _pcre_ucp_findprop(const unsigned int, int *, int *);
+extern unsigned int _pcre_ucp_othercase(const unsigned int);
+extern int _pcre_valid_utf8(const uschar *, int);
+extern BOOL _pcre_was_newline(const uschar *, int, const uschar *,
+ int *, BOOL);
+extern BOOL _pcre_xclass(int, const uschar *);
+
+#endif
+
+/* End of pcre_internal.h */
diff --git a/src/third_party/pcre-7.4/pcre_maketables.c b/src/third_party/pcre-7.4/pcre_maketables.c
new file mode 100644
index 00000000000..352bea98e7a
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcre_maketables.c
@@ -0,0 +1,143 @@
+/*************************************************
+* Perl-Compatible Regular Expressions *
+*************************************************/
+
+/* PCRE is a library of functions to support regular expressions whose syntax
+and semantics are as close as possible to those of the Perl 5 language.
+
+ Written by Philip Hazel
+ Copyright (c) 1997-2007 University of Cambridge
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+
+/* This module contains the external function pcre_maketables(), which builds
+character tables for PCRE in the current locale. The file is compiled on its
+own as part of the PCRE library. However, it is also included in the
+compilation of dftables.c, in which case the macro DFTABLES is defined. */
+
+
+#ifndef DFTABLES
+# ifdef HAVE_CONFIG_H
+# include "config.h"
+# endif
+# include "pcre_internal.h"
+#endif
+
+
+/*************************************************
+* Create PCRE character tables *
+*************************************************/
+
+/* This function builds a set of character tables for use by PCRE and returns
+a pointer to them. They are built using the ctype functions, and consequently
+their contents will depend upon the current locale setting. When compiled as
+part of the library, the store is obtained via pcre_malloc(), but when compiled
+inside dftables, malloc() is used instead.
+
+Arguments: none
+Returns: pointer to the contiguous block of data
+*/
+
+const unsigned char *
+pcre_maketables(void)
+{
+unsigned char *yield, *p;
+int i;
+
+#ifndef DFTABLES
+yield = (unsigned char*)(pcre_malloc)(tables_length);
+#else
+yield = (unsigned char*)malloc(tables_length);
+#endif
+
+if (yield == NULL) return NULL;
+p = yield;
+
+/* First comes the lower casing table */
+
+for (i = 0; i < 256; i++) *p++ = tolower(i);
+
+/* Next the case-flipping table */
+
+for (i = 0; i < 256; i++) *p++ = islower(i)? toupper(i) : tolower(i);
+
+/* Then the character class tables. Don't try to be clever and save effort on
+exclusive ones - in some locales things may be different. Note that the table
+for "space" includes everything "isspace" gives, including VT in the default
+locale. This makes it work for the POSIX class [:space:]. Note also that it is
+possible for a character to be alnum or alpha without being lower or upper,
+such as "male and female ordinals" (\xAA and \xBA) in the fr_FR locale (at
+least under Debian Linux's locales as of 12/2005). So we must test for alnum
+specially. */
+
+memset(p, 0, cbit_length);
+for (i = 0; i < 256; i++)
+ {
+ if (isdigit(i)) p[cbit_digit + i/8] |= 1 << (i&7);
+ if (isupper(i)) p[cbit_upper + i/8] |= 1 << (i&7);
+ if (islower(i)) p[cbit_lower + i/8] |= 1 << (i&7);
+ if (isalnum(i)) p[cbit_word + i/8] |= 1 << (i&7);
+ if (i == '_') p[cbit_word + i/8] |= 1 << (i&7);
+ if (isspace(i)) p[cbit_space + i/8] |= 1 << (i&7);
+ if (isxdigit(i))p[cbit_xdigit + i/8] |= 1 << (i&7);
+ if (isgraph(i)) p[cbit_graph + i/8] |= 1 << (i&7);
+ if (isprint(i)) p[cbit_print + i/8] |= 1 << (i&7);
+ if (ispunct(i)) p[cbit_punct + i/8] |= 1 << (i&7);
+ if (iscntrl(i)) p[cbit_cntrl + i/8] |= 1 << (i&7);
+ }
+p += cbit_length;
+
+/* Finally, the character type table. In this, we exclude VT from the white
+space chars, because Perl doesn't recognize it as such for \s and for comments
+within regexes. */
+
+for (i = 0; i < 256; i++)
+ {
+ int x = 0;
+ if (i != 0x0b && isspace(i)) x += ctype_space;
+ if (isalpha(i)) x += ctype_letter;
+ if (isdigit(i)) x += ctype_digit;
+ if (isxdigit(i)) x += ctype_xdigit;
+ if (isalnum(i) || i == '_') x += ctype_word;
+
+ /* Note: strchr includes the terminating zero in the characters it considers.
+ In this instance, that is ok because we want binary zero to be flagged as a
+ meta-character, which in this sense is any character that terminates a run
+ of data characters. */
+
+ if (strchr("\\*+?{^.$|()[", i) != 0) x += ctype_meta;
+ *p++ = x;
+ }
+
+return yield;
+}
+
+/* End of pcre_maketables.c */
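
[Editorial usage sketch, not part of the PCRE 7.4 sources or of this commit:
pcre_maketables() is normally called after setlocale() so the tables reflect the wanted
locale, and the result is passed as the final argument of pcre_compile(). The locale name
and pattern are examples only; the usual <pcre.h>, <locale.h> and <stdio.h> includes are
assumed.]

    setlocale(LC_CTYPE, "fr_FR");                     /* choose the locale first */
    const unsigned char *tables = pcre_maketables();  /* tables built for that locale */

    const char *error;
    int erroffset;
    pcre *re = pcre_compile("\\w+", 0, &error, &erroffset, tables);
    if (re == NULL)
      fprintf(stderr, "compile failed at offset %d: %s\n", erroffset, error);
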
diff --git a/src/third_party/pcre-7.4/pcre_newline.c b/src/third_party/pcre-7.4/pcre_newline.c
new file mode 100644
index 00000000000..1708d939589
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcre_newline.c
@@ -0,0 +1,164 @@
+/*************************************************
+* Perl-Compatible Regular Expressions *
+*************************************************/
+
+/* PCRE is a library of functions to support regular expressions whose syntax
+and semantics are as close as possible to those of the Perl 5 language.
+
+ Written by Philip Hazel
+ Copyright (c) 1997-2007 University of Cambridge
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+
+/* This module contains internal functions for testing newlines when more than
+one kind of newline is to be recognized. When a newline is found, its length is
+returned. In principle, we could implement several newline "types", each
+referring to a different set of newline characters. At present, PCRE supports
+only NLTYPE_FIXED, which gets handled without these functions, NLTYPE_ANYCRLF,
+and NLTYPE_ANY. The full list of Unicode newline characters is taken from
+http://unicode.org/unicode/reports/tr18/. */
+
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "pcre_internal.h"
+
+
+
+/*************************************************
+* Check for newline at given position *
+*************************************************/
+
+/* It is guaranteed that the initial value of ptr is less than the end of the
+string that is being processed.
+
+Arguments:
+ ptr pointer to possible newline
+ type the newline type
+ endptr pointer to the end of the string
+ lenptr where to return the length
+ utf8 TRUE if in utf8 mode
+
+Returns: TRUE or FALSE
+*/
+
+BOOL
+_pcre_is_newline(const uschar *ptr, int type, const uschar *endptr,
+ int *lenptr, BOOL utf8)
+{
+int c;
+if (utf8) { GETCHAR(c, ptr); } else c = *ptr;
+
+if (type == NLTYPE_ANYCRLF) switch(c)
+ {
+ case 0x000a: *lenptr = 1; return TRUE; /* LF */
+ case 0x000d: *lenptr = (ptr < endptr - 1 && ptr[1] == 0x0a)? 2 : 1;
+ return TRUE; /* CR */
+ default: return FALSE;
+ }
+
+/* NLTYPE_ANY */
+
+else switch(c)
+ {
+ case 0x000a: /* LF */
+ case 0x000b: /* VT */
+ case 0x000c: *lenptr = 1; return TRUE; /* FF */
+ case 0x000d: *lenptr = (ptr < endptr - 1 && ptr[1] == 0x0a)? 2 : 1;
+ return TRUE; /* CR */
+ case 0x0085: *lenptr = utf8? 2 : 1; return TRUE; /* NEL */
+ case 0x2028: /* LS */
+ case 0x2029: *lenptr = 3; return TRUE; /* PS */
+ default: return FALSE;
+ }
+}
+
+
+
+/*************************************************
+* Check for newline at previous position *
+*************************************************/
+
+/* It is guaranteed that the initial value of ptr is greater than the start of
+the string that is being processed.
+
+Arguments:
+ ptr pointer to possible newline
+ type the newline type
+ startptr pointer to the start of the string
+ lenptr where to return the length
+ utf8 TRUE if in utf8 mode
+
+Returns: TRUE or FALSE
+*/
+
+BOOL
+_pcre_was_newline(const uschar *ptr, int type, const uschar *startptr,
+ int *lenptr, BOOL utf8)
+{
+int c;
+ptr--;
+#ifdef SUPPORT_UTF8
+if (utf8)
+ {
+ BACKCHAR(ptr);
+ GETCHAR(c, ptr);
+ }
+else c = *ptr;
+#else /* no UTF-8 support */
+c = *ptr;
+#endif /* SUPPORT_UTF8 */
+
+if (type == NLTYPE_ANYCRLF) switch(c)
+ {
+ case 0x000a: *lenptr = (ptr > startptr && ptr[-1] == 0x0d)? 2 : 1;
+ return TRUE; /* LF */
+ case 0x000d: *lenptr = 1; return TRUE; /* CR */
+ default: return FALSE;
+ }
+
+else switch(c)
+ {
+ case 0x000a: *lenptr = (ptr > startptr && ptr[-1] == 0x0d)? 2 : 1;
+ return TRUE; /* LF */
+ case 0x000b: /* VT */
+ case 0x000c: /* FF */
+ case 0x000d: *lenptr = 1; return TRUE; /* CR */
+ case 0x0085: *lenptr = utf8? 2 : 1; return TRUE; /* NEL */
+ case 0x2028: /* LS */
+ case 0x2029: *lenptr = 3; return TRUE; /* PS */
+ default: return FALSE;
+ }
+}
+
+/* End of pcre_newline.c */
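
[Editorial sketch, not part of the PCRE 7.4 sources or of this commit: an illustrative call
showing the difference the type argument makes. These are internal helpers; the signature
and the NLTYPE_* constants come from pcre_internal.h above.]

    const uschar *subject = (const uschar *)"line1\r\nline2";
    const uschar *end = subject + 12;
    int len = 0;

    /* With NLTYPE_ANYCRLF the CR at offset 5 is a newline of length 2 (CRLF). */
    if (_pcre_is_newline(subject + 5, NLTYPE_ANYCRLF, end, &len, FALSE))
      { /* len == 2 here */ }

    /* With NLTYPE_ANY, NEL (U+0085), LS (U+2028) and PS (U+2029) would also qualify. */
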
diff --git a/src/third_party/pcre-7.4/pcre_ord2utf8.c b/src/third_party/pcre-7.4/pcre_ord2utf8.c
new file mode 100644
index 00000000000..d3904c655d3
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcre_ord2utf8.c
@@ -0,0 +1,85 @@
+/*************************************************
+* Perl-Compatible Regular Expressions *
+*************************************************/
+
+/* PCRE is a library of functions to support regular expressions whose syntax
+and semantics are as close as possible to those of the Perl 5 language.
+
+ Written by Philip Hazel
+ Copyright (c) 1997-2007 University of Cambridge
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+
+/* This file contains a private PCRE function that converts an ordinal
+character value into a UTF8 string. */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "pcre_internal.h"
+
+
+/*************************************************
+* Convert character value to UTF-8 *
+*************************************************/
+
+/* This function takes an integer value in the range 0 - 0x7fffffff
+and encodes it as a UTF-8 character in 0 to 6 bytes.
+
+Arguments:
+ cvalue the character value
+ buffer pointer to buffer for result - at least 6 bytes long
+
+Returns: number of characters placed in the buffer
+*/
+
+int
+_pcre_ord2utf8(int cvalue, uschar *buffer)
+{
+#ifdef SUPPORT_UTF8
+register int i, j;
+for (i = 0; i < _pcre_utf8_table1_size; i++)
+ if (cvalue <= _pcre_utf8_table1[i]) break;
+buffer += i;
+for (j = i; j > 0; j--)
+ {
+ *buffer-- = 0x80 | (cvalue & 0x3f);
+ cvalue >>= 6;
+ }
+*buffer = _pcre_utf8_table2[i] | cvalue;
+return i + 1;
+#else
+return 0; /* Keep compiler happy; this function won't ever be */
+#endif /* called when SUPPORT_UTF8 is not defined. */
+}
+
+/* End of pcre_ord2utf8.c */
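
[Editorial worked example, not part of the PCRE 7.4 sources or of this commit: the value
0x20AC (the Euro sign) is above 0x7FF but not above 0xFFFF, so the table lookup selects a
three-byte encoding.]

    uschar buf[8];
    int n = _pcre_ord2utf8(0x20ac, buf);
    /* n == 3 and buf now holds 0xE2 0x82 0xAC, the UTF-8 form of U+20AC. */
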
diff --git a/src/third_party/pcre-7.4/pcre_refcount.c b/src/third_party/pcre-7.4/pcre_refcount.c
new file mode 100644
index 00000000000..b14103c7b8e
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcre_refcount.c
@@ -0,0 +1,82 @@
+/*************************************************
+* Perl-Compatible Regular Expressions *
+*************************************************/
+
+/* PCRE is a library of functions to support regular expressions whose syntax
+and semantics are as close as possible to those of the Perl 5 language.
+
+ Written by Philip Hazel
+ Copyright (c) 1997-2007 University of Cambridge
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+
+/* This module contains the external function pcre_refcount(), which is an
+auxiliary function that can be used to maintain a reference count in a compiled
+pattern data block. This might be helpful in applications where the block is
+shared by different users. */
+
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "pcre_internal.h"
+
+
+/*************************************************
+* Maintain reference count *
+*************************************************/
+
+/* The reference count is a 16-bit field, initialized to zero. It is not
+possible to transfer a non-zero count from one host to a different host that
+has a different byte order - though I can't see why anyone in their right mind
+would ever want to do that!
+
+Arguments:
+ argument_re points to compiled code
+ adjust value to add to the count
+
+Returns: the (possibly updated) count value (a non-negative number), or
+ a negative error number
+*/
+
+PCRE_EXP_DEFN int
+pcre_refcount(pcre *argument_re, int adjust)
+{
+real_pcre *re = (real_pcre *)argument_re;
+if (re == NULL) return PCRE_ERROR_NULL;
+re->ref_count = (-adjust > re->ref_count)? 0 :
+ (adjust + re->ref_count > 65535)? 65535 :
+ re->ref_count + adjust;
+return re->ref_count;
+}
+
+/* End of pcre_refcount.c */
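
[Editorial usage sketch, not part of the PCRE 7.4 sources or of this commit: PCRE itself
never consults this count, so the convention is the application's choice. One possibility is
for every user of a shared compiled pattern, including its creator, to register with +1 and
deregister with -1, freeing the block when the count drops back to zero. The pattern below
is an example only.]

    const char *error;
    int erroffset;
    pcre *re = pcre_compile("ab+c", 0, &error, &erroffset, NULL);

    pcre_refcount(re, 1);             /* each user of the block registers itself */
    /* ... hand re to other users, each also doing pcre_refcount(re, 1) ... */

    if (pcre_refcount(re, -1) == 0)   /* last registered user releases it */
      pcre_free(re);
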
diff --git a/src/third_party/pcre-7.4/pcre_scanner.cc b/src/third_party/pcre-7.4/pcre_scanner.cc
new file mode 100644
index 00000000000..a817a684e21
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcre_scanner.cc
@@ -0,0 +1,199 @@
+// Copyright (c) 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Sanjay Ghemawat
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <vector>
+#include <assert.h>
+
+#include "pcrecpp_internal.h"
+#include "pcre_scanner.h"
+
+using std::vector;
+
+namespace pcrecpp {
+
+Scanner::Scanner()
+ : data_(),
+ input_(data_),
+ skip_(NULL),
+ should_skip_(false),
+ skip_repeat_(false),
+ save_comments_(false),
+ comments_(NULL),
+ comments_offset_(0) {
+}
+
+Scanner::Scanner(const string& in)
+ : data_(in),
+ input_(data_),
+ skip_(NULL),
+ should_skip_(false),
+ skip_repeat_(false),
+ save_comments_(false),
+ comments_(NULL),
+ comments_offset_(0) {
+}
+
+Scanner::~Scanner() {
+ delete skip_;
+ delete comments_;
+}
+
+void Scanner::SetSkipExpression(const char* re) {
+ delete skip_;
+ if (re != NULL) {
+ skip_ = new RE(re);
+ should_skip_ = true;
+ skip_repeat_ = true;
+ ConsumeSkip();
+ } else {
+ skip_ = NULL;
+ should_skip_ = false;
+ skip_repeat_ = false;
+ }
+}
+
+void Scanner::Skip(const char* re) {
+ delete skip_;
+ if (re != NULL) {
+ skip_ = new RE(re);
+ should_skip_ = true;
+ skip_repeat_ = false;
+ ConsumeSkip();
+ } else {
+ skip_ = NULL;
+ should_skip_ = false;
+ skip_repeat_ = false;
+ }
+}
+
+void Scanner::DisableSkip() {
+ assert(skip_ != NULL);
+ should_skip_ = false;
+}
+
+void Scanner::EnableSkip() {
+ assert(skip_ != NULL);
+ should_skip_ = true;
+ ConsumeSkip();
+}
+
+int Scanner::LineNumber() const {
+ // TODO: Make it more efficient by keeping track of the last point
+ // where we computed line numbers and counting newlines since then.
+  // We could use std::count, but not all systems have it. :-(
+ int count = 1;
+ for (const char* p = data_.data(); p < input_.data(); ++p)
+ if (*p == '\n')
+ ++count;
+ return count;
+}
+
+int Scanner::Offset() const {
+ return input_.data() - data_.c_str();
+}
+
+bool Scanner::LookingAt(const RE& re) const {
+ int consumed;
+ return re.DoMatch(input_, RE::ANCHOR_START, &consumed, 0, 0);
+}
+
+
+bool Scanner::Consume(const RE& re,
+ const Arg& arg0,
+ const Arg& arg1,
+ const Arg& arg2) {
+ const bool result = re.Consume(&input_, arg0, arg1, arg2);
+ if (result && should_skip_) ConsumeSkip();
+ return result;
+}
+
+// helper function to consume *skip_ and honour save_comments_
+void Scanner::ConsumeSkip() {
+ const char* start_data = input_.data();
+ while (skip_->Consume(&input_)) {
+ if (!skip_repeat_) {
+ // Only one skip allowed.
+ break;
+ }
+ }
+ if (save_comments_) {
+ if (comments_ == NULL) {
+ comments_ = new vector<StringPiece>;
+ }
+ // already pointing one past end, so no need to +1
+ int length = input_.data() - start_data;
+ if (length > 0) {
+ comments_->push_back(StringPiece(start_data, length));
+ }
+ }
+}
+
+
+void Scanner::GetComments(int start, int end, vector<StringPiece> *ranges) {
+ // short circuit out if we've not yet initialized comments_
+ // (e.g., when save_comments is false)
+ if (!comments_) {
+ return;
+ }
+ // TODO: if we guarantee that comments_ will contain StringPieces
+ // that are ordered by their start, then we can do a binary search
+ // for the first StringPiece at or past start and then scan for the
+ // ones contained in the range, quit early (use equal_range or
+ // lower_bound)
+ for (vector<StringPiece>::const_iterator it = comments_->begin();
+ it != comments_->end(); ++it) {
+ if ((it->data() >= data_.c_str() + start &&
+ it->data() + it->size() <= data_.c_str() + end)) {
+ ranges->push_back(*it);
+ }
+ }
+}
+
+
+void Scanner::GetNextComments(vector<StringPiece> *ranges) {
+ // short circuit out if we've not yet initialized comments_
+ // (e.g., when save_comments is false)
+ if (!comments_) {
+ return;
+ }
+ for (vector<StringPiece>::const_iterator it =
+ comments_->begin() + comments_offset_;
+ it != comments_->end(); ++it) {
+ ranges->push_back(*it);
+ ++comments_offset_;
+ }
+}
+
+} // namespace pcrecpp
diff --git a/src/third_party/pcre-7.4/pcre_scanner.h b/src/third_party/pcre-7.4/pcre_scanner.h
new file mode 100644
index 00000000000..8d2265f8cb2
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcre_scanner.h
@@ -0,0 +1,172 @@
+// Copyright (c) 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Sanjay Ghemawat
+//
+// Regular-expression based scanner for parsing an input stream.
+//
+// Example 1: parse a sequence of "var = number" entries from input:
+//
+// Scanner scanner(input);
+// string var;
+// int number;
+// scanner.SetSkipExpression("\\s+"); // Skip any white space we encounter
+// while (scanner.Consume("(\\w+) = (\\d+)", &var, &number)) {
+// ...;
+// }
+
+#ifndef _PCRE_SCANNER_H
+#define _PCRE_SCANNER_H
+
+#include <assert.h>
+#include <string>
+#include <vector>
+
+#include "pcrecpp.h"
+#include "pcre_stringpiece.h"
+
+namespace pcrecpp {
+
+class Scanner {
+ public:
+ Scanner();
+ explicit Scanner(const std::string& input);
+ ~Scanner();
+
+ // Return current line number. The returned line-number is
+ // one-based. I.e. it returns 1 + the number of consumed newlines.
+ //
+ // Note: this method may be slow. It may take time proportional to
+ // the size of the input.
+ int LineNumber() const;
+
+ // Return the byte-offset that the scanner is looking in the
+  // Return the byte-offset that the scanner is looking at in the
+  // input data.
+
+ // Return true iff the start of the remaining input matches "re"
+ bool LookingAt(const RE& re) const;
+
+ // Return true iff all of the following are true
+ // a. the start of the remaining input matches "re",
+ // b. if any arguments are supplied, matched sub-patterns can be
+ // parsed and stored into the arguments.
+ // If it returns true, it skips over the matched input and any
+ // following input that matches the "skip" regular expression.
+ bool Consume(const RE& re,
+ const Arg& arg0 = no_arg,
+ const Arg& arg1 = no_arg,
+ const Arg& arg2 = no_arg
+ // TODO: Allow more arguments?
+ );
+
+ // Set the "skip" regular expression. If after consuming some data,
+ // a prefix of the input matches this RE, it is automatically
+ // skipped. For example, a programming language scanner would use
+ // a skip RE that matches white space and comments.
+ //
+ // scanner.SetSkipExpression("\\s+|//.*|/[*](.|\n)*?[*]/");
+ //
+ // Skipping repeats as long as it succeeds. We used to let people do
+ // this by writing "(...)*" in the regular expression, but that added
+ // up to lots of recursive calls within the pcre library, so now we
+ // control repetition explicitly via the function call API.
+ //
+ // You can pass NULL for "re" if you do not want any data to be skipped.
+ void Skip(const char* re); // DEPRECATED; does *not* repeat
+ void SetSkipExpression(const char* re);
+
+ // Temporarily pause "skip"ing. This
+ // Skip("Foo"); code ; DisableSkip(); code; EnableSkip()
+ // is similar to
+ // Skip("Foo"); code ; Skip(NULL); code ; Skip("Foo");
+ // but avoids creating/deleting new RE objects.
+ void DisableSkip();
+
+ // Reenable previously paused skipping. Any prefix of the input
+ // that matches the skip pattern is immediately dropped.
+ void EnableSkip();
+
+  /***** Special wrappers around SetSkipExpression() for some common idioms *****/
+
+ // Arranges to skip whitespace, C comments, C++ comments.
+ // The overall RE is a disjunction of the following REs:
+ // \\s whitespace
+ // //.*\n C++ comment
+ // /[*](.|\n)*?[*]/ C comment (x*? means minimal repetitions of x)
+ // We get repetition via the semantics of SetSkipExpression, not by using *
+ void SkipCXXComments() {
+ SetSkipExpression("\\s|//.*\n|/[*](?:\n|.)*?[*]/");
+ }
+
+ void set_save_comments(bool comments) {
+ save_comments_ = comments;
+ }
+
+ bool save_comments() {
+ return save_comments_;
+ }
+
+ // Append to vector ranges the comments found in the
+ // byte range [start,end] (inclusive) of the input data.
+ // Only comments that were extracted entirely within that
+ // range are returned: no range splitting of atomically-extracted
+ // comments is performed.
+ void GetComments(int start, int end, std::vector<StringPiece> *ranges);
+
+ // Append to vector ranges the comments added
+ // since the last time this was called. This
+ // functionality is provided for efficiency when
+ // interleaving scanning with parsing.
+ void GetNextComments(std::vector<StringPiece> *ranges);
+
+ private:
+ std::string data_; // All the input data
+ StringPiece input_; // Unprocessed input
+ RE* skip_; // If non-NULL, RE for skipping input
+ bool should_skip_; // If true, use skip_
+ bool skip_repeat_; // If true, repeat skip_ as long as it works
+ bool save_comments_; // If true, aggregate the skip expression
+
+ // the skipped comments
+ // TODO: later consider requiring that the StringPieces be added
+ // in order by their start position
+ std::vector<StringPiece> *comments_;
+
+ // the offset into comments_ that has been returned by GetNextComments
+ int comments_offset_;
+
+ // helper function to consume *skip_ and honour
+ // save_comments_
+ void ConsumeSkip();
+};
+
+} // namespace pcrecpp
+
+#endif /* _PCRE_SCANNER_H */
diff --git a/src/third_party/pcre-7.4/pcre_scanner_unittest.cc b/src/third_party/pcre-7.4/pcre_scanner_unittest.cc
new file mode 100644
index 00000000000..284c8ea99e1
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcre_scanner_unittest.cc
@@ -0,0 +1,158 @@
+// Copyright (c) 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Greg J. Badros
+//
+// Unittest for scanner, especially GetNextComments and GetComments()
+// functionality.
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdio.h>
+#include <string>
+#include <vector>
+
+#include "pcrecpp.h"
+#include "pcre_stringpiece.h"
+#include "pcre_scanner.h"
+
+#define FLAGS_unittest_stack_size 49152
+
+// Dies with a fatal error if the two values are not equal.
+#define CHECK_EQ(a, b) do { \
+ if ( (a) != (b) ) { \
+ fprintf(stderr, "%s:%d: Check failed because %s != %s\n", \
+ __FILE__, __LINE__, #a, #b); \
+ exit(1); \
+ } \
+} while (0)
+
+using std::vector;
+using pcrecpp::StringPiece;
+using pcrecpp::Scanner;
+
+static void TestScanner() {
+ const char input[] = "\n"
+ "alpha = 1; // this sets alpha\n"
+ "bravo = 2; // bravo is set here\n"
+ "gamma = 33; /* and here is gamma */\n";
+
+ const char *re = "(\\w+) = (\\d+);";
+
+ Scanner s(input);
+ string var;
+ int number;
+ s.SkipCXXComments();
+ s.set_save_comments(true);
+ vector<StringPiece> comments;
+
+ s.Consume(re, &var, &number);
+ CHECK_EQ(var, "alpha");
+ CHECK_EQ(number, 1);
+ CHECK_EQ(s.LineNumber(), 3);
+ s.GetNextComments(&comments);
+ CHECK_EQ(comments.size(), 1);
+ CHECK_EQ(comments[0].as_string(), " // this sets alpha\n");
+ comments.resize(0);
+
+ s.Consume(re, &var, &number);
+ CHECK_EQ(var, "bravo");
+ CHECK_EQ(number, 2);
+ s.GetNextComments(&comments);
+ CHECK_EQ(comments.size(), 1);
+ CHECK_EQ(comments[0].as_string(), " // bravo is set here\n");
+ comments.resize(0);
+
+ s.Consume(re, &var, &number);
+ CHECK_EQ(var, "gamma");
+ CHECK_EQ(number, 33);
+ s.GetNextComments(&comments);
+ CHECK_EQ(comments.size(), 1);
+ CHECK_EQ(comments[0].as_string(), " /* and here is gamma */\n");
+ comments.resize(0);
+
+ s.GetComments(0, sizeof(input), &comments);
+ CHECK_EQ(comments.size(), 3);
+ CHECK_EQ(comments[0].as_string(), " // this sets alpha\n");
+ CHECK_EQ(comments[1].as_string(), " // bravo is set here\n");
+ CHECK_EQ(comments[2].as_string(), " /* and here is gamma */\n");
+ comments.resize(0);
+
+ s.GetComments(0, strchr(input, '/') - input, &comments);
+ CHECK_EQ(comments.size(), 0);
+ comments.resize(0);
+
+ s.GetComments(strchr(input, '/') - input - 1, sizeof(input),
+ &comments);
+ CHECK_EQ(comments.size(), 3);
+ CHECK_EQ(comments[0].as_string(), " // this sets alpha\n");
+ CHECK_EQ(comments[1].as_string(), " // bravo is set here\n");
+ CHECK_EQ(comments[2].as_string(), " /* and here is gamma */\n");
+ comments.resize(0);
+
+ s.GetComments(strchr(input, '/') - input - 1,
+ strchr(input + 1, '\n') - input + 1, &comments);
+ CHECK_EQ(comments.size(), 1);
+ CHECK_EQ(comments[0].as_string(), " // this sets alpha\n");
+ comments.resize(0);
+}
+
+static void TestBigComment() {
+ string input;
+ for (int i = 0; i < 1024; ++i) {
+ char buf[1024]; // definitely big enough
+ sprintf(buf, " # Comment %d\n", i);
+ input += buf;
+ }
+ input += "name = value;\n";
+
+ Scanner s(input.c_str());
+ s.SetSkipExpression("\\s+|#.*\n");
+
+ string name;
+ string value;
+ s.Consume("(\\w+) = (\\w+);", &name, &value);
+ CHECK_EQ(name, "name");
+ CHECK_EQ(value, "value");
+}
+
+// TODO: also test scanner and big-comment in a thread with a
+// small stack size
+
+int main(int argc, char** argv) {
+ TestScanner();
+ TestBigComment();
+
+ // Done
+ printf("OK\n");
+
+ return 0;
+}
diff --git a/src/third_party/pcre-7.4/pcre_stringpiece.cc b/src/third_party/pcre-7.4/pcre_stringpiece.cc
new file mode 100644
index 00000000000..67c0f1fc0e5
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcre_stringpiece.cc
@@ -0,0 +1,43 @@
+// Copyright (c) 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wilsonh@google.com (Wilson Hsieh)
+//
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <iostream>
+#include "pcrecpp_internal.h"
+#include "pcre_stringpiece.h"
+
+std::ostream& operator<<(std::ostream& o, const pcrecpp::StringPiece& piece) {
+ return (o << piece.as_string());
+}
diff --git a/src/third_party/pcre-7.4/pcre_stringpiece.h b/src/third_party/pcre-7.4/pcre_stringpiece.h
new file mode 100644
index 00000000000..599a3516088
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcre_stringpiece.h
@@ -0,0 +1,177 @@
+// Copyright (c) 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Sanjay Ghemawat
+//
+// A string like object that points into another piece of memory.
+// Useful for providing an interface that allows clients to easily
+// pass in either a "const char*" or a "string".
+//
+// Arghh! I wish C++ literals were automatically of type "string".
+
+#ifndef _PCRE_STRINGPIECE_H
+#define _PCRE_STRINGPIECE_H
+
+#include <string.h>
+#include <string>
+#include <iosfwd> // for ostream forward-declaration
+
+#if 0
+#define HAVE_TYPE_TRAITS
+#include <type_traits.h>
+#elif 0
+#define HAVE_TYPE_TRAITS
+#include <bits/type_traits.h>
+#endif
+
+#include <pcre.h>
+
+using std::string;
+
+namespace pcrecpp {
+
+class PCRECPP_EXP_DEFN StringPiece {
+ private:
+ const char* ptr_;
+ int length_;
+
+ public:
+ // We provide non-explicit singleton constructors so users can pass
+ // in a "const char*" or a "string" wherever a "StringPiece" is
+ // expected.
+ StringPiece()
+ : ptr_(NULL), length_(0) { }
+ StringPiece(const char* str)
+ : ptr_(str), length_(static_cast<int>(strlen(ptr_))) { }
+ StringPiece(const unsigned char* str)
+ : ptr_(reinterpret_cast<const char*>(str)),
+ length_(static_cast<int>(strlen(ptr_))) { }
+ StringPiece(const string& str)
+ : ptr_(str.data()), length_(static_cast<int>(str.size())) { }
+ StringPiece(const char* offset, int len)
+ : ptr_(offset), length_(len) { }
+
+ // data() may return a pointer to a buffer with embedded NULs, and the
+ // returned buffer may or may not be null terminated. Therefore it is
+ // typically a mistake to pass data() to a routine that expects a NUL
+ // terminated string. Use "as_string().c_str()" if you really need to do
+ // this. Or better yet, change your routine so it does not rely on NUL
+ // termination.
+ const char* data() const { return ptr_; }
+ int size() const { return length_; }
+ bool empty() const { return length_ == 0; }
+
+ void clear() { ptr_ = NULL; length_ = 0; }
+ void set(const char* buffer, int len) { ptr_ = buffer; length_ = len; }
+ void set(const char* str) {
+ ptr_ = str;
+ length_ = static_cast<int>(strlen(str));
+ }
+ void set(const void* buffer, int len) {
+ ptr_ = reinterpret_cast<const char*>(buffer);
+ length_ = len;
+ }
+
+ char operator[](int i) const { return ptr_[i]; }
+
+ void remove_prefix(int n) {
+ ptr_ += n;
+ length_ -= n;
+ }
+
+ void remove_suffix(int n) {
+ length_ -= n;
+ }
+
+ bool operator==(const StringPiece& x) const {
+ return ((length_ == x.length_) &&
+ (memcmp(ptr_, x.ptr_, length_) == 0));
+ }
+ bool operator!=(const StringPiece& x) const {
+ return !(*this == x);
+ }
+
+#define STRINGPIECE_BINARY_PREDICATE(cmp,auxcmp) \
+ bool operator cmp (const StringPiece& x) const { \
+ int r = memcmp(ptr_, x.ptr_, length_ < x.length_ ? length_ : x.length_); \
+ return ((r auxcmp 0) || ((r == 0) && (length_ cmp x.length_))); \
+ }
+ STRINGPIECE_BINARY_PREDICATE(<, <);
+ STRINGPIECE_BINARY_PREDICATE(<=, <);
+ STRINGPIECE_BINARY_PREDICATE(>=, >);
+ STRINGPIECE_BINARY_PREDICATE(>, >);
+#undef STRINGPIECE_BINARY_PREDICATE
+
+ int compare(const StringPiece& x) const {
+ int r = memcmp(ptr_, x.ptr_, length_ < x.length_ ? length_ : x.length_);
+ if (r == 0) {
+ if (length_ < x.length_) r = -1;
+ else if (length_ > x.length_) r = +1;
+ }
+ return r;
+ }
+
+ string as_string() const {
+ return string(data(), size());
+ }
+
+ void CopyToString(string* target) const {
+ target->assign(ptr_, length_);
+ }
+
+ // Does "this" start with "x"
+ bool starts_with(const StringPiece& x) const {
+ return ((length_ >= x.length_) && (memcmp(ptr_, x.ptr_, x.length_) == 0));
+ }
+};
+
+} // namespace pcrecpp
+
+// ------------------------------------------------------------------
+// Functions used to create STL containers that use StringPiece
+// Remember that a StringPiece's lifetime had better be less than
+// that of the underlying string or char*. If it is not, then you
+// cannot safely store a StringPiece into an STL container
+// ------------------------------------------------------------------
+
+#ifdef HAVE_TYPE_TRAITS
+// This makes vector<StringPiece> really fast for some STL implementations
+template<> struct __type_traits<pcrecpp::StringPiece> {
+ typedef __true_type has_trivial_default_constructor;
+ typedef __true_type has_trivial_copy_constructor;
+ typedef __true_type has_trivial_assignment_operator;
+ typedef __true_type has_trivial_destructor;
+ typedef __true_type is_POD_type;
+};
+#endif
+
+// allow StringPiece to be logged
+std::ostream& operator<<(std::ostream& o, const pcrecpp::StringPiece& piece);
+
+#endif /* _PCRE_STRINGPIECE_H */
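
[Editorial snippet, not part of the PCRE 7.4 sources or of this commit: the intended usage
pattern for the class above. A StringPiece is a cheap pointer/length view, so it may be
copied freely, but the bytes it refers to must outlive it.]

    std::string owner = "alpha = 1;";
    pcrecpp::StringPiece whole(owner);            // views owner's buffer, no copy
    pcrecpp::StringPiece word(owner.data(), 5);   // the first five bytes, "alpha"

    bool ok = whole.starts_with(word);            // true
    std::string copy = word.as_string();          // makes an owned copy when needed
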
diff --git a/src/third_party/pcre-7.4/pcre_stringpiece.h.in b/src/third_party/pcre-7.4/pcre_stringpiece.h.in
new file mode 100644
index 00000000000..b017661eb4f
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcre_stringpiece.h.in
@@ -0,0 +1,177 @@
+// Copyright (c) 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Sanjay Ghemawat
+//
+// A string like object that points into another piece of memory.
+// Useful for providing an interface that allows clients to easily
+// pass in either a "const char*" or a "string".
+//
+// Arghh! I wish C++ literals were automatically of type "string".
+
+#ifndef _PCRE_STRINGPIECE_H
+#define _PCRE_STRINGPIECE_H
+
+#include <string.h>
+#include <string>
+#include <iosfwd> // for ostream forward-declaration
+
+#if @pcre_have_type_traits@
+#define HAVE_TYPE_TRAITS
+#include <type_traits.h>
+#elif @pcre_have_bits_type_traits@
+#define HAVE_TYPE_TRAITS
+#include <bits/type_traits.h>
+#endif
+
+#include <pcre.h>
+
+using std::string;
+
+namespace pcrecpp {
+
+class PCRECPP_EXP_DEFN StringPiece {
+ private:
+ const char* ptr_;
+ int length_;
+
+ public:
+ // We provide non-explicit singleton constructors so users can pass
+ // in a "const char*" or a "string" wherever a "StringPiece" is
+ // expected.
+ StringPiece()
+ : ptr_(NULL), length_(0) { }
+ StringPiece(const char* str)
+ : ptr_(str), length_(static_cast<int>(strlen(ptr_))) { }
+ StringPiece(const unsigned char* str)
+ : ptr_(reinterpret_cast<const char*>(str)),
+ length_(static_cast<int>(strlen(ptr_))) { }
+ StringPiece(const string& str)
+ : ptr_(str.data()), length_(static_cast<int>(str.size())) { }
+ StringPiece(const char* offset, int len)
+ : ptr_(offset), length_(len) { }
+
+ // data() may return a pointer to a buffer with embedded NULs, and the
+ // returned buffer may or may not be null terminated. Therefore it is
+ // typically a mistake to pass data() to a routine that expects a NUL
+ // terminated string. Use "as_string().c_str()" if you really need to do
+ // this. Or better yet, change your routine so it does not rely on NUL
+ // termination.
+ const char* data() const { return ptr_; }
+ int size() const { return length_; }
+ bool empty() const { return length_ == 0; }
+
+ void clear() { ptr_ = NULL; length_ = 0; }
+ void set(const char* buffer, int len) { ptr_ = buffer; length_ = len; }
+ void set(const char* str) {
+ ptr_ = str;
+ length_ = static_cast<int>(strlen(str));
+ }
+ void set(const void* buffer, int len) {
+ ptr_ = reinterpret_cast<const char*>(buffer);
+ length_ = len;
+ }
+
+ char operator[](int i) const { return ptr_[i]; }
+
+ void remove_prefix(int n) {
+ ptr_ += n;
+ length_ -= n;
+ }
+
+ void remove_suffix(int n) {
+ length_ -= n;
+ }
+
+ bool operator==(const StringPiece& x) const {
+ return ((length_ == x.length_) &&
+ (memcmp(ptr_, x.ptr_, length_) == 0));
+ }
+ bool operator!=(const StringPiece& x) const {
+ return !(*this == x);
+ }
+
+#define STRINGPIECE_BINARY_PREDICATE(cmp,auxcmp) \
+ bool operator cmp (const StringPiece& x) const { \
+ int r = memcmp(ptr_, x.ptr_, length_ < x.length_ ? length_ : x.length_); \
+ return ((r auxcmp 0) || ((r == 0) && (length_ cmp x.length_))); \
+ }
+ STRINGPIECE_BINARY_PREDICATE(<, <);
+ STRINGPIECE_BINARY_PREDICATE(<=, <);
+ STRINGPIECE_BINARY_PREDICATE(>=, >);
+ STRINGPIECE_BINARY_PREDICATE(>, >);
+#undef STRINGPIECE_BINARY_PREDICATE
+
+ int compare(const StringPiece& x) const {
+ int r = memcmp(ptr_, x.ptr_, length_ < x.length_ ? length_ : x.length_);
+ if (r == 0) {
+ if (length_ < x.length_) r = -1;
+ else if (length_ > x.length_) r = +1;
+ }
+ return r;
+ }
+
+ string as_string() const {
+ return string(data(), size());
+ }
+
+ void CopyToString(string* target) const {
+ target->assign(ptr_, length_);
+ }
+
+ // Does "this" start with "x"
+ bool starts_with(const StringPiece& x) const {
+ return ((length_ >= x.length_) && (memcmp(ptr_, x.ptr_, x.length_) == 0));
+ }
+};
+
+} // namespace pcrecpp
+
+// ------------------------------------------------------------------
+// Functions used to create STL containers that use StringPiece
+// Remember that a StringPiece's lifetime had better be less than
+// that of the underlying string or char*. If it is not, then you
+// cannot safely store a StringPiece into an STL container
+// ------------------------------------------------------------------
+
+#ifdef HAVE_TYPE_TRAITS
+// This makes vector<StringPiece> really fast for some STL implementations
+template<> struct __type_traits<pcrecpp::StringPiece> {
+ typedef __true_type has_trivial_default_constructor;
+ typedef __true_type has_trivial_copy_constructor;
+ typedef __true_type has_trivial_assignment_operator;
+ typedef __true_type has_trivial_destructor;
+ typedef __true_type is_POD_type;
+};
+#endif
+
+// allow StringPiece to be logged
+std::ostream& operator<<(std::ostream& o, const pcrecpp::StringPiece& piece);
+
+#endif /* _PCRE_STRINGPIECE_H */
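
Not part of the patch: a minimal usage sketch for the StringPiece class above, assuming the pcrecpp C++ wrapper is built. TakesPiece() is a hypothetical caller; the includes mirror the unit test that follows. The commented-out line illustrates the lifetime note above: a StringPiece must never outlive its backing storage.

// Sketch only -- TakesPiece() is illustrative, not part of PCRE.
#include <cstdio>
#include <string>
#include "pcrecpp.h"
#include "pcre_stringpiece.h"

static void TakesPiece(const pcrecpp::StringPiece& p) {
  // data() need not be NUL-terminated, so always pair it with size().
  printf("%.*s\n", p.size(), p.data());
}

int main() {
  std::string s("hello world");
  TakesPiece(s);             // implicit conversion from std::string
  TakesPiece("a literal");   // implicit conversion from const char*
  // pcrecpp::StringPiece bad = s.substr(0, 5);  // WRONG: points into a
  //                                             // destroyed temporary
  return 0;
}
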
diff --git a/src/third_party/pcre-7.4/pcre_stringpiece_unittest.cc b/src/third_party/pcre-7.4/pcre_stringpiece_unittest.cc
new file mode 100644
index 00000000000..1e821ab6e44
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcre_stringpiece_unittest.cc
@@ -0,0 +1,151 @@
+// Copyright 2003 and onwards Google Inc.
+// Author: Sanjay Ghemawat
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdio.h>
+#include <map>
+#include <algorithm> // for make_pair
+
+#include "pcrecpp.h"
+#include "pcre_stringpiece.h"
+
+// CHECK dies with a fatal error if condition is not true. It is *not*
+// controlled by NDEBUG, so the check will be executed regardless of
+// compilation mode. Therefore, it is safe to do things like:
+// CHECK(fp->Write(x) == 4)
+#define CHECK(condition) do { \
+ if (!(condition)) { \
+ fprintf(stderr, "%s:%d: Check failed: %s\n", \
+ __FILE__, __LINE__, #condition); \
+ exit(1); \
+ } \
+} while (0)
+
+using std::map;
+using std::make_pair;
+using pcrecpp::StringPiece;
+
+static void CheckSTLComparator() {
+ string s1("foo");
+ string s2("bar");
+ string s3("baz");
+
+ StringPiece p1(s1);
+ StringPiece p2(s2);
+ StringPiece p3(s3);
+
+ typedef map<StringPiece, int> TestMap;
+ TestMap map;
+
+ map.insert(make_pair(p1, 0));
+ map.insert(make_pair(p2, 1));
+ map.insert(make_pair(p3, 2));
+ CHECK(map.size() == 3);
+
+ TestMap::const_iterator iter = map.begin();
+ CHECK(iter->second == 1);
+ ++iter;
+ CHECK(iter->second == 2);
+ ++iter;
+ CHECK(iter->second == 0);
+ ++iter;
+ CHECK(iter == map.end());
+
+ TestMap::iterator new_iter = map.find("zot");
+ CHECK(new_iter == map.end());
+
+ new_iter = map.find("bar");
+ CHECK(new_iter != map.end());
+
+ map.erase(new_iter);
+ CHECK(map.size() == 2);
+
+ iter = map.begin();
+ CHECK(iter->second == 2);
+ ++iter;
+ CHECK(iter->second == 0);
+ ++iter;
+ CHECK(iter == map.end());
+}
+
+static void CheckComparisonOperators() {
+#define CMP_Y(op, x, y) \
+ CHECK( (StringPiece((x)) op StringPiece((y)))); \
+ CHECK( (StringPiece((x)).compare(StringPiece((y))) op 0))
+
+#define CMP_N(op, x, y) \
+ CHECK(!(StringPiece((x)) op StringPiece((y)))); \
+ CHECK(!(StringPiece((x)).compare(StringPiece((y))) op 0))
+
+ CMP_Y(==, "", "");
+ CMP_Y(==, "a", "a");
+ CMP_Y(==, "aa", "aa");
+ CMP_N(==, "a", "");
+ CMP_N(==, "", "a");
+ CMP_N(==, "a", "b");
+ CMP_N(==, "a", "aa");
+ CMP_N(==, "aa", "a");
+
+ CMP_N(!=, "", "");
+ CMP_N(!=, "a", "a");
+ CMP_N(!=, "aa", "aa");
+ CMP_Y(!=, "a", "");
+ CMP_Y(!=, "", "a");
+ CMP_Y(!=, "a", "b");
+ CMP_Y(!=, "a", "aa");
+ CMP_Y(!=, "aa", "a");
+
+ CMP_Y(<, "a", "b");
+ CMP_Y(<, "a", "aa");
+ CMP_Y(<, "aa", "b");
+ CMP_Y(<, "aa", "bb");
+ CMP_N(<, "a", "a");
+ CMP_N(<, "b", "a");
+ CMP_N(<, "aa", "a");
+ CMP_N(<, "b", "aa");
+ CMP_N(<, "bb", "aa");
+
+ CMP_Y(<=, "a", "a");
+ CMP_Y(<=, "a", "b");
+ CMP_Y(<=, "a", "aa");
+ CMP_Y(<=, "aa", "b");
+ CMP_Y(<=, "aa", "bb");
+ CMP_N(<=, "b", "a");
+ CMP_N(<=, "aa", "a");
+ CMP_N(<=, "b", "aa");
+ CMP_N(<=, "bb", "aa");
+
+ CMP_N(>=, "a", "b");
+ CMP_N(>=, "a", "aa");
+ CMP_N(>=, "aa", "b");
+ CMP_N(>=, "aa", "bb");
+ CMP_Y(>=, "a", "a");
+ CMP_Y(>=, "b", "a");
+ CMP_Y(>=, "aa", "a");
+ CMP_Y(>=, "b", "aa");
+ CMP_Y(>=, "bb", "aa");
+
+ CMP_N(>, "a", "a");
+ CMP_N(>, "a", "b");
+ CMP_N(>, "a", "aa");
+ CMP_N(>, "aa", "b");
+ CMP_N(>, "aa", "bb");
+ CMP_Y(>, "b", "a");
+ CMP_Y(>, "aa", "a");
+ CMP_Y(>, "b", "aa");
+ CMP_Y(>, "bb", "aa");
+
+#undef CMP_Y
+#undef CMP_N
+}
+
+int main(int argc, char** argv) {
+ CheckComparisonOperators();
+ CheckSTLComparator();
+
+ printf("OK\n");
+ return 0;
+}
diff --git a/src/third_party/pcre-7.4/pcre_study.c b/src/third_party/pcre-7.4/pcre_study.c
new file mode 100644
index 00000000000..1c283848b13
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcre_study.c
@@ -0,0 +1,579 @@
+/*************************************************
+* Perl-Compatible Regular Expressions *
+*************************************************/
+
+/* PCRE is a library of functions to support regular expressions whose syntax
+and semantics are as close as possible to those of the Perl 5 language.
+
+ Written by Philip Hazel
+ Copyright (c) 1997-2007 University of Cambridge
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+
+/* This module contains the external function pcre_study(), along with local
+supporting functions. */
+
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "pcre_internal.h"
+
+
+/* Returns from set_start_bits() */
+
+enum { SSB_FAIL, SSB_DONE, SSB_CONTINUE };
+
+
+/*************************************************
+* Set a bit and maybe its alternate case *
+*************************************************/
+
+/* Given a character, set its bit in the table, and also the bit for the other
+version of a letter if we are caseless.
+
+Arguments:
+ start_bits points to the bit map
+ c is the character
+ caseless the caseless flag
+ cd the block with char table pointers
+
+Returns: nothing
+*/
+
+static void
+set_bit(uschar *start_bits, unsigned int c, BOOL caseless, compile_data *cd)
+{
+start_bits[c/8] |= (1 << (c&7));
+if (caseless && (cd->ctypes[c] & ctype_letter) != 0)
+ start_bits[cd->fcc[c]/8] |= (1 << (cd->fcc[c]&7));
+}
+
+
+
+/*************************************************
+* Create bitmap of starting bytes *
+*************************************************/
+
+/* This function scans a compiled unanchored expression recursively and
+attempts to build a bitmap of the set of possible starting bytes. As time goes
+by, we may be able to get more clever at doing this. The SSB_CONTINUE return is
+useful for parenthesized groups in patterns such as (a*)b where the group
+provides some optional starting bytes but scanning must continue at the outer
+level to find at least one mandatory byte. At the outermost level, this
+function fails unless the result is SSB_DONE.
+
+Arguments:
+ code points to an expression
+ start_bits points to a 32-byte table, initialized to 0
+ caseless the current state of the caseless flag
+ utf8 TRUE if in UTF-8 mode
+ cd the block with char table pointers
+
+Returns: SSB_FAIL => Failed to find any starting bytes
+ SSB_DONE => Found mandatory starting bytes
+ SSB_CONTINUE => Found optional starting bytes
+*/
+
+static int
+set_start_bits(const uschar *code, uschar *start_bits, BOOL caseless,
+ BOOL utf8, compile_data *cd)
+{
+register int c;
+int yield = SSB_DONE;
+
+#if 0
+/* ========================================================================= */
+/* The following comment and code was inserted in January 1999. In May 2006,
+when it was observed to cause compiler warnings about unused values, I took it
+out again. If anybody is still using OS/2, they will have to put it back
+manually. */
+
+/* This next statement and the later reference to dummy are here in order to
+trick the optimizer of the IBM C compiler for OS/2 into generating correct
+code. Apparently IBM isn't going to fix the problem, and we would rather not
+disable optimization (in this module it actually makes a big difference, and
+the pcre module can use all the optimization it can get). */
+
+volatile int dummy;
+/* ========================================================================= */
+#endif
+
+do
+ {
+ const uschar *tcode = code + (((int)*code == OP_CBRA)? 3:1) + LINK_SIZE;
+ BOOL try_next = TRUE;
+
+ while (try_next) /* Loop for items in this branch */
+ {
+ int rc;
+ switch(*tcode)
+ {
+ /* Fail if we reach something we don't understand */
+
+ default:
+ return SSB_FAIL;
+
+ /* If we hit a bracket or a positive lookahead assertion, recurse to set
+ bits from within the subpattern. If it can't find anything, we have to
+ give up. If it finds some mandatory character(s), we are done for this
+ branch. Otherwise, carry on scanning after the subpattern. */
+
+ case OP_BRA:
+ case OP_SBRA:
+ case OP_CBRA:
+ case OP_SCBRA:
+ case OP_ONCE:
+ case OP_ASSERT:
+ rc = set_start_bits(tcode, start_bits, caseless, utf8, cd);
+ if (rc == SSB_FAIL) return SSB_FAIL;
+ if (rc == SSB_DONE) try_next = FALSE; else
+ {
+ do tcode += GET(tcode, 1); while (*tcode == OP_ALT);
+ tcode += 1 + LINK_SIZE;
+ }
+ break;
+
+ /* If we hit ALT or KET, it means we haven't found anything mandatory in
+ this branch, though we might have found something optional. For ALT, we
+ continue with the next alternative, but we have to arrange that the final
+ result from subpattern is SSB_CONTINUE rather than SSB_DONE. For KET,
+ return SSB_CONTINUE: if this is the top level, that indicates failure,
+ but after a nested subpattern, it causes scanning to continue. */
+
+ case OP_ALT:
+ yield = SSB_CONTINUE;
+ try_next = FALSE;
+ break;
+
+ case OP_KET:
+ case OP_KETRMAX:
+ case OP_KETRMIN:
+ return SSB_CONTINUE;
+
+ /* Skip over callout */
+
+ case OP_CALLOUT:
+ tcode += 2 + 2*LINK_SIZE;
+ break;
+
+ /* Skip over lookbehind and negative lookahead assertions */
+
+ case OP_ASSERT_NOT:
+ case OP_ASSERTBACK:
+ case OP_ASSERTBACK_NOT:
+ do tcode += GET(tcode, 1); while (*tcode == OP_ALT);
+ tcode += 1 + LINK_SIZE;
+ break;
+
+ /* Skip over an option setting, changing the caseless flag */
+
+ case OP_OPT:
+ caseless = (tcode[1] & PCRE_CASELESS) != 0;
+ tcode += 2;
+ break;
+
+ /* BRAZERO does the bracket, but carries on. */
+
+ case OP_BRAZERO:
+ case OP_BRAMINZERO:
+ if (set_start_bits(++tcode, start_bits, caseless, utf8, cd) == SSB_FAIL)
+ return SSB_FAIL;
+/* =========================================================================
+ See the comment at the head of this function concerning the next line,
+ which was an old fudge for the benefit of OS/2.
+ dummy = 1;
+ ========================================================================= */
+ do tcode += GET(tcode,1); while (*tcode == OP_ALT);
+ tcode += 1 + LINK_SIZE;
+ break;
+
+ /* Single-char * or ? sets the bit and tries the next item */
+
+ case OP_STAR:
+ case OP_MINSTAR:
+ case OP_POSSTAR:
+ case OP_QUERY:
+ case OP_MINQUERY:
+ case OP_POSQUERY:
+ set_bit(start_bits, tcode[1], caseless, cd);
+ tcode += 2;
+#ifdef SUPPORT_UTF8
+ if (utf8 && tcode[-1] >= 0xc0)
+ tcode += _pcre_utf8_table4[tcode[-1] & 0x3f];
+#endif
+ break;
+
+ /* Single-char upto sets the bit and tries the next */
+
+ case OP_UPTO:
+ case OP_MINUPTO:
+ case OP_POSUPTO:
+ set_bit(start_bits, tcode[3], caseless, cd);
+ tcode += 4;
+#ifdef SUPPORT_UTF8
+ if (utf8 && tcode[-1] >= 0xc0)
+ tcode += _pcre_utf8_table4[tcode[-1] & 0x3f];
+#endif
+ break;
+
+ /* At least one single char sets the bit and stops */
+
+ case OP_EXACT: /* Fall through */
+ tcode += 2;
+
+ case OP_CHAR:
+ case OP_CHARNC:
+ case OP_PLUS:
+ case OP_MINPLUS:
+ case OP_POSPLUS:
+ set_bit(start_bits, tcode[1], caseless, cd);
+ try_next = FALSE;
+ break;
+
+ /* Single character type sets the bits and stops */
+
+ case OP_NOT_DIGIT:
+ for (c = 0; c < 32; c++)
+ start_bits[c] |= ~cd->cbits[c+cbit_digit];
+ try_next = FALSE;
+ break;
+
+ case OP_DIGIT:
+ for (c = 0; c < 32; c++)
+ start_bits[c] |= cd->cbits[c+cbit_digit];
+ try_next = FALSE;
+ break;
+
+ /* The cbit_space table has vertical tab as whitespace; we have to
+ discard it. */
+
+ case OP_NOT_WHITESPACE:
+ for (c = 0; c < 32; c++)
+ {
+ int d = cd->cbits[c+cbit_space];
+ if (c == 1) d &= ~0x08;
+ start_bits[c] |= ~d;
+ }
+ try_next = FALSE;
+ break;
+
+ /* The cbit_space table has vertical tab as whitespace; we have to
+ discard it. */
+
+ case OP_WHITESPACE:
+ for (c = 0; c < 32; c++)
+ {
+ int d = cd->cbits[c+cbit_space];
+ if (c == 1) d &= ~0x08;
+ start_bits[c] |= d;
+ }
+ try_next = FALSE;
+ break;
+
+ case OP_NOT_WORDCHAR:
+ for (c = 0; c < 32; c++)
+ start_bits[c] |= ~cd->cbits[c+cbit_word];
+ try_next = FALSE;
+ break;
+
+ case OP_WORDCHAR:
+ for (c = 0; c < 32; c++)
+ start_bits[c] |= cd->cbits[c+cbit_word];
+ try_next = FALSE;
+ break;
+
+ /* One or more character type fudges the pointer and restarts, knowing
+ it will hit a single character type and stop there. */
+
+ case OP_TYPEPLUS:
+ case OP_TYPEMINPLUS:
+ tcode++;
+ break;
+
+ case OP_TYPEEXACT:
+ tcode += 3;
+ break;
+
+ /* Zero or more repeats of character types set the bits and then
+ try again. */
+
+ case OP_TYPEUPTO:
+ case OP_TYPEMINUPTO:
+ case OP_TYPEPOSUPTO:
+ tcode += 2; /* Fall through */
+
+ case OP_TYPESTAR:
+ case OP_TYPEMINSTAR:
+ case OP_TYPEPOSSTAR:
+ case OP_TYPEQUERY:
+ case OP_TYPEMINQUERY:
+ case OP_TYPEPOSQUERY:
+ switch(tcode[1])
+ {
+ case OP_ANY:
+ return SSB_FAIL;
+
+ case OP_NOT_DIGIT:
+ for (c = 0; c < 32; c++)
+ start_bits[c] |= ~cd->cbits[c+cbit_digit];
+ break;
+
+ case OP_DIGIT:
+ for (c = 0; c < 32; c++)
+ start_bits[c] |= cd->cbits[c+cbit_digit];
+ break;
+
+ /* The cbit_space table has vertical tab as whitespace; we have to
+ discard it. */
+
+ case OP_NOT_WHITESPACE:
+ for (c = 0; c < 32; c++)
+ {
+ int d = cd->cbits[c+cbit_space];
+ if (c == 1) d &= ~0x08;
+ start_bits[c] |= ~d;
+ }
+ break;
+
+ /* The cbit_space table has vertical tab as whitespace; we have to
+ discard it. */
+
+ case OP_WHITESPACE:
+ for (c = 0; c < 32; c++)
+ {
+ int d = cd->cbits[c+cbit_space];
+ if (c == 1) d &= ~0x08;
+ start_bits[c] |= d;
+ }
+ break;
+
+ case OP_NOT_WORDCHAR:
+ for (c = 0; c < 32; c++)
+ start_bits[c] |= ~cd->cbits[c+cbit_word];
+ break;
+
+ case OP_WORDCHAR:
+ for (c = 0; c < 32; c++)
+ start_bits[c] |= cd->cbits[c+cbit_word];
+ break;
+ }
+
+ tcode += 2;
+ break;
+
+ /* Character class where all the information is in a bit map: set the
+ bits and either carry on or not, according to the repeat count. If it was
+ a negative class, and we are operating with UTF-8 characters, any byte
+ with a value >= 0xc4 is a potentially valid starter because it starts a
+ character with a value > 255. */
+
+ case OP_NCLASS:
+#ifdef SUPPORT_UTF8
+ if (utf8)
+ {
+ start_bits[24] |= 0xf0; /* Bits for 0xc4 - 0xc8 */
+ memset(start_bits+25, 0xff, 7); /* Bits for 0xc9 - 0xff */
+ }
+#endif
+ /* Fall through */
+
+ case OP_CLASS:
+ {
+ tcode++;
+
+ /* In UTF-8 mode, the bits in a bit map correspond to character
+ values, not to byte values. However, the bit map we are constructing is
+ for byte values. So we have to do a conversion for characters whose
+ value is > 127. In fact, there are only two possible starting bytes for
+ characters in the range 128 - 255. */
+
+#ifdef SUPPORT_UTF8
+ if (utf8)
+ {
+ for (c = 0; c < 16; c++) start_bits[c] |= tcode[c];
+ for (c = 128; c < 256; c++)
+ {
+          if ((tcode[c/8] & (1 << (c&7))) != 0)
+ {
+ int d = (c >> 6) | 0xc0; /* Set bit for this starter */
+ start_bits[d/8] |= (1 << (d&7)); /* and then skip on to the */
+ c = (c & 0xc0) + 0x40 - 1; /* next relevant character. */
+ }
+ }
+ }
+
+ /* In non-UTF-8 mode, the two bit maps are completely compatible. */
+
+ else
+#endif
+ {
+ for (c = 0; c < 32; c++) start_bits[c] |= tcode[c];
+ }
+
+ /* Advance past the bit map, and act on what follows */
+
+ tcode += 32;
+ switch (*tcode)
+ {
+ case OP_CRSTAR:
+ case OP_CRMINSTAR:
+ case OP_CRQUERY:
+ case OP_CRMINQUERY:
+ tcode++;
+ break;
+
+ case OP_CRRANGE:
+ case OP_CRMINRANGE:
+ if (((tcode[1] << 8) + tcode[2]) == 0) tcode += 5;
+ else try_next = FALSE;
+ break;
+
+ default:
+ try_next = FALSE;
+ break;
+ }
+ }
+ break; /* End of bitmap class handling */
+
+ } /* End of switch */
+ } /* End of try_next loop */
+
+ code += GET(code, 1); /* Advance to next branch */
+ }
+while (*code == OP_ALT);
+return yield;
+}
+
+
+
+/*************************************************
+* Study a compiled expression *
+*************************************************/
+
+/* This function is handed a compiled expression that it must study to produce
+information that will speed up the matching. It returns a pcre_extra block
+which then gets handed back to pcre_exec().
+
+Arguments:
+ re points to the compiled expression
+ options contains option bits
+ errorptr points to where to place error messages;
+ set NULL unless error
+
+Returns: pointer to a pcre_extra block, with study_data filled in and the
+ appropriate flag set;
+ NULL on error or if no optimization possible
+*/
+
+PCRE_EXP_DEFN pcre_extra *
+pcre_study(const pcre *external_re, int options, const char **errorptr)
+{
+uschar start_bits[32];
+pcre_extra *extra;
+pcre_study_data *study;
+const uschar *tables;
+uschar *code;
+compile_data compile_block;
+const real_pcre *re = (const real_pcre *)external_re;
+
+*errorptr = NULL;
+
+if (re == NULL || re->magic_number != MAGIC_NUMBER)
+ {
+ *errorptr = "argument is not a compiled regular expression";
+ return NULL;
+ }
+
+if ((options & ~PUBLIC_STUDY_OPTIONS) != 0)
+ {
+ *errorptr = "unknown or incorrect option bit(s) set";
+ return NULL;
+ }
+
+code = (uschar *)re + re->name_table_offset +
+ (re->name_count * re->name_entry_size);
+
+/* For an anchored pattern, or an unanchored pattern that has a first char, or
+a multiline pattern that matches only at "line starts", no further processing
+at present. */
+
+if ((re->options & PCRE_ANCHORED) != 0 ||
+ (re->flags & (PCRE_FIRSTSET|PCRE_STARTLINE)) != 0)
+ return NULL;
+
+/* Set the character tables in the block that is passed around */
+
+tables = re->tables;
+if (tables == NULL)
+ (void)pcre_fullinfo(external_re, NULL, PCRE_INFO_DEFAULT_TABLES,
+ (void *)(&tables));
+
+compile_block.lcc = tables + lcc_offset;
+compile_block.fcc = tables + fcc_offset;
+compile_block.cbits = tables + cbits_offset;
+compile_block.ctypes = tables + ctypes_offset;
+
+/* See if we can find a fixed set of initial characters for the pattern. */
+
+memset(start_bits, 0, 32 * sizeof(uschar));
+if (set_start_bits(code, start_bits, (re->options & PCRE_CASELESS) != 0,
+ (re->options & PCRE_UTF8) != 0, &compile_block) != SSB_DONE) return NULL;
+
+/* Get a pcre_extra block and a pcre_study_data block. The study data is put in
+the latter, which is pointed to by the former, which may also get additional
+data set later by the calling program. At the moment, the size of
+pcre_study_data is fixed. We nevertheless save it in a field for returning via
+the pcre_fullinfo() function so that if it becomes variable in the future, we
+don't have to change that code. */
+
+extra = (pcre_extra *)(pcre_malloc)
+ (sizeof(pcre_extra) + sizeof(pcre_study_data));
+
+if (extra == NULL)
+ {
+ *errorptr = "failed to get memory";
+ return NULL;
+ }
+
+study = (pcre_study_data *)((char *)extra + sizeof(pcre_extra));
+extra->flags = PCRE_EXTRA_STUDY_DATA;
+extra->study_data = study;
+
+study->size = sizeof(pcre_study_data);
+study->options = PCRE_STUDY_MAPPED;
+memcpy(study->start_bits, start_bits, sizeof(start_bits));
+
+return extra;
+}
+
+/* End of pcre_study.c */
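
Not part of the patch: a minimal sketch of the compile/study/exec flow that the pcre_study() comments above describe. The pattern and subject are arbitrary examples and error handling is abbreviated.

/* Sketch only. */
#include <stdio.h>
#include <pcre.h>

int main(void)
{
const char *err = NULL;
int erroffset, rc, ovector[30];
pcre_extra *extra;
pcre *re = pcre_compile("(cat|dog)", 0, &err, &erroffset, NULL);
if (re == NULL) { fprintf(stderr, "compile failed: %s\n", err); return 1; }

/* pcre_study() may legitimately return NULL with *errorptr still NULL,
   meaning it found nothing worth recording (e.g. an anchored pattern or one
   with a fixed first byte); passing NULL as the extra argument is fine. */
extra = pcre_study(re, 0, &err);
if (err != NULL) { fprintf(stderr, "study failed: %s\n", err); return 1; }

rc = pcre_exec(re, extra, "big dog", 7, 0, 0, ovector, 30);
printf("rc=%d, match at offset %d\n", rc, rc > 0 ? ovector[0] : -1);
return 0;
}
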
diff --git a/src/third_party/pcre-7.4/pcre_tables.c b/src/third_party/pcre-7.4/pcre_tables.c
new file mode 100644
index 00000000000..4b14fd1befa
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcre_tables.c
@@ -0,0 +1,318 @@
+/*************************************************
+* Perl-Compatible Regular Expressions *
+*************************************************/
+
+/* PCRE is a library of functions to support regular expressions whose syntax
+and semantics are as close as possible to those of the Perl 5 language.
+
+ Written by Philip Hazel
+ Copyright (c) 1997-2007 University of Cambridge
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+
+/* This module contains some fixed tables that are used by more than one of the
+PCRE code modules. The tables are also #included by the pcretest program, which
+uses macros to change their names from _pcre_xxx to xxxx, thereby avoiding name
+clashes with the library. */
+
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "pcre_internal.h"
+
+
+/* Table of sizes for the fixed-length opcodes. It's defined in a macro so that
+the definition is next to the definition of the opcodes in pcre_internal.h. */
+
+const uschar _pcre_OP_lengths[] = { OP_LENGTHS };
+
+
+
+/*************************************************
+* Tables for UTF-8 support *
+*************************************************/
+
+/* These are the breakpoints for different numbers of bytes in a UTF-8
+character. */
+
+#ifdef SUPPORT_UTF8
+
+const int _pcre_utf8_table1[] =
+ { 0x7f, 0x7ff, 0xffff, 0x1fffff, 0x3ffffff, 0x7fffffff};
+
+const int _pcre_utf8_table1_size = sizeof(_pcre_utf8_table1)/sizeof(int);
+
+/* These are the indicator bits and the mask for the data bits to set in the
+first byte of a character, indexed by the number of additional bytes. */
+
+const int _pcre_utf8_table2[] = { 0, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc};
+const int _pcre_utf8_table3[] = { 0xff, 0x1f, 0x0f, 0x07, 0x03, 0x01};
+
+/* Table of the number of extra bytes, indexed by the first byte masked with
+0x3f. The highest number for a valid UTF-8 first byte is in fact 0x3d. */
+
+const uschar _pcre_utf8_table4[] = {
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
+ 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5 };
+
+/* The pcre_utt[] table below translates Unicode property names into type and
+code values. It is searched by binary chop, so must be in collating sequence of
+name. Originally, the table contained pointers to the name strings in the first
+field of each entry. However, that leads to a large number of relocations when
+a shared library is dynamically loaded. A significant reduction is made by
+putting all the names into a single, large string and then using offsets in the
+table itself. Maintenance is more error-prone, but frequent changes to this
+data are unlikely. */
+
+const char _pcre_utt_names[] =
+ "Any\0"
+ "Arabic\0"
+ "Armenian\0"
+ "Balinese\0"
+ "Bengali\0"
+ "Bopomofo\0"
+ "Braille\0"
+ "Buginese\0"
+ "Buhid\0"
+ "C\0"
+ "Canadian_Aboriginal\0"
+ "Cc\0"
+ "Cf\0"
+ "Cherokee\0"
+ "Cn\0"
+ "Co\0"
+ "Common\0"
+ "Coptic\0"
+ "Cs\0"
+ "Cuneiform\0"
+ "Cypriot\0"
+ "Cyrillic\0"
+ "Deseret\0"
+ "Devanagari\0"
+ "Ethiopic\0"
+ "Georgian\0"
+ "Glagolitic\0"
+ "Gothic\0"
+ "Greek\0"
+ "Gujarati\0"
+ "Gurmukhi\0"
+ "Han\0"
+ "Hangul\0"
+ "Hanunoo\0"
+ "Hebrew\0"
+ "Hiragana\0"
+ "Inherited\0"
+ "Kannada\0"
+ "Katakana\0"
+ "Kharoshthi\0"
+ "Khmer\0"
+ "L\0"
+ "L&\0"
+ "Lao\0"
+ "Latin\0"
+ "Limbu\0"
+ "Linear_B\0"
+ "Ll\0"
+ "Lm\0"
+ "Lo\0"
+ "Lt\0"
+ "Lu\0"
+ "M\0"
+ "Malayalam\0"
+ "Mc\0"
+ "Me\0"
+ "Mn\0"
+ "Mongolian\0"
+ "Myanmar\0"
+ "N\0"
+ "Nd\0"
+ "New_Tai_Lue\0"
+ "Nko\0"
+ "Nl\0"
+ "No\0"
+ "Ogham\0"
+ "Old_Italic\0"
+ "Old_Persian\0"
+ "Oriya\0"
+ "Osmanya\0"
+ "P\0"
+ "Pc\0"
+ "Pd\0"
+ "Pe\0"
+ "Pf\0"
+ "Phags_Pa\0"
+ "Phoenician\0"
+ "Pi\0"
+ "Po\0"
+ "Ps\0"
+ "Runic\0"
+ "S\0"
+ "Sc\0"
+ "Shavian\0"
+ "Sinhala\0"
+ "Sk\0"
+ "Sm\0"
+ "So\0"
+ "Syloti_Nagri\0"
+ "Syriac\0"
+ "Tagalog\0"
+ "Tagbanwa\0"
+ "Tai_Le\0"
+ "Tamil\0"
+ "Telugu\0"
+ "Thaana\0"
+ "Thai\0"
+ "Tibetan\0"
+ "Tifinagh\0"
+ "Ugaritic\0"
+ "Yi\0"
+ "Z\0"
+ "Zl\0"
+ "Zp\0"
+ "Zs\0";
+
+const ucp_type_table _pcre_utt[] = {
+ { 0, PT_ANY, 0 },
+ { 4, PT_SC, ucp_Arabic },
+ { 11, PT_SC, ucp_Armenian },
+ { 20, PT_SC, ucp_Balinese },
+ { 29, PT_SC, ucp_Bengali },
+ { 37, PT_SC, ucp_Bopomofo },
+ { 46, PT_SC, ucp_Braille },
+ { 54, PT_SC, ucp_Buginese },
+ { 63, PT_SC, ucp_Buhid },
+ { 69, PT_GC, ucp_C },
+ { 71, PT_SC, ucp_Canadian_Aboriginal },
+ { 91, PT_PC, ucp_Cc },
+ { 94, PT_PC, ucp_Cf },
+ { 97, PT_SC, ucp_Cherokee },
+ { 106, PT_PC, ucp_Cn },
+ { 109, PT_PC, ucp_Co },
+ { 112, PT_SC, ucp_Common },
+ { 119, PT_SC, ucp_Coptic },
+ { 126, PT_PC, ucp_Cs },
+ { 129, PT_SC, ucp_Cuneiform },
+ { 139, PT_SC, ucp_Cypriot },
+ { 147, PT_SC, ucp_Cyrillic },
+ { 156, PT_SC, ucp_Deseret },
+ { 164, PT_SC, ucp_Devanagari },
+ { 175, PT_SC, ucp_Ethiopic },
+ { 184, PT_SC, ucp_Georgian },
+ { 193, PT_SC, ucp_Glagolitic },
+ { 204, PT_SC, ucp_Gothic },
+ { 211, PT_SC, ucp_Greek },
+ { 217, PT_SC, ucp_Gujarati },
+ { 226, PT_SC, ucp_Gurmukhi },
+ { 235, PT_SC, ucp_Han },
+ { 239, PT_SC, ucp_Hangul },
+ { 246, PT_SC, ucp_Hanunoo },
+ { 254, PT_SC, ucp_Hebrew },
+ { 261, PT_SC, ucp_Hiragana },
+ { 270, PT_SC, ucp_Inherited },
+ { 280, PT_SC, ucp_Kannada },
+ { 288, PT_SC, ucp_Katakana },
+ { 297, PT_SC, ucp_Kharoshthi },
+ { 308, PT_SC, ucp_Khmer },
+ { 314, PT_GC, ucp_L },
+ { 316, PT_LAMP, 0 },
+ { 319, PT_SC, ucp_Lao },
+ { 323, PT_SC, ucp_Latin },
+ { 329, PT_SC, ucp_Limbu },
+ { 335, PT_SC, ucp_Linear_B },
+ { 344, PT_PC, ucp_Ll },
+ { 347, PT_PC, ucp_Lm },
+ { 350, PT_PC, ucp_Lo },
+ { 353, PT_PC, ucp_Lt },
+ { 356, PT_PC, ucp_Lu },
+ { 359, PT_GC, ucp_M },
+ { 361, PT_SC, ucp_Malayalam },
+ { 371, PT_PC, ucp_Mc },
+ { 374, PT_PC, ucp_Me },
+ { 377, PT_PC, ucp_Mn },
+ { 380, PT_SC, ucp_Mongolian },
+ { 390, PT_SC, ucp_Myanmar },
+ { 398, PT_GC, ucp_N },
+ { 400, PT_PC, ucp_Nd },
+ { 403, PT_SC, ucp_New_Tai_Lue },
+ { 415, PT_SC, ucp_Nko },
+ { 419, PT_PC, ucp_Nl },
+ { 422, PT_PC, ucp_No },
+ { 425, PT_SC, ucp_Ogham },
+ { 431, PT_SC, ucp_Old_Italic },
+ { 442, PT_SC, ucp_Old_Persian },
+ { 454, PT_SC, ucp_Oriya },
+ { 460, PT_SC, ucp_Osmanya },
+ { 468, PT_GC, ucp_P },
+ { 470, PT_PC, ucp_Pc },
+ { 473, PT_PC, ucp_Pd },
+ { 476, PT_PC, ucp_Pe },
+ { 479, PT_PC, ucp_Pf },
+ { 482, PT_SC, ucp_Phags_Pa },
+ { 491, PT_SC, ucp_Phoenician },
+ { 502, PT_PC, ucp_Pi },
+ { 505, PT_PC, ucp_Po },
+ { 508, PT_PC, ucp_Ps },
+ { 511, PT_SC, ucp_Runic },
+ { 517, PT_GC, ucp_S },
+ { 519, PT_PC, ucp_Sc },
+ { 522, PT_SC, ucp_Shavian },
+ { 530, PT_SC, ucp_Sinhala },
+ { 538, PT_PC, ucp_Sk },
+ { 541, PT_PC, ucp_Sm },
+ { 544, PT_PC, ucp_So },
+ { 547, PT_SC, ucp_Syloti_Nagri },
+ { 560, PT_SC, ucp_Syriac },
+ { 567, PT_SC, ucp_Tagalog },
+ { 575, PT_SC, ucp_Tagbanwa },
+ { 584, PT_SC, ucp_Tai_Le },
+ { 591, PT_SC, ucp_Tamil },
+ { 597, PT_SC, ucp_Telugu },
+ { 604, PT_SC, ucp_Thaana },
+ { 611, PT_SC, ucp_Thai },
+ { 616, PT_SC, ucp_Tibetan },
+ { 624, PT_SC, ucp_Tifinagh },
+ { 633, PT_SC, ucp_Ugaritic },
+ { 642, PT_SC, ucp_Yi },
+ { 645, PT_GC, ucp_Z },
+ { 647, PT_PC, ucp_Zl },
+ { 650, PT_PC, ucp_Zp },
+ { 653, PT_PC, ucp_Zs }
+};
+
+const int _pcre_utt_size = sizeof(_pcre_utt)/sizeof(ucp_type_table);
+
+#endif /* SUPPORT_UTF8 */
+
+/* End of pcre_tables.c */
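
Not part of the patch: a sketch of the offsets-into-one-string scheme that the comment above describes for _pcre_utt. The names, offsets and field names here are illustrative, not the real table.

/* Sketch only. */
#include <assert.h>
#include <string.h>

static const char names[] = "Any\0" "Arabic\0" "Armenian\0";
static const struct { int name_offset; int value; } table[] = {
  { 0, 0 }, { 4, 1 }, { 11, 2 }   /* offsets index into names[] */
};

int main(void)
{
/* A lookup adds the stored offset to the base of the single string block, so
   the table itself holds no pointers and needs no relocations when a shared
   library is loaded. */
assert(strcmp(names + table[1].name_offset, "Arabic") == 0);
assert(strcmp(names + table[2].name_offset, "Armenian") == 0);
return 0;
}
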
diff --git a/src/third_party/pcre-7.4/pcre_try_flipped.c b/src/third_party/pcre-7.4/pcre_try_flipped.c
new file mode 100644
index 00000000000..412902bbbeb
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcre_try_flipped.c
@@ -0,0 +1,137 @@
+/*************************************************
+* Perl-Compatible Regular Expressions *
+*************************************************/
+
+/* PCRE is a library of functions to support regular expressions whose syntax
+and semantics are as close as possible to those of the Perl 5 language.
+
+ Written by Philip Hazel
+ Copyright (c) 1997-2007 University of Cambridge
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+
+/* This module contains an internal function that tests a compiled pattern to
+see if it was compiled with the opposite endianness. If so, it uses an
+auxiliary local function to flip the appropriate bytes. */
+
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "pcre_internal.h"
+
+
+/*************************************************
+* Flip bytes in an integer *
+*************************************************/
+
+/* This function is called when the magic number in a regex doesn't match, in
+order to flip its bytes to see if we are dealing with a pattern that was
+compiled on a host of different endianness. If so, this function is used to
+flip other byte values.
+
+Arguments:
+ value the number to flip
+ n the number of bytes to flip (assumed to be 2 or 4)
+
+Returns: the flipped value
+*/
+
+static unsigned long int
+byteflip(unsigned long int value, int n)
+{
+if (n == 2) return ((value & 0x00ff) << 8) | ((value & 0xff00) >> 8);
+return ((value & 0x000000ff) << 24) |
+ ((value & 0x0000ff00) << 8) |
+ ((value & 0x00ff0000) >> 8) |
+ ((value & 0xff000000) >> 24);
+}
+
+
+
+/*************************************************
+* Test for a byte-flipped compiled regex *
+*************************************************/
+
+/* This function is called from pcre_exec(), pcre_dfa_exec(), and also from
+pcre_fullinfo(). Its job is to test whether the regex is byte-flipped - that
+is, it was compiled on a system of opposite endianness. The function is called
+only when the native MAGIC_NUMBER test fails. If the regex is indeed flipped,
+we flip all the relevant values into a different data block, and return it.
+
+Arguments:
+ re points to the regex
+ study points to study data, or NULL
+ internal_re points to a new regex block
+ internal_study points to a new study block
+
+Returns: the new block if it is indeed a byte-flipped regex
+ NULL if it is not
+*/
+
+real_pcre *
+_pcre_try_flipped(const real_pcre *re, real_pcre *internal_re,
+ const pcre_study_data *study, pcre_study_data *internal_study)
+{
+if (byteflip(re->magic_number, sizeof(re->magic_number)) != MAGIC_NUMBER)
+ return NULL;
+
+*internal_re = *re; /* To copy other fields */
+internal_re->size = byteflip(re->size, sizeof(re->size));
+internal_re->options = byteflip(re->options, sizeof(re->options));
+internal_re->flags = (pcre_uint16)byteflip(re->flags, sizeof(re->flags));
+internal_re->top_bracket =
+ (pcre_uint16)byteflip(re->top_bracket, sizeof(re->top_bracket));
+internal_re->top_backref =
+ (pcre_uint16)byteflip(re->top_backref, sizeof(re->top_backref));
+internal_re->first_byte =
+ (pcre_uint16)byteflip(re->first_byte, sizeof(re->first_byte));
+internal_re->req_byte =
+ (pcre_uint16)byteflip(re->req_byte, sizeof(re->req_byte));
+internal_re->name_table_offset =
+ (pcre_uint16)byteflip(re->name_table_offset, sizeof(re->name_table_offset));
+internal_re->name_entry_size =
+ (pcre_uint16)byteflip(re->name_entry_size, sizeof(re->name_entry_size));
+internal_re->name_count =
+ (pcre_uint16)byteflip(re->name_count, sizeof(re->name_count));
+
+if (study != NULL)
+ {
+ *internal_study = *study; /* To copy other fields */
+ internal_study->size = byteflip(study->size, sizeof(study->size));
+ internal_study->options = byteflip(study->options, sizeof(study->options));
+ }
+
+return internal_re;
+}
+
+/* End of pcre_try_flipped.c */
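
Not part of the patch: a worked example of what byteflip() above computes for a 4-byte value (2-byte values take the first branch). flip32() simply repeats the 4-byte arm so it can be tested standalone.

/* Sketch only. */
#include <assert.h>

static unsigned long flip32(unsigned long v)
{
return ((v & 0x000000ff) << 24) | ((v & 0x0000ff00) << 8) |
       ((v & 0x00ff0000) >> 8)  | ((v & 0xff000000) >> 24);
}

int main(void)
{
assert(flip32(0x50435245UL) == 0x45524350UL);          /* bytes 50 43 52 45 reversed */
assert(flip32(flip32(0x12345678UL)) == 0x12345678UL);  /* flipping twice is a no-op */
return 0;
}
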
diff --git a/src/third_party/pcre-7.4/pcre_ucp_searchfuncs.c b/src/third_party/pcre-7.4/pcre_ucp_searchfuncs.c
new file mode 100644
index 00000000000..316163e655a
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcre_ucp_searchfuncs.c
@@ -0,0 +1,179 @@
+/*************************************************
+* Perl-Compatible Regular Expressions *
+*************************************************/
+
+/* PCRE is a library of functions to support regular expressions whose syntax
+and semantics are as close as possible to those of the Perl 5 language.
+
+ Written by Philip Hazel
+ Copyright (c) 1997-2007 University of Cambridge
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+
+/* This module contains code for searching the table of Unicode character
+properties. */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "pcre_internal.h"
+
+#include "ucp.h" /* Category definitions */
+#include "ucpinternal.h" /* Internal table details */
+#include "ucptable.h" /* The table itself */
+
+
+/* Table to translate from particular type value to the general value. */
+
+static const int ucp_gentype[] = {
+ ucp_C, ucp_C, ucp_C, ucp_C, ucp_C, /* Cc, Cf, Cn, Co, Cs */
+ ucp_L, ucp_L, ucp_L, ucp_L, ucp_L, /* Ll, Lu, Lm, Lo, Lt */
+ ucp_M, ucp_M, ucp_M, /* Mc, Me, Mn */
+ ucp_N, ucp_N, ucp_N, /* Nd, Nl, No */
+ ucp_P, ucp_P, ucp_P, ucp_P, ucp_P, /* Pc, Pd, Pe, Pf, Pi */
+ ucp_P, ucp_P, /* Ps, Po */
+ ucp_S, ucp_S, ucp_S, ucp_S, /* Sc, Sk, Sm, So */
+ ucp_Z, ucp_Z, ucp_Z /* Zl, Zp, Zs */
+};
+
+
+
+/*************************************************
+* Search table and return type *
+*************************************************/
+
+/* Three values are returned: the category is ucp_C, ucp_L, etc. The detailed
+character type is ucp_Lu, ucp_Nd, etc. The script is ucp_Latin, etc.
+
+Arguments:
+ c the character value
+ type_ptr the detailed character type is returned here
+ script_ptr the script is returned here
+
+Returns: the character type category
+*/
+
+int
+_pcre_ucp_findprop(const unsigned int c, int *type_ptr, int *script_ptr)
+{
+int bot = 0;
+int top = sizeof(ucp_table)/sizeof(cnode);
+int mid;
+
+/* The table is searched using a binary chop. You might think that using
+intermediate variables to hold some of the common expressions would speed
+things up, but tests with gcc 3.4.4 on Linux showed that, on the contrary, it
+makes things a lot slower. */
+
+for (;;)
+ {
+ if (top <= bot)
+ {
+ *type_ptr = ucp_Cn;
+ *script_ptr = ucp_Common;
+ return ucp_C;
+ }
+ mid = (bot + top) >> 1;
+ if (c == (ucp_table[mid].f0 & f0_charmask)) break;
+ if (c < (ucp_table[mid].f0 & f0_charmask)) top = mid;
+ else
+ {
+ if ((ucp_table[mid].f0 & f0_rangeflag) != 0 &&
+ c <= (ucp_table[mid].f0 & f0_charmask) +
+ (ucp_table[mid].f1 & f1_rangemask)) break;
+ bot = mid + 1;
+ }
+ }
+
+/* Found an entry in the table. Set the script and detailed type values, and
+return the general type. */
+
+*script_ptr = (ucp_table[mid].f0 & f0_scriptmask) >> f0_scriptshift;
+*type_ptr = (ucp_table[mid].f1 & f1_typemask) >> f1_typeshift;
+
+return ucp_gentype[*type_ptr];
+}
+
+
+
+/*************************************************
+* Search table and return other case *
+*************************************************/
+
+/* If the given character is a letter, and there is another case for the
+letter, return the other case. Otherwise, return -1.
+
+Arguments:
+ c the character value
+
+Returns: the other case or NOTACHAR if none
+*/
+
+unsigned int
+_pcre_ucp_othercase(const unsigned int c)
+{
+int bot = 0;
+int top = sizeof(ucp_table)/sizeof(cnode);
+int mid, offset;
+
+/* The table is searched using a binary chop. You might think that using
+intermediate variables to hold some of the common expressions would speed
+things up, but tests with gcc 3.4.4 on Linux showed that, on the contrary, it
+makes things a lot slower. */
+
+for (;;)
+ {
+ if (top <= bot) return -1;
+ mid = (bot + top) >> 1;
+ if (c == (ucp_table[mid].f0 & f0_charmask)) break;
+ if (c < (ucp_table[mid].f0 & f0_charmask)) top = mid;
+ else
+ {
+ if ((ucp_table[mid].f0 & f0_rangeflag) != 0 &&
+ c <= (ucp_table[mid].f0 & f0_charmask) +
+ (ucp_table[mid].f1 & f1_rangemask)) break;
+ bot = mid + 1;
+ }
+ }
+
+/* Found an entry in the table. Return NOTACHAR for a range entry. Otherwise
+return the other case if there is one, else NOTACHAR. */
+
+if ((ucp_table[mid].f0 & f0_rangeflag) != 0) return NOTACHAR;
+
+offset = ucp_table[mid].f1 & f1_casemask;
+if ((offset & f1_caseneg) != 0) offset |= f1_caseneg;
+return (offset == 0)? NOTACHAR : c + offset;
+}
+
+
+/* End of pcre_ucp_searchfuncs.c */
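
Not part of the patch: the shape of the binary chop over range entries that _pcre_ucp_findprop() and _pcre_ucp_othercase() perform, with the f0/f1 bit packing replaced by plain fields for readability. The data and property codes are made up.

/* Sketch only. */
#include <assert.h>

typedef struct { unsigned int start; unsigned int len; int prop; } entry;

static int find_prop(unsigned int c, const entry *t, int n, int dflt)
{
int bot = 0, top = n;
while (top > bot)
  {
  int mid = (bot + top) >> 1;
  if (c < t[mid].start) top = mid;
  else if (c <= t[mid].start + t[mid].len) return t[mid].prop;
  else bot = mid + 1;
  }
return dflt;   /* character not covered by the table */
}

int main(void)
{
static const entry t[] = { { 0x41, 25, 1 }, { 0x61, 25, 2 }, { 0x100, 0, 3 } };
assert(find_prop(0x42, t, 3, 0) == 1);    /* inside the first range */
assert(find_prop(0x100, t, 3, 0) == 3);   /* single-character entry */
assert(find_prop(0x7f, t, 3, 0) == 0);    /* falls between entries */
return 0;
}
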
diff --git a/src/third_party/pcre-7.4/pcre_valid_utf8.c b/src/third_party/pcre-7.4/pcre_valid_utf8.c
new file mode 100644
index 00000000000..189914243dd
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcre_valid_utf8.c
@@ -0,0 +1,162 @@
+/*************************************************
+* Perl-Compatible Regular Expressions *
+*************************************************/
+
+/* PCRE is a library of functions to support regular expressions whose syntax
+and semantics are as close as possible to those of the Perl 5 language.
+
+ Written by Philip Hazel
+ Copyright (c) 1997-2007 University of Cambridge
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+
+/* This module contains an internal function for validating UTF-8 character
+strings. */
+
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "pcre_internal.h"
+
+
+/*************************************************
+* Validate a UTF-8 string *
+*************************************************/
+
+/* This function is called (optionally) at the start of compile or match, to
+validate that a supposed UTF-8 string is actually valid. The early check means
+that subsequent code can assume it is dealing with a valid string. The check
+can be turned off for maximum performance, but the consequences of supplying
+an invalid string are then undefined.
+
+Originally, this function checked according to RFC 2279, allowing for values in
+the range 0 to 0x7fffffff, up to 6 bytes long, but ensuring that they were in
+the canonical format. Once somebody had pointed out RFC 3629 to me (it
+obsoletes 2279), additional restrictions were applied. The values are now
+limited to be between 0 and 0x0010ffff, no more than 4 bytes long, and the
+subrange 0xd800 to 0xdfff is excluded.
+
+Arguments:
+ string points to the string
+ length length of string, or -1 if the string is zero-terminated
+
+Returns: < 0 if the string is a valid UTF-8 string
+ >= 0 otherwise; the value is the offset of the bad byte
+*/
+
+int
+_pcre_valid_utf8(const uschar *string, int length)
+{
+#ifdef SUPPORT_UTF8
+register const uschar *p;
+
+if (length < 0)
+ {
+ for (p = string; *p != 0; p++);
+ length = p - string;
+ }
+
+for (p = string; length-- > 0; p++)
+ {
+ register int ab;
+ register int c = *p;
+ if (c < 128) continue;
+ if (c < 0xc0) return p - string;
+ ab = _pcre_utf8_table4[c & 0x3f]; /* Number of additional bytes */
+ if (length < ab || ab > 3) return p - string;
+ length -= ab;
+
+ /* Check top bits in the second byte */
+ if ((*(++p) & 0xc0) != 0x80) return p - string;
+
+ /* Check for overlong sequences for each different length, and for the
+  excluded range 0xd800 to 0xdfff. */
+
+ switch (ab)
+ {
+ /* Check for xx00 000x (overlong sequence) */
+
+ case 1:
+ if ((c & 0x3e) == 0) return p - string;
+ continue; /* We know there aren't any more bytes to check */
+
+    /* Check for 1110 0000, xx0x xxxx (overlong sequence) or
+       1110 1101, 1010 xxxx (0xd800 - 0xdfff) */
+
+ case 2:
+ if ((c == 0xe0 && (*p & 0x20) == 0) ||
+ (c == 0xed && *p >= 0xa0))
+ return p - string;
+ break;
+
+ /* Check for 1111 0000, xx00 xxxx (overlong sequence) or
+ greater than 0x0010ffff (f4 8f bf bf) */
+
+ case 3:
+ if ((c == 0xf0 && (*p & 0x30) == 0) ||
+ (c > 0xf4 ) ||
+ (c == 0xf4 && *p > 0x8f))
+ return p - string;
+ break;
+
+#if 0
+ /* These cases can no longer occur, as we restrict to a maximum of four
+ bytes nowadays. Leave the code here in case we ever want to add an option
+ for longer sequences. */
+
+ /* Check for 1111 1000, xx00 0xxx */
+ case 4:
+ if (c == 0xf8 && (*p & 0x38) == 0) return p - string;
+ break;
+
+ /* Check for leading 0xfe or 0xff, and then for 1111 1100, xx00 00xx */
+ case 5:
+ if (c == 0xfe || c == 0xff ||
+ (c == 0xfc && (*p & 0x3c) == 0)) return p - string;
+ break;
+#endif
+
+ }
+
+ /* Check for valid bytes after the 2nd, if any; all must start 10 */
+ while (--ab > 0)
+ {
+ if ((*(++p) & 0xc0) != 0x80) return p - string;
+ }
+ }
+#endif
+
+return -1;
+}
+
+/* End of pcre_valid_utf8.c */
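
Not part of the patch: the two-byte overlong test from the switch above, pulled out so the rejected lead bytes are easy to see.

/* Sketch only. */
#include <assert.h>

static int overlong2(unsigned char lead)
{
return (lead & 0x3e) == 0;   /* true only for lead bytes 0xC0 and 0xC1 */
}

int main(void)
{
assert(overlong2(0xc0));    /* C0 80 would be an overlong encoding of NUL */
assert(overlong2(0xc1));    /* C1 xx can only encode values below 0x80 */
assert(!overlong2(0xc2));   /* C2 80 == U+0080, the shortest valid form */
return 0;
}
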
diff --git a/src/third_party/pcre-7.4/pcre_version.c b/src/third_party/pcre-7.4/pcre_version.c
new file mode 100644
index 00000000000..c3b9ceebd32
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcre_version.c
@@ -0,0 +1,90 @@
+/*************************************************
+* Perl-Compatible Regular Expressions *
+*************************************************/
+
+/* PCRE is a library of functions to support regular expressions whose syntax
+and semantics are as close as possible to those of the Perl 5 language.
+
+ Written by Philip Hazel
+ Copyright (c) 1997-2007 University of Cambridge
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+
+/* This module contains the external function pcre_version(), which returns a
+string that identifies the PCRE version that is in use. */
+
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "pcre_internal.h"
+
+
+/*************************************************
+* Return version string *
+*************************************************/
+
+/* These macros are the standard way of turning unquoted text into C strings.
+They allow macros like PCRE_MAJOR to be defined without quotes, which is
+convenient for user programs that want to test its value. */
+
+#define STRING(a) # a
+#define XSTRING(s) STRING(s)
+
+/* A problem turned up with PCRE_PRERELEASE, which is defined empty for
+production releases. Originally, it was used naively in this code:
+
+ return XSTRING(PCRE_MAJOR)
+ "." XSTRING(PCRE_MINOR)
+ XSTRING(PCRE_PRERELEASE)
+ " " XSTRING(PCRE_DATE);
+
+However, when PCRE_PRERELEASE is empty, this leads to an attempted expansion of
+STRING(). The C standard states: "If (before argument substitution) any
+argument consists of no preprocessing tokens, the behavior is undefined." It
+turns out that gcc treats this case as a single empty string - which is what we
+really want - but Visual C grumbles about the lack of an argument for the
+macro. Unfortunately, both are within their rights. To cope with both ways of
+handling this, I had to resort to some messy hackery that does a test at run time.
+I could find no way of detecting that a macro is defined as an empty string at
+pre-processor time. This hack uses a standard trick for avoiding calling
+the STRING macro with an empty argument when doing the test. */
+
+const char *
+pcre_version(void)
+{
+return (XSTRING(Z PCRE_PRERELEASE)[1] == 0)?
+ XSTRING(PCRE_MAJOR.PCRE_MINOR PCRE_DATE) :
+ XSTRING(PCRE_MAJOR.PCRE_MINOR) XSTRING(PCRE_PRERELEASE PCRE_DATE);
+}
+
+/* End of pcre_version.c */
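
Not part of the patch: a standalone illustration of the two-level stringification described above; MAJOR and MINOR stand in for PCRE_MAJOR and PCRE_MINOR.

/* Sketch only. */
#include <assert.h>
#include <stdio.h>
#include <string.h>

#define STRING(a)  # a
#define XSTRING(s) STRING(s)
#define MAJOR 7
#define MINOR 4

int main(void)
{
/* STRING quotes its argument literally; XSTRING lets the argument be
   macro-expanded first, which is what a version string needs. */
assert(strcmp(STRING(MAJOR.MINOR), "MAJOR.MINOR") == 0);
assert(strcmp(XSTRING(MAJOR.MINOR), "7.4") == 0);
puts(XSTRING(MAJOR.MINOR));
return 0;
}
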
diff --git a/src/third_party/pcre-7.4/pcre_xclass.c b/src/third_party/pcre-7.4/pcre_xclass.c
new file mode 100644
index 00000000000..cdf1af12fde
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcre_xclass.c
@@ -0,0 +1,148 @@
+/*************************************************
+* Perl-Compatible Regular Expressions *
+*************************************************/
+
+/* PCRE is a library of functions to support regular expressions whose syntax
+and semantics are as close as possible to those of the Perl 5 language.
+
+ Written by Philip Hazel
+ Copyright (c) 1997-2007 University of Cambridge
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+
+/* This module contains an internal function that is used to match an extended
+class (one that contains characters whose values are > 255). It is used by both
+pcre_exec() and pcre_dfa_exec(). */
+
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "pcre_internal.h"
+
+
+/*************************************************
+* Match character against an XCLASS *
+*************************************************/
+
+/* This function is called to match a character against an extended class that
+might contain values > 255.
+
+Arguments:
+ c the character
+ data points to the flag byte of the XCLASS data
+
+Returns: TRUE if character matches, else FALSE
+*/
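+
+/* Illustrative summary (not in the upstream source, derived from the code
+below): the XCLASS data is one flag byte (which may have XCL_NOT and/or
+XCL_MAP set), then an optional 32-byte bitmap for code points < 256 when
+XCL_MAP is set, then a sequence of items each introduced by XCL_SINGLE (one
+character follows), XCL_RANGE (two characters follow) or XCL_PROP/XCL_NOTPROP
+(two property bytes follow), terminated by XCL_END. */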
+
+BOOL
+_pcre_xclass(int c, const uschar *data)
+{
+int t;
+BOOL negated = (*data & XCL_NOT) != 0;
+
+/* Character values < 256 are matched against a bitmap, if one is present. If
+not, we still carry on, because there may be ranges that start below 256 in the
+additional data. */
+
+if (c < 256)
+ {
+ if ((*data & XCL_MAP) != 0 && (data[1 + c/8] & (1 << (c&7))) != 0)
+ return !negated; /* char found */
+ }
+
+/* First skip the bit map if present. Then match against the list of Unicode
+properties or large chars or ranges that end with a large char. We won't ever
+encounter XCL_PROP or XCL_NOTPROP when UCP support is not compiled. */
+
+if ((*data++ & XCL_MAP) != 0) data += 32;
+
+while ((t = *data++) != XCL_END)
+ {
+ int x, y;
+ if (t == XCL_SINGLE)
+ {
+ GETCHARINC(x, data);
+ if (c == x) return !negated;
+ }
+ else if (t == XCL_RANGE)
+ {
+ GETCHARINC(x, data);
+ GETCHARINC(y, data);
+ if (c >= x && c <= y) return !negated;
+ }
+
+#ifdef SUPPORT_UCP
+ else /* XCL_PROP & XCL_NOTPROP */
+ {
+ int chartype, script;
+ int category = _pcre_ucp_findprop(c, &chartype, &script);
+
+ switch(*data)
+ {
+ case PT_ANY:
+ if (t == XCL_PROP) return !negated;
+ break;
+
+ case PT_LAMP:
+ if ((chartype == ucp_Lu || chartype == ucp_Ll || chartype == ucp_Lt) ==
+ (t == XCL_PROP)) return !negated;
+ break;
+
+ case PT_GC:
+ if ((data[1] == category) == (t == XCL_PROP)) return !negated;
+ break;
+
+ case PT_PC:
+ if ((data[1] == chartype) == (t == XCL_PROP)) return !negated;
+ break;
+
+ case PT_SC:
+ if ((data[1] == script) == (t == XCL_PROP)) return !negated;
+ break;
+
+ /* This should never occur, but compilers may mutter if there is no
+ default. */
+
+ default:
+ return FALSE;
+ }
+
+ data += 2;
+ }
+#endif /* SUPPORT_UCP */
+ }
+
+return negated; /* char did not match */
+}
+
+/* End of pcre_xclass.c */
diff --git a/src/third_party/pcre-7.4/pcrecpp.cc b/src/third_party/pcre-7.4/pcrecpp.cc
new file mode 100644
index 00000000000..ffb7932b00b
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcrecpp.cc
@@ -0,0 +1,857 @@
+// Copyright (c) 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Sanjay Ghemawat
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <ctype.h>
+#include <limits.h> /* for SHRT_MIN, USHRT_MAX, etc */
+#include <assert.h>
+#include <errno.h>
+#include <string>
+#include <algorithm>
+
+#include "pcrecpp_internal.h"
+#include "pcre.h"
+#include "pcrecpp.h"
+#include "pcre_stringpiece.h"
+
+
+namespace pcrecpp {
+
+// Maximum number of args we can set
+static const int kMaxArgs = 16;
+static const int kVecSize = (1 + kMaxArgs) * 3; // results + PCRE workspace
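+// (Added note, not from the original source: pcre_exec() uses only the first
+// two-thirds of the ovector to return capture offsets and treats the final
+// third as internal workspace, which is why the sizes above are multiples
+// of 3.)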
+
+// Special object that stands in for no argument
+PCRECPP_EXP_DEFN Arg no_arg((void*)NULL);
+
+// If a regular expression has no error, its error_ field points here
+static const string empty_string;
+
+// If the user doesn't ask for any options, we just use this one
+static RE_Options default_options;
+
+void RE::Init(const string& pat, const RE_Options* options) {
+ pattern_ = pat;
+ if (options == NULL) {
+ options_ = default_options;
+ } else {
+ options_ = *options;
+ }
+ error_ = &empty_string;
+ re_full_ = NULL;
+ re_partial_ = NULL;
+
+ re_partial_ = Compile(UNANCHORED);
+ if (re_partial_ != NULL) {
+ re_full_ = Compile(ANCHOR_BOTH);
+ }
+}
+
+void RE::Cleanup() {
+ if (re_full_ != NULL) (*pcre_free)(re_full_);
+ if (re_partial_ != NULL) (*pcre_free)(re_partial_);
+ if (error_ != &empty_string) delete error_;
+}
+
+
+RE::~RE() {
+ Cleanup();
+}
+
+
+pcre* RE::Compile(Anchor anchor) {
+ // First, convert RE_Options into pcre options
+ int pcre_options = 0;
+ pcre_options = options_.all_options();
+
+ // Special treatment for anchoring. This is needed because at
+ // runtime pcre only provides an option for anchoring at the
+ // beginning of a string (unless you use offset).
+ //
+ // There are three types of anchoring we want:
+ // UNANCHORED Compile the original pattern, and use
+ // a pcre unanchored match.
+ // ANCHOR_START Compile the original pattern, and use
+ // a pcre anchored match.
+ // ANCHOR_BOTH Tack a "\z" to the end of the original pattern
+ // and use a pcre anchored match.
+
+ const char* compile_error;
+ int eoffset;
+ pcre* re;
+ if (anchor != ANCHOR_BOTH) {
+ re = pcre_compile(pattern_.c_str(), pcre_options,
+ &compile_error, &eoffset, NULL);
+ } else {
+ // Tack a '\z' at the end of RE. Parenthesize it first so that
+ // the '\z' applies to all top-level alternatives in the regexp.
+ string wrapped = "(?:"; // A non-counting grouping operator
+ wrapped += pattern_;
+ wrapped += ")\\z";
+ re = pcre_compile(wrapped.c_str(), pcre_options,
+ &compile_error, &eoffset, NULL);
+ }
+ if (re == NULL) {
+ if (error_ == &empty_string) error_ = new string(compile_error);
+ }
+ return re;
+}
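+
+// Added note (illustrative, not from the original source): for the pattern
+// "foo|bar", ANCHOR_BOTH compiles "(?:foo|bar)\z", so a later pcre_exec()
+// call with PCRE_ANCHORED must match the whole subject. UNANCHORED and
+// ANCHOR_START compile the pattern unchanged and differ only in whether
+// PCRE_ANCHORED is passed at match time (see TryMatch() below).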
+
+/***** Matching interfaces *****/
+
+bool RE::FullMatch(const StringPiece& text,
+ const Arg& ptr1,
+ const Arg& ptr2,
+ const Arg& ptr3,
+ const Arg& ptr4,
+ const Arg& ptr5,
+ const Arg& ptr6,
+ const Arg& ptr7,
+ const Arg& ptr8,
+ const Arg& ptr9,
+ const Arg& ptr10,
+ const Arg& ptr11,
+ const Arg& ptr12,
+ const Arg& ptr13,
+ const Arg& ptr14,
+ const Arg& ptr15,
+ const Arg& ptr16) const {
+ const Arg* args[kMaxArgs];
+ int n = 0;
+ if (&ptr1 == &no_arg) goto done; args[n++] = &ptr1;
+ if (&ptr2 == &no_arg) goto done; args[n++] = &ptr2;
+ if (&ptr3 == &no_arg) goto done; args[n++] = &ptr3;
+ if (&ptr4 == &no_arg) goto done; args[n++] = &ptr4;
+ if (&ptr5 == &no_arg) goto done; args[n++] = &ptr5;
+ if (&ptr6 == &no_arg) goto done; args[n++] = &ptr6;
+ if (&ptr7 == &no_arg) goto done; args[n++] = &ptr7;
+ if (&ptr8 == &no_arg) goto done; args[n++] = &ptr8;
+ if (&ptr9 == &no_arg) goto done; args[n++] = &ptr9;
+ if (&ptr10 == &no_arg) goto done; args[n++] = &ptr10;
+ if (&ptr11 == &no_arg) goto done; args[n++] = &ptr11;
+ if (&ptr12 == &no_arg) goto done; args[n++] = &ptr12;
+ if (&ptr13 == &no_arg) goto done; args[n++] = &ptr13;
+ if (&ptr14 == &no_arg) goto done; args[n++] = &ptr14;
+ if (&ptr15 == &no_arg) goto done; args[n++] = &ptr15;
+ if (&ptr16 == &no_arg) goto done; args[n++] = &ptr16;
+ done:
+
+ int consumed;
+ int vec[kVecSize];
+ return DoMatchImpl(text, ANCHOR_BOTH, &consumed, args, n, vec, kVecSize);
+}
+
+bool RE::PartialMatch(const StringPiece& text,
+ const Arg& ptr1,
+ const Arg& ptr2,
+ const Arg& ptr3,
+ const Arg& ptr4,
+ const Arg& ptr5,
+ const Arg& ptr6,
+ const Arg& ptr7,
+ const Arg& ptr8,
+ const Arg& ptr9,
+ const Arg& ptr10,
+ const Arg& ptr11,
+ const Arg& ptr12,
+ const Arg& ptr13,
+ const Arg& ptr14,
+ const Arg& ptr15,
+ const Arg& ptr16) const {
+ const Arg* args[kMaxArgs];
+ int n = 0;
+ if (&ptr1 == &no_arg) goto done; args[n++] = &ptr1;
+ if (&ptr2 == &no_arg) goto done; args[n++] = &ptr2;
+ if (&ptr3 == &no_arg) goto done; args[n++] = &ptr3;
+ if (&ptr4 == &no_arg) goto done; args[n++] = &ptr4;
+ if (&ptr5 == &no_arg) goto done; args[n++] = &ptr5;
+ if (&ptr6 == &no_arg) goto done; args[n++] = &ptr6;
+ if (&ptr7 == &no_arg) goto done; args[n++] = &ptr7;
+ if (&ptr8 == &no_arg) goto done; args[n++] = &ptr8;
+ if (&ptr9 == &no_arg) goto done; args[n++] = &ptr9;
+ if (&ptr10 == &no_arg) goto done; args[n++] = &ptr10;
+ if (&ptr11 == &no_arg) goto done; args[n++] = &ptr11;
+ if (&ptr12 == &no_arg) goto done; args[n++] = &ptr12;
+ if (&ptr13 == &no_arg) goto done; args[n++] = &ptr13;
+ if (&ptr14 == &no_arg) goto done; args[n++] = &ptr14;
+ if (&ptr15 == &no_arg) goto done; args[n++] = &ptr15;
+ if (&ptr16 == &no_arg) goto done; args[n++] = &ptr16;
+ done:
+
+ int consumed;
+ int vec[kVecSize];
+ return DoMatchImpl(text, UNANCHORED, &consumed, args, n, vec, kVecSize);
+}
+
+bool RE::Consume(StringPiece* input,
+ const Arg& ptr1,
+ const Arg& ptr2,
+ const Arg& ptr3,
+ const Arg& ptr4,
+ const Arg& ptr5,
+ const Arg& ptr6,
+ const Arg& ptr7,
+ const Arg& ptr8,
+ const Arg& ptr9,
+ const Arg& ptr10,
+ const Arg& ptr11,
+ const Arg& ptr12,
+ const Arg& ptr13,
+ const Arg& ptr14,
+ const Arg& ptr15,
+ const Arg& ptr16) const {
+ const Arg* args[kMaxArgs];
+ int n = 0;
+ if (&ptr1 == &no_arg) goto done; args[n++] = &ptr1;
+ if (&ptr2 == &no_arg) goto done; args[n++] = &ptr2;
+ if (&ptr3 == &no_arg) goto done; args[n++] = &ptr3;
+ if (&ptr4 == &no_arg) goto done; args[n++] = &ptr4;
+ if (&ptr5 == &no_arg) goto done; args[n++] = &ptr5;
+ if (&ptr6 == &no_arg) goto done; args[n++] = &ptr6;
+ if (&ptr7 == &no_arg) goto done; args[n++] = &ptr7;
+ if (&ptr8 == &no_arg) goto done; args[n++] = &ptr8;
+ if (&ptr9 == &no_arg) goto done; args[n++] = &ptr9;
+ if (&ptr10 == &no_arg) goto done; args[n++] = &ptr10;
+ if (&ptr11 == &no_arg) goto done; args[n++] = &ptr11;
+ if (&ptr12 == &no_arg) goto done; args[n++] = &ptr12;
+ if (&ptr13 == &no_arg) goto done; args[n++] = &ptr13;
+ if (&ptr14 == &no_arg) goto done; args[n++] = &ptr14;
+ if (&ptr15 == &no_arg) goto done; args[n++] = &ptr15;
+ if (&ptr16 == &no_arg) goto done; args[n++] = &ptr16;
+ done:
+
+ int consumed;
+ int vec[kVecSize];
+ if (DoMatchImpl(*input, ANCHOR_START, &consumed,
+ args, n, vec, kVecSize)) {
+ input->remove_prefix(consumed);
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool RE::FindAndConsume(StringPiece* input,
+ const Arg& ptr1,
+ const Arg& ptr2,
+ const Arg& ptr3,
+ const Arg& ptr4,
+ const Arg& ptr5,
+ const Arg& ptr6,
+ const Arg& ptr7,
+ const Arg& ptr8,
+ const Arg& ptr9,
+ const Arg& ptr10,
+ const Arg& ptr11,
+ const Arg& ptr12,
+ const Arg& ptr13,
+ const Arg& ptr14,
+ const Arg& ptr15,
+ const Arg& ptr16) const {
+ const Arg* args[kMaxArgs];
+ int n = 0;
+ if (&ptr1 == &no_arg) goto done; args[n++] = &ptr1;
+ if (&ptr2 == &no_arg) goto done; args[n++] = &ptr2;
+ if (&ptr3 == &no_arg) goto done; args[n++] = &ptr3;
+ if (&ptr4 == &no_arg) goto done; args[n++] = &ptr4;
+ if (&ptr5 == &no_arg) goto done; args[n++] = &ptr5;
+ if (&ptr6 == &no_arg) goto done; args[n++] = &ptr6;
+ if (&ptr7 == &no_arg) goto done; args[n++] = &ptr7;
+ if (&ptr8 == &no_arg) goto done; args[n++] = &ptr8;
+ if (&ptr9 == &no_arg) goto done; args[n++] = &ptr9;
+ if (&ptr10 == &no_arg) goto done; args[n++] = &ptr10;
+ if (&ptr11 == &no_arg) goto done; args[n++] = &ptr11;
+ if (&ptr12 == &no_arg) goto done; args[n++] = &ptr12;
+ if (&ptr13 == &no_arg) goto done; args[n++] = &ptr13;
+ if (&ptr14 == &no_arg) goto done; args[n++] = &ptr14;
+ if (&ptr15 == &no_arg) goto done; args[n++] = &ptr15;
+ if (&ptr16 == &no_arg) goto done; args[n++] = &ptr16;
+ done:
+
+ int consumed;
+ int vec[kVecSize];
+ if (DoMatchImpl(*input, UNANCHORED, &consumed,
+ args, n, vec, kVecSize)) {
+ input->remove_prefix(consumed);
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool RE::Replace(const StringPiece& rewrite,
+ string *str) const {
+ int vec[kVecSize];
+ int matches = TryMatch(*str, 0, UNANCHORED, vec, kVecSize);
+ if (matches == 0)
+ return false;
+
+ string s;
+ if (!Rewrite(&s, rewrite, *str, vec, matches))
+ return false;
+
+ assert(vec[0] >= 0);
+ assert(vec[1] >= 0);
+ str->replace(vec[0], vec[1] - vec[0], s);
+ return true;
+}
+
+// Returns PCRE_NEWLINE_CRLF, PCRE_NEWLINE_CR, or PCRE_NEWLINE_LF.
+// Note that PCRE_NEWLINE_CRLF is defined to be P_N_CR | P_N_LF.
+// Modified by PH to add PCRE_NEWLINE_ANY and PCRE_NEWLINE_ANYCRLF.
+
+static int NewlineMode(int pcre_options) {
+ // TODO: if we can make it threadsafe, cache this var
+ int newline_mode = 0;
+ /* if (newline_mode) return newline_mode; */ // do this once it's cached
+ if (pcre_options & (PCRE_NEWLINE_CRLF|PCRE_NEWLINE_CR|PCRE_NEWLINE_LF|
+ PCRE_NEWLINE_ANY|PCRE_NEWLINE_ANYCRLF)) {
+ newline_mode = (pcre_options &
+ (PCRE_NEWLINE_CRLF|PCRE_NEWLINE_CR|PCRE_NEWLINE_LF|
+ PCRE_NEWLINE_ANY|PCRE_NEWLINE_ANYCRLF));
+ } else {
+ int newline;
+ pcre_config(PCRE_CONFIG_NEWLINE, &newline);
+ if (newline == 10)
+ newline_mode = PCRE_NEWLINE_LF;
+ else if (newline == 13)
+ newline_mode = PCRE_NEWLINE_CR;
+    else if (newline == 3338)   /* 3338 == (13 << 8) | 10, i.e. CRLF */
+ newline_mode = PCRE_NEWLINE_CRLF;
+ else if (newline == -1)
+ newline_mode = PCRE_NEWLINE_ANY;
+ else if (newline == -2)
+ newline_mode = PCRE_NEWLINE_ANYCRLF;
+ else
+ assert( ! "Unexpected return value from pcre_config(NEWLINE)");
+ }
+ return newline_mode;
+}
+
+int RE::GlobalReplace(const StringPiece& rewrite,
+ string *str) const {
+ int count = 0;
+ int vec[kVecSize];
+ string out;
+ int start = 0;
+ int lastend = -1;
+
+ for (; start <= static_cast<int>(str->length()); count++) {
+ int matches = TryMatch(*str, start, UNANCHORED, vec, kVecSize);
+ if (matches <= 0)
+ break;
+ int matchstart = vec[0], matchend = vec[1];
+ assert(matchstart >= start);
+ assert(matchend >= matchstart);
+ if (matchstart == matchend && matchstart == lastend) {
+ // advance one character if we matched an empty string at the same
+ // place as the last match occurred
+ matchend = start + 1;
+ // If the current char is CR and we're in CRLF mode, skip LF too.
+ // Note it's better to call pcre_fullinfo() than to examine
+      // all_options(), since options_ could have changed between
+ // compile-time and now, but this is simpler and safe enough.
+ // Modified by PH to add ANY and ANYCRLF.
+ if (start+1 < static_cast<int>(str->length()) &&
+ (*str)[start] == '\r' && (*str)[start+1] == '\n' &&
+ (NewlineMode(options_.all_options()) == PCRE_NEWLINE_CRLF ||
+ NewlineMode(options_.all_options()) == PCRE_NEWLINE_ANY ||
+ NewlineMode(options_.all_options()) == PCRE_NEWLINE_ANYCRLF)
+ ) {
+ matchend++;
+ }
+ // We also need to advance more than one char if we're in utf8 mode.
+#ifdef SUPPORT_UTF8
+ if (options_.utf8()) {
+ while (matchend < static_cast<int>(str->length()) &&
+ ((*str)[matchend] & 0xc0) == 0x80)
+ matchend++;
+ }
+#endif
+ if (matchend <= static_cast<int>(str->length()))
+ out.append(*str, start, matchend - start);
+ start = matchend;
+ } else {
+ out.append(*str, start, matchstart - start);
+ Rewrite(&out, rewrite, *str, vec, matches);
+ start = matchend;
+ lastend = matchend;
+ count++;
+ }
+ }
+
+ if (count == 0)
+ return 0;
+
+ if (start < static_cast<int>(str->length()))
+ out.append(*str, start, str->length() - start);
+ swap(out, *str);
+ return count;
+}
+
+bool RE::Extract(const StringPiece& rewrite,
+ const StringPiece& text,
+ string *out) const {
+ int vec[kVecSize];
+ int matches = TryMatch(text, 0, UNANCHORED, vec, kVecSize);
+ if (matches == 0)
+ return false;
+ out->erase();
+ return Rewrite(out, rewrite, text, vec, matches);
+}
+
+/*static*/ string RE::QuoteMeta(const StringPiece& unquoted) {
+ string result;
+
+ // Escape any ascii character not in [A-Za-z_0-9].
+ //
+ // Note that it's legal to escape a character even if it has no
+ // special meaning in a regular expression -- so this function does
+ // that. (This also makes it identical to the perl function of the
+ // same name; see `perldoc -f quotemeta`.)
+ for (int ii = 0; ii < unquoted.size(); ++ii) {
+ // Note that using 'isalnum' here raises the benchmark time from
+ // 32ns to 58ns:
+ if ((unquoted[ii] < 'a' || unquoted[ii] > 'z') &&
+ (unquoted[ii] < 'A' || unquoted[ii] > 'Z') &&
+ (unquoted[ii] < '0' || unquoted[ii] > '9') &&
+ unquoted[ii] != '_' &&
+         // If this is part of a UTF8 or Latin1 character, we need
+ // to copy this byte without escaping. Experimentally this is
+ // what works correctly with the regexp library.
+ !(unquoted[ii] & 128)) {
+ result += '\\';
+ }
+ result += unquoted[ii];
+ }
+
+ return result;
+}
+
+/***** Actual matching and rewriting code *****/
+
+int RE::TryMatch(const StringPiece& text,
+ int startpos,
+ Anchor anchor,
+ int *vec,
+ int vecsize) const {
+ pcre* re = (anchor == ANCHOR_BOTH) ? re_full_ : re_partial_;
+ if (re == NULL) {
+ //fprintf(stderr, "Matching against invalid re: %s\n", error_->c_str());
+ return 0;
+ }
+
+ pcre_extra extra = { 0, 0, 0, 0, 0, 0 };
+ if (options_.match_limit() > 0) {
+ extra.flags |= PCRE_EXTRA_MATCH_LIMIT;
+ extra.match_limit = options_.match_limit();
+ }
+ if (options_.match_limit_recursion() > 0) {
+ extra.flags |= PCRE_EXTRA_MATCH_LIMIT_RECURSION;
+ extra.match_limit_recursion = options_.match_limit_recursion();
+ }
+ int rc = pcre_exec(re, // The regular expression object
+ &extra,
+ (text.data() == NULL) ? "" : text.data(),
+ text.size(),
+ startpos,
+ (anchor == UNANCHORED) ? 0 : PCRE_ANCHORED,
+ vec,
+ vecsize);
+
+ // Handle errors
+ if (rc == PCRE_ERROR_NOMATCH) {
+ return 0;
+ } else if (rc < 0) {
+ //fprintf(stderr, "Unexpected return code: %d when matching '%s'\n",
+ // re, pattern_.c_str());
+ return 0;
+ } else if (rc == 0) {
+ // pcre_exec() returns 0 as a special case when the number of
+ // capturing subpatterns exceeds the size of the vector.
+ // When this happens, there is a match and the output vector
+ // is filled, but we miss out on the positions of the extra subpatterns.
+ rc = vecsize / 2;
+ }
+
+ return rc;
+}
+
+bool RE::DoMatchImpl(const StringPiece& text,
+ Anchor anchor,
+ int* consumed,
+ const Arg* const* args,
+ int n,
+ int* vec,
+ int vecsize) const {
+ assert((1 + n) * 3 <= vecsize); // results + PCRE workspace
+ int matches = TryMatch(text, 0, anchor, vec, vecsize);
+ assert(matches >= 0); // TryMatch never returns negatives
+ if (matches == 0)
+ return false;
+
+ *consumed = vec[1];
+
+ if (n == 0 || args == NULL) {
+ // We are not interested in results
+ return true;
+ }
+
+ if (NumberOfCapturingGroups() < n) {
+ // RE has fewer capturing groups than number of arg pointers passed in
+ return false;
+ }
+
+ // If we got here, we must have matched the whole pattern.
+ // We do not need (can not do) any more checks on the value of 'matches' here
+ // -- see the comment for TryMatch.
+ for (int i = 0; i < n; i++) {
+ const int start = vec[2*(i+1)];
+ const int limit = vec[2*(i+1)+1];
+ if (!args[i]->Parse(text.data() + start, limit-start)) {
+ // TODO: Should we indicate what the error was?
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool RE::DoMatch(const StringPiece& text,
+ Anchor anchor,
+ int* consumed,
+ const Arg* const args[],
+ int n) const {
+ assert(n >= 0);
+ size_t const vecsize = (1 + n) * 3; // results + PCRE workspace
+ // (as for kVecSize)
+ int space[21]; // use stack allocation for small vecsize (common case)
+ int* vec = vecsize <= 21 ? space : new int[vecsize];
+ bool retval = DoMatchImpl(text, anchor, consumed, args, n, vec, vecsize);
+ if (vec != space) delete [] vec;
+ return retval;
+}
+
+bool RE::Rewrite(string *out, const StringPiece &rewrite,
+ const StringPiece &text, int *vec, int veclen) const {
+ for (const char *s = rewrite.data(), *end = s + rewrite.size();
+ s < end; s++) {
+ int c = *s;
+ if (c == '\\') {
+ c = *++s;
+ if (isdigit(c)) {
+ int n = (c - '0');
+ if (n >= veclen) {
+          //fprintf(stderr, "requested group %d in regexp %.*s\n",
+ // n, rewrite.size(), rewrite.data());
+ return false;
+ }
+ int start = vec[2 * n];
+ if (start >= 0)
+ out->append(text.data() + start, vec[2 * n + 1] - start);
+ } else if (c == '\\') {
+ out->push_back('\\');
+ } else {
+ //fprintf(stderr, "invalid rewrite pattern: %.*s\n",
+ // rewrite.size(), rewrite.data());
+ return false;
+ }
+ } else {
+ out->push_back(c);
+ }
+ }
+ return true;
+}
+
+// Return the number of capturing subpatterns, or -1 if the
+// regexp wasn't valid on construction.
+int RE::NumberOfCapturingGroups() const {
+ if (re_partial_ == NULL) return -1;
+
+ int result;
+ int pcre_retval = pcre_fullinfo(re_partial_, // The regular expression object
+ NULL, // We did not study the pattern
+ PCRE_INFO_CAPTURECOUNT,
+ &result);
+ assert(pcre_retval == 0);
+ return result;
+}
+
+/***** Parsers for various types *****/
+
+bool Arg::parse_null(const char* str, int n, void* dest) {
+ // We fail if somebody asked us to store into a non-NULL void* pointer
+ return (dest == NULL);
+}
+
+bool Arg::parse_string(const char* str, int n, void* dest) {
+ reinterpret_cast<string*>(dest)->assign(str, n);
+ return true;
+}
+
+bool Arg::parse_stringpiece(const char* str, int n, void* dest) {
+ reinterpret_cast<StringPiece*>(dest)->set(str, n);
+ return true;
+}
+
+bool Arg::parse_char(const char* str, int n, void* dest) {
+ if (n != 1) return false;
+ *(reinterpret_cast<char*>(dest)) = str[0];
+ return true;
+}
+
+bool Arg::parse_uchar(const char* str, int n, void* dest) {
+ if (n != 1) return false;
+ *(reinterpret_cast<unsigned char*>(dest)) = str[0];
+ return true;
+}
+
+// Largest number spec that we are willing to parse
+static const int kMaxNumberLength = 32;
+
+// REQUIRES "buf" must have length at least kMaxNumberLength+1
+// REQUIRES "n > 0"
+// Copies "str" into "buf" and null-terminates if necessary.
+// Returns one of:
+// a. "str" if no termination is needed
+// b. "buf" if the string was copied and null-terminated
+// c. "" if the input was invalid and has no hope of being parsed
+static const char* TerminateNumber(char* buf, const char* str, int n) {
+ if ((n > 0) && isspace(*str)) {
+ // We are less forgiving than the strtoxxx() routines and do not
+ // allow leading spaces.
+ return "";
+ }
+
+ // See if the character right after the input text may potentially
+ // look like a digit.
+ if (isdigit(str[n]) ||
+ ((str[n] >= 'a') && (str[n] <= 'f')) ||
+ ((str[n] >= 'A') && (str[n] <= 'F'))) {
+ if (n > kMaxNumberLength) return ""; // Input too big to be a valid number
+ memcpy(buf, str, n);
+ buf[n] = '\0';
+ return buf;
+ } else {
+ // We can parse right out of the supplied string, so return it.
+ return str;
+ }
+}
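+
+// Added note (illustrative, not from the original source): if the matched
+// digits "1234" are followed by ',' in the subject, str[n] is not a hex
+// digit, so case (a) applies and str is returned unchanged; if they are
+// followed by '5', the digits are copied into buf and NUL-terminated
+// (case (b)); leading whitespace, or a too-long match that would need
+// copying, yields case (c).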
+
+bool Arg::parse_long_radix(const char* str,
+ int n,
+ void* dest,
+ int radix) {
+ if (n == 0) return false;
+ char buf[kMaxNumberLength+1];
+ str = TerminateNumber(buf, str, n);
+ char* end;
+ errno = 0;
+ long r = strtol(str, &end, radix);
+ if (end != str + n) return false; // Leftover junk
+ if (errno) return false;
+ *(reinterpret_cast<long*>(dest)) = r;
+ return true;
+}
+
+bool Arg::parse_ulong_radix(const char* str,
+ int n,
+ void* dest,
+ int radix) {
+ if (n == 0) return false;
+ char buf[kMaxNumberLength+1];
+ str = TerminateNumber(buf, str, n);
+ if (str[0] == '-') return false; // strtoul() on a negative number?!
+ char* end;
+ errno = 0;
+ unsigned long r = strtoul(str, &end, radix);
+ if (end != str + n) return false; // Leftover junk
+ if (errno) return false;
+ *(reinterpret_cast<unsigned long*>(dest)) = r;
+ return true;
+}
+
+bool Arg::parse_short_radix(const char* str,
+ int n,
+ void* dest,
+ int radix) {
+ long r;
+ if (!parse_long_radix(str, n, &r, radix)) return false; // Could not parse
+ if (r < SHRT_MIN || r > SHRT_MAX) return false; // Out of range
+ *(reinterpret_cast<short*>(dest)) = static_cast<short>(r);
+ return true;
+}
+
+bool Arg::parse_ushort_radix(const char* str,
+ int n,
+ void* dest,
+ int radix) {
+ unsigned long r;
+ if (!parse_ulong_radix(str, n, &r, radix)) return false; // Could not parse
+ if (r > USHRT_MAX) return false; // Out of range
+ *(reinterpret_cast<unsigned short*>(dest)) = static_cast<unsigned short>(r);
+ return true;
+}
+
+bool Arg::parse_int_radix(const char* str,
+ int n,
+ void* dest,
+ int radix) {
+ long r;
+ if (!parse_long_radix(str, n, &r, radix)) return false; // Could not parse
+ if (r < INT_MIN || r > INT_MAX) return false; // Out of range
+ *(reinterpret_cast<int*>(dest)) = r;
+ return true;
+}
+
+bool Arg::parse_uint_radix(const char* str,
+ int n,
+ void* dest,
+ int radix) {
+ unsigned long r;
+ if (!parse_ulong_radix(str, n, &r, radix)) return false; // Could not parse
+ if (r > UINT_MAX) return false; // Out of range
+ *(reinterpret_cast<unsigned int*>(dest)) = r;
+ return true;
+}
+
+bool Arg::parse_longlong_radix(const char* str,
+ int n,
+ void* dest,
+ int radix) {
+#ifndef HAVE_LONG_LONG
+ return false;
+#else
+ if (n == 0) return false;
+ char buf[kMaxNumberLength+1];
+ str = TerminateNumber(buf, str, n);
+ char* end;
+ errno = 0;
+#if defined HAVE_STRTOQ
+ long long r = strtoq(str, &end, radix);
+#elif defined HAVE_STRTOLL
+ long long r = strtoll(str, &end, radix);
+#elif defined HAVE__STRTOI64
+ long long r = _strtoi64(str, &end, radix);
+#else
+#error parse_longlong_radix: cannot convert input to a long-long
+#endif
+ if (end != str + n) return false; // Leftover junk
+ if (errno) return false;
+ *(reinterpret_cast<long long*>(dest)) = r;
+ return true;
+#endif /* HAVE_LONG_LONG */
+}
+
+bool Arg::parse_ulonglong_radix(const char* str,
+ int n,
+ void* dest,
+ int radix) {
+#ifndef HAVE_UNSIGNED_LONG_LONG
+ return false;
+#else
+ if (n == 0) return false;
+ char buf[kMaxNumberLength+1];
+ str = TerminateNumber(buf, str, n);
+ if (str[0] == '-') return false; // strtoull() on a negative number?!
+ char* end;
+ errno = 0;
+#if defined HAVE_STRTOQ
+ unsigned long long r = strtouq(str, &end, radix);
+#elif defined HAVE_STRTOLL
+ unsigned long long r = strtoull(str, &end, radix);
+#elif defined HAVE__STRTOI64
+ unsigned long long r = _strtoui64(str, &end, radix);
+#else
+#error parse_ulonglong_radix: cannot convert input to a long-long
+#endif
+ if (end != str + n) return false; // Leftover junk
+ if (errno) return false;
+ *(reinterpret_cast<unsigned long long*>(dest)) = r;
+ return true;
+#endif /* HAVE_UNSIGNED_LONG_LONG */
+}
+
+bool Arg::parse_double(const char* str, int n, void* dest) {
+ if (n == 0) return false;
+ static const int kMaxLength = 200;
+ char buf[kMaxLength];
+ if (n >= kMaxLength) return false;
+ memcpy(buf, str, n);
+ buf[n] = '\0';
+ errno = 0;
+ char* end;
+ double r = strtod(buf, &end);
+ if (end != buf + n) return false; // Leftover junk
+ if (errno) return false;
+ *(reinterpret_cast<double*>(dest)) = r;
+ return true;
+}
+
+bool Arg::parse_float(const char* str, int n, void* dest) {
+ double r;
+ if (!parse_double(str, n, &r)) return false;
+ *(reinterpret_cast<float*>(dest)) = static_cast<float>(r);
+ return true;
+}
+
+
+#define DEFINE_INTEGER_PARSERS(name) \
+ bool Arg::parse_##name(const char* str, int n, void* dest) { \
+ return parse_##name##_radix(str, n, dest, 10); \
+ } \
+ bool Arg::parse_##name##_hex(const char* str, int n, void* dest) { \
+ return parse_##name##_radix(str, n, dest, 16); \
+ } \
+ bool Arg::parse_##name##_octal(const char* str, int n, void* dest) { \
+ return parse_##name##_radix(str, n, dest, 8); \
+ } \
+ bool Arg::parse_##name##_cradix(const char* str, int n, void* dest) { \
+ return parse_##name##_radix(str, n, dest, 0); \
+ }
+
+DEFINE_INTEGER_PARSERS(short) /* */
+DEFINE_INTEGER_PARSERS(ushort) /* */
+DEFINE_INTEGER_PARSERS(int) /* Don't use semicolons after these */
+DEFINE_INTEGER_PARSERS(uint) /* statements because they can cause */
+DEFINE_INTEGER_PARSERS(long) /* compiler warnings if the checking */
+DEFINE_INTEGER_PARSERS(ulong) /* level is turned up high enough. */
+DEFINE_INTEGER_PARSERS(longlong) /* */
+DEFINE_INTEGER_PARSERS(ulonglong) /* */
+
+#undef DEFINE_INTEGER_PARSERS
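+
+// Added note (illustrative, not from the original source): each invocation
+// above expands to four thin wrappers; DEFINE_INTEGER_PARSERS(int), for
+// example, defines Arg::parse_int, Arg::parse_int_hex, Arg::parse_int_octal
+// and Arg::parse_int_cradix, which forward to Arg::parse_int_radix with
+// radix 10, 16, 8 and 0 respectively (radix 0 lets strtol() infer the base
+// from a "0" or "0x" prefix).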
+
+} // namespace pcrecpp
diff --git a/src/third_party/pcre-7.4/pcrecpp.h b/src/third_party/pcre-7.4/pcrecpp.h
new file mode 100644
index 00000000000..5a0d5977db5
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcrecpp.h
@@ -0,0 +1,700 @@
+// Copyright (c) 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Sanjay Ghemawat
+// Support for PCRE_XXX modifiers added by Giuseppe Maxia, July 2005
+
+#ifndef _PCRECPP_H
+#define _PCRECPP_H
+
+// C++ interface to the pcre regular-expression library. RE supports
+// Perl-style regular expressions (with extensions like \d, \w, \s,
+// ...).
+//
+// -----------------------------------------------------------------------
+// REGEXP SYNTAX:
+//
+// This module is part of the pcre library and hence supports its syntax
+// for regular expressions.
+//
+// The syntax is pretty similar to Perl's. For those not familiar
+// with Perl's regular expressions, here are some examples of the most
+// commonly used extensions:
+//
+// "hello (\\w+) world" -- \w matches a "word" character
+// "version (\\d+)" -- \d matches a digit
+// "hello\\s+world" -- \s matches any whitespace character
+// "\\b(\\w+)\\b" -- \b matches empty string at a word boundary
+// "(?i)hello" -- (?i) turns on case-insensitive matching
+// "/\\*(.*?)\\*/" -- .*? matches . minimum no. of times possible
+//
+// -----------------------------------------------------------------------
+// MATCHING INTERFACE:
+//
+// The "FullMatch" operation checks that supplied text matches a
+// supplied pattern exactly.
+//
+// Example: successful match
+// pcrecpp::RE re("h.*o");
+// re.FullMatch("hello");
+//
+// Example: unsuccessful match (requires full match):
+// pcrecpp::RE re("e");
+// !re.FullMatch("hello");
+//
+// Example: creating a temporary RE object:
+// pcrecpp::RE("h.*o").FullMatch("hello");
+//
+// You can pass in a "const char*" or a "string" for "text". The
+// examples below tend to use a const char*.
+//
+// You can, as in the different examples above, store the RE object
+// explicitly in a variable or use a temporary RE object. The
+// examples below use one mode or the other arbitrarily. Either
+// could correctly be used for any of these examples.
+//
+// -----------------------------------------------------------------------
+// MATCHING WITH SUB-STRING EXTRACTION:
+//
+// You can supply extra pointer arguments to extract matched subpieces.
+//
+// Example: extracts "ruby" into "s" and 1234 into "i"
+// int i;
+// string s;
+// pcrecpp::RE re("(\\w+):(\\d+)");
+// re.FullMatch("ruby:1234", &s, &i);
+//
+// Example: does not try to extract any extra sub-patterns
+// re.FullMatch("ruby:1234", &s);
+//
+// Example: does not try to extract into NULL
+// re.FullMatch("ruby:1234", NULL, &i);
+//
+// Example: integer overflow causes failure
+// !re.FullMatch("ruby:1234567891234", NULL, &i);
+//
+// Example: fails because there aren't enough sub-patterns:
+// !pcrecpp::RE("\\w+:\\d+").FullMatch("ruby:1234", &s);
+//
+// Example: fails because string cannot be stored in integer
+// !pcrecpp::RE("(.*)").FullMatch("ruby", &i);
+//
+// The provided pointer arguments can be pointers to any scalar numeric
+// type, or one of
+// string (matched piece is copied to string)
+// StringPiece (StringPiece is mutated to point to matched piece)
+// T (where "bool T::ParseFrom(const char*, int)" exists)
+// NULL (the corresponding matched sub-pattern is not copied)
+//
+// CAVEAT: An optional sub-pattern that does not exist in the matched
+// string is assigned the empty string. Therefore, the following will
+// return false (because the empty string is not a valid number):
+// int number;
+// pcrecpp::RE::FullMatch("abc", "[a-z]+(\\d+)?", &number);
+//
+// -----------------------------------------------------------------------
+// DO_MATCH
+//
+// The matching interface supports at most 16 arguments per call.
+// If you need more, consider using the more general interface
+// pcrecpp::RE::DoMatch(). See pcrecpp.h for the signature for DoMatch.
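+//
+// Example (an illustrative sketch added here, not from the original
+// documentation; it assumes the Arg constructors declared in pcrecpparg.h):
+//    string s;
+//    int i;
+//    pcrecpp::Arg a1(&s), a2(&i);
+//    const pcrecpp::Arg* args[] = { &a1, &a2 };
+//    int consumed;
+//    pcrecpp::RE re("(\\w+):(\\d+)");
+//    re.DoMatch("ruby:1234", pcrecpp::RE::ANCHOR_BOTH, &consumed, args, 2);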
+//
+// -----------------------------------------------------------------------
+// PARTIAL MATCHES
+//
+// You can use the "PartialMatch" operation when you want the pattern
+// to match any substring of the text.
+//
+// Example: simple search for a string:
+// pcrecpp::RE("ell").PartialMatch("hello");
+//
+// Example: find first number in a string:
+// int number;
+// pcrecpp::RE re("(\\d+)");
+// re.PartialMatch("x*100 + 20", &number);
+// assert(number == 100);
+//
+// -----------------------------------------------------------------------
+// UTF-8 AND THE MATCHING INTERFACE:
+//
+// By default, pattern and text are plain text, one byte per character.
+// The UTF8 flag, passed to the constructor, causes both pattern
+// and string to be treated as UTF-8 text, still a byte stream but
+// potentially multiple bytes per character. In practice, the text
+// is likelier to be UTF-8 than the pattern, but the match returned
+// may depend on the UTF8 flag, so always use it when matching
+// UTF8 text. E.g., "." will match one byte normally but with UTF8
+// set may match up to three bytes of a multi-byte character.
+//
+// Example:
+// pcrecpp::RE_Options options;
+// options.set_utf8();
+// pcrecpp::RE re(utf8_pattern, options);
+// re.FullMatch(utf8_string);
+//
+// Example: using the convenience function UTF8():
+// pcrecpp::RE re(utf8_pattern, pcrecpp::UTF8());
+// re.FullMatch(utf8_string);
+//
+// NOTE: The UTF8 option is ignored if pcre was not configured with the
+// --enable-utf8 flag.
+//
+// -----------------------------------------------------------------------
+// PASSING MODIFIERS TO THE REGULAR EXPRESSION ENGINE
+//
+// PCRE defines some modifiers to change the behavior of the regular
+// expression engine.
+// The C++ wrapper defines an auxiliary class, RE_Options, as a vehicle
+// to pass such modifiers to a RE class.
+//
+// Currently, the following modifiers are supported
+//
+// modifier description Perl corresponding
+//
+// PCRE_CASELESS case insensitive match /i
+// PCRE_MULTILINE multiple lines match /m
+// PCRE_DOTALL dot matches newlines /s
+// PCRE_DOLLAR_ENDONLY $ matches only at end N/A
+// PCRE_EXTRA strict escape parsing N/A
+// PCRE_EXTENDED ignore whitespaces /x
+// PCRE_UTF8 handles UTF8 chars built-in
+// PCRE_UNGREEDY reverses * and *? N/A
+// PCRE_NO_AUTO_CAPTURE disables matching parens N/A (*)
+//
+// (For a full account on how each modifier works, please check the
+// PCRE API reference manual).
+//
+// (*) Both Perl and PCRE allow non-matching parentheses by means of the
+// "?:" modifier within the pattern itself. e.g. (?:ab|cd) does not
+// capture, while (ab|cd) does.
+//
+// For each modifier, there are two member functions whose name is made
+// out of the modifier in lowercase, without the "PCRE_" prefix. For
+// instance, PCRE_CASELESS is handled by
+// bool caseless(),
+// which returns true if the modifier is set, and
+// RE_Options & set_caseless(bool),
+// which sets or unsets the modifier.
+//
+// Moreover, PCRE_EXTRA_MATCH_LIMIT can be accessed through the
+// set_match_limit() and match_limit() member functions.
+// Setting match_limit to a non-zero value will limit the execution of
+// pcre to keep it from doing bad things like blowing the stack or taking
+// an eternity to return a result. A value of 5000 is good enough to stop
+// stack blowup in a 2MB thread stack. Setting match_limit to zero will
+// disable match limiting. Alternately, you can set match_limit_recursion()
+// which uses PCRE_EXTRA_MATCH_LIMIT_RECURSION to limit how much pcre
+// recurses. match_limit() caps the number of matches pcre does;
+// match_limit_recursion() caps the depth of recursion.
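+//
+// Example (an illustrative sketch added here, not from the original
+// documentation):
+//    pcrecpp::RE_Options opt;
+//    opt.set_match_limit(5000);
+//    opt.set_match_limit_recursion(1000);
+//    if (pcrecpp::RE("(a+)+b", opt).PartialMatch(sometext)) ...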
+//
+// Normally, to pass one or more modifiers to a RE class, you declare
+// a RE_Options object, set the appropriate options, and pass this
+// object to a RE constructor. Example:
+//
+//   RE_Options opt;
+// opt.set_caseless(true);
+//
+// if (RE("HELLO", opt).PartialMatch("hello world")) ...
+//
+// RE_Options has two constructors. The default constructor takes no
+// arguments and creates a set of flags that are off by default.
+//
+// The optional parameter 'option_flags' is to facilitate transfer
+// of legacy code from C programs. This lets you do
+// RE(pattern, RE_Options(PCRE_CASELESS|PCRE_MULTILINE)).PartialMatch(str);
+//
+// But new code is better off doing
+// RE(pattern,
+// RE_Options().set_caseless(true).set_multiline(true)).PartialMatch(str);
+// (See below)
+//
+// If you are going to pass one of the most used modifiers, there are some
+// convenience functions that return a RE_Options class with the
+// appropriate modifier already set:
+// CASELESS(), UTF8(), MULTILINE(), DOTALL(), EXTENDED()
+//
+// If you need to set several options at once, and you don't want to go
+// through the pains of declaring a RE_Options object and setting several
+// options, there is a parallel method that gives you this ability on the
+// fly. You can concatenate several set_xxxxx member functions, since each
+// of them returns a reference to its class object. e.g.: to pass
+// PCRE_CASELESS, PCRE_EXTENDED, and PCRE_MULTILINE to a RE with one
+// statement, you may write
+//
+// RE(" ^ xyz \\s+ .* blah$", RE_Options()
+// .set_caseless(true)
+// .set_extended(true)
+// .set_multiline(true)).PartialMatch(sometext);
+//
+// -----------------------------------------------------------------------
+// SCANNING TEXT INCREMENTALLY
+//
+// The "Consume" operation may be useful if you want to repeatedly
+// match regular expressions at the front of a string and skip over
+// them as they match. This requires use of the "StringPiece" type,
+// which represents a sub-range of a real string. Like RE, StringPiece
+// is defined in the pcrecpp namespace.
+//
+// Example: read lines of the form "var = value" from a string.
+// string contents = ...; // Fill string somehow
+// pcrecpp::StringPiece input(contents); // Wrap in a StringPiece
+//
+// string var;
+// int value;
+// pcrecpp::RE re("(\\w+) = (\\d+)\n");
+// while (re.Consume(&input, &var, &value)) {
+// ...;
+// }
+//
+// Each successful call to "Consume" will set "var/value", and also
+// advance "input" so it points past the matched text.
+//
+// The "FindAndConsume" operation is similar to "Consume" but does not
+// anchor your match at the beginning of the string. For example, you
+// could extract all words from a string by repeatedly calling
+// pcrecpp::RE("(\\w+)").FindAndConsume(&input, &word)
+//
+// -----------------------------------------------------------------------
+// PARSING HEX/OCTAL/C-RADIX NUMBERS
+//
+// By default, if you pass a pointer to a numeric value, the
+// corresponding text is interpreted as a base-10 number. You can
+// instead wrap the pointer with a call to one of the operators Hex(),
+// Octal(), or CRadix() to interpret the text in another base. The
+// CRadix operator interprets C-style "0" (base-8) and "0x" (base-16)
+// prefixes, but defaults to base-10.
+//
+// Example:
+// int a, b, c, d;
+// pcrecpp::RE re("(.*) (.*) (.*) (.*)");
+// re.FullMatch("100 40 0100 0x40",
+// pcrecpp::Octal(&a), pcrecpp::Hex(&b),
+// pcrecpp::CRadix(&c), pcrecpp::CRadix(&d));
+// will leave 64 in a, b, c, and d.
+//
+// -----------------------------------------------------------------------
+// REPLACING PARTS OF STRINGS
+//
+// You can replace the first match of "pattern" in "str" with
+// "rewrite". Within "rewrite", backslash-escaped digits (\1 to \9)
+// can be used to insert text matching corresponding parenthesized
+// group from the pattern. \0 in "rewrite" refers to the entire
+// matching text. E.g.,
+//
+// string s = "yabba dabba doo";
+// pcrecpp::RE("b+").Replace("d", &s);
+//
+// will leave "s" containing "yada dabba doo". The result is true if
+// the pattern matches and a replacement occurs, or false otherwise.
+//
+// GlobalReplace() is like Replace(), except that it replaces all
+// occurrences of the pattern in the string with the rewrite.
+// Replacements are not subject to re-matching. E.g.,
+//
+// string s = "yabba dabba doo";
+// pcrecpp::RE("b+").GlobalReplace("d", &s);
+//
+// will leave "s" containing "yada dada doo". It returns the number
+// of replacements made.
+//
+// Extract() is like Replace(), except that if the pattern matches,
+// "rewrite" is copied into "out" (an additional argument) with
+// substitutions. The non-matching portions of "text" are ignored.
+// Returns true iff a match occurred and the extraction happened
+// successfully. If no match occurs, the string is left unaffected.
+
+
+#include <string>
+#include <pcre.h>
+#include <pcrecpparg.h> // defines the Arg class
+// This isn't technically needed here, but we include it
+// anyway so folks who include pcrecpp.h don't have to.
+#include <pcre_stringpiece.h>
+
+namespace pcrecpp {
+
+#define PCRE_SET_OR_CLEAR(b, o) \
+ if (b) all_options_ |= (o); else all_options_ &= ~(o); \
+ return *this
+
+#define PCRE_IS_SET(o) \
+ (all_options_ & o) == o
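+
+// Added note (illustrative, not from the original source): with these macros,
+// a call such as set_caseless(true) effectively performs
+// "all_options_ |= PCRE_CASELESS; return *this;", which is what makes the
+// chained RE_Options().set_caseless(true).set_multiline(true) style work.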
+
+// We convert user-passed pointers into special Arg objects
+PCRECPP_EXP_DECL Arg no_arg;
+
+/***** Compiling regular expressions: the RE class *****/
+
+// RE_Options allow you to set options to be passed along to pcre,
+// along with other options we put on top of pcre.
+// Only 9 modifiers, plus match_limit and match_limit_recursion,
+// are supported now.
+class RE_Options {
+ public:
+ // constructor
+ RE_Options() : match_limit_(0), match_limit_recursion_(0), all_options_(0) {}
+
+ // alternative constructor.
+ // To facilitate transfer of legacy code from C programs
+ //
+ // This lets you do
+ // RE(pattern, RE_Options(PCRE_CASELESS|PCRE_MULTILINE)).PartialMatch(str);
+ // But new code is better off doing
+ // RE(pattern,
+ // RE_Options().set_caseless(true).set_multiline(true)).PartialMatch(str);
+ RE_Options(int option_flags) : match_limit_(0), match_limit_recursion_(0),
+ all_options_(option_flags) {}
+ // we're fine with the default destructor, copy constructor, etc.
+
+ // accessors and mutators
+ int match_limit() const { return match_limit_; };
+ RE_Options &set_match_limit(int limit) {
+ match_limit_ = limit;
+ return *this;
+ }
+
+ int match_limit_recursion() const { return match_limit_recursion_; };
+ RE_Options &set_match_limit_recursion(int limit) {
+ match_limit_recursion_ = limit;
+ return *this;
+ }
+
+ bool caseless() const {
+ return PCRE_IS_SET(PCRE_CASELESS);
+ }
+ RE_Options &set_caseless(bool x) {
+ PCRE_SET_OR_CLEAR(x, PCRE_CASELESS);
+ }
+
+ bool multiline() const {
+ return PCRE_IS_SET(PCRE_MULTILINE);
+ }
+ RE_Options &set_multiline(bool x) {
+ PCRE_SET_OR_CLEAR(x, PCRE_MULTILINE);
+ }
+
+ bool dotall() const {
+ return PCRE_IS_SET(PCRE_DOTALL);
+ }
+ RE_Options &set_dotall(bool x) {
+ PCRE_SET_OR_CLEAR(x,PCRE_DOTALL);
+ }
+
+ bool extended() const {
+ return PCRE_IS_SET(PCRE_EXTENDED);
+ }
+ RE_Options &set_extended(bool x) {
+ PCRE_SET_OR_CLEAR(x,PCRE_EXTENDED);
+ }
+
+ bool dollar_endonly() const {
+ return PCRE_IS_SET(PCRE_DOLLAR_ENDONLY);
+ }
+ RE_Options &set_dollar_endonly(bool x) {
+ PCRE_SET_OR_CLEAR(x,PCRE_DOLLAR_ENDONLY);
+ }
+
+ bool extra() const {
+ return PCRE_IS_SET( PCRE_EXTRA);
+ }
+ RE_Options &set_extra(bool x) {
+ PCRE_SET_OR_CLEAR(x, PCRE_EXTRA);
+ }
+
+ bool ungreedy() const {
+ return PCRE_IS_SET(PCRE_UNGREEDY);
+ }
+ RE_Options &set_ungreedy(bool x) {
+ PCRE_SET_OR_CLEAR(x, PCRE_UNGREEDY);
+ }
+
+ bool utf8() const {
+ return PCRE_IS_SET(PCRE_UTF8);
+ }
+ RE_Options &set_utf8(bool x) {
+ PCRE_SET_OR_CLEAR(x, PCRE_UTF8);
+ }
+
+ bool no_auto_capture() const {
+ return PCRE_IS_SET(PCRE_NO_AUTO_CAPTURE);
+ }
+ RE_Options &set_no_auto_capture(bool x) {
+ PCRE_SET_OR_CLEAR(x, PCRE_NO_AUTO_CAPTURE);
+ }
+
+ RE_Options &set_all_options(int opt) {
+ all_options_ = opt;
+ return *this;
+ }
+ int all_options() const {
+ return all_options_ ;
+ }
+
+ // TODO: add other pcre flags
+
+ private:
+ int match_limit_;
+ int match_limit_recursion_;
+ int all_options_;
+};
+
+// These functions return some common RE_Options
+static inline RE_Options UTF8() {
+ return RE_Options().set_utf8(true);
+}
+
+static inline RE_Options CASELESS() {
+ return RE_Options().set_caseless(true);
+}
+static inline RE_Options MULTILINE() {
+ return RE_Options().set_multiline(true);
+}
+
+static inline RE_Options DOTALL() {
+ return RE_Options().set_dotall(true);
+}
+
+static inline RE_Options EXTENDED() {
+ return RE_Options().set_extended(true);
+}
+
+// Interface for regular expression matching. Also corresponds to a
+// pre-compiled regular expression. An "RE" object is safe for
+// concurrent use by multiple threads.
+class RE {
+ public:
+ // We provide implicit conversions from strings so that users can
+ // pass in a string or a "const char*" wherever an "RE" is expected.
+ RE(const string& pat) { Init(pat, NULL); }
+ RE(const string& pat, const RE_Options& option) { Init(pat, &option); }
+ RE(const char* pat) { Init(pat, NULL); }
+ RE(const char* pat, const RE_Options& option) { Init(pat, &option); }
+ RE(const unsigned char* pat) {
+ Init(reinterpret_cast<const char*>(pat), NULL);
+ }
+ RE(const unsigned char* pat, const RE_Options& option) {
+ Init(reinterpret_cast<const char*>(pat), &option);
+ }
+
+ // Copy constructor & assignment - note that these are expensive
+ // because they recompile the expression.
+ RE(const RE& re) { Init(re.pattern_, &re.options_); }
+ const RE& operator=(const RE& re) {
+ if (this != &re) {
+ Cleanup();
+
+ // This is the code that originally came from Google
+ // Init(re.pattern_.c_str(), &re.options_);
+
+ // This is the replacement from Ari Pollak
+ Init(re.pattern_, &re.options_);
+ }
+ return *this;
+ }
+
+
+ ~RE();
+
+ // The string specification for this RE. E.g.
+ // RE re("ab*c?d+");
+ // re.pattern(); // "ab*c?d+"
+ const string& pattern() const { return pattern_; }
+
+ // If RE could not be created properly, returns an error string.
+ // Else returns the empty string.
+ const string& error() const { return *error_; }
+
+ /***** The useful part: the matching interface *****/
+
+ // This is provided so one can do pattern.ReplaceAll() just as
+ // easily as ReplaceAll(pattern-text, ....)
+
+ bool FullMatch(const StringPiece& text,
+ const Arg& ptr1 = no_arg,
+ const Arg& ptr2 = no_arg,
+ const Arg& ptr3 = no_arg,
+ const Arg& ptr4 = no_arg,
+ const Arg& ptr5 = no_arg,
+ const Arg& ptr6 = no_arg,
+ const Arg& ptr7 = no_arg,
+ const Arg& ptr8 = no_arg,
+ const Arg& ptr9 = no_arg,
+ const Arg& ptr10 = no_arg,
+ const Arg& ptr11 = no_arg,
+ const Arg& ptr12 = no_arg,
+ const Arg& ptr13 = no_arg,
+ const Arg& ptr14 = no_arg,
+ const Arg& ptr15 = no_arg,
+ const Arg& ptr16 = no_arg) const;
+
+ bool PartialMatch(const StringPiece& text,
+ const Arg& ptr1 = no_arg,
+ const Arg& ptr2 = no_arg,
+ const Arg& ptr3 = no_arg,
+ const Arg& ptr4 = no_arg,
+ const Arg& ptr5 = no_arg,
+ const Arg& ptr6 = no_arg,
+ const Arg& ptr7 = no_arg,
+ const Arg& ptr8 = no_arg,
+ const Arg& ptr9 = no_arg,
+ const Arg& ptr10 = no_arg,
+ const Arg& ptr11 = no_arg,
+ const Arg& ptr12 = no_arg,
+ const Arg& ptr13 = no_arg,
+ const Arg& ptr14 = no_arg,
+ const Arg& ptr15 = no_arg,
+ const Arg& ptr16 = no_arg) const;
+
+ bool Consume(StringPiece* input,
+ const Arg& ptr1 = no_arg,
+ const Arg& ptr2 = no_arg,
+ const Arg& ptr3 = no_arg,
+ const Arg& ptr4 = no_arg,
+ const Arg& ptr5 = no_arg,
+ const Arg& ptr6 = no_arg,
+ const Arg& ptr7 = no_arg,
+ const Arg& ptr8 = no_arg,
+ const Arg& ptr9 = no_arg,
+ const Arg& ptr10 = no_arg,
+ const Arg& ptr11 = no_arg,
+ const Arg& ptr12 = no_arg,
+ const Arg& ptr13 = no_arg,
+ const Arg& ptr14 = no_arg,
+ const Arg& ptr15 = no_arg,
+ const Arg& ptr16 = no_arg) const;
+
+ bool FindAndConsume(StringPiece* input,
+ const Arg& ptr1 = no_arg,
+ const Arg& ptr2 = no_arg,
+ const Arg& ptr3 = no_arg,
+ const Arg& ptr4 = no_arg,
+ const Arg& ptr5 = no_arg,
+ const Arg& ptr6 = no_arg,
+ const Arg& ptr7 = no_arg,
+ const Arg& ptr8 = no_arg,
+ const Arg& ptr9 = no_arg,
+ const Arg& ptr10 = no_arg,
+ const Arg& ptr11 = no_arg,
+ const Arg& ptr12 = no_arg,
+ const Arg& ptr13 = no_arg,
+ const Arg& ptr14 = no_arg,
+ const Arg& ptr15 = no_arg,
+ const Arg& ptr16 = no_arg) const;
+
+ bool Replace(const StringPiece& rewrite,
+ string *str) const;
+
+ int GlobalReplace(const StringPiece& rewrite,
+ string *str) const;
+
+ bool Extract(const StringPiece &rewrite,
+ const StringPiece &text,
+ string *out) const;
+
+ // Escapes all potentially meaningful regexp characters in
+ // 'unquoted'. The returned string, used as a regular expression,
+ // will exactly match the original string. For example,
+ // 1.5-2.0?
+ // may become:
+ // 1\.5\-2\.0\?
+ static string QuoteMeta(const StringPiece& unquoted);
+
+
+ /***** Generic matching interface *****/
+
+ // Type of match (TODO: Should be restructured as part of RE_Options)
+ enum Anchor {
+ UNANCHORED, // No anchoring
+ ANCHOR_START, // Anchor at start only
+ ANCHOR_BOTH // Anchor at start and end
+ };
+
+ // General matching routine. Stores the length of the match in
+ // "*consumed" if successful.
+ bool DoMatch(const StringPiece& text,
+ Anchor anchor,
+ int* consumed,
+ const Arg* const* args, int n) const;
+
+ // Return the number of capturing subpatterns, or -1 if the
+ // regexp wasn't valid on construction.
+ int NumberOfCapturingGroups() const;
+
+ private:
+
+ void Init(const string& pattern, const RE_Options* options);
+ void Cleanup();
+
+ // Match against "text", filling in "vec" (up to "vecsize" * 2/3) with
+ // pairs of integers for the beginning and end positions of matched
+ // text. The first pair corresponds to the entire matched text;
+ // subsequent pairs correspond, in order, to parentheses-captured
+ // matches. Returns the number of pairs (one more than the number of
+ // the last subpattern with a match) if matching was successful
+ // and zero if the match failed.
+ // I.e. for RE("(foo)|(bar)|(baz)") it will return 2, 3, and 4 when matching
+ // against "foo", "bar", and "baz" respectively.
+ // When matching RE("(foo)|hello") against "hello", it will return 1.
+  // But the values for all subpatterns are filled into "vec".
+ int TryMatch(const StringPiece& text,
+ int startpos,
+ Anchor anchor,
+ int *vec,
+ int vecsize) const;
+
+  // Append the "rewrite" string, with backslash substitutions from "text"
+ // and "vec", to string "out".
+ bool Rewrite(string *out,
+ const StringPiece& rewrite,
+ const StringPiece& text,
+ int *vec,
+ int veclen) const;
+
+ // internal implementation for DoMatch
+ bool DoMatchImpl(const StringPiece& text,
+ Anchor anchor,
+ int* consumed,
+ const Arg* const args[],
+ int n,
+ int* vec,
+ int vecsize) const;
+
+ // Compile the regexp for the specified anchoring mode
+ pcre* Compile(Anchor anchor);
+
+ string pattern_;
+ RE_Options options_;
+ pcre* re_full_; // For full matches
+ pcre* re_partial_; // For partial matches
+ const string* error_; // Error indicator (or points to empty string)
+};
+
+} // namespace pcrecpp
+
+#endif /* _PCRECPP_H */
diff --git a/src/third_party/pcre-7.4/pcrecpp_internal.h b/src/third_party/pcre-7.4/pcrecpp_internal.h
new file mode 100644
index 00000000000..0af9478404c
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcrecpp_internal.h
@@ -0,0 +1,68 @@
+/*************************************************
+* Perl-Compatible Regular Expressions *
+*************************************************/
+
+/*
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+
+#ifndef PCRECPP_INTERNAL_H
+#define PCRECPP_INTERNAL_H
+
+/* When compiling a DLL for Windows, the exported symbols have to be declared
+using some MS magic. I found some useful information on this web page:
+http://msdn2.microsoft.com/en-us/library/y4h7bcy6(VS.80).aspx. According to the
+information there, using __declspec(dllexport) without "extern" we have a
+definition; with "extern" we have a declaration. The settings here override the
+setting in pcre.h. We use:
+
+ PCRECPP_EXP_DECL for declarations
+ PCRECPP_EXP_DEFN for definitions of exported functions
+
+*/
+
+#ifndef PCRECPP_EXP_DECL
+# ifdef _WIN32
+# ifndef PCRECPP_STATIC
+# define PCRECPP_EXP_DECL extern __declspec(dllexport)
+# define PCRECPP_EXP_DEFN __declspec(dllexport)
+# else
+# define PCRECPP_EXP_DECL extern
+# define PCRECPP_EXP_DEFN
+# endif
+# else
+# define PCRECPP_EXP_DECL extern
+# define PCRECPP_EXP_DEFN
+# endif
+#endif
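+
+/* Editorial sketch (not part of PCRE): an exported function would typically
+be spelled with these macros as follows; pcrecpp_sample() is a hypothetical
+name used only for illustration.
+
+     PCRECPP_EXP_DECL int pcrecpp_sample(void);                 <- in a header
+     PCRECPP_EXP_DEFN int pcrecpp_sample(void) { return 0; }    <- in one source file
+*/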
+
+#endif /* PCRECPP_INTERNAL_H */
+
+/* End of pcrecpp_internal.h */
diff --git a/src/third_party/pcre-7.4/pcrecpp_unittest.cc b/src/third_party/pcre-7.4/pcrecpp_unittest.cc
new file mode 100644
index 00000000000..463a11c4cb5
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcrecpp_unittest.cc
@@ -0,0 +1,1240 @@
+// -*- coding: utf-8 -*-
+//
+// Copyright (c) 2005 - 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Sanjay Ghemawat
+//
+// TODO: Test extractions for PartialMatch/Consume
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdio.h>
+#include <cassert>
+#include <vector>
+#include "pcrecpp.h"
+
+using pcrecpp::StringPiece;
+using pcrecpp::RE;
+using pcrecpp::RE_Options;
+using pcrecpp::Hex;
+using pcrecpp::Octal;
+using pcrecpp::CRadix;
+
+static bool VERBOSE_TEST = false;
+
+// CHECK dies with a fatal error if condition is not true. It is *not*
+// controlled by NDEBUG, so the check will be executed regardless of
+// compilation mode. Therefore, it is safe to do things like:
+// CHECK_EQ(fp->Write(x), 4)
+#define CHECK(condition) do { \
+ if (!(condition)) { \
+ fprintf(stderr, "%s:%d: Check failed: %s\n", \
+ __FILE__, __LINE__, #condition); \
+ exit(1); \
+ } \
+} while (0)
+
+#define CHECK_EQ(a, b) CHECK(a == b)
+
+static void Timing1(int num_iters) {
+ // Same pattern lots of times
+ RE pattern("ruby:\\d+");
+ StringPiece p("ruby:1234");
+ for (int j = num_iters; j > 0; j--) {
+ CHECK(pattern.FullMatch(p));
+ }
+}
+
+static void Timing2(int num_iters) {
+ // Same pattern lots of times
+ RE pattern("ruby:(\\d+)");
+ int i;
+ for (int j = num_iters; j > 0; j--) {
+ CHECK(pattern.FullMatch("ruby:1234", &i));
+ CHECK_EQ(i, 1234);
+ }
+}
+
+static void Timing3(int num_iters) {
+ string text_string;
+ for (int j = num_iters; j > 0; j--) {
+ text_string += "this is another line\n";
+ }
+
+ RE line_matcher(".*\n");
+ string line;
+ StringPiece text(text_string);
+ int counter = 0;
+ while (line_matcher.Consume(&text)) {
+ counter++;
+ }
+ printf("Matched %d lines\n", counter);
+}
+
+#if 0 // uncomment this if you have a way of defining VirtualProcessSize()
+
+static void LeakTest() {
+ // Check for memory leaks
+ unsigned long long initial_size = 0;
+ for (int i = 0; i < 100000; i++) {
+ if (i == 50000) {
+ initial_size = VirtualProcessSize();
+ printf("Size after 50000: %llu\n", initial_size);
+ }
+ char buf[100]; // definitely big enough
+ sprintf(buf, "pat%09d", i);
+ RE newre(buf);
+ }
+ uint64 final_size = VirtualProcessSize();
+ printf("Size after 100000: %llu\n", final_size);
+ const double growth = double(final_size - initial_size) / final_size;
+ printf("Growth: %0.2f%%", growth * 100);
+ CHECK(growth < 0.02); // Allow < 2% growth
+}
+
+#endif
+
+static void RadixTests() {
+ printf("Testing hex\n");
+
+#define CHECK_HEX(type, value) \
+ do { \
+ type v; \
+ CHECK(RE("([0-9a-fA-F]+)[uUlL]*").FullMatch(#value, Hex(&v))); \
+ CHECK_EQ(v, 0x ## value); \
+ CHECK(RE("([0-9a-fA-FxX]+)[uUlL]*").FullMatch("0x" #value, CRadix(&v))); \
+ CHECK_EQ(v, 0x ## value); \
+ } while(0)
+
+ CHECK_HEX(short, 2bad);
+ CHECK_HEX(unsigned short, 2badU);
+ CHECK_HEX(int, dead);
+ CHECK_HEX(unsigned int, deadU);
+ CHECK_HEX(long, 7eadbeefL);
+ CHECK_HEX(unsigned long, deadbeefUL);
+#ifdef HAVE_LONG_LONG
+ CHECK_HEX(long long, 12345678deadbeefLL);
+#endif
+#ifdef HAVE_UNSIGNED_LONG_LONG
+ CHECK_HEX(unsigned long long, cafebabedeadbeefULL);
+#endif
+
+#undef CHECK_HEX
+
+ printf("Testing octal\n");
+
+#define CHECK_OCTAL(type, value) \
+ do { \
+ type v; \
+ CHECK(RE("([0-7]+)[uUlL]*").FullMatch(#value, Octal(&v))); \
+ CHECK_EQ(v, 0 ## value); \
+ CHECK(RE("([0-9a-fA-FxX]+)[uUlL]*").FullMatch("0" #value, CRadix(&v))); \
+ CHECK_EQ(v, 0 ## value); \
+ } while(0)
+
+ CHECK_OCTAL(short, 77777);
+ CHECK_OCTAL(unsigned short, 177777U);
+ CHECK_OCTAL(int, 17777777777);
+ CHECK_OCTAL(unsigned int, 37777777777U);
+ CHECK_OCTAL(long, 17777777777L);
+ CHECK_OCTAL(unsigned long, 37777777777UL);
+#ifdef HAVE_LONG_LONG
+ CHECK_OCTAL(long long, 777777777777777777777LL);
+#endif
+#ifdef HAVE_UNSIGNED_LONG_LONG
+ CHECK_OCTAL(unsigned long long, 1777777777777777777777ULL);
+#endif
+
+#undef CHECK_OCTAL
+
+ printf("Testing decimal\n");
+
+#define CHECK_DECIMAL(type, value) \
+ do { \
+ type v; \
+ CHECK(RE("(-?[0-9]+)[uUlL]*").FullMatch(#value, &v)); \
+ CHECK_EQ(v, value); \
+ CHECK(RE("(-?[0-9a-fA-FxX]+)[uUlL]*").FullMatch(#value, CRadix(&v))); \
+ CHECK_EQ(v, value); \
+ } while(0)
+
+ CHECK_DECIMAL(short, -1);
+ CHECK_DECIMAL(unsigned short, 9999);
+ CHECK_DECIMAL(int, -1000);
+ CHECK_DECIMAL(unsigned int, 12345U);
+ CHECK_DECIMAL(long, -10000000L);
+ CHECK_DECIMAL(unsigned long, 3083324652U);
+#ifdef HAVE_LONG_LONG
+ CHECK_DECIMAL(long long, -100000000000000LL);
+#endif
+#ifdef HAVE_UNSIGNED_LONG_LONG
+ CHECK_DECIMAL(unsigned long long, 1234567890987654321ULL);
+#endif
+
+#undef CHECK_DECIMAL
+
+}
+
+static void TestReplace() {
+ printf("Testing Replace\n");
+
+ struct ReplaceTest {
+ const char *regexp;
+ const char *rewrite;
+ const char *original;
+ const char *single;
+ const char *global;
+ };
+ static const ReplaceTest tests[] = {
+ { "(qu|[b-df-hj-np-tv-z]*)([a-z]+)",
+ "\\2\\1ay",
+ "the quick brown fox jumps over the lazy dogs.",
+ "ethay quick brown fox jumps over the lazy dogs.",
+ "ethay ickquay ownbray oxfay umpsjay overay ethay azylay ogsday." },
+ { "\\w+",
+ "\\0-NOSPAM",
+ "paul.haahr@google.com",
+ "paul-NOSPAM.haahr@google.com",
+ "paul-NOSPAM.haahr-NOSPAM@google-NOSPAM.com-NOSPAM" },
+ { "^",
+ "(START)",
+ "foo",
+ "(START)foo",
+ "(START)foo" },
+ { "^",
+ "(START)",
+ "",
+ "(START)",
+ "(START)" },
+ { "$",
+ "(END)",
+ "",
+ "(END)",
+ "(END)" },
+ { "b",
+ "bb",
+ "ababababab",
+ "abbabababab",
+ "abbabbabbabbabb" },
+ { "b",
+ "bb",
+ "bbbbbb",
+ "bbbbbbb",
+ "bbbbbbbbbbbb" },
+ { "b+",
+ "bb",
+ "bbbbbb",
+ "bb",
+ "bb" },
+ { "b*",
+ "bb",
+ "bbbbbb",
+ "bb",
+ "bb" },
+ { "b*",
+ "bb",
+ "aaaaa",
+ "bbaaaaa",
+ "bbabbabbabbabbabb" },
+ { "b*",
+ "bb",
+ "aa\naa\n",
+ "bbaa\naa\n",
+ "bbabbabb\nbbabbabb\nbb" },
+ { "b*",
+ "bb",
+ "aa\raa\r",
+ "bbaa\raa\r",
+ "bbabbabb\rbbabbabb\rbb" },
+ { "b*",
+ "bb",
+ "aa\r\naa\r\n",
+ "bbaa\r\naa\r\n",
+ "bbabbabb\r\nbbabbabb\r\nbb" },
+#ifdef SUPPORT_UTF8
+ { "b*",
+ "bb",
+ "\xE3\x83\x9B\xE3\x83\xBC\xE3\x83\xA0\xE3\x81\xB8", // utf8
+ "bb\xE3\x83\x9B\xE3\x83\xBC\xE3\x83\xA0\xE3\x81\xB8",
+ "bb\xE3\x83\x9B""bb""\xE3\x83\xBC""bb""\xE3\x83\xA0""bb""\xE3\x81\xB8""bb" },
+ { "b*",
+ "bb",
+ "\xE3\x83\x9B\r\n\xE3\x83\xBC\r\xE3\x83\xA0\n\xE3\x81\xB8\r\n", // utf8
+ "bb\xE3\x83\x9B\r\n\xE3\x83\xBC\r\xE3\x83\xA0\n\xE3\x81\xB8\r\n",
+ ("bb\xE3\x83\x9B""bb\r\nbb""\xE3\x83\xBC""bb\rbb""\xE3\x83\xA0"
+ "bb\nbb""\xE3\x81\xB8""bb\r\nbb") },
+#endif
+ { "", NULL, NULL, NULL, NULL }
+ };
+
+#ifdef SUPPORT_UTF8
+ const bool support_utf8 = true;
+#else
+ const bool support_utf8 = false;
+#endif
+
+ for (const ReplaceTest *t = tests; t->original != NULL; ++t) {
+ RE re(t->regexp, RE_Options(PCRE_NEWLINE_CRLF).set_utf8(support_utf8));
+ assert(re.error().empty());
+ string one(t->original);
+ CHECK(re.Replace(t->rewrite, &one));
+ CHECK_EQ(one, t->single);
+ string all(t->original);
+ CHECK(re.GlobalReplace(t->rewrite, &all) > 0);
+ CHECK_EQ(all, t->global);
+ }
+
+ // One final test: test \r\n replacement when we're not in CRLF mode
+ {
+ RE re("b*", RE_Options(PCRE_NEWLINE_CR).set_utf8(support_utf8));
+ assert(re.error().empty());
+ string all("aa\r\naa\r\n");
+ CHECK(re.GlobalReplace("bb", &all) > 0);
+ CHECK_EQ(all, string("bbabbabb\rbb\nbbabbabb\rbb\nbb"));
+ }
+ {
+ RE re("b*", RE_Options(PCRE_NEWLINE_LF).set_utf8(support_utf8));
+ assert(re.error().empty());
+ string all("aa\r\naa\r\n");
+ CHECK(re.GlobalReplace("bb", &all) > 0);
+ CHECK_EQ(all, string("bbabbabb\rbb\nbbabbabb\rbb\nbb"));
+ }
+ // TODO: test what happens when no PCRE_NEWLINE_* flag is set.
+ // Alas, the answer depends on how pcre was compiled.
+}
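+
+// Editorial usage sketch (not part of the original test file): Replace()
+// rewrites only the first match, while GlobalReplace() rewrites every
+// non-overlapping match. Assuming the pcrecpp.h declarations included above:
+//
+//   string s("yabba dabba doo");
+//   pcrecpp::RE("b+").Replace("d", &s);        // s == "yada dabba doo"
+//   pcrecpp::RE("b+").GlobalReplace("d", &s);  // s == "yada dada doo"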
+
+static void TestExtract() {
+ printf("Testing Extract\n");
+
+ string s;
+
+ CHECK(RE("(.*)@([^.]*)").Extract("\\2!\\1", "boris@kremvax.ru", &s));
+ CHECK_EQ(s, "kremvax!boris");
+
+ // check the RE interface as well
+ CHECK(RE(".*").Extract("'\\0'", "foo", &s));
+ CHECK_EQ(s, "'foo'");
+ CHECK(!RE("bar").Extract("'\\0'", "baz", &s));
+ CHECK_EQ(s, "'foo'");
+}
+
+static void TestConsume() {
+ printf("Testing Consume\n");
+
+ string word;
+
+ string s(" aaa b!@#$@#$cccc");
+ StringPiece input(s);
+
+  RE r("\\s*(\\w+)");    // matches a word, possibly preceded by whitespace
+ CHECK(r.Consume(&input, &word));
+ CHECK_EQ(word, "aaa");
+ CHECK(r.Consume(&input, &word));
+ CHECK_EQ(word, "b");
+ CHECK(! r.Consume(&input, &word));
+}
+
+static void TestFindAndConsume() {
+ printf("Testing FindAndConsume\n");
+
+ string word;
+
+ string s(" aaa b!@#$@#$cccc");
+ StringPiece input(s);
+
+ RE r("(\\w+)"); // matches a word
+ CHECK(r.FindAndConsume(&input, &word));
+ CHECK_EQ(word, "aaa");
+ CHECK(r.FindAndConsume(&input, &word));
+ CHECK_EQ(word, "b");
+ CHECK(r.FindAndConsume(&input, &word));
+ CHECK_EQ(word, "cccc");
+ CHECK(! r.FindAndConsume(&input, &word));
+}
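+
+// Editorial note (not part of the original test file): the difference from
+// Consume() above is anchoring -- Consume() must match at the start of the
+// remaining input, while FindAndConsume() may first skip non-matching text:
+//
+//   string w;
+//   StringPiece in("!!word");
+//   pcrecpp::RE("(\\w+)").Consume(&in, &w);         // fails: '!' at the front
+//   pcrecpp::RE("(\\w+)").FindAndConsume(&in, &w);  // succeeds, w == "word"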
+
+static void TestMatchNumberPeculiarity() {
+  printf("Testing match-number peculiarity\n");
+
+ string word1;
+ string word2;
+ string word3;
+
+ RE r("(foo)|(bar)|(baz)");
+ CHECK(r.PartialMatch("foo", &word1, &word2, &word3));
+ CHECK_EQ(word1, "foo");
+ CHECK_EQ(word2, "");
+ CHECK_EQ(word3, "");
+ CHECK(r.PartialMatch("bar", &word1, &word2, &word3));
+ CHECK_EQ(word1, "");
+ CHECK_EQ(word2, "bar");
+ CHECK_EQ(word3, "");
+ CHECK(r.PartialMatch("baz", &word1, &word2, &word3));
+ CHECK_EQ(word1, "");
+ CHECK_EQ(word2, "");
+ CHECK_EQ(word3, "baz");
+ CHECK(!r.PartialMatch("f", &word1, &word2, &word3));
+
+ string a;
+ CHECK(RE("(foo)|hello").FullMatch("hello", &a));
+ CHECK_EQ(a, "");
+}
+
+static void TestRecursion() {
+ printf("Testing recursion\n");
+
+ // Get one string that passes (sometimes), one that never does.
+ string text_good("abcdefghijk");
+ string text_bad("acdefghijkl");
+
+ // According to pcretest, matching text_good against (\w+)*b
+ // requires match_limit of at least 8192, and match_recursion_limit
+ // of at least 37.
+
+ RE_Options options_ml;
+ options_ml.set_match_limit(8192);
+ RE re("(\\w+)*b", options_ml);
+ CHECK(re.PartialMatch(text_good) == true);
+ CHECK(re.PartialMatch(text_bad) == false);
+ CHECK(re.FullMatch(text_good) == false);
+ CHECK(re.FullMatch(text_bad) == false);
+
+ options_ml.set_match_limit(1024);
+ RE re2("(\\w+)*b", options_ml);
+ CHECK(re2.PartialMatch(text_good) == false); // because of match_limit
+ CHECK(re2.PartialMatch(text_bad) == false);
+ CHECK(re2.FullMatch(text_good) == false);
+ CHECK(re2.FullMatch(text_bad) == false);
+
+ RE_Options options_mlr;
+ options_mlr.set_match_limit_recursion(50);
+ RE re3("(\\w+)*b", options_mlr);
+ CHECK(re3.PartialMatch(text_good) == true);
+ CHECK(re3.PartialMatch(text_bad) == false);
+ CHECK(re3.FullMatch(text_good) == false);
+ CHECK(re3.FullMatch(text_bad) == false);
+
+ options_mlr.set_match_limit_recursion(10);
+ RE re4("(\\w+)*b", options_mlr);
+ CHECK(re4.PartialMatch(text_good) == false);
+ CHECK(re4.PartialMatch(text_bad) == false);
+ CHECK(re4.FullMatch(text_good) == false);
+ CHECK(re4.FullMatch(text_bad) == false);
+}
+
+// A meta-quoted string, interpreted as a pattern, should always match
+// the original unquoted string.
+static void TestQuoteMeta(string unquoted, RE_Options options = RE_Options()) {
+ string quoted = RE::QuoteMeta(unquoted);
+ RE re(quoted, options);
+ CHECK(re.FullMatch(unquoted));
+}
+
+// A string containing meaningful regexp characters, once meta-quoted,
+// should generally no longer match strings that the unquoted pattern matches.
+static void NegativeTestQuoteMeta(string unquoted, string should_not_match,
+ RE_Options options = RE_Options()) {
+ string quoted = RE::QuoteMeta(unquoted);
+ RE re(quoted, options);
+ CHECK(!re.FullMatch(should_not_match));
+}
+
+// Tests that quoted meta characters match their original strings,
+// and that a few things that shouldn't match indeed do not.
+static void TestQuoteMetaSimple() {
+ TestQuoteMeta("foo");
+ TestQuoteMeta("foo.bar");
+ TestQuoteMeta("foo\\.bar");
+ TestQuoteMeta("[1-9]");
+ TestQuoteMeta("1.5-2.0?");
+ TestQuoteMeta("\\d");
+ TestQuoteMeta("Who doesn't like ice cream?");
+ TestQuoteMeta("((a|b)c?d*e+[f-h]i)");
+ TestQuoteMeta("((?!)xxx).*yyy");
+ TestQuoteMeta("([");
+}
+
+static void TestQuoteMetaSimpleNegative() {
+ NegativeTestQuoteMeta("foo", "bar");
+ NegativeTestQuoteMeta("...", "bar");
+ NegativeTestQuoteMeta("\\.", ".");
+ NegativeTestQuoteMeta("\\.", "..");
+ NegativeTestQuoteMeta("(a)", "a");
+ NegativeTestQuoteMeta("(a|b)", "a");
+ NegativeTestQuoteMeta("(a|b)", "(a)");
+ NegativeTestQuoteMeta("(a|b)", "a|b");
+ NegativeTestQuoteMeta("[0-9]", "0");
+ NegativeTestQuoteMeta("[0-9]", "0-9");
+ NegativeTestQuoteMeta("[0-9]", "[9]");
+ NegativeTestQuoteMeta("((?!)xxx)", "xxx");
+}
+
+static void TestQuoteMetaLatin1() {
+ TestQuoteMeta("3\xb2 = 9");
+}
+
+static void TestQuoteMetaUtf8() {
+#ifdef SUPPORT_UTF8
+ TestQuoteMeta("Pl\xc3\xa1\x63ido Domingo", pcrecpp::UTF8());
+ TestQuoteMeta("xyz", pcrecpp::UTF8()); // No fancy utf8
+ TestQuoteMeta("\xc2\xb0", pcrecpp::UTF8()); // 2-byte utf8 (degree symbol)
+ TestQuoteMeta("27\xc2\xb0 degrees", pcrecpp::UTF8()); // As a middle character
+ TestQuoteMeta("\xe2\x80\xb3", pcrecpp::UTF8()); // 3-byte utf8 (double prime)
+ TestQuoteMeta("\xf0\x9d\x85\x9f", pcrecpp::UTF8()); // 4-byte utf8 (music note)
+ TestQuoteMeta("27\xc2\xb0"); // Interpreted as Latin-1, but should still work
+ NegativeTestQuoteMeta("27\xc2\xb0", // 2-byte utf (degree symbol)
+ "27\\\xc2\\\xb0",
+ pcrecpp::UTF8());
+#endif
+}
+
+static void TestQuoteMetaAll() {
+ printf("Testing QuoteMeta\n");
+  TestQuoteMetaSimple();
+ TestQuoteMetaSimpleNegative();
+ TestQuoteMetaLatin1();
+ TestQuoteMetaUtf8();
+}
+
+//
+// Options tests contributed by
+// Giuseppe Maxia, CTO, Stardata s.r.l.
+// July 2005
+//
+static void GetOneOptionResult(
+ const char *option_name,
+ const char *regex,
+ const char *str,
+ RE_Options options,
+ bool full,
+ string expected) {
+
+ printf("Testing Option <%s>\n", option_name);
+ if(VERBOSE_TEST)
+ printf("/%s/ finds \"%s\" within \"%s\" \n",
+ regex,
+ expected.c_str(),
+ str);
+ string captured("");
+ if (full)
+ RE(regex,options).FullMatch(str, &captured);
+ else
+ RE(regex,options).PartialMatch(str, &captured);
+ CHECK_EQ(captured, expected);
+}
+
+static void TestOneOption(
+ const char *option_name,
+ const char *regex,
+ const char *str,
+ RE_Options options,
+ bool full,
+ bool assertive = true) {
+
+ printf("Testing Option <%s>\n", option_name);
+ if (VERBOSE_TEST)
+ printf("'%s' %s /%s/ \n",
+ str,
+ (assertive? "matches" : "doesn't match"),
+ regex);
+ if (assertive) {
+ if (full)
+ CHECK(RE(regex,options).FullMatch(str));
+ else
+ CHECK(RE(regex,options).PartialMatch(str));
+ } else {
+ if (full)
+ CHECK(!RE(regex,options).FullMatch(str));
+ else
+ CHECK(!RE(regex,options).PartialMatch(str));
+ }
+}
+
+static void Test_CASELESS() {
+ RE_Options options;
+ RE_Options options2;
+
+ options.set_caseless(true);
+ TestOneOption("CASELESS (class)", "HELLO", "hello", options, false);
+ TestOneOption("CASELESS (class2)", "HELLO", "hello", options2.set_caseless(true), false);
+ TestOneOption("CASELESS (class)", "^[A-Z]+$", "Hello", options, false);
+
+ TestOneOption("CASELESS (function)", "HELLO", "hello", pcrecpp::CASELESS(), false);
+ TestOneOption("CASELESS (function)", "^[A-Z]+$", "Hello", pcrecpp::CASELESS(), false);
+ options.set_caseless(false);
+ TestOneOption("no CASELESS", "HELLO", "hello", options, false, false);
+}
+
+static void Test_MULTILINE() {
+ RE_Options options;
+ RE_Options options2;
+ const char *str = "HELLO\n" "cruel\n" "world\n";
+
+ options.set_multiline(true);
+ TestOneOption("MULTILINE (class)", "^cruel$", str, options, false);
+ TestOneOption("MULTILINE (class2)", "^cruel$", str, options2.set_multiline(true), false);
+ TestOneOption("MULTILINE (function)", "^cruel$", str, pcrecpp::MULTILINE(), false);
+ options.set_multiline(false);
+ TestOneOption("no MULTILINE", "^cruel$", str, options, false, false);
+}
+
+static void Test_DOTALL() {
+ RE_Options options;
+ RE_Options options2;
+ const char *str = "HELLO\n" "cruel\n" "world";
+
+ options.set_dotall(true);
+ TestOneOption("DOTALL (class)", "HELLO.*world", str, options, true);
+ TestOneOption("DOTALL (class2)", "HELLO.*world", str, options2.set_dotall(true), true);
+ TestOneOption("DOTALL (function)", "HELLO.*world", str, pcrecpp::DOTALL(), true);
+ options.set_dotall(false);
+ TestOneOption("no DOTALL", "HELLO.*world", str, options, true, false);
+}
+
+static void Test_DOLLAR_ENDONLY() {
+ RE_Options options;
+ RE_Options options2;
+ const char *str = "HELLO world\n";
+
+ TestOneOption("no DOLLAR_ENDONLY", "world$", str, options, false);
+ options.set_dollar_endonly(true);
+ TestOneOption("DOLLAR_ENDONLY 1", "world$", str, options, false, false);
+ TestOneOption("DOLLAR_ENDONLY 2", "world$", str, options2.set_dollar_endonly(true), false, false);
+}
+
+static void Test_EXTRA() {
+ RE_Options options;
+ const char *str = "HELLO";
+
+ options.set_extra(true);
+ TestOneOption("EXTRA 1", "\\HELL\\O", str, options, true, false );
+ TestOneOption("EXTRA 2", "\\HELL\\O", str, RE_Options().set_extra(true), true, false );
+ options.set_extra(false);
+ TestOneOption("no EXTRA", "\\HELL\\O", str, options, true );
+}
+
+static void Test_EXTENDED() {
+ RE_Options options;
+ RE_Options options2;
+ const char *str = "HELLO world";
+
+ options.set_extended(true);
+ TestOneOption("EXTENDED (class)", "HELLO world", str, options, false, false);
+ TestOneOption("EXTENDED (class2)", "HELLO world", str, options2.set_extended(true), false, false);
+ TestOneOption("EXTENDED (class)",
+ "^ HE L{2} O "
+ "\\s+ "
+ "\\w+ $ ",
+ str,
+ options,
+ false);
+
+ TestOneOption("EXTENDED (function)", "HELLO world", str, pcrecpp::EXTENDED(), false, false);
+ TestOneOption("EXTENDED (function)",
+ "^ HE L{2} O "
+ "\\s+ "
+ "\\w+ $ ",
+ str,
+ pcrecpp::EXTENDED(),
+ false);
+
+ options.set_extended(false);
+ TestOneOption("no EXTENDED", "HELLO world", str, options, false);
+}
+
+static void Test_NO_AUTO_CAPTURE() {
+ RE_Options options;
+ const char *str = "HELLO world";
+ string captured;
+
+ printf("Testing Option <no NO_AUTO_CAPTURE>\n");
+ if (VERBOSE_TEST)
+ printf("parentheses capture text\n");
+ RE re("(world|universe)$", options);
+ CHECK(re.Extract("\\1", str , &captured));
+ CHECK_EQ(captured, "world");
+ options.set_no_auto_capture(true);
+ printf("testing Option <NO_AUTO_CAPTURE>\n");
+ if (VERBOSE_TEST)
+ printf("parentheses do not capture text\n");
+ re.Extract("\\1",str, &captured );
+ CHECK_EQ(captured, "world");
+}
+
+static void Test_UNGREEDY() {
+ RE_Options options;
+ const char *str = "HELLO, 'this' is the 'world'";
+
+ options.set_ungreedy(true);
+ GetOneOptionResult("UNGREEDY 1", "('.*')", str, options, false, "'this'" );
+ GetOneOptionResult("UNGREEDY 2", "('.*')", str, RE_Options().set_ungreedy(true), false, "'this'" );
+ GetOneOptionResult("UNGREEDY", "('.*?')", str, options, false, "'this' is the 'world'" );
+
+ options.set_ungreedy(false);
+ GetOneOptionResult("no UNGREEDY", "('.*')", str, options, false, "'this' is the 'world'" );
+ GetOneOptionResult("no UNGREEDY", "('.*?')", str, options, false, "'this'" );
+}
+
+static void Test_all_options() {
+ const char *str = "HELLO\n" "cruel\n" "world";
+ RE_Options options;
+ options.set_all_options(PCRE_CASELESS | PCRE_DOTALL);
+
+ TestOneOption("all_options (CASELESS|DOTALL)", "^hello.*WORLD", str , options, false);
+ options.set_all_options(0);
+ TestOneOption("all_options (0)", "^hello.*WORLD", str , options, false, false);
+ options.set_all_options(PCRE_MULTILINE | PCRE_EXTENDED);
+
+ TestOneOption("all_options (MULTILINE|EXTENDED)", " ^ c r u e l $ ", str, options, false);
+ TestOneOption("all_options (MULTILINE|EXTENDED) with constructor",
+ " ^ c r u e l $ ",
+ str,
+ RE_Options(PCRE_MULTILINE | PCRE_EXTENDED),
+ false);
+
+ TestOneOption("all_options (MULTILINE|EXTENDED) with concatenation",
+ " ^ c r u e l $ ",
+ str,
+ RE_Options()
+ .set_multiline(true)
+ .set_extended(true),
+ false);
+
+ options.set_all_options(0);
+ TestOneOption("all_options (0)", "^ c r u e l $", str, options, false, false);
+
+}
+
+static void TestOptions() {
+ printf("Testing Options\n");
+ Test_CASELESS();
+ Test_MULTILINE();
+ Test_DOTALL();
+ Test_DOLLAR_ENDONLY();
+ Test_EXTENDED();
+ Test_NO_AUTO_CAPTURE();
+ Test_UNGREEDY();
+ Test_EXTRA();
+ Test_all_options();
+}
+
+static void TestConstructors() {
+ printf("Testing constructors\n");
+
+ RE_Options options;
+ options.set_dotall(true);
+ const char *str = "HELLO\n" "cruel\n" "world";
+
+ RE orig("HELLO.*world", options);
+ CHECK(orig.FullMatch(str));
+
+ RE copy1(orig);
+ CHECK(copy1.FullMatch(str));
+
+ RE copy2("not a match");
+ CHECK(!copy2.FullMatch(str));
+ copy2 = copy1;
+ CHECK(copy2.FullMatch(str));
+ copy2 = orig;
+ CHECK(copy2.FullMatch(str));
+
+ // Make sure when we assign to ourselves, nothing bad happens
+ orig = orig;
+ copy1 = copy1;
+ copy2 = copy2;
+ CHECK(orig.FullMatch(str));
+ CHECK(copy1.FullMatch(str));
+ CHECK(copy2.FullMatch(str));
+}
+
+int main(int argc, char** argv) {
+ // Treat any flag as --help
+ if (argc > 1 && argv[1][0] == '-') {
+ printf("Usage: %s [timing1|timing2|timing3 num-iters]\n"
+ " If 'timingX ###' is specified, run the given timing test\n"
+ " with the given number of iterations, rather than running\n"
+           "  the default correctness test.\n", argv[0]);
+ return 0;
+ }
+
+ if (argc > 1) {
+ if ( argc == 2 || atoi(argv[2]) == 0) {
+ printf("timing mode needs a num-iters argument\n");
+ return 1;
+ }
+ if (!strcmp(argv[1], "timing1"))
+ Timing1(atoi(argv[2]));
+ else if (!strcmp(argv[1], "timing2"))
+ Timing2(atoi(argv[2]));
+ else if (!strcmp(argv[1], "timing3"))
+ Timing3(atoi(argv[2]));
+ else
+ printf("Unknown argument '%s'\n", argv[1]);
+ return 0;
+ }
+
+ printf("Testing FullMatch\n");
+
+ int i;
+ string s;
+
+ /***** FullMatch with no args *****/
+
+ CHECK(RE("h.*o").FullMatch("hello"));
+ CHECK(!RE("h.*o").FullMatch("othello")); // Must be anchored at front
+ CHECK(!RE("h.*o").FullMatch("hello!")); // Must be anchored at end
+ CHECK(RE("a*").FullMatch("aaaa")); // Fullmatch with normal op
+ CHECK(RE("a*?").FullMatch("aaaa")); // Fullmatch with nongreedy op
+ CHECK(RE("a*?\\z").FullMatch("aaaa")); // Two unusual ops
+
+ /***** FullMatch with args *****/
+
+ // Zero-arg
+ CHECK(RE("\\d+").FullMatch("1001"));
+
+ // Single-arg
+ CHECK(RE("(\\d+)").FullMatch("1001", &i));
+ CHECK_EQ(i, 1001);
+ CHECK(RE("(-?\\d+)").FullMatch("-123", &i));
+ CHECK_EQ(i, -123);
+ CHECK(!RE("()\\d+").FullMatch("10", &i));
+ CHECK(!RE("(\\d+)").FullMatch("1234567890123456789012345678901234567890",
+ &i));
+
+ // Digits surrounding integer-arg
+ CHECK(RE("1(\\d*)4").FullMatch("1234", &i));
+ CHECK_EQ(i, 23);
+ CHECK(RE("(\\d)\\d+").FullMatch("1234", &i));
+ CHECK_EQ(i, 1);
+ CHECK(RE("(-\\d)\\d+").FullMatch("-1234", &i));
+ CHECK_EQ(i, -1);
+ CHECK(RE("(\\d)").PartialMatch("1234", &i));
+ CHECK_EQ(i, 1);
+ CHECK(RE("(-\\d)").PartialMatch("-1234", &i));
+ CHECK_EQ(i, -1);
+
+ // String-arg
+ CHECK(RE("h(.*)o").FullMatch("hello", &s));
+ CHECK_EQ(s, string("ell"));
+
+ // StringPiece-arg
+ StringPiece sp;
+ CHECK(RE("(\\w+):(\\d+)").FullMatch("ruby:1234", &sp, &i));
+ CHECK_EQ(sp.size(), 4);
+ CHECK(memcmp(sp.data(), "ruby", 4) == 0);
+ CHECK_EQ(i, 1234);
+
+ // Multi-arg
+ CHECK(RE("(\\w+):(\\d+)").FullMatch("ruby:1234", &s, &i));
+ CHECK_EQ(s, string("ruby"));
+ CHECK_EQ(i, 1234);
+
+ // Ignored arg
+ CHECK(RE("(\\w+)(:)(\\d+)").FullMatch("ruby:1234", &s, (void*)NULL, &i));
+ CHECK_EQ(s, string("ruby"));
+ CHECK_EQ(i, 1234);
+
+ // Type tests
+ {
+ char c;
+ CHECK(RE("(H)ello").FullMatch("Hello", &c));
+ CHECK_EQ(c, 'H');
+ }
+ {
+ unsigned char c;
+ CHECK(RE("(H)ello").FullMatch("Hello", &c));
+ CHECK_EQ(c, static_cast<unsigned char>('H'));
+ }
+ {
+ short v;
+ CHECK(RE("(-?\\d+)").FullMatch("100", &v)); CHECK_EQ(v, 100);
+ CHECK(RE("(-?\\d+)").FullMatch("-100", &v)); CHECK_EQ(v, -100);
+ CHECK(RE("(-?\\d+)").FullMatch("32767", &v)); CHECK_EQ(v, 32767);
+ CHECK(RE("(-?\\d+)").FullMatch("-32768", &v)); CHECK_EQ(v, -32768);
+ CHECK(!RE("(-?\\d+)").FullMatch("-32769", &v));
+ CHECK(!RE("(-?\\d+)").FullMatch("32768", &v));
+ }
+ {
+ unsigned short v;
+ CHECK(RE("(\\d+)").FullMatch("100", &v)); CHECK_EQ(v, 100);
+ CHECK(RE("(\\d+)").FullMatch("32767", &v)); CHECK_EQ(v, 32767);
+ CHECK(RE("(\\d+)").FullMatch("65535", &v)); CHECK_EQ(v, 65535);
+ CHECK(!RE("(\\d+)").FullMatch("65536", &v));
+ }
+ {
+ int v;
+ static const int max_value = 0x7fffffff;
+ static const int min_value = -max_value - 1;
+ CHECK(RE("(-?\\d+)").FullMatch("100", &v)); CHECK_EQ(v, 100);
+ CHECK(RE("(-?\\d+)").FullMatch("-100", &v)); CHECK_EQ(v, -100);
+ CHECK(RE("(-?\\d+)").FullMatch("2147483647", &v)); CHECK_EQ(v, max_value);
+ CHECK(RE("(-?\\d+)").FullMatch("-2147483648", &v)); CHECK_EQ(v, min_value);
+ CHECK(!RE("(-?\\d+)").FullMatch("-2147483649", &v));
+ CHECK(!RE("(-?\\d+)").FullMatch("2147483648", &v));
+ }
+ {
+ unsigned int v;
+ static const unsigned int max_value = 0xfffffffful;
+ CHECK(RE("(\\d+)").FullMatch("100", &v)); CHECK_EQ(v, 100);
+ CHECK(RE("(\\d+)").FullMatch("4294967295", &v)); CHECK_EQ(v, max_value);
+ CHECK(!RE("(\\d+)").FullMatch("4294967296", &v));
+ }
+#ifdef HAVE_LONG_LONG
+# if defined(__MINGW__) || defined(__MINGW32__)
+# define LLD "%I64d"
+# define LLU "%I64u"
+# else
+# define LLD "%lld"
+# define LLU "%llu"
+# endif
+ {
+ long long v;
+ static const long long max_value = 0x7fffffffffffffffLL;
+ static const long long min_value = -max_value - 1;
+ char buf[32]; // definitely big enough for a long long
+
+ CHECK(RE("(-?\\d+)").FullMatch("100", &v)); CHECK_EQ(v, 100);
+ CHECK(RE("(-?\\d+)").FullMatch("-100",&v)); CHECK_EQ(v, -100);
+
+ sprintf(buf, LLD, max_value);
+ CHECK(RE("(-?\\d+)").FullMatch(buf,&v)); CHECK_EQ(v, max_value);
+
+ sprintf(buf, LLD, min_value);
+ CHECK(RE("(-?\\d+)").FullMatch(buf,&v)); CHECK_EQ(v, min_value);
+
+ sprintf(buf, LLD, max_value);
+ assert(buf[strlen(buf)-1] != '9');
+ buf[strlen(buf)-1]++;
+ CHECK(!RE("(-?\\d+)").FullMatch(buf, &v));
+
+ sprintf(buf, LLD, min_value);
+ assert(buf[strlen(buf)-1] != '9');
+ buf[strlen(buf)-1]++;
+ CHECK(!RE("(-?\\d+)").FullMatch(buf, &v));
+ }
+#endif
+#if defined HAVE_UNSIGNED_LONG_LONG && defined HAVE_LONG_LONG
+ {
+ unsigned long long v;
+ long long v2;
+ static const unsigned long long max_value = 0xffffffffffffffffULL;
+    char buf[32];  // definitely big enough for an unsigned long long
+
+ CHECK(RE("(-?\\d+)").FullMatch("100",&v)); CHECK_EQ(v, 100);
+ CHECK(RE("(-?\\d+)").FullMatch("-100",&v2)); CHECK_EQ(v2, -100);
+
+ sprintf(buf, LLU, max_value);
+ CHECK(RE("(-?\\d+)").FullMatch(buf,&v)); CHECK_EQ(v, max_value);
+
+ assert(buf[strlen(buf)-1] != '9');
+ buf[strlen(buf)-1]++;
+ CHECK(!RE("(-?\\d+)").FullMatch(buf, &v));
+ }
+#endif
+ {
+ float v;
+ CHECK(RE("(.*)").FullMatch("100", &v));
+ CHECK(RE("(.*)").FullMatch("-100.", &v));
+ CHECK(RE("(.*)").FullMatch("1e23", &v));
+ }
+ {
+ double v;
+ CHECK(RE("(.*)").FullMatch("100", &v));
+ CHECK(RE("(.*)").FullMatch("-100.", &v));
+ CHECK(RE("(.*)").FullMatch("1e23", &v));
+ }
+
+ // Check that matching is fully anchored
+ CHECK(!RE("(\\d+)").FullMatch("x1001", &i));
+ CHECK(!RE("(\\d+)").FullMatch("1001x", &i));
+ CHECK(RE("x(\\d+)").FullMatch("x1001", &i)); CHECK_EQ(i, 1001);
+ CHECK(RE("(\\d+)x").FullMatch("1001x", &i)); CHECK_EQ(i, 1001);
+
+ // Braces
+ CHECK(RE("[0-9a-f+.-]{5,}").FullMatch("0abcd"));
+ CHECK(RE("[0-9a-f+.-]{5,}").FullMatch("0abcde"));
+ CHECK(!RE("[0-9a-f+.-]{5,}").FullMatch("0abc"));
+
+ // Complicated RE
+ CHECK(RE("foo|bar|[A-Z]").FullMatch("foo"));
+ CHECK(RE("foo|bar|[A-Z]").FullMatch("bar"));
+ CHECK(RE("foo|bar|[A-Z]").FullMatch("X"));
+ CHECK(!RE("foo|bar|[A-Z]").FullMatch("XY"));
+
+ // Check full-match handling (needs '$' tacked on internally)
+ CHECK(RE("fo|foo").FullMatch("fo"));
+ CHECK(RE("fo|foo").FullMatch("foo"));
+ CHECK(RE("fo|foo$").FullMatch("fo"));
+ CHECK(RE("fo|foo$").FullMatch("foo"));
+ CHECK(RE("foo$").FullMatch("foo"));
+ CHECK(!RE("foo\\$").FullMatch("foo$bar"));
+ CHECK(!RE("fo|bar").FullMatch("fox"));
+
+ // Uncomment the following if we change the handling of '$' to
+ // prevent it from matching a trailing newline
+ if (false) {
+ // Check that we don't get bitten by pcre's special handling of a
+ // '\n' at the end of the string matching '$'
+ CHECK(!RE("foo$").PartialMatch("foo\n"));
+ }
+
+ // Number of args
+ int a[16];
+ CHECK(RE("").FullMatch(""));
+
+  memset(a, 0, sizeof(a));
+ CHECK(RE("(\\d){1}").FullMatch("1",
+ &a[0]));
+ CHECK_EQ(a[0], 1);
+
+  memset(a, 0, sizeof(a));
+ CHECK(RE("(\\d)(\\d)").FullMatch("12",
+ &a[0], &a[1]));
+ CHECK_EQ(a[0], 1);
+ CHECK_EQ(a[1], 2);
+
+  memset(a, 0, sizeof(a));
+ CHECK(RE("(\\d)(\\d)(\\d)").FullMatch("123",
+ &a[0], &a[1], &a[2]));
+ CHECK_EQ(a[0], 1);
+ CHECK_EQ(a[1], 2);
+ CHECK_EQ(a[2], 3);
+
+  memset(a, 0, sizeof(a));
+ CHECK(RE("(\\d)(\\d)(\\d)(\\d)").FullMatch("1234",
+ &a[0], &a[1], &a[2], &a[3]));
+ CHECK_EQ(a[0], 1);
+ CHECK_EQ(a[1], 2);
+ CHECK_EQ(a[2], 3);
+ CHECK_EQ(a[3], 4);
+
+  memset(a, 0, sizeof(a));
+ CHECK(RE("(\\d)(\\d)(\\d)(\\d)(\\d)").FullMatch("12345",
+ &a[0], &a[1], &a[2],
+ &a[3], &a[4]));
+ CHECK_EQ(a[0], 1);
+ CHECK_EQ(a[1], 2);
+ CHECK_EQ(a[2], 3);
+ CHECK_EQ(a[3], 4);
+ CHECK_EQ(a[4], 5);
+
+  memset(a, 0, sizeof(a));
+ CHECK(RE("(\\d)(\\d)(\\d)(\\d)(\\d)(\\d)").FullMatch("123456",
+ &a[0], &a[1], &a[2],
+ &a[3], &a[4], &a[5]));
+ CHECK_EQ(a[0], 1);
+ CHECK_EQ(a[1], 2);
+ CHECK_EQ(a[2], 3);
+ CHECK_EQ(a[3], 4);
+ CHECK_EQ(a[4], 5);
+ CHECK_EQ(a[5], 6);
+
+  memset(a, 0, sizeof(a));
+ CHECK(RE("(\\d)(\\d)(\\d)(\\d)(\\d)(\\d)(\\d)").FullMatch("1234567",
+ &a[0], &a[1], &a[2], &a[3],
+ &a[4], &a[5], &a[6]));
+ CHECK_EQ(a[0], 1);
+ CHECK_EQ(a[1], 2);
+ CHECK_EQ(a[2], 3);
+ CHECK_EQ(a[3], 4);
+ CHECK_EQ(a[4], 5);
+ CHECK_EQ(a[5], 6);
+ CHECK_EQ(a[6], 7);
+
+  memset(a, 0, sizeof(a));
+ CHECK(RE("(\\d)(\\d)(\\d)(\\d)(\\d)(\\d)(\\d)(\\d)"
+ "(\\d)(\\d)(\\d)(\\d)(\\d)(\\d)(\\d)(\\d)").FullMatch(
+ "1234567890123456",
+ &a[0], &a[1], &a[2], &a[3],
+ &a[4], &a[5], &a[6], &a[7],
+ &a[8], &a[9], &a[10], &a[11],
+ &a[12], &a[13], &a[14], &a[15]));
+ CHECK_EQ(a[0], 1);
+ CHECK_EQ(a[1], 2);
+ CHECK_EQ(a[2], 3);
+ CHECK_EQ(a[3], 4);
+ CHECK_EQ(a[4], 5);
+ CHECK_EQ(a[5], 6);
+ CHECK_EQ(a[6], 7);
+ CHECK_EQ(a[7], 8);
+ CHECK_EQ(a[8], 9);
+ CHECK_EQ(a[9], 0);
+ CHECK_EQ(a[10], 1);
+ CHECK_EQ(a[11], 2);
+ CHECK_EQ(a[12], 3);
+ CHECK_EQ(a[13], 4);
+ CHECK_EQ(a[14], 5);
+ CHECK_EQ(a[15], 6);
+
+ /***** PartialMatch *****/
+
+ printf("Testing PartialMatch\n");
+
+ CHECK(RE("h.*o").PartialMatch("hello"));
+ CHECK(RE("h.*o").PartialMatch("othello"));
+ CHECK(RE("h.*o").PartialMatch("hello!"));
+ CHECK(RE("((((((((((((((((((((x))))))))))))))))))))").PartialMatch("x"));
+
+ /***** other tests *****/
+
+ RadixTests();
+ TestReplace();
+ TestExtract();
+ TestConsume();
+ TestFindAndConsume();
+ TestQuoteMetaAll();
+ TestMatchNumberPeculiarity();
+
+ // Check the pattern() accessor
+ {
+ const string kPattern = "http://([^/]+)/.*";
+ const RE re(kPattern);
+ CHECK_EQ(kPattern, re.pattern());
+ }
+
+ // Check RE error field.
+ {
+ RE re("foo");
+ CHECK(re.error().empty()); // Must have no error
+ }
+
+#ifdef SUPPORT_UTF8
+ // Check UTF-8 handling
+ {
+ printf("Testing UTF-8 handling\n");
+
+ // Three Japanese characters (nihongo)
+ const unsigned char utf8_string[] = {
+ 0xe6, 0x97, 0xa5, // 65e5
+      0xe6, 0x9c, 0xac, // 672c
+ 0xe8, 0xaa, 0x9e, // 8a9e
+ 0
+ };
+ const unsigned char utf8_pattern[] = {
+ '.',
+      0xe6, 0x9c, 0xac, // 672c
+ '.',
+ 0
+ };
+
+ // Both should match in either mode, bytes or UTF-8
+ RE re_test1(".........");
+ CHECK(re_test1.FullMatch(utf8_string));
+ RE re_test2("...", pcrecpp::UTF8());
+ CHECK(re_test2.FullMatch(utf8_string));
+
+ // Check that '.' matches one byte or UTF-8 character
+ // according to the mode.
+ string ss;
+ RE re_test3("(.)");
+ CHECK(re_test3.PartialMatch(utf8_string, &ss));
+ CHECK_EQ(ss, string("\xe6"));
+ RE re_test4("(.)", pcrecpp::UTF8());
+ CHECK(re_test4.PartialMatch(utf8_string, &ss));
+ CHECK_EQ(ss, string("\xe6\x97\xa5"));
+
+ // Check that string matches itself in either mode
+ RE re_test5(utf8_string);
+ CHECK(re_test5.FullMatch(utf8_string));
+ RE re_test6(utf8_string, pcrecpp::UTF8());
+ CHECK(re_test6.FullMatch(utf8_string));
+
+ // Check that pattern matches string only in UTF8 mode
+ RE re_test7(utf8_pattern);
+ CHECK(!re_test7.FullMatch(utf8_string));
+ RE re_test8(utf8_pattern, pcrecpp::UTF8());
+ CHECK(re_test8.FullMatch(utf8_string));
+ }
+
+ // Check that ungreedy, UTF8 regular expressions don't match when they
+ // oughtn't -- see bug 82246.
+ {
+ // This code always worked.
+ const char* pattern = "\\w+X";
+ const string target = "a aX";
+ RE match_sentence(pattern);
+ RE match_sentence_re(pattern, pcrecpp::UTF8());
+
+ CHECK(!match_sentence.FullMatch(target));
+ CHECK(!match_sentence_re.FullMatch(target));
+ }
+
+ {
+ const char* pattern = "(?U)\\w+X";
+ const string target = "a aX";
+ RE match_sentence(pattern);
+ RE match_sentence_re(pattern, pcrecpp::UTF8());
+
+ CHECK(!match_sentence.FullMatch(target));
+ CHECK(!match_sentence_re.FullMatch(target));
+ }
+#endif /* def SUPPORT_UTF8 */
+
+ printf("Testing error reporting\n");
+
+ { RE re("a\\1"); CHECK(!re.error().empty()); }
+ {
+ RE re("a[x");
+ CHECK(!re.error().empty());
+ }
+ {
+ RE re("a[z-a]");
+ CHECK(!re.error().empty());
+ }
+ {
+ RE re("a[[:foobar:]]");
+ CHECK(!re.error().empty());
+ }
+ {
+ RE re("a(b");
+ CHECK(!re.error().empty());
+ }
+ {
+ RE re("a\\");
+ CHECK(!re.error().empty());
+ }
+
+ // Test that recursion is stopped
+ TestRecursion();
+
+ // Test Options
+ if (getenv("VERBOSE_TEST") != NULL)
+ VERBOSE_TEST = true;
+ TestOptions();
+
+ // Test the constructors
+ TestConstructors();
+
+ // Done
+ printf("OK\n");
+
+ return 0;
+}
diff --git a/src/third_party/pcre-7.4/pcrecpparg.h b/src/third_party/pcre-7.4/pcrecpparg.h
new file mode 100644
index 00000000000..c5bfae0482d
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcrecpparg.h
@@ -0,0 +1,173 @@
+// Copyright (c) 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Sanjay Ghemawat
+
+#ifndef _PCRECPPARG_H
+#define _PCRECPPARG_H
+
+#include <stdlib.h> // for NULL
+#include <string>
+
+#include <pcre.h>
+
+namespace pcrecpp {
+
+class StringPiece;
+
+// Hex/Octal/Binary?
+
+// Special class for parsing into objects that define a ParseFrom() method
+template <class T>
+class _RE_MatchObject {
+ public:
+ static inline bool Parse(const char* str, int n, void* dest) {
+ T* object = reinterpret_cast<T*>(dest);
+ return object->ParseFrom(str, n);
+ }
+};
+
+class PCRECPP_EXP_DEFN Arg {
+ public:
+ // Empty constructor so we can declare arrays of Arg
+ Arg();
+
+ // Constructor specially designed for NULL arguments
+ Arg(void*);
+
+ typedef bool (*Parser)(const char* str, int n, void* dest);
+
+// Type-specific parsers
+#define PCRE_MAKE_PARSER(type,name) \
+ Arg(type* p) : arg_(p), parser_(name) { } \
+ Arg(type* p, Parser parser) : arg_(p), parser_(parser) { }
+
+
+ PCRE_MAKE_PARSER(char, parse_char);
+ PCRE_MAKE_PARSER(unsigned char, parse_uchar);
+ PCRE_MAKE_PARSER(short, parse_short);
+ PCRE_MAKE_PARSER(unsigned short, parse_ushort);
+ PCRE_MAKE_PARSER(int, parse_int);
+ PCRE_MAKE_PARSER(unsigned int, parse_uint);
+ PCRE_MAKE_PARSER(long, parse_long);
+ PCRE_MAKE_PARSER(unsigned long, parse_ulong);
+#if 1
+ PCRE_MAKE_PARSER(long long, parse_longlong);
+#endif
+#if 1
+ PCRE_MAKE_PARSER(unsigned long long, parse_ulonglong);
+#endif
+ PCRE_MAKE_PARSER(float, parse_float);
+ PCRE_MAKE_PARSER(double, parse_double);
+ PCRE_MAKE_PARSER(std::string, parse_string);
+ PCRE_MAKE_PARSER(StringPiece, parse_stringpiece);
+
+#undef PCRE_MAKE_PARSER
+
+ // Generic constructor
+ template <class T> Arg(T*, Parser parser);
+ // Generic constructor template
+ template <class T> Arg(T* p)
+ : arg_(p), parser_(_RE_MatchObject<T>::Parse) {
+ }
+
+ // Parse the data
+ bool Parse(const char* str, int n) const;
+
+ private:
+ void* arg_;
+ Parser parser_;
+
+ static bool parse_null (const char* str, int n, void* dest);
+ static bool parse_char (const char* str, int n, void* dest);
+ static bool parse_uchar (const char* str, int n, void* dest);
+ static bool parse_float (const char* str, int n, void* dest);
+ static bool parse_double (const char* str, int n, void* dest);
+ static bool parse_string (const char* str, int n, void* dest);
+ static bool parse_stringpiece (const char* str, int n, void* dest);
+
+#define PCRE_DECLARE_INTEGER_PARSER(name) \
+ private: \
+ static bool parse_ ## name(const char* str, int n, void* dest); \
+ static bool parse_ ## name ## _radix( \
+ const char* str, int n, void* dest, int radix); \
+ public: \
+ static bool parse_ ## name ## _hex(const char* str, int n, void* dest); \
+ static bool parse_ ## name ## _octal(const char* str, int n, void* dest); \
+ static bool parse_ ## name ## _cradix(const char* str, int n, void* dest)
+
+ PCRE_DECLARE_INTEGER_PARSER(short);
+ PCRE_DECLARE_INTEGER_PARSER(ushort);
+ PCRE_DECLARE_INTEGER_PARSER(int);
+ PCRE_DECLARE_INTEGER_PARSER(uint);
+ PCRE_DECLARE_INTEGER_PARSER(long);
+ PCRE_DECLARE_INTEGER_PARSER(ulong);
+ PCRE_DECLARE_INTEGER_PARSER(longlong);
+ PCRE_DECLARE_INTEGER_PARSER(ulonglong);
+
+#undef PCRE_DECLARE_INTEGER_PARSER
+};
+
+inline Arg::Arg() : arg_(NULL), parser_(parse_null) { }
+inline Arg::Arg(void* p) : arg_(p), parser_(parse_null) { }
+
+inline bool Arg::Parse(const char* str, int n) const {
+ return (*parser_)(str, n, arg_);
+}
+
+// This part of the parser, appropriate only for ints, deals with bases
+#define MAKE_INTEGER_PARSER(type, name) \
+ inline Arg Hex(type* ptr) { \
+ return Arg(ptr, Arg::parse_ ## name ## _hex); } \
+ inline Arg Octal(type* ptr) { \
+ return Arg(ptr, Arg::parse_ ## name ## _octal); } \
+ inline Arg CRadix(type* ptr) { \
+ return Arg(ptr, Arg::parse_ ## name ## _cradix); }
+
+MAKE_INTEGER_PARSER(short, short) /* */
+MAKE_INTEGER_PARSER(unsigned short, ushort) /* */
+MAKE_INTEGER_PARSER(int, int) /* Don't use semicolons */
+MAKE_INTEGER_PARSER(unsigned int, uint) /* after these statements */
+MAKE_INTEGER_PARSER(long, long) /* because they can cause */
+MAKE_INTEGER_PARSER(unsigned long, ulong) /* compiler warnings if */
+#if 1 /* the checking level is */
+MAKE_INTEGER_PARSER(long long, longlong) /* turned up high enough. */
+#endif /* */
+#if 1 /* */
+MAKE_INTEGER_PARSER(unsigned long long, ulonglong) /* */
+#endif
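+
+/* Editorial usage sketch (not part of the original header): these wrappers are
+passed in place of a plain pointer so that the captured text is parsed in the
+requested base, for example:
+
+     int v;
+     pcrecpp::RE("([0-9a-f]+)").FullMatch("ff", pcrecpp::Hex(&v));     v == 255
+     pcrecpp::RE("([0-7]+)").FullMatch("17", pcrecpp::Octal(&v));      v == 15
+     pcrecpp::RE("(\\w+)").FullMatch("0x10", pcrecpp::CRadix(&v));     v == 16
+*/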
+
+#undef PCRE_IS_SET
+#undef PCRE_SET_OR_CLEAR
+#undef MAKE_INTEGER_PARSER
+
+} // namespace pcrecpp
+
+
+#endif /* _PCRECPPARG_H */
diff --git a/src/third_party/pcre-7.4/pcrecpparg.h.in b/src/third_party/pcre-7.4/pcrecpparg.h.in
new file mode 100644
index 00000000000..83cc44b17a5
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcrecpparg.h.in
@@ -0,0 +1,173 @@
+// Copyright (c) 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: Sanjay Ghemawat
+
+#ifndef _PCRECPPARG_H
+#define _PCRECPPARG_H
+
+#include <stdlib.h> // for NULL
+#include <string>
+
+#include <pcre.h>
+
+namespace pcrecpp {
+
+class StringPiece;
+
+// Hex/Octal/Binary?
+
+// Special class for parsing into objects that define a ParseFrom() method
+template <class T>
+class _RE_MatchObject {
+ public:
+ static inline bool Parse(const char* str, int n, void* dest) {
+ T* object = reinterpret_cast<T*>(dest);
+ return object->ParseFrom(str, n);
+ }
+};
+
+class PCRECPP_EXP_DEFN Arg {
+ public:
+ // Empty constructor so we can declare arrays of Arg
+ Arg();
+
+ // Constructor specially designed for NULL arguments
+ Arg(void*);
+
+ typedef bool (*Parser)(const char* str, int n, void* dest);
+
+// Type-specific parsers
+#define PCRE_MAKE_PARSER(type,name) \
+ Arg(type* p) : arg_(p), parser_(name) { } \
+ Arg(type* p, Parser parser) : arg_(p), parser_(parser) { }
+
+
+ PCRE_MAKE_PARSER(char, parse_char);
+ PCRE_MAKE_PARSER(unsigned char, parse_uchar);
+ PCRE_MAKE_PARSER(short, parse_short);
+ PCRE_MAKE_PARSER(unsigned short, parse_ushort);
+ PCRE_MAKE_PARSER(int, parse_int);
+ PCRE_MAKE_PARSER(unsigned int, parse_uint);
+ PCRE_MAKE_PARSER(long, parse_long);
+ PCRE_MAKE_PARSER(unsigned long, parse_ulong);
+#if @pcre_have_long_long@
+ PCRE_MAKE_PARSER(long long, parse_longlong);
+#endif
+#if @pcre_have_ulong_long@
+ PCRE_MAKE_PARSER(unsigned long long, parse_ulonglong);
+#endif
+ PCRE_MAKE_PARSER(float, parse_float);
+ PCRE_MAKE_PARSER(double, parse_double);
+ PCRE_MAKE_PARSER(std::string, parse_string);
+ PCRE_MAKE_PARSER(StringPiece, parse_stringpiece);
+
+#undef PCRE_MAKE_PARSER
+
+ // Generic constructor
+ template <class T> Arg(T*, Parser parser);
+ // Generic constructor template
+ template <class T> Arg(T* p)
+ : arg_(p), parser_(_RE_MatchObject<T>::Parse) {
+ }
+
+ // Parse the data
+ bool Parse(const char* str, int n) const;
+
+ private:
+ void* arg_;
+ Parser parser_;
+
+ static bool parse_null (const char* str, int n, void* dest);
+ static bool parse_char (const char* str, int n, void* dest);
+ static bool parse_uchar (const char* str, int n, void* dest);
+ static bool parse_float (const char* str, int n, void* dest);
+ static bool parse_double (const char* str, int n, void* dest);
+ static bool parse_string (const char* str, int n, void* dest);
+ static bool parse_stringpiece (const char* str, int n, void* dest);
+
+#define PCRE_DECLARE_INTEGER_PARSER(name) \
+ private: \
+ static bool parse_ ## name(const char* str, int n, void* dest); \
+ static bool parse_ ## name ## _radix( \
+ const char* str, int n, void* dest, int radix); \
+ public: \
+ static bool parse_ ## name ## _hex(const char* str, int n, void* dest); \
+ static bool parse_ ## name ## _octal(const char* str, int n, void* dest); \
+ static bool parse_ ## name ## _cradix(const char* str, int n, void* dest)
+
+ PCRE_DECLARE_INTEGER_PARSER(short);
+ PCRE_DECLARE_INTEGER_PARSER(ushort);
+ PCRE_DECLARE_INTEGER_PARSER(int);
+ PCRE_DECLARE_INTEGER_PARSER(uint);
+ PCRE_DECLARE_INTEGER_PARSER(long);
+ PCRE_DECLARE_INTEGER_PARSER(ulong);
+ PCRE_DECLARE_INTEGER_PARSER(longlong);
+ PCRE_DECLARE_INTEGER_PARSER(ulonglong);
+
+#undef PCRE_DECLARE_INTEGER_PARSER
+};
+
+inline Arg::Arg() : arg_(NULL), parser_(parse_null) { }
+inline Arg::Arg(void* p) : arg_(p), parser_(parse_null) { }
+
+inline bool Arg::Parse(const char* str, int n) const {
+ return (*parser_)(str, n, arg_);
+}
+
+// This part of the parser, appropriate only for ints, deals with bases
+#define MAKE_INTEGER_PARSER(type, name) \
+ inline Arg Hex(type* ptr) { \
+ return Arg(ptr, Arg::parse_ ## name ## _hex); } \
+ inline Arg Octal(type* ptr) { \
+ return Arg(ptr, Arg::parse_ ## name ## _octal); } \
+ inline Arg CRadix(type* ptr) { \
+ return Arg(ptr, Arg::parse_ ## name ## _cradix); }
+
+MAKE_INTEGER_PARSER(short, short) /* */
+MAKE_INTEGER_PARSER(unsigned short, ushort) /* */
+MAKE_INTEGER_PARSER(int, int) /* Don't use semicolons */
+MAKE_INTEGER_PARSER(unsigned int, uint) /* after these statements */
+MAKE_INTEGER_PARSER(long, long) /* because they can cause */
+MAKE_INTEGER_PARSER(unsigned long, ulong) /* compiler warnings if */
+#if @pcre_have_long_long@ /* the checking level is */
+MAKE_INTEGER_PARSER(long long, longlong) /* turned up high enough. */
+#endif /* */
+#if @pcre_have_ulong_long@ /* */
+MAKE_INTEGER_PARSER(unsigned long long, ulonglong) /* */
+#endif
+
+#undef PCRE_IS_SET
+#undef PCRE_SET_OR_CLEAR
+#undef MAKE_INTEGER_PARSER
+
+} // namespace pcrecpp
+
+
+#endif /* _PCRECPPARG_H */
diff --git a/src/third_party/pcre-7.4/pcredemo.c b/src/third_party/pcre-7.4/pcredemo.c
new file mode 100644
index 00000000000..4068e3e04d5
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcredemo.c
@@ -0,0 +1,325 @@
+/*************************************************
+* PCRE DEMONSTRATION PROGRAM *
+*************************************************/
+
+/* This is a demonstration program to illustrate the most straightforward ways
+of calling the PCRE regular expression library from a C program. See the
+pcresample documentation for a short discussion.
+
+Compile thuswise:
+ gcc -Wall pcredemo.c -I/usr/local/include -L/usr/local/lib \
+ -R/usr/local/lib -lpcre
+
+Replace "/usr/local/include" and "/usr/local/lib" with wherever the include and
+library files for PCRE are installed on your system. You don't need -I and -L
+if PCRE is installed in the standard system libraries. Only some operating
+systems (e.g. Solaris) use the -R option.
+*/
+
+
+#include <stdio.h>
+#include <string.h>
+#include <pcre.h>
+
+#define OVECCOUNT 30 /* should be a multiple of 3 */
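+/* Editorial note (not in the original demo): pcre_exec() uses the top third of
+the output vector as workspace, so OVECCOUNT = 30 leaves room for at most
+OVECCOUNT/3 = 10 offset pairs: the whole match plus 9 captured substrings. */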
+
+
+int main(int argc, char **argv)
+{
+pcre *re;
+const char *error;
+char *pattern;
+char *subject;
+unsigned char *name_table;
+int erroffset;
+int find_all;
+int namecount;
+int name_entry_size;
+int ovector[OVECCOUNT];
+int subject_length;
+int rc, i;
+
+
+/**************************************************************************
+* First, sort out the command line. There is only one possible option at *
+* the moment, "-g" to request repeated matching to find all occurrences, *
+* like Perl's /g option. We set the variable find_all to a non-zero value *
+* if the -g option is present. Apart from that, there must be exactly two *
+* arguments. *
+**************************************************************************/
+
+find_all = 0;
+for (i = 1; i < argc; i++)
+ {
+ if (strcmp(argv[i], "-g") == 0) find_all = 1;
+ else break;
+ }
+
+/* After the options, we require exactly two arguments, which are the pattern,
+and the subject string. */
+
+if (argc - i != 2)
+ {
+ printf("Two arguments required: a regex and a subject string\n");
+ return 1;
+ }
+
+pattern = argv[i];
+subject = argv[i+1];
+subject_length = (int)strlen(subject);
+
+
+/*************************************************************************
+* Now we are going to compile the regular expression pattern, and handle *
+* any errors that are detected.                                          *
+*************************************************************************/
+
+re = pcre_compile(
+ pattern, /* the pattern */
+ 0, /* default options */
+ &error, /* for error message */
+ &erroffset, /* for error offset */
+ NULL); /* use default character tables */
+
+/* Compilation failed: print the error message and exit */
+
+if (re == NULL)
+ {
+ printf("PCRE compilation failed at offset %d: %s\n", erroffset, error);
+ return 1;
+ }
+
+
+/*************************************************************************
+* If the compilation succeeded, we call PCRE again, in order to do a *
+* pattern match against the subject string. This does just ONE match. If *
+* further matching is needed, it will be done below. *
+*************************************************************************/
+
+rc = pcre_exec(
+ re, /* the compiled pattern */
+ NULL, /* no extra data - we didn't study the pattern */
+ subject, /* the subject string */
+ subject_length, /* the length of the subject */
+ 0, /* start at offset 0 in the subject */
+ 0, /* default options */
+ ovector, /* output vector for substring information */
+ OVECCOUNT); /* number of elements in the output vector */
+
+/* Matching failed: handle error cases */
+
+if (rc < 0)
+ {
+ switch(rc)
+ {
+ case PCRE_ERROR_NOMATCH: printf("No match\n"); break;
+ /*
+ Handle other special cases if you like
+ */
+ default: printf("Matching error %d\n", rc); break;
+ }
+ pcre_free(re); /* Release memory used for the compiled pattern */
+ return 1;
+ }
+
+/* Match succeeded */
+
+printf("\nMatch succeeded at offset %d\n", ovector[0]);
+
+
+/*************************************************************************
+* We have found the first match within the subject string. If the output *
+* vector wasn't big enough, set its size to the maximum. Then output any *
+* substrings that were captured. *
+*************************************************************************/
+
+/* The output vector wasn't big enough */
+
+if (rc == 0)
+ {
+ rc = OVECCOUNT/3;
+ printf("ovector only has room for %d captured substrings\n", rc - 1);
+ }
+
+/* Show substrings stored in the output vector by number. Obviously, in a real
+application you might want to do things other than print them. */
+
+for (i = 0; i < rc; i++)
+ {
+ char *substring_start = subject + ovector[2*i];
+ int substring_length = ovector[2*i+1] - ovector[2*i];
+ printf("%2d: %.*s\n", i, substring_length, substring_start);
+ }
+
+
+/**************************************************************************
+* That concludes the basic part of this demonstration program. We have *
+* compiled a pattern, and performed a single match. The code that follows *
+* first shows how to access named substrings, and then how to code for *
+* repeated matches on the same subject. *
+**************************************************************************/
+
+/* See if there are any named substrings, and if so, show them by name. First
+we have to extract the count of named parentheses from the pattern. */
+
+(void)pcre_fullinfo(
+ re, /* the compiled pattern */
+ NULL, /* no extra data - we didn't study the pattern */
+ PCRE_INFO_NAMECOUNT, /* number of named substrings */
+ &namecount); /* where to put the answer */
+
+if (namecount <= 0) printf("No named substrings\n"); else
+ {
+ unsigned char *tabptr;
+ printf("Named substrings\n");
+
+ /* Before we can access the substrings, we must extract the table for
+ translating names to numbers, and the size of each entry in the table. */
+
+ (void)pcre_fullinfo(
+ re, /* the compiled pattern */
+ NULL, /* no extra data - we didn't study the pattern */
+ PCRE_INFO_NAMETABLE, /* address of the table */
+ &name_table); /* where to put the answer */
+
+ (void)pcre_fullinfo(
+ re, /* the compiled pattern */
+ NULL, /* no extra data - we didn't study the pattern */
+ PCRE_INFO_NAMEENTRYSIZE, /* size of each entry in the table */
+ &name_entry_size); /* where to put the answer */
+
+ /* Now we can scan the table and, for each entry, print the number, the name,
+ and the substring itself. */
+
+ tabptr = name_table;
+ for (i = 0; i < namecount; i++)
+ {
+ int n = (tabptr[0] << 8) | tabptr[1];
+ printf("(%d) %*s: %.*s\n", n, name_entry_size - 3, tabptr + 2,
+ ovector[2*n+1] - ovector[2*n], subject + ovector[2*n]);
+ tabptr += name_entry_size;
+ }
+ }
+
+
+/*************************************************************************
+* If the "-g" option was given on the command line, we want to continue *
+* to search for additional matches in the subject string, in a similar *
+* way to the /g option in Perl. This turns out to be trickier than you *
+* might think because of the possibility of matching an empty string. *
+* What happens is as follows: *
+* *
+* If the previous match was NOT for an empty string, we can just start *
+* the next match at the end of the previous one. *
+* *
+* If the previous match WAS for an empty string, we can't do that, as it *
+* would lead to an infinite loop. Instead, a special call of pcre_exec() *
+* is made with the PCRE_NOTEMPTY and PCRE_ANCHORED flags set. The first *
+* of these tells PCRE that an empty string is not a valid match; other *
+* possibilities must be tried. The second flag restricts PCRE to one *
+* match attempt at the initial string position. If this match succeeds, *
+* an alternative to the empty string match has been found, and we can *
+* proceed round the loop. *
+*************************************************************************/
+
+if (!find_all)
+ {
+ pcre_free(re); /* Release the memory used for the compiled pattern */
+ return 0; /* Finish unless -g was given */
+ }
+
+/* Loop for second and subsequent matches */
+
+for (;;)
+ {
+ int options = 0; /* Normally no options */
+ int start_offset = ovector[1]; /* Start at end of previous match */
+
+ /* If the previous match was for an empty string, we are finished if we are
+ at the end of the subject. Otherwise, arrange to run another match at the
+ same point to see if a non-empty match can be found. */
+
+ if (ovector[0] == ovector[1])
+ {
+ if (ovector[0] == subject_length) break;
+ options = PCRE_NOTEMPTY | PCRE_ANCHORED;
+ }
+
+ /* Run the next matching operation */
+
+ rc = pcre_exec(
+ re, /* the compiled pattern */
+ NULL, /* no extra data - we didn't study the pattern */
+ subject, /* the subject string */
+ subject_length, /* the length of the subject */
+ start_offset, /* starting offset in the subject */
+ options, /* options */
+ ovector, /* output vector for substring information */
+ OVECCOUNT); /* number of elements in the output vector */
+
+ /* This time, a result of NOMATCH isn't an error. If the value in "options"
+ is zero, it just means we have found all possible matches, so the loop ends.
+ Otherwise, it means we have failed to find a non-empty-string match at a
+ point where there was a previous empty-string match. In this case, we do what
+ Perl does: advance the matching position by one, and continue. We do this by
+ setting the "end of previous match" offset, because that is picked up at the
+ top of the loop as the point at which to start again. */
+
+ if (rc == PCRE_ERROR_NOMATCH)
+ {
+ if (options == 0) break;
+ ovector[1] = start_offset + 1;
+ continue; /* Go round the loop again */
+ }
+
+ /* Other matching errors are not recoverable. */
+
+ if (rc < 0)
+ {
+ printf("Matching error %d\n", rc);
+ pcre_free(re); /* Release memory used for the compiled pattern */
+ return 1;
+ }
+
+ /* Match succeeded */
+
+ printf("\nMatch succeeded again at offset %d\n", ovector[0]);
+
+ /* The match succeeded, but the output vector wasn't big enough. */
+
+ if (rc == 0)
+ {
+ rc = OVECCOUNT/3;
+ printf("ovector only has room for %d captured substrings\n", rc - 1);
+ }
+
+ /* As before, show substrings stored in the output vector by number, and then
+ also any named substrings. */
+
+ for (i = 0; i < rc; i++)
+ {
+ char *substring_start = subject + ovector[2*i];
+ int substring_length = ovector[2*i+1] - ovector[2*i];
+ printf("%2d: %.*s\n", i, substring_length, substring_start);
+ }
+
+ if (namecount <= 0) printf("No named substrings\n"); else
+ {
+ unsigned char *tabptr = name_table;
+ printf("Named substrings\n");
+ for (i = 0; i < namecount; i++)
+ {
+ int n = (tabptr[0] << 8) | tabptr[1];
+ printf("(%d) %*s: %.*s\n", n, name_entry_size - 3, tabptr + 2,
+ ovector[2*n+1] - ovector[2*n], subject + ovector[2*n]);
+ tabptr += name_entry_size;
+ }
+ }
+ } /* End of loop to find second and subsequent matches */
+
+printf("\n");
+pcre_free(re); /* Release memory used for the compiled pattern */
+return 0;
+}
+
+/* End of pcredemo.c */
diff --git a/src/third_party/pcre-7.4/pcregrep.c b/src/third_party/pcre-7.4/pcregrep.c
new file mode 100644
index 00000000000..b44574e1a4a
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcregrep.c
@@ -0,0 +1,2106 @@
+/*************************************************
+* pcregrep program *
+*************************************************/
+
+/* This is a grep program that uses the PCRE regular expression library to do
+its pattern matching. On a Unix or Win32 system it can recurse into
+directories.
+
+ Copyright (c) 1997-2007 University of Cambridge
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <ctype.h>
+#include <locale.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#include "pcre.h"
+
+#define FALSE 0
+#define TRUE 1
+
+typedef int BOOL;
+
+#define MAX_PATTERN_COUNT 100
+
+#if BUFSIZ > 8192
+#define MBUFTHIRD BUFSIZ
+#else
+#define MBUFTHIRD 8192
+#endif
+
+/* Values for the "filenames" variable, which specifies options for file name
+output. The order is important; it is assumed that a file name is wanted for
+all values greater than FN_DEFAULT. */
+
+enum { FN_NONE, FN_DEFAULT, FN_ONLY, FN_NOMATCH_ONLY, FN_FORCE };
+
+/* Actions for the -d and -D options */
+
+enum { dee_READ, dee_SKIP, dee_RECURSE };
+enum { DEE_READ, DEE_SKIP };
+
+/* Actions for special processing options (flag bits) */
+
+#define PO_WORD_MATCH 0x0001
+#define PO_LINE_MATCH 0x0002
+#define PO_FIXED_STRINGS 0x0004
+
+/* Line ending types */
+
+enum { EL_LF, EL_CR, EL_CRLF, EL_ANY, EL_ANYCRLF };
+
+
+
+/*************************************************
+* Global variables *
+*************************************************/
+
+/* Jeffrey Friedl has some debugging requirements that are not part of the
+regular code. */
+
+#ifdef JFRIEDL_DEBUG
+static int S_arg = -1;
+static unsigned int jfriedl_XR = 0; /* repeat regex attempt this many times */
+static unsigned int jfriedl_XT = 0; /* replicate text this many times */
+static const char *jfriedl_prefix = "";
+static const char *jfriedl_postfix = "";
+#endif
+
+static int endlinetype;
+
+static char *colour_string = (char *)"1;31";
+static char *colour_option = NULL;
+static char *dee_option = NULL;
+static char *DEE_option = NULL;
+static char *newline = NULL;
+static char *pattern_filename = NULL;
+static char *stdin_name = (char *)"(standard input)";
+static char *locale = NULL;
+
+static const unsigned char *pcretables = NULL;
+
+static int pattern_count = 0;
+static pcre **pattern_list = NULL;
+static pcre_extra **hints_list = NULL;
+
+static char *include_pattern = NULL;
+static char *exclude_pattern = NULL;
+
+static pcre *include_compiled = NULL;
+static pcre *exclude_compiled = NULL;
+
+static int after_context = 0;
+static int before_context = 0;
+static int both_context = 0;
+static int dee_action = dee_READ;
+static int DEE_action = DEE_READ;
+static int error_count = 0;
+static int filenames = FN_DEFAULT;
+static int process_options = 0;
+
+static BOOL count_only = FALSE;
+static BOOL do_colour = FALSE;
+static BOOL hyphenpending = FALSE;
+static BOOL invert = FALSE;
+static BOOL multiline = FALSE;
+static BOOL number = FALSE;
+static BOOL only_matching = FALSE;
+static BOOL quiet = FALSE;
+static BOOL silent = FALSE;
+static BOOL utf8 = FALSE;
+
+/* Structure for options and list of them */
+
+enum { OP_NODATA, OP_STRING, OP_OP_STRING, OP_NUMBER, OP_OP_NUMBER,
+ OP_PATLIST };
+
+typedef struct option_item {
+ int type;
+ int one_char;
+ void *dataptr;
+ const char *long_name;
+ const char *help_text;
+} option_item;
+
+/* Options without a single-letter equivalent get a negative value. This can be
+used to identify them. */
+
+#define N_COLOUR (-1)
+#define N_EXCLUDE (-2)
+#define N_HELP (-3)
+#define N_INCLUDE (-4)
+#define N_LABEL (-5)
+#define N_LOCALE (-6)
+#define N_NULL (-7)
+
+static option_item optionlist[] = {
+ { OP_NODATA, N_NULL, NULL, "", " terminate options" },
+ { OP_NODATA, N_HELP, NULL, "help", "display this help and exit" },
+ { OP_NUMBER, 'A', &after_context, "after-context=number", "set number of following context lines" },
+ { OP_NUMBER, 'B', &before_context, "before-context=number", "set number of prior context lines" },
+ { OP_OP_STRING, N_COLOUR, &colour_option, "color=option", "matched text color option" },
+ { OP_NUMBER, 'C', &both_context, "context=number", "set number of context lines, before & after" },
+ { OP_NODATA, 'c', NULL, "count", "print only a count of matching lines per FILE" },
+ { OP_OP_STRING, N_COLOUR, &colour_option, "colour=option", "matched text colour option" },
+ { OP_STRING, 'D', &DEE_option, "devices=action","how to handle devices, FIFOs, and sockets" },
+ { OP_STRING, 'd', &dee_option, "directories=action", "how to handle directories" },
+ { OP_PATLIST, 'e', NULL, "regex(p)", "specify pattern (may be used more than once)" },
+ { OP_NODATA, 'F', NULL, "fixed_strings", "patterns are sets of newline-separated strings" },
+ { OP_STRING, 'f', &pattern_filename, "file=path", "read patterns from file" },
+ { OP_NODATA, 'H', NULL, "with-filename", "force the prefixing filename on output" },
+ { OP_NODATA, 'h', NULL, "no-filename", "suppress the prefixing filename on output" },
+ { OP_NODATA, 'i', NULL, "ignore-case", "ignore case distinctions" },
+ { OP_NODATA, 'l', NULL, "files-with-matches", "print only FILE names containing matches" },
+ { OP_NODATA, 'L', NULL, "files-without-match","print only FILE names not containing matches" },
+ { OP_STRING, N_LABEL, &stdin_name, "label=name", "set name for standard input" },
+ { OP_STRING, N_LOCALE, &locale, "locale=locale", "use the named locale" },
+ { OP_NODATA, 'M', NULL, "multiline", "run in multiline mode" },
+ { OP_STRING, 'N', &newline, "newline=type", "specify newline type (CR, LF, CRLF, ANYCRLF or ANY)" },
+ { OP_NODATA, 'n', NULL, "line-number", "print line number with output lines" },
+ { OP_NODATA, 'o', NULL, "only-matching", "show only the part of the line that matched" },
+ { OP_NODATA, 'q', NULL, "quiet", "suppress output, just set return code" },
+ { OP_NODATA, 'r', NULL, "recursive", "recursively scan sub-directories" },
+ { OP_STRING, N_EXCLUDE,&exclude_pattern, "exclude=pattern","exclude matching files when recursing" },
+ { OP_STRING, N_INCLUDE,&include_pattern, "include=pattern","include matching files when recursing" },
+#ifdef JFRIEDL_DEBUG
+ { OP_OP_NUMBER, 'S', &S_arg, "jeffS", "replace matched (sub)string with X" },
+#endif
+ { OP_NODATA, 's', NULL, "no-messages", "suppress error messages" },
+ { OP_NODATA, 'u', NULL, "utf-8", "use UTF-8 mode" },
+ { OP_NODATA, 'V', NULL, "version", "print version information and exit" },
+ { OP_NODATA, 'v', NULL, "invert-match", "select non-matching lines" },
+ { OP_NODATA, 'w', NULL, "word-regex(p)", "force patterns to match only as words" },
+ { OP_NODATA, 'x', NULL, "line-regex(p)", "force patterns to match only whole lines" },
+ { OP_NODATA, 0, NULL, NULL, NULL }
+};
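+
+/* Illustrative example of combining the options above (the file and directory
+names are, of course, hypothetical):
+
+ pcregrep -rn --include='\.c$' 'pcre_exec' src
+
+recurses into "src" (-r), prints line numbers (-n), and, while recursing,
+looks only at path names that match the --include pattern. */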
+
+/* Tables for prefixing and suffixing patterns, according to the -w, -x, and -F
+options. These set the 1, 2, and 4 bits in process_options, respectively. Note
+that the combination of -w and -x has the same effect as -x on its own, so we
+can treat them as the same. */
+
+static const char *prefix[] = {
+ "", "\\b", "^(?:", "^(?:", "\\Q", "\\b\\Q", "^(?:\\Q", "^(?:\\Q" };
+
+static const char *suffix[] = {
+ "", "\\b", ")$", ")$", "\\E", "\\E\\b", "\\E)$", "\\E)$" };
+
+/* UTF-8 tables - used only when the newline setting is "any". */
+
+const int utf8_table3[] = { 0xff, 0x1f, 0x0f, 0x07, 0x03, 0x01};
+
+const char utf8_table4[] = {
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
+ 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5 };
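+
+/* Worked example of how end_of_line() and previous_line() use these tables:
+for the three-byte sequence 0xE2 0x82 0xAC (U+20AC, the Euro sign), the lead
+byte gives utf8_table4[0xE2 & 0x3f] == 2 additional bytes and a payload mask
+of utf8_table3[2] == 0x0f, so the character is assembled as
+(0xE2 & 0x0f) << 12 | (0x82 & 0x3f) << 6 | (0xAC & 0x3f) == 0x20AC. */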
+
+
+
+/*************************************************
+* OS-specific functions *
+*************************************************/
+
+/* These functions are defined so that they can be made system specific,
+although at present the only ones are for Unix, Win32, and for "no support". */
+
+
+/************* Directory scanning in Unix ***********/
+
+#if defined HAVE_SYS_STAT_H && defined HAVE_DIRENT_H && defined HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <dirent.h>
+
+typedef DIR directory_type;
+
+static int
+isdirectory(char *filename)
+{
+struct stat statbuf;
+if (stat(filename, &statbuf) < 0)
+ return 0; /* In the expectation that opening as a file will fail */
+return ((statbuf.st_mode & S_IFMT) == S_IFDIR)? '/' : 0;
+}
+
+static directory_type *
+opendirectory(char *filename)
+{
+return opendir(filename);
+}
+
+static char *
+readdirectory(directory_type *dir)
+{
+for (;;)
+ {
+ struct dirent *dent = readdir(dir);
+ if (dent == NULL) return NULL;
+ if (strcmp(dent->d_name, ".") != 0 && strcmp(dent->d_name, "..") != 0)
+ return dent->d_name;
+ }
+/* Control never reaches here */
+}
+
+static void
+closedirectory(directory_type *dir)
+{
+closedir(dir);
+}
+
+
+/************* Test for regular file in Unix **********/
+
+static int
+isregfile(char *filename)
+{
+struct stat statbuf;
+if (stat(filename, &statbuf) < 0)
+ return 1; /* In the expectation that opening as a file will fail */
+return (statbuf.st_mode & S_IFMT) == S_IFREG;
+}
+
+
+/************* Test stdout for being a terminal in Unix **********/
+
+static BOOL
+is_stdout_tty(void)
+{
+return isatty(fileno(stdout));
+}
+
+
+/************* Directory scanning in Win32 ***********/
+
+/* I (Philip Hazel) have no means of testing this code. It was contributed by
+Lionel Fourquaux. David Burgess added a patch to define INVALID_FILE_ATTRIBUTES
+when it did not exist. */
+
+
+#elif HAVE_WINDOWS_H
+
+#ifndef STRICT
+# define STRICT
+#endif
+#ifndef WIN32_LEAN_AND_MEAN
+# define WIN32_LEAN_AND_MEAN
+#endif
+#ifndef INVALID_FILE_ATTRIBUTES
+#define INVALID_FILE_ATTRIBUTES 0xFFFFFFFF
+#endif
+
+#include <windows.h>
+
+typedef struct directory_type
+{
+HANDLE handle;
+BOOL first;
+WIN32_FIND_DATA data;
+} directory_type;
+
+int
+isdirectory(char *filename)
+{
+DWORD attr = GetFileAttributes(filename);
+if (attr == INVALID_FILE_ATTRIBUTES)
+ return 0;
+return ((attr & FILE_ATTRIBUTE_DIRECTORY) != 0) ? '/' : 0;
+}
+
+directory_type *
+opendirectory(char *filename)
+{
+size_t len;
+char *pattern;
+directory_type *dir;
+DWORD err;
+len = strlen(filename);
+pattern = (char *) malloc(len + 3);
+dir = (directory_type *) malloc(sizeof(*dir));
+if ((pattern == NULL) || (dir == NULL))
+ {
+ fprintf(stderr, "pcregrep: malloc failed\n");
+ exit(2);
+ }
+memcpy(pattern, filename, len);
+memcpy(&(pattern[len]), "\\*", 3);
+dir->handle = FindFirstFile(pattern, &(dir->data));
+if (dir->handle != INVALID_HANDLE_VALUE)
+ {
+ free(pattern);
+ dir->first = TRUE;
+ return dir;
+ }
+err = GetLastError();
+free(pattern);
+free(dir);
+errno = (err == ERROR_ACCESS_DENIED) ? EACCES : ENOENT;
+return NULL;
+}
+
+char *
+readdirectory(directory_type *dir)
+{
+for (;;)
+ {
+ if (!dir->first)
+ {
+ if (!FindNextFile(dir->handle, &(dir->data)))
+ return NULL;
+ }
+ else
+ {
+ dir->first = FALSE;
+ }
+ if (strcmp(dir->data.cFileName, ".") != 0 && strcmp(dir->data.cFileName, "..") != 0)
+ return dir->data.cFileName;
+ }
+#ifndef _MSC_VER
+return NULL; /* Keep compiler happy; never executed */
+#endif
+}
+
+void
+closedirectory(directory_type *dir)
+{
+FindClose(dir->handle);
+free(dir);
+}
+
+
+/************* Test for regular file in Win32 **********/
+
+/* I don't know how to do this, or if it can be done; assume all paths are
+regular if they are not directories. */
+
+int isregfile(char *filename)
+{
+return !isdirectory(filename);
+}
+
+
+/************* Test stdout for being a terminal in Win32 **********/
+
+/* I don't know how to do this; assume never */
+
+static BOOL
+is_stdout_tty(void)
+{
+return FALSE;
+}
+
+
+/************* Directory scanning when we can't do it ***********/
+
+/* The type is void, and apart from isdirectory(), the functions do nothing. */
+
+#else
+
+typedef void directory_type;
+
+int isdirectory(char *filename) { return 0; }
+directory_type * opendirectory(char *filename) { return (directory_type*)0;}
+char *readdirectory(directory_type *dir) { return (char*)0;}
+void closedirectory(directory_type *dir) {}
+
+
+/************* Test for regular when we can't do it **********/
+
+/* Assume all files are regular. */
+
+int isregfile(char *filename) { return 1; }
+
+
+/************* Test stdout for being a terminal when we can't do it **********/
+
+static BOOL
+is_stdout_tty(void)
+{
+return FALSE;
+}
+
+
+#endif
+
+
+
+#ifndef HAVE_STRERROR
+/*************************************************
+* Provide strerror() for non-ANSI libraries *
+*************************************************/
+
+/* Some old-fashioned systems still around (e.g. SunOS4) don't have strerror()
+in their libraries, but can provide the same facility by this simple
+alternative function. */
+
+extern int sys_nerr;
+extern char *sys_errlist[];
+
+char *
+strerror(int n)
+{
+if (n < 0 || n >= sys_nerr) return "unknown error number";
+return sys_errlist[n];
+}
+#endif /* HAVE_STRERROR */
+
+
+
+/*************************************************
+* Find end of line *
+*************************************************/
+
+/* The length of the endline sequence that is found is set via lenptr. This may
+be zero at the very end of the file if there is no line-ending sequence there.
+
+Arguments:
+ p current position in line
+ endptr end of available data
+ lenptr where to put the length of the eol sequence
+
+Returns: pointer to the last byte of the line
+*/
+
+static char *
+end_of_line(char *p, char *endptr, int *lenptr)
+{
+switch(endlinetype)
+ {
+ default: /* Just in case */
+ case EL_LF:
+ while (p < endptr && *p != '\n') p++;
+ if (p < endptr)
+ {
+ *lenptr = 1;
+ return p + 1;
+ }
+ *lenptr = 0;
+ return endptr;
+
+ case EL_CR:
+ while (p < endptr && *p != '\r') p++;
+ if (p < endptr)
+ {
+ *lenptr = 1;
+ return p + 1;
+ }
+ *lenptr = 0;
+ return endptr;
+
+ case EL_CRLF:
+ for (;;)
+ {
+ while (p < endptr && *p != '\r') p++;
+ if (++p >= endptr)
+ {
+ *lenptr = 0;
+ return endptr;
+ }
+ if (*p == '\n')
+ {
+ *lenptr = 2;
+ return p + 1;
+ }
+ }
+ break;
+
+ case EL_ANYCRLF:
+ while (p < endptr)
+ {
+ int extra = 0;
+ register int c = *((unsigned char *)p);
+
+ if (utf8 && c >= 0xc0)
+ {
+ int gcii, gcss;
+ extra = utf8_table4[c & 0x3f]; /* Number of additional bytes */
+ gcss = 6*extra;
+ c = (c & utf8_table3[extra]) << gcss;
+ for (gcii = 1; gcii <= extra; gcii++)
+ {
+ gcss -= 6;
+ c |= (p[gcii] & 0x3f) << gcss;
+ }
+ }
+
+ p += 1 + extra;
+
+ switch (c)
+ {
+ case 0x0a: /* LF */
+ *lenptr = 1;
+ return p;
+
+ case 0x0d: /* CR */
+ if (p < endptr && *p == 0x0a)
+ {
+ *lenptr = 2;
+ p++;
+ }
+ else *lenptr = 1;
+ return p;
+
+ default:
+ break;
+ }
+ } /* End of loop for ANYCRLF case */
+
+ *lenptr = 0; /* Must have hit the end */
+ return endptr;
+
+ case EL_ANY:
+ while (p < endptr)
+ {
+ int extra = 0;
+ register int c = *((unsigned char *)p);
+
+ if (utf8 && c >= 0xc0)
+ {
+ int gcii, gcss;
+ extra = utf8_table4[c & 0x3f]; /* Number of additional bytes */
+ gcss = 6*extra;
+ c = (c & utf8_table3[extra]) << gcss;
+ for (gcii = 1; gcii <= extra; gcii++)
+ {
+ gcss -= 6;
+ c |= (p[gcii] & 0x3f) << gcss;
+ }
+ }
+
+ p += 1 + extra;
+
+ switch (c)
+ {
+ case 0x0a: /* LF */
+ case 0x0b: /* VT */
+ case 0x0c: /* FF */
+ *lenptr = 1;
+ return p;
+
+ case 0x0d: /* CR */
+ if (p < endptr && *p == 0x0a)
+ {
+ *lenptr = 2;
+ p++;
+ }
+ else *lenptr = 1;
+ return p;
+
+ case 0x85: /* NEL */
+ *lenptr = utf8? 2 : 1;
+ return p;
+
+ case 0x2028: /* LS */
+ case 0x2029: /* PS */
+ *lenptr = 3;
+ return p;
+
+ default:
+ break;
+ }
+ } /* End of loop for ANY case */
+
+ *lenptr = 0; /* Must have hit the end */
+ return endptr;
+ } /* End of overall switch */
+}
+
+
+
+/*************************************************
+* Find start of previous line *
+*************************************************/
+
+/* This is called when looking back for before lines to print.
+
+Arguments:
+ p start of the subsequent line
+ startptr start of available data
+
+Returns: pointer to the start of the previous line
+*/
+
+static char *
+previous_line(char *p, char *startptr)
+{
+switch(endlinetype)
+ {
+ default: /* Just in case */
+ case EL_LF:
+ p--;
+ while (p > startptr && p[-1] != '\n') p--;
+ return p;
+
+ case EL_CR:
+ p--;
+ while (p > startptr && p[-1] != '\n') p--;
+ return p;
+
+ case EL_CRLF:
+ for (;;)
+ {
+ p -= 2;
+ while (p > startptr && p[-1] != '\n') p--;
+ if (p <= startptr + 1 || p[-2] == '\r') return p;
+ }
+ return p; /* But control should never get here */
+
+ case EL_ANY:
+ case EL_ANYCRLF:
+ if (*(--p) == '\n' && p > startptr && p[-1] == '\r') p--;
+ if (utf8) while ((*p & 0xc0) == 0x80) p--;
+
+ while (p > startptr)
+ {
+ register int c;
+ char *pp = p - 1;
+
+ if (utf8)
+ {
+ int extra = 0;
+ while ((*pp & 0xc0) == 0x80) pp--;
+ c = *((unsigned char *)pp);
+ if (c >= 0xc0)
+ {
+ int gcii, gcss;
+ extra = utf8_table4[c & 0x3f]; /* Number of additional bytes */
+ gcss = 6*extra;
+ c = (c & utf8_table3[extra]) << gcss;
+ for (gcii = 1; gcii <= extra; gcii++)
+ {
+ gcss -= 6;
+ c |= (pp[gcii] & 0x3f) << gcss;
+ }
+ }
+ }
+ else c = *((unsigned char *)pp);
+
+ if (endlinetype == EL_ANYCRLF) switch (c)
+ {
+ case 0x0a: /* LF */
+ case 0x0d: /* CR */
+ return p;
+
+ default:
+ break;
+ }
+
+ else switch (c)
+ {
+ case 0x0a: /* LF */
+ case 0x0b: /* VT */
+ case 0x0c: /* FF */
+ case 0x0d: /* CR */
+ case 0x85: /* NEL */
+ case 0x2028: /* LS */
+ case 0x2029: /* PS */
+ return p;
+
+ default:
+ break;
+ }
+
+ p = pp; /* Back one character */
+ } /* End of loop for ANY case */
+
+ return startptr; /* Hit start of data */
+ } /* End of overall switch */
+}
+
+
+
+
+
+/*************************************************
+* Print the previous "after" lines *
+*************************************************/
+
+/* This is called if we are about to lose said lines because of buffer filling,
+and at the end of the file. The data in the line is written using fwrite() so
+that a binary zero does not terminate it.
+
+Arguments:
+ lastmatchnumber the number of the last matching line, plus one
+ lastmatchrestart where we restarted after the last match
+ endptr end of available data
+ printname filename for printing
+
+Returns: nothing
+*/
+
+static void do_after_lines(int lastmatchnumber, char *lastmatchrestart,
+ char *endptr, char *printname)
+{
+if (after_context > 0 && lastmatchnumber > 0)
+ {
+ int count = 0;
+ while (lastmatchrestart < endptr && count++ < after_context)
+ {
+ int ellength;
+ char *pp = lastmatchrestart;
+ if (printname != NULL) fprintf(stdout, "%s-", printname);
+ if (number) fprintf(stdout, "%d-", lastmatchnumber++);
+ pp = end_of_line(pp, endptr, &ellength);
+ fwrite(lastmatchrestart, 1, pp - lastmatchrestart, stdout);
+ lastmatchrestart = pp;
+ }
+ hyphenpending = TRUE;
+ }
+}
+
+
+
+/*************************************************
+* Grep an individual file *
+*************************************************/
+
+/* This is called from grep_or_recurse() below. It uses a buffer that is three
+times the value of MBUFTHIRD. The matching point is never allowed to stray into
+the top third of the buffer, thus keeping more of the file available for
+context printing or for multiline scanning. For large files, the pointer will
+be in the middle third most of the time, so the bottom third is available for
+"before" context printing.
+
+Arguments:
+ in the fopened FILE stream
+ printname the file name if it is to be printed for each match
+ or NULL if the file name is not to be printed
+ it cannot be NULL if filenames[_nomatch]_only is set
+
+Returns: 0 if there was at least one match
+ 1 otherwise (no matches)
+*/
+
+static int
+pcregrep(FILE *in, char *printname)
+{
+int rc = 1;
+int linenumber = 1;
+int lastmatchnumber = 0;
+int count = 0;
+int offsets[99];
+char *lastmatchrestart = NULL;
+char buffer[3*MBUFTHIRD];
+char *ptr = buffer;
+char *endptr;
+size_t bufflength;
+BOOL endhyphenpending = FALSE;
+
+/* Do the first read into the start of the buffer and set up the pointer to
+end of what we have. */
+
+bufflength = fread(buffer, 1, 3*MBUFTHIRD, in);
+endptr = buffer + bufflength;
+
+/* Loop while the current pointer is not at the end of the file. For large
+files, endptr will be at the end of the buffer when we are in the middle of the
+file, but ptr will never get there, because as soon as it gets over 2/3 of the
+way, the buffer is shifted left and re-filled. */
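+
+/* For example, with MBUFTHIRD at its minimum of 8192 the buffer holds 24576
+bytes. Once ptr has moved past offset 16384 (and the buffer was filled), the
+code near the end of this loop slides the top two thirds down to the bottom,
+reads another 8192 bytes into the top third, and pulls ptr and any saved match
+position back by 8192. */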
+
+while (ptr < endptr)
+ {
+ int i, endlinelength;
+ int mrc = 0;
+ BOOL match = FALSE;
+ char *t = ptr;
+ size_t length, linelength;
+
+ /* At this point, ptr is at the start of a line. We need to find the length
+ of the subject string to pass to pcre_exec(). In multiline mode, it is the
+ length of the remainder of the data in the buffer. Otherwise, it is the length of
+ the next line. After matching, we always advance by the length of the next
+ line. In multiline mode the PCRE_FIRSTLINE option is used for compiling, so
+ that any match is constrained to be in the first line. */
+
+ t = end_of_line(t, endptr, &endlinelength);
+ linelength = t - ptr - endlinelength;
+ length = multiline? (size_t)(endptr - ptr) : linelength;
+
+ /* Extra processing for Jeffrey Friedl's debugging. */
+
+#ifdef JFRIEDL_DEBUG
+ if (jfriedl_XT || jfriedl_XR)
+ {
+ #include <sys/time.h>
+ #include <time.h>
+ struct timeval start_time, end_time;
+ struct timezone dummy;
+
+ if (jfriedl_XT)
+ {
+ unsigned long newlen = length * jfriedl_XT + strlen(jfriedl_prefix) + strlen(jfriedl_postfix);
+ const char *orig = ptr;
+ ptr = malloc(newlen + 1);
+ if (!ptr) {
+ printf("out of memory");
+ exit(2);
+ }
+ endptr = ptr;
+ strcpy(endptr, jfriedl_prefix); endptr += strlen(jfriedl_prefix);
+ for (i = 0; i < jfriedl_XT; i++) {
+ strncpy(endptr, orig, length);
+ endptr += length;
+ }
+ strcpy(endptr, jfriedl_postfix); endptr += strlen(jfriedl_postfix);
+ length = newlen;
+ }
+
+ if (gettimeofday(&start_time, &dummy) != 0)
+ perror("bad gettimeofday");
+
+
+ for (i = 0; i < jfriedl_XR; i++)
+ match = (pcre_exec(pattern_list[0], hints_list[0], ptr, length, 0, 0, offsets, 99) >= 0);
+
+ if (gettimeofday(&end_time, &dummy) != 0)
+ perror("bad gettimeofday");
+
+ double delta = ((end_time.tv_sec + (end_time.tv_usec / 1000000.0))
+ -
+ (start_time.tv_sec + (start_time.tv_usec / 1000000.0)));
+
+ printf("%s TIMER[%.4f]\n", match ? "MATCH" : "FAIL", delta);
+ return 0;
+ }
+#endif
+
+
+ /* Run through all the patterns until one matches. Note that we don't include
+ the final newline in the subject string. */
+
+ for (i = 0; i < pattern_count; i++)
+ {
+ mrc = pcre_exec(pattern_list[i], hints_list[i], ptr, length, 0, 0,
+ offsets, 99);
+ if (mrc >= 0) { match = TRUE; break; }
+ if (mrc != PCRE_ERROR_NOMATCH)
+ {
+ fprintf(stderr, "pcregrep: pcre_exec() error %d while matching ", mrc);
+ if (pattern_count > 1) fprintf(stderr, "pattern number %d to ", i+1);
+ fprintf(stderr, "this line:\n");
+ fwrite(ptr, 1, linelength, stderr); /* In case binary zero included */
+ fprintf(stderr, "\n");
+ if (error_count == 0 &&
+ (mrc == PCRE_ERROR_MATCHLIMIT || mrc == PCRE_ERROR_RECURSIONLIMIT))
+ {
+ fprintf(stderr, "pcregrep: error %d means that a resource limit "
+ "was exceeded\n", mrc);
+ fprintf(stderr, "pcregrep: check your regex for nested unlimited loops\n");
+ }
+ if (error_count++ > 20)
+ {
+ fprintf(stderr, "pcregrep: too many errors - abandoned\n");
+ exit(2);
+ }
+ match = invert; /* No more matching; don't show the line again */
+ break;
+ }
+ }
+
+ /* If it's a match or a not-match (as required), do what's wanted. */
+
+ if (match != invert)
+ {
+ BOOL hyphenprinted = FALSE;
+
+ /* We've failed if we want a file that doesn't have any matches. */
+
+ if (filenames == FN_NOMATCH_ONLY) return 1;
+
+ /* Just count if just counting is wanted. */
+
+ if (count_only) count++;
+
+ /* If all we want is a file name, there is no need to scan any more lines
+ in the file. */
+
+ else if (filenames == FN_ONLY)
+ {
+ fprintf(stdout, "%s\n", printname);
+ return 0;
+ }
+
+ /* Likewise, if all we want is a yes/no answer. */
+
+ else if (quiet) return 0;
+
+ /* The --only-matching option prints just the substring that matched, and
+ does not print any context. */
+
+ else if (only_matching)
+ {
+ if (printname != NULL) fprintf(stdout, "%s:", printname);
+ if (number) fprintf(stdout, "%d:", linenumber);
+ fwrite(ptr + offsets[0], 1, offsets[1] - offsets[0], stdout);
+ fprintf(stdout, "\n");
+ }
+
+ /* This is the default case when none of the above options is set. We print
+ the matching line(s), possibly preceded and/or followed by other lines of
+ context. */
+
+ else
+ {
+ /* See if there is a requirement to print some "after" lines from a
+ previous match. We never print any overlaps. */
+
+ if (after_context > 0 && lastmatchnumber > 0)
+ {
+ int ellength;
+ int linecount = 0;
+ char *p = lastmatchrestart;
+
+ while (p < ptr && linecount < after_context)
+ {
+ p = end_of_line(p, ptr, &ellength);
+ linecount++;
+ }
+
+ /* It is important to advance lastmatchrestart during this printing so
+ that it interacts correctly with any "before" printing below. Print
+ each line's data using fwrite() in case there are binary zeroes. */
+
+ while (lastmatchrestart < p)
+ {
+ char *pp = lastmatchrestart;
+ if (printname != NULL) fprintf(stdout, "%s-", printname);
+ if (number) fprintf(stdout, "%d-", lastmatchnumber++);
+ pp = end_of_line(pp, endptr, &ellength);
+ fwrite(lastmatchrestart, 1, pp - lastmatchrestart, stdout);
+ lastmatchrestart = pp;
+ }
+ if (lastmatchrestart != ptr) hyphenpending = TRUE;
+ }
+
+ /* If there were non-contiguous lines printed above, insert hyphens. */
+
+ if (hyphenpending)
+ {
+ fprintf(stdout, "--\n");
+ hyphenpending = FALSE;
+ hyphenprinted = TRUE;
+ }
+
+ /* See if there is a requirement to print some "before" lines for this
+ match. Again, don't print overlaps. */
+
+ if (before_context > 0)
+ {
+ int linecount = 0;
+ char *p = ptr;
+
+ while (p > buffer && (lastmatchnumber == 0 || p > lastmatchrestart) &&
+ linecount < before_context)
+ {
+ linecount++;
+ p = previous_line(p, buffer);
+ }
+
+ if (lastmatchnumber > 0 && p > lastmatchrestart && !hyphenprinted)
+ fprintf(stdout, "--\n");
+
+ while (p < ptr)
+ {
+ int ellength;
+ char *pp = p;
+ if (printname != NULL) fprintf(stdout, "%s-", printname);
+ if (number) fprintf(stdout, "%d-", linenumber - linecount--);
+ pp = end_of_line(pp, endptr, &ellength);
+ fwrite(p, 1, pp - p, stdout);
+ p = pp;
+ }
+ }
+
+ /* Now print the matching line(s); ensure we set hyphenpending at the end
+ of the file if any context lines are being output. */
+
+ if (after_context > 0 || before_context > 0)
+ endhyphenpending = TRUE;
+
+ if (printname != NULL) fprintf(stdout, "%s:", printname);
+ if (number) fprintf(stdout, "%d:", linenumber);
+
+ /* In multiline mode, we want to print to the end of the line in which
+ the end of the matched string is found, so we adjust linelength and the
+ line number appropriately, but only when there actually was a match
+ (invert not set). Because the PCRE_FIRSTLINE option is set, the start of
+ the match will always be before the first newline sequence. */
+
+ if (multiline)
+ {
+ int ellength;
+ char *endmatch = ptr;
+ if (!invert)
+ {
+ endmatch += offsets[1];
+ t = ptr;
+ while (t < endmatch)
+ {
+ t = end_of_line(t, endptr, &ellength);
+ if (t <= endmatch) linenumber++; else break;
+ }
+ }
+ endmatch = end_of_line(endmatch, endptr, &ellength);
+ linelength = endmatch - ptr - ellength;
+ }
+
+ /*** NOTE: Use only fwrite() to output the data line, so that binary
+ zeroes are treated as just another data character. */
+
+ /* This extra option, for Jeffrey Friedl's debugging requirements,
+ replaces the matched string, or a specific captured string if it exists,
+ with X. When this happens, colouring is ignored. */
+
+#ifdef JFRIEDL_DEBUG
+ if (S_arg >= 0 && S_arg < mrc)
+ {
+ int first = S_arg * 2;
+ int last = first + 1;
+ fwrite(ptr, 1, offsets[first], stdout);
+ fprintf(stdout, "X");
+ fwrite(ptr + offsets[last], 1, linelength - offsets[last], stdout);
+ }
+ else
+#endif
+
+ /* We have to split the line(s) up if colouring. */
+
+ if (do_colour)
+ {
+ fwrite(ptr, 1, offsets[0], stdout);
+ fprintf(stdout, "%c[%sm", 0x1b, colour_string);
+ fwrite(ptr + offsets[0], 1, offsets[1] - offsets[0], stdout);
+ fprintf(stdout, "%c[00m", 0x1b);
+ fwrite(ptr + offsets[1], 1, (linelength + endlinelength) - offsets[1],
+ stdout);
+ }
+ else fwrite(ptr, 1, linelength + endlinelength, stdout);
+ }
+
+ /* End of doing what has to be done for a match */
+
+ rc = 0; /* Had some success */
+
+ /* Remember where the last match happened for after_context. We remember
+ where we are about to restart, and that line's number. */
+
+ lastmatchrestart = ptr + linelength + endlinelength;
+ lastmatchnumber = linenumber + 1;
+ }
+
+ /* For a match in multiline inverted mode (which of course did not cause
+ anything to be printed), we have to move on to the end of the match before
+ proceeding. */
+
+ if (multiline && invert && match)
+ {
+ int ellength;
+ char *endmatch = ptr + offsets[1];
+ t = ptr;
+ while (t < endmatch)
+ {
+ t = end_of_line(t, endptr, &ellength);
+ if (t <= endmatch) linenumber++; else break;
+ }
+ endmatch = end_of_line(endmatch, endptr, &ellength);
+ linelength = endmatch - ptr - ellength;
+ }
+
+ /* Advance to after the newline and increment the line number. */
+
+ ptr += linelength + endlinelength;
+ linenumber++;
+
+ /* If we haven't yet reached the end of the file (the buffer is full), and
+ the current point is in the top 1/3 of the buffer, slide the buffer down by
+ 1/3 and refill it. Before we do this, if some unprinted "after" lines are
+ about to be lost, print them. */
+
+ if (bufflength >= sizeof(buffer) && ptr > buffer + 2*MBUFTHIRD)
+ {
+ if (after_context > 0 &&
+ lastmatchnumber > 0 &&
+ lastmatchrestart < buffer + MBUFTHIRD)
+ {
+ do_after_lines(lastmatchnumber, lastmatchrestart, endptr, printname);
+ lastmatchnumber = 0;
+ }
+
+ /* Now do the shuffle */
+
+ memmove(buffer, buffer + MBUFTHIRD, 2*MBUFTHIRD);
+ ptr -= MBUFTHIRD;
+ bufflength = 2*MBUFTHIRD + fread(buffer + 2*MBUFTHIRD, 1, MBUFTHIRD, in);
+ endptr = buffer + bufflength;
+
+ /* Adjust any last match point */
+
+ if (lastmatchnumber > 0) lastmatchrestart -= MBUFTHIRD;
+ }
+ } /* Loop through the whole file */
+
+/* End of file; print final "after" lines if wanted; do_after_lines sets
+hyphenpending if it prints something. */
+
+if (!only_matching && !count_only)
+ {
+ do_after_lines(lastmatchnumber, lastmatchrestart, endptr, printname);
+ hyphenpending |= endhyphenpending;
+ }
+
+/* Print the file name if we are looking for those without matches and there
+were none. If we found a match, we won't have got this far. */
+
+if (filenames == FN_NOMATCH_ONLY)
+ {
+ fprintf(stdout, "%s\n", printname);
+ return 0;
+ }
+
+/* Print the match count if wanted */
+
+if (count_only)
+ {
+ if (printname != NULL) fprintf(stdout, "%s:", printname);
+ fprintf(stdout, "%d\n", count);
+ }
+
+return rc;
+}
+
+
+
+/*************************************************
+* Grep a file or recurse into a directory *
+*************************************************/
+
+/* Given a path name, if it's a directory, scan all the files if we are
+recursing; if it's a file, grep it.
+
+Arguments:
+ pathname the path to investigate
+ dir_recurse TRUE if recursing is wanted (-r or -drecurse)
+ only_one_at_top TRUE if the path is the only one at toplevel
+
+Returns: 0 if there was at least one match
+ 1 if there were no matches
+ 2 there was some kind of error
+
+However, file opening failures are suppressed if "silent" is set.
+*/
+
+static int
+grep_or_recurse(char *pathname, BOOL dir_recurse, BOOL only_one_at_top)
+{
+int rc = 1;
+int sep;
+FILE *in;
+
+/* If the file name is "-" we scan stdin */
+
+if (strcmp(pathname, "-") == 0)
+ {
+ return pcregrep(stdin,
+ (filenames > FN_DEFAULT || (filenames == FN_DEFAULT && !only_one_at_top))?
+ stdin_name : NULL);
+ }
+
+
+/* If the file is a directory, skip it if skipping was requested; if we are
+recursing, scan each file within it, subject to any include or exclude
+patterns that were set.
+The scanning code is localized so it can be made system-specific. */
+
+if ((sep = isdirectory(pathname)) != 0)
+ {
+ if (dee_action == dee_SKIP) return 1;
+ if (dee_action == dee_RECURSE)
+ {
+ char buffer[1024];
+ char *nextfile;
+ directory_type *dir = opendirectory(pathname);
+
+ if (dir == NULL)
+ {
+ if (!silent)
+ fprintf(stderr, "pcregrep: Failed to open directory %s: %s\n", pathname,
+ strerror(errno));
+ return 2;
+ }
+
+ while ((nextfile = readdirectory(dir)) != NULL)
+ {
+ int frc, blen;
+ sprintf(buffer, "%.512s%c%.128s", pathname, sep, nextfile);
+ blen = strlen(buffer);
+
+ if (exclude_compiled != NULL &&
+ pcre_exec(exclude_compiled, NULL, buffer, blen, 0, 0, NULL, 0) >= 0)
+ continue;
+
+ if (include_compiled != NULL &&
+ pcre_exec(include_compiled, NULL, buffer, blen, 0, 0, NULL, 0) < 0)
+ continue;
+
+ frc = grep_or_recurse(buffer, dir_recurse, FALSE);
+ if (frc > 1) rc = frc;
+ else if (frc == 0 && rc == 1) rc = 0;
+ }
+
+ closedirectory(dir);
+ return rc;
+ }
+ }
+
+/* If the file is not a directory and not a regular file, skip it if that's
+been requested. */
+
+else if (!isregfile(pathname) && DEE_action == DEE_SKIP) return 1;
+
+/* Control reaches here if we have a regular file, or if we have a directory
+and recursion or skipping was not requested, or if we have anything else and
+skipping was not requested. The scan proceeds. If this is the first and only
+argument at top level, we don't show the file name, unless we are only showing
+the file name, or the filename was forced (-H). */
+
+in = fopen(pathname, "r");
+if (in == NULL)
+ {
+ if (!silent)
+ fprintf(stderr, "pcregrep: Failed to open %s: %s\n", pathname,
+ strerror(errno));
+ return 2;
+ }
+
+rc = pcregrep(in, (filenames > FN_DEFAULT ||
+ (filenames == FN_DEFAULT && !only_one_at_top))? pathname : NULL);
+
+fclose(in);
+return rc;
+}
+
+
+
+
+/*************************************************
+* Usage function *
+*************************************************/
+
+static int
+usage(int rc)
+{
+option_item *op;
+fprintf(stderr, "Usage: pcregrep [-");
+for (op = optionlist; op->one_char != 0; op++)
+ {
+ if (op->one_char > 0) fprintf(stderr, "%c", op->one_char);
+ }
+fprintf(stderr, "] [long options] [pattern] [files]\n");
+fprintf(stderr, "Type `pcregrep --help' for more information.\n");
+return rc;
+}
+
+
+
+
+/*************************************************
+* Help function *
+*************************************************/
+
+static void
+help(void)
+{
+option_item *op;
+
+printf("Usage: pcregrep [OPTION]... [PATTERN] [FILE1 FILE2 ...]\n");
+printf("Search for PATTERN in each FILE or standard input.\n");
+printf("PATTERN must be present if neither -e nor -f is used.\n");
+printf("\"-\" can be used as a file name to mean STDIN.\n\n");
+printf("Example: pcregrep -i 'hello.*world' menu.h main.c\n\n");
+
+printf("Options:\n");
+
+for (op = optionlist; op->one_char != 0; op++)
+ {
+ int n;
+ char s[4];
+ if (op->one_char > 0) sprintf(s, "-%c,", op->one_char); else strcpy(s, " ");
+ printf(" %s --%s%n", s, op->long_name, &n);
+ n = 30 - n;
+ if (n < 1) n = 1;
+ printf("%.*s%s\n", n, " ", op->help_text);
+ }
+
+printf("\nWhen reading patterns from a file instead of using a command line option,\n");
+printf("trailing white space is removed and blank lines are ignored.\n");
+printf("There is a maximum of %d patterns.\n", MAX_PATTERN_COUNT);
+
+printf("\nWith no FILEs, read standard input. If fewer than two FILEs given, assume -h.\n");
+printf("Exit status is 0 if any matches, 1 if no matches, and 2 if trouble.\n");
+}
+
+
+
+
+/*************************************************
+* Handle a single-letter, no data option *
+*************************************************/
+
+static int
+handle_option(int letter, int options)
+{
+switch(letter)
+ {
+ case N_HELP: help(); exit(0);
+ case 'c': count_only = TRUE; break;
+ case 'F': process_options |= PO_FIXED_STRINGS; break;
+ case 'H': filenames = FN_FORCE; break;
+ case 'h': filenames = FN_NONE; break;
+ case 'i': options |= PCRE_CASELESS; break;
+ case 'l': filenames = FN_ONLY; break;
+ case 'L': filenames = FN_NOMATCH_ONLY; break;
+ case 'M': multiline = TRUE; options |= PCRE_MULTILINE|PCRE_FIRSTLINE; break;
+ case 'n': number = TRUE; break;
+ case 'o': only_matching = TRUE; break;
+ case 'q': quiet = TRUE; break;
+ case 'r': dee_action = dee_RECURSE; break;
+ case 's': silent = TRUE; break;
+ case 'u': options |= PCRE_UTF8; utf8 = TRUE; break;
+ case 'v': invert = TRUE; break;
+ case 'w': process_options |= PO_WORD_MATCH; break;
+ case 'x': process_options |= PO_LINE_MATCH; break;
+
+ case 'V':
+ fprintf(stderr, "pcregrep version %s\n", pcre_version());
+ exit(0);
+ break;
+
+ default:
+ fprintf(stderr, "pcregrep: Unknown option -%c\n", letter);
+ exit(usage(2));
+ }
+
+return options;
+}
+
+
+
+
+/*************************************************
+* Construct printed ordinal *
+*************************************************/
+
+/* This turns a number into "1st", "3rd", etc. */
+
+static char *
+ordin(int n)
+{
+static char buffer[8];
+char *p = buffer;
+sprintf(p, "%d", n);
+while (*p != 0) p++;
+switch (n%10)
+ {
+ case 1: strcpy(p, "st"); break;
+ case 2: strcpy(p, "nd"); break;
+ case 3: strcpy(p, "rd"); break;
+ default: strcpy(p, "th"); break;
+ }
+return buffer;
+}
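+
+/* Examples: ordin(1) is "1st", ordin(2) is "2nd", ordin(23) is "23rd". Only
+n%10 is inspected, so 11, 12, and 13 come out as "11st", "12nd", and "13rd";
+this is merely a cosmetic quirk in error messages. */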
+
+
+
+/*************************************************
+* Compile a single pattern *
+*************************************************/
+
+/* When the -F option has been used, this is called for each substring.
+Otherwise it's called for each supplied pattern.
+
+Arguments:
+ pattern the pattern string
+ options the PCRE options
+ filename the file name, or NULL for a command-line pattern
+ count 0 if this is the only command line pattern, or
+ number of the command line pattern, or
+ linenumber for a pattern from a file
+
+Returns: TRUE on success, FALSE after an error
+*/
+
+static BOOL
+compile_single_pattern(char *pattern, int options, char *filename, int count)
+{
+char buffer[MBUFTHIRD + 16];
+const char *error;
+int errptr;
+
+if (pattern_count >= MAX_PATTERN_COUNT)
+ {
+ fprintf(stderr, "pcregrep: Too many %spatterns (max %d)\n",
+ (filename == NULL)? "command-line " : "", MAX_PATTERN_COUNT);
+ return FALSE;
+ }
+
+sprintf(buffer, "%s%.*s%s", prefix[process_options], MBUFTHIRD, pattern,
+ suffix[process_options]);
+pattern_list[pattern_count] =
+ pcre_compile(buffer, options, &error, &errptr, pcretables);
+if (pattern_list[pattern_count] != NULL)
+ {
+ pattern_count++;
+ return TRUE;
+ }
+
+/* Handle compile errors */
+
+errptr -= (int)strlen(prefix[process_options]);
+if (errptr > (int)strlen(pattern)) errptr = (int)strlen(pattern);
+
+if (filename == NULL)
+ {
+ if (count == 0)
+ fprintf(stderr, "pcregrep: Error in command-line regex "
+ "at offset %d: %s\n", errptr, error);
+ else
+ fprintf(stderr, "pcregrep: Error in %s command-line regex "
+ "at offset %d: %s\n", ordin(count), errptr, error);
+ }
+else
+ {
+ fprintf(stderr, "pcregrep: Error in regex in line %d of %s "
+ "at offset %d: %s\n", count, filename, errptr, error);
+ }
+
+return FALSE;
+}
+
+
+
+/*************************************************
+* Compile one supplied pattern *
+*************************************************/
+
+/* When the -F option has been used, each string may be a list of strings,
+separated by line breaks. They will be matched literally.
+
+Arguments:
+ pattern the pattern string
+ options the PCRE options
+ filename the file name, or NULL for a command-line pattern
+ count 0 if this is the only command line pattern, or
+ number of the command line pattern, or
+ linenumber for a pattern from a file
+
+Returns: TRUE on success, FALSE after an error
+*/
+
+static BOOL
+compile_pattern(char *pattern, int options, char *filename, int count)
+{
+if ((process_options & PO_FIXED_STRINGS) != 0)
+ {
+ char *eop = pattern + strlen(pattern);
+ char buffer[MBUFTHIRD];
+ for(;;)
+ {
+ int ellength;
+ char *p = end_of_line(pattern, eop, &ellength);
+ if (ellength == 0)
+ return compile_single_pattern(pattern, options, filename, count);
+ sprintf(buffer, "%.*s", (int)(p - pattern - ellength), pattern);
+ pattern = p;
+ if (!compile_single_pattern(buffer, options, filename, count))
+ return FALSE;
+ }
+ }
+else return compile_single_pattern(pattern, options, filename, count);
+}
+
+
+
+/*************************************************
+* Main program *
+*************************************************/
+
+/* Returns 0 if something matched, 1 if nothing matched, 2 after an error. */
+
+int
+main(int argc, char **argv)
+{
+int i, j;
+int rc = 1;
+int pcre_options = 0;
+int cmd_pattern_count = 0;
+int hint_count = 0;
+int errptr;
+BOOL only_one_at_top;
+char *patterns[MAX_PATTERN_COUNT];
+const char *locale_from = "--locale";
+const char *error;
+
+/* Set the default line ending value from the default in the PCRE library;
+"lf", "cr", "crlf", and "any" are supported. Anything else is treated as "lf".
+*/
+
+(void)pcre_config(PCRE_CONFIG_NEWLINE, &i);
+switch(i)
+ {
+ default: newline = (char *)"lf"; break;
+ case '\r': newline = (char *)"cr"; break;
+ case ('\r' << 8) | '\n': newline = (char *)"crlf"; break;
+ case -1: newline = (char *)"any"; break;
+ case -2: newline = (char *)"anycrlf"; break;
+ }
+
+/* Process the options */
+
+for (i = 1; i < argc; i++)
+ {
+ option_item *op = NULL;
+ char *option_data = (char *)""; /* default to keep compiler happy */
+ BOOL longop;
+ BOOL longopwasequals = FALSE;
+
+ if (argv[i][0] != '-') break;
+
+ /* If we hit an argument that is just "-", it may be a reference to STDIN,
+ but only if we have previously had -e or -f to define the patterns. */
+
+ if (argv[i][1] == 0)
+ {
+ if (pattern_filename != NULL || pattern_count > 0) break;
+ else exit(usage(2));
+ }
+
+ /* Handle a long name option, or -- to terminate the options */
+
+ if (argv[i][1] == '-')
+ {
+ char *arg = argv[i] + 2;
+ char *argequals = strchr(arg, '=');
+
+ if (*arg == 0) /* -- terminates options */
+ {
+ i++;
+ break; /* out of the options-handling loop */
+ }
+
+ longop = TRUE;
+
+ /* Some long options have data that follows after =, for example file=name.
+ Some options have variations in the long name spelling: specifically, we
+ allow "regexp" because GNU grep allows it, though I personally go along
+ with Jeffrey Friedl and Larry Wall in preferring "regex" without the "p".
+ These options are entered in the table as "regex(p)". No option is in both
+ these categories, fortunately. */
+
+ for (op = optionlist; op->one_char != 0; op++)
+ {
+ char *opbra = strchr(op->long_name, '(');
+ char *equals = strchr(op->long_name, '=');
+ if (opbra == NULL) /* Not a (p) case */
+ {
+ if (equals == NULL) /* Not thing=data case */
+ {
+ if (strcmp(arg, op->long_name) == 0) break;
+ }
+ else /* Special case xxx=data */
+ {
+ int oplen = equals - op->long_name;
+ int arglen = (argequals == NULL)? (int)strlen(arg) : argequals - arg;
+ if (oplen == arglen && strncmp(arg, op->long_name, oplen) == 0)
+ {
+ option_data = arg + arglen;
+ if (*option_data == '=')
+ {
+ option_data++;
+ longopwasequals = TRUE;
+ }
+ break;
+ }
+ }
+ }
+ else /* Special case xxxx(p) */
+ {
+ char buff1[24];
+ char buff2[24];
+ int baselen = opbra - op->long_name;
+ sprintf(buff1, "%.*s", baselen, op->long_name);
+ sprintf(buff2, "%s%.*s", buff1,
+ (int)strlen(op->long_name) - baselen - 2, opbra + 1);
+ if (strcmp(arg, buff1) == 0 || strcmp(arg, buff2) == 0)
+ break;
+ }
+ }
+
+ if (op->one_char == 0)
+ {
+ fprintf(stderr, "pcregrep: Unknown option %s\n", argv[i]);
+ exit(usage(2));
+ }
+ }
+
+
+ /* Jeffrey Friedl's debugging harness uses these additional options which
+ are not in the right form for putting in the option table because they use
+ only one hyphen, yet are more than one character long. By putting them
+ separately here, they will not get displayed as part of the help() output,
+ but I don't think Jeffrey will care about that. */
+
+#ifdef JFRIEDL_DEBUG
+ else if (strcmp(argv[i], "-pre") == 0) {
+ jfriedl_prefix = argv[++i];
+ continue;
+ } else if (strcmp(argv[i], "-post") == 0) {
+ jfriedl_postfix = argv[++i];
+ continue;
+ } else if (strcmp(argv[i], "-XT") == 0) {
+ sscanf(argv[++i], "%d", &jfriedl_XT);
+ continue;
+ } else if (strcmp(argv[i], "-XR") == 0) {
+ sscanf(argv[++i], "%d", &jfriedl_XR);
+ continue;
+ }
+#endif
+
+
+ /* One-char options; many that have no data may be in a single argument; we
+ continue till we hit the last one or one that needs data. */
+
+ else
+ {
+ char *s = argv[i] + 1;
+ longop = FALSE;
+ while (*s != 0)
+ {
+ for (op = optionlist; op->one_char != 0; op++)
+ { if (*s == op->one_char) break; }
+ if (op->one_char == 0)
+ {
+ fprintf(stderr, "pcregrep: Unknown option letter '%c' in \"%s\"\n",
+ *s, argv[i]);
+ exit(usage(2));
+ }
+ if (op->type != OP_NODATA || s[1] == 0)
+ {
+ option_data = s+1;
+ break;
+ }
+ pcre_options = handle_option(*s++, pcre_options);
+ }
+ }
+
+ /* At this point we should have op pointing to a matched option. If the type
+ is OP_NODATA, it means that there is no data, and the option might set
+ something in the PCRE options. */
+
+ if (op->type == OP_NODATA)
+ {
+ pcre_options = handle_option(op->one_char, pcre_options);
+ continue;
+ }
+
+ /* If the option type is OP_OP_STRING or OP_OP_NUMBER, it's an option that
+ either has a value or defaults to something. It cannot have data in a
+ separate item. At the moment, the only such options are "colo(u)r" and
+ Jeffrey Friedl's special -S debugging option. */
+
+ if (*option_data == 0 &&
+ (op->type == OP_OP_STRING || op->type == OP_OP_NUMBER))
+ {
+ switch (op->one_char)
+ {
+ case N_COLOUR:
+ colour_option = (char *)"auto";
+ break;
+#ifdef JFRIEDL_DEBUG
+ case 'S':
+ S_arg = 0;
+ break;
+#endif
+ }
+ continue;
+ }
+
+ /* Otherwise, find the data string for the option. */
+
+ if (*option_data == 0)
+ {
+ if (i >= argc - 1 || longopwasequals)
+ {
+ fprintf(stderr, "pcregrep: Data missing after %s\n", argv[i]);
+ exit(usage(2));
+ }
+ option_data = argv[++i];
+ }
+
+ /* If the option type is OP_PATLIST, it's the -e option, which can be called
+ multiple times to create a list of patterns. */
+
+ if (op->type == OP_PATLIST)
+ {
+ if (cmd_pattern_count >= MAX_PATTERN_COUNT)
+ {
+ fprintf(stderr, "pcregrep: Too many command-line patterns (max %d)\n",
+ MAX_PATTERN_COUNT);
+ return 2;
+ }
+ patterns[cmd_pattern_count++] = option_data;
+ }
+
+ /* Otherwise, deal with single string or numeric data values. */
+
+ else if (op->type != OP_NUMBER && op->type != OP_OP_NUMBER)
+ {
+ *((char **)op->dataptr) = option_data;
+ }
+ else
+ {
+ char *endptr;
+ int n = strtoul(option_data, &endptr, 10);
+ if (*endptr != 0)
+ {
+ if (longop)
+ {
+ char *equals = strchr(op->long_name, '=');
+ int nlen = (equals == NULL)? (int)strlen(op->long_name) :
+ equals - op->long_name;
+ fprintf(stderr, "pcregrep: Malformed number \"%s\" after --%.*s\n",
+ option_data, nlen, op->long_name);
+ }
+ else
+ fprintf(stderr, "pcregrep: Malformed number \"%s\" after -%c\n",
+ option_data, op->one_char);
+ exit(usage(2));
+ }
+ *((int *)op->dataptr) = n;
+ }
+ }
+
+/* Options have been decoded. If -C was used, its value is used as a default
+for -A and -B. */
+
+if (both_context > 0)
+ {
+ if (after_context == 0) after_context = both_context;
+ if (before_context == 0) before_context = both_context;
+ }
+
+/* If a locale has not been provided as an option, see if the LC_CTYPE or
+LC_ALL environment variable is set, and if so, use it. */
+
+if (locale == NULL)
+ {
+ locale = getenv("LC_ALL");
+ locale_from = "LCC_ALL";
+ }
+
+if (locale == NULL)
+ {
+ locale = getenv("LC_CTYPE");
+ locale_from = "LC_CTYPE";
+ }
+
+/* If a locale has been provided, set it, and generate the tables that PCRE
+needs. Otherwise, pcretables==NULL, which causes the use of default tables. */
+
+if (locale != NULL)
+ {
+ if (setlocale(LC_CTYPE, locale) == NULL)
+ {
+ fprintf(stderr, "pcregrep: Failed to set locale %s (obtained from %s)\n",
+ locale, locale_from);
+ return 2;
+ }
+ pcretables = pcre_maketables();
+ }
+
+/* Sort out colouring */
+
+if (colour_option != NULL && strcmp(colour_option, "never") != 0)
+ {
+ if (strcmp(colour_option, "always") == 0) do_colour = TRUE;
+ else if (strcmp(colour_option, "auto") == 0) do_colour = is_stdout_tty();
+ else
+ {
+ fprintf(stderr, "pcregrep: Unknown colour setting \"%s\"\n",
+ colour_option);
+ return 2;
+ }
+ if (do_colour)
+ {
+ char *cs = getenv("PCREGREP_COLOUR");
+ if (cs == NULL) cs = getenv("PCREGREP_COLOR");
+ if (cs != NULL) colour_string = cs;
+ }
+ }
+
+/* Interpret the newline type; the default settings are Unix-like. */
+
+if (strcmp(newline, "cr") == 0 || strcmp(newline, "CR") == 0)
+ {
+ pcre_options |= PCRE_NEWLINE_CR;
+ endlinetype = EL_CR;
+ }
+else if (strcmp(newline, "lf") == 0 || strcmp(newline, "LF") == 0)
+ {
+ pcre_options |= PCRE_NEWLINE_LF;
+ endlinetype = EL_LF;
+ }
+else if (strcmp(newline, "crlf") == 0 || strcmp(newline, "CRLF") == 0)
+ {
+ pcre_options |= PCRE_NEWLINE_CRLF;
+ endlinetype = EL_CRLF;
+ }
+else if (strcmp(newline, "any") == 0 || strcmp(newline, "ANY") == 0)
+ {
+ pcre_options |= PCRE_NEWLINE_ANY;
+ endlinetype = EL_ANY;
+ }
+else if (strcmp(newline, "anycrlf") == 0 || strcmp(newline, "ANYCRLF") == 0)
+ {
+ pcre_options |= PCRE_NEWLINE_ANYCRLF;
+ endlinetype = EL_ANYCRLF;
+ }
+else
+ {
+ fprintf(stderr, "pcregrep: Invalid newline specifier \"%s\"\n", newline);
+ return 2;
+ }
+
+/* Interpret the text values for -d and -D */
+
+if (dee_option != NULL)
+ {
+ if (strcmp(dee_option, "read") == 0) dee_action = dee_READ;
+ else if (strcmp(dee_option, "recurse") == 0) dee_action = dee_RECURSE;
+ else if (strcmp(dee_option, "skip") == 0) dee_action = dee_SKIP;
+ else
+ {
+ fprintf(stderr, "pcregrep: Invalid value \"%s\" for -d\n", dee_option);
+ return 2;
+ }
+ }
+
+if (DEE_option != NULL)
+ {
+ if (strcmp(DEE_option, "read") == 0) DEE_action = DEE_READ;
+ else if (strcmp(DEE_option, "skip") == 0) DEE_action = DEE_SKIP;
+ else
+ {
+ fprintf(stderr, "pcregrep: Invalid value \"%s\" for -D\n", DEE_option);
+ return 2;
+ }
+ }
+
+/* Check the values for Jeffrey Friedl's debugging options. */
+
+#ifdef JFRIEDL_DEBUG
+if (S_arg > 9)
+ {
+ fprintf(stderr, "pcregrep: bad value for -S option\n");
+ return 2;
+ }
+if (jfriedl_XT != 0 || jfriedl_XR != 0)
+ {
+ if (jfriedl_XT == 0) jfriedl_XT = 1;
+ if (jfriedl_XR == 0) jfriedl_XR = 1;
+ }
+#endif
+
+/* Get memory to store the pattern and hints lists. */
+
+pattern_list = (pcre **)malloc(MAX_PATTERN_COUNT * sizeof(pcre *));
+hints_list = (pcre_extra **)malloc(MAX_PATTERN_COUNT * sizeof(pcre_extra *));
+
+if (pattern_list == NULL || hints_list == NULL)
+ {
+ fprintf(stderr, "pcregrep: malloc failed\n");
+ goto EXIT2;
+ }
+
+/* If no patterns were provided by -e, and there is no file provided by -f,
+the first argument is the one and only pattern, and it must exist. */
+
+if (cmd_pattern_count == 0 && pattern_filename == NULL)
+ {
+ if (i >= argc) return usage(2);
+ patterns[cmd_pattern_count++] = argv[i++];
+ }
+
+/* Compile the patterns that were provided on the command line, either by
+multiple uses of -e or as a single unkeyed pattern. */
+
+for (j = 0; j < cmd_pattern_count; j++)
+ {
+ if (!compile_pattern(patterns[j], pcre_options, NULL,
+ (j == 0 && cmd_pattern_count == 1)? 0 : j + 1))
+ goto EXIT2;
+ }
+
+/* Compile the regular expressions that are provided in a file. */
+
+if (pattern_filename != NULL)
+ {
+ int linenumber = 0;
+ FILE *f;
+ char *filename;
+ char buffer[MBUFTHIRD];
+
+ if (strcmp(pattern_filename, "-") == 0)
+ {
+ f = stdin;
+ filename = stdin_name;
+ }
+ else
+ {
+ f = fopen(pattern_filename, "r");
+ if (f == NULL)
+ {
+ fprintf(stderr, "pcregrep: Failed to open %s: %s\n", pattern_filename,
+ strerror(errno));
+ goto EXIT2;
+ }
+ filename = pattern_filename;
+ }
+
+ while (fgets(buffer, MBUFTHIRD, f) != NULL)
+ {
+ char *s = buffer + (int)strlen(buffer);
+ while (s > buffer && isspace((unsigned char)(s[-1]))) s--;
+ *s = 0;
+ linenumber++;
+ if (buffer[0] == 0) continue; /* Skip blank lines */
+ if (!compile_pattern(buffer, pcre_options, filename, linenumber))
+ goto EXIT2;
+ }
+
+ if (f != stdin) fclose(f);
+ }
+
+/* Study the regular expressions, as we will be running them many times */
+
+for (j = 0; j < pattern_count; j++)
+ {
+ hints_list[j] = pcre_study(pattern_list[j], 0, &error);
+ if (error != NULL)
+ {
+ char s[16];
+ if (pattern_count == 1) s[0] = 0; else sprintf(s, " number %d", j);
+ fprintf(stderr, "pcregrep: Error while studying regex%s: %s\n", s, error);
+ goto EXIT2;
+ }
+ hint_count++;
+ }
+
+/* If there are include or exclude patterns, compile them. */
+
+if (exclude_pattern != NULL)
+ {
+ exclude_compiled = pcre_compile(exclude_pattern, 0, &error, &errptr,
+ pcretables);
+ if (exclude_compiled == NULL)
+ {
+ fprintf(stderr, "pcregrep: Error in 'exclude' regex at offset %d: %s\n",
+ errptr, error);
+ goto EXIT2;
+ }
+ }
+
+if (include_pattern != NULL)
+ {
+ include_compiled = pcre_compile(include_pattern, 0, &error, &errptr,
+ pcretables);
+ if (include_compiled == NULL)
+ {
+ fprintf(stderr, "pcregrep: Error in 'include' regex at offset %d: %s\n",
+ errptr, error);
+ goto EXIT2;
+ }
+ }
+
+/* If there are no further arguments, do the business on stdin and exit. */
+
+if (i >= argc)
+ {
+ rc = pcregrep(stdin, (filenames > FN_DEFAULT)? stdin_name : NULL);
+ goto EXIT;
+ }
+
+/* Otherwise, work through the remaining arguments as files or directories.
+Pass in the fact that there is only one argument at top level - this suppresses
+the file name if the argument is not a directory and filenames are not
+otherwise forced. */
+
+only_one_at_top = i == argc - 1; /* Catch initial value of i */
+
+for (; i < argc; i++)
+ {
+ int frc = grep_or_recurse(argv[i], dee_action == dee_RECURSE,
+ only_one_at_top);
+ if (frc > 1) rc = frc;
+ else if (frc == 0 && rc == 1) rc = 0;
+ }
+
+EXIT:
+if (pattern_list != NULL)
+ {
+ for (i = 0; i < pattern_count; i++) free(pattern_list[i]);
+ free(pattern_list);
+ }
+if (hints_list != NULL)
+ {
+ for (i = 0; i < hint_count; i++) free(hints_list[i]);
+ free(hints_list);
+ }
+return rc;
+
+EXIT2:
+rc = 2;
+goto EXIT;
+}
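+
+/* Editor's note: the flow above -- compile each pattern, study it, then run
+pcre_exec over the input -- is easy to lose among the option plumbing. The
+guarded fragment below is a minimal, hypothetical sketch of that same sequence
+using only the public PCRE 7.x API; the name match_line is invented for
+illustration and is not part of pcregrep. */
+
+#if 0 /* illustrative sketch only, not compiled */
+#include <string.h>
+#include <stdlib.h>
+#include "pcre.h"
+
+static int match_line(const char *pattern, const char *line)
+{
+const char *error;
+int erroffset, rc;
+int ovector[30];                    /* room for 10 captured substrings */
+pcre *re;
+pcre_extra *hints;
+
+re = pcre_compile(pattern, PCRE_NEWLINE_LF, &error, &erroffset, NULL);
+if (re == NULL) return -1;          /* details are in error/erroffset */
+hints = pcre_study(re, 0, &error);  /* a NULL hints pointer is acceptable */
+rc = pcre_exec(re, hints, line, (int)strlen(line), 0, 0, ovector, 30);
+if (hints != NULL) free(hints);     /* mirrors the cleanup at EXIT above */
+free(re);
+return rc >= 0;                     /* 1 = matched, 0 = no match or error */
+}
+#endif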
+
+/* End of pcregrep */
diff --git a/src/third_party/pcre-7.4/pcreposix.c b/src/third_party/pcre-7.4/pcreposix.c
new file mode 100644
index 00000000000..24f2109489f
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcreposix.c
@@ -0,0 +1,338 @@
+/*************************************************
+* Perl-Compatible Regular Expressions *
+*************************************************/
+
+/* PCRE is a library of functions to support regular expressions whose syntax
+and semantics are as close as possible to those of the Perl 5 language.
+
+ Written by Philip Hazel
+ Copyright (c) 1997-2007 University of Cambridge
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+
+/* This module is a wrapper that provides a POSIX API to the underlying PCRE
+functions. */
+
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+
+/* Ensure that the PCREPOSIX_EXP_xxx macros are set appropriately for
+compiling these functions. This must come before including pcreposix.h, where
+they are set for an application (using these functions) if they have not
+previously been set. */
+
+/*#if defined(_WIN32) && !defined(PCRE_STATIC)
+#error why are we here?
+# define PCREPOSIX_EXP_DECL extern __declspec(dllexport)
+# define PCREPOSIX_EXP_DEFN __declspec(dllexport)
+#endif
+*/
+#include "pcre.h"
+#include "pcre_internal.h"
+#include "pcreposix.h"
+
+
+/* Table to translate PCRE compile time error codes into POSIX error codes. */
+
+static const int eint[] = {
+ 0, /* no error */
+ REG_EESCAPE, /* \ at end of pattern */
+ REG_EESCAPE, /* \c at end of pattern */
+ REG_EESCAPE, /* unrecognized character follows \ */
+ REG_BADBR, /* numbers out of order in {} quantifier */
+ REG_BADBR, /* number too big in {} quantifier */
+ REG_EBRACK, /* missing terminating ] for character class */
+ REG_ECTYPE, /* invalid escape sequence in character class */
+ REG_ERANGE, /* range out of order in character class */
+ REG_BADRPT, /* nothing to repeat */
+ REG_BADRPT, /* operand of unlimited repeat could match the empty string */
+ REG_ASSERT, /* internal error: unexpected repeat */
+ REG_BADPAT, /* unrecognized character after (? */
+ REG_BADPAT, /* POSIX named classes are supported only within a class */
+ REG_EPAREN, /* missing ) */
+ REG_ESUBREG, /* reference to non-existent subpattern */
+ REG_INVARG, /* erroffset passed as NULL */
+ REG_INVARG, /* unknown option bit(s) set */
+ REG_EPAREN, /* missing ) after comment */
+ REG_ESIZE, /* parentheses nested too deeply */
+ REG_ESIZE, /* regular expression too large */
+ REG_ESPACE, /* failed to get memory */
+ REG_EPAREN, /* unmatched brackets */
+ REG_ASSERT, /* internal error: code overflow */
+ REG_BADPAT, /* unrecognized character after (?< */
+ REG_BADPAT, /* lookbehind assertion is not fixed length */
+ REG_BADPAT, /* malformed number or name after (?( */
+ REG_BADPAT, /* conditional group contains more than two branches */
+ REG_BADPAT, /* assertion expected after (?( */
+ REG_BADPAT, /* (?R or (?[+-]digits must be followed by ) */
+ REG_ECTYPE, /* unknown POSIX class name */
+ REG_BADPAT, /* POSIX collating elements are not supported */
+ REG_INVARG, /* this version of PCRE is not compiled with PCRE_UTF8 support */
+ REG_BADPAT, /* spare error */
+ REG_BADPAT, /* character value in \x{...} sequence is too large */
+ REG_BADPAT, /* invalid condition (?(0) */
+ REG_BADPAT, /* \C not allowed in lookbehind assertion */
+ REG_EESCAPE, /* PCRE does not support \L, \l, \N, \U, or \u */
+ REG_BADPAT, /* number after (?C is > 255 */
+ REG_BADPAT, /* closing ) for (?C expected */
+ REG_BADPAT, /* recursive call could loop indefinitely */
+ REG_BADPAT, /* unrecognized character after (?P */
+ REG_BADPAT, /* syntax error in subpattern name (missing terminator) */
+ REG_BADPAT, /* two named subpatterns have the same name */
+ REG_BADPAT, /* invalid UTF-8 string */
+ REG_BADPAT, /* support for \P, \p, and \X has not been compiled */
+ REG_BADPAT, /* malformed \P or \p sequence */
+ REG_BADPAT, /* unknown property name after \P or \p */
+ REG_BADPAT, /* subpattern name is too long (maximum 32 characters) */
+ REG_BADPAT, /* too many named subpatterns (maximum 10,000) */
+ REG_BADPAT, /* repeated subpattern is too long */
+ REG_BADPAT, /* octal value is greater than \377 (not in UTF-8 mode) */
+ REG_BADPAT, /* internal error: overran compiling workspace */
+ REG_BADPAT, /* internal error: previously-checked referenced subpattern not found */
+ REG_BADPAT, /* DEFINE group contains more than one branch */
+ REG_BADPAT, /* repeating a DEFINE group is not allowed */
+ REG_INVARG, /* inconsistent NEWLINE options */
+ REG_BADPAT, /* \g is not followed by an (optionally braced) non-zero number */
+ REG_BADPAT, /* (?+ or (?- must be followed by a non-zero number */
+ REG_BADPAT /* number is too big */
+};
+
+/* Table of texts corresponding to POSIX error codes */
+
+static const char *const pstring[] = {
+ "", /* Dummy for value 0 */
+ "internal error", /* REG_ASSERT */
+ "invalid repeat counts in {}", /* BADBR */
+ "pattern error", /* BADPAT */
+ "? * + invalid", /* BADRPT */
+ "unbalanced {}", /* EBRACE */
+ "unbalanced []", /* EBRACK */
+ "collation error - not relevant", /* ECOLLATE */
+ "bad class", /* ECTYPE */
+ "bad escape sequence", /* EESCAPE */
+ "empty expression", /* EMPTY */
+ "unbalanced ()", /* EPAREN */
+ "bad range inside []", /* ERANGE */
+ "expression too big", /* ESIZE */
+ "failed to get memory", /* ESPACE */
+ "bad back reference", /* ESUBREG */
+ "bad argument", /* INVARG */
+ "match failed" /* NOMATCH */
+};
+
+
+
+
+/*************************************************
+* Translate error code to string *
+*************************************************/
+
+PCREPOSIX_EXP_DEFN size_t
+regerror(int errcode, const regex_t *preg, char *errbuf, size_t errbuf_size)
+{
+const char *message, *addmessage;
+size_t length, addlength;
+
+message = (errcode >= (int)(sizeof(pstring)/sizeof(char *)))?
+ "unknown error code" : pstring[errcode];
+length = strlen(message) + 1;
+
+addmessage = " at offset ";
+addlength = (preg != NULL && (int)preg->re_erroffset != -1)?
+ strlen(addmessage) + 6 : 0;
+
+if (errbuf_size > 0)
+ {
+ if (addlength > 0 && errbuf_size >= length + addlength)
+ sprintf(errbuf, "%s%s%-6d", message, addmessage, (int)preg->re_erroffset);
+ else
+ {
+ strncpy(errbuf, message, errbuf_size - 1);
+ errbuf[errbuf_size-1] = 0;
+ }
+ }
+
+return length + addlength;
+}
+
+
+
+
+/*************************************************
+* Free store held by a regex *
+*************************************************/
+
+PCREPOSIX_EXP_DEFN void
+regfree(regex_t *preg)
+{
+(pcre_free)(preg->re_pcre);
+}
+
+
+
+
+/*************************************************
+* Compile a regular expression *
+*************************************************/
+
+/*
+Arguments:
+ preg points to a structure for recording the compiled expression
+ pattern the pattern to compile
+ cflags compilation flags
+
+Returns: 0 on success
+ various non-zero codes on failure
+*/
+
+PCREPOSIX_EXP_DEFN int
+regcomp(regex_t *preg, const char *pattern, int cflags)
+{
+const char *errorptr;
+int erroffset;
+int errorcode;
+int options = 0;
+
+if ((cflags & REG_ICASE) != 0) options |= PCRE_CASELESS;
+if ((cflags & REG_NEWLINE) != 0) options |= PCRE_MULTILINE;
+if ((cflags & REG_DOTALL) != 0) options |= PCRE_DOTALL;
+if ((cflags & REG_NOSUB) != 0) options |= PCRE_NO_AUTO_CAPTURE;
+if ((cflags & REG_UTF8) != 0) options |= PCRE_UTF8;
+
+preg->re_pcre = pcre_compile2(pattern, options, &errorcode, &errorptr,
+ &erroffset, NULL);
+preg->re_erroffset = erroffset;
+
+if (preg->re_pcre == NULL) return eint[errorcode];
+
+preg->re_nsub = pcre_info((const pcre *)preg->re_pcre, NULL, NULL);
+return 0;
+}
+
+
+
+
+/*************************************************
+* Match a regular expression *
+*************************************************/
+
+/* Unfortunately, PCRE requires 3 ints of working space for each captured
+substring, so we have to get and release working store instead of just using
+the POSIX structures as was done in earlier releases when PCRE needed only 2
+ints. However, if the number of possible capturing brackets is small, use a
+block of store on the stack, to reduce the use of malloc/free. The threshold is
+in a macro that can be changed at configure time.
+
+If REG_NOSUB was specified at compile time, the PCRE_NO_AUTO_CAPTURE flag will
+be set. When this is the case, the nmatch and pmatch arguments are ignored, and
+the only result is yes/no/error. */
+
+PCREPOSIX_EXP_DEFN int
+regexec(const regex_t *preg, const char *string, size_t nmatch,
+ regmatch_t pmatch[], int eflags)
+{
+int rc;
+int options = 0;
+int *ovector = NULL;
+int small_ovector[POSIX_MALLOC_THRESHOLD * 3];
+BOOL allocated_ovector = FALSE;
+BOOL nosub =
+ (((const pcre *)preg->re_pcre)->options & PCRE_NO_AUTO_CAPTURE) != 0;
+
+if ((eflags & REG_NOTBOL) != 0) options |= PCRE_NOTBOL;
+if ((eflags & REG_NOTEOL) != 0) options |= PCRE_NOTEOL;
+
+((regex_t *)preg)->re_erroffset = (size_t)(-1); /* Only has meaning after compile */
+
+/* When no string data is being returned, ensure that nmatch is zero.
+Otherwise, ensure the vector for holding the return data is large enough. */
+
+if (nosub) nmatch = 0;
+
+else if (nmatch > 0)
+ {
+ if (nmatch <= POSIX_MALLOC_THRESHOLD)
+ {
+ ovector = &(small_ovector[0]);
+ }
+ else
+ {
+ if (nmatch > INT_MAX/(sizeof(int) * 3)) return REG_ESPACE;
+ ovector = (int *)malloc(sizeof(int) * nmatch * 3);
+ if (ovector == NULL) return REG_ESPACE;
+ allocated_ovector = TRUE;
+ }
+ }
+
+rc = pcre_exec((const pcre *)preg->re_pcre, NULL, string, (int)strlen(string),
+ 0, options, ovector, nmatch * 3);
+
+if (rc == 0) rc = nmatch; /* All captured slots were filled in */
+
+if (rc >= 0)
+ {
+ size_t i;
+ if (!nosub)
+ {
+ for (i = 0; i < (size_t)rc; i++)
+ {
+ pmatch[i].rm_so = ovector[i*2];
+ pmatch[i].rm_eo = ovector[i*2+1];
+ }
+ if (allocated_ovector) free(ovector);
+ for (; i < nmatch; i++) pmatch[i].rm_so = pmatch[i].rm_eo = -1;
+ }
+ return 0;
+ }
+
+else
+ {
+ if (allocated_ovector) free(ovector);
+ switch(rc)
+ {
+ case PCRE_ERROR_NOMATCH: return REG_NOMATCH;
+ case PCRE_ERROR_NULL: return REG_INVARG;
+ case PCRE_ERROR_BADOPTION: return REG_INVARG;
+ case PCRE_ERROR_BADMAGIC: return REG_INVARG;
+ case PCRE_ERROR_UNKNOWN_NODE: return REG_ASSERT;
+ case PCRE_ERROR_NOMEMORY: return REG_ESPACE;
+ case PCRE_ERROR_MATCHLIMIT: return REG_ESPACE;
+ case PCRE_ERROR_BADUTF8: return REG_INVARG;
+ case PCRE_ERROR_BADUTF8_OFFSET: return REG_INVARG;
+ default: return REG_ASSERT;
+ }
+ }
+}
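+
+/* Editor's note: a minimal, hypothetical usage sketch of this wrapper, showing
+the regcomp / regexec / regerror / regfree sequence with the types from
+pcreposix.h. The pattern and subject are invented, and the sketch assumes the
+wrapper headers are on the include path. */
+
+#if 0 /* illustrative sketch only, not compiled */
+#include <stdio.h>
+#include "pcreposix.h"
+
+int main(void)
+{
+regex_t re;
+regmatch_t m[2];
+char msg[128];
+int rc = regcomp(&re, "(\\d+) apples", REG_ICASE);
+if (rc != 0)
+  {
+  regerror(rc, &re, msg, sizeof(msg));
+  printf("compile failed: %s\n", msg);
+  return 1;
+  }
+rc = regexec(&re, "I counted 12 apples", 2, m, 0);
+if (rc == 0)
+  printf("group 1 spans offsets %d to %d\n", (int)m[1].rm_so, (int)m[1].rm_eo);
+regfree(&re);
+return 0;
+}
+#endif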
+
+/* End of pcreposix.c */
diff --git a/src/third_party/pcre-7.4/pcreposix.h b/src/third_party/pcre-7.4/pcreposix.h
new file mode 100644
index 00000000000..875e1ff18ba
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcreposix.h
@@ -0,0 +1,142 @@
+/*************************************************
+* Perl-Compatible Regular Expressions *
+*************************************************/
+
+#ifndef _PCREPOSIX_H
+#define _PCREPOSIX_H
+
+/* This is the header for the POSIX wrapper interface to the PCRE Perl-
+Compatible Regular Expression library. It defines the things POSIX says should
+be there. I hope.
+
+ Copyright (c) 1997-2007 University of Cambridge
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+/* Have to include stdlib.h in order to ensure that size_t is defined. */
+
+#include <stdlib.h>
+
+/* Allow for C++ users */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Options, mostly defined by POSIX, but with a couple of extras. */
+
+#define REG_ICASE 0x0001
+#define REG_NEWLINE 0x0002
+#define REG_NOTBOL 0x0004
+#define REG_NOTEOL 0x0008
+#define REG_DOTALL 0x0010 /* NOT defined by POSIX. */
+#define REG_NOSUB 0x0020
+#define REG_UTF8 0x0040 /* NOT defined by POSIX. */
+
+/* This is not used by PCRE, but by defining it we make it easier
+to slot PCRE into existing programs that make POSIX calls. */
+
+#define REG_EXTENDED 0
+
+/* Error values. Not all these are relevant or used by the wrapper. */
+
+enum {
+ REG_ASSERT = 1, /* internal error ? */
+ REG_BADBR, /* invalid repeat counts in {} */
+ REG_BADPAT, /* pattern error */
+ REG_BADRPT, /* ? * + invalid */
+ REG_EBRACE, /* unbalanced {} */
+ REG_EBRACK, /* unbalanced [] */
+ REG_ECOLLATE, /* collation error - not relevant */
+ REG_ECTYPE, /* bad class */
+ REG_EESCAPE, /* bad escape sequence */
+ REG_EMPTY, /* empty expression */
+ REG_EPAREN, /* unbalanced () */
+ REG_ERANGE, /* bad range inside [] */
+ REG_ESIZE, /* expression too big */
+ REG_ESPACE, /* failed to get memory */
+ REG_ESUBREG, /* bad back reference */
+ REG_INVARG, /* bad argument */
+ REG_NOMATCH /* match failed */
+};
+
+
+/* The structure representing a compiled regular expression. */
+
+typedef struct {
+ void *re_pcre;
+ size_t re_nsub;
+ size_t re_erroffset;
+} regex_t;
+
+/* The structure in which a captured offset is returned. */
+
+typedef int regoff_t;
+
+typedef struct {
+ regoff_t rm_so;
+ regoff_t rm_eo;
+} regmatch_t;
+
+/* When an application links to a PCRE DLL in Windows, the symbols that are
+imported have to be identified as such. When building PCRE, the appropriate
+export settings are needed, and are set in pcreposix.c before including this
+file. */
+
+#if defined(_WIN32) && !defined(PCRE_STATIC) && !defined(PCREPOSIX_EXP_DECL)
+# define PCREPOSIX_EXP_DECL extern __declspec(dllimport)
+# define PCREPOSIX_EXP_DEFN __declspec(dllimport)
+#endif
+
+/* By default, we use the standard "extern" declarations. */
+
+#ifndef PCREPOSIX_EXP_DECL
+# ifdef __cplusplus
+# define PCREPOSIX_EXP_DECL extern "C"
+# define PCREPOSIX_EXP_DEFN extern "C"
+# else
+# define PCREPOSIX_EXP_DECL extern
+# define PCREPOSIX_EXP_DEFN extern
+# endif
+#endif
+
+/* The functions */
+
+PCREPOSIX_EXP_DECL int regcomp(regex_t *, const char *, int);
+PCREPOSIX_EXP_DECL int regexec(const regex_t *, const char *, size_t,
+ regmatch_t *, int);
+PCREPOSIX_EXP_DECL size_t regerror(int, const regex_t *, char *, size_t);
+PCREPOSIX_EXP_DECL void regfree(regex_t *);
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* End of pcreposix.h */
diff --git a/src/third_party/pcre-7.4/pcretest.c b/src/third_party/pcre-7.4/pcretest.c
new file mode 100644
index 00000000000..a222146e5e5
--- /dev/null
+++ b/src/third_party/pcre-7.4/pcretest.c
@@ -0,0 +1,2396 @@
+/*************************************************
+* PCRE testing program *
+*************************************************/
+
+/* This program was hacked up as a tester for PCRE. I really should have
+written it more tidily in the first place. Will I ever learn? It has grown and
+been extended and consequently is now rather, er, *very* untidy in places.
+
+-----------------------------------------------------------------------------
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+-----------------------------------------------------------------------------
+*/
+
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <ctype.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <time.h>
+#include <locale.h>
+#include <errno.h>
+
+
+/* A number of things vary for Windows builds. Originally, pcretest opened its
+input and output without "b"; then I was told that "b" was needed in some
+environments, so it was added for release 5.0 to both the input and output. (It
+makes no difference on Unix-like systems.) Later I was told that it is wrong
+for the input on Windows. I've now abstracted the modes into two macros that
+are set here, to make it easier to fiddle with them, and removed "b" from the
+input mode under Windows. */
+
+#if defined(_WIN32) || defined(WIN32)
+#include <io.h> /* For _setmode() */
+#include <fcntl.h> /* For _O_BINARY */
+#define INPUT_MODE "r"
+#define OUTPUT_MODE "wb"
+
+#else
+#include <sys/time.h> /* These two includes are needed */
+#include <sys/resource.h> /* for setrlimit(). */
+#define INPUT_MODE "rb"
+#define OUTPUT_MODE "wb"
+#endif
+
+
+/* We have to include pcre_internal.h because we need the internal info for
+displaying the results of pcre_study() and we also need to know about the
+internal macros, structures, and other internal data values; pcretest has
+"inside information" compared to a program that strictly follows the PCRE API.
+
+Although pcre_internal.h does itself include pcre.h, we explicitly include it
+here before pcre_internal.h so that the PCRE_EXP_xxx macros get set
+appropriately for an application, not for building PCRE. */
+
+#include "pcre.h"
+#include "pcre_internal.h"
+
+/* We need access to the data tables that PCRE uses. So as not to have to keep
+two copies, we include the source file here, changing the names of the external
+symbols to prevent clashes. */
+
+#define _pcre_utf8_table1 utf8_table1
+#define _pcre_utf8_table1_size utf8_table1_size
+#define _pcre_utf8_table2 utf8_table2
+#define _pcre_utf8_table3 utf8_table3
+#define _pcre_utf8_table4 utf8_table4
+#define _pcre_utt utt
+#define _pcre_utt_size utt_size
+#define _pcre_utt_names utt_names
+#define _pcre_OP_lengths OP_lengths
+
+#include "pcre_tables.c"
+
+/* We also need the pcre_printint() function for printing out compiled
+patterns. This function is in a separate file so that it can be included in
+pcre_compile.c when that module is compiled with debugging enabled.
+
+The definition of the macro PRINTABLE, which determines whether to print an
+output character as-is or as a hex value when showing compiled patterns, is
+contained in this file. We use it here also, in cases when the locale has not
+been explicitly changed, so as to get consistent output from systems that
+differ in their output from isprint() even in the "C" locale. */
+
+#include "pcre_printint.src"
+
+#define PRINTHEX(c) (locale_set? isprint(c) : PRINTABLE(c))
+
+
+/* It is possible to compile this test program without including support for
+testing the POSIX interface, though this is not available via the standard
+Makefile. */
+
+#if !defined NOPOSIX
+#include "pcreposix.h"
+#endif
+
+/* It is also possible, for the benefit of the version currently imported into
+Exim, to build pcretest without support for UTF8 (define NOUTF8), without the
+interface to the DFA matcher (NODFA), and without the doublecheck of the old
+"info" function (define NOINFOCHECK). In fact, we automatically cut out the
+UTF8 support if PCRE is built without it. */
+
+#ifndef SUPPORT_UTF8
+#ifndef NOUTF8
+#define NOUTF8
+#endif
+#endif
+
+
+/* Other parameters */
+
+#ifndef CLOCKS_PER_SEC
+#ifdef CLK_TCK
+#define CLOCKS_PER_SEC CLK_TCK
+#else
+#define CLOCKS_PER_SEC 100
+#endif
+#endif
+
+/* This is the default loop count for timing. */
+
+#define LOOPREPEAT 500000
+
+/* Static variables */
+
+static FILE *outfile;
+static int log_store = 0;
+static int callout_count;
+static int callout_extra;
+static int callout_fail_count;
+static int callout_fail_id;
+static int debug_lengths;
+static int first_callout;
+static int locale_set = 0;
+static int show_malloc;
+static int use_utf8;
+static size_t gotten_store;
+
+/* The buffers grow automatically if very long input lines are encountered. */
+
+static int buffer_size = 50000;
+static uschar *buffer = NULL;
+static uschar *dbuffer = NULL;
+static uschar *pbuffer = NULL;
+
+
+
+/*************************************************
+* Read or extend an input line *
+*************************************************/
+
+/* Input lines are read into buffer, but both patterns and data lines can be
+continued over multiple input lines. In addition, if the buffer fills up, we
+want to automatically expand it so as to be able to handle extremely large
+lines that are needed for certain stress tests. When the input buffer is
+expanded, the other two buffers must also be expanded likewise, and the
+contents of pbuffer, which are a copy of the input for callouts, must be
+preserved (for when expansion happens for a data line). This is not the most
+efficient way of handling this, but hey, this is just a test program!
+
+Arguments:
+ f the file to read
+ start where in buffer to start (this *must* be within buffer)
+
+Returns: pointer to the start of new data
+ could be a copy of start, or could be moved
+ NULL if no data read and EOF reached
+*/
+
+static uschar *
+extend_inputline(FILE *f, uschar *start)
+{
+uschar *here = start;
+
+for (;;)
+ {
+ int rlen = buffer_size - (here - buffer);
+
+ if (rlen > 1000)
+ {
+ int dlen;
+ if (fgets((char *)here, rlen, f) == NULL)
+ return (here == start)? NULL : start;
+ dlen = (int)strlen((char *)here);
+ if (dlen > 0 && here[dlen - 1] == '\n') return start;
+ here += dlen;
+ }
+
+ else
+ {
+ int new_buffer_size = 2*buffer_size;
+ uschar *new_buffer = (unsigned char *)malloc(new_buffer_size);
+ uschar *new_dbuffer = (unsigned char *)malloc(new_buffer_size);
+ uschar *new_pbuffer = (unsigned char *)malloc(new_buffer_size);
+
+ if (new_buffer == NULL || new_dbuffer == NULL || new_pbuffer == NULL)
+ {
+ fprintf(stderr, "pcretest: malloc(%d) failed\n", new_buffer_size);
+ exit(1);
+ }
+
+ memcpy(new_buffer, buffer, buffer_size);
+ memcpy(new_pbuffer, pbuffer, buffer_size);
+
+ buffer_size = new_buffer_size;
+
+ start = new_buffer + (start - buffer);
+ here = new_buffer + (here - buffer);
+
+ free(buffer);
+ free(dbuffer);
+ free(pbuffer);
+
+ buffer = new_buffer;
+ dbuffer = new_dbuffer;
+ pbuffer = new_pbuffer;
+ }
+ }
+
+return NULL; /* Control never gets here */
+}
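+
+/* Editor's note: the essential trick above is re-basing the caller's pointers
+after the buffer is reallocated and moves. A stripped-down, hypothetical sketch
+of just that step (generic C, not tied to pcretest's three parallel buffers): */
+
+#if 0 /* illustrative sketch only, not compiled */
+static char *grow_buffer(char *buf, size_t *size, char **cursor)
+{
+size_t new_size = *size * 2;
+char *new_buf = (char *)malloc(new_size);
+if (new_buf == NULL) return NULL;
+memcpy(new_buf, buf, *size);
+*cursor = new_buf + (*cursor - buf);   /* keep the cursor at the same offset */
+free(buf);
+*size = new_size;
+return new_buf;
+}
+#endif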
+
+
+
+
+
+
+
+/*************************************************
+* Read number from string *
+*************************************************/
+
+/* We don't use strtoul() because SunOS4 doesn't have it. Rather than mess
+around with conditional compilation, just do the job by hand. It is only used
+for unpicking arguments, so just keep it simple.
+
+Arguments:
+ str string to be converted
+ endptr where to put the end pointer
+
+Returns: the converted value, as an int
+*/
+
+static int
+get_value(unsigned char *str, unsigned char **endptr)
+{
+int result = 0;
+while(*str != 0 && isspace(*str)) str++;
+while (isdigit(*str)) result = result * 10 + (int)(*str++ - '0');
+*endptr = str;
+return(result);
+}
+
+
+
+
+/*************************************************
+* Convert UTF-8 string to value *
+*************************************************/
+
+/* This function takes one or more bytes that represents a UTF-8 character,
+and returns the value of the character.
+
+Argument:
+ utf8bytes a pointer to the byte vector
+ vptr a pointer to an int to receive the value
+
+Returns: > 0 => the number of bytes consumed
+ -6 to 0 => malformed UTF-8 character at offset = (-return)
+*/
+
+#if !defined NOUTF8
+
+static int
+utf82ord(unsigned char *utf8bytes, int *vptr)
+{
+int c = *utf8bytes++;
+int d = c;
+int i, j, s;
+
+for (i = -1; i < 6; i++) /* i is number of additional bytes */
+ {
+ if ((d & 0x80) == 0) break;
+ d <<= 1;
+ }
+
+if (i == -1) { *vptr = c; return 1; } /* ascii character */
+if (i == 0 || i == 6) return 0; /* invalid UTF-8 */
+
+/* i now has a value in the range 1-5 */
+
+s = 6*i;
+d = (c & utf8_table3[i]) << s;
+
+for (j = 0; j < i; j++)
+ {
+ c = *utf8bytes++;
+ if ((c & 0xc0) != 0x80) return -(j+1);
+ s -= 6;
+ d |= (c & 0x3f) << s;
+ }
+
+/* Check that encoding was the correct unique one */
+
+for (j = 0; j < utf8_table1_size; j++)
+ if (d <= utf8_table1[j]) break;
+if (j != i) return -(i+1);
+
+/* Valid value */
+
+*vptr = d;
+return i+1;
+}
+
+#endif
+
+
+
+/*************************************************
+* Convert character value to UTF-8 *
+*************************************************/
+
+/* This function takes an integer value in the range 0 - 0x7fffffff
+and encodes it as a UTF-8 character in 0 to 6 bytes.
+
+Arguments:
+ cvalue the character value
+ utf8bytes pointer to buffer for result - at least 6 bytes long
+
+Returns: number of characters placed in the buffer
+*/
+
+#if !defined NOUTF8
+
+static int
+ord2utf8(int cvalue, uschar *utf8bytes)
+{
+register int i, j;
+for (i = 0; i < utf8_table1_size; i++)
+ if (cvalue <= utf8_table1[i]) break;
+utf8bytes += i;
+for (j = i; j > 0; j--)
+ {
+ *utf8bytes-- = 0x80 | (cvalue & 0x3f);
+ cvalue >>= 6;
+ }
+*utf8bytes = utf8_table2[i] | cvalue;
+return i + 1;
+}
+
+#endif
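+
+/* Editor's note: a small round-trip illustration of the two conversion helpers
+above, assuming UTF-8 support is compiled in (NOUTF8 not defined). The values
+are hypothetical; EURO SIGN U+20AC encodes as the three bytes 0xE2 0x82 0xAC. */
+
+#if 0 /* illustrative sketch only, not compiled */
+static void utf8_roundtrip_example(void)
+{
+uschar buf[8];
+int value, used;
+used = ord2utf8(0x20AC, buf);     /* returns 3; buf holds E2 82 AC */
+used = utf82ord(buf, &value);     /* returns 3; value == 0x20AC    */
+(void)used; (void)value;
+}
+#endif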
+
+
+
+/*************************************************
+* Print character string *
+*************************************************/
+
+/* Character string printing function. Must handle UTF-8 strings in utf8
+mode. Yields number of characters printed. If handed a NULL file, just counts
+chars without printing. */
+
+static int pchars(unsigned char *p, int length, FILE *f)
+{
+int c = 0;
+int yield = 0;
+
+while (length-- > 0)
+ {
+#if !defined NOUTF8
+ if (use_utf8)
+ {
+ int rc = utf82ord(p, &c);
+
+ if (rc > 0 && rc <= length + 1) /* Mustn't run over the end */
+ {
+ length -= rc - 1;
+ p += rc;
+ if (PRINTHEX(c))
+ {
+ if (f != NULL) fprintf(f, "%c", c);
+ yield++;
+ }
+ else
+ {
+ int n = 4;
+ if (f != NULL) fprintf(f, "\\x{%02x}", c);
+ yield += (n <= 0x000000ff)? 2 :
+ (n <= 0x00000fff)? 3 :
+ (n <= 0x0000ffff)? 4 :
+ (n <= 0x000fffff)? 5 : 6;
+ }
+ continue;
+ }
+ }
+#endif
+
+ /* Not UTF-8, or malformed UTF-8 */
+
+ c = *p++;
+ if (PRINTHEX(c))
+ {
+ if (f != NULL) fprintf(f, "%c", c);
+ yield++;
+ }
+ else
+ {
+ if (f != NULL) fprintf(f, "\\x%02x", c);
+ yield += 4;
+ }
+ }
+
+return yield;
+}
+
+
+
+/*************************************************
+* Callout function *
+*************************************************/
+
+/* Called from PCRE as a result of the (?C) item. We print out where we are in
+the match. Yield zero unless the callout count exceeds the fail count, or the
+callout data is non-zero. */
+
+static int callout(pcre_callout_block *cb)
+{
+FILE *f = (first_callout | callout_extra)? outfile : NULL;
+int i, pre_start, post_start, subject_length;
+
+if (callout_extra)
+ {
+ fprintf(f, "Callout %d: last capture = %d\n",
+ cb->callout_number, cb->capture_last);
+
+ for (i = 0; i < cb->capture_top * 2; i += 2)
+ {
+ if (cb->offset_vector[i] < 0)
+ fprintf(f, "%2d: <unset>\n", i/2);
+ else
+ {
+ fprintf(f, "%2d: ", i/2);
+ (void)pchars((unsigned char *)cb->subject + cb->offset_vector[i],
+ cb->offset_vector[i+1] - cb->offset_vector[i], f);
+ fprintf(f, "\n");
+ }
+ }
+ }
+
+/* Re-print the subject in canonical form, the first time or if giving full
+details. On subsequent calls in the same match, we use pchars just to find the
+printed lengths of the substrings. */
+
+if (f != NULL) fprintf(f, "--->");
+
+pre_start = pchars((unsigned char *)cb->subject, cb->start_match, f);
+post_start = pchars((unsigned char *)(cb->subject + cb->start_match),
+ cb->current_position - cb->start_match, f);
+
+subject_length = pchars((unsigned char *)cb->subject, cb->subject_length, NULL);
+
+(void)pchars((unsigned char *)(cb->subject + cb->current_position),
+ cb->subject_length - cb->current_position, f);
+
+if (f != NULL) fprintf(f, "\n");
+
+/* Always print appropriate indicators, with callout number if not already
+shown. For automatic callouts, show the pattern offset. */
+
+if (cb->callout_number == 255)
+ {
+ fprintf(outfile, "%+3d ", cb->pattern_position);
+ if (cb->pattern_position > 99) fprintf(outfile, "\n ");
+ }
+else
+ {
+ if (callout_extra) fprintf(outfile, " ");
+ else fprintf(outfile, "%3d ", cb->callout_number);
+ }
+
+for (i = 0; i < pre_start; i++) fprintf(outfile, " ");
+fprintf(outfile, "^");
+
+if (post_start > 0)
+ {
+ for (i = 0; i < post_start - 1; i++) fprintf(outfile, " ");
+ fprintf(outfile, "^");
+ }
+
+for (i = 0; i < subject_length - pre_start - post_start + 4; i++)
+ fprintf(outfile, " ");
+
+fprintf(outfile, "%.*s", (cb->next_item_length == 0)? 1 : cb->next_item_length,
+ pbuffer + cb->pattern_position);
+
+fprintf(outfile, "\n");
+first_callout = 0;
+
+if (cb->callout_data != NULL)
+ {
+ int callout_data = *((int *)(cb->callout_data));
+ if (callout_data != 0)
+ {
+ fprintf(outfile, "Callout data = %d\n", callout_data);
+ return callout_data;
+ }
+ }
+
+return (cb->callout_number != callout_fail_id)? 0 :
+ (++callout_count >= callout_fail_count)? 1 : 0;
+}
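+
+/* Editor's note: the handler above is hooked in, further down in main(), via
+the global pcre_callout pointer. The hypothetical fragment below shows the same
+mechanism with a much simpler handler, so the moving parts are visible; the
+name my_callout and the pattern are invented for illustration. */
+
+#if 0 /* illustrative sketch only, not compiled */
+static int my_callout(pcre_callout_block *cb)
+{
+printf("callout %d at subject offset %d\n",
+  cb->callout_number, cb->current_position);
+return 0;                              /* 0 means "carry on matching" */
+}
+
+static void callout_example(void)
+{
+const char *error;
+int erroffset;
+int ovector[30];
+pcre *re;
+
+pcre_callout = my_callout;             /* register the handler */
+re = pcre_compile("a(?C1)b", 0, &error, &erroffset, NULL);
+if (re != NULL)
+  {
+  (void)pcre_exec(re, NULL, "ab", 2, 0, 0, ovector, 30);
+  free(re);
+  }
+}
+#endif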
+
+
+/*************************************************
+* Local malloc functions *
+*************************************************/
+
+/* Alternative malloc function, to test functionality and show the size of the
+compiled re. */
+
+static void *new_malloc(size_t size)
+{
+void *block = malloc(size);
+gotten_store = size;
+if (show_malloc)
+ fprintf(outfile, "malloc %3d %p\n", (int)size, block);
+return block;
+}
+
+static void new_free(void *block)
+{
+if (show_malloc)
+ fprintf(outfile, "free %p\n", block);
+free(block);
+}
+
+
+/* For recursion malloc/free, to test stacking calls */
+
+static void *stack_malloc(size_t size)
+{
+void *block = malloc(size);
+if (show_malloc)
+ fprintf(outfile, "stack_malloc %3d %p\n", (int)size, block);
+return block;
+}
+
+static void stack_free(void *block)
+{
+if (show_malloc)
+ fprintf(outfile, "stack_free %p\n", block);
+free(block);
+}
+
+
+/*************************************************
+* Call pcre_fullinfo() *
+*************************************************/
+
+/* Get one piece of information from the pcre_fullinfo() function */
+
+static void new_info(pcre *re, pcre_extra *study, int option, void *ptr)
+{
+int rc;
+if ((rc = pcre_fullinfo(re, study, option, ptr)) < 0)
+ fprintf(outfile, "Error %d from pcre_fullinfo(%d)\n", rc, option);
+}
+
+
+
+/*************************************************
+* Byte flipping function *
+*************************************************/
+
+static unsigned long int
+byteflip(unsigned long int value, int n)
+{
+if (n == 2) return ((value & 0x00ff) << 8) | ((value & 0xff00) >> 8);
+return ((value & 0x000000ff) << 24) |
+ ((value & 0x0000ff00) << 8) |
+ ((value & 0x00ff0000) >> 8) |
+ ((value & 0xff000000) >> 24);
+}
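+
+/* Editor's note: byteflip() is used when a pre-compiled pattern turns out to
+have the opposite byte order (see the magic-number check in main() below) and
+by the /F test option. Two worked values, for orientation:
+
+  byteflip(0x12345678, 4) == 0x78563412
+  byteflip(0x1234, 2)     == 0x3412
+*/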
+
+
+
+
+/*************************************************
+* Check match or recursion limit *
+*************************************************/
+
+static int
+check_match_limit(pcre *re, pcre_extra *extra, uschar *bptr, int len,
+ int start_offset, int options, int *use_offsets, int use_size_offsets,
+ int flag, unsigned long int *limit, int errnumber, const char *msg)
+{
+int count;
+int min = 0;
+int mid = 64;
+int max = -1;
+
+extra->flags |= flag;
+
+for (;;)
+ {
+ *limit = mid;
+
+ count = pcre_exec(re, extra, (char *)bptr, len, start_offset, options,
+ use_offsets, use_size_offsets);
+
+ if (count == errnumber)
+ {
+ /* fprintf(outfile, "Testing %s limit = %d\n", msg, mid); */
+ min = mid;
+ mid = (mid == max - 1)? max : (max > 0)? (min + max)/2 : mid*2;
+ }
+
+ else if (count >= 0 || count == PCRE_ERROR_NOMATCH ||
+ count == PCRE_ERROR_PARTIAL)
+ {
+ if (mid == min + 1)
+ {
+ fprintf(outfile, "Minimum %s limit = %d\n", msg, mid);
+ break;
+ }
+ /* fprintf(outfile, "Testing %s limit = %d\n", msg, mid); */
+ max = mid;
+ mid = (min + mid)/2;
+ }
+ else break; /* Some other error */
+ }
+
+extra->flags &= ~flag;
+return count;
+}
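+
+/* Editor's note: check_match_limit() binary-searches for the smallest limit
+that still lets the match complete, which is only useful for testing. An
+ordinary caller would simply impose a fixed ceiling; a hedged sketch of that,
+with an invented function name and limit value: */
+
+#if 0 /* illustrative sketch only, not compiled */
+static int exec_with_ceiling(pcre *re, const char *subject, int len)
+{
+pcre_extra extra;
+int ovector[30];
+memset(&extra, 0, sizeof(extra));
+extra.flags = PCRE_EXTRA_MATCH_LIMIT;
+extra.match_limit = 10000;             /* abandon runaway matches */
+return pcre_exec(re, &extra, subject, len, 0, 0, ovector, 30);
+                                       /* PCRE_ERROR_MATCHLIMIT if exceeded */
+}
+#endif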
+
+
+
+/*************************************************
+* Case-independent strncmp() function *
+*************************************************/
+
+/*
+Arguments:
+ s first string
+ t second string
+ n number of characters to compare
+
+Returns: < 0, = 0, or > 0, according to the comparison
+*/
+
+static int
+strncmpic(uschar *s, uschar *t, int n)
+{
+while (n--)
+ {
+ int c = tolower(*s++) - tolower(*t++);
+ if (c) return c;
+ }
+return 0;
+}
+
+
+
+/*************************************************
+* Check newline indicator *
+*************************************************/
+
+/* This is used both at compile and run-time to check for <xxx> escapes, where
+xxx is LF, CR, CRLF, ANYCRLF, or ANY. Print a message and return 0 if there is
+no match.
+
+Arguments:
+ p points after the leading '<'
+ f file for error message
+
+Returns: appropriate PCRE_NEWLINE_xxx flags, or 0
+*/
+
+static int
+check_newline(uschar *p, FILE *f)
+{
+if (strncmpic(p, (uschar *)"cr>", 3) == 0) return PCRE_NEWLINE_CR;
+if (strncmpic(p, (uschar *)"lf>", 3) == 0) return PCRE_NEWLINE_LF;
+if (strncmpic(p, (uschar *)"crlf>", 5) == 0) return PCRE_NEWLINE_CRLF;
+if (strncmpic(p, (uschar *)"anycrlf>", 8) == 0) return PCRE_NEWLINE_ANYCRLF;
+if (strncmpic(p, (uschar *)"any>", 4) == 0) return PCRE_NEWLINE_ANY;
+if (strncmpic(p, (uschar *)"bsr_anycrlf>", 12) == 0) return PCRE_BSR_ANYCRLF;
+if (strncmpic(p, (uschar *)"bsr_unicode>", 12) == 0) return PCRE_BSR_UNICODE;
+fprintf(f, "Unknown newline type at: <%s\n", p);
+return 0;
+}
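+
+/* Editor's note: this escape appears after the closing pattern delimiter in a
+pcretest input script. A hypothetical script line such as
+
+  /abc/<anycrlf>
+
+sets PCRE_NEWLINE_ANYCRLF for that one pattern (see the '<' case in the option
+scan below); the bsr_xxx forms select what \R is allowed to match. */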
+
+
+
+/*************************************************
+* Usage function *
+*************************************************/
+
+static void
+usage(void)
+{
+printf("Usage: pcretest [options] [<input> [<output>]]\n");
+printf(" -b show compiled code (bytecode)\n");
+printf(" -C show PCRE compile-time options and exit\n");
+printf(" -d debug: show compiled code and information (-b and -i)\n");
+#if !defined NODFA
+printf(" -dfa force DFA matching for all subjects\n");
+#endif
+printf(" -help show usage information\n");
+printf(" -i show information about compiled patterns\n"
+ " -m output memory used information\n"
+ " -o <n> set size of offsets vector to <n>\n");
+#if !defined NOPOSIX
+printf(" -p use POSIX interface\n");
+#endif
+printf(" -q quiet: do not output PCRE version number at start\n");
+printf(" -S <n> set stack size to <n> megabytes\n");
+printf(" -s output store (memory) used information\n"
+ " -t time compilation and execution\n");
+printf(" -t <n> time compilation and execution, repeating <n> times\n");
+printf(" -tm time execution (matching) only\n");
+printf(" -tm <n> time execution (matching) only, repeating <n> times\n");
+}
+
+
+
+/*************************************************
+* Main Program *
+*************************************************/
+
+/* Read lines from named file or stdin and write to named file or stdout; lines
+consist of a regular expression within delimiters, optionally followed by
+options, followed by a set of test data, terminated by an empty line. */
+
+int main(int argc, char **argv)
+{
+FILE *infile = stdin;
+int options = 0;
+int study_options = 0;
+int op = 1;
+int timeit = 0;
+int timeitm = 0;
+int showinfo = 0;
+int showstore = 0;
+int quiet = 0;
+int size_offsets = 45;
+int size_offsets_max;
+int *offsets = NULL;
+#if !defined NOPOSIX
+int posix = 0;
+#endif
+int debug = 0;
+int done = 0;
+int all_use_dfa = 0;
+int yield = 0;
+int stack_size;
+
+/* These vectors store, end-to-end, a list of captured substring names. Assume
+that 1024 is plenty long enough for the few names we'll be testing. */
+
+uschar copynames[1024];
+uschar getnames[1024];
+
+uschar *copynamesptr;
+uschar *getnamesptr;
+
+/* Get buffers from malloc() so that Electric Fence will check their misuse
+when I am debugging. They grow automatically when very long lines are read. */
+
+buffer = (unsigned char *)malloc(buffer_size);
+dbuffer = (unsigned char *)malloc(buffer_size);
+pbuffer = (unsigned char *)malloc(buffer_size);
+
+/* The outfile variable is static so that new_malloc can use it. */
+
+outfile = stdout;
+
+/* The following _setmode() stuff is some Windows magic that tells its runtime
+library to translate CRLF into a single LF character. At least, that's what
+I've been told: never having used Windows I take this all on trust. Originally
+it set 0x8000, but then I was advised that _O_BINARY was better. */
+
+#if defined(_WIN32) || defined(WIN32)
+_setmode( _fileno( stdout ), _O_BINARY );
+#endif
+
+/* Scan options */
+
+while (argc > 1 && argv[op][0] == '-')
+ {
+ unsigned char *endptr;
+
+ if (strcmp(argv[op], "-s") == 0 || strcmp(argv[op], "-m") == 0)
+ showstore = 1;
+ else if (strcmp(argv[op], "-q") == 0) quiet = 1;
+ else if (strcmp(argv[op], "-b") == 0) debug = 1;
+ else if (strcmp(argv[op], "-i") == 0) showinfo = 1;
+ else if (strcmp(argv[op], "-d") == 0) showinfo = debug = 1;
+#if !defined NODFA
+ else if (strcmp(argv[op], "-dfa") == 0) all_use_dfa = 1;
+#endif
+ else if (strcmp(argv[op], "-o") == 0 && argc > 2 &&
+ ((size_offsets = get_value((unsigned char *)argv[op+1], &endptr)),
+ *endptr == 0))
+ {
+ op++;
+ argc--;
+ }
+ else if (strcmp(argv[op], "-t") == 0 || strcmp(argv[op], "-tm") == 0)
+ {
+ int both = argv[op][2] == 0;
+ int temp;
+ if (argc > 2 && (temp = get_value((unsigned char *)argv[op+1], &endptr),
+ *endptr == 0))
+ {
+ timeitm = temp;
+ op++;
+ argc--;
+ }
+ else timeitm = LOOPREPEAT;
+ if (both) timeit = timeitm;
+ }
+ else if (strcmp(argv[op], "-S") == 0 && argc > 2 &&
+ ((stack_size = get_value((unsigned char *)argv[op+1], &endptr)),
+ *endptr == 0))
+ {
+#if defined(_WIN32) || defined(WIN32)
+ printf("PCRE: -S not supported on this OS\n");
+ exit(1);
+#else
+ int rc;
+ struct rlimit rlim;
+ getrlimit(RLIMIT_STACK, &rlim);
+ rlim.rlim_cur = stack_size * 1024 * 1024;
+ rc = setrlimit(RLIMIT_STACK, &rlim);
+ if (rc != 0)
+ {
+ printf("PCRE: setrlimit() failed with error %d\n", rc);
+ exit(1);
+ }
+ op++;
+ argc--;
+#endif
+ }
+#if !defined NOPOSIX
+ else if (strcmp(argv[op], "-p") == 0) posix = 1;
+#endif
+ else if (strcmp(argv[op], "-C") == 0)
+ {
+ int rc;
+ printf("PCRE version %s\n", pcre_version());
+ printf("Compiled with\n");
+ (void)pcre_config(PCRE_CONFIG_UTF8, &rc);
+ printf(" %sUTF-8 support\n", rc? "" : "No ");
+ (void)pcre_config(PCRE_CONFIG_UNICODE_PROPERTIES, &rc);
+ printf(" %sUnicode properties support\n", rc? "" : "No ");
+ (void)pcre_config(PCRE_CONFIG_NEWLINE, &rc);
+ printf(" Newline sequence is %s\n", (rc == '\r')? "CR" :
+ (rc == '\n')? "LF" : (rc == ('\r'<<8 | '\n'))? "CRLF" :
+ (rc == -2)? "ANYCRLF" :
+ (rc == -1)? "ANY" : "???");
+ (void)pcre_config(PCRE_CONFIG_BSR, &rc);
+ printf(" \\R matches %s\n", rc? "CR, LF, or CRLF only" :
+ "all Unicode newlines");
+ (void)pcre_config(PCRE_CONFIG_LINK_SIZE, &rc);
+ printf(" Internal link size = %d\n", rc);
+ (void)pcre_config(PCRE_CONFIG_POSIX_MALLOC_THRESHOLD, &rc);
+ printf(" POSIX malloc threshold = %d\n", rc);
+ (void)pcre_config(PCRE_CONFIG_MATCH_LIMIT, &rc);
+ printf(" Default match limit = %d\n", rc);
+ (void)pcre_config(PCRE_CONFIG_MATCH_LIMIT_RECURSION, &rc);
+ printf(" Default recursion depth limit = %d\n", rc);
+ (void)pcre_config(PCRE_CONFIG_STACKRECURSE, &rc);
+ printf(" Match recursion uses %s\n", rc? "stack" : "heap");
+ goto EXIT;
+ }
+ else if (strcmp(argv[op], "-help") == 0 ||
+ strcmp(argv[op], "--help") == 0)
+ {
+ usage();
+ goto EXIT;
+ }
+ else
+ {
+ printf("** Unknown or malformed option %s\n", argv[op]);
+ usage();
+ yield = 1;
+ goto EXIT;
+ }
+ op++;
+ argc--;
+ }
+
+/* Get the store for the offsets vector, and remember what it was */
+
+size_offsets_max = size_offsets;
+offsets = (int *)malloc(size_offsets_max * sizeof(int));
+if (offsets == NULL)
+ {
+ printf("** Failed to get %d bytes of memory for offsets vector\n",
+ (int)(size_offsets_max * sizeof(int)));
+ yield = 1;
+ goto EXIT;
+ }
+
+/* Sort out the input and output files */
+
+if (argc > 1)
+ {
+ infile = fopen(argv[op], INPUT_MODE);
+ if (infile == NULL)
+ {
+ printf("** Failed to open %s\n", argv[op]);
+ yield = 1;
+ goto EXIT;
+ }
+ }
+
+if (argc > 2)
+ {
+ outfile = fopen(argv[op+1], OUTPUT_MODE);
+ if (outfile == NULL)
+ {
+ printf("** Failed to open %s\n", argv[op+1]);
+ yield = 1;
+ goto EXIT;
+ }
+ }
+
+/* Set alternative malloc function */
+
+pcre_malloc = new_malloc;
+pcre_free = new_free;
+pcre_stack_malloc = stack_malloc;
+pcre_stack_free = stack_free;
+
+/* Heading line unless quiet, then prompt for first regex if stdin */
+
+if (!quiet) fprintf(outfile, "PCRE version %s\n\n", pcre_version());
+
+/* Main loop */
+
+while (!done)
+ {
+ pcre *re = NULL;
+ pcre_extra *extra = NULL;
+
+#if !defined NOPOSIX /* There are still compilers that require no indent */
+ regex_t preg;
+ int do_posix = 0;
+#endif
+
+ const char *error;
+ unsigned char *p, *pp, *ppp;
+ unsigned char *to_file = NULL;
+ const unsigned char *tables = NULL;
+ unsigned long int true_size, true_study_size = 0;
+ size_t size, regex_gotten_store;
+ int do_study = 0;
+ int do_debug = debug;
+ int do_G = 0;
+ int do_g = 0;
+ int do_showinfo = showinfo;
+ int do_showrest = 0;
+ int do_flip = 0;
+ int erroroffset, len, delimiter, poffset;
+
+ use_utf8 = 0;
+ debug_lengths = 1;
+
+ if (infile == stdin) printf(" re> ");
+ if (extend_inputline(infile, buffer) == NULL) break;
+ if (infile != stdin) fprintf(outfile, "%s", (char *)buffer);
+ fflush(outfile);
+
+ p = buffer;
+ while (isspace(*p)) p++;
+ if (*p == 0) continue;
+
+ /* See if the pattern is to be loaded pre-compiled from a file. */
+
+ if (*p == '<' && strchr((char *)(p+1), '<') == NULL)
+ {
+ unsigned long int magic, get_options;
+ uschar sbuf[8];
+ FILE *f;
+
+ p++;
+ pp = p + (int)strlen((char *)p);
+ while (isspace(pp[-1])) pp--;
+ *pp = 0;
+
+ f = fopen((char *)p, "rb");
+ if (f == NULL)
+ {
+ fprintf(outfile, "Failed to open %s: %s\n", p, strerror(errno));
+ continue;
+ }
+
+ if (fread(sbuf, 1, 8, f) != 8) goto FAIL_READ;
+
+ true_size =
+ (sbuf[0] << 24) | (sbuf[1] << 16) | (sbuf[2] << 8) | sbuf[3];
+ true_study_size =
+ (sbuf[4] << 24) | (sbuf[5] << 16) | (sbuf[6] << 8) | sbuf[7];
+
+ re = (real_pcre *)new_malloc(true_size);
+ regex_gotten_store = gotten_store;
+
+ if (fread(re, 1, true_size, f) != true_size) goto FAIL_READ;
+
+ magic = ((real_pcre *)re)->magic_number;
+ if (magic != MAGIC_NUMBER)
+ {
+ if (byteflip(magic, sizeof(magic)) == MAGIC_NUMBER)
+ {
+ do_flip = 1;
+ }
+ else
+ {
+ fprintf(outfile, "Data in %s is not a compiled PCRE regex\n", p);
+ fclose(f);
+ continue;
+ }
+ }
+
+ fprintf(outfile, "Compiled regex%s loaded from %s\n",
+ do_flip? " (byte-inverted)" : "", p);
+
+ /* Need to know if UTF-8 for printing data strings */
+
+ new_info(re, NULL, PCRE_INFO_OPTIONS, &get_options);
+ use_utf8 = (get_options & PCRE_UTF8) != 0;
+
+ /* Now see if there is any following study data */
+
+ if (true_study_size != 0)
+ {
+ pcre_study_data *psd;
+
+ extra = (pcre_extra *)new_malloc(sizeof(pcre_extra) + true_study_size);
+ extra->flags = PCRE_EXTRA_STUDY_DATA;
+
+ psd = (pcre_study_data *)(((char *)extra) + sizeof(pcre_extra));
+ extra->study_data = psd;
+
+ if (fread(psd, 1, true_study_size, f) != true_study_size)
+ {
+ FAIL_READ:
+ fprintf(outfile, "Failed to read data from %s\n", p);
+ if (extra != NULL) new_free(extra);
+ if (re != NULL) new_free(re);
+ fclose(f);
+ continue;
+ }
+ fprintf(outfile, "Study data loaded from %s\n", p);
+ do_study = 1; /* To get the data output if requested */
+ }
+ else fprintf(outfile, "No study data\n");
+
+ fclose(f);
+ goto SHOW_INFO;
+ }
+
+ /* In-line pattern (the usual case). Get the delimiter and seek the end of
+ the pattern; if it isn't complete, read more. */
+
+ delimiter = *p++;
+
+ if (isalnum(delimiter) || delimiter == '\\')
+ {
+ fprintf(outfile, "** Delimiter must not be alphameric or \\\n");
+ goto SKIP_DATA;
+ }
+
+ pp = p;
+ poffset = p - buffer;
+
+ for(;;)
+ {
+ while (*pp != 0)
+ {
+ if (*pp == '\\' && pp[1] != 0) pp++;
+ else if (*pp == delimiter) break;
+ pp++;
+ }
+ if (*pp != 0) break;
+ if (infile == stdin) printf(" > ");
+ if ((pp = extend_inputline(infile, pp)) == NULL)
+ {
+ fprintf(outfile, "** Unexpected EOF\n");
+ done = 1;
+ goto CONTINUE;
+ }
+ if (infile != stdin) fprintf(outfile, "%s", (char *)pp);
+ }
+
+ /* The buffer may have moved while being extended; reset the start of data
+ pointer to the correct relative point in the buffer. */
+
+ p = buffer + poffset;
+
+ /* If the first character after the delimiter is backslash, make
+ the pattern end with backslash. This is purely to provide a way
+ of testing for the error message when a pattern ends with backslash. */
+
+ if (pp[1] == '\\') *pp++ = '\\';
+
+ /* Terminate the pattern at the delimiter, and save a copy of the pattern
+ for callouts. */
+
+ *pp++ = 0;
+ strcpy((char *)pbuffer, (char *)p);
+
+ /* Look for options after final delimiter */
+
+ options = 0;
+ study_options = 0;
+ log_store = showstore; /* default from command line */
+
+ while (*pp != 0)
+ {
+ switch (*pp++)
+ {
+ case 'f': options |= PCRE_FIRSTLINE; break;
+ case 'g': do_g = 1; break;
+ case 'i': options |= PCRE_CASELESS; break;
+ case 'm': options |= PCRE_MULTILINE; break;
+ case 's': options |= PCRE_DOTALL; break;
+ case 'x': options |= PCRE_EXTENDED; break;
+
+ case '+': do_showrest = 1; break;
+ case 'A': options |= PCRE_ANCHORED; break;
+ case 'B': do_debug = 1; break;
+ case 'C': options |= PCRE_AUTO_CALLOUT; break;
+ case 'D': do_debug = do_showinfo = 1; break;
+ case 'E': options |= PCRE_DOLLAR_ENDONLY; break;
+ case 'F': do_flip = 1; break;
+ case 'G': do_G = 1; break;
+ case 'I': do_showinfo = 1; break;
+ case 'J': options |= PCRE_DUPNAMES; break;
+ case 'M': log_store = 1; break;
+ case 'N': options |= PCRE_NO_AUTO_CAPTURE; break;
+
+#if !defined NOPOSIX
+ case 'P': do_posix = 1; break;
+#endif
+
+ case 'S': do_study = 1; break;
+ case 'U': options |= PCRE_UNGREEDY; break;
+ case 'X': options |= PCRE_EXTRA; break;
+ case 'Z': debug_lengths = 0; break;
+ case '8': options |= PCRE_UTF8; use_utf8 = 1; break;
+ case '?': options |= PCRE_NO_UTF8_CHECK; break;
+
+ case 'L':
+ ppp = pp;
+ /* The '\r' test here is so that it works on Windows. */
+ /* The '0' test is just in case this is an unterminated line. */
+ while (*ppp != 0 && *ppp != '\n' && *ppp != '\r' && *ppp != ' ') ppp++;
+ *ppp = 0;
+ if (setlocale(LC_CTYPE, (const char *)pp) == NULL)
+ {
+ fprintf(outfile, "** Failed to set locale \"%s\"\n", pp);
+ goto SKIP_DATA;
+ }
+ locale_set = 1;
+ tables = pcre_maketables();
+ pp = ppp;
+ break;
+
+ case '>':
+ to_file = pp;
+ while (*pp != 0) pp++;
+ while (isspace(pp[-1])) pp--;
+ *pp = 0;
+ break;
+
+ case '<':
+ {
+ int x = check_newline(pp, outfile);
+ if (x == 0) goto SKIP_DATA;
+ options |= x;
+ while (*pp++ != '>');
+ }
+ break;
+
+ case '\r': /* So that it works in Windows */
+ case '\n':
+ case ' ':
+ break;
+
+ default:
+ fprintf(outfile, "** Unknown option '%c'\n", pp[-1]);
+ goto SKIP_DATA;
+ }
+ }
+
+ /* Handle compiling via the POSIX interface, which doesn't support the
+ timing, showing, or debugging options, nor the ability to pass over
+ local character tables. */
+
+#if !defined NOPOSIX
+ if (posix || do_posix)
+ {
+ int rc;
+ int cflags = 0;
+
+ if ((options & PCRE_CASELESS) != 0) cflags |= REG_ICASE;
+ if ((options & PCRE_MULTILINE) != 0) cflags |= REG_NEWLINE;
+ if ((options & PCRE_DOTALL) != 0) cflags |= REG_DOTALL;
+ if ((options & PCRE_NO_AUTO_CAPTURE) != 0) cflags |= REG_NOSUB;
+ if ((options & PCRE_UTF8) != 0) cflags |= REG_UTF8;
+
+ rc = regcomp(&preg, (char *)p, cflags);
+
+ /* Compilation failed; go back for another re, skipping to blank line
+ if non-interactive. */
+
+ if (rc != 0)
+ {
+ (void)regerror(rc, &preg, (char *)buffer, buffer_size);
+ fprintf(outfile, "Failed: POSIX code %d: %s\n", rc, buffer);
+ goto SKIP_DATA;
+ }
+ }
+
+ /* Handle compiling via the native interface */
+
+ else
+#endif /* !defined NOPOSIX */
+
+ {
+ if (timeit > 0)
+ {
+ register int i;
+ clock_t time_taken;
+ clock_t start_time = clock();
+ for (i = 0; i < timeit; i++)
+ {
+ re = pcre_compile((char *)p, options, &error, &erroroffset, tables);
+ if (re != NULL) free(re);
+ }
+ time_taken = clock() - start_time;
+ fprintf(outfile, "Compile time %.4f milliseconds\n",
+ (((double)time_taken * 1000.0) / (double)timeit) /
+ (double)CLOCKS_PER_SEC);
+ }
+
+ re = pcre_compile((char *)p, options, &error, &erroroffset, tables);
+
+ /* Compilation failed; go back for another re, skipping to blank line
+ if non-interactive. */
+
+ if (re == NULL)
+ {
+ fprintf(outfile, "Failed: %s at offset %d\n", error, erroroffset);
+ SKIP_DATA:
+ if (infile != stdin)
+ {
+ for (;;)
+ {
+ if (extend_inputline(infile, buffer) == NULL)
+ {
+ done = 1;
+ goto CONTINUE;
+ }
+ len = (int)strlen((char *)buffer);
+ while (len > 0 && isspace(buffer[len-1])) len--;
+ if (len == 0) break;
+ }
+ fprintf(outfile, "\n");
+ }
+ goto CONTINUE;
+ }
+
+ /* Compilation succeeded; print data if required. There are now two
+ info-returning functions. The old one has a limited interface and
+ returns only limited data. Check that it agrees with the newer one. */
+
+ if (log_store)
+ fprintf(outfile, "Memory allocation (code space): %d\n",
+ (int)(gotten_store -
+ sizeof(real_pcre) -
+ ((real_pcre *)re)->name_count * ((real_pcre *)re)->name_entry_size));
+
+ /* Extract the size for possible writing before possibly flipping it,
+ and remember the store that was got. */
+
+ true_size = ((real_pcre *)re)->size;
+ regex_gotten_store = gotten_store;
+
+ /* If /S was present, study the regexp to generate additional info to
+ help with the matching. */
+
+ if (do_study)
+ {
+ if (timeit > 0)
+ {
+ register int i;
+ clock_t time_taken;
+ clock_t start_time = clock();
+ for (i = 0; i < timeit; i++)
+ extra = pcre_study(re, study_options, &error);
+ time_taken = clock() - start_time;
+ if (extra != NULL) free(extra);
+ fprintf(outfile, " Study time %.4f milliseconds\n",
+ (((double)time_taken * 1000.0) / (double)timeit) /
+ (double)CLOCKS_PER_SEC);
+ }
+ extra = pcre_study(re, study_options, &error);
+ if (error != NULL)
+ fprintf(outfile, "Failed to study: %s\n", error);
+ else if (extra != NULL)
+ true_study_size = ((pcre_study_data *)(extra->study_data))->size;
+ }
+
+ /* If the 'F' option was present, we flip the bytes of all the integer
+ fields in the regex data block and the study block. This is to make it
+ possible to test PCRE's handling of byte-flipped patterns, e.g. those
+ compiled on a different architecture. */
+
+ if (do_flip)
+ {
+ real_pcre *rre = (real_pcre *)re;
+ rre->magic_number =
+ byteflip(rre->magic_number, sizeof(rre->magic_number));
+ rre->size = byteflip(rre->size, sizeof(rre->size));
+ rre->options = byteflip(rre->options, sizeof(rre->options));
+ rre->flags = (pcre_uint16)byteflip(rre->flags, sizeof(rre->flags));
+ rre->top_bracket =
+ (pcre_uint16)byteflip(rre->top_bracket, sizeof(rre->top_bracket));
+ rre->top_backref =
+ (pcre_uint16)byteflip(rre->top_backref, sizeof(rre->top_backref));
+ rre->first_byte =
+ (pcre_uint16)byteflip(rre->first_byte, sizeof(rre->first_byte));
+ rre->req_byte =
+ (pcre_uint16)byteflip(rre->req_byte, sizeof(rre->req_byte));
+ rre->name_table_offset = (pcre_uint16)byteflip(rre->name_table_offset,
+ sizeof(rre->name_table_offset));
+ rre->name_entry_size = (pcre_uint16)byteflip(rre->name_entry_size,
+ sizeof(rre->name_entry_size));
+ rre->name_count = (pcre_uint16)byteflip(rre->name_count,
+ sizeof(rre->name_count));
+
+ if (extra != NULL)
+ {
+ pcre_study_data *rsd = (pcre_study_data *)(extra->study_data);
+ rsd->size = byteflip(rsd->size, sizeof(rsd->size));
+ rsd->options = byteflip(rsd->options, sizeof(rsd->options));
+ }
+ }
+
+ /* Extract information from the compiled data if required */
+
+ SHOW_INFO:
+
+ if (do_debug)
+ {
+ fprintf(outfile, "------------------------------------------------------------------\n");
+ pcre_printint(re, outfile, debug_lengths);
+ }
+
+ if (do_showinfo)
+ {
+ unsigned long int get_options, all_options;
+#if !defined NOINFOCHECK
+ int old_first_char, old_options, old_count;
+#endif
+ int count, backrefmax, first_char, need_char, okpartial, jchanged,
+ hascrorlf;
+ int nameentrysize, namecount;
+ const uschar *nametable;
+
+ new_info(re, NULL, PCRE_INFO_OPTIONS, &get_options);
+ new_info(re, NULL, PCRE_INFO_SIZE, &size);
+ new_info(re, NULL, PCRE_INFO_CAPTURECOUNT, &count);
+ new_info(re, NULL, PCRE_INFO_BACKREFMAX, &backrefmax);
+ new_info(re, NULL, PCRE_INFO_FIRSTBYTE, &first_char);
+ new_info(re, NULL, PCRE_INFO_LASTLITERAL, &need_char);
+ new_info(re, NULL, PCRE_INFO_NAMEENTRYSIZE, &nameentrysize);
+ new_info(re, NULL, PCRE_INFO_NAMECOUNT, &namecount);
+ new_info(re, NULL, PCRE_INFO_NAMETABLE, (void *)&nametable);
+ new_info(re, NULL, PCRE_INFO_OKPARTIAL, &okpartial);
+ new_info(re, NULL, PCRE_INFO_JCHANGED, &jchanged);
+ new_info(re, NULL, PCRE_INFO_HASCRORLF, &hascrorlf);
+
+#if !defined NOINFOCHECK
+ old_count = pcre_info(re, &old_options, &old_first_char);
+ if (count < 0) fprintf(outfile,
+ "Error %d from pcre_info()\n", count);
+ else
+ {
+ if (old_count != count) fprintf(outfile,
+ "Count disagreement: pcre_fullinfo=%d pcre_info=%d\n", count,
+ old_count);
+
+ if (old_first_char != first_char) fprintf(outfile,
+ "First char disagreement: pcre_fullinfo=%d pcre_info=%d\n",
+ first_char, old_first_char);
+
+ if (old_options != (int)get_options) fprintf(outfile,
+ "Options disagreement: pcre_fullinfo=%ld pcre_info=%d\n",
+ get_options, old_options);
+ }
+#endif
+
+ if (size != regex_gotten_store) fprintf(outfile,
+ "Size disagreement: pcre_fullinfo=%d call to malloc for %d\n",
+ (int)size, (int)regex_gotten_store);
+
+ fprintf(outfile, "Capturing subpattern count = %d\n", count);
+ if (backrefmax > 0)
+ fprintf(outfile, "Max back reference = %d\n", backrefmax);
+
+ if (namecount > 0)
+ {
+ fprintf(outfile, "Named capturing subpatterns:\n");
+ while (namecount-- > 0)
+ {
+ fprintf(outfile, " %s %*s%3d\n", nametable + 2,
+ nameentrysize - 3 - (int)strlen((char *)nametable + 2), "",
+ GET2(nametable, 0));
+ nametable += nameentrysize;
+ }
+ }
+
+ if (!okpartial) fprintf(outfile, "Partial matching not supported\n");
+ if (hascrorlf) fprintf(outfile, "Contains explicit CR or LF match\n");
+
+ all_options = ((real_pcre *)re)->options;
+ if (do_flip) all_options = byteflip(all_options, sizeof(all_options));
+
+ if (get_options == 0) fprintf(outfile, "No options\n");
+ else fprintf(outfile, "Options:%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ ((get_options & PCRE_ANCHORED) != 0)? " anchored" : "",
+ ((get_options & PCRE_CASELESS) != 0)? " caseless" : "",
+ ((get_options & PCRE_EXTENDED) != 0)? " extended" : "",
+ ((get_options & PCRE_MULTILINE) != 0)? " multiline" : "",
+ ((get_options & PCRE_FIRSTLINE) != 0)? " firstline" : "",
+ ((get_options & PCRE_DOTALL) != 0)? " dotall" : "",
+ ((get_options & PCRE_BSR_ANYCRLF) != 0)? " bsr_anycrlf" : "",
+ ((get_options & PCRE_BSR_UNICODE) != 0)? " bsr_unicode" : "",
+ ((get_options & PCRE_DOLLAR_ENDONLY) != 0)? " dollar_endonly" : "",
+ ((get_options & PCRE_EXTRA) != 0)? " extra" : "",
+ ((get_options & PCRE_UNGREEDY) != 0)? " ungreedy" : "",
+ ((get_options & PCRE_NO_AUTO_CAPTURE) != 0)? " no_auto_capture" : "",
+ ((get_options & PCRE_UTF8) != 0)? " utf8" : "",
+ ((get_options & PCRE_NO_UTF8_CHECK) != 0)? " no_utf8_check" : "",
+ ((get_options & PCRE_DUPNAMES) != 0)? " dupnames" : "");
+
+ if (jchanged) fprintf(outfile, "Duplicate name status changes\n");
+
+ switch (get_options & PCRE_NEWLINE_BITS)
+ {
+ case PCRE_NEWLINE_CR:
+ fprintf(outfile, "Forced newline sequence: CR\n");
+ break;
+
+ case PCRE_NEWLINE_LF:
+ fprintf(outfile, "Forced newline sequence: LF\n");
+ break;
+
+ case PCRE_NEWLINE_CRLF:
+ fprintf(outfile, "Forced newline sequence: CRLF\n");
+ break;
+
+ case PCRE_NEWLINE_ANYCRLF:
+ fprintf(outfile, "Forced newline sequence: ANYCRLF\n");
+ break;
+
+ case PCRE_NEWLINE_ANY:
+ fprintf(outfile, "Forced newline sequence: ANY\n");
+ break;
+
+ default:
+ break;
+ }
+
+ if (first_char == -1)
+ {
+ fprintf(outfile, "First char at start or follows newline\n");
+ }
+ else if (first_char < 0)
+ {
+ fprintf(outfile, "No first char\n");
+ }
+ else
+ {
+ int ch = first_char & 255;
+ const char *caseless = ((first_char & REQ_CASELESS) == 0)?
+ "" : " (caseless)";
+ if (PRINTHEX(ch))
+ fprintf(outfile, "First char = \'%c\'%s\n", ch, caseless);
+ else
+ fprintf(outfile, "First char = %d%s\n", ch, caseless);
+ }
+
+ if (need_char < 0)
+ {
+ fprintf(outfile, "No need char\n");
+ }
+ else
+ {
+ int ch = need_char & 255;
+ const char *caseless = ((need_char & REQ_CASELESS) == 0)?
+ "" : " (caseless)";
+ if (PRINTHEX(ch))
+ fprintf(outfile, "Need char = \'%c\'%s\n", ch, caseless);
+ else
+ fprintf(outfile, "Need char = %d%s\n", ch, caseless);
+ }
+
+ /* Don't output the study size; for any given build it is a fixed
+ value, but it varies with the computer architecture, and so would
+ mess up the test suite. (And with the /F option, it might be
+ flipped.) */
+
+ if (do_study)
+ {
+ if (extra == NULL)
+ fprintf(outfile, "Study returned NULL\n");
+ else
+ {
+ uschar *start_bits = NULL;
+ new_info(re, extra, PCRE_INFO_FIRSTTABLE, &start_bits);
+
+ if (start_bits == NULL)
+ fprintf(outfile, "No starting byte set\n");
+ else
+ {
+ int i;
+ int c = 24;
+ fprintf(outfile, "Starting byte set: ");
+ for (i = 0; i < 256; i++)
+ {
+ if ((start_bits[i/8] & (1<<(i&7))) != 0)
+ {
+ if (c > 75)
+ {
+ fprintf(outfile, "\n ");
+ c = 2;
+ }
+ if (PRINTHEX(i) && i != ' ')
+ {
+ fprintf(outfile, "%c ", i);
+ c += 2;
+ }
+ else
+ {
+ fprintf(outfile, "\\x%02x ", i);
+ c += 5;
+ }
+ }
+ }
+ fprintf(outfile, "\n");
+ }
+ }
+ }
+ }
+
+ /* If the '>' option was present, we write out the regex to a file, and
+ that is all. The first 8 bytes of the file are the regex length and then
+ the study length, in big-endian order. */
+
+ if (to_file != NULL)
+ {
+ FILE *f = fopen((char *)to_file, "wb");
+ if (f == NULL)
+ {
+ fprintf(outfile, "Unable to open %s: %s\n", to_file, strerror(errno));
+ }
+ else
+ {
+ uschar sbuf[8];
+ sbuf[0] = (uschar)((true_size >> 24) & 255);
+ sbuf[1] = (uschar)((true_size >> 16) & 255);
+ sbuf[2] = (uschar)((true_size >> 8) & 255);
+ sbuf[3] = (uschar)((true_size) & 255);
+
+ sbuf[4] = (uschar)((true_study_size >> 24) & 255);
+ sbuf[5] = (uschar)((true_study_size >> 16) & 255);
+ sbuf[6] = (uschar)((true_study_size >> 8) & 255);
+ sbuf[7] = (uschar)((true_study_size) & 255);
+
+ if (fwrite(sbuf, 1, 8, f) < 8 ||
+ fwrite(re, 1, true_size, f) < true_size)
+ {
+ fprintf(outfile, "Write error on %s: %s\n", to_file, strerror(errno));
+ }
+ else
+ {
+ fprintf(outfile, "Compiled regex written to %s\n", to_file);
+ if (extra != NULL)
+ {
+ if (fwrite(extra->study_data, 1, true_study_size, f) <
+ true_study_size)
+ {
+ fprintf(outfile, "Write error on %s: %s\n", to_file,
+ strerror(errno));
+ }
+ else fprintf(outfile, "Study data written to %s\n", to_file);
+
+ }
+ }
+ fclose(f);
+ }
+
+ new_free(re);
+ if (extra != NULL) new_free(extra);
+ if (tables != NULL) new_free((void *)tables);
+ continue; /* With next regex */
+ }
+ } /* End of non-POSIX compile */
+
+ /* Read data lines and test them */
+
+ for (;;)
+ {
+ uschar *q;
+ uschar *bptr;
+ int *use_offsets = offsets;
+ int use_size_offsets = size_offsets;
+ int callout_data = 0;
+ int callout_data_set = 0;
+ int count, c;
+ int copystrings = 0;
+ int find_match_limit = 0;
+ int getstrings = 0;
+ int getlist = 0;
+ int gmatched = 0;
+ int start_offset = 0;
+ int g_notempty = 0;
+ int use_dfa = 0;
+
+ options = 0;
+
+ *copynames = 0;
+ *getnames = 0;
+
+ copynamesptr = copynames;
+ getnamesptr = getnames;
+
+ pcre_callout = callout;
+ first_callout = 1;
+ callout_extra = 0;
+ callout_count = 0;
+ callout_fail_count = 999999;
+ callout_fail_id = -1;
+ show_malloc = 0;
+
+ if (extra != NULL) extra->flags &=
+ ~(PCRE_EXTRA_MATCH_LIMIT|PCRE_EXTRA_MATCH_LIMIT_RECURSION);
+
+ len = 0;
+ for (;;)
+ {
+ if (infile == stdin) printf("data> ");
+ if (extend_inputline(infile, buffer + len) == NULL)
+ {
+ if (len > 0) break;
+ done = 1;
+ goto CONTINUE;
+ }
+ if (infile != stdin) fprintf(outfile, "%s", (char *)buffer);
+ len = (int)strlen((char *)buffer);
+ if (buffer[len-1] == '\n') break;
+ }
+
+ while (len > 0 && isspace(buffer[len-1])) len--;
+ buffer[len] = 0;
+ if (len == 0) break;
+
+ p = buffer;
+ while (isspace(*p)) p++;
+
+ bptr = q = dbuffer;
+ while ((c = *p++) != 0)
+ {
+ int i = 0;
+ int n = 0;
+
+ if (c == '\\') switch ((c = *p++))
+ {
+ case 'a': c = 7; break;
+ case 'b': c = '\b'; break;
+ case 'e': c = 27; break;
+ case 'f': c = '\f'; break;
+ case 'n': c = '\n'; break;
+ case 'r': c = '\r'; break;
+ case 't': c = '\t'; break;
+ case 'v': c = '\v'; break;
+
+ case '0': case '1': case '2': case '3':
+ case '4': case '5': case '6': case '7':
+ c -= '0';
+ while (i++ < 2 && isdigit(*p) && *p != '8' && *p != '9')
+ c = c * 8 + *p++ - '0';
+
+#if !defined NOUTF8
+ if (use_utf8 && c > 255)
+ {
+ unsigned char buff8[8];
+ int ii, utn;
+ utn = ord2utf8(c, buff8);
+ for (ii = 0; ii < utn - 1; ii++) *q++ = buff8[ii];
+ c = buff8[ii]; /* Last byte */
+ }
+#endif
+ break;
+
+ case 'x':
+
+ /* Handle \x{..} specially - new Perl thing for utf8 */
+
+#if !defined NOUTF8
+ if (*p == '{')
+ {
+ unsigned char *pt = p;
+ c = 0;
+ while (isxdigit(*(++pt)))
+ c = c * 16 + tolower(*pt) - ((isdigit(*pt))? '0' : 'W');
+ if (*pt == '}')
+ {
+ unsigned char buff8[8];
+ int ii, utn;
+ utn = ord2utf8(c, buff8);
+ for (ii = 0; ii < utn - 1; ii++) *q++ = buff8[ii];
+ c = buff8[ii]; /* Last byte */
+ p = pt + 1;
+ break;
+ }
+ /* Not correct form; fall through */
+ }
+#endif
+
+ /* Ordinary \x */
+
+ c = 0;
+ while (i++ < 2 && isxdigit(*p))
+ {
+ c = c * 16 + tolower(*p) - ((isdigit(*p))? '0' : 'W');
+ p++;
+ }
+ break;
+
+ case 0: /* \ followed by EOF allows for an empty line */
+ p--;
+ continue;
+
+ case '>':
+ while(isdigit(*p)) start_offset = start_offset * 10 + *p++ - '0';
+ continue;
+
+ case 'A': /* Option setting */
+ options |= PCRE_ANCHORED;
+ continue;
+
+ case 'B':
+ options |= PCRE_NOTBOL;
+ continue;
+
+ case 'C':
+ if (isdigit(*p)) /* Set copy string */
+ {
+ while(isdigit(*p)) n = n * 10 + *p++ - '0';
+ copystrings |= 1 << n;
+ }
+ else if (isalnum(*p))
+ {
+ uschar *npp = copynamesptr;
+ while (isalnum(*p)) *npp++ = *p++;
+ *npp++ = 0;
+ *npp = 0;
+ n = pcre_get_stringnumber(re, (char *)copynamesptr);
+ if (n < 0)
+ fprintf(outfile, "no parentheses with name \"%s\"\n", copynamesptr);
+ copynamesptr = npp;
+ }
+ else if (*p == '+')
+ {
+ callout_extra = 1;
+ p++;
+ }
+ else if (*p == '-')
+ {
+ pcre_callout = NULL;
+ p++;
+ }
+ else if (*p == '!')
+ {
+ callout_fail_id = 0;
+ p++;
+ while(isdigit(*p))
+ callout_fail_id = callout_fail_id * 10 + *p++ - '0';
+ callout_fail_count = 0;
+ if (*p == '!')
+ {
+ p++;
+ while(isdigit(*p))
+ callout_fail_count = callout_fail_count * 10 + *p++ - '0';
+ }
+ }
+ else if (*p == '*')
+ {
+ int sign = 1;
+ callout_data = 0;
+ if (*(++p) == '-') { sign = -1; p++; }
+ while(isdigit(*p))
+ callout_data = callout_data * 10 + *p++ - '0';
+ callout_data *= sign;
+ callout_data_set = 1;
+ }
+ continue;
+
+#if !defined NODFA
+ case 'D':
+#if !defined NOPOSIX
+ if (posix || do_posix)
+ printf("** Can't use dfa matching in POSIX mode: \\D ignored\n");
+ else
+#endif
+ use_dfa = 1;
+ continue;
+
+ case 'F':
+ options |= PCRE_DFA_SHORTEST;
+ continue;
+#endif
+
+ case 'G':
+ if (isdigit(*p))
+ {
+ while(isdigit(*p)) n = n * 10 + *p++ - '0';
+ getstrings |= 1 << n;
+ }
+ else if (isalnum(*p))
+ {
+ uschar *npp = getnamesptr;
+ while (isalnum(*p)) *npp++ = *p++;
+ *npp++ = 0;
+ *npp = 0;
+ n = pcre_get_stringnumber(re, (char *)getnamesptr);
+ if (n < 0)
+ fprintf(outfile, "no parentheses with name \"%s\"\n", getnamesptr);
+ getnamesptr = npp;
+ }
+ continue;
+
+ case 'L':
+ getlist = 1;
+ continue;
+
+ case 'M':
+ find_match_limit = 1;
+ continue;
+
+ case 'N':
+ options |= PCRE_NOTEMPTY;
+ continue;
+
+ case 'O':
+ while(isdigit(*p)) n = n * 10 + *p++ - '0';
+ if (n > size_offsets_max)
+ {
+ size_offsets_max = n;
+ free(offsets);
+ use_offsets = offsets = (int *)malloc(size_offsets_max * sizeof(int));
+ if (offsets == NULL)
+ {
+ printf("** Failed to get %d bytes of memory for offsets vector\n",
+ (int)(size_offsets_max * sizeof(int)));
+ yield = 1;
+ goto EXIT;
+ }
+ }
+ use_size_offsets = n;
+ if (n == 0) use_offsets = NULL; /* Ensures it can't write to it */
+ continue;
+
+ case 'P':
+ options |= PCRE_PARTIAL;
+ continue;
+
+ case 'Q':
+ while(isdigit(*p)) n = n * 10 + *p++ - '0';
+ if (extra == NULL)
+ {
+ extra = (pcre_extra *)malloc(sizeof(pcre_extra));
+ extra->flags = 0;
+ }
+ extra->flags |= PCRE_EXTRA_MATCH_LIMIT_RECURSION;
+ extra->match_limit_recursion = n;
+ continue;
+
+ case 'q':
+ while(isdigit(*p)) n = n * 10 + *p++ - '0';
+ if (extra == NULL)
+ {
+ extra = (pcre_extra *)malloc(sizeof(pcre_extra));
+ extra->flags = 0;
+ }
+ extra->flags |= PCRE_EXTRA_MATCH_LIMIT;
+ extra->match_limit = n;
+ continue;
+
+#if !defined NODFA
+ case 'R':
+ options |= PCRE_DFA_RESTART;
+ continue;
+#endif
+
+ case 'S':
+ show_malloc = 1;
+ continue;
+
+ case 'Z':
+ options |= PCRE_NOTEOL;
+ continue;
+
+ case '?':
+ options |= PCRE_NO_UTF8_CHECK;
+ continue;
+
+ case '<':
+ {
+ int x = check_newline(p, outfile);
+ if (x == 0) goto NEXT_DATA;
+ options |= x;
+ while (*p++ != '>');
+ }
+ continue;
+ }
+ *q++ = c;
+ }
+ *q = 0;
+ len = q - dbuffer;
+
+ if ((all_use_dfa || use_dfa) && find_match_limit)
+ {
+ printf("**Match limit not relevant for DFA matching: ignored\n");
+ find_match_limit = 0;
+ }
+
+ /* Handle matching via the POSIX interface, which does not
+ support timing or playing with the match limit or callout data. */
+
+#if !defined NOPOSIX
+ if (posix || do_posix)
+ {
+ int rc;
+ int eflags = 0;
+ regmatch_t *pmatch = NULL;
+ if (use_size_offsets > 0)
+ pmatch = (regmatch_t *)malloc(sizeof(regmatch_t) * use_size_offsets);
+ if ((options & PCRE_NOTBOL) != 0) eflags |= REG_NOTBOL;
+ if ((options & PCRE_NOTEOL) != 0) eflags |= REG_NOTEOL;
+
+ rc = regexec(&preg, (const char *)bptr, use_size_offsets, pmatch, eflags);
+
+ if (rc != 0)
+ {
+ (void)regerror(rc, &preg, (char *)buffer, buffer_size);
+ fprintf(outfile, "No match: POSIX code %d: %s\n", rc, buffer);
+ }
+ else if ((((const pcre *)preg.re_pcre)->options & PCRE_NO_AUTO_CAPTURE)
+ != 0)
+ {
+ fprintf(outfile, "Matched with REG_NOSUB\n");
+ }
+ else
+ {
+ size_t i;
+ for (i = 0; i < (size_t)use_size_offsets; i++)
+ {
+ if (pmatch[i].rm_so >= 0)
+ {
+ fprintf(outfile, "%2d: ", (int)i);
+ (void)pchars(dbuffer + pmatch[i].rm_so,
+ pmatch[i].rm_eo - pmatch[i].rm_so, outfile);
+ fprintf(outfile, "\n");
+ if (i == 0 && do_showrest)
+ {
+ fprintf(outfile, " 0+ ");
+ (void)pchars(dbuffer + pmatch[i].rm_eo, len - pmatch[i].rm_eo,
+ outfile);
+ fprintf(outfile, "\n");
+ }
+ }
+ }
+ }
+ free(pmatch);
+ }
+
+ /* Handle matching via the native interface - repeats for /g and /G */
+
+ else
+#endif /* !defined NOPOSIX */
+
+ for (;; gmatched++) /* Loop for /g or /G */
+ {
+ if (timeitm > 0)
+ {
+ register int i;
+ clock_t time_taken;
+ clock_t start_time = clock();
+
+#if !defined NODFA
+ if (all_use_dfa || use_dfa)
+ {
+ int workspace[1000];
+ for (i = 0; i < timeitm; i++)
+ count = pcre_dfa_exec(re, NULL, (char *)bptr, len, start_offset,
+ options | g_notempty, use_offsets, use_size_offsets, workspace,
+ sizeof(workspace)/sizeof(int));
+ }
+ else
+#endif
+
+ for (i = 0; i < timeitm; i++)
+ count = pcre_exec(re, extra, (char *)bptr, len,
+ start_offset, options | g_notempty, use_offsets, use_size_offsets);
+
+ time_taken = clock() - start_time;
+ fprintf(outfile, "Execute time %.4f milliseconds\n",
+ (((double)time_taken * 1000.0) / (double)timeitm) /
+ (double)CLOCKS_PER_SEC);
+ }
+
+ /* If find_match_limit is set, we want to do repeated matches with
+ varying limits in order to find the minimum value for the match limit and
+ for the recursion limit. */
+
+ if (find_match_limit)
+ {
+ if (extra == NULL)
+ {
+ extra = (pcre_extra *)malloc(sizeof(pcre_extra));
+ extra->flags = 0;
+ }
+
+ (void)check_match_limit(re, extra, bptr, len, start_offset,
+ options|g_notempty, use_offsets, use_size_offsets,
+ PCRE_EXTRA_MATCH_LIMIT, &(extra->match_limit),
+ PCRE_ERROR_MATCHLIMIT, "match()");
+
+ count = check_match_limit(re, extra, bptr, len, start_offset,
+ options|g_notempty, use_offsets, use_size_offsets,
+ PCRE_EXTRA_MATCH_LIMIT_RECURSION, &(extra->match_limit_recursion),
+ PCRE_ERROR_RECURSIONLIMIT, "match() recursion");
+ }
+
+ /* If callout_data is set, use the interface with additional data */
+
+ else if (callout_data_set)
+ {
+ if (extra == NULL)
+ {
+ extra = (pcre_extra *)malloc(sizeof(pcre_extra));
+ extra->flags = 0;
+ }
+ extra->flags |= PCRE_EXTRA_CALLOUT_DATA;
+ extra->callout_data = &callout_data;
+ count = pcre_exec(re, extra, (char *)bptr, len, start_offset,
+ options | g_notempty, use_offsets, use_size_offsets);
+ extra->flags &= ~PCRE_EXTRA_CALLOUT_DATA;
+ }
+
+ /* The normal case is just to do the match once, with the default
+ value of match_limit. */
+
+#if !defined NODFA
+ else if (all_use_dfa || use_dfa)
+ {
+ int workspace[1000];
+ count = pcre_dfa_exec(re, NULL, (char *)bptr, len, start_offset,
+ options | g_notempty, use_offsets, use_size_offsets, workspace,
+ sizeof(workspace)/sizeof(int));
+ if (count == 0)
+ {
+ fprintf(outfile, "Matched, but too many subsidiary matches\n");
+ count = use_size_offsets/2;
+ }
+ }
+#endif
+
+ else
+ {
+ count = pcre_exec(re, extra, (char *)bptr, len,
+ start_offset, options | g_notempty, use_offsets, use_size_offsets);
+ if (count == 0)
+ {
+ fprintf(outfile, "Matched, but too many substrings\n");
+ count = use_size_offsets/3;
+ }
+ }
+
+ /* Matched */
+
+ if (count >= 0)
+ {
+ int i, maxcount;
+
+#if !defined NODFA
+ if (all_use_dfa || use_dfa) maxcount = use_size_offsets/2; else
+#endif
+ maxcount = use_size_offsets/3;
+
+ /* This is a check against a lunatic return value. */
+
+ if (count > maxcount)
+ {
+ fprintf(outfile,
+ "** PCRE error: returned count %d is too big for offset size %d\n",
+ count, use_size_offsets);
+ count = use_size_offsets/3;
+ if (do_g || do_G)
+ {
+ fprintf(outfile, "** /%c loop abandoned\n", do_g? 'g' : 'G');
+ do_g = do_G = FALSE; /* Break g/G loop */
+ }
+ }
+
+ for (i = 0; i < count * 2; i += 2)
+ {
+ if (use_offsets[i] < 0)
+ fprintf(outfile, "%2d: <unset>\n", i/2);
+ else
+ {
+ fprintf(outfile, "%2d: ", i/2);
+ (void)pchars(bptr + use_offsets[i],
+ use_offsets[i+1] - use_offsets[i], outfile);
+ fprintf(outfile, "\n");
+ if (i == 0)
+ {
+ if (do_showrest)
+ {
+ fprintf(outfile, " 0+ ");
+ (void)pchars(bptr + use_offsets[i+1], len - use_offsets[i+1],
+ outfile);
+ fprintf(outfile, "\n");
+ }
+ }
+ }
+ }
+
+ for (i = 0; i < 32; i++)
+ {
+ if ((copystrings & (1 << i)) != 0)
+ {
+ char copybuffer[256];
+ int rc = pcre_copy_substring((char *)bptr, use_offsets, count,
+ i, copybuffer, sizeof(copybuffer));
+ if (rc < 0)
+ fprintf(outfile, "copy substring %d failed %d\n", i, rc);
+ else
+ fprintf(outfile, "%2dC %s (%d)\n", i, copybuffer, rc);
+ }
+ }
+
+ for (copynamesptr = copynames;
+ *copynamesptr != 0;
+ copynamesptr += (int)strlen((char*)copynamesptr) + 1)
+ {
+ char copybuffer[256];
+ int rc = pcre_copy_named_substring(re, (char *)bptr, use_offsets,
+ count, (char *)copynamesptr, copybuffer, sizeof(copybuffer));
+ if (rc < 0)
+ fprintf(outfile, "copy substring %s failed %d\n", copynamesptr, rc);
+ else
+ fprintf(outfile, " C %s (%d) %s\n", copybuffer, rc, copynamesptr);
+ }
+
+ for (i = 0; i < 32; i++)
+ {
+ if ((getstrings & (1 << i)) != 0)
+ {
+ const char *substring;
+ int rc = pcre_get_substring((char *)bptr, use_offsets, count,
+ i, &substring);
+ if (rc < 0)
+ fprintf(outfile, "get substring %d failed %d\n", i, rc);
+ else
+ {
+ fprintf(outfile, "%2dG %s (%d)\n", i, substring, rc);
+ pcre_free_substring(substring);
+ }
+ }
+ }
+
+ for (getnamesptr = getnames;
+ *getnamesptr != 0;
+ getnamesptr += (int)strlen((char*)getnamesptr) + 1)
+ {
+ const char *substring;
+ int rc = pcre_get_named_substring(re, (char *)bptr, use_offsets,
+ count, (char *)getnamesptr, &substring);
+ if (rc < 0)
+ fprintf(outfile, "copy substring %s failed %d\n", getnamesptr, rc);
+ else
+ {
+ fprintf(outfile, " G %s (%d) %s\n", substring, rc, getnamesptr);
+ pcre_free_substring(substring);
+ }
+ }
+
+ if (getlist)
+ {
+ const char **stringlist;
+ int rc = pcre_get_substring_list((char *)bptr, use_offsets, count,
+ &stringlist);
+ if (rc < 0)
+ fprintf(outfile, "get substring list failed %d\n", rc);
+ else
+ {
+ for (i = 0; i < count; i++)
+ fprintf(outfile, "%2dL %s\n", i, stringlist[i]);
+ if (stringlist[i] != NULL)
+ fprintf(outfile, "string list not terminated by NULL\n");
+ /* free((void *)stringlist); */
+ pcre_free_substring_list(stringlist);
+ }
+ }
+ }
+
+ /* There was a partial match */
+
+ else if (count == PCRE_ERROR_PARTIAL)
+ {
+ fprintf(outfile, "Partial match");
+#if !defined NODFA
+ if ((all_use_dfa || use_dfa) && use_size_offsets > 2)
+ fprintf(outfile, ": %.*s", use_offsets[1] - use_offsets[0],
+ bptr + use_offsets[0]);
+#endif
+ fprintf(outfile, "\n");
+ break; /* Out of the /g loop */
+ }
+
+ /* Failed to match. If this is a /g or /G loop and we previously set
+ g_notempty after a null match, this is not necessarily the end. We want
+ to advance the start offset, and continue. We won't be at the end of the
+ string - that was checked before setting g_notempty.
+
+ A complication arises when the newline option is "any" or "anycrlf". If
+ the previous match was at the end of a line terminated by CRLF, an
+ advance of one character just passes the \r, whereas we should prefer
+ the longer newline sequence, as the code in pcre_exec() does. Fudge the
+ offset value to achieve this.
+
+ Otherwise, in the case of UTF-8 matching, the advance must be one
+ character, not one byte. */
+
+ else
+ {
+ if (g_notempty != 0)
+ {
+ int onechar = 1;
+ unsigned int obits = ((real_pcre *)re)->options;
+ use_offsets[0] = start_offset;
+ if ((obits & PCRE_NEWLINE_BITS) == 0)
+ {
+ int d;
+ (void)pcre_config(PCRE_CONFIG_NEWLINE, &d);
+ obits = (d == '\r')? PCRE_NEWLINE_CR :
+ (d == '\n')? PCRE_NEWLINE_LF :
+ (d == ('\r'<<8 | '\n'))? PCRE_NEWLINE_CRLF :
+ (d == -2)? PCRE_NEWLINE_ANYCRLF :
+ (d == -1)? PCRE_NEWLINE_ANY : 0;
+ }
+ if (((obits & PCRE_NEWLINE_BITS) == PCRE_NEWLINE_ANY ||
+ (obits & PCRE_NEWLINE_BITS) == PCRE_NEWLINE_ANYCRLF)
+ &&
+ start_offset < len - 1 &&
+ bptr[start_offset] == '\r' &&
+ bptr[start_offset+1] == '\n')
+ onechar++;
+ else if (use_utf8)
+ {
+ while (start_offset + onechar < len)
+ {
+ int tb = bptr[start_offset+onechar];
+ if (tb <= 127) break;
+ tb &= 0xc0;
+ if (tb != 0 && tb != 0xc0) onechar++;
+ }
+ }
+ use_offsets[1] = start_offset + onechar;
+ }
+ else
+ {
+ if (count == PCRE_ERROR_NOMATCH)
+ {
+ if (gmatched == 0) fprintf(outfile, "No match\n");
+ }
+ else fprintf(outfile, "Error %d\n", count);
+ break; /* Out of the /g loop */
+ }
+ }
+
+ /* If not /g or /G we are done */
+
+ if (!do_g && !do_G) break;
+
+ /* If we have matched an empty string, first check to see if we are at
+ the end of the subject. If so, the /g loop is over. Otherwise, mimic
+ what Perl's /g option does. This turns out to be rather cunning. First
+ we set PCRE_NOTEMPTY and PCRE_ANCHORED and try the match again at the
+ same point. If this fails (picked up above) we advance to the next
+ character. */
+
+ g_notempty = 0;
+
+ if (use_offsets[0] == use_offsets[1])
+ {
+ if (use_offsets[0] == len) break;
+ g_notempty = PCRE_NOTEMPTY | PCRE_ANCHORED;
+ }
+
+ /* For /g, update the start offset, leaving the rest alone */
+
+ if (do_g) start_offset = use_offsets[1];
+
+ /* For /G, update the pointer and length */
+
+ else
+ {
+ bptr += use_offsets[1];
+ len -= use_offsets[1];
+ }
+ } /* End of loop for /g and /G */
+
+ NEXT_DATA: continue;
+ } /* End of loop for data lines */
+
+ CONTINUE:
+
+#if !defined NOPOSIX
+ if (posix || do_posix) regfree(&preg);
+#endif
+
+ if (re != NULL) new_free(re);
+ if (extra != NULL) new_free(extra);
+ if (tables != NULL)
+ {
+ new_free((void *)tables);
+ setlocale(LC_CTYPE, "C");
+ locale_set = 0;
+ }
+ }
+
+if (infile == stdin) fprintf(outfile, "\n");
+
+EXIT:
+
+if (infile != NULL && infile != stdin) fclose(infile);
+if (outfile != NULL && outfile != stdout) fclose(outfile);
+
+free(buffer);
+free(dbuffer);
+free(pbuffer);
+free(offsets);
+
+return yield;
+}
+
+/* End of pcretest.c */
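
When pcretest handles the '>' option above, it prefixes the saved pattern with an 8-byte header: the compiled regex size in bytes 0-3 and the study-data size in bytes 4-7, both big-endian, exactly as built in sbuf[]. The following is a minimal sketch (not part of the vendored sources) of reading that header back, assuming only the C standard library; the function and variable names are illustrative.

/* Sketch: read back the 8-byte header that pcretest writes before a saved
   pattern. Bytes 0-3 hold the compiled regex size and bytes 4-7 the study
   data size, both big-endian, matching the sbuf[] layout above. */
#include <stdio.h>

static unsigned long read_be32(const unsigned char *p)
{
  return ((unsigned long)p[0] << 24) | ((unsigned long)p[1] << 16) |
         ((unsigned long)p[2] << 8)  |  (unsigned long)p[3];
}

int read_saved_sizes(const char *filename,
                     unsigned long *regex_size, unsigned long *study_size)
{
  unsigned char header[8];
  FILE *f = fopen(filename, "rb");
  if (f == NULL) return -1;
  if (fread(header, 1, 8, f) < 8) { fclose(f); return -1; }
  *regex_size = read_be32(header);      /* bytes 0-3: compiled regex size */
  *study_size = read_be32(header + 4);  /* bytes 4-7: study data size */
  fclose(f);
  return 0;
}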
diff --git a/src/third_party/pcre-7.4/ucp.h b/src/third_party/pcre-7.4/ucp.h
new file mode 100644
index 00000000000..3a4179b7693
--- /dev/null
+++ b/src/third_party/pcre-7.4/ucp.h
@@ -0,0 +1,133 @@
+/*************************************************
+* Unicode Property Table handler *
+*************************************************/
+
+#ifndef _UCP_H
+#define _UCP_H
+
+/* This file contains definitions of the property values that are returned by
+the function _pcre_ucp_findprop(). New values that are added for new releases
+of Unicode should always be at the end of each enum, for backwards
+compatibility. */
+
+/* These are the general character categories. */
+
+enum {
+ ucp_C, /* Other */
+ ucp_L, /* Letter */
+ ucp_M, /* Mark */
+ ucp_N, /* Number */
+ ucp_P, /* Punctuation */
+ ucp_S, /* Symbol */
+ ucp_Z /* Separator */
+};
+
+/* These are the particular character types. */
+
+enum {
+ ucp_Cc, /* Control */
+ ucp_Cf, /* Format */
+ ucp_Cn, /* Unassigned */
+ ucp_Co, /* Private use */
+ ucp_Cs, /* Surrogate */
+ ucp_Ll, /* Lower case letter */
+ ucp_Lm, /* Modifier letter */
+ ucp_Lo, /* Other letter */
+ ucp_Lt, /* Title case letter */
+ ucp_Lu, /* Upper case letter */
+ ucp_Mc, /* Spacing mark */
+ ucp_Me, /* Enclosing mark */
+ ucp_Mn, /* Non-spacing mark */
+ ucp_Nd, /* Decimal number */
+ ucp_Nl, /* Letter number */
+ ucp_No, /* Other number */
+ ucp_Pc, /* Connector punctuation */
+ ucp_Pd, /* Dash punctuation */
+ ucp_Pe, /* Close punctuation */
+ ucp_Pf, /* Final punctuation */
+ ucp_Pi, /* Initial punctuation */
+ ucp_Po, /* Other punctuation */
+ ucp_Ps, /* Open punctuation */
+ ucp_Sc, /* Currency symbol */
+ ucp_Sk, /* Modifier symbol */
+ ucp_Sm, /* Mathematical symbol */
+ ucp_So, /* Other symbol */
+ ucp_Zl, /* Line separator */
+ ucp_Zp, /* Paragraph separator */
+ ucp_Zs /* Space separator */
+};
+
+/* These are the script identifications. */
+
+enum {
+ ucp_Arabic,
+ ucp_Armenian,
+ ucp_Bengali,
+ ucp_Bopomofo,
+ ucp_Braille,
+ ucp_Buginese,
+ ucp_Buhid,
+ ucp_Canadian_Aboriginal,
+ ucp_Cherokee,
+ ucp_Common,
+ ucp_Coptic,
+ ucp_Cypriot,
+ ucp_Cyrillic,
+ ucp_Deseret,
+ ucp_Devanagari,
+ ucp_Ethiopic,
+ ucp_Georgian,
+ ucp_Glagolitic,
+ ucp_Gothic,
+ ucp_Greek,
+ ucp_Gujarati,
+ ucp_Gurmukhi,
+ ucp_Han,
+ ucp_Hangul,
+ ucp_Hanunoo,
+ ucp_Hebrew,
+ ucp_Hiragana,
+ ucp_Inherited,
+ ucp_Kannada,
+ ucp_Katakana,
+ ucp_Kharoshthi,
+ ucp_Khmer,
+ ucp_Lao,
+ ucp_Latin,
+ ucp_Limbu,
+ ucp_Linear_B,
+ ucp_Malayalam,
+ ucp_Mongolian,
+ ucp_Myanmar,
+ ucp_New_Tai_Lue,
+ ucp_Ogham,
+ ucp_Old_Italic,
+ ucp_Old_Persian,
+ ucp_Oriya,
+ ucp_Osmanya,
+ ucp_Runic,
+ ucp_Shavian,
+ ucp_Sinhala,
+ ucp_Syloti_Nagri,
+ ucp_Syriac,
+ ucp_Tagalog,
+ ucp_Tagbanwa,
+ ucp_Tai_Le,
+ ucp_Tamil,
+ ucp_Telugu,
+ ucp_Thaana,
+ ucp_Thai,
+ ucp_Tibetan,
+ ucp_Tifinagh,
+ ucp_Ugaritic,
+ ucp_Yi,
+ ucp_Balinese, /* New for Unicode 5.0.0 */
+ ucp_Cuneiform, /* New for Unicode 5.0.0 */
+ ucp_Nko, /* New for Unicode 5.0.0 */
+ ucp_Phags_Pa, /* New for Unicode 5.0.0 */
+ ucp_Phoenician /* New for Unicode 5.0.0 */
+};
+
+#endif
+
+/* End of ucp.h */
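
In ucp.h the particular character types are declared in blocks that follow the general categories (Cc..Cs, Ll..Lu, Mc..Mn, Nd..No, Pc..Ps, Sc..So, Zl..Zs). A minimal sketch of a hypothetical helper (not a PCRE API) that maps a particular type back to its general category, relying only on that declaration order:

/* Hypothetical helper, not part of PCRE: map one of the particular
   character types from the second enum in ucp.h back to its general
   category from the first enum, using the declaration order above. */
static int ucp_gentype_of(int type)
{
  if (type >= ucp_Cc && type <= ucp_Cs) return ucp_C;  /* Other */
  if (type >= ucp_Ll && type <= ucp_Lu) return ucp_L;  /* Letter */
  if (type >= ucp_Mc && type <= ucp_Mn) return ucp_M;  /* Mark */
  if (type >= ucp_Nd && type <= ucp_No) return ucp_N;  /* Number */
  if (type >= ucp_Pc && type <= ucp_Ps) return ucp_P;  /* Punctuation */
  if (type >= ucp_Sc && type <= ucp_So) return ucp_S;  /* Symbol */
  return ucp_Z;                                        /* Separator */
}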
diff --git a/src/third_party/pcre-7.4/ucpinternal.h b/src/third_party/pcre-7.4/ucpinternal.h
new file mode 100644
index 00000000000..811a373c887
--- /dev/null
+++ b/src/third_party/pcre-7.4/ucpinternal.h
@@ -0,0 +1,92 @@
+/*************************************************
+* Unicode Property Table handler *
+*************************************************/
+
+#ifndef _UCPINTERNAL_H
+#define _UCPINTERNAL_H
+
+/* Internal header file defining the layout of the bits in each pair of 32-bit
+words that form a data item in the table. */
+
+typedef struct cnode {
+ pcre_uint32 f0;
+ pcre_uint32 f1;
+} cnode;
+
+/* Things for the f0 field */
+
+#define f0_scriptmask 0xff000000 /* Mask for script field */
+#define f0_scriptshift 24 /* Shift for script value */
+#define f0_rangeflag 0x00f00000 /* Flag for a range item */
+#define f0_charmask 0x001fffff /* Mask for code point value */
+
+/* Things for the f1 field */
+
+#define f1_typemask 0xfc000000 /* Mask for char type field */
+#define f1_typeshift 26 /* Shift for the type field */
+#define f1_rangemask 0x0000ffff /* Mask for a range offset */
+#define f1_casemask 0x0000ffff /* Mask for a case offset */
+#define f1_caseneg 0xffff8000 /* Bits for negation */
+
+/* The data consists of a vector of structures of type cnode. The two unsigned
+32-bit integers are used as follows:
+
+(f0) (1) The most significant byte holds the script number. The numbers are
+ defined by the enum in ucp.h.
+
+ (2) The 0x00800000 bit is set if this entry defines a range of characters.
+ It is not set if this entry defines a single character
+
+ (3) The 0x00600000 bits are spare.
+
+ (4) The 0x001fffff bits contain the code point. No Unicode code point will
+ ever be greater than 0x0010ffff, so this should be OK for ever.
+
+(f1) (1) The 0xfc000000 bits contain the character type number. The numbers are
+ defined by an enum in ucp.h.
+
+ (2) The 0x03ff0000 bits are spare.
+
+ (3) The 0x0000ffff bits contain EITHER the unsigned offset to the top of
+ range if this entry defines a range, OR the *signed* offset to the
+ character's "other case" partner if this entry defines a single
+ character. There is no partner if the value is zero.
+
+-------------------------------------------------------------------------------
+| script (8) |.|.|.| codepoint (21) || type (6) |.|.| spare (8) | offset (16) |
+-------------------------------------------------------------------------------
+ | | | | |
+ | | |-> spare | |-> spare
+ | | |
+ | |-> spare |-> spare
+ |
+ |-> range flag
+
+The upper/lower casing information is set only for characters that come in
+pairs. The non-one-to-one mappings in the Unicode data are ignored.
+
+When searching the data, proceed as follows:
+
+(1) Set up for a binary chop search.
+
+(2) If the top is not greater than the bottom, the character is not in the
+ table. Its type must therefore be "Cn" ("Undefined").
+
+(3) Find the middle vector element.
+
+(4) Extract the code point and compare. If equal, we are done.
+
+(5) If the test character is smaller, set the top to the current point, and
+ goto (2).
+
+(6) If the current entry defines a range, compute the last character by adding
+ the offset, and see if the test character is within the range. If it is,
+ we are done.
+
+(7) Otherwise, set the bottom to one element past the current point and goto
+ (2).
+*/
+
+#endif /* _UCPINTERNAL_H */
+
+/* End of ucpinternal.h */
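
The comment in ucpinternal.h above describes the lookup as a binary chop over the cnode vector, with the code point in the low 21 bits of f0, ranges marked by the range flag, and a range's length held as the unsigned offset in the low 16 bits of f1. Below is a minimal sketch of that procedure under the stated layout; it is not the actual _pcre_ucp_findprop implementation, and it assumes the generated table is visible as 'table' with 'table_size' elements.

/* Sketch of the search procedure described above: binary chop over a
   cnode vector laid out as in ucpinternal.h. Returns the particular
   character type, or ucp_Cn ("Unassigned") if the code point is not in
   the table. */
static int ucp_lookup_type(const cnode *table, int table_size, unsigned int c)
{
  int bot = 0;
  int top = table_size;
  while (bot < top)                              /* step (2) */
  {
    int mid = (bot + top) / 2;                   /* step (3) */
    unsigned int mid_char = table[mid].f0 & f0_charmask;
    if (c == mid_char)                           /* step (4): exact hit */
      return (table[mid].f1 & f1_typemask) >> f1_typeshift;
    if (c < mid_char)                            /* step (5): go lower */
      top = mid;
    else
    {
      /* step (6): a range entry covers mid_char .. mid_char + offset */
      if ((table[mid].f0 & f0_rangeflag) != 0 &&
          c <= mid_char + (table[mid].f1 & f1_rangemask))
        return (table[mid].f1 & f1_typemask) >> f1_typeshift;
      bot = mid + 1;                             /* step (7): go higher */
    }
  }
  return ucp_Cn;                                 /* not in the table */
}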
diff --git a/src/third_party/pcre-7.4/ucptable.h b/src/third_party/pcre-7.4/ucptable.h
new file mode 100644
index 00000000000..07eaced8f2d
--- /dev/null
+++ b/src/third_party/pcre-7.4/ucptable.h
@@ -0,0 +1,3068 @@
+/* This source module is automatically generated from the Unicode
+property table. See ucpinternal.h for a description of the layout.
+This version was made from the Unicode 5.0.0 tables. */
+
+static const cnode ucp_table[] = {
+ { 0x09800000, 0x0000001f },
+ { 0x09000020, 0x74000000 },
+ { 0x09800021, 0x54000002 },
+ { 0x09000024, 0x5c000000 },
+ { 0x09800025, 0x54000002 },
+ { 0x09000028, 0x58000000 },
+ { 0x09000029, 0x48000000 },
+ { 0x0900002a, 0x54000000 },
+ { 0x0900002b, 0x64000000 },
+ { 0x0900002c, 0x54000000 },
+ { 0x0900002d, 0x44000000 },
+ { 0x0980002e, 0x54000001 },
+ { 0x09800030, 0x34000009 },
+ { 0x0980003a, 0x54000001 },
+ { 0x0980003c, 0x64000002 },
+ { 0x0980003f, 0x54000001 },
+ { 0x21000041, 0x24000020 },
+ { 0x21000042, 0x24000020 },
+ { 0x21000043, 0x24000020 },
+ { 0x21000044, 0x24000020 },
+ { 0x21000045, 0x24000020 },
+ { 0x21000046, 0x24000020 },
+ { 0x21000047, 0x24000020 },
+ { 0x21000048, 0x24000020 },
+ { 0x21000049, 0x24000020 },
+ { 0x2100004a, 0x24000020 },
+ { 0x2100004b, 0x24000020 },
+ { 0x2100004c, 0x24000020 },
+ { 0x2100004d, 0x24000020 },
+ { 0x2100004e, 0x24000020 },
+ { 0x2100004f, 0x24000020 },
+ { 0x21000050, 0x24000020 },
+ { 0x21000051, 0x24000020 },
+ { 0x21000052, 0x24000020 },
+ { 0x21000053, 0x24000020 },
+ { 0x21000054, 0x24000020 },
+ { 0x21000055, 0x24000020 },
+ { 0x21000056, 0x24000020 },
+ { 0x21000057, 0x24000020 },
+ { 0x21000058, 0x24000020 },
+ { 0x21000059, 0x24000020 },
+ { 0x2100005a, 0x24000020 },
+ { 0x0900005b, 0x58000000 },
+ { 0x0900005c, 0x54000000 },
+ { 0x0900005d, 0x48000000 },
+ { 0x0900005e, 0x60000000 },
+ { 0x0900005f, 0x40000000 },
+ { 0x09000060, 0x60000000 },
+ { 0x21000061, 0x1400ffe0 },
+ { 0x21000062, 0x1400ffe0 },
+ { 0x21000063, 0x1400ffe0 },
+ { 0x21000064, 0x1400ffe0 },
+ { 0x21000065, 0x1400ffe0 },
+ { 0x21000066, 0x1400ffe0 },
+ { 0x21000067, 0x1400ffe0 },
+ { 0x21000068, 0x1400ffe0 },
+ { 0x21000069, 0x1400ffe0 },
+ { 0x2100006a, 0x1400ffe0 },
+ { 0x2100006b, 0x1400ffe0 },
+ { 0x2100006c, 0x1400ffe0 },
+ { 0x2100006d, 0x1400ffe0 },
+ { 0x2100006e, 0x1400ffe0 },
+ { 0x2100006f, 0x1400ffe0 },
+ { 0x21000070, 0x1400ffe0 },
+ { 0x21000071, 0x1400ffe0 },
+ { 0x21000072, 0x1400ffe0 },
+ { 0x21000073, 0x1400ffe0 },
+ { 0x21000074, 0x1400ffe0 },
+ { 0x21000075, 0x1400ffe0 },
+ { 0x21000076, 0x1400ffe0 },
+ { 0x21000077, 0x1400ffe0 },
+ { 0x21000078, 0x1400ffe0 },
+ { 0x21000079, 0x1400ffe0 },
+ { 0x2100007a, 0x1400ffe0 },
+ { 0x0900007b, 0x58000000 },
+ { 0x0900007c, 0x64000000 },
+ { 0x0900007d, 0x48000000 },
+ { 0x0900007e, 0x64000000 },
+ { 0x0980007f, 0x00000020 },
+ { 0x090000a0, 0x74000000 },
+ { 0x090000a1, 0x54000000 },
+ { 0x098000a2, 0x5c000003 },
+ { 0x098000a6, 0x68000001 },
+ { 0x090000a8, 0x60000000 },
+ { 0x090000a9, 0x68000000 },
+ { 0x210000aa, 0x14000000 },
+ { 0x090000ab, 0x50000000 },
+ { 0x090000ac, 0x64000000 },
+ { 0x090000ad, 0x04000000 },
+ { 0x090000ae, 0x68000000 },
+ { 0x090000af, 0x60000000 },
+ { 0x090000b0, 0x68000000 },
+ { 0x090000b1, 0x64000000 },
+ { 0x098000b2, 0x3c000001 },
+ { 0x090000b4, 0x60000000 },
+ { 0x090000b5, 0x140002e7 },
+ { 0x090000b6, 0x68000000 },
+ { 0x090000b7, 0x54000000 },
+ { 0x090000b8, 0x60000000 },
+ { 0x090000b9, 0x3c000000 },
+ { 0x210000ba, 0x14000000 },
+ { 0x090000bb, 0x4c000000 },
+ { 0x098000bc, 0x3c000002 },
+ { 0x090000bf, 0x54000000 },
+ { 0x210000c0, 0x24000020 },
+ { 0x210000c1, 0x24000020 },
+ { 0x210000c2, 0x24000020 },
+ { 0x210000c3, 0x24000020 },
+ { 0x210000c4, 0x24000020 },
+ { 0x210000c5, 0x24000020 },
+ { 0x210000c6, 0x24000020 },
+ { 0x210000c7, 0x24000020 },
+ { 0x210000c8, 0x24000020 },
+ { 0x210000c9, 0x24000020 },
+ { 0x210000ca, 0x24000020 },
+ { 0x210000cb, 0x24000020 },
+ { 0x210000cc, 0x24000020 },
+ { 0x210000cd, 0x24000020 },
+ { 0x210000ce, 0x24000020 },
+ { 0x210000cf, 0x24000020 },
+ { 0x210000d0, 0x24000020 },
+ { 0x210000d1, 0x24000020 },
+ { 0x210000d2, 0x24000020 },
+ { 0x210000d3, 0x24000020 },
+ { 0x210000d4, 0x24000020 },
+ { 0x210000d5, 0x24000020 },
+ { 0x210000d6, 0x24000020 },
+ { 0x090000d7, 0x64000000 },
+ { 0x210000d8, 0x24000020 },
+ { 0x210000d9, 0x24000020 },
+ { 0x210000da, 0x24000020 },
+ { 0x210000db, 0x24000020 },
+ { 0x210000dc, 0x24000020 },
+ { 0x210000dd, 0x24000020 },
+ { 0x210000de, 0x24000020 },
+ { 0x210000df, 0x14000000 },
+ { 0x210000e0, 0x1400ffe0 },
+ { 0x210000e1, 0x1400ffe0 },
+ { 0x210000e2, 0x1400ffe0 },
+ { 0x210000e3, 0x1400ffe0 },
+ { 0x210000e4, 0x1400ffe0 },
+ { 0x210000e5, 0x1400ffe0 },
+ { 0x210000e6, 0x1400ffe0 },
+ { 0x210000e7, 0x1400ffe0 },
+ { 0x210000e8, 0x1400ffe0 },
+ { 0x210000e9, 0x1400ffe0 },
+ { 0x210000ea, 0x1400ffe0 },
+ { 0x210000eb, 0x1400ffe0 },
+ { 0x210000ec, 0x1400ffe0 },
+ { 0x210000ed, 0x1400ffe0 },
+ { 0x210000ee, 0x1400ffe0 },
+ { 0x210000ef, 0x1400ffe0 },
+ { 0x210000f0, 0x1400ffe0 },
+ { 0x210000f1, 0x1400ffe0 },
+ { 0x210000f2, 0x1400ffe0 },
+ { 0x210000f3, 0x1400ffe0 },
+ { 0x210000f4, 0x1400ffe0 },
+ { 0x210000f5, 0x1400ffe0 },
+ { 0x210000f6, 0x1400ffe0 },
+ { 0x090000f7, 0x64000000 },
+ { 0x210000f8, 0x1400ffe0 },
+ { 0x210000f9, 0x1400ffe0 },
+ { 0x210000fa, 0x1400ffe0 },
+ { 0x210000fb, 0x1400ffe0 },
+ { 0x210000fc, 0x1400ffe0 },
+ { 0x210000fd, 0x1400ffe0 },
+ { 0x210000fe, 0x1400ffe0 },
+ { 0x210000ff, 0x14000079 },
+ { 0x21000100, 0x24000001 },
+ { 0x21000101, 0x1400ffff },
+ { 0x21000102, 0x24000001 },
+ { 0x21000103, 0x1400ffff },
+ { 0x21000104, 0x24000001 },
+ { 0x21000105, 0x1400ffff },
+ { 0x21000106, 0x24000001 },
+ { 0x21000107, 0x1400ffff },
+ { 0x21000108, 0x24000001 },
+ { 0x21000109, 0x1400ffff },
+ { 0x2100010a, 0x24000001 },
+ { 0x2100010b, 0x1400ffff },
+ { 0x2100010c, 0x24000001 },
+ { 0x2100010d, 0x1400ffff },
+ { 0x2100010e, 0x24000001 },
+ { 0x2100010f, 0x1400ffff },
+ { 0x21000110, 0x24000001 },
+ { 0x21000111, 0x1400ffff },
+ { 0x21000112, 0x24000001 },
+ { 0x21000113, 0x1400ffff },
+ { 0x21000114, 0x24000001 },
+ { 0x21000115, 0x1400ffff },
+ { 0x21000116, 0x24000001 },
+ { 0x21000117, 0x1400ffff },
+ { 0x21000118, 0x24000001 },
+ { 0x21000119, 0x1400ffff },
+ { 0x2100011a, 0x24000001 },
+ { 0x2100011b, 0x1400ffff },
+ { 0x2100011c, 0x24000001 },
+ { 0x2100011d, 0x1400ffff },
+ { 0x2100011e, 0x24000001 },
+ { 0x2100011f, 0x1400ffff },
+ { 0x21000120, 0x24000001 },
+ { 0x21000121, 0x1400ffff },
+ { 0x21000122, 0x24000001 },
+ { 0x21000123, 0x1400ffff },
+ { 0x21000124, 0x24000001 },
+ { 0x21000125, 0x1400ffff },
+ { 0x21000126, 0x24000001 },
+ { 0x21000127, 0x1400ffff },
+ { 0x21000128, 0x24000001 },
+ { 0x21000129, 0x1400ffff },
+ { 0x2100012a, 0x24000001 },
+ { 0x2100012b, 0x1400ffff },
+ { 0x2100012c, 0x24000001 },
+ { 0x2100012d, 0x1400ffff },
+ { 0x2100012e, 0x24000001 },
+ { 0x2100012f, 0x1400ffff },
+ { 0x21000130, 0x2400ff39 },
+ { 0x21000131, 0x1400ff18 },
+ { 0x21000132, 0x24000001 },
+ { 0x21000133, 0x1400ffff },
+ { 0x21000134, 0x24000001 },
+ { 0x21000135, 0x1400ffff },
+ { 0x21000136, 0x24000001 },
+ { 0x21000137, 0x1400ffff },
+ { 0x21000138, 0x14000000 },
+ { 0x21000139, 0x24000001 },
+ { 0x2100013a, 0x1400ffff },
+ { 0x2100013b, 0x24000001 },
+ { 0x2100013c, 0x1400ffff },
+ { 0x2100013d, 0x24000001 },
+ { 0x2100013e, 0x1400ffff },
+ { 0x2100013f, 0x24000001 },
+ { 0x21000140, 0x1400ffff },
+ { 0x21000141, 0x24000001 },
+ { 0x21000142, 0x1400ffff },
+ { 0x21000143, 0x24000001 },
+ { 0x21000144, 0x1400ffff },
+ { 0x21000145, 0x24000001 },
+ { 0x21000146, 0x1400ffff },
+ { 0x21000147, 0x24000001 },
+ { 0x21000148, 0x1400ffff },
+ { 0x21000149, 0x14000000 },
+ { 0x2100014a, 0x24000001 },
+ { 0x2100014b, 0x1400ffff },
+ { 0x2100014c, 0x24000001 },
+ { 0x2100014d, 0x1400ffff },
+ { 0x2100014e, 0x24000001 },
+ { 0x2100014f, 0x1400ffff },
+ { 0x21000150, 0x24000001 },
+ { 0x21000151, 0x1400ffff },
+ { 0x21000152, 0x24000001 },
+ { 0x21000153, 0x1400ffff },
+ { 0x21000154, 0x24000001 },
+ { 0x21000155, 0x1400ffff },
+ { 0x21000156, 0x24000001 },
+ { 0x21000157, 0x1400ffff },
+ { 0x21000158, 0x24000001 },
+ { 0x21000159, 0x1400ffff },
+ { 0x2100015a, 0x24000001 },
+ { 0x2100015b, 0x1400ffff },
+ { 0x2100015c, 0x24000001 },
+ { 0x2100015d, 0x1400ffff },
+ { 0x2100015e, 0x24000001 },
+ { 0x2100015f, 0x1400ffff },
+ { 0x21000160, 0x24000001 },
+ { 0x21000161, 0x1400ffff },
+ { 0x21000162, 0x24000001 },
+ { 0x21000163, 0x1400ffff },
+ { 0x21000164, 0x24000001 },
+ { 0x21000165, 0x1400ffff },
+ { 0x21000166, 0x24000001 },
+ { 0x21000167, 0x1400ffff },
+ { 0x21000168, 0x24000001 },
+ { 0x21000169, 0x1400ffff },
+ { 0x2100016a, 0x24000001 },
+ { 0x2100016b, 0x1400ffff },
+ { 0x2100016c, 0x24000001 },
+ { 0x2100016d, 0x1400ffff },
+ { 0x2100016e, 0x24000001 },
+ { 0x2100016f, 0x1400ffff },
+ { 0x21000170, 0x24000001 },
+ { 0x21000171, 0x1400ffff },
+ { 0x21000172, 0x24000001 },
+ { 0x21000173, 0x1400ffff },
+ { 0x21000174, 0x24000001 },
+ { 0x21000175, 0x1400ffff },
+ { 0x21000176, 0x24000001 },
+ { 0x21000177, 0x1400ffff },
+ { 0x21000178, 0x2400ff87 },
+ { 0x21000179, 0x24000001 },
+ { 0x2100017a, 0x1400ffff },
+ { 0x2100017b, 0x24000001 },
+ { 0x2100017c, 0x1400ffff },
+ { 0x2100017d, 0x24000001 },
+ { 0x2100017e, 0x1400ffff },
+ { 0x2100017f, 0x1400fed4 },
+ { 0x21000180, 0x140000c3 },
+ { 0x21000181, 0x240000d2 },
+ { 0x21000182, 0x24000001 },
+ { 0x21000183, 0x1400ffff },
+ { 0x21000184, 0x24000001 },
+ { 0x21000185, 0x1400ffff },
+ { 0x21000186, 0x240000ce },
+ { 0x21000187, 0x24000001 },
+ { 0x21000188, 0x1400ffff },
+ { 0x21000189, 0x240000cd },
+ { 0x2100018a, 0x240000cd },
+ { 0x2100018b, 0x24000001 },
+ { 0x2100018c, 0x1400ffff },
+ { 0x2100018d, 0x14000000 },
+ { 0x2100018e, 0x2400004f },
+ { 0x2100018f, 0x240000ca },
+ { 0x21000190, 0x240000cb },
+ { 0x21000191, 0x24000001 },
+ { 0x21000192, 0x1400ffff },
+ { 0x21000193, 0x240000cd },
+ { 0x21000194, 0x240000cf },
+ { 0x21000195, 0x14000061 },
+ { 0x21000196, 0x240000d3 },
+ { 0x21000197, 0x240000d1 },
+ { 0x21000198, 0x24000001 },
+ { 0x21000199, 0x1400ffff },
+ { 0x2100019a, 0x140000a3 },
+ { 0x2100019b, 0x14000000 },
+ { 0x2100019c, 0x240000d3 },
+ { 0x2100019d, 0x240000d5 },
+ { 0x2100019e, 0x14000082 },
+ { 0x2100019f, 0x240000d6 },
+ { 0x210001a0, 0x24000001 },
+ { 0x210001a1, 0x1400ffff },
+ { 0x210001a2, 0x24000001 },
+ { 0x210001a3, 0x1400ffff },
+ { 0x210001a4, 0x24000001 },
+ { 0x210001a5, 0x1400ffff },
+ { 0x210001a6, 0x240000da },
+ { 0x210001a7, 0x24000001 },
+ { 0x210001a8, 0x1400ffff },
+ { 0x210001a9, 0x240000da },
+ { 0x218001aa, 0x14000001 },
+ { 0x210001ac, 0x24000001 },
+ { 0x210001ad, 0x1400ffff },
+ { 0x210001ae, 0x240000da },
+ { 0x210001af, 0x24000001 },
+ { 0x210001b0, 0x1400ffff },
+ { 0x210001b1, 0x240000d9 },
+ { 0x210001b2, 0x240000d9 },
+ { 0x210001b3, 0x24000001 },
+ { 0x210001b4, 0x1400ffff },
+ { 0x210001b5, 0x24000001 },
+ { 0x210001b6, 0x1400ffff },
+ { 0x210001b7, 0x240000db },
+ { 0x210001b8, 0x24000001 },
+ { 0x210001b9, 0x1400ffff },
+ { 0x210001ba, 0x14000000 },
+ { 0x210001bb, 0x1c000000 },
+ { 0x210001bc, 0x24000001 },
+ { 0x210001bd, 0x1400ffff },
+ { 0x210001be, 0x14000000 },
+ { 0x210001bf, 0x14000038 },
+ { 0x218001c0, 0x1c000003 },
+ { 0x210001c4, 0x24000002 },
+ { 0x210001c5, 0x2000ffff },
+ { 0x210001c6, 0x1400fffe },
+ { 0x210001c7, 0x24000002 },
+ { 0x210001c8, 0x2000ffff },
+ { 0x210001c9, 0x1400fffe },
+ { 0x210001ca, 0x24000002 },
+ { 0x210001cb, 0x2000ffff },
+ { 0x210001cc, 0x1400fffe },
+ { 0x210001cd, 0x24000001 },
+ { 0x210001ce, 0x1400ffff },
+ { 0x210001cf, 0x24000001 },
+ { 0x210001d0, 0x1400ffff },
+ { 0x210001d1, 0x24000001 },
+ { 0x210001d2, 0x1400ffff },
+ { 0x210001d3, 0x24000001 },
+ { 0x210001d4, 0x1400ffff },
+ { 0x210001d5, 0x24000001 },
+ { 0x210001d6, 0x1400ffff },
+ { 0x210001d7, 0x24000001 },
+ { 0x210001d8, 0x1400ffff },
+ { 0x210001d9, 0x24000001 },
+ { 0x210001da, 0x1400ffff },
+ { 0x210001db, 0x24000001 },
+ { 0x210001dc, 0x1400ffff },
+ { 0x210001dd, 0x1400ffb1 },
+ { 0x210001de, 0x24000001 },
+ { 0x210001df, 0x1400ffff },
+ { 0x210001e0, 0x24000001 },
+ { 0x210001e1, 0x1400ffff },
+ { 0x210001e2, 0x24000001 },
+ { 0x210001e3, 0x1400ffff },
+ { 0x210001e4, 0x24000001 },
+ { 0x210001e5, 0x1400ffff },
+ { 0x210001e6, 0x24000001 },
+ { 0x210001e7, 0x1400ffff },
+ { 0x210001e8, 0x24000001 },
+ { 0x210001e9, 0x1400ffff },
+ { 0x210001ea, 0x24000001 },
+ { 0x210001eb, 0x1400ffff },
+ { 0x210001ec, 0x24000001 },
+ { 0x210001ed, 0x1400ffff },
+ { 0x210001ee, 0x24000001 },
+ { 0x210001ef, 0x1400ffff },
+ { 0x210001f0, 0x14000000 },
+ { 0x210001f1, 0x24000002 },
+ { 0x210001f2, 0x2000ffff },
+ { 0x210001f3, 0x1400fffe },
+ { 0x210001f4, 0x24000001 },
+ { 0x210001f5, 0x1400ffff },
+ { 0x210001f6, 0x2400ff9f },
+ { 0x210001f7, 0x2400ffc8 },
+ { 0x210001f8, 0x24000001 },
+ { 0x210001f9, 0x1400ffff },
+ { 0x210001fa, 0x24000001 },
+ { 0x210001fb, 0x1400ffff },
+ { 0x210001fc, 0x24000001 },
+ { 0x210001fd, 0x1400ffff },
+ { 0x210001fe, 0x24000001 },
+ { 0x210001ff, 0x1400ffff },
+ { 0x21000200, 0x24000001 },
+ { 0x21000201, 0x1400ffff },
+ { 0x21000202, 0x24000001 },
+ { 0x21000203, 0x1400ffff },
+ { 0x21000204, 0x24000001 },
+ { 0x21000205, 0x1400ffff },
+ { 0x21000206, 0x24000001 },
+ { 0x21000207, 0x1400ffff },
+ { 0x21000208, 0x24000001 },
+ { 0x21000209, 0x1400ffff },
+ { 0x2100020a, 0x24000001 },
+ { 0x2100020b, 0x1400ffff },
+ { 0x2100020c, 0x24000001 },
+ { 0x2100020d, 0x1400ffff },
+ { 0x2100020e, 0x24000001 },
+ { 0x2100020f, 0x1400ffff },
+ { 0x21000210, 0x24000001 },
+ { 0x21000211, 0x1400ffff },
+ { 0x21000212, 0x24000001 },
+ { 0x21000213, 0x1400ffff },
+ { 0x21000214, 0x24000001 },
+ { 0x21000215, 0x1400ffff },
+ { 0x21000216, 0x24000001 },
+ { 0x21000217, 0x1400ffff },
+ { 0x21000218, 0x24000001 },
+ { 0x21000219, 0x1400ffff },
+ { 0x2100021a, 0x24000001 },
+ { 0x2100021b, 0x1400ffff },
+ { 0x2100021c, 0x24000001 },
+ { 0x2100021d, 0x1400ffff },
+ { 0x2100021e, 0x24000001 },
+ { 0x2100021f, 0x1400ffff },
+ { 0x21000220, 0x2400ff7e },
+ { 0x21000221, 0x14000000 },
+ { 0x21000222, 0x24000001 },
+ { 0x21000223, 0x1400ffff },
+ { 0x21000224, 0x24000001 },
+ { 0x21000225, 0x1400ffff },
+ { 0x21000226, 0x24000001 },
+ { 0x21000227, 0x1400ffff },
+ { 0x21000228, 0x24000001 },
+ { 0x21000229, 0x1400ffff },
+ { 0x2100022a, 0x24000001 },
+ { 0x2100022b, 0x1400ffff },
+ { 0x2100022c, 0x24000001 },
+ { 0x2100022d, 0x1400ffff },
+ { 0x2100022e, 0x24000001 },
+ { 0x2100022f, 0x1400ffff },
+ { 0x21000230, 0x24000001 },
+ { 0x21000231, 0x1400ffff },
+ { 0x21000232, 0x24000001 },
+ { 0x21000233, 0x1400ffff },
+ { 0x21800234, 0x14000005 },
+ { 0x2100023a, 0x24002a2b },
+ { 0x2100023b, 0x24000001 },
+ { 0x2100023c, 0x1400ffff },
+ { 0x2100023d, 0x2400ff5d },
+ { 0x2100023e, 0x24002a28 },
+ { 0x2180023f, 0x14000001 },
+ { 0x21000241, 0x24000001 },
+ { 0x21000242, 0x1400ffff },
+ { 0x21000243, 0x2400ff3d },
+ { 0x21000244, 0x24000045 },
+ { 0x21000245, 0x24000047 },
+ { 0x21000246, 0x24000001 },
+ { 0x21000247, 0x1400ffff },
+ { 0x21000248, 0x24000001 },
+ { 0x21000249, 0x1400ffff },
+ { 0x2100024a, 0x24000001 },
+ { 0x2100024b, 0x1400ffff },
+ { 0x2100024c, 0x24000001 },
+ { 0x2100024d, 0x1400ffff },
+ { 0x2100024e, 0x24000001 },
+ { 0x2100024f, 0x1400ffff },
+ { 0x21800250, 0x14000002 },
+ { 0x21000253, 0x1400ff2e },
+ { 0x21000254, 0x1400ff32 },
+ { 0x21000255, 0x14000000 },
+ { 0x21000256, 0x1400ff33 },
+ { 0x21000257, 0x1400ff33 },
+ { 0x21000258, 0x14000000 },
+ { 0x21000259, 0x1400ff36 },
+ { 0x2100025a, 0x14000000 },
+ { 0x2100025b, 0x1400ff35 },
+ { 0x2180025c, 0x14000003 },
+ { 0x21000260, 0x1400ff33 },
+ { 0x21800261, 0x14000001 },
+ { 0x21000263, 0x1400ff31 },
+ { 0x21800264, 0x14000003 },
+ { 0x21000268, 0x1400ff2f },
+ { 0x21000269, 0x1400ff2d },
+ { 0x2100026a, 0x14000000 },
+ { 0x2100026b, 0x140029f7 },
+ { 0x2180026c, 0x14000002 },
+ { 0x2100026f, 0x1400ff2d },
+ { 0x21800270, 0x14000001 },
+ { 0x21000272, 0x1400ff2b },
+ { 0x21800273, 0x14000001 },
+ { 0x21000275, 0x1400ff2a },
+ { 0x21800276, 0x14000006 },
+ { 0x2100027d, 0x140029e7 },
+ { 0x2180027e, 0x14000001 },
+ { 0x21000280, 0x1400ff26 },
+ { 0x21800281, 0x14000001 },
+ { 0x21000283, 0x1400ff26 },
+ { 0x21800284, 0x14000003 },
+ { 0x21000288, 0x1400ff26 },
+ { 0x21000289, 0x1400ffbb },
+ { 0x2100028a, 0x1400ff27 },
+ { 0x2100028b, 0x1400ff27 },
+ { 0x2100028c, 0x1400ffb9 },
+ { 0x2180028d, 0x14000004 },
+ { 0x21000292, 0x1400ff25 },
+ { 0x21000293, 0x14000000 },
+ { 0x21000294, 0x1c000000 },
+ { 0x21800295, 0x1400001a },
+ { 0x218002b0, 0x18000011 },
+ { 0x098002c2, 0x60000003 },
+ { 0x098002c6, 0x1800000b },
+ { 0x098002d2, 0x6000000d },
+ { 0x218002e0, 0x18000004 },
+ { 0x098002e5, 0x60000008 },
+ { 0x090002ee, 0x18000000 },
+ { 0x098002ef, 0x60000010 },
+ { 0x1b800300, 0x30000044 },
+ { 0x1b000345, 0x30000054 },
+ { 0x1b800346, 0x30000029 },
+ { 0x13800374, 0x60000001 },
+ { 0x1300037a, 0x18000000 },
+ { 0x1300037b, 0x14000082 },
+ { 0x1300037c, 0x14000082 },
+ { 0x1300037d, 0x14000082 },
+ { 0x0900037e, 0x54000000 },
+ { 0x13800384, 0x60000001 },
+ { 0x13000386, 0x24000026 },
+ { 0x09000387, 0x54000000 },
+ { 0x13000388, 0x24000025 },
+ { 0x13000389, 0x24000025 },
+ { 0x1300038a, 0x24000025 },
+ { 0x1300038c, 0x24000040 },
+ { 0x1300038e, 0x2400003f },
+ { 0x1300038f, 0x2400003f },
+ { 0x13000390, 0x14000000 },
+ { 0x13000391, 0x24000020 },
+ { 0x13000392, 0x24000020 },
+ { 0x13000393, 0x24000020 },
+ { 0x13000394, 0x24000020 },
+ { 0x13000395, 0x24000020 },
+ { 0x13000396, 0x24000020 },
+ { 0x13000397, 0x24000020 },
+ { 0x13000398, 0x24000020 },
+ { 0x13000399, 0x24000020 },
+ { 0x1300039a, 0x24000020 },
+ { 0x1300039b, 0x24000020 },
+ { 0x1300039c, 0x24000020 },
+ { 0x1300039d, 0x24000020 },
+ { 0x1300039e, 0x24000020 },
+ { 0x1300039f, 0x24000020 },
+ { 0x130003a0, 0x24000020 },
+ { 0x130003a1, 0x24000020 },
+ { 0x130003a3, 0x24000020 },
+ { 0x130003a4, 0x24000020 },
+ { 0x130003a5, 0x24000020 },
+ { 0x130003a6, 0x24000020 },
+ { 0x130003a7, 0x24000020 },
+ { 0x130003a8, 0x24000020 },
+ { 0x130003a9, 0x24000020 },
+ { 0x130003aa, 0x24000020 },
+ { 0x130003ab, 0x24000020 },
+ { 0x130003ac, 0x1400ffda },
+ { 0x130003ad, 0x1400ffdb },
+ { 0x130003ae, 0x1400ffdb },
+ { 0x130003af, 0x1400ffdb },
+ { 0x130003b0, 0x14000000 },
+ { 0x130003b1, 0x1400ffe0 },
+ { 0x130003b2, 0x1400ffe0 },
+ { 0x130003b3, 0x1400ffe0 },
+ { 0x130003b4, 0x1400ffe0 },
+ { 0x130003b5, 0x1400ffe0 },
+ { 0x130003b6, 0x1400ffe0 },
+ { 0x130003b7, 0x1400ffe0 },
+ { 0x130003b8, 0x1400ffe0 },
+ { 0x130003b9, 0x1400ffe0 },
+ { 0x130003ba, 0x1400ffe0 },
+ { 0x130003bb, 0x1400ffe0 },
+ { 0x130003bc, 0x1400ffe0 },
+ { 0x130003bd, 0x1400ffe0 },
+ { 0x130003be, 0x1400ffe0 },
+ { 0x130003bf, 0x1400ffe0 },
+ { 0x130003c0, 0x1400ffe0 },
+ { 0x130003c1, 0x1400ffe0 },
+ { 0x130003c2, 0x1400ffe1 },
+ { 0x130003c3, 0x1400ffe0 },
+ { 0x130003c4, 0x1400ffe0 },
+ { 0x130003c5, 0x1400ffe0 },
+ { 0x130003c6, 0x1400ffe0 },
+ { 0x130003c7, 0x1400ffe0 },
+ { 0x130003c8, 0x1400ffe0 },
+ { 0x130003c9, 0x1400ffe0 },
+ { 0x130003ca, 0x1400ffe0 },
+ { 0x130003cb, 0x1400ffe0 },
+ { 0x130003cc, 0x1400ffc0 },
+ { 0x130003cd, 0x1400ffc1 },
+ { 0x130003ce, 0x1400ffc1 },
+ { 0x130003d0, 0x1400ffc2 },
+ { 0x130003d1, 0x1400ffc7 },
+ { 0x138003d2, 0x24000002 },
+ { 0x130003d5, 0x1400ffd1 },
+ { 0x130003d6, 0x1400ffca },
+ { 0x130003d7, 0x14000000 },
+ { 0x130003d8, 0x24000001 },
+ { 0x130003d9, 0x1400ffff },
+ { 0x130003da, 0x24000001 },
+ { 0x130003db, 0x1400ffff },
+ { 0x130003dc, 0x24000001 },
+ { 0x130003dd, 0x1400ffff },
+ { 0x130003de, 0x24000001 },
+ { 0x130003df, 0x1400ffff },
+ { 0x130003e0, 0x24000001 },
+ { 0x130003e1, 0x1400ffff },
+ { 0x0a0003e2, 0x24000001 },
+ { 0x0a0003e3, 0x1400ffff },
+ { 0x0a0003e4, 0x24000001 },
+ { 0x0a0003e5, 0x1400ffff },
+ { 0x0a0003e6, 0x24000001 },
+ { 0x0a0003e7, 0x1400ffff },
+ { 0x0a0003e8, 0x24000001 },
+ { 0x0a0003e9, 0x1400ffff },
+ { 0x0a0003ea, 0x24000001 },
+ { 0x0a0003eb, 0x1400ffff },
+ { 0x0a0003ec, 0x24000001 },
+ { 0x0a0003ed, 0x1400ffff },
+ { 0x0a0003ee, 0x24000001 },
+ { 0x0a0003ef, 0x1400ffff },
+ { 0x130003f0, 0x1400ffaa },
+ { 0x130003f1, 0x1400ffb0 },
+ { 0x130003f2, 0x14000007 },
+ { 0x130003f3, 0x14000000 },
+ { 0x130003f4, 0x2400ffc4 },
+ { 0x130003f5, 0x1400ffa0 },
+ { 0x130003f6, 0x64000000 },
+ { 0x130003f7, 0x24000001 },
+ { 0x130003f8, 0x1400ffff },
+ { 0x130003f9, 0x2400fff9 },
+ { 0x130003fa, 0x24000001 },
+ { 0x130003fb, 0x1400ffff },
+ { 0x130003fc, 0x14000000 },
+ { 0x130003fd, 0x2400ff7e },
+ { 0x130003fe, 0x2400ff7e },
+ { 0x130003ff, 0x2400ff7e },
+ { 0x0c000400, 0x24000050 },
+ { 0x0c000401, 0x24000050 },
+ { 0x0c000402, 0x24000050 },
+ { 0x0c000403, 0x24000050 },
+ { 0x0c000404, 0x24000050 },
+ { 0x0c000405, 0x24000050 },
+ { 0x0c000406, 0x24000050 },
+ { 0x0c000407, 0x24000050 },
+ { 0x0c000408, 0x24000050 },
+ { 0x0c000409, 0x24000050 },
+ { 0x0c00040a, 0x24000050 },
+ { 0x0c00040b, 0x24000050 },
+ { 0x0c00040c, 0x24000050 },
+ { 0x0c00040d, 0x24000050 },
+ { 0x0c00040e, 0x24000050 },
+ { 0x0c00040f, 0x24000050 },
+ { 0x0c000410, 0x24000020 },
+ { 0x0c000411, 0x24000020 },
+ { 0x0c000412, 0x24000020 },
+ { 0x0c000413, 0x24000020 },
+ { 0x0c000414, 0x24000020 },
+ { 0x0c000415, 0x24000020 },
+ { 0x0c000416, 0x24000020 },
+ { 0x0c000417, 0x24000020 },
+ { 0x0c000418, 0x24000020 },
+ { 0x0c000419, 0x24000020 },
+ { 0x0c00041a, 0x24000020 },
+ { 0x0c00041b, 0x24000020 },
+ { 0x0c00041c, 0x24000020 },
+ { 0x0c00041d, 0x24000020 },
+ { 0x0c00041e, 0x24000020 },
+ { 0x0c00041f, 0x24000020 },
+ { 0x0c000420, 0x24000020 },
+ { 0x0c000421, 0x24000020 },
+ { 0x0c000422, 0x24000020 },
+ { 0x0c000423, 0x24000020 },
+ { 0x0c000424, 0x24000020 },
+ { 0x0c000425, 0x24000020 },
+ { 0x0c000426, 0x24000020 },
+ { 0x0c000427, 0x24000020 },
+ { 0x0c000428, 0x24000020 },
+ { 0x0c000429, 0x24000020 },
+ { 0x0c00042a, 0x24000020 },
+ { 0x0c00042b, 0x24000020 },
+ { 0x0c00042c, 0x24000020 },
+ { 0x0c00042d, 0x24000020 },
+ { 0x0c00042e, 0x24000020 },
+ { 0x0c00042f, 0x24000020 },
+ { 0x0c000430, 0x1400ffe0 },
+ { 0x0c000431, 0x1400ffe0 },
+ { 0x0c000432, 0x1400ffe0 },
+ { 0x0c000433, 0x1400ffe0 },
+ { 0x0c000434, 0x1400ffe0 },
+ { 0x0c000435, 0x1400ffe0 },
+ { 0x0c000436, 0x1400ffe0 },
+ { 0x0c000437, 0x1400ffe0 },
+ { 0x0c000438, 0x1400ffe0 },
+ { 0x0c000439, 0x1400ffe0 },
+ { 0x0c00043a, 0x1400ffe0 },
+ { 0x0c00043b, 0x1400ffe0 },
+ { 0x0c00043c, 0x1400ffe0 },
+ { 0x0c00043d, 0x1400ffe0 },
+ { 0x0c00043e, 0x1400ffe0 },
+ { 0x0c00043f, 0x1400ffe0 },
+ { 0x0c000440, 0x1400ffe0 },
+ { 0x0c000441, 0x1400ffe0 },
+ { 0x0c000442, 0x1400ffe0 },
+ { 0x0c000443, 0x1400ffe0 },
+ { 0x0c000444, 0x1400ffe0 },
+ { 0x0c000445, 0x1400ffe0 },
+ { 0x0c000446, 0x1400ffe0 },
+ { 0x0c000447, 0x1400ffe0 },
+ { 0x0c000448, 0x1400ffe0 },
+ { 0x0c000449, 0x1400ffe0 },
+ { 0x0c00044a, 0x1400ffe0 },
+ { 0x0c00044b, 0x1400ffe0 },
+ { 0x0c00044c, 0x1400ffe0 },
+ { 0x0c00044d, 0x1400ffe0 },
+ { 0x0c00044e, 0x1400ffe0 },
+ { 0x0c00044f, 0x1400ffe0 },
+ { 0x0c000450, 0x1400ffb0 },
+ { 0x0c000451, 0x1400ffb0 },
+ { 0x0c000452, 0x1400ffb0 },
+ { 0x0c000453, 0x1400ffb0 },
+ { 0x0c000454, 0x1400ffb0 },
+ { 0x0c000455, 0x1400ffb0 },
+ { 0x0c000456, 0x1400ffb0 },
+ { 0x0c000457, 0x1400ffb0 },
+ { 0x0c000458, 0x1400ffb0 },
+ { 0x0c000459, 0x1400ffb0 },
+ { 0x0c00045a, 0x1400ffb0 },
+ { 0x0c00045b, 0x1400ffb0 },
+ { 0x0c00045c, 0x1400ffb0 },
+ { 0x0c00045d, 0x1400ffb0 },
+ { 0x0c00045e, 0x1400ffb0 },
+ { 0x0c00045f, 0x1400ffb0 },
+ { 0x0c000460, 0x24000001 },
+ { 0x0c000461, 0x1400ffff },
+ { 0x0c000462, 0x24000001 },
+ { 0x0c000463, 0x1400ffff },
+ { 0x0c000464, 0x24000001 },
+ { 0x0c000465, 0x1400ffff },
+ { 0x0c000466, 0x24000001 },
+ { 0x0c000467, 0x1400ffff },
+ { 0x0c000468, 0x24000001 },
+ { 0x0c000469, 0x1400ffff },
+ { 0x0c00046a, 0x24000001 },
+ { 0x0c00046b, 0x1400ffff },
+ { 0x0c00046c, 0x24000001 },
+ { 0x0c00046d, 0x1400ffff },
+ { 0x0c00046e, 0x24000001 },
+ { 0x0c00046f, 0x1400ffff },
+ { 0x0c000470, 0x24000001 },
+ { 0x0c000471, 0x1400ffff },
+ { 0x0c000472, 0x24000001 },
+ { 0x0c000473, 0x1400ffff },
+ { 0x0c000474, 0x24000001 },
+ { 0x0c000475, 0x1400ffff },
+ { 0x0c000476, 0x24000001 },
+ { 0x0c000477, 0x1400ffff },
+ { 0x0c000478, 0x24000001 },
+ { 0x0c000479, 0x1400ffff },
+ { 0x0c00047a, 0x24000001 },
+ { 0x0c00047b, 0x1400ffff },
+ { 0x0c00047c, 0x24000001 },
+ { 0x0c00047d, 0x1400ffff },
+ { 0x0c00047e, 0x24000001 },
+ { 0x0c00047f, 0x1400ffff },
+ { 0x0c000480, 0x24000001 },
+ { 0x0c000481, 0x1400ffff },
+ { 0x0c000482, 0x68000000 },
+ { 0x0c800483, 0x30000003 },
+ { 0x0c800488, 0x2c000001 },
+ { 0x0c00048a, 0x24000001 },
+ { 0x0c00048b, 0x1400ffff },
+ { 0x0c00048c, 0x24000001 },
+ { 0x0c00048d, 0x1400ffff },
+ { 0x0c00048e, 0x24000001 },
+ { 0x0c00048f, 0x1400ffff },
+ { 0x0c000490, 0x24000001 },
+ { 0x0c000491, 0x1400ffff },
+ { 0x0c000492, 0x24000001 },
+ { 0x0c000493, 0x1400ffff },
+ { 0x0c000494, 0x24000001 },
+ { 0x0c000495, 0x1400ffff },
+ { 0x0c000496, 0x24000001 },
+ { 0x0c000497, 0x1400ffff },
+ { 0x0c000498, 0x24000001 },
+ { 0x0c000499, 0x1400ffff },
+ { 0x0c00049a, 0x24000001 },
+ { 0x0c00049b, 0x1400ffff },
+ { 0x0c00049c, 0x24000001 },
+ { 0x0c00049d, 0x1400ffff },
+ { 0x0c00049e, 0x24000001 },
+ { 0x0c00049f, 0x1400ffff },
+ { 0x0c0004a0, 0x24000001 },
+ { 0x0c0004a1, 0x1400ffff },
+ { 0x0c0004a2, 0x24000001 },
+ { 0x0c0004a3, 0x1400ffff },
+ { 0x0c0004a4, 0x24000001 },
+ { 0x0c0004a5, 0x1400ffff },
+ { 0x0c0004a6, 0x24000001 },
+ { 0x0c0004a7, 0x1400ffff },
+ { 0x0c0004a8, 0x24000001 },
+ { 0x0c0004a9, 0x1400ffff },
+ { 0x0c0004aa, 0x24000001 },
+ { 0x0c0004ab, 0x1400ffff },
+ { 0x0c0004ac, 0x24000001 },
+ { 0x0c0004ad, 0x1400ffff },
+ { 0x0c0004ae, 0x24000001 },
+ { 0x0c0004af, 0x1400ffff },
+ { 0x0c0004b0, 0x24000001 },
+ { 0x0c0004b1, 0x1400ffff },
+ { 0x0c0004b2, 0x24000001 },
+ { 0x0c0004b3, 0x1400ffff },
+ { 0x0c0004b4, 0x24000001 },
+ { 0x0c0004b5, 0x1400ffff },
+ { 0x0c0004b6, 0x24000001 },
+ { 0x0c0004b7, 0x1400ffff },
+ { 0x0c0004b8, 0x24000001 },
+ { 0x0c0004b9, 0x1400ffff },
+ { 0x0c0004ba, 0x24000001 },
+ { 0x0c0004bb, 0x1400ffff },
+ { 0x0c0004bc, 0x24000001 },
+ { 0x0c0004bd, 0x1400ffff },
+ { 0x0c0004be, 0x24000001 },
+ { 0x0c0004bf, 0x1400ffff },
+ { 0x0c0004c0, 0x2400000f },
+ { 0x0c0004c1, 0x24000001 },
+ { 0x0c0004c2, 0x1400ffff },
+ { 0x0c0004c3, 0x24000001 },
+ { 0x0c0004c4, 0x1400ffff },
+ { 0x0c0004c5, 0x24000001 },
+ { 0x0c0004c6, 0x1400ffff },
+ { 0x0c0004c7, 0x24000001 },
+ { 0x0c0004c8, 0x1400ffff },
+ { 0x0c0004c9, 0x24000001 },
+ { 0x0c0004ca, 0x1400ffff },
+ { 0x0c0004cb, 0x24000001 },
+ { 0x0c0004cc, 0x1400ffff },
+ { 0x0c0004cd, 0x24000001 },
+ { 0x0c0004ce, 0x1400ffff },
+ { 0x0c0004cf, 0x1400fff1 },
+ { 0x0c0004d0, 0x24000001 },
+ { 0x0c0004d1, 0x1400ffff },
+ { 0x0c0004d2, 0x24000001 },
+ { 0x0c0004d3, 0x1400ffff },
+ { 0x0c0004d4, 0x24000001 },
+ { 0x0c0004d5, 0x1400ffff },
+ { 0x0c0004d6, 0x24000001 },
+ { 0x0c0004d7, 0x1400ffff },
+ { 0x0c0004d8, 0x24000001 },
+ { 0x0c0004d9, 0x1400ffff },
+ { 0x0c0004da, 0x24000001 },
+ { 0x0c0004db, 0x1400ffff },
+ { 0x0c0004dc, 0x24000001 },
+ { 0x0c0004dd, 0x1400ffff },
+ { 0x0c0004de, 0x24000001 },
+ { 0x0c0004df, 0x1400ffff },
+ { 0x0c0004e0, 0x24000001 },
+ { 0x0c0004e1, 0x1400ffff },
+ { 0x0c0004e2, 0x24000001 },
+ { 0x0c0004e3, 0x1400ffff },
+ { 0x0c0004e4, 0x24000001 },
+ { 0x0c0004e5, 0x1400ffff },
+ { 0x0c0004e6, 0x24000001 },
+ { 0x0c0004e7, 0x1400ffff },
+ { 0x0c0004e8, 0x24000001 },
+ { 0x0c0004e9, 0x1400ffff },
+ { 0x0c0004ea, 0x24000001 },
+ { 0x0c0004eb, 0x1400ffff },
+ { 0x0c0004ec, 0x24000001 },
+ { 0x0c0004ed, 0x1400ffff },
+ { 0x0c0004ee, 0x24000001 },
+ { 0x0c0004ef, 0x1400ffff },
+ { 0x0c0004f0, 0x24000001 },
+ { 0x0c0004f1, 0x1400ffff },
+ { 0x0c0004f2, 0x24000001 },
+ { 0x0c0004f3, 0x1400ffff },
+ { 0x0c0004f4, 0x24000001 },
+ { 0x0c0004f5, 0x1400ffff },
+ { 0x0c0004f6, 0x24000001 },
+ { 0x0c0004f7, 0x1400ffff },
+ { 0x0c0004f8, 0x24000001 },
+ { 0x0c0004f9, 0x1400ffff },
+ { 0x0c0004fa, 0x24000001 },
+ { 0x0c0004fb, 0x1400ffff },
+ { 0x0c0004fc, 0x24000001 },
+ { 0x0c0004fd, 0x1400ffff },
+ { 0x0c0004fe, 0x24000001 },
+ { 0x0c0004ff, 0x1400ffff },
+ { 0x0c000500, 0x24000001 },
+ { 0x0c000501, 0x1400ffff },
+ { 0x0c000502, 0x24000001 },
+ { 0x0c000503, 0x1400ffff },
+ { 0x0c000504, 0x24000001 },
+ { 0x0c000505, 0x1400ffff },
+ { 0x0c000506, 0x24000001 },
+ { 0x0c000507, 0x1400ffff },
+ { 0x0c000508, 0x24000001 },
+ { 0x0c000509, 0x1400ffff },
+ { 0x0c00050a, 0x24000001 },
+ { 0x0c00050b, 0x1400ffff },
+ { 0x0c00050c, 0x24000001 },
+ { 0x0c00050d, 0x1400ffff },
+ { 0x0c00050e, 0x24000001 },
+ { 0x0c00050f, 0x1400ffff },
+ { 0x0c000510, 0x24000001 },
+ { 0x0c000511, 0x1400ffff },
+ { 0x0c000512, 0x24000001 },
+ { 0x0c000513, 0x1400ffff },
+ { 0x01000531, 0x24000030 },
+ { 0x01000532, 0x24000030 },
+ { 0x01000533, 0x24000030 },
+ { 0x01000534, 0x24000030 },
+ { 0x01000535, 0x24000030 },
+ { 0x01000536, 0x24000030 },
+ { 0x01000537, 0x24000030 },
+ { 0x01000538, 0x24000030 },
+ { 0x01000539, 0x24000030 },
+ { 0x0100053a, 0x24000030 },
+ { 0x0100053b, 0x24000030 },
+ { 0x0100053c, 0x24000030 },
+ { 0x0100053d, 0x24000030 },
+ { 0x0100053e, 0x24000030 },
+ { 0x0100053f, 0x24000030 },
+ { 0x01000540, 0x24000030 },
+ { 0x01000541, 0x24000030 },
+ { 0x01000542, 0x24000030 },
+ { 0x01000543, 0x24000030 },
+ { 0x01000544, 0x24000030 },
+ { 0x01000545, 0x24000030 },
+ { 0x01000546, 0x24000030 },
+ { 0x01000547, 0x24000030 },
+ { 0x01000548, 0x24000030 },
+ { 0x01000549, 0x24000030 },
+ { 0x0100054a, 0x24000030 },
+ { 0x0100054b, 0x24000030 },
+ { 0x0100054c, 0x24000030 },
+ { 0x0100054d, 0x24000030 },
+ { 0x0100054e, 0x24000030 },
+ { 0x0100054f, 0x24000030 },
+ { 0x01000550, 0x24000030 },
+ { 0x01000551, 0x24000030 },
+ { 0x01000552, 0x24000030 },
+ { 0x01000553, 0x24000030 },
+ { 0x01000554, 0x24000030 },
+ { 0x01000555, 0x24000030 },
+ { 0x01000556, 0x24000030 },
+ { 0x01000559, 0x18000000 },
+ { 0x0180055a, 0x54000005 },
+ { 0x01000561, 0x1400ffd0 },
+ { 0x01000562, 0x1400ffd0 },
+ { 0x01000563, 0x1400ffd0 },
+ { 0x01000564, 0x1400ffd0 },
+ { 0x01000565, 0x1400ffd0 },
+ { 0x01000566, 0x1400ffd0 },
+ { 0x01000567, 0x1400ffd0 },
+ { 0x01000568, 0x1400ffd0 },
+ { 0x01000569, 0x1400ffd0 },
+ { 0x0100056a, 0x1400ffd0 },
+ { 0x0100056b, 0x1400ffd0 },
+ { 0x0100056c, 0x1400ffd0 },
+ { 0x0100056d, 0x1400ffd0 },
+ { 0x0100056e, 0x1400ffd0 },
+ { 0x0100056f, 0x1400ffd0 },
+ { 0x01000570, 0x1400ffd0 },
+ { 0x01000571, 0x1400ffd0 },
+ { 0x01000572, 0x1400ffd0 },
+ { 0x01000573, 0x1400ffd0 },
+ { 0x01000574, 0x1400ffd0 },
+ { 0x01000575, 0x1400ffd0 },
+ { 0x01000576, 0x1400ffd0 },
+ { 0x01000577, 0x1400ffd0 },
+ { 0x01000578, 0x1400ffd0 },
+ { 0x01000579, 0x1400ffd0 },
+ { 0x0100057a, 0x1400ffd0 },
+ { 0x0100057b, 0x1400ffd0 },
+ { 0x0100057c, 0x1400ffd0 },
+ { 0x0100057d, 0x1400ffd0 },
+ { 0x0100057e, 0x1400ffd0 },
+ { 0x0100057f, 0x1400ffd0 },
+ { 0x01000580, 0x1400ffd0 },
+ { 0x01000581, 0x1400ffd0 },
+ { 0x01000582, 0x1400ffd0 },
+ { 0x01000583, 0x1400ffd0 },
+ { 0x01000584, 0x1400ffd0 },
+ { 0x01000585, 0x1400ffd0 },
+ { 0x01000586, 0x1400ffd0 },
+ { 0x01000587, 0x14000000 },
+ { 0x09000589, 0x54000000 },
+ { 0x0100058a, 0x44000000 },
+ { 0x19800591, 0x3000002c },
+ { 0x190005be, 0x54000000 },
+ { 0x190005bf, 0x30000000 },
+ { 0x190005c0, 0x54000000 },
+ { 0x198005c1, 0x30000001 },
+ { 0x190005c3, 0x54000000 },
+ { 0x198005c4, 0x30000001 },
+ { 0x190005c6, 0x54000000 },
+ { 0x190005c7, 0x30000000 },
+ { 0x198005d0, 0x1c00001a },
+ { 0x198005f0, 0x1c000002 },
+ { 0x198005f3, 0x54000001 },
+ { 0x09800600, 0x04000003 },
+ { 0x0000060b, 0x5c000000 },
+ { 0x0980060c, 0x54000001 },
+ { 0x0080060e, 0x68000001 },
+ { 0x00800610, 0x30000005 },
+ { 0x0900061b, 0x54000000 },
+ { 0x0080061e, 0x54000001 },
+ { 0x00800621, 0x1c000019 },
+ { 0x09000640, 0x18000000 },
+ { 0x00800641, 0x1c000009 },
+ { 0x1b80064b, 0x30000013 },
+ { 0x09800660, 0x34000009 },
+ { 0x0080066a, 0x54000003 },
+ { 0x0080066e, 0x1c000001 },
+ { 0x1b000670, 0x30000000 },
+ { 0x00800671, 0x1c000062 },
+ { 0x000006d4, 0x54000000 },
+ { 0x000006d5, 0x1c000000 },
+ { 0x008006d6, 0x30000006 },
+ { 0x090006dd, 0x04000000 },
+ { 0x000006de, 0x2c000000 },
+ { 0x008006df, 0x30000005 },
+ { 0x008006e5, 0x18000001 },
+ { 0x008006e7, 0x30000001 },
+ { 0x000006e9, 0x68000000 },
+ { 0x008006ea, 0x30000003 },
+ { 0x008006ee, 0x1c000001 },
+ { 0x008006f0, 0x34000009 },
+ { 0x008006fa, 0x1c000002 },
+ { 0x008006fd, 0x68000001 },
+ { 0x000006ff, 0x1c000000 },
+ { 0x31800700, 0x5400000d },
+ { 0x3100070f, 0x04000000 },
+ { 0x31000710, 0x1c000000 },
+ { 0x31000711, 0x30000000 },
+ { 0x31800712, 0x1c00001d },
+ { 0x31800730, 0x3000001a },
+ { 0x3180074d, 0x1c000020 },
+ { 0x37800780, 0x1c000025 },
+ { 0x378007a6, 0x3000000a },
+ { 0x370007b1, 0x1c000000 },
+ { 0x3f8007c0, 0x34000009 },
+ { 0x3f8007ca, 0x1c000020 },
+ { 0x3f8007eb, 0x30000008 },
+ { 0x3f8007f4, 0x18000001 },
+ { 0x3f0007f6, 0x68000000 },
+ { 0x3f8007f7, 0x54000002 },
+ { 0x3f0007fa, 0x18000000 },
+ { 0x0e800901, 0x30000001 },
+ { 0x0e000903, 0x28000000 },
+ { 0x0e800904, 0x1c000035 },
+ { 0x0e00093c, 0x30000000 },
+ { 0x0e00093d, 0x1c000000 },
+ { 0x0e80093e, 0x28000002 },
+ { 0x0e800941, 0x30000007 },
+ { 0x0e800949, 0x28000003 },
+ { 0x0e00094d, 0x30000000 },
+ { 0x0e000950, 0x1c000000 },
+ { 0x0e800951, 0x30000003 },
+ { 0x0e800958, 0x1c000009 },
+ { 0x0e800962, 0x30000001 },
+ { 0x09800964, 0x54000001 },
+ { 0x0e800966, 0x34000009 },
+ { 0x09000970, 0x54000000 },
+ { 0x0e80097b, 0x1c000004 },
+ { 0x02000981, 0x30000000 },
+ { 0x02800982, 0x28000001 },
+ { 0x02800985, 0x1c000007 },
+ { 0x0280098f, 0x1c000001 },
+ { 0x02800993, 0x1c000015 },
+ { 0x028009aa, 0x1c000006 },
+ { 0x020009b2, 0x1c000000 },
+ { 0x028009b6, 0x1c000003 },
+ { 0x020009bc, 0x30000000 },
+ { 0x020009bd, 0x1c000000 },
+ { 0x028009be, 0x28000002 },
+ { 0x028009c1, 0x30000003 },
+ { 0x028009c7, 0x28000001 },
+ { 0x028009cb, 0x28000001 },
+ { 0x020009cd, 0x30000000 },
+ { 0x020009ce, 0x1c000000 },
+ { 0x020009d7, 0x28000000 },
+ { 0x028009dc, 0x1c000001 },
+ { 0x028009df, 0x1c000002 },
+ { 0x028009e2, 0x30000001 },
+ { 0x028009e6, 0x34000009 },
+ { 0x028009f0, 0x1c000001 },
+ { 0x028009f2, 0x5c000001 },
+ { 0x028009f4, 0x3c000005 },
+ { 0x020009fa, 0x68000000 },
+ { 0x15800a01, 0x30000001 },
+ { 0x15000a03, 0x28000000 },
+ { 0x15800a05, 0x1c000005 },
+ { 0x15800a0f, 0x1c000001 },
+ { 0x15800a13, 0x1c000015 },
+ { 0x15800a2a, 0x1c000006 },
+ { 0x15800a32, 0x1c000001 },
+ { 0x15800a35, 0x1c000001 },
+ { 0x15800a38, 0x1c000001 },
+ { 0x15000a3c, 0x30000000 },
+ { 0x15800a3e, 0x28000002 },
+ { 0x15800a41, 0x30000001 },
+ { 0x15800a47, 0x30000001 },
+ { 0x15800a4b, 0x30000002 },
+ { 0x15800a59, 0x1c000003 },
+ { 0x15000a5e, 0x1c000000 },
+ { 0x15800a66, 0x34000009 },
+ { 0x15800a70, 0x30000001 },
+ { 0x15800a72, 0x1c000002 },
+ { 0x14800a81, 0x30000001 },
+ { 0x14000a83, 0x28000000 },
+ { 0x14800a85, 0x1c000008 },
+ { 0x14800a8f, 0x1c000002 },
+ { 0x14800a93, 0x1c000015 },
+ { 0x14800aaa, 0x1c000006 },
+ { 0x14800ab2, 0x1c000001 },
+ { 0x14800ab5, 0x1c000004 },
+ { 0x14000abc, 0x30000000 },
+ { 0x14000abd, 0x1c000000 },
+ { 0x14800abe, 0x28000002 },
+ { 0x14800ac1, 0x30000004 },
+ { 0x14800ac7, 0x30000001 },
+ { 0x14000ac9, 0x28000000 },
+ { 0x14800acb, 0x28000001 },
+ { 0x14000acd, 0x30000000 },
+ { 0x14000ad0, 0x1c000000 },
+ { 0x14800ae0, 0x1c000001 },
+ { 0x14800ae2, 0x30000001 },
+ { 0x14800ae6, 0x34000009 },
+ { 0x14000af1, 0x5c000000 },
+ { 0x2b000b01, 0x30000000 },
+ { 0x2b800b02, 0x28000001 },
+ { 0x2b800b05, 0x1c000007 },
+ { 0x2b800b0f, 0x1c000001 },
+ { 0x2b800b13, 0x1c000015 },
+ { 0x2b800b2a, 0x1c000006 },
+ { 0x2b800b32, 0x1c000001 },
+ { 0x2b800b35, 0x1c000004 },
+ { 0x2b000b3c, 0x30000000 },
+ { 0x2b000b3d, 0x1c000000 },
+ { 0x2b000b3e, 0x28000000 },
+ { 0x2b000b3f, 0x30000000 },
+ { 0x2b000b40, 0x28000000 },
+ { 0x2b800b41, 0x30000002 },
+ { 0x2b800b47, 0x28000001 },
+ { 0x2b800b4b, 0x28000001 },
+ { 0x2b000b4d, 0x30000000 },
+ { 0x2b000b56, 0x30000000 },
+ { 0x2b000b57, 0x28000000 },
+ { 0x2b800b5c, 0x1c000001 },
+ { 0x2b800b5f, 0x1c000002 },
+ { 0x2b800b66, 0x34000009 },
+ { 0x2b000b70, 0x68000000 },
+ { 0x2b000b71, 0x1c000000 },
+ { 0x35000b82, 0x30000000 },
+ { 0x35000b83, 0x1c000000 },
+ { 0x35800b85, 0x1c000005 },
+ { 0x35800b8e, 0x1c000002 },
+ { 0x35800b92, 0x1c000003 },
+ { 0x35800b99, 0x1c000001 },
+ { 0x35000b9c, 0x1c000000 },
+ { 0x35800b9e, 0x1c000001 },
+ { 0x35800ba3, 0x1c000001 },
+ { 0x35800ba8, 0x1c000002 },
+ { 0x35800bae, 0x1c00000b },
+ { 0x35800bbe, 0x28000001 },
+ { 0x35000bc0, 0x30000000 },
+ { 0x35800bc1, 0x28000001 },
+ { 0x35800bc6, 0x28000002 },
+ { 0x35800bca, 0x28000002 },
+ { 0x35000bcd, 0x30000000 },
+ { 0x35000bd7, 0x28000000 },
+ { 0x35800be6, 0x34000009 },
+ { 0x35800bf0, 0x3c000002 },
+ { 0x35800bf3, 0x68000005 },
+ { 0x35000bf9, 0x5c000000 },
+ { 0x35000bfa, 0x68000000 },
+ { 0x36800c01, 0x28000002 },
+ { 0x36800c05, 0x1c000007 },
+ { 0x36800c0e, 0x1c000002 },
+ { 0x36800c12, 0x1c000016 },
+ { 0x36800c2a, 0x1c000009 },
+ { 0x36800c35, 0x1c000004 },
+ { 0x36800c3e, 0x30000002 },
+ { 0x36800c41, 0x28000003 },
+ { 0x36800c46, 0x30000002 },
+ { 0x36800c4a, 0x30000003 },
+ { 0x36800c55, 0x30000001 },
+ { 0x36800c60, 0x1c000001 },
+ { 0x36800c66, 0x34000009 },
+ { 0x1c800c82, 0x28000001 },
+ { 0x1c800c85, 0x1c000007 },
+ { 0x1c800c8e, 0x1c000002 },
+ { 0x1c800c92, 0x1c000016 },
+ { 0x1c800caa, 0x1c000009 },
+ { 0x1c800cb5, 0x1c000004 },
+ { 0x1c000cbc, 0x30000000 },
+ { 0x1c000cbd, 0x1c000000 },
+ { 0x1c000cbe, 0x28000000 },
+ { 0x1c000cbf, 0x30000000 },
+ { 0x1c800cc0, 0x28000004 },
+ { 0x1c000cc6, 0x30000000 },
+ { 0x1c800cc7, 0x28000001 },
+ { 0x1c800cca, 0x28000001 },
+ { 0x1c800ccc, 0x30000001 },
+ { 0x1c800cd5, 0x28000001 },
+ { 0x1c000cde, 0x1c000000 },
+ { 0x1c800ce0, 0x1c000001 },
+ { 0x1c800ce2, 0x30000001 },
+ { 0x1c800ce6, 0x34000009 },
+ { 0x1c800cf1, 0x68000001 },
+ { 0x24800d02, 0x28000001 },
+ { 0x24800d05, 0x1c000007 },
+ { 0x24800d0e, 0x1c000002 },
+ { 0x24800d12, 0x1c000016 },
+ { 0x24800d2a, 0x1c00000f },
+ { 0x24800d3e, 0x28000002 },
+ { 0x24800d41, 0x30000002 },
+ { 0x24800d46, 0x28000002 },
+ { 0x24800d4a, 0x28000002 },
+ { 0x24000d4d, 0x30000000 },
+ { 0x24000d57, 0x28000000 },
+ { 0x24800d60, 0x1c000001 },
+ { 0x24800d66, 0x34000009 },
+ { 0x2f800d82, 0x28000001 },
+ { 0x2f800d85, 0x1c000011 },
+ { 0x2f800d9a, 0x1c000017 },
+ { 0x2f800db3, 0x1c000008 },
+ { 0x2f000dbd, 0x1c000000 },
+ { 0x2f800dc0, 0x1c000006 },
+ { 0x2f000dca, 0x30000000 },
+ { 0x2f800dcf, 0x28000002 },
+ { 0x2f800dd2, 0x30000002 },
+ { 0x2f000dd6, 0x30000000 },
+ { 0x2f800dd8, 0x28000007 },
+ { 0x2f800df2, 0x28000001 },
+ { 0x2f000df4, 0x54000000 },
+ { 0x38800e01, 0x1c00002f },
+ { 0x38000e31, 0x30000000 },
+ { 0x38800e32, 0x1c000001 },
+ { 0x38800e34, 0x30000006 },
+ { 0x09000e3f, 0x5c000000 },
+ { 0x38800e40, 0x1c000005 },
+ { 0x38000e46, 0x18000000 },
+ { 0x38800e47, 0x30000007 },
+ { 0x38000e4f, 0x54000000 },
+ { 0x38800e50, 0x34000009 },
+ { 0x38800e5a, 0x54000001 },
+ { 0x20800e81, 0x1c000001 },
+ { 0x20000e84, 0x1c000000 },
+ { 0x20800e87, 0x1c000001 },
+ { 0x20000e8a, 0x1c000000 },
+ { 0x20000e8d, 0x1c000000 },
+ { 0x20800e94, 0x1c000003 },
+ { 0x20800e99, 0x1c000006 },
+ { 0x20800ea1, 0x1c000002 },
+ { 0x20000ea5, 0x1c000000 },
+ { 0x20000ea7, 0x1c000000 },
+ { 0x20800eaa, 0x1c000001 },
+ { 0x20800ead, 0x1c000003 },
+ { 0x20000eb1, 0x30000000 },
+ { 0x20800eb2, 0x1c000001 },
+ { 0x20800eb4, 0x30000005 },
+ { 0x20800ebb, 0x30000001 },
+ { 0x20000ebd, 0x1c000000 },
+ { 0x20800ec0, 0x1c000004 },
+ { 0x20000ec6, 0x18000000 },
+ { 0x20800ec8, 0x30000005 },
+ { 0x20800ed0, 0x34000009 },
+ { 0x20800edc, 0x1c000001 },
+ { 0x39000f00, 0x1c000000 },
+ { 0x39800f01, 0x68000002 },
+ { 0x39800f04, 0x5400000e },
+ { 0x39800f13, 0x68000004 },
+ { 0x39800f18, 0x30000001 },
+ { 0x39800f1a, 0x68000005 },
+ { 0x39800f20, 0x34000009 },
+ { 0x39800f2a, 0x3c000009 },
+ { 0x39000f34, 0x68000000 },
+ { 0x39000f35, 0x30000000 },
+ { 0x39000f36, 0x68000000 },
+ { 0x39000f37, 0x30000000 },
+ { 0x39000f38, 0x68000000 },
+ { 0x39000f39, 0x30000000 },
+ { 0x39000f3a, 0x58000000 },
+ { 0x39000f3b, 0x48000000 },
+ { 0x39000f3c, 0x58000000 },
+ { 0x39000f3d, 0x48000000 },
+ { 0x39800f3e, 0x28000001 },
+ { 0x39800f40, 0x1c000007 },
+ { 0x39800f49, 0x1c000021 },
+ { 0x39800f71, 0x3000000d },
+ { 0x39000f7f, 0x28000000 },
+ { 0x39800f80, 0x30000004 },
+ { 0x39000f85, 0x54000000 },
+ { 0x39800f86, 0x30000001 },
+ { 0x39800f88, 0x1c000003 },
+ { 0x39800f90, 0x30000007 },
+ { 0x39800f99, 0x30000023 },
+ { 0x39800fbe, 0x68000007 },
+ { 0x39000fc6, 0x30000000 },
+ { 0x39800fc7, 0x68000005 },
+ { 0x39000fcf, 0x68000000 },
+ { 0x39800fd0, 0x54000001 },
+ { 0x26801000, 0x1c000021 },
+ { 0x26801023, 0x1c000004 },
+ { 0x26801029, 0x1c000001 },
+ { 0x2600102c, 0x28000000 },
+ { 0x2680102d, 0x30000003 },
+ { 0x26001031, 0x28000000 },
+ { 0x26001032, 0x30000000 },
+ { 0x26801036, 0x30000001 },
+ { 0x26001038, 0x28000000 },
+ { 0x26001039, 0x30000000 },
+ { 0x26801040, 0x34000009 },
+ { 0x2680104a, 0x54000005 },
+ { 0x26801050, 0x1c000005 },
+ { 0x26801056, 0x28000001 },
+ { 0x26801058, 0x30000001 },
+ { 0x100010a0, 0x24001c60 },
+ { 0x100010a1, 0x24001c60 },
+ { 0x100010a2, 0x24001c60 },
+ { 0x100010a3, 0x24001c60 },
+ { 0x100010a4, 0x24001c60 },
+ { 0x100010a5, 0x24001c60 },
+ { 0x100010a6, 0x24001c60 },
+ { 0x100010a7, 0x24001c60 },
+ { 0x100010a8, 0x24001c60 },
+ { 0x100010a9, 0x24001c60 },
+ { 0x100010aa, 0x24001c60 },
+ { 0x100010ab, 0x24001c60 },
+ { 0x100010ac, 0x24001c60 },
+ { 0x100010ad, 0x24001c60 },
+ { 0x100010ae, 0x24001c60 },
+ { 0x100010af, 0x24001c60 },
+ { 0x100010b0, 0x24001c60 },
+ { 0x100010b1, 0x24001c60 },
+ { 0x100010b2, 0x24001c60 },
+ { 0x100010b3, 0x24001c60 },
+ { 0x100010b4, 0x24001c60 },
+ { 0x100010b5, 0x24001c60 },
+ { 0x100010b6, 0x24001c60 },
+ { 0x100010b7, 0x24001c60 },
+ { 0x100010b8, 0x24001c60 },
+ { 0x100010b9, 0x24001c60 },
+ { 0x100010ba, 0x24001c60 },
+ { 0x100010bb, 0x24001c60 },
+ { 0x100010bc, 0x24001c60 },
+ { 0x100010bd, 0x24001c60 },
+ { 0x100010be, 0x24001c60 },
+ { 0x100010bf, 0x24001c60 },
+ { 0x100010c0, 0x24001c60 },
+ { 0x100010c1, 0x24001c60 },
+ { 0x100010c2, 0x24001c60 },
+ { 0x100010c3, 0x24001c60 },
+ { 0x100010c4, 0x24001c60 },
+ { 0x100010c5, 0x24001c60 },
+ { 0x108010d0, 0x1c00002a },
+ { 0x090010fb, 0x54000000 },
+ { 0x100010fc, 0x18000000 },
+ { 0x17801100, 0x1c000059 },
+ { 0x1780115f, 0x1c000043 },
+ { 0x178011a8, 0x1c000051 },
+ { 0x0f801200, 0x1c000048 },
+ { 0x0f80124a, 0x1c000003 },
+ { 0x0f801250, 0x1c000006 },
+ { 0x0f001258, 0x1c000000 },
+ { 0x0f80125a, 0x1c000003 },
+ { 0x0f801260, 0x1c000028 },
+ { 0x0f80128a, 0x1c000003 },
+ { 0x0f801290, 0x1c000020 },
+ { 0x0f8012b2, 0x1c000003 },
+ { 0x0f8012b8, 0x1c000006 },
+ { 0x0f0012c0, 0x1c000000 },
+ { 0x0f8012c2, 0x1c000003 },
+ { 0x0f8012c8, 0x1c00000e },
+ { 0x0f8012d8, 0x1c000038 },
+ { 0x0f801312, 0x1c000003 },
+ { 0x0f801318, 0x1c000042 },
+ { 0x0f00135f, 0x30000000 },
+ { 0x0f001360, 0x68000000 },
+ { 0x0f801361, 0x54000007 },
+ { 0x0f801369, 0x3c000013 },
+ { 0x0f801380, 0x1c00000f },
+ { 0x0f801390, 0x68000009 },
+ { 0x088013a0, 0x1c000054 },
+ { 0x07801401, 0x1c00026b },
+ { 0x0780166d, 0x54000001 },
+ { 0x0780166f, 0x1c000007 },
+ { 0x28001680, 0x74000000 },
+ { 0x28801681, 0x1c000019 },
+ { 0x2800169b, 0x58000000 },
+ { 0x2800169c, 0x48000000 },
+ { 0x2d8016a0, 0x1c00004a },
+ { 0x098016eb, 0x54000002 },
+ { 0x2d8016ee, 0x38000002 },
+ { 0x32801700, 0x1c00000c },
+ { 0x3280170e, 0x1c000003 },
+ { 0x32801712, 0x30000002 },
+ { 0x18801720, 0x1c000011 },
+ { 0x18801732, 0x30000002 },
+ { 0x09801735, 0x54000001 },
+ { 0x06801740, 0x1c000011 },
+ { 0x06801752, 0x30000001 },
+ { 0x33801760, 0x1c00000c },
+ { 0x3380176e, 0x1c000002 },
+ { 0x33801772, 0x30000001 },
+ { 0x1f801780, 0x1c000033 },
+ { 0x1f8017b4, 0x04000001 },
+ { 0x1f0017b6, 0x28000000 },
+ { 0x1f8017b7, 0x30000006 },
+ { 0x1f8017be, 0x28000007 },
+ { 0x1f0017c6, 0x30000000 },
+ { 0x1f8017c7, 0x28000001 },
+ { 0x1f8017c9, 0x3000000a },
+ { 0x1f8017d4, 0x54000002 },
+ { 0x1f0017d7, 0x18000000 },
+ { 0x1f8017d8, 0x54000002 },
+ { 0x1f0017db, 0x5c000000 },
+ { 0x1f0017dc, 0x1c000000 },
+ { 0x1f0017dd, 0x30000000 },
+ { 0x1f8017e0, 0x34000009 },
+ { 0x1f8017f0, 0x3c000009 },
+ { 0x25801800, 0x54000005 },
+ { 0x25001806, 0x44000000 },
+ { 0x25801807, 0x54000003 },
+ { 0x2580180b, 0x30000002 },
+ { 0x2500180e, 0x74000000 },
+ { 0x25801810, 0x34000009 },
+ { 0x25801820, 0x1c000022 },
+ { 0x25001843, 0x18000000 },
+ { 0x25801844, 0x1c000033 },
+ { 0x25801880, 0x1c000028 },
+ { 0x250018a9, 0x30000000 },
+ { 0x22801900, 0x1c00001c },
+ { 0x22801920, 0x30000002 },
+ { 0x22801923, 0x28000003 },
+ { 0x22801927, 0x30000001 },
+ { 0x22801929, 0x28000002 },
+ { 0x22801930, 0x28000001 },
+ { 0x22001932, 0x30000000 },
+ { 0x22801933, 0x28000005 },
+ { 0x22801939, 0x30000002 },
+ { 0x22001940, 0x68000000 },
+ { 0x22801944, 0x54000001 },
+ { 0x22801946, 0x34000009 },
+ { 0x34801950, 0x1c00001d },
+ { 0x34801970, 0x1c000004 },
+ { 0x27801980, 0x1c000029 },
+ { 0x278019b0, 0x28000010 },
+ { 0x278019c1, 0x1c000006 },
+ { 0x278019c8, 0x28000001 },
+ { 0x278019d0, 0x34000009 },
+ { 0x278019de, 0x54000001 },
+ { 0x1f8019e0, 0x6800001f },
+ { 0x05801a00, 0x1c000016 },
+ { 0x05801a17, 0x30000001 },
+ { 0x05801a19, 0x28000002 },
+ { 0x05801a1e, 0x54000001 },
+ { 0x3d801b00, 0x30000003 },
+ { 0x3d001b04, 0x28000000 },
+ { 0x3d801b05, 0x1c00002e },
+ { 0x3d001b34, 0x30000000 },
+ { 0x3d001b35, 0x28000000 },
+ { 0x3d801b36, 0x30000004 },
+ { 0x3d001b3b, 0x28000000 },
+ { 0x3d001b3c, 0x30000000 },
+ { 0x3d801b3d, 0x28000004 },
+ { 0x3d001b42, 0x30000000 },
+ { 0x3d801b43, 0x28000001 },
+ { 0x3d801b45, 0x1c000006 },
+ { 0x3d801b50, 0x34000009 },
+ { 0x3d801b5a, 0x54000006 },
+ { 0x3d801b61, 0x68000009 },
+ { 0x3d801b6b, 0x30000008 },
+ { 0x3d801b74, 0x68000008 },
+ { 0x21801d00, 0x1400002b },
+ { 0x21801d2c, 0x18000035 },
+ { 0x21801d62, 0x14000015 },
+ { 0x0c001d78, 0x18000000 },
+ { 0x21801d79, 0x14000003 },
+ { 0x21001d7d, 0x14000ee6 },
+ { 0x21801d7e, 0x1400001c },
+ { 0x21801d9b, 0x18000024 },
+ { 0x1b801dc0, 0x3000000a },
+ { 0x1b801dfe, 0x30000001 },
+ { 0x21001e00, 0x24000001 },
+ { 0x21001e01, 0x1400ffff },
+ { 0x21001e02, 0x24000001 },
+ { 0x21001e03, 0x1400ffff },
+ { 0x21001e04, 0x24000001 },
+ { 0x21001e05, 0x1400ffff },
+ { 0x21001e06, 0x24000001 },
+ { 0x21001e07, 0x1400ffff },
+ { 0x21001e08, 0x24000001 },
+ { 0x21001e09, 0x1400ffff },
+ { 0x21001e0a, 0x24000001 },
+ { 0x21001e0b, 0x1400ffff },
+ { 0x21001e0c, 0x24000001 },
+ { 0x21001e0d, 0x1400ffff },
+ { 0x21001e0e, 0x24000001 },
+ { 0x21001e0f, 0x1400ffff },
+ { 0x21001e10, 0x24000001 },
+ { 0x21001e11, 0x1400ffff },
+ { 0x21001e12, 0x24000001 },
+ { 0x21001e13, 0x1400ffff },
+ { 0x21001e14, 0x24000001 },
+ { 0x21001e15, 0x1400ffff },
+ { 0x21001e16, 0x24000001 },
+ { 0x21001e17, 0x1400ffff },
+ { 0x21001e18, 0x24000001 },
+ { 0x21001e19, 0x1400ffff },
+ { 0x21001e1a, 0x24000001 },
+ { 0x21001e1b, 0x1400ffff },
+ { 0x21001e1c, 0x24000001 },
+ { 0x21001e1d, 0x1400ffff },
+ { 0x21001e1e, 0x24000001 },
+ { 0x21001e1f, 0x1400ffff },
+ { 0x21001e20, 0x24000001 },
+ { 0x21001e21, 0x1400ffff },
+ { 0x21001e22, 0x24000001 },
+ { 0x21001e23, 0x1400ffff },
+ { 0x21001e24, 0x24000001 },
+ { 0x21001e25, 0x1400ffff },
+ { 0x21001e26, 0x24000001 },
+ { 0x21001e27, 0x1400ffff },
+ { 0x21001e28, 0x24000001 },
+ { 0x21001e29, 0x1400ffff },
+ { 0x21001e2a, 0x24000001 },
+ { 0x21001e2b, 0x1400ffff },
+ { 0x21001e2c, 0x24000001 },
+ { 0x21001e2d, 0x1400ffff },
+ { 0x21001e2e, 0x24000001 },
+ { 0x21001e2f, 0x1400ffff },
+ { 0x21001e30, 0x24000001 },
+ { 0x21001e31, 0x1400ffff },
+ { 0x21001e32, 0x24000001 },
+ { 0x21001e33, 0x1400ffff },
+ { 0x21001e34, 0x24000001 },
+ { 0x21001e35, 0x1400ffff },
+ { 0x21001e36, 0x24000001 },
+ { 0x21001e37, 0x1400ffff },
+ { 0x21001e38, 0x24000001 },
+ { 0x21001e39, 0x1400ffff },
+ { 0x21001e3a, 0x24000001 },
+ { 0x21001e3b, 0x1400ffff },
+ { 0x21001e3c, 0x24000001 },
+ { 0x21001e3d, 0x1400ffff },
+ { 0x21001e3e, 0x24000001 },
+ { 0x21001e3f, 0x1400ffff },
+ { 0x21001e40, 0x24000001 },
+ { 0x21001e41, 0x1400ffff },
+ { 0x21001e42, 0x24000001 },
+ { 0x21001e43, 0x1400ffff },
+ { 0x21001e44, 0x24000001 },
+ { 0x21001e45, 0x1400ffff },
+ { 0x21001e46, 0x24000001 },
+ { 0x21001e47, 0x1400ffff },
+ { 0x21001e48, 0x24000001 },
+ { 0x21001e49, 0x1400ffff },
+ { 0x21001e4a, 0x24000001 },
+ { 0x21001e4b, 0x1400ffff },
+ { 0x21001e4c, 0x24000001 },
+ { 0x21001e4d, 0x1400ffff },
+ { 0x21001e4e, 0x24000001 },
+ { 0x21001e4f, 0x1400ffff },
+ { 0x21001e50, 0x24000001 },
+ { 0x21001e51, 0x1400ffff },
+ { 0x21001e52, 0x24000001 },
+ { 0x21001e53, 0x1400ffff },
+ { 0x21001e54, 0x24000001 },
+ { 0x21001e55, 0x1400ffff },
+ { 0x21001e56, 0x24000001 },
+ { 0x21001e57, 0x1400ffff },
+ { 0x21001e58, 0x24000001 },
+ { 0x21001e59, 0x1400ffff },
+ { 0x21001e5a, 0x24000001 },
+ { 0x21001e5b, 0x1400ffff },
+ { 0x21001e5c, 0x24000001 },
+ { 0x21001e5d, 0x1400ffff },
+ { 0x21001e5e, 0x24000001 },
+ { 0x21001e5f, 0x1400ffff },
+ { 0x21001e60, 0x24000001 },
+ { 0x21001e61, 0x1400ffff },
+ { 0x21001e62, 0x24000001 },
+ { 0x21001e63, 0x1400ffff },
+ { 0x21001e64, 0x24000001 },
+ { 0x21001e65, 0x1400ffff },
+ { 0x21001e66, 0x24000001 },
+ { 0x21001e67, 0x1400ffff },
+ { 0x21001e68, 0x24000001 },
+ { 0x21001e69, 0x1400ffff },
+ { 0x21001e6a, 0x24000001 },
+ { 0x21001e6b, 0x1400ffff },
+ { 0x21001e6c, 0x24000001 },
+ { 0x21001e6d, 0x1400ffff },
+ { 0x21001e6e, 0x24000001 },
+ { 0x21001e6f, 0x1400ffff },
+ { 0x21001e70, 0x24000001 },
+ { 0x21001e71, 0x1400ffff },
+ { 0x21001e72, 0x24000001 },
+ { 0x21001e73, 0x1400ffff },
+ { 0x21001e74, 0x24000001 },
+ { 0x21001e75, 0x1400ffff },
+ { 0x21001e76, 0x24000001 },
+ { 0x21001e77, 0x1400ffff },
+ { 0x21001e78, 0x24000001 },
+ { 0x21001e79, 0x1400ffff },
+ { 0x21001e7a, 0x24000001 },
+ { 0x21001e7b, 0x1400ffff },
+ { 0x21001e7c, 0x24000001 },
+ { 0x21001e7d, 0x1400ffff },
+ { 0x21001e7e, 0x24000001 },
+ { 0x21001e7f, 0x1400ffff },
+ { 0x21001e80, 0x24000001 },
+ { 0x21001e81, 0x1400ffff },
+ { 0x21001e82, 0x24000001 },
+ { 0x21001e83, 0x1400ffff },
+ { 0x21001e84, 0x24000001 },
+ { 0x21001e85, 0x1400ffff },
+ { 0x21001e86, 0x24000001 },
+ { 0x21001e87, 0x1400ffff },
+ { 0x21001e88, 0x24000001 },
+ { 0x21001e89, 0x1400ffff },
+ { 0x21001e8a, 0x24000001 },
+ { 0x21001e8b, 0x1400ffff },
+ { 0x21001e8c, 0x24000001 },
+ { 0x21001e8d, 0x1400ffff },
+ { 0x21001e8e, 0x24000001 },
+ { 0x21001e8f, 0x1400ffff },
+ { 0x21001e90, 0x24000001 },
+ { 0x21001e91, 0x1400ffff },
+ { 0x21001e92, 0x24000001 },
+ { 0x21001e93, 0x1400ffff },
+ { 0x21001e94, 0x24000001 },
+ { 0x21001e95, 0x1400ffff },
+ { 0x21801e96, 0x14000004 },
+ { 0x21001e9b, 0x1400ffc5 },
+ { 0x21001ea0, 0x24000001 },
+ { 0x21001ea1, 0x1400ffff },
+ { 0x21001ea2, 0x24000001 },
+ { 0x21001ea3, 0x1400ffff },
+ { 0x21001ea4, 0x24000001 },
+ { 0x21001ea5, 0x1400ffff },
+ { 0x21001ea6, 0x24000001 },
+ { 0x21001ea7, 0x1400ffff },
+ { 0x21001ea8, 0x24000001 },
+ { 0x21001ea9, 0x1400ffff },
+ { 0x21001eaa, 0x24000001 },
+ { 0x21001eab, 0x1400ffff },
+ { 0x21001eac, 0x24000001 },
+ { 0x21001ead, 0x1400ffff },
+ { 0x21001eae, 0x24000001 },
+ { 0x21001eaf, 0x1400ffff },
+ { 0x21001eb0, 0x24000001 },
+ { 0x21001eb1, 0x1400ffff },
+ { 0x21001eb2, 0x24000001 },
+ { 0x21001eb3, 0x1400ffff },
+ { 0x21001eb4, 0x24000001 },
+ { 0x21001eb5, 0x1400ffff },
+ { 0x21001eb6, 0x24000001 },
+ { 0x21001eb7, 0x1400ffff },
+ { 0x21001eb8, 0x24000001 },
+ { 0x21001eb9, 0x1400ffff },
+ { 0x21001eba, 0x24000001 },
+ { 0x21001ebb, 0x1400ffff },
+ { 0x21001ebc, 0x24000001 },
+ { 0x21001ebd, 0x1400ffff },
+ { 0x21001ebe, 0x24000001 },
+ { 0x21001ebf, 0x1400ffff },
+ { 0x21001ec0, 0x24000001 },
+ { 0x21001ec1, 0x1400ffff },
+ { 0x21001ec2, 0x24000001 },
+ { 0x21001ec3, 0x1400ffff },
+ { 0x21001ec4, 0x24000001 },
+ { 0x21001ec5, 0x1400ffff },
+ { 0x21001ec6, 0x24000001 },
+ { 0x21001ec7, 0x1400ffff },
+ { 0x21001ec8, 0x24000001 },
+ { 0x21001ec9, 0x1400ffff },
+ { 0x21001eca, 0x24000001 },
+ { 0x21001ecb, 0x1400ffff },
+ { 0x21001ecc, 0x24000001 },
+ { 0x21001ecd, 0x1400ffff },
+ { 0x21001ece, 0x24000001 },
+ { 0x21001ecf, 0x1400ffff },
+ { 0x21001ed0, 0x24000001 },
+ { 0x21001ed1, 0x1400ffff },
+ { 0x21001ed2, 0x24000001 },
+ { 0x21001ed3, 0x1400ffff },
+ { 0x21001ed4, 0x24000001 },
+ { 0x21001ed5, 0x1400ffff },
+ { 0x21001ed6, 0x24000001 },
+ { 0x21001ed7, 0x1400ffff },
+ { 0x21001ed8, 0x24000001 },
+ { 0x21001ed9, 0x1400ffff },
+ { 0x21001eda, 0x24000001 },
+ { 0x21001edb, 0x1400ffff },
+ { 0x21001edc, 0x24000001 },
+ { 0x21001edd, 0x1400ffff },
+ { 0x21001ede, 0x24000001 },
+ { 0x21001edf, 0x1400ffff },
+ { 0x21001ee0, 0x24000001 },
+ { 0x21001ee1, 0x1400ffff },
+ { 0x21001ee2, 0x24000001 },
+ { 0x21001ee3, 0x1400ffff },
+ { 0x21001ee4, 0x24000001 },
+ { 0x21001ee5, 0x1400ffff },
+ { 0x21001ee6, 0x24000001 },
+ { 0x21001ee7, 0x1400ffff },
+ { 0x21001ee8, 0x24000001 },
+ { 0x21001ee9, 0x1400ffff },
+ { 0x21001eea, 0x24000001 },
+ { 0x21001eeb, 0x1400ffff },
+ { 0x21001eec, 0x24000001 },
+ { 0x21001eed, 0x1400ffff },
+ { 0x21001eee, 0x24000001 },
+ { 0x21001eef, 0x1400ffff },
+ { 0x21001ef0, 0x24000001 },
+ { 0x21001ef1, 0x1400ffff },
+ { 0x21001ef2, 0x24000001 },
+ { 0x21001ef3, 0x1400ffff },
+ { 0x21001ef4, 0x24000001 },
+ { 0x21001ef5, 0x1400ffff },
+ { 0x21001ef6, 0x24000001 },
+ { 0x21001ef7, 0x1400ffff },
+ { 0x21001ef8, 0x24000001 },
+ { 0x21001ef9, 0x1400ffff },
+ { 0x13001f00, 0x14000008 },
+ { 0x13001f01, 0x14000008 },
+ { 0x13001f02, 0x14000008 },
+ { 0x13001f03, 0x14000008 },
+ { 0x13001f04, 0x14000008 },
+ { 0x13001f05, 0x14000008 },
+ { 0x13001f06, 0x14000008 },
+ { 0x13001f07, 0x14000008 },
+ { 0x13001f08, 0x2400fff8 },
+ { 0x13001f09, 0x2400fff8 },
+ { 0x13001f0a, 0x2400fff8 },
+ { 0x13001f0b, 0x2400fff8 },
+ { 0x13001f0c, 0x2400fff8 },
+ { 0x13001f0d, 0x2400fff8 },
+ { 0x13001f0e, 0x2400fff8 },
+ { 0x13001f0f, 0x2400fff8 },
+ { 0x13001f10, 0x14000008 },
+ { 0x13001f11, 0x14000008 },
+ { 0x13001f12, 0x14000008 },
+ { 0x13001f13, 0x14000008 },
+ { 0x13001f14, 0x14000008 },
+ { 0x13001f15, 0x14000008 },
+ { 0x13001f18, 0x2400fff8 },
+ { 0x13001f19, 0x2400fff8 },
+ { 0x13001f1a, 0x2400fff8 },
+ { 0x13001f1b, 0x2400fff8 },
+ { 0x13001f1c, 0x2400fff8 },
+ { 0x13001f1d, 0x2400fff8 },
+ { 0x13001f20, 0x14000008 },
+ { 0x13001f21, 0x14000008 },
+ { 0x13001f22, 0x14000008 },
+ { 0x13001f23, 0x14000008 },
+ { 0x13001f24, 0x14000008 },
+ { 0x13001f25, 0x14000008 },
+ { 0x13001f26, 0x14000008 },
+ { 0x13001f27, 0x14000008 },
+ { 0x13001f28, 0x2400fff8 },
+ { 0x13001f29, 0x2400fff8 },
+ { 0x13001f2a, 0x2400fff8 },
+ { 0x13001f2b, 0x2400fff8 },
+ { 0x13001f2c, 0x2400fff8 },
+ { 0x13001f2d, 0x2400fff8 },
+ { 0x13001f2e, 0x2400fff8 },
+ { 0x13001f2f, 0x2400fff8 },
+ { 0x13001f30, 0x14000008 },
+ { 0x13001f31, 0x14000008 },
+ { 0x13001f32, 0x14000008 },
+ { 0x13001f33, 0x14000008 },
+ { 0x13001f34, 0x14000008 },
+ { 0x13001f35, 0x14000008 },
+ { 0x13001f36, 0x14000008 },
+ { 0x13001f37, 0x14000008 },
+ { 0x13001f38, 0x2400fff8 },
+ { 0x13001f39, 0x2400fff8 },
+ { 0x13001f3a, 0x2400fff8 },
+ { 0x13001f3b, 0x2400fff8 },
+ { 0x13001f3c, 0x2400fff8 },
+ { 0x13001f3d, 0x2400fff8 },
+ { 0x13001f3e, 0x2400fff8 },
+ { 0x13001f3f, 0x2400fff8 },
+ { 0x13001f40, 0x14000008 },
+ { 0x13001f41, 0x14000008 },
+ { 0x13001f42, 0x14000008 },
+ { 0x13001f43, 0x14000008 },
+ { 0x13001f44, 0x14000008 },
+ { 0x13001f45, 0x14000008 },
+ { 0x13001f48, 0x2400fff8 },
+ { 0x13001f49, 0x2400fff8 },
+ { 0x13001f4a, 0x2400fff8 },
+ { 0x13001f4b, 0x2400fff8 },
+ { 0x13001f4c, 0x2400fff8 },
+ { 0x13001f4d, 0x2400fff8 },
+ { 0x13001f50, 0x14000000 },
+ { 0x13001f51, 0x14000008 },
+ { 0x13001f52, 0x14000000 },
+ { 0x13001f53, 0x14000008 },
+ { 0x13001f54, 0x14000000 },
+ { 0x13001f55, 0x14000008 },
+ { 0x13001f56, 0x14000000 },
+ { 0x13001f57, 0x14000008 },
+ { 0x13001f59, 0x2400fff8 },
+ { 0x13001f5b, 0x2400fff8 },
+ { 0x13001f5d, 0x2400fff8 },
+ { 0x13001f5f, 0x2400fff8 },
+ { 0x13001f60, 0x14000008 },
+ { 0x13001f61, 0x14000008 },
+ { 0x13001f62, 0x14000008 },
+ { 0x13001f63, 0x14000008 },
+ { 0x13001f64, 0x14000008 },
+ { 0x13001f65, 0x14000008 },
+ { 0x13001f66, 0x14000008 },
+ { 0x13001f67, 0x14000008 },
+ { 0x13001f68, 0x2400fff8 },
+ { 0x13001f69, 0x2400fff8 },
+ { 0x13001f6a, 0x2400fff8 },
+ { 0x13001f6b, 0x2400fff8 },
+ { 0x13001f6c, 0x2400fff8 },
+ { 0x13001f6d, 0x2400fff8 },
+ { 0x13001f6e, 0x2400fff8 },
+ { 0x13001f6f, 0x2400fff8 },
+ { 0x13001f70, 0x1400004a },
+ { 0x13001f71, 0x1400004a },
+ { 0x13001f72, 0x14000056 },
+ { 0x13001f73, 0x14000056 },
+ { 0x13001f74, 0x14000056 },
+ { 0x13001f75, 0x14000056 },
+ { 0x13001f76, 0x14000064 },
+ { 0x13001f77, 0x14000064 },
+ { 0x13001f78, 0x14000080 },
+ { 0x13001f79, 0x14000080 },
+ { 0x13001f7a, 0x14000070 },
+ { 0x13001f7b, 0x14000070 },
+ { 0x13001f7c, 0x1400007e },
+ { 0x13001f7d, 0x1400007e },
+ { 0x13001f80, 0x14000008 },
+ { 0x13001f81, 0x14000008 },
+ { 0x13001f82, 0x14000008 },
+ { 0x13001f83, 0x14000008 },
+ { 0x13001f84, 0x14000008 },
+ { 0x13001f85, 0x14000008 },
+ { 0x13001f86, 0x14000008 },
+ { 0x13001f87, 0x14000008 },
+ { 0x13001f88, 0x2000fff8 },
+ { 0x13001f89, 0x2000fff8 },
+ { 0x13001f8a, 0x2000fff8 },
+ { 0x13001f8b, 0x2000fff8 },
+ { 0x13001f8c, 0x2000fff8 },
+ { 0x13001f8d, 0x2000fff8 },
+ { 0x13001f8e, 0x2000fff8 },
+ { 0x13001f8f, 0x2000fff8 },
+ { 0x13001f90, 0x14000008 },
+ { 0x13001f91, 0x14000008 },
+ { 0x13001f92, 0x14000008 },
+ { 0x13001f93, 0x14000008 },
+ { 0x13001f94, 0x14000008 },
+ { 0x13001f95, 0x14000008 },
+ { 0x13001f96, 0x14000008 },
+ { 0x13001f97, 0x14000008 },
+ { 0x13001f98, 0x2000fff8 },
+ { 0x13001f99, 0x2000fff8 },
+ { 0x13001f9a, 0x2000fff8 },
+ { 0x13001f9b, 0x2000fff8 },
+ { 0x13001f9c, 0x2000fff8 },
+ { 0x13001f9d, 0x2000fff8 },
+ { 0x13001f9e, 0x2000fff8 },
+ { 0x13001f9f, 0x2000fff8 },
+ { 0x13001fa0, 0x14000008 },
+ { 0x13001fa1, 0x14000008 },
+ { 0x13001fa2, 0x14000008 },
+ { 0x13001fa3, 0x14000008 },
+ { 0x13001fa4, 0x14000008 },
+ { 0x13001fa5, 0x14000008 },
+ { 0x13001fa6, 0x14000008 },
+ { 0x13001fa7, 0x14000008 },
+ { 0x13001fa8, 0x2000fff8 },
+ { 0x13001fa9, 0x2000fff8 },
+ { 0x13001faa, 0x2000fff8 },
+ { 0x13001fab, 0x2000fff8 },
+ { 0x13001fac, 0x2000fff8 },
+ { 0x13001fad, 0x2000fff8 },
+ { 0x13001fae, 0x2000fff8 },
+ { 0x13001faf, 0x2000fff8 },
+ { 0x13001fb0, 0x14000008 },
+ { 0x13001fb1, 0x14000008 },
+ { 0x13001fb2, 0x14000000 },
+ { 0x13001fb3, 0x14000009 },
+ { 0x13001fb4, 0x14000000 },
+ { 0x13801fb6, 0x14000001 },
+ { 0x13001fb8, 0x2400fff8 },
+ { 0x13001fb9, 0x2400fff8 },
+ { 0x13001fba, 0x2400ffb6 },
+ { 0x13001fbb, 0x2400ffb6 },
+ { 0x13001fbc, 0x2000fff7 },
+ { 0x13001fbd, 0x60000000 },
+ { 0x13001fbe, 0x1400e3db },
+ { 0x13801fbf, 0x60000002 },
+ { 0x13001fc2, 0x14000000 },
+ { 0x13001fc3, 0x14000009 },
+ { 0x13001fc4, 0x14000000 },
+ { 0x13801fc6, 0x14000001 },
+ { 0x13001fc8, 0x2400ffaa },
+ { 0x13001fc9, 0x2400ffaa },
+ { 0x13001fca, 0x2400ffaa },
+ { 0x13001fcb, 0x2400ffaa },
+ { 0x13001fcc, 0x2000fff7 },
+ { 0x13801fcd, 0x60000002 },
+ { 0x13001fd0, 0x14000008 },
+ { 0x13001fd1, 0x14000008 },
+ { 0x13801fd2, 0x14000001 },
+ { 0x13801fd6, 0x14000001 },
+ { 0x13001fd8, 0x2400fff8 },
+ { 0x13001fd9, 0x2400fff8 },
+ { 0x13001fda, 0x2400ff9c },
+ { 0x13001fdb, 0x2400ff9c },
+ { 0x13801fdd, 0x60000002 },
+ { 0x13001fe0, 0x14000008 },
+ { 0x13001fe1, 0x14000008 },
+ { 0x13801fe2, 0x14000002 },
+ { 0x13001fe5, 0x14000007 },
+ { 0x13801fe6, 0x14000001 },
+ { 0x13001fe8, 0x2400fff8 },
+ { 0x13001fe9, 0x2400fff8 },
+ { 0x13001fea, 0x2400ff90 },
+ { 0x13001feb, 0x2400ff90 },
+ { 0x13001fec, 0x2400fff9 },
+ { 0x13801fed, 0x60000002 },
+ { 0x13001ff2, 0x14000000 },
+ { 0x13001ff3, 0x14000009 },
+ { 0x13001ff4, 0x14000000 },
+ { 0x13801ff6, 0x14000001 },
+ { 0x13001ff8, 0x2400ff80 },
+ { 0x13001ff9, 0x2400ff80 },
+ { 0x13001ffa, 0x2400ff82 },
+ { 0x13001ffb, 0x2400ff82 },
+ { 0x13001ffc, 0x2000fff7 },
+ { 0x13801ffd, 0x60000001 },
+ { 0x09802000, 0x7400000a },
+ { 0x0980200b, 0x04000004 },
+ { 0x09802010, 0x44000005 },
+ { 0x09802016, 0x54000001 },
+ { 0x09002018, 0x50000000 },
+ { 0x09002019, 0x4c000000 },
+ { 0x0900201a, 0x58000000 },
+ { 0x0980201b, 0x50000001 },
+ { 0x0900201d, 0x4c000000 },
+ { 0x0900201e, 0x58000000 },
+ { 0x0900201f, 0x50000000 },
+ { 0x09802020, 0x54000007 },
+ { 0x09002028, 0x6c000000 },
+ { 0x09002029, 0x70000000 },
+ { 0x0980202a, 0x04000004 },
+ { 0x0900202f, 0x74000000 },
+ { 0x09802030, 0x54000008 },
+ { 0x09002039, 0x50000000 },
+ { 0x0900203a, 0x4c000000 },
+ { 0x0980203b, 0x54000003 },
+ { 0x0980203f, 0x40000001 },
+ { 0x09802041, 0x54000002 },
+ { 0x09002044, 0x64000000 },
+ { 0x09002045, 0x58000000 },
+ { 0x09002046, 0x48000000 },
+ { 0x09802047, 0x5400000a },
+ { 0x09002052, 0x64000000 },
+ { 0x09002053, 0x54000000 },
+ { 0x09002054, 0x40000000 },
+ { 0x09802055, 0x54000009 },
+ { 0x0900205f, 0x74000000 },
+ { 0x09802060, 0x04000003 },
+ { 0x0980206a, 0x04000005 },
+ { 0x09002070, 0x3c000000 },
+ { 0x21002071, 0x14000000 },
+ { 0x09802074, 0x3c000005 },
+ { 0x0980207a, 0x64000002 },
+ { 0x0900207d, 0x58000000 },
+ { 0x0900207e, 0x48000000 },
+ { 0x2100207f, 0x14000000 },
+ { 0x09802080, 0x3c000009 },
+ { 0x0980208a, 0x64000002 },
+ { 0x0900208d, 0x58000000 },
+ { 0x0900208e, 0x48000000 },
+ { 0x21802090, 0x18000004 },
+ { 0x098020a0, 0x5c000015 },
+ { 0x1b8020d0, 0x3000000c },
+ { 0x1b8020dd, 0x2c000003 },
+ { 0x1b0020e1, 0x30000000 },
+ { 0x1b8020e2, 0x2c000002 },
+ { 0x1b8020e5, 0x3000000a },
+ { 0x09802100, 0x68000001 },
+ { 0x09002102, 0x24000000 },
+ { 0x09802103, 0x68000003 },
+ { 0x09002107, 0x24000000 },
+ { 0x09802108, 0x68000001 },
+ { 0x0900210a, 0x14000000 },
+ { 0x0980210b, 0x24000002 },
+ { 0x0980210e, 0x14000001 },
+ { 0x09802110, 0x24000002 },
+ { 0x09002113, 0x14000000 },
+ { 0x09002114, 0x68000000 },
+ { 0x09002115, 0x24000000 },
+ { 0x09802116, 0x68000002 },
+ { 0x09802119, 0x24000004 },
+ { 0x0980211e, 0x68000005 },
+ { 0x09002124, 0x24000000 },
+ { 0x09002125, 0x68000000 },
+ { 0x13002126, 0x2400e2a3 },
+ { 0x09002127, 0x68000000 },
+ { 0x09002128, 0x24000000 },
+ { 0x09002129, 0x68000000 },
+ { 0x2100212a, 0x2400df41 },
+ { 0x2100212b, 0x2400dfba },
+ { 0x0980212c, 0x24000001 },
+ { 0x0900212e, 0x68000000 },
+ { 0x0900212f, 0x14000000 },
+ { 0x09802130, 0x24000001 },
+ { 0x21002132, 0x2400001c },
+ { 0x09002133, 0x24000000 },
+ { 0x09002134, 0x14000000 },
+ { 0x09802135, 0x1c000003 },
+ { 0x09002139, 0x14000000 },
+ { 0x0980213a, 0x68000001 },
+ { 0x0980213c, 0x14000001 },
+ { 0x0980213e, 0x24000001 },
+ { 0x09802140, 0x64000004 },
+ { 0x09002145, 0x24000000 },
+ { 0x09802146, 0x14000003 },
+ { 0x0900214a, 0x68000000 },
+ { 0x0900214b, 0x64000000 },
+ { 0x0980214c, 0x68000001 },
+ { 0x2100214e, 0x1400ffe4 },
+ { 0x09802153, 0x3c00000c },
+ { 0x09002160, 0x38000010 },
+ { 0x09002161, 0x38000010 },
+ { 0x09002162, 0x38000010 },
+ { 0x09002163, 0x38000010 },
+ { 0x09002164, 0x38000010 },
+ { 0x09002165, 0x38000010 },
+ { 0x09002166, 0x38000010 },
+ { 0x09002167, 0x38000010 },
+ { 0x09002168, 0x38000010 },
+ { 0x09002169, 0x38000010 },
+ { 0x0900216a, 0x38000010 },
+ { 0x0900216b, 0x38000010 },
+ { 0x0900216c, 0x38000010 },
+ { 0x0900216d, 0x38000010 },
+ { 0x0900216e, 0x38000010 },
+ { 0x0900216f, 0x38000010 },
+ { 0x09002170, 0x3800fff0 },
+ { 0x09002171, 0x3800fff0 },
+ { 0x09002172, 0x3800fff0 },
+ { 0x09002173, 0x3800fff0 },
+ { 0x09002174, 0x3800fff0 },
+ { 0x09002175, 0x3800fff0 },
+ { 0x09002176, 0x3800fff0 },
+ { 0x09002177, 0x3800fff0 },
+ { 0x09002178, 0x3800fff0 },
+ { 0x09002179, 0x3800fff0 },
+ { 0x0900217a, 0x3800fff0 },
+ { 0x0900217b, 0x3800fff0 },
+ { 0x0900217c, 0x3800fff0 },
+ { 0x0900217d, 0x3800fff0 },
+ { 0x0900217e, 0x3800fff0 },
+ { 0x0900217f, 0x3800fff0 },
+ { 0x09802180, 0x38000002 },
+ { 0x09002183, 0x24000001 },
+ { 0x21002184, 0x1400ffff },
+ { 0x09802190, 0x64000004 },
+ { 0x09802195, 0x68000004 },
+ { 0x0980219a, 0x64000001 },
+ { 0x0980219c, 0x68000003 },
+ { 0x090021a0, 0x64000000 },
+ { 0x098021a1, 0x68000001 },
+ { 0x090021a3, 0x64000000 },
+ { 0x098021a4, 0x68000001 },
+ { 0x090021a6, 0x64000000 },
+ { 0x098021a7, 0x68000006 },
+ { 0x090021ae, 0x64000000 },
+ { 0x098021af, 0x6800001e },
+ { 0x098021ce, 0x64000001 },
+ { 0x098021d0, 0x68000001 },
+ { 0x090021d2, 0x64000000 },
+ { 0x090021d3, 0x68000000 },
+ { 0x090021d4, 0x64000000 },
+ { 0x098021d5, 0x6800001e },
+ { 0x098021f4, 0x6400010b },
+ { 0x09802300, 0x68000007 },
+ { 0x09802308, 0x64000003 },
+ { 0x0980230c, 0x68000013 },
+ { 0x09802320, 0x64000001 },
+ { 0x09802322, 0x68000006 },
+ { 0x09002329, 0x58000000 },
+ { 0x0900232a, 0x48000000 },
+ { 0x0980232b, 0x68000050 },
+ { 0x0900237c, 0x64000000 },
+ { 0x0980237d, 0x6800001d },
+ { 0x0980239b, 0x64000018 },
+ { 0x098023b4, 0x68000027 },
+ { 0x098023dc, 0x64000005 },
+ { 0x098023e2, 0x68000005 },
+ { 0x09802400, 0x68000026 },
+ { 0x09802440, 0x6800000a },
+ { 0x09802460, 0x3c00003b },
+ { 0x0980249c, 0x68000019 },
+ { 0x090024b6, 0x6800001a },
+ { 0x090024b7, 0x6800001a },
+ { 0x090024b8, 0x6800001a },
+ { 0x090024b9, 0x6800001a },
+ { 0x090024ba, 0x6800001a },
+ { 0x090024bb, 0x6800001a },
+ { 0x090024bc, 0x6800001a },
+ { 0x090024bd, 0x6800001a },
+ { 0x090024be, 0x6800001a },
+ { 0x090024bf, 0x6800001a },
+ { 0x090024c0, 0x6800001a },
+ { 0x090024c1, 0x6800001a },
+ { 0x090024c2, 0x6800001a },
+ { 0x090024c3, 0x6800001a },
+ { 0x090024c4, 0x6800001a },
+ { 0x090024c5, 0x6800001a },
+ { 0x090024c6, 0x6800001a },
+ { 0x090024c7, 0x6800001a },
+ { 0x090024c8, 0x6800001a },
+ { 0x090024c9, 0x6800001a },
+ { 0x090024ca, 0x6800001a },
+ { 0x090024cb, 0x6800001a },
+ { 0x090024cc, 0x6800001a },
+ { 0x090024cd, 0x6800001a },
+ { 0x090024ce, 0x6800001a },
+ { 0x090024cf, 0x6800001a },
+ { 0x090024d0, 0x6800ffe6 },
+ { 0x090024d1, 0x6800ffe6 },
+ { 0x090024d2, 0x6800ffe6 },
+ { 0x090024d3, 0x6800ffe6 },
+ { 0x090024d4, 0x6800ffe6 },
+ { 0x090024d5, 0x6800ffe6 },
+ { 0x090024d6, 0x6800ffe6 },
+ { 0x090024d7, 0x6800ffe6 },
+ { 0x090024d8, 0x6800ffe6 },
+ { 0x090024d9, 0x6800ffe6 },
+ { 0x090024da, 0x6800ffe6 },
+ { 0x090024db, 0x6800ffe6 },
+ { 0x090024dc, 0x6800ffe6 },
+ { 0x090024dd, 0x6800ffe6 },
+ { 0x090024de, 0x6800ffe6 },
+ { 0x090024df, 0x6800ffe6 },
+ { 0x090024e0, 0x6800ffe6 },
+ { 0x090024e1, 0x6800ffe6 },
+ { 0x090024e2, 0x6800ffe6 },
+ { 0x090024e3, 0x6800ffe6 },
+ { 0x090024e4, 0x6800ffe6 },
+ { 0x090024e5, 0x6800ffe6 },
+ { 0x090024e6, 0x6800ffe6 },
+ { 0x090024e7, 0x6800ffe6 },
+ { 0x090024e8, 0x6800ffe6 },
+ { 0x090024e9, 0x6800ffe6 },
+ { 0x098024ea, 0x3c000015 },
+ { 0x09802500, 0x680000b6 },
+ { 0x090025b7, 0x64000000 },
+ { 0x098025b8, 0x68000008 },
+ { 0x090025c1, 0x64000000 },
+ { 0x098025c2, 0x68000035 },
+ { 0x098025f8, 0x64000007 },
+ { 0x09802600, 0x6800006e },
+ { 0x0900266f, 0x64000000 },
+ { 0x09802670, 0x6800002c },
+ { 0x098026a0, 0x68000012 },
+ { 0x09802701, 0x68000003 },
+ { 0x09802706, 0x68000003 },
+ { 0x0980270c, 0x6800001b },
+ { 0x09802729, 0x68000022 },
+ { 0x0900274d, 0x68000000 },
+ { 0x0980274f, 0x68000003 },
+ { 0x09002756, 0x68000000 },
+ { 0x09802758, 0x68000006 },
+ { 0x09802761, 0x68000006 },
+ { 0x09002768, 0x58000000 },
+ { 0x09002769, 0x48000000 },
+ { 0x0900276a, 0x58000000 },
+ { 0x0900276b, 0x48000000 },
+ { 0x0900276c, 0x58000000 },
+ { 0x0900276d, 0x48000000 },
+ { 0x0900276e, 0x58000000 },
+ { 0x0900276f, 0x48000000 },
+ { 0x09002770, 0x58000000 },
+ { 0x09002771, 0x48000000 },
+ { 0x09002772, 0x58000000 },
+ { 0x09002773, 0x48000000 },
+ { 0x09002774, 0x58000000 },
+ { 0x09002775, 0x48000000 },
+ { 0x09802776, 0x3c00001d },
+ { 0x09002794, 0x68000000 },
+ { 0x09802798, 0x68000017 },
+ { 0x098027b1, 0x6800000d },
+ { 0x098027c0, 0x64000004 },
+ { 0x090027c5, 0x58000000 },
+ { 0x090027c6, 0x48000000 },
+ { 0x098027c7, 0x64000003 },
+ { 0x098027d0, 0x64000015 },
+ { 0x090027e6, 0x58000000 },
+ { 0x090027e7, 0x48000000 },
+ { 0x090027e8, 0x58000000 },
+ { 0x090027e9, 0x48000000 },
+ { 0x090027ea, 0x58000000 },
+ { 0x090027eb, 0x48000000 },
+ { 0x098027f0, 0x6400000f },
+ { 0x04802800, 0x680000ff },
+ { 0x09802900, 0x64000082 },
+ { 0x09002983, 0x58000000 },
+ { 0x09002984, 0x48000000 },
+ { 0x09002985, 0x58000000 },
+ { 0x09002986, 0x48000000 },
+ { 0x09002987, 0x58000000 },
+ { 0x09002988, 0x48000000 },
+ { 0x09002989, 0x58000000 },
+ { 0x0900298a, 0x48000000 },
+ { 0x0900298b, 0x58000000 },
+ { 0x0900298c, 0x48000000 },
+ { 0x0900298d, 0x58000000 },
+ { 0x0900298e, 0x48000000 },
+ { 0x0900298f, 0x58000000 },
+ { 0x09002990, 0x48000000 },
+ { 0x09002991, 0x58000000 },
+ { 0x09002992, 0x48000000 },
+ { 0x09002993, 0x58000000 },
+ { 0x09002994, 0x48000000 },
+ { 0x09002995, 0x58000000 },
+ { 0x09002996, 0x48000000 },
+ { 0x09002997, 0x58000000 },
+ { 0x09002998, 0x48000000 },
+ { 0x09802999, 0x6400003e },
+ { 0x090029d8, 0x58000000 },
+ { 0x090029d9, 0x48000000 },
+ { 0x090029da, 0x58000000 },
+ { 0x090029db, 0x48000000 },
+ { 0x098029dc, 0x6400001f },
+ { 0x090029fc, 0x58000000 },
+ { 0x090029fd, 0x48000000 },
+ { 0x098029fe, 0x64000101 },
+ { 0x09802b00, 0x6800001a },
+ { 0x09802b20, 0x68000003 },
+ { 0x11002c00, 0x24000030 },
+ { 0x11002c01, 0x24000030 },
+ { 0x11002c02, 0x24000030 },
+ { 0x11002c03, 0x24000030 },
+ { 0x11002c04, 0x24000030 },
+ { 0x11002c05, 0x24000030 },
+ { 0x11002c06, 0x24000030 },
+ { 0x11002c07, 0x24000030 },
+ { 0x11002c08, 0x24000030 },
+ { 0x11002c09, 0x24000030 },
+ { 0x11002c0a, 0x24000030 },
+ { 0x11002c0b, 0x24000030 },
+ { 0x11002c0c, 0x24000030 },
+ { 0x11002c0d, 0x24000030 },
+ { 0x11002c0e, 0x24000030 },
+ { 0x11002c0f, 0x24000030 },
+ { 0x11002c10, 0x24000030 },
+ { 0x11002c11, 0x24000030 },
+ { 0x11002c12, 0x24000030 },
+ { 0x11002c13, 0x24000030 },
+ { 0x11002c14, 0x24000030 },
+ { 0x11002c15, 0x24000030 },
+ { 0x11002c16, 0x24000030 },
+ { 0x11002c17, 0x24000030 },
+ { 0x11002c18, 0x24000030 },
+ { 0x11002c19, 0x24000030 },
+ { 0x11002c1a, 0x24000030 },
+ { 0x11002c1b, 0x24000030 },
+ { 0x11002c1c, 0x24000030 },
+ { 0x11002c1d, 0x24000030 },
+ { 0x11002c1e, 0x24000030 },
+ { 0x11002c1f, 0x24000030 },
+ { 0x11002c20, 0x24000030 },
+ { 0x11002c21, 0x24000030 },
+ { 0x11002c22, 0x24000030 },
+ { 0x11002c23, 0x24000030 },
+ { 0x11002c24, 0x24000030 },
+ { 0x11002c25, 0x24000030 },
+ { 0x11002c26, 0x24000030 },
+ { 0x11002c27, 0x24000030 },
+ { 0x11002c28, 0x24000030 },
+ { 0x11002c29, 0x24000030 },
+ { 0x11002c2a, 0x24000030 },
+ { 0x11002c2b, 0x24000030 },
+ { 0x11002c2c, 0x24000030 },
+ { 0x11002c2d, 0x24000030 },
+ { 0x11002c2e, 0x24000030 },
+ { 0x11002c30, 0x1400ffd0 },
+ { 0x11002c31, 0x1400ffd0 },
+ { 0x11002c32, 0x1400ffd0 },
+ { 0x11002c33, 0x1400ffd0 },
+ { 0x11002c34, 0x1400ffd0 },
+ { 0x11002c35, 0x1400ffd0 },
+ { 0x11002c36, 0x1400ffd0 },
+ { 0x11002c37, 0x1400ffd0 },
+ { 0x11002c38, 0x1400ffd0 },
+ { 0x11002c39, 0x1400ffd0 },
+ { 0x11002c3a, 0x1400ffd0 },
+ { 0x11002c3b, 0x1400ffd0 },
+ { 0x11002c3c, 0x1400ffd0 },
+ { 0x11002c3d, 0x1400ffd0 },
+ { 0x11002c3e, 0x1400ffd0 },
+ { 0x11002c3f, 0x1400ffd0 },
+ { 0x11002c40, 0x1400ffd0 },
+ { 0x11002c41, 0x1400ffd0 },
+ { 0x11002c42, 0x1400ffd0 },
+ { 0x11002c43, 0x1400ffd0 },
+ { 0x11002c44, 0x1400ffd0 },
+ { 0x11002c45, 0x1400ffd0 },
+ { 0x11002c46, 0x1400ffd0 },
+ { 0x11002c47, 0x1400ffd0 },
+ { 0x11002c48, 0x1400ffd0 },
+ { 0x11002c49, 0x1400ffd0 },
+ { 0x11002c4a, 0x1400ffd0 },
+ { 0x11002c4b, 0x1400ffd0 },
+ { 0x11002c4c, 0x1400ffd0 },
+ { 0x11002c4d, 0x1400ffd0 },
+ { 0x11002c4e, 0x1400ffd0 },
+ { 0x11002c4f, 0x1400ffd0 },
+ { 0x11002c50, 0x1400ffd0 },
+ { 0x11002c51, 0x1400ffd0 },
+ { 0x11002c52, 0x1400ffd0 },
+ { 0x11002c53, 0x1400ffd0 },
+ { 0x11002c54, 0x1400ffd0 },
+ { 0x11002c55, 0x1400ffd0 },
+ { 0x11002c56, 0x1400ffd0 },
+ { 0x11002c57, 0x1400ffd0 },
+ { 0x11002c58, 0x1400ffd0 },
+ { 0x11002c59, 0x1400ffd0 },
+ { 0x11002c5a, 0x1400ffd0 },
+ { 0x11002c5b, 0x1400ffd0 },
+ { 0x11002c5c, 0x1400ffd0 },
+ { 0x11002c5d, 0x1400ffd0 },
+ { 0x11002c5e, 0x1400ffd0 },
+ { 0x21002c60, 0x24000001 },
+ { 0x21002c61, 0x1400ffff },
+ { 0x21002c62, 0x2400d609 },
+ { 0x21002c63, 0x2400f11a },
+ { 0x21002c64, 0x2400d619 },
+ { 0x21002c65, 0x1400d5d5 },
+ { 0x21002c66, 0x1400d5d8 },
+ { 0x21002c67, 0x24000001 },
+ { 0x21002c68, 0x1400ffff },
+ { 0x21002c69, 0x24000001 },
+ { 0x21002c6a, 0x1400ffff },
+ { 0x21002c6b, 0x24000001 },
+ { 0x21002c6c, 0x1400ffff },
+ { 0x21002c74, 0x14000000 },
+ { 0x21002c75, 0x24000001 },
+ { 0x21002c76, 0x1400ffff },
+ { 0x21002c77, 0x14000000 },
+ { 0x0a002c80, 0x24000001 },
+ { 0x0a002c81, 0x1400ffff },
+ { 0x0a002c82, 0x24000001 },
+ { 0x0a002c83, 0x1400ffff },
+ { 0x0a002c84, 0x24000001 },
+ { 0x0a002c85, 0x1400ffff },
+ { 0x0a002c86, 0x24000001 },
+ { 0x0a002c87, 0x1400ffff },
+ { 0x0a002c88, 0x24000001 },
+ { 0x0a002c89, 0x1400ffff },
+ { 0x0a002c8a, 0x24000001 },
+ { 0x0a002c8b, 0x1400ffff },
+ { 0x0a002c8c, 0x24000001 },
+ { 0x0a002c8d, 0x1400ffff },
+ { 0x0a002c8e, 0x24000001 },
+ { 0x0a002c8f, 0x1400ffff },
+ { 0x0a002c90, 0x24000001 },
+ { 0x0a002c91, 0x1400ffff },
+ { 0x0a002c92, 0x24000001 },
+ { 0x0a002c93, 0x1400ffff },
+ { 0x0a002c94, 0x24000001 },
+ { 0x0a002c95, 0x1400ffff },
+ { 0x0a002c96, 0x24000001 },
+ { 0x0a002c97, 0x1400ffff },
+ { 0x0a002c98, 0x24000001 },
+ { 0x0a002c99, 0x1400ffff },
+ { 0x0a002c9a, 0x24000001 },
+ { 0x0a002c9b, 0x1400ffff },
+ { 0x0a002c9c, 0x24000001 },
+ { 0x0a002c9d, 0x1400ffff },
+ { 0x0a002c9e, 0x24000001 },
+ { 0x0a002c9f, 0x1400ffff },
+ { 0x0a002ca0, 0x24000001 },
+ { 0x0a002ca1, 0x1400ffff },
+ { 0x0a002ca2, 0x24000001 },
+ { 0x0a002ca3, 0x1400ffff },
+ { 0x0a002ca4, 0x24000001 },
+ { 0x0a002ca5, 0x1400ffff },
+ { 0x0a002ca6, 0x24000001 },
+ { 0x0a002ca7, 0x1400ffff },
+ { 0x0a002ca8, 0x24000001 },
+ { 0x0a002ca9, 0x1400ffff },
+ { 0x0a002caa, 0x24000001 },
+ { 0x0a002cab, 0x1400ffff },
+ { 0x0a002cac, 0x24000001 },
+ { 0x0a002cad, 0x1400ffff },
+ { 0x0a002cae, 0x24000001 },
+ { 0x0a002caf, 0x1400ffff },
+ { 0x0a002cb0, 0x24000001 },
+ { 0x0a002cb1, 0x1400ffff },
+ { 0x0a002cb2, 0x24000001 },
+ { 0x0a002cb3, 0x1400ffff },
+ { 0x0a002cb4, 0x24000001 },
+ { 0x0a002cb5, 0x1400ffff },
+ { 0x0a002cb6, 0x24000001 },
+ { 0x0a002cb7, 0x1400ffff },
+ { 0x0a002cb8, 0x24000001 },
+ { 0x0a002cb9, 0x1400ffff },
+ { 0x0a002cba, 0x24000001 },
+ { 0x0a002cbb, 0x1400ffff },
+ { 0x0a002cbc, 0x24000001 },
+ { 0x0a002cbd, 0x1400ffff },
+ { 0x0a002cbe, 0x24000001 },
+ { 0x0a002cbf, 0x1400ffff },
+ { 0x0a002cc0, 0x24000001 },
+ { 0x0a002cc1, 0x1400ffff },
+ { 0x0a002cc2, 0x24000001 },
+ { 0x0a002cc3, 0x1400ffff },
+ { 0x0a002cc4, 0x24000001 },
+ { 0x0a002cc5, 0x1400ffff },
+ { 0x0a002cc6, 0x24000001 },
+ { 0x0a002cc7, 0x1400ffff },
+ { 0x0a002cc8, 0x24000001 },
+ { 0x0a002cc9, 0x1400ffff },
+ { 0x0a002cca, 0x24000001 },
+ { 0x0a002ccb, 0x1400ffff },
+ { 0x0a002ccc, 0x24000001 },
+ { 0x0a002ccd, 0x1400ffff },
+ { 0x0a002cce, 0x24000001 },
+ { 0x0a002ccf, 0x1400ffff },
+ { 0x0a002cd0, 0x24000001 },
+ { 0x0a002cd1, 0x1400ffff },
+ { 0x0a002cd2, 0x24000001 },
+ { 0x0a002cd3, 0x1400ffff },
+ { 0x0a002cd4, 0x24000001 },
+ { 0x0a002cd5, 0x1400ffff },
+ { 0x0a002cd6, 0x24000001 },
+ { 0x0a002cd7, 0x1400ffff },
+ { 0x0a002cd8, 0x24000001 },
+ { 0x0a002cd9, 0x1400ffff },
+ { 0x0a002cda, 0x24000001 },
+ { 0x0a002cdb, 0x1400ffff },
+ { 0x0a002cdc, 0x24000001 },
+ { 0x0a002cdd, 0x1400ffff },
+ { 0x0a002cde, 0x24000001 },
+ { 0x0a002cdf, 0x1400ffff },
+ { 0x0a002ce0, 0x24000001 },
+ { 0x0a002ce1, 0x1400ffff },
+ { 0x0a002ce2, 0x24000001 },
+ { 0x0a002ce3, 0x1400ffff },
+ { 0x0a002ce4, 0x14000000 },
+ { 0x0a802ce5, 0x68000005 },
+ { 0x0a802cf9, 0x54000003 },
+ { 0x0a002cfd, 0x3c000000 },
+ { 0x0a802cfe, 0x54000001 },
+ { 0x10002d00, 0x1400e3a0 },
+ { 0x10002d01, 0x1400e3a0 },
+ { 0x10002d02, 0x1400e3a0 },
+ { 0x10002d03, 0x1400e3a0 },
+ { 0x10002d04, 0x1400e3a0 },
+ { 0x10002d05, 0x1400e3a0 },
+ { 0x10002d06, 0x1400e3a0 },
+ { 0x10002d07, 0x1400e3a0 },
+ { 0x10002d08, 0x1400e3a0 },
+ { 0x10002d09, 0x1400e3a0 },
+ { 0x10002d0a, 0x1400e3a0 },
+ { 0x10002d0b, 0x1400e3a0 },
+ { 0x10002d0c, 0x1400e3a0 },
+ { 0x10002d0d, 0x1400e3a0 },
+ { 0x10002d0e, 0x1400e3a0 },
+ { 0x10002d0f, 0x1400e3a0 },
+ { 0x10002d10, 0x1400e3a0 },
+ { 0x10002d11, 0x1400e3a0 },
+ { 0x10002d12, 0x1400e3a0 },
+ { 0x10002d13, 0x1400e3a0 },
+ { 0x10002d14, 0x1400e3a0 },
+ { 0x10002d15, 0x1400e3a0 },
+ { 0x10002d16, 0x1400e3a0 },
+ { 0x10002d17, 0x1400e3a0 },
+ { 0x10002d18, 0x1400e3a0 },
+ { 0x10002d19, 0x1400e3a0 },
+ { 0x10002d1a, 0x1400e3a0 },
+ { 0x10002d1b, 0x1400e3a0 },
+ { 0x10002d1c, 0x1400e3a0 },
+ { 0x10002d1d, 0x1400e3a0 },
+ { 0x10002d1e, 0x1400e3a0 },
+ { 0x10002d1f, 0x1400e3a0 },
+ { 0x10002d20, 0x1400e3a0 },
+ { 0x10002d21, 0x1400e3a0 },
+ { 0x10002d22, 0x1400e3a0 },
+ { 0x10002d23, 0x1400e3a0 },
+ { 0x10002d24, 0x1400e3a0 },
+ { 0x10002d25, 0x1400e3a0 },
+ { 0x3a802d30, 0x1c000035 },
+ { 0x3a002d6f, 0x18000000 },
+ { 0x0f802d80, 0x1c000016 },
+ { 0x0f802da0, 0x1c000006 },
+ { 0x0f802da8, 0x1c000006 },
+ { 0x0f802db0, 0x1c000006 },
+ { 0x0f802db8, 0x1c000006 },
+ { 0x0f802dc0, 0x1c000006 },
+ { 0x0f802dc8, 0x1c000006 },
+ { 0x0f802dd0, 0x1c000006 },
+ { 0x0f802dd8, 0x1c000006 },
+ { 0x09802e00, 0x54000001 },
+ { 0x09002e02, 0x50000000 },
+ { 0x09002e03, 0x4c000000 },
+ { 0x09002e04, 0x50000000 },
+ { 0x09002e05, 0x4c000000 },
+ { 0x09802e06, 0x54000002 },
+ { 0x09002e09, 0x50000000 },
+ { 0x09002e0a, 0x4c000000 },
+ { 0x09002e0b, 0x54000000 },
+ { 0x09002e0c, 0x50000000 },
+ { 0x09002e0d, 0x4c000000 },
+ { 0x09802e0e, 0x54000008 },
+ { 0x09002e17, 0x44000000 },
+ { 0x09002e1c, 0x50000000 },
+ { 0x09002e1d, 0x4c000000 },
+ { 0x16802e80, 0x68000019 },
+ { 0x16802e9b, 0x68000058 },
+ { 0x16802f00, 0x680000d5 },
+ { 0x09802ff0, 0x6800000b },
+ { 0x09003000, 0x74000000 },
+ { 0x09803001, 0x54000002 },
+ { 0x09003004, 0x68000000 },
+ { 0x16003005, 0x18000000 },
+ { 0x09003006, 0x1c000000 },
+ { 0x16003007, 0x38000000 },
+ { 0x09003008, 0x58000000 },
+ { 0x09003009, 0x48000000 },
+ { 0x0900300a, 0x58000000 },
+ { 0x0900300b, 0x48000000 },
+ { 0x0900300c, 0x58000000 },
+ { 0x0900300d, 0x48000000 },
+ { 0x0900300e, 0x58000000 },
+ { 0x0900300f, 0x48000000 },
+ { 0x09003010, 0x58000000 },
+ { 0x09003011, 0x48000000 },
+ { 0x09803012, 0x68000001 },
+ { 0x09003014, 0x58000000 },
+ { 0x09003015, 0x48000000 },
+ { 0x09003016, 0x58000000 },
+ { 0x09003017, 0x48000000 },
+ { 0x09003018, 0x58000000 },
+ { 0x09003019, 0x48000000 },
+ { 0x0900301a, 0x58000000 },
+ { 0x0900301b, 0x48000000 },
+ { 0x0900301c, 0x44000000 },
+ { 0x0900301d, 0x58000000 },
+ { 0x0980301e, 0x48000001 },
+ { 0x09003020, 0x68000000 },
+ { 0x16803021, 0x38000008 },
+ { 0x1b80302a, 0x30000005 },
+ { 0x09003030, 0x44000000 },
+ { 0x09803031, 0x18000004 },
+ { 0x09803036, 0x68000001 },
+ { 0x16803038, 0x38000002 },
+ { 0x1600303b, 0x18000000 },
+ { 0x0900303c, 0x1c000000 },
+ { 0x0900303d, 0x54000000 },
+ { 0x0980303e, 0x68000001 },
+ { 0x1a803041, 0x1c000055 },
+ { 0x1b803099, 0x30000001 },
+ { 0x0980309b, 0x60000001 },
+ { 0x1a80309d, 0x18000001 },
+ { 0x1a00309f, 0x1c000000 },
+ { 0x090030a0, 0x44000000 },
+ { 0x1d8030a1, 0x1c000059 },
+ { 0x090030fb, 0x54000000 },
+ { 0x098030fc, 0x18000002 },
+ { 0x1d0030ff, 0x1c000000 },
+ { 0x03803105, 0x1c000027 },
+ { 0x17803131, 0x1c00005d },
+ { 0x09803190, 0x68000001 },
+ { 0x09803192, 0x3c000003 },
+ { 0x09803196, 0x68000009 },
+ { 0x038031a0, 0x1c000017 },
+ { 0x098031c0, 0x6800000f },
+ { 0x1d8031f0, 0x1c00000f },
+ { 0x17803200, 0x6800001e },
+ { 0x09803220, 0x3c000009 },
+ { 0x0980322a, 0x68000019 },
+ { 0x09003250, 0x68000000 },
+ { 0x09803251, 0x3c00000e },
+ { 0x17803260, 0x6800001f },
+ { 0x09803280, 0x3c000009 },
+ { 0x0980328a, 0x68000026 },
+ { 0x098032b1, 0x3c00000e },
+ { 0x098032c0, 0x6800003e },
+ { 0x09803300, 0x680000ff },
+ { 0x16803400, 0x1c0019b5 },
+ { 0x09804dc0, 0x6800003f },
+ { 0x16804e00, 0x1c0051bb },
+ { 0x3c80a000, 0x1c000014 },
+ { 0x3c00a015, 0x18000000 },
+ { 0x3c80a016, 0x1c000476 },
+ { 0x3c80a490, 0x68000036 },
+ { 0x0980a700, 0x60000016 },
+ { 0x0980a717, 0x18000003 },
+ { 0x0980a720, 0x60000001 },
+ { 0x3080a800, 0x1c000001 },
+ { 0x3000a802, 0x28000000 },
+ { 0x3080a803, 0x1c000002 },
+ { 0x3000a806, 0x30000000 },
+ { 0x3080a807, 0x1c000003 },
+ { 0x3000a80b, 0x30000000 },
+ { 0x3080a80c, 0x1c000016 },
+ { 0x3080a823, 0x28000001 },
+ { 0x3080a825, 0x30000001 },
+ { 0x3000a827, 0x28000000 },
+ { 0x3080a828, 0x68000003 },
+ { 0x4080a840, 0x1c000033 },
+ { 0x4080a874, 0x54000003 },
+ { 0x1780ac00, 0x1c002ba3 },
+ { 0x0980d800, 0x1000037f },
+ { 0x0980db80, 0x1000007f },
+ { 0x0980dc00, 0x100003ff },
+ { 0x0980e000, 0x0c0018ff },
+ { 0x1680f900, 0x1c00012d },
+ { 0x1680fa30, 0x1c00003a },
+ { 0x1680fa70, 0x1c000069 },
+ { 0x2180fb00, 0x14000006 },
+ { 0x0180fb13, 0x14000004 },
+ { 0x1900fb1d, 0x1c000000 },
+ { 0x1900fb1e, 0x30000000 },
+ { 0x1980fb1f, 0x1c000009 },
+ { 0x1900fb29, 0x64000000 },
+ { 0x1980fb2a, 0x1c00000c },
+ { 0x1980fb38, 0x1c000004 },
+ { 0x1900fb3e, 0x1c000000 },
+ { 0x1980fb40, 0x1c000001 },
+ { 0x1980fb43, 0x1c000001 },
+ { 0x1980fb46, 0x1c00006b },
+ { 0x0080fbd3, 0x1c00016a },
+ { 0x0900fd3e, 0x58000000 },
+ { 0x0900fd3f, 0x48000000 },
+ { 0x0080fd50, 0x1c00003f },
+ { 0x0080fd92, 0x1c000035 },
+ { 0x0080fdf0, 0x1c00000b },
+ { 0x0000fdfc, 0x5c000000 },
+ { 0x0900fdfd, 0x68000000 },
+ { 0x1b80fe00, 0x3000000f },
+ { 0x0980fe10, 0x54000006 },
+ { 0x0900fe17, 0x58000000 },
+ { 0x0900fe18, 0x48000000 },
+ { 0x0900fe19, 0x54000000 },
+ { 0x1b80fe20, 0x30000003 },
+ { 0x0900fe30, 0x54000000 },
+ { 0x0980fe31, 0x44000001 },
+ { 0x0980fe33, 0x40000001 },
+ { 0x0900fe35, 0x58000000 },
+ { 0x0900fe36, 0x48000000 },
+ { 0x0900fe37, 0x58000000 },
+ { 0x0900fe38, 0x48000000 },
+ { 0x0900fe39, 0x58000000 },
+ { 0x0900fe3a, 0x48000000 },
+ { 0x0900fe3b, 0x58000000 },
+ { 0x0900fe3c, 0x48000000 },
+ { 0x0900fe3d, 0x58000000 },
+ { 0x0900fe3e, 0x48000000 },
+ { 0x0900fe3f, 0x58000000 },
+ { 0x0900fe40, 0x48000000 },
+ { 0x0900fe41, 0x58000000 },
+ { 0x0900fe42, 0x48000000 },
+ { 0x0900fe43, 0x58000000 },
+ { 0x0900fe44, 0x48000000 },
+ { 0x0980fe45, 0x54000001 },
+ { 0x0900fe47, 0x58000000 },
+ { 0x0900fe48, 0x48000000 },
+ { 0x0980fe49, 0x54000003 },
+ { 0x0980fe4d, 0x40000002 },
+ { 0x0980fe50, 0x54000002 },
+ { 0x0980fe54, 0x54000003 },
+ { 0x0900fe58, 0x44000000 },
+ { 0x0900fe59, 0x58000000 },
+ { 0x0900fe5a, 0x48000000 },
+ { 0x0900fe5b, 0x58000000 },
+ { 0x0900fe5c, 0x48000000 },
+ { 0x0900fe5d, 0x58000000 },
+ { 0x0900fe5e, 0x48000000 },
+ { 0x0980fe5f, 0x54000002 },
+ { 0x0900fe62, 0x64000000 },
+ { 0x0900fe63, 0x44000000 },
+ { 0x0980fe64, 0x64000002 },
+ { 0x0900fe68, 0x54000000 },
+ { 0x0900fe69, 0x5c000000 },
+ { 0x0980fe6a, 0x54000001 },
+ { 0x0080fe70, 0x1c000004 },
+ { 0x0080fe76, 0x1c000086 },
+ { 0x0900feff, 0x04000000 },
+ { 0x0980ff01, 0x54000002 },
+ { 0x0900ff04, 0x5c000000 },
+ { 0x0980ff05, 0x54000002 },
+ { 0x0900ff08, 0x58000000 },
+ { 0x0900ff09, 0x48000000 },
+ { 0x0900ff0a, 0x54000000 },
+ { 0x0900ff0b, 0x64000000 },
+ { 0x0900ff0c, 0x54000000 },
+ { 0x0900ff0d, 0x44000000 },
+ { 0x0980ff0e, 0x54000001 },
+ { 0x0980ff10, 0x34000009 },
+ { 0x0980ff1a, 0x54000001 },
+ { 0x0980ff1c, 0x64000002 },
+ { 0x0980ff1f, 0x54000001 },
+ { 0x2100ff21, 0x24000020 },
+ { 0x2100ff22, 0x24000020 },
+ { 0x2100ff23, 0x24000020 },
+ { 0x2100ff24, 0x24000020 },
+ { 0x2100ff25, 0x24000020 },
+ { 0x2100ff26, 0x24000020 },
+ { 0x2100ff27, 0x24000020 },
+ { 0x2100ff28, 0x24000020 },
+ { 0x2100ff29, 0x24000020 },
+ { 0x2100ff2a, 0x24000020 },
+ { 0x2100ff2b, 0x24000020 },
+ { 0x2100ff2c, 0x24000020 },
+ { 0x2100ff2d, 0x24000020 },
+ { 0x2100ff2e, 0x24000020 },
+ { 0x2100ff2f, 0x24000020 },
+ { 0x2100ff30, 0x24000020 },
+ { 0x2100ff31, 0x24000020 },
+ { 0x2100ff32, 0x24000020 },
+ { 0x2100ff33, 0x24000020 },
+ { 0x2100ff34, 0x24000020 },
+ { 0x2100ff35, 0x24000020 },
+ { 0x2100ff36, 0x24000020 },
+ { 0x2100ff37, 0x24000020 },
+ { 0x2100ff38, 0x24000020 },
+ { 0x2100ff39, 0x24000020 },
+ { 0x2100ff3a, 0x24000020 },
+ { 0x0900ff3b, 0x58000000 },
+ { 0x0900ff3c, 0x54000000 },
+ { 0x0900ff3d, 0x48000000 },
+ { 0x0900ff3e, 0x60000000 },
+ { 0x0900ff3f, 0x40000000 },
+ { 0x0900ff40, 0x60000000 },
+ { 0x2100ff41, 0x1400ffe0 },
+ { 0x2100ff42, 0x1400ffe0 },
+ { 0x2100ff43, 0x1400ffe0 },
+ { 0x2100ff44, 0x1400ffe0 },
+ { 0x2100ff45, 0x1400ffe0 },
+ { 0x2100ff46, 0x1400ffe0 },
+ { 0x2100ff47, 0x1400ffe0 },
+ { 0x2100ff48, 0x1400ffe0 },
+ { 0x2100ff49, 0x1400ffe0 },
+ { 0x2100ff4a, 0x1400ffe0 },
+ { 0x2100ff4b, 0x1400ffe0 },
+ { 0x2100ff4c, 0x1400ffe0 },
+ { 0x2100ff4d, 0x1400ffe0 },
+ { 0x2100ff4e, 0x1400ffe0 },
+ { 0x2100ff4f, 0x1400ffe0 },
+ { 0x2100ff50, 0x1400ffe0 },
+ { 0x2100ff51, 0x1400ffe0 },
+ { 0x2100ff52, 0x1400ffe0 },
+ { 0x2100ff53, 0x1400ffe0 },
+ { 0x2100ff54, 0x1400ffe0 },
+ { 0x2100ff55, 0x1400ffe0 },
+ { 0x2100ff56, 0x1400ffe0 },
+ { 0x2100ff57, 0x1400ffe0 },
+ { 0x2100ff58, 0x1400ffe0 },
+ { 0x2100ff59, 0x1400ffe0 },
+ { 0x2100ff5a, 0x1400ffe0 },
+ { 0x0900ff5b, 0x58000000 },
+ { 0x0900ff5c, 0x64000000 },
+ { 0x0900ff5d, 0x48000000 },
+ { 0x0900ff5e, 0x64000000 },
+ { 0x0900ff5f, 0x58000000 },
+ { 0x0900ff60, 0x48000000 },
+ { 0x0900ff61, 0x54000000 },
+ { 0x0900ff62, 0x58000000 },
+ { 0x0900ff63, 0x48000000 },
+ { 0x0980ff64, 0x54000001 },
+ { 0x1d80ff66, 0x1c000009 },
+ { 0x0900ff70, 0x18000000 },
+ { 0x1d80ff71, 0x1c00002c },
+ { 0x0980ff9e, 0x18000001 },
+ { 0x1780ffa0, 0x1c00001e },
+ { 0x1780ffc2, 0x1c000005 },
+ { 0x1780ffca, 0x1c000005 },
+ { 0x1780ffd2, 0x1c000005 },
+ { 0x1780ffda, 0x1c000002 },
+ { 0x0980ffe0, 0x5c000001 },
+ { 0x0900ffe2, 0x64000000 },
+ { 0x0900ffe3, 0x60000000 },
+ { 0x0900ffe4, 0x68000000 },
+ { 0x0980ffe5, 0x5c000001 },
+ { 0x0900ffe8, 0x68000000 },
+ { 0x0980ffe9, 0x64000003 },
+ { 0x0980ffed, 0x68000001 },
+ { 0x0980fff9, 0x04000002 },
+ { 0x0980fffc, 0x68000001 },
+ { 0x23810000, 0x1c00000b },
+ { 0x2381000d, 0x1c000019 },
+ { 0x23810028, 0x1c000012 },
+ { 0x2381003c, 0x1c000001 },
+ { 0x2381003f, 0x1c00000e },
+ { 0x23810050, 0x1c00000d },
+ { 0x23810080, 0x1c00007a },
+ { 0x09810100, 0x54000001 },
+ { 0x09010102, 0x68000000 },
+ { 0x09810107, 0x3c00002c },
+ { 0x09810137, 0x68000008 },
+ { 0x13810140, 0x38000034 },
+ { 0x13810175, 0x3c000003 },
+ { 0x13810179, 0x68000010 },
+ { 0x1301018a, 0x3c000000 },
+ { 0x29810300, 0x1c00001e },
+ { 0x29810320, 0x3c000003 },
+ { 0x12810330, 0x1c000010 },
+ { 0x12010341, 0x38000000 },
+ { 0x12810342, 0x1c000007 },
+ { 0x1201034a, 0x38000000 },
+ { 0x3b810380, 0x1c00001d },
+ { 0x3b01039f, 0x54000000 },
+ { 0x2a8103a0, 0x1c000023 },
+ { 0x2a8103c8, 0x1c000007 },
+ { 0x2a0103d0, 0x54000000 },
+ { 0x2a8103d1, 0x38000004 },
+ { 0x0d010400, 0x24000028 },
+ { 0x0d010401, 0x24000028 },
+ { 0x0d010402, 0x24000028 },
+ { 0x0d010403, 0x24000028 },
+ { 0x0d010404, 0x24000028 },
+ { 0x0d010405, 0x24000028 },
+ { 0x0d010406, 0x24000028 },
+ { 0x0d010407, 0x24000028 },
+ { 0x0d010408, 0x24000028 },
+ { 0x0d010409, 0x24000028 },
+ { 0x0d01040a, 0x24000028 },
+ { 0x0d01040b, 0x24000028 },
+ { 0x0d01040c, 0x24000028 },
+ { 0x0d01040d, 0x24000028 },
+ { 0x0d01040e, 0x24000028 },
+ { 0x0d01040f, 0x24000028 },
+ { 0x0d010410, 0x24000028 },
+ { 0x0d010411, 0x24000028 },
+ { 0x0d010412, 0x24000028 },
+ { 0x0d010413, 0x24000028 },
+ { 0x0d010414, 0x24000028 },
+ { 0x0d010415, 0x24000028 },
+ { 0x0d010416, 0x24000028 },
+ { 0x0d010417, 0x24000028 },
+ { 0x0d010418, 0x24000028 },
+ { 0x0d010419, 0x24000028 },
+ { 0x0d01041a, 0x24000028 },
+ { 0x0d01041b, 0x24000028 },
+ { 0x0d01041c, 0x24000028 },
+ { 0x0d01041d, 0x24000028 },
+ { 0x0d01041e, 0x24000028 },
+ { 0x0d01041f, 0x24000028 },
+ { 0x0d010420, 0x24000028 },
+ { 0x0d010421, 0x24000028 },
+ { 0x0d010422, 0x24000028 },
+ { 0x0d010423, 0x24000028 },
+ { 0x0d010424, 0x24000028 },
+ { 0x0d010425, 0x24000028 },
+ { 0x0d010426, 0x24000028 },
+ { 0x0d010427, 0x24000028 },
+ { 0x0d010428, 0x1400ffd8 },
+ { 0x0d010429, 0x1400ffd8 },
+ { 0x0d01042a, 0x1400ffd8 },
+ { 0x0d01042b, 0x1400ffd8 },
+ { 0x0d01042c, 0x1400ffd8 },
+ { 0x0d01042d, 0x1400ffd8 },
+ { 0x0d01042e, 0x1400ffd8 },
+ { 0x0d01042f, 0x1400ffd8 },
+ { 0x0d010430, 0x1400ffd8 },
+ { 0x0d010431, 0x1400ffd8 },
+ { 0x0d010432, 0x1400ffd8 },
+ { 0x0d010433, 0x1400ffd8 },
+ { 0x0d010434, 0x1400ffd8 },
+ { 0x0d010435, 0x1400ffd8 },
+ { 0x0d010436, 0x1400ffd8 },
+ { 0x0d010437, 0x1400ffd8 },
+ { 0x0d010438, 0x1400ffd8 },
+ { 0x0d010439, 0x1400ffd8 },
+ { 0x0d01043a, 0x1400ffd8 },
+ { 0x0d01043b, 0x1400ffd8 },
+ { 0x0d01043c, 0x1400ffd8 },
+ { 0x0d01043d, 0x1400ffd8 },
+ { 0x0d01043e, 0x1400ffd8 },
+ { 0x0d01043f, 0x1400ffd8 },
+ { 0x0d010440, 0x1400ffd8 },
+ { 0x0d010441, 0x1400ffd8 },
+ { 0x0d010442, 0x1400ffd8 },
+ { 0x0d010443, 0x1400ffd8 },
+ { 0x0d010444, 0x1400ffd8 },
+ { 0x0d010445, 0x1400ffd8 },
+ { 0x0d010446, 0x1400ffd8 },
+ { 0x0d010447, 0x1400ffd8 },
+ { 0x0d010448, 0x1400ffd8 },
+ { 0x0d010449, 0x1400ffd8 },
+ { 0x0d01044a, 0x1400ffd8 },
+ { 0x0d01044b, 0x1400ffd8 },
+ { 0x0d01044c, 0x1400ffd8 },
+ { 0x0d01044d, 0x1400ffd8 },
+ { 0x0d01044e, 0x1400ffd8 },
+ { 0x0d01044f, 0x1400ffd8 },
+ { 0x2e810450, 0x1c00004d },
+ { 0x2c8104a0, 0x34000009 },
+ { 0x0b810800, 0x1c000005 },
+ { 0x0b010808, 0x1c000000 },
+ { 0x0b81080a, 0x1c00002b },
+ { 0x0b810837, 0x1c000001 },
+ { 0x0b01083c, 0x1c000000 },
+ { 0x0b01083f, 0x1c000000 },
+ { 0x41810900, 0x1c000015 },
+ { 0x41810916, 0x3c000003 },
+ { 0x4101091f, 0x54000000 },
+ { 0x1e010a00, 0x1c000000 },
+ { 0x1e810a01, 0x30000002 },
+ { 0x1e810a05, 0x30000001 },
+ { 0x1e810a0c, 0x30000003 },
+ { 0x1e810a10, 0x1c000003 },
+ { 0x1e810a15, 0x1c000002 },
+ { 0x1e810a19, 0x1c00001a },
+ { 0x1e810a38, 0x30000002 },
+ { 0x1e010a3f, 0x30000000 },
+ { 0x1e810a40, 0x3c000007 },
+ { 0x1e810a50, 0x54000008 },
+ { 0x3e812000, 0x1c00036e },
+ { 0x3e812400, 0x38000062 },
+ { 0x3e812470, 0x54000003 },
+ { 0x0981d000, 0x680000f5 },
+ { 0x0981d100, 0x68000026 },
+ { 0x0981d12a, 0x6800003a },
+ { 0x0981d165, 0x28000001 },
+ { 0x1b81d167, 0x30000002 },
+ { 0x0981d16a, 0x68000002 },
+ { 0x0981d16d, 0x28000005 },
+ { 0x0981d173, 0x04000007 },
+ { 0x1b81d17b, 0x30000007 },
+ { 0x0981d183, 0x68000001 },
+ { 0x1b81d185, 0x30000006 },
+ { 0x0981d18c, 0x6800001d },
+ { 0x1b81d1aa, 0x30000003 },
+ { 0x0981d1ae, 0x6800002f },
+ { 0x1381d200, 0x68000041 },
+ { 0x1381d242, 0x30000002 },
+ { 0x1301d245, 0x68000000 },
+ { 0x0981d300, 0x68000056 },
+ { 0x0981d360, 0x3c000011 },
+ { 0x0981d400, 0x24000019 },
+ { 0x0981d41a, 0x14000019 },
+ { 0x0981d434, 0x24000019 },
+ { 0x0981d44e, 0x14000006 },
+ { 0x0981d456, 0x14000011 },
+ { 0x0981d468, 0x24000019 },
+ { 0x0981d482, 0x14000019 },
+ { 0x0901d49c, 0x24000000 },
+ { 0x0981d49e, 0x24000001 },
+ { 0x0901d4a2, 0x24000000 },
+ { 0x0981d4a5, 0x24000001 },
+ { 0x0981d4a9, 0x24000003 },
+ { 0x0981d4ae, 0x24000007 },
+ { 0x0981d4b6, 0x14000003 },
+ { 0x0901d4bb, 0x14000000 },
+ { 0x0981d4bd, 0x14000006 },
+ { 0x0981d4c5, 0x1400000a },
+ { 0x0981d4d0, 0x24000019 },
+ { 0x0981d4ea, 0x14000019 },
+ { 0x0981d504, 0x24000001 },
+ { 0x0981d507, 0x24000003 },
+ { 0x0981d50d, 0x24000007 },
+ { 0x0981d516, 0x24000006 },
+ { 0x0981d51e, 0x14000019 },
+ { 0x0981d538, 0x24000001 },
+ { 0x0981d53b, 0x24000003 },
+ { 0x0981d540, 0x24000004 },
+ { 0x0901d546, 0x24000000 },
+ { 0x0981d54a, 0x24000006 },
+ { 0x0981d552, 0x14000019 },
+ { 0x0981d56c, 0x24000019 },
+ { 0x0981d586, 0x14000019 },
+ { 0x0981d5a0, 0x24000019 },
+ { 0x0981d5ba, 0x14000019 },
+ { 0x0981d5d4, 0x24000019 },
+ { 0x0981d5ee, 0x14000019 },
+ { 0x0981d608, 0x24000019 },
+ { 0x0981d622, 0x14000019 },
+ { 0x0981d63c, 0x24000019 },
+ { 0x0981d656, 0x14000019 },
+ { 0x0981d670, 0x24000019 },
+ { 0x0981d68a, 0x1400001b },
+ { 0x0981d6a8, 0x24000018 },
+ { 0x0901d6c1, 0x64000000 },
+ { 0x0981d6c2, 0x14000018 },
+ { 0x0901d6db, 0x64000000 },
+ { 0x0981d6dc, 0x14000005 },
+ { 0x0981d6e2, 0x24000018 },
+ { 0x0901d6fb, 0x64000000 },
+ { 0x0981d6fc, 0x14000018 },
+ { 0x0901d715, 0x64000000 },
+ { 0x0981d716, 0x14000005 },
+ { 0x0981d71c, 0x24000018 },
+ { 0x0901d735, 0x64000000 },
+ { 0x0981d736, 0x14000018 },
+ { 0x0901d74f, 0x64000000 },
+ { 0x0981d750, 0x14000005 },
+ { 0x0981d756, 0x24000018 },
+ { 0x0901d76f, 0x64000000 },
+ { 0x0981d770, 0x14000018 },
+ { 0x0901d789, 0x64000000 },
+ { 0x0981d78a, 0x14000005 },
+ { 0x0981d790, 0x24000018 },
+ { 0x0901d7a9, 0x64000000 },
+ { 0x0981d7aa, 0x14000018 },
+ { 0x0901d7c3, 0x64000000 },
+ { 0x0981d7c4, 0x14000005 },
+ { 0x0901d7ca, 0x24000000 },
+ { 0x0901d7cb, 0x14000000 },
+ { 0x0981d7ce, 0x34000031 },
+ { 0x16820000, 0x1c00a6d6 },
+ { 0x1682f800, 0x1c00021d },
+ { 0x090e0001, 0x04000000 },
+ { 0x098e0020, 0x0400005f },
+ { 0x1b8e0100, 0x300000ef },
+ { 0x098f0000, 0x0c00fffd },
+ { 0x09900000, 0x0c00fffd },
+};
diff --git a/src/third_party/pcre.py b/src/third_party/pcre.py
new file mode 100644
index 00000000000..f33a71fe726
--- /dev/null
+++ b/src/third_party/pcre.py
@@ -0,0 +1,42 @@
+
+import os
+
+root = "src/third_party/pcre-7.4"
+
+def getFiles():
+
+ def pcreFilter(x):
+ if x.endswith( "dftables.c" ):
+ return False
+ if x.endswith( "pcredemo.c" ):
+ return False
+ if x.endswith( "pcretest.c" ):
+ return False
+ if x.endswith( "unittest.cc" ):
+ return False
+ if x.endswith( "pcregrep.c" ):
+ return False
+ return x.endswith( ".c" ) or x.endswith( ".cc" )
+
+ files = [ root + "/" + x for x in filter( pcreFilter , os.listdir( root ) ) ]
+
+ return files
+
+def configure( env , fileLists , options ):
+ #fileLists = { "serverOnlyFiles" : [] }
+
+ env.Prepend( CPPPATH=["./" + root + "/"] )
+
+ myenv = env.Clone()
+ myenv.Append( CPPDEFINES=["HAVE_CONFIG_H"] )
+ fileLists["commonFiles"] += [ myenv.Object(f) for f in getFiles() ]
+ fileLists["moduleFiles"]["pcre"] = [ myenv.Object(f) for f in getFiles() ]
+
+def configureSystem( env , fileLists , options ):
+
+ env.Append( LIBS=[ "pcrecpp" ] )
+
+
+if __name__ == "__main__":
+ for x in getFiles():
+ print( x )
diff --git a/src/third_party/sm.py b/src/third_party/sm.py
new file mode 100644
index 00000000000..19a4dcbdae0
--- /dev/null
+++ b/src/third_party/sm.py
@@ -0,0 +1,114 @@
+import os
+import buildscripts.utils
+
+basicFiles = [ "jsapi.c" ,
+ "jsarena.c" ,
+ "jsarray.c" ,
+ "jsatom.c" ,
+ "jsbool.c" ,
+ "jscntxt.c" ,
+ "jsdate.c" ,
+ "jsdbgapi.c" ,
+ "jsdhash.c" ,
+ "jsdtoa.c" ,
+ "jsemit.c" ,
+ "jsexn.c" ,
+ "jsfun.c" ,
+ "jsgc.c" ,
+ "jshash.c" ,
+ "jsiter.c" ,
+ "jsinterp.c" ,
+ "jslock.c" ,
+ "jslog2.c" ,
+ "jslong.c" ,
+ "jsmath.c" ,
+ "jsnum.c" ,
+ "jsobj.c" ,
+ "jsopcode.c" ,
+ "jsparse.c" ,
+ "jsprf.c" ,
+ "jsregexp.c" ,
+ "jsscan.c" ,
+ "jsscope.c" ,
+ "jsscript.c" ,
+ "jsstr.c" ,
+ "jsutil.c" ,
+ "jsxdrapi.c" ,
+ "jsxml.c" ,
+ "prmjtime.c" ]
+
+root = "src/third_party/js-1.7"
+
+def r(x):
+ return "%s/%s" % ( root , x )
+
+def configureBasics( env , fileLists , options ):
+ if options["windows"]:
+ env.Append( CPPDEFINES=[ "XP_WIN" ] )
+ else:
+ env.Append( CPPDEFINES=[ "XP_UNIX" ] )
+
+
+
+def configure( env , fileLists , options ):
+ if not options["usesm"]:
+ return
+
+ configureBasics( env , fileLists , options )
+
+ env.Prepend( CPPPATH=[root] )
+
+ myenv = env.Clone()
+ myenv.Append( CPPDEFINES=[ "JSFILE" , "EXPORT_JS_API" , "JS_C_STRINGS_ARE_UTF8" ] )
+ myenv["CPPFLAGS"] = myenv["CPPFLAGS"].replace( "-Werror" , "" )
+
+ if options["windows"]:
+ myenv["CPPFLAGS"] = myenv["CPPFLAGS"].replace( "/TP" , "" )
+ myenv["CPPFLAGS"] = myenv["CPPFLAGS"].replace( "/O2" , "" )
+ myenv["CPPFLAGS"] = myenv["CPPFLAGS"].replace( "/Gy" , "" )
+ myenv.Append( CPPFLAGS=" /wd4748 " )
+
+
+ if "NDEBUG" in myenv["CPPDEFINES"]:
+ myenv["CPPDEFINES"].remove( "NDEBUG" )
+
+ if os.sys.platform.startswith( "linux" ) or os.sys.platform == "darwin":
+ myenv["CPPDEFINES"] += [ "HAVE_VA_COPY" , "VA_COPY=va_copy" ]
+
+ elif "sunos5" == os.sys.platform:
+ myenv.Append( CPPDEFINES=[ "SOLARIS" , "HAVE_VA_LIST_AS_ARRAY" , "SVR4" , "SYSV" , "HAVE_LOCALTIME_R" ] )
+
+ fileLists["scriptingFiles"] += [ myenv.Object(root + "/" + f) for f in basicFiles ]
+
+ jskwgen = str( myenv.Program( r("jskwgen") , [ r("jskwgen.c") ] )[0] )
+ jscpucfg = str( myenv.Program( r("jscpucfg") , [ r("jscpucfg.c") ] )[0] )
+
+ def buildAutoFile( target , source , env ):
+ outFile = str( target[0] )
+
+ cmd = str( source[0] )
+ if options["nix"]:
+ cmd = "./" + cmd
+
+ output = buildscripts.utils.execsys( cmd )[0]
+ output = output.replace( '\r' , '\n' )
+ out = open( outFile , 'w' )
+ out.write( output )
+ return None
+
+ autoBuilder = myenv.Builder( action = buildAutoFile , suffix = '.h')
+
+ myenv.Append( BUILDERS={ 'Auto' : autoBuilder } )
+ myenv.Auto( r("jsautokw.h") , [ jskwgen ] )
+ myenv.Auto( r("jsautocfg.h") , [ jscpucfg ] )
+
+ myenv.Depends( r("jsscan.c") , r("jsautokw.h") )
+
+
+def configureSystem( env , fileLists , options ):
+ if not options["usesm"]:
+ return
+
+ configureBasics( env , fileLists , options )
+
+ env.Append( LIBS=[ "js" ] )
diff --git a/src/third_party/snappy.py b/src/third_party/snappy.py
new file mode 100644
index 00000000000..7949bbd555f
--- /dev/null
+++ b/src/third_party/snappy.py
@@ -0,0 +1,14 @@
+
+def configure( env , fileLists , options ):
+ #fileLists = { "serverOnlyFiles" : [] }
+
+ myenv = env.Clone()
+ if not options["windows"]:
+ myenv.Append(CPPFLAGS=" -Wno-sign-compare -Wno-unused-function ") #snappy doesn't compile cleanly
+
+ files = ["src/third_party/snappy/snappy.cc", "src/third_party/snappy/snappy-sinksource.cc"]
+
+ fileLists["serverOnlyFiles"] += [ myenv.Object(f) for f in files ]
+
+def configureSystem( env , fileLists , options ):
+ configure( env , fileLists , options )
diff --git a/src/third_party/snappy/COPYING b/src/third_party/snappy/COPYING
new file mode 100755
index 00000000000..8d6bd9fed4e
--- /dev/null
+++ b/src/third_party/snappy/COPYING
@@ -0,0 +1,28 @@
+Copyright 2011, Google Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/third_party/snappy/README b/src/third_party/snappy/README
new file mode 100755
index 00000000000..df8f0e178e2
--- /dev/null
+++ b/src/third_party/snappy/README
@@ -0,0 +1,135 @@
+Snappy, a fast compressor/decompressor.
+
+
+Introduction
+============
+
+Snappy is a compression/decompression library. It does not aim for maximum
+compression, or compatibility with any other compression library; instead,
+it aims for very high speeds and reasonable compression. For instance,
+compared to the fastest mode of zlib, Snappy is an order of magnitude faster
+for most inputs, but the resulting compressed files are anywhere from 20% to
+100% bigger. (For more information, see "Performance", below.)
+
+Snappy has the following properties:
+
+ * Fast: Compression speeds at 250 MB/sec and beyond, with no assembler code.
+ See "Performance" below.
+ * Stable: Over the last few years, Snappy has compressed and decompressed
+ petabytes of data in Google's production environment. The Snappy bitstream
+ format is stable and will not change between versions.
+ * Robust: The Snappy decompressor is designed not to crash in the face of
+ corrupted or malicious input.
+ * Free and open source software: Snappy is licensed under a BSD-type license.
+ For more information, see the included COPYING file.
+
+Snappy has previously been called "Zippy" in some Google presentations
+and the like.
+
+
+Performance
+===========
+
+Snappy is intended to be fast. On a single core of a Core i7 processor
+in 64-bit mode, it compresses at about 250 MB/sec or more and decompresses at
+about 500 MB/sec or more. (These numbers are for the slowest inputs in our
+benchmark suite; others are much faster.) In our tests, Snappy usually
+is faster than algorithms in the same class (e.g. LZO, LZF, FastLZ, QuickLZ,
+etc.) while achieving comparable compression ratios.
+
+Typical compression ratios (based on the benchmark suite) are about 1.5-1.7x
+for plain text, about 2-4x for HTML, and of course 1.0x for JPEGs, PNGs and
+other already-compressed data. Similar numbers for zlib in its fastest mode
+are 2.6-2.8x, 3-7x and 1.0x, respectively. More sophisticated algorithms are
+capable of achieving yet higher compression rates, although usually at the
+expense of speed. Of course, compression ratio will vary significantly with
+the input.
+
+Although Snappy should be fairly portable, it is primarily optimized
+for 64-bit x86-compatible processors, and may run slower in other environments.
+In particular:
+
+ - Snappy uses 64-bit operations in several places to process more data at
+ once than would otherwise be possible.
+ - Snappy assumes unaligned 32- and 64-bit loads and stores are cheap.
+ On some platforms, these must be emulated with single-byte loads
+ and stores, which is much slower.
+ - Snappy assumes little-endian throughout, and needs to byte-swap data in
+ several places if running on a big-endian platform.
+
+Experience has shown that even heavily tuned code can be improved.
+Performance optimizations, whether for 64-bit x86 or other platforms,
+are of course most welcome; see "Contact", below.
+
+
+Usage
+=====
+
+Note that Snappy, both the implementation and the main interface,
+is written in C++. However, several third-party bindings to other languages
+are available; see the Google Code page at http://code.google.com/p/snappy/
+for more information. Also, if you want to use Snappy from C code, you can
+use the included C bindings in snappy-c.h.
+
+To use Snappy from your own C++ program, include the file "snappy.h" from
+your calling file, and link against the compiled library.
+
+There are many ways to call Snappy, but the simplest possible is
+
+ snappy::Compress(input, &output);
+
+and similarly
+
+ snappy::Uncompress(input, &output);
+
+where "input" and "output" are both instances of std::string.
+
+There are other interfaces that are more flexible in various ways, including
+support for custom (non-array) input sources. See the header file for more
+information.
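+
+Putting the two calls together, a minimal round trip (a sketch, assuming the
+library has been built and linked as described above, and using the
+explicit-length variants declared in snappy.h) might look like:
+
+  #include <string>
+  #include "snappy.h"
+
+  std::string original = "Hello, world! Hello, world!";
+  std::string compressed, restored;
+  snappy::Compress(original.data(), original.size(), &compressed);
+  snappy::Uncompress(compressed.data(), compressed.size(), &restored);
+  // restored == original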
+
+
+Tests and benchmarks
+====================
+
+When you compile Snappy, snappy_unittest is compiled in addition to the
+library itself. You do not need it to use the compressor from your own library,
+but it contains several useful components for Snappy development.
+
+First of all, it contains unit tests, verifying correctness on your machine in
+various scenarios. If you want to change or optimize Snappy, please run the
+tests to verify you have not broken anything. Note that if you have the
+Google Test library installed, unit test behavior (especially failures) will be
+significantly more user-friendly. You can find Google Test at
+
+ http://code.google.com/p/googletest/
+
+You probably also want the gflags library for handling of command-line flags;
+you can find it at
+
+ http://code.google.com/p/google-gflags/
+
+In addition to the unit tests, snappy contains microbenchmarks used to
+tune compression and decompression performance. These are automatically run
+before the unit tests, but you can disable them using the flag
+--run_microbenchmarks=false if you have gflags installed (otherwise you will
+need to edit the source).
+
+Finally, snappy can benchmark Snappy against a few other compression libraries
+(zlib, LZO, LZF, FastLZ and QuickLZ), if they were detected at configure time.
+To benchmark using a given file, give the compression algorithm you want to test
+Snappy against (e.g. --zlib) and then a list of one or more file names on the
+command line. The testdata/ directory contains the files used by the
+microbenchmark, which should provide a reasonably balanced starting point for
+benchmarking. (Note that baddata[1-3].snappy are not intended as benchmarks; they
+are used to verify correctness in the presence of corrupted data in the unit
+test.)
+
+
+Contact
+=======
+
+Snappy is distributed through Google Code. For the latest version, a bug tracker,
+and other information, see
+
+ http://code.google.com/p/snappy/
diff --git a/src/third_party/snappy/config.h b/src/third_party/snappy/config.h
new file mode 100755
index 00000000000..bfc3b30087f
--- /dev/null
+++ b/src/third_party/snappy/config.h
@@ -0,0 +1,124 @@
+/* config.h.in. Generated from configure.ac by autoheader. */
+
+/* Define if building universal (internal helper macro) */
+//#undef AC_APPLE_UNIVERSAL_BUILD
+
+#if defined(_WIN32)
+// signed/unsigned mismatch
+#pragma warning( disable : 4018 )
+#endif
+
+/* Define to 1 if the compiler supports __builtin_ctz and friends. */
+#if defined(__GNUC__)
+#define HAVE_BUILTIN_CTZ 1
+#endif
+
+/* Define to 1 if the compiler supports __builtin_expect. */
+#if defined(__GNUC__)
+#define HAVE_BUILTIN_EXPECT 1
+#endif
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+#if !defined(_WIN32)
+#define HAVE_DLFCN_H 1
+#endif
+
+/* Use the gflags package for command-line parsing. */
+#undef HAVE_GFLAGS
+
+/* Defined when Google Test is available. */
+#undef HAVE_GTEST
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+#define HAVE_INTTYPES_H 1
+
+/* Define to 1 if you have the `fastlz' library (-lfastlz). */
+#undef HAVE_LIBFASTLZ
+
+/* Define to 1 if you have the `lzf' library (-llzf). */
+#undef HAVE_LIBLZF
+
+/* Define to 1 if you have the `lzo2' library (-llzo2). */
+#undef HAVE_LIBLZO2
+
+/* Define to 1 if you have the `quicklz' library (-lquicklz). */
+#undef HAVE_LIBQUICKLZ
+
+/* Define to 1 if you have the `z' library (-lz). */
+#undef HAVE_LIBZ
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the <stddef.h> header file. */
+#define HAVE_STDDEF_H 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+#define HAVE_STDINT_H 1
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define HAVE_STRING_H 1
+
+/* Define to 1 if you have the <sys/mman.h> header file. */
+#if !defined(_WIN32)
+#define HAVE_SYS_MMAN_H 1
+#endif
+
+/* Define to 1 if you have the <sys/resource.h> header file. */
+#define HAVE_SYS_RESOURCE_H 1
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+#define HAVE_SYS_TYPES_H 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+#define HAVE_UNISTD_H 1
+
+/* Define to 1 if you have the <windows.h> header file. */
+#if defined(_WIN32)
+#define HAVE_WINDOWS_H 1
+#endif
+
+/* Define to the sub-directory in which libtool stores uninstalled libraries.
+ */
+#define LT_OBJDIR "libs/"
+
+/* Name of package */
+#define PACKAGE "snappy"
+
+#define PACKAGE_BUGREPORT ""
+
+/* Define to the full name of this package. */
+#define PACKAGE_NAME "snappy"
+
+/* Define to the full name and version of this package. */
+#define PACKAGE_STRING "snappy 1.0.3"
+
+/* Define to the one symbol short name of this package. */
+#define PACKAGE_TARNAME "snappy"
+
+/* Define to the home page for this package. */
+#define PACKAGE_URL ""
+
+/* Define to the version of this package. */
+#define PACKAGE_VERSION "1.0.3"
+
+/* Define to 1 if you have the ANSI C header files. */
+#define STDC_HEADERS 1
+
+/* Version number of package */
+#define VERSION "1.0.3"
+
+/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
+ significant byte first (like Motorola and SPARC, unlike Intel). */
+#if defined(__BIG_ENDIAN__)
+#define WORDS_BIGENDIAN 1
+#endif
diff --git a/src/third_party/snappy/snappy-internal.h b/src/third_party/snappy/snappy-internal.h
new file mode 100755
index 00000000000..a32eda59fb2
--- /dev/null
+++ b/src/third_party/snappy/snappy-internal.h
@@ -0,0 +1,150 @@
+// Copyright 2008 Google Inc. All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Internals shared between the Snappy implementation and its unittest.
+
+#ifndef UTIL_SNAPPY_SNAPPY_INTERNAL_H_
+#define UTIL_SNAPPY_SNAPPY_INTERNAL_H_
+
+#include "snappy-stubs-internal.h"
+
+namespace snappy {
+namespace internal {
+
+class WorkingMemory {
+ public:
+ WorkingMemory() : large_table_(NULL) { }
+ ~WorkingMemory() { delete[] large_table_; }
+
+ // Allocates and clears a hash table using memory in "*this",
+ // stores the number of buckets in "*table_size" and returns a pointer to
+ // the base of the hash table.
+ uint16* GetHashTable(size_t input_size, int* table_size);
+
+ private:
+ uint16 small_table_[1<<10]; // 2KB
+ uint16* large_table_; // Allocated only when needed
+
+ DISALLOW_COPY_AND_ASSIGN(WorkingMemory);
+};
+
+// Flat array compression that does not emit the "uncompressed length"
+// prefix. Compresses "input" string to the "*op" buffer.
+//
+// REQUIRES: "input_length <= kBlockSize"
+// REQUIRES: "op" points to an array of memory that is at least
+// "MaxCompressedLength(input_length)" in size.
+// REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
+// REQUIRES: "table_size" is a power of two
+//
+// Returns an "end" pointer into "op" buffer.
+// "end - op" is the compressed size of "input".
+char* CompressFragment(const char* input,
+ size_t input_length,
+ char* op,
+ uint16* table,
+ const int table_size);
+
+// Return the largest n such that
+//
+// s1[0,n-1] == s2[0,n-1]
+// and n <= (s2_limit - s2).
+//
+// Does not read *s2_limit or beyond.
+// Does not read *(s1 + (s2_limit - s2)) or beyond.
+// Requires that s2_limit >= s2.
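+//
+// For example (illustrative): with s1 = "abcdef", s2 = "abcdxx" and
+// s2_limit = s2 + 6, the first four bytes agree, so the result is 4.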
+//
+// Separate implementation for x86_64, for speed. Uses the fact that
+// x86_64 is little endian.
+#if defined(ARCH_K8)
+static inline int FindMatchLength(const char* s1,
+ const char* s2,
+ const char* s2_limit) {
+ DCHECK_GE(s2_limit, s2);
+ int matched = 0;
+
+ // Find out how long the match is. We loop over the data 64 bits at a
+ // time until we find a 64-bit block that doesn't match; then we find
+ // the first non-matching bit and use that to calculate the total
+ // length of the match.
+ while (PREDICT_TRUE(s2 <= s2_limit - 8)) {
+ if (PREDICT_FALSE(UNALIGNED_LOAD64(s2) == UNALIGNED_LOAD64(s1 + matched))) {
+ s2 += 8;
+ matched += 8;
+ } else {
+ // On current (mid-2008) Opteron models there is a 3% more
+ // efficient code sequence to find the first non-matching byte.
+ // However, what follows is ~10% better on Intel Core 2 and newer,
+ // and we expect AMD's bsf instruction to improve.
+ uint64 x = UNALIGNED_LOAD64(s2) ^ UNALIGNED_LOAD64(s1 + matched);
+ int matching_bits = Bits::FindLSBSetNonZero64(x);
+ matched += matching_bits >> 3;
+ return matched;
+ }
+ }
+ while (PREDICT_TRUE(s2 < s2_limit)) {
+ if (PREDICT_TRUE(s1[matched] == *s2)) {
+ ++s2;
+ ++matched;
+ } else {
+ return matched;
+ }
+ }
+ return matched;
+}
+#else
+static inline int FindMatchLength(const char* s1,
+ const char* s2,
+ const char* s2_limit) {
+ // Implementation based on the x86-64 version, above.
+ DCHECK_GE(s2_limit, s2);
+ int matched = 0;
+
+ while (s2 <= s2_limit - 4 &&
+ UNALIGNED_LOAD32(s2) == UNALIGNED_LOAD32(s1 + matched)) {
+ s2 += 4;
+ matched += 4;
+ }
+ if (LittleEndian::IsLittleEndian() && s2 <= s2_limit - 4) {
+ uint32 x = UNALIGNED_LOAD32(s2) ^ UNALIGNED_LOAD32(s1 + matched);
+ int matching_bits = Bits::FindLSBSetNonZero(x);
+ matched += matching_bits >> 3;
+ } else {
+ while ((s2 < s2_limit) && (s1[matched] == *s2)) {
+ ++s2;
+ ++matched;
+ }
+ }
+ return matched;
+}
+#endif
+
+} // end namespace internal
+} // end namespace snappy
+
+#endif // UTIL_SNAPPY_SNAPPY_INTERNAL_H_
diff --git a/src/third_party/snappy/snappy-sinksource.cc b/src/third_party/snappy/snappy-sinksource.cc
new file mode 100755
index 00000000000..1017895f962
--- /dev/null
+++ b/src/third_party/snappy/snappy-sinksource.cc
@@ -0,0 +1,72 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <string.h>
+
+#include "snappy-sinksource.h"
+
+namespace snappy {
+
+Source::~Source() { }
+
+Sink::~Sink() { }
+
+char* Sink::GetAppendBuffer(size_t length, char* scratch) {
+ return scratch;
+}
+
+ByteArraySource::~ByteArraySource() { }
+
+size_t ByteArraySource::Available() const { return left_; }
+
+const char* ByteArraySource::Peek(size_t* len) {
+ *len = left_;
+ return ptr_;
+}
+
+void ByteArraySource::Skip(size_t n) {
+ left_ -= n;
+ ptr_ += n;
+}
+
+UncheckedByteArraySink::~UncheckedByteArraySink() { }
+
+void UncheckedByteArraySink::Append(const char* data, size_t n) {
+ // Do no copying if the caller filled in the result of GetAppendBuffer()
+ if (data != dest_) {
+ memcpy(dest_, data, n);
+ }
+ dest_ += n;
+}
+
+char* UncheckedByteArraySink::GetAppendBuffer(size_t len, char* scratch) {
+ return dest_;
+}
+
+
+}
diff --git a/src/third_party/snappy/snappy-sinksource.h b/src/third_party/snappy/snappy-sinksource.h
new file mode 100755
index 00000000000..430baeabb0e
--- /dev/null
+++ b/src/third_party/snappy/snappy-sinksource.h
@@ -0,0 +1,136 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef UTIL_SNAPPY_SNAPPY_SINKSOURCE_H_
+#define UTIL_SNAPPY_SNAPPY_SINKSOURCE_H_
+
+#include <stddef.h>
+
+
+namespace snappy {
+
+// A Sink is an interface that consumes a sequence of bytes.
+class Sink {
+ public:
+ Sink() { }
+ virtual ~Sink();
+
+ // Append "bytes[0,n-1]" to this.
+ virtual void Append(const char* bytes, size_t n) = 0;
+
+ // Returns a writable buffer of the specified length for appending.
+ // May return a pointer to the caller-owned scratch buffer which
+ // must have at least the indicated length. The returned buffer is
+ // only valid until the next operation on this Sink.
+ //
+ // After writing at most "length" bytes, call Append() with the
+ // pointer returned from this function and the number of bytes
+ // written. Many Append() implementations will avoid copying
+ // bytes if this function returned an internal buffer.
+ //
+ // If a non-scratch buffer is returned, the caller may only pass a
+ // prefix of it to Append(). That is, it is not correct to pass an
+ // interior pointer of the returned array to Append().
+ //
+ // The default implementation always returns the scratch buffer.
+ virtual char* GetAppendBuffer(size_t length, char* scratch);
+
+ private:
+ // No copying
+ Sink(const Sink&);
+ void operator=(const Sink&);
+};
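+
+// A minimal, illustrative sketch of the intended calling pattern (the names
+// "src", "n" and "kScratchSize" are placeholders):
+//
+//   char scratch[kScratchSize];                 // caller-owned scratch space
+//   char* buf = sink->GetAppendBuffer(n, scratch);
+//   memcpy(buf, src, n);                        // write at most n bytes
+//   sink->Append(buf, n);                       // pass back the same pointer
+//
+// When GetAppendBuffer() returned an internal buffer, Append() can detect
+// that and skip the copy entirely.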
+
+// A Source is an interface that yields a sequence of bytes
+class Source {
+ public:
+ Source() { }
+ virtual ~Source();
+
+ // Return the number of bytes left to read from the source
+ virtual size_t Available() const = 0;
+
+ // Peek at the next flat region of the source. Does not reposition
+ // the source. The returned region is empty iff Available()==0.
+ //
+// Returns a pointer to the beginning of the region and stores its
+ // length in *len.
+ //
+ // The returned region is valid until the next call to Skip() or
+ // until this object is destroyed, whichever occurs first.
+ //
+ // The returned region may be larger than Available() (for example
+ // if this ByteSource is a view on a substring of a larger source).
+ // The caller is responsible for ensuring that it only reads the
+ // Available() bytes.
+ virtual const char* Peek(size_t* len) = 0;
+
+ // Skip the next n bytes. Invalidates any buffer returned by
+ // a previous call to Peek().
+ // REQUIRES: Available() >= n
+ virtual void Skip(size_t n) = 0;
+
+ private:
+ // No copying
+ Source(const Source&);
+ void operator=(const Source&);
+};
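+
+// A minimal, illustrative consumption loop that drains a Source into a Sink:
+//
+//   while (source->Available() > 0) {
+//     size_t len;
+//     const char* region = source->Peek(&len);
+//     size_t n = std::min(len, source->Available());  // region may exceed Available()
+//     sink->Append(region, n);
+//     source->Skip(n);
+//   }
+//
+// Peek() never repositions the source; only Skip() consumes bytes.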
+
+// A Source implementation that yields the contents of a flat array
+class ByteArraySource : public Source {
+ public:
+ ByteArraySource(const char* p, size_t n) : ptr_(p), left_(n) { }
+ virtual ~ByteArraySource();
+ virtual size_t Available() const;
+ virtual const char* Peek(size_t* len);
+ virtual void Skip(size_t n);
+ private:
+ const char* ptr_;
+ size_t left_;
+};
+
+// A Sink implementation that writes to a flat array without any bound checks.
+class UncheckedByteArraySink : public Sink {
+ public:
+ explicit UncheckedByteArraySink(char* dest) : dest_(dest) { }
+ virtual ~UncheckedByteArraySink();
+ virtual void Append(const char* data, size_t n);
+ virtual char* GetAppendBuffer(size_t len, char* scratch);
+
+ // Return the current output pointer so that a caller can see how
+ // many bytes were produced.
+ // Note: this is not a Sink method.
+ char* CurrentDestination() const { return dest_; }
+ private:
+ char* dest_;
+};
+
+
+}
+
+#endif // UTIL_SNAPPY_SNAPPY_SINKSOURCE_H_
diff --git a/src/third_party/snappy/snappy-stubs-internal.cc b/src/third_party/snappy/snappy-stubs-internal.cc
new file mode 100755
index 00000000000..6ed334371f1
--- /dev/null
+++ b/src/third_party/snappy/snappy-stubs-internal.cc
@@ -0,0 +1,42 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <algorithm>
+#include <string>
+
+#include "snappy-stubs-internal.h"
+
+namespace snappy {
+
+void Varint::Append32(string* s, uint32 value) {
+ char buf[Varint::kMax32];
+ const char* p = Varint::Encode32(buf, value);
+ s->append(buf, p - buf);
+}
+
+} // namespace snappy
diff --git a/src/third_party/snappy/snappy-stubs-internal.h b/src/third_party/snappy/snappy-stubs-internal.h
new file mode 100755
index 00000000000..355a06bc568
--- /dev/null
+++ b/src/third_party/snappy/snappy-stubs-internal.h
@@ -0,0 +1,478 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Various stubs for the open-source version of Snappy.
+
+#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
+#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <iostream>
+#include <string>
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifdef HAVE_SYS_MMAN_H
+#include <sys/mman.h>
+#endif
+
+#include "snappy-stubs-public.h"
+
+#if defined(__x86_64__)
+
+// Enable 64-bit optimized versions of some routines.
+#define ARCH_K8 1
+
+#endif
+
+// Needed by OS X, among others.
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+// Pull in std::min, std::ostream, and the likes. This is safe because this
+// header file is never used from any public header files.
+using namespace std;
+
+// The size of an array, if known at compile-time.
+// Will give unexpected results if used on a pointer.
+// We undefine it first, since some compilers already have a definition.
+#ifdef ARRAYSIZE
+#undef ARRAYSIZE
+#endif
+#define ARRAYSIZE(a) (sizeof(a) / sizeof(*(a)))
+
+// Static prediction hints.
+#ifdef HAVE_BUILTIN_EXPECT
+#define PREDICT_FALSE(x) (__builtin_expect(x, 0))
+#define PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
+#else
+#define PREDICT_FALSE(x) x
+#define PREDICT_TRUE(x) x
+#endif
+
+// This is only used for recomputing the tag byte table used during
+// decompression; for simplicity we just remove it from the open-source
+// version (anyone who wants to regenerate it can just do the call
+// themselves within main()).
+#define DEFINE_bool(flag_name, default_value, description) \
+ bool FLAGS_ ## flag_name = default_value;
+#define DECLARE_bool(flag_name) \
+ extern bool FLAGS_ ## flag_name;
+#define REGISTER_MODULE_INITIALIZER(name, code)
+
+namespace snappy {
+
+static const uint32 kuint32max = static_cast<uint32>(0xFFFFFFFF);
+static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
+
+// Logging.
+
+#define LOG(level) LogMessage()
+#define VLOG(level) true ? (void)0 : \
+ snappy::LogMessageVoidify() & snappy::LogMessage()
+
+class LogMessage {
+ public:
+ LogMessage() { }
+ ~LogMessage() {
+ cerr << endl;
+ }
+
+ LogMessage& operator<<(const std::string& msg) {
+ cerr << msg;
+ return *this;
+ }
+ LogMessage& operator<<(int x) {
+ cerr << x;
+ return *this;
+ }
+};
+
+// Asserts, both versions activated in debug mode only,
+// and ones that are always active.
+
+#define CRASH_UNLESS(condition) \
+ PREDICT_TRUE(condition) ? (void)0 : \
+ snappy::LogMessageVoidify() & snappy::LogMessageCrash()
+
+class LogMessageCrash : public LogMessage {
+ public:
+ LogMessageCrash() { }
+ ~LogMessageCrash() {
+ cerr << endl;
+ abort();
+ }
+};
+
+// This class is used to explicitly ignore values in the conditional
+// logging macros. This avoids compiler warnings like "value computed
+// is not used" and "statement has no effect".
+
+class LogMessageVoidify {
+ public:
+ LogMessageVoidify() { }
+ // This has to be an operator with a precedence lower than << but
+ // higher than ?:
+ void operator&(const LogMessage&) { }
+};
+
+#define CHECK(cond) CRASH_UNLESS(cond)
+#define CHECK_LE(a, b) CRASH_UNLESS((a) <= (b))
+#define CHECK_GE(a, b) CRASH_UNLESS((a) >= (b))
+#define CHECK_EQ(a, b) CRASH_UNLESS((a) == (b))
+#define CHECK_NE(a, b) CRASH_UNLESS((a) != (b))
+#define CHECK_LT(a, b) CRASH_UNLESS((a) < (b))
+#define CHECK_GT(a, b) CRASH_UNLESS((a) > (b))
+
+#ifdef NDEBUG
+
+#define DCHECK(cond) CRASH_UNLESS(true)
+#define DCHECK_LE(a, b) CRASH_UNLESS(true)
+#define DCHECK_GE(a, b) CRASH_UNLESS(true)
+#define DCHECK_EQ(a, b) CRASH_UNLESS(true)
+#define DCHECK_NE(a, b) CRASH_UNLESS(true)
+#define DCHECK_LT(a, b) CRASH_UNLESS(true)
+#define DCHECK_GT(a, b) CRASH_UNLESS(true)
+
+#else
+
+#define DCHECK(cond) CHECK(cond)
+#define DCHECK_LE(a, b) CHECK_LE(a, b)
+#define DCHECK_GE(a, b) CHECK_GE(a, b)
+#define DCHECK_EQ(a, b) CHECK_EQ(a, b)
+#define DCHECK_NE(a, b) CHECK_NE(a, b)
+#define DCHECK_LT(a, b) CHECK_LT(a, b)
+#define DCHECK_GT(a, b) CHECK_GT(a, b)
+
+#endif
+
+// Potentially unaligned loads and stores.
+
+#if 1
+//#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || defined(_WIN32)
+
+#define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
+#define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
+#define UNALIGNED_LOAD64(_p) (*reinterpret_cast<const uint64 *>(_p))
+
+#define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
+#define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
+#define UNALIGNED_STORE64(_p, _val) (*reinterpret_cast<uint64 *>(_p) = (_val))
+
+#else
+
+// These functions are provided for architectures that don't support
+// unaligned loads and stores.
+
+inline uint16 UNALIGNED_LOAD16(const void *p) {
+ uint16 t;
+ memcpy(&t, p, sizeof t);
+ return t;
+}
+
+inline uint32 UNALIGNED_LOAD32(const void *p) {
+ uint32 t;
+ memcpy(&t, p, sizeof t);
+ return t;
+}
+
+inline uint64 UNALIGNED_LOAD64(const void *p) {
+ uint64 t;
+ memcpy(&t, p, sizeof t);
+ return t;
+}
+
+inline void UNALIGNED_STORE16(void *p, uint16 v) {
+ memcpy(p, &v, sizeof v);
+}
+
+inline void UNALIGNED_STORE32(void *p, uint32 v) {
+ memcpy(p, &v, sizeof v);
+}
+
+inline void UNALIGNED_STORE64(void *p, uint64 v) {
+ memcpy(p, &v, sizeof v);
+}
+
+#endif
+
+// The following guarantees declaration of the byte swap functions.
+#ifdef WORDS_BIGENDIAN
+
+#ifdef _MSC_VER
+#include <stdlib.h>
+#define bswap_16(x) _byteswap_ushort(x)
+#define bswap_32(x) _byteswap_ulong(x)
+#define bswap_64(x) _byteswap_uint64(x)
+
+#elif defined(__APPLE__)
+// Mac OS X / Darwin features
+#include <libkern/OSByteOrder.h>
+#define bswap_16(x) OSSwapInt16(x)
+#define bswap_32(x) OSSwapInt32(x)
+#define bswap_64(x) OSSwapInt64(x)
+
+#else
+#include <byteswap.h>
+#endif
+
+#endif // WORDS_BIGENDIAN
+
+// Convert to little-endian storage, opposite of network format.
+// Convert x from host to little endian: x = LittleEndian.FromHost(x);
+// convert x from little endian to host: x = LittleEndian.ToHost(x);
+//
+// Store values into unaligned memory converting to little endian order:
+// LittleEndian.Store16(p, x);
+//
+// Load unaligned values stored in little endian converting to host order:
+// x = LittleEndian.Load16(p);
+class LittleEndian {
+ public:
+ // Conversion functions.
+#ifdef WORDS_BIGENDIAN
+
+ static uint16 FromHost16(uint16 x) { return bswap_16(x); }
+ static uint16 ToHost16(uint16 x) { return bswap_16(x); }
+
+ static uint32 FromHost32(uint32 x) { return bswap_32(x); }
+ static uint32 ToHost32(uint32 x) { return bswap_32(x); }
+
+ static bool IsLittleEndian() { return false; }
+
+#else // !defined(WORDS_BIGENDIAN)
+
+ static uint16 FromHost16(uint16 x) { return x; }
+ static uint16 ToHost16(uint16 x) { return x; }
+
+ static uint32 FromHost32(uint32 x) { return x; }
+ static uint32 ToHost32(uint32 x) { return x; }
+
+ static bool IsLittleEndian() { return true; }
+
+#endif // !defined(WORDS_BIGENDIAN)
+
+ // Functions to do unaligned loads and stores in little-endian order.
+ static uint16 Load16(const void *p) {
+ return ToHost16(UNALIGNED_LOAD16(p));
+ }
+
+ static void Store16(void *p, uint16 v) {
+ UNALIGNED_STORE16(p, FromHost16(v));
+ }
+
+ static uint32 Load32(const void *p) {
+ return ToHost32(UNALIGNED_LOAD32(p));
+ }
+
+ static void Store32(void *p, uint32 v) {
+ UNALIGNED_STORE32(p, FromHost32(v));
+ }
+};
+
+// Some bit-manipulation functions.
+class Bits {
+ public:
+ // Return floor(log2(n)) for positive integer n. Returns -1 iff n == 0.
+ static int Log2Floor(uint32 n);
+
+ // Return the first set least / most significant bit, 0-indexed. Returns an
+ // undefined value if n == 0. FindLSBSetNonZero() is similar to ffs() except
+ // that it's 0-indexed.
+ static int FindLSBSetNonZero(uint32 n);
+ static int FindLSBSetNonZero64(uint64 n);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Bits);
+};
+
+#ifdef HAVE_BUILTIN_CTZ
+
+inline int Bits::Log2Floor(uint32 n) {
+ return n == 0 ? -1 : 31 ^ __builtin_clz(n);
+}
+
+inline int Bits::FindLSBSetNonZero(uint32 n) {
+ return __builtin_ctz(n);
+}
+
+inline int Bits::FindLSBSetNonZero64(uint64 n) {
+ return __builtin_ctzll(n);
+}
+
+#else // Portable versions.
+
+inline int Bits::Log2Floor(uint32 n) {
+ if (n == 0)
+ return -1;
+ int log = 0;
+ uint32 value = n;
+ for (int i = 4; i >= 0; --i) {
+ int shift = (1 << i);
+ uint32 x = value >> shift;
+ if (x != 0) {
+ value = x;
+ log += shift;
+ }
+ }
+ assert(value == 1);
+ return log;
+}
+
+inline int Bits::FindLSBSetNonZero(uint32 n) {
+ int rc = 31;
+ for (int i = 4, shift = 1 << 4; i >= 0; --i) {
+ const uint32 x = n << shift;
+ if (x != 0) {
+ n = x;
+ rc -= shift;
+ }
+ shift >>= 1;
+ }
+ return rc;
+}
+
+// FindLSBSetNonZero64() is defined in terms of FindLSBSetNonZero().
+inline int Bits::FindLSBSetNonZero64(uint64 n) {
+ const uint32 bottombits = static_cast<uint32>(n);
+ if (bottombits == 0) {
+ // Bottom bits are zero, so scan in top bits
+ return 32 + FindLSBSetNonZero(static_cast<uint32>(n >> 32));
+ } else {
+ return FindLSBSetNonZero(bottombits);
+ }
+}
+
+#endif // End portable versions.
+
+// Variable-length integer encoding.
+class Varint {
+ public:
+ // Maximum lengths of varint encoding of uint32.
+ static const int kMax32 = 5;
+
+ // Attempts to parse a varint32 from a prefix of the bytes in [ptr,limit-1].
+ // Never reads a character at or beyond limit. If a valid/terminated varint32
+ // was found in the range, stores it in *OUTPUT and returns a pointer just
+ // past the last byte of the varint32. Else returns NULL. On success,
+ // "result <= limit".
+ static const char* Parse32WithLimit(const char* ptr, const char* limit,
+ uint32* OUTPUT);
+
+ // REQUIRES "ptr" points to a buffer of length sufficient to hold "v".
+ // EFFECTS Encodes "v" into "ptr" and returns a pointer to the
+ // byte just past the last encoded byte.
+ static char* Encode32(char* ptr, uint32 v);
+
+ // EFFECTS Appends the varint representation of "value" to "*s".
+ static void Append32(string* s, uint32 value);
+};
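+
+// Illustrative encoding: the value 300 (0x12C) is emitted as the two bytes
+// 0xAC 0x02, i.e. the low seven bits (0x2C) with the continuation bit (0x80)
+// set, followed by the remaining bits (300 >> 7 == 2).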
+
+inline const char* Varint::Parse32WithLimit(const char* p,
+ const char* l,
+ uint32* OUTPUT) {
+ const unsigned char* ptr = reinterpret_cast<const unsigned char*>(p);
+ const unsigned char* limit = reinterpret_cast<const unsigned char*>(l);
+ uint32 b, result;
+ if (ptr >= limit) return NULL;
+ b = *(ptr++); result = b & 127; if (b < 128) goto done;
+ if (ptr >= limit) return NULL;
+ b = *(ptr++); result |= (b & 127) << 7; if (b < 128) goto done;
+ if (ptr >= limit) return NULL;
+ b = *(ptr++); result |= (b & 127) << 14; if (b < 128) goto done;
+ if (ptr >= limit) return NULL;
+ b = *(ptr++); result |= (b & 127) << 21; if (b < 128) goto done;
+ if (ptr >= limit) return NULL;
+ b = *(ptr++); result |= (b & 127) << 28; if (b < 16) goto done;
+ return NULL; // Value is too long to be a varint32
+ done:
+ *OUTPUT = result;
+ return reinterpret_cast<const char*>(ptr);
+}
+
+inline char* Varint::Encode32(char* sptr, uint32 v) {
+ // Operate on characters as unsigneds
+ unsigned char* ptr = reinterpret_cast<unsigned char*>(sptr);
+ static const int B = 128;
+ if (v < (1<<7)) {
+ *(ptr++) = v;
+ } else if (v < (1<<14)) {
+ *(ptr++) = v | B;
+ *(ptr++) = v>>7;
+ } else if (v < (1<<21)) {
+ *(ptr++) = v | B;
+ *(ptr++) = (v>>7) | B;
+ *(ptr++) = v>>14;
+ } else if (v < (1<<28)) {
+ *(ptr++) = v | B;
+ *(ptr++) = (v>>7) | B;
+ *(ptr++) = (v>>14) | B;
+ *(ptr++) = v>>21;
+ } else {
+ *(ptr++) = v | B;
+ *(ptr++) = (v>>7) | B;
+ *(ptr++) = (v>>14) | B;
+ *(ptr++) = (v>>21) | B;
+ *(ptr++) = v>>28;
+ }
+ return reinterpret_cast<char*>(ptr);
+}
+
+// If you know the internal layout of the std::string in use, you can
+// replace this function with one that resizes the string without
+// filling the new space with zeros (if applicable) --
+// it will be non-portable but faster.
+inline void STLStringResizeUninitialized(string* s, size_t new_size) {
+ s->resize(new_size);
+}
+
+// Return a mutable char* pointing to a string's internal buffer,
+// which may not be null-terminated. Writing through this pointer will
+// modify the string.
+//
+// string_as_array(&str)[i] is valid for 0 <= i < str.size() until the
+// next call to a string method that invalidates iterators.
+//
+// As of 2006-04, there is no standard-blessed way of getting a
+// mutable reference to a string's internal buffer. However, issue 530
+// (http://www.open-std.org/JTC1/SC22/WG21/docs/lwg-defects.html#530)
+// proposes this as the method. It will officially be part of the standard
+// for C++0x. This should already work on all current implementations.
+inline char* string_as_array(string* str) {
+ return str->empty() ? NULL : &*str->begin();
+}
+
+} // namespace snappy
+
+#endif // UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
diff --git a/src/third_party/snappy/snappy-stubs-public.h b/src/third_party/snappy/snappy-stubs-public.h
new file mode 100755
index 00000000000..074d4638866
--- /dev/null
+++ b/src/third_party/snappy/snappy-stubs-public.h
@@ -0,0 +1,85 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Author: sesse@google.com (Steinar H. Gunderson)
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Various type stubs for the open-source version of Snappy.
+//
+// This file cannot include config.h, as it is included from snappy.h,
+// which is a public header. Instead, snappy-stubs-public.h is generated
+// from snappy-stubs-public.h.in at configure time.
+
+#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
+#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
+
+#if !defined(_WIN32)
+#include <stdint.h>
+#endif
+
+#if 1
+#include <stddef.h>
+#endif
+
+#define SNAPPY_MAJOR 1
+#define SNAPPY_MINOR 0
+#define SNAPPY_PATCHLEVEL 3
+#define SNAPPY_VERSION \
+ ((SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL)
+
+#include <string>
+
+namespace snappy {
+
+#if !defined(_WIN32)
+typedef int8_t int8;
+typedef uint8_t uint8;
+typedef int16_t int16;
+typedef uint16_t uint16;
+typedef int32_t int32;
+typedef uint32_t uint32;
+typedef int64_t int64;
+typedef uint64_t uint64;
+#else
+typedef signed char int8;
+typedef unsigned char uint8;
+typedef short int16;
+typedef unsigned short uint16;
+typedef int int32;
+typedef unsigned int uint32;
+typedef long long int64;
+typedef unsigned long long uint64;
+#endif
+
+typedef std::string string;
+
+#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
+ TypeName(const TypeName&); \
+ void operator=(const TypeName&)
+
+} // namespace snappy
+
+#endif // UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
diff --git a/src/third_party/snappy/snappy.cc b/src/third_party/snappy/snappy.cc
new file mode 100755
index 00000000000..fdc67e886c6
--- /dev/null
+++ b/src/third_party/snappy/snappy.cc
@@ -0,0 +1,1026 @@
+// Copyright 2005 Google Inc. All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "snappy.h"
+#include "snappy-internal.h"
+#include "snappy-sinksource.h"
+
+#include <stdio.h>
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+
+namespace snappy {
+
+// Any hash function will produce a valid compressed bitstream, but a good
+// hash function reduces the number of collisions and thus yields better
+// compression for compressible input, and more speed for incompressible
+// input. Of course, it doesn't hurt if the hash function is reasonably fast
+// either, as it gets called a lot.
+static inline uint32 HashBytes(uint32 bytes, int shift) {
+ uint32 kMul = 0x1e35a7bd;
+ return (bytes * kMul) >> shift;
+}
+static inline uint32 Hash(const char* p, int shift) {
+ return HashBytes(UNALIGNED_LOAD32(p), shift);
+}
+
+size_t MaxCompressedLength(size_t source_len) {
+ // Compressed data can be defined as:
+ // compressed := item* literal*
+ // item := literal* copy
+ //
+ // The trailing literal sequence has a space blowup of at most 62/60
+ // since a literal of length 60 needs one tag byte + one extra byte
+ // for length information.
+ //
+ // Item blowup is trickier to measure. Suppose the "copy" op copies
+ // 4 bytes of data. Because of a special check in the encoding code,
+ // we produce a 4-byte copy only if the offset is < 65536. Therefore
+ // the copy op takes 3 bytes to encode, and this type of item leads
+ // to at most the 62/60 blowup for representing literals.
+ //
+ // Suppose the "copy" op copies 5 bytes of data. If the offset is big
+ // enough, it will take 5 bytes to encode the copy op. Therefore the
+ // worst case here is a one-byte literal followed by a five-byte copy.
+ // I.e., 6 bytes of input turn into 7 bytes of "compressed" data.
+ //
+ // This last factor dominates the blowup, so the final estimate is:
+ return 32 + source_len + source_len/6;
+}
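+
+// A worked instance of the bound above (illustrative): a 60000-byte input
+// yields 32 + 60000 + 60000/6 = 70032, i.e. roughly 17% worst-case expansion
+// plus the constant 32-byte slop.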
+
+enum {
+ LITERAL = 0,
+ COPY_1_BYTE_OFFSET = 1, // 3 bit length + 3 bits of offset in opcode
+ COPY_2_BYTE_OFFSET = 2,
+ COPY_4_BYTE_OFFSET = 3
+};
+
+// Copy "len" bytes from "src" to "op", one byte at a time. Used for
+// handling COPY operations where the input and output regions may
+// overlap. For example, suppose:
+// src == "ab"
+// op == src + 2
+// len == 20
+// After IncrementalCopy(src, op, len), the result will have
+// eleven copies of "ab"
+// ababababababababababab
+// Note that this does not match the semantics of either memcpy()
+// or memmove().
+static inline void IncrementalCopy(const char* src, char* op, int len) {
+ DCHECK_GT(len, 0);
+ do {
+ *op++ = *src++;
+ } while (--len > 0);
+}
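+
+// Illustrative sketch of the overlap semantics described above: because each
+// byte is read only after the earlier bytes have been written, bytes that land
+// in the overlap are re-read as sources, which is what propagates the pattern.
+//
+//   char buf[32] = "ab";                  // buf[2..] is zero-filled
+//   IncrementalCopy(buf, buf + 2, 20);    // buf now begins "ababab..." (22 bytes)
+//   // memmove(buf + 2, buf, 20) would instead copy the *original* 20 bytes,
+//   // giving "abab" followed by the old zero bytes.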
+
+// Equivalent to IncrementalCopy except that it can write up to ten extra
+// bytes after the end of the copy, and that it is faster.
+//
+// The main part of this loop is a simple copy of eight bytes at a time until
+// we've copied (at least) the requested number of bytes. However, if op and
+// src are less than eight bytes apart (indicating a repeating pattern of
+// length < 8), we first need to expand the pattern in order to get the correct
+// results. For instance, if the buffer looks like this, with the eight-byte
+// <src> and <op> patterns marked as intervals:
+//
+// abxxxxxxxxxxxx
+// [------] src
+// [------] op
+//
+// a single eight-byte copy from <src> to <op> will repeat the pattern once,
+// after which we can move <op> two bytes without moving <src>:
+//
+// ababxxxxxxxxxx
+// [------] src
+// [------] op
+//
+// and repeat the exercise until the two no longer overlap.
+//
+// This allows us to do very well in the special case of one single byte
+// repeated many times, without taking a big hit for more general cases.
+//
+// The worst case of extra writing past the end of the match occurs when
+// op - src == 1 and len == 1; the last copy will read from byte positions
+// [0..7] and write to [4..11], whereas it was only supposed to write to
+// position 1. Thus, ten excess bytes.
+
+namespace {
+
+const int kMaxIncrementCopyOverflow = 10;
+
+} // namespace
+
+static inline void IncrementalCopyFastPath(const char* src, char* op, int len) {
+ while (op - src < 8) {
+ UNALIGNED_STORE64(op, UNALIGNED_LOAD64(src));
+ len -= op - src;
+ op += op - src;
+ }
+ while (len > 0) {
+ UNALIGNED_STORE64(op, UNALIGNED_LOAD64(src));
+ src += 8;
+ op += 8;
+ len -= 8;
+ }
+}
+
+static inline char* EmitLiteral(char* op,
+ const char* literal,
+ int len,
+ bool allow_fast_path) {
+ int n = len - 1; // Zero-length literals are disallowed
+ if (n < 60) {
+ // Fits in tag byte
+ *op++ = LITERAL | (n << 2);
+
+ // The vast majority of copies are below 16 bytes, for which a
+ // call to memcpy is overkill. This fast path can sometimes
+ // copy up to 15 bytes too much, but that is okay in the
+ // main loop, since we have a bit to go on for both sides:
+ //
+ // - The input will always have kInputMarginBytes = 15 extra
+ // available bytes, as long as we're in the main loop, and
+ // if not, allow_fast_path = false.
+ // - The output will always have 32 spare bytes (see
+ // MaxCompressedLength).
+ if (allow_fast_path && len <= 16) {
+ UNALIGNED_STORE64(op, UNALIGNED_LOAD64(literal));
+ UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(literal + 8));
+ return op + len;
+ }
+ } else {
+ // Encode in upcoming bytes
+ char* base = op;
+ int count = 0;
+ op++;
+ while (n > 0) {
+ *op++ = n & 0xff;
+ n >>= 8;
+ count++;
+ }
+ assert(count >= 1);
+ assert(count <= 4);
+ *base = LITERAL | ((59+count) << 2);
+ }
+ memcpy(op, literal, len);
+ return op + len;
+}
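+
+// Worked examples of the tag bytes produced above (illustrative):
+//   len = 5   -> n = 4  < 60 : one tag byte, LITERAL | (4 << 2) = 0x10,
+//                              followed by the 5 literal bytes.
+//   len = 100 -> n = 99 >= 60: tag byte LITERAL | ((59 + 1) << 2) = 0xf0,
+//                              then one length byte (99), then the 100 bytes.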
+
+static inline char* EmitCopyLessThan64(char* op, int offset, int len) {
+ DCHECK_LE(len, 64);
+ DCHECK_GE(len, 4);
+ DCHECK_LT(offset, 65536);
+
+ if ((len < 12) && (offset < 2048)) {
+ int len_minus_4 = len - 4;
+ assert(len_minus_4 < 8); // Must fit in 3 bits
+ *op++ = COPY_1_BYTE_OFFSET | ((len_minus_4) << 2) | ((offset >> 8) << 5);
+ *op++ = offset & 0xff;
+ } else {
+ *op++ = COPY_2_BYTE_OFFSET | ((len-1) << 2);
+ LittleEndian::Store16(op, offset);
+ op += 2;
+ }
+ return op;
+}
+
+static inline char* EmitCopy(char* op, int offset, int len) {
+ // Emit 64-byte copies, but make sure to keep at least four bytes reserved
+ while (len >= 68) {
+ op = EmitCopyLessThan64(op, offset, 64);
+ len -= 64;
+ }
+
+ // Emit an extra 60-byte copy if we have too much data to fit in one copy
+ if (len > 64) {
+ op = EmitCopyLessThan64(op, offset, 60);
+ len -= 60;
+ }
+
+ // Emit remainder
+ op = EmitCopyLessThan64(op, offset, len);
+ return op;
+}
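+
+// Illustrative traces of the length splitting above:
+//   len = 150: 150 >= 68 -> emit 64 (86 left), 86 >= 68 -> emit 64 (22 left),
+//              then the 22-byte remainder: three copy ops.
+//   len = 67 : not >= 68, but > 64 -> emit 60 (7 left), then the 7-byte
+//              remainder, so the final copy stays at least 4 bytes long.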
+
+
+bool GetUncompressedLength(const char* start, size_t n, size_t* result) {
+ uint32 v = 0;
+ const char* limit = start + n;
+ if (Varint::Parse32WithLimit(start, limit, &v) != NULL) {
+ *result = v;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+namespace internal {
+uint16* WorkingMemory::GetHashTable(size_t input_size, int* table_size) {
+ // Use smaller hash table when input.size() is smaller, since we
+ // fill the table, incurring O(hash table size) overhead for
+ // compression, and if the input is short, we won't need that
+ // many hash table entries anyway.
+ assert(kMaxHashTableSize >= 256);
+ int htsize = 256;
+ while (htsize < kMaxHashTableSize && htsize < input_size) {
+ htsize <<= 1;
+ }
+ CHECK_EQ(0, htsize & (htsize - 1)) << ": must be power of two";
+ CHECK_LE(htsize, kMaxHashTableSize) << ": hash table too large";
+
+ uint16* table;
+ if (htsize <= ARRAYSIZE(small_table_)) {
+ table = small_table_;
+ } else {
+ if (large_table_ == NULL) {
+ large_table_ = new uint16[kMaxHashTableSize];
+ }
+ table = large_table_;
+ }
+
+ *table_size = htsize;
+ memset(table, 0, htsize * sizeof(*table));
+ return table;
+}
+} // end namespace internal
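+
+// Illustrative sizing produced by GetHashTable() above, with
+// kMaxHashTableSize = 1 << 14 (see snappy.h):
+//   input_size =    200  ->  htsize =   256   (the minimum)
+//   input_size =   5000  ->  htsize =  8192   (first power of two >= input_size)
+//   input_size = 100000  ->  htsize = 16384   (capped at kMaxHashTableSize)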
+
+#if defined(_WIN32)
+// signed/unsigned mismatch
+# pragma warning( disable : 4244 )
+#endif
+
+// For 0 <= offset <= 4, GetUint32AtOffset(UNALIGNED_LOAD64(p), offset) will
+// equal UNALIGNED_LOAD32(p + offset). Motivation: On x86-64 hardware we have
+// empirically found that overlapping loads such as
+// UNALIGNED_LOAD32(p) ... UNALIGNED_LOAD32(p+1) ... UNALIGNED_LOAD32(p+2)
+// are slower than UNALIGNED_LOAD64(p) followed by shifts and casts to uint32.
+static inline uint32 GetUint32AtOffset(uint64 v, int offset) {
+ DCHECK(0 <= offset && offset <= 4) << offset;
+ return v >> (LittleEndian::IsLittleEndian() ? 8 * offset : 32 - 8 * offset);
+}
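+
+// For example (little-endian case): with v = UNALIGNED_LOAD64(p),
+// GetUint32AtOffset(v, 2) returns the low 32 bits of v >> 16, i.e. bytes
+// p[2..5] -- the same value UNALIGNED_LOAD32(p + 2) would produce.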
+
+// Flat array compression that does not emit the "uncompressed length"
+// prefix. Compresses "input" string to the "*op" buffer.
+//
+// REQUIRES: "input" is at most "kBlockSize" bytes long.
+// REQUIRES: "op" points to an array of memory that is at least
+// "MaxCompressedLength(input.size())" in size.
+// REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
+// REQUIRES: "table_size" is a power of two
+//
+// Returns an "end" pointer into "op" buffer.
+// "end - op" is the compressed size of "input".
+namespace internal {
+char* CompressFragment(const char* const input,
+ const size_t input_size,
+ char* op,
+ uint16* table,
+ const int table_size) {
+ // "ip" is the input pointer, and "op" is the output pointer.
+ const char* ip = input;
+ CHECK_LE(input_size, kBlockSize);
+ CHECK_EQ(table_size & (table_size - 1), 0) << ": table must be power of two";
+ const int shift = 32 - Bits::Log2Floor(table_size);
+ DCHECK_EQ(kuint32max >> shift, table_size - 1);
+ const char* ip_end = input + input_size;
+ const char* base_ip = ip;
+ // Bytes in [next_emit, ip) will be emitted as literal bytes. Or
+ // [next_emit, ip_end) after the main loop.
+ const char* next_emit = ip;
+
+ const int kInputMarginBytes = 15;
+ if (PREDICT_TRUE(input_size >= kInputMarginBytes)) {
+ const char* ip_limit = input + input_size - kInputMarginBytes;
+
+ for (uint32 next_hash = Hash(++ip, shift); ; ) {
+ DCHECK_LT(next_emit, ip);
+ // The body of this loop calls EmitLiteral once and then EmitCopy one or
+ // more times. (The exception is that when we're close to exhausting
+ // the input we goto emit_remainder.)
+ //
+ // In the first iteration of this loop we're just starting, so
+ // there's nothing to copy, so calling EmitLiteral once is
+ // necessary. And we only start a new iteration when the
+ // current iteration has determined that a call to EmitLiteral will
+ // precede the next call to EmitCopy (if any).
+ //
+ // Step 1: Scan forward in the input looking for a 4-byte-long match.
+ // If we get close to exhausting the input then goto emit_remainder.
+ //
+ // Heuristic match skipping: If 32 bytes are scanned with no matches
+ // found, start looking only at every other byte. If 32 more bytes are
+ // scanned, look at every third byte, etc. When a match is found,
+ // immediately go back to looking at every byte. This is a small loss
+ // (~5% performance, ~0.1% density) for compressible data due to more
+ // bookkeeping, but for non-compressible data (such as JPEG) it's a huge
+ // win since the compressor quickly "realizes" the data is incompressible
+ // and doesn't bother looking for matches everywhere.
+ //
+ // The "skip" variable keeps track of how many bytes there are since the
+ // last match; dividing it by 32 (i.e. right-shifting by five) gives the
+ // number of bytes to move ahead for each iteration.
+ uint32 skip = 32;
+
+ const char* next_ip = ip;
+ const char* candidate;
+ do {
+ ip = next_ip;
+ uint32 hash = next_hash;
+ DCHECK_EQ(hash, Hash(ip, shift));
+ uint32 bytes_between_hash_lookups = skip++ >> 5;
+ next_ip = ip + bytes_between_hash_lookups;
+ if (PREDICT_FALSE(next_ip > ip_limit)) {
+ goto emit_remainder;
+ }
+ next_hash = Hash(next_ip, shift);
+ candidate = base_ip + table[hash];
+ DCHECK_GE(candidate, base_ip);
+ DCHECK_LT(candidate, ip);
+
+ table[hash] = ip - base_ip;
+ } while (PREDICT_TRUE(UNALIGNED_LOAD32(ip) !=
+ UNALIGNED_LOAD32(candidate)));
+
+ // Step 2: A 4-byte match has been found. We'll later see if more
+ // than 4 bytes match. But, prior to the match, input
+ // bytes [next_emit, ip) are unmatched. Emit them as "literal bytes."
+ DCHECK_LE(next_emit + 16, ip_end);
+ op = EmitLiteral(op, next_emit, ip - next_emit, true);
+
+ // Step 3: Call EmitCopy, and then see if another EmitCopy could
+ // be our next move. Repeat until we find no match for the
+ // input immediately after what was consumed by the last EmitCopy call.
+ //
+ // If we exit this loop normally then we need to call EmitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can exit
+ // this loop via goto if we get close to exhausting the input.
+ uint64 input_bytes = 0;
+ uint32 candidate_bytes = 0;
+
+ do {
+ // We have a 4-byte match at ip, and no need to emit any
+ // "literal bytes" prior to ip.
+ const char* base = ip;
+ int matched = 4 + FindMatchLength(candidate + 4, ip + 4, ip_end);
+ ip += matched;
+ int offset = base - candidate;
+ DCHECK_EQ(0, memcmp(base, candidate, matched));
+ op = EmitCopy(op, offset, matched);
+ // We could immediately start working at ip now, but to improve
+ // compression we first update table[Hash(ip - 1, ...)].
+ const char* insert_tail = ip - 1;
+ next_emit = ip;
+ if (PREDICT_FALSE(ip >= ip_limit)) {
+ goto emit_remainder;
+ }
+ input_bytes = UNALIGNED_LOAD64(insert_tail);
+ uint32 prev_hash = HashBytes(GetUint32AtOffset(input_bytes, 0), shift);
+ table[prev_hash] = ip - base_ip - 1;
+ uint32 cur_hash = HashBytes(GetUint32AtOffset(input_bytes, 1), shift);
+ candidate = base_ip + table[cur_hash];
+ candidate_bytes = UNALIGNED_LOAD32(candidate);
+ table[cur_hash] = ip - base_ip;
+ } while (GetUint32AtOffset(input_bytes, 1) == candidate_bytes);
+
+ next_hash = HashBytes(GetUint32AtOffset(input_bytes, 2), shift);
+ ++ip;
+ }
+ }
+
+ emit_remainder:
+ // Emit the remaining bytes as a literal
+ if (next_emit < ip_end) {
+ op = EmitLiteral(op, next_emit, ip_end - next_emit, false);
+ }
+
+ return op;
+}
+} // end namespace internal
+
+// Signature of output types needed by decompression code.
+// The decompression code is templatized on a type that obeys this
+// signature so that we do not pay virtual function call overhead in
+// the middle of a tight decompression loop.
+//
+// class DecompressionWriter {
+// public:
+// // Called before decompression
+// void SetExpectedLength(size_t length);
+//
+// // Called after decompression
+// bool CheckLength() const;
+//
+// // Called repeatedly during decompression
+// bool Append(const char* ip, uint32 length, bool allow_fast_path);
+// bool AppendFromSelf(uint32 offset, uint32 length);
+// };
+//
+// "allow_fast_path" is a parameter that says if there is at least 16
+// readable bytes in "ip". It is currently only used by SnappyArrayWriter.
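+//
+// A minimal sketch of a conforming writer (illustrative only; the real
+// implementations below are SnappyArrayWriter and
+// SnappyDecompressionValidator) that appends into a std::string:
+//
+//   class StringAppendWriter {
+//    public:
+//     explicit StringAppendWriter(string* dst) : dst_(dst), expected_(0) { }
+//     void SetExpectedLength(size_t length) { expected_ = length; }
+//     bool CheckLength() const { return dst_->size() == expected_; }
+//     bool Append(const char* ip, uint32 length, bool /*allow_fast_path*/) {
+//       dst_->append(ip, length);
+//       return dst_->size() <= expected_;
+//     }
+//     bool AppendFromSelf(uint32 offset, uint32 length) {
+//       if (offset == 0 || offset > dst_->size()) return false;
+//       size_t start = dst_->size() - offset;
+//       for (uint32 i = 0; i < length; i++)   // regions may overlap: copy bytewise
+//         dst_->push_back((*dst_)[start + i]);
+//       return dst_->size() <= expected_;
+//     }
+//    private:
+//     string* dst_;
+//     size_t expected_;
+//   };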
+
+// -----------------------------------------------------------------------
+// Lookup table for decompression code. Generated by ComputeTable() below.
+// -----------------------------------------------------------------------
+
+// Mapping from i in range [0,4] to a mask to extract the bottom 8*i bits
+static const uint32 wordmask[] = {
+ 0u, 0xffu, 0xffffu, 0xffffffu, 0xffffffffu
+};
+
+// Data stored per entry in lookup table:
+// Range Bits-used Description
+// ------------------------------------
+// 1..64 0..7 Literal/copy length encoded in opcode byte
+// 0..7 8..10 Copy offset encoded in opcode byte / 256
+// 0..4 11..13 Extra bytes after opcode
+//
+// We use eight bits for the length even though 7 would have sufficed,
+// for efficiency reasons:
+// (1) Extracting a byte is faster than a bit-field
+// (2) It properly aligns copy offset so we do not need a <<8
+static const uint16 char_table[256] = {
+ 0x0001, 0x0804, 0x1001, 0x2001, 0x0002, 0x0805, 0x1002, 0x2002,
+ 0x0003, 0x0806, 0x1003, 0x2003, 0x0004, 0x0807, 0x1004, 0x2004,
+ 0x0005, 0x0808, 0x1005, 0x2005, 0x0006, 0x0809, 0x1006, 0x2006,
+ 0x0007, 0x080a, 0x1007, 0x2007, 0x0008, 0x080b, 0x1008, 0x2008,
+ 0x0009, 0x0904, 0x1009, 0x2009, 0x000a, 0x0905, 0x100a, 0x200a,
+ 0x000b, 0x0906, 0x100b, 0x200b, 0x000c, 0x0907, 0x100c, 0x200c,
+ 0x000d, 0x0908, 0x100d, 0x200d, 0x000e, 0x0909, 0x100e, 0x200e,
+ 0x000f, 0x090a, 0x100f, 0x200f, 0x0010, 0x090b, 0x1010, 0x2010,
+ 0x0011, 0x0a04, 0x1011, 0x2011, 0x0012, 0x0a05, 0x1012, 0x2012,
+ 0x0013, 0x0a06, 0x1013, 0x2013, 0x0014, 0x0a07, 0x1014, 0x2014,
+ 0x0015, 0x0a08, 0x1015, 0x2015, 0x0016, 0x0a09, 0x1016, 0x2016,
+ 0x0017, 0x0a0a, 0x1017, 0x2017, 0x0018, 0x0a0b, 0x1018, 0x2018,
+ 0x0019, 0x0b04, 0x1019, 0x2019, 0x001a, 0x0b05, 0x101a, 0x201a,
+ 0x001b, 0x0b06, 0x101b, 0x201b, 0x001c, 0x0b07, 0x101c, 0x201c,
+ 0x001d, 0x0b08, 0x101d, 0x201d, 0x001e, 0x0b09, 0x101e, 0x201e,
+ 0x001f, 0x0b0a, 0x101f, 0x201f, 0x0020, 0x0b0b, 0x1020, 0x2020,
+ 0x0021, 0x0c04, 0x1021, 0x2021, 0x0022, 0x0c05, 0x1022, 0x2022,
+ 0x0023, 0x0c06, 0x1023, 0x2023, 0x0024, 0x0c07, 0x1024, 0x2024,
+ 0x0025, 0x0c08, 0x1025, 0x2025, 0x0026, 0x0c09, 0x1026, 0x2026,
+ 0x0027, 0x0c0a, 0x1027, 0x2027, 0x0028, 0x0c0b, 0x1028, 0x2028,
+ 0x0029, 0x0d04, 0x1029, 0x2029, 0x002a, 0x0d05, 0x102a, 0x202a,
+ 0x002b, 0x0d06, 0x102b, 0x202b, 0x002c, 0x0d07, 0x102c, 0x202c,
+ 0x002d, 0x0d08, 0x102d, 0x202d, 0x002e, 0x0d09, 0x102e, 0x202e,
+ 0x002f, 0x0d0a, 0x102f, 0x202f, 0x0030, 0x0d0b, 0x1030, 0x2030,
+ 0x0031, 0x0e04, 0x1031, 0x2031, 0x0032, 0x0e05, 0x1032, 0x2032,
+ 0x0033, 0x0e06, 0x1033, 0x2033, 0x0034, 0x0e07, 0x1034, 0x2034,
+ 0x0035, 0x0e08, 0x1035, 0x2035, 0x0036, 0x0e09, 0x1036, 0x2036,
+ 0x0037, 0x0e0a, 0x1037, 0x2037, 0x0038, 0x0e0b, 0x1038, 0x2038,
+ 0x0039, 0x0f04, 0x1039, 0x2039, 0x003a, 0x0f05, 0x103a, 0x203a,
+ 0x003b, 0x0f06, 0x103b, 0x203b, 0x003c, 0x0f07, 0x103c, 0x203c,
+ 0x0801, 0x0f08, 0x103d, 0x203d, 0x1001, 0x0f09, 0x103e, 0x203e,
+ 0x1801, 0x0f0a, 0x103f, 0x203f, 0x2001, 0x0f0b, 0x1040, 0x2040
+};
+
+// In debug mode, allow optional computation of the table at startup.
+// Also, check that the decompression table is correct.
+#ifndef NDEBUG
+DEFINE_bool(snappy_dump_decompression_table, false,
+ "If true, we print the decompression table at startup.");
+
+static uint16 MakeEntry(unsigned int extra,
+ unsigned int len,
+ unsigned int copy_offset) {
+ // Check that all of the fields fit within the allocated space
+ DCHECK_EQ(extra, extra & 0x7); // At most 3 bits
+ DCHECK_EQ(copy_offset, copy_offset & 0x7); // At most 3 bits
+ DCHECK_EQ(len, len & 0x7f); // At most 7 bits
+ return len | (copy_offset << 8) | (extra << 11);
+}
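+
+// Worked example of the packing above (illustrative): a COPY_1_BYTE_OFFSET tag
+// with len = 7 and an offset in [256, 511] (copy_offset = offset >> 8 = 1)
+// needs one extra trailer byte, so
+//   MakeEntry(1, 7, 1) = 7 | (1 << 8) | (1 << 11) = 0x0907,
+// which is char_table[COPY_1_BYTE_OFFSET | ((7-4) << 2) | (1 << 5)]
+//   = char_table[45] = 0x0907 in the table above.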
+
+static void ComputeTable() {
+ uint16 dst[256];
+
+ // Place invalid entries in all places to detect missing initialization
+ int assigned = 0;
+ for (int i = 0; i < 256; i++) {
+ dst[i] = 0xffff;
+ }
+
+ // Small LITERAL entries. We store (len-1) in the top 6 bits.
+ for (unsigned int len = 1; len <= 60; len++) {
+ dst[LITERAL | ((len-1) << 2)] = MakeEntry(0, len, 0);
+ assigned++;
+ }
+
+ // Large LITERAL entries. We use 60..63 in the high 6 bits to
+ // encode the number of bytes of length info that follow the opcode.
+ for (unsigned int extra_bytes = 1; extra_bytes <= 4; extra_bytes++) {
+ // We set the length field in the lookup table to 1 because extra
+ // bytes encode len-1.
+ dst[LITERAL | ((extra_bytes+59) << 2)] = MakeEntry(extra_bytes, 1, 0);
+ assigned++;
+ }
+
+ // COPY_1_BYTE_OFFSET.
+ //
+ // The tag byte in the compressed data stores len-4 in 3 bits, and
+ // offset/256 in 5 bits. offset%256 is stored in the next byte.
+ //
+ // This format is used for length in range [4..11] and offset in
+ // range [0..2047]
+ for (unsigned int len = 4; len < 12; len++) {
+ for (unsigned int offset = 0; offset < 2048; offset += 256) {
+ dst[COPY_1_BYTE_OFFSET | ((len-4)<<2) | ((offset>>8)<<5)] =
+ MakeEntry(1, len, offset>>8);
+ assigned++;
+ }
+ }
+
+ // COPY_2_BYTE_OFFSET.
+ // Tag contains len-1 in top 6 bits, and offset in next two bytes.
+ for (unsigned int len = 1; len <= 64; len++) {
+ dst[COPY_2_BYTE_OFFSET | ((len-1)<<2)] = MakeEntry(2, len, 0);
+ assigned++;
+ }
+
+ // COPY_4_BYTE_OFFSET.
+// Tag contains len-1 in top 6 bits, and offset in next four bytes.
+ for (unsigned int len = 1; len <= 64; len++) {
+ dst[COPY_4_BYTE_OFFSET | ((len-1)<<2)] = MakeEntry(4, len, 0);
+ assigned++;
+ }
+
+ // Check that each entry was initialized exactly once.
+ CHECK_EQ(assigned, 256);
+ for (int i = 0; i < 256; i++) {
+ CHECK_NE(dst[i], 0xffff);
+ }
+
+ if (FLAGS_snappy_dump_decompression_table) {
+ printf("static const uint16 char_table[256] = {\n ");
+ for (int i = 0; i < 256; i++) {
+ printf("0x%04x%s",
+ dst[i],
+ ((i == 255) ? "\n" : (((i%8) == 7) ? ",\n " : ", ")));
+ }
+ printf("};\n");
+ }
+
+ // Check that computed table matched recorded table
+ for (int i = 0; i < 256; i++) {
+ CHECK_EQ(dst[i], char_table[i]);
+ }
+}
+REGISTER_MODULE_INITIALIZER(snappy, ComputeTable());
+#endif /* !NDEBUG */
+
+// Helper class for decompression
+class SnappyDecompressor {
+ private:
+ Source* reader_; // Underlying source of bytes to decompress
+ const char* ip_; // Points to next buffered byte
+ const char* ip_limit_; // Points just past buffered bytes
+ uint32 peeked_; // Bytes peeked from reader (need to skip)
+ bool eof_; // Hit end of input without an error?
+ char scratch_[5]; // Temporary buffer used by RefillTag()
+
+ // Ensure that all of the tag metadata for the next tag is available
+ // in [ip_..ip_limit_-1]. Also ensures that [ip,ip+4] is readable even
+ // if (ip_limit_ - ip_ < 5).
+ //
+ // Returns true on success, false on error or end of input.
+ bool RefillTag();
+
+ public:
+ explicit SnappyDecompressor(Source* reader)
+ : reader_(reader),
+ ip_(NULL),
+ ip_limit_(NULL),
+ peeked_(0),
+ eof_(false) {
+ }
+
+ ~SnappyDecompressor() {
+ // Advance past any bytes we peeked at from the reader
+ reader_->Skip(peeked_);
+ }
+
+ // Returns true iff we have hit the end of the input without an error.
+ bool eof() const {
+ return eof_;
+ }
+
+ // Read the uncompressed length stored at the start of the compressed data.
+ // On success, stores the length in *result and returns true.
+ // On failure, returns false.
+ bool ReadUncompressedLength(uint32* result) {
+ DCHECK(ip_ == NULL); // Must not have read anything yet
+ // Length is encoded in 1..5 bytes
+ *result = 0;
+ uint32 shift = 0;
+ while (true) {
+ if (shift >= 32) return false;
+ size_t n;
+ const char* ip = reader_->Peek(&n);
+ if (n == 0) return false;
+ const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
+ reader_->Skip(1);
+ *result |= static_cast<uint32>(c & 0x7f) << shift;
+ if (c < 128) {
+ break;
+ }
+ shift += 7;
+ }
+ return true;
+ }
+
+ // Process every remaining item in the input, writing the output through
+ // "writer". Returns when the input is exhausted or an error occurs.
+ template <class Writer>
+ void DecompressAllTags(Writer* writer) {
+ const char* ip = ip_;
+ for ( ;; ) {
+ if (ip_limit_ - ip < 5) {
+ ip_ = ip;
+ if (!RefillTag()) return;
+ ip = ip_;
+ }
+
+ const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip++));
+ const uint32 entry = char_table[c];
+ const uint32 trailer = LittleEndian::Load32(ip) & wordmask[entry >> 11];
+ ip += entry >> 11;
+ const uint32 length = entry & 0xff;
+
+ if ((c & 0x3) == LITERAL) {
+ uint32 literal_length = length + trailer;
+ uint32 avail = ip_limit_ - ip;
+ while (avail < literal_length) {
+ bool allow_fast_path = (avail >= 16);
+ if (!writer->Append(ip, avail, allow_fast_path)) return;
+ literal_length -= avail;
+ reader_->Skip(peeked_);
+ size_t n;
+ ip = reader_->Peek(&n);
+ avail = n;
+ peeked_ = avail;
+ if (avail == 0) return; // Premature end of input
+ ip_limit_ = ip + avail;
+ }
+ bool allow_fast_path = (avail >= 16);
+ if (!writer->Append(ip, literal_length, allow_fast_path)) {
+ return;
+ }
+ ip += literal_length;
+ } else {
+ // copy_offset/256 is encoded in bits 8..10. By just fetching
+ // those bits, we get copy_offset (since the bit-field starts at
+ // bit 8).
+ const uint32 copy_offset = entry & 0x700;
+ if (!writer->AppendFromSelf(copy_offset + trailer, length)) {
+ return;
+ }
+ }
+ }
+ }
+};
+
+bool SnappyDecompressor::RefillTag() {
+ const char* ip = ip_;
+ if (ip == ip_limit_) {
+ // Fetch a new fragment from the reader
+ reader_->Skip(peeked_); // All peeked bytes are used up
+ size_t n;
+ ip = reader_->Peek(&n);
+ peeked_ = n;
+ if (n == 0) {
+ eof_ = true;
+ return false;
+ }
+ ip_limit_ = ip + n;
+ }
+
+ // Read the tag character
+ DCHECK_LT(ip, ip_limit_);
+ const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
+ const uint32 entry = char_table[c];
+ const uint32 needed = (entry >> 11) + 1; // +1 byte for 'c'
+ DCHECK_LE(needed, sizeof(scratch_));
+
+ // Read more bytes from reader if needed
+ uint32 nbuf = ip_limit_ - ip;
+ if (nbuf < needed) {
+ // Stitch together bytes from ip and reader to form the word
+ // contents. We store the needed bytes in "scratch_". They
+ // will be consumed immediately by the caller since we do not
+ // read more than we need.
+ memmove(scratch_, ip, nbuf);
+ reader_->Skip(peeked_); // All peeked bytes are used up
+ peeked_ = 0;
+ while (nbuf < needed) {
+ size_t length;
+ const char* src = reader_->Peek(&length);
+ if (length == 0) return false;
+ uint32 to_add = min<uint32>(needed - nbuf, length);
+ memcpy(scratch_ + nbuf, src, to_add);
+ nbuf += to_add;
+ reader_->Skip(to_add);
+ }
+ DCHECK_EQ(nbuf, needed);
+ ip_ = scratch_;
+ ip_limit_ = scratch_ + needed;
+ } else if (nbuf < 5) {
+ // Have enough bytes, but move into scratch_ so that we do not
+ // read past end of input
+ memmove(scratch_, ip, nbuf);
+ reader_->Skip(peeked_); // All peeked bytes are used up
+ peeked_ = 0;
+ ip_ = scratch_;
+ ip_limit_ = scratch_ + nbuf;
+ } else {
+ // Pass pointer to buffer returned by reader_.
+ ip_ = ip;
+ }
+ return true;
+}
+
+template <typename Writer>
+static bool InternalUncompress(Source* r,
+ Writer* writer,
+ uint32 max_len) {
+ // Read the uncompressed length from the front of the compressed input
+ SnappyDecompressor decompressor(r);
+ uint32 uncompressed_len = 0;
+ if (!decompressor.ReadUncompressedLength(&uncompressed_len)) return false;
+ // Protect against possible DoS attack
+ if (static_cast<uint64>(uncompressed_len) > max_len) {
+ return false;
+ }
+
+ writer->SetExpectedLength(uncompressed_len);
+
+ // Process the entire input
+ decompressor.DecompressAllTags(writer);
+ return (decompressor.eof() && writer->CheckLength());
+}
+
+bool GetUncompressedLength(Source* source, uint32* result) {
+ SnappyDecompressor decompressor(source);
+ return decompressor.ReadUncompressedLength(result);
+}
+
+size_t Compress(Source* reader, Sink* writer) {
+ size_t written = 0;
+ int N = reader->Available();
+ char ulength[Varint::kMax32];
+ char* p = Varint::Encode32(ulength, N);
+ writer->Append(ulength, p-ulength);
+ written += (p - ulength);
+
+ internal::WorkingMemory wmem;
+ char* scratch = NULL;
+ char* scratch_output = NULL;
+
+ while (N > 0) {
+ // Get next block to compress (without copying if possible)
+ size_t fragment_size;
+ const char* fragment = reader->Peek(&fragment_size);
+ DCHECK_NE(fragment_size, 0) << ": premature end of input";
+ const int num_to_read = min(N, kBlockSize);
+ size_t bytes_read = fragment_size;
+
+ int pending_advance = 0;
+ if (bytes_read >= num_to_read) {
+ // Buffer returned by reader is large enough
+ pending_advance = num_to_read;
+ fragment_size = num_to_read;
+ } else {
+ // Read into scratch buffer
+ if (scratch == NULL) {
+ // If this is the last iteration, we want to allocate N bytes
+ // of space, otherwise the max possible kBlockSize space.
+ // num_to_read contains exactly the correct value
+ scratch = new char[num_to_read];
+ }
+ memcpy(scratch, fragment, bytes_read);
+ reader->Skip(bytes_read);
+
+ while (bytes_read < num_to_read) {
+ fragment = reader->Peek(&fragment_size);
+ size_t n = min<size_t>(fragment_size, num_to_read - bytes_read);
+ memcpy(scratch + bytes_read, fragment, n);
+ bytes_read += n;
+ reader->Skip(n);
+ }
+ DCHECK_EQ(bytes_read, num_to_read);
+ fragment = scratch;
+ fragment_size = num_to_read;
+ }
+ DCHECK_EQ(fragment_size, num_to_read);
+
+ // Get encoding table for compression
+ int table_size;
+ uint16* table = wmem.GetHashTable(num_to_read, &table_size);
+
+ // Compress input_fragment and append to dest
+ const int max_output = MaxCompressedLength(num_to_read);
+
+ // Need a scratch buffer for the output, in case the byte sink doesn't
+ // have room for us directly.
+ if (scratch_output == NULL) {
+ scratch_output = new char[max_output];
+ } else {
+ // Since we encode kBlockSize regions followed by a region
+ // which is <= kBlockSize in length, a previously allocated
+ // scratch_output[] region is big enough for this iteration.
+ }
+ char* dest = writer->GetAppendBuffer(max_output, scratch_output);
+ char* end = internal::CompressFragment(fragment, fragment_size,
+ dest, table, table_size);
+ writer->Append(dest, end - dest);
+ written += (end - dest);
+
+ N -= num_to_read;
+ reader->Skip(pending_advance);
+ }
+
+ delete[] scratch;
+ delete[] scratch_output;
+
+ return written;
+}
+
+// -----------------------------------------------------------------------
+// Flat array interfaces
+// -----------------------------------------------------------------------
+
+// A type that writes to a flat array.
+// Note that this is not a "ByteSink", but a type that matches the
+// Writer template argument to SnappyDecompressor::DecompressAllTags().
+class SnappyArrayWriter {
+ private:
+ char* base_;
+ char* op_;
+ char* op_limit_;
+
+ public:
+ inline explicit SnappyArrayWriter(char* dst)
+ : base_(dst),
+ op_(dst) {
+ }
+
+ inline void SetExpectedLength(size_t len) {
+ op_limit_ = op_ + len;
+ }
+
+ inline bool CheckLength() const {
+ return op_ == op_limit_;
+ }
+
+ inline bool Append(const char* ip, uint32 len, bool allow_fast_path) {
+ char* op = op_;
+ const int space_left = op_limit_ - op;
+ if (allow_fast_path && len <= 16 && space_left >= 16) {
+ // Fast path, used for the majority (about 90%) of dynamic invocations.
+ UNALIGNED_STORE64(op, UNALIGNED_LOAD64(ip));
+ UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(ip + 8));
+ } else {
+ if (space_left < len) {
+ return false;
+ }
+ memcpy(op, ip, len);
+ }
+ op_ = op + len;
+ return true;
+ }
+
+ inline bool AppendFromSelf(uint32 offset, uint32 len) {
+ char* op = op_;
+ const int space_left = op_limit_ - op;
+
+ if (op - base_ <= offset - 1u) { // -1u catches offset==0
+ return false;
+ }
+ if (len <= 16 && offset >= 8 && space_left >= 16) {
+ // Fast path, used for the majority (70-80%) of dynamic invocations.
+ UNALIGNED_STORE64(op, UNALIGNED_LOAD64(op - offset));
+ UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(op - offset + 8));
+ } else {
+ if (space_left >= len + kMaxIncrementCopyOverflow) {
+ IncrementalCopyFastPath(op - offset, op, len);
+ } else {
+ if (space_left < len) {
+ return false;
+ }
+ IncrementalCopy(op - offset, op, len);
+ }
+ }
+
+ op_ = op + len;
+ return true;
+ }
+};
+
+bool RawUncompress(const char* compressed, size_t n, char* uncompressed) {
+ ByteArraySource reader(compressed, n);
+ return RawUncompress(&reader, uncompressed);
+}
+
+bool RawUncompress(Source* compressed, char* uncompressed) {
+ SnappyArrayWriter output(uncompressed);
+ return InternalUncompress(compressed, &output, kuint32max);
+}
+
+bool Uncompress(const char* compressed, size_t n, string* uncompressed) {
+ size_t ulength;
+ if (!GetUncompressedLength(compressed, n, &ulength)) {
+ return false;
+ }
+ // Protect against possible DoS attack
+ if ((static_cast<uint64>(ulength) + uncompressed->size()) >
+ uncompressed->max_size()) {
+ return false;
+ }
+ STLStringResizeUninitialized(uncompressed, ulength);
+ return RawUncompress(compressed, n, string_as_array(uncompressed));
+}
+
+
+// A Writer that drops everything on the floor and just does validation
+class SnappyDecompressionValidator {
+ private:
+ size_t expected_;
+ size_t produced_;
+
+ public:
+ inline SnappyDecompressionValidator() : produced_(0) { }
+ inline void SetExpectedLength(size_t len) {
+ expected_ = len;
+ }
+ inline bool CheckLength() const {
+ return expected_ == produced_;
+ }
+ inline bool Append(const char* ip, uint32 len, bool allow_fast_path) {
+ produced_ += len;
+ return produced_ <= expected_;
+ }
+ inline bool AppendFromSelf(uint32 offset, uint32 len) {
+ if (produced_ <= offset - 1u) return false; // -1u catches offset==0
+ produced_ += len;
+ return produced_ <= expected_;
+ }
+};
+
+bool IsValidCompressedBuffer(const char* compressed, size_t n) {
+ ByteArraySource reader(compressed, n);
+ SnappyDecompressionValidator writer;
+ return InternalUncompress(&reader, &writer, kuint32max);
+}
+
+void RawCompress(const char* input,
+ size_t input_length,
+ char* compressed,
+ size_t* compressed_length) {
+ ByteArraySource reader(input, input_length);
+ UncheckedByteArraySink writer(compressed);
+ Compress(&reader, &writer);
+
+ // Compute how many bytes were added
+ *compressed_length = (writer.CurrentDestination() - compressed);
+}
+
+size_t Compress(const char* input, size_t input_length, string* compressed) {
+ // Pre-grow the buffer to the max length of the compressed output
+ compressed->resize(MaxCompressedLength(input_length));
+
+ size_t compressed_length;
+ RawCompress(input, input_length, string_as_array(compressed),
+ &compressed_length);
+ compressed->resize(compressed_length);
+ return compressed_length;
+}
+
+
+} // end namespace snappy
+
diff --git a/src/third_party/snappy/snappy.h b/src/third_party/snappy/snappy.h
new file mode 100755
index 00000000000..8d6ef2294f5
--- /dev/null
+++ b/src/third_party/snappy/snappy.h
@@ -0,0 +1,155 @@
+// Copyright 2005 and onwards Google Inc.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// A light-weight compression algorithm. It is designed for speed of
+// compression and decompression, rather than for the utmost in space
+// savings.
+//
+// If you want better compression ratios on data with long repeated sequences,
+// or on data that is similar to other data, while still compressing fast, you
+// might look at first running BMDiff and then compressing its output with
+// Snappy.
+
+#ifndef UTIL_SNAPPY_SNAPPY_H__
+#define UTIL_SNAPPY_SNAPPY_H__
+
+#include <stddef.h>
+#include <string>
+
+#include "snappy-stubs-public.h"
+
+namespace snappy {
+ class Source;
+ class Sink;
+
+ // ------------------------------------------------------------------------
+ // Generic compression/decompression routines.
+ // ------------------------------------------------------------------------
+
+ // Compress the bytes read from "*source" and append to "*sink". Return the
+ // number of bytes written.
+ size_t Compress(Source* source, Sink* sink);
+
+ bool GetUncompressedLength(Source* source, uint32* result);
+
+ // ------------------------------------------------------------------------
+ // Higher-level string based routines (should be sufficient for most users)
+ // ------------------------------------------------------------------------
+
+ // Sets "*output" to the compressed version of "input[0,input_length-1]".
+ // Original contents of *output are lost.
+ //
+ // REQUIRES: "input[]" is not an alias of "*output".
+ size_t Compress(const char* input, size_t input_length, string* output);
+
+ // Decompresses "compressed[0,compressed_length-1]" to "*uncompressed".
+ // Original contents of "*uncompressed" are lost.
+ //
+ // REQUIRES: "compressed[]" is not an alias of "*uncompressed".
+ //
+ // returns false if the message is corrupted and could not be decompressed
+ bool Uncompress(const char* compressed, size_t compressed_length,
+ string* uncompressed);
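+
+ // Example (illustrative round trip with the string-based API; "original"
+ // stands for whatever bytes you want to compress):
+ //    string original = ...;
+ //    string compressed, restored;
+ //    snappy::Compress(original.data(), original.size(), &compressed);
+ //    if (snappy::Uncompress(compressed.data(), compressed.size(), &restored))
+ //      assert(restored == original);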
+
+
+ // ------------------------------------------------------------------------
+ // Lower-level character array based routines. May be useful for
+ // efficiency reasons in certain circumstances.
+ // ------------------------------------------------------------------------
+
+ // REQUIRES: "compressed" must point to an area of memory that is at
+ // least "MaxCompressedLength(input_length)" bytes in length.
+ //
+ // Takes the data stored in "input[0..input_length-1]" and stores
+ // it in the array pointed to by "compressed".
+ //
+ // "*compressed_length" is set to the length of the compressed output.
+ //
+ // Example:
+ // char* output = new char[snappy::MaxCompressedLength(input_length)];
+ // size_t output_length;
+ // RawCompress(input, input_length, output, &output_length);
+ // ... Process(output, output_length) ...
+ // delete [] output;
+ void RawCompress(const char* input,
+ size_t input_length,
+ char* compressed,
+ size_t* compressed_length);
+
+ // Given data in "compressed[0..compressed_length-1]" generated by
+ // calling the Snappy::Compress routine, this routine
+ // stores the uncompressed data to
+ // uncompressed[0..GetUncompressedLength(compressed)-1]
+ // returns false if the message is corrupted and could not be decompressed
+ bool RawUncompress(const char* compressed, size_t compressed_length,
+ char* uncompressed);
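+
+ // Example (illustrative, mirroring the RawCompress example above):
+ //    size_t ulength;
+ //    if (!snappy::GetUncompressedLength(compressed, compressed_length, &ulength))
+ //      ... handle corrupt input ...
+ //    char* uncompressed = new char[ulength];
+ //    if (snappy::RawUncompress(compressed, compressed_length, uncompressed))
+ //      ... Process(uncompressed, ulength) ...
+ //    delete [] uncompressed;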
+
+ // Given data from the byte source 'compressed' generated by calling
+ // the Snappy::Compress routine, this routine stores the uncompressed
+ // data to
+ // uncompressed[0..GetUncompressedLength(compressed,compressed_length)-1]
+ // returns false if the message is corrupted and could not be decompressed
+ bool RawUncompress(Source* compressed, char* uncompressed);
+
+ // Returns the maximal size of the compressed representation of
+ // input data that is "source_bytes" bytes in length.
+ size_t MaxCompressedLength(size_t source_bytes);
+
+ // REQUIRES: "compressed[]" was produced by RawCompress() or Compress()
+ // Returns true and stores the length of the uncompressed data in
+ // *result normally. Returns false on parsing error.
+ // This operation takes O(1) time.
+ bool GetUncompressedLength(const char* compressed, size_t compressed_length,
+ size_t* result);
+
+ // Returns true iff the contents of "compressed[]" can be uncompressed
+ // successfully. Does not return the uncompressed data. Takes
+ // time proportional to compressed_length, but is usually at least
+ // a factor of four faster than actual decompression.
+ bool IsValidCompressedBuffer(const char* compressed,
+ size_t compressed_length);
+
+ // *** DO NOT CHANGE THE VALUE OF kBlockSize ***
+ //
+ // New Compression code chops up the input into blocks of at most
+ // the following size. This ensures that back-references in the
+ // output never cross kBlockSize block boundaries. This can be
+ // helpful in implementing blocked decompression. However the
+ // decompression code should not rely on this guarantee since older
+ // compression code may not obey it.
+ static const int kBlockLog = 15;
+ static const int kBlockSize = 1 << kBlockLog;
+
+ static const int kMaxHashTableBits = 14;
+ static const int kMaxHashTableSize = 1 << kMaxHashTableBits;
+
+} // end namespace snappy
+
+
+#endif // UTIL_SNAPPY_SNAPPY_H__